diff --git a/EdgeDatabase.go b/EdgeDatabase.go index f2ea566..6a6542c 100644 --- a/EdgeDatabase.go +++ b/EdgeDatabase.go @@ -195,7 +195,7 @@ func insertUserMessage(c *fiber.Ctx, content string) edgedb.UUID { return inserted.id } -func insertBotMessage(c *fiber.Ctx, content string, selected bool, llmUUID edgedb.UUID) edgedb.UUID { +func insertBotMessage(c *fiber.Ctx, content string, llmUUID edgedb.UUID) edgedb.UUID { var lastArea Area err := edgeGlobalClient.WithGlobals(map[string]interface{}{"ext::auth::client_token": c.Cookies("jade-edgedb-auth-token")}).QuerySingle(edgeCtx, ` SELECT Area { @@ -215,15 +215,15 @@ func insertBotMessage(c *fiber.Ctx, content string, selected bool, llmUUID edged INSERT Message { role := $0, content := $2, - selected := $3, + selected := false, conversation := ( SELECT Area - FILTER .id = $4 + FILTER .id = $3 LIMIT 1 ).conversation, area := ( SELECT Area - FILTER .id = $4 + FILTER .id = $3 LIMIT 1 ), llm := ( @@ -232,7 +232,7 @@ func insertBotMessage(c *fiber.Ctx, content string, selected bool, llmUUID edged LIMIT 1 ) } - `, &inserted, "bot", llmUUID, content, selected, lastArea.ID) + `, &inserted, "bot", llmUUID, content, lastArea.ID) if err != nil { fmt.Println("Error inserting bot message") panic(err) diff --git a/JadeInternalError.md b/JadeInternalError.md new file mode 100644 index 0000000..196c789 --- /dev/null +++ b/JadeInternalError.md @@ -0,0 +1,33 @@ +# Request errors +The format of request error is: aa-bb-cccc +Where aa is the provider number, bb the error type and cccc the error code. 
+ +## Provider number +00: OpenAI +01: Anthropic +02: Mistral +03: Google +04: Groq +05: Nim +06: Perplexity +07: TogetherAI +08: DeepSeek +09: Firework +10: Custom +99: JADE + +## Error type +00: Database error +01: Golang error +02: HTTP error +03: Missing status code + +## Error code +0000: Can't find API key in database +0001: Error using `jsonBody, err := json.Marshal(requestBody)` in RequestProvider function +0002: Error using `req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody))` in RequestProvider function +0003: Error using `resp, err := client.Do(req)` in RequestProvider function +0004: Error using `body, err := io.ReadAll(resp.Body)` in RequestProvider function +0005: Error using `err = json.Unmarshal(body, &chatCompletionResponse)` in RequestProvider function +0006: Can't find modelInfo in database +0007: No choice at the end of request. Mostly because an error is missing. diff --git a/Request.go b/Request.go index 6d4363c..19f1099 100644 --- a/Request.go +++ b/Request.go @@ -117,6 +117,8 @@ func GenerateMultipleMessagesHandler(c *fiber.Ctx) error { authCookie := c.Cookies("jade-edgedb-auth-token") + var messages []Message = getAllSelectedMessages(c) + // Create a wait group to synchronize the goroutines var wg sync.WaitGroup @@ -136,38 +138,34 @@ func GenerateMultipleMessagesHandler(c *fiber.Ctx) error { defer cancel() // Ensure the context is cancelled to free resources // Determine which message function to call based on the model - var addMessageFunc func(c *fiber.Ctx, selectedLLM LLM, selected bool) edgedb.UUID + var addMessageFunc func(c *fiber.Ctx, llm LLM, messages []Message) string switch selectedLLMs[idx].Model.Company.Name { case "openai": - addMessageFunc = addOpenaiMessage + addMessageFunc = RequestOpenai case "anthropic": - addMessageFunc = addAnthropicMessage + addMessageFunc = RequestAnthropic case "mistral": - addMessageFunc = addMistralMessage + addMessageFunc = RequestMistral case "groq": - addMessageFunc = 
addGroqMessage - case "gooseai": - addMessageFunc = addGooseaiMessage + addMessageFunc = RequestGroq case "huggingface": - addMessageFunc = addHuggingfaceMessage + addMessageFunc = RequestHuggingface case "google": - addMessageFunc = addGoogleMessage + addMessageFunc = RequestGoogle case "perplexity": - addMessageFunc = addPerplexityMessage + addMessageFunc = RequestPerplexity case "fireworks": - addMessageFunc = addFireworkMessage + addMessageFunc = RequestFirework case "nim": - addMessageFunc = addNimMessage + addMessageFunc = RequestNim case "together": - addMessageFunc = addTogetherMessage + addMessageFunc = RequestTogether case "deepseek": - addMessageFunc = addDeepseekMessage + addMessageFunc = RequestDeepseek } - var messageID edgedb.UUID - if addMessageFunc != nil { - messageID = addMessageFunc(c, selectedLLMs[idx], false) - } + var content string = addMessageFunc(c, selectedLLMs[idx], messages) + var messageUUID edgedb.UUID = insertBotMessage(c, content, selectedLLMs[idx].ID) var message Message err := edgeGlobalClient.WithGlobals(map[string]interface{}{"ext::auth::client_token": c.Cookies("jade-edgedb-auth-token")}).QuerySingle(edgeCtx, ` @@ -189,7 +187,7 @@ func GenerateMultipleMessagesHandler(c *fiber.Ctx) error { } } FILTER .id = $0; - `, &message, messageID) + `, &message, messageUUID) if err != nil { fmt.Println("Error getting message for the placeholder. 
The function addProviderMessage seem to not return any message ID.") panic(err) @@ -201,6 +199,12 @@ func GenerateMultipleMessagesHandler(c *fiber.Ctx) error { default: select { case firstDone <- idx: + _ = edgeGlobalClient.WithGlobals(map[string]interface{}{"ext::auth::client_token": authCookie}).Execute(edgeCtx, ` + UPDATE Message + FILTER .id = $0 + SET {selected := true}; + `, messageUUID) + outIcon := `User Image` sendEvent( diff --git a/RequestAnthropic.go b/RequestAnthropic.go index f77a929..a2fda05 100644 --- a/RequestAnthropic.go +++ b/RequestAnthropic.go @@ -3,11 +3,10 @@ package main import ( "bytes" "encoding/json" - "fmt" "io" "net/http" + "strings" - "github.com/edgedb/edgedb-go" "github.com/gofiber/fiber/v2" ) @@ -37,22 +36,17 @@ type AnthropicUsage struct { OutputTokens int32 `json:"output_tokens"` } -func addAnthropicMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID { - Messages := getAllSelectedMessages(c) +var AnthropicErrorCodes map[string]string - chatCompletion, err := RequestAnthropic(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken)) - if err != nil { - fmt.Println("Error requesting Anthropic: ", err) - id := insertBotMessage(c, "Error requesting Anthropic, model may not be available anymore. Better error message in development.", selected, llm.ID) - return id - } else if len(chatCompletion.Content) == 0 { - fmt.Println("No response from Anthropic") - id := insertBotMessage(c, "No response from Anthropic", selected, llm.ID) - return id - } else { - id := insertBotMessage(c, chatCompletion.Content[0].Text, selected, llm.ID) - return id - } +// TODO Update +func init() { + AnthropicErrorCodes = make(map[string]string) + AnthropicErrorCodes["401"] = "Invalid Authentication - Ensure that the API key is still valid." + AnthropicErrorCodes["403"] = "Accessing the API from an unsupported country, region, or territory." 
+ AnthropicErrorCodes["429"] = "Rate limit reached for requests - You are sending requests too quickly." + AnthropicErrorCodes["529"] = "Anthropic servers are temporarily overloaded - Please retry your request after a brief wait." + AnthropicErrorCodes["500"] = "Issue on Anthropic servers - Retry your request after a brief wait and contact Anthropic if the issue persists. Check the status page https://status.anthropic.com/." + AnthropicErrorCodes["503"] = "Anthropic servers are experiencing high traffic - Please retry your requests after a brief wait." } func TestAnthropicKey(apiKey string) bool { @@ -110,13 +104,16 @@ return true } -func RequestAnthropic(c *fiber.Ctx, model string, messages []Message, temperature float64, context string, maxTokens int) (AnthropicChatCompletionResponse, error) { +func RequestAnthropic(c *fiber.Ctx, llm LLM, messages []Message) string { + model := llm.Model.ModelID + temperature := float64(llm.Temperature) + context := llm.Context + maxTokens := int(llm.MaxToken) + if maxTokens == 0 { maxTokens = 4096 } - fmt.Println("Requesting anthropic using max token:", maxTokens) - var apiKey struct { Key string `edgedb:"key"` } @@ -128,7 +125,7 @@ func RequestAnthropic(c *fiber.Ctx, model string, messages []Message, temperatur LIMIT 1 `, &apiKey, "anthropic") if err != nil { - return AnthropicChatCompletionResponse{}, fmt.Errorf("error getting Anthropic API key: %w", err) + return "JADE internal error: 01-00-0000. Please contact the support." } url := "https://api.anthropic.com/v1/messages" @@ -143,12 +140,13 @@ func RequestAnthropic(c *fiber.Ctx, model string, messages []Message, temperatur jsonBody, err := json.Marshal(requestBody) if err != nil { - return AnthropicChatCompletionResponse{}, fmt.Errorf("error marshaling JSON: %w", err) + return "JADE internal error: 01-01-0001. Please contact the support." 
+ } req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody)) if err != nil { - return AnthropicChatCompletionResponse{}, fmt.Errorf("error creating request: %w", err) + return "JADE internal error: 01-02-0002. Please contact the support." } req.Header.Set("x-api-key", apiKey.Key) @@ -158,19 +156,25 @@ func RequestAnthropic(c *fiber.Ctx, model string, messages []Message, temperatur client := &http.Client{} resp, err := client.Do(req) if err != nil { - return AnthropicChatCompletionResponse{}, fmt.Errorf("error sending request: %w", err) + return "JADE internal error: 01-02-0003. Please contact the support." } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { - return AnthropicChatCompletionResponse{}, fmt.Errorf("error reading response body: %w", err) + return "JADE internal error: 01-01-0004. Please contact the support." + } + + for key, value := range AnthropicErrorCodes { + if strings.Contains(resp.Status, key) { + return value + } } var chatCompletionResponse AnthropicChatCompletionResponse err = json.Unmarshal(body, &chatCompletionResponse) if err != nil { - return AnthropicChatCompletionResponse{}, fmt.Errorf("error unmarshaling JSON: %w", err) + return "JADE internal error: 01-01-0005. Please contact the support." } var usedModelInfo ModelInfo @@ -183,12 +187,16 @@ func RequestAnthropic(c *fiber.Ctx, model string, messages []Message, temperatur LIMIT 1 `, &usedModelInfo, model) if err != nil { - return AnthropicChatCompletionResponse{}, fmt.Errorf("error getting model info: %w", err) + return "JADE internal error: 01-00-0006. Please contact the support." 
} var inputCost float32 = float32(chatCompletionResponse.Usage.InputTokens) * usedModelInfo.InputPrice var outputCost float32 = float32(chatCompletionResponse.Usage.OutputTokens) * usedModelInfo.OutputPrice addUsage(c, inputCost, outputCost, chatCompletionResponse.Usage.InputTokens, chatCompletionResponse.Usage.OutputTokens, model) - return chatCompletionResponse, nil + if len(chatCompletionResponse.Content) == 0 { + return "JADE internal error: 01-03-0007. Please contact the support." + } + + return chatCompletionResponse.Content[0].Text } diff --git a/RequestCustomEndpoint.go b/RequestCustomEndpoint.go new file mode 100644 index 0000000..3af982b --- /dev/null +++ b/RequestCustomEndpoint.go @@ -0,0 +1,70 @@ +package main + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "strings" + + "github.com/gofiber/fiber/v2" +) + +func RequestHuggingface(c *fiber.Ctx, llm LLM, messages []Message) string { + url := llm.Endpoint.Endpoint + temperature := float64(llm.Temperature) + context := llm.Context + maxTokens := int(llm.MaxToken) + + requestBody := OpenaiChatCompletionRequest{ + Model: "tgi", + Messages: Message2RequestMessage(messages, context), + Temperature: temperature, + MaxTokens: maxTokens, + } + + jsonBody, err := json.Marshal(requestBody) + if err != nil { + return "JADE internal error: 10-01-0001. Please contact the support." + } + + req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody)) + if err != nil { + return "JADE internal error: 10-02-0002. Please contact the support." + } + + req.Header.Set("Authorization", "Bearer "+llm.Endpoint.Key) + req.Header.Set("Content-Type", "application/json") + + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return "JADE internal error: 10-02-0003. Please contact the support." + } + defer resp.Body.Close() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return "JADE internal error: 10-01-0004. Please contact the support." 
+ } + + for key, value := range OpenaiErrorCodes { + if strings.Contains(resp.Status, key) { + return value + } + } + + var chatCompletionResponse OpenaiChatCompletionResponse + err = json.Unmarshal(body, &chatCompletionResponse) + if err != nil { + return "JADE internal error: 10-01-0005. Please contact the support." + } + + addUsage(c, 0, 0, 0, 0, llm.Model.ModelID) + + if len(chatCompletionResponse.Choices) == 0 { + return "JADE internal error: 10-03-0007. Please contact the support." + } + + return chatCompletionResponse.Choices[0].Message.Content +} diff --git a/RequestDeepseek.go b/RequestDeepseek.go index cb57220..254d36b 100644 --- a/RequestDeepseek.go +++ b/RequestDeepseek.go @@ -3,61 +3,13 @@ package main import ( "bytes" "encoding/json" - "fmt" "io" "net/http" + "strings" - "github.com/edgedb/edgedb-go" "github.com/gofiber/fiber/v2" ) -type DeepseekChatCompletionRequest struct { - Model string `json:"model"` - Messages []RequestMessage `json:"messages"` - MaxTokens int `json:"max_tokens"` - Temperature float64 `json:"temperature"` -} - -type DeepseekChatCompletionResponse struct { - ID string `json:"id"` - Object string `json:"object"` - Created int64 `json:"created"` - Model string `json:"model"` - Usage DeepseekUsage `json:"usage"` - Choices []DeepseekChoice `json:"choices"` -} - -type DeepseekUsage struct { - PromptTokens int32 `json:"prompt_tokens"` - CompletionTokens int32 `json:"completion_tokens"` - TotalTokens int32 `json:"total_tokens"` -} - -type DeepseekChoice struct { - Message Message `json:"message"` - FinishReason string `json:"finish_reason"` - Index int `json:"index"` -} - -func addDeepseekMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID { - Messages := getAllSelectedMessages(c) - - chatCompletion, err := RequestDeepseek(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken)) - if err != nil { - fmt.Println("Error requesting Deepseek: ", err) - id := insertBotMessage(c, "Error requesting DeepSeek, model may not be available anymore. 
Better error message in development.", selected, llm.ID) - return id - } else if len(chatCompletion.Choices) == 0 { - fmt.Println("No response from DeepSeek") - id := insertBotMessage(c, "No response from DeepSeek", selected, llm.ID) - return id - } else { - Content := chatCompletion.Choices[0].Message.Content - id := insertBotMessage(c, Content, selected, llm.ID) - return id - } -} - func TestDeepseekKey(apiKey string) bool { url := "https://api.deepseek.com/chat/completions" @@ -69,7 +21,7 @@ func TestDeepseekKey(apiKey string) bool { }, } - requestBody := DeepseekChatCompletionRequest{ + requestBody := OpenaiChatCompletionRequest{ Model: "deepseek-chat", Messages: deepseekMessages, Temperature: 0, @@ -78,13 +30,11 @@ func TestDeepseekKey(apiKey string) bool { jsonBody, err := json.Marshal(requestBody) if err != nil { - fmt.Println("Failed to test Deepseek API key - json.Marshal :", err) return false } req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody)) if err != nil { - fmt.Println("Failed to test Deepseek API key - http.NewRequest :", err) return false } @@ -94,35 +44,36 @@ func TestDeepseekKey(apiKey string) bool { client := &http.Client{} resp, err := client.Do(req) if err != nil { - fmt.Println("Failed to test Deepseek API key - client.Do :", err) return false } defer resp.Body.Close() - fmt.Println(resp.Status) - body, err := io.ReadAll(resp.Body) if err != nil { - fmt.Println("Failed to test Deepseek API key - io.ReadAll :", err) return false } - var chatCompletionResponse DeepseekChatCompletionResponse + var chatCompletionResponse OpenaiChatCompletionResponse err = json.Unmarshal(body, &chatCompletionResponse) if err != nil { - fmt.Println("Failed to test Deepseek API key - json.Marshal :", err) return false } if chatCompletionResponse.Usage.CompletionTokens == 0 { - fmt.Println("Failed to test Deepseek API key - No completion tokens :", err) return false } return true } -func RequestDeepseek(c *fiber.Ctx, model string, messages []Message, 
temperature float64, context string, maxTokens int) (DeepseekChatCompletionResponse, error) { +func RequestDeepseek(c *fiber.Ctx, llm LLM, messages []Message) string { + model := llm.Model.ModelID + temperature := float64(llm.Temperature) + context := llm.Context + maxTokens := int(llm.MaxToken) + + url := "https://api.deepseek.com/chat/completions" + var apiKey string err := edgeGlobalClient.WithGlobals(map[string]interface{}{"ext::auth::client_token": c.Cookies("jade-edgedb-auth-token")}).QuerySingle(edgeCtx, ` with @@ -134,12 +85,10 @@ func RequestDeepseek(c *fiber.Ctx, model string, messages []Message, temperature select filtered_keys.key limit 1 `, &apiKey, "deepseek") if err != nil { - return DeepseekChatCompletionResponse{}, fmt.Errorf("error getting DeepSeek API key: %w", err) + return "JADE internal error: 08-00-0000. Please contact the support." } - url := "https://api.deepseek.com/chat/completions" - - requestBody := DeepseekChatCompletionRequest{ + requestBody := OpenaiChatCompletionRequest{ Model: model, Messages: Message2RequestMessage(messages, context), MaxTokens: maxTokens, @@ -148,12 +97,12 @@ func RequestDeepseek(c *fiber.Ctx, model string, messages []Message, temperature jsonBody, err := json.Marshal(requestBody) if err != nil { - return DeepseekChatCompletionResponse{}, fmt.Errorf("error marshaling JSON: %w", err) + return "JADE internal error: 08-01-0001. Please contact the support." } req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody)) if err != nil { - return DeepseekChatCompletionResponse{}, fmt.Errorf("error creating request: %w", err) + return "JADE internal error: 08-02-0002. Please contact the support." 
} req.Header.Set("Content-Type", "application/json") @@ -162,24 +111,25 @@ func RequestDeepseek(c *fiber.Ctx, model string, messages []Message, temperature client := &http.Client{} resp, err := client.Do(req) if err != nil { - return DeepseekChatCompletionResponse{}, fmt.Errorf("error sending request: %w", err) + return "JADE internal error: 08-02-0003. Please contact the support." } defer resp.Body.Close() - // TODO: Add a message to the user and do it for all 400 things - if resp.Status == "402 Payment Required" { - return DeepseekChatCompletionResponse{}, fmt.Errorf("error reading response body: %w", err) - } - body, err := io.ReadAll(resp.Body) if err != nil { - return DeepseekChatCompletionResponse{}, fmt.Errorf("error reading response body: %w", err) + return "JADE internal error: 08-01-0004. Please contact the support." } - var chatCompletionResponse DeepseekChatCompletionResponse + for key, value := range OpenaiErrorCodes { + if strings.Contains(resp.Status, key) { + return value + } + } + + var chatCompletionResponse OpenaiChatCompletionResponse err = json.Unmarshal(body, &chatCompletionResponse) if err != nil { - return DeepseekChatCompletionResponse{}, fmt.Errorf("error unmarshaling JSON: %w", err) + return "JADE internal error: 08-01-0005. Please contact the support." } var usedModelInfo ModelInfo @@ -192,12 +142,16 @@ func RequestDeepseek(c *fiber.Ctx, model string, messages []Message, temperature LIMIT 1 `, &usedModelInfo, model) if err != nil { - return DeepseekChatCompletionResponse{}, fmt.Errorf("error getting model info: %w", err) + return "JADE internal error: 08-00-0006. Please contact the support." 
} var inputCost float32 = float32(chatCompletionResponse.Usage.PromptTokens) * usedModelInfo.InputPrice var outputCost float32 = float32(chatCompletionResponse.Usage.CompletionTokens) * usedModelInfo.OutputPrice addUsage(c, inputCost, outputCost, chatCompletionResponse.Usage.PromptTokens, chatCompletionResponse.Usage.CompletionTokens, model) - return chatCompletionResponse, nil + if len(chatCompletionResponse.Choices) == 0 { + return "JADE internal error: 08-03-0007. Please contact the support." + } + + return chatCompletionResponse.Choices[0].Message.Content } diff --git a/RequestFirework.go b/RequestFirework.go index 6b7becb..1d5da02 100644 --- a/RequestFirework.go +++ b/RequestFirework.go @@ -3,61 +3,13 @@ package main import ( "bytes" "encoding/json" - "fmt" "io" "net/http" + "strings" - "github.com/edgedb/edgedb-go" "github.com/gofiber/fiber/v2" ) -type FireworkChatCompletionRequest struct { - Model string `json:"model"` - Messages []RequestMessage `json:"messages"` - MaxTokens int `json:"max_tokens"` - Temperature float64 `json:"temperature"` -} - -type FireworkChatCompletionResponse struct { - ID string `json:"id"` - Object string `json:"object"` - Created int64 `json:"created"` - Model string `json:"model"` - Usage FireworkUsage `json:"usage"` - Choices []FireworkChoice `json:"choices"` -} - -type FireworkUsage struct { - PromptTokens int32 `json:"prompt_tokens"` - CompletionTokens int32 `json:"completion_tokens"` - TotalTokens int32 `json:"total_tokens"` -} - -type FireworkChoice struct { - Message Message `json:"message"` - FinishReason string `json:"finish_reason"` - Index int `json:"index"` -} - -func addFireworkMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID { - Messages := getAllSelectedMessages(c) - - chatCompletion, err := RequestFirework(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken)) - if err != nil { - fmt.Println("Error requesting Firework: ", err) - id := insertBotMessage(c, "Error requesting 
Firework, model may not be available anymore. Better error message in development.", selected, llm.ID) - return id - } else if len(chatCompletion.Choices) == 0 { - fmt.Println("No response from Firework") - id := insertBotMessage(c, "No response from Firework", selected, llm.ID) - return id - } else { - Content := chatCompletion.Choices[0].Message.Content - id := insertBotMessage(c, Content, selected, llm.ID) - return id - } -} - func TestFireworkKey(apiKey string) bool { url := "https://api.fireworks.ai/inference/v1/chat/completions" @@ -101,7 +53,7 @@ func TestFireworkKey(apiKey string) bool { return false } - var chatCompletionResponse FireworkChatCompletionResponse + var chatCompletionResponse OpenaiChatCompletionResponse err = json.Unmarshal(body, &chatCompletionResponse) if err != nil { return false @@ -112,7 +64,12 @@ func TestFireworkKey(apiKey string) bool { return true } -func RequestFirework(c *fiber.Ctx, model string, messages []Message, temperature float64, context string, maxTokens int) (FireworkChatCompletionResponse, error) { +func RequestFirework(c *fiber.Ctx, llm LLM, messages []Message) string { + model := llm.Model.ModelID + temperature := float64(llm.Temperature) + context := llm.Context + maxTokens := int(llm.MaxToken) + var apiKey string err := edgeGlobalClient.WithGlobals(map[string]interface{}{"ext::auth::client_token": c.Cookies("jade-edgedb-auth-token")}).QuerySingle(edgeCtx, ` with @@ -122,15 +79,15 @@ func RequestFirework(c *fiber.Ctx, model string, messages []Message, temperature } filter .company.name = $0 AND .$0 LIMIT 1 `, &usedModelInfo, model) + if err != nil { + return "JADE internal error: 04-00-0006. Please contact the support." 
+ } var inputCost float32 = float32(chatCompletionResponse.Usage.PromptTokens) * usedModelInfo.InputPrice var outputCost float32 = float32(chatCompletionResponse.Usage.CompletionTokens) * usedModelInfo.OutputPrice addUsage(c, inputCost, outputCost, chatCompletionResponse.Usage.PromptTokens, chatCompletionResponse.Usage.CompletionTokens, model) - return chatCompletionResponse, nil + if len(chatCompletionResponse.Choices) == 0 { + return "JADE internal error: 04-03-0007. Please contact the support." + } + + return chatCompletionResponse.Choices[0].Message.Content } diff --git a/RequestHuggingface.go b/RequestHuggingface.go deleted file mode 100644 index 7aea546..0000000 --- a/RequestHuggingface.go +++ /dev/null @@ -1,106 +0,0 @@ -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - - "github.com/edgedb/edgedb-go" - "github.com/gofiber/fiber/v2" -) - -type HuggingfaceChatCompletionRequest struct { - Model string `json:"model"` - Messages []RequestMessage `json:"messages"` - Temperature float64 `json:"temperature"` - MaxTokens int `json:"max_tokens"` - Stream bool `json:"stream"` -} - -type HuggingfaceChatCompletionResponse struct { - ID string `json:"id"` - Object string `json:"object"` - Created int64 `json:"created"` - Model string `json:"model"` - Choices []HuggingfaceChoice `json:"choices"` -} - -type HuggingfaceUsage struct { - PromptTokens int32 `json:"prompt_tokens"` - CompletionTokens int32 `json:"completion_tokens"` - TotalTokens int32 `json:"total_tokens"` -} - -type HuggingfaceChoice struct { - Message Message `json:"message"` - FinishReason string `json:"finish_reason"` - Index int `json:"index"` -} - -func addHuggingfaceMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID { - Messages := getAllSelectedMessages(c) - - chatCompletion, err := RequestHuggingface(c, llm, Messages, float64(llm.Temperature), int(llm.MaxToken)) - if err != nil { - fmt.Println("Error requesting Huggingface: ", err) - id := insertBotMessage(c, 
"Error requesting Huggingface.", selected, llm.ID) - return id - } else if len(chatCompletion.Choices) == 0 { - fmt.Println("No response from Endpoint") - id := insertBotMessage(c, "No response from Endpoint", selected, llm.ID) - return id - } else { - Content := chatCompletion.Choices[0].Message.Content - id := insertBotMessage(c, Content, selected, llm.ID) - return id - } -} - -func RequestHuggingface(c *fiber.Ctx, llm LLM, messages []Message, temperature float64, maxTokens int) (HuggingfaceChatCompletionResponse, error) { - url := llm.Endpoint.Endpoint - - requestBody := HuggingfaceChatCompletionRequest{ - Model: "tgi", - Messages: Message2RequestMessage(messages, llm.Context), - Temperature: temperature, - MaxTokens: maxTokens, - Stream: false, - } - - jsonBody, err := json.Marshal(requestBody) - if err != nil { - return HuggingfaceChatCompletionResponse{}, fmt.Errorf("error marshaling JSON: %w", err) - } - - req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody)) - if err != nil { - return HuggingfaceChatCompletionResponse{}, fmt.Errorf("error creating request: %w", err) - } - - req.Header.Set("Authorization", "Bearer "+llm.Endpoint.Key) - req.Header.Set("Content-Type", "application/json") - - client := &http.Client{} - resp, err := client.Do(req) - if err != nil { - return HuggingfaceChatCompletionResponse{}, fmt.Errorf("error sending request: %w", err) - } - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - if err != nil { - return HuggingfaceChatCompletionResponse{}, fmt.Errorf("error reading response body: %w", err) - } - - var chatCompletionResponse HuggingfaceChatCompletionResponse - err = json.Unmarshal(body, &chatCompletionResponse) - if err != nil { - return HuggingfaceChatCompletionResponse{}, fmt.Errorf("error unmarshaling JSON: %w", err) - } - - addUsage(c, 0, 0, 0, 0, llm.Model.ModelID) - - return chatCompletionResponse, nil -} diff --git a/RequestMistral.go b/RequestMistral.go index 7806fe3..c185c9a 100644 --- 
a/RequestMistral.go +++ b/RequestMistral.go @@ -3,59 +3,13 @@ package main import ( "bytes" "encoding/json" - "fmt" "io" "net/http" + "strings" - "github.com/edgedb/edgedb-go" "github.com/gofiber/fiber/v2" ) -type MistralChatCompletionRequest struct { - Model string `json:"model"` - Messages []RequestMessage `json:"messages"` - MaxTokens int `json:"max_tokens"` - Temperature float64 `json:"temperature"` -} -type MistralChatCompletionResponse struct { - ID string `json:"id"` - Object string `json:"object"` - Created int64 `json:"created"` - Model string `json:"model"` - Usage MistralUsage `json:"usage"` - Choices []MistralChoice `json:"choices"` -} - -type MistralUsage struct { - PromptTokens int32 `json:"prompt_tokens"` - CompletionTokens int32 `json:"completion_tokens"` - TotalTokens int32 `json:"total_tokens"` -} - -type MistralChoice struct { - Message Message `json:"message"` - FinishReason string `json:"finish_reason"` - Index int `json:"index"` -} - -func addMistralMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID { - Messages := getAllSelectedMessages(c) - - chatCompletion, err := RequestMistral(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken)) - if err != nil { - fmt.Println("Error requesting Mistral: ", err) - id := insertBotMessage(c, "Error requesting Mistral, model may not be available anymore. 
Better error message in development.", selected, llm.ID) - return id - } else if len(chatCompletion.Choices) == 0 { - id := insertBotMessage(c, "No response from Mistral", selected, llm.ID) - return id - } else { - Content := chatCompletion.Choices[0].Message.Content - id := insertBotMessage(c, Content, selected, llm.ID) - return id - } -} - func TestMistralKey(apiKey string) bool { url := "https://api.mistral.ai/v1/chat/completions" @@ -67,7 +21,7 @@ func TestMistralKey(apiKey string) bool { }, } - requestBody := MistralChatCompletionRequest{ + requestBody := OpenaiChatCompletionRequest{ Model: "open-mistral-7b", Messages: mistralMessages, Temperature: 0, @@ -76,13 +30,11 @@ func TestMistralKey(apiKey string) bool { jsonBody, err := json.Marshal(requestBody) if err != nil { - fmt.Println("Error marshalling request to Mistral") return false } req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody)) if err != nil { - fmt.Println("Error creating request to Mistral") return false } @@ -93,31 +45,32 @@ func TestMistralKey(apiKey string) bool { client := &http.Client{} resp, err := client.Do(req) if err != nil { - fmt.Println("Error sending request to Mistral") return false } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { - fmt.Println("Error reading response from Mistral") return false } - var chatCompletionResponse MistralChatCompletionResponse + var chatCompletionResponse OpenaiChatCompletionResponse err = json.Unmarshal(body, &chatCompletionResponse) if err != nil { - fmt.Println("Error unmarshalling response from Mistral") return false } if chatCompletionResponse.Usage.CompletionTokens == 0 { - fmt.Println("No response from Mistral") return false } return true } -func RequestMistral(c *fiber.Ctx, model string, messages []Message, temperature float64, context string, maxTokens int) (MistralChatCompletionResponse, error) { +func RequestMistral(c *fiber.Ctx, llm LLM, messages []Message) string { + model := llm.Model.ModelID + 
temperature := float64(llm.Temperature) + context := llm.Context + maxTokens := int(llm.MaxToken) + var apiKey string err := edgeGlobalClient.WithGlobals(map[string]interface{}{"ext::auth::client_token": c.Cookies("jade-edgedb-auth-token")}).QuerySingle(edgeCtx, ` with @@ -129,12 +82,12 @@ func RequestMistral(c *fiber.Ctx, model string, messages []Message, temperature select filtered_keys.key limit 1 `, &apiKey, "mistral") if err != nil { - return MistralChatCompletionResponse{}, fmt.Errorf("error getting OpenAI API key: %w", err) + return "JADE internal error: 02-00-0000. Please contact the support." } url := "https://api.mistral.ai/v1/chat/completions" - requestBody := MistralChatCompletionRequest{ + requestBody := OpenaiChatCompletionRequest{ Model: model, Messages: Message2RequestMessage(messages, context), MaxTokens: maxTokens, @@ -143,34 +96,39 @@ func RequestMistral(c *fiber.Ctx, model string, messages []Message, temperature jsonBody, err := json.Marshal(requestBody) if err != nil { - return MistralChatCompletionResponse{}, fmt.Errorf("error marshaling JSON: %w", err) + return "JADE internal error: 02-01-0001. Please contact the support." } req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody)) if err != nil { - return MistralChatCompletionResponse{}, fmt.Errorf("error creating request: %w", err) + return "JADE internal error: 02-02-0002. Please contact the support." } req.Header.Set("Content-Type", "application/json") - req.Header.Set("Accept", "application/json") req.Header.Set("Authorization", "Bearer "+apiKey) client := &http.Client{} resp, err := client.Do(req) if err != nil { - return MistralChatCompletionResponse{}, fmt.Errorf("error sending request: %w", err) + return "JADE internal error: 02-02-0003. Please contact the support." 
} defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { - return MistralChatCompletionResponse{}, fmt.Errorf("error reading response body: %w", err) + return "JADE internal error: 02-01-0004. Please contact the support." } - var chatCompletionResponse MistralChatCompletionResponse + for key, value := range OpenaiErrorCodes { + if strings.Contains(resp.Status, key) { + return value + } + } + + var chatCompletionResponse OpenaiChatCompletionResponse err = json.Unmarshal(body, &chatCompletionResponse) if err != nil { - return MistralChatCompletionResponse{}, fmt.Errorf("error unmarshaling JSON: %w", err) + return "JADE internal error: 02-01-0005. Please contact the support." } var usedModelInfo ModelInfo @@ -183,16 +141,16 @@ func RequestMistral(c *fiber.Ctx, model string, messages []Message, temperature LIMIT 1 `, &usedModelInfo, model) if err != nil { - return MistralChatCompletionResponse{}, fmt.Errorf("error getting model info: %w", err) - } - - if usedModelInfo.InputPrice == 0 || usedModelInfo.OutputPrice == 0 { - return MistralChatCompletionResponse{}, fmt.Errorf("model %s not found in Mistral", model) + return "JADE internal error: 02-00-0006. Please contact the support." } var inputCost float32 = float32(chatCompletionResponse.Usage.PromptTokens) * usedModelInfo.InputPrice var outputCost float32 = float32(chatCompletionResponse.Usage.CompletionTokens) * usedModelInfo.OutputPrice addUsage(c, inputCost, outputCost, chatCompletionResponse.Usage.PromptTokens, chatCompletionResponse.Usage.CompletionTokens, model) - return chatCompletionResponse, nil + if len(chatCompletionResponse.Choices) == 0 { + return "JADE internal error: 02-03-0007. Please contact the support." 
+ } + + return chatCompletionResponse.Choices[0].Message.Content } diff --git a/RequestNim.go b/RequestNim.go index dda5701..b4f3e65 100644 --- a/RequestNim.go +++ b/RequestNim.go @@ -3,66 +3,15 @@ package main import ( "bytes" "encoding/json" - "fmt" "io" "net/http" + "strings" - "github.com/edgedb/edgedb-go" "github.com/gofiber/fiber/v2" ) -type NimChatCompletionRequest struct { - Model string `json:"model"` - Messages []RequestMessage `json:"messages"` - MaxTokens int `json:"max_tokens"` - Temperature float64 `json:"temperature"` -} - -type NimChatCompletionResponse struct { - ID string `json:"id"` - Object string `json:"object"` - Created int64 `json:"created"` - Model string `json:"model"` - Usage NimUsage `json:"usage"` - Choices []NimChoice `json:"choices"` -} - -type NimUsage struct { - PromptTokens int32 `json:"prompt_tokens"` - CompletionTokens int32 `json:"completion_tokens"` - TotalTokens int32 `json:"total_tokens"` -} - -type NimChoice struct { - Message RequestMessage `json:"message"` - FinishReason string `json:"finish_reason"` - Index int `json:"index"` -} - -func addNimMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID { - Messages := getAllSelectedMessages(c) - - chatCompletion, err := RequestNim(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken)) - if err != nil { - fmt.Println("Error requesting NIM: ", err) - id := insertBotMessage(c, "Error requesting NIM, model may not be available anymore. 
Better error message in development.", selected, llm.ID) - return id - } else if len(chatCompletion.Choices) == 0 { - fmt.Println("No response from NIM") - id := insertBotMessage(c, "No response from NIM", selected, llm.ID) - return id - } else { - Content := chatCompletion.Choices[0].Message.Content - id := insertBotMessage(c, Content, selected, llm.ID) - return id - } -} - func TestNimKey(apiKey string) bool { url := "https://integrate.api.nvidia.com/v1/chat/completions" - //apiKey := "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" - - fmt.Println("Testing new Nvidia NIM key:", apiKey) // Convert messages to OpenAI format nimMessages := []RequestMessage{ @@ -72,7 +21,7 @@ func TestNimKey(apiKey string) bool { }, } - requestBody := NimChatCompletionRequest{ + requestBody := OpenaiChatCompletionRequest{ Model: "meta/llama3-8b-instruct", Messages: nimMessages, Temperature: 0, @@ -81,13 +30,11 @@ func TestNimKey(apiKey string) bool { jsonBody, err := json.Marshal(requestBody) if err != nil { - fmt.Println("Error when testing NIM key. Cant parse JSON request.") return false } req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody)) if err != nil { - fmt.Println("Error when testing NIM key. Cant generate new request") return false } @@ -97,41 +44,33 @@ func TestNimKey(apiKey string) bool { client := &http.Client{} resp, err := client.Do(req) if err != nil { - fmt.Println("Error when testing NIM key. Cant send request.") return false } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { - fmt.Println("Error when testing NIM key. Cant read response.") return false } - var chatCompletionResponse NimChatCompletionResponse + var chatCompletionResponse OpenaiChatCompletionResponse err = json.Unmarshal(body, &chatCompletionResponse) if err != nil { - fmt.Println(resp.Status) - fmt.Println(resp.Body) - - fmt.Println("Error when testing NIM key. 
Cant unmarshal response.") return false } if chatCompletionResponse.Usage.CompletionTokens == 0 { - fmt.Println(resp.Status) - fmt.Println(resp.Body) - - fmt.Println("Error when testing NIM key. No completion token.") return false } - Content := chatCompletionResponse.Choices[0].Message.Content - fmt.Println(Content) - return true } -func RequestNim(c *fiber.Ctx, model string, messages []Message, temperature float64, context string, maxToken int) (NimChatCompletionResponse, error) { +func RequestNim(c *fiber.Ctx, llm LLM, messages []Message) string { + model := llm.Model.ModelID + temperature := float64(llm.Temperature) + context := llm.Context + maxTokens := int(llm.MaxToken) + var apiKey string err := edgeGlobalClient.WithGlobals(map[string]interface{}{"ext::auth::client_token": c.Cookies("jade-edgedb-auth-token")}).QuerySingle(edgeCtx, ` with @@ -143,26 +82,26 @@ func RequestNim(c *fiber.Ctx, model string, messages []Message, temperature floa select filtered_keys.key limit 1 `, &apiKey, "nim") if err != nil { - return NimChatCompletionResponse{}, fmt.Errorf("error getting NIM API key: %w", err) + return "JADE internal error: 05-00-0000. Please contact the support." } url := "https://integrate.api.nvidia.com/v1/chat/completions" - requestBody := NimChatCompletionRequest{ + requestBody := OpenaiChatCompletionRequest{ Model: model, Messages: Message2RequestMessage(messages, context), - MaxTokens: maxToken, + MaxTokens: maxTokens, Temperature: temperature, } jsonBody, err := json.Marshal(requestBody) if err != nil { - return NimChatCompletionResponse{}, fmt.Errorf("error marshaling JSON: %w", err) + return "JADE internal error: 05-01-0001. Please contact the support." } req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody)) if err != nil { - return NimChatCompletionResponse{}, fmt.Errorf("error creating request: %w", err) + return "JADE internal error: 05-02-0002. Please contact the support." 
} req.Header.Set("Content-Type", "application/json") @@ -171,19 +110,25 @@ func RequestNim(c *fiber.Ctx, model string, messages []Message, temperature floa client := &http.Client{} resp, err := client.Do(req) if err != nil { - return NimChatCompletionResponse{}, fmt.Errorf("error sending request: %w", err) + return "JADE internal error: 05-02-0003. Please contact the support." } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { - return NimChatCompletionResponse{}, fmt.Errorf("error reading response body: %w", err) + return "JADE internal error: 05-01-0004. Please contact the support." } - var chatCompletionResponse NimChatCompletionResponse + for key, value := range OpenaiErrorCodes { + if strings.Contains(resp.Status, key) { + return value + } + } + + var chatCompletionResponse OpenaiChatCompletionResponse err = json.Unmarshal(body, &chatCompletionResponse) if err != nil { - return NimChatCompletionResponse{}, fmt.Errorf("error unmarshaling JSON: %w", err) + return "JADE internal error: 05-01-0005. Please contact the support." } var usedModelInfo ModelInfo @@ -196,12 +141,16 @@ func RequestNim(c *fiber.Ctx, model string, messages []Message, temperature floa LIMIT 1 `, &usedModelInfo, model) if err != nil { - return NimChatCompletionResponse{}, fmt.Errorf("error getting model info: %w", err) + return "JADE internal error: 05-00-0006. Please contact the support." } var inputCost float32 = float32(chatCompletionResponse.Usage.PromptTokens) * usedModelInfo.InputPrice var outputCost float32 = float32(chatCompletionResponse.Usage.CompletionTokens) * usedModelInfo.OutputPrice addUsage(c, inputCost, outputCost, chatCompletionResponse.Usage.PromptTokens, chatCompletionResponse.Usage.CompletionTokens, model) - return chatCompletionResponse, nil + if len(chatCompletionResponse.Choices) == 0 { + return "JADE internal error: 05-03-0007. Please contact the support." 
+ }
+
+ return chatCompletionResponse.Choices[0].Message.Content } diff --git a/RequestOpenai.go b/RequestOpenai.go index 423e32d..f4dd758 100644 --- a/RequestOpenai.go +++ b/RequestOpenai.go @@ -3,11 +3,10 @@ package main import ( "bytes" "encoding/json" - "fmt" "io" "net/http" + "strings" - "github.com/edgedb/edgedb-go" "github.com/gofiber/fiber/v2" ) @@ -39,23 +38,16 @@ type OpenaiChoice struct { Index int `json:"index"` } -func addOpenaiMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID { - Messages := getAllSelectedMessages(c) +var OpenaiErrorCodes map[string]string - chatCompletion, err := RequestOpenai(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken)) - if err != nil { - fmt.Println("Error requesting OpenAI: ", err) - id := insertBotMessage(c, "Error requesting OpenAI, model may not be available anymore. Better error message in development.", selected, llm.ID) - return id - } else if len(chatCompletion.Choices) == 0 { - fmt.Println("No response from OpenAI") - id := insertBotMessage(c, "No response from OpenAI", selected, llm.ID) - return id - } else { - Content := chatCompletion.Choices[0].Message.Content - id := insertBotMessage(c, Content, selected, llm.ID) - return id - } +func init() { + OpenaiErrorCodes = make(map[string]string) + OpenaiErrorCodes["401"] = "Invalid Authentication - Ensure that the API key is still valid." + OpenaiErrorCodes["403"] = "Accessing the API from an unsupported country, region, or territory." + OpenaiErrorCodes["429"] = "Rate limit reached for requests - You are sending requests too quickly." + OpenaiErrorCodes["429"] += " You may also have run out of credits or hit your maximum monthly spend - Buy more credits or learn how to increase your limits." + OpenaiErrorCodes["500"] = "Issue on Provider servers - Retry your request after a brief wait and contact the provider if the issue persists." 
+ OpenaiErrorCodes["503"] = "Servers are experiencing high traffic - Please retry your requests after a brief wait." } func TestOpenaiKey(apiKey string) bool { @@ -112,7 +104,12 @@ func TestOpenaiKey(apiKey string) bool { return true } -func RequestOpenai(c *fiber.Ctx, model string, messages []Message, temperature float64, context string, maxTokens int) (OpenaiChatCompletionResponse, error) { +func RequestOpenai(c *fiber.Ctx, llm LLM, messages []Message) string { + model := llm.Model.ModelID + temperature := float64(llm.Temperature) + context := llm.Context + maxTokens := int(llm.MaxToken) + var apiKey string err := edgeGlobalClient.WithGlobals(map[string]interface{}{"ext::auth::client_token": c.Cookies("jade-edgedb-auth-token")}).QuerySingle(edgeCtx, ` with @@ -124,7 +121,7 @@ func RequestOpenai(c *fiber.Ctx, model string, messages []Message, temperature f select filtered_keys.key limit 1 `, &apiKey, "openai") if err != nil { - return OpenaiChatCompletionResponse{}, fmt.Errorf("error getting OpenAI API key: %w", err) + return "JADE internal error: 00-00-0000. Please contact the support." } url := "https://api.openai.com/v1/chat/completions" @@ -138,12 +135,12 @@ func RequestOpenai(c *fiber.Ctx, model string, messages []Message, temperature f jsonBody, err := json.Marshal(requestBody) if err != nil { - return OpenaiChatCompletionResponse{}, fmt.Errorf("error marshaling JSON: %w", err) + return "JADE internal error: 00-01-0001. Please contact the support." } req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody)) if err != nil { - return OpenaiChatCompletionResponse{}, fmt.Errorf("error creating request: %w", err) + return "JADE internal error: 00-02-0002. Please contact the support." 
} req.Header.Set("Content-Type", "application/json") @@ -152,19 +149,25 @@ func RequestOpenai(c *fiber.Ctx, model string, messages []Message, temperature f client := &http.Client{} resp, err := client.Do(req) if err != nil { - return OpenaiChatCompletionResponse{}, fmt.Errorf("error sending request: %w", err) + return "JADE internal error: 00-02-0003. Please contact the support." } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { - return OpenaiChatCompletionResponse{}, fmt.Errorf("error reading response body: %w", err) + return "JADE internal error: 00-01-0004. Please contact the support." + } + + for key, value := range OpenaiErrorCodes { + if strings.Contains(resp.Status, key) { + return value + } } var chatCompletionResponse OpenaiChatCompletionResponse err = json.Unmarshal(body, &chatCompletionResponse) if err != nil { - return OpenaiChatCompletionResponse{}, fmt.Errorf("error unmarshaling JSON: %w", err) + return "JADE internal error: 00-01-0005. Please contact the support." } var usedModelInfo ModelInfo @@ -177,12 +180,16 @@ func RequestOpenai(c *fiber.Ctx, model string, messages []Message, temperature f LIMIT 1 `, &usedModelInfo, model) if err != nil { - return OpenaiChatCompletionResponse{}, fmt.Errorf("error getting model info: %w", err) + return "JADE internal error: 00-00-0006. Please contact the support." } var inputCost float32 = float32(chatCompletionResponse.Usage.PromptTokens) * usedModelInfo.InputPrice var outputCost float32 = float32(chatCompletionResponse.Usage.CompletionTokens) * usedModelInfo.OutputPrice addUsage(c, inputCost, outputCost, chatCompletionResponse.Usage.PromptTokens, chatCompletionResponse.Usage.CompletionTokens, model) - return chatCompletionResponse, nil + if len(chatCompletionResponse.Choices) == 0 { + return "JADE internal error: 00-03-0007. Please contact the support." 
+ } + + return chatCompletionResponse.Choices[0].Message.Content } diff --git a/RequestPerplexity.go b/RequestPerplexity.go index ed4a48c..68c5e09 100644 --- a/RequestPerplexity.go +++ b/RequestPerplexity.go @@ -3,62 +3,13 @@ package main import ( "bytes" "encoding/json" - "fmt" "io" "net/http" "strings" - "github.com/edgedb/edgedb-go" "github.com/gofiber/fiber/v2" ) -type PerplexityChatCompletionRequest struct { - Model string `json:"model"` - Messages []RequestMessage `json:"messages"` - MaxTokens int `json:"max_tokens"` - Temperature float64 `json:"temperature"` -} - -type PerplexityChatCompletionResponse struct { - ID string `json:"id"` - Object string `json:"object"` - Created int64 `json:"created"` - Model string `json:"model"` - Usage PerplexityUsage `json:"usage"` - Choices []PerplexityChoice `json:"choices"` -} - -type PerplexityUsage struct { - PromptTokens int32 `json:"prompt_tokens"` - CompletionTokens int32 `json:"completion_tokens"` - TotalTokens int32 `json:"total_tokens"` -} - -type PerplexityChoice struct { - Message Message `json:"message"` - FinishReason string `json:"finish_reason"` - Index int `json:"index"` -} - -func addPerplexityMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID { - Messages := getAllSelectedMessages(c) - - chatCompletion, err := RequestPerplexity(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken)) - if err != nil { - fmt.Println("Error requesting Perplexity: ", err) - id := insertBotMessage(c, "Error requesting Perplexity, model may not be available anymore. 
Better error message in development.", selected, llm.ID) - return id - } else if len(chatCompletion.Choices) == 0 { - fmt.Println("No response from Perplexity") - id := insertBotMessage(c, "No response from Perplexity", selected, llm.ID) - return id - } else { - Content := chatCompletion.Choices[0].Message.Content - id := insertBotMessage(c, Content, selected, llm.ID) - return id - } -} - func TestPerplexityKey(apiKey string) bool { url := "https://api.perplexity.ai/chat/completions" @@ -70,7 +21,7 @@ func TestPerplexityKey(apiKey string) bool { }, } - requestBody := PerplexityChatCompletionRequest{ + requestBody := OpenaiChatCompletionRequest{ Model: "llama-3-8b-instruct", Messages: perplexityMessages, Temperature: 0, @@ -102,7 +53,7 @@ func TestPerplexityKey(apiKey string) bool { return false } - var chatCompletionResponse PerplexityChatCompletionResponse + var chatCompletionResponse OpenaiChatCompletionResponse err = json.Unmarshal(body, &chatCompletionResponse) if err != nil { return false @@ -113,7 +64,12 @@ func TestPerplexityKey(apiKey string) bool { return true } -func RequestPerplexity(c *fiber.Ctx, model string, messages []Message, temperature float64, context string, maxTokens int) (PerplexityChatCompletionResponse, error) { +func RequestPerplexity(c *fiber.Ctx, llm LLM, messages []Message) string { + model := llm.Model.ModelID + temperature := float64(llm.Temperature) + context := llm.Context + maxTokens := int(llm.MaxToken) + var apiKey string err := edgeGlobalClient.WithGlobals(map[string]interface{}{"ext::auth::client_token": c.Cookies("jade-edgedb-auth-token")}).QuerySingle(edgeCtx, ` with @@ -125,12 +81,12 @@ func RequestPerplexity(c *fiber.Ctx, model string, messages []Message, temperatu select filtered_keys.key limit 1 `, &apiKey, "perplexity") if err != nil { - return PerplexityChatCompletionResponse{}, fmt.Errorf("error getting Perplexity API key: %w", err) + return "JADE internal error: 06-00-0000. Please contact the support." 
} url := "https://api.perplexity.ai/chat/completions" - requestBody := PerplexityChatCompletionRequest{ + requestBody := OpenaiChatCompletionRequest{ Model: model, Messages: Message2RequestMessage(messages, context), MaxTokens: maxTokens, @@ -139,12 +95,12 @@ func RequestPerplexity(c *fiber.Ctx, model string, messages []Message, temperatu jsonBody, err := json.Marshal(requestBody) if err != nil { - return PerplexityChatCompletionResponse{}, fmt.Errorf("error marshaling JSON: %w", err) + return "JADE internal error: 06-01-0001. Please contact the support." } req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody)) if err != nil { - return PerplexityChatCompletionResponse{}, fmt.Errorf("error creating request: %w", err) + return "JADE internal error: 06-02-0002. Please contact the support." } req.Header.Set("Content-Type", "application/json") @@ -153,19 +109,25 @@ func RequestPerplexity(c *fiber.Ctx, model string, messages []Message, temperatu client := &http.Client{} resp, err := client.Do(req) if err != nil { - return PerplexityChatCompletionResponse{}, fmt.Errorf("error sending request: %w", err) + return "JADE internal error: 06-02-0003. Please contact the support." } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { - return PerplexityChatCompletionResponse{}, fmt.Errorf("error reading response body: %w", err) + return "JADE internal error: 06-01-0004. Please contact the support." } - var chatCompletionResponse PerplexityChatCompletionResponse + for key, value := range OpenaiErrorCodes { + if strings.Contains(resp.Status, key) { + return value + } + } + + var chatCompletionResponse OpenaiChatCompletionResponse err = json.Unmarshal(body, &chatCompletionResponse) if err != nil { - return PerplexityChatCompletionResponse{}, fmt.Errorf("error unmarshaling JSON: %w", err) + return "JADE internal error: 06-01-0005. Please contact the support." 
} var usedModelInfo ModelInfo @@ -178,19 +140,16 @@ func RequestPerplexity(c *fiber.Ctx, model string, messages []Message, temperatu LIMIT 1 `, &usedModelInfo, model) if err != nil { - return PerplexityChatCompletionResponse{}, fmt.Errorf("error getting model info: %w", err) + return "JADE internal error: 06-00-0006. Please contact the support." } var inputCost float32 = float32(chatCompletionResponse.Usage.PromptTokens) * usedModelInfo.InputPrice var outputCost float32 = float32(chatCompletionResponse.Usage.CompletionTokens) * usedModelInfo.OutputPrice - - // If online model end with -online add a small cost - if strings.HasSuffix(model, "-online") { - inputCost += 0.005 - outputCost += 0.005 - } - addUsage(c, inputCost, outputCost, chatCompletionResponse.Usage.PromptTokens, chatCompletionResponse.Usage.CompletionTokens, model) - return chatCompletionResponse, nil + if len(chatCompletionResponse.Choices) == 0 { + return "JADE internal error: 06-03-0007. Please contact the support." + } + + return chatCompletionResponse.Choices[0].Message.Content } diff --git a/RequestTogetherai.go b/RequestTogetherai.go index 151b031..d534493 100644 --- a/RequestTogetherai.go +++ b/RequestTogetherai.go @@ -3,61 +3,28 @@ package main import ( "bytes" "encoding/json" - "fmt" "io" "net/http" + "strings" - "github.com/edgedb/edgedb-go" "github.com/gofiber/fiber/v2" ) -type TogetherChatCompletionRequest struct { - Model string `json:"model"` - Messages []RequestMessage `json:"messages"` - MaxTokens int `json:"max_tokens"` - Temperature float64 `json:"temperature"` -} - type TogetherChatCompletionResponse struct { ID string `json:"id"` Object string `json:"object"` Created int64 `json:"created"` Model string `json:"model"` - Usage TogetherUsage `json:"usage"` + Usage OpenaiUsage `json:"usage"` Choices []TogetherChoice `json:"choices"` } -type TogetherUsage struct { - PromptTokens int32 `json:"prompt_tokens"` - CompletionTokens int32 `json:"completion_tokens"` - TotalTokens int32 
`json:"total_tokens"` -} - type TogetherChoice struct { Text string `json:"text"` FinishReason string `json:"finish_reason"` Index int `json:"index"` } -func addTogetherMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID { - Messages := getAllSelectedMessages(c) - - chatCompletion, err := RequestTogether(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken)) - if err != nil { - fmt.Println("Error requesting Together: ", err) - id := insertBotMessage(c, "Error requesting Together, model may not be available anymore. Better error message in development.", selected, llm.ID) - return id - } else if len(chatCompletion.Choices) == 0 { - fmt.Println("No response from Together") - id := insertBotMessage(c, "No response from Together", selected, llm.ID) - return id - } else { - Content := chatCompletion.Choices[0].Text - id := insertBotMessage(c, Content, selected, llm.ID) - return id - } -} - func TestTogetherKey(apiKey string) bool { url := "https://api.together.xyz/v1/completions" @@ -69,7 +36,7 @@ func TestTogetherKey(apiKey string) bool { }, } - requestBody := TogetherChatCompletionRequest{ + requestBody := OpenaiChatCompletionRequest{ Model: "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo", Messages: togetherMessages, Temperature: 0, @@ -101,7 +68,7 @@ func TestTogetherKey(apiKey string) bool { return false } - var chatCompletionResponse TogetherChatCompletionResponse + var chatCompletionResponse OpenaiChatCompletionResponse err = json.Unmarshal(body, &chatCompletionResponse) if err != nil { return false @@ -112,7 +79,12 @@ func TestTogetherKey(apiKey string) bool { return true } -func RequestTogether(c *fiber.Ctx, model string, messages []Message, temperature float64, context string, maxTokens int) (TogetherChatCompletionResponse, error) { +func RequestTogether(c *fiber.Ctx, llm LLM, messages []Message) string { + model := llm.Model.ModelID + temperature := float64(llm.Temperature) + context := llm.Context + maxTokens := 
int(llm.MaxToken) + var apiKey string err := edgeGlobalClient.WithGlobals(map[string]interface{}{"ext::auth::client_token": c.Cookies("jade-edgedb-auth-token")}).QuerySingle(edgeCtx, ` with @@ -124,12 +96,12 @@ func RequestTogether(c *fiber.Ctx, model string, messages []Message, temperature select filtered_keys.key limit 1 `, &apiKey, "together") if err != nil { - return TogetherChatCompletionResponse{}, fmt.Errorf("error getting Together AI API key: %w", err) + return "JADE internal error: 07-00-0000. Please contact the support." } url := "https://api.together.xyz/v1/completions" - requestBody := TogetherChatCompletionRequest{ + requestBody := OpenaiChatCompletionRequest{ Model: model, Messages: Message2RequestMessage(messages, context), MaxTokens: maxTokens, @@ -138,12 +110,12 @@ func RequestTogether(c *fiber.Ctx, model string, messages []Message, temperature jsonBody, err := json.Marshal(requestBody) if err != nil { - return TogetherChatCompletionResponse{}, fmt.Errorf("error marshaling JSON: %w", err) + return "JADE internal error: 07-01-0001. Please contact the support." } req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody)) if err != nil { - return TogetherChatCompletionResponse{}, fmt.Errorf("error creating request: %w", err) + return "JADE internal error: 07-02-0002. Please contact the support." } req.Header.Set("Content-Type", "application/json") @@ -152,19 +124,25 @@ func RequestTogether(c *fiber.Ctx, model string, messages []Message, temperature client := &http.Client{} resp, err := client.Do(req) if err != nil { - return TogetherChatCompletionResponse{}, fmt.Errorf("error sending request: %w", err) + return "JADE internal error: 07-02-0003. Please contact the support." } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { - return TogetherChatCompletionResponse{}, fmt.Errorf("error reading response body: %w", err) + return "JADE internal error: 07-01-0004. Please contact the support." 
+ } + + for key, value := range OpenaiErrorCodes { + if strings.Contains(resp.Status, key) { + return value + } } var chatCompletionResponse TogetherChatCompletionResponse err = json.Unmarshal(body, &chatCompletionResponse) if err != nil { - return TogetherChatCompletionResponse{}, fmt.Errorf("error unmarshaling JSON: %w", err) + return "JADE internal error: 07-01-0005. Please contact the support." } var usedModelInfo ModelInfo @@ -177,12 +155,16 @@ func RequestTogether(c *fiber.Ctx, model string, messages []Message, temperature LIMIT 1 `, &usedModelInfo, model) if err != nil { - return TogetherChatCompletionResponse{}, fmt.Errorf("error getting model info: %w", err) + return "JADE internal error: 07-00-0006. Please contact the support." } var inputCost float32 = float32(chatCompletionResponse.Usage.PromptTokens) * usedModelInfo.InputPrice var outputCost float32 = float32(chatCompletionResponse.Usage.CompletionTokens) * usedModelInfo.OutputPrice addUsage(c, inputCost, outputCost, chatCompletionResponse.Usage.PromptTokens, chatCompletionResponse.Usage.CompletionTokens, model) - return chatCompletionResponse, nil + if len(chatCompletionResponse.Choices) == 0 { + return "JADE internal error: 07-03-0007. Please contact the support." + } + + return chatCompletionResponse.Choices[0].Text } diff --git a/TODO.md b/TODO.md index c69ce5f..76752a6 100644 --- a/TODO.md +++ b/TODO.md @@ -25,3 +25,4 @@ [ ] Change the terms of service and enter keys page to an HTML [ ] Split Chat.go into smaller files [ ] Create a Request package +[ ] Use the normal RequestProvider function instead of TestProvider to remove TestProvider diff --git a/main.go b/main.go index 566d749..5a73792 100644 --- a/main.go +++ b/main.go @@ -147,7 +147,6 @@ func addKeys(c *fiber.Ctx) error { "anthropic": TestAnthropicKey, "mistral": TestMistralKey, "groq": TestGroqKey, - "gooseai": TestGooseaiKey, "google": TestGoogleKey, "nim": TestNimKey, "perplexity": TestPerplexityKey,