diff --git a/Request.go b/Request.go index 311eabf..779cbb5 100644 --- a/Request.go +++ b/Request.go @@ -13,11 +13,18 @@ import ( "github.com/gofiber/fiber/v2" ) +type RequestMessage struct { + Role string `json:"role"` + Content string `json:"content"` +} + var lastSelectedLLMs []LLM func GeneratePlaceholderHandler(c *fiber.Ctx) error { + // Step 1 I create a User message and send it as output with a placeholder + // that will make a request to GenerateMultipleMessagesHandler when loading message := c.FormValue("message", "") - selectedLLMIds := []string{"1e5a07c4-12fe-11ef-8da6-67d29b408c53"} // TODO Hanle in the UI + selectedLLMIds := []string{"3cd15ca8-1433-11ef-9f22-93f2b78c78de"} // TODO Handle in the UI var selectedLLMs []LLM var selectedLLM LLM @@ -32,6 +39,7 @@ func GeneratePlaceholderHandler(c *fiber.Ctx) error { temperature, modelInfo : { modelID, + maxToken, company : { icon, name @@ -63,6 +71,8 @@ func GeneratePlaceholderHandler(c *fiber.Ctx) error { } func GenerateMultipleMessagesHandler(c *fiber.Ctx) error { + // Step 2 generate multiple messages + // And send them one by one using events insertArea() selectedLLMs := lastSelectedLLMs diff --git a/RequestAnthropic.go b/RequestAnthropic.go index 1d01fa3..b1e4adb 100644 --- a/RequestAnthropic.go +++ b/RequestAnthropic.go @@ -11,10 +11,10 @@ import ( ) type AnthropicChatCompletionRequest struct { - Model string `json:"model"` - Messages []Message `json:"messages"` - MaxTokens int `json:"max_tokens"` - Temperature float64 `json:"temperature"` + Model string `json:"model"` + Messages []RequestMessage `json:"messages"` + MaxTokens int `json:"max_tokens"` + Temperature float64 `json:"temperature"` } type AnthropicChatCompletionResponse struct { @@ -38,7 +38,7 @@ type AnthropicUsage struct { func addAnthropicMessage(llm LLM, selected bool) edgedb.UUID { Messages := getAllSelectedMessages() - chatCompletion, err := RequestAnthropic(llm.Model.ModelID, Messages, 2048, float64(llm.Temperature)) + chatCompletion, 
err := RequestAnthropic(llm.Model.ModelID, Messages, int(llm.Model.MaxToken), float64(llm.Temperature)) if err != nil { fmt.Println("Error:", err) } else if len(chatCompletion.Content) == 0 { @@ -56,7 +56,7 @@ func addAnthropicMessage(llm LLM, selected bool) edgedb.UUID { func TestAnthropicKey(apiKey string) bool { url := "https://api.anthropic.com/v1/messages" - AnthropicMessages := []Message{ + AnthropicMessages := []RequestMessage{ { Role: "user", Content: "Hello", @@ -119,18 +119,20 @@ func RequestAnthropic(model string, messages []Message, maxTokens int, temperatu select filtered_keys.key limit 1 `, &apiKey, "anthropic") if err != nil { - return AnthropicChatCompletionResponse{}, fmt.Errorf("error getting OpenAI API key: %w", err) + return AnthropicChatCompletionResponse{}, fmt.Errorf("error getting Anthropic API key: %w", err) } url := "https://api.anthropic.com/v1/messages" requestBody := AnthropicChatCompletionRequest{ Model: model, - Messages: ChangeRoleBot2Assistant(messages), + Messages: Message2RequestMessage(messages), MaxTokens: maxTokens, Temperature: temperature, } + fmt.Println(maxTokens) + jsonBody, err := json.Marshal(requestBody) if err != nil { return AnthropicChatCompletionResponse{}, fmt.Errorf("error marshaling JSON: %w", err) diff --git a/RequestGroq.go b/RequestGroq.go index 06956c5..e3b884f 100644 --- a/RequestGroq.go +++ b/RequestGroq.go @@ -11,9 +11,9 @@ import ( ) type GroqChatCompletionRequest struct { - Model string `json:"model"` - Messages []Message `json:"messages"` - Temperature float64 `json:"temperature"` + Model string `json:"model"` + Messages []RequestMessage `json:"messages"` + Temperature float64 `json:"temperature"` } type GroqChatCompletionResponse struct { @@ -68,7 +68,7 @@ func TestGroqKey(apiKey string) bool { requestBody := GroqChatCompletionRequest{ Model: "llama3-8b-8192", - Messages: groqMessages, + Messages: Message2RequestMessage(groqMessages), Temperature: 0, } @@ -127,7 +127,7 @@ func RequestGroq(model string, 
messages []Message, temperature float64) (GroqCha requestBody := GroqChatCompletionRequest{ Model: model, - Messages: ChangeRoleBot2Assistant(messages), + Messages: Message2RequestMessage(messages), Temperature: temperature, } diff --git a/RequestMistral.go b/RequestMistral.go index 2ce6d1e..979496c 100644 --- a/RequestMistral.go +++ b/RequestMistral.go @@ -11,9 +11,9 @@ import ( ) type MistralChatCompletionRequest struct { - Model string `json:"model"` - Messages []Message `json:"messages"` - Temperature float64 `json:"temperature"` + Model string `json:"model"` + Messages []RequestMessage `json:"messages"` + Temperature float64 `json:"temperature"` } type MistralChatCompletionResponse struct { ID string `json:"id"` @@ -58,7 +58,7 @@ func TestMistralKey(apiKey string) bool { url := "https://api.mistral.ai/v1/chat/completions" // Convert messages to Mistral format - mistralMessages := []Message{ + mistralMessages := []RequestMessage{ { Role: "user", Content: "Hello", @@ -133,7 +133,7 @@ func RequestMistral(model string, messages []Message, temperature float64) (Mist requestBody := MistralChatCompletionRequest{ Model: model, - Messages: ChangeRoleBot2Assistant(messages), + Messages: Message2RequestMessage(messages), Temperature: temperature, } diff --git a/RequestOpenai.go b/RequestOpenai.go index f10dd14..827aafd 100644 --- a/RequestOpenai.go +++ b/RequestOpenai.go @@ -11,9 +11,9 @@ import ( ) type OpenaiChatCompletionRequest struct { - Model string `json:"model"` - Messages []Message `json:"messages"` - Temperature float64 `json:"temperature"` + Model string `json:"model"` + Messages []RequestMessage `json:"messages"` + Temperature float64 `json:"temperature"` } type OpenaiChatCompletionResponse struct { @@ -60,7 +60,7 @@ func TestOpenaiKey(apiKey string) bool { url := "https://api.openai.com/v1/chat/completions" // Convert messages to OpenAI format - openaiMessages := []Message{ + openaiMessages := []RequestMessage{ { Role: "user", Content: "Hello", @@ -124,11 
+124,13 @@ func RequestOpenai(model string, messages []Message, temperature float64) (Opena return OpenaiChatCompletionResponse{}, fmt.Errorf("error getting OpenAI API key: %w", err) } + fmt.Println("Messages:", messages) + url := "https://api.openai.com/v1/chat/completions" requestBody := OpenaiChatCompletionRequest{ Model: model, - Messages: ChangeRoleBot2Assistant(messages), + Messages: Message2RequestMessage(messages), Temperature: temperature, } diff --git a/database.go b/database.go index 928bc6c..f80d17a 100644 --- a/database.go +++ b/database.go @@ -76,7 +76,7 @@ type LLM struct { type ModelInfo struct { ID edgedb.UUID `edgedb:"id"` Name string `edgedb:"name"` - MaxToken int32 `edgedb:"max_token"` + MaxToken int32 `edgedb:"maxToken"` InputPrice float32 `edgedb:"inputPrice"` OutputPrice float32 `edgedb:"outputPrice"` ModelID string `edgedb:"modelID"` diff --git a/utils.go b/utils.go index bafc76c..c8d596c 100644 --- a/utils.go +++ b/utils.go @@ -96,8 +96,8 @@ func getExistingKeys() (bool, bool, bool, bool) { return openaiExists, anthropicExists, mistralExists, groqExists } -func ChangeRoleBot2Assistant(messages []Message) []Message { - openaiMessages := make([]Message, len(messages)) +func Message2RequestMessage(messages []Message) []RequestMessage { + m := make([]RequestMessage, len(messages)) for i, msg := range messages { var role string switch msg.Role { @@ -108,10 +108,10 @@ func ChangeRoleBot2Assistant(messages []Message) []Message { default: role = "system" } - openaiMessages[i] = Message{ + m[i] = RequestMessage{ Role: role, Content: msg.Content, } } - return openaiMessages + return m }