Fix request-message bug

This commit is contained in:
Adrien Bouvais 2024-05-17 12:18:17 +02:00
parent e9d47faa1d
commit 17880f1d0f
7 changed files with 43 additions and 29 deletions

View File

@ -13,11 +13,18 @@ import (
"github.com/gofiber/fiber/v2" "github.com/gofiber/fiber/v2"
) )
type RequestMessage struct {
Role string `json:"role"`
Content string `json:"content"`
}
var lastSelectedLLMs []LLM var lastSelectedLLMs []LLM
func GeneratePlaceholderHandler(c *fiber.Ctx) error { func GeneratePlaceholderHandler(c *fiber.Ctx) error {
// Step 1 I create a User message and send it as output with a placeholder
// that will make a request to GenerateMultipleMessagesHandler when loading
message := c.FormValue("message", "") message := c.FormValue("message", "")
selectedLLMIds := []string{"1e5a07c4-12fe-11ef-8da6-67d29b408c53"} // TODO Handle in the UI selectedLLMIds := []string{"3cd15ca8-1433-11ef-9f22-93f2b78c78de"} // TODO Handle in the UI
var selectedLLMs []LLM var selectedLLMs []LLM
var selectedLLM LLM var selectedLLM LLM
@ -32,6 +39,7 @@ func GeneratePlaceholderHandler(c *fiber.Ctx) error {
temperature, temperature,
modelInfo : { modelInfo : {
modelID, modelID,
maxToken,
company : { company : {
icon, icon,
name name
@ -63,6 +71,8 @@ func GeneratePlaceholderHandler(c *fiber.Ctx) error {
} }
func GenerateMultipleMessagesHandler(c *fiber.Ctx) error { func GenerateMultipleMessagesHandler(c *fiber.Ctx) error {
// Step 2 generate multiple messages
// And send them one by one using events
insertArea() insertArea()
selectedLLMs := lastSelectedLLMs selectedLLMs := lastSelectedLLMs

View File

@ -11,10 +11,10 @@ import (
) )
type AnthropicChatCompletionRequest struct { type AnthropicChatCompletionRequest struct {
Model string `json:"model"` Model string `json:"model"`
Messages []Message `json:"messages"` Messages []RequestMessage `json:"messages"`
MaxTokens int `json:"max_tokens"` MaxTokens int `json:"max_tokens"`
Temperature float64 `json:"temperature"` Temperature float64 `json:"temperature"`
} }
type AnthropicChatCompletionResponse struct { type AnthropicChatCompletionResponse struct {
@ -38,7 +38,7 @@ type AnthropicUsage struct {
func addAnthropicMessage(llm LLM, selected bool) edgedb.UUID { func addAnthropicMessage(llm LLM, selected bool) edgedb.UUID {
Messages := getAllSelectedMessages() Messages := getAllSelectedMessages()
chatCompletion, err := RequestAnthropic(llm.Model.ModelID, Messages, 2048, float64(llm.Temperature)) chatCompletion, err := RequestAnthropic(llm.Model.ModelID, Messages, int(llm.Model.MaxToken), float64(llm.Temperature))
if err != nil { if err != nil {
fmt.Println("Error:", err) fmt.Println("Error:", err)
} else if len(chatCompletion.Content) == 0 { } else if len(chatCompletion.Content) == 0 {
@ -56,7 +56,7 @@ func addAnthropicMessage(llm LLM, selected bool) edgedb.UUID {
func TestAnthropicKey(apiKey string) bool { func TestAnthropicKey(apiKey string) bool {
url := "https://api.anthropic.com/v1/messages" url := "https://api.anthropic.com/v1/messages"
AnthropicMessages := []Message{ AnthropicMessages := []RequestMessage{
{ {
Role: "user", Role: "user",
Content: "Hello", Content: "Hello",
@ -119,18 +119,20 @@ func RequestAnthropic(model string, messages []Message, maxTokens int, temperatu
select filtered_keys.key limit 1 select filtered_keys.key limit 1
`, &apiKey, "anthropic") `, &apiKey, "anthropic")
if err != nil { if err != nil {
return AnthropicChatCompletionResponse{}, fmt.Errorf("error getting OpenAI API key: %w", err) return AnthropicChatCompletionResponse{}, fmt.Errorf("error getting Anthropic API key: %w", err)
} }
url := "https://api.anthropic.com/v1/messages" url := "https://api.anthropic.com/v1/messages"
requestBody := AnthropicChatCompletionRequest{ requestBody := AnthropicChatCompletionRequest{
Model: model, Model: model,
Messages: ChangeRoleBot2Assistant(messages), Messages: Message2RequestMessage(messages),
MaxTokens: maxTokens, MaxTokens: maxTokens,
Temperature: temperature, Temperature: temperature,
} }
fmt.Println(maxTokens)
jsonBody, err := json.Marshal(requestBody) jsonBody, err := json.Marshal(requestBody)
if err != nil { if err != nil {
return AnthropicChatCompletionResponse{}, fmt.Errorf("error marshaling JSON: %w", err) return AnthropicChatCompletionResponse{}, fmt.Errorf("error marshaling JSON: %w", err)

View File

@ -11,9 +11,9 @@ import (
) )
type GroqChatCompletionRequest struct { type GroqChatCompletionRequest struct {
Model string `json:"model"` Model string `json:"model"`
Messages []Message `json:"messages"` Messages []RequestMessage `json:"messages"`
Temperature float64 `json:"temperature"` Temperature float64 `json:"temperature"`
} }
type GroqChatCompletionResponse struct { type GroqChatCompletionResponse struct {
@ -68,7 +68,7 @@ func TestGroqKey(apiKey string) bool {
requestBody := GroqChatCompletionRequest{ requestBody := GroqChatCompletionRequest{
Model: "llama3-8b-8192", Model: "llama3-8b-8192",
Messages: groqMessages, Messages: Message2RequestMessage(groqMessages),
Temperature: 0, Temperature: 0,
} }
@ -127,7 +127,7 @@ func RequestGroq(model string, messages []Message, temperature float64) (GroqCha
requestBody := GroqChatCompletionRequest{ requestBody := GroqChatCompletionRequest{
Model: model, Model: model,
Messages: ChangeRoleBot2Assistant(messages), Messages: Message2RequestMessage(messages),
Temperature: temperature, Temperature: temperature,
} }

View File

@ -11,9 +11,9 @@ import (
) )
type MistralChatCompletionRequest struct { type MistralChatCompletionRequest struct {
Model string `json:"model"` Model string `json:"model"`
Messages []Message `json:"messages"` Messages []RequestMessage `json:"messages"`
Temperature float64 `json:"temperature"` Temperature float64 `json:"temperature"`
} }
type MistralChatCompletionResponse struct { type MistralChatCompletionResponse struct {
ID string `json:"id"` ID string `json:"id"`
@ -58,7 +58,7 @@ func TestMistralKey(apiKey string) bool {
url := "https://api.mistral.ai/v1/chat/completions" url := "https://api.mistral.ai/v1/chat/completions"
// Convert messages to Mistral format // Convert messages to Mistral format
mistralMessages := []Message{ mistralMessages := []RequestMessage{
{ {
Role: "user", Role: "user",
Content: "Hello", Content: "Hello",
@ -133,7 +133,7 @@ func RequestMistral(model string, messages []Message, temperature float64) (Mist
requestBody := MistralChatCompletionRequest{ requestBody := MistralChatCompletionRequest{
Model: model, Model: model,
Messages: ChangeRoleBot2Assistant(messages), Messages: Message2RequestMessage(messages),
Temperature: temperature, Temperature: temperature,
} }

View File

@ -11,9 +11,9 @@ import (
) )
type OpenaiChatCompletionRequest struct { type OpenaiChatCompletionRequest struct {
Model string `json:"model"` Model string `json:"model"`
Messages []Message `json:"messages"` Messages []RequestMessage `json:"messages"`
Temperature float64 `json:"temperature"` Temperature float64 `json:"temperature"`
} }
type OpenaiChatCompletionResponse struct { type OpenaiChatCompletionResponse struct {
@ -60,7 +60,7 @@ func TestOpenaiKey(apiKey string) bool {
url := "https://api.openai.com/v1/chat/completions" url := "https://api.openai.com/v1/chat/completions"
// Convert messages to OpenAI format // Convert messages to OpenAI format
openaiMessages := []Message{ openaiMessages := []RequestMessage{
{ {
Role: "user", Role: "user",
Content: "Hello", Content: "Hello",
@ -124,11 +124,13 @@ func RequestOpenai(model string, messages []Message, temperature float64) (Opena
return OpenaiChatCompletionResponse{}, fmt.Errorf("error getting OpenAI API key: %w", err) return OpenaiChatCompletionResponse{}, fmt.Errorf("error getting OpenAI API key: %w", err)
} }
fmt.Println("Messages:", messages)
url := "https://api.openai.com/v1/chat/completions" url := "https://api.openai.com/v1/chat/completions"
requestBody := OpenaiChatCompletionRequest{ requestBody := OpenaiChatCompletionRequest{
Model: model, Model: model,
Messages: ChangeRoleBot2Assistant(messages), Messages: Message2RequestMessage(messages),
Temperature: temperature, Temperature: temperature,
} }

View File

@ -76,7 +76,7 @@ type LLM struct {
type ModelInfo struct { type ModelInfo struct {
ID edgedb.UUID `edgedb:"id"` ID edgedb.UUID `edgedb:"id"`
Name string `edgedb:"name"` Name string `edgedb:"name"`
MaxToken int32 `edgedb:"max_token"` MaxToken int32 `edgedb:"maxToken"`
InputPrice float32 `edgedb:"inputPrice"` InputPrice float32 `edgedb:"inputPrice"`
OutputPrice float32 `edgedb:"outputPrice"` OutputPrice float32 `edgedb:"outputPrice"`
ModelID string `edgedb:"modelID"` ModelID string `edgedb:"modelID"`

View File

@ -96,8 +96,8 @@ func getExistingKeys() (bool, bool, bool, bool) {
return openaiExists, anthropicExists, mistralExists, groqExists return openaiExists, anthropicExists, mistralExists, groqExists
} }
func ChangeRoleBot2Assistant(messages []Message) []Message { func Message2RequestMessage(messages []Message) []RequestMessage {
openaiMessages := make([]Message, len(messages)) m := make([]RequestMessage, len(messages))
for i, msg := range messages { for i, msg := range messages {
var role string var role string
switch msg.Role { switch msg.Role {
@ -108,10 +108,10 @@ func ChangeRoleBot2Assistant(messages []Message) []Message {
default: default:
role = "system" role = "system"
} }
openaiMessages[i] = Message{ m[i] = RequestMessage{
Role: role, Role: role,
Content: msg.Content, Content: msg.Content,
} }
} }
return openaiMessages return m
} }