Added max tokens support for the remaining providers

Adrien Bouvais 2024-08-03 16:29:05 +02:00
parent 183bec7203
commit 306dc1c0d8
6 changed files with 24 additions and 12 deletions

View File

@@ -14,6 +14,7 @@ import (
 type FireworkChatCompletionRequest struct {
 	Model string `json:"model"`
 	Messages []RequestMessage `json:"messages"`
+	MaxTokens int `json:"max_tokens"`
 	Temperature float64 `json:"temperature"`
 }
@@ -41,7 +42,7 @@ type FireworkChoice struct {
 func addFireworkMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
 	Messages := getAllSelectedMessages(c)
-	chatCompletion, err := RequestFirework(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context)
+	chatCompletion, err := RequestFirework(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken))
 	if err != nil {
 		fmt.Println("Error requesting Firework: ", err)
 		id := insertBotMessage(c, "Error requesting Firework, model may not be available anymore. Better error message in development.", selected, llm.ID)
@@ -110,7 +111,7 @@ func TestFireworkKey(apiKey string) bool {
 	return true
 }
-func RequestFirework(c *fiber.Ctx, model string, messages []Message, temperature float64, context string) (FireworkChatCompletionResponse, error) {
+func RequestFirework(c *fiber.Ctx, model string, messages []Message, temperature float64, context string, maxTokens int) (FireworkChatCompletionResponse, error) {
 	var apiKey string
 	err := edgeGlobalClient.WithGlobals(map[string]interface{}{"ext::auth::client_token": c.Cookies("jade-edgedb-auth-token")}).QuerySingle(edgeCtx, `
 		with
@@ -130,6 +131,7 @@ func RequestFirework(c *fiber.Ctx, model string, messages []Message, temperature
 	requestBody := FireworkChatCompletionRequest{
 		Model: "accounts/fireworks/models/" + model,
 		Messages: Message2RequestMessage(messages, context),
+		MaxTokens: maxTokens,
 		Temperature: temperature,
 	}
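
For reference, a minimal standalone sketch of what the new field produces on the wire. The structs are trimmed copies of the ones in the diff above; the RequestMessage fields, the model ID, and the message content are assumptions made up for the example:

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of the request type from the diff above; RequestMessage is
// assumed to carry the usual role/content pair.
type RequestMessage struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

type FireworkChatCompletionRequest struct {
	Model       string           `json:"model"`
	Messages    []RequestMessage `json:"messages"`
	MaxTokens   int              `json:"max_tokens"`
	Temperature float64          `json:"temperature"`
}

func main() {
	body := FireworkChatCompletionRequest{
		Model:       "accounts/fireworks/models/example-model", // illustrative ID
		Messages:    []RequestMessage{{Role: "user", Content: "Hello"}},
		MaxTokens:   256,
		Temperature: 0.7,
	}
	out, _ := json.Marshal(body)
	// Prints: {"model":"accounts/fireworks/models/example-model","messages":[{"role":"user","content":"Hello"}],"max_tokens":256,"temperature":0.7}
	fmt.Println(string(out))
}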

View File

@@ -14,6 +14,7 @@ import (
 type GroqChatCompletionRequest struct {
 	Model string `json:"model"`
 	Messages []RequestMessage `json:"messages"`
+	MaxTokens int `json:"max_tokens"`
 	Temperature float64 `json:"temperature"`
 }
@@ -41,7 +42,7 @@ type GroqChoice struct {
 func addGroqMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
 	Messages := getAllSelectedMessages(c)
-	chatCompletion, err := RequestGroq(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context)
+	chatCompletion, err := RequestGroq(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken))
 	if err != nil {
 		fmt.Println("Error requesting Groq: ", err)
 		id := insertBotMessage(c, "Error requesting Groq, model may not be available anymore. Better error message in development.", selected, llm.ID)
@@ -110,7 +111,7 @@ func TestGroqKey(apiKey string) bool {
 	return true
 }
-func RequestGroq(c *fiber.Ctx, model string, messages []Message, temperature float64, context string) (GroqChatCompletionResponse, error) {
+func RequestGroq(c *fiber.Ctx, model string, messages []Message, temperature float64, context string, maxTokens int) (GroqChatCompletionResponse, error) {
 	var apiKey string
 	err := edgeGlobalClient.WithGlobals(map[string]interface{}{"ext::auth::client_token": c.Cookies("jade-edgedb-auth-token")}).QuerySingle(edgeCtx, `
 		with
@@ -130,6 +131,7 @@ func RequestGroq(c *fiber.Ctx, model string, messages []Message, temperature flo
 	requestBody := GroqChatCompletionRequest{
 		Model: model,
 		Messages: Message2RequestMessage(messages, context),
+		MaxTokens: maxTokens,
 		Temperature: temperature,
 	}

View File

@@ -15,6 +15,7 @@ type HuggingfaceChatCompletionRequest struct {
 	Model string `json:"model"`
 	Messages []RequestMessage `json:"messages"`
 	Temperature float64 `json:"temperature"`
+	MaxTokens int `json:"max_tokens"`
 	Stream bool `json:"stream"`
 }
@@ -41,7 +42,7 @@ type HuggingfaceChoice struct {
 func addHuggingfaceMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
 	Messages := getAllSelectedMessages(c)
-	chatCompletion, err := RequestHuggingface(c, llm, Messages, float64(llm.Temperature))
+	chatCompletion, err := RequestHuggingface(c, llm, Messages, float64(llm.Temperature), int(llm.MaxToken))
 	if err != nil {
 		fmt.Println("Error requesting Huggingface: ", err)
 		id := insertBotMessage(c, "Error requesting Huggingface.", selected, llm.ID)
@@ -57,13 +58,14 @@ func addHuggingfaceMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
 	}
 }
-func RequestHuggingface(c *fiber.Ctx, llm LLM, messages []Message, temperature float64) (HuggingfaceChatCompletionResponse, error) {
+func RequestHuggingface(c *fiber.Ctx, llm LLM, messages []Message, temperature float64, maxTokens int) (HuggingfaceChatCompletionResponse, error) {
 	url := llm.Endpoint.Endpoint
 	requestBody := HuggingfaceChatCompletionRequest{
 		Model: "tgi",
 		Messages: Message2RequestMessage(messages, llm.Context),
 		Temperature: temperature,
+		MaxTokens: maxTokens,
 		Stream: false,
 	}
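
The diff only shows the request body being built; the HTTP call itself is outside the hunk. As a hedged sketch of how such a body could be POSTed to llm.Endpoint.Endpoint (the helper name, header choices, and bearer-token scheme are assumptions, not taken from this commit):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// postChatCompletion is a hypothetical helper: it marshals a chat-completion
// body and POSTs it as JSON. The Authorization header is an assumed scheme.
func postChatCompletion(url, apiKey string, requestBody any) (*http.Response, error) {
	payload, err := json.Marshal(requestBody)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(payload))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+apiKey)
	return http.DefaultClient.Do(req)
}

func main() {
	// Illustrative endpoint and body; a real call would pass the
	// HuggingfaceChatCompletionRequest built above.
	resp, err := postChatCompletion("https://example.endpoint/v1/chat/completions", "api-key",
		map[string]any{"model": "tgi", "max_tokens": 128, "stream": false})
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}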

View File

@@ -14,6 +14,7 @@ import (
 type MistralChatCompletionRequest struct {
 	Model string `json:"model"`
 	Messages []RequestMessage `json:"messages"`
+	MaxTokens int `json:"max_tokens"`
 	Temperature float64 `json:"temperature"`
 }
 type MistralChatCompletionResponse struct {
@@ -40,7 +41,7 @@ type MistralChoice struct {
 func addMistralMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
 	Messages := getAllSelectedMessages(c)
-	chatCompletion, err := RequestMistral(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context)
+	chatCompletion, err := RequestMistral(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken))
 	if err != nil {
 		fmt.Println("Error requesting Mistral: ", err)
 		id := insertBotMessage(c, "Error requesting Mistral, model may not be available anymore. Better error message in development.", selected, llm.ID)
@@ -115,7 +116,7 @@ func TestMistralKey(apiKey string) bool {
 	return true
 }
-func RequestMistral(c *fiber.Ctx, model string, messages []Message, temperature float64, context string) (MistralChatCompletionResponse, error) {
+func RequestMistral(c *fiber.Ctx, model string, messages []Message, temperature float64, context string, maxTokens int) (MistralChatCompletionResponse, error) {
 	var apiKey string
 	err := edgeGlobalClient.WithGlobals(map[string]interface{}{"ext::auth::client_token": c.Cookies("jade-edgedb-auth-token")}).QuerySingle(edgeCtx, `
 		with
@@ -135,6 +136,7 @@ func RequestMistral(c *fiber.Ctx, model string, messages []Message, temperature
 	requestBody := MistralChatCompletionRequest{
 		Model: model,
 		Messages: Message2RequestMessage(messages, context),
+		MaxTokens: maxTokens,
 		Temperature: temperature,
 	}

View File

@@ -14,6 +14,7 @@ import (
 type OpenaiChatCompletionRequest struct {
 	Model string `json:"model"`
 	Messages []RequestMessage `json:"messages"`
+	MaxTokens int `json:"max_tokens"`
 	Temperature float64 `json:"temperature"`
 }
@@ -41,7 +42,7 @@ type OpenaiChoice struct {
 func addOpenaiMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
 	Messages := getAllSelectedMessages(c)
-	chatCompletion, err := RequestOpenai(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context)
+	chatCompletion, err := RequestOpenai(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken))
 	if err != nil {
 		fmt.Println("Error requesting OpenAI: ", err)
 		id := insertBotMessage(c, "Error requesting OpenAI, model may not be available anymore. Better error message in development.", selected, llm.ID)
@@ -110,7 +111,7 @@ func TestOpenaiKey(apiKey string) bool {
 	return true
 }
-func RequestOpenai(c *fiber.Ctx, model string, messages []Message, temperature float64, context string) (OpenaiChatCompletionResponse, error) {
+func RequestOpenai(c *fiber.Ctx, model string, messages []Message, temperature float64, context string, maxTokens int) (OpenaiChatCompletionResponse, error) {
 	var apiKey string
 	err := edgeGlobalClient.WithGlobals(map[string]interface{}{"ext::auth::client_token": c.Cookies("jade-edgedb-auth-token")}).QuerySingle(edgeCtx, `
 		with
@@ -130,6 +131,7 @@ func RequestOpenai(c *fiber.Ctx, model string, messages []Message, temperature f
 	requestBody := OpenaiChatCompletionRequest{
 		Model: model,
 		Messages: Message2RequestMessage(messages, context),
+		MaxTokens: maxTokens,
 		Temperature: temperature,
 	}

View File

@@ -15,6 +15,7 @@ import (
 type PerplexityChatCompletionRequest struct {
 	Model string `json:"model"`
 	Messages []RequestMessage `json:"messages"`
+	MaxTokens int `json:"max_tokens"`
 	Temperature float64 `json:"temperature"`
 }
@@ -42,7 +43,7 @@ type PerplexityChoice struct {
 func addPerplexityMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
 	Messages := getAllSelectedMessages(c)
-	chatCompletion, err := RequestPerplexity(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context)
+	chatCompletion, err := RequestPerplexity(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken))
 	if err != nil {
 		fmt.Println("Error requesting Perplexity: ", err)
 		id := insertBotMessage(c, "Error requesting Perplexity, model may not be available anymore. Better error message in development.", selected, llm.ID)
@@ -111,7 +112,7 @@ func TestPerplexityKey(apiKey string) bool {
 	return true
 }
-func RequestPerplexity(c *fiber.Ctx, model string, messages []Message, temperature float64, context string) (PerplexityChatCompletionResponse, error) {
+func RequestPerplexity(c *fiber.Ctx, model string, messages []Message, temperature float64, context string, maxTokens int) (PerplexityChatCompletionResponse, error) {
 	var apiKey string
 	err := edgeGlobalClient.WithGlobals(map[string]interface{}{"ext::auth::client_token": c.Cookies("jade-edgedb-auth-token")}).QuerySingle(edgeCtx, `
 		with
@@ -131,6 +132,7 @@ func RequestPerplexity(c *fiber.Ctx, model string, messages []Message, temperatu
 	requestBody := PerplexityChatCompletionRequest{
 		Model: model,
 		Messages: Message2RequestMessage(messages, context),
+		MaxTokens: maxTokens,
 		Temperature: temperature,
 	}
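
One design note on the new field: none of the six request structs mark MaxTokens with omitempty, so an unset limit is serialized as "max_tokens":0, which a provider may reject or read as a zero-length completion. If that behavior is unwanted, a small sketch of the alternative tag (an assumption about intent, not part of this commit):

package main

import (
	"encoding/json"
	"fmt"
)

// With omitempty, a zero MaxTokens is dropped from the JSON entirely.
type chatRequest struct {
	Model     string `json:"model"`
	MaxTokens int    `json:"max_tokens,omitempty"`
}

func main() {
	out, _ := json.Marshal(chatRequest{Model: "demo"})
	fmt.Println(string(out)) // prints {"model":"demo"} with no max_tokens key
}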