Used auto formatting on Go files

Adrien Bouvais 2024-08-04 07:58:21 +02:00
parent 6053549444
commit 5384383908
13 changed files with 75 additions and 86 deletions

View File

@@ -77,7 +77,7 @@ type LLM struct {
	ID          edgedb.UUID    `edgedb:"id"`
	Name        string         `edgedb:"name"`
	Context     string         `edgedb:"context"`
	MaxToken    int32          `edgedb:"max_tokens"`
	Temperature float32        `edgedb:"temperature"`
	Model       ModelInfo      `edgedb:"modelInfo"`
	Endpoint    CustomEndpoint `edgedb:"custom_endpoint"`
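Column alignment like this is exactly what gofmt automates. For context, a minimal sketch using the standard go/format package, which applies the same rules as the gofmt tool (the struct in the source string is a trimmed stand-in, not the real type):

```go
package main

import (
	"fmt"
	"go/format"
)

func main() {
	// Unaligned struct, roughly what the pre-commit version looked like.
	src := []byte("package m\n\ntype LLM struct {\nID string `edgedb:\"id\"`\nMaxToken int32 `edgedb:\"max_tokens\"`\n}\n")

	out, err := format.Source(src) // same formatter the gofmt tool uses
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // fields come back tab-aligned into columns
}
```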

LLM.go (14 changed lines)
View File

@@ -50,7 +50,7 @@ func deleteLLMtoDelete(c *fiber.Ctx) {
	}
}

func createLLM(c *fiber.Ctx) error {
	name := c.FormValue("model-name-input")
	modelID := c.FormValue("selectedLLMId")
	temperature := c.FormValue("temperature-slider")
@@ -61,16 +61,16 @@ func deleteLLMtoDelete(c *fiber.Ctx) {
	url := c.FormValue("model-url-input")
	token := c.FormValue("model-key-input")
	customID := c.FormValue("model-cid-input")
	maxTokenStr := c.FormValue("max-token-input")
	maxToken, err := strconv.Atoi(maxTokenStr)
	if err != nil {
		maxToken = 1024
	}
	fmt.Println("Adding LLM with maxtoken:", maxToken)
	// TODO change the company
	if modelID == "custom" {
		err := edgeGlobalClient.WithGlobals(map[string]interface{}{"ext::auth::client_token": c.Cookies("jade-edgedb-auth-token")}).Execute(edgeCtx, `
			WITH
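The max-token parsing above falls back to 1024 whenever the form value is missing or malformed. A self-contained sketch of that pattern (atoiOrDefault is a hypothetical helper, not a function from this codebase):

```go
package main

import (
	"fmt"
	"strconv"
)

// atoiOrDefault parses s, returning def when s is empty or not a number.
func atoiOrDefault(s string, def int) int {
	n, err := strconv.Atoi(s)
	if err != nil {
		return def
	}
	return n
}

func main() {
	fmt.Println(atoiOrDefault("2048", 1024)) // 2048
	fmt.Println(atoiOrDefault("", 1024))     // 1024: empty form field
}
```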

View File

@@ -1,13 +1,3 @@
-// I guess it should be some kind of package, with different parts for different companies
-// I will maybe do it in the future; right now I don't have time to learn that, and I like it like this
-// It does take more time and trial and error to do all of them, but they are fully independent
-// And this is simple: I don't need to trick my mind into understanding that some parts are shared, etc.
-// If I want to see how I go from the message to the response, I can. That's what I want.
-// If you are wondering how it works:
-// User sends message -> Generate HTML of one user message and one bot placeholder ----
-// -> Send HTML and append it to the chat container -> The placeholder does a load HTMX request to GenerateMultipleMessagesHandler ----
-// -> Make multiple requests in parallel to all APIs -> Send one SSE event per message received.
package main

import (
@@ -165,8 +155,8 @@ func GenerateMultipleMessagesHandler(c *fiber.Ctx) error {
		addMessageFunc = addPerplexityMessage
	case "fireworks":
		addMessageFunc = addFireworkMessage
	case "nim":
		addMessageFunc = addNimMessage
	}

	var messageID edgedb.UUID
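The switch above picks one addMessageFunc per provider name; every handler shares the same signature. The same dispatch can be read as a table of function values. A simplified, self-contained sketch (handler signatures reduced to plain strings here; the real ones take *fiber.Ctx and an LLM and return an edgedb.UUID):

```go
package main

import "fmt"

// addMessage stands in for the real handler signature.
type addMessage func(prompt string) string

func addNimMessage(p string) string      { return "nim: " + p }
func addFireworkMessage(p string) string { return "fireworks: " + p }

func main() {
	handlers := map[string]addMessage{
		"nim":       addNimMessage,
		"fireworks": addFireworkMessage,
	}
	if h, ok := handlers["nim"]; ok {
		fmt.Println(h("hello")) // nim: hello
	}
}
```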

View File

@@ -14,7 +14,7 @@ import (
type AnthropicChatCompletionRequest struct {
	Model       string           `json:"model"`
	Messages    []RequestMessage `json:"messages"`
	MaxTokens   int              `json:"max_tokens"`
	Temperature float64          `json:"temperature"`
	Context     string           `json:"system"`
}
@@ -42,8 +42,8 @@ func addAnthropicMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
	chatCompletion, err := RequestAnthropic(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken))
	if err != nil {
		fmt.Println("Error requesting Anthropic: ", err)
		id := insertBotMessage(c, "Error requesting Anthropic, model may not be available anymore. Better error message in development.", selected, llm.ID)
		return id
	} else if len(chatCompletion.Content) == 0 {
		fmt.Println("No response from Anthropic")
@@ -112,12 +112,12 @@ func TestAnthropicKey(apiKey string) bool {
func RequestAnthropic(c *fiber.Ctx, model string, messages []Message, temperature float64, context string, maxTokens int) (AnthropicChatCompletionResponse, error) {
	if maxTokens == 0 {
		maxTokens = 4096
	}
	fmt.Println("Requesting anthropic using max token:", maxTokens)
	var apiKey struct {
		Key string `edgedb:"key"`
	}
	err := edgeGlobalClient.WithGlobals(map[string]interface{}{"ext::auth::client_token": c.Cookies("jade-edgedb-auth-token")}).QuerySingle(edgeCtx, `
@@ -136,7 +136,7 @@ func RequestAnthropic(c *fiber.Ctx, model string, messages []Message, temperatur
	requestBody := AnthropicChatCompletionRequest{
		Model:       model,
		Messages:    Message2RequestMessage(messages, ""),
		MaxTokens:   maxTokens,
		Temperature: temperature,
		Context:     context,
	}
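For reference, this is the JSON a request struct with these tags serializes to; the `system` tag is why the Context field lands in Anthropic's system slot. A minimal sketch with made-up placeholder values (Messages omitted for brevity):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed copy of the request struct above.
type anthropicRequest struct {
	Model       string  `json:"model"`
	MaxTokens   int     `json:"max_tokens"`
	Temperature float64 `json:"temperature"`
	Context     string  `json:"system"`
}

func main() {
	body, err := json.Marshal(anthropicRequest{
		Model:       "example-model", // placeholder, not a real model ID
		MaxTokens:   4096,            // the fallback used when the stored value is 0
		Temperature: 0.7,
		Context:     "You are a helpful assistant.",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
	// {"model":"example-model","max_tokens":4096,"temperature":0.7,"system":"You are a helpful assistant."}
}
```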

View File

@@ -14,7 +14,7 @@ import (
type FireworkChatCompletionRequest struct {
	Model       string           `json:"model"`
	Messages    []RequestMessage `json:"messages"`
	MaxTokens   int              `json:"max_tokens"`
	Temperature float64          `json:"temperature"`
}
@@ -44,8 +44,8 @@ func addFireworkMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
	chatCompletion, err := RequestFirework(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken))
	if err != nil {
		fmt.Println("Error requesting Firework: ", err)
		id := insertBotMessage(c, "Error requesting Firework, model may not be available anymore. Better error message in development.", selected, llm.ID)
		return id
	} else if len(chatCompletion.Choices) == 0 {
		fmt.Println("No response from Firework")
@@ -131,7 +131,7 @@ func RequestFirework(c *fiber.Ctx, model string, messages []Message, temperature
	requestBody := FireworkChatCompletionRequest{
		Model:       "accounts/fireworks/models/" + model,
		Messages:    Message2RequestMessage(messages, context),
		MaxTokens:   maxTokens,
		Temperature: temperature,
	}

View File

@@ -57,8 +57,8 @@ func addGoogleMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
	chatCompletion, err := RequestGoogle(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context)
	if err != nil {
		fmt.Println("Error requesting Google: ", err)
		id := insertBotMessage(c, "Error requesting Google.", selected, llm.ID)
		return id
	} else if len(chatCompletion.Candidates) == 0 {
		fmt.Println("No response from Google")

View File

@@ -14,7 +14,7 @@ import (
type GroqChatCompletionRequest struct {
	Model       string           `json:"model"`
	Messages    []RequestMessage `json:"messages"`
	MaxTokens   int              `json:"max_tokens"`
	Temperature float64          `json:"temperature"`
}
@@ -44,8 +44,8 @@ func addGroqMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
	chatCompletion, err := RequestGroq(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken))
	if err != nil {
		fmt.Println("Error requesting Groq: ", err)
		id := insertBotMessage(c, "Error requesting Groq, model may not be available anymore. Better error message in development.", selected, llm.ID)
		return id
	} else if len(chatCompletion.Choices) == 0 {
		fmt.Println("No response from Groq")
@@ -131,7 +131,7 @@ func RequestGroq(c *fiber.Ctx, model string, messages []Message, temperature flo
	requestBody := GroqChatCompletionRequest{
		Model:       model,
		Messages:    Message2RequestMessage(messages, context),
		MaxTokens:   maxTokens,
		Temperature: temperature,
	}

View File

@@ -15,7 +15,7 @@ type HuggingfaceChatCompletionRequest struct {
	Model       string           `json:"model"`
	Messages    []RequestMessage `json:"messages"`
	Temperature float64          `json:"temperature"`
	MaxTokens   int              `json:"max_tokens"`
	Stream      bool             `json:"stream"`
}
@@ -44,8 +44,8 @@ func addHuggingfaceMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
	chatCompletion, err := RequestHuggingface(c, llm, Messages, float64(llm.Temperature), int(llm.MaxToken))
	if err != nil {
		fmt.Println("Error requesting Huggingface: ", err)
		id := insertBotMessage(c, "Error requesting Huggingface.", selected, llm.ID)
		return id
	} else if len(chatCompletion.Choices) == 0 {
		fmt.Println("No response from Endpoint")
@@ -65,7 +65,7 @@ func RequestHuggingface(c *fiber.Ctx, llm LLM, messages []Message, temperature f
		Model:       "tgi",
		Messages:    Message2RequestMessage(messages, llm.Context),
		Temperature: temperature,
		MaxTokens:   maxTokens,
		Stream:      false,
	}

View File

@@ -14,7 +14,7 @@ import (
type MistralChatCompletionRequest struct {
	Model       string           `json:"model"`
	Messages    []RequestMessage `json:"messages"`
	MaxTokens   int              `json:"max_tokens"`
	Temperature float64          `json:"temperature"`
}

type MistralChatCompletionResponse struct {
@@ -43,8 +43,8 @@ func addMistralMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
	chatCompletion, err := RequestMistral(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken))
	if err != nil {
		fmt.Println("Error requesting Mistral: ", err)
		id := insertBotMessage(c, "Error requesting Mistral, model may not be available anymore. Better error message in development.", selected, llm.ID)
		return id
	} else if len(chatCompletion.Choices) == 0 {
		id := insertBotMessage(c, "No response from Mistral", selected, llm.ID)
@@ -136,7 +136,7 @@ func RequestMistral(c *fiber.Ctx, model string, messages []Message, temperature
	requestBody := MistralChatCompletionRequest{
		Model:       model,
		Messages:    Message2RequestMessage(messages, context),
		MaxTokens:   maxTokens,
		Temperature: temperature,
	}

View File

@@ -1,31 +1,30 @@
package main

import (
+	"bytes"
+	"encoding/json"
	"fmt"
	"io"
	"net/http"
-	"encoding/json"
-	"bytes"

	"github.com/edgedb/edgedb-go"
	"github.com/gofiber/fiber/v2"
)
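The reordering above is gofmt sorting the import block alphabetically; keeping third-party modules in their own group is the conventional goimports-style layout. A minimal compilable file in the same style (the /health route is illustrative only, not from this codebase):

```go
package main

import (
	"log"
	"net/http"

	"github.com/gofiber/fiber/v2"
)

func main() {
	app := fiber.New()
	app.Get("/health", func(c *fiber.Ctx) error {
		// Standard library and fiber, imported in grouped, sorted blocks as above.
		return c.SendStatus(http.StatusOK)
	})
	log.Fatal(app.Listen(":3000"))
}
```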
type NimChatCompletionRequest struct {
	Model       string           `json:"model"`
	Messages    []RequestMessage `json:"messages"`
	MaxTokens   int              `json:"max_tokens"`
	Temperature float64          `json:"temperature"`
}

type NimChatCompletionResponse struct {
	ID      string      `json:"id"`
	Object  string      `json:"object"`
	Created int64       `json:"created"`
	Model   string      `json:"model"`
	Usage   NimUsage    `json:"usage"`
	Choices []NimChoice `json:"choices"`
}

type NimUsage struct {
@@ -45,8 +44,8 @@ func addNimMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
	chatCompletion, err := RequestNim(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken))
	if err != nil {
		fmt.Println("Error requesting NIM: ", err)
		id := insertBotMessage(c, "Error requesting NIM, model may not be available anymore. Better error message in development.", selected, llm.ID)
		return id
	} else if len(chatCompletion.Choices) == 0 {
		fmt.Println("No response from NIM")
@@ -60,10 +59,10 @@ func addNimMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
	}

func TestNimKey(apiKey string) bool {
	url := "https://integrate.api.nvidia.com/v1/chat/completions"
	//apiKey := "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"
	fmt.Println("Testing new Nvidia NIM key:", apiKey)

	// Convert messages to OpenAI format
	nimMessages := []RequestMessage{
@@ -81,13 +80,13 @@ func TestNimKey(apiKey string) bool {
	jsonBody, err := json.Marshal(requestBody)
	if err != nil {
		fmt.Println("Error when testing NIM key. Cant parse JSON request.")
		return false
	}

	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody))
	if err != nil {
		fmt.Println("Error when testing NIM key. Cant generate new request")
		return false
	}
@@ -97,36 +96,36 @@ func TestNimKey(apiKey string) bool {
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("Error when testing NIM key. Cant send request.")
		return false
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("Error when testing NIM key. Cant read response.")
		return false
	}

	var chatCompletionResponse NimChatCompletionResponse
	err = json.Unmarshal(body, &chatCompletionResponse)
	if err != nil {
		fmt.Println(resp.Status)
		fmt.Println(resp.Body)
		fmt.Println("Error when testing NIM key. Cant unmarshal response.")
		return false
	}

	if chatCompletionResponse.Usage.CompletionTokens == 0 {
		fmt.Println(resp.Status)
		fmt.Println(resp.Body)
		fmt.Println("Error when testing NIM key. No completion token.")
		return false
	}

	Content := chatCompletionResponse.Choices[0].Message.Content
	fmt.Println(Content)
	return true
}
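TestNimKey above follows a fixed sequence: marshal the payload, build the request, send it, read the body, unmarshal, with an early `false` at each failure point. A generic, self-contained sketch of that sequence (postJSON and the httptest stand-in server are illustrative, not part of this codebase):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

// postJSON mirrors the marshal -> build -> send -> read -> unmarshal chain.
func postJSON(url, apiKey string, payload, out any) error {
	jsonBody, err := json.Marshal(payload)
	if err != nil {
		return fmt.Errorf("marshal request: %w", err)
	}
	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody))
	if err != nil {
		return fmt.Errorf("build request: %w", err)
	}
	req.Header.Set("Authorization", "Bearer "+apiKey)
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return fmt.Errorf("send request: %w", err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("read response: %w", err)
	}
	return json.Unmarshal(body, out)
}

func main() {
	// Stand-in server so the example runs without a real API key.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, `{"usage":{"completion_tokens":7}}`)
	}))
	defer srv.Close()

	var out struct {
		Usage struct {
			CompletionTokens int `json:"completion_tokens"`
		} `json:"usage"`
	}
	if err := postJSON(srv.URL, "dummy-key", map[string]string{"ping": "pong"}, &out); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("completion tokens:", out.Usage.CompletionTokens) // 7
}
```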
@@ -151,7 +150,7 @@ func RequestNim(c *fiber.Ctx, model string, messages []Message, temperature floa
	requestBody := NimChatCompletionRequest{
		Model:       model,
		Messages:    Message2RequestMessage(messages, context),
		MaxTokens:   maxToken,
		Temperature: temperature,
	}

View File

@@ -14,7 +14,7 @@ import (
type OpenaiChatCompletionRequest struct {
	Model       string           `json:"model"`
	Messages    []RequestMessage `json:"messages"`
	MaxTokens   int              `json:"max_tokens"`
	Temperature float64          `json:"temperature"`
}
@@ -44,8 +44,8 @@ func addOpenaiMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
	chatCompletion, err := RequestOpenai(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken))
	if err != nil {
		fmt.Println("Error requesting OpenAI: ", err)
		id := insertBotMessage(c, "Error requesting OpenAI, model may not be available anymore. Better error message in development.", selected, llm.ID)
		return id
	} else if len(chatCompletion.Choices) == 0 {
		fmt.Println("No response from OpenAI")
@@ -131,7 +131,7 @@ func RequestOpenai(c *fiber.Ctx, model string, messages []Message, temperature f
	requestBody := OpenaiChatCompletionRequest{
		Model:       model,
		Messages:    Message2RequestMessage(messages, context),
		MaxTokens:   maxTokens,
		Temperature: temperature,
	}

View File

@@ -15,7 +15,7 @@ import (
type PerplexityChatCompletionRequest struct {
	Model       string           `json:"model"`
	Messages    []RequestMessage `json:"messages"`
	MaxTokens   int              `json:"max_tokens"`
	Temperature float64          `json:"temperature"`
}
@@ -45,8 +45,8 @@ func addPerplexityMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
	chatCompletion, err := RequestPerplexity(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken))
	if err != nil {
		fmt.Println("Error requesting Perplexity: ", err)
		id := insertBotMessage(c, "Error requesting Perplexity, model may not be available anymore. Better error message in development.", selected, llm.ID)
		return id
	} else if len(chatCompletion.Choices) == 0 {
		fmt.Println("No response from Perplexity")
@@ -132,7 +132,7 @@ func RequestPerplexity(c *fiber.Ctx, model string, messages []Message, temperatu
	requestBody := PerplexityChatCompletionRequest{
		Model:       model,
		Messages:    Message2RequestMessage(messages, context),
		MaxTokens:   maxTokens,
		Temperature: temperature,
	}

View File

@@ -191,7 +191,7 @@ func addKeys(c *fiber.Ctx) error {
		"groq":       c.FormValue("groq_key"),
		"gooseai":    c.FormValue("goose_key"),
		"google":     c.FormValue("google_key"),
		"nim":        c.FormValue("nim_key"),
		"perplexity": c.FormValue("perplexity_key"),
		"fireworks":  c.FormValue("fireworks_key"),
	}
@@ -203,7 +203,7 @@ func addKeys(c *fiber.Ctx) error {
		"groq":       TestGroqKey,
		"gooseai":    TestGooseaiKey,
		"google":     TestGoogleKey,
		"nim":        TestNimKey,
		"perplexity": TestPerplexityKey,
		"fireworks":  TestFireworkKey,
	}
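The two maps above keep submitted form values and their validators in parallel, keyed by provider name, so supporting a new provider means adding one entry to each map rather than another branch. A reduced, runnable sketch of that table-driven check (the inline validators are placeholders for the real Test*Key functions):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	keys := map[string]string{
		"nim":  "nvapi-example", // placeholder value
		"groq": "",              // empty form field: user submitted nothing
	}
	testers := map[string]func(string) bool{
		"nim":  func(k string) bool { return strings.HasPrefix(k, "nvapi-") },
		"groq": func(k string) bool { return k != "" },
	}
	for provider, key := range keys {
		if key == "" {
			continue // skip providers the user left blank
		}
		fmt.Println(provider, "key valid:", testers[provider](key))
	}
}
```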