Used auto formatting on Go files

Adrien Bouvais 2024-08-04 07:58:21 +02:00
parent 6053549444
commit 5384383908
13 changed files with 75 additions and 86 deletions

View File

@@ -77,7 +77,7 @@ type LLM struct {
ID edgedb.UUID `edgedb:"id"`
Name string `edgedb:"name"`
Context string `edgedb:"context"`
MaxToken int32 `edgedb:"max_tokens"`
Temperature float32 `edgedb:"temperature"`
Model ModelInfo `edgedb:"modelInfo"`
Endpoint CustomEndpoint `edgedb:"custom_endpoint"`

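The edgedb struct tags above map EdgeQL shape fields onto Go struct fields by name. A minimal sketch of filling this struct, modeled on the client calls that appear later in this commit (the query text and the trimmed shape are illustrative, not from the diff):

// Hypothetical query; the global and cookie names are the ones used in this commit.
var llms []LLM
err := edgeGlobalClient.WithGlobals(map[string]interface{}{
	"ext::auth::client_token": c.Cookies("jade-edgedb-auth-token"),
}).Query(edgeCtx, `
	SELECT LLM {
		id,
		name,
		context,
		max_tokens,
		temperature,
		# modelInfo and custom_endpoint shapes omitted here for brevity
	}`, &llms)
if err != nil {
	fmt.Println("Error querying LLMs: ", err)
}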
LLM.go (14 lines changed)
View File

@@ -50,7 +50,7 @@ func deleteLLMtoDelete(c *fiber.Ctx) {
}
}
func createLLM(c *fiber.Ctx) error {
name := c.FormValue("model-name-input")
modelID := c.FormValue("selectedLLMId")
temperature := c.FormValue("temperature-slider")
@@ -61,16 +61,16 @@ func deleteLLMtoDelete(c *fiber.Ctx) {
url := c.FormValue("model-url-input")
token := c.FormValue("model-key-input")
customID := c.FormValue("model-cid-input")
maxTokenStr := c.FormValue("max-token-input")
maxToken, err := strconv.Atoi(maxTokenStr)
if err != nil {
maxToken = 1024
}
fmt.Println("Adding LLM with maxtoken:", maxToken)
// TODO change the company
if modelID == "custom" {
err := edgeGlobalClient.WithGlobals(map[string]interface{}{"ext::auth::client_token": c.Cookies("jade-edgedb-auth-token")}).Execute(edgeCtx, `
WITH

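The max-token parsing in the createLLM hunk above silently falls back to a default when the form value is missing or malformed. A self-contained sketch of that pattern (the 1024 default mirrors the hunk; everything else is standard library):

package main

import (
	"fmt"
	"strconv"
)

// parseMaxToken mirrors the fallback in createLLM: a missing or
// malformed form value becomes the default of 1024 tokens.
func parseMaxToken(raw string) int {
	maxToken, err := strconv.Atoi(raw)
	if err != nil {
		return 1024
	}
	return maxToken
}

func main() {
	fmt.Println(parseMaxToken("2048")) // 2048
	fmt.Println(parseMaxToken(""))     // 1024
}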
View File

@@ -1,13 +1,3 @@
// I guess it should be some kind of package with a different part for each company
// I may do that in the future; right now I don't have time to learn it, and I like it like this
// It does take more trial and error to write all of them, but they are fully independent
// And this is simple: I don't need to trick my mind into understanding that some parts are shared, etc.
// If I want to see how I go from the message to the response, I can. That's what I want
// If you are wondering how it works:
// User sends message -> Generate HTML of one user message and one bot placeholder ----
// -> Send HTML and append it to the chat container -> The placeholder does an HTMX load request to GenerateMultipleMessagesHandler ----
// -> Make multiple requests in parallel to all APIs -> Send one SSE event per message received.
package main
import (
@@ -165,8 +155,8 @@ func GenerateMultipleMessagesHandler(c *fiber.Ctx) error {
addMessageFunc = addPerplexityMessage
case "fireworks":
addMessageFunc = addFireworkMessage
case "nim":
addMessageFunc = addNimMessage
case "nim":
addMessageFunc = addNimMessage
}
var messageID edgedb.UUID

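The switch in the hunk above selects one add*Message handler per provider; every handler in this commit shares the same signature, so the dispatch reduces to a lookup. A condensed, hypothetical view:

// handlerFor is illustrative; the handler names and their shared
// signature are the ones visible throughout this commit.
func handlerFor(company string) func(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
	switch company {
	case "anthropic":
		return addAnthropicMessage
	case "perplexity":
		return addPerplexityMessage
	case "fireworks":
		return addFireworkMessage
	case "nim":
		return addNimMessage
	default:
		return nil // unknown provider; caller must handle this
	}
}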
View File

@@ -14,7 +14,7 @@ import (
type AnthropicChatCompletionRequest struct {
Model string `json:"model"`
Messages []RequestMessage `json:"messages"`
MaxTokens int `json:"max_tokens"`
Temperature float64 `json:"temperature"`
Context string `json:"system"`
}
@@ -42,8 +42,8 @@ func addAnthropicMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
chatCompletion, err := RequestAnthropic(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken))
if err != nil {
fmt.Println("Error requesting Anthropic: ", err)
id := insertBotMessage(c, "Error requesting Anthropic, model may not be available anymore. Better error message in development.", selected, llm.ID)
fmt.Println("Error requesting Anthropic: ", err)
id := insertBotMessage(c, "Error requesting Anthropic, model may not be available anymore. Better error message in development.", selected, llm.ID)
return id
} else if len(chatCompletion.Content) == 0 {
fmt.Println("No response from Anthropic")
@@ -112,12 +112,12 @@ func TestAnthropicKey(apiKey string) bool {
func RequestAnthropic(c *fiber.Ctx, model string, messages []Message, temperature float64, context string, maxTokens int) (AnthropicChatCompletionResponse, error) {
if maxTokens == 0 {
maxTokens = 4096
}
fmt.Println("Requesting anthropic using max token:", maxTokens)
var apiKey struct {
Key string `edgedb:"key"`
}
err := edgeGlobalClient.WithGlobals(map[string]interface{}{"ext::auth::client_token": c.Cookies("jade-edgedb-auth-token")}).QuerySingle(edgeCtx, `
@@ -136,7 +136,7 @@ func RequestAnthropic(c *fiber.Ctx, model string, messages []Message, temperatur
requestBody := AnthropicChatCompletionRequest{
Model: model,
Messages: Message2RequestMessage(messages, ""),
MaxTokens: maxTokens,
Temperature: temperature,
Context: context,
}

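RequestAnthropic treats a zero max_tokens as unset and substitutes 4096, since the Anthropic API requires the field. A hypothetical helper isolating that rule (the types, the Message2RequestMessage helper, and the 4096 default all appear in the hunks above):

func anthropicRequestBody(model string, messages []Message, temperature float64, context string, maxTokens int) AnthropicChatCompletionRequest {
	if maxTokens == 0 {
		maxTokens = 4096 // required by the API; fallback used in this commit
	}
	return AnthropicChatCompletionRequest{
		Model:       model,
		Messages:    Message2RequestMessage(messages, ""),
		MaxTokens:   maxTokens,
		Temperature: temperature,
		Context:     context,
	}
}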
View File

@@ -14,7 +14,7 @@ import (
type FireworkChatCompletionRequest struct {
Model string `json:"model"`
Messages []RequestMessage `json:"messages"`
MaxTokens int `json:"max_tokens"`
Temperature float64 `json:"temperature"`
}
@@ -44,8 +44,8 @@ func addFireworkMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
chatCompletion, err := RequestFirework(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken))
if err != nil {
fmt.Println("Error requesting Firework: ", err)
id := insertBotMessage(c, "Error requesting Firework, model may not be available anymore. Better error message in development.", selected, llm.ID)
fmt.Println("Error requesting Firework: ", err)
id := insertBotMessage(c, "Error requesting Firework, model may not be available anymore. Better error message in development.", selected, llm.ID)
return id
} else if len(chatCompletion.Choices) == 0 {
fmt.Println("No response from Firework")
@@ -131,7 +131,7 @@ func RequestFirework(c *fiber.Ctx, model string, messages []Message, temperature
requestBody := FireworkChatCompletionRequest{
Model: "accounts/fireworks/models/" + model,
Messages: Message2RequestMessage(messages, context),
MaxTokens: maxTokens,
Temperature: temperature,
}

View File

@@ -57,8 +57,8 @@ func addGoogleMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
chatCompletion, err := RequestGoogle(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context)
if err != nil {
fmt.Println("Error requesting Google: ", err)
id := insertBotMessage(c, "Error requesting Google.", selected, llm.ID)
fmt.Println("Error requesting Google: ", err)
id := insertBotMessage(c, "Error requesting Google.", selected, llm.ID)
return id
} else if len(chatCompletion.Candidates) == 0 {
fmt.Println("No response from Google")

View File

@@ -14,7 +14,7 @@ import (
type GroqChatCompletionRequest struct {
Model string `json:"model"`
Messages []RequestMessage `json:"messages"`
MaxTokens int `json:"max_tokens"`
Temperature float64 `json:"temperature"`
}
@@ -44,8 +44,8 @@ func addGroqMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
chatCompletion, err := RequestGroq(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken))
if err != nil {
fmt.Println("Error requesting Groq: ", err)
id := insertBotMessage(c, "Error requesting Groq, model may not be available anymore. Better error message in development.", selected, llm.ID)
fmt.Println("Error requesting Groq: ", err)
id := insertBotMessage(c, "Error requesting Groq, model may not be available anymore. Better error message in development.", selected, llm.ID)
return id
} else if len(chatCompletion.Choices) == 0 {
fmt.Println("No response from Groq")
@@ -131,7 +131,7 @@ func RequestGroq(c *fiber.Ctx, model string, messages []Message, temperature flo
requestBody := GroqChatCompletionRequest{
Model: model,
Messages: Message2RequestMessage(messages, context),
MaxTokens: maxTokens,
Temperature: temperature,
}

View File

@@ -15,7 +15,7 @@ type HuggingfaceChatCompletionRequest struct {
Model string `json:"model"`
Messages []RequestMessage `json:"messages"`
Temperature float64 `json:"temperature"`
MaxTokens int `json:"max_tokens"`
Stream bool `json:"stream"`
}
@@ -44,8 +44,8 @@ func addHuggingfaceMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
chatCompletion, err := RequestHuggingface(c, llm, Messages, float64(llm.Temperature), int(llm.MaxToken))
if err != nil {
fmt.Println("Error requesting Huggingface: ", err)
id := insertBotMessage(c, "Error requesting Huggingface.", selected, llm.ID)
fmt.Println("Error requesting Huggingface: ", err)
id := insertBotMessage(c, "Error requesting Huggingface.", selected, llm.ID)
return id
} else if len(chatCompletion.Choices) == 0 {
fmt.Println("No response from Endpoint")
@@ -65,7 +65,7 @@ func RequestHuggingface(c *fiber.Ctx, llm LLM, messages []Message, temperature f
Model: "tgi",
Messages: Message2RequestMessage(messages, llm.Context),
Temperature: temperature,
MaxTokens: maxTokens,
Stream: false,
}

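The json tags on these request structs determine the wire format, and dedicated Huggingface TGI endpoints pin the model server-side, hence the fixed "tgi" name. A self-contained sketch of what such a body serializes to (struct and field values invented for illustration):

package main

import (
	"encoding/json"
	"fmt"
)

type chatRequest struct {
	Model       string  `json:"model"`
	Temperature float64 `json:"temperature"`
	MaxTokens   int     `json:"max_tokens"`
	Stream      bool    `json:"stream"`
}

func main() {
	body, err := json.Marshal(chatRequest{
		Model:       "tgi", // the endpoint already knows its model
		Temperature: 0.7,
		MaxTokens:   512,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
	// {"model":"tgi","temperature":0.7,"max_tokens":512,"stream":false}
}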
View File

@@ -14,7 +14,7 @@ import (
type MistralChatCompletionRequest struct {
Model string `json:"model"`
Messages []RequestMessage `json:"messages"`
MaxTokens int `json:"max_tokens"`
Temperature float64 `json:"temperature"`
}
type MistralChatCompletionResponse struct {
@@ -43,8 +43,8 @@ func addMistralMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
chatCompletion, err := RequestMistral(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken))
if err != nil {
fmt.Println("Error requesting Mistral: ", err)
id := insertBotMessage(c, "Error requesting Mistral, model may not be available anymore. Better error message in development.", selected, llm.ID)
fmt.Println("Error requesting Mistral: ", err)
id := insertBotMessage(c, "Error requesting Mistral, model may not be available anymore. Better error message in development.", selected, llm.ID)
return id
} else if len(chatCompletion.Choices) == 0 {
id := insertBotMessage(c, "No response from Mistral", selected, llm.ID)
@@ -136,7 +136,7 @@ func RequestMistral(c *fiber.Ctx, model string, messages []Message, temperature
requestBody := MistralChatCompletionRequest{
Model: model,
Messages: Message2RequestMessage(messages, context),
MaxTokens: maxTokens,
Temperature: temperature,
}

View File

@@ -1,31 +1,30 @@
package main
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"encoding/json"
"bytes"
"github.com/edgedb/edgedb-go"
"github.com/gofiber/fiber/v2"
)
type NimChatCompletionRequest struct {
Model string `json:"model"`
Messages []RequestMessage `json:"messages"`
MaxTokens int `json:"max_tokens"`
Temperature float64 `json:"temperature"`
}
type NimChatCompletionResponse struct {
ID string `json:"id"`
Object string `json:"object"`
Created int64 `json:"created"`
Model string `json:"model"`
Usage NimUsage `json:"usage"`
Choices []NimChoice `json:"choices"`
}
type NimUsage struct {
@@ -45,8 +44,8 @@ func addNimMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
chatCompletion, err := RequestNim(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken))
if err != nil {
fmt.Println("Error requesting NIM: ", err)
id := insertBotMessage(c, "Error requesting NIM, model may not be available anymore. Better error message in development.", selected, llm.ID)
fmt.Println("Error requesting NIM: ", err)
id := insertBotMessage(c, "Error requesting NIM, model may not be available anymore. Better error message in development.", selected, llm.ID)
return id
} else if len(chatCompletion.Choices) == 0 {
fmt.Println("No response from NIM")
@@ -60,10 +59,10 @@ func addNimMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
}
func TestNimKey(apiKey string) bool {
url := "https://integrate.api.nvidia.com/v1/chat/completions"
//apiKey := "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"
url := "https://integrate.api.nvidia.com/v1/chat/completions"
//apiKey := "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"
fmt.Println("Testing new Nvidia NIM key:", apiKey)
fmt.Println("Testing new Nvidia NIM key:", apiKey)
// Convert messages to OpenAI format
nimMessages := []RequestMessage{
@@ -81,13 +80,13 @@ func TestNimKey(apiKey string) bool {
jsonBody, err := json.Marshal(requestBody)
if err != nil {
fmt.Println("Error when testing NIM key. Cant parse JSON request.")
fmt.Println("Error when testing NIM key. Cant parse JSON request.")
return false
}
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody))
if err != nil {
fmt.Println("Error when testing NIM key. Cant generate new request")
fmt.Println("Error when testing NIM key. Cant generate new request")
return false
}
@@ -97,36 +96,36 @@ func TestNimKey(apiKey string) bool {
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
fmt.Println("Error when testing NIM key. Cant send request.")
fmt.Println("Error when testing NIM key. Cant send request.")
return false
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
fmt.Println("Error when testing NIM key. Cant read response.")
fmt.Println("Error when testing NIM key. Cant read response.")
return false
}
var chatCompletionResponse NimChatCompletionResponse
err = json.Unmarshal(body, &chatCompletionResponse)
if err != nil {
fmt.Println(resp.Status)
fmt.Println(resp.Body)
fmt.Println("Error when testing NIM key. Cant unmarshal response.")
return false
}
if chatCompletionResponse.Usage.CompletionTokens == 0 {
fmt.Println(resp.Status)
fmt.Println(resp.Body)
fmt.Println("Error when testing NIM key. No completion token.")
return false
}
Content := chatCompletionResponse.Choices[0].Message.Content
fmt.Println(Content)
return true
}
@@ -151,7 +150,7 @@ func RequestNim(c *fiber.Ctx, model string, messages []Message, temperature floa
requestBody := NimChatCompletionRequest{
Model: model,
Messages: Message2RequestMessage(messages, context),
MaxTokens: maxToken,
Temperature: temperature,
}

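TestNimKey validates a key by sending one small completion request and treating a response without completion tokens as failure. A condensed, self-contained sketch of that flow (the URL matches the diff; the model name, message shape, and Bearer header are assumptions):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

// probeNimKey is a hypothetical condensation of TestNimKey: one tiny
// request, and any response without usage data counts as an invalid key.
func probeNimKey(apiKey string) bool {
	payload, err := json.Marshal(map[string]interface{}{
		"model":      "meta/llama3-8b-instruct", // assumed inexpensive model
		"messages":   []map[string]string{{"role": "user", "content": "ping"}},
		"max_tokens": 8,
	})
	if err != nil {
		return false
	}
	req, err := http.NewRequest("POST", "https://integrate.api.nvidia.com/v1/chat/completions", bytes.NewBuffer(payload))
	if err != nil {
		return false
	}
	req.Header.Set("Authorization", "Bearer "+apiKey) // assumed auth scheme
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return false
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return false
	}
	var out struct {
		Usage struct {
			CompletionTokens int `json:"completion_tokens"`
		} `json:"usage"`
	}
	if err := json.Unmarshal(body, &out); err != nil {
		fmt.Println(resp.Status)
		return false
	}
	return out.Usage.CompletionTokens > 0
}

func main() {
	fmt.Println(probeNimKey("nvapi-example")) // placeholder key
}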
View File

@@ -14,7 +14,7 @@ import (
type OpenaiChatCompletionRequest struct {
Model string `json:"model"`
Messages []RequestMessage `json:"messages"`
MaxTokens int `json:"max_tokens"`
Temperature float64 `json:"temperature"`
}
@@ -44,8 +44,8 @@ func addOpenaiMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
chatCompletion, err := RequestOpenai(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken))
if err != nil {
fmt.Println("Error requesting OpenAI: ", err)
id := insertBotMessage(c, "Error requesting OpenAI, model may not be available anymore. Better error message in development.", selected, llm.ID)
fmt.Println("Error requesting OpenAI: ", err)
id := insertBotMessage(c, "Error requesting OpenAI, model may not be available anymore. Better error message in development.", selected, llm.ID)
return id
} else if len(chatCompletion.Choices) == 0 {
fmt.Println("No response from OpenAI")
@@ -131,7 +131,7 @@ func RequestOpenai(c *fiber.Ctx, model string, messages []Message, temperature f
requestBody := OpenaiChatCompletionRequest{
Model: model,
Messages: Message2RequestMessage(messages, context),
MaxTokens: maxTokens,
Temperature: temperature,
}

View File

@@ -15,7 +15,7 @@ import (
type PerplexityChatCompletionRequest struct {
Model string `json:"model"`
Messages []RequestMessage `json:"messages"`
MaxTokens int `json:"max_tokens"`
Temperature float64 `json:"temperature"`
}
@@ -45,8 +45,8 @@ func addPerplexityMessage(c *fiber.Ctx, llm LLM, selected bool) edgedb.UUID {
chatCompletion, err := RequestPerplexity(c, llm.Model.ModelID, Messages, float64(llm.Temperature), llm.Context, int(llm.MaxToken))
if err != nil {
fmt.Println("Error requesting Perplexity: ", err)
id := insertBotMessage(c, "Error requesting Perplexity, model may not be available anymore. Better error message in development.", selected, llm.ID)
fmt.Println("Error requesting Perplexity: ", err)
id := insertBotMessage(c, "Error requesting Perplexity, model may not be available anymore. Better error message in development.", selected, llm.ID)
return id
} else if len(chatCompletion.Choices) == 0 {
fmt.Println("No response from Perplexity")
@@ -132,7 +132,7 @@ func RequestPerplexity(c *fiber.Ctx, model string, messages []Message, temperatu
requestBody := PerplexityChatCompletionRequest{
Model: model,
Messages: Message2RequestMessage(messages, context),
MaxTokens: maxTokens,
Temperature: temperature,
}

View File

@@ -191,7 +191,7 @@ func addKeys(c *fiber.Ctx) error {
"groq": c.FormValue("groq_key"),
"gooseai": c.FormValue("goose_key"),
"google": c.FormValue("google_key"),
"nim": c.FormValue("nim_key"),
"nim": c.FormValue("nim_key"),
"perplexity": c.FormValue("perplexity_key"),
"fireworks": c.FormValue("fireworks_key"),
}
@@ -203,7 +203,7 @@ func addKeys(c *fiber.Ctx) error {
"groq": TestGroqKey,
"gooseai": TestGooseaiKey,
"google": TestGoogleKey,
"nim": TestNimKey,
"nim": TestNimKey,
"perplexity": TestPerplexityKey,
"fireworks": TestFireworkKey,
}
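
Both maps in addKeys are keyed by provider name, so adding a provider is one entry in each and validation is a single loop. A hypothetical condensation of that loop (the skip-empty check is an assumption; it is not visible in the hunks):

for provider, key := range keys {
	if key == "" {
		continue // assumed: providers without a submitted key are skipped
	}
	if test, ok := testers[provider]; ok && !test(key) {
		fmt.Println("Key rejected for provider:", provider)
	}
}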