Added MaxTokens = 10 when testing the key

This commit is contained in:
Adrien Bouvais 2024-08-07 10:50:11 +02:00
parent 6c3e4d4b8a
commit c25a5772ce
6 changed files with 6 additions and 0 deletions

View File

@ -73,6 +73,7 @@ func TestFireworkKey(apiKey string) bool {
Model: "accounts/fireworks/models/llama-v2-7b-chat",
Messages: fireworkMessages,
Temperature: 0,
MaxTokens: 10,
}
jsonBody, err := json.Marshal(requestBody)

View File

@ -73,6 +73,7 @@ func TestGroqKey(apiKey string) bool {
Model: "llama3-8b-8192",
Messages: Message2RequestMessage(groqMessages, ""),
Temperature: 0,
MaxTokens: 10,
}
jsonBody, err := json.Marshal(requestBody)

View File

@ -71,6 +71,7 @@ func TestMistralKey(apiKey string) bool {
Model: "open-mistral-7b",
Messages: mistralMessages,
Temperature: 0,
MaxTokens: 10,
}
jsonBody, err := json.Marshal(requestBody)

View File

@ -76,6 +76,7 @@ func TestNimKey(apiKey string) bool {
Model: "meta/llama3-8b-instruct",
Messages: nimMessages,
Temperature: 0,
MaxTokens: 10,
}
jsonBody, err := json.Marshal(requestBody)

View File

@ -73,6 +73,7 @@ func TestOpenaiKey(apiKey string) bool {
Model: "gpt-3.5-turbo",
Messages: openaiMessages,
Temperature: 0,
MaxTokens: 10,
}
jsonBody, err := json.Marshal(requestBody)

View File

@ -74,6 +74,7 @@ func TestPerplexityKey(apiKey string) bool {
Model: "llama-3-8b-instruct",
Messages: perplexityMessages,
Temperature: 0,
MaxTokens: 10,
}
jsonBody, err := json.Marshal(requestBody)