diff --git a/RequestFirework.go b/RequestFirework.go
index 6c8c91b..6b7becb 100644
--- a/RequestFirework.go
+++ b/RequestFirework.go
@@ -73,6 +73,7 @@ func TestFireworkKey(apiKey string) bool {
 		Model:       "accounts/fireworks/models/llama-v2-7b-chat",
 		Messages:    fireworkMessages,
 		Temperature: 0,
+		MaxTokens:   10,
 	}
 
 	jsonBody, err := json.Marshal(requestBody)
diff --git a/RequestGroq.go b/RequestGroq.go
index 13b3e3f..43e23f7 100644
--- a/RequestGroq.go
+++ b/RequestGroq.go
@@ -73,6 +73,7 @@ func TestGroqKey(apiKey string) bool {
 		Model:       "llama3-8b-8192",
 		Messages:    Message2RequestMessage(groqMessages, ""),
 		Temperature: 0,
+		MaxTokens:   10,
 	}
 
 	jsonBody, err := json.Marshal(requestBody)
diff --git a/RequestMistral.go b/RequestMistral.go
index 030a663..7806fe3 100644
--- a/RequestMistral.go
+++ b/RequestMistral.go
@@ -71,6 +71,7 @@ func TestMistralKey(apiKey string) bool {
 		Model:       "open-mistral-7b",
 		Messages:    mistralMessages,
 		Temperature: 0,
+		MaxTokens:   10,
 	}
 
 	jsonBody, err := json.Marshal(requestBody)
diff --git a/RequestNim.go b/RequestNim.go
index 9ef5d49..dda5701 100644
--- a/RequestNim.go
+++ b/RequestNim.go
@@ -76,6 +76,7 @@ func TestNimKey(apiKey string) bool {
 		Model:       "meta/llama3-8b-instruct",
 		Messages:    nimMessages,
 		Temperature: 0,
+		MaxTokens:   10,
 	}
 
 	jsonBody, err := json.Marshal(requestBody)
diff --git a/RequestOpenai.go b/RequestOpenai.go
index 9ce9267..423e32d 100644
--- a/RequestOpenai.go
+++ b/RequestOpenai.go
@@ -73,6 +73,7 @@ func TestOpenaiKey(apiKey string) bool {
 		Model:       "gpt-3.5-turbo",
 		Messages:    openaiMessages,
 		Temperature: 0,
+		MaxTokens:   10,
 	}
 
 	jsonBody, err := json.Marshal(requestBody)
diff --git a/RequestPerplexity.go b/RequestPerplexity.go
index a7f0a12..ed4a48c 100644
--- a/RequestPerplexity.go
+++ b/RequestPerplexity.go
@@ -74,6 +74,7 @@ func TestPerplexityKey(apiKey string) bool {
 		Model:       "llama-3-8b-instruct",
 		Messages:    perplexityMessages,
 		Temperature: 0,
+		MaxTokens:   10,
 	}
 
 	jsonBody, err := json.Marshal(requestBody)
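
For context, each hunk sets a `MaxTokens` field on the request struct before it is marshaled and sent to the provider's OpenAI-compatible chat-completions endpoint, which keeps the key-validity probe cheap (at most 10 completion tokens). The sketch below shows how that field would presumably be declared and serialized; the `ChatRequest` and `RequestMessage` type names, the other fields, and the `json:"max_tokens"` tag are assumptions for illustration — only the `MaxTokens: 10` addition comes from the diff above, and the real struct definitions in this repo may differ.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical message type; the repo's actual definition may differ.
type RequestMessage struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

// Hypothetical request body; only the MaxTokens addition is from the diff.
type ChatRequest struct {
	Model       string           `json:"model"`
	Messages    []RequestMessage `json:"messages"`
	Temperature float64          `json:"temperature"`
	// MaxTokens caps the completion length; a value of 10 is enough to
	// exercise authentication without paying for a long response.
	MaxTokens int `json:"max_tokens"`
}

func main() {
	requestBody := ChatRequest{
		Model:       "gpt-3.5-turbo",
		Messages:    []RequestMessage{{Role: "user", Content: "ping"}},
		Temperature: 0,
		MaxTokens:   10,
	}

	jsonBody, err := json.Marshal(requestBody)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(jsonBody))
	// {"model":"gpt-3.5-turbo","messages":[{"role":"user","content":"ping"}],"temperature":0,"max_tokens":10}
}
```

Assuming the tag is written without `omitempty`, the field is always emitted, so every provider touched here (OpenAI, Groq, Mistral, Fireworks, NVIDIA NIM, Perplexity) receives an explicit `max_tokens` limit in the test request.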