package openai
|
|
|
|
import (
|
|
"fmt"
|
|
"iter"
|
|
)
|
|
|
|
// Sentinel errors returned by request helpers; compare with errors.Is.
//
// NOTE(review): Go convention would name these ErrMaxRetries / ErrBadResponse
// and build them with errors.New — kept as-is for backward compatibility
// with existing callers.
var (
	// MaxRetriesErr reports that a request was retried up to the limit
	// without success.
	MaxRetriesErr = fmt.Errorf("max retries exceeded")

	// BadResponseErr reports that the server answered with an
	// unexpected HTTP status code.
	BadResponseErr = fmt.Errorf("bad_response_status_code")
)
|
|
|
|
type CreateCompletionReq struct {
|
|
Model string `json:"model"`
|
|
Messages []Message `json:"messages"`
|
|
Verbosity string `json:"verbosity,omitempty"`
|
|
Temperature float32 `json:"temperature,omitempty"`
|
|
PresencePenalty int `json:"presence_penalty,omitempty"`
|
|
FrequencyPenalty int `json:"frequency_penalty,omitempty"`
|
|
TopP int `json:"top_p,omitempty"`
|
|
MaxCompletionTokens int `json:"max_completition_tokens,omitempty"`
|
|
Stream bool `json:"stream,omitempty"`
|
|
}
|
|
|
|
func (api *API) CreateCompletionStream(history []Message, message string, temp float32) (iter.Seq2[AIResponse, error], error) {
|
|
params := CreateCompletionReq{
|
|
Model: api.model,
|
|
Messages: append(history, Message{
|
|
Role: "user",
|
|
Content: message,
|
|
}),
|
|
Temperature: temp,
|
|
Stream: true,
|
|
}
|
|
req := NewRequest("chat/completions", params)
|
|
return req.DoStream(api)
|
|
}
|
|
|
|
func (api *API) CreateCompletion(history []Message, message string, temp float32) (AIResponse, error) {
|
|
params := CreateCompletionReq{
|
|
Model: api.model,
|
|
Messages: append(history, Message{
|
|
Role: "user",
|
|
Content: message,
|
|
}),
|
|
Temperature: temp,
|
|
Stream: false,
|
|
}
|
|
req := NewRequest("chat/completions", params)
|
|
return req.Do(api)
|
|
}
|