fix MaxCompletionTokens typo (#862)
* fix spelling error
* fix lint
* Update chat.go
* Update chat.go
chat.go (4 changed lines)
@@ -207,9 +207,9 @@ type ChatCompletionRequest struct {
 	// This value is now deprecated in favor of max_completion_tokens, and is not compatible with o1 series models.
 	// refs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens
 	MaxTokens int `json:"max_tokens,omitempty"`
-	// MaxCompletionsTokens An upper bound for the number of tokens that can be generated for a completion,
+	// MaxCompletionTokens An upper bound for the number of tokens that can be generated for a completion,
 	// including visible output tokens and reasoning tokens https://platform.openai.com/docs/guides/reasoning
-	MaxCompletionsTokens int `json:"max_completion_tokens,omitempty"`
+	MaxCompletionTokens int `json:"max_completion_tokens,omitempty"`
 	Temperature float32 `json:"temperature,omitempty"`
 	TopP float32 `json:"top_p,omitempty"`
 	N int `json:"n,omitempty"`
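For reference, a minimal sketch of the renamed field in use; the client setup, API key placeholder, and prompt below are illustrative assumptions, not part of this commit:

package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	// Hypothetical client setup; substitute a real API key.
	client := openai.NewClient("YOUR_API_KEY")

	resp, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
		Model: openai.O1Mini,
		// Renamed field: caps generated tokens, including reasoning tokens.
		MaxCompletionTokens: 1000,
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Hello"},
		},
	})
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(resp.Choices[0].Message.Content)
}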
chat_test.go (18 changed lines)
@@ -100,7 +100,7 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "log_probs_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				LogProbs: true,
 				Model: openai.O1Preview,
 			},
@@ -109,7 +109,7 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "message_type_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				Model: openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
@@ -122,7 +122,7 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "tool_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				Model: openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
@@ -143,7 +143,7 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "set_temperature_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				Model: openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
@@ -160,7 +160,7 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "set_top_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				Model: openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
@@ -178,7 +178,7 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "set_n_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				Model: openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
@@ -197,7 +197,7 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "set_presence_penalty_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				Model: openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
@@ -214,7 +214,7 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "set_frequency_penalty_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				Model: openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
@@ -297,7 +297,7 @@ func TestO1ModelChatCompletions(t *testing.T) {
 	server.RegisterHandler("/v1/chat/completions", handleChatCompletionEndpoint)
 	_, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
 		Model: openai.O1Preview,
-		MaxCompletionsTokens: 1000,
+		MaxCompletionTokens: 1000,
 		Messages: []openai.ChatCompletionMessage{
 			{
 				Role: openai.ChatMessageRoleUser,
completion.go (2 changed lines)
@@ -7,7 +7,7 @@ import (
 )
 
 var (
-	ErrO1MaxTokensDeprecated = errors.New("this model is not supported MaxTokens, please use MaxCompletionsTokens") //nolint:lll
+	ErrO1MaxTokensDeprecated = errors.New("this model is not supported MaxTokens, please use MaxCompletionTokens") //nolint:lll
 	ErrCompletionUnsupportedModel = errors.New("this model is not supported with this method, please use CreateChatCompletion client method instead") //nolint:lll
 	ErrCompletionStreamNotSupported = errors.New("streaming is not supported with this method, please use CreateCompletionStream") //nolint:lll
 	ErrCompletionRequestPromptTypeNotSupported = errors.New("the type of CompletionRequest.Prompt only supports string and []string") //nolint:lll
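A short sketch of how a caller might detect the sentinel above and migrate; it assumes a client configured as in the earlier sketch, the standard library errors package, and that request validation returns ErrO1MaxTokensDeprecated when the deprecated field is set for an o1 model, as the tests in chat_test.go exercise:

// Setting the deprecated MaxTokens with an o1 model is rejected.
_, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
	Model: openai.O1Preview,
	MaxTokens: 1000, // deprecated; not compatible with o1 series models
	Messages: []openai.ChatCompletionMessage{
		{Role: openai.ChatMessageRoleUser, Content: "Hello"},
	},
})
if errors.Is(err, openai.ErrO1MaxTokensDeprecated) {
	// Retry with MaxCompletionTokens in place of MaxTokens.
}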