fix MaxCompletionTokens typo (#862)

* fix spelling error

* fix lint

* Update chat.go

* Update chat.go
Author: Winston Liu
Date: 2024-10-03 12:17:16 -07:00
Committed by: GitHub
Parent: fdd59d9341
Commit: bac7d59361
3 changed files with 31 additions and 31 deletions

chat.go

@@ -207,18 +207,18 @@ type ChatCompletionRequest struct {
 	// This value is now deprecated in favor of max_completion_tokens, and is not compatible with o1 series models.
 	// refs: https://platform.openai.com/docs/api-reference/chat/create#chat-create-max_tokens
 	MaxTokens int `json:"max_tokens,omitempty"`
-	// MaxCompletionsTokens An upper bound for the number of tokens that can be generated for a completion,
+	// MaxCompletionTokens An upper bound for the number of tokens that can be generated for a completion,
 	// including visible output tokens and reasoning tokens https://platform.openai.com/docs/guides/reasoning
-	MaxCompletionsTokens int `json:"max_completion_tokens,omitempty"`
+	MaxCompletionTokens int `json:"max_completion_tokens,omitempty"`
 	Temperature float32 `json:"temperature,omitempty"`
 	TopP float32 `json:"top_p,omitempty"`
 	N int `json:"n,omitempty"`
 	Stream bool `json:"stream,omitempty"`
 	Stop []string `json:"stop,omitempty"`
 	PresencePenalty float32 `json:"presence_penalty,omitempty"`
 	ResponseFormat *ChatCompletionResponseFormat `json:"response_format,omitempty"`
 	Seed *int `json:"seed,omitempty"`
 	FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`
 	// LogitBias is must be a token id string (specified by their token ID in the tokenizer), not a word string.
 	// incorrect: `"logit_bias":{"You": 6}`, correct: `"logit_bias":{"1639": 6}`
 	// refs: https://platform.openai.com/docs/api-reference/chat/create#chat/create-logit_bias
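
For context, a minimal usage sketch of the renamed field with this library; the API key placeholder, model choice, and prompt text are illustrative only, not part of this change:

package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("YOUR_API_KEY") // hypothetical key placeholder
	resp, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
		Model: openai.O1Mini,
		// Use MaxCompletionTokens; MaxTokens is deprecated and not compatible with o1-series models.
		MaxCompletionTokens: 1000,
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Hello"},
		},
	})
	if err != nil {
		fmt.Println("chat error:", err)
		return
	}
	fmt.Println(resp.Choices[0].Message.Content)
}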


@@ -100,17 +100,17 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "log_probs_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				LogProbs: true,
 				Model: openai.O1Preview,
 			},
 			expectedError: openai.ErrO1BetaLimitationsLogprobs,
 		},
 		{
 			name: "message_type_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				Model: openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
 						Role: openai.ChatMessageRoleSystem,
@@ -122,8 +122,8 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "tool_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				Model: openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
 						Role: openai.ChatMessageRoleUser,
@@ -143,8 +143,8 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "set_temperature_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				Model: openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
 						Role: openai.ChatMessageRoleUser,
@@ -160,8 +160,8 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "set_top_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				Model: openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
 						Role: openai.ChatMessageRoleUser,
@@ -178,8 +178,8 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "set_n_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				Model: openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
 						Role: openai.ChatMessageRoleUser,
@@ -197,8 +197,8 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "set_presence_penalty_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				Model: openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
 						Role: openai.ChatMessageRoleUser,
@@ -214,8 +214,8 @@ func TestO1ModelsChatCompletionsBetaLimitations(t *testing.T) {
 		{
 			name: "set_frequency_penalty_unsupported",
 			in: openai.ChatCompletionRequest{
-				MaxCompletionsTokens: 1000,
+				MaxCompletionTokens: 1000,
 				Model: openai.O1Mini,
 				Messages: []openai.ChatCompletionMessage{
 					{
 						Role: openai.ChatMessageRoleUser,
@@ -296,8 +296,8 @@ func TestO1ModelChatCompletions(t *testing.T) {
 	defer teardown()
 	server.RegisterHandler("/v1/chat/completions", handleChatCompletionEndpoint)
 	_, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
 		Model: openai.O1Preview,
-		MaxCompletionsTokens: 1000,
+		MaxCompletionTokens: 1000,
 		Messages: []openai.ChatCompletionMessage{
 			{
 				Role: openai.ChatMessageRoleUser,


@@ -7,7 +7,7 @@ import (
 )
 var (
-	ErrO1MaxTokensDeprecated                   = errors.New("this model is not supported MaxTokens, please use MaxCompletionsTokens") //nolint:lll
+	ErrO1MaxTokensDeprecated                   = errors.New("this model is not supported MaxTokens, please use MaxCompletionTokens") //nolint:lll
 	ErrCompletionUnsupportedModel              = errors.New("this model is not supported with this method, please use CreateChatCompletion client method instead") //nolint:lll
 	ErrCompletionStreamNotSupported            = errors.New("streaming is not supported with this method, please use CreateCompletionStream") //nolint:lll
 	ErrCompletionRequestPromptTypeNotSupported = errors.New("the type of CompletionRequest.Prompt only supports string and []string") //nolint:lll
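
As a hedged illustration, not part of this change: a caller migrating off MaxTokens might guard against this sentinel roughly as below. It assumes the usual imports ("context", "errors", the go-openai package), an initialized client, and that the library's client-side validation for o1-series requests surfaces ErrO1MaxTokensDeprecated unwrapped or wrapped.

// Sketch: detect the deprecation sentinel when a request still sets MaxTokens
// on an o1-series model, then switch to MaxCompletionTokens.
_, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
	Model:     openai.O1Mini,
	MaxTokens: 100, // deprecated; assumed to trigger the sentinel for o1-series models
	Messages: []openai.ChatCompletionMessage{
		{Role: openai.ChatMessageRoleUser, Content: "Hello"},
	},
})
if errors.Is(err, openai.ErrO1MaxTokensDeprecated) {
	// Rebuild the request with MaxCompletionTokens and retry.
}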