Go OpenAI
This library provides an unofficial Go client for the OpenAI API. We support:
- ChatGPT
- GPT-3, GPT-4
- DALL·E 2
- Whisper
Installation:
go get github.com/sashabaranov/go-openai
ChatGPT example usage:
package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your token")
	resp, err := client.CreateChatCompletion(
		context.Background(),
		openai.ChatCompletionRequest{
			Model: openai.GPT3Dot5Turbo,
			Messages: []openai.ChatCompletionMessage{
				{
					Role:    openai.ChatMessageRoleUser,
					Content: "Hello!",
				},
			},
		},
	)
	if err != nil {
		fmt.Printf("ChatCompletion error: %v\n", err)
		return
	}
	fmt.Println(resp.Choices[0].Message.Content)
}
Other examples:
ChatGPT streaming completion
package main

import (
	"context"
	"errors"
	"fmt"
	"io"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	c := openai.NewClient("your token")
	ctx := context.Background()

	req := openai.ChatCompletionRequest{
		Model:     openai.GPT3Dot5Turbo,
		MaxTokens: 20,
		Messages: []openai.ChatCompletionMessage{
			{
				Role:    openai.ChatMessageRoleUser,
				Content: "Lorem ipsum",
			},
		},
		Stream: true,
	}
	stream, err := c.CreateChatCompletionStream(ctx, req)
	if err != nil {
		fmt.Printf("ChatCompletionStream error: %v\n", err)
		return
	}
	defer stream.Close()

	fmt.Printf("Stream response: ")
	for {
		response, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			fmt.Println("\nStream finished")
			return
		}
		if err != nil {
			fmt.Printf("\nStream error: %v\n", err)
			return
		}
		// Print, not Printf: the delta is data, not a format string.
		fmt.Print(response.Choices[0].Delta.Content)
	}
}
GPT-3 completion
package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	c := openai.NewClient("your token")
	ctx := context.Background()

	req := openai.CompletionRequest{
		Model:     openai.GPT3Ada,
		MaxTokens: 5,
		Prompt:    "Lorem ipsum",
	}
	resp, err := c.CreateCompletion(ctx, req)
	if err != nil {
		fmt.Printf("Completion error: %v\n", err)
		return
	}
	fmt.Println(resp.Choices[0].Text)
}
GPT-3 streaming completion
package main

import (
	"context"
	"errors"
	"fmt"
	"io"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	c := openai.NewClient("your token")
	ctx := context.Background()

	req := openai.CompletionRequest{
		Model:     openai.GPT3Ada,
		MaxTokens: 5,
		Prompt:    "Lorem ipsum",
		Stream:    true,
	}
	stream, err := c.CreateCompletionStream(ctx, req)
	if err != nil {
		fmt.Printf("CompletionStream error: %v\n", err)
		return
	}
	defer stream.Close()

	for {
		response, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			fmt.Println("Stream finished")
			return
		}
		if err != nil {
			fmt.Printf("Stream error: %v\n", err)
			return
		}
		fmt.Printf("Stream response: %v\n", response)
	}
}
Audio Speech-To-Text
package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	c := openai.NewClient("your token")
	ctx := context.Background()

	req := openai.AudioRequest{
		Model:    openai.Whisper1,
		FilePath: "recording.mp3",
	}
	resp, err := c.CreateTranscription(ctx, req)
	if err != nil {
		fmt.Printf("Transcription error: %v\n", err)
		return
	}
	fmt.Println(resp.Text)
}
Audio Captions
package main

import (
	"context"
	"fmt"
	"os"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	c := openai.NewClient(os.Getenv("OPENAI_KEY"))

	req := openai.AudioRequest{
		Model:    openai.Whisper1,
		FilePath: os.Args[1],
		Format:   openai.AudioResponseFormatSRT,
	}
	resp, err := c.CreateTranscription(context.Background(), req)
	if err != nil {
		fmt.Printf("Transcription error: %v\n", err)
		return
	}
	f, err := os.Create(os.Args[1] + ".srt")
	if err != nil {
		fmt.Printf("Could not create file: %v\n", err)
		return
	}
	defer f.Close()
	if _, err := f.WriteString(resp.Text); err != nil {
		fmt.Printf("Error writing to file: %v\n", err)
		return
	}
}
DALL-E 2 image generation
package main

import (
	"bytes"
	"context"
	"encoding/base64"
	"fmt"
	"image/png"
	"os"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	c := openai.NewClient("your token")
	ctx := context.Background()

	// Sample image by link
	reqUrl := openai.ImageRequest{
		Prompt:         "Parrot on a skateboard performs a trick, cartoon style, natural light, high detail",
		Size:           openai.CreateImageSize256x256,
		ResponseFormat: openai.CreateImageResponseFormatURL,
		N:              1,
	}
	respUrl, err := c.CreateImage(ctx, reqUrl)
	if err != nil {
		fmt.Printf("Image creation error: %v\n", err)
		return
	}
	fmt.Println(respUrl.Data[0].URL)

	// Example image as base64
	reqBase64 := openai.ImageRequest{
		Prompt:         "Portrait of a humanoid parrot in a classic costume, high detail, realistic light, unreal engine",
		Size:           openai.CreateImageSize256x256,
		ResponseFormat: openai.CreateImageResponseFormatB64JSON,
		N:              1,
	}
	respBase64, err := c.CreateImage(ctx, reqBase64)
	if err != nil {
		fmt.Printf("Image creation error: %v\n", err)
		return
	}

	imgBytes, err := base64.StdEncoding.DecodeString(respBase64.Data[0].B64JSON)
	if err != nil {
		fmt.Printf("Base64 decode error: %v\n", err)
		return
	}
	r := bytes.NewReader(imgBytes)
	imgData, err := png.Decode(r)
	if err != nil {
		fmt.Printf("PNG decode error: %v\n", err)
		return
	}
	file, err := os.Create("example.png")
	if err != nil {
		fmt.Printf("File creation error: %v\n", err)
		return
	}
	defer file.Close()
	if err := png.Encode(file, imgData); err != nil {
		fmt.Printf("PNG encode error: %v\n", err)
		return
	}
	fmt.Println("The image was saved as example.png")
}
Configuring proxy
// This snippet assumes "net/http" and "net/url" are imported.
config := openai.DefaultConfig("token")
proxyUrl, err := url.Parse("http://localhost:{port}")
if err != nil {
	panic(err)
}
transport := &http.Transport{
	Proxy: http.ProxyURL(proxyUrl),
}
config.HTTPClient = &http.Client{
	Transport: transport,
}

c := openai.NewClientWithConfig(config)
See also: https://pkg.go.dev/github.com/sashabaranov/go-openai#ClientConfig
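Beyond the proxy, the same config object carries other client-wide settings. A minimal sketch (field values are placeholders; assumes "net/http" and "time" are imported):
config := openai.DefaultConfig("token")
config.OrgID = "org-..." // placeholder: send requests on behalf of an OpenAI organization
config.HTTPClient = &http.Client{
	Timeout: 30 * time.Second, // e.g. add a request timeout
}
c := openai.NewClientWithConfig(config)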
ChatGPT with conversation context
package main

import (
	"bufio"
	"context"
	"fmt"
	"os"
	"strings"

	"github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your token")
	messages := make([]openai.ChatCompletionMessage, 0)
	reader := bufio.NewReader(os.Stdin)
	fmt.Println("Conversation")
	fmt.Println("---------------------")

	for {
		fmt.Print("-> ")
		text, _ := reader.ReadString('\n')
		// trim the trailing newline (and the "\r" left by CRLF input)
		text = strings.TrimSpace(text)
		messages = append(messages, openai.ChatCompletionMessage{
			Role:    openai.ChatMessageRoleUser,
			Content: text,
		})

		resp, err := client.CreateChatCompletion(
			context.Background(),
			openai.ChatCompletionRequest{
				Model:    openai.GPT3Dot5Turbo,
				Messages: messages,
			},
		)
		if err != nil {
			fmt.Printf("ChatCompletion error: %v\n", err)
			continue
		}

		content := resp.Choices[0].Message.Content
		messages = append(messages, openai.ChatCompletionMessage{
			Role:    openai.ChatMessageRoleAssistant,
			Content: content,
		})
		fmt.Println(content)
	}
}
Azure OpenAI ChatGPT
package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	config := openai.DefaultAzureConfig("your Azure OpenAI Key", "https://your Azure OpenAI Endpoint")
	// If you use a deployment name different from the model name, you can customize the AzureModelMapperFunc function
	// config.AzureModelMapperFunc = func(model string) string {
	// 	azureModelMapping := map[string]string{
	// 		"gpt-3.5-turbo": "your gpt-3.5-turbo deployment name",
	// 	}
	// 	return azureModelMapping[model]
	// }

	client := openai.NewClientWithConfig(config)
	resp, err := client.CreateChatCompletion(
		context.Background(),
		openai.ChatCompletionRequest{
			Model: openai.GPT3Dot5Turbo,
			Messages: []openai.ChatCompletionMessage{
				{
					Role:    openai.ChatMessageRoleUser,
					Content: "Hello Azure OpenAI!",
				},
			},
		},
	)
	if err != nil {
		fmt.Printf("ChatCompletion error: %v\n", err)
		return
	}
	fmt.Println(resp.Choices[0].Message.Content)
}
Azure OpenAI Embeddings
package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	config := openai.DefaultAzureConfig("your Azure OpenAI Key", "https://your Azure OpenAI Endpoint")
	config.APIVersion = "2023-05-15" // optional update to latest API version

	// If you use a deployment name different from the model name, you can customize the AzureModelMapperFunc function
	// config.AzureModelMapperFunc = func(model string) string {
	// 	azureModelMapping := map[string]string{
	// 		"gpt-3.5-turbo": "your gpt-3.5-turbo deployment name",
	// 	}
	// 	return azureModelMapping[model]
	// }

	input := "Text to vectorize"

	client := openai.NewClientWithConfig(config)
	resp, err := client.CreateEmbeddings(
		context.Background(),
		openai.EmbeddingRequest{
			Input: []string{input},
			Model: openai.AdaEmbeddingV2,
		})
	if err != nil {
		fmt.Printf("CreateEmbeddings error: %v\n", err)
		return
	}

	vectors := resp.Data[0].Embedding // []float32 with 1536 dimensions
	fmt.Println(vectors[:10], "...", vectors[len(vectors)-10:])
}
Error handling
OpenAI maintains clear documentation on how to handle API errors. For example:
e := &openai.APIError{}
if errors.As(err, &e) {
	switch e.HTTPStatusCode {
	case 401:
		// invalid auth or key (do not retry)
	case 429:
		// rate limiting or engine overload (wait and retry)
	case 500:
		// openai server error (retry)
	default:
		// unhandled
	}
}
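Building on that switch, a caller could wrap requests in a small retry helper. The sketch below is illustrative only; the helper name and retry policy are not part of the library, and it assumes "context", "errors", and "time" are imported:
// Hypothetical helper: retry rate limits (429) and server errors (5xx) up to three times.
func createWithRetry(client *openai.Client, req openai.ChatCompletionRequest) (openai.ChatCompletionResponse, error) {
	var resp openai.ChatCompletionResponse
	var err error
	for attempt := 0; attempt < 3; attempt++ {
		resp, err = client.CreateChatCompletion(context.Background(), req)
		if err == nil {
			return resp, nil
		}
		e := &openai.APIError{}
		if errors.As(err, &e) && (e.HTTPStatusCode == 429 || e.HTTPStatusCode >= 500) {
			time.Sleep(2 * time.Second) // wait before retrying transient failures
			continue
		}
		break // auth and other client errors are not retried
	}
	return resp, err
}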
See the examples/ folder for more.