openai

package module
v0.0.0-...-1a690b5 Latest
Published: Apr 13, 2023 License: MIT Imports: 11 Imported by: 2

README

Golang OpenAI API Client

A native Golang implementation for easily interacting with the OpenAI API.

https://beta.openai.com/docs/api-reference/

Usage

You can use an environment variable to store your API secret key:

export OPENAI_KEY=[YOUR_KEY]

To initialize the engine, use this:

e := openai.New(os.Getenv("OPENAI_KEY"))
Tips
Model

If you want to use the most powerful model to generate text outputs, ensure that you are using "text-davinci-003". This model is defined as the constant openai.ModelGPT3TextDavinci003 (also available via the openai.DefaultModel alias).

Text editing

You can combine Completion and Edit to regenerate a response based on the previous context.

e := openai.New(os.Getenv("OPENAI_KEY"))
ctx := context.Background()
completionResp, err := e.Completion(ctx, &openai.CompletionOptions{
	// Choose a model; the list of available models is in the models.go file
	Model: openai.ModelGPT3TextDavince,
	// Maximum number of completion tokens to generate. The default is 1024
	MaxTokens: 1200,
	// Prompt to complete
	Prompt: []string{"Write a little bit of Wikipedia. What is that?"},
})
if err != nil {
	log.Fatal(err)
}

editResp, err := e.Edit(ctx, &openai.EditOptions{
	// Edit currently works only with the text-davinci-edit-001 model (see the Edit docs below)
	Model:       "text-davinci-edit-001",
	Input:       completionResp.Choices[0].Text,
	Instruction: "Please rewrite a bit more and add more information about Wikipedia in different aspects. Please build based on that for 4 topics",
})
if err != nil {
	log.Fatal(err)
}
Text completion example

Given a prompt, the model will return one or more predicted completions.

Note: the default number of completion tokens is 1024. If you want to increase or decrease this limit, change the MaxTokens parameter.

e := openai.New(os.Getenv("OPENAI_KEY"))
r, err := e.Completion(context.Background(), &openai.CompletionOptions{
	// Choose a model; the list of available models is in the models.go file
	Model: openai.ModelGPT3TextDavince,
	// Maximum number of completion tokens to generate. The default is 1024
	MaxTokens: 1200,
	// Prompt to complete
	Prompt: []string{"Write a little bit of Wikipedia. What is that?"},
})
if err != nil {
	log.Fatal(err)
}

You will get the following output:

{
  "id": "cmpl-6SrcYDLCVT7xyHKVNuSLNuhRvwOJ1",
  "object": "text_completion",
  "created": 1672337322,
  "model": "text-davinci-001",
  "choices": [
    {
      "text": "\n\nWikipedia is a free online encyclopedia, created and edited by volunteers.",
      "index": 0,
      "finish_reason": "stop"
    }
  ],
  "usage": {
    "prompt_tokens": 11,
    "completion_tokens": 15,
    "total_tokens": 26
  }
}

To get only the text, use the following code:

fmt.Println(r.Choices[0].Text)

The full code will be:

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"os"

	"github.com/0x9ef/openai-go"
)

func main() {
	e := openai.New(os.Getenv("OPENAI_KEY"))
	r, err := e.Completion(context.Background(), &openai.CompletionOptions{
		// Choose a model; the list of available models is in the models.go file
		Model: openai.ModelGPT3TextDavince,
		// Prompt to complete
		Prompt: []string{"Write a little bit of Wikipedia. What is that?"},
	})
	if err != nil {
		log.Fatal(err)
	}

	if b, err := json.MarshalIndent(r, "", "  "); err != nil {
		panic(err)
	} else {
		fmt.Println(string(b))
	}

	// Wikipedia is a free online encyclopedia, created and edited by volunteers.
	fmt.Println("What is the Wikipedia?", r.Choices[0].Text)
}
Models list/retrieve

Lists the currently available models, and provides basic information about each one such as the owner and availability.

e := openai.New(os.Getenv("OPENAI_KEY"))
r, err := e.ListModels(context.Background())
if err != nil {
	log.Fatal(err)
}

You will get the following output:

{
  "data": [
    {
      "id": "babbage",
      "object": "model",
      "owned_by": "openai"
    },
    {
      "id": "ada",
      "object": "model",
      "owned_by": "openai"
    },
    {
      "id": "text-davinci-002",
      "object": "model",
      "owned_by": "openai"
    },
    {
      "id": "davinci",
      "object": "model",
      "owned_by": "openai"
    },
    ...
  ]
}

To retrieve information about a specific model instead of all models, you can do this:

e := openai.New(os.Getenv("OPENAI_KEY"))
r, err := e.RetrieveModel(context.Background(), &openai.RetrieveModelOptions{
	ID: openai.ModelGPT3Davince,
})
if err != nil {
	log.Fatal(err)
}

License

MIT

Documentation

Overview

Copyright (c) 2022 0x9ef. All rights reserved. Use of this source code is governed by an MIT license that can be found in the LICENSE file.


Index

Constants

const (
	Size256    = "256x256"
	Size512    = "512x512"
	Size1024   = "1024x1024"
	SizeSmall  = Size256
	SizeMedium = Size512
	SizeBig    = Size1024
)
const (
	ResponseFormatUrl     = "url"
	ResponseFormatB64Json = "b64_json"
)

Variables

This section is empty.

Functions

This section is empty.

Types

type APIError

type APIError struct {
	Err struct {
		StatusCode int    `json:"status_code"`
		Message    string `json:"message"`
		Type       string `json:"type"`
	} `json:"error"`
}

func (APIError) Error

func (e APIError) Error() string
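
For illustration, a minimal sketch of handling a failed request. It assumes the engine's errors can be unwrapped into an APIError with errors.As; whether the error is returned by value or by pointer is not documented here.

e := openai.New(os.Getenv("OPENAI_KEY"))
r, err := e.Completion(context.Background(), &openai.CompletionOptions{
	Model:  openai.DefaultModel,
	Prompt: []string{"Hello"},
})
if err != nil {
	// Assumption: the error unwraps into an APIError value; depending on the
	// implementation a *APIError target may be needed instead.
	var apiErr openai.APIError
	if errors.As(err, &apiErr) {
		log.Fatalf("openai: status=%d type=%s message=%s",
			apiErr.Err.StatusCode, apiErr.Err.Type, apiErr.Err.Message)
	}
	log.Fatal(err)
}
fmt.Println(r.Choices[0].Text)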

type AudioOptions

type AudioOptions struct {
	// The audio file to process, in one of these formats:
	// mp3, mp4, mpeg, mpga, m4a, wav, or webm.
	File io.Reader `binding:"required"`
	// The format of the audio file.
	AudioFormat string `binding:"required"`
	// ID of the model to use. Only whisper-1 is currently available.
	Model Model `binding:"required"`
	// An optional text to guide the model's style or continue a previous audio segment.
	// The prompt should match the audio language for transcriptions and English for translations.
	Prompt string
	// The sampling temperature, between 0 and 1.
	// Higher values like 0.8 will make the output more random, while lower values
	// like 0.2 will make it more focused and deterministic.
	// If set to 0, the model will use log probability to automatically increase
	// the temperature until certain thresholds are hit.
	Temperature float32
}

type CompletionOptions

type CompletionOptions struct {
	// ID of the model to use.
	Model Model `json:"model" binding:"required"`
	// Prompt to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
	Prompt []string `json:"prompt" binding:"required"`
	// The maximum number of tokens to generate in the completion.
	// The token count of your prompt plus max_tokens cannot exceed the model's context length.
	// Most models have a context length of 2048 tokens (except for the newest models, which support 4096).
	MaxTokens int `json:"max_tokens,omitempty" binding:"omitempty,max=4096"`
	// What sampling temperature to use. Higher values mean the model will take more risks.
	// Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
	Temperature float32 `json:"temperature,omitempty"`
	// How many completions to generate for each prompt.
	N int `json:"n,omitempty"`
	// Up to 4 sequences where the API will stop generating further tokens.
	// The returned text will not contain the stop sequence.
	Stop []string `json:"stop,omitempty"`
}

type CompletionResponse

type CompletionResponse struct {
	Id      string `json:"id"`
	Object  string `json:"object"`
	Created int    `json:"created"`
	Model   Model  `json:"model"`
	Choices []struct {
		Text         string `json:"text"`
		Index        int    `json:"index"`
		FinishReason string `json:"finish_reason"`
	} `json:"choices"`
	Usage struct {
		PromptTokens     int `json:"prompt_tokens"`
		CompletionTokens int `json:"completion_tokens"`
		TotalTokens      int `json:"total_tokens"`
	} `json:"usage"`
}

type EditOptions

type EditOptions struct {
	// ID of the model to use.
	Model Model `json:"model" binding:"required"`
	// The input text to use as a starting point for the edit.
	Input string `json:"input" binding:"required"`
	// The instruction that tells the model how to edit the prompt.
	Instruction string `json:"instruction" binding:"required"`
	// How many edits to generate for the input and instruction.
	// Defaults to 1.
	N int `json:"n,omitempty"`
	// What sampling temperature to use. Higher values mean the model will take more risks.
	// Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
	Temperature float32 `json:"temperature,omitempty"`
}

type EditResponse

type EditResponse struct {
	Id      string `json:"id"`
	Object  string `json:"object"`
	Created int    `json:"created"`
	Choices []struct {
		Text         string `json:"text"`
		Index        int    `json:"index"`
		FinishReason string `json:"finish_reason"`
	} `json:"choices"`
	Usage struct {
		PromptTokens     int `json:"prompt_tokens"`
		CompletionTokens int `json:"completion_tokens"`
		TotalTokens      int `json:"total_tokens"`
	} `json:"usage"`
}

type Engine

type Engine struct {
	// contains filtered or unexported fields
}

func New

func New(apiKey string) *Engine

New is used to initialize the engine.

func (*Engine) Completion

func (e *Engine) Completion(ctx context.Context, opts *CompletionOptions) (*CompletionResponse, error)

Completion given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.

The default number of tokens to complete is 1024. Docs: https://beta.openai.com/docs/api-reference/completions
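
For reference, a short sketch that also sets the optional N, Stop, and Temperature fields documented in CompletionOptions; the prompt and values are illustrative.

e := openai.New(os.Getenv("OPENAI_KEY"))
r, err := e.Completion(context.Background(), &openai.CompletionOptions{
	Model:       openai.DefaultModel, // alias for text-davinci-003
	Prompt:      []string{"Write a tagline for an ice cream shop."},
	MaxTokens:   64,
	Temperature: 0.9,            // more creative output
	N:           2,              // two completions per prompt
	Stop:        []string{"\n"}, // stop at the first newline
})
if err != nil {
	log.Fatal(err)
}
for _, c := range r.Choices {
	fmt.Println(c.Index, c.Text)
}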

func (*Engine) Edit

func (e *Engine) Edit(ctx context.Context, opts *EditOptions) (*EditResponse, error)

Edit given a prompt and an instruction, the model will return an edited version of the prompt.

Pay attention! This works only with text-davinci-edit-001. See issue: https://community.openai.com/t/is-edit-endpoint-documentation-incorrect/23361/10

Docs: https://beta.openai.com/docs/api-reference/edits

func (*Engine) ImageCreate

func (e *Engine) ImageCreate(ctx context.Context, opts *ImageCreateOptions) (*ImageCreateResponse, error)

ImageCreate given a prompt and/or an input image, the model will generate a new image.

Docs: https://beta.openai.com/docs/api-reference/images/create
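
A minimal sketch using the documented options together with the Size and ResponseFormat constants; the prompt is illustrative.

e := openai.New(os.Getenv("OPENAI_KEY"))
r, err := e.ImageCreate(context.Background(), &openai.ImageCreateOptions{
	Prompt:         "A watercolor painting of a lighthouse at sunset",
	N:              1,
	Size:           openai.Size512,
	ResponseFormat: openai.ResponseFormatUrl,
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(r.Data[0].Url)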

func (*Engine) ImageEdit

func (e *Engine) ImageEdit(ctx context.Context, opts *ImageEditOptions) (*ImageEditResponse, error)

ImageEdit creates an edited or extended image given an original image and a prompt.

Docs: https://beta.openai.com/docs/api-reference/images/create-edit
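
A minimal sketch; the file values and prompt are illustrative. Whether Image and Mask expect file paths or raw file contents is not documented here, so paths are assumed.

e := openai.New(os.Getenv("OPENAI_KEY"))
r, err := e.ImageEdit(context.Background(), &openai.ImageEditOptions{
	// Assumption: Image and Mask take paths to square PNG files under 4MB.
	Image:          "./original.png",
	Mask:           "./mask.png",
	Prompt:         "Add a red hot air balloon to the sky",
	N:              1,
	Size:           openai.Size1024,
	ResponseFormat: openai.ResponseFormatUrl,
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(r.Data[0].Url)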

func (*Engine) ImageVariation

func (e *Engine) ImageVariation(ctx context.Context, opts *ImageVariationOptions) (*ImageCreateResponse, error)

ImageVariation creates a variation of a given image.

Docs: https://beta.openai.com/docs/api-reference/images/create-variation
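
A minimal sketch; the file value is illustrative, and Image is assumed to take a path to a square PNG file under 4MB.

e := openai.New(os.Getenv("OPENAI_KEY"))
r, err := e.ImageVariation(context.Background(), &openai.ImageVariationOptions{
	// Assumption: Image takes a path to a square PNG file under 4MB.
	Image:          "./original.png",
	N:              2,
	Size:           openai.Size256,
	ResponseFormat: openai.ResponseFormatUrl,
})
if err != nil {
	log.Fatal(err)
}
for _, d := range r.Data {
	fmt.Println(d.Url)
}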

func (*Engine) ListModels

func (e *Engine) ListModels(ctx context.Context) (*ListModelsResponse, error)

ListModels lists the currently available models, and provides basic information about each one such as the owner and availability.

Docs: https://beta.openai.com/docs/api-reference/models/list

func (*Engine) Moderate

func (e *Engine) Moderate(ctx context.Context, input string) (*ModerationResponse, error)

Moderate classifies whether text violates OpenAI's Content Policy.

Docs: https://platform.openai.com/docs/api-reference/moderations/create
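
A minimal sketch; the input string is illustrative.

e := openai.New(os.Getenv("OPENAI_KEY"))
r, err := e.Moderate(context.Background(), "I want to hurt someone.")
if err != nil {
	log.Fatal(err)
}
res := r.Results[0]
fmt.Println("flagged:", res.Flagged)
fmt.Println("violence score:", res.CategoryScores.Violence)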

func (*Engine) RetrieveModel

func (e *Engine) RetrieveModel(ctx context.Context, opts *RetrieveModelOptions) (*RetrieveModelResponse, error)

RetrieveModel retrieves a model instance, providing basic information about the model such as the owner and permissioning.

Docs: https://beta.openai.com/docs/api-reference/models/retrieve

func (*Engine) SetApiKey

func (e *Engine) SetApiKey(apiKey string)

SetApiKey is used to set API key to access OpenAI API.

func (*Engine) SetOrganizationId

func (e *Engine) SetOrganizationId(organizationId string)

SetOrganizationId is used to set organization ID if user belongs to multiple organizations.
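
A minimal sketch of swapping credentials and scoping requests to an organization on an existing engine; the environment variable name and organization ID are placeholders.

e := openai.New(os.Getenv("OPENAI_KEY"))
// Placeholder values for illustration only.
e.SetApiKey(os.Getenv("OPENAI_KEY_SECONDARY"))
e.SetOrganizationId("org-XXXXXXXX")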

func (*Engine) Transcribe

func (e *Engine) Transcribe(ctx context.Context, opts *TranscribeOptions) (*TranscribeResponse, error)

Transcribe audio into the input language.

Docs: https://platform.openai.com/docs/api-reference/audio/create
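
A minimal sketch, assuming a local MP3 file named speech.mp3; the file name and language code are illustrative.

f, err := os.Open("speech.mp3")
if err != nil {
	log.Fatal(err)
}
defer f.Close()

e := openai.New(os.Getenv("OPENAI_KEY"))
r, err := e.Transcribe(context.Background(), &openai.TranscribeOptions{
	AudioOptions: &openai.AudioOptions{
		File:        f,
		AudioFormat: "mp3",
		Model:       openai.ModelWhisper,
	},
	// ISO-639-1 code of the spoken language; improves accuracy and latency.
	Language: "en",
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(r.Text)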

func (*Engine) Translate

func (e *Engine) Translate(ctx context.Context, opts *TranslateOptions) (*TranslateResponse, error)

Translate audio into English.

Docs: https://platform.openai.com/docs/api-reference/audio/create
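
A minimal sketch, assuming a local non-English MP3 file; the file name is illustrative.

f, err := os.Open("speech_de.mp3")
if err != nil {
	log.Fatal(err)
}
defer f.Close()

e := openai.New(os.Getenv("OPENAI_KEY"))
r, err := e.Translate(context.Background(), &openai.TranslateOptions{
	AudioOptions: &openai.AudioOptions{
		File:        f,
		AudioFormat: "mp3",
		Model:       openai.ModelWhisper,
	},
})
if err != nil {
	log.Fatal(err)
}
// Text contains the English translation of the audio.
fmt.Println(r.Text)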

type ImageCreateOptions

type ImageCreateOptions struct {
	Prompt string `json:"prompt" binding:"required"`
	// The number of images to generate.
	// Must be between 1 and 10.
	N int `json:"n,omitempty" binding:"omitempty,min=1,max=10"`
	// The size of the generated images.
	// Must be one of 256x256, 512x512, or 1024x1024.
	Size string `json:"size,omitempty" binding:"oneof=256x256 512x512 1024x1024"`
	// The format in which the generated images are returned.
	// Must be one of url or b64_json
	ResponseFormat string `json:"response_format,omitempty" binding:"omitempty,oneof=url b64_json"`
}

type ImageCreateResponse

type ImageCreateResponse struct {
	Created int `json:"created"`
	Data    []struct {
		Url string `json:"url"`
	} `json:"data"`
}

type ImageEditOptions

type ImageEditOptions struct {
	// The image to edit. Must be a valid PNG file, less than 4MB, and square.
	// If mask is not provided, image must have transparency, which will be used as the mask.
	Image string `binding:"required"`
	// An additional image whose fully transparent areas (e.g. where alpha is zero)
	// indicate where image should be edited. Must be a valid PNG file, less than 4MB,
	// and have the same dimensions as image.
	Mask string `binding:"omitempty"`
	// A text description of the desired image(s). The maximum length is 1000 characters.
	Prompt string `binding:"required,max=1000"`
	// The number of images to generate.
	// Must be between 1 and 10.
	N int `binding:"min=1,max=10"`
	// The size of the generated images.
	// Must be one of 256x256, 512x512, or 1024x1024.
	Size string `binding:"omitempty,oneof=256x256 512x512 1024x1024"`
	// The format in which the generated images are returned.
	// Must be one of url or b64_json
	ResponseFormat string `binding:"omitempty,oneof=url b64_json"`
}

type ImageEditResponse

type ImageEditResponse struct {
	Created int `json:"created"`
	Data    []struct {
		Url string `json:"url"`
	} `json:"data"`
}

type ImageVariationOptions

type ImageVariationOptions struct {
	// The image to edit. Must be a valid PNG file, less than 4MB, and square.
	// If mask is not provided, image must have transparency, which will be used as the mask.
	Image string `binding:"required"`
	// The number of images to generate.
	// Must be between 1 and 10.
	N int `binding:"min=1,max=10"`
	// The size of the generated images.
	// Must be one of 256x256, 512x512, or 1024x1024.
	Size string `binding:"oneof=256x256 512x512 1024x1024"`
	// The format in which the generated images are returned.
	// Must be one of url or b64_json
	ResponseFormat string `binding:"omitempty,oneof=url b64_json"`
}

type ImageVariationResponse

type ImageVariationResponse struct {
	Created int `json:"created"`
	Data    []struct {
		Url string `json:"url"`
	} `json:"data"`
}

type ListModelsResponse

type ListModelsResponse struct {
	Data []struct {
		ID      Model  `json:"id"`
		Object  string `json:"object"`
		OwnedBy string `json:"owned_by"`
	} `json:"data"`
}

type Model

type Model string

Generative Pre-trained Transformer (GPT) model.

Learn more: https://beta.openai.com/docs/models

const (
	ModelCodexDavinci002 Model = "code-davinci-002"
	ModelCodexCushman001 Model = "code-cushman-001"
)

The Codex models are descendants of our GPT-3 models that can understand and generate code. Their training data contains both natural language and billions of lines of public code from GitHub. They’re most capable in Python and proficient in over a dozen languages including JavaScript, Go, Perl, PHP, Ruby, Swift, TypeScript, SQL, and even Shell.

Learn more: https://platform.openai.com/docs/models/codex

const (
	ModelGPT3Ada            Model = "ada"
	ModelGPT3Babbage        Model = "babbage"
	ModelGPT3TextBabbage    Model = "text-babbage-001"
	ModelGPT3Curie          Model = "curie"
	ModelGPT3TextCurie001   Model = "text-curie-001"
	ModelGPT3Davince        Model = "davinci"
	ModelGPT3TextDavince    Model = "text-davinci-001"
	ModelGPT3TextDavinci002 Model = "text-davinci-002"
	ModelGPT3TextDavinci003 Model = "text-davinci-003"
	ModelGPT3TextAda001     Model = "text-ada-001"
	// DefaultModel is alias to ModelGPT3TextDavinci003
	DefaultModel = ModelGPT3TextDavinci003
)

GPT-3 models can understand and generate natural language. These models were superseded by the more powerful GPT-3.5 generation models. However, the original GPT-3 base models (davinci, curie, ada, and babbage) are currently the only models that are available to fine-tune.

const (
	ModelGPT3Dot5Turbo0301 Model = "gpt-3.5-turbo-0301"
	ModelGPT3Dot5Turbo     Model = "gpt-3.5-turbo"
)

GPT-3.5 models can understand and generate natural language or code. Our most capable and cost-effective model in the GPT-3.5 family is gpt-3.5-turbo, which has been optimized for chat but works well for traditional completions tasks as well.

Learn more: https://platform.openai.com/docs/models/gpt-3-5

const (
	ModelGPT4        Model = "gpt-4"
	ModelGPT432K0314 Model = "gpt-4-32k-0314"
	ModelGPT432K     Model = "gpt-4-32k"
	ModelGPT40314    Model = "gpt-4-0314"
)

GPT4 generation models. GPT-4 is a large multimodal model (accepting text inputs and emitting text outputs today, with image inputs coming in the future) that can solve difficult problems with greater accuracy than any of our previous models, thanks to its broader general knowledge and advanced reasoning capabilities. Like gpt-3.5-turbo, GPT-4 is optimized for chat but works well for traditional completions tasks.

Learn more: https://platform.openai.com/docs/models/gpt-4

const (
	ModelWhisper Model = "whisper-1"
)

ModelWhisper is a general-purpose speech recognition model. It is trained on a large dataset of diverse audio and is also a multi-task model that can perform multilingual speech recognition as well as speech translation and language identification. The Whisper v2-large model is currently available through our API with the whisper-1 model name.

Learn more: https://platform.openai.com/docs/models/whisper

type ModerationResponse

type ModerationResponse struct {
	Id      string `json:"id"`
	Model   string `json:"model"`
	Results []struct {
		Categories struct {
			// Content that expresses, incites, or promotes hate based on race, gender, ethnicity,
			// religion, nationality, sexual orientation, disability status, or caste.
			Hate bool `json:"hate"`
			// Hateful content that also includes violence or serious harm towards the targeted group.
			HateThreatening bool `json:"hate/threatening"`
			// Content that promotes, encourages, or depicts acts of self-harm, such as suicide,
			// cutting, and eating disorders.
			SelfHarm bool `json:"self-harm"`
			// Content meant to arouse sexual excitement, such as the description of sexual activity,
			// or that promotes sexual services (excluding sex education and wellness).
			Sexual bool `json:"sexual"`
			// Sexual content that includes an individual who is under 18 years old.
			SexualMinors bool `json:"sexual/minors"`
			// Content that promotes or glorifies violence or celebrates the suffering or humiliation of others.
			Violence bool `json:"violence"`
			// Violent content that depicts death, violence, or serious physical injury in extreme graphic detail.
			ViolenceGraphic bool `json:"violence/graphic"`
		} `json:"categories"`
		CategoryScores struct {
			Hate            float64 `json:"hate"`
			HateThreatening float64 `json:"hate/threatening"`
			SelfHarm        float64 `json:"self-harm"`
			Sexual          float64 `json:"sexual"`
			SexualMinors    float64 `json:"sexual/minors"`
			Violence        float64 `json:"violence"`
			ViolenceGraphic float64 `json:"violence/graphic"`
		} `json:"category_scores"`
		Flagged bool `json:"flagged"`
	} `json:"results"`
}

type ResponseFormat

type ResponseFormat string

ResponseFormat represents the image format of a response. Images can be returned as a URL or as base64-encoded JSON.

type RetrieveModelOptions

type RetrieveModelOptions struct {
	// The ID of the model.
	ID Model `json:"id" binding:"required"`
}

type RetrieveModelResponse

type RetrieveModelResponse struct {
	ID      Model  `json:"id"`
	Object  string `json:"object"`
	OwnedBy string `json:"owned_by"`
}

type Size

type Size string

Size represents the X*Y dimensions of a generated image.

type TranscribeOptions

type TranscribeOptions struct {
	*AudioOptions
	// The language of the input audio. Supplying the input language in ISO-639-1
	// format will improve accuracy and latency.
	Language string
}

type TranscribeResponse

type TranscribeResponse struct {
	Text string `json:"text"`
}

type TranslateOptions

type TranslateOptions struct {
	*AudioOptions
}

type TranslateResponse

type TranslateResponse struct {
	Text string `json:"text"`
}
