Documentation ¶
Overview ¶
Copyright (c) 2022 0x9ef. All rights reserved. Use of this source code is governed by an MIT license that can be found in the LICENSE file.
Index ¶
- Constants
- type APIError
- type AudioOptions
- type CompletionOptions
- type CompletionResponse
- type EditOptions
- type EditResponse
- type Engine
- func (e *Engine) Completion(ctx context.Context, opts *CompletionOptions) (*CompletionResponse, error)
- func (e *Engine) Edit(ctx context.Context, opts *EditOptions) (*EditResponse, error)
- func (e *Engine) ImageCreate(ctx context.Context, opts *ImageCreateOptions) (*ImageCreateResponse, error)
- func (e *Engine) ImageEdit(ctx context.Context, opts *ImageEditOptions) (*ImageEditResponse, error)
- func (e *Engine) ImageVariation(ctx context.Context, opts *ImageVariationOptions) (*ImageCreateResponse, error)
- func (e *Engine) ListModels(ctx context.Context) (*ListModelsResponse, error)
- func (e *Engine) Moderate(ctx context.Context, input string) (*ModerationResponse, error)
- func (e *Engine) RetrieveModel(ctx context.Context, opts *RetrieveModelOptions) (*RetrieveModelResponse, error)
- func (e *Engine) SetApiKey(apiKey string)
- func (e *Engine) SetOrganizationId(organizationId string)
- func (e *Engine) Transcribe(ctx context.Context, opts *TranscribeOptions) (*TranscribeResponse, error)
- func (e *Engine) Translate(ctx context.Context, opts *TranslateOptions) (*TranslateResponse, error)
- type ImageCreateOptions
- type ImageCreateResponse
- type ImageEditOptions
- type ImageEditResponse
- type ImageVariationOptions
- type ImageVariationResponse
- type ListModelsResponse
- type Model
- type ModerationResponse
- type ResponseFormat
- type RetrieveModelOptions
- type RetrieveModelResponse
- type Size
- type TranscribeOptions
- type TranscribeResponse
- type TranslateOptions
- type TranslateResponse
Constants ¶
const ( Size256 = "256x256" Size512 = "512x512" Size1024 = "1024x1024" SizeSmall = Size256 SizeMedium = Size512 SizeBig = Size1024 )
const ( ResponseFormatUrl = "url" ResponseFormatB64Json = "b64_json" )
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type APIError ¶
type AudioOptions ¶
type AudioOptions struct { // The audio file to process, in one of these formats: // mp3, mp4, mpeg, mpga, m4a, wav, or webm. File io.Reader `binding:"required"` // The format of the audio file. AudioFormat string `binding:"required"` // ID of the model to use. Only whisper-1 is currently available. Model Model `binding:"required"` // An optional text to guide the model's style or continue a previous audio segment. // The prompt should match the audio language for transcriptions and English for translations. Prompt string // The sampling temperature, between 0 and 1. // Higher values like 0.8 will make the output more random, while lower values // like 0.2 will make it more focused and deterministic. // If set to 0, the model will use log probability to automatically increase // the temperature until certain thresholds are hit. Temperature float32 }
type CompletionOptions ¶
type CompletionOptions struct { // ID of the model to use. Model Model `json:"model" binding:"required"` // Prompt to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Prompt []string `json:"prompt" binding:"required"` // The maximum number of tokens to generate in the completion. // The token count of your prompt plus max_tokens cannot exceed the model's context length. // Most models have a context length of 2048 tokens (except for the newest models, which support 4096). MaxTokens int `json:"max_tokens,omitempty" binding:"omitempty,max=4096"` // What sampling temperature to use. Higher values means the model will take more risks. // Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. Temperature float32 `json:"temperature,omitempty"` // How many completions to generate for each prompt. N int `json:"n,omitempty"` // Up to 4 sequences where the API will stop generating further tokens. // The returned text will not contain the stop sequence. Stop []string `json:"stop,omitempty"` }
type CompletionResponse ¶
type CompletionResponse struct { Id string `json:"id"` Object string `json:"object"` Created int `json:"created"` Model Model `json:"model"` Choices []struct { Text string `json:"text"` Index int `json:"index"` FinishReason string `json:"finish_reason"` } `json:"choices"` Usage struct { PromptTokens int `json:"prompt_tokens"` CompletionTokens int `json:"completion_tokens"` TotalTokens int `json:"total_tokens"` } `json:"usage"` }
type EditOptions ¶
type EditOptions struct { // ID of the model to use. Model Model `json:"model" binding:"required"` // The input text to use as a starting point for the edit. Input string `json:"input" binding:"required"` // The instruction that tells the model how to edit the prompt. Instruction string `json:"instruction" binding:"required"` // How many edits to generate for the input and instruction. // Defaults to 1. N int `json:"n,omitempty"` // What sampling temperature to use. Higher values means the model will take more risks. // Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. Temperature float32 `json:"temperature,omitempty"` }
type EditResponse ¶
type EditResponse struct { Id string `json:"id"` Object string `json:"object"` Created int `json:"created"` Choices []struct { Text string `json:"text"` Index int `json:"index"` FinishReason string `json:"finish_reason"` } `json:"choices"` Usage struct { PromptTokens int `json:"prompt_tokens"` CompletionTokens int `json:"completion_tokens"` TotalTokens int `json:"total_tokens"` } `json:"usage"` }
type Engine ¶
type Engine struct {
// contains filtered or unexported fields
}
func (*Engine) Completion ¶
func (e *Engine) Completion(ctx context.Context, opts *CompletionOptions) (*CompletionResponse, error)
Completion given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.
The default number of tokens to complete is 1024. Docs: https://beta.openai.com/docs/api-reference/completions
func (*Engine) Edit ¶
func (e *Engine) Edit(ctx context.Context, opts *EditOptions) (*EditResponse, error)
Edit given a prompt and an instruction, the model will return an edited version of the prompt.
Pay attention! This works only with text-davinci-edit-001 See issue: https://community.openai.com/t/is-edit-endpoint-documentation-incorrect/23361/10
func (*Engine) ImageCreate ¶
func (e *Engine) ImageCreate(ctx context.Context, opts *ImageCreateOptions) (*ImageCreateResponse, error)
ImageCreate given a prompt and/or an input image, the model will generate a new image.
Docs: https://beta.openai.com/docs/api-reference/images/create
func (*Engine) ImageEdit ¶
func (e *Engine) ImageEdit(ctx context.Context, opts *ImageEditOptions) (*ImageEditResponse, error)
ImageEdit creates an edited or extended image given an original image and a prompt.
Docs: https://beta.openai.com/docs/api-reference/images/create-edit
func (*Engine) ImageVariation ¶
func (e *Engine) ImageVariation(ctx context.Context, opts *ImageVariationOptions) (*ImageCreateResponse, error)
ImageVariation creates a variation of a given image.
Docs: https://beta.openai.com/docs/api-reference/images/create-variation
func (*Engine) ListModels ¶
func (e *Engine) ListModels(ctx context.Context) (*ListModelsResponse, error)
ListModels lists the currently available models, and provides basic information about each one such as the owner and availability.
Docs: https://beta.openai.com/docs/api-reference/models/list
func (*Engine) Moderate ¶
Moderate classifies if text violates OpenAI's Content Policy.
Docs: https://platform.openai.com/docs/api-reference/moderations/create
func (*Engine) RetrieveModel ¶
func (e *Engine) RetrieveModel(ctx context.Context, opts *RetrieveModelOptions) (*RetrieveModelResponse, error)
RetrieveModel retrieves a model instance, providing basic information about the model such as the owner and permissioning.
Docs: https://beta.openai.com/docs/api-reference/models/retrieve
func (*Engine) SetOrganizationId ¶
SetOrganizationId is used to set organization ID if user belongs to multiple organizations.
func (*Engine) Transcribe ¶
func (e *Engine) Transcribe(ctx context.Context, opts *TranscribeOptions) (*TranscribeResponse, error)
Transcribe audio into the input language.
Docs: https://platform.openai.com/docs/api-reference/audio/create
func (*Engine) Translate ¶
func (e *Engine) Translate(ctx context.Context, opts *TranslateOptions) (*TranslateResponse, error)
Translate audio into English.
Docs: https://platform.openai.com/docs/api-reference/audio/create
type ImageCreateOptions ¶
type ImageCreateOptions struct { Prompt string `json:"prompt" binding:"required"` // The number of images to generate. // Must be between 1 and 10. N int `json:"n,omitempty" binding:"omitempty,min=1,max=10"` // The size of the generated images. // Must be one of 256x256, 512x512, or 1024x1024. Size string `json:"size,omitempty" binding:"oneof=256x256 512x512 1024x1024"` // The format in which the generated images are returned. // Must be one of url or b64_json ResponseFormat string `json:"response_format,omitempty" binding:"omitempty,oneof=url b64_json"` }
type ImageCreateResponse ¶
type ImageEditOptions ¶
type ImageEditOptions struct { // The image to edit. Must be a valid PNG file, less than 4MB, and square. // If mask is not provided, image must have transparency, which will be used as the mask. Image string `binding:"required"` // An additional image whose fully transparent areas (e.g. where alpha is zero) // indicate where image should be edited. Must be a valid PNG file, less than 4MB, // and have the same dimensions as image. Mask string `binding:"omitempty"` // A text description of the desired image(s). The maximum length is 1000 characters. Prompt string `binding:"required,max=1000"` // The number of images to generate. // Must be between 1 and 10. N int `binding:"min=1,max=10"` // The size of the generated images. // Must be one of 256x256, 512x512, or 1024x1024. Size string `binding:"omitempty,oneof=256x256 512x512 1024x1024"` // The format in which the generated images are returned. // Must be one of url or b64_json ResponseFormat string `binding:"omitempty,oneof=url b64_json"` }
type ImageEditResponse ¶
type ImageVariationOptions ¶
type ImageVariationOptions struct { // The image to edit. Must be a valid PNG file, less than 4MB, and square. // If mask is not provided, image must have transparency, which will be used as the mask. Image string `binding:"required"` // The number of images to generate. // Must be between 1 and 10. N int `binding:"min=1,max=10"` // The size of the generated images. // Must be one of 256x256, 512x512, or 1024x1024. Size string `binding:"oneof=256x256 512x512 1024x1024"` // The format in which the generated images are returned. // Must be one of url or b64_json ResponseFormat string `binding:"omitempty,oneof=url b64_json"` }
type ImageVariationResponse ¶
type ListModelsResponse ¶
type Model ¶
type Model string
Generative Pre-trained Transformer (GPT) model.
Learn more: https://beta.openai.com/docs/models
const ( ModelCodexDavinci002 Model = "code-davinci-002" ModelCodexCushman001 Model = "code-cushman-001" )
The Codex models are descendants of our GPT-3 models that can understand and generate code. Their training data contains both natural language and billions of lines of public code from GitHub. They’re most capable in Python and proficient in over a dozen languages including JavaScript, Go, Perl, PHP, Ruby, Swift, TypeScript, SQL, and even Shell.
Learn more: https://platform.openai.com/docs/models/codex
const ( ModelGPT3Ada Model = "ada" ModelGPT3Babbage Model = "babbage" ModelGPT3TextBabbage Model = "text-babbage-001" ModelGPT3Curie Model = "curie" ModelGPT3TextCurie001 Model = "text-curie-001" ModelGPT3Davince Model = "davinci" ModelGPT3TextDavince Model = "text-davinci-001" ModelGPT3TextDavinci002 Model = "text-davinci-002" ModelGPT3TextDavinci003 Model = "text-davinci-003" ModelGPT3TextAda001 Model = "text-ada-001" // DefaultModel is alias to ModelGPT3TextDavinci003 DefaultModel = ModelGPT3TextDavinci003 )
GPT-3 models can understand and generate natural language. These models were superseded by the more powerful GPT-3.5 generation models. However, the original GPT-3 base models (davinci, curie, ada, and babbage) are currently the only models that are available to fine-tune.
const ( ModelGPT3Dot5Turbo0301 Model = "gpt-3.5-turbo-0301" ModelGPT3Dot5Turbo Model = "gpt-3.5-turbo" )
GPT-3.5 models can understand and generate natural language or code. Our most capable and cost-effective model in the GPT-3.5 family is gpt-3.5-turbo, which has been optimized for chat but works well for traditional completions tasks as well.
Learn more: https://platform.openai.com/docs/models/gpt-3-5
const ( ModelGPT4 Model = "gpt-4" ModelGPT432K0314 Model = "gpt-4-32k-0314" ModelGPT432K Model = "gpt-4-32k" ModelGPT40314 Model = "gpt-4-0314" )
GPT4 generation models. GPT-4 is a large multimodal model (accepting text inputs and emitting text outputs today, with image inputs coming in the future) that can solve difficult problems with greater accuracy than any of our previous models, thanks to its broader general knowledge and advanced reasoning capabilities. Like gpt-3.5-turbo, GPT-4 is optimized for chat but works well for traditional completions tasks.
Learn more: https://platform.openai.com/docs/models/gpt-4
const (
ModelWhisper Model = "whisper-1"
)
ModelWhisper is a general-purpose speech recognition model. It is trained on a large dataset of diverse audio and is also a multi-task model that can perform multilingual speech recognition as well as speech translation and language identification. The Whisper v2-large model is currently available through our API with the whisper-1 model name.
Learn more: https://platform.openai.com/docs/models/whisper
type ModerationResponse ¶
type ModerationResponse struct { Id string `json:"id"` Model string `json:"model"` Results []struct { Categories struct { // Content that expresses, incites, or promotes hate based on race, gender, ethnicity, // religion, nationality, sexual orientation, disability status, or caste. Hate bool `json:"hate"` // Hateful content that also includes violence or serious harm towards the targeted group. HateThreatening bool `json:"hate/threatening"` // Content that promotes, encourages, or depicts acts of self-harm, such as suicide, // cutting, and eating disorders. SelfHarm bool `json:"self-harm"` // Content meant to arouse sexual excitement, such as the description of sexual activity, // or that promotes sexual services (excluding sex education and wellness). Sexual bool `json:"sexual"` // Sexual content that includes an individual who is under 18 years old. SexualMinors bool `json:"sexual/minors"` // Content that promotes or glorifies violence or celebrates the suffering or humiliation of others. Violence bool `json:"violence"` // Violent content that depicts death, violence, or serious physical injury in extreme graphic detail. ViolenceGraphic bool `json:"violence/graphic"` } `json:"categories"` CategoryScores struct { Hate float64 `json:"hate"` HateThreatening float64 `json:"hate/threatening"` SelfHarm float64 `json:"self-harm"` Sexual float64 `json:"sexual"` SexualMinors float64 `json:"sexual/minors"` Violence float64 `json:"violence"` ViolenceGraphic float64 `json:"violence/graphic"` } `json:"category_scores"` Flagged bool `json:"flagged"` } `json:"results"` }
type ResponseFormat ¶
type ResponseFormat string
ResponseFormat represents image format of response. It can be encoded as URL, or with base64+json.
type RetrieveModelOptions ¶
type RetrieveModelOptions struct { // The ID of the model. ID Model `json:"id" binding:"required"` }
type RetrieveModelResponse ¶
type TranscribeOptions ¶
type TranscribeOptions struct { *AudioOptions // The language of the input audio. Supplying the input language in ISO-639-1 // format will improve accuracy and latency. Language string }
type TranscribeResponse ¶
type TranscribeResponse struct {
Text string `json:"text"`
}
type TranslateOptions ¶
type TranslateOptions struct {
*AudioOptions
}
type TranslateResponse ¶
type TranslateResponse struct {
Text string `json:"text"`
}