openaigo

package module
v0.0.1 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: May 8, 2023 License: MIT Imports: 9 Imported by: 0

README

openaigo fork for go 1.16

Go CodeQL App Test over API License FOSSA Status
Maintainability Go Report Card codecov
Reference GoDoc

Yet another API client for api.openai.com.

This library is community-maintained, NOT officially supported by OpenAI.

Usage Example

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/otiai10/openaigo"
)

// main demonstrates a minimal chat-completion call: build a client from
// the OPENAI_API_KEY environment variable, send a single user message to
// the gpt-3.5-turbo model, and print the raw response (and any error).
func main() {
	client := openaigo.NewClient(os.Getenv("OPENAI_API_KEY"))
	request := openaigo.ChatCompletionRequestBody{
		Model: "gpt-3.5-turbo",
		Messages: []openaigo.ChatMessage{
			{Role: "user", Content: "Hello!"},
		},
	}
	ctx := context.Background()
	response, err := client.Chat(ctx, request)
	fmt.Println(response, err)
}

If you just want to try it out, run the commands below.

git clone https://github.com/otiai10/openaigo.git
cd openaigo
OPENAI_API_KEY=YourAPIKey go run ./testapp/main.go

API Keys?

Visit https://beta.openai.com/account/api-keys and you can create your own API key to get started for free.

Endpoint Support

Need Proxy?

client := openaigo.NewClient(OPENAI_API_KEY)
// You can set whatever you want
transport := &http.Transport{ Proxy: http.ProxyFromEnvironment }
client.HTTPClient = &http.Client{ Transport: transport }
// Done!

Issues

Report any issues here; all feedback is welcome.

Documentation

Index

Constants

View Source
// Supported image sizes for the Images API (generation, edit, variation).
const (
	Size256  string = "256x256"
	Size512  string = "512x512"
	Size1024 string = "1024x1024"
)
View Source
// Model IDs accepted by the completion and chat-completion endpoints.
const (
	// {{{ https://beta.openai.com/docs/models/gpt-3
	TextDavinci003 = "text-davinci-003"
	TextCurie001   = "text-curie-001"
	TextBabbage001 = "text-babbage-001"
	TextAda001     = "text-ada-001"

	// {{{ https://platform.openai.com/docs/models/gpt-3-5
	GPT3_5Turbo = "gpt-3.5-turbo"
	// Fixed: previously "gpt-3.5-turbo--0301" (doubled hyphen), which is
	// not a valid model ID and would be rejected by the API.
	GPT3_5Turbo_0301 = "gpt-3.5-turbo-0301"

	// {{{ https://platform.openai.com/docs/models/gpt-4
	GPT4 = "gpt-4"
	// Fixed: previously "gpt-3-0314", which is not a GPT-4 model ID.
	GPT4_0314     = "gpt-4-0314"
	GPT4_32K      = "gpt-4-32k"
	GPT4_32K_0314 = "gpt-4-32k-0314"
)

https://beta.openai.com/docs/models/overview

View Source
const DefaultOpenAIAPIURL = "https://api.openai.com/v1"

Variables

View Source
// Markers used when parsing server-sent events from streaming responses:
// each event line starts with "data: " (or "error: "), and the stream is
// terminated by the sentinel payload "[DONE]".
var (
	StreamPrefixDATA  = []byte("data: ")
	StreamPrefixERROR = []byte("error: ")
	StreamDataDONE    = []byte("[DONE]")
)

Functions

This section is empty.

Types

type APIError

// APIError is the error payload returned by the OpenAI API, augmented
// with the HTTP status of the response that carried it.
type APIError struct {
	Message string       `json:"message"`
	Type    APIErrorType `json:"type"`
	Param   interface{}  `json:"param"` // TODO: typing
	Code    interface{}  `json:"code"`  // TODO: typing

	// HTTP status line and status code of the response.
	// NOTE(review): no json tags — presumably populated by the client
	// from the *http.Response rather than the JSON body; confirm.
	Status     string
	StatusCode int
}

func (APIError) Error

func (err APIError) Error() string

type APIErrorType

// APIErrorType classifies the "type" field of an APIError.
type APIErrorType string
// Known APIErrorType values.
const (
	ErrorInsufficientQuota APIErrorType = "insufficient_quota"
	ErrorInvalidRequest    APIErrorType = "invalid_request_error"
)

type ChatChoice

// ChatChoice is one generated choice within a chat-completion response.
type ChatChoice struct {
	Index        int         `json:"index"`
	Message      ChatMessage `json:"message"`
	FinishReason string      `json:"finish_reason"`
	Delta        ChatMessage `json:"delta"` // Only appears in stream response
}

type ChatCompletionRequestBody

// ChatCompletionRequestBody is the request payload for the chat-completions
// endpoint (POST /chat/completions).
type ChatCompletionRequestBody struct {

	// Model: ID of the model to use.
	// Currently, only gpt-3.5-turbo and gpt-3.5-turbo-0301 are supported.
	Model string `json:"model"`

	// Messages: The messages to generate chat completions for, in the chat format.
	// https://platform.openai.com/docs/guides/chat/introduction
	// Including the conversation history helps when user instructions refer to prior messages.
	// In the example above, the user’s final question of “Where was it played?” only makes sense in the context of the prior messages about the World Series of 2020.
	// Because the models have no memory of past requests, all relevant information must be supplied via the conversation.
	// If a conversation cannot fit within the model’s token limit, it will need to be shortened in some way.
	Messages []ChatMessage `json:"messages"`

	// Temperature: What sampling temperature to use, between 0 and 2.
	// Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
	// We generally recommend altering this or top_p but not both.
	// Defaults to 1.
	Temperature float32 `json:"temperature,omitempty"`

	// TopP: An alternative to sampling with temperature, called nucleus sampling,
	// where the model considers the results of the tokens with top_p probability mass.
	// So 0.1 means only the tokens comprising the top 10% probability mass are considered.
	// We generally recommend altering this or temperature but not both.
	// Defaults to 1.
	TopP float32 `json:"top_p,omitempty"`

	// N: How many chat completion choices to generate for each input message.
	// Defaults to 1.
	N int `json:"n,omitempty"`

	// Stream: If set, partial message deltas will be sent, like in ChatGPT.
	// Tokens will be sent as data-only server-sent events as they become available,
	// with the stream terminated by a data: [DONE] message.
	Stream bool `json:"stream,omitempty"`

	// StreamCallback is a callback function to handle streaming responses.
	// If provided, this library automatically sets `Stream` to `true`.
	// This field is added by github.com/otiai10/openaigo only to handle Stream.
	// Thus, it is omitted (json:"-") when the client executes the HTTP request.
	StreamCallback func(res ChatCompletionResponse, done bool, err error) `json:"-"`

	// Stop: Up to 4 sequences where the API will stop generating further tokens.
	// Defaults to null.
	Stop []string `json:"stop,omitempty"`

	// MaxTokens: The maximum number of tokens allowed for the generated answer.
	// By default, the number of tokens the model can return will be (4096 - prompt tokens).
	MaxTokens int `json:"max_tokens,omitempty"`

	// PresencePenalty: Number between -2.0 and 2.0.
	// Positive values penalize new tokens based on whether they appear in the text so far,
	// increasing the model's likelihood to talk about new topics.
	// See more information about frequency and presence penalties.
	// https://platform.openai.com/docs/api-reference/parameter-details
	PresencePenalty float32 `json:"presence_penalty,omitempty"`

	// FrequencyPenalty: Number between -2.0 and 2.0.
	// Positive values penalize new tokens based on their existing frequency in the text so far,
	// decreasing the model's likelihood to repeat the same line verbatim.
	// See more information about frequency and presence penalties.
	// https://platform.openai.com/docs/api-reference/parameter-details
	FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`

	// LogitBias: Modify the likelihood of specified tokens appearing in the completion.
	// Accepts a json object that maps tokens (specified by their token ID in the tokenizer)
	// to an associated bias value from -100 to 100.
	// Mathematically, the bias is added to the logits generated by the model prior to sampling.
	// The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;
	// values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
	LogitBias map[string]int `json:"logit_bias,omitempty"`

	// User: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more.
	// https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids
	User string `json:"user,omitempty"`
}

ChatCompletionRequestBody: https://platform.openai.com/docs/guides/chat/chat-completions-beta https://platform.openai.com/docs/api-reference/chat

type ChatCompletionResponse

// ChatCompletionResponse is the response of POST /chat/completions.
type ChatCompletionResponse struct {
	ID      string       `json:"id"`
	Object  string       `json:"object"`
	Created int64        `json:"created"`
	Choices []ChatChoice `json:"choices"`
	Usage   Usage        `json:"usage"`
}

type ChatMessage

// ChatMessage is a single message in a chat conversation: a role plus the
// message content.
type ChatMessage struct {

	// Role: Either of "system", "user", "assistant".
	// Typically, a conversation is formatted with a system message first, followed by alternating user and assistant messages.
	// The system message helps set the behavior of the assistant. In the example above, the assistant was instructed with “You are a helpful assistant.”
	// The user messages help instruct the assistant. They can be generated by the end users of an application, or set by a developer as an instruction.
	// The assistant messages help store prior responses. They can also be written by a developer to help give examples of desired behavior.
	Role string `json:"role"`

	// Content: A content of the message.
	Content string `json:"content"`
}

ChatMessage: An element of messages parameter. The main input is the messages parameter. Messages must be an array of message objects, where each object has a role (either “system”, “user”, or “assistant”) and content (the content of the message). Conversations can be as short as 1 message or fill many pages.

type Choice

// Choice is one generated text completion within a CompletionResponse
// (or an EditCreateResponse).
type Choice struct {
	Text         string `json:"text"`
	Index        int    `json:"index"`
	LogProbs     int    `json:"logprobs"`
	FinishReason string `json:"finish_reason"`
}

type Client

// Client is an API client for api.openai.com endpoints.
type Client struct {

	// APIKey issued by OpenAI console.
	// See https://beta.openai.com/account/api-keys
	APIKey string

	// BaseURL of API including the version.
	// e.g., https://api.openai.com/v1
	// When empty, DefaultOpenAIAPIURL is presumably used — confirm in NewClient.
	BaseURL string

	// Organization ID sent with requests (optional).
	Organization string

	// HTTPClient (optional) to proxy HTTP request.
	// If nil, http.DefaultClient will be used.
	HTTPClient *http.Client
}

Client for api.openai.com API endpoints.

func NewClient

func NewClient(apikey string) *Client

func (*Client) CancelFineTune

func (client *Client) CancelFineTune(ctx context.Context, id string) (resp FineTuneCancelResponse, err error)

CancelFineTune: POST https://api.openai.com/v1/fine-tunes/{fine_tune_id}/cancel Immediately cancel a fine-tune job. See https://beta.openai.com/docs/api-reference/fine-tunes/cancel

func (*Client) Chat

func (client *Client) Chat(ctx context.Context, body ChatCompletionRequestBody) (resp ChatCompletionResponse, err error)

Chat, short-hand of ChatCompletion. Creates a completion for the chat message.

func (*Client) ChatCompletion

func (client *Client) ChatCompletion(ctx context.Context, body ChatCompletionRequestBody) (resp ChatCompletionResponse, err error)

ChatCompletion: POST https://api.openai.com/v1/chat/completions Creates a completion for the chat message. See https://platform.openai.com/docs/api-reference/chat/create

func (*Client) Completion

func (client *Client) Completion(ctx context.Context, body CompletionRequestBody) (resp CompletionResponse, err error)

Completion: POST https://api.openai.com/v1/completions Creates a completion for the provided prompt and parameters See https://beta.openai.com/docs/api-reference/completions/create

func (*Client) CreateEdit

func (client *Client) CreateEdit(ctx context.Context, body EditCreateRequestBody) (resp EditCreateResponse, err error)

Edit: POST https://api.openai.com/v1/edits Creates a new edit for the provided input, instruction, and parameters. See https://beta.openai.com/docs/api-reference/edits/create

func (*Client) CreateEmbedding

func (client *Client) CreateEmbedding(ctx context.Context, body EmbeddingCreateRequestBody) (resp EmbeddingCreateResponse, err error)

CreateEmbedding: POST https://api.openai.com/v1/embeddings Creates an embedding vector representing the input text. See https://beta.openai.com/docs/api-reference/embeddings/create

func (*Client) CreateFineTune

func (client *Client) CreateFineTune(ctx context.Context, body FineTuneCreateRequestBody) (resp FineTuneCreateResponse, err error)

CreateFineTune: POST https://api.openai.com/v1/fine-tunes Creates a job that fine-tunes a specified model from a given dataset. Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. Learn more about Fine-tuning: https://beta.openai.com/docs/guides/fine-tuning See https://beta.openai.com/docs/api-reference/fine-tunes/create

func (*Client) CreateImage

func (client *Client) CreateImage(ctx context.Context, body ImageGenerationRequestBody) (resp ImageGenerationResponse, err error)

CreateImage: POST https://api.openai.com/v1/images/generations Creates an image given a prompt. See https://beta.openai.com/docs/api-reference/images/create

func (*Client) CreateImageVariation

func (client *Client) CreateImageVariation(ctx context.Context, body ImageVariationRequestBody) (resp ImageVariationResponse, err error)

CreateImageVariation: POST https://api.openai.com/v1/images/variations Creates a variation of a given image. See https://beta.openai.com/docs/api-reference/images/create-variation

func (*Client) CreateModeration

func (client *Client) CreateModeration(ctx context.Context, body ModerationCreateRequestBody) (resp ModerationCreateResponse, err error)

CreateModeration: POST https://api.openai.com/v1/moderations Classifies if text violates OpenAI's Content Policy. See https://beta.openai.com/docs/api-reference/moderations/create

func (*Client) DeleteFile

func (client *Client) DeleteFile(ctx context.Context, id string) (resp FileDeleteResponse, err error)

DeleteFile: DELETE https://api.openai.com/v1/files/{file_id} Delete a file. See https://beta.openai.com/docs/api-reference/files/delete

func (*Client) DeleteFineTuneModel

func (client *Client) DeleteFineTuneModel(ctx context.Context, id string) (resp FineTuneDeleteModelResponse, err error)

DeleteFineTuneModel: DELETE https://api.openai.com/v1/models/{model} Delete a fine-tuned model. You must have the Owner role in your organization. See https://beta.openai.com/docs/api-reference/fine-tunes/delete-model

func (*Client) EditImage

func (client *Client) EditImage(ctx context.Context, body ImageEditRequestBody) (resp ImageEditResponse, err error)

func (*Client) ListFiles

func (client *Client) ListFiles(ctx context.Context) (resp FileListResponse, err error)

ListFiles: GET https://api.openai.com/v1/files Returns a list of files that belong to the user's organization. See https://beta.openai.com/docs/api-reference/files/list

func (*Client) ListFineTuneEvents

func (client *Client) ListFineTuneEvents(ctx context.Context, id string) (resp FineTuneListEventsResponse, err error)

ListFineTuneEvents: GET https://api.openai.com/v1/fine-tunes/{fine_tune_id}/events Get fine-grained status updates for a fine-tune job. See https://beta.openai.com/docs/api-reference/fine-tunes/events

func (*Client) ListFineTunes

func (client *Client) ListFineTunes(ctx context.Context) (resp FineTuneListResponse, err error)

ListFineTunes: GET https://api.openai.com/v1/fine-tunes List your organization's fine-tuning jobs. See https://beta.openai.com/docs/api-reference/fine-tunes/list

func (*Client) ListModels

func (client *Client) ListModels(ctx context.Context) (resp ModelsListResponse, err error)

ListModels: GET /models Lists the currently available models, and provides basic information about each one such as the owner and availability. See https://beta.openai.com/docs/api-reference/models/list

func (*Client) RetrieveFile

func (client *Client) RetrieveFile(ctx context.Context, id string) (resp FileRetrieveResponse, err error)

RetrieveFile: GET https://api.openai.com/v1/files/{file_id} Returns information about a specific file. See https://beta.openai.com/docs/api-reference/files/retrieve

func (*Client) RetrieveFileContent

func (client *Client) RetrieveFileContent(ctx context.Context, id string) (res io.ReadCloser, err error)

RetrieveFileContent: GET https://api.openai.com/v1/files/{file_id}/content Returns the contents of the specified file. User must Close response after used. See https://beta.openai.com/docs/api-reference/files/retrieve-content

func (*Client) RetrieveFineTune

func (client *Client) RetrieveFineTune(ctx context.Context, id string) (resp FineTuneRetrieveResponse, err error)

RetrieveFineTune: GET https://api.openai.com/v1/fine-tunes/{fine_tune_id} Gets info about the fine-tune job. Learn more about Fine-tuning https://beta.openai.com/docs/guides/fine-tuning See https://beta.openai.com/docs/api-reference/fine-tunes/retrieve

func (*Client) RetrieveModel

func (client *Client) RetrieveModel(ctx context.Context, model string) (resp ModelRetrieveResponse, err error)

RetrieveModel: GET /models/{model} Retrieves a model instance, providing basic information about the model such as the owner and permissioning. See https://beta.openai.com/docs/api-reference/models/retrieve

func (*Client) UploadFile

func (client *Client) UploadFile(ctx context.Context, body FileUploadRequestBody) (resp FileUploadResponse, err error)

UploadFile: POST https://api.openai.com/v1/files Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit. See https://beta.openai.com/docs/api-reference/files/upload

type CompletionRequestBody

// CompletionRequestBody is the request payload for the (legacy) text
// completion endpoint (POST /completions).
type CompletionRequestBody struct {

	// Model: ID of the model to use.
	// You can use the List models API to see all of your available models, or see our Model overview for descriptions of them.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-model
	Model string `json:"model"`

	// Prompt: The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
	// Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-prompt
	Prompt []string `json:"prompt"`

	// MaxTokens: The maximum number of tokens to generate in the completion.
	// The token count of your prompt plus max_tokens cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-max_tokens
	MaxTokens int `json:"max_tokens,omitempty"`

	// Temperature: What sampling temperature to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer.
	// We generally recommend altering this or top_p but not both.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-temperature
	Temperature float32 `json:"temperature,omitempty"`

	// Suffix: The suffix that comes after a completion of inserted text.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-suffix
	Suffix string `json:"suffix,omitempty"`

	// TopP: An alternative to sampling with temperature, called nucleus sampling,
	// where the model considers the results of the tokens with top_p probability mass.
	// So 0.1 means only the tokens comprising the top 10% probability mass are considered.
	// We generally recommend altering this or temperature but not both.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-top_p
	TopP float32 `json:"top_p,omitempty"`

	// N: How many completions to generate for each prompt.
	// Note: Because this parameter generates many completions, it can quickly consume your token quota.
	// Use carefully and ensure that you have reasonable settings for max_tokens and stop.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-n
	N int `json:"n,omitempty"`

	// Stream: Whether to stream back partial progress.
	// If set, tokens will be sent as data-only server-sent events as they become available,
	// with the stream terminated by a data: [DONE] message.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-stream
	Stream bool `json:"stream,omitempty"`

	// LogProbs: Include the log probabilities on the logprobs most likely tokens, as well the chosen tokens.
	// For example, if logprobs is 5, the API will return a list of the 5 most likely tokens. The API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response.
	// The maximum value for logprobs is 5. If you need more than this, please contact us through our Help center and describe your use case.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-logprobs
	LogProbs int `json:"logprobs,omitempty"`

	// Echo: Echo back the prompt in addition to the completion.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-echo
	Echo bool `json:"echo,omitempty"`

	// Stop: Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-stop
	Stop []string `json:"stop,omitempty"`

	// PresencePenalty: Number between -2.0 and 2.0.
	// Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
	// See more information about frequency and presence penalties.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-presence_penalty
	PresencePenalty float32 `json:"presence_penalty,omitempty"`

	// FrequencyPenalty: Number between -2.0 and 2.0.
	// Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
	// See more information about frequency and presence penalties.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-frequency_penalty
	FrequencyPenalty float32 `json:"frequency_penalty,omitempty"`

	// BestOf: Generates best_of completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed.
	// When used with n, best_of controls the number of candidate completions and n specifies how many to return – best_of must be greater than n.
	// Note: Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for max_tokens and stop.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-best_of
	BestOf int `json:"best_of,omitempty"`

	// LogitBias: Modify the likelihood of specified tokens appearing in the completion.
	// Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this tokenizer tool (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
	// As an example, you can pass {"50256": -100} to prevent the <|endoftext|> token from being generated.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-logit_bias
	LogitBias map[string]int `json:"logit_bias,omitempty"`

	// User: A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more.
	// See https://beta.openai.com/docs/api-reference/completions/create#completions/create-user
	User string `json:"user,omitempty"`
}

type CompletionResponse

// CompletionResponse is the response of POST /completions.
type CompletionResponse struct {
	ID      string     `json:"id"`
	Object  ObjectType `json:"object"`
	Created int64      `json:"created"`
	Model   string     `json:"model"`
	Choices []Choice   `json:"choices"`
	// Usage now carries an explicit tag so that marshaling emits "usage"
	// (lowercase), consistent with every other response type in this
	// package; decoding already matched "usage" case-insensitively.
	Usage Usage `json:"usage"`
}

type EditCreateRequestBody

// EditCreateRequestBody is the request payload for POST /edits.
type EditCreateRequestBody struct {
	Model       string  `json:"model"`
	Instruction string  `json:"instruction"`
	Input       string  `json:"input,omitempty"`
	N           int     `json:"n,omitempty"`
	Temperature float32 `json:"temperature,omitempty"`
	TopP        float32 `json:"top_p,omitempty"`
}

type EditCreateResponse

// EditCreateResponse is the response of POST /edits.
type EditCreateResponse struct {
	Object  ObjectType `json:"object"`
	Created int64      `json:"created"`
	Choices []Choice   `json:"choices"`
	Usage   Usage      `json:"usage"`
}

type EmbeddingCreateRequestBody

// EmbeddingCreateRequestBody is the request payload for POST /embeddings.
type EmbeddingCreateRequestBody struct {
	Model string   `json:"model"`
	Input []string `json:"input"`
	User  string   `json:"user,omitempty"`
}

type EmbeddingCreateResponse

// EmbeddingCreateResponse is the response of POST /embeddings.
type EmbeddingCreateResponse struct {
	Object string          `json:"object"`
	Data   []EmbeddingData `json:"data"`
	Usage  Usage           `json:"usage"`
}

type EmbeddingData

// EmbeddingData is one embedding vector for the input at the given index.
type EmbeddingData struct {
	Object    string    `json:"object"`
	Embedding []float32 `json:"embedding"`
	Index     int       `json:"index"`
}

type FileData

type FileData struct {
	ID        string `json:"id"`
	Object    string `json:"object"`
	Bytes     int64  `json:"bytes"`
	CreatedAt int64  `json:"created_at"`
	Filename  string `json:"filename"`
	Purpose   string `json:"purpuse"`
}

type FileDeleteResponse

// FileDeleteResponse is the response of DELETE /files/{file_id}.
type FileDeleteResponse struct {
	ID      string `json:"id"`
	Object  string `json:"object"`
	Deleted bool   `json:"deleted"`
}

type FileListResponse

// FileListResponse is the response of GET /files.
type FileListResponse struct {
	Object string     `json:"object"`
	Data   []FileData `json:"data"`
}

type FileRetrieveResponse

type FileRetrieveResponse FileData

type FileUploadRequestBody

// FileUploadRequestBody is the multipart form payload for POST /files.
type FileUploadRequestBody struct {
	File    io.Reader // contents of the file to upload
	Purpose string    // intended purpose of the file
}

func (FileUploadRequestBody) ToMultipartFormData

func (body FileUploadRequestBody) ToMultipartFormData() (*bytes.Buffer, string, error)

type FileUploadResponse

type FileUploadResponse FileData

type FineTuneCancelResponse

// FineTuneCancelResponse is the response of POST /fine-tunes/{id}/cancel.
// NOTE(review): encoding/json has no ",inline" option; the embedded
// FineTuneData is promoted simply because it is embedded, and the explicit
// Events field takes precedence over FineTuneData.Events (shallower depth).
type FineTuneCancelResponse struct {
	Events       []FineTuneEvent `json:"events"`
	FineTuneData `json:",inline"`
}

type FineTuneCreateRequestBody

// FineTuneCreateRequestBody is the request payload for POST /fine-tunes.
type FineTuneCreateRequestBody struct {
	TrainingFile                 string    `json:"training_file"`
	ValidationFile               string    `json:"validation_file,omitempty"`
	Model                        string    `json:"model,omitempty"`
	NEpochs                      int       `json:"n_epochs,omitempty"`
	BatchSize                    int       `json:"batch_size,omitempty"`
	LearningRateMultiplier       float32   `json:"learning_rate_multiplier,omitempty"`
	PromptLossWeight             float32   `json:"prompt_loss_weight,omitempty"`
	ComputeClassificationMetrics bool      `json:"compute_classification_metrics,omitempty"`
	ClassificationNClasses       int       `json:"classification_n_classes,omitempty"`
	ClassificationPositiveClass  string    `json:"classification_positive_class,omitempty"`
	ClassificationBetas          []float32 `json:"classification_betas,omitempty"`
	Suffix                       string    `json:"suffix,omitempty"`
}

type FineTuneCreateResponse

// FineTuneCreateResponse is the response of POST /fine-tunes.
// NOTE(review): ",inline" is not an encoding/json option; embedding alone
// inlines FineTuneData, and the explicit Events field takes precedence.
type FineTuneCreateResponse struct {
	Events       []FineTuneEvent `json:"events"`
	FineTuneData `json:",inline"`
}

type FineTuneData

// FineTuneData describes a fine-tune job: its model, status, files, and
// training hyperparameters.
type FineTuneData struct {
	ID              string          `json:"id"`
	Object          string          `json:"object"`
	Model           string          `json:"model"`
	CreatedAt       int64           `json:"created_at"`
	Events          []FineTuneEvent `json:"events"`
	FineTunedModel  interface{}     `json:"fine_tuned_model"` // TODO: typing
	Hyperparams     Hyperparams     `json:"hyperparams"`
	OrganizationID  string          `json:"organization_id"`
	ResultFiles     []FileData      `json:"result_files"`
	Status          string          `json:"status"`
	ValidationFiles []FileData      `json:"validation_files"`
	TrainingFiles   []FileData      `json:"training_files"`
	UpdatedAt       int64           `json:"updated_at"`
}

type FineTuneDeleteModelResponse

type FineTuneDeleteModelResponse struct {
	ID      string `json:"string"`
	Object  string `json:"object"`
	Deleted bool   `json:"deleted"`
}

type FineTuneEvent

// FineTuneEvent is a single status-update event of a fine-tune job.
type FineTuneEvent struct {
	Object    string `json:"object"`
	CreatedAt int64  `json:"created_at"`
	Level     string `json:"level"`
	Message   string `json:"message"`
}

type FineTuneListEventsResponse

// FineTuneListEventsResponse is the response of GET /fine-tunes/{id}/events.
type FineTuneListEventsResponse struct {
	Object string          `json:"object"`
	Data   []FineTuneEvent `json:"data"`
}

type FineTuneListResponse

// FineTuneListResponse is the response of GET /fine-tunes.
type FineTuneListResponse struct {
	Object string         `json:"object"`
	Data   []FineTuneData `json:"data"`
}

type FineTuneRetrieveResponse

// FineTuneRetrieveResponse is the response of GET /fine-tunes/{id}.
// NOTE(review): ",inline" is not an encoding/json option; embedding alone
// inlines FineTuneData, and the explicit Events field takes precedence.
type FineTuneRetrieveResponse struct {
	Events       []FineTuneEvent `json:"events"`
	FineTuneData `json:",inline"`
}

type Hyperparams

// Hyperparams are the training hyperparameters of a fine-tune job.
type Hyperparams struct {
	BatchSize              int     `json:"batch_size"`
	LearningRateMultiplier float32 `json:"learning_rate_multiplier"`
	NEpochs                int     `json:"n_epochs"`
	PromptLossWeight       float32 `json:"prompt_loss_weight"`
}

type ImageData

// ImageData is one generated image, delivered either as a URL or as a
// base64-encoded payload depending on the requested response_format.
type ImageData struct {
	Base64 string `json:"b64_json"`
	URL    string `json:"url"`
}

type ImageEditRequestBody

// ImageEditRequestBody is the multipart form payload for POST /images/edits.
type ImageEditRequestBody struct {
	// image Required
	// The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.
	// User MUST close it if it's like ReadCloser.
	Image io.Reader

	// n integer Optional Defaults to 1
	// The number of images to generate. Must be between 1 and 10.
	N int

	// size string Optional Defaults to 1024x1024
	// The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
	Size string

	// response_format string Optional Defaults to url
	// The format in which the generated images are returned. Must be one of url or b64_json.
	ResponseFormat string

	// user string Optional
	// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
	// Learn more: https://beta.openai.com/docs/guides/safety-best-practices/end-user-ids
	User string

	// mask string Optional
	// An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be edited.
	// Must be a valid PNG file, less than 4MB, and have the same dimensions as image.
	// User MUST close it if it's like ReadCloser.
	Mask io.Reader

	// prompt string Required
	// A text description of the desired image(s). The maximum length is 1000 characters.
	Prompt string
}

func (ImageEditRequestBody) ToMultipartFormData

func (body ImageEditRequestBody) ToMultipartFormData() (buf *bytes.Buffer, contenttype string, err error)

type ImageEditResponse

type ImageEditResponse ImageResponse

type ImageGenerationRequestBody

// ImageGenerationRequestBody is the request payload for POST /images/generations.
type ImageGenerationRequestBody struct {
	Prompt         string `json:"prompt"`
	N              int    `json:"n,omitempty"`
	Size           string `json:"size,omitempty"`
	ResponseFormat string `json:"response_format,omitempty"`
	User           string `json:"user,omitempty"`
}

type ImageGenerationResponse

type ImageGenerationResponse ImageResponse

type ImageResponse

// ImageResponse is the common response shape of the Images API endpoints.
type ImageResponse struct {
	Created int64       `json:"created"`
	Data    []ImageData `json:"data"`
}

type ImageVariationRequestBody

// ImageVariationRequestBody is the multipart form payload for POST /images/variations.
type ImageVariationRequestBody struct {
	// image Required
	// The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.
	// User MUST close it if it's like ReadCloser.
	Image io.Reader

	// n integer Optional Defaults to 1
	// The number of images to generate. Must be between 1 and 10.
	N int

	// size string Optional Defaults to 1024x1024
	// The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
	Size string

	// response_format string Optional Defaults to url
	// The format in which the generated images are returned. Must be one of url or b64_json.
	ResponseFormat string

	// user string Optional
	// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
	// Learn more: https://beta.openai.com/docs/guides/safety-best-practices/end-user-ids
	User string
}

func (ImageVariationRequestBody) ToMultipartFormData

func (body ImageVariationRequestBody) ToMultipartFormData() (buf *bytes.Buffer, contenttype string, err error)

type ImageVariationResponse

type ImageVariationResponse ImageResponse

type ModelData

// ModelData describes an available model, its owner, and its permissions.
type ModelData struct {
	ID         string            `json:"id"`
	Object     ObjectType        `json:"object"`
	Created    int64             `json:"created"`
	OwnedBy    string            `json:"owned_by"`
	Permission []ModelPermission `json:"permission"`
	Root       string            `json:"root"`
	Parent     string            `json:"parent"`
}

type ModelPermission

// ModelPermission describes what a caller is allowed to do with a model.
type ModelPermission struct {
	ID                 string     `json:"id"`
	Object             ObjectType `json:"object"`
	Created            int64      `json:"created"`
	AllowCreateEngine  bool       `json:"allow_create_engine"`
	AllowSampling      bool       `json:"allow_sampling"`
	AllowLogProbs      bool       `json:"allow_logprobs"`
	AllowSearchIndices bool       `json:"allow_search_indices"`
	AllowView          bool       `json:"allow_view"`
	AllowFineTuning    bool       `json:"allow_fine_tuning"`
	Organization       string     `json:"organization"`
	Group              string     `json:"group"`
	IsBlocking         bool       `json:"is_blocking"`
}

type ModelRetrieveResponse

type ModelRetrieveResponse ModelData

type ModelsListResponse

// ModelsListResponse is the response of GET /models.
type ModelsListResponse struct {
	Data []ModelData `json:"data"`
	// Object now carries an explicit tag so that marshaling emits
	// "object" (lowercase), consistent with the other response types;
	// decoding already matched "object" case-insensitively.
	Object ObjectType `json:"object"`
}

type ModerationCreateRequestBody

// ModerationCreateRequestBody is the request payload for POST /moderations.
type ModerationCreateRequestBody struct {
	Input string `json:"input"`
	Model string `json:"model,omitempty"`
}

type ModerationCreateResponse

// ModerationCreateResponse is the response of POST /moderations.
type ModerationCreateResponse struct {
	ID      string           `json:"id"`
	Model   string           `json:"model"`
	Results []ModerationData `json:"results"`
}

type ModerationData

// ModerationData is one moderation result: per-category boolean flags and
// the corresponding per-category scores.
type ModerationData struct {
	Categories struct {
		Hate            bool `json:"hate"`
		HateThreatening bool `json:"hate/threatening"`
		SelfHarm        bool `json:"self-harm"`
		Sexual          bool `json:"sexual"`
		SexualMinors    bool `json:"sexual/minors"`
		Violence        bool `json:"violence"`
		ViolenceGraphic bool `json:"violence/graphic"`
	} `json:"categories"`
	CategoryScores struct {
		Hate            float32 `json:"hate"`
		HateThreatening float32 `json:"hate/threatening"`
		SelfHarm        float32 `json:"self-harm"`
		Sexual          float32 `json:"sexual"`
		SexualMinors    float32 `json:"sexual/minors"`
		Violence        float32 `json:"violence"`
		ViolenceGraphic float32 `json:"violence/graphic"`
	} `json:"category_scores"`
}

type MultipartFormDataRequestBody

// MultipartFormDataRequestBody is implemented by request bodies that are
// sent as multipart/form-data; ToMultipartFormData returns the encoded
// body and its Content-Type header value.
type MultipartFormDataRequestBody interface {
	ToMultipartFormData() (*bytes.Buffer, string, error)
}

type ObjectType

// ObjectType is the "object" discriminator string returned by the API.
type ObjectType string
// Known ObjectType values.
const (
	OTModel           ObjectType = "model"
	OTModelPermission ObjectType = "model_permission"
	OTList            ObjectType = "list"
	OTEdit            ObjectType = "edit"
	OTTextCompletion  ObjectType = "text_completion"
	// NOTE(review): OTEEmbedding has a doubled "E" in its name; kept as-is
	// because renaming an exported identifier would break callers.
	OTEEmbedding      ObjectType = "embedding"
	OTFile            ObjectType = "file"
	OTFineTune        ObjectType = "fine-tune"
	OTFineTuneEvent   ObjectType = "fine-tune-event"
)

type Usage

// Usage reports token consumption for a request: prompt tokens,
// completion tokens, and their total.
type Usage struct {
	PromptTokens     int `json:"prompt_tokens"`
	CompletionTokens int `json:"completion_tokens"`
	TotalTokens      int `json:"total_tokens"`
}

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL