openAi

package
v0.1.1
Published: Jan 29, 2024 License: MIT Imports: 5 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

This section is empty.

Types

type ChatCompletion

type ChatCompletion struct {
	// A unique identifier for the chat completion.
	ID string `json:"id"`
	// A list of chat completion Choices. Can be more than one if n is greater than 1.
	Choices []ChatCompletionChoice `json:"choices"`
	// The Unix timestamp (in seconds) of when the chat completion was Created.
	Created int `json:"created"`
	// The Model used for the chat completion.
	Model string `json:"model"`
	// This fingerprint represents the backend configuration that the model runs with.
	// Can be used in conjunction with the seed request parameter to understand
	// when backend changes have been made that might impact determinism.
	System_fingerprint string `json:"system_fingerprint"`
	// The Object type, which is always chat.completion.
	Object string `json:"object"`
	// Usage statistics for the completion request.
	Usage chatCompletionUsage `json:"usage"`
}

Represents a chat completion response returned by the model, based on the provided input. (https://platform.openai.com/docs/api-reference/chat/object)

type ChatCompletionChoice

type ChatCompletionChoice struct {
	// The reason the model stopped generating tokens.
	// This will be stop if the model hit a natural stop
	// point or a provided stop sequence, length if the
	// maximum number of tokens specified in the request
	// was reached, content_filter if content was omitted
	// due to a flag from our content filters, tool_calls
	// if the model called a tool, or function_call (deprecated)
	// if the model called a function.
	Finish_reason string `json:"finish_reason"`
	// The Index of the choice in the list of choices.
	Index int `json:"index"`
	// A chat completion Message generated by the model.
	Message Message `json:"message"`
}

A single chat completion choice. The response's Choices list can contain more than one if n is greater than 1.
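
As a usage sketch (not part of the package), the snippet below inspects a ChatCompletion using only the exported fields documented above. It assumes the response came from Client.CreateChatCompletion (documented below); the import path is hypothetical and must be replaced with this module's real path.

package example

import (
	"fmt"

	openAi "example.com/openai" // hypothetical import path
)

// printFirstChoice prints the first choice of a response and flags
// truncated output.
func printFirstChoice(resp *openAi.ChatCompletion) {
	if len(resp.Choices) == 0 {
		fmt.Println("no choices returned")
		return
	}
	choice := resp.Choices[0]
	// finish_reason "length" means the output hit max_tokens or the
	// model's context limit and may be cut off.
	if choice.Finish_reason == "length" {
		fmt.Println("warning: output truncated")
	}
	fmt.Printf("model %s: %s\n", resp.Model, choice.Message.Content)
}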

type ChatCompletionChunk

type ChatCompletionChunk struct {
	// A unique identifier for the chat completion. Each chunk has the same ID.
	ID string `json:"id"`
	// A list of chat completion Choices. Can be more than one if n is greater than 1.
	Choices []chatCompletionChunkChoice `json:"choices"`
	// The Unix timestamp (in seconds) of when the chat completion was Created.
	Created int `json:"created"`
	// The Model used for the chat completion.
	Model string `json:"model"`
	// This fingerprint represents the backend configuration that the model runs with.
	// Can be used in conjunction with the seed request parameter to understand
	// when backend changes have been made that might impact determinism.
	System_fingerprint string `json:"system_fingerprint"`
	// The Object type, which is always chat.completion.chunk.
	Object string `json:"object"`
}
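
A hedged sketch of draining the stream returned by Client.CreateChatCompletionStream (documented below). The element type of Choices, chatCompletionChunkChoice, is unexported and its fields are not shown on this page, so the sketch only counts deltas per chunk; it also assumes the client closes the channel once the stream ends, so the range loop terminates. The import path is hypothetical.

package example

import (
	"fmt"
	"log"

	openAi "example.com/openai" // hypothetical import path
)

func drainStream(client openAi.Client, cmd openAi.CreateChatCompletionCommand) {
	stream, err := client.CreateChatCompletionStream(cmd)
	if err != nil {
		log.Fatal(err)
	}
	// Assumes the client closes the channel after the final chunk.
	for chunk := range stream {
		fmt.Printf("chunk %s: %d choice delta(s)\n", chunk.ID, len(chunk.Choices))
	}
}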

type Client

type Client interface {
	CreateChatCompletion(command CreateChatCompletionCommand) (*ChatCompletion, error)
	CreateChatCompletionStream(command CreateChatCompletionCommand) (<-chan ChatCompletionChunk, error)
}

func NewClient

func NewClient(apiKey string) Client
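
A minimal end-to-end sketch, assuming the API key is supplied via the OPENAI_API_KEY environment variable; the import path is hypothetical.

package main

import (
	"fmt"
	"log"
	"os"

	openAi "example.com/openai" // hypothetical import path
)

func main() {
	client := openAi.NewClient(os.Getenv("OPENAI_API_KEY"))

	resp, err := client.CreateChatCompletion(openAi.CreateChatCompletionCommand{
		Model: "gpt-3.5-turbo",
		Messages: []openAi.Message{
			{Role: "user", Content: "Say hello in one word."},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Choices[0].Message.Content)
}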

type CreateChatCompletionCommand

type CreateChatCompletionCommand struct {
	// A list of Messages comprising the conversation so far.
	// [Example Python Code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models)
	Messages []Message `json:"messages"`
	// ID of the Model to use. See the
	// [Model endpoint compatibility table](https://platform.openai.com/docs/models/Model-endpoint-compatibility)
	// for details on which models work with the Chat API.
	Model string `json:"model"`
	// Number between -2.0 and 2.0. Positive values penalize new tokens
	// based on their existing frequency in the text so far, decreasing the model's
	// likelihood to repeat the same line verbatim.
	// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
	Frequency_penalty *float32 `json:"frequency_penalty,omitempty"`
	// The maximum number of [tokens](https://platform.openai.com/tokenizer)
	// that can be generated in the chat completion. The total length of input
	// tokens and generated tokens is limited by the model's context length.
	// [Example Python code for counting tokens.](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
	Max_tokens *int `json:"max_tokens,omitempty"`
	// How many chat completion choices to generate for each input message.
	// Note that you will be charged based on the number of generated tokens
	// across all of the choices. Keep N as 1 to minimize costs.
	N *int `json:"n,omitempty"`
	// Number between -2.0 and 2.0. Positive values penalize new tokens based
	// on whether they appear in the text so far, increasing the model's
	// likelihood to talk about new topics.
	// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
	Presence_penalty *float32 `json:"presence_penalty,omitempty"`
	// An object specifying the format that the model must output.
	// Compatible with gpt-4-1106-preview and gpt-3.5-turbo-1106.
	// Setting to { "type": "json_object" } enables JSON mode,
	// which guarantees the message the model generates is valid JSON.
	// Important: when using JSON mode, you must also instruct the model to
	// produce JSON yourself via a system or user message. Without this,
	// the model may generate an unending stream of whitespace until the generation
	// reaches the token limit, resulting in a long-running and seemingly "stuck" request.
	// Also note that the message content may be partially cut off if
	// finish_reason="length", which indicates the generation exceeded max_tokens
	// or the conversation exceeded the max context length.
	Response_format *ResponseFormat `json:"response_format,omitempty"`
	// Up to 4 sequences where the API will Stop generating further tokens.
	Stop *[]string `json:"stop,omitempty"`
	// If set, partial message deltas will be sent, like in ChatGPT.
	// Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
	// as they become available, with the Stream terminated by a
	// data: [DONE] message. [Example Python code.](https://cookbook.openai.com/examples/how_to_stream_completions)
	Stream *bool `json:"stream,omitempty"`
	// What sampling Temperature to use, between 0 and 2. Higher values
	// like 0.8 will make the output more random, while lower values like 0.2
	// will make it more focused and deterministic. We generally recommend
	// altering this or top_p but not both.
	Temperature *float32 `json:"temperature,omitempty"`
	// An alternative to sampling with temperature, called nucleus sampling,
	// where the model considers the results of the tokens with Top_p
	// probability mass. So 0.1 means only the tokens comprising the top
	// 10% probability mass are considered. We generally recommend altering
	// this or temperature but not both.
	Top_p *float32 `json:"top_p,omitempty"`
	// A unique identifier representing your end-User, which can help OpenAI
	// to monitor and detect abuse. [Learn more.](https://platform.openai.com/docs/guides/safety-best-practices/end-User-ids)
	User *string `json:"user,omitempty"`
}
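
Because the optional fields are pointers, setting them takes an extra step. A sketch, using a small generic ptr helper that is local to the example, not part of this package; the import path is hypothetical.

package example

import openAi "example.com/openai" // hypothetical import path

// ptr is a local helper for taking the address of a literal.
func ptr[T any](v T) *T { return &v }

func buildCommand() openAi.CreateChatCompletionCommand {
	return openAi.CreateChatCompletionCommand{
		Model: "gpt-3.5-turbo",
		Messages: []openAi.Message{
			{Role: "user", Content: "Summarize Go interfaces in two sentences."},
		},
		Temperature: ptr(float32(0.2)), // focused, mostly deterministic output
		Max_tokens:  ptr(256),
		N:           ptr(1), // keep N as 1 to minimize costs
		Stop:        &[]string{"\n\n"},
		User:        ptr("user-1234"),
	}
}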

type Message

type Message struct {
	// The contents of the message.
	Content string `json:"content"`
	// The Role of the message's author: one of
	// "system", "user", "assistant", or "tool".
	Role string `json:"role"`
	// An optional Name for the participant.
	// Provides the model information to differentiate
	// between participants of the same role.
	Name *string `json:"name,omitempty"`
	// The tool calls generated by the model, such as function calls.
	Tool_calls *[]toolCall `json:"tool_calls,omitempty"`
}
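
A sketch of building a conversation; Name is optional and shown here only on the first user turn. The import path is hypothetical.

package example

import openAi "example.com/openai" // hypothetical import path

func conversation() []openAi.Message {
	name := "alice"
	return []openAi.Message{
		{Role: "system", Content: "You are a terse assistant."},
		{Role: "user", Content: "What is a goroutine?", Name: &name},
		{Role: "assistant", Content: "A lightweight thread managed by the Go runtime."},
		{Role: "user", Content: "And a channel?"},
	}
}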

type OpenAiErrResponse

type OpenAiErrResponse struct {
	Error OpenAiError `json:"error"`
}

type OpenAiError

type OpenAiError struct {
	Message string  `json:"message"`
	Type    string  `json:"type"`
	Param   *string `json:"param"`
	Code    *string `json:"code"`
}

func (*OpenAiError) Error

func (e *OpenAiError) Error() string
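
Since *OpenAiError implements the error interface, API failures can be picked apart with errors.As. This sketch assumes the client returns (or wraps) a *OpenAiError when the API responds with an error body; the import path is hypothetical.

package example

import (
	"errors"
	"log"

	openAi "example.com/openai" // hypothetical import path
)

func call(client openAi.Client, cmd openAi.CreateChatCompletionCommand) {
	_, err := client.CreateChatCompletion(cmd)
	if err != nil {
		var apiErr *openAi.OpenAiError
		if errors.As(err, &apiErr) {
			log.Fatalf("API error (type=%s): %s", apiErr.Type, apiErr.Message)
		}
		log.Fatal(err) // transport or decoding failure
	}
}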

type ResponseFormat

type ResponseFormat struct {
	// Must be one of text or json_object.
	Typ *string `json:"type,omitempty"`
}
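
A sketch of enabling JSON mode. Per the Response_format notes above, the prompt itself must also ask for JSON; the import path is hypothetical.

package example

import openAi "example.com/openai" // hypothetical import path

func jsonModeCommand() openAi.CreateChatCompletionCommand {
	jsonType := "json_object"
	return openAi.CreateChatCompletionCommand{
		Model: "gpt-3.5-turbo-1106", // listed above as JSON-mode compatible
		Messages: []openAi.Message{
			// JSON mode requires instructing the model to emit JSON.
			{Role: "system", Content: "Reply with a single JSON object."},
			{Role: "user", Content: "List three Go keywords."},
		},
		Response_format: &openAi.ResponseFormat{Typ: &jsonType},
	}
}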
