Documentation ¶
Index ¶
- Variables
- type ConfigOption
- type ConfigOptions
- type LLM
- func (o *LLM) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error)
- func (o *LLM) Generate(ctx context.Context, prompts []string, options ...llms.CallOption) ([]*llms.Generation, error)
- func (o *LLM) GeneratePrompt(ctx context.Context, prompts []schema.PromptValue, options ...llms.CallOption) (llms.LLMResult, error)
- func (o *LLM) GetNumTokens(text string) int
- type Request
- type Response
Constants ¶
This section is empty.
Variables ¶
var ErrEmptyResponse = errors.New("empty response")
var (
	ErrMissingRegion = errors.New("empty region")
)
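Both variables are sentinel errors, so callers can match them with errors.Is even when they come back wrapped. A minimal, self-contained sketch; the sentinels are redefined locally here only to make the snippet runnable, real code checks the package's exported ErrMissingRegion and ErrEmptyResponse directly.

package main

import (
	"errors"
	"fmt"
)

// Local stand-ins for the package's sentinel errors, so this sketch compiles
// on its own; in real code, reference the package's exported variables.
var (
	errEmptyResponse = errors.New("empty response")
	errMissingRegion = errors.New("empty region")
)

func main() {
	// Callers typically wrap errors from this package; errors.Is still
	// matches the sentinel through the wrapping.
	err := fmt.Errorf("invoking bedrock model: %w", errMissingRegion)

	switch {
	case errors.Is(err, errMissingRegion):
		fmt.Println("no AWS region configured; set one before building the client")
	case errors.Is(err, errEmptyResponse):
		fmt.Println("the model returned no completion text")
	default:
		fmt.Println("unexpected error:", err)
	}
}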
Functions ¶
This section is empty.
Types ¶
type ConfigOption ¶
type ConfigOption func(*ConfigOptions)
func DontUseHumanAssistantPrompt ¶
func DontUseHumanAssistantPrompt() ConfigOption
func WithBedrockRuntimeClient ¶
func WithBedrockRuntimeClient(client *bedrockruntime.Client) ConfigOption
func WithModel ¶
func WithModel(modelID string) ConfigOption
type ConfigOptions ¶
type ConfigOptions struct {
	DontUseHumanAssistantPrompt bool
	BedrockRuntimeClient        *bedrockruntime.Client
	ModelID                     string
}
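The three ConfigOption constructors follow the functional-options pattern: each returns a closure that mutates a ConfigOptions value. The sketch below wires them together; the constructor name New and the import path of this package are assumptions (no constructor appears in this extract), the model ID is only an example, and DontUseHumanAssistantPrompt presumably disables the Human/Assistant prompt framing the package would otherwise add.

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"

	bedrockllm "example.com/yourmodule/llms/bedrock" // hypothetical import path for this package
)

func main() {
	ctx := context.Background()

	// A region must be available (here set explicitly); otherwise the
	// package may report ErrMissingRegion.
	cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion("us-east-1"))
	if err != nil {
		log.Fatal(err)
	}

	// New is assumed here as the package constructor accepting ConfigOption
	// values; each option mutates the resulting ConfigOptions.
	llm, err := bedrockllm.New(
		bedrockllm.WithBedrockRuntimeClient(bedrockruntime.NewFromConfig(cfg)),
		bedrockllm.WithModel("anthropic.claude-v2"),
		bedrockllm.DontUseHumanAssistantPrompt(),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = llm
}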
type LLM ¶
func (*LLM) Call ¶
func (o *LLM) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error)
func (*LLM) Generate ¶
func (o *LLM) Generate(ctx context.Context, prompts []string, options ...llms.CallOption) ([]*llms.Generation, error)
func (*LLM) GeneratePrompt ¶
func (o *LLM) GeneratePrompt(ctx context.Context, prompts []schema.PromptValue, options ...llms.CallOption) (llms.LLMResult, error)
func (*LLM) GetNumTokens ¶
func (o *LLM) GetNumTokens(text string) int
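Call is the single-prompt convenience over Generate, which takes a slice of prompts and returns a []*llms.Generation, and GetNumTokens gives a rough token count for budgeting input. A hedged usage sketch; the import path for this package is hypothetical and the *LLM value is assumed to come from the options-based construction shown above.

package bedrockexample

import (
	"context"
	"fmt"

	bedrockllm "example.com/yourmodule/llms/bedrock" // hypothetical import path for this package
)

// summarize sends a single prompt through Call; use Generate instead when you
// have several prompts and want a []*llms.Generation back.
func summarize(ctx context.Context, llm *bedrockllm.LLM, text string) (string, error) {
	prompt := "Summarize the following text in one sentence:\n\n" + text

	// GetNumTokens gives a rough token count for the prompt.
	fmt.Printf("prompt is roughly %d tokens\n", llm.GetNumTokens(prompt))

	return llm.Call(ctx, prompt)
}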
type Response ¶
type Response struct {
	Completion string `json:"completion"`
}
func ProcessStreamingOutput ¶
func ProcessStreamingOutput(output *bedrockruntime.InvokeModelWithResponseStreamOutput, handler func(ctx context.Context, chunk []byte) error) (Response, error)
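ProcessStreamingOutput pairs with the AWS SDK's InvokeModelWithResponseStream call: the handler is invoked for each streamed chunk, and the returned Response carries the final completion. A hedged sketch; the import path for this package is hypothetical, and the request-body fields assume the legacy Claude text-completion API rather than whatever format the package itself builds.

package bedrockexample

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"

	bedrockllm "example.com/yourmodule/llms/bedrock" // hypothetical import path for this package
)

// streamCompletion invokes a Claude completion model with streaming and lets
// ProcessStreamingOutput assemble the chunks into a final Response.
func streamCompletion(ctx context.Context, client *bedrockruntime.Client, prompt string) (string, error) {
	// Request body for the legacy Claude completion API; the exact fields
	// are an assumption in this sketch.
	body, err := json.Marshal(map[string]any{
		"prompt":               prompt,
		"max_tokens_to_sample": 256,
	})
	if err != nil {
		return "", err
	}

	out, err := client.InvokeModelWithResponseStream(ctx, &bedrockruntime.InvokeModelWithResponseStreamInput{
		ModelId:     aws.String("anthropic.claude-v2"),
		ContentType: aws.String("application/json"),
		Body:        body,
	})
	if err != nil {
		return "", err
	}

	// The handler runs once per streamed chunk; here it simply echoes the
	// bytes as they arrive.
	resp, err := bedrockllm.ProcessStreamingOutput(out, func(ctx context.Context, chunk []byte) error {
		fmt.Print(string(chunk))
		return nil
	})
	if err != nil {
		return "", err
	}
	return resp.Completion, nil
}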