cubicpb

package
v1.6.5 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Mar 16, 2023 License: Apache-2.0 Imports: 12 Imported by: 4

Documentation

Index

Constants

This section is empty.

Variables

View Source
var (
	// RecognitionConfig_Encoding_name maps RecognitionConfig_Encoding enum
	// values to their proto-defined string names.
	RecognitionConfig_Encoding_name = map[int32]string{
		0: "RAW_LINEAR16",
		1: "WAV",
		2: "MP3",
		3: "FLAC",
		4: "VOX8000",
		5: "ULAW8000",
		6: "ALAW8000",
		7: "OPUS",
	}
	// RecognitionConfig_Encoding_value is the inverse of
	// RecognitionConfig_Encoding_name: it maps string names back to enum values.
	RecognitionConfig_Encoding_value = map[string]int32{
		"RAW_LINEAR16": 0,
		"WAV":          1,
		"MP3":          2,
		"FLAC":         3,
		"VOX8000":      4,
		"ULAW8000":     5,
		"ALAW8000":     6,
		"OPUS":         7,
	}
)

Enum value maps for RecognitionConfig_Encoding.

View Source
var File_cubic_proto protoreflect.FileDescriptor

Functions

func RegisterCubicServer

func RegisterCubicServer(s *grpc.Server, srv CubicServer)

Types

type CompileContextRequest added in v1.5.0

// CompileContextRequest is the top-level message sent by the client for the
// `CompileContext` request. It pairs a list of phrases or words with a
// context token included in the model being used.
type CompileContextRequest struct {

	// Unique identifier of the model to compile the context information for. The
	// model chosen needs to support context which can be verified by checking its
	// `ModelAttributes.ContextInfo` obtained via `ListModels`.
	ModelId string `protobuf:"bytes,1,opt,name=model_id,json=modelId,proto3" json:"model_id,omitempty"`
	// The token that is associated with the provided list of phrases or words
	// (e.g. "menu_item", "airport" etc.). Must be one of the tokens included in
	// the model being used, which can be retrieved by calling the `ListModels`
	// method.
	Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"`
	// List of phrases and/or words to be compiled.
	Phrases []*ContextPhrase `protobuf:"bytes,3,rep,name=phrases,proto3" json:"phrases,omitempty"`
	// contains filtered or unexported fields
}

The top-level message sent by the client for the `CompileContext` request. It contains a list of phrases or words, paired with a context token included in the model being used. The token specifies a category such as "menu_item", "airport", "contact", "product_name" etc. The context token is used to determine the places in the recognition output where the provided list of phrases or words may appear. The allowed context tokens for a given model can be found in its `ModelAttributes.ContextInfo` obtained via the `ListModels` method.

func (*CompileContextRequest) Descriptor deprecated added in v1.5.0

func (*CompileContextRequest) Descriptor() ([]byte, []int)

Deprecated: Use CompileContextRequest.ProtoReflect.Descriptor instead.

func (*CompileContextRequest) GetModelId added in v1.5.0

func (x *CompileContextRequest) GetModelId() string

func (*CompileContextRequest) GetPhrases added in v1.5.0

func (x *CompileContextRequest) GetPhrases() []*ContextPhrase

func (*CompileContextRequest) GetToken added in v1.5.0

func (x *CompileContextRequest) GetToken() string

func (*CompileContextRequest) ProtoMessage added in v1.5.0

func (*CompileContextRequest) ProtoMessage()

func (*CompileContextRequest) ProtoReflect added in v1.5.0

func (x *CompileContextRequest) ProtoReflect() protoreflect.Message

func (*CompileContextRequest) Reset added in v1.5.0

func (x *CompileContextRequest) Reset()

func (*CompileContextRequest) String added in v1.5.0

func (x *CompileContextRequest) String() string

type CompileContextResponse added in v1.5.0

// CompileContextResponse is the message returned to the client by the
// `CompileContext` method.
type CompileContextResponse struct {

	// Context information in a compact form that is efficient for use in
	// subsequent recognition requests. The size of the compiled form will depend
	// on the amount of text that was sent for compilation. For 1000 words it's
	// generally less than 100 kilobytes.
	Context *CompiledContext `protobuf:"bytes,1,opt,name=context,proto3" json:"context,omitempty"`
	// contains filtered or unexported fields
}

The message returned to the client by the `CompileContext` method.

func (*CompileContextResponse) Descriptor deprecated added in v1.5.0

func (*CompileContextResponse) Descriptor() ([]byte, []int)

Deprecated: Use CompileContextResponse.ProtoReflect.Descriptor instead.

func (*CompileContextResponse) GetContext added in v1.5.0

func (x *CompileContextResponse) GetContext() *CompiledContext

func (*CompileContextResponse) ProtoMessage added in v1.5.0

func (*CompileContextResponse) ProtoMessage()

func (*CompileContextResponse) ProtoReflect added in v1.5.0

func (x *CompileContextResponse) ProtoReflect() protoreflect.Message

func (*CompileContextResponse) Reset added in v1.5.0

func (x *CompileContextResponse) Reset()

func (*CompileContextResponse) String added in v1.5.0

func (x *CompileContextResponse) String() string

type CompiledContext added in v1.5.0

// CompiledContext holds context information in a compact form that is
// efficient for use in subsequent recognition requests.
type CompiledContext struct {

	// The context information compiled by the `CompileContext` method.
	Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
	// contains filtered or unexported fields
}

Context information in a compact form that is efficient for use in subsequent recognition requests. The size of the compiled form will depend on the amount of text that was sent for compilation. For 1000 words it's generally less than 100 kilobytes.

func (*CompiledContext) Descriptor deprecated added in v1.5.0

func (*CompiledContext) Descriptor() ([]byte, []int)

Deprecated: Use CompiledContext.ProtoReflect.Descriptor instead.

func (*CompiledContext) GetData added in v1.5.0

func (x *CompiledContext) GetData() []byte

func (*CompiledContext) ProtoMessage added in v1.5.0

func (*CompiledContext) ProtoMessage()

func (*CompiledContext) ProtoReflect added in v1.5.0

func (x *CompiledContext) ProtoReflect() protoreflect.Message

func (*CompiledContext) Reset added in v1.5.0

func (x *CompiledContext) Reset()

func (*CompiledContext) String added in v1.5.0

func (x *CompiledContext) String() string

type ConfusionNetworkArc

// ConfusionNetworkArc is an arc inside a confusion network link.
type ConfusionNetworkArc struct {

	// Word in the recognized transcript
	Word string `protobuf:"bytes,1,opt,name=word,proto3" json:"word,omitempty"`
	// Confidence estimate between 0 and 1.  A higher number represents a higher
	// likelihood that the word was correctly recognized.
	Confidence float64 `protobuf:"fixed64,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// contains filtered or unexported fields
}

An Arc inside a Confusion Network Link

func (*ConfusionNetworkArc) Descriptor deprecated

func (*ConfusionNetworkArc) Descriptor() ([]byte, []int)

Deprecated: Use ConfusionNetworkArc.ProtoReflect.Descriptor instead.

func (*ConfusionNetworkArc) GetConfidence

func (x *ConfusionNetworkArc) GetConfidence() float64

func (*ConfusionNetworkArc) GetWord

func (x *ConfusionNetworkArc) GetWord() string

func (*ConfusionNetworkArc) ProtoMessage

func (*ConfusionNetworkArc) ProtoMessage()

func (*ConfusionNetworkArc) ProtoReflect added in v1.5.0

func (x *ConfusionNetworkArc) ProtoReflect() protoreflect.Message

func (*ConfusionNetworkArc) Reset

func (x *ConfusionNetworkArc) Reset()

func (*ConfusionNetworkArc) String

func (x *ConfusionNetworkArc) String() string
// ConfusionNetworkLink is a link inside a confusion network.
type ConfusionNetworkLink struct {

	// Time offset relative to the beginning of audio received by the recognizer
	// and corresponding to the start of this link
	StartTime *duration.Duration `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	// Duration of the current link in the confusion network
	Duration *duration.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"`
	// Arcs contained in this link
	Arcs []*ConfusionNetworkArc `protobuf:"bytes,3,rep,name=arcs,proto3" json:"arcs,omitempty"`
	// contains filtered or unexported fields
}

A Link inside a confusion network

func (*ConfusionNetworkLink) Descriptor deprecated

func (*ConfusionNetworkLink) Descriptor() ([]byte, []int)

Deprecated: Use ConfusionNetworkLink.ProtoReflect.Descriptor instead.

func (*ConfusionNetworkLink) GetArcs

func (x *ConfusionNetworkLink) GetArcs() []*ConfusionNetworkArc

func (*ConfusionNetworkLink) GetDuration

func (x *ConfusionNetworkLink) GetDuration() *duration.Duration

func (*ConfusionNetworkLink) GetStartTime

func (x *ConfusionNetworkLink) GetStartTime() *duration.Duration

func (*ConfusionNetworkLink) ProtoMessage

func (*ConfusionNetworkLink) ProtoMessage()

func (*ConfusionNetworkLink) ProtoReflect added in v1.5.0

func (x *ConfusionNetworkLink) ProtoReflect() protoreflect.Message

func (*ConfusionNetworkLink) Reset

func (x *ConfusionNetworkLink) Reset()

func (*ConfusionNetworkLink) String

func (x *ConfusionNetworkLink) String() string

type ContextInfo added in v1.5.0

// ContextInfo describes model information specific to supporting
// recognition context.
type ContextInfo struct {

	// If this is set to true, the model supports taking context information into
	// account to aid speech recognition. The information may be sent with
	// recognition requests via RecognitionContext inside RecognitionConfig.
	SupportsContext bool `protobuf:"varint,1,opt,name=supports_context,json=supportsContext,proto3" json:"supports_context,omitempty"`
	// A list of tokens (e.g. "name", "airport" etc.) that serve as placeholders
	// in the model where a client provided list of phrases or words may be used
	// to aid speech recognition and produce the exact desired recognition output.
	AllowedContextTokens []string `protobuf:"bytes,2,rep,name=allowed_context_tokens,json=allowedContextTokens,proto3" json:"allowed_context_tokens,omitempty"`
	// contains filtered or unexported fields
}

Model information specific to supporting recognition context.

func (*ContextInfo) Descriptor deprecated added in v1.5.0

func (*ContextInfo) Descriptor() ([]byte, []int)

Deprecated: Use ContextInfo.ProtoReflect.Descriptor instead.

func (*ContextInfo) GetAllowedContextTokens added in v1.5.0

func (x *ContextInfo) GetAllowedContextTokens() []string

func (*ContextInfo) GetSupportsContext added in v1.5.0

func (x *ContextInfo) GetSupportsContext() bool

func (*ContextInfo) ProtoMessage added in v1.5.0

func (*ContextInfo) ProtoMessage()

func (*ContextInfo) ProtoReflect added in v1.5.0

func (x *ContextInfo) ProtoReflect() protoreflect.Message

func (*ContextInfo) Reset added in v1.5.0

func (x *ContextInfo) Reset()

func (*ContextInfo) String added in v1.5.0

func (x *ContextInfo) String() string

type ContextPhrase added in v1.5.0

// ContextPhrase is a phrase or word to be compiled into context information
// that can later be used to improve speech recognition during a `Recognize`
// or `StreamingRecognize` call.
type ContextPhrase struct {

	// The actual phrase or word.
	Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
	// This is an optional field. The boost value is a positive number which is
	// used to increase the probability of the phrase or word appearing in the
	// output. This setting can be used to differentiate between similar sounding
	// words, with the desired word given a bigger boost value.
	//
	// By default, all phrases or words are given an equal probability of 1/N
	// (where N = total number of phrases or words). If a boost value is provided,
	// the new probability is (boost + 1) * 1/N. We normalize the boosted
	// probabilities for all the phrases or words so that they sum to one. This
	// means that the boost value only has an effect if there are relative
	// differences in the values for different phrases or words. That is, if all
	// phrases or words have the same boost value, after normalization they will
	// all still have the same probability. This also means that the boost value
	// can be any positive value, but it is best to stick between 0 to 20.
	//
	// Negative values are not supported and will be treated as 0 values.
	Boost float32 `protobuf:"fixed32,2,opt,name=boost,proto3" json:"boost,omitempty"`
	// contains filtered or unexported fields
}

A phrase or word that is to be compiled into context information that can be later used to improve speech recognition during a `Recognize` or `StreamingRecognize` call. Along with the phrase or word itself, there is an optional boost parameter that can be used to boost the likelihood of the phrase or word in the recognition output.

func (*ContextPhrase) Descriptor deprecated added in v1.5.0

func (*ContextPhrase) Descriptor() ([]byte, []int)

Deprecated: Use ContextPhrase.ProtoReflect.Descriptor instead.

func (*ContextPhrase) GetBoost added in v1.5.0

func (x *ContextPhrase) GetBoost() float32

func (*ContextPhrase) GetText added in v1.5.0

func (x *ContextPhrase) GetText() string

func (*ContextPhrase) ProtoMessage added in v1.5.0

func (*ContextPhrase) ProtoMessage()

func (*ContextPhrase) ProtoReflect added in v1.5.0

func (x *ContextPhrase) ProtoReflect() protoreflect.Message

func (*ContextPhrase) Reset added in v1.5.0

func (x *ContextPhrase) Reset()

func (*ContextPhrase) String added in v1.5.0

func (x *ContextPhrase) String() string

type CubicClient

// CubicClient is the client API for the Cubic service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please
// refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type CubicClient interface {
	// Queries the Version of the Server
	Version(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*VersionResponse, error)
	// Retrieves a list of available speech recognition models
	ListModels(ctx context.Context, in *ListModelsRequest, opts ...grpc.CallOption) (*ListModelsResponse, error)
	// Performs synchronous speech recognition: receive results after all audio
	// has been sent and processed.  It is expected that this request be typically
	// used for short audio content: less than a minute long.  For longer content,
	// the `StreamingRecognize` method should be preferred.
	Recognize(ctx context.Context, in *RecognizeRequest, opts ...grpc.CallOption) (*RecognitionResponse, error)
	// Performs bidirectional streaming speech recognition.  Receive results while
	// sending audio.  This method is only available via GRPC and not via
	// HTTP+JSON. However, a web browser may use websockets to use this service.
	StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (Cubic_StreamingRecognizeClient, error)
	// Compiles recognition context information, such as a specialized list of
	// words or phrases, into a compact, efficient form to send with subsequent
	// `Recognize` or `StreamingRecognize` requests to customize speech
	// recognition. For example, a list of contact names may be compiled in a
	// mobile app and sent with each recognition request so that the app user's
	// contact names are more likely to be recognized than arbitrary names. This
	// pre-compilation ensures that there is no added latency for the recognition
	// request. It is important to note that in order to compile context for a
	// model, that model has to support context in the first place, which can be
	// verified by checking its `ModelAttributes.ContextInfo` obtained via the
	// `ListModels` method. Also, the compiled data will be model specific; that
	// is, the data compiled for one model will generally not be usable with a
	// different model.
	CompileContext(ctx context.Context, in *CompileContextRequest, opts ...grpc.CallOption) (*CompileContextResponse, error)
}

CubicClient is the client API for Cubic service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewCubicClient

func NewCubicClient(cc grpc.ClientConnInterface) CubicClient

type CubicServer

// CubicServer is the server API for the Cubic service.
type CubicServer interface {
	// Queries the Version of the Server
	Version(context.Context, *empty.Empty) (*VersionResponse, error)
	// Retrieves a list of available speech recognition models
	ListModels(context.Context, *ListModelsRequest) (*ListModelsResponse, error)
	// Performs synchronous speech recognition: receive results after all audio
	// has been sent and processed.  It is expected that this request be typically
	// used for short audio content: less than a minute long.  For longer content,
	// the `StreamingRecognize` method should be preferred.
	Recognize(context.Context, *RecognizeRequest) (*RecognitionResponse, error)
	// Performs bidirectional streaming speech recognition.  Receive results while
	// sending audio.  This method is only available via GRPC and not via
	// HTTP+JSON. However, a web browser may use websockets to use this service.
	StreamingRecognize(Cubic_StreamingRecognizeServer) error
	// Compiles recognition context information, such as a specialized list of
	// words or phrases, into a compact, efficient form to send with subsequent
	// `Recognize` or `StreamingRecognize` requests to customize speech
	// recognition. For example, a list of contact names may be compiled in a
	// mobile app and sent with each recognition request so that the app user's
	// contact names are more likely to be recognized than arbitrary names. This
	// pre-compilation ensures that there is no added latency for the recognition
	// request. It is important to note that in order to compile context for a
	// model, that model has to support context in the first place, which can be
	// verified by checking its `ModelAttributes.ContextInfo` obtained via the
	// `ListModels` method. Also, the compiled data will be model specific; that
	// is, the data compiled for one model will generally not be usable with a
	// different model.
	CompileContext(context.Context, *CompileContextRequest) (*CompileContextResponse, error)
}

CubicServer is the server API for Cubic service.

type Cubic_StreamingRecognizeClient

// Cubic_StreamingRecognizeClient is the client-side stream interface for the
// `StreamingRecognize` method: the client sends StreamingRecognizeRequest
// messages and receives RecognitionResponse messages.
type Cubic_StreamingRecognizeClient interface {
	// Send sends a StreamingRecognizeRequest on the stream.
	Send(*StreamingRecognizeRequest) error
	// Recv receives the next RecognitionResponse from the stream.
	Recv() (*RecognitionResponse, error)
	grpc.ClientStream
}

type Cubic_StreamingRecognizeServer

// Cubic_StreamingRecognizeServer is the server-side stream interface for the
// `StreamingRecognize` method: the server receives StreamingRecognizeRequest
// messages and sends RecognitionResponse messages.
type Cubic_StreamingRecognizeServer interface {
	// Send sends a RecognitionResponse on the stream.
	Send(*RecognitionResponse) error
	// Recv receives the next StreamingRecognizeRequest from the stream.
	Recv() (*StreamingRecognizeRequest, error)
	grpc.ServerStream
}

type ListModelsRequest

// ListModelsRequest is the top-level message sent by the client for the
// `ListModels` method. It carries no fields.
type ListModelsRequest struct {
	// contains filtered or unexported fields
}

The top-level message sent by the client for the `ListModels` method.

func (*ListModelsRequest) Descriptor deprecated

func (*ListModelsRequest) Descriptor() ([]byte, []int)

Deprecated: Use ListModelsRequest.ProtoReflect.Descriptor instead.

func (*ListModelsRequest) ProtoMessage

func (*ListModelsRequest) ProtoMessage()

func (*ListModelsRequest) ProtoReflect added in v1.5.0

func (x *ListModelsRequest) ProtoReflect() protoreflect.Message

func (*ListModelsRequest) Reset

func (x *ListModelsRequest) Reset()

func (*ListModelsRequest) String

func (x *ListModelsRequest) String() string

type ListModelsResponse

// ListModelsResponse is the message returned to the client by the
// `ListModels` method.
type ListModelsResponse struct {

	// List of models available for use that match the request.
	Models []*Model `protobuf:"bytes,1,rep,name=models,proto3" json:"models,omitempty"`
	// contains filtered or unexported fields
}

The message returned to the client by the `ListModels` method.

func (*ListModelsResponse) Descriptor deprecated

func (*ListModelsResponse) Descriptor() ([]byte, []int)

Deprecated: Use ListModelsResponse.ProtoReflect.Descriptor instead.

func (*ListModelsResponse) GetModels

func (x *ListModelsResponse) GetModels() []*Model

func (*ListModelsResponse) ProtoMessage

func (*ListModelsResponse) ProtoMessage()

func (*ListModelsResponse) ProtoReflect added in v1.5.0

func (x *ListModelsResponse) ProtoReflect() protoreflect.Message

func (*ListModelsResponse) Reset

func (x *ListModelsResponse) Reset()

func (*ListModelsResponse) String

func (x *ListModelsResponse) String() string

type Model

// Model is the description of a Cubic model.
type Model struct {

	// Unique identifier of the model.  This identifier is used to choose the
	// model that should be used for recognition, and is specified in the
	// `RecognitionConfig` message.
	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
	// Model name.  This is a concise name describing the model, and may be
	// presented to the end-user, for example, to help choose which model to use
	// for their recognition task.
	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
	// Model attributes
	Attributes *ModelAttributes `protobuf:"bytes,3,opt,name=attributes,proto3" json:"attributes,omitempty"`
	// contains filtered or unexported fields
}

Description of a Cubic Model

func (*Model) Descriptor deprecated

func (*Model) Descriptor() ([]byte, []int)

Deprecated: Use Model.ProtoReflect.Descriptor instead.

func (*Model) GetAttributes

func (x *Model) GetAttributes() *ModelAttributes

func (*Model) GetId

func (x *Model) GetId() string

func (*Model) GetName

func (x *Model) GetName() string

func (*Model) ProtoMessage

func (*Model) ProtoMessage()

func (*Model) ProtoReflect added in v1.5.0

func (x *Model) ProtoReflect() protoreflect.Message

func (*Model) Reset

func (x *Model) Reset()

func (*Model) String

func (x *Model) String() string

type ModelAttributes

// ModelAttributes describes the attributes of a Cubic model.
type ModelAttributes struct {

	// Audio sample rate supported by the model
	SampleRate uint32 `protobuf:"varint,1,opt,name=sample_rate,json=sampleRate,proto3" json:"sample_rate,omitempty"`
	// Attributes specific to supporting recognition context.
	ContextInfo *ContextInfo `protobuf:"bytes,2,opt,name=context_info,json=contextInfo,proto3" json:"context_info,omitempty"`
	// contains filtered or unexported fields
}

Attributes of a Cubic Model

func (*ModelAttributes) Descriptor deprecated

func (*ModelAttributes) Descriptor() ([]byte, []int)

Deprecated: Use ModelAttributes.ProtoReflect.Descriptor instead.

func (*ModelAttributes) GetContextInfo added in v1.5.0

func (x *ModelAttributes) GetContextInfo() *ContextInfo

func (*ModelAttributes) GetSampleRate

func (x *ModelAttributes) GetSampleRate() uint32

func (*ModelAttributes) ProtoMessage

func (*ModelAttributes) ProtoMessage()

func (*ModelAttributes) ProtoReflect added in v1.5.0

func (x *ModelAttributes) ProtoReflect() protoreflect.Message

func (*ModelAttributes) Reset

func (x *ModelAttributes) Reset()

func (*ModelAttributes) String

func (x *ModelAttributes) String() string

type RecognitionAlternative

// RecognitionAlternative is a recognition hypothesis.
type RecognitionAlternative struct {

	// Text representing the transcription of the words that the user spoke.
	//
	// The transcript will be formatted according to the server's formatting
	// configuration. If you want the raw transcript, please see the field
	// `raw_transcript`.  If the server is configured to not use any formatting,
	// then this field will contain the raw transcript.
	//
	// As an example, if the spoken utterance was "four people", and the
	// server was configured to format numbers, this field would be set to
	// "4 people".
	Transcript string `protobuf:"bytes,1,opt,name=transcript,proto3" json:"transcript,omitempty"`
	// Text representing the transcription of the words that the user spoke,
	// without any formatting.  This field will be populated only if the config
	// `RecognitionConfig.enable_raw_transcript` is set to true. Otherwise this
	// field will be an empty string. If you want the formatted transcript, please
	// see the field `transcript`.
	//
	// As an example, if the spoken utterance was `here are four words`,
	// this field would be set to "HERE ARE FOUR WORDS".
	RawTranscript string `protobuf:"bytes,6,opt,name=raw_transcript,json=rawTranscript,proto3" json:"raw_transcript,omitempty"`
	// Confidence estimate between 0 and 1. A higher number represents a higher
	// likelihood of the output being correct.
	Confidence float64 `protobuf:"fixed64,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// A list of word-specific information for each recognized word in the
	// `transcript` field. This is available only if `enable_word_confidence` or
	// `enable_word_time_offsets` was set to `true` in the `RecognitionConfig`.
	Words []*WordInfo `protobuf:"bytes,3,rep,name=words,proto3" json:"words,omitempty"`
	// A list of word-specific information for each recognized word in the
	// `raw_transcript` field. This is available only if `enable_word_confidence`
	// or `enable_word_time_offsets` was set to `true` _and_
	// `enable_raw_transcript` is also set to `true` in the `RecognitionConfig`.
	RawWords []*WordInfo `protobuf:"bytes,7,rep,name=raw_words,json=rawWords,proto3" json:"raw_words,omitempty"`
	// Time offset relative to the beginning of audio received by the recognizer
	// and corresponding to the start of this utterance.
	StartTime *duration.Duration `protobuf:"bytes,4,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	// Duration of the current utterance in the spoken audio.
	Duration *duration.Duration `protobuf:"bytes,5,opt,name=duration,proto3" json:"duration,omitempty"`
	// contains filtered or unexported fields
}

A recognition hypothesis

func (*RecognitionAlternative) Descriptor deprecated

func (*RecognitionAlternative) Descriptor() ([]byte, []int)

Deprecated: Use RecognitionAlternative.ProtoReflect.Descriptor instead.

func (*RecognitionAlternative) GetConfidence

func (x *RecognitionAlternative) GetConfidence() float64

func (*RecognitionAlternative) GetDuration added in v1.1.0

func (x *RecognitionAlternative) GetDuration() *duration.Duration

func (*RecognitionAlternative) GetRawTranscript added in v1.3.0

func (x *RecognitionAlternative) GetRawTranscript() string

func (*RecognitionAlternative) GetRawWords added in v1.6.0

func (x *RecognitionAlternative) GetRawWords() []*WordInfo

func (*RecognitionAlternative) GetStartTime added in v1.1.0

func (x *RecognitionAlternative) GetStartTime() *duration.Duration

func (*RecognitionAlternative) GetTranscript

func (x *RecognitionAlternative) GetTranscript() string

func (*RecognitionAlternative) GetWords

func (x *RecognitionAlternative) GetWords() []*WordInfo

func (*RecognitionAlternative) ProtoMessage

func (*RecognitionAlternative) ProtoMessage()

func (*RecognitionAlternative) ProtoReflect added in v1.5.0

func (x *RecognitionAlternative) ProtoReflect() protoreflect.Message

func (*RecognitionAlternative) Reset

func (x *RecognitionAlternative) Reset()

func (*RecognitionAlternative) String

func (x *RecognitionAlternative) String() string

type RecognitionAudio

// RecognitionAudio carries audio to be sent to the recognizer.
type RecognitionAudio struct {
	// Raw audio bytes, in the encoding specified by `RecognitionConfig`.
	Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
	// contains filtered or unexported fields
}

Audio to be sent to the recognizer

func (*RecognitionAudio) Descriptor deprecated

func (*RecognitionAudio) Descriptor() ([]byte, []int)

Deprecated: Use RecognitionAudio.ProtoReflect.Descriptor instead.

func (*RecognitionAudio) GetData

func (x *RecognitionAudio) GetData() []byte

func (*RecognitionAudio) ProtoMessage

func (*RecognitionAudio) ProtoMessage()

func (*RecognitionAudio) ProtoReflect added in v1.5.0

func (x *RecognitionAudio) ProtoReflect() protoreflect.Message

func (*RecognitionAudio) Reset

func (x *RecognitionAudio) Reset()

func (*RecognitionAudio) String

func (x *RecognitionAudio) String() string

type RecognitionConfig

// RecognitionConfig is the configuration for setting up a recognizer.
type RecognitionConfig struct {

	// Unique identifier of the model to use, as obtained from a `Model` message.
	ModelId string `protobuf:"bytes,1,opt,name=model_id,json=modelId,proto3" json:"model_id,omitempty"`
	// Encoding of audio data sent/streamed through the `RecognitionAudio`
	// messages.  For encodings like WAV/MP3 that have headers, the headers are
	// expected to be sent at the beginning of the stream, not in every
	// `RecognitionAudio` message.
	//
	// If not specified, the default encoding is RAW_LINEAR16.
	//
	// Depending on how they are configured, server instances of this service may
	// not support all the encodings enumerated above. They are always required to
	// accept RAW_LINEAR16.  If any other `Encoding` is specified, and it is not
	// available on the server being used, the recognition request will result in
	// an appropriate error message.
	AudioEncoding RecognitionConfig_Encoding `` /* 152-byte string literal not displayed */
	// Idle Timeout of the created Recognizer.  If no audio data is received by
	// the recognizer for this duration, ongoing rpc calls will result in an
	// error, the recognizer will be destroyed and thus more audio may not be sent
	// to the same recognizer.  The server may impose a limit on the maximum idle
	// timeout that can be specified, and if the value in this message exceeds
	// that serverside value, creation of the recognizer will fail with an error.
	IdleTimeout *duration.Duration `protobuf:"bytes,3,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"`
	// This is an optional field.  If this is set to true, each result will
	// include a list of words and the start time offset (timestamp) and the
	// duration for each of those words.  If set to `false`, no word-level
	// timestamps will be returned.  The default is `false`.
	EnableWordTimeOffsets bool `` /* 129-byte string literal not displayed */
	// This is an optional field.  If this is set to true, each result will
	// include a list of words and the confidence for those words.  If `false`, no
	// word-level confidence information is returned.  The default is `false`.
	EnableWordConfidence bool `protobuf:"varint,5,opt,name=enable_word_confidence,json=enableWordConfidence,proto3" json:"enable_word_confidence,omitempty"`
	// This is an optional field.  If this is set to true, the field
	// `RecognitionAlternative.raw_transcript` will be populated with the raw
	// transcript output from the recognizer, without any formatting rules
	// applied.  If this is set to false, that field will not
	// be set in the results.  The RecognitionAlternative.transcript will
	// always be populated with text formatted according to the server's settings.
	EnableRawTranscript bool `protobuf:"varint,6,opt,name=enable_raw_transcript,json=enableRawTranscript,proto3" json:"enable_raw_transcript,omitempty"`
	// This is an optional field.  If this is set to true, the results will
	// include a confusion network.  If set to `false`, no confusion network will
	// be returned.  The default is `false`.  If the model being used does not
	// support a confusion network, results may be returned without a confusion
	// network available.  If this field is set to `true`, then
	// `enable_raw_transcript` is also forced to be true.
	EnableConfusionNetwork bool `` /* 130-byte string literal not displayed */
	// This is an optional field.  If the audio has multiple channels, this field
	// should be configured with the list of channel indices that should be
	// transcribed.  Channels are 0-indexed.
	//
	// Example: `[0]` for a mono file, `[0, 1]` for a stereo file.
	//
	// If this field is not set, a mono file will be assumed by default and only
	// channel-0 will be transcribed even if the file actually has additional
	// channels.
	//
	// Channels that are present in the audio may be omitted, but it is an error
	// to include a channel index in this field that is not present in the audio.
	// Channels may be listed in any order but the same index may not be repeated
	// in this list.
	//
	// BAD: `[0, 2]` for a stereo file; BAD: `[0, 0]` for a mono file.
	AudioChannels []uint32 `protobuf:"varint,8,rep,packed,name=audio_channels,json=audioChannels,proto3" json:"audio_channels,omitempty"`
	// This is an optional field.  If there is any metadata associated with the
	// audio being sent, use this field to provide it to cubic.  The server may
	// record this metadata when processing the request.  The server does not use
	// this field for any other purpose.
	Metadata *RecognitionMetadata `protobuf:"bytes,9,opt,name=metadata,proto3" json:"metadata,omitempty"`
	// This is an optional field for providing any additional context information
	// that may aid speech recognition.  This can also be used to add
	// out-of-vocabulary words to the model or boost recognition of specific
	// proper names or commands. Context information must be pre-compiled via the
	// `CompileContext()` method.
	Context *RecognitionContext `protobuf:"bytes,10,opt,name=context,proto3" json:"context,omitempty"`
	// contains filtered or unexported fields
}

Configuration for setting up a Recognizer

func (*RecognitionConfig) Descriptor deprecated

func (*RecognitionConfig) Descriptor() ([]byte, []int)

Deprecated: Use RecognitionConfig.ProtoReflect.Descriptor instead.

func (*RecognitionConfig) GetAudioChannels

func (x *RecognitionConfig) GetAudioChannels() []uint32

func (*RecognitionConfig) GetAudioEncoding

func (x *RecognitionConfig) GetAudioEncoding() RecognitionConfig_Encoding

func (*RecognitionConfig) GetContext added in v1.5.0

func (x *RecognitionConfig) GetContext() *RecognitionContext

func (*RecognitionConfig) GetEnableConfusionNetwork

func (x *RecognitionConfig) GetEnableConfusionNetwork() bool

func (*RecognitionConfig) GetEnableRawTranscript

func (x *RecognitionConfig) GetEnableRawTranscript() bool

func (*RecognitionConfig) GetEnableWordConfidence

func (x *RecognitionConfig) GetEnableWordConfidence() bool

func (*RecognitionConfig) GetEnableWordTimeOffsets

func (x *RecognitionConfig) GetEnableWordTimeOffsets() bool

func (*RecognitionConfig) GetIdleTimeout

func (x *RecognitionConfig) GetIdleTimeout() *duration.Duration

func (*RecognitionConfig) GetMetadata added in v1.4.0

func (x *RecognitionConfig) GetMetadata() *RecognitionMetadata

func (*RecognitionConfig) GetModelId

func (x *RecognitionConfig) GetModelId() string

func (*RecognitionConfig) ProtoMessage

func (*RecognitionConfig) ProtoMessage()

func (*RecognitionConfig) ProtoReflect added in v1.5.0

func (x *RecognitionConfig) ProtoReflect() protoreflect.Message

func (*RecognitionConfig) Reset

func (x *RecognitionConfig) Reset()

func (*RecognitionConfig) String

func (x *RecognitionConfig) String() string

type RecognitionConfig_Encoding

type RecognitionConfig_Encoding int32

The encoding of the audio data to be sent for recognition.

For best results, the audio source should be captured and transmitted using the RAW_LINEAR16 encoding.

const (
	// Raw (headerless) Uncompressed 16-bit signed little endian samples (linear
	// PCM), single channel, sampled at the rate expected by the chosen `Model`.
	RecognitionConfig_RAW_LINEAR16 RecognitionConfig_Encoding = 0
	// WAV (data with RIFF headers), with data sampled at a rate equal to or
	// higher than the sample rate expected by the chosen Model.
	RecognitionConfig_WAV RecognitionConfig_Encoding = 1
	// MP3 data, sampled at a rate equal to or higher than the sampling rate
	// expected by the chosen Model.
	RecognitionConfig_MP3 RecognitionConfig_Encoding = 2
	// FLAC data, sampled at a rate equal to or higher than the sample rate
	// expected by the chosen Model.
	RecognitionConfig_FLAC RecognitionConfig_Encoding = 3
	// VOX data (Dialogic ADPCM), sampled at 8 KHz.
	RecognitionConfig_VOX8000 RecognitionConfig_Encoding = 4
	// μ-law (8-bit) encoded RAW data, single channel, sampled at 8 KHz.
	RecognitionConfig_ULAW8000 RecognitionConfig_Encoding = 5
	// A-law (8-bit) encoded RAW data, single channel, sampled at 8 KHz.
	RecognitionConfig_ALAW8000 RecognitionConfig_Encoding = 6
	// Opus (16-bit) encoded RAW data, sampled at a rate equal to or higher than the sample rate expected by the chosen Model.
	RecognitionConfig_OPUS RecognitionConfig_Encoding = 7
)

func (RecognitionConfig_Encoding) Descriptor added in v1.5.0

func (RecognitionConfig_Encoding) Enum added in v1.5.0

func (RecognitionConfig_Encoding) EnumDescriptor deprecated

func (RecognitionConfig_Encoding) EnumDescriptor() ([]byte, []int)

Deprecated: Use RecognitionConfig_Encoding.Descriptor instead.

func (RecognitionConfig_Encoding) Number added in v1.5.0

func (RecognitionConfig_Encoding) String

func (RecognitionConfig_Encoding) Type added in v1.5.0

type RecognitionConfusionNetwork

type RecognitionConfusionNetwork struct {
	Links []*ConfusionNetworkLink `protobuf:"bytes,1,rep,name=links,proto3" json:"links,omitempty"`
	// contains filtered or unexported fields
}

Confusion network in recognition output

func (*RecognitionConfusionNetwork) Descriptor deprecated

func (*RecognitionConfusionNetwork) Descriptor() ([]byte, []int)

Deprecated: Use RecognitionConfusionNetwork.ProtoReflect.Descriptor instead.

func (*RecognitionConfusionNetwork) ProtoMessage

func (*RecognitionConfusionNetwork) ProtoMessage()

func (*RecognitionConfusionNetwork) ProtoReflect added in v1.5.0

func (*RecognitionConfusionNetwork) Reset

func (x *RecognitionConfusionNetwork) Reset()

func (*RecognitionConfusionNetwork) String

func (x *RecognitionConfusionNetwork) String() string

type RecognitionContext added in v1.5.0

type RecognitionContext struct {

	// List of compiled context information, with each entry being compiled from a
	// list of words or phrases using the `CompileContext` method.
	Compiled []*CompiledContext `protobuf:"bytes,1,rep,name=compiled,proto3" json:"compiled,omitempty"`
	// contains filtered or unexported fields
}

A collection of additional context information that may aid speech recognition. This can be used to add out-of-vocabulary words to the model or to boost recognition of specific proper names or commands.

func (*RecognitionContext) Descriptor deprecated added in v1.5.0

func (*RecognitionContext) Descriptor() ([]byte, []int)

Deprecated: Use RecognitionContext.ProtoReflect.Descriptor instead.

func (*RecognitionContext) GetCompiled added in v1.5.0

func (x *RecognitionContext) GetCompiled() []*CompiledContext

func (*RecognitionContext) ProtoMessage added in v1.5.0

func (*RecognitionContext) ProtoMessage()

func (*RecognitionContext) ProtoReflect added in v1.5.0

func (x *RecognitionContext) ProtoReflect() protoreflect.Message

func (*RecognitionContext) Reset added in v1.5.0

func (x *RecognitionContext) Reset()

func (*RecognitionContext) String added in v1.5.0

func (x *RecognitionContext) String() string

type RecognitionMetadata added in v1.4.0

type RecognitionMetadata struct {

	// Any custom metadata that the client wants to associate with the recording.
	// This could be a simple string (e.g. a tracing ID) or structured data
	// (e.g. JSON)
	CustomMetadata string `protobuf:"bytes,1,opt,name=custom_metadata,json=customMetadata,proto3" json:"custom_metadata,omitempty"`
	// contains filtered or unexported fields
}

Metadata associated with the audio to be recognized.

func (*RecognitionMetadata) Descriptor deprecated added in v1.4.0

func (*RecognitionMetadata) Descriptor() ([]byte, []int)

Deprecated: Use RecognitionMetadata.ProtoReflect.Descriptor instead.

func (*RecognitionMetadata) GetCustomMetadata added in v1.4.0

func (x *RecognitionMetadata) GetCustomMetadata() string

func (*RecognitionMetadata) ProtoMessage added in v1.4.0

func (*RecognitionMetadata) ProtoMessage()

func (*RecognitionMetadata) ProtoReflect added in v1.5.0

func (x *RecognitionMetadata) ProtoReflect() protoreflect.Message

func (*RecognitionMetadata) Reset added in v1.4.0

func (x *RecognitionMetadata) Reset()

func (*RecognitionMetadata) String added in v1.4.0

func (x *RecognitionMetadata) String() string

type RecognitionResponse

type RecognitionResponse struct {
	Results []*RecognitionResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"`
	// contains filtered or unexported fields
}

Collection of sequence of recognition results in a portion of audio. When transcribing a single audio channel (e.g. RAW_LINEAR16 input, or a mono file), results will be ordered chronologically. When transcribing multiple channels, the results of all channels will be interleaved. Results of each individual channel will be chronological. No such promise is made for the ordering of results of different channels, as results are returned for each channel individually as soon as they are ready.

func (*RecognitionResponse) Descriptor deprecated

func (*RecognitionResponse) Descriptor() ([]byte, []int)

Deprecated: Use RecognitionResponse.ProtoReflect.Descriptor instead.

func (*RecognitionResponse) GetResults

func (x *RecognitionResponse) GetResults() []*RecognitionResult

func (*RecognitionResponse) ProtoMessage

func (*RecognitionResponse) ProtoMessage()

func (*RecognitionResponse) ProtoReflect added in v1.5.0

func (x *RecognitionResponse) ProtoReflect() protoreflect.Message

func (*RecognitionResponse) Reset

func (x *RecognitionResponse) Reset()

func (*RecognitionResponse) String

func (x *RecognitionResponse) String() string

type RecognitionResult

type RecognitionResult struct {

	// An n-best list of recognition hypotheses alternatives
	Alternatives []*RecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
	// If this is set to true, it denotes that the result is an interim partial
	// result, and could change after more audio is processed.  If unset, or set
	// to false, it denotes that this is a final result and will not change.
	//
	// Servers are not required to implement support for returning partial
	// results, and clients should generally not depend on their availability.
	IsPartial bool `protobuf:"varint,2,opt,name=is_partial,json=isPartial,proto3" json:"is_partial,omitempty"`
	// If `enable_confusion_network` was set to true in the `RecognitionConfig`,
	// and if the model supports it, a confusion network will be available in the
	// results.
	Cnet *RecognitionConfusionNetwork `protobuf:"bytes,3,opt,name=cnet,proto3" json:"cnet,omitempty"`
	// Channel of the audio file that this result was transcribed from.  For a
	// mono file, or RAW_LINEAR16 input, this will be set to 0.
	AudioChannel uint32 `protobuf:"varint,4,opt,name=audio_channel,json=audioChannel,proto3" json:"audio_channel,omitempty"`
	// contains filtered or unexported fields
}

A recognition result corresponding to a portion of audio.

func (*RecognitionResult) Descriptor deprecated

func (*RecognitionResult) Descriptor() ([]byte, []int)

Deprecated: Use RecognitionResult.ProtoReflect.Descriptor instead.

func (*RecognitionResult) GetAlternatives

func (x *RecognitionResult) GetAlternatives() []*RecognitionAlternative

func (*RecognitionResult) GetAudioChannel

func (x *RecognitionResult) GetAudioChannel() uint32

func (*RecognitionResult) GetCnet

func (*RecognitionResult) GetIsPartial

func (x *RecognitionResult) GetIsPartial() bool

func (*RecognitionResult) ProtoMessage

func (*RecognitionResult) ProtoMessage()

func (*RecognitionResult) ProtoReflect added in v1.5.0

func (x *RecognitionResult) ProtoReflect() protoreflect.Message

func (*RecognitionResult) Reset

func (x *RecognitionResult) Reset()

func (*RecognitionResult) String

func (x *RecognitionResult) String() string

type RecognizeRequest

type RecognizeRequest struct {

	// Provides configuration to create the recognizer.
	Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	// The audio data to be recognized
	Audio *RecognitionAudio `protobuf:"bytes,2,opt,name=audio,proto3" json:"audio,omitempty"`
	// contains filtered or unexported fields
}

The top-level message sent by the client for the `Recognize` method. Both the `RecognitionConfig` and `RecognitionAudio` fields are required. The entire audio data must be sent in one request. If your audio data is larger, please use the `StreamingRecognize` call.

func (*RecognizeRequest) Descriptor deprecated

func (*RecognizeRequest) Descriptor() ([]byte, []int)

Deprecated: Use RecognizeRequest.ProtoReflect.Descriptor instead.

func (*RecognizeRequest) GetAudio

func (x *RecognizeRequest) GetAudio() *RecognitionAudio

func (*RecognizeRequest) GetConfig

func (x *RecognizeRequest) GetConfig() *RecognitionConfig

func (*RecognizeRequest) ProtoMessage

func (*RecognizeRequest) ProtoMessage()

func (*RecognizeRequest) ProtoReflect added in v1.5.0

func (x *RecognizeRequest) ProtoReflect() protoreflect.Message

func (*RecognizeRequest) Reset

func (x *RecognizeRequest) Reset()

func (*RecognizeRequest) String

func (x *RecognizeRequest) String() string

type StreamingRecognizeRequest

type StreamingRecognizeRequest struct {

	// Types that are assignable to Request:
	//	*StreamingRecognizeRequest_Config
	//	*StreamingRecognizeRequest_Audio
	Request isStreamingRecognizeRequest_Request `protobuf_oneof:"request"`
	// contains filtered or unexported fields
}

The top-level message sent by the client for the `StreamingRecognize` request. Multiple `StreamingRecognizeRequest` messages are sent. The first message must contain a `RecognitionConfig` message only, and all subsequent messages must contain `RecognitionAudio` only. All `RecognitionAudio` messages must contain non-empty audio. If audio content is empty, the server may interpret it as end of stream and stop accepting any further messages.

func (*StreamingRecognizeRequest) Descriptor deprecated

func (*StreamingRecognizeRequest) Descriptor() ([]byte, []int)

Deprecated: Use StreamingRecognizeRequest.ProtoReflect.Descriptor instead.

func (*StreamingRecognizeRequest) GetAudio

func (*StreamingRecognizeRequest) GetConfig

func (*StreamingRecognizeRequest) GetRequest

func (m *StreamingRecognizeRequest) GetRequest() isStreamingRecognizeRequest_Request

func (*StreamingRecognizeRequest) ProtoMessage

func (*StreamingRecognizeRequest) ProtoMessage()

func (*StreamingRecognizeRequest) ProtoReflect added in v1.5.0

func (*StreamingRecognizeRequest) Reset

func (x *StreamingRecognizeRequest) Reset()

func (*StreamingRecognizeRequest) String

func (x *StreamingRecognizeRequest) String() string

type StreamingRecognizeRequest_Audio

type StreamingRecognizeRequest_Audio struct {
	Audio *RecognitionAudio `protobuf:"bytes,2,opt,name=audio,proto3,oneof"`
}

type StreamingRecognizeRequest_Config

type StreamingRecognizeRequest_Config struct {
	Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3,oneof"`
}

type UnimplementedCubicServer added in v1.5.0

type UnimplementedCubicServer struct {
}

UnimplementedCubicServer can be embedded to have forward compatible implementations.

func (*UnimplementedCubicServer) CompileContext added in v1.5.0

func (*UnimplementedCubicServer) ListModels added in v1.5.0

func (*UnimplementedCubicServer) Recognize added in v1.5.0

func (*UnimplementedCubicServer) StreamingRecognize added in v1.5.0

func (*UnimplementedCubicServer) Version added in v1.5.0

type VersionResponse

type VersionResponse struct {

	// version of the cubic library handling the recognition
	Cubic string `protobuf:"bytes,1,opt,name=cubic,proto3" json:"cubic,omitempty"`
	// version of the server handling these requests
	Server string `protobuf:"bytes,2,opt,name=server,proto3" json:"server,omitempty"`
	// contains filtered or unexported fields
}

The message sent by the server for the `Version` method.

func (*VersionResponse) Descriptor deprecated

func (*VersionResponse) Descriptor() ([]byte, []int)

Deprecated: Use VersionResponse.ProtoReflect.Descriptor instead.

func (*VersionResponse) GetCubic

func (x *VersionResponse) GetCubic() string

func (*VersionResponse) GetServer

func (x *VersionResponse) GetServer() string

func (*VersionResponse) ProtoMessage

func (*VersionResponse) ProtoMessage()

func (*VersionResponse) ProtoReflect added in v1.5.0

func (x *VersionResponse) ProtoReflect() protoreflect.Message

func (*VersionResponse) Reset

func (x *VersionResponse) Reset()

func (*VersionResponse) String

func (x *VersionResponse) String() string

type WordInfo

type WordInfo struct {

	// The actual word in the text
	Word string `protobuf:"bytes,1,opt,name=word,proto3" json:"word,omitempty"`
	// Confidence estimate between 0 and 1.  A higher number represents a
	// higher likelihood that the word was correctly recognized.
	Confidence float64 `protobuf:"fixed64,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// Time offset relative to the beginning of audio received by the recognizer
	// and corresponding to the start of this spoken word.
	StartTime *duration.Duration `protobuf:"bytes,3,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	// Duration of the current word in the spoken audio.
	Duration *duration.Duration `protobuf:"bytes,4,opt,name=duration,proto3" json:"duration,omitempty"`
	// contains filtered or unexported fields
}

Word-specific information for recognized words

func (*WordInfo) Descriptor deprecated

func (*WordInfo) Descriptor() ([]byte, []int)

Deprecated: Use WordInfo.ProtoReflect.Descriptor instead.

func (*WordInfo) GetConfidence

func (x *WordInfo) GetConfidence() float64

func (*WordInfo) GetDuration

func (x *WordInfo) GetDuration() *duration.Duration

func (*WordInfo) GetStartTime

func (x *WordInfo) GetStartTime() *duration.Duration

func (*WordInfo) GetWord

func (x *WordInfo) GetWord() string

func (*WordInfo) ProtoMessage

func (*WordInfo) ProtoMessage()

func (*WordInfo) ProtoReflect added in v1.5.0

func (x *WordInfo) ProtoReflect() protoreflect.Message

func (*WordInfo) Reset

func (x *WordInfo) Reset()

func (*WordInfo) String

func (x *WordInfo) String() string

Directories

Path Synopsis
gw module

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL