transcribev5

package
v0.0.0-...-1cf2bc0
Published: Jan 5, 2024 License: Apache-2.0 Imports: 8 Imported by: 2

Documentation

Index

Constants

const (
	TranscribeService_Version_FullMethodName            = "/cobaltspeech.transcribe.v5.TranscribeService/Version"
	TranscribeService_ListModels_FullMethodName         = "/cobaltspeech.transcribe.v5.TranscribeService/ListModels"
	TranscribeService_StreamingRecognize_FullMethodName = "/cobaltspeech.transcribe.v5.TranscribeService/StreamingRecognize"
	TranscribeService_CompileContext_FullMethodName     = "/cobaltspeech.transcribe.v5.TranscribeService/CompileContext"
)

Variables

var (
	ByteOrder_name = map[int32]string{
		0: "BYTE_ORDER_UNSPECIFIED",
		1: "BYTE_ORDER_LITTLE_ENDIAN",
		2: "BYTE_ORDER_BIG_ENDIAN",
	}
	ByteOrder_value = map[string]int32{
		"BYTE_ORDER_UNSPECIFIED":   0,
		"BYTE_ORDER_LITTLE_ENDIAN": 1,
		"BYTE_ORDER_BIG_ENDIAN":    2,
	}
)

Enum value maps for ByteOrder.

var (
	AudioEncoding_name = map[int32]string{
		0: "AUDIO_ENCODING_UNSPECIFIED",
		1: "AUDIO_ENCODING_SIGNED",
		2: "AUDIO_ENCODING_UNSIGNED",
		3: "AUDIO_ENCODING_IEEE_FLOAT",
		4: "AUDIO_ENCODING_ULAW",
		5: "AUDIO_ENCODING_ALAW",
	}
	AudioEncoding_value = map[string]int32{
		"AUDIO_ENCODING_UNSPECIFIED": 0,
		"AUDIO_ENCODING_SIGNED":      1,
		"AUDIO_ENCODING_UNSIGNED":    2,
		"AUDIO_ENCODING_IEEE_FLOAT":  3,
		"AUDIO_ENCODING_ULAW":        4,
		"AUDIO_ENCODING_ALAW":        5,
	}
)

Enum value maps for AudioEncoding.

var (
	AudioFormatHeadered_name = map[int32]string{
		0: "AUDIO_FORMAT_HEADERED_UNSPECIFIED",
		1: "AUDIO_FORMAT_HEADERED_WAV",
		2: "AUDIO_FORMAT_HEADERED_MP3",
		3: "AUDIO_FORMAT_HEADERED_FLAC",
		4: "AUDIO_FORMAT_HEADERED_OGG_OPUS",
	}
	AudioFormatHeadered_value = map[string]int32{
		"AUDIO_FORMAT_HEADERED_UNSPECIFIED": 0,
		"AUDIO_FORMAT_HEADERED_WAV":         1,
		"AUDIO_FORMAT_HEADERED_MP3":         2,
		"AUDIO_FORMAT_HEADERED_FLAC":        3,
		"AUDIO_FORMAT_HEADERED_OGG_OPUS":    4,
	}
)

Enum value maps for AudioFormatHeadered.

var File_cobaltspeech_transcribe_v5_transcribe_proto protoreflect.FileDescriptor

var TranscribeService_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "cobaltspeech.transcribe.v5.TranscribeService",
	HandlerType: (*TranscribeServiceServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "Version",
			Handler:    _TranscribeService_Version_Handler,
		},
		{
			MethodName: "ListModels",
			Handler:    _TranscribeService_ListModels_Handler,
		},
		{
			MethodName: "CompileContext",
			Handler:    _TranscribeService_CompileContext_Handler,
		},
	},
	Streams: []grpc.StreamDesc{
		{
			StreamName:    "StreamingRecognize",
			Handler:       _TranscribeService_StreamingRecognize_Handler,
			ServerStreams: true,
			ClientStreams: true,
		},
	},
	Metadata: "cobaltspeech/transcribe/v5/transcribe.proto",
}

TranscribeService_ServiceDesc is the grpc.ServiceDesc for TranscribeService service. It's only intended for direct use with grpc.RegisterService, and not to be introspected or modified (even as a copy).

Functions

func RegisterTranscribeServiceServer

func RegisterTranscribeServiceServer(s grpc.ServiceRegistrar, srv TranscribeServiceServer)
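For orientation, a minimal sketch of serving this API with google.golang.org/grpc follows. The import path is assumed for illustration, and registering UnimplementedTranscribeServiceServer is only a compilable placeholder; a real server would embed it and override the methods, as sketched under TranscribeServiceServer further below.

package main

import (
	"log"
	"net"

	// Import path assumed for illustration; use the actual path of this generated package.
	transcribepb "github.com/cobaltspeech/go-genproto/cobaltspeech/transcribe/v5"
	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":9090") // placeholder address
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	s := grpc.NewServer()
	// Placeholder registration: every RPC returns codes.Unimplemented until a
	// real TranscribeServiceServer implementation is registered instead.
	transcribepb.RegisterTranscribeServiceServer(s, &transcribepb.UnimplementedTranscribeServiceServer{})
	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}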

Types

type AudioEncoding

type AudioEncoding int32

The encoding of the audio data to be sent for recognition.

const (
	// AUDIO_ENCODING_UNSPECIFIED is the default value of this type and will
	// result in an error.
	AudioEncoding_AUDIO_ENCODING_UNSPECIFIED AudioEncoding = 0
	// PCM signed-integer
	AudioEncoding_AUDIO_ENCODING_SIGNED AudioEncoding = 1
	// PCM unsigned-integer
	AudioEncoding_AUDIO_ENCODING_UNSIGNED AudioEncoding = 2
	// PCM IEEE-Float
	AudioEncoding_AUDIO_ENCODING_IEEE_FLOAT AudioEncoding = 3
	// G.711 mu-law
	AudioEncoding_AUDIO_ENCODING_ULAW AudioEncoding = 4
	// G.711 a-law
	AudioEncoding_AUDIO_ENCODING_ALAW AudioEncoding = 5
)

func (AudioEncoding) Descriptor

func (AudioEncoding) Enum

func (x AudioEncoding) Enum() *AudioEncoding

func (AudioEncoding) EnumDescriptor deprecated

func (AudioEncoding) EnumDescriptor() ([]byte, []int)

Deprecated: Use AudioEncoding.Descriptor instead.

func (AudioEncoding) Number

func (AudioEncoding) String

func (x AudioEncoding) String() string

func (AudioEncoding) Type

type AudioFormatHeadered

type AudioFormatHeadered int32
const (
	// AUDIO_FORMAT_HEADERED_UNSPECIFIED is the default value of this type.
	AudioFormatHeadered_AUDIO_FORMAT_HEADERED_UNSPECIFIED AudioFormatHeadered = 0
	// WAV with RIFF headers
	AudioFormatHeadered_AUDIO_FORMAT_HEADERED_WAV AudioFormatHeadered = 1
	// MP3 format with a valid frame header at the beginning of data
	AudioFormatHeadered_AUDIO_FORMAT_HEADERED_MP3 AudioFormatHeadered = 2
	// FLAC format
	AudioFormatHeadered_AUDIO_FORMAT_HEADERED_FLAC AudioFormatHeadered = 3
	// Opus format with OGG header
	AudioFormatHeadered_AUDIO_FORMAT_HEADERED_OGG_OPUS AudioFormatHeadered = 4
)

func (AudioFormatHeadered) Descriptor

func (AudioFormatHeadered) Enum

func (AudioFormatHeadered) EnumDescriptor deprecated

func (AudioFormatHeadered) EnumDescriptor() ([]byte, []int)

Deprecated: Use AudioFormatHeadered.Descriptor instead.

func (AudioFormatHeadered) Number

func (AudioFormatHeadered) String

func (x AudioFormatHeadered) String() string

func (AudioFormatHeadered) Type

type AudioFormatRAW

type AudioFormatRAW struct {

	// Encoding of the samples. It must be specified explicitly and using the
	// default value of `AUDIO_ENCODING_UNSPECIFIED` will result in an error.
	Encoding AudioEncoding `protobuf:"varint,1,opt,name=encoding,proto3,enum=cobaltspeech.transcribe.v5.AudioEncoding" json:"encoding,omitempty"`
	// Bit depth of each sample (e.g. 8, 16, 24, 32, etc.). This is a required
	// field.
	BitDepth uint32 `protobuf:"varint,2,opt,name=bit_depth,json=bitDepth,proto3" json:"bit_depth,omitempty"`
	// Byte order of the samples. This field must be set to a value other than
	// `BYTE_ORDER_UNSPECIFIED` when the `bit_depth` is greater than 8.
	ByteOrder ByteOrder `` /* 131-byte string literal not displayed */
	// Sampling rate in Hz. This is a required field.
	SampleRate uint32 `protobuf:"varint,4,opt,name=sample_rate,json=sampleRate,proto3" json:"sample_rate,omitempty"`
	// Number of channels present in the audio. E.g.: 1 (mono), 2 (stereo), etc.
	// This is a required field.
	Channels uint32 `protobuf:"varint,5,opt,name=channels,proto3" json:"channels,omitempty"`
	// contains filtered or unexported fields
}

Details of audio in raw format

func (*AudioFormatRAW) Descriptor deprecated

func (*AudioFormatRAW) Descriptor() ([]byte, []int)

Deprecated: Use AudioFormatRAW.ProtoReflect.Descriptor instead.

func (*AudioFormatRAW) GetBitDepth

func (x *AudioFormatRAW) GetBitDepth() uint32

func (*AudioFormatRAW) GetByteOrder

func (x *AudioFormatRAW) GetByteOrder() ByteOrder

func (*AudioFormatRAW) GetChannels

func (x *AudioFormatRAW) GetChannels() uint32

func (*AudioFormatRAW) GetEncoding

func (x *AudioFormatRAW) GetEncoding() AudioEncoding

func (*AudioFormatRAW) GetSampleRate

func (x *AudioFormatRAW) GetSampleRate() uint32

func (*AudioFormatRAW) ProtoMessage

func (*AudioFormatRAW) ProtoMessage()

func (*AudioFormatRAW) ProtoReflect

func (x *AudioFormatRAW) ProtoReflect() protoreflect.Message

func (*AudioFormatRAW) Reset

func (x *AudioFormatRAW) Reset()

func (*AudioFormatRAW) String

func (x *AudioFormatRAW) String() string

type ByteOrder

type ByteOrder int32

Byte order of multi-byte data

const (
	// BYTE_ORDER_UNSPECIFIED is the default value of this type.
	ByteOrder_BYTE_ORDER_UNSPECIFIED ByteOrder = 0
	// Little Endian byte order
	ByteOrder_BYTE_ORDER_LITTLE_ENDIAN ByteOrder = 1
	// Big Endian byte order
	ByteOrder_BYTE_ORDER_BIG_ENDIAN ByteOrder = 2
)

func (ByteOrder) Descriptor

func (ByteOrder) Descriptor() protoreflect.EnumDescriptor

func (ByteOrder) Enum

func (x ByteOrder) Enum() *ByteOrder

func (ByteOrder) EnumDescriptor deprecated

func (ByteOrder) EnumDescriptor() ([]byte, []int)

Deprecated: Use ByteOrder.Descriptor instead.

func (ByteOrder) Number

func (x ByteOrder) Number() protoreflect.EnumNumber

func (ByteOrder) String

func (x ByteOrder) String() string

func (ByteOrder) Type

type CompileContextRequest

type CompileContextRequest struct {

	// Unique identifier of the model to compile the context information for. The
	// model chosen needs to support context which can be verified by checking its
	// `ModelAttributes.ContextInfo` obtained via `ListModels`.
	ModelId string `protobuf:"bytes,1,opt,name=model_id,json=modelId,proto3" json:"model_id,omitempty"`
	// The token that is associated with the provided list of phrases or words
	// (e.g "menu_item", "airport" etc.). Must be one of the tokens included in
	// the model being used, which can be retrieved by calling the `ListModels`
	// method.
	Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"`
	// List of phrases and/or words to be compiled.
	Phrases []*ContextPhrase `protobuf:"bytes,3,rep,name=phrases,proto3" json:"phrases,omitempty"`
	// contains filtered or unexported fields
}

The top-level message sent by the client for the `CompileContext` request. It contains a list of phrases or words, paired with a context token included in the model being used. The token specifies a category such as "menu_item", "airport", "contact", "product_name" etc. The context token is used to determine the places in the recognition output where the provided list of phrases or words may appear. The allowed context tokens for a given model can be found in its `ModelAttributes.ContextInfo` obtained via the `ListModels` method.
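A hedged sketch of one such call, reusing the transcribepb alias from the sketch above and assuming a connected client plus the standard context package. The model ID, token, and phrases are placeholders; the token must appear in the chosen model's ContextInfo.AllowedContextTokens.

func compileAirportContext(ctx context.Context, client transcribepb.TranscribeServiceClient) (*transcribepb.CompiledContext, error) {
	resp, err := client.CompileContext(ctx, &transcribepb.CompileContextRequest{
		ModelId: "1",       // placeholder; use an ID returned by ListModels
		Token:   "airport", // must be an allowed context token of that model
		Phrases: []*transcribepb.ContextPhrase{
			{Text: "heathrow"},
			{Text: "schiphol", Boost: 2.0}, // roughly twice as likely as an unboosted phrase
		},
	})
	if err != nil {
		return nil, err
	}
	// The compiled context can be cached and attached to later recognition
	// requests via RecognitionContext.
	return resp.GetContext(), nil
}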

func (*CompileContextRequest) Descriptor deprecated

func (*CompileContextRequest) Descriptor() ([]byte, []int)

Deprecated: Use CompileContextRequest.ProtoReflect.Descriptor instead.

func (*CompileContextRequest) GetModelId

func (x *CompileContextRequest) GetModelId() string

func (*CompileContextRequest) GetPhrases

func (x *CompileContextRequest) GetPhrases() []*ContextPhrase

func (*CompileContextRequest) GetToken

func (x *CompileContextRequest) GetToken() string

func (*CompileContextRequest) ProtoMessage

func (*CompileContextRequest) ProtoMessage()

func (*CompileContextRequest) ProtoReflect

func (x *CompileContextRequest) ProtoReflect() protoreflect.Message

func (*CompileContextRequest) Reset

func (x *CompileContextRequest) Reset()

func (*CompileContextRequest) String

func (x *CompileContextRequest) String() string

type CompileContextResponse

type CompileContextResponse struct {

	// Context information in a compact form that is efficient for use in
	// subsequent recognition requests. The size of the compiled form will depend
	// on the amount of text that was sent for compilation. For 1000 words it's
	// generally less than 100 kilobytes.
	Context *CompiledContext `protobuf:"bytes,1,opt,name=context,proto3" json:"context,omitempty"`
	// contains filtered or unexported fields
}

The message returned to the client by the `CompileContext` method.

func (*CompileContextResponse) Descriptor deprecated

func (*CompileContextResponse) Descriptor() ([]byte, []int)

Deprecated: Use CompileContextResponse.ProtoReflect.Descriptor instead.

func (*CompileContextResponse) GetContext

func (x *CompileContextResponse) GetContext() *CompiledContext

func (*CompileContextResponse) ProtoMessage

func (*CompileContextResponse) ProtoMessage()

func (*CompileContextResponse) ProtoReflect

func (x *CompileContextResponse) ProtoReflect() protoreflect.Message

func (*CompileContextResponse) Reset

func (x *CompileContextResponse) Reset()

func (*CompileContextResponse) String

func (x *CompileContextResponse) String() string

type CompiledContext

type CompiledContext struct {

	// The context information compiled by the `CompileContext` method.
	Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
	// contains filtered or unexported fields
}

Context information in a compact form that is efficient for use in subsequent recognition requests. The size of the compiled form will depend on the amount of text that was sent for compilation. For 1000 words it's generally less than 100 kilobytes.

func (*CompiledContext) Descriptor deprecated

func (*CompiledContext) Descriptor() ([]byte, []int)

Deprecated: Use CompiledContext.ProtoReflect.Descriptor instead.

func (*CompiledContext) GetData

func (x *CompiledContext) GetData() []byte

func (*CompiledContext) ProtoMessage

func (*CompiledContext) ProtoMessage()

func (*CompiledContext) ProtoReflect

func (x *CompiledContext) ProtoReflect() protoreflect.Message

func (*CompiledContext) Reset

func (x *CompiledContext) Reset()

func (*CompiledContext) String

func (x *CompiledContext) String() string

type ConfusionNetworkArc

type ConfusionNetworkArc struct {

	// Word in the recognized transcript
	Word string `protobuf:"bytes,1,opt,name=word,proto3" json:"word,omitempty"`
	// Confidence estimate between 0 and 1. A higher number represents a higher
	// likelihood that the word was correctly recognized.
	Confidence float64 `protobuf:"fixed64,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// Features related to this arc
	Features *ConfusionNetworkArcFeatures `protobuf:"bytes,3,opt,name=features,proto3" json:"features,omitempty"`
	// contains filtered or unexported fields
}

An Arc inside a Confusion Network Link

func (*ConfusionNetworkArc) Descriptor deprecated

func (*ConfusionNetworkArc) Descriptor() ([]byte, []int)

Deprecated: Use ConfusionNetworkArc.ProtoReflect.Descriptor instead.

func (*ConfusionNetworkArc) GetConfidence

func (x *ConfusionNetworkArc) GetConfidence() float64

func (*ConfusionNetworkArc) GetFeatures

func (*ConfusionNetworkArc) GetWord

func (x *ConfusionNetworkArc) GetWord() string

func (*ConfusionNetworkArc) ProtoMessage

func (*ConfusionNetworkArc) ProtoMessage()

func (*ConfusionNetworkArc) ProtoReflect

func (x *ConfusionNetworkArc) ProtoReflect() protoreflect.Message

func (*ConfusionNetworkArc) Reset

func (x *ConfusionNetworkArc) Reset()

func (*ConfusionNetworkArc) String

func (x *ConfusionNetworkArc) String() string

type ConfusionNetworkArcFeatures

type ConfusionNetworkArcFeatures struct {

	// A map of features that are used for recalculating confidence scores of this
	// confusion network arc
	Confidence map[string]float64 `` /* 163-byte string literal not displayed */
	// contains filtered or unexported fields
}

Features related to confusion network arcs

func (*ConfusionNetworkArcFeatures) Descriptor deprecated

func (*ConfusionNetworkArcFeatures) Descriptor() ([]byte, []int)

Deprecated: Use ConfusionNetworkArcFeatures.ProtoReflect.Descriptor instead.

func (*ConfusionNetworkArcFeatures) GetConfidence

func (x *ConfusionNetworkArcFeatures) GetConfidence() map[string]float64

func (*ConfusionNetworkArcFeatures) ProtoMessage

func (*ConfusionNetworkArcFeatures) ProtoMessage()

func (*ConfusionNetworkArcFeatures) ProtoReflect

func (*ConfusionNetworkArcFeatures) Reset

func (x *ConfusionNetworkArcFeatures) Reset()

func (*ConfusionNetworkArcFeatures) String

func (x *ConfusionNetworkArcFeatures) String() string

type ConfusionNetworkLink

type ConfusionNetworkLink struct {

	// Time offset in milliseconds relative to the beginning of audio received by
	// the recognizer and corresponding to the start of this link
	StartTimeMs uint64 `protobuf:"varint,1,opt,name=start_time_ms,json=startTimeMs,proto3" json:"start_time_ms,omitempty"`
	// Duration in milliseconds of the current link in the confusion network
	DurationMs uint64 `protobuf:"varint,2,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"`
	// Arcs within this link
	Arcs []*ConfusionNetworkArc `protobuf:"bytes,3,rep,name=arcs,proto3" json:"arcs,omitempty"`
	// contains filtered or unexported fields
}

A Link inside a confusion network

func (*ConfusionNetworkLink) Descriptor deprecated

func (*ConfusionNetworkLink) Descriptor() ([]byte, []int)

Deprecated: Use ConfusionNetworkLink.ProtoReflect.Descriptor instead.

func (*ConfusionNetworkLink) GetArcs

func (x *ConfusionNetworkLink) GetArcs() []*ConfusionNetworkArc

func (*ConfusionNetworkLink) GetDurationMs

func (x *ConfusionNetworkLink) GetDurationMs() uint64

func (*ConfusionNetworkLink) GetStartTimeMs

func (x *ConfusionNetworkLink) GetStartTimeMs() uint64

func (*ConfusionNetworkLink) ProtoMessage

func (*ConfusionNetworkLink) ProtoMessage()

func (*ConfusionNetworkLink) ProtoReflect

func (x *ConfusionNetworkLink) ProtoReflect() protoreflect.Message

func (*ConfusionNetworkLink) Reset

func (x *ConfusionNetworkLink) Reset()

func (*ConfusionNetworkLink) String

func (x *ConfusionNetworkLink) String() string

type ContextInfo

type ContextInfo struct {

	// If this is set to true, the model supports taking context information into
	// account to aid speech recognition. The information may be sent with
	// recognition requests via RecognitionContext inside RecognitionConfig.
	SupportsContext bool `protobuf:"varint,1,opt,name=supports_context,json=supportsContext,proto3" json:"supports_context,omitempty"`
	// A list of tokens (e.g. "name", "airport", etc.) that serve as placeholders
	// in the model where a client provided list of phrases or words may be used
	// to aid speech recognition and produce the exact desired recognition output.
	AllowedContextTokens []string `protobuf:"bytes,2,rep,name=allowed_context_tokens,json=allowedContextTokens,proto3" json:"allowed_context_tokens,omitempty"`
	// contains filtered or unexported fields
}

Model information specific to supporting recognition context.

func (*ContextInfo) Descriptor deprecated

func (*ContextInfo) Descriptor() ([]byte, []int)

Deprecated: Use ContextInfo.ProtoReflect.Descriptor instead.

func (*ContextInfo) GetAllowedContextTokens

func (x *ContextInfo) GetAllowedContextTokens() []string

func (*ContextInfo) GetSupportsContext

func (x *ContextInfo) GetSupportsContext() bool

func (*ContextInfo) ProtoMessage

func (*ContextInfo) ProtoMessage()

func (*ContextInfo) ProtoReflect

func (x *ContextInfo) ProtoReflect() protoreflect.Message

func (*ContextInfo) Reset

func (x *ContextInfo) Reset()

func (*ContextInfo) String

func (x *ContextInfo) String() string

type ContextPhrase

type ContextPhrase struct {

	// The actual phrase or word.
	Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
	// This is an optional field. The boost factor is a positive number which is
	// used to multiply the probability of the phrase or word appearing in the
	// output. This setting can be used to differentiate between similar sounding
	// words, with the desired word given a bigger boost factor.
	//
	// By default, all phrases or words provided in the `RecognitionContext` are
	// given an equal probability of occurring. Boost factors larger than 1 make
	// the phrase or word more probable and boost factors less than 1 make it less
	// likely. A boost factor of 2 corresponds to making the phrase or word twice
	// as likely, while a boost factor of 0.5 means half as likely.
	Boost float32 `protobuf:"fixed32,2,opt,name=boost,proto3" json:"boost,omitempty"`
	// contains filtered or unexported fields
}

A phrase or word that is to be compiled into context information that can be later used to improve speech recognition during a `StreamingRecognize` call. Along with the phrase or word itself, there is an optional boost parameter that can be used to boost the likelihood of the phrase or word in the recognition output.

func (*ContextPhrase) Descriptor deprecated

func (*ContextPhrase) Descriptor() ([]byte, []int)

Deprecated: Use ContextPhrase.ProtoReflect.Descriptor instead.

func (*ContextPhrase) GetBoost

func (x *ContextPhrase) GetBoost() float32

func (*ContextPhrase) GetText

func (x *ContextPhrase) GetText() string

func (*ContextPhrase) ProtoMessage

func (*ContextPhrase) ProtoMessage()

func (*ContextPhrase) ProtoReflect

func (x *ContextPhrase) ProtoReflect() protoreflect.Message

func (*ContextPhrase) Reset

func (x *ContextPhrase) Reset()

func (*ContextPhrase) String

func (x *ContextPhrase) String() string

type ListModelsRequest

type ListModelsRequest struct {
	// contains filtered or unexported fields
}

The top-level message sent by the client for the `ListModels` method.

func (*ListModelsRequest) Descriptor deprecated

func (*ListModelsRequest) Descriptor() ([]byte, []int)

Deprecated: Use ListModelsRequest.ProtoReflect.Descriptor instead.

func (*ListModelsRequest) ProtoMessage

func (*ListModelsRequest) ProtoMessage()

func (*ListModelsRequest) ProtoReflect

func (x *ListModelsRequest) ProtoReflect() protoreflect.Message

func (*ListModelsRequest) Reset

func (x *ListModelsRequest) Reset()

func (*ListModelsRequest) String

func (x *ListModelsRequest) String() string

type ListModelsResponse

type ListModelsResponse struct {

	// List of models available for use that match the request.
	Models []*Model `protobuf:"bytes,1,rep,name=models,proto3" json:"models,omitempty"`
	// contains filtered or unexported fields
}

The message returned to the client by the `ListModels` method.

func (*ListModelsResponse) Descriptor deprecated

func (*ListModelsResponse) Descriptor() ([]byte, []int)

Deprecated: Use ListModelsResponse.ProtoReflect.Descriptor instead.

func (*ListModelsResponse) GetModels

func (x *ListModelsResponse) GetModels() []*Model

func (*ListModelsResponse) ProtoMessage

func (*ListModelsResponse) ProtoMessage()

func (*ListModelsResponse) ProtoReflect

func (x *ListModelsResponse) ProtoReflect() protoreflect.Message

func (*ListModelsResponse) Reset

func (x *ListModelsResponse) Reset()

func (*ListModelsResponse) String

func (x *ListModelsResponse) String() string

type Model

type Model struct {

	// Unique identifier of the model. This identifier is used to choose the model
	// that should be used for recognition, and is specified in the
	// `RecognitionConfig` message.
	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
	// Model name. This is a concise name describing the model, and may be
	// presented to the end-user, for example, to help choose which model to use
	// for their recognition task.
	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
	// Model attributes
	Attributes *ModelAttributes `protobuf:"bytes,3,opt,name=attributes,proto3" json:"attributes,omitempty"`
	// contains filtered or unexported fields
}

Description of a Transcribe Model

func (*Model) Descriptor deprecated

func (*Model) Descriptor() ([]byte, []int)

Deprecated: Use Model.ProtoReflect.Descriptor instead.

func (*Model) GetAttributes

func (x *Model) GetAttributes() *ModelAttributes

func (*Model) GetId

func (x *Model) GetId() string

func (*Model) GetName

func (x *Model) GetName() string

func (*Model) ProtoMessage

func (*Model) ProtoMessage()

func (*Model) ProtoReflect

func (x *Model) ProtoReflect() protoreflect.Message

func (*Model) Reset

func (x *Model) Reset()

func (*Model) String

func (x *Model) String() string

type ModelAttributes

type ModelAttributes struct {

	// Audio sample rate (native) supported by the model
	SampleRate uint32 `protobuf:"varint,1,opt,name=sample_rate,json=sampleRate,proto3" json:"sample_rate,omitempty"`
	// Attributes specific to supporting recognition context.
	ContextInfo *ContextInfo `protobuf:"bytes,2,opt,name=context_info,json=contextInfo,proto3" json:"context_info,omitempty"`
	// Audio sample rates other than the model's native sample rate (sample_rate) that can be accommodated
	SupportedSampleRates []uint32 `` /* 131-byte string literal not displayed */
	// contains filtered or unexported fields
}

Attributes of a Transcribe Model

func (*ModelAttributes) Descriptor deprecated

func (*ModelAttributes) Descriptor() ([]byte, []int)

Deprecated: Use ModelAttributes.ProtoReflect.Descriptor instead.

func (*ModelAttributes) GetContextInfo

func (x *ModelAttributes) GetContextInfo() *ContextInfo

func (*ModelAttributes) GetSampleRate

func (x *ModelAttributes) GetSampleRate() uint32

func (*ModelAttributes) GetSupportedSampleRates

func (x *ModelAttributes) GetSupportedSampleRates() []uint32

func (*ModelAttributes) ProtoMessage

func (*ModelAttributes) ProtoMessage()

func (*ModelAttributes) ProtoReflect

func (x *ModelAttributes) ProtoReflect() protoreflect.Message

func (*ModelAttributes) Reset

func (x *ModelAttributes) Reset()

func (*ModelAttributes) String

func (x *ModelAttributes) String() string

type RecognitionAlternative

type RecognitionAlternative struct {

	// Text representing the transcription of the words that the user spoke.
	//
	// The transcript will be formatted according to the server's formatting
	// configuration. If you want the raw transcript, please see the field
	// `transcript_raw`. If the server is configured to not use any formatting,
	// then this field will contain the raw transcript.
	//
	// As an example, if the spoken utterance was "four people", and the server
	// was configured to format numbers, this field would be set to "4 people".
	TranscriptFormatted string `protobuf:"bytes,1,opt,name=transcript_formatted,json=transcriptFormatted,proto3" json:"transcript_formatted,omitempty"`
	// Text representing the transcription of the words that the user spoke,
	// without any formatting applied. If you want the formatted transcript,
	// please see the field `transcript_formatted`.
	//
	// As an example, if the spoken utterance was `four people`, this field would
	// be set to "FOUR PEOPLE".
	TranscriptRaw string `protobuf:"bytes,2,opt,name=transcript_raw,json=transcriptRaw,proto3" json:"transcript_raw,omitempty"`
	// Time offset in milliseconds relative to the beginning of audio received by
	// the recognizer and corresponding to the start of this utterance.
	StartTimeMs uint64 `protobuf:"varint,3,opt,name=start_time_ms,json=startTimeMs,proto3" json:"start_time_ms,omitempty"`
	// Duration in milliseconds of the current utterance in the spoken audio.
	DurationMs uint64 `protobuf:"varint,4,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"`
	// Confidence estimate between 0 and 1. A higher number represents a higher
	// likelihood of the output being correct.
	Confidence float64 `protobuf:"fixed64,5,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// Word-level details corresponding to the transcripts. This is available only
	// if `enable_word_details` was set to `true` in the `RecognitionConfig`.
	WordDetails *WordDetails `protobuf:"bytes,6,opt,name=word_details,json=wordDetails,proto3" json:"word_details,omitempty"`
	// contains filtered or unexported fields
}

A recognition hypothesis

func (*RecognitionAlternative) Descriptor deprecated

func (*RecognitionAlternative) Descriptor() ([]byte, []int)

Deprecated: Use RecognitionAlternative.ProtoReflect.Descriptor instead.

func (*RecognitionAlternative) GetConfidence

func (x *RecognitionAlternative) GetConfidence() float64

func (*RecognitionAlternative) GetDurationMs

func (x *RecognitionAlternative) GetDurationMs() uint64

func (*RecognitionAlternative) GetStartTimeMs

func (x *RecognitionAlternative) GetStartTimeMs() uint64

func (*RecognitionAlternative) GetTranscriptFormatted

func (x *RecognitionAlternative) GetTranscriptFormatted() string

func (*RecognitionAlternative) GetTranscriptRaw

func (x *RecognitionAlternative) GetTranscriptRaw() string

func (*RecognitionAlternative) GetWordDetails

func (x *RecognitionAlternative) GetWordDetails() *WordDetails

func (*RecognitionAlternative) ProtoMessage

func (*RecognitionAlternative) ProtoMessage()

func (*RecognitionAlternative) ProtoReflect

func (x *RecognitionAlternative) ProtoReflect() protoreflect.Message

func (*RecognitionAlternative) Reset

func (x *RecognitionAlternative) Reset()

func (*RecognitionAlternative) String

func (x *RecognitionAlternative) String() string

type RecognitionAudio

type RecognitionAudio struct {
	Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
	// contains filtered or unexported fields
}

Audio to be sent to the recognizer

func (*RecognitionAudio) Descriptor deprecated

func (*RecognitionAudio) Descriptor() ([]byte, []int)

Deprecated: Use RecognitionAudio.ProtoReflect.Descriptor instead.

func (*RecognitionAudio) GetData

func (x *RecognitionAudio) GetData() []byte

func (*RecognitionAudio) ProtoMessage

func (*RecognitionAudio) ProtoMessage()

func (*RecognitionAudio) ProtoReflect

func (x *RecognitionAudio) ProtoReflect() protoreflect.Message

func (*RecognitionAudio) Reset

func (x *RecognitionAudio) Reset()

func (*RecognitionAudio) String

func (x *RecognitionAudio) String() string

type RecognitionConfig

type RecognitionConfig struct {

	// Unique identifier of the model to use, as obtained from a `Model` message.
	ModelId string `protobuf:"bytes,1,opt,name=model_id,json=modelId,proto3" json:"model_id,omitempty"`
	// Format of the audio to be sent for recognition.
	//
	// Depending on how they are configured, server instances of this service may
	// not support all the formats provided in the API. One format that is
	// guaranteed to be supported is the RAW format with little-endian 16-bit
	// signed samples with the sample rate matching that of the model being
	// requested.
	//
	// Types that are assignable to AudioFormat:
	//
	//	*RecognitionConfig_AudioFormatRaw
	//	*RecognitionConfig_AudioFormatHeadered
	AudioFormat isRecognitionConfig_AudioFormat `protobuf_oneof:"audio_format"`
	// This is an optional field. If the audio has multiple channels, this field
	// can be configured with the list of channel indices that should be
	// considered for the recognition task. These channels are 0-indexed.
	//
	// Example: `[0]` for a mono file, `[0, 1]` for a stereo file.
	// Example: `[1]` to only transcribe the second channel of a stereo file.
	//
	// If this field is not set, all the channels in the audio will be processed.
	//
	// Channels that are present in the audio may be omitted, but it is an error
	// to include a channel index in this field that is not present in the audio.
	// Channels may be listed in any order but the same index may not be repeated
	// in this list.
	//
	// BAD: `[0, 2]` for a stereo file; BAD: `[0, 0]` for a mono file.
	SelectedAudioChannels []uint32 `` /* 134-byte string literal not displayed */
	// This is an optional field. It can be used to indicate that the audio being
	// streamed to the recognizer is offset from the original stream by the
	// provided duration in milliseconds. This offset will be added to all
	// timestamps in results returned by the recognizer.
	//
	// The default value of this field is 0ms, so the timestamps in the
	// recognition result will not be modified.
	//
	// Example use case where this field can be helpful: if a recognition session
	// was interrupted and audio needs to be sent to a new session from the point
	// where the session was previously interrupted, the offset could be set to
	// the point where the interruption had happened.
	AudioTimeOffsetMs uint64 `protobuf:"varint,5,opt,name=audio_time_offset_ms,json=audioTimeOffsetMs,proto3" json:"audio_time_offset_ms,omitempty"`
	// This is an optional field. If this is set to `true`, each result will
	// include word level details of the transcript. These details are specified
	// in the `WordDetails` message. If set to `false`, no word-level details will
	// be returned. The default is `false`.
	EnableWordDetails bool `protobuf:"varint,6,opt,name=enable_word_details,json=enableWordDetails,proto3" json:"enable_word_details,omitempty"`
	// This is an optional field. If this is set to true, each result will include
	// a confusion network. If set to `false`, no confusion network will be
	// returned. The default is `false`. If the model being used does not support
	// returning a confusion network, this field will have no effect. Tokens in
	// the confusion network always correspond to tokens in the `transcript_raw`
	// returned.
	EnableConfusionNetwork bool `` /* 130-byte string literal not displayed */
	// This is an optional field. If there is any metadata associated with the
	// audio being sent, use this field to provide it to the recognizer. The
	// server may record this metadata when processing the request. The server
	// does not use this field for any other purpose.
	Metadata *RecognitionMetadata `protobuf:"bytes,8,opt,name=metadata,proto3" json:"metadata,omitempty"`
	// This is an optional field for providing any additional context information
	// that may aid speech recognition. This can also be used to add
	// out-of-vocabulary words to the model or boost recognition of specific
	// proper names or commands. Context information must be pre-compiled via the
	// `CompileContext()` method.
	Context *RecognitionContext `protobuf:"bytes,9,opt,name=context,proto3" json:"context,omitempty"`
	// contains filtered or unexported fields
}

Configuration for setting up a Recognizer
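For example, a minimal configuration for raw, 16-bit, little-endian, signed PCM (the one format the documentation above guarantees support for), again using the assumed transcribepb alias and with the sample rate expected to match the chosen model:

func newRawPCMConfig(modelID string) *transcribepb.RecognitionConfig {
	return &transcribepb.RecognitionConfig{
		ModelId: modelID,
		AudioFormat: &transcribepb.RecognitionConfig_AudioFormatRaw{
			AudioFormatRaw: &transcribepb.AudioFormatRAW{
				Encoding:   transcribepb.AudioEncoding_AUDIO_ENCODING_SIGNED,
				BitDepth:   16,
				ByteOrder:  transcribepb.ByteOrder_BYTE_ORDER_LITTLE_ENDIAN,
				SampleRate: 16000, // should match the model's native or supported sample rates
				Channels:   1,
			},
		},
		EnableWordDetails: true, // request WordDetails in each result
	}
}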

func (*RecognitionConfig) Descriptor deprecated

func (*RecognitionConfig) Descriptor() ([]byte, []int)

Deprecated: Use RecognitionConfig.ProtoReflect.Descriptor instead.

func (*RecognitionConfig) GetAudioFormat

func (m *RecognitionConfig) GetAudioFormat() isRecognitionConfig_AudioFormat

func (*RecognitionConfig) GetAudioFormatHeadered

func (x *RecognitionConfig) GetAudioFormatHeadered() AudioFormatHeadered

func (*RecognitionConfig) GetAudioFormatRaw

func (x *RecognitionConfig) GetAudioFormatRaw() *AudioFormatRAW

func (*RecognitionConfig) GetAudioTimeOffsetMs

func (x *RecognitionConfig) GetAudioTimeOffsetMs() uint64

func (*RecognitionConfig) GetContext

func (x *RecognitionConfig) GetContext() *RecognitionContext

func (*RecognitionConfig) GetEnableConfusionNetwork

func (x *RecognitionConfig) GetEnableConfusionNetwork() bool

func (*RecognitionConfig) GetEnableWordDetails

func (x *RecognitionConfig) GetEnableWordDetails() bool

func (*RecognitionConfig) GetMetadata

func (x *RecognitionConfig) GetMetadata() *RecognitionMetadata

func (*RecognitionConfig) GetModelId

func (x *RecognitionConfig) GetModelId() string

func (*RecognitionConfig) GetSelectedAudioChannels

func (x *RecognitionConfig) GetSelectedAudioChannels() []uint32

func (*RecognitionConfig) ProtoMessage

func (*RecognitionConfig) ProtoMessage()

func (*RecognitionConfig) ProtoReflect

func (x *RecognitionConfig) ProtoReflect() protoreflect.Message

func (*RecognitionConfig) Reset

func (x *RecognitionConfig) Reset()

func (*RecognitionConfig) String

func (x *RecognitionConfig) String() string

type RecognitionConfig_AudioFormatHeadered

type RecognitionConfig_AudioFormatHeadered struct {
	// Audio has a self-describing header. Headers are expected to be sent at
	// the beginning of the entire audio file/stream, and not in every
	// `RecognitionAudio` message.
	//
	// The default value of this type is AUDIO_FORMAT_HEADERED_UNSPECIFIED. If
	// this value is used, the server may attempt to detect the format of the
	// audio. However, it is recommended that the exact format be specified.
	AudioFormatHeadered AudioFormatHeadered `` /* 140-byte string literal not displayed */
}

type RecognitionConfig_AudioFormatRaw

type RecognitionConfig_AudioFormatRaw struct {
	// Audio is raw data without any headers
	AudioFormatRaw *AudioFormatRAW `protobuf:"bytes,2,opt,name=audio_format_raw,json=audioFormatRaw,proto3,oneof"`
}

type RecognitionConfusionNetwork

type RecognitionConfusionNetwork struct {
	Links []*ConfusionNetworkLink `protobuf:"bytes,1,rep,name=links,proto3" json:"links,omitempty"`
	// contains filtered or unexported fields
}

Confusion network in recognition output

func (*RecognitionConfusionNetwork) Descriptor deprecated

func (*RecognitionConfusionNetwork) Descriptor() ([]byte, []int)

Deprecated: Use RecognitionConfusionNetwork.ProtoReflect.Descriptor instead.

func (*RecognitionConfusionNetwork) ProtoMessage

func (*RecognitionConfusionNetwork) ProtoMessage()

func (*RecognitionConfusionNetwork) ProtoReflect

func (*RecognitionConfusionNetwork) Reset

func (x *RecognitionConfusionNetwork) Reset()

func (*RecognitionConfusionNetwork) String

func (x *RecognitionConfusionNetwork) String() string

type RecognitionContext

type RecognitionContext struct {

	// List of compiled context information, with each entry being compiled from a
	// list of words or phrases using the `CompileContext` method.
	Compiled []*CompiledContext `protobuf:"bytes,1,rep,name=compiled,proto3" json:"compiled,omitempty"`
	// contains filtered or unexported fields
}

A collection of additional context information that may aid speech recognition. This can be used to add out-of-vocabulary words to the model or to boost recognition of specific proper names or commands.

func (*RecognitionContext) Descriptor deprecated

func (*RecognitionContext) Descriptor() ([]byte, []int)

Deprecated: Use RecognitionContext.ProtoReflect.Descriptor instead.

func (*RecognitionContext) GetCompiled

func (x *RecognitionContext) GetCompiled() []*CompiledContext

func (*RecognitionContext) ProtoMessage

func (*RecognitionContext) ProtoMessage()

func (*RecognitionContext) ProtoReflect

func (x *RecognitionContext) ProtoReflect() protoreflect.Message

func (*RecognitionContext) Reset

func (x *RecognitionContext) Reset()

func (*RecognitionContext) String

func (x *RecognitionContext) String() string

type RecognitionError

type RecognitionError struct {
	Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"`
	// contains filtered or unexported fields
}

Developer-facing error message about a non-fatal recognition issue.

func (*RecognitionError) Descriptor deprecated

func (*RecognitionError) Descriptor() ([]byte, []int)

Deprecated: Use RecognitionError.ProtoReflect.Descriptor instead.

func (*RecognitionError) GetMessage

func (x *RecognitionError) GetMessage() string

func (*RecognitionError) ProtoMessage

func (*RecognitionError) ProtoMessage()

func (*RecognitionError) ProtoReflect

func (x *RecognitionError) ProtoReflect() protoreflect.Message

func (*RecognitionError) Reset

func (x *RecognitionError) Reset()

func (*RecognitionError) String

func (x *RecognitionError) String() string

type RecognitionMetadata

type RecognitionMetadata struct {

	// Any custom metadata that the client wants to associate with the recording.
	// This could be a simple string (e.g. a tracing ID) or structured data
	// (e.g. JSON).
	CustomMetadata string `protobuf:"bytes,1,opt,name=custom_metadata,json=customMetadata,proto3" json:"custom_metadata,omitempty"`
	// contains filtered or unexported fields
}

Metadata associated with the audio to be recognized.

func (*RecognitionMetadata) Descriptor deprecated

func (*RecognitionMetadata) Descriptor() ([]byte, []int)

Deprecated: Use RecognitionMetadata.ProtoReflect.Descriptor instead.

func (*RecognitionMetadata) GetCustomMetadata

func (x *RecognitionMetadata) GetCustomMetadata() string

func (*RecognitionMetadata) ProtoMessage

func (*RecognitionMetadata) ProtoMessage()

func (*RecognitionMetadata) ProtoReflect

func (x *RecognitionMetadata) ProtoReflect() protoreflect.Message

func (*RecognitionMetadata) Reset

func (x *RecognitionMetadata) Reset()

func (*RecognitionMetadata) String

func (x *RecognitionMetadata) String() string

type RecognitionResult

type RecognitionResult struct {

	// An n-best list of recognition hypotheses (alternatives)
	Alternatives []*RecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
	// If this is set to true, it denotes that the result is an interim partial
	// result, and could change after more audio is processed. If unset, or set to
	// false, it denotes that this is a final result and will not change.
	//
	// Servers are not required to implement support for returning partial
	// results, and clients should generally not depend on their availability.
	IsPartial bool `protobuf:"varint,2,opt,name=is_partial,json=isPartial,proto3" json:"is_partial,omitempty"`
	// If `enable_confusion_network` was set to true in the `RecognitionConfig`,
	// and if the model supports it, a confusion network will be available in the
	// results.
	Cnet *RecognitionConfusionNetwork `protobuf:"bytes,3,opt,name=cnet,proto3" json:"cnet,omitempty"`
	// Channel of the audio file that this result was transcribed from. Channels
	// are 0-indexed, so for mono audio data, this value will always be 0.
	AudioChannel uint32 `protobuf:"varint,4,opt,name=audio_channel,json=audioChannel,proto3" json:"audio_channel,omitempty"`
	// contains filtered or unexported fields
}

A recognition result corresponding to a portion of audio.

func (*RecognitionResult) Descriptor deprecated

func (*RecognitionResult) Descriptor() ([]byte, []int)

Deprecated: Use RecognitionResult.ProtoReflect.Descriptor instead.

func (*RecognitionResult) GetAlternatives

func (x *RecognitionResult) GetAlternatives() []*RecognitionAlternative

func (*RecognitionResult) GetAudioChannel

func (x *RecognitionResult) GetAudioChannel() uint32

func (*RecognitionResult) GetCnet

func (*RecognitionResult) GetIsPartial

func (x *RecognitionResult) GetIsPartial() bool

func (*RecognitionResult) ProtoMessage

func (*RecognitionResult) ProtoMessage()

func (*RecognitionResult) ProtoReflect

func (x *RecognitionResult) ProtoReflect() protoreflect.Message

func (*RecognitionResult) Reset

func (x *RecognitionResult) Reset()

func (*RecognitionResult) String

func (x *RecognitionResult) String() string

type StreamingRecognizeRequest

type StreamingRecognizeRequest struct {

	// Types that are assignable to Request:
	//
	//	*StreamingRecognizeRequest_Config
	//	*StreamingRecognizeRequest_Audio
	Request isStreamingRecognizeRequest_Request `protobuf_oneof:"request"`
	// contains filtered or unexported fields
}

The top-level messages sent by the client for the `StreamingRecognize` method. In this streaming call, multiple `StreamingRecognizeRequest` messages should be sent. The first message must contain a `RecognitionConfig` message only, and all subsequent messages must contain `RecognitionAudio` only. All `RecognitionAudio` messages must contain non-empty audio. If audio content is empty, the server may choose to interpret it as end of stream and stop accepting any further messages.
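A sketch of the send side of that contract, assuming the transcribepb alias from earlier, an open TranscribeService_StreamingRecognizeClient stream, and audio supplied through an io.Reader:

func sendAudio(stream transcribepb.TranscribeService_StreamingRecognizeClient, cfg *transcribepb.RecognitionConfig, audio io.Reader) error {
	// First message: configuration only.
	if err := stream.Send(&transcribepb.StreamingRecognizeRequest{
		Request: &transcribepb.StreamingRecognizeRequest_Config{Config: cfg},
	}); err != nil {
		return err
	}
	// Subsequent messages: non-empty audio chunks.
	buf := make([]byte, 8192)
	for {
		n, err := audio.Read(buf)
		if n > 0 {
			chunk := append([]byte(nil), buf[:n]...) // copy so the buffer can be reused
			if sendErr := stream.Send(&transcribepb.StreamingRecognizeRequest{
				Request: &transcribepb.StreamingRecognizeRequest_Audio{
					Audio: &transcribepb.RecognitionAudio{Data: chunk},
				},
			}); sendErr != nil {
				return sendErr
			}
		}
		if err == io.EOF {
			return stream.CloseSend() // no more audio; keep calling Recv for remaining results
		}
		if err != nil {
			return err
		}
	}
}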

func (*StreamingRecognizeRequest) Descriptor deprecated

func (*StreamingRecognizeRequest) Descriptor() ([]byte, []int)

Deprecated: Use StreamingRecognizeRequest.ProtoReflect.Descriptor instead.

func (*StreamingRecognizeRequest) GetAudio

func (*StreamingRecognizeRequest) GetConfig

func (*StreamingRecognizeRequest) GetRequest

func (m *StreamingRecognizeRequest) GetRequest() isStreamingRecognizeRequest_Request

func (*StreamingRecognizeRequest) ProtoMessage

func (*StreamingRecognizeRequest) ProtoMessage()

func (*StreamingRecognizeRequest) ProtoReflect

func (*StreamingRecognizeRequest) Reset

func (x *StreamingRecognizeRequest) Reset()

func (*StreamingRecognizeRequest) String

func (x *StreamingRecognizeRequest) String() string

type StreamingRecognizeRequest_Audio

type StreamingRecognizeRequest_Audio struct {
	Audio *RecognitionAudio `protobuf:"bytes,2,opt,name=audio,proto3,oneof"`
}

type StreamingRecognizeRequest_Config

type StreamingRecognizeRequest_Config struct {
	Config *RecognitionConfig `protobuf:"bytes,1,opt,name=config,proto3,oneof"`
}

type StreamingRecognizeResponse

type StreamingRecognizeResponse struct {

	// A new recognition result. This field will be unset if a new result is not
	// yet available.
	Result *RecognitionResult `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"`
	// A non-fatal error message. If a server encountered a non-fatal error when
	// processing the recognition request, it will be returned in this message.
	// The server will continue to process audio and produce further results.
	// Clients can continue streaming audio even after receiving these messages.
	// This error message is meant to be informational.
	//
	// An example of when these errors may be produced: audio is sampled at a
	// lower rate than expected by the model, producing possibly less accurate
	// results.
	//
	// This field will be unset if there is no error to report.
	Error *RecognitionError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
	// contains filtered or unexported fields
}

The messages returned by the server for the `StreamingRecognize` request. Multiple messages of this type will be delivered on the stream, for multiple results, as soon as results are available from the audio submitted so far. If the audio has multiple channels, the results of all channels will be interleaved. Results of each individual channel will be chronological. However, there is no guarantee of the order of results across channels.

Clients should process both the `result` and `error` fields in each message. At least one of these fields will be present in the message. If both `result` and `error` are present, the result is still valid.
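A matching receive-loop sketch that drains the stream and handles both fields; the transcribepb alias plus the io and log imports are assumed, and only final (non-partial) results are printed here.

func receiveResults(stream transcribepb.TranscribeService_StreamingRecognizeClient) error {
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil // server has sent all results
		}
		if err != nil {
			return err
		}
		if e := resp.GetError(); e != nil {
			log.Printf("non-fatal recognition issue: %s", e.GetMessage())
		}
		if r := resp.GetResult(); r != nil && !r.GetIsPartial() {
			for _, alt := range r.GetAlternatives() {
				log.Printf("channel %d: %s", r.GetAudioChannel(), alt.GetTranscriptFormatted())
			}
		}
	}
}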

func (*StreamingRecognizeResponse) Descriptor deprecated

func (*StreamingRecognizeResponse) Descriptor() ([]byte, []int)

Deprecated: Use StreamingRecognizeResponse.ProtoReflect.Descriptor instead.

func (*StreamingRecognizeResponse) GetError

func (*StreamingRecognizeResponse) GetResult

func (*StreamingRecognizeResponse) ProtoMessage

func (*StreamingRecognizeResponse) ProtoMessage()

func (*StreamingRecognizeResponse) ProtoReflect

func (*StreamingRecognizeResponse) Reset

func (x *StreamingRecognizeResponse) Reset()

func (*StreamingRecognizeResponse) String

func (x *StreamingRecognizeResponse) String() string

type TranscribeServiceClient

type TranscribeServiceClient interface {
	// Queries the version of the server.
	Version(ctx context.Context, in *VersionRequest, opts ...grpc.CallOption) (*VersionResponse, error)
	// Retrieves a list of available speech recognition models.
	ListModels(ctx context.Context, in *ListModelsRequest, opts ...grpc.CallOption) (*ListModelsResponse, error)
	// Performs bidirectional streaming speech recognition. Receive results while
	// sending audio. This method is only available via GRPC and not via
	// HTTP+JSON. However, a web browser may use websockets to use this service.
	StreamingRecognize(ctx context.Context, opts ...grpc.CallOption) (TranscribeService_StreamingRecognizeClient, error)
	// Compiles recognition context information, such as a specialized list of
	// words or phrases, into a compact, efficient form to send with subsequent
	// `StreamingRecognize` requests to customize speech recognition. For example,
	// a list of contact names may be compiled in a mobile app and sent with each
	// recognition request so that the app user's contact names are more likely to
	// be recognized than arbitrary names. This pre-compilation ensures that there
	// is no added latency for the recognition request. It is important to note
	// that in order to compile context for a model, that model has to support
	// context in the first place, which can be verified by checking its
	// `ModelAttributes.ContextInfo` obtained via the `ListModels` method. Also,
	// the compiled data will be model specific; that is, the data compiled for
	// one model will generally not be usable with a different model.
	CompileContext(ctx context.Context, in *CompileContextRequest, opts ...grpc.CallOption) (*CompileContextResponse, error)
}

TranscribeServiceClient is the client API for TranscribeService service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
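A minimal sketch of obtaining such a client. The NewTranscribeServiceClient constructor is the one conventionally emitted by protoc-gen-go-grpc and is assumed to exist in this package; the plaintext credentials (google.golang.org/grpc/credentials/insecure) are for illustration only.

func dialTranscribe(addr string) (transcribepb.TranscribeServiceClient, *grpc.ClientConn, error) {
	// Plaintext connection for illustration; production deployments would
	// normally pass TLS credentials instead of insecure ones.
	conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return nil, nil, err
	}
	// NewTranscribeServiceClient is assumed from the generated gRPC code.
	return transcribepb.NewTranscribeServiceClient(conn), conn, nil
}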

type TranscribeServiceServer

type TranscribeServiceServer interface {
	// Queries the version of the server.
	Version(context.Context, *VersionRequest) (*VersionResponse, error)
	// Retrieves a list of available speech recognition models.
	ListModels(context.Context, *ListModelsRequest) (*ListModelsResponse, error)
	// Performs bidirectional streaming speech recognition. Receive results while
	// sending audio. This method is only available via GRPC and not via
	// HTTP+JSON. However, a web browser may use websockets to use this service.
	StreamingRecognize(TranscribeService_StreamingRecognizeServer) error
	// Compiles recognition context information, such as a specialized list of
	// words or phrases, into a compact, efficient form to send with subsequent
	// `StreamingRecognize` requests to customize speech recognition. For example,
	// a list of contact names may be compiled in a mobile app and sent with each
	// recognition request so that the app user's contact names are more likely to
	// be recognized than arbitrary names. This pre-compilation ensures that there
	// is no added latency for the recognition request. It is important to note
	// that in order to compile context for a model, that model has to support
	// context in the first place, which can be verified by checking its
	// `ModelAttributes.ContextInfo` obtained via the `ListModels` method. Also,
	// the compiled data will be model specific; that is, the data compiled for
	// one model will generally not be usable with a different model.
	CompileContext(context.Context, *CompileContextRequest) (*CompileContextResponse, error)
	// contains filtered or unexported methods
}

TranscribeServiceServer is the server API for TranscribeService service. All implementations must embed UnimplementedTranscribeServiceServer for forward compatibility.
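A hypothetical partial implementation, embedding UnimplementedTranscribeServiceServer so that any method not overridden here (StreamingRecognize, CompileContext) falls back to the generated Unimplemented stubs; the model values are placeholders and the transcribepb alias and context import are assumed as before.

type demoServer struct {
	// Embedding by value provides forward-compatible no-op implementations of
	// every method not overridden below.
	transcribepb.UnimplementedTranscribeServiceServer
}

func (s *demoServer) Version(ctx context.Context, _ *transcribepb.VersionRequest) (*transcribepb.VersionResponse, error) {
	return &transcribepb.VersionResponse{Version: "demo-0.0.1"}, nil
}

func (s *demoServer) ListModels(ctx context.Context, _ *transcribepb.ListModelsRequest) (*transcribepb.ListModelsResponse, error) {
	return &transcribepb.ListModelsResponse{
		Models: []*transcribepb.Model{
			{Id: "1", Name: "Demo English", Attributes: &transcribepb.ModelAttributes{SampleRate: 16000}},
		},
	}, nil
}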

type TranscribeService_StreamingRecognizeClient

type TranscribeService_StreamingRecognizeClient interface {
	Send(*StreamingRecognizeRequest) error
	Recv() (*StreamingRecognizeResponse, error)
	grpc.ClientStream
}

type TranscribeService_StreamingRecognizeServer

type TranscribeService_StreamingRecognizeServer interface {
	Send(*StreamingRecognizeResponse) error
	Recv() (*StreamingRecognizeRequest, error)
	grpc.ServerStream
}

type UnimplementedTranscribeServiceServer

type UnimplementedTranscribeServiceServer struct {
}

UnimplementedTranscribeServiceServer must be embedded to have forward compatible implementations.

func (UnimplementedTranscribeServiceServer) CompileContext

func (UnimplementedTranscribeServiceServer) ListModels

func (UnimplementedTranscribeServiceServer) StreamingRecognize

func (UnimplementedTranscribeServiceServer) Version

type UnsafeTranscribeServiceServer

type UnsafeTranscribeServiceServer interface {
	// contains filtered or unexported methods
}

UnsafeTranscribeServiceServer may be embedded to opt out of forward compatibility for this service. Use of this interface is not recommended, as added methods to TranscribeServiceServer will result in compilation errors.

type VersionRequest

type VersionRequest struct {
	// contains filtered or unexported fields
}

The top-level message sent by the client for the `Version` method.

func (*VersionRequest) Descriptor deprecated

func (*VersionRequest) Descriptor() ([]byte, []int)

Deprecated: Use VersionRequest.ProtoReflect.Descriptor instead.

func (*VersionRequest) ProtoMessage

func (*VersionRequest) ProtoMessage()

func (*VersionRequest) ProtoReflect

func (x *VersionRequest) ProtoReflect() protoreflect.Message

func (*VersionRequest) Reset

func (x *VersionRequest) Reset()

func (*VersionRequest) String

func (x *VersionRequest) String() string

type VersionResponse

type VersionResponse struct {

	// Version of the server handling these requests.
	Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
	// contains filtered or unexported fields
}

The message sent by the server for the `Version` method.

func (*VersionResponse) Descriptor deprecated

func (*VersionResponse) Descriptor() ([]byte, []int)

Deprecated: Use VersionResponse.ProtoReflect.Descriptor instead.

func (*VersionResponse) GetVersion

func (x *VersionResponse) GetVersion() string

func (*VersionResponse) ProtoMessage

func (*VersionResponse) ProtoMessage()

func (*VersionResponse) ProtoReflect

func (x *VersionResponse) ProtoReflect() protoreflect.Message

func (*VersionResponse) Reset

func (x *VersionResponse) Reset()

func (*VersionResponse) String

func (x *VersionResponse) String() string

type WordDetails

type WordDetails struct {

	// Word-level information corresponding to the `transcript_formatted` field.
	Formatted []*WordInfo `protobuf:"bytes,1,rep,name=formatted,proto3" json:"formatted,omitempty"`
	// Word-level information corresponding to the `transcript_raw` field.
	Raw []*WordInfo `protobuf:"bytes,2,rep,name=raw,proto3" json:"raw,omitempty"`
	// contains filtered or unexported fields
}

func (*WordDetails) Descriptor deprecated

func (*WordDetails) Descriptor() ([]byte, []int)

Deprecated: Use WordDetails.ProtoReflect.Descriptor instead.

func (*WordDetails) GetFormatted

func (x *WordDetails) GetFormatted() []*WordInfo

func (*WordDetails) GetRaw

func (x *WordDetails) GetRaw() []*WordInfo

func (*WordDetails) ProtoMessage

func (*WordDetails) ProtoMessage()

func (*WordDetails) ProtoReflect

func (x *WordDetails) ProtoReflect() protoreflect.Message

func (*WordDetails) Reset

func (x *WordDetails) Reset()

func (*WordDetails) String

func (x *WordDetails) String() string

type WordInfo

type WordInfo struct {

	// The actual word in the text
	Word string `protobuf:"bytes,1,opt,name=word,proto3" json:"word,omitempty"`
	// Confidence estimate between 0 and 1. A higher number represents a higher
	// likelihood that the word was correctly recognized.
	Confidence float64 `protobuf:"fixed64,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
	// Time offset in milliseconds relative to the beginning of audio received by
	// the recognizer and corresponding to the start of this spoken word.
	StartTimeMs uint64 `protobuf:"varint,3,opt,name=start_time_ms,json=startTimeMs,proto3" json:"start_time_ms,omitempty"`
	// Duration in milliseconds of the current word in the spoken audio.
	DurationMs uint64 `protobuf:"varint,4,opt,name=duration_ms,json=durationMs,proto3" json:"duration_ms,omitempty"`
	// contains filtered or unexported fields
}

Word level details for recognized words in a transcript

func (*WordInfo) Descriptor deprecated

func (*WordInfo) Descriptor() ([]byte, []int)

Deprecated: Use WordInfo.ProtoReflect.Descriptor instead.

func (*WordInfo) GetConfidence

func (x *WordInfo) GetConfidence() float64

func (*WordInfo) GetDurationMs

func (x *WordInfo) GetDurationMs() uint64

func (*WordInfo) GetStartTimeMs

func (x *WordInfo) GetStartTimeMs() uint64

func (*WordInfo) GetWord

func (x *WordInfo) GetWord() string

func (*WordInfo) ProtoMessage

func (*WordInfo) ProtoMessage()

func (*WordInfo) ProtoReflect

func (x *WordInfo) ProtoReflect() protoreflect.Message

func (*WordInfo) Reset

func (x *WordInfo) Reset()

func (*WordInfo) String

func (x *WordInfo) String() string
