mediatranslation

package
v0.0.0-...-6cb3ea0
Warning

This package is not in the latest version of its module.
Published: Jun 7, 2021 License: Apache-2.0 Imports: 11 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

var (
	StreamingTranslateSpeechResponse_SpeechEventType_name = map[int32]string{
		0: "SPEECH_EVENT_TYPE_UNSPECIFIED",
		1: "END_OF_SINGLE_UTTERANCE",
	}
	StreamingTranslateSpeechResponse_SpeechEventType_value = map[string]int32{
		"SPEECH_EVENT_TYPE_UNSPECIFIED": 0,
		"END_OF_SINGLE_UTTERANCE":       1,
	}
)

Enum value maps for StreamingTranslateSpeechResponse_SpeechEventType.

var File_google_cloud_mediatranslation_v1alpha1_media_translation_proto protoreflect.FileDescriptor

Functions

func RegisterSpeechTranslationServiceServer

func RegisterSpeechTranslationServiceServer(s *grpc.Server, srv SpeechTranslationServiceServer)
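A minimal registration sketch (not taken from this package's docs): it assumes the import path google.golang.org/genproto/googleapis/cloud/mediatranslation/v1alpha1, an illustrative listen address of localhost:10000, and a hypothetical stubServer type that satisfies the interface by embedding UnimplementedSpeechTranslationServiceServer.

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"

	// Assumed import path for this package.
	mediatranslation "google.golang.org/genproto/googleapis/cloud/mediatranslation/v1alpha1"
)

// stubServer is a placeholder implementation; embedding the Unimplemented
// type satisfies the SpeechTranslationServiceServer interface.
type stubServer struct {
	mediatranslation.UnimplementedSpeechTranslationServiceServer
}

func main() {
	lis, err := net.Listen("tcp", "localhost:10000")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	s := grpc.NewServer()
	mediatranslation.RegisterSpeechTranslationServiceServer(s, &stubServer{})
	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}

A fuller server sketch that actually overrides StreamingTranslateSpeech appears under UnimplementedSpeechTranslationServiceServer below.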

Types

type SpeechTranslationServiceClient

type SpeechTranslationServiceClient interface {
	// Performs bidirectional streaming speech translation: receive results while
	// sending audio. This method is only available via the gRPC API (not REST).
	StreamingTranslateSpeech(ctx context.Context, opts ...grpc.CallOption) (SpeechTranslationService_StreamingTranslateSpeechClient, error)
}

SpeechTranslationServiceClient is the client API for SpeechTranslationService service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
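A sketch of the stream lifecycle under that contract, assuming the import path above and an already-constructed client; the runStream name is illustrative. The ctx passed to StreamingTranslateSpeech governs the whole stream, CloseSend ends the send side, and Recv is called until io.EOF.

package example

import (
	"context"
	"io"
	"log"

	// Assumed import path for this package.
	mediatranslation "google.golang.org/genproto/googleapis/cloud/mediatranslation/v1alpha1"
)

// runStream opens a bidirectional stream, sends requests (elided here),
// closes the send side, and drains responses until the server ends the stream.
func runStream(ctx context.Context, client mediatranslation.SpeechTranslationServiceClient) error {
	stream, err := client.StreamingTranslateSpeech(ctx)
	if err != nil {
		return err
	}
	// ... send requests with stream.Send, then signal end of input.
	if err := stream.CloseSend(); err != nil {
		return err
	}
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil // server finished the stream
		}
		if err != nil {
			return err
		}
		log.Printf("response: %v", resp)
	}
}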

type SpeechTranslationServiceServer

type SpeechTranslationServiceServer interface {
	// Performs bidirectional streaming speech translation: receive results while
	// sending audio. This method is only available via the gRPC API (not REST).
	StreamingTranslateSpeech(SpeechTranslationService_StreamingTranslateSpeechServer) error
}

SpeechTranslationServiceServer is the server API for SpeechTranslationService service.

type SpeechTranslationService_StreamingTranslateSpeechClient

type SpeechTranslationService_StreamingTranslateSpeechClient interface {
	Send(*StreamingTranslateSpeechRequest) error
	Recv() (*StreamingTranslateSpeechResponse, error)
	grpc.ClientStream
}

type SpeechTranslationService_StreamingTranslateSpeechServer

type SpeechTranslationService_StreamingTranslateSpeechServer interface {
	Send(*StreamingTranslateSpeechResponse) error
	Recv() (*StreamingTranslateSpeechRequest, error)
	grpc.ServerStream
}

type StreamingTranslateSpeechConfig

type StreamingTranslateSpeechConfig struct {

	// Required. The common config for all the following audio contents.
	AudioConfig *TranslateSpeechConfig `protobuf:"bytes,1,opt,name=audio_config,json=audioConfig,proto3" json:"audio_config,omitempty"`
	// Optional. If `false` or omitted, the system performs
	// continuous translation (continuing to wait for and process audio even if
	// the user pauses speaking) until the client closes the input stream (gRPC
	// API) or until the maximum time limit has been reached. It may return
	// multiple `StreamingTranslateSpeechResult`s with the `is_final` flag set
	// to `true`.
	//
	// If `true`, the speech translator will detect a single spoken utterance.
	// When it detects that the user has paused or stopped speaking, it will
	// return an `END_OF_SINGLE_UTTERANCE` event and cease translation.
	// When the client receives the `END_OF_SINGLE_UTTERANCE` event, it should
	// stop sending requests but keep receiving the remaining responses until
	// the stream is terminated. To construct the complete sentence in a
	// streaming way, override the previous result if its `is_final` was
	// `false`, or append to it if its `is_final` was `true`.
	SingleUtterance bool `protobuf:"varint,2,opt,name=single_utterance,json=singleUtterance,proto3" json:"single_utterance,omitempty"`
	// Optional. Stability control for the media translation text. The value must be
	// one of "LOW", "MEDIUM", or "HIGH". It applies to text/text_and_audio
	// translation only.
	// Audio translation mode supports only "HIGH" stability; "LOW" or "MEDIUM"
	// stability results in an argument error.
	// An empty string (the default) is treated as "HIGH" in audio translation
	// mode and as "LOW" in the other translation modes.
	// Note that stability and speed are a trade-off:
	// 1. "LOW": The translation service starts translating right after
	// receiving a recognition response, so results arrive faster.
	// 2. "MEDIUM": The translation service checks whether a recognition
	// response is stable enough, and only translates recognition responses
	// that are unlikely to change later.
	// 3. "HIGH": The translation service waits for more stable recognition
	// responses before translating. In addition, later recognition responses
	// cannot modify earlier ones, which may affect quality in some
	// situations. "HIGH" stability generates "final" responses more
	// frequently.
	//
	Stability string `protobuf:"bytes,3,opt,name=stability,proto3" json:"stability,omitempty"`
	// Optional. Translation mode. The value must be one of "text", "audio", or
	// "text_and_audio". An empty string (the default) is treated as "text".
	// 1. "text": The response will be a text translation. Text translation has
	// an "is_final" field; see `TextTranslationResult` for the detailed
	// definition.
	// 2. "audio": The response will be an audio translation. Audio translation
	// does not have an "is_final" field, which means each audio translation
	// response is stable and will not be changed by a later response.
	// Translation mode "audio" can only be used with "high" stability mode.
	// 3. "text_and_audio": The response will have a text translation; when
	// "is_final" is true, the corresponding audio translation is also output.
	// When "is_final" is false, the audio_translation field will be empty.
	TranslationMode string `protobuf:"bytes,4,opt,name=translation_mode,json=translationMode,proto3" json:"translation_mode,omitempty"`
	// Optional. If disable_interim_results is true, only "final" responses are
	// returned; otherwise, all responses are returned. The default value is
	// false. disable_interim_results can only be set to true with "high"
	// stability mode.
	DisableInterimResults bool `` /* 127-byte string literal not displayed */
	// contains filtered or unexported fields
}

Config used for streaming translation.
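A sketch of building this config for continuous text-mode translation. The import path, the language codes, and the stability choice are illustrative assumptions; field values follow the comments above. The newStreamingConfig helper is not part of this package.

package example

import (
	// Assumed import path for this package.
	mediatranslation "google.golang.org/genproto/googleapis/cloud/mediatranslation/v1alpha1"
)

// newStreamingConfig builds a config for continuous text translation of
// 16 kHz linear PCM audio from English to Spanish.
func newStreamingConfig() *mediatranslation.StreamingTranslateSpeechConfig {
	return &mediatranslation.StreamingTranslateSpeechConfig{
		AudioConfig: &mediatranslation.TranslateSpeechConfig{
			AudioEncoding:      "linear16",
			SourceLanguageCode: "en-US",
			TargetLanguageCode: "es-ES",
			SampleRateHertz:    16000,
		},
		// Continuous translation: keep going until the client closes the stream.
		SingleUtterance: false,
		// Text mode accepts "LOW", "MEDIUM", or "HIGH" stability.
		Stability:       "MEDIUM",
		TranslationMode: "text",
	}
}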

func (*StreamingTranslateSpeechConfig) Descriptor deprecated

func (*StreamingTranslateSpeechConfig) Descriptor() ([]byte, []int)

Deprecated: Use StreamingTranslateSpeechConfig.ProtoReflect.Descriptor instead.

func (*StreamingTranslateSpeechConfig) GetAudioConfig

func (*StreamingTranslateSpeechConfig) GetDisableInterimResults

func (x *StreamingTranslateSpeechConfig) GetDisableInterimResults() bool

func (*StreamingTranslateSpeechConfig) GetSingleUtterance

func (x *StreamingTranslateSpeechConfig) GetSingleUtterance() bool

func (*StreamingTranslateSpeechConfig) GetStability

func (x *StreamingTranslateSpeechConfig) GetStability() string

func (*StreamingTranslateSpeechConfig) GetTranslationMode

func (x *StreamingTranslateSpeechConfig) GetTranslationMode() string

func (*StreamingTranslateSpeechConfig) ProtoMessage

func (*StreamingTranslateSpeechConfig) ProtoMessage()

func (*StreamingTranslateSpeechConfig) ProtoReflect

func (*StreamingTranslateSpeechConfig) Reset

func (x *StreamingTranslateSpeechConfig) Reset()

func (*StreamingTranslateSpeechConfig) String

type StreamingTranslateSpeechRequest

type StreamingTranslateSpeechRequest struct {

	// The streaming request, which is either a streaming config or content.
	//
	// Types that are assignable to StreamingRequest:
	//	*StreamingTranslateSpeechRequest_StreamingConfig
	//	*StreamingTranslateSpeechRequest_AudioContent
	StreamingRequest isStreamingTranslateSpeechRequest_StreamingRequest `protobuf_oneof:"streaming_request"`
	// contains filtered or unexported fields
}

The top-level message sent by the client for the `StreamingTranslateSpeech` method. Multiple `StreamingTranslateSpeechRequest` messages are sent. The first message must contain a `streaming_config` message and must not contain `audio_content` data. All subsequent messages must contain `audio_content` data and must not contain a `streaming_config` message.
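A sketch of that ordering, assuming an already-open stream and the import path above; the sendRequests name is illustrative. The first Send carries only streaming_config, every subsequent Send carries only audio_content, and CloseSend signals the end of input.

package example

import (
	// Assumed import path for this package.
	mediatranslation "google.golang.org/genproto/googleapis/cloud/mediatranslation/v1alpha1"
)

// sendRequests writes the config message first, then streams audio chunks.
func sendRequests(stream mediatranslation.SpeechTranslationService_StreamingTranslateSpeechClient,
	cfg *mediatranslation.StreamingTranslateSpeechConfig, chunks [][]byte) error {
	// First message: streaming_config only, no audio_content.
	if err := stream.Send(&mediatranslation.StreamingTranslateSpeechRequest{
		StreamingRequest: &mediatranslation.StreamingTranslateSpeechRequest_StreamingConfig{
			StreamingConfig: cfg,
		},
	}); err != nil {
		return err
	}
	// Subsequent messages: audio_content only, no streaming_config.
	for _, chunk := range chunks {
		if err := stream.Send(&mediatranslation.StreamingTranslateSpeechRequest{
			StreamingRequest: &mediatranslation.StreamingTranslateSpeechRequest_AudioContent{
				AudioContent: chunk,
			},
		}); err != nil {
			return err
		}
	}
	// Signal that no more audio will be sent.
	return stream.CloseSend()
}

In practice this send loop runs in its own goroutine while a separate loop receives responses, as sketched under StreamingTranslateSpeechResponse below.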

func (*StreamingTranslateSpeechRequest) Descriptor deprecated

func (*StreamingTranslateSpeechRequest) Descriptor() ([]byte, []int)

Deprecated: Use StreamingTranslateSpeechRequest.ProtoReflect.Descriptor instead.

func (*StreamingTranslateSpeechRequest) GetAudioContent

func (x *StreamingTranslateSpeechRequest) GetAudioContent() []byte

func (*StreamingTranslateSpeechRequest) GetStreamingConfig

func (*StreamingTranslateSpeechRequest) GetStreamingRequest

func (m *StreamingTranslateSpeechRequest) GetStreamingRequest() isStreamingTranslateSpeechRequest_StreamingRequest

func (*StreamingTranslateSpeechRequest) ProtoMessage

func (*StreamingTranslateSpeechRequest) ProtoMessage()

func (*StreamingTranslateSpeechRequest) ProtoReflect

func (*StreamingTranslateSpeechRequest) Reset

func (*StreamingTranslateSpeechRequest) String

type StreamingTranslateSpeechRequest_AudioContent

type StreamingTranslateSpeechRequest_AudioContent struct {
	// The audio data to be translated. Sequential chunks of audio data are sent
	// in sequential `StreamingTranslateSpeechRequest` messages. The first
	// `StreamingTranslateSpeechRequest` message must not contain
	// `audio_content` data and all subsequent `StreamingTranslateSpeechRequest`
	// messages must contain `audio_content` data. The audio bytes must be
	// encoded as specified in `StreamingTranslateSpeechConfig`. Note: as with
	// all bytes fields, protocol buffers use a pure binary representation (not
	// base64).
	AudioContent []byte `protobuf:"bytes,2,opt,name=audio_content,json=audioContent,proto3,oneof"`
}

type StreamingTranslateSpeechRequest_StreamingConfig

type StreamingTranslateSpeechRequest_StreamingConfig struct {
	// Provides information to the recognizer that specifies how to process the
	// request. The first `StreamingTranslateSpeechRequest` message must contain
	// a `streaming_config` message.
	StreamingConfig *StreamingTranslateSpeechConfig `protobuf:"bytes,1,opt,name=streaming_config,json=streamingConfig,proto3,oneof"`
}

type StreamingTranslateSpeechResponse

type StreamingTranslateSpeechResponse struct {

	// Output only. If set, returns a [google.rpc.Status][google.rpc.Status] message that
	// specifies the error for the operation.
	Error *status.Status `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
	// Output only. The translation result that is currently being processed. (For text
	// translation, `is_final` can be `true` or `false`.
	// For audio translation, there is no `is_final` field, which means each
	// audio response is stable and will not be changed later. For
	// text_and_audio, the text translation still has an `is_final` field, but
	// the corresponding audio is only output when `is_final` is true.)
	Result *StreamingTranslateSpeechResult `protobuf:"bytes,2,opt,name=result,proto3" json:"result,omitempty"`
	// Output only. Indicates the type of speech event.
	SpeechEventType StreamingTranslateSpeechResponse_SpeechEventType `` /* 202-byte string literal not displayed */
	// contains filtered or unexported fields
}

A streaming speech translation response corresponding to a portion of the audio currently processed.
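A sketch of a client receive loop over these responses (text mode and the import path above are assumptions; receiveResponses is an illustrative name): it checks the error field, then prints interim and final text results.

package example

import (
	"fmt"
	"io"

	// Assumed import path for this package.
	mediatranslation "google.golang.org/genproto/googleapis/cloud/mediatranslation/v1alpha1"
)

// receiveResponses drains the stream and prints text translation results.
func receiveResponses(stream mediatranslation.SpeechTranslationService_StreamingTranslateSpeechClient) error {
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil // stream ended normally
		}
		if err != nil {
			return err
		}
		if st := resp.GetError(); st != nil {
			return fmt.Errorf("translation error: %s", st.GetMessage())
		}
		if text := resp.GetResult().GetTextTranslationResult(); text != nil {
			fmt.Printf("is_final=%v translation=%q\n", text.GetIsFinal(), text.GetTranslation())
		}
	}
}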

func (*StreamingTranslateSpeechResponse) Descriptor deprecated

func (*StreamingTranslateSpeechResponse) Descriptor() ([]byte, []int)

Deprecated: Use StreamingTranslateSpeechResponse.ProtoReflect.Descriptor instead.

func (*StreamingTranslateSpeechResponse) GetError

func (*StreamingTranslateSpeechResponse) GetResult

func (*StreamingTranslateSpeechResponse) GetSpeechEventType

func (*StreamingTranslateSpeechResponse) ProtoMessage

func (*StreamingTranslateSpeechResponse) ProtoMessage()

func (*StreamingTranslateSpeechResponse) ProtoReflect

func (*StreamingTranslateSpeechResponse) Reset

func (*StreamingTranslateSpeechResponse) String

type StreamingTranslateSpeechResponse_SpeechEventType

type StreamingTranslateSpeechResponse_SpeechEventType int32

Indicates the type of speech event.

const (
	// No speech event specified.
	StreamingTranslateSpeechResponse_SPEECH_EVENT_TYPE_UNSPECIFIED StreamingTranslateSpeechResponse_SpeechEventType = 0
	// This event indicates that the server has detected the end of the user's
	// speech utterance and expects no additional speech. Therefore, the server
	// will not process additional audio (although it may subsequently return
	// additional results). When the client receives `END_OF_SINGLE_UTTERANCE`
	// event, the client should stop sending the requests. However, clients
	// should keep receiving remaining responses until the stream is terminated.
	// To construct the complete sentence in a streaming way, one should
	// override (if `is_final` of previous response is `false`), or append (if
	// `is_final` of previous response is `true`). This event is only sent if
	// `single_utterance` was set to `true`, and is not used otherwise.
	StreamingTranslateSpeechResponse_END_OF_SINGLE_UTTERANCE StreamingTranslateSpeechResponse_SpeechEventType = 1
)
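A sketch of handling this event, assuming `single_utterance` was set to `true` in the config and that requests are being sent from another goroutine (the drainAfterUtterance name is illustrative): on END_OF_SINGLE_UTTERANCE the client stops sending by calling CloseSend, but keeps receiving until the stream is terminated.

package example

import (
	"io"
	"log"

	// Assumed import path for this package.
	mediatranslation "google.golang.org/genproto/googleapis/cloud/mediatranslation/v1alpha1"
)

// drainAfterUtterance receives responses and, on END_OF_SINGLE_UTTERANCE,
// closes the send side of the stream (the send loop runs elsewhere) while
// continuing to receive until the server terminates the stream.
func drainAfterUtterance(stream mediatranslation.SpeechTranslationService_StreamingTranslateSpeechClient) error {
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil // server terminated the stream
		}
		if err != nil {
			return err
		}
		if resp.GetSpeechEventType() == mediatranslation.StreamingTranslateSpeechResponse_END_OF_SINGLE_UTTERANCE {
			log.Println("end of utterance: the server will not process more audio")
			// Stop sending requests, but keep receiving remaining responses.
			if err := stream.CloseSend(); err != nil {
				return err
			}
		}
	}
}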

func (StreamingTranslateSpeechResponse_SpeechEventType) Descriptor

func (StreamingTranslateSpeechResponse_SpeechEventType) Enum

func (StreamingTranslateSpeechResponse_SpeechEventType) EnumDescriptor deprecated

Deprecated: Use StreamingTranslateSpeechResponse_SpeechEventType.Descriptor instead.

func (StreamingTranslateSpeechResponse_SpeechEventType) Number

func (StreamingTranslateSpeechResponse_SpeechEventType) String

func (StreamingTranslateSpeechResponse_SpeechEventType) Type

type StreamingTranslateSpeechResult

type StreamingTranslateSpeechResult struct {

	// Text translation result.
	TextTranslationResult *StreamingTranslateSpeechResult_TextTranslationResult `` /* 126-byte string literal not displayed */
	// Audio translation result.
	AudioTranslationResult *StreamingTranslateSpeechResult_AudioTranslationResult `` /* 129-byte string literal not displayed */
	// Output only. The debug-only recognition result in the original language. This
	// field is for debugging only and will be set to an empty string if not available.
	// It is an implementation detail and is not guaranteed to be backward compatible.
	RecognitionResult string `protobuf:"bytes,3,opt,name=recognition_result,json=recognitionResult,proto3" json:"recognition_result,omitempty"`
	// Output only. The language code (BCP-47) of the source language detected in the audio.
	DetectedSourceLanguageCode string `` /* 143-byte string literal not displayed */
	// contains filtered or unexported fields
}

A streaming speech translation result corresponding to a portion of the audio that is currently being processed.
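A small sketch that summarizes whichever payloads a result carries, using only the getters listed below; the import path and the describeResult name are assumptions.

package example

import (
	"fmt"

	// Assumed import path for this package.
	mediatranslation "google.golang.org/genproto/googleapis/cloud/mediatranslation/v1alpha1"
)

// describeResult reports the detected language plus whichever of the text
// and audio payloads are present on a result.
func describeResult(res *mediatranslation.StreamingTranslateSpeechResult) string {
	s := fmt.Sprintf("detected=%s", res.GetDetectedSourceLanguageCode())
	if text := res.GetTextTranslationResult(); text != nil {
		s += fmt.Sprintf(" text=%q final=%v", text.GetTranslation(), text.GetIsFinal())
	}
	if audio := res.GetAudioTranslationResult(); audio != nil {
		s += fmt.Sprintf(" audio=%d bytes", len(audio.GetAudioTranslation()))
	}
	return s
}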

func (*StreamingTranslateSpeechResult) Descriptor deprecated

func (*StreamingTranslateSpeechResult) Descriptor() ([]byte, []int)

Deprecated: Use StreamingTranslateSpeechResult.ProtoReflect.Descriptor instead.

func (*StreamingTranslateSpeechResult) GetAudioTranslationResult

func (*StreamingTranslateSpeechResult) GetDetectedSourceLanguageCode

func (x *StreamingTranslateSpeechResult) GetDetectedSourceLanguageCode() string

func (*StreamingTranslateSpeechResult) GetRecognitionResult

func (x *StreamingTranslateSpeechResult) GetRecognitionResult() string

func (*StreamingTranslateSpeechResult) GetTextTranslationResult

func (*StreamingTranslateSpeechResult) ProtoMessage

func (*StreamingTranslateSpeechResult) ProtoMessage()

func (*StreamingTranslateSpeechResult) ProtoReflect

func (*StreamingTranslateSpeechResult) Reset

func (x *StreamingTranslateSpeechResult) Reset()

func (*StreamingTranslateSpeechResult) String

type StreamingTranslateSpeechResult_AudioTranslationResult

type StreamingTranslateSpeechResult_AudioTranslationResult struct {

	// Output only. The translated audio.
	AudioTranslation []byte `protobuf:"bytes,1,opt,name=audio_translation,json=audioTranslation,proto3" json:"audio_translation,omitempty"`
	// contains filtered or unexported fields
}

Audio translation result.

func (*StreamingTranslateSpeechResult_AudioTranslationResult) Descriptor deprecated

Deprecated: Use StreamingTranslateSpeechResult_AudioTranslationResult.ProtoReflect.Descriptor instead.

func (*StreamingTranslateSpeechResult_AudioTranslationResult) GetAudioTranslation

func (*StreamingTranslateSpeechResult_AudioTranslationResult) ProtoMessage

func (*StreamingTranslateSpeechResult_AudioTranslationResult) ProtoReflect

func (*StreamingTranslateSpeechResult_AudioTranslationResult) Reset

func (*StreamingTranslateSpeechResult_AudioTranslationResult) String

type StreamingTranslateSpeechResult_TextTranslationResult

type StreamingTranslateSpeechResult_TextTranslationResult struct {

	// Output only. The translated sentence.
	Translation string `protobuf:"bytes,1,opt,name=translation,proto3" json:"translation,omitempty"`
	// Output only. If `false`, this `StreamingTranslateSpeechResult` represents
	// an interim result that may change. If `true`, this is the final time the
	// translation service will return this particular
	// `StreamingTranslateSpeechResult`; the streaming translator will not
	// return any further hypotheses for this portion of the transcript or its
	// corresponding audio.
	IsFinal bool `protobuf:"varint,2,opt,name=is_final,json=isFinal,proto3" json:"is_final,omitempty"`
	// contains filtered or unexported fields
}

Text translation result.
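A sketch of assembling a transcript from these results, following the override/append rule described under `single_utterance`: interim results replace the pending segment, final results commit it. The transcriptBuilder type is a hypothetical helper, not part of this package.

package example

import (
	// Assumed import path for this package.
	mediatranslation "google.golang.org/genproto/googleapis/cloud/mediatranslation/v1alpha1"
)

// transcriptBuilder assembles the full translated sentence from a stream of
// text results: interim results overwrite the pending segment, and a final
// result commits it.
type transcriptBuilder struct {
	committed string // text from results whose is_final was true
	pending   string // latest interim (is_final == false) text
}

func (b *transcriptBuilder) add(res *mediatranslation.StreamingTranslateSpeechResult_TextTranslationResult) {
	if res.GetIsFinal() {
		// Append: this segment will not change again.
		b.committed += res.GetTranslation()
		b.pending = ""
		return
	}
	// Override: replace the previous interim hypothesis.
	b.pending = res.GetTranslation()
}

func (b *transcriptBuilder) text() string {
	return b.committed + b.pending
}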

func (*StreamingTranslateSpeechResult_TextTranslationResult) Descriptor deprecated

Deprecated: Use StreamingTranslateSpeechResult_TextTranslationResult.ProtoReflect.Descriptor instead.

func (*StreamingTranslateSpeechResult_TextTranslationResult) GetIsFinal

func (*StreamingTranslateSpeechResult_TextTranslationResult) GetTranslation

func (*StreamingTranslateSpeechResult_TextTranslationResult) ProtoMessage

func (*StreamingTranslateSpeechResult_TextTranslationResult) ProtoReflect

func (*StreamingTranslateSpeechResult_TextTranslationResult) Reset

func (*StreamingTranslateSpeechResult_TextTranslationResult) String

type TranslateSpeechConfig

type TranslateSpeechConfig struct {

	// Required. Encoding of audio data.
	// Supported formats:
	//
	// - `linear16`
	//
	//   Uncompressed 16-bit signed little-endian samples (Linear PCM).
	//
	// - `flac`
	//
	//   `flac` (Free Lossless Audio Codec) is the recommended encoding
	//   because it is lossless (so recognition is not compromised) and
	//   requires only about half the bandwidth of `linear16`.
	//
	// - `mulaw`
	//
	//   8-bit samples that compand 14-bit audio samples using G.711 PCMU/mu-law.
	//
	// - `amr`
	//
	//   Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz` must be 8000.
	//
	// - `amr-wb`
	//
	//   Adaptive Multi-Rate Wideband codec. `sample_rate_hertz` must be 16000.
	//
	// - `ogg-opus`
	//
	//   Opus encoded audio frames in Ogg container
	//   ([OggOpus](https://wiki.xiph.org/OggOpus)).
	//   `sample_rate_hertz` must be one of 8000, 12000, 16000, 24000, or 48000.
	//
	// - `mp3`
	//
	//   MP3 audio. All standard MP3 bitrates are supported (which range from
	//   32 to 320 kbps). When using this encoding, `sample_rate_hertz` has to
	//   match the sample rate of the file being used.
	//
	//
	AudioEncoding string `protobuf:"bytes,1,opt,name=audio_encoding,json=audioEncoding,proto3" json:"audio_encoding,omitempty"`
	// Required. Source language code (BCP-47) of the input audio.
	SourceLanguageCode string `protobuf:"bytes,2,opt,name=source_language_code,json=sourceLanguageCode,proto3" json:"source_language_code,omitempty"`
	// Required. Target language code (BCP-47) of the output.
	TargetLanguageCode string `protobuf:"bytes,3,opt,name=target_language_code,json=targetLanguageCode,proto3" json:"target_language_code,omitempty"`
	// Optional. A list of up to 3 additional language codes (BCP-47), listing possible
	// alternative languages of the supplied audio. If alternative source
	// languages are listed, the speech translation result is translated from
	// the most likely language detected, including the main
	// source_language_code. The translated result will include the language
	// code of the language detected in the audio.
	// Note:
	// 1. If a provided alternative_source_language_code is not supported
	// by the current API version, that language code is skipped.
	// 2. If the user provides only one eligible alternative_source_language_code,
	// the translation happens between source_language_code and that
	// alternative_source_language_code, and target_language_code is ignored.
	// This is useful in conversation mode.
	AlternativeSourceLanguageCodes []string `` /* 155-byte string literal not displayed */
	// Optional. Sample rate in Hertz of the audio data. Valid values are:
	// 8000-48000. 16000 is optimal. For best results, set the sampling rate of
	// the audio source to 16000 Hz. If that's not possible, use the native sample
	// rate of the audio source (instead of re-sampling).
	//
	SampleRateHertz int32 `protobuf:"varint,4,opt,name=sample_rate_hertz,json=sampleRateHertz,proto3" json:"sample_rate_hertz,omitempty"`
	// Optional.
	Model string `protobuf:"bytes,5,opt,name=model,proto3" json:"model,omitempty"`
	// contains filtered or unexported fields
}

Provides information to the speech translation that specifies how to process the request.
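A sketch of an audio config that uses alternative source languages; the encoding, sample rate, and language codes are illustrative values chosen from the ranges documented above, and newAudioConfig is a hypothetical helper.

package example

import (
	// Assumed import path for this package.
	mediatranslation "google.golang.org/genproto/googleapis/cloud/mediatranslation/v1alpha1"
)

// newAudioConfig builds a speech config for 16 kHz FLAC audio whose source
// language is probably French but may be German or Italian.
func newAudioConfig() *mediatranslation.TranslateSpeechConfig {
	return &mediatranslation.TranslateSpeechConfig{
		AudioEncoding:                  "flac",
		SourceLanguageCode:             "fr-FR",
		TargetLanguageCode:             "en-US",
		AlternativeSourceLanguageCodes: []string{"de-DE", "it-IT"},
		SampleRateHertz:                16000, // 16000 Hz is optimal
	}
}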

func (*TranslateSpeechConfig) Descriptor deprecated

func (*TranslateSpeechConfig) Descriptor() ([]byte, []int)

Deprecated: Use TranslateSpeechConfig.ProtoReflect.Descriptor instead.

func (*TranslateSpeechConfig) GetAlternativeSourceLanguageCodes

func (x *TranslateSpeechConfig) GetAlternativeSourceLanguageCodes() []string

func (*TranslateSpeechConfig) GetAudioEncoding

func (x *TranslateSpeechConfig) GetAudioEncoding() string

func (*TranslateSpeechConfig) GetModel

func (x *TranslateSpeechConfig) GetModel() string

func (*TranslateSpeechConfig) GetSampleRateHertz

func (x *TranslateSpeechConfig) GetSampleRateHertz() int32

func (*TranslateSpeechConfig) GetSourceLanguageCode

func (x *TranslateSpeechConfig) GetSourceLanguageCode() string

func (*TranslateSpeechConfig) GetTargetLanguageCode

func (x *TranslateSpeechConfig) GetTargetLanguageCode() string

func (*TranslateSpeechConfig) ProtoMessage

func (*TranslateSpeechConfig) ProtoMessage()

func (*TranslateSpeechConfig) ProtoReflect

func (x *TranslateSpeechConfig) ProtoReflect() protoreflect.Message

func (*TranslateSpeechConfig) Reset

func (x *TranslateSpeechConfig) Reset()

func (*TranslateSpeechConfig) String

func (x *TranslateSpeechConfig) String() string

type UnimplementedSpeechTranslationServiceServer

type UnimplementedSpeechTranslationServiceServer struct {
}

UnimplementedSpeechTranslationServiceServer can be embedded to have forward compatible implementations.

func (*UnimplementedSpeechTranslationServiceServer) StreamingTranslateSpeech
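A sketch of a server that embeds UnimplementedSpeechTranslationServiceServer for forward compatibility and overrides the streaming method; the translationServer name, the import path, and the placeholder body are illustrative only, not this service's actual behavior.

package example

import (
	"io"

	// Assumed import path for this package.
	mediatranslation "google.golang.org/genproto/googleapis/cloud/mediatranslation/v1alpha1"
)

// translationServer embeds the Unimplemented type so that methods added to
// the service in later versions are answered with an Unimplemented error
// instead of breaking the build.
type translationServer struct {
	mediatranslation.UnimplementedSpeechTranslationServiceServer
}

// StreamingTranslateSpeech reads requests until the client closes the send
// side, then replies with a single placeholder result.
func (s *translationServer) StreamingTranslateSpeech(
	stream mediatranslation.SpeechTranslationService_StreamingTranslateSpeechServer) error {
	for {
		_, err := stream.Recv()
		if err == io.EOF {
			break // client called CloseSend
		}
		if err != nil {
			return err
		}
		// A real server would buffer the audio and run translation here.
	}
	return stream.Send(&mediatranslation.StreamingTranslateSpeechResponse{
		Result: &mediatranslation.StreamingTranslateSpeechResult{
			TextTranslationResult: &mediatranslation.StreamingTranslateSpeechResult_TextTranslationResult{
				Translation: "placeholder",
				IsFinal:     true,
			},
		},
	})
}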
