Package documentation: google.golang.org/genproto/googleapis/assistant/embedded/v1alpha1

package embedded

import "google.golang.org/genproto/googleapis/assistant/embedded/v1alpha1"

Index

Package Files

embedded_assistant.pb.go

Variables

var (
    AudioInConfig_Encoding_name = map[int32]string{
        0:  "ENCODING_UNSPECIFIED",
        1:  "LINEAR16",
        2:  "FLAC",
    }
    AudioInConfig_Encoding_value = map[string]int32{
        "ENCODING_UNSPECIFIED": 0,
        "LINEAR16":             1,
        "FLAC":                 2,
    }
)

Enum value maps for AudioInConfig_Encoding.

var (
    AudioOutConfig_Encoding_name = map[int32]string{
        0:  "ENCODING_UNSPECIFIED",
        1:  "LINEAR16",
        2:  "MP3",
        3:  "OPUS_IN_OGG",
    }
    AudioOutConfig_Encoding_value = map[string]int32{
        "ENCODING_UNSPECIFIED": 0,
        "LINEAR16":             1,
        "MP3":                  2,
        "OPUS_IN_OGG":          3,
    }
)

Enum value maps for AudioOutConfig_Encoding.

var (
    ConverseResult_MicrophoneMode_name = map[int32]string{
        0:  "MICROPHONE_MODE_UNSPECIFIED",
        1:  "CLOSE_MICROPHONE",
        2:  "DIALOG_FOLLOW_ON",
    }
    ConverseResult_MicrophoneMode_value = map[string]int32{
        "MICROPHONE_MODE_UNSPECIFIED": 0,
        "CLOSE_MICROPHONE":            1,
        "DIALOG_FOLLOW_ON":            2,
    }
)

Enum value maps for ConverseResult_MicrophoneMode.

var (
    ConverseResponse_EventType_name = map[int32]string{
        0:  "EVENT_TYPE_UNSPECIFIED",
        1:  "END_OF_UTTERANCE",
    }
    ConverseResponse_EventType_value = map[string]int32{
        "EVENT_TYPE_UNSPECIFIED": 0,
        "END_OF_UTTERANCE":       1,
    }
)

Enum value maps for ConverseResponse_EventType.

var File_google_assistant_embedded_v1alpha1_embedded_assistant_proto protoreflect.FileDescriptor

func RegisterEmbeddedAssistantServer Uses

func RegisterEmbeddedAssistantServer(s *grpc.Server, srv EmbeddedAssistantServer)

type AudioInConfig Uses

type AudioInConfig struct {

    // *Required* Encoding of audio data sent in all `audio_in` messages.
    Encoding AudioInConfig_Encoding `protobuf:"varint,1,opt,name=encoding,proto3,enum=google.assistant.embedded.v1alpha1.AudioInConfig_Encoding" json:"encoding,omitempty"`
    // *Required* Sample rate (in Hertz) of the audio data sent in all `audio_in`
    // messages. Valid values are from 16000-24000, but 16000 is optimal.
    // For best results, set the sampling rate of the audio source to 16000 Hz.
    // If that's not possible, use the native sample rate of the audio source
    // (instead of re-sampling).
    SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz,proto3" json:"sample_rate_hertz,omitempty"`
    // contains filtered or unexported fields
}

Specifies how to process the `audio_in` data that will be provided in subsequent requests. For recommended settings, see the Google Assistant SDK [best practices](https://developers.google.com/assistant/sdk/develop/grpc/best-practices/audio).

func (*AudioInConfig) Descriptor Uses

func (*AudioInConfig) Descriptor() ([]byte, []int)

Deprecated: Use AudioInConfig.ProtoReflect.Descriptor instead.

func (*AudioInConfig) GetEncoding Uses

func (x *AudioInConfig) GetEncoding() AudioInConfig_Encoding

func (*AudioInConfig) GetSampleRateHertz Uses

func (x *AudioInConfig) GetSampleRateHertz() int32

func (*AudioInConfig) ProtoMessage Uses

func (*AudioInConfig) ProtoMessage()

func (*AudioInConfig) ProtoReflect Uses

func (x *AudioInConfig) ProtoReflect() protoreflect.Message

func (*AudioInConfig) Reset Uses

func (x *AudioInConfig) Reset()

func (*AudioInConfig) String Uses

func (x *AudioInConfig) String() string

type AudioInConfig_Encoding Uses

type AudioInConfig_Encoding int32

Audio encoding of the data sent in the audio message. Audio must be one-channel (mono). The only language supported is "en-US".

const (
    // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][].
    AudioInConfig_ENCODING_UNSPECIFIED AudioInConfig_Encoding = 0
    // Uncompressed 16-bit signed little-endian samples (Linear PCM).
    // This encoding includes no header, only the raw audio bytes.
    AudioInConfig_LINEAR16 AudioInConfig_Encoding = 1
    // [`FLAC`](https://xiph.org/flac/documentation.html) (Free Lossless Audio
    // Codec) is the recommended encoding because it is
    // lossless--therefore recognition is not compromised--and
    // requires only about half the bandwidth of `LINEAR16`. This encoding
    // includes the `FLAC` stream header followed by audio data. It supports
    // 16-bit and 24-bit samples, however, not all fields in `STREAMINFO` are
    // supported.
    AudioInConfig_FLAC AudioInConfig_Encoding = 2
)

func (AudioInConfig_Encoding) Descriptor Uses

func (AudioInConfig_Encoding) Descriptor() protoreflect.EnumDescriptor

func (AudioInConfig_Encoding) Enum Uses

func (x AudioInConfig_Encoding) Enum() *AudioInConfig_Encoding

func (AudioInConfig_Encoding) EnumDescriptor Uses

func (AudioInConfig_Encoding) EnumDescriptor() ([]byte, []int)

Deprecated: Use AudioInConfig_Encoding.Descriptor instead.

func (AudioInConfig_Encoding) Number Uses

func (x AudioInConfig_Encoding) Number() protoreflect.EnumNumber

func (AudioInConfig_Encoding) String Uses

func (x AudioInConfig_Encoding) String() string

func (AudioInConfig_Encoding) Type Uses

func (AudioInConfig_Encoding) Type() protoreflect.EnumType

type AudioOut Uses

type AudioOut struct {

    // *Output-only* The audio data containing the assistant's response to the
    // query. Sequential chunks of audio data are received in sequential
    // `ConverseResponse` messages.
    AudioData []byte `protobuf:"bytes,1,opt,name=audio_data,json=audioData,proto3" json:"audio_data,omitempty"`
    // contains filtered or unexported fields
}

The audio containing the assistant's response to the query. Sequential chunks of audio data are received in sequential `ConverseResponse` messages.

func (*AudioOut) Descriptor Uses

func (*AudioOut) Descriptor() ([]byte, []int)

Deprecated: Use AudioOut.ProtoReflect.Descriptor instead.

func (*AudioOut) GetAudioData Uses

func (x *AudioOut) GetAudioData() []byte

func (*AudioOut) ProtoMessage Uses

func (*AudioOut) ProtoMessage()

func (*AudioOut) ProtoReflect Uses

func (x *AudioOut) ProtoReflect() protoreflect.Message

func (*AudioOut) Reset Uses

func (x *AudioOut) Reset()

func (*AudioOut) String Uses

func (x *AudioOut) String() string

type AudioOutConfig Uses

type AudioOutConfig struct {

    // *Required* The encoding of audio data to be returned in all `audio_out`
    // messages.
    Encoding AudioOutConfig_Encoding `protobuf:"varint,1,opt,name=encoding,proto3,enum=google.assistant.embedded.v1alpha1.AudioOutConfig_Encoding" json:"encoding,omitempty"`
    // *Required* The sample rate in Hertz of the audio data returned in
    // `audio_out` messages. Valid values are: 16000-24000.
    SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz,proto3" json:"sample_rate_hertz,omitempty"`
    // *Required* Current volume setting of the device's audio output.
    // Valid values are 1 to 100 (corresponding to 1% to 100%).
    VolumePercentage int32 `protobuf:"varint,3,opt,name=volume_percentage,json=volumePercentage,proto3" json:"volume_percentage,omitempty"`
    // contains filtered or unexported fields
}

Specifies the desired format for the server to use when it returns `audio_out` messages.

func (*AudioOutConfig) Descriptor Uses

func (*AudioOutConfig) Descriptor() ([]byte, []int)

Deprecated: Use AudioOutConfig.ProtoReflect.Descriptor instead.

func (*AudioOutConfig) GetEncoding Uses

func (x *AudioOutConfig) GetEncoding() AudioOutConfig_Encoding

func (*AudioOutConfig) GetSampleRateHertz Uses

func (x *AudioOutConfig) GetSampleRateHertz() int32

func (*AudioOutConfig) GetVolumePercentage Uses

func (x *AudioOutConfig) GetVolumePercentage() int32

func (*AudioOutConfig) ProtoMessage Uses

func (*AudioOutConfig) ProtoMessage()

func (*AudioOutConfig) ProtoReflect Uses

func (x *AudioOutConfig) ProtoReflect() protoreflect.Message

func (*AudioOutConfig) Reset Uses

func (x *AudioOutConfig) Reset()

func (*AudioOutConfig) String Uses

func (x *AudioOutConfig) String() string

type AudioOutConfig_Encoding Uses

type AudioOutConfig_Encoding int32

Audio encoding of the data returned in the audio message. All encodings are raw audio bytes with no header, except as indicated below.

const (
    // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][].
    AudioOutConfig_ENCODING_UNSPECIFIED AudioOutConfig_Encoding = 0
    // Uncompressed 16-bit signed little-endian samples (Linear PCM).
    AudioOutConfig_LINEAR16 AudioOutConfig_Encoding = 1
    // MP3 audio encoding. The sample rate is encoded in the payload.
    AudioOutConfig_MP3 AudioOutConfig_Encoding = 2
    // Opus-encoded audio wrapped in an ogg container. The result will be a
    // file which can be played natively on Android and in some browsers (such
    // as Chrome). The quality of the encoding is considerably higher than MP3
    // while using the same bitrate. The sample rate is encoded in the payload.
    AudioOutConfig_OPUS_IN_OGG AudioOutConfig_Encoding = 3
)

func (AudioOutConfig_Encoding) Descriptor Uses

func (AudioOutConfig_Encoding) Descriptor() protoreflect.EnumDescriptor

func (AudioOutConfig_Encoding) Enum Uses

func (x AudioOutConfig_Encoding) Enum() *AudioOutConfig_Encoding

func (AudioOutConfig_Encoding) EnumDescriptor Uses

func (AudioOutConfig_Encoding) EnumDescriptor() ([]byte, []int)

Deprecated: Use AudioOutConfig_Encoding.Descriptor instead.

func (AudioOutConfig_Encoding) Number Uses

func (x AudioOutConfig_Encoding) Number() protoreflect.EnumNumber

func (AudioOutConfig_Encoding) String Uses

func (x AudioOutConfig_Encoding) String() string

func (AudioOutConfig_Encoding) Type Uses

func (AudioOutConfig_Encoding) Type() protoreflect.EnumType

type ConverseConfig Uses

type ConverseConfig struct {

    // *Required* Specifies how to process the subsequent incoming audio.
    AudioInConfig *AudioInConfig `protobuf:"bytes,1,opt,name=audio_in_config,json=audioInConfig,proto3" json:"audio_in_config,omitempty"`
    // *Required* Specifies how to format the audio that will be returned.
    AudioOutConfig *AudioOutConfig `protobuf:"bytes,2,opt,name=audio_out_config,json=audioOutConfig,proto3" json:"audio_out_config,omitempty"`
    // *Required* Represents the current dialog state.
    ConverseState *ConverseState `protobuf:"bytes,3,opt,name=converse_state,json=converseState,proto3" json:"converse_state,omitempty"`
    // contains filtered or unexported fields
}

Specifies how to process the `ConverseRequest` messages.

func (*ConverseConfig) Descriptor Uses

func (*ConverseConfig) Descriptor() ([]byte, []int)

Deprecated: Use ConverseConfig.ProtoReflect.Descriptor instead.

func (*ConverseConfig) GetAudioInConfig Uses

func (x *ConverseConfig) GetAudioInConfig() *AudioInConfig

func (*ConverseConfig) GetAudioOutConfig Uses

func (x *ConverseConfig) GetAudioOutConfig() *AudioOutConfig

func (*ConverseConfig) GetConverseState Uses

func (x *ConverseConfig) GetConverseState() *ConverseState

func (*ConverseConfig) ProtoMessage Uses

func (*ConverseConfig) ProtoMessage()

func (*ConverseConfig) ProtoReflect Uses

func (x *ConverseConfig) ProtoReflect() protoreflect.Message

func (*ConverseConfig) Reset Uses

func (x *ConverseConfig) Reset()

func (*ConverseConfig) String Uses

func (x *ConverseConfig) String() string

type ConverseRequest Uses

type ConverseRequest struct {

    // Exactly one of these fields must be specified in each `ConverseRequest`.
    //
    // Types that are assignable to ConverseRequest:
    //	*ConverseRequest_Config
    //	*ConverseRequest_AudioIn
    ConverseRequest isConverseRequest_ConverseRequest `protobuf_oneof:"converse_request"`
    // contains filtered or unexported fields
}

The top-level message sent by the client. Clients must send at least two, and typically numerous `ConverseRequest` messages. The first message must contain a `config` message and must not contain `audio_in` data. All subsequent messages must contain `audio_in` data and must not contain a `config` message.

func (*ConverseRequest) Descriptor Uses

func (*ConverseRequest) Descriptor() ([]byte, []int)

Deprecated: Use ConverseRequest.ProtoReflect.Descriptor instead.

func (*ConverseRequest) GetAudioIn Uses

func (x *ConverseRequest) GetAudioIn() []byte

func (*ConverseRequest) GetConfig Uses

func (x *ConverseRequest) GetConfig() *ConverseConfig

func (*ConverseRequest) GetConverseRequest Uses

func (m *ConverseRequest) GetConverseRequest() isConverseRequest_ConverseRequest

func (*ConverseRequest) ProtoMessage Uses

func (*ConverseRequest) ProtoMessage()

func (*ConverseRequest) ProtoReflect Uses

func (x *ConverseRequest) ProtoReflect() protoreflect.Message

func (*ConverseRequest) Reset Uses

func (x *ConverseRequest) Reset()

func (*ConverseRequest) String Uses

func (x *ConverseRequest) String() string

type ConverseRequest_AudioIn Uses

type ConverseRequest_AudioIn struct {
    // The audio data to be recognized. Sequential chunks of audio data are sent
    // in sequential `ConverseRequest` messages. The first `ConverseRequest`
    // message must not contain `audio_in` data and all subsequent
    // `ConverseRequest` messages must contain `audio_in` data. The audio bytes
    // must be encoded as specified in `AudioInConfig`.
    // Audio must be sent at approximately real-time (16000 samples per second).
    // An error will be returned if audio is sent significantly faster or
    // slower.
    AudioIn []byte `protobuf:"bytes,2,opt,name=audio_in,json=audioIn,proto3,oneof"`
}

type ConverseRequest_Config Uses

type ConverseRequest_Config struct {
    // The `config` message provides information to the recognizer that
    // specifies how to process the request.
    // The first `ConverseRequest` message must contain a `config` message.
    Config *ConverseConfig `protobuf:"bytes,1,opt,name=config,proto3,oneof"`
}

type ConverseResponse Uses

type ConverseResponse struct {

    // Exactly one of these fields will be populated in each `ConverseResponse`.
    //
    // Types that are assignable to ConverseResponse:
    //	*ConverseResponse_Error
    //	*ConverseResponse_EventType_
    //	*ConverseResponse_AudioOut
    //	*ConverseResponse_Result
    ConverseResponse isConverseResponse_ConverseResponse `protobuf_oneof:"converse_response"`
    // contains filtered or unexported fields
}

The top-level message received by the client. A series of one or more `ConverseResponse` messages are streamed back to the client.

func (*ConverseResponse) Descriptor Uses

func (*ConverseResponse) Descriptor() ([]byte, []int)

Deprecated: Use ConverseResponse.ProtoReflect.Descriptor instead.

func (*ConverseResponse) GetAudioOut Uses

func (x *ConverseResponse) GetAudioOut() *AudioOut

func (*ConverseResponse) GetConverseResponse Uses

func (m *ConverseResponse) GetConverseResponse() isConverseResponse_ConverseResponse

func (*ConverseResponse) GetError Uses

func (x *ConverseResponse) GetError() *status.Status

func (*ConverseResponse) GetEventType Uses

func (x *ConverseResponse) GetEventType() ConverseResponse_EventType

func (*ConverseResponse) GetResult Uses

func (x *ConverseResponse) GetResult() *ConverseResult

func (*ConverseResponse) ProtoMessage Uses

func (*ConverseResponse) ProtoMessage()

func (*ConverseResponse) ProtoReflect Uses

func (x *ConverseResponse) ProtoReflect() protoreflect.Message

func (*ConverseResponse) Reset Uses

func (x *ConverseResponse) Reset()

func (*ConverseResponse) String Uses

func (x *ConverseResponse) String() string

type ConverseResponse_AudioOut Uses

type ConverseResponse_AudioOut struct {
    // *Output-only* The audio containing the assistant's response to the query.
    AudioOut *AudioOut `protobuf:"bytes,3,opt,name=audio_out,json=audioOut,proto3,oneof"`
}

type ConverseResponse_Error Uses

type ConverseResponse_Error struct {
    // *Output-only* If set, returns a [google.rpc.Status][google.rpc.Status]
    // message that specifies the error for the operation. If an error occurs
    // during processing, this message will be set and there will be no further
    // messages sent.
    Error *status.Status `protobuf:"bytes,1,opt,name=error,proto3,oneof"`
}

type ConverseResponse_EventType Uses

type ConverseResponse_EventType int32

Indicates the type of event.

const (
    // No event specified.
    ConverseResponse_EVENT_TYPE_UNSPECIFIED ConverseResponse_EventType = 0
    // This event indicates that the server has detected the end of the user's
    // speech utterance and expects no additional speech. Therefore, the server
    // will not process additional audio (although it may subsequently return
    // additional results). The client should stop sending additional audio
    // data, half-close the gRPC connection, and wait for any additional results
    // until the server closes the gRPC connection.
    ConverseResponse_END_OF_UTTERANCE ConverseResponse_EventType = 1
)

func (ConverseResponse_EventType) Descriptor Uses

func (ConverseResponse_EventType) Descriptor() protoreflect.EnumDescriptor

func (ConverseResponse_EventType) Enum Uses

func (x ConverseResponse_EventType) Enum() *ConverseResponse_EventType

func (ConverseResponse_EventType) EnumDescriptor Uses

func (ConverseResponse_EventType) EnumDescriptor() ([]byte, []int)

Deprecated: Use ConverseResponse_EventType.Descriptor instead.

func (ConverseResponse_EventType) Number Uses

func (x ConverseResponse_EventType) Number() protoreflect.EnumNumber

func (ConverseResponse_EventType) String Uses

func (x ConverseResponse_EventType) String() string

func (ConverseResponse_EventType) Type Uses

func (ConverseResponse_EventType) Type() protoreflect.EnumType

type ConverseResponse_EventType_ Uses

type ConverseResponse_EventType_ struct {
    // *Output-only* Indicates the type of event.
    EventType ConverseResponse_EventType `protobuf:"varint,2,opt,name=event_type,json=eventType,proto3,enum=google.assistant.embedded.v1alpha1.ConverseResponse_EventType,oneof"`
}

type ConverseResponse_Result Uses

type ConverseResponse_Result struct {
    // *Output-only* The semantic result for the user's spoken query.
    Result *ConverseResult `protobuf:"bytes,5,opt,name=result,proto3,oneof"`
}

type ConverseResult Uses

type ConverseResult struct {

    // *Output-only* The recognized transcript of what the user said.
    SpokenRequestText string `protobuf:"bytes,1,opt,name=spoken_request_text,json=spokenRequestText,proto3" json:"spoken_request_text,omitempty"`
    // *Output-only* The text of the assistant's spoken response. This is only
    // returned for an IFTTT action.
    SpokenResponseText string `protobuf:"bytes,2,opt,name=spoken_response_text,json=spokenResponseText,proto3" json:"spoken_response_text,omitempty"`
    // *Output-only* State information for subsequent `ConverseRequest`. This
    // value should be saved in the client and returned in the
    // `conversation_state` with the next `ConverseRequest`. (The client does not
    // need to interpret or otherwise use this value.) There is no need to save
    // this information across device restarts.
    ConversationState []byte `protobuf:"bytes,3,opt,name=conversation_state,json=conversationState,proto3" json:"conversation_state,omitempty"`
    // *Output-only* Specifies the mode of the microphone after this `Converse`
    // RPC is processed.
    MicrophoneMode ConverseResult_MicrophoneMode `protobuf:"varint,4,opt,name=microphone_mode,json=microphoneMode,proto3,enum=google.assistant.embedded.v1alpha1.ConverseResult_MicrophoneMode" json:"microphone_mode,omitempty"`
    // *Output-only* Updated volume level. The value will be 0 or omitted
    // (indicating no change) unless a voice command such as "Increase the volume"
    // or "Set volume level 4" was recognized, in which case the value will be
    // between 1 and 100 (corresponding to the new volume level of 1% to 100%).
    // Typically, a client should use this volume level when playing the
    // `audio_out` data, and retain this value as the current volume level and
    // supply it in the `AudioOutConfig` of the next `ConverseRequest`. (Some
    // clients may also implement other ways to allow the current volume level to
    // be changed, for example, by providing a knob that the user can turn.)
    VolumePercentage int32 `protobuf:"varint,5,opt,name=volume_percentage,json=volumePercentage,proto3" json:"volume_percentage,omitempty"`
    // contains filtered or unexported fields
}

The semantic result for the user's spoken query.

func (*ConverseResult) Descriptor Uses

func (*ConverseResult) Descriptor() ([]byte, []int)

Deprecated: Use ConverseResult.ProtoReflect.Descriptor instead.

func (*ConverseResult) GetConversationState Uses

func (x *ConverseResult) GetConversationState() []byte

func (*ConverseResult) GetMicrophoneMode Uses

func (x *ConverseResult) GetMicrophoneMode() ConverseResult_MicrophoneMode

func (*ConverseResult) GetSpokenRequestText Uses

func (x *ConverseResult) GetSpokenRequestText() string

func (*ConverseResult) GetSpokenResponseText Uses

func (x *ConverseResult) GetSpokenResponseText() string

func (*ConverseResult) GetVolumePercentage Uses

func (x *ConverseResult) GetVolumePercentage() int32

func (*ConverseResult) ProtoMessage Uses

func (*ConverseResult) ProtoMessage()

func (*ConverseResult) ProtoReflect Uses

func (x *ConverseResult) ProtoReflect() protoreflect.Message

func (*ConverseResult) Reset Uses

func (x *ConverseResult) Reset()

func (*ConverseResult) String Uses

func (x *ConverseResult) String() string

type ConverseResult_MicrophoneMode Uses

type ConverseResult_MicrophoneMode int32

Possible states of the microphone after a `Converse` RPC completes.

const (
    // No mode specified.
    ConverseResult_MICROPHONE_MODE_UNSPECIFIED ConverseResult_MicrophoneMode = 0
    // The service is not expecting a follow-on question from the user.
    // The microphone should remain off until the user re-activates it.
    ConverseResult_CLOSE_MICROPHONE ConverseResult_MicrophoneMode = 1
    // The service is expecting a follow-on question from the user. The
    // microphone should be re-opened when the `AudioOut` playback completes
    // (by starting a new `Converse` RPC call to send the new audio).
    ConverseResult_DIALOG_FOLLOW_ON ConverseResult_MicrophoneMode = 2
)

func (ConverseResult_MicrophoneMode) Descriptor Uses

func (ConverseResult_MicrophoneMode) Descriptor() protoreflect.EnumDescriptor

func (ConverseResult_MicrophoneMode) Enum Uses

func (x ConverseResult_MicrophoneMode) Enum() *ConverseResult_MicrophoneMode

func (ConverseResult_MicrophoneMode) EnumDescriptor Uses

func (ConverseResult_MicrophoneMode) EnumDescriptor() ([]byte, []int)

Deprecated: Use ConverseResult_MicrophoneMode.Descriptor instead.

func (ConverseResult_MicrophoneMode) Number Uses

func (x ConverseResult_MicrophoneMode) Number() protoreflect.EnumNumber

func (ConverseResult_MicrophoneMode) String Uses

func (x ConverseResult_MicrophoneMode) String() string

func (ConverseResult_MicrophoneMode) Type Uses

func (ConverseResult_MicrophoneMode) Type() protoreflect.EnumType

type ConverseState Uses

type ConverseState struct {

    // *Required* The `conversation_state` value returned in the prior
    // `ConverseResponse`. Omit (do not set the field) if there was no prior
    // `ConverseResponse`. If there was a prior `ConverseResponse`, do not omit
    // this field; doing so will end that conversation (and this new request will
    // start a new conversation).
    ConversationState []byte `protobuf:"bytes,1,opt,name=conversation_state,json=conversationState,proto3" json:"conversation_state,omitempty"`
    // contains filtered or unexported fields
}

Provides information about the current dialog state.

func (*ConverseState) Descriptor Uses

func (*ConverseState) Descriptor() ([]byte, []int)

Deprecated: Use ConverseState.ProtoReflect.Descriptor instead.

func (*ConverseState) GetConversationState Uses

func (x *ConverseState) GetConversationState() []byte

func (*ConverseState) ProtoMessage Uses

func (*ConverseState) ProtoMessage()

func (*ConverseState) ProtoReflect Uses

func (x *ConverseState) ProtoReflect() protoreflect.Message

func (*ConverseState) Reset Uses

func (x *ConverseState) Reset()

func (*ConverseState) String Uses

func (x *ConverseState) String() string

type EmbeddedAssistantClient Uses

type EmbeddedAssistantClient interface {
    // Initiates or continues a conversation with the embedded assistant service.
    // Each call performs one round-trip, sending an audio request to the service
    // and receiving the audio response. Uses bidirectional streaming to receive
    // results, such as the `END_OF_UTTERANCE` event, while sending audio.
    //
    // A conversation is one or more gRPC connections, each consisting of several
    // streamed requests and responses.
    // For example, the user says *Add to my shopping list* and the assistant
    // responds *What do you want to add?*. The sequence of streamed requests and
    // responses in the first gRPC message could be:
    //
    // *   ConverseRequest.config
    // *   ConverseRequest.audio_in
    // *   ConverseRequest.audio_in
    // *   ConverseRequest.audio_in
    // *   ConverseRequest.audio_in
    // *   ConverseResponse.event_type.END_OF_UTTERANCE
    // *   ConverseResponse.result.microphone_mode.DIALOG_FOLLOW_ON
    // *   ConverseResponse.audio_out
    // *   ConverseResponse.audio_out
    // *   ConverseResponse.audio_out
    //
    // The user then says *bagels* and the assistant responds
    // *OK, I've added bagels to your shopping list*. This is sent as another gRPC
    // connection call to the `Converse` method, again with streamed requests and
    // responses, such as:
    //
    // *   ConverseRequest.config
    // *   ConverseRequest.audio_in
    // *   ConverseRequest.audio_in
    // *   ConverseRequest.audio_in
    // *   ConverseResponse.event_type.END_OF_UTTERANCE
    // *   ConverseResponse.result.microphone_mode.CLOSE_MICROPHONE
    // *   ConverseResponse.audio_out
    // *   ConverseResponse.audio_out
    // *   ConverseResponse.audio_out
    // *   ConverseResponse.audio_out
    //
    // Although the precise order of responses is not guaranteed, sequential
    // ConverseResponse.audio_out messages will always contain sequential portions
    // of audio.
    Converse(ctx context.Context, opts ...grpc.CallOption) (EmbeddedAssistant_ConverseClient, error)
}

EmbeddedAssistantClient is the client API for EmbeddedAssistant service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewEmbeddedAssistantClient Uses

func NewEmbeddedAssistantClient(cc grpc.ClientConnInterface) EmbeddedAssistantClient

type EmbeddedAssistantServer Uses

type EmbeddedAssistantServer interface {
    // Initiates or continues a conversation with the embedded assistant service.
    // Each call performs one round-trip, sending an audio request to the service
    // and receiving the audio response. Uses bidirectional streaming to receive
    // results, such as the `END_OF_UTTERANCE` event, while sending audio.
    //
    // A conversation is one or more gRPC connections, each consisting of several
    // streamed requests and responses.
    // For example, the user says *Add to my shopping list* and the assistant
    // responds *What do you want to add?*. The sequence of streamed requests and
    // responses in the first gRPC message could be:
    //
    // *   ConverseRequest.config
    // *   ConverseRequest.audio_in
    // *   ConverseRequest.audio_in
    // *   ConverseRequest.audio_in
    // *   ConverseRequest.audio_in
    // *   ConverseResponse.event_type.END_OF_UTTERANCE
    // *   ConverseResponse.result.microphone_mode.DIALOG_FOLLOW_ON
    // *   ConverseResponse.audio_out
    // *   ConverseResponse.audio_out
    // *   ConverseResponse.audio_out
    //
    // The user then says *bagels* and the assistant responds
    // *OK, I've added bagels to your shopping list*. This is sent as another gRPC
    // connection call to the `Converse` method, again with streamed requests and
    // responses, such as:
    //
    // *   ConverseRequest.config
    // *   ConverseRequest.audio_in
    // *   ConverseRequest.audio_in
    // *   ConverseRequest.audio_in
    // *   ConverseResponse.event_type.END_OF_UTTERANCE
    // *   ConverseResponse.result.microphone_mode.CLOSE_MICROPHONE
    // *   ConverseResponse.audio_out
    // *   ConverseResponse.audio_out
    // *   ConverseResponse.audio_out
    // *   ConverseResponse.audio_out
    //
    // Although the precise order of responses is not guaranteed, sequential
    // ConverseResponse.audio_out messages will always contain sequential portions
    // of audio.
    Converse(EmbeddedAssistant_ConverseServer) error
}

EmbeddedAssistantServer is the server API for EmbeddedAssistant service.

type EmbeddedAssistant_ConverseClient Uses

type EmbeddedAssistant_ConverseClient interface {
    Send(*ConverseRequest) error
    Recv() (*ConverseResponse, error)
    grpc.ClientStream
}

type EmbeddedAssistant_ConverseServer Uses

type EmbeddedAssistant_ConverseServer interface {
    Send(*ConverseResponse) error
    Recv() (*ConverseRequest, error)
    grpc.ServerStream
}

type UnimplementedEmbeddedAssistantServer Uses

type UnimplementedEmbeddedAssistantServer struct {
}

UnimplementedEmbeddedAssistantServer can be embedded to have forward compatible implementations.

func (*UnimplementedEmbeddedAssistantServer) Converse Uses

func (*UnimplementedEmbeddedAssistantServer) Converse(EmbeddedAssistant_ConverseServer) error

Package embedded imports 11 packages. Documentation last updated 2020-07-03.