embedded

package
v0.0.0-...-694d95b

This package is not in the latest version of its module.
Published: May 23, 2018 License: Apache-2.0 Imports: 7 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

var AssistResponse_EventType_name = map[int32]string{
	0: "EVENT_TYPE_UNSPECIFIED",
	1: "END_OF_UTTERANCE",
}
var AssistResponse_EventType_value = map[string]int32{
	"EVENT_TYPE_UNSPECIFIED": 0,
	"END_OF_UTTERANCE":       1,
}
var AudioInConfig_Encoding_name = map[int32]string{
	0: "ENCODING_UNSPECIFIED",
	1: "LINEAR16",
	2: "FLAC",
}
var AudioInConfig_Encoding_value = map[string]int32{
	"ENCODING_UNSPECIFIED": 0,
	"LINEAR16":             1,
	"FLAC":                 2,
}
var AudioOutConfig_Encoding_name = map[int32]string{
	0: "ENCODING_UNSPECIFIED",
	1: "LINEAR16",
	2: "MP3",
	3: "OPUS_IN_OGG",
}
var AudioOutConfig_Encoding_value = map[string]int32{
	"ENCODING_UNSPECIFIED": 0,
	"LINEAR16":             1,
	"MP3":                  2,
	"OPUS_IN_OGG":          3,
}
var DialogStateOut_MicrophoneMode_name = map[int32]string{
	0: "MICROPHONE_MODE_UNSPECIFIED",
	1: "CLOSE_MICROPHONE",
	2: "DIALOG_FOLLOW_ON",
}
var DialogStateOut_MicrophoneMode_value = map[string]int32{
	"MICROPHONE_MODE_UNSPECIFIED": 0,
	"CLOSE_MICROPHONE":            1,
	"DIALOG_FOLLOW_ON":            2,
}
var ScreenOutConfig_ScreenMode_name = map[int32]string{
	0: "SCREEN_MODE_UNSPECIFIED",
	1: "OFF",
	3: "PLAYING",
}
var ScreenOutConfig_ScreenMode_value = map[string]int32{
	"SCREEN_MODE_UNSPECIFIED": 0,
	"OFF":                     1,
	"PLAYING":                 3,
}
var ScreenOut_Format_name = map[int32]string{
	0: "FORMAT_UNSPECIFIED",
	1: "HTML",
}
var ScreenOut_Format_value = map[string]int32{
	"FORMAT_UNSPECIFIED": 0,
	"HTML":               1,
}

Functions

func RegisterEmbeddedAssistantServer

func RegisterEmbeddedAssistantServer(s *grpc.Server, srv EmbeddedAssistantServer)
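
The sketch below shows one way to wire an implementation into a gRPC server. `assistantServer` is a hypothetical type added for illustration; identifiers from this package are written unqualified, as if the code lived alongside it, the listen address is arbitrary, and the standard `net` and `io` packages plus `google.golang.org/grpc` are assumed as imports.

// assistantServer is a placeholder implementation of EmbeddedAssistantServer.
type assistantServer struct{}

// Assist reads streamed requests until the client half-closes, then ends the RPC.
func (assistantServer) Assist(stream EmbeddedAssistant_AssistServer) error {
	for {
		_, err := stream.Recv()
		if err == io.EOF {
			return nil // client finished sending; end the RPC
		}
		if err != nil {
			return err
		}
		// A real implementation would stream AssistResponse messages back here.
	}
}

func serve() error {
	lis, err := net.Listen("tcp", ":10000") // arbitrary port for the sketch
	if err != nil {
		return err
	}
	s := grpc.NewServer()
	RegisterEmbeddedAssistantServer(s, assistantServer{})
	return s.Serve(lis)
}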

Types

type AssistConfig

type AssistConfig struct {
	// Types that are valid to be assigned to Type:
	//	*AssistConfig_AudioInConfig
	//	*AssistConfig_TextQuery
	Type isAssistConfig_Type `protobuf_oneof:"type"`
	// *Required* Specifies how to format the audio that will be returned.
	AudioOutConfig *AudioOutConfig `protobuf:"bytes,2,opt,name=audio_out_config,json=audioOutConfig" json:"audio_out_config,omitempty"`
	// *Optional* Specifies the desired format to use when server returns a
	// visual screen response.
	ScreenOutConfig *ScreenOutConfig `protobuf:"bytes,8,opt,name=screen_out_config,json=screenOutConfig" json:"screen_out_config,omitempty"`
	// *Required* Represents the current dialog state.
	DialogStateIn *DialogStateIn `protobuf:"bytes,3,opt,name=dialog_state_in,json=dialogStateIn" json:"dialog_state_in,omitempty"`
	// Device configuration that uniquely identifies a specific device.
	DeviceConfig         *DeviceConfig `protobuf:"bytes,4,opt,name=device_config,json=deviceConfig" json:"device_config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

Specifies how to process the `AssistRequest` messages.
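
As a rough sketch, the `type` oneof is set by choosing one of the wrapper structs defined below; this example picks the text-query arm and fills in the required `AudioOutConfig`. All values are illustrative, and `DialogStateIn`/`DeviceConfig` are covered under their own types further down.

func newTextAssistConfig(state *DialogStateIn, dev *DeviceConfig) *AssistConfig {
	return &AssistConfig{
		// Exactly one arm of the oneof: a text query here, or
		// &AssistConfig_AudioInConfig{...} for spoken input.
		Type: &AssistConfig_TextQuery{TextQuery: "what time is it"},
		// Required: how the returned audio should be encoded.
		AudioOutConfig: &AudioOutConfig{
			Encoding:         AudioOutConfig_LINEAR16,
			SampleRateHertz:  16000,
			VolumePercentage: 50,
		},
		DialogStateIn: state, // required; see DialogStateIn below
		DeviceConfig:  dev,   // required; see DeviceConfig below
	}
}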

func (*AssistConfig) Descriptor

func (*AssistConfig) Descriptor() ([]byte, []int)

func (*AssistConfig) GetAudioInConfig

func (m *AssistConfig) GetAudioInConfig() *AudioInConfig

func (*AssistConfig) GetAudioOutConfig

func (m *AssistConfig) GetAudioOutConfig() *AudioOutConfig

func (*AssistConfig) GetDeviceConfig

func (m *AssistConfig) GetDeviceConfig() *DeviceConfig

func (*AssistConfig) GetDialogStateIn

func (m *AssistConfig) GetDialogStateIn() *DialogStateIn

func (*AssistConfig) GetScreenOutConfig

func (m *AssistConfig) GetScreenOutConfig() *ScreenOutConfig

func (*AssistConfig) GetTextQuery

func (m *AssistConfig) GetTextQuery() string

func (*AssistConfig) GetType

func (m *AssistConfig) GetType() isAssistConfig_Type

func (*AssistConfig) ProtoMessage

func (*AssistConfig) ProtoMessage()

func (*AssistConfig) Reset

func (m *AssistConfig) Reset()

func (*AssistConfig) String

func (m *AssistConfig) String() string

func (*AssistConfig) XXX_DiscardUnknown

func (m *AssistConfig) XXX_DiscardUnknown()

func (*AssistConfig) XXX_Marshal

func (m *AssistConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AssistConfig) XXX_Merge

func (dst *AssistConfig) XXX_Merge(src proto.Message)

func (*AssistConfig) XXX_OneofFuncs

func (*AssistConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

func (*AssistConfig) XXX_Size

func (m *AssistConfig) XXX_Size() int

func (*AssistConfig) XXX_Unmarshal

func (m *AssistConfig) XXX_Unmarshal(b []byte) error

type AssistConfig_AudioInConfig

type AssistConfig_AudioInConfig struct {
	AudioInConfig *AudioInConfig `protobuf:"bytes,1,opt,name=audio_in_config,json=audioInConfig,oneof"`
}

type AssistConfig_TextQuery

type AssistConfig_TextQuery struct {
	TextQuery string `protobuf:"bytes,6,opt,name=text_query,json=textQuery,oneof"`
}

type AssistRequest

type AssistRequest struct {
	// Exactly one of these fields must be specified in each `AssistRequest`.
	//
	// Types that are valid to be assigned to Type:
	//	*AssistRequest_Config
	//	*AssistRequest_AudioIn
	Type                 isAssistRequest_Type `protobuf_oneof:"type"`
	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
	XXX_unrecognized     []byte               `json:"-"`
	XXX_sizecache        int32                `json:"-"`
}

The top-level message sent by the client. Clients must send at least two, and typically numerous `AssistRequest` messages. The first message must contain a `config` message and must not contain `audio_in` data. All subsequent messages must contain `audio_in` data and must not contain a `config` message.
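
A sketch of the ordering described above, assuming an already-open `EmbeddedAssistant_AssistClient` stream and a hypothetical channel of captured audio chunks: the first `Send` carries only the config, every later `Send` carries only audio, and `CloseSend` half-closes once capture stops.

func sendAssistRequests(stream EmbeddedAssistant_AssistClient, cfg *AssistConfig, chunks <-chan []byte) error {
	// First message: config only, no audio_in.
	if err := stream.Send(&AssistRequest{
		Type: &AssistRequest_Config{Config: cfg},
	}); err != nil {
		return err
	}
	// Subsequent messages: audio_in only, no config.
	for chunk := range chunks {
		if err := stream.Send(&AssistRequest{
			Type: &AssistRequest_AudioIn{AudioIn: chunk},
		}); err != nil {
			return err
		}
	}
	// Half-close; responses continue to arrive via Recv.
	return stream.CloseSend()
}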

func (*AssistRequest) Descriptor

func (*AssistRequest) Descriptor() ([]byte, []int)

func (*AssistRequest) GetAudioIn

func (m *AssistRequest) GetAudioIn() []byte

func (*AssistRequest) GetConfig

func (m *AssistRequest) GetConfig() *AssistConfig

func (*AssistRequest) GetType

func (m *AssistRequest) GetType() isAssistRequest_Type

func (*AssistRequest) ProtoMessage

func (*AssistRequest) ProtoMessage()

func (*AssistRequest) Reset

func (m *AssistRequest) Reset()

func (*AssistRequest) String

func (m *AssistRequest) String() string

func (*AssistRequest) XXX_DiscardUnknown

func (m *AssistRequest) XXX_DiscardUnknown()

func (*AssistRequest) XXX_Marshal

func (m *AssistRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AssistRequest) XXX_Merge

func (dst *AssistRequest) XXX_Merge(src proto.Message)

func (*AssistRequest) XXX_OneofFuncs

func (*AssistRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

func (*AssistRequest) XXX_Size

func (m *AssistRequest) XXX_Size() int

func (*AssistRequest) XXX_Unmarshal

func (m *AssistRequest) XXX_Unmarshal(b []byte) error

type AssistRequest_AudioIn

type AssistRequest_AudioIn struct {
	AudioIn []byte `protobuf:"bytes,2,opt,name=audio_in,json=audioIn,proto3,oneof"`
}

type AssistRequest_Config

type AssistRequest_Config struct {
	Config *AssistConfig `protobuf:"bytes,1,opt,name=config,oneof"`
}

type AssistResponse

type AssistResponse struct {
	// *Output-only* Indicates the type of event.
	EventType AssistResponse_EventType `` /* 147-byte string literal not displayed */
	// *Output-only* The audio containing the Assistant's response to the query.
	AudioOut *AudioOut `protobuf:"bytes,3,opt,name=audio_out,json=audioOut" json:"audio_out,omitempty"`
	// *Output-only* Contains the Assistant's visual response to the query.
	ScreenOut *ScreenOut `protobuf:"bytes,4,opt,name=screen_out,json=screenOut" json:"screen_out,omitempty"`
	// *Output-only* Contains the action triggered by the query with the
	// appropriate payloads and semantic parsing.
	DeviceAction *DeviceAction `protobuf:"bytes,6,opt,name=device_action,json=deviceAction" json:"device_action,omitempty"`
	// *Output-only* This repeated list contains zero or more speech recognition
	// results that correspond to consecutive portions of the audio currently
	// being processed, starting with the portion corresponding to the earliest
	// audio (and most stable portion) to the portion corresponding to the most
	// recent audio. The strings can be concatenated to view the full
	// in-progress response. When the speech recognition completes, this list
	// will contain one item with `stability` of `1.0`.
	SpeechResults []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=speech_results,json=speechResults" json:"speech_results,omitempty"`
	// *Output-only* Contains output related to the user's query.
	DialogStateOut       *DialogStateOut `protobuf:"bytes,5,opt,name=dialog_state_out,json=dialogStateOut" json:"dialog_state_out,omitempty"`
	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
	XXX_unrecognized     []byte          `json:"-"`
	XXX_sizecache        int32           `json:"-"`
}

The top-level message received by the client. A series of one or more `AssistResponse` messages is streamed back to the client.
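
A skeleton receive loop, as a sketch: it drains responses until the server closes the stream (the standard `io` package is assumed); handling of the individual fields is illustrated under the corresponding types below.

func drainAssistResponses(stream EmbeddedAssistant_AssistClient) error {
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil // server closed the stream; this conversation turn is done
		}
		if err != nil {
			return err
		}
		if resp.GetEventType() == AssistResponse_END_OF_UTTERANCE {
			// Stop sending audio (e.g. close the chunk channel) and keep
			// receiving until the server closes the stream.
		}
		// resp.GetAudioOut(), resp.GetScreenOut(), resp.GetDeviceAction(),
		// resp.GetSpeechResults() and resp.GetDialogStateOut() carry the payloads.
	}
}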

func (*AssistResponse) Descriptor

func (*AssistResponse) Descriptor() ([]byte, []int)

func (*AssistResponse) GetAudioOut

func (m *AssistResponse) GetAudioOut() *AudioOut

func (*AssistResponse) GetDeviceAction

func (m *AssistResponse) GetDeviceAction() *DeviceAction

func (*AssistResponse) GetDialogStateOut

func (m *AssistResponse) GetDialogStateOut() *DialogStateOut

func (*AssistResponse) GetEventType

func (m *AssistResponse) GetEventType() AssistResponse_EventType

func (*AssistResponse) GetScreenOut

func (m *AssistResponse) GetScreenOut() *ScreenOut

func (*AssistResponse) GetSpeechResults

func (m *AssistResponse) GetSpeechResults() []*SpeechRecognitionResult

func (*AssistResponse) ProtoMessage

func (*AssistResponse) ProtoMessage()

func (*AssistResponse) Reset

func (m *AssistResponse) Reset()

func (*AssistResponse) String

func (m *AssistResponse) String() string

func (*AssistResponse) XXX_DiscardUnknown

func (m *AssistResponse) XXX_DiscardUnknown()

func (*AssistResponse) XXX_Marshal

func (m *AssistResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AssistResponse) XXX_Merge

func (dst *AssistResponse) XXX_Merge(src proto.Message)

func (*AssistResponse) XXX_Size

func (m *AssistResponse) XXX_Size() int

func (*AssistResponse) XXX_Unmarshal

func (m *AssistResponse) XXX_Unmarshal(b []byte) error

type AssistResponse_EventType

type AssistResponse_EventType int32

Indicates the type of event.

const (
	// No event specified.
	AssistResponse_EVENT_TYPE_UNSPECIFIED AssistResponse_EventType = 0
	// This event indicates that the server has detected the end of the user's
	// speech utterance and expects no additional speech. Therefore, the server
	// will not process additional audio (although it may subsequently return
	// additional results). The client should stop sending additional audio
	// data, half-close the gRPC connection, and wait for any additional results
	// until the server closes the gRPC connection.
	AssistResponse_END_OF_UTTERANCE AssistResponse_EventType = 1
)

func (AssistResponse_EventType) EnumDescriptor

func (AssistResponse_EventType) EnumDescriptor() ([]byte, []int)

func (AssistResponse_EventType) String

func (x AssistResponse_EventType) String() string

type AudioInConfig

type AudioInConfig struct {
	// *Required* Encoding of audio data sent in all `audio_in` messages.
	Encoding AudioInConfig_Encoding `` /* 126-byte string literal not displayed */
	// *Required* Sample rate (in Hertz) of the audio data sent in all `audio_in`
	// messages. Valid values are from 16000-24000, but 16000 is optimal.
	// For best results, set the sampling rate of the audio source to 16000 Hz.
	// If that's not possible, use the native sample rate of the audio source
	// (instead of re-sampling).
	SampleRateHertz      int32    `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz" json:"sample_rate_hertz,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Specifies how to process the `audio_in` data that will be provided in subsequent requests. For recommended settings, see the Google Assistant SDK [best practices](https://developers.google.com/assistant/sdk/guides/service/python/best-practices/audio).
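
A small sketch of the recommended capture settings mentioned above (16-bit linear PCM at 16 kHz); adjust to match the actual audio source.

func newAudioInConfig() *AudioInConfig {
	return &AudioInConfig{
		Encoding:        AudioInConfig_LINEAR16, // raw 16-bit mono PCM, no header
		SampleRateHertz: 16000,
	}
}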

func (*AudioInConfig) Descriptor

func (*AudioInConfig) Descriptor() ([]byte, []int)

func (*AudioInConfig) GetEncoding

func (m *AudioInConfig) GetEncoding() AudioInConfig_Encoding

func (*AudioInConfig) GetSampleRateHertz

func (m *AudioInConfig) GetSampleRateHertz() int32

func (*AudioInConfig) ProtoMessage

func (*AudioInConfig) ProtoMessage()

func (*AudioInConfig) Reset

func (m *AudioInConfig) Reset()

func (*AudioInConfig) String

func (m *AudioInConfig) String() string

func (*AudioInConfig) XXX_DiscardUnknown

func (m *AudioInConfig) XXX_DiscardUnknown()

func (*AudioInConfig) XXX_Marshal

func (m *AudioInConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AudioInConfig) XXX_Merge

func (dst *AudioInConfig) XXX_Merge(src proto.Message)

func (*AudioInConfig) XXX_Size

func (m *AudioInConfig) XXX_Size() int

func (*AudioInConfig) XXX_Unmarshal

func (m *AudioInConfig) XXX_Unmarshal(b []byte) error

type AudioInConfig_Encoding

type AudioInConfig_Encoding int32

Audio encoding of the data sent in the audio message. Audio must be one-channel (mono).

const (
	// Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][].
	AudioInConfig_ENCODING_UNSPECIFIED AudioInConfig_Encoding = 0
	// Uncompressed 16-bit signed little-endian samples (Linear PCM).
	// This encoding includes no header, only the raw audio bytes.
	AudioInConfig_LINEAR16 AudioInConfig_Encoding = 1
	// [`FLAC`](https://xiph.org/flac/documentation.html) (Free Lossless Audio
	// Codec) is the recommended encoding because it is
	// lossless--therefore recognition is not compromised--and
	// requires only about half the bandwidth of `LINEAR16`. This encoding
	// includes the `FLAC` stream header followed by audio data. It supports
	// 16-bit and 24-bit samples, however, not all fields in `STREAMINFO` are
	// supported.
	AudioInConfig_FLAC AudioInConfig_Encoding = 2
)

func (AudioInConfig_Encoding) EnumDescriptor

func (AudioInConfig_Encoding) EnumDescriptor() ([]byte, []int)

func (AudioInConfig_Encoding) String

func (x AudioInConfig_Encoding) String() string

type AudioOut

type AudioOut struct {
	// *Output-only* The audio data containing the Assistant's response to the
	// query. Sequential chunks of audio data are received in sequential
	// `AssistResponse` messages.
	AudioData            []byte   `protobuf:"bytes,1,opt,name=audio_data,json=audioData,proto3" json:"audio_data,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

The audio containing the Assistant's response to the query. Sequential chunks of audio data are received in sequential `AssistResponse` messages.
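
Since chunks arrive in order, playback can simply append each chunk to a sink. The sketch below writes to any `io.Writer` (a file, or a pipe into an audio player); choosing and managing the writer is left to the caller.

func writeAudioOut(w io.Writer, resp *AssistResponse) error {
	out := resp.GetAudioOut()
	if out == nil {
		return nil // this response carried no audio chunk
	}
	_, err := w.Write(out.GetAudioData())
	return err
}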

func (*AudioOut) Descriptor

func (*AudioOut) Descriptor() ([]byte, []int)

func (*AudioOut) GetAudioData

func (m *AudioOut) GetAudioData() []byte

func (*AudioOut) ProtoMessage

func (*AudioOut) ProtoMessage()

func (*AudioOut) Reset

func (m *AudioOut) Reset()

func (*AudioOut) String

func (m *AudioOut) String() string

func (*AudioOut) XXX_DiscardUnknown

func (m *AudioOut) XXX_DiscardUnknown()

func (*AudioOut) XXX_Marshal

func (m *AudioOut) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AudioOut) XXX_Merge

func (dst *AudioOut) XXX_Merge(src proto.Message)

func (*AudioOut) XXX_Size

func (m *AudioOut) XXX_Size() int

func (*AudioOut) XXX_Unmarshal

func (m *AudioOut) XXX_Unmarshal(b []byte) error

type AudioOutConfig

type AudioOutConfig struct {
	// *Required* The encoding of audio data to be returned in all `audio_out`
	// messages.
	Encoding AudioOutConfig_Encoding `` /* 127-byte string literal not displayed */
	// *Required* The sample rate in Hertz of the audio data returned in
	// `audio_out` messages. Valid values are: 16000-24000.
	SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz" json:"sample_rate_hertz,omitempty"`
	// *Required* Current volume setting of the device's audio output.
	// Valid values are 1 to 100 (corresponding to 1% to 100%).
	VolumePercentage     int32    `protobuf:"varint,3,opt,name=volume_percentage,json=volumePercentage" json:"volume_percentage,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Specifies the desired format for the server to use when it returns `audio_out` messages.

func (*AudioOutConfig) Descriptor

func (*AudioOutConfig) Descriptor() ([]byte, []int)

func (*AudioOutConfig) GetEncoding

func (m *AudioOutConfig) GetEncoding() AudioOutConfig_Encoding

func (*AudioOutConfig) GetSampleRateHertz

func (m *AudioOutConfig) GetSampleRateHertz() int32

func (*AudioOutConfig) GetVolumePercentage

func (m *AudioOutConfig) GetVolumePercentage() int32

func (*AudioOutConfig) ProtoMessage

func (*AudioOutConfig) ProtoMessage()

func (*AudioOutConfig) Reset

func (m *AudioOutConfig) Reset()

func (*AudioOutConfig) String

func (m *AudioOutConfig) String() string

func (*AudioOutConfig) XXX_DiscardUnknown

func (m *AudioOutConfig) XXX_DiscardUnknown()

func (*AudioOutConfig) XXX_Marshal

func (m *AudioOutConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AudioOutConfig) XXX_Merge

func (dst *AudioOutConfig) XXX_Merge(src proto.Message)

func (*AudioOutConfig) XXX_Size

func (m *AudioOutConfig) XXX_Size() int

func (*AudioOutConfig) XXX_Unmarshal

func (m *AudioOutConfig) XXX_Unmarshal(b []byte) error

type AudioOutConfig_Encoding

type AudioOutConfig_Encoding int32

Audio encoding of the data returned in the audio message. All encodings are raw audio bytes with no header, except as indicated below.

const (
	// Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][].
	AudioOutConfig_ENCODING_UNSPECIFIED AudioOutConfig_Encoding = 0
	// Uncompressed 16-bit signed little-endian samples (Linear PCM).
	AudioOutConfig_LINEAR16 AudioOutConfig_Encoding = 1
	// MP3 audio encoding. The sample rate is encoded in the payload.
	AudioOutConfig_MP3 AudioOutConfig_Encoding = 2
	// Opus-encoded audio wrapped in an ogg container. The result will be a
	// file which can be played natively on Android and in some browsers (such
	// as Chrome). The quality of the encoding is considerably higher than MP3
	// while using the same bitrate. The sample rate is encoded in the payload.
	AudioOutConfig_OPUS_IN_OGG AudioOutConfig_Encoding = 3
)

func (AudioOutConfig_Encoding) EnumDescriptor

func (AudioOutConfig_Encoding) EnumDescriptor() ([]byte, []int)

func (AudioOutConfig_Encoding) String

func (x AudioOutConfig_Encoding) String() string

type DeviceAction

type DeviceAction struct {
	// JSON containing the device command response generated from the triggered
	// Device Action grammar. The format is given by the
	// `action.devices.EXECUTE` intent for a given
	// [trait](https://developers.google.com/assistant/sdk/reference/traits/).
	DeviceRequestJson    string   `protobuf:"bytes,1,opt,name=device_request_json,json=deviceRequestJson" json:"device_request_json,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

The response returned to the device if the user has triggered a Device Action. For example, a device which supports the query *Turn on the light* would receive a `DeviceAction` with a JSON payload containing the semantics of the request.
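
Because the payload is plain JSON, a handler can decode it with `encoding/json` and dispatch on the command names for the traits the device registered. The generic map below is only a sketch; a real handler would use concrete types for its traits.

func decodeDeviceAction(action *DeviceAction) (map[string]interface{}, error) {
	raw := action.GetDeviceRequestJson()
	if raw == "" {
		return nil, nil // no device action in this response
	}
	var payload map[string]interface{}
	if err := json.Unmarshal([]byte(raw), &payload); err != nil {
		return nil, err
	}
	return payload, nil
}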

func (*DeviceAction) Descriptor

func (*DeviceAction) Descriptor() ([]byte, []int)

func (*DeviceAction) GetDeviceRequestJson

func (m *DeviceAction) GetDeviceRequestJson() string

func (*DeviceAction) ProtoMessage

func (*DeviceAction) ProtoMessage()

func (*DeviceAction) Reset

func (m *DeviceAction) Reset()

func (*DeviceAction) String

func (m *DeviceAction) String() string

func (*DeviceAction) XXX_DiscardUnknown

func (m *DeviceAction) XXX_DiscardUnknown()

func (*DeviceAction) XXX_Marshal

func (m *DeviceAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DeviceAction) XXX_Merge

func (dst *DeviceAction) XXX_Merge(src proto.Message)

func (*DeviceAction) XXX_Size

func (m *DeviceAction) XXX_Size() int

func (*DeviceAction) XXX_Unmarshal

func (m *DeviceAction) XXX_Unmarshal(b []byte) error

type DeviceConfig

type DeviceConfig struct {
	// *Required* Unique identifier for the device. The id length must be 128
	// characters or less. Example: DBCDW098234. This MUST match the device_id
	// returned from device registration. This device_id is used to match against
	// the user's registered devices to look up the supported traits and
	// capabilities of this device. This information should not change across
	// device reboots. However, it should not be saved across
	// factory-default resets.
	DeviceId string `protobuf:"bytes,1,opt,name=device_id,json=deviceId" json:"device_id,omitempty"`
	// *Required* Unique identifier for the device model. The combination of
	// device_model_id and device_id must have been previously associated through
	// device registration.
	DeviceModelId        string   `protobuf:"bytes,3,opt,name=device_model_id,json=deviceModelId" json:"device_model_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

*Required* Fields that identify the device to the Assistant.

See also:

* [Register a Device - REST API](https://developers.google.com/assistant/sdk/reference/device-registration/register-device-manual)
* [Device Model and Instance Schemas](https://developers.google.com/assistant/sdk/reference/device-registration/model-and-instance-schemas)
* [Device Proto](https://developers.google.com/assistant/sdk/reference/rpc/google.assistant.devices.v1alpha2#device)
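
A minimal sketch; both identifiers are placeholders and must match the values established during device registration.

func newDeviceConfig() *DeviceConfig {
	return &DeviceConfig{
		DeviceId:      "my-device-id",       // placeholder: the registered device_id
		DeviceModelId: "my-device-model-id", // placeholder: the registered model id
	}
}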

func (*DeviceConfig) Descriptor

func (*DeviceConfig) Descriptor() ([]byte, []int)

func (*DeviceConfig) GetDeviceId

func (m *DeviceConfig) GetDeviceId() string

func (*DeviceConfig) GetDeviceModelId

func (m *DeviceConfig) GetDeviceModelId() string

func (*DeviceConfig) ProtoMessage

func (*DeviceConfig) ProtoMessage()

func (*DeviceConfig) Reset

func (m *DeviceConfig) Reset()

func (*DeviceConfig) String

func (m *DeviceConfig) String() string

func (*DeviceConfig) XXX_DiscardUnknown

func (m *DeviceConfig) XXX_DiscardUnknown()

func (*DeviceConfig) XXX_Marshal

func (m *DeviceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DeviceConfig) XXX_Merge

func (dst *DeviceConfig) XXX_Merge(src proto.Message)

func (*DeviceConfig) XXX_Size

func (m *DeviceConfig) XXX_Size() int

func (*DeviceConfig) XXX_Unmarshal

func (m *DeviceConfig) XXX_Unmarshal(b []byte) error

type DeviceLocation

type DeviceLocation struct {
	// Types that are valid to be assigned to Type:
	//	*DeviceLocation_Coordinates
	Type                 isDeviceLocation_Type `protobuf_oneof:"type"`
	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
	XXX_unrecognized     []byte                `json:"-"`
	XXX_sizecache        int32                 `json:"-"`
}

There are three sources of locations. They are used with this precedence:

  1. This `DeviceLocation`, which is primarily used for mobile devices with GPS.
  2. Location specified by the user during device setup; this is per-user, per-device. This location is used if `DeviceLocation` is not specified.
  3. Inferred location based on IP address. This is used only if neither of the above is specified.
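
A sketch of supplying explicit coordinates (source 1 above); the `latlng` package is `google.golang.org/genproto/googleapis/type/latlng`, and the values passed in are up to the caller.

func newDeviceLocation(lat, lng float64) *DeviceLocation {
	return &DeviceLocation{
		Type: &DeviceLocation_Coordinates{
			Coordinates: &latlng.LatLng{Latitude: lat, Longitude: lng},
		},
	}
}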

func (*DeviceLocation) Descriptor

func (*DeviceLocation) Descriptor() ([]byte, []int)

func (*DeviceLocation) GetCoordinates

func (m *DeviceLocation) GetCoordinates() *latlng.LatLng

func (*DeviceLocation) GetType

func (m *DeviceLocation) GetType() isDeviceLocation_Type

func (*DeviceLocation) ProtoMessage

func (*DeviceLocation) ProtoMessage()

func (*DeviceLocation) Reset

func (m *DeviceLocation) Reset()

func (*DeviceLocation) String

func (m *DeviceLocation) String() string

func (*DeviceLocation) XXX_DiscardUnknown

func (m *DeviceLocation) XXX_DiscardUnknown()

func (*DeviceLocation) XXX_Marshal

func (m *DeviceLocation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DeviceLocation) XXX_Merge

func (dst *DeviceLocation) XXX_Merge(src proto.Message)

func (*DeviceLocation) XXX_OneofFuncs

func (*DeviceLocation) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{})

XXX_OneofFuncs is for the internal use of the proto package.

func (*DeviceLocation) XXX_Size

func (m *DeviceLocation) XXX_Size() int

func (*DeviceLocation) XXX_Unmarshal

func (m *DeviceLocation) XXX_Unmarshal(b []byte) error

type DeviceLocation_Coordinates

type DeviceLocation_Coordinates struct {
	Coordinates *latlng.LatLng `protobuf:"bytes,1,opt,name=coordinates,oneof"`
}

type DialogStateIn

type DialogStateIn struct {
	// *Required* This field must always be set to the
	// [DialogStateOut.conversation_state][google.assistant.embedded.v1alpha2.DialogStateOut.conversation_state] value that was returned in the prior
	// `Assist` RPC. It should only be omitted (field not set) if there was no
	// prior `Assist` RPC because this is the first `Assist` RPC made by this
	// device after it was first set up or after a factory-default reset.
	ConversationState []byte `protobuf:"bytes,1,opt,name=conversation_state,json=conversationState,proto3" json:"conversation_state,omitempty"`
	// *Required* Language of the request in
	// [IETF BCP 47 syntax](https://tools.ietf.org/html/bcp47). For example:
	// "en-US". If you have selected a language for this `device_id` using the
	// [Settings](https://developers.google.com/assistant/sdk/guides/assistant-settings)
	// menu in your phone's Google Assistant app, that selection will override
	// this value.
	LanguageCode string `protobuf:"bytes,2,opt,name=language_code,json=languageCode" json:"language_code,omitempty"`
	// *Optional* Location of the device where the query originated.
	DeviceLocation *DeviceLocation `protobuf:"bytes,5,opt,name=device_location,json=deviceLocation" json:"device_location,omitempty"`
	// *Optional* If true, the server will treat the request as a new conversation
	// and not use state from the prior request. Set this field to true when the
	// conversation should be restarted, such as after a device reboot, or after a
	// significant lapse of time since the prior query.
	IsNewConversation    bool     `protobuf:"varint,7,opt,name=is_new_conversation,json=isNewConversation" json:"is_new_conversation,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Provides information about the current dialog state.
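
A sketch of carrying the conversation forward: pass the `conversation_state` bytes saved from the previous `DialogStateOut`, or nil on the very first request after setup or a factory-default reset. The language code is illustrative.

func newDialogStateIn(prevState []byte, newConversation bool) *DialogStateIn {
	return &DialogStateIn{
		ConversationState: prevState, // from the prior DialogStateOut, if any
		LanguageCode:      "en-US",
		IsNewConversation: newConversation,
	}
}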

func (*DialogStateIn) Descriptor

func (*DialogStateIn) Descriptor() ([]byte, []int)

func (*DialogStateIn) GetConversationState

func (m *DialogStateIn) GetConversationState() []byte

func (*DialogStateIn) GetDeviceLocation

func (m *DialogStateIn) GetDeviceLocation() *DeviceLocation

func (*DialogStateIn) GetIsNewConversation

func (m *DialogStateIn) GetIsNewConversation() bool

func (*DialogStateIn) GetLanguageCode

func (m *DialogStateIn) GetLanguageCode() string

func (*DialogStateIn) ProtoMessage

func (*DialogStateIn) ProtoMessage()

func (*DialogStateIn) Reset

func (m *DialogStateIn) Reset()

func (*DialogStateIn) String

func (m *DialogStateIn) String() string

func (*DialogStateIn) XXX_DiscardUnknown

func (m *DialogStateIn) XXX_DiscardUnknown()

func (*DialogStateIn) XXX_Marshal

func (m *DialogStateIn) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DialogStateIn) XXX_Merge

func (dst *DialogStateIn) XXX_Merge(src proto.Message)

func (*DialogStateIn) XXX_Size

func (m *DialogStateIn) XXX_Size() int

func (*DialogStateIn) XXX_Unmarshal

func (m *DialogStateIn) XXX_Unmarshal(b []byte) error

type DialogStateOut

type DialogStateOut struct {
	// *Output-only* Supplemental display text from the Assistant. This could be
	// the same as the speech spoken in `AssistResponse.audio_out` or it could
	// be some additional information which aids the user's understanding.
	SupplementalDisplayText string `protobuf:"bytes,1,opt,name=supplemental_display_text,json=supplementalDisplayText" json:"supplemental_display_text,omitempty"`
	// *Output-only* State information for the subsequent `Assist` RPC. This
	// value should be saved in the client and returned in the
	// [`DialogStateIn.conversation_state`](#dialogstatein) field with the next
	// `Assist` RPC. (The client does not need to interpret or otherwise use this
	// value.) This information should be saved across device reboots. However,
	// this value should be cleared (not saved in the client) during a
	// factory-default reset.
	ConversationState []byte `protobuf:"bytes,2,opt,name=conversation_state,json=conversationState,proto3" json:"conversation_state,omitempty"`
	// *Output-only* Specifies the mode of the microphone after this `Assist`
	// RPC is processed.
	MicrophoneMode DialogStateOut_MicrophoneMode `` /* 167-byte string literal not displayed */
	// *Output-only* Updated volume level. The value will be 0 or omitted
	// (indicating no change) unless a voice command such as *Increase the volume*
	// or *Set volume level 4* was recognized, in which case the value will be
	// between 1 and 100 (corresponding to the new volume level of 1% to 100%).
	// Typically, a client should use this volume level when playing the
	// `audio_out` data, and retain this value as the current volume level and
	// supply it in the `AudioOutConfig` of the next `AssistRequest`. (Some
	// clients may also implement other ways to allow the current volume level to
	// be changed, for example, by providing a knob that the user can turn.)
	VolumePercentage     int32    `protobuf:"varint,4,opt,name=volume_percentage,json=volumePercentage" json:"volume_percentage,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

The dialog state resulting from the user's query. Multiple such messages may be received.
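
A sketch of the bookkeeping described by the field comments: persist the returned state for the next request, apply any volume change, and report whether the microphone should re-open for a follow-on turn. `saveState` is a hypothetical persistence hook.

func handleDialogStateOut(out *DialogStateOut, saveState func([]byte)) (followOn bool) {
	if out == nil {
		return false
	}
	if state := out.GetConversationState(); len(state) > 0 {
		saveState(state) // echoed back via DialogStateIn.conversation_state
	}
	if v := out.GetVolumePercentage(); v > 0 {
		// Apply v as the new output volume and report it in the next
		// AudioOutConfig.volume_percentage (not shown).
	}
	return out.GetMicrophoneMode() == DialogStateOut_DIALOG_FOLLOW_ON
}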

func (*DialogStateOut) Descriptor

func (*DialogStateOut) Descriptor() ([]byte, []int)

func (*DialogStateOut) GetConversationState

func (m *DialogStateOut) GetConversationState() []byte

func (*DialogStateOut) GetMicrophoneMode

func (m *DialogStateOut) GetMicrophoneMode() DialogStateOut_MicrophoneMode

func (*DialogStateOut) GetSupplementalDisplayText

func (m *DialogStateOut) GetSupplementalDisplayText() string

func (*DialogStateOut) GetVolumePercentage

func (m *DialogStateOut) GetVolumePercentage() int32

func (*DialogStateOut) ProtoMessage

func (*DialogStateOut) ProtoMessage()

func (*DialogStateOut) Reset

func (m *DialogStateOut) Reset()

func (*DialogStateOut) String

func (m *DialogStateOut) String() string

func (*DialogStateOut) XXX_DiscardUnknown

func (m *DialogStateOut) XXX_DiscardUnknown()

func (*DialogStateOut) XXX_Marshal

func (m *DialogStateOut) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DialogStateOut) XXX_Merge

func (dst *DialogStateOut) XXX_Merge(src proto.Message)

func (*DialogStateOut) XXX_Size

func (m *DialogStateOut) XXX_Size() int

func (*DialogStateOut) XXX_Unmarshal

func (m *DialogStateOut) XXX_Unmarshal(b []byte) error

type DialogStateOut_MicrophoneMode

type DialogStateOut_MicrophoneMode int32

Possible states of the microphone after an `Assist` RPC completes.

const (
	// No mode specified.
	DialogStateOut_MICROPHONE_MODE_UNSPECIFIED DialogStateOut_MicrophoneMode = 0
	// The service is not expecting a follow-on question from the user.
	// The microphone should remain off until the user re-activates it.
	DialogStateOut_CLOSE_MICROPHONE DialogStateOut_MicrophoneMode = 1
	// The service is expecting a follow-on question from the user. The
	// microphone should be re-opened when the `AudioOut` playback completes
	// (by starting a new `Assist` RPC call to send the new audio).
	DialogStateOut_DIALOG_FOLLOW_ON DialogStateOut_MicrophoneMode = 2
)

func (DialogStateOut_MicrophoneMode) EnumDescriptor

func (DialogStateOut_MicrophoneMode) EnumDescriptor() ([]byte, []int)

func (DialogStateOut_MicrophoneMode) String

func (x DialogStateOut_MicrophoneMode) String() string

type EmbeddedAssistantClient

type EmbeddedAssistantClient interface {
	// Initiates or continues a conversation with the embedded Assistant Service.
	// Each call performs one round-trip, sending an audio request to the service
	// and receiving the audio response. Uses bidirectional streaming to receive
	// results, such as the `END_OF_UTTERANCE` event, while sending audio.
	//
	// A conversation is one or more gRPC connections, each consisting of several
	// streamed requests and responses.
	// For example, the user says *Add to my shopping list* and the Assistant
	// responds *What do you want to add?*. The sequence of streamed requests and
	// responses in the first gRPC message could be:
	//
	// *   AssistRequest.config
	// *   AssistRequest.audio_in
	// *   AssistRequest.audio_in
	// *   AssistRequest.audio_in
	// *   AssistRequest.audio_in
	// *   AssistResponse.event_type.END_OF_UTTERANCE
	// *   AssistResponse.speech_results.transcript "add to my shopping list"
	// *   AssistResponse.dialog_state_out.microphone_mode.DIALOG_FOLLOW_ON
	// *   AssistResponse.audio_out
	// *   AssistResponse.audio_out
	// *   AssistResponse.audio_out
	//
	//
	// The user then says *bagels* and the Assistant responds
	// *OK, I've added bagels to your shopping list*. This is sent as another gRPC
	// connection call to the `Assist` method, again with streamed requests and
	// responses, such as:
	//
	// *   AssistRequest.config
	// *   AssistRequest.audio_in
	// *   AssistRequest.audio_in
	// *   AssistRequest.audio_in
	// *   AssistResponse.event_type.END_OF_UTTERANCE
	// *   AssistResponse.dialog_state_out.microphone_mode.CLOSE_MICROPHONE
	// *   AssistResponse.audio_out
	// *   AssistResponse.audio_out
	// *   AssistResponse.audio_out
	// *   AssistResponse.audio_out
	//
	// Although the precise order of responses is not guaranteed, sequential
	// `AssistResponse.audio_out` messages will always contain sequential portions
	// of audio.
	Assist(ctx context.Context, opts ...grpc.CallOption) (EmbeddedAssistant_AssistClient, error)
}

EmbeddedAssistantClient is the client API for EmbeddedAssistant service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewEmbeddedAssistantClient

func NewEmbeddedAssistantClient(cc *grpc.ClientConn) EmbeddedAssistantClient
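
A connection sketch. The endpoint name is the publicly documented Google Assistant Service host at the time this package was generated; verify it against current documentation. Per-RPC OAuth2 credentials are required in practice and are omitted here, and the extra imports assumed are `context`, `google.golang.org/grpc` and `google.golang.org/grpc/credentials`.

func dialAssistant(ctx context.Context) (*grpc.ClientConn, EmbeddedAssistant_AssistClient, error) {
	conn, err := grpc.Dial("embeddedassistant.googleapis.com:443",
		grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")))
	if err != nil {
		return nil, nil, err
	}
	client := NewEmbeddedAssistantClient(conn)
	stream, err := client.Assist(ctx)
	if err != nil {
		conn.Close()
		return nil, nil, err
	}
	return conn, stream, nil // caller closes conn when the conversation ends
}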

type EmbeddedAssistantServer

type EmbeddedAssistantServer interface {
	// Initiates or continues a conversation with the embedded Assistant Service.
	// Each call performs one round-trip, sending an audio request to the service
	// and receiving the audio response. Uses bidirectional streaming to receive
	// results, such as the `END_OF_UTTERANCE` event, while sending audio.
	//
	// A conversation is one or more gRPC connections, each consisting of several
	// streamed requests and responses.
	// For example, the user says *Add to my shopping list* and the Assistant
	// responds *What do you want to add?*. The sequence of streamed requests and
	// responses in the first gRPC message could be:
	//
	// *   AssistRequest.config
	// *   AssistRequest.audio_in
	// *   AssistRequest.audio_in
	// *   AssistRequest.audio_in
	// *   AssistRequest.audio_in
	// *   AssistResponse.event_type.END_OF_UTTERANCE
	// *   AssistResponse.speech_results.transcript "add to my shopping list"
	// *   AssistResponse.dialog_state_out.microphone_mode.DIALOG_FOLLOW_ON
	// *   AssistResponse.audio_out
	// *   AssistResponse.audio_out
	// *   AssistResponse.audio_out
	//
	//
	// The user then says *bagels* and the Assistant responds
	// *OK, I've added bagels to your shopping list*. This is sent as another gRPC
	// connection call to the `Assist` method, again with streamed requests and
	// responses, such as:
	//
	// *   AssistRequest.config
	// *   AssistRequest.audio_in
	// *   AssistRequest.audio_in
	// *   AssistRequest.audio_in
	// *   AssistResponse.event_type.END_OF_UTTERANCE
	// *   AssistResponse.dialog_state_out.microphone_mode.CLOSE_MICROPHONE
	// *   AssistResponse.audio_out
	// *   AssistResponse.audio_out
	// *   AssistResponse.audio_out
	// *   AssistResponse.audio_out
	//
	// Although the precise order of responses is not guaranteed, sequential
	// `AssistResponse.audio_out` messages will always contain sequential portions
	// of audio.
	Assist(EmbeddedAssistant_AssistServer) error
}

type EmbeddedAssistant_AssistClient

type EmbeddedAssistant_AssistClient interface {
	Send(*AssistRequest) error
	Recv() (*AssistResponse, error)
	grpc.ClientStream
}

type EmbeddedAssistant_AssistServer

type EmbeddedAssistant_AssistServer interface {
	Send(*AssistResponse) error
	Recv() (*AssistRequest, error)
	grpc.ServerStream
}

type ScreenOut

type ScreenOut struct {
	// *Output-only* The format of the provided screen data.
	Format ScreenOut_Format `protobuf:"varint,1,opt,name=format,enum=google.assistant.embedded.v1alpha2.ScreenOut_Format" json:"format,omitempty"`
	// *Output-only* The raw screen data to be displayed as the result of the
	// Assistant query.
	Data                 []byte   `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

The Assistant's visual response to the query. Contains the entire visual output.
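
A sketch of handling an HTML response by writing it to a local file that can be opened in a browser; the file name is arbitrary, and other formats are ignored.

func renderScreenOut(s *ScreenOut) error {
	if s == nil || s.GetFormat() != ScreenOut_HTML {
		return nil // nothing renderable in this response
	}
	// Data is a complete UTF-8 HTML5 document, so it can be written as-is.
	return os.WriteFile("assistant_screen.html", s.GetData(), 0o644)
}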

func (*ScreenOut) Descriptor

func (*ScreenOut) Descriptor() ([]byte, []int)

func (*ScreenOut) GetData

func (m *ScreenOut) GetData() []byte

func (*ScreenOut) GetFormat

func (m *ScreenOut) GetFormat() ScreenOut_Format

func (*ScreenOut) ProtoMessage

func (*ScreenOut) ProtoMessage()

func (*ScreenOut) Reset

func (m *ScreenOut) Reset()

func (*ScreenOut) String

func (m *ScreenOut) String() string

func (*ScreenOut) XXX_DiscardUnknown

func (m *ScreenOut) XXX_DiscardUnknown()

func (*ScreenOut) XXX_Marshal

func (m *ScreenOut) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ScreenOut) XXX_Merge

func (dst *ScreenOut) XXX_Merge(src proto.Message)

func (*ScreenOut) XXX_Size

func (m *ScreenOut) XXX_Size() int

func (*ScreenOut) XXX_Unmarshal

func (m *ScreenOut) XXX_Unmarshal(b []byte) error

type ScreenOutConfig

type ScreenOutConfig struct {
	// Current visual screen-mode for the device while issuing the query.
	ScreenMode           ScreenOutConfig_ScreenMode `` /* 152-byte string literal not displayed */
	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
	XXX_unrecognized     []byte                     `json:"-"`
	XXX_sizecache        int32                      `json:"-"`
}

Specifies the desired format for the server to use when it returns a `screen_out` response.

func (*ScreenOutConfig) Descriptor

func (*ScreenOutConfig) Descriptor() ([]byte, []int)

func (*ScreenOutConfig) GetScreenMode

func (m *ScreenOutConfig) GetScreenMode() ScreenOutConfig_ScreenMode

func (*ScreenOutConfig) ProtoMessage

func (*ScreenOutConfig) ProtoMessage()

func (*ScreenOutConfig) Reset

func (m *ScreenOutConfig) Reset()

func (*ScreenOutConfig) String

func (m *ScreenOutConfig) String() string

func (*ScreenOutConfig) XXX_DiscardUnknown

func (m *ScreenOutConfig) XXX_DiscardUnknown()

func (*ScreenOutConfig) XXX_Marshal

func (m *ScreenOutConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ScreenOutConfig) XXX_Merge

func (dst *ScreenOutConfig) XXX_Merge(src proto.Message)

func (*ScreenOutConfig) XXX_Size

func (m *ScreenOutConfig) XXX_Size() int

func (*ScreenOutConfig) XXX_Unmarshal

func (m *ScreenOutConfig) XXX_Unmarshal(b []byte) error

type ScreenOutConfig_ScreenMode

type ScreenOutConfig_ScreenMode int32

Possible modes for visual screen-output on the device.

const (
	// No video mode specified.
	// The Assistant may respond as if in `OFF` mode.
	ScreenOutConfig_SCREEN_MODE_UNSPECIFIED ScreenOutConfig_ScreenMode = 0
	// Screen is off (or has brightness or other settings set so low it is
	// not visible). The Assistant will typically not return a screen response
	// in this mode.
	ScreenOutConfig_OFF ScreenOutConfig_ScreenMode = 1
	// The Assistant will typically return a partial-screen response in this
	// mode.
	ScreenOutConfig_PLAYING ScreenOutConfig_ScreenMode = 3
)

func (ScreenOutConfig_ScreenMode) EnumDescriptor

func (ScreenOutConfig_ScreenMode) EnumDescriptor() ([]byte, []int)

func (ScreenOutConfig_ScreenMode) String

func (x ScreenOutConfig_ScreenMode) String() string

type ScreenOut_Format

type ScreenOut_Format int32

Possible formats of the screen data.

const (
	// No format specified.
	ScreenOut_FORMAT_UNSPECIFIED ScreenOut_Format = 0
	// Data will contain a fully-formed HTML5 layout encoded in UTF-8, e.g.
	// "<html><body><div>...</div></body></html>". It is intended to be rendered
	// along with the audio response. Note that HTML5 doctype should be included
	// in the actual HTML data.
	ScreenOut_HTML ScreenOut_Format = 1
)

func (ScreenOut_Format) EnumDescriptor

func (ScreenOut_Format) EnumDescriptor() ([]byte, []int)

func (ScreenOut_Format) String

func (x ScreenOut_Format) String() string

type SpeechRecognitionResult

type SpeechRecognitionResult struct {
	// *Output-only* Transcript text representing the words that the user spoke.
	Transcript string `protobuf:"bytes,1,opt,name=transcript" json:"transcript,omitempty"`
	// *Output-only* An estimate of the likelihood that the Assistant will not
	// change its guess about this result. Values range from 0.0 (completely
	// unstable) to 1.0 (completely stable and final). The default of 0.0 is a
	// sentinel value indicating `stability` was not set.
	Stability            float32  `protobuf:"fixed32,2,opt,name=stability" json:"stability,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

The estimated transcription of a phrase the user has spoken. This could be a single segment or the full guess of the user's spoken query.
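
Per the `speech_results` documentation on `AssistResponse`, concatenating the segments in delivery order gives the current best guess of the full utterance; a small sketch:

func fullTranscript(results []*SpeechRecognitionResult) string {
	var b strings.Builder
	for _, r := range results {
		b.WriteString(r.GetTranscript())
	}
	return b.String()
}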

func (*SpeechRecognitionResult) Descriptor

func (*SpeechRecognitionResult) Descriptor() ([]byte, []int)

func (*SpeechRecognitionResult) GetStability

func (m *SpeechRecognitionResult) GetStability() float32

func (*SpeechRecognitionResult) GetTranscript

func (m *SpeechRecognitionResult) GetTranscript() string

func (*SpeechRecognitionResult) ProtoMessage

func (*SpeechRecognitionResult) ProtoMessage()

func (*SpeechRecognitionResult) Reset

func (m *SpeechRecognitionResult) Reset()

func (*SpeechRecognitionResult) String

func (m *SpeechRecognitionResult) String() string

func (*SpeechRecognitionResult) XXX_DiscardUnknown

func (m *SpeechRecognitionResult) XXX_DiscardUnknown()

func (*SpeechRecognitionResult) XXX_Marshal

func (m *SpeechRecognitionResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SpeechRecognitionResult) XXX_Merge

func (dst *SpeechRecognitionResult) XXX_Merge(src proto.Message)

func (*SpeechRecognitionResult) XXX_Size

func (m *SpeechRecognitionResult) XXX_Size() int

func (*SpeechRecognitionResult) XXX_Unmarshal

func (m *SpeechRecognitionResult) XXX_Unmarshal(b []byte) error
