genproto: google.golang.org/genproto/googleapis/cloud/videointelligence/v1p3beta1 Index | Files

package videointelligence

import "google.golang.org/genproto/googleapis/cloud/videointelligence/v1p3beta1"

Index

Package Files

video_intelligence.pb.go

Variables

var (
    LabelDetectionMode_name = map[int32]string{
        0:  "LABEL_DETECTION_MODE_UNSPECIFIED",
        1:  "SHOT_MODE",
        2:  "FRAME_MODE",
        3:  "SHOT_AND_FRAME_MODE",
    }
    LabelDetectionMode_value = map[string]int32{
        "LABEL_DETECTION_MODE_UNSPECIFIED": 0,
        "SHOT_MODE":                        1,
        "FRAME_MODE":                       2,
        "SHOT_AND_FRAME_MODE":              3,
    }
)

Enum value maps for LabelDetectionMode.

var (
    Likelihood_name = map[int32]string{
        0:  "LIKELIHOOD_UNSPECIFIED",
        1:  "VERY_UNLIKELY",
        2:  "UNLIKELY",
        3:  "POSSIBLE",
        4:  "LIKELY",
        5:  "VERY_LIKELY",
    }
    Likelihood_value = map[string]int32{
        "LIKELIHOOD_UNSPECIFIED": 0,
        "VERY_UNLIKELY":          1,
        "UNLIKELY":               2,
        "POSSIBLE":               3,
        "LIKELY":                 4,
        "VERY_LIKELY":            5,
    }
)

Enum value maps for Likelihood.

var (
    StreamingFeature_name = map[int32]string{
        0:  "STREAMING_FEATURE_UNSPECIFIED",
        1:  "STREAMING_LABEL_DETECTION",
        2:  "STREAMING_SHOT_CHANGE_DETECTION",
        3:  "STREAMING_EXPLICIT_CONTENT_DETECTION",
        4:  "STREAMING_OBJECT_TRACKING",
        23: "STREAMING_AUTOML_ACTION_RECOGNITION",
        21: "STREAMING_AUTOML_CLASSIFICATION",
        22: "STREAMING_AUTOML_OBJECT_TRACKING",
    }
    StreamingFeature_value = map[string]int32{
        "STREAMING_FEATURE_UNSPECIFIED":        0,
        "STREAMING_LABEL_DETECTION":            1,
        "STREAMING_SHOT_CHANGE_DETECTION":      2,
        "STREAMING_EXPLICIT_CONTENT_DETECTION": 3,
        "STREAMING_OBJECT_TRACKING":            4,
        "STREAMING_AUTOML_ACTION_RECOGNITION":  23,
        "STREAMING_AUTOML_CLASSIFICATION":      21,
        "STREAMING_AUTOML_OBJECT_TRACKING":     22,
    }
)

Enum value maps for StreamingFeature.

var (
    Feature_name = map[int32]string{
        0:  "FEATURE_UNSPECIFIED",
        1:  "LABEL_DETECTION",
        2:  "SHOT_CHANGE_DETECTION",
        3:  "EXPLICIT_CONTENT_DETECTION",
        4:  "FACE_DETECTION",
        6:  "SPEECH_TRANSCRIPTION",
        7:  "TEXT_DETECTION",
        9:  "OBJECT_TRACKING",
        12: "LOGO_RECOGNITION",
        13: "CELEBRITY_RECOGNITION",
        14: "PERSON_DETECTION",
    }
    Feature_value = map[string]int32{
        "FEATURE_UNSPECIFIED":        0,
        "LABEL_DETECTION":            1,
        "SHOT_CHANGE_DETECTION":      2,
        "EXPLICIT_CONTENT_DETECTION": 3,
        "FACE_DETECTION":             4,
        "SPEECH_TRANSCRIPTION":       6,
        "TEXT_DETECTION":             7,
        "OBJECT_TRACKING":            9,
        "LOGO_RECOGNITION":           12,
        "CELEBRITY_RECOGNITION":      13,
        "PERSON_DETECTION":           14,
    }
)

Enum value maps for Feature.

var File_google_cloud_videointelligence_v1p3beta1_video_intelligence_proto protoreflect.FileDescriptor

func RegisterStreamingVideoIntelligenceServiceServer Uses

func RegisterStreamingVideoIntelligenceServiceServer(s *grpc.Server, srv StreamingVideoIntelligenceServiceServer)

func RegisterVideoIntelligenceServiceServer Uses

func RegisterVideoIntelligenceServiceServer(s *grpc.Server, srv VideoIntelligenceServiceServer)

type AnnotateVideoProgress Uses

type AnnotateVideoProgress struct {

    // Progress metadata for all videos specified in `AnnotateVideoRequest`.
    AnnotationProgress []*VideoAnnotationProgress `protobuf:"bytes,1,rep,name=annotation_progress,json=annotationProgress,proto3" json:"annotation_progress,omitempty"`
    // contains filtered or unexported fields
}

Video annotation progress. Included in the `metadata` field of the `Operation` returned by the `GetOperation` call of the `google::longrunning::Operations` service.

func (*AnnotateVideoProgress) Descriptor Uses

func (*AnnotateVideoProgress) Descriptor() ([]byte, []int)

Deprecated: Use AnnotateVideoProgress.ProtoReflect.Descriptor instead.

func (*AnnotateVideoProgress) GetAnnotationProgress Uses

func (x *AnnotateVideoProgress) GetAnnotationProgress() []*VideoAnnotationProgress

func (*AnnotateVideoProgress) ProtoMessage Uses

func (*AnnotateVideoProgress) ProtoMessage()

func (*AnnotateVideoProgress) ProtoReflect Uses

func (x *AnnotateVideoProgress) ProtoReflect() protoreflect.Message

func (*AnnotateVideoProgress) Reset Uses

func (x *AnnotateVideoProgress) Reset()

func (*AnnotateVideoProgress) String Uses

func (x *AnnotateVideoProgress) String() string

type AnnotateVideoRequest Uses

type AnnotateVideoRequest struct {

    // Input video location. Currently, only
    // [Cloud Storage](https://cloud.google.com/storage/) URIs are
    // supported. URIs must be specified in the following format:
    // `gs://bucket-id/object-id` (other URI formats return
    // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
    // more information, see [Request
    // URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
    // multiple videos, a video URI may include wildcards in the `object-id`.
    // Supported wildcards: '*' to match 0 or more characters;
    // '?' to match 1 character. If unset, the input video should be embedded
    // in the request as `input_content`. If set, `input_content` must be unset.
    InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
    // The video data bytes.
    // If unset, the input video(s) should be specified via the `input_uri`.
    // If set, `input_uri` must be unset.
    InputContent []byte `protobuf:"bytes,6,opt,name=input_content,json=inputContent,proto3" json:"input_content,omitempty"`
    // Required. Requested video annotation features.
    Features []Feature `protobuf:"varint,2,rep,packed,name=features,proto3,enum=google.cloud.videointelligence.v1p3beta1.Feature" json:"features,omitempty"`
    // Additional video context and/or feature-specific parameters.
    VideoContext *VideoContext `protobuf:"bytes,3,opt,name=video_context,json=videoContext,proto3" json:"video_context,omitempty"`
    // Optional. Location where the output (in JSON format) should be stored.
    // Currently, only [Cloud Storage](https://cloud.google.com/storage/)
    // URIs are supported. These must be specified in the following format:
    // `gs://bucket-id/object-id` (other URI formats return
    // [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
    // more information, see [Request
    // URIs](https://cloud.google.com/storage/docs/request-endpoints).
    OutputUri string `protobuf:"bytes,4,opt,name=output_uri,json=outputUri,proto3" json:"output_uri,omitempty"`
    // Optional. Cloud region where annotation should take place. Supported cloud
    // regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
    // region is specified, the region will be determined based on video file
    // location.
    LocationId string `protobuf:"bytes,5,opt,name=location_id,json=locationId,proto3" json:"location_id,omitempty"`
    // contains filtered or unexported fields
}

Video annotation request.

func (*AnnotateVideoRequest) Descriptor Uses

func (*AnnotateVideoRequest) Descriptor() ([]byte, []int)

Deprecated: Use AnnotateVideoRequest.ProtoReflect.Descriptor instead.

func (*AnnotateVideoRequest) GetFeatures Uses

func (x *AnnotateVideoRequest) GetFeatures() []Feature

func (*AnnotateVideoRequest) GetInputContent Uses

func (x *AnnotateVideoRequest) GetInputContent() []byte

func (*AnnotateVideoRequest) GetInputUri Uses

func (x *AnnotateVideoRequest) GetInputUri() string

func (*AnnotateVideoRequest) GetLocationId Uses

func (x *AnnotateVideoRequest) GetLocationId() string

func (*AnnotateVideoRequest) GetOutputUri Uses

func (x *AnnotateVideoRequest) GetOutputUri() string

func (*AnnotateVideoRequest) GetVideoContext Uses

func (x *AnnotateVideoRequest) GetVideoContext() *VideoContext

func (*AnnotateVideoRequest) ProtoMessage Uses

func (*AnnotateVideoRequest) ProtoMessage()

func (*AnnotateVideoRequest) ProtoReflect Uses

func (x *AnnotateVideoRequest) ProtoReflect() protoreflect.Message

func (*AnnotateVideoRequest) Reset Uses

func (x *AnnotateVideoRequest) Reset()

func (*AnnotateVideoRequest) String Uses

func (x *AnnotateVideoRequest) String() string

type AnnotateVideoResponse Uses

type AnnotateVideoResponse struct {

    // Annotation results for all videos specified in `AnnotateVideoRequest`.
    AnnotationResults []*VideoAnnotationResults `protobuf:"bytes,1,rep,name=annotation_results,json=annotationResults,proto3" json:"annotation_results,omitempty"`
    // contains filtered or unexported fields
}

Video annotation response. Included in the `response` field of the `Operation` returned by the `GetOperation` call of the `google::longrunning::Operations` service.

func (*AnnotateVideoResponse) Descriptor Uses

func (*AnnotateVideoResponse) Descriptor() ([]byte, []int)

Deprecated: Use AnnotateVideoResponse.ProtoReflect.Descriptor instead.

func (*AnnotateVideoResponse) GetAnnotationResults Uses

func (x *AnnotateVideoResponse) GetAnnotationResults() []*VideoAnnotationResults

func (*AnnotateVideoResponse) ProtoMessage Uses

func (*AnnotateVideoResponse) ProtoMessage()

func (*AnnotateVideoResponse) ProtoReflect Uses

func (x *AnnotateVideoResponse) ProtoReflect() protoreflect.Message

func (*AnnotateVideoResponse) Reset Uses

func (x *AnnotateVideoResponse) Reset()

func (*AnnotateVideoResponse) String Uses

func (x *AnnotateVideoResponse) String() string

type Celebrity Uses

type Celebrity struct {

    // The resource name of the celebrity. It has the format
    // `video-intelligence/kg-mid`, indicating a celebrity from the preloaded
    // gallery. kg-mid is the ID in the Google Knowledge Graph, which is unique
    // for the celebrity.
    Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
    // The celebrity name.
    DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
    // Textual description of additional information about the celebrity, if
    // applicable.
    Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
    // contains filtered or unexported fields
}

Celebrity definition.

func (*Celebrity) Descriptor Uses

func (*Celebrity) Descriptor() ([]byte, []int)

Deprecated: Use Celebrity.ProtoReflect.Descriptor instead.

func (*Celebrity) GetDescription Uses

func (x *Celebrity) GetDescription() string

func (*Celebrity) GetDisplayName Uses

func (x *Celebrity) GetDisplayName() string

func (*Celebrity) GetName Uses

func (x *Celebrity) GetName() string

func (*Celebrity) ProtoMessage Uses

func (*Celebrity) ProtoMessage()

func (*Celebrity) ProtoReflect Uses

func (x *Celebrity) ProtoReflect() protoreflect.Message

func (*Celebrity) Reset Uses

func (x *Celebrity) Reset()

func (*Celebrity) String Uses

func (x *Celebrity) String() string

type CelebrityRecognitionAnnotation Uses

type CelebrityRecognitionAnnotation struct {

    // The tracks detected from the input video, including recognized celebrities
    // and other detected faces in the video.
    CelebrityTracks []*CelebrityTrack `protobuf:"bytes,1,rep,name=celebrity_tracks,json=celebrityTracks,proto3" json:"celebrity_tracks,omitempty"`
    // contains filtered or unexported fields
}

Celebrity recognition annotation per video.

func (*CelebrityRecognitionAnnotation) Descriptor Uses

func (*CelebrityRecognitionAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use CelebrityRecognitionAnnotation.ProtoReflect.Descriptor instead.

func (*CelebrityRecognitionAnnotation) GetCelebrityTracks Uses

func (x *CelebrityRecognitionAnnotation) GetCelebrityTracks() []*CelebrityTrack

func (*CelebrityRecognitionAnnotation) ProtoMessage Uses

func (*CelebrityRecognitionAnnotation) ProtoMessage()

func (*CelebrityRecognitionAnnotation) ProtoReflect Uses

func (x *CelebrityRecognitionAnnotation) ProtoReflect() protoreflect.Message

func (*CelebrityRecognitionAnnotation) Reset Uses

func (x *CelebrityRecognitionAnnotation) Reset()

func (*CelebrityRecognitionAnnotation) String Uses

func (x *CelebrityRecognitionAnnotation) String() string

type CelebrityTrack Uses

type CelebrityTrack struct {

    // Top N match of the celebrities for the face in this track.
    Celebrities []*CelebrityTrack_RecognizedCelebrity `protobuf:"bytes,1,rep,name=celebrities,proto3" json:"celebrities,omitempty"`
    // A track of a person's face.
    FaceTrack *Track `protobuf:"bytes,3,opt,name=face_track,json=faceTrack,proto3" json:"face_track,omitempty"`
    // contains filtered or unexported fields
}

The annotation result of a celebrity face track. RecognizedCelebrity field could be empty if the face track does not have any matched celebrities.

func (*CelebrityTrack) Descriptor Uses

func (*CelebrityTrack) Descriptor() ([]byte, []int)

Deprecated: Use CelebrityTrack.ProtoReflect.Descriptor instead.

func (*CelebrityTrack) GetCelebrities Uses

func (x *CelebrityTrack) GetCelebrities() []*CelebrityTrack_RecognizedCelebrity

func (*CelebrityTrack) GetFaceTrack Uses

func (x *CelebrityTrack) GetFaceTrack() *Track

func (*CelebrityTrack) ProtoMessage Uses

func (*CelebrityTrack) ProtoMessage()

func (*CelebrityTrack) ProtoReflect Uses

func (x *CelebrityTrack) ProtoReflect() protoreflect.Message

func (*CelebrityTrack) Reset Uses

func (x *CelebrityTrack) Reset()

func (*CelebrityTrack) String Uses

func (x *CelebrityTrack) String() string

type CelebrityTrack_RecognizedCelebrity Uses

type CelebrityTrack_RecognizedCelebrity struct {

    // The recognized celebrity.
    Celebrity *Celebrity `protobuf:"bytes,1,opt,name=celebrity,proto3" json:"celebrity,omitempty"`
    // Recognition confidence. Range [0, 1].
    Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
    // contains filtered or unexported fields
}

The recognized celebrity with confidence score.

func (*CelebrityTrack_RecognizedCelebrity) Descriptor Uses

func (*CelebrityTrack_RecognizedCelebrity) Descriptor() ([]byte, []int)

Deprecated: Use CelebrityTrack_RecognizedCelebrity.ProtoReflect.Descriptor instead.

func (*CelebrityTrack_RecognizedCelebrity) GetCelebrity Uses

func (x *CelebrityTrack_RecognizedCelebrity) GetCelebrity() *Celebrity

func (*CelebrityTrack_RecognizedCelebrity) GetConfidence Uses

func (x *CelebrityTrack_RecognizedCelebrity) GetConfidence() float32

func (*CelebrityTrack_RecognizedCelebrity) ProtoMessage Uses

func (*CelebrityTrack_RecognizedCelebrity) ProtoMessage()

func (*CelebrityTrack_RecognizedCelebrity) ProtoReflect Uses

func (x *CelebrityTrack_RecognizedCelebrity) ProtoReflect() protoreflect.Message

func (*CelebrityTrack_RecognizedCelebrity) Reset Uses

func (x *CelebrityTrack_RecognizedCelebrity) Reset()

func (*CelebrityTrack_RecognizedCelebrity) String Uses

func (x *CelebrityTrack_RecognizedCelebrity) String() string

type DetectedAttribute Uses

type DetectedAttribute struct {

    // The name of the attribute, for example, glasses, dark_glasses, mouth_open.
    // A full list of supported type names will be provided in the document.
    Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
    // Detected attribute confidence. Range [0, 1].
    Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
    // Text value of the detection result. For example, the value for "HairColor"
    // can be "black", "blonde", etc.
    Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
    // contains filtered or unexported fields
}

A generic detected attribute represented by name in string format.

func (*DetectedAttribute) Descriptor Uses

func (*DetectedAttribute) Descriptor() ([]byte, []int)

Deprecated: Use DetectedAttribute.ProtoReflect.Descriptor instead.

func (*DetectedAttribute) GetConfidence Uses

func (x *DetectedAttribute) GetConfidence() float32

func (*DetectedAttribute) GetName Uses

func (x *DetectedAttribute) GetName() string

func (*DetectedAttribute) GetValue Uses

func (x *DetectedAttribute) GetValue() string

func (*DetectedAttribute) ProtoMessage Uses

func (*DetectedAttribute) ProtoMessage()

func (*DetectedAttribute) ProtoReflect Uses

func (x *DetectedAttribute) ProtoReflect() protoreflect.Message

func (*DetectedAttribute) Reset Uses

func (x *DetectedAttribute) Reset()

func (*DetectedAttribute) String Uses

func (x *DetectedAttribute) String() string

type DetectedLandmark Uses

type DetectedLandmark struct {

    // The name of this landmark, for example, left_hand, right_shoulder.
    Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
    // The 2D point of the detected landmark using the normalized image
    // coordinate system. The normalized coordinates have the range from 0 to 1.
    Point *NormalizedVertex `protobuf:"bytes,2,opt,name=point,proto3" json:"point,omitempty"`
    // The confidence score of the detected landmark. Range [0, 1].
    Confidence float32 `protobuf:"fixed32,3,opt,name=confidence,proto3" json:"confidence,omitempty"`
    // contains filtered or unexported fields
}

A generic detected landmark represented by name in string format and a 2D location.

func (*DetectedLandmark) Descriptor Uses

func (*DetectedLandmark) Descriptor() ([]byte, []int)

Deprecated: Use DetectedLandmark.ProtoReflect.Descriptor instead.

func (*DetectedLandmark) GetConfidence Uses

func (x *DetectedLandmark) GetConfidence() float32

func (*DetectedLandmark) GetName Uses

func (x *DetectedLandmark) GetName() string

func (*DetectedLandmark) GetPoint Uses

func (x *DetectedLandmark) GetPoint() *NormalizedVertex

func (*DetectedLandmark) ProtoMessage Uses

func (*DetectedLandmark) ProtoMessage()

func (*DetectedLandmark) ProtoReflect Uses

func (x *DetectedLandmark) ProtoReflect() protoreflect.Message

func (*DetectedLandmark) Reset Uses

func (x *DetectedLandmark) Reset()

func (*DetectedLandmark) String Uses

func (x *DetectedLandmark) String() string

type Entity Uses

type Entity struct {

    // Opaque entity ID. Some IDs may be available in
    // [Google Knowledge Graph Search
    // API](https://developers.google.com/knowledge-graph/).
    EntityId string `protobuf:"bytes,1,opt,name=entity_id,json=entityId,proto3" json:"entity_id,omitempty"`
    // Textual description, e.g., `Fixed-gear bicycle`.
    Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
    // Language code for `description` in BCP-47 format.
    LanguageCode string `protobuf:"bytes,3,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
    // contains filtered or unexported fields
}

Detected entity from video analysis.

func (*Entity) Descriptor Uses

func (*Entity) Descriptor() ([]byte, []int)

Deprecated: Use Entity.ProtoReflect.Descriptor instead.

func (*Entity) GetDescription Uses

func (x *Entity) GetDescription() string

func (*Entity) GetEntityId Uses

func (x *Entity) GetEntityId() string

func (*Entity) GetLanguageCode Uses

func (x *Entity) GetLanguageCode() string

func (*Entity) ProtoMessage Uses

func (*Entity) ProtoMessage()

func (*Entity) ProtoReflect Uses

func (x *Entity) ProtoReflect() protoreflect.Message

func (*Entity) Reset Uses

func (x *Entity) Reset()

func (*Entity) String Uses

func (x *Entity) String() string

type ExplicitContentAnnotation Uses

type ExplicitContentAnnotation struct {

    // All video frames where explicit content was detected.
    Frames []*ExplicitContentFrame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"`
    // contains filtered or unexported fields
}

Explicit content annotation (based on per-frame visual signals only). If no explicit content has been detected in a frame, no annotations are present for that frame.

func (*ExplicitContentAnnotation) Descriptor Uses

func (*ExplicitContentAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use ExplicitContentAnnotation.ProtoReflect.Descriptor instead.

func (*ExplicitContentAnnotation) GetFrames Uses

func (x *ExplicitContentAnnotation) GetFrames() []*ExplicitContentFrame

func (*ExplicitContentAnnotation) ProtoMessage Uses

func (*ExplicitContentAnnotation) ProtoMessage()

func (*ExplicitContentAnnotation) ProtoReflect Uses

func (x *ExplicitContentAnnotation) ProtoReflect() protoreflect.Message

func (*ExplicitContentAnnotation) Reset Uses

func (x *ExplicitContentAnnotation) Reset()

func (*ExplicitContentAnnotation) String Uses

func (x *ExplicitContentAnnotation) String() string

type ExplicitContentDetectionConfig Uses

type ExplicitContentDetectionConfig struct {

    // Model to use for explicit content detection.
    // Supported values: "builtin/stable" (the default if unset) and
    // "builtin/latest".
    Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
    // contains filtered or unexported fields
}

Config for EXPLICIT_CONTENT_DETECTION.

func (*ExplicitContentDetectionConfig) Descriptor Uses

func (*ExplicitContentDetectionConfig) Descriptor() ([]byte, []int)

Deprecated: Use ExplicitContentDetectionConfig.ProtoReflect.Descriptor instead.

func (*ExplicitContentDetectionConfig) GetModel Uses

func (x *ExplicitContentDetectionConfig) GetModel() string

func (*ExplicitContentDetectionConfig) ProtoMessage Uses

func (*ExplicitContentDetectionConfig) ProtoMessage()

func (*ExplicitContentDetectionConfig) ProtoReflect Uses

func (x *ExplicitContentDetectionConfig) ProtoReflect() protoreflect.Message

func (*ExplicitContentDetectionConfig) Reset Uses

func (x *ExplicitContentDetectionConfig) Reset()

func (*ExplicitContentDetectionConfig) String Uses

func (x *ExplicitContentDetectionConfig) String() string

type ExplicitContentFrame Uses

type ExplicitContentFrame struct {

    // Time-offset, relative to the beginning of the video, corresponding to the
    // video frame for this location.
    TimeOffset *duration.Duration `protobuf:"bytes,1,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
    // Likelihood of the pornography content.
    PornographyLikelihood Likelihood `protobuf:"varint,2,opt,name=pornography_likelihood,json=pornographyLikelihood,proto3,enum=google.cloud.videointelligence.v1p3beta1.Likelihood" json:"pornography_likelihood,omitempty"`
    // contains filtered or unexported fields
}

Video frame level annotation results for explicit content.

func (*ExplicitContentFrame) Descriptor Uses

func (*ExplicitContentFrame) Descriptor() ([]byte, []int)

Deprecated: Use ExplicitContentFrame.ProtoReflect.Descriptor instead.

func (*ExplicitContentFrame) GetPornographyLikelihood Uses

func (x *ExplicitContentFrame) GetPornographyLikelihood() Likelihood

func (*ExplicitContentFrame) GetTimeOffset Uses

func (x *ExplicitContentFrame) GetTimeOffset() *duration.Duration

func (*ExplicitContentFrame) ProtoMessage Uses

func (*ExplicitContentFrame) ProtoMessage()

func (*ExplicitContentFrame) ProtoReflect Uses

func (x *ExplicitContentFrame) ProtoReflect() protoreflect.Message

func (*ExplicitContentFrame) Reset Uses

func (x *ExplicitContentFrame) Reset()

func (*ExplicitContentFrame) String Uses

func (x *ExplicitContentFrame) String() string

type FaceDetectionAnnotation Uses

type FaceDetectionAnnotation struct {

    // The face tracks with attributes.
    Tracks []*Track `protobuf:"bytes,3,rep,name=tracks,proto3" json:"tracks,omitempty"`
    // The thumbnail of a person's face.
    Thumbnail []byte `protobuf:"bytes,4,opt,name=thumbnail,proto3" json:"thumbnail,omitempty"`
    // contains filtered or unexported fields
}

Face detection annotation.

func (*FaceDetectionAnnotation) Descriptor Uses

func (*FaceDetectionAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use FaceDetectionAnnotation.ProtoReflect.Descriptor instead.

func (*FaceDetectionAnnotation) GetThumbnail Uses

func (x *FaceDetectionAnnotation) GetThumbnail() []byte

func (*FaceDetectionAnnotation) GetTracks Uses

func (x *FaceDetectionAnnotation) GetTracks() []*Track

func (*FaceDetectionAnnotation) ProtoMessage Uses

func (*FaceDetectionAnnotation) ProtoMessage()

func (*FaceDetectionAnnotation) ProtoReflect Uses

func (x *FaceDetectionAnnotation) ProtoReflect() protoreflect.Message

func (*FaceDetectionAnnotation) Reset Uses

func (x *FaceDetectionAnnotation) Reset()

func (*FaceDetectionAnnotation) String Uses

func (x *FaceDetectionAnnotation) String() string

type FaceDetectionConfig Uses

type FaceDetectionConfig struct {

    // Model to use for face detection.
    // Supported values: "builtin/stable" (the default if unset) and
    // "builtin/latest".
    Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
    // Whether bounding boxes are included in the face annotation output.
    IncludeBoundingBoxes bool `protobuf:"varint,2,opt,name=include_bounding_boxes,json=includeBoundingBoxes,proto3" json:"include_bounding_boxes,omitempty"`
    // Whether to enable face attributes detection, such as glasses, dark_glasses,
    // mouth_open etc. Ignored if 'include_bounding_boxes' is set to false.
    IncludeAttributes bool `protobuf:"varint,5,opt,name=include_attributes,json=includeAttributes,proto3" json:"include_attributes,omitempty"`
    // contains filtered or unexported fields
}

Config for FACE_DETECTION.

func (*FaceDetectionConfig) Descriptor Uses

func (*FaceDetectionConfig) Descriptor() ([]byte, []int)

Deprecated: Use FaceDetectionConfig.ProtoReflect.Descriptor instead.

func (*FaceDetectionConfig) GetIncludeAttributes Uses

func (x *FaceDetectionConfig) GetIncludeAttributes() bool

func (*FaceDetectionConfig) GetIncludeBoundingBoxes Uses

func (x *FaceDetectionConfig) GetIncludeBoundingBoxes() bool

func (*FaceDetectionConfig) GetModel Uses

func (x *FaceDetectionConfig) GetModel() string

func (*FaceDetectionConfig) ProtoMessage Uses

func (*FaceDetectionConfig) ProtoMessage()

func (*FaceDetectionConfig) ProtoReflect Uses

func (x *FaceDetectionConfig) ProtoReflect() protoreflect.Message

func (*FaceDetectionConfig) Reset Uses

func (x *FaceDetectionConfig) Reset()

func (*FaceDetectionConfig) String Uses

func (x *FaceDetectionConfig) String() string

type Feature Uses

type Feature int32

Video annotation feature.

const (
    // Unspecified.
    Feature_FEATURE_UNSPECIFIED Feature = 0
    // Label detection. Detect objects, such as dog or flower.
    Feature_LABEL_DETECTION Feature = 1
    // Shot change detection.
    Feature_SHOT_CHANGE_DETECTION Feature = 2
    // Explicit content detection.
    Feature_EXPLICIT_CONTENT_DETECTION Feature = 3
    // Human face detection.
    Feature_FACE_DETECTION Feature = 4
    // Speech transcription.
    Feature_SPEECH_TRANSCRIPTION Feature = 6
    // OCR text detection and tracking.
    Feature_TEXT_DETECTION Feature = 7
    // Object detection and tracking.
    Feature_OBJECT_TRACKING Feature = 9
    // Logo detection, tracking, and recognition.
    Feature_LOGO_RECOGNITION Feature = 12
    // Celebrity recognition.
    Feature_CELEBRITY_RECOGNITION Feature = 13
    // Person detection.
    Feature_PERSON_DETECTION Feature = 14
)

func (Feature) Descriptor Uses

func (Feature) Descriptor() protoreflect.EnumDescriptor

func (Feature) Enum Uses

func (x Feature) Enum() *Feature

func (Feature) EnumDescriptor Uses

func (Feature) EnumDescriptor() ([]byte, []int)

Deprecated: Use Feature.Descriptor instead.

func (Feature) Number Uses

func (x Feature) Number() protoreflect.EnumNumber

func (Feature) String Uses

func (x Feature) String() string

func (Feature) Type Uses

func (Feature) Type() protoreflect.EnumType

type LabelAnnotation Uses

type LabelAnnotation struct {

    // Detected entity.
    Entity *Entity `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"`
    // Common categories for the detected entity.
    // For example, when the label is `Terrier`, the category is likely `dog`.
    // In some cases there might be more than one category; e.g., `Terrier`
    // could also be a `pet`.
    CategoryEntities []*Entity `protobuf:"bytes,2,rep,name=category_entities,json=categoryEntities,proto3" json:"category_entities,omitempty"`
    // All video segments where a label was detected.
    Segments []*LabelSegment `protobuf:"bytes,3,rep,name=segments,proto3" json:"segments,omitempty"`
    // All video frames where a label was detected.
    Frames []*LabelFrame `protobuf:"bytes,4,rep,name=frames,proto3" json:"frames,omitempty"`
    // contains filtered or unexported fields
}

Label annotation.

func (*LabelAnnotation) Descriptor Uses

func (*LabelAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use LabelAnnotation.ProtoReflect.Descriptor instead.

func (*LabelAnnotation) GetCategoryEntities Uses

func (x *LabelAnnotation) GetCategoryEntities() []*Entity

func (*LabelAnnotation) GetEntity Uses

func (x *LabelAnnotation) GetEntity() *Entity

func (*LabelAnnotation) GetFrames Uses

func (x *LabelAnnotation) GetFrames() []*LabelFrame

func (*LabelAnnotation) GetSegments Uses

func (x *LabelAnnotation) GetSegments() []*LabelSegment

func (*LabelAnnotation) ProtoMessage Uses

func (*LabelAnnotation) ProtoMessage()

func (*LabelAnnotation) ProtoReflect Uses

func (x *LabelAnnotation) ProtoReflect() protoreflect.Message

func (*LabelAnnotation) Reset Uses

func (x *LabelAnnotation) Reset()

func (*LabelAnnotation) String Uses

func (x *LabelAnnotation) String() string

type LabelDetectionConfig Uses

type LabelDetectionConfig struct {

    // What labels should be detected with LABEL_DETECTION, in addition to
    // video-level labels or segment-level labels.
    // If unspecified, defaults to `SHOT_MODE`.
    LabelDetectionMode LabelDetectionMode `protobuf:"varint,1,opt,name=label_detection_mode,json=labelDetectionMode,proto3,enum=google.cloud.videointelligence.v1p3beta1.LabelDetectionMode" json:"label_detection_mode,omitempty"`
    // Whether the video has been shot from a stationary (i.e., non-moving)
    // camera. When set to true, might improve detection accuracy for moving
    // objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
    StationaryCamera bool `protobuf:"varint,2,opt,name=stationary_camera,json=stationaryCamera,proto3" json:"stationary_camera,omitempty"`
    // Model to use for label detection.
    // Supported values: "builtin/stable" (the default if unset) and
    // "builtin/latest".
    Model string `protobuf:"bytes,3,opt,name=model,proto3" json:"model,omitempty"`
    // The confidence threshold we perform filtering on the labels from
    // frame-level detection. If not set, it is set to 0.4 by default. The valid
    // range for this threshold is [0.1, 0.9]. Any value set outside of this
    // range will be clipped.
    // Note: For best results, follow the default threshold. We will update
    // the default threshold every time we release a new model.
    FrameConfidenceThreshold float32 `protobuf:"fixed32,4,opt,name=frame_confidence_threshold,json=frameConfidenceThreshold,proto3" json:"frame_confidence_threshold,omitempty"`
    // The confidence threshold we perform filtering on the labels from
    // video-level and shot-level detections. If not set, it's set to 0.3 by
    // default. The valid range for this threshold is [0.1, 0.9]. Any value set
    // outside of this range will be clipped.
    // Note: For best results, follow the default threshold. We will update
    // the default threshold every time we release a new model.
    VideoConfidenceThreshold float32 `protobuf:"fixed32,5,opt,name=video_confidence_threshold,json=videoConfidenceThreshold,proto3" json:"video_confidence_threshold,omitempty"`
    // contains filtered or unexported fields
}

Config for LABEL_DETECTION.

func (*LabelDetectionConfig) Descriptor Uses

func (*LabelDetectionConfig) Descriptor() ([]byte, []int)

Deprecated: Use LabelDetectionConfig.ProtoReflect.Descriptor instead.

func (*LabelDetectionConfig) GetFrameConfidenceThreshold Uses

func (x *LabelDetectionConfig) GetFrameConfidenceThreshold() float32

func (*LabelDetectionConfig) GetLabelDetectionMode Uses

func (x *LabelDetectionConfig) GetLabelDetectionMode() LabelDetectionMode

func (*LabelDetectionConfig) GetModel Uses

func (x *LabelDetectionConfig) GetModel() string

func (*LabelDetectionConfig) GetStationaryCamera Uses

func (x *LabelDetectionConfig) GetStationaryCamera() bool

func (*LabelDetectionConfig) GetVideoConfidenceThreshold Uses

func (x *LabelDetectionConfig) GetVideoConfidenceThreshold() float32

func (*LabelDetectionConfig) ProtoMessage Uses

func (*LabelDetectionConfig) ProtoMessage()

func (*LabelDetectionConfig) ProtoReflect Uses

func (x *LabelDetectionConfig) ProtoReflect() protoreflect.Message

func (*LabelDetectionConfig) Reset Uses

func (x *LabelDetectionConfig) Reset()

func (*LabelDetectionConfig) String Uses

func (x *LabelDetectionConfig) String() string

type LabelDetectionMode Uses

type LabelDetectionMode int32

Label detection mode.

const (
    // Unspecified.
    LabelDetectionMode_LABEL_DETECTION_MODE_UNSPECIFIED LabelDetectionMode = 0
    // Detect shot-level labels.
    LabelDetectionMode_SHOT_MODE LabelDetectionMode = 1
    // Detect frame-level labels.
    LabelDetectionMode_FRAME_MODE LabelDetectionMode = 2
    // Detect both shot-level and frame-level labels.
    LabelDetectionMode_SHOT_AND_FRAME_MODE LabelDetectionMode = 3
)

func (LabelDetectionMode) Descriptor Uses

func (LabelDetectionMode) Descriptor() protoreflect.EnumDescriptor

func (LabelDetectionMode) Enum Uses

func (x LabelDetectionMode) Enum() *LabelDetectionMode

func (LabelDetectionMode) EnumDescriptor Uses

func (LabelDetectionMode) EnumDescriptor() ([]byte, []int)

Deprecated: Use LabelDetectionMode.Descriptor instead.

func (LabelDetectionMode) Number Uses

func (x LabelDetectionMode) Number() protoreflect.EnumNumber

func (LabelDetectionMode) String Uses

func (x LabelDetectionMode) String() string

func (LabelDetectionMode) Type Uses

func (LabelDetectionMode) Type() protoreflect.EnumType

type LabelFrame Uses

type LabelFrame struct {

    // Time-offset, relative to the beginning of the video, corresponding to the
    // video frame for this location.
    TimeOffset *duration.Duration `protobuf:"bytes,1,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
    // Confidence that the label is accurate. Range: [0, 1].
    Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
    // contains filtered or unexported fields
}

Video frame level annotation results for label detection.

func (*LabelFrame) Descriptor Uses

func (*LabelFrame) Descriptor() ([]byte, []int)

Deprecated: Use LabelFrame.ProtoReflect.Descriptor instead.

func (*LabelFrame) GetConfidence Uses

func (x *LabelFrame) GetConfidence() float32

func (*LabelFrame) GetTimeOffset Uses

func (x *LabelFrame) GetTimeOffset() *duration.Duration

func (*LabelFrame) ProtoMessage Uses

func (*LabelFrame) ProtoMessage()

func (*LabelFrame) ProtoReflect Uses

func (x *LabelFrame) ProtoReflect() protoreflect.Message

func (*LabelFrame) Reset Uses

func (x *LabelFrame) Reset()

func (*LabelFrame) String Uses

func (x *LabelFrame) String() string

type LabelSegment Uses

type LabelSegment struct {

    // Video segment where a label was detected.
    Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment,proto3" json:"segment,omitempty"`
    // Confidence that the label is accurate. Range: [0, 1].
    Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
    // contains filtered or unexported fields
}

Video segment level annotation results for label detection.

func (*LabelSegment) Descriptor Uses

func (*LabelSegment) Descriptor() ([]byte, []int)

Deprecated: Use LabelSegment.ProtoReflect.Descriptor instead.

func (*LabelSegment) GetConfidence Uses

func (x *LabelSegment) GetConfidence() float32

func (*LabelSegment) GetSegment Uses

func (x *LabelSegment) GetSegment() *VideoSegment

func (*LabelSegment) ProtoMessage Uses

func (*LabelSegment) ProtoMessage()

func (*LabelSegment) ProtoReflect Uses

func (x *LabelSegment) ProtoReflect() protoreflect.Message

func (*LabelSegment) Reset Uses

func (x *LabelSegment) Reset()

func (*LabelSegment) String Uses

func (x *LabelSegment) String() string

type Likelihood Uses

type Likelihood int32

Bucketized representation of likelihood.

const (
    // Unspecified likelihood.
    Likelihood_LIKELIHOOD_UNSPECIFIED Likelihood = 0
    // Very unlikely.
    Likelihood_VERY_UNLIKELY Likelihood = 1
    // Unlikely.
    Likelihood_UNLIKELY Likelihood = 2
    // Possible.
    Likelihood_POSSIBLE Likelihood = 3
    // Likely.
    Likelihood_LIKELY Likelihood = 4
    // Very likely.
    Likelihood_VERY_LIKELY Likelihood = 5
)

func (Likelihood) Descriptor Uses

func (Likelihood) Descriptor() protoreflect.EnumDescriptor

func (Likelihood) Enum Uses

func (x Likelihood) Enum() *Likelihood

func (Likelihood) EnumDescriptor Uses

func (Likelihood) EnumDescriptor() ([]byte, []int)

Deprecated: Use Likelihood.Descriptor instead.

func (Likelihood) Number Uses

func (x Likelihood) Number() protoreflect.EnumNumber

func (Likelihood) String Uses

func (x Likelihood) String() string

func (Likelihood) Type Uses

func (Likelihood) Type() protoreflect.EnumType

type LogoRecognitionAnnotation Uses

type LogoRecognitionAnnotation struct {

    // Entity category information to specify the logo class that all the logo
    // tracks within this LogoRecognitionAnnotation are recognized as.
    Entity *Entity `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"`
    // All logo tracks where the recognized logo appears. Each track corresponds
    // to one logo instance appearing in consecutive frames.
    Tracks []*Track `protobuf:"bytes,2,rep,name=tracks,proto3" json:"tracks,omitempty"`
    // All video segments where the recognized logo appears. There might be
    // multiple instances of the same logo class appearing in one VideoSegment.
    Segments []*VideoSegment `protobuf:"bytes,3,rep,name=segments,proto3" json:"segments,omitempty"`
    // contains filtered or unexported fields
}

Annotation corresponding to one detected, tracked and recognized logo class.

func (*LogoRecognitionAnnotation) Descriptor Uses

func (*LogoRecognitionAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use LogoRecognitionAnnotation.ProtoReflect.Descriptor instead.

func (*LogoRecognitionAnnotation) GetEntity Uses

func (x *LogoRecognitionAnnotation) GetEntity() *Entity

func (*LogoRecognitionAnnotation) GetSegments Uses

func (x *LogoRecognitionAnnotation) GetSegments() []*VideoSegment

func (*LogoRecognitionAnnotation) GetTracks Uses

func (x *LogoRecognitionAnnotation) GetTracks() []*Track

func (*LogoRecognitionAnnotation) ProtoMessage Uses

func (*LogoRecognitionAnnotation) ProtoMessage()

func (*LogoRecognitionAnnotation) ProtoReflect Uses

func (x *LogoRecognitionAnnotation) ProtoReflect() protoreflect.Message

func (*LogoRecognitionAnnotation) Reset Uses

func (x *LogoRecognitionAnnotation) Reset()

func (*LogoRecognitionAnnotation) String Uses

func (x *LogoRecognitionAnnotation) String() string

type NormalizedBoundingBox Uses

type NormalizedBoundingBox struct {

    // Left X coordinate.
    Left float32 `protobuf:"fixed32,1,opt,name=left,proto3" json:"left,omitempty"`
    // Top Y coordinate.
    Top float32 `protobuf:"fixed32,2,opt,name=top,proto3" json:"top,omitempty"`
    // Right X coordinate.
    Right float32 `protobuf:"fixed32,3,opt,name=right,proto3" json:"right,omitempty"`
    // Bottom Y coordinate.
    Bottom float32 `protobuf:"fixed32,4,opt,name=bottom,proto3" json:"bottom,omitempty"`
    // contains filtered or unexported fields
}

Normalized bounding box. The normalized vertex coordinates are relative to the original image. Range: [0, 1].

func (*NormalizedBoundingBox) Descriptor Uses

func (*NormalizedBoundingBox) Descriptor() ([]byte, []int)

Deprecated: Use NormalizedBoundingBox.ProtoReflect.Descriptor instead.

func (*NormalizedBoundingBox) GetBottom Uses

func (x *NormalizedBoundingBox) GetBottom() float32

func (*NormalizedBoundingBox) GetLeft Uses

func (x *NormalizedBoundingBox) GetLeft() float32

func (*NormalizedBoundingBox) GetRight Uses

func (x *NormalizedBoundingBox) GetRight() float32

func (*NormalizedBoundingBox) GetTop Uses

func (x *NormalizedBoundingBox) GetTop() float32

func (*NormalizedBoundingBox) ProtoMessage Uses

func (*NormalizedBoundingBox) ProtoMessage()

func (*NormalizedBoundingBox) ProtoReflect Uses

func (x *NormalizedBoundingBox) ProtoReflect() protoreflect.Message

func (*NormalizedBoundingBox) Reset Uses

func (x *NormalizedBoundingBox) Reset()

func (*NormalizedBoundingBox) String Uses

func (x *NormalizedBoundingBox) String() string

type NormalizedBoundingPoly Uses

type NormalizedBoundingPoly struct {

    // Normalized vertices of the bounding polygon.
    Vertices []*NormalizedVertex `protobuf:"bytes,1,rep,name=vertices,proto3" json:"vertices,omitempty"`
    // contains filtered or unexported fields
}

Normalized bounding polygon for text (that might not be aligned with axis). Contains list of the corner points in clockwise order starting from top-left corner. For example, for a rectangular bounding box: When the text is horizontal it might look like:

0----1
|    |
3----2

When it's clockwise rotated 180 degrees around the top-left corner it becomes:

2----3
|    |
1----0

and the vertex order will still be (0, 1, 2, 3). Note that values can be less than 0, or greater than 1 due to trigonometric calculations for the location of the box.

func (*NormalizedBoundingPoly) Descriptor Uses

func (*NormalizedBoundingPoly) Descriptor() ([]byte, []int)

Deprecated: Use NormalizedBoundingPoly.ProtoReflect.Descriptor instead.

func (*NormalizedBoundingPoly) GetVertices Uses

func (x *NormalizedBoundingPoly) GetVertices() []*NormalizedVertex

func (*NormalizedBoundingPoly) ProtoMessage Uses

func (*NormalizedBoundingPoly) ProtoMessage()

func (*NormalizedBoundingPoly) ProtoReflect Uses

func (x *NormalizedBoundingPoly) ProtoReflect() protoreflect.Message

func (*NormalizedBoundingPoly) Reset Uses

func (x *NormalizedBoundingPoly) Reset()

func (*NormalizedBoundingPoly) String Uses

func (x *NormalizedBoundingPoly) String() string

type NormalizedVertex Uses

type NormalizedVertex struct {

    // X coordinate.
    X   float32 `protobuf:"fixed32,1,opt,name=x,proto3" json:"x,omitempty"`
    // Y coordinate.
    Y   float32 `protobuf:"fixed32,2,opt,name=y,proto3" json:"y,omitempty"`
    // contains filtered or unexported fields
}

A vertex represents a 2D point in the image. NOTE: the normalized vertex coordinates are relative to the original image and range from 0 to 1.

func (*NormalizedVertex) Descriptor Uses

func (*NormalizedVertex) Descriptor() ([]byte, []int)

Deprecated: Use NormalizedVertex.ProtoReflect.Descriptor instead.

func (*NormalizedVertex) GetX Uses

func (x *NormalizedVertex) GetX() float32

func (*NormalizedVertex) GetY Uses

func (x *NormalizedVertex) GetY() float32

func (*NormalizedVertex) ProtoMessage Uses

func (*NormalizedVertex) ProtoMessage()

func (*NormalizedVertex) ProtoReflect Uses

func (x *NormalizedVertex) ProtoReflect() protoreflect.Message

func (*NormalizedVertex) Reset Uses

func (x *NormalizedVertex) Reset()

func (*NormalizedVertex) String Uses

func (x *NormalizedVertex) String() string

type ObjectTrackingAnnotation Uses

type ObjectTrackingAnnotation struct {

    // Different representation of tracking info in non-streaming batch
    // and streaming modes.
    //
    // Types that are assignable to TrackInfo:
    //	*ObjectTrackingAnnotation_Segment
    //	*ObjectTrackingAnnotation_TrackId
    TrackInfo isObjectTrackingAnnotation_TrackInfo `protobuf_oneof:"track_info"`
    // Entity to specify the object category that this track is labeled as.
    Entity *Entity `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"`
    // Object category's labeling confidence of this track.
    Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
    // Information corresponding to all frames where this object track appears.
    // Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
    // messages in frames.
    // Streaming mode: it can only be one ObjectTrackingFrame message in frames.
    Frames []*ObjectTrackingFrame `protobuf:"bytes,2,rep,name=frames,proto3" json:"frames,omitempty"`
    // contains filtered or unexported fields
}

Annotations corresponding to one tracked object.

func (*ObjectTrackingAnnotation) Descriptor Uses

func (*ObjectTrackingAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use ObjectTrackingAnnotation.ProtoReflect.Descriptor instead.

func (*ObjectTrackingAnnotation) GetConfidence Uses

func (x *ObjectTrackingAnnotation) GetConfidence() float32

func (*ObjectTrackingAnnotation) GetEntity Uses

func (x *ObjectTrackingAnnotation) GetEntity() *Entity

func (*ObjectTrackingAnnotation) GetFrames Uses

func (x *ObjectTrackingAnnotation) GetFrames() []*ObjectTrackingFrame

func (*ObjectTrackingAnnotation) GetSegment Uses

func (x *ObjectTrackingAnnotation) GetSegment() *VideoSegment

func (*ObjectTrackingAnnotation) GetTrackId Uses

func (x *ObjectTrackingAnnotation) GetTrackId() int64

func (*ObjectTrackingAnnotation) GetTrackInfo Uses

func (m *ObjectTrackingAnnotation) GetTrackInfo() isObjectTrackingAnnotation_TrackInfo

func (*ObjectTrackingAnnotation) ProtoMessage Uses

func (*ObjectTrackingAnnotation) ProtoMessage()

func (*ObjectTrackingAnnotation) ProtoReflect Uses

func (x *ObjectTrackingAnnotation) ProtoReflect() protoreflect.Message

func (*ObjectTrackingAnnotation) Reset Uses

func (x *ObjectTrackingAnnotation) Reset()

func (*ObjectTrackingAnnotation) String Uses

func (x *ObjectTrackingAnnotation) String() string

type ObjectTrackingAnnotation_Segment Uses

type ObjectTrackingAnnotation_Segment struct {
    // Non-streaming batch mode ONLY.
    // Each object track corresponds to one video segment where it appears.
    Segment *VideoSegment `protobuf:"bytes,3,opt,name=segment,proto3,oneof"`
}

type ObjectTrackingAnnotation_TrackId Uses

type ObjectTrackingAnnotation_TrackId struct {
    // Streaming mode ONLY.
    // In streaming mode, we do not know the end time of a tracked object
    // before it is completed. Hence, there is no VideoSegment info returned.
    // Instead, we provide a uniquely identifiable integer track_id so that
    // customers can correlate the results of the ongoing
    // ObjectTrackingAnnotation messages of the same track_id over time.
    TrackId int64 `protobuf:"varint,5,opt,name=track_id,json=trackId,proto3,oneof"`
}

type ObjectTrackingConfig Uses

type ObjectTrackingConfig struct {

    // Model to use for object tracking.
    // Supported values: "builtin/stable" (the default if unset) and
    // "builtin/latest".
    Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
    // contains filtered or unexported fields
}

Config for OBJECT_TRACKING.

func (*ObjectTrackingConfig) Descriptor Uses

func (*ObjectTrackingConfig) Descriptor() ([]byte, []int)

Deprecated: Use ObjectTrackingConfig.ProtoReflect.Descriptor instead.

func (*ObjectTrackingConfig) GetModel Uses

func (x *ObjectTrackingConfig) GetModel() string

func (*ObjectTrackingConfig) ProtoMessage Uses

func (*ObjectTrackingConfig) ProtoMessage()

func (*ObjectTrackingConfig) ProtoReflect Uses

func (x *ObjectTrackingConfig) ProtoReflect() protoreflect.Message

func (*ObjectTrackingConfig) Reset Uses

func (x *ObjectTrackingConfig) Reset()

func (*ObjectTrackingConfig) String Uses

func (x *ObjectTrackingConfig) String() string

type ObjectTrackingFrame Uses

type ObjectTrackingFrame struct {

    // The normalized bounding box location of this object track for the frame.
    NormalizedBoundingBox *NormalizedBoundingBox `protobuf:"bytes,1,opt,name=normalized_bounding_box,json=normalizedBoundingBox,proto3" json:"normalized_bounding_box,omitempty"`
    // The timestamp of the frame in microseconds.
    TimeOffset *duration.Duration `protobuf:"bytes,2,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
    // contains filtered or unexported fields
}

Video frame level annotations for object detection and tracking. This field stores per-frame location, time offset, and confidence.

func (*ObjectTrackingFrame) Descriptor Uses

func (*ObjectTrackingFrame) Descriptor() ([]byte, []int)

Deprecated: Use ObjectTrackingFrame.ProtoReflect.Descriptor instead.

func (*ObjectTrackingFrame) GetNormalizedBoundingBox Uses

func (x *ObjectTrackingFrame) GetNormalizedBoundingBox() *NormalizedBoundingBox

func (*ObjectTrackingFrame) GetTimeOffset Uses

func (x *ObjectTrackingFrame) GetTimeOffset() *duration.Duration

func (*ObjectTrackingFrame) ProtoMessage Uses

func (*ObjectTrackingFrame) ProtoMessage()

func (*ObjectTrackingFrame) ProtoReflect Uses

func (x *ObjectTrackingFrame) ProtoReflect() protoreflect.Message

func (*ObjectTrackingFrame) Reset Uses

func (x *ObjectTrackingFrame) Reset()

func (*ObjectTrackingFrame) String Uses

func (x *ObjectTrackingFrame) String() string

type PersonDetectionAnnotation Uses

type PersonDetectionAnnotation struct {

    // The detected tracks of a person.
    Tracks []*Track `protobuf:"bytes,1,rep,name=tracks,proto3" json:"tracks,omitempty"`
    // contains filtered or unexported fields
}

Person detection annotation per video.

func (*PersonDetectionAnnotation) Descriptor Uses

func (*PersonDetectionAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use PersonDetectionAnnotation.ProtoReflect.Descriptor instead.

func (*PersonDetectionAnnotation) GetTracks Uses

func (x *PersonDetectionAnnotation) GetTracks() []*Track

func (*PersonDetectionAnnotation) ProtoMessage Uses

func (*PersonDetectionAnnotation) ProtoMessage()

func (*PersonDetectionAnnotation) ProtoReflect Uses

func (x *PersonDetectionAnnotation) ProtoReflect() protoreflect.Message

func (*PersonDetectionAnnotation) Reset Uses

func (x *PersonDetectionAnnotation) Reset()

func (*PersonDetectionAnnotation) String Uses

func (x *PersonDetectionAnnotation) String() string

type PersonDetectionConfig Uses

type PersonDetectionConfig struct {

    // Whether bounding boxes are included in the person detection annotation
    // output.
    IncludeBoundingBoxes bool `protobuf:"varint,1,opt,name=include_bounding_boxes,json=includeBoundingBoxes,proto3" json:"include_bounding_boxes,omitempty"`
    // Whether to enable pose landmarks detection. Ignored if
    // 'include_bounding_boxes' is set to false.
    IncludePoseLandmarks bool `protobuf:"varint,2,opt,name=include_pose_landmarks,json=includePoseLandmarks,proto3" json:"include_pose_landmarks,omitempty"`
    // Whether to enable person attributes detection, such as cloth color (black,
    // blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
    // etc.
    // Ignored if 'include_bounding_boxes' is set to false.
    IncludeAttributes bool `protobuf:"varint,3,opt,name=include_attributes,json=includeAttributes,proto3" json:"include_attributes,omitempty"`
    // contains filtered or unexported fields
}

Config for PERSON_DETECTION.

func (*PersonDetectionConfig) Descriptor Uses

func (*PersonDetectionConfig) Descriptor() ([]byte, []int)

Deprecated: Use PersonDetectionConfig.ProtoReflect.Descriptor instead.

func (*PersonDetectionConfig) GetIncludeAttributes Uses

func (x *PersonDetectionConfig) GetIncludeAttributes() bool

func (*PersonDetectionConfig) GetIncludeBoundingBoxes Uses

func (x *PersonDetectionConfig) GetIncludeBoundingBoxes() bool

func (*PersonDetectionConfig) GetIncludePoseLandmarks Uses

func (x *PersonDetectionConfig) GetIncludePoseLandmarks() bool

func (*PersonDetectionConfig) ProtoMessage Uses

func (*PersonDetectionConfig) ProtoMessage()

func (*PersonDetectionConfig) ProtoReflect Uses

func (x *PersonDetectionConfig) ProtoReflect() protoreflect.Message

func (*PersonDetectionConfig) Reset Uses

func (x *PersonDetectionConfig) Reset()

func (*PersonDetectionConfig) String Uses

func (x *PersonDetectionConfig) String() string

type ShotChangeDetectionConfig Uses

type ShotChangeDetectionConfig struct {

    // Model to use for shot change detection.
    // Supported values: "builtin/stable" (the default if unset) and
    // "builtin/latest".
    Model string `protobuf:"bytes,1,opt,name=model,proto3" json:"model,omitempty"`
    // contains filtered or unexported fields
}

Config for SHOT_CHANGE_DETECTION.

func (*ShotChangeDetectionConfig) Descriptor Uses

func (*ShotChangeDetectionConfig) Descriptor() ([]byte, []int)

Deprecated: Use ShotChangeDetectionConfig.ProtoReflect.Descriptor instead.

func (*ShotChangeDetectionConfig) GetModel Uses

func (x *ShotChangeDetectionConfig) GetModel() string

func (*ShotChangeDetectionConfig) ProtoMessage Uses

func (*ShotChangeDetectionConfig) ProtoMessage()

func (*ShotChangeDetectionConfig) ProtoReflect Uses

func (x *ShotChangeDetectionConfig) ProtoReflect() protoreflect.Message

func (*ShotChangeDetectionConfig) Reset Uses

func (x *ShotChangeDetectionConfig) Reset()

func (*ShotChangeDetectionConfig) String Uses

func (x *ShotChangeDetectionConfig) String() string

type SpeechContext Uses

type SpeechContext struct {

    // Optional. A list of strings containing words and phrases "hints" so that
    // the speech recognition is more likely to recognize them. This can be used
    // to improve the accuracy for specific words and phrases, for example, if
    // specific commands are typically spoken by the user. This can also be used
    // to add additional words to the vocabulary of the recognizer. See
    // [usage limits](https://cloud.google.com/speech/limits#content).
    Phrases []string `protobuf:"bytes,1,rep,name=phrases,proto3" json:"phrases,omitempty"`
    // contains filtered or unexported fields
}

Provides "hints" to the speech recognizer to favor specific words and phrases in the results.

func (*SpeechContext) Descriptor Uses

func (*SpeechContext) Descriptor() ([]byte, []int)

Deprecated: Use SpeechContext.ProtoReflect.Descriptor instead.

func (*SpeechContext) GetPhrases Uses

func (x *SpeechContext) GetPhrases() []string

func (*SpeechContext) ProtoMessage Uses

func (*SpeechContext) ProtoMessage()

func (*SpeechContext) ProtoReflect Uses

func (x *SpeechContext) ProtoReflect() protoreflect.Message

func (*SpeechContext) Reset Uses

func (x *SpeechContext) Reset()

func (*SpeechContext) String Uses

func (x *SpeechContext) String() string

type SpeechRecognitionAlternative Uses

type SpeechRecognitionAlternative struct {

    // Transcript text representing the words that the user spoke.
    Transcript string `protobuf:"bytes,1,opt,name=transcript,proto3" json:"transcript,omitempty"`
    // Output only. The confidence estimate between 0.0 and 1.0. A higher number
    // indicates an estimated greater likelihood that the recognized words are
    // correct. This field is set only for the top alternative.
    // This field is not guaranteed to be accurate and users should not rely on it
    // to be always provided.
    // The default of 0.0 is a sentinel value indicating `confidence` was not set.
    Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
    // Output only. A list of word-specific information for each recognized word.
    // Note: When `enable_speaker_diarization` is set to true, you will see all
    // the words from the beginning of the audio.
    Words []*WordInfo `protobuf:"bytes,3,rep,name=words,proto3" json:"words,omitempty"`
    // contains filtered or unexported fields
}

Alternative hypotheses (a.k.a. n-best list).

func (*SpeechRecognitionAlternative) Descriptor Uses

func (*SpeechRecognitionAlternative) Descriptor() ([]byte, []int)

Deprecated: Use SpeechRecognitionAlternative.ProtoReflect.Descriptor instead.

func (*SpeechRecognitionAlternative) GetConfidence Uses

func (x *SpeechRecognitionAlternative) GetConfidence() float32

func (*SpeechRecognitionAlternative) GetTranscript Uses

func (x *SpeechRecognitionAlternative) GetTranscript() string

func (*SpeechRecognitionAlternative) GetWords Uses

func (x *SpeechRecognitionAlternative) GetWords() []*WordInfo

func (*SpeechRecognitionAlternative) ProtoMessage Uses

func (*SpeechRecognitionAlternative) ProtoMessage()

func (*SpeechRecognitionAlternative) ProtoReflect Uses

func (x *SpeechRecognitionAlternative) ProtoReflect() protoreflect.Message

func (*SpeechRecognitionAlternative) Reset Uses

func (x *SpeechRecognitionAlternative) Reset()

func (*SpeechRecognitionAlternative) String Uses

func (x *SpeechRecognitionAlternative) String() string

type SpeechTranscription Uses

type SpeechTranscription struct {

    // May contain one or more recognition hypotheses (up to the maximum specified
    // in `max_alternatives`).  These alternatives are ordered in terms of
    // accuracy, with the top (first) alternative being the most probable, as
    // ranked by the recognizer.
    Alternatives []*SpeechRecognitionAlternative `protobuf:"bytes,1,rep,name=alternatives,proto3" json:"alternatives,omitempty"`
    // Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
    // language tag of the language in this result. This language code was
    // detected to have the most likelihood of being spoken in the audio.
    LanguageCode string `protobuf:"bytes,2,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
    // contains filtered or unexported fields
}

A speech recognition result corresponding to a portion of the audio.

func (*SpeechTranscription) Descriptor Uses

func (*SpeechTranscription) Descriptor() ([]byte, []int)

Deprecated: Use SpeechTranscription.ProtoReflect.Descriptor instead.

func (*SpeechTranscription) GetAlternatives Uses

func (x *SpeechTranscription) GetAlternatives() []*SpeechRecognitionAlternative

func (*SpeechTranscription) GetLanguageCode Uses

func (x *SpeechTranscription) GetLanguageCode() string

func (*SpeechTranscription) ProtoMessage Uses

func (*SpeechTranscription) ProtoMessage()

func (*SpeechTranscription) ProtoReflect Uses

func (x *SpeechTranscription) ProtoReflect() protoreflect.Message

func (*SpeechTranscription) Reset Uses

func (x *SpeechTranscription) Reset()

func (*SpeechTranscription) String Uses

func (x *SpeechTranscription) String() string

type SpeechTranscriptionConfig Uses

type SpeechTranscriptionConfig struct {

    // Required. The language of the supplied audio as a
    // [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
    // Example: "en-US".
    // See [Language Support](https://cloud.google.com/speech/docs/languages)
    // for a list of the currently supported language codes.
    LanguageCode string `protobuf:"bytes,1,opt,name=language_code,json=languageCode,proto3" json:"language_code,omitempty"`
    // Optional. Maximum number of recognition hypotheses to be returned.
    // Specifically, the maximum number of `SpeechRecognitionAlternative` messages
    // within each `SpeechTranscription`. The server may return fewer than
    // `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
    // return a maximum of one. If omitted, will return a maximum of one.
    MaxAlternatives int32 `protobuf:"varint,2,opt,name=max_alternatives,json=maxAlternatives,proto3" json:"max_alternatives,omitempty"`
    // Optional. If set to `true`, the server will attempt to filter out
    // profanities, replacing all but the initial character in each filtered word
    // with asterisks, e.g. "f***". If set to `false` or omitted, profanities
    // won't be filtered out.
    FilterProfanity bool `protobuf:"varint,3,opt,name=filter_profanity,json=filterProfanity,proto3" json:"filter_profanity,omitempty"`
    // Optional. A means to provide context to assist the speech recognition.
    SpeechContexts []*SpeechContext `protobuf:"bytes,4,rep,name=speech_contexts,json=speechContexts,proto3" json:"speech_contexts,omitempty"`
    // Optional. If 'true', adds punctuation to recognition result hypotheses.
    // This feature is only available in select languages. Setting this for
    // requests in other languages has no effect at all. The default 'false' value
    // does not add punctuation to result hypotheses. NOTE: "This is currently
    // offered as an experimental service, complimentary to all users. In the
    // future this may be exclusively available as a premium feature."
    EnableAutomaticPunctuation bool `protobuf:"varint,5,opt,name=enable_automatic_punctuation,json=enableAutomaticPunctuation,proto3" json:"enable_automatic_punctuation,omitempty"`
    // Optional. For file formats, such as MXF or MKV, supporting multiple audio
    // tracks, specify up to two tracks. Default: track 0.
    AudioTracks []int32 `protobuf:"varint,6,rep,packed,name=audio_tracks,json=audioTracks,proto3" json:"audio_tracks,omitempty"`
    // Optional. If 'true', enables speaker detection for each recognized word in
    // the top alternative of the recognition result using a speaker_tag provided
    // in the WordInfo.
    // Note: When this is true, we send all the words from the beginning of the
    // audio for the top alternative in every consecutive response.
    // This is done in order to improve our speaker tags as our models learn to
    // identify the speakers in the conversation over time.
    EnableSpeakerDiarization bool `protobuf:"varint,7,opt,name=enable_speaker_diarization,json=enableSpeakerDiarization,proto3" json:"enable_speaker_diarization,omitempty"`
    // Optional. If set, specifies the estimated number of speakers in the
    // conversation. If not set, defaults to '2'. Ignored unless
    // enable_speaker_diarization is set to true.
    DiarizationSpeakerCount int32 `protobuf:"varint,8,opt,name=diarization_speaker_count,json=diarizationSpeakerCount,proto3" json:"diarization_speaker_count,omitempty"`
    // Optional. If `true`, the top result includes a list of words and the
    // confidence for those words. If `false`, no word-level confidence
    // information is returned. The default is `false`.
    EnableWordConfidence bool `protobuf:"varint,9,opt,name=enable_word_confidence,json=enableWordConfidence,proto3" json:"enable_word_confidence,omitempty"`
    // contains filtered or unexported fields
}

Config for SPEECH_TRANSCRIPTION.

func (*SpeechTranscriptionConfig) Descriptor Uses

func (*SpeechTranscriptionConfig) Descriptor() ([]byte, []int)

Deprecated: Use SpeechTranscriptionConfig.ProtoReflect.Descriptor instead.

func (*SpeechTranscriptionConfig) GetAudioTracks Uses

func (x *SpeechTranscriptionConfig) GetAudioTracks() []int32

func (*SpeechTranscriptionConfig) GetDiarizationSpeakerCount Uses

func (x *SpeechTranscriptionConfig) GetDiarizationSpeakerCount() int32

func (*SpeechTranscriptionConfig) GetEnableAutomaticPunctuation Uses

func (x *SpeechTranscriptionConfig) GetEnableAutomaticPunctuation() bool

func (*SpeechTranscriptionConfig) GetEnableSpeakerDiarization Uses

func (x *SpeechTranscriptionConfig) GetEnableSpeakerDiarization() bool

func (*SpeechTranscriptionConfig) GetEnableWordConfidence Uses

func (x *SpeechTranscriptionConfig) GetEnableWordConfidence() bool

func (*SpeechTranscriptionConfig) GetFilterProfanity Uses

func (x *SpeechTranscriptionConfig) GetFilterProfanity() bool

func (*SpeechTranscriptionConfig) GetLanguageCode Uses

func (x *SpeechTranscriptionConfig) GetLanguageCode() string

func (*SpeechTranscriptionConfig) GetMaxAlternatives Uses

func (x *SpeechTranscriptionConfig) GetMaxAlternatives() int32

func (*SpeechTranscriptionConfig) GetSpeechContexts Uses

func (x *SpeechTranscriptionConfig) GetSpeechContexts() []*SpeechContext

func (*SpeechTranscriptionConfig) ProtoMessage Uses

func (*SpeechTranscriptionConfig) ProtoMessage()

func (*SpeechTranscriptionConfig) ProtoReflect Uses

func (x *SpeechTranscriptionConfig) ProtoReflect() protoreflect.Message

func (*SpeechTranscriptionConfig) Reset Uses

func (x *SpeechTranscriptionConfig) Reset()

func (*SpeechTranscriptionConfig) String Uses

func (x *SpeechTranscriptionConfig) String() string

type StreamingAnnotateVideoRequest Uses

type StreamingAnnotateVideoRequest struct {

    // *Required* The streaming request, which is either a streaming config or
    // video content.
    //
    // Types that are assignable to StreamingRequest:
    //	*StreamingAnnotateVideoRequest_VideoConfig
    //	*StreamingAnnotateVideoRequest_InputContent
    StreamingRequest isStreamingAnnotateVideoRequest_StreamingRequest `protobuf_oneof:"streaming_request"`
    // contains filtered or unexported fields
}

The top-level message sent by the client for the `StreamingAnnotateVideo` method. Multiple `StreamingAnnotateVideoRequest` messages are sent. The first message must only contain a `StreamingVideoConfig` message. All subsequent messages must only contain `input_content` data.

func (*StreamingAnnotateVideoRequest) Descriptor Uses

func (*StreamingAnnotateVideoRequest) Descriptor() ([]byte, []int)

Deprecated: Use StreamingAnnotateVideoRequest.ProtoReflect.Descriptor instead.

func (*StreamingAnnotateVideoRequest) GetInputContent Uses

func (x *StreamingAnnotateVideoRequest) GetInputContent() []byte

func (*StreamingAnnotateVideoRequest) GetStreamingRequest Uses

func (m *StreamingAnnotateVideoRequest) GetStreamingRequest() isStreamingAnnotateVideoRequest_StreamingRequest

func (*StreamingAnnotateVideoRequest) GetVideoConfig Uses

func (x *StreamingAnnotateVideoRequest) GetVideoConfig() *StreamingVideoConfig

func (*StreamingAnnotateVideoRequest) ProtoMessage Uses

func (*StreamingAnnotateVideoRequest) ProtoMessage()

func (*StreamingAnnotateVideoRequest) ProtoReflect Uses

func (x *StreamingAnnotateVideoRequest) ProtoReflect() protoreflect.Message

func (*StreamingAnnotateVideoRequest) Reset Uses

func (x *StreamingAnnotateVideoRequest) Reset()

func (*StreamingAnnotateVideoRequest) String Uses

func (x *StreamingAnnotateVideoRequest) String() string

type StreamingAnnotateVideoRequest_InputContent Uses

type StreamingAnnotateVideoRequest_InputContent struct {
    // The video data to be annotated. Chunks of video data are sequentially
    // sent in `StreamingAnnotateVideoRequest` messages. Except the initial
    // `StreamingAnnotateVideoRequest` message containing only
    // `video_config`, all subsequent `StreamingAnnotateVideoRequest`
    // messages must only contain the `input_content` field.
    // Note: as with all bytes fields, protocol buffers use a pure binary
    // representation (not base64).
    InputContent []byte `protobuf:"bytes,2,opt,name=input_content,json=inputContent,proto3,oneof"`
}

type StreamingAnnotateVideoRequest_VideoConfig Uses

type StreamingAnnotateVideoRequest_VideoConfig struct {
    // Provides information to the annotator, specifying how to process the
    // request. The first `StreamingAnnotateVideoRequest` message must only
    // contain a `video_config` message.
    VideoConfig *StreamingVideoConfig `protobuf:"bytes,1,opt,name=video_config,json=videoConfig,proto3,oneof"`
}

type StreamingAnnotateVideoResponse Uses

type StreamingAnnotateVideoResponse struct {

    // If set, returns a [google.rpc.Status][google.rpc.Status] message that
    // specifies the error for the operation.
    Error *status.Status `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
    // Streaming annotation results.
    AnnotationResults *StreamingVideoAnnotationResults `protobuf:"bytes,2,opt,name=annotation_results,json=annotationResults,proto3" json:"annotation_results,omitempty"`
    // Google Cloud Storage (GCS) URI that stores annotation results of one
    // streaming session in JSON format.
    // It is the annotation_result_storage_directory
    // from the request followed by '/cloud_project_number-session_id'.
    AnnotationResultsUri string `protobuf:"bytes,3,opt,name=annotation_results_uri,json=annotationResultsUri,proto3" json:"annotation_results_uri,omitempty"`
    // contains filtered or unexported fields
}

`StreamingAnnotateVideoResponse` is the only message returned to the client by `StreamingAnnotateVideo`. A series of zero or more `StreamingAnnotateVideoResponse` messages are streamed back to the client.

func (*StreamingAnnotateVideoResponse) Descriptor Uses

func (*StreamingAnnotateVideoResponse) Descriptor() ([]byte, []int)

Deprecated: Use StreamingAnnotateVideoResponse.ProtoReflect.Descriptor instead.

func (*StreamingAnnotateVideoResponse) GetAnnotationResults Uses

func (x *StreamingAnnotateVideoResponse) GetAnnotationResults() *StreamingVideoAnnotationResults

func (*StreamingAnnotateVideoResponse) GetAnnotationResultsUri Uses

func (x *StreamingAnnotateVideoResponse) GetAnnotationResultsUri() string

func (*StreamingAnnotateVideoResponse) GetError Uses

func (x *StreamingAnnotateVideoResponse) GetError() *status.Status

func (*StreamingAnnotateVideoResponse) ProtoMessage Uses

func (*StreamingAnnotateVideoResponse) ProtoMessage()

func (*StreamingAnnotateVideoResponse) ProtoReflect Uses

func (x *StreamingAnnotateVideoResponse) ProtoReflect() protoreflect.Message

func (*StreamingAnnotateVideoResponse) Reset Uses

func (x *StreamingAnnotateVideoResponse) Reset()

func (*StreamingAnnotateVideoResponse) String Uses

func (x *StreamingAnnotateVideoResponse) String() string

type StreamingAutomlActionRecognitionConfig Uses

type StreamingAutomlActionRecognitionConfig struct {

    // Resource name of AutoML model.
    // Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
    ModelName string `protobuf:"bytes,1,opt,name=model_name,json=modelName,proto3" json:"model_name,omitempty"`
    // contains filtered or unexported fields
}

Config for STREAMING_AUTOML_ACTION_RECOGNITION.

func (*StreamingAutomlActionRecognitionConfig) Descriptor Uses

func (*StreamingAutomlActionRecognitionConfig) Descriptor() ([]byte, []int)

Deprecated: Use StreamingAutomlActionRecognitionConfig.ProtoReflect.Descriptor instead.

func (*StreamingAutomlActionRecognitionConfig) GetModelName Uses

func (x *StreamingAutomlActionRecognitionConfig) GetModelName() string

func (*StreamingAutomlActionRecognitionConfig) ProtoMessage Uses

func (*StreamingAutomlActionRecognitionConfig) ProtoMessage()

func (*StreamingAutomlActionRecognitionConfig) ProtoReflect Uses

func (x *StreamingAutomlActionRecognitionConfig) ProtoReflect() protoreflect.Message

func (*StreamingAutomlActionRecognitionConfig) Reset Uses

func (x *StreamingAutomlActionRecognitionConfig) Reset()

func (*StreamingAutomlActionRecognitionConfig) String Uses

func (x *StreamingAutomlActionRecognitionConfig) String() string

type StreamingAutomlClassificationConfig Uses

type StreamingAutomlClassificationConfig struct {

    // Resource name of AutoML model.
    // Format:
    // `projects/{project_number}/locations/{location_id}/models/{model_id}`
    ModelName string `protobuf:"bytes,1,opt,name=model_name,json=modelName,proto3" json:"model_name,omitempty"`
    // contains filtered or unexported fields
}

Config for STREAMING_AUTOML_CLASSIFICATION.

func (*StreamingAutomlClassificationConfig) Descriptor Uses

func (*StreamingAutomlClassificationConfig) Descriptor() ([]byte, []int)

Deprecated: Use StreamingAutomlClassificationConfig.ProtoReflect.Descriptor instead.

func (*StreamingAutomlClassificationConfig) GetModelName Uses

func (x *StreamingAutomlClassificationConfig) GetModelName() string

func (*StreamingAutomlClassificationConfig) ProtoMessage Uses

func (*StreamingAutomlClassificationConfig) ProtoMessage()

func (*StreamingAutomlClassificationConfig) ProtoReflect Uses

func (x *StreamingAutomlClassificationConfig) ProtoReflect() protoreflect.Message

func (*StreamingAutomlClassificationConfig) Reset Uses

func (x *StreamingAutomlClassificationConfig) Reset()

func (*StreamingAutomlClassificationConfig) String Uses

func (x *StreamingAutomlClassificationConfig) String() string

type StreamingAutomlObjectTrackingConfig Uses

type StreamingAutomlObjectTrackingConfig struct {

    // Resource name of AutoML model.
    // Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
    ModelName string `protobuf:"bytes,1,opt,name=model_name,json=modelName,proto3" json:"model_name,omitempty"`
    // contains filtered or unexported fields
}

Config for STREAMING_AUTOML_OBJECT_TRACKING.

func (*StreamingAutomlObjectTrackingConfig) Descriptor Uses

func (*StreamingAutomlObjectTrackingConfig) Descriptor() ([]byte, []int)

Deprecated: Use StreamingAutomlObjectTrackingConfig.ProtoReflect.Descriptor instead.

func (*StreamingAutomlObjectTrackingConfig) GetModelName Uses

func (x *StreamingAutomlObjectTrackingConfig) GetModelName() string

func (*StreamingAutomlObjectTrackingConfig) ProtoMessage Uses

func (*StreamingAutomlObjectTrackingConfig) ProtoMessage()

func (*StreamingAutomlObjectTrackingConfig) ProtoReflect Uses

func (x *StreamingAutomlObjectTrackingConfig) ProtoReflect() protoreflect.Message

func (*StreamingAutomlObjectTrackingConfig) Reset Uses

func (x *StreamingAutomlObjectTrackingConfig) Reset()

func (*StreamingAutomlObjectTrackingConfig) String Uses

func (x *StreamingAutomlObjectTrackingConfig) String() string

type StreamingExplicitContentDetectionConfig Uses

type StreamingExplicitContentDetectionConfig struct {
    // contains filtered or unexported fields
}

Config for STREAMING_EXPLICIT_CONTENT_DETECTION.

func (*StreamingExplicitContentDetectionConfig) Descriptor Uses

func (*StreamingExplicitContentDetectionConfig) Descriptor() ([]byte, []int)

Deprecated: Use StreamingExplicitContentDetectionConfig.ProtoReflect.Descriptor instead.

func (*StreamingExplicitContentDetectionConfig) ProtoMessage Uses

func (*StreamingExplicitContentDetectionConfig) ProtoMessage()

func (*StreamingExplicitContentDetectionConfig) ProtoReflect Uses

func (x *StreamingExplicitContentDetectionConfig) ProtoReflect() protoreflect.Message

func (*StreamingExplicitContentDetectionConfig) Reset Uses

func (x *StreamingExplicitContentDetectionConfig) Reset()

func (*StreamingExplicitContentDetectionConfig) String Uses

func (x *StreamingExplicitContentDetectionConfig) String() string

type StreamingFeature Uses

type StreamingFeature int32

Streaming video annotation feature.

const (
    // Unspecified.
    StreamingFeature_STREAMING_FEATURE_UNSPECIFIED StreamingFeature = 0
    // Label detection. Detect objects, such as dog or flower.
    StreamingFeature_STREAMING_LABEL_DETECTION StreamingFeature = 1
    // Shot change detection.
    StreamingFeature_STREAMING_SHOT_CHANGE_DETECTION StreamingFeature = 2
    // Explicit content detection.
    StreamingFeature_STREAMING_EXPLICIT_CONTENT_DETECTION StreamingFeature = 3
    // Object detection and tracking.
    StreamingFeature_STREAMING_OBJECT_TRACKING StreamingFeature = 4
    // Action recognition based on AutoML model.
    StreamingFeature_STREAMING_AUTOML_ACTION_RECOGNITION StreamingFeature = 23
    // Video classification based on AutoML model.
    StreamingFeature_STREAMING_AUTOML_CLASSIFICATION StreamingFeature = 21
    // Object detection and tracking based on AutoML model.
    StreamingFeature_STREAMING_AUTOML_OBJECT_TRACKING StreamingFeature = 22
)

func (StreamingFeature) Descriptor Uses

func (StreamingFeature) Descriptor() protoreflect.EnumDescriptor

func (StreamingFeature) Enum Uses

func (x StreamingFeature) Enum() *StreamingFeature

func (StreamingFeature) EnumDescriptor Uses

func (StreamingFeature) EnumDescriptor() ([]byte, []int)

Deprecated: Use StreamingFeature.Descriptor instead.

func (StreamingFeature) Number Uses

func (x StreamingFeature) Number() protoreflect.EnumNumber

func (StreamingFeature) String Uses

func (x StreamingFeature) String() string

func (StreamingFeature) Type Uses

func (StreamingFeature) Type() protoreflect.EnumType

type StreamingLabelDetectionConfig Uses

type StreamingLabelDetectionConfig struct {

    // Whether the video has been captured from a stationary (i.e. non-moving)
    // camera. When set to true, might improve detection accuracy for moving
    // objects. Default: false.
    StationaryCamera bool `protobuf:"varint,1,opt,name=stationary_camera,json=stationaryCamera,proto3" json:"stationary_camera,omitempty"`
    // contains filtered or unexported fields
}

Config for STREAMING_LABEL_DETECTION.

func (*StreamingLabelDetectionConfig) Descriptor Uses

func (*StreamingLabelDetectionConfig) Descriptor() ([]byte, []int)

Deprecated: Use StreamingLabelDetectionConfig.ProtoReflect.Descriptor instead.

func (*StreamingLabelDetectionConfig) GetStationaryCamera Uses

func (x *StreamingLabelDetectionConfig) GetStationaryCamera() bool

func (*StreamingLabelDetectionConfig) ProtoMessage Uses

func (*StreamingLabelDetectionConfig) ProtoMessage()

func (*StreamingLabelDetectionConfig) ProtoReflect Uses

func (x *StreamingLabelDetectionConfig) ProtoReflect() protoreflect.Message

func (*StreamingLabelDetectionConfig) Reset Uses

func (x *StreamingLabelDetectionConfig) Reset()

func (*StreamingLabelDetectionConfig) String Uses

func (x *StreamingLabelDetectionConfig) String() string

type StreamingObjectTrackingConfig Uses

type StreamingObjectTrackingConfig struct {
    // contains filtered or unexported fields
}

Config for STREAMING_OBJECT_TRACKING.

func (*StreamingObjectTrackingConfig) Descriptor Uses

func (*StreamingObjectTrackingConfig) Descriptor() ([]byte, []int)

Deprecated: Use StreamingObjectTrackingConfig.ProtoReflect.Descriptor instead.

func (*StreamingObjectTrackingConfig) ProtoMessage Uses

func (*StreamingObjectTrackingConfig) ProtoMessage()

func (*StreamingObjectTrackingConfig) ProtoReflect Uses

func (x *StreamingObjectTrackingConfig) ProtoReflect() protoreflect.Message

func (*StreamingObjectTrackingConfig) Reset Uses

func (x *StreamingObjectTrackingConfig) Reset()

func (*StreamingObjectTrackingConfig) String Uses

func (x *StreamingObjectTrackingConfig) String() string

type StreamingShotChangeDetectionConfig Uses

type StreamingShotChangeDetectionConfig struct {
    // contains filtered or unexported fields
}

Config for STREAMING_SHOT_CHANGE_DETECTION.

func (*StreamingShotChangeDetectionConfig) Descriptor Uses

func (*StreamingShotChangeDetectionConfig) Descriptor() ([]byte, []int)

Deprecated: Use StreamingShotChangeDetectionConfig.ProtoReflect.Descriptor instead.

func (*StreamingShotChangeDetectionConfig) ProtoMessage Uses

func (*StreamingShotChangeDetectionConfig) ProtoMessage()

func (*StreamingShotChangeDetectionConfig) ProtoReflect Uses

func (x *StreamingShotChangeDetectionConfig) ProtoReflect() protoreflect.Message

func (*StreamingShotChangeDetectionConfig) Reset Uses

func (x *StreamingShotChangeDetectionConfig) Reset()

func (*StreamingShotChangeDetectionConfig) String Uses

func (x *StreamingShotChangeDetectionConfig) String() string

type StreamingStorageConfig Uses

type StreamingStorageConfig struct {

    // Enable streaming storage. Default: false.
    EnableStorageAnnotationResult bool `protobuf:"varint,1,opt,name=enable_storage_annotation_result,json=enableStorageAnnotationResult,proto3" json:"enable_storage_annotation_result,omitempty"`
    // Cloud Storage URI to store all annotation results for one client. Client
    // should specify this field as the top-level storage directory. Annotation
    // results of different sessions will be put into different sub-directories
    // denoted by project_name and session_id. All sub-directories will be auto
    // generated by program and will be made accessible to client in response
    // proto. URIs must be specified in the following format:
    // `gs://bucket-id/object-id`. `bucket-id` should be a valid Cloud Storage
    // bucket created by the client, and bucket permissions shall also be
    // configured properly. `object-id` can be an arbitrary string that makes
    // sense to the client.
    // Other URI formats will return an error and cause a Cloud Storage write
    // failure.
    AnnotationResultStorageDirectory string `protobuf:"bytes,3,opt,name=annotation_result_storage_directory,json=annotationResultStorageDirectory,proto3" json:"annotation_result_storage_directory,omitempty"`
    // contains filtered or unexported fields
}

Config for streaming storage option.

func (*StreamingStorageConfig) Descriptor Uses

func (*StreamingStorageConfig) Descriptor() ([]byte, []int)

Deprecated: Use StreamingStorageConfig.ProtoReflect.Descriptor instead.

func (*StreamingStorageConfig) GetAnnotationResultStorageDirectory Uses

func (x *StreamingStorageConfig) GetAnnotationResultStorageDirectory() string

func (*StreamingStorageConfig) GetEnableStorageAnnotationResult Uses

func (x *StreamingStorageConfig) GetEnableStorageAnnotationResult() bool

func (*StreamingStorageConfig) ProtoMessage Uses

func (*StreamingStorageConfig) ProtoMessage()

func (*StreamingStorageConfig) ProtoReflect Uses

func (x *StreamingStorageConfig) ProtoReflect() protoreflect.Message

func (*StreamingStorageConfig) Reset Uses

func (x *StreamingStorageConfig) Reset()

func (*StreamingStorageConfig) String Uses

func (x *StreamingStorageConfig) String() string

type StreamingVideoAnnotationResults Uses

type StreamingVideoAnnotationResults struct {

    // Shot annotation results. Each shot is represented as a video segment.
    ShotAnnotations []*VideoSegment `protobuf:"bytes,1,rep,name=shot_annotations,json=shotAnnotations,proto3" json:"shot_annotations,omitempty"`
    // Label annotation results.
    LabelAnnotations []*LabelAnnotation `protobuf:"bytes,2,rep,name=label_annotations,json=labelAnnotations,proto3" json:"label_annotations,omitempty"`
    // Explicit content annotation results.
    ExplicitAnnotation *ExplicitContentAnnotation `protobuf:"bytes,3,opt,name=explicit_annotation,json=explicitAnnotation,proto3" json:"explicit_annotation,omitempty"`
    // Object tracking results.
    ObjectAnnotations []*ObjectTrackingAnnotation `protobuf:"bytes,4,rep,name=object_annotations,json=objectAnnotations,proto3" json:"object_annotations,omitempty"`
    // contains filtered or unexported fields
}

Streaming annotation results corresponding to a portion of the video that is currently being processed.

func (*StreamingVideoAnnotationResults) Descriptor Uses

func (*StreamingVideoAnnotationResults) Descriptor() ([]byte, []int)

Deprecated: Use StreamingVideoAnnotationResults.ProtoReflect.Descriptor instead.

func (*StreamingVideoAnnotationResults) GetExplicitAnnotation Uses

func (x *StreamingVideoAnnotationResults) GetExplicitAnnotation() *ExplicitContentAnnotation

func (*StreamingVideoAnnotationResults) GetLabelAnnotations Uses

func (x *StreamingVideoAnnotationResults) GetLabelAnnotations() []*LabelAnnotation

func (*StreamingVideoAnnotationResults) GetObjectAnnotations Uses

func (x *StreamingVideoAnnotationResults) GetObjectAnnotations() []*ObjectTrackingAnnotation

func (*StreamingVideoAnnotationResults) GetShotAnnotations Uses

func (x *StreamingVideoAnnotationResults) GetShotAnnotations() []*VideoSegment

func (*StreamingVideoAnnotationResults) ProtoMessage Uses

func (*StreamingVideoAnnotationResults) ProtoMessage()

func (*StreamingVideoAnnotationResults) ProtoReflect Uses

func (x *StreamingVideoAnnotationResults) ProtoReflect() protoreflect.Message

func (*StreamingVideoAnnotationResults) Reset Uses

func (x *StreamingVideoAnnotationResults) Reset()

func (*StreamingVideoAnnotationResults) String Uses

func (x *StreamingVideoAnnotationResults) String() string

type StreamingVideoConfig Uses

type StreamingVideoConfig struct {

    // Config for requested annotation feature.
    //
    // Types that are assignable to StreamingConfig:
    //	*StreamingVideoConfig_ShotChangeDetectionConfig
    //	*StreamingVideoConfig_LabelDetectionConfig
    //	*StreamingVideoConfig_ExplicitContentDetectionConfig
    //	*StreamingVideoConfig_ObjectTrackingConfig
    //	*StreamingVideoConfig_AutomlActionRecognitionConfig
    //	*StreamingVideoConfig_AutomlClassificationConfig
    //	*StreamingVideoConfig_AutomlObjectTrackingConfig
    StreamingConfig isStreamingVideoConfig_StreamingConfig `protobuf_oneof:"streaming_config"`
    // Requested annotation feature.
    Feature StreamingFeature `protobuf:"varint,1,opt,name=feature,proto3,enum=google.cloud.videointelligence.v1p3beta1.StreamingFeature" json:"feature,omitempty"`
    // Streaming storage option. By default: storage is disabled.
    StorageConfig *StreamingStorageConfig `protobuf:"bytes,30,opt,name=storage_config,json=storageConfig,proto3" json:"storage_config,omitempty"`
    // contains filtered or unexported fields
}

Provides information to the annotator that specifies how to process the request.

func (*StreamingVideoConfig) Descriptor Uses

func (*StreamingVideoConfig) Descriptor() ([]byte, []int)

Deprecated: Use StreamingVideoConfig.ProtoReflect.Descriptor instead.

func (*StreamingVideoConfig) GetAutomlActionRecognitionConfig Uses

func (x *StreamingVideoConfig) GetAutomlActionRecognitionConfig() *StreamingAutomlActionRecognitionConfig

func (*StreamingVideoConfig) GetAutomlClassificationConfig Uses

func (x *StreamingVideoConfig) GetAutomlClassificationConfig() *StreamingAutomlClassificationConfig

func (*StreamingVideoConfig) GetAutomlObjectTrackingConfig Uses

func (x *StreamingVideoConfig) GetAutomlObjectTrackingConfig() *StreamingAutomlObjectTrackingConfig

func (*StreamingVideoConfig) GetExplicitContentDetectionConfig Uses

func (x *StreamingVideoConfig) GetExplicitContentDetectionConfig() *StreamingExplicitContentDetectionConfig

func (*StreamingVideoConfig) GetFeature Uses

func (x *StreamingVideoConfig) GetFeature() StreamingFeature

func (*StreamingVideoConfig) GetLabelDetectionConfig Uses

func (x *StreamingVideoConfig) GetLabelDetectionConfig() *StreamingLabelDetectionConfig

func (*StreamingVideoConfig) GetObjectTrackingConfig Uses

func (x *StreamingVideoConfig) GetObjectTrackingConfig() *StreamingObjectTrackingConfig

func (*StreamingVideoConfig) GetShotChangeDetectionConfig Uses

func (x *StreamingVideoConfig) GetShotChangeDetectionConfig() *StreamingShotChangeDetectionConfig

func (*StreamingVideoConfig) GetStorageConfig Uses

func (x *StreamingVideoConfig) GetStorageConfig() *StreamingStorageConfig

func (*StreamingVideoConfig) GetStreamingConfig Uses

func (m *StreamingVideoConfig) GetStreamingConfig() isStreamingVideoConfig_StreamingConfig

func (*StreamingVideoConfig) ProtoMessage Uses

func (*StreamingVideoConfig) ProtoMessage()

func (*StreamingVideoConfig) ProtoReflect Uses

func (x *StreamingVideoConfig) ProtoReflect() protoreflect.Message

func (*StreamingVideoConfig) Reset Uses

func (x *StreamingVideoConfig) Reset()

func (*StreamingVideoConfig) String Uses

func (x *StreamingVideoConfig) String() string

type StreamingVideoConfig_AutomlActionRecognitionConfig Uses

type StreamingVideoConfig_AutomlActionRecognitionConfig struct {
    // Config for STREAMING_AUTOML_ACTION_RECOGNITION.
    AutomlActionRecognitionConfig *StreamingAutomlActionRecognitionConfig `protobuf:"bytes,23,opt,name=automl_action_recognition_config,json=automlActionRecognitionConfig,proto3,oneof"`
}

type StreamingVideoConfig_AutomlClassificationConfig Uses

type StreamingVideoConfig_AutomlClassificationConfig struct {
    // Config for STREAMING_AUTOML_CLASSIFICATION.
    AutomlClassificationConfig *StreamingAutomlClassificationConfig `protobuf:"bytes,21,opt,name=automl_classification_config,json=automlClassificationConfig,proto3,oneof"`
}

type StreamingVideoConfig_AutomlObjectTrackingConfig Uses

type StreamingVideoConfig_AutomlObjectTrackingConfig struct {
    // Config for STREAMING_AUTOML_OBJECT_TRACKING.
    AutomlObjectTrackingConfig *StreamingAutomlObjectTrackingConfig `protobuf:"bytes,22,opt,name=automl_object_tracking_config,json=automlObjectTrackingConfig,proto3,oneof"`
}

type StreamingVideoConfig_ExplicitContentDetectionConfig Uses

type StreamingVideoConfig_ExplicitContentDetectionConfig struct {
    // Config for STREAMING_EXPLICIT_CONTENT_DETECTION.
    ExplicitContentDetectionConfig *StreamingExplicitContentDetectionConfig `protobuf:"bytes,4,opt,name=explicit_content_detection_config,json=explicitContentDetectionConfig,proto3,oneof"`
}

type StreamingVideoConfig_LabelDetectionConfig Uses

type StreamingVideoConfig_LabelDetectionConfig struct {
    // Config for STREAMING_LABEL_DETECTION.
    LabelDetectionConfig *StreamingLabelDetectionConfig `protobuf:"bytes,3,opt,name=label_detection_config,json=labelDetectionConfig,proto3,oneof"`
}

type StreamingVideoConfig_ObjectTrackingConfig Uses

type StreamingVideoConfig_ObjectTrackingConfig struct {
    // Config for STREAMING_OBJECT_TRACKING.
    ObjectTrackingConfig *StreamingObjectTrackingConfig `protobuf:"bytes,5,opt,name=object_tracking_config,json=objectTrackingConfig,proto3,oneof"`
}

type StreamingVideoConfig_ShotChangeDetectionConfig Uses

type StreamingVideoConfig_ShotChangeDetectionConfig struct {
    // Config for STREAMING_SHOT_CHANGE_DETECTION.
    ShotChangeDetectionConfig *StreamingShotChangeDetectionConfig `protobuf:"bytes,2,opt,name=shot_change_detection_config,json=shotChangeDetectionConfig,proto3,oneof"`
}

type StreamingVideoIntelligenceServiceClient Uses

type StreamingVideoIntelligenceServiceClient interface {
    // Performs video annotation with bidirectional streaming: emitting results
    // while sending video/audio bytes.
    // This method is only available via the gRPC API (not REST).
    StreamingAnnotateVideo(ctx context.Context, opts ...grpc.CallOption) (StreamingVideoIntelligenceService_StreamingAnnotateVideoClient, error)
}

StreamingVideoIntelligenceServiceClient is the client API for StreamingVideoIntelligenceService service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewStreamingVideoIntelligenceServiceClient Uses

func NewStreamingVideoIntelligenceServiceClient(cc grpc.ClientConnInterface) StreamingVideoIntelligenceServiceClient

type StreamingVideoIntelligenceServiceServer Uses

type StreamingVideoIntelligenceServiceServer interface {
    // Performs video annotation with bidirectional streaming: emitting results
    // while sending video/audio bytes.
    // This method is only available via the gRPC API (not REST).
    StreamingAnnotateVideo(StreamingVideoIntelligenceService_StreamingAnnotateVideoServer) error
}

StreamingVideoIntelligenceServiceServer is the server API for StreamingVideoIntelligenceService service.

type StreamingVideoIntelligenceService_StreamingAnnotateVideoClient Uses

type StreamingVideoIntelligenceService_StreamingAnnotateVideoClient interface {
    Send(*StreamingAnnotateVideoRequest) error
    Recv() (*StreamingAnnotateVideoResponse, error)
    grpc.ClientStream
}

type StreamingVideoIntelligenceService_StreamingAnnotateVideoServer Uses

type StreamingVideoIntelligenceService_StreamingAnnotateVideoServer interface {
    Send(*StreamingAnnotateVideoResponse) error
    Recv() (*StreamingAnnotateVideoRequest, error)
    grpc.ServerStream
}

type TextAnnotation Uses

type TextAnnotation struct {

    // The detected text.
    Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
    // All video segments where OCR detected text appears.
    Segments []*TextSegment `protobuf:"bytes,2,rep,name=segments,proto3" json:"segments,omitempty"`
    // contains filtered or unexported fields
}

Annotations related to one detected OCR text snippet. This will contain the corresponding text, confidence value, and frame level information for each detection.

func (*TextAnnotation) Descriptor Uses

func (*TextAnnotation) Descriptor() ([]byte, []int)

Deprecated: Use TextAnnotation.ProtoReflect.Descriptor instead.

func (*TextAnnotation) GetSegments Uses

func (x *TextAnnotation) GetSegments() []*TextSegment

func (*TextAnnotation) GetText Uses

func (x *TextAnnotation) GetText() string

func (*TextAnnotation) ProtoMessage Uses

func (*TextAnnotation) ProtoMessage()

func (*TextAnnotation) ProtoReflect Uses

func (x *TextAnnotation) ProtoReflect() protoreflect.Message

func (*TextAnnotation) Reset Uses

func (x *TextAnnotation) Reset()

func (*TextAnnotation) String Uses

func (x *TextAnnotation) String() string

type TextDetectionConfig Uses

type TextDetectionConfig struct {

    // Language hint can be specified if the language to be detected is known a
    // priori. It can increase the accuracy of the detection. Language hint must
    // be language code in BCP-47 format.
    //
    // Automatic language detection is performed if no hint is provided.
    LanguageHints []string `protobuf:"bytes,1,rep,name=language_hints,json=languageHints,proto3" json:"language_hints,omitempty"`
    // Model to use for text detection.
    // Supported values: "builtin/stable" (the default if unset) and
    // "builtin/latest".
    Model string `protobuf:"bytes,2,opt,name=model,proto3" json:"model,omitempty"`
    // contains filtered or unexported fields
}

Config for TEXT_DETECTION.

func (*TextDetectionConfig) Descriptor Uses

func (*TextDetectionConfig) Descriptor() ([]byte, []int)

Deprecated: Use TextDetectionConfig.ProtoReflect.Descriptor instead.

func (*TextDetectionConfig) GetLanguageHints Uses

func (x *TextDetectionConfig) GetLanguageHints() []string

func (*TextDetectionConfig) GetModel Uses

func (x *TextDetectionConfig) GetModel() string

func (*TextDetectionConfig) ProtoMessage Uses

func (*TextDetectionConfig) ProtoMessage()

func (*TextDetectionConfig) ProtoReflect Uses

func (x *TextDetectionConfig) ProtoReflect() protoreflect.Message

func (*TextDetectionConfig) Reset Uses

func (x *TextDetectionConfig) Reset()

func (*TextDetectionConfig) String Uses

func (x *TextDetectionConfig) String() string

type TextFrame Uses

type TextFrame struct {

    // Bounding polygon of the detected text for this frame.
    RotatedBoundingBox *NormalizedBoundingPoly `protobuf:"bytes,1,opt,name=rotated_bounding_box,json=rotatedBoundingBox,proto3" json:"rotated_bounding_box,omitempty"`
    // Timestamp of this frame.
    TimeOffset *duration.Duration `protobuf:"bytes,2,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
    // contains filtered or unexported fields
}

Video frame level annotation results for text annotation (OCR). Contains information regarding timestamp and bounding box locations for the frames containing detected OCR text snippets.

func (*TextFrame) Descriptor Uses

func (*TextFrame) Descriptor() ([]byte, []int)

Deprecated: Use TextFrame.ProtoReflect.Descriptor instead.

func (*TextFrame) GetRotatedBoundingBox Uses

func (x *TextFrame) GetRotatedBoundingBox() *NormalizedBoundingPoly

func (*TextFrame) GetTimeOffset Uses

func (x *TextFrame) GetTimeOffset() *duration.Duration

func (*TextFrame) ProtoMessage Uses

func (*TextFrame) ProtoMessage()

func (*TextFrame) ProtoReflect Uses

func (x *TextFrame) ProtoReflect() protoreflect.Message

func (*TextFrame) Reset Uses

func (x *TextFrame) Reset()

func (*TextFrame) String Uses

func (x *TextFrame) String() string

type TextSegment Uses

type TextSegment struct {

    // Video segment where a text snippet was detected.
    Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment,proto3" json:"segment,omitempty"`
    // Confidence for the track of detected text. It is calculated as the highest
    // over all frames where OCR detected text appears.
    Confidence float32 `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
    // Information related to the frames where OCR detected text appears.
    Frames []*TextFrame `protobuf:"bytes,3,rep,name=frames,proto3" json:"frames,omitempty"`
    // contains filtered or unexported fields
}

Video segment level annotation results for text detection.

func (*TextSegment) Descriptor Uses

func (*TextSegment) Descriptor() ([]byte, []int)

Deprecated: Use TextSegment.ProtoReflect.Descriptor instead.

func (*TextSegment) GetConfidence Uses

func (x *TextSegment) GetConfidence() float32

func (*TextSegment) GetFrames Uses

func (x *TextSegment) GetFrames() []*TextFrame

func (*TextSegment) GetSegment Uses

func (x *TextSegment) GetSegment() *VideoSegment

func (*TextSegment) ProtoMessage Uses

func (*TextSegment) ProtoMessage()

func (*TextSegment) ProtoReflect Uses

func (x *TextSegment) ProtoReflect() protoreflect.Message

func (*TextSegment) Reset Uses

func (x *TextSegment) Reset()

func (*TextSegment) String Uses

func (x *TextSegment) String() string

type TimestampedObject Uses

type TimestampedObject struct {

    // Normalized bounding box in a frame where the object is located.
    NormalizedBoundingBox *NormalizedBoundingBox `protobuf:"bytes,1,opt,name=normalized_bounding_box,json=normalizedBoundingBox,proto3" json:"normalized_bounding_box,omitempty"`
    // Time-offset, relative to the beginning of the video,
    // corresponding to the video frame for this object.
    TimeOffset *duration.Duration `protobuf:"bytes,2,opt,name=time_offset,json=timeOffset,proto3" json:"time_offset,omitempty"`
    // Optional. The attributes of the object in the bounding box.
    Attributes []*DetectedAttribute `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
    // Optional. The detected landmarks.
    Landmarks []*DetectedLandmark `protobuf:"bytes,4,rep,name=landmarks,proto3" json:"landmarks,omitempty"`
    // contains filtered or unexported fields
}

For tracking-related features. An object at time_offset, with attributes, and located within normalized_bounding_box.

func (*TimestampedObject) Descriptor Uses

func (*TimestampedObject) Descriptor() ([]byte, []int)

Deprecated: Use TimestampedObject.ProtoReflect.Descriptor instead.

func (*TimestampedObject) GetAttributes Uses

func (x *TimestampedObject) GetAttributes() []*DetectedAttribute

func (*TimestampedObject) GetLandmarks Uses

func (x *TimestampedObject) GetLandmarks() []*DetectedLandmark

func (*TimestampedObject) GetNormalizedBoundingBox Uses

func (x *TimestampedObject) GetNormalizedBoundingBox() *NormalizedBoundingBox

func (*TimestampedObject) GetTimeOffset Uses

func (x *TimestampedObject) GetTimeOffset() *duration.Duration

func (*TimestampedObject) ProtoMessage Uses

func (*TimestampedObject) ProtoMessage()

func (*TimestampedObject) ProtoReflect Uses

func (x *TimestampedObject) ProtoReflect() protoreflect.Message

func (*TimestampedObject) Reset Uses

func (x *TimestampedObject) Reset()

func (*TimestampedObject) String Uses

func (x *TimestampedObject) String() string

type Track Uses

type Track struct {

    // Video segment of a track.
    Segment *VideoSegment `protobuf:"bytes,1,opt,name=segment,proto3" json:"segment,omitempty"`
    // The object with timestamp and attributes per frame in the track.
    TimestampedObjects []*TimestampedObject `protobuf:"bytes,2,rep,name=timestamped_objects,json=timestampedObjects,proto3" json:"timestamped_objects,omitempty"`
    // Optional. Attributes in the track level.
    Attributes []*DetectedAttribute `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
    // Optional. The confidence score of the tracked object.
    Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
    // contains filtered or unexported fields
}

A track of an object instance.

func (*Track) Descriptor Uses

func (*Track) Descriptor() ([]byte, []int)

Deprecated: Use Track.ProtoReflect.Descriptor instead.

func (*Track) GetAttributes Uses

func (x *Track) GetAttributes() []*DetectedAttribute

func (*Track) GetConfidence Uses

func (x *Track) GetConfidence() float32

func (*Track) GetSegment Uses

func (x *Track) GetSegment() *VideoSegment

func (*Track) GetTimestampedObjects Uses

func (x *Track) GetTimestampedObjects() []*TimestampedObject

func (*Track) ProtoMessage Uses

func (*Track) ProtoMessage()

func (*Track) ProtoReflect Uses

func (x *Track) ProtoReflect() protoreflect.Message

func (*Track) Reset Uses

func (x *Track) Reset()

func (*Track) String Uses

func (x *Track) String() string

type UnimplementedStreamingVideoIntelligenceServiceServer Uses

type UnimplementedStreamingVideoIntelligenceServiceServer struct {
}

UnimplementedStreamingVideoIntelligenceServiceServer can be embedded to have forward compatible implementations.

func (*UnimplementedStreamingVideoIntelligenceServiceServer) StreamingAnnotateVideo Uses

func (*UnimplementedStreamingVideoIntelligenceServiceServer) StreamingAnnotateVideo(StreamingVideoIntelligenceService_StreamingAnnotateVideoServer) error

type UnimplementedVideoIntelligenceServiceServer Uses

type UnimplementedVideoIntelligenceServiceServer struct {
}

UnimplementedVideoIntelligenceServiceServer can be embedded to have forward compatible implementations.

func (*UnimplementedVideoIntelligenceServiceServer) AnnotateVideo Uses

func (*UnimplementedVideoIntelligenceServiceServer) AnnotateVideo(context.Context, *AnnotateVideoRequest) (*longrunning.Operation, error)

type VideoAnnotationProgress Uses

type VideoAnnotationProgress struct {

    // Video file location in
    // [Cloud Storage](https://cloud.google.com/storage/).
    InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
    // Approximate percentage processed thus far. Guaranteed to be
    // 100 when fully processed.
    ProgressPercent int32 `protobuf:"varint,2,opt,name=progress_percent,json=progressPercent,proto3" json:"progress_percent,omitempty"`
    // Time when the request was received.
    StartTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
    // Time of the most recent update.
    UpdateTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
    // Specifies which feature is being tracked if the request contains more than
    // one feature.
    Feature Feature `protobuf:"varint,5,opt,name=feature,proto3,enum=google.cloud.videointelligence.v1p3beta1.Feature" json:"feature,omitempty"`
    // Specifies which segment is being tracked if the request contains more than
    // one segment.
    Segment *VideoSegment `protobuf:"bytes,6,opt,name=segment,proto3" json:"segment,omitempty"`
    // contains filtered or unexported fields
}

Annotation progress for a single video.

func (*VideoAnnotationProgress) Descriptor Uses

func (*VideoAnnotationProgress) Descriptor() ([]byte, []int)

Deprecated: Use VideoAnnotationProgress.ProtoReflect.Descriptor instead.

func (*VideoAnnotationProgress) GetFeature Uses

func (x *VideoAnnotationProgress) GetFeature() Feature

func (*VideoAnnotationProgress) GetInputUri Uses

func (x *VideoAnnotationProgress) GetInputUri() string

func (*VideoAnnotationProgress) GetProgressPercent Uses

func (x *VideoAnnotationProgress) GetProgressPercent() int32

func (*VideoAnnotationProgress) GetSegment Uses

func (x *VideoAnnotationProgress) GetSegment() *VideoSegment

func (*VideoAnnotationProgress) GetStartTime Uses

func (x *VideoAnnotationProgress) GetStartTime() *timestamp.Timestamp

func (*VideoAnnotationProgress) GetUpdateTime Uses

func (x *VideoAnnotationProgress) GetUpdateTime() *timestamp.Timestamp

func (*VideoAnnotationProgress) ProtoMessage Uses

func (*VideoAnnotationProgress) ProtoMessage()

func (*VideoAnnotationProgress) ProtoReflect Uses

func (x *VideoAnnotationProgress) ProtoReflect() protoreflect.Message

func (*VideoAnnotationProgress) Reset Uses

func (x *VideoAnnotationProgress) Reset()

func (*VideoAnnotationProgress) String Uses

func (x *VideoAnnotationProgress) String() string

type VideoAnnotationResults Uses

type VideoAnnotationResults struct {

    // Video file location in
    // [Cloud Storage](https://cloud.google.com/storage/).
    InputUri string `protobuf:"bytes,1,opt,name=input_uri,json=inputUri,proto3" json:"input_uri,omitempty"`
    // Video segment on which the annotation is run.
    Segment *VideoSegment `protobuf:"bytes,10,opt,name=segment,proto3" json:"segment,omitempty"`
    // Topical label annotations on video level or user-specified segment level.
    // There is exactly one element for each unique label.
    SegmentLabelAnnotations []*LabelAnnotation `protobuf:"bytes,2,rep,name=segment_label_annotations,json=segmentLabelAnnotations,proto3" json:"segment_label_annotations,omitempty"`
    // Presence label annotations on video level or user-specified segment level.
    // There is exactly one element for each unique label. Compared to the
    // existing topical `segment_label_annotations`, this field presents more
    // fine-grained, segment-level labels detected in video content and is made
    // available only when the client sets `LabelDetectionConfig.model` to
    // "builtin/latest" in the request.
    SegmentPresenceLabelAnnotations []*LabelAnnotation `protobuf:"bytes,23,rep,name=segment_presence_label_annotations,json=segmentPresenceLabelAnnotations,proto3" json:"segment_presence_label_annotations,omitempty"`
    // Topical label annotations on shot level.
    // There is exactly one element for each unique label.
    ShotLabelAnnotations []*LabelAnnotation `protobuf:"bytes,3,rep,name=shot_label_annotations,json=shotLabelAnnotations,proto3" json:"shot_label_annotations,omitempty"`
    // Presence label annotations on shot level. There is exactly one element for
    // each unique label. Compared to the existing topical
    // `shot_label_annotations`, this field presents more fine-grained, shot-level
    // labels detected in video content and is made available only when the client
    // sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
    ShotPresenceLabelAnnotations []*LabelAnnotation `protobuf:"bytes,24,rep,name=shot_presence_label_annotations,json=shotPresenceLabelAnnotations,proto3" json:"shot_presence_label_annotations,omitempty"`
    // Label annotations on frame level.
    // There is exactly one element for each unique label.
    FrameLabelAnnotations []*LabelAnnotation `protobuf:"bytes,4,rep,name=frame_label_annotations,json=frameLabelAnnotations,proto3" json:"frame_label_annotations,omitempty"`
    // Face detection annotations.
    FaceDetectionAnnotations []*FaceDetectionAnnotation `protobuf:"bytes,13,rep,name=face_detection_annotations,json=faceDetectionAnnotations,proto3" json:"face_detection_annotations,omitempty"`
    // Shot annotations. Each shot is represented as a video segment.
    ShotAnnotations []*VideoSegment `protobuf:"bytes,6,rep,name=shot_annotations,json=shotAnnotations,proto3" json:"shot_annotations,omitempty"`
    // Explicit content annotation.
    ExplicitAnnotation *ExplicitContentAnnotation `protobuf:"bytes,7,opt,name=explicit_annotation,json=explicitAnnotation,proto3" json:"explicit_annotation,omitempty"`
    // Speech transcription.
    SpeechTranscriptions []*SpeechTranscription `protobuf:"bytes,11,rep,name=speech_transcriptions,json=speechTranscriptions,proto3" json:"speech_transcriptions,omitempty"`
    // OCR text detection and tracking.
    // Annotations for list of detected text snippets. Each will have list of
    // frame information associated with it.
    TextAnnotations []*TextAnnotation `protobuf:"bytes,12,rep,name=text_annotations,json=textAnnotations,proto3" json:"text_annotations,omitempty"`
    // Annotations for list of objects detected and tracked in video.
    ObjectAnnotations []*ObjectTrackingAnnotation `protobuf:"bytes,14,rep,name=object_annotations,json=objectAnnotations,proto3" json:"object_annotations,omitempty"`
    // Annotations for list of logos detected, tracked and recognized in video.
    LogoRecognitionAnnotations []*LogoRecognitionAnnotation `protobuf:"bytes,19,rep,name=logo_recognition_annotations,json=logoRecognitionAnnotations,proto3" json:"logo_recognition_annotations,omitempty"`
    // Person detection annotations.
    PersonDetectionAnnotations []*PersonDetectionAnnotation `protobuf:"bytes,20,rep,name=person_detection_annotations,json=personDetectionAnnotations,proto3" json:"person_detection_annotations,omitempty"`
    // Celebrity recognition annotations.
    CelebrityRecognitionAnnotations *CelebrityRecognitionAnnotation `protobuf:"bytes,21,opt,name=celebrity_recognition_annotations,json=celebrityRecognitionAnnotations,proto3" json:"celebrity_recognition_annotations,omitempty"`
    // If set, indicates an error. Note that for a single `AnnotateVideoRequest`
    // some videos may succeed and some may fail.
    Error *status.Status `protobuf:"bytes,9,opt,name=error,proto3" json:"error,omitempty"`
    // contains filtered or unexported fields
}

Annotation results for a single video.

func (*VideoAnnotationResults) Descriptor Uses

func (*VideoAnnotationResults) Descriptor() ([]byte, []int)

Deprecated: Use VideoAnnotationResults.ProtoReflect.Descriptor instead.

func (*VideoAnnotationResults) GetCelebrityRecognitionAnnotations Uses

func (x *VideoAnnotationResults) GetCelebrityRecognitionAnnotations() *CelebrityRecognitionAnnotation

func (*VideoAnnotationResults) GetError Uses

func (x *VideoAnnotationResults) GetError() *status.Status

func (*VideoAnnotationResults) GetExplicitAnnotation Uses

func (x *VideoAnnotationResults) GetExplicitAnnotation() *ExplicitContentAnnotation

func (*VideoAnnotationResults) GetFaceDetectionAnnotations Uses

func (x *VideoAnnotationResults) GetFaceDetectionAnnotations() []*FaceDetectionAnnotation

func (*VideoAnnotationResults) GetFrameLabelAnnotations Uses

func (x *VideoAnnotationResults) GetFrameLabelAnnotations() []*LabelAnnotation

func (*VideoAnnotationResults) GetInputUri Uses

func (x *VideoAnnotationResults) GetInputUri() string

func (*VideoAnnotationResults) GetLogoRecognitionAnnotations Uses

func (x *VideoAnnotationResults) GetLogoRecognitionAnnotations() []*LogoRecognitionAnnotation

func (*VideoAnnotationResults) GetObjectAnnotations Uses

func (x *VideoAnnotationResults) GetObjectAnnotations() []*ObjectTrackingAnnotation

func (*VideoAnnotationResults) GetPersonDetectionAnnotations Uses

func (x *VideoAnnotationResults) GetPersonDetectionAnnotations() []*PersonDetectionAnnotation

func (*VideoAnnotationResults) GetSegment Uses

func (x *VideoAnnotationResults) GetSegment() *VideoSegment

func (*VideoAnnotationResults) GetSegmentLabelAnnotations Uses

func (x *VideoAnnotationResults) GetSegmentLabelAnnotations() []*LabelAnnotation

func (*VideoAnnotationResults) GetSegmentPresenceLabelAnnotations Uses

func (x *VideoAnnotationResults) GetSegmentPresenceLabelAnnotations() []*LabelAnnotation

func (*VideoAnnotationResults) GetShotAnnotations Uses

func (x *VideoAnnotationResults) GetShotAnnotations() []*VideoSegment

func (*VideoAnnotationResults) GetShotLabelAnnotations Uses

func (x *VideoAnnotationResults) GetShotLabelAnnotations() []*LabelAnnotation

func (*VideoAnnotationResults) GetShotPresenceLabelAnnotations Uses

func (x *VideoAnnotationResults) GetShotPresenceLabelAnnotations() []*LabelAnnotation

func (*VideoAnnotationResults) GetSpeechTranscriptions Uses

func (x *VideoAnnotationResults) GetSpeechTranscriptions() []*SpeechTranscription

func (*VideoAnnotationResults) GetTextAnnotations Uses

func (x *VideoAnnotationResults) GetTextAnnotations() []*TextAnnotation

func (*VideoAnnotationResults) ProtoMessage Uses

func (*VideoAnnotationResults) ProtoMessage()

func (*VideoAnnotationResults) ProtoReflect Uses

func (x *VideoAnnotationResults) ProtoReflect() protoreflect.Message

func (*VideoAnnotationResults) Reset Uses

func (x *VideoAnnotationResults) Reset()

func (*VideoAnnotationResults) String Uses

func (x *VideoAnnotationResults) String() string

type VideoContext Uses

type VideoContext struct {

    // Video segments to annotate. The segments may overlap and are not required
    // to be contiguous or span the whole video. If unspecified, each video is
    // treated as a single segment.
    Segments []*VideoSegment `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty"`
    // Config for LABEL_DETECTION.
    LabelDetectionConfig *LabelDetectionConfig `protobuf:"bytes,2,opt,name=label_detection_config,json=labelDetectionConfig,proto3" json:"label_detection_config,omitempty"`
    // Config for SHOT_CHANGE_DETECTION.
    ShotChangeDetectionConfig *ShotChangeDetectionConfig `protobuf:"bytes,3,opt,name=shot_change_detection_config,json=shotChangeDetectionConfig,proto3" json:"shot_change_detection_config,omitempty"`
    // Config for EXPLICIT_CONTENT_DETECTION.
    ExplicitContentDetectionConfig *ExplicitContentDetectionConfig `protobuf:"bytes,4,opt,name=explicit_content_detection_config,json=explicitContentDetectionConfig,proto3" json:"explicit_content_detection_config,omitempty"`
    // Config for FACE_DETECTION.
    FaceDetectionConfig *FaceDetectionConfig `protobuf:"bytes,5,opt,name=face_detection_config,json=faceDetectionConfig,proto3" json:"face_detection_config,omitempty"`
    // Config for SPEECH_TRANSCRIPTION.
    SpeechTranscriptionConfig *SpeechTranscriptionConfig `protobuf:"bytes,6,opt,name=speech_transcription_config,json=speechTranscriptionConfig,proto3" json:"speech_transcription_config,omitempty"`
    // Config for TEXT_DETECTION.
    TextDetectionConfig *TextDetectionConfig `protobuf:"bytes,8,opt,name=text_detection_config,json=textDetectionConfig,proto3" json:"text_detection_config,omitempty"`
    // Config for PERSON_DETECTION.
    PersonDetectionConfig *PersonDetectionConfig `protobuf:"bytes,11,opt,name=person_detection_config,json=personDetectionConfig,proto3" json:"person_detection_config,omitempty"`
    // Config for OBJECT_TRACKING.
    ObjectTrackingConfig *ObjectTrackingConfig `protobuf:"bytes,13,opt,name=object_tracking_config,json=objectTrackingConfig,proto3" json:"object_tracking_config,omitempty"`
    // contains filtered or unexported fields
}

Video context and/or feature-specific parameters.

func (*VideoContext) Descriptor Uses

func (*VideoContext) Descriptor() ([]byte, []int)

Deprecated: Use VideoContext.ProtoReflect.Descriptor instead.

func (*VideoContext) GetExplicitContentDetectionConfig Uses

func (x *VideoContext) GetExplicitContentDetectionConfig() *ExplicitContentDetectionConfig

func (*VideoContext) GetFaceDetectionConfig Uses

func (x *VideoContext) GetFaceDetectionConfig() *FaceDetectionConfig

func (*VideoContext) GetLabelDetectionConfig Uses

func (x *VideoContext) GetLabelDetectionConfig() *LabelDetectionConfig

func (*VideoContext) GetObjectTrackingConfig Uses

func (x *VideoContext) GetObjectTrackingConfig() *ObjectTrackingConfig

func (*VideoContext) GetPersonDetectionConfig Uses

func (x *VideoContext) GetPersonDetectionConfig() *PersonDetectionConfig

func (*VideoContext) GetSegments Uses

func (x *VideoContext) GetSegments() []*VideoSegment

func (*VideoContext) GetShotChangeDetectionConfig Uses

func (x *VideoContext) GetShotChangeDetectionConfig() *ShotChangeDetectionConfig

func (*VideoContext) GetSpeechTranscriptionConfig Uses

func (x *VideoContext) GetSpeechTranscriptionConfig() *SpeechTranscriptionConfig

func (*VideoContext) GetTextDetectionConfig Uses

func (x *VideoContext) GetTextDetectionConfig() *TextDetectionConfig

func (*VideoContext) ProtoMessage Uses

func (*VideoContext) ProtoMessage()

func (*VideoContext) ProtoReflect Uses

func (x *VideoContext) ProtoReflect() protoreflect.Message

func (*VideoContext) Reset Uses

func (x *VideoContext) Reset()

func (*VideoContext) String Uses

func (x *VideoContext) String() string

type VideoIntelligenceServiceClient Uses

type VideoIntelligenceServiceClient interface {
    // Performs asynchronous video annotation. Progress and results can be
    // retrieved through the `google.longrunning.Operations` interface.
    // `Operation.metadata` contains `AnnotateVideoProgress` (progress).
    // `Operation.response` contains `AnnotateVideoResponse` (results).
    AnnotateVideo(ctx context.Context, in *AnnotateVideoRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
}

VideoIntelligenceServiceClient is the client API for VideoIntelligenceService service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewVideoIntelligenceServiceClient Uses

func NewVideoIntelligenceServiceClient(cc grpc.ClientConnInterface) VideoIntelligenceServiceClient

type VideoIntelligenceServiceServer Uses

type VideoIntelligenceServiceServer interface {
    // Performs asynchronous video annotation. Progress and results can be
    // retrieved through the `google.longrunning.Operations` interface.
    // `Operation.metadata` contains `AnnotateVideoProgress` (progress).
    // `Operation.response` contains `AnnotateVideoResponse` (results).
    AnnotateVideo(context.Context, *AnnotateVideoRequest) (*longrunning.Operation, error)
}

VideoIntelligenceServiceServer is the server API for VideoIntelligenceService service.

type VideoSegment Uses

type VideoSegment struct {

    // Time-offset, relative to the beginning of the video,
    // corresponding to the start of the segment (inclusive).
    StartTimeOffset *duration.Duration `protobuf:"bytes,1,opt,name=start_time_offset,json=startTimeOffset,proto3" json:"start_time_offset,omitempty"`
    // Time-offset, relative to the beginning of the video,
    // corresponding to the end of the segment (inclusive).
    EndTimeOffset *duration.Duration `protobuf:"bytes,2,opt,name=end_time_offset,json=endTimeOffset,proto3" json:"end_time_offset,omitempty"`
    // contains filtered or unexported fields
}

Video segment.

func (*VideoSegment) Descriptor Uses

func (*VideoSegment) Descriptor() ([]byte, []int)

Deprecated: Use VideoSegment.ProtoReflect.Descriptor instead.

func (*VideoSegment) GetEndTimeOffset Uses

func (x *VideoSegment) GetEndTimeOffset() *duration.Duration

func (*VideoSegment) GetStartTimeOffset Uses

func (x *VideoSegment) GetStartTimeOffset() *duration.Duration

func (*VideoSegment) ProtoMessage Uses

func (*VideoSegment) ProtoMessage()

func (*VideoSegment) ProtoReflect Uses

func (x *VideoSegment) ProtoReflect() protoreflect.Message

func (*VideoSegment) Reset Uses

func (x *VideoSegment) Reset()

func (*VideoSegment) String Uses

func (x *VideoSegment) String() string

type WordInfo Uses

type WordInfo struct {

    // Time offset relative to the beginning of the audio,
    // corresponding to the start of the spoken word. This field is only set if
    // `enable_word_time_offsets=true` and only in the top hypothesis. This is an
    // experimental feature and the accuracy of the time offset can vary.
    StartTime *duration.Duration `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
    // Time offset relative to the beginning of the audio,
    // corresponding to the end of the spoken word. This field is only set if
    // `enable_word_time_offsets=true` and only in the top hypothesis. This is an
    // experimental feature and the accuracy of the time offset can vary.
    EndTime *duration.Duration `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
    // The word corresponding to this set of information.
    Word string `protobuf:"bytes,3,opt,name=word,proto3" json:"word,omitempty"`
    // Output only. The confidence estimate between 0.0 and 1.0. A higher number
    // indicates an estimated greater likelihood that the recognized words are
    // correct. This field is set only for the top alternative.
    // This field is not guaranteed to be accurate and users should not rely on it
    // to be always provided.
    // The default of 0.0 is a sentinel value indicating `confidence` was not set.
    Confidence float32 `protobuf:"fixed32,4,opt,name=confidence,proto3" json:"confidence,omitempty"`
    // Output only. A distinct integer value is assigned for every speaker within
    // the audio. This field specifies which one of those speakers was detected to
    // have spoken this word. Value ranges from 1 up to diarization_speaker_count,
    // and is only set if speaker diarization is enabled.
    SpeakerTag int32 `protobuf:"varint,5,opt,name=speaker_tag,json=speakerTag,proto3" json:"speaker_tag,omitempty"`
    // contains filtered or unexported fields
}

Word-specific information for recognized words. Word information is only included in the response when certain request parameters are set, such as `enable_word_time_offsets`.

func (*WordInfo) Descriptor Uses

func (*WordInfo) Descriptor() ([]byte, []int)

Deprecated: Use WordInfo.ProtoReflect.Descriptor instead.

func (*WordInfo) GetConfidence Uses

func (x *WordInfo) GetConfidence() float32

func (*WordInfo) GetEndTime Uses

func (x *WordInfo) GetEndTime() *duration.Duration

func (*WordInfo) GetSpeakerTag Uses

func (x *WordInfo) GetSpeakerTag() int32

func (*WordInfo) GetStartTime Uses

func (x *WordInfo) GetStartTime() *duration.Duration

func (*WordInfo) GetWord Uses

func (x *WordInfo) GetWord() string

func (*WordInfo) ProtoMessage Uses

func (*WordInfo) ProtoMessage()

func (*WordInfo) ProtoReflect Uses

func (x *WordInfo) ProtoReflect() protoreflect.Message

func (*WordInfo) Reset Uses

func (x *WordInfo) Reset()

func (*WordInfo) String Uses

func (x *WordInfo) String() string

Package videointelligence imports 14 packages (graph). Updated 2020-08-13. Refresh now. Tools for package owners.