genproto: google.golang.org/genproto/googleapis/cloud/language/v1

package language

import "google.golang.org/genproto/googleapis/cloud/language/v1"

Index

Package Files

language_service.pb.go

Variables

var DependencyEdge_Label_name = map[int32]string{
    0:  "UNKNOWN",
    1:  "ABBREV",
    2:  "ACOMP",
    3:  "ADVCL",
    4:  "ADVMOD",
    5:  "AMOD",
    6:  "APPOS",
    7:  "ATTR",
    8:  "AUX",
    9:  "AUXPASS",
    10: "CC",
    11: "CCOMP",
    12: "CONJ",
    13: "CSUBJ",
    14: "CSUBJPASS",
    15: "DEP",
    16: "DET",
    17: "DISCOURSE",
    18: "DOBJ",
    19: "EXPL",
    20: "GOESWITH",
    21: "IOBJ",
    22: "MARK",
    23: "MWE",
    24: "MWV",
    25: "NEG",
    26: "NN",
    27: "NPADVMOD",
    28: "NSUBJ",
    29: "NSUBJPASS",
    30: "NUM",
    31: "NUMBER",
    32: "P",
    33: "PARATAXIS",
    34: "PARTMOD",
    35: "PCOMP",
    36: "POBJ",
    37: "POSS",
    38: "POSTNEG",
    39: "PRECOMP",
    40: "PRECONJ",
    41: "PREDET",
    42: "PREF",
    43: "PREP",
    44: "PRONL",
    45: "PRT",
    46: "PS",
    47: "QUANTMOD",
    48: "RCMOD",
    49: "RCMODREL",
    50: "RDROP",
    51: "REF",
    52: "REMNANT",
    53: "REPARANDUM",
    54: "ROOT",
    55: "SNUM",
    56: "SUFF",
    57: "TMOD",
    58: "TOPIC",
    59: "VMOD",
    60: "VOCATIVE",
    61: "XCOMP",
    62: "SUFFIX",
    63: "TITLE",
    64: "ADVPHMOD",
    65: "AUXCAUS",
    66: "AUXVV",
    67: "DTMOD",
    68: "FOREIGN",
    69: "KW",
    70: "LIST",
    71: "NOMC",
    72: "NOMCSUBJ",
    73: "NOMCSUBJPASS",
    74: "NUMC",
    75: "COP",
    76: "DISLOCATED",
    77: "ASP",
    78: "GMOD",
    79: "GOBJ",
    80: "INFMOD",
    81: "MES",
    82: "NCOMP",
}
var DependencyEdge_Label_value = map[string]int32{
    "UNKNOWN":      0,
    "ABBREV":       1,
    "ACOMP":        2,
    "ADVCL":        3,
    "ADVMOD":       4,
    "AMOD":         5,
    "APPOS":        6,
    "ATTR":         7,
    "AUX":          8,
    "AUXPASS":      9,
    "CC":           10,
    "CCOMP":        11,
    "CONJ":         12,
    "CSUBJ":        13,
    "CSUBJPASS":    14,
    "DEP":          15,
    "DET":          16,
    "DISCOURSE":    17,
    "DOBJ":         18,
    "EXPL":         19,
    "GOESWITH":     20,
    "IOBJ":         21,
    "MARK":         22,
    "MWE":          23,
    "MWV":          24,
    "NEG":          25,
    "NN":           26,
    "NPADVMOD":     27,
    "NSUBJ":        28,
    "NSUBJPASS":    29,
    "NUM":          30,
    "NUMBER":       31,
    "P":            32,
    "PARATAXIS":    33,
    "PARTMOD":      34,
    "PCOMP":        35,
    "POBJ":         36,
    "POSS":         37,
    "POSTNEG":      38,
    "PRECOMP":      39,
    "PRECONJ":      40,
    "PREDET":       41,
    "PREF":         42,
    "PREP":         43,
    "PRONL":        44,
    "PRT":          45,
    "PS":           46,
    "QUANTMOD":     47,
    "RCMOD":        48,
    "RCMODREL":     49,
    "RDROP":        50,
    "REF":          51,
    "REMNANT":      52,
    "REPARANDUM":   53,
    "ROOT":         54,
    "SNUM":         55,
    "SUFF":         56,
    "TMOD":         57,
    "TOPIC":        58,
    "VMOD":         59,
    "VOCATIVE":     60,
    "XCOMP":        61,
    "SUFFIX":       62,
    "TITLE":        63,
    "ADVPHMOD":     64,
    "AUXCAUS":      65,
    "AUXVV":        66,
    "DTMOD":        67,
    "FOREIGN":      68,
    "KW":           69,
    "LIST":         70,
    "NOMC":         71,
    "NOMCSUBJ":     72,
    "NOMCSUBJPASS": 73,
    "NUMC":         74,
    "COP":          75,
    "DISLOCATED":   76,
    "ASP":          77,
    "GMOD":         78,
    "GOBJ":         79,
    "INFMOD":       80,
    "MES":          81,
    "NCOMP":        82,
}
var Document_Type_name = map[int32]string{
    0:  "TYPE_UNSPECIFIED",
    1:  "PLAIN_TEXT",
    2:  "HTML",
}
var Document_Type_value = map[string]int32{
    "TYPE_UNSPECIFIED": 0,
    "PLAIN_TEXT":       1,
    "HTML":             2,
}
var EncodingType_name = map[int32]string{
    0:  "NONE",
    1:  "UTF8",
    2:  "UTF16",
    3:  "UTF32",
}
var EncodingType_value = map[string]int32{
    "NONE":  0,
    "UTF8":  1,
    "UTF16": 2,
    "UTF32": 3,
}
var EntityMention_Type_name = map[int32]string{
    0:  "TYPE_UNKNOWN",
    1:  "PROPER",
    2:  "COMMON",
}
var EntityMention_Type_value = map[string]int32{
    "TYPE_UNKNOWN": 0,
    "PROPER":       1,
    "COMMON":       2,
}
var Entity_Type_name = map[int32]string{
    0:  "UNKNOWN",
    1:  "PERSON",
    2:  "LOCATION",
    3:  "ORGANIZATION",
    4:  "EVENT",
    5:  "WORK_OF_ART",
    6:  "CONSUMER_GOOD",
    7:  "OTHER",
    9:  "PHONE_NUMBER",
    10: "ADDRESS",
    11: "DATE",
    12: "NUMBER",
    13: "PRICE",
}
var Entity_Type_value = map[string]int32{
    "UNKNOWN":       0,
    "PERSON":        1,
    "LOCATION":      2,
    "ORGANIZATION":  3,
    "EVENT":         4,
    "WORK_OF_ART":   5,
    "CONSUMER_GOOD": 6,
    "OTHER":         7,
    "PHONE_NUMBER":  9,
    "ADDRESS":       10,
    "DATE":          11,
    "NUMBER":        12,
    "PRICE":         13,
}
var PartOfSpeech_Aspect_name = map[int32]string{
    0:  "ASPECT_UNKNOWN",
    1:  "PERFECTIVE",
    2:  "IMPERFECTIVE",
    3:  "PROGRESSIVE",
}
var PartOfSpeech_Aspect_value = map[string]int32{
    "ASPECT_UNKNOWN": 0,
    "PERFECTIVE":     1,
    "IMPERFECTIVE":   2,
    "PROGRESSIVE":    3,
}
var PartOfSpeech_Case_name = map[int32]string{
    0:  "CASE_UNKNOWN",
    1:  "ACCUSATIVE",
    2:  "ADVERBIAL",
    3:  "COMPLEMENTIVE",
    4:  "DATIVE",
    5:  "GENITIVE",
    6:  "INSTRUMENTAL",
    7:  "LOCATIVE",
    8:  "NOMINATIVE",
    9:  "OBLIQUE",
    10: "PARTITIVE",
    11: "PREPOSITIONAL",
    12: "REFLEXIVE_CASE",
    13: "RELATIVE_CASE",
    14: "VOCATIVE",
}
var PartOfSpeech_Case_value = map[string]int32{
    "CASE_UNKNOWN":   0,
    "ACCUSATIVE":     1,
    "ADVERBIAL":      2,
    "COMPLEMENTIVE":  3,
    "DATIVE":         4,
    "GENITIVE":       5,
    "INSTRUMENTAL":   6,
    "LOCATIVE":       7,
    "NOMINATIVE":     8,
    "OBLIQUE":        9,
    "PARTITIVE":      10,
    "PREPOSITIONAL":  11,
    "REFLEXIVE_CASE": 12,
    "RELATIVE_CASE":  13,
    "VOCATIVE":       14,
}
var PartOfSpeech_Form_name = map[int32]string{
    0:  "FORM_UNKNOWN",
    1:  "ADNOMIAL",
    2:  "AUXILIARY",
    3:  "COMPLEMENTIZER",
    4:  "FINAL_ENDING",
    5:  "GERUND",
    6:  "REALIS",
    7:  "IRREALIS",
    8:  "SHORT",
    9:  "LONG",
    10: "ORDER",
    11: "SPECIFIC",
}
var PartOfSpeech_Form_value = map[string]int32{
    "FORM_UNKNOWN":   0,
    "ADNOMIAL":       1,
    "AUXILIARY":      2,
    "COMPLEMENTIZER": 3,
    "FINAL_ENDING":   4,
    "GERUND":         5,
    "REALIS":         6,
    "IRREALIS":       7,
    "SHORT":          8,
    "LONG":           9,
    "ORDER":          10,
    "SPECIFIC":       11,
}
var PartOfSpeech_Gender_name = map[int32]string{
    0:  "GENDER_UNKNOWN",
    1:  "FEMININE",
    2:  "MASCULINE",
    3:  "NEUTER",
}
var PartOfSpeech_Gender_value = map[string]int32{
    "GENDER_UNKNOWN": 0,
    "FEMININE":       1,
    "MASCULINE":      2,
    "NEUTER":         3,
}
var PartOfSpeech_Mood_name = map[int32]string{
    0:  "MOOD_UNKNOWN",
    1:  "CONDITIONAL_MOOD",
    2:  "IMPERATIVE",
    3:  "INDICATIVE",
    4:  "INTERROGATIVE",
    5:  "JUSSIVE",
    6:  "SUBJUNCTIVE",
}
var PartOfSpeech_Mood_value = map[string]int32{
    "MOOD_UNKNOWN":     0,
    "CONDITIONAL_MOOD": 1,
    "IMPERATIVE":       2,
    "INDICATIVE":       3,
    "INTERROGATIVE":    4,
    "JUSSIVE":          5,
    "SUBJUNCTIVE":      6,
}
var PartOfSpeech_Number_name = map[int32]string{
    0:  "NUMBER_UNKNOWN",
    1:  "SINGULAR",
    2:  "PLURAL",
    3:  "DUAL",
}
var PartOfSpeech_Number_value = map[string]int32{
    "NUMBER_UNKNOWN": 0,
    "SINGULAR":       1,
    "PLURAL":         2,
    "DUAL":           3,
}
var PartOfSpeech_Person_name = map[int32]string{
    0:  "PERSON_UNKNOWN",
    1:  "FIRST",
    2:  "SECOND",
    3:  "THIRD",
    4:  "REFLEXIVE_PERSON",
}
var PartOfSpeech_Person_value = map[string]int32{
    "PERSON_UNKNOWN":   0,
    "FIRST":            1,
    "SECOND":           2,
    "THIRD":            3,
    "REFLEXIVE_PERSON": 4,
}
var PartOfSpeech_Proper_name = map[int32]string{
    0:  "PROPER_UNKNOWN",
    1:  "PROPER",
    2:  "NOT_PROPER",
}
var PartOfSpeech_Proper_value = map[string]int32{
    "PROPER_UNKNOWN": 0,
    "PROPER":         1,
    "NOT_PROPER":     2,
}
var PartOfSpeech_Reciprocity_name = map[int32]string{
    0:  "RECIPROCITY_UNKNOWN",
    1:  "RECIPROCAL",
    2:  "NON_RECIPROCAL",
}
var PartOfSpeech_Reciprocity_value = map[string]int32{
    "RECIPROCITY_UNKNOWN": 0,
    "RECIPROCAL":          1,
    "NON_RECIPROCAL":      2,
}
var PartOfSpeech_Tag_name = map[int32]string{
    0:  "UNKNOWN",
    1:  "ADJ",
    2:  "ADP",
    3:  "ADV",
    4:  "CONJ",
    5:  "DET",
    6:  "NOUN",
    7:  "NUM",
    8:  "PRON",
    9:  "PRT",
    10: "PUNCT",
    11: "VERB",
    12: "X",
    13: "AFFIX",
}
var PartOfSpeech_Tag_value = map[string]int32{
    "UNKNOWN": 0,
    "ADJ":     1,
    "ADP":     2,
    "ADV":     3,
    "CONJ":    4,
    "DET":     5,
    "NOUN":    6,
    "NUM":     7,
    "PRON":    8,
    "PRT":     9,
    "PUNCT":   10,
    "VERB":    11,
    "X":       12,
    "AFFIX":   13,
}
var PartOfSpeech_Tense_name = map[int32]string{
    0:  "TENSE_UNKNOWN",
    1:  "CONDITIONAL_TENSE",
    2:  "FUTURE",
    3:  "PAST",
    4:  "PRESENT",
    5:  "IMPERFECT",
    6:  "PLUPERFECT",
}
var PartOfSpeech_Tense_value = map[string]int32{
    "TENSE_UNKNOWN":     0,
    "CONDITIONAL_TENSE": 1,
    "FUTURE":            2,
    "PAST":              3,
    "PRESENT":           4,
    "IMPERFECT":         5,
    "PLUPERFECT":        6,
}
var PartOfSpeech_Voice_name = map[int32]string{
    0:  "VOICE_UNKNOWN",
    1:  "ACTIVE",
    2:  "CAUSATIVE",
    3:  "PASSIVE",
}
var PartOfSpeech_Voice_value = map[string]int32{
    "VOICE_UNKNOWN": 0,
    "ACTIVE":        1,
    "CAUSATIVE":     2,
    "PASSIVE":       3,
}

func RegisterLanguageServiceServer Uses

func RegisterLanguageServiceServer(s *grpc.Server, srv LanguageServiceServer)

type AnalyzeEntitiesRequest Uses

type AnalyzeEntitiesRequest struct {
    // Input document.
    Document *Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"`
    // The encoding type used by the API to calculate offsets.
    EncodingType         EncodingType `protobuf:"varint,2,opt,name=encoding_type,json=encodingType,proto3,enum=google.cloud.language.v1.EncodingType" json:"encoding_type,omitempty"`
    XXX_NoUnkeyedLiteral struct{}     `json:"-"`
    XXX_unrecognized     []byte       `json:"-"`
    XXX_sizecache        int32        `json:"-"`
}

The entity analysis request message.
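
A minimal sketch of building this request for inline plain text; the document content is illustrative, and the gRPC call that would send the request is omitted.

package main

import (
    "fmt"

    "google.golang.org/genproto/googleapis/cloud/language/v1"
)

func main() {
    // Wrap the raw text in a Document; Type must be set or the API
    // returns an INVALID_ARGUMENT error.
    req := &language.AnalyzeEntitiesRequest{
        Document: &language.Document{
            Type:   language.Document_PLAIN_TEXT,
            Source: &language.Document_Content{Content: "Rust and Go are systems languages."},
        },
        // UTF8 matches how Go indexes strings, so offsets in the
        // response can be used directly as byte offsets.
        EncodingType: language.EncodingType_UTF8,
    }
    fmt.Println(req.GetDocument().GetContent(), req.GetEncodingType())
}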

func (*AnalyzeEntitiesRequest) Descriptor Uses

func (*AnalyzeEntitiesRequest) Descriptor() ([]byte, []int)

func (*AnalyzeEntitiesRequest) GetDocument Uses

func (m *AnalyzeEntitiesRequest) GetDocument() *Document

func (*AnalyzeEntitiesRequest) GetEncodingType Uses

func (m *AnalyzeEntitiesRequest) GetEncodingType() EncodingType

func (*AnalyzeEntitiesRequest) ProtoMessage Uses

func (*AnalyzeEntitiesRequest) ProtoMessage()

func (*AnalyzeEntitiesRequest) Reset Uses

func (m *AnalyzeEntitiesRequest) Reset()

func (*AnalyzeEntitiesRequest) String Uses

func (m *AnalyzeEntitiesRequest) String() string

func (*AnalyzeEntitiesRequest) XXX_DiscardUnknown Uses

func (m *AnalyzeEntitiesRequest) XXX_DiscardUnknown()

func (*AnalyzeEntitiesRequest) XXX_Marshal Uses

func (m *AnalyzeEntitiesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AnalyzeEntitiesRequest) XXX_Merge Uses

func (m *AnalyzeEntitiesRequest) XXX_Merge(src proto.Message)

func (*AnalyzeEntitiesRequest) XXX_Size Uses

func (m *AnalyzeEntitiesRequest) XXX_Size() int

func (*AnalyzeEntitiesRequest) XXX_Unmarshal Uses

func (m *AnalyzeEntitiesRequest) XXX_Unmarshal(b []byte) error

type AnalyzeEntitiesResponse Uses

type AnalyzeEntitiesResponse struct {
    // The recognized entities in the input document.
    Entities []*Entity `protobuf:"bytes,1,rep,name=entities,proto3" json:"entities,omitempty"`
    // The language of the text, which will be the same as the language specified
    // in the request or, if not specified, the automatically-detected language.
    // See [Document.language][google.cloud.language.v1.Document.language] field for more details.
    Language             string   `protobuf:"bytes,2,opt,name=language,proto3" json:"language,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

The entity analysis response message.
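
A short sketch of reading the response; the hand-built value below stands in for the result of an AnalyzeEntities call.

package main

import (
    "fmt"

    "google.golang.org/genproto/googleapis/cloud/language/v1"
)

func main() {
    // Hand-built response standing in for the result of an AnalyzeEntities RPC.
    resp := &language.AnalyzeEntitiesResponse{
        Language: "en",
        Entities: []*language.Entity{
            {Name: "Google", Type: language.Entity_ORGANIZATION, Salience: 0.8},
        },
    }
    for _, e := range resp.GetEntities() {
        // Entity_Type implements fmt.Stringer, so %v prints the enum name.
        fmt.Printf("%s\t%v\t%.2f\n", e.GetName(), e.GetType(), e.GetSalience())
    }
}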

func (*AnalyzeEntitiesResponse) Descriptor Uses

func (*AnalyzeEntitiesResponse) Descriptor() ([]byte, []int)

func (*AnalyzeEntitiesResponse) GetEntities Uses

func (m *AnalyzeEntitiesResponse) GetEntities() []*Entity

func (*AnalyzeEntitiesResponse) GetLanguage Uses

func (m *AnalyzeEntitiesResponse) GetLanguage() string

func (*AnalyzeEntitiesResponse) ProtoMessage Uses

func (*AnalyzeEntitiesResponse) ProtoMessage()

func (*AnalyzeEntitiesResponse) Reset Uses

func (m *AnalyzeEntitiesResponse) Reset()

func (*AnalyzeEntitiesResponse) String Uses

func (m *AnalyzeEntitiesResponse) String() string

func (*AnalyzeEntitiesResponse) XXX_DiscardUnknown Uses

func (m *AnalyzeEntitiesResponse) XXX_DiscardUnknown()

func (*AnalyzeEntitiesResponse) XXX_Marshal Uses

func (m *AnalyzeEntitiesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AnalyzeEntitiesResponse) XXX_Merge Uses

func (m *AnalyzeEntitiesResponse) XXX_Merge(src proto.Message)

func (*AnalyzeEntitiesResponse) XXX_Size Uses

func (m *AnalyzeEntitiesResponse) XXX_Size() int

func (*AnalyzeEntitiesResponse) XXX_Unmarshal Uses

func (m *AnalyzeEntitiesResponse) XXX_Unmarshal(b []byte) error

type AnalyzeEntitySentimentRequest Uses

type AnalyzeEntitySentimentRequest struct {
    // Input document.
    Document *Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"`
    // The encoding type used by the API to calculate offsets.
    EncodingType         EncodingType `protobuf:"varint,2,opt,name=encoding_type,json=encodingType,proto3,enum=google.cloud.language.v1.EncodingType" json:"encoding_type,omitempty"`
    XXX_NoUnkeyedLiteral struct{}     `json:"-"`
    XXX_unrecognized     []byte       `json:"-"`
    XXX_sizecache        int32        `json:"-"`
}

The entity-level sentiment analysis request message.

func (*AnalyzeEntitySentimentRequest) Descriptor Uses

func (*AnalyzeEntitySentimentRequest) Descriptor() ([]byte, []int)

func (*AnalyzeEntitySentimentRequest) GetDocument Uses

func (m *AnalyzeEntitySentimentRequest) GetDocument() *Document

func (*AnalyzeEntitySentimentRequest) GetEncodingType Uses

func (m *AnalyzeEntitySentimentRequest) GetEncodingType() EncodingType

func (*AnalyzeEntitySentimentRequest) ProtoMessage Uses

func (*AnalyzeEntitySentimentRequest) ProtoMessage()

func (*AnalyzeEntitySentimentRequest) Reset Uses

func (m *AnalyzeEntitySentimentRequest) Reset()

func (*AnalyzeEntitySentimentRequest) String Uses

func (m *AnalyzeEntitySentimentRequest) String() string

func (*AnalyzeEntitySentimentRequest) XXX_DiscardUnknown Uses

func (m *AnalyzeEntitySentimentRequest) XXX_DiscardUnknown()

func (*AnalyzeEntitySentimentRequest) XXX_Marshal Uses

func (m *AnalyzeEntitySentimentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AnalyzeEntitySentimentRequest) XXX_Merge Uses

func (m *AnalyzeEntitySentimentRequest) XXX_Merge(src proto.Message)

func (*AnalyzeEntitySentimentRequest) XXX_Size Uses

func (m *AnalyzeEntitySentimentRequest) XXX_Size() int

func (*AnalyzeEntitySentimentRequest) XXX_Unmarshal Uses

func (m *AnalyzeEntitySentimentRequest) XXX_Unmarshal(b []byte) error

type AnalyzeEntitySentimentResponse Uses

type AnalyzeEntitySentimentResponse struct {
    // The recognized entities in the input document with associated sentiments.
    Entities []*Entity `protobuf:"bytes,1,rep,name=entities,proto3" json:"entities,omitempty"`
    // The language of the text, which will be the same as the language specified
    // in the request or, if not specified, the automatically-detected language.
    // See [Document.language][google.cloud.language.v1.Document.language] field for more details.
    Language             string   `protobuf:"bytes,2,opt,name=language,proto3" json:"language,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

The entity-level sentiment analysis response message.

func (*AnalyzeEntitySentimentResponse) Descriptor Uses

func (*AnalyzeEntitySentimentResponse) Descriptor() ([]byte, []int)

func (*AnalyzeEntitySentimentResponse) GetEntities Uses

func (m *AnalyzeEntitySentimentResponse) GetEntities() []*Entity

func (*AnalyzeEntitySentimentResponse) GetLanguage Uses

func (m *AnalyzeEntitySentimentResponse) GetLanguage() string

func (*AnalyzeEntitySentimentResponse) ProtoMessage Uses

func (*AnalyzeEntitySentimentResponse) ProtoMessage()

func (*AnalyzeEntitySentimentResponse) Reset Uses

func (m *AnalyzeEntitySentimentResponse) Reset()

func (*AnalyzeEntitySentimentResponse) String Uses

func (m *AnalyzeEntitySentimentResponse) String() string

func (*AnalyzeEntitySentimentResponse) XXX_DiscardUnknown Uses

func (m *AnalyzeEntitySentimentResponse) XXX_DiscardUnknown()

func (*AnalyzeEntitySentimentResponse) XXX_Marshal Uses

func (m *AnalyzeEntitySentimentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AnalyzeEntitySentimentResponse) XXX_Merge Uses

func (m *AnalyzeEntitySentimentResponse) XXX_Merge(src proto.Message)

func (*AnalyzeEntitySentimentResponse) XXX_Size Uses

func (m *AnalyzeEntitySentimentResponse) XXX_Size() int

func (*AnalyzeEntitySentimentResponse) XXX_Unmarshal Uses

func (m *AnalyzeEntitySentimentResponse) XXX_Unmarshal(b []byte) error

type AnalyzeSentimentRequest Uses

type AnalyzeSentimentRequest struct {
    // Input document.
    Document *Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"`
    // The encoding type used by the API to calculate sentence offsets.
    EncodingType         EncodingType `protobuf:"varint,2,opt,name=encoding_type,json=encodingType,proto3,enum=google.cloud.language.v1.EncodingType" json:"encoding_type,omitempty"`
    XXX_NoUnkeyedLiteral struct{}     `json:"-"`
    XXX_unrecognized     []byte       `json:"-"`
    XXX_sizecache        int32        `json:"-"`
}

The sentiment analysis request message.

func (*AnalyzeSentimentRequest) Descriptor Uses

func (*AnalyzeSentimentRequest) Descriptor() ([]byte, []int)

func (*AnalyzeSentimentRequest) GetDocument Uses

func (m *AnalyzeSentimentRequest) GetDocument() *Document

func (*AnalyzeSentimentRequest) GetEncodingType Uses

func (m *AnalyzeSentimentRequest) GetEncodingType() EncodingType

func (*AnalyzeSentimentRequest) ProtoMessage Uses

func (*AnalyzeSentimentRequest) ProtoMessage()

func (*AnalyzeSentimentRequest) Reset Uses

func (m *AnalyzeSentimentRequest) Reset()

func (*AnalyzeSentimentRequest) String Uses

func (m *AnalyzeSentimentRequest) String() string

func (*AnalyzeSentimentRequest) XXX_DiscardUnknown Uses

func (m *AnalyzeSentimentRequest) XXX_DiscardUnknown()

func (*AnalyzeSentimentRequest) XXX_Marshal Uses

func (m *AnalyzeSentimentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AnalyzeSentimentRequest) XXX_Merge Uses

func (m *AnalyzeSentimentRequest) XXX_Merge(src proto.Message)

func (*AnalyzeSentimentRequest) XXX_Size Uses

func (m *AnalyzeSentimentRequest) XXX_Size() int

func (*AnalyzeSentimentRequest) XXX_Unmarshal Uses

func (m *AnalyzeSentimentRequest) XXX_Unmarshal(b []byte) error

type AnalyzeSentimentResponse Uses

type AnalyzeSentimentResponse struct {
    // The overall sentiment of the input document.
    DocumentSentiment *Sentiment `protobuf:"bytes,1,opt,name=document_sentiment,json=documentSentiment,proto3" json:"document_sentiment,omitempty"`
    // The language of the text, which will be the same as the language specified
    // in the request or, if not specified, the automatically-detected language.
    // See [Document.language][google.cloud.language.v1.Document.language] field for more details.
    Language string `protobuf:"bytes,2,opt,name=language,proto3" json:"language,omitempty"`
    // The sentiment for all the sentences in the document.
    Sentences            []*Sentence `protobuf:"bytes,3,rep,name=sentences,proto3" json:"sentences,omitempty"`
    XXX_NoUnkeyedLiteral struct{}    `json:"-"`
    XXX_unrecognized     []byte      `json:"-"`
    XXX_sizecache        int32       `json:"-"`
}

The sentiment analysis response message.
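
A sketch of reading the document-level and per-sentence results; it assumes the Sentiment, Sentence, and TextSpan messages defined elsewhere in this package, and the response value is hand-built rather than returned by an RPC.

package main

import (
    "fmt"

    "google.golang.org/genproto/googleapis/cloud/language/v1"
)

func main() {
    // Stand-in for the result of an AnalyzeSentiment RPC. Sentiment's
    // Score and Magnitude fields are defined elsewhere in this package.
    resp := &language.AnalyzeSentimentResponse{
        Language:          "en",
        DocumentSentiment: &language.Sentiment{Score: 0.6, Magnitude: 1.2},
    }
    if s := resp.GetDocumentSentiment(); s != nil {
        fmt.Printf("document sentiment: score=%.1f magnitude=%.1f\n", s.GetScore(), s.GetMagnitude())
    }
    // Per-sentence sentiment, when present, is available through GetSentences.
    for _, sent := range resp.GetSentences() {
        fmt.Println(sent.GetText().GetContent(), sent.GetSentiment().GetScore())
    }
}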

func (*AnalyzeSentimentResponse) Descriptor Uses

func (*AnalyzeSentimentResponse) Descriptor() ([]byte, []int)

func (*AnalyzeSentimentResponse) GetDocumentSentiment Uses

func (m *AnalyzeSentimentResponse) GetDocumentSentiment() *Sentiment

func (*AnalyzeSentimentResponse) GetLanguage Uses

func (m *AnalyzeSentimentResponse) GetLanguage() string

func (*AnalyzeSentimentResponse) GetSentences Uses

func (m *AnalyzeSentimentResponse) GetSentences() []*Sentence

func (*AnalyzeSentimentResponse) ProtoMessage Uses

func (*AnalyzeSentimentResponse) ProtoMessage()

func (*AnalyzeSentimentResponse) Reset Uses

func (m *AnalyzeSentimentResponse) Reset()

func (*AnalyzeSentimentResponse) String Uses

func (m *AnalyzeSentimentResponse) String() string

func (*AnalyzeSentimentResponse) XXX_DiscardUnknown Uses

func (m *AnalyzeSentimentResponse) XXX_DiscardUnknown()

func (*AnalyzeSentimentResponse) XXX_Marshal Uses

func (m *AnalyzeSentimentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AnalyzeSentimentResponse) XXX_Merge Uses

func (m *AnalyzeSentimentResponse) XXX_Merge(src proto.Message)

func (*AnalyzeSentimentResponse) XXX_Size Uses

func (m *AnalyzeSentimentResponse) XXX_Size() int

func (*AnalyzeSentimentResponse) XXX_Unmarshal Uses

func (m *AnalyzeSentimentResponse) XXX_Unmarshal(b []byte) error

type AnalyzeSyntaxRequest Uses

type AnalyzeSyntaxRequest struct {
    // Input document.
    Document *Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"`
    // The encoding type used by the API to calculate offsets.
    EncodingType         EncodingType `protobuf:"varint,2,opt,name=encoding_type,json=encodingType,proto3,enum=google.cloud.language.v1.EncodingType" json:"encoding_type,omitempty"`
    XXX_NoUnkeyedLiteral struct{}     `json:"-"`
    XXX_unrecognized     []byte       `json:"-"`
    XXX_sizecache        int32        `json:"-"`
}

The syntax analysis request message.

func (*AnalyzeSyntaxRequest) Descriptor Uses

func (*AnalyzeSyntaxRequest) Descriptor() ([]byte, []int)

func (*AnalyzeSyntaxRequest) GetDocument Uses

func (m *AnalyzeSyntaxRequest) GetDocument() *Document

func (*AnalyzeSyntaxRequest) GetEncodingType Uses

func (m *AnalyzeSyntaxRequest) GetEncodingType() EncodingType

func (*AnalyzeSyntaxRequest) ProtoMessage Uses

func (*AnalyzeSyntaxRequest) ProtoMessage()

func (*AnalyzeSyntaxRequest) Reset Uses

func (m *AnalyzeSyntaxRequest) Reset()

func (*AnalyzeSyntaxRequest) String Uses

func (m *AnalyzeSyntaxRequest) String() string

func (*AnalyzeSyntaxRequest) XXX_DiscardUnknown Uses

func (m *AnalyzeSyntaxRequest) XXX_DiscardUnknown()

func (*AnalyzeSyntaxRequest) XXX_Marshal Uses

func (m *AnalyzeSyntaxRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AnalyzeSyntaxRequest) XXX_Merge Uses

func (m *AnalyzeSyntaxRequest) XXX_Merge(src proto.Message)

func (*AnalyzeSyntaxRequest) XXX_Size Uses

func (m *AnalyzeSyntaxRequest) XXX_Size() int

func (*AnalyzeSyntaxRequest) XXX_Unmarshal Uses

func (m *AnalyzeSyntaxRequest) XXX_Unmarshal(b []byte) error

type AnalyzeSyntaxResponse Uses

type AnalyzeSyntaxResponse struct {
    // Sentences in the input document.
    Sentences []*Sentence `protobuf:"bytes,1,rep,name=sentences,proto3" json:"sentences,omitempty"`
    // Tokens, along with their syntactic information, in the input document.
    Tokens []*Token `protobuf:"bytes,2,rep,name=tokens,proto3" json:"tokens,omitempty"`
    // The language of the text, which will be the same as the language specified
    // in the request or, if not specified, the automatically-detected language.
    // See [Document.language][google.cloud.language.v1.Document.language] field for more details.
    Language             string   `protobuf:"bytes,3,opt,name=language,proto3" json:"language,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

The syntax analysis response message.

func (*AnalyzeSyntaxResponse) Descriptor Uses

func (*AnalyzeSyntaxResponse) Descriptor() ([]byte, []int)

func (*AnalyzeSyntaxResponse) GetLanguage Uses

func (m *AnalyzeSyntaxResponse) GetLanguage() string

func (*AnalyzeSyntaxResponse) GetSentences Uses

func (m *AnalyzeSyntaxResponse) GetSentences() []*Sentence

func (*AnalyzeSyntaxResponse) GetTokens Uses

func (m *AnalyzeSyntaxResponse) GetTokens() []*Token

func (*AnalyzeSyntaxResponse) ProtoMessage Uses

func (*AnalyzeSyntaxResponse) ProtoMessage()

func (*AnalyzeSyntaxResponse) Reset Uses

func (m *AnalyzeSyntaxResponse) Reset()

func (*AnalyzeSyntaxResponse) String Uses

func (m *AnalyzeSyntaxResponse) String() string

func (*AnalyzeSyntaxResponse) XXX_DiscardUnknown Uses

func (m *AnalyzeSyntaxResponse) XXX_DiscardUnknown()

func (*AnalyzeSyntaxResponse) XXX_Marshal Uses

func (m *AnalyzeSyntaxResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AnalyzeSyntaxResponse) XXX_Merge Uses

func (m *AnalyzeSyntaxResponse) XXX_Merge(src proto.Message)

func (*AnalyzeSyntaxResponse) XXX_Size Uses

func (m *AnalyzeSyntaxResponse) XXX_Size() int

func (*AnalyzeSyntaxResponse) XXX_Unmarshal Uses

func (m *AnalyzeSyntaxResponse) XXX_Unmarshal(b []byte) error

type AnnotateTextRequest Uses

type AnnotateTextRequest struct {
    // Input document.
    Document *Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"`
    // The enabled features.
    Features *AnnotateTextRequest_Features `protobuf:"bytes,2,opt,name=features,proto3" json:"features,omitempty"`
    // The encoding type used by the API to calculate offsets.
    EncodingType         EncodingType `protobuf:"varint,3,opt,name=encoding_type,json=encodingType,proto3,enum=google.cloud.language.v1.EncodingType" json:"encoding_type,omitempty"`
    XXX_NoUnkeyedLiteral struct{}     `json:"-"`
    XXX_unrecognized     []byte       `json:"-"`
    XXX_sizecache        int32        `json:"-"`
}

The request message for the text annotation API, which can perform multiple analysis types (sentiment, entities, and syntax) in one call.
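
A sketch of assembling a combined request; the text and the chosen features are illustrative, and the RPC that would send it is omitted.

package main

import (
    "fmt"

    "google.golang.org/genproto/googleapis/cloud/language/v1"
)

func main() {
    // One AnnotateText call can cover several analyses at once; enable
    // only the features you need.
    req := &language.AnnotateTextRequest{
        Document: &language.Document{
            Type:   language.Document_PLAIN_TEXT,
            Source: &language.Document_Content{Content: "The weather in Zurich is lovely today."},
        },
        Features: &language.AnnotateTextRequest_Features{
            ExtractEntities:          true,
            ExtractDocumentSentiment: true,
        },
        EncodingType: language.EncodingType_UTF8,
    }
    fmt.Println(req.GetFeatures().GetExtractEntities())
}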

func (*AnnotateTextRequest) Descriptor Uses

func (*AnnotateTextRequest) Descriptor() ([]byte, []int)

func (*AnnotateTextRequest) GetDocument Uses

func (m *AnnotateTextRequest) GetDocument() *Document

func (*AnnotateTextRequest) GetEncodingType Uses

func (m *AnnotateTextRequest) GetEncodingType() EncodingType

func (*AnnotateTextRequest) GetFeatures Uses

func (m *AnnotateTextRequest) GetFeatures() *AnnotateTextRequest_Features

func (*AnnotateTextRequest) ProtoMessage Uses

func (*AnnotateTextRequest) ProtoMessage()

func (*AnnotateTextRequest) Reset Uses

func (m *AnnotateTextRequest) Reset()

func (*AnnotateTextRequest) String Uses

func (m *AnnotateTextRequest) String() string

func (*AnnotateTextRequest) XXX_DiscardUnknown Uses

func (m *AnnotateTextRequest) XXX_DiscardUnknown()

func (*AnnotateTextRequest) XXX_Marshal Uses

func (m *AnnotateTextRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AnnotateTextRequest) XXX_Merge Uses

func (m *AnnotateTextRequest) XXX_Merge(src proto.Message)

func (*AnnotateTextRequest) XXX_Size Uses

func (m *AnnotateTextRequest) XXX_Size() int

func (*AnnotateTextRequest) XXX_Unmarshal Uses

func (m *AnnotateTextRequest) XXX_Unmarshal(b []byte) error

type AnnotateTextRequest_Features Uses

type AnnotateTextRequest_Features struct {
    // Extract syntax information.
    ExtractSyntax bool `protobuf:"varint,1,opt,name=extract_syntax,json=extractSyntax,proto3" json:"extract_syntax,omitempty"`
    // Extract entities.
    ExtractEntities bool `protobuf:"varint,2,opt,name=extract_entities,json=extractEntities,proto3" json:"extract_entities,omitempty"`
    // Extract document-level sentiment.
    ExtractDocumentSentiment bool `protobuf:"varint,3,opt,name=extract_document_sentiment,json=extractDocumentSentiment,proto3" json:"extract_document_sentiment,omitempty"`
    // Extract entities and their associated sentiment.
    ExtractEntitySentiment bool `protobuf:"varint,4,opt,name=extract_entity_sentiment,json=extractEntitySentiment,proto3" json:"extract_entity_sentiment,omitempty"`
    // Classify the full document into categories.
    ClassifyText         bool     `protobuf:"varint,6,opt,name=classify_text,json=classifyText,proto3" json:"classify_text,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

All available features for sentiment, syntax, and semantic analysis. Setting each one to true will enable that specific analysis for the input.

func (*AnnotateTextRequest_Features) Descriptor Uses

func (*AnnotateTextRequest_Features) Descriptor() ([]byte, []int)

func (*AnnotateTextRequest_Features) GetClassifyText Uses

func (m *AnnotateTextRequest_Features) GetClassifyText() bool

func (*AnnotateTextRequest_Features) GetExtractDocumentSentiment Uses

func (m *AnnotateTextRequest_Features) GetExtractDocumentSentiment() bool

func (*AnnotateTextRequest_Features) GetExtractEntities Uses

func (m *AnnotateTextRequest_Features) GetExtractEntities() bool

func (*AnnotateTextRequest_Features) GetExtractEntitySentiment Uses

func (m *AnnotateTextRequest_Features) GetExtractEntitySentiment() bool

func (*AnnotateTextRequest_Features) GetExtractSyntax Uses

func (m *AnnotateTextRequest_Features) GetExtractSyntax() bool

func (*AnnotateTextRequest_Features) ProtoMessage Uses

func (*AnnotateTextRequest_Features) ProtoMessage()

func (*AnnotateTextRequest_Features) Reset Uses

func (m *AnnotateTextRequest_Features) Reset()

func (*AnnotateTextRequest_Features) String Uses

func (m *AnnotateTextRequest_Features) String() string

func (*AnnotateTextRequest_Features) XXX_DiscardUnknown Uses

func (m *AnnotateTextRequest_Features) XXX_DiscardUnknown()

func (*AnnotateTextRequest_Features) XXX_Marshal Uses

func (m *AnnotateTextRequest_Features) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AnnotateTextRequest_Features) XXX_Merge Uses

func (m *AnnotateTextRequest_Features) XXX_Merge(src proto.Message)

func (*AnnotateTextRequest_Features) XXX_Size Uses

func (m *AnnotateTextRequest_Features) XXX_Size() int

func (*AnnotateTextRequest_Features) XXX_Unmarshal Uses

func (m *AnnotateTextRequest_Features) XXX_Unmarshal(b []byte) error

type AnnotateTextResponse Uses

type AnnotateTextResponse struct {
    // Sentences in the input document. Populated if the user enables
    // [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1.AnnotateTextRequest.Features.extract_syntax].
    Sentences []*Sentence `protobuf:"bytes,1,rep,name=sentences,proto3" json:"sentences,omitempty"`
    // Tokens, along with their syntactic information, in the input document.
    // Populated if the user enables
    // [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1.AnnotateTextRequest.Features.extract_syntax].
    Tokens []*Token `protobuf:"bytes,2,rep,name=tokens,proto3" json:"tokens,omitempty"`
    // Entities, along with their semantic information, in the input document.
    // Populated if the user enables
    // [AnnotateTextRequest.Features.extract_entities][google.cloud.language.v1.AnnotateTextRequest.Features.extract_entities].
    Entities []*Entity `protobuf:"bytes,3,rep,name=entities,proto3" json:"entities,omitempty"`
    // The overall sentiment for the document. Populated if the user enables
    // [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_document_sentiment].
    DocumentSentiment *Sentiment `protobuf:"bytes,4,opt,name=document_sentiment,json=documentSentiment,proto3" json:"document_sentiment,omitempty"`
    // The language of the text, which will be the same as the language specified
    // in the request or, if not specified, the automatically-detected language.
    // See [Document.language][google.cloud.language.v1.Document.language] field for more details.
    Language string `protobuf:"bytes,5,opt,name=language,proto3" json:"language,omitempty"`
    // Categories identified in the input document.
    Categories           []*ClassificationCategory `protobuf:"bytes,6,rep,name=categories,proto3" json:"categories,omitempty"`
    XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
    XXX_unrecognized     []byte                    `json:"-"`
    XXX_sizecache        int32                     `json:"-"`
}

The text annotations response message.
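
A sketch of consuming the response; the hand-built value (including the category name) is illustrative and assumes the Sentiment message defined elsewhere in this package. Only the fields whose features were enabled in the request are populated.

package main

import (
    "fmt"

    "google.golang.org/genproto/googleapis/cloud/language/v1"
)

func main() {
    // Stand-in for an AnnotateText result.
    resp := &language.AnnotateTextResponse{
        Language:          "en",
        DocumentSentiment: &language.Sentiment{Score: 0.4},
        Entities: []*language.Entity{
            {Name: "Zurich", Type: language.Entity_LOCATION},
        },
        Categories: []*language.ClassificationCategory{
            {Name: "/Travel", Confidence: 0.9},
        },
    }
    for _, e := range resp.GetEntities() {
        fmt.Println("entity:", e.GetName(), e.GetType())
    }
    for _, c := range resp.GetCategories() {
        fmt.Printf("category: %s (%.2f)\n", c.GetName(), c.GetConfidence())
    }
    fmt.Println("document score:", resp.GetDocumentSentiment().GetScore())
}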

func (*AnnotateTextResponse) Descriptor Uses

func (*AnnotateTextResponse) Descriptor() ([]byte, []int)

func (*AnnotateTextResponse) GetCategories Uses

func (m *AnnotateTextResponse) GetCategories() []*ClassificationCategory

func (*AnnotateTextResponse) GetDocumentSentiment Uses

func (m *AnnotateTextResponse) GetDocumentSentiment() *Sentiment

func (*AnnotateTextResponse) GetEntities Uses

func (m *AnnotateTextResponse) GetEntities() []*Entity

func (*AnnotateTextResponse) GetLanguage Uses

func (m *AnnotateTextResponse) GetLanguage() string

func (*AnnotateTextResponse) GetSentences Uses

func (m *AnnotateTextResponse) GetSentences() []*Sentence

func (*AnnotateTextResponse) GetTokens Uses

func (m *AnnotateTextResponse) GetTokens() []*Token

func (*AnnotateTextResponse) ProtoMessage Uses

func (*AnnotateTextResponse) ProtoMessage()

func (*AnnotateTextResponse) Reset Uses

func (m *AnnotateTextResponse) Reset()

func (*AnnotateTextResponse) String Uses

func (m *AnnotateTextResponse) String() string

func (*AnnotateTextResponse) XXX_DiscardUnknown Uses

func (m *AnnotateTextResponse) XXX_DiscardUnknown()

func (*AnnotateTextResponse) XXX_Marshal Uses

func (m *AnnotateTextResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AnnotateTextResponse) XXX_Merge Uses

func (m *AnnotateTextResponse) XXX_Merge(src proto.Message)

func (*AnnotateTextResponse) XXX_Size Uses

func (m *AnnotateTextResponse) XXX_Size() int

func (*AnnotateTextResponse) XXX_Unmarshal Uses

func (m *AnnotateTextResponse) XXX_Unmarshal(b []byte) error

type ClassificationCategory Uses

type ClassificationCategory struct {
    // The name of the category representing the document, from the [predefined
    // taxonomy](/natural-language/docs/categories).
    Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
    // The classifier's confidence of the category. Number represents how certain
    // the classifier is that this category represents the given text.
    Confidence           float32  `protobuf:"fixed32,2,opt,name=confidence,proto3" json:"confidence,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

Represents a category returned from the text classifier.

func (*ClassificationCategory) Descriptor Uses

func (*ClassificationCategory) Descriptor() ([]byte, []int)

func (*ClassificationCategory) GetConfidence Uses

func (m *ClassificationCategory) GetConfidence() float32

func (*ClassificationCategory) GetName Uses

func (m *ClassificationCategory) GetName() string

func (*ClassificationCategory) ProtoMessage Uses

func (*ClassificationCategory) ProtoMessage()

func (*ClassificationCategory) Reset Uses

func (m *ClassificationCategory) Reset()

func (*ClassificationCategory) String Uses

func (m *ClassificationCategory) String() string

func (*ClassificationCategory) XXX_DiscardUnknown Uses

func (m *ClassificationCategory) XXX_DiscardUnknown()

func (*ClassificationCategory) XXX_Marshal Uses

func (m *ClassificationCategory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ClassificationCategory) XXX_Merge Uses

func (m *ClassificationCategory) XXX_Merge(src proto.Message)

func (*ClassificationCategory) XXX_Size Uses

func (m *ClassificationCategory) XXX_Size() int

func (*ClassificationCategory) XXX_Unmarshal Uses

func (m *ClassificationCategory) XXX_Unmarshal(b []byte) error

type ClassifyTextRequest Uses

type ClassifyTextRequest struct {
    // Input document.
    Document             *Document `protobuf:"bytes,1,opt,name=document,proto3" json:"document,omitempty"`
    XXX_NoUnkeyedLiteral struct{}  `json:"-"`
    XXX_unrecognized     []byte    `json:"-"`
    XXX_sizecache        int32     `json:"-"`
}

The document classification request message.

func (*ClassifyTextRequest) Descriptor Uses

func (*ClassifyTextRequest) Descriptor() ([]byte, []int)

func (*ClassifyTextRequest) GetDocument Uses

func (m *ClassifyTextRequest) GetDocument() *Document

func (*ClassifyTextRequest) ProtoMessage Uses

func (*ClassifyTextRequest) ProtoMessage()

func (*ClassifyTextRequest) Reset Uses

func (m *ClassifyTextRequest) Reset()

func (*ClassifyTextRequest) String Uses

func (m *ClassifyTextRequest) String() string

func (*ClassifyTextRequest) XXX_DiscardUnknown Uses

func (m *ClassifyTextRequest) XXX_DiscardUnknown()

func (*ClassifyTextRequest) XXX_Marshal Uses

func (m *ClassifyTextRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ClassifyTextRequest) XXX_Merge Uses

func (m *ClassifyTextRequest) XXX_Merge(src proto.Message)

func (*ClassifyTextRequest) XXX_Size Uses

func (m *ClassifyTextRequest) XXX_Size() int

func (*ClassifyTextRequest) XXX_Unmarshal Uses

func (m *ClassifyTextRequest) XXX_Unmarshal(b []byte) error

type ClassifyTextResponse Uses

type ClassifyTextResponse struct {
    // Categories representing the input document.
    Categories           []*ClassificationCategory `protobuf:"bytes,1,rep,name=categories,proto3" json:"categories,omitempty"`
    XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
    XXX_unrecognized     []byte                    `json:"-"`
    XXX_sizecache        int32                     `json:"-"`
}

The document classification response message.
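
A sketch pairing the request with a hand-built response; the document text and category name are illustrative, and the RPC itself is omitted.

package main

import (
    "fmt"

    "google.golang.org/genproto/googleapis/cloud/language/v1"
)

func main() {
    // Request construction: only the input document is needed.
    _ = &language.ClassifyTextRequest{
        Document: &language.Document{
            Type:   language.Document_PLAIN_TEXT,
            Source: &language.Document_Content{Content: "Article text to classify."},
        },
    }
    // Stand-in response: each category carries a name from the predefined
    // taxonomy plus the classifier's confidence.
    resp := &language.ClassifyTextResponse{
        Categories: []*language.ClassificationCategory{
            {Name: "/Science/Computer Science", Confidence: 0.87},
        },
    }
    for _, c := range resp.GetCategories() {
        fmt.Printf("%s\t%.2f\n", c.GetName(), c.GetConfidence())
    }
}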

func (*ClassifyTextResponse) Descriptor Uses

func (*ClassifyTextResponse) Descriptor() ([]byte, []int)

func (*ClassifyTextResponse) GetCategories Uses

func (m *ClassifyTextResponse) GetCategories() []*ClassificationCategory

func (*ClassifyTextResponse) ProtoMessage Uses

func (*ClassifyTextResponse) ProtoMessage()

func (*ClassifyTextResponse) Reset Uses

func (m *ClassifyTextResponse) Reset()

func (*ClassifyTextResponse) String Uses

func (m *ClassifyTextResponse) String() string

func (*ClassifyTextResponse) XXX_DiscardUnknown Uses

func (m *ClassifyTextResponse) XXX_DiscardUnknown()

func (*ClassifyTextResponse) XXX_Marshal Uses

func (m *ClassifyTextResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ClassifyTextResponse) XXX_Merge Uses

func (m *ClassifyTextResponse) XXX_Merge(src proto.Message)

func (*ClassifyTextResponse) XXX_Size Uses

func (m *ClassifyTextResponse) XXX_Size() int

func (*ClassifyTextResponse) XXX_Unmarshal Uses

func (m *ClassifyTextResponse) XXX_Unmarshal(b []byte) error

type DependencyEdge Uses

type DependencyEdge struct {
    // Represents the head of this token in the dependency tree.
    // This is the index of the token which has an arc going to this token.
    // The index is the position of the token in the array of tokens returned
    // by the API method. If this token is a root token, then the
    // `head_token_index` is its own index.
    HeadTokenIndex int32 `protobuf:"varint,1,opt,name=head_token_index,json=headTokenIndex,proto3" json:"head_token_index,omitempty"`
    // The parse label for the token.
    Label                DependencyEdge_Label `protobuf:"varint,2,opt,name=label,proto3,enum=google.cloud.language.v1.DependencyEdge_Label" json:"label,omitempty"`
    XXX_NoUnkeyedLiteral struct{}             `json:"-"`
    XXX_unrecognized     []byte               `json:"-"`
    XXX_sizecache        int32                `json:"-"`
}

Represents dependency parse tree information for a token. (For more information on dependency labels, see http://www.aclweb.org/anthology/P13-2017.)
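
A sketch of following head_token_index links back to the sentence root; it assumes the Token message defined elsewhere in this package, and the two tokens are hand-built.

package main

import (
    "fmt"

    "google.golang.org/genproto/googleapis/cloud/language/v1"
)

// pathToRoot follows head_token_index links until it reaches the token
// whose head index is its own position, i.e. the root of the parse tree.
func pathToRoot(tokens []*language.Token, i int32) []int32 {
    path := []int32{i}
    for {
        head := tokens[i].GetDependencyEdge().GetHeadTokenIndex()
        if head == i {
            return path // the root token points at itself
        }
        path = append(path, head)
        i = head
    }
}

func main() {
    // Two hand-built tokens: index 1 is the root, index 0 depends on it.
    tokens := []*language.Token{
        {DependencyEdge: &language.DependencyEdge{HeadTokenIndex: 1, Label: language.DependencyEdge_NSUBJ}},
        {DependencyEdge: &language.DependencyEdge{HeadTokenIndex: 1, Label: language.DependencyEdge_ROOT}},
    }
    fmt.Println(pathToRoot(tokens, 0)) // [0 1]
}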

func (*DependencyEdge) Descriptor Uses

func (*DependencyEdge) Descriptor() ([]byte, []int)

func (*DependencyEdge) GetHeadTokenIndex Uses

func (m *DependencyEdge) GetHeadTokenIndex() int32

func (*DependencyEdge) GetLabel Uses

func (m *DependencyEdge) GetLabel() DependencyEdge_Label

func (*DependencyEdge) ProtoMessage Uses

func (*DependencyEdge) ProtoMessage()

func (*DependencyEdge) Reset Uses

func (m *DependencyEdge) Reset()

func (*DependencyEdge) String Uses

func (m *DependencyEdge) String() string

func (*DependencyEdge) XXX_DiscardUnknown Uses

func (m *DependencyEdge) XXX_DiscardUnknown()

func (*DependencyEdge) XXX_Marshal Uses

func (m *DependencyEdge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DependencyEdge) XXX_Merge Uses

func (m *DependencyEdge) XXX_Merge(src proto.Message)

func (*DependencyEdge) XXX_Size Uses

func (m *DependencyEdge) XXX_Size() int

func (*DependencyEdge) XXX_Unmarshal Uses

func (m *DependencyEdge) XXX_Unmarshal(b []byte) error

type DependencyEdge_Label Uses

type DependencyEdge_Label int32

The parse label enum for the token.

const (
    // Unknown
    DependencyEdge_UNKNOWN DependencyEdge_Label = 0
    // Abbreviation modifier
    DependencyEdge_ABBREV DependencyEdge_Label = 1
    // Adjectival complement
    DependencyEdge_ACOMP DependencyEdge_Label = 2
    // Adverbial clause modifier
    DependencyEdge_ADVCL DependencyEdge_Label = 3
    // Adverbial modifier
    DependencyEdge_ADVMOD DependencyEdge_Label = 4
    // Adjectival modifier of an NP
    DependencyEdge_AMOD DependencyEdge_Label = 5
    // Appositional modifier of an NP
    DependencyEdge_APPOS DependencyEdge_Label = 6
    // Attribute dependent of a copular verb
    DependencyEdge_ATTR DependencyEdge_Label = 7
    // Auxiliary (non-main) verb
    DependencyEdge_AUX DependencyEdge_Label = 8
    // Passive auxiliary
    DependencyEdge_AUXPASS DependencyEdge_Label = 9
    // Coordinating conjunction
    DependencyEdge_CC DependencyEdge_Label = 10
    // Clausal complement of a verb or adjective
    DependencyEdge_CCOMP DependencyEdge_Label = 11
    // Conjunct
    DependencyEdge_CONJ DependencyEdge_Label = 12
    // Clausal subject
    DependencyEdge_CSUBJ DependencyEdge_Label = 13
    // Clausal passive subject
    DependencyEdge_CSUBJPASS DependencyEdge_Label = 14
    // Dependency (unable to determine)
    DependencyEdge_DEP DependencyEdge_Label = 15
    // Determiner
    DependencyEdge_DET DependencyEdge_Label = 16
    // Discourse
    DependencyEdge_DISCOURSE DependencyEdge_Label = 17
    // Direct object
    DependencyEdge_DOBJ DependencyEdge_Label = 18
    // Expletive
    DependencyEdge_EXPL DependencyEdge_Label = 19
    // Goes with (part of a word in a text not well edited)
    DependencyEdge_GOESWITH DependencyEdge_Label = 20
    // Indirect object
    DependencyEdge_IOBJ DependencyEdge_Label = 21
    // Marker (word introducing a subordinate clause)
    DependencyEdge_MARK DependencyEdge_Label = 22
    // Multi-word expression
    DependencyEdge_MWE DependencyEdge_Label = 23
    // Multi-word verbal expression
    DependencyEdge_MWV DependencyEdge_Label = 24
    // Negation modifier
    DependencyEdge_NEG DependencyEdge_Label = 25
    // Noun compound modifier
    DependencyEdge_NN DependencyEdge_Label = 26
    // Noun phrase used as an adverbial modifier
    DependencyEdge_NPADVMOD DependencyEdge_Label = 27
    // Nominal subject
    DependencyEdge_NSUBJ DependencyEdge_Label = 28
    // Passive nominal subject
    DependencyEdge_NSUBJPASS DependencyEdge_Label = 29
    // Numeric modifier of a noun
    DependencyEdge_NUM DependencyEdge_Label = 30
    // Element of compound number
    DependencyEdge_NUMBER DependencyEdge_Label = 31
    // Punctuation mark
    DependencyEdge_P DependencyEdge_Label = 32
    // Parataxis relation
    DependencyEdge_PARATAXIS DependencyEdge_Label = 33
    // Participial modifier
    DependencyEdge_PARTMOD DependencyEdge_Label = 34
    // The complement of a preposition is a clause
    DependencyEdge_PCOMP DependencyEdge_Label = 35
    // Object of a preposition
    DependencyEdge_POBJ DependencyEdge_Label = 36
    // Possession modifier
    DependencyEdge_POSS DependencyEdge_Label = 37
    // Postverbal negative particle
    DependencyEdge_POSTNEG DependencyEdge_Label = 38
    // Predicate complement
    DependencyEdge_PRECOMP DependencyEdge_Label = 39
    // Preconjunct
    DependencyEdge_PRECONJ DependencyEdge_Label = 40
    // Predeterminer
    DependencyEdge_PREDET DependencyEdge_Label = 41
    // Prefix
    DependencyEdge_PREF DependencyEdge_Label = 42
    // Prepositional modifier
    DependencyEdge_PREP DependencyEdge_Label = 43
    // The relationship between a verb and verbal morpheme
    DependencyEdge_PRONL DependencyEdge_Label = 44
    // Particle
    DependencyEdge_PRT DependencyEdge_Label = 45
    // Associative or possessive marker
    DependencyEdge_PS DependencyEdge_Label = 46
    // Quantifier phrase modifier
    DependencyEdge_QUANTMOD DependencyEdge_Label = 47
    // Relative clause modifier
    DependencyEdge_RCMOD DependencyEdge_Label = 48
    // Complementizer in relative clause
    DependencyEdge_RCMODREL DependencyEdge_Label = 49
    // Ellipsis without a preceding predicate
    DependencyEdge_RDROP DependencyEdge_Label = 50
    // Referent
    DependencyEdge_REF DependencyEdge_Label = 51
    // Remnant
    DependencyEdge_REMNANT DependencyEdge_Label = 52
    // Reparandum
    DependencyEdge_REPARANDUM DependencyEdge_Label = 53
    // Root
    DependencyEdge_ROOT DependencyEdge_Label = 54
    // Suffix specifying a unit of number
    DependencyEdge_SNUM DependencyEdge_Label = 55
    // Suffix
    DependencyEdge_SUFF DependencyEdge_Label = 56
    // Temporal modifier
    DependencyEdge_TMOD DependencyEdge_Label = 57
    // Topic marker
    DependencyEdge_TOPIC DependencyEdge_Label = 58
    // Clause headed by an infinite form of the verb that modifies a noun
    DependencyEdge_VMOD DependencyEdge_Label = 59
    // Vocative
    DependencyEdge_VOCATIVE DependencyEdge_Label = 60
    // Open clausal complement
    DependencyEdge_XCOMP DependencyEdge_Label = 61
    // Name suffix
    DependencyEdge_SUFFIX DependencyEdge_Label = 62
    // Name title
    DependencyEdge_TITLE DependencyEdge_Label = 63
    // Adverbial phrase modifier
    DependencyEdge_ADVPHMOD DependencyEdge_Label = 64
    // Causative auxiliary
    DependencyEdge_AUXCAUS DependencyEdge_Label = 65
    // Helper auxiliary
    DependencyEdge_AUXVV DependencyEdge_Label = 66
    // Rentaishi (Prenominal modifier)
    DependencyEdge_DTMOD DependencyEdge_Label = 67
    // Foreign words
    DependencyEdge_FOREIGN DependencyEdge_Label = 68
    // Keyword
    DependencyEdge_KW DependencyEdge_Label = 69
    // List for chains of comparable items
    DependencyEdge_LIST DependencyEdge_Label = 70
    // Nominalized clause
    DependencyEdge_NOMC DependencyEdge_Label = 71
    // Nominalized clausal subject
    DependencyEdge_NOMCSUBJ DependencyEdge_Label = 72
    // Nominalized clausal passive
    DependencyEdge_NOMCSUBJPASS DependencyEdge_Label = 73
    // Compound of numeric modifier
    DependencyEdge_NUMC DependencyEdge_Label = 74
    // Copula
    DependencyEdge_COP DependencyEdge_Label = 75
    // Dislocated relation (for fronted/topicalized elements)
    DependencyEdge_DISLOCATED DependencyEdge_Label = 76
    // Aspect marker
    DependencyEdge_ASP DependencyEdge_Label = 77
    // Genitive modifier
    DependencyEdge_GMOD DependencyEdge_Label = 78
    // Genitive object
    DependencyEdge_GOBJ DependencyEdge_Label = 79
    // Infinitival modifier
    DependencyEdge_INFMOD DependencyEdge_Label = 80
    // Measure
    DependencyEdge_MES DependencyEdge_Label = 81
    // Nominal complement of a noun
    DependencyEdge_NCOMP DependencyEdge_Label = 82
)

func (DependencyEdge_Label) EnumDescriptor Uses

func (DependencyEdge_Label) EnumDescriptor() ([]byte, []int)

func (DependencyEdge_Label) String Uses

func (x DependencyEdge_Label) String() string
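
A small sketch of the round trip between the enum, its String method, and the generated name/value maps above.

package main

import (
    "fmt"

    "google.golang.org/genproto/googleapis/cloud/language/v1"
)

func main() {
    lbl := language.DependencyEdge_NSUBJ
    // String() and the generated name map agree on the enum's wire name.
    fmt.Println(lbl.String())                                   // NSUBJ
    fmt.Println(language.DependencyEdge_Label_name[int32(lbl)]) // NSUBJ
    // The value map goes the other way, from name back to the enum number.
    fmt.Println(language.DependencyEdge_Label_value["ROOT"]) // 54
}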

type Document Uses

type Document struct {
    // Required. If the type is not set or is `TYPE_UNSPECIFIED`,
    // returns an `INVALID_ARGUMENT` error.
    Type Document_Type `protobuf:"varint,1,opt,name=type,proto3,enum=google.cloud.language.v1.Document_Type" json:"type,omitempty"`
    // The source of the document: a string containing the content or a
    // Google Cloud Storage URI.
    //
    // Types that are valid to be assigned to Source:
    //	*Document_Content
    //	*Document_GcsContentUri
    Source isDocument_Source `protobuf_oneof:"source"`
    // The language of the document (if not specified, the language is
    // automatically detected). Both ISO and BCP-47 language codes are
    // accepted.<br>
    // [Language Support](/natural-language/docs/languages)
    // lists currently supported languages for each API method.
    // If the language (either specified by the caller or automatically detected)
    // is not supported by the called API method, an `INVALID_ARGUMENT` error
    // is returned.
    Language             string   `protobuf:"bytes,4,opt,name=language,proto3" json:"language,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

Represents the input to API methods.
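
A sketch of the two ways to populate the source oneof; the inline text and the Cloud Storage URI are illustrative.

package main

import (
    "fmt"

    "google.golang.org/genproto/googleapis/cloud/language/v1"
)

func main() {
    // Inline content: the source is the Document_Content oneof wrapper.
    doc := &language.Document{
        Type:     language.Document_PLAIN_TEXT,
        Source:   &language.Document_Content{Content: "Hello, world."},
        Language: "en", // optional; omit to let the API detect the language
    }
    fmt.Println(doc.GetContent())

    // Alternatively, point at a Cloud Storage object instead of inlining text.
    gcsDoc := &language.Document{
        Type:   language.Document_HTML,
        Source: &language.Document_GcsContentUri{GcsContentUri: "gs://my-bucket/page.html"},
    }
    fmt.Println(gcsDoc.GetGcsContentUri())
}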

func (*Document) Descriptor Uses

func (*Document) Descriptor() ([]byte, []int)

func (*Document) GetContent Uses

func (m *Document) GetContent() string

func (*Document) GetGcsContentUri Uses

func (m *Document) GetGcsContentUri() string

func (*Document) GetLanguage Uses

func (m *Document) GetLanguage() string

func (*Document) GetSource Uses

func (m *Document) GetSource() isDocument_Source

func (*Document) GetType Uses

func (m *Document) GetType() Document_Type

func (*Document) ProtoMessage Uses

func (*Document) ProtoMessage()

func (*Document) Reset Uses

func (m *Document) Reset()

func (*Document) String Uses

func (m *Document) String() string

func (*Document) XXX_DiscardUnknown Uses

func (m *Document) XXX_DiscardUnknown()

func (*Document) XXX_Marshal Uses

func (m *Document) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*Document) XXX_Merge Uses

func (m *Document) XXX_Merge(src proto.Message)

func (*Document) XXX_OneofWrappers Uses

func (*Document) XXX_OneofWrappers() []interface{}

XXX_OneofWrappers is for the internal use of the proto package.

func (*Document) XXX_Size Uses

func (m *Document) XXX_Size() int

func (*Document) XXX_Unmarshal Uses

func (m *Document) XXX_Unmarshal(b []byte) error

type Document_Content Uses

type Document_Content struct {
    Content string `protobuf:"bytes,2,opt,name=content,proto3,oneof"`
}

type Document_GcsContentUri Uses

type Document_GcsContentUri struct {
    GcsContentUri string `protobuf:"bytes,3,opt,name=gcs_content_uri,json=gcsContentUri,proto3,oneof"`
}

type Document_Type Uses

type Document_Type int32

The document types enum.

const (
    // The content type is not specified.
    Document_TYPE_UNSPECIFIED Document_Type = 0
    // Plain text
    Document_PLAIN_TEXT Document_Type = 1
    // HTML
    Document_HTML Document_Type = 2
)

func (Document_Type) EnumDescriptor Uses

func (Document_Type) EnumDescriptor() ([]byte, []int)

func (Document_Type) String Uses

func (x Document_Type) String() string

type EncodingType Uses

type EncodingType int32

Represents the text encoding that the caller uses to process the output. Providing an `EncodingType` is recommended because the API provides the beginning offsets for various outputs, such as tokens and mentions, and languages that natively use different text encodings may access offsets differently.

const (
    // If `EncodingType` is not specified, encoding-dependent information (such as
    // `begin_offset`) will be set at `-1`.
    EncodingType_NONE EncodingType = 0
    // Encoding-dependent information (such as `begin_offset`) is calculated based
    // on the UTF-8 encoding of the input. C++ and Go are examples of languages
    // that use this encoding natively.
    EncodingType_UTF8 EncodingType = 1
    // Encoding-dependent information (such as `begin_offset`) is calculated based
    // on the UTF-16 encoding of the input. Java and JavaScript are examples of
    // languages that use this encoding natively.
    EncodingType_UTF16 EncodingType = 2
    // Encoding-dependent information (such as `begin_offset`) is calculated based
    // on the UTF-32 encoding of the input. Python is an example of a language
    // that uses this encoding natively.
    EncodingType_UTF32 EncodingType = 3
)
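
Because Go strings are UTF-8, offsets returned under EncodingType_UTF8 can be used directly as byte indexes; the sketch below assumes a begin offset of the kind carried by the TextSpan message defined elsewhere in this package.

package main

import (
    "fmt"

    "google.golang.org/genproto/googleapis/cloud/language/v1"
)

func main() {
    content := "Gophers love Go."
    enc := language.EncodingType_UTF8

    // beginOffset stands in for a begin_offset value returned by the API;
    // with UTF8 encoding it is a byte index into the Go string, and with
    // NONE it would be -1.
    beginOffset := int32(13)
    if enc == language.EncodingType_UTF8 && beginOffset >= 0 {
        fmt.Println(content[beginOffset:]) // "Go."
    }
}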

func (EncodingType) EnumDescriptor Uses

func (EncodingType) EnumDescriptor() ([]byte, []int)

func (EncodingType) String Uses

func (x EncodingType) String() string

type Entity Uses

type Entity struct {
    // The representative name for the entity.
    Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
    // The entity type.
    Type Entity_Type `protobuf:"varint,2,opt,name=type,proto3,enum=google.cloud.language.v1.Entity_Type" json:"type,omitempty"`
    // Metadata associated with the entity.
    //
    // For most entity types, the metadata is a Wikipedia URL (`wikipedia_url`)
    // and Knowledge Graph MID (`mid`), if they are available. For the metadata
    // associated with other entity types, see the Type table below.
    Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
    // The salience score associated with the entity in the [0, 1.0] range.
    //
    // The salience score for an entity provides information about the
    // importance or centrality of that entity to the entire document text.
    // Scores closer to 0 are less salient, while scores closer to 1.0 are highly
    // salient.
    Salience float32 `protobuf:"fixed32,4,opt,name=salience,proto3" json:"salience,omitempty"`
    // The mentions of this entity in the input document. The API currently
    // supports proper noun mentions.
    Mentions []*EntityMention `protobuf:"bytes,5,rep,name=mentions,proto3" json:"mentions,omitempty"`
    // For calls to [AnalyzeEntitySentiment][] or if
    // [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_entity_sentiment] is set to
    // true, this field will contain the aggregate sentiment expressed for this
    // entity in the provided document.
    Sentiment            *Sentiment `protobuf:"bytes,6,opt,name=sentiment,proto3" json:"sentiment,omitempty"`
    XXX_NoUnkeyedLiteral struct{}   `json:"-"`
    XXX_unrecognized     []byte     `json:"-"`
    XXX_sizecache        int32      `json:"-"`
}

Represents a phrase in the text that is a known entity, such as a person, an organization, or location. The API associates information, such as salience and mentions, with entities.
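
A sketch of reading salience and the metadata map; the entity values are hand-built and illustrative.

package main

import (
    "fmt"

    "google.golang.org/genproto/googleapis/cloud/language/v1"
)

func main() {
    // Hand-built entity; metadata keys such as "wikipedia_url" and "mid"
    // are the ones described above for most entity types.
    e := &language.Entity{
        Name:     "Ada Lovelace",
        Type:     language.Entity_PERSON,
        Salience: 0.42,
        Metadata: map[string]string{
            "wikipedia_url": "https://en.wikipedia.org/wiki/Ada_Lovelace",
        },
    }
    fmt.Printf("%s (%v), salience %.2f\n", e.GetName(), e.GetType(), e.GetSalience())
    if url, ok := e.GetMetadata()["wikipedia_url"]; ok {
        fmt.Println("see:", url)
    }
}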

func (*Entity) Descriptor Uses

func (*Entity) Descriptor() ([]byte, []int)

func (*Entity) GetMentions Uses

func (m *Entity) GetMentions() []*EntityMention

func (*Entity) GetMetadata Uses

func (m *Entity) GetMetadata() map[string]string

func (*Entity) GetName Uses

func (m *Entity) GetName() string

func (*Entity) GetSalience Uses

func (m *Entity) GetSalience() float32

func (*Entity) GetSentiment Uses

func (m *Entity) GetSentiment() *Sentiment

func (*Entity) GetType Uses

func (m *Entity) GetType() Entity_Type

func (*Entity) ProtoMessage Uses

func (*Entity) ProtoMessage()

func (*Entity) Reset Uses

func (m *Entity) Reset()

func (*Entity) String Uses

func (m *Entity) String() string

func (*Entity) XXX_DiscardUnknown Uses

func (m *Entity) XXX_DiscardUnknown()

func (*Entity) XXX_Marshal Uses

func (m *Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*Entity) XXX_Merge Uses

func (m *Entity) XXX_Merge(src proto.Message)

func (*Entity) XXX_Size Uses

func (m *Entity) XXX_Size() int

func (*Entity) XXX_Unmarshal Uses

func (m *Entity) XXX_Unmarshal(b []byte) error

type EntityMention Uses

type EntityMention struct {
    // The mention text.
    Text *TextSpan `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
    // The type of the entity mention.
    Type EntityMention_Type `protobuf:"varint,2,opt,name=type,proto3,enum=google.cloud.language.v1.EntityMention_Type" json:"type,omitempty"`
    // For calls to [AnalyzeEntitySentiment][] or if
    // [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_entity_sentiment] is set to
    // true, this field will contain the sentiment expressed for this mention of
    // the entity in the provided document.
    Sentiment            *Sentiment `protobuf:"bytes,3,opt,name=sentiment,proto3" json:"sentiment,omitempty"`
    XXX_NoUnkeyedLiteral struct{}   `json:"-"`
    XXX_unrecognized     []byte     `json:"-"`
    XXX_sizecache        int32      `json:"-"`
}

Represents a mention for an entity in the text. Currently, proper noun mentions are supported.
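
A short sketch (e is assumed to be an *Entity from one of the entity-analysis responses) of walking its mentions:

    for _, m := range e.GetMentions() {
        span := m.GetText()
        fmt.Printf("%q at offset %d, mention type %s\n",
            span.GetContent(), span.GetBeginOffset(), m.GetType())
    }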

func (*EntityMention) Descriptor Uses

func (*EntityMention) Descriptor() ([]byte, []int)

func (*EntityMention) GetSentiment Uses

func (m *EntityMention) GetSentiment() *Sentiment

func (*EntityMention) GetText Uses

func (m *EntityMention) GetText() *TextSpan

func (*EntityMention) GetType Uses

func (m *EntityMention) GetType() EntityMention_Type

func (*EntityMention) ProtoMessage Uses

func (*EntityMention) ProtoMessage()

func (*EntityMention) Reset Uses

func (m *EntityMention) Reset()

func (*EntityMention) String Uses

func (m *EntityMention) String() string

func (*EntityMention) XXX_DiscardUnknown Uses

func (m *EntityMention) XXX_DiscardUnknown()

func (*EntityMention) XXX_Marshal Uses

func (m *EntityMention) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*EntityMention) XXX_Merge Uses

func (m *EntityMention) XXX_Merge(src proto.Message)

func (*EntityMention) XXX_Size Uses

func (m *EntityMention) XXX_Size() int

func (*EntityMention) XXX_Unmarshal Uses

func (m *EntityMention) XXX_Unmarshal(b []byte) error

type EntityMention_Type Uses

type EntityMention_Type int32

The supported types of mentions.

const (
    // Unknown
    EntityMention_TYPE_UNKNOWN EntityMention_Type = 0
    // Proper name
    EntityMention_PROPER EntityMention_Type = 1
    // Common noun (or noun compound)
    EntityMention_COMMON EntityMention_Type = 2
)

func (EntityMention_Type) EnumDescriptor Uses

func (EntityMention_Type) EnumDescriptor() ([]byte, []int)

func (EntityMention_Type) String Uses

func (x EntityMention_Type) String() string

type Entity_Type Uses

type Entity_Type int32

The type of the entity. For most entity types, the associated metadata is a Wikipedia URL (`wikipedia_url`) and Knowledge Graph MID (`mid`). The table below lists the associated fields for entities that have different metadata.

const (
    // Unknown
    Entity_UNKNOWN Entity_Type = 0
    // Person
    Entity_PERSON Entity_Type = 1
    // Location
    Entity_LOCATION Entity_Type = 2
    // Organization
    Entity_ORGANIZATION Entity_Type = 3
    // Event
    Entity_EVENT Entity_Type = 4
    // Artwork
    Entity_WORK_OF_ART Entity_Type = 5
    // Consumer product
    Entity_CONSUMER_GOOD Entity_Type = 6
    // Other types of entities
    Entity_OTHER Entity_Type = 7
    // Phone number
    //
    // The metadata lists the phone number, formatted according to local
    // convention, plus whichever additional elements appear in the text:
    //
    //   - `number` - the actual number, broken down into sections as per
    //     local convention
    //   - `national_prefix` - country code, if detected
    //   - `area_code` - region or area code, if detected
    //   - `extension` - phone extension (to be dialed after connection), if
    //     detected
    Entity_PHONE_NUMBER Entity_Type = 9
    // Address
    //
    // The metadata identifies the street number and locality plus whichever
    // additional elements appear in the text:
    //
    //   - `street_number` - street number
    //   - `locality` - city or town
    //   - `street_name` - street/route name, if detected
    //   - `postal_code` - postal code, if detected
    //   - `country` - country, if detected
    //   - `broad_region` - administrative area, such as the state, if detected
    //   - `narrow_region` - smaller administrative area, such as county, if
    //     detected
    //   - `sublocality` - used in Asian addresses to demark a district within
    //     a city, if detected
    Entity_ADDRESS Entity_Type = 10
    // Date
    //
    // The metadata identifies the components of the date:
    //
    //   - `year` - four digit year, if detected
    //   - `month` - two digit month number, if detected
    //   - `day` - two digit day number, if detected
    Entity_DATE Entity_Type = 11
    // Number
    //
    // The metadata is the number itself.
    Entity_NUMBER Entity_Type = 12
    // Price
    //
    // The metadata identifies the `value` and `currency`.
    Entity_PRICE Entity_Type = 13
)
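
For the structured entity types above, the metadata keys listed in the comments can be read straight from the Metadata map. A hedged sketch (e is assumed to be an *Entity from a response):

    switch e.GetType() {
    case Entity_DATE:
        md := e.GetMetadata()
        fmt.Println("year:", md["year"], "month:", md["month"], "day:", md["day"])
    case Entity_PHONE_NUMBER:
        fmt.Println("number:", e.GetMetadata()["number"])
    default:
        // For most other types the metadata, when present, is
        // wikipedia_url and mid.
        fmt.Println(e.GetMetadata()["wikipedia_url"])
    }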

func (Entity_Type) EnumDescriptor Uses

func (Entity_Type) EnumDescriptor() ([]byte, []int)

func (Entity_Type) String Uses

func (x Entity_Type) String() string

type LanguageServiceClient Uses

type LanguageServiceClient interface {
    // Analyzes the sentiment of the provided text.
    AnalyzeSentiment(ctx context.Context, in *AnalyzeSentimentRequest, opts ...grpc.CallOption) (*AnalyzeSentimentResponse, error)
    // Finds named entities (currently proper names and common nouns) in the text
    // along with entity types, salience, mentions for each entity, and
    // other properties.
    AnalyzeEntities(ctx context.Context, in *AnalyzeEntitiesRequest, opts ...grpc.CallOption) (*AnalyzeEntitiesResponse, error)
    // Finds entities, similar to [AnalyzeEntities][google.cloud.language.v1.LanguageService.AnalyzeEntities] in the text and analyzes
    // sentiment associated with each entity and its mentions.
    AnalyzeEntitySentiment(ctx context.Context, in *AnalyzeEntitySentimentRequest, opts ...grpc.CallOption) (*AnalyzeEntitySentimentResponse, error)
    // Analyzes the syntax of the text and provides sentence boundaries and
    // tokenization along with part of speech tags, dependency trees, and other
    // properties.
    AnalyzeSyntax(ctx context.Context, in *AnalyzeSyntaxRequest, opts ...grpc.CallOption) (*AnalyzeSyntaxResponse, error)
    // Classifies a document into categories.
    ClassifyText(ctx context.Context, in *ClassifyTextRequest, opts ...grpc.CallOption) (*ClassifyTextResponse, error)
    // A convenience method that provides all the features that analyzeSentiment,
    // analyzeEntities, and analyzeSyntax provide in one call.
    AnnotateText(ctx context.Context, in *AnnotateTextRequest, opts ...grpc.CallOption) (*AnnotateTextResponse, error)
}

LanguageServiceClient is the client API for LanguageService service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

func NewLanguageServiceClient Uses

func NewLanguageServiceClient(cc *grpc.ClientConn) LanguageServiceClient
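
A hedged usage sketch: conn is assumed to be an already established and authenticated *grpc.ClientConn to the Cloud Natural Language endpoint (credential and dial-option setup is omitted here), ctx a context.Context, and Document, Document_PLAIN_TEXT and Document_Content are the request types generated elsewhere in this package:

    client := NewLanguageServiceClient(conn)
    resp, err := client.AnalyzeEntities(ctx, &AnalyzeEntitiesRequest{
        Document: &Document{
            Type:   Document_PLAIN_TEXT,
            Source: &Document_Content{Content: "Google was founded in Mountain View."},
        },
        EncodingType: EncodingType_UTF8,
    })
    if err != nil {
        // handle the RPC error
    }
    _ = resp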

type LanguageServiceServer Uses

type LanguageServiceServer interface {
    // Analyzes the sentiment of the provided text.
    AnalyzeSentiment(context.Context, *AnalyzeSentimentRequest) (*AnalyzeSentimentResponse, error)
    // Finds named entities (currently proper names and common nouns) in the text
    // along with entity types, salience, mentions for each entity, and
    // other properties.
    AnalyzeEntities(context.Context, *AnalyzeEntitiesRequest) (*AnalyzeEntitiesResponse, error)
    // Finds entities, similar to [AnalyzeEntities][google.cloud.language.v1.LanguageService.AnalyzeEntities] in the text and analyzes
    // sentiment associated with each entity and its mentions.
    AnalyzeEntitySentiment(context.Context, *AnalyzeEntitySentimentRequest) (*AnalyzeEntitySentimentResponse, error)
    // Analyzes the syntax of the text and provides sentence boundaries and
    // tokenization along with part of speech tags, dependency trees, and other
    // properties.
    AnalyzeSyntax(context.Context, *AnalyzeSyntaxRequest) (*AnalyzeSyntaxResponse, error)
    // Classifies a document into categories.
    ClassifyText(context.Context, *ClassifyTextRequest) (*ClassifyTextResponse, error)
    // A convenience method that provides all the features that analyzeSentiment,
    // analyzeEntities, and analyzeSyntax provide in one call.
    AnnotateText(context.Context, *AnnotateTextRequest) (*AnnotateTextResponse, error)
}

LanguageServiceServer is the server API for LanguageService service.
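
A minimal sketch of satisfying this interface (not a production server): it assumes the codes and status packages from google.golang.org/grpc, and that the value is attached to a *grpc.Server with the package's generated RegisterLanguageServiceServer function. Only AnalyzeSentiment is given a body here; the remaining methods return Unimplemented.

    type stubLanguageServer struct{}

    func (stubLanguageServer) AnalyzeSentiment(ctx context.Context, req *AnalyzeSentimentRequest) (*AnalyzeSentimentResponse, error) {
        // A canned, purely illustrative response.
        return &AnalyzeSentimentResponse{
            DocumentSentiment: &Sentiment{Score: 0.1, Magnitude: 0.1},
        }, nil
    }

    func (stubLanguageServer) AnalyzeEntities(context.Context, *AnalyzeEntitiesRequest) (*AnalyzeEntitiesResponse, error) {
        return nil, status.Error(codes.Unimplemented, "AnalyzeEntities")
    }

    func (stubLanguageServer) AnalyzeEntitySentiment(context.Context, *AnalyzeEntitySentimentRequest) (*AnalyzeEntitySentimentResponse, error) {
        return nil, status.Error(codes.Unimplemented, "AnalyzeEntitySentiment")
    }

    func (stubLanguageServer) AnalyzeSyntax(context.Context, *AnalyzeSyntaxRequest) (*AnalyzeSyntaxResponse, error) {
        return nil, status.Error(codes.Unimplemented, "AnalyzeSyntax")
    }

    func (stubLanguageServer) ClassifyText(context.Context, *ClassifyTextRequest) (*ClassifyTextResponse, error) {
        return nil, status.Error(codes.Unimplemented, "ClassifyText")
    }

    func (stubLanguageServer) AnnotateText(context.Context, *AnnotateTextRequest) (*AnnotateTextResponse, error) {
        return nil, status.Error(codes.Unimplemented, "AnnotateText")
    }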

type PartOfSpeech Uses

type PartOfSpeech struct {
    // The part of speech tag.
    Tag PartOfSpeech_Tag `protobuf:"varint,1,opt,name=tag,proto3,enum=google.cloud.language.v1.PartOfSpeech_Tag" json:"tag,omitempty"`
    // The grammatical aspect.
    Aspect PartOfSpeech_Aspect `protobuf:"varint,2,opt,name=aspect,proto3,enum=google.cloud.language.v1.PartOfSpeech_Aspect" json:"aspect,omitempty"`
    // The grammatical case.
    Case PartOfSpeech_Case `protobuf:"varint,3,opt,name=case,proto3,enum=google.cloud.language.v1.PartOfSpeech_Case" json:"case,omitempty"`
    // The grammatical form.
    Form PartOfSpeech_Form `protobuf:"varint,4,opt,name=form,proto3,enum=google.cloud.language.v1.PartOfSpeech_Form" json:"form,omitempty"`
    // The grammatical gender.
    Gender PartOfSpeech_Gender `protobuf:"varint,5,opt,name=gender,proto3,enum=google.cloud.language.v1.PartOfSpeech_Gender" json:"gender,omitempty"`
    // The grammatical mood.
    Mood PartOfSpeech_Mood `protobuf:"varint,6,opt,name=mood,proto3,enum=google.cloud.language.v1.PartOfSpeech_Mood" json:"mood,omitempty"`
    // The grammatical number.
    Number PartOfSpeech_Number `protobuf:"varint,7,opt,name=number,proto3,enum=google.cloud.language.v1.PartOfSpeech_Number" json:"number,omitempty"`
    // The grammatical person.
    Person PartOfSpeech_Person `protobuf:"varint,8,opt,name=person,proto3,enum=google.cloud.language.v1.PartOfSpeech_Person" json:"person,omitempty"`
    // The grammatical properness.
    Proper PartOfSpeech_Proper `protobuf:"varint,9,opt,name=proper,proto3,enum=google.cloud.language.v1.PartOfSpeech_Proper" json:"proper,omitempty"`
    // The grammatical reciprocity.
    Reciprocity PartOfSpeech_Reciprocity `protobuf:"varint,10,opt,name=reciprocity,proto3,enum=google.cloud.language.v1.PartOfSpeech_Reciprocity" json:"reciprocity,omitempty"`
    // The grammatical tense.
    Tense PartOfSpeech_Tense `protobuf:"varint,11,opt,name=tense,proto3,enum=google.cloud.language.v1.PartOfSpeech_Tense" json:"tense,omitempty"`
    // The grammatical voice.
    Voice                PartOfSpeech_Voice `protobuf:"varint,12,opt,name=voice,proto3,enum=google.cloud.language.v1.PartOfSpeech_Voice" json:"voice,omitempty"`
    XXX_NoUnkeyedLiteral struct{}           `json:"-"`
    XXX_unrecognized     []byte             `json:"-"`
    XXX_sizecache        int32              `json:"-"`
}

Represents part of speech information for a token. Parts of speech are as defined in http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf
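
A hedged sketch (tok is assumed to be a *Token from an AnalyzeSyntax response) of reading a few of these attributes; attributes that do not apply in the analyzed language stay at their *_UNKNOWN zero value:

    pos := tok.GetPartOfSpeech()
    fmt.Printf("%s: tag=%s number=%s tense=%s\n",
        tok.GetText().GetContent(), pos.GetTag(), pos.GetNumber(), pos.GetTense())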

func (*PartOfSpeech) Descriptor Uses

func (*PartOfSpeech) Descriptor() ([]byte, []int)

func (*PartOfSpeech) GetAspect Uses

func (m *PartOfSpeech) GetAspect() PartOfSpeech_Aspect

func (*PartOfSpeech) GetCase Uses

func (m *PartOfSpeech) GetCase() PartOfSpeech_Case

func (*PartOfSpeech) GetForm Uses

func (m *PartOfSpeech) GetForm() PartOfSpeech_Form

func (*PartOfSpeech) GetGender Uses

func (m *PartOfSpeech) GetGender() PartOfSpeech_Gender

func (*PartOfSpeech) GetMood Uses

func (m *PartOfSpeech) GetMood() PartOfSpeech_Mood

func (*PartOfSpeech) GetNumber Uses

func (m *PartOfSpeech) GetNumber() PartOfSpeech_Number

func (*PartOfSpeech) GetPerson Uses

func (m *PartOfSpeech) GetPerson() PartOfSpeech_Person

func (*PartOfSpeech) GetProper Uses

func (m *PartOfSpeech) GetProper() PartOfSpeech_Proper

func (*PartOfSpeech) GetReciprocity Uses

func (m *PartOfSpeech) GetReciprocity() PartOfSpeech_Reciprocity

func (*PartOfSpeech) GetTag Uses

func (m *PartOfSpeech) GetTag() PartOfSpeech_Tag

func (*PartOfSpeech) GetTense Uses

func (m *PartOfSpeech) GetTense() PartOfSpeech_Tense

func (*PartOfSpeech) GetVoice Uses

func (m *PartOfSpeech) GetVoice() PartOfSpeech_Voice

func (*PartOfSpeech) ProtoMessage Uses

func (*PartOfSpeech) ProtoMessage()

func (*PartOfSpeech) Reset Uses

func (m *PartOfSpeech) Reset()

func (*PartOfSpeech) String Uses

func (m *PartOfSpeech) String() string

func (*PartOfSpeech) XXX_DiscardUnknown Uses

func (m *PartOfSpeech) XXX_DiscardUnknown()

func (*PartOfSpeech) XXX_Marshal Uses

func (m *PartOfSpeech) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*PartOfSpeech) XXX_Merge Uses

func (m *PartOfSpeech) XXX_Merge(src proto.Message)

func (*PartOfSpeech) XXX_Size Uses

func (m *PartOfSpeech) XXX_Size() int

func (*PartOfSpeech) XXX_Unmarshal Uses

func (m *PartOfSpeech) XXX_Unmarshal(b []byte) error

type PartOfSpeech_Aspect Uses

type PartOfSpeech_Aspect int32

The characteristic of a verb that expresses time flow during an event.

const (
    // Aspect is not applicable in the analyzed language or is not predicted.
    PartOfSpeech_ASPECT_UNKNOWN PartOfSpeech_Aspect = 0
    // Perfective
    PartOfSpeech_PERFECTIVE PartOfSpeech_Aspect = 1
    // Imperfective
    PartOfSpeech_IMPERFECTIVE PartOfSpeech_Aspect = 2
    // Progressive
    PartOfSpeech_PROGRESSIVE PartOfSpeech_Aspect = 3
)

func (PartOfSpeech_Aspect) EnumDescriptor Uses

func (PartOfSpeech_Aspect) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Aspect) String Uses

func (x PartOfSpeech_Aspect) String() string

type PartOfSpeech_Case Uses

type PartOfSpeech_Case int32

The grammatical function performed by a noun or pronoun in a phrase, clause, or sentence. In some languages, other parts of speech, such as adjective and determiner, take case inflection in agreement with the noun.

const (
    // Case is not applicable in the analyzed language or is not predicted.
    PartOfSpeech_CASE_UNKNOWN PartOfSpeech_Case = 0
    // Accusative
    PartOfSpeech_ACCUSATIVE PartOfSpeech_Case = 1
    // Adverbial
    PartOfSpeech_ADVERBIAL PartOfSpeech_Case = 2
    // Complementive
    PartOfSpeech_COMPLEMENTIVE PartOfSpeech_Case = 3
    // Dative
    PartOfSpeech_DATIVE PartOfSpeech_Case = 4
    // Genitive
    PartOfSpeech_GENITIVE PartOfSpeech_Case = 5
    // Instrumental
    PartOfSpeech_INSTRUMENTAL PartOfSpeech_Case = 6
    // Locative
    PartOfSpeech_LOCATIVE PartOfSpeech_Case = 7
    // Nominative
    PartOfSpeech_NOMINATIVE PartOfSpeech_Case = 8
    // Oblique
    PartOfSpeech_OBLIQUE PartOfSpeech_Case = 9
    // Partitive
    PartOfSpeech_PARTITIVE PartOfSpeech_Case = 10
    // Prepositional
    PartOfSpeech_PREPOSITIONAL PartOfSpeech_Case = 11
    // Reflexive
    PartOfSpeech_REFLEXIVE_CASE PartOfSpeech_Case = 12
    // Relative
    PartOfSpeech_RELATIVE_CASE PartOfSpeech_Case = 13
    // Vocative
    PartOfSpeech_VOCATIVE PartOfSpeech_Case = 14
)

func (PartOfSpeech_Case) EnumDescriptor Uses

func (PartOfSpeech_Case) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Case) String Uses

func (x PartOfSpeech_Case) String() string

type PartOfSpeech_Form Uses

type PartOfSpeech_Form int32

Depending on the language, Form can categorize different forms of verbs, adjectives, adverbs, and so on; for example, it can categorize inflected endings of verbs and adjectives, or distinguish between short and long forms of adjectives and participles.

const (
    // Form is not applicable in the analyzed language or is not predicted.
    PartOfSpeech_FORM_UNKNOWN PartOfSpeech_Form = 0
    // Adnomial
    PartOfSpeech_ADNOMIAL PartOfSpeech_Form = 1
    // Auxiliary
    PartOfSpeech_AUXILIARY PartOfSpeech_Form = 2
    // Complementizer
    PartOfSpeech_COMPLEMENTIZER PartOfSpeech_Form = 3
    // Final ending
    PartOfSpeech_FINAL_ENDING PartOfSpeech_Form = 4
    // Gerund
    PartOfSpeech_GERUND PartOfSpeech_Form = 5
    // Realis
    PartOfSpeech_REALIS PartOfSpeech_Form = 6
    // Irrealis
    PartOfSpeech_IRREALIS PartOfSpeech_Form = 7
    // Short form
    PartOfSpeech_SHORT PartOfSpeech_Form = 8
    // Long form
    PartOfSpeech_LONG PartOfSpeech_Form = 9
    // Order form
    PartOfSpeech_ORDER PartOfSpeech_Form = 10
    // Specific form
    PartOfSpeech_SPECIFIC PartOfSpeech_Form = 11
)

func (PartOfSpeech_Form) EnumDescriptor Uses

func (PartOfSpeech_Form) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Form) String Uses

func (x PartOfSpeech_Form) String() string

type PartOfSpeech_Gender Uses

type PartOfSpeech_Gender int32

Gender classes of nouns reflected in the behaviour of associated words.

const (
    // Gender is not applicable in the analyzed language or is not predicted.
    PartOfSpeech_GENDER_UNKNOWN PartOfSpeech_Gender = 0
    // Feminine
    PartOfSpeech_FEMININE PartOfSpeech_Gender = 1
    // Masculine
    PartOfSpeech_MASCULINE PartOfSpeech_Gender = 2
    // Neuter
    PartOfSpeech_NEUTER PartOfSpeech_Gender = 3
)

func (PartOfSpeech_Gender) EnumDescriptor Uses

func (PartOfSpeech_Gender) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Gender) String Uses

func (x PartOfSpeech_Gender) String() string

type PartOfSpeech_Mood Uses

type PartOfSpeech_Mood int32

The grammatical feature of verbs, used for showing modality and attitude.

const (
    // Mood is not applicable in the analyzed language or is not predicted.
    PartOfSpeech_MOOD_UNKNOWN PartOfSpeech_Mood = 0
    // Conditional
    PartOfSpeech_CONDITIONAL_MOOD PartOfSpeech_Mood = 1
    // Imperative
    PartOfSpeech_IMPERATIVE PartOfSpeech_Mood = 2
    // Indicative
    PartOfSpeech_INDICATIVE PartOfSpeech_Mood = 3
    // Interrogative
    PartOfSpeech_INTERROGATIVE PartOfSpeech_Mood = 4
    // Jussive
    PartOfSpeech_JUSSIVE PartOfSpeech_Mood = 5
    // Subjunctive
    PartOfSpeech_SUBJUNCTIVE PartOfSpeech_Mood = 6
)

func (PartOfSpeech_Mood) EnumDescriptor Uses

func (PartOfSpeech_Mood) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Mood) String Uses

func (x PartOfSpeech_Mood) String() string

type PartOfSpeech_Number Uses

type PartOfSpeech_Number int32

Count distinctions.

const (
    // Number is not applicable in the analyzed language or is not predicted.
    PartOfSpeech_NUMBER_UNKNOWN PartOfSpeech_Number = 0
    // Singular
    PartOfSpeech_SINGULAR PartOfSpeech_Number = 1
    // Plural
    PartOfSpeech_PLURAL PartOfSpeech_Number = 2
    // Dual
    PartOfSpeech_DUAL PartOfSpeech_Number = 3
)

func (PartOfSpeech_Number) EnumDescriptor Uses

func (PartOfSpeech_Number) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Number) String Uses

func (x PartOfSpeech_Number) String() string

type PartOfSpeech_Person Uses

type PartOfSpeech_Person int32

The distinction between the speaker, second person, third person, etc.

const (
    // Person is not applicable in the analyzed language or is not predicted.
    PartOfSpeech_PERSON_UNKNOWN PartOfSpeech_Person = 0
    // First
    PartOfSpeech_FIRST PartOfSpeech_Person = 1
    // Second
    PartOfSpeech_SECOND PartOfSpeech_Person = 2
    // Third
    PartOfSpeech_THIRD PartOfSpeech_Person = 3
    // Reflexive
    PartOfSpeech_REFLEXIVE_PERSON PartOfSpeech_Person = 4
)

func (PartOfSpeech_Person) EnumDescriptor Uses

func (PartOfSpeech_Person) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Person) String Uses

func (x PartOfSpeech_Person) String() string

type PartOfSpeech_Proper Uses

type PartOfSpeech_Proper int32

This category shows if the token is part of a proper name.

const (
    // Proper is not applicable in the analyzed language or is not predicted.
    PartOfSpeech_PROPER_UNKNOWN PartOfSpeech_Proper = 0
    // Proper
    PartOfSpeech_PROPER PartOfSpeech_Proper = 1
    // Not proper
    PartOfSpeech_NOT_PROPER PartOfSpeech_Proper = 2
)

func (PartOfSpeech_Proper) EnumDescriptor Uses

func (PartOfSpeech_Proper) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Proper) String Uses

func (x PartOfSpeech_Proper) String() string

type PartOfSpeech_Reciprocity Uses

type PartOfSpeech_Reciprocity int32

Reciprocal features of a pronoun.

const (
    // Reciprocity is not applicable in the analyzed language or is not
    // predicted.
    PartOfSpeech_RECIPROCITY_UNKNOWN PartOfSpeech_Reciprocity = 0
    // Reciprocal
    PartOfSpeech_RECIPROCAL PartOfSpeech_Reciprocity = 1
    // Non-reciprocal
    PartOfSpeech_NON_RECIPROCAL PartOfSpeech_Reciprocity = 2
)

func (PartOfSpeech_Reciprocity) EnumDescriptor Uses

func (PartOfSpeech_Reciprocity) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Reciprocity) String Uses

func (x PartOfSpeech_Reciprocity) String() string

type PartOfSpeech_Tag Uses

type PartOfSpeech_Tag int32

The part of speech tags enum.

const (
    // Unknown
    PartOfSpeech_UNKNOWN PartOfSpeech_Tag = 0
    // Adjective
    PartOfSpeech_ADJ PartOfSpeech_Tag = 1
    // Adposition (preposition and postposition)
    PartOfSpeech_ADP PartOfSpeech_Tag = 2
    // Adverb
    PartOfSpeech_ADV PartOfSpeech_Tag = 3
    // Conjunction
    PartOfSpeech_CONJ PartOfSpeech_Tag = 4
    // Determiner
    PartOfSpeech_DET PartOfSpeech_Tag = 5
    // Noun (common and proper)
    PartOfSpeech_NOUN PartOfSpeech_Tag = 6
    // Cardinal number
    PartOfSpeech_NUM PartOfSpeech_Tag = 7
    // Pronoun
    PartOfSpeech_PRON PartOfSpeech_Tag = 8
    // Particle or other function word
    PartOfSpeech_PRT PartOfSpeech_Tag = 9
    // Punctuation
    PartOfSpeech_PUNCT PartOfSpeech_Tag = 10
    // Verb (all tenses and modes)
    PartOfSpeech_VERB PartOfSpeech_Tag = 11
    // Other: foreign words, typos, abbreviations
    PartOfSpeech_X PartOfSpeech_Tag = 12
    // Affix
    PartOfSpeech_AFFIX PartOfSpeech_Tag = 13
)

func (PartOfSpeech_Tag) EnumDescriptor Uses

func (PartOfSpeech_Tag) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Tag) String Uses

func (x PartOfSpeech_Tag) String() string

type PartOfSpeech_Tense Uses

type PartOfSpeech_Tense int32

Time reference.

const (
    // Tense is not applicable in the analyzed language or is not predicted.
    PartOfSpeech_TENSE_UNKNOWN PartOfSpeech_Tense = 0
    // Conditional
    PartOfSpeech_CONDITIONAL_TENSE PartOfSpeech_Tense = 1
    // Future
    PartOfSpeech_FUTURE PartOfSpeech_Tense = 2
    // Past
    PartOfSpeech_PAST PartOfSpeech_Tense = 3
    // Present
    PartOfSpeech_PRESENT PartOfSpeech_Tense = 4
    // Imperfect
    PartOfSpeech_IMPERFECT PartOfSpeech_Tense = 5
    // Pluperfect
    PartOfSpeech_PLUPERFECT PartOfSpeech_Tense = 6
)

func (PartOfSpeech_Tense) EnumDescriptor Uses

func (PartOfSpeech_Tense) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Tense) String Uses

func (x PartOfSpeech_Tense) String() string

type PartOfSpeech_Voice Uses

type PartOfSpeech_Voice int32

The relationship between the action that a verb expresses and the participants identified by its arguments.

const (
    // Voice is not applicable in the analyzed language or is not predicted.
    PartOfSpeech_VOICE_UNKNOWN PartOfSpeech_Voice = 0
    // Active
    PartOfSpeech_ACTIVE PartOfSpeech_Voice = 1
    // Causative
    PartOfSpeech_CAUSATIVE PartOfSpeech_Voice = 2
    // Passive
    PartOfSpeech_PASSIVE PartOfSpeech_Voice = 3
)

func (PartOfSpeech_Voice) EnumDescriptor Uses

func (PartOfSpeech_Voice) EnumDescriptor() ([]byte, []int)

func (PartOfSpeech_Voice) String Uses

func (x PartOfSpeech_Voice) String() string

type Sentence Uses

type Sentence struct {
    // The sentence text.
    Text *TextSpan `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
    // For calls to [AnalyzeSentiment][] or if
    // [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1.AnnotateTextRequest.Features.extract_document_sentiment] is set to
    // true, this field will contain the sentiment for the sentence.
    Sentiment            *Sentiment `protobuf:"bytes,2,opt,name=sentiment,proto3" json:"sentiment,omitempty"`
    XXX_NoUnkeyedLiteral struct{}   `json:"-"`
    XXX_unrecognized     []byte     `json:"-"`
    XXX_sizecache        int32      `json:"-"`
}

Represents a sentence in the input document.
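
A hedged sketch (resp is assumed to be an *AnalyzeSentimentResponse) of walking the sentences and their per-sentence sentiment:

    for _, s := range resp.GetSentences() {
        fmt.Printf("score=%+.2f  %s\n",
            s.GetSentiment().GetScore(), s.GetText().GetContent())
    }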

func (*Sentence) Descriptor Uses

func (*Sentence) Descriptor() ([]byte, []int)

func (*Sentence) GetSentiment Uses

func (m *Sentence) GetSentiment() *Sentiment

func (*Sentence) GetText Uses

func (m *Sentence) GetText() *TextSpan

func (*Sentence) ProtoMessage Uses

func (*Sentence) ProtoMessage()

func (*Sentence) Reset Uses

func (m *Sentence) Reset()

func (*Sentence) String Uses

func (m *Sentence) String() string

func (*Sentence) XXX_DiscardUnknown Uses

func (m *Sentence) XXX_DiscardUnknown()

func (*Sentence) XXX_Marshal Uses

func (m *Sentence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*Sentence) XXX_Merge Uses

func (m *Sentence) XXX_Merge(src proto.Message)

func (*Sentence) XXX_Size Uses

func (m *Sentence) XXX_Size() int

func (*Sentence) XXX_Unmarshal Uses

func (m *Sentence) XXX_Unmarshal(b []byte) error

type Sentiment Uses

type Sentiment struct {
    // A non-negative number in the [0, +inf) range, which represents
    // the absolute magnitude of sentiment regardless of score (positive or
    // negative).
    Magnitude float32 `protobuf:"fixed32,2,opt,name=magnitude,proto3" json:"magnitude,omitempty"`
    // Sentiment score between -1.0 (negative sentiment) and 1.0
    // (positive sentiment).
    Score                float32  `protobuf:"fixed32,3,opt,name=score,proto3" json:"score,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

Represents the feeling associated with the entire text or entities in the text.
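
Score carries the sign of the feeling and Magnitude its overall strength, so one common (purely illustrative) way to label a document is to threshold both values. The cut-offs below are arbitrary assumptions, not part of the API:

    func describe(s *Sentiment) string {
        switch {
        case s.GetScore() > 0.25:
            return "positive"
        case s.GetScore() < -0.25:
            return "negative"
        case s.GetMagnitude() > 1.0:
            // A near-zero score with high magnitude often indicates mixed sentiment.
            return "mixed"
        default:
            return "neutral"
        }
    }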

func (*Sentiment) Descriptor Uses

func (*Sentiment) Descriptor() ([]byte, []int)

func (*Sentiment) GetMagnitude Uses

func (m *Sentiment) GetMagnitude() float32

func (*Sentiment) GetScore Uses

func (m *Sentiment) GetScore() float32

func (*Sentiment) ProtoMessage Uses

func (*Sentiment) ProtoMessage()

func (*Sentiment) Reset Uses

func (m *Sentiment) Reset()

func (*Sentiment) String Uses

func (m *Sentiment) String() string

func (*Sentiment) XXX_DiscardUnknown Uses

func (m *Sentiment) XXX_DiscardUnknown()

func (*Sentiment) XXX_Marshal Uses

func (m *Sentiment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*Sentiment) XXX_Merge Uses

func (m *Sentiment) XXX_Merge(src proto.Message)

func (*Sentiment) XXX_Size Uses

func (m *Sentiment) XXX_Size() int

func (*Sentiment) XXX_Unmarshal Uses

func (m *Sentiment) XXX_Unmarshal(b []byte) error

type TextSpan Uses

type TextSpan struct {
    // The content of the output text.
    Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"`
    // The API calculates the beginning offset of the content in the original
    // document according to the [EncodingType][google.cloud.language.v1.EncodingType] specified in the API request.
    BeginOffset          int32    `protobuf:"varint,2,opt,name=begin_offset,json=beginOffset,proto3" json:"begin_offset,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

Represents an output piece of text.
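
When the request asked for EncodingType_UTF8, BeginOffset is a byte offset into the original content, so the span can be sliced back out of the input. A hedged sketch (original is assumed to be the exact string that was sent, span a *TextSpan from the response):

    begin := int(span.GetBeginOffset())
    end := begin + len(span.GetContent())
    fmt.Println(original[begin:end] == span.GetContent()) // true for UTF-8 offsets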

func (*TextSpan) Descriptor Uses

func (*TextSpan) Descriptor() ([]byte, []int)

func (*TextSpan) GetBeginOffset Uses

func (m *TextSpan) GetBeginOffset() int32

func (*TextSpan) GetContent Uses

func (m *TextSpan) GetContent() string

func (*TextSpan) ProtoMessage Uses

func (*TextSpan) ProtoMessage()

func (*TextSpan) Reset Uses

func (m *TextSpan) Reset()

func (*TextSpan) String Uses

func (m *TextSpan) String() string

func (*TextSpan) XXX_DiscardUnknown Uses

func (m *TextSpan) XXX_DiscardUnknown()

func (*TextSpan) XXX_Marshal Uses

func (m *TextSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*TextSpan) XXX_Merge Uses

func (m *TextSpan) XXX_Merge(src proto.Message)

func (*TextSpan) XXX_Size Uses

func (m *TextSpan) XXX_Size() int

func (*TextSpan) XXX_Unmarshal Uses

func (m *TextSpan) XXX_Unmarshal(b []byte) error

type Token Uses

type Token struct {
    // The token text.
    Text *TextSpan `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
    // Parts of speech tag for this token.
    PartOfSpeech *PartOfSpeech `protobuf:"bytes,2,opt,name=part_of_speech,json=partOfSpeech,proto3" json:"part_of_speech,omitempty"`
    // Dependency tree parse for this token.
    DependencyEdge *DependencyEdge `protobuf:"bytes,3,opt,name=dependency_edge,json=dependencyEdge,proto3" json:"dependency_edge,omitempty"`
    // [Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token.
    Lemma                string   `protobuf:"bytes,4,opt,name=lemma,proto3" json:"lemma,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

Represents the smallest syntactic building block of the text.
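
A hedged sketch (tokens is assumed to be the []*Token slice from an AnalyzeSyntax response) printing each token's lemma and its dependency head:

    for i, tok := range tokens {
        edge := tok.GetDependencyEdge()
        head := tokens[edge.GetHeadTokenIndex()]
        fmt.Printf("%d: %q lemma=%q %s -> %q\n",
            i, tok.GetText().GetContent(), tok.GetLemma(),
            edge.GetLabel(), head.GetText().GetContent())
    }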

func (*Token) Descriptor Uses

func (*Token) Descriptor() ([]byte, []int)

func (*Token) GetDependencyEdge Uses

func (m *Token) GetDependencyEdge() *DependencyEdge

func (*Token) GetLemma Uses

func (m *Token) GetLemma() string

func (*Token) GetPartOfSpeech Uses

func (m *Token) GetPartOfSpeech() *PartOfSpeech

func (*Token) GetText Uses

func (m *Token) GetText() *TextSpan

func (*Token) ProtoMessage Uses

func (*Token) ProtoMessage()

func (*Token) Reset Uses

func (m *Token) Reset()

func (*Token) String Uses

func (m *Token) String() string

func (*Token) XXX_DiscardUnknown Uses

func (m *Token) XXX_DiscardUnknown()

func (*Token) XXX_Marshal Uses

func (m *Token) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*Token) XXX_Merge Uses

func (m *Token) XXX_Merge(src proto.Message)

func (*Token) XXX_Size Uses

func (m *Token) XXX_Size() int

func (*Token) XXX_Unmarshal Uses

func (m *Token) XXX_Unmarshal(b []byte) error

Package language imports 6 packages and is imported by 4 packages. Updated 2019-05-17.