// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.cloud.language.v1beta2;

import "google/api/annotations.proto";
import "google/api/client.proto";
import "google/api/field_behavior.proto";

option go_package = "google.golang.org/genproto/googleapis/cloud/language/v1beta2;language";
option java_multiple_files = true;
option java_outer_classname = "LanguageServiceProto";
option java_package = "com.google.cloud.language.v1beta2";
  23. // Provides text analysis operations such as sentiment analysis and entity
  24. // recognition.
  25. service LanguageService {
  26. option (google.api.default_host) = "language.googleapis.com";
  27. option (google.api.oauth_scopes) =
  28. "https://www.googleapis.com/auth/cloud-language,"
  29. "https://www.googleapis.com/auth/cloud-platform";
  30. // Analyzes the sentiment of the provided text.
  31. rpc AnalyzeSentiment(AnalyzeSentimentRequest) returns (AnalyzeSentimentResponse) {
  32. option (google.api.http) = {
  33. post: "/v1beta2/documents:analyzeSentiment"
  34. body: "*"
  35. };
  36. option (google.api.method_signature) = "document,encoding_type";
  37. option (google.api.method_signature) = "document";
  38. }
  39. // Finds named entities (currently proper names and common nouns) in the text
  40. // along with entity types, salience, mentions for each entity, and
  41. // other properties.
  42. rpc AnalyzeEntities(AnalyzeEntitiesRequest) returns (AnalyzeEntitiesResponse) {
  43. option (google.api.http) = {
  44. post: "/v1beta2/documents:analyzeEntities"
  45. body: "*"
  46. };
  47. option (google.api.method_signature) = "document,encoding_type";
  48. option (google.api.method_signature) = "document";
  49. }
  50. // Finds entities, similar to [AnalyzeEntities][google.cloud.language.v1beta2.LanguageService.AnalyzeEntities] in the text and analyzes
  51. // sentiment associated with each entity and its mentions.
  52. rpc AnalyzeEntitySentiment(AnalyzeEntitySentimentRequest) returns (AnalyzeEntitySentimentResponse) {
  53. option (google.api.http) = {
  54. post: "/v1beta2/documents:analyzeEntitySentiment"
  55. body: "*"
  56. };
  57. option (google.api.method_signature) = "document,encoding_type";
  58. option (google.api.method_signature) = "document";
  59. }
  60. // Analyzes the syntax of the text and provides sentence boundaries and
  61. // tokenization along with part of speech tags, dependency trees, and other
  62. // properties.
  63. rpc AnalyzeSyntax(AnalyzeSyntaxRequest) returns (AnalyzeSyntaxResponse) {
  64. option (google.api.http) = {
  65. post: "/v1beta2/documents:analyzeSyntax"
  66. body: "*"
  67. };
  68. option (google.api.method_signature) = "document,encoding_type";
  69. option (google.api.method_signature) = "document";
  70. }
  71. // Classifies a document into categories.
  72. rpc ClassifyText(ClassifyTextRequest) returns (ClassifyTextResponse) {
  73. option (google.api.http) = {
  74. post: "/v1beta2/documents:classifyText"
  75. body: "*"
  76. };
  77. option (google.api.method_signature) = "document";
  78. }
  79. // A convenience method that provides all syntax, sentiment, entity, and
  80. // classification features in one call.
  81. rpc AnnotateText(AnnotateTextRequest) returns (AnnotateTextResponse) {
  82. option (google.api.http) = {
  83. post: "/v1beta2/documents:annotateText"
  84. body: "*"
  85. };
  86. option (google.api.method_signature) = "document,features,encoding_type";
  87. option (google.api.method_signature) = "document,features";
  88. }
  89. }
  90. // Represents the input to API methods.
  91. message Document {
  92. // The document types enum.
  93. enum Type {
  94. // The content type is not specified.
  95. TYPE_UNSPECIFIED = 0;
  96. // Plain text
  97. PLAIN_TEXT = 1;
  98. // HTML
  99. HTML = 2;
  100. }
  101. // Ways of handling boilerplate detected in the document
  102. enum BoilerplateHandling {
  103. // The boilerplate handling is not specified.
  104. BOILERPLATE_HANDLING_UNSPECIFIED = 0;
  105. // Do not analyze detected boilerplate. Reference web URI is required for
  106. // detecting boilerplate.
  107. SKIP_BOILERPLATE = 1;
  108. // Treat boilerplate the same as content.
  109. KEEP_BOILERPLATE = 2;
  110. }
  111. // Required. If the type is not set or is `TYPE_UNSPECIFIED`,
  112. // returns an `INVALID_ARGUMENT` error.
  113. Type type = 1;
  114. // The source of the document: a string containing the content or a
  115. // Google Cloud Storage URI.
  116. oneof source {
  117. // The content of the input in string format.
  118. // Cloud audit logging exempt since it is based on user data.
  119. string content = 2;
  120. // The Google Cloud Storage URI where the file content is located.
  121. // This URI must be of the form: gs://bucket_name/object_name. For more
  122. // details, see https://cloud.google.com/storage/docs/reference-uris.
  123. // NOTE: Cloud Storage object versioning is not supported.
  124. string gcs_content_uri = 3;
  125. }
  126. // The language of the document (if not specified, the language is
  127. // automatically detected). Both ISO and BCP-47 language codes are
  128. // accepted.<br>
  129. // [Language
  130. // Support](https://cloud.google.com/natural-language/docs/languages) lists
  131. // currently supported languages for each API method. If the language (either
  132. // specified by the caller or automatically detected) is not supported by the
  133. // called API method, an `INVALID_ARGUMENT` error is returned.
  134. string language = 4;
  135. // The web URI where the document comes from. This URI is not used for
  136. // fetching the content, but as a hint for analyzing the document.
  137. string reference_web_uri = 5;
  138. // Indicates how detected boilerplate(e.g. advertisements, copyright
  139. // declarations, banners) should be handled for this document. If not
  140. // specified, boilerplate will be treated the same as content.
  141. BoilerplateHandling boilerplate_handling = 6;
  142. }
  143. // Represents a sentence in the input document.
  144. message Sentence {
  145. // The sentence text.
  146. TextSpan text = 1;
  147. // For calls to [AnalyzeSentiment][] or if
  148. // [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_document_sentiment] is set to
  149. // true, this field will contain the sentiment for the sentence.
  150. Sentiment sentiment = 2;
  151. }
  152. // Represents the text encoding that the caller uses to process the output.
  153. // Providing an `EncodingType` is recommended because the API provides the
  154. // beginning offsets for various outputs, such as tokens and mentions, and
  155. // languages that natively use different text encodings may access offsets
  156. // differently.
  157. enum EncodingType {
  158. // If `EncodingType` is not specified, encoding-dependent information (such as
  159. // `begin_offset`) will be set at `-1`.
  160. NONE = 0;
  161. // Encoding-dependent information (such as `begin_offset`) is calculated based
  162. // on the UTF-8 encoding of the input. C++ and Go are examples of languages
  163. // that use this encoding natively.
  164. UTF8 = 1;
  165. // Encoding-dependent information (such as `begin_offset`) is calculated based
  166. // on the UTF-16 encoding of the input. Java and JavaScript are examples of
  167. // languages that use this encoding natively.
  168. UTF16 = 2;
  169. // Encoding-dependent information (such as `begin_offset`) is calculated based
  170. // on the UTF-32 encoding of the input. Python is an example of a language
  171. // that uses this encoding natively.
  172. UTF32 = 3;
  173. }
  174. // Represents a phrase in the text that is a known entity, such as
  175. // a person, an organization, or location. The API associates information, such
  176. // as salience and mentions, with entities.
  177. message Entity {
  178. // The type of the entity. For most entity types, the associated metadata is a
  179. // Wikipedia URL (`wikipedia_url`) and Knowledge Graph MID (`mid`). The table
  180. // below lists the associated fields for entities that have different
  181. // metadata.
  182. enum Type {
  183. // Unknown
  184. UNKNOWN = 0;
  185. // Person
  186. PERSON = 1;
  187. // Location
  188. LOCATION = 2;
  189. // Organization
  190. ORGANIZATION = 3;
  191. // Event
  192. EVENT = 4;
  193. // Artwork
  194. WORK_OF_ART = 5;
  195. // Consumer product
  196. CONSUMER_GOOD = 6;
  197. // Other types of entities
  198. OTHER = 7;
  199. // Phone number
  200. //
  201. // The metadata lists the phone number, formatted according to local
  202. // convention, plus whichever additional elements appear in the text:
  203. //
  204. // * `number` - the actual number, broken down into sections as per local
  205. // convention
  206. // * `national_prefix` - country code, if detected
  207. // * `area_code` - region or area code, if detected
  208. // * `extension` - phone extension (to be dialed after connection), if
  209. // detected
  210. PHONE_NUMBER = 9;
  211. // Address
  212. //
  213. // The metadata identifies the street number and locality plus whichever
  214. // additional elements appear in the text:
  215. //
  216. // * `street_number` - street number
  217. // * `locality` - city or town
  218. // * `street_name` - street/route name, if detected
  219. // * `postal_code` - postal code, if detected
  220. // * `country` - country, if detected<
  221. // * `broad_region` - administrative area, such as the state, if detected
  222. // * `narrow_region` - smaller administrative area, such as county, if
  223. // detected
  224. // * `sublocality` - used in Asian addresses to demark a district within a
  225. // city, if detected
  226. ADDRESS = 10;
  227. // Date
  228. //
  229. // The metadata identifies the components of the date:
  230. //
  231. // * `year` - four digit year, if detected
  232. // * `month` - two digit month number, if detected
  233. // * `day` - two digit day number, if detected
  234. DATE = 11;
  235. // Number
  236. //
  237. // The metadata is the number itself.
  238. NUMBER = 12;
  239. // Price
  240. //
  241. // The metadata identifies the `value` and `currency`.
  242. PRICE = 13;
  243. }
  244. // The representative name for the entity.
  245. string name = 1;
  246. // The entity type.
  247. Type type = 2;
  248. // Metadata associated with the entity.
  249. //
  250. // For most entity types, the metadata is a Wikipedia URL (`wikipedia_url`)
  251. // and Knowledge Graph MID (`mid`), if they are available. For the metadata
  252. // associated with other entity types, see the Type table below.
  253. map<string, string> metadata = 3;
  254. // The salience score associated with the entity in the [0, 1.0] range.
  255. //
  256. // The salience score for an entity provides information about the
  257. // importance or centrality of that entity to the entire document text.
  258. // Scores closer to 0 are less salient, while scores closer to 1.0 are highly
  259. // salient.
  260. float salience = 4;
  261. // The mentions of this entity in the input document. The API currently
  262. // supports proper noun mentions.
  263. repeated EntityMention mentions = 5;
  264. // For calls to [AnalyzeEntitySentiment][] or if
  265. // [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_entity_sentiment] is set to
  266. // true, this field will contain the aggregate sentiment expressed for this
  267. // entity in the provided document.
  268. Sentiment sentiment = 6;
  269. }
  270. // Represents the smallest syntactic building block of the text.
  271. message Token {
  272. // The token text.
  273. TextSpan text = 1;
  274. // Parts of speech tag for this token.
  275. PartOfSpeech part_of_speech = 2;
  276. // Dependency tree parse for this token.
  277. DependencyEdge dependency_edge = 3;
  278. // [Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token.
  279. string lemma = 4;
  280. }
  281. // Represents the feeling associated with the entire text or entities in
  282. // the text.
  283. // Next ID: 6
  284. message Sentiment {
  285. // A non-negative number in the [0, +inf) range, which represents
  286. // the absolute magnitude of sentiment regardless of score (positive or
  287. // negative).
  288. float magnitude = 2;
  289. // Sentiment score between -1.0 (negative sentiment) and 1.0
  290. // (positive sentiment).
  291. float score = 3;
  292. }
  293. // Represents part of speech information for a token.
  294. message PartOfSpeech {
  295. // The part of speech tags enum.
  296. enum Tag {
  297. // Unknown
  298. UNKNOWN = 0;
  299. // Adjective
  300. ADJ = 1;
  301. // Adposition (preposition and postposition)
  302. ADP = 2;
  303. // Adverb
  304. ADV = 3;
  305. // Conjunction
  306. CONJ = 4;
  307. // Determiner
  308. DET = 5;
  309. // Noun (common and proper)
  310. NOUN = 6;
  311. // Cardinal number
  312. NUM = 7;
  313. // Pronoun
  314. PRON = 8;
  315. // Particle or other function word
  316. PRT = 9;
  317. // Punctuation
  318. PUNCT = 10;
  319. // Verb (all tenses and modes)
  320. VERB = 11;
  321. // Other: foreign words, typos, abbreviations
  322. X = 12;
  323. // Affix
  324. AFFIX = 13;
  325. }
  326. // The characteristic of a verb that expresses time flow during an event.
  327. enum Aspect {
  328. // Aspect is not applicable in the analyzed language or is not predicted.
  329. ASPECT_UNKNOWN = 0;
  330. // Perfective
  331. PERFECTIVE = 1;
  332. // Imperfective
  333. IMPERFECTIVE = 2;
  334. // Progressive
  335. PROGRESSIVE = 3;
  336. }
  337. // The grammatical function performed by a noun or pronoun in a phrase,
  338. // clause, or sentence. In some languages, other parts of speech, such as
  339. // adjective and determiner, take case inflection in agreement with the noun.
  340. enum Case {
  341. // Case is not applicable in the analyzed language or is not predicted.
  342. CASE_UNKNOWN = 0;
  343. // Accusative
  344. ACCUSATIVE = 1;
  345. // Adverbial
  346. ADVERBIAL = 2;
  347. // Complementive
  348. COMPLEMENTIVE = 3;
  349. // Dative
  350. DATIVE = 4;
  351. // Genitive
  352. GENITIVE = 5;
  353. // Instrumental
  354. INSTRUMENTAL = 6;
  355. // Locative
  356. LOCATIVE = 7;
  357. // Nominative
  358. NOMINATIVE = 8;
  359. // Oblique
  360. OBLIQUE = 9;
  361. // Partitive
  362. PARTITIVE = 10;
  363. // Prepositional
  364. PREPOSITIONAL = 11;
  365. // Reflexive
  366. REFLEXIVE_CASE = 12;
  367. // Relative
  368. RELATIVE_CASE = 13;
  369. // Vocative
  370. VOCATIVE = 14;
  371. }
  372. // Depending on the language, Form can be categorizing different forms of
  373. // verbs, adjectives, adverbs, etc. For example, categorizing inflected
  374. // endings of verbs and adjectives or distinguishing between short and long
  375. // forms of adjectives and participles
  376. enum Form {
  377. // Form is not applicable in the analyzed language or is not predicted.
  378. FORM_UNKNOWN = 0;
  379. // Adnomial
  380. ADNOMIAL = 1;
  381. // Auxiliary
  382. AUXILIARY = 2;
  383. // Complementizer
  384. COMPLEMENTIZER = 3;
  385. // Final ending
  386. FINAL_ENDING = 4;
  387. // Gerund
  388. GERUND = 5;
  389. // Realis
  390. REALIS = 6;
  391. // Irrealis
  392. IRREALIS = 7;
  393. // Short form
  394. SHORT = 8;
  395. // Long form
  396. LONG = 9;
  397. // Order form
  398. ORDER = 10;
  399. // Specific form
  400. SPECIFIC = 11;
  401. }
  402. // Gender classes of nouns reflected in the behaviour of associated words.
  403. enum Gender {
  404. // Gender is not applicable in the analyzed language or is not predicted.
  405. GENDER_UNKNOWN = 0;
  406. // Feminine
  407. FEMININE = 1;
  408. // Masculine
  409. MASCULINE = 2;
  410. // Neuter
  411. NEUTER = 3;
  412. }
  413. // The grammatical feature of verbs, used for showing modality and attitude.
  414. enum Mood {
  415. // Mood is not applicable in the analyzed language or is not predicted.
  416. MOOD_UNKNOWN = 0;
  417. // Conditional
  418. CONDITIONAL_MOOD = 1;
  419. // Imperative
  420. IMPERATIVE = 2;
  421. // Indicative
  422. INDICATIVE = 3;
  423. // Interrogative
  424. INTERROGATIVE = 4;
  425. // Jussive
  426. JUSSIVE = 5;
  427. // Subjunctive
  428. SUBJUNCTIVE = 6;
  429. }
  430. // Count distinctions.
  431. enum Number {
  432. // Number is not applicable in the analyzed language or is not predicted.
  433. NUMBER_UNKNOWN = 0;
  434. // Singular
  435. SINGULAR = 1;
  436. // Plural
  437. PLURAL = 2;
  438. // Dual
  439. DUAL = 3;
  440. }
  441. // The distinction between the speaker, second person, third person, etc.
  442. enum Person {
  443. // Person is not applicable in the analyzed language or is not predicted.
  444. PERSON_UNKNOWN = 0;
  445. // First
  446. FIRST = 1;
  447. // Second
  448. SECOND = 2;
  449. // Third
  450. THIRD = 3;
  451. // Reflexive
  452. REFLEXIVE_PERSON = 4;
  453. }
  454. // This category shows if the token is part of a proper name.
  455. enum Proper {
  456. // Proper is not applicable in the analyzed language or is not predicted.
  457. PROPER_UNKNOWN = 0;
  458. // Proper
  459. PROPER = 1;
  460. // Not proper
  461. NOT_PROPER = 2;
  462. }
  463. // Reciprocal features of a pronoun.
  464. enum Reciprocity {
  465. // Reciprocity is not applicable in the analyzed language or is not
  466. // predicted.
  467. RECIPROCITY_UNKNOWN = 0;
  468. // Reciprocal
  469. RECIPROCAL = 1;
  470. // Non-reciprocal
  471. NON_RECIPROCAL = 2;
  472. }
  473. // Time reference.
  474. enum Tense {
  475. // Tense is not applicable in the analyzed language or is not predicted.
  476. TENSE_UNKNOWN = 0;
  477. // Conditional
  478. CONDITIONAL_TENSE = 1;
  479. // Future
  480. FUTURE = 2;
  481. // Past
  482. PAST = 3;
  483. // Present
  484. PRESENT = 4;
  485. // Imperfect
  486. IMPERFECT = 5;
  487. // Pluperfect
  488. PLUPERFECT = 6;
  489. }
  490. // The relationship between the action that a verb expresses and the
  491. // participants identified by its arguments.
  492. enum Voice {
  493. // Voice is not applicable in the analyzed language or is not predicted.
  494. VOICE_UNKNOWN = 0;
  495. // Active
  496. ACTIVE = 1;
  497. // Causative
  498. CAUSATIVE = 2;
  499. // Passive
  500. PASSIVE = 3;
  501. }
  502. // The part of speech tag.
  503. Tag tag = 1;
  504. // The grammatical aspect.
  505. Aspect aspect = 2;
  506. // The grammatical case.
  507. Case case = 3;
  508. // The grammatical form.
  509. Form form = 4;
  510. // The grammatical gender.
  511. Gender gender = 5;
  512. // The grammatical mood.
  513. Mood mood = 6;
  514. // The grammatical number.
  515. Number number = 7;
  516. // The grammatical person.
  517. Person person = 8;
  518. // The grammatical properness.
  519. Proper proper = 9;
  520. // The grammatical reciprocity.
  521. Reciprocity reciprocity = 10;
  522. // The grammatical tense.
  523. Tense tense = 11;
  524. // The grammatical voice.
  525. Voice voice = 12;
  526. }
  527. // Represents dependency parse tree information for a token.
  528. message DependencyEdge {
  529. // The parse label enum for the token.
  530. enum Label {
  531. // Unknown
  532. UNKNOWN = 0;
  533. // Abbreviation modifier
  534. ABBREV = 1;
  535. // Adjectival complement
  536. ACOMP = 2;
  537. // Adverbial clause modifier
  538. ADVCL = 3;
  539. // Adverbial modifier
  540. ADVMOD = 4;
  541. // Adjectival modifier of an NP
  542. AMOD = 5;
  543. // Appositional modifier of an NP
  544. APPOS = 6;
  545. // Attribute dependent of a copular verb
  546. ATTR = 7;
  547. // Auxiliary (non-main) verb
  548. AUX = 8;
  549. // Passive auxiliary
  550. AUXPASS = 9;
  551. // Coordinating conjunction
  552. CC = 10;
  553. // Clausal complement of a verb or adjective
  554. CCOMP = 11;
  555. // Conjunct
  556. CONJ = 12;
  557. // Clausal subject
  558. CSUBJ = 13;
  559. // Clausal passive subject
  560. CSUBJPASS = 14;
  561. // Dependency (unable to determine)
  562. DEP = 15;
  563. // Determiner
  564. DET = 16;
  565. // Discourse
  566. DISCOURSE = 17;
  567. // Direct object
  568. DOBJ = 18;
  569. // Expletive
  570. EXPL = 19;
  571. // Goes with (part of a word in a text not well edited)
  572. GOESWITH = 20;
  573. // Indirect object
  574. IOBJ = 21;
  575. // Marker (word introducing a subordinate clause)
  576. MARK = 22;
  577. // Multi-word expression
  578. MWE = 23;
  579. // Multi-word verbal expression
  580. MWV = 24;
  581. // Negation modifier
  582. NEG = 25;
  583. // Noun compound modifier
  584. NN = 26;
  585. // Noun phrase used as an adverbial modifier
  586. NPADVMOD = 27;
  587. // Nominal subject
  588. NSUBJ = 28;
  589. // Passive nominal subject
  590. NSUBJPASS = 29;
  591. // Numeric modifier of a noun
  592. NUM = 30;
  593. // Element of compound number
  594. NUMBER = 31;
  595. // Punctuation mark
  596. P = 32;
  597. // Parataxis relation
  598. PARATAXIS = 33;
  599. // Participial modifier
  600. PARTMOD = 34;
  601. // The complement of a preposition is a clause
  602. PCOMP = 35;
  603. // Object of a preposition
  604. POBJ = 36;
  605. // Possession modifier
  606. POSS = 37;
  607. // Postverbal negative particle
  608. POSTNEG = 38;
  609. // Predicate complement
  610. PRECOMP = 39;
  611. // Preconjunt
  612. PRECONJ = 40;
  613. // Predeterminer
  614. PREDET = 41;
  615. // Prefix
  616. PREF = 42;
  617. // Prepositional modifier
  618. PREP = 43;
  619. // The relationship between a verb and verbal morpheme
  620. PRONL = 44;
  621. // Particle
  622. PRT = 45;
  623. // Associative or possessive marker
  624. PS = 46;
  625. // Quantifier phrase modifier
  626. QUANTMOD = 47;
  627. // Relative clause modifier
  628. RCMOD = 48;
  629. // Complementizer in relative clause
  630. RCMODREL = 49;
  631. // Ellipsis without a preceding predicate
  632. RDROP = 50;
  633. // Referent
  634. REF = 51;
  635. // Remnant
  636. REMNANT = 52;
  637. // Reparandum
  638. REPARANDUM = 53;
  639. // Root
  640. ROOT = 54;
  641. // Suffix specifying a unit of number
  642. SNUM = 55;
  643. // Suffix
  644. SUFF = 56;
  645. // Temporal modifier
  646. TMOD = 57;
  647. // Topic marker
  648. TOPIC = 58;
  649. // Clause headed by an infinite form of the verb that modifies a noun
  650. VMOD = 59;
  651. // Vocative
  652. VOCATIVE = 60;
  653. // Open clausal complement
  654. XCOMP = 61;
  655. // Name suffix
  656. SUFFIX = 62;
  657. // Name title
  658. TITLE = 63;
  659. // Adverbial phrase modifier
  660. ADVPHMOD = 64;
  661. // Causative auxiliary
  662. AUXCAUS = 65;
  663. // Helper auxiliary
  664. AUXVV = 66;
  665. // Rentaishi (Prenominal modifier)
  666. DTMOD = 67;
  667. // Foreign words
  668. FOREIGN = 68;
  669. // Keyword
  670. KW = 69;
  671. // List for chains of comparable items
  672. LIST = 70;
  673. // Nominalized clause
  674. NOMC = 71;
  675. // Nominalized clausal subject
  676. NOMCSUBJ = 72;
  677. // Nominalized clausal passive
  678. NOMCSUBJPASS = 73;
  679. // Compound of numeric modifier
  680. NUMC = 74;
  681. // Copula
  682. COP = 75;
  683. // Dislocated relation (for fronted/topicalized elements)
  684. DISLOCATED = 76;
  685. // Aspect marker
  686. ASP = 77;
  687. // Genitive modifier
  688. GMOD = 78;
  689. // Genitive object
  690. GOBJ = 79;
  691. // Infinitival modifier
  692. INFMOD = 80;
  693. // Measure
  694. MES = 81;
  695. // Nominal complement of a noun
  696. NCOMP = 82;
  697. }
  698. // Represents the head of this token in the dependency tree.
  699. // This is the index of the token which has an arc going to this token.
  700. // The index is the position of the token in the array of tokens returned
  701. // by the API method. If this token is a root token, then the
  702. // `head_token_index` is its own index.
  703. int32 head_token_index = 1;
  704. // The parse label for the token.
  705. Label label = 2;
  706. }
  707. // Represents a mention for an entity in the text. Currently, proper noun
  708. // mentions are supported.
  709. message EntityMention {
  710. // The supported types of mentions.
  711. enum Type {
  712. // Unknown
  713. TYPE_UNKNOWN = 0;
  714. // Proper name
  715. PROPER = 1;
  716. // Common noun (or noun compound)
  717. COMMON = 2;
  718. }
  719. // The mention text.
  720. TextSpan text = 1;
  721. // The type of the entity mention.
  722. Type type = 2;
  723. // For calls to [AnalyzeEntitySentiment][] or if
  724. // [AnnotateTextRequest.Features.extract_entity_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_entity_sentiment] is set to
  725. // true, this field will contain the sentiment expressed for this mention of
  726. // the entity in the provided document.
  727. Sentiment sentiment = 3;
  728. }
  729. // Represents an output piece of text.
  730. message TextSpan {
  731. // The content of the output text.
  732. string content = 1;
  733. // The API calculates the beginning offset of the content in the original
  734. // document according to the [EncodingType][google.cloud.language.v1beta2.EncodingType] specified in the API request.
  735. int32 begin_offset = 2;
  736. }
  737. // Represents a category returned from the text classifier.
  738. message ClassificationCategory {
  739. // The name of the category representing the document, from the [predefined
  740. // taxonomy](https://cloud.google.com/natural-language/docs/categories).
  741. string name = 1;
  742. // The classifier's confidence of the category. Number represents how certain
  743. // the classifier is that this category represents the given text.
  744. float confidence = 2;
  745. }
  746. // Model options available for classification requests.
  747. message ClassificationModelOptions {
  748. // Options for the V1 model.
  749. message V1Model {
  750. }
  751. // Options for the V2 model.
  752. message V2Model {
  753. // The content categories used for classification.
  754. enum ContentCategoriesVersion {
  755. // If `ContentCategoriesVersion` is not specified, this option will
  756. // default to `V1`.
  757. CONTENT_CATEGORIES_VERSION_UNSPECIFIED = 0;
  758. // Legacy content categories of our initial launch in 2017.
  759. V1 = 1;
  760. // Updated content categories in 2022.
  761. V2 = 2;
  762. }
  763. // The content categories used for classification.
  764. ContentCategoriesVersion content_categories_version = 1;
  765. }
  766. // If this field is not set, then the `v1_model` will be used by default.
  767. oneof model_type {
  768. // Setting this field will use the V1 model and V1 content categories
  769. // version. The V1 model is a legacy model; support for this will be
  770. // discontinued in the future.
  771. V1Model v1_model = 1;
  772. // Setting this field will use the V2 model with the appropriate content
  773. // categories version. The V2 model is a better performing model.
  774. V2Model v2_model = 2;
  775. }
  776. }
  777. // The sentiment analysis request message.
  778. message AnalyzeSentimentRequest {
  779. // Required. Input document.
  780. Document document = 1 [(google.api.field_behavior) = REQUIRED];
  781. // The encoding type used by the API to calculate sentence offsets for the
  782. // sentence sentiment.
  783. EncodingType encoding_type = 2;
  784. }
  785. // The sentiment analysis response message.
  786. message AnalyzeSentimentResponse {
  787. // The overall sentiment of the input document.
  788. Sentiment document_sentiment = 1;
  789. // The language of the text, which will be the same as the language specified
  790. // in the request or, if not specified, the automatically-detected language.
  791. // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details.
  792. string language = 2;
  793. // The sentiment for all the sentences in the document.
  794. repeated Sentence sentences = 3;
  795. }
  796. // The entity-level sentiment analysis request message.
  797. message AnalyzeEntitySentimentRequest {
  798. // Required. Input document.
  799. Document document = 1 [(google.api.field_behavior) = REQUIRED];
  800. // The encoding type used by the API to calculate offsets.
  801. EncodingType encoding_type = 2;
  802. }
  803. // The entity-level sentiment analysis response message.
  804. message AnalyzeEntitySentimentResponse {
  805. // The recognized entities in the input document with associated sentiments.
  806. repeated Entity entities = 1;
  807. // The language of the text, which will be the same as the language specified
  808. // in the request or, if not specified, the automatically-detected language.
  809. // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details.
  810. string language = 2;
  811. }
  812. // The entity analysis request message.
  813. message AnalyzeEntitiesRequest {
  814. // Required. Input document.
  815. Document document = 1 [(google.api.field_behavior) = REQUIRED];
  816. // The encoding type used by the API to calculate offsets.
  817. EncodingType encoding_type = 2;
  818. }
  819. // The entity analysis response message.
  820. message AnalyzeEntitiesResponse {
  821. // The recognized entities in the input document.
  822. repeated Entity entities = 1;
  823. // The language of the text, which will be the same as the language specified
  824. // in the request or, if not specified, the automatically-detected language.
  825. // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details.
  826. string language = 2;
  827. }
  828. // The syntax analysis request message.
  829. message AnalyzeSyntaxRequest {
  830. // Required. Input document.
  831. Document document = 1 [(google.api.field_behavior) = REQUIRED];
  832. // The encoding type used by the API to calculate offsets.
  833. EncodingType encoding_type = 2;
  834. }
  835. // The syntax analysis response message.
  836. message AnalyzeSyntaxResponse {
  837. // Sentences in the input document.
  838. repeated Sentence sentences = 1;
  839. // Tokens, along with their syntactic information, in the input document.
  840. repeated Token tokens = 2;
  841. // The language of the text, which will be the same as the language specified
  842. // in the request or, if not specified, the automatically-detected language.
  843. // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details.
  844. string language = 3;
  845. }
  846. // The document classification request message.
  847. message ClassifyTextRequest {
  848. // Required. Input document.
  849. Document document = 1 [(google.api.field_behavior) = REQUIRED];
  850. // Model options to use for classification. Defaults to v1 options if not
  851. // specified.
  852. ClassificationModelOptions classification_model_options = 3;
  853. }
  854. // The document classification response message.
  855. message ClassifyTextResponse {
  856. // Categories representing the input document.
  857. repeated ClassificationCategory categories = 1;
  858. }
  859. // The request message for the text annotation API, which can perform multiple
  860. // analysis types (sentiment, entities, and syntax) in one call.
  861. message AnnotateTextRequest {
  862. // All available features for sentiment, syntax, and semantic analysis.
  863. // Setting each one to true will enable that specific analysis for the input.
  864. // Next ID: 11
  865. message Features {
  866. // Extract syntax information.
  867. bool extract_syntax = 1;
  868. // Extract entities.
  869. bool extract_entities = 2;
  870. // Extract document-level sentiment.
  871. bool extract_document_sentiment = 3;
  872. // Extract entities and their associated sentiment.
  873. bool extract_entity_sentiment = 4;
  874. // Classify the full document into categories. If this is true,
  875. // the API will use the default model which classifies into a
  876. // [predefined
  877. // taxonomy](https://cloud.google.com/natural-language/docs/categories).
  878. bool classify_text = 6;
  879. // The model options to use for classification. Defaults to v1 options
  880. // if not specified. Only used if `classify_text` is set to true.
  881. ClassificationModelOptions classification_model_options = 10;
  882. }
  883. // Required. Input document.
  884. Document document = 1 [(google.api.field_behavior) = REQUIRED];
  885. // Required. The enabled features.
  886. Features features = 2 [(google.api.field_behavior) = REQUIRED];
  887. // The encoding type used by the API to calculate offsets.
  888. EncodingType encoding_type = 3;
  889. }
  890. // The text annotations response message.
  891. message AnnotateTextResponse {
  892. // Sentences in the input document. Populated if the user enables
  893. // [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_syntax].
  894. repeated Sentence sentences = 1;
  895. // Tokens, along with their syntactic information, in the input document.
  896. // Populated if the user enables
  897. // [AnnotateTextRequest.Features.extract_syntax][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_syntax].
  898. repeated Token tokens = 2;
  899. // Entities, along with their semantic information, in the input document.
  900. // Populated if the user enables
  901. // [AnnotateTextRequest.Features.extract_entities][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_entities].
  902. repeated Entity entities = 3;
  903. // The overall sentiment for the document. Populated if the user enables
  904. // [AnnotateTextRequest.Features.extract_document_sentiment][google.cloud.language.v1beta2.AnnotateTextRequest.Features.extract_document_sentiment].
  905. Sentiment document_sentiment = 4;
  906. // The language of the text, which will be the same as the language specified
  907. // in the request or, if not specified, the automatically-detected language.
  908. // See [Document.language][google.cloud.language.v1beta2.Document.language] field for more details.
  909. string language = 5;
  910. // Categories identified in the input document.
  911. repeated ClassificationCategory categories = 6;
  912. }