Chromium Code Reviews

Side by Side Diff: generated/googleapis/lib/language/v1.dart

Issue 2571553005: Api-roll 43: 2016-12-13 (Closed)
Patch Set: Created 4 years ago
(Empty: no previous version of this file; the patch adds it.)
1 // This is a generated file (see the discoveryapis_generator project).
2
3 library googleapis.language.v1;
4
5 import 'dart:core' as core;
6 import 'dart:async' as async;
7 import 'dart:convert' as convert;
8
9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons;
10 import 'package:http/http.dart' as http;
11
12 export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' show
13 ApiRequestError, DetailedApiRequestError;
14
15 const core.String USER_AGENT = 'dart-api-client language/v1';
16
17 /**
18 * Google Cloud Natural Language API provides natural language understanding
19 * technologies to developers. Examples include sentiment analysis, entity
20 * recognition, and text annotations.
21 */
22 class LanguageApi {
23 /** View and manage your data across Google Cloud Platform services */
24 static const CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform";
25
26
27 final commons.ApiRequester _requester;
28
29 DocumentsResourceApi get documents => new DocumentsResourceApi(_requester);
30
31 LanguageApi(http.Client client, {core.String rootUrl: "https://language.googleapis.com/", core.String servicePath: ""}) :
32 _requester = new commons.ApiRequester(client, rootUrl, servicePath, USER_AGENT);
33 }
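// Usage sketch (editorial example, not part of the generated file): construct
// the client and analyze the sentiment of a piece of text. Assumes `client` is
// an http.Client already authorized for the cloud-platform scope, for example
// one obtained via the googleapis_auth package; the function name is illustrative.
async.Future exampleAnalyzeSentiment(http.Client client, core.String text) {
  var api = new LanguageApi(client);
  var request = new AnalyzeSentimentRequest()
    ..document = (new Document()
      ..type = "PLAIN_TEXT"
      ..content = text);
  // The resource API returns a Future that completes with the decoded response.
  return api.documents.analyzeSentiment(request).then((response) {
    core.print("Sentiment score: ${response.documentSentiment.score}");
    return response;
  });
}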
34
35
36 class DocumentsResourceApi {
37 final commons.ApiRequester _requester;
38
39 DocumentsResourceApi(commons.ApiRequester client) :
40 _requester = client;
41
42 /**
43 * Finds named entities (currently proper names) in the text, along with
44 * entity types, salience, mentions for each entity, and other properties.
45 *
46 * [request] - The metadata request object.
47 *
48 * Request parameters:
49 *
50 * Completes with a [AnalyzeEntitiesResponse].
51 *
52 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
53 * error.
54 *
55 * If the used [http.Client] completes with an error when making a REST call,
56 * this method will complete with the same error.
57 */
58 async.Future<AnalyzeEntitiesResponse> analyzeEntities(AnalyzeEntitiesRequest request) {
59 var _url = null;
60 var _queryParams = new core.Map();
61 var _uploadMedia = null;
62 var _uploadOptions = null;
63 var _downloadOptions = commons.DownloadOptions.Metadata;
64 var _body = null;
65
66 if (request != null) {
67 _body = convert.JSON.encode((request).toJson());
68 }
69
70 _url = 'v1/documents:analyzeEntities';
71
72 var _response = _requester.request(_url,
73 "POST",
74 body: _body,
75 queryParams: _queryParams,
76 uploadOptions: _uploadOptions,
77 uploadMedia: _uploadMedia,
78 downloadOptions: _downloadOptions);
79 return _response.then((data) => new AnalyzeEntitiesResponse.fromJson(data));
80 }
81
82 /**
83 * Analyzes the sentiment of the provided text.
84 *
85 * [request] - The metadata request object.
86 *
87 * Request parameters:
88 *
89 * Completes with a [AnalyzeSentimentResponse].
90 *
91 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
92 * error.
93 *
94 * If the used [http.Client] completes with an error when making a REST call,
95 * this method will complete with the same error.
96 */
97 async.Future<AnalyzeSentimentResponse> analyzeSentiment(AnalyzeSentimentRequest request) {
98 var _url = null;
99 var _queryParams = new core.Map();
100 var _uploadMedia = null;
101 var _uploadOptions = null;
102 var _downloadOptions = commons.DownloadOptions.Metadata;
103 var _body = null;
104
105 if (request != null) {
106 _body = convert.JSON.encode((request).toJson());
107 }
108
109 _url = 'v1/documents:analyzeSentiment';
110
111 var _response = _requester.request(_url,
112 "POST",
113 body: _body,
114 queryParams: _queryParams,
115 uploadOptions: _uploadOptions,
116 uploadMedia: _uploadMedia,
117 downloadOptions: _downloadOptions);
118 return _response.then((data) => new AnalyzeSentimentResponse.fromJson(data));
119 }
120
121 /**
122 * Analyzes the syntax of the text and provides sentence boundaries and
123 * tokenization along with part of speech tags, dependency trees, and other
124 * properties.
125 *
126 * [request] - The metadata request object.
127 *
128 * Request parameters:
129 *
130 * Completes with a [AnalyzeSyntaxResponse].
131 *
132 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
133 * error.
134 *
135 * If the used [http.Client] completes with an error when making a REST call,
136 * this method will complete with the same error.
137 */
138 async.Future<AnalyzeSyntaxResponse> analyzeSyntax(AnalyzeSyntaxRequest request) {
139 var _url = null;
140 var _queryParams = new core.Map();
141 var _uploadMedia = null;
142 var _uploadOptions = null;
143 var _downloadOptions = commons.DownloadOptions.Metadata;
144 var _body = null;
145
146 if (request != null) {
147 _body = convert.JSON.encode((request).toJson());
148 }
149
150 _url = 'v1/documents:analyzeSyntax';
151
152 var _response = _requester.request(_url,
153 "POST",
154 body: _body,
155 queryParams: _queryParams,
156 uploadOptions: _uploadOptions,
157 uploadMedia: _uploadMedia,
158 downloadOptions: _downloadOptions);
159 return _response.then((data) => new AnalyzeSyntaxResponse.fromJson(data));
160 }
161
162 /**
163 * A convenience method that provides all the features that analyzeSentiment,
164 * analyzeEntities, and analyzeSyntax provide in one call.
165 *
166 * [request] - The metadata request object.
167 *
168 * Request parameters:
169 *
170 * Completes with a [AnnotateTextResponse].
171 *
172 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
173 * error.
174 *
175 * If the used [http.Client] completes with an error when making a REST call,
176 * this method will complete with the same error.
177 */
178 async.Future<AnnotateTextResponse> annotateText(AnnotateTextRequest request) {
179 var _url = null;
180 var _queryParams = new core.Map();
181 var _uploadMedia = null;
182 var _uploadOptions = null;
183 var _downloadOptions = commons.DownloadOptions.Metadata;
184 var _body = null;
185
186 if (request != null) {
187 _body = convert.JSON.encode((request).toJson());
188 }
189
190 _url = 'v1/documents:annotateText';
191
192 var _response = _requester.request(_url,
193 "POST",
194 body: _body,
195 queryParams: _queryParams,
196 uploadOptions: _uploadOptions,
197 uploadMedia: _uploadMedia,
198 downloadOptions: _downloadOptions);
199 return _response.then((data) => new AnnotateTextResponse.fromJson(data));
200 }
201
202 }
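// Usage sketch (editorial example, not part of the generated file): call
// analyzeEntities and iterate over the recognized entities. `documents` is the
// DocumentsResourceApi obtained from LanguageApi.documents; the function name
// is illustrative.
async.Future exampleListEntities(DocumentsResourceApi documents, Document document) {
  var request = new AnalyzeEntitiesRequest()
    ..document = document
    ..encodingType = "UTF8";
  return documents.analyzeEntities(request).then((response) {
    for (var entity in response.entities) {
      core.print("${entity.name} (${entity.type}) salience=${entity.salience}");
    }
    return response;
  });
}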
203
204
205
206 /** The entity analysis request message. */
207 class AnalyzeEntitiesRequest {
208 /** Input document. */
209 Document document;
210 /**
211 * The encoding type used by the API to calculate offsets.
212 * Possible string values are:
213 * - "NONE" : If `EncodingType` is not specified, encoding-dependent
214 * information (such as
215 * `begin_offset`) will be set at `-1`.
216 * - "UTF8" : Encoding-dependent information (such as `begin_offset`) is
217 * calculated based
218 * on the UTF-8 encoding of the input. C++ and Go are examples of languages
219 * that use this encoding natively.
220 * - "UTF16" : Encoding-dependent information (such as `begin_offset`) is
221 * calculated based
222 * on the UTF-16 encoding of the input. Java and Javascript are examples of
223 * languages that use this encoding natively.
224 * - "UTF32" : Encoding-dependent information (such as `begin_offset`) is
225 * calculated based
226 * on the UTF-32 encoding of the input. Python is an example of a language
227 * that uses this encoding natively.
228 */
229 core.String encodingType;
230
231 AnalyzeEntitiesRequest();
232
233 AnalyzeEntitiesRequest.fromJson(core.Map _json) {
234 if (_json.containsKey("document")) {
235 document = new Document.fromJson(_json["document"]);
236 }
237 if (_json.containsKey("encodingType")) {
238 encodingType = _json["encodingType"];
239 }
240 }
241
242 core.Map toJson() {
243 var _json = new core.Map();
244 if (document != null) {
245 _json["document"] = (document).toJson();
246 }
247 if (encodingType != null) {
248 _json["encodingType"] = encodingType;
249 }
250 return _json;
251 }
252 }
253
254 /** The entity analysis response message. */
255 class AnalyzeEntitiesResponse {
256 /** The recognized entities in the input document. */
257 core.List<Entity> entities;
258 /**
259 * The language of the text, which will be the same as the language specified
260 * in the request or, if not specified, the automatically-detected language.
261 * See `Document.language` field for more details.
262 */
263 core.String language;
264
265 AnalyzeEntitiesResponse();
266
267 AnalyzeEntitiesResponse.fromJson(core.Map _json) {
268 if (_json.containsKey("entities")) {
269 entities = _json["entities"].map((value) => new Entity.fromJson(value)).toList();
270 }
271 if (_json.containsKey("language")) {
272 language = _json["language"];
273 }
274 }
275
276 core.Map toJson() {
277 var _json = new core.Map();
278 if (entities != null) {
279 _json["entities"] = entities.map((value) => (value).toJson()).toList();
280 }
281 if (language != null) {
282 _json["language"] = language;
283 }
284 return _json;
285 }
286 }
287
288 /** The sentiment analysis request message. */
289 class AnalyzeSentimentRequest {
290 /**
291 * Input document. Currently, `analyzeSentiment` only supports English text
292 * (Document.language="EN").
293 */
294 Document document;
295 /**
296 * The encoding type used by the API to calculate sentence offsets.
297 * Possible string values are:
298 * - "NONE" : If `EncodingType` is not specified, encoding-dependent
299 * information (such as
300 * `begin_offset`) will be set at `-1`.
301 * - "UTF8" : Encoding-dependent information (such as `begin_offset`) is
302 * calculated based
303 * on the UTF-8 encoding of the input. C++ and Go are examples of languages
304 * that use this encoding natively.
305 * - "UTF16" : Encoding-dependent information (such as `begin_offset`) is
306 * calculated based
307 * on the UTF-16 encoding of the input. Java and Javascript are examples of
308 * languages that use this encoding natively.
309 * - "UTF32" : Encoding-dependent information (such as `begin_offset`) is
310 * calculated based
311 * on the UTF-32 encoding of the input. Python is an example of a language
312 * that uses this encoding natively.
313 */
314 core.String encodingType;
315
316 AnalyzeSentimentRequest();
317
318 AnalyzeSentimentRequest.fromJson(core.Map _json) {
319 if (_json.containsKey("document")) {
320 document = new Document.fromJson(_json["document"]);
321 }
322 if (_json.containsKey("encodingType")) {
323 encodingType = _json["encodingType"];
324 }
325 }
326
327 core.Map toJson() {
328 var _json = new core.Map();
329 if (document != null) {
330 _json["document"] = (document).toJson();
331 }
332 if (encodingType != null) {
333 _json["encodingType"] = encodingType;
334 }
335 return _json;
336 }
337 }
338
339 /** The sentiment analysis response message. */
340 class AnalyzeSentimentResponse {
341 /** The overall sentiment of the input document. */
342 Sentiment documentSentiment;
343 /**
344 * The language of the text, which will be the same as the language specified
345 * in the request or, if not specified, the automatically-detected language.
346 * See `Document.language` field for more details.
347 */
348 core.String language;
349 /** The sentiment for all the sentences in the document. */
350 core.List<Sentence> sentences;
351
352 AnalyzeSentimentResponse();
353
354 AnalyzeSentimentResponse.fromJson(core.Map _json) {
355 if (_json.containsKey("documentSentiment")) {
356 documentSentiment = new Sentiment.fromJson(_json["documentSentiment"]);
357 }
358 if (_json.containsKey("language")) {
359 language = _json["language"];
360 }
361 if (_json.containsKey("sentences")) {
362 sentences = _json["sentences"].map((value) => new Sentence.fromJson(value)).toList();
363 }
364 }
365
366 core.Map toJson() {
367 var _json = new core.Map();
368 if (documentSentiment != null) {
369 _json["documentSentiment"] = (documentSentiment).toJson();
370 }
371 if (language != null) {
372 _json["language"] = language;
373 }
374 if (sentences != null) {
375 _json["sentences"] = sentences.map((value) => (value).toJson()).toList();
376 }
377 return _json;
378 }
379 }
380
381 /** The syntax analysis request message. */
382 class AnalyzeSyntaxRequest {
383 /** Input document. */
384 Document document;
385 /**
386 * The encoding type used by the API to calculate offsets.
387 * Possible string values are:
388 * - "NONE" : If `EncodingType` is not specified, encoding-dependent
389 * information (such as
390 * `begin_offset`) will be set at `-1`.
391 * - "UTF8" : Encoding-dependent information (such as `begin_offset`) is
392 * calculated based
393 * on the UTF-8 encoding of the input. C++ and Go are examples of languages
394 * that use this encoding natively.
395 * - "UTF16" : Encoding-dependent information (such as `begin_offset`) is
396 * calculated based
397 * on the UTF-16 encoding of the input. Java and Javascript are examples of
398 * languages that use this encoding natively.
399 * - "UTF32" : Encoding-dependent information (such as `begin_offset`) is
400 * calculated based
401 * on the UTF-32 encoding of the input. Python is an example of a language
402 * that uses this encoding natively.
403 */
404 core.String encodingType;
405
406 AnalyzeSyntaxRequest();
407
408 AnalyzeSyntaxRequest.fromJson(core.Map _json) {
409 if (_json.containsKey("document")) {
410 document = new Document.fromJson(_json["document"]);
411 }
412 if (_json.containsKey("encodingType")) {
413 encodingType = _json["encodingType"];
414 }
415 }
416
417 core.Map toJson() {
418 var _json = new core.Map();
419 if (document != null) {
420 _json["document"] = (document).toJson();
421 }
422 if (encodingType != null) {
423 _json["encodingType"] = encodingType;
424 }
425 return _json;
426 }
427 }
428
429 /** The syntax analysis response message. */
430 class AnalyzeSyntaxResponse {
431 /**
432 * The language of the text, which will be the same as the language specified
433 * in the request or, if not specified, the automatically-detected language.
434 * See `Document.language` field for more details.
435 */
436 core.String language;
437 /** Sentences in the input document. */
438 core.List<Sentence> sentences;
439 /** Tokens, along with their syntactic information, in the input document. */
440 core.List<Token> tokens;
441
442 AnalyzeSyntaxResponse();
443
444 AnalyzeSyntaxResponse.fromJson(core.Map _json) {
445 if (_json.containsKey("language")) {
446 language = _json["language"];
447 }
448 if (_json.containsKey("sentences")) {
449 sentences = _json["sentences"].map((value) => new Sentence.fromJson(value)).toList();
450 }
451 if (_json.containsKey("tokens")) {
452 tokens = _json["tokens"].map((value) => new Token.fromJson(value)).toList();
453 }
454 }
455
456 core.Map toJson() {
457 var _json = new core.Map();
458 if (language != null) {
459 _json["language"] = language;
460 }
461 if (sentences != null) {
462 _json["sentences"] = sentences.map((value) => (value).toJson()).toList();
463 }
464 if (tokens != null) {
465 _json["tokens"] = tokens.map((value) => (value).toJson()).toList();
466 }
467 return _json;
468 }
469 }
470
471 /**
472 * The request message for the text annotation API, which can perform multiple
473 * analysis types (sentiment, entities, and syntax) in one call.
474 */
475 class AnnotateTextRequest {
476 /** Input document. */
477 Document document;
478 /**
479 * The encoding type used by the API to calculate offsets.
480 * Possible string values are:
481 * - "NONE" : If `EncodingType` is not specified, encoding-dependent
482 * information (such as
483 * `begin_offset`) will be set at `-1`.
484 * - "UTF8" : Encoding-dependent information (such as `begin_offset`) is
485 * calculated based
486 * on the UTF-8 encoding of the input. C++ and Go are examples of languages
487 * that use this encoding natively.
488 * - "UTF16" : Encoding-dependent information (such as `begin_offset`) is
489 * calculated based
490 * on the UTF-16 encoding of the input. Java and Javascript are examples of
491 * languages that use this encoding natively.
492 * - "UTF32" : Encoding-dependent information (such as `begin_offset`) is
493 * calculated based
494 * on the UTF-32 encoding of the input. Python is an example of a language
495 * that uses this encoding natively.
496 */
497 core.String encodingType;
498 /** The enabled features. */
499 Features features;
500
501 AnnotateTextRequest();
502
503 AnnotateTextRequest.fromJson(core.Map _json) {
504 if (_json.containsKey("document")) {
505 document = new Document.fromJson(_json["document"]);
506 }
507 if (_json.containsKey("encodingType")) {
508 encodingType = _json["encodingType"];
509 }
510 if (_json.containsKey("features")) {
511 features = new Features.fromJson(_json["features"]);
512 }
513 }
514
515 core.Map toJson() {
516 var _json = new core.Map();
517 if (document != null) {
518 _json["document"] = (document).toJson();
519 }
520 if (encodingType != null) {
521 _json["encodingType"] = encodingType;
522 }
523 if (features != null) {
524 _json["features"] = (features).toJson();
525 }
526 return _json;
527 }
528 }
529
530 /** The text annotations response message. */
531 class AnnotateTextResponse {
532 /**
533 * The overall sentiment for the document. Populated if the user enables
534 * AnnotateTextRequest.Features.extract_document_sentiment.
535 */
536 Sentiment documentSentiment;
537 /**
538 * Entities, along with their semantic information, in the input document.
539 * Populated if the user enables
540 * AnnotateTextRequest.Features.extract_entities.
541 */
542 core.List<Entity> entities;
543 /**
544 * The language of the text, which will be the same as the language specified
545 * in the request or, if not specified, the automatically-detected language.
546 * See `Document.language` field for more details.
547 */
548 core.String language;
549 /**
550 * Sentences in the input document. Populated if the user enables
551 * AnnotateTextRequest.Features.extract_syntax.
552 */
553 core.List<Sentence> sentences;
554 /**
555 * Tokens, along with their syntactic information, in the input document.
556 * Populated if the user enables
557 * AnnotateTextRequest.Features.extract_syntax.
558 */
559 core.List<Token> tokens;
560
561 AnnotateTextResponse();
562
563 AnnotateTextResponse.fromJson(core.Map _json) {
564 if (_json.containsKey("documentSentiment")) {
565 documentSentiment = new Sentiment.fromJson(_json["documentSentiment"]);
566 }
567 if (_json.containsKey("entities")) {
568 entities = _json["entities"].map((value) => new Entity.fromJson(value)).toList();
569 }
570 if (_json.containsKey("language")) {
571 language = _json["language"];
572 }
573 if (_json.containsKey("sentences")) {
574 sentences = _json["sentences"].map((value) => new Sentence.fromJson(value)).toList();
575 }
576 if (_json.containsKey("tokens")) {
577 tokens = _json["tokens"].map((value) => new Token.fromJson(value)).toList();
578 }
579 }
580
581 core.Map toJson() {
582 var _json = new core.Map();
583 if (documentSentiment != null) {
584 _json["documentSentiment"] = (documentSentiment).toJson();
585 }
586 if (entities != null) {
587 _json["entities"] = entities.map((value) => (value).toJson()).toList();
588 }
589 if (language != null) {
590 _json["language"] = language;
591 }
592 if (sentences != null) {
593 _json["sentences"] = sentences.map((value) => (value).toJson()).toList();
594 }
595 if (tokens != null) {
596 _json["tokens"] = tokens.map((value) => (value).toJson()).toList();
597 }
598 return _json;
599 }
600 }
601
602 /**
603 * Represents dependency parse tree information for a token. (For more
604 * information on dependency labels, see
605 * http://www.aclweb.org/anthology/P13-2017).
606 */
607 class DependencyEdge {
608 /**
609 * Represents the head of this token in the dependency tree.
610 * This is the index of the token which has an arc going to this token.
611 * The index is the position of the token in the array of tokens returned
612 * by the API method. If this token is a root token, then the
613 * `head_token_index` is its own index.
614 */
615 core.int headTokenIndex;
616 /**
617 * The parse label for the token.
618 * Possible string values are:
619 * - "UNKNOWN" : Unknown
620 * - "ABBREV" : Abbreviation modifier
621 * - "ACOMP" : Adjectival complement
622 * - "ADVCL" : Adverbial clause modifier
623 * - "ADVMOD" : Adverbial modifier
624 * - "AMOD" : Adjectival modifier of an NP
625 * - "APPOS" : Appositional modifier of an NP
626 * - "ATTR" : Attribute dependent of a copular verb
627 * - "AUX" : Auxiliary (non-main) verb
628 * - "AUXPASS" : Passive auxiliary
629 * - "CC" : Coordinating conjunction
630 * - "CCOMP" : Clausal complement of a verb or adjective
631 * - "CONJ" : Conjunct
632 * - "CSUBJ" : Clausal subject
633 * - "CSUBJPASS" : Clausal passive subject
634 * - "DEP" : Dependency (unable to determine)
635 * - "DET" : Determiner
636 * - "DISCOURSE" : Discourse
637 * - "DOBJ" : Direct object
638 * - "EXPL" : Expletive
639 * - "GOESWITH" : Goes with (part of a word in a text not well edited)
640 * - "IOBJ" : Indirect object
641 * - "MARK" : Marker (word introducing a subordinate clause)
642 * - "MWE" : Multi-word expression
643 * - "MWV" : Multi-word verbal expression
644 * - "NEG" : Negation modifier
645 * - "NN" : Noun compound modifier
646 * - "NPADVMOD" : Noun phrase used as an adverbial modifier
647 * - "NSUBJ" : Nominal subject
648 * - "NSUBJPASS" : Passive nominal subject
649 * - "NUM" : Numeric modifier of a noun
650 * - "NUMBER" : Element of compound number
651 * - "P" : Punctuation mark
652 * - "PARATAXIS" : Parataxis relation
653 * - "PARTMOD" : Participial modifier
654 * - "PCOMP" : The complement of a preposition is a clause
655 * - "POBJ" : Object of a preposition
656 * - "POSS" : Possession modifier
657 * - "POSTNEG" : Postverbal negative particle
658 * - "PRECOMP" : Predicate complement
659 * - "PRECONJ" : Preconjunct
660 * - "PREDET" : Predeterminer
661 * - "PREF" : Prefix
662 * - "PREP" : Prepositional modifier
663 * - "PRONL" : The relationship between a verb and verbal morpheme
664 * - "PRT" : Particle
665 * - "PS" : Associative or possessive marker
666 * - "QUANTMOD" : Quantifier phrase modifier
667 * - "RCMOD" : Relative clause modifier
668 * - "RCMODREL" : Complementizer in relative clause
669 * - "RDROP" : Ellipsis without a preceding predicate
670 * - "REF" : Referent
671 * - "REMNANT" : Remnant
672 * - "REPARANDUM" : Reparandum
673 * - "ROOT" : Root
674 * - "SNUM" : Suffix specifying a unit of number
675 * - "SUFF" : Suffix
676 * - "TMOD" : Temporal modifier
677 * - "TOPIC" : Topic marker
678 * - "VMOD" : Clause headed by an infinite form of the verb that modifies a
679 * noun
680 * - "VOCATIVE" : Vocative
681 * - "XCOMP" : Open clausal complement
682 * - "SUFFIX" : Name suffix
683 * - "TITLE" : Name title
684 * - "ADVPHMOD" : Adverbial phrase modifier
685 * - "AUXCAUS" : Causative auxiliary
686 * - "AUXVV" : Helper auxiliary
687 * - "DTMOD" : Rentaishi (Prenominal modifier)
688 * - "FOREIGN" : Foreign words
689 * - "KW" : Keyword
690 * - "LIST" : List for chains of comparable items
691 * - "NOMC" : Nominalized clause
692 * - "NOMCSUBJ" : Nominalized clausal subject
693 * - "NOMCSUBJPASS" : Nominalized clausal passive
694 * - "NUMC" : Compound of numeric modifier
695 * - "COP" : Copula
696 * - "DISLOCATED" : Dislocated relation (for fronted/topicalized elements)
697 */
698 core.String label;
699
700 DependencyEdge();
701
702 DependencyEdge.fromJson(core.Map _json) {
703 if (_json.containsKey("headTokenIndex")) {
704 headTokenIndex = _json["headTokenIndex"];
705 }
706 if (_json.containsKey("label")) {
707 label = _json["label"];
708 }
709 }
710
711 core.Map toJson() {
712 var _json = new core.Map();
713 if (headTokenIndex != null) {
714 _json["headTokenIndex"] = headTokenIndex;
715 }
716 if (label != null) {
717 _json["label"] = label;
718 }
719 return _json;
720 }
721 }
722
723 /** Represents the input to API methods. */
728 class Document {
729 /** The content of the input in string format. */
730 core.String content;
731 /**
732 * The Google Cloud Storage URI where the file content is located.
733 * This URI must be of the form: gs://bucket_name/object_name. For more
734 * details, see https://cloud.google.com/storage/docs/reference-uris.
735 * NOTE: Cloud Storage object versioning is not supported.
736 */
737 core.String gcsContentUri;
738 /**
739 * The language of the document (if not specified, the language is
740 * automatically detected). Both ISO and BCP-47 language codes are
741 * accepted.<br>
742 * **Current Language Restrictions:**
743 *
744 * * Only English, Spanish, and Japanese textual content are supported.
745 * If the language (either specified by the caller or automatically detected)
746 * is not supported by the called API method, an `INVALID_ARGUMENT` error
747 * is returned.
748 */
749 core.String language;
750 /**
751 * Required. If the type is not set or is `TYPE_UNSPECIFIED`,
752 * returns an `INVALID_ARGUMENT` error.
753 * Possible string values are:
754 * - "TYPE_UNSPECIFIED" : The content type is not specified.
755 * - "PLAIN_TEXT" : Plain text
756 * - "HTML" : HTML
757 */
758 core.String type;
759
760 Document();
761
762 Document.fromJson(core.Map _json) {
763 if (_json.containsKey("content")) {
764 content = _json["content"];
765 }
766 if (_json.containsKey("gcsContentUri")) {
767 gcsContentUri = _json["gcsContentUri"];
768 }
769 if (_json.containsKey("language")) {
770 language = _json["language"];
771 }
772 if (_json.containsKey("type")) {
773 type = _json["type"];
774 }
775 }
776
777 core.Map toJson() {
778 var _json = new core.Map();
779 if (content != null) {
780 _json["content"] = content;
781 }
782 if (gcsContentUri != null) {
783 _json["gcsContentUri"] = gcsContentUri;
784 }
785 if (language != null) {
786 _json["language"] = language;
787 }
788 if (type != null) {
789 _json["type"] = type;
790 }
791 return _json;
792 }
793 }
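// Usage sketch (editorial example, not part of the generated file): a Document
// supplies its text either inline via `content` or by reference via
// `gcsContentUri`; `type` must always be set. The bucket and object names below
// are placeholders, and the helper functions are illustrative.
Document exampleInlineDocument(core.String text) => new Document()
    ..type = "PLAIN_TEXT"
    ..content = text;

Document exampleGcsDocument() => new Document()
    ..type = "PLAIN_TEXT"
    ..gcsContentUri = "gs://example-bucket/example-object.txt";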
794
795 /**
796 * Represents a phrase in the text that is a known entity, such as
797 * a person, an organization, or location. The API associates information, such
798 * as salience and mentions, with entities.
799 */
800 class Entity {
801 /**
802 * The mentions of this entity in the input document. The API currently
803 * supports proper noun mentions.
804 */
805 core.List<EntityMention> mentions;
806 /**
807 * Metadata associated with the entity.
808 *
809 * Currently, Wikipedia URLs and Knowledge Graph MIDs are provided, if
810 * available. The associated keys are "wikipedia_url" and "mid", respectively.
811 */
812 core.Map<core.String, core.String> metadata;
813 /** The representative name for the entity. */
814 core.String name;
815 /**
816 * The salience score associated with the entity in the [0, 1.0] range.
817 *
818 * The salience score for an entity provides information about the
819 * importance or centrality of that entity to the entire document text.
820 * Scores closer to 0 are less salient, while scores closer to 1.0 are highly
821 * salient.
822 */
823 core.double salience;
824 /**
825 * The entity type.
826 * Possible string values are:
827 * - "UNKNOWN" : Unknown
828 * - "PERSON" : Person
829 * - "LOCATION" : Location
830 * - "ORGANIZATION" : Organization
831 * - "EVENT" : Event
832 * - "WORK_OF_ART" : Work of art
833 * - "CONSUMER_GOOD" : Consumer goods
834 * - "OTHER" : Other types
835 */
836 core.String type;
837
838 Entity();
839
840 Entity.fromJson(core.Map _json) {
841 if (_json.containsKey("mentions")) {
842 mentions = _json["mentions"].map((value) => new EntityMention.fromJson(value)).toList();
843 }
844 if (_json.containsKey("metadata")) {
845 metadata = _json["metadata"];
846 }
847 if (_json.containsKey("name")) {
848 name = _json["name"];
849 }
850 if (_json.containsKey("salience")) {
851 salience = _json["salience"];
852 }
853 if (_json.containsKey("type")) {
854 type = _json["type"];
855 }
856 }
857
858 core.Map toJson() {
859 var _json = new core.Map();
860 if (mentions != null) {
861 _json["mentions"] = mentions.map((value) => (value).toJson()).toList();
862 }
863 if (metadata != null) {
864 _json["metadata"] = metadata;
865 }
866 if (name != null) {
867 _json["name"] = name;
868 }
869 if (salience != null) {
870 _json["salience"] = salience;
871 }
872 if (type != null) {
873 _json["type"] = type;
874 }
875 return _json;
876 }
877 }
878
879 /**
880 * Represents a mention for an entity in the text. Currently, proper noun
881 * mentions are supported.
882 */
883 class EntityMention {
884 /** The mention text. */
885 TextSpan text;
886 /**
887 * The type of the entity mention.
888 * Possible string values are:
889 * - "TYPE_UNKNOWN" : Unknown
890 * - "PROPER" : Proper name
891 * - "COMMON" : Common noun (or noun compound)
892 */
893 core.String type;
894
895 EntityMention();
896
897 EntityMention.fromJson(core.Map _json) {
898 if (_json.containsKey("text")) {
899 text = new TextSpan.fromJson(_json["text"]);
900 }
901 if (_json.containsKey("type")) {
902 type = _json["type"];
903 }
904 }
905
906 core.Map toJson() {
907 var _json = new core.Map();
908 if (text != null) {
909 _json["text"] = (text).toJson();
910 }
911 if (type != null) {
912 _json["type"] = type;
913 }
914 return _json;
915 }
916 }
917
918 /**
919 * All available features for sentiment, syntax, and semantic analysis.
920 * Setting each one to true will enable that specific analysis for the input.
921 */
922 class Features {
923 /** Extract document-level sentiment. */
924 core.bool extractDocumentSentiment;
925 /** Extract entities. */
926 core.bool extractEntities;
927 /** Extract syntax information. */
928 core.bool extractSyntax;
929
930 Features();
931
932 Features.fromJson(core.Map _json) {
933 if (_json.containsKey("extractDocumentSentiment")) {
934 extractDocumentSentiment = _json["extractDocumentSentiment"];
935 }
936 if (_json.containsKey("extractEntities")) {
937 extractEntities = _json["extractEntities"];
938 }
939 if (_json.containsKey("extractSyntax")) {
940 extractSyntax = _json["extractSyntax"];
941 }
942 }
943
944 core.Map toJson() {
945 var _json = new core.Map();
946 if (extractDocumentSentiment != null) {
947 _json["extractDocumentSentiment"] = extractDocumentSentiment;
948 }
949 if (extractEntities != null) {
950 _json["extractEntities"] = extractEntities;
951 }
952 if (extractSyntax != null) {
953 _json["extractSyntax"] = extractSyntax;
954 }
955 return _json;
956 }
957 }
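// Usage sketch (editorial example, not part of the generated file): enable all
// three analyses in a single annotateText call; names are illustrative.
async.Future exampleAnnotateAll(DocumentsResourceApi documents, Document document) {
  var request = new AnnotateTextRequest()
    ..document = document
    ..encodingType = "UTF16"
    ..features = (new Features()
      ..extractDocumentSentiment = true
      ..extractEntities = true
      ..extractSyntax = true);
  return documents.annotateText(request).then((response) {
    core.print("sentences=${response.sentences.length} "
        "entities=${response.entities.length} tokens=${response.tokens.length}");
    return response;
  });
}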
958
959 /**
960 * Represents part of speech information for a token. Parts of speech
961 * are as defined in
962 * http://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf
963 */
964 class PartOfSpeech {
965 /**
966 * The grammatical aspect.
967 * Possible string values are:
968 * - "ASPECT_UNKNOWN" : Aspect is not applicable in the analyzed language or
969 * is not predicted.
970 * - "PERFECTIVE" : Perfective
971 * - "IMPERFECTIVE" : Imperfective
972 * - "PROGRESSIVE" : Progressive
973 */
974 core.String aspect;
975 /**
976 * The grammatical case.
977 * Possible string values are:
978 * - "CASE_UNKNOWN" : Case is not applicable in the analyzed language or is
979 * not predicted.
980 * - "ACCUSATIVE" : Accusative
981 * - "ADVERBIAL" : Adverbial
982 * - "COMPLEMENTIVE" : Complementive
983 * - "DATIVE" : Dative
984 * - "GENITIVE" : Genitive
985 * - "INSTRUMENTAL" : Instrumental
986 * - "LOCATIVE" : Locative
987 * - "NOMINATIVE" : Nominative
988 * - "OBLIQUE" : Oblique
989 * - "PARTITIVE" : Partitive
990 * - "PREPOSITIONAL" : Prepositional
991 * - "REFLEXIVE_CASE" : Reflexive
992 * - "RELATIVE_CASE" : Relative
993 * - "VOCATIVE" : Vocative
994 */
995 core.String case_;
996 /**
997 * The grammatical form.
998 * Possible string values are:
999 * - "FORM_UNKNOWN" : Form is not applicable in the analyzed language or is
1000 * not predicted.
1001 * - "ADNOMIAL" : Adnomial
1002 * - "AUXILIARY" : Auxiliary
1003 * - "COMPLEMENTIZER" : Complementizer
1004 * - "FINAL_ENDING" : Final ending
1005 * - "GERUND" : Gerund
1006 * - "REALIS" : Realis
1007 * - "IRREALIS" : Irrealis
1008 * - "SHORT" : Short form
1009 * - "LONG" : Long form
1010 * - "ORDER" : Order form
1011 * - "SPECIFIC" : Specific form
1012 */
1013 core.String form;
1014 /**
1015 * The grammatical gender.
1016 * Possible string values are:
1017 * - "GENDER_UNKNOWN" : Gender is not applicable in the analyzed language or
1018 * is not predicted.
1019 * - "FEMININE" : Feminine
1020 * - "MASCULINE" : Masculine
1021 * - "NEUTER" : Neuter
1022 */
1023 core.String gender;
1024 /**
1025 * The grammatical mood.
1026 * Possible string values are:
1027 * - "MOOD_UNKNOWN" : Mood is not applicable in the analyzed language or is
1028 * not predicted.
1029 * - "CONDITIONAL_MOOD" : Conditional
1030 * - "IMPERATIVE" : Imperative
1031 * - "INDICATIVE" : Indicative
1032 * - "INTERROGATIVE" : Interrogative
1033 * - "JUSSIVE" : Jussive
1034 * - "SUBJUNCTIVE" : Subjunctive
1035 */
1036 core.String mood;
1037 /**
1038 * The grammatical number.
1039 * Possible string values are:
1040 * - "NUMBER_UNKNOWN" : Number is not applicable in the analyzed language or
1041 * is not predicted.
1042 * - "SINGULAR" : Singular
1043 * - "PLURAL" : Plural
1044 * - "DUAL" : Dual
1045 */
1046 core.String number;
1047 /**
1048 * The grammatical person.
1049 * Possible string values are:
1050 * - "PERSON_UNKNOWN" : Person is not applicable in the analyzed language or
1051 * is not predicted.
1052 * - "FIRST" : First
1053 * - "SECOND" : Second
1054 * - "THIRD" : Third
1055 * - "REFLEXIVE_PERSON" : Reflexive
1056 */
1057 core.String person;
1058 /**
1059 * The grammatical properness.
1060 * Possible string values are:
1061 * - "PROPER_UNKNOWN" : Proper is not applicable in the analyzed language or
1062 * is not predicted.
1063 * - "PROPER" : Proper
1064 * - "NOT_PROPER" : Not proper
1065 */
1066 core.String proper;
1067 /**
1068 * The grammatical reciprocity.
1069 * Possible string values are:
1070 * - "RECIPROCITY_UNKNOWN" : Reciprocity is not applicable in the analyzed
1071 * language or is not
1072 * predicted.
1073 * - "RECIPROCAL" : Reciprocal
1074 * - "NON_RECIPROCAL" : Non-reciprocal
1075 */
1076 core.String reciprocity;
1077 /**
1078 * The part of speech tag.
1079 * Possible string values are:
1080 * - "UNKNOWN" : Unknown
1081 * - "ADJ" : Adjective
1082 * - "ADP" : Adposition (preposition and postposition)
1083 * - "ADV" : Adverb
1084 * - "CONJ" : Conjunction
1085 * - "DET" : Determiner
1086 * - "NOUN" : Noun (common and proper)
1087 * - "NUM" : Cardinal number
1088 * - "PRON" : Pronoun
1089 * - "PRT" : Particle or other function word
1090 * - "PUNCT" : Punctuation
1091 * - "VERB" : Verb (all tenses and modes)
1092 * - "X" : Other: foreign words, typos, abbreviations
1093 * - "AFFIX" : Affix
1094 */
1095 core.String tag;
1096 /**
1097 * The grammatical tense.
1098 * Possible string values are:
1099 * - "TENSE_UNKNOWN" : Tense is not applicable in the analyzed language or is
1100 * not predicted.
1101 * - "CONDITIONAL_TENSE" : Conditional
1102 * - "FUTURE" : Future
1103 * - "PAST" : Past
1104 * - "PRESENT" : Present
1105 * - "IMPERFECT" : Imperfect
1106 * - "PLUPERFECT" : Pluperfect
1107 */
1108 core.String tense;
1109 /**
1110 * The grammatical voice.
1111 * Possible string values are:
1112 * - "VOICE_UNKNOWN" : Voice is not applicable in the analyzed language or is
1113 * not predicted.
1114 * - "ACTIVE" : Active
1115 * - "CAUSATIVE" : Causative
1116 * - "PASSIVE" : Passive
1117 */
1118 core.String voice;
1119
1120 PartOfSpeech();
1121
1122 PartOfSpeech.fromJson(core.Map _json) {
1123 if (_json.containsKey("aspect")) {
1124 aspect = _json["aspect"];
1125 }
1126 if (_json.containsKey("case")) {
1127 case_ = _json["case"];
1128 }
1129 if (_json.containsKey("form")) {
1130 form = _json["form"];
1131 }
1132 if (_json.containsKey("gender")) {
1133 gender = _json["gender"];
1134 }
1135 if (_json.containsKey("mood")) {
1136 mood = _json["mood"];
1137 }
1138 if (_json.containsKey("number")) {
1139 number = _json["number"];
1140 }
1141 if (_json.containsKey("person")) {
1142 person = _json["person"];
1143 }
1144 if (_json.containsKey("proper")) {
1145 proper = _json["proper"];
1146 }
1147 if (_json.containsKey("reciprocity")) {
1148 reciprocity = _json["reciprocity"];
1149 }
1150 if (_json.containsKey("tag")) {
1151 tag = _json["tag"];
1152 }
1153 if (_json.containsKey("tense")) {
1154 tense = _json["tense"];
1155 }
1156 if (_json.containsKey("voice")) {
1157 voice = _json["voice"];
1158 }
1159 }
1160
1161 core.Map toJson() {
1162 var _json = new core.Map();
1163 if (aspect != null) {
1164 _json["aspect"] = aspect;
1165 }
1166 if (case_ != null) {
1167 _json["case"] = case_;
1168 }
1169 if (form != null) {
1170 _json["form"] = form;
1171 }
1172 if (gender != null) {
1173 _json["gender"] = gender;
1174 }
1175 if (mood != null) {
1176 _json["mood"] = mood;
1177 }
1178 if (number != null) {
1179 _json["number"] = number;
1180 }
1181 if (person != null) {
1182 _json["person"] = person;
1183 }
1184 if (proper != null) {
1185 _json["proper"] = proper;
1186 }
1187 if (reciprocity != null) {
1188 _json["reciprocity"] = reciprocity;
1189 }
1190 if (tag != null) {
1191 _json["tag"] = tag;
1192 }
1193 if (tense != null) {
1194 _json["tense"] = tense;
1195 }
1196 if (voice != null) {
1197 _json["voice"] = voice;
1198 }
1199 return _json;
1200 }
1201 }
1202
1203 /** Represents a sentence in the input document. */
1204 class Sentence {
1205 /**
1206 * For calls to AnalyzeSentiment or if
1207 * AnnotateTextRequest.Features.extract_document_sentiment is set to
1208 * true, this field will contain the sentiment for the sentence.
1209 */
1210 Sentiment sentiment;
1211 /** The sentence text. */
1212 TextSpan text;
1213
1214 Sentence();
1215
1216 Sentence.fromJson(core.Map _json) {
1217 if (_json.containsKey("sentiment")) {
1218 sentiment = new Sentiment.fromJson(_json["sentiment"]);
1219 }
1220 if (_json.containsKey("text")) {
1221 text = new TextSpan.fromJson(_json["text"]);
1222 }
1223 }
1224
1225 core.Map toJson() {
1226 var _json = new core.Map();
1227 if (sentiment != null) {
1228 _json["sentiment"] = (sentiment).toJson();
1229 }
1230 if (text != null) {
1231 _json["text"] = (text).toJson();
1232 }
1233 return _json;
1234 }
1235 }
1236
1237 /**
1238 * Represents the feeling associated with the entire text or entities in
1239 * the text.
1240 */
1241 class Sentiment {
1242 /**
1243 * A non-negative number in the [0, +inf) range, which represents
1244 * the absolute magnitude of sentiment regardless of score (positive or
1245 * negative).
1246 */
1247 core.double magnitude;
1248 /**
1249 * Sentiment score between -1.0 (negative sentiment) and 1.0
1250 * (positive sentiment).
1251 */
1252 core.double score;
1253
1254 Sentiment();
1255
1256 Sentiment.fromJson(core.Map _json) {
1257 if (_json.containsKey("magnitude")) {
1258 magnitude = _json["magnitude"];
1259 }
1260 if (_json.containsKey("score")) {
1261 score = _json["score"];
1262 }
1263 }
1264
1265 core.Map toJson() {
1266 var _json = new core.Map();
1267 if (magnitude != null) {
1268 _json["magnitude"] = magnitude;
1269 }
1270 if (score != null) {
1271 _json["score"] = score;
1272 }
1273 return _json;
1274 }
1275 }
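// Interpretation sketch (editorial example, not part of the generated file):
// `score` captures polarity while `magnitude` captures the overall strength of
// emotion. The 0.25 and 1.0 cut-offs below are illustrative choices, not API
// constants.
core.String exampleDescribeSentiment(Sentiment sentiment) {
  if (sentiment.score > 0.25) return "positive";
  if (sentiment.score < -0.25) return "negative";
  // A near-zero score with high magnitude suggests mixed sentiment.
  return sentiment.magnitude > 1.0 ? "mixed" : "neutral";
}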
1276
1277 /**
1278 * The `Status` type defines a logical error model that is suitable for different
1279 * programming environments, including REST APIs and RPC APIs. It is used by
1281 * [gRPC](https://github.com/grpc). The error model is designed to be:
1282 *
1283 * - Simple to use and understand for most users
1284 * - Flexible enough to meet unexpected needs
1285 *
1286 * # Overview
1287 *
1288 * The `Status` message contains three pieces of data: error code, error message,
1289 * and error details. The error code should be an enum value of
1291 * google.rpc.Code, but it may accept additional error codes if needed. The
1292 * error message should be a developer-facing English message that helps
1293 * developers *understand* and *resolve* the error. If a localized user-facing
1294 * error message is needed, put the localized message in the error details or
1295 * localize it in the client. The optional error details may contain arbitrary
1296 * information about the error. There is a predefined set of error detail types
1297 * in the package `google.rpc` which can be used for common error conditions.
1298 *
1299 * # Language mapping
1300 *
1301 * The `Status` message is the logical representation of the error model, but it
1302 * is not necessarily the actual wire format. When the `Status` message is
1303 * exposed in different client libraries and different wire protocols, it can be
1304 * mapped differently. For example, it will likely be mapped to some exceptions
1305 * in Java, but more likely mapped to some error codes in C.
1306 *
1307 * # Other uses
1308 *
1309 * The error model and the `Status` message can be used in a variety of
1310 * environments, either with or without APIs, to provide a
1311 * consistent developer experience across different environments.
1312 *
1313 * Example uses of this error model include:
1314 *
1315 * - Partial errors. If a service needs to return partial errors to the client,
1316 * it may embed the `Status` in the normal response to indicate the partial
1317 * errors.
1318 *
1319 * - Workflow errors. A typical workflow has multiple steps. Each step may
1320 * have a `Status` message for error reporting purpose.
1321 *
1322 * - Batch operations. If a client uses batch request and batch response, the
1323 * `Status` message should be used directly inside batch response, one for
1324 * each error sub-response.
1325 *
1326 * - Asynchronous operations. If an API call embeds asynchronous operation
1327 * results in its response, the status of those operations should be
1328 * represented directly using the `Status` message.
1329 *
1330 * - Logging. If some API errors are stored in logs, the message `Status` could
1331 * be used directly after any stripping needed for security/privacy reasons.
1332 */
1333 class Status {
1334 /** The status code, which should be an enum value of google.rpc.Code. */
1335 core.int code;
1336 /**
1337 * A list of messages that carry the error details. There will be a
1338 * common set of message types for APIs to use.
1339 *
1340 * The values for Object must be JSON objects. It can consist of `num`,
1341 * `String`, `bool` and `null` as well as `Map` and `List` values.
1342 */
1343 core.List<core.Map<core.String, core.Object>> details;
1344 /**
1345 * A developer-facing error message, which should be in English. Any
1346 * user-facing error message should be localized and sent in the
1347 * google.rpc.Status.details field, or localized by the client.
1348 */
1349 core.String message;
1350
1351 Status();
1352
1353 Status.fromJson(core.Map _json) {
1354 if (_json.containsKey("code")) {
1355 code = _json["code"];
1356 }
1357 if (_json.containsKey("details")) {
1358 details = _json["details"];
1359 }
1360 if (_json.containsKey("message")) {
1361 message = _json["message"];
1362 }
1363 }
1364
1365 core.Map toJson() {
1366 var _json = new core.Map();
1367 if (code != null) {
1368 _json["code"] = code;
1369 }
1370 if (details != null) {
1371 _json["details"] = details;
1372 }
1373 if (message != null) {
1374 _json["message"] = message;
1375 }
1376 return _json;
1377 }
1378 }
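// Usage sketch (editorial example, not part of the generated file): endpoint
// errors complete the returned future with a commons.ApiRequestError (or its
// detailed variant), both re-exported at the top of this library; the function
// name is illustrative.
async.Future exampleWithErrorHandling(DocumentsResourceApi documents,
    AnalyzeSentimentRequest request) {
  return documents.analyzeSentiment(request).catchError((error) {
    core.print("API request failed: $error");
    return null;
  }, test: (error) => error is commons.ApiRequestError);
}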
1379
1380 /** Represents an output piece of text. */
1381 class TextSpan {
1382 /**
1383 * The API calculates the beginning offset of the content in the original
1384 * document according to the EncodingType specified in the API request.
1385 */
1386 core.int beginOffset;
1387 /** The content of the output text. */
1388 core.String content;
1389
1390 TextSpan();
1391
1392 TextSpan.fromJson(core.Map _json) {
1393 if (_json.containsKey("beginOffset")) {
1394 beginOffset = _json["beginOffset"];
1395 }
1396 if (_json.containsKey("content")) {
1397 content = _json["content"];
1398 }
1399 }
1400
1401 core.Map toJson() {
1402 var _json = new core.Map();
1403 if (beginOffset != null) {
1404 _json["beginOffset"] = beginOffset;
1405 }
1406 if (content != null) {
1407 _json["content"] = content;
1408 }
1409 return _json;
1410 }
1411 }
1412
1413 /** Represents the smallest syntactic building block of the text. */
1414 class Token {
1415 /** Dependency tree parse for this token. */
1416 DependencyEdge dependencyEdge;
1417 /**
1418 * [Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token.
1419 */
1420 core.String lemma;
1421 /** Parts of speech tag for this token. */
1422 PartOfSpeech partOfSpeech;
1423 /** The token text. */
1424 TextSpan text;
1425
1426 Token();
1427
1428 Token.fromJson(core.Map _json) {
1429 if (_json.containsKey("dependencyEdge")) {
1430 dependencyEdge = new DependencyEdge.fromJson(_json["dependencyEdge"]);
1431 }
1432 if (_json.containsKey("lemma")) {
1433 lemma = _json["lemma"];
1434 }
1435 if (_json.containsKey("partOfSpeech")) {
1436 partOfSpeech = new PartOfSpeech.fromJson(_json["partOfSpeech"]);
1437 }
1438 if (_json.containsKey("text")) {
1439 text = new TextSpan.fromJson(_json["text"]);
1440 }
1441 }
1442
1443 core.Map toJson() {
1444 var _json = new core.Map();
1445 if (dependencyEdge != null) {
1446 _json["dependencyEdge"] = (dependencyEdge).toJson();
1447 }
1448 if (lemma != null) {
1449 _json["lemma"] = lemma;
1450 }
1451 if (partOfSpeech != null) {
1452 _json["partOfSpeech"] = (partOfSpeech).toJson();
1453 }
1454 if (text != null) {
1455 _json["text"] = (text).toJson();
1456 }
1457 return _json;
1458 }
1459 }
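// Usage sketch (editorial example, not part of the generated file): resolve
// each token's syntactic head from an AnalyzeSyntaxResponse. headTokenIndex
// indexes into the same token list; a root token points at itself. The function
// name is illustrative.
void examplePrintDependencies(AnalyzeSyntaxResponse response) {
  var tokens = response.tokens;
  for (var i = 0; i < tokens.length; i++) {
    var token = tokens[i];
    var head = tokens[token.dependencyEdge.headTokenIndex];
    core.print("${token.text.content} --${token.dependencyEdge.label}--> "
        "${head.text.content}");
  }
}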