Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(282)

Side by Side Diff: generated/googleapis_beta/lib/speech/v1beta1.dart

Issue 2695743002: Api-roll 45: 2017-02-13 (Closed)
Patch Set: reverted local changes to pubspec file Created 3 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // This is a generated file (see the discoveryapis_generator project). 1 // This is a generated file (see the discoveryapis_generator project).
2 2
3 library googleapis_beta.speech.v1beta1; 3 library googleapis_beta.speech.v1beta1;
4 4
5 import 'dart:core' as core; 5 import 'dart:core' as core;
6 import 'dart:async' as async; 6 import 'dart:async' as async;
7 import 'dart:convert' as convert; 7 import 'dart:convert' as convert;
8 8
9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; 9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons;
10 import 'package:http/http.dart' as http; 10 import 'package:http/http.dart' as http;
(...skipping 30 matching lines...) Expand all
41 * makes a best effort to cancel the operation, but success is not 41 * makes a best effort to cancel the operation, but success is not
42 * guaranteed. If the server doesn't support this method, it returns 42 * guaranteed. If the server doesn't support this method, it returns
43 * `google.rpc.Code.UNIMPLEMENTED`. Clients can use 43 * `google.rpc.Code.UNIMPLEMENTED`. Clients can use
44 * Operations.GetOperation or 44 * Operations.GetOperation or
45 * other methods to check whether the cancellation succeeded or whether the 45 * other methods to check whether the cancellation succeeded or whether the
46 * operation completed despite cancellation. On successful cancellation, 46 * operation completed despite cancellation. On successful cancellation,
47 * the operation is not deleted; instead, it becomes an operation with 47 * the operation is not deleted; instead, it becomes an operation with
48 * an Operation.error value with a google.rpc.Status.code of 1, 48 * an Operation.error value with a google.rpc.Status.code of 1,
49 * corresponding to `Code.CANCELLED`. 49 * corresponding to `Code.CANCELLED`.
50 * 50 *
51 * [request] - The metadata request object.
52 *
53 * Request parameters: 51 * Request parameters:
54 * 52 *
55 * [name] - The name of the operation resource to be cancelled. 53 * [name] - The name of the operation resource to be cancelled.
56 * Value must have pattern "^[^/]+$". 54 * Value must have pattern "^[^/]+$".
57 * 55 *
58 * Completes with a [Empty]. 56 * Completes with a [Empty].
59 * 57 *
60 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 58 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
61 * error. 59 * error.
62 * 60 *
63 * If the used [http.Client] completes with an error when making a REST call, 61 * If the used [http.Client] completes with an error when making a REST call,
64 * this method will complete with the same error. 62 * this method will complete with the same error.
65 */ 63 */
66 async.Future<Empty> cancel(CancelOperationRequest request, core.String name) { 64 async.Future<Empty> cancel(core.String name) {
67 var _url = null; 65 var _url = null;
68 var _queryParams = new core.Map(); 66 var _queryParams = new core.Map();
69 var _uploadMedia = null; 67 var _uploadMedia = null;
70 var _uploadOptions = null; 68 var _uploadOptions = null;
71 var _downloadOptions = commons.DownloadOptions.Metadata; 69 var _downloadOptions = commons.DownloadOptions.Metadata;
72 var _body = null; 70 var _body = null;
73 71
74 if (request != null) {
75 _body = convert.JSON.encode((request).toJson());
76 }
77 if (name == null) { 72 if (name == null) {
78 throw new core.ArgumentError("Parameter name is required."); 73 throw new core.ArgumentError("Parameter name is required.");
79 } 74 }
80 75
81 _url = 'v1beta1/operations/' + commons.Escaper.ecapeVariableReserved('$name') + ':cancel'; 76 _url = 'v1beta1/operations/' + commons.Escaper.ecapeVariableReserved('$name') + ':cancel';
82 77
83 var _response = _requester.request(_url, 78 var _response = _requester.request(_url,
84 "POST", 79 "POST",
85 body: _body, 80 body: _body,
86 queryParams: _queryParams, 81 queryParams: _queryParams,
(...skipping 90 matching lines...) Expand 10 before | Expand all | Expand 10 after
177 172
178 /** 173 /**
179 * Lists operations that match the specified filter in the request. If the 174 * Lists operations that match the specified filter in the request. If the
180 * server doesn't support this method, it returns `UNIMPLEMENTED`. 175 * server doesn't support this method, it returns `UNIMPLEMENTED`.
181 * 176 *
182 * NOTE: the `name` binding below allows API services to override the binding 177 * NOTE: the `name` binding below allows API services to override the binding
183 * to use different resource name schemes, such as `users / * /operations`. 178 * to use different resource name schemes, such as `users / * /operations`.
184 * 179 *
185 * Request parameters: 180 * Request parameters:
186 * 181 *
182 * [name] - The name of the operation collection.
183 *
184 * [pageToken] - The standard list page token.
185 *
187 * [pageSize] - The standard list page size. 186 * [pageSize] - The standard list page size.
188 * 187 *
189 * [filter] - The standard list filter. 188 * [filter] - The standard list filter.
190 * 189 *
191 * [name] - The name of the operation collection.
192 *
193 * [pageToken] - The standard list page token.
194 *
195 * Completes with a [ListOperationsResponse]. 190 * Completes with a [ListOperationsResponse].
196 * 191 *
197 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 192 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
198 * error. 193 * error.
199 * 194 *
200 * If the used [http.Client] completes with an error when making a REST call, 195 * If the used [http.Client] completes with an error when making a REST call,
201 * this method will complete with the same error. 196 * this method will complete with the same error.
202 */ 197 */
203 async.Future<ListOperationsResponse> list({core.int pageSize, core.String filter, core.String name, core.String pageToken}) { 198 async.Future<ListOperationsResponse> list({core.String name, core.String pageToken, core.int pageSize, core.String filter}) {
204 var _url = null; 199 var _url = null;
205 var _queryParams = new core.Map(); 200 var _queryParams = new core.Map();
206 var _uploadMedia = null; 201 var _uploadMedia = null;
207 var _uploadOptions = null; 202 var _uploadOptions = null;
208 var _downloadOptions = commons.DownloadOptions.Metadata; 203 var _downloadOptions = commons.DownloadOptions.Metadata;
209 var _body = null; 204 var _body = null;
210 205
206 if (name != null) {
207 _queryParams["name"] = [name];
208 }
209 if (pageToken != null) {
210 _queryParams["pageToken"] = [pageToken];
211 }
211 if (pageSize != null) { 212 if (pageSize != null) {
212 _queryParams["pageSize"] = ["${pageSize}"]; 213 _queryParams["pageSize"] = ["${pageSize}"];
213 } 214 }
214 if (filter != null) { 215 if (filter != null) {
215 _queryParams["filter"] = [filter]; 216 _queryParams["filter"] = [filter];
216 } 217 }
217 if (name != null) {
218 _queryParams["name"] = [name];
219 }
220 if (pageToken != null) {
221 _queryParams["pageToken"] = [pageToken];
222 }
223 218
224 _url = 'v1beta1/operations'; 219 _url = 'v1beta1/operations';
225 220
226 var _response = _requester.request(_url, 221 var _response = _requester.request(_url,
227 "GET", 222 "GET",
228 body: _body, 223 body: _body,
229 queryParams: _queryParams, 224 queryParams: _queryParams,
230 uploadOptions: _uploadOptions, 225 uploadOptions: _uploadOptions,
231 uploadMedia: _uploadMedia, 226 uploadMedia: _uploadMedia,
232 downloadOptions: _downloadOptions); 227 downloadOptions: _downloadOptions);
(...skipping 91 matching lines...) Expand 10 before | Expand all | Expand 10 after
324 uploadMedia: _uploadMedia, 319 uploadMedia: _uploadMedia,
325 downloadOptions: _downloadOptions); 320 downloadOptions: _downloadOptions);
326 return _response.then((data) => new SyncRecognizeResponse.fromJson(data)); 321 return _response.then((data) => new SyncRecognizeResponse.fromJson(data));
327 } 322 }
328 323
329 } 324 }
330 325
331 326
332 327
333 /** 328 /**
334 * `AsyncRecognizeRequest` is the top-level message sent by the client for 329 * The top-level message sent by the client for the `AsyncRecognize` method.
335 * the `AsyncRecognize` method.
336 */ 330 */
337 class AsyncRecognizeRequest { 331 class AsyncRecognizeRequest {
338 /** [Required] The audio data to be recognized. */ 332 /** *Required* The audio data to be recognized. */
339 RecognitionAudio audio; 333 RecognitionAudio audio;
340 /** 334 /**
341 * [Required] The `config` message provides information to the recognizer 335 * *Required* Provides information to the recognizer that specifies how to
342 * that specifies how to process the request. 336 * process the request.
343 */ 337 */
344 RecognitionConfig config; 338 RecognitionConfig config;
345 339
346 AsyncRecognizeRequest(); 340 AsyncRecognizeRequest();
347 341
348 AsyncRecognizeRequest.fromJson(core.Map _json) { 342 AsyncRecognizeRequest.fromJson(core.Map _json) {
349 if (_json.containsKey("audio")) { 343 if (_json.containsKey("audio")) {
350 audio = new RecognitionAudio.fromJson(_json["audio"]); 344 audio = new RecognitionAudio.fromJson(_json["audio"]);
351 } 345 }
352 if (_json.containsKey("config")) { 346 if (_json.containsKey("config")) {
353 config = new RecognitionConfig.fromJson(_json["config"]); 347 config = new RecognitionConfig.fromJson(_json["config"]);
354 } 348 }
355 } 349 }
356 350
357 core.Map toJson() { 351 core.Map toJson() {
358 var _json = new core.Map(); 352 var _json = new core.Map();
359 if (audio != null) { 353 if (audio != null) {
360 _json["audio"] = (audio).toJson(); 354 _json["audio"] = (audio).toJson();
361 } 355 }
362 if (config != null) { 356 if (config != null) {
363 _json["config"] = (config).toJson(); 357 _json["config"] = (config).toJson();
364 } 358 }
365 return _json; 359 return _json;
366 } 360 }
367 } 361 }
368 362
369 /** The request message for Operations.CancelOperation. */
370 class CancelOperationRequest {
371
372 CancelOperationRequest();
373
374 CancelOperationRequest.fromJson(core.Map _json) {
375 }
376
377 core.Map toJson() {
378 var _json = new core.Map();
379 return _json;
380 }
381 }
382
383 /** 363 /**
384 * A generic empty message that you can re-use to avoid defining duplicated 364 * A generic empty message that you can re-use to avoid defining duplicated
385 * empty messages in your APIs. A typical example is to use it as the request 365 * empty messages in your APIs. A typical example is to use it as the request
386 * or the response type of an API method. For instance: 366 * or the response type of an API method. For instance:
387 * 367 *
388 * service Foo { 368 * service Foo {
389 * rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); 369 * rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
390 * } 370 * }
391 * 371 *
392 * The JSON representation for `Empty` is empty JSON object `{}`. 372 * The JSON representation for `Empty` is empty JSON object `{}`.
(...skipping 173 matching lines...) Expand 10 before | Expand all | Expand 10 after
566 _json["content"] = content; 546 _json["content"] = content;
567 } 547 }
568 if (uri != null) { 548 if (uri != null) {
569 _json["uri"] = uri; 549 _json["uri"] = uri;
570 } 550 }
571 return _json; 551 return _json;
572 } 552 }
573 } 553 }
574 554
575 /** 555 /**
576 * The `RecognitionConfig` message provides information to the recognizer 556 * Provides information to the recognizer that specifies how to process the
577 * that specifies how to process the request. 557 * request.
578 */ 558 */
579 class RecognitionConfig { 559 class RecognitionConfig {
580 /** 560 /**
581 * [Required] Encoding of audio data sent in all `RecognitionAudio` messages. 561 * *Required* Encoding of audio data sent in all `RecognitionAudio` messages.
582 * Possible string values are: 562 * Possible string values are:
583 * - "ENCODING_UNSPECIFIED" : Not specified. Will return result 563 * - "ENCODING_UNSPECIFIED" : Not specified. Will return result
584 * google.rpc.Code.INVALID_ARGUMENT. 564 * google.rpc.Code.INVALID_ARGUMENT.
585 * - "LINEAR16" : Uncompressed 16-bit signed little-endian samples (Linear 565 * - "LINEAR16" : Uncompressed 16-bit signed little-endian samples (Linear
586 * PCM). 566 * PCM).
587 * This is the only encoding that may be used by `AsyncRecognize`. 567 * This is the only encoding that may be used by `AsyncRecognize`.
588 * - "FLAC" : This is the recommended encoding for `SyncRecognize` and 568 * - "FLAC" : This is the recommended encoding for `SyncRecognize` and
589 * `StreamingRecognize` because it uses lossless compression; therefore 569 * `StreamingRecognize` because it uses lossless compression; therefore
590 * recognition accuracy is not compromised by a lossy codec. 570 * recognition accuracy is not compromised by a lossy codec.
591 * 571 *
592 * The stream FLAC (Free Lossless Audio Codec) encoding is specified at: 572 * The stream FLAC (Free Lossless Audio Codec) encoding is specified at:
593 * http://flac.sourceforge.net/documentation.html. 573 * http://flac.sourceforge.net/documentation.html.
594 * 16-bit and 24-bit samples are supported. 574 * 16-bit and 24-bit samples are supported.
595 * Not all fields in STREAMINFO are supported. 575 * Not all fields in STREAMINFO are supported.
596 * - "MULAW" : 8-bit samples that compand 14-bit audio samples using G.711 576 * - "MULAW" : 8-bit samples that compand 14-bit audio samples using G.711
597 * PCMU/mu-law. 577 * PCMU/mu-law.
598 * - "AMR" : Adaptive Multi-Rate Narrowband codec. `sample_rate` must be 8000 578 * - "AMR" : Adaptive Multi-Rate Narrowband codec. `sample_rate` must be 8000
599 * Hz. 579 * Hz.
600 * - "AMR_WB" : Adaptive Multi-Rate Wideband codec. `sample_rate` must be 580 * - "AMR_WB" : Adaptive Multi-Rate Wideband codec. `sample_rate` must be
601 * 16000 Hz. 581 * 16000 Hz.
602 */ 582 */
603 core.String encoding; 583 core.String encoding;
604 /** 584 /**
605 * [Optional] The language of the supplied audio as a BCP-47 language tag. 585 * *Optional* The language of the supplied audio as a BCP-47 language tag.
606 * Example: "en-GB" https://www.rfc-editor.org/rfc/bcp/bcp47.txt 586 * Example: "en-GB" https://www.rfc-editor.org/rfc/bcp/bcp47.txt
607 * If omitted, defaults to "en-US". See 587 * If omitted, defaults to "en-US". See
608 * [Language Support](https://cloud.google.com/speech/docs/languages) 588 * [Language Support](https://cloud.google.com/speech/docs/languages)
609 * for a list of the currently supported language codes. 589 * for a list of the currently supported language codes.
610 */ 590 */
611 core.String languageCode; 591 core.String languageCode;
612 /** 592 /**
613 * [Optional] Maximum number of recognition hypotheses to be returned. 593 * *Optional* Maximum number of recognition hypotheses to be returned.
614 * Specifically, the maximum number of `SpeechRecognitionAlternative` messages 594 * Specifically, the maximum number of `SpeechRecognitionAlternative` messages
615 * within each `SpeechRecognitionResult`. 595 * within each `SpeechRecognitionResult`.
616 * The server may return fewer than `max_alternatives`. 596 * The server may return fewer than `max_alternatives`.
617 * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of 597 * Valid values are `0`-`30`. A value of `0` or `1` will return a maximum of
618 * `1`. If omitted, defaults to `1`. 598 * one. If omitted, will return a maximum of one.
619 */ 599 */
620 core.int maxAlternatives; 600 core.int maxAlternatives;
621 /** 601 /**
622 * [Optional] If set to `true`, the server will attempt to filter out 602 * *Optional* If set to `true`, the server will attempt to filter out
623 * profanities, replacing all but the initial character in each filtered word 603 * profanities, replacing all but the initial character in each filtered word
624 * with asterisks, e.g. "f***". If set to `false` or omitted, profanities 604 * with asterisks, e.g. "f***". If set to `false` or omitted, profanities
625 * won't be filtered out. 605 * won't be filtered out.
626 */ 606 */
627 core.bool profanityFilter; 607 core.bool profanityFilter;
628 /** 608 /**
629 * [Required] Sample rate in Hertz of the audio data sent in all 609 * *Required* Sample rate in Hertz of the audio data sent in all
630 * `RecognitionAudio` messages. Valid values are: 8000-48000. 610 * `RecognitionAudio` messages. Valid values are: 8000-48000.
631 * 16000 is optimal. For best results, set the sampling rate of the audio 611 * 16000 is optimal. For best results, set the sampling rate of the audio
632 * source to 16000 Hz. If that's not possible, use the native sample rate of 612 * source to 16000 Hz. If that's not possible, use the native sample rate of
633 * the audio source (instead of re-sampling). 613 * the audio source (instead of re-sampling).
634 */ 614 */
635 core.int sampleRate; 615 core.int sampleRate;
636 /** 616 /**
637 * [Optional] A means to provide context to assist the speech recognition. 617 * *Optional* A means to provide context to assist the speech recognition.
638 */ 618 */
639 SpeechContext speechContext; 619 SpeechContext speechContext;
640 620
641 RecognitionConfig(); 621 RecognitionConfig();
642 622
643 RecognitionConfig.fromJson(core.Map _json) { 623 RecognitionConfig.fromJson(core.Map _json) {
644 if (_json.containsKey("encoding")) { 624 if (_json.containsKey("encoding")) {
645 encoding = _json["encoding"]; 625 encoding = _json["encoding"];
646 } 626 }
647 if (_json.containsKey("languageCode")) { 627 if (_json.containsKey("languageCode")) {
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
684 return _json; 664 return _json;
685 } 665 }
686 } 666 }
687 667
688 /** 668 /**
689 * Provides "hints" to the speech recognizer to favor specific words and phrases 669 * Provides "hints" to the speech recognizer to favor specific words and phrases
690 * in the results. 670 * in the results.
691 */ 671 */
692 class SpeechContext { 672 class SpeechContext {
693 /** 673 /**
694 * [Optional] A list of strings containing words and phrases "hints" so that 674 * *Optional* A list of strings containing words and phrases "hints" so that
695 * the speech recognition is more likely to recognize them. This can be used 675 * the speech recognition is more likely to recognize them. This can be used
696 * to improve the accuracy for specific words and phrases, for example, if 676 * to improve the accuracy for specific words and phrases, for example, if
697 * specific commands are typically spoken by the user. This can also be used 677 * specific commands are typically spoken by the user. This can also be used
698 * to add additional words to the vocabulary of the recognizer. See 678 * to add additional words to the vocabulary of the recognizer. See
699 * [usage limits](https://cloud.google.com/speech/limits#content). 679 * [usage limits](https://cloud.google.com/speech/limits#content).
700 */ 680 */
701 core.List<core.String> phrases; 681 core.List<core.String> phrases;
702 682
703 SpeechContext(); 683 SpeechContext();
704 684
705 SpeechContext.fromJson(core.Map _json) { 685 SpeechContext.fromJson(core.Map _json) {
706 if (_json.containsKey("phrases")) { 686 if (_json.containsKey("phrases")) {
707 phrases = _json["phrases"]; 687 phrases = _json["phrases"];
708 } 688 }
709 } 689 }
710 690
711 core.Map toJson() { 691 core.Map toJson() {
712 var _json = new core.Map(); 692 var _json = new core.Map();
713 if (phrases != null) { 693 if (phrases != null) {
714 _json["phrases"] = phrases; 694 _json["phrases"] = phrases;
715 } 695 }
716 return _json; 696 return _json;
717 } 697 }
718 } 698 }
719 699
720 /** Alternative hypotheses (a.k.a. n-best list). */ 700 /** Alternative hypotheses (a.k.a. n-best list). */
721 class SpeechRecognitionAlternative { 701 class SpeechRecognitionAlternative {
722 /** 702 /**
723 * [Output-only] The confidence estimate between 0.0 and 1.0. A higher number 703 * *Output-only* The confidence estimate between 0.0 and 1.0. A higher number
724 * means the system is more confident that the recognition is correct. 704 * indicates an estimated greater likelihood that the recognized words are
725 * This field is typically provided only for the top hypothesis, and only for 705 * correct. This field is typically provided only for the top hypothesis, and
726 * `is_final=true` results. 706 * only for `is_final=true` results. Clients should not rely on the
727 * The default of 0.0 is a sentinel value indicating confidence was not set. 707 * `confidence` field as it is not guaranteed to be accurate, or even set, in
708 * any of the results.
709 * The default of 0.0 is a sentinel value indicating `confidence` was not set.
728 */ 710 */
729 core.double confidence; 711 core.double confidence;
730 /** 712 /**
731 * [Output-only] Transcript text representing the words that the user spoke. 713 * *Output-only* Transcript text representing the words that the user spoke.
732 */ 714 */
733 core.String transcript; 715 core.String transcript;
734 716
735 SpeechRecognitionAlternative(); 717 SpeechRecognitionAlternative();
736 718
737 SpeechRecognitionAlternative.fromJson(core.Map _json) { 719 SpeechRecognitionAlternative.fromJson(core.Map _json) {
738 if (_json.containsKey("confidence")) { 720 if (_json.containsKey("confidence")) {
739 confidence = _json["confidence"]; 721 confidence = _json["confidence"];
740 } 722 }
741 if (_json.containsKey("transcript")) { 723 if (_json.containsKey("transcript")) {
742 transcript = _json["transcript"]; 724 transcript = _json["transcript"];
743 } 725 }
744 } 726 }
745 727
746 core.Map toJson() { 728 core.Map toJson() {
747 var _json = new core.Map(); 729 var _json = new core.Map();
748 if (confidence != null) { 730 if (confidence != null) {
749 _json["confidence"] = confidence; 731 _json["confidence"] = confidence;
750 } 732 }
751 if (transcript != null) { 733 if (transcript != null) {
752 _json["transcript"] = transcript; 734 _json["transcript"] = transcript;
753 } 735 }
754 return _json; 736 return _json;
755 } 737 }
756 } 738 }
757 739
758 /** A speech recognition result corresponding to a portion of the audio. */ 740 /** A speech recognition result corresponding to a portion of the audio. */
759 class SpeechRecognitionResult { 741 class SpeechRecognitionResult {
760 /** 742 /**
761 * [Output-only] May contain one or more recognition hypotheses (up to the 743 * *Output-only* May contain one or more recognition hypotheses (up to the
762 * maximum specified in `max_alternatives`). 744 * maximum specified in `max_alternatives`).
763 */ 745 */
764 core.List<SpeechRecognitionAlternative> alternatives; 746 core.List<SpeechRecognitionAlternative> alternatives;
765 747
766 SpeechRecognitionResult(); 748 SpeechRecognitionResult();
767 749
768 SpeechRecognitionResult.fromJson(core.Map _json) { 750 SpeechRecognitionResult.fromJson(core.Map _json) {
769 if (_json.containsKey("alternatives")) { 751 if (_json.containsKey("alternatives")) {
770 alternatives = _json["alternatives"].map((value) => new SpeechRecognitionAlternative.fromJson(value)).toList(); 752 alternatives = _json["alternatives"].map((value) => new SpeechRecognitionAlternative.fromJson(value)).toList();
771 } 753 }
(...skipping 104 matching lines...) Expand 10 before | Expand all | Expand 10 after
876 if (details != null) { 858 if (details != null) {
877 _json["details"] = details; 859 _json["details"] = details;
878 } 860 }
879 if (message != null) { 861 if (message != null) {
880 _json["message"] = message; 862 _json["message"] = message;
881 } 863 }
882 return _json; 864 return _json;
883 } 865 }
884 } 866 }
885 867
886 /** 868 /** The top-level message sent by the client for the `SyncRecognize` method. */
887 * `SyncRecognizeRequest` is the top-level message sent by the client for
888 * the `SyncRecognize` method.
889 */
890 class SyncRecognizeRequest { 869 class SyncRecognizeRequest {
891 /** [Required] The audio data to be recognized. */ 870 /** *Required* The audio data to be recognized. */
892 RecognitionAudio audio; 871 RecognitionAudio audio;
893 /** 872 /**
894 * [Required] The `config` message provides information to the recognizer 873 * *Required* Provides information to the recognizer that specifies how to
895 * that specifies how to process the request. 874 * process the request.
896 */ 875 */
897 RecognitionConfig config; 876 RecognitionConfig config;
898 877
899 SyncRecognizeRequest(); 878 SyncRecognizeRequest();
900 879
901 SyncRecognizeRequest.fromJson(core.Map _json) { 880 SyncRecognizeRequest.fromJson(core.Map _json) {
902 if (_json.containsKey("audio")) { 881 if (_json.containsKey("audio")) {
903 audio = new RecognitionAudio.fromJson(_json["audio"]); 882 audio = new RecognitionAudio.fromJson(_json["audio"]);
904 } 883 }
905 if (_json.containsKey("config")) { 884 if (_json.containsKey("config")) {
906 config = new RecognitionConfig.fromJson(_json["config"]); 885 config = new RecognitionConfig.fromJson(_json["config"]);
907 } 886 }
908 } 887 }
909 888
910 core.Map toJson() { 889 core.Map toJson() {
911 var _json = new core.Map(); 890 var _json = new core.Map();
912 if (audio != null) { 891 if (audio != null) {
913 _json["audio"] = (audio).toJson(); 892 _json["audio"] = (audio).toJson();
914 } 893 }
915 if (config != null) { 894 if (config != null) {
916 _json["config"] = (config).toJson(); 895 _json["config"] = (config).toJson();
917 } 896 }
918 return _json; 897 return _json;
919 } 898 }
920 } 899 }
921 900
922 /** 901 /**
923 * `SyncRecognizeResponse` is the only message returned to the client by 902 * The only message returned to the client by the `SyncRecognize` method. It
924 * `SyncRecognize`. It contains the result as zero or more sequential 903 * contains the result as zero or more sequential `SpeechRecognitionResult`
925 * `SpeechRecognitionResult` messages. 904 * messages.
926 */ 905 */
927 class SyncRecognizeResponse { 906 class SyncRecognizeResponse {
928 /** 907 /**
929 * [Output-only] Sequential list of transcription results corresponding to 908 * *Output-only* Sequential list of transcription results corresponding to
930 * sequential portions of audio. 909 * sequential portions of audio.
931 */ 910 */
932 core.List<SpeechRecognitionResult> results; 911 core.List<SpeechRecognitionResult> results;
933 912
934 SyncRecognizeResponse(); 913 SyncRecognizeResponse();
935 914
936 SyncRecognizeResponse.fromJson(core.Map _json) { 915 SyncRecognizeResponse.fromJson(core.Map _json) {
937 if (_json.containsKey("results")) { 916 if (_json.containsKey("results")) {
937 results = _json["results"].map((value) => new SpeechRecognitionResult.fromJson(value)).toList(); 917 results = _json["results"].map((value) => new SpeechRecognitionResult.fromJson(value)).toList();
939 } 918 }
940 } 919 }
941 920
942 core.Map toJson() { 921 core.Map toJson() {
943 var _json = new core.Map(); 922 var _json = new core.Map();
944 if (results != null) { 923 if (results != null) {
945 _json["results"] = results.map((value) => (value).toJson()).toList(); 924 _json["results"] = results.map((value) => (value).toJson()).toList();
946 } 925 }
947 return _json; 926 return _json;
948 } 927 }
949 } 928 }
OLDNEW
« no previous file with comments | « generated/googleapis_beta/lib/runtimeconfig/v1beta1.dart ('k') | generated/googleapis_beta/lib/sqladmin/v1beta4.dart » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698