OLD | NEW |
1 // This is a generated file (see the discoveryapis_generator project). | 1 // This is a generated file (see the discoveryapis_generator project). |
2 | 2 |
3 library googleapis.speech.v1; | 3 library googleapis.speech.v1; |
4 | 4 |
5 import 'dart:core' as core; | 5 import 'dart:core' as core; |
6 import 'dart:async' as async; | 6 import 'dart:async' as async; |
7 import 'dart:convert' as convert; | 7 import 'dart:convert' as convert; |
8 | 8 |
9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; | 9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; |
10 import 'package:http/http.dart' as http; | 10 import 'package:http/http.dart' as http; |
(...skipping 563 matching lines...)
574 return _json; | 574 return _json; |
575 } | 575 } |
576 } | 576 } |
577 | 577 |
578 /** | 578 /** |
579 * Provides information to the recognizer that specifies how to process the | 579 * Provides information to the recognizer that specifies how to process the |
580 * request. | 580 * request. |
581 */ | 581 */ |
582 class RecognitionConfig { | 582 class RecognitionConfig { |
583 /** | 583 /** |
| 584 * *Optional* If `true`, a list of `words` is returned in the top result, |
| 585 * containing the start and end timestamps for those words. The default value, |
| 586 * `false`, does not return any word-level timing information. |
| 587 */ |
| 588 core.bool enableWordTimeOffsets; |
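Note: a minimal usage sketch for the new flag; the other fields are the ones declared below in this class, and the values are illustrative only:

    // Opt in to per-word start/end times in the top hypothesis.
    final config = new RecognitionConfig()
      ..encoding = "LINEAR16"
      ..sampleRateHertz = 16000
      ..languageCode = "en-US"
      ..enableWordTimeOffsets = true;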
| 589 /** |
584 * *Required* Encoding of audio data sent in all `RecognitionAudio` messages. | 590 * *Required* Encoding of audio data sent in all `RecognitionAudio` messages. |
585 * Possible string values are: | 591 * Possible string values are: |
586 * - "ENCODING_UNSPECIFIED" : Not specified. Will return result | 592 * - "ENCODING_UNSPECIFIED" : Not specified. Will return result |
587 * google.rpc.Code.INVALID_ARGUMENT. | 593 * google.rpc.Code.INVALID_ARGUMENT. |
588 * - "LINEAR16" : Uncompressed 16-bit signed little-endian samples (Linear | 594 * - "LINEAR16" : Uncompressed 16-bit signed little-endian samples (Linear |
589 * PCM). | 595 * PCM). |
590 * - "FLAC" : [`FLAC`](https://xiph.org/flac/documentation.html) (Free | 596 * - "FLAC" : [`FLAC`](https://xiph.org/flac/documentation.html) (Free |
591 * Lossless Audio | 597 * Lossless Audio |
592 * Codec) is the recommended encoding because it is | 598 * Codec) is the recommended encoding because it is |
593 * lossless--therefore recognition is not compromised--and | 599 * lossless--therefore recognition is not compromised--and |
(...skipping 58 matching lines...)
652 */ | 658 */ |
653 core.int sampleRateHertz; | 659 core.int sampleRateHertz; |
654 /** | 660 /** |
655 * *Optional* A means to provide context to assist the speech recognition. | 661 * *Optional* A means to provide context to assist the speech recognition. |
656 */ | 662 */ |
657 core.List<SpeechContext> speechContexts; | 663 core.List<SpeechContext> speechContexts; |
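Note: a quick sketch of providing context, assuming the `phrases` list that SpeechContext declares elsewhere in this library (values are hypothetical):

    // Bias recognition toward expected vocabulary.
    final config = new RecognitionConfig()
      ..speechContexts = [
        new SpeechContext()..phrases = ["discoveryapis", "Dart VM"]
      ];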
658 | 664 |
659 RecognitionConfig(); | 665 RecognitionConfig(); |
660 | 666 |
661 RecognitionConfig.fromJson(core.Map _json) { | 667 RecognitionConfig.fromJson(core.Map _json) { |
| 668 if (_json.containsKey("enableWordTimeOffsets")) { |
| 669 enableWordTimeOffsets = _json["enableWordTimeOffsets"]; |
| 670 } |
662 if (_json.containsKey("encoding")) { | 671 if (_json.containsKey("encoding")) { |
663 encoding = _json["encoding"]; | 672 encoding = _json["encoding"]; |
664 } | 673 } |
665 if (_json.containsKey("languageCode")) { | 674 if (_json.containsKey("languageCode")) { |
666 languageCode = _json["languageCode"]; | 675 languageCode = _json["languageCode"]; |
667 } | 676 } |
668 if (_json.containsKey("maxAlternatives")) { | 677 if (_json.containsKey("maxAlternatives")) { |
669 maxAlternatives = _json["maxAlternatives"]; | 678 maxAlternatives = _json["maxAlternatives"]; |
670 } | 679 } |
671 if (_json.containsKey("profanityFilter")) { | 680 if (_json.containsKey("profanityFilter")) { |
672 profanityFilter = _json["profanityFilter"]; | 681 profanityFilter = _json["profanityFilter"]; |
673 } | 682 } |
674 if (_json.containsKey("sampleRateHertz")) { | 683 if (_json.containsKey("sampleRateHertz")) { |
675 sampleRateHertz = _json["sampleRateHertz"]; | 684 sampleRateHertz = _json["sampleRateHertz"]; |
676 } | 685 } |
677 if (_json.containsKey("speechContexts")) { | 686 if (_json.containsKey("speechContexts")) { |
678 speechContexts = _json["speechContexts"].map((value) => new SpeechContext.fromJson(value)).toList(); | 687 speechContexts = _json["speechContexts"].map((value) => new SpeechContext.fromJson(value)).toList(); |
679 } | 688 } |
680 } | 689 } |
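Note: with the branch added above (and the matching toJson branch below), the new key survives a decode/encode round trip; a quick check, using an illustrative map literal:

    final config = new RecognitionConfig.fromJson({
      "languageCode": "en-US",
      "enableWordTimeOffsets": true,
    });
    assert(config.enableWordTimeOffsets);
    assert(config.toJson()["enableWordTimeOffsets"] == true);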
681 | 690 |
682 core.Map<core.String, core.Object> toJson() { | 691 core.Map<core.String, core.Object> toJson() { |
683 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 692 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
| 693 if (enableWordTimeOffsets != null) { |
| 694 _json["enableWordTimeOffsets"] = enableWordTimeOffsets; |
| 695 } |
684 if (encoding != null) { | 696 if (encoding != null) { |
685 _json["encoding"] = encoding; | 697 _json["encoding"] = encoding; |
686 } | 698 } |
687 if (languageCode != null) { | 699 if (languageCode != null) { |
688 _json["languageCode"] = languageCode; | 700 _json["languageCode"] = languageCode; |
689 } | 701 } |
690 if (maxAlternatives != null) { | 702 if (maxAlternatives != null) { |
691 _json["maxAlternatives"] = maxAlternatives; | 703 _json["maxAlternatives"] = maxAlternatives; |
692 } | 704 } |
693 if (profanityFilter != null) { | 705 if (profanityFilter != null) { |
(...skipping 111 matching lines...)
805 * correct. This field is typically provided only for the top hypothesis, and | 817 * correct. This field is typically provided only for the top hypothesis, and |
806 * only for `is_final=true` results. Clients should not rely on the | 818 * only for `is_final=true` results. Clients should not rely on the |
807 * `confidence` field as it is not guaranteed to be accurate or consistent. | 819 * `confidence` field as it is not guaranteed to be accurate or consistent. |
808 * The default of 0.0 is a sentinel value indicating `confidence` was not set. | 820 * The default of 0.0 is a sentinel value indicating `confidence` was not set. |
809 */ | 821 */ |
810 core.double confidence; | 822 core.double confidence; |
811 /** | 823 /** |
812 * *Output-only* Transcript text representing the words that the user spoke. | 824 * *Output-only* Transcript text representing the words that the user spoke. |
813 */ | 825 */ |
814 core.String transcript; | 826 core.String transcript; |
| 827 /** |
| 828 * *Output-only* List of word-specific information for each recognized word. |
| 829 */ |
| 830 core.List<WordInfo> words; |
815 | 831 |
816 SpeechRecognitionAlternative(); | 832 SpeechRecognitionAlternative(); |
817 | 833 |
818 SpeechRecognitionAlternative.fromJson(core.Map _json) { | 834 SpeechRecognitionAlternative.fromJson(core.Map _json) { |
819 if (_json.containsKey("confidence")) { | 835 if (_json.containsKey("confidence")) { |
820 confidence = _json["confidence"]; | 836 confidence = _json["confidence"]; |
821 } | 837 } |
822 if (_json.containsKey("transcript")) { | 838 if (_json.containsKey("transcript")) { |
823 transcript = _json["transcript"]; | 839 transcript = _json["transcript"]; |
824 } | 840 } |
| 841 if (_json.containsKey("words")) { |
| 842 words = _json["words"].map((value) => new WordInfo.fromJson(value)).toList(); |
| 843 } |
825 } | 844 } |
826 | 845 |
827 core.Map<core.String, core.Object> toJson() { | 846 core.Map<core.String, core.Object> toJson() { |
828 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 847 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
829 if (confidence != null) { | 848 if (confidence != null) { |
830 _json["confidence"] = confidence; | 849 _json["confidence"] = confidence; |
831 } | 850 } |
832 if (transcript != null) { | 851 if (transcript != null) { |
833 _json["transcript"] = transcript; | 852 _json["transcript"] = transcript; |
834 } | 853 } |
| 854 if (words != null) { |
| 855 _json["words"] = words.map((value) => (value).toJson()).toList(); |
| 856 } |
835 return _json; | 857 return _json; |
836 } | 858 } |
837 } | 859 } |
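Note: a sketch of consuming the new `words` list on a decoded alternative; the response fragment is illustrative:

    final alt = new SpeechRecognitionAlternative.fromJson({
      "transcript": "hello world",
      "confidence": 0.92,
      "words": [
        {"startTime": "0s", "endTime": "0.400s", "word": "hello"},
        {"startTime": "0.400s", "endTime": "0.900s", "word": "world"},
      ],
    });
    for (var info in alt.words) {
      print("${info.word}: ${info.startTime} -> ${info.endTime}");
    }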
838 | 860 |
839 /** A speech recognition result corresponding to a portion of the audio. */ | 861 /** A speech recognition result corresponding to a portion of the audio. */ |
840 class SpeechRecognitionResult { | 862 class SpeechRecognitionResult { |
841 /** | 863 /** |
842 * *Output-only* May contain one or more recognition hypotheses (up to the | 864 * *Output-only* May contain one or more recognition hypotheses (up to the |
843 * maximum specified in `max_alternatives`). | 865 * maximum specified in `max_alternatives`). |
844 * These alternatives are ordered in terms of accuracy, with the first/top | 866 * These alternatives are ordered in terms of accuracy, with the first/top |
(...skipping 71 matching lines...)
916 * results in its response, the status of those operations should be | 938 * results in its response, the status of those operations should be |
917 * represented directly using the `Status` message. | 939 * represented directly using the `Status` message. |
918 * | 940 * |
919 * - Logging. If some API errors are stored in logs, the message `Status` could | 941 * - Logging. If some API errors are stored in logs, the message `Status` could |
920 * be used directly after any stripping needed for security/privacy reasons. | 942 * be used directly after any stripping needed for security/privacy reasons. |
921 */ | 943 */ |
922 class Status { | 944 class Status { |
923 /** The status code, which should be an enum value of google.rpc.Code. */ | 945 /** The status code, which should be an enum value of google.rpc.Code. */ |
924 core.int code; | 946 core.int code; |
925 /** | 947 /** |
926 * A list of messages that carry the error details. There will be a | 948 * A list of messages that carry the error details. There is a common set of |
927 * common set of message types for APIs to use. | 949 * message types for APIs to use. |
928 * | 950 * |
929 * The values for Object must be JSON objects. Each can consist of `num`, | 951 * The values for Object must be JSON objects. Each can consist of `num`, |
930 * `String`, `bool` and `null` as well as `Map` and `List` values. | 952 * `String`, `bool` and `null` as well as `Map` and `List` values. |
931 */ | 953 */ |
932 core.List<core.Map<core.String, core.Object>> details; | 954 core.List<core.Map<core.String, core.Object>> details; |
933 /** | 955 /** |
934 * A developer-facing error message, which should be in English. Any | 956 * A developer-facing error message, which should be in English. Any |
935 * user-facing error message should be localized and sent in the | 957 * user-facing error message should be localized and sent in the |
936 * google.rpc.Status.details field, or localized by the client. | 958 * google.rpc.Status.details field, or localized by the client. |
937 */ | 959 */ |
(...skipping 20 matching lines...)
958 } | 980 } |
959 if (details != null) { | 981 if (details != null) { |
960 _json["details"] = details; | 982 _json["details"] = details; |
961 } | 983 } |
962 if (message != null) { | 984 if (message != null) { |
963 _json["message"] = message; | 985 _json["message"] = message; |
964 } | 986 } |
965 return _json; | 987 return _json; |
966 } | 988 } |
967 } | 989 } |
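Note: `Status` follows the same fromJson/toJson pattern as the other message classes in this file (its fromJson is elided above); decoding an error payload would look like this, with illustrative values:

    final status = new Status.fromJson({
      "code": 3, // google.rpc.Code.INVALID_ARGUMENT
      "message": "Invalid audio encoding.",
    });
    print("RPC failed: ${status.code}: ${status.message}");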
| 990 |
| 991 /** |
| 992 * Word-specific information returned along with the recognition result when |
| 993 * certain request parameters, such as `enable_word_time_offsets`, are set. |
| 994 */ |
| 995 class WordInfo { |
| 996 /** |
| 997 * *Output-only* Time offset relative to the beginning of the audio, |
| 998 * and corresponding to the end of the spoken word. |
| 999 * This field is only set if `enable_word_time_offsets=true` and only |
| 1000 * in the top hypothesis. |
| 1001 * This is an experimental feature and the accuracy of the time offset can |
| 1002 * vary. |
| 1003 */ |
| 1004 core.String endTime; |
| 1005 /** |
| 1006 * *Output-only* Time offset relative to the beginning of the audio, |
| 1007 * and corresponding to the start of the spoken word. |
| 1008 * This field is only set if `enable_word_time_offsets=true` and only |
| 1009 * in the top hypothesis. |
| 1010 * This is an experimental feature and the accuracy of the time offset can |
| 1011 * vary. |
| 1012 */ |
| 1013 core.String startTime; |
| 1014 /** *Output-only* The word corresponding to this set of information. */ |
| 1015 core.String word; |
| 1016 |
| 1017 WordInfo(); |
| 1018 |
| 1019 WordInfo.fromJson(core.Map _json) { |
| 1020 if (_json.containsKey("endTime")) { |
| 1021 endTime = _json["endTime"]; |
| 1022 } |
| 1023 if (_json.containsKey("startTime")) { |
| 1024 startTime = _json["startTime"]; |
| 1025 } |
| 1026 if (_json.containsKey("word")) { |
| 1027 word = _json["word"]; |
| 1028 } |
| 1029 } |
| 1030 |
| 1031 core.Map<core.String, core.Object> toJson() { |
| 1032 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
| 1033 if (endTime != null) { |
| 1034 _json["endTime"] = endTime; |
| 1035 } |
| 1036 if (startTime != null) { |
| 1037 _json["startTime"] = startTime; |
| 1038 } |
| 1039 if (word != null) { |
| 1040 _json["word"] = word; |
| 1041 } |
| 1042 return _json; |
| 1043 } |
| 1044 } |
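Note: `startTime` and `endTime` arrive as JSON-encoded protobuf Durations, i.e. strings of seconds with an "s" suffix such as "0.400s". A small helper for converting them, assuming that wire format (this helper is not part of the generated API):

    core.Duration parseOffset(core.String offset) {
      // "1.300s" -> 1300 ms; assumes the "<seconds>s" form described above.
      final seconds = core.double.parse(offset.substring(0, offset.length - 1));
      return new core.Duration(microseconds: (seconds * 1000000).round());
    }

For a single word, parseOffset(info.endTime) - parseOffset(info.startTime) is then the time spent speaking it.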