OLD | NEW |
1 // This is a generated file (see the discoveryapis_generator project). | 1 // This is a generated file (see the discoveryapis_generator project). |
2 | 2 |
3 library googleapis.speech.v1; | 3 library googleapis.speech.v1; |
4 | 4 |
5 import 'dart:core' as core; | 5 import 'dart:core' as core; |
6 import 'dart:async' as async; | 6 import 'dart:async' as async; |
7 import 'dart:convert' as convert; | 7 import 'dart:convert' as convert; |
8 | 8 |
9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; | 9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; |
10 import 'package:http/http.dart' as http; | 10 import 'package:http/http.dart' as http; |
(...skipping 563 matching lines...)
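These imports are how a discovery-generated client gets wired up: `commons` supplies the request machinery and `http` the transport. A minimal sketch of constructing the client this file defines (the `SpeechApi` class name and the `package:googleapis/speech/v1.dart` import path are assumptions; both fall outside the lines shown here):

    import 'package:googleapis/speech/v1.dart' as speech;
    import 'package:http/http.dart' as http;

    void main() {
      // Real calls need an authenticated client (e.g. from googleapis_auth);
      // a bare http.Client is used here only to illustrate construction.
      final client = new http.Client();
      final api = new speech.SpeechApi(client);
      // Request/response classes such as RecognitionConfig below are used
      // through this object's resources.
      client.close();
    }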
574 return _json; | 574 return _json; |
575 } | 575 } |
576 } | 576 } |
577 | 577 |
578 /** | 578 /** |
579 * Provides information to the recognizer that specifies how to process the | 579 * Provides information to the recognizer that specifies how to process the |
580 * request. | 580 * request. |
581 */ | 581 */ |
582 class RecognitionConfig { | 582 class RecognitionConfig { |
583 /** | 583 /** |
584 * *Optional* If `true`, a list of `words` are returned in the top result, | 584 * *Optional* If `true`, the top result includes a list of words and |
585 * containing the start and end timestamps for those words. The default value, | 585 * the start and end time offsets (timestamps) for those words. If |
586 * 'false' does not return any word-level timing information. | 586 * `false`, no word-level time offset information is returned. The default is |
| 587 * `false`. |
587 */ | 588 */ |
588 core.bool enableWordTimeOffsets; | 589 core.bool enableWordTimeOffsets; |
589 /** | 590 /** |
590 * *Required* Encoding of audio data sent in all `RecognitionAudio` messages. | 591 * *Required* Encoding of audio data sent in all `RecognitionAudio` messages. |
591 * Possible string values are: | 592 * Possible string values are: |
592 * - "ENCODING_UNSPECIFIED" : Not specified. Will return result | 593 * - "ENCODING_UNSPECIFIED" : Not specified. Will return result |
593 * google.rpc.Code.INVALID_ARGUMENT. | 594 * google.rpc.Code.INVALID_ARGUMENT. |
594 * - "LINEAR16" : Uncompressed 16-bit signed little-endian samples (Linear | 595 * - "LINEAR16" : Uncompressed 16-bit signed little-endian samples (Linear |
595 * PCM). | 596 * PCM). |
596 * - "FLAC" : [`FLAC`](https://xiph.org/flac/documentation.html) (Free | 597 * - "FLAC" : [`FLAC`](https://xiph.org/flac/documentation.html) (Free |
(...skipping 221 matching lines...)
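The hunk above documents `enableWordTimeOffsets` and the `encoding` values. A minimal sketch of a config that requests word-level timestamps (same import-path assumption as above; `sampleRateHertz` and `languageCode` are fields of the full class that fall in the skipped lines, and all values here are illustrative):

    import 'package:googleapis/speech/v1.dart' as speech;

    speech.RecognitionConfig wordTimingConfig() {
      // LINEAR16 is one of the documented encoding values; the sample rate
      // and language code are illustrative.
      return new speech.RecognitionConfig()
        ..encoding = 'LINEAR16'
        ..sampleRateHertz = 16000
        ..languageCode = 'en-US'
        ..enableWordTimeOffsets = true;
    }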
818 * only for `is_final=true` results. Clients should not rely on the | 819 * only for `is_final=true` results. Clients should not rely on the |
819 * `confidence` field as it is not guaranteed to be accurate or consistent. | 820 * `confidence` field as it is not guaranteed to be accurate or consistent. |
820 * The default of 0.0 is a sentinel value indicating `confidence` was not set. | 821 * The default of 0.0 is a sentinel value indicating `confidence` was not set. |
821 */ | 822 */ |
822 core.double confidence; | 823 core.double confidence; |
823 /** | 824 /** |
824 * *Output-only* Transcript text representing the words that the user spoke. | 825 * *Output-only* Transcript text representing the words that the user spoke. |
825 */ | 826 */ |
826 core.String transcript; | 827 core.String transcript; |
827 /** | 828 /** |
828 * *Output-only* List of word-specific information for each recognized word. | 829 * *Output-only* A list of word-specific information for each recognized word. |
829 */ | 830 */ |
830 core.List<WordInfo> words; | 831 core.List<WordInfo> words; |
831 | 832 |
832 SpeechRecognitionAlternative(); | 833 SpeechRecognitionAlternative(); |
833 | 834 |
834 SpeechRecognitionAlternative.fromJson(core.Map _json) { | 835 SpeechRecognitionAlternative.fromJson(core.Map _json) { |
835 if (_json.containsKey("confidence")) { | 836 if (_json.containsKey("confidence")) { |
836 confidence = _json["confidence"]; | 837 confidence = _json["confidence"]; |
837 } | 838 } |
838 if (_json.containsKey("transcript")) { | 839 if (_json.containsKey("transcript")) { |
(...skipping 17 matching lines...)
856 } | 857 } |
857 return _json; | 858 return _json; |
858 } | 859 } |
859 } | 860 } |
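A sketch of consuming the fields of `SpeechRecognitionAlternative`, under the same import-path assumption; the 0.0 sentinel check follows the `confidence` doc comment above:

    import 'package:googleapis/speech/v1.dart' as speech;

    void printAlternative(speech.SpeechRecognitionAlternative alt) {
      // A confidence of 0.0 is the documented "not set" sentinel.
      if (alt.confidence != null && alt.confidence != 0.0) {
        print('confidence: ${alt.confidence}');
      }
      print('transcript: ${alt.transcript}');
      // words is only populated when word-level info was requested.
      for (var info in alt.words ?? <speech.WordInfo>[]) {
        print('  ${info.word}');
      }
    }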
860 | 861 |
861 /** A speech recognition result corresponding to a portion of the audio. */ | 862 /** A speech recognition result corresponding to a portion of the audio. */ |
862 class SpeechRecognitionResult { | 863 class SpeechRecognitionResult { |
863 /** | 864 /** |
864 * *Output-only* May contain one or more recognition hypotheses (up to the | 865 * *Output-only* May contain one or more recognition hypotheses (up to the |
865 * maximum specified in `max_alternatives`). | 866 * maximum specified in `max_alternatives`). |
866 * These alternatives are ordered in terms of accuracy, with the first/top | 867 * These alternatives are ordered in terms of accuracy, with the top (first) |
867 * alternative being the most probable, as ranked by the recognizer. | 868 * alternative being the most probable, as ranked by the recognizer. |
868 */ | 869 */ |
869 core.List<SpeechRecognitionAlternative> alternatives; | 870 core.List<SpeechRecognitionAlternative> alternatives; |
870 | 871 |
871 SpeechRecognitionResult(); | 872 SpeechRecognitionResult(); |
872 | 873 |
873 SpeechRecognitionResult.fromJson(core.Map _json) { | 874 SpeechRecognitionResult.fromJson(core.Map _json) { |
874 if (_json.containsKey("alternatives")) { | 875 if (_json.containsKey("alternatives")) { |
875 alternatives = _json["alternatives"].map((value) => new SpeechRecognitionAlternative.fromJson(value)).toList(); | 876 alternatives = _json["alternatives"].map((value) => new SpeechRecognitionAlternative.fromJson(value)).toList(); |
876 } | 877 } |
(...skipping 105 matching lines...)
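Because the alternatives are ranked by the recognizer with the top (first) one most probable, the best transcript is simply the first element. A sketch under the same import assumption (returning null when no hypothesis was produced, in the pre-null-safety style of this generated code):

    import 'package:googleapis/speech/v1.dart' as speech;

    String bestTranscript(speech.SpeechRecognitionResult result) {
      if (result.alternatives == null || result.alternatives.isEmpty) {
        return null;
      }
      // The first alternative is the recognizer's top-ranked hypothesis.
      return result.alternatives.first.transcript;
    }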
982 _json["details"] = details; | 983 _json["details"] = details; |
983 } | 984 } |
984 if (message != null) { | 985 if (message != null) { |
985 _json["message"] = message; | 986 _json["message"] = message; |
986 } | 987 } |
987 return _json; | 988 return _json; |
988 } | 989 } |
989 } | 990 } |
990 | 991 |
991 /** | 992 /** |
992 * Word-specific information detected along with speech recognition when certain | 993 * Word-specific information for recognized words. Word information is only |
993 * request parameters are set. | 994 * included in the response when certain request parameters are set, such |
| 995 * as `enable_word_time_offsets`. |
994 */ | 996 */ |
995 class WordInfo { | 997 class WordInfo { |
996 /** | 998 /** |
997 * *Output-only* Time offset relative to the beginning of the audio, | 999 * *Output-only* Time offset relative to the beginning of the audio, |
998 * and corresponding to the end of the spoken word. | 1000 * and corresponding to the end of the spoken word. |
999 * This field is only set if `enable_word_time_offsets=true` and only | 1001 * This field is only set if `enable_word_time_offsets=true` and only |
1000 * in the top hypothesis. | 1002 * in the top hypothesis. |
1001 * This is an experimental feature and the accuracy of the time offset can | 1003 * This is an experimental feature and the accuracy of the time offset can |
1002 * vary. | 1004 * vary. |
1003 */ | 1005 */ |
(...skipping 31 matching lines...)
1035 } | 1037 } |
1036 if (startTime != null) { | 1038 if (startTime != null) { |
1037 _json["startTime"] = startTime; | 1039 _json["startTime"] = startTime; |
1038 } | 1040 } |
1039 if (word != null) { | 1041 if (word != null) { |
1040 _json["word"] = word; | 1042 _json["word"] = word; |
1041 } | 1043 } |
1042 return _json; | 1044 return _json; |
1043 } | 1045 } |
1044 } | 1046 } |