Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(65)

Side by Side Diff: generated/googleapis/lib/speech/v1.dart

Issue 3006323002: Api-Roll 54: 2017-09-11 (Closed)
Patch Set: use 2.0.0-dev.infinity sdk constraint in pubspecs Created 3 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « generated/googleapis/lib/spanner/v1.dart ('k') | generated/googleapis/lib/storage/v1.dart » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // This is a generated file (see the discoveryapis_generator project). 1 // This is a generated file (see the discoveryapis_generator project).
2 2
3 library googleapis.speech.v1; 3 library googleapis.speech.v1;
4 4
5 import 'dart:core' as core; 5 import 'dart:core' as core;
6 import 'dart:async' as async; 6 import 'dart:async' as async;
7 import 'dart:convert' as convert; 7 import 'dart:convert' as convert;
8 8
9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; 9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons;
10 import 'package:http/http.dart' as http; 10 import 'package:http/http.dart' as http;
11 11
12 export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' show 12 export 'package:_discoveryapis_commons/_discoveryapis_commons.dart'
13 ApiRequestError, DetailedApiRequestError; 13 show ApiRequestError, DetailedApiRequestError;
14 14
15 const core.String USER_AGENT = 'dart-api-client speech/v1'; 15 const core.String USER_AGENT = 'dart-api-client speech/v1';
16 16
/// Converts audio to text by applying powerful neural network models.
class SpeechApi {
  /// View and manage your data across Google Cloud Platform services
  static const CloudPlatformScope =
      "https://www.googleapis.com/auth/cloud-platform";

  // Shared requester used by all resource APIs exposed from this client.
  final commons.ApiRequester _requester;

  OperationsResourceApi get operations => new OperationsResourceApi(_requester);
  SpeechResourceApi get speech => new SpeechResourceApi(_requester);

  SpeechApi(http.Client client,
      {core.String rootUrl: "https://speech.googleapis.com/",
      core.String servicePath: ""})
      : _requester =
            new commons.ApiRequester(client, rootUrl, servicePath, USER_AGENT);
}
31 34
32
class OperationsResourceApi {
  final commons.ApiRequester _requester;

  OperationsResourceApi(commons.ApiRequester client) : _requester = client;

  /// Starts asynchronous cancellation on a long-running operation. The server
  /// makes a best effort to cancel the operation, but success is not
  /// guaranteed. If the server doesn't support this method, it returns
  /// `google.rpc.Code.UNIMPLEMENTED`. Clients can use
  /// Operations.GetOperation or
  /// other methods to check whether the cancellation succeeded or whether the
  /// operation completed despite cancellation. On successful cancellation,
  /// the operation is not deleted; instead, it becomes an operation with
  /// an Operation.error value with a google.rpc.Status.code of 1,
  /// corresponding to `Code.CANCELLED`.
  ///
  /// [request] - The metadata request object.
  ///
  /// Request parameters:
  ///
  /// [name] - The name of the operation resource to be cancelled.
  /// Value must have pattern "^[^/]+$".
  ///
  /// Completes with a [Empty].
  ///
  /// Completes with a [commons.ApiRequestError] if the API endpoint returned
  /// an error.
  ///
  /// If the used [http.Client] completes with an error when making a REST
  /// call, this method will complete with the same error.
  async.Future<Empty> cancel(CancelOperationRequest request, core.String name) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (request != null) {
      _body = convert.JSON.encode((request).toJson());
    }
    if (name == null) {
      throw new core.ArgumentError("Parameter name is required.");
    }

    _url = 'v1/operations/' +
        commons.Escaper.ecapeVariableReserved('$name') +
        ':cancel';

    var _response = _requester.request(_url, "POST",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response.then((data) => new Empty.fromJson(data));
  }

  /// Deletes a long-running operation. This method indicates that the client
  /// is
  /// no longer interested in the operation result. It does not cancel the
  /// operation. If the server doesn't support this method, it returns
  /// `google.rpc.Code.UNIMPLEMENTED`.
  ///
  /// Request parameters:
  ///
  /// [name] - The name of the operation resource to be deleted.
  /// Value must have pattern "^[^/]+$".
  ///
  /// Completes with a [Empty].
  ///
  /// Completes with a [commons.ApiRequestError] if the API endpoint returned
  /// an error.
  ///
  /// If the used [http.Client] completes with an error when making a REST
  /// call, this method will complete with the same error.
  async.Future<Empty> delete(core.String name) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (name == null) {
      throw new core.ArgumentError("Parameter name is required.");
    }

    _url = 'v1/operations/' + commons.Escaper.ecapeVariableReserved('$name');

    var _response = _requester.request(_url, "DELETE",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response.then((data) => new Empty.fromJson(data));
  }

  /// Gets the latest state of a long-running operation. Clients can use this
  /// method to poll the operation result at intervals as recommended by the
  /// API
  /// service.
  ///
  /// Request parameters:
  ///
  /// [name] - The name of the operation resource.
  /// Value must have pattern "^[^/]+$".
  ///
  /// Completes with a [Operation].
  ///
  /// Completes with a [commons.ApiRequestError] if the API endpoint returned
  /// an error.
  ///
  /// If the used [http.Client] completes with an error when making a REST
  /// call, this method will complete with the same error.
  async.Future<Operation> get(core.String name) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (name == null) {
      throw new core.ArgumentError("Parameter name is required.");
    }

    _url = 'v1/operations/' + commons.Escaper.ecapeVariableReserved('$name');

    var _response = _requester.request(_url, "GET",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response.then((data) => new Operation.fromJson(data));
  }

  /// Lists operations that match the specified filter in the request. If the
  /// server doesn't support this method, it returns `UNIMPLEMENTED`.
  ///
  /// NOTE: the `name` binding allows API services to override the binding
  /// to use different resource name schemes, such as `users / * /operations`.
  /// To
  /// override the binding, API services can add a binding such as
  /// `"/v1/{name=users / * }/operations"` to their service configuration.
  /// For backwards compatibility, the default name includes the operations
  /// collection id, however overriding users must ensure the name binding
  /// is the parent resource, without the operations collection id.
  ///
  /// Request parameters:
  ///
  /// [filter] - The standard list filter.
  ///
  /// [pageToken] - The standard list page token.
  ///
  /// [name] - The name of the operation's parent resource.
  ///
  /// [pageSize] - The standard list page size.
  ///
  /// Completes with a [ListOperationsResponse].
  ///
  /// Completes with a [commons.ApiRequestError] if the API endpoint returned
  /// an error.
  ///
  /// If the used [http.Client] completes with an error when making a REST
  /// call, this method will complete with the same error.
  async.Future<ListOperationsResponse> list(
      {core.String filter,
      core.String pageToken,
      core.String name,
      core.int pageSize}) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (filter != null) {
      _queryParams["filter"] = [filter];
    }
    if (pageToken != null) {
      _queryParams["pageToken"] = [pageToken];
    }
    if (name != null) {
      _queryParams["name"] = [name];
    }
    if (pageSize != null) {
      _queryParams["pageSize"] = ["${pageSize}"];
    }

    _url = 'v1/operations';

    var _response = _requester.request(_url, "GET",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response.then((data) => new ListOperationsResponse.fromJson(data));
  }
}
242 239
243
class SpeechResourceApi {
  final commons.ApiRequester _requester;

  SpeechResourceApi(commons.ApiRequester client) : _requester = client;

  /// Performs asynchronous speech recognition: receive results via the
  /// google.longrunning.Operations interface. Returns either an
  /// `Operation.error` or an `Operation.response` which contains
  /// a `LongRunningRecognizeResponse` message.
  ///
  /// [request] - The metadata request object.
  ///
  /// Request parameters:
  ///
  /// Completes with a [Operation].
  ///
  /// Completes with a [commons.ApiRequestError] if the API endpoint returned
  /// an error.
  ///
  /// If the used [http.Client] completes with an error when making a REST
  /// call, this method will complete with the same error.
  async.Future<Operation> longrunningrecognize(
      LongRunningRecognizeRequest request) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (request != null) {
      _body = convert.JSON.encode((request).toJson());
    }

    _url = 'v1/speech:longrunningrecognize';

    var _response = _requester.request(_url, "POST",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response.then((data) => new Operation.fromJson(data));
  }

  /// Performs synchronous speech recognition: receive results after all audio
  /// has been sent and processed.
  ///
  /// [request] - The metadata request object.
  ///
  /// Request parameters:
  ///
  /// Completes with a [RecognizeResponse].
  ///
  /// Completes with a [commons.ApiRequestError] if the API endpoint returned
  /// an error.
  ///
  /// If the used [http.Client] completes with an error when making a REST
  /// call, this method will complete with the same error.
  async.Future<RecognizeResponse> recognize(RecognizeRequest request) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (request != null) {
      _body = convert.JSON.encode((request).toJson());
    }

    _url = 'v1/speech:recognize';

    var _response = _requester.request(_url, "POST",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response.then((data) => new RecognizeResponse.fromJson(data));
  }
}
333 322
/// The request message for Operations.CancelOperation.
class CancelOperationRequest {
  CancelOperationRequest();

  // The request carries no fields, so there is nothing to deserialize.
  CancelOperationRequest.fromJson(core.Map _json) {}

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json =
        new core.Map<core.String, core.Object>();
    return _json;
  }
}
349 335
/// A generic empty message that you can re-use to avoid defining duplicated
/// empty messages in your APIs. A typical example is to use it as the request
/// or the response type of an API method. For instance:
///
///     service Foo {
///       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
///     }
///
/// The JSON representation for `Empty` is empty JSON object `{}`.
class Empty {
  Empty();

  // `Empty` has no fields, so deserialization is a no-op.
  Empty.fromJson(core.Map _json) {}

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json =
        new core.Map<core.String, core.Object>();
    return _json;
  }
}
373 356
/// The response message for Operations.ListOperations.
class ListOperationsResponse {
  /// The standard List next-page token.
  core.String nextPageToken;

  /// A list of operations that matches the specified filter in the request.
  core.List<Operation> operations;

  ListOperationsResponse();

  ListOperationsResponse.fromJson(core.Map _json) {
    if (_json.containsKey("nextPageToken")) {
      nextPageToken = _json["nextPageToken"];
    }
    if (_json.containsKey("operations")) {
      operations = _json["operations"]
          .map((value) => new Operation.fromJson(value))
          .toList();
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json =
        new core.Map<core.String, core.Object>();
    if (nextPageToken != null) {
      _json["nextPageToken"] = nextPageToken;
    }
    if (operations != null) {
      _json["operations"] =
          operations.map((value) => (value).toJson()).toList();
    }
    return _json;
  }
}
403 391
/// The top-level message sent by the client for the `LongRunningRecognize`
/// method.
class LongRunningRecognizeRequest {
  /// *Required* The audio data to be recognized.
  RecognitionAudio audio;

  /// *Required* Provides information to the recognizer that specifies how to
  /// process the request.
  RecognitionConfig config;

  LongRunningRecognizeRequest();

  LongRunningRecognizeRequest.fromJson(core.Map _json) {
    if (_json.containsKey("audio")) {
      audio = new RecognitionAudio.fromJson(_json["audio"]);
    }
    if (_json.containsKey("config")) {
      config = new RecognitionConfig.fromJson(_json["config"]);
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json =
        new core.Map<core.String, core.Object>();
    if (audio != null) {
      _json["audio"] = (audio).toJson();
    }
    if (config != null) {
      _json["config"] = (config).toJson();
    }
    return _json;
  }
}
439 425
440 /** 426 /// This resource represents a long-running operation that is the result of a
441 * This resource represents a long-running operation that is the result of a 427 /// network API call.
442 * network API call.
443 */
444 class Operation { 428 class Operation {
445 /** 429 /// If the value is `false`, it means the operation is still in progress.
446 * If the value is `false`, it means the operation is still in progress. 430 /// If `true`, the operation is completed, and either `error` or `response`
447 * If true, the operation is completed, and either `error` or `response` is 431 /// is
448 * available. 432 /// available.
449 */
450 core.bool done; 433 core.bool done;
451 /** The error result of the operation in case of failure or cancellation. */ 434
435 /// The error result of the operation in case of failure or cancellation.
452 Status error; 436 Status error;
453 /** 437
454 * Service-specific metadata associated with the operation. It typically 438 /// Service-specific metadata associated with the operation. It typically
455 * contains progress information and common metadata such as create time. 439 /// contains progress information and common metadata such as create time.
456 * Some services might not provide such metadata. Any method that returns a 440 /// Some services might not provide such metadata. Any method that returns a
457 * long-running operation should document the metadata type, if any. 441 /// long-running operation should document the metadata type, if any.
458 * 442 ///
459 * The values for Object must be JSON objects. It can consist of `num`, 443 /// The values for Object must be JSON objects. It can consist of `num`,
460 * `String`, `bool` and `null` as well as `Map` and `List` values. 444 /// `String`, `bool` and `null` as well as `Map` and `List` values.
461 */
462 core.Map<core.String, core.Object> metadata; 445 core.Map<core.String, core.Object> metadata;
463 /** 446
464 * The server-assigned name, which is only unique within the same service that 447 /// The server-assigned name, which is only unique within the same service
465 * originally returns it. If you use the default HTTP mapping, the 448 /// that
466 * `name` should have the format of `operations/some/unique/name`. 449 /// originally returns it. If you use the default HTTP mapping, the
467 */ 450 /// `name` should have the format of `operations/some/unique/name`.
468 core.String name; 451 core.String name;
469 /** 452
470 * The normal response of the operation in case of success. If the original 453 /// The normal response of the operation in case of success. If the original
471 * method returns no data on success, such as `Delete`, the response is 454 /// method returns no data on success, such as `Delete`, the response is
472 * `google.protobuf.Empty`. If the original method is standard 455 /// `google.protobuf.Empty`. If the original method is standard
473 * `Get`/`Create`/`Update`, the response should be the resource. For other 456 /// `Get`/`Create`/`Update`, the response should be the resource. For other
474 * methods, the response should have the type `XxxResponse`, where `Xxx` 457 /// methods, the response should have the type `XxxResponse`, where `Xxx`
475 * is the original method name. For example, if the original method name 458 /// is the original method name. For example, if the original method name
476 * is `TakeSnapshot()`, the inferred response type is 459 /// is `TakeSnapshot()`, the inferred response type is
477 * `TakeSnapshotResponse`. 460 /// `TakeSnapshotResponse`.
478 * 461 ///
479 * The values for Object must be JSON objects. It can consist of `num`, 462 /// The values for Object must be JSON objects. It can consist of `num`,
480 * `String`, `bool` and `null` as well as `Map` and `List` values. 463 /// `String`, `bool` and `null` as well as `Map` and `List` values.
481 */
482 core.Map<core.String, core.Object> response; 464 core.Map<core.String, core.Object> response;
483 465
484 Operation(); 466 Operation();
485 467
486 Operation.fromJson(core.Map _json) { 468 Operation.fromJson(core.Map _json) {
487 if (_json.containsKey("done")) { 469 if (_json.containsKey("done")) {
488 done = _json["done"]; 470 done = _json["done"];
489 } 471 }
490 if (_json.containsKey("error")) { 472 if (_json.containsKey("error")) {
491 error = new Status.fromJson(_json["error"]); 473 error = new Status.fromJson(_json["error"]);
492 } 474 }
493 if (_json.containsKey("metadata")) { 475 if (_json.containsKey("metadata")) {
494 metadata = _json["metadata"]; 476 metadata = _json["metadata"];
495 } 477 }
496 if (_json.containsKey("name")) { 478 if (_json.containsKey("name")) {
497 name = _json["name"]; 479 name = _json["name"];
498 } 480 }
499 if (_json.containsKey("response")) { 481 if (_json.containsKey("response")) {
500 response = _json["response"]; 482 response = _json["response"];
501 } 483 }
502 } 484 }
503 485
504 core.Map<core.String, core.Object> toJson() { 486 core.Map<core.String, core.Object> toJson() {
505 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 487 final core.Map<core.String, core.Object> _json =
488 new core.Map<core.String, core.Object>();
506 if (done != null) { 489 if (done != null) {
507 _json["done"] = done; 490 _json["done"] = done;
508 } 491 }
509 if (error != null) { 492 if (error != null) {
510 _json["error"] = (error).toJson(); 493 _json["error"] = (error).toJson();
511 } 494 }
512 if (metadata != null) { 495 if (metadata != null) {
513 _json["metadata"] = metadata; 496 _json["metadata"] = metadata;
514 } 497 }
515 if (name != null) { 498 if (name != null) {
516 _json["name"] = name; 499 _json["name"] = name;
517 } 500 }
518 if (response != null) { 501 if (response != null) {
519 _json["response"] = response; 502 _json["response"] = response;
520 } 503 }
521 return _json; 504 return _json;
522 } 505 }
523 } 506 }
524 507
/// Contains audio data in the encoding specified in the `RecognitionConfig`.
/// Either `content` or `uri` must be supplied. Supplying both or neither
/// returns google.rpc.Code.INVALID_ARGUMENT. See
/// [audio limits](https://cloud.google.com/speech/limits#content).
class RecognitionAudio {
  /// The audio data bytes encoded as specified in
  /// `RecognitionConfig`. Note: as with all bytes fields, protobuffers use a
  /// pure binary representation, whereas JSON representations use base64.
  core.String content;

  /// The raw audio bytes, decoded from the base64 [content] string.
  core.List<core.int> get contentAsBytes => convert.BASE64.decode(content);

  /// Stores [bytes] into [content] as URL-safe base64
  /// ('+' becomes '-', '/' becomes '_').
  set contentAsBytes(core.List<core.int> bytes) {
    content =
        convert.BASE64.encode(bytes).replaceAll("/", "_").replaceAll("+", "-");
  }

  /// URI that points to a file that contains audio data bytes as specified in
  /// `RecognitionConfig`. Currently, only Google Cloud Storage URIs are
  /// supported, which must be specified in the following format:
  /// `gs://bucket_name/object_name` (other URI formats return
  /// google.rpc.Code.INVALID_ARGUMENT). For more information, see
  /// [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
  core.String uri;

  RecognitionAudio();

  /// Populates this [RecognitionAudio] from its JSON wire representation.
  ///
  /// Only keys present in [json] are assigned; absent keys leave the
  /// corresponding fields `null`.
  RecognitionAudio.fromJson(core.Map json) {
    if (json.containsKey("content")) {
      content = json["content"];
    }
    if (json.containsKey("uri")) {
      uri = json["uri"];
    }
  }

  /// Serializes this [RecognitionAudio] to a JSON-encodable map, omitting
  /// fields that are `null`.
  core.Map<core.String, core.Object> toJson() {
    final json = new core.Map<core.String, core.Object>();
    if (content != null) {
      json["content"] = content;
    }
    if (uri != null) {
      json["uri"] = uri;
    }
    return json;
  }
}
577 557
/// Provides information to the recognizer that specifies how to process the
/// request.
class RecognitionConfig {
  /// *Optional* If `true`, the top result includes a list of words and
  /// the start and end time offsets (timestamps) for those words. If
  /// `false`, no word-level time offset information is returned. The default
  /// is `false`.
  core.bool enableWordTimeOffsets;

  /// *Required* Encoding of audio data sent in all `RecognitionAudio`
  /// messages.
  /// Possible string values are:
  /// - "ENCODING_UNSPECIFIED" : Not specified. Will return result
  ///   google.rpc.Code.INVALID_ARGUMENT.
  /// - "LINEAR16" : Uncompressed 16-bit signed little-endian samples
  ///   (Linear PCM).
  /// - "FLAC" : [`FLAC`](https://xiph.org/flac/documentation.html) (Free
  ///   Lossless Audio Codec) is the recommended encoding because it is
  ///   lossless--therefore recognition is not compromised--and requires only
  ///   about half the bandwidth of `LINEAR16`. `FLAC` stream encoding
  ///   supports 16-bit and 24-bit samples, however, not all fields in
  ///   `STREAMINFO` are supported.
  /// - "MULAW" : 8-bit samples that compand 14-bit audio samples using G.711
  ///   PCMU/mu-law.
  /// - "AMR" : Adaptive Multi-Rate Narrowband codec. `sample_rate_hertz`
  ///   must be 8000.
  /// - "AMR_WB" : Adaptive Multi-Rate Wideband codec. `sample_rate_hertz`
  ///   must be 16000.
  /// - "OGG_OPUS" : Opus encoded audio frames in Ogg container
  ///   ([OggOpus](https://wiki.xiph.org/OggOpus)). `sample_rate_hertz` must
  ///   be 16000.
  /// - "SPEEX_WITH_HEADER_BYTE" : Although the use of lossy encodings is not
  ///   recommended, if a very low bitrate encoding is required, `OGG_OPUS`
  ///   is highly preferred over Speex encoding. The
  ///   [Speex](https://speex.org/) encoding supported by Cloud Speech API
  ///   has a header byte in each block, as in MIME type
  ///   `audio/x-speex-with-header-byte`. It is a variant of the RTP Speex
  ///   encoding defined in [RFC 5574](https://tools.ietf.org/html/rfc5574).
  ///   The stream is a sequence of blocks, one block per RTP packet. Each
  ///   block starts with a byte containing the length of the block, in
  ///   bytes, followed by one or more frames of Speex data, padded to an
  ///   integral number of bytes (octets) as specified in RFC 5574. In other
  ///   words, each RTP header is replaced with a single byte containing the
  ///   block length. Only Speex wideband is supported. `sample_rate_hertz`
  ///   must be 16000.
  core.String encoding;

  /// *Required* The language of the supplied audio as a
  /// [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
  /// Example: "en-US".
  /// See [Language Support](https://cloud.google.com/speech/docs/languages)
  /// for a list of the currently supported language codes.
  core.String languageCode;

  /// *Optional* Maximum number of recognition hypotheses to be returned.
  /// Specifically, the maximum number of `SpeechRecognitionAlternative`
  /// messages within each `SpeechRecognitionResult`.
  /// The server may return fewer than `max_alternatives`.
  /// Valid values are `0`-`30`. A value of `0` or `1` will return a maximum
  /// of one. If omitted, will return a maximum of one.
  core.int maxAlternatives;

  /// *Optional* If set to `true`, the server will attempt to filter out
  /// profanities, replacing all but the initial character in each filtered
  /// word with asterisks, e.g. "f***". If set to `false` or omitted,
  /// profanities won't be filtered out.
  core.bool profanityFilter;

  /// *Required* Sample rate in Hertz of the audio data sent in all
  /// `RecognitionAudio` messages. Valid values are: 8000-48000.
  /// 16000 is optimal. For best results, set the sampling rate of the audio
  /// source to 16000 Hz. If that's not possible, use the native sample rate
  /// of the audio source (instead of re-sampling).
  core.int sampleRateHertz;

  /// *Optional* A means to provide context to assist the speech recognition.
  core.List<SpeechContext> speechContexts;

  RecognitionConfig();

  /// Populates this [RecognitionConfig] from its JSON wire representation.
  ///
  /// Only keys present in [json] are assigned; absent keys leave the
  /// corresponding fields `null`.
  RecognitionConfig.fromJson(core.Map json) {
    if (json.containsKey("enableWordTimeOffsets")) {
      enableWordTimeOffsets = json["enableWordTimeOffsets"];
    }
    if (json.containsKey("encoding")) {
      encoding = json["encoding"];
    }
    if (json.containsKey("languageCode")) {
      languageCode = json["languageCode"];
    }
    if (json.containsKey("maxAlternatives")) {
      maxAlternatives = json["maxAlternatives"];
    }
    if (json.containsKey("profanityFilter")) {
      profanityFilter = json["profanityFilter"];
    }
    if (json.containsKey("sampleRateHertz")) {
      sampleRateHertz = json["sampleRateHertz"];
    }
    if (json.containsKey("speechContexts")) {
      speechContexts = json["speechContexts"]
          .map((value) => new SpeechContext.fromJson(value))
          .toList();
    }
  }

  /// Serializes this [RecognitionConfig] to a JSON-encodable map, omitting
  /// fields that are `null`.
  core.Map<core.String, core.Object> toJson() {
    final json = new core.Map<core.String, core.Object>();
    // Adds [value] under [key] only when it is actually set.
    void put(core.String key, core.Object value) {
      if (value != null) {
        json[key] = value;
      }
    }

    put("enableWordTimeOffsets", enableWordTimeOffsets);
    put("encoding", encoding);
    put("languageCode", languageCode);
    put("maxAlternatives", maxAlternatives);
    put("profanityFilter", profanityFilter);
    put("sampleRateHertz", sampleRateHertz);
    if (speechContexts != null) {
      json["speechContexts"] = speechContexts.map((c) => c.toJson()).toList();
    }
    return json;
  }
}
718 696
/// The top-level message sent by the client for the `Recognize` method.
class RecognizeRequest {
  /// *Required* The audio data to be recognized.
  RecognitionAudio audio;

  /// *Required* Provides information to the recognizer that specifies how to
  /// process the request.
  RecognitionConfig config;

  RecognizeRequest();

  /// Populates this [RecognizeRequest] from its JSON wire representation.
  ///
  /// Only keys present in [json] are assigned; absent keys leave the
  /// corresponding fields `null`.
  RecognizeRequest.fromJson(core.Map json) {
    if (json.containsKey("audio")) {
      audio = new RecognitionAudio.fromJson(json["audio"]);
    }
    if (json.containsKey("config")) {
      config = new RecognitionConfig.fromJson(json["config"]);
    }
  }

  /// Serializes this [RecognizeRequest] to a JSON-encodable map, omitting
  /// fields that are `null`.
  core.Map<core.String, core.Object> toJson() {
    final json = new core.Map<core.String, core.Object>();
    if (audio != null) {
      json["audio"] = audio.toJson();
    }
    if (config != null) {
      json["config"] = config.toJson();
    }
    return json;
  }
}
751 729
/// The only message returned to the client by the `Recognize` method. It
/// contains the result as zero or more sequential `SpeechRecognitionResult`
/// messages.
class RecognizeResponse {
  /// *Output-only* Sequential list of transcription results corresponding to
  /// sequential portions of audio.
  core.List<SpeechRecognitionResult> results;

  RecognizeResponse();

  /// Populates this [RecognizeResponse] from its JSON wire representation.
  ///
  /// The `results` field is only assigned when the key is present in [json].
  RecognizeResponse.fromJson(core.Map json) {
    if (json.containsKey("results")) {
      results = json["results"]
          .map((value) => new SpeechRecognitionResult.fromJson(value))
          .toList();
    }
  }

  /// Serializes this [RecognizeResponse] to a JSON-encodable map, omitting
  /// the `results` field when it is `null`.
  core.Map<core.String, core.Object> toJson() {
    final json = new core.Map<core.String, core.Object>();
    if (results != null) {
      json["results"] = results.map((r) => r.toJson()).toList();
    }
    return json;
  }
}
780 757
/// Provides "hints" to the speech recognizer to favor specific words and
/// phrases in the results.
class SpeechContext {
  /// *Optional* A list of strings containing words and phrases "hints" so
  /// that the speech recognition is more likely to recognize them. This can
  /// be used to improve the accuracy for specific words and phrases, for
  /// example, if specific commands are typically spoken by the user. This can
  /// also be used to add additional words to the vocabulary of the
  /// recognizer. See
  /// [usage limits](https://cloud.google.com/speech/limits#content).
  core.List<core.String> phrases;

  SpeechContext();

  /// Populates this [SpeechContext] from its JSON wire representation.
  ///
  /// The `phrases` field is only assigned when the key is present in [json].
  SpeechContext.fromJson(core.Map json) {
    if (json.containsKey("phrases")) {
      phrases = json["phrases"];
    }
  }

  /// Serializes this [SpeechContext] to a JSON-encodable map, omitting the
  /// `phrases` field when it is `null`.
  core.Map<core.String, core.Object> toJson() {
    final json = new core.Map<core.String, core.Object>();
    if (phrases != null) {
      json["phrases"] = phrases;
    }
    return json;
  }
}
812 787
/// Alternative hypotheses (a.k.a. n-best list).
class SpeechRecognitionAlternative {
  /// *Output-only* The confidence estimate between 0.0 and 1.0. A higher
  /// number indicates an estimated greater likelihood that the recognized
  /// words are correct. This field is typically provided only for the top
  /// hypothesis, and only for `is_final=true` results. Clients should not
  /// rely on the `confidence` field as it is not guaranteed to be accurate
  /// or consistent. The default of 0.0 is a sentinel value indicating
  /// `confidence` was not set.
  core.double confidence;

  /// *Output-only* Transcript text representing the words that the user
  /// spoke.
  core.String transcript;

  /// *Output-only* A list of word-specific information for each recognized
  /// word.
  core.List<WordInfo> words;

  SpeechRecognitionAlternative();

  /// Populates this [SpeechRecognitionAlternative] from its JSON wire
  /// representation.
  ///
  /// Only keys present in [json] are assigned; absent keys leave the
  /// corresponding fields `null`.
  SpeechRecognitionAlternative.fromJson(core.Map json) {
    if (json.containsKey("confidence")) {
      confidence = json["confidence"];
    }
    if (json.containsKey("transcript")) {
      transcript = json["transcript"];
    }
    if (json.containsKey("words")) {
      words =
          json["words"].map((value) => new WordInfo.fromJson(value)).toList();
    }
  }

  /// Serializes this [SpeechRecognitionAlternative] to a JSON-encodable map,
  /// omitting fields that are `null`.
  core.Map<core.String, core.Object> toJson() {
    final json = new core.Map<core.String, core.Object>();
    if (confidence != null) {
      json["confidence"] = confidence;
    }
    if (transcript != null) {
      json["transcript"] = transcript;
    }
    if (words != null) {
      json["words"] = words.map((w) => w.toJson()).toList();
    }
    return json;
  }
}
861 838
/// A speech recognition result corresponding to a portion of the audio.
class SpeechRecognitionResult {
  /// *Output-only* May contain one or more recognition hypotheses (up to the
  /// maximum specified in `max_alternatives`).
  /// These alternatives are ordered in terms of accuracy, with the top
  /// (first) alternative being the most probable, as ranked by the
  /// recognizer.
  core.List<SpeechRecognitionAlternative> alternatives;

  SpeechRecognitionResult();

  /// Populates this [SpeechRecognitionResult] from its JSON wire
  /// representation.
  ///
  /// The `alternatives` field is only assigned when the key is present in
  /// [json].
  SpeechRecognitionResult.fromJson(core.Map json) {
    if (json.containsKey("alternatives")) {
      alternatives = json["alternatives"]
          .map((value) => new SpeechRecognitionAlternative.fromJson(value))
          .toList();
    }
  }

  /// Serializes this [SpeechRecognitionResult] to a JSON-encodable map,
  /// omitting the `alternatives` field when it is `null`.
  core.Map<core.String, core.Object> toJson() {
    final json = new core.Map<core.String, core.Object>();
    if (alternatives != null) {
      json["alternatives"] = alternatives.map((a) => a.toJson()).toList();
    }
    return json;
  }
}
888 867
889 /** 868 /// The `Status` type defines a logical error model that is suitable for
890 * The `Status` type defines a logical error model that is suitable for 869 /// different
/// different
/// programming environments, including REST APIs and RPC APIs. It is used by
/// [gRPC](https://github.com/grpc). The error model is designed to be:
///
/// - Simple to use and understand for most users
/// - Flexible enough to meet unexpected needs
///
/// # Overview
///
/// The `Status` message contains three pieces of data: error code, error
/// message, and error details. The error code should be an enum value of
/// google.rpc.Code, but it may accept additional error codes if needed. The
/// error message should be a developer-facing English message that helps
/// developers *understand* and *resolve* the error. If a localized user-facing
/// error message is needed, put the localized message in the error details or
/// localize it in the client. The optional error details may contain arbitrary
/// information about the error. There is a predefined set of error detail
/// types in the package `google.rpc` that can be used for common error
/// conditions.
///
/// # Language mapping
///
/// The `Status` message is the logical representation of the error model, but
/// it is not necessarily the actual wire format. When the `Status` message is
/// exposed in different client libraries and different wire protocols, it can
/// be mapped differently. For example, it will likely be mapped to some
/// exceptions in Java, but more likely mapped to some error codes in C.
///
/// # Other uses
///
/// The error model and the `Status` message can be used in a variety of
/// environments, either with or without APIs, to provide a
/// consistent developer experience across different environments.
///
/// Example uses of this error model include:
///
/// - Partial errors. If a service needs to return partial errors to the
///   client, it may embed the `Status` in the normal response to indicate
///   the partial errors.
///
/// - Workflow errors. A typical workflow has multiple steps. Each step may
///   have a `Status` message for error reporting.
///
/// - Batch operations. If a client uses batch request and batch response, the
///   `Status` message should be used directly inside batch response, one for
///   each error sub-response.
///
/// - Asynchronous operations. If an API call embeds asynchronous operation
///   results in its response, the status of those operations should be
///   represented directly using the `Status` message.
///
/// - Logging. If some API errors are stored in logs, the message `Status`
///   could be used directly after any stripping needed for security/privacy
///   reasons.
class Status {
  /// The status code, which should be an enum value of google.rpc.Code.
  core.int code;

  /// A list of messages that carry the error details. There is a common set
  /// of message types for APIs to use.
  ///
  /// The values for Object must be JSON objects. It can consist of `num`,
  /// `String`, `bool` and `null` as well as `Map` and `List` values.
  core.List<core.Map<core.String, core.Object>> details;

  /// A developer-facing error message, which should be in English. Any
  /// user-facing error message should be localized and sent in the
  /// google.rpc.Status.details field, or localized by the client.
  core.String message;

  Status();

  /// Deserializes a [Status] from a decoded JSON map.
  ///
  /// Fields whose keys are absent from [_json] are left null.
  Status.fromJson(core.Map _json) {
    if (_json.containsKey("code")) {
      code = _json["code"];
    }
    if (_json.containsKey("details")) {
      details = _json["details"];
    }
    if (_json.containsKey("message")) {
      message = _json["message"];
    }
  }

  /// Serializes this object to a JSON-encodable map.
  ///
  /// Null fields are omitted from the result.
  core.Map<core.String, core.Object> toJson() {
    final result = <core.String, core.Object>{};
    if (code != null) {
      result["code"] = code;
    }
    if (details != null) {
      result["details"] = details;
    }
    if (message != null) {
      result["message"] = message;
    }
    return result;
  }
}
991 974
/// Word-specific information for recognized words. Word information is only
/// included in the response when certain request parameters are set, such
/// as `enable_word_time_offsets`.
class WordInfo {
  /// *Output-only* Time offset relative to the beginning of the audio,
  /// and corresponding to the end of the spoken word.
  /// This field is only set if `enable_word_time_offsets=true` and only
  /// in the top hypothesis.
  /// This is an experimental feature and the accuracy of the time offset can
  /// vary.
  core.String endTime;

  /// *Output-only* Time offset relative to the beginning of the audio,
  /// and corresponding to the start of the spoken word.
  /// This field is only set if `enable_word_time_offsets=true` and only
  /// in the top hypothesis.
  /// This is an experimental feature and the accuracy of the time offset can
  /// vary.
  core.String startTime;

  /// *Output-only* The word corresponding to this set of information.
  core.String word;

  WordInfo();

  /// Deserializes a [WordInfo] from a decoded JSON map.
  ///
  /// Fields whose keys are absent from [_json] are left null.
  WordInfo.fromJson(core.Map _json) {
    if (_json.containsKey("endTime")) {
      endTime = _json["endTime"];
    }
    if (_json.containsKey("startTime")) {
      startTime = _json["startTime"];
    }
    if (_json.containsKey("word")) {
      word = _json["word"];
    }
  }

  /// Serializes this object to a JSON-encodable map.
  ///
  /// Null fields are omitted from the result.
  core.Map<core.String, core.Object> toJson() {
    final result = <core.String, core.Object>{};
    if (endTime != null) {
      result["endTime"] = endTime;
    }
    if (startTime != null) {
      result["startTime"] = startTime;
    }
    if (word != null) {
      result["word"] = word;
    }
    return result;
  }
}
OLDNEW
« no previous file with comments | « generated/googleapis/lib/spanner/v1.dart ('k') | generated/googleapis/lib/storage/v1.dart » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698