Chromium Code Reviews

Side by Side Diff: generated/googleapis_beta/lib/dataproc/v1beta1.dart

Issue 2695743002: Api-roll 45: 2017-02-13 (Closed)
Patch Set: reverted local changes to pubspec file Created 3 years, 10 months ago
1 // This is a generated file (see the discoveryapis_generator project). 1 // This is a generated file (see the discoveryapis_generator project).
2 2
3 library googleapis_beta.dataproc.v1beta1; 3 library googleapis_beta.dataproc.v1beta1;
4 4
5 import 'dart:core' as core; 5 import 'dart:core' as core;
6 import 'dart:async' as async; 6 import 'dart:async' as async;
7 import 'dart:convert' as convert; 7 import 'dart:convert' as convert;
8 8
9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; 9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons;
10 import 'package:http/http.dart' as http; 10 import 'package:http/http.dart' as http;
11 11
12 export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' show 12 export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' show
13 ApiRequestError, DetailedApiRequestError; 13 ApiRequestError, DetailedApiRequestError;
14 14
15 const core.String USER_AGENT = 'dart-api-client dataproc/v1beta1'; 15 const core.String USER_AGENT = 'dart-api-client dataproc/v1beta1';
16 16
17 /** 17 /** Manages Hadoop-based clusters and jobs on Google Cloud Platform. */
18 * An API for managing Hadoop-based clusters and jobs on Google Cloud Platform.
19 */
20 class DataprocApi { 18 class DataprocApi {
21 /** View and manage your data across Google Cloud Platform services */ 19 /** View and manage your data across Google Cloud Platform services */
22 static const CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"; 20 static const CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform";
23 21
24 22
25 final commons.ApiRequester _requester; 23 final commons.ApiRequester _requester;
26 24
27 OperationsResourceApi get operations => new OperationsResourceApi(_requester); 25 OperationsResourceApi get operations => new OperationsResourceApi(_requester);
28 ProjectsResourceApi get projects => new ProjectsResourceApi(_requester); 26 ProjectsResourceApi get projects => new ProjectsResourceApi(_requester);
29 27
30 DataprocApi(http.Client client, {core.String rootUrl: "https://dataproc.googleapis.com/", core.String servicePath: ""}) : 28 DataprocApi(http.Client client, {core.String rootUrl: "https://dataproc.googleapis.com/", core.String servicePath: ""}) :
31 _requester = new commons.ApiRequester(client, rootUrl, servicePath, USER_AGENT); 29 _requester = new commons.ApiRequester(client, rootUrl, servicePath, USER_AGENT);
32 } 30 }
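
For orientation, a minimal sketch of constructing this generated client; the bare http.Client is shown only for shape, since real calls need an OAuth2-authenticated client (for example from the googleapis_auth package, which is not part of this change):

    import 'package:http/http.dart' as http;
    import 'package:googleapis_beta/dataproc/v1beta1.dart' as dataproc;

    main() async {
      // Swap in an authenticated client before making real requests.
      var client = new http.Client();
      var api = new dataproc.DataprocApi(client);
      // api.operations and api.projects expose the resource classes below.
      client.close();
    }
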
33 31
34 32
35 class OperationsResourceApi { 33 class OperationsResourceApi {
36 final commons.ApiRequester _requester; 34 final commons.ApiRequester _requester;
37 35
38 OperationsResourceApi(commons.ApiRequester client) : 36 OperationsResourceApi(commons.ApiRequester client) :
39 _requester = client; 37 _requester = client;
40 38
41 /** 39 /**
42 * Starts asynchronous cancellation on a long-running operation. The server 40 * Starts asynchronous cancellation on a long-running operation. The server
43 * makes a best effort to cancel the operation, but success is not guaranteed. 41 * makes a best effort to cancel the operation, but success is not guaranteed.
44 * If the server doesn't support this method, it returns 42 * If the server doesn't support this method, it returns
45 * `google.rpc.Code.UNIMPLEMENTED`. Clients can use 43 * google.rpc.Code.UNIMPLEMENTED. Clients can use operations.get or other
46 * [operations.get](/dataproc/reference/rest/v1beta1/operations/get) or other
47 * methods to check whether the cancellation succeeded or whether the 44 * methods to check whether the cancellation succeeded or whether the
48 * operation completed despite cancellation. 45 * operation completed despite cancellation.
49 * 46 *
50 * [request] - The metadata request object. 47 * [request] - The metadata request object.
51 * 48 *
52 * Request parameters: 49 * Request parameters:
53 * 50 *
54 * [name] - The name of the operation resource to be cancelled. 51 * [name] - The name of the operation resource to be cancelled.
55 * Value must have pattern "^operations/.+$". 52 * Value must have pattern "^operations/.+$".
56 * 53 *
(...skipping 29 matching lines...)
86 uploadOptions: _uploadOptions, 83 uploadOptions: _uploadOptions,
87 uploadMedia: _uploadMedia, 84 uploadMedia: _uploadMedia,
88 downloadOptions: _downloadOptions); 85 downloadOptions: _downloadOptions);
89 return _response.then((data) => new Empty.fromJson(data)); 86 return _response.then((data) => new Empty.fromJson(data));
90 } 87 }
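
A hypothetical call site for the cancel-then-check flow the comment above describes; 'api' is a DataprocApi as in the earlier sketch, the operation name is illustrative, and the CancelOperationRequest message and operations.get method are assumed to be generated elsewhere in this file:

    // Cancellation is best-effort; poll get() to learn the outcome.
    await api.operations.cancel(
        new CancelOperationRequest(), 'operations/example-op');
    var op = await api.operations.get('operations/example-op');
    print(op.done);
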
91 88
92 /** 89 /**
93 * Deletes a long-running operation. This method indicates that the client is 90 * Deletes a long-running operation. This method indicates that the client is
94 * no longer interested in the operation result. It does not cancel the 91 * no longer interested in the operation result. It does not cancel the
95 * operation. If the server doesn't support this method, it returns 92 * operation. If the server doesn't support this method, it returns
96 * `google.rpc.Code.UNIMPLEMENTED`. 93 * google.rpc.Code.UNIMPLEMENTED.
97 * 94 *
98 * Request parameters: 95 * Request parameters:
99 * 96 *
100 * [name] - The name of the operation resource to be deleted. 97 * [name] - The name of the operation resource to be deleted.
101 * Value must have pattern "^operations/.+$". 98 * Value must have pattern "^operations/.+$".
102 * 99 *
103 * Completes with a [Empty]. 100 * Completes with a [Empty].
104 * 101 *
105 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 102 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
106 * error. 103 * error.
(...skipping 62 matching lines...)
169 body: _body, 166 body: _body,
170 queryParams: _queryParams, 167 queryParams: _queryParams,
171 uploadOptions: _uploadOptions, 168 uploadOptions: _uploadOptions,
172 uploadMedia: _uploadMedia, 169 uploadMedia: _uploadMedia,
173 downloadOptions: _downloadOptions); 170 downloadOptions: _downloadOptions);
174 return _response.then((data) => new Operation.fromJson(data)); 171 return _response.then((data) => new Operation.fromJson(data));
175 } 172 }
176 173
177 /** 174 /**
178 * Lists operations that match the specified filter in the request. If the 175 * Lists operations that match the specified filter in the request. If the
179 * server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the 176 * server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name
180 * `name` binding below allows API services to override the binding to use 177 * binding below allows API services to override the binding to use different
181 * different resource name schemes, such as `users / * /operations`. 178 * resource name schemes, such as users / * /operations.
182 * 179 *
183 * Request parameters: 180 * Request parameters:
184 * 181 *
185 * [name] - The name of the operation collection. 182 * [name] - The name of the operation collection.
186 * Value must have pattern "^operations$". 183 * Value must have pattern "^operations$".
187 * 184 *
188 * [filter] - The standard list filter. 185 * [pageToken] - The standard list page token.
189 * 186 *
190 * [pageSize] - The standard list page size. 187 * [pageSize] - The standard list page size.
191 * 188 *
192 * [pageToken] - The standard list page token. 189 * [filter] - The standard list filter.
193 * 190 *
194 * Completes with a [ListOperationsResponse]. 191 * Completes with a [ListOperationsResponse].
195 * 192 *
196 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 193 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
197 * error. 194 * error.
198 * 195 *
199 * If the used [http.Client] completes with an error when making a REST call, 196 * If the used [http.Client] completes with an error when making a REST call,
200 * this method will complete with the same error. 197 * this method will complete with the same error.
201 */ 198 */
202 async.Future<ListOperationsResponse> list(core.String name, {core.String filter, core.int pageSize, core.String pageToken}) { 199 async.Future<ListOperationsResponse> list(core.String name, {core.String pageToken, core.int pageSize, core.String filter}) {
203 var _url = null; 200 var _url = null;
204 var _queryParams = new core.Map(); 201 var _queryParams = new core.Map();
205 var _uploadMedia = null; 202 var _uploadMedia = null;
206 var _uploadOptions = null; 203 var _uploadOptions = null;
207 var _downloadOptions = commons.DownloadOptions.Metadata; 204 var _downloadOptions = commons.DownloadOptions.Metadata;
208 var _body = null; 205 var _body = null;
209 206
210 if (name == null) { 207 if (name == null) {
211 throw new core.ArgumentError("Parameter name is required."); 208 throw new core.ArgumentError("Parameter name is required.");
212 } 209 }
210 if (pageToken != null) {
211 _queryParams["pageToken"] = [pageToken];
212 }
213 if (pageSize != null) {
214 _queryParams["pageSize"] = ["${pageSize}"];
215 }
213 if (filter != null) { 216 if (filter != null) {
214 _queryParams["filter"] = [filter]; 217 _queryParams["filter"] = [filter];
215 } 218 }
216 if (pageSize != null) {
217 _queryParams["pageSize"] = ["${pageSize}"];
218 }
219 if (pageToken != null) {
220 _queryParams["pageToken"] = [pageToken];
221 }
222 219
223 _url = 'v1beta1/' + commons.Escaper.ecapeVariableReserved('$name'); 220 _url = 'v1beta1/' + commons.Escaper.ecapeVariableReserved('$name');
224 221
225 var _response = _requester.request(_url, 222 var _response = _requester.request(_url,
226 "GET", 223 "GET",
227 body: _body, 224 body: _body,
228 queryParams: _queryParams, 225 queryParams: _queryParams,
229 uploadOptions: _uploadOptions, 226 uploadOptions: _uploadOptions,
230 uploadMedia: _uploadMedia, 227 uploadMedia: _uploadMedia,
231 downloadOptions: _downloadOptions); 228 downloadOptions: _downloadOptions);
(...skipping 20 matching lines...)
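
The pageToken/pageSize parameters above drive the usual paging loop; a sketch, to run inside an async function, with field names taken from the ListOperationsResponse message assumed later in this file:

    // Follow nextPageToken until the server stops returning one.
    String token;
    do {
      var resp = await api.operations.list('operations',
          pageSize: 100, pageToken: token);
      for (var op in resp.operations ?? []) {
        print(op.name);
      }
      token = resp.nextPageToken;
    } while (token != null && token.isNotEmpty);
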
252 ProjectsClustersResourceApi(commons.ApiRequester client) : 249 ProjectsClustersResourceApi(commons.ApiRequester client) :
253 _requester = client; 250 _requester = client;
254 251
255 /** 252 /**
256 * Creates a cluster in a project. 253 * Creates a cluster in a project.
257 * 254 *
258 * [request] - The metadata request object. 255 * [request] - The metadata request object.
259 * 256 *
260 * Request parameters: 257 * Request parameters:
261 * 258 *
262 * [projectId] - [Required] The ID of the Google Cloud Platform project that 259 * [projectId] - Required The ID of the Google Cloud Platform project that the
263 * the cluster belongs to. 260 * cluster belongs to.
264 * 261 *
265 * Completes with a [Operation]. 262 * Completes with a [Operation].
266 * 263 *
267 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 264 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
268 * error. 265 * error.
269 * 266 *
270 * If the used [http.Client] completes with an error when making a REST call, 267 * If the used [http.Client] completes with an error when making a REST call,
271 * this method will complete with the same error. 268 * this method will complete with the same error.
272 */ 269 */
273 async.Future<Operation> create(Cluster request, core.String projectId) { 270 async.Future<Operation> create(Cluster request, core.String projectId) {
(...skipping 21 matching lines...)
295 uploadMedia: _uploadMedia, 292 uploadMedia: _uploadMedia,
296 downloadOptions: _downloadOptions); 293 downloadOptions: _downloadOptions);
297 return _response.then((data) => new Operation.fromJson(data)); 294 return _response.then((data) => new Operation.fromJson(data));
298 } 295 }
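
A sketch of a minimal create call; the project and zone values are illustrative, and GceClusterConfiguration and its zoneUri field are assumed from later in this file:

    var cluster = new Cluster()
      ..clusterName = 'example-cluster'
      ..projectId = 'example-project'
      ..configuration = (new ClusterConfiguration()
        ..gceClusterConfiguration = (new GceClusterConfiguration()
          ..zoneUri = 'us-central1-a'));
    // create() returns a long-running Operation, not the Cluster itself.
    var op = await api.projects.clusters.create(cluster, 'example-project');
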
299 296
300 /** 297 /**
301 * Deletes a cluster in a project. 298 * Deletes a cluster in a project.
302 * 299 *
303 * Request parameters: 300 * Request parameters:
304 * 301 *
305 * [projectId] - [Required] The ID of the Google Cloud Platform project that 302 * [projectId] - Required The ID of the Google Cloud Platform project that the
306 * the cluster belongs to. 303 * cluster belongs to.
307 * 304 *
308 * [clusterName] - [Required] The cluster name. 305 * [clusterName] - Required The cluster name.
309 * 306 *
310 * Completes with a [Operation]. 307 * Completes with a [Operation].
311 * 308 *
312 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 309 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
313 * error. 310 * error.
314 * 311 *
315 * If the used [http.Client] completes with an error when making a REST call, 312 * If the used [http.Client] completes with an error when making a REST call,
316 * this method will complete with the same error. 313 * this method will complete with the same error.
317 */ 314 */
318 async.Future<Operation> delete(core.String projectId, core.String clusterName) { 315 async.Future<Operation> delete(core.String projectId, core.String clusterName) {
(...skipping 18 matching lines...)
337 body: _body, 334 body: _body,
338 queryParams: _queryParams, 335 queryParams: _queryParams,
339 uploadOptions: _uploadOptions, 336 uploadOptions: _uploadOptions,
340 uploadMedia: _uploadMedia, 337 uploadMedia: _uploadMedia,
341 downloadOptions: _downloadOptions); 338 downloadOptions: _downloadOptions);
342 return _response.then((data) => new Operation.fromJson(data)); 339 return _response.then((data) => new Operation.fromJson(data));
343 } 340 }
344 341
345 /** 342 /**
346 * Gets cluster diagnostic information. After the operation completes, the 343 * Gets cluster diagnostic information. After the operation completes, the
347 * Operation.response field contains `DiagnoseClusterOutputLocation`. 344 * Operation.response field contains DiagnoseClusterOutputLocation.
348 * 345 *
349 * [request] - The metadata request object. 346 * [request] - The metadata request object.
350 * 347 *
351 * Request parameters: 348 * Request parameters:
352 * 349 *
353 * [projectId] - [Required] The ID of the Google Cloud Platform project that 350 * [projectId] - Required The ID of the Google Cloud Platform project that the
354 * the cluster belongs to. 351 * cluster belongs to.
355 * 352 *
356 * [clusterName] - [Required] The cluster name. 353 * [clusterName] - Required The cluster name.
357 * 354 *
358 * Completes with a [Operation]. 355 * Completes with a [Operation].
359 * 356 *
360 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 357 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
361 * error. 358 * error.
362 * 359 *
363 * If the used [http.Client] completes with an error when making a REST call, 360 * If the used [http.Client] completes with an error when making a REST call,
364 * this method will complete with the same error. 361 * this method will complete with the same error.
365 */ 362 */
366 async.Future<Operation> diagnose(DiagnoseClusterRequest request, core.String projectId, core.String clusterName) { 363 async.Future<Operation> diagnose(DiagnoseClusterRequest request, core.String projectId, core.String clusterName) {
(...skipping 24 matching lines...)
391 uploadMedia: _uploadMedia, 388 uploadMedia: _uploadMedia,
392 downloadOptions: _downloadOptions); 389 downloadOptions: _downloadOptions);
393 return _response.then((data) => new Operation.fromJson(data)); 390 return _response.then((data) => new Operation.fromJson(data));
394 } 391 }
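
A hypothetical invocation; the point is that the diagnostic output arrives through the returned Operation's response field (see DiagnoseClusterResults below), with all identifiers illustrative:

    var op = await api.projects.clusters.diagnose(
        new DiagnoseClusterRequest(), 'example-project', 'example-cluster');
    // When op completes, its response carries the Cloud Storage URI of
    // the diagnostic summary.
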
395 392
396 /** 393 /**
397 * Gets the resource representation for a cluster in a project. 394 * Gets the resource representation for a cluster in a project.
398 * 395 *
399 * Request parameters: 396 * Request parameters:
400 * 397 *
401 * [projectId] - [Required] The ID of the Google Cloud Platform project that 398 * [projectId] - Required The ID of the Google Cloud Platform project that the
402 * the cluster belongs to. 399 * cluster belongs to.
403 * 400 *
404 * [clusterName] - [Required] The cluster name. 401 * [clusterName] - Required The cluster name.
405 * 402 *
406 * Completes with a [Cluster]. 403 * Completes with a [Cluster].
407 * 404 *
408 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 405 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
409 * error. 406 * error.
410 * 407 *
411 * If the used [http.Client] completes with an error when making a REST call, 408 * If the used [http.Client] completes with an error when making a REST call,
412 * this method will complete with the same error. 409 * this method will complete with the same error.
413 */ 410 */
414 async.Future<Cluster> get(core.String projectId, core.String clusterName) { 411 async.Future<Cluster> get(core.String projectId, core.String clusterName) {
(...skipping 21 matching lines...)
436 uploadMedia: _uploadMedia, 433 uploadMedia: _uploadMedia,
437 downloadOptions: _downloadOptions); 434 downloadOptions: _downloadOptions);
438 return _response.then((data) => new Cluster.fromJson(data)); 435 return _response.then((data) => new Cluster.fromJson(data));
439 } 436 }
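
A sketch of a simple read, with 'api' as before; the status field refers to the ClusterStatus message defined further down this file:

    var cluster =
        await api.projects.clusters.get('example-project', 'example-cluster');
    // state is one of UNKNOWN/CREATING/RUNNING/ERROR/DELETING/UPDATING.
    print('${cluster.clusterName}: ${cluster.status.state}');
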
440 437
441 /** 438 /**
442 * Lists all clusters in a project. 439 * Lists all clusters in a project.
443 * 440 *
444 * Request parameters: 441 * Request parameters:
445 * 442 *
446 * [projectId] - [Required] The ID of the Google Cloud Platform project that 443 * [projectId] - Required The ID of the Google Cloud Platform project that the
447 * the cluster belongs to. 444 * cluster belongs to.
448 * 445 *
449 * [filter] - [Optional] A filter constraining which clusters to list. Valid 446 * [filter] - Optional A filter constraining which clusters to list. Valid
450 * filters contain label terms such as: labels.key1 = val1 AND (-labels.k2 = 447 * filters contain label terms such as: labels.key1 = val1 AND (-labels.k2 =
451 * val2 OR labels.k3 = val3) 448 * val2 OR labels.k3 = val3)
452 * 449 *
450 * [pageToken] - The standard List page token.
451 *
453 * [pageSize] - The standard List page size. 452 * [pageSize] - The standard List page size.
454 * 453 *
455 * [pageToken] - The standard List page token.
456 *
457 * Completes with a [ListClustersResponse]. 454 * Completes with a [ListClustersResponse].
458 * 455 *
459 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 456 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
460 * error. 457 * error.
461 * 458 *
462 * If the used [http.Client] completes with an error when making a REST call, 459 * If the used [http.Client] completes with an error when making a REST call,
463 * this method will complete with the same error. 460 * this method will complete with the same error.
464 */ 461 */
465 async.Future<ListClustersResponse> list(core.String projectId, {core.String filter, core.int pageSize, core.String pageToken}) { 462 async.Future<ListClustersResponse> list(core.String projectId, {core.String filter, core.String pageToken, core.int pageSize}) {
466 var _url = null; 463 var _url = null;
467 var _queryParams = new core.Map(); 464 var _queryParams = new core.Map();
468 var _uploadMedia = null; 465 var _uploadMedia = null;
469 var _uploadOptions = null; 466 var _uploadOptions = null;
470 var _downloadOptions = commons.DownloadOptions.Metadata; 467 var _downloadOptions = commons.DownloadOptions.Metadata;
471 var _body = null; 468 var _body = null;
472 469
473 if (projectId == null) { 470 if (projectId == null) {
474 throw new core.ArgumentError("Parameter projectId is required."); 471 throw new core.ArgumentError("Parameter projectId is required.");
475 } 472 }
476 if (filter != null) { 473 if (filter != null) {
477 _queryParams["filter"] = [filter]; 474 _queryParams["filter"] = [filter];
478 } 475 }
476 if (pageToken != null) {
477 _queryParams["pageToken"] = [pageToken];
478 }
479 if (pageSize != null) { 479 if (pageSize != null) {
480 _queryParams["pageSize"] = ["${pageSize}"]; 480 _queryParams["pageSize"] = ["${pageSize}"];
481 } 481 }
482 if (pageToken != null) {
483 _queryParams["pageToken"] = [pageToken];
484 }
485 482
486 _url = 'v1beta1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/clusters'; 483 _url = 'v1beta1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/clusters';
487 484
488 var _response = _requester.request(_url, 485 var _response = _requester.request(_url,
489 "GET", 486 "GET",
490 body: _body, 487 body: _body,
491 queryParams: _queryParams, 488 queryParams: _queryParams,
492 uploadOptions: _uploadOptions, 489 uploadOptions: _uploadOptions,
493 uploadMedia: _uploadMedia, 490 uploadMedia: _uploadMedia,
494 downloadOptions: _downloadOptions); 491 downloadOptions: _downloadOptions);
495 return _response.then((data) => new ListClustersResponse.fromJson(data)); 492 return _response.then((data) => new ListClustersResponse.fromJson(data));
496 } 493 }
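
The filter grammar above composes with paging the same way as operations.list; an illustrative call (label keys and values are placeholders):

    var resp = await api.projects.clusters.list('example-project',
        filter: 'labels.env = staging AND labels.team = data', pageSize: 50);
    for (var c in resp.clusters ?? []) {
      print(c.clusterName);
    }
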
497 494
498 /** 495 /**
499 * Updates a cluster in a project. 496 * Updates a cluster in a project.
500 * 497 *
501 * [request] - The metadata request object. 498 * [request] - The metadata request object.
502 * 499 *
503 * Request parameters: 500 * Request parameters:
504 * 501 *
505 * [projectId] - [Required] The ID of the Google Cloud Platform project the 502 * [projectId] - Required The ID of the Google Cloud Platform project the
506 * cluster belongs to. 503 * cluster belongs to.
507 * 504 *
508 * [clusterName] - [Required] The cluster name. 505 * [clusterName] - Required The cluster name.
509 * 506 *
510 * [updateMask] - [Required] Specifies the path, relative to Cluster, of the 507 * [updateMask] - Required Specifies the path, relative to
511 * field to update. For example, to change the number of workers in a cluster 508 * <code>Cluster</code>, of the field to update. For example, to change the
512 * to 5, the update_mask parameter would be specified as 509 * number of workers in a cluster to 5, the <code>update_mask</code> parameter
513 * configuration.worker_configuration.num_instances, and the `PATCH` request 510 * would be specified as
514 * body would specify the new value, as follows: { "configuration":{ 511 * <code>configuration.worker_configuration.num_instances</code>, and the
515 * "workerConfiguration":{ "numInstances":"5" } } } Similarly, to change the 512 * PATCH request body would specify the new value, as follows:
516 * number of preemptible workers in a cluster to 5, the update_mask parameter 513 * {
517 * would be config.secondary_worker_config.num_instances, and the `PATCH` 514 * "configuration":{
518 * request body would be set as follows: { "config":{ 515 * "workerConfiguration":{
519 * "secondaryWorkerConfig":{ "numInstances":"5" } } } Note: Currently, 516 * "numInstances":"5"
520 * config.worker_config.num_instances and 517 * }
521 * config.secondary_worker_config.num_instances are the only fields that can 518 * }
522 * be updated. 519 * }
520 * Similarly, to change the number of preemptible workers in a cluster to 5,
521 * the <code>update_mask</code> parameter would be
522 * <code>config.secondary_worker_config.num_instances</code>, and the PATCH
523 * request body would be set as follows:
524 * {
525 * "config":{
526 * "secondaryWorkerConfig":{
527 * "numInstances":"5"
528 * }
529 * }
530 * }
531 * <strong>Note:</strong> Currently,
532 * <code>config.worker_config.num_instances</code> and
533 * <code>config.secondary_worker_config.num_instances</code> are the only
534 * fields that can be updated.
523 * 535 *
524 * Completes with a [Operation]. 536 * Completes with a [Operation].
525 * 537 *
526 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 538 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
527 * error. 539 * error.
528 * 540 *
529 * If the used [http.Client] completes with an error when making a REST call, 541 * If the used [http.Client] completes with an error when making a REST call,
530 * this method will complete with the same error. 542 * this method will complete with the same error.
531 */ 543 */
532 async.Future<Operation> patch(Cluster request, core.String projectId, core.String clusterName, {core.String updateMask}) { 544 async.Future<Operation> patch(Cluster request, core.String projectId, core.String clusterName, {core.String updateMask}) {
(...skipping 33 matching lines...)
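
The update_mask example in the comment above corresponds to a call like this sketch; names are illustrative and InstanceGroupConfiguration.numInstances is assumed from later in the file:

    // Resize the primary worker group to 5; only the masked field is
    // read from the request body.
    var patchBody = new Cluster()
      ..configuration = (new ClusterConfiguration()
        ..workerConfiguration = (new InstanceGroupConfiguration()
          ..numInstances = 5));
    var op = await api.projects.clusters.patch(
        patchBody, 'example-project', 'example-cluster',
        updateMask: 'configuration.worker_configuration.num_instances');
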
566 578
567 579
568 class ProjectsJobsResourceApi { 580 class ProjectsJobsResourceApi {
569 final commons.ApiRequester _requester; 581 final commons.ApiRequester _requester;
570 582
571 ProjectsJobsResourceApi(commons.ApiRequester client) : 583 ProjectsJobsResourceApi(commons.ApiRequester client) :
572 _requester = client; 584 _requester = client;
573 585
574 /** 586 /**
575 * Starts a job cancellation request. To access the job resource after 587 * Starts a job cancellation request. To access the job resource after
576 * cancellation, call 588 * cancellation, call jobs.list or jobs.get.
577 * [jobs.list](/dataproc/reference/rest/v1beta1/projects.jobs/list) or
578 * [jobs.get](/dataproc/reference/rest/v1beta1/projects.jobs/get).
579 * 589 *
580 * [request] - The metadata request object. 590 * [request] - The metadata request object.
581 * 591 *
582 * Request parameters: 592 * Request parameters:
583 * 593 *
584 * [projectId] - [Required] The ID of the Google Cloud Platform project that 594 * [projectId] - Required The ID of the Google Cloud Platform project that the
585 * the job belongs to. 595 * job belongs to.
586 * 596 *
587 * [jobId] - [Required] The job ID. 597 * [jobId] - Required The job ID.
588 * 598 *
589 * Completes with a [Job]. 599 * Completes with a [Job].
590 * 600 *
591 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 601 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
592 * error. 602 * error.
593 * 603 *
594 * If the used [http.Client] completes with an error when making a REST call, 604 * If the used [http.Client] completes with an error when making a REST call,
595 * this method will complete with the same error. 605 * this method will complete with the same error.
596 */ 606 */
597 async.Future<Job> cancel(CancelJobRequest request, core.String projectId, core.String jobId) { 607 async.Future<Job> cancel(CancelJobRequest request, core.String projectId, core.String jobId) {
(...skipping 21 matching lines...)
619 body: _body, 629 body: _body,
620 queryParams: _queryParams, 630 queryParams: _queryParams,
621 uploadOptions: _uploadOptions, 631 uploadOptions: _uploadOptions,
622 uploadMedia: _uploadMedia, 632 uploadMedia: _uploadMedia,
623 downloadOptions: _downloadOptions); 633 downloadOptions: _downloadOptions);
624 return _response.then((data) => new Job.fromJson(data)); 634 return _response.then((data) => new Job.fromJson(data));
625 } 635 }
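
A hypothetical cancel-then-inspect sequence matching the doc comment above; job identifiers are placeholders and Job.status is assumed from the Job message elsewhere in this file:

    await api.projects.jobs.cancel(
        new CancelJobRequest(), 'example-project', 'example-job');
    // The job resource remains readable after cancellation.
    var job = await api.projects.jobs.get('example-project', 'example-job');
    print(job.status.state);
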
626 636
627 /** 637 /**
628 * Deletes the job from the project. If the job is active, the delete fails, 638 * Deletes the job from the project. If the job is active, the delete fails,
629 * and the response returns `FAILED_PRECONDITION`. 639 * and the response returns FAILED_PRECONDITION.
630 * 640 *
631 * Request parameters: 641 * Request parameters:
632 * 642 *
633 * [projectId] - [Required] The ID of the Google Cloud Platform project that 643 * [projectId] - Required The ID of the Google Cloud Platform project that the
634 * the job belongs to. 644 * job belongs to.
635 * 645 *
636 * [jobId] - [Required] The job ID. 646 * [jobId] - Required The job ID.
637 * 647 *
638 * Completes with a [Empty]. 648 * Completes with a [Empty].
639 * 649 *
640 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 650 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
641 * error. 651 * error.
642 * 652 *
643 * If the used [http.Client] completes with an error when making a REST call, 653 * If the used [http.Client] completes with an error when making a REST call,
644 * this method will complete with the same error. 654 * this method will complete with the same error.
645 */ 655 */
646 async.Future<Empty> delete(core.String projectId, core.String jobId) { 656 async.Future<Empty> delete(core.String projectId, core.String jobId) {
(...skipping 21 matching lines...)
668 uploadMedia: _uploadMedia, 678 uploadMedia: _uploadMedia,
669 downloadOptions: _downloadOptions); 679 downloadOptions: _downloadOptions);
670 return _response.then((data) => new Empty.fromJson(data)); 680 return _response.then((data) => new Empty.fromJson(data));
671 } 681 }
672 682
673 /** 683 /**
674 * Gets the resource representation for a job in a project. 684 * Gets the resource representation for a job in a project.
675 * 685 *
676 * Request parameters: 686 * Request parameters:
677 * 687 *
678 * [projectId] - [Required] The ID of the Google Cloud Platform project that 688 * [projectId] - Required The ID of the Google Cloud Platform project that the
679 * the job belongs to. 689 * job belongs to.
680 * 690 *
681 * [jobId] - [Required] The job ID. 691 * [jobId] - Required The job ID.
682 * 692 *
683 * Completes with a [Job]. 693 * Completes with a [Job].
684 * 694 *
685 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 695 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
686 * error. 696 * error.
687 * 697 *
688 * If the used [http.Client] completes with an error when making a REST call, 698 * If the used [http.Client] completes with an error when making a REST call,
689 * this method will complete with the same error. 699 * this method will complete with the same error.
690 */ 700 */
691 async.Future<Job> get(core.String projectId, core.String jobId) { 701 async.Future<Job> get(core.String projectId, core.String jobId) {
(...skipping 21 matching lines...)
713 uploadMedia: _uploadMedia, 723 uploadMedia: _uploadMedia,
714 downloadOptions: _downloadOptions); 724 downloadOptions: _downloadOptions);
715 return _response.then((data) => new Job.fromJson(data)); 725 return _response.then((data) => new Job.fromJson(data));
716 } 726 }
717 727
718 /** 728 /**
719 * Lists jobs in a project. 729 * Lists jobs in a project.
720 * 730 *
721 * Request parameters: 731 * Request parameters:
722 * 732 *
723 * [projectId] - [Required] The ID of the Google Cloud Platform project that 733 * [projectId] - Required The ID of the Google Cloud Platform project that the
724 * the job belongs to. 734 * job belongs to.
725 * 735 *
726 * [pageSize] - [Optional] The number of results to return in each response. 736 * [pageToken] - Optional The page token, returned by a previous call, to
727 *
728 * [pageToken] - [Optional] The page token, returned by a previous call, to
729 * request the next page of results. 737 * request the next page of results.
730 * 738 *
731 * [clusterName] - [Optional] If set, the returned jobs list includes only 739 * [pageSize] - Optional The number of results to return in each response.
732 * jobs that were submitted to the named cluster.
733 * 740 *
734 * [jobStateMatcher] - [Optional] Specifies enumerated categories of jobs to 741 * [clusterName] - Optional If set, the returned jobs list includes only jobs
742 * that were submitted to the named cluster.
743 *
744 * [filter] - Optional A filter constraining which jobs to list. Valid filters
745 * contain job state and label terms such as: labels.key1 = val1 AND
746 * (labels.k2 = val2 OR labels.k3 = val3)
747 *
748 * [jobStateMatcher] - Optional Specifies enumerated categories of jobs to
735 * list. 749 * list.
736 * Possible string values are: 750 * Possible string values are:
737 * - "ALL" : A ALL. 751 * - "ALL" : A ALL.
738 * - "ACTIVE" : A ACTIVE. 752 * - "ACTIVE" : A ACTIVE.
739 * - "NON_ACTIVE" : A NON_ACTIVE. 753 * - "NON_ACTIVE" : A NON_ACTIVE.
740 * 754 *
741 * [filter] - [Optional] A filter constraining which jobs to list. Valid
742 * filters contain job state and label terms such as: labels.key1 = val1 AND
743 * (labels.k2 = val2 OR labels.k3 = val3)
744 *
745 * Completes with a [ListJobsResponse]. 755 * Completes with a [ListJobsResponse].
746 * 756 *
747 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 757 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
748 * error. 758 * error.
749 * 759 *
750 * If the used [http.Client] completes with an error when making a REST call, 760 * If the used [http.Client] completes with an error when making a REST call,
751 * this method will complete with the same error. 761 * this method will complete with the same error.
752 */ 762 */
753 async.Future<ListJobsResponse> list(core.String projectId, {core.int pageSize, core.String pageToken, core.String clusterName, core.String jobStateMatcher, core.String filter}) { 763 async.Future<ListJobsResponse> list(core.String projectId, {core.String pageToken, core.int pageSize, core.String clusterName, core.String filter, core.String jobStateMatcher}) {
754 var _url = null; 764 var _url = null;
755 var _queryParams = new core.Map(); 765 var _queryParams = new core.Map();
756 var _uploadMedia = null; 766 var _uploadMedia = null;
757 var _uploadOptions = null; 767 var _uploadOptions = null;
758 var _downloadOptions = commons.DownloadOptions.Metadata; 768 var _downloadOptions = commons.DownloadOptions.Metadata;
759 var _body = null; 769 var _body = null;
760 770
761 if (projectId == null) { 771 if (projectId == null) {
762 throw new core.ArgumentError("Parameter projectId is required."); 772 throw new core.ArgumentError("Parameter projectId is required.");
763 } 773 }
774 if (pageToken != null) {
775 _queryParams["pageToken"] = [pageToken];
776 }
764 if (pageSize != null) { 777 if (pageSize != null) {
765 _queryParams["pageSize"] = ["${pageSize}"]; 778 _queryParams["pageSize"] = ["${pageSize}"];
766 } 779 }
767 if (pageToken != null) {
768 _queryParams["pageToken"] = [pageToken];
769 }
770 if (clusterName != null) { 780 if (clusterName != null) {
771 _queryParams["clusterName"] = [clusterName]; 781 _queryParams["clusterName"] = [clusterName];
772 } 782 }
783 if (filter != null) {
784 _queryParams["filter"] = [filter];
785 }
773 if (jobStateMatcher != null) { 786 if (jobStateMatcher != null) {
774 _queryParams["jobStateMatcher"] = [jobStateMatcher]; 787 _queryParams["jobStateMatcher"] = [jobStateMatcher];
775 } 788 }
776 if (filter != null) {
777 _queryParams["filter"] = [filter];
778 }
779 789
780 _url = 'v1beta1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs'; 790 _url = 'v1beta1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs';
781 791
782 var _response = _requester.request(_url, 792 var _response = _requester.request(_url,
783 "GET", 793 "GET",
784 body: _body, 794 body: _body,
785 queryParams: _queryParams, 795 queryParams: _queryParams,
786 uploadOptions: _uploadOptions, 796 uploadOptions: _uploadOptions,
787 uploadMedia: _uploadMedia, 797 uploadMedia: _uploadMedia,
788 downloadOptions: _downloadOptions); 798 downloadOptions: _downloadOptions);
789 return _response.then((data) => new ListJobsResponse.fromJson(data)); 799 return _response.then((data) => new ListJobsResponse.fromJson(data));
790 } 800 }
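
An illustrative combination of the parameters above, restricting the listing to active jobs on one cluster; the reference.jobId path assumes the Job and JobReference messages generated elsewhere in this file:

    var resp = await api.projects.jobs.list('example-project',
        clusterName: 'example-cluster',
        jobStateMatcher: 'ACTIVE',
        filter: 'labels.team = data');
    for (var j in resp.jobs ?? []) {
      print(j.reference.jobId);
    }
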
791 801
792 /** 802 /**
803 * Updates a job in a project.
804 *
805 * [request] - The metadata request object.
806 *
807 * Request parameters:
808 *
809 * [projectId] - Required The ID of the Google Cloud Platform project that the
810 * job belongs to.
811 *
812 * [jobId] - Required The job ID.
813 *
814 * [updateMask] - Required Specifies the path, relative to <code>Job</code>,
815 * of the field to update. For example, to update the labels of a Job the
816 * <code>update_mask</code> parameter would be specified as
817 * <code>labels</code>, and the PATCH request body would specify the new
818 * value. <strong>Note:</strong> Currently, <code>labels</code> is the only
819 * field that can be updated.
820 *
821 * Completes with a [Job].
822 *
823 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
824 * error.
825 *
826 * If the used [http.Client] completes with an error when making a REST call,
827 * this method will complete with the same error.
828 */
829 async.Future<Job> patch(Job request, core.String projectId, core.String jobId, {core.String updateMask}) {
830 var _url = null;
831 var _queryParams = new core.Map();
832 var _uploadMedia = null;
833 var _uploadOptions = null;
834 var _downloadOptions = commons.DownloadOptions.Metadata;
835 var _body = null;
836
837 if (request != null) {
838 _body = convert.JSON.encode((request).toJson());
839 }
840 if (projectId == null) {
841 throw new core.ArgumentError("Parameter projectId is required.");
842 }
843 if (jobId == null) {
844 throw new core.ArgumentError("Parameter jobId is required.");
845 }
846 if (updateMask != null) {
847 _queryParams["updateMask"] = [updateMask];
848 }
849
850 _url = 'v1beta1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId');
851
852 var _response = _requester.request(_url,
853 "PATCH",
854 body: _body,
855 queryParams: _queryParams,
856 uploadOptions: _uploadOptions,
857 uploadMedia: _uploadMedia,
858 downloadOptions: _downloadOptions);
859 return _response.then((data) => new Job.fromJson(data));
860 }
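
Since labels is currently the only mutable field, a job patch reduces to this sketch (identifiers illustrative):

    var jobPatch = new Job()..labels = {'owner': 'data-team'};
    var updated = await api.projects.jobs.patch(
        jobPatch, 'example-project', 'example-job',
        updateMask: 'labels');
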
861
862 /**
793 * Submits a job to a cluster. 863 * Submits a job to a cluster.
794 * 864 *
795 * [request] - The metadata request object. 865 * [request] - The metadata request object.
796 * 866 *
797 * Request parameters: 867 * Request parameters:
798 * 868 *
799 * [projectId] - [Required] The ID of the Google Cloud Platform project that 869 * [projectId] - Required The ID of the Google Cloud Platform project that the
800 * the job belongs to. 870 * job belongs to.
801 * 871 *
802 * Completes with a [Job]. 872 * Completes with a [Job].
803 * 873 *
804 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 874 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
805 * error. 875 * error.
806 * 876 *
807 * If the used [http.Client] completes with an error when making a REST call, 877 * If the used [http.Client] completes with an error when making a REST call,
808 * this method will complete with the same error. 878 * this method will complete with the same error.
809 */ 879 */
810 async.Future<Job> submit(SubmitJobRequest request, core.String projectId) { 880 async.Future<Job> submit(SubmitJobRequest request, core.String projectId) {
(...skipping 54 matching lines...)
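
A sketch of a submit call; SubmitJobRequest, JobPlacement, and SparkJob are messages assumed from elsewhere in this file, and the URIs and class names are placeholders:

    var request = new SubmitJobRequest()
      ..job = (new Job()
        ..placement = (new JobPlacement()..clusterName = 'example-cluster')
        ..sparkJob = (new SparkJob()
          ..mainClass = 'org.example.ExampleMain'
          ..jarFileUris = ['gs://example-bucket/example.jar']));
    var job = await api.projects.jobs.submit(request, 'example-project');
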
865 return _json; 935 return _json;
866 } 936 }
867 } 937 }
868 938
869 /** 939 /**
870 * Describes the identifying information, configuration, and status of a cluster 940 * Describes the identifying information, configuration, and status of a cluster
871 * of Google Compute Engine instances. 941 * of Google Compute Engine instances.
872 */ 942 */
873 class Cluster { 943 class Cluster {
874 /** 944 /**
875 * [Required] The cluster name. Cluster names within a project must be unique. 945 * Required The cluster name. Cluster names within a project must be unique.
876 * Names from deleted clusters can be reused. 946 * Names from deleted clusters can be reused.
877 */ 947 */
878 core.String clusterName; 948 core.String clusterName;
879 /** 949 /**
880 * [Output-only] A cluster UUID (Unique Universal Identifier). Cloud Dataproc 950 * Output-only A cluster UUID (Unique Universal Identifier). Cloud Dataproc
881 * generates this value when it creates the cluster. 951 * generates this value when it creates the cluster.
882 */ 952 */
883 core.String clusterUuid; 953 core.String clusterUuid;
884 /** 954 /**
885 * [Required] The cluster configuration. Note that Cloud Dataproc may set 955 * Required The cluster configuration. Note that Cloud Dataproc may set
886 * default values, and values may change when clusters are updated. 956 * default values, and values may change when clusters are updated.
887 */ 957 */
888 ClusterConfiguration configuration; 958 ClusterConfiguration configuration;
889 /** 959 /**
890 * [Optional] The labels to associate with this cluster. Label keys must be 960 * Optional The labels to associate with this cluster.Label keys must be
891 * between 1 and 63 characters long, and must conform to the following PCRE 961 * between 1 and 63 characters long, and must conform to the following PCRE
892 * regular expression: \p{Ll}\p{Lo}{0,62} Label values must be between 1 and 962 * regular expression: \p{Ll}\p{Lo}{0,62}Label values must be between 1 and 63
893 * 63 characters long, and must conform to the following PCRE regular 963 * characters long, and must conform to the following PCRE regular expression:
894 * expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} No more than 64 labels can be 964 * \p{Ll}\p{Lo}\p{N}_-{0,63}No more than 64 labels can be associated with a
895 * associated with a given cluster. 965 * given cluster.
896 */ 966 */
897 core.Map<core.String, core.String> labels; 967 core.Map<core.String, core.String> labels;
898 /** Contains cluster daemon metrics such as HDFS and YARN stats. */ 968 /** Contains cluster daemon metrics such as HDFS and YARN stats. */
899 ClusterMetrics metrics; 969 ClusterMetrics metrics;
900 /** 970 /**
901 * [Required] The Google Cloud Platform project ID that the cluster belongs 971 * Required The Google Cloud Platform project ID that the cluster belongs to.
902 * to.
903 */ 972 */
904 core.String projectId; 973 core.String projectId;
905 /** [Output-only] Cluster status. */ 974 /** Output-only Cluster status. */
906 ClusterStatus status; 975 ClusterStatus status;
907 /** [Output-only] Previous cluster statuses. */ 976 /** Output-only Previous cluster statuses. */
908 core.List<ClusterStatus> statusHistory; 977 core.List<ClusterStatus> statusHistory;
909 978
910 Cluster(); 979 Cluster();
911 980
912 Cluster.fromJson(core.Map _json) { 981 Cluster.fromJson(core.Map _json) {
913 if (_json.containsKey("clusterName")) { 982 if (_json.containsKey("clusterName")) {
914 clusterName = _json["clusterName"]; 983 clusterName = _json["clusterName"];
915 } 984 }
916 if (_json.containsKey("clusterUuid")) { 985 if (_json.containsKey("clusterUuid")) {
917 clusterUuid = _json["clusterUuid"]; 986 clusterUuid = _json["clusterUuid"];
(...skipping 44 matching lines...)
962 if (statusHistory != null) { 1031 if (statusHistory != null) {
963 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to List(); 1032 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to List();
964 } 1033 }
965 return _json; 1034 return _json;
966 } 1035 }
967 } 1036 }
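
For illustration, a labels map satisfying the PCRE constraints documented above:

    // Keys: lowercase letters, 1-63 chars; values may also use digits,
    // '_' and '-'; at most 64 labels per cluster.
    var labeled = new Cluster()
      ..clusterName = 'example-cluster'
      ..projectId = 'example-project'
      ..labels = {'env': 'staging', 'team': 'data'};
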
968 1037
969 /** The cluster configuration. */ 1038 /** The cluster configuration. */
970 class ClusterConfiguration { 1039 class ClusterConfiguration {
971 /** 1040 /**
972 * [Optional] A Google Cloud Storage staging bucket used for sharing generated 1041 * Optional A Google Cloud Storage staging bucket used for sharing generated
973 * SSH keys and configuration. If you do not specify a staging bucket, Cloud 1042 * SSH keys and configuration. If you do not specify a staging bucket, Cloud
974 * Dataproc will determine an appropriate Cloud Storage location (US, ASIA, or 1043 * Dataproc will determine an appropriate Cloud Storage location (US, ASIA, or
975 * EU) for your cluster's staging bucket according to the Google Compute 1044 * EU) for your cluster's staging bucket according to the Google Compute
976 * Engine zone where your cluster is deployed, and then it will create and 1045 * Engine zone where your cluster is deployed, and then it will create and
977 * manage this project-level, per-location bucket for you. 1046 * manage this project-level, per-location bucket for you.
978 */ 1047 */
979 core.String configurationBucket; 1048 core.String configurationBucket;
980 /** 1049 /**
981 * [Required] The shared Google Compute Engine configuration settings for all 1050 * Required The shared Google Compute Engine configuration settings for all
982 * instances in a cluster. 1051 * instances in a cluster.
983 */ 1052 */
984 GceClusterConfiguration gceClusterConfiguration; 1053 GceClusterConfiguration gceClusterConfiguration;
985 /** 1054 /**
986 * [Optional] Commands to execute on each node after configuration is 1055 * Optional Commands to execute on each node after configuration is completed.
987 * completed. By default, executables are run on master and all worker nodes. 1056 * By default, executables are run on master and all worker nodes. You can
988 * You can test a node's role metadata to run an executable on a master or 1057 * test a node's <code>role</code> metadata to run an executable on a master
989 * worker node, as shown below: ROLE=$(/usr/share/google/get_metadata_value 1058 * or worker node, as shown below:
990 * attributes/role) if [[ "${ROLE}" == 'Master' ]]; then ... master specific 1059 * ROLE=$(/usr/share/google/get_metadata_value attributes/role)
991 * actions ... else ... worker specific actions ... fi 1060 * if [[ "${ROLE}" == 'Master' ]]; then
1061 * ... master specific actions ...
1062 * else
1063 * ... worker specific actions ...
1064 * fi
992 */ 1065 */
993 core.List<NodeInitializationAction> initializationActions; 1066 core.List<NodeInitializationAction> initializationActions;
994 /** 1067 /**
995 * [Optional] The Google Compute Engine configuration settings for the master 1068 * Optional The Google Compute Engine configuration settings for the master
996 * instance in a cluster. 1069 * instance in a cluster.
997 */ 1070 */
998 InstanceGroupConfiguration masterConfiguration; 1071 InstanceGroupConfiguration masterConfiguration;
999 /** 1072 /**
1000 * [Optional] The Google Compute Engine configuration settings for additional 1073 * Optional The Google Compute Engine configuration settings for additional
1001 * worker instances in a cluster. 1074 * worker instances in a cluster.
1002 */ 1075 */
1003 InstanceGroupConfiguration secondaryWorkerConfiguration; 1076 InstanceGroupConfiguration secondaryWorkerConfiguration;
1004 /** [Optional] The configuration settings for software inside the cluster. */ 1077 /** Optional The configuration settings for software inside the cluster. */
1005 SoftwareConfiguration softwareConfiguration; 1078 SoftwareConfiguration softwareConfiguration;
1006 /** 1079 /**
1007 * [Optional] The Google Compute Engine configuration settings for worker 1080 * Optional The Google Compute Engine configuration settings for worker
1008 * instances in a cluster. 1081 * instances in a cluster.
1009 */ 1082 */
1010 InstanceGroupConfiguration workerConfiguration; 1083 InstanceGroupConfiguration workerConfiguration;
1011 1084
1012 ClusterConfiguration(); 1085 ClusterConfiguration();
1013 1086
1014 ClusterConfiguration.fromJson(core.Map _json) { 1087 ClusterConfiguration.fromJson(core.Map _json) {
1015 if (_json.containsKey("configurationBucket")) { 1088 if (_json.containsKey("configurationBucket")) {
1016 configurationBucket = _json["configurationBucket"]; 1089 configurationBucket = _json["configurationBucket"];
1017 } 1090 }
(...skipping 69 matching lines...)
1087 } 1160 }
1088 if (yarnMetrics != null) { 1161 if (yarnMetrics != null) {
1089 _json["yarnMetrics"] = yarnMetrics; 1162 _json["yarnMetrics"] = yarnMetrics;
1090 } 1163 }
1091 return _json; 1164 return _json;
1092 } 1165 }
1093 } 1166 }
1094 1167
1095 /** Metadata describing the operation. */ 1168 /** Metadata describing the operation. */
1096 class ClusterOperationMetadata { 1169 class ClusterOperationMetadata {
1097 /** [Output-only] Name of the cluster for the operation. */ 1170 /** Output-only Name of the cluster for the operation. */
1098 core.String clusterName; 1171 core.String clusterName;
1099 /** [Output-only] Cluster UUID for the operation. */ 1172 /** Output-only Cluster UUID for the operation. */
1100 core.String clusterUuid; 1173 core.String clusterUuid;
1101 /** [Output-only] Short description of operation. */ 1174 /** Output-only Short description of operation. */
1102 core.String description; 1175 core.String description;
1103 /** [Output-only] labels associated with the operation */ 1176 /** Output-only Labels associated with the operation */
1104 core.Map<core.String, core.String> labels; 1177 core.Map<core.String, core.String> labels;
1105 /** [Output-only] The operation type. */ 1178 /** Output-only The operation type. */
1106 core.String operationType; 1179 core.String operationType;
1107 /** [Output-only] Current operation status. */ 1180 /** Output-only Current operation status. */
1108 ClusterOperationStatus status; 1181 ClusterOperationStatus status;
1109 /** [Output-only] The previous operation status. */ 1182 /** Output-only The previous operation status. */
1110 core.List<ClusterOperationStatus> statusHistory; 1183 core.List<ClusterOperationStatus> statusHistory;
1184 /** Output-only Errors encountered during operation execution. */
1185 core.List<core.String> warnings;
1111 1186
1112 ClusterOperationMetadata(); 1187 ClusterOperationMetadata();
1113 1188
1114 ClusterOperationMetadata.fromJson(core.Map _json) { 1189 ClusterOperationMetadata.fromJson(core.Map _json) {
1115 if (_json.containsKey("clusterName")) { 1190 if (_json.containsKey("clusterName")) {
1116 clusterName = _json["clusterName"]; 1191 clusterName = _json["clusterName"];
1117 } 1192 }
1118 if (_json.containsKey("clusterUuid")) { 1193 if (_json.containsKey("clusterUuid")) {
1119 clusterUuid = _json["clusterUuid"]; 1194 clusterUuid = _json["clusterUuid"];
1120 } 1195 }
1121 if (_json.containsKey("description")) { 1196 if (_json.containsKey("description")) {
1122 description = _json["description"]; 1197 description = _json["description"];
1123 } 1198 }
1124 if (_json.containsKey("labels")) { 1199 if (_json.containsKey("labels")) {
1125 labels = _json["labels"]; 1200 labels = _json["labels"];
1126 } 1201 }
1127 if (_json.containsKey("operationType")) { 1202 if (_json.containsKey("operationType")) {
1128 operationType = _json["operationType"]; 1203 operationType = _json["operationType"];
1129 } 1204 }
1130 if (_json.containsKey("status")) { 1205 if (_json.containsKey("status")) {
1131 status = new ClusterOperationStatus.fromJson(_json["status"]); 1206 status = new ClusterOperationStatus.fromJson(_json["status"]);
1132 } 1207 }
1133 if (_json.containsKey("statusHistory")) { 1208 if (_json.containsKey("statusHistory")) {
1134 statusHistory = _json["statusHistory"].map((value) => new ClusterOperationStatus.fromJson(value)).toList(); 1209 statusHistory = _json["statusHistory"].map((value) => new ClusterOperationStatus.fromJson(value)).toList();
1135 } 1210 }
1211 if (_json.containsKey("warnings")) {
1212 warnings = _json["warnings"];
1213 }
1136 } 1214 }
1137 1215
1138 core.Map toJson() { 1216 core.Map toJson() {
1139 var _json = new core.Map(); 1217 var _json = new core.Map();
1140 if (clusterName != null) { 1218 if (clusterName != null) {
1141 _json["clusterName"] = clusterName; 1219 _json["clusterName"] = clusterName;
1142 } 1220 }
1143 if (clusterUuid != null) { 1221 if (clusterUuid != null) {
1144 _json["clusterUuid"] = clusterUuid; 1222 _json["clusterUuid"] = clusterUuid;
1145 } 1223 }
1146 if (description != null) { 1224 if (description != null) {
1147 _json["description"] = description; 1225 _json["description"] = description;
1148 } 1226 }
1149 if (labels != null) { 1227 if (labels != null) {
1150 _json["labels"] = labels; 1228 _json["labels"] = labels;
1151 } 1229 }
1152 if (operationType != null) { 1230 if (operationType != null) {
1153 _json["operationType"] = operationType; 1231 _json["operationType"] = operationType;
1154 } 1232 }
1155 if (status != null) { 1233 if (status != null) {
1156 _json["status"] = (status).toJson(); 1234 _json["status"] = (status).toJson();
1157 } 1235 }
1158 if (statusHistory != null) { 1236 if (statusHistory != null) {
1159 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to List(); 1237 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to List();
1160 } 1238 }
1239 if (warnings != null) {
1240 _json["warnings"] = warnings;
1241 }
1161 return _json; 1242 return _json;
1162 } 1243 }
1163 } 1244 }
1164 1245
1165 /** The status of the operation. */ 1246 /** The status of the operation. */
1166 class ClusterOperationStatus { 1247 class ClusterOperationStatus {
1167 /** [Output-only]A message containing any operation metadata details. */ 1248 /** Output-onlyA message containing any operation metadata details. */
1168 core.String details; 1249 core.String details;
1169 /** [Output-only] A message containing the detailed operation state. */ 1250 /** Output-only A message containing the detailed operation state. */
1170 core.String innerState; 1251 core.String innerState;
1171 /** 1252 /**
1172 * [Output-only] A message containing the operation state. 1253 * Output-only A message containing the operation state.
1173 * Possible string values are: 1254 * Possible string values are:
1174 * - "UNKNOWN" : A UNKNOWN. 1255 * - "UNKNOWN" : Unused.
1175 * - "PENDING" : A PENDING. 1256 * - "PENDING" : The operation has been created.
1176 * - "RUNNING" : A RUNNING. 1257 * - "RUNNING" : The operation is running.
1177 * - "DONE" : A DONE. 1258 * - "DONE" : The operation is done; either cancelled or completed.
1178 */ 1259 */
1179 core.String state; 1260 core.String state;
1180 /** [Output-only] The time this state was entered. */ 1261 /** Output-only The time this state was entered. */
1181 core.String stateStartTime; 1262 core.String stateStartTime;
1182 1263
1183 ClusterOperationStatus(); 1264 ClusterOperationStatus();
1184 1265
1185 ClusterOperationStatus.fromJson(core.Map _json) { 1266 ClusterOperationStatus.fromJson(core.Map _json) {
1186 if (_json.containsKey("details")) { 1267 if (_json.containsKey("details")) {
1187 details = _json["details"]; 1268 details = _json["details"];
1188 } 1269 }
1189 if (_json.containsKey("innerState")) { 1270 if (_json.containsKey("innerState")) {
1190 innerState = _json["innerState"]; 1271 innerState = _json["innerState"];
(...skipping 24 matching lines...)
1215 } 1296 }
1216 } 1297 }
1217 1298
1218 /** The status of a cluster and its instances. */ 1299 /** The status of a cluster and its instances. */
1219 class ClusterStatus { 1300 class ClusterStatus {
1220 /** Optional details of cluster's state. */ 1301 /** Optional details of cluster's state. */
1221 core.String detail; 1302 core.String detail;
1222 /** 1303 /**
1223 * The cluster's state. 1304 * The cluster's state.
1224 * Possible string values are: 1305 * Possible string values are:
1225 * - "UNKNOWN" : A UNKNOWN. 1306 * - "UNKNOWN" : The cluster state is unknown.
1226 * - "CREATING" : A CREATING. 1307 * - "CREATING" : The cluster is being created and set up. It is not ready for
1227 * - "RUNNING" : A RUNNING. 1308 * use.
1228 * - "ERROR" : A ERROR. 1309 * - "RUNNING" : The cluster is currently running and healthy. It is ready for
1229 * - "DELETING" : A DELETING. 1310 * use.
1230 * - "UPDATING" : A UPDATING. 1311 * - "ERROR" : The cluster encountered an error. It is not ready for use.
1312 * - "DELETING" : The cluster is being deleted. It cannot be used.
1313 * - "UPDATING" : The cluster is being updated. It continues to accept and
1314 * process jobs.
1231 */ 1315 */
1232 core.String state; 1316 core.String state;
1233 /** Time when this state was entered. */ 1317 /** Time when this state was entered. */
1234 core.String stateStartTime; 1318 core.String stateStartTime;
1235 1319
1236 ClusterStatus(); 1320 ClusterStatus();
1237 1321
1238 ClusterStatus.fromJson(core.Map _json) { 1322 ClusterStatus.fromJson(core.Map _json) {
1239 if (_json.containsKey("detail")) { 1323 if (_json.containsKey("detail")) {
1240 detail = _json["detail"]; 1324 detail = _json["detail"];
(...skipping 17 matching lines...)
1258 if (stateStartTime != null) { 1342 if (stateStartTime != null) {
1259 _json["stateStartTime"] = stateStartTime; 1343 _json["stateStartTime"] = stateStartTime;
1260 } 1344 }
1261 return _json; 1345 return _json;
1262 } 1346 }
1263 } 1347 }
1264 1348
1265 /** The location where output from diagnostic command can be found. */ 1349 /** The location where output from diagnostic command can be found. */
1266 class DiagnoseClusterOutputLocation { 1350 class DiagnoseClusterOutputLocation {
1267 /** 1351 /**
1268 * [Output-only] The Google Cloud Storage URI of the diagnostic output. This 1352 * Output-only The Google Cloud Storage URI of the diagnostic output. This
1269 * will be a plain text file with a summary of collected diagnostics. 1353 * will be a plain text file with a summary of collected diagnostics.
1270 */ 1354 */
1271 core.String outputUri; 1355 core.String outputUri;
1272 1356
1273 DiagnoseClusterOutputLocation(); 1357 DiagnoseClusterOutputLocation();
1274 1358
1275 DiagnoseClusterOutputLocation.fromJson(core.Map _json) { 1359 DiagnoseClusterOutputLocation.fromJson(core.Map _json) {
1276 if (_json.containsKey("outputUri")) { 1360 if (_json.containsKey("outputUri")) {
1277 outputUri = _json["outputUri"]; 1361 outputUri = _json["outputUri"];
1278 } 1362 }
(...skipping 18 matching lines...)
1297 1381
1298 core.Map toJson() { 1382 core.Map toJson() {
1299 var _json = new core.Map(); 1383 var _json = new core.Map();
1300 return _json; 1384 return _json;
1301 } 1385 }
1302 } 1386 }
1303 1387
1304 /** The location of diagnostic output. */ 1388 /** The location of diagnostic output. */
1305 class DiagnoseClusterResults { 1389 class DiagnoseClusterResults {
1306 /** 1390 /**
1307 * [Output-only] The Google Cloud Storage URI of the diagnostic output. The 1391 * Output-only The Google Cloud Storage URI of the diagnostic output. The
1308 * output report is a plain text file with a summary of collected diagnostics. 1392 * output report is a plain text file with a summary of collected diagnostics.
1309 */ 1393 */
1310 core.String outputUri; 1394 core.String outputUri;
1311 1395
1312 DiagnoseClusterResults(); 1396 DiagnoseClusterResults();
1313 1397
1314 DiagnoseClusterResults.fromJson(core.Map _json) { 1398 DiagnoseClusterResults.fromJson(core.Map _json) {
1315 if (_json.containsKey("outputUri")) { 1399 if (_json.containsKey("outputUri")) {
1316 outputUri = _json["outputUri"]; 1400 outputUri = _json["outputUri"];
1317 } 1401 }
1318 } 1402 }
1319 1403
1320 core.Map toJson() { 1404 core.Map toJson() {
1321 var _json = new core.Map(); 1405 var _json = new core.Map();
1322 if (outputUri != null) { 1406 if (outputUri != null) {
1323 _json["outputUri"] = outputUri; 1407 _json["outputUri"] = outputUri;
1324 } 1408 }
1325 return _json; 1409 return _json;
1326 } 1410 }
1327 } 1411 }
1328 1412
1329 /** Specifies the configuration of disk options for a group of VM instances. */ 1413 /** Specifies the configuration of disk options for a group of VM instances. */
1330 class DiskConfiguration { 1414 class DiskConfiguration {
1331 /** [Optional] Size in GB of the boot disk (default is 500GB). */ 1415 /** Optional Size in GB of the boot disk (default is 500GB). */
1332 core.int bootDiskSizeGb; 1416 core.int bootDiskSizeGb;
1333 /** 1417 /**
1334 * [Optional] Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are 1418 * Optional Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are
1335 * not attached, the boot disk is used to store runtime logs and HDFS data. If 1419 * not attached, the boot disk is used to store runtime logs and HDFS data. If
1336 * one or more SSDs are attached, this runtime bulk data is spread across 1420 * one or more SSDs are attached, this runtime bulk data is spread across
1337 * them, and the boot disk contains only basic configuration and installed 1421 * them, and the boot disk contains only basic configuration and installed
1338 * binaries. 1422 * binaries.
1339 */ 1423 */
1340 core.int numLocalSsds; 1424 core.int numLocalSsds;
1341 1425
1342 DiskConfiguration(); 1426 DiskConfiguration();
1343 1427
1344 DiskConfiguration.fromJson(core.Map _json) { 1428 DiskConfiguration.fromJson(core.Map _json) {
(...skipping 13 matching lines...)
1358 if (numLocalSsds != null) { 1442 if (numLocalSsds != null) {
1359 _json["numLocalSsds"] = numLocalSsds; 1443 _json["numLocalSsds"] = numLocalSsds;
1360 } 1444 }
1361 return _json; 1445 return _json;
1362 } 1446 }
1363 } 1447 }
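
A construction sketch for the two fields above (import path assumed; values are the documented default and an arbitrary in-range SSD count):

import 'package:googleapis_beta/dataproc/v1beta1.dart';

DiskConfiguration buildDisks() => new DiskConfiguration()
  ..bootDiskSizeGb = 500  // the documented default
  ..numLocalSsds = 2;     // 0-4; with SSDs, HDFS data and logs move off the boot disk
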
1364 1448
1365 /** 1449 /**
1366 * A generic empty message that you can re-use to avoid defining duplicated 1450 * A generic empty message that you can re-use to avoid defining duplicated
1367 * empty messages in your APIs. A typical example is to use it as the request or 1451 * empty messages in your APIs. A typical example is to use it as the request or
1368 * the response type of an API method. For instance: service Foo { rpc 1452 * the response type of an API method. For instance:
1369 * Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON 1453 * service Foo {
1370 * representation for `Empty` is empty JSON object `{}`. 1454 * rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
1455 * }
1456 * The JSON representation for Empty is empty JSON object {}.
1371 */ 1457 */
1372 class Empty { 1458 class Empty {
1373 1459
1374 Empty(); 1460 Empty();
1375 1461
1376 Empty.fromJson(core.Map _json) { 1462 Empty.fromJson(core.Map _json) {
1377 } 1463 }
1378 1464
1379 core.Map toJson() { 1465 core.Map toJson() {
1380 var _json = new core.Map(); 1466 var _json = new core.Map();
(...skipping 17 matching lines...)
1398 core.bool internalIpOnly; 1484 core.bool internalIpOnly;
1399 /** The Google Compute Engine metadata entries to add to all instances. */ 1485 /** The Google Compute Engine metadata entries to add to all instances. */
1400 core.Map<core.String, core.String> metadata; 1486 core.Map<core.String, core.String> metadata;
1401 /** 1487 /**
1402 * The Google Compute Engine network to be used for machine communications. 1488 * The Google Compute Engine network to be used for machine communications.
1403 * Cannot be specified with subnetwork_uri. If neither network_uri nor 1489 * Cannot be specified with subnetwork_uri. If neither network_uri nor
1404 * subnetwork_uri is specified, the "default" network of the project is used, 1490 * subnetwork_uri is specified, the "default" network of the project is used,
1405 * if it exists. Cannot be a "Custom Subnet Network" (see 1491 * if it exists. Cannot be a "Custom Subnet Network" (see
1406 * https://cloud.google.com/compute/docs/subnetworks for more information). 1492 * https://cloud.google.com/compute/docs/subnetworks for more information).
1407 * Example: 1493 * Example:
1408 * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`. 1494 * https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default.
1409 */ 1495 */
1410 core.String networkUri; 1496 core.String networkUri;
1411 /** 1497 /**
1498 * Optional The service account of the instances. Defaults to the default
1499 * Google Compute Engine service account. Custom service accounts need
1500 * permissions equivalent to the following IAM roles:
1501 * roles/logging.logWriter
1502 * roles/storage.objectAdmin (see
1503 * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
1504 * for more information). Example:
1505 * [account_id]@[project_id].iam.gserviceaccount.com
1506 */
1507 core.String serviceAccount;
1508 /**
1412 * The URIs of service account scopes to be included in Google Compute Engine 1509 * The URIs of service account scopes to be included in Google Compute Engine
1413 * instances. The following base set of scopes is always included: - 1510 * instances. The following base set of scopes is always included: -
1414 * https://www.googleapis.com/auth/cloud.useraccounts.readonly - 1511 * https://www.googleapis.com/auth/cloud.useraccounts.readonly -
1415 * https://www.googleapis.com/auth/devstorage.read_write - 1512 * https://www.googleapis.com/auth/devstorage.read_write -
1416 * https://www.googleapis.com/auth/logging.write If no scopes are specified, 1513 * https://www.googleapis.com/auth/logging.write If no scopes are specified,
1417 * the following defaults are also provided: - 1514 * the following defaults are also provided: -
1418 * https://www.googleapis.com/auth/bigquery - 1515 * https://www.googleapis.com/auth/bigquery -
1419 * https://www.googleapis.com/auth/bigtable.admin.table - 1516 * https://www.googleapis.com/auth/bigtable.admin.table -
1420 * https://www.googleapis.com/auth/bigtable.data - 1517 * https://www.googleapis.com/auth/bigtable.data -
1421 * https://www.googleapis.com/auth/devstorage.full_control 1518 * https://www.googleapis.com/auth/devstorage.full_control
1422 */ 1519 */
1423 core.List<core.String> serviceAccountScopes; 1520 core.List<core.String> serviceAccountScopes;
1424 /** 1521 /**
1425 * The Google Compute Engine subnetwork to be used for machine communications. 1522 * The Google Compute Engine subnetwork to be used for machine communications.
1426 * Cannot be specified with network_uri. Example: 1523 * Cannot be specified with network_uri. Example:
1427 * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`. 1524 * https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0.
1428 */ 1525 */
1429 core.String subnetworkUri; 1526 core.String subnetworkUri;
1430 /** The Google Compute Engine tags to add to all instances. */ 1527 /** The Google Compute Engine tags to add to all instances. */
1431 core.List<core.String> tags; 1528 core.List<core.String> tags;
1432 /** 1529 /**
1433 * [Required] The zone where the Google Compute Engine cluster will be 1530 * Required The zone where the Google Compute Engine cluster will be located.
1434 * located. Example: 1531 * Example:
1435 * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`. 1532 * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone].
1436 */ 1533 */
1437 core.String zoneUri; 1534 core.String zoneUri;
1438 1535
1439 GceClusterConfiguration(); 1536 GceClusterConfiguration();
1440 1537
1441 GceClusterConfiguration.fromJson(core.Map _json) { 1538 GceClusterConfiguration.fromJson(core.Map _json) {
1442 if (_json.containsKey("internalIpOnly")) { 1539 if (_json.containsKey("internalIpOnly")) {
1443 internalIpOnly = _json["internalIpOnly"]; 1540 internalIpOnly = _json["internalIpOnly"];
1444 } 1541 }
1445 if (_json.containsKey("metadata")) { 1542 if (_json.containsKey("metadata")) {
1446 metadata = _json["metadata"]; 1543 metadata = _json["metadata"];
1447 } 1544 }
1448 if (_json.containsKey("networkUri")) { 1545 if (_json.containsKey("networkUri")) {
1449 networkUri = _json["networkUri"]; 1546 networkUri = _json["networkUri"];
1450 } 1547 }
1548 if (_json.containsKey("serviceAccount")) {
1549 serviceAccount = _json["serviceAccount"];
1550 }
1451 if (_json.containsKey("serviceAccountScopes")) { 1551 if (_json.containsKey("serviceAccountScopes")) {
1452 serviceAccountScopes = _json["serviceAccountScopes"]; 1552 serviceAccountScopes = _json["serviceAccountScopes"];
1453 } 1553 }
1454 if (_json.containsKey("subnetworkUri")) { 1554 if (_json.containsKey("subnetworkUri")) {
1455 subnetworkUri = _json["subnetworkUri"]; 1555 subnetworkUri = _json["subnetworkUri"];
1456 } 1556 }
1457 if (_json.containsKey("tags")) { 1557 if (_json.containsKey("tags")) {
1458 tags = _json["tags"]; 1558 tags = _json["tags"];
1459 } 1559 }
1460 if (_json.containsKey("zoneUri")) { 1560 if (_json.containsKey("zoneUri")) {
1461 zoneUri = _json["zoneUri"]; 1561 zoneUri = _json["zoneUri"];
1462 } 1562 }
1463 } 1563 }
1464 1564
1465 core.Map toJson() { 1565 core.Map toJson() {
1466 var _json = new core.Map(); 1566 var _json = new core.Map();
1467 if (internalIpOnly != null) { 1567 if (internalIpOnly != null) {
1468 _json["internalIpOnly"] = internalIpOnly; 1568 _json["internalIpOnly"] = internalIpOnly;
1469 } 1569 }
1470 if (metadata != null) { 1570 if (metadata != null) {
1471 _json["metadata"] = metadata; 1571 _json["metadata"] = metadata;
1472 } 1572 }
1473 if (networkUri != null) { 1573 if (networkUri != null) {
1474 _json["networkUri"] = networkUri; 1574 _json["networkUri"] = networkUri;
1475 } 1575 }
1576 if (serviceAccount != null) {
1577 _json["serviceAccount"] = serviceAccount;
1578 }
1476 if (serviceAccountScopes != null) { 1579 if (serviceAccountScopes != null) {
1477 _json["serviceAccountScopes"] = serviceAccountScopes; 1580 _json["serviceAccountScopes"] = serviceAccountScopes;
1478 } 1581 }
1479 if (subnetworkUri != null) { 1582 if (subnetworkUri != null) {
1480 _json["subnetworkUri"] = subnetworkUri; 1583 _json["subnetworkUri"] = subnetworkUri;
1481 } 1584 }
1482 if (tags != null) { 1585 if (tags != null) {
1483 _json["tags"] = tags; 1586 _json["tags"] = tags;
1484 } 1587 }
1485 if (zoneUri != null) { 1588 if (zoneUri != null) {
1486 _json["zoneUri"] = zoneUri; 1589 _json["zoneUri"] = zoneUri;
1487 } 1590 }
1488 return _json; 1591 return _json;
1489 } 1592 }
1490 } 1593 }
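
The network_uri/subnetwork_uri exclusivity documented above is easy to enforce before submission. A sketch with hypothetical project and zone values (import path assumed):

import 'package:googleapis_beta/dataproc/v1beta1.dart';

GceClusterConfiguration buildGceConfig() {
  var gce = new GceClusterConfiguration()
    // zoneUri is required; the project and zone below are placeholders.
    ..zoneUri = 'https://www.googleapis.com/compute/v1/projects/my-project/zones/us-east1-b'
    ..subnetworkUri = 'https://www.googleapis.com/compute/v1/projects/my-project/regions/us-east1/sub0'
    ..tags = ['dataproc'];
  // Documented rule: networkUri and subnetworkUri cannot both be set.
  if (gce.networkUri != null && gce.subnetworkUri != null) {
    throw new ArgumentError('Set networkUri or subnetworkUri, not both.');
  }
  return gce;
}
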
1491 1594
1492 /** A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. */ 1595 /** A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. */
1493 class HadoopJob { 1596 class HadoopJob {
1494 /** 1597 /**
1495 * [Optional] HCFS URIs of archives to be extracted in the working directory 1598 * Optional HCFS URIs of archives to be extracted in the working directory of
1496 * of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, 1599 * Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz,
1497 * .tgz, or .zip. 1600 * or .zip.
1498 */ 1601 */
1499 core.List<core.String> archiveUris; 1602 core.List<core.String> archiveUris;
1500 /** 1603 /**
1501 * [Optional] The arguments to pass to the driver. Do not include arguments, 1604 * Optional The arguments to pass to the driver. Do not include arguments,
1502 * such as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since 1605 * such as -libjars or -Dfoo=bar, that can be set as job properties, since a
1503 * a collision may occur that causes an incorrect job submission. 1606 * collision may occur that causes an incorrect job submission.
1504 */ 1607 */
1505 core.List<core.String> args; 1608 core.List<core.String> args;
1506 /** 1609 /**
1507 * [Optional] HCFS URIs of files to be copied to the working directory of 1610 * Optional HCFS URIs of files to be copied to the working directory of Hadoop
1508 * Hadoop drivers and distributed tasks. Useful for naively parallel tasks. 1611 * drivers and distributed tasks. Useful for naively parallel tasks.
1509 */ 1612 */
1510 core.List<core.String> fileUris; 1613 core.List<core.String> fileUris;
1511 /** 1614 /**
1512 * [Optional] Jar file URIs to add to the CLASSPATHs of the Hadoop driver and 1615 * Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and
1513 * tasks. 1616 * tasks.
1514 */ 1617 */
1515 core.List<core.String> jarFileUris; 1618 core.List<core.String> jarFileUris;
1516 /** [Optional] The runtime log configuration for job execution. */ 1619 /** Optional The runtime log configuration for job execution. */
1517 LoggingConfiguration loggingConfiguration; 1620 LoggingConfiguration loggingConfiguration;
1518 /** 1621 /**
1519 * The name of the driver's main class. The jar file containing the class must 1622 * The name of the driver's main class. The jar file containing the class must
1520 * be in the default CLASSPATH or specified in `jar_file_uris`. 1623 * be in the default CLASSPATH or specified in jar_file_uris.
1521 */ 1624 */
1522 core.String mainClass; 1625 core.String mainClass;
1523 /** 1626 /**
1524 * The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the 1627 * The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the
1525 * main class. Examples: 1628 * main class. Examples:
1526 * 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 1629 * 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar'
1527 * 'hdfs:/tmp/test-samples/custom-wordcount.jar' 1630 * 'hdfs:/tmp/test-samples/custom-wordcount.jar'
1528 * 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar' 1631 * 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
1529 */ 1632 */
1530 core.String mainJarFileUri; 1633 core.String mainJarFileUri;
1531 /** 1634 /**
1532 * [Optional] A mapping of property names to values, used to configure Hadoop. 1635 * Optional A mapping of property names to values, used to configure Hadoop.
1533 * Properties that conflict with values set by the Cloud Dataproc API may be 1636 * Properties that conflict with values set by the Cloud Dataproc API may be
1534 * overwritten. Can include properties set in /etc/hadoop/conf / * -site and 1637 * overwritten. Can include properties set in /etc/hadoop/conf / * -site and
1535 * classes in user code. 1638 * classes in user code.
1536 */ 1639 */
1537 core.Map<core.String, core.String> properties; 1640 core.Map<core.String, core.String> properties;
1538 1641
1539 HadoopJob(); 1642 HadoopJob();
1540 1643
1541 HadoopJob.fromJson(core.Map _json) { 1644 HadoopJob.fromJson(core.Map _json) {
1542 if (_json.containsKey("archiveUris")) { 1645 if (_json.containsKey("archiveUris")) {
(...skipping 48 matching lines...)
1591 if (properties != null) { 1694 if (properties != null) {
1592 _json["properties"] = properties; 1695 _json["properties"] = properties;
1593 } 1696 }
1594 return _json; 1697 return _json;
1595 } 1698 }
1596 } 1699 }
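
A construction sketch for the fields above, with placeholder gs:// URIs and class name; note the documented caveat that -libjars/-D settings belong in properties, not args:

import 'package:googleapis_beta/dataproc/v1beta1.dart';

HadoopJob buildHadoopJob() => new HadoopJob()
  ..mainClass = 'com.example.ExtractMetrics'             // must be on the CLASSPATH or in jarFileUris
  ..jarFileUris = ['gs://my-bucket/extract-metrics.jar'] // placeholder bucket
  ..args = ['gs://my-bucket/input', 'gs://my-bucket/output']
  ..properties = {'mapreduce.job.reduces': '4'};         // instead of -D flags in args
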
1597 1700
1598 /** A Cloud Dataproc job for running Hive queries on YARN. */ 1701 /** A Cloud Dataproc job for running Hive queries on YARN. */
1599 class HiveJob { 1702 class HiveJob {
1600 /** 1703 /**
1601 * [Optional] Whether to continue executing queries if a query fails. The 1704 * Optional Whether to continue executing queries if a query fails. The
1602 * default value is `false`. Setting to `true` can be useful when executing 1705 * default value is false. Setting to true can be useful when executing
1603 * independent parallel queries. 1706 * independent parallel queries.
1604 */ 1707 */
1605 core.bool continueOnFailure; 1708 core.bool continueOnFailure;
1606 /** 1709 /**
1607 * [Optional] HCFS URIs of jar files to add to the CLASSPATH of the Hive 1710 * Optional HCFS URIs of jar files to add to the CLASSPATH of the Hive server
1608 * server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. 1711 * and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
1609 */ 1712 */
1610 core.List<core.String> jarFileUris; 1713 core.List<core.String> jarFileUris;
1611 /** 1714 /**
1612 * [Optional] A mapping of property names and values, used to configure Hive. 1715 * Optional A mapping of property names and values, used to configure Hive.
1613 * Properties that conflict with values set by the Cloud Dataproc API may be 1716 * Properties that conflict with values set by the Cloud Dataproc API may be
1614 * overwritten. Can include properties set in /etc/hadoop/conf / * -site.xml, 1717 * overwritten. Can include properties set in /etc/hadoop/conf / * -site.xml,
1615 * /etc/hive/conf/hive-site.xml, and classes in user code. 1718 * /etc/hive/conf/hive-site.xml, and classes in user code.
1616 */ 1719 */
1617 core.Map<core.String, core.String> properties; 1720 core.Map<core.String, core.String> properties;
1618 /** The HCFS URI of the script that contains Hive queries. */ 1721 /** The HCFS URI of the script that contains Hive queries. */
1619 core.String queryFileUri; 1722 core.String queryFileUri;
1620 /** A list of queries. */ 1723 /** A list of queries. */
1621 QueryList queryList; 1724 QueryList queryList;
1622 /** 1725 /**
1623 * [Optional] Mapping of query variable names to values (equivalent to the 1726 * Optional Mapping of query variable names to values (equivalent to the Hive
1624 * Hive command: `SET name="value";`). 1727 * command: SET name="value";).
1625 */ 1728 */
1626 core.Map<core.String, core.String> scriptVariables; 1729 core.Map<core.String, core.String> scriptVariables;
1627 1730
1628 HiveJob(); 1731 HiveJob();
1629 1732
1630 HiveJob.fromJson(core.Map _json) { 1733 HiveJob.fromJson(core.Map _json) {
1631 if (_json.containsKey("continueOnFailure")) { 1734 if (_json.containsKey("continueOnFailure")) {
1632 continueOnFailure = _json["continueOnFailure"]; 1735 continueOnFailure = _json["continueOnFailure"];
1633 } 1736 }
1634 if (_json.containsKey("jarFileUris")) { 1737 if (_json.containsKey("jarFileUris")) {
(...skipping 38 matching lines...)
1673 } 1776 }
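
A sketch of the query-file variant (placeholder script URI; import path assumed), showing how scriptVariables map to the documented SET name="value"; form:

import 'package:googleapis_beta/dataproc/v1beta1.dart';

HiveJob buildHiveJob() => new HiveJob()
  ..queryFileUri = 'gs://my-bucket/queries/report.hql' // placeholder script
  ..scriptVariables = {'run_date': '2017-02-13'}       // substituted as SET run_date="...";
  ..continueOnFailure = true;  // keep going across independent queries
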
1674 1777
1675 /** 1778 /**
1676 * The configuration settings for Google Compute Engine resources in an instance 1779 * The configuration settings for Google Compute Engine resources in an instance
1677 * group, such as a master or worker group. 1780 * group, such as a master or worker group.
1678 */ 1781 */
1679 class InstanceGroupConfiguration { 1782 class InstanceGroupConfiguration {
1680 /** Disk option configuration settings. */ 1783 /** Disk option configuration settings. */
1681 DiskConfiguration diskConfiguration; 1784 DiskConfiguration diskConfiguration;
1682 /** 1785 /**
1683 * [Output-only] The Google Compute Engine image resource used for cluster 1786 * Output-only The Google Compute Engine image resource used for cluster
1684 * instances. Inferred from `SoftwareConfiguration.image_version`. 1787 * instances. Inferred from SoftwareConfiguration.image_version.
1685 */ 1788 */
1686 core.String imageUri; 1789 core.String imageUri;
1687 /** 1790 /**
1688 * The list of instance names. Dataproc derives the names from `cluster_name`, 1791 * The list of instance names. Dataproc derives the names from cluster_name,
1689 * `num_instances`, and the instance group if not set by user (recommended 1792 * num_instances, and the instance group if not set by user (recommended
1690 * practice is to let Dataproc derive the name). 1793 * practice is to let Dataproc derive the name).
1691 */ 1794 */
1692 core.List<core.String> instanceNames; 1795 core.List<core.String> instanceNames;
1693 /** Specifies that this instance group contains Preemptible Instances. */ 1796 /** Specifies that this instance group contains Preemptible Instances. */
1694 core.bool isPreemptible; 1797 core.bool isPreemptible;
1695 /** 1798 /**
1696 * The Google Compute Engine machine type used for cluster instances. Example: 1799 * The Google Compute Engine machine type used for cluster instances. Example:
1697 * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`. 1800 * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2.
1698 */ 1801 */
1699 core.String machineTypeUri; 1802 core.String machineTypeUri;
1700 /** 1803 /**
1701 * [Output-only] The configuration for Google Compute Engine Instance Group 1804 * Output-only The configuration for Google Compute Engine Instance Group
1702 * Manager that manages this group. This is only used for preemptible instance 1805 * Manager that manages this group. This is only used for preemptible instance
1703 * groups. 1806 * groups.
1704 */ 1807 */
1705 ManagedGroupConfiguration managedGroupConfiguration; 1808 ManagedGroupConfiguration managedGroupConfiguration;
1706 /** 1809 /**
1707 * The number of VM instances in the instance group. For master instance 1810 * The number of VM instances in the instance group. For master instance
1708 * groups, must be set to 1. 1811 * groups, must be set to 1.
1709 */ 1812 */
1710 core.int numInstances; 1813 core.int numInstances;
1711 1814
(...skipping 46 matching lines...)
1758 if (numInstances != null) { 1861 if (numInstances != null) {
1759 _json["numInstances"] = numInstances; 1862 _json["numInstances"] = numInstances;
1760 } 1863 }
1761 return _json; 1864 return _json;
1762 } 1865 }
1763 } 1866 }
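
A sketch of the two group shapes the docs above call out (import path assumed; helper names hypothetical): master groups pinned to one instance, and a separate group flagged preemptible.

import 'package:googleapis_beta/dataproc/v1beta1.dart';

// Master groups must have numInstances = 1 per the field docs.
InstanceGroupConfiguration masters() =>
    new InstanceGroupConfiguration()..numInstances = 1;

InstanceGroupConfiguration preemptibleWorkers(int n) =>
    new InstanceGroupConfiguration()
      ..isPreemptible = true
      ..numInstances = n;
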
1764 1867
1765 /** A Cloud Dataproc job resource. */ 1868 /** A Cloud Dataproc job resource. */
1766 class Job { 1869 class Job {
1767 /** 1870 /**
1768 * [Output-only] If present, the location of miscellaneous control files which 1871 * Output-only If present, the location of miscellaneous control files which
1769 * may be used as part of job setup and handling. If not present, control 1872 * may be used as part of job setup and handling. If not present, control
1770 * files may be placed in the same location as `driver_output_uri`. 1873 * files may be placed in the same location as driver_output_uri.
1771 */ 1874 */
1772 core.String driverControlFilesUri; 1875 core.String driverControlFilesUri;
1773 /** 1876 /**
1774 * [Output-only] A URI pointing to the location of the stdin of the job's 1877 * Output-only A URI pointing to the location of the stdin of the job's driver
1775 * driver program, only set if the job is interactive. 1878 * program, only set if the job is interactive.
1776 */ 1879 */
1777 core.String driverInputResourceUri; 1880 core.String driverInputResourceUri;
1778 /** 1881 /**
1779 * [Output-only] A URI pointing to the location of the stdout of the job's 1882 * Output-only A URI pointing to the location of the stdout of the job's
1780 * driver program. 1883 * driver program.
1781 */ 1884 */
1782 core.String driverOutputResourceUri; 1885 core.String driverOutputResourceUri;
1783 /** Job is a Hadoop job. */ 1886 /** Job is a Hadoop job. */
1784 HadoopJob hadoopJob; 1887 HadoopJob hadoopJob;
1785 /** Job is a Hive job. */ 1888 /** Job is a Hive job. */
1786 HiveJob hiveJob; 1889 HiveJob hiveJob;
1787 /** 1890 /**
1788 * [Optional] If set to `true`, the driver's stdin will be kept open and 1891 * Optional If set to true, the driver's stdin will be kept open and
1789 * `driver_input_uri` will be set to provide a path at which additional input 1892 * driver_input_uri will be set to provide a path at which additional input
1790 * can be sent to the driver. 1893 * can be sent to the driver.
1791 */ 1894 */
1792 core.bool interactive; 1895 core.bool interactive;
1793 /** 1896 /**
1794 * [Optional] The labels to associate with this job. Label keys must be 1897 * Optional The labels to associate with this job. Label keys must be between 1
1795 * between 1 and 63 characters long, and must conform to the following regular 1898 * and 63 characters long, and must conform to the following regular
1796 * expression: \p{Ll}\p{Lo}{0,62} Label values must be between 1 and 63 1899 * expression: \p{Ll}\p{Lo}{0,62} Label values must be between 1 and 63
1797 * characters long, and must conform to the following regular expression: 1900 * characters long, and must conform to the following regular expression:
1798 * [\p{Ll}\p{Lo}\p{N}_-]{0,63} No more than 64 labels can be associated with a 1901 * [\p{Ll}\p{Lo}\p{N}_-]{0,63} No more than 64 labels can be associated with a
1799 * given job. 1902 * given job.
1800 */ 1903 */
1801 core.Map<core.String, core.String> labels; 1904 core.Map<core.String, core.String> labels;
1802 /** Job is a Pig job. */ 1905 /** Job is a Pig job. */
1803 PigJob pigJob; 1906 PigJob pigJob;
1804 /** 1907 /**
1805 * [Required] Job information, including how, when, and where to run the job. 1908 * Required Job information, including how, when, and where to run the job.
1806 */ 1909 */
1807 JobPlacement placement; 1910 JobPlacement placement;
1808 /** Job is a Pyspark job. */ 1911 /** Job is a Pyspark job. */
1809 PySparkJob pysparkJob; 1912 PySparkJob pysparkJob;
1810 /** 1913 /**
1811 * [Optional] The fully qualified reference to the job, which can be used to 1914 * Optional The fully qualified reference to the job, which can be used to
1812 * obtain the equivalent REST path of the job resource. If this property is 1915 * obtain the equivalent REST path of the job resource. If this property is
1813 * not specified when a job is created, the server generates a job_id. 1916 * not specified when a job is created, the server generates a
1917 * <code>job_id</code>.
1814 */ 1918 */
1815 JobReference reference; 1919 JobReference reference;
1920 /** Optional Job scheduling configuration. */
1921 JobScheduling scheduling;
1816 /** Job is a Spark job. */ 1922 /** Job is a Spark job. */
1817 SparkJob sparkJob; 1923 SparkJob sparkJob;
1818 /** Job is a SparkSql job. */ 1924 /** Job is a SparkSql job. */
1819 SparkSqlJob sparkSqlJob; 1925 SparkSqlJob sparkSqlJob;
1820 /** 1926 /**
1821 * [Output-only] The job status. Additional application-specific status 1927 * Output-only The job status. Additional application-specific status
1822 * information may be contained in the type_job and yarn_applications fields. 1928 * information may be contained in the <code>type_job</code> and
1929 * <code>yarn_applications</code> fields.
1823 */ 1930 */
1824 JobStatus status; 1931 JobStatus status;
1825 /** [Output-only] The previous job status. */ 1932 /** Output-only The previous job status. */
1826 core.List<JobStatus> statusHistory; 1933 core.List<JobStatus> statusHistory;
1827 /** 1934 /**
1828 * [Output-only] The email address of the user submitting the job. For jobs 1935 * Output-only The email address of the user submitting the job. For jobs
1829 * submitted on the cluster, the address is username@hostname. 1936 * submitted on the cluster, the address is <code>username@hostname</code>.
1830 */ 1937 */
1831 core.String submittedBy; 1938 core.String submittedBy;
1832 /** [Output-only] The collection of YARN applications spun up by this job. */ 1939 /** Output-only The collection of YARN applications spun up by this job. */
1833 core.List<YarnApplication> yarnApplications; 1940 core.List<YarnApplication> yarnApplications;
1834 1941
1835 Job(); 1942 Job();
1836 1943
1837 Job.fromJson(core.Map _json) { 1944 Job.fromJson(core.Map _json) {
1838 if (_json.containsKey("driverControlFilesUri")) { 1945 if (_json.containsKey("driverControlFilesUri")) {
1839 driverControlFilesUri = _json["driverControlFilesUri"]; 1946 driverControlFilesUri = _json["driverControlFilesUri"];
1840 } 1947 }
1841 if (_json.containsKey("driverInputResourceUri")) { 1948 if (_json.containsKey("driverInputResourceUri")) {
1842 driverInputResourceUri = _json["driverInputResourceUri"]; 1949 driverInputResourceUri = _json["driverInputResourceUri"];
(...skipping 18 matching lines...)
1861 } 1968 }
1862 if (_json.containsKey("placement")) { 1969 if (_json.containsKey("placement")) {
1863 placement = new JobPlacement.fromJson(_json["placement"]); 1970 placement = new JobPlacement.fromJson(_json["placement"]);
1864 } 1971 }
1865 if (_json.containsKey("pysparkJob")) { 1972 if (_json.containsKey("pysparkJob")) {
1866 pysparkJob = new PySparkJob.fromJson(_json["pysparkJob"]); 1973 pysparkJob = new PySparkJob.fromJson(_json["pysparkJob"]);
1867 } 1974 }
1868 if (_json.containsKey("reference")) { 1975 if (_json.containsKey("reference")) {
1869 reference = new JobReference.fromJson(_json["reference"]); 1976 reference = new JobReference.fromJson(_json["reference"]);
1870 } 1977 }
1978 if (_json.containsKey("scheduling")) {
1979 scheduling = new JobScheduling.fromJson(_json["scheduling"]);
1980 }
1871 if (_json.containsKey("sparkJob")) { 1981 if (_json.containsKey("sparkJob")) {
1872 sparkJob = new SparkJob.fromJson(_json["sparkJob"]); 1982 sparkJob = new SparkJob.fromJson(_json["sparkJob"]);
1873 } 1983 }
1874 if (_json.containsKey("sparkSqlJob")) { 1984 if (_json.containsKey("sparkSqlJob")) {
1875 sparkSqlJob = new SparkSqlJob.fromJson(_json["sparkSqlJob"]); 1985 sparkSqlJob = new SparkSqlJob.fromJson(_json["sparkSqlJob"]);
1876 } 1986 }
1877 if (_json.containsKey("status")) { 1987 if (_json.containsKey("status")) {
1878 status = new JobStatus.fromJson(_json["status"]); 1988 status = new JobStatus.fromJson(_json["status"]);
1879 } 1989 }
1880 if (_json.containsKey("statusHistory")) { 1990 if (_json.containsKey("statusHistory")) {
(...skipping 35 matching lines...)
1916 } 2026 }
1917 if (placement != null) { 2027 if (placement != null) {
1918 _json["placement"] = (placement).toJson(); 2028 _json["placement"] = (placement).toJson();
1919 } 2029 }
1920 if (pysparkJob != null) { 2030 if (pysparkJob != null) {
1921 _json["pysparkJob"] = (pysparkJob).toJson(); 2031 _json["pysparkJob"] = (pysparkJob).toJson();
1922 } 2032 }
1923 if (reference != null) { 2033 if (reference != null) {
1924 _json["reference"] = (reference).toJson(); 2034 _json["reference"] = (reference).toJson();
1925 } 2035 }
2036 if (scheduling != null) {
2037 _json["scheduling"] = (scheduling).toJson();
2038 }
1926 if (sparkJob != null) { 2039 if (sparkJob != null) {
1927 _json["sparkJob"] = (sparkJob).toJson(); 2040 _json["sparkJob"] = (sparkJob).toJson();
1928 } 2041 }
1929 if (sparkSqlJob != null) { 2042 if (sparkSqlJob != null) {
1930 _json["sparkSqlJob"] = (sparkSqlJob).toJson(); 2043 _json["sparkSqlJob"] = (sparkSqlJob).toJson();
1931 } 2044 }
1932 if (status != null) { 2045 if (status != null) {
1933 _json["status"] = (status).toJson(); 2046 _json["status"] = (status).toJson();
1934 } 2047 }
1935 if (statusHistory != null) { 2048 if (statusHistory != null) {
1936 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to List(); 2049 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to List();
1937 } 2050 }
1938 if (submittedBy != null) { 2051 if (submittedBy != null) {
1939 _json["submittedBy"] = submittedBy; 2052 _json["submittedBy"] = submittedBy;
1940 } 2053 }
1941 if (yarnApplications != null) { 2054 if (yarnApplications != null) {
1942 _json["yarnApplications"] = yarnApplications.map((value) => (value).toJson ()).toList(); 2055 _json["yarnApplications"] = yarnApplications.map((value) => (value).toJson ()).toList();
1943 } 2056 }
1944 return _json; 2057 return _json;
1945 } 2058 }
1946 } 2059 }
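
A sketch assembling the pieces documented above: a caller-chosen reference, the required placement, and exactly one job type. Import path, job ID, and label values are placeholders.

import 'package:googleapis_beta/dataproc/v1beta1.dart';

Job buildJob(String projectId, String clusterName, HadoopJob hadoop) => new Job()
  ..reference = (new JobReference()
    ..projectId = projectId
    ..jobId = 'wordcount-001')  // unique within the project
  ..placement = (new JobPlacement()..clusterName = clusterName)
  ..hadoopJob = hadoop
  ..labels = {'team': 'analytics'};  // keys/values must match the regexes above
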
1947 2060
1948 /** Cloud Dataproc job configuration. */ 2061 /** Cloud Dataproc job configuration. */
1949 class JobPlacement { 2062 class JobPlacement {
1950 /** [Required] The name of the cluster where the job will be submitted. */ 2063 /** Required The name of the cluster where the job will be submitted. */
1951 core.String clusterName; 2064 core.String clusterName;
1952 /** 2065 /**
1953 * [Output-only] A cluster UUID generated by the Dataproc service when the job 2066 * Output-only A cluster UUID generated by the Dataproc service when the job
1954 * is submitted. 2067 * is submitted.
1955 */ 2068 */
1956 core.String clusterUuid; 2069 core.String clusterUuid;
1957 2070
1958 JobPlacement(); 2071 JobPlacement();
1959 2072
1960 JobPlacement.fromJson(core.Map _json) { 2073 JobPlacement.fromJson(core.Map _json) {
1961 if (_json.containsKey("clusterName")) { 2074 if (_json.containsKey("clusterName")) {
1962 clusterName = _json["clusterName"]; 2075 clusterName = _json["clusterName"];
1963 } 2076 }
(...skipping 10 matching lines...) Expand all
1974 if (clusterUuid != null) { 2087 if (clusterUuid != null) {
1975 _json["clusterUuid"] = clusterUuid; 2088 _json["clusterUuid"] = clusterUuid;
1976 } 2089 }
1977 return _json; 2090 return _json;
1978 } 2091 }
1979 } 2092 }
1980 2093
1981 /** Encapsulates the full scoping used to reference a job. */ 2094 /** Encapsulates the full scoping used to reference a job. */
1982 class JobReference { 2095 class JobReference {
1983 /** 2096 /**
1984 * [Required] The job ID, which must be unique within the project. The job ID 2097 * Required The job ID, which must be unique within the project. The job ID is
1985 * is generated by the server upon job submission or provided by the user as a 2098 * generated by the server upon job submission or provided by the user as a
1986 * means to perform retries without creating duplicate jobs. The ID must 2099 * means to perform retries without creating duplicate jobs. The ID must
1987 * contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens 2100 * contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens
1988 * (-). The maximum length is 512 characters. 2101 * (-). The maximum length is 512 characters.
1989 */ 2102 */
1990 core.String jobId; 2103 core.String jobId;
1991 /** 2104 /**
1992 * [Required] The ID of the Google Cloud Platform project that the job belongs 2105 * Required The ID of the Google Cloud Platform project that the job belongs
1993 * to. 2106 * to.
1994 */ 2107 */
1995 core.String projectId; 2108 core.String projectId;
1996 2109
1997 JobReference(); 2110 JobReference();
1998 2111
1999 JobReference.fromJson(core.Map _json) { 2112 JobReference.fromJson(core.Map _json) {
2000 if (_json.containsKey("jobId")) { 2113 if (_json.containsKey("jobId")) {
2001 jobId = _json["jobId"]; 2114 jobId = _json["jobId"];
2002 } 2115 }
2003 if (_json.containsKey("projectId")) { 2116 if (_json.containsKey("projectId")) {
2004 projectId = _json["projectId"]; 2117 projectId = _json["projectId"];
2005 } 2118 }
2006 } 2119 }
2007 2120
2008 core.Map toJson() { 2121 core.Map toJson() {
2009 var _json = new core.Map(); 2122 var _json = new core.Map();
2010 if (jobId != null) { 2123 if (jobId != null) {
2011 _json["jobId"] = jobId; 2124 _json["jobId"] = jobId;
2012 } 2125 }
2013 if (projectId != null) { 2126 if (projectId != null) {
2014 _json["projectId"] = projectId; 2127 _json["projectId"] = projectId;
2015 } 2128 }
2016 return _json; 2129 return _json;
2017 } 2130 }
2018 } 2131 }
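
A sketch of the retry idiom the jobId docs describe (import path assumed; helper name hypothetical): a caller-supplied, deterministic ID lets a resubmission be recognized instead of creating a duplicate job.

import 'package:googleapis_beta/dataproc/v1beta1.dart';

JobReference dailyReportRef(String projectId, String isoDate) =>
    new JobReference()
      ..projectId = projectId
      ..jobId = 'daily-report-$isoDate';  // letters, digits, _ and - only; <=512 chars
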
2019 2132
2133 /**
2134 * Job scheduling options. Beta Feature: These options are available for testing
2135 * purposes only. They may be changed before final release.
2136 */
2137 class JobScheduling {
2138 /**
2139 * Optional Maximum number of times per hour a driver may be restarted as a
2140 * result of the driver terminating with a non-zero code before the job is
2141 * reported failed. A job may be reported as thrashing if the driver exits
2142 * with a non-zero code 4 times within a 10-minute window. Maximum value is 10.
2143 */
2144 core.int maxFailuresPerHour;
2145
2146 JobScheduling();
2147
2148 JobScheduling.fromJson(core.Map _json) {
2149 if (_json.containsKey("maxFailuresPerHour")) {
2150 maxFailuresPerHour = _json["maxFailuresPerHour"];
2151 }
2152 }
2153
2154 core.Map toJson() {
2155 var _json = new core.Map();
2156 if (maxFailuresPerHour != null) {
2157 _json["maxFailuresPerHour"] = maxFailuresPerHour;
2158 }
2159 return _json;
2160 }
2161 }
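
Since this is a beta feature per the class docs, a minimal sketch is all that is warranted (import path assumed):

import 'package:googleapis_beta/dataproc/v1beta1.dart';

// Opt a job into driver restarts; 10 is the documented ceiling.
JobScheduling restartable() => new JobScheduling()..maxFailuresPerHour = 5;
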
2162
2020 /** Cloud Dataproc job status. */ 2163 /** Cloud Dataproc job status. */
2021 class JobStatus { 2164 class JobStatus {
2022 /** 2165 /**
2023 * [Optional] Job state details, such as an error description if the state is 2166 * Optional Job state details, such as an error description if the state is
2024 * ERROR. 2167 * <code>ERROR</code>.
2025 */ 2168 */
2026 core.String details; 2169 core.String details;
2027 /** 2170 /**
2028 * [Required] A state message specifying the overall job state. 2171 * Required A state message specifying the overall job state.
2029 * Possible string values are: 2172 * Possible string values are:
2030 * - "STATE_UNSPECIFIED" : A STATE_UNSPECIFIED. 2173 * - "STATE_UNSPECIFIED" : The job state is unknown.
2031 * - "PENDING" : A PENDING. 2174 * - "PENDING" : The job is pending; it has been submitted, but is not yet
2032 * - "SETUP_DONE" : A SETUP_DONE. 2175 * running.
2033 * - "RUNNING" : A RUNNING. 2176 * - "SETUP_DONE" : Job has been received by the service and completed initial
2034 * - "CANCEL_PENDING" : A CANCEL_PENDING. 2177 * setup; it will shortly be submitted to the cluster.
2035 * - "CANCEL_STARTED" : A CANCEL_STARTED. 2178 * - "RUNNING" : The job is running on the cluster.
2036 * - "CANCELLED" : A CANCELLED. 2179 * - "CANCEL_PENDING" : A CancelJob request has been received, but is pending.
2037 * - "DONE" : A DONE. 2180 * - "CANCEL_STARTED" : Transient in-flight resources have been canceled, and
2038 * - "ERROR" : A ERROR. 2181 * the request to cancel the running job has been issued to the cluster.
2182 * - "CANCELLED" : The job cancelation was successful.
2183 * - "DONE" : The job has completed successfully.
2184 * - "ERROR" : The job has completed, but encountered an error.
2185 * - "ATTEMPT_FAILURE" : Job attempt has failed. The detail field contains
2186 * failure details for this attempt. Applies to restartable jobs only.
2039 */ 2187 */
2040 core.String state; 2188 core.String state;
2041 /** [Output-only] The time when this state was entered. */ 2189 /** Output-only The time when this state was entered. */
2042 core.String stateStartTime; 2190 core.String stateStartTime;
2043 2191
2044 JobStatus(); 2192 JobStatus();
2045 2193
2046 JobStatus.fromJson(core.Map _json) { 2194 JobStatus.fromJson(core.Map _json) {
2047 if (_json.containsKey("details")) { 2195 if (_json.containsKey("details")) {
2048 details = _json["details"]; 2196 details = _json["details"];
2049 } 2197 }
2050 if (_json.containsKey("state")) { 2198 if (_json.containsKey("state")) {
2051 state = _json["state"]; 2199 state = _json["state"];
(...skipping 13 matching lines...)
2065 } 2213 }
2066 if (stateStartTime != null) { 2214 if (stateStartTime != null) {
2067 _json["stateStartTime"] = stateStartTime; 2215 _json["stateStartTime"] = stateStartTime;
2068 } 2216 }
2069 return _json; 2217 return _json;
2070 } 2218 }
2071 } 2219 }
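
A sketch classifying the state strings above (import path assumed; helper name hypothetical):

import 'package:googleapis_beta/dataproc/v1beta1.dart';

// CANCELLED, DONE, and ERROR are terminal per the enum above;
// ATTEMPT_FAILURE is not, since a restartable job may still be retried.
bool isTerminal(JobStatus status) =>
    const ['CANCELLED', 'DONE', 'ERROR'].contains(status.state);
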
2072 2220
2073 /** The list of all clusters in a project. */ 2221 /** The list of all clusters in a project. */
2074 class ListClustersResponse { 2222 class ListClustersResponse {
2075 /** [Output-only] The clusters in the project. */ 2223 /** Output-only The clusters in the project. */
2076 core.List<Cluster> clusters; 2224 core.List<Cluster> clusters;
2077 /** The standard List next-page token. */ 2225 /** The standard List next-page token. */
2078 core.String nextPageToken; 2226 core.String nextPageToken;
2079 2227
2080 ListClustersResponse(); 2228 ListClustersResponse();
2081 2229
2082 ListClustersResponse.fromJson(core.Map _json) { 2230 ListClustersResponse.fromJson(core.Map _json) {
2083 if (_json.containsKey("clusters")) { 2231 if (_json.containsKey("clusters")) {
2084 clusters = _json["clusters"].map((value) => new Cluster.fromJson(value)).toList(); 2232 clusters = _json["clusters"].map((value) => new Cluster.fromJson(value)).toList();
2085 } 2233 }
2086 if (_json.containsKey("nextPageToken")) { 2234 if (_json.containsKey("nextPageToken")) {
2087 nextPageToken = _json["nextPageToken"]; 2235 nextPageToken = _json["nextPageToken"];
2088 } 2236 }
2089 } 2237 }
2090 2238
2091 core.Map toJson() { 2239 core.Map toJson() {
2092 var _json = new core.Map(); 2240 var _json = new core.Map();
2093 if (clusters != null) { 2241 if (clusters != null) {
2094 _json["clusters"] = clusters.map((value) => (value).toJson()).toList(); 2242 _json["clusters"] = clusters.map((value) => (value).toJson()).toList();
2095 } 2243 }
2096 if (nextPageToken != null) { 2244 if (nextPageToken != null) {
2097 _json["nextPageToken"] = nextPageToken; 2245 _json["nextPageToken"] = nextPageToken;
2098 } 2246 }
2099 return _json; 2247 return _json;
2100 } 2248 }
2101 } 2249 }
2102 2250
2103 /** A list of jobs in a project. */ 2251 /** A list of jobs in a project. */
2104 class ListJobsResponse { 2252 class ListJobsResponse {
2105 /** [Output-only] Jobs list. */ 2253 /** Output-only Jobs list. */
2106 core.List<Job> jobs; 2254 core.List<Job> jobs;
2107 /** 2255 /**
2108 * [Optional] This token is included in the response if there are more results 2256 * Optional This token is included in the response if there are more results
2109 * to fetch. To fetch additional results, provide this value as the 2257 * to fetch. To fetch additional results, provide this value as the page_token
2110 * `page_token` in a subsequent ListJobsRequest. 2258 * in a subsequent <code>ListJobsRequest</code>.
2111 */ 2259 */
2112 core.String nextPageToken; 2260 core.String nextPageToken;
2113 2261
2114 ListJobsResponse(); 2262 ListJobsResponse();
2115 2263
2116 ListJobsResponse.fromJson(core.Map _json) { 2264 ListJobsResponse.fromJson(core.Map _json) {
2117 if (_json.containsKey("jobs")) { 2265 if (_json.containsKey("jobs")) {
2118 jobs = _json["jobs"].map((value) => new Job.fromJson(value)).toList(); 2266 jobs = _json["jobs"].map((value) => new Job.fromJson(value)).toList();
2119 } 2267 }
2120 if (_json.containsKey("nextPageToken")) { 2268 if (_json.containsKey("nextPageToken")) {
(...skipping 40 matching lines...)
2161 _json["operations"] = operations.map((value) => (value).toJson()).toList() ; 2309 _json["operations"] = operations.map((value) => (value).toJson()).toList() ;
2162 } 2310 }
2163 return _json; 2311 return _json;
2164 } 2312 }
2165 } 2313 }
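
The nextPageToken fields above follow the standard List pattern, so a drain loop is mechanical. A sketch (import path assumed; fetchPage is a hypothetical stand-in for whatever issues the underlying list call with the given token, null on the first call):

import 'dart:async';
import 'package:googleapis_beta/dataproc/v1beta1.dart';

Future<List<Job>> listAllJobs(
    Future<ListJobsResponse> fetchPage(String pageToken)) async {
  var jobs = <Job>[];
  String token;
  do {
    var page = await fetchPage(token);
    if (page.jobs != null) jobs.addAll(page.jobs);
    token = page.nextPageToken;
  } while (token != null && token.isNotEmpty);
  return jobs;
}
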
2166 2314
2167 /** The runtime logging configuration of the job. */ 2315 /** The runtime logging configuration of the job. */
2168 class LoggingConfiguration { 2316 class LoggingConfiguration {
2169 /** 2317 /**
2170 * The per-package log levels for the driver. This may include the "root" package 2318 * The per-package log levels for the driver. This may include the "root" package
2171 * name to configure rootLogger. Examples: 'com.google = FATAL', 'root = 2319 * name to configure rootLogger. Examples: 'com.google = FATAL', 'root =
2172 * INFO', 'org.apache = DEBUG' 2320 * INFO', 'org.apache = DEBUG'
2173 */ 2321 */
2174 core.Map<core.String, core.String> driverLogLevels; 2322 core.Map<core.String, core.String> driverLogLevels;
2175 2323
2176 LoggingConfiguration(); 2324 LoggingConfiguration();
2177 2325
2178 LoggingConfiguration.fromJson(core.Map _json) { 2326 LoggingConfiguration.fromJson(core.Map _json) {
2179 if (_json.containsKey("driverLogLevels")) { 2327 if (_json.containsKey("driverLogLevels")) {
2180 driverLogLevels = _json["driverLogLevels"]; 2328 driverLogLevels = _json["driverLogLevels"];
2181 } 2329 }
2182 } 2330 }
2183 2331
2184 core.Map toJson() { 2332 core.Map toJson() {
2185 var _json = new core.Map(); 2333 var _json = new core.Map();
2186 if (driverLogLevels != null) { 2334 if (driverLogLevels != null) {
2187 _json["driverLogLevels"] = driverLogLevels; 2335 _json["driverLogLevels"] = driverLogLevels;
2188 } 2336 }
2189 return _json; 2337 return _json;
2190 } 2338 }
2191 } 2339 }
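
A sketch mirroring the documented examples, where 'root' configures the rootLogger (import path assumed):

import 'package:googleapis_beta/dataproc/v1beta1.dart';

LoggingConfiguration buildLogging() => new LoggingConfiguration()
  ..driverLogLevels = {
    'root': 'INFO',
    'com.google': 'FATAL',
    'org.apache': 'DEBUG',
  };
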
2192 2340
2193 /** Specifies the resources used to actively manage an instance group. */ 2341 /** Specifies the resources used to actively manage an instance group. */
2194 class ManagedGroupConfiguration { 2342 class ManagedGroupConfiguration {
2195 /** [Output-only] The name of the Instance Group Manager for this group. */ 2343 /** Output-only The name of the Instance Group Manager for this group. */
2196 core.String instanceGroupManagerName; 2344 core.String instanceGroupManagerName;
2197 /** 2345 /**
2198 * [Output-only] The name of the Instance Template used for the Managed 2346 * Output-only The name of the Instance Template used for the Managed Instance
2199 * Instance Group. 2347 * Group.
2200 */ 2348 */
2201 core.String instanceTemplateName; 2349 core.String instanceTemplateName;
2202 2350
2203 ManagedGroupConfiguration(); 2351 ManagedGroupConfiguration();
2204 2352
2205 ManagedGroupConfiguration.fromJson(core.Map _json) { 2353 ManagedGroupConfiguration.fromJson(core.Map _json) {
2206 if (_json.containsKey("instanceGroupManagerName")) { 2354 if (_json.containsKey("instanceGroupManagerName")) {
2207 instanceGroupManagerName = _json["instanceGroupManagerName"]; 2355 instanceGroupManagerName = _json["instanceGroupManagerName"];
2208 } 2356 }
2209 if (_json.containsKey("instanceTemplateName")) { 2357 if (_json.containsKey("instanceTemplateName")) {
(...skipping 11 matching lines...)
2221 } 2369 }
2222 return _json; 2370 return _json;
2223 } 2371 }
2224 } 2372 }
2225 2373
2226 /** 2374 /**
2227 * Specifies an executable to run on a fully configured node and a timeout 2375 * Specifies an executable to run on a fully configured node and a timeout
2228 * period for executable completion. 2376 * period for executable completion.
2229 */ 2377 */
2230 class NodeInitializationAction { 2378 class NodeInitializationAction {
2231 /** [Required] Google Cloud Storage URI of executable file. */ 2379 /** Required Google Cloud Storage URI of executable file. */
2232 core.String executableFile; 2380 core.String executableFile;
2233 /** 2381 /**
2234 * [Optional] Amount of time executable has to complete. Default is 10 2382 * Optional Amount of time executable has to complete. Default is 10 minutes.
2235 * minutes. Cluster creation fails with an explanatory error message (the name 2383 * Cluster creation fails with an explanatory error message (the name of the
2236 * of the executable that caused the error and the exceeded timeout period) if 2384 * executable that caused the error and the exceeded timeout period) if the
2237 * the executable is not completed at the end of the timeout period. 2385 * executable is not completed at the end of the timeout period.
2238 */ 2386 */
2239 core.String executionTimeout; 2387 core.String executionTimeout;
2240 2388
2241 NodeInitializationAction(); 2389 NodeInitializationAction();
2242 2390
2243 NodeInitializationAction.fromJson(core.Map _json) { 2391 NodeInitializationAction.fromJson(core.Map _json) {
2244 if (_json.containsKey("executableFile")) { 2392 if (_json.containsKey("executableFile")) {
2245 executableFile = _json["executableFile"]; 2393 executableFile = _json["executableFile"];
2246 } 2394 }
2247 if (_json.containsKey("executionTimeout")) { 2395 if (_json.containsKey("executionTimeout")) {
(...skipping 12 matching lines...)
2260 return _json; 2408 return _json;
2261 } 2409 }
2262 } 2410 }
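
A construction sketch (import path and script URI are placeholders; the '600s' form assumes the standard JSON encoding of a Duration, matching the documented 10-minute default):

import 'package:googleapis_beta/dataproc/v1beta1.dart';

NodeInitializationAction bootstrapAction() => new NodeInitializationAction()
  ..executableFile = 'gs://my-bucket/bootstrap.sh'  // placeholder URI
  ..executionTimeout = '600s';                      // documented default: 10 minutes
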
2263 2411
2264 /** 2412 /**
2265 * This resource represents a long-running operation that is the result of a 2413 * This resource represents a long-running operation that is the result of a
2266 * network API call. 2414 * network API call.
2267 */ 2415 */
2268 class Operation { 2416 class Operation {
2269 /** 2417 /**
2270 * If the value is `false`, it means the operation is still in progress. If 2418 * If the value is false, it means the operation is still in progress. If
2271 * true, the operation is completed, and either `error` or `response` is 2419 * true, the operation is completed, and either error or response is
2272 * available. 2420 * available.
2273 */ 2421 */
2274 core.bool done; 2422 core.bool done;
2275 /** The error result of the operation in case of failure or cancellation. */ 2423 /** The error result of the operation in case of failure or cancellation. */
2276 Status error; 2424 Status error;
2277 /** 2425 /**
2278 * Service-specific metadata associated with the operation. It typically 2426 * Service-specific metadata associated with the operation. It typically
2279 * contains progress information and common metadata such as create time. Some 2427 * contains progress information and common metadata such as create time. Some
2280 * services might not provide such metadata. Any method that returns a 2428 * services might not provide such metadata. Any method that returns a
2281 * long-running operation should document the metadata type, if any. 2429 * long-running operation should document the metadata type, if any.
2282 * 2430 *
2283 * The values for Object must be JSON objects. It can consist of `num`, 2431 * The values for Object must be JSON objects. It can consist of `num`,
2284 * `String`, `bool` and `null` as well as `Map` and `List` values. 2432 * `String`, `bool` and `null` as well as `Map` and `List` values.
2285 */ 2433 */
2286 core.Map<core.String, core.Object> metadata; 2434 core.Map<core.String, core.Object> metadata;
2287 /** 2435 /**
2288 * The server-assigned name, which is only unique within the same service that 2436 * The server-assigned name, which is only unique within the same service that
2289 * originally returns it. If you use the default HTTP mapping, the `name` 2437 * originally returns it. If you use the default HTTP mapping, the name should
2290 * should have the format of `operations/some/unique/name`. 2438 * have the format of operations/some/unique/name.
2291 */ 2439 */
2292 core.String name; 2440 core.String name;
2293 /** 2441 /**
2294 * The normal response of the operation in case of success. If the original 2442 * The normal response of the operation in case of success. If the original
2295 * method returns no data on success, such as `Delete`, the response is 2443 * method returns no data on success, such as Delete, the response is
2296 * `google.protobuf.Empty`. If the original method is standard 2444 * google.protobuf.Empty. If the original method is standard
2297 * `Get`/`Create`/`Update`, the response should be the resource. For other 2445 * Get/Create/Update, the response should be the resource. For other methods,
2298 * methods, the response should have the type `XxxResponse`, where `Xxx` is 2446 * the response should have the type XxxResponse, where Xxx is the original
2299 * the original method name. For example, if the original method name is 2447 * method name. For example, if the original method name is TakeSnapshot(),
2300 * `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`. 2448 * the inferred response type is TakeSnapshotResponse.
2301 * 2449 *
2302 * The values for Object must be JSON objects. It can consist of `num`, 2450 * The values for Object must be JSON objects. It can consist of `num`,
2303 * `String`, `bool` and `null` as well as `Map` and `List` values. 2451 * `String`, `bool` and `null` as well as `Map` and `List` values.
2304 */ 2452 */
2305 core.Map<core.String, core.Object> response; 2453 core.Map<core.String, core.Object> response;
2306 2454
2307 Operation(); 2455 Operation();
2308 2456
2309 Operation.fromJson(core.Map _json) { 2457 Operation.fromJson(core.Map _json) {
2310 if (_json.containsKey("done")) { 2458 if (_json.containsKey("done")) {
(...skipping 33 matching lines...)
2344 return _json; 2492 return _json;
2345 } 2493 }
2346 } 2494 }
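
The done/error/response contract above suggests a simple polling loop. A sketch (import path assumed; fetch is a hypothetical stand-in for the operations.get call):

import 'dart:async';
import 'package:googleapis_beta/dataproc/v1beta1.dart';

// Re-reads the operation until done is true, then surfaces failure vs
// success as the field docs above describe.
Future<Operation> waitForOperation(
    Future<Operation> fetch(String name), Operation op) async {
  while (op.done != true) {
    await new Future.delayed(const Duration(seconds: 5));
    op = await fetch(op.name);
  }
  if (op.error != null) {
    throw new StateError('Operation ${op.name} failed.');
  }
  return op;
}
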
2347 2495
2348 /** Metadata describing the operation. */ 2496 /** Metadata describing the operation. */
2349 class OperationMetadata { 2497 class OperationMetadata {
2350 /** Name of the cluster for the operation. */ 2498 /** Name of the cluster for the operation. */
2351 core.String clusterName; 2499 core.String clusterName;
2352 /** Cluster UUID for the operation. */ 2500 /** Cluster UUID for the operation. */
2353 core.String clusterUuid; 2501 core.String clusterUuid;
2354 /** [Output-only] Short description of operation. */ 2502 /** Output-only Short description of operation. */
2355 core.String description; 2503 core.String description;
2356 /** A message containing any operation metadata details. */ 2504 /** A message containing any operation metadata details. */
2357 core.String details; 2505 core.String details;
2358 /** The time that the operation completed. */ 2506 /** The time that the operation completed. */
2359 core.String endTime; 2507 core.String endTime;
2360 /** A message containing the detailed operation state. */ 2508 /** A message containing the detailed operation state. */
2361 core.String innerState; 2509 core.String innerState;
2362 /** The time that the operation was requested. */ 2510 /** The time that the operation was requested. */
2363 core.String insertTime; 2511 core.String insertTime;
2364 /** [Output-only] The operation type. */ 2512 /** Output-only The operation type. */
2365 core.String operationType; 2513 core.String operationType;
2366 /** The time that the operation was started by the server. */ 2514 /** The time that the operation was started by the server. */
2367 core.String startTime; 2515 core.String startTime;
2368 /** 2516 /**
2369 * A message containing the operation state. 2517 * A message containing the operation state.
2370 * Possible string values are: 2518 * Possible string values are:
2371 * - "UNKNOWN" : A UNKNOWN. 2519 * - "UNKNOWN" : Unused.
2372 * - "PENDING" : A PENDING. 2520 * - "PENDING" : The operation has been created.
2373 * - "RUNNING" : A RUNNING. 2521 * - "RUNNING" : The operation is currently running.
2374 * - "DONE" : A DONE. 2522 * - "DONE" : The operation is done, either cancelled or completed.
2375 */ 2523 */
2376 core.String state; 2524 core.String state;
2377 /** [Output-only] Current operation status. */ 2525 /** Output-only Current operation status. */
2378 OperationStatus status; 2526 OperationStatus status;
2379 /** [Output-only] Previous operation status. */ 2527 /** Output-only Previous operation status. */
2380 core.List<OperationStatus> statusHistory; 2528 core.List<OperationStatus> statusHistory;
2529 /** Output-only Errors encountered during operation execution. */
2530 core.List<core.String> warnings;
2381 2531
2382 OperationMetadata(); 2532 OperationMetadata();
2383 2533
2384 OperationMetadata.fromJson(core.Map _json) { 2534 OperationMetadata.fromJson(core.Map _json) {
2385 if (_json.containsKey("clusterName")) { 2535 if (_json.containsKey("clusterName")) {
2386 clusterName = _json["clusterName"]; 2536 clusterName = _json["clusterName"];
2387 } 2537 }
2388 if (_json.containsKey("clusterUuid")) { 2538 if (_json.containsKey("clusterUuid")) {
2389 clusterUuid = _json["clusterUuid"]; 2539 clusterUuid = _json["clusterUuid"];
2390 } 2540 }
(...skipping 20 matching lines...)
2411 } 2561 }
2412 if (_json.containsKey("state")) { 2562 if (_json.containsKey("state")) {
2413 state = _json["state"]; 2563 state = _json["state"];
2414 } 2564 }
2415 if (_json.containsKey("status")) { 2565 if (_json.containsKey("status")) {
2416 status = new OperationStatus.fromJson(_json["status"]); 2566 status = new OperationStatus.fromJson(_json["status"]);
2417 } 2567 }
2418 if (_json.containsKey("statusHistory")) { 2568 if (_json.containsKey("statusHistory")) {
2419 statusHistory = _json["statusHistory"].map((value) => new OperationStatus.fromJson(value)).toList(); 2569 statusHistory = _json["statusHistory"].map((value) => new OperationStatus.fromJson(value)).toList();
2420 } 2570 }
2571 if (_json.containsKey("warnings")) {
2572 warnings = _json["warnings"];
2573 }
2421 } 2574 }
2422 2575
2423 core.Map toJson() { 2576 core.Map toJson() {
2424 var _json = new core.Map(); 2577 var _json = new core.Map();
2425 if (clusterName != null) { 2578 if (clusterName != null) {
2426 _json["clusterName"] = clusterName; 2579 _json["clusterName"] = clusterName;
2427 } 2580 }
2428 if (clusterUuid != null) { 2581 if (clusterUuid != null) {
2429 _json["clusterUuid"] = clusterUuid; 2582 _json["clusterUuid"] = clusterUuid;
2430 } 2583 }
(...skipping 20 matching lines...)
2451 } 2604 }
2452 if (state != null) { 2605 if (state != null) {
2453 _json["state"] = state; 2606 _json["state"] = state;
2454 } 2607 }
2455 if (status != null) { 2608 if (status != null) {
2456 _json["status"] = (status).toJson(); 2609 _json["status"] = (status).toJson();
2457 } 2610 }
2458 if (statusHistory != null) { 2611 if (statusHistory != null) {
2459 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).toList(); 2612 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).toList();
2460 } 2613 }
2614 if (warnings != null) {
2615 _json["warnings"] = warnings;
2616 }
2461 return _json; 2617 return _json;
2462 } 2618 }
2463 } 2619 }
2464 2620
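For reference, a minimal sketch (not part of the generated file) of decoding an OperationMetadata payload such as the one carried on an operation's metadata map; the field values are illustrative only.

import 'package:googleapis_beta/dataproc/v1beta1.dart';

void main() {
  // Illustrative metadata for a cluster operation (sample values only).
  var metadata = new OperationMetadata.fromJson({
    "clusterName": "example-cluster",
    "state": "RUNNING",
    "warnings": ["example warning"]  // field added in this API roll
  });
  print("${metadata.clusterName}: ${metadata.state}");
  if (metadata.warnings != null) {
    metadata.warnings.forEach(print);
  }
}
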
2465 /** The status of the operation. */ 2621 /** The status of the operation. */
2466 class OperationStatus { 2622 class OperationStatus {
2467 /** A message containing any operation metadata details. */ 2623 /** A message containing any operation metadata details. */
2468 core.String details; 2624 core.String details;
2469 /** A message containing the detailed operation state. */ 2625 /** A message containing the detailed operation state. */
2470 core.String innerState; 2626 core.String innerState;
2471 /** 2627 /**
2472 * A message containing the operation state. 2628 * A message containing the operation state.
2473 * Possible string values are: 2629 * Possible string values are:
2474 * - "UNKNOWN" : A UNKNOWN. 2630 * - "UNKNOWN" : Unused.
2475 * - "PENDING" : A PENDING. 2631 * - "PENDING" : The operation has been created.
2476 * - "RUNNING" : A RUNNING. 2632 * - "RUNNING" : The operation is running.
2477 * - "DONE" : A DONE. 2633 * - "DONE" : The operation is done; either cancelled or completed.
2478 */ 2634 */
2479 core.String state; 2635 core.String state;
2480 /** The time this state was entered. */ 2636 /** The time this state was entered. */
2481 core.String stateStartTime; 2637 core.String stateStartTime;
2482 2638
2483 OperationStatus(); 2639 OperationStatus();
2484 2640
2485 OperationStatus.fromJson(core.Map _json) { 2641 OperationStatus.fromJson(core.Map _json) {
2486 if (_json.containsKey("details")) { 2642 if (_json.containsKey("details")) {
2487 details = _json["details"]; 2643 details = _json["details"];
(...skipping 23 matching lines...)
2511 if (stateStartTime != null) { 2667 if (stateStartTime != null) {
2512 _json["stateStartTime"] = stateStartTime; 2668 _json["stateStartTime"] = stateStartTime;
2513 } 2669 }
2514 return _json; 2670 return _json;
2515 } 2671 }
2516 } 2672 }
2517 2673
2518 /** A Cloud Dataproc job for running Pig queries on YARN. */ 2674 /** A Cloud Dataproc job for running Pig queries on YARN. */
2519 class PigJob { 2675 class PigJob {
2520 /** 2676 /**
2521 * [Optional] Whether to continue executing queries if a query fails. The 2677 * Optional Whether to continue executing queries if a query fails. The
2522 * default value is `false`. Setting to `true` can be useful when executing 2678 * default value is false. Setting to true can be useful when executing
2523 * independent parallel queries. 2679 * independent parallel queries.
2524 */ 2680 */
2525 core.bool continueOnFailure; 2681 core.bool continueOnFailure;
2526 /** 2682 /**
2527 * [Optional] HCFS URIs of jar files to add to the CLASSPATH of the Pig Client 2683 * Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client
2528 * and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. 2684 * and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
2529 */ 2685 */
2530 core.List<core.String> jarFileUris; 2686 core.List<core.String> jarFileUris;
2531 /** [Optional] The runtime log configuration for job execution. */ 2687 /** Optional The runtime log configuration for job execution. */
2532 LoggingConfiguration loggingConfiguration; 2688 LoggingConfiguration loggingConfiguration;
2533 /** 2689 /**
2534 * [Optional] A mapping of property names to values, used to configure Pig. 2690 * Optional A mapping of property names to values, used to configure Pig.
2535 * Properties that conflict with values set by the Cloud Dataproc API may be 2691 * Properties that conflict with values set by the Cloud Dataproc API may be
2536 * overwritten. Can include properties set in /etc/hadoop/conf / * -site.xml, 2692 * overwritten. Can include properties set in /etc/hadoop/conf / * -site.xml,
2537 * /etc/pig/conf/pig.properties, and classes in user code. 2693 * /etc/pig/conf/pig.properties, and classes in user code.
2538 */ 2694 */
2539 core.Map<core.String, core.String> properties; 2695 core.Map<core.String, core.String> properties;
2540 /** The HCFS URI of the script that contains the Pig queries. */ 2696 /** The HCFS URI of the script that contains the Pig queries. */
2541 core.String queryFileUri; 2697 core.String queryFileUri;
2542 /** A list of queries. */ 2698 /** A list of queries. */
2543 QueryList queryList; 2699 QueryList queryList;
2544 /** 2700 /**
2545 * [Optional] Mapping of query variable names to values (equivalent to the Pig 2701 * Optional Mapping of query variable names to values (equivalent to the Pig
2546 * command: `name=[value]`). 2702 * command: name=[value]).
2547 */ 2703 */
2548 core.Map<core.String, core.String> scriptVariables; 2704 core.Map<core.String, core.String> scriptVariables;
2549 2705
2550 PigJob(); 2706 PigJob();
2551 2707
2552 PigJob.fromJson(core.Map _json) { 2708 PigJob.fromJson(core.Map _json) {
2553 if (_json.containsKey("continueOnFailure")) { 2709 if (_json.containsKey("continueOnFailure")) {
2554 continueOnFailure = _json["continueOnFailure"]; 2710 continueOnFailure = _json["continueOnFailure"];
2555 } 2711 }
2556 if (_json.containsKey("jarFileUris")) { 2712 if (_json.containsKey("jarFileUris")) {
(...skipping 39 matching lines...)
2596 if (scriptVariables != null) { 2752 if (scriptVariables != null) {
2597 _json["scriptVariables"] = scriptVariables; 2753 _json["scriptVariables"] = scriptVariables;
2598 } 2754 }
2599 return _json; 2755 return _json;
2600 } 2756 }
2601 } 2757 }
2602 2758
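As a usage illustration (a sketch, not part of the patch): building a PigJob whose script variables follow the name=[value] convention described above. The bucket, script, and values are hypothetical.

import 'package:googleapis_beta/dataproc/v1beta1.dart';

void main() {
  var pig = new PigJob()
    ..queryFileUri = "gs://my-bucket/my-script.pig"  // hypothetical script URI
    ..continueOnFailure = true                       // keep going if one query fails
    ..scriptVariables = {"date": "2017-02-13"};      // Pig equivalent: date=2017-02-13
  print(pig.toJson());
}
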
2603 /** A Cloud Dataproc job for running PySpark applications on YARN. */ 2759 /** A Cloud Dataproc job for running PySpark applications on YARN. */
2604 class PySparkJob { 2760 class PySparkJob {
2605 /** 2761 /**
2606 * [Optional] HCFS URIs of archives to be extracted in the working directory 2762 * Optional HCFS URIs of archives to be extracted in the working directory of
2607 * of .jar, .tar, .tar.gz, .tgz, and .zip. 2763 * .jar, .tar, .tar.gz, .tgz, and .zip.
2608 */ 2764 */
2609 core.List<core.String> archiveUris; 2765 core.List<core.String> archiveUris;
2610 /** 2766 /**
2611 * [Optional] The arguments to pass to the driver. Do not include arguments, 2767 * Optional The arguments to pass to the driver. Do not include arguments,
2612 * such as `--conf`, that can be set as job properties, since a collision may 2768 * such as --conf, that can be set as job properties, since a collision may
2613 * occur that causes an incorrect job submission. 2769 * occur that causes an incorrect job submission.
2614 */ 2770 */
2615 core.List<core.String> args; 2771 core.List<core.String> args;
2616 /** 2772 /**
2617 * [Optional] HCFS URIs of files to be copied to the working directory of 2773 * Optional HCFS URIs of files to be copied to the working directory of Python
2618 * Python drivers and distributed tasks. Useful for naively parallel tasks. 2774 * drivers and distributed tasks. Useful for naively parallel tasks.
2619 */ 2775 */
2620 core.List<core.String> fileUris; 2776 core.List<core.String> fileUris;
2621 /** 2777 /**
2622 * [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the Python 2778 * Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python
2623 * driver and tasks. 2779 * driver and tasks.
2624 */ 2780 */
2625 core.List<core.String> jarFileUris; 2781 core.List<core.String> jarFileUris;
2626 /** [Optional] The runtime log configuration for job execution. */ 2782 /** Optional The runtime log configuration for job execution. */
2627 LoggingConfiguration loggingConfiguration; 2783 LoggingConfiguration loggingConfiguration;
2628 /** 2784 /**
2629 * [Required] The Hadoop Compatible Filesystem (HCFS) URI of the main Python 2785 * Required The Hadoop Compatible Filesystem (HCFS) URI of the main Python
2630 * file to use as the driver. Must be a .py file. 2786 * file to use as the driver. Must be a .py file.
2631 */ 2787 */
2632 core.String mainPythonFileUri; 2788 core.String mainPythonFileUri;
2633 /** 2789 /**
2634 * [Optional] A mapping of property names to values, used to configure 2790 * Optional A mapping of property names to values, used to configure PySpark.
2635 * PySpark. Properties that conflict with values set by the Cloud Dataproc API 2791 * Properties that conflict with values set by the Cloud Dataproc API may be
2636 * may be overwritten. Can include properties set in 2792 * overwritten. Can include properties set in
2637 * /etc/spark/conf/spark-defaults.conf and classes in user code. 2793 * /etc/spark/conf/spark-defaults.conf and classes in user code.
2638 */ 2794 */
2639 core.Map<core.String, core.String> properties; 2795 core.Map<core.String, core.String> properties;
2640 /** 2796 /**
2641 * [Optional] HCFS file URIs of Python files to pass to the PySpark framework. 2797 * Optional HCFS file URIs of Python files to pass to the PySpark framework.
2642 * Supported file types: .py, .egg, and .zip. 2798 * Supported file types: .py, .egg, and .zip.
2643 */ 2799 */
2644 core.List<core.String> pythonFileUris; 2800 core.List<core.String> pythonFileUris;
2645 2801
2646 PySparkJob(); 2802 PySparkJob();
2647 2803
2648 PySparkJob.fromJson(core.Map _json) { 2804 PySparkJob.fromJson(core.Map _json) {
2649 if (_json.containsKey("archiveUris")) { 2805 if (_json.containsKey("archiveUris")) {
2650 archiveUris = _json["archiveUris"]; 2806 archiveUris = _json["archiveUris"];
2651 } 2807 }
(...skipping 46 matching lines...)
2698 if (pythonFileUris != null) { 2854 if (pythonFileUris != null) {
2699 _json["pythonFileUris"] = pythonFileUris; 2855 _json["pythonFileUris"] = pythonFileUris;
2700 } 2856 }
2701 return _json; 2857 return _json;
2702 } 2858 }
2703 } 2859 }
2704 2860
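A hedged sketch of a PySparkJob, reflecting the note above that --conf style settings belong in properties rather than args. All URIs are placeholders.

import 'package:googleapis_beta/dataproc/v1beta1.dart';

void main() {
  var job = new PySparkJob()
    ..mainPythonFileUri = "gs://my-bucket/driver.py"   // required; must be a .py file
    ..pythonFileUris = ["gs://my-bucket/helpers.egg"]  // .py, .egg, or .zip
    ..args = ["--input", "gs://my-bucket/data"]        // plain driver arguments
    ..properties = {"spark.executor.memory": "2g"};    // set here, not via --conf
  print(job.toJson());
}
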
2705 /** A list of queries to run on a cluster. */ 2861 /** A list of queries to run on a cluster. */
2706 class QueryList { 2862 class QueryList {
2707 /** 2863 /**
2708 * [Required] The queries to execute. You do not need to terminate a query 2864 * Required The queries to execute. You do not need to terminate a query with
2709 * with a semicolon. Multiple queries can be specified in one string by 2865 * a semicolon. Multiple queries can be specified in one string by separating
2710 * separating each with a semicolon. Here is an example of a Cloud Dataproc 2866 * each with a semicolon. Here is an example of a Cloud Dataproc API snippet
2711 * API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { 2867 * that uses a QueryList to specify a HiveJob:
2712 * "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } 2868 * "hiveJob": {
2869 * "queryList": {
2870 * "queries": [
2871 * "query1",
2872 * "query2",
2873 * "query3;query4",
2874 * ]
2875 * }
2876 * }
2713 */ 2877 */
2714 core.List<core.String> queries; 2878 core.List<core.String> queries;
2715 2879
2716 QueryList(); 2880 QueryList();
2717 2881
2718 QueryList.fromJson(core.Map _json) { 2882 QueryList.fromJson(core.Map _json) {
2719 if (_json.containsKey("queries")) { 2883 if (_json.containsKey("queries")) {
2720 queries = _json["queries"]; 2884 queries = _json["queries"];
2721 } 2885 }
2722 } 2886 }
2723 2887
2724 core.Map toJson() { 2888 core.Map toJson() {
2725 var _json = new core.Map(); 2889 var _json = new core.Map();
2726 if (queries != null) { 2890 if (queries != null) {
2727 _json["queries"] = queries; 2891 _json["queries"] = queries;
2728 } 2892 }
2729 return _json; 2893 return _json;
2730 } 2894 }
2731 } 2895 }
2732 2896
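The hiveJob snippet in the comment above can be reproduced with this binding; a short illustrative sketch:

import 'dart:convert' show JSON;
import 'package:googleapis_beta/dataproc/v1beta1.dart';

void main() {
  var list = new QueryList()
    ..queries = ["query1", "query2", "query3;query4"];
  // Emits the same shape as the "hiveJob": {"queryList": ...} example.
  print(JSON.encode({"hiveJob": {"queryList": list.toJson()}}));
}
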
2733 /** 2897 /**
2734 * Specifies the selection and configuration of software inside the cluster. 2898 * Specifies the selection and configuration of software inside the cluster.
2735 */ 2899 */
2736 class SoftwareConfiguration { 2900 class SoftwareConfiguration {
2737 /** 2901 /**
2738 * [Optional] The version of software inside the cluster. It must match the 2902 * Optional The version of software inside the cluster. It must match the
2739 * regular expression `[0-9]+\.[0-9]+`. If unspecified, it defaults to the 2903 * regular expression [0-9]+\.[0-9]+. If unspecified, it defaults to the
2740 * latest version (see [Cloud Dataproc Versioning](/dataproc/versioning)). 2904 * latest version (see Cloud Dataproc Versioning).
2741 */ 2905 */
2742 core.String imageVersion; 2906 core.String imageVersion;
2743 /** 2907 /**
2744 * [Optional] The properties to set on daemon configuration files. Property 2908 * Optional The properties to set on daemon configuration files.Property keys
2745 * keys are specified in "prefix:property" format, such as 2909 * are specified in "prefix:property" format, such as "core:fs.defaultFS". The
2746 * "core:fs.defaultFS". The following are supported prefixes and their 2910 * following are supported prefixes and their mappings: core - core-site.xml
2747 * mappings: core - core-site.xml hdfs - hdfs-site.xml mapred - 2911 * hdfs - hdfs-site.xml mapred - mapred-site.xml yarn - yarn-site.xml hive
2748 * mapred-site.xml yarn - yarn-site.xml hive - hive-site.xml pig - 2912 * - hive-site.xml pig - pig.properties spark - spark-defaults.conf
2749 * pig.properties spark - spark-defaults.conf
2750 */ 2913 */
2751 core.Map<core.String, core.String> properties; 2914 core.Map<core.String, core.String> properties;
2752 2915
2753 SoftwareConfiguration(); 2916 SoftwareConfiguration();
2754 2917
2755 SoftwareConfiguration.fromJson(core.Map _json) { 2918 SoftwareConfiguration.fromJson(core.Map _json) {
2756 if (_json.containsKey("imageVersion")) { 2919 if (_json.containsKey("imageVersion")) {
2757 imageVersion = _json["imageVersion"]; 2920 imageVersion = _json["imageVersion"];
2758 } 2921 }
2759 if (_json.containsKey("properties")) { 2922 if (_json.containsKey("properties")) {
2760 properties = _json["properties"]; 2923 properties = _json["properties"];
2761 } 2924 }
2762 } 2925 }
2763 2926
2764 core.Map toJson() { 2927 core.Map toJson() {
2765 var _json = new core.Map(); 2928 var _json = new core.Map();
2766 if (imageVersion != null) { 2929 if (imageVersion != null) {
2767 _json["imageVersion"] = imageVersion; 2930 _json["imageVersion"] = imageVersion;
2768 } 2931 }
2769 if (properties != null) { 2932 if (properties != null) {
2770 _json["properties"] = properties; 2933 _json["properties"] = properties;
2771 } 2934 }
2772 return _json; 2935 return _json;
2773 } 2936 }
2774 } 2937 }
2775 2938
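A small sketch of the prefix:property convention described above; the version and property values are examples, not recommendations.

import 'package:googleapis_beta/dataproc/v1beta1.dart';

void main() {
  var software = new SoftwareConfiguration()
    ..imageVersion = "1.1"                          // must match [0-9]+\.[0-9]+
    ..properties = {
      "core:fs.defaultFS": "hdfs://example-host/",  // core  -> core-site.xml
      "spark:spark.executor.memory": "4g"           // spark -> spark-defaults.conf
    };
  print(software.toJson());
}
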
2776 /** A Cloud Dataproc job for running Spark applications on YARN. */ 2939 /** A Cloud Dataproc job for running Spark applications on YARN. */
2777 class SparkJob { 2940 class SparkJob {
2778 /** 2941 /**
2779 * [Optional] HCFS URIs of archives to be extracted in the working directory 2942 * Optional HCFS URIs of archives to be extracted in the working directory of
2780 * of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, 2943 * Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz,
2781 * .tgz, and .zip. 2944 * and .zip.
2782 */ 2945 */
2783 core.List<core.String> archiveUris; 2946 core.List<core.String> archiveUris;
2784 /** 2947 /**
2785 * [Optional] The arguments to pass to the driver. Do not include arguments, 2948 * Optional The arguments to pass to the driver. Do not include arguments,
2786 * such as `--conf`, that can be set as job properties, since a collision may 2949 * such as --conf, that can be set as job properties, since a collision may
2787 * occur that causes an incorrect job submission. 2950 * occur that causes an incorrect job submission.
2788 */ 2951 */
2789 core.List<core.String> args; 2952 core.List<core.String> args;
2790 /** 2953 /**
2791 * [Optional] HCFS URIs of files to be copied to the working directory of 2954 * Optional HCFS URIs of files to be copied to the working directory of Spark
2792 * Spark drivers and distributed tasks. Useful for naively parallel tasks. 2955 * drivers and distributed tasks. Useful for naively parallel tasks.
2793 */ 2956 */
2794 core.List<core.String> fileUris; 2957 core.List<core.String> fileUris;
2795 /** 2958 /**
2796 * [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the Spark 2959 * Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark
2797 * driver and tasks. 2960 * driver and tasks.
2798 */ 2961 */
2799 core.List<core.String> jarFileUris; 2962 core.List<core.String> jarFileUris;
2800 /** [Optional] The runtime log configuration for job execution. */ 2963 /** Optional The runtime log configuration for job execution. */
2801 LoggingConfiguration loggingConfiguration; 2964 LoggingConfiguration loggingConfiguration;
2802 /** 2965 /**
2803 * The name of the driver's main class. The jar file that contains the class 2966 * The name of the driver's main class. The jar file that contains the class
2804 * must be in the default CLASSPATH or specified in `jar_file_uris`. 2967 * must be in the default CLASSPATH or specified in jar_file_uris.
2805 */ 2968 */
2806 core.String mainClass; 2969 core.String mainClass;
2807 /** 2970 /**
2808 * The Hadoop Compatible Filesystem (HCFS) URI of the jar file that contains 2971 * The Hadoop Compatible Filesystem (HCFS) URI of the jar file that contains
2809 * the main class. 2972 * the main class.
2810 */ 2973 */
2811 core.String mainJarFileUri; 2974 core.String mainJarFileUri;
2812 /** 2975 /**
2813 * [Optional] A mapping of property names to values, used to configure Spark. 2976 * Optional A mapping of property names to values, used to configure Spark.
2814 * Properties that conflict with values set by the Cloud Dataproc API may be 2977 * Properties that conflict with values set by the Cloud Dataproc API may be
2815 * overwritten. Can include properties set in 2978 * overwritten. Can include properties set in
2816 * /etc/spark/conf/spark-defaults.conf and classes in user code. 2979 * /etc/spark/conf/spark-defaults.conf and classes in user code.
2817 */ 2980 */
2818 core.Map<core.String, core.String> properties; 2981 core.Map<core.String, core.String> properties;
2819 2982
2820 SparkJob(); 2983 SparkJob();
2821 2984
2822 SparkJob.fromJson(core.Map _json) { 2985 SparkJob.fromJson(core.Map _json) {
2823 if (_json.containsKey("archiveUris")) { 2986 if (_json.containsKey("archiveUris")) {
(...skipping 47 matching lines...)
2871 } 3034 }
2872 if (properties != null) { 3035 if (properties != null) {
2873 _json["properties"] = properties; 3036 _json["properties"] = properties;
2874 } 3037 }
2875 return _json; 3038 return _json;
2876 } 3039 }
2877 } 3040 }
2878 3041
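An illustrative SparkJob sketch using mainClass with the providing jar listed in jarFileUris; mainJarFileUri is the alternative when the jar itself is the entry point. The class name and URIs are hypothetical.

import 'package:googleapis_beta/dataproc/v1beta1.dart';

void main() {
  var spark = new SparkJob()
    ..mainClass = "com.example.WordCount"             // hypothetical driver class
    ..jarFileUris = ["gs://my-bucket/wordcount.jar"]  // jar that provides the class
    ..args = ["gs://my-bucket/input.txt"];            // plain arguments, no --conf
  print(spark.toJson());
}
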
2879 /** A Cloud Dataproc job for running Spark SQL queries. */ 3042 /** A Cloud Dataproc job for running Spark SQL queries. */
2880 class SparkSqlJob { 3043 class SparkSqlJob {
2881 /** [Optional] HCFS URIs of jar files to be added to the Spark CLASSPATH. */ 3044 /** Optional HCFS URIs of jar files to be added to the Spark CLASSPATH. */
2882 core.List<core.String> jarFileUris; 3045 core.List<core.String> jarFileUris;
2883 /** [Optional] The runtime log configuration for job execution. */ 3046 /** Optional The runtime log configuration for job execution. */
2884 LoggingConfiguration loggingConfiguration; 3047 LoggingConfiguration loggingConfiguration;
2885 /** 3048 /**
2886 * [Optional] A mapping of property names to values, used to configure Spark 3049 * Optional A mapping of property names to values, used to configure Spark
2887 * SQL's SparkConf. Properties that conflict with values set by the Cloud 3050 * SQL's SparkConf. Properties that conflict with values set by the Cloud
2888 * Dataproc API may be overwritten. 3051 * Dataproc API may be overwritten.
2889 */ 3052 */
2890 core.Map<core.String, core.String> properties; 3053 core.Map<core.String, core.String> properties;
2891 /** The HCFS URI of the script that contains SQL queries. */ 3054 /** The HCFS URI of the script that contains SQL queries. */
2892 core.String queryFileUri; 3055 core.String queryFileUri;
2893 /** A list of queries. */ 3056 /** A list of queries. */
2894 QueryList queryList; 3057 QueryList queryList;
2895 /** 3058 /**
2896 * [Optional] Mapping of query variable names to values (equivalent to the 3059 * Optional Mapping of query variable names to values (equivalent to the Spark
2897 * Spark SQL command: SET `name="value";`). 3060 * SQL command: SET name="value";).
2898 */ 3061 */
2899 core.Map<core.String, core.String> scriptVariables; 3062 core.Map<core.String, core.String> scriptVariables;
2900 3063
2901 SparkSqlJob(); 3064 SparkSqlJob();
2902 3065
2903 SparkSqlJob.fromJson(core.Map _json) { 3066 SparkSqlJob.fromJson(core.Map _json) {
2904 if (_json.containsKey("jarFileUris")) { 3067 if (_json.containsKey("jarFileUris")) {
2905 jarFileUris = _json["jarFileUris"]; 3068 jarFileUris = _json["jarFileUris"];
2906 } 3069 }
2907 if (_json.containsKey("loggingConfiguration")) { 3070 if (_json.containsKey("loggingConfiguration")) {
(...skipping 31 matching lines...)
2939 _json["queryList"] = (queryList).toJson(); 3102 _json["queryList"] = (queryList).toJson();
2940 } 3103 }
2941 if (scriptVariables != null) { 3104 if (scriptVariables != null) {
2942 _json["scriptVariables"] = scriptVariables; 3105 _json["scriptVariables"] = scriptVariables;
2943 } 3106 }
2944 return _json; 3107 return _json;
2945 } 3108 }
2946 } 3109 }
2947 3110
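A sketch of scriptVariables with the SET name="value"; semantics noted above, assuming Hive-style ${var} substitution in the query text; the table name and query are made up.

import 'package:googleapis_beta/dataproc/v1beta1.dart';

void main() {
  var sql = new SparkSqlJob()
    ..queryList = (new QueryList()
      ..queries = [r"SELECT * FROM ${table} LIMIT 10"])  // raw string keeps ${table}
    ..scriptVariables = {"table": "logs"};               // equivalent to SET table="logs";
  print(sql.toJson());
}
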
2948 /** 3111 /**
2949 * The `Status` type defines a logical error model that is suitable for 3112 * The Status type defines a logical error model that is suitable for different
2950 * different programming environments, including REST APIs and RPC APIs. It is 3113 * programming environments, including REST APIs and RPC APIs. It is used by
2951 * used by [gRPC](https://github.com/grpc). The error model is designed to be: - 3114 * gRPC (https://github.com/grpc). The error model is designed to be:
2952 * Simple to use and understand for most users - Flexible enough to meet 3115 * Simple to use and understand for most users
2953 * unexpected needs # Overview The `Status` message contains three pieces of 3116 * Flexible enough to meet unexpected needsOverviewThe Status message contains
2954 * data: error code, error message, and error details. The error code should be 3117 * three pieces of data: error code, error message, and error details. The error
2955 * an enum value of google.rpc.Code, but it may accept additional error codes if 3118 * code should be an enum value of google.rpc.Code, but it may accept additional
2956 * needed. The error message should be a developer-facing English message that 3119 * error codes if needed. The error message should be a developer-facing English
2957 * helps developers *understand* and *resolve* the error. If a localized 3120 * message that helps developers understand and resolve the error. If a
2958 * user-facing error message is needed, put the localized message in the error 3121 * localized user-facing error message is needed, put the localized message in
2959 * details or localize it in the client. The optional error details may contain 3122 * the error details or localize it in the client. The optional error details
2960 * arbitrary information about the error. There is a predefined set of error 3123 * may contain arbitrary information about the error. There is a predefined set
2961 * detail types in the package `google.rpc` which can be used for common error 3124 * of error detail types in the package google.rpc which can be used for common
2962 * conditions. # Language mapping The `Status` message is the logical 3125 * error conditions.Language mappingThe Status message is the logical
2963 * representation of the error model, but it is not necessarily the actual wire 3126 * representation of the error model, but it is not necessarily the actual wire
2964 * format. When the `Status` message is exposed in different client libraries 3127 * format. When the Status message is exposed in different client libraries and
2965 * and different wire protocols, it can be mapped differently. For example, it 3128 * different wire protocols, it can be mapped differently. For example, it will
2966 * will likely be mapped to some exceptions in Java, but more likely mapped to 3129 * likely be mapped to some exceptions in Java, but more likely mapped to some
2967 * some error codes in C. # Other uses The error model and the `Status` message 3130 * error codes in C.Other usesThe error model and the Status message can be used
2968 * can be used in a variety of environments, either with or without APIs, to 3131 * in a variety of environments, either with or without APIs, to provide a
2969 * provide a consistent developer experience across different environments. 3132 * consistent developer experience across different environments.Example uses of
2970 * Example uses of this error model include: - Partial errors. If a service 3133 * this error model include:
2971 * needs to return partial errors to the client, it may embed the `Status` in 3134 * Partial errors. If a service needs to return partial errors to the client, it
2972 * the normal response to indicate the partial errors. - Workflow errors. A 3135 * may embed the Status in the normal response to indicate the partial errors.
2973 * typical workflow has multiple steps. Each step may have a `Status` message 3136 * Workflow errors. A typical workflow has multiple steps. Each step may have a
2974 * for error reporting purpose. - Batch operations. If a client uses batch 3137 * Status message for error reporting purpose.
2975 * request and batch response, the `Status` message should be used directly 3138 * Batch operations. If a client uses batch request and batch response, the
2976 * inside batch response, one for each error sub-response. - Asynchronous 3139 * Status message should be used directly inside batch response, one for each
2977 * operations. If an API call embeds asynchronous operation results in its 3140 * error sub-response.
2978 * response, the status of those operations should be represented directly using 3141 * Asynchronous operations. If an API call embeds asynchronous operation results
2979 * the `Status` message. - Logging. If some API errors are stored in logs, the 3142 * in its response, the status of those operations should be represented
2980 * message `Status` could be used directly after any stripping needed for 3143 * directly using the Status message.
2981 * security/privacy reasons. 3144 * Logging. If some API errors are stored in logs, the message Status could be
3145 * used directly after any stripping needed for security/privacy reasons.
2982 */ 3146 */
2983 class Status { 3147 class Status {
2984 /** The status code, which should be an enum value of google.rpc.Code. */ 3148 /** The status code, which should be an enum value of google.rpc.Code. */
2985 core.int code; 3149 core.int code;
2986 /** 3150 /**
2987 * A list of messages that carry the error details. There will be a common set 3151 * A list of messages that carry the error details. There will be a common set
2988 * of message types for APIs to use. 3152 * of message types for APIs to use.
2989 * 3153 *
2990 * The values for Object must be JSON objects. It can consist of `num`, 3154 * The values for Object must be JSON objects. It can consist of `num`,
2991 * `String`, `bool` and `null` as well as `Map` and `List` values. 3155 * `String`, `bool` and `null` as well as `Map` and `List` values.
(...skipping 30 matching lines...)
3022 } 3186 }
3023 if (message != null) { 3187 if (message != null) {
3024 _json["message"] = message; 3188 _json["message"] = message;
3025 } 3189 }
3026 return _json; 3190 return _json;
3027 } 3191 }
3028 } 3192 }
3029 3193
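To make the error model concrete, a minimal decoding sketch; the values are illustrative (code 7 is PERMISSION_DENIED in google.rpc.Code).

import 'package:googleapis_beta/dataproc/v1beta1.dart';

void main() {
  var status = new Status.fromJson({
    "code": 7,                                 // google.rpc.Code.PERMISSION_DENIED
    "message": "Caller may not act on the cluster."
  });
  print("error ${status.code}: ${status.message}");
}
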
3030 /** A request to submit a job. */ 3194 /** A request to submit a job. */
3031 class SubmitJobRequest { 3195 class SubmitJobRequest {
3032 /** [Required] The job resource. */ 3196 /** Required The job resource. */
3033 Job job; 3197 Job job;
3034 3198
3035 SubmitJobRequest(); 3199 SubmitJobRequest();
3036 3200
3037 SubmitJobRequest.fromJson(core.Map _json) { 3201 SubmitJobRequest.fromJson(core.Map _json) {
3038 if (_json.containsKey("job")) { 3202 if (_json.containsKey("job")) {
3039 job = new Job.fromJson(_json["job"]); 3203 job = new Job.fromJson(_json["job"]);
3040 } 3204 }
3041 } 3205 }
3042 3206
3043 core.Map toJson() { 3207 core.Map toJson() {
3044 var _json = new core.Map(); 3208 var _json = new core.Map();
3045 if (job != null) { 3209 if (job != null) {
3046 _json["job"] = (job).toJson(); 3210 _json["job"] = (job).toJson();
3047 } 3211 }
3048 return _json; 3212 return _json;
3049 } 3213 }
3050 } 3214 }
3051 3215
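A hedged end-to-end sketch: wrapping a job in a SubmitJobRequest and handing it to the API surface. The Job and PigJob classes come from this file; the submit(request, projectId) method on projects.jobs is assumed from the generated resource classes elided in this diff, and the project id is a placeholder.

import 'package:googleapis_beta/dataproc/v1beta1.dart';

submitPigJob(DataprocApi api) async {
  var request = new SubmitJobRequest()
    ..job = (new Job()
      ..pigJob = (new PigJob()..queryFileUri = "gs://my-bucket/script.pig"));
  // Assumption: the generated projects.jobs resource exposes
  // submit(SubmitJobRequest request, String projectId).
  var job = await api.projects.jobs.submit(request, "my-project-id");
  print(job.toJson());
}
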
3052 /** 3216 /**
3053 * A YARN application created by a job. Application information is a subset of 3217 * A YARN application created by a job. Application information is a subset of
3054 * org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. 3218 * <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
3055 */ 3219 */
3056 class YarnApplication { 3220 class YarnApplication {
3057 /** [Required] The application name. */ 3221 /** Required The application name. */
3058 core.String name; 3222 core.String name;
3059 /** [Required] The numerical progress of the application, from 1 to 100. */ 3223 /** Required The numerical progress of the application, from 1 to 100. */
3060 core.double progress; 3224 core.double progress;
3061 /** 3225 /**
3062 * [Required] The application state. 3226 * Required The application state.
3063 * Possible string values are: 3227 * Possible string values are:
3064 * - "STATE_UNSPECIFIED" : A STATE_UNSPECIFIED. 3228 * - "STATE_UNSPECIFIED" : Status is unspecified.
3065 * - "NEW" : A NEW. 3229 * - "NEW" : Status is NEW.
3066 * - "NEW_SAVING" : A NEW_SAVING. 3230 * - "NEW_SAVING" : Status is NEW_SAVING.
3067 * - "SUBMITTED" : A SUBMITTED. 3231 * - "SUBMITTED" : Status is SUBMITTED.
3068 * - "ACCEPTED" : A ACCEPTED. 3232 * - "ACCEPTED" : Status is ACCEPTED.
3069 * - "RUNNING" : A RUNNING. 3233 * - "RUNNING" : Status is RUNNING.
3070 * - "FINISHED" : A FINISHED. 3234 * - "FINISHED" : Status is FINISHED.
3071 * - "FAILED" : A FAILED. 3235 * - "FAILED" : Status is FAILED.
3072 * - "KILLED" : A KILLED. 3236 * - "KILLED" : Status is KILLED.
3073 */ 3237 */
3074 core.String state; 3238 core.String state;
3075 /** 3239 /**
3076 * [Optional] The HTTP URL of the ApplicationMaster, HistoryServer, or 3240 * Optional The HTTP URL of the ApplicationMaster, HistoryServer, or
3077 * TimelineServer that provides application-specific information. The URL uses 3241 * TimelineServer that provides application-specific information. The URL uses
3078 * the internal hostname, and requires a proxy server for resolution and, 3242 * the internal hostname, and requires a proxy server for resolution and,
3079 * possibly, access. 3243 * possibly, access.
3080 */ 3244 */
3081 core.String trackingUrl; 3245 core.String trackingUrl;
3082 3246
3083 YarnApplication(); 3247 YarnApplication();
3084 3248
3085 YarnApplication.fromJson(core.Map _json) { 3249 YarnApplication.fromJson(core.Map _json) {
3086 if (_json.containsKey("name")) { 3250 if (_json.containsKey("name")) {
(...skipping 20 matching lines...)
3107 } 3271 }
3108 if (state != null) { 3272 if (state != null) {
3109 _json["state"] = state; 3273 _json["state"] = state;
3110 } 3274 }
3111 if (trackingUrl != null) { 3275 if (trackingUrl != null) {
3112 _json["trackingUrl"] = trackingUrl; 3276 _json["trackingUrl"] = trackingUrl;
3113 } 3277 }
3114 return _json; 3278 return _json;
3115 } 3279 }
3116 } 3280 }
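
Finally, an illustrative sketch of reading a YarnApplication report; the name and progress are sample data.

import 'package:googleapis_beta/dataproc/v1beta1.dart';

void main() {
  var app = new YarnApplication.fromJson({
    "name": "example-app",  // sample values only
    "state": "RUNNING",
    "progress": 42.0        // core.double in this binding
  });
  var done = app.state == "FINISHED" ||
      app.state == "FAILED" ||
      app.state == "KILLED";
  print(done ? "${app.name} finished" : "${app.name}: ${app.progress}%");
}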