Chromium Code Reviews

Side by Side Diff: generated/googleapis_beta/lib/dataproc/v1beta1.dart

Issue 2987103002: Api-Roll 52: 2017-07-31 (Closed)
Patch Set: Created 3 years, 4 months ago
1 // This is a generated file (see the discoveryapis_generator project).
2
3 library googleapis_beta.dataproc.v1beta1;
4
5 import 'dart:core' as core;
6 import 'dart:async' as async;
7 import 'dart:convert' as convert;
8
9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons;
10 import 'package:http/http.dart' as http;
11
12 export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' show
13 ApiRequestError, DetailedApiRequestError;
14
15 const core.String USER_AGENT = 'dart-api-client dataproc/v1beta1';
16
17 /** Manages Hadoop-based clusters and jobs on Google Cloud Platform. */
18 class DataprocApi {
19 /** View and manage your data across Google Cloud Platform services */
20 static const CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform";
21
22
23 final commons.ApiRequester _requester;
24
25 OperationsResourceApi get operations => new OperationsResourceApi(_requester);
26 ProjectsResourceApi get projects => new ProjectsResourceApi(_requester);
27
28 DataprocApi(http.Client client, {core.String rootUrl: "https://dataproc.googleapis.com/", core.String servicePath: ""}) :
29 _requester = new commons.ApiRequester(client, rootUrl, servicePath, USER_AGENT);
30 }
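A minimal construction sketch for review context. The credential setup below assumes package:googleapis_auth (ServiceAccountCredentials and clientViaServiceAccount); it is not part of this generated file, and the key JSON is a placeholder.

    import 'package:googleapis_auth/auth_io.dart' as auth;
    import 'package:googleapis_beta/dataproc/v1beta1.dart';

    // Hypothetical placeholder: substitute a real service-account key JSON.
    const serviceAccountKeyJson = '...';

    main() async {
      final credentials =
          new auth.ServiceAccountCredentials.fromJson(serviceAccountKeyJson);
      final httpClient = await auth.clientViaServiceAccount(
          credentials, [DataprocApi.CloudPlatformScope]);
      final api = new DataprocApi(httpClient);
      // api.operations, api.projects.clusters, and api.projects.jobs are now
      // usable; call httpClient.close() when finished.
    }

The sketches that follow assume this `api` instance.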
31
32
33 class OperationsResourceApi {
34 final commons.ApiRequester _requester;
35
36 OperationsResourceApi(commons.ApiRequester client) :
37 _requester = client;
38
39 /**
40 * Starts asynchronous cancellation on a long-running operation. The server
41 * makes a best effort to cancel the operation, but success is not guaranteed.
42 * If the server doesn't support this method, it returns
43 * google.rpc.Code.UNIMPLEMENTED. Clients can use operations.get or other
44 * methods to check whether the cancellation succeeded or whether the
45 * operation completed despite cancellation.
46 *
47 * [request] - The metadata request object.
48 *
49 * Request parameters:
50 *
51 * [name] - The name of the operation resource to be cancelled.
52 * Value must have pattern "^operations/.+$".
53 *
54 * Completes with a [Empty].
55 *
56 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
57 * error.
58 *
59 * If the used [http.Client] completes with an error when making a REST call,
60 * this method will complete with the same error.
61 */
62 async.Future<Empty> cancel(CancelOperationRequest request, core.String name) {
63 var _url = null;
64 var _queryParams = new core.Map();
65 var _uploadMedia = null;
66 var _uploadOptions = null;
67 var _downloadOptions = commons.DownloadOptions.Metadata;
68 var _body = null;
69
70 if (request != null) {
71 _body = convert.JSON.encode((request).toJson());
72 }
73 if (name == null) {
74 throw new core.ArgumentError("Parameter name is required.");
75 }
76
77 _url = 'v1beta1/' + commons.Escaper.ecapeVariableReserved('$name') + ':cancel';
78
79 var _response = _requester.request(_url,
80 "POST",
81 body: _body,
82 queryParams: _queryParams,
83 uploadOptions: _uploadOptions,
84 uploadMedia: _uploadMedia,
85 downloadOptions: _downloadOptions);
86 return _response.then((data) => new Empty.fromJson(data));
87 }
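Because cancellation is best-effort, callers typically follow cancel() with a get() poll, as the doc comment above suggests. A sketch (the operation name is illustrative; Operation is defined later in this file):

    final name = 'operations/some-operation-id'; // hypothetical name
    await api.operations.cancel(new CancelOperationRequest(), name);
    final op = await api.operations.get(name);
    print(op.toJson()); // inspect whether it completed despite cancellation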
88
89 /**
90 * Deletes a long-running operation. This method indicates that the client is
91 * no longer interested in the operation result. It does not cancel the
92 * operation. If the server doesn't support this method, it returns
93 * google.rpc.Code.UNIMPLEMENTED.
94 *
95 * Request parameters:
96 *
97 * [name] - The name of the operation resource to be deleted.
98 * Value must have pattern "^operations/.+$".
99 *
100 * Completes with a [Empty].
101 *
102 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
103 * error.
104 *
105 * If the used [http.Client] completes with an error when making a REST call,
106 * this method will complete with the same error.
107 */
108 async.Future<Empty> delete(core.String name) {
109 var _url = null;
110 var _queryParams = new core.Map();
111 var _uploadMedia = null;
112 var _uploadOptions = null;
113 var _downloadOptions = commons.DownloadOptions.Metadata;
114 var _body = null;
115
116 if (name == null) {
117 throw new core.ArgumentError("Parameter name is required.");
118 }
119
120 _url = 'v1beta1/' + commons.Escaper.ecapeVariableReserved('$name');
121
122 var _response = _requester.request(_url,
123 "DELETE",
124 body: _body,
125 queryParams: _queryParams,
126 uploadOptions: _uploadOptions,
127 uploadMedia: _uploadMedia,
128 downloadOptions: _downloadOptions);
129 return _response.then((data) => new Empty.fromJson(data));
130 }
131
132 /**
133 * Gets the latest state of a long-running operation. Clients can use this
134 * method to poll the operation result at intervals as recommended by the API
135 * service.
136 *
137 * Request parameters:
138 *
139 * [name] - The name of the operation resource.
140 * Value must have pattern "^operations/.+$".
141 *
142 * Completes with a [Operation].
143 *
144 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
145 * error.
146 *
147 * If the used [http.Client] completes with an error when making a REST call,
148 * this method will complete with the same error.
149 */
150 async.Future<Operation> get(core.String name) {
151 var _url = null;
152 var _queryParams = new core.Map();
153 var _uploadMedia = null;
154 var _uploadOptions = null;
155 var _downloadOptions = commons.DownloadOptions.Metadata;
156 var _body = null;
157
158 if (name == null) {
159 throw new core.ArgumentError("Parameter name is required.");
160 }
161
162 _url = 'v1beta1/' + commons.Escaper.ecapeVariableReserved('$name');
163
164 var _response = _requester.request(_url,
165 "GET",
166 body: _body,
167 queryParams: _queryParams,
168 uploadOptions: _uploadOptions,
169 uploadMedia: _uploadMedia,
170 downloadOptions: _downloadOptions);
171 return _response.then((data) => new Operation.fromJson(data));
172 }
173
174 /**
175 * Lists operations that match the specified filter in the request. If the
176 * server doesn't support this method, it returns UNIMPLEMENTED. NOTE: the
177 * name binding below allows API services to override the binding to use
178 * different resource name schemes, such as users / * /operations.
179 *
180 * Request parameters:
181 *
182 * [name] - The name of the operation's parent resource.
183 * Value must have pattern "^operations$".
184 *
185 * [filter] - The standard list filter.
186 *
187 * [pageToken] - The standard list page token.
188 *
189 * [pageSize] - The standard list page size.
190 *
191 * Completes with a [ListOperationsResponse].
192 *
193 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
194 * error.
195 *
196 * If the used [http.Client] completes with an error when making a REST call,
197 * this method will complete with the same error.
198 */
199 async.Future<ListOperationsResponse> list(core.String name, {core.String filter, core.String pageToken, core.int pageSize}) {
200 var _url = null;
201 var _queryParams = new core.Map();
202 var _uploadMedia = null;
203 var _uploadOptions = null;
204 var _downloadOptions = commons.DownloadOptions.Metadata;
205 var _body = null;
206
207 if (name == null) {
208 throw new core.ArgumentError("Parameter name is required.");
209 }
210 if (filter != null) {
211 _queryParams["filter"] = [filter];
212 }
213 if (pageToken != null) {
214 _queryParams["pageToken"] = [pageToken];
215 }
216 if (pageSize != null) {
217 _queryParams["pageSize"] = ["${pageSize}"];
218 }
219
220 _url = 'v1beta1/' + commons.Escaper.ecapeVariableReserved('$name');
221
222 var _response = _requester.request(_url,
223 "GET",
224 body: _body,
225 queryParams: _queryParams,
226 uploadOptions: _uploadOptions,
227 uploadMedia: _uploadMedia,
228 downloadOptions: _downloadOptions);
229 return _response.then((data) => new ListOperationsResponse.fromJson(data));
230 }
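A paging sketch for list(). The operations and nextPageToken fields follow the standard List-response shape and are assumed from the ListOperationsResponse model defined later in this file:

    String token;
    do {
      final resp = await api.operations.list('operations',
          pageSize: 100, pageToken: token);
      (resp.operations ?? []).forEach((op) => print(op.toJson()));
      token = resp.nextPageToken;
    } while (token != null);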
231
232 }
233
234
235 class ProjectsResourceApi {
236 final commons.ApiRequester _requester;
237
238 ProjectsClustersResourceApi get clusters => new ProjectsClustersResourceApi(_requester);
239 ProjectsJobsResourceApi get jobs => new ProjectsJobsResourceApi(_requester);
240
241 ProjectsResourceApi(commons.ApiRequester client) :
242 _requester = client;
243 }
244
245
246 class ProjectsClustersResourceApi {
247 final commons.ApiRequester _requester;
248
249 ProjectsClustersResourceApi(commons.ApiRequester client) :
250 _requester = client;
251
252 /**
253 * Creates a cluster in a project.
254 *
255 * [request] - The metadata request object.
256 *
257 * Request parameters:
258 *
259 * [projectId] - Required The ID of the Google Cloud Platform project that the
260 * cluster belongs to.
261 *
262 * Completes with a [Operation].
263 *
264 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
265 * error.
266 *
267 * If the used [http.Client] completes with an error when making a REST call,
268 * this method will complete with the same error.
269 */
270 async.Future<Operation> create(Cluster request, core.String projectId) {
271 var _url = null;
272 var _queryParams = new core.Map();
273 var _uploadMedia = null;
274 var _uploadOptions = null;
275 var _downloadOptions = commons.DownloadOptions.Metadata;
276 var _body = null;
277
278 if (request != null) {
279 _body = convert.JSON.encode((request).toJson());
280 }
281 if (projectId == null) {
282 throw new core.ArgumentError("Parameter projectId is required.");
283 }
284
285 _url = 'v1beta1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/clusters';
286
287 var _response = _requester.request(_url,
288 "POST",
289 body: _body,
290 queryParams: _queryParams,
291 uploadOptions: _uploadOptions,
292 uploadMedia: _uploadMedia,
293 downloadOptions: _downloadOptions);
294 return _response.then((data) => new Operation.fromJson(data));
295 }
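Note for callers: create() resolves to a long-running Operation, not the cluster itself. A sketch with illustrative values (the Operation.name field used for polling is assumed from the model defined later in this file):

    final cluster = new Cluster()
      ..clusterName = 'example-cluster'
      ..projectId = 'my-project'
      ..configuration = (new ClusterConfiguration()
        ..gceClusterConfiguration = (new GceClusterConfiguration()
          ..zoneUri = 'https://www.googleapis.com/compute/v1/projects/'
              'my-project/zones/us-central1-a'));
    final op = await api.projects.clusters.create(cluster, 'my-project');
    // Poll api.operations.get(op.name) until the operation is done.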
296
297 /**
298 * Deletes a cluster in a project.
299 *
300 * Request parameters:
301 *
302 * [projectId] - Required The ID of the Google Cloud Platform project that the
303 * cluster belongs to.
304 *
305 * [clusterName] - Required The cluster name.
306 *
307 * Completes with a [Operation].
308 *
309 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
310 * error.
311 *
312 * If the used [http.Client] completes with an error when making a REST call,
313 * this method will complete with the same error.
314 */
315 async.Future<Operation> delete(core.String projectId, core.String clusterName) {
316 var _url = null;
317 var _queryParams = new core.Map();
318 var _uploadMedia = null;
319 var _uploadOptions = null;
320 var _downloadOptions = commons.DownloadOptions.Metadata;
321 var _body = null;
322
323 if (projectId == null) {
324 throw new core.ArgumentError("Parameter projectId is required.");
325 }
326 if (clusterName == null) {
327 throw new core.ArgumentError("Parameter clusterName is required.");
328 }
329
330 _url = 'v1beta1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/clusters/' + commons.Escaper.ecapeVariable('$clusterName');
331
332 var _response = _requester.request(_url,
333 "DELETE",
334 body: _body,
335 queryParams: _queryParams,
336 uploadOptions: _uploadOptions,
337 uploadMedia: _uploadMedia,
338 downloadOptions: _downloadOptions);
339 return _response.then((data) => new Operation.fromJson(data));
340 }
341
342 /**
343 * Gets cluster diagnostic information. After the operation completes, the
344 * Operation.response field contains DiagnoseClusterOutputLocation.
345 *
346 * [request] - The metadata request object.
347 *
348 * Request parameters:
349 *
350 * [projectId] - Required The ID of the Google Cloud Platform project that the
351 * cluster belongs to.
352 *
353 * [clusterName] - Required The cluster name.
354 *
355 * Completes with a [Operation].
356 *
357 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
358 * error.
359 *
360 * If the used [http.Client] completes with an error when making a REST call,
361 * this method will complete with the same error.
362 */
363 async.Future<Operation> diagnose(DiagnoseClusterRequest request, core.String projectId, core.String clusterName) {
364 var _url = null;
365 var _queryParams = new core.Map();
366 var _uploadMedia = null;
367 var _uploadOptions = null;
368 var _downloadOptions = commons.DownloadOptions.Metadata;
369 var _body = null;
370
371 if (request != null) {
372 _body = convert.JSON.encode((request).toJson());
373 }
374 if (projectId == null) {
375 throw new core.ArgumentError("Parameter projectId is required.");
376 }
377 if (clusterName == null) {
378 throw new core.ArgumentError("Parameter clusterName is required.");
379 }
380
381 _url = 'v1beta1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/clusters/' + commons.Escaper.ecapeVariable('$clusterName') + ':diagnose';
382
383 var _response = _requester.request(_url,
384 "POST",
385 body: _body,
386 queryParams: _queryParams,
387 uploadOptions: _uploadOptions,
388 uploadMedia: _uploadMedia,
389 downloadOptions: _downloadOptions);
390 return _response.then((data) => new Operation.fromJson(data));
391 }
392
393 /**
394 * Gets the resource representation for a cluster in a project.
395 *
396 * Request parameters:
397 *
398 * [projectId] - Required The ID of the Google Cloud Platform project that the
399 * cluster belongs to.
400 *
401 * [clusterName] - Required The cluster name.
402 *
403 * Completes with a [Cluster].
404 *
405 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
406 * error.
407 *
408 * If the used [http.Client] completes with an error when making a REST call,
409 * this method will complete with the same error.
410 */
411 async.Future<Cluster> get(core.String projectId, core.String clusterName) {
412 var _url = null;
413 var _queryParams = new core.Map();
414 var _uploadMedia = null;
415 var _uploadOptions = null;
416 var _downloadOptions = commons.DownloadOptions.Metadata;
417 var _body = null;
418
419 if (projectId == null) {
420 throw new core.ArgumentError("Parameter projectId is required.");
421 }
422 if (clusterName == null) {
423 throw new core.ArgumentError("Parameter clusterName is required.");
424 }
425
426 _url = 'v1beta1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/clusters/' + commons.Escaper.ecapeVariable('$clusterName');
427
428 var _response = _requester.request(_url,
429 "GET",
430 body: _body,
431 queryParams: _queryParams,
432 uploadOptions: _uploadOptions,
433 uploadMedia: _uploadMedia,
434 downloadOptions: _downloadOptions);
435 return _response.then((data) => new Cluster.fromJson(data));
436 }
437
438 /**
439 * Lists all clusters in a project.
440 *
441 * Request parameters:
442 *
443 * [projectId] - Required The ID of the Google Cloud Platform project that the
444 * cluster belongs to.
445 *
446 * [pageToken] - The standard List page token.
447 *
448 * [pageSize] - The standard List page size.
449 *
450 * [filter] - Optional A filter constraining which clusters to list. Valid
451 * filters contain label terms such as: labels.key1 = val1 AND (-labels.k2 =
452 * val2 OR labels.k3 = val3)
453 *
454 * Completes with a [ListClustersResponse].
455 *
456 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
457 * error.
458 *
459 * If the used [http.Client] completes with an error when making a REST call,
460 * this method will complete with the same error.
461 */
462 async.Future<ListClustersResponse> list(core.String projectId, {core.String pageToken, core.int pageSize, core.String filter}) {
463 var _url = null;
464 var _queryParams = new core.Map();
465 var _uploadMedia = null;
466 var _uploadOptions = null;
467 var _downloadOptions = commons.DownloadOptions.Metadata;
468 var _body = null;
469
470 if (projectId == null) {
471 throw new core.ArgumentError("Parameter projectId is required.");
472 }
473 if (pageToken != null) {
474 _queryParams["pageToken"] = [pageToken];
475 }
476 if (pageSize != null) {
477 _queryParams["pageSize"] = ["${pageSize}"];
478 }
479 if (filter != null) {
480 _queryParams["filter"] = [filter];
481 }
482
483 _url = 'v1beta1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/clusters';
484
485 var _response = _requester.request(_url,
486 "GET",
487 body: _body,
488 queryParams: _queryParams,
489 uploadOptions: _uploadOptions,
490 uploadMedia: _uploadMedia,
491 downloadOptions: _downloadOptions);
492 return _response.then((data) => new ListClustersResponse.fromJson(data));
493 }
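The label filter syntax from the doc comment above, applied in a sketch (values are illustrative):

    final resp = await api.projects.clusters.list('my-project',
        filter: 'labels.env = staging AND labels.team = data', pageSize: 50);
    print(resp.toJson()); // ListClustersResponse is defined later in the file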
494
495 /**
496 * Updates a cluster in a project.
497 *
498 * [request] - The metadata request object.
499 *
500 * Request parameters:
501 *
502 * [projectId] - Required The ID of the Google Cloud Platform project the
503 * cluster belongs to.
504 *
505 * [clusterName] - Required The cluster name.
506 *
507 * [updateMask] - Required Specifies the path, relative to
508 * <code>Cluster</code>, of the field to update. For example, to change the
509 * number of workers in a cluster to 5, the <code>update_mask</code> parameter
510 * would be specified as
511 * <code>configuration.worker_configuration.num_instances</code>, and the
512 * PATCH request body would specify the new value, as follows:
513 * {
514 * "configuration":{
515 * "workerConfiguration":{
516 * "numInstances":"5"
517 * }
518 * }
519 * }
520 * Similarly, to change the number of preemptible workers in a cluster to 5,
521 * the <code>update_mask</code> parameter would be
522 * <code>config.secondary_worker_config.num_instances</code>, and the PATCH
523 * request body would be set as follows:
524 * {
525 * "config":{
526 * "secondaryWorkerConfig":{
527 * "numInstances":"5"
528 * }
529 * }
530 * }
531 * <strong>Note:</strong> Currently,
532 * <code>config.worker_config.num_instances</code> and
533 * <code>config.secondary_worker_config.num_instances</code> are the only
534 * fields that can be updated.
535 *
536 * Completes with a [Operation].
537 *
538 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
539 * error.
540 *
541 * If the used [http.Client] completes with an error when making a REST call,
542 * this method will complete with the same error.
543 */
544 async.Future<Operation> patch(Cluster request, core.String projectId, core.String clusterName, {core.String updateMask}) {
545 var _url = null;
546 var _queryParams = new core.Map();
547 var _uploadMedia = null;
548 var _uploadOptions = null;
549 var _downloadOptions = commons.DownloadOptions.Metadata;
550 var _body = null;
551
552 if (request != null) {
553 _body = convert.JSON.encode((request).toJson());
554 }
555 if (projectId == null) {
556 throw new core.ArgumentError("Parameter projectId is required.");
557 }
558 if (clusterName == null) {
559 throw new core.ArgumentError("Parameter clusterName is required.");
560 }
561 if (updateMask != null) {
562 _queryParams["updateMask"] = [updateMask];
563 }
564
565 _url = 'v1beta1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/clusters/' + commons.Escaper.ecapeVariable('$clusterName');
566
567 var _response = _requester.request(_url,
568 "PATCH",
569 body: _body,
570 queryParams: _queryParams,
571 uploadOptions: _uploadOptions,
572 uploadMedia: _uploadMedia,
573 downloadOptions: _downloadOptions);
574 return _response.then((data) => new Operation.fromJson(data));
575 }
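The worker-count example from the doc comment above, expressed as a patch() call. InstanceGroupConfiguration and its numInstances field are assumed from the model defined later in this file; names are illustrative:

    final patchBody = new Cluster()
      ..configuration = (new ClusterConfiguration()
        ..workerConfiguration = (new InstanceGroupConfiguration()
          ..numInstances = 5));
    final op = await api.projects.clusters.patch(
        patchBody, 'my-project', 'example-cluster',
        updateMask: 'configuration.worker_configuration.num_instances');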
576
577 }
578
579
580 class ProjectsJobsResourceApi {
581 final commons.ApiRequester _requester;
582
583 ProjectsJobsResourceApi(commons.ApiRequester client) :
584 _requester = client;
585
586 /**
587 * Starts a job cancellation request. To access the job resource after
588 * cancellation, call jobs.list or jobs.get.
589 *
590 * [request] - The metadata request object.
591 *
592 * Request parameters:
593 *
594 * [projectId] - Required The ID of the Google Cloud Platform project that the
595 * job belongs to.
596 *
597 * [jobId] - Required The job ID.
598 *
599 * Completes with a [Job].
600 *
601 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
602 * error.
603 *
604 * If the used [http.Client] completes with an error when making a REST call,
605 * this method will complete with the same error.
606 */
607 async.Future<Job> cancel(CancelJobRequest request, core.String projectId, core.String jobId) {
608 var _url = null;
609 var _queryParams = new core.Map();
610 var _uploadMedia = null;
611 var _uploadOptions = null;
612 var _downloadOptions = commons.DownloadOptions.Metadata;
613 var _body = null;
614
615 if (request != null) {
616 _body = convert.JSON.encode((request).toJson());
617 }
618 if (projectId == null) {
619 throw new core.ArgumentError("Parameter projectId is required.");
620 }
621 if (jobId == null) {
622 throw new core.ArgumentError("Parameter jobId is required.");
623 }
624
625 _url = 'v1beta1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + ':cancel';
626
627 var _response = _requester.request(_url,
628 "POST",
629 body: _body,
630 queryParams: _queryParams,
631 uploadOptions: _uploadOptions,
632 uploadMedia: _uploadMedia,
633 downloadOptions: _downloadOptions);
634 return _response.then((data) => new Job.fromJson(data));
635 }
636
637 /**
638 * Deletes the job from the project. If the job is active, the delete fails,
639 * and the response returns FAILED_PRECONDITION.
640 *
641 * Request parameters:
642 *
643 * [projectId] - Required The ID of the Google Cloud Platform project that the
644 * job belongs to.
645 *
646 * [jobId] - Required The job ID.
647 *
648 * Completes with a [Empty].
649 *
650 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
651 * error.
652 *
653 * If the used [http.Client] completes with an error when making a REST call,
654 * this method will complete with the same error.
655 */
656 async.Future<Empty> delete(core.String projectId, core.String jobId) {
657 var _url = null;
658 var _queryParams = new core.Map();
659 var _uploadMedia = null;
660 var _uploadOptions = null;
661 var _downloadOptions = commons.DownloadOptions.Metadata;
662 var _body = null;
663
664 if (projectId == null) {
665 throw new core.ArgumentError("Parameter projectId is required.");
666 }
667 if (jobId == null) {
668 throw new core.ArgumentError("Parameter jobId is required.");
669 }
670
671 _url = 'v1beta1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId');
672
673 var _response = _requester.request(_url,
674 "DELETE",
675 body: _body,
676 queryParams: _queryParams,
677 uploadOptions: _uploadOptions,
678 uploadMedia: _uploadMedia,
679 downloadOptions: _downloadOptions);
680 return _response.then((data) => new Empty.fromJson(data));
681 }
682
683 /**
684 * Gets the resource representation for a job in a project.
685 *
686 * Request parameters:
687 *
688 * [projectId] - Required The ID of the Google Cloud Platform project that the
689 * job belongs to.
690 *
691 * [jobId] - Required The job ID.
692 *
693 * Completes with a [Job].
694 *
695 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
696 * error.
697 *
698 * If the used [http.Client] completes with an error when making a REST call,
699 * this method will complete with the same error.
700 */
701 async.Future<Job> get(core.String projectId, core.String jobId) {
702 var _url = null;
703 var _queryParams = new core.Map();
704 var _uploadMedia = null;
705 var _uploadOptions = null;
706 var _downloadOptions = commons.DownloadOptions.Metadata;
707 var _body = null;
708
709 if (projectId == null) {
710 throw new core.ArgumentError("Parameter projectId is required.");
711 }
712 if (jobId == null) {
713 throw new core.ArgumentError("Parameter jobId is required.");
714 }
715
716 _url = 'v1beta1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId');
717
718 var _response = _requester.request(_url,
719 "GET",
720 body: _body,
721 queryParams: _queryParams,
722 uploadOptions: _uploadOptions,
723 uploadMedia: _uploadMedia,
724 downloadOptions: _downloadOptions);
725 return _response.then((data) => new Job.fromJson(data));
726 }
727
728 /**
729 * Lists jobs in a project.
730 *
731 * Request parameters:
732 *
733 * [projectId] - Required The ID of the Google Cloud Platform project that the
734 * job belongs to.
735 *
736 * [clusterName] - Optional If set, the returned jobs list includes only jobs
737 * that were submitted to the named cluster.
738 *
739 * [filter] - Optional A filter constraining which jobs to list. Valid filters
740 * contain job state and label terms such as: labels.key1 = val1 AND
741 * (labels.k2 = val2 OR labels.k3 = val3)
742 *
743 * [jobStateMatcher] - Optional Specifies enumerated categories of jobs to
744 * list.
745 * Possible string values are:
746 * - "ALL" : A ALL.
747 * - "ACTIVE" : A ACTIVE.
748 * - "NON_ACTIVE" : A NON_ACTIVE.
749 *
750 * [pageToken] - Optional The page token, returned by a previous call, to
751 * request the next page of results.
752 *
753 * [pageSize] - Optional The number of results to return in each response.
754 *
755 * Completes with a [ListJobsResponse].
756 *
757 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
758 * error.
759 *
760 * If the used [http.Client] completes with an error when making a REST call,
761 * this method will complete with the same error.
762 */
763 async.Future<ListJobsResponse> list(core.String projectId, {core.String clusterName, core.String filter, core.String jobStateMatcher, core.String pageToken, core.int pageSize}) {
764 var _url = null;
765 var _queryParams = new core.Map();
766 var _uploadMedia = null;
767 var _uploadOptions = null;
768 var _downloadOptions = commons.DownloadOptions.Metadata;
769 var _body = null;
770
771 if (projectId == null) {
772 throw new core.ArgumentError("Parameter projectId is required.");
773 }
774 if (clusterName != null) {
775 _queryParams["clusterName"] = [clusterName];
776 }
777 if (filter != null) {
778 _queryParams["filter"] = [filter];
779 }
780 if (jobStateMatcher != null) {
781 _queryParams["jobStateMatcher"] = [jobStateMatcher];
782 }
783 if (pageToken != null) {
784 _queryParams["pageToken"] = [pageToken];
785 }
786 if (pageSize != null) {
787 _queryParams["pageSize"] = ["${pageSize}"];
788 }
789
790 _url = 'v1beta1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs';
791
792 var _response = _requester.request(_url,
793 "GET",
794 body: _body,
795 queryParams: _queryParams,
796 uploadOptions: _uploadOptions,
797 uploadMedia: _uploadMedia,
798 downloadOptions: _downloadOptions);
799 return _response.then((data) => new ListJobsResponse.fromJson(data));
800 }
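A sketch combining the optional parameters above to list only the active jobs on one cluster (values are illustrative):

    final resp = await api.projects.jobs.list('my-project',
        clusterName: 'example-cluster', jobStateMatcher: 'ACTIVE');
    print(resp.toJson()); // ListJobsResponse is defined later in the file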
801
802 /**
803 * Updates a job in a project.
804 *
805 * [request] - The metadata request object.
806 *
807 * Request parameters:
808 *
809 * [projectId] - Required The ID of the Google Cloud Platform project that the
810 * job belongs to.
811 *
812 * [jobId] - Required The job ID.
813 *
814 * [updateMask] - Required Specifies the path, relative to <code>Job</code>,
815 * of the field to update. For example, to update the labels of a Job the
816 * <code>update_mask</code> parameter would be specified as
817 * <code>labels</code>, and the PATCH request body would specify the new
818 * value. <strong>Note:</strong> Currently, <code>labels</code> is the only
819 * field that can be updated.
820 *
821 * Completes with a [Job].
822 *
823 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
824 * error.
825 *
826 * If the used [http.Client] completes with an error when making a REST call,
827 * this method will complete with the same error.
828 */
829 async.Future<Job> patch(Job request, core.String projectId, core.String jobId, {core.String updateMask}) {
830 var _url = null;
831 var _queryParams = new core.Map();
832 var _uploadMedia = null;
833 var _uploadOptions = null;
834 var _downloadOptions = commons.DownloadOptions.Metadata;
835 var _body = null;
836
837 if (request != null) {
838 _body = convert.JSON.encode((request).toJson());
839 }
840 if (projectId == null) {
841 throw new core.ArgumentError("Parameter projectId is required.");
842 }
843 if (jobId == null) {
844 throw new core.ArgumentError("Parameter jobId is required.");
845 }
846 if (updateMask != null) {
847 _queryParams["updateMask"] = [updateMask];
848 }
849
850 _url = 'v1beta1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId');
851
852 var _response = _requester.request(_url,
853 "PATCH",
854 body: _body,
855 queryParams: _queryParams,
856 uploadOptions: _uploadOptions,
857 uploadMedia: _uploadMedia,
858 downloadOptions: _downloadOptions);
859 return _response.then((data) => new Job.fromJson(data));
860 }
861
862 /**
863 * Submits a job to a cluster.
864 *
865 * [request] - The metadata request object.
866 *
867 * Request parameters:
868 *
869 * [projectId] - Required The ID of the Google Cloud Platform project that the
870 * job belongs to.
871 *
872 * Completes with a [Job].
873 *
874 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
875 * error.
876 *
877 * If the used [http.Client] completes with an error when making a REST call,
878 * this method will complete with the same error.
879 */
880 async.Future<Job> submit(SubmitJobRequest request, core.String projectId) {
881 var _url = null;
882 var _queryParams = new core.Map();
883 var _uploadMedia = null;
884 var _uploadOptions = null;
885 var _downloadOptions = commons.DownloadOptions.Metadata;
886 var _body = null;
887
888 if (request != null) {
889 _body = convert.JSON.encode((request).toJson());
890 }
891 if (projectId == null) {
892 throw new core.ArgumentError("Parameter projectId is required.");
893 }
894
895 _url = 'v1beta1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs:submit';
896
897 var _response = _requester.request(_url,
898 "POST",
899 body: _body,
900 queryParams: _queryParams,
901 uploadOptions: _uploadOptions,
902 uploadMedia: _uploadMedia,
903 downloadOptions: _downloadOptions);
904 return _response.then((data) => new Job.fromJson(data));
905 }
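A submission sketch. SubmitJobRequest.job, Job.placement, and JobPlacement.clusterName are assumed from models defined later in this file; HadoopJob appears below, and the jar URI mirrors its doc example:

    final request = new SubmitJobRequest()
      ..job = (new Job()
        ..placement = (new JobPlacement()..clusterName = 'example-cluster')
        ..hadoopJob = (new HadoopJob()
          ..mainJarFileUri = 'gs://foo-bucket/analytics-binaries/'
              'extract-useful-metrics-mr.jar'));
    final job = await api.projects.jobs.submit(request, 'my-project');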
906
907 }
908
909
910
911 /**
912 * Specifies the type and number of accelerator cards attached to the instances
913 * of an instance group (see GPUs on Compute Engine).
914 */
915 class AcceleratorConfiguration {
916 /**
917 * The number of the accelerator cards of this type exposed to this instance.
918 */
919 core.int acceleratorCount;
920 /**
921 * Full or partial URI of the accelerator type resource to expose to this
922 * instance. See Google Compute Engine AcceleratorTypes(
923 * /compute/docs/reference/beta/acceleratorTypes)
924 */
925 core.String acceleratorTypeUri;
926
927 AcceleratorConfiguration();
928
929 AcceleratorConfiguration.fromJson(core.Map _json) {
930 if (_json.containsKey("acceleratorCount")) {
931 acceleratorCount = _json["acceleratorCount"];
932 }
933 if (_json.containsKey("acceleratorTypeUri")) {
934 acceleratorTypeUri = _json["acceleratorTypeUri"];
935 }
936 }
937
938 core.Map<core.String, core.Object> toJson() {
939 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
940 if (acceleratorCount != null) {
941 _json["acceleratorCount"] = acceleratorCount;
942 }
943 if (acceleratorTypeUri != null) {
944 _json["acceleratorTypeUri"] = acceleratorTypeUri;
945 }
946 return _json;
947 }
948 }
949
950 /** A request to cancel a job. */
951 class CancelJobRequest {
952
953 CancelJobRequest();
954
955 CancelJobRequest.fromJson(core.Map _json) {
956 }
957
958 core.Map<core.String, core.Object> toJson() {
959 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
960 return _json;
961 }
962 }
963
964 /** The request message for Operations.CancelOperation. */
965 class CancelOperationRequest {
966
967 CancelOperationRequest();
968
969 CancelOperationRequest.fromJson(core.Map _json) {
970 }
971
972 core.Map<core.String, core.Object> toJson() {
973 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
974 return _json;
975 }
976 }
977
978 /**
979 * Describes the identifying information, configuration, and status of a cluster
980 * of Google Compute Engine instances.
981 */
982 class Cluster {
983 /**
984 * Required The cluster name. Cluster names within a project must be unique.
985 * Names from deleted clusters can be reused.
986 */
987 core.String clusterName;
988 /**
989 * Output-only A cluster UUID (Unique Universal Identifier). Cloud Dataproc
990 * generates this value when it creates the cluster.
991 */
992 core.String clusterUuid;
993 /**
994 * Required The cluster configuration. Note that Cloud Dataproc may set
995 * default values, and values may change when clusters are updated.
996 */
997 ClusterConfiguration configuration;
998 /**
999 * Optional The labels to associate with this cluster. Label keys must be
1000 * between 1 and 63 characters long, and must conform to the following PCRE
1001 * regular expression: \p{Ll}\p{Lo}{0,62}. Label values must be between 1 and 63
1002 * characters long, and must conform to the following PCRE regular expression:
1003 * \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 64 labels can be associated with a
1004 * given cluster.
1005 */
1006 core.Map<core.String, core.String> labels;
1007 /** Contains cluster daemon metrics such as HDFS and YARN stats. */
1008 ClusterMetrics metrics;
1009 /**
1010 * Required The Google Cloud Platform project ID that the cluster belongs to.
1011 */
1012 core.String projectId;
1013 /** Output-only Cluster status. */
1014 ClusterStatus status;
1015 /** Output-only Previous cluster statuses. */
1016 core.List<ClusterStatus> statusHistory;
1017
1018 Cluster();
1019
1020 Cluster.fromJson(core.Map _json) {
1021 if (_json.containsKey("clusterName")) {
1022 clusterName = _json["clusterName"];
1023 }
1024 if (_json.containsKey("clusterUuid")) {
1025 clusterUuid = _json["clusterUuid"];
1026 }
1027 if (_json.containsKey("configuration")) {
1028 configuration = new ClusterConfiguration.fromJson(_json["configuration"]);
1029 }
1030 if (_json.containsKey("labels")) {
1031 labels = _json["labels"];
1032 }
1033 if (_json.containsKey("metrics")) {
1034 metrics = new ClusterMetrics.fromJson(_json["metrics"]);
1035 }
1036 if (_json.containsKey("projectId")) {
1037 projectId = _json["projectId"];
1038 }
1039 if (_json.containsKey("status")) {
1040 status = new ClusterStatus.fromJson(_json["status"]);
1041 }
1042 if (_json.containsKey("statusHistory")) {
1043 statusHistory = _json["statusHistory"].map((value) => new ClusterStatus.fromJson(value)).toList();
1044 }
1045 }
1046
1047 core.Map<core.String, core.Object> toJson() {
1048 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
1049 if (clusterName != null) {
1050 _json["clusterName"] = clusterName;
1051 }
1052 if (clusterUuid != null) {
1053 _json["clusterUuid"] = clusterUuid;
1054 }
1055 if (configuration != null) {
1056 _json["configuration"] = (configuration).toJson();
1057 }
1058 if (labels != null) {
1059 _json["labels"] = labels;
1060 }
1061 if (metrics != null) {
1062 _json["metrics"] = (metrics).toJson();
1063 }
1064 if (projectId != null) {
1065 _json["projectId"] = projectId;
1066 }
1067 if (status != null) {
1068 _json["status"] = (status).toJson();
1069 }
1070 if (statusHistory != null) {
1071 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to List();
1072 }
1073 return _json;
1074 }
1075 }
1076
1077 /** The cluster configuration. */
1078 class ClusterConfiguration {
1079 /**
1080 * Optional A Google Cloud Storage staging bucket used for sharing generated
1081 * SSH keys and configuration. If you do not specify a staging bucket, Cloud
1082 * Dataproc will determine an appropriate Cloud Storage location (US, ASIA, or
1083 * EU) for your cluster's staging bucket according to the Google Compute
1084 * Engine zone where your cluster is deployed, and then it will create and
1085 * manage this project-level, per-location bucket for you.
1086 */
1087 core.String configurationBucket;
1088 /**
1089 * Required The shared Google Compute Engine configuration settings for all
1090 * instances in a cluster.
1091 */
1092 GceClusterConfiguration gceClusterConfiguration;
1093 /**
1094 * Optional Commands to execute on each node after configuration is completed.
1095 * By default, executables are run on master and all worker nodes. You can
1096 * test a node's <code>role</code> metadata to run an executable on a master
1097 * or worker node, as shown below:
1098 * ROLE=$(/usr/share/google/get_metadata_value attributes/role)
1099 * if [[ "${ROLE}" == 'Master' ]]; then
1100 * ... master specific actions ...
1101 * else
1102 * ... worker specific actions ...
1103 * fi
1104 */
1105 core.List<NodeInitializationAction> initializationActions;
1106 /**
1107 * Optional The Google Compute Engine configuration settings for the master
1108 * instance in a cluster.
1109 */
1110 InstanceGroupConfiguration masterConfiguration;
1111 /**
1112 * Optional The Google Compute Engine configuration settings for additional
1113 * worker instances in a cluster.
1114 */
1115 InstanceGroupConfiguration secondaryWorkerConfiguration;
1116 /** Optional The configuration settings for software inside the cluster. */
1117 SoftwareConfiguration softwareConfiguration;
1118 /**
1119 * Optional The Google Compute Engine configuration settings for worker
1120 * instances in a cluster.
1121 */
1122 InstanceGroupConfiguration workerConfiguration;
1123
1124 ClusterConfiguration();
1125
1126 ClusterConfiguration.fromJson(core.Map _json) {
1127 if (_json.containsKey("configurationBucket")) {
1128 configurationBucket = _json["configurationBucket"];
1129 }
1130 if (_json.containsKey("gceClusterConfiguration")) {
1131 gceClusterConfiguration = new GceClusterConfiguration.fromJson(_json["gceClusterConfiguration"]);
1132 }
1133 if (_json.containsKey("initializationActions")) {
1134 initializationActions = _json["initializationActions"].map((value) => new NodeInitializationAction.fromJson(value)).toList();
1135 }
1136 if (_json.containsKey("masterConfiguration")) {
1137 masterConfiguration = new InstanceGroupConfiguration.fromJson(_json["masterConfiguration"]);
1138 }
1139 if (_json.containsKey("secondaryWorkerConfiguration")) {
1140 secondaryWorkerConfiguration = new InstanceGroupConfiguration.fromJson(_json["secondaryWorkerConfiguration"]);
1141 }
1142 if (_json.containsKey("softwareConfiguration")) {
1143 softwareConfiguration = new SoftwareConfiguration.fromJson(_json["softwareConfiguration"]);
1144 }
1145 if (_json.containsKey("workerConfiguration")) {
1146 workerConfiguration = new InstanceGroupConfiguration.fromJson(_json["workerConfiguration"]);
1147 }
1148 }
1149
1150 core.Map<core.String, core.Object> toJson() {
1151 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
1152 if (configurationBucket != null) {
1153 _json["configurationBucket"] = configurationBucket;
1154 }
1155 if (gceClusterConfiguration != null) {
1156 _json["gceClusterConfiguration"] = (gceClusterConfiguration).toJson();
1157 }
1158 if (initializationActions != null) {
1159 _json["initializationActions"] = initializationActions.map((value) => (val ue).toJson()).toList();
1160 }
1161 if (masterConfiguration != null) {
1162 _json["masterConfiguration"] = (masterConfiguration).toJson();
1163 }
1164 if (secondaryWorkerConfiguration != null) {
1165 _json["secondaryWorkerConfiguration"] = (secondaryWorkerConfiguration).toJ son();
1166 }
1167 if (softwareConfiguration != null) {
1168 _json["softwareConfiguration"] = (softwareConfiguration).toJson();
1169 }
1170 if (workerConfiguration != null) {
1171 _json["workerConfiguration"] = (workerConfiguration).toJson();
1172 }
1173 return _json;
1174 }
1175 }
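A sketch attaching an initialization action, per the role-based example in the doc comment above. NodeInitializationAction and its executableFile field are assumed from the model defined later in this file; the script URI is illustrative:

    final config = new ClusterConfiguration()
      ..initializationActions = [
        (new NodeInitializationAction()
          ..executableFile = 'gs://my-bucket/actions/setup.sh')
      ];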
1176
1177 /** Contains cluster daemon metrics, such as HDFS and YARN stats. */
1178 class ClusterMetrics {
1179 /** The HDFS metrics. */
1180 core.Map<core.String, core.String> hdfsMetrics;
1181 /** The YARN metrics. */
1182 core.Map<core.String, core.String> yarnMetrics;
1183
1184 ClusterMetrics();
1185
1186 ClusterMetrics.fromJson(core.Map _json) {
1187 if (_json.containsKey("hdfsMetrics")) {
1188 hdfsMetrics = _json["hdfsMetrics"];
1189 }
1190 if (_json.containsKey("yarnMetrics")) {
1191 yarnMetrics = _json["yarnMetrics"];
1192 }
1193 }
1194
1195 core.Map<core.String, core.Object> toJson() {
1196 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
1197 if (hdfsMetrics != null) {
1198 _json["hdfsMetrics"] = hdfsMetrics;
1199 }
1200 if (yarnMetrics != null) {
1201 _json["yarnMetrics"] = yarnMetrics;
1202 }
1203 return _json;
1204 }
1205 }
1206
1207 /** Metadata describing the operation. */
1208 class ClusterOperationMetadata {
1209 /** Output-only. Name of the cluster for the operation. */
1210 core.String clusterName;
1211 /** Output-only. Cluster UUID for the operation. */
1212 core.String clusterUuid;
1213 /** Output-only. Short description of operation. */
1214 core.String description;
1215 /** Output-only. Labels associated with the operation */
1216 core.Map<core.String, core.String> labels;
1217 /** Output-only. The operation type. */
1218 core.String operationType;
1219 /** Output-only. Current operation status. */
1220 ClusterOperationStatus status;
1221 /** Output-only. The previous operation status. */
1222 core.List<ClusterOperationStatus> statusHistory;
1223 /** Output-only. Errors encountered during operation execution. */
1224 core.List<core.String> warnings;
1225
1226 ClusterOperationMetadata();
1227
1228 ClusterOperationMetadata.fromJson(core.Map _json) {
1229 if (_json.containsKey("clusterName")) {
1230 clusterName = _json["clusterName"];
1231 }
1232 if (_json.containsKey("clusterUuid")) {
1233 clusterUuid = _json["clusterUuid"];
1234 }
1235 if (_json.containsKey("description")) {
1236 description = _json["description"];
1237 }
1238 if (_json.containsKey("labels")) {
1239 labels = _json["labels"];
1240 }
1241 if (_json.containsKey("operationType")) {
1242 operationType = _json["operationType"];
1243 }
1244 if (_json.containsKey("status")) {
1245 status = new ClusterOperationStatus.fromJson(_json["status"]);
1246 }
1247 if (_json.containsKey("statusHistory")) {
1248 statusHistory = _json["statusHistory"].map((value) => new ClusterOperationStatus.fromJson(value)).toList();
1249 }
1250 if (_json.containsKey("warnings")) {
1251 warnings = _json["warnings"];
1252 }
1253 }
1254
1255 core.Map<core.String, core.Object> toJson() {
1256 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
1257 if (clusterName != null) {
1258 _json["clusterName"] = clusterName;
1259 }
1260 if (clusterUuid != null) {
1261 _json["clusterUuid"] = clusterUuid;
1262 }
1263 if (description != null) {
1264 _json["description"] = description;
1265 }
1266 if (labels != null) {
1267 _json["labels"] = labels;
1268 }
1269 if (operationType != null) {
1270 _json["operationType"] = operationType;
1271 }
1272 if (status != null) {
1273 _json["status"] = (status).toJson();
1274 }
1275 if (statusHistory != null) {
1276 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to List();
1277 }
1278 if (warnings != null) {
1279 _json["warnings"] = warnings;
1280 }
1281 return _json;
1282 }
1283 }
1284
1285 /** The status of the operation. */
1286 class ClusterOperationStatus {
1287 /** Output-only. A message containing any operation metadata details. */
1288 core.String details;
1289 /** Output-only. A message containing the detailed operation state. */
1290 core.String innerState;
1291 /**
1292 * Output-only. A message containing the operation state.
1293 * Possible string values are:
1294 * - "UNKNOWN" : Unused.
1295 * - "PENDING" : The operation has been created.
1296 * - "RUNNING" : The operation is running.
1297 * - "DONE" : The operation is done; either cancelled or completed.
1298 */
1299 core.String state;
1300 /** Output-only. The time this state was entered. */
1301 core.String stateStartTime;
1302
1303 ClusterOperationStatus();
1304
1305 ClusterOperationStatus.fromJson(core.Map _json) {
1306 if (_json.containsKey("details")) {
1307 details = _json["details"];
1308 }
1309 if (_json.containsKey("innerState")) {
1310 innerState = _json["innerState"];
1311 }
1312 if (_json.containsKey("state")) {
1313 state = _json["state"];
1314 }
1315 if (_json.containsKey("stateStartTime")) {
1316 stateStartTime = _json["stateStartTime"];
1317 }
1318 }
1319
1320 core.Map<core.String, core.Object> toJson() {
1321 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
1322 if (details != null) {
1323 _json["details"] = details;
1324 }
1325 if (innerState != null) {
1326 _json["innerState"] = innerState;
1327 }
1328 if (state != null) {
1329 _json["state"] = state;
1330 }
1331 if (stateStartTime != null) {
1332 _json["stateStartTime"] = stateStartTime;
1333 }
1334 return _json;
1335 }
1336 }
1337
1338 /** The status of a cluster and its instances. */
1339 class ClusterStatus {
1340 /** Optional details of cluster's state. */
1341 core.String detail;
1342 /**
1343 * The cluster's state.
1344 * Possible string values are:
1345 * - "UNKNOWN" : The cluster state is unknown.
1346 * - "CREATING" : The cluster is being created and set up. It is not ready for
1347 * use.
1348 * - "RUNNING" : The cluster is currently running and healthy. It is ready for
1349 * use.
1350 * - "ERROR" : The cluster encountered an error. It is not ready for use.
1351 * - "DELETING" : The cluster is being deleted. It cannot be used.
1352 * - "UPDATING" : The cluster is being updated. It continues to accept and
1353 * process jobs.
1354 */
1355 core.String state;
1356 /** Time when this state was entered. */
1357 core.String stateStartTime;
1358 /**
1359 * Output-only Additional state information that includes status reported by
1360 * the agent.
1361 * Possible string values are:
1362 * - "UNSPECIFIED"
1363 * - "UNHEALTHY" : The cluster is known to be in an unhealthy state (for
1364 * example, critical daemons are not running or HDFS capacity is
1365 * exhausted). Applies to the RUNNING state.
1366 * - "STALE_STATUS" : The agent-reported status is out of date (may occur if
1367 * Cloud Dataproc loses communication with the agent). Applies to the RUNNING state.
1368 */
1369 core.String substate;
1370
1371 ClusterStatus();
1372
1373 ClusterStatus.fromJson(core.Map _json) {
1374 if (_json.containsKey("detail")) {
1375 detail = _json["detail"];
1376 }
1377 if (_json.containsKey("state")) {
1378 state = _json["state"];
1379 }
1380 if (_json.containsKey("stateStartTime")) {
1381 stateStartTime = _json["stateStartTime"];
1382 }
1383 if (_json.containsKey("substate")) {
1384 substate = _json["substate"];
1385 }
1386 }
1387
1388 core.Map<core.String, core.Object> toJson() {
1389 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
1390 if (detail != null) {
1391 _json["detail"] = detail;
1392 }
1393 if (state != null) {
1394 _json["state"] = state;
1395 }
1396 if (stateStartTime != null) {
1397 _json["stateStartTime"] = stateStartTime;
1398 }
1399 if (substate != null) {
1400 _json["substate"] = substate;
1401 }
1402 return _json;
1403 }
1404 }
1405
1406 /** The location of diagnostic output. */
1407 class DiagnoseClusterOutputLocation {
1408 /**
1409 * Output-only The Google Cloud Storage URI of the diagnostic output. This is
1410 * a plain text file with a summary of collected diagnostics.
1411 */
1412 core.String outputUri;
1413
1414 DiagnoseClusterOutputLocation();
1415
1416 DiagnoseClusterOutputLocation.fromJson(core.Map _json) {
1417 if (_json.containsKey("outputUri")) {
1418 outputUri = _json["outputUri"];
1419 }
1420 }
1421
1422 core.Map<core.String, core.Object> toJson() {
1423 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
1424 if (outputUri != null) {
1425 _json["outputUri"] = outputUri;
1426 }
1427 return _json;
1428 }
1429 }
1430
1431 /** A request to collect cluster diagnostic information. */
1432 class DiagnoseClusterRequest {
1433
1434 DiagnoseClusterRequest();
1435
1436 DiagnoseClusterRequest.fromJson(core.Map _json) {
1437 }
1438
1439 core.Map<core.String, core.Object> toJson() {
1440 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
1441 return _json;
1442 }
1443 }
1444
1445 /** The location of diagnostic output. */
1446 class DiagnoseClusterResults {
1447 /**
1448 * Output-only. The Google Cloud Storage URI of the diagnostic output. The
1449 * output report is a plain text file with a summary of collected diagnostics.
1450 */
1451 core.String outputUri;
1452
1453 DiagnoseClusterResults();
1454
1455 DiagnoseClusterResults.fromJson(core.Map _json) {
1456 if (_json.containsKey("outputUri")) {
1457 outputUri = _json["outputUri"];
1458 }
1459 }
1460
1461 core.Map<core.String, core.Object> toJson() {
1462 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
1463 if (outputUri != null) {
1464 _json["outputUri"] = outputUri;
1465 }
1466 return _json;
1467 }
1468 }
1469
1470 /** Specifies the configuration of disk options for a group of VM instances. */
1471 class DiskConfiguration {
1472 /** Optional Size in GB of the boot disk (default is 500GB). */
1473 core.int bootDiskSizeGb;
1474 /**
1475 * Optional Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are
1476 * not attached, the boot disk is used to store runtime logs and HDFS data. If
1477 * one or more SSDs are attached, this runtime bulk data is spread across
1478 * them, and the boot disk contains only basic configuration and installed
1479 * binaries.
1480 */
1481 core.int numLocalSsds;
1482
1483 DiskConfiguration();
1484
1485 DiskConfiguration.fromJson(core.Map _json) {
1486 if (_json.containsKey("bootDiskSizeGb")) {
1487 bootDiskSizeGb = _json["bootDiskSizeGb"];
1488 }
1489 if (_json.containsKey("numLocalSsds")) {
1490 numLocalSsds = _json["numLocalSsds"];
1491 }
1492 }
1493
1494 core.Map<core.String, core.Object> toJson() {
1495 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
1496 if (bootDiskSizeGb != null) {
1497 _json["bootDiskSizeGb"] = bootDiskSizeGb;
1498 }
1499 if (numLocalSsds != null) {
1500 _json["numLocalSsds"] = numLocalSsds;
1501 }
1502 return _json;
1503 }
1504 }
1505
1506 /**
1507 * A generic empty message that you can re-use to avoid defining duplicated
1508 * empty messages in your APIs. A typical example is to use it as the request or
1509 * the response type of an API method. For instance:
1510 * service Foo {
1511 * rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
1512 * }
1513 * The JSON representation for Empty is empty JSON object {}.
1514 */
1515 class Empty {
1516
1517 Empty();
1518
1519 Empty.fromJson(core.Map _json) {
1520 }
1521
1522 core.Map<core.String, core.Object> toJson() {
1523 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
1524 return _json;
1525 }
1526 }
1527
1528 /**
1529 * Common configuration settings for resources of Google Compute Engine cluster
1530 * instances, applicable to all instances in the cluster.
1531 */
1532 class GceClusterConfiguration {
1533 /**
1534 * If true, all instances in the cluster will only have internal IP addresses.
1535 * By default, clusters are not restricted to internal IP addresses, and will
1536 * have ephemeral external IP addresses assigned to each instance. This
1537 * restriction can only be enabled for subnetwork enabled networks, and all
1538 * off-cluster dependencies must be configured to be accessible without
1539 * external IP addresses.
1540 */
1541 core.bool internalIpOnly;
1542 /** The Google Compute Engine metadata entries to add to all instances. */
1543 core.Map<core.String, core.String> metadata;
1544 /**
1545 * The Google Compute Engine network to be used for machine communications.
1546 * Cannot be specified with subnetwork_uri. If neither network_uri nor
1547 * subnetwork_uri is specified, the "default" network of the project is used,
1548 * if it exists. Cannot be a "Custom Subnet Network" (see
1549 * https://cloud.google.com/compute/docs/subnetworks for more information).
1550 * Example:
1551 * https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default.
1552 */
1553 core.String networkUri;
1554 /**
1555 * Optional The service account of the instances. Defaults to the default
1556 * Google Compute Engine service account. Custom service accounts need
1557 * permissions equivalent to the following IAM roles:
1558 * roles/logging.logWriter
1559 * roles/storage.objectAdmin(see
1560 * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
1561 * for more information). Example:
1562 * [account_id]@[project_id].iam.gserviceaccount.com
1563 */
1564 core.String serviceAccount;
1565 /**
1566 * The URIs of service account scopes to be included in Google Compute Engine
1567 * instances. The following base set of scopes is always included: -
1568 * https://www.googleapis.com/auth/cloud.useraccounts.readonly -
1569 * https://www.googleapis.com/auth/devstorage.read_write -
1570 * https://www.googleapis.com/auth/logging.write If no scopes are specified,
1571 * the following defaults are also provided: -
1572 * https://www.googleapis.com/auth/bigquery -
1573 * https://www.googleapis.com/auth/bigtable.admin.table -
1574 * https://www.googleapis.com/auth/bigtable.data -
1575 * https://www.googleapis.com/auth/devstorage.full_control
1576 */
1577 core.List<core.String> serviceAccountScopes;
1578 /**
1579 * The Google Compute Engine subnetwork to be used for machine communications.
1580 * Cannot be specified with network_uri. Example:
1581 * https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0.
1582 */
1583 core.String subnetworkUri;
1584 /** The Google Compute Engine tags to add to all instances. */
1585 core.List<core.String> tags;
1586 /**
1587 * Required The zone where the Google Compute Engine cluster will be located.
1588 * Example:
1589 * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone].
1590 */
1591 core.String zoneUri;
1592
1593 GceClusterConfiguration();
1594
1595 GceClusterConfiguration.fromJson(core.Map _json) {
1596 if (_json.containsKey("internalIpOnly")) {
1597 internalIpOnly = _json["internalIpOnly"];
1598 }
1599 if (_json.containsKey("metadata")) {
1600 metadata = _json["metadata"];
1601 }
1602 if (_json.containsKey("networkUri")) {
1603 networkUri = _json["networkUri"];
1604 }
1605 if (_json.containsKey("serviceAccount")) {
1606 serviceAccount = _json["serviceAccount"];
1607 }
1608 if (_json.containsKey("serviceAccountScopes")) {
1609 serviceAccountScopes = _json["serviceAccountScopes"];
1610 }
1611 if (_json.containsKey("subnetworkUri")) {
1612 subnetworkUri = _json["subnetworkUri"];
1613 }
1614 if (_json.containsKey("tags")) {
1615 tags = _json["tags"];
1616 }
1617 if (_json.containsKey("zoneUri")) {
1618 zoneUri = _json["zoneUri"];
1619 }
1620 }
1621
1622 core.Map<core.String, core.Object> toJson() {
1623 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
1624 if (internalIpOnly != null) {
1625 _json["internalIpOnly"] = internalIpOnly;
1626 }
1627 if (metadata != null) {
1628 _json["metadata"] = metadata;
1629 }
1630 if (networkUri != null) {
1631 _json["networkUri"] = networkUri;
1632 }
1633 if (serviceAccount != null) {
1634 _json["serviceAccount"] = serviceAccount;
1635 }
1636 if (serviceAccountScopes != null) {
1637 _json["serviceAccountScopes"] = serviceAccountScopes;
1638 }
1639 if (subnetworkUri != null) {
1640 _json["subnetworkUri"] = subnetworkUri;
1641 }
1642 if (tags != null) {
1643 _json["tags"] = tags;
1644 }
1645 if (zoneUri != null) {
1646 _json["zoneUri"] = zoneUri;
1647 }
1648 return _json;
1649 }
1650 }
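A sketch of an internal-IP-only configuration on a subnetwork, following the URI examples in the doc comments above (project, zone, and region values are illustrative):

    final gce = new GceClusterConfiguration()
      ..zoneUri = 'https://www.googleapis.com/compute/v1/projects/'
          'my-project/zones/us-east1-b'
      ..subnetworkUri = 'https://www.googleapis.com/compute/v1/projects/'
          'my-project/regions/us-east1/sub0'
      ..internalIpOnly = true;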
1651
1652 /** A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. */
1653 class HadoopJob {
1654 /**
1655 * Optional HCFS URIs of archives to be extracted in the working directory of
1656 * Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz,
1657 * or .zip.
1658 */
1659 core.List<core.String> archiveUris;
1660 /**
1661 * Optional The arguments to pass to the driver. Do not include arguments,
1662 * such as -libjars or -Dfoo=bar, that can be set as job properties, since a
1663 * collision may occur that causes an incorrect job submission.
1664 */
1665 core.List<core.String> args;
1666 /**
1667 * Optional HCFS URIs of files to be copied to the working directory of Hadoop
1668 * drivers and distributed tasks. Useful for naively parallel tasks.
1669 */
1670 core.List<core.String> fileUris;
1671 /**
1672 * Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and
1673 * tasks.
1674 */
1675 core.List<core.String> jarFileUris;
1676 /** Optional The runtime log configuration for job execution. */
1677 LoggingConfiguration loggingConfiguration;
1678 /**
1679 * The name of the driver's main class. The jar file containing the class must
1680 * be in the default CLASSPATH or specified in jar_file_uris.
1681 */
1682 core.String mainClass;
1683 /**
1684 * The Hadoop Compatible Filesystem (HCFS) URI of the jar file containing the
1685 * main class. Examples:
1686 * 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar'
1687 * 'hdfs:/tmp/test-samples/custom-wordcount.jar'
1688 * 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
1689 */
1690 core.String mainJarFileUri;
1691 /**
1692 * Optional A mapping of property names to values, used to configure Hadoop.
1693 * Properties that conflict with values set by the Cloud Dataproc API may be
1694 * overwritten. Can include properties set in /etc/hadoop/conf/ *-site and
1695 * classes in user code.
1696 */
1697 core.Map<core.String, core.String> properties;
1698
1699 HadoopJob();
1700
1701 HadoopJob.fromJson(core.Map _json) {
1702 if (_json.containsKey("archiveUris")) {
1703 archiveUris = _json["archiveUris"];
1704 }
1705 if (_json.containsKey("args")) {
1706 args = _json["args"];
1707 }
1708 if (_json.containsKey("fileUris")) {
1709 fileUris = _json["fileUris"];
1710 }
1711 if (_json.containsKey("jarFileUris")) {
1712 jarFileUris = _json["jarFileUris"];
1713 }
1714 if (_json.containsKey("loggingConfiguration")) {
1715 loggingConfiguration = new LoggingConfiguration.fromJson(_json["loggingConfiguration"]);
1716 }
1717 if (_json.containsKey("mainClass")) {
1718 mainClass = _json["mainClass"];
1719 }
1720 if (_json.containsKey("mainJarFileUri")) {
1721 mainJarFileUri = _json["mainJarFileUri"];
1722 }
1723 if (_json.containsKey("properties")) {
1724 properties = _json["properties"];
1725 }
1726 }
1727
1728 core.Map<core.String, core.Object> toJson() {
1729 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
1730 if (archiveUris != null) {
1731 _json["archiveUris"] = archiveUris;
1732 }
1733 if (args != null) {
1734 _json["args"] = args;
1735 }
1736 if (fileUris != null) {
1737 _json["fileUris"] = fileUris;
1738 }
1739 if (jarFileUris != null) {
1740 _json["jarFileUris"] = jarFileUris;
1741 }
1742 if (loggingConfiguration != null) {
1743 _json["loggingConfiguration"] = (loggingConfiguration).toJson();
1744 }
1745 if (mainClass != null) {
1746 _json["mainClass"] = mainClass;
1747 }
1748 if (mainJarFileUri != null) {
1749 _json["mainJarFileUri"] = mainJarFileUri;
1750 }
1751 if (properties != null) {
1752 _json["properties"] = properties;
1753 }
1754 return _json;
1755 }
1756 }
1757
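// A minimal sketch of building a HadoopJob; the bucket and jar names below are
// hypothetical, not part of this library.
//
//   final hadoopJob = new HadoopJob()
//     ..mainJarFileUri = 'gs://example-bucket/jobs/wordcount.jar'
//     ..args = ['gs://example-bucket/input', 'gs://example-bucket/output'];
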
1758 /** A Cloud Dataproc job for running Hive queries on YARN. */
1759 class HiveJob {
1760 /**
1761 * Optional Whether to continue executing queries if a query fails. The
1762 * default value is false. Setting to true can be useful when executing
1763 * independent parallel queries.
1764 */
1765 core.bool continueOnFailure;
1766 /**
1767 * Optional HCFS URIs of jar files to add to the CLASSPATH of the Hive server
1768 * and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
1769 */
1770 core.List<core.String> jarFileUris;
1771 /**
1772 * Optional A mapping of property names and values, used to configure Hive.
1773 * Properties that conflict with values set by the Cloud Dataproc API may be
1774 * overwritten. Can include properties set in /etc/hadoop/conf/ *-site.xml,
1775 * /etc/hive/conf/hive-site.xml, and classes in user code.
1776 */
1777 core.Map<core.String, core.String> properties;
1778 /** The HCFS URI of the script that contains Hive queries. */
1779 core.String queryFileUri;
1780 /** A list of queries. */
1781 QueryList queryList;
1782 /**
1783 * Optional Mapping of query variable names to values (equivalent to the Hive
1784 * command: SET name="value";).
1785 */
1786 core.Map<core.String, core.String> scriptVariables;
1787
1788 HiveJob();
1789
1790 HiveJob.fromJson(core.Map _json) {
1791 if (_json.containsKey("continueOnFailure")) {
1792 continueOnFailure = _json["continueOnFailure"];
1793 }
1794 if (_json.containsKey("jarFileUris")) {
1795 jarFileUris = _json["jarFileUris"];
1796 }
1797 if (_json.containsKey("properties")) {
1798 properties = _json["properties"];
1799 }
1800 if (_json.containsKey("queryFileUri")) {
1801 queryFileUri = _json["queryFileUri"];
1802 }
1803 if (_json.containsKey("queryList")) {
1804 queryList = new QueryList.fromJson(_json["queryList"]);
1805 }
1806 if (_json.containsKey("scriptVariables")) {
1807 scriptVariables = _json["scriptVariables"];
1808 }
1809 }
1810
1811 core.Map<core.String, core.Object> toJson() {
1812 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
1813 if (continueOnFailure != null) {
1814 _json["continueOnFailure"] = continueOnFailure;
1815 }
1816 if (jarFileUris != null) {
1817 _json["jarFileUris"] = jarFileUris;
1818 }
1819 if (properties != null) {
1820 _json["properties"] = properties;
1821 }
1822 if (queryFileUri != null) {
1823 _json["queryFileUri"] = queryFileUri;
1824 }
1825 if (queryList != null) {
1826 _json["queryList"] = (queryList).toJson();
1827 }
1828 if (scriptVariables != null) {
1829 _json["scriptVariables"] = scriptVariables;
1830 }
1831 return _json;
1832 }
1833 }
1834
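// A sketch of a HiveJob that substitutes a query variable (equivalent to the
// Hive command SET name="value";); the query file URI is hypothetical.
//
//   final hiveJob = new HiveJob()
//     ..queryFileUri = 'gs://example-bucket/queries/report.hql'
//     ..scriptVariables = {'run_date': '2017-07-31'};
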
1835 /**
1836 * The configuration settings for Google Compute Engine resources in an instance
1837 * group, such as a master or worker group.
1838 */
1839 class InstanceGroupConfiguration {
1840 /**
1841 * Optional The Google Compute Engine accelerator configuration for these
1842 * instances.
1843 */
1844 core.List<AcceleratorConfiguration> accelerators;
1845 /** Disk option configuration settings. */
1846 DiskConfiguration diskConfiguration;
1847 /**
1848 * Output-only The Google Compute Engine image resource used for cluster
1849 * instances. Inferred from SoftwareConfiguration.image_version.
1850 */
1851 core.String imageUri;
1852 /**
1853 * The list of instance names. Dataproc derives the names from cluster_name,
1854 * num_instances, and the instance group if not set by the user (the
1855 * recommended practice is to let Dataproc derive the name).
1856 */
1857 core.List<core.String> instanceNames;
1858 /** Specifies that this instance group contains Preemptible Instances. */
1859 core.bool isPreemptible;
1860 /**
1861 * The Google Compute Engine machine type used for cluster instances. Example:
1862 * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2.
1863 */
1864 core.String machineTypeUri;
1865 /**
1866 * Output-only The configuration for Google Compute Engine Instance Group
1867 * Manager that manages this group. This is only used for preemptible instance
1868 * groups.
1869 */
1870 ManagedGroupConfiguration managedGroupConfiguration;
1871 /**
1872 * The number of VM instances in the instance group. For master instance
1873 * groups, must be set to 1.
1874 */
1875 core.int numInstances;
1876
1877 InstanceGroupConfiguration();
1878
1879 InstanceGroupConfiguration.fromJson(core.Map _json) {
1880 if (_json.containsKey("accelerators")) {
1881 accelerators = _json["accelerators"].map((value) => new AcceleratorConfiguration.fromJson(value)).toList();
1882 }
1883 if (_json.containsKey("diskConfiguration")) {
1884 diskConfiguration = new DiskConfiguration.fromJson(_json["diskConfiguration"]);
1885 }
1886 if (_json.containsKey("imageUri")) {
1887 imageUri = _json["imageUri"];
1888 }
1889 if (_json.containsKey("instanceNames")) {
1890 instanceNames = _json["instanceNames"];
1891 }
1892 if (_json.containsKey("isPreemptible")) {
1893 isPreemptible = _json["isPreemptible"];
1894 }
1895 if (_json.containsKey("machineTypeUri")) {
1896 machineTypeUri = _json["machineTypeUri"];
1897 }
1898 if (_json.containsKey("managedGroupConfiguration")) {
1899 managedGroupConfiguration = new ManagedGroupConfiguration.fromJson(_json["managedGroupConfiguration"]);
1900 }
1901 if (_json.containsKey("numInstances")) {
1902 numInstances = _json["numInstances"];
1903 }
1904 }
1905
1906 core.Map<core.String, core.Object> toJson() {
1907 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
1908 if (accelerators != null) {
1909 _json["accelerators"] = accelerators.map((value) => (value).toJson()).toLi st();
1910 }
1911 if (diskConfiguration != null) {
1912 _json["diskConfiguration"] = (diskConfiguration).toJson();
1913 }
1914 if (imageUri != null) {
1915 _json["imageUri"] = imageUri;
1916 }
1917 if (instanceNames != null) {
1918 _json["instanceNames"] = instanceNames;
1919 }
1920 if (isPreemptible != null) {
1921 _json["isPreemptible"] = isPreemptible;
1922 }
1923 if (machineTypeUri != null) {
1924 _json["machineTypeUri"] = machineTypeUri;
1925 }
1926 if (managedGroupConfiguration != null) {
1927 _json["managedGroupConfiguration"] = (managedGroupConfiguration).toJson();
1928 }
1929 if (numInstances != null) {
1930 _json["numInstances"] = numInstances;
1931 }
1932 return _json;
1933 }
1934 }
1935
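// A sketch of a two-worker instance group; the machine type URI is a
// hypothetical example, and numInstances must be 1 for master groups.
//
//   final workers = new InstanceGroupConfiguration()
//     ..numInstances = 2
//     ..machineTypeUri = 'https://www.googleapis.com/compute/v1/projects/my-project/zones/us-east1-a/machineTypes/n1-standard-2';
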
1936 /** A Cloud Dataproc job resource. */
1937 class Job {
1938 /**
1939 * Output-only If present, the location of miscellaneous control files which
1940 * may be used as part of job setup and handling. If not present, control
1941 * files may be placed in the same location as driver_output_uri.
1942 */
1943 core.String driverControlFilesUri;
1944 /**
1945 * Output-only A URI pointing to the location of the stdin of the job's driver
1946 * program, only set if the job is interactive.
1947 */
1948 core.String driverInputResourceUri;
1949 /**
1950 * Output-only A URI pointing to the location of the stdout of the job's
1951 * driver program.
1952 */
1953 core.String driverOutputResourceUri;
1954 /** Job is a Hadoop job. */
1955 HadoopJob hadoopJob;
1956 /** Job is a Hive job. */
1957 HiveJob hiveJob;
1958 /**
1959 * Optional If set to true, the driver's stdin will be kept open and
1960 * driver_input_uri will be set to provide a path at which additional input
1961 * can be sent to the driver.
1962 */
1963 core.bool interactive;
1964 /**
1965 * Optional The labels to associate with this job. Label keys must be between
1966 * 1 and 63 characters long, and must conform to the following regular
1967 * expression: \p{Ll}\p{Lo}{0,62}. Label values must be between 1 and 63
1968 * characters long, and must conform to the following regular expression:
1969 * \p{Ll}\p{Lo}\p{N}_-{0,63}. No more than 64 labels can be associated with a
1970 * given job.
1971 */
1972 core.Map<core.String, core.String> labels;
1973 /** Job is a Pig job. */
1974 PigJob pigJob;
1975 /**
1976 * Required Job information, including how, when, and where to run the job.
1977 */
1978 JobPlacement placement;
1979 /** Job is a Pyspark job. */
1980 PySparkJob pysparkJob;
1981 /**
1982 * Optional The fully qualified reference to the job, which can be used to
1983 * obtain the equivalent REST path of the job resource. If this property is
1984 * not specified when a job is created, the server generates a
1985 * <code>job_id</code>.
1986 */
1987 JobReference reference;
1988 /** Optional Job scheduling configuration. */
1989 JobScheduling scheduling;
1990 /** Job is a Spark job. */
1991 SparkJob sparkJob;
1992 /** Job is a SparkSql job. */
1993 SparkSqlJob sparkSqlJob;
1994 /**
1995 * Output-only The job status. Additional application-specific status
1996 * information may be contained in the <code>type_job</code> and
1997 * <code>yarn_applications</code> fields.
1998 */
1999 JobStatus status;
2000 /** Output-only The previous job status. */
2001 core.List<JobStatus> statusHistory;
2002 /**
2003 * Output-only The email address of the user submitting the job. For jobs
2004 * submitted on the cluster, the address is <code>username@hostname</code>.
2005 */
2006 core.String submittedBy;
2007 /** Output-only The collection of YARN applications spun up by this job. */
2008 core.List<YarnApplication> yarnApplications;
2009
2010 Job();
2011
2012 Job.fromJson(core.Map _json) {
2013 if (_json.containsKey("driverControlFilesUri")) {
2014 driverControlFilesUri = _json["driverControlFilesUri"];
2015 }
2016 if (_json.containsKey("driverInputResourceUri")) {
2017 driverInputResourceUri = _json["driverInputResourceUri"];
2018 }
2019 if (_json.containsKey("driverOutputResourceUri")) {
2020 driverOutputResourceUri = _json["driverOutputResourceUri"];
2021 }
2022 if (_json.containsKey("hadoopJob")) {
2023 hadoopJob = new HadoopJob.fromJson(_json["hadoopJob"]);
2024 }
2025 if (_json.containsKey("hiveJob")) {
2026 hiveJob = new HiveJob.fromJson(_json["hiveJob"]);
2027 }
2028 if (_json.containsKey("interactive")) {
2029 interactive = _json["interactive"];
2030 }
2031 if (_json.containsKey("labels")) {
2032 labels = _json["labels"];
2033 }
2034 if (_json.containsKey("pigJob")) {
2035 pigJob = new PigJob.fromJson(_json["pigJob"]);
2036 }
2037 if (_json.containsKey("placement")) {
2038 placement = new JobPlacement.fromJson(_json["placement"]);
2039 }
2040 if (_json.containsKey("pysparkJob")) {
2041 pysparkJob = new PySparkJob.fromJson(_json["pysparkJob"]);
2042 }
2043 if (_json.containsKey("reference")) {
2044 reference = new JobReference.fromJson(_json["reference"]);
2045 }
2046 if (_json.containsKey("scheduling")) {
2047 scheduling = new JobScheduling.fromJson(_json["scheduling"]);
2048 }
2049 if (_json.containsKey("sparkJob")) {
2050 sparkJob = new SparkJob.fromJson(_json["sparkJob"]);
2051 }
2052 if (_json.containsKey("sparkSqlJob")) {
2053 sparkSqlJob = new SparkSqlJob.fromJson(_json["sparkSqlJob"]);
2054 }
2055 if (_json.containsKey("status")) {
2056 status = new JobStatus.fromJson(_json["status"]);
2057 }
2058 if (_json.containsKey("statusHistory")) {
2059 statusHistory = _json["statusHistory"].map((value) => new JobStatus.fromJson(value)).toList();
2060 }
2061 if (_json.containsKey("submittedBy")) {
2062 submittedBy = _json["submittedBy"];
2063 }
2064 if (_json.containsKey("yarnApplications")) {
2065 yarnApplications = _json["yarnApplications"].map((value) => new YarnApplication.fromJson(value)).toList();
2066 }
2067 }
2068
2069 core.Map<core.String, core.Object> toJson() {
2070 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
2071 if (driverControlFilesUri != null) {
2072 _json["driverControlFilesUri"] = driverControlFilesUri;
2073 }
2074 if (driverInputResourceUri != null) {
2075 _json["driverInputResourceUri"] = driverInputResourceUri;
2076 }
2077 if (driverOutputResourceUri != null) {
2078 _json["driverOutputResourceUri"] = driverOutputResourceUri;
2079 }
2080 if (hadoopJob != null) {
2081 _json["hadoopJob"] = (hadoopJob).toJson();
2082 }
2083 if (hiveJob != null) {
2084 _json["hiveJob"] = (hiveJob).toJson();
2085 }
2086 if (interactive != null) {
2087 _json["interactive"] = interactive;
2088 }
2089 if (labels != null) {
2090 _json["labels"] = labels;
2091 }
2092 if (pigJob != null) {
2093 _json["pigJob"] = (pigJob).toJson();
2094 }
2095 if (placement != null) {
2096 _json["placement"] = (placement).toJson();
2097 }
2098 if (pysparkJob != null) {
2099 _json["pysparkJob"] = (pysparkJob).toJson();
2100 }
2101 if (reference != null) {
2102 _json["reference"] = (reference).toJson();
2103 }
2104 if (scheduling != null) {
2105 _json["scheduling"] = (scheduling).toJson();
2106 }
2107 if (sparkJob != null) {
2108 _json["sparkJob"] = (sparkJob).toJson();
2109 }
2110 if (sparkSqlJob != null) {
2111 _json["sparkSqlJob"] = (sparkSqlJob).toJson();
2112 }
2113 if (status != null) {
2114 _json["status"] = (status).toJson();
2115 }
2116 if (statusHistory != null) {
2117 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to List();
2118 }
2119 if (submittedBy != null) {
2120 _json["submittedBy"] = submittedBy;
2121 }
2122 if (yarnApplications != null) {
2123 _json["yarnApplications"] = yarnApplications.map((value) => (value).toJson ()).toList();
2124 }
2125 return _json;
2126 }
2127 }
2128
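// A sketch assembling a Job resource: exactly one job-type field (hadoopJob,
// hiveJob, pigJob, pysparkJob, sparkJob, or sparkSqlJob) should be set, here
// assuming `hadoopJob` was built as in the HadoopJob sketch above and that a
// cluster named 'example-cluster' exists.
//
//   final job = new Job()
//     ..placement = (new JobPlacement()..clusterName = 'example-cluster')
//     ..hadoopJob = hadoopJob;
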
2129 /** Cloud Dataproc job configuration. */
2130 class JobPlacement {
2131 /** Required The name of the cluster where the job will be submitted. */
2132 core.String clusterName;
2133 /**
2134 * Output-only A cluster UUID generated by the Dataproc service when the job
2135 * is submitted.
2136 */
2137 core.String clusterUuid;
2138
2139 JobPlacement();
2140
2141 JobPlacement.fromJson(core.Map _json) {
2142 if (_json.containsKey("clusterName")) {
2143 clusterName = _json["clusterName"];
2144 }
2145 if (_json.containsKey("clusterUuid")) {
2146 clusterUuid = _json["clusterUuid"];
2147 }
2148 }
2149
2150 core.Map<core.String, core.Object> toJson() {
2151 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
2152 if (clusterName != null) {
2153 _json["clusterName"] = clusterName;
2154 }
2155 if (clusterUuid != null) {
2156 _json["clusterUuid"] = clusterUuid;
2157 }
2158 return _json;
2159 }
2160 }
2161
2162 /** Encapsulates the full scoping used to reference a job. */
2163 class JobReference {
2164 /**
2165 * Required The job ID, which must be unique within the project. The job ID is
2166 * generated by the server upon job submission or provided by the user as a
2167 * means to perform retries without creating duplicate jobs. The ID must
2168 * contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens
2169 * (-). The maximum length is 100 characters.
2170 */
2171 core.String jobId;
2172 /**
2173 * Required The ID of the Google Cloud Platform project that the job belongs
2174 * to.
2175 */
2176 core.String projectId;
2177
2178 JobReference();
2179
2180 JobReference.fromJson(core.Map _json) {
2181 if (_json.containsKey("jobId")) {
2182 jobId = _json["jobId"];
2183 }
2184 if (_json.containsKey("projectId")) {
2185 projectId = _json["projectId"];
2186 }
2187 }
2188
2189 core.Map<core.String, core.Object> toJson() {
2190 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
2191 if (jobId != null) {
2192 _json["jobId"] = jobId;
2193 }
2194 if (projectId != null) {
2195 _json["projectId"] = projectId;
2196 }
2197 return _json;
2198 }
2199 }
2200
2201 /**
2202 * Job scheduling options. Beta Feature: These options are available for
2203 * testing purposes only. They may be changed before final release.
2204 */
2205 class JobScheduling {
2206 /**
2207 * Optional Maximum number of times per hour a driver may be restarted as a
2208 * result of the driver terminating with a non-zero code before the job is
2209 * reported failed. A job may be reported as thrashing if the driver exits
2210 * with a non-zero code 4 times within a 10-minute window. Maximum value is 10.
2211 */
2212 core.int maxFailuresPerHour;
2213
2214 JobScheduling();
2215
2216 JobScheduling.fromJson(core.Map _json) {
2217 if (_json.containsKey("maxFailuresPerHour")) {
2218 maxFailuresPerHour = _json["maxFailuresPerHour"];
2219 }
2220 }
2221
2222 core.Map<core.String, core.Object> toJson() {
2223 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
2224 if (maxFailuresPerHour != null) {
2225 _json["maxFailuresPerHour"] = maxFailuresPerHour;
2226 }
2227 return _json;
2228 }
2229 }
2230
2231 /** Cloud Dataproc job status. */
2232 class JobStatus {
2233 /**
2234 * Optional Job state details, such as an error description if the state is
2235 * <code>ERROR</code>.
2236 */
2237 core.String details;
2238 /**
2239 * Required A state message specifying the overall job state.
2240 * Possible string values are:
2241 * - "STATE_UNSPECIFIED" : The job state is unknown.
2242 * - "PENDING" : The job is pending; it has been submitted, but is not yet
2243 * running.
2244 * - "SETUP_DONE" : Job has been received by the service and completed initial
2245 * setup; it will shortly be submitted to the cluster.
2246 * - "RUNNING" : The job is running on the cluster.
2247 * - "CANCEL_PENDING" : A CancelJob request has been received, but is pending.
2248 * - "CANCEL_STARTED" : Transient in-flight resources have been canceled, and
2249 * the request to cancel the running job has been issued to the cluster.
2250 * - "CANCELLED" : The job cancelation was successful.
2251 * - "DONE" : The job has completed successfully.
2252 * - "ERROR" : The job has completed, but encountered an error.
2253 * - "ATTEMPT_FAILURE" : Job attempt has failed. The detail field contains
2254 * failure details for this attempt.Applies to restartable jobs only.
2255 */
2256 core.String state;
2257 /** Output-only The time when this state was entered. */
2258 core.String stateStartTime;
2259 /**
2260 * Output-only Additional state information, which includes status reported by
2261 * the agent.
2262 * Possible string values are:
2263 * - "UNSPECIFIED"
2264 * - "SUBMITTED" : The Job is submitted to the agent.Applies to RUNNING state.
2265 * - "QUEUED" : The Job has been received and is awaiting execution (it may be
2266 * waiting for a condition to be met). See the "details" field for the reason
2267 * for the delay.Applies to RUNNING state.
2268 * - "STALE_STATUS" : The agent-reported status is out of date, which may be
2269 * caused by a loss of communication between the agent and Cloud Dataproc. If
2270 * the agent does not send a timely update, the job will fail.Applies to
2271 * RUNNING state.
2272 */
2273 core.String substate;
2274
2275 JobStatus();
2276
2277 JobStatus.fromJson(core.Map _json) {
2278 if (_json.containsKey("details")) {
2279 details = _json["details"];
2280 }
2281 if (_json.containsKey("state")) {
2282 state = _json["state"];
2283 }
2284 if (_json.containsKey("stateStartTime")) {
2285 stateStartTime = _json["stateStartTime"];
2286 }
2287 if (_json.containsKey("substate")) {
2288 substate = _json["substate"];
2289 }
2290 }
2291
2292 core.Map<core.String, core.Object> toJson() {
2293 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
2294 if (details != null) {
2295 _json["details"] = details;
2296 }
2297 if (state != null) {
2298 _json["state"] = state;
2299 }
2300 if (stateStartTime != null) {
2301 _json["stateStartTime"] = stateStartTime;
2302 }
2303 if (substate != null) {
2304 _json["substate"] = substate;
2305 }
2306 return _json;
2307 }
2308 }
2309
2310 /** The list of all clusters in a project. */
2311 class ListClustersResponse {
2312 /** Output-only The clusters in the project. */
2313 core.List<Cluster> clusters;
2314 /** The standard List next-page token. */
2315 core.String nextPageToken;
2316
2317 ListClustersResponse();
2318
2319 ListClustersResponse.fromJson(core.Map _json) {
2320 if (_json.containsKey("clusters")) {
2321 clusters = _json["clusters"].map((value) => new Cluster.fromJson(value)).toList();
2322 }
2323 if (_json.containsKey("nextPageToken")) {
2324 nextPageToken = _json["nextPageToken"];
2325 }
2326 }
2327
2328 core.Map<core.String, core.Object> toJson() {
2329 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
2330 if (clusters != null) {
2331 _json["clusters"] = clusters.map((value) => (value).toJson()).toList();
2332 }
2333 if (nextPageToken != null) {
2334 _json["nextPageToken"] = nextPageToken;
2335 }
2336 return _json;
2337 }
2338 }
2339
2340 /** A list of jobs in a project. */
2341 class ListJobsResponse {
2342 /** Output-only Jobs list. */
2343 core.List<Job> jobs;
2344 /**
2345 * Optional This token is included in the response if there are more results
2346 * to fetch. To fetch additional results, provide this value as the page_token
2347 * in a subsequent <code>ListJobsRequest</code>.
2348 */
2349 core.String nextPageToken;
2350
2351 ListJobsResponse();
2352
2353 ListJobsResponse.fromJson(core.Map _json) {
2354 if (_json.containsKey("jobs")) {
2355 jobs = _json["jobs"].map((value) => new Job.fromJson(value)).toList();
2356 }
2357 if (_json.containsKey("nextPageToken")) {
2358 nextPageToken = _json["nextPageToken"];
2359 }
2360 }
2361
2362 core.Map<core.String, core.Object> toJson() {
2363 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
2364 if (jobs != null) {
2365 _json["jobs"] = jobs.map((value) => (value).toJson()).toList();
2366 }
2367 if (nextPageToken != null) {
2368 _json["nextPageToken"] = nextPageToken;
2369 }
2370 return _json;
2371 }
2372 }
2373
2374 /** The response message for Operations.ListOperations. */
2375 class ListOperationsResponse {
2376 /** The standard List next-page token. */
2377 core.String nextPageToken;
2378 /** A list of operations that matches the specified filter in the request. */
2379 core.List<Operation> operations;
2380
2381 ListOperationsResponse();
2382
2383 ListOperationsResponse.fromJson(core.Map _json) {
2384 if (_json.containsKey("nextPageToken")) {
2385 nextPageToken = _json["nextPageToken"];
2386 }
2387 if (_json.containsKey("operations")) {
2388 operations = _json["operations"].map((value) => new Operation.fromJson(value)).toList();
2389 }
2390 }
2391
2392 core.Map<core.String, core.Object> toJson() {
2393 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
2394 if (nextPageToken != null) {
2395 _json["nextPageToken"] = nextPageToken;
2396 }
2397 if (operations != null) {
2398 _json["operations"] = operations.map((value) => (value).toJson()).toList() ;
2399 }
2400 return _json;
2401 }
2402 }
2403
2404 /** The runtime logging configuration of the job. */
2405 class LoggingConfiguration {
2406 /**
2407 * The per-package log levels for the driver. This may include the "root"
2408 * package name to configure the rootLogger. Examples: 'com.google = FATAL',
2409 * 'root = INFO', 'org.apache = DEBUG'.
2410 */
2411 core.Map<core.String, core.String> driverLogLevels;
2412
2413 LoggingConfiguration();
2414
2415 LoggingConfiguration.fromJson(core.Map _json) {
2416 if (_json.containsKey("driverLogLevels")) {
2417 driverLogLevels = _json["driverLogLevels"];
2418 }
2419 }
2420
2421 core.Map<core.String, core.Object> toJson() {
2422 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
2423 if (driverLogLevels != null) {
2424 _json["driverLogLevels"] = driverLogLevels;
2425 }
2426 return _json;
2427 }
2428 }
2429
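// A sketch of per-package driver log levels, following the Examples in the
// comment above ('root' configures the rootLogger).
//
//   final logging = new LoggingConfiguration()
//     ..driverLogLevels = {'root': 'INFO', 'org.apache': 'DEBUG'};
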
2430 /** Specifies the resources used to actively manage an instance group. */
2431 class ManagedGroupConfiguration {
2432 /** Output-only The name of the Instance Group Manager for this group. */
2433 core.String instanceGroupManagerName;
2434 /**
2435 * Output-only The name of the Instance Template used for the Managed Instance
2436 * Group.
2437 */
2438 core.String instanceTemplateName;
2439
2440 ManagedGroupConfiguration();
2441
2442 ManagedGroupConfiguration.fromJson(core.Map _json) {
2443 if (_json.containsKey("instanceGroupManagerName")) {
2444 instanceGroupManagerName = _json["instanceGroupManagerName"];
2445 }
2446 if (_json.containsKey("instanceTemplateName")) {
2447 instanceTemplateName = _json["instanceTemplateName"];
2448 }
2449 }
2450
2451 core.Map<core.String, core.Object> toJson() {
2452 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
2453 if (instanceGroupManagerName != null) {
2454 _json["instanceGroupManagerName"] = instanceGroupManagerName;
2455 }
2456 if (instanceTemplateName != null) {
2457 _json["instanceTemplateName"] = instanceTemplateName;
2458 }
2459 return _json;
2460 }
2461 }
2462
2463 /**
2464 * Specifies an executable to run on a fully configured node and a timeout
2465 * period for executable completion.
2466 */
2467 class NodeInitializationAction {
2468 /** Required Google Cloud Storage URI of executable file. */
2469 core.String executableFile;
2470 /**
2471 * Optional Amount of time the executable has to complete. Default is 10
2472 * minutes. Cluster creation fails with an explanatory error message (the name
2473 * of the executable that caused the error and the exceeded timeout period) if
2474 * the executable has not completed by the end of the timeout period.
2475 */
2476 core.String executionTimeout;
2477
2478 NodeInitializationAction();
2479
2480 NodeInitializationAction.fromJson(core.Map _json) {
2481 if (_json.containsKey("executableFile")) {
2482 executableFile = _json["executableFile"];
2483 }
2484 if (_json.containsKey("executionTimeout")) {
2485 executionTimeout = _json["executionTimeout"];
2486 }
2487 }
2488
2489 core.Map<core.String, core.Object> toJson() {
2490 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
2491 if (executableFile != null) {
2492 _json["executableFile"] = executableFile;
2493 }
2494 if (executionTimeout != null) {
2495 _json["executionTimeout"] = executionTimeout;
2496 }
2497 return _json;
2498 }
2499 }
2500
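// A sketch of an initialization action; the script URI is hypothetical, and
// the timeout is assumed to use the JSON duration-string form (seconds with
// an 's' suffix).
//
//   final initAction = new NodeInitializationAction()
//     ..executableFile = 'gs://example-bucket/scripts/install-deps.sh'
//     ..executionTimeout = '600s';
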
2501 /**
2502 * This resource represents a long-running operation that is the result of a
2503 * network API call.
2504 */
2505 class Operation {
2506 /**
2507 * If the value is false, it means the operation is still in progress. If
2508 * true, the operation is completed, and either error or response is
2509 * available.
2510 */
2511 core.bool done;
2512 /** The error result of the operation in case of failure or cancellation. */
2513 Status error;
2514 /**
2515 * Service-specific metadata associated with the operation. It typically
2516 * contains progress information and common metadata such as create time. Some
2517 * services might not provide such metadata. Any method that returns a
2518 * long-running operation should document the metadata type, if any.
2519 *
2520 * The values for Object must be JSON objects. It can consist of `num`,
2521 * `String`, `bool` and `null` as well as `Map` and `List` values.
2522 */
2523 core.Map<core.String, core.Object> metadata;
2524 /**
2525 * The server-assigned name, which is only unique within the same service that
2526 * originally returns it. If you use the default HTTP mapping, the name should
2527 * have the format of operations/some/unique/name.
2528 */
2529 core.String name;
2530 /**
2531 * The normal response of the operation in case of success. If the original
2532 * method returns no data on success, such as Delete, the response is
2533 * google.protobuf.Empty. If the original method is standard
2534 * Get/Create/Update, the response should be the resource. For other methods,
2535 * the response should have the type XxxResponse, where Xxx is the original
2536 * method name. For example, if the original method name is TakeSnapshot(),
2537 * the inferred response type is TakeSnapshotResponse.
2538 *
2539 * The values for Object must be JSON objects. It can consist of `num`,
2540 * `String`, `bool` and `null` as well as `Map` and `List` values.
2541 */
2542 core.Map<core.String, core.Object> response;
2543
2544 Operation();
2545
2546 Operation.fromJson(core.Map _json) {
2547 if (_json.containsKey("done")) {
2548 done = _json["done"];
2549 }
2550 if (_json.containsKey("error")) {
2551 error = new Status.fromJson(_json["error"]);
2552 }
2553 if (_json.containsKey("metadata")) {
2554 metadata = _json["metadata"];
2555 }
2556 if (_json.containsKey("name")) {
2557 name = _json["name"];
2558 }
2559 if (_json.containsKey("response")) {
2560 response = _json["response"];
2561 }
2562 }
2563
2564 core.Map<core.String, core.Object> toJson() {
2565 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
2566 if (done != null) {
2567 _json["done"] = done;
2568 }
2569 if (error != null) {
2570 _json["error"] = (error).toJson();
2571 }
2572 if (metadata != null) {
2573 _json["metadata"] = metadata;
2574 }
2575 if (name != null) {
2576 _json["name"] = name;
2577 }
2578 if (response != null) {
2579 _json["response"] = response;
2580 }
2581 return _json;
2582 }
2583 }
2584
2585 /** Metadata describing the operation. */
2586 class OperationMetadata {
2587 /** Name of the cluster for the operation. */
2588 core.String clusterName;
2589 /** Cluster UUID for the operation. */
2590 core.String clusterUuid;
2591 /** Output-only Short description of operation. */
2592 core.String description;
2593 /** Output-only The operation type. */
2594 core.String operationType;
2595 /** Output-only Current operation status. */
2596 OperationStatus status;
2597 /** Output-only Previous operation status. */
2598 core.List<OperationStatus> statusHistory;
2599 /** Output-only Errors encountered during operation execution. */
2600 core.List<core.String> warnings;
2601
2602 OperationMetadata();
2603
2604 OperationMetadata.fromJson(core.Map _json) {
2605 if (_json.containsKey("clusterName")) {
2606 clusterName = _json["clusterName"];
2607 }
2608 if (_json.containsKey("clusterUuid")) {
2609 clusterUuid = _json["clusterUuid"];
2610 }
2611 if (_json.containsKey("description")) {
2612 description = _json["description"];
2613 }
2614 if (_json.containsKey("operationType")) {
2615 operationType = _json["operationType"];
2616 }
2617 if (_json.containsKey("status")) {
2618 status = new OperationStatus.fromJson(_json["status"]);
2619 }
2620 if (_json.containsKey("statusHistory")) {
2621 statusHistory = _json["statusHistory"].map((value) => new OperationStatus.fromJson(value)).toList();
2622 }
2623 if (_json.containsKey("warnings")) {
2624 warnings = _json["warnings"];
2625 }
2626 }
2627
2628 core.Map<core.String, core.Object> toJson() {
2629 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
2630 if (clusterName != null) {
2631 _json["clusterName"] = clusterName;
2632 }
2633 if (clusterUuid != null) {
2634 _json["clusterUuid"] = clusterUuid;
2635 }
2636 if (description != null) {
2637 _json["description"] = description;
2638 }
2639 if (operationType != null) {
2640 _json["operationType"] = operationType;
2641 }
2642 if (status != null) {
2643 _json["status"] = (status).toJson();
2644 }
2645 if (statusHistory != null) {
2646 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to List();
2647 }
2648 if (warnings != null) {
2649 _json["warnings"] = warnings;
2650 }
2651 return _json;
2652 }
2653 }
2654
2655 /** The status of the operation. */
2656 class OperationStatus {
2657 /** A message containing any operation metadata details. */
2658 core.String details;
2659 /** A message containing the detailed operation state. */
2660 core.String innerState;
2661 /**
2662 * A message containing the operation state.
2663 * Possible string values are:
2664 * - "UNKNOWN" : Unused.
2665 * - "PENDING" : The operation has been created.
2666 * - "RUNNING" : The operation is running.
2667 * - "DONE" : The operation is done; either cancelled or completed.
2668 */
2669 core.String state;
2670 /** The time this state was entered. */
2671 core.String stateStartTime;
2672
2673 OperationStatus();
2674
2675 OperationStatus.fromJson(core.Map _json) {
2676 if (_json.containsKey("details")) {
2677 details = _json["details"];
2678 }
2679 if (_json.containsKey("innerState")) {
2680 innerState = _json["innerState"];
2681 }
2682 if (_json.containsKey("state")) {
2683 state = _json["state"];
2684 }
2685 if (_json.containsKey("stateStartTime")) {
2686 stateStartTime = _json["stateStartTime"];
2687 }
2688 }
2689
2690 core.Map<core.String, core.Object> toJson() {
2691 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
2692 if (details != null) {
2693 _json["details"] = details;
2694 }
2695 if (innerState != null) {
2696 _json["innerState"] = innerState;
2697 }
2698 if (state != null) {
2699 _json["state"] = state;
2700 }
2701 if (stateStartTime != null) {
2702 _json["stateStartTime"] = stateStartTime;
2703 }
2704 return _json;
2705 }
2706 }
2707
2708 /** A Cloud Dataproc job for running Pig queries on YARN. */
2709 class PigJob {
2710 /**
2711 * Optional Whether to continue executing queries if a query fails. The
2712 * default value is false. Setting to true can be useful when executing
2713 * independent parallel queries.
2714 */
2715 core.bool continueOnFailure;
2716 /**
2717 * Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client
2718 * and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
2719 */
2720 core.List<core.String> jarFileUris;
2721 /** Optional The runtime log configuration for job execution. */
2722 LoggingConfiguration loggingConfiguration;
2723 /**
2724 * Optional A mapping of property names to values, used to configure Pig.
2725 * Properties that conflict with values set by the Cloud Dataproc API may be
2726 * overwritten. Can include properties set in /etc/hadoop/conf/ *-site.xml,
2727 * /etc/pig/conf/pig.properties, and classes in user code.
2728 */
2729 core.Map<core.String, core.String> properties;
2730 /** The HCFS URI of the script that contains the Pig queries. */
2731 core.String queryFileUri;
2732 /** A list of queries. */
2733 QueryList queryList;
2734 /**
2735 * Optional Mapping of query variable names to values (equivalent to the Pig
2736 * command: name=[value]).
2737 */
2738 core.Map<core.String, core.String> scriptVariables;
2739
2740 PigJob();
2741
2742 PigJob.fromJson(core.Map _json) {
2743 if (_json.containsKey("continueOnFailure")) {
2744 continueOnFailure = _json["continueOnFailure"];
2745 }
2746 if (_json.containsKey("jarFileUris")) {
2747 jarFileUris = _json["jarFileUris"];
2748 }
2749 if (_json.containsKey("loggingConfiguration")) {
2750 loggingConfiguration = new LoggingConfiguration.fromJson(_json["loggingConfiguration"]);
2751 }
2752 if (_json.containsKey("properties")) {
2753 properties = _json["properties"];
2754 }
2755 if (_json.containsKey("queryFileUri")) {
2756 queryFileUri = _json["queryFileUri"];
2757 }
2758 if (_json.containsKey("queryList")) {
2759 queryList = new QueryList.fromJson(_json["queryList"]);
2760 }
2761 if (_json.containsKey("scriptVariables")) {
2762 scriptVariables = _json["scriptVariables"];
2763 }
2764 }
2765
2766 core.Map<core.String, core.Object> toJson() {
2767 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
2768 if (continueOnFailure != null) {
2769 _json["continueOnFailure"] = continueOnFailure;
2770 }
2771 if (jarFileUris != null) {
2772 _json["jarFileUris"] = jarFileUris;
2773 }
2774 if (loggingConfiguration != null) {
2775 _json["loggingConfiguration"] = (loggingConfiguration).toJson();
2776 }
2777 if (properties != null) {
2778 _json["properties"] = properties;
2779 }
2780 if (queryFileUri != null) {
2781 _json["queryFileUri"] = queryFileUri;
2782 }
2783 if (queryList != null) {
2784 _json["queryList"] = (queryList).toJson();
2785 }
2786 if (scriptVariables != null) {
2787 _json["scriptVariables"] = scriptVariables;
2788 }
2789 return _json;
2790 }
2791 }
2792
2793 /** A Cloud Dataproc job for running PySpark applications on YARN. */
2794 class PySparkJob {
2795 /**
2796 * Optional HCFS URIs of archives to be extracted in the working directory of
2797 * Python drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
2798 */
2799 core.List<core.String> archiveUris;
2800 /**
2801 * Optional The arguments to pass to the driver. Do not include arguments,
2802 * such as --conf, that can be set as job properties, since a collision may
2803 * occur that causes an incorrect job submission.
2804 */
2805 core.List<core.String> args;
2806 /**
2807 * Optional HCFS URIs of files to be copied to the working directory of Python
2808 * drivers and distributed tasks. Useful for naively parallel tasks.
2809 */
2810 core.List<core.String> fileUris;
2811 /**
2812 * Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python
2813 * driver and tasks.
2814 */
2815 core.List<core.String> jarFileUris;
2816 /** Optional The runtime log configuration for job execution. */
2817 LoggingConfiguration loggingConfiguration;
2818 /**
2819 * Required The Hadoop Compatible Filesystem (HCFS) URI of the main Python
2820 * file to use as the driver. Must be a .py file.
2821 */
2822 core.String mainPythonFileUri;
2823 /**
2824 * Optional A mapping of property names to values, used to configure PySpark.
2825 * Properties that conflict with values set by the Cloud Dataproc API may be
2826 * overwritten. Can include properties set in
2827 * /etc/spark/conf/spark-defaults.conf and classes in user code.
2828 */
2829 core.Map<core.String, core.String> properties;
2830 /**
2831 * Optional HCFS file URIs of Python files to pass to the PySpark framework.
2832 * Supported file types: .py, .egg, and .zip.
2833 */
2834 core.List<core.String> pythonFileUris;
2835
2836 PySparkJob();
2837
2838 PySparkJob.fromJson(core.Map _json) {
2839 if (_json.containsKey("archiveUris")) {
2840 archiveUris = _json["archiveUris"];
2841 }
2842 if (_json.containsKey("args")) {
2843 args = _json["args"];
2844 }
2845 if (_json.containsKey("fileUris")) {
2846 fileUris = _json["fileUris"];
2847 }
2848 if (_json.containsKey("jarFileUris")) {
2849 jarFileUris = _json["jarFileUris"];
2850 }
2851 if (_json.containsKey("loggingConfiguration")) {
2852 loggingConfiguration = new LoggingConfiguration.fromJson(_json["loggingConfiguration"]);
2853 }
2854 if (_json.containsKey("mainPythonFileUri")) {
2855 mainPythonFileUri = _json["mainPythonFileUri"];
2856 }
2857 if (_json.containsKey("properties")) {
2858 properties = _json["properties"];
2859 }
2860 if (_json.containsKey("pythonFileUris")) {
2861 pythonFileUris = _json["pythonFileUris"];
2862 }
2863 }
2864
2865 core.Map<core.String, core.Object> toJson() {
2866 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
2867 if (archiveUris != null) {
2868 _json["archiveUris"] = archiveUris;
2869 }
2870 if (args != null) {
2871 _json["args"] = args;
2872 }
2873 if (fileUris != null) {
2874 _json["fileUris"] = fileUris;
2875 }
2876 if (jarFileUris != null) {
2877 _json["jarFileUris"] = jarFileUris;
2878 }
2879 if (loggingConfiguration != null) {
2880 _json["loggingConfiguration"] = (loggingConfiguration).toJson();
2881 }
2882 if (mainPythonFileUri != null) {
2883 _json["mainPythonFileUri"] = mainPythonFileUri;
2884 }
2885 if (properties != null) {
2886 _json["properties"] = properties;
2887 }
2888 if (pythonFileUris != null) {
2889 _json["pythonFileUris"] = pythonFileUris;
2890 }
2891 return _json;
2892 }
2893 }
2894
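// A sketch of a PySparkJob; the URIs are hypothetical. mainPythonFileUri is
// the only required field and must point to a .py file.
//
//   final pysparkJob = new PySparkJob()
//     ..mainPythonFileUri = 'gs://example-bucket/jobs/process.py'
//     ..pythonFileUris = ['gs://example-bucket/jobs/helpers.zip'];
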
2895 /** A list of queries to run on a cluster. */
2896 class QueryList {
2897 /**
2898 * Required The queries to execute. You do not need to terminate a query with
2899 * a semicolon. Multiple queries can be specified in one string by separating
2900 * each with a semicolon. Here is an example of a Cloud Dataproc API snippet
2901 * that uses a QueryList to specify a HiveJob:
2902 * "hiveJob": {
2903 * "queryList": {
2904 * "queries": [
2905 * "query1",
2906 * "query2",
2907 * "query3;query4",
2908 * ]
2909 * }
2910 * }
2911 */
2912 core.List<core.String> queries;
2913
2914 QueryList();
2915
2916 QueryList.fromJson(core.Map _json) {
2917 if (_json.containsKey("queries")) {
2918 queries = _json["queries"];
2919 }
2920 }
2921
2922 core.Map<core.String, core.Object> toJson() {
2923 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
2924 if (queries != null) {
2925 _json["queries"] = queries;
2926 }
2927 return _json;
2928 }
2929 }
2930
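// The Dart equivalent of the JSON snippet in the comment above: queries need
// no trailing semicolon, and one string may carry several queries separated
// by semicolons.
//
//   final queryList = new QueryList()
//     ..queries = ['query1', 'query2', 'query3;query4'];
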
2931 /**
2932 * Specifies the selection and configuration of software inside the cluster.
2933 */
2934 class SoftwareConfiguration {
2935 /**
2936 * Optional The version of software inside the cluster. It must match the
2937 * regular expression [0-9]+\.[0-9]+. If unspecified, it defaults to the
2938 * latest version (see Cloud Dataproc Versioning).
2939 */
2940 core.String imageVersion;
2941 /**
2942 * Optional The properties to set on daemon configuration files. Property keys
2943 * are specified in "prefix:property" format, such as "core:fs.defaultFS". The
2944 * following are supported prefixes and their mappings: core - core-site.xml,
2945 * hdfs - hdfs-site.xml, mapred - mapred-site.xml, yarn - yarn-site.xml,
2946 * hive - hive-site.xml, pig - pig.properties, spark - spark-defaults.conf.
2947 */
2948 core.Map<core.String, core.String> properties;
2949
2950 SoftwareConfiguration();
2951
2952 SoftwareConfiguration.fromJson(core.Map _json) {
2953 if (_json.containsKey("imageVersion")) {
2954 imageVersion = _json["imageVersion"];
2955 }
2956 if (_json.containsKey("properties")) {
2957 properties = _json["properties"];
2958 }
2959 }
2960
2961 core.Map<core.String, core.Object> toJson() {
2962 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
2963 if (imageVersion != null) {
2964 _json["imageVersion"] = imageVersion;
2965 }
2966 if (properties != null) {
2967 _json["properties"] = properties;
2968 }
2969 return _json;
2970 }
2971 }
2972
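// A sketch of daemon properties using the "prefix:property" key format
// described above; the values shown are hypothetical.
//
//   final software = new SoftwareConfiguration()
//     ..imageVersion = '1.1'
//     ..properties = {'spark:spark.executor.memory': '4g'};
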
2973 /** A Cloud Dataproc job for running Spark applications on YARN. */
2974 class SparkJob {
2975 /**
2976 * Optional HCFS URIs of archives to be extracted in the working directory of
2977 * Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz,
2978 * and .zip.
2979 */
2980 core.List<core.String> archiveUris;
2981 /**
2982 * Optional The arguments to pass to the driver. Do not include arguments,
2983 * such as --conf, that can be set as job properties, since a collision may
2984 * occur that causes an incorrect job submission.
2985 */
2986 core.List<core.String> args;
2987 /**
2988 * Optional HCFS URIs of files to be copied to the working directory of Spark
2989 * drivers and distributed tasks. Useful for naively parallel tasks.
2990 */
2991 core.List<core.String> fileUris;
2992 /**
2993 * Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark
2994 * driver and tasks.
2995 */
2996 core.List<core.String> jarFileUris;
2997 /** Optional The runtime log configuration for job execution. */
2998 LoggingConfiguration loggingConfiguration;
2999 /**
3000 * The name of the driver's main class. The jar file that contains the class
3001 * must be in the default CLASSPATH or specified in jar_file_uris.
3002 */
3003 core.String mainClass;
3004 /**
3005 * The Hadoop Compatible Filesystem (HCFS) URI of the jar file that contains
3006 * the main class.
3007 */
3008 core.String mainJarFileUri;
3009 /**
3010 * Optional A mapping of property names to values, used to configure Spark.
3011 * Properties that conflict with values set by the Cloud Dataproc API may be
3012 * overwritten. Can include properties set in
3013 * /etc/spark/conf/spark-defaults.conf and classes in user code.
3014 */
3015 core.Map<core.String, core.String> properties;
3016
3017 SparkJob();
3018
3019 SparkJob.fromJson(core.Map _json) {
3020 if (_json.containsKey("archiveUris")) {
3021 archiveUris = _json["archiveUris"];
3022 }
3023 if (_json.containsKey("args")) {
3024 args = _json["args"];
3025 }
3026 if (_json.containsKey("fileUris")) {
3027 fileUris = _json["fileUris"];
3028 }
3029 if (_json.containsKey("jarFileUris")) {
3030 jarFileUris = _json["jarFileUris"];
3031 }
3032 if (_json.containsKey("loggingConfiguration")) {
3033 loggingConfiguration = new LoggingConfiguration.fromJson(_json["loggingConfiguration"]);
3034 }
3035 if (_json.containsKey("mainClass")) {
3036 mainClass = _json["mainClass"];
3037 }
3038 if (_json.containsKey("mainJarFileUri")) {
3039 mainJarFileUri = _json["mainJarFileUri"];
3040 }
3041 if (_json.containsKey("properties")) {
3042 properties = _json["properties"];
3043 }
3044 }
3045
3046 core.Map<core.String, core.Object> toJson() {
3047 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
3048 if (archiveUris != null) {
3049 _json["archiveUris"] = archiveUris;
3050 }
3051 if (args != null) {
3052 _json["args"] = args;
3053 }
3054 if (fileUris != null) {
3055 _json["fileUris"] = fileUris;
3056 }
3057 if (jarFileUris != null) {
3058 _json["jarFileUris"] = jarFileUris;
3059 }
3060 if (loggingConfiguration != null) {
3061 _json["loggingConfiguration"] = (loggingConfiguration).toJson();
3062 }
3063 if (mainClass != null) {
3064 _json["mainClass"] = mainClass;
3065 }
3066 if (mainJarFileUri != null) {
3067 _json["mainJarFileUri"] = mainJarFileUri;
3068 }
3069 if (properties != null) {
3070 _json["properties"] = properties;
3071 }
3072 return _json;
3073 }
3074 }
3075
3076 /** A Cloud Dataproc job for running Spark SQL queries. */
3077 class SparkSqlJob {
3078 /** Optional HCFS URIs of jar files to be added to the Spark CLASSPATH. */
3079 core.List<core.String> jarFileUris;
3080 /** Optional The runtime log configuration for job execution. */
3081 LoggingConfiguration loggingConfiguration;
3082 /**
3083 * Optional A mapping of property names to values, used to configure Spark
3084 * SQL's SparkConf. Properties that conflict with values set by the Cloud
3085 * Dataproc API may be overwritten.
3086 */
3087 core.Map<core.String, core.String> properties;
3088 /** The HCFS URI of the script that contains SQL queries. */
3089 core.String queryFileUri;
3090 /** A list of queries. */
3091 QueryList queryList;
3092 /**
3093 * Optional Mapping of query variable names to values (equivalent to the Spark
3094 * SQL command: SET name="value";).
3095 */
3096 core.Map<core.String, core.String> scriptVariables;
3097
3098 SparkSqlJob();
3099
3100 SparkSqlJob.fromJson(core.Map _json) {
3101 if (_json.containsKey("jarFileUris")) {
3102 jarFileUris = _json["jarFileUris"];
3103 }
3104 if (_json.containsKey("loggingConfiguration")) {
3105 loggingConfiguration = new LoggingConfiguration.fromJson(_json["loggingConfiguration"]);
3106 }
3107 if (_json.containsKey("properties")) {
3108 properties = _json["properties"];
3109 }
3110 if (_json.containsKey("queryFileUri")) {
3111 queryFileUri = _json["queryFileUri"];
3112 }
3113 if (_json.containsKey("queryList")) {
3114 queryList = new QueryList.fromJson(_json["queryList"]);
3115 }
3116 if (_json.containsKey("scriptVariables")) {
3117 scriptVariables = _json["scriptVariables"];
3118 }
3119 }
3120
3121 core.Map<core.String, core.Object> toJson() {
3122 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
3123 if (jarFileUris != null) {
3124 _json["jarFileUris"] = jarFileUris;
3125 }
3126 if (loggingConfiguration != null) {
3127 _json["loggingConfiguration"] = (loggingConfiguration).toJson();
3128 }
3129 if (properties != null) {
3130 _json["properties"] = properties;
3131 }
3132 if (queryFileUri != null) {
3133 _json["queryFileUri"] = queryFileUri;
3134 }
3135 if (queryList != null) {
3136 _json["queryList"] = (queryList).toJson();
3137 }
3138 if (scriptVariables != null) {
3139 _json["scriptVariables"] = scriptVariables;
3140 }
3141 return _json;
3142 }
3143 }
3144
3145 /**
3146 * The Status type defines a logical error model that is suitable for different
3147 * programming environments, including REST APIs and RPC APIs. It is used by
3148 * gRPC (https://github.com/grpc). The error model is designed to be:
3149 * Simple to use and understand for most users
3150 * Flexible enough to meet unexpected needs. Overview: The Status message
3151 * contains three pieces of data: error code, error message, and error details.
3152 * The error code should be an enum value of google.rpc.Code, but it may accept additional
3153 * error codes if needed. The error message should be a developer-facing English
3154 * message that helps developers understand and resolve the error. If a
3155 * localized user-facing error message is needed, put the localized message in
3156 * the error details or localize it in the client. The optional error details
3157 * may contain arbitrary information about the error. There is a predefined set
3158 * of error detail types in the package google.rpc that can be used for common
3159 * error conditions. Language mapping: The Status message is the logical
3160 * representation of the error model, but it is not necessarily the actual wire
3161 * format. When the Status message is exposed in different client libraries and
3162 * different wire protocols, it can be mapped differently. For example, it will
3163 * likely be mapped to some exceptions in Java, but more likely mapped to some
3164 * error codes in C. Other uses: The error model and the Status message can be used
3165 * in a variety of environments, either with or without APIs, to provide a
3166 * consistent developer experience across different environments. Example uses of
3167 * this error model include:
3168 * Partial errors. If a service needs to return partial errors to the client, it
3169 * may embed the Status in the normal response to indicate the partial errors.
3170 * Workflow errors. A typical workflow has multiple steps. Each step may have a
3171 * Status message for error reporting.
3172 * Batch operations. If a client uses batch request and batch response, the
3173 * Status message should be used directly inside batch response, one for each
3174 * error sub-response.
3175 * Asynchronous operations. If an API call embeds asynchronous operation results
3176 * in its response, the status of those operations should be represented
3177 * directly using the Status message.
3178 * Logging. If some API errors are stored in logs, the message Status could be
3179 * used directly after any stripping needed for security/privacy reasons.
3180 */
3181 class Status {
3182 /** The status code, which should be an enum value of google.rpc.Code. */
3183 core.int code;
3184 /**
3185 * A list of messages that carry the error details. There will be a common set
3186 * of message types for APIs to use.
3187 *
3188 * The values for Object must be JSON objects. It can consist of `num`,
3189 * `String`, `bool` and `null` as well as `Map` and `List` values.
3190 */
3191 core.List<core.Map<core.String, core.Object>> details;
3192 /**
3193 * A developer-facing error message, which should be in English. Any
3194 * user-facing error message should be localized and sent in the
3195 * google.rpc.Status.details field, or localized by the client.
3196 */
3197 core.String message;
3198
3199 Status();
3200
3201 Status.fromJson(core.Map _json) {
3202 if (_json.containsKey("code")) {
3203 code = _json["code"];
3204 }
3205 if (_json.containsKey("details")) {
3206 details = _json["details"];
3207 }
3208 if (_json.containsKey("message")) {
3209 message = _json["message"];
3210 }
3211 }
3212
3213 core.Map<core.String, core.Object> toJson() {
3214 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
3215 if (code != null) {
3216 _json["code"] = code;
3217 }
3218 if (details != null) {
3219 _json["details"] = details;
3220 }
3221 if (message != null) {
3222 _json["message"] = message;
3223 }
3224 return _json;
3225 }
3226 }
3227
3228 /** A request to submit a job. */
3229 class SubmitJobRequest {
3230 /** Required The job resource. */
3231 Job job;
3232
3233 SubmitJobRequest();
3234
3235 SubmitJobRequest.fromJson(core.Map _json) {
3236 if (_json.containsKey("job")) {
3237 job = new Job.fromJson(_json["job"]);
3238 }
3239 }
3240
3241 core.Map<core.String, core.Object> toJson() {
3242 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
3243 if (job != null) {
3244 _json["job"] = (job).toJson();
3245 }
3246 return _json;
3247 }
3248 }
3249
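// A sketch tying the pieces together: wrapping a Job in a SubmitJobRequest
// and JSON-encoding it, assuming `job` was built as in the Job sketch above.
//
//   final request = new SubmitJobRequest()..job = job;
//   final payload = convert.JSON.encode(request.toJson());
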
3250 /**
3251 * A YARN application created by a job. Application information is a subset of
3252 * <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
3253 */
3254 class YarnApplication {
3255 /** Required The application name. */
3256 core.String name;
3257 /** Required The numerical progress of the application, from 1 to 100. */
3258 core.double progress;
3259 /**
3260 * Required The application state.
3261 * Possible string values are:
3262 * - "STATE_UNSPECIFIED" : Status is unspecified.
3263 * - "NEW" : Status is NEW.
3264 * - "NEW_SAVING" : Status is NEW_SAVING.
3265 * - "SUBMITTED" : Status is SUBMITTED.
3266 * - "ACCEPTED" : Status is ACCEPTED.
3267 * - "RUNNING" : Status is RUNNING.
3268 * - "FINISHED" : Status is FINISHED.
3269 * - "FAILED" : Status is FAILED.
3270 * - "KILLED" : Status is KILLED.
3271 */
3272 core.String state;
3273 /**
3274 * Optional The HTTP URL of the ApplicationMaster, HistoryServer, or
3275 * TimelineServer that provides application-specific information. The URL uses
3276 * the internal hostname, and requires a proxy server for resolution and,
3277 * possibly, access.
3278 */
3279 core.String trackingUrl;
3280
3281 YarnApplication();
3282
3283 YarnApplication.fromJson(core.Map _json) {
3284 if (_json.containsKey("name")) {
3285 name = _json["name"];
3286 }
3287 if (_json.containsKey("progress")) {
3288 progress = _json["progress"];
3289 }
3290 if (_json.containsKey("state")) {
3291 state = _json["state"];
3292 }
3293 if (_json.containsKey("trackingUrl")) {
3294 trackingUrl = _json["trackingUrl"];
3295 }
3296 }
3297
3298 core.Map<core.String, core.Object> toJson() {
3299 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
3300 if (name != null) {
3301 _json["name"] = name;
3302 }
3303 if (progress != null) {
3304 _json["progress"] = progress;
3305 }
3306 if (state != null) {
3307 _json["state"] = state;
3308 }
3309 if (trackingUrl != null) {
3310 _json["trackingUrl"] = trackingUrl;
3311 }
3312 return _json;
3313 }
3314 }