// This is a generated file (see the discoveryapis_generator project).

library googleapis.dataproc.v1beta2;

import 'dart:core' as core;
import 'dart:async' as async;
import 'dart:convert' as convert;

import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons;
import 'package:http/http.dart' as http;

export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' show
    ApiRequestError, DetailedApiRequestError;

const core.String USER_AGENT = 'dart-api-client dataproc/v1beta2';
/** Manages Hadoop-based clusters and jobs on Google Cloud Platform. */
class DataprocApi {
  /** View and manage your data across Google Cloud Platform services */
  static const CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform";

  final commons.ApiRequester _requester;

  ProjectsResourceApi get projects => new ProjectsResourceApi(_requester);

  DataprocApi(http.Client client, {core.String rootUrl: "https://dataproc.googleapis.com/", core.String servicePath: ""}) :
      _requester = new commons.ApiRequester(client, rootUrl, servicePath, USER_AGENT);
}
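
// A minimal usage sketch (illustrative, not part of the generated surface):
// `authClient` is an assumed, already-authorized [http.Client], for example
// one obtained via package:googleapis_auth using
// [DataprocApi.CloudPlatformScope].
//
//     final api = new DataprocApi(authClient);
//     final clusters = api.projects.regions.clusters;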

class ProjectsResourceApi {
  final commons.ApiRequester _requester;

  ProjectsRegionsResourceApi get regions => new ProjectsRegionsResourceApi(_requester);

  ProjectsResourceApi(commons.ApiRequester client) :
      _requester = client;
}


class ProjectsRegionsResourceApi {
  final commons.ApiRequester _requester;

  ProjectsRegionsClustersResourceApi get clusters => new ProjectsRegionsClustersResourceApi(_requester);
  ProjectsRegionsJobsResourceApi get jobs => new ProjectsRegionsJobsResourceApi(_requester);
  ProjectsRegionsOperationsResourceApi get operations => new ProjectsRegionsOperationsResourceApi(_requester);

  ProjectsRegionsResourceApi(commons.ApiRequester client) :
      _requester = client;
}


class ProjectsRegionsClustersResourceApi {
  final commons.ApiRequester _requester;

  ProjectsRegionsClustersResourceApi(commons.ApiRequester client) :
      _requester = client;

  /**
   * Creates a cluster in a project.
   *
   * [request] - The metadata request object.
   *
   * Request parameters:
   *
   * [projectId] - Required. The ID of the Google Cloud Platform project that
   * the cluster belongs to.
   *
   * [region] - Required. The Cloud Dataproc region in which to handle the
   * request.
   *
   * Completes with an [Operation].
   *
   * Completes with a [commons.ApiRequestError] if the API endpoint returned an
   * error.
   *
   * If the used [http.Client] completes with an error when making a REST call,
   * this method will complete with the same error.
   */
  async.Future<Operation> create(Cluster request, core.String projectId, core.String region) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (request != null) {
      _body = convert.JSON.encode((request).toJson());
    }
    if (projectId == null) {
      throw new core.ArgumentError("Parameter projectId is required.");
    }
    if (region == null) {
      throw new core.ArgumentError("Parameter region is required.");
    }

    _url = 'v1beta2/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters';

    var _response = _requester.request(_url,
        "POST",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response.then((data) => new Operation.fromJson(data));
  }
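
  // Illustrative sketch (names assumed, not part of the generated file):
  // creating a cluster from application code and printing the resulting
  // long-running [Operation]. `api` is a [DataprocApi] instance; the
  // [Cluster] fields used here are defined later in this file.
  //
  //     final cluster = new Cluster()
  //       ..clusterName = 'example-cluster'
  //       ..projectId = 'my-project';
  //     api.projects.regions.clusters
  //         .create(cluster, 'my-project', 'global')
  //         .then((operation) => print(operation.toJson()));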

  /**
   * Deletes a cluster in a project.
   *
   * Request parameters:
   *
   * [projectId] - Required. The ID of the Google Cloud Platform project that
   * the cluster belongs to.
   *
   * [region] - Required. The Cloud Dataproc region in which to handle the
   * request.
   *
   * [clusterName] - Required. The cluster name.
   *
   * Completes with an [Operation].
   *
   * Completes with a [commons.ApiRequestError] if the API endpoint returned an
   * error.
   *
   * If the used [http.Client] completes with an error when making a REST call,
   * this method will complete with the same error.
   */
  async.Future<Operation> delete(core.String projectId, core.String region, core.String clusterName) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (projectId == null) {
      throw new core.ArgumentError("Parameter projectId is required.");
    }
    if (region == null) {
      throw new core.ArgumentError("Parameter region is required.");
    }
    if (clusterName == null) {
      throw new core.ArgumentError("Parameter clusterName is required.");
    }

    _url = 'v1beta2/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters/' + commons.Escaper.ecapeVariable('$clusterName');

    var _response = _requester.request(_url,
        "DELETE",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response.then((data) => new Operation.fromJson(data));
  }

  /**
   * Gets cluster diagnostic information. After the operation completes, the
   * Operation.response field contains DiagnoseClusterOutputLocation.
   *
   * [request] - The metadata request object.
   *
   * Request parameters:
   *
   * [projectId] - Required. The ID of the Google Cloud Platform project that
   * the cluster belongs to.
   *
   * [region] - Required. The Cloud Dataproc region in which to handle the
   * request.
   *
   * [clusterName] - Required. The cluster name.
   *
   * Completes with an [Operation].
   *
   * Completes with a [commons.ApiRequestError] if the API endpoint returned an
   * error.
   *
   * If the used [http.Client] completes with an error when making a REST call,
   * this method will complete with the same error.
   */
  async.Future<Operation> diagnose(DiagnoseClusterRequest request, core.String projectId, core.String region, core.String clusterName) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (request != null) {
      _body = convert.JSON.encode((request).toJson());
    }
    if (projectId == null) {
      throw new core.ArgumentError("Parameter projectId is required.");
    }
    if (region == null) {
      throw new core.ArgumentError("Parameter region is required.");
    }
    if (clusterName == null) {
      throw new core.ArgumentError("Parameter clusterName is required.");
    }

    _url = 'v1beta2/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters/' + commons.Escaper.ecapeVariable('$clusterName') + ':diagnose';

    var _response = _requester.request(_url,
        "POST",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response.then((data) => new Operation.fromJson(data));
  }

  /**
   * Gets the resource representation for a cluster in a project.
   *
   * Request parameters:
   *
   * [projectId] - Required. The ID of the Google Cloud Platform project that
   * the cluster belongs to.
   *
   * [region] - Required. The Cloud Dataproc region in which to handle the
   * request.
   *
   * [clusterName] - Required. The cluster name.
   *
   * Completes with a [Cluster].
   *
   * Completes with a [commons.ApiRequestError] if the API endpoint returned an
   * error.
   *
   * If the used [http.Client] completes with an error when making a REST call,
   * this method will complete with the same error.
   */
  async.Future<Cluster> get(core.String projectId, core.String region, core.String clusterName) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (projectId == null) {
      throw new core.ArgumentError("Parameter projectId is required.");
    }
    if (region == null) {
      throw new core.ArgumentError("Parameter region is required.");
    }
    if (clusterName == null) {
      throw new core.ArgumentError("Parameter clusterName is required.");
    }

    _url = 'v1beta2/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters/' + commons.Escaper.ecapeVariable('$clusterName');

    var _response = _requester.request(_url,
        "GET",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response.then((data) => new Cluster.fromJson(data));
  }

  /**
   * Lists all regions/{region}/clusters in a project.
   *
   * Request parameters:
   *
   * [projectId] - Required. The ID of the Google Cloud Platform project that
   * the cluster belongs to.
   *
   * [region] - Required. The Cloud Dataproc region in which to handle the
   * request.
   *
   * [filter] - Optional. A filter constraining the clusters to list. Filters
   * are case-sensitive and have the following syntax:
   *
   *     field = value AND field = value ...
   *
   * where field is one of status.state, clusterName, or labels.[KEY], and
   * [KEY] is a label key. value can be * to match all values. status.state
   * can be one of the following: ACTIVE, INACTIVE, CREATING, RUNNING, ERROR,
   * DELETING, or UPDATING. ACTIVE contains the CREATING, UPDATING, and
   * RUNNING states. INACTIVE contains the DELETING and ERROR states.
   * clusterName is the name of the cluster provided at creation time. Only
   * the logical AND operator is supported; space-separated items are treated
   * as having an implicit AND operator.
   *
   * Example filter:
   *
   *     status.state = ACTIVE AND clusterName = mycluster
   *     AND labels.env = staging AND labels.starred = *
   *
   * [pageToken] - Optional. The standard List page token.
   *
   * [pageSize] - Optional. The standard List page size.
   *
   * Completes with a [ListClustersResponse].
   *
   * Completes with a [commons.ApiRequestError] if the API endpoint returned an
   * error.
   *
   * If the used [http.Client] completes with an error when making a REST call,
   * this method will complete with the same error.
   */
  async.Future<ListClustersResponse> list(core.String projectId, core.String region, {core.String filter, core.String pageToken, core.int pageSize}) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (projectId == null) {
      throw new core.ArgumentError("Parameter projectId is required.");
    }
    if (region == null) {
      throw new core.ArgumentError("Parameter region is required.");
    }
    if (filter != null) {
      _queryParams["filter"] = [filter];
    }
    if (pageToken != null) {
      _queryParams["pageToken"] = [pageToken];
    }
    if (pageSize != null) {
      _queryParams["pageSize"] = ["${pageSize}"];
    }

    _url = 'v1beta2/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters';

    var _response = _requester.request(_url,
        "GET",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response.then((data) => new ListClustersResponse.fromJson(data));
  }
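
  // Illustrative paging sketch (assumed names): walking every page of
  // [ListClustersResponse]. The response's `clusters` and `nextPageToken`
  // fields are assumed from the Dataproc API surface; they are not defined
  // in this section of the file. `api` is a [DataprocApi] instance.
  //
  //     async.Future listAll([core.String token]) {
  //       return api.projects.regions.clusters
  //           .list('my-project', 'global',
  //                 filter: 'status.state = ACTIVE', pageToken: token)
  //           .then((response) {
  //             response.clusters.forEach((c) => print(c.clusterName));
  //             if (response.nextPageToken != null) {
  //               return listAll(response.nextPageToken);
  //             }
  //           });
  //     }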

  /**
   * Updates a cluster in a project.
   *
   * [request] - The metadata request object.
   *
   * Request parameters:
   *
   * [projectId] - Required. The ID of the Google Cloud Platform project the
   * cluster belongs to.
   *
   * [region] - Required. The Cloud Dataproc region in which to handle the
   * request.
   *
   * [clusterName] - Required. The cluster name.
   *
   * [updateMask] - Required. Specifies the path, relative to
   * <code>Cluster</code>, of the field to update. For example, to change the
   * number of workers in a cluster to 5, the <code>update_mask</code>
   * parameter would be specified as
   * <code>config.worker_config.num_instances</code>, and the PATCH request
   * body would specify the new value, as follows:
   *
   *     {
   *       "config": {
   *         "workerConfig": {
   *           "numInstances": "5"
   *         }
   *       }
   *     }
   *
   * Similarly, to change the number of preemptible workers in a cluster to 5,
   * the <code>update_mask</code> parameter would be
   * <code>config.secondary_worker_config.num_instances</code>, and the PATCH
   * request body would be set as follows:
   *
   *     {
   *       "config": {
   *         "secondaryWorkerConfig": {
   *           "numInstances": "5"
   *         }
   *       }
   *     }
   *
   * <strong>Note:</strong> currently only some fields can be updated:
   *
   *     |Mask                                        |Purpose                      |
   *     |labels                                      |Updates labels               |
   *     |config.worker_config.num_instances          |Resize primary worker group  |
   *     |config.secondary_worker_config.num_instances|Resize secondary worker group|
   *
   * Completes with an [Operation].
   *
   * Completes with a [commons.ApiRequestError] if the API endpoint returned an
   * error.
   *
   * If the used [http.Client] completes with an error when making a REST call,
   * this method will complete with the same error.
   */
  async.Future<Operation> patch(Cluster request, core.String projectId, core.String region, core.String clusterName, {core.String updateMask}) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (request != null) {
      _body = convert.JSON.encode((request).toJson());
    }
    if (projectId == null) {
      throw new core.ArgumentError("Parameter projectId is required.");
    }
    if (region == null) {
      throw new core.ArgumentError("Parameter region is required.");
    }
    if (clusterName == null) {
      throw new core.ArgumentError("Parameter clusterName is required.");
    }
    if (updateMask != null) {
      _queryParams["updateMask"] = [updateMask];
    }

    _url = 'v1beta2/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters/' + commons.Escaper.ecapeVariable('$clusterName');

    var _response = _requester.request(_url,
        "PATCH",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response.then((data) => new Operation.fromJson(data));
  }
}
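
// Illustrative sketch: resizing the primary worker group to five instances
// with patch, mirroring the update-mask example in the doc comment above.
// [InstanceGroupConfig] is referenced in this file but defined outside this
// section; its `numInstances` field is assumed here.
//
//     final update = new Cluster()
//       ..config = (new ClusterConfig()
//         ..workerConfig = (new InstanceGroupConfig()..numInstances = 5));
//     api.projects.regions.clusters.patch(
//         update, 'my-project', 'global', 'example-cluster',
//         updateMask: 'config.worker_config.num_instances');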

class ProjectsRegionsJobsResourceApi {
  final commons.ApiRequester _requester;

  ProjectsRegionsJobsResourceApi(commons.ApiRequester client) :
      _requester = client;

  /**
   * Starts a job cancellation request. To access the job resource after
   * cancellation, call regions/{region}/jobs.list or regions/{region}/jobs.get.
   *
   * [request] - The metadata request object.
   *
   * Request parameters:
   *
   * [projectId] - Required. The ID of the Google Cloud Platform project that
   * the job belongs to.
   *
   * [region] - Required. The Cloud Dataproc region in which to handle the
   * request.
   *
   * [jobId] - Required. The job ID.
   *
   * Completes with a [Job].
   *
   * Completes with a [commons.ApiRequestError] if the API endpoint returned an
   * error.
   *
   * If the used [http.Client] completes with an error when making a REST call,
   * this method will complete with the same error.
   */
  async.Future<Job> cancel(CancelJobRequest request, core.String projectId, core.String region, core.String jobId) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (request != null) {
      _body = convert.JSON.encode((request).toJson());
    }
    if (projectId == null) {
      throw new core.ArgumentError("Parameter projectId is required.");
    }
    if (region == null) {
      throw new core.ArgumentError("Parameter region is required.");
    }
    if (jobId == null) {
      throw new core.ArgumentError("Parameter jobId is required.");
    }

    _url = 'v1beta2/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + ':cancel';

    var _response = _requester.request(_url,
        "POST",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response.then((data) => new Job.fromJson(data));
  }

  /**
   * Deletes the job from the project. If the job is active, the delete fails,
   * and the response returns FAILED_PRECONDITION.
   *
   * Request parameters:
   *
   * [projectId] - Required. The ID of the Google Cloud Platform project that
   * the job belongs to.
   *
   * [region] - Required. The Cloud Dataproc region in which to handle the
   * request.
   *
   * [jobId] - Required. The job ID.
   *
   * Completes with an [Empty].
   *
   * Completes with a [commons.ApiRequestError] if the API endpoint returned an
   * error.
   *
   * If the used [http.Client] completes with an error when making a REST call,
   * this method will complete with the same error.
   */
  async.Future<Empty> delete(core.String projectId, core.String region, core.String jobId) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (projectId == null) {
      throw new core.ArgumentError("Parameter projectId is required.");
    }
    if (region == null) {
      throw new core.ArgumentError("Parameter region is required.");
    }
    if (jobId == null) {
      throw new core.ArgumentError("Parameter jobId is required.");
    }

    _url = 'v1beta2/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId');

    var _response = _requester.request(_url,
        "DELETE",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response.then((data) => new Empty.fromJson(data));
  }

  /**
   * Gets the resource representation for a job in a project.
   *
   * Request parameters:
   *
   * [projectId] - Required. The ID of the Google Cloud Platform project that
   * the job belongs to.
   *
   * [region] - Required. The Cloud Dataproc region in which to handle the
   * request.
   *
   * [jobId] - Required. The job ID.
   *
   * Completes with a [Job].
   *
   * Completes with a [commons.ApiRequestError] if the API endpoint returned an
   * error.
   *
   * If the used [http.Client] completes with an error when making a REST call,
   * this method will complete with the same error.
   */
  async.Future<Job> get(core.String projectId, core.String region, core.String jobId) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (projectId == null) {
      throw new core.ArgumentError("Parameter projectId is required.");
    }
    if (region == null) {
      throw new core.ArgumentError("Parameter region is required.");
    }
    if (jobId == null) {
      throw new core.ArgumentError("Parameter jobId is required.");
    }

    _url = 'v1beta2/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId');

    var _response = _requester.request(_url,
        "GET",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response.then((data) => new Job.fromJson(data));
  }

  /**
   * Lists regions/{region}/jobs in a project.
   *
   * Request parameters:
   *
   * [projectId] - Required. The ID of the Google Cloud Platform project that
   * the job belongs to.
   *
   * [region] - Required. The Cloud Dataproc region in which to handle the
   * request.
   *
   * [filter] - Optional. A filter constraining the jobs to list. Filters are
   * case-sensitive and have the following syntax:
   *
   *     field = value AND field = value ...
   *
   * where field is status.state or labels.[KEY], and [KEY] is a label key.
   * value can be * to match all values. status.state can be either ACTIVE or
   * INACTIVE. Only the logical AND operator is supported; space-separated
   * items are treated as having an implicit AND operator.
   *
   * Example filter:
   *
   *     status.state = ACTIVE AND labels.env = staging AND labels.starred = *
   *
   * [jobStateMatcher] - Optional. Specifies enumerated categories of jobs to
   * list (default = match ALL jobs).
   * Possible string values are:
   * - "ALL"
   * - "ACTIVE"
   * - "NON_ACTIVE"
   *
   * [pageToken] - Optional. The page token, returned by a previous call, to
   * request the next page of results.
   *
   * [pageSize] - Optional. The number of results to return in each response.
   *
   * [clusterName] - Optional. If set, the returned jobs list includes only
   * jobs that were submitted to the named cluster.
   *
   * Completes with a [ListJobsResponse].
   *
   * Completes with a [commons.ApiRequestError] if the API endpoint returned an
   * error.
   *
   * If the used [http.Client] completes with an error when making a REST call,
   * this method will complete with the same error.
   */
  async.Future<ListJobsResponse> list(core.String projectId, core.String region, {core.String filter, core.String jobStateMatcher, core.String pageToken, core.int pageSize, core.String clusterName}) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (projectId == null) {
      throw new core.ArgumentError("Parameter projectId is required.");
    }
    if (region == null) {
      throw new core.ArgumentError("Parameter region is required.");
    }
    if (filter != null) {
      _queryParams["filter"] = [filter];
    }
    if (jobStateMatcher != null) {
      _queryParams["jobStateMatcher"] = [jobStateMatcher];
    }
    if (pageToken != null) {
      _queryParams["pageToken"] = [pageToken];
    }
    if (pageSize != null) {
      _queryParams["pageSize"] = ["${pageSize}"];
    }
    if (clusterName != null) {
      _queryParams["clusterName"] = [clusterName];
    }

    _url = 'v1beta2/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/jobs';

    var _response = _requester.request(_url,
        "GET",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response.then((data) => new ListJobsResponse.fromJson(data));
  }

  /**
   * Updates a job in a project.
   *
   * [request] - The metadata request object.
   *
   * Request parameters:
   *
   * [projectId] - Required. The ID of the Google Cloud Platform project that
   * the job belongs to.
   *
   * [region] - Required. The Cloud Dataproc region in which to handle the
   * request.
   *
   * [jobId] - Required. The job ID.
   *
   * [updateMask] - Required. Specifies the path, relative to <code>Job</code>,
   * of the field to update. For example, to update the labels of a Job the
   * <code>update_mask</code> parameter would be specified as
   * <code>labels</code>, and the PATCH request body would specify the new
   * value. <strong>Note:</strong> Currently, <code>labels</code> is the only
   * field that can be updated.
   *
   * Completes with a [Job].
   *
   * Completes with a [commons.ApiRequestError] if the API endpoint returned an
   * error.
   *
   * If the used [http.Client] completes with an error when making a REST call,
   * this method will complete with the same error.
   */
  async.Future<Job> patch(Job request, core.String projectId, core.String region, core.String jobId, {core.String updateMask}) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (request != null) {
      _body = convert.JSON.encode((request).toJson());
    }
    if (projectId == null) {
      throw new core.ArgumentError("Parameter projectId is required.");
    }
    if (region == null) {
      throw new core.ArgumentError("Parameter region is required.");
    }
    if (jobId == null) {
      throw new core.ArgumentError("Parameter jobId is required.");
    }
    if (updateMask != null) {
      _queryParams["updateMask"] = [updateMask];
    }

    _url = 'v1beta2/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId');

    var _response = _requester.request(_url,
        "PATCH",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response.then((data) => new Job.fromJson(data));
  }

  /**
   * Submits a job to a cluster.
   *
   * [request] - The metadata request object.
   *
   * Request parameters:
   *
   * [projectId] - Required. The ID of the Google Cloud Platform project that
   * the job belongs to.
   *
   * [region] - Required. The Cloud Dataproc region in which to handle the
   * request.
   *
   * Completes with a [Job].
   *
   * Completes with a [commons.ApiRequestError] if the API endpoint returned an
   * error.
   *
   * If the used [http.Client] completes with an error when making a REST call,
   * this method will complete with the same error.
   */
  async.Future<Job> submit(SubmitJobRequest request, core.String projectId, core.String region) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (request != null) {
      _body = convert.JSON.encode((request).toJson());
    }
    if (projectId == null) {
      throw new core.ArgumentError("Parameter projectId is required.");
    }
    if (region == null) {
      throw new core.ArgumentError("Parameter region is required.");
    }

    _url = 'v1beta2/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/jobs:submit';

    var _response = _requester.request(_url,
        "POST",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response.then((data) => new Job.fromJson(data));
  }
}
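
// Illustrative sketch: submitting a job. [SubmitJobRequest] and [Job] are
// referenced above but defined outside this section; the request's `job`
// field and the prepared `myJob` instance are assumed here.
//
//     final request = new SubmitJobRequest()..job = myJob;
//     api.projects.regions.jobs
//         .submit(request, 'my-project', 'global')
//         .then((job) => print(job.toJson()));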

class ProjectsRegionsOperationsResourceApi {
  final commons.ApiRequester _requester;

  ProjectsRegionsOperationsResourceApi(commons.ApiRequester client) :
      _requester = client;

  /**
   * Starts asynchronous cancellation on a long-running operation. The server
   * makes a best effort to cancel the operation, but success is not
   * guaranteed. If the server doesn't support this method, it returns
   * google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or
   * other methods to check whether the cancellation succeeded or whether the
   * operation completed despite cancellation. On successful cancellation, the
   * operation is not deleted; instead, it becomes an operation with an
   * Operation.error value with a google.rpc.Status.code of 1, corresponding
   * to Code.CANCELLED.
   *
   * Request parameters:
   *
   * [name] - The name of the operation resource to be cancelled.
   * Value must have pattern "^projects/[^/]+/regions/[^/]+/operations/[^/]+$".
   *
   * Completes with an [Empty].
   *
   * Completes with a [commons.ApiRequestError] if the API endpoint returned an
   * error.
   *
   * If the used [http.Client] completes with an error when making a REST call,
   * this method will complete with the same error.
   */
  async.Future<Empty> cancel(core.String name) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (name == null) {
      throw new core.ArgumentError("Parameter name is required.");
    }

    _url = 'v1beta2/' + commons.Escaper.ecapeVariableReserved('$name') + ':cancel';

    var _response = _requester.request(_url,
        "POST",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response.then((data) => new Empty.fromJson(data));
  }

  /**
   * Deletes a long-running operation. This method indicates that the client is
   * no longer interested in the operation result. It does not cancel the
   * operation. If the server doesn't support this method, it returns
   * google.rpc.Code.UNIMPLEMENTED.
   *
   * Request parameters:
   *
   * [name] - The name of the operation resource to be deleted.
   * Value must have pattern "^projects/[^/]+/regions/[^/]+/operations/[^/]+$".
   *
   * Completes with an [Empty].
   *
   * Completes with a [commons.ApiRequestError] if the API endpoint returned an
   * error.
   *
   * If the used [http.Client] completes with an error when making a REST call,
   * this method will complete with the same error.
   */
  async.Future<Empty> delete(core.String name) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (name == null) {
      throw new core.ArgumentError("Parameter name is required.");
    }

    _url = 'v1beta2/' + commons.Escaper.ecapeVariableReserved('$name');

    var _response = _requester.request(_url,
        "DELETE",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response.then((data) => new Empty.fromJson(data));
  }

  /**
   * Gets the latest state of a long-running operation. Clients can use this
   * method to poll the operation result at intervals as recommended by the API
   * service.
   *
   * Request parameters:
   *
   * [name] - The name of the operation resource.
   * Value must have pattern "^projects/[^/]+/regions/[^/]+/operations/[^/]+$".
   *
   * Completes with an [Operation].
   *
   * Completes with a [commons.ApiRequestError] if the API endpoint returned an
   * error.
   *
   * If the used [http.Client] completes with an error when making a REST call,
   * this method will complete with the same error.
   */
  async.Future<Operation> get(core.String name) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (name == null) {
      throw new core.ArgumentError("Parameter name is required.");
    }

    _url = 'v1beta2/' + commons.Escaper.ecapeVariableReserved('$name');

    var _response = _requester.request(_url,
        "GET",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response.then((data) => new Operation.fromJson(data));
  }
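
  // Illustrative polling sketch (assumed names): checking a long-running
  // operation until it completes. [Operation] is defined outside this
  // section; its `done` field is assumed from the google.longrunning
  // Operation resource. `api` is a [DataprocApi] instance.
  //
  //     async.Future<Operation> waitFor(core.String name) {
  //       return api.projects.regions.operations.get(name).then((op) {
  //         if (op.done == true) return op;
  //         return new async.Future.delayed(
  //             const core.Duration(seconds: 5), () => waitFor(name));
  //       });
  //     }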

  /**
   * Lists operations that match the specified filter in the request. If the
   * server doesn't support this method, it returns UNIMPLEMENTED. NOTE: the
   * name binding allows API services to override the binding to use different
   * resource name schemes, such as users / * /operations. To override the
   * binding, API services can add a binding such as "/v1/{name=users / *
   * }/operations" to their service configuration. For backwards
   * compatibility, the default name includes the operations collection id;
   * however, overriding users must ensure the name binding is the parent
   * resource, without the operations collection id.
   *
   * Request parameters:
   *
   * [name] - The name of the operation's parent resource.
   * Value must have pattern "^projects/[^/]+/regions/[^/]+/operations$".
   *
   * [pageSize] - The standard list page size.
   *
   * [filter] - The standard list filter.
   *
   * [pageToken] - The standard list page token.
   *
   * Completes with a [ListOperationsResponse].
   *
   * Completes with a [commons.ApiRequestError] if the API endpoint returned an
   * error.
   *
   * If the used [http.Client] completes with an error when making a REST call,
   * this method will complete with the same error.
   */
  async.Future<ListOperationsResponse> list(core.String name, {core.int pageSize, core.String filter, core.String pageToken}) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (name == null) {
      throw new core.ArgumentError("Parameter name is required.");
    }
    if (pageSize != null) {
      _queryParams["pageSize"] = ["${pageSize}"];
    }
    if (filter != null) {
      _queryParams["filter"] = [filter];
    }
    if (pageToken != null) {
      _queryParams["pageToken"] = [pageToken];
    }

    _url = 'v1beta2/' + commons.Escaper.ecapeVariableReserved('$name');

    var _response = _requester.request(_url,
        "GET",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response.then((data) => new ListOperationsResponse.fromJson(data));
  }
}


/**
 * Specifies the type and number of accelerator cards attached to the
 * instances of an instance group (see GPUs on Compute Engine).
 */
class AcceleratorConfig {
  /**
   * The number of the accelerator cards of this type exposed to this
   * instance.
   */
  core.int acceleratorCount;
  /**
   * Full URL, partial URI, or short name of the accelerator type resource to
   * expose to this instance. See Google Compute Engine AcceleratorTypes
   * (/compute/docs/reference/beta/acceleratorTypes). Examples:
   * - https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80
   * - projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80
   * - nvidia-tesla-k80
   */
  core.String acceleratorTypeUri;

  AcceleratorConfig();

  AcceleratorConfig.fromJson(core.Map _json) {
    if (_json.containsKey("acceleratorCount")) {
      acceleratorCount = _json["acceleratorCount"];
    }
    if (_json.containsKey("acceleratorTypeUri")) {
      acceleratorTypeUri = _json["acceleratorTypeUri"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (acceleratorCount != null) {
      _json["acceleratorCount"] = acceleratorCount;
    }
    if (acceleratorTypeUri != null) {
      _json["acceleratorTypeUri"] = acceleratorTypeUri;
    }
    return _json;
  }
}
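
// Illustrative sketch: attaching one NVIDIA Tesla K80 per instance, using the
// short-name form of [acceleratorTypeUri] shown in the doc comment above.
//
//     final gpu = new AcceleratorConfig()
//       ..acceleratorCount = 1
//       ..acceleratorTypeUri = 'nvidia-tesla-k80';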

/** A request to cancel a job. */
class CancelJobRequest {

  CancelJobRequest();

  CancelJobRequest.fromJson(core.Map _json) {
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    return _json;
  }
}

/**
 * Describes the identifying information, config, and status of a cluster of
 * Google Compute Engine instances.
 */
class Cluster {
  /**
   * Required. The cluster name. Cluster names within a project must be unique.
   * Names of deleted clusters can be reused.
   */
  core.String clusterName;
  /**
   * Output-only. A cluster UUID (Unique Universal Identifier). Cloud Dataproc
   * generates this value when it creates the cluster.
   */
  core.String clusterUuid;
  /**
   * Required. The cluster config. Note that Cloud Dataproc may set default
   * values, and values may change when clusters are updated.
   */
  ClusterConfig config;
  /**
   * Optional. The labels to associate with this cluster. Label keys must
   * contain 1 to 63 characters, and must conform to RFC 1035
   * (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if
   * present, must contain 1 to 63 characters, and must conform to RFC 1035
   * (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
   * associated with a cluster.
   */
  core.Map<core.String, core.String> labels;
  /**
   * Contains cluster daemon metrics such as HDFS and YARN stats. Beta Feature:
   * This report is available for testing purposes only. It may be changed
   * before final release.
   */
  ClusterMetrics metrics;
  /**
   * Required. The Google Cloud Platform project ID that the cluster belongs
   * to.
   */
  core.String projectId;
  /** Output-only. Cluster status. */
  ClusterStatus status;
  /** Output-only. The previous cluster status. */
  core.List<ClusterStatus> statusHistory;

  Cluster();

  Cluster.fromJson(core.Map _json) {
    if (_json.containsKey("clusterName")) {
      clusterName = _json["clusterName"];
    }
    if (_json.containsKey("clusterUuid")) {
      clusterUuid = _json["clusterUuid"];
    }
    if (_json.containsKey("config")) {
      config = new ClusterConfig.fromJson(_json["config"]);
    }
    if (_json.containsKey("labels")) {
      labels = _json["labels"];
    }
    if (_json.containsKey("metrics")) {
      metrics = new ClusterMetrics.fromJson(_json["metrics"]);
    }
    if (_json.containsKey("projectId")) {
      projectId = _json["projectId"];
    }
    if (_json.containsKey("status")) {
      status = new ClusterStatus.fromJson(_json["status"]);
    }
    if (_json.containsKey("statusHistory")) {
      statusHistory = _json["statusHistory"].map((value) => new ClusterStatus.fromJson(value)).toList();
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (clusterName != null) {
      _json["clusterName"] = clusterName;
    }
    if (clusterUuid != null) {
      _json["clusterUuid"] = clusterUuid;
    }
    if (config != null) {
      _json["config"] = (config).toJson();
    }
    if (labels != null) {
      _json["labels"] = labels;
    }
    if (metrics != null) {
      _json["metrics"] = (metrics).toJson();
    }
    if (projectId != null) {
      _json["projectId"] = projectId;
    }
    if (status != null) {
      _json["status"] = (status).toJson();
    }
    if (statusHistory != null) {
      _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).toList();
    }
    return _json;
  }
}
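
// Illustrative sketch: each schema class pairs a default constructor with
// fromJson/toJson for wire (de)serialization, so instances round-trip through
// a plain JSON map:
//
//     final cluster = new Cluster()
//       ..clusterName = 'example-cluster'
//       ..projectId = 'my-project'
//       ..labels = {'env': 'staging'};
//     final copy = new Cluster.fromJson(cluster.toJson());
//     assert(copy.clusterName == 'example-cluster');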

/** The cluster config. */
class ClusterConfig {
  /**
   * Optional. A Google Cloud Storage staging bucket used for sharing generated
   * SSH keys and config. If you do not specify a staging bucket, Cloud
   * Dataproc will determine an appropriate Cloud Storage location (US, ASIA,
   * or EU) for your cluster's staging bucket according to the Google Compute
   * Engine zone where your cluster is deployed, and then it will create and
   * manage this project-level, per-location bucket for you.
   */
  core.String configBucket;
  /**
   * Required. The shared Google Compute Engine config settings for all
   * instances in a cluster.
   */
  GceClusterConfig gceClusterConfig;
  /**
   * Optional. Commands to execute on each node after config is completed. By
   * default, executables are run on master and all worker nodes. You can test
   * a node's <code>role</code> metadata to run an executable on a master or
   * worker node, as shown below using curl (you can also use wget):
   *
   *     ROLE=$(curl -H Metadata-Flavor:Google
   *         http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
   *     if [[ "${ROLE}" == 'Master' ]]; then
   *       ... master specific actions ...
   *     else
   *       ... worker specific actions ...
   *     fi
   */
  core.List<NodeInitializationAction> initializationActions;
  /**
   * Optional. The Google Compute Engine config settings for the master
   * instance in a cluster.
   */
  InstanceGroupConfig masterConfig;
  /**
   * Optional. The Google Compute Engine config settings for additional worker
   * instances in a cluster.
   */
  InstanceGroupConfig secondaryWorkerConfig;
  /** Optional. The config settings for software inside the cluster. */
  SoftwareConfig softwareConfig;
  /**
   * Optional. The Google Compute Engine config settings for worker instances
   * in a cluster.
   */
  InstanceGroupConfig workerConfig;

  ClusterConfig();

  ClusterConfig.fromJson(core.Map _json) {
    if (_json.containsKey("configBucket")) {
      configBucket = _json["configBucket"];
    }
    if (_json.containsKey("gceClusterConfig")) {
      gceClusterConfig = new GceClusterConfig.fromJson(_json["gceClusterConfig"]);
    }
    if (_json.containsKey("initializationActions")) {
      initializationActions = _json["initializationActions"].map((value) => new NodeInitializationAction.fromJson(value)).toList();
    }
    if (_json.containsKey("masterConfig")) {
      masterConfig = new InstanceGroupConfig.fromJson(_json["masterConfig"]);
    }
    if (_json.containsKey("secondaryWorkerConfig")) {
      secondaryWorkerConfig = new InstanceGroupConfig.fromJson(_json["secondaryWorkerConfig"]);
    }
    if (_json.containsKey("softwareConfig")) {
      softwareConfig = new SoftwareConfig.fromJson(_json["softwareConfig"]);
    }
    if (_json.containsKey("workerConfig")) {
      workerConfig = new InstanceGroupConfig.fromJson(_json["workerConfig"]);
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (configBucket != null) {
      _json["configBucket"] = configBucket;
    }
    if (gceClusterConfig != null) {
      _json["gceClusterConfig"] = (gceClusterConfig).toJson();
    }
    if (initializationActions != null) {
      _json["initializationActions"] = initializationActions.map((value) => (value).toJson()).toList();
    }
    if (masterConfig != null) {
      _json["masterConfig"] = (masterConfig).toJson();
    }
    if (secondaryWorkerConfig != null) {
      _json["secondaryWorkerConfig"] = (secondaryWorkerConfig).toJson();
    }
    if (softwareConfig != null) {
      _json["softwareConfig"] = (softwareConfig).toJson();
    }
    if (workerConfig != null) {
      _json["workerConfig"] = (workerConfig).toJson();
    }
    return _json;
  }
}

/**
 * Contains cluster daemon metrics, such as HDFS and YARN stats. Beta Feature:
 * This report is available for testing purposes only. It may be changed before
 * final release.
 */
class ClusterMetrics {
  /** The HDFS metrics. */
  core.Map<core.String, core.String> hdfsMetrics;
  /** The YARN metrics. */
  core.Map<core.String, core.String> yarnMetrics;

  ClusterMetrics();

  ClusterMetrics.fromJson(core.Map _json) {
    if (_json.containsKey("hdfsMetrics")) {
      hdfsMetrics = _json["hdfsMetrics"];
    }
    if (_json.containsKey("yarnMetrics")) {
      yarnMetrics = _json["yarnMetrics"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (hdfsMetrics != null) {
      _json["hdfsMetrics"] = hdfsMetrics;
    }
    if (yarnMetrics != null) {
      _json["yarnMetrics"] = yarnMetrics;
    }
    return _json;
  }
}

/** Metadata describing the operation. */
class ClusterOperationMetadata {
  /** Output-only. Name of the cluster for the operation. */
  core.String clusterName;
  /** Output-only. Cluster UUID for the operation. */
  core.String clusterUuid;
  /** Output-only. Short description of operation. */
  core.String description;
  /** Output-only. Labels associated with the operation. */
  core.Map<core.String, core.String> labels;
  /** Output-only. The operation type. */
  core.String operationType;
  /** Output-only. Current operation status. */
  ClusterOperationStatus status;
  /** Output-only. The previous operation status. */
  core.List<ClusterOperationStatus> statusHistory;
  /** Output-only. Errors encountered during operation execution. */
  core.List<core.String> warnings;

  ClusterOperationMetadata();

  ClusterOperationMetadata.fromJson(core.Map _json) {
    if (_json.containsKey("clusterName")) {
      clusterName = _json["clusterName"];
    }
    if (_json.containsKey("clusterUuid")) {
      clusterUuid = _json["clusterUuid"];
    }
    if (_json.containsKey("description")) {
      description = _json["description"];
    }
    if (_json.containsKey("labels")) {
      labels = _json["labels"];
    }
    if (_json.containsKey("operationType")) {
      operationType = _json["operationType"];
    }
    if (_json.containsKey("status")) {
      status = new ClusterOperationStatus.fromJson(_json["status"]);
    }
    if (_json.containsKey("statusHistory")) {
      statusHistory = _json["statusHistory"].map((value) => new ClusterOperationStatus.fromJson(value)).toList();
    }
    if (_json.containsKey("warnings")) {
      warnings = _json["warnings"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (clusterName != null) {
      _json["clusterName"] = clusterName;
    }
    if (clusterUuid != null) {
      _json["clusterUuid"] = clusterUuid;
    }
    if (description != null) {
      _json["description"] = description;
    }
    if (labels != null) {
      _json["labels"] = labels;
    }
    if (operationType != null) {
      _json["operationType"] = operationType;
    }
    if (status != null) {
      _json["status"] = (status).toJson();
    }
    if (statusHistory != null) {
      _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).toList();
    }
    if (warnings != null) {
      _json["warnings"] = warnings;
    }
    return _json;
  }
}

/** The status of the operation. */
class ClusterOperationStatus {
  /** Output-only. A message containing any operation metadata details. */
  core.String details;
  /** Output-only. A message containing the detailed operation state. */
  core.String innerState;
  /**
   * Output-only. A message containing the operation state.
   * Possible string values are:
   * - "UNKNOWN" : Unused.
   * - "PENDING" : The operation has been created.
   * - "RUNNING" : The operation is running.
   * - "DONE" : The operation is done; either cancelled or completed.
   */
  core.String state;
  /** Output-only. The time this state was entered. */
  core.String stateStartTime;

  ClusterOperationStatus();

  ClusterOperationStatus.fromJson(core.Map _json) {
    if (_json.containsKey("details")) {
      details = _json["details"];
    }
    if (_json.containsKey("innerState")) {
      innerState = _json["innerState"];
    }
    if (_json.containsKey("state")) {
      state = _json["state"];
    }
    if (_json.containsKey("stateStartTime")) {
      stateStartTime = _json["stateStartTime"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (details != null) {
      _json["details"] = details;
    }
    if (innerState != null) {
      _json["innerState"] = innerState;
    }
    if (state != null) {
      _json["state"] = state;
    }
    if (stateStartTime != null) {
      _json["stateStartTime"] = stateStartTime;
    }
    return _json;
  }
}

/** The status of a cluster and its instances. */
class ClusterStatus {
  /** Output-only. Optional details of cluster's state. */
  core.String detail;
  /**
   * Output-only. The cluster's state.
   * Possible string values are:
   * - "UNKNOWN" : The cluster state is unknown.
   * - "CREATING" : The cluster is being created and set up. It is not ready
   * for use.
   * - "RUNNING" : The cluster is currently running and healthy. It is ready
   * for use.
   * - "ERROR" : The cluster encountered an error. It is not ready for use.
   * - "DELETING" : The cluster is being deleted. It cannot be used.
   * - "UPDATING" : The cluster is being updated. It continues to accept and
   * process jobs.
   */
  core.String state;
  /** Output-only. Time when this state was entered. */
  core.String stateStartTime;
  /**
   * Output-only. Additional state information that includes status reported by
   * the agent.
   * Possible string values are:
   * - "UNSPECIFIED"
   * - "UNHEALTHY" : The cluster is known to be in an unhealthy state (for
   * example, critical daemons are not running or HDFS capacity is exhausted).
   * Applies to RUNNING state.
   * - "STALE_STATUS" : The agent-reported status is out of date (may occur if
   * Cloud Dataproc loses communication with Agent). Applies to RUNNING state.
   */
  core.String substate;

  ClusterStatus();

  ClusterStatus.fromJson(core.Map _json) {
    if (_json.containsKey("detail")) {
      detail = _json["detail"];
    }
    if (_json.containsKey("state")) {
      state = _json["state"];
    }
    if (_json.containsKey("stateStartTime")) {
      stateStartTime = _json["stateStartTime"];
    }
    if (_json.containsKey("substate")) {
      substate = _json["substate"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (detail != null) {
      _json["detail"] = detail;
    }
    if (state != null) {
      _json["state"] = state;
    }
    if (stateStartTime != null) {
      _json["stateStartTime"] = stateStartTime;
    }
    if (substate != null) {
      _json["substate"] = substate;
    }
    return _json;
  }
}
1501 | |
/** A request to collect cluster diagnostic information. */
class DiagnoseClusterRequest {

  DiagnoseClusterRequest();

  DiagnoseClusterRequest.fromJson(core.Map _json) {
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    return _json;
  }
}

/** Specifies the config of disk options for a group of VM instances. */
class DiskConfig {
  /** Optional. Size in GB of the boot disk (default is 500 GB). */
  core.int bootDiskSizeGb;
  /**
   * Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs
   * are not attached, the boot disk is used to store runtime logs and HDFS
   * (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one
   * or more SSDs are attached, this runtime bulk data is spread across them,
   * and the boot disk contains only basic config and installed binaries.
   */
  core.int numLocalSsds;

  DiskConfig();

  DiskConfig.fromJson(core.Map _json) {
    if (_json.containsKey("bootDiskSizeGb")) {
      bootDiskSizeGb = _json["bootDiskSizeGb"];
    }
    if (_json.containsKey("numLocalSsds")) {
      numLocalSsds = _json["numLocalSsds"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (bootDiskSizeGb != null) {
      _json["bootDiskSizeGb"] = bootDiskSizeGb;
    }
    if (numLocalSsds != null) {
      _json["numLocalSsds"] = numLocalSsds;
    }
    return _json;
  }
}

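// Example (illustrative sketch, not part of the generated bindings): a
// DiskConfig with a larger boot disk and one local SSD, so HDFS data and
// runtime logs land on the SSD per the numLocalSsds doc above. The helper
// name is hypothetical.
DiskConfig exampleDiskConfig() {
  return new DiskConfig()
    ..bootDiskSizeGb = 1000
    ..numLocalSsds = 1;
}
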
/**
 * A generic empty message that you can re-use to avoid defining duplicated
 * empty messages in your APIs. A typical example is to use it as the request
 * or the response type of an API method. For instance:
 * service Foo {
 *   rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
 * }
 * The JSON representation for Empty is an empty JSON object {}.
 */
class Empty {

  Empty();

  Empty.fromJson(core.Map _json) {
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    return _json;
  }
}

/**
 * Common config settings for resources of Google Compute Engine cluster
 * instances, applicable to all instances in the cluster.
 */
class GceClusterConfig {
  /**
   * Optional. If true, all instances in the cluster will only have internal
   * IP addresses. By default, clusters are not restricted to internal IP
   * addresses, and will have ephemeral external IP addresses assigned to each
   * instance. This internal_ip_only restriction can only be enabled for
   * subnetwork-enabled networks, and all off-cluster dependencies must be
   * configured to be accessible without external IP addresses.
   */
  core.bool internalIpOnly;
  /**
   * The Google Compute Engine metadata entries to add to all instances (see
   * Project and instance metadata
   * (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
   */
  core.Map<core.String, core.String> metadata;
  /**
   * Optional. The Google Compute Engine network to be used for machine
   * communications. Cannot be specified with subnetwork_uri. If neither
   * network_uri nor subnetwork_uri is specified, the "default" network of the
   * project is used, if it exists. Cannot be a "Custom Subnet Network" (see
   * Using Subnetworks for more information). A full URL, partial URI, or
   * short name are valid. Examples:
   * https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default
   * projects/[project_id]/regions/global/default
   * default
   */
  core.String networkUri;
  /**
   * Optional. The service account of the instances. Defaults to the default
   * Google Compute Engine service account. Custom service accounts need
   * permissions equivalent to the following IAM roles:
   * roles/logging.logWriter
   * roles/storage.objectAdmin
   * (see
   * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
   * for more information). Example:
   * [account_id]@[project_id].iam.gserviceaccount.com
   */
  core.String serviceAccount;
  /**
   * Optional. The URIs of service account scopes to be included in Google
   * Compute Engine instances. The following base set of scopes is always
   * included:
   * https://www.googleapis.com/auth/cloud.useraccounts.readonly
   * https://www.googleapis.com/auth/devstorage.read_write
   * https://www.googleapis.com/auth/logging.write
   * If no scopes are specified, the following defaults are also provided:
   * https://www.googleapis.com/auth/bigquery
   * https://www.googleapis.com/auth/bigtable.admin.table
   * https://www.googleapis.com/auth/bigtable.data
   * https://www.googleapis.com/auth/devstorage.full_control
   */
  core.List<core.String> serviceAccountScopes;
  /**
   * Optional. The Google Compute Engine subnetwork to be used for machine
   * communications. Cannot be specified with network_uri. A full URL, partial
   * URI, or short name are valid. Examples:
   * https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0
   * projects/[project_id]/regions/us-east1/sub0
   * sub0
   */
  core.String subnetworkUri;
  /**
   * The Google Compute Engine tags to add to all instances (see Tagging
   * instances).
   */
  core.List<core.String> tags;
  /**
   * Optional. The zone where the Google Compute Engine cluster will be
   * located. On a create request, it is required in the "global" region. If
   * omitted in a non-global Cloud Dataproc region, the service will pick a
   * zone in the corresponding GCE region. On a get request, zone will always
   * be present. A full URL, partial URI, or short name are valid. Examples:
   * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]
   * projects/[project_id]/zones/[zone]
   * us-central1-f
   */
  core.String zoneUri;

  GceClusterConfig();

  GceClusterConfig.fromJson(core.Map _json) {
    if (_json.containsKey("internalIpOnly")) {
      internalIpOnly = _json["internalIpOnly"];
    }
    if (_json.containsKey("metadata")) {
      metadata = _json["metadata"];
    }
    if (_json.containsKey("networkUri")) {
      networkUri = _json["networkUri"];
    }
    if (_json.containsKey("serviceAccount")) {
      serviceAccount = _json["serviceAccount"];
    }
    if (_json.containsKey("serviceAccountScopes")) {
      serviceAccountScopes = _json["serviceAccountScopes"];
    }
    if (_json.containsKey("subnetworkUri")) {
      subnetworkUri = _json["subnetworkUri"];
    }
    if (_json.containsKey("tags")) {
      tags = _json["tags"];
    }
    if (_json.containsKey("zoneUri")) {
      zoneUri = _json["zoneUri"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (internalIpOnly != null) {
      _json["internalIpOnly"] = internalIpOnly;
    }
    if (metadata != null) {
      _json["metadata"] = metadata;
    }
    if (networkUri != null) {
      _json["networkUri"] = networkUri;
    }
    if (serviceAccount != null) {
      _json["serviceAccount"] = serviceAccount;
    }
    if (serviceAccountScopes != null) {
      _json["serviceAccountScopes"] = serviceAccountScopes;
    }
    if (subnetworkUri != null) {
      _json["subnetworkUri"] = subnetworkUri;
    }
    if (tags != null) {
      _json["tags"] = tags;
    }
    if (zoneUri != null) {
      _json["zoneUri"] = zoneUri;
    }
    return _json;
  }
}

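// Example (illustrative sketch, not part of the generated bindings): a
// GceClusterConfig using the short-name and partial-URI forms documented
// above. The project and subnetwork values are placeholders; the helper name
// is hypothetical.
GceClusterConfig exampleGceClusterConfig() {
  return new GceClusterConfig()
    ..zoneUri = 'us-central1-f'
    ..subnetworkUri = 'projects/[project_id]/regions/us-central1/sub0'
    ..internalIpOnly = true
    ..tags = ['dataproc-cluster'];
}
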
/**
 * A Cloud Dataproc job for running Apache Hadoop MapReduce
 * (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
 * jobs on Apache Hadoop YARN
 * (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
 */
class HadoopJob {
  /**
   * Optional. HCFS URIs of archives to be extracted in the working directory
   * of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz,
   * .tgz, or .zip.
   */
  core.List<core.String> archiveUris;
  /**
   * Optional. The arguments to pass to the driver. Do not include arguments,
   * such as -libjars or -Dfoo=bar, that can be set as job properties, since a
   * collision may occur that causes an incorrect job submission.
   */
  core.List<core.String> args;
  /**
   * Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
   * to the working directory of Hadoop drivers and distributed tasks. Useful
   * for naively parallel tasks.
   */
  core.List<core.String> fileUris;
  /**
   * Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and
   * tasks.
   */
  core.List<core.String> jarFileUris;
  /** Optional. The runtime log config for job execution. */
  LoggingConfig loggingConfig;
  /**
   * The name of the driver's main class. The jar file containing the class
   * must be in the default CLASSPATH or specified in jar_file_uris.
   */
  core.String mainClass;
  /**
   * The HCFS URI of the jar file containing the main class. Examples:
   * 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar'
   * 'hdfs:/tmp/test-samples/custom-wordcount.jar'
   * 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
   */
  core.String mainJarFileUri;
  /**
   * Optional. A mapping of property names to values, used to configure
   * Hadoop. Properties that conflict with values set by the Cloud Dataproc
   * API may be overwritten. Can include properties set in
   * /etc/hadoop/conf / * -site and classes in user code.
   */
  core.Map<core.String, core.String> properties;

  HadoopJob();

  HadoopJob.fromJson(core.Map _json) {
    if (_json.containsKey("archiveUris")) {
      archiveUris = _json["archiveUris"];
    }
    if (_json.containsKey("args")) {
      args = _json["args"];
    }
    if (_json.containsKey("fileUris")) {
      fileUris = _json["fileUris"];
    }
    if (_json.containsKey("jarFileUris")) {
      jarFileUris = _json["jarFileUris"];
    }
    if (_json.containsKey("loggingConfig")) {
      loggingConfig = new LoggingConfig.fromJson(_json["loggingConfig"]);
    }
    if (_json.containsKey("mainClass")) {
      mainClass = _json["mainClass"];
    }
    if (_json.containsKey("mainJarFileUri")) {
      mainJarFileUri = _json["mainJarFileUri"];
    }
    if (_json.containsKey("properties")) {
      properties = _json["properties"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (archiveUris != null) {
      _json["archiveUris"] = archiveUris;
    }
    if (args != null) {
      _json["args"] = args;
    }
    if (fileUris != null) {
      _json["fileUris"] = fileUris;
    }
    if (jarFileUris != null) {
      _json["jarFileUris"] = jarFileUris;
    }
    if (loggingConfig != null) {
      _json["loggingConfig"] = (loggingConfig).toJson();
    }
    if (mainClass != null) {
      _json["mainClass"] = mainClass;
    }
    if (mainJarFileUri != null) {
      _json["mainJarFileUri"] = mainJarFileUri;
    }
    if (properties != null) {
      _json["properties"] = properties;
    }
    return _json;
  }
}

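// Example (illustrative sketch, not part of the generated bindings): a
// HadoopJob that runs the stock examples jar already on the cluster. Bucket
// paths are placeholders; note that -D style settings go in properties, not
// args, per the doc above.
HadoopJob exampleHadoopJob() {
  return new HadoopJob()
    ..mainJarFileUri =
        'file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
    ..args = ['wordcount', 'gs://my-bucket/input/', 'gs://my-bucket/output/']
    ..properties = {'mapreduce.job.maps': '8'};
}
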
/**
 * A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/)
 * queries on YARN.
 */
class HiveJob {
  /**
   * Optional. Whether to continue executing queries if a query fails. The
   * default value is false. Setting to true can be useful when executing
   * independent parallel queries.
   */
  core.bool continueOnFailure;
  /**
   * Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive
   * server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
   */
  core.List<core.String> jarFileUris;
  /**
   * Optional. A mapping of property names and values, used to configure Hive.
   * Properties that conflict with values set by the Cloud Dataproc API may be
   * overwritten. Can include properties set in /etc/hadoop/conf / * -site.xml,
   * /etc/hive/conf/hive-site.xml, and classes in user code.
   */
  core.Map<core.String, core.String> properties;
  /** The HCFS URI of the script that contains Hive queries. */
  core.String queryFileUri;
  /** A list of queries. */
  QueryList queryList;
  /**
   * Optional. Mapping of query variable names to values (equivalent to the
   * Hive command: SET name="value";).
   */
  core.Map<core.String, core.String> scriptVariables;

  HiveJob();

  HiveJob.fromJson(core.Map _json) {
    if (_json.containsKey("continueOnFailure")) {
      continueOnFailure = _json["continueOnFailure"];
    }
    if (_json.containsKey("jarFileUris")) {
      jarFileUris = _json["jarFileUris"];
    }
    if (_json.containsKey("properties")) {
      properties = _json["properties"];
    }
    if (_json.containsKey("queryFileUri")) {
      queryFileUri = _json["queryFileUri"];
    }
    if (_json.containsKey("queryList")) {
      queryList = new QueryList.fromJson(_json["queryList"]);
    }
    if (_json.containsKey("scriptVariables")) {
      scriptVariables = _json["scriptVariables"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (continueOnFailure != null) {
      _json["continueOnFailure"] = continueOnFailure;
    }
    if (jarFileUris != null) {
      _json["jarFileUris"] = jarFileUris;
    }
    if (properties != null) {
      _json["properties"] = properties;
    }
    if (queryFileUri != null) {
      _json["queryFileUri"] = queryFileUri;
    }
    if (queryList != null) {
      _json["queryList"] = (queryList).toJson();
    }
    if (scriptVariables != null) {
      _json["scriptVariables"] = scriptVariables;
    }
    return _json;
  }
}

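// Example (illustrative sketch, not part of the generated bindings): a
// HiveJob that reads its queries from a script in Cloud Storage and binds
// the query variable "table", the equivalent of Hive's SET name="value";
// described above. The bucket path is a placeholder.
HiveJob exampleHiveJob() {
  return new HiveJob()
    ..queryFileUri = 'gs://my-bucket/queries/report.hql'
    ..scriptVariables = {'table': 'logs.events'}
    ..continueOnFailure = false;
}
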
/**
 * Optional. The config settings for Google Compute Engine resources in an
 * instance group, such as a master or worker group.
 */
class InstanceGroupConfig {
  /**
   * Optional. The Google Compute Engine accelerator configuration for these
   * instances. Beta Feature: This feature is still under development. It may
   * be changed before final release.
   */
  core.List<AcceleratorConfig> accelerators;
  /** Optional. Disk option config settings. */
  DiskConfig diskConfig;
  /**
   * Output-only. The Google Compute Engine image resource used for cluster
   * instances. Inferred from SoftwareConfig.image_version.
   */
  core.String imageUri;
  /**
   * Optional. The list of instance names. Cloud Dataproc derives the names
   * from cluster_name, num_instances, and the instance group if not set by
   * user (recommended practice is to let Cloud Dataproc derive the name).
   */
  core.List<core.String> instanceNames;
  /**
   * Optional. Specifies that this instance group contains preemptible
   * instances.
   */
  core.bool isPreemptible;
  /**
   * Optional. The Google Compute Engine machine type used for cluster
   * instances. A full URL, partial URI, or short name are valid. Examples:
   * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2
   * projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2
   * n1-standard-2
   */
  core.String machineTypeUri;
  /**
   * Output-only. The config for the Google Compute Engine Instance Group
   * Manager that manages this group. This is only used for preemptible
   * instance groups.
   */
  ManagedGroupConfig managedGroupConfig;
  /**
   * Optional. The number of VM instances in the instance group. For master
   * instance groups, must be set to 1.
   */
  core.int numInstances;

  InstanceGroupConfig();

  InstanceGroupConfig.fromJson(core.Map _json) {
    if (_json.containsKey("accelerators")) {
      accelerators = _json["accelerators"].map((value) => new AcceleratorConfig.fromJson(value)).toList();
    }
    if (_json.containsKey("diskConfig")) {
      diskConfig = new DiskConfig.fromJson(_json["diskConfig"]);
    }
    if (_json.containsKey("imageUri")) {
      imageUri = _json["imageUri"];
    }
    if (_json.containsKey("instanceNames")) {
      instanceNames = _json["instanceNames"];
    }
    if (_json.containsKey("isPreemptible")) {
      isPreemptible = _json["isPreemptible"];
    }
    if (_json.containsKey("machineTypeUri")) {
      machineTypeUri = _json["machineTypeUri"];
    }
    if (_json.containsKey("managedGroupConfig")) {
      managedGroupConfig = new ManagedGroupConfig.fromJson(_json["managedGroupConfig"]);
    }
    if (_json.containsKey("numInstances")) {
      numInstances = _json["numInstances"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (accelerators != null) {
      _json["accelerators"] = accelerators.map((value) => (value).toJson()).toList();
    }
    if (diskConfig != null) {
      _json["diskConfig"] = (diskConfig).toJson();
    }
    if (imageUri != null) {
      _json["imageUri"] = imageUri;
    }
    if (instanceNames != null) {
      _json["instanceNames"] = instanceNames;
    }
    if (isPreemptible != null) {
      _json["isPreemptible"] = isPreemptible;
    }
    if (machineTypeUri != null) {
      _json["machineTypeUri"] = machineTypeUri;
    }
    if (managedGroupConfig != null) {
      _json["managedGroupConfig"] = (managedGroupConfig).toJson();
    }
    if (numInstances != null) {
      _json["numInstances"] = numInstances;
    }
    return _json;
  }
}

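// Example (illustrative sketch, not part of the generated bindings): a
// two-machine worker group using the short machine-type name accepted above,
// reusing the hypothetical exampleDiskConfig() helper from earlier in this
// file.
InstanceGroupConfig exampleWorkerGroup() {
  return new InstanceGroupConfig()
    ..numInstances = 2
    ..machineTypeUri = 'n1-standard-2'
    ..diskConfig = exampleDiskConfig();
}
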
/** A Cloud Dataproc job resource. */
class Job {
  /**
   * Output-only. If present, the location of miscellaneous control files
   * which may be used as part of job setup and handling. If not present,
   * control files may be placed in the same location as driver_output_uri.
   */
  core.String driverControlFilesUri;
  /**
   * Output-only. A URI pointing to the location of the stdout of the job's
   * driver program.
   */
  core.String driverOutputResourceUri;
  /** Job is a Hadoop job. */
  HadoopJob hadoopJob;
  /** Job is a Hive job. */
  HiveJob hiveJob;
  /**
   * Optional. The labels to associate with this job. Label keys must contain
   * 1 to 63 characters, and must conform to RFC 1035
   * (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
   * if present, must contain 1 to 63 characters, and must conform to RFC 1035
   * (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
   * associated with a job.
   */
  core.Map<core.String, core.String> labels;
  /** Job is a Pig job. */
  PigJob pigJob;
  /**
   * Required. Job information, including how, when, and where to run the job.
   */
  JobPlacement placement;
  /** Job is a Pyspark job. */
  PySparkJob pysparkJob;
  /**
   * Optional. The fully qualified reference to the job, which can be used to
   * obtain the equivalent REST path of the job resource. If this property is
   * not specified when a job is created, the server generates a
   * <code>job_id</code>.
   */
  JobReference reference;
  /** Optional. Job scheduling configuration. */
  JobScheduling scheduling;
  /** Job is a Spark job. */
  SparkJob sparkJob;
  /** Job is a SparkSql job. */
  SparkSqlJob sparkSqlJob;
  /**
   * Output-only. The job status. Additional application-specific status
   * information may be contained in the <code>type_job</code> and
   * <code>yarn_applications</code> fields.
   */
  JobStatus status;
  /** Output-only. The previous job status. */
  core.List<JobStatus> statusHistory;
  /**
   * Output-only. The collection of YARN applications spun up by this job.
   * Beta Feature: This report is available for testing purposes only. It may
   * be changed before final release.
   */
  core.List<YarnApplication> yarnApplications;

  Job();

  Job.fromJson(core.Map _json) {
    if (_json.containsKey("driverControlFilesUri")) {
      driverControlFilesUri = _json["driverControlFilesUri"];
    }
    if (_json.containsKey("driverOutputResourceUri")) {
      driverOutputResourceUri = _json["driverOutputResourceUri"];
    }
    if (_json.containsKey("hadoopJob")) {
      hadoopJob = new HadoopJob.fromJson(_json["hadoopJob"]);
    }
    if (_json.containsKey("hiveJob")) {
      hiveJob = new HiveJob.fromJson(_json["hiveJob"]);
    }
    if (_json.containsKey("labels")) {
      labels = _json["labels"];
    }
    if (_json.containsKey("pigJob")) {
      pigJob = new PigJob.fromJson(_json["pigJob"]);
    }
    if (_json.containsKey("placement")) {
      placement = new JobPlacement.fromJson(_json["placement"]);
    }
    if (_json.containsKey("pysparkJob")) {
      pysparkJob = new PySparkJob.fromJson(_json["pysparkJob"]);
    }
    if (_json.containsKey("reference")) {
      reference = new JobReference.fromJson(_json["reference"]);
    }
    if (_json.containsKey("scheduling")) {
      scheduling = new JobScheduling.fromJson(_json["scheduling"]);
    }
    if (_json.containsKey("sparkJob")) {
      sparkJob = new SparkJob.fromJson(_json["sparkJob"]);
    }
    if (_json.containsKey("sparkSqlJob")) {
      sparkSqlJob = new SparkSqlJob.fromJson(_json["sparkSqlJob"]);
    }
    if (_json.containsKey("status")) {
      status = new JobStatus.fromJson(_json["status"]);
    }
    if (_json.containsKey("statusHistory")) {
      statusHistory = _json["statusHistory"].map((value) => new JobStatus.fromJson(value)).toList();
    }
    if (_json.containsKey("yarnApplications")) {
      yarnApplications = _json["yarnApplications"].map((value) => new YarnApplication.fromJson(value)).toList();
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (driverControlFilesUri != null) {
      _json["driverControlFilesUri"] = driverControlFilesUri;
    }
    if (driverOutputResourceUri != null) {
      _json["driverOutputResourceUri"] = driverOutputResourceUri;
    }
    if (hadoopJob != null) {
      _json["hadoopJob"] = (hadoopJob).toJson();
    }
    if (hiveJob != null) {
      _json["hiveJob"] = (hiveJob).toJson();
    }
    if (labels != null) {
      _json["labels"] = labels;
    }
    if (pigJob != null) {
      _json["pigJob"] = (pigJob).toJson();
    }
    if (placement != null) {
      _json["placement"] = (placement).toJson();
    }
    if (pysparkJob != null) {
      _json["pysparkJob"] = (pysparkJob).toJson();
    }
    if (reference != null) {
      _json["reference"] = (reference).toJson();
    }
    if (scheduling != null) {
      _json["scheduling"] = (scheduling).toJson();
    }
    if (sparkJob != null) {
      _json["sparkJob"] = (sparkJob).toJson();
    }
    if (sparkSqlJob != null) {
      _json["sparkSqlJob"] = (sparkSqlJob).toJson();
    }
    if (status != null) {
      _json["status"] = (status).toJson();
    }
    if (statusHistory != null) {
      _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).toList();
    }
    if (yarnApplications != null) {
      _json["yarnApplications"] = yarnApplications.map((value) => (value).toJson()).toList();
    }
    return _json;
  }
}

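// Example (illustrative sketch, not part of the generated bindings):
// assembles a Job that targets an existing cluster by name and carries a
// SparkJob payload, with a label that satisfies the RFC 1035 rules quoted
// above. The cluster, class, and jar names are placeholders.
Job exampleJob() {
  return new Job()
    ..placement = (new JobPlacement()..clusterName = 'analytics-cluster')
    ..sparkJob = (new SparkJob()
      ..mainClass = 'com.example.Aggregate'
      ..jarFileUris = ['gs://my-bucket/jars/aggregate.jar'])
    ..labels = {'team': 'data-eng'};
}
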
/** Cloud Dataproc job config. */
class JobPlacement {
  /** Required. The name of the cluster where the job will be submitted. */
  core.String clusterName;
  /**
   * Output-only. A cluster UUID generated by the Cloud Dataproc service when
   * the job is submitted.
   */
  core.String clusterUuid;

  JobPlacement();

  JobPlacement.fromJson(core.Map _json) {
    if (_json.containsKey("clusterName")) {
      clusterName = _json["clusterName"];
    }
    if (_json.containsKey("clusterUuid")) {
      clusterUuid = _json["clusterUuid"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (clusterName != null) {
      _json["clusterName"] = clusterName;
    }
    if (clusterUuid != null) {
      _json["clusterUuid"] = clusterUuid;
    }
    return _json;
  }
}

/** Encapsulates the full scoping used to reference a job. */
class JobReference {
  /**
   * Optional. The job ID, which must be unique within the project. The job ID
   * is generated by the server upon job submission or provided by the user as
   * a means to perform retries without creating duplicate jobs. The ID must
   * contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
   * hyphens (-). The maximum length is 100 characters.
   */
  core.String jobId;
  /**
   * Required. The ID of the Google Cloud Platform project that the job
   * belongs to.
   */
  core.String projectId;

  JobReference();

  JobReference.fromJson(core.Map _json) {
    if (_json.containsKey("jobId")) {
      jobId = _json["jobId"];
    }
    if (_json.containsKey("projectId")) {
      projectId = _json["projectId"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (jobId != null) {
      _json["jobId"] = jobId;
    }
    if (projectId != null) {
      _json["projectId"] = projectId;
    }
    return _json;
  }
}

/**
 * Job scheduling options. Beta Feature: These options are available for
 * testing purposes only. They may be changed before final release.
 */
class JobScheduling {
  /**
   * Optional. Maximum number of times per hour a driver may be restarted as a
   * result of the driver terminating with a non-zero code before the job is
   * reported failed. A job may be reported as thrashing if the driver exits
   * with a non-zero code 4 times within a 10 minute window. Maximum value is
   * 10.
   */
  core.int maxFailuresPerHour;

  JobScheduling();

  JobScheduling.fromJson(core.Map _json) {
    if (_json.containsKey("maxFailuresPerHour")) {
      maxFailuresPerHour = _json["maxFailuresPerHour"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (maxFailuresPerHour != null) {
      _json["maxFailuresPerHour"] = maxFailuresPerHour;
    }
    return _json;
  }
}

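// Example (illustrative sketch, not part of the generated bindings): opts a
// job into the beta restart behavior documented above, staying under the
// stated maximum of 10.
JobScheduling exampleScheduling() {
  return new JobScheduling()..maxFailuresPerHour = 5;
}
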
/** Cloud Dataproc job status. */
class JobStatus {
  /**
   * Output-only. Optional job state details, such as an error description if
   * the state is <code>ERROR</code>.
   */
  core.String details;
  /**
   * Output-only. A state message specifying the overall job state.
   * Possible string values are:
   * - "STATE_UNSPECIFIED" : The job state is unknown.
   * - "PENDING" : The job is pending; it has been submitted, but is not yet
   * running.
   * - "SETUP_DONE" : Job has been received by the service and completed
   * initial setup; it will soon be submitted to the cluster.
   * - "RUNNING" : The job is running on the cluster.
   * - "CANCEL_PENDING" : A CancelJob request has been received, but is
   * pending.
   * - "CANCEL_STARTED" : Transient in-flight resources have been canceled,
   * and the request to cancel the running job has been issued to the cluster.
   * - "CANCELLED" : The job cancellation was successful.
   * - "DONE" : The job has completed successfully.
   * - "ERROR" : The job has completed, but encountered an error.
   * - "ATTEMPT_FAILURE" : Job attempt has failed. The detail field contains
   * failure details for this attempt. Applies to restartable jobs only.
   */
  core.String state;
  /** Output-only. The time when this state was entered. */
  core.String stateStartTime;
  /**
   * Output-only. Additional state information, which includes status reported
   * by the agent.
   * Possible string values are:
   * - "UNSPECIFIED"
   * - "SUBMITTED" : The job is submitted to the agent. Applies to RUNNING
   * state.
   * - "QUEUED" : The job has been received and is awaiting execution (it may
   * be waiting for a condition to be met). See the "details" field for the
   * reason for the delay. Applies to RUNNING state.
   * - "STALE_STATUS" : The agent-reported status is out of date, which may be
   * caused by a loss of communication between the agent and Cloud Dataproc.
   * If the agent does not send a timely update, the job will fail. Applies to
   * RUNNING state.
   */
  core.String substate;

  JobStatus();

  JobStatus.fromJson(core.Map _json) {
    if (_json.containsKey("details")) {
      details = _json["details"];
    }
    if (_json.containsKey("state")) {
      state = _json["state"];
    }
    if (_json.containsKey("stateStartTime")) {
      stateStartTime = _json["stateStartTime"];
    }
    if (_json.containsKey("substate")) {
      substate = _json["substate"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (details != null) {
      _json["details"] = details;
    }
    if (state != null) {
      _json["state"] = state;
    }
    if (stateStartTime != null) {
      _json["stateStartTime"] = stateStartTime;
    }
    if (substate != null) {
      _json["substate"] = substate;
    }
    return _json;
  }
}

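// Example (illustrative sketch, not part of the generated bindings):
// interprets the state strings documented above, treating DONE, CANCELLED,
// and ERROR as terminal. Any other state means the job may still change.
core.bool isJobFinished(JobStatus status) {
  const terminal = const ['DONE', 'CANCELLED', 'ERROR'];
  return status != null && terminal.contains(status.state);
}
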
/** The list of all clusters in a project. */
class ListClustersResponse {
  /** Output-only. The clusters in the project. */
  core.List<Cluster> clusters;
  /**
   * Output-only. This token is included in the response if there are more
   * results to fetch. To fetch additional results, provide this value as the
   * page_token in a subsequent <code>ListClustersRequest</code>.
   */
  core.String nextPageToken;

  ListClustersResponse();

  ListClustersResponse.fromJson(core.Map _json) {
    if (_json.containsKey("clusters")) {
      clusters = _json["clusters"].map((value) => new Cluster.fromJson(value)).toList();
    }
    if (_json.containsKey("nextPageToken")) {
      nextPageToken = _json["nextPageToken"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (clusters != null) {
      _json["clusters"] = clusters.map((value) => (value).toJson()).toList();
    }
    if (nextPageToken != null) {
      _json["nextPageToken"] = nextPageToken;
    }
    return _json;
  }
}

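// Example (illustrative sketch, not part of the generated bindings): drains
// a paginated cluster listing by following nextPageToken as described above.
// The fetch callback is an assumption standing in for whatever API call
// produced the first page.
async.Future<core.List<Cluster>> collectAllClusters(
    async.Future<ListClustersResponse> fetch(core.String pageToken)) async {
  final clusters = <Cluster>[];
  core.String token;
  do {
    final page = await fetch(token);
    if (page.clusters != null) clusters.addAll(page.clusters);
    token = page.nextPageToken;
  } while (token != null && token.isNotEmpty);
  return clusters;
}
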
/** A list of jobs in a project. */
class ListJobsResponse {
  /** Output-only. Jobs list. */
  core.List<Job> jobs;
  /**
   * Optional. This token is included in the response if there are more
   * results to fetch. To fetch additional results, provide this value as the
   * page_token in a subsequent <code>ListJobsRequest</code>.
   */
  core.String nextPageToken;

  ListJobsResponse();

  ListJobsResponse.fromJson(core.Map _json) {
    if (_json.containsKey("jobs")) {
      jobs = _json["jobs"].map((value) => new Job.fromJson(value)).toList();
    }
    if (_json.containsKey("nextPageToken")) {
      nextPageToken = _json["nextPageToken"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (jobs != null) {
      _json["jobs"] = jobs.map((value) => (value).toJson()).toList();
    }
    if (nextPageToken != null) {
      _json["nextPageToken"] = nextPageToken;
    }
    return _json;
  }
}

/** The response message for Operations.ListOperations. */
class ListOperationsResponse {
  /** The standard List next-page token. */
  core.String nextPageToken;
  /** A list of operations that matches the specified filter in the request. */
  core.List<Operation> operations;

  ListOperationsResponse();

  ListOperationsResponse.fromJson(core.Map _json) {
    if (_json.containsKey("nextPageToken")) {
      nextPageToken = _json["nextPageToken"];
    }
    if (_json.containsKey("operations")) {
      operations = _json["operations"].map((value) => new Operation.fromJson(value)).toList();
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (nextPageToken != null) {
      _json["nextPageToken"] = nextPageToken;
    }
    if (operations != null) {
      _json["operations"] = operations.map((value) => (value).toJson()).toList();
    }
    return _json;
  }
}

/** The runtime logging config of the job. */
class LoggingConfig {
  /**
   * The per-package log levels for the driver. This may include the "root"
   * package name to configure rootLogger. Examples: 'com.google = FATAL',
   * 'root = INFO', 'org.apache = DEBUG'
   */
  core.Map<core.String, core.String> driverLogLevels;

  LoggingConfig();

  LoggingConfig.fromJson(core.Map _json) {
    if (_json.containsKey("driverLogLevels")) {
      driverLogLevels = _json["driverLogLevels"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (driverLogLevels != null) {
      _json["driverLogLevels"] = driverLogLevels;
    }
    return _json;
  }
}

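// Example (illustrative sketch, not part of the generated bindings): a
// LoggingConfig using the per-package level syntax shown above; quieter
// dependencies, verbose application code. Package names are placeholders.
LoggingConfig exampleLoggingConfig() {
  return new LoggingConfig()
    ..driverLogLevels = {
      'root': 'INFO',
      'org.apache': 'WARN',
      'com.example': 'DEBUG',
    };
}
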
/** Specifies the resources used to actively manage an instance group. */
class ManagedGroupConfig {
  /** Output-only. The name of the Instance Group Manager for this group. */
  core.String instanceGroupManagerName;
  /**
   * Output-only. The name of the Instance Template used for the Managed
   * Instance Group.
   */
  core.String instanceTemplateName;

  ManagedGroupConfig();

  ManagedGroupConfig.fromJson(core.Map _json) {
    if (_json.containsKey("instanceGroupManagerName")) {
      instanceGroupManagerName = _json["instanceGroupManagerName"];
    }
    if (_json.containsKey("instanceTemplateName")) {
      instanceTemplateName = _json["instanceTemplateName"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (instanceGroupManagerName != null) {
      _json["instanceGroupManagerName"] = instanceGroupManagerName;
    }
    if (instanceTemplateName != null) {
      _json["instanceTemplateName"] = instanceTemplateName;
    }
    return _json;
  }
}

/**
 * Specifies an executable to run on a fully configured node and a timeout
 * period for executable completion.
 */
class NodeInitializationAction {
  /** Required. Google Cloud Storage URI of the executable file. */
  core.String executableFile;
  /**
   * Optional. Amount of time the executable has to complete. Default is 10
   * minutes. Cluster creation fails with an explanatory error message (the
   * name of the executable that caused the error and the exceeded timeout
   * period) if the executable has not completed by the end of the timeout
   * period.
   */
  core.String executionTimeout;

  NodeInitializationAction();

  NodeInitializationAction.fromJson(core.Map _json) {
    if (_json.containsKey("executableFile")) {
      executableFile = _json["executableFile"];
    }
    if (_json.containsKey("executionTimeout")) {
      executionTimeout = _json["executionTimeout"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (executableFile != null) {
      _json["executableFile"] = executableFile;
    }
    if (executionTimeout != null) {
      _json["executionTimeout"] = executionTimeout;
    }
    return _json;
  }
}

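// Example (illustrative sketch, not part of the generated bindings): an
// initialization action with an explicit timeout. The "600s" string assumes
// the JSON encoding of google.protobuf.Duration used by string duration
// fields; the script URI is a placeholder.
NodeInitializationAction exampleInitAction() {
  return new NodeInitializationAction()
    ..executableFile = 'gs://my-bucket/scripts/install-deps.sh'
    ..executionTimeout = '600s';
}
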
/**
 * This resource represents a long-running operation that is the result of a
 * network API call.
 */
class Operation {
  /**
   * If the value is false, it means the operation is still in progress. If
   * true, the operation is completed, and either error or response is
   * available.
   */
  core.bool done;
  /** The error result of the operation in case of failure or cancellation. */
  Status error;
  /**
   * Service-specific metadata associated with the operation. It typically
   * contains progress information and common metadata such as create time.
   * Some services might not provide such metadata. Any method that returns a
   * long-running operation should document the metadata type, if any.
   *
   * The values for Object must be JSON objects. It can consist of `num`,
   * `String`, `bool` and `null` as well as `Map` and `List` values.
   */
  core.Map<core.String, core.Object> metadata;
  /**
   * The server-assigned name, which is only unique within the same service
   * that originally returns it. If you use the default HTTP mapping, the name
   * should have the format of operations/some/unique/name.
   */
  core.String name;
  /**
   * The normal response of the operation in case of success. If the original
   * method returns no data on success, such as Delete, the response is
   * google.protobuf.Empty. If the original method is standard
   * Get/Create/Update, the response should be the resource. For other
   * methods, the response should have the type XxxResponse, where Xxx is the
   * original method name. For example, if the original method name is
   * TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
   *
   * The values for Object must be JSON objects. It can consist of `num`,
   * `String`, `bool` and `null` as well as `Map` and `List` values.
   */
  core.Map<core.String, core.Object> response;

  Operation();

  Operation.fromJson(core.Map _json) {
    if (_json.containsKey("done")) {
      done = _json["done"];
    }
    if (_json.containsKey("error")) {
      error = new Status.fromJson(_json["error"]);
    }
    if (_json.containsKey("metadata")) {
      metadata = _json["metadata"];
    }
    if (_json.containsKey("name")) {
      name = _json["name"];
    }
    if (_json.containsKey("response")) {
      response = _json["response"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (done != null) {
      _json["done"] = done;
    }
    if (error != null) {
      _json["error"] = (error).toJson();
    }
    if (metadata != null) {
      _json["metadata"] = metadata;
    }
    if (name != null) {
      _json["name"] = name;
    }
    if (response != null) {
      _json["response"] = response;
    }
    return _json;
  }
}

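// Example (illustrative sketch, not part of the generated bindings):
// inspects a completed Operation per the contract above, returning the
// response map or throwing on error. Assumes the Status class defined
// elsewhere in this file exposes code and message fields, per
// google.rpc.Status.
core.Map<core.String, core.Object> operationResult(Operation op) {
  if (op.done != true) {
    throw new core.StateError('Operation ${op.name} is still in progress.');
  }
  if (op.error != null) {
    throw new core.StateError(
        'Operation ${op.name} failed: ${op.error.code} ${op.error.message}');
  }
  return op.response;
}
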
/**
 * A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/)
 * queries on YARN.
 */
class PigJob {
  /**
   * Optional. Whether to continue executing queries if a query fails. The
   * default value is false. Setting to true can be useful when executing
   * independent parallel queries.
   */
  core.bool continueOnFailure;
  /**
   * Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig
   * Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
   */
  core.List<core.String> jarFileUris;
  /** Optional. The runtime log config for job execution. */
  LoggingConfig loggingConfig;
  /**
   * Optional. A mapping of property names to values, used to configure Pig.
   * Properties that conflict with values set by the Cloud Dataproc API may be
   * overwritten. Can include properties set in /etc/hadoop/conf / * -site.xml,
   * /etc/pig/conf/pig.properties, and classes in user code.
   */
  core.Map<core.String, core.String> properties;
  /** The HCFS URI of the script that contains the Pig queries. */
  core.String queryFileUri;
  /** A list of queries. */
  QueryList queryList;
  /**
   * Optional. Mapping of query variable names to values (equivalent to the
   * Pig command: name=[value]).
   */
  core.Map<core.String, core.String> scriptVariables;

  PigJob();

  PigJob.fromJson(core.Map _json) {
    if (_json.containsKey("continueOnFailure")) {
      continueOnFailure = _json["continueOnFailure"];
    }
    if (_json.containsKey("jarFileUris")) {
      jarFileUris = _json["jarFileUris"];
    }
    if (_json.containsKey("loggingConfig")) {
      loggingConfig = new LoggingConfig.fromJson(_json["loggingConfig"]);
    }
    if (_json.containsKey("properties")) {
      properties = _json["properties"];
    }
    if (_json.containsKey("queryFileUri")) {
      queryFileUri = _json["queryFileUri"];
    }
    if (_json.containsKey("queryList")) {
      queryList = new QueryList.fromJson(_json["queryList"]);
    }
    if (_json.containsKey("scriptVariables")) {
      scriptVariables = _json["scriptVariables"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (continueOnFailure != null) {
      _json["continueOnFailure"] = continueOnFailure;
    }
    if (jarFileUris != null) {
      _json["jarFileUris"] = jarFileUris;
    }
    if (loggingConfig != null) {
      _json["loggingConfig"] = (loggingConfig).toJson();
    }
    if (properties != null) {
      _json["properties"] = properties;
    }
    if (queryFileUri != null) {
      _json["queryFileUri"] = queryFileUri;
    }
    if (queryList != null) {
      _json["queryList"] = (queryList).toJson();
    }
    if (scriptVariables != null) {
      _json["scriptVariables"] = scriptVariables;
    }
    return _json;
  }
}

/**
 * A Cloud Dataproc job for running Apache PySpark
 * (https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
 * applications on YARN.
 */
class PySparkJob {
  /**
   * Optional. HCFS URIs of archives to be extracted in the working directory
   * of Python drivers and tasks. Supported file types: .jar, .tar, .tar.gz,
   * .tgz, and .zip.
   */
  core.List<core.String> archiveUris;
  /**
   * Optional. The arguments to pass to the driver. Do not include arguments,
   * such as --conf, that can be set as job properties, since a collision may
   * occur that causes an incorrect job submission.
   */
  core.List<core.String> args;
  /**
   * Optional. HCFS URIs of files to be copied to the working directory of
   * Python drivers and distributed tasks. Useful for naively parallel tasks.
   */
  core.List<core.String> fileUris;
  /**
   * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python
   * driver and tasks.
   */
  core.List<core.String> jarFileUris;
  /** Optional. The runtime log config for job execution. */
  LoggingConfig loggingConfig;
  /**
   * Required. The HCFS URI of the main Python file to use as the driver. Must
   * be a .py file.
   */
  core.String mainPythonFileUri;
  /**
   * Optional. A mapping of property names to values, used to configure
   * PySpark. Properties that conflict with values set by the Cloud Dataproc
   * API may be overwritten. Can include properties set in
   * /etc/spark/conf/spark-defaults.conf and classes in user code.
   */
  core.Map<core.String, core.String> properties;
  /**
   * Optional. HCFS file URIs of Python files to pass to the PySpark
   * framework. Supported file types: .py, .egg, and .zip.
   */
  core.List<core.String> pythonFileUris;

  PySparkJob();

  PySparkJob.fromJson(core.Map _json) {
    if (_json.containsKey("archiveUris")) {
      archiveUris = _json["archiveUris"];
    }
    if (_json.containsKey("args")) {
      args = _json["args"];
    }
    if (_json.containsKey("fileUris")) {
      fileUris = _json["fileUris"];
    }
    if (_json.containsKey("jarFileUris")) {
      jarFileUris = _json["jarFileUris"];
    }
    if (_json.containsKey("loggingConfig")) {
      loggingConfig = new LoggingConfig.fromJson(_json["loggingConfig"]);
    }
    if (_json.containsKey("mainPythonFileUri")) {
      mainPythonFileUri = _json["mainPythonFileUri"];
    }
    if (_json.containsKey("properties")) {
      properties = _json["properties"];
    }
    if (_json.containsKey("pythonFileUris")) {
      pythonFileUris = _json["pythonFileUris"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (archiveUris != null) {
      _json["archiveUris"] = archiveUris;
    }
    if (args != null) {
      _json["args"] = args;
    }
    if (fileUris != null) {
      _json["fileUris"] = fileUris;
    }
    if (jarFileUris != null) {
      _json["jarFileUris"] = jarFileUris;
    }
    if (loggingConfig != null) {
      _json["loggingConfig"] = (loggingConfig).toJson();
    }
    if (mainPythonFileUri != null) {
      _json["mainPythonFileUri"] = mainPythonFileUri;
    }
    if (properties != null) {
      _json["properties"] = properties;
    }
    if (pythonFileUris != null) {
      _json["pythonFileUris"] = pythonFileUris;
    }
    return _json;
  }
}

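// Example (illustrative sketch, not part of the generated bindings): a
// PySparkJob whose required mainPythonFileUri points at a .py driver, with a
// helper module shipped via pythonFileUris. Paths are placeholders.
PySparkJob examplePySparkJob() {
  return new PySparkJob()
    ..mainPythonFileUri = 'gs://my-bucket/pyspark/main.py'
    ..pythonFileUris = ['gs://my-bucket/pyspark/helpers.py']
    ..properties = {'spark.executor.memory': '2g'};
}
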
/** A list of queries to run on a cluster. */
class QueryList {
  /**
   * Required. The queries to execute. You do not need to terminate a query
   * with a semicolon. Multiple queries can be specified in one string by
   * separating each with a semicolon. Here is an example of a Cloud Dataproc
   * API snippet that uses a QueryList to specify a HiveJob:
   * "hiveJob": {
   *   "queryList": {
   *     "queries": [
   *       "query1",
   *       "query2",
   *       "query3;query4",
   *     ]
   *   }
   * }
   */
  core.List<core.String> queries;

  QueryList();

  QueryList.fromJson(core.Map _json) {
    if (_json.containsKey("queries")) {
      queries = _json["queries"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (queries != null) {
      _json["queries"] = queries;
    }
    return _json;
  }
}

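// Example (illustrative sketch, not part of the generated bindings): the
// Dart equivalent of the JSON snippet in the queries doc above; a semicolon
// is only needed to pack multiple statements into one entry.
QueryList exampleQueryList() {
  return new QueryList()..queries = ['query1', 'query2', 'query3;query4'];
}
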
/** Specifies the selection and config of software inside the cluster. */
class SoftwareConfig {
  /**
   * Optional. The version of software inside the cluster. It must match the
   * regular expression [0-9]+\.[0-9]+. If unspecified, it defaults to the
   * latest version (see Cloud Dataproc Versioning).
   */
  core.String imageVersion;
  /**
   * Optional. The properties to set on daemon config files. Property keys are
   * specified in prefix:property format, such as core:fs.defaultFS. The
   * following are supported prefixes and their mappings:
   * capacity-scheduler: capacity-scheduler.xml
   * core: core-site.xml
   * distcp: distcp-default.xml
   * hdfs: hdfs-site.xml
   * hive: hive-site.xml
   * mapred: mapred-site.xml
   * pig: pig.properties
   * spark: spark-defaults.conf
   * yarn: yarn-site.xml
   */
  core.Map<core.String, core.String> properties;

  SoftwareConfig();

  SoftwareConfig.fromJson(core.Map _json) {
    if (_json.containsKey("imageVersion")) {
      imageVersion = _json["imageVersion"];
    }
    if (_json.containsKey("properties")) {
      properties = _json["properties"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (imageVersion != null) {
      _json["imageVersion"] = imageVersion;
    }
    if (properties != null) {
      _json["properties"] = properties;
    }
    return _json;
  }
}

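// Example (illustrative sketch, not part of the generated bindings): pins an
// image version matching the documented [0-9]+\.[0-9]+ pattern and sets
// daemon properties with the prefix:property convention mapped above.
SoftwareConfig exampleSoftwareConfig() {
  return new SoftwareConfig()
    ..imageVersion = '1.2'
    ..properties = {
      'core:fs.defaultFS': 'gs://my-bucket',
      'spark:spark.executor.cores': '2',
    };
}
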
2914 /** | |
2915 * A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/) | |
2916 * applications on YARN. | |
2917 */ | |
2918 class SparkJob { | |
2919 /** | |
2920 * Optional. HCFS URIs of archives to be extracted in the working directory of | |
2921 * Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, | |
2922 * and .zip. | |
2923 */ | |
2924 core.List<core.String> archiveUris; | |
2925 /** | |
2926 * Optional. The arguments to pass to the driver. Do not include arguments, | |
2927 * such as --conf, that can be set as job properties, since a collision may | |
2928 * occur that causes an incorrect job submission. | |
2929 */ | |
2930 core.List<core.String> args; | |
2931 /** | |
2932 * Optional. HCFS URIs of files to be copied to the working directory of Spark | |
2933 * drivers and distributed tasks. Useful for naively parallel tasks. | |
2934 */ | |
2935 core.List<core.String> fileUris; | |
2936 /** | |
2937 * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark | |
2938 * driver and tasks. | |
2939 */ | |
2940 core.List<core.String> jarFileUris; | |
2941 /** Optional. The runtime log config for job execution. */ | |
2942 LoggingConfig loggingConfig; | |
2943 /** | |
2944 * The name of the driver's main class. The jar file that contains the class | |
2945 * must be in the default CLASSPATH or specified in jar_file_uris. | |
2946 */ | |
2947 core.String mainClass; | |
2948 /** The HCFS URI of the jar file that contains the main class. */ | |
2949 core.String mainJarFileUri; | |
2950 /** | |
2951 * Optional. A mapping of property names to values, used to configure Spark. | |
2952 * Properties that conflict with values set by the Cloud Dataproc API may be | |
2953 * overwritten. Can include properties set in | |
2954 * /etc/spark/conf/spark-defaults.conf and classes in user code. | |
2955 */ | |
2956 core.Map<core.String, core.String> properties; | |
2957 | |
2958 SparkJob(); | |
2959 | |
2960 SparkJob.fromJson(core.Map _json) { | |
2961 if (_json.containsKey("archiveUris")) { | |
2962 archiveUris = _json["archiveUris"]; | |
2963 } | |
2964 if (_json.containsKey("args")) { | |
2965 args = _json["args"]; | |
2966 } | |
2967 if (_json.containsKey("fileUris")) { | |
2968 fileUris = _json["fileUris"]; | |
2969 } | |
2970 if (_json.containsKey("jarFileUris")) { | |
2971 jarFileUris = _json["jarFileUris"]; | |
2972 } | |
2973 if (_json.containsKey("loggingConfig")) { | |
2974 loggingConfig = new LoggingConfig.fromJson(_json["loggingConfig"]); | |
2975 } | |
2976 if (_json.containsKey("mainClass")) { | |
2977 mainClass = _json["mainClass"]; | |
2978 } | |
2979 if (_json.containsKey("mainJarFileUri")) { | |
2980 mainJarFileUri = _json["mainJarFileUri"]; | |
2981 } | |
2982 if (_json.containsKey("properties")) { | |
2983 properties = _json["properties"]; | |
2984 } | |
2985 } | |
2986 | |
2987 core.Map<core.String, core.Object> toJson() { | |
2988 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c
ore.Object>(); | |
2989 if (archiveUris != null) { | |
2990 _json["archiveUris"] = archiveUris; | |
2991 } | |
2992 if (args != null) { | |
2993 _json["args"] = args; | |
2994 } | |
2995 if (fileUris != null) { | |
2996 _json["fileUris"] = fileUris; | |
2997 } | |
2998 if (jarFileUris != null) { | |
2999 _json["jarFileUris"] = jarFileUris; | |
3000 } | |
3001 if (loggingConfig != null) { | |
3002 _json["loggingConfig"] = (loggingConfig).toJson(); | |
3003 } | |
3004 if (mainClass != null) { | |
3005 _json["mainClass"] = mainClass; | |
3006 } | |
3007 if (mainJarFileUri != null) { | |
3008 _json["mainJarFileUri"] = mainJarFileUri; | |
3009 } | |
3010 if (properties != null) { | |
3011 _json["properties"] = properties; | |
3012 } | |
3013 return _json; | |
3014 } | |
3015 } | |
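
// Example (hand-written sketch, not generated code): a SparkJob that runs a
// main class from a user jar. All URIs and names are hypothetical; typically
// only one of mainClass or mainJarFileUri is set.
SparkJob exampleSparkJob() {
  final job = new SparkJob();
  job.mainClass = "com.example.WordCount"; // hypothetical driver class
  job.jarFileUris = ["gs://example-bucket/wordcount.jar"]; // hypothetical URI
  job.args = ["gs://example-bucket/input", "gs://example-bucket/output"];
  // Set tuning via properties rather than --conf arguments (see class doc).
  job.properties = {"spark.executor.cores": "2"};
  return job;
}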

/**
 * A Cloud Dataproc job for running Apache Spark SQL
 * (http://spark.apache.org/sql/) queries.
 */
class SparkSqlJob {
  /** Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. */
  core.List<core.String> jarFileUris;
  /** Optional. The runtime log config for job execution. */
  LoggingConfig loggingConfig;
  /**
   * Optional. A mapping of property names to values, used to configure Spark
   * SQL's SparkConf. Properties that conflict with values set by the Cloud
   * Dataproc API may be overwritten.
   */
  core.Map<core.String, core.String> properties;
  /** The HCFS URI of the script that contains SQL queries. */
  core.String queryFileUri;
  /** A list of queries. */
  QueryList queryList;
  /**
   * Optional. Mapping of query variable names to values (equivalent to the
   * Spark SQL command: SET name="value";).
   */
  core.Map<core.String, core.String> scriptVariables;

  SparkSqlJob();

  SparkSqlJob.fromJson(core.Map _json) {
    if (_json.containsKey("jarFileUris")) {
      jarFileUris = _json["jarFileUris"];
    }
    if (_json.containsKey("loggingConfig")) {
      loggingConfig = new LoggingConfig.fromJson(_json["loggingConfig"]);
    }
    if (_json.containsKey("properties")) {
      properties = _json["properties"];
    }
    if (_json.containsKey("queryFileUri")) {
      queryFileUri = _json["queryFileUri"];
    }
    if (_json.containsKey("queryList")) {
      queryList = new QueryList.fromJson(_json["queryList"]);
    }
    if (_json.containsKey("scriptVariables")) {
      scriptVariables = _json["scriptVariables"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (jarFileUris != null) {
      _json["jarFileUris"] = jarFileUris;
    }
    if (loggingConfig != null) {
      _json["loggingConfig"] = (loggingConfig).toJson();
    }
    if (properties != null) {
      _json["properties"] = properties;
    }
    if (queryFileUri != null) {
      _json["queryFileUri"] = queryFileUri;
    }
    if (queryList != null) {
      _json["queryList"] = (queryList).toJson();
    }
    if (scriptVariables != null) {
      _json["scriptVariables"] = scriptVariables;
    }
    return _json;
  }
}
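
// Example (hand-written sketch, not generated code): a SparkSqlJob that runs
// an inline query with a substituted variable. Typically only one of
// queryList or queryFileUri is set; the table and variable names are
// illustrative placeholders.
SparkSqlJob exampleSparkSqlJob() {
  final job = new SparkSqlJob();
  job.queryList = new QueryList();
  // Raw string so Dart does not interpolate the Spark SQL ${DAY} reference.
  job.queryList.queries = [r"SELECT * FROM logs WHERE day = '${DAY}'"];
  // Equivalent to the Spark SQL command: SET DAY="2017-01-01";
  job.scriptVariables = {"DAY": "2017-01-01"};
  return job;
}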

/**
 * The Status type defines a logical error model that is suitable for
 * different programming environments, including REST APIs and RPC APIs. It is
 * used by gRPC (https://github.com/grpc). The error model is designed to be:
 * Simple to use and understand for most users
 * Flexible enough to meet unexpected needs
 * Overview: The Status message contains three pieces of data: error code,
 * error message, and error details. The error code should be an enum value of
 * google.rpc.Code, but it may accept additional error codes if needed. The
 * error message should be a developer-facing English message that helps
 * developers understand and resolve the error. If a localized user-facing
 * error message is needed, put the localized message in the error details or
 * localize it in the client. The optional error details may contain arbitrary
 * information about the error. There is a predefined set of error detail
 * types in the package google.rpc that can be used for common error
 * conditions.
 * Language mapping: The Status message is the logical representation of the
 * error model, but it is not necessarily the actual wire format. When the
 * Status message is exposed in different client libraries and different wire
 * protocols, it can be mapped differently. For example, it will likely be
 * mapped to some exceptions in Java, but more likely mapped to some error
 * codes in C.
 * Other uses: The error model and the Status message can be used in a variety
 * of environments, either with or without APIs, to provide a consistent
 * developer experience across different environments. Example uses of this
 * error model include:
 * Partial errors. If a service needs to return partial errors to the client,
 * it may embed the Status in the normal response to indicate the partial
 * errors.
 * Workflow errors. A typical workflow has multiple steps. Each step may have
 * a Status message for error reporting.
 * Batch operations. If a client uses batch request and batch response, the
 * Status message should be used directly inside batch response, one for each
 * error sub-response.
 * Asynchronous operations. If an API call embeds asynchronous operation
 * results in its response, the status of those operations should be
 * represented directly using the Status message.
 * Logging. If some API errors are stored in logs, the message Status could be
 * used directly after any stripping needed for security/privacy reasons.
 */
class Status {
  /** The status code, which should be an enum value of google.rpc.Code. */
  core.int code;
  /**
   * A list of messages that carry the error details. There will be a common
   * set of message types for APIs to use.
   *
   * The values for Object must be JSON objects. It can consist of `num`,
   * `String`, `bool` and `null` as well as `Map` and `List` values.
   */
  core.List<core.Map<core.String, core.Object>> details;
  /**
   * A developer-facing error message, which should be in English. Any
   * user-facing error message should be localized and sent in the
   * google.rpc.Status.details field, or localized by the client.
   */
  core.String message;

  Status();

  Status.fromJson(core.Map _json) {
    if (_json.containsKey("code")) {
      code = _json["code"];
    }
    if (_json.containsKey("details")) {
      details = _json["details"];
    }
    if (_json.containsKey("message")) {
      message = _json["message"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (code != null) {
      _json["code"] = code;
    }
    if (details != null) {
      _json["details"] = details;
    }
    if (message != null) {
      _json["message"] = message;
    }
    return _json;
  }
}
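
// Example (hand-written sketch, not generated code): decoding a Status from a
// JSON map such as one found in an Operation error or a batch sub-response.
// The payload is illustrative; 5 corresponds to google.rpc.Code.NOT_FOUND.
Status exampleStatus() {
  return new Status.fromJson({
    "code": 5,
    "message": "Cluster not found", // developer-facing, in English
  });
}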

/** A request to submit a job. */
class SubmitJobRequest {
  /** Required. The job resource. */
  Job job;

  SubmitJobRequest();

  SubmitJobRequest.fromJson(core.Map _json) {
    if (_json.containsKey("job")) {
      job = new Job.fromJson(_json["job"]);
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (job != null) {
      _json["job"] = (job).toJson();
    }
    return _json;
  }
}
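
// Example (hand-written sketch, not generated code): wrapping a Job in a
// SubmitJobRequest and submitting it. This assumes the jobs.submit method
// generated elsewhere in this library, with the signature
// submit(SubmitJobRequest request, String projectId, String region); the
// project ID and region are hypothetical, and configuring the Job itself
// (placement, job type such as sparkSqlJob) is elided here.
async.Future<Job> exampleSubmit(DataprocApi api, Job job) {
  final request = new SubmitJobRequest();
  request.job = job; // required
  return api.projects.regions.jobs
      .submit(request, "example-project", "global");
}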

/**
 * A YARN application created by a job. Application information is a subset of
 * <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
 * Beta Feature: This report is available for testing purposes only. It may be
 * changed before final release.
 */
class YarnApplication {
  /** Required. The application name. */
  core.String name;
  /** Required. The numerical progress of the application, from 1 to 100. */
  core.double progress;
  /**
   * Required. The application state.
   * Possible string values are:
   * - "STATE_UNSPECIFIED" : Status is unspecified.
   * - "NEW" : Status is NEW.
   * - "NEW_SAVING" : Status is NEW_SAVING.
   * - "SUBMITTED" : Status is SUBMITTED.
   * - "ACCEPTED" : Status is ACCEPTED.
   * - "RUNNING" : Status is RUNNING.
   * - "FINISHED" : Status is FINISHED.
   * - "FAILED" : Status is FAILED.
   * - "KILLED" : Status is KILLED.
   */
  core.String state;
  /**
   * Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or
   * TimelineServer that provides application-specific information. The URL
   * uses the internal hostname, and requires a proxy server for resolution
   * and, possibly, access.
   */
  core.String trackingUrl;

  YarnApplication();

  YarnApplication.fromJson(core.Map _json) {
    if (_json.containsKey("name")) {
      name = _json["name"];
    }
    if (_json.containsKey("progress")) {
      progress = _json["progress"];
    }
    if (_json.containsKey("state")) {
      state = _json["state"];
    }
    if (_json.containsKey("trackingUrl")) {
      trackingUrl = _json["trackingUrl"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
    if (name != null) {
      _json["name"] = name;
    }
    if (progress != null) {
      _json["progress"] = progress;
    }
    if (state != null) {
      _json["state"] = state;
    }
    if (trackingUrl != null) {
      _json["trackingUrl"] = trackingUrl;
    }
    return _json;
  }
}
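
// Example (hand-written sketch, not generated code): inspecting a
// YarnApplication, e.g. one reported alongside a job's status. The state
// strings checked here are the terminal values documented on the state field.
core.bool isYarnApplicationDone(YarnApplication app) {
  // FINISHED, FAILED, and KILLED are the terminal YARN states listed above.
  return app.state == "FINISHED" ||
      app.state == "FAILED" ||
      app.state == "KILLED";
}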