OLD | NEW |
1 // This is a generated file (see the discoveryapis_generator project). | 1 // This is a generated file (see the discoveryapis_generator project). |
2 | 2 |
3 library googleapis.dataproc.v1; | 3 library googleapis.dataproc.v1; |
4 | 4 |
5 import 'dart:core' as core; | 5 import 'dart:core' as core; |
6 import 'dart:async' as async; | 6 import 'dart:async' as async; |
7 import 'dart:convert' as convert; | 7 import 'dart:convert' as convert; |
8 | 8 |
9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; | 9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; |
10 import 'package:http/http.dart' as http; | 10 import 'package:http/http.dart' as http; |
11 | 11 |
12 export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' show | 12 export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' show |
13 ApiRequestError, DetailedApiRequestError; | 13 ApiRequestError, DetailedApiRequestError; |
14 | 14 |
15 const core.String USER_AGENT = 'dart-api-client dataproc/v1'; | 15 const core.String USER_AGENT = 'dart-api-client dataproc/v1'; |
16 | 16 |
17 /** | 17 /** Manages Hadoop-based clusters and jobs on Google Cloud Platform. */ |
18 * An API for managing Hadoop-based clusters and jobs on Google Cloud Platform. | |
19 */ | |
20 class DataprocApi { | 18 class DataprocApi { |
21 /** View and manage your data across Google Cloud Platform services */ | 19 /** View and manage your data across Google Cloud Platform services */ |
22 static const CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"; | 20 static const CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"; |
23 | 21 |
24 | 22 |
25 final commons.ApiRequester _requester; | 23 final commons.ApiRequester _requester; |
26 | 24 |
27 ProjectsResourceApi get projects => new ProjectsResourceApi(_requester); | 25 ProjectsResourceApi get projects => new ProjectsResourceApi(_requester); |
28 | 26 |
29 DataprocApi(http.Client client, {core.String rootUrl: "https://dataproc.googleapis.com/", core.String servicePath: ""}) : | 27 DataprocApi(http.Client client, {core.String rootUrl: "https://dataproc.googleapis.com/", core.String servicePath: ""}) : |
(...skipping 29 matching lines...) |
59 ProjectsRegionsClustersResourceApi(commons.ApiRequester client) : | 57 ProjectsRegionsClustersResourceApi(commons.ApiRequester client) : |
60 _requester = client; | 58 _requester = client; |
61 | 59 |
62 /** | 60 /** |
63 * Creates a cluster in a project. | 61 * Creates a cluster in a project. |
64 * | 62 * |
65 * [request] - The metadata request object. | 63 * [request] - The metadata request object. |
66 * | 64 * |
67 * Request parameters: | 65 * Request parameters: |
68 * | 66 * |
69 * [projectId] - [Required] The ID of the Google Cloud Platform project that | 67 * [projectId] - Required The ID of the Google Cloud Platform project that the |
70 * the cluster belongs to. | 68 * cluster belongs to. |
71 * | 69 * |
72 * [region] - [Required] The Cloud Dataproc region in which to handle the | 70 * [region] - Required The Cloud Dataproc region in which to handle the |
73 * request. | 71 * request. |
74 * | 72 * |
75 * Completes with a [Operation]. | 73 * Completes with a [Operation]. |
76 * | 74 * |
77 * Completes with a [commons.ApiRequestError] if the API endpoint returned an | 75 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
78 * error. | 76 * error. |
79 * | 77 * |
80 * If the used [http.Client] completes with an error when making a REST call, | 78 * If the used [http.Client] completes with an error when making a REST call, |
81 * this method will complete with the same error. | 79 * this method will complete with the same error. |
82 */ | 80 */ |
(...skipping 25 matching lines...) |
108 uploadMedia: _uploadMedia, | 106 uploadMedia: _uploadMedia, |
109 downloadOptions: _downloadOptions); | 107 downloadOptions: _downloadOptions); |
110 return _response.then((data) => new Operation.fromJson(data)); | 108 return _response.then((data) => new Operation.fromJson(data)); |
111 } | 109 } |
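
For context on the create call above, a minimal usage sketch follows. It is a sketch under assumptions, not code from this diff: it assumes an authenticated http.Client obtained elsewhere (for example via the googleapis_auth package), the projects.regions.clusters accessor chain this generator normally emits for nested resources, and that create takes the Cluster to be created as its request object (as patch below does). The project ID, region, and cluster name are hypothetical.

  import 'package:http/http.dart' as http;
  import 'package:googleapis/dataproc/v1.dart' as dataproc;

  void createExampleCluster(http.Client client) {
    var api = new dataproc.DataprocApi(client);
    var cluster = new dataproc.Cluster()
      ..projectId = 'my-project'          // hypothetical project ID
      ..clusterName = 'example-cluster';  // hypothetical cluster name
    api.projects.regions.clusters
        .create(cluster, 'my-project', 'global')
        .then((operation) {
      // create() completes with a long-running Operation rather than the
      // Cluster itself; poll the operation to learn when the cluster is ready.
      print(operation);
    });
  }
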
112 | 110 |
113 /** | 111 /** |
114 * Deletes a cluster in a project. | 112 * Deletes a cluster in a project. |
115 * | 113 * |
116 * Request parameters: | 114 * Request parameters: |
117 * | 115 * |
118 * [projectId] - [Required] The ID of the Google Cloud Platform project that | 116 * [projectId] - Required The ID of the Google Cloud Platform project that the |
119 * the cluster belongs to. | 117 * cluster belongs to. |
120 * | 118 * |
121 * [region] - [Required] The Cloud Dataproc region in which to handle the | 119 * [region] - Required The Cloud Dataproc region in which to handle the |
122 * request. | 120 * request. |
123 * | 121 * |
124 * [clusterName] - [Required] The cluster name. | 122 * [clusterName] - Required The cluster name. |
125 * | 123 * |
126 * Completes with a [Operation]. | 124 * Completes with a [Operation]. |
127 * | 125 * |
128 * Completes with a [commons.ApiRequestError] if the API endpoint returned an | 126 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
129 * error. | 127 * error. |
130 * | 128 * |
131 * If the used [http.Client] completes with an error when making a REST call, | 129 * If the used [http.Client] completes with an error when making a REST call, |
132 * this method will complete with the same error. | 130 * this method will complete with the same error. |
133 */ | 131 */ |
134 async.Future<Operation> delete(core.String projectId, core.String region, core.String clusterName) { | 132 async.Future<Operation> delete(core.String projectId, core.String region, core.String clusterName) { |
(...skipping 21 matching lines...) |
156 body: _body, | 154 body: _body, |
157 queryParams: _queryParams, | 155 queryParams: _queryParams, |
158 uploadOptions: _uploadOptions, | 156 uploadOptions: _uploadOptions, |
159 uploadMedia: _uploadMedia, | 157 uploadMedia: _uploadMedia, |
160 downloadOptions: _downloadOptions); | 158 downloadOptions: _downloadOptions); |
161 return _response.then((data) => new Operation.fromJson(data)); | 159 return _response.then((data) => new Operation.fromJson(data)); |
162 } | 160 } |
163 | 161 |
164 /** | 162 /** |
165 * Gets cluster diagnostic information. After the operation completes, the | 163 * Gets cluster diagnostic information. After the operation completes, the |
166 * Operation.response field contains `DiagnoseClusterOutputLocation`. | 164 * Operation.response field contains DiagnoseClusterOutputLocation. |
167 * | 165 * |
168 * [request] - The metadata request object. | 166 * [request] - The metadata request object. |
169 * | 167 * |
170 * Request parameters: | 168 * Request parameters: |
171 * | 169 * |
172 * [projectId] - [Required] The ID of the Google Cloud Platform project that | 170 * [projectId] - Required The ID of the Google Cloud Platform project that the |
173 * the cluster belongs to. | 171 * cluster belongs to. |
174 * | 172 * |
175 * [region] - [Required] The Cloud Dataproc region in which to handle the | 173 * [region] - Required The Cloud Dataproc region in which to handle the |
176 * request. | 174 * request. |
177 * | 175 * |
178 * [clusterName] - [Required] The cluster name. | 176 * [clusterName] - Required The cluster name. |
179 * | 177 * |
180 * Completes with a [Operation]. | 178 * Completes with a [Operation]. |
181 * | 179 * |
182 * Completes with a [commons.ApiRequestError] if the API endpoint returned an | 180 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
183 * error. | 181 * error. |
184 * | 182 * |
185 * If the used [http.Client] completes with an error when making a REST call, | 183 * If the used [http.Client] completes with an error when making a REST call, |
186 * this method will complete with the same error. | 184 * this method will complete with the same error. |
187 */ | 185 */ |
188 async.Future<Operation> diagnose(DiagnoseClusterRequest request, core.String projectId, core.String region, core.String clusterName) { | 186 async.Future<Operation> diagnose(DiagnoseClusterRequest request, core.String projectId, core.String region, core.String clusterName) { |
(...skipping 27 matching lines...) |
216 uploadMedia: _uploadMedia, | 214 uploadMedia: _uploadMedia, |
217 downloadOptions: _downloadOptions); | 215 downloadOptions: _downloadOptions); |
218 return _response.then((data) => new Operation.fromJson(data)); | 216 return _response.then((data) => new Operation.fromJson(data)); |
219 } | 217 } |
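
A sketch of the diagnose flow documented above, reusing a DataprocApi instance (api) constructed as in the earlier create sketch; the project, region, and cluster names are hypothetical.

  var request = new dataproc.DiagnoseClusterRequest();
  api.projects.regions.clusters
      .diagnose(request, 'my-project', 'global', 'example-cluster')
      .then((operation) {
    // Once the returned Operation completes, its response field is documented
    // above to contain a DiagnoseClusterOutputLocation.
    print(operation);
  });
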
220 | 218 |
221 /** | 219 /** |
222 * Gets the resource representation for a cluster in a project. | 220 * Gets the resource representation for a cluster in a project. |
223 * | 221 * |
224 * Request parameters: | 222 * Request parameters: |
225 * | 223 * |
226 * [projectId] - [Required] The ID of the Google Cloud Platform project that | 224 * [projectId] - Required The ID of the Google Cloud Platform project that the |
227 * the cluster belongs to. | 225 * cluster belongs to. |
228 * | 226 * |
229 * [region] - [Required] The Cloud Dataproc region in which to handle the | 227 * [region] - Required The Cloud Dataproc region in which to handle the |
230 * request. | 228 * request. |
231 * | 229 * |
232 * [clusterName] - [Required] The cluster name. | 230 * [clusterName] - Required The cluster name. |
233 * | 231 * |
234 * Completes with a [Cluster]. | 232 * Completes with a [Cluster]. |
235 * | 233 * |
236 * Completes with a [commons.ApiRequestError] if the API endpoint returned an | 234 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
237 * error. | 235 * error. |
238 * | 236 * |
239 * If the used [http.Client] completes with an error when making a REST call, | 237 * If the used [http.Client] completes with an error when making a REST call, |
240 * this method will complete with the same error. | 238 * this method will complete with the same error. |
241 */ | 239 */ |
242 async.Future<Cluster> get(core.String projectId, core.String region, core.String clusterName) { | 240 async.Future<Cluster> get(core.String projectId, core.String region, core.String clusterName) { |
(...skipping 24 matching lines...) |
267 uploadMedia: _uploadMedia, | 265 uploadMedia: _uploadMedia, |
268 downloadOptions: _downloadOptions); | 266 downloadOptions: _downloadOptions); |
269 return _response.then((data) => new Cluster.fromJson(data)); | 267 return _response.then((data) => new Cluster.fromJson(data)); |
270 } | 268 } |
271 | 269 |
272 /** | 270 /** |
273 * Lists all regions/{region}/clusters in a project. | 271 * Lists all regions/{region}/clusters in a project. |
274 * | 272 * |
275 * Request parameters: | 273 * Request parameters: |
276 * | 274 * |
277 * [projectId] - [Required] The ID of the Google Cloud Platform project that | 275 * [projectId] - Required The ID of the Google Cloud Platform project that the |
278 * the cluster belongs to. | 276 * cluster belongs to. |
279 * | 277 * |
280 * [region] - [Required] The Cloud Dataproc region in which to handle the | 278 * [region] - Required The Cloud Dataproc region in which to handle the |
281 * request. | 279 * request. |
282 * | 280 * |
283 * [filter] - [Optional] A filter constraining the clusters to list. Filters | 281 * [pageToken] - Optional The standard List page token. |
284 * are case-sensitive and have the following syntax: field:value [field:value] | |
285 * ... or field = value [AND [field = value]] ... where **field** is one of | |
286 * `status.state`, `clusterName`, or `labels.[KEY]`, and `[KEY]` is a label | |
287 * key. **value** can be `*` to match all values. `status.state` can be one of | |
288 * the following: `ACTIVE`, `INACTIVE`, `CREATING`, `RUNNING`, `ERROR`, | |
289 * `DELETING`, or `UPDATING`. `ACTIVE` contains the `CREATING`, `UPDATING`, | |
290 * and `RUNNING` states. `INACTIVE` contains the `DELETING` and `ERROR` | |
291 * states. `clusterName` is the name of the cluster provided at creation time. | |
292 * Only the logical `AND` operator is supported; space-separated items are | |
293 * treated as having an implicit `AND` operator. Example valid filters are: | |
294 * status.state:ACTIVE clusterName:mycluster labels.env:staging \ | |
295 * labels.starred:* and status.state = ACTIVE AND clusterName = mycluster \ | |
296 * AND labels.env = staging AND labels.starred = * | |
297 * | 282 * |
298 * [pageSize] - [Optional] The standard List page size. | 283 * [pageSize] - Optional The standard List page size. |
299 * | 284 * |
300 * [pageToken] - [Optional] The standard List page token. | 285 * [filter] - Optional A filter constraining the clusters to list. Filters are |
| 286 * case-sensitive and have the following syntax:field = value AND field = |
| 287 * value ...where field is one of status.state, clusterName, or labels.[KEY], |
| 288 * and [KEY] is a label key. value can be * to match all values. status.state |
| 289 * can be one of the following: ACTIVE, INACTIVE, CREATING, RUNNING, ERROR, |
| 290 * DELETING, or UPDATING. ACTIVE contains the CREATING, UPDATING, and RUNNING |
| 291 * states. INACTIVE contains the DELETING and ERROR states. clusterName is the |
| 292 * name of the cluster provided at creation time. Only the logical AND |
| 293 * operator is supported; space-separated items are treated as having an |
| 294 * implicit AND operator.Example filter:status.state = ACTIVE AND clusterName |
| 295 * = mycluster AND labels.env = staging AND labels.starred = * |
301 * | 296 * |
302 * Completes with a [ListClustersResponse]. | 297 * Completes with a [ListClustersResponse]. |
303 * | 298 * |
304 * Completes with a [commons.ApiRequestError] if the API endpoint returned an | 299 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
305 * error. | 300 * error. |
306 * | 301 * |
307 * If the used [http.Client] completes with an error when making a REST call, | 302 * If the used [http.Client] completes with an error when making a REST call, |
308 * this method will complete with the same error. | 303 * this method will complete with the same error. |
309 */ | 304 */ |
310 async.Future<ListClustersResponse> list(core.String projectId, core.String region, {core.String filter, core.int pageSize, core.String pageToken}) { | 305 async.Future<ListClustersResponse> list(core.String projectId, core.String region, {core.String pageToken, core.int pageSize, core.String filter}) { |
311 var _url = null; | 306 var _url = null; |
312 var _queryParams = new core.Map(); | 307 var _queryParams = new core.Map(); |
313 var _uploadMedia = null; | 308 var _uploadMedia = null; |
314 var _uploadOptions = null; | 309 var _uploadOptions = null; |
315 var _downloadOptions = commons.DownloadOptions.Metadata; | 310 var _downloadOptions = commons.DownloadOptions.Metadata; |
316 var _body = null; | 311 var _body = null; |
317 | 312 |
318 if (projectId == null) { | 313 if (projectId == null) { |
319 throw new core.ArgumentError("Parameter projectId is required."); | 314 throw new core.ArgumentError("Parameter projectId is required."); |
320 } | 315 } |
321 if (region == null) { | 316 if (region == null) { |
322 throw new core.ArgumentError("Parameter region is required."); | 317 throw new core.ArgumentError("Parameter region is required."); |
323 } | 318 } |
| 319 if (pageToken != null) { |
| 320 _queryParams["pageToken"] = [pageToken]; |
| 321 } |
| 322 if (pageSize != null) { |
| 323 _queryParams["pageSize"] = ["${pageSize}"]; |
| 324 } |
324 if (filter != null) { | 325 if (filter != null) { |
325 _queryParams["filter"] = [filter]; | 326 _queryParams["filter"] = [filter]; |
326 } | 327 } |
327 if (pageSize != null) { | |
328 _queryParams["pageSize"] = ["${pageSize}"]; | |
329 } | |
330 if (pageToken != null) { | |
331 _queryParams["pageToken"] = [pageToken]; | |
332 } | |
333 | 328 |
334 _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters'; | 329 _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters'; |
335 | 330 |
336 var _response = _requester.request(_url, | 331 var _response = _requester.request(_url, |
337 "GET", | 332 "GET", |
338 body: _body, | 333 body: _body, |
339 queryParams: _queryParams, | 334 queryParams: _queryParams, |
340 uploadOptions: _uploadOptions, | 335 uploadOptions: _uploadOptions, |
341 uploadMedia: _uploadMedia, | 336 uploadMedia: _uploadMedia, |
342 downloadOptions: _downloadOptions); | 337 downloadOptions: _downloadOptions); |
343 return _response.then((data) => new ListClustersResponse.fromJson(data)); | 338 return _response.then((data) => new ListClustersResponse.fromJson(data)); |
344 } | 339 } |
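
The new filter grammar documented above drops the old colon-style shorthand in favor of field = value terms joined by AND. A usage sketch, assuming ListClustersResponse exposes the matched clusters as a clusters list (that class is not shown in this diff) and reusing the api instance from the create sketch:

  api.projects.regions.clusters
      .list('my-project', 'global',
          filter: 'status.state = ACTIVE AND labels.env = staging',
          pageSize: 50)
      .then((response) {
    for (var cluster in response.clusters) {
      print(cluster.clusterName);
    }
  });
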
345 | 340 |
346 /** | 341 /** |
347 * Updates a cluster in a project. | 342 * Updates a cluster in a project. |
348 * | 343 * |
349 * [request] - The metadata request object. | 344 * [request] - The metadata request object. |
350 * | 345 * |
351 * Request parameters: | 346 * Request parameters: |
352 * | 347 * |
353 * [projectId] - [Required] The ID of the Google Cloud Platform project the | 348 * [projectId] - Required The ID of the Google Cloud Platform project the |
354 * cluster belongs to. | 349 * cluster belongs to. |
355 * | 350 * |
356 * [region] - [Required] The Cloud Dataproc region in which to handle the | 351 * [region] - Required The Cloud Dataproc region in which to handle the |
357 * request. | 352 * request. |
358 * | 353 * |
359 * [clusterName] - [Required] The cluster name. | 354 * [clusterName] - Required The cluster name. |
360 * | 355 * |
361 * [updateMask] - [Required] Specifies the path, relative to Cluster, of the | 356 * [updateMask] - Required Specifies the path, relative to |
362 * field to update. For example, to change the number of workers in a cluster | 357 * <code>Cluster</code>, of the field to update. For example, to change the |
363 * to 5, the update_mask parameter would be specified as | 358 * number of workers in a cluster to 5, the <code>update_mask</code> parameter |
364 * config.worker_config.num_instances, and the `PATCH` request body would | 359 * would be specified as <code>config.worker_config.num_instances</code>, and |
365 * specify the new value, as follows: { "config":{ "workerConfig":{ | 360 * the PATCH request body would specify the new value, as follows: |
366 * "numInstances":"5" } } } Similarly, to change the number of preemptible | 361 * { |
367 * workers in a cluster to 5, the update_mask parameter would be | 362 * "config":{ |
368 * config.secondary_worker_config.num_instances, and the `PATCH` request body | 363 * "workerConfig":{ |
369 * would be set as follows: { "config":{ "secondaryWorkerConfig":{ | 364 * "numInstances":"5" |
370 * "numInstances":"5" } } } Note: Currently, | 365 * } |
371 * config.worker_config.num_instances and | 366 * } |
372 * config.secondary_worker_config.num_instances are the only fields that can | 367 * } |
373 * be updated. | 368 * Similarly, to change the number of preemptible workers in a cluster to 5, |
| 369 * the <code>update_mask</code> parameter would be |
| 370 * <code>config.secondary_worker_config.num_instances</code>, and the PATCH |
| 371 * request body would be set as follows: |
| 372 * { |
| 373 * "config":{ |
| 374 * "secondaryWorkerConfig":{ |
| 375 * "numInstances":"5" |
| 376 * } |
| 377 * } |
| 378 * } |
| 379 * <strong>Note:</strong> Currently, |
| 380 * <code>config.worker_config.num_instances</code> and |
| 381 * <code>config.secondary_worker_config.num_instances</code> are the only |
| 382 * fields that can be updated. |
374 * | 383 * |
375 * Completes with a [Operation]. | 384 * Completes with a [Operation]. |
376 * | 385 * |
377 * Completes with a [commons.ApiRequestError] if the API endpoint returned an | 386 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
378 * error. | 387 * error. |
379 * | 388 * |
380 * If the used [http.Client] completes with an error when making a REST call, | 389 * If the used [http.Client] completes with an error when making a REST call, |
381 * this method will complete with the same error. | 390 * this method will complete with the same error. |
382 */ | 391 */ |
383 async.Future<Operation> patch(Cluster request, core.String projectId, core.String region, core.String clusterName, {core.String updateMask}) { | 392 async.Future<Operation> patch(Cluster request, core.String projectId, core.String region, core.String clusterName, {core.String updateMask}) { |
(...skipping 36 matching lines...) |
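
The updateMask documentation above maps directly onto the generated classes. A sketch of resizing a cluster's workers to 5, assuming InstanceGroupConfig exposes a numInstances field matching the "numInstances" JSON key in the example request body (InstanceGroupConfig's members are not shown in this diff):

  var resize = new dataproc.Cluster()
    ..config = (new dataproc.ClusterConfig()
      ..workerConfig = (new dataproc.InstanceGroupConfig()..numInstances = 5));
  api.projects.regions.clusters
      .patch(resize, 'my-project', 'global', 'example-cluster',
          updateMask: 'config.worker_config.num_instances')
      .then((operation) => print(operation));
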
420 | 429 |
421 | 430 |
422 class ProjectsRegionsJobsResourceApi { | 431 class ProjectsRegionsJobsResourceApi { |
423 final commons.ApiRequester _requester; | 432 final commons.ApiRequester _requester; |
424 | 433 |
425 ProjectsRegionsJobsResourceApi(commons.ApiRequester client) : | 434 ProjectsRegionsJobsResourceApi(commons.ApiRequester client) : |
426 _requester = client; | 435 _requester = client; |
427 | 436 |
428 /** | 437 /** |
429 * Starts a job cancellation request. To access the job resource after | 438 * Starts a job cancellation request. To access the job resource after |
430 * cancellation, call | 439 * cancellation, call regions/{region}/jobs.list or regions/{region}/jobs.get. |
431 * [regions/{region}/jobs.list](/dataproc/reference/rest/v1/projects.regions.jobs/list) | |
432 * or | |
433 * [regions/{region}/jobs.get](/dataproc/reference/rest/v1/projects.regions.jobs/get). | |
434 * | 440 * |
435 * [request] - The metadata request object. | 441 * [request] - The metadata request object. |
436 * | 442 * |
437 * Request parameters: | 443 * Request parameters: |
438 * | 444 * |
439 * [projectId] - [Required] The ID of the Google Cloud Platform project that | 445 * [projectId] - Required The ID of the Google Cloud Platform project that the |
440 * the job belongs to. | 446 * job belongs to. |
441 * | 447 * |
442 * [region] - [Required] The Cloud Dataproc region in which to handle the | 448 * [region] - Required The Cloud Dataproc region in which to handle the |
443 * request. | 449 * request. |
444 * | 450 * |
445 * [jobId] - [Required] The job ID. | 451 * [jobId] - Required The job ID. |
446 * | 452 * |
447 * Completes with a [Job]. | 453 * Completes with a [Job]. |
448 * | 454 * |
449 * Completes with a [commons.ApiRequestError] if the API endpoint returned an | 455 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
450 * error. | 456 * error. |
451 * | 457 * |
452 * If the used [http.Client] completes with an error when making a REST call, | 458 * If the used [http.Client] completes with an error when making a REST call, |
453 * this method will complete with the same error. | 459 * this method will complete with the same error. |
454 */ | 460 */ |
455 async.Future<Job> cancel(CancelJobRequest request, core.String projectId, core.String region, core.String jobId) { | 461 async.Future<Job> cancel(CancelJobRequest request, core.String projectId, core.String region, core.String jobId) { |
(...skipping 24 matching lines...) |
480 body: _body, | 486 body: _body, |
481 queryParams: _queryParams, | 487 queryParams: _queryParams, |
482 uploadOptions: _uploadOptions, | 488 uploadOptions: _uploadOptions, |
483 uploadMedia: _uploadMedia, | 489 uploadMedia: _uploadMedia, |
484 downloadOptions: _downloadOptions); | 490 downloadOptions: _downloadOptions); |
485 return _response.then((data) => new Job.fromJson(data)); | 491 return _response.then((data) => new Job.fromJson(data)); |
486 } | 492 } |
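
As the doc comment notes, cancel does not return the final job state; a follow-up get (or list) is needed. A sketch with a hypothetical job ID, assuming Job exposes a status field as in the REST resource (the Job class is not shown in this diff):

  api.projects.regions.jobs
      .cancel(new dataproc.CancelJobRequest(), 'my-project', 'global', 'job-1234')
      .then((_) =>
          api.projects.regions.jobs.get('my-project', 'global', 'job-1234'))
      .then((job) => print(job.status));
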
487 | 493 |
488 /** | 494 /** |
489 * Deletes the job from the project. If the job is active, the delete fails, | 495 * Deletes the job from the project. If the job is active, the delete fails, |
490 * and the response returns `FAILED_PRECONDITION`. | 496 * and the response returns FAILED_PRECONDITION. |
491 * | 497 * |
492 * Request parameters: | 498 * Request parameters: |
493 * | 499 * |
494 * [projectId] - [Required] The ID of the Google Cloud Platform project that | 500 * [projectId] - Required The ID of the Google Cloud Platform project that the |
495 * the job belongs to. | 501 * job belongs to. |
496 * | 502 * |
497 * [region] - [Required] The Cloud Dataproc region in which to handle the | 503 * [region] - Required The Cloud Dataproc region in which to handle the |
498 * request. | 504 * request. |
499 * | 505 * |
500 * [jobId] - [Required] The job ID. | 506 * [jobId] - Required The job ID. |
501 * | 507 * |
502 * Completes with a [Empty]. | 508 * Completes with a [Empty]. |
503 * | 509 * |
504 * Completes with a [commons.ApiRequestError] if the API endpoint returned an | 510 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
505 * error. | 511 * error. |
506 * | 512 * |
507 * If the used [http.Client] completes with an error when making a REST call, | 513 * If the used [http.Client] completes with an error when making a REST call, |
508 * this method will complete with the same error. | 514 * this method will complete with the same error. |
509 */ | 515 */ |
510 async.Future<Empty> delete(core.String projectId, core.String region, core.String jobId) { | 516 async.Future<Empty> delete(core.String projectId, core.String region, core.String jobId) { |
(...skipping 24 matching lines...) |
535 uploadMedia: _uploadMedia, | 541 uploadMedia: _uploadMedia, |
536 downloadOptions: _downloadOptions); | 542 downloadOptions: _downloadOptions); |
537 return _response.then((data) => new Empty.fromJson(data)); | 543 return _response.then((data) => new Empty.fromJson(data)); |
538 } | 544 } |
539 | 545 |
540 /** | 546 /** |
541 * Gets the resource representation for a job in a project. | 547 * Gets the resource representation for a job in a project. |
542 * | 548 * |
543 * Request parameters: | 549 * Request parameters: |
544 * | 550 * |
545 * [projectId] - [Required] The ID of the Google Cloud Platform project that | 551 * [projectId] - Required The ID of the Google Cloud Platform project that the |
546 * the job belongs to. | 552 * job belongs to. |
547 * | 553 * |
548 * [region] - [Required] The Cloud Dataproc region in which to handle the | 554 * [region] - Required The Cloud Dataproc region in which to handle the |
549 * request. | 555 * request. |
550 * | 556 * |
551 * [jobId] - [Required] The job ID. | 557 * [jobId] - Required The job ID. |
552 * | 558 * |
553 * Completes with a [Job]. | 559 * Completes with a [Job]. |
554 * | 560 * |
555 * Completes with a [commons.ApiRequestError] if the API endpoint returned an | 561 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
556 * error. | 562 * error. |
557 * | 563 * |
558 * If the used [http.Client] completes with an error when making a REST call, | 564 * If the used [http.Client] completes with an error when making a REST call, |
559 * this method will complete with the same error. | 565 * this method will complete with the same error. |
560 */ | 566 */ |
561 async.Future<Job> get(core.String projectId, core.String region, core.String jobId) { | 567 async.Future<Job> get(core.String projectId, core.String region, core.String jobId) { |
(...skipping 24 matching lines...) |
586 uploadMedia: _uploadMedia, | 592 uploadMedia: _uploadMedia, |
587 downloadOptions: _downloadOptions); | 593 downloadOptions: _downloadOptions); |
588 return _response.then((data) => new Job.fromJson(data)); | 594 return _response.then((data) => new Job.fromJson(data)); |
589 } | 595 } |
590 | 596 |
591 /** | 597 /** |
592 * Lists regions/{region}/jobs in a project. | 598 * Lists regions/{region}/jobs in a project. |
593 * | 599 * |
594 * Request parameters: | 600 * Request parameters: |
595 * | 601 * |
596 * [projectId] - [Required] The ID of the Google Cloud Platform project that | 602 * [projectId] - Required The ID of the Google Cloud Platform project that the |
597 * the job belongs to. | 603 * job belongs to. |
598 * | 604 * |
599 * [region] - [Required] The Cloud Dataproc region in which to handle the | 605 * [region] - Required The Cloud Dataproc region in which to handle the |
600 * request. | 606 * request. |
601 * | 607 * |
602 * [pageSize] - [Optional] The number of results to return in each response. | 608 * [pageToken] - Optional The page token, returned by a previous call, to |
603 * | |
604 * [pageToken] - [Optional] The page token, returned by a previous call, to | |
605 * request the next page of results. | 609 * request the next page of results. |
606 * | 610 * |
607 * [clusterName] - [Optional] If set, the returned jobs list includes only | 611 * [pageSize] - Optional The number of results to return in each response. |
608 * jobs that were submitted to the named cluster. | |
609 * | 612 * |
610 * [jobStateMatcher] - [Optional] Specifies enumerated categories of jobs to | 613 * [clusterName] - Optional If set, the returned jobs list includes only jobs |
| 614 * that were submitted to the named cluster. |
| 615 * |
| 616 * [filter] - Optional A filter constraining the jobs to list. Filters are |
| 617 * case-sensitive and have the following syntax:field = value AND field = |
| 618 * value ...where field is status.state or labels.[KEY], and [KEY] is a label |
| 619 * key. value can be * to match all values. status.state can be either ACTIVE |
| 620 * or INACTIVE. Only the logical AND operator is supported; space-separated |
| 621 * items are treated as having an implicit AND operator.Example |
| 622 * filter:status.state = ACTIVE AND labels.env = staging AND labels.starred = |
| 623 * * |
| 624 * |
| 625 * [jobStateMatcher] - Optional Specifies enumerated categories of jobs to |
611 * list (default = match ALL jobs). | 626 * list (default = match ALL jobs). |
612 * Possible string values are: | 627 * Possible string values are: |
613 * - "ALL" : A ALL. | 628 * - "ALL" : A ALL. |
614 * - "ACTIVE" : A ACTIVE. | 629 * - "ACTIVE" : A ACTIVE. |
615 * - "NON_ACTIVE" : A NON_ACTIVE. | 630 * - "NON_ACTIVE" : A NON_ACTIVE. |
616 * | 631 * |
617 * [filter] - [Optional] A filter constraining the jobs to list. Filters are | |
618 * case-sensitive and have the following syntax: field:value] ... or [field = | |
619 * value] AND [field [= value]] ... where **field** is `status.state` or | |
620 * `labels.[KEY]`, and `[KEY]` is a label key. **value** can be `*` to match | |
621 * all values. `status.state` can be either `ACTIVE` or `INACTIVE`. Only the | |
622 * logical `AND` operator is supported; space-separated items are treated as | |
623 * having an implicit `AND` operator. Example valid filters are: | |
624 * status.state:ACTIVE labels.env:staging labels.starred:* and status.state = | |
625 * ACTIVE AND labels.env = staging AND labels.starred = * | |
626 * | |
627 * Completes with a [ListJobsResponse]. | 632 * Completes with a [ListJobsResponse]. |
628 * | 633 * |
629 * Completes with a [commons.ApiRequestError] if the API endpoint returned an | 634 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
630 * error. | 635 * error. |
631 * | 636 * |
632 * If the used [http.Client] completes with an error when making a REST call, | 637 * If the used [http.Client] completes with an error when making a REST call, |
633 * this method will complete with the same error. | 638 * this method will complete with the same error. |
634 */ | 639 */ |
635 async.Future<ListJobsResponse> list(core.String projectId, core.String region, {core.int pageSize, core.String pageToken, core.String clusterName, core.String jobStateMatcher, core.String filter}) { | 640 async.Future<ListJobsResponse> list(core.String projectId, core.String region, {core.String pageToken, core.int pageSize, core.String clusterName, core.String filter, core.String jobStateMatcher}) { |
636 var _url = null; | 641 var _url = null; |
637 var _queryParams = new core.Map(); | 642 var _queryParams = new core.Map(); |
638 var _uploadMedia = null; | 643 var _uploadMedia = null; |
639 var _uploadOptions = null; | 644 var _uploadOptions = null; |
640 var _downloadOptions = commons.DownloadOptions.Metadata; | 645 var _downloadOptions = commons.DownloadOptions.Metadata; |
641 var _body = null; | 646 var _body = null; |
642 | 647 |
643 if (projectId == null) { | 648 if (projectId == null) { |
644 throw new core.ArgumentError("Parameter projectId is required."); | 649 throw new core.ArgumentError("Parameter projectId is required."); |
645 } | 650 } |
646 if (region == null) { | 651 if (region == null) { |
647 throw new core.ArgumentError("Parameter region is required."); | 652 throw new core.ArgumentError("Parameter region is required."); |
648 } | 653 } |
| 654 if (pageToken != null) { |
| 655 _queryParams["pageToken"] = [pageToken]; |
| 656 } |
649 if (pageSize != null) { | 657 if (pageSize != null) { |
650 _queryParams["pageSize"] = ["${pageSize}"]; | 658 _queryParams["pageSize"] = ["${pageSize}"]; |
651 } | 659 } |
652 if (pageToken != null) { | |
653 _queryParams["pageToken"] = [pageToken]; | |
654 } | |
655 if (clusterName != null) { | 660 if (clusterName != null) { |
656 _queryParams["clusterName"] = [clusterName]; | 661 _queryParams["clusterName"] = [clusterName]; |
657 } | 662 } |
| 663 if (filter != null) { |
| 664 _queryParams["filter"] = [filter]; |
| 665 } |
658 if (jobStateMatcher != null) { | 666 if (jobStateMatcher != null) { |
659 _queryParams["jobStateMatcher"] = [jobStateMatcher]; | 667 _queryParams["jobStateMatcher"] = [jobStateMatcher]; |
660 } | 668 } |
661 if (filter != null) { | |
662 _queryParams["filter"] = [filter]; | |
663 } | |
664 | 669 |
665 _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/jobs'; | 670 _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/jobs'; |
666 | 671 |
667 var _response = _requester.request(_url, | 672 var _response = _requester.request(_url, |
668 "GET", | 673 "GET", |
669 body: _body, | 674 body: _body, |
670 queryParams: _queryParams, | 675 queryParams: _queryParams, |
671 uploadOptions: _uploadOptions, | 676 uploadOptions: _uploadOptions, |
672 uploadMedia: _uploadMedia, | 677 uploadMedia: _uploadMedia, |
673 downloadOptions: _downloadOptions); | 678 downloadOptions: _downloadOptions); |
674 return _response.then((data) => new ListJobsResponse.fromJson(data)); | 679 return _response.then((data) => new ListJobsResponse.fromJson(data)); |
675 } | 680 } |
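
A sketch combining the new filter parameter above with jobStateMatcher and clusterName, assuming ListJobsResponse carries the matched jobs in a jobs list (that class is not shown in this diff):

  api.projects.regions.jobs
      .list('my-project', 'global',
          clusterName: 'example-cluster',
          jobStateMatcher: 'ACTIVE',
          filter: 'labels.env = staging')
      .then((response) {
    for (var job in response.jobs) {
      print(job);
    }
  });
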
676 | 681 |
677 /** | 682 /** |
| 683 * Updates a job in a project. |
| 684 * |
| 685 * [request] - The metadata request object. |
| 686 * |
| 687 * Request parameters: |
| 688 * |
| 689 * [projectId] - Required The ID of the Google Cloud Platform project that the |
| 690 * job belongs to. |
| 691 * |
| 692 * [region] - Required The Cloud Dataproc region in which to handle the |
| 693 * request. |
| 694 * |
| 695 * [jobId] - Required The job ID. |
| 696 * |
| 697 * [updateMask] - Required Specifies the path, relative to <code>Job</code>, |
| 698 * of the field to update. For example, to update the labels of a Job the |
| 699 * <code>update_mask</code> parameter would be specified as |
| 700 * <code>labels</code>, and the PATCH request body would specify the new |
| 701 * value. <strong>Note:</strong> Currently, <code>labels</code> is the only |
| 702 * field that can be updated. |
| 703 * |
| 704 * Completes with a [Job]. |
| 705 * |
| 706 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
| 707 * error. |
| 708 * |
| 709 * If the used [http.Client] completes with an error when making a REST call, |
| 710 * this method will complete with the same error. |
| 711 */ |
| 712 async.Future<Job> patch(Job request, core.String projectId, core.String region, core.String jobId, {core.String updateMask}) {
| 713 var _url = null; |
| 714 var _queryParams = new core.Map(); |
| 715 var _uploadMedia = null; |
| 716 var _uploadOptions = null; |
| 717 var _downloadOptions = commons.DownloadOptions.Metadata; |
| 718 var _body = null; |
| 719 |
| 720 if (request != null) { |
| 721 _body = convert.JSON.encode((request).toJson()); |
| 722 } |
| 723 if (projectId == null) { |
| 724 throw new core.ArgumentError("Parameter projectId is required."); |
| 725 } |
| 726 if (region == null) { |
| 727 throw new core.ArgumentError("Parameter region is required."); |
| 728 } |
| 729 if (jobId == null) { |
| 730 throw new core.ArgumentError("Parameter jobId is required."); |
| 731 } |
| 732 if (updateMask != null) { |
| 733 _queryParams["updateMask"] = [updateMask]; |
| 734 } |
| 735 |
| 736 _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId');
| 737 |
| 738 var _response = _requester.request(_url, |
| 739 "PATCH", |
| 740 body: _body, |
| 741 queryParams: _queryParams, |
| 742 uploadOptions: _uploadOptions, |
| 743 uploadMedia: _uploadMedia, |
| 744 downloadOptions: _downloadOptions); |
| 745 return _response.then((data) => new Job.fromJson(data)); |
| 746 } |
| 747 |
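
Since labels is currently the only updatable job field, a patch with the method added above reduces to sending a Job carrying the new labels plus updateMask=labels. A sketch, assuming Job exposes a labels map as Cluster does (the Job class is not shown in this diff):

  var jobPatch = new dataproc.Job()..labels = {'env': 'staging'};
  api.projects.regions.jobs
      .patch(jobPatch, 'my-project', 'global', 'job-1234', updateMask: 'labels')
      .then((job) => print(job.labels));
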
| 748 /** |
678 * Submits a job to a cluster. | 749 * Submits a job to a cluster. |
679 * | 750 * |
680 * [request] - The metadata request object. | 751 * [request] - The metadata request object. |
681 * | 752 * |
682 * Request parameters: | 753 * Request parameters: |
683 * | 754 * |
684 * [projectId] - [Required] The ID of the Google Cloud Platform project that | 755 * [projectId] - Required The ID of the Google Cloud Platform project that the |
685 * the job belongs to. | 756 * job belongs to. |
686 * | 757 * |
687 * [region] - [Required] The Cloud Dataproc region in which to handle the | 758 * [region] - Required The Cloud Dataproc region in which to handle the |
688 * request. | 759 * request. |
689 * | 760 * |
690 * Completes with a [Job]. | 761 * Completes with a [Job]. |
691 * | 762 * |
692 * Completes with a [commons.ApiRequestError] if the API endpoint returned an | 763 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
693 * error. | 764 * error. |
694 * | 765 * |
695 * If the used [http.Client] completes with an error when making a REST call, | 766 * If the used [http.Client] completes with an error when making a REST call, |
696 * this method will complete with the same error. | 767 * this method will complete with the same error. |
697 */ | 768 */ |
(...skipping 33 matching lines...) |
731 class ProjectsRegionsOperationsResourceApi { | 802 class ProjectsRegionsOperationsResourceApi { |
732 final commons.ApiRequester _requester; | 803 final commons.ApiRequester _requester; |
733 | 804 |
734 ProjectsRegionsOperationsResourceApi(commons.ApiRequester client) : | 805 ProjectsRegionsOperationsResourceApi(commons.ApiRequester client) : |
735 _requester = client; | 806 _requester = client; |
736 | 807 |
737 /** | 808 /** |
738 * Starts asynchronous cancellation on a long-running operation. The server | 809 * Starts asynchronous cancellation on a long-running operation. The server |
739 * makes a best effort to cancel the operation, but success is not guaranteed. | 810 * makes a best effort to cancel the operation, but success is not guaranteed. |
740 * If the server doesn't support this method, it returns | 811 * If the server doesn't support this method, it returns |
741 * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or | 812 * google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or |
742 * other methods to check whether the cancellation succeeded or whether the | 813 * other methods to check whether the cancellation succeeded or whether the |
743 * operation completed despite cancellation. On successful cancellation, the | 814 * operation completed despite cancellation. On successful cancellation, the |
744 * operation is not deleted; instead, it becomes an operation with an | 815 * operation is not deleted; instead, it becomes an operation with an |
745 * Operation.error value with a google.rpc.Status.code of 1, corresponding to | 816 * Operation.error value with a google.rpc.Status.code of 1, corresponding to |
746 * `Code.CANCELLED`. | 817 * Code.CANCELLED. |
747 * | 818 * |
748 * Request parameters: | 819 * Request parameters: |
749 * | 820 * |
750 * [name] - The name of the operation resource to be cancelled. | 821 * [name] - The name of the operation resource to be cancelled. |
751 * Value must have pattern "^projects/[^/]+/regions/[^/]+/operations/[^/]+$". | 822 * Value must have pattern "^projects/[^/]+/regions/[^/]+/operations/[^/]+$". |
752 * | 823 * |
753 * Completes with a [Empty]. | 824 * Completes with a [Empty]. |
754 * | 825 * |
755 * Completes with a [commons.ApiRequestError] if the API endpoint returned an | 826 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
756 * error. | 827 * error. |
(...skipping 22 matching lines...) |
779 uploadOptions: _uploadOptions, | 850 uploadOptions: _uploadOptions, |
780 uploadMedia: _uploadMedia, | 851 uploadMedia: _uploadMedia, |
781 downloadOptions: _downloadOptions); | 852 downloadOptions: _downloadOptions); |
782 return _response.then((data) => new Empty.fromJson(data)); | 853 return _response.then((data) => new Empty.fromJson(data)); |
783 } | 854 } |
784 | 855 |
785 /** | 856 /** |
786 * Deletes a long-running operation. This method indicates that the client is | 857 * Deletes a long-running operation. This method indicates that the client is |
787 * no longer interested in the operation result. It does not cancel the | 858 * no longer interested in the operation result. It does not cancel the |
788 * operation. If the server doesn't support this method, it returns | 859 * operation. If the server doesn't support this method, it returns |
789 * `google.rpc.Code.UNIMPLEMENTED`. | 860 * google.rpc.Code.UNIMPLEMENTED. |
790 * | 861 * |
791 * Request parameters: | 862 * Request parameters: |
792 * | 863 * |
793 * [name] - The name of the operation resource to be deleted. | 864 * [name] - The name of the operation resource to be deleted. |
794 * Value must have pattern "^projects/[^/]+/regions/[^/]+/operations/[^/]+$". | 865 * Value must have pattern "^projects/[^/]+/regions/[^/]+/operations/[^/]+$". |
795 * | 866 * |
796 * Completes with a [Empty]. | 867 * Completes with a [Empty]. |
797 * | 868 * |
798 * Completes with a [commons.ApiRequestError] if the API endpoint returned an | 869 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
799 * error. | 870 * error. |
(...skipping 62 matching lines...) |
862 body: _body, | 933 body: _body, |
863 queryParams: _queryParams, | 934 queryParams: _queryParams, |
864 uploadOptions: _uploadOptions, | 935 uploadOptions: _uploadOptions, |
865 uploadMedia: _uploadMedia, | 936 uploadMedia: _uploadMedia, |
866 downloadOptions: _downloadOptions); | 937 downloadOptions: _downloadOptions); |
867 return _response.then((data) => new Operation.fromJson(data)); | 938 return _response.then((data) => new Operation.fromJson(data)); |
868 } | 939 } |
869 | 940 |
870 /** | 941 /** |
871 * Lists operations that match the specified filter in the request. If the | 942 * Lists operations that match the specified filter in the request. If the |
872 * server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the | 943 * server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name |
873 * `name` binding below allows API services to override the binding to use | 944 * binding below allows API services to override the binding to use different |
874 * different resource name schemes, such as `users / * /operations`. | 945 * resource name schemes, such as users / * /operations. |
875 * | 946 * |
876 * Request parameters: | 947 * Request parameters: |
877 * | 948 * |
878 * [name] - The name of the operation collection. | 949 * [name] - The name of the operation collection. |
879 * Value must have pattern "^projects/[^/]+/regions/[^/]+/operations$". | 950 * Value must have pattern "^projects/[^/]+/regions/[^/]+/operations$". |
880 * | 951 * |
| 952 * [pageSize] - The standard list page size. |
| 953 * |
881 * [filter] - The standard list filter. | 954 * [filter] - The standard list filter. |
882 * | 955 * |
883 * [pageSize] - The standard list page size. | |
884 * | |
885 * [pageToken] - The standard list page token. | 956 * [pageToken] - The standard list page token. |
886 * | 957 * |
887 * Completes with a [ListOperationsResponse]. | 958 * Completes with a [ListOperationsResponse]. |
888 * | 959 * |
889 * Completes with a [commons.ApiRequestError] if the API endpoint returned an | 960 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
890 * error. | 961 * error. |
891 * | 962 * |
892 * If the used [http.Client] completes with an error when making a REST call, | 963 * If the used [http.Client] completes with an error when making a REST call, |
893 * this method will complete with the same error. | 964 * this method will complete with the same error. |
894 */ | 965 */ |
895 async.Future<ListOperationsResponse> list(core.String name, {core.String filter, core.int pageSize, core.String pageToken}) { | 966 async.Future<ListOperationsResponse> list(core.String name, {core.int pageSize, core.String filter, core.String pageToken}) { |
896 var _url = null; | 967 var _url = null; |
897 var _queryParams = new core.Map(); | 968 var _queryParams = new core.Map(); |
898 var _uploadMedia = null; | 969 var _uploadMedia = null; |
899 var _uploadOptions = null; | 970 var _uploadOptions = null; |
900 var _downloadOptions = commons.DownloadOptions.Metadata; | 971 var _downloadOptions = commons.DownloadOptions.Metadata; |
901 var _body = null; | 972 var _body = null; |
902 | 973 |
903 if (name == null) { | 974 if (name == null) { |
904 throw new core.ArgumentError("Parameter name is required."); | 975 throw new core.ArgumentError("Parameter name is required."); |
905 } | 976 } |
| 977 if (pageSize != null) { |
| 978 _queryParams["pageSize"] = ["${pageSize}"]; |
| 979 } |
906 if (filter != null) { | 980 if (filter != null) { |
907 _queryParams["filter"] = [filter]; | 981 _queryParams["filter"] = [filter]; |
908 } | 982 } |
909 if (pageSize != null) { | |
910 _queryParams["pageSize"] = ["${pageSize}"]; | |
911 } | |
912 if (pageToken != null) { | 983 if (pageToken != null) { |
913 _queryParams["pageToken"] = [pageToken]; | 984 _queryParams["pageToken"] = [pageToken]; |
914 } | 985 } |
915 | 986 |
916 _url = 'v1/' + commons.Escaper.ecapeVariableReserved('$name'); | 987 _url = 'v1/' + commons.Escaper.ecapeVariableReserved('$name'); |
917 | 988 |
918 var _response = _requester.request(_url, | 989 var _response = _requester.request(_url, |
919 "GET", | 990 "GET", |
920 body: _body, | 991 body: _body, |
921 queryParams: _queryParams, | 992 queryParams: _queryParams, |
(...skipping 20 matching lines...) |
942 return _json; | 1013 return _json; |
943 } | 1014 } |
944 } | 1015 } |
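
Tying together the operations.list method shown above: the standard page token protocol means callers loop until no nextPageToken comes back. A sketch under assumptions, since ListOperationsResponse is not shown in this diff; operations and nextPageToken are assumed from the REST List response.

  import 'dart:async';
  import 'package:googleapis/dataproc/v1.dart' as dataproc;

  Future<List<dataproc.Operation>> listAllOperations(
      dataproc.DataprocApi api, String collectionName) {
    var results = <dataproc.Operation>[];
    Future<List<dataproc.Operation>> page(String token) {
      return api.projects.regions.operations
          .list(collectionName, pageSize: 100, pageToken: token)
          .then((response) {
        results.addAll(response.operations);
        // nextPageToken is assumed from the REST List response; its absence
        // signals the final page.
        if (response.nextPageToken == null || response.nextPageToken.isEmpty) {
          return new Future.value(results);
        }
        return page(response.nextPageToken);
      });
    }
    return page(null); // a null token is simply omitted from the query
  }
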
945 | 1016 |
946 /** | 1017 /** |
947 * Describes the identifying information, config, and status of a cluster of | 1018 * Describes the identifying information, config, and status of a cluster of |
948 * Google Compute Engine instances. | 1019 * Google Compute Engine instances. |
949 */ | 1020 */ |
950 class Cluster { | 1021 class Cluster { |
951 /** | 1022 /** |
952 * [Required] The cluster name. Cluster names within a project must be unique. | 1023 * Required The cluster name. Cluster names within a project must be unique. |
953 * Names of deleted clusters can be reused. | 1024 * Names of deleted clusters can be reused. |
954 */ | 1025 */ |
955 core.String clusterName; | 1026 core.String clusterName; |
956 /** | 1027 /** |
957 * [Output-only] A cluster UUID (Unique Universal Identifier). Cloud Dataproc | 1028 * Output-only A cluster UUID (Unique Universal Identifier). Cloud Dataproc |
958 * generates this value when it creates the cluster. | 1029 * generates this value when it creates the cluster. |
959 */ | 1030 */ |
960 core.String clusterUuid; | 1031 core.String clusterUuid; |
961 /** | 1032 /** |
962 * [Required] The cluster config. Note that Cloud Dataproc may set default | 1033 * Required The cluster config. Note that Cloud Dataproc may set default |
963 * values, and values may change when clusters are updated. | 1034 * values, and values may change when clusters are updated. |
964 */ | 1035 */ |
965 ClusterConfig config; | 1036 ClusterConfig config; |
966 /** | 1037 /** |
967 * [Optional] The labels to associate with this cluster. Label **keys** must | 1038 * Optional The labels to associate with this cluster. Label keys must contain |
968 * contain 1 to 63 characters, and must conform to [RFC | 1039 * 1 to 63 characters, and must conform to RFC 1035 |
969 * 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty, | 1040 * (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if |
970 * but, if present, must contain 1 to 63 characters, and must conform to [RFC | 1041 * present, must contain 1 to 63 characters, and must conform to RFC 1035 |
971 * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be | 1042 * (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be |
972 * associated with a cluster. | 1043 * associated with a cluster. |
973 */ | 1044 */ |
974 core.Map<core.String, core.String> labels; | 1045 core.Map<core.String, core.String> labels; |
975 /** | 1046 /** |
976 * Contains cluster daemon metrics such as HDFS and YARN stats. **Beta | 1047 * Contains cluster daemon metrics such as HDFS and YARN stats.Beta Feature: |
977 * Feature**: This report is available for testing purposes only. It may be | 1048 * This report is available for testing purposes only. It may be changed |
978 * changed before final release. | 1049 * before final release. |
979 */ | 1050 */ |
980 ClusterMetrics metrics; | 1051 ClusterMetrics metrics; |
981 /** | 1052 /** |
982 * [Required] The Google Cloud Platform project ID that the cluster belongs | 1053 * Required The Google Cloud Platform project ID that the cluster belongs to. |
983 * to. | |
984 */ | 1054 */ |
985 core.String projectId; | 1055 core.String projectId; |
986 /** [Output-only] Cluster status. */ | 1056 /** Output-only Cluster status. */ |
987 ClusterStatus status; | 1057 ClusterStatus status; |
988 /** [Output-only] The previous cluster status. */ | 1058 /** Output-only The previous cluster status. */ |
989 core.List<ClusterStatus> statusHistory; | 1059 core.List<ClusterStatus> statusHistory; |
990 | 1060 |
991 Cluster(); | 1061 Cluster(); |
992 | 1062 |
993 Cluster.fromJson(core.Map _json) { | 1063 Cluster.fromJson(core.Map _json) { |
994 if (_json.containsKey("clusterName")) { | 1064 if (_json.containsKey("clusterName")) { |
995 clusterName = _json["clusterName"]; | 1065 clusterName = _json["clusterName"]; |
996 } | 1066 } |
997 if (_json.containsKey("clusterUuid")) { | 1067 if (_json.containsKey("clusterUuid")) { |
998 clusterUuid = _json["clusterUuid"]; | 1068 clusterUuid = _json["clusterUuid"]; |
(...skipping 44 matching lines...) |
1043 if (statusHistory != null) { | 1113 if (statusHistory != null) { |
1044 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to
List(); | 1114 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to
List(); |
1045 } | 1115 } |
1046 return _json; | 1116 return _json; |
1047 } | 1117 } |
1048 } | 1118 } |
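
The Cluster class follows the generator's usual JSON round-trip pattern (fromJson/toJson). A sketch with hypothetical values; the labels key is assumed to map straight through, mirroring the field list above:

  var cluster = new dataproc.Cluster.fromJson({
    'clusterName': 'example-cluster',  // hypothetical values throughout
    'projectId': 'my-project',
    'labels': {'env': 'staging'},
  });
  print(cluster.toJson()); // round-trips the same keys back out
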
1049 | 1119 |
1050 /** The cluster config. */ | 1120 /** The cluster config. */ |
1051 class ClusterConfig { | 1121 class ClusterConfig { |
1052 /** | 1122 /** |
1053 * [Optional] A Google Cloud Storage staging bucket used for sharing generated | 1123 * Optional A Google Cloud Storage staging bucket used for sharing generated |
1054 * SSH keys and config. If you do not specify a staging bucket, Cloud Dataproc | 1124 * SSH keys and config. If you do not specify a staging bucket, Cloud Dataproc |
1055 * will determine an appropriate Cloud Storage location (US, ASIA, or EU) for | 1125 * will determine an appropriate Cloud Storage location (US, ASIA, or EU) for |
1056 * your cluster's staging bucket according to the Google Compute Engine zone | 1126 * your cluster's staging bucket according to the Google Compute Engine zone |
1057 * where your cluster is deployed, and then it will create and manage this | 1127 * where your cluster is deployed, and then it will create and manage this |
1058 * project-level, per-location bucket for you. | 1128 * project-level, per-location bucket for you. |
1059 */ | 1129 */ |
1060 core.String configBucket; | 1130 core.String configBucket; |
1061 /** | 1131 /** |
1062 * [Required] The shared Google Compute Engine config settings for all | 1132 * Required The shared Google Compute Engine config settings for all instances |
1063 * instances in a cluster. | 1133 * in a cluster. |
1064 */ | 1134 */ |
1065 GceClusterConfig gceClusterConfig; | 1135 GceClusterConfig gceClusterConfig; |
1066 /** | 1136 /** |
1067 * [Optional] Commands to execute on each node after config is completed. By | 1137 * Optional Commands to execute on each node after config is completed. By |
1068 * default, executables are run on master and all worker nodes. You can test a | 1138 * default, executables are run on master and all worker nodes. You can test a |
1069 * node's role metadata to run an executable on a master or worker node, as | 1139 * node's <code>role</code> metadata to run an executable on a master or |
1070 * shown below using `curl` (you can also use `wget`): ROLE=$(curl -H | 1140 * worker node, as shown below using curl (you can also use wget): |
1071 * Metadata-Flavor:Google | 1141 * ROLE=$(curl -H Metadata-Flavor:Google |
1072 * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ | 1142 * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) |
1073 * "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... | 1143 * if [[ "${ROLE}" == 'Master' ]]; then |
1074 * worker specific actions ... fi | 1144 * ... master specific actions ... |
| 1145 * else |
| 1146 * ... worker specific actions ... |
| 1147 * fi |
1075 */ | 1148 */ |
1076 core.List<NodeInitializationAction> initializationActions; | 1149 core.List<NodeInitializationAction> initializationActions; |
1077 /** | 1150 /** |
1078 * [Optional] The Google Compute Engine config settings for the master | 1151 * Optional The Google Compute Engine config settings for the master instance |
1079 * instance in a cluster. | 1152 * in a cluster. |
1080 */ | 1153 */ |
1081 InstanceGroupConfig masterConfig; | 1154 InstanceGroupConfig masterConfig; |
1082 /** | 1155 /** |
1083 * [Optional] The Google Compute Engine config settings for additional worker | 1156 * Optional The Google Compute Engine config settings for additional worker |
1084 * instances in a cluster. | 1157 * instances in a cluster. |
1085 */ | 1158 */ |
1086 InstanceGroupConfig secondaryWorkerConfig; | 1159 InstanceGroupConfig secondaryWorkerConfig; |
1087 /** [Optional] The config settings for software inside the cluster. */ | 1160 /** Optional The config settings for software inside the cluster. */ |
1088 SoftwareConfig softwareConfig; | 1161 SoftwareConfig softwareConfig; |
1089 /** | 1162 /** |
1090 * [Optional] The Google Compute Engine config settings for worker instances | 1163 * Optional The Google Compute Engine config settings for worker instances in |
1091 * in a cluster. | 1164 * a cluster. |
1092 */ | 1165 */ |
1093 InstanceGroupConfig workerConfig; | 1166 InstanceGroupConfig workerConfig; |
1094 | 1167 |
1095 ClusterConfig(); | 1168 ClusterConfig(); |
1096 | 1169 |
1097 ClusterConfig.fromJson(core.Map _json) { | 1170 ClusterConfig.fromJson(core.Map _json) { |
1098 if (_json.containsKey("configBucket")) { | 1171 if (_json.containsKey("configBucket")) { |
1099 configBucket = _json["configBucket"]; | 1172 configBucket = _json["configBucket"]; |
1100 } | 1173 } |
1101 if (_json.containsKey("gceClusterConfig")) { | 1174 if (_json.containsKey("gceClusterConfig")) { |
(...skipping 37 matching lines...) |
1139 _json["softwareConfig"] = (softwareConfig).toJson(); | 1212 _json["softwareConfig"] = (softwareConfig).toJson(); |
1140 } | 1213 } |
1141 if (workerConfig != null) { | 1214 if (workerConfig != null) { |
1142 _json["workerConfig"] = (workerConfig).toJson(); | 1215 _json["workerConfig"] = (workerConfig).toJson(); |
1143 } | 1216 } |
1144 return _json; | 1217 return _json; |
1145 } | 1218 } |
1146 } | 1219 } |
1147 | 1220 |
1148 /** | 1221 /** |
1149 * Contains cluster daemon metrics, such as HDFS and YARN stats. **Beta | 1222 * Contains cluster daemon metrics, such as HDFS and YARN stats. Beta Feature: |
1150 * Feature**: This report is available for testing purposes only. It may be | 1223 * This report is available for testing purposes only. It may be changed before |
1151 * changed before final release. | 1224 * final release. |
1152 */ | 1225 */ |
1153 class ClusterMetrics { | 1226 class ClusterMetrics { |
1154 /** The HDFS metrics. */ | 1227 /** The HDFS metrics. */ |
1155 core.Map<core.String, core.String> hdfsMetrics; | 1228 core.Map<core.String, core.String> hdfsMetrics; |
1156 /** The YARN metrics. */ | 1229 /** The YARN metrics. */ |
1157 core.Map<core.String, core.String> yarnMetrics; | 1230 core.Map<core.String, core.String> yarnMetrics; |
1158 | 1231 |
1159 ClusterMetrics(); | 1232 ClusterMetrics(); |
1160 | 1233 |
1161 ClusterMetrics.fromJson(core.Map _json) { | 1234 ClusterMetrics.fromJson(core.Map _json) { |
(...skipping 12 matching lines...) |
1174 } | 1247 } |
1175 if (yarnMetrics != null) { | 1248 if (yarnMetrics != null) { |
1176 _json["yarnMetrics"] = yarnMetrics; | 1249 _json["yarnMetrics"] = yarnMetrics; |
1177 } | 1250 } |
1178 return _json; | 1251 return _json; |
1179 } | 1252 } |
1180 } | 1253 } |
1181 | 1254 |
1182 /** Metadata describing the operation. */ | 1255 /** Metadata describing the operation. */ |
1183 class ClusterOperationMetadata { | 1256 class ClusterOperationMetadata { |
1184 /** [Output-only] Name of the cluster for the operation. */ | 1257 /** Output-only Name of the cluster for the operation. */ |
1185 core.String clusterName; | 1258 core.String clusterName; |
1186 /** [Output-only] Cluster UUID for the operation. */ | 1259 /** Output-only Cluster UUID for the operation. */ |
1187 core.String clusterUuid; | 1260 core.String clusterUuid; |
1188 /** [Output-only] Short description of operation. */ | 1261 /** Output-only Short description of operation. */ |
1189 core.String description; | 1262 core.String description; |
1190 /** [Output-only] labels associated with the operation */ | 1263 /** Output-only Labels associated with the operation */ |
1191 core.Map<core.String, core.String> labels; | 1264 core.Map<core.String, core.String> labels; |
1192 /** [Output-only] The operation type. */ | 1265 /** Output-only The operation type. */ |
1193 core.String operationType; | 1266 core.String operationType; |
1194 /** [Output-only] Current operation status. */ | 1267 /** Output-only Current operation status. */ |
1195 ClusterOperationStatus status; | 1268 ClusterOperationStatus status; |
1196 /** [Output-only] The previous operation status. */ | 1269 /** Output-only The previous operation status. */ |
1197 core.List<ClusterOperationStatus> statusHistory; | 1270 core.List<ClusterOperationStatus> statusHistory; |
 | 1271 /** Output-only Warnings encountered during operation execution. */ |
| 1272 core.List<core.String> warnings; |
1198 | 1273 |
1199 ClusterOperationMetadata(); | 1274 ClusterOperationMetadata(); |
1200 | 1275 |
1201 ClusterOperationMetadata.fromJson(core.Map _json) { | 1276 ClusterOperationMetadata.fromJson(core.Map _json) { |
1202 if (_json.containsKey("clusterName")) { | 1277 if (_json.containsKey("clusterName")) { |
1203 clusterName = _json["clusterName"]; | 1278 clusterName = _json["clusterName"]; |
1204 } | 1279 } |
1205 if (_json.containsKey("clusterUuid")) { | 1280 if (_json.containsKey("clusterUuid")) { |
1206 clusterUuid = _json["clusterUuid"]; | 1281 clusterUuid = _json["clusterUuid"]; |
1207 } | 1282 } |
1208 if (_json.containsKey("description")) { | 1283 if (_json.containsKey("description")) { |
1209 description = _json["description"]; | 1284 description = _json["description"]; |
1210 } | 1285 } |
1211 if (_json.containsKey("labels")) { | 1286 if (_json.containsKey("labels")) { |
1212 labels = _json["labels"]; | 1287 labels = _json["labels"]; |
1213 } | 1288 } |
1214 if (_json.containsKey("operationType")) { | 1289 if (_json.containsKey("operationType")) { |
1215 operationType = _json["operationType"]; | 1290 operationType = _json["operationType"]; |
1216 } | 1291 } |
1217 if (_json.containsKey("status")) { | 1292 if (_json.containsKey("status")) { |
1218 status = new ClusterOperationStatus.fromJson(_json["status"]); | 1293 status = new ClusterOperationStatus.fromJson(_json["status"]); |
1219 } | 1294 } |
1220 if (_json.containsKey("statusHistory")) { | 1295 if (_json.containsKey("statusHistory")) { |
1221 statusHistory = _json["statusHistory"].map((value) => new ClusterOperation
Status.fromJson(value)).toList(); | 1296 statusHistory = _json["statusHistory"].map((value) => new ClusterOperation
Status.fromJson(value)).toList(); |
1222 } | 1297 } |
| 1298 if (_json.containsKey("warnings")) { |
| 1299 warnings = _json["warnings"]; |
| 1300 } |
1223 } | 1301 } |
1224 | 1302 |
1225 core.Map toJson() { | 1303 core.Map toJson() { |
1226 var _json = new core.Map(); | 1304 var _json = new core.Map(); |
1227 if (clusterName != null) { | 1305 if (clusterName != null) { |
1228 _json["clusterName"] = clusterName; | 1306 _json["clusterName"] = clusterName; |
1229 } | 1307 } |
1230 if (clusterUuid != null) { | 1308 if (clusterUuid != null) { |
1231 _json["clusterUuid"] = clusterUuid; | 1309 _json["clusterUuid"] = clusterUuid; |
1232 } | 1310 } |
1233 if (description != null) { | 1311 if (description != null) { |
1234 _json["description"] = description; | 1312 _json["description"] = description; |
1235 } | 1313 } |
1236 if (labels != null) { | 1314 if (labels != null) { |
1237 _json["labels"] = labels; | 1315 _json["labels"] = labels; |
1238 } | 1316 } |
1239 if (operationType != null) { | 1317 if (operationType != null) { |
1240 _json["operationType"] = operationType; | 1318 _json["operationType"] = operationType; |
1241 } | 1319 } |
1242 if (status != null) { | 1320 if (status != null) { |
1243 _json["status"] = (status).toJson(); | 1321 _json["status"] = (status).toJson(); |
1244 } | 1322 } |
1245 if (statusHistory != null) { | 1323 if (statusHistory != null) { |
1246 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to
List(); | 1324 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to
List(); |
1247 } | 1325 } |
| 1326 if (warnings != null) { |
| 1327 _json["warnings"] = warnings; |
| 1328 } |
1248 return _json; | 1329 return _json; |
1249 } | 1330 } |
1250 } | 1331 } |
1251 | 1332 |
1252 /** The status of the operation. */ | 1333 /** The status of the operation. */ |
1253 class ClusterOperationStatus { | 1334 class ClusterOperationStatus { |
1254 /** [Output-only]A message containing any operation metadata details. */ | 1335 /** Output-only A message containing any operation metadata details. */ |
1255 core.String details; | 1336 core.String details; |
1256 /** [Output-only] A message containing the detailed operation state. */ | 1337 /** Output-only A message containing the detailed operation state. */ |
1257 core.String innerState; | 1338 core.String innerState; |
1258 /** | 1339 /** |
1259 * [Output-only] A message containing the operation state. | 1340 * Output-only A message containing the operation state. |
1260 * Possible string values are: | 1341 * Possible string values are: |
1261 * - "UNKNOWN" : A UNKNOWN. | 1342 * - "UNKNOWN" : Unused. |
1262 * - "PENDING" : A PENDING. | 1343 * - "PENDING" : The operation has been created. |
1263 * - "RUNNING" : A RUNNING. | 1344 * - "RUNNING" : The operation is running. |
1264 * - "DONE" : A DONE. | 1345 * - "DONE" : The operation is done; either cancelled or completed. |
1265 */ | 1346 */ |
1266 core.String state; | 1347 core.String state; |
1267 /** [Output-only] The time this state was entered. */ | 1348 /** Output-only The time this state was entered. */ |
1268 core.String stateStartTime; | 1349 core.String stateStartTime; |
1269 | 1350 |
1270 ClusterOperationStatus(); | 1351 ClusterOperationStatus(); |
1271 | 1352 |
1272 ClusterOperationStatus.fromJson(core.Map _json) { | 1353 ClusterOperationStatus.fromJson(core.Map _json) { |
1273 if (_json.containsKey("details")) { | 1354 if (_json.containsKey("details")) { |
1274 details = _json["details"]; | 1355 details = _json["details"]; |
1275 } | 1356 } |
1276 if (_json.containsKey("innerState")) { | 1357 if (_json.containsKey("innerState")) { |
1277 innerState = _json["innerState"]; | 1358 innerState = _json["innerState"]; |
(...skipping 19 matching lines...) |
1297 } | 1378 } |
1298 if (stateStartTime != null) { | 1379 if (stateStartTime != null) { |
1299 _json["stateStartTime"] = stateStartTime; | 1380 _json["stateStartTime"] = stateStartTime; |
1300 } | 1381 } |
1301 return _json; | 1382 return _json; |
1302 } | 1383 } |
1303 } | 1384 } |
1304 | 1385 |
1305 /** The status of a cluster and its instances. */ | 1386 /** The status of a cluster and its instances. */ |
1306 class ClusterStatus { | 1387 class ClusterStatus { |
1307 /** [Output-only] Optional details of cluster's state. */ | 1388 /** Output-only Optional details of cluster's state. */ |
1308 core.String detail; | 1389 core.String detail; |
1309 /** | 1390 /** |
1310 * [Output-only] The cluster's state. | 1391 * Output-only The cluster's state. |
1311 * Possible string values are: | 1392 * Possible string values are: |
1312 * - "UNKNOWN" : A UNKNOWN. | 1393 * - "UNKNOWN" : The cluster state is unknown. |
1313 * - "CREATING" : A CREATING. | 1394 * - "CREATING" : The cluster is being created and set up. It is not ready for |
1314 * - "RUNNING" : A RUNNING. | 1395 * use. |
1315 * - "ERROR" : A ERROR. | 1396 * - "RUNNING" : The cluster is currently running and healthy. It is ready for |
1316 * - "DELETING" : A DELETING. | 1397 * use. |
1317 * - "UPDATING" : A UPDATING. | 1398 * - "ERROR" : The cluster encountered an error. It is not ready for use. |
| 1399 * - "DELETING" : The cluster is being deleted. It cannot be used. |
| 1400 * - "UPDATING" : The cluster is being updated. It continues to accept and |
| 1401 * process jobs. |
1318 */ | 1402 */ |
1319 core.String state; | 1403 core.String state; |
1320 /** [Output-only] Time when this state was entered. */ | 1404 /** Output-only Time when this state was entered. */ |
1321 core.String stateStartTime; | 1405 core.String stateStartTime; |
1322 | 1406 |
1323 ClusterStatus(); | 1407 ClusterStatus(); |
1324 | 1408 |
1325 ClusterStatus.fromJson(core.Map _json) { | 1409 ClusterStatus.fromJson(core.Map _json) { |
1326 if (_json.containsKey("detail")) { | 1410 if (_json.containsKey("detail")) { |
1327 detail = _json["detail"]; | 1411 detail = _json["detail"]; |
1328 } | 1412 } |
1329 if (_json.containsKey("state")) { | 1413 if (_json.containsKey("state")) { |
1330 state = _json["state"]; | 1414 state = _json["state"]; |
(...skipping 14 matching lines...) |
1345 if (stateStartTime != null) { | 1429 if (stateStartTime != null) { |
1346 _json["stateStartTime"] = stateStartTime; | 1430 _json["stateStartTime"] = stateStartTime; |
1347 } | 1431 } |
1348 return _json; | 1432 return _json; |
1349 } | 1433 } |
1350 } | 1434 } |
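
// A sketch of polling ClusterStatus until the cluster leaves its transient
// CREATING/UPDATING states. Assumes the generated
// clusters.get(projectId, region, clusterName) method and the published
// package:googleapis build of this library; 'global' is the Cloud Dataproc
// region and the other names are placeholders.
import 'dart:async' as async;
import 'package:googleapis/dataproc/v1.dart' as dataproc;

async.Future<dataproc.Cluster> waitUntilStable(
    dataproc.DataprocApi api, String projectId, String clusterName) async {
  while (true) {
    var cluster = await api.projects.regions.clusters
        .get(projectId, 'global', clusterName);
    var state = cluster.status.state;
    if (state != 'CREATING' && state != 'UPDATING') {
      return cluster; // RUNNING, ERROR, or DELETING: no longer transitioning.
    }
    await new async.Future.delayed(const Duration(seconds: 10));
  }
}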
1351 | 1435 |
1352 /** The location where output from diagnostic command can be found. */ | 1436 /** The location where output from diagnostic command can be found. */ |
1353 class DiagnoseClusterOutputLocation { | 1437 class DiagnoseClusterOutputLocation { |
1354 /** | 1438 /** |
1355 * [Output-only] The Google Cloud Storage URI of the diagnostic output. This | 1439 * Output-only The Google Cloud Storage URI of the diagnostic output. This |
1356 * will be a plain text file with summary of collected diagnostics. | 1440 * will be a plain text file with a summary of collected diagnostics. |
1357 */ | 1441 */ |
1358 core.String outputUri; | 1442 core.String outputUri; |
1359 | 1443 |
1360 DiagnoseClusterOutputLocation(); | 1444 DiagnoseClusterOutputLocation(); |
1361 | 1445 |
1362 DiagnoseClusterOutputLocation.fromJson(core.Map _json) { | 1446 DiagnoseClusterOutputLocation.fromJson(core.Map _json) { |
1363 if (_json.containsKey("outputUri")) { | 1447 if (_json.containsKey("outputUri")) { |
1364 outputUri = _json["outputUri"]; | 1448 outputUri = _json["outputUri"]; |
1365 } | 1449 } |
(...skipping 18 matching lines...) |
1384 | 1468 |
1385 core.Map toJson() { | 1469 core.Map toJson() { |
1386 var _json = new core.Map(); | 1470 var _json = new core.Map(); |
1387 return _json; | 1471 return _json; |
1388 } | 1472 } |
1389 } | 1473 } |
1390 | 1474 |
1391 /** The location of diagnostic output. */ | 1475 /** The location of diagnostic output. */ |
1392 class DiagnoseClusterResults { | 1476 class DiagnoseClusterResults { |
1393 /** | 1477 /** |
1394 * [Output-only] The Google Cloud Storage URI of the diagnostic output. The | 1478 * Output-only The Google Cloud Storage URI of the diagnostic output. The |
1395 * output report is a plain text file with a summary of collected diagnostics. | 1479 * output report is a plain text file with a summary of collected diagnostics. |
1396 */ | 1480 */ |
1397 core.String outputUri; | 1481 core.String outputUri; |
1398 | 1482 |
1399 DiagnoseClusterResults(); | 1483 DiagnoseClusterResults(); |
1400 | 1484 |
1401 DiagnoseClusterResults.fromJson(core.Map _json) { | 1485 DiagnoseClusterResults.fromJson(core.Map _json) { |
1402 if (_json.containsKey("outputUri")) { | 1486 if (_json.containsKey("outputUri")) { |
1403 outputUri = _json["outputUri"]; | 1487 outputUri = _json["outputUri"]; |
1404 } | 1488 } |
1405 } | 1489 } |
1406 | 1490 |
1407 core.Map toJson() { | 1491 core.Map toJson() { |
1408 var _json = new core.Map(); | 1492 var _json = new core.Map(); |
1409 if (outputUri != null) { | 1493 if (outputUri != null) { |
1410 _json["outputUri"] = outputUri; | 1494 _json["outputUri"] = outputUri; |
1411 } | 1495 } |
1412 return _json; | 1496 return _json; |
1413 } | 1497 } |
1414 } | 1498 } |
1415 | 1499 |
1416 /** Specifies the config of disk options for a group of VM instances. */ | 1500 /** Specifies the config of disk options for a group of VM instances. */ |
1417 class DiskConfig { | 1501 class DiskConfig { |
1418 /** [Optional] Size in GB of the boot disk (default is 500GB). */ | 1502 /** Optional Size in GB of the boot disk (default is 500GB). */ |
1419 core.int bootDiskSizeGb; | 1503 core.int bootDiskSizeGb; |
1420 /** | 1504 /** |
1421 * [Optional] Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are | 1505 * Optional Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are |
1422 * not attached, the boot disk is used to store runtime logs and | 1506 * not attached, the boot disk is used to store runtime logs and HDFS |
1423 * [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If | 1507 * (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one |
1424 * one or more SSDs are attached, this runtime bulk data is spread across | 1508 * or more SSDs are attached, this runtime bulk data is spread across them, |
1425 * them, and the boot disk contains only basic config and installed binaries. | 1509 * and the boot disk contains only basic config and installed binaries. |
1426 */ | 1510 */ |
1427 core.int numLocalSsds; | 1511 core.int numLocalSsds; |
1428 | 1512 |
1429 DiskConfig(); | 1513 DiskConfig(); |
1430 | 1514 |
1431 DiskConfig.fromJson(core.Map _json) { | 1515 DiskConfig.fromJson(core.Map _json) { |
1432 if (_json.containsKey("bootDiskSizeGb")) { | 1516 if (_json.containsKey("bootDiskSizeGb")) { |
1433 bootDiskSizeGb = _json["bootDiskSizeGb"]; | 1517 bootDiskSizeGb = _json["bootDiskSizeGb"]; |
1434 } | 1518 } |
1435 if (_json.containsKey("numLocalSsds")) { | 1519 if (_json.containsKey("numLocalSsds")) { |
1436 numLocalSsds = _json["numLocalSsds"]; | 1520 numLocalSsds = _json["numLocalSsds"]; |
1437 } | 1521 } |
1438 } | 1522 } |
1439 | 1523 |
1440 core.Map toJson() { | 1524 core.Map toJson() { |
1441 var _json = new core.Map(); | 1525 var _json = new core.Map(); |
1442 if (bootDiskSizeGb != null) { | 1526 if (bootDiskSizeGb != null) { |
1443 _json["bootDiskSizeGb"] = bootDiskSizeGb; | 1527 _json["bootDiskSizeGb"] = bootDiskSizeGb; |
1444 } | 1528 } |
1445 if (numLocalSsds != null) { | 1529 if (numLocalSsds != null) { |
1446 _json["numLocalSsds"] = numLocalSsds; | 1530 _json["numLocalSsds"] = numLocalSsds; |
1447 } | 1531 } |
1448 return _json; | 1532 return _json; |
1449 } | 1533 } |
1450 } | 1534 } |
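
// A short sketch of the DiskConfig trade-off described above: with no local
// SSDs, HDFS data and runtime logs live on the boot disk, so it is sized up;
// with SSDs attached, the boot disk can stay at the default. Values are
// illustrative only; assumes the published package:googleapis build.
import 'package:googleapis/dataproc/v1.dart' as dataproc;

dataproc.DiskConfig hdfsOnBootDisk() => new dataproc.DiskConfig()
  ..bootDiskSizeGb = 1000
  ..numLocalSsds = 0;

dataproc.DiskConfig hdfsOnLocalSsds() => new dataproc.DiskConfig()
  ..bootDiskSizeGb = 500
  ..numLocalSsds = 2;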
1451 | 1535 |
1452 /** | 1536 /** |
1453 * A generic empty message that you can re-use to avoid defining duplicated | 1537 * A generic empty message that you can re-use to avoid defining duplicated |
1454 * empty messages in your APIs. A typical example is to use it as the request or | 1538 * empty messages in your APIs. A typical example is to use it as the request or |
1455 * the response type of an API method. For instance: service Foo { rpc | 1539 * the response type of an API method. For instance: |
1456 * Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON | 1540 * service Foo { |
1457 * representation for `Empty` is empty JSON object `{}`. | 1541 * rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); |
| 1542 * } |
 | 1543 * The JSON representation for Empty is an empty JSON object {}. |
1458 */ | 1544 */ |
1459 class Empty { | 1545 class Empty { |
1460 | 1546 |
1461 Empty(); | 1547 Empty(); |
1462 | 1548 |
1463 Empty.fromJson(core.Map _json) { | 1549 Empty.fromJson(core.Map _json) { |
1464 } | 1550 } |
1465 | 1551 |
1466 core.Map toJson() { | 1552 core.Map toJson() { |
1467 var _json = new core.Map(); | 1553 var _json = new core.Map(); |
1468 return _json; | 1554 return _json; |
1469 } | 1555 } |
1470 } | 1556 } |
1471 | 1557 |
1472 /** | 1558 /** |
1473 * Common config settings for resources of Google Compute Engine cluster | 1559 * Common config settings for resources of Google Compute Engine cluster |
1474 * instances, applicable to all instances in the cluster. | 1560 * instances, applicable to all instances in the cluster. |
1475 */ | 1561 */ |
1476 class GceClusterConfig { | 1562 class GceClusterConfig { |
1477 /** | 1563 /** |
1478 * [Optional] If true, all instances in the cluster will only have internal IP | 1564 * Optional If true, all instances in the cluster will only have internal IP |
1479 * addresses. By default, clusters are not restricted to internal IP | 1565 * addresses. By default, clusters are not restricted to internal IP |
1480 * addresses, and will have ephemeral external IP addresses assigned to each | 1566 * addresses, and will have ephemeral external IP addresses assigned to each |
1481 * instance. This `internal_ip_only` restriction can only be enabled for | 1567 * instance. This internal_ip_only restriction can only be enabled for |
1482 * subnetwork enabled networks, and all off-cluster dependencies must be | 1568 * subnetwork enabled networks, and all off-cluster dependencies must be |
1483 * configured to be accessible without external IP addresses. | 1569 * configured to be accessible without external IP addresses. |
1484 */ | 1570 */ |
1485 core.bool internalIpOnly; | 1571 core.bool internalIpOnly; |
1486 /** | 1572 /** |
1487 * The Google Compute Engine metadata entries to add to all instances (see | 1573 * The Google Compute Engine metadata entries to add to all instances (see |
1488 * [Project and instance | 1574 * Project and instance metadata |
1489 * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata
#project_and_instance_metadata)). | 1575 * (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_
and_instance_metadata)). |
1490 */ | 1576 */ |
1491 core.Map<core.String, core.String> metadata; | 1577 core.Map<core.String, core.String> metadata; |
1492 /** | 1578 /** |
1493 * [Optional] The Google Compute Engine network to be used for machine | 1579 * Optional The Google Compute Engine network to be used for machine |
1494 * communications. Cannot be specified with subnetwork_uri. If neither | 1580 * communications. Cannot be specified with subnetwork_uri. If neither |
1495 * `network_uri` nor `subnetwork_uri` is specified, the "default" network of | 1581 * network_uri nor subnetwork_uri is specified, the "default" network of the |
1496 * the project is used, if it exists. Cannot be a "Custom Subnet Network" (see | 1582 * project is used, if it exists. Cannot be a "Custom Subnet Network" (see |
1497 * [Using Subnetworks](/compute/docs/subnetworks) for more information). | 1583 * Using Subnetworks for more information). Example: |
1498 * Example: | 1584 * https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/
default. |
1499 * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global
/default`. | |
1500 */ | 1585 */ |
1501 core.String networkUri; | 1586 core.String networkUri; |
1502 /** | 1587 /** |
1503 * [Optional] The URIs of service account scopes to be included in Google | 1588 * Optional The service account of the instances. Defaults to the default |
| 1589 * Google Compute Engine service account. Custom service accounts need |
 | 1590 * permissions equivalent to the following IAM roles: |
 | 1591 * roles/logging.logWriter |
 | 1592 * roles/storage.objectAdmin (see |
| 1593 * https://cloud.google.com/compute/docs/access/service-accounts#custom_servic
e_accounts |
| 1594 * for more information). Example: |
| 1595 * [account_id]@[project_id].iam.gserviceaccount.com |
| 1596 */ |
| 1597 core.String serviceAccount; |
| 1598 /** |
| 1599 * Optional The URIs of service account scopes to be included in Google |
1504 * Compute Engine instances. The following base set of scopes is always | 1600 * Compute Engine instances. The following base set of scopes is always |
1505 * included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * | 1601 * included: |
1506 * https://www.googleapis.com/auth/devstorage.read_write * | 1602 * https://www.googleapis.com/auth/cloud.useraccounts.readonly |
1507 * https://www.googleapis.com/auth/logging.write If no scopes are specified, | 1603 * https://www.googleapis.com/auth/devstorage.read_write |
1508 * the following defaults are also provided: * | 1604 * https://www.googleapis.com/auth/logging.write If no scopes are specified, |
1509 * https://www.googleapis.com/auth/bigquery * | 1605 * the following defaults are also provided: |
1510 * https://www.googleapis.com/auth/bigtable.admin.table * | 1606 * https://www.googleapis.com/auth/bigquery |
1511 * https://www.googleapis.com/auth/bigtable.data * | 1607 * https://www.googleapis.com/auth/bigtable.admin.table |
| 1608 * https://www.googleapis.com/auth/bigtable.data |
1512 * https://www.googleapis.com/auth/devstorage.full_control | 1609 * https://www.googleapis.com/auth/devstorage.full_control |
1513 */ | 1610 */ |
1514 core.List<core.String> serviceAccountScopes; | 1611 core.List<core.String> serviceAccountScopes; |
1515 /** | 1612 /** |
1516 * [Optional] The Google Compute Engine subnetwork to be used for machine | 1613 * Optional The Google Compute Engine subnetwork to be used for machine |
1517 * communications. Cannot be specified with network_uri. Example: | 1614 * communications. Cannot be specified with network_uri. Example: |
1518 * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-eas
t1/sub0`. | 1615 * https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east
1/sub0. |
1519 */ | 1616 */ |
1520 core.String subnetworkUri; | 1617 core.String subnetworkUri; |
1521 /** | 1618 /** |
1522 * The Google Compute Engine tags to add to all instances (see [Tagging | 1619 * The Google Compute Engine tags to add to all instances (see Tagging |
1523 * instances](/compute/docs/label-or-tag-resources#tags)). | 1620 * instances). |
1524 */ | 1621 */ |
1525 core.List<core.String> tags; | 1622 core.List<core.String> tags; |
1526 /** | 1623 /** |
1527 * [Required] The zone where the Google Compute Engine cluster will be | 1624 * Required The zone where the Google Compute Engine cluster will be located. |
1528 * located. Example: | 1625 * Example: |
1529 * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`. | 1626 * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]. |
1530 */ | 1627 */ |
1531 core.String zoneUri; | 1628 core.String zoneUri; |
1532 | 1629 |
1533 GceClusterConfig(); | 1630 GceClusterConfig(); |
1534 | 1631 |
1535 GceClusterConfig.fromJson(core.Map _json) { | 1632 GceClusterConfig.fromJson(core.Map _json) { |
1536 if (_json.containsKey("internalIpOnly")) { | 1633 if (_json.containsKey("internalIpOnly")) { |
1537 internalIpOnly = _json["internalIpOnly"]; | 1634 internalIpOnly = _json["internalIpOnly"]; |
1538 } | 1635 } |
1539 if (_json.containsKey("metadata")) { | 1636 if (_json.containsKey("metadata")) { |
1540 metadata = _json["metadata"]; | 1637 metadata = _json["metadata"]; |
1541 } | 1638 } |
1542 if (_json.containsKey("networkUri")) { | 1639 if (_json.containsKey("networkUri")) { |
1543 networkUri = _json["networkUri"]; | 1640 networkUri = _json["networkUri"]; |
1544 } | 1641 } |
| 1642 if (_json.containsKey("serviceAccount")) { |
| 1643 serviceAccount = _json["serviceAccount"]; |
| 1644 } |
1545 if (_json.containsKey("serviceAccountScopes")) { | 1645 if (_json.containsKey("serviceAccountScopes")) { |
1546 serviceAccountScopes = _json["serviceAccountScopes"]; | 1646 serviceAccountScopes = _json["serviceAccountScopes"]; |
1547 } | 1647 } |
1548 if (_json.containsKey("subnetworkUri")) { | 1648 if (_json.containsKey("subnetworkUri")) { |
1549 subnetworkUri = _json["subnetworkUri"]; | 1649 subnetworkUri = _json["subnetworkUri"]; |
1550 } | 1650 } |
1551 if (_json.containsKey("tags")) { | 1651 if (_json.containsKey("tags")) { |
1552 tags = _json["tags"]; | 1652 tags = _json["tags"]; |
1553 } | 1653 } |
1554 if (_json.containsKey("zoneUri")) { | 1654 if (_json.containsKey("zoneUri")) { |
1555 zoneUri = _json["zoneUri"]; | 1655 zoneUri = _json["zoneUri"]; |
1556 } | 1656 } |
1557 } | 1657 } |
1558 | 1658 |
1559 core.Map toJson() { | 1659 core.Map toJson() { |
1560 var _json = new core.Map(); | 1660 var _json = new core.Map(); |
1561 if (internalIpOnly != null) { | 1661 if (internalIpOnly != null) { |
1562 _json["internalIpOnly"] = internalIpOnly; | 1662 _json["internalIpOnly"] = internalIpOnly; |
1563 } | 1663 } |
1564 if (metadata != null) { | 1664 if (metadata != null) { |
1565 _json["metadata"] = metadata; | 1665 _json["metadata"] = metadata; |
1566 } | 1666 } |
1567 if (networkUri != null) { | 1667 if (networkUri != null) { |
1568 _json["networkUri"] = networkUri; | 1668 _json["networkUri"] = networkUri; |
1569 } | 1669 } |
| 1670 if (serviceAccount != null) { |
| 1671 _json["serviceAccount"] = serviceAccount; |
| 1672 } |
1570 if (serviceAccountScopes != null) { | 1673 if (serviceAccountScopes != null) { |
1571 _json["serviceAccountScopes"] = serviceAccountScopes; | 1674 _json["serviceAccountScopes"] = serviceAccountScopes; |
1572 } | 1675 } |
1573 if (subnetworkUri != null) { | 1676 if (subnetworkUri != null) { |
1574 _json["subnetworkUri"] = subnetworkUri; | 1677 _json["subnetworkUri"] = subnetworkUri; |
1575 } | 1678 } |
1576 if (tags != null) { | 1679 if (tags != null) { |
1577 _json["tags"] = tags; | 1680 _json["tags"] = tags; |
1578 } | 1681 } |
1579 if (zoneUri != null) { | 1682 if (zoneUri != null) { |
1580 _json["zoneUri"] = zoneUri; | 1683 _json["zoneUri"] = zoneUri; |
1581 } | 1684 } |
1582 return _json; | 1685 return _json; |
1583 } | 1686 } |
1584 } | 1687 } |
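
// A sketch of a GceClusterConfig for an internal-IP-only cluster on a
// subnetwork, respecting the constraint documented above that network_uri and
// subnetwork_uri are mutually exclusive. All resource names are placeholders;
// assumes the published package:googleapis build of this library.
import 'package:googleapis/dataproc/v1.dart' as dataproc;

dataproc.GceClusterConfig privateClusterConfig() {
  return new dataproc.GceClusterConfig()
    ..zoneUri =
        'https://www.googleapis.com/compute/v1/projects/my-project/zones/us-east1-b'
    ..subnetworkUri =
        'https://www.googleapis.com/compute/v1/projects/my-project/regions/us-east1/sub0'
    // Off-cluster dependencies must be reachable without external IPs.
    ..internalIpOnly = true
    ..tags = ['dataproc-cluster']
    ..metadata = {'environment': 'staging'};
}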
1585 | 1688 |
1586 /** | 1689 /** |
1587 * A Cloud Dataproc job for running [Apache Hadoop | 1690 * A Cloud Dataproc job for running Apache Hadoop MapReduce |
1588 * MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/had
oop-mapreduce-client-core/MapReduceTutorial.html) | 1691 * (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapred
uce-client-core/MapReduceTutorial.html) |
1589 * jobs on [Apache Hadoop | 1692 * jobs on Apache Hadoop YARN |
1590 * YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN
.html). | 1693 * (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html
). |
1591 */ | 1694 */ |
1592 class HadoopJob { | 1695 class HadoopJob { |
1593 /** | 1696 /** |
1594 * [Optional] HCFS URIs of archives to be extracted in the working directory | 1697 * Optional HCFS URIs of archives to be extracted in the working directory of |
1595 * of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, | 1698 * Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, |
1596 * .tgz, or .zip. | 1699 * or .zip. |
1597 */ | 1700 */ |
1598 core.List<core.String> archiveUris; | 1701 core.List<core.String> archiveUris; |
1599 /** | 1702 /** |
1600 * [Optional] The arguments to pass to the driver. Do not include arguments, | 1703 * Optional The arguments to pass to the driver. Do not include arguments, |
1601 * such as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since | 1704 * such as -libjars or -Dfoo=bar, that can be set as job properties, since a |
1602 * a collision may occur that causes an incorrect job submission. | 1705 * collision may occur that causes an incorrect job submission. |
1603 */ | 1706 */ |
1604 core.List<core.String> args; | 1707 core.List<core.String> args; |
1605 /** | 1708 /** |
1606 * [Optional] HCFS (Hadoop Compatible Filesystem) URIs of files to be copied | 1709 * Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to |
1607 * to the working directory of Hadoop drivers and distributed tasks. Useful | 1710 * the working directory of Hadoop drivers and distributed tasks. Useful for |
1608 * for naively parallel tasks. | 1711 * naively parallel tasks. |
1609 */ | 1712 */ |
1610 core.List<core.String> fileUris; | 1713 core.List<core.String> fileUris; |
1611 /** | 1714 /** |
1612 * [Optional] Jar file URIs to add to the CLASSPATHs of the Hadoop driver and | 1715 * Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and |
1613 * tasks. | 1716 * tasks. |
1614 */ | 1717 */ |
1615 core.List<core.String> jarFileUris; | 1718 core.List<core.String> jarFileUris; |
1616 /** [Optional] The runtime log config for job execution. */ | 1719 /** Optional The runtime log config for job execution. */ |
1617 LoggingConfig loggingConfig; | 1720 LoggingConfig loggingConfig; |
1618 /** | 1721 /** |
1619 * The name of the driver's main class. The jar file containing the class must | 1722 * The name of the driver's main class. The jar file containing the class must |
1620 * be in the default CLASSPATH or specified in `jar_file_uris`. | 1723 * be in the default CLASSPATH or specified in jar_file_uris. |
1621 */ | 1724 */ |
1622 core.String mainClass; | 1725 core.String mainClass; |
1623 /** | 1726 /** |
1624 * The HCFS URI of the jar file containing the main class. Examples: | 1727 * The HCFS URI of the jar file containing the main class. Examples: |
1625 * 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' | 1728 * 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' |
1626 * 'hdfs:/tmp/test-samples/custom-wordcount.jar' | 1729 * 'hdfs:/tmp/test-samples/custom-wordcount.jar' |
1627 * 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar' | 1730 * 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar' |
1628 */ | 1731 */ |
1629 core.String mainJarFileUri; | 1732 core.String mainJarFileUri; |
1630 /** | 1733 /** |
1631 * [Optional] A mapping of property names to values, used to configure Hadoop. | 1734 * Optional A mapping of property names to values, used to configure Hadoop. |
1632 * Properties that conflict with values set by the Cloud Dataproc API may be | 1735 * Properties that conflict with values set by the Cloud Dataproc API may be |
1633 * overwritten. Can include properties set in /etc/hadoop/conf / * -site and | 1736 * overwritten. Can include properties set in /etc/hadoop/conf / * -site and |
1634 * classes in user code. | 1737 * classes in user code. |
1635 */ | 1738 */ |
1636 core.Map<core.String, core.String> properties; | 1739 core.Map<core.String, core.String> properties; |
1637 | 1740 |
1638 HadoopJob(); | 1741 HadoopJob(); |
1639 | 1742 |
1640 HadoopJob.fromJson(core.Map _json) { | 1743 HadoopJob.fromJson(core.Map _json) { |
1641 if (_json.containsKey("archiveUris")) { | 1744 if (_json.containsKey("archiveUris")) { |
(...skipping 46 matching lines...) |
1688 _json["mainJarFileUri"] = mainJarFileUri; | 1791 _json["mainJarFileUri"] = mainJarFileUri; |
1689 } | 1792 } |
1690 if (properties != null) { | 1793 if (properties != null) { |
1691 _json["properties"] = properties; | 1794 _json["properties"] = properties; |
1692 } | 1795 } |
1693 return _json; | 1796 return _json; |
1694 } | 1797 } |
1695 } | 1798 } |
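
// A sketch of the HadoopJob shape described above: the main jar plus plain
// arguments, with tunables expressed via properties rather than args so they
// cannot collide with API-set values, per the warning on args. URIs are
// placeholders; assumes the published package:googleapis build.
import 'package:googleapis/dataproc/v1.dart' as dataproc;

dataproc.HadoopJob wordCountJob() {
  return new dataproc.HadoopJob()
    ..mainJarFileUri =
        'file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
    ..args = [
      'wordcount',
      'gs://example-bucket/input/',
      'gs://example-bucket/output/'
    ]
    // Equivalent to passing -Dmapreduce.job.maps=8, without the collision risk.
    ..properties = {'mapreduce.job.maps': '8'};
}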
1696 | 1799 |
1697 /** | 1800 /** |
1698 * A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/) | 1801 * A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/) |
1699 * queries on YARN. | 1802 * queries on YARN. |
1700 */ | 1803 */ |
1701 class HiveJob { | 1804 class HiveJob { |
1702 /** | 1805 /** |
1703 * [Optional] Whether to continue executing queries if a query fails. The | 1806 * Optional Whether to continue executing queries if a query fails. The |
1704 * default value is `false`. Setting to `true` can be useful when executing | 1807 * default value is false. Setting to true can be useful when executing |
1705 * independent parallel queries. | 1808 * independent parallel queries. |
1706 */ | 1809 */ |
1707 core.bool continueOnFailure; | 1810 core.bool continueOnFailure; |
1708 /** | 1811 /** |
1709 * [Optional] HCFS URIs of jar files to add to the CLASSPATH of the Hive | 1812 * Optional HCFS URIs of jar files to add to the CLASSPATH of the Hive server |
1710 * server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. | 1813 * and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. |
1711 */ | 1814 */ |
1712 core.List<core.String> jarFileUris; | 1815 core.List<core.String> jarFileUris; |
1713 /** | 1816 /** |
1714 * [Optional] A mapping of property names and values, used to configure Hive. | 1817 * Optional A mapping of property names and values, used to configure Hive. |
1715 * Properties that conflict with values set by the Cloud Dataproc API may be | 1818 * Properties that conflict with values set by the Cloud Dataproc API may be |
1716 * overwritten. Can include properties set in /etc/hadoop/conf / * -site.xml, | 1819 * overwritten. Can include properties set in /etc/hadoop/conf / * -site.xml, |
1717 * /etc/hive/conf/hive-site.xml, and classes in user code. | 1820 * /etc/hive/conf/hive-site.xml, and classes in user code. |
1718 */ | 1821 */ |
1719 core.Map<core.String, core.String> properties; | 1822 core.Map<core.String, core.String> properties; |
1720 /** The HCFS URI of the script that contains Hive queries. */ | 1823 /** The HCFS URI of the script that contains Hive queries. */ |
1721 core.String queryFileUri; | 1824 core.String queryFileUri; |
1722 /** A list of queries. */ | 1825 /** A list of queries. */ |
1723 QueryList queryList; | 1826 QueryList queryList; |
1724 /** | 1827 /** |
1725 * [Optional] Mapping of query variable names to values (equivalent to the | 1828 * Optional Mapping of query variable names to values (equivalent to the Hive |
1726 * Hive command: `SET name="value";`). | 1829 * command: SET name="value";). |
1727 */ | 1830 */ |
1728 core.Map<core.String, core.String> scriptVariables; | 1831 core.Map<core.String, core.String> scriptVariables; |
1729 | 1832 |
1730 HiveJob(); | 1833 HiveJob(); |
1731 | 1834 |
1732 HiveJob.fromJson(core.Map _json) { | 1835 HiveJob.fromJson(core.Map _json) { |
1733 if (_json.containsKey("continueOnFailure")) { | 1836 if (_json.containsKey("continueOnFailure")) { |
1734 continueOnFailure = _json["continueOnFailure"]; | 1837 continueOnFailure = _json["continueOnFailure"]; |
1735 } | 1838 } |
1736 if (_json.containsKey("jarFileUris")) { | 1839 if (_json.containsKey("jarFileUris")) { |
(...skipping 31 matching lines...) |
1768 _json["queryList"] = (queryList).toJson(); | 1871 _json["queryList"] = (queryList).toJson(); |
1769 } | 1872 } |
1770 if (scriptVariables != null) { | 1873 if (scriptVariables != null) { |
1771 _json["scriptVariables"] = scriptVariables; | 1874 _json["scriptVariables"] = scriptVariables; |
1772 } | 1875 } |
1773 return _json; | 1876 return _json; |
1774 } | 1877 } |
1775 } | 1878 } |
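
// A sketch of a HiveJob driven by a script in Cloud Storage with variable
// substitution, matching the scriptVariables semantics noted above
// (equivalent to the Hive command SET name="value";). The bucket path is
// hypothetical; assumes the published package:googleapis build.
import 'package:googleapis/dataproc/v1.dart' as dataproc;

dataproc.HiveJob nightlyHiveJob() {
  return new dataproc.HiveJob()
    ..queryFileUri = 'gs://example-bucket/queries/nightly-report.q'
    ..scriptVariables = {'report_date': '2017-01-15'}
    ..continueOnFailure = false; // stop at the first failed query (the default)
}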
1776 | 1879 |
1777 /** | 1880 /** |
1778 * [Optional] The config settings for Google Compute Engine resources in an | 1881 * Optional The config settings for Google Compute Engine resources in an |
1779 * instance group, such as a master or worker group. | 1882 * instance group, such as a master or worker group. |
1780 */ | 1883 */ |
1781 class InstanceGroupConfig { | 1884 class InstanceGroupConfig { |
1782 /** [Optional] Disk option config settings. */ | 1885 /** Optional Disk option config settings. */ |
1783 DiskConfig diskConfig; | 1886 DiskConfig diskConfig; |
1784 /** | 1887 /** |
1785 * [Output-only] The Google Compute Engine image resource used for cluster | 1888 * Output-only The Google Compute Engine image resource used for cluster |
1786 * instances. Inferred from `SoftwareConfig.image_version`. | 1889 * instances. Inferred from SoftwareConfig.image_version. |
1787 */ | 1890 */ |
1788 core.String imageUri; | 1891 core.String imageUri; |
1789 /** | 1892 /** |
1790 * [Optional] The list of instance names. Cloud Dataproc derives the names | 1893 * Optional The list of instance names. Cloud Dataproc derives the names from |
1791 * from `cluster_name`, `num_instances`, and the instance group if not set by | 1894 * cluster_name, num_instances, and the instance group if not set by user |
1792 * user (recommended practice is to let Cloud Dataproc derive the name). | 1895 * (recommended practice is to let Cloud Dataproc derive the name). |
1793 */ | 1896 */ |
1794 core.List<core.String> instanceNames; | 1897 core.List<core.String> instanceNames; |
1795 /** | 1898 /** |
1796 * [Optional] Specifies that this instance group contains preemptible | 1899 * Optional Specifies that this instance group contains preemptible instances. |
1797 * instances. | |
1798 */ | 1900 */ |
1799 core.bool isPreemptible; | 1901 core.bool isPreemptible; |
1800 /** | 1902 /** |
1801 * [Required] The Google Compute Engine machine type used for cluster | 1903 * Required The Google Compute Engine machine type used for cluster instances. |
1802 * instances. Example: | 1904 * Example: |
1803 * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1
-a/machineTypes/n1-standard-2`. | 1905 * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-
a/machineTypes/n1-standard-2. |
1804 */ | 1906 */ |
1805 core.String machineTypeUri; | 1907 core.String machineTypeUri; |
1806 /** | 1908 /** |
1807 * [Output-only] The config for Google Compute Engine Instance Group Manager | 1909 * Output-only The config for Google Compute Engine Instance Group Manager |
1808 * that manages this group. This is only used for preemptible instance groups. | 1910 * that manages this group. This is only used for preemptible instance groups. |
1809 */ | 1911 */ |
1810 ManagedGroupConfig managedGroupConfig; | 1912 ManagedGroupConfig managedGroupConfig; |
1811 /** | 1913 /** |
1812 * [Required] The number of VM instances in the instance group. For master | 1914 * Required The number of VM instances in the instance group. For master |
1813 * instance groups, must be set to 1. | 1915 * instance groups, must be set to 1. |
1814 */ | 1916 */ |
1815 core.int numInstances; | 1917 core.int numInstances; |
1816 | 1918 |
1817 InstanceGroupConfig(); | 1919 InstanceGroupConfig(); |
1818 | 1920 |
1819 InstanceGroupConfig.fromJson(core.Map _json) { | 1921 InstanceGroupConfig.fromJson(core.Map _json) { |
1820 if (_json.containsKey("diskConfig")) { | 1922 if (_json.containsKey("diskConfig")) { |
1821 diskConfig = new DiskConfig.fromJson(_json["diskConfig"]); | 1923 diskConfig = new DiskConfig.fromJson(_json["diskConfig"]); |
1822 } | 1924 } |
(...skipping 40 matching lines...) |
1863 if (numInstances != null) { | 1965 if (numInstances != null) { |
1864 _json["numInstances"] = numInstances; | 1966 _json["numInstances"] = numInstances; |
1865 } | 1967 } |
1866 return _json; | 1968 return _json; |
1867 } | 1969 } |
1868 } | 1970 } |
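
// A sketch of an InstanceGroupConfig for preemptible secondary workers,
// leaving instanceNames unset so Cloud Dataproc derives them, per the
// recommendation above. The machine type URI uses a placeholder project;
// assumes the published package:googleapis build.
import 'package:googleapis/dataproc/v1.dart' as dataproc;

dataproc.InstanceGroupConfig preemptibleWorkers(int count) {
  return new dataproc.InstanceGroupConfig()
    ..numInstances = count
    ..isPreemptible = true
    ..machineTypeUri =
        'https://www.googleapis.com/compute/v1/projects/my-project/zones/us-east1-b/machineTypes/n1-standard-2';
}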
1869 | 1971 |
1870 /** A Cloud Dataproc job resource. */ | 1972 /** A Cloud Dataproc job resource. */ |
1871 class Job { | 1973 class Job { |
1872 /** | 1974 /** |
1873 * [Output-only] If present, the location of miscellaneous control files which | 1975 * Output-only If present, the location of miscellaneous control files which |
1874 * may be used as part of job setup and handling. If not present, control | 1976 * may be used as part of job setup and handling. If not present, control |
1875 * files may be placed in the same location as `driver_output_uri`. | 1977 * files may be placed in the same location as driver_output_uri. |
1876 */ | 1978 */ |
1877 core.String driverControlFilesUri; | 1979 core.String driverControlFilesUri; |
1878 /** | 1980 /** |
1879 * [Output-only] A URI pointing to the location of the stdout of the job's | 1981 * Output-only A URI pointing to the location of the stdout of the job's |
1880 * driver program. | 1982 * driver program. |
1881 */ | 1983 */ |
1882 core.String driverOutputResourceUri; | 1984 core.String driverOutputResourceUri; |
1883 /** Job is a Hadoop job. */ | 1985 /** Job is a Hadoop job. */ |
1884 HadoopJob hadoopJob; | 1986 HadoopJob hadoopJob; |
1885 /** Job is a Hive job. */ | 1987 /** Job is a Hive job. */ |
1886 HiveJob hiveJob; | 1988 HiveJob hiveJob; |
1887 /** | 1989 /** |
1888 * [Optional] The labels to associate with this job. Label **keys** must | 1990 * Optional The labels to associate with this job. Label keys must contain 1 |
1889 * contain 1 to 63 characters, and must conform to [RFC | 1991 * to 63 characters, and must conform to RFC 1035 |
1890 * 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty, | 1992 * (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if |
1891 * but, if present, must contain 1 to 63 characters, and must conform to [RFC | 1993 * present, must contain 1 to 63 characters, and must conform to RFC 1035 |
1892 * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be | 1994 * (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be |
1893 * associated with a job. | 1995 * associated with a job. |
1894 */ | 1996 */ |
1895 core.Map<core.String, core.String> labels; | 1997 core.Map<core.String, core.String> labels; |
1896 /** Job is a Pig job. */ | 1998 /** Job is a Pig job. */ |
1897 PigJob pigJob; | 1999 PigJob pigJob; |
1898 /** | 2000 /** |
1899 * [Required] Job information, including how, when, and where to run the job. | 2001 * Required Job information, including how, when, and where to run the job. |
1900 */ | 2002 */ |
1901 JobPlacement placement; | 2003 JobPlacement placement; |
1902 /** Job is a Pyspark job. */ | 2004 /** Job is a Pyspark job. */ |
1903 PySparkJob pysparkJob; | 2005 PySparkJob pysparkJob; |
1904 /** | 2006 /** |
1905 * [Optional] The fully qualified reference to the job, which can be used to | 2007 * Optional The fully qualified reference to the job, which can be used to |
1906 * obtain the equivalent REST path of the job resource. If this property is | 2008 * obtain the equivalent REST path of the job resource. If this property is |
1907 * not specified when a job is created, the server generates a job_id. | 2009 * not specified when a job is created, the server generates a |
| 2010 * <code>job_id</code>. |
1908 */ | 2011 */ |
1909 JobReference reference; | 2012 JobReference reference; |
| 2013 /** Optional Job scheduling configuration. */ |
| 2014 JobScheduling scheduling; |
1910 /** Job is a Spark job. */ | 2015 /** Job is a Spark job. */ |
1911 SparkJob sparkJob; | 2016 SparkJob sparkJob; |
1912 /** Job is a SparkSql job. */ | 2017 /** Job is a SparkSql job. */ |
1913 SparkSqlJob sparkSqlJob; | 2018 SparkSqlJob sparkSqlJob; |
1914 /** | 2019 /** |
1915 * [Output-only] The job status. Additional application-specific status | 2020 * Output-only The job status. Additional application-specific status |
1916 * information may be contained in the type_job and yarn_applications fields. | 2021 * information may be contained in the <code>type_job</code> and |
| 2022 * <code>yarn_applications</code> fields. |
1917 */ | 2023 */ |
1918 JobStatus status; | 2024 JobStatus status; |
1919 /** [Output-only] The previous job status. */ | 2025 /** Output-only The previous job status. */ |
1920 core.List<JobStatus> statusHistory; | 2026 core.List<JobStatus> statusHistory; |
1921 /** | 2027 /** |
1922 * [Output-only] The collection of YARN applications spun up by this job. | 2028 * Output-only The collection of YARN applications spun up by this job. Beta |
1923 * **Beta** Feature: This report is available for testing purposes only. It | 2029 * Feature: This report is available for testing purposes only. It may be |
1924 * may be changed before final release. | 2030 * changed before final release. |
1925 */ | 2031 */ |
1926 core.List<YarnApplication> yarnApplications; | 2032 core.List<YarnApplication> yarnApplications; |
1927 | 2033 |
1928 Job(); | 2034 Job(); |
1929 | 2035 |
1930 Job.fromJson(core.Map _json) { | 2036 Job.fromJson(core.Map _json) { |
1931 if (_json.containsKey("driverControlFilesUri")) { | 2037 if (_json.containsKey("driverControlFilesUri")) { |
1932 driverControlFilesUri = _json["driverControlFilesUri"]; | 2038 driverControlFilesUri = _json["driverControlFilesUri"]; |
1933 } | 2039 } |
1934 if (_json.containsKey("driverOutputResourceUri")) { | 2040 if (_json.containsKey("driverOutputResourceUri")) { |
(...skipping 13 matching lines...) |
1948 } | 2054 } |
1949 if (_json.containsKey("placement")) { | 2055 if (_json.containsKey("placement")) { |
1950 placement = new JobPlacement.fromJson(_json["placement"]); | 2056 placement = new JobPlacement.fromJson(_json["placement"]); |
1951 } | 2057 } |
1952 if (_json.containsKey("pysparkJob")) { | 2058 if (_json.containsKey("pysparkJob")) { |
1953 pysparkJob = new PySparkJob.fromJson(_json["pysparkJob"]); | 2059 pysparkJob = new PySparkJob.fromJson(_json["pysparkJob"]); |
1954 } | 2060 } |
1955 if (_json.containsKey("reference")) { | 2061 if (_json.containsKey("reference")) { |
1956 reference = new JobReference.fromJson(_json["reference"]); | 2062 reference = new JobReference.fromJson(_json["reference"]); |
1957 } | 2063 } |
| 2064 if (_json.containsKey("scheduling")) { |
| 2065 scheduling = new JobScheduling.fromJson(_json["scheduling"]); |
| 2066 } |
1958 if (_json.containsKey("sparkJob")) { | 2067 if (_json.containsKey("sparkJob")) { |
1959 sparkJob = new SparkJob.fromJson(_json["sparkJob"]); | 2068 sparkJob = new SparkJob.fromJson(_json["sparkJob"]); |
1960 } | 2069 } |
1961 if (_json.containsKey("sparkSqlJob")) { | 2070 if (_json.containsKey("sparkSqlJob")) { |
1962 sparkSqlJob = new SparkSqlJob.fromJson(_json["sparkSqlJob"]); | 2071 sparkSqlJob = new SparkSqlJob.fromJson(_json["sparkSqlJob"]); |
1963 } | 2072 } |
1964 if (_json.containsKey("status")) { | 2073 if (_json.containsKey("status")) { |
1965 status = new JobStatus.fromJson(_json["status"]); | 2074 status = new JobStatus.fromJson(_json["status"]); |
1966 } | 2075 } |
1967 if (_json.containsKey("statusHistory")) { | 2076 if (_json.containsKey("statusHistory")) { |
(...skipping 26 matching lines...) |
1994 } | 2103 } |
1995 if (placement != null) { | 2104 if (placement != null) { |
1996 _json["placement"] = (placement).toJson(); | 2105 _json["placement"] = (placement).toJson(); |
1997 } | 2106 } |
1998 if (pysparkJob != null) { | 2107 if (pysparkJob != null) { |
1999 _json["pysparkJob"] = (pysparkJob).toJson(); | 2108 _json["pysparkJob"] = (pysparkJob).toJson(); |
2000 } | 2109 } |
2001 if (reference != null) { | 2110 if (reference != null) { |
2002 _json["reference"] = (reference).toJson(); | 2111 _json["reference"] = (reference).toJson(); |
2003 } | 2112 } |
| 2113 if (scheduling != null) { |
| 2114 _json["scheduling"] = (scheduling).toJson(); |
| 2115 } |
2004 if (sparkJob != null) { | 2116 if (sparkJob != null) { |
2005 _json["sparkJob"] = (sparkJob).toJson(); | 2117 _json["sparkJob"] = (sparkJob).toJson(); |
2006 } | 2118 } |
2007 if (sparkSqlJob != null) { | 2119 if (sparkSqlJob != null) { |
2008 _json["sparkSqlJob"] = (sparkSqlJob).toJson(); | 2120 _json["sparkSqlJob"] = (sparkSqlJob).toJson(); |
2009 } | 2121 } |
2010 if (status != null) { | 2122 if (status != null) { |
2011 _json["status"] = (status).toJson(); | 2123 _json["status"] = (status).toJson(); |
2012 } | 2124 } |
2013 if (statusHistory != null) { | 2125 if (statusHistory != null) { |
2014 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to
List(); | 2126 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to
List(); |
2015 } | 2127 } |
2016 if (yarnApplications != null) { | 2128 if (yarnApplications != null) { |
2017 _json["yarnApplications"] = yarnApplications.map((value) => (value).toJson
()).toList(); | 2129 _json["yarnApplications"] = yarnApplications.map((value) => (value).toJson
()).toList(); |
2018 } | 2130 } |
2019 return _json; | 2131 return _json; |
2020 } | 2132 } |
2021 } | 2133 } |
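
// A sketch assembling the Job resource above and submitting it. A
// caller-chosen jobId makes retries idempotent, as the JobReference comment
// below explains. Assumes the generated SubmitJobRequest type and
// jobs.submit(request, projectId, region) method from this library; all
// names are placeholders.
import 'dart:async' as async;
import 'package:googleapis/dataproc/v1.dart' as dataproc;

async.Future<dataproc.Job> submitHadoopJob(
    dataproc.DataprocApi api, dataproc.HadoopJob hadoopJob) {
  var job = new dataproc.Job()
    ..reference =
        (new dataproc.JobReference()..jobId = 'wordcount-2017-01-15-try1')
    ..placement = (new dataproc.JobPlacement()..clusterName = 'example-cluster')
    ..labels = {'team': 'analytics'} // must conform to RFC 1035
    ..scheduling = (new dataproc.JobScheduling()..maxFailuresPerHour = 3)
    ..hadoopJob = hadoopJob;
  var request = new dataproc.SubmitJobRequest()..job = job;
  return api.projects.regions.jobs.submit(request, 'my-project', 'global');
}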
2022 | 2134 |
2023 /** Cloud Dataproc job config. */ | 2135 /** Cloud Dataproc job config. */ |
2024 class JobPlacement { | 2136 class JobPlacement { |
2025 /** [Required] The name of the cluster where the job will be submitted. */ | 2137 /** Required The name of the cluster where the job will be submitted. */ |
2026 core.String clusterName; | 2138 core.String clusterName; |
2027 /** | 2139 /** |
2028 * [Output-only] A cluster UUID generated by the Cloud Dataproc service when | 2140 * Output-only A cluster UUID generated by the Cloud Dataproc service when the |
2029 * the job is submitted. | 2141 * job is submitted. |
2030 */ | 2142 */ |
2031 core.String clusterUuid; | 2143 core.String clusterUuid; |
2032 | 2144 |
2033 JobPlacement(); | 2145 JobPlacement(); |
2034 | 2146 |
2035 JobPlacement.fromJson(core.Map _json) { | 2147 JobPlacement.fromJson(core.Map _json) { |
2036 if (_json.containsKey("clusterName")) { | 2148 if (_json.containsKey("clusterName")) { |
2037 clusterName = _json["clusterName"]; | 2149 clusterName = _json["clusterName"]; |
2038 } | 2150 } |
2039 if (_json.containsKey("clusterUuid")) { | 2151 if (_json.containsKey("clusterUuid")) { |
2040 clusterUuid = _json["clusterUuid"]; | 2152 clusterUuid = _json["clusterUuid"]; |
2041 } | 2153 } |
2042 } | 2154 } |
2043 | 2155 |
2044 core.Map toJson() { | 2156 core.Map toJson() { |
2045 var _json = new core.Map(); | 2157 var _json = new core.Map(); |
2046 if (clusterName != null) { | 2158 if (clusterName != null) { |
2047 _json["clusterName"] = clusterName; | 2159 _json["clusterName"] = clusterName; |
2048 } | 2160 } |
2049 if (clusterUuid != null) { | 2161 if (clusterUuid != null) { |
2050 _json["clusterUuid"] = clusterUuid; | 2162 _json["clusterUuid"] = clusterUuid; |
2051 } | 2163 } |
2052 return _json; | 2164 return _json; |
2053 } | 2165 } |
2054 } | 2166 } |
2055 | 2167 |
2056 /** Encapsulates the full scoping used to reference a job. */ | 2168 /** Encapsulates the full scoping used to reference a job. */ |
2057 class JobReference { | 2169 class JobReference { |
2058 /** | 2170 /** |
2059 * [Optional] The job ID, which must be unique within the project. The job ID | 2171 * Optional The job ID, which must be unique within the project. The job ID is |
2060 * is generated by the server upon job submission or provided by the user as a | 2172 * generated by the server upon job submission or provided by the user as a |
2061 * means to perform retries without creating duplicate jobs. The ID must | 2173 * means to perform retries without creating duplicate jobs. The ID must |
2062 * contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens | 2174 * contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens |
2063 * (-). The maximum length is 512 characters. | 2175 * (-). The maximum length is 512 characters. |
2064 */ | 2176 */ |
2065 core.String jobId; | 2177 core.String jobId; |
2066 /** | 2178 /** |
2067 * [Required] The ID of the Google Cloud Platform project that the job belongs | 2179 * Required The ID of the Google Cloud Platform project that the job belongs |
2068 * to. | 2180 * to. |
2069 */ | 2181 */ |
2070 core.String projectId; | 2182 core.String projectId; |
2071 | 2183 |
2072 JobReference(); | 2184 JobReference(); |
2073 | 2185 |
2074 JobReference.fromJson(core.Map _json) { | 2186 JobReference.fromJson(core.Map _json) { |
2075 if (_json.containsKey("jobId")) { | 2187 if (_json.containsKey("jobId")) { |
2076 jobId = _json["jobId"]; | 2188 jobId = _json["jobId"]; |
2077 } | 2189 } |
2078 if (_json.containsKey("projectId")) { | 2190 if (_json.containsKey("projectId")) { |
2079 projectId = _json["projectId"]; | 2191 projectId = _json["projectId"]; |
2080 } | 2192 } |
2081 } | 2193 } |
2082 | 2194 |
2083 core.Map toJson() { | 2195 core.Map toJson() { |
2084 var _json = new core.Map(); | 2196 var _json = new core.Map(); |
2085 if (jobId != null) { | 2197 if (jobId != null) { |
2086 _json["jobId"] = jobId; | 2198 _json["jobId"] = jobId; |
2087 } | 2199 } |
2088 if (projectId != null) { | 2200 if (projectId != null) { |
2089 _json["projectId"] = projectId; | 2201 _json["projectId"] = projectId; |
2090 } | 2202 } |
2091 return _json; | 2203 return _json; |
2092 } | 2204 } |
2093 } | 2205 } |
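
// A sketch of pre-validating a caller-supplied job ID against the character
// and length rules documented above before placing it in a JobReference.
bool isValidJobId(String jobId) {
  return jobId.isNotEmpty &&
      jobId.length <= 512 &&
      new RegExp(r'^[a-zA-Z0-9_-]+$').hasMatch(jobId);
}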
2094 | 2206 |
| 2207 /** |
 | 2208 * Job scheduling options. Beta Feature: These options are available for testing |
| 2209 * purposes only. They may be changed before final release. |
| 2210 */ |
| 2211 class JobScheduling { |
| 2212 /** |
 | 2213 * Optional Maximum number of times per hour a driver may be restarted as a |
 | 2214 * result of the driver terminating with a non-zero code before the job is |
 | 2215 * reported failed. A job may be reported as thrashing if the driver exits |
 | 2216 * with a non-zero code 4 times within a 10-minute window. Maximum value is 10. |
| 2217 */ |
| 2218 core.int maxFailuresPerHour; |
| 2219 |
| 2220 JobScheduling(); |
| 2221 |
| 2222 JobScheduling.fromJson(core.Map _json) { |
| 2223 if (_json.containsKey("maxFailuresPerHour")) { |
| 2224 maxFailuresPerHour = _json["maxFailuresPerHour"]; |
| 2225 } |
| 2226 } |
| 2227 |
| 2228 core.Map toJson() { |
| 2229 var _json = new core.Map(); |
| 2230 if (maxFailuresPerHour != null) { |
| 2231 _json["maxFailuresPerHour"] = maxFailuresPerHour; |
| 2232 } |
| 2233 return _json; |
| 2234 } |
| 2235 } |
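
// A one-field sketch of the restartable-jobs beta above, clamping the
// requested restart budget to the documented maximum of 10 per hour.
// Assumes the published package:googleapis build.
import 'package:googleapis/dataproc/v1.dart' as dataproc;

dataproc.JobScheduling restartBudget(int perHour) =>
    new dataproc.JobScheduling()
      ..maxFailuresPerHour = perHour > 10 ? 10 : perHour;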
| 2236 |
2095 /** Cloud Dataproc job status. */ | 2237 /** Cloud Dataproc job status. */ |
2096 class JobStatus { | 2238 class JobStatus { |
2097 /** | 2239 /** |
2098 * [Output-only] Optional job state details, such as an error description if | 2240 * Output-only Optional job state details, such as an error description if the |
2099 * the state is ERROR. | 2241 * state is <code>ERROR</code>. |
2100 */ | 2242 */ |
2101 core.String details; | 2243 core.String details; |
2102 /** | 2244 /** |
2103 * [Output-only] A state message specifying the overall job state. | 2245 * Output-only A state message specifying the overall job state. |
2104 * Possible string values are: | 2246 * Possible string values are: |
2105 * - "STATE_UNSPECIFIED" : A STATE_UNSPECIFIED. | 2247 * - "STATE_UNSPECIFIED" : The job state is unknown. |
2106 * - "PENDING" : A PENDING. | 2248 * - "PENDING" : The job is pending; it has been submitted, but is not yet |
2107 * - "SETUP_DONE" : A SETUP_DONE. | 2249 * running. |
2108 * - "RUNNING" : A RUNNING. | 2250 * - "SETUP_DONE" : Job has been received by the service and completed initial |
2109 * - "CANCEL_PENDING" : A CANCEL_PENDING. | 2251 * setup; it will soon be submitted to the cluster. |
2110 * - "CANCEL_STARTED" : A CANCEL_STARTED. | 2252 * - "RUNNING" : The job is running on the cluster. |
2111 * - "CANCELLED" : A CANCELLED. | 2253 * - "CANCEL_PENDING" : A CancelJob request has been received, but is pending. |
2112 * - "DONE" : A DONE. | 2254 * - "CANCEL_STARTED" : Transient in-flight resources have been canceled, and |
2113 * - "ERROR" : A ERROR. | 2255 * the request to cancel the running job has been issued to the cluster. |
| 2256 * - "CANCELLED" : The job cancellation was successful. |
| 2257 * - "DONE" : The job has completed successfully. |
| 2258 * - "ERROR" : The job has completed, but encountered an error. |
| 2259 * - "ATTEMPT_FAILURE" : Job attempt has failed. The detail field contains |
 | 2260 * failure details for this attempt. Applies to restartable jobs only. |
2114 */ | 2261 */ |
2115 core.String state; | 2262 core.String state; |
2116 /** [Output-only] The time when this state was entered. */ | 2263 /** Output-only The time when this state was entered. */ |
2117 core.String stateStartTime; | 2264 core.String stateStartTime; |
2118 | 2265 |
2119 JobStatus(); | 2266 JobStatus(); |
2120 | 2267 |
2121 JobStatus.fromJson(core.Map _json) { | 2268 JobStatus.fromJson(core.Map _json) { |
2122 if (_json.containsKey("details")) { | 2269 if (_json.containsKey("details")) { |
2123 details = _json["details"]; | 2270 details = _json["details"]; |
2124 } | 2271 } |
2125 if (_json.containsKey("state")) { | 2272 if (_json.containsKey("state")) { |
2126 state = _json["state"]; | 2273 state = _json["state"]; |
(...skipping 13 matching lines...) |
2140 } | 2287 } |
2141 if (stateStartTime != null) { | 2288 if (stateStartTime != null) { |
2142 _json["stateStartTime"] = stateStartTime; | 2289 _json["stateStartTime"] = stateStartTime; |
2143 } | 2290 } |
2144 return _json; | 2291 return _json; |
2145 } | 2292 } |
2146 } | 2293 } |
2147 | 2294 |
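// Illustrative sketch: classifying the expanded job states above. Note that
// ATTEMPT_FAILURE is not terminal -- a restartable job may run again.
core.bool isJobTerminal(JobStatus status) {
  switch (status.state) {
    case "CANCELLED":
    case "DONE":
    case "ERROR":
      return true;
    default: // PENDING, SETUP_DONE, RUNNING, CANCEL_*, ATTEMPT_FAILURE
      return false;
  }
}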
2148 /** The list of all clusters in a project. */ | 2295 /** The list of all clusters in a project. */ |
2149 class ListClustersResponse { | 2296 class ListClustersResponse { |
2150 /** [Output-only] The clusters in the project. */ | 2297 /** Output-only The clusters in the project. */ |
2151 core.List<Cluster> clusters; | 2298 core.List<Cluster> clusters; |
2152 /** | 2299 /** |
2153 * [Output-only] This token is included in the response if there are more | 2300 * Output-only This token is included in the response if there are more |
2154 * results to fetch. To fetch additional results, provide this value as the | 2301 * results to fetch. To fetch additional results, provide this value as the |
2155 * `page_token` in a subsequent ListClustersRequest. | 2302 * page_token in a subsequent <code>ListClustersRequest</code>. |
2156 */ | 2303 */ |
2157 core.String nextPageToken; | 2304 core.String nextPageToken; |
2158 | 2305 |
2159 ListClustersResponse(); | 2306 ListClustersResponse(); |
2160 | 2307 |
2161 ListClustersResponse.fromJson(core.Map _json) { | 2308 ListClustersResponse.fromJson(core.Map _json) { |
2162 if (_json.containsKey("clusters")) { | 2309 if (_json.containsKey("clusters")) { |
2163       clusters = _json["clusters"].map((value) => new Cluster.fromJson(value)).toList(); | 2310       clusters = _json["clusters"].map((value) => new Cluster.fromJson(value)).toList(); |
2164 } | 2311 } |
2165 if (_json.containsKey("nextPageToken")) { | 2312 if (_json.containsKey("nextPageToken")) { |
2166 nextPageToken = _json["nextPageToken"]; | 2313 nextPageToken = _json["nextPageToken"]; |
2167 } | 2314 } |
2168 } | 2315 } |
2169 | 2316 |
2170 core.Map toJson() { | 2317 core.Map toJson() { |
2171 var _json = new core.Map(); | 2318 var _json = new core.Map(); |
2172 if (clusters != null) { | 2319 if (clusters != null) { |
2173 _json["clusters"] = clusters.map((value) => (value).toJson()).toList(); | 2320 _json["clusters"] = clusters.map((value) => (value).toJson()).toList(); |
2174 } | 2321 } |
2175 if (nextPageToken != null) { | 2322 if (nextPageToken != null) { |
2176 _json["nextPageToken"] = nextPageToken; | 2323 _json["nextPageToken"] = nextPageToken; |
2177 } | 2324 } |
2178 return _json; | 2325 return _json; |
2179 } | 2326 } |
2180 } | 2327 } |
2181 | 2328 |
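// Illustrative sketch: draining a paginated listing via nextPageToken.
// Assumes the clusters resource exposes list(projectId, region, {pageToken})
// returning a ListClustersResponse, in the style of the rest of this library.
async.Future<core.List<Cluster>> listAllClusters(
    DataprocApi api, core.String projectId, core.String region) async {
  var all = <Cluster>[];
  core.String token;
  do {
    var page = await api.projects.regions.clusters
        .list(projectId, region, pageToken: token);
    all.addAll(page.clusters ?? <Cluster>[]);
    token = page.nextPageToken; // absent on the final page
  } while (token != null);
  return all;
}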
2182 /** A list of jobs in a project. */ | 2329 /** A list of jobs in a project. */ |
2183 class ListJobsResponse { | 2330 class ListJobsResponse { |
2184 /** [Output-only] Jobs list. */ | 2331 /** Output-only Jobs list. */ |
2185 core.List<Job> jobs; | 2332 core.List<Job> jobs; |
2186 /** | 2333 /** |
2187 * [Optional] This token is included in the response if there are more results | 2334 * Optional This token is included in the response if there are more results |
2188 * to fetch. To fetch additional results, provide this value as the | 2335 * to fetch. To fetch additional results, provide this value as the page_token |
2189 * `page_token` in a subsequent ListJobsRequest. | 2336 * in a subsequent <code>ListJobsRequest</code>. |
2190 */ | 2337 */ |
2191 core.String nextPageToken; | 2338 core.String nextPageToken; |
2192 | 2339 |
2193 ListJobsResponse(); | 2340 ListJobsResponse(); |
2194 | 2341 |
2195 ListJobsResponse.fromJson(core.Map _json) { | 2342 ListJobsResponse.fromJson(core.Map _json) { |
2196 if (_json.containsKey("jobs")) { | 2343 if (_json.containsKey("jobs")) { |
2197 jobs = _json["jobs"].map((value) => new Job.fromJson(value)).toList(); | 2344 jobs = _json["jobs"].map((value) => new Job.fromJson(value)).toList(); |
2198 } | 2345 } |
2199 if (_json.containsKey("nextPageToken")) { | 2346 if (_json.containsKey("nextPageToken")) { |
(...skipping 40 matching lines...) |
2240       _json["operations"] = operations.map((value) => (value).toJson()).toList(); | 2387       _json["operations"] = operations.map((value) => (value).toJson()).toList(); |
2241 } | 2388 } |
2242 return _json; | 2389 return _json; |
2243 } | 2390 } |
2244 } | 2391 } |
2245 | 2392 |
2246 /** The runtime logging config of the job. */ | 2393 /** The runtime logging config of the job. */ |
2247 class LoggingConfig { | 2394 class LoggingConfig { |
2248 /** | 2395 /** |
2249 * The per-package log levels for the driver. This may include "root" package | 2396 * The per-package log levels for the driver. This may include "root" package |
2250 * name to configure rootLogger. Examples: 'com.google = FATAL', 'root = | 2397 * name to configure rootLogger. Examples: 'com.google = FATAL', 'root = |
2251 * INFO', 'org.apache = DEBUG' | 2398 * INFO', 'org.apache = DEBUG' |
2252 */ | 2399 */ |
2253 core.Map<core.String, core.String> driverLogLevels; | 2400 core.Map<core.String, core.String> driverLogLevels; |
2254 | 2401 |
2255 LoggingConfig(); | 2402 LoggingConfig(); |
2256 | 2403 |
2257 LoggingConfig.fromJson(core.Map _json) { | 2404 LoggingConfig.fromJson(core.Map _json) { |
2258 if (_json.containsKey("driverLogLevels")) { | 2405 if (_json.containsKey("driverLogLevels")) { |
2259 driverLogLevels = _json["driverLogLevels"]; | 2406 driverLogLevels = _json["driverLogLevels"]; |
2260 } | 2407 } |
2261 } | 2408 } |
2262 | 2409 |
2263 core.Map toJson() { | 2410 core.Map toJson() { |
2264 var _json = new core.Map(); | 2411 var _json = new core.Map(); |
2265 if (driverLogLevels != null) { | 2412 if (driverLogLevels != null) { |
2266 _json["driverLogLevels"] = driverLogLevels; | 2413 _json["driverLogLevels"] = driverLogLevels; |
2267 } | 2414 } |
2268 return _json; | 2415 return _json; |
2269 } | 2416 } |
2270 } | 2417 } |
2271 | 2418 |
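// Illustrative sketch: a LoggingConfig mirroring the 'package = LEVEL'
// examples in the doc comment above; "com.example" is a hypothetical package.
LoggingConfig exampleLoggingConfig() {
  return new LoggingConfig()
    ..driverLogLevels = <core.String, core.String>{
      "root": "INFO",
      "org.apache": "DEBUG",
      "com.example": "FATAL" // hypothetical user package
    };
}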
2272 /** Specifies the resources used to actively manage an instance group. */ | 2419 /** Specifies the resources used to actively manage an instance group. */ |
2273 class ManagedGroupConfig { | 2420 class ManagedGroupConfig { |
2274 /** [Output-only] The name of the Instance Group Manager for this group. */ | 2421 /** Output-only The name of the Instance Group Manager for this group. */ |
2275 core.String instanceGroupManagerName; | 2422 core.String instanceGroupManagerName; |
2276 /** | 2423 /** |
2277 * [Output-only] The name of the Instance Template used for the Managed | 2424 * Output-only The name of the Instance Template used for the Managed Instance |
2278 * Instance Group. | 2425 * Group. |
2279 */ | 2426 */ |
2280 core.String instanceTemplateName; | 2427 core.String instanceTemplateName; |
2281 | 2428 |
2282 ManagedGroupConfig(); | 2429 ManagedGroupConfig(); |
2283 | 2430 |
2284 ManagedGroupConfig.fromJson(core.Map _json) { | 2431 ManagedGroupConfig.fromJson(core.Map _json) { |
2285 if (_json.containsKey("instanceGroupManagerName")) { | 2432 if (_json.containsKey("instanceGroupManagerName")) { |
2286 instanceGroupManagerName = _json["instanceGroupManagerName"]; | 2433 instanceGroupManagerName = _json["instanceGroupManagerName"]; |
2287 } | 2434 } |
2288 if (_json.containsKey("instanceTemplateName")) { | 2435 if (_json.containsKey("instanceTemplateName")) { |
(...skipping 11 matching lines...) |
2300 } | 2447 } |
2301 return _json; | 2448 return _json; |
2302 } | 2449 } |
2303 } | 2450 } |
2304 | 2451 |
2305 /** | 2452 /** |
2306 * Specifies an executable to run on a fully configured node and a timeout | 2453 * Specifies an executable to run on a fully configured node and a timeout |
2307 * period for executable completion. | 2454 * period for executable completion. |
2308 */ | 2455 */ |
2309 class NodeInitializationAction { | 2456 class NodeInitializationAction { |
2310 /** [Required] Google Cloud Storage URI of executable file. */ | 2457 /** Required Google Cloud Storage URI of executable file. */ |
2311 core.String executableFile; | 2458 core.String executableFile; |
2312 /** | 2459 /** |
2313 * [Optional] Amount of time executable has to complete. Default is 10 | 2460 * Optional Amount of time executable has to complete. Default is 10 minutes. |
2314 * minutes. Cluster creation fails with an explanatory error message (the name | 2461 * Cluster creation fails with an explanatory error message (the name of the |
2315 * of the executable that caused the error and the exceeded timeout period) if | 2462 * executable that caused the error and the exceeded timeout period) if the |
2316    * the executable is not completed at the end of the timeout period. | 2463    * executable is not completed at the end of the timeout period.
2317 */ | 2464 */ |
2318 core.String executionTimeout; | 2465 core.String executionTimeout; |
2319 | 2466 |
2320 NodeInitializationAction(); | 2467 NodeInitializationAction(); |
2321 | 2468 |
2322 NodeInitializationAction.fromJson(core.Map _json) { | 2469 NodeInitializationAction.fromJson(core.Map _json) { |
2323 if (_json.containsKey("executableFile")) { | 2470 if (_json.containsKey("executableFile")) { |
2324 executableFile = _json["executableFile"]; | 2471 executableFile = _json["executableFile"]; |
2325 } | 2472 } |
2326 if (_json.containsKey("executionTimeout")) { | 2473 if (_json.containsKey("executionTimeout")) { |
(...skipping 12 matching lines...) |
2339 return _json; | 2486 return _json; |
2340 } | 2487 } |
2341 } | 2488 } |
2342 | 2489 |
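// Illustrative sketch: an initialization action. The URI is hypothetical,
// and "600s" assumes the usual seconds-suffixed JSON duration encoding.
NodeInitializationAction exampleInitAction() {
  return new NodeInitializationAction()
    ..executableFile = "gs://my-bucket/scripts/install-deps.sh" // hypothetical
    ..executionTimeout = "600s"; // the default is 10 minutes when unset
}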
2343 /** | 2490 /** |
2344 * This resource represents a long-running operation that is the result of a | 2491 * This resource represents a long-running operation that is the result of a |
2345 * network API call. | 2492 * network API call. |
2346 */ | 2493 */ |
2347 class Operation { | 2494 class Operation { |
2348 /** | 2495 /** |
2349 * If the value is `false`, it means the operation is still in progress. If | 2496 * If the value is false, it means the operation is still in progress. If |
2350 * true, the operation is completed, and either `error` or `response` is | 2497 * true, the operation is completed, and either error or response is |
2351 * available. | 2498 * available. |
2352 */ | 2499 */ |
2353 core.bool done; | 2500 core.bool done; |
2354 /** The error result of the operation in case of failure or cancellation. */ | 2501 /** The error result of the operation in case of failure or cancellation. */ |
2355 Status error; | 2502 Status error; |
2356 /** | 2503 /** |
2357 * Service-specific metadata associated with the operation. It typically | 2504 * Service-specific metadata associated with the operation. It typically |
2358 * contains progress information and common metadata such as create time. Some | 2505 * contains progress information and common metadata such as create time. Some |
2359 * services might not provide such metadata. Any method that returns a | 2506 * services might not provide such metadata. Any method that returns a |
2360 * long-running operation should document the metadata type, if any. | 2507 * long-running operation should document the metadata type, if any. |
2361 * | 2508 * |
2362 * The values for Object must be JSON objects. It can consist of `num`, | 2509 * The values for Object must be JSON objects. It can consist of `num`, |
2363 * `String`, `bool` and `null` as well as `Map` and `List` values. | 2510 * `String`, `bool` and `null` as well as `Map` and `List` values. |
2364 */ | 2511 */ |
2365 core.Map<core.String, core.Object> metadata; | 2512 core.Map<core.String, core.Object> metadata; |
2366 /** | 2513 /** |
2367 * The server-assigned name, which is only unique within the same service that | 2514 * The server-assigned name, which is only unique within the same service that |
2368 * originally returns it. If you use the default HTTP mapping, the `name` | 2515 * originally returns it. If you use the default HTTP mapping, the name should |
2369 * should have the format of `operations/some/unique/name`. | 2516 * have the format of operations/some/unique/name. |
2370 */ | 2517 */ |
2371 core.String name; | 2518 core.String name; |
2372 /** | 2519 /** |
2373 * The normal response of the operation in case of success. If the original | 2520 * The normal response of the operation in case of success. If the original |
2374 * method returns no data on success, such as `Delete`, the response is | 2521 * method returns no data on success, such as Delete, the response is |
2375 * `google.protobuf.Empty`. If the original method is standard | 2522 * google.protobuf.Empty. If the original method is standard |
2376 * `Get`/`Create`/`Update`, the response should be the resource. For other | 2523 * Get/Create/Update, the response should be the resource. For other methods, |
2377 * methods, the response should have the type `XxxResponse`, where `Xxx` is | 2524 * the response should have the type XxxResponse, where Xxx is the original |
2378 * the original method name. For example, if the original method name is | 2525 * method name. For example, if the original method name is TakeSnapshot(), |
2379 * `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`. | 2526 * the inferred response type is TakeSnapshotResponse. |
2380 * | 2527 * |
2381 * The values for Object must be JSON objects. It can consist of `num`, | 2528 * The values for Object must be JSON objects. It can consist of `num`, |
2382 * `String`, `bool` and `null` as well as `Map` and `List` values. | 2529 * `String`, `bool` and `null` as well as `Map` and `List` values. |
2383 */ | 2530 */ |
2384 core.Map<core.String, core.Object> response; | 2531 core.Map<core.String, core.Object> response; |
2385 | 2532 |
2386 Operation(); | 2533 Operation(); |
2387 | 2534 |
2388 Operation.fromJson(core.Map _json) { | 2535 Operation.fromJson(core.Map _json) { |
2389 if (_json.containsKey("done")) { | 2536 if (_json.containsKey("done")) { |
(...skipping 33 matching lines...) |
2423 return _json; | 2570 return _json; |
2424 } | 2571 } |
2425 } | 2572 } |
2426 | 2573 |
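// Illustrative sketch: interpreting a returned Operation. Per the doc
// comment above, once done is true exactly one of error/response is set.
core.String describeOperation(Operation op) {
  if (op.done != true) return "in progress";
  if (op.error != null) return "failed: ${op.error.message}";
  return "succeeded: ${op.response}";
}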
2427 /** Metadata describing the operation. */ | 2574 /** Metadata describing the operation. */ |
2428 class OperationMetadata { | 2575 class OperationMetadata { |
2429 /** Name of the cluster for the operation. */ | 2576 /** Name of the cluster for the operation. */ |
2430 core.String clusterName; | 2577 core.String clusterName; |
2431   /** Cluster UUID for the operation. */ | 2578   /** Cluster UUID for the operation. */ |
2432 core.String clusterUuid; | 2579 core.String clusterUuid; |
2433 /** [Output-only] Short description of operation. */ | 2580 /** Output-only Short description of operation. */ |
2434 core.String description; | 2581 core.String description; |
2435 /** A message containing any operation metadata details. */ | 2582 /** A message containing any operation metadata details. */ |
2436 core.String details; | 2583 core.String details; |
2437 /** The time that the operation completed. */ | 2584 /** The time that the operation completed. */ |
2438 core.String endTime; | 2585 core.String endTime; |
2439 /** A message containing the detailed operation state. */ | 2586 /** A message containing the detailed operation state. */ |
2440 core.String innerState; | 2587 core.String innerState; |
2441 /** The time that the operation was requested. */ | 2588 /** The time that the operation was requested. */ |
2442 core.String insertTime; | 2589 core.String insertTime; |
2443 /** [Output-only] The operation type. */ | 2590 /** Output-only The operation type. */ |
2444 core.String operationType; | 2591 core.String operationType; |
2445 /** The time that the operation was started by the server. */ | 2592 /** The time that the operation was started by the server. */ |
2446 core.String startTime; | 2593 core.String startTime; |
2447 /** | 2594 /** |
2448 * A message containing the operation state. | 2595 * A message containing the operation state. |
2449 * Possible string values are: | 2596 * Possible string values are: |
2450 * - "UNKNOWN" : A UNKNOWN. | 2597 * - "UNKNOWN" : Unused. |
2451 * - "PENDING" : A PENDING. | 2598 * - "PENDING" : The operation has been created. |
2452 * - "RUNNING" : A RUNNING. | 2599 * - "RUNNING" : The operation is currently running. |
2453 * - "DONE" : A DONE. | 2600 * - "DONE" : The operation is done, either cancelled or completed. |
2454 */ | 2601 */ |
2455 core.String state; | 2602 core.String state; |
2456 /** [Output-only] Current operation status. */ | 2603 /** Output-only Current operation status. */ |
2457 OperationStatus status; | 2604 OperationStatus status; |
2458 /** [Output-only] Previous operation status. */ | 2605 /** Output-only Previous operation status. */ |
2459 core.List<OperationStatus> statusHistory; | 2606 core.List<OperationStatus> statusHistory; |
| 2607 /** Output-only Errors encountered during operation execution. */ |
| 2608 core.List<core.String> warnings; |
2460 | 2609 |
2461 OperationMetadata(); | 2610 OperationMetadata(); |
2462 | 2611 |
2463 OperationMetadata.fromJson(core.Map _json) { | 2612 OperationMetadata.fromJson(core.Map _json) { |
2464 if (_json.containsKey("clusterName")) { | 2613 if (_json.containsKey("clusterName")) { |
2465 clusterName = _json["clusterName"]; | 2614 clusterName = _json["clusterName"]; |
2466 } | 2615 } |
2467 if (_json.containsKey("clusterUuid")) { | 2616 if (_json.containsKey("clusterUuid")) { |
2468 clusterUuid = _json["clusterUuid"]; | 2617 clusterUuid = _json["clusterUuid"]; |
2469 } | 2618 } |
(...skipping 20 matching lines...) |
2490 } | 2639 } |
2491 if (_json.containsKey("state")) { | 2640 if (_json.containsKey("state")) { |
2492 state = _json["state"]; | 2641 state = _json["state"]; |
2493 } | 2642 } |
2494 if (_json.containsKey("status")) { | 2643 if (_json.containsKey("status")) { |
2495 status = new OperationStatus.fromJson(_json["status"]); | 2644 status = new OperationStatus.fromJson(_json["status"]); |
2496 } | 2645 } |
2497 if (_json.containsKey("statusHistory")) { | 2646 if (_json.containsKey("statusHistory")) { |
2498       statusHistory = _json["statusHistory"].map((value) => new OperationStatus.fromJson(value)).toList(); | 2647       statusHistory = _json["statusHistory"].map((value) => new OperationStatus.fromJson(value)).toList(); |
2499 } | 2648 } |
| 2649 if (_json.containsKey("warnings")) { |
| 2650 warnings = _json["warnings"]; |
| 2651 } |
2500 } | 2652 } |
2501 | 2653 |
2502 core.Map toJson() { | 2654 core.Map toJson() { |
2503 var _json = new core.Map(); | 2655 var _json = new core.Map(); |
2504 if (clusterName != null) { | 2656 if (clusterName != null) { |
2505 _json["clusterName"] = clusterName; | 2657 _json["clusterName"] = clusterName; |
2506 } | 2658 } |
2507 if (clusterUuid != null) { | 2659 if (clusterUuid != null) { |
2508 _json["clusterUuid"] = clusterUuid; | 2660 _json["clusterUuid"] = clusterUuid; |
2509 } | 2661 } |
(...skipping 20 matching lines...) |
2530 } | 2682 } |
2531 if (state != null) { | 2683 if (state != null) { |
2532 _json["state"] = state; | 2684 _json["state"] = state; |
2533 } | 2685 } |
2534 if (status != null) { | 2686 if (status != null) { |
2535 _json["status"] = (status).toJson(); | 2687 _json["status"] = (status).toJson(); |
2536 } | 2688 } |
2537 if (statusHistory != null) { | 2689 if (statusHistory != null) { |
2538       _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).toList(); | 2690       _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).toList(); |
2539 } | 2691 } |
| 2692 if (warnings != null) { |
| 2693 _json["warnings"] = warnings; |
| 2694 } |
2540 return _json; | 2695 return _json; |
2541 } | 2696 } |
2542 } | 2697 } |
2543 | 2698 |
2544 /** The status of the operation. */ | 2699 /** The status of the operation. */ |
2545 class OperationStatus { | 2700 class OperationStatus { |
2546 /** A message containing any operation metadata details. */ | 2701 /** A message containing any operation metadata details. */ |
2547 core.String details; | 2702 core.String details; |
2548 /** A message containing the detailed operation state. */ | 2703 /** A message containing the detailed operation state. */ |
2549 core.String innerState; | 2704 core.String innerState; |
2550 /** | 2705 /** |
2551 * A message containing the operation state. | 2706 * A message containing the operation state. |
2552 * Possible string values are: | 2707 * Possible string values are: |
2553 * - "UNKNOWN" : A UNKNOWN. | 2708 * - "UNKNOWN" : Unused. |
2554 * - "PENDING" : A PENDING. | 2709 * - "PENDING" : The operation has been created. |
2555 * - "RUNNING" : A RUNNING. | 2710 * - "RUNNING" : The operation is running. |
2556 * - "DONE" : A DONE. | 2711 * - "DONE" : The operation is done; either cancelled or completed. |
2557 */ | 2712 */ |
2558 core.String state; | 2713 core.String state; |
2559 /** The time this state was entered. */ | 2714 /** The time this state was entered. */ |
2560 core.String stateStartTime; | 2715 core.String stateStartTime; |
2561 | 2716 |
2562 OperationStatus(); | 2717 OperationStatus(); |
2563 | 2718 |
2564 OperationStatus.fromJson(core.Map _json) { | 2719 OperationStatus.fromJson(core.Map _json) { |
2565 if (_json.containsKey("details")) { | 2720 if (_json.containsKey("details")) { |
2566 details = _json["details"]; | 2721 details = _json["details"]; |
(...skipping 21 matching lines...) |
2588 _json["state"] = state; | 2743 _json["state"] = state; |
2589 } | 2744 } |
2590 if (stateStartTime != null) { | 2745 if (stateStartTime != null) { |
2591 _json["stateStartTime"] = stateStartTime; | 2746 _json["stateStartTime"] = stateStartTime; |
2592 } | 2747 } |
2593 return _json; | 2748 return _json; |
2594 } | 2749 } |
2595 } | 2750 } |
2596 | 2751 |
2597 /** | 2752 /** |
2598 * A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/) | 2753 * A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/) queries |
2599 * queries on YARN. | 2754 * on YARN. |
2600 */ | 2755 */ |
2601 class PigJob { | 2756 class PigJob { |
2602 /** | 2757 /** |
2603 * [Optional] Whether to continue executing queries if a query fails. The | 2758 * Optional Whether to continue executing queries if a query fails. The |
2604 * default value is `false`. Setting to `true` can be useful when executing | 2759 * default value is false. Setting to true can be useful when executing |
2605 * independent parallel queries. | 2760 * independent parallel queries. |
2606 */ | 2761 */ |
2607 core.bool continueOnFailure; | 2762 core.bool continueOnFailure; |
2608 /** | 2763 /** |
2609 * [Optional] HCFS URIs of jar files to add to the CLASSPATH of the Pig Client | 2764 * Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client |
2610 * and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. | 2765 * and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. |
2611 */ | 2766 */ |
2612 core.List<core.String> jarFileUris; | 2767 core.List<core.String> jarFileUris; |
2613 /** [Optional] The runtime log config for job execution. */ | 2768 /** Optional The runtime log config for job execution. */ |
2614 LoggingConfig loggingConfig; | 2769 LoggingConfig loggingConfig; |
2615 /** | 2770 /** |
2616 * [Optional] A mapping of property names to values, used to configure Pig. | 2771 * Optional A mapping of property names to values, used to configure Pig. |
2617 * Properties that conflict with values set by the Cloud Dataproc API may be | 2772 * Properties that conflict with values set by the Cloud Dataproc API may be |
2618 * overwritten. Can include properties set in /etc/hadoop/conf / * -site.xml, | 2773 * overwritten. Can include properties set in /etc/hadoop/conf / * -site.xml, |
2619 * /etc/pig/conf/pig.properties, and classes in user code. | 2774 * /etc/pig/conf/pig.properties, and classes in user code. |
2620 */ | 2775 */ |
2621 core.Map<core.String, core.String> properties; | 2776 core.Map<core.String, core.String> properties; |
2622 /** The HCFS URI of the script that contains the Pig queries. */ | 2777 /** The HCFS URI of the script that contains the Pig queries. */ |
2623 core.String queryFileUri; | 2778 core.String queryFileUri; |
2624 /** A list of queries. */ | 2779 /** A list of queries. */ |
2625 QueryList queryList; | 2780 QueryList queryList; |
2626 /** | 2781 /** |
2627 * [Optional] Mapping of query variable names to values (equivalent to the Pig | 2782 * Optional Mapping of query variable names to values (equivalent to the Pig |
2628 * command: `name=[value]`). | 2783 * command: name=[value]). |
2629 */ | 2784 */ |
2630 core.Map<core.String, core.String> scriptVariables; | 2785 core.Map<core.String, core.String> scriptVariables; |
2631 | 2786 |
2632 PigJob(); | 2787 PigJob(); |
2633 | 2788 |
2634 PigJob.fromJson(core.Map _json) { | 2789 PigJob.fromJson(core.Map _json) { |
2635 if (_json.containsKey("continueOnFailure")) { | 2790 if (_json.containsKey("continueOnFailure")) { |
2636 continueOnFailure = _json["continueOnFailure"]; | 2791 continueOnFailure = _json["continueOnFailure"]; |
2637 } | 2792 } |
2638 if (_json.containsKey("jarFileUris")) { | 2793 if (_json.containsKey("jarFileUris")) { |
(...skipping 37 matching lines...) |
2676 _json["queryList"] = (queryList).toJson(); | 2831 _json["queryList"] = (queryList).toJson(); |
2677 } | 2832 } |
2678 if (scriptVariables != null) { | 2833 if (scriptVariables != null) { |
2679 _json["scriptVariables"] = scriptVariables; | 2834 _json["scriptVariables"] = scriptVariables; |
2680 } | 2835 } |
2681 return _json; | 2836 return _json; |
2682 } | 2837 } |
2683 } | 2838 } |
2684 | 2839 |
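// Illustrative sketch: a PigJob with an inline query and a script variable
// (name=[value], as documented above); the bucket path is hypothetical.
PigJob examplePigJob() {
  return new PigJob()
    ..continueOnFailure = false
    ..queryList = (new QueryList()..queries = ["fs -ls \$input;"])
    ..scriptVariables = <core.String, core.String>{
      "input": "gs://my-bucket/data" // hypothetical
    };
}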
2685 /** | 2840 /** |
2686 * A Cloud Dataproc job for running [Apache | 2841 * A Cloud Dataproc job for running Apache PySpark |
2687 * PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html) | 2842 * (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) |
2688 * applications on YARN. | 2843 * applications on YARN. |
2689 */ | 2844 */ |
2690 class PySparkJob { | 2845 class PySparkJob { |
2691 /** | 2846 /** |
2692 * [Optional] HCFS URIs of archives to be extracted in the working directory | 2847 * Optional HCFS URIs of archives to be extracted in the working directory of |
2693 * of .jar, .tar, .tar.gz, .tgz, and .zip. | 2848 * .jar, .tar, .tar.gz, .tgz, and .zip. |
2694 */ | 2849 */ |
2695 core.List<core.String> archiveUris; | 2850 core.List<core.String> archiveUris; |
2696 /** | 2851 /** |
2697 * [Optional] The arguments to pass to the driver. Do not include arguments, | 2852 * Optional The arguments to pass to the driver. Do not include arguments, |
2698 * such as `--conf`, that can be set as job properties, since a collision may | 2853 * such as --conf, that can be set as job properties, since a collision may |
2699 * occur that causes an incorrect job submission. | 2854 * occur that causes an incorrect job submission. |
2700 */ | 2855 */ |
2701 core.List<core.String> args; | 2856 core.List<core.String> args; |
2702 /** | 2857 /** |
2703 * [Optional] HCFS URIs of files to be copied to the working directory of | 2858 * Optional HCFS URIs of files to be copied to the working directory of Python |
2704 * Python drivers and distributed tasks. Useful for naively parallel tasks. | 2859 * drivers and distributed tasks. Useful for naively parallel tasks. |
2705 */ | 2860 */ |
2706 core.List<core.String> fileUris; | 2861 core.List<core.String> fileUris; |
2707 /** | 2862 /** |
2708 * [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the Python | 2863 * Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python |
2709 * driver and tasks. | 2864 * driver and tasks. |
2710 */ | 2865 */ |
2711 core.List<core.String> jarFileUris; | 2866 core.List<core.String> jarFileUris; |
2712 /** [Optional] The runtime log config for job execution. */ | 2867 /** Optional The runtime log config for job execution. */ |
2713 LoggingConfig loggingConfig; | 2868 LoggingConfig loggingConfig; |
2714 /** | 2869 /** |
2715 * [Required] The HCFS URI of the main Python file to use as the driver. Must | 2870 * Required The HCFS URI of the main Python file to use as the driver. Must be |
2716 * be a .py file. | 2871 * a .py file. |
2717 */ | 2872 */ |
2718 core.String mainPythonFileUri; | 2873 core.String mainPythonFileUri; |
2719 /** | 2874 /** |
2720 * [Optional] A mapping of property names to values, used to configure | 2875 * Optional A mapping of property names to values, used to configure PySpark. |
2721 * PySpark. Properties that conflict with values set by the Cloud Dataproc API | 2876 * Properties that conflict with values set by the Cloud Dataproc API may be |
2722 * may be overwritten. Can include properties set in | 2877 * overwritten. Can include properties set in |
2723 * /etc/spark/conf/spark-defaults.conf and classes in user code. | 2878 * /etc/spark/conf/spark-defaults.conf and classes in user code. |
2724 */ | 2879 */ |
2725 core.Map<core.String, core.String> properties; | 2880 core.Map<core.String, core.String> properties; |
2726 /** | 2881 /** |
2727 * [Optional] HCFS file URIs of Python files to pass to the PySpark framework. | 2882 * Optional HCFS file URIs of Python files to pass to the PySpark framework. |
2728 * Supported file types: .py, .egg, and .zip. | 2883 * Supported file types: .py, .egg, and .zip. |
2729 */ | 2884 */ |
2730 core.List<core.String> pythonFileUris; | 2885 core.List<core.String> pythonFileUris; |
2731 | 2886 |
2732 PySparkJob(); | 2887 PySparkJob(); |
2733 | 2888 |
2734 PySparkJob.fromJson(core.Map _json) { | 2889 PySparkJob.fromJson(core.Map _json) { |
2735 if (_json.containsKey("archiveUris")) { | 2890 if (_json.containsKey("archiveUris")) { |
2736 archiveUris = _json["archiveUris"]; | 2891 archiveUris = _json["archiveUris"]; |
2737 } | 2892 } |
(...skipping 46 matching lines...) |
2784 if (pythonFileUris != null) { | 2939 if (pythonFileUris != null) { |
2785 _json["pythonFileUris"] = pythonFileUris; | 2940 _json["pythonFileUris"] = pythonFileUris; |
2786 } | 2941 } |
2787 return _json; | 2942 return _json; |
2788 } | 2943 } |
2789 } | 2944 } |
2790 | 2945 |
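// Illustrative sketch: a PySparkJob whose driver pulls in one helper module.
// All URIs are hypothetical; mainPythonFileUri must point at a .py file.
PySparkJob examplePySparkJob() {
  return new PySparkJob()
    ..mainPythonFileUri = "gs://my-bucket/jobs/wordcount.py"
    ..pythonFileUris = ["gs://my-bucket/jobs/helpers.py"]
    ..args = ["gs://my-bucket/input", "gs://my-bucket/output"];
}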
2791 /** A list of queries to run on a cluster. */ | 2946 /** A list of queries to run on a cluster. */ |
2792 class QueryList { | 2947 class QueryList { |
2793 /** | 2948 /** |
2794 * [Required] The queries to execute. You do not need to terminate a query | 2949 * Required The queries to execute. You do not need to terminate a query with |
2795 * with a semicolon. Multiple queries can be specified in one string by | 2950 * a semicolon. Multiple queries can be specified in one string by separating |
2796    * separating each with a semicolon. Here is an example of a Cloud Dataproc | 2951    * each with a semicolon. Here is an example of a Cloud Dataproc API snippet
2797 * API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { | 2952 * that uses a QueryList to specify a HiveJob: |
2798 * "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } | 2953 * "hiveJob": { |
| 2954 * "queryList": { |
| 2955 * "queries": [ |
| 2956 * "query1", |
| 2957 * "query2", |
| 2958 * "query3;query4", |
| 2959 * ] |
| 2960 * } |
| 2961 * } |
2799 */ | 2962 */ |
2800 core.List<core.String> queries; | 2963 core.List<core.String> queries; |
2801 | 2964 |
2802 QueryList(); | 2965 QueryList(); |
2803 | 2966 |
2804 QueryList.fromJson(core.Map _json) { | 2967 QueryList.fromJson(core.Map _json) { |
2805 if (_json.containsKey("queries")) { | 2968 if (_json.containsKey("queries")) { |
2806 queries = _json["queries"]; | 2969 queries = _json["queries"]; |
2807 } | 2970 } |
2808 } | 2971 } |
2809 | 2972 |
2810 core.Map toJson() { | 2973 core.Map toJson() { |
2811 var _json = new core.Map(); | 2974 var _json = new core.Map(); |
2812 if (queries != null) { | 2975 if (queries != null) { |
2813 _json["queries"] = queries; | 2976 _json["queries"] = queries; |
2814 } | 2977 } |
2815 return _json; | 2978 return _json; |
2816 } | 2979 } |
2817 } | 2980 } |
2818 | 2981 |
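// Illustrative sketch: the Dart equivalent of the JSON snippet in the doc
// comment above -- three list entries, one of which carries two queries.
QueryList exampleQueryList() {
  return new QueryList()..queries = ["query1", "query2", "query3;query4"];
}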
2819 /** Specifies the selection and config of software inside the cluster. */ | 2982 /** Specifies the selection and config of software inside the cluster. */ |
2820 class SoftwareConfig { | 2983 class SoftwareConfig { |
2821 /** | 2984 /** |
2822 * [Optional] The version of software inside the cluster. It must match the | 2985 * Optional The version of software inside the cluster. It must match the |
2823 * regular expression `[0-9]+\.[0-9]+`. If unspecified, it defaults to the | 2986 * regular expression [0-9]+\.[0-9]+. If unspecified, it defaults to the |
2824 * latest version (see [Cloud Dataproc Versioning](/dataproc/versioning)). | 2987 * latest version (see Cloud Dataproc Versioning). |
2825 */ | 2988 */ |
2826 core.String imageVersion; | 2989 core.String imageVersion; |
2827 /** | 2990 /** |
2828    * [Optional] The properties to set on daemon config files. Property keys are | 2991    * Optional The properties to set on daemon config files. Property keys are
2829 * specified in `prefix:property` format, such as `core:fs.defaultFS`. The | 2992 * specified in prefix:property format, such as core:fs.defaultFS. The |
2830 * following are supported prefixes and their mappings: * core: | 2993 * following are supported prefixes and their mappings: |
2831 * `core-site.xml` * hdfs: `hdfs-site.xml` * mapred: `mapred-site.xml` * yarn: | 2994 * core: core-site.xml |
2832 * `yarn-site.xml` * hive: `hive-site.xml` * pig: `pig.properties` * spark: | 2995 * hdfs: hdfs-site.xml |
2833 * `spark-defaults.conf` | 2996 * mapred: mapred-site.xml |
| 2997 * yarn: yarn-site.xml |
| 2998 * hive: hive-site.xml |
| 2999 * pig: pig.properties |
| 3000 * spark: spark-defaults.conf |
2834 */ | 3001 */ |
2835 core.Map<core.String, core.String> properties; | 3002 core.Map<core.String, core.String> properties; |
2836 | 3003 |
2837 SoftwareConfig(); | 3004 SoftwareConfig(); |
2838 | 3005 |
2839 SoftwareConfig.fromJson(core.Map _json) { | 3006 SoftwareConfig.fromJson(core.Map _json) { |
2840 if (_json.containsKey("imageVersion")) { | 3007 if (_json.containsKey("imageVersion")) { |
2841 imageVersion = _json["imageVersion"]; | 3008 imageVersion = _json["imageVersion"]; |
2842 } | 3009 } |
2843 if (_json.containsKey("properties")) { | 3010 if (_json.containsKey("properties")) { |
2844 properties = _json["properties"]; | 3011 properties = _json["properties"]; |
2845 } | 3012 } |
2846 } | 3013 } |
2847 | 3014 |
2848 core.Map toJson() { | 3015 core.Map toJson() { |
2849 var _json = new core.Map(); | 3016 var _json = new core.Map(); |
2850 if (imageVersion != null) { | 3017 if (imageVersion != null) { |
2851 _json["imageVersion"] = imageVersion; | 3018 _json["imageVersion"] = imageVersion; |
2852 } | 3019 } |
2853 if (properties != null) { | 3020 if (properties != null) { |
2854 _json["properties"] = properties; | 3021 _json["properties"] = properties; |
2855 } | 3022 } |
2856 return _json; | 3023 return _json; |
2857 } | 3024 } |
2858 } | 3025 } |
2859 | 3026 |
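// Illustrative sketch: pinning an image version and setting daemon
// properties with the documented prefix:property keys; values are
// hypothetical.
SoftwareConfig exampleSoftwareConfig() {
  return new SoftwareConfig()
    ..imageVersion = "1.1" // must match [0-9]+\.[0-9]+
    ..properties = <core.String, core.String>{
      "core:fs.defaultFS": "gs://my-bucket",
      "spark:spark.executor.memory": "4g"
    };
}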
2860 /** | 3027 /** |
2861 * A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/) | 3028 * A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/) |
2862 * applications on YARN. | 3029 * applications on YARN. |
2863 */ | 3030 */ |
2864 class SparkJob { | 3031 class SparkJob { |
2865 /** | 3032 /** |
2866 * [Optional] HCFS URIs of archives to be extracted in the working directory | 3033 * Optional HCFS URIs of archives to be extracted in the working directory of |
2867 * of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, | 3034 * Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, |
2868 * .tgz, and .zip. | 3035 * and .zip. |
2869 */ | 3036 */ |
2870 core.List<core.String> archiveUris; | 3037 core.List<core.String> archiveUris; |
2871 /** | 3038 /** |
2872 * [Optional] The arguments to pass to the driver. Do not include arguments, | 3039 * Optional The arguments to pass to the driver. Do not include arguments, |
2873 * such as `--conf`, that can be set as job properties, since a collision may | 3040 * such as --conf, that can be set as job properties, since a collision may |
2874 * occur that causes an incorrect job submission. | 3041 * occur that causes an incorrect job submission. |
2875 */ | 3042 */ |
2876 core.List<core.String> args; | 3043 core.List<core.String> args; |
2877 /** | 3044 /** |
2878 * [Optional] HCFS URIs of files to be copied to the working directory of | 3045 * Optional HCFS URIs of files to be copied to the working directory of Spark |
2879 * Spark drivers and distributed tasks. Useful for naively parallel tasks. | 3046 * drivers and distributed tasks. Useful for naively parallel tasks. |
2880 */ | 3047 */ |
2881 core.List<core.String> fileUris; | 3048 core.List<core.String> fileUris; |
2882 /** | 3049 /** |
2883 * [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the Spark | 3050 * Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark |
2884 * driver and tasks. | 3051 * driver and tasks. |
2885 */ | 3052 */ |
2886 core.List<core.String> jarFileUris; | 3053 core.List<core.String> jarFileUris; |
2887 /** [Optional] The runtime log config for job execution. */ | 3054 /** Optional The runtime log config for job execution. */ |
2888 LoggingConfig loggingConfig; | 3055 LoggingConfig loggingConfig; |
2889 /** | 3056 /** |
2890 * The name of the driver's main class. The jar file that contains the class | 3057 * The name of the driver's main class. The jar file that contains the class |
2891 * must be in the default CLASSPATH or specified in `jar_file_uris`. | 3058 * must be in the default CLASSPATH or specified in jar_file_uris. |
2892 */ | 3059 */ |
2893 core.String mainClass; | 3060 core.String mainClass; |
2894 /** The HCFS URI of the jar file that contains the main class. */ | 3061 /** The HCFS URI of the jar file that contains the main class. */ |
2895 core.String mainJarFileUri; | 3062 core.String mainJarFileUri; |
2896 /** | 3063 /** |
2897 * [Optional] A mapping of property names to values, used to configure Spark. | 3064 * Optional A mapping of property names to values, used to configure Spark. |
2898 * Properties that conflict with values set by the Cloud Dataproc API may be | 3065 * Properties that conflict with values set by the Cloud Dataproc API may be |
2899 * overwritten. Can include properties set in | 3066 * overwritten. Can include properties set in |
2900 * /etc/spark/conf/spark-defaults.conf and classes in user code. | 3067 * /etc/spark/conf/spark-defaults.conf and classes in user code. |
2901 */ | 3068 */ |
2902 core.Map<core.String, core.String> properties; | 3069 core.Map<core.String, core.String> properties; |
2903 | 3070 |
2904 SparkJob(); | 3071 SparkJob(); |
2905 | 3072 |
2906 SparkJob.fromJson(core.Map _json) { | 3073 SparkJob.fromJson(core.Map _json) { |
2907 if (_json.containsKey("archiveUris")) { | 3074 if (_json.containsKey("archiveUris")) { |
(...skipping 46 matching lines...) |
2954 _json["mainJarFileUri"] = mainJarFileUri; | 3121 _json["mainJarFileUri"] = mainJarFileUri; |
2955 } | 3122 } |
2956 if (properties != null) { | 3123 if (properties != null) { |
2957 _json["properties"] = properties; | 3124 _json["properties"] = properties; |
2958 } | 3125 } |
2959 return _json; | 3126 return _json; |
2960 } | 3127 } |
2961 } | 3128 } |
2962 | 3129 |
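// Illustrative sketch: launching Spark by main class; the jar that defines
// the class is listed in jarFileUris. Names are hypothetical.
SparkJob exampleSparkJob() {
  return new SparkJob()
    ..mainClass = "com.example.WordCount"
    ..jarFileUris = ["gs://my-bucket/jars/wordcount.jar"]
    ..args = ["1000"];
}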
2963 /** | 3130 /** |
2964 * A Cloud Dataproc job for running [Apache Spark | 3131 * A Cloud Dataproc job for running Apache Spark SQL |
2965 * SQL](http://spark.apache.org/sql/) queries. | 3132 * (http://spark.apache.org/sql/) queries. |
2966 */ | 3133 */ |
2967 class SparkSqlJob { | 3134 class SparkSqlJob { |
2968 /** [Optional] HCFS URIs of jar files to be added to the Spark CLASSPATH. */ | 3135 /** Optional HCFS URIs of jar files to be added to the Spark CLASSPATH. */ |
2969 core.List<core.String> jarFileUris; | 3136 core.List<core.String> jarFileUris; |
2970 /** [Optional] The runtime log config for job execution. */ | 3137 /** Optional The runtime log config for job execution. */ |
2971 LoggingConfig loggingConfig; | 3138 LoggingConfig loggingConfig; |
2972 /** | 3139 /** |
2973 * [Optional] A mapping of property names to values, used to configure Spark | 3140 * Optional A mapping of property names to values, used to configure Spark |
2974 * SQL's SparkConf. Properties that conflict with values set by the Cloud | 3141 * SQL's SparkConf. Properties that conflict with values set by the Cloud |
2975 * Dataproc API may be overwritten. | 3142 * Dataproc API may be overwritten. |
2976 */ | 3143 */ |
2977 core.Map<core.String, core.String> properties; | 3144 core.Map<core.String, core.String> properties; |
2978 /** The HCFS URI of the script that contains SQL queries. */ | 3145 /** The HCFS URI of the script that contains SQL queries. */ |
2979 core.String queryFileUri; | 3146 core.String queryFileUri; |
2980 /** A list of queries. */ | 3147 /** A list of queries. */ |
2981 QueryList queryList; | 3148 QueryList queryList; |
2982 /** | 3149 /** |
2983 * [Optional] Mapping of query variable names to values (equivalent to the | 3150 * Optional Mapping of query variable names to values (equivalent to the Spark |
2984 * Spark SQL command: SET `name="value";`). | 3151 * SQL command: SET name="value";). |
2985 */ | 3152 */ |
2986 core.Map<core.String, core.String> scriptVariables; | 3153 core.Map<core.String, core.String> scriptVariables; |
2987 | 3154 |
2988 SparkSqlJob(); | 3155 SparkSqlJob(); |
2989 | 3156 |
2990 SparkSqlJob.fromJson(core.Map _json) { | 3157 SparkSqlJob.fromJson(core.Map _json) { |
2991 if (_json.containsKey("jarFileUris")) { | 3158 if (_json.containsKey("jarFileUris")) { |
2992 jarFileUris = _json["jarFileUris"]; | 3159 jarFileUris = _json["jarFileUris"]; |
2993 } | 3160 } |
2994 if (_json.containsKey("loggingConfig")) { | 3161 if (_json.containsKey("loggingConfig")) { |
(...skipping 31 matching lines...) |
3026 _json["queryList"] = (queryList).toJson(); | 3193 _json["queryList"] = (queryList).toJson(); |
3027 } | 3194 } |
3028 if (scriptVariables != null) { | 3195 if (scriptVariables != null) { |
3029 _json["scriptVariables"] = scriptVariables; | 3196 _json["scriptVariables"] = scriptVariables; |
3030 } | 3197 } |
3031 return _json; | 3198 return _json; |
3032 } | 3199 } |
3033 } | 3200 } |
3034 | 3201 |
3035 /** | 3202 /** |
3036 * The `Status` type defines a logical error model that is suitable for | 3203 * The Status type defines a logical error model that is suitable for different |
3037 * different programming environments, including REST APIs and RPC APIs. It is | 3204 * programming environments, including REST APIs and RPC APIs. It is used by |
3038 * used by [gRPC](https://github.com/grpc). The error model is designed to be: - | 3205 * gRPC (https://github.com/grpc). The error model is designed to be: |
3039 * Simple to use and understand for most users - Flexible enough to meet | 3206 * Simple to use and understand for most users |
3040  * unexpected needs # Overview The `Status` message contains three pieces of | 3207  * Flexible enough to meet unexpected needs. Overview: The Status message contains
3041 * data: error code, error message, and error details. The error code should be | 3208 * three pieces of data: error code, error message, and error details. The error |
3042 * an enum value of google.rpc.Code, but it may accept additional error codes if | 3209 * code should be an enum value of google.rpc.Code, but it may accept additional |
3043 * needed. The error message should be a developer-facing English message that | 3210 * error codes if needed. The error message should be a developer-facing English |
3044 * helps developers *understand* and *resolve* the error. If a localized | 3211 * message that helps developers understand and resolve the error. If a |
3045 * user-facing error message is needed, put the localized message in the error | 3212 * localized user-facing error message is needed, put the localized message in |
3046 * details or localize it in the client. The optional error details may contain | 3213 * the error details or localize it in the client. The optional error details |
3047 * arbitrary information about the error. There is a predefined set of error | 3214 * may contain arbitrary information about the error. There is a predefined set |
3048 * detail types in the package `google.rpc` which can be used for common error | 3215 * of error detail types in the package google.rpc which can be used for common |
3049  * conditions. # Language mapping The `Status` message is the logical | 3216  * error conditions. Language mapping: The Status message is the logical
3050 * representation of the error model, but it is not necessarily the actual wire | 3217 * representation of the error model, but it is not necessarily the actual wire |
3051 * format. When the `Status` message is exposed in different client libraries | 3218 * format. When the Status message is exposed in different client libraries and |
3052 * and different wire protocols, it can be mapped differently. For example, it | 3219 * different wire protocols, it can be mapped differently. For example, it will |
3053 * will likely be mapped to some exceptions in Java, but more likely mapped to | 3220 * likely be mapped to some exceptions in Java, but more likely mapped to some |
3054  * some error codes in C. # Other uses The error model and the `Status` message | 3221  * error codes in C. Other uses: The error model and the Status message can be used
3055 * can be used in a variety of environments, either with or without APIs, to | 3222 * in a variety of environments, either with or without APIs, to provide a |
3056  * provide a consistent developer experience across different environments. | 3223  * consistent developer experience across different environments. Example uses of
3057 * Example uses of this error model include: - Partial errors. If a service | 3224 * this error model include: |
3058 * needs to return partial errors to the client, it may embed the `Status` in | 3225 * Partial errors. If a service needs to return partial errors to the client, it |
3059 * the normal response to indicate the partial errors. - Workflow errors. A | 3226 * may embed the Status in the normal response to indicate the partial errors. |
3060 * typical workflow has multiple steps. Each step may have a `Status` message | 3227 * Workflow errors. A typical workflow has multiple steps. Each step may have a |
3061 * for error reporting purpose. - Batch operations. If a client uses batch | 3228 * Status message for error reporting purpose. |
3062 * request and batch response, the `Status` message should be used directly | 3229 * Batch operations. If a client uses batch request and batch response, the |
3063 * inside batch response, one for each error sub-response. - Asynchronous | 3230 * Status message should be used directly inside batch response, one for each |
3064 * operations. If an API call embeds asynchronous operation results in its | 3231 * error sub-response. |
3065 * response, the status of those operations should be represented directly using | 3232 * Asynchronous operations. If an API call embeds asynchronous operation results |
3066 * the `Status` message. - Logging. If some API errors are stored in logs, the | 3233 * in its response, the status of those operations should be represented |
3067 * message `Status` could be used directly after any stripping needed for | 3234 * directly using the Status message. |
3068 * security/privacy reasons. | 3235 * Logging. If some API errors are stored in logs, the message Status could be |
| 3236 * used directly after any stripping needed for security/privacy reasons. |
3069 */ | 3237 */ |
3070 class Status { | 3238 class Status { |
3071 /** The status code, which should be an enum value of google.rpc.Code. */ | 3239 /** The status code, which should be an enum value of google.rpc.Code. */ |
3072 core.int code; | 3240 core.int code; |
3073 /** | 3241 /** |
3074 * A list of messages that carry the error details. There will be a common set | 3242 * A list of messages that carry the error details. There will be a common set |
3075 * of message types for APIs to use. | 3243 * of message types for APIs to use. |
3076 * | 3244 * |
3077 * The values for Object must be JSON objects. It can consist of `num`, | 3245 * The values for Object must be JSON objects. It can consist of `num`, |
3078 * `String`, `bool` and `null` as well as `Map` and `List` values. | 3246 * `String`, `bool` and `null` as well as `Map` and `List` values. |
(...skipping 30 matching lines...) |
3109 } | 3277 } |
3110 if (message != null) { | 3278 if (message != null) { |
3111 _json["message"] = message; | 3279 _json["message"] = message; |
3112 } | 3280 } |
3113 return _json; | 3281 return _json; |
3114 } | 3282 } |
3115 } | 3283 } |
3116 | 3284 |
3117 /** A request to submit a job. */ | 3285 /** A request to submit a job. */ |
3118 class SubmitJobRequest { | 3286 class SubmitJobRequest { |
3119 /** [Required] The job resource. */ | 3287 /** Required The job resource. */ |
3120 Job job; | 3288 Job job; |
3121 | 3289 |
3122 SubmitJobRequest(); | 3290 SubmitJobRequest(); |
3123 | 3291 |
3124 SubmitJobRequest.fromJson(core.Map _json) { | 3292 SubmitJobRequest.fromJson(core.Map _json) { |
3125 if (_json.containsKey("job")) { | 3293 if (_json.containsKey("job")) { |
3126 job = new Job.fromJson(_json["job"]); | 3294 job = new Job.fromJson(_json["job"]); |
3127 } | 3295 } |
3128 } | 3296 } |
3129 | 3297 |
3130 core.Map toJson() { | 3298 core.Map toJson() { |
3131 var _json = new core.Map(); | 3299 var _json = new core.Map(); |
3132 if (job != null) { | 3300 if (job != null) { |
3133 _json["job"] = (job).toJson(); | 3301 _json["job"] = (job).toJson(); |
3134 } | 3302 } |
3135 return _json; | 3303 return _json; |
3136 } | 3304 } |
3137 } | 3305 } |
3138 | 3306 |
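// Illustrative sketch: wrapping a Job in a SubmitJobRequest and submitting
// it. Assumes the jobs resource exposes submit(request, projectId, region)
// in the style of the other generated resource methods in this library.
async.Future<Job> submitJobExample(DataprocApi api, core.String projectId,
    core.String region, Job job) {
  var request = new SubmitJobRequest()..job = job;
  return api.projects.regions.jobs.submit(request, projectId, region);
}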
3139 /** | 3307 /** |
3140 * A YARN application created by a job. Application information is a subset of | 3308 * A YARN application created by a job. Application information is a subset of |
3141  * org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. **Beta | 3309  * <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>. Beta |
3142 * Feature**: This report is available for testing purposes only. It may be | 3310 * Feature: This report is available for testing purposes only. It may be |
3143 * changed before final release. | 3311 * changed before final release. |
3144 */ | 3312 */ |
3145 class YarnApplication { | 3313 class YarnApplication { |
3146 /** [Required] The application name. */ | 3314 /** Required The application name. */ |
3147 core.String name; | 3315 core.String name; |
3148 /** [Required] The numerical progress of the application, from 1 to 100. */ | 3316 /** Required The numerical progress of the application, from 1 to 100. */ |
3149 core.double progress; | 3317 core.double progress; |
3150 /** | 3318 /** |
3151 * [Required] The application state. | 3319 * Required The application state. |
3152 * Possible string values are: | 3320 * Possible string values are: |
3153 * - "STATE_UNSPECIFIED" : A STATE_UNSPECIFIED. | 3321 * - "STATE_UNSPECIFIED" : Status is unspecified. |
3154 * - "NEW" : A NEW. | 3322 * - "NEW" : Status is NEW. |
3155 * - "NEW_SAVING" : A NEW_SAVING. | 3323 * - "NEW_SAVING" : Status is NEW_SAVING. |
3156 * - "SUBMITTED" : A SUBMITTED. | 3324 * - "SUBMITTED" : Status is SUBMITTED. |
3157 * - "ACCEPTED" : A ACCEPTED. | 3325 * - "ACCEPTED" : Status is ACCEPTED. |
3158 * - "RUNNING" : A RUNNING. | 3326 * - "RUNNING" : Status is RUNNING. |
3159 * - "FINISHED" : A FINISHED. | 3327 * - "FINISHED" : Status is FINISHED. |
3160 * - "FAILED" : A FAILED. | 3328 * - "FAILED" : Status is FAILED. |
3161 * - "KILLED" : A KILLED. | 3329 * - "KILLED" : Status is KILLED. |
3162 */ | 3330 */ |
3163 core.String state; | 3331 core.String state; |
3164 /** | 3332 /** |
3165 * [Optional] The HTTP URL of the ApplicationMaster, HistoryServer, or | 3333 * Optional The HTTP URL of the ApplicationMaster, HistoryServer, or |
3166 * TimelineServer that provides application-specific information. The URL uses | 3334 * TimelineServer that provides application-specific information. The URL uses |
3167 * the internal hostname, and requires a proxy server for resolution and, | 3335 * the internal hostname, and requires a proxy server for resolution and, |
3168 * possibly, access. | 3336 * possibly, access. |
3169 */ | 3337 */ |
3170 core.String trackingUrl; | 3338 core.String trackingUrl; |
3171 | 3339 |
3172 YarnApplication(); | 3340 YarnApplication(); |
3173 | 3341 |
3174 YarnApplication.fromJson(core.Map _json) { | 3342 YarnApplication.fromJson(core.Map _json) { |
3175 if (_json.containsKey("name")) { | 3343 if (_json.containsKey("name")) { |
(...skipping 20 matching lines...) |
3196 } | 3364 } |
3197 if (state != null) { | 3365 if (state != null) { |
3198 _json["state"] = state; | 3366 _json["state"] = state; |
3199 } | 3367 } |
3200 if (trackingUrl != null) { | 3368 if (trackingUrl != null) { |
3201 _json["trackingUrl"] = trackingUrl; | 3369 _json["trackingUrl"] = trackingUrl; |
3202 } | 3370 } |
3203 return _json; | 3371 return _json; |
3204 } | 3372 } |
3205 } | 3373 } |