Index: generated/googleapis/lib/dataproc/v1.dart |
diff --git a/generated/googleapis/lib/dataproc/v1.dart b/generated/googleapis/lib/dataproc/v1.dart |
index b21cef129918439e8c86c0587fe107ac9bb29c23..b50343cb6e5ebbf87135c867546060bf23c8a009 100644 |
--- a/generated/googleapis/lib/dataproc/v1.dart |
+++ b/generated/googleapis/lib/dataproc/v1.dart |
@@ -14,9 +14,7 @@ export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' show |
const core.String USER_AGENT = 'dart-api-client dataproc/v1'; |
-/** |
- * An API for managing Hadoop-based clusters and jobs on Google Cloud Platform. |
- */ |
+/** Manages Hadoop-based clusters and jobs on Google Cloud Platform. */ |
class DataprocApi { |
/** View and manage your data across Google Cloud Platform services */ |
static const CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"; |
@@ -66,10 +64,10 @@ class ProjectsRegionsClustersResourceApi { |
* |
* Request parameters: |
* |
- * [projectId] - [Required] The ID of the Google Cloud Platform project that |
- * the cluster belongs to. |
+ * [projectId] - Required The ID of the Google Cloud Platform project that the |
+ * cluster belongs to. |
* |
- * [region] - [Required] The Cloud Dataproc region in which to handle the |
+ * [region] - Required The Cloud Dataproc region in which to handle the |
* request. |
* |
* Completes with a [Operation]. |
@@ -115,13 +113,13 @@ class ProjectsRegionsClustersResourceApi { |
* |
* Request parameters: |
* |
- * [projectId] - [Required] The ID of the Google Cloud Platform project that |
- * the cluster belongs to. |
+ * [projectId] - Required The ID of the Google Cloud Platform project that the |
+ * cluster belongs to. |
* |
- * [region] - [Required] The Cloud Dataproc region in which to handle the |
+ * [region] - Required The Cloud Dataproc region in which to handle the |
* request. |
* |
- * [clusterName] - [Required] The cluster name. |
+ * [clusterName] - Required The cluster name. |
* |
* Completes with a [Operation]. |
* |
@@ -163,19 +161,19 @@ class ProjectsRegionsClustersResourceApi { |
/** |
* Gets cluster diagnostic information. After the operation completes, the |
- * Operation.response field contains `DiagnoseClusterOutputLocation`. |
+ * Operation.response field contains DiagnoseClusterOutputLocation. |
* |
* [request] - The metadata request object. |
* |
* Request parameters: |
* |
- * [projectId] - [Required] The ID of the Google Cloud Platform project that |
- * the cluster belongs to. |
+ * [projectId] - Required The ID of the Google Cloud Platform project that the |
+ * cluster belongs to. |
* |
- * [region] - [Required] The Cloud Dataproc region in which to handle the |
+ * [region] - Required The Cloud Dataproc region in which to handle the |
* request. |
* |
- * [clusterName] - [Required] The cluster name. |
+ * [clusterName] - Required The cluster name. |
* |
* Completes with a [Operation]. |
* |
@@ -223,13 +221,13 @@ class ProjectsRegionsClustersResourceApi { |
* |
* Request parameters: |
* |
- * [projectId] - [Required] The ID of the Google Cloud Platform project that |
- * the cluster belongs to. |
+ * [projectId] - Required The ID of the Google Cloud Platform project that the |
+ * cluster belongs to. |
* |
- * [region] - [Required] The Cloud Dataproc region in which to handle the |
+ * [region] - Required The Cloud Dataproc region in which to handle the |
* request. |
* |
- * [clusterName] - [Required] The cluster name. |
+ * [clusterName] - Required The cluster name. |
* |
* Completes with a [Cluster]. |
* |
@@ -274,30 +272,27 @@ class ProjectsRegionsClustersResourceApi { |
* |
* Request parameters: |
* |
- * [projectId] - [Required] The ID of the Google Cloud Platform project that |
- * the cluster belongs to. |
+ * [projectId] - Required The ID of the Google Cloud Platform project that the |
+ * cluster belongs to. |
* |
- * [region] - [Required] The Cloud Dataproc region in which to handle the |
+ * [region] - Required The Cloud Dataproc region in which to handle the |
* request. |
* |
- * [filter] - [Optional] A filter constraining the clusters to list. Filters |
- * are case-sensitive and have the following syntax: field:value [field:value] |
- * ... or field = value [AND [field = value]] ... where **field** is one of |
- * `status.state`, `clusterName`, or `labels.[KEY]`, and `[KEY]` is a label |
- * key. **value** can be `*` to match all values. `status.state` can be one of |
- * the following: `ACTIVE`, `INACTIVE`, `CREATING`, `RUNNING`, `ERROR`, |
- * `DELETING`, or `UPDATING`. `ACTIVE` contains the `CREATING`, `UPDATING`, |
- * and `RUNNING` states. `INACTIVE` contains the `DELETING` and `ERROR` |
- * states. `clusterName` is the name of the cluster provided at creation time. |
- * Only the logical `AND` operator is supported; space-separated items are |
- * treated as having an implicit `AND` operator. Example valid filters are: |
- * status.state:ACTIVE clusterName:mycluster labels.env:staging \ |
- * labels.starred:* and status.state = ACTIVE AND clusterName = mycluster \ |
- * AND labels.env = staging AND labels.starred = * |
- * |
- * [pageSize] - [Optional] The standard List page size. |
- * |
- * [pageToken] - [Optional] The standard List page token. |
+ * [pageToken] - Optional The standard List page token. |
+ * |
+ * [pageSize] - Optional The standard List page size. |
+ * |
+ * [filter] - Optional A filter constraining the clusters to list. Filters are |
+ * case-sensitive and have the following syntax: field = value AND field = |
+ * value ... where field is one of status.state, clusterName, or labels.[KEY], |
+ * and [KEY] is a label key. value can be * to match all values. status.state |
+ * can be one of the following: ACTIVE, INACTIVE, CREATING, RUNNING, ERROR, |
+ * DELETING, or UPDATING. ACTIVE contains the CREATING, UPDATING, and RUNNING |
+ * states. INACTIVE contains the DELETING and ERROR states. clusterName is the |
+ * name of the cluster provided at creation time. Only the logical AND |
+ * operator is supported; space-separated items are treated as having an |
+ * implicit AND operator. Example filter: status.state = ACTIVE AND clusterName |
+ * = mycluster AND labels.env = staging AND labels.starred = * |
* |
* Completes with a [ListClustersResponse]. |
* |
@@ -307,7 +302,7 @@ class ProjectsRegionsClustersResourceApi { |
* If the used [http.Client] completes with an error when making a REST call, |
* this method will complete with the same error. |
*/ |
- async.Future<ListClustersResponse> list(core.String projectId, core.String region, {core.String filter, core.int pageSize, core.String pageToken}) { |
+ async.Future<ListClustersResponse> list(core.String projectId, core.String region, {core.String pageToken, core.int pageSize, core.String filter}) { |
var _url = null; |
var _queryParams = new core.Map(); |
var _uploadMedia = null; |
@@ -321,14 +316,14 @@ class ProjectsRegionsClustersResourceApi { |
if (region == null) { |
throw new core.ArgumentError("Parameter region is required."); |
} |
- if (filter != null) { |
- _queryParams["filter"] = [filter]; |
+ if (pageToken != null) { |
+ _queryParams["pageToken"] = [pageToken]; |
} |
if (pageSize != null) { |
_queryParams["pageSize"] = ["${pageSize}"]; |
} |
- if (pageToken != null) { |
- _queryParams["pageToken"] = [pageToken]; |
+ if (filter != null) { |
+ _queryParams["filter"] = [filter]; |
} |
_url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters'; |
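
For orientation, a minimal usage sketch of the reordered list parameters and the filter syntax documented in this hunk. It assumes the usual api.projects.regions.clusters accessor chain and an already-authenticated http.Client, neither of which appears in this excerpt; the project ID, region, and label values are placeholders. The imports shown here are reused by the later sketches.

  import 'dart:async';
  import 'package:googleapis/dataproc/v1.dart';
  import 'package:http/http.dart' as http;

  /// Prints every ACTIVE cluster labelled env=staging, following
  /// nextPageToken until the listing is exhausted.
  Future listStagingClusters(http.Client authedClient) async {
    var api = new DataprocApi(authedClient);
    String pageToken;
    do {
      ListClustersResponse page = await api.projects.regions.clusters.list(
          'my-project', 'global',
          filter: 'status.state = ACTIVE AND labels.env = staging',
          pageSize: 50,
          pageToken: pageToken);
      (page.clusters ?? []).forEach((c) => print(c.clusterName));
      pageToken = page.nextPageToken;
    } while (pageToken != null);
  }
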
@@ -350,27 +345,41 @@ class ProjectsRegionsClustersResourceApi { |
* |
* Request parameters: |
* |
- * [projectId] - [Required] The ID of the Google Cloud Platform project the |
+ * [projectId] - Required The ID of the Google Cloud Platform project the |
* cluster belongs to. |
* |
- * [region] - [Required] The Cloud Dataproc region in which to handle the |
+ * [region] - Required The Cloud Dataproc region in which to handle the |
* request. |
* |
- * [clusterName] - [Required] The cluster name. |
- * |
- * [updateMask] - [Required] Specifies the path, relative to Cluster, of the |
- * field to update. For example, to change the number of workers in a cluster |
- * to 5, the update_mask parameter would be specified as |
- * config.worker_config.num_instances, and the `PATCH` request body would |
- * specify the new value, as follows: { "config":{ "workerConfig":{ |
- * "numInstances":"5" } } } Similarly, to change the number of preemptible |
- * workers in a cluster to 5, the update_mask parameter would be |
- * config.secondary_worker_config.num_instances, and the `PATCH` request body |
- * would be set as follows: { "config":{ "secondaryWorkerConfig":{ |
- * "numInstances":"5" } } } Note: Currently, |
- * config.worker_config.num_instances and |
- * config.secondary_worker_config.num_instances are the only fields that can |
- * be updated. |
+ * [clusterName] - Required The cluster name. |
+ * |
+ * [updateMask] - Required Specifies the path, relative to |
+ * <code>Cluster</code>, of the field to update. For example, to change the |
+ * number of workers in a cluster to 5, the <code>update_mask</code> parameter |
+ * would be specified as <code>config.worker_config.num_instances</code>, and |
+ * the PATCH request body would specify the new value, as follows: |
+ * { |
+ * "config":{ |
+ * "workerConfig":{ |
+ * "numInstances":"5" |
+ * } |
+ * } |
+ * } |
+ * Similarly, to change the number of preemptible workers in a cluster to 5, |
+ * the <code>update_mask</code> parameter would be |
+ * <code>config.secondary_worker_config.num_instances</code>, and the PATCH |
+ * request body would be set as follows: |
+ * { |
+ * "config":{ |
+ * "secondaryWorkerConfig":{ |
+ * "numInstances":"5" |
+ * } |
+ * } |
+ * } |
+ * <strong>Note:</strong> Currently, |
+ * <code>config.worker_config.num_instances</code> and |
+ * <code>config.secondary_worker_config.num_instances</code> are the only |
+ * fields that can be updated. |
* |
* Completes with a [Operation]. |
* |
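
To make the update_mask description above concrete, a hedged sketch of resizing a cluster's primary worker group to five instances. The clusters patch signature is inferred from the parameter list in this hunk (it is not shown verbatim), the project, region, and cluster names are placeholders, and the api value is built as in the first sketch.

  /// Sends a PATCH that sets config.worker_config.num_instances to 5.
  Future<Operation> resizeWorkers(DataprocApi api) {
    var patchBody = new Cluster()
      ..config = (new ClusterConfig()
        ..workerConfig = (new InstanceGroupConfig()..numInstances = 5));
    return api.projects.regions.clusters.patch(
        patchBody, 'my-project', 'global', 'my-cluster',
        updateMask: 'config.worker_config.num_instances');
  }
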
@@ -427,22 +436,19 @@ class ProjectsRegionsJobsResourceApi { |
/** |
* Starts a job cancellation request. To access the job resource after |
- * cancellation, call |
- * [regions/{region}/jobs.list](/dataproc/reference/rest/v1/projects.regions.jobs/list) |
- * or |
- * [regions/{region}/jobs.get](/dataproc/reference/rest/v1/projects.regions.jobs/get). |
+ * cancellation, call regions/{region}/jobs.list or regions/{region}/jobs.get. |
* |
* [request] - The metadata request object. |
* |
* Request parameters: |
* |
- * [projectId] - [Required] The ID of the Google Cloud Platform project that |
- * the job belongs to. |
+ * [projectId] - Required The ID of the Google Cloud Platform project that the |
+ * job belongs to. |
* |
- * [region] - [Required] The Cloud Dataproc region in which to handle the |
+ * [region] - Required The Cloud Dataproc region in which to handle the |
* request. |
* |
- * [jobId] - [Required] The job ID. |
+ * [jobId] - Required The job ID. |
* |
* Completes with a [Job]. |
* |
@@ -487,17 +493,17 @@ class ProjectsRegionsJobsResourceApi { |
/** |
* Deletes the job from the project. If the job is active, the delete fails, |
- * and the response returns `FAILED_PRECONDITION`. |
+ * and the response returns FAILED_PRECONDITION. |
* |
* Request parameters: |
* |
- * [projectId] - [Required] The ID of the Google Cloud Platform project that |
- * the job belongs to. |
+ * [projectId] - Required The ID of the Google Cloud Platform project that the |
+ * job belongs to. |
* |
- * [region] - [Required] The Cloud Dataproc region in which to handle the |
+ * [region] - Required The Cloud Dataproc region in which to handle the |
* request. |
* |
- * [jobId] - [Required] The job ID. |
+ * [jobId] - Required The job ID. |
* |
* Completes with a [Empty]. |
* |
@@ -542,13 +548,13 @@ class ProjectsRegionsJobsResourceApi { |
* |
* Request parameters: |
* |
- * [projectId] - [Required] The ID of the Google Cloud Platform project that |
- * the job belongs to. |
+ * [projectId] - Required The ID of the Google Cloud Platform project that the |
+ * job belongs to. |
* |
- * [region] - [Required] The Cloud Dataproc region in which to handle the |
+ * [region] - Required The Cloud Dataproc region in which to handle the |
* request. |
* |
- * [jobId] - [Required] The job ID. |
+ * [jobId] - Required The job ID. |
* |
* Completes with a [Job]. |
* |
@@ -593,37 +599,36 @@ class ProjectsRegionsJobsResourceApi { |
* |
* Request parameters: |
* |
- * [projectId] - [Required] The ID of the Google Cloud Platform project that |
- * the job belongs to. |
+ * [projectId] - Required The ID of the Google Cloud Platform project that the |
+ * job belongs to. |
* |
- * [region] - [Required] The Cloud Dataproc region in which to handle the |
+ * [region] - Required The Cloud Dataproc region in which to handle the |
* request. |
* |
- * [pageSize] - [Optional] The number of results to return in each response. |
- * |
- * [pageToken] - [Optional] The page token, returned by a previous call, to |
+ * [pageToken] - Optional The page token, returned by a previous call, to |
* request the next page of results. |
* |
- * [clusterName] - [Optional] If set, the returned jobs list includes only |
- * jobs that were submitted to the named cluster. |
+ * [pageSize] - Optional The number of results to return in each response. |
* |
- * [jobStateMatcher] - [Optional] Specifies enumerated categories of jobs to |
+ * [clusterName] - Optional If set, the returned jobs list includes only jobs |
+ * that were submitted to the named cluster. |
+ * |
+ * [filter] - Optional A filter constraining the jobs to list. Filters are |
+ * case-sensitive and have the following syntax: field = value AND field = |
+ * value ... where field is status.state or labels.[KEY], and [KEY] is a label |
+ * key. value can be * to match all values. status.state can be either ACTIVE |
+ * or INACTIVE. Only the logical AND operator is supported; space-separated |
+ * items are treated as having an implicit AND operator. Example |
+ * filter: status.state = ACTIVE AND labels.env = staging AND labels.starred = |
+ * * |
+ * |
+ * [jobStateMatcher] - Optional Specifies enumerated categories of jobs to |
* list (default = match ALL jobs). |
* Possible string values are: |
* - "ALL" : A ALL. |
* - "ACTIVE" : A ACTIVE. |
* - "NON_ACTIVE" : A NON_ACTIVE. |
* |
- * [filter] - [Optional] A filter constraining the jobs to list. Filters are |
- * case-sensitive and have the following syntax: field:value] ... or [field = |
- * value] AND [field [= value]] ... where **field** is `status.state` or |
- * `labels.[KEY]`, and `[KEY]` is a label key. **value** can be `*` to match |
- * all values. `status.state` can be either `ACTIVE` or `INACTIVE`. Only the |
- * logical `AND` operator is supported; space-separated items are treated as |
- * having an implicit `AND` operator. Example valid filters are: |
- * status.state:ACTIVE labels.env:staging labels.starred:* and status.state = |
- * ACTIVE AND labels.env = staging AND labels.starred = * |
- * |
* Completes with a [ListJobsResponse]. |
* |
* Completes with a [commons.ApiRequestError] if the API endpoint returned an |
@@ -632,7 +637,7 @@ class ProjectsRegionsJobsResourceApi { |
* If the used [http.Client] completes with an error when making a REST call, |
* this method will complete with the same error. |
*/ |
- async.Future<ListJobsResponse> list(core.String projectId, core.String region, {core.int pageSize, core.String pageToken, core.String clusterName, core.String jobStateMatcher, core.String filter}) { |
+ async.Future<ListJobsResponse> list(core.String projectId, core.String region, {core.String pageToken, core.int pageSize, core.String clusterName, core.String filter, core.String jobStateMatcher}) { |
var _url = null; |
var _queryParams = new core.Map(); |
var _uploadMedia = null; |
@@ -646,21 +651,21 @@ class ProjectsRegionsJobsResourceApi { |
if (region == null) { |
throw new core.ArgumentError("Parameter region is required."); |
} |
- if (pageSize != null) { |
- _queryParams["pageSize"] = ["${pageSize}"]; |
- } |
if (pageToken != null) { |
_queryParams["pageToken"] = [pageToken]; |
} |
+ if (pageSize != null) { |
+ _queryParams["pageSize"] = ["${pageSize}"]; |
+ } |
if (clusterName != null) { |
_queryParams["clusterName"] = [clusterName]; |
} |
- if (jobStateMatcher != null) { |
- _queryParams["jobStateMatcher"] = [jobStateMatcher]; |
- } |
if (filter != null) { |
_queryParams["filter"] = [filter]; |
} |
+ if (jobStateMatcher != null) { |
+ _queryParams["jobStateMatcher"] = [jobStateMatcher]; |
+ } |
_url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/jobs'; |
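
A matching sketch for the reordered jobs.list parameters shown above, reusing the api value from the first sketch; the project, region, cluster, and label values are placeholders.

  /// Lists the jobs submitted to one cluster that are still ACTIVE.
  Future printActiveJobs(DataprocApi api) async {
    ListJobsResponse response = await api.projects.regions.jobs.list(
        'my-project', 'global',
        clusterName: 'my-cluster',
        jobStateMatcher: 'ACTIVE',
        filter: 'labels.env = staging');
    for (Job job in response.jobs ?? []) {
      print('${job.reference.jobId}: ${job.status.state}');
    }
  }
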
@@ -675,16 +680,82 @@ class ProjectsRegionsJobsResourceApi { |
} |
/** |
+ * Updates a job in a project. |
+ * |
+ * [request] - The metadata request object. |
+ * |
+ * Request parameters: |
+ * |
+ * [projectId] - Required The ID of the Google Cloud Platform project that the |
+ * job belongs to. |
+ * |
+ * [region] - Required The Cloud Dataproc region in which to handle the |
+ * request. |
+ * |
+ * [jobId] - Required The job ID. |
+ * |
+ * [updateMask] - Required Specifies the path, relative to <code>Job</code>, |
+ * of the field to update. For example, to update the labels of a Job the |
+ * <code>update_mask</code> parameter would be specified as |
+ * <code>labels</code>, and the PATCH request body would specify the new |
+ * value. <strong>Note:</strong> Currently, <code>labels</code> is the only |
+ * field that can be updated. |
+ * |
+ * Completes with a [Job]. |
+ * |
+ * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
+ * error. |
+ * |
+ * If the used [http.Client] completes with an error when making a REST call, |
+ * this method will complete with the same error. |
+ */ |
+ async.Future<Job> patch(Job request, core.String projectId, core.String region, core.String jobId, {core.String updateMask}) { |
+ var _url = null; |
+ var _queryParams = new core.Map(); |
+ var _uploadMedia = null; |
+ var _uploadOptions = null; |
+ var _downloadOptions = commons.DownloadOptions.Metadata; |
+ var _body = null; |
+ |
+ if (request != null) { |
+ _body = convert.JSON.encode((request).toJson()); |
+ } |
+ if (projectId == null) { |
+ throw new core.ArgumentError("Parameter projectId is required."); |
+ } |
+ if (region == null) { |
+ throw new core.ArgumentError("Parameter region is required."); |
+ } |
+ if (jobId == null) { |
+ throw new core.ArgumentError("Parameter jobId is required."); |
+ } |
+ if (updateMask != null) { |
+ _queryParams["updateMask"] = [updateMask]; |
+ } |
+ |
+ _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId'); |
+ |
+ var _response = _requester.request(_url, |
+ "PATCH", |
+ body: _body, |
+ queryParams: _queryParams, |
+ uploadOptions: _uploadOptions, |
+ uploadMedia: _uploadMedia, |
+ downloadOptions: _downloadOptions); |
+ return _response.then((data) => new Job.fromJson(data)); |
+ } |
+ |
+ /** |
* Submits a job to a cluster. |
* |
* [request] - The metadata request object. |
* |
* Request parameters: |
* |
- * [projectId] - [Required] The ID of the Google Cloud Platform project that |
- * the job belongs to. |
+ * [projectId] - Required The ID of the Google Cloud Platform project that the |
+ * job belongs to. |
* |
- * [region] - [Required] The Cloud Dataproc region in which to handle the |
+ * [region] - Required The Cloud Dataproc region in which to handle the |
* request. |
* |
* Completes with a [Job]. |
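
The jobs patch method added in the hunk above currently supports updating only the labels field; a small usage sketch, with api built as in the first sketch and all IDs as placeholders:

  /// Replaces the labels on an existing job via updateMask: 'labels'.
  Future<Job> relabelJob(DataprocApi api) {
    var patchBody = new Job()..labels = {'env': 'production'};
    return api.projects.regions.jobs.patch(
        patchBody, 'my-project', 'global', 'my-job-id',
        updateMask: 'labels');
  }
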
@@ -738,12 +809,12 @@ class ProjectsRegionsOperationsResourceApi { |
* Starts asynchronous cancellation on a long-running operation. The server |
* makes a best effort to cancel the operation, but success is not guaranteed. |
* If the server doesn't support this method, it returns |
- * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or |
+ * google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or |
* other methods to check whether the cancellation succeeded or whether the |
* operation completed despite cancellation. On successful cancellation, the |
* operation is not deleted; instead, it becomes an operation with an |
* Operation.error value with a google.rpc.Status.code of 1, corresponding to |
- * `Code.CANCELLED`. |
+ * Code.CANCELLED. |
* |
* Request parameters: |
* |
@@ -786,7 +857,7 @@ class ProjectsRegionsOperationsResourceApi { |
* Deletes a long-running operation. This method indicates that the client is |
* no longer interested in the operation result. It does not cancel the |
* operation. If the server doesn't support this method, it returns |
- * `google.rpc.Code.UNIMPLEMENTED`. |
+ * google.rpc.Code.UNIMPLEMENTED. |
* |
* Request parameters: |
* |
@@ -869,19 +940,19 @@ class ProjectsRegionsOperationsResourceApi { |
/** |
* Lists operations that match the specified filter in the request. If the |
- * server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the |
- * `name` binding below allows API services to override the binding to use |
- * different resource name schemes, such as `users / * /operations`. |
+ * server doesn't support this method, it returns UNIMPLEMENTED. NOTE: the name |
+ * binding below allows API services to override the binding to use different |
+ * resource name schemes, such as users / * /operations. |
* |
* Request parameters: |
* |
* [name] - The name of the operation collection. |
* Value must have pattern "^projects/[^/]+/regions/[^/]+/operations$". |
* |
- * [filter] - The standard list filter. |
- * |
* [pageSize] - The standard list page size. |
* |
+ * [filter] - The standard list filter. |
+ * |
* [pageToken] - The standard list page token. |
* |
* Completes with a [ListOperationsResponse]. |
@@ -892,7 +963,7 @@ class ProjectsRegionsOperationsResourceApi { |
* If the used [http.Client] completes with an error when making a REST call, |
* this method will complete with the same error. |
*/ |
- async.Future<ListOperationsResponse> list(core.String name, {core.String filter, core.int pageSize, core.String pageToken}) { |
+ async.Future<ListOperationsResponse> list(core.String name, {core.int pageSize, core.String filter, core.String pageToken}) { |
var _url = null; |
var _queryParams = new core.Map(); |
var _uploadMedia = null; |
@@ -903,12 +974,12 @@ class ProjectsRegionsOperationsResourceApi { |
if (name == null) { |
throw new core.ArgumentError("Parameter name is required."); |
} |
- if (filter != null) { |
- _queryParams["filter"] = [filter]; |
- } |
if (pageSize != null) { |
_queryParams["pageSize"] = ["${pageSize}"]; |
} |
+ if (filter != null) { |
+ _queryParams["filter"] = [filter]; |
+ } |
if (pageToken != null) { |
_queryParams["pageToken"] = [pageToken]; |
} |
@@ -949,43 +1020,42 @@ class CancelJobRequest { |
*/ |
class Cluster { |
/** |
- * [Required] The cluster name. Cluster names within a project must be unique. |
+ * Required The cluster name. Cluster names within a project must be unique. |
* Names of deleted clusters can be reused. |
*/ |
core.String clusterName; |
/** |
- * [Output-only] A cluster UUID (Unique Universal Identifier). Cloud Dataproc |
+ * Output-only A cluster UUID (Unique Universal Identifier). Cloud Dataproc |
* generates this value when it creates the cluster. |
*/ |
core.String clusterUuid; |
/** |
- * [Required] The cluster config. Note that Cloud Dataproc may set default |
+ * Required The cluster config. Note that Cloud Dataproc may set default |
* values, and values may change when clusters are updated. |
*/ |
ClusterConfig config; |
/** |
- * [Optional] The labels to associate with this cluster. Label **keys** must |
- * contain 1 to 63 characters, and must conform to [RFC |
- * 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty, |
- * but, if present, must contain 1 to 63 characters, and must conform to [RFC |
- * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be |
+ * Optional The labels to associate with this cluster. Label keys must contain |
+ * 1 to 63 characters, and must conform to RFC 1035 |
+ * (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if |
+ * present, must contain 1 to 63 characters, and must conform to RFC 1035 |
+ * (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be |
* associated with a cluster. |
*/ |
core.Map<core.String, core.String> labels; |
/** |
- * Contains cluster daemon metrics such as HDFS and YARN stats. **Beta |
- * Feature**: This report is available for testing purposes only. It may be |
- * changed before final release. |
+ * Contains cluster daemon metrics such as HDFS and YARN stats. Beta Feature: |
+ * This report is available for testing purposes only. It may be changed |
+ * before final release. |
*/ |
ClusterMetrics metrics; |
/** |
- * [Required] The Google Cloud Platform project ID that the cluster belongs |
- * to. |
+ * Required The Google Cloud Platform project ID that the cluster belongs to. |
*/ |
core.String projectId; |
- /** [Output-only] Cluster status. */ |
+ /** Output-only Cluster status. */ |
ClusterStatus status; |
- /** [Output-only] The previous cluster status. */ |
+ /** Output-only The previous cluster status. */ |
core.List<ClusterStatus> statusHistory; |
Cluster(); |
@@ -1050,7 +1120,7 @@ class Cluster { |
/** The cluster config. */ |
class ClusterConfig { |
/** |
- * [Optional] A Google Cloud Storage staging bucket used for sharing generated |
+ * Optional A Google Cloud Storage staging bucket used for sharing generated |
* SSH keys and config. If you do not specify a staging bucket, Cloud Dataproc |
* will determine an appropriate Cloud Storage location (US, ASIA, or EU) for |
* your cluster's staging bucket according to the Google Compute Engine zone |
@@ -1059,36 +1129,39 @@ class ClusterConfig { |
*/ |
core.String configBucket; |
/** |
- * [Required] The shared Google Compute Engine config settings for all |
- * instances in a cluster. |
+ * Required The shared Google Compute Engine config settings for all instances |
+ * in a cluster. |
*/ |
GceClusterConfig gceClusterConfig; |
/** |
- * [Optional] Commands to execute on each node after config is completed. By |
+ * Optional Commands to execute on each node after config is completed. By |
* default, executables are run on master and all worker nodes. You can test a |
- * node's role metadata to run an executable on a master or worker node, as |
- * shown below using `curl` (you can also use `wget`): ROLE=$(curl -H |
- * Metadata-Flavor:Google |
- * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ |
- * "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ... |
- * worker specific actions ... fi |
+ * node's <code>role</code> metadata to run an executable on a master or |
+ * worker node, as shown below using curl (you can also use wget): |
+ * ROLE=$(curl -H Metadata-Flavor:Google |
+ * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) |
+ * if [[ "${ROLE}" == 'Master' ]]; then |
+ * ... master specific actions ... |
+ * else |
+ * ... worker specific actions ... |
+ * fi |
*/ |
core.List<NodeInitializationAction> initializationActions; |
/** |
- * [Optional] The Google Compute Engine config settings for the master |
- * instance in a cluster. |
+ * Optional The Google Compute Engine config settings for the master instance |
+ * in a cluster. |
*/ |
InstanceGroupConfig masterConfig; |
/** |
- * [Optional] The Google Compute Engine config settings for additional worker |
+ * Optional The Google Compute Engine config settings for additional worker |
* instances in a cluster. |
*/ |
InstanceGroupConfig secondaryWorkerConfig; |
- /** [Optional] The config settings for software inside the cluster. */ |
+ /** Optional The config settings for software inside the cluster. */ |
SoftwareConfig softwareConfig; |
/** |
- * [Optional] The Google Compute Engine config settings for worker instances |
- * in a cluster. |
+ * Optional The Google Compute Engine config settings for worker instances in |
+ * a cluster. |
*/ |
InstanceGroupConfig workerConfig; |
@@ -1146,9 +1219,9 @@ class ClusterConfig { |
} |
/** |
- * Contains cluster daemon metrics, such as HDFS and YARN stats. **Beta |
- * Feature**: This report is available for testing purposes only. It may be |
- * changed before final release. |
+ * Contains cluster daemon metrics, such as HDFS and YARN stats. Beta Feature: |
+ * This report is available for testing purposes only. It may be changed before |
+ * final release. |
*/ |
class ClusterMetrics { |
/** The HDFS metrics. */ |
@@ -1181,20 +1254,22 @@ class ClusterMetrics { |
/** Metadata describing the operation. */ |
class ClusterOperationMetadata { |
- /** [Output-only] Name of the cluster for the operation. */ |
+ /** Output-only Name of the cluster for the operation. */ |
core.String clusterName; |
- /** [Output-only] Cluster UUID for the operation. */ |
+ /** Output-only Cluster UUID for the operation. */ |
core.String clusterUuid; |
- /** [Output-only] Short description of operation. */ |
+ /** Output-only Short description of operation. */ |
core.String description; |
- /** [Output-only] labels associated with the operation */ |
+ /** Output-only Labels associated with the operation */ |
core.Map<core.String, core.String> labels; |
- /** [Output-only] The operation type. */ |
+ /** Output-only The operation type. */ |
core.String operationType; |
- /** [Output-only] Current operation status. */ |
+ /** Output-only Current operation status. */ |
ClusterOperationStatus status; |
- /** [Output-only] The previous operation status. */ |
+ /** Output-only The previous operation status. */ |
core.List<ClusterOperationStatus> statusHistory; |
+ /** Output-only Errors encountered during operation execution. */ |
+ core.List<core.String> warnings; |
ClusterOperationMetadata(); |
@@ -1220,6 +1295,9 @@ class ClusterOperationMetadata { |
if (_json.containsKey("statusHistory")) { |
statusHistory = _json["statusHistory"].map((value) => new ClusterOperationStatus.fromJson(value)).toList(); |
} |
+ if (_json.containsKey("warnings")) { |
+ warnings = _json["warnings"]; |
+ } |
} |
core.Map toJson() { |
@@ -1245,26 +1323,29 @@ class ClusterOperationMetadata { |
if (statusHistory != null) { |
_json["statusHistory"] = statusHistory.map((value) => (value).toJson()).toList(); |
} |
+ if (warnings != null) { |
+ _json["warnings"] = warnings; |
+ } |
return _json; |
} |
} |
/** The status of the operation. */ |
class ClusterOperationStatus { |
- /** [Output-only]A message containing any operation metadata details. */ |
+ /** Output-only A message containing any operation metadata details. */ |
core.String details; |
- /** [Output-only] A message containing the detailed operation state. */ |
+ /** Output-only A message containing the detailed operation state. */ |
core.String innerState; |
/** |
- * [Output-only] A message containing the operation state. |
+ * Output-only A message containing the operation state. |
* Possible string values are: |
- * - "UNKNOWN" : A UNKNOWN. |
- * - "PENDING" : A PENDING. |
- * - "RUNNING" : A RUNNING. |
- * - "DONE" : A DONE. |
+ * - "UNKNOWN" : Unused. |
+ * - "PENDING" : The operation has been created. |
+ * - "RUNNING" : The operation is running. |
+ * - "DONE" : The operation is done; either cancelled or completed. |
*/ |
core.String state; |
- /** [Output-only] The time this state was entered. */ |
+ /** Output-only The time this state was entered. */ |
core.String stateStartTime; |
ClusterOperationStatus(); |
@@ -1304,20 +1385,23 @@ class ClusterOperationStatus { |
/** The status of a cluster and its instances. */ |
class ClusterStatus { |
- /** [Output-only] Optional details of cluster's state. */ |
+ /** Output-only Optional details of cluster's state. */ |
core.String detail; |
/** |
- * [Output-only] The cluster's state. |
+ * Output-only The cluster's state. |
* Possible string values are: |
- * - "UNKNOWN" : A UNKNOWN. |
- * - "CREATING" : A CREATING. |
- * - "RUNNING" : A RUNNING. |
- * - "ERROR" : A ERROR. |
- * - "DELETING" : A DELETING. |
- * - "UPDATING" : A UPDATING. |
+ * - "UNKNOWN" : The cluster state is unknown. |
+ * - "CREATING" : The cluster is being created and set up. It is not ready for |
+ * use. |
+ * - "RUNNING" : The cluster is currently running and healthy. It is ready for |
+ * use. |
+ * - "ERROR" : The cluster encountered an error. It is not ready for use. |
+ * - "DELETING" : The cluster is being deleted. It cannot be used. |
+ * - "UPDATING" : The cluster is being updated. It continues to accept and |
+ * process jobs. |
*/ |
core.String state; |
- /** [Output-only] Time when this state was entered. */ |
+ /** Output-only Time when this state was entered. */ |
core.String stateStartTime; |
ClusterStatus(); |
@@ -1352,7 +1436,7 @@ class ClusterStatus { |
/** The location where output from diagnostic command can be found. */ |
class DiagnoseClusterOutputLocation { |
/** |
- * [Output-only] The Google Cloud Storage URI of the diagnostic output. This |
+ * Output-only The Google Cloud Storage URI of the diagnostic output. This |
* will be a plain text file with summary of collected diagnostics. |
*/ |
core.String outputUri; |
@@ -1391,7 +1475,7 @@ class DiagnoseClusterRequest { |
/** The location of diagnostic output. */ |
class DiagnoseClusterResults { |
/** |
- * [Output-only] The Google Cloud Storage URI of the diagnostic output. The |
+ * Output-only The Google Cloud Storage URI of the diagnostic output. The |
* output report is a plain text file with a summary of collected diagnostics. |
*/ |
core.String outputUri; |
@@ -1415,14 +1499,14 @@ class DiagnoseClusterResults { |
/** Specifies the config of disk options for a group of VM instances. */ |
class DiskConfig { |
- /** [Optional] Size in GB of the boot disk (default is 500GB). */ |
+ /** Optional Size in GB of the boot disk (default is 500GB). */ |
core.int bootDiskSizeGb; |
/** |
- * [Optional] Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are |
- * not attached, the boot disk is used to store runtime logs and |
- * [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If |
- * one or more SSDs are attached, this runtime bulk data is spread across |
- * them, and the boot disk contains only basic config and installed binaries. |
+ * Optional Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are |
+ * not attached, the boot disk is used to store runtime logs and HDFS |
+ * (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one |
+ * or more SSDs are attached, this runtime bulk data is spread across them, |
+ * and the boot disk contains only basic config and installed binaries. |
*/ |
core.int numLocalSsds; |
@@ -1452,9 +1536,11 @@ class DiskConfig { |
/** |
* A generic empty message that you can re-use to avoid defining duplicated |
* empty messages in your APIs. A typical example is to use it as the request or |
- * the response type of an API method. For instance: service Foo { rpc |
- * Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON |
- * representation for `Empty` is empty JSON object `{}`. |
+ * the response type of an API method. For instance: |
+ * service Foo { |
+ * rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); |
+ * } |
+ * The JSON representation for Empty is an empty JSON object {}. |
*/ |
class Empty { |
@@ -1475,58 +1561,69 @@ class Empty { |
*/ |
class GceClusterConfig { |
/** |
- * [Optional] If true, all instances in the cluster will only have internal IP |
+ * Optional If true, all instances in the cluster will only have internal IP |
* addresses. By default, clusters are not restricted to internal IP |
* addresses, and will have ephemeral external IP addresses assigned to each |
- * instance. This `internal_ip_only` restriction can only be enabled for |
+ * instance. This internal_ip_only restriction can only be enabled for |
* subnetwork enabled networks, and all off-cluster dependencies must be |
* configured to be accessible without external IP addresses. |
*/ |
core.bool internalIpOnly; |
/** |
* The Google Compute Engine metadata entries to add to all instances (see |
- * [Project and instance |
- * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). |
+ * Project and instance metadata |
+ * (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). |
*/ |
core.Map<core.String, core.String> metadata; |
/** |
- * [Optional] The Google Compute Engine network to be used for machine |
+ * Optional The Google Compute Engine network to be used for machine |
* communications. Cannot be specified with subnetwork_uri. If neither |
- * `network_uri` nor `subnetwork_uri` is specified, the "default" network of |
- * the project is used, if it exists. Cannot be a "Custom Subnet Network" (see |
- * [Using Subnetworks](/compute/docs/subnetworks) for more information). |
- * Example: |
- * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`. |
+ * network_uri nor subnetwork_uri is specified, the "default" network of the |
+ * project is used, if it exists. Cannot be a "Custom Subnet Network" (see |
+ * Using Subnetworks for more information). Example: |
+ * https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default. |
*/ |
core.String networkUri; |
/** |
- * [Optional] The URIs of service account scopes to be included in Google |
+ * Optional The service account of the instances. Defaults to the default |
+ * Google Compute Engine service account. Custom service accounts need |
+ * permissions equivalent to the following IAM roles: |
+ * roles/logging.logWriter |
+ * roles/storage.objectAdmin (see |
+ * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts |
+ * for more information). Example: |
+ * [account_id]@[project_id].iam.gserviceaccount.com |
+ */ |
+ core.String serviceAccount; |
+ /** |
+ * Optional The URIs of service account scopes to be included in Google |
* Compute Engine instances. The following base set of scopes is always |
- * included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * |
- * https://www.googleapis.com/auth/devstorage.read_write * |
- * https://www.googleapis.com/auth/logging.write If no scopes are specified, |
- * the following defaults are also provided: * |
- * https://www.googleapis.com/auth/bigquery * |
- * https://www.googleapis.com/auth/bigtable.admin.table * |
- * https://www.googleapis.com/auth/bigtable.data * |
+ * included: |
+ * https://www.googleapis.com/auth/cloud.useraccounts.readonly |
+ * https://www.googleapis.com/auth/devstorage.read_write |
+ * https://www.googleapis.com/auth/logging.write If no scopes are specified, |
+ * the following defaults are also provided: |
+ * https://www.googleapis.com/auth/bigquery |
+ * https://www.googleapis.com/auth/bigtable.admin.table |
+ * https://www.googleapis.com/auth/bigtable.data |
* https://www.googleapis.com/auth/devstorage.full_control |
*/ |
core.List<core.String> serviceAccountScopes; |
/** |
- * [Optional] The Google Compute Engine subnetwork to be used for machine |
+ * Optional The Google Compute Engine subnetwork to be used for machine |
* communications. Cannot be specified with network_uri. Example: |
- * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`. |
+ * https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0. |
*/ |
core.String subnetworkUri; |
/** |
- * The Google Compute Engine tags to add to all instances (see [Tagging |
- * instances](/compute/docs/label-or-tag-resources#tags)). |
+ * The Google Compute Engine tags to add to all instances (see Tagging |
+ * instances). |
*/ |
core.List<core.String> tags; |
/** |
- * [Required] The zone where the Google Compute Engine cluster will be |
- * located. Example: |
- * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`. |
+ * Required The zone where the Google Compute Engine cluster will be located. |
+ * Example: |
+ * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]. |
*/ |
core.String zoneUri; |
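
A hedged sketch of populating the new serviceAccount field together with the scope list described above when defining a cluster. The clusters create call is inferred from its request-parameter documentation earlier in this diff, and every identifier value here is a placeholder.

  /// Builds a cluster whose VMs run as a custom service account.
  Future<Operation> createClusterWithCustomServiceAccount(DataprocApi api) {
    var cluster = new Cluster()
      ..projectId = 'my-project'
      ..clusterName = 'analytics-cluster'
      ..config = (new ClusterConfig()
        ..gceClusterConfig = (new GceClusterConfig()
          ..zoneUri = 'https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-a'
          ..serviceAccount = 'dataproc-vm@my-project.iam.gserviceaccount.com'
          ..serviceAccountScopes = [
            'https://www.googleapis.com/auth/cloud-platform'
          ]));
    return api.projects.regions.clusters.create(cluster, 'my-project', 'global');
  }
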
@@ -1542,6 +1639,9 @@ class GceClusterConfig { |
if (_json.containsKey("networkUri")) { |
networkUri = _json["networkUri"]; |
} |
+ if (_json.containsKey("serviceAccount")) { |
+ serviceAccount = _json["serviceAccount"]; |
+ } |
if (_json.containsKey("serviceAccountScopes")) { |
serviceAccountScopes = _json["serviceAccountScopes"]; |
} |
@@ -1567,6 +1667,9 @@ class GceClusterConfig { |
if (networkUri != null) { |
_json["networkUri"] = networkUri; |
} |
+ if (serviceAccount != null) { |
+ _json["serviceAccount"] = serviceAccount; |
+ } |
if (serviceAccountScopes != null) { |
_json["serviceAccountScopes"] = serviceAccountScopes; |
} |
@@ -1584,40 +1687,40 @@ class GceClusterConfig { |
} |
/** |
- * A Cloud Dataproc job for running [Apache Hadoop |
- * MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) |
- * jobs on [Apache Hadoop |
- * YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). |
+ * A Cloud Dataproc job for running Apache Hadoop MapReduce |
+ * (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) |
+ * jobs on Apache Hadoop YARN |
+ * (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). |
*/ |
class HadoopJob { |
/** |
- * [Optional] HCFS URIs of archives to be extracted in the working directory |
- * of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, |
- * .tgz, or .zip. |
+ * Optional HCFS URIs of archives to be extracted in the working directory of |
+ * Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, |
+ * or .zip. |
*/ |
core.List<core.String> archiveUris; |
/** |
- * [Optional] The arguments to pass to the driver. Do not include arguments, |
- * such as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since |
- * a collision may occur that causes an incorrect job submission. |
+ * Optional The arguments to pass to the driver. Do not include arguments, |
+ * such as -libjars or -Dfoo=bar, that can be set as job properties, since a |
+ * collision may occur that causes an incorrect job submission. |
*/ |
core.List<core.String> args; |
/** |
- * [Optional] HCFS (Hadoop Compatible Filesystem) URIs of files to be copied |
- * to the working directory of Hadoop drivers and distributed tasks. Useful |
- * for naively parallel tasks. |
+ * Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to |
+ * the working directory of Hadoop drivers and distributed tasks. Useful for |
+ * naively parallel tasks. |
*/ |
core.List<core.String> fileUris; |
/** |
- * [Optional] Jar file URIs to add to the CLASSPATHs of the Hadoop driver and |
+ * Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and |
* tasks. |
*/ |
core.List<core.String> jarFileUris; |
- /** [Optional] The runtime log config for job execution. */ |
+ /** Optional The runtime log config for job execution. */ |
LoggingConfig loggingConfig; |
/** |
* The name of the driver's main class. The jar file containing the class must |
- * be in the default CLASSPATH or specified in `jar_file_uris`. |
+ * be in the default CLASSPATH or specified in jar_file_uris. |
*/ |
core.String mainClass; |
/** |
@@ -1628,7 +1731,7 @@ class HadoopJob { |
*/ |
core.String mainJarFileUri; |
/** |
- * [Optional] A mapping of property names to values, used to configure Hadoop. |
+ * Optional A mapping of property names to values, used to configure Hadoop. |
* Properties that conflict with values set by the Cloud Dataproc API may be |
* overwritten. Can include properties set in /etc/hadoop/conf / * -site and |
* classes in user code. |
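
A hedged sketch of submitting the HadoopJob described above through the jobs submit method documented earlier in this diff. The SubmitJobRequest wrapper (with its job field) is assumed here, since it does not appear in this excerpt, and the bucket and cluster names are placeholders.

  /// Runs a jar from Cloud Storage on an existing cluster.
  Future<Job> submitWordCount(DataprocApi api) {
    var request = new SubmitJobRequest() // assumed wrapper type, not shown in this excerpt
      ..job = (new Job()
        ..placement = (new JobPlacement()..clusterName = 'my-cluster')
        ..hadoopJob = (new HadoopJob()
          ..mainJarFileUri = 'gs://my-bucket/wordcount.jar'
          ..args = ['gs://my-bucket/input/', 'gs://my-bucket/output/']));
    return api.projects.regions.jobs.submit(request, 'my-project', 'global');
  }
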
@@ -1695,23 +1798,23 @@ class HadoopJob { |
} |
/** |
- * A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/) |
+ * A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/) |
* queries on YARN. |
*/ |
class HiveJob { |
/** |
- * [Optional] Whether to continue executing queries if a query fails. The |
- * default value is `false`. Setting to `true` can be useful when executing |
+ * Optional Whether to continue executing queries if a query fails. The |
+ * default value is false. Setting to true can be useful when executing |
* independent parallel queries. |
*/ |
core.bool continueOnFailure; |
/** |
- * [Optional] HCFS URIs of jar files to add to the CLASSPATH of the Hive |
- * server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. |
+ * Optional HCFS URIs of jar files to add to the CLASSPATH of the Hive server |
+ * and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. |
*/ |
core.List<core.String> jarFileUris; |
/** |
- * [Optional] A mapping of property names and values, used to configure Hive. |
+ * Optional A mapping of property names and values, used to configure Hive. |
* Properties that conflict with values set by the Cloud Dataproc API may be |
* overwritten. Can include properties set in /etc/hadoop/conf / * -site.xml, |
* /etc/hive/conf/hive-site.xml, and classes in user code. |
@@ -1722,8 +1825,8 @@ class HiveJob { |
/** A list of queries. */ |
QueryList queryList; |
/** |
- * [Optional] Mapping of query variable names to values (equivalent to the |
- * Hive command: `SET name="value";`). |
+ * Optional Mapping of query variable names to values (equivalent to the Hive |
+ * command: SET name="value";). |
*/ |
core.Map<core.String, core.String> scriptVariables; |
@@ -1775,41 +1878,40 @@ class HiveJob { |
} |
/** |
- * [Optional] The config settings for Google Compute Engine resources in an |
+ * Optional The config settings for Google Compute Engine resources in an |
* instance group, such as a master or worker group. |
*/ |
class InstanceGroupConfig { |
- /** [Optional] Disk option config settings. */ |
+ /** Optional Disk option config settings. */ |
DiskConfig diskConfig; |
/** |
- * [Output-only] The Google Compute Engine image resource used for cluster |
- * instances. Inferred from `SoftwareConfig.image_version`. |
+ * Output-only The Google Compute Engine image resource used for cluster |
+ * instances. Inferred from SoftwareConfig.image_version. |
*/ |
core.String imageUri; |
/** |
- * [Optional] The list of instance names. Cloud Dataproc derives the names |
- * from `cluster_name`, `num_instances`, and the instance group if not set by |
- * user (recommended practice is to let Cloud Dataproc derive the name). |
+ * Optional The list of instance names. Cloud Dataproc derives the names from |
+ * cluster_name, num_instances, and the instance group if not set by user |
+ * (recommended practice is to let Cloud Dataproc derive the name). |
*/ |
core.List<core.String> instanceNames; |
/** |
- * [Optional] Specifies that this instance group contains preemptible |
- * instances. |
+ * Optional Specifies that this instance group contains preemptible instances. |
*/ |
core.bool isPreemptible; |
/** |
- * [Required] The Google Compute Engine machine type used for cluster |
- * instances. Example: |
- * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`. |
+ * Required The Google Compute Engine machine type used for cluster instances. |
+ * Example: |
+ * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2. |
*/ |
core.String machineTypeUri; |
/** |
- * [Output-only] The config for Google Compute Engine Instance Group Manager |
+ * Output-only The config for Google Compute Engine Instance Group Manager |
* that manages this group. This is only used for preemptible instance groups. |
*/ |
ManagedGroupConfig managedGroupConfig; |
/** |
- * [Required] The number of VM instances in the instance group. For master |
+ * Required The number of VM instances in the instance group. For master |
* instance groups, must be set to 1. |
*/ |
core.int numInstances; |
@@ -1870,13 +1972,13 @@ class InstanceGroupConfig { |
/** A Cloud Dataproc job resource. */ |
class Job { |
/** |
- * [Output-only] If present, the location of miscellaneous control files which |
+ * Output-only If present, the location of miscellaneous control files which |
* may be used as part of job setup and handling. If not present, control |
- * files may be placed in the same location as `driver_output_uri`. |
+ * files may be placed in the same location as driver_output_uri. |
*/ |
core.String driverControlFilesUri; |
/** |
- * [Output-only] A URI pointing to the location of the stdout of the job's |
+ * Output-only A URI pointing to the location of the stdout of the job's |
* driver program. |
*/ |
core.String driverOutputResourceUri; |
@@ -1885,43 +1987,47 @@ class Job { |
/** Job is a Hive job. */ |
HiveJob hiveJob; |
/** |
- * [Optional] The labels to associate with this job. Label **keys** must |
- * contain 1 to 63 characters, and must conform to [RFC |
- * 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty, |
- * but, if present, must contain 1 to 63 characters, and must conform to [RFC |
- * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be |
+ * Optional The labels to associate with this job. Label keys must contain 1 |
+ * to 63 characters, and must conform to RFC 1035 |
+ * (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if |
+ * present, must contain 1 to 63 characters, and must conform to RFC 1035 |
+ * (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be |
* associated with a job. |
*/ |
core.Map<core.String, core.String> labels; |
/** Job is a Pig job. */ |
PigJob pigJob; |
/** |
- * [Required] Job information, including how, when, and where to run the job. |
+ * Required Job information, including how, when, and where to run the job. |
*/ |
JobPlacement placement; |
/** Job is a Pyspark job. */ |
PySparkJob pysparkJob; |
/** |
- * [Optional] The fully qualified reference to the job, which can be used to |
+ * Optional The fully qualified reference to the job, which can be used to |
* obtain the equivalent REST path of the job resource. If this property is |
- * not specified when a job is created, the server generates a job_id. |
+ * not specified when a job is created, the server generates a |
+ * <code>job_id</code>. |
*/ |
JobReference reference; |
+ /** Optional Job scheduling configuration. */ |
+ JobScheduling scheduling; |
/** Job is a Spark job. */ |
SparkJob sparkJob; |
/** Job is a SparkSql job. */ |
SparkSqlJob sparkSqlJob; |
/** |
- * [Output-only] The job status. Additional application-specific status |
- * information may be contained in the type_job and yarn_applications fields. |
+ * Output-only The job status. Additional application-specific status |
+ * information may be contained in the <code>type_job</code> and |
+ * <code>yarn_applications</code> fields. |
*/ |
JobStatus status; |
- /** [Output-only] The previous job status. */ |
+ /** Output-only The previous job status. */ |
core.List<JobStatus> statusHistory; |
/** |
- * [Output-only] The collection of YARN applications spun up by this job. |
- * **Beta** Feature: This report is available for testing purposes only. It |
- * may be changed before final release. |
+ * Output-only The collection of YARN applications spun up by this job. Beta |
+ * Feature: This report is available for testing purposes only. It may be |
+ * changed before final release. |
*/ |
core.List<YarnApplication> yarnApplications; |
@@ -1955,6 +2061,9 @@ class Job { |
if (_json.containsKey("reference")) { |
reference = new JobReference.fromJson(_json["reference"]); |
} |
+ if (_json.containsKey("scheduling")) { |
+ scheduling = new JobScheduling.fromJson(_json["scheduling"]); |
+ } |
if (_json.containsKey("sparkJob")) { |
sparkJob = new SparkJob.fromJson(_json["sparkJob"]); |
} |
@@ -2001,6 +2110,9 @@ class Job { |
if (reference != null) { |
_json["reference"] = (reference).toJson(); |
} |
+ if (scheduling != null) { |
+ _json["scheduling"] = (scheduling).toJson(); |
+ } |
if (sparkJob != null) { |
_json["sparkJob"] = (sparkJob).toJson(); |
} |
@@ -2022,11 +2134,11 @@ class Job { |
/** Cloud Dataproc job config. */ |
class JobPlacement { |
- /** [Required] The name of the cluster where the job will be submitted. */ |
+ /** Required The name of the cluster where the job will be submitted. */ |
core.String clusterName; |
/** |
- * [Output-only] A cluster UUID generated by the Cloud Dataproc service when |
- * the job is submitted. |
+ * Output-only A cluster UUID generated by the Cloud Dataproc service when the |
+ * job is submitted. |
*/ |
core.String clusterUuid; |
@@ -2056,15 +2168,15 @@ class JobPlacement { |
/** Encapsulates the full scoping used to reference a job. */ |
class JobReference { |
/** |
- * [Optional] The job ID, which must be unique within the project. The job ID |
- * is generated by the server upon job submission or provided by the user as a |
+ * Optional The job ID, which must be unique within the project. The job ID is |
+ * generated by the server upon job submission or provided by the user as a |
* means to perform retries without creating duplicate jobs. The ID must |
* contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens |
* (-). The maximum length is 512 characters. |
*/ |
core.String jobId; |
/** |
- * [Required] The ID of the Google Cloud Platform project that the job belongs |
+ * Required The ID of the Google Cloud Platform project that the job belongs |
* to. |
*/ |
core.String projectId; |
@@ -2092,28 +2204,63 @@ class JobReference { |
} |
} |
+/** |
+ * Job scheduling options. Beta Feature: These options are available for testing |
+ * purposes only. They may be changed before final release. |
+ */ |
+class JobScheduling { |
+ /** |
+ * Optional Maximum number of times per hour a driver may be restarted as a |
+ * result of the driver terminating with a non-zero code before the job is |
+ * reported failed. A job may be reported as thrashing if the driver exits |
+ * with a non-zero code 4 times within a 10-minute window. Maximum value is 10. |
+ */ |
+ core.int maxFailuresPerHour; |
+ |
+ JobScheduling(); |
+ |
+ JobScheduling.fromJson(core.Map _json) { |
+ if (_json.containsKey("maxFailuresPerHour")) { |
+ maxFailuresPerHour = _json["maxFailuresPerHour"]; |
+ } |
+ } |
+ |
+ core.Map toJson() { |
+ var _json = new core.Map(); |
+ if (maxFailuresPerHour != null) { |
+ _json["maxFailuresPerHour"] = maxFailuresPerHour; |
+ } |
+ return _json; |
+ } |
+} |
+ |
/** Cloud Dataproc job status. */ |
class JobStatus { |
/** |
- * [Output-only] Optional job state details, such as an error description if |
- * the state is ERROR. |
+ * Output-only Optional job state details, such as an error description if the |
+ * state is <code>ERROR</code>. |
*/ |
core.String details; |
/** |
- * [Output-only] A state message specifying the overall job state. |
+ * Output-only A state message specifying the overall job state. |
* Possible string values are: |
- * - "STATE_UNSPECIFIED" : A STATE_UNSPECIFIED. |
- * - "PENDING" : A PENDING. |
- * - "SETUP_DONE" : A SETUP_DONE. |
- * - "RUNNING" : A RUNNING. |
- * - "CANCEL_PENDING" : A CANCEL_PENDING. |
- * - "CANCEL_STARTED" : A CANCEL_STARTED. |
- * - "CANCELLED" : A CANCELLED. |
- * - "DONE" : A DONE. |
- * - "ERROR" : A ERROR. |
+ * - "STATE_UNSPECIFIED" : The job state is unknown. |
+ * - "PENDING" : The job is pending; it has been submitted, but is not yet |
+ * running. |
+ * - "SETUP_DONE" : Job has been received by the service and completed initial |
+ * setup; it will soon be submitted to the cluster. |
+ * - "RUNNING" : The job is running on the cluster. |
+ * - "CANCEL_PENDING" : A CancelJob request has been received, but is pending. |
+ * - "CANCEL_STARTED" : Transient in-flight resources have been canceled, and |
+ * the request to cancel the running job has been issued to the cluster. |
+ * - "CANCELLED" : The job cancellation was successful. |
+ * - "DONE" : The job has completed successfully. |
+ * - "ERROR" : The job has completed, but encountered an error. |
+ * - "ATTEMPT_FAILURE" : Job attempt has failed. The detail field contains |
+ * failure details for this attempt. Applies to restartable jobs only. |
*/ |
core.String state; |
- /** [Output-only] The time when this state was entered. */ |
+ /** Output-only The time when this state was entered. */ |
core.String stateStartTime; |
JobStatus(); |
@@ -2147,12 +2294,12 @@ class JobStatus { |
/** The list of all clusters in a project. */ |
class ListClustersResponse { |
- /** [Output-only] The clusters in the project. */ |
+ /** Output-only The clusters in the project. */ |
core.List<Cluster> clusters; |
/** |
- * [Output-only] This token is included in the response if there are more |
+ * Output-only This token is included in the response if there are more |
* results to fetch. To fetch additional results, provide this value as the |
- * `page_token` in a subsequent ListClustersRequest. |
+ * page_token in a subsequent <code>ListClustersRequest</code>. |
*/ |
core.String nextPageToken; |
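
The page_token threading described above follows the usual list-pagination pattern. A sketch under the assumption that fetchPage is a caller-supplied wrapper around the clusters list call (its exact signature is not shown in this hunk):

import 'dart:async';

import 'package:googleapis/dataproc/v1.dart';

/// Follows nextPageToken until it is absent, collecting every Cluster.
/// [fetchPage] is a caller-supplied wrapper around the clusters list call.
Future<List<Cluster>> listAllClusters(
    Future<ListClustersResponse> fetchPage(String pageToken)) async {
  var clusters = <Cluster>[];
  String token;
  do {
    ListClustersResponse page = await fetchPage(token);
    if (page.clusters != null) clusters.addAll(page.clusters);
    token = page.nextPageToken;
  } while (token != null);
  return clusters;
}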
@@ -2181,12 +2328,12 @@ class ListClustersResponse { |
/** A list of jobs in a project. */ |
class ListJobsResponse { |
- /** [Output-only] Jobs list. */ |
+ /** Output-only Jobs list. */ |
core.List<Job> jobs; |
/** |
- * [Optional] This token is included in the response if there are more results |
- * to fetch. To fetch additional results, provide this value as the |
- * `page_token` in a subsequent ListJobsRequest. |
+ * Optional This token is included in the response if there are more results |
+ * to fetch. To fetch additional results, provide this value as the page_token |
+ * in a subsequent <code>ListJobsRequest</code>. |
*/ |
core.String nextPageToken; |
@@ -2247,7 +2394,7 @@ class ListOperationsResponse { |
class LoggingConfig { |
/** |
* The per-package log levels for the driver. This may include "root" package |
- * name to configure rootLogger. Examples: 'com.google = FATAL', 'root = |
+ * name to configure rootLogger. Examples: 'com.google = FATAL', 'root =
* INFO', 'org.apache = DEBUG' |
*/ |
core.Map<core.String, core.String> driverLogLevels; |
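
A sketch of populating driverLogLevels with the example levels from the comment above:

import 'package:googleapis/dataproc/v1.dart';

void main() {
  var logging = new LoggingConfig()
    ..driverLogLevels = {
      'root': 'INFO',        // the rootLogger
      'com.google': 'FATAL',
      'org.apache': 'DEBUG',
    };
  print(logging.toJson());
}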
@@ -2271,11 +2418,11 @@ class LoggingConfig { |
/** Specifies the resources used to actively manage an instance group. */ |
class ManagedGroupConfig { |
- /** [Output-only] The name of the Instance Group Manager for this group. */ |
+ /** Output-only The name of the Instance Group Manager for this group. */ |
core.String instanceGroupManagerName; |
/** |
- * [Output-only] The name of the Instance Template used for the Managed |
- * Instance Group. |
+ * Output-only The name of the Instance Template used for the Managed Instance |
+ * Group. |
*/ |
core.String instanceTemplateName; |
@@ -2307,13 +2454,13 @@ class ManagedGroupConfig { |
* period for executable completion. |
*/ |
class NodeInitializationAction { |
- /** [Required] Google Cloud Storage URI of executable file. */ |
+ /** Required Google Cloud Storage URI of executable file. */ |
core.String executableFile; |
/** |
- * [Optional] Amount of time executable has to complete. Default is 10 |
- * minutes. Cluster creation fails with an explanatory error message (the name |
- * of the executable that caused the error and the exceeded timeout period) if |
- * the executable is not completed at end of the timeout period. |
+ * Optional Amount of time executable has to complete. Default is 10 minutes. |
+ * Cluster creation fails with an explanatory error message (the name of the |
+ * executable that caused the error and the exceeded timeout period) if the |
+ * executable is not completed at end of the timeout period. |
*/ |
core.String executionTimeout; |
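
A sketch of a NodeInitializationAction; the Cloud Storage URI is a placeholder, and the timeout is written as a duration string matching the 10 minute default noted above:

import 'package:googleapis/dataproc/v1.dart';

void main() {
  var action = new NodeInitializationAction()
    // Placeholder Cloud Storage URI for the startup script.
    ..executableFile = 'gs://my-bucket/init/install-deps.sh'
    // Duration string; 600s corresponds to the 10 minute default.
    ..executionTimeout = '600s';
  print(action.toJson());
}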
@@ -2346,8 +2493,8 @@ class NodeInitializationAction { |
*/ |
class Operation { |
/** |
- * If the value is `false`, it means the operation is still in progress. If |
- * true, the operation is completed, and either `error` or `response` is |
+ * If the value is false, it means the operation is still in progress. If |
+ * true, the operation is completed, and either error or response is |
* available. |
*/ |
core.bool done; |
@@ -2365,18 +2512,18 @@ class Operation { |
core.Map<core.String, core.Object> metadata; |
/** |
* The server-assigned name, which is only unique within the same service that |
- * originally returns it. If you use the default HTTP mapping, the `name` |
- * should have the format of `operations/some/unique/name`. |
+ * originally returns it. If you use the default HTTP mapping, the name should |
+ * have the format of operations/some/unique/name. |
*/ |
core.String name; |
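
A small sketch of inspecting a returned Operation using only the done, metadata, and name fields declared in this class; how the Operation is obtained is outside this snippet:

import 'package:googleapis/dataproc/v1.dart';

/// Prints a one-line summary of an Operation returned by a cluster call.
void describeOperation(Operation op) {
  if (op.done == true) {
    print('${op.name} finished; metadata: ${op.metadata}');
  } else {
    print('${op.name} is still in progress');
  }
}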
/** |
* The normal response of the operation in case of success. If the original |
- * method returns no data on success, such as `Delete`, the response is |
- * `google.protobuf.Empty`. If the original method is standard |
- * `Get`/`Create`/`Update`, the response should be the resource. For other |
- * methods, the response should have the type `XxxResponse`, where `Xxx` is |
- * the original method name. For example, if the original method name is |
- * `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`. |
+ * method returns no data on success, such as Delete, the response is |
+ * google.protobuf.Empty. If the original method is standard |
+ * Get/Create/Update, the response should be the resource. For other methods, |
+ * the response should have the type XxxResponse, where Xxx is the original |
+ * method name. For example, if the original method name is TakeSnapshot(), |
+ * the inferred response type is TakeSnapshotResponse. |
* |
* The values for Object must be JSON objects. It can consist of `num`, |
* `String`, `bool` and `null` as well as `Map` and `List` values. |
@@ -2430,7 +2577,7 @@ class OperationMetadata { |
core.String clusterName; |
/** Cluster UUId for the operation. */ |
core.String clusterUuid; |
- /** [Output-only] Short description of operation. */ |
+ /** Output-only Short description of operation. */ |
core.String description; |
/** A message containing any operation metadata details. */ |
core.String details; |
@@ -2440,23 +2587,25 @@ class OperationMetadata { |
core.String innerState; |
/** The time that the operation was requested. */ |
core.String insertTime; |
- /** [Output-only] The operation type. */ |
+ /** Output-only The operation type. */ |
core.String operationType; |
/** The time that the operation was started by the server. */ |
core.String startTime; |
/** |
* A message containing the operation state. |
* Possible string values are: |
- * - "UNKNOWN" : A UNKNOWN. |
- * - "PENDING" : A PENDING. |
- * - "RUNNING" : A RUNNING. |
- * - "DONE" : A DONE. |
+ * - "UNKNOWN" : Unused. |
+ * - "PENDING" : The operation has been created. |
+ * - "RUNNING" : The operation is currently running. |
+ * - "DONE" : The operation is done, either cancelled or completed. |
*/ |
core.String state; |
- /** [Output-only] Current operation status. */ |
+ /** Output-only Current operation status. */ |
OperationStatus status; |
- /** [Output-only] Previous operation status. */ |
+ /** Output-only Previous operation status. */ |
core.List<OperationStatus> statusHistory; |
+ /** Output-only Errors encountered during operation execution. */ |
+ core.List<core.String> warnings; |
OperationMetadata(); |
@@ -2497,6 +2646,9 @@ class OperationMetadata { |
if (_json.containsKey("statusHistory")) { |
statusHistory = _json["statusHistory"].map((value) => new OperationStatus.fromJson(value)).toList(); |
} |
+ if (_json.containsKey("warnings")) { |
+ warnings = _json["warnings"]; |
+ } |
} |
core.Map toJson() { |
@@ -2537,6 +2689,9 @@ class OperationMetadata { |
if (statusHistory != null) { |
_json["statusHistory"] = statusHistory.map((value) => (value).toJson()).toList(); |
} |
+ if (warnings != null) { |
+ _json["warnings"] = warnings; |
+ } |
return _json; |
} |
} |
@@ -2550,10 +2705,10 @@ class OperationStatus { |
/** |
* A message containing the operation state. |
* Possible string values are: |
- * - "UNKNOWN" : A UNKNOWN. |
- * - "PENDING" : A PENDING. |
- * - "RUNNING" : A RUNNING. |
- * - "DONE" : A DONE. |
+ * - "UNKNOWN" : Unused. |
+ * - "PENDING" : The operation has been created. |
+ * - "RUNNING" : The operation is running. |
+ * - "DONE" : The operation is done; either cancelled or completed. |
*/ |
core.String state; |
/** The time this state was entered. */ |
@@ -2595,25 +2750,25 @@ class OperationStatus { |
} |
/** |
- * A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/) |
- * queries on YARN. |
+ * A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/) queries |
+ * on YARN. |
*/ |
class PigJob { |
/** |
- * [Optional] Whether to continue executing queries if a query fails. The |
- * default value is `false`. Setting to `true` can be useful when executing |
+ * Optional Whether to continue executing queries if a query fails. The |
+ * default value is false. Setting to true can be useful when executing |
* independent parallel queries. |
*/ |
core.bool continueOnFailure; |
/** |
- * [Optional] HCFS URIs of jar files to add to the CLASSPATH of the Pig Client |
+ * Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client |
* and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. |
*/ |
core.List<core.String> jarFileUris; |
- /** [Optional] The runtime log config for job execution. */ |
+ /** Optional The runtime log config for job execution. */ |
LoggingConfig loggingConfig; |
/** |
- * [Optional] A mapping of property names to values, used to configure Pig. |
+ * Optional A mapping of property names to values, used to configure Pig. |
* Properties that conflict with values set by the Cloud Dataproc API may be |
* overwritten. Can include properties set in /etc/hadoop/conf / * -site.xml, |
* /etc/pig/conf/pig.properties, and classes in user code. |
@@ -2624,8 +2779,8 @@ class PigJob { |
/** A list of queries. */ |
QueryList queryList; |
/** |
- * [Optional] Mapping of query variable names to values (equivalent to the Pig |
- * command: `name=[value]`). |
+ * Optional Mapping of query variable names to values (equivalent to the Pig |
+ * command: name=[value]). |
*/ |
core.Map<core.String, core.String> scriptVariables; |
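
A sketch that ties the PigJob fields above together, using the name=[value] substitution described for scriptVariables; the bucket path and query text are placeholders:

import 'package:googleapis/dataproc/v1.dart';

void main() {
  var pig = new PigJob()
    ..continueOnFailure = false
    // Raw string so Dart does not interpolate the Pig $input_dir variable.
    ..queryList = (new QueryList()
      ..queries = [r"data = LOAD '$input_dir'; DUMP data;"])
    ..scriptVariables = {'input_dir': 'gs://my-bucket/data'} // name=[value]
    ..loggingConfig = (new LoggingConfig()..driverLogLevels = {'root': 'INFO'});
  print(pig.toJson());
}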
@@ -2683,48 +2838,48 @@ class PigJob { |
} |
/** |
- * A Cloud Dataproc job for running [Apache |
- * PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html) |
+ * A Cloud Dataproc job for running Apache PySpark |
+ * (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) |
* applications on YARN. |
*/ |
class PySparkJob { |
/** |
- * [Optional] HCFS URIs of archives to be extracted in the working directory |
- * of .jar, .tar, .tar.gz, .tgz, and .zip. |
+ * Optional HCFS URIs of archives to be extracted in the working directory of |
+ * .jar, .tar, .tar.gz, .tgz, and .zip. |
*/ |
core.List<core.String> archiveUris; |
/** |
- * [Optional] The arguments to pass to the driver. Do not include arguments, |
- * such as `--conf`, that can be set as job properties, since a collision may |
+ * Optional The arguments to pass to the driver. Do not include arguments, |
+ * such as --conf, that can be set as job properties, since a collision may |
* occur that causes an incorrect job submission. |
*/ |
core.List<core.String> args; |
/** |
- * [Optional] HCFS URIs of files to be copied to the working directory of |
- * Python drivers and distributed tasks. Useful for naively parallel tasks. |
+ * Optional HCFS URIs of files to be copied to the working directory of Python |
+ * drivers and distributed tasks. Useful for naively parallel tasks. |
*/ |
core.List<core.String> fileUris; |
/** |
- * [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the Python |
+ * Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python |
* driver and tasks. |
*/ |
core.List<core.String> jarFileUris; |
- /** [Optional] The runtime log config for job execution. */ |
+ /** Optional The runtime log config for job execution. */ |
LoggingConfig loggingConfig; |
/** |
- * [Required] The HCFS URI of the main Python file to use as the driver. Must |
- * be a .py file. |
+ * Required The HCFS URI of the main Python file to use as the driver. Must be |
+ * a .py file. |
*/ |
core.String mainPythonFileUri; |
/** |
- * [Optional] A mapping of property names to values, used to configure |
- * PySpark. Properties that conflict with values set by the Cloud Dataproc API |
- * may be overwritten. Can include properties set in |
+ * Optional A mapping of property names to values, used to configure PySpark. |
+ * Properties that conflict with values set by the Cloud Dataproc API may be |
+ * overwritten. Can include properties set in |
* /etc/spark/conf/spark-defaults.conf and classes in user code. |
*/ |
core.Map<core.String, core.String> properties; |
/** |
- * [Optional] HCFS file URIs of Python files to pass to the PySpark framework. |
+ * Optional HCFS file URIs of Python files to pass to the PySpark framework. |
* Supported file types: .py, .egg, and .zip. |
*/ |
core.List<core.String> pythonFileUris; |
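
A sketch of a PySparkJob assembled from the fields above; the gs:// URIs and property values are placeholders:

import 'package:googleapis/dataproc/v1.dart';

void main() {
  var pyspark = new PySparkJob()
    ..mainPythonFileUri = 'gs://my-bucket/jobs/word_count.py' // required
    ..pythonFileUris = ['gs://my-bucket/jobs/helpers.zip']
    // Plain arguments only; flags such as --conf belong in properties.
    ..args = ['gs://my-bucket/input/', 'gs://my-bucket/output/']
    ..properties = {'spark.executor.memory': '2g'};
  print(pyspark.toJson());
}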
@@ -2791,11 +2946,19 @@ class PySparkJob { |
/** A list of queries to run on a cluster. */ |
class QueryList { |
/** |
- * [Required] The queries to execute. You do not need to terminate a query |
- * with a semicolon. Multiple queries can be specified in one string by |
- * separating each with a semicolon. Here is an example of an Cloud Dataproc |
- * API snippet that uses a QueryList to specify a HiveJob: "hiveJob": { |
- * "queryList": { "queries": [ "query1", "query2", "query3;query4", ] } } |
+ * Required The queries to execute. You do not need to terminate a query with |
+ * a semicolon. Multiple queries can be specified in one string by separating |
+ * each with a semicolon. Here is an example of a Cloud Dataproc API snippet
+ * that uses a QueryList to specify a HiveJob: |
+ * "hiveJob": { |
+ * "queryList": { |
+ * "queries": [ |
+ * "query1", |
+ * "query2", |
+ * "query3;query4", |
+ * ] |
+ * } |
+ * } |
*/ |
core.List<core.String> queries; |
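
The JSON snippet in the comment above maps directly onto the generated class; a sketch:

import 'package:googleapis/dataproc/v1.dart';

void main() {
  // Mirrors the "queries" example in the field documentation above.
  var queryList = new QueryList()
    ..queries = ['query1', 'query2', 'query3;query4'];
  print(queryList.toJson()); // {queries: [query1, query2, query3;query4]}
}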
@@ -2819,18 +2982,22 @@ class QueryList { |
/** Specifies the selection and config of software inside the cluster. */ |
class SoftwareConfig { |
/** |
- * [Optional] The version of software inside the cluster. It must match the |
- * regular expression `[0-9]+\.[0-9]+`. If unspecified, it defaults to the |
- * latest version (see [Cloud Dataproc Versioning](/dataproc/versioning)). |
+ * Optional The version of software inside the cluster. It must match the |
+ * regular expression [0-9]+\.[0-9]+. If unspecified, it defaults to the |
+ * latest version (see Cloud Dataproc Versioning). |
*/ |
core.String imageVersion; |
/** |
- * [Optional] The properties to set on daemon config files. Property keys are |
- * specified in `prefix:property` format, such as `core:fs.defaultFS`. The |
- * following are supported prefixes and their mappings: * core: |
- * `core-site.xml` * hdfs: `hdfs-site.xml` * mapred: `mapred-site.xml` * yarn: |
- * `yarn-site.xml` * hive: `hive-site.xml` * pig: `pig.properties` * spark: |
- * `spark-defaults.conf` |
+ * Optional The properties to set on daemon config files. Property keys are
+ * specified in prefix:property format, such as core:fs.defaultFS. The |
+ * following are supported prefixes and their mappings: |
+ * core: core-site.xml |
+ * hdfs: hdfs-site.xml |
+ * mapred: mapred-site.xml |
+ * yarn: yarn-site.xml |
+ * hive: hive-site.xml |
+ * pig: pig.properties |
+ * spark: spark-defaults.conf |
*/ |
core.Map<core.String, core.String> properties; |
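
A sketch of the prefix:property key format described above; the image version and property values are placeholders:

import 'package:googleapis/dataproc/v1.dart';

void main() {
  var software = new SoftwareConfig()
    ..imageVersion = '1.1' // must match [0-9]+\.[0-9]+
    ..properties = {
      'core:fs.defaultFS': 'gs://my-bucket',   // written to core-site.xml
      'spark:spark.executor.memory': '4g',     // written to spark-defaults.conf
    };
  print(software.toJson());
}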
@@ -2858,43 +3025,43 @@ class SoftwareConfig { |
} |
/** |
- * A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/) |
+ * A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/) |
* applications on YARN. |
*/ |
class SparkJob { |
/** |
- * [Optional] HCFS URIs of archives to be extracted in the working directory |
- * of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, |
- * .tgz, and .zip. |
+ * Optional HCFS URIs of archives to be extracted in the working directory of |
+ * Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, |
+ * and .zip. |
*/ |
core.List<core.String> archiveUris; |
/** |
- * [Optional] The arguments to pass to the driver. Do not include arguments, |
- * such as `--conf`, that can be set as job properties, since a collision may |
+ * Optional The arguments to pass to the driver. Do not include arguments, |
+ * such as --conf, that can be set as job properties, since a collision may |
* occur that causes an incorrect job submission. |
*/ |
core.List<core.String> args; |
/** |
- * [Optional] HCFS URIs of files to be copied to the working directory of |
- * Spark drivers and distributed tasks. Useful for naively parallel tasks. |
+ * Optional HCFS URIs of files to be copied to the working directory of Spark |
+ * drivers and distributed tasks. Useful for naively parallel tasks. |
*/ |
core.List<core.String> fileUris; |
/** |
- * [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the Spark |
+ * Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark |
* driver and tasks. |
*/ |
core.List<core.String> jarFileUris; |
- /** [Optional] The runtime log config for job execution. */ |
+ /** Optional The runtime log config for job execution. */ |
LoggingConfig loggingConfig; |
/** |
* The name of the driver's main class. The jar file that contains the class |
- * must be in the default CLASSPATH or specified in `jar_file_uris`. |
+ * must be in the default CLASSPATH or specified in jar_file_uris. |
*/ |
core.String mainClass; |
/** The HCFS URI of the jar file that contains the main class. */ |
core.String mainJarFileUri; |
/** |
- * [Optional] A mapping of property names to values, used to configure Spark. |
+ * Optional A mapping of property names to values, used to configure Spark. |
* Properties that conflict with values set by the Cloud Dataproc API may be |
* overwritten. Can include properties set in |
* /etc/spark/conf/spark-defaults.conf and classes in user code. |
@@ -2961,16 +3128,16 @@ class SparkJob { |
} |
/** |
- * A Cloud Dataproc job for running [Apache Spark |
- * SQL](http://spark.apache.org/sql/) queries. |
+ * A Cloud Dataproc job for running Apache Spark SQL |
+ * (http://spark.apache.org/sql/) queries. |
*/ |
class SparkSqlJob { |
- /** [Optional] HCFS URIs of jar files to be added to the Spark CLASSPATH. */ |
+ /** Optional HCFS URIs of jar files to be added to the Spark CLASSPATH. */ |
core.List<core.String> jarFileUris; |
- /** [Optional] The runtime log config for job execution. */ |
+ /** Optional The runtime log config for job execution. */ |
LoggingConfig loggingConfig; |
/** |
- * [Optional] A mapping of property names to values, used to configure Spark |
+ * Optional A mapping of property names to values, used to configure Spark |
* SQL's SparkConf. Properties that conflict with values set by the Cloud |
* Dataproc API may be overwritten. |
*/ |
@@ -2980,8 +3147,8 @@ class SparkSqlJob { |
/** A list of queries. */ |
QueryList queryList; |
/** |
- * [Optional] Mapping of query variable names to values (equivalent to the |
- * Spark SQL command: SET `name="value";`). |
+ * Optional Mapping of query variable names to values (equivalent to the Spark |
+ * SQL command: SET name="value";). |
*/ |
core.Map<core.String, core.String> scriptVariables; |
@@ -3033,39 +3200,40 @@ class SparkSqlJob { |
} |
/** |
- * The `Status` type defines a logical error model that is suitable for |
- * different programming environments, including REST APIs and RPC APIs. It is |
- * used by [gRPC](https://github.com/grpc). The error model is designed to be: - |
- * Simple to use and understand for most users - Flexible enough to meet |
- * unexpected needs # Overview The `Status` message contains three pieces of |
- * data: error code, error message, and error details. The error code should be |
- * an enum value of google.rpc.Code, but it may accept additional error codes if |
- * needed. The error message should be a developer-facing English message that |
- * helps developers *understand* and *resolve* the error. If a localized |
- * user-facing error message is needed, put the localized message in the error |
- * details or localize it in the client. The optional error details may contain |
- * arbitrary information about the error. There is a predefined set of error |
- * detail types in the package `google.rpc` which can be used for common error |
- * conditions. # Language mapping The `Status` message is the logical |
+ * The Status type defines a logical error model that is suitable for different |
+ * programming environments, including REST APIs and RPC APIs. It is used by |
+ * gRPC (https://github.com/grpc). The error model is designed to be: |
+ * Simple to use and understand for most users |
+ * Flexible enough to meet unexpected needs. Overview: The Status message contains
+ * three pieces of data: error code, error message, and error details. The error |
+ * code should be an enum value of google.rpc.Code, but it may accept additional |
+ * error codes if needed. The error message should be a developer-facing English |
+ * message that helps developers understand and resolve the error. If a |
+ * localized user-facing error message is needed, put the localized message in |
+ * the error details or localize it in the client. The optional error details |
+ * may contain arbitrary information about the error. There is a predefined set |
+ * of error detail types in the package google.rpc which can be used for common |
+ * error conditions. Language mapping: The Status message is the logical
* representation of the error model, but it is not necessarily the actual wire |
- * format. When the `Status` message is exposed in different client libraries |
- * and different wire protocols, it can be mapped differently. For example, it |
- * will likely be mapped to some exceptions in Java, but more likely mapped to |
- * some error codes in C. # Other uses The error model and the `Status` message |
- * can be used in a variety of environments, either with or without APIs, to |
- * provide a consistent developer experience across different environments. |
- * Example uses of this error model include: - Partial errors. If a service |
- * needs to return partial errors to the client, it may embed the `Status` in |
- * the normal response to indicate the partial errors. - Workflow errors. A |
- * typical workflow has multiple steps. Each step may have a `Status` message |
- * for error reporting purpose. - Batch operations. If a client uses batch |
- * request and batch response, the `Status` message should be used directly |
- * inside batch response, one for each error sub-response. - Asynchronous |
- * operations. If an API call embeds asynchronous operation results in its |
- * response, the status of those operations should be represented directly using |
- * the `Status` message. - Logging. If some API errors are stored in logs, the |
- * message `Status` could be used directly after any stripping needed for |
- * security/privacy reasons. |
+ * format. When the Status message is exposed in different client libraries and |
+ * different wire protocols, it can be mapped differently. For example, it will |
+ * likely be mapped to some exceptions in Java, but more likely mapped to some |
+ * error codes in C. Other uses: The error model and the Status message can be used
+ * in a variety of environments, either with or without APIs, to provide a |
+ * consistent developer experience across different environments. Example uses of
+ * this error model include: |
+ * Partial errors. If a service needs to return partial errors to the client, it |
+ * may embed the Status in the normal response to indicate the partial errors. |
+ * Workflow errors. A typical workflow has multiple steps. Each step may have a |
+ * Status message for error reporting purpose. |
+ * Batch operations. If a client uses batch request and batch response, the |
+ * Status message should be used directly inside batch response, one for each |
+ * error sub-response. |
+ * Asynchronous operations. If an API call embeds asynchronous operation results |
+ * in its response, the status of those operations should be represented |
+ * directly using the Status message. |
+ * Logging. If some API errors are stored in logs, the message Status could be |
+ * used directly after any stripping needed for security/privacy reasons. |
*/ |
class Status { |
/** The status code, which should be an enum value of google.rpc.Code. */ |
@@ -3116,7 +3284,7 @@ class Status { |
/** A request to submit a job. */ |
class SubmitJobRequest { |
- /** [Required] The job resource. */ |
+ /** Required The job resource. */ |
Job job; |
SubmitJobRequest(); |
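
To show where the pieces above meet, a sketch of assembling a SubmitJobRequest. It assumes the Job message exposes reference, pysparkJob, and scheduling fields as in the underlying Dataproc v1 API (those fields are not shown in this hunk), and the IDs and URIs are placeholders:

import 'package:googleapis/dataproc/v1.dart';

void main() {
  var request = new SubmitJobRequest()
    ..job = (new Job()
      // Assumed Job fields (reference, pysparkJob, scheduling); see the
      // JobReference, PySparkJob, and JobScheduling sketches earlier.
      ..reference = (new JobReference()
        ..projectId = 'my-project'
        ..jobId = 'pyspark-word-count')
      ..pysparkJob = (new PySparkJob()
        ..mainPythonFileUri = 'gs://my-bucket/jobs/word_count.py')
      ..scheduling = (new JobScheduling()..maxFailuresPerHour = 5));
  print(request.toJson());
}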
@@ -3138,31 +3306,31 @@ class SubmitJobRequest { |
/** |
* A YARN application created by a job. Application information is a subset of |
- * org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. **Beta |
- * Feature**: This report is available for testing purposes only. It may be |
+ * <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>. Beta
+ * Feature: This report is available for testing purposes only. It may be |
* changed before final release. |
*/ |
class YarnApplication { |
- /** [Required] The application name. */ |
+ /** Required The application name. */ |
core.String name; |
- /** [Required] The numerical progress of the application, from 1 to 100. */ |
+ /** Required The numerical progress of the application, from 1 to 100. */ |
core.double progress; |
/** |
- * [Required] The application state. |
+ * Required The application state. |
* Possible string values are: |
- * - "STATE_UNSPECIFIED" : A STATE_UNSPECIFIED. |
- * - "NEW" : A NEW. |
- * - "NEW_SAVING" : A NEW_SAVING. |
- * - "SUBMITTED" : A SUBMITTED. |
- * - "ACCEPTED" : A ACCEPTED. |
- * - "RUNNING" : A RUNNING. |
- * - "FINISHED" : A FINISHED. |
- * - "FAILED" : A FAILED. |
- * - "KILLED" : A KILLED. |
+ * - "STATE_UNSPECIFIED" : Status is unspecified. |
+ * - "NEW" : Status is NEW. |
+ * - "NEW_SAVING" : Status is NEW_SAVING. |
+ * - "SUBMITTED" : Status is SUBMITTED. |
+ * - "ACCEPTED" : Status is ACCEPTED. |
+ * - "RUNNING" : Status is RUNNING. |
+ * - "FINISHED" : Status is FINISHED. |
+ * - "FAILED" : Status is FAILED. |
+ * - "KILLED" : Status is KILLED. |
*/ |
core.String state; |
/** |
- * [Optional] The HTTP URL of the ApplicationMaster, HistoryServer, or |
+ * Optional The HTTP URL of the ApplicationMaster, HistoryServer, or |
* TimelineServer that provides application-specific information. The URL uses |
* the internal hostname, and requires a proxy server for resolution and, |
* possibly, access. |