Index: generated/googleapis/lib/dataproc/v1.dart |
diff --git a/generated/googleapis/lib/dataproc/v1.dart b/generated/googleapis/lib/dataproc/v1.dart |
index cab8750eeb0f26b451dd89675213ceed99850fc7..84e5ea7b9677bac68c9e562322cce232291b2cd3 100644 |
--- a/generated/googleapis/lib/dataproc/v1.dart |
+++ b/generated/googleapis/lib/dataproc/v1.dart |
@@ -9,76 +9,77 @@ import 'dart:convert' as convert; |
import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; |
import 'package:http/http.dart' as http; |
-export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' show |
- ApiRequestError, DetailedApiRequestError; |
+export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' |
+ show ApiRequestError, DetailedApiRequestError; |
const core.String USER_AGENT = 'dart-api-client dataproc/v1'; |
-/** Manages Hadoop-based clusters and jobs on Google Cloud Platform. */ |
+/// Manages Hadoop-based clusters and jobs on Google Cloud Platform. |
class DataprocApi { |
- /** View and manage your data across Google Cloud Platform services */ |
- static const CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"; |
- |
+ /// View and manage your data across Google Cloud Platform services |
+ static const CloudPlatformScope = |
+ "https://www.googleapis.com/auth/cloud-platform"; |
final commons.ApiRequester _requester; |
ProjectsResourceApi get projects => new ProjectsResourceApi(_requester); |
- DataprocApi(http.Client client, {core.String rootUrl: "https://dataproc.googleapis.com/", core.String servicePath: ""}) : |
- _requester = new commons.ApiRequester(client, rootUrl, servicePath, USER_AGENT); |
+ DataprocApi(http.Client client, |
+ {core.String rootUrl: "https://dataproc.googleapis.com/", |
+ core.String servicePath: ""}) |
+ : _requester = |
+ new commons.ApiRequester(client, rootUrl, servicePath, USER_AGENT); |
} |
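
For reference, a minimal sketch of constructing the reformatted client. The googleapis_auth package, the key-file path, and the project values below are assumptions for illustration, not part of this diff:

    import 'dart:io';
    import 'package:googleapis/dataproc/v1.dart' as dataproc;
    import 'package:googleapis_auth/auth_io.dart' as auth;

    main() async {
      // Hypothetical key file; any authenticated http.Client works here.
      var json = await new File('service-account.json').readAsString();
      var credentials = new auth.ServiceAccountCredentials.fromJson(json);
      var client = await auth.clientViaServiceAccount(
          credentials, [dataproc.DataprocApi.CloudPlatformScope]);
      var api = new dataproc.DataprocApi(client);
      // api.projects.regions.{clusters,jobs,operations} mirror the resource
      // classes below.
      client.close();
    }
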
- |
class ProjectsResourceApi { |
final commons.ApiRequester _requester; |
- ProjectsRegionsResourceApi get regions => new ProjectsRegionsResourceApi(_requester); |
+ ProjectsRegionsResourceApi get regions => |
+ new ProjectsRegionsResourceApi(_requester); |
- ProjectsResourceApi(commons.ApiRequester client) : |
- _requester = client; |
+ ProjectsResourceApi(commons.ApiRequester client) : _requester = client; |
} |
- |
class ProjectsRegionsResourceApi { |
final commons.ApiRequester _requester; |
- ProjectsRegionsClustersResourceApi get clusters => new ProjectsRegionsClustersResourceApi(_requester); |
- ProjectsRegionsJobsResourceApi get jobs => new ProjectsRegionsJobsResourceApi(_requester); |
- ProjectsRegionsOperationsResourceApi get operations => new ProjectsRegionsOperationsResourceApi(_requester); |
+ ProjectsRegionsClustersResourceApi get clusters => |
+ new ProjectsRegionsClustersResourceApi(_requester); |
+ ProjectsRegionsJobsResourceApi get jobs => |
+ new ProjectsRegionsJobsResourceApi(_requester); |
+ ProjectsRegionsOperationsResourceApi get operations => |
+ new ProjectsRegionsOperationsResourceApi(_requester); |
- ProjectsRegionsResourceApi(commons.ApiRequester client) : |
- _requester = client; |
+ ProjectsRegionsResourceApi(commons.ApiRequester client) : _requester = client; |
} |
- |
class ProjectsRegionsClustersResourceApi { |
final commons.ApiRequester _requester; |
- ProjectsRegionsClustersResourceApi(commons.ApiRequester client) : |
- _requester = client; |
- |
- /** |
- * Creates a cluster in a project. |
- * |
- * [request] - The metadata request object. |
- * |
- * Request parameters: |
- * |
- * [projectId] - Required. The ID of the Google Cloud Platform project that |
- * the cluster belongs to. |
- * |
- * [region] - Required. The Cloud Dataproc region in which to handle the |
- * request. |
- * |
- * Completes with a [Operation]. |
- * |
- * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
- * error. |
- * |
- * If the used [http.Client] completes with an error when making a REST call, |
- * this method will complete with the same error. |
- */ |
- async.Future<Operation> create(Cluster request, core.String projectId, core.String region) { |
+ ProjectsRegionsClustersResourceApi(commons.ApiRequester client) |
+ : _requester = client; |
+ |
+ /// Creates a cluster in a project. |
+ /// |
+ /// [request] - The metadata request object. |
+ /// |
+ /// Request parameters: |
+ /// |
+ /// [projectId] - Required. The ID of the Google Cloud Platform project that |
+ /// the cluster belongs to. |
+ /// |
+ /// [region] - Required. The Cloud Dataproc region in which to handle the |
+ /// request. |
+ /// |
+ /// Completes with an [Operation].
+ /// |
+ /// Completes with a [commons.ApiRequestError] if the API endpoint returned |
+ /// an error. |
+ /// |
+ /// If the used [http.Client] completes with an error when making a REST |
+ /// call, this method will complete with the same error. |
+ async.Future<Operation> create( |
+ Cluster request, core.String projectId, core.String region) { |
var _url = null; |
var _queryParams = new core.Map(); |
var _uploadMedia = null; |
@@ -96,40 +97,42 @@ class ProjectsRegionsClustersResourceApi { |
throw new core.ArgumentError("Parameter region is required."); |
} |
- _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters'; |
+ _url = 'v1/projects/' + |
+ commons.Escaper.ecapeVariable('$projectId') + |
+ '/regions/' + |
+ commons.Escaper.ecapeVariable('$region') + |
+ '/clusters'; |
- var _response = _requester.request(_url, |
- "POST", |
- body: _body, |
- queryParams: _queryParams, |
- uploadOptions: _uploadOptions, |
- uploadMedia: _uploadMedia, |
- downloadOptions: _downloadOptions); |
+ var _response = _requester.request(_url, "POST", |
+ body: _body, |
+ queryParams: _queryParams, |
+ uploadOptions: _uploadOptions, |
+ uploadMedia: _uploadMedia, |
+ downloadOptions: _downloadOptions); |
return _response.then((data) => new Operation.fromJson(data)); |
} |
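
A sketch of calling the create method above. ClusterConfig's nested fields (zone, machine types, and so on) live outside this hunk, so the empty config here is a placeholder, as are the project and region values:

    import 'dart:async';
    import 'package:googleapis/dataproc/v1.dart' as dataproc;

    Future<dataproc.Operation> createCluster(dataproc.DataprocApi api) {
      var cluster = new dataproc.Cluster()
        ..clusterName = 'example-cluster'
        ..config = new dataproc.ClusterConfig(); // placeholder config
      return api.projects.regions.clusters.create(
          cluster, 'my-project', 'global');
    }
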
- /** |
- * Deletes a cluster in a project. |
- * |
- * Request parameters: |
- * |
- * [projectId] - Required. The ID of the Google Cloud Platform project that |
- * the cluster belongs to. |
- * |
- * [region] - Required. The Cloud Dataproc region in which to handle the |
- * request. |
- * |
- * [clusterName] - Required. The cluster name. |
- * |
- * Completes with a [Operation]. |
- * |
- * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
- * error. |
- * |
- * If the used [http.Client] completes with an error when making a REST call, |
- * this method will complete with the same error. |
- */ |
- async.Future<Operation> delete(core.String projectId, core.String region, core.String clusterName) { |
+ /// Deletes a cluster in a project. |
+ /// |
+ /// Request parameters: |
+ /// |
+ /// [projectId] - Required. The ID of the Google Cloud Platform project that |
+ /// the cluster belongs to. |
+ /// |
+ /// [region] - Required. The Cloud Dataproc region in which to handle the |
+ /// request. |
+ /// |
+ /// [clusterName] - Required. The cluster name. |
+ /// |
+ /// Completes with an [Operation].
+ /// |
+ /// Completes with a [commons.ApiRequestError] if the API endpoint returned |
+ /// an error. |
+ /// |
+ /// If the used [http.Client] completes with an error when making a REST |
+ /// call, this method will complete with the same error. |
+ async.Future<Operation> delete( |
+ core.String projectId, core.String region, core.String clusterName) { |
var _url = null; |
var _queryParams = new core.Map(); |
var _uploadMedia = null; |
@@ -147,43 +150,46 @@ class ProjectsRegionsClustersResourceApi { |
throw new core.ArgumentError("Parameter clusterName is required."); |
} |
- _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters/' + commons.Escaper.ecapeVariable('$clusterName'); |
+ _url = 'v1/projects/' + |
+ commons.Escaper.ecapeVariable('$projectId') + |
+ '/regions/' + |
+ commons.Escaper.ecapeVariable('$region') + |
+ '/clusters/' + |
+ commons.Escaper.ecapeVariable('$clusterName'); |
- var _response = _requester.request(_url, |
- "DELETE", |
- body: _body, |
- queryParams: _queryParams, |
- uploadOptions: _uploadOptions, |
- uploadMedia: _uploadMedia, |
- downloadOptions: _downloadOptions); |
+ var _response = _requester.request(_url, "DELETE", |
+ body: _body, |
+ queryParams: _queryParams, |
+ uploadOptions: _uploadOptions, |
+ uploadMedia: _uploadMedia, |
+ downloadOptions: _downloadOptions); |
return _response.then((data) => new Operation.fromJson(data)); |
} |
- /** |
- * Gets cluster diagnostic information. After the operation completes, the |
- * Operation.response field contains DiagnoseClusterOutputLocation. |
- * |
- * [request] - The metadata request object. |
- * |
- * Request parameters: |
- * |
- * [projectId] - Required. The ID of the Google Cloud Platform project that |
- * the cluster belongs to. |
- * |
- * [region] - Required. The Cloud Dataproc region in which to handle the |
- * request. |
- * |
- * [clusterName] - Required. The cluster name. |
- * |
- * Completes with a [Operation]. |
- * |
- * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
- * error. |
- * |
- * If the used [http.Client] completes with an error when making a REST call, |
- * this method will complete with the same error. |
- */ |
- async.Future<Operation> diagnose(DiagnoseClusterRequest request, core.String projectId, core.String region, core.String clusterName) { |
+ /// Gets cluster diagnostic information. After the operation completes, the |
+ /// Operation.response field contains DiagnoseClusterOutputLocation. |
+ /// |
+ /// [request] - The metadata request object. |
+ /// |
+ /// Request parameters: |
+ /// |
+ /// [projectId] - Required. The ID of the Google Cloud Platform project that |
+ /// the cluster belongs to. |
+ /// |
+ /// [region] - Required. The Cloud Dataproc region in which to handle the |
+ /// request. |
+ /// |
+ /// [clusterName] - Required. The cluster name. |
+ /// |
+ /// Completes with an [Operation].
+ /// |
+ /// Completes with a [commons.ApiRequestError] if the API endpoint returned |
+ /// an error. |
+ /// |
+ /// If the used [http.Client] completes with an error when making a REST |
+ /// call, this method will complete with the same error. |
+ async.Future<Operation> diagnose(DiagnoseClusterRequest request, |
+ core.String projectId, core.String region, core.String clusterName) { |
var _url = null; |
var _queryParams = new core.Map(); |
var _uploadMedia = null; |
@@ -204,40 +210,44 @@ class ProjectsRegionsClustersResourceApi { |
throw new core.ArgumentError("Parameter clusterName is required."); |
} |
- _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters/' + commons.Escaper.ecapeVariable('$clusterName') + ':diagnose'; |
- |
- var _response = _requester.request(_url, |
- "POST", |
- body: _body, |
- queryParams: _queryParams, |
- uploadOptions: _uploadOptions, |
- uploadMedia: _uploadMedia, |
- downloadOptions: _downloadOptions); |
+ _url = 'v1/projects/' + |
+ commons.Escaper.ecapeVariable('$projectId') + |
+ '/regions/' + |
+ commons.Escaper.ecapeVariable('$region') + |
+ '/clusters/' + |
+ commons.Escaper.ecapeVariable('$clusterName') + |
+ ':diagnose'; |
+ |
+ var _response = _requester.request(_url, "POST", |
+ body: _body, |
+ queryParams: _queryParams, |
+ uploadOptions: _uploadOptions, |
+ uploadMedia: _uploadMedia, |
+ downloadOptions: _downloadOptions); |
return _response.then((data) => new Operation.fromJson(data)); |
} |
- /** |
- * Gets the resource representation for a cluster in a project. |
- * |
- * Request parameters: |
- * |
- * [projectId] - Required. The ID of the Google Cloud Platform project that |
- * the cluster belongs to. |
- * |
- * [region] - Required. The Cloud Dataproc region in which to handle the |
- * request. |
- * |
- * [clusterName] - Required. The cluster name. |
- * |
- * Completes with a [Cluster]. |
- * |
- * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
- * error. |
- * |
- * If the used [http.Client] completes with an error when making a REST call, |
- * this method will complete with the same error. |
- */ |
- async.Future<Cluster> get(core.String projectId, core.String region, core.String clusterName) { |
+ /// Gets the resource representation for a cluster in a project. |
+ /// |
+ /// Request parameters: |
+ /// |
+ /// [projectId] - Required. The ID of the Google Cloud Platform project that |
+ /// the cluster belongs to. |
+ /// |
+ /// [region] - Required. The Cloud Dataproc region in which to handle the |
+ /// request. |
+ /// |
+ /// [clusterName] - Required. The cluster name. |
+ /// |
+ /// Completes with a [Cluster]. |
+ /// |
+ /// Completes with a [commons.ApiRequestError] if the API endpoint returned |
+ /// an error. |
+ /// |
+ /// If the used [http.Client] completes with an error when making a REST |
+ /// call, this method will complete with the same error. |
+ async.Future<Cluster> get( |
+ core.String projectId, core.String region, core.String clusterName) { |
var _url = null; |
var _queryParams = new core.Map(); |
var _uploadMedia = null; |
@@ -255,54 +265,59 @@ class ProjectsRegionsClustersResourceApi { |
throw new core.ArgumentError("Parameter clusterName is required."); |
} |
- _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters/' + commons.Escaper.ecapeVariable('$clusterName'); |
+ _url = 'v1/projects/' + |
+ commons.Escaper.ecapeVariable('$projectId') + |
+ '/regions/' + |
+ commons.Escaper.ecapeVariable('$region') + |
+ '/clusters/' + |
+ commons.Escaper.ecapeVariable('$clusterName'); |
- var _response = _requester.request(_url, |
- "GET", |
- body: _body, |
- queryParams: _queryParams, |
- uploadOptions: _uploadOptions, |
- uploadMedia: _uploadMedia, |
- downloadOptions: _downloadOptions); |
+ var _response = _requester.request(_url, "GET", |
+ body: _body, |
+ queryParams: _queryParams, |
+ uploadOptions: _uploadOptions, |
+ uploadMedia: _uploadMedia, |
+ downloadOptions: _downloadOptions); |
return _response.then((data) => new Cluster.fromJson(data)); |
} |
- /** |
- * Lists all regions/{region}/clusters in a project. |
- * |
- * Request parameters: |
- * |
- * [projectId] - Required. The ID of the Google Cloud Platform project that |
- * the cluster belongs to. |
- * |
- * [region] - Required. The Cloud Dataproc region in which to handle the |
- * request. |
- * |
- * [pageSize] - Optional. The standard List page size. |
- * |
- * [filter] - Optional. A filter constraining the clusters to list. Filters |
- * are case-sensitive and have the following syntax:field = value AND field = |
- * value ...where field is one of status.state, clusterName, or labels.[KEY], |
- * and [KEY] is a label key. value can be * to match all values. status.state |
- * can be one of the following: ACTIVE, INACTIVE, CREATING, RUNNING, ERROR, |
- * DELETING, or UPDATING. ACTIVE contains the CREATING, UPDATING, and RUNNING |
- * states. INACTIVE contains the DELETING and ERROR states. clusterName is the |
- * name of the cluster provided at creation time. Only the logical AND |
- * operator is supported; space-separated items are treated as having an |
- * implicit AND operator.Example filter:status.state = ACTIVE AND clusterName |
- * = mycluster AND labels.env = staging AND labels.starred = * |
- * |
- * [pageToken] - Optional. The standard List page token. |
- * |
- * Completes with a [ListClustersResponse]. |
- * |
- * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
- * error. |
- * |
- * If the used [http.Client] completes with an error when making a REST call, |
- * this method will complete with the same error. |
- */ |
- async.Future<ListClustersResponse> list(core.String projectId, core.String region, {core.int pageSize, core.String filter, core.String pageToken}) { |
+ /// Lists all regions/{region}/clusters in a project. |
+ /// |
+ /// Request parameters: |
+ /// |
+ /// [projectId] - Required. The ID of the Google Cloud Platform project that |
+ /// the cluster belongs to. |
+ /// |
+ /// [region] - Required. The Cloud Dataproc region in which to handle the |
+ /// request. |
+ /// |
+ /// [filter] - Optional. A filter constraining the clusters to list. Filters |
+ /// are case-sensitive and have the following syntax: field = value AND
+ /// field = value ... where field is one of status.state, clusterName, or
+ /// labels.[KEY], and [KEY] is a label key. value can be * to match all |
+ /// values. status.state can be one of the following: ACTIVE, INACTIVE, |
+ /// CREATING, RUNNING, ERROR, DELETING, or UPDATING. ACTIVE contains the |
+ /// CREATING, UPDATING, and RUNNING states. INACTIVE contains the DELETING |
+ /// and ERROR states. clusterName is the name of the cluster provided at |
+ /// creation time. Only the logical AND operator is supported; |
+ /// space-separated items are treated as having an implicit AND |
+ /// operator. Example filter: status.state = ACTIVE AND clusterName = mycluster
+ /// AND labels.env = staging AND labels.starred = * |
+ /// |
+ /// [pageToken] - Optional. The standard List page token. |
+ /// |
+ /// [pageSize] - Optional. The standard List page size. |
+ /// |
+ /// Completes with a [ListClustersResponse]. |
+ /// |
+ /// Completes with a [commons.ApiRequestError] if the API endpoint returned |
+ /// an error. |
+ /// |
+ /// If the used [http.Client] completes with an error when making a REST |
+ /// call, this method will complete with the same error. |
+ async.Future<ListClustersResponse> list( |
+ core.String projectId, core.String region, |
+ {core.String filter, core.String pageToken, core.int pageSize}) { |
var _url = null; |
var _queryParams = new core.Map(); |
var _uploadMedia = null; |
@@ -316,84 +331,88 @@ class ProjectsRegionsClustersResourceApi { |
if (region == null) { |
throw new core.ArgumentError("Parameter region is required."); |
} |
- if (pageSize != null) { |
- _queryParams["pageSize"] = ["${pageSize}"]; |
- } |
if (filter != null) { |
_queryParams["filter"] = [filter]; |
} |
if (pageToken != null) { |
_queryParams["pageToken"] = [pageToken]; |
} |
+ if (pageSize != null) { |
+ _queryParams["pageSize"] = ["${pageSize}"]; |
+ } |
- _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters'; |
+ _url = 'v1/projects/' + |
+ commons.Escaper.ecapeVariable('$projectId') + |
+ '/regions/' + |
+ commons.Escaper.ecapeVariable('$region') + |
+ '/clusters'; |
- var _response = _requester.request(_url, |
- "GET", |
- body: _body, |
- queryParams: _queryParams, |
- uploadOptions: _uploadOptions, |
- uploadMedia: _uploadMedia, |
- downloadOptions: _downloadOptions); |
+ var _response = _requester.request(_url, "GET", |
+ body: _body, |
+ queryParams: _queryParams, |
+ uploadOptions: _uploadOptions, |
+ uploadMedia: _uploadMedia, |
+ downloadOptions: _downloadOptions); |
return _response.then((data) => new ListClustersResponse.fromJson(data)); |
} |
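
A sketch exercising the filter grammar documented above (the spaces around = and the * wildcard are part of the syntax), assuming ListClustersResponse exposes a clusters list like the other generated list responses:

    import 'dart:async';
    import 'package:googleapis/dataproc/v1.dart' as dataproc;

    Future listStagingClusters(dataproc.DataprocApi api) async {
      var response = await api.projects.regions.clusters.list(
          'my-project', 'global',
          filter: 'status.state = ACTIVE AND labels.env = staging',
          pageSize: 50);
      response.clusters?.forEach(
          (c) => print('${c.clusterName} (${c.clusterUuid})'));
    }
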
- /** |
- * Updates a cluster in a project. |
- * |
- * [request] - The metadata request object. |
- * |
- * Request parameters: |
- * |
- * [projectId] - Required. The ID of the Google Cloud Platform project the |
- * cluster belongs to. |
- * |
- * [region] - Required. The Cloud Dataproc region in which to handle the |
- * request. |
- * |
- * [clusterName] - Required. The cluster name. |
- * |
- * [updateMask] - Required. Specifies the path, relative to Cluster, of the |
- * field to update. For example, to change the number of workers in a cluster |
- * to 5, the update_mask parameter would be specified as |
- * config.worker_config.num_instances, and the PATCH request body would |
- * specify the new value, as follows: |
- * { |
- * "config":{ |
- * "workerConfig":{ |
- * "numInstances":"5" |
- * } |
- * } |
- * } |
- * Similarly, to change the number of preemptible workers in a cluster to 5, |
- * the update_mask parameter would be |
- * config.secondary_worker_config.num_instances, and the PATCH request body |
- * would be set as follows: |
- * { |
- * "config":{ |
- * "secondaryWorkerConfig":{ |
- * "numInstances":"5" |
- * } |
- * } |
- * } |
- * <strong>Note:</strong> Currently, only the following fields can be |
- * updated:<table> <tbody> <tr> <td><strong>Mask</strong></td> |
- * <td><strong>Purpose</strong></td> </tr> <tr> |
- * <td><strong><em>labels</em></strong></td> <td>Update labels</td> </tr> |
- * <tr> <td><strong><em>config.worker_config.num_instances</em></strong></td> |
- * <td>Resize primary worker group</td> </tr> <tr> |
- * <td><strong><em>config.secondary_worker_config.num_instances</em></strong></td> |
- * <td>Resize secondary worker group</td> </tr> </tbody> </table> |
- * |
- * Completes with a [Operation]. |
- * |
- * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
- * error. |
- * |
- * If the used [http.Client] completes with an error when making a REST call, |
- * this method will complete with the same error. |
- */ |
- async.Future<Operation> patch(Cluster request, core.String projectId, core.String region, core.String clusterName, {core.String updateMask}) { |
+ /// Updates a cluster in a project. |
+ /// |
+ /// [request] - The metadata request object. |
+ /// |
+ /// Request parameters: |
+ /// |
+ /// [projectId] - Required. The ID of the Google Cloud Platform project the |
+ /// cluster belongs to. |
+ /// |
+ /// [region] - Required. The Cloud Dataproc region in which to handle the |
+ /// request. |
+ /// |
+ /// [clusterName] - Required. The cluster name. |
+ /// |
+ /// [updateMask] - Required. Specifies the path, relative to Cluster, of the |
+ /// field to update. For example, to change the number of workers in a |
+ /// cluster to 5, the update_mask parameter would be specified as |
+ /// config.worker_config.num_instances, and the PATCH request body would |
+ /// specify the new value, as follows: |
+ /// { |
+ /// "config":{ |
+ /// "workerConfig":{ |
+ /// "numInstances":"5" |
+ /// } |
+ /// } |
+ /// } |
+ /// Similarly, to change the number of preemptible workers in a cluster to 5, |
+ /// the update_mask parameter would be |
+ /// config.secondary_worker_config.num_instances, and the PATCH request body |
+ /// would be set as follows: |
+ /// { |
+ /// "config":{ |
+ /// "secondaryWorkerConfig":{ |
+ /// "numInstances":"5" |
+ /// } |
+ /// } |
+ /// } |
+ /// <strong>Note:</strong> Currently, only the following fields can be |
+ /// updated:<table> <tbody> <tr> <td><strong>Mask</strong></td> |
+ /// <td><strong>Purpose</strong></td> </tr> <tr> |
+ /// <td><strong><em>labels</em></strong></td> <td>Update labels</td> </tr> |
+ /// <tr> |
+ /// <td><strong><em>config.worker_config.num_instances</em></strong></td> |
+ /// <td>Resize primary worker group</td> </tr> <tr> |
+ /// <td><strong><em>config.secondary_worker_config.num_instances</em></strong></td> |
+ /// <td>Resize secondary worker group</td> </tr> </tbody> </table> |
+ /// |
+ /// Completes with an [Operation].
+ /// |
+ /// Completes with a [commons.ApiRequestError] if the API endpoint returned |
+ /// an error. |
+ /// |
+ /// If the used [http.Client] completes with an error when making a REST |
+ /// call, this method will complete with the same error. |
+ async.Future<Operation> patch(Cluster request, core.String projectId, |
+ core.String region, core.String clusterName, |
+ {core.String updateMask}) { |
var _url = null; |
var _queryParams = new core.Map(); |
var _uploadMedia = null; |
@@ -417,52 +436,54 @@ class ProjectsRegionsClustersResourceApi { |
_queryParams["updateMask"] = [updateMask]; |
} |
- _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters/' + commons.Escaper.ecapeVariable('$clusterName'); |
+ _url = 'v1/projects/' + |
+ commons.Escaper.ecapeVariable('$projectId') + |
+ '/regions/' + |
+ commons.Escaper.ecapeVariable('$region') + |
+ '/clusters/' + |
+ commons.Escaper.ecapeVariable('$clusterName'); |
- var _response = _requester.request(_url, |
- "PATCH", |
- body: _body, |
- queryParams: _queryParams, |
- uploadOptions: _uploadOptions, |
- uploadMedia: _uploadMedia, |
- downloadOptions: _downloadOptions); |
+ var _response = _requester.request(_url, "PATCH", |
+ body: _body, |
+ queryParams: _queryParams, |
+ uploadOptions: _uploadOptions, |
+ uploadMedia: _uploadMedia, |
+ downloadOptions: _downloadOptions); |
return _response.then((data) => new Operation.fromJson(data)); |
} |
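
A sketch of the worker-resize example from the doc comment above; InstanceGroupConfig and its numInstances field come from parts of the generated file outside this diff:

    import 'dart:async';
    import 'package:googleapis/dataproc/v1.dart' as dataproc;

    Future<dataproc.Operation> resizeWorkers(dataproc.DataprocApi api) {
      // Mirrors the {"config":{"workerConfig":{"numInstances":"5"}}} body
      // shown in the doc comment.
      var patch = new dataproc.Cluster()
        ..config = (new dataproc.ClusterConfig()
          ..workerConfig =
              (new dataproc.InstanceGroupConfig()..numInstances = 5));
      return api.projects.regions.clusters.patch(
          patch, 'my-project', 'global', 'example-cluster',
          updateMask: 'config.worker_config.num_instances');
    }
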
- |
} |
- |
class ProjectsRegionsJobsResourceApi { |
final commons.ApiRequester _requester; |
- ProjectsRegionsJobsResourceApi(commons.ApiRequester client) : |
- _requester = client; |
- |
- /** |
- * Starts a job cancellation request. To access the job resource after |
- * cancellation, call regions/{region}/jobs.list or regions/{region}/jobs.get. |
- * |
- * [request] - The metadata request object. |
- * |
- * Request parameters: |
- * |
- * [projectId] - Required. The ID of the Google Cloud Platform project that |
- * the job belongs to. |
- * |
- * [region] - Required. The Cloud Dataproc region in which to handle the |
- * request. |
- * |
- * [jobId] - Required. The job ID. |
- * |
- * Completes with a [Job]. |
- * |
- * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
- * error. |
- * |
- * If the used [http.Client] completes with an error when making a REST call, |
- * this method will complete with the same error. |
- */ |
- async.Future<Job> cancel(CancelJobRequest request, core.String projectId, core.String region, core.String jobId) { |
+ ProjectsRegionsJobsResourceApi(commons.ApiRequester client) |
+ : _requester = client; |
+ |
+ /// Starts a job cancellation request. To access the job resource after |
+ /// cancellation, call regions/{region}/jobs.list or |
+ /// regions/{region}/jobs.get. |
+ /// |
+ /// [request] - The metadata request object. |
+ /// |
+ /// Request parameters: |
+ /// |
+ /// [projectId] - Required. The ID of the Google Cloud Platform project that |
+ /// the job belongs to. |
+ /// |
+ /// [region] - Required. The Cloud Dataproc region in which to handle the |
+ /// request. |
+ /// |
+ /// [jobId] - Required. The job ID. |
+ /// |
+ /// Completes with a [Job]. |
+ /// |
+ /// Completes with a [commons.ApiRequestError] if the API endpoint returned |
+ /// an error. |
+ /// |
+ /// If the used [http.Client] completes with an error when making a REST |
+ /// call, this method will complete with the same error. |
+ async.Future<Job> cancel(CancelJobRequest request, core.String projectId, |
+ core.String region, core.String jobId) { |
var _url = null; |
var _queryParams = new core.Map(); |
var _uploadMedia = null; |
@@ -483,41 +504,45 @@ class ProjectsRegionsJobsResourceApi { |
throw new core.ArgumentError("Parameter jobId is required."); |
} |
- _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + ':cancel'; |
- |
- var _response = _requester.request(_url, |
- "POST", |
- body: _body, |
- queryParams: _queryParams, |
- uploadOptions: _uploadOptions, |
- uploadMedia: _uploadMedia, |
- downloadOptions: _downloadOptions); |
+ _url = 'v1/projects/' + |
+ commons.Escaper.ecapeVariable('$projectId') + |
+ '/regions/' + |
+ commons.Escaper.ecapeVariable('$region') + |
+ '/jobs/' + |
+ commons.Escaper.ecapeVariable('$jobId') + |
+ ':cancel'; |
+ |
+ var _response = _requester.request(_url, "POST", |
+ body: _body, |
+ queryParams: _queryParams, |
+ uploadOptions: _uploadOptions, |
+ uploadMedia: _uploadMedia, |
+ downloadOptions: _downloadOptions); |
return _response.then((data) => new Job.fromJson(data)); |
} |
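
A sketch of the cancel-then-reread pattern the doc comment recommends; project, region, and job IDs are placeholders:

    import 'dart:async';
    import 'package:googleapis/dataproc/v1.dart' as dataproc;

    Future<dataproc.Job> cancelAndReload(
        dataproc.DataprocApi api, String jobId) async {
      await api.projects.regions.jobs.cancel(
          new dataproc.CancelJobRequest(), 'my-project', 'global', jobId);
      // Per the docs, fetch the job again to observe its post-cancel state.
      return api.projects.regions.jobs.get('my-project', 'global', jobId);
    }
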
- /** |
- * Deletes the job from the project. If the job is active, the delete fails, |
- * and the response returns FAILED_PRECONDITION. |
- * |
- * Request parameters: |
- * |
- * [projectId] - Required. The ID of the Google Cloud Platform project that |
- * the job belongs to. |
- * |
- * [region] - Required. The Cloud Dataproc region in which to handle the |
- * request. |
- * |
- * [jobId] - Required. The job ID. |
- * |
- * Completes with a [Empty]. |
- * |
- * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
- * error. |
- * |
- * If the used [http.Client] completes with an error when making a REST call, |
- * this method will complete with the same error. |
- */ |
- async.Future<Empty> delete(core.String projectId, core.String region, core.String jobId) { |
+ /// Deletes the job from the project. If the job is active, the delete fails, |
+ /// and the response returns FAILED_PRECONDITION. |
+ /// |
+ /// Request parameters: |
+ /// |
+ /// [projectId] - Required. The ID of the Google Cloud Platform project that |
+ /// the job belongs to. |
+ /// |
+ /// [region] - Required. The Cloud Dataproc region in which to handle the |
+ /// request. |
+ /// |
+ /// [jobId] - Required. The job ID. |
+ /// |
+ /// Completes with an [Empty].
+ /// |
+ /// Completes with a [commons.ApiRequestError] if the API endpoint returned |
+ /// an error. |
+ /// |
+ /// If the used [http.Client] completes with an error when making a REST |
+ /// call, this method will complete with the same error. |
+ async.Future<Empty> delete( |
+ core.String projectId, core.String region, core.String jobId) { |
var _url = null; |
var _queryParams = new core.Map(); |
var _uploadMedia = null; |
@@ -535,40 +560,43 @@ class ProjectsRegionsJobsResourceApi { |
throw new core.ArgumentError("Parameter jobId is required."); |
} |
- _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId'); |
+ _url = 'v1/projects/' + |
+ commons.Escaper.ecapeVariable('$projectId') + |
+ '/regions/' + |
+ commons.Escaper.ecapeVariable('$region') + |
+ '/jobs/' + |
+ commons.Escaper.ecapeVariable('$jobId'); |
- var _response = _requester.request(_url, |
- "DELETE", |
- body: _body, |
- queryParams: _queryParams, |
- uploadOptions: _uploadOptions, |
- uploadMedia: _uploadMedia, |
- downloadOptions: _downloadOptions); |
+ var _response = _requester.request(_url, "DELETE", |
+ body: _body, |
+ queryParams: _queryParams, |
+ uploadOptions: _uploadOptions, |
+ uploadMedia: _uploadMedia, |
+ downloadOptions: _downloadOptions); |
return _response.then((data) => new Empty.fromJson(data)); |
} |
- /** |
- * Gets the resource representation for a job in a project. |
- * |
- * Request parameters: |
- * |
- * [projectId] - Required. The ID of the Google Cloud Platform project that |
- * the job belongs to. |
- * |
- * [region] - Required. The Cloud Dataproc region in which to handle the |
- * request. |
- * |
- * [jobId] - Required. The job ID. |
- * |
- * Completes with a [Job]. |
- * |
- * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
- * error. |
- * |
- * If the used [http.Client] completes with an error when making a REST call, |
- * this method will complete with the same error. |
- */ |
- async.Future<Job> get(core.String projectId, core.String region, core.String jobId) { |
+ /// Gets the resource representation for a job in a project. |
+ /// |
+ /// Request parameters: |
+ /// |
+ /// [projectId] - Required. The ID of the Google Cloud Platform project that |
+ /// the job belongs to. |
+ /// |
+ /// [region] - Required. The Cloud Dataproc region in which to handle the |
+ /// request. |
+ /// |
+ /// [jobId] - Required. The job ID. |
+ /// |
+ /// Completes with a [Job]. |
+ /// |
+ /// Completes with a [commons.ApiRequestError] if the API endpoint returned |
+ /// an error. |
+ /// |
+ /// If the used [http.Client] completes with an error when making a REST |
+ /// call, this method will complete with the same error. |
+ async.Future<Job> get( |
+ core.String projectId, core.String region, core.String jobId) { |
var _url = null; |
var _queryParams = new core.Map(); |
var _uploadMedia = null; |
@@ -586,62 +614,69 @@ class ProjectsRegionsJobsResourceApi { |
throw new core.ArgumentError("Parameter jobId is required."); |
} |
- _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId'); |
+ _url = 'v1/projects/' + |
+ commons.Escaper.ecapeVariable('$projectId') + |
+ '/regions/' + |
+ commons.Escaper.ecapeVariable('$region') + |
+ '/jobs/' + |
+ commons.Escaper.ecapeVariable('$jobId'); |
- var _response = _requester.request(_url, |
- "GET", |
- body: _body, |
- queryParams: _queryParams, |
- uploadOptions: _uploadOptions, |
- uploadMedia: _uploadMedia, |
- downloadOptions: _downloadOptions); |
+ var _response = _requester.request(_url, "GET", |
+ body: _body, |
+ queryParams: _queryParams, |
+ uploadOptions: _uploadOptions, |
+ uploadMedia: _uploadMedia, |
+ downloadOptions: _downloadOptions); |
return _response.then((data) => new Job.fromJson(data)); |
} |
- /** |
- * Lists regions/{region}/jobs in a project. |
- * |
- * Request parameters: |
- * |
- * [projectId] - Required. The ID of the Google Cloud Platform project that |
- * the job belongs to. |
- * |
- * [region] - Required. The Cloud Dataproc region in which to handle the |
- * request. |
- * |
- * [filter] - Optional. A filter constraining the jobs to list. Filters are |
- * case-sensitive and have the following syntax:field = value AND field = |
- * value ...where field is status.state or labels.[KEY], and [KEY] is a label |
- * key. value can be * to match all values. status.state can be either ACTIVE |
- * or INACTIVE. Only the logical AND operator is supported; space-separated |
- * items are treated as having an implicit AND operator.Example |
- * filter:status.state = ACTIVE AND labels.env = staging AND labels.starred = |
- * * |
- * |
- * [jobStateMatcher] - Optional. Specifies enumerated categories of jobs to |
- * list (default = match ALL jobs). |
- * Possible string values are: |
- * - "ALL" : A ALL. |
- * - "ACTIVE" : A ACTIVE. |
- * - "NON_ACTIVE" : A NON_ACTIVE. |
- * |
- * [pageToken] - Optional. The page token, returned by a previous call, to |
- * request the next page of results. |
- * |
- * [pageSize] - Optional. The number of results to return in each response. |
- * |
- * [clusterName] - Optional. If set, the returned jobs list includes only jobs |
- * that were submitted to the named cluster. |
- * |
- * Completes with a [ListJobsResponse]. |
- * |
- * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
- * error. |
- * |
- * If the used [http.Client] completes with an error when making a REST call, |
- * this method will complete with the same error. |
- */ |
- async.Future<ListJobsResponse> list(core.String projectId, core.String region, {core.String filter, core.String jobStateMatcher, core.String pageToken, core.int pageSize, core.String clusterName}) { |
+ /// Lists regions/{region}/jobs in a project. |
+ /// |
+ /// Request parameters: |
+ /// |
+ /// [projectId] - Required. The ID of the Google Cloud Platform project that |
+ /// the job belongs to. |
+ /// |
+ /// [region] - Required. The Cloud Dataproc region in which to handle the |
+ /// request. |
+ /// |
+ /// [pageToken] - Optional. The page token, returned by a previous call, to |
+ /// request the next page of results. |
+ /// |
+ /// [pageSize] - Optional. The number of results to return in each response. |
+ /// |
+ /// [clusterName] - Optional. If set, the returned jobs list includes only |
+ /// jobs that were submitted to the named cluster. |
+ /// |
+ /// [filter] - Optional. A filter constraining the jobs to list. Filters are |
+ /// case-sensitive and have the following syntax: field = value AND field =
+ /// value ... where field is status.state or labels.[KEY], and [KEY] is a
+ /// label key. value can be * to match all values. status.state can be either |
+ /// ACTIVE or INACTIVE. Only the logical AND operator is supported; |
+ /// space-separated items are treated as having an implicit AND |
+ /// operator. Example filter: status.state = ACTIVE AND labels.env = staging
+ /// AND labels.starred = * |
+ /// |
+ /// [jobStateMatcher] - Optional. Specifies enumerated categories of jobs to |
+ /// list (default = match ALL jobs). |
+ /// Possible string values are: |
+ /// - "ALL" : A ALL. |
+ /// - "ACTIVE" : A ACTIVE. |
+ /// - "NON_ACTIVE" : A NON_ACTIVE. |
+ /// |
+ /// Completes with a [ListJobsResponse]. |
+ /// |
+ /// Completes with a [commons.ApiRequestError] if the API endpoint returned |
+ /// an error. |
+ /// |
+ /// If the used [http.Client] completes with an error when making a REST |
+ /// call, this method will complete with the same error. |
+ async.Future<ListJobsResponse> list(core.String projectId, core.String region, |
+ {core.String pageToken, |
+ core.int pageSize, |
+ core.String clusterName, |
+ core.String filter, |
+ core.String jobStateMatcher}) { |
var _url = null; |
var _queryParams = new core.Map(); |
var _uploadMedia = null; |
@@ -655,12 +690,6 @@ class ProjectsRegionsJobsResourceApi { |
if (region == null) { |
throw new core.ArgumentError("Parameter region is required."); |
} |
- if (filter != null) { |
- _queryParams["filter"] = [filter]; |
- } |
- if (jobStateMatcher != null) { |
- _queryParams["jobStateMatcher"] = [jobStateMatcher]; |
- } |
if (pageToken != null) { |
_queryParams["pageToken"] = [pageToken]; |
} |
@@ -670,50 +699,59 @@ class ProjectsRegionsJobsResourceApi { |
if (clusterName != null) { |
_queryParams["clusterName"] = [clusterName]; |
} |
+ if (filter != null) { |
+ _queryParams["filter"] = [filter]; |
+ } |
+ if (jobStateMatcher != null) { |
+ _queryParams["jobStateMatcher"] = [jobStateMatcher]; |
+ } |
- _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/jobs'; |
+ _url = 'v1/projects/' + |
+ commons.Escaper.ecapeVariable('$projectId') + |
+ '/regions/' + |
+ commons.Escaper.ecapeVariable('$region') + |
+ '/jobs'; |
- var _response = _requester.request(_url, |
- "GET", |
- body: _body, |
- queryParams: _queryParams, |
- uploadOptions: _uploadOptions, |
- uploadMedia: _uploadMedia, |
- downloadOptions: _downloadOptions); |
+ var _response = _requester.request(_url, "GET", |
+ body: _body, |
+ queryParams: _queryParams, |
+ uploadOptions: _uploadOptions, |
+ uploadMedia: _uploadMedia, |
+ downloadOptions: _downloadOptions); |
return _response.then((data) => new ListJobsResponse.fromJson(data)); |
} |
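
A paging sketch for the list method above, assuming ListJobsResponse carries jobs, nextPageToken, and a reference.jobId per job, as in the published model classes:

    import 'dart:async';
    import 'package:googleapis/dataproc/v1.dart' as dataproc;

    Future printActiveJobs(dataproc.DataprocApi api) async {
      String token;
      do {
        var page = await api.projects.regions.jobs.list(
            'my-project', 'global',
            clusterName: 'example-cluster',
            jobStateMatcher: 'ACTIVE',
            pageToken: token);
        page.jobs?.forEach((job) => print(job.reference?.jobId));
        token = page.nextPageToken;
      } while (token != null);
    }
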
- /** |
- * Updates a job in a project. |
- * |
- * [request] - The metadata request object. |
- * |
- * Request parameters: |
- * |
- * [projectId] - Required. The ID of the Google Cloud Platform project that |
- * the job belongs to. |
- * |
- * [region] - Required. The Cloud Dataproc region in which to handle the |
- * request. |
- * |
- * [jobId] - Required. The job ID. |
- * |
- * [updateMask] - Required. Specifies the path, relative to <code>Job</code>, |
- * of the field to update. For example, to update the labels of a Job the |
- * <code>update_mask</code> parameter would be specified as |
- * <code>labels</code>, and the PATCH request body would specify the new |
- * value. <strong>Note:</strong> Currently, <code>labels</code> is the only |
- * field that can be updated. |
- * |
- * Completes with a [Job]. |
- * |
- * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
- * error. |
- * |
- * If the used [http.Client] completes with an error when making a REST call, |
- * this method will complete with the same error. |
- */ |
- async.Future<Job> patch(Job request, core.String projectId, core.String region, core.String jobId, {core.String updateMask}) { |
+ /// Updates a job in a project. |
+ /// |
+ /// [request] - The metadata request object. |
+ /// |
+ /// Request parameters: |
+ /// |
+ /// [projectId] - Required. The ID of the Google Cloud Platform project that |
+ /// the job belongs to. |
+ /// |
+ /// [region] - Required. The Cloud Dataproc region in which to handle the |
+ /// request. |
+ /// |
+ /// [jobId] - Required. The job ID. |
+ /// |
+ /// [updateMask] - Required. Specifies the path, relative to |
+ /// <code>Job</code>, of the field to update. For example, to update the |
+ /// labels of a Job the <code>update_mask</code> parameter would be specified |
+ /// as <code>labels</code>, and the PATCH request body would specify the new |
+ /// value. <strong>Note:</strong> Currently, <code>labels</code> is the only |
+ /// field that can be updated. |
+ /// |
+ /// Completes with a [Job]. |
+ /// |
+ /// Completes with a [commons.ApiRequestError] if the API endpoint returned |
+ /// an error. |
+ /// |
+ /// If the used [http.Client] completes with an error when making a REST |
+ /// call, this method will complete with the same error. |
+ async.Future<Job> patch( |
+ Job request, core.String projectId, core.String region, core.String jobId, |
+ {core.String updateMask}) { |
var _url = null; |
var _queryParams = new core.Map(); |
var _uploadMedia = null; |
@@ -737,40 +775,43 @@ class ProjectsRegionsJobsResourceApi { |
_queryParams["updateMask"] = [updateMask]; |
} |
- _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId'); |
+ _url = 'v1/projects/' + |
+ commons.Escaper.ecapeVariable('$projectId') + |
+ '/regions/' + |
+ commons.Escaper.ecapeVariable('$region') + |
+ '/jobs/' + |
+ commons.Escaper.ecapeVariable('$jobId'); |
- var _response = _requester.request(_url, |
- "PATCH", |
- body: _body, |
- queryParams: _queryParams, |
- uploadOptions: _uploadOptions, |
- uploadMedia: _uploadMedia, |
- downloadOptions: _downloadOptions); |
+ var _response = _requester.request(_url, "PATCH", |
+ body: _body, |
+ queryParams: _queryParams, |
+ uploadOptions: _uploadOptions, |
+ uploadMedia: _uploadMedia, |
+ downloadOptions: _downloadOptions); |
return _response.then((data) => new Job.fromJson(data)); |
} |
- /** |
- * Submits a job to a cluster. |
- * |
- * [request] - The metadata request object. |
- * |
- * Request parameters: |
- * |
- * [projectId] - Required. The ID of the Google Cloud Platform project that |
- * the job belongs to. |
- * |
- * [region] - Required. The Cloud Dataproc region in which to handle the |
- * request. |
- * |
- * Completes with a [Job]. |
- * |
- * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
- * error. |
- * |
- * If the used [http.Client] completes with an error when making a REST call, |
- * this method will complete with the same error. |
- */ |
- async.Future<Job> submit(SubmitJobRequest request, core.String projectId, core.String region) { |
+ /// Submits a job to a cluster. |
+ /// |
+ /// [request] - The metadata request object. |
+ /// |
+ /// Request parameters: |
+ /// |
+ /// [projectId] - Required. The ID of the Google Cloud Platform project that |
+ /// the job belongs to. |
+ /// |
+ /// [region] - Required. The Cloud Dataproc region in which to handle the |
+ /// request. |
+ /// |
+ /// Completes with a [Job]. |
+ /// |
+ /// Completes with a [commons.ApiRequestError] if the API endpoint returned |
+ /// an error. |
+ /// |
+ /// If the used [http.Client] completes with an error when making a REST |
+ /// call, this method will complete with the same error. |
+ async.Future<Job> submit( |
+ SubmitJobRequest request, core.String projectId, core.String region) { |
var _url = null; |
var _queryParams = new core.Map(); |
var _uploadMedia = null; |
@@ -788,51 +829,51 @@ class ProjectsRegionsJobsResourceApi { |
throw new core.ArgumentError("Parameter region is required."); |
} |
- _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/jobs:submit'; |
+ _url = 'v1/projects/' + |
+ commons.Escaper.ecapeVariable('$projectId') + |
+ '/regions/' + |
+ commons.Escaper.ecapeVariable('$region') + |
+ '/jobs:submit'; |
- var _response = _requester.request(_url, |
- "POST", |
- body: _body, |
- queryParams: _queryParams, |
- uploadOptions: _uploadOptions, |
- uploadMedia: _uploadMedia, |
- downloadOptions: _downloadOptions); |
+ var _response = _requester.request(_url, "POST", |
+ body: _body, |
+ queryParams: _queryParams, |
+ uploadOptions: _uploadOptions, |
+ uploadMedia: _uploadMedia, |
+ downloadOptions: _downloadOptions); |
return _response.then((data) => new Job.fromJson(data)); |
} |
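
A sketch of submitting a Spark job. SubmitJobRequest.job, JobPlacement, and SparkJob are model classes from the rest of the generated file, not this hunk, and the jar path is a placeholder:

    import 'dart:async';
    import 'package:googleapis/dataproc/v1.dart' as dataproc;

    Future<dataproc.Job> submitSparkPi(dataproc.DataprocApi api) {
      var request = new dataproc.SubmitJobRequest()
        ..job = (new dataproc.Job()
          ..placement =
              (new dataproc.JobPlacement()..clusterName = 'example-cluster')
          ..sparkJob = (new dataproc.SparkJob()
            ..mainClass = 'org.apache.spark.examples.SparkPi'
            ..jarFileUris = ['file:///usr/lib/spark/examples/jars/spark-examples.jar']
            ..args = ['1000']));
      return api.projects.regions.jobs.submit(request, 'my-project', 'global');
    }
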
- |
} |
- |
class ProjectsRegionsOperationsResourceApi { |
final commons.ApiRequester _requester; |
- ProjectsRegionsOperationsResourceApi(commons.ApiRequester client) : |
- _requester = client; |
- |
- /** |
- * Starts asynchronous cancellation on a long-running operation. The server |
- * makes a best effort to cancel the operation, but success is not guaranteed. |
- * If the server doesn't support this method, it returns |
- * google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or |
- * other methods to check whether the cancellation succeeded or whether the |
- * operation completed despite cancellation. On successful cancellation, the |
- * operation is not deleted; instead, it becomes an operation with an |
- * Operation.error value with a google.rpc.Status.code of 1, corresponding to |
- * Code.CANCELLED. |
- * |
- * Request parameters: |
- * |
- * [name] - The name of the operation resource to be cancelled. |
- * Value must have pattern "^projects/[^/]+/regions/[^/]+/operations/[^/]+$". |
- * |
- * Completes with a [Empty]. |
- * |
- * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
- * error. |
- * |
- * If the used [http.Client] completes with an error when making a REST call, |
- * this method will complete with the same error. |
- */ |
+ ProjectsRegionsOperationsResourceApi(commons.ApiRequester client) |
+ : _requester = client; |
+ |
+ /// Starts asynchronous cancellation on a long-running operation. The server |
+ /// makes a best effort to cancel the operation, but success is not |
+ /// guaranteed. If the server doesn't support this method, it returns |
+ /// google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or |
+ /// other methods to check whether the cancellation succeeded or whether the |
+ /// operation completed despite cancellation. On successful cancellation, the |
+ /// operation is not deleted; instead, it becomes an operation with an |
+ /// Operation.error value with a google.rpc.Status.code of 1, corresponding |
+ /// to Code.CANCELLED. |
+ /// |
+ /// Request parameters: |
+ /// |
+ /// [name] - The name of the operation resource to be cancelled. |
+ /// Value must have pattern |
+ /// "^projects/[^/]+/regions/[^/]+/operations/[^/]+$". |
+ /// |
+ /// Completes with an [Empty].
+ /// |
+ /// Completes with a [commons.ApiRequestError] if the API endpoint returned |
+ /// an error. |
+ /// |
+ /// If the used [http.Client] completes with an error when making a REST |
+ /// call, this method will complete with the same error. |
async.Future<Empty> cancel(core.String name) { |
var _url = null; |
var _queryParams = new core.Map(); |
@@ -847,35 +888,33 @@ class ProjectsRegionsOperationsResourceApi { |
_url = 'v1/' + commons.Escaper.ecapeVariableReserved('$name') + ':cancel'; |
- var _response = _requester.request(_url, |
- "POST", |
- body: _body, |
- queryParams: _queryParams, |
- uploadOptions: _uploadOptions, |
- uploadMedia: _uploadMedia, |
- downloadOptions: _downloadOptions); |
+ var _response = _requester.request(_url, "POST", |
+ body: _body, |
+ queryParams: _queryParams, |
+ uploadOptions: _uploadOptions, |
+ uploadMedia: _uploadMedia, |
+ downloadOptions: _downloadOptions); |
return _response.then((data) => new Empty.fromJson(data)); |
} |
- /** |
- * Deletes a long-running operation. This method indicates that the client is |
- * no longer interested in the operation result. It does not cancel the |
- * operation. If the server doesn't support this method, it returns |
- * google.rpc.Code.UNIMPLEMENTED. |
- * |
- * Request parameters: |
- * |
- * [name] - The name of the operation resource to be deleted. |
- * Value must have pattern "^projects/[^/]+/regions/[^/]+/operations/[^/]+$". |
- * |
- * Completes with a [Empty]. |
- * |
- * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
- * error. |
- * |
- * If the used [http.Client] completes with an error when making a REST call, |
- * this method will complete with the same error. |
- */ |
+ /// Deletes a long-running operation. This method indicates that the client |
+ /// is no longer interested in the operation result. It does not cancel the |
+ /// operation. If the server doesn't support this method, it returns |
+ /// google.rpc.Code.UNIMPLEMENTED. |
+ /// |
+ /// Request parameters: |
+ /// |
+ /// [name] - The name of the operation resource to be deleted. |
+ /// Value must have pattern |
+ /// "^projects/[^/]+/regions/[^/]+/operations/[^/]+$". |
+ /// |
+ /// Completes with an [Empty].
+ /// |
+ /// Completes with a [commons.ApiRequestError] if the API endpoint returned |
+ /// an error. |
+ /// |
+ /// If the used [http.Client] completes with an error when making a REST |
+ /// call, this method will complete with the same error. |
async.Future<Empty> delete(core.String name) { |
var _url = null; |
var _queryParams = new core.Map(); |
@@ -890,34 +929,32 @@ class ProjectsRegionsOperationsResourceApi { |
_url = 'v1/' + commons.Escaper.ecapeVariableReserved('$name'); |
- var _response = _requester.request(_url, |
- "DELETE", |
- body: _body, |
- queryParams: _queryParams, |
- uploadOptions: _uploadOptions, |
- uploadMedia: _uploadMedia, |
- downloadOptions: _downloadOptions); |
+ var _response = _requester.request(_url, "DELETE", |
+ body: _body, |
+ queryParams: _queryParams, |
+ uploadOptions: _uploadOptions, |
+ uploadMedia: _uploadMedia, |
+ downloadOptions: _downloadOptions); |
return _response.then((data) => new Empty.fromJson(data)); |
} |
- /** |
- * Gets the latest state of a long-running operation. Clients can use this |
- * method to poll the operation result at intervals as recommended by the API |
- * service. |
- * |
- * Request parameters: |
- * |
- * [name] - The name of the operation resource. |
- * Value must have pattern "^projects/[^/]+/regions/[^/]+/operations/[^/]+$". |
- * |
- * Completes with a [Operation]. |
- * |
- * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
- * error. |
- * |
- * If the used [http.Client] completes with an error when making a REST call, |
- * this method will complete with the same error. |
- */ |
+ /// Gets the latest state of a long-running operation. Clients can use this |
+ /// method to poll the operation result at intervals as recommended by the |
+ /// API service. |
+ /// |
+ /// Request parameters: |
+ /// |
+ /// [name] - The name of the operation resource. |
+ /// Value must have pattern |
+ /// "^projects/[^/]+/regions/[^/]+/operations/[^/]+$". |
+ /// |
+ /// Completes with an [Operation].
+ /// |
+ /// Completes with a [commons.ApiRequestError] if the API endpoint returned |
+ /// an error. |
+ /// |
+ /// If the used [http.Client] completes with an error when making a REST |
+ /// call, this method will complete with the same error. |
async.Future<Operation> get(core.String name) { |
var _url = null; |
var _queryParams = new core.Map(); |
@@ -932,47 +969,45 @@ class ProjectsRegionsOperationsResourceApi { |
_url = 'v1/' + commons.Escaper.ecapeVariableReserved('$name'); |
- var _response = _requester.request(_url, |
- "GET", |
- body: _body, |
- queryParams: _queryParams, |
- uploadOptions: _uploadOptions, |
- uploadMedia: _uploadMedia, |
- downloadOptions: _downloadOptions); |
+ var _response = _requester.request(_url, "GET", |
+ body: _body, |
+ queryParams: _queryParams, |
+ uploadOptions: _uploadOptions, |
+ uploadMedia: _uploadMedia, |
+ downloadOptions: _downloadOptions); |
return _response.then((data) => new Operation.fromJson(data)); |
} |
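
The doc comment above suggests polling; a sketch, assuming the Operation model exposes done and error as in the published API:

    import 'dart:async';
    import 'package:googleapis/dataproc/v1.dart' as dataproc;

    Future<dataproc.Operation> waitForOperation(
        dataproc.DataprocApi api, String name) async {
      while (true) {
        var op = await api.projects.regions.operations.get(name);
        if (op.done == true) {
          if (op.error != null) {
            throw new StateError('Operation failed: ${op.error.message}');
          }
          return op;
        }
        await new Future.delayed(const Duration(seconds: 5));
      }
    }
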
- /** |
- * Lists operations that match the specified filter in the request. If the |
- * server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name |
- * binding allows API services to override the binding to use different |
- * resource name schemes, such as users / * /operations. To override the |
- * binding, API services can add a binding such as "/v1/{name=users / * |
- * }/operations" to their service configuration. For backwards compatibility, |
- * the default name includes the operations collection id, however overriding |
- * users must ensure the name binding is the parent resource, without the |
- * operations collection id. |
- * |
- * Request parameters: |
- * |
- * [name] - The name of the operation's parent resource. |
- * Value must have pattern "^projects/[^/]+/regions/[^/]+/operations$". |
- * |
- * [filter] - The standard list filter. |
- * |
- * [pageToken] - The standard list page token. |
- * |
- * [pageSize] - The standard list page size. |
- * |
- * Completes with a [ListOperationsResponse]. |
- * |
- * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
- * error. |
- * |
- * If the used [http.Client] completes with an error when making a REST call, |
- * this method will complete with the same error. |
- */ |
- async.Future<ListOperationsResponse> list(core.String name, {core.String filter, core.String pageToken, core.int pageSize}) { |
+ /// Lists operations that match the specified filter in the request. If the |
+ /// server doesn't support this method, it returns UNIMPLEMENTED. NOTE: the
+ /// name binding allows API services to override the binding to use different |
+ /// resource name schemes, such as users / * /operations. To override the |
+ /// binding, API services can add a binding such as "/v1/{name=users / * |
+ /// }/operations" to their service configuration. For backwards |
+ /// compatibility, the default name includes the operations collection id, |
+ /// however overriding users must ensure the name binding is the parent |
+ /// resource, without the operations collection id. |
+ /// |
+ /// Request parameters: |
+ /// |
+ /// [name] - The name of the operation's parent resource. |
+ /// Value must have pattern "^projects/[^/]+/regions/[^/]+/operations$". |
+ /// |
+ /// [pageToken] - The standard list page token. |
+ /// |
+ /// [pageSize] - The standard list page size. |
+ /// |
+ /// [filter] - The standard list filter. |
+ /// |
+ /// Completes with a [ListOperationsResponse]. |
+ /// |
+ /// Completes with a [commons.ApiRequestError] if the API endpoint returned |
+ /// an error. |
+ /// |
+ /// If the used [http.Client] completes with an error when making a REST |
+ /// call, this method will complete with the same error. |
+ async.Future<ListOperationsResponse> list(core.String name, |
+ {core.String pageToken, core.int pageSize, core.String filter}) { |
var _url = null; |
var _queryParams = new core.Map(); |
var _uploadMedia = null; |
@@ -983,49 +1018,42 @@ class ProjectsRegionsOperationsResourceApi { |
if (name == null) { |
throw new core.ArgumentError("Parameter name is required."); |
} |
- if (filter != null) { |
- _queryParams["filter"] = [filter]; |
- } |
if (pageToken != null) { |
_queryParams["pageToken"] = [pageToken]; |
} |
if (pageSize != null) { |
_queryParams["pageSize"] = ["${pageSize}"]; |
} |
+ if (filter != null) { |
+ _queryParams["filter"] = [filter]; |
+ } |
_url = 'v1/' + commons.Escaper.ecapeVariableReserved('$name'); |
- var _response = _requester.request(_url, |
- "GET", |
- body: _body, |
- queryParams: _queryParams, |
- uploadOptions: _uploadOptions, |
- uploadMedia: _uploadMedia, |
- downloadOptions: _downloadOptions); |
+ var _response = _requester.request(_url, "GET", |
+ body: _body, |
+ queryParams: _queryParams, |
+ uploadOptions: _uploadOptions, |
+ uploadMedia: _uploadMedia, |
+ downloadOptions: _downloadOptions); |
return _response.then((data) => new ListOperationsResponse.fromJson(data)); |
} |
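
A sketch showing the parent-resource name shape required by the pattern above (it ends in /operations, with no trailing operation id); the project and region are placeholders:

    import 'dart:async';
    import 'package:googleapis/dataproc/v1.dart' as dataproc;

    Future printOperations(dataproc.DataprocApi api) async {
      var response = await api.projects.regions.operations.list(
          'projects/my-project/regions/global/operations',
          pageSize: 20);
      response.operations?.forEach((op) => print('${op.name} done=${op.done}'));
    }
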
- |
} |
- |
- |
-/** |
- * Specifies the type and number of accelerator cards attached to the instances |
- * of an instance group (see GPUs on Compute Engine). |
- */ |
+/// Specifies the type and number of accelerator cards attached to the |
+/// instances of an instance group (see GPUs on Compute Engine). |
class AcceleratorConfig { |
- /** |
- * The number of the accelerator cards of this type exposed to this instance. |
- */ |
+ /// The number of the accelerator cards of this type exposed to this |
+ /// instance. |
core.int acceleratorCount; |
- /** |
- * Full URL, partial URI, or short name of the accelerator type resource to |
- * expose to this instance. See Google Compute Engine AcceleratorTypes( |
- * /compute/docs/reference/beta/acceleratorTypes)Examples * |
- * https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80 |
- * * projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80 |
- * * nvidia-tesla-k80 |
- */ |
+ |
+ /// Full URL, partial URI, or short name of the accelerator type resource to
+ /// expose to this instance. See Google Compute Engine AcceleratorTypes
+ /// (/compute/docs/reference/beta/acceleratorTypes). Examples:
+ /// * https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80
+ /// * projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80
+ /// * nvidia-tesla-k80
core.String acceleratorTypeUri; |
AcceleratorConfig(); |
@@ -1040,7 +1068,8 @@ class AcceleratorConfig { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (acceleratorCount != null) { |
_json["acceleratorCount"] = acceleratorCount; |
} |
@@ -1051,62 +1080,55 @@ class AcceleratorConfig { |
} |
} |
-/** A request to cancel a job. */ |
+/// A request to cancel a job. |
class CancelJobRequest { |
- |
CancelJobRequest(); |
- CancelJobRequest.fromJson(core.Map _json) { |
- } |
+ CancelJobRequest.fromJson(core.Map _json) {} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
return _json; |
} |
} |
-/** |
- * Describes the identifying information, config, and status of a cluster of |
- * Google Compute Engine instances. |
- */ |
+/// Describes the identifying information, config, and status of a cluster of |
+/// Google Compute Engine instances. |
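+///
+/// A minimal construction sketch (hypothetical names; the output-only
+/// fields are filled in by the service):
+///
+///     final cluster = new Cluster()
+///       ..projectId = 'my-project'
+///       ..clusterName = 'analysis-cluster'
+///       ..config = new ClusterConfig();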
class Cluster { |
- /** |
- * Required. The cluster name. Cluster names within a project must be unique. |
- * Names of deleted clusters can be reused. |
- */ |
+ /// Required. The cluster name. Cluster names within a project must be |
+ /// unique. Names of deleted clusters can be reused. |
core.String clusterName; |
- /** |
- * Output-only. A cluster UUID (Unique Universal Identifier). Cloud Dataproc |
- * generates this value when it creates the cluster. |
- */ |
+ |
+ /// Output-only. A cluster UUID (Unique Universal Identifier). Cloud Dataproc |
+ /// generates this value when it creates the cluster. |
core.String clusterUuid; |
- /** |
- * Required. The cluster config. Note that Cloud Dataproc may set default |
- * values, and values may change when clusters are updated. |
- */ |
+ |
+ /// Required. The cluster config. Note that Cloud Dataproc may set default |
+ /// values, and values may change when clusters are updated. |
ClusterConfig config; |
- /** |
- * Optional. The labels to associate with this cluster. Label keys must |
- * contain 1 to 63 characters, and must conform to RFC 1035 |
- * (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if |
- * present, must contain 1 to 63 characters, and must conform to RFC 1035 |
- * (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be |
- * associated with a cluster. |
- */ |
+ |
+ /// Optional. The labels to associate with this cluster. Label keys must |
+ /// contain 1 to 63 characters, and must conform to RFC 1035 |
+ /// (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, |
+ /// if present, must contain 1 to 63 characters, and must conform to RFC 1035 |
+ /// (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be |
+ /// associated with a cluster. |
core.Map<core.String, core.String> labels; |
- /** |
- * Contains cluster daemon metrics such as HDFS and YARN stats.Beta Feature: |
- * This report is available for testing purposes only. It may be changed |
- * before final release. |
- */ |
+ |
+ /// Contains cluster daemon metrics such as HDFS and YARN stats. Beta Feature:
+ /// This report is available for testing purposes only. It may be changed |
+ /// before final release. |
ClusterMetrics metrics; |
- /** |
- * Required. The Google Cloud Platform project ID that the cluster belongs to. |
- */ |
+ |
+ /// Required. The Google Cloud Platform project ID that the cluster belongs |
+ /// to. |
core.String projectId; |
- /** Output-only. Cluster status. */ |
+ |
+ /// Output-only. Cluster status. |
ClusterStatus status; |
- /** Output-only. The previous cluster status. */ |
+ |
+ /// Output-only. The previous cluster status. |
core.List<ClusterStatus> statusHistory; |
Cluster(); |
@@ -1134,12 +1156,15 @@ class Cluster { |
status = new ClusterStatus.fromJson(_json["status"]); |
} |
if (_json.containsKey("statusHistory")) { |
- statusHistory = _json["statusHistory"].map((value) => new ClusterStatus.fromJson(value)).toList(); |
+ statusHistory = _json["statusHistory"] |
+ .map((value) => new ClusterStatus.fromJson(value)) |
+ .toList(); |
} |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (clusterName != null) { |
_json["clusterName"] = clusterName; |
} |
@@ -1162,58 +1187,53 @@ class Cluster { |
_json["status"] = (status).toJson(); |
} |
if (statusHistory != null) { |
- _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).toList(); |
+ _json["statusHistory"] = |
+ statusHistory.map((value) => (value).toJson()).toList(); |
} |
return _json; |
} |
} |
-/** The cluster config. */ |
+/// The cluster config. |
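+///
+/// A construction sketch with hypothetical values, combining the required
+/// [gceClusterConfig] with an optional worker group:
+///
+///     final config = new ClusterConfig()
+///       ..gceClusterConfig = (new GceClusterConfig()..zoneUri = 'us-central1-f')
+///       ..workerConfig = (new InstanceGroupConfig()..numInstances = 2);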
class ClusterConfig { |
- /** |
- * Optional. A Google Cloud Storage staging bucket used for sharing generated |
- * SSH keys and config. If you do not specify a staging bucket, Cloud Dataproc |
- * will determine an appropriate Cloud Storage location (US, ASIA, or EU) for |
- * your cluster's staging bucket according to the Google Compute Engine zone |
- * where your cluster is deployed, and then it will create and manage this |
- * project-level, per-location bucket for you. |
- */ |
+ /// Optional. A Google Cloud Storage staging bucket used for sharing |
+ /// generated SSH keys and config. If you do not specify a staging bucket, |
+ /// Cloud Dataproc will determine an appropriate Cloud Storage location (US, |
+ /// ASIA, or EU) for your cluster's staging bucket according to the Google |
+ /// Compute Engine zone where your cluster is deployed, and then it will |
+ /// create and manage this project-level, per-location bucket for you. |
core.String configBucket; |
- /** |
- * Required. The shared Google Compute Engine config settings for all |
- * instances in a cluster. |
- */ |
+ |
+ /// Required. The shared Google Compute Engine config settings for all |
+ /// instances in a cluster. |
GceClusterConfig gceClusterConfig; |
- /** |
- * Optional. Commands to execute on each node after config is completed. By |
- * default, executables are run on master and all worker nodes. You can test a |
- * node's role metadata to run an executable on a master or worker node, as |
- * shown below using curl (you can also use wget): |
- * ROLE=$(curl -H Metadata-Flavor:Google |
- * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) |
- * if [[ "${ROLE}" == 'Master' ]]; then |
- * ... master specific actions ... |
- * else |
- * ... worker specific actions ... |
- * fi |
- */ |
+ |
+ /// Optional. Commands to execute on each node after config is completed. By |
+ /// default, executables are run on master and all worker nodes. You can test |
+ /// a node's role metadata to run an executable on a master or worker node, |
+ /// as shown below using curl (you can also use wget): |
+ /// ROLE=$(curl -H Metadata-Flavor:Google |
+ /// http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) |
+ /// if [[ "${ROLE}" == 'Master' ]]; then |
+ /// ... master specific actions ... |
+ /// else |
+ /// ... worker specific actions ... |
+ /// fi |
core.List<NodeInitializationAction> initializationActions; |
- /** |
- * Optional. The Google Compute Engine config settings for the master instance |
- * in a cluster. |
- */ |
+ |
+ /// Optional. The Google Compute Engine config settings for the master |
+ /// instance in a cluster. |
InstanceGroupConfig masterConfig; |
- /** |
- * Optional. The Google Compute Engine config settings for additional worker |
- * instances in a cluster. |
- */ |
+ |
+ /// Optional. The Google Compute Engine config settings for additional worker |
+ /// instances in a cluster. |
InstanceGroupConfig secondaryWorkerConfig; |
- /** Optional. The config settings for software inside the cluster. */ |
+ |
+ /// Optional. The config settings for software inside the cluster. |
SoftwareConfig softwareConfig; |
- /** |
- * Optional. The Google Compute Engine config settings for worker instances in |
- * a cluster. |
- */ |
+ |
+ /// Optional. The Google Compute Engine config settings for worker instances |
+ /// in a cluster. |
InstanceGroupConfig workerConfig; |
ClusterConfig(); |
@@ -1223,16 +1243,20 @@ class ClusterConfig { |
configBucket = _json["configBucket"]; |
} |
if (_json.containsKey("gceClusterConfig")) { |
- gceClusterConfig = new GceClusterConfig.fromJson(_json["gceClusterConfig"]); |
+ gceClusterConfig = |
+ new GceClusterConfig.fromJson(_json["gceClusterConfig"]); |
} |
if (_json.containsKey("initializationActions")) { |
- initializationActions = _json["initializationActions"].map((value) => new NodeInitializationAction.fromJson(value)).toList(); |
+ initializationActions = _json["initializationActions"] |
+ .map((value) => new NodeInitializationAction.fromJson(value)) |
+ .toList(); |
} |
if (_json.containsKey("masterConfig")) { |
masterConfig = new InstanceGroupConfig.fromJson(_json["masterConfig"]); |
} |
if (_json.containsKey("secondaryWorkerConfig")) { |
- secondaryWorkerConfig = new InstanceGroupConfig.fromJson(_json["secondaryWorkerConfig"]); |
+ secondaryWorkerConfig = |
+ new InstanceGroupConfig.fromJson(_json["secondaryWorkerConfig"]); |
} |
if (_json.containsKey("softwareConfig")) { |
softwareConfig = new SoftwareConfig.fromJson(_json["softwareConfig"]); |
@@ -1243,7 +1267,8 @@ class ClusterConfig { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (configBucket != null) { |
_json["configBucket"] = configBucket; |
} |
@@ -1251,7 +1276,8 @@ class ClusterConfig { |
_json["gceClusterConfig"] = (gceClusterConfig).toJson(); |
} |
if (initializationActions != null) { |
- _json["initializationActions"] = initializationActions.map((value) => (value).toJson()).toList(); |
+ _json["initializationActions"] = |
+ initializationActions.map((value) => (value).toJson()).toList(); |
} |
if (masterConfig != null) { |
_json["masterConfig"] = (masterConfig).toJson(); |
@@ -1269,15 +1295,14 @@ class ClusterConfig { |
} |
} |
-/** |
- * Contains cluster daemon metrics, such as HDFS and YARN stats.Beta Feature: |
- * This report is available for testing purposes only. It may be changed before |
- * final release. |
- */ |
+/// Contains cluster daemon metrics, such as HDFS and YARN stats. Beta Feature:
+/// This report is available for testing purposes only. It may be changed |
+/// before final release. |
class ClusterMetrics { |
- /** The HDFS metrics. */ |
+ /// The HDFS metrics. |
core.Map<core.String, core.String> hdfsMetrics; |
- /** The YARN metrics. */ |
+ |
+ /// The YARN metrics. |
core.Map<core.String, core.String> yarnMetrics; |
ClusterMetrics(); |
@@ -1292,7 +1317,8 @@ class ClusterMetrics { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (hdfsMetrics != null) { |
_json["hdfsMetrics"] = hdfsMetrics; |
} |
@@ -1303,23 +1329,30 @@ class ClusterMetrics { |
} |
} |
-/** Metadata describing the operation. */ |
+/// Metadata describing the operation. |
class ClusterOperationMetadata { |
- /** Output-only. Name of the cluster for the operation. */ |
+ /// Output-only. Name of the cluster for the operation. |
core.String clusterName; |
- /** Output-only. Cluster UUID for the operation. */ |
+ |
+ /// Output-only. Cluster UUID for the operation. |
core.String clusterUuid; |
- /** Output-only. Short description of operation. */ |
+ |
+ /// Output-only. Short description of operation. |
core.String description; |
- /** Output-only. Labels associated with the operation */ |
+ |
+ /// Output-only. Labels associated with the operation.
core.Map<core.String, core.String> labels; |
- /** Output-only. The operation type. */ |
+ |
+ /// Output-only. The operation type. |
core.String operationType; |
- /** Output-only. Current operation status. */ |
+ |
+ /// Output-only. Current operation status. |
ClusterOperationStatus status; |
- /** Output-only. The previous operation status. */ |
+ |
+ /// Output-only. The previous operation status. |
core.List<ClusterOperationStatus> statusHistory; |
- /** Output-only. Errors encountered during operation execution. */ |
+ |
+ /// Output-only. Errors encountered during operation execution. |
core.List<core.String> warnings; |
ClusterOperationMetadata(); |
@@ -1344,7 +1377,9 @@ class ClusterOperationMetadata { |
status = new ClusterOperationStatus.fromJson(_json["status"]); |
} |
if (_json.containsKey("statusHistory")) { |
- statusHistory = _json["statusHistory"].map((value) => new ClusterOperationStatus.fromJson(value)).toList(); |
+ statusHistory = _json["statusHistory"] |
+ .map((value) => new ClusterOperationStatus.fromJson(value)) |
+ .toList(); |
} |
if (_json.containsKey("warnings")) { |
warnings = _json["warnings"]; |
@@ -1352,7 +1387,8 @@ class ClusterOperationMetadata { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (clusterName != null) { |
_json["clusterName"] = clusterName; |
} |
@@ -1372,7 +1408,8 @@ class ClusterOperationMetadata { |
_json["status"] = (status).toJson(); |
} |
if (statusHistory != null) { |
- _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).toList(); |
+ _json["statusHistory"] = |
+ statusHistory.map((value) => (value).toJson()).toList(); |
} |
if (warnings != null) { |
_json["warnings"] = warnings; |
@@ -1381,22 +1418,23 @@ class ClusterOperationMetadata { |
} |
} |
-/** The status of the operation. */ |
+/// The status of the operation. |
class ClusterOperationStatus { |
- /** Output-only.A message containing any operation metadata details. */ |
+ /// Output-only. A message containing any operation metadata details.
core.String details; |
- /** Output-only. A message containing the detailed operation state. */ |
+ |
+ /// Output-only. A message containing the detailed operation state. |
core.String innerState; |
- /** |
- * Output-only. A message containing the operation state. |
- * Possible string values are: |
- * - "UNKNOWN" : Unused. |
- * - "PENDING" : The operation has been created. |
- * - "RUNNING" : The operation is running. |
- * - "DONE" : The operation is done; either cancelled or completed. |
- */ |
+ |
+ /// Output-only. A message containing the operation state. |
+ /// Possible string values are: |
+ /// - "UNKNOWN" : Unused. |
+ /// - "PENDING" : The operation has been created. |
+ /// - "RUNNING" : The operation is running. |
+ /// - "DONE" : The operation is done; either cancelled or completed. |
core.String state; |
- /** Output-only. The time this state was entered. */ |
+ |
+ /// Output-only. The time this state was entered. |
core.String stateStartTime; |
ClusterOperationStatus(); |
@@ -1417,7 +1455,8 @@ class ClusterOperationStatus { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (details != null) { |
_json["details"] = details; |
} |
@@ -1434,37 +1473,36 @@ class ClusterOperationStatus { |
} |
} |
-/** The status of a cluster and its instances. */ |
+/// The status of a cluster and its instances. |
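+///
+/// For example, polling code might treat the cluster as usable once [state]
+/// reaches the RUNNING value documented below (assumes a [Cluster] named
+/// `cluster`):
+///
+///     final ready = cluster.status.state == 'RUNNING';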
class ClusterStatus { |
- /** Output-only. Optional details of cluster's state. */ |
+ /// Output-only. Optional details of cluster's state. |
core.String detail; |
- /** |
- * Output-only. The cluster's state. |
- * Possible string values are: |
- * - "UNKNOWN" : The cluster state is unknown. |
- * - "CREATING" : The cluster is being created and set up. It is not ready for |
- * use. |
- * - "RUNNING" : The cluster is currently running and healthy. It is ready for |
- * use. |
- * - "ERROR" : The cluster encountered an error. It is not ready for use. |
- * - "DELETING" : The cluster is being deleted. It cannot be used. |
- * - "UPDATING" : The cluster is being updated. It continues to accept and |
- * process jobs. |
- */ |
+ |
+ /// Output-only. The cluster's state. |
+ /// Possible string values are: |
+ /// - "UNKNOWN" : The cluster state is unknown. |
+ /// - "CREATING" : The cluster is being created and set up. It is not ready |
+ /// for use. |
+ /// - "RUNNING" : The cluster is currently running and healthy. It is ready |
+ /// for use. |
+ /// - "ERROR" : The cluster encountered an error. It is not ready for use. |
+ /// - "DELETING" : The cluster is being deleted. It cannot be used. |
+ /// - "UPDATING" : The cluster is being updated. It continues to accept and |
+ /// process jobs. |
core.String state; |
- /** Output-only. Time when this state was entered. */ |
+ |
+ /// Output-only. Time when this state was entered. |
core.String stateStartTime; |
- /** |
- * Output-only. Additional state information that includes status reported by |
- * the agent. |
- * Possible string values are: |
- * - "UNSPECIFIED" |
- * - "UNHEALTHY" : The cluster is known to be in an unhealthy state (for |
- * example, critical daemons are not running or HDFS capacity is |
- * exhausted).Applies to RUNNING state. |
- * - "STALE_STATUS" : The agent-reported status is out of date (may occur if |
- * Cloud Dataproc loses communication with Agent).Applies to RUNNING state. |
- */ |
+ |
+ /// Output-only. Additional state information that includes status reported |
+ /// by the agent. |
+ /// Possible string values are: |
+ /// - "UNSPECIFIED" |
+ /// - "UNHEALTHY" : The cluster is known to be in an unhealthy state (for |
+ /// example, critical daemons are not running or HDFS capacity is |
+ /// exhausted). Applies to RUNNING state.
+ /// - "STALE_STATUS" : The agent-reported status is out of date (may occur if |
+ /// Cloud Dataproc loses communication with the agent). Applies to RUNNING state.
core.String substate; |
ClusterStatus(); |
@@ -1485,7 +1523,8 @@ class ClusterStatus { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (detail != null) { |
_json["detail"] = detail; |
} |
@@ -1502,26 +1541,24 @@ class ClusterStatus { |
} |
} |
-/** A request to collect cluster diagnostic information. */ |
+/// A request to collect cluster diagnostic information. |
class DiagnoseClusterRequest { |
- |
DiagnoseClusterRequest(); |
- DiagnoseClusterRequest.fromJson(core.Map _json) { |
- } |
+ DiagnoseClusterRequest.fromJson(core.Map _json) {} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
return _json; |
} |
} |
-/** The location of diagnostic output. */ |
+/// The location of diagnostic output. |
class DiagnoseClusterResults { |
- /** |
- * Output-only. The Google Cloud Storage URI of the diagnostic output. The |
- * output report is a plain text file with a summary of collected diagnostics. |
- */ |
+ /// Output-only. The Google Cloud Storage URI of the diagnostic output. The |
+ /// output report is a plain text file with a summary of collected |
+ /// diagnostics. |
core.String outputUri; |
DiagnoseClusterResults(); |
@@ -1533,7 +1570,8 @@ class DiagnoseClusterResults { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (outputUri != null) { |
_json["outputUri"] = outputUri; |
} |
@@ -1541,17 +1579,16 @@ class DiagnoseClusterResults { |
} |
} |
-/** Specifies the config of disk options for a group of VM instances. */ |
+/// Specifies the config of disk options for a group of VM instances. |
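+///
+/// For example, a 100 GB boot disk with two local SSDs (hypothetical
+/// sizing):
+///
+///     final disks = new DiskConfig()
+///       ..bootDiskSizeGb = 100
+///       ..numLocalSsds = 2;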
class DiskConfig { |
- /** Optional. Size in GB of the boot disk (default is 500GB). */ |
+ /// Optional. Size in GB of the boot disk (default is 500GB). |
core.int bootDiskSizeGb; |
- /** |
- * Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are |
- * not attached, the boot disk is used to store runtime logs and HDFS |
- * (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one |
- * or more SSDs are attached, this runtime bulk data is spread across them, |
- * and the boot disk contains only basic config and installed binaries. |
- */ |
+ |
+ /// Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs |
+ /// are not attached, the boot disk is used to store runtime logs and HDFS |
+ /// (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one |
+ /// or more SSDs are attached, this runtime bulk data is spread across them, |
+ /// and the boot disk contains only basic config and installed binaries. |
core.int numLocalSsds; |
DiskConfig(); |
@@ -1566,7 +1603,8 @@ class DiskConfig { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (bootDiskSizeGb != null) { |
_json["bootDiskSizeGb"] = bootDiskSizeGb; |
} |
@@ -1577,109 +1615,96 @@ class DiskConfig { |
} |
} |
-/** |
- * A generic empty message that you can re-use to avoid defining duplicated |
- * empty messages in your APIs. A typical example is to use it as the request or |
- * the response type of an API method. For instance: |
- * service Foo { |
- * rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); |
- * } |
- * The JSON representation for Empty is empty JSON object {}. |
- */ |
+/// A generic empty message that you can re-use to avoid defining duplicated |
+/// empty messages in your APIs. A typical example is to use it as the request |
+/// or the response type of an API method. For instance: |
+/// service Foo { |
+/// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); |
+/// } |
+/// The JSON representation for Empty is an empty JSON object {}.
class Empty { |
- |
Empty(); |
- Empty.fromJson(core.Map _json) { |
- } |
+ Empty.fromJson(core.Map _json) {} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
return _json; |
} |
} |
-/** |
- * Common config settings for resources of Google Compute Engine cluster |
- * instances, applicable to all instances in the cluster. |
- */ |
+/// Common config settings for resources of Google Compute Engine cluster |
+/// instances, applicable to all instances in the cluster. |
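+///
+/// A sketch using the short-name forms documented on the fields below
+/// (hypothetical values):
+///
+///     final gceConfig = new GceClusterConfig()
+///       ..zoneUri = 'us-central1-f'
+///       ..networkUri = 'default'
+///       ..tags = ['dataproc'];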
class GceClusterConfig { |
- /** |
- * Optional. If true, all instances in the cluster will only have internal IP |
- * addresses. By default, clusters are not restricted to internal IP |
- * addresses, and will have ephemeral external IP addresses assigned to each |
- * instance. This internal_ip_only restriction can only be enabled for |
- * subnetwork enabled networks, and all off-cluster dependencies must be |
- * configured to be accessible without external IP addresses. |
- */ |
+ /// Optional. If true, all instances in the cluster will only have internal |
+ /// IP addresses. By default, clusters are not restricted to internal IP |
+ /// addresses, and will have ephemeral external IP addresses assigned to each |
+ /// instance. This internal_ip_only restriction can only be enabled for |
+ /// subnetwork enabled networks, and all off-cluster dependencies must be |
+ /// configured to be accessible without external IP addresses. |
core.bool internalIpOnly; |
- /** |
- * The Google Compute Engine metadata entries to add to all instances (see |
- * Project and instance metadata |
- * (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). |
- */ |
+ |
+ /// The Google Compute Engine metadata entries to add to all instances (see |
+ /// Project and instance metadata |
+ /// (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). |
core.Map<core.String, core.String> metadata; |
- /** |
- * Optional. The Google Compute Engine network to be used for machine |
- * communications. Cannot be specified with subnetwork_uri. If neither |
- * network_uri nor subnetwork_uri is specified, the "default" network of the |
- * project is used, if it exists. Cannot be a "Custom Subnet Network" (see |
- * Using Subnetworks for more information).A full URL, partial URI, or short |
- * name are valid. Examples: |
- * https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default |
- * projects/[project_id]/regions/global/default |
- * default |
- */ |
+ |
+ /// Optional. The Google Compute Engine network to be used for machine |
+ /// communications. Cannot be specified with subnetwork_uri. If neither |
+ /// network_uri nor subnetwork_uri is specified, the "default" network of the |
+ /// project is used, if it exists. Cannot be a "Custom Subnet Network" (see |
+ /// Using Subnetworks for more information). A full URL, partial URI, or short
+ /// name are valid. Examples: |
+ /// https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default |
+ /// projects/[project_id]/regions/global/default |
+ /// default |
core.String networkUri; |
- /** |
- * Optional. The service account of the instances. Defaults to the default |
- * Google Compute Engine service account. Custom service accounts need |
- * permissions equivalent to the folloing IAM roles: |
- * roles/logging.logWriter |
- * roles/storage.objectAdmin(see |
- * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts |
- * for more information). Example: |
- * [account_id]@[project_id].iam.gserviceaccount.com |
- */ |
+ |
+ /// Optional. The service account of the instances. Defaults to the default |
+ /// Google Compute Engine service account. Custom service accounts need |
+ /// permissions equivalent to the following IAM roles:
+ /// roles/logging.logWriter
+ /// roles/storage.objectAdmin (see
+ /// https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts |
+ /// for more information). Example: |
+ /// [account_id]@[project_id].iam.gserviceaccount.com |
core.String serviceAccount; |
- /** |
- * Optional. The URIs of service account scopes to be included in Google |
- * Compute Engine instances. The following base set of scopes is always |
- * included: |
- * https://www.googleapis.com/auth/cloud.useraccounts.readonly |
- * https://www.googleapis.com/auth/devstorage.read_write |
- * https://www.googleapis.com/auth/logging.writeIf no scopes are specified, |
- * the following defaults are also provided: |
- * https://www.googleapis.com/auth/bigquery |
- * https://www.googleapis.com/auth/bigtable.admin.table |
- * https://www.googleapis.com/auth/bigtable.data |
- * https://www.googleapis.com/auth/devstorage.full_control |
- */ |
+ |
+ /// Optional. The URIs of service account scopes to be included in Google |
+ /// Compute Engine instances. The following base set of scopes is always |
+ /// included: |
+ /// https://www.googleapis.com/auth/cloud.useraccounts.readonly |
+ /// https://www.googleapis.com/auth/devstorage.read_write |
+ /// https://www.googleapis.com/auth/logging.write. If no scopes are specified,
+ /// the following defaults are also provided: |
+ /// https://www.googleapis.com/auth/bigquery |
+ /// https://www.googleapis.com/auth/bigtable.admin.table |
+ /// https://www.googleapis.com/auth/bigtable.data |
+ /// https://www.googleapis.com/auth/devstorage.full_control |
core.List<core.String> serviceAccountScopes; |
- /** |
- * Optional. The Google Compute Engine subnetwork to be used for machine |
- * communications. Cannot be specified with network_uri.A full URL, partial |
- * URI, or short name are valid. Examples: |
- * https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0 |
- * projects/[project_id]/regions/us-east1/sub0 |
- * sub0 |
- */ |
+ |
+ /// Optional. The Google Compute Engine subnetwork to be used for machine |
+ /// communications. Cannot be specified with network_uri. A full URL, partial
+ /// URI, or short name are valid. Examples: |
+ /// https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0 |
+ /// projects/[project_id]/regions/us-east1/sub0 |
+ /// sub0 |
core.String subnetworkUri; |
- /** |
- * The Google Compute Engine tags to add to all instances (see Tagging |
- * instances). |
- */ |
+ |
+ /// The Google Compute Engine tags to add to all instances (see Tagging |
+ /// instances). |
core.List<core.String> tags; |
- /** |
- * Optional. The zone where the Google Compute Engine cluster will be located. |
- * On a create request, it is required in the "global" region. If omitted in a |
- * non-global Cloud Dataproc region, the service will pick a zone in the |
- * corresponding Compute Engine region. On a get request, zone will always be |
- * present.A full URL, partial URI, or short name are valid. Examples: |
- * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] |
- * projects/[project_id]/zones/[zone] |
- * us-central1-f |
- */ |
+ |
+ /// Optional. The zone where the Google Compute Engine cluster will be |
+ /// located. On a create request, it is required in the "global" region. If |
+ /// omitted in a non-global Cloud Dataproc region, the service will pick a |
+ /// zone in the corresponding Compute Engine region. On a get request, zone |
+ /// will always be present. A full URL, partial URI, or short name are valid.
+ /// Examples: |
+ /// https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] |
+ /// projects/[project_id]/zones/[zone] |
+ /// us-central1-f |
core.String zoneUri; |
GceClusterConfig(); |
@@ -1712,7 +1737,8 @@ class GceClusterConfig { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (internalIpOnly != null) { |
_json["internalIpOnly"] = internalIpOnly; |
} |
@@ -1741,56 +1767,47 @@ class GceClusterConfig { |
} |
} |
-/** |
- * A Cloud Dataproc job for running Apache Hadoop MapReduce |
- * (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) |
- * jobs on Apache Hadoop YARN |
- * (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). |
- */ |
+/// A Cloud Dataproc job for running Apache Hadoop MapReduce |
+/// (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) |
+/// jobs on Apache Hadoop YARN |
+/// (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). |
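+///
+/// A sketch that runs the bundled word-count example; the jar URI is one of
+/// the examples documented on [mainJarFileUri], and the bucket paths are
+/// hypothetical:
+///
+///     final hadoop = new HadoopJob()
+///       ..mainJarFileUri =
+///           'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
+///       ..args = ['wordcount', 'gs://my-bucket/input', 'gs://my-bucket/output'];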
class HadoopJob { |
- /** |
- * Optional. HCFS URIs of archives to be extracted in the working directory of |
- * Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, |
- * or .zip. |
- */ |
+ /// Optional. HCFS URIs of archives to be extracted in the working directory |
+ /// of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, |
+ /// .tgz, or .zip. |
core.List<core.String> archiveUris; |
- /** |
- * Optional. The arguments to pass to the driver. Do not include arguments, |
- * such as -libjars or -Dfoo=bar, that can be set as job properties, since a |
- * collision may occur that causes an incorrect job submission. |
- */ |
+ |
+ /// Optional. The arguments to pass to the driver. Do not include arguments, |
+ /// such as -libjars or -Dfoo=bar, that can be set as job properties, since a |
+ /// collision may occur that causes an incorrect job submission. |
core.List<core.String> args; |
- /** |
- * Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to |
- * the working directory of Hadoop drivers and distributed tasks. Useful for |
- * naively parallel tasks. |
- */ |
+ |
+ /// Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied |
+ /// to the working directory of Hadoop drivers and distributed tasks. Useful |
+ /// for naively parallel tasks. |
core.List<core.String> fileUris; |
- /** |
- * Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and |
- * tasks. |
- */ |
+ |
+ /// Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and |
+ /// tasks. |
core.List<core.String> jarFileUris; |
- /** Optional. The runtime log config for job execution. */ |
+ |
+ /// Optional. The runtime log config for job execution. |
LoggingConfig loggingConfig; |
- /** |
- * The name of the driver's main class. The jar file containing the class must |
- * be in the default CLASSPATH or specified in jar_file_uris. |
- */ |
+ |
+ /// The name of the driver's main class. The jar file containing the class |
+ /// must be in the default CLASSPATH or specified in jar_file_uris. |
core.String mainClass; |
- /** |
- * The HCFS URI of the jar file containing the main class. Examples: |
- * 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' |
- * 'hdfs:/tmp/test-samples/custom-wordcount.jar' |
- * 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar' |
- */ |
+ |
+ /// The HCFS URI of the jar file containing the main class. Examples: |
+ /// 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' |
+ /// 'hdfs:/tmp/test-samples/custom-wordcount.jar' |
+ /// 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar' |
core.String mainJarFileUri; |
- /** |
- * Optional. A mapping of property names to values, used to configure Hadoop. |
- * Properties that conflict with values set by the Cloud Dataproc API may be |
- * overwritten. Can include properties set in /etc/hadoop/conf / * -site and |
- * classes in user code. |
- */ |
+ |
+ /// Optional. A mapping of property names to values, used to configure |
+ /// Hadoop. Properties that conflict with values set by the Cloud Dataproc |
+ /// API may be overwritten. Can include properties set in
+ /// /etc/hadoop/conf/*-site and classes in user code.
core.Map<core.String, core.String> properties; |
HadoopJob(); |
@@ -1823,7 +1840,8 @@ class HadoopJob { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (archiveUris != null) { |
_json["archiveUris"] = archiveUris; |
} |
@@ -1852,37 +1870,32 @@ class HadoopJob { |
} |
} |
-/** |
- * A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/) |
- * queries on YARN. |
- */ |
+/// A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/) |
+/// queries on YARN. |
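+///
+/// A sketch submitting a query script from Cloud Storage (hypothetical URI
+/// and variable):
+///
+///     final hive = new HiveJob()
+///       ..queryFileUri = 'gs://my-bucket/queries/report.q'
+///       ..scriptVariables = {'date': '2017-10-31'};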
class HiveJob { |
- /** |
- * Optional. Whether to continue executing queries if a query fails. The |
- * default value is false. Setting to true can be useful when executing |
- * independent parallel queries. |
- */ |
+ /// Optional. Whether to continue executing queries if a query fails. The |
+ /// default value is false. Setting to true can be useful when executing |
+ /// independent parallel queries. |
core.bool continueOnFailure; |
- /** |
- * Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server |
- * and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. |
- */ |
+ |
+ /// Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive |
+ /// server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. |
core.List<core.String> jarFileUris; |
- /** |
- * Optional. A mapping of property names and values, used to configure Hive. |
- * Properties that conflict with values set by the Cloud Dataproc API may be |
- * overwritten. Can include properties set in /etc/hadoop/conf / * -site.xml, |
- * /etc/hive/conf/hive-site.xml, and classes in user code. |
- */ |
+ |
+ /// Optional. A mapping of property names and values, used to configure Hive. |
+ /// Properties that conflict with values set by the Cloud Dataproc API may be |
+ /// overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+ /// /etc/hive/conf/hive-site.xml, and classes in user code.
core.Map<core.String, core.String> properties; |
- /** The HCFS URI of the script that contains Hive queries. */ |
+ |
+ /// The HCFS URI of the script that contains Hive queries. |
core.String queryFileUri; |
- /** A list of queries. */ |
+ |
+ /// A list of queries. |
QueryList queryList; |
- /** |
- * Optional. Mapping of query variable names to values (equivalent to the Hive |
- * command: SET name="value";). |
- */ |
+ |
+ /// Optional. Mapping of query variable names to values (equivalent to the |
+ /// Hive command: SET name="value";). |
core.Map<core.String, core.String> scriptVariables; |
HiveJob(); |
@@ -1909,7 +1922,8 @@ class HiveJob { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (continueOnFailure != null) { |
_json["continueOnFailure"] = continueOnFailure; |
} |
@@ -1932,59 +1946,53 @@ class HiveJob { |
} |
} |
-/** |
- * Optional. The config settings for Google Compute Engine resources in an |
- * instance group, such as a master or worker group. |
- */ |
+/// Optional. The config settings for Google Compute Engine resources in an |
+/// instance group, such as a master or worker group. |
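+///
+/// A worker-group sketch using the short machine type name documented on
+/// [machineTypeUri] (hypothetical count):
+///
+///     final workers = new InstanceGroupConfig()
+///       ..numInstances = 2
+///       ..machineTypeUri = 'n1-standard-2';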
class InstanceGroupConfig { |
- /** |
- * Optional. The Google Compute Engine accelerator configuration for these |
- * instances.Beta Feature: This feature is still under development. It may be |
- * changed before final release. |
- */ |
+ /// Optional. The Google Compute Engine accelerator configuration for these |
+ /// instances. Beta Feature: This feature is still under development. It may
+ /// be changed before final release. |
core.List<AcceleratorConfig> accelerators; |
- /** Optional. Disk option config settings. */ |
+ |
+ /// Optional. Disk option config settings. |
DiskConfig diskConfig; |
- /** |
- * Output-only. The Google Compute Engine image resource used for cluster |
- * instances. Inferred from SoftwareConfig.image_version. |
- */ |
+ |
+ /// Output-only. The Google Compute Engine image resource used for cluster |
+ /// instances. Inferred from SoftwareConfig.image_version. |
core.String imageUri; |
- /** |
- * Optional. The list of instance names. Cloud Dataproc derives the names from |
- * cluster_name, num_instances, and the instance group if not set by user |
- * (recommended practice is to let Cloud Dataproc derive the name). |
- */ |
+ |
+ /// Optional. The list of instance names. Cloud Dataproc derives the names |
+ /// from cluster_name, num_instances, and the instance group if not set by |
+ /// user (recommended practice is to let Cloud Dataproc derive the name). |
core.List<core.String> instanceNames; |
- /** |
- * Optional. Specifies that this instance group contains preemptible |
- * instances. |
- */ |
+ |
+ /// Optional. Specifies that this instance group contains preemptible |
+ /// instances. |
core.bool isPreemptible; |
- /** |
- * Optional. The Google Compute Engine machine type used for cluster |
- * instances.A full URL, partial URI, or short name are valid. Examples: |
- * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 |
- * projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 |
- * n1-standard-2 |
- */ |
+ |
+ /// Optional. The Google Compute Engine machine type used for cluster |
+ /// instances. A full URL, partial URI, or short name are valid. Examples:
+ /// https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 |
+ /// projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 |
+ /// n1-standard-2 |
core.String machineTypeUri; |
- /** |
- * Output-only. The config for Google Compute Engine Instance Group Manager |
- * that manages this group. This is only used for preemptible instance groups. |
- */ |
+ |
+ /// Output-only. The config for Google Compute Engine Instance Group Manager |
+ /// that manages this group. This is only used for preemptible instance |
+ /// groups. |
ManagedGroupConfig managedGroupConfig; |
- /** |
- * Optional. The number of VM instances in the instance group. For master |
- * instance groups, must be set to 1. |
- */ |
+ |
+ /// Optional. The number of VM instances in the instance group. For master |
+ /// instance groups, must be set to 1. |
core.int numInstances; |
InstanceGroupConfig(); |
InstanceGroupConfig.fromJson(core.Map _json) { |
if (_json.containsKey("accelerators")) { |
- accelerators = _json["accelerators"].map((value) => new AcceleratorConfig.fromJson(value)).toList(); |
+ accelerators = _json["accelerators"] |
+ .map((value) => new AcceleratorConfig.fromJson(value)) |
+ .toList(); |
} |
if (_json.containsKey("diskConfig")) { |
diskConfig = new DiskConfig.fromJson(_json["diskConfig"]); |
@@ -2002,7 +2010,8 @@ class InstanceGroupConfig { |
machineTypeUri = _json["machineTypeUri"]; |
} |
if (_json.containsKey("managedGroupConfig")) { |
- managedGroupConfig = new ManagedGroupConfig.fromJson(_json["managedGroupConfig"]); |
+ managedGroupConfig = |
+ new ManagedGroupConfig.fromJson(_json["managedGroupConfig"]); |
} |
if (_json.containsKey("numInstances")) { |
numInstances = _json["numInstances"]; |
@@ -2010,9 +2019,11 @@ class InstanceGroupConfig { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (accelerators != null) { |
- _json["accelerators"] = accelerators.map((value) => (value).toJson()).toList(); |
+ _json["accelerators"] = |
+ accelerators.map((value) => (value).toJson()).toList(); |
} |
if (diskConfig != null) { |
_json["diskConfig"] = (diskConfig).toJson(); |
@@ -2039,66 +2050,66 @@ class InstanceGroupConfig { |
} |
} |
-/** A Cloud Dataproc job resource. */ |
+/// A Cloud Dataproc job resource. |
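+///
+/// A construction sketch with hypothetical names; one of the job-type
+/// fields (here [hadoopJob]) carries the actual workload:
+///
+///     final job = new Job()
+///       ..placement = (new JobPlacement()..clusterName = 'analysis-cluster')
+///       ..hadoopJob = new HadoopJob();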
class Job { |
- /** |
- * Output-only. If present, the location of miscellaneous control files which |
- * may be used as part of job setup and handling. If not present, control |
- * files may be placed in the same location as driver_output_uri. |
- */ |
+ /// Output-only. If present, the location of miscellaneous control files |
+ /// which may be used as part of job setup and handling. If not present, |
+ /// control files may be placed in the same location as driver_output_uri. |
core.String driverControlFilesUri; |
- /** |
- * Output-only. A URI pointing to the location of the stdout of the job's |
- * driver program. |
- */ |
+ |
+ /// Output-only. A URI pointing to the location of the stdout of the job's |
+ /// driver program. |
core.String driverOutputResourceUri; |
- /** Job is a Hadoop job. */ |
+ |
+ /// Job is a Hadoop job. |
HadoopJob hadoopJob; |
- /** Job is a Hive job. */ |
+ |
+ /// Job is a Hive job. |
HiveJob hiveJob; |
- /** |
- * Optional. The labels to associate with this job. Label keys must contain 1 |
- * to 63 characters, and must conform to RFC 1035 |
- * (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if |
- * present, must contain 1 to 63 characters, and must conform to RFC 1035 |
- * (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be |
- * associated with a job. |
- */ |
+ |
+ /// Optional. The labels to associate with this job. Label keys must contain |
+ /// 1 to 63 characters, and must conform to RFC 1035 |
+ /// (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, |
+ /// if present, must contain 1 to 63 characters, and must conform to RFC 1035 |
+ /// (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be |
+ /// associated with a job. |
core.Map<core.String, core.String> labels; |
- /** Job is a Pig job. */ |
+ |
+ /// Job is a Pig job. |
PigJob pigJob; |
- /** |
- * Required. Job information, including how, when, and where to run the job. |
- */ |
+ |
+ /// Required. Job information, including how, when, and where to run the job. |
JobPlacement placement; |
- /** Job is a Pyspark job. */ |
+ |
+ /// Job is a Pyspark job. |
PySparkJob pysparkJob; |
- /** |
- * Optional. The fully qualified reference to the job, which can be used to |
- * obtain the equivalent REST path of the job resource. If this property is |
- * not specified when a job is created, the server generates a |
- * <code>job_id</code>. |
- */ |
+ |
+ /// Optional. The fully qualified reference to the job, which can be used to |
+ /// obtain the equivalent REST path of the job resource. If this property is |
+ /// not specified when a job is created, the server generates a |
+ /// <code>job_id</code>. |
JobReference reference; |
- /** Optional. Job scheduling configuration. */ |
+ |
+ /// Optional. Job scheduling configuration. |
JobScheduling scheduling; |
- /** Job is a Spark job. */ |
+ |
+ /// Job is a Spark job. |
SparkJob sparkJob; |
- /** Job is a SparkSql job. */ |
+ |
+ /// Job is a SparkSql job. |
SparkSqlJob sparkSqlJob; |
- /** |
- * Output-only. The job status. Additional application-specific status |
- * information may be contained in the <code>type_job</code> and |
- * <code>yarn_applications</code> fields. |
- */ |
+ |
+ /// Output-only. The job status. Additional application-specific status |
+ /// information may be contained in the <code>type_job</code> and |
+ /// <code>yarn_applications</code> fields. |
JobStatus status; |
- /** Output-only. The previous job status. */ |
+ |
+ /// Output-only. The previous job status. |
core.List<JobStatus> statusHistory; |
- /** |
- * Output-only. The collection of YARN applications spun up by this job.Beta |
- * Feature: This report is available for testing purposes only. It may be |
- * changed before final release. |
- */ |
+ |
+ /// Output-only. The collection of YARN applications spun up by this job. Beta
+ /// Feature: This report is available for testing purposes only. It may be |
+ /// changed before final release. |
core.List<YarnApplication> yarnApplications; |
Job(); |
@@ -2144,15 +2155,20 @@ class Job { |
status = new JobStatus.fromJson(_json["status"]); |
} |
if (_json.containsKey("statusHistory")) { |
- statusHistory = _json["statusHistory"].map((value) => new JobStatus.fromJson(value)).toList(); |
+ statusHistory = _json["statusHistory"] |
+ .map((value) => new JobStatus.fromJson(value)) |
+ .toList(); |
} |
if (_json.containsKey("yarnApplications")) { |
- yarnApplications = _json["yarnApplications"].map((value) => new YarnApplication.fromJson(value)).toList(); |
+ yarnApplications = _json["yarnApplications"] |
+ .map((value) => new YarnApplication.fromJson(value)) |
+ .toList(); |
} |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (driverControlFilesUri != null) { |
_json["driverControlFilesUri"] = driverControlFilesUri; |
} |
@@ -2193,23 +2209,24 @@ class Job { |
_json["status"] = (status).toJson(); |
} |
if (statusHistory != null) { |
- _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).toList(); |
+ _json["statusHistory"] = |
+ statusHistory.map((value) => (value).toJson()).toList(); |
} |
if (yarnApplications != null) { |
- _json["yarnApplications"] = yarnApplications.map((value) => (value).toJson()).toList(); |
+ _json["yarnApplications"] = |
+ yarnApplications.map((value) => (value).toJson()).toList(); |
} |
return _json; |
} |
} |
-/** Cloud Dataproc job config. */ |
+/// Cloud Dataproc job config. |
class JobPlacement { |
- /** Required. The name of the cluster where the job will be submitted. */ |
+ /// Required. The name of the cluster where the job will be submitted. |
core.String clusterName; |
- /** |
- * Output-only. A cluster UUID generated by the Cloud Dataproc service when |
- * the job is submitted. |
- */ |
+ |
+ /// Output-only. A cluster UUID generated by the Cloud Dataproc service when |
+ /// the job is submitted. |
core.String clusterUuid; |
JobPlacement(); |
@@ -2224,7 +2241,8 @@ class JobPlacement { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (clusterName != null) { |
_json["clusterName"] = clusterName; |
} |
@@ -2235,20 +2253,17 @@ class JobPlacement { |
} |
} |
-/** Encapsulates the full scoping used to reference a job. */ |
+/// Encapsulates the full scoping used to reference a job. |
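+///
+/// A sketch with a client-chosen ID, which allows retries without creating
+/// duplicate jobs (hypothetical values):
+///
+///     final reference = new JobReference()
+///       ..projectId = 'my-project'
+///       ..jobId = 'wordcount-2017-10-31';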
class JobReference { |
- /** |
- * Optional. The job ID, which must be unique within the project. The job ID |
- * is generated by the server upon job submission or provided by the user as a |
- * means to perform retries without creating duplicate jobs. The ID must |
- * contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens |
- * (-). The maximum length is 100 characters. |
- */ |
+ /// Optional. The job ID, which must be unique within the project. The job ID |
+ /// is generated by the server upon job submission or provided by the user as |
+ /// a means to perform retries without creating duplicate jobs. The ID must |
+ /// contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or |
+ /// hyphens (-). The maximum length is 100 characters. |
core.String jobId; |
- /** |
- * Required. The ID of the Google Cloud Platform project that the job belongs |
- * to. |
- */ |
+ |
+ /// Required. The ID of the Google Cloud Platform project that the job |
+ /// belongs to. |
core.String projectId; |
JobReference(); |
@@ -2263,7 +2278,8 @@ class JobReference { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (jobId != null) { |
_json["jobId"] = jobId; |
} |
@@ -2274,17 +2290,13 @@ class JobReference { |
} |
} |
-/** |
- * Job scheduling options.Beta Feature: These options are available for testing |
- * purposes only. They may be changed before final release. |
- */ |
+/// Job scheduling options. Beta Feature: These options are available for
+/// testing purposes only. They may be changed before final release. |
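+///
+/// For example, allowing up to two driver restarts per hour (hypothetical
+/// limit):
+///
+///     final scheduling = new JobScheduling()..maxFailuresPerHour = 2;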
class JobScheduling { |
- /** |
- * Optional. Maximum number of times per hour a driver may be restarted as a |
- * result of driver terminating with non-zero code before job is reported |
- * failed.A job may be reported as thrashing if driver exits with non-zero |
- * code 4 times within 10 minute window.Maximum value is 10. |
- */ |
+ /// Optional. Maximum number of times per hour a driver may be restarted as a |
+ /// result of driver terminating with non-zero code before job is reported |
+ /// failed. A job may be reported as thrashing if the driver exits with a
+ /// non-zero code four times within a 10-minute window. Maximum value is 10.
core.int maxFailuresPerHour; |
JobScheduling(); |
@@ -2296,7 +2308,8 @@ class JobScheduling { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (maxFailuresPerHour != null) { |
_json["maxFailuresPerHour"] = maxFailuresPerHour; |
} |
@@ -2304,48 +2317,47 @@ class JobScheduling { |
} |
} |
-/** Cloud Dataproc job status. */ |
+/// Cloud Dataproc job status. |
class JobStatus { |
- /** |
- * Output-only. Optional job state details, such as an error description if |
- * the state is <code>ERROR</code>. |
- */ |
+ /// Output-only. Optional job state details, such as an error description if |
+ /// the state is <code>ERROR</code>. |
core.String details; |
- /** |
- * Output-only. A state message specifying the overall job state. |
- * Possible string values are: |
- * - "STATE_UNSPECIFIED" : The job state is unknown. |
- * - "PENDING" : The job is pending; it has been submitted, but is not yet |
- * running. |
- * - "SETUP_DONE" : Job has been received by the service and completed initial |
- * setup; it will soon be submitted to the cluster. |
- * - "RUNNING" : The job is running on the cluster. |
- * - "CANCEL_PENDING" : A CancelJob request has been received, but is pending. |
- * - "CANCEL_STARTED" : Transient in-flight resources have been canceled, and |
- * the request to cancel the running job has been issued to the cluster. |
- * - "CANCELLED" : The job cancellation was successful. |
- * - "DONE" : The job has completed successfully. |
- * - "ERROR" : The job has completed, but encountered an error. |
- * - "ATTEMPT_FAILURE" : Job attempt has failed. The detail field contains |
- * failure details for this attempt.Applies to restartable jobs only. |
- */ |
+ |
+ /// Output-only. A state message specifying the overall job state. |
+ /// Possible string values are: |
+ /// - "STATE_UNSPECIFIED" : The job state is unknown. |
+ /// - "PENDING" : The job is pending; it has been submitted, but is not yet |
+ /// running. |
+ /// - "SETUP_DONE" : Job has been received by the service and completed |
+ /// initial setup; it will soon be submitted to the cluster. |
+ /// - "RUNNING" : The job is running on the cluster. |
+ /// - "CANCEL_PENDING" : A CancelJob request has been received, but is |
+ /// pending. |
+ /// - "CANCEL_STARTED" : Transient in-flight resources have been canceled, |
+ /// and the request to cancel the running job has been issued to the cluster. |
+ /// - "CANCELLED" : The job cancellation was successful. |
+ /// - "DONE" : The job has completed successfully. |
+ /// - "ERROR" : The job has completed, but encountered an error. |
+ /// - "ATTEMPT_FAILURE" : Job attempt has failed. The detail field contains |
+ /// failure details for this attempt. Applies to restartable jobs only.
core.String state; |
- /** Output-only. The time when this state was entered. */ |
+ |
+ /// Output-only. The time when this state was entered. |
core.String stateStartTime; |
- /** |
- * Output-only. Additional state information, which includes status reported |
- * by the agent. |
- * Possible string values are: |
- * - "UNSPECIFIED" |
- * - "SUBMITTED" : The Job is submitted to the agent.Applies to RUNNING state. |
- * - "QUEUED" : The Job has been received and is awaiting execution (it may be |
- * waiting for a condition to be met). See the "details" field for the reason |
- * for the delay.Applies to RUNNING state. |
- * - "STALE_STATUS" : The agent-reported status is out of date, which may be |
- * caused by a loss of communication between the agent and Cloud Dataproc. If |
- * the agent does not send a timely update, the job will fail.Applies to |
- * RUNNING state. |
- */ |
+ |
+ /// Output-only. Additional state information, which includes status reported |
+ /// by the agent. |
+ /// Possible string values are: |
+ /// - "UNSPECIFIED" |
+ /// - "SUBMITTED" : The Job is submitted to the agent.Applies to RUNNING |
+ /// state. |
+ /// - "QUEUED" : The Job has been received and is awaiting execution (it may |
+ /// be waiting for a condition to be met). See the "details" field for the |
+ /// reason for the delay.Applies to RUNNING state. |
+ /// - "STALE_STATUS" : The agent-reported status is out of date, which may be |
+ /// caused by a loss of communication between the agent and Cloud Dataproc. |
+ /// If the agent does not send a timely update, the job will fail. Applies to
+ /// RUNNING state. |
core.String substate; |
JobStatus(); |
@@ -2366,7 +2378,8 @@ class JobStatus { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (details != null) { |
_json["details"] = details; |
} |
@@ -2383,22 +2396,23 @@ class JobStatus { |
} |
} |
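+
+// Editor's note: a hedged sketch, not generated code. Treats a JobStatus as
+// terminal using the state strings documented above.
+bool exampleIsTerminal(JobStatus status) =>
+    status.state == "DONE" ||
+    status.state == "ERROR" ||
+    status.state == "CANCELLED";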
-/** The list of all clusters in a project. */ |
+/// The list of all clusters in a project. |
class ListClustersResponse { |
- /** Output-only. The clusters in the project. */ |
+ /// Output-only. The clusters in the project. |
core.List<Cluster> clusters; |
- /** |
- * Output-only. This token is included in the response if there are more |
- * results to fetch. To fetch additional results, provide this value as the |
- * page_token in a subsequent ListClustersRequest. |
- */ |
+ |
+ /// Output-only. This token is included in the response if there are more |
+ /// results to fetch. To fetch additional results, provide this value as the |
+ /// page_token in a subsequent ListClustersRequest. |
core.String nextPageToken; |
ListClustersResponse(); |
ListClustersResponse.fromJson(core.Map _json) { |
if (_json.containsKey("clusters")) { |
- clusters = _json["clusters"].map((value) => new Cluster.fromJson(value)).toList(); |
+ clusters = _json["clusters"] |
+ .map((value) => new Cluster.fromJson(value)) |
+ .toList(); |
} |
if (_json.containsKey("nextPageToken")) { |
nextPageToken = _json["nextPageToken"]; |
@@ -2406,7 +2420,8 @@ class ListClustersResponse { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (clusters != null) { |
_json["clusters"] = clusters.map((value) => (value).toJson()).toList(); |
} |
@@ -2417,15 +2432,14 @@ class ListClustersResponse { |
} |
} |
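+
+// Editor's note: an illustrative pagination sketch. `fetchPage` is an assumed
+// helper standing in for a call such as projects.regions.clusters.list (pass
+// null for the first page); this also assumes the file's usual
+// `import 'dart:async' as async;`.
+async.Future<core.List<Cluster>> exampleAllClusters(
+    async.Future<ListClustersResponse> fetchPage(core.String pageToken)) async {
+  final clusters = <Cluster>[];
+  core.String token;
+  do {
+    final page = await fetchPage(token);
+    if (page.clusters != null) clusters.addAll(page.clusters);
+    token = page.nextPageToken;
+  } while (token != null);
+  return clusters;
+}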
-/** A list of jobs in a project. */ |
+/// A list of jobs in a project. |
class ListJobsResponse { |
- /** Output-only. Jobs list. */ |
+ /// Output-only. Jobs list. |
core.List<Job> jobs; |
- /** |
- * Optional. This token is included in the response if there are more results |
- * to fetch. To fetch additional results, provide this value as the page_token |
- * in a subsequent <code>ListJobsRequest</code>. |
- */ |
+ |
+ /// Optional. This token is included in the response if there are more |
+ /// results to fetch. To fetch additional results, provide this value as the |
+ /// page_token in a subsequent <code>ListJobsRequest</code>. |
core.String nextPageToken; |
ListJobsResponse(); |
@@ -2440,7 +2454,8 @@ class ListJobsResponse { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (jobs != null) { |
_json["jobs"] = jobs.map((value) => (value).toJson()).toList(); |
} |
@@ -2451,11 +2466,12 @@ class ListJobsResponse { |
} |
} |
-/** The response message for Operations.ListOperations. */ |
+/// The response message for Operations.ListOperations. |
class ListOperationsResponse { |
- /** The standard List next-page token. */ |
+ /// The standard List next-page token. |
core.String nextPageToken; |
- /** A list of operations that matches the specified filter in the request. */ |
+ |
+ /// A list of operations that matches the specified filter in the request. |
core.List<Operation> operations; |
ListOperationsResponse(); |
@@ -2465,29 +2481,31 @@ class ListOperationsResponse { |
nextPageToken = _json["nextPageToken"]; |
} |
if (_json.containsKey("operations")) { |
- operations = _json["operations"].map((value) => new Operation.fromJson(value)).toList(); |
+ operations = _json["operations"] |
+ .map((value) => new Operation.fromJson(value)) |
+ .toList(); |
} |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (nextPageToken != null) { |
_json["nextPageToken"] = nextPageToken; |
} |
if (operations != null) { |
- _json["operations"] = operations.map((value) => (value).toJson()).toList(); |
+ _json["operations"] = |
+ operations.map((value) => (value).toJson()).toList(); |
} |
return _json; |
} |
} |
-/** The runtime logging config of the job. */ |
+/// The runtime logging config of the job. |
class LoggingConfig { |
- /** |
- * The per-package log levels for the driver. This may include "root" package |
- * name to configure rootLogger. Examples: 'com.google = FATAL', 'root = |
- * INFO', 'org.apache = DEBUG' |
- */ |
+ /// The per-package log levels for the driver. This may include the "root"
+ /// package name to configure the rootLogger. Examples: 'com.google = FATAL',
+ /// 'root = INFO', 'org.apache = DEBUG'.
core.Map<core.String, core.String> driverLogLevels; |
LoggingConfig(); |
@@ -2499,7 +2517,8 @@ class LoggingConfig { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (driverLogLevels != null) { |
_json["driverLogLevels"] = driverLogLevels; |
} |
@@ -2507,14 +2526,13 @@ class LoggingConfig { |
} |
} |
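+
+// Editor's note: a minimal sketch mirroring the examples in the doc comment
+// above: "root" configures the rootLogger, other keys are package names.
+LoggingConfig exampleLoggingConfig() => new LoggingConfig()
+  ..driverLogLevels = <core.String, core.String>{
+    "root": "INFO",
+    "org.apache": "DEBUG",
+    "com.google": "FATAL"
+  };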
-/** Specifies the resources used to actively manage an instance group. */ |
+/// Specifies the resources used to actively manage an instance group. |
class ManagedGroupConfig { |
- /** Output-only. The name of the Instance Group Manager for this group. */ |
+ /// Output-only. The name of the Instance Group Manager for this group. |
core.String instanceGroupManagerName; |
- /** |
- * Output-only. The name of the Instance Template used for the Managed |
- * Instance Group. |
- */ |
+ |
+ /// Output-only. The name of the Instance Template used for the Managed |
+ /// Instance Group. |
core.String instanceTemplateName; |
ManagedGroupConfig(); |
@@ -2529,7 +2547,8 @@ class ManagedGroupConfig { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (instanceGroupManagerName != null) { |
_json["instanceGroupManagerName"] = instanceGroupManagerName; |
} |
@@ -2540,19 +2559,16 @@ class ManagedGroupConfig { |
} |
} |
-/** |
- * Specifies an executable to run on a fully configured node and a timeout |
- * period for executable completion. |
- */ |
+/// Specifies an executable to run on a fully configured node and a timeout |
+/// period for executable completion. |
class NodeInitializationAction { |
- /** Required. Google Cloud Storage URI of executable file. */ |
+ /// Required. Google Cloud Storage URI of executable file. |
core.String executableFile; |
- /** |
- * Optional. Amount of time executable has to complete. Default is 10 minutes. |
- * Cluster creation fails with an explanatory error message (the name of the |
- * executable that caused the error and the exceeded timeout period) if the |
- * executable is not completed at end of the timeout period. |
- */ |
+ |
+ /// Optional. Amount of time the executable has to complete. Default is 10
+ /// minutes. Cluster creation fails with an explanatory error message (the
+ /// name of the executable that caused the error and the exceeded timeout
+ /// period) if the executable has not completed by the end of the timeout
+ /// period.
core.String executionTimeout; |
NodeInitializationAction(); |
@@ -2567,7 +2583,8 @@ class NodeInitializationAction { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (executableFile != null) { |
_json["executableFile"] = executableFile; |
} |
@@ -2578,47 +2595,41 @@ class NodeInitializationAction { |
} |
} |
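+
+// Editor's note: illustrative only; the bucket and script names are made up.
+// Registers a startup executable with an explicit 10-minute timeout (the
+// documented default), expressed in the API's duration string form.
+NodeInitializationAction exampleInitAction() => new NodeInitializationAction()
+  ..executableFile = "gs://my-bucket/actions/install-deps.sh"
+  ..executionTimeout = "600s";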
-/** |
- * This resource represents a long-running operation that is the result of a |
- * network API call. |
- */ |
+/// This resource represents a long-running operation that is the result of a |
+/// network API call. |
class Operation { |
- /** |
- * If the value is false, it means the operation is still in progress. If |
- * true, the operation is completed, and either error or response is |
- * available. |
- */ |
+ /// If the value is false, it means the operation is still in progress. If |
+ /// true, the operation is completed, and either error or response is |
+ /// available. |
core.bool done; |
- /** The error result of the operation in case of failure or cancellation. */ |
+ |
+ /// The error result of the operation in case of failure or cancellation. |
Status error; |
- /** |
- * Service-specific metadata associated with the operation. It typically |
- * contains progress information and common metadata such as create time. Some |
- * services might not provide such metadata. Any method that returns a |
- * long-running operation should document the metadata type, if any. |
- * |
- * The values for Object must be JSON objects. It can consist of `num`, |
- * `String`, `bool` and `null` as well as `Map` and `List` values. |
- */ |
+ |
+ /// Service-specific metadata associated with the operation. It typically |
+ /// contains progress information and common metadata such as create time. |
+ /// Some services might not provide such metadata. Any method that returns a |
+ /// long-running operation should document the metadata type, if any. |
+ /// |
+ /// The values for Object must be JSON objects. They can consist of `num`,
+ /// `String`, `bool`, and `null`, as well as `Map` and `List` values.
core.Map<core.String, core.Object> metadata; |
- /** |
- * The server-assigned name, which is only unique within the same service that |
- * originally returns it. If you use the default HTTP mapping, the name should |
- * have the format of operations/some/unique/name. |
- */ |
+ |
+ /// The server-assigned name, which is only unique within the same service |
+ /// that originally returns it. If you use the default HTTP mapping, the name |
+ /// should have the format of operations/some/unique/name. |
core.String name; |
- /** |
- * The normal response of the operation in case of success. If the original |
- * method returns no data on success, such as Delete, the response is |
- * google.protobuf.Empty. If the original method is standard |
- * Get/Create/Update, the response should be the resource. For other methods, |
- * the response should have the type XxxResponse, where Xxx is the original |
- * method name. For example, if the original method name is TakeSnapshot(), |
- * the inferred response type is TakeSnapshotResponse. |
- * |
- * The values for Object must be JSON objects. It can consist of `num`, |
- * `String`, `bool` and `null` as well as `Map` and `List` values. |
- */ |
+ |
+ /// The normal response of the operation in case of success. If the original |
+ /// method returns no data on success, such as Delete, the response is |
+ /// google.protobuf.Empty. If the original method is standard |
+ /// Get/Create/Update, the response should be the resource. For other |
+ /// methods, the response should have the type XxxResponse, where Xxx is the |
+ /// original method name. For example, if the original method name is |
+ /// TakeSnapshot(), the inferred response type is TakeSnapshotResponse. |
+ /// |
+ /// The values for Object must be JSON objects. They can consist of `num`,
+ /// `String`, `bool`, and `null`, as well as `Map` and `List` values.
core.Map<core.String, core.Object> response; |
Operation(); |
@@ -2642,7 +2653,8 @@ class Operation { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (done != null) { |
_json["done"] = done; |
} |
@@ -2662,39 +2674,35 @@ class Operation { |
} |
} |
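+
+// Editor's note: a hedged sketch of reading an Operation per the doc comment
+// above: `done` gates whether `error` or `response` is meaningful.
+core.String exampleDescribeOperation(Operation op) {
+  if (op.done != true) return "still in progress";
+  if (op.error != null) return "failed: ${op.error.message}";
+  return "succeeded: ${op.response}";
+}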
-/** |
- * A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/) queries |
- * on YARN. |
- */ |
+/// A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/) |
+/// queries on YARN. |
class PigJob { |
- /** |
- * Optional. Whether to continue executing queries if a query fails. The |
- * default value is false. Setting to true can be useful when executing |
- * independent parallel queries. |
- */ |
+ /// Optional. Whether to continue executing queries if a query fails. The |
+ /// default value is false. Setting to true can be useful when executing |
+ /// independent parallel queries. |
core.bool continueOnFailure; |
- /** |
- * Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client |
- * and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. |
- */ |
+ |
+ /// Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig |
+ /// Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. |
core.List<core.String> jarFileUris; |
- /** Optional. The runtime log config for job execution. */ |
+ |
+ /// Optional. The runtime log config for job execution. |
LoggingConfig loggingConfig; |
- /** |
- * Optional. A mapping of property names to values, used to configure Pig. |
- * Properties that conflict with values set by the Cloud Dataproc API may be |
- * overwritten. Can include properties set in /etc/hadoop/conf / * -site.xml, |
- * /etc/pig/conf/pig.properties, and classes in user code. |
- */ |
+ |
+ /// Optional. A mapping of property names to values, used to configure Pig.
+ /// Properties that conflict with values set by the Cloud Dataproc API may be
+ /// overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
+ /// /etc/pig/conf/pig.properties, and classes in user code.
core.Map<core.String, core.String> properties; |
- /** The HCFS URI of the script that contains the Pig queries. */ |
+ |
+ /// The HCFS URI of the script that contains the Pig queries. |
core.String queryFileUri; |
- /** A list of queries. */ |
+ |
+ /// A list of queries. |
QueryList queryList; |
- /** |
- * Optional. Mapping of query variable names to values (equivalent to the Pig |
- * command: name=[value]). |
- */ |
+ |
+ /// Optional. Mapping of query variable names to values (equivalent to the |
+ /// Pig command: name=[value]). |
core.Map<core.String, core.String> scriptVariables; |
PigJob(); |
@@ -2724,7 +2732,8 @@ class PigJob { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (continueOnFailure != null) { |
_json["continueOnFailure"] = continueOnFailure; |
} |
@@ -2750,51 +2759,42 @@ class PigJob { |
} |
} |
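+
+// Editor's note: a minimal sketch; the script URI and variable are
+// placeholders. scriptVariables maps to Pig's name=[value] form, per the doc
+// comment above.
+PigJob examplePigJob() => new PigJob()
+  ..queryFileUri = "gs://my-bucket/scripts/etl.pig"
+  ..continueOnFailure = false
+  ..scriptVariables = <core.String, core.String>{"run_date": "2017-01-01"};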
-/** |
- * A Cloud Dataproc job for running Apache PySpark |
- * (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) |
- * applications on YARN. |
- */ |
+/// A Cloud Dataproc job for running Apache PySpark |
+/// (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) |
+/// applications on YARN. |
class PySparkJob { |
- /** |
- * Optional. HCFS URIs of archives to be extracted in the working directory of |
- * .jar, .tar, .tar.gz, .tgz, and .zip. |
- */ |
+ /// Optional. HCFS URIs of archives to be extracted in the working directory.
+ /// Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.
core.List<core.String> archiveUris; |
- /** |
- * Optional. The arguments to pass to the driver. Do not include arguments, |
- * such as --conf, that can be set as job properties, since a collision may |
- * occur that causes an incorrect job submission. |
- */ |
+ |
+ /// Optional. The arguments to pass to the driver. Do not include arguments, |
+ /// such as --conf, that can be set as job properties, since a collision may |
+ /// occur that causes an incorrect job submission. |
core.List<core.String> args; |
- /** |
- * Optional. HCFS URIs of files to be copied to the working directory of |
- * Python drivers and distributed tasks. Useful for naively parallel tasks. |
- */ |
+ |
+ /// Optional. HCFS URIs of files to be copied to the working directory of |
+ /// Python drivers and distributed tasks. Useful for naively parallel tasks. |
core.List<core.String> fileUris; |
- /** |
- * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python |
- * driver and tasks. |
- */ |
+ |
+ /// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python |
+ /// driver and tasks. |
core.List<core.String> jarFileUris; |
- /** Optional. The runtime log config for job execution. */ |
+ |
+ /// Optional. The runtime log config for job execution. |
LoggingConfig loggingConfig; |
- /** |
- * Required. The HCFS URI of the main Python file to use as the driver. Must |
- * be a .py file. |
- */ |
+ |
+ /// Required. The HCFS URI of the main Python file to use as the driver. Must |
+ /// be a .py file. |
core.String mainPythonFileUri; |
- /** |
- * Optional. A mapping of property names to values, used to configure PySpark. |
- * Properties that conflict with values set by the Cloud Dataproc API may be |
- * overwritten. Can include properties set in |
- * /etc/spark/conf/spark-defaults.conf and classes in user code. |
- */ |
+ |
+ /// Optional. A mapping of property names to values, used to configure |
+ /// PySpark. Properties that conflict with values set by the Cloud Dataproc |
+ /// API may be overwritten. Can include properties set in |
+ /// /etc/spark/conf/spark-defaults.conf and classes in user code. |
core.Map<core.String, core.String> properties; |
- /** |
- * Optional. HCFS file URIs of Python files to pass to the PySpark framework. |
- * Supported file types: .py, .egg, and .zip. |
- */ |
+ |
+ /// Optional. HCFS file URIs of Python files to pass to the PySpark |
+ /// framework. Supported file types: .py, .egg, and .zip. |
core.List<core.String> pythonFileUris; |
PySparkJob(); |
@@ -2827,7 +2827,8 @@ class PySparkJob { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (archiveUris != null) { |
_json["archiveUris"] = archiveUris; |
} |
@@ -2856,23 +2857,21 @@ class PySparkJob { |
} |
} |
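+
+// Editor's note: illustrative only; all URIs are placeholders. Only
+// mainPythonFileUri is required; args and pythonFileUris follow the field
+// docs above.
+PySparkJob examplePySparkJob() => new PySparkJob()
+  ..mainPythonFileUri = "gs://my-bucket/jobs/wordcount.py"
+  ..args = <core.String>["gs://my-bucket/input/", "gs://my-bucket/output/"]
+  ..pythonFileUris = <core.String>["gs://my-bucket/jobs/helpers.zip"];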
-/** A list of queries to run on a cluster. */ |
+/// A list of queries to run on a cluster. |
class QueryList { |
- /** |
- * Required. The queries to execute. You do not need to terminate a query with |
- * a semicolon. Multiple queries can be specified in one string by separating |
- * each with a semicolon. Here is an example of an Cloud Dataproc API snippet |
- * that uses a QueryList to specify a HiveJob: |
- * "hiveJob": { |
- * "queryList": { |
- * "queries": [ |
- * "query1", |
- * "query2", |
- * "query3;query4", |
- * ] |
- * } |
- * } |
- */ |
+ /// Required. The queries to execute. You do not need to terminate a query |
+ /// with a semicolon. Multiple queries can be specified in one string by |
+ /// separating each with a semicolon. Here is an example of a Cloud Dataproc
+ /// API snippet that uses a QueryList to specify a HiveJob: |
+ /// "hiveJob": { |
+ /// "queryList": { |
+ /// "queries": [ |
+ /// "query1", |
+ /// "query2", |
+ /// "query3;query4", |
+ /// ] |
+ /// } |
+ /// } |
core.List<core.String> queries; |
QueryList(); |
@@ -2884,7 +2883,8 @@ class QueryList { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (queries != null) { |
_json["queries"] = queries; |
} |
@@ -2892,28 +2892,25 @@ class QueryList { |
} |
} |
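+
+// Editor's note: a sketch of the snippet in the doc comment above, expressed
+// with this file's model class; the third entry carries two
+// semicolon-separated queries in one string.
+QueryList exampleQueryList() => new QueryList()
+  ..queries = <core.String>["query1", "query2", "query3;query4"];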
-/** Specifies the selection and config of software inside the cluster. */ |
+/// Specifies the selection and config of software inside the cluster. |
class SoftwareConfig { |
- /** |
- * Optional. The version of software inside the cluster. It must match the |
- * regular expression [0-9]+\.[0-9]+. If unspecified, it defaults to the |
- * latest version (see Cloud Dataproc Versioning). |
- */ |
+ /// Optional. The version of software inside the cluster. It must match the |
+ /// regular expression [0-9]+\.[0-9]+. If unspecified, it defaults to the |
+ /// latest version (see Cloud Dataproc Versioning). |
core.String imageVersion; |
- /** |
- * Optional. The properties to set on daemon config files.Property keys are |
- * specified in prefix:property format, such as core:fs.defaultFS. The |
- * following are supported prefixes and their mappings: |
- * capacity-scheduler: capacity-scheduler.xml |
- * core: core-site.xml |
- * distcp: distcp-default.xml |
- * hdfs: hdfs-site.xml |
- * hive: hive-site.xml |
- * mapred: mapred-site.xml |
- * pig: pig.properties |
- * spark: spark-defaults.conf |
- * yarn: yarn-site.xml |
- */ |
+ |
+ /// Optional. The properties to set on daemon config files. Property keys
+ /// are specified in prefix:property format, such as core:fs.defaultFS. The
+ /// following are supported prefixes and their mappings: |
+ /// capacity-scheduler: capacity-scheduler.xml |
+ /// core: core-site.xml |
+ /// distcp: distcp-default.xml |
+ /// hdfs: hdfs-site.xml |
+ /// hive: hive-site.xml |
+ /// mapred: mapred-site.xml |
+ /// pig: pig.properties |
+ /// spark: spark-defaults.conf |
+ /// yarn: yarn-site.xml
+ /// For more information, see Cluster properties.
core.Map<core.String, core.String> properties; |
SoftwareConfig(); |
@@ -2928,7 +2925,8 @@ class SoftwareConfig { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (imageVersion != null) { |
_json["imageVersion"] = imageVersion; |
} |
@@ -2939,48 +2937,41 @@ class SoftwareConfig { |
} |
} |
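+
+// Editor's note: an illustrative sketch; the values are placeholders. Keys
+// use the prefix:property convention listed above (core: maps to
+// core-site.xml, spark: to spark-defaults.conf).
+SoftwareConfig exampleSoftwareConfig() => new SoftwareConfig()
+  ..imageVersion = "1.1"
+  ..properties = <core.String, core.String>{
+    "core:fs.defaultFS": "hdfs://example-m",
+    "spark:spark.executor.memory": "4g"
+  };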
-/** |
- * A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/) |
- * applications on YARN. |
- */ |
+/// A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/) |
+/// applications on YARN. |
class SparkJob { |
- /** |
- * Optional. HCFS URIs of archives to be extracted in the working directory of |
- * Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, |
- * and .zip. |
- */ |
+ /// Optional. HCFS URIs of archives to be extracted in the working directory |
+ /// of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, |
+ /// .tgz, and .zip. |
core.List<core.String> archiveUris; |
- /** |
- * Optional. The arguments to pass to the driver. Do not include arguments, |
- * such as --conf, that can be set as job properties, since a collision may |
- * occur that causes an incorrect job submission. |
- */ |
+ |
+ /// Optional. The arguments to pass to the driver. Do not include arguments, |
+ /// such as --conf, that can be set as job properties, since a collision may |
+ /// occur that causes an incorrect job submission. |
core.List<core.String> args; |
- /** |
- * Optional. HCFS URIs of files to be copied to the working directory of Spark |
- * drivers and distributed tasks. Useful for naively parallel tasks. |
- */ |
+ |
+ /// Optional. HCFS URIs of files to be copied to the working directory of |
+ /// Spark drivers and distributed tasks. Useful for naively parallel tasks. |
core.List<core.String> fileUris; |
- /** |
- * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark |
- * driver and tasks. |
- */ |
+ |
+ /// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark |
+ /// driver and tasks. |
core.List<core.String> jarFileUris; |
- /** Optional. The runtime log config for job execution. */ |
+ |
+ /// Optional. The runtime log config for job execution. |
LoggingConfig loggingConfig; |
- /** |
- * The name of the driver's main class. The jar file that contains the class |
- * must be in the default CLASSPATH or specified in jar_file_uris. |
- */ |
+ |
+ /// The name of the driver's main class. The jar file that contains the class |
+ /// must be in the default CLASSPATH or specified in jar_file_uris. |
core.String mainClass; |
- /** The HCFS URI of the jar file that contains the main class. */ |
+ |
+ /// The HCFS URI of the jar file that contains the main class. |
core.String mainJarFileUri; |
- /** |
- * Optional. A mapping of property names to values, used to configure Spark. |
- * Properties that conflict with values set by the Cloud Dataproc API may be |
- * overwritten. Can include properties set in |
- * /etc/spark/conf/spark-defaults.conf and classes in user code. |
- */ |
+ |
+ /// Optional. A mapping of property names to values, used to configure Spark. |
+ /// Properties that conflict with values set by the Cloud Dataproc API may be |
+ /// overwritten. Can include properties set in |
+ /// /etc/spark/conf/spark-defaults.conf and classes in user code. |
core.Map<core.String, core.String> properties; |
SparkJob(); |
@@ -3013,7 +3004,8 @@ class SparkJob { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (archiveUris != null) { |
_json["archiveUris"] = archiveUris; |
} |
@@ -3042,29 +3034,28 @@ class SparkJob { |
} |
} |
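+
+// Editor's note: a minimal sketch; class and jar names are placeholders. A
+// driver entry point is given either as mainClass (with the jar on the
+// CLASSPATH or in jarFileUris) or as mainJarFileUri; this variant uses
+// mainClass.
+SparkJob exampleSparkJob() => new SparkJob()
+  ..mainClass = "com.example.WordCount"
+  ..jarFileUris = <core.String>["gs://my-bucket/jars/wordcount.jar"]
+  ..args = <core.String>["1000"];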
-/** |
- * A Cloud Dataproc job for running Apache Spark SQL |
- * (http://spark.apache.org/sql/) queries. |
- */ |
+/// A Cloud Dataproc job for running Apache Spark SQL |
+/// (http://spark.apache.org/sql/) queries. |
class SparkSqlJob { |
- /** Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. */ |
+ /// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. |
core.List<core.String> jarFileUris; |
- /** Optional. The runtime log config for job execution. */ |
+ |
+ /// Optional. The runtime log config for job execution. |
LoggingConfig loggingConfig; |
- /** |
- * Optional. A mapping of property names to values, used to configure Spark |
- * SQL's SparkConf. Properties that conflict with values set by the Cloud |
- * Dataproc API may be overwritten. |
- */ |
+ |
+ /// Optional. A mapping of property names to values, used to configure Spark |
+ /// SQL's SparkConf. Properties that conflict with values set by the Cloud |
+ /// Dataproc API may be overwritten. |
core.Map<core.String, core.String> properties; |
- /** The HCFS URI of the script that contains SQL queries. */ |
+ |
+ /// The HCFS URI of the script that contains SQL queries. |
core.String queryFileUri; |
- /** A list of queries. */ |
+ |
+ /// A list of queries. |
QueryList queryList; |
- /** |
- * Optional. Mapping of query variable names to values (equivalent to the |
- * Spark SQL command: SET name="value";). |
- */ |
+ |
+ /// Optional. Mapping of query variable names to values (equivalent to the |
+ /// Spark SQL command: SET name="value";). |
core.Map<core.String, core.String> scriptVariables; |
SparkSqlJob(); |
@@ -3091,7 +3082,8 @@ class SparkSqlJob { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (jarFileUris != null) { |
_json["jarFileUris"] = jarFileUris; |
} |
@@ -3114,58 +3106,56 @@ class SparkSqlJob { |
} |
} |
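+
+// Editor's note: illustrative only; the query URI is a placeholder.
+// scriptVariables corresponds to Spark SQL's SET name="value" form, per the
+// doc comment above.
+SparkSqlJob exampleSparkSqlJob() => new SparkSqlJob()
+  ..queryFileUri = "gs://my-bucket/queries/daily_report.sql"
+  ..scriptVariables = <core.String, core.String>{"env": "prod"};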
-/** |
- * The Status type defines a logical error model that is suitable for different |
- * programming environments, including REST APIs and RPC APIs. It is used by |
- * gRPC (https://github.com/grpc). The error model is designed to be: |
- * Simple to use and understand for most users |
- * Flexible enough to meet unexpected needsOverviewThe Status message contains |
- * three pieces of data: error code, error message, and error details. The error |
- * code should be an enum value of google.rpc.Code, but it may accept additional |
- * error codes if needed. The error message should be a developer-facing English |
- * message that helps developers understand and resolve the error. If a |
- * localized user-facing error message is needed, put the localized message in |
- * the error details or localize it in the client. The optional error details |
- * may contain arbitrary information about the error. There is a predefined set |
- * of error detail types in the package google.rpc that can be used for common |
- * error conditions.Language mappingThe Status message is the logical |
- * representation of the error model, but it is not necessarily the actual wire |
- * format. When the Status message is exposed in different client libraries and |
- * different wire protocols, it can be mapped differently. For example, it will |
- * likely be mapped to some exceptions in Java, but more likely mapped to some |
- * error codes in C.Other usesThe error model and the Status message can be used |
- * in a variety of environments, either with or without APIs, to provide a |
- * consistent developer experience across different environments.Example uses of |
- * this error model include: |
- * Partial errors. If a service needs to return partial errors to the client, it |
- * may embed the Status in the normal response to indicate the partial errors. |
- * Workflow errors. A typical workflow has multiple steps. Each step may have a |
- * Status message for error reporting. |
- * Batch operations. If a client uses batch request and batch response, the |
- * Status message should be used directly inside batch response, one for each |
- * error sub-response. |
- * Asynchronous operations. If an API call embeds asynchronous operation results |
- * in its response, the status of those operations should be represented |
- * directly using the Status message. |
- * Logging. If some API errors are stored in logs, the message Status could be |
- * used directly after any stripping needed for security/privacy reasons. |
- */ |
+/// The Status type defines a logical error model that is suitable for
+/// different programming environments, including REST APIs and RPC APIs. It is
+/// used by gRPC (https://github.com/grpc). The error model is designed to be:
+/// Simple to use and understand for most users
+/// Flexible enough to meet unexpected needs
+/// Overview: The Status message contains three pieces of data: error code,
+/// error message, and error details. The error code should be an enum value of
+/// google.rpc.Code, but it may accept additional error codes if needed. The
+/// error message should be a developer-facing English message that helps
+/// developers understand and resolve the error. If a localized user-facing
+/// error message is needed, put the localized message in the error details or
+/// localize it in the client. The optional error details may contain arbitrary
+/// information about the error. There is a predefined set of error detail
+/// types in the package google.rpc that can be used for common error
+/// conditions.
+/// Language mapping: The Status message is the logical representation of the
+/// error model, but it is not necessarily the actual wire format. When the
+/// Status message is exposed in different client libraries and different wire
+/// protocols, it can be mapped differently. For example, it will likely be
+/// mapped to some exceptions in Java, but more likely mapped to some error
+/// codes in C.
+/// Other uses: The error model and the Status message can be used in a variety
+/// of environments, either with or without APIs, to provide a consistent
+/// developer experience across different environments. Example uses of this
+/// error model include:
+/// Partial errors. If a service needs to return partial errors to the client,
+/// it may embed the Status in the normal response to indicate the partial
+/// errors.
+/// Workflow errors. A typical workflow has multiple steps. Each step may have
+/// a Status message for error reporting.
+/// Batch operations. If a client uses batch request and batch response, the
+/// Status message should be used directly inside batch response, one for each
+/// error sub-response.
+/// Asynchronous operations. If an API call embeds asynchronous operation
+/// results in its response, the status of those operations should be
+/// represented directly using the Status message.
+/// Logging. If some API errors are stored in logs, the message Status could be
+/// used directly after any stripping needed for security/privacy reasons.
class Status { |
- /** The status code, which should be an enum value of google.rpc.Code. */ |
+ /// The status code, which should be an enum value of google.rpc.Code. |
core.int code; |
- /** |
- * A list of messages that carry the error details. There is a common set of |
- * message types for APIs to use. |
- * |
- * The values for Object must be JSON objects. It can consist of `num`, |
- * `String`, `bool` and `null` as well as `Map` and `List` values. |
- */ |
+ |
+ /// A list of messages that carry the error details. There is a common set of |
+ /// message types for APIs to use. |
+ /// |
+ /// The values for Object must be JSON objects. They can consist of `num`,
+ /// `String`, `bool`, and `null`, as well as `Map` and `List` values.
core.List<core.Map<core.String, core.Object>> details; |
- /** |
- * A developer-facing error message, which should be in English. Any |
- * user-facing error message should be localized and sent in the |
- * google.rpc.Status.details field, or localized by the client. |
- */ |
+ |
+ /// A developer-facing error message, which should be in English. Any |
+ /// user-facing error message should be localized and sent in the |
+ /// google.rpc.Status.details field, or localized by the client. |
core.String message; |
Status(); |
@@ -3183,7 +3173,8 @@ class Status { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (code != null) { |
_json["code"] = code; |
} |
@@ -3197,9 +3188,9 @@ class Status { |
} |
} |
-/** A request to submit a job. */ |
+/// A request to submit a job. |
class SubmitJobRequest { |
- /** Required. The job resource. */ |
+ /// Required. The job resource. |
Job job; |
SubmitJobRequest(); |
@@ -3211,7 +3202,8 @@ class SubmitJobRequest { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (job != null) { |
_json["job"] = (job).toJson(); |
} |
@@ -3219,37 +3211,34 @@ class SubmitJobRequest { |
} |
} |
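+
+// Editor's note: a hedged end-to-end sketch. Job and JobPlacement are defined
+// earlier in this file; "example-cluster" is a placeholder, and
+// exampleSparkJob() is the sketch added above.
+SubmitJobRequest exampleSubmitRequest() => new SubmitJobRequest()
+  ..job = (new Job()
+    ..placement = (new JobPlacement()..clusterName = "example-cluster")
+    ..sparkJob = exampleSparkJob());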
-/** |
- * A YARN application created by a job. Application information is a subset of |
- * <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.Beta |
- * Feature: This report is available for testing purposes only. It may be |
- * changed before final release. |
- */ |
+/// A YARN application created by a job. Application information is a subset of
+/// <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.
+/// Beta Feature: This report is available for testing purposes only. It may be
+/// changed before final release.
class YarnApplication { |
- /** Required. The application name. */ |
+ /// Required. The application name. |
core.String name; |
- /** Required. The numerical progress of the application, from 1 to 100. */ |
+ |
+ /// Required. The numerical progress of the application, from 1 to 100. |
core.double progress; |
- /** |
- * Required. The application state. |
- * Possible string values are: |
- * - "STATE_UNSPECIFIED" : Status is unspecified. |
- * - "NEW" : Status is NEW. |
- * - "NEW_SAVING" : Status is NEW_SAVING. |
- * - "SUBMITTED" : Status is SUBMITTED. |
- * - "ACCEPTED" : Status is ACCEPTED. |
- * - "RUNNING" : Status is RUNNING. |
- * - "FINISHED" : Status is FINISHED. |
- * - "FAILED" : Status is FAILED. |
- * - "KILLED" : Status is KILLED. |
- */ |
+ |
+ /// Required. The application state. |
+ /// Possible string values are: |
+ /// - "STATE_UNSPECIFIED" : Status is unspecified. |
+ /// - "NEW" : Status is NEW. |
+ /// - "NEW_SAVING" : Status is NEW_SAVING. |
+ /// - "SUBMITTED" : Status is SUBMITTED. |
+ /// - "ACCEPTED" : Status is ACCEPTED. |
+ /// - "RUNNING" : Status is RUNNING. |
+ /// - "FINISHED" : Status is FINISHED. |
+ /// - "FAILED" : Status is FAILED. |
+ /// - "KILLED" : Status is KILLED. |
core.String state; |
- /** |
- * Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or |
- * TimelineServer that provides application-specific information. The URL uses |
- * the internal hostname, and requires a proxy server for resolution and, |
- * possibly, access. |
- */ |
+ |
+ /// Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or |
+ /// TimelineServer that provides application-specific information. The URL |
+ /// uses the internal hostname, and requires a proxy server for resolution |
+ /// and, possibly, access. |
core.String trackingUrl; |
YarnApplication(); |
@@ -3270,7 +3259,8 @@ class YarnApplication { |
} |
core.Map<core.String, core.Object> toJson() { |
- final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
+ final core.Map<core.String, core.Object> _json = |
+ new core.Map<core.String, core.Object>(); |
if (name != null) { |
_json["name"] = name; |
} |