Chromium Code Reviews

Unified Diff: discovery/googleapis/dataproc__v1.json

Issue 2779563003: Api-roll 47: 2017-03-27 (Closed)
Patch Set: Created 3 years, 9 months ago
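
Reviewer note: this roll bumps the discovery document's "revision" from 20170228 to 20170321 (see the revision hunk below). For context, the document being rolled is served by the public Discovery Service; a minimal sketch (assuming the standard discovery URL pattern and the Python `requests` library, neither of which is part of this diff) that fetches it and reports the revision:

import requests

# Standard Google API Discovery Service endpoint for Dataproc v1
# (assumed URL pattern; not part of this roll).
DISCOVERY_URL = "https://www.googleapis.com/discovery/v1/apis/dataproc/v1/rest"

doc = requests.get(DISCOVERY_URL).json()
# After this roll, "revision" should read 20170321.
print(doc["revision"], doc["rootUrl"])
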
Index: discovery/googleapis/dataproc__v1.json
diff --git a/discovery/googleapis/dataproc__v1.json b/discovery/googleapis/dataproc__v1.json
index 5cb66b3b8cdd6742d718b9972644ae8590c4d710..ba9d0c9c9723f9116ec73ee59abe5c3cf7b9c9bc 100644
--- a/discovery/googleapis/dataproc__v1.json
+++ b/discovery/googleapis/dataproc__v1.json
@@ -24,6 +24,37 @@
"ownerDomain": "google.com",
"ownerName": "Google",
"parameters": {
+ "access_token": {
+ "description": "OAuth access token.",
+ "location": "query",
+ "type": "string"
+ },
+ "key": {
+ "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
+ "location": "query",
+ "type": "string"
+ },
+ "quotaUser": {
+ "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.",
+ "location": "query",
+ "type": "string"
+ },
+ "pp": {
+ "default": "true",
+ "description": "Pretty-print response.",
+ "location": "query",
+ "type": "boolean"
+ },
+ "oauth_token": {
+ "description": "OAuth 2.0 token for the current user.",
+ "location": "query",
+ "type": "string"
+ },
+ "bearer_token": {
+ "description": "OAuth bearer token.",
+ "location": "query",
+ "type": "string"
+ },
"upload_protocol": {
"description": "Upload protocol for media (e.g. \"raw\", \"multipart\").",
"location": "query",
@@ -45,11 +76,6 @@
"location": "query",
"type": "string"
},
- "callback": {
- "description": "JSONP",
- "location": "query",
- "type": "string"
- },
"$.xgafv": {
"description": "V1 error format.",
"enum": [
@@ -63,6 +89,11 @@
"location": "query",
"type": "string"
},
+ "callback": {
+ "description": "JSONP",
+ "location": "query",
+ "type": "string"
+ },
"alt": {
"default": "json",
"description": "Data format for response.",
@@ -78,37 +109,6 @@
],
"location": "query",
"type": "string"
- },
- "access_token": {
- "description": "OAuth access token.",
- "location": "query",
- "type": "string"
- },
- "key": {
- "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
- "location": "query",
- "type": "string"
- },
- "quotaUser": {
- "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.",
- "location": "query",
- "type": "string"
- },
- "pp": {
- "default": "true",
- "description": "Pretty-print response.",
- "location": "query",
- "type": "boolean"
- },
- "bearer_token": {
- "description": "OAuth bearer token.",
- "location": "query",
- "type": "string"
- },
- "oauth_token": {
- "description": "OAuth 2.0 token for the current user.",
- "location": "query",
- "type": "string"
}
},
"protocol": "rest",
@@ -117,127 +117,105 @@
"resources": {
"regions": {
"resources": {
- "jobs": {
+ "clusters": {
"methods": {
- "list": {
- "description": "Lists regions/{region}/jobs in a project.",
- "httpMethod": "GET",
- "id": "dataproc.projects.regions.jobs.list",
+ "create": {
+ "description": "Creates a cluster in a project.",
+ "httpMethod": "POST",
+ "id": "dataproc.projects.regions.clusters.create",
"parameterOrder": [
"projectId",
"region"
],
"parameters": {
- "pageToken": {
- "description": "Optional The page token, returned by a previous call, to request the next page of results.",
- "location": "query",
- "type": "string"
- },
- "pageSize": {
- "description": "Optional The number of results to return in each response.",
- "format": "int32",
- "location": "query",
- "type": "integer"
- },
"region": {
"description": "Required The Cloud Dataproc region in which to handle the request.",
"location": "path",
"required": true,
"type": "string"
},
- "clusterName": {
- "description": "Optional If set, the returned jobs list includes only jobs that were submitted to the named cluster.",
- "location": "query",
- "type": "string"
- },
"projectId": {
- "description": "Required The ID of the Google Cloud Platform project that the job belongs to.",
+ "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.",
"location": "path",
"required": true,
"type": "string"
- },
- "filter": {
- "description": "Optional A filter constraining the jobs to list. Filters are case-sensitive and have the following syntax:field = value AND field = value ...where field is status.state or labels.[KEY], and [KEY] is a label key. value can be * to match all values. status.state can be either ACTIVE or INACTIVE. Only the logical AND operator is supported; space-separated items are treated as having an implicit AND operator.Example filter:status.state = ACTIVE AND labels.env = staging AND labels.starred = *",
- "location": "query",
- "type": "string"
- },
- "jobStateMatcher": {
- "description": "Optional Specifies enumerated categories of jobs to list (default = match ALL jobs).",
- "enum": [
- "ALL",
- "ACTIVE",
- "NON_ACTIVE"
- ],
- "location": "query",
- "type": "string"
}
},
- "path": "v1/projects/{projectId}/regions/{region}/jobs",
+ "path": "v1/projects/{projectId}/regions/{region}/clusters",
+ "request": {
+ "$ref": "Cluster"
+ },
"response": {
- "$ref": "ListJobsResponse"
+ "$ref": "Operation"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
},
- "cancel": {
- "description": "Starts a job cancellation request. To access the job resource after cancellation, call regions/{region}/jobs.list or regions/{region}/jobs.get.",
- "httpMethod": "POST",
- "id": "dataproc.projects.regions.jobs.cancel",
+ "patch": {
+ "description": "Updates a cluster in a project.",
+ "httpMethod": "PATCH",
+ "id": "dataproc.projects.regions.clusters.patch",
"parameterOrder": [
"projectId",
"region",
- "jobId"
+ "clusterName"
],
"parameters": {
- "projectId": {
- "description": "Required The ID of the Google Cloud Platform project that the job belongs to.",
+ "region": {
+ "description": "Required The Cloud Dataproc region in which to handle the request.",
"location": "path",
"required": true,
"type": "string"
},
- "jobId": {
- "description": "Required The job ID.",
+ "updateMask": {
+ "description": "Required Specifies the path, relative to <code>Cluster</code>, of the field to update. For example, to change the number of workers in a cluster to 5, the <code>update_mask</code> parameter would be specified as <code>config.worker_config.num_instances</code>, and the PATCH request body would specify the new value, as follows:\n{\n \"config\":{\n \"workerConfig\":{\n \"numInstances\":\"5\"\n }\n }\n}\nSimilarly, to change the number of preemptible workers in a cluster to 5, the <code>update_mask</code> parameter would be <code>config.secondary_worker_config.num_instances</code>, and the PATCH request body would be set as follows:\n{\n \"config\":{\n \"secondaryWorkerConfig\":{\n \"numInstances\":\"5\"\n }\n }\n}\n<strong>Note:</strong> Currently, <code>config.worker_config.num_instances</code> and <code>config.secondary_worker_config.num_instances</code> are the only fields that can be updated.",
+ "format": "google-fieldmask",
+ "location": "query",
+ "type": "string"
+ },
+ "clusterName": {
+ "description": "Required The cluster name.",
"location": "path",
"required": true,
"type": "string"
},
- "region": {
- "description": "Required The Cloud Dataproc region in which to handle the request.",
+ "projectId": {
+ "description": "Required The ID of the Google Cloud Platform project the cluster belongs to.",
"location": "path",
"required": true,
"type": "string"
}
},
- "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}:cancel",
+ "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}",
"request": {
- "$ref": "CancelJobRequest"
+ "$ref": "Cluster"
},
"response": {
- "$ref": "Job"
+ "$ref": "Operation"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
},
"get": {
- "description": "Gets the resource representation for a job in a project.",
+ "description": "Gets the resource representation for a cluster in a project.",
"httpMethod": "GET",
- "id": "dataproc.projects.regions.jobs.get",
+ "id": "dataproc.projects.regions.clusters.get",
"parameterOrder": [
"projectId",
"region",
- "jobId"
+ "clusterName"
],
"parameters": {
- "projectId": {
- "description": "Required The ID of the Google Cloud Platform project that the job belongs to.",
+ "clusterName": {
+ "description": "Required The cluster name.",
"location": "path",
"required": true,
"type": "string"
},
- "jobId": {
- "description": "Required The job ID.",
+ "projectId": {
+ "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.",
"location": "path",
"required": true,
"type": "string"
@@ -249,71 +227,72 @@
"type": "string"
}
},
- "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}",
+ "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}",
"response": {
- "$ref": "Job"
+ "$ref": "Cluster"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
},
- "patch": {
- "description": "Updates a job in a project.",
- "httpMethod": "PATCH",
- "id": "dataproc.projects.regions.jobs.patch",
+ "diagnose": {
+ "description": "Gets cluster diagnostic information. After the operation completes, the Operation.response field contains DiagnoseClusterOutputLocation.",
+ "httpMethod": "POST",
+ "id": "dataproc.projects.regions.clusters.diagnose",
"parameterOrder": [
"projectId",
"region",
- "jobId"
+ "clusterName"
],
"parameters": {
- "projectId": {
- "description": "Required The ID of the Google Cloud Platform project that the job belongs to.",
+ "region": {
+ "description": "Required The Cloud Dataproc region in which to handle the request.",
"location": "path",
"required": true,
"type": "string"
},
- "jobId": {
- "description": "Required The job ID.",
+ "clusterName": {
+ "description": "Required The cluster name.",
"location": "path",
"required": true,
"type": "string"
},
- "region": {
- "description": "Required The Cloud Dataproc region in which to handle the request.",
+ "projectId": {
+ "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.",
"location": "path",
"required": true,
"type": "string"
- },
- "updateMask": {
- "description": "Required Specifies the path, relative to <code>Job</code>, of the field to update. For example, to update the labels of a Job the <code>update_mask</code> parameter would be specified as <code>labels</code>, and the PATCH request body would specify the new value. <strong>Note:</strong> Currently, <code>labels</code> is the only field that can be updated.",
- "format": "google-fieldmask",
- "location": "query",
- "type": "string"
}
},
- "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}",
+ "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:diagnose",
"request": {
- "$ref": "Job"
+ "$ref": "DiagnoseClusterRequest"
},
"response": {
- "$ref": "Job"
+ "$ref": "Operation"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
},
- "submit": {
- "description": "Submits a job to a cluster.",
- "httpMethod": "POST",
- "id": "dataproc.projects.regions.jobs.submit",
+ "delete": {
+ "description": "Deletes a cluster in a project.",
+ "httpMethod": "DELETE",
+ "id": "dataproc.projects.regions.clusters.delete",
"parameterOrder": [
"projectId",
- "region"
+ "region",
+ "clusterName"
],
"parameters": {
+ "clusterName": {
+ "description": "Required The cluster name.",
+ "location": "path",
+ "required": true,
+ "type": "string"
+ },
"projectId": {
- "description": "Required The ID of the Google Cloud Platform project that the job belongs to.",
+ "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.",
"location": "path",
"required": true,
"type": "string"
@@ -325,49 +304,55 @@
"type": "string"
}
},
- "path": "v1/projects/{projectId}/regions/{region}/jobs:submit",
- "request": {
- "$ref": "SubmitJobRequest"
- },
+ "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}",
"response": {
- "$ref": "Job"
+ "$ref": "Operation"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
},
- "delete": {
- "description": "Deletes the job from the project. If the job is active, the delete fails, and the response returns FAILED_PRECONDITION.",
- "httpMethod": "DELETE",
- "id": "dataproc.projects.regions.jobs.delete",
+ "list": {
+ "description": "Lists all regions/{region}/clusters in a project.",
+ "httpMethod": "GET",
+ "id": "dataproc.projects.regions.clusters.list",
"parameterOrder": [
"projectId",
- "region",
- "jobId"
+ "region"
],
"parameters": {
- "region": {
- "description": "Required The Cloud Dataproc region in which to handle the request.",
- "location": "path",
- "required": true,
+ "pageToken": {
+ "description": "Optional The standard List page token.",
+ "location": "query",
"type": "string"
},
- "projectId": {
- "description": "Required The ID of the Google Cloud Platform project that the job belongs to.",
- "location": "path",
- "required": true,
+ "pageSize": {
+ "description": "Optional The standard List page size.",
+ "format": "int32",
+ "location": "query",
+ "type": "integer"
+ },
+ "projectId": {
+ "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.",
+ "location": "path",
+ "required": true,
"type": "string"
},
- "jobId": {
- "description": "Required The job ID.",
+ "region": {
+ "description": "Required The Cloud Dataproc region in which to handle the request.",
"location": "path",
"required": true,
"type": "string"
+ },
+ "filter": {
+ "description": "Optional A filter constraining the clusters to list. Filters are case-sensitive and have the following syntax:field = value AND field = value ...where field is one of status.state, clusterName, or labels.[KEY], and [KEY] is a label key. value can be * to match all values. status.state can be one of the following: ACTIVE, INACTIVE, CREATING, RUNNING, ERROR, DELETING, or UPDATING. ACTIVE contains the CREATING, UPDATING, and RUNNING states. INACTIVE contains the DELETING and ERROR states. clusterName is the name of the cluster provided at creation time. Only the logical AND operator is supported; space-separated items are treated as having an implicit AND operator.Example filter:status.state = ACTIVE AND clusterName = mycluster AND labels.env = staging AND labels.starred = *",
+ "location": "query",
+ "type": "string"
}
},
- "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}",
+ "path": "v1/projects/{projectId}/regions/{region}/clusters",
"response": {
- "$ref": "Empty"
+ "$ref": "ListClustersResponse"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
@@ -375,332 +360,310 @@
}
}
},
- "clusters": {
+ "operations": {
"methods": {
- "delete": {
- "description": "Deletes a cluster in a project.",
- "httpMethod": "DELETE",
- "id": "dataproc.projects.regions.clusters.delete",
+ "cancel": {
+ "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.",
+ "httpMethod": "POST",
+ "id": "dataproc.projects.regions.operations.cancel",
"parameterOrder": [
- "projectId",
- "region",
- "clusterName"
+ "name"
],
"parameters": {
- "region": {
- "description": "Required The Cloud Dataproc region in which to handle the request.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "clusterName": {
- "description": "Required The cluster name.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "projectId": {
- "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.",
+ "name": {
+ "description": "The name of the operation resource to be cancelled.",
"location": "path",
+ "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$",
"required": true,
"type": "string"
}
},
- "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}",
+ "path": "v1/{+name}:cancel",
"response": {
- "$ref": "Operation"
+ "$ref": "Empty"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
},
- "diagnose": {
- "description": "Gets cluster diagnostic information. After the operation completes, the Operation.response field contains DiagnoseClusterOutputLocation.",
- "httpMethod": "POST",
- "id": "dataproc.projects.regions.clusters.diagnose",
+ "delete": {
+ "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED.",
+ "httpMethod": "DELETE",
+ "id": "dataproc.projects.regions.operations.delete",
"parameterOrder": [
- "projectId",
- "region",
- "clusterName"
+ "name"
],
"parameters": {
- "projectId": {
- "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "region": {
- "description": "Required The Cloud Dataproc region in which to handle the request.",
- "location": "path",
- "required": true,
- "type": "string"
- },
- "clusterName": {
- "description": "Required The cluster name.",
+ "name": {
+ "description": "The name of the operation resource to be deleted.",
"location": "path",
+ "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$",
"required": true,
"type": "string"
}
},
- "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}:diagnose",
- "request": {
- "$ref": "DiagnoseClusterRequest"
- },
+ "path": "v1/{+name}",
"response": {
- "$ref": "Operation"
+ "$ref": "Empty"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
},
"list": {
- "description": "Lists all regions/{region}/clusters in a project.",
+ "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding below allows API services to override the binding to use different resource name schemes, such as users/*/operations.",
"httpMethod": "GET",
- "id": "dataproc.projects.regions.clusters.list",
+ "id": "dataproc.projects.regions.operations.list",
"parameterOrder": [
- "projectId",
- "region"
+ "name"
],
"parameters": {
- "region": {
- "description": "Required The Cloud Dataproc region in which to handle the request.",
- "location": "path",
- "required": true,
- "type": "string"
- },
"filter": {
- "description": "Optional A filter constraining the clusters to list. Filters are case-sensitive and have the following syntax:field = value AND field = value ...where field is one of status.state, clusterName, or labels.[KEY], and [KEY] is a label key. value can be * to match all values. status.state can be one of the following: ACTIVE, INACTIVE, CREATING, RUNNING, ERROR, DELETING, or UPDATING. ACTIVE contains the CREATING, UPDATING, and RUNNING states. INACTIVE contains the DELETING and ERROR states. clusterName is the name of the cluster provided at creation time. Only the logical AND operator is supported; space-separated items are treated as having an implicit AND operator.Example filter:status.state = ACTIVE AND clusterName = mycluster AND labels.env = staging AND labels.starred = *",
+ "description": "The standard list filter.",
"location": "query",
"type": "string"
},
+ "name": {
+ "description": "The name of the operation collection.",
+ "location": "path",
+ "pattern": "^projects/[^/]+/regions/[^/]+/operations$",
+ "required": true,
+ "type": "string"
+ },
"pageToken": {
- "description": "Optional The standard List page token.",
+ "description": "The standard list page token.",
"location": "query",
"type": "string"
},
"pageSize": {
- "description": "Optional The standard List page size.",
+ "description": "The standard list page size.",
"format": "int32",
"location": "query",
"type": "integer"
- },
- "projectId": {
- "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.",
+ }
+ },
+ "path": "v1/{+name}",
+ "response": {
+ "$ref": "ListOperationsResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ },
+ "get": {
+ "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.",
+ "httpMethod": "GET",
+ "id": "dataproc.projects.regions.operations.get",
+ "parameterOrder": [
+ "name"
+ ],
+ "parameters": {
+ "name": {
+ "description": "The name of the operation resource.",
"location": "path",
+ "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$",
"required": true,
"type": "string"
}
},
- "path": "v1/projects/{projectId}/regions/{region}/clusters",
+ "path": "v1/{+name}",
"response": {
- "$ref": "ListClustersResponse"
+ "$ref": "Operation"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
- },
- "create": {
- "description": "Creates a cluster in a project.",
+ }
+ }
+ },
+ "jobs": {
+ "methods": {
+ "cancel": {
+ "description": "Starts a job cancellation request. To access the job resource after cancellation, call regions/{region}/jobs.list or regions/{region}/jobs.get.",
"httpMethod": "POST",
- "id": "dataproc.projects.regions.clusters.create",
+ "id": "dataproc.projects.regions.jobs.cancel",
"parameterOrder": [
"projectId",
- "region"
+ "region",
+ "jobId"
],
"parameters": {
+ "region": {
+ "description": "Required The Cloud Dataproc region in which to handle the request.",
+ "location": "path",
+ "required": true,
+ "type": "string"
+ },
"projectId": {
- "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.",
+ "description": "Required The ID of the Google Cloud Platform project that the job belongs to.",
"location": "path",
"required": true,
"type": "string"
},
- "region": {
- "description": "Required The Cloud Dataproc region in which to handle the request.",
+ "jobId": {
+ "description": "Required The job ID.",
"location": "path",
"required": true,
"type": "string"
}
},
- "path": "v1/projects/{projectId}/regions/{region}/clusters",
+ "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}:cancel",
"request": {
- "$ref": "Cluster"
+ "$ref": "CancelJobRequest"
},
"response": {
- "$ref": "Operation"
+ "$ref": "Job"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
},
- "get": {
- "description": "Gets the resource representation for a cluster in a project.",
- "httpMethod": "GET",
- "id": "dataproc.projects.regions.clusters.get",
+ "patch": {
+ "description": "Updates a job in a project.",
+ "httpMethod": "PATCH",
+ "id": "dataproc.projects.regions.jobs.patch",
"parameterOrder": [
"projectId",
"region",
- "clusterName"
+ "jobId"
],
"parameters": {
"projectId": {
- "description": "Required The ID of the Google Cloud Platform project that the cluster belongs to.",
+ "description": "Required The ID of the Google Cloud Platform project that the job belongs to.",
"location": "path",
"required": true,
"type": "string"
},
- "region": {
- "description": "Required The Cloud Dataproc region in which to handle the request.",
+ "jobId": {
+ "description": "Required The job ID.",
"location": "path",
"required": true,
"type": "string"
},
- "clusterName": {
- "description": "Required The cluster name.",
+ "region": {
+ "description": "Required The Cloud Dataproc region in which to handle the request.",
"location": "path",
"required": true,
"type": "string"
+ },
+ "updateMask": {
+ "description": "Required Specifies the path, relative to <code>Job</code>, of the field to update. For example, to update the labels of a Job the <code>update_mask</code> parameter would be specified as <code>labels</code>, and the PATCH request body would specify the new value. <strong>Note:</strong> Currently, <code>labels</code> is the only field that can be updated.",
+ "format": "google-fieldmask",
+ "location": "query",
+ "type": "string"
}
},
- "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}",
+ "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}",
+ "request": {
+ "$ref": "Job"
+ },
"response": {
- "$ref": "Cluster"
+ "$ref": "Job"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
},
- "patch": {
- "description": "Updates a cluster in a project.",
- "httpMethod": "PATCH",
- "id": "dataproc.projects.regions.clusters.patch",
+ "get": {
+ "description": "Gets the resource representation for a job in a project.",
+ "httpMethod": "GET",
+ "id": "dataproc.projects.regions.jobs.get",
"parameterOrder": [
"projectId",
"region",
- "clusterName"
+ "jobId"
],
"parameters": {
- "region": {
- "description": "Required The Cloud Dataproc region in which to handle the request.",
+ "projectId": {
+ "description": "Required The ID of the Google Cloud Platform project that the job belongs to.",
"location": "path",
"required": true,
"type": "string"
},
- "updateMask": {
- "description": "Required Specifies the path, relative to <code>Cluster</code>, of the field to update. For example, to change the number of workers in a cluster to 5, the <code>update_mask</code> parameter would be specified as <code>config.worker_config.num_instances</code>, and the PATCH request body would specify the new value, as follows:\n{\n \"config\":{\n \"workerConfig\":{\n \"numInstances\":\"5\"\n }\n }\n}\nSimilarly, to change the number of preemptible workers in a cluster to 5, the <code>update_mask</code> parameter would be <code>config.secondary_worker_config.num_instances</code>, and the PATCH request body would be set as follows:\n{\n \"config\":{\n \"secondaryWorkerConfig\":{\n \"numInstances\":\"5\"\n }\n }\n}\n<strong>Note:</strong> Currently, <code>config.worker_config.num_instances</code> and <code>config.secondary_worker_config.num_instances</code> are the only fields that can be updated.",
- "format": "google-fieldmask",
- "location": "query",
- "type": "string"
- },
- "clusterName": {
- "description": "Required The cluster name.",
+ "jobId": {
+ "description": "Required The job ID.",
"location": "path",
"required": true,
"type": "string"
},
- "projectId": {
- "description": "Required The ID of the Google Cloud Platform project the cluster belongs to.",
+ "region": {
+ "description": "Required The Cloud Dataproc region in which to handle the request.",
"location": "path",
"required": true,
"type": "string"
}
},
- "path": "v1/projects/{projectId}/regions/{region}/clusters/{clusterName}",
- "request": {
- "$ref": "Cluster"
- },
+ "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}",
"response": {
- "$ref": "Operation"
+ "$ref": "Job"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
- }
- }
- },
- "operations": {
- "methods": {
- "list": {
- "description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name binding below allows API services to override the binding to use different resource name schemes, such as users/*/operations.",
- "httpMethod": "GET",
- "id": "dataproc.projects.regions.operations.list",
+ },
+ "submit": {
+ "description": "Submits a job to a cluster.",
+ "httpMethod": "POST",
+ "id": "dataproc.projects.regions.jobs.submit",
"parameterOrder": [
- "name"
+ "projectId",
+ "region"
],
"parameters": {
- "pageSize": {
- "description": "The standard list page size.",
- "format": "int32",
- "location": "query",
- "type": "integer"
- },
- "filter": {
- "description": "The standard list filter.",
- "location": "query",
- "type": "string"
- },
- "name": {
- "description": "The name of the operation collection.",
+ "region": {
+ "description": "Required The Cloud Dataproc region in which to handle the request.",
"location": "path",
- "pattern": "^projects/[^/]+/regions/[^/]+/operations$",
"required": true,
"type": "string"
},
- "pageToken": {
- "description": "The standard list page token.",
- "location": "query",
+ "projectId": {
+ "description": "Required The ID of the Google Cloud Platform project that the job belongs to.",
+ "location": "path",
+ "required": true,
"type": "string"
}
},
- "path": "v1/{+name}",
+ "path": "v1/projects/{projectId}/regions/{region}/jobs:submit",
+ "request": {
+ "$ref": "SubmitJobRequest"
+ },
"response": {
- "$ref": "ListOperationsResponse"
+ "$ref": "Job"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
},
- "get": {
- "description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.",
- "httpMethod": "GET",
- "id": "dataproc.projects.regions.operations.get",
+ "delete": {
+ "description": "Deletes the job from the project. If the job is active, the delete fails, and the response returns FAILED_PRECONDITION.",
+ "httpMethod": "DELETE",
+ "id": "dataproc.projects.regions.jobs.delete",
"parameterOrder": [
- "name"
+ "projectId",
+ "region",
+ "jobId"
],
"parameters": {
- "name": {
- "description": "The name of the operation resource.",
+ "projectId": {
+ "description": "Required The ID of the Google Cloud Platform project that the job belongs to.",
"location": "path",
- "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$",
"required": true,
"type": "string"
- }
- },
- "path": "v1/{+name}",
- "response": {
- "$ref": "Operation"
- },
- "scopes": [
- "https://www.googleapis.com/auth/cloud-platform"
- ]
- },
- "cancel": {
- "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to Code.CANCELLED.",
- "httpMethod": "POST",
- "id": "dataproc.projects.regions.operations.cancel",
- "parameterOrder": [
- "name"
- ],
- "parameters": {
- "name": {
- "description": "The name of the operation resource to be cancelled.",
+ },
+ "jobId": {
+ "description": "Required The job ID.",
+ "location": "path",
+ "required": true,
+ "type": "string"
+ },
+ "region": {
+ "description": "Required The Cloud Dataproc region in which to handle the request.",
"location": "path",
- "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$",
"required": true,
"type": "string"
}
},
- "path": "v1/{+name}:cancel",
+ "path": "v1/projects/{projectId}/regions/{region}/jobs/{jobId}",
"response": {
"$ref": "Empty"
},
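
Reviewer note: the operations methods above now address resources by full name ("v1/{+name}", with patterns like projects/*/regions/*/operations/*) rather than separate projectId/region/operationId parameters. A minimal polling sketch against operations.get (operation name and token are placeholders):

import time
import requests

BASE = "https://dataproc.googleapis.com/v1"
# Placeholder; must match ^projects/[^/]+/regions/[^/]+/operations/[^/]+$
name = "projects/my-project/regions/us-central1/operations/op-1234"

while True:
    op = requests.get(
        f"{BASE}/{name}",
        headers={"Authorization": "Bearer ACCESS_TOKEN"},  # placeholder
    ).json()
    if op.get("done"):
        break
    time.sleep(5)  # poll "at intervals as recommended by the API service"
# Per the Operation schema, either error or response is set once done.
print(op.get("error") or op.get("response"))
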
@@ -708,25 +671,62 @@
"https://www.googleapis.com/auth/cloud-platform"
]
},
- "delete": {
- "description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns google.rpc.Code.UNIMPLEMENTED.",
- "httpMethod": "DELETE",
- "id": "dataproc.projects.regions.operations.delete",
+ "list": {
+ "description": "Lists regions/{region}/jobs in a project.",
+ "httpMethod": "GET",
+ "id": "dataproc.projects.regions.jobs.list",
"parameterOrder": [
- "name"
+ "projectId",
+ "region"
],
"parameters": {
- "name": {
- "description": "The name of the operation resource to be deleted.",
+ "pageToken": {
+ "description": "Optional The page token, returned by a previous call, to request the next page of results.",
+ "location": "query",
+ "type": "string"
+ },
+ "pageSize": {
+ "description": "Optional The number of results to return in each response.",
+ "format": "int32",
+ "location": "query",
+ "type": "integer"
+ },
+ "region": {
+ "description": "Required The Cloud Dataproc region in which to handle the request.",
+ "location": "path",
+ "required": true,
+ "type": "string"
+ },
+ "clusterName": {
+ "description": "Optional If set, the returned jobs list includes only jobs that were submitted to the named cluster.",
+ "location": "query",
+ "type": "string"
+ },
+ "projectId": {
+ "description": "Required The ID of the Google Cloud Platform project that the job belongs to.",
"location": "path",
- "pattern": "^projects/[^/]+/regions/[^/]+/operations/[^/]+$",
"required": true,
"type": "string"
+ },
+ "filter": {
+ "description": "Optional A filter constraining the jobs to list. Filters are case-sensitive and have the following syntax:field = value AND field = value ...where field is status.state or labels.[KEY], and [KEY] is a label key. value can be * to match all values. status.state can be either ACTIVE or INACTIVE. Only the logical AND operator is supported; space-separated items are treated as having an implicit AND operator.Example filter:status.state = ACTIVE AND labels.env = staging AND labels.starred = *",
+ "location": "query",
+ "type": "string"
+ },
+ "jobStateMatcher": {
+ "description": "Optional Specifies enumerated categories of jobs to list (default = match ALL jobs).",
+ "enum": [
+ "ALL",
+ "ACTIVE",
+ "NON_ACTIVE"
+ ],
+ "location": "query",
+ "type": "string"
}
},
- "path": "v1/{+name}",
+ "path": "v1/projects/{projectId}/regions/{region}/jobs",
"response": {
- "$ref": "Empty"
+ "$ref": "ListJobsResponse"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
@@ -739,159 +739,127 @@
}
}
},
- "revision": "20170228",
+ "revision": "20170321",
"rootUrl": "https://dataproc.googleapis.com/",
"schemas": {
- "NodeInitializationAction": {
- "description": "Specifies an executable to run on a fully configured node and a timeout period for executable completion.",
- "id": "NodeInitializationAction",
+ "AcceleratorConfig": {
+ "description": "Specifies the type and number of accelerator cards attached to the instances of an instance group (see GPUs on Compute Engine).",
+ "id": "AcceleratorConfig",
"properties": {
- "executionTimeout": {
- "description": "Optional Amount of time executable has to complete. Default is 10 minutes. Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.",
- "format": "google-duration",
- "type": "string"
+ "acceleratorCount": {
+ "description": "The number of the accelerator cards of this type exposed to this instance.",
+ "format": "int32",
+ "type": "integer"
},
- "executableFile": {
- "description": "Required Google Cloud Storage URI of executable file.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "ListJobsResponse": {
- "description": "A list of jobs in a project.",
- "id": "ListJobsResponse",
- "properties": {
- "nextPageToken": {
- "description": "Optional This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent <code>ListJobsRequest</code>.",
+ "acceleratorTypeUri": {
+ "description": "Full or partial URI of the accelerator type resource to expose to this instance. See Google Compute Engine AcceleratorTypes( /compute/docs/reference/beta/acceleratorTypes)",
"type": "string"
- },
- "jobs": {
- "description": "Output-only Jobs list.",
- "items": {
- "$ref": "Job"
- },
- "type": "array"
}
},
"type": "object"
},
- "CancelJobRequest": {
- "description": "A request to cancel a job.",
- "id": "CancelJobRequest",
- "properties": {},
- "type": "object"
- },
- "SparkSqlJob": {
- "description": "A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/) queries.",
- "id": "SparkSqlJob",
+ "ClusterMetrics": {
+ "description": "Contains cluster daemon metrics, such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. It may be changed before final release.",
+ "id": "ClusterMetrics",
"properties": {
- "queryFileUri": {
- "description": "The HCFS URI of the script that contains SQL queries.",
- "type": "string"
- },
- "queryList": {
- "$ref": "QueryList",
- "description": "A list of queries."
- },
- "scriptVariables": {
+ "yarnMetrics": {
"additionalProperties": {
+ "format": "int64",
"type": "string"
},
- "description": "Optional Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";).",
+ "description": "The YARN metrics.",
"type": "object"
},
- "jarFileUris": {
- "description": "Optional HCFS URIs of jar files to be added to the Spark CLASSPATH.",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "loggingConfig": {
- "$ref": "LoggingConfig",
- "description": "Optional The runtime log config for job execution."
- },
- "properties": {
+ "hdfsMetrics": {
"additionalProperties": {
+ "format": "int64",
"type": "string"
},
- "description": "Optional A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.",
+ "description": "The HDFS metrics.",
"type": "object"
}
},
"type": "object"
},
- "Cluster": {
- "description": "Describes the identifying information, config, and status of a cluster of Google Compute Engine instances.",
- "id": "Cluster",
+ "LoggingConfig": {
+ "description": "The runtime logging config of the job.",
+ "id": "LoggingConfig",
"properties": {
- "labels": {
+ "driverLogLevels": {
"additionalProperties": {
+ "enum": [
+ "LEVEL_UNSPECIFIED",
+ "ALL",
+ "TRACE",
+ "DEBUG",
+ "INFO",
+ "WARN",
+ "ERROR",
+ "FATAL",
+ "OFF"
+ ],
"type": "string"
},
- "description": "Optional The labels to associate with this cluster. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster.",
+ "description": "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'",
"type": "object"
- },
- "metrics": {
- "$ref": "ClusterMetrics",
- "description": "Contains cluster daemon metrics such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. It may be changed before final release."
- },
- "status": {
- "$ref": "ClusterStatus",
- "description": "Output-only Cluster status."
- },
- "statusHistory": {
- "description": "Output-only The previous cluster status.",
- "items": {
- "$ref": "ClusterStatus"
- },
- "type": "array"
- },
- "config": {
- "$ref": "ClusterConfig",
- "description": "Required The cluster config. Note that Cloud Dataproc may set default values, and values may change when clusters are updated."
- },
- "clusterName": {
- "description": "Required The cluster name. Cluster names within a project must be unique. Names of deleted clusters can be reused.",
- "type": "string"
- },
- "clusterUuid": {
- "description": "Output-only A cluster UUID (Unique Universal Identifier). Cloud Dataproc generates this value when it creates the cluster.",
- "type": "string"
- },
- "projectId": {
- "description": "Required The Google Cloud Platform project ID that the cluster belongs to.",
+ }
+ },
+ "type": "object"
+ },
+ "DiagnoseClusterOutputLocation": {
+ "description": "The location where output from diagnostic command can be found.",
+ "id": "DiagnoseClusterOutputLocation",
+ "properties": {
+ "outputUri": {
+ "description": "Output-only The Google Cloud Storage URI of the diagnostic output. This will be a plain text file with summary of collected diagnostics.",
"type": "string"
}
},
"type": "object"
},
- "ListOperationsResponse": {
- "description": "The response message for Operations.ListOperations.",
- "id": "ListOperationsResponse",
+ "Operation": {
+ "description": "This resource represents a long-running operation that is the result of a network API call.",
+ "id": "Operation",
"properties": {
- "nextPageToken": {
- "description": "The standard List next-page token.",
+ "done": {
+ "description": "If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.",
+ "type": "boolean"
+ },
+ "response": {
+ "additionalProperties": {
+ "description": "Properties of the object. Contains field @type with type URL.",
+ "type": "any"
+ },
+ "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.",
+ "type": "object"
+ },
+ "name": {
+ "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should have the format of operations/some/unique/name.",
"type": "string"
},
- "operations": {
- "description": "A list of operations that matches the specified filter in the request.",
- "items": {
- "$ref": "Operation"
+ "error": {
+ "$ref": "Status",
+ "description": "The error result of the operation in case of failure or cancellation."
+ },
+ "metadata": {
+ "additionalProperties": {
+ "description": "Properties of the object. Contains field @type with type URL.",
+ "type": "any"
},
- "type": "array"
+ "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.",
+ "type": "object"
}
},
"type": "object"
},
- "OperationMetadata": {
- "description": "Metadata describing the operation.",
- "id": "OperationMetadata",
+ "OperationStatus": {
+ "description": "The status of the operation.",
+ "id": "OperationStatus",
"properties": {
- "status": {
- "$ref": "OperationStatus",
- "description": "Output-only Current operation status."
+ "stateStartTime": {
+ "description": "The time this state was entered.",
+ "format": "google-datetime",
+ "type": "string"
},
"state": {
"description": "A message containing the operation state.",
@@ -904,8 +872,8 @@
"enumDescriptions": [
"Unused.",
"The operation has been created.",
- "The operation is currently running.",
- "The operation is done, either cancelled or completed."
+ "The operation is running.",
+ "The operation is done; either cancelled or completed."
],
"type": "string"
},
@@ -913,271 +881,177 @@
"description": "A message containing any operation metadata details.",
"type": "string"
},
- "clusterUuid": {
- "description": "Cluster UUId for the operation.",
- "type": "string"
- },
- "clusterName": {
- "description": "Name of the cluster for the operation.",
- "type": "string"
- },
"innerState": {
"description": "A message containing the detailed operation state.",
"type": "string"
- },
- "endTime": {
- "description": "The time that the operation completed.",
- "format": "google-datetime",
- "type": "string"
- },
- "startTime": {
- "description": "The time that the operation was started by the server.",
- "format": "google-datetime",
- "type": "string"
- },
- "warnings": {
- "description": "Output-only Errors encountered during operation execution.",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "insertTime": {
- "description": "The time that the operation was requested.",
- "format": "google-datetime",
- "type": "string"
- },
- "statusHistory": {
- "description": "Output-only Previous operation status.",
- "items": {
- "$ref": "OperationStatus"
- },
- "type": "array"
- },
- "operationType": {
- "description": "Output-only The operation type.",
- "type": "string"
- },
- "description": {
- "description": "Output-only Short description of operation.",
- "type": "string"
}
},
"type": "object"
},
- "JobPlacement": {
- "description": "Cloud Dataproc job config.",
- "id": "JobPlacement",
+ "JobReference": {
+ "description": "Encapsulates the full scoping used to reference a job.",
+ "id": "JobReference",
"properties": {
- "clusterName": {
- "description": "Required The name of the cluster where the job will be submitted.",
+ "projectId": {
+ "description": "Required The ID of the Google Cloud Platform project that the job belongs to.",
"type": "string"
},
- "clusterUuid": {
- "description": "Output-only A cluster UUID generated by the Cloud Dataproc service when the job is submitted.",
+ "jobId": {
+ "description": "Optional The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters.",
"type": "string"
}
},
"type": "object"
},
- "SoftwareConfig": {
- "description": "Specifies the selection and config of software inside the cluster.",
- "id": "SoftwareConfig",
+ "SubmitJobRequest": {
+ "description": "A request to submit a job.",
+ "id": "SubmitJobRequest",
"properties": {
- "imageVersion": {
- "description": "Optional The version of software inside the cluster. It must match the regular expression [0-9]+\\.[0-9]+. If unspecified, it defaults to the latest version (see Cloud Dataproc Versioning).",
- "type": "string"
- },
- "properties": {
- "additionalProperties": {
- "type": "string"
- },
- "description": "Optional The properties to set on daemon config files.Property keys are specified in prefix:property format, such as core:fs.defaultFS. The following are supported prefixes and their mappings:\ncore: core-site.xml\nhdfs: hdfs-site.xml\nmapred: mapred-site.xml\nyarn: yarn-site.xml\nhive: hive-site.xml\npig: pig.properties\nspark: spark-defaults.conf",
- "type": "object"
+ "job": {
+ "$ref": "Job",
+ "description": "Required The job resource."
}
},
"type": "object"
},
- "ClusterStatus": {
- "description": "The status of a cluster and its instances.",
- "id": "ClusterStatus",
+ "Status": {
+ "description": "The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). The error model is designed to be:\nSimple to use and understand for most users\nFlexible enough to meet unexpected needsOverviewThe Status message contains three pieces of data: error code, error message, and error details. The error code should be an enum value of google.rpc.Code, but it may accept additional error codes if needed. The error message should be a developer-facing English message that helps developers understand and resolve the error. If a localized user-facing error message is needed, put the localized message in the error details or localize it in the client. The optional error details may contain arbitrary information about the error. There is a predefined set of error detail types in the package google.rpc which can be used for common error conditions.Language mappingThe Status message is the logical representation of the error model, but it is not necessarily the actual wire format. When the Status message is exposed in different client libraries and different wire protocols, it can be mapped differently. For example, it will likely be mapped to some exceptions in Java, but more likely mapped to some error codes in C.Other usesThe error model and the Status message can be used in a variety of environments, either with or without APIs, to provide a consistent developer experience across different environments.Example uses of this error model include:\nPartial errors. If a service needs to return partial errors to the client, it may embed the Status in the normal response to indicate the partial errors.\nWorkflow errors. A typical workflow has multiple steps. Each step may have a Status message for error reporting purpose.\nBatch operations. If a client uses batch request and batch response, the Status message should be used directly inside batch response, one for each error sub-response.\nAsynchronous operations. If an API call embeds asynchronous operation results in its response, the status of those operations should be represented directly using the Status message.\nLogging. If some API errors are stored in logs, the message Status could be used directly after any stripping needed for security/privacy reasons.",
+ "id": "Status",
"properties": {
- "detail": {
- "description": "Output-only Optional details of cluster's state.",
- "type": "string"
+ "details": {
+ "description": "A list of messages that carry the error details. There will be a common set of message types for APIs to use.",
+ "items": {
+ "additionalProperties": {
+ "description": "Properties of the object. Contains field @type with type URL.",
+ "type": "any"
+ },
+ "type": "object"
+ },
+ "type": "array"
},
- "state": {
- "description": "Output-only The cluster's state.",
- "enum": [
- "UNKNOWN",
- "CREATING",
- "RUNNING",
- "ERROR",
- "DELETING",
- "UPDATING"
- ],
- "enumDescriptions": [
- "The cluster state is unknown.",
- "The cluster is being created and set up. It is not ready for use.",
- "The cluster is currently running and healthy. It is ready for use.",
- "The cluster encountered an error. It is not ready for use.",
- "The cluster is being deleted. It cannot be used.",
- "The cluster is being updated. It continues to accept and process jobs."
- ],
- "type": "string"
+ "code": {
+ "description": "The status code, which should be an enum value of google.rpc.Code.",
+ "format": "int32",
+ "type": "integer"
},
- "stateStartTime": {
- "description": "Output-only Time when this state was entered.",
- "format": "google-datetime",
+ "message": {
+ "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.",
"type": "string"
}
},
"type": "object"
},
- "PigJob": {
- "description": "A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/) queries on YARN.",
- "id": "PigJob",
+ "JobScheduling": {
+ "description": "Job scheduling options.Beta Feature: These options are available for testing purposes only. They may be changed before final release.",
+ "id": "JobScheduling",
"properties": {
- "scriptVariables": {
- "additionalProperties": {
+ "maxFailuresPerHour": {
+ "description": "Optional Maximum number of times per hour a driver may be restarted as a result of driver terminating with non-zero code before job is reported failed.A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window.Maximum value is 10.",
+ "format": "int32",
+ "type": "integer"
+ }
+ },
+ "type": "object"
+ },
+ "InstanceGroupConfig": {
+ "description": "Optional The config settings for Google Compute Engine resources in an instance group, such as a master or worker group.",
+ "id": "InstanceGroupConfig",
+ "properties": {
+ "instanceNames": {
+ "description": "Optional The list of instance names. Cloud Dataproc derives the names from cluster_name, num_instances, and the instance group if not set by user (recommended practice is to let Cloud Dataproc derive the name).",
+ "items": {
"type": "string"
},
- "description": "Optional Mapping of query variable names to values (equivalent to the Pig command: name=[value]).",
- "type": "object"
+ "type": "array"
},
- "jarFileUris": {
- "description": "Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.",
+ "accelerators": {
+ "description": "Optional The Google Compute Engine accelerator configuration for these instances.Beta Feature: This feature is still under development. It may be changed before final release.",
"items": {
- "type": "string"
+ "$ref": "AcceleratorConfig"
},
"type": "array"
},
- "loggingConfig": {
- "$ref": "LoggingConfig",
- "description": "Optional The runtime log config for job execution."
+ "numInstances": {
+ "description": "Required The number of VM instances in the instance group. For master instance groups, must be set to 1.",
+ "format": "int32",
+ "type": "integer"
},
- "properties": {
- "additionalProperties": {
- "type": "string"
- },
- "description": "Optional A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.",
- "type": "object"
+ "diskConfig": {
+ "$ref": "DiskConfig",
+ "description": "Optional Disk option config settings."
},
- "continueOnFailure": {
- "description": "Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.",
+ "machineTypeUri": {
+ "description": "Required The Google Compute Engine machine type used for cluster instances. Example: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2.",
+ "type": "string"
+ },
+ "managedGroupConfig": {
+ "$ref": "ManagedGroupConfig",
+ "description": "Output-only The config for Google Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups."
+ },
+ "isPreemptible": {
+ "description": "Optional Specifies that this instance group contains preemptible instances.",
"type": "boolean"
},
- "queryFileUri": {
- "description": "The HCFS URI of the script that contains the Pig queries.",
+ "imageUri": {
+ "description": "Output-only The Google Compute Engine image resource used for cluster instances. Inferred from SoftwareConfig.image_version.",
"type": "string"
- },
- "queryList": {
- "$ref": "QueryList",
- "description": "A list of queries."
}
},
"type": "object"
},
- "ListClustersResponse": {
- "description": "The list of all clusters in a project.",
- "id": "ListClustersResponse",
+ "ListJobsResponse": {
+ "description": "A list of jobs in a project.",
+ "id": "ListJobsResponse",
"properties": {
- "clusters": {
- "description": "Output-only The clusters in the project.",
+ "jobs": {
+ "description": "Output-only Jobs list.",
"items": {
- "$ref": "Cluster"
+ "$ref": "Job"
},
"type": "array"
},
"nextPageToken": {
- "description": "Output-only This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent <code>ListClustersRequest</code>.",
+ "description": "Optional This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent <code>ListJobsRequest</code>.",
"type": "string"
}
},
"type": "object"
},
- "Job": {
- "description": "A Cloud Dataproc job resource.",
- "id": "Job",
+ "NodeInitializationAction": {
+ "description": "Specifies an executable to run on a fully configured node and a timeout period for executable completion.",
+ "id": "NodeInitializationAction",
"properties": {
- "placement": {
- "$ref": "JobPlacement",
- "description": "Required Job information, including how, when, and where to run the job."
- },
- "status": {
- "$ref": "JobStatus",
- "description": "Output-only The job status. Additional application-specific status information may be contained in the <code>type_job</code> and <code>yarn_applications</code> fields."
- },
- "driverControlFilesUri": {
- "description": "Output-only If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.",
+ "executableFile": {
+ "description": "Required Google Cloud Storage URI of executable file.",
"type": "string"
},
- "scheduling": {
- "$ref": "JobScheduling",
- "description": "Optional Job scheduling configuration."
- },
- "pigJob": {
- "$ref": "PigJob",
- "description": "Job is a Pig job."
- },
- "hiveJob": {
- "$ref": "HiveJob",
- "description": "Job is a Hive job."
- },
- "labels": {
- "additionalProperties": {
- "type": "string"
- },
- "description": "Optional The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.",
- "type": "object"
- },
- "driverOutputResourceUri": {
- "description": "Output-only A URI pointing to the location of the stdout of the job's driver program.",
+ "executionTimeout": {
+ "description": "Optional Amount of time executable has to complete. Default is 10 minutes. Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.",
+ "format": "google-duration",
"type": "string"
- },
- "statusHistory": {
- "description": "Output-only The previous job status.",
- "items": {
- "$ref": "JobStatus"
- },
- "type": "array"
- },
- "sparkJob": {
- "$ref": "SparkJob",
- "description": "Job is a Spark job."
- },
- "sparkSqlJob": {
- "$ref": "SparkSqlJob",
- "description": "Job is a SparkSql job."
- },
- "yarnApplications": {
- "description": "Output-only The collection of YARN applications spun up by this job.Beta Feature: This report is available for testing purposes only. It may be changed before final release.",
- "items": {
- "$ref": "YarnApplication"
- },
- "type": "array"
- },
- "pysparkJob": {
- "$ref": "PySparkJob",
- "description": "Job is a Pyspark job."
- },
- "reference": {
- "$ref": "JobReference",
- "description": "Optional The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>."
- },
- "hadoopJob": {
- "$ref": "HadoopJob",
- "description": "Job is a Hadoop job."
}
},
"type": "object"
},
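For illustration, a NodeInitializationAction value consistent with the schema above might look like the following; the bucket and script path are hypothetical, and executionTimeout uses the google-duration string form, so "600s" spells out the documented 10-minute default:

    {
      "executableFile": "gs://example-bucket/scripts/init.sh",
      "executionTimeout": "600s"
    }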
- "SparkJob": {
- "description": "A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/) applications on YARN.",
- "id": "SparkJob",
+ "CancelJobRequest": {
+ "description": "A request to cancel a job.",
+ "id": "CancelJobRequest",
+ "properties": {},
+ "type": "object"
+ },
+ "SparkSqlJob": {
+ "description": "A Cloud Dataproc job for running Apache Spark SQL (http://spark.apache.org/sql/) queries.",
+ "id": "SparkSqlJob",
"properties": {
+ "scriptVariables": {
+ "additionalProperties": {
+ "type": "string"
+ },
+ "description": "Optional Mapping of query variable names to values (equivalent to the Spark SQL command: SET name=\"value\";).",
+ "type": "object"
+ },
"jarFileUris": {
- "description": "Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.",
+ "description": "Optional HCFS URIs of jar files to be added to the Spark CLASSPATH.",
"items": {
"type": "string"
},
@@ -1191,206 +1065,329 @@
"additionalProperties": {
"type": "string"
},
- "description": "Optional A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.",
+ "description": "Optional A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.",
"type": "object"
},
- "args": {
- "description": "Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.",
- "items": {
+ "queryFileUri": {
+ "description": "The HCFS URI of the script that contains SQL queries.",
+ "type": "string"
+ },
+ "queryList": {
+ "$ref": "QueryList",
+ "description": "A list of queries."
+ }
+ },
+ "type": "object"
+ },
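A sketch of a SparkSqlJob payload built only from the fields above (queries, variable names, and URIs are illustrative; queryFileUri and queryList are alternative ways to supply the queries, so only one is set):

    {
      "queryList": {
        "queries": ["SHOW DATABASES", "SELECT COUNT(*) FROM events"]
      },
      "scriptVariables": {"env": "test"},
      "jarFileUris": ["gs://example-bucket/jars/extra-udfs.jar"]
    }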
+ "Cluster": {
+ "description": "Describes the identifying information, config, and status of a cluster of Google Compute Engine instances.",
+ "id": "Cluster",
+ "properties": {
+ "labels": {
+ "additionalProperties": {
"type": "string"
},
- "type": "array"
+ "description": "Optional The labels to associate with this cluster. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a cluster.",
+ "type": "object"
},
- "fileUris": {
- "description": "Optional HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.",
+ "status": {
+ "$ref": "ClusterStatus",
+ "description": "Output-only Cluster status."
+ },
+ "metrics": {
+ "$ref": "ClusterMetrics",
+ "description": "Contains cluster daemon metrics such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. It may be changed before final release."
+ },
+ "statusHistory": {
+ "description": "Output-only The previous cluster status.",
"items": {
- "type": "string"
+ "$ref": "ClusterStatus"
},
"type": "array"
},
- "mainClass": {
- "description": "The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jar_file_uris.",
+ "config": {
+ "$ref": "ClusterConfig",
+ "description": "Required The cluster config. Note that Cloud Dataproc may set default values, and values may change when clusters are updated."
+ },
+ "clusterUuid": {
+ "description": "Output-only A cluster UUID (Unique Universal Identifier). Cloud Dataproc generates this value when it creates the cluster.",
"type": "string"
},
- "archiveUris": {
- "description": "Optional HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.",
+ "clusterName": {
+ "description": "Required The cluster name. Cluster names within a project must be unique. Names of deleted clusters can be reused.",
+ "type": "string"
+ },
+ "projectId": {
+ "description": "Required The Google Cloud Platform project ID that the cluster belongs to.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
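A minimal Cluster request body under this schema could look as follows; the project ID, cluster name, and zone are hypothetical, and the zoneUri follows the example format given in GceClusterConfig below:

    {
      "projectId": "example-project",
      "clusterName": "example-cluster",
      "config": {
        "gceClusterConfig": {
          "zoneUri": "https://www.googleapis.com/compute/v1/projects/example-project/zones/us-east1-a"
        }
      }
    }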
+ "ListOperationsResponse": {
+ "description": "The response message for Operations.ListOperations.",
+ "id": "ListOperationsResponse",
+ "properties": {
+ "nextPageToken": {
+ "description": "The standard List next-page token.",
+ "type": "string"
+ },
+ "operations": {
+ "description": "A list of operations that matches the specified filter in the request.",
"items": {
- "type": "string"
+ "$ref": "Operation"
},
"type": "array"
- },
- "mainJarFileUri": {
- "description": "The HCFS URI of the jar file that contains the main class.",
- "type": "string"
}
},
"type": "object"
},
- "JobStatus": {
- "description": "Cloud Dataproc job status.",
- "id": "JobStatus",
+ "OperationMetadata": {
+ "description": "Metadata describing the operation.",
+ "id": "OperationMetadata",
"properties": {
- "stateStartTime": {
- "description": "Output-only The time when this state was entered.",
- "format": "google-datetime",
+ "operationType": {
+ "description": "Output-only The operation type.",
+ "type": "string"
+ },
+ "description": {
+ "description": "Output-only Short description of operation.",
"type": "string"
},
+ "status": {
+ "$ref": "OperationStatus",
+ "description": "Output-only Current operation status."
+ },
"state": {
- "description": "Output-only A state message specifying the overall job state.",
+ "description": "A message containing the operation state.",
"enum": [
- "STATE_UNSPECIFIED",
+ "UNKNOWN",
"PENDING",
- "SETUP_DONE",
"RUNNING",
- "CANCEL_PENDING",
- "CANCEL_STARTED",
- "CANCELLED",
- "DONE",
- "ERROR",
- "ATTEMPT_FAILURE"
+ "DONE"
],
"enumDescriptions": [
- "The job state is unknown.",
- "The job is pending; it has been submitted, but is not yet running.",
- "Job has been received by the service and completed initial setup; it will soon be submitted to the cluster.",
- "The job is running on the cluster.",
- "A CancelJob request has been received, but is pending.",
- "Transient in-flight resources have been canceled, and the request to cancel the running job has been issued to the cluster.",
- "The job cancellation was successful.",
- "The job has completed successfully.",
- "The job has completed, but encountered an error.",
- "Job attempt has failed. The detail field contains failure details for this attempt.Applies to restartable jobs only."
+ "Unused.",
+ "The operation has been created.",
+ "The operation is currently running.",
+ "The operation is done, either cancelled or completed."
],
"type": "string"
},
"details": {
- "description": "Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.",
+ "description": "A message containing any operation metadata details.",
"type": "string"
- }
- },
- "type": "object"
+ },
+ "clusterUuid": {
+ "description": "Cluster UUId for the operation.",
+ "type": "string"
+ },
+ "clusterName": {
+ "description": "Name of the cluster for the operation.",
+ "type": "string"
+ },
+ "innerState": {
+ "description": "A message containing the detailed operation state.",
+ "type": "string"
+ },
+ "endTime": {
+ "description": "The time that the operation completed.",
+ "format": "google-datetime",
+ "type": "string"
+ },
+ "startTime": {
+ "description": "The time that the operation was started by the server.",
+ "format": "google-datetime",
+ "type": "string"
+ },
+ "warnings": {
+ "description": "Output-only Errors encountered during operation execution.",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "insertTime": {
+ "description": "The time that the operation was requested.",
+ "format": "google-datetime",
+ "type": "string"
+ },
+ "statusHistory": {
+ "description": "Output-only Previous operation status.",
+ "items": {
+ "$ref": "OperationStatus"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
},
- "ManagedGroupConfig": {
- "description": "Specifies the resources used to actively manage an instance group.",
- "id": "ManagedGroupConfig",
+ "SoftwareConfig": {
+ "description": "Specifies the selection and config of software inside the cluster.",
+ "id": "SoftwareConfig",
"properties": {
- "instanceGroupManagerName": {
- "description": "Output-only The name of the Instance Group Manager for this group.",
+ "imageVersion": {
+ "description": "Optional The version of software inside the cluster. It must match the regular expression [0-9]+\\.[0-9]+. If unspecified, it defaults to the latest version (see Cloud Dataproc Versioning).",
"type": "string"
},
- "instanceTemplateName": {
- "description": "Output-only The name of the Instance Template used for the Managed Instance Group.",
+ "properties": {
+ "additionalProperties": {
+ "type": "string"
+ },
+ "description": "Optional The properties to set on daemon config files.Property keys are specified in prefix:property format, such as core:fs.defaultFS. The following are supported prefixes and their mappings:\ncore: core-site.xml\nhdfs: hdfs-site.xml\nmapred: mapred-site.xml\nyarn: yarn-site.xml\nhive: hive-site.xml\npig: pig.properties\nspark: spark-defaults.conf",
+ "type": "object"
+ }
+ },
+ "type": "object"
+ },
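As a sketch, a SoftwareConfig using the prefix:property key form described above (the version and property values are illustrative only; the imageVersion matches the documented [0-9]+\.[0-9]+ pattern):

    {
      "imageVersion": "1.1",
      "properties": {
        "spark:spark.executor.memory": "4g",
        "yarn:yarn.nodemanager.resource.memory-mb": "8192"
      }
    }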
+ "JobPlacement": {
+ "description": "Cloud Dataproc job config.",
+ "id": "JobPlacement",
+ "properties": {
+ "clusterName": {
+ "description": "Required The name of the cluster where the job will be submitted.",
+ "type": "string"
+ },
+ "clusterUuid": {
+ "description": "Output-only A cluster UUID generated by the Cloud Dataproc service when the job is submitted.",
"type": "string"
}
},
"type": "object"
},
- "ClusterOperationStatus": {
- "description": "The status of the operation.",
- "id": "ClusterOperationStatus",
+ "ClusterStatus": {
+ "description": "The status of a cluster and its instances.",
+ "id": "ClusterStatus",
"properties": {
"stateStartTime": {
- "description": "Output-only The time this state was entered.",
+ "description": "Output-only Time when this state was entered.",
"format": "google-datetime",
"type": "string"
},
- "state": {
- "description": "Output-only A message containing the operation state.",
+ "substate": {
+ "description": "Output-only Additional state information that includes status reported by the agent.",
"enum": [
- "UNKNOWN",
- "PENDING",
- "RUNNING",
- "DONE"
+ "UNSPECIFIED",
+ "UNHEALTHY",
+ "STALE_STATUS"
],
"enumDescriptions": [
- "Unused.",
- "The operation has been created.",
- "The operation is running.",
- "The operation is done; either cancelled or completed."
+ "",
+ "The cluster is known to be in an unhealthy state (for example, critical daemons are not running or HDFS capacity is exhausted).Applies to RUNNING state.",
+ "The agent-reported status is out of date (may occur if Cloud Dataproc loses communication with Agent).Applies to RUNNING state."
],
"type": "string"
},
- "details": {
- "description": "Output-onlyA message containing any operation metadata details.",
+ "detail": {
+ "description": "Output-only Optional details of cluster's state.",
"type": "string"
},
- "innerState": {
- "description": "Output-only A message containing the detailed operation state.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "YarnApplication": {
- "description": "A YARN application created by a job. Application information is a subset of <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.Beta Feature: This report is available for testing purposes only. It may be changed before final release.",
- "id": "YarnApplication",
- "properties": {
"state": {
- "description": "Required The application state.",
+ "description": "Output-only The cluster's state.",
"enum": [
- "STATE_UNSPECIFIED",
- "NEW",
- "NEW_SAVING",
- "SUBMITTED",
- "ACCEPTED",
+ "UNKNOWN",
+ "CREATING",
"RUNNING",
- "FINISHED",
- "FAILED",
- "KILLED"
+ "ERROR",
+ "DELETING",
+ "UPDATING"
],
"enumDescriptions": [
- "Status is unspecified.",
- "Status is NEW.",
- "Status is NEW_SAVING.",
- "Status is SUBMITTED.",
- "Status is ACCEPTED.",
- "Status is RUNNING.",
- "Status is FINISHED.",
- "Status is FAILED.",
- "Status is KILLED."
+ "The cluster state is unknown.",
+ "The cluster is being created and set up. It is not ready for use.",
+ "The cluster is currently running and healthy. It is ready for use.",
+ "The cluster encountered an error. It is not ready for use.",
+ "The cluster is being deleted. It cannot be used.",
+ "The cluster is being updated. It continues to accept and process jobs."
],
"type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "PigJob": {
+ "description": "A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/) queries on YARN.",
+ "id": "PigJob",
+ "properties": {
+ "properties": {
+ "additionalProperties": {
+ "type": "string"
+ },
+ "description": "Optional A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.",
+ "type": "object"
},
- "name": {
- "description": "Required The application name.",
- "type": "string"
+ "continueOnFailure": {
+ "description": "Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.",
+ "type": "boolean"
},
- "trackingUrl": {
- "description": "Optional The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.",
+ "queryFileUri": {
+ "description": "The HCFS URI of the script that contains the Pig queries.",
"type": "string"
},
- "progress": {
- "description": "Required The numerical progress of the application, from 1 to 100.",
- "format": "float",
- "type": "number"
+ "queryList": {
+ "$ref": "QueryList",
+ "description": "A list of queries."
+ },
+ "jarFileUris": {
+ "description": "Optional HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "scriptVariables": {
+ "additionalProperties": {
+ "type": "string"
+ },
+ "description": "Optional Mapping of query variable names to values (equivalent to the Pig command: name=[value]).",
+ "type": "object"
+ },
+ "loggingConfig": {
+ "$ref": "LoggingConfig",
+ "description": "Optional The runtime log config for job execution."
}
},
"type": "object"
},
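A hypothetical PigJob referencing a script by URI (all URIs and variable names here are made up; per the schema, scriptVariables is equivalent to the Pig command name=[value]):

    {
      "queryFileUri": "gs://example-bucket/pig/transform.pig",
      "scriptVariables": {"input": "gs://example-bucket/data/"},
      "continueOnFailure": false
    }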
- "QueryList": {
- "description": "A list of queries to run on a cluster.",
- "id": "QueryList",
+ "ListClustersResponse": {
+ "description": "The list of all clusters in a project.",
+ "id": "ListClustersResponse",
"properties": {
- "queries": {
- "description": "Required The queries to execute. You do not need to terminate a query with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of an Cloud Dataproc API snippet that uses a QueryList to specify a HiveJob:\n\"hiveJob\": {\n \"queryList\": {\n \"queries\": [\n \"query1\",\n \"query2\",\n \"query3;query4\",\n ]\n }\n}\n",
+ "clusters": {
+ "description": "Output-only The clusters in the project.",
"items": {
- "type": "string"
+ "$ref": "Cluster"
},
"type": "array"
+ },
+ "nextPageToken": {
+ "description": "Output-only This token is included in the response if there are more results to fetch. To fetch additional results, provide this value as the page_token in a subsequent <code>ListClustersRequest</code>.",
+ "type": "string"
}
},
"type": "object"
},
- "HadoopJob": {
- "description": "A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).",
- "id": "HadoopJob",
+ "SparkJob": {
+ "description": "A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/) applications on YARN.",
+ "id": "SparkJob",
"properties": {
+ "mainClass": {
+ "description": "The name of the driver's main class. The jar file that contains the class must be in the default CLASSPATH or specified in jar_file_uris.",
+ "type": "string"
+ },
+ "archiveUris": {
+ "description": "Optional HCFS URIs of archives to be extracted in the working directory of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ },
"mainJarFileUri": {
- "description": "The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'",
+ "description": "The HCFS URI of the jar file that contains the main class.",
"type": "string"
},
"jarFileUris": {
- "description": "Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.",
+ "description": "Optional HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.",
"items": {
"type": "string"
},
@@ -1404,29 +1401,18 @@
"additionalProperties": {
"type": "string"
},
- "description": "Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.",
+ "description": "Optional A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.",
"type": "object"
},
"args": {
- "description": "Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.",
+ "description": "Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.",
"items": {
"type": "string"
},
"type": "array"
},
"fileUris": {
- "description": "Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "mainClass": {
- "description": "The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.",
- "type": "string"
- },
- "archiveUris": {
- "description": "Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.",
+ "description": "Optional HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. Useful for naively parallel tasks.",
"items": {
"type": "string"
},
@@ -1435,185 +1421,274 @@
},
"type": "object"
},
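A SparkJob sketch assembled from the fields above (the class and URIs are hypothetical; because mainClass is used, the jar carrying it is listed in jarFileUris, as the field description requires):

    {
      "mainClass": "org.example.WordCount",
      "jarFileUris": ["gs://example-bucket/jars/wordcount.jar"],
      "args": ["gs://example-bucket/input/", "gs://example-bucket/output/"]
    }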
- "DiagnoseClusterRequest": {
- "description": "A request to collect cluster diagnostic information.",
- "id": "DiagnoseClusterRequest",
- "properties": {},
- "type": "object"
- },
- "DiskConfig": {
- "description": "Specifies the config of disk options for a group of VM instances.",
- "id": "DiskConfig",
+ "Job": {
+ "description": "A Cloud Dataproc job resource.",
+ "id": "Job",
"properties": {
- "numLocalSsds": {
- "description": "Optional Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.",
- "format": "int32",
- "type": "integer"
+ "reference": {
+ "$ref": "JobReference",
+ "description": "Optional The fully qualified reference to the job, which can be used to obtain the equivalent REST path of the job resource. If this property is not specified when a job is created, the server generates a <code>job_id</code>."
},
- "bootDiskSizeGb": {
- "description": "Optional Size in GB of the boot disk (default is 500GB).",
- "format": "int32",
- "type": "integer"
- }
- },
- "type": "object"
- },
- "ClusterOperationMetadata": {
- "description": "Metadata describing the operation.",
- "id": "ClusterOperationMetadata",
- "properties": {
- "operationType": {
- "description": "Output-only The operation type.",
- "type": "string"
+ "hadoopJob": {
+ "$ref": "HadoopJob",
+ "description": "Job is a Hadoop job."
},
- "description": {
- "description": "Output-only Short description of operation.",
+ "placement": {
+ "$ref": "JobPlacement",
+ "description": "Required Job information, including how, when, and where to run the job."
+ },
+ "status": {
+ "$ref": "JobStatus",
+ "description": "Output-only The job status. Additional application-specific status information may be contained in the <code>type_job</code> and <code>yarn_applications</code> fields."
+ },
+ "driverControlFilesUri": {
+ "description": "Output-only If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.",
"type": "string"
},
- "warnings": {
- "description": "Output-only Errors encountered during operation execution.",
- "items": {
- "type": "string"
- },
- "type": "array"
+ "scheduling": {
+ "$ref": "JobScheduling",
+ "description": "Optional Job scheduling configuration."
+ },
+ "pigJob": {
+ "$ref": "PigJob",
+ "description": "Job is a Pig job."
+ },
+ "hiveJob": {
+ "$ref": "HiveJob",
+ "description": "Job is a Hive job."
},
"labels": {
"additionalProperties": {
"type": "string"
},
- "description": "Output-only Labels associated with the operation",
+ "description": "Optional The labels to associate with this job. Label keys must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if present, must contain 1 to 63 characters, and must conform to RFC 1035 (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be associated with a job.",
"type": "object"
},
- "status": {
- "$ref": "ClusterOperationStatus",
- "description": "Output-only Current operation status."
+ "driverOutputResourceUri": {
+ "description": "Output-only A URI pointing to the location of the stdout of the job's driver program.",
+ "type": "string"
+ },
+ "sparkSqlJob": {
+ "$ref": "SparkSqlJob",
+ "description": "Job is a SparkSql job."
},
"statusHistory": {
- "description": "Output-only The previous operation status.",
+ "description": "Output-only The previous job status.",
"items": {
- "$ref": "ClusterOperationStatus"
+ "$ref": "JobStatus"
},
"type": "array"
},
- "clusterName": {
- "description": "Output-only Name of the cluster for the operation.",
- "type": "string"
+ "sparkJob": {
+ "$ref": "SparkJob",
+ "description": "Job is a Spark job."
},
- "clusterUuid": {
- "description": "Output-only Cluster UUID for the operation.",
- "type": "string"
+ "yarnApplications": {
+ "description": "Output-only The collection of YARN applications spun up by this job.Beta Feature: This report is available for testing purposes only. It may be changed before final release.",
+ "items": {
+ "$ref": "YarnApplication"
+ },
+ "type": "array"
+ },
+ "pysparkJob": {
+ "$ref": "PySparkJob",
+ "description": "Job is a Pyspark job."
}
},
"type": "object"
},
- "Empty": {
- "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance:\nservice Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n}\nThe JSON representation for Empty is empty JSON object {}.",
- "id": "Empty",
- "properties": {},
- "type": "object"
- },
- "HiveJob": {
- "description": "A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN.",
- "id": "HiveJob",
+ "JobStatus": {
+ "description": "Cloud Dataproc job status.",
+ "id": "JobStatus",
"properties": {
- "continueOnFailure": {
- "description": "Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.",
- "type": "boolean"
- },
- "queryList": {
- "$ref": "QueryList",
- "description": "A list of queries."
- },
- "queryFileUri": {
- "description": "The HCFS URI of the script that contains Hive queries.",
+ "stateStartTime": {
+ "description": "Output-only The time when this state was entered.",
+ "format": "google-datetime",
"type": "string"
},
- "jarFileUris": {
- "description": "Optional HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.",
- "items": {
- "type": "string"
- },
- "type": "array"
+ "substate": {
+ "description": "Output-only Additional state information, which includes status reported by the agent.",
+ "enum": [
+ "UNSPECIFIED",
+ "SUBMITTED",
+ "QUEUED",
+ "STALE_STATUS"
+ ],
+ "enumDescriptions": [
+ "",
+ "The Job is submitted to the agent.Applies to RUNNING state.",
+ "The Job has been received and is awaiting execution (it may be waiting for a condition to be met). See the \"details\" field for the reason for the delay.Applies to RUNNING state.",
+ "The agent-reported status is out of date, which may be caused by a loss of communication between the agent and Cloud Dataproc. If the agent does not send a timely update, the job will fail.Applies to RUNNING state."
+ ],
+ "type": "string"
},
- "scriptVariables": {
- "additionalProperties": {
- "type": "string"
- },
- "description": "Optional Mapping of query variable names to values (equivalent to the Hive command: SET name=\"value\";).",
- "type": "object"
+ "state": {
+ "description": "Output-only A state message specifying the overall job state.",
+ "enum": [
+ "STATE_UNSPECIFIED",
+ "PENDING",
+ "SETUP_DONE",
+ "RUNNING",
+ "CANCEL_PENDING",
+ "CANCEL_STARTED",
+ "CANCELLED",
+ "DONE",
+ "ERROR",
+ "ATTEMPT_FAILURE"
+ ],
+ "enumDescriptions": [
+ "The job state is unknown.",
+ "The job is pending; it has been submitted, but is not yet running.",
+ "Job has been received by the service and completed initial setup; it will soon be submitted to the cluster.",
+ "The job is running on the cluster.",
+ "A CancelJob request has been received, but is pending.",
+ "Transient in-flight resources have been canceled, and the request to cancel the running job has been issued to the cluster.",
+ "The job cancellation was successful.",
+ "The job has completed successfully.",
+ "The job has completed, but encountered an error.",
+ "Job attempt has failed. The detail field contains failure details for this attempt.Applies to restartable jobs only."
+ ],
+ "type": "string"
},
- "properties": {
- "additionalProperties": {
- "type": "string"
- },
- "description": "Optional A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.",
- "type": "object"
+ "details": {
+ "description": "Output-only Optional job state details, such as an error description if the state is <code>ERROR</code>.",
+ "type": "string"
}
},
"type": "object"
},
- "DiagnoseClusterResults": {
- "description": "The location of diagnostic output.",
- "id": "DiagnoseClusterResults",
+ "ManagedGroupConfig": {
+ "description": "Specifies the resources used to actively manage an instance group.",
+ "id": "ManagedGroupConfig",
"properties": {
- "outputUri": {
- "description": "Output-only The Google Cloud Storage URI of the diagnostic output. The output report is a plain text file with a summary of collected diagnostics.",
+ "instanceGroupManagerName": {
+ "description": "Output-only The name of the Instance Group Manager for this group.",
+ "type": "string"
+ },
+ "instanceTemplateName": {
+ "description": "Output-only The name of the Instance Template used for the Managed Instance Group.",
"type": "string"
}
},
"type": "object"
},
- "ClusterConfig": {
- "description": "The cluster config.",
- "id": "ClusterConfig",
+ "ClusterOperationStatus": {
+ "description": "The status of the operation.",
+ "id": "ClusterOperationStatus",
"properties": {
- "gceClusterConfig": {
- "$ref": "GceClusterConfig",
- "description": "Required The shared Google Compute Engine config settings for all instances in a cluster."
+ "stateStartTime": {
+ "description": "Output-only The time this state was entered.",
+ "format": "google-datetime",
+ "type": "string"
},
- "softwareConfig": {
- "$ref": "SoftwareConfig",
- "description": "Optional The config settings for software inside the cluster."
+ "state": {
+ "description": "Output-only A message containing the operation state.",
+ "enum": [
+ "UNKNOWN",
+ "PENDING",
+ "RUNNING",
+ "DONE"
+ ],
+ "enumDescriptions": [
+ "Unused.",
+ "The operation has been created.",
+ "The operation is running.",
+ "The operation is done; either cancelled or completed."
+ ],
+ "type": "string"
},
- "masterConfig": {
- "$ref": "InstanceGroupConfig",
- "description": "Optional The Google Compute Engine config settings for the master instance in a cluster."
+ "details": {
+ "description": "Output-onlyA message containing any operation metadata details.",
+ "type": "string"
},
- "secondaryWorkerConfig": {
- "$ref": "InstanceGroupConfig",
- "description": "Optional The Google Compute Engine config settings for additional worker instances in a cluster."
+ "innerState": {
+ "description": "Output-only A message containing the detailed operation state.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "YarnApplication": {
+ "description": "A YARN application created by a job. Application information is a subset of <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.Beta Feature: This report is available for testing purposes only. It may be changed before final release.",
+ "id": "YarnApplication",
+ "properties": {
+ "state": {
+ "description": "Required The application state.",
+ "enum": [
+ "STATE_UNSPECIFIED",
+ "NEW",
+ "NEW_SAVING",
+ "SUBMITTED",
+ "ACCEPTED",
+ "RUNNING",
+ "FINISHED",
+ "FAILED",
+ "KILLED"
+ ],
+ "enumDescriptions": [
+ "Status is unspecified.",
+ "Status is NEW.",
+ "Status is NEW_SAVING.",
+ "Status is SUBMITTED.",
+ "Status is ACCEPTED.",
+ "Status is RUNNING.",
+ "Status is FINISHED.",
+ "Status is FAILED.",
+ "Status is KILLED."
+ ],
+ "type": "string"
},
- "initializationActions": {
- "description": "Optional Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's <code>role</code> metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget):\nROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)\nif [[ \"${ROLE}\" == 'Master' ]]; then\n ... master specific actions ...\nelse\n ... worker specific actions ...\nfi\n",
- "items": {
- "$ref": "NodeInitializationAction"
- },
- "type": "array"
+ "name": {
+ "description": "Required The application name.",
+ "type": "string"
},
- "configBucket": {
- "description": "Optional A Google Cloud Storage staging bucket used for sharing generated SSH keys and config. If you do not specify a staging bucket, Cloud Dataproc will determine an appropriate Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Google Compute Engine zone where your cluster is deployed, and then it will create and manage this project-level, per-location bucket for you.",
+ "trackingUrl": {
+ "description": "Optional The HTTP URL of the ApplicationMaster, HistoryServer, or TimelineServer that provides application-specific information. The URL uses the internal hostname, and requires a proxy server for resolution and, possibly, access.",
"type": "string"
},
- "workerConfig": {
- "$ref": "InstanceGroupConfig",
- "description": "Optional The Google Compute Engine config settings for worker instances in a cluster."
+ "progress": {
+ "description": "Required The numerical progress of the application, from 1 to 100.",
+ "format": "float",
+ "type": "number"
}
},
"type": "object"
},
- "PySparkJob": {
- "description": "A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN.",
- "id": "PySparkJob",
+ "QueryList": {
+ "description": "A list of queries to run on a cluster.",
+ "id": "QueryList",
+ "properties": {
+ "queries": {
+ "description": "Required The queries to execute. You do not need to terminate a query with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of an Cloud Dataproc API snippet that uses a QueryList to specify a HiveJob:\n\"hiveJob\": {\n \"queryList\": {\n \"queries\": [\n \"query1\",\n \"query2\",\n \"query3;query4\",\n ]\n }\n}\n",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "HadoopJob": {
+ "description": "A Cloud Dataproc job for running Apache Hadoop MapReduce (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on Apache Hadoop YARN (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).",
+ "id": "HadoopJob",
"properties": {
+ "mainClass": {
+ "description": "The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in jar_file_uris.",
+ "type": "string"
+ },
"archiveUris": {
- "description": "Optional HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip.",
+ "description": "Optional HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.",
"items": {
"type": "string"
},
"type": "array"
},
+ "mainJarFileUri": {
+ "description": "The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'",
+ "type": "string"
+ },
"jarFileUris": {
- "description": "Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.",
+ "description": "Optional Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.",
"items": {
"type": "string"
},
@@ -1627,338 +1702,293 @@
"additionalProperties": {
"type": "string"
},
- "description": "Optional A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.",
+ "description": "Optional A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.",
"type": "object"
},
"args": {
- "description": "Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.",
+ "description": "Optional The arguments to pass to the driver. Do not include arguments, such as -libjars or -Dfoo=bar, that can be set as job properties, since a collision may occur that causes an incorrect job submission.",
"items": {
"type": "string"
},
"type": "array"
},
"fileUris": {
- "description": "Optional HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.",
- "items": {
- "type": "string"
- },
- "type": "array"
- },
- "pythonFileUris": {
- "description": "Optional HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.",
+ "description": "Optional HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.",
"items": {
"type": "string"
},
"type": "array"
- },
- "mainPythonFileUri": {
- "description": "Required The HCFS URI of the main Python file to use as the driver. Must be a .py file.",
- "type": "string"
}
},
"type": "object"
},
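Combining the fields above, a hypothetical HadoopJob; the jar URI reuses one of the examples given for mainJarFileUri, and the args are illustrative:

    {
      "mainJarFileUri": "file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar",
      "args": ["wordcount", "gs://example-bucket/input/", "gs://example-bucket/output/"]
    }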
- "GceClusterConfig": {
- "description": "Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster.",
- "id": "GceClusterConfig",
+ "DiagnoseClusterRequest": {
+ "description": "A request to collect cluster diagnostic information.",
+ "id": "DiagnoseClusterRequest",
+ "properties": {},
+ "type": "object"
+ },
+ "DiskConfig": {
+ "description": "Specifies the config of disk options for a group of VM instances.",
+ "id": "DiskConfig",
"properties": {
- "internalIpOnly": {
- "description": "Optional If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This internal_ip_only restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.",
- "type": "boolean"
- },
- "metadata": {
- "additionalProperties": {
- "type": "string"
- },
- "description": "The Google Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).",
- "type": "object"
- },
- "serviceAccountScopes": {
- "description": "Optional The URIs of service account scopes to be included in Google Compute Engine instances. The following base set of scopes is always included:\nhttps://www.googleapis.com/auth/cloud.useraccounts.readonly\nhttps://www.googleapis.com/auth/devstorage.read_write\nhttps://www.googleapis.com/auth/logging.writeIf no scopes are specified, the following defaults are also provided:\nhttps://www.googleapis.com/auth/bigquery\nhttps://www.googleapis.com/auth/bigtable.admin.table\nhttps://www.googleapis.com/auth/bigtable.data\nhttps://www.googleapis.com/auth/devstorage.full_control",
- "items": {
- "type": "string"
- },
- "type": "array"
+ "bootDiskSizeGb": {
+ "description": "Optional Size in GB of the boot disk (default is 500GB).",
+ "format": "int32",
+ "type": "integer"
},
- "tags": {
- "description": "The Google Compute Engine tags to add to all instances (see Tagging instances).",
+ "numLocalSsds": {
+ "description": "Optional Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and HDFS (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.",
+ "format": "int32",
+ "type": "integer"
+ }
+ },
+ "type": "object"
+ },
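A DiskConfig sketch that simply spells out the documented default boot disk size plus one local SSD (values illustrative; numLocalSsds must stay in the documented 0 to 4 range):

    {
      "bootDiskSizeGb": 500,
      "numLocalSsds": 1
    }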
+ "ClusterOperationMetadata": {
+ "description": "Metadata describing the operation.",
+ "id": "ClusterOperationMetadata",
+ "properties": {
+ "statusHistory": {
+ "description": "Output-only The previous operation status.",
"items": {
- "type": "string"
+ "$ref": "ClusterOperationStatus"
},
"type": "array"
},
- "serviceAccount": {
- "description": "Optional The service account of the instances. Defaults to the default Google Compute Engine service account. Custom service accounts need permissions equivalent to the folloing IAM roles:\nroles/logging.logWriter\nroles/storage.objectAdmin(see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts for more information). Example: [account_id]@[project_id].iam.gserviceaccount.com",
+ "clusterUuid": {
+ "description": "Output-only Cluster UUID for the operation.",
"type": "string"
},
- "subnetworkUri": {
- "description": "Optional The Google Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. Example: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0.",
+ "clusterName": {
+ "description": "Output-only Name of the cluster for the operation.",
"type": "string"
},
- "networkUri": {
- "description": "Optional The Google Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see Using Subnetworks for more information). Example: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default.",
+ "operationType": {
+ "description": "Output-only The operation type.",
"type": "string"
},
- "zoneUri": {
- "description": "Required The zone where the Google Compute Engine cluster will be located. Example: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone].",
- "type": "string"
- }
- },
- "type": "object"
- },
- "AcceleratorConfig": {
- "description": "Specifies the type and number of accelerator cards attached to the instances of an instance group (see GPUs on Compute Engine).",
- "id": "AcceleratorConfig",
- "properties": {
- "acceleratorTypeUri": {
- "description": "Full or partial URI of the accelerator type resource to expose to this instance. See Google Compute Engine AcceleratorTypes( /compute/docs/reference/beta/acceleratorTypes)",
+ "description": {
+ "description": "Output-only Short description of operation.",
"type": "string"
},
- "acceleratorCount": {
- "description": "The number of the accelerator cards of this type exposed to this instance.",
- "format": "int32",
- "type": "integer"
- }
- },
- "type": "object"
- },
- "ClusterMetrics": {
- "description": "Contains cluster daemon metrics, such as HDFS and YARN stats.Beta Feature: This report is available for testing purposes only. It may be changed before final release.",
- "id": "ClusterMetrics",
- "properties": {
- "hdfsMetrics": {
- "additionalProperties": {
- "format": "int64",
+ "warnings": {
+ "description": "Output-only Errors encountered during operation execution.",
+ "items": {
"type": "string"
},
- "description": "The HDFS metrics.",
- "type": "object"
+ "type": "array"
},
- "yarnMetrics": {
+ "labels": {
"additionalProperties": {
- "format": "int64",
"type": "string"
},
- "description": "The YARN metrics.",
+ "description": "Output-only Labels associated with the operation",
"type": "object"
+ },
+ "status": {
+ "$ref": "ClusterOperationStatus",
+ "description": "Output-only Current operation status."
}
},
"type": "object"
},
- "LoggingConfig": {
- "description": "The runtime logging config of the job.",
- "id": "LoggingConfig",
+ "HiveJob": {
+ "description": "A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/) queries on YARN.",
+ "id": "HiveJob",
"properties": {
- "driverLogLevels": {
+ "continueOnFailure": {
+ "description": "Optional Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries.",
+ "type": "boolean"
+ },
+ "queryList": {
+ "$ref": "QueryList",
+ "description": "A list of queries."
+ },
+ "queryFileUri": {
+ "description": "The HCFS URI of the script that contains Hive queries.",
+ "type": "string"
+ },
+ "jarFileUris": {
+ "description": "Optional HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "scriptVariables": {
+ "additionalProperties": {
+ "type": "string"
+ },
+ "description": "Optional Mapping of query variable names to values (equivalent to the Hive command: SET name=\"value\";).",
+ "type": "object"
+ },
+ "properties": {
"additionalProperties": {
- "enum": [
- "LEVEL_UNSPECIFIED",
- "ALL",
- "TRACE",
- "DEBUG",
- "INFO",
- "WARN",
- "ERROR",
- "FATAL",
- "OFF"
- ],
"type": "string"
},
- "description": "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'",
+ "description": "Optional A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.",
"type": "object"
}
},
"type": "object"
},
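A hypothetical HiveJob with an inline QueryList (the query, table name, and variable are illustrative; queryFileUri would be the alternative to queryList, and scriptVariables maps to the Hive command SET name="value";):

    {
      "queryList": {
        "queries": ["SELECT * FROM web_logs LIMIT 10"]
      },
      "scriptVariables": {"env": "test"},
      "continueOnFailure": true
    }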
- "DiagnoseClusterOutputLocation": {
- "description": "The location where output from diagnostic command can be found.",
- "id": "DiagnoseClusterOutputLocation",
+ "Empty": {
+ "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance:\nservice Foo {\n rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);\n}\nThe JSON representation for Empty is empty JSON object {}.",
+ "id": "Empty",
+ "properties": {},
+ "type": "object"
+ },
+ "DiagnoseClusterResults": {
+ "description": "The location of diagnostic output.",
+ "id": "DiagnoseClusterResults",
"properties": {
"outputUri": {
- "description": "Output-only The Google Cloud Storage URI of the diagnostic output. This will be a plain text file with summary of collected diagnostics.",
+ "description": "Output-only The Google Cloud Storage URI of the diagnostic output. The output report is a plain text file with a summary of collected diagnostics.",
"type": "string"
}
},
"type": "object"
},
- "Operation": {
- "description": "This resource represents a long-running operation that is the result of a network API call.",
- "id": "Operation",
+ "ClusterConfig": {
+ "description": "The cluster config.",
+ "id": "ClusterConfig",
"properties": {
- "response": {
- "additionalProperties": {
- "description": "Properties of the object. Contains field @type with type URL.",
- "type": "any"
- },
- "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.",
- "type": "object"
+ "workerConfig": {
+ "$ref": "InstanceGroupConfig",
+ "description": "Optional The Google Compute Engine config settings for worker instances in a cluster."
},
- "name": {
- "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should have the format of operations/some/unique/name.",
- "type": "string"
+ "gceClusterConfig": {
+ "$ref": "GceClusterConfig",
+ "description": "Required The shared Google Compute Engine config settings for all instances in a cluster."
},
- "error": {
- "$ref": "Status",
- "description": "The error result of the operation in case of failure or cancellation."
+ "softwareConfig": {
+ "$ref": "SoftwareConfig",
+ "description": "Optional The config settings for software inside the cluster."
},
- "metadata": {
- "additionalProperties": {
- "description": "Properties of the object. Contains field @type with type URL.",
- "type": "any"
+ "masterConfig": {
+ "$ref": "InstanceGroupConfig",
+ "description": "Optional The Google Compute Engine config settings for the master instance in a cluster."
+ },
+ "secondaryWorkerConfig": {
+ "$ref": "InstanceGroupConfig",
+ "description": "Optional The Google Compute Engine config settings for additional worker instances in a cluster."
+ },
+ "initializationActions": {
+ "description": "Optional Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's <code>role</code> metadata to run an executable on a master or worker node, as shown below using curl (you can also use wget):\nROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)\nif [[ \"${ROLE}\" == 'Master' ]]; then\n ... master specific actions ...\nelse\n ... worker specific actions ...\nfi\n",
+ "items": {
+ "$ref": "NodeInitializationAction"
},
- "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.",
- "type": "object"
+ "type": "array"
},
- "done": {
- "description": "If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.",
- "type": "boolean"
+ "configBucket": {
+ "description": "Optional A Google Cloud Storage staging bucket used for sharing generated SSH keys and config. If you do not specify a staging bucket, Cloud Dataproc will determine an appropriate Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Google Compute Engine zone where your cluster is deployed, and then it will create and manage this project-level, per-location bucket for you.",
+ "type": "string"
}
},
"type": "object"
},
- "OperationStatus": {
- "description": "The status of the operation.",
- "id": "OperationStatus",
+ "PySparkJob": {
+ "description": "A Cloud Dataproc job for running Apache PySpark (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN.",
+ "id": "PySparkJob",
"properties": {
- "innerState": {
- "description": "A message containing the detailed operation state.",
- "type": "string"
+ "jarFileUris": {
+ "description": "Optional HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
},
- "stateStartTime": {
- "description": "The time this state was entered.",
- "format": "google-datetime",
- "type": "string"
+ "loggingConfig": {
+ "$ref": "LoggingConfig",
+ "description": "Optional The runtime log config for job execution."
},
- "state": {
- "description": "A message containing the operation state.",
- "enum": [
- "UNKNOWN",
- "PENDING",
- "RUNNING",
- "DONE"
- ],
- "enumDescriptions": [
- "Unused.",
- "The operation has been created.",
- "The operation is running.",
- "The operation is done; either cancelled or completed."
- ],
- "type": "string"
+ "properties": {
+ "additionalProperties": {
+ "type": "string"
+ },
+ "description": "Optional A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.",
+ "type": "object"
},
- "details": {
- "description": "A message containing any operation metadata details.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "JobReference": {
- "description": "Encapsulates the full scoping used to reference a job.",
- "id": "JobReference",
- "properties": {
- "projectId": {
- "description": "Required The ID of the Google Cloud Platform project that the job belongs to.",
- "type": "string"
+ "args": {
+ "description": "Optional The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission.",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
},
- "jobId": {
- "description": "Optional The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens (-). The maximum length is 100 characters.",
- "type": "string"
- }
- },
- "type": "object"
- },
- "SubmitJobRequest": {
- "description": "A request to submit a job.",
- "id": "SubmitJobRequest",
- "properties": {
- "job": {
- "$ref": "Job",
- "description": "Required The job resource."
- }
- },
- "type": "object"
- },
- "Status": {
- "description": "The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). The error model is designed to be:\nSimple to use and understand for most users\nFlexible enough to meet unexpected needsOverviewThe Status message contains three pieces of data: error code, error message, and error details. The error code should be an enum value of google.rpc.Code, but it may accept additional error codes if needed. The error message should be a developer-facing English message that helps developers understand and resolve the error. If a localized user-facing error message is needed, put the localized message in the error details or localize it in the client. The optional error details may contain arbitrary information about the error. There is a predefined set of error detail types in the package google.rpc which can be used for common error conditions.Language mappingThe Status message is the logical representation of the error model, but it is not necessarily the actual wire format. When the Status message is exposed in different client libraries and different wire protocols, it can be mapped differently. For example, it will likely be mapped to some exceptions in Java, but more likely mapped to some error codes in C.Other usesThe error model and the Status message can be used in a variety of environments, either with or without APIs, to provide a consistent developer experience across different environments.Example uses of this error model include:\nPartial errors. If a service needs to return partial errors to the client, it may embed the Status in the normal response to indicate the partial errors.\nWorkflow errors. A typical workflow has multiple steps. Each step may have a Status message for error reporting purpose.\nBatch operations. If a client uses batch request and batch response, the Status message should be used directly inside batch response, one for each error sub-response.\nAsynchronous operations. If an API call embeds asynchronous operation results in its response, the status of those operations should be represented directly using the Status message.\nLogging. If some API errors are stored in logs, the message Status could be used directly after any stripping needed for security/privacy reasons.",
- "id": "Status",
- "properties": {
- "details": {
- "description": "A list of messages that carry the error details. There will be a common set of message types for APIs to use.",
+ "fileUris": {
+ "description": "Optional HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks.",
"items": {
- "additionalProperties": {
- "description": "Properties of the object. Contains field @type with type URL.",
- "type": "any"
- },
- "type": "object"
+ "type": "string"
},
"type": "array"
},
- "code": {
- "description": "The status code, which should be an enum value of google.rpc.Code.",
- "format": "int32",
- "type": "integer"
+ "pythonFileUris": {
+ "description": "Optional HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
},
- "message": {
- "description": "A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.",
+ "mainPythonFileUri": {
+ "description": "Required The HCFS URI of the main Python file to use as the driver. Must be a .py file.",
"type": "string"
+ },
+ "archiveUris": {
+ "description": "Optional HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip.",
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
}
},
"type": "object"
},
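Taken together, the PySparkJob fields added above compose like the following sketch. All bucket and file URIs are hypothetical placeholders, and flags settable as job properties (such as --conf) are deliberately kept out of args per the description above:

    {
      "mainPythonFileUri": "gs://my-bucket/driver.py",
      "args": ["--input", "gs://my-bucket/input/", "--output", "gs://my-bucket/output/"],
      "pythonFileUris": ["gs://my-bucket/libs/helpers.py", "gs://my-bucket/libs/models.egg"],
      "fileUris": ["gs://my-bucket/config/settings.ini"],
      "archiveUris": ["gs://my-bucket/deps/venv.zip"]
    }

Similarly, a Status payload of the shape described earlier would carry a google.rpc.Code value, a developer-facing message, and typed details entries; a sketch with hypothetical values:

    {
      "code": 3,
      "message": "Job ID contains invalid characters",
      "details": [
        {
          "@type": "type.googleapis.com/google.rpc.BadRequest",
          "fieldViolations": [{"field": "job.reference.job_id"}]
        }
      ]
    }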
- "JobScheduling": {
- "description": "Job scheduling options.Beta Feature: These options are available for testing purposes only. They may be changed before final release.",
- "id": "JobScheduling",
- "properties": {
- "maxFailuresPerHour": {
- "description": "Optional Maximum number of times per hour a driver may be restarted as a result of driver terminating with non-zero code before job is reported failed.A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window.Maximum value is 10.",
- "format": "int32",
- "type": "integer"
- }
- },
- "type": "object"
- },
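As a sketch, the JobScheduling message above would ride along on a job as a small nested object, assuming the Job resource exposes it under a scheduling field (the value here is hypothetical; the documented maximum is 10):

    {
      "scheduling": {
        "maxFailuresPerHour": 5
      }
    }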
- "InstanceGroupConfig": {
- "description": "Optional The config settings for Google Compute Engine resources in an instance group, such as a master or worker group.",
- "id": "InstanceGroupConfig",
+ "GceClusterConfig": {
+ "description": "Common config settings for resources of Google Compute Engine cluster instances, applicable to all instances in the cluster.",
+ "id": "GceClusterConfig",
"properties": {
- "numInstances": {
- "description": "Required The number of VM instances in the instance group. For master instance groups, must be set to 1.",
- "format": "int32",
- "type": "integer"
+ "networkUri": {
+ "description": "Optional The Google Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither network_uri nor subnetwork_uri is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see Using Subnetworks for more information). Example: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default.",
+ "type": "string"
},
- "diskConfig": {
- "$ref": "DiskConfig",
- "description": "Optional Disk option config settings."
+ "zoneUri": {
+ "description": "Required The zone where the Google Compute Engine cluster will be located. Example: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone].",
+ "type": "string"
},
- "managedGroupConfig": {
- "$ref": "ManagedGroupConfig",
- "description": "Output-only The config for Google Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups."
+ "metadata": {
+ "additionalProperties": {
+ "type": "string"
+ },
+ "description": "The Google Compute Engine metadata entries to add to all instances (see Project and instance metadata (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).",
+ "type": "object"
},
- "isPreemptible": {
- "description": "Optional Specifies that this instance group contains preemptible instances.",
+ "internalIpOnly": {
+ "description": "Optional If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This internal_ip_only restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.",
"type": "boolean"
},
- "imageUri": {
- "description": "Output-only The Google Compute Engine image resource used for cluster instances. Inferred from SoftwareConfig.image_version.",
- "type": "string"
- },
- "machineTypeUri": {
- "description": "Required The Google Compute Engine machine type used for cluster instances. Example: https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2.",
- "type": "string"
- },
- "instanceNames": {
- "description": "Optional The list of instance names. Cloud Dataproc derives the names from cluster_name, num_instances, and the instance group if not set by user (recommended practice is to let Cloud Dataproc derive the name).",
+ "serviceAccountScopes": {
+ "description": "Optional The URIs of service account scopes to be included in Google Compute Engine instances. The following base set of scopes is always included:\nhttps://www.googleapis.com/auth/cloud.useraccounts.readonly\nhttps://www.googleapis.com/auth/devstorage.read_write\nhttps://www.googleapis.com/auth/logging.writeIf no scopes are specified, the following defaults are also provided:\nhttps://www.googleapis.com/auth/bigquery\nhttps://www.googleapis.com/auth/bigtable.admin.table\nhttps://www.googleapis.com/auth/bigtable.data\nhttps://www.googleapis.com/auth/devstorage.full_control",
"items": {
"type": "string"
},
"type": "array"
},
- "accelerators": {
- "description": "Optional The Google Compute Engine accelerator configuration for these instances.Beta Feature: This feature is still under development. It may be changed before final release.",
+ "tags": {
+ "description": "The Google Compute Engine tags to add to all instances (see Tagging instances).",
"items": {
- "$ref": "AcceleratorConfig"
+ "type": "string"
},
"type": "array"
+ },
+ "serviceAccount": {
+ "description": "Optional The service account of the instances. Defaults to the default Google Compute Engine service account. Custom service accounts need permissions equivalent to the folloing IAM roles:\nroles/logging.logWriter\nroles/storage.objectAdmin(see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts for more information). Example: [account_id]@[project_id].iam.gserviceaccount.com",
+ "type": "string"
+ },
+ "subnetworkUri": {
+ "description": "Optional The Google Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. Example: https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0.",
+ "type": "string"
}
},
"type": "object"