Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(62)

Side by Side Diff: generated/googleapis/lib/dataproc/v1.dart

Issue 3006323002: Api-Roll 54: 2017-09-11 (Closed)
Patch Set: use 2.0.0-dev.infinity sdk constraint in pubspecs Created 3 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // This is a generated file (see the discoveryapis_generator project). 1 // This is a generated file (see the discoveryapis_generator project).
2 2
3 library googleapis.dataproc.v1; 3 library googleapis.dataproc.v1;
4 4
5 import 'dart:core' as core; 5 import 'dart:core' as core;
6 import 'dart:async' as async; 6 import 'dart:async' as async;
7 import 'dart:convert' as convert; 7 import 'dart:convert' as convert;
8 8
9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; 9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons;
10 import 'package:http/http.dart' as http; 10 import 'package:http/http.dart' as http;
11 11
12 export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' show 12 export 'package:_discoveryapis_commons/_discoveryapis_commons.dart'
13 ApiRequestError, DetailedApiRequestError; 13 show ApiRequestError, DetailedApiRequestError;
14 14
15 const core.String USER_AGENT = 'dart-api-client dataproc/v1'; 15 const core.String USER_AGENT = 'dart-api-client dataproc/v1';
16 16
17 /** Manages Hadoop-based clusters and jobs on Google Cloud Platform. */ 17 /// Manages Hadoop-based clusters and jobs on Google Cloud Platform.
18 class DataprocApi { 18 class DataprocApi {
19 /** View and manage your data across Google Cloud Platform services */ 19 /// View and manage your data across Google Cloud Platform services
 20 static const CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"; 20 static const CloudPlatformScope =
21 21 "https://www.googleapis.com/auth/cloud-platform";
22 22
23 final commons.ApiRequester _requester; 23 final commons.ApiRequester _requester;
24 24
25 ProjectsResourceApi get projects => new ProjectsResourceApi(_requester); 25 ProjectsResourceApi get projects => new ProjectsResourceApi(_requester);
26 26
 27 DataprocApi(http.Client client, {core.String rootUrl: "https://dataproc.googleapis.com/", core.String servicePath: ""}) : 27 DataprocApi(http.Client client,
 28 _requester = new commons.ApiRequester(client, rootUrl, servicePath, USER_AGENT); 28 {core.String rootUrl: "https://dataproc.googleapis.com/",
29 core.String servicePath: ""})
30 : _requester =
31 new commons.ApiRequester(client, rootUrl, servicePath, USER_AGENT);
29 } 32 }
30 33
31
32 class ProjectsResourceApi { 34 class ProjectsResourceApi {
33 final commons.ApiRequester _requester; 35 final commons.ApiRequester _requester;
34 36
 35 ProjectsRegionsResourceApi get regions => new ProjectsRegionsResourceApi(_requester); 37 ProjectsRegionsResourceApi get regions =>
38 new ProjectsRegionsResourceApi(_requester);
36 39
37 ProjectsResourceApi(commons.ApiRequester client) : 40 ProjectsResourceApi(commons.ApiRequester client) : _requester = client;
38 _requester = client;
39 } 41 }
40 42
41
42 class ProjectsRegionsResourceApi { 43 class ProjectsRegionsResourceApi {
43 final commons.ApiRequester _requester; 44 final commons.ApiRequester _requester;
44 45
 45 ProjectsRegionsClustersResourceApi get clusters => new ProjectsRegionsClustersResourceApi(_requester); 46 ProjectsRegionsClustersResourceApi get clusters =>
 46 ProjectsRegionsJobsResourceApi get jobs => new ProjectsRegionsJobsResourceApi(_requester); 47 new ProjectsRegionsClustersResourceApi(_requester);
 47 ProjectsRegionsOperationsResourceApi get operations => new ProjectsRegionsOperationsResourceApi(_requester); 48 ProjectsRegionsJobsResourceApi get jobs =>
49 new ProjectsRegionsJobsResourceApi(_requester);
50 ProjectsRegionsOperationsResourceApi get operations =>
51 new ProjectsRegionsOperationsResourceApi(_requester);
48 52
49 ProjectsRegionsResourceApi(commons.ApiRequester client) : 53 ProjectsRegionsResourceApi(commons.ApiRequester client) : _requester = client;
50 _requester = client;
51 } 54 }
52 55
53
54 class ProjectsRegionsClustersResourceApi { 56 class ProjectsRegionsClustersResourceApi {
55 final commons.ApiRequester _requester; 57 final commons.ApiRequester _requester;
56 58
57 ProjectsRegionsClustersResourceApi(commons.ApiRequester client) : 59 ProjectsRegionsClustersResourceApi(commons.ApiRequester client)
58 _requester = client; 60 : _requester = client;
59 61
60 /** 62 /// Creates a cluster in a project.
61 * Creates a cluster in a project. 63 ///
62 * 64 /// [request] - The metadata request object.
63 * [request] - The metadata request object. 65 ///
64 * 66 /// Request parameters:
65 * Request parameters: 67 ///
66 * 68 /// [projectId] - Required. The ID of the Google Cloud Platform project that
67 * [projectId] - Required. The ID of the Google Cloud Platform project that 69 /// the cluster belongs to.
68 * the cluster belongs to. 70 ///
69 * 71 /// [region] - Required. The Cloud Dataproc region in which to handle the
70 * [region] - Required. The Cloud Dataproc region in which to handle the 72 /// request.
71 * request. 73 ///
72 * 74 /// Completes with a [Operation].
73 * Completes with a [Operation]. 75 ///
74 * 76 /// Completes with a [commons.ApiRequestError] if the API endpoint returned
75 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 77 /// an error.
76 * error. 78 ///
77 * 79 /// If the used [http.Client] completes with an error when making a REST
78 * If the used [http.Client] completes with an error when making a REST call, 80 /// call, this method will complete with the same error.
79 * this method will complete with the same error. 81 async.Future<Operation> create(
80 */ 82 Cluster request, core.String projectId, core.String region) {
 81 async.Future<Operation> create(Cluster request, core.String projectId, core.String region) {
82 var _url = null; 83 var _url = null;
83 var _queryParams = new core.Map(); 84 var _queryParams = new core.Map();
84 var _uploadMedia = null; 85 var _uploadMedia = null;
85 var _uploadOptions = null; 86 var _uploadOptions = null;
86 var _downloadOptions = commons.DownloadOptions.Metadata; 87 var _downloadOptions = commons.DownloadOptions.Metadata;
87 var _body = null; 88 var _body = null;
88 89
89 if (request != null) { 90 if (request != null) {
90 _body = convert.JSON.encode((request).toJson()); 91 _body = convert.JSON.encode((request).toJson());
91 } 92 }
92 if (projectId == null) { 93 if (projectId == null) {
93 throw new core.ArgumentError("Parameter projectId is required."); 94 throw new core.ArgumentError("Parameter projectId is required.");
94 } 95 }
95 if (region == null) { 96 if (region == null) {
96 throw new core.ArgumentError("Parameter region is required."); 97 throw new core.ArgumentError("Parameter region is required.");
97 } 98 }
98 99
 99 _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters'; 100 _url = 'v1/projects/' +
101 commons.Escaper.ecapeVariable('$projectId') +
102 '/regions/' +
103 commons.Escaper.ecapeVariable('$region') +
104 '/clusters';
100 105
101 var _response = _requester.request(_url, 106 var _response = _requester.request(_url, "POST",
102 "POST", 107 body: _body,
103 body: _body, 108 queryParams: _queryParams,
104 queryParams: _queryParams, 109 uploadOptions: _uploadOptions,
105 uploadOptions: _uploadOptions, 110 uploadMedia: _uploadMedia,
106 uploadMedia: _uploadMedia, 111 downloadOptions: _downloadOptions);
107 downloadOptions: _downloadOptions);
108 return _response.then((data) => new Operation.fromJson(data)); 112 return _response.then((data) => new Operation.fromJson(data));
109 } 113 }
110 114
111 /** 115 /// Deletes a cluster in a project.
112 * Deletes a cluster in a project. 116 ///
113 * 117 /// Request parameters:
114 * Request parameters: 118 ///
115 * 119 /// [projectId] - Required. The ID of the Google Cloud Platform project that
116 * [projectId] - Required. The ID of the Google Cloud Platform project that 120 /// the cluster belongs to.
117 * the cluster belongs to. 121 ///
118 * 122 /// [region] - Required. The Cloud Dataproc region in which to handle the
119 * [region] - Required. The Cloud Dataproc region in which to handle the 123 /// request.
120 * request. 124 ///
121 * 125 /// [clusterName] - Required. The cluster name.
122 * [clusterName] - Required. The cluster name. 126 ///
123 * 127 /// Completes with a [Operation].
124 * Completes with a [Operation]. 128 ///
125 * 129 /// Completes with a [commons.ApiRequestError] if the API endpoint returned
126 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 130 /// an error.
127 * error. 131 ///
128 * 132 /// If the used [http.Client] completes with an error when making a REST
129 * If the used [http.Client] completes with an error when making a REST call, 133 /// call, this method will complete with the same error.
130 * this method will complete with the same error. 134 async.Future<Operation> delete(
131 */ 135 core.String projectId, core.String region, core.String clusterName) {
 132 async.Future<Operation> delete(core.String projectId, core.String region, core.String clusterName) {
133 var _url = null; 136 var _url = null;
134 var _queryParams = new core.Map(); 137 var _queryParams = new core.Map();
135 var _uploadMedia = null; 138 var _uploadMedia = null;
136 var _uploadOptions = null; 139 var _uploadOptions = null;
137 var _downloadOptions = commons.DownloadOptions.Metadata; 140 var _downloadOptions = commons.DownloadOptions.Metadata;
138 var _body = null; 141 var _body = null;
139 142
140 if (projectId == null) { 143 if (projectId == null) {
141 throw new core.ArgumentError("Parameter projectId is required."); 144 throw new core.ArgumentError("Parameter projectId is required.");
142 } 145 }
143 if (region == null) { 146 if (region == null) {
144 throw new core.ArgumentError("Parameter region is required."); 147 throw new core.ArgumentError("Parameter region is required.");
145 } 148 }
146 if (clusterName == null) { 149 if (clusterName == null) {
147 throw new core.ArgumentError("Parameter clusterName is required."); 150 throw new core.ArgumentError("Parameter clusterName is required.");
148 } 151 }
149 152
 150 _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters/' + commons.Escaper.ecapeVariable('$clusterName'); 153 _url = 'v1/projects/' +
154 commons.Escaper.ecapeVariable('$projectId') +
155 '/regions/' +
156 commons.Escaper.ecapeVariable('$region') +
157 '/clusters/' +
158 commons.Escaper.ecapeVariable('$clusterName');
151 159
152 var _response = _requester.request(_url, 160 var _response = _requester.request(_url, "DELETE",
153 "DELETE", 161 body: _body,
154 body: _body, 162 queryParams: _queryParams,
155 queryParams: _queryParams, 163 uploadOptions: _uploadOptions,
156 uploadOptions: _uploadOptions, 164 uploadMedia: _uploadMedia,
157 uploadMedia: _uploadMedia, 165 downloadOptions: _downloadOptions);
158 downloadOptions: _downloadOptions);
159 return _response.then((data) => new Operation.fromJson(data)); 166 return _response.then((data) => new Operation.fromJson(data));
160 } 167 }
161 168
162 /** 169 /// Gets cluster diagnostic information. After the operation completes, the
163 * Gets cluster diagnostic information. After the operation completes, the 170 /// Operation.response field contains DiagnoseClusterOutputLocation.
164 * Operation.response field contains DiagnoseClusterOutputLocation. 171 ///
165 * 172 /// [request] - The metadata request object.
166 * [request] - The metadata request object. 173 ///
167 * 174 /// Request parameters:
168 * Request parameters: 175 ///
169 * 176 /// [projectId] - Required. The ID of the Google Cloud Platform project that
170 * [projectId] - Required. The ID of the Google Cloud Platform project that 177 /// the cluster belongs to.
171 * the cluster belongs to. 178 ///
172 * 179 /// [region] - Required. The Cloud Dataproc region in which to handle the
173 * [region] - Required. The Cloud Dataproc region in which to handle the 180 /// request.
174 * request. 181 ///
175 * 182 /// [clusterName] - Required. The cluster name.
176 * [clusterName] - Required. The cluster name. 183 ///
177 * 184 /// Completes with a [Operation].
178 * Completes with a [Operation]. 185 ///
179 * 186 /// Completes with a [commons.ApiRequestError] if the API endpoint returned
180 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 187 /// an error.
181 * error. 188 ///
182 * 189 /// If the used [http.Client] completes with an error when making a REST
183 * If the used [http.Client] completes with an error when making a REST call, 190 /// call, this method will complete with the same error.
184 * this method will complete with the same error. 191 async.Future<Operation> diagnose(DiagnoseClusterRequest request,
185 */ 192 core.String projectId, core.String region, core.String clusterName) {
 186 async.Future<Operation> diagnose(DiagnoseClusterRequest request, core.String projectId, core.String region, core.String clusterName) {
187 var _url = null; 193 var _url = null;
188 var _queryParams = new core.Map(); 194 var _queryParams = new core.Map();
189 var _uploadMedia = null; 195 var _uploadMedia = null;
190 var _uploadOptions = null; 196 var _uploadOptions = null;
191 var _downloadOptions = commons.DownloadOptions.Metadata; 197 var _downloadOptions = commons.DownloadOptions.Metadata;
192 var _body = null; 198 var _body = null;
193 199
194 if (request != null) { 200 if (request != null) {
195 _body = convert.JSON.encode((request).toJson()); 201 _body = convert.JSON.encode((request).toJson());
196 } 202 }
197 if (projectId == null) { 203 if (projectId == null) {
198 throw new core.ArgumentError("Parameter projectId is required."); 204 throw new core.ArgumentError("Parameter projectId is required.");
199 } 205 }
200 if (region == null) { 206 if (region == null) {
201 throw new core.ArgumentError("Parameter region is required."); 207 throw new core.ArgumentError("Parameter region is required.");
202 } 208 }
203 if (clusterName == null) { 209 if (clusterName == null) {
204 throw new core.ArgumentError("Parameter clusterName is required."); 210 throw new core.ArgumentError("Parameter clusterName is required.");
205 } 211 }
206 212
 207 _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters/' + commons.Escaper.ecapeVariable('$clusterName') + ':diagnose'; 213 _url = 'v1/projects/' +
214 commons.Escaper.ecapeVariable('$projectId') +
215 '/regions/' +
216 commons.Escaper.ecapeVariable('$region') +
217 '/clusters/' +
218 commons.Escaper.ecapeVariable('$clusterName') +
219 ':diagnose';
208 220
209 var _response = _requester.request(_url, 221 var _response = _requester.request(_url, "POST",
210 "POST", 222 body: _body,
211 body: _body, 223 queryParams: _queryParams,
212 queryParams: _queryParams, 224 uploadOptions: _uploadOptions,
213 uploadOptions: _uploadOptions, 225 uploadMedia: _uploadMedia,
214 uploadMedia: _uploadMedia, 226 downloadOptions: _downloadOptions);
215 downloadOptions: _downloadOptions);
216 return _response.then((data) => new Operation.fromJson(data)); 227 return _response.then((data) => new Operation.fromJson(data));
217 } 228 }
218 229
219 /** 230 /// Gets the resource representation for a cluster in a project.
220 * Gets the resource representation for a cluster in a project. 231 ///
221 * 232 /// Request parameters:
222 * Request parameters: 233 ///
223 * 234 /// [projectId] - Required. The ID of the Google Cloud Platform project that
224 * [projectId] - Required. The ID of the Google Cloud Platform project that 235 /// the cluster belongs to.
225 * the cluster belongs to. 236 ///
226 * 237 /// [region] - Required. The Cloud Dataproc region in which to handle the
227 * [region] - Required. The Cloud Dataproc region in which to handle the 238 /// request.
228 * request. 239 ///
229 * 240 /// [clusterName] - Required. The cluster name.
230 * [clusterName] - Required. The cluster name. 241 ///
231 * 242 /// Completes with a [Cluster].
232 * Completes with a [Cluster]. 243 ///
233 * 244 /// Completes with a [commons.ApiRequestError] if the API endpoint returned
234 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 245 /// an error.
235 * error. 246 ///
236 * 247 /// If the used [http.Client] completes with an error when making a REST
237 * If the used [http.Client] completes with an error when making a REST call, 248 /// call, this method will complete with the same error.
238 * this method will complete with the same error. 249 async.Future<Cluster> get(
239 */ 250 core.String projectId, core.String region, core.String clusterName) {
 240 async.Future<Cluster> get(core.String projectId, core.String region, core.String clusterName) {
241 var _url = null; 251 var _url = null;
242 var _queryParams = new core.Map(); 252 var _queryParams = new core.Map();
243 var _uploadMedia = null; 253 var _uploadMedia = null;
244 var _uploadOptions = null; 254 var _uploadOptions = null;
245 var _downloadOptions = commons.DownloadOptions.Metadata; 255 var _downloadOptions = commons.DownloadOptions.Metadata;
246 var _body = null; 256 var _body = null;
247 257
248 if (projectId == null) { 258 if (projectId == null) {
249 throw new core.ArgumentError("Parameter projectId is required."); 259 throw new core.ArgumentError("Parameter projectId is required.");
250 } 260 }
251 if (region == null) { 261 if (region == null) {
252 throw new core.ArgumentError("Parameter region is required."); 262 throw new core.ArgumentError("Parameter region is required.");
253 } 263 }
254 if (clusterName == null) { 264 if (clusterName == null) {
255 throw new core.ArgumentError("Parameter clusterName is required."); 265 throw new core.ArgumentError("Parameter clusterName is required.");
256 } 266 }
257 267
 258 _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters/' + commons.Escaper.ecapeVariable('$clusterName'); 268 _url = 'v1/projects/' +
269 commons.Escaper.ecapeVariable('$projectId') +
270 '/regions/' +
271 commons.Escaper.ecapeVariable('$region') +
272 '/clusters/' +
273 commons.Escaper.ecapeVariable('$clusterName');
259 274
260 var _response = _requester.request(_url, 275 var _response = _requester.request(_url, "GET",
261 "GET", 276 body: _body,
262 body: _body, 277 queryParams: _queryParams,
263 queryParams: _queryParams, 278 uploadOptions: _uploadOptions,
264 uploadOptions: _uploadOptions, 279 uploadMedia: _uploadMedia,
265 uploadMedia: _uploadMedia, 280 downloadOptions: _downloadOptions);
266 downloadOptions: _downloadOptions);
267 return _response.then((data) => new Cluster.fromJson(data)); 281 return _response.then((data) => new Cluster.fromJson(data));
268 } 282 }
269 283
270 /** 284 /// Lists all regions/{region}/clusters in a project.
271 * Lists all regions/{region}/clusters in a project. 285 ///
272 * 286 /// Request parameters:
273 * Request parameters: 287 ///
274 * 288 /// [projectId] - Required. The ID of the Google Cloud Platform project that
275 * [projectId] - Required. The ID of the Google Cloud Platform project that 289 /// the cluster belongs to.
276 * the cluster belongs to. 290 ///
277 * 291 /// [region] - Required. The Cloud Dataproc region in which to handle the
278 * [region] - Required. The Cloud Dataproc region in which to handle the 292 /// request.
279 * request. 293 ///
280 * 294 /// [filter] - Optional. A filter constraining the clusters to list. Filters
281 * [pageSize] - Optional. The standard List page size. 295 /// are case-sensitive and have the following syntax:field = value AND field
282 * 296 /// = value ...where field is one of status.state, clusterName, or
283 * [filter] - Optional. A filter constraining the clusters to list. Filters 297 /// labels.[KEY], and [KEY] is a label key. value can be * to match all
284 * are case-sensitive and have the following syntax:field = value AND field = 298 /// values. status.state can be one of the following: ACTIVE, INACTIVE,
285 * value ...where field is one of status.state, clusterName, or labels.[KEY], 299 /// CREATING, RUNNING, ERROR, DELETING, or UPDATING. ACTIVE contains the
286 * and [KEY] is a label key. value can be * to match all values. status.state 300 /// CREATING, UPDATING, and RUNNING states. INACTIVE contains the DELETING
287 * can be one of the following: ACTIVE, INACTIVE, CREATING, RUNNING, ERROR, 301 /// and ERROR states. clusterName is the name of the cluster provided at
288 * DELETING, or UPDATING. ACTIVE contains the CREATING, UPDATING, and RUNNING 302 /// creation time. Only the logical AND operator is supported;
289 * states. INACTIVE contains the DELETING and ERROR states. clusterName is the 303 /// space-separated items are treated as having an implicit AND
290 * name of the cluster provided at creation time. Only the logical AND 304 /// operator.Example filter:status.state = ACTIVE AND clusterName = mycluster
291 * operator is supported; space-separated items are treated as having an 305 /// AND labels.env = staging AND labels.starred = *
292 * implicit AND operator.Example filter:status.state = ACTIVE AND clusterName 306 ///
293 * = mycluster AND labels.env = staging AND labels.starred = * 307 /// [pageToken] - Optional. The standard List page token.
294 * 308 ///
295 * [pageToken] - Optional. The standard List page token. 309 /// [pageSize] - Optional. The standard List page size.
296 * 310 ///
297 * Completes with a [ListClustersResponse]. 311 /// Completes with a [ListClustersResponse].
298 * 312 ///
299 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 313 /// Completes with a [commons.ApiRequestError] if the API endpoint returned
300 * error. 314 /// an error.
301 * 315 ///
302 * If the used [http.Client] completes with an error when making a REST call, 316 /// If the used [http.Client] completes with an error when making a REST
303 * this method will complete with the same error. 317 /// call, this method will complete with the same error.
304 */ 318 async.Future<ListClustersResponse> list(
 305 async.Future<ListClustersResponse> list(core.String projectId, core.String region, {core.int pageSize, core.String filter, core.String pageToken}) { 319 core.String projectId, core.String region,
320 {core.String filter, core.String pageToken, core.int pageSize}) {
306 var _url = null; 321 var _url = null;
307 var _queryParams = new core.Map(); 322 var _queryParams = new core.Map();
308 var _uploadMedia = null; 323 var _uploadMedia = null;
309 var _uploadOptions = null; 324 var _uploadOptions = null;
310 var _downloadOptions = commons.DownloadOptions.Metadata; 325 var _downloadOptions = commons.DownloadOptions.Metadata;
311 var _body = null; 326 var _body = null;
312 327
313 if (projectId == null) { 328 if (projectId == null) {
314 throw new core.ArgumentError("Parameter projectId is required."); 329 throw new core.ArgumentError("Parameter projectId is required.");
315 } 330 }
316 if (region == null) { 331 if (region == null) {
317 throw new core.ArgumentError("Parameter region is required."); 332 throw new core.ArgumentError("Parameter region is required.");
318 } 333 }
319 if (pageSize != null) {
320 _queryParams["pageSize"] = ["${pageSize}"];
321 }
322 if (filter != null) { 334 if (filter != null) {
323 _queryParams["filter"] = [filter]; 335 _queryParams["filter"] = [filter];
324 } 336 }
325 if (pageToken != null) { 337 if (pageToken != null) {
326 _queryParams["pageToken"] = [pageToken]; 338 _queryParams["pageToken"] = [pageToken];
327 } 339 }
340 if (pageSize != null) {
341 _queryParams["pageSize"] = ["${pageSize}"];
342 }
328 343
 329 _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters'; 344 _url = 'v1/projects/' +
345 commons.Escaper.ecapeVariable('$projectId') +
346 '/regions/' +
347 commons.Escaper.ecapeVariable('$region') +
348 '/clusters';
330 349
331 var _response = _requester.request(_url, 350 var _response = _requester.request(_url, "GET",
332 "GET", 351 body: _body,
333 body: _body, 352 queryParams: _queryParams,
334 queryParams: _queryParams, 353 uploadOptions: _uploadOptions,
335 uploadOptions: _uploadOptions, 354 uploadMedia: _uploadMedia,
336 uploadMedia: _uploadMedia, 355 downloadOptions: _downloadOptions);
337 downloadOptions: _downloadOptions);
338 return _response.then((data) => new ListClustersResponse.fromJson(data)); 356 return _response.then((data) => new ListClustersResponse.fromJson(data));
339 } 357 }
340 358
341 /** 359 /// Updates a cluster in a project.
342 * Updates a cluster in a project. 360 ///
343 * 361 /// [request] - The metadata request object.
344 * [request] - The metadata request object. 362 ///
345 * 363 /// Request parameters:
346 * Request parameters: 364 ///
347 * 365 /// [projectId] - Required. The ID of the Google Cloud Platform project the
348 * [projectId] - Required. The ID of the Google Cloud Platform project the 366 /// cluster belongs to.
349 * cluster belongs to. 367 ///
350 * 368 /// [region] - Required. The Cloud Dataproc region in which to handle the
351 * [region] - Required. The Cloud Dataproc region in which to handle the 369 /// request.
352 * request. 370 ///
353 * 371 /// [clusterName] - Required. The cluster name.
354 * [clusterName] - Required. The cluster name. 372 ///
355 * 373 /// [updateMask] - Required. Specifies the path, relative to Cluster, of the
356 * [updateMask] - Required. Specifies the path, relative to Cluster, of the 374 /// field to update. For example, to change the number of workers in a
357 * field to update. For example, to change the number of workers in a cluster 375 /// cluster to 5, the update_mask parameter would be specified as
358 * to 5, the update_mask parameter would be specified as 376 /// config.worker_config.num_instances, and the PATCH request body would
359 * config.worker_config.num_instances, and the PATCH request body would 377 /// specify the new value, as follows:
360 * specify the new value, as follows: 378 /// {
361 * { 379 /// "config":{
362 * "config":{ 380 /// "workerConfig":{
363 * "workerConfig":{ 381 /// "numInstances":"5"
364 * "numInstances":"5" 382 /// }
365 * } 383 /// }
366 * } 384 /// }
367 * } 385 /// Similarly, to change the number of preemptible workers in a cluster to 5,
368 * Similarly, to change the number of preemptible workers in a cluster to 5, 386 /// the update_mask parameter would be
369 * the update_mask parameter would be 387 /// config.secondary_worker_config.num_instances, and the PATCH request body
370 * config.secondary_worker_config.num_instances, and the PATCH request body 388 /// would be set as follows:
371 * would be set as follows: 389 /// {
372 * { 390 /// "config":{
373 * "config":{ 391 /// "secondaryWorkerConfig":{
374 * "secondaryWorkerConfig":{ 392 /// "numInstances":"5"
375 * "numInstances":"5" 393 /// }
376 * } 394 /// }
377 * } 395 /// }
378 * } 396 /// <strong>Note:</strong> Currently, only the following fields can be
379 * <strong>Note:</strong> Currently, only the following fields can be 397 /// updated:<table> <tbody> <tr> <td><strong>Mask</strong></td>
380 * updated:<table> <tbody> <tr> <td><strong>Mask</strong></td> 398 /// <td><strong>Purpose</strong></td> </tr> <tr>
381 * <td><strong>Purpose</strong></td> </tr> <tr> 399 /// <td><strong><em>labels</em></strong></td> <td>Update labels</td> </tr>
382 * <td><strong><em>labels</em></strong></td> <td>Update labels</td> </tr> 400 /// <tr>
383 * <tr> <td><strong><em>config.worker_config.num_instances</em></strong></td> 401 /// <td><strong><em>config.worker_config.num_instances</em></strong></td>
384 * <td>Resize primary worker group</td> </tr> <tr> 402 /// <td>Resize primary worker group</td> </tr> <tr>
 385 * <td><strong><em>config.secondary_worker_config.num_instances</em></strong></td> 403 /// <td><strong><em>config.secondary_worker_config.num_instances</em></strong></td>
386 * <td>Resize secondary worker group</td> </tr> </tbody> </table> 404 /// <td>Resize secondary worker group</td> </tr> </tbody> </table>
387 * 405 ///
388 * Completes with a [Operation]. 406 /// Completes with a [Operation].
389 * 407 ///
390 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 408 /// Completes with a [commons.ApiRequestError] if the API endpoint returned
391 * error. 409 /// an error.
392 * 410 ///
393 * If the used [http.Client] completes with an error when making a REST call, 411 /// If the used [http.Client] completes with an error when making a REST
394 * this method will complete with the same error. 412 /// call, this method will complete with the same error.
395 */ 413 async.Future<Operation> patch(Cluster request, core.String projectId,
 396 async.Future<Operation> patch(Cluster request, core.String projectId, core.String region, core.String clusterName, {core.String updateMask}) { 414 core.String region, core.String clusterName,
415 {core.String updateMask}) {
397 var _url = null; 416 var _url = null;
398 var _queryParams = new core.Map(); 417 var _queryParams = new core.Map();
399 var _uploadMedia = null; 418 var _uploadMedia = null;
400 var _uploadOptions = null; 419 var _uploadOptions = null;
401 var _downloadOptions = commons.DownloadOptions.Metadata; 420 var _downloadOptions = commons.DownloadOptions.Metadata;
402 var _body = null; 421 var _body = null;
403 422
404 if (request != null) { 423 if (request != null) {
405 _body = convert.JSON.encode((request).toJson()); 424 _body = convert.JSON.encode((request).toJson());
406 } 425 }
407 if (projectId == null) { 426 if (projectId == null) {
408 throw new core.ArgumentError("Parameter projectId is required."); 427 throw new core.ArgumentError("Parameter projectId is required.");
409 } 428 }
410 if (region == null) { 429 if (region == null) {
411 throw new core.ArgumentError("Parameter region is required."); 430 throw new core.ArgumentError("Parameter region is required.");
412 } 431 }
413 if (clusterName == null) { 432 if (clusterName == null) {
414 throw new core.ArgumentError("Parameter clusterName is required."); 433 throw new core.ArgumentError("Parameter clusterName is required.");
415 } 434 }
416 if (updateMask != null) { 435 if (updateMask != null) {
417 _queryParams["updateMask"] = [updateMask]; 436 _queryParams["updateMask"] = [updateMask];
418 } 437 }
419 438
 420 _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters/' + commons.Escaper.ecapeVariable('$clusterName'); 439 _url = 'v1/projects/' +
440 commons.Escaper.ecapeVariable('$projectId') +
441 '/regions/' +
442 commons.Escaper.ecapeVariable('$region') +
443 '/clusters/' +
444 commons.Escaper.ecapeVariable('$clusterName');
421 445
422 var _response = _requester.request(_url, 446 var _response = _requester.request(_url, "PATCH",
423 "PATCH", 447 body: _body,
424 body: _body, 448 queryParams: _queryParams,
425 queryParams: _queryParams, 449 uploadOptions: _uploadOptions,
426 uploadOptions: _uploadOptions, 450 uploadMedia: _uploadMedia,
427 uploadMedia: _uploadMedia, 451 downloadOptions: _downloadOptions);
428 downloadOptions: _downloadOptions);
429 return _response.then((data) => new Operation.fromJson(data)); 452 return _response.then((data) => new Operation.fromJson(data));
430 } 453 }
431
432 } 454 }
433 455
434
435 class ProjectsRegionsJobsResourceApi { 456 class ProjectsRegionsJobsResourceApi {
436 final commons.ApiRequester _requester; 457 final commons.ApiRequester _requester;
437 458
438 ProjectsRegionsJobsResourceApi(commons.ApiRequester client) : 459 ProjectsRegionsJobsResourceApi(commons.ApiRequester client)
439 _requester = client; 460 : _requester = client;
440 461
441 /** 462 /// Starts a job cancellation request. To access the job resource after
442 * Starts a job cancellation request. To access the job resource after 463 /// cancellation, call regions/{region}/jobs.list or
443 * cancellation, call regions/{region}/jobs.list or regions/{region}/jobs.get. 464 /// regions/{region}/jobs.get.
444 * 465 ///
445 * [request] - The metadata request object. 466 /// [request] - The metadata request object.
446 * 467 ///
447 * Request parameters: 468 /// Request parameters:
448 * 469 ///
449 * [projectId] - Required. The ID of the Google Cloud Platform project that 470 /// [projectId] - Required. The ID of the Google Cloud Platform project that
450 * the job belongs to. 471 /// the job belongs to.
451 * 472 ///
452 * [region] - Required. The Cloud Dataproc region in which to handle the 473 /// [region] - Required. The Cloud Dataproc region in which to handle the
453 * request. 474 /// request.
454 * 475 ///
455 * [jobId] - Required. The job ID. 476 /// [jobId] - Required. The job ID.
456 * 477 ///
457 * Completes with a [Job]. 478 /// Completes with a [Job].
458 * 479 ///
459 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 480 /// Completes with a [commons.ApiRequestError] if the API endpoint returned
460 * error. 481 /// an error.
461 * 482 ///
462 * If the used [http.Client] completes with an error when making a REST call, 483 /// If the used [http.Client] completes with an error when making a REST
463 * this method will complete with the same error. 484 /// call, this method will complete with the same error.
464 */ 485 async.Future<Job> cancel(CancelJobRequest request, core.String projectId,
465 async.Future<Job> cancel(CancelJobRequest request, core.String projectId, core .String region, core.String jobId) { 486 core.String region, core.String jobId) {
466 var _url = null; 487 var _url = null;
467 var _queryParams = new core.Map(); 488 var _queryParams = new core.Map();
468 var _uploadMedia = null; 489 var _uploadMedia = null;
469 var _uploadOptions = null; 490 var _uploadOptions = null;
470 var _downloadOptions = commons.DownloadOptions.Metadata; 491 var _downloadOptions = commons.DownloadOptions.Metadata;
471 var _body = null; 492 var _body = null;
472 493
473 if (request != null) { 494 if (request != null) {
474 _body = convert.JSON.encode((request).toJson()); 495 _body = convert.JSON.encode((request).toJson());
475 } 496 }
476 if (projectId == null) { 497 if (projectId == null) {
477 throw new core.ArgumentError("Parameter projectId is required."); 498 throw new core.ArgumentError("Parameter projectId is required.");
478 } 499 }
479 if (region == null) { 500 if (region == null) {
480 throw new core.ArgumentError("Parameter region is required."); 501 throw new core.ArgumentError("Parameter region is required.");
481 } 502 }
482 if (jobId == null) { 503 if (jobId == null) {
483 throw new core.ArgumentError("Parameter jobId is required."); 504 throw new core.ArgumentError("Parameter jobId is required.");
484 } 505 }
485 506
486 _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regi ons/' + commons.Escaper.ecapeVariable('$region') + '/jobs/' + commons.Escaper.ec apeVariable('$jobId') + ':cancel'; 507 _url = 'v1/projects/' +
508 commons.Escaper.ecapeVariable('$projectId') +
509 '/regions/' +
510 commons.Escaper.ecapeVariable('$region') +
511 '/jobs/' +
512 commons.Escaper.ecapeVariable('$jobId') +
513 ':cancel';
487 514
488 var _response = _requester.request(_url, 515 var _response = _requester.request(_url, "POST",
489 "POST", 516 body: _body,
490 body: _body, 517 queryParams: _queryParams,
491 queryParams: _queryParams, 518 uploadOptions: _uploadOptions,
492 uploadOptions: _uploadOptions, 519 uploadMedia: _uploadMedia,
493 uploadMedia: _uploadMedia, 520 downloadOptions: _downloadOptions);
494 downloadOptions: _downloadOptions);
495 return _response.then((data) => new Job.fromJson(data)); 521 return _response.then((data) => new Job.fromJson(data));
496 } 522 }
497 523
498 /** 524 /// Deletes the job from the project. If the job is active, the delete fails,
499 * Deletes the job from the project. If the job is active, the delete fails, 525 /// and the response returns FAILED_PRECONDITION.
500 * and the response returns FAILED_PRECONDITION. 526 ///
501 * 527 /// Request parameters:
502 * Request parameters: 528 ///
503 * 529 /// [projectId] - Required. The ID of the Google Cloud Platform project that
504 * [projectId] - Required. The ID of the Google Cloud Platform project that 530 /// the job belongs to.
505 * the job belongs to. 531 ///
506 * 532 /// [region] - Required. The Cloud Dataproc region in which to handle the
507 * [region] - Required. The Cloud Dataproc region in which to handle the 533 /// request.
508 * request. 534 ///
509 * 535 /// [jobId] - Required. The job ID.
510 * [jobId] - Required. The job ID. 536 ///
511 * 537 /// Completes with a [Empty].
512 * Completes with a [Empty]. 538 ///
513 * 539 /// Completes with a [commons.ApiRequestError] if the API endpoint returned
514 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 540 /// an error.
515 * error. 541 ///
516 * 542 /// If the used [http.Client] completes with an error when making a REST
517 * If the used [http.Client] completes with an error when making a REST call, 543 /// call, this method will complete with the same error.
518 * this method will complete with the same error. 544 async.Future<Empty> delete(
519 */ 545 core.String projectId, core.String region, core.String jobId) {
520 async.Future<Empty> delete(core.String projectId, core.String region, core.Str ing jobId) {
521 var _url = null; 546 var _url = null;
522 var _queryParams = new core.Map(); 547 var _queryParams = new core.Map();
523 var _uploadMedia = null; 548 var _uploadMedia = null;
524 var _uploadOptions = null; 549 var _uploadOptions = null;
525 var _downloadOptions = commons.DownloadOptions.Metadata; 550 var _downloadOptions = commons.DownloadOptions.Metadata;
526 var _body = null; 551 var _body = null;
527 552
528 if (projectId == null) { 553 if (projectId == null) {
529 throw new core.ArgumentError("Parameter projectId is required."); 554 throw new core.ArgumentError("Parameter projectId is required.");
530 } 555 }
531 if (region == null) { 556 if (region == null) {
532 throw new core.ArgumentError("Parameter region is required."); 557 throw new core.ArgumentError("Parameter region is required.");
533 } 558 }
534 if (jobId == null) { 559 if (jobId == null) {
535 throw new core.ArgumentError("Parameter jobId is required."); 560 throw new core.ArgumentError("Parameter jobId is required.");
536 } 561 }
537 562
538 _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regi ons/' + commons.Escaper.ecapeVariable('$region') + '/jobs/' + commons.Escaper.ec apeVariable('$jobId'); 563 _url = 'v1/projects/' +
564 commons.Escaper.ecapeVariable('$projectId') +
565 '/regions/' +
566 commons.Escaper.ecapeVariable('$region') +
567 '/jobs/' +
568 commons.Escaper.ecapeVariable('$jobId');
539 569
540 var _response = _requester.request(_url, 570 var _response = _requester.request(_url, "DELETE",
541 "DELETE", 571 body: _body,
542 body: _body, 572 queryParams: _queryParams,
543 queryParams: _queryParams, 573 uploadOptions: _uploadOptions,
544 uploadOptions: _uploadOptions, 574 uploadMedia: _uploadMedia,
545 uploadMedia: _uploadMedia, 575 downloadOptions: _downloadOptions);
546 downloadOptions: _downloadOptions);
547 return _response.then((data) => new Empty.fromJson(data)); 576 return _response.then((data) => new Empty.fromJson(data));
548 } 577 }
549 578
550 /** 579 /// Gets the resource representation for a job in a project.
551 * Gets the resource representation for a job in a project. 580 ///
552 * 581 /// Request parameters:
553 * Request parameters: 582 ///
554 * 583 /// [projectId] - Required. The ID of the Google Cloud Platform project that
555 * [projectId] - Required. The ID of the Google Cloud Platform project that 584 /// the job belongs to.
556 * the job belongs to. 585 ///
557 * 586 /// [region] - Required. The Cloud Dataproc region in which to handle the
558 * [region] - Required. The Cloud Dataproc region in which to handle the 587 /// request.
559 * request. 588 ///
560 * 589 /// [jobId] - Required. The job ID.
561 * [jobId] - Required. The job ID. 590 ///
562 * 591 /// Completes with a [Job].
563 * Completes with a [Job]. 592 ///
564 * 593 /// Completes with a [commons.ApiRequestError] if the API endpoint returned
565 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 594 /// an error.
566 * error. 595 ///
567 * 596 /// If the used [http.Client] completes with an error when making a REST
568 * If the used [http.Client] completes with an error when making a REST call, 597 /// call, this method will complete with the same error.
569 * this method will complete with the same error. 598 async.Future<Job> get(
570 */ 599 core.String projectId, core.String region, core.String jobId) {
571 async.Future<Job> get(core.String projectId, core.String region, core.String j obId) {
572 var _url = null; 600 var _url = null;
573 var _queryParams = new core.Map(); 601 var _queryParams = new core.Map();
574 var _uploadMedia = null; 602 var _uploadMedia = null;
575 var _uploadOptions = null; 603 var _uploadOptions = null;
576 var _downloadOptions = commons.DownloadOptions.Metadata; 604 var _downloadOptions = commons.DownloadOptions.Metadata;
577 var _body = null; 605 var _body = null;
578 606
579 if (projectId == null) { 607 if (projectId == null) {
580 throw new core.ArgumentError("Parameter projectId is required."); 608 throw new core.ArgumentError("Parameter projectId is required.");
581 } 609 }
582 if (region == null) { 610 if (region == null) {
583 throw new core.ArgumentError("Parameter region is required."); 611 throw new core.ArgumentError("Parameter region is required.");
584 } 612 }
585 if (jobId == null) { 613 if (jobId == null) {
586 throw new core.ArgumentError("Parameter jobId is required."); 614 throw new core.ArgumentError("Parameter jobId is required.");
587 } 615 }
588 616
589 _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regi ons/' + commons.Escaper.ecapeVariable('$region') + '/jobs/' + commons.Escaper.ec apeVariable('$jobId'); 617 _url = 'v1/projects/' +
618 commons.Escaper.ecapeVariable('$projectId') +
619 '/regions/' +
620 commons.Escaper.ecapeVariable('$region') +
621 '/jobs/' +
622 commons.Escaper.ecapeVariable('$jobId');
590 623
591 var _response = _requester.request(_url, 624 var _response = _requester.request(_url, "GET",
592 "GET", 625 body: _body,
593 body: _body, 626 queryParams: _queryParams,
594 queryParams: _queryParams, 627 uploadOptions: _uploadOptions,
595 uploadOptions: _uploadOptions, 628 uploadMedia: _uploadMedia,
596 uploadMedia: _uploadMedia, 629 downloadOptions: _downloadOptions);
597 downloadOptions: _downloadOptions);
598 return _response.then((data) => new Job.fromJson(data)); 630 return _response.then((data) => new Job.fromJson(data));
599 } 631 }
600 632
601 /** 633 /// Lists regions/{region}/jobs in a project.
602 * Lists regions/{region}/jobs in a project. 634 ///
603 * 635 /// Request parameters:
604 * Request parameters: 636 ///
605 * 637 /// [projectId] - Required. The ID of the Google Cloud Platform project that
606 * [projectId] - Required. The ID of the Google Cloud Platform project that 638 /// the job belongs to.
607 * the job belongs to. 639 ///
608 * 640 /// [region] - Required. The Cloud Dataproc region in which to handle the
609 * [region] - Required. The Cloud Dataproc region in which to handle the 641 /// request.
610 * request. 642 ///
611 * 643 /// [pageToken] - Optional. The page token, returned by a previous call, to
612 * [filter] - Optional. A filter constraining the jobs to list. Filters are 644 /// request the next page of results.
613 * case-sensitive and have the following syntax:field = value AND field = 645 ///
614 * value ...where field is status.state or labels.[KEY], and [KEY] is a label 646 /// [pageSize] - Optional. The number of results to return in each response.
615 * key. value can be * to match all values. status.state can be either ACTIVE 647 ///
616 * or INACTIVE. Only the logical AND operator is supported; space-separated 648 /// [clusterName] - Optional. If set, the returned jobs list includes only
617 * items are treated as having an implicit AND operator.Example 649 /// jobs that were submitted to the named cluster.
618 * filter:status.state = ACTIVE AND labels.env = staging AND labels.starred = 650 ///
619 * * 651 /// [filter] - Optional. A filter constraining the jobs to list. Filters are
620 * 652 /// case-sensitive and have the following syntax:field = value AND field =
621 * [jobStateMatcher] - Optional. Specifies enumerated categories of jobs to 653 /// value ...where field is status.state or labels.[KEY], and [KEY] is a
622 * list (default = match ALL jobs). 654 /// label key. value can be * to match all values. status.state can be either
623 * Possible string values are: 655 /// ACTIVE or INACTIVE. Only the logical AND operator is supported;
624 * - "ALL" : A ALL. 656 /// space-separated items are treated as having an implicit AND
625 * - "ACTIVE" : A ACTIVE. 657 /// operator.Example filter:status.state = ACTIVE AND labels.env = staging
626 * - "NON_ACTIVE" : A NON_ACTIVE. 658 /// AND labels.starred = *
627 * 659 ///
628 * [pageToken] - Optional. The page token, returned by a previous call, to 660 /// [jobStateMatcher] - Optional. Specifies enumerated categories of jobs to
629 * request the next page of results. 661 /// list (default = match ALL jobs).
630 * 662 /// Possible string values are:
631 * [pageSize] - Optional. The number of results to return in each response. 663 /// - "ALL" : A ALL.
632 * 664 /// - "ACTIVE" : A ACTIVE.
633 * [clusterName] - Optional. If set, the returned jobs list includes only jobs 665 /// - "NON_ACTIVE" : A NON_ACTIVE.
634 * that were submitted to the named cluster. 666 ///
635 * 667 /// Completes with a [ListJobsResponse].
636 * Completes with a [ListJobsResponse]. 668 ///
637 * 669 /// Completes with a [commons.ApiRequestError] if the API endpoint returned
638 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 670 /// an error.
639 * error. 671 ///
640 * 672 /// If the used [http.Client] completes with an error when making a REST
641 * If the used [http.Client] completes with an error when making a REST call, 673 /// call, this method will complete with the same error.
642 * this method will complete with the same error. 674 async.Future<ListJobsResponse> list(core.String projectId, core.String region,
643 */ 675 {core.String pageToken,
644 async.Future<ListJobsResponse> list(core.String projectId, core.String region, {core.String filter, core.String jobStateMatcher, core.String pageToken, core.i nt pageSize, core.String clusterName}) { 676 core.int pageSize,
677 core.String clusterName,
678 core.String filter,
679 core.String jobStateMatcher}) {
645 var _url = null; 680 var _url = null;
646 var _queryParams = new core.Map(); 681 var _queryParams = new core.Map();
647 var _uploadMedia = null; 682 var _uploadMedia = null;
648 var _uploadOptions = null; 683 var _uploadOptions = null;
649 var _downloadOptions = commons.DownloadOptions.Metadata; 684 var _downloadOptions = commons.DownloadOptions.Metadata;
650 var _body = null; 685 var _body = null;
651 686
652 if (projectId == null) { 687 if (projectId == null) {
653 throw new core.ArgumentError("Parameter projectId is required."); 688 throw new core.ArgumentError("Parameter projectId is required.");
654 } 689 }
655 if (region == null) { 690 if (region == null) {
656 throw new core.ArgumentError("Parameter region is required."); 691 throw new core.ArgumentError("Parameter region is required.");
657 } 692 }
658 if (filter != null) {
659 _queryParams["filter"] = [filter];
660 }
661 if (jobStateMatcher != null) {
662 _queryParams["jobStateMatcher"] = [jobStateMatcher];
663 }
664 if (pageToken != null) { 693 if (pageToken != null) {
665 _queryParams["pageToken"] = [pageToken]; 694 _queryParams["pageToken"] = [pageToken];
666 } 695 }
667 if (pageSize != null) { 696 if (pageSize != null) {
668 _queryParams["pageSize"] = ["${pageSize}"]; 697 _queryParams["pageSize"] = ["${pageSize}"];
669 } 698 }
670 if (clusterName != null) { 699 if (clusterName != null) {
671 _queryParams["clusterName"] = [clusterName]; 700 _queryParams["clusterName"] = [clusterName];
672 } 701 }
702 if (filter != null) {
703 _queryParams["filter"] = [filter];
704 }
705 if (jobStateMatcher != null) {
706 _queryParams["jobStateMatcher"] = [jobStateMatcher];
707 }
673 708
674 _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regi ons/' + commons.Escaper.ecapeVariable('$region') + '/jobs'; 709 _url = 'v1/projects/' +
710 commons.Escaper.ecapeVariable('$projectId') +
711 '/regions/' +
712 commons.Escaper.ecapeVariable('$region') +
713 '/jobs';
675 714
676 var _response = _requester.request(_url, 715 var _response = _requester.request(_url, "GET",
677 "GET", 716 body: _body,
678 body: _body, 717 queryParams: _queryParams,
679 queryParams: _queryParams, 718 uploadOptions: _uploadOptions,
680 uploadOptions: _uploadOptions, 719 uploadMedia: _uploadMedia,
681 uploadMedia: _uploadMedia, 720 downloadOptions: _downloadOptions);
682 downloadOptions: _downloadOptions);
683 return _response.then((data) => new ListJobsResponse.fromJson(data)); 721 return _response.then((data) => new ListJobsResponse.fromJson(data));
684 } 722 }
685 723
686 /** 724 /// Updates a job in a project.
687 * Updates a job in a project. 725 ///
688 * 726 /// [request] - The metadata request object.
689 * [request] - The metadata request object. 727 ///
690 * 728 /// Request parameters:
691 * Request parameters: 729 ///
692 * 730 /// [projectId] - Required. The ID of the Google Cloud Platform project that
693 * [projectId] - Required. The ID of the Google Cloud Platform project that 731 /// the job belongs to.
694 * the job belongs to. 732 ///
695 * 733 /// [region] - Required. The Cloud Dataproc region in which to handle the
696 * [region] - Required. The Cloud Dataproc region in which to handle the 734 /// request.
697 * request. 735 ///
698 * 736 /// [jobId] - Required. The job ID.
699 * [jobId] - Required. The job ID. 737 ///
700 * 738 /// [updateMask] - Required. Specifies the path, relative to
701 * [updateMask] - Required. Specifies the path, relative to <code>Job</code>, 739 /// <code>Job</code>, of the field to update. For example, to update the
702 * of the field to update. For example, to update the labels of a Job the 740 /// labels of a Job the <code>update_mask</code> parameter would be specified
703 * <code>update_mask</code> parameter would be specified as 741 /// as <code>labels</code>, and the PATCH request body would specify the new
704 * <code>labels</code>, and the PATCH request body would specify the new 742 /// value. <strong>Note:</strong> Currently, <code>labels</code> is the only
705 * value. <strong>Note:</strong> Currently, <code>labels</code> is the only 743 /// field that can be updated.
706 * field that can be updated. 744 ///
707 * 745 /// Completes with a [Job].
708 * Completes with a [Job]. 746 ///
709 * 747 /// Completes with a [commons.ApiRequestError] if the API endpoint returned
710 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 748 /// an error.
711 * error. 749 ///
712 * 750 /// If the used [http.Client] completes with an error when making a REST
713 * If the used [http.Client] completes with an error when making a REST call, 751 /// call, this method will complete with the same error.
714 * this method will complete with the same error. 752 async.Future<Job> patch(
715 */ 753 Job request, core.String projectId, core.String region, core.String jobId,
716 async.Future<Job> patch(Job request, core.String projectId, core.String region , core.String jobId, {core.String updateMask}) { 754 {core.String updateMask}) {
717 var _url = null; 755 var _url = null;
718 var _queryParams = new core.Map(); 756 var _queryParams = new core.Map();
719 var _uploadMedia = null; 757 var _uploadMedia = null;
720 var _uploadOptions = null; 758 var _uploadOptions = null;
721 var _downloadOptions = commons.DownloadOptions.Metadata; 759 var _downloadOptions = commons.DownloadOptions.Metadata;
722 var _body = null; 760 var _body = null;
723 761
724 if (request != null) { 762 if (request != null) {
725 _body = convert.JSON.encode((request).toJson()); 763 _body = convert.JSON.encode((request).toJson());
726 } 764 }
727 if (projectId == null) { 765 if (projectId == null) {
728 throw new core.ArgumentError("Parameter projectId is required."); 766 throw new core.ArgumentError("Parameter projectId is required.");
729 } 767 }
730 if (region == null) { 768 if (region == null) {
731 throw new core.ArgumentError("Parameter region is required."); 769 throw new core.ArgumentError("Parameter region is required.");
732 } 770 }
733 if (jobId == null) { 771 if (jobId == null) {
734 throw new core.ArgumentError("Parameter jobId is required."); 772 throw new core.ArgumentError("Parameter jobId is required.");
735 } 773 }
736 if (updateMask != null) { 774 if (updateMask != null) {
737 _queryParams["updateMask"] = [updateMask]; 775 _queryParams["updateMask"] = [updateMask];
738 } 776 }
739 777
740 _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regi ons/' + commons.Escaper.ecapeVariable('$region') + '/jobs/' + commons.Escaper.ec apeVariable('$jobId'); 778 _url = 'v1/projects/' +
779 commons.Escaper.ecapeVariable('$projectId') +
780 '/regions/' +
781 commons.Escaper.ecapeVariable('$region') +
782 '/jobs/' +
783 commons.Escaper.ecapeVariable('$jobId');
741 784
742 var _response = _requester.request(_url, 785 var _response = _requester.request(_url, "PATCH",
743 "PATCH", 786 body: _body,
744 body: _body, 787 queryParams: _queryParams,
745 queryParams: _queryParams, 788 uploadOptions: _uploadOptions,
746 uploadOptions: _uploadOptions, 789 uploadMedia: _uploadMedia,
747 uploadMedia: _uploadMedia, 790 downloadOptions: _downloadOptions);
748 downloadOptions: _downloadOptions);
749 return _response.then((data) => new Job.fromJson(data)); 791 return _response.then((data) => new Job.fromJson(data));
750 } 792 }
751 793
752 /** 794 /// Submits a job to a cluster.
753 * Submits a job to a cluster. 795 ///
754 * 796 /// [request] - The metadata request object.
755 * [request] - The metadata request object. 797 ///
756 * 798 /// Request parameters:
757 * Request parameters: 799 ///
758 * 800 /// [projectId] - Required. The ID of the Google Cloud Platform project that
759 * [projectId] - Required. The ID of the Google Cloud Platform project that 801 /// the job belongs to.
760 * the job belongs to. 802 ///
761 * 803 /// [region] - Required. The Cloud Dataproc region in which to handle the
762 * [region] - Required. The Cloud Dataproc region in which to handle the 804 /// request.
763 * request. 805 ///
764 * 806 /// Completes with a [Job].
765 * Completes with a [Job]. 807 ///
766 * 808 /// Completes with a [commons.ApiRequestError] if the API endpoint returned
767 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 809 /// an error.
768 * error. 810 ///
769 * 811 /// If the used [http.Client] completes with an error when making a REST
770 * If the used [http.Client] completes with an error when making a REST call, 812 /// call, this method will complete with the same error.
771 * this method will complete with the same error. 813 async.Future<Job> submit(
772 */ 814 SubmitJobRequest request, core.String projectId, core.String region) {
773 async.Future<Job> submit(SubmitJobRequest request, core.String projectId, core .String region) {
774 var _url = null; 815 var _url = null;
775 var _queryParams = new core.Map(); 816 var _queryParams = new core.Map();
776 var _uploadMedia = null; 817 var _uploadMedia = null;
777 var _uploadOptions = null; 818 var _uploadOptions = null;
778 var _downloadOptions = commons.DownloadOptions.Metadata; 819 var _downloadOptions = commons.DownloadOptions.Metadata;
779 var _body = null; 820 var _body = null;
780 821
781 if (request != null) { 822 if (request != null) {
782 _body = convert.JSON.encode((request).toJson()); 823 _body = convert.JSON.encode((request).toJson());
783 } 824 }
784 if (projectId == null) { 825 if (projectId == null) {
785 throw new core.ArgumentError("Parameter projectId is required."); 826 throw new core.ArgumentError("Parameter projectId is required.");
786 } 827 }
787 if (region == null) { 828 if (region == null) {
788 throw new core.ArgumentError("Parameter region is required."); 829 throw new core.ArgumentError("Parameter region is required.");
789 } 830 }
790 831
791 _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regi ons/' + commons.Escaper.ecapeVariable('$region') + '/jobs:submit'; 832 _url = 'v1/projects/' +
833 commons.Escaper.ecapeVariable('$projectId') +
834 '/regions/' +
835 commons.Escaper.ecapeVariable('$region') +
836 '/jobs:submit';
792 837
793 var _response = _requester.request(_url, 838 var _response = _requester.request(_url, "POST",
794 "POST", 839 body: _body,
795 body: _body, 840 queryParams: _queryParams,
796 queryParams: _queryParams, 841 uploadOptions: _uploadOptions,
797 uploadOptions: _uploadOptions, 842 uploadMedia: _uploadMedia,
798 uploadMedia: _uploadMedia, 843 downloadOptions: _downloadOptions);
799 downloadOptions: _downloadOptions);
800 return _response.then((data) => new Job.fromJson(data)); 844 return _response.then((data) => new Job.fromJson(data));
801 } 845 }
802
803 } 846 }
804 847
805
806 class ProjectsRegionsOperationsResourceApi { 848 class ProjectsRegionsOperationsResourceApi {
807 final commons.ApiRequester _requester; 849 final commons.ApiRequester _requester;
808 850
809 ProjectsRegionsOperationsResourceApi(commons.ApiRequester client) : 851 ProjectsRegionsOperationsResourceApi(commons.ApiRequester client)
810 _requester = client; 852 : _requester = client;
811 853
812 /** 854 /// Starts asynchronous cancellation on a long-running operation. The server
813 * Starts asynchronous cancellation on a long-running operation. The server 855 /// makes a best effort to cancel the operation, but success is not
814 * makes a best effort to cancel the operation, but success is not guaranteed. 856 /// guaranteed. If the server doesn't support this method, it returns
815 * If the server doesn't support this method, it returns 857 /// google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or
816 * google.rpc.Code.UNIMPLEMENTED. Clients can use Operations.GetOperation or 858 /// other methods to check whether the cancellation succeeded or whether the
817 * other methods to check whether the cancellation succeeded or whether the 859 /// operation completed despite cancellation. On successful cancellation, the
818 * operation completed despite cancellation. On successful cancellation, the 860 /// operation is not deleted; instead, it becomes an operation with an
819 * operation is not deleted; instead, it becomes an operation with an 861 /// Operation.error value with a google.rpc.Status.code of 1, corresponding
820 * Operation.error value with a google.rpc.Status.code of 1, corresponding to 862 /// to Code.CANCELLED.
821 * Code.CANCELLED. 863 ///
822 * 864 /// Request parameters:
823 * Request parameters: 865 ///
824 * 866 /// [name] - The name of the operation resource to be cancelled.
825 * [name] - The name of the operation resource to be cancelled. 867 /// Value must have pattern
826 * Value must have pattern "^projects/[^/]+/regions/[^/]+/operations/[^/]+$". 868 /// "^projects/[^/]+/regions/[^/]+/operations/[^/]+$".
827 * 869 ///
828 * Completes with a [Empty]. 870 /// Completes with a [Empty].
829 * 871 ///
830 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 872 /// Completes with a [commons.ApiRequestError] if the API endpoint returned
831 * error. 873 /// an error.
832 * 874 ///
833 * If the used [http.Client] completes with an error when making a REST call, 875 /// If the used [http.Client] completes with an error when making a REST
834 * this method will complete with the same error. 876 /// call, this method will complete with the same error.
835 */
836 async.Future<Empty> cancel(core.String name) { 877 async.Future<Empty> cancel(core.String name) {
837 var _url = null; 878 var _url = null;
838 var _queryParams = new core.Map(); 879 var _queryParams = new core.Map();
839 var _uploadMedia = null; 880 var _uploadMedia = null;
840 var _uploadOptions = null; 881 var _uploadOptions = null;
841 var _downloadOptions = commons.DownloadOptions.Metadata; 882 var _downloadOptions = commons.DownloadOptions.Metadata;
842 var _body = null; 883 var _body = null;
843 884
844 if (name == null) { 885 if (name == null) {
845 throw new core.ArgumentError("Parameter name is required."); 886 throw new core.ArgumentError("Parameter name is required.");
846 } 887 }
847 888
848 _url = 'v1/' + commons.Escaper.ecapeVariableReserved('$name') + ':cancel'; 889 _url = 'v1/' + commons.Escaper.ecapeVariableReserved('$name') + ':cancel';
849 890
850 var _response = _requester.request(_url, 891 var _response = _requester.request(_url, "POST",
851 "POST", 892 body: _body,
852 body: _body, 893 queryParams: _queryParams,
853 queryParams: _queryParams, 894 uploadOptions: _uploadOptions,
854 uploadOptions: _uploadOptions, 895 uploadMedia: _uploadMedia,
855 uploadMedia: _uploadMedia, 896 downloadOptions: _downloadOptions);
856 downloadOptions: _downloadOptions);
857 return _response.then((data) => new Empty.fromJson(data)); 897 return _response.then((data) => new Empty.fromJson(data));
858 } 898 }
859 899
860 /** 900 /// Deletes a long-running operation. This method indicates that the client
861 * Deletes a long-running operation. This method indicates that the client is 901 /// is no longer interested in the operation result. It does not cancel the
862 * no longer interested in the operation result. It does not cancel the 902 /// operation. If the server doesn't support this method, it returns
863 * operation. If the server doesn't support this method, it returns 903 /// google.rpc.Code.UNIMPLEMENTED.
864 * google.rpc.Code.UNIMPLEMENTED. 904 ///
865 * 905 /// Request parameters:
866 * Request parameters: 906 ///
867 * 907 /// [name] - The name of the operation resource to be deleted.
868 * [name] - The name of the operation resource to be deleted. 908 /// Value must have pattern
869 * Value must have pattern "^projects/[^/]+/regions/[^/]+/operations/[^/]+$". 909 /// "^projects/[^/]+/regions/[^/]+/operations/[^/]+$".
870 * 910 ///
871 * Completes with a [Empty]. 911 /// Completes with a [Empty].
872 * 912 ///
873 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 913 /// Completes with a [commons.ApiRequestError] if the API endpoint returned
874 * error. 914 /// an error.
875 * 915 ///
876 * If the used [http.Client] completes with an error when making a REST call, 916 /// If the used [http.Client] completes with an error when making a REST
877 * this method will complete with the same error. 917 /// call, this method will complete with the same error.
878 */
879 async.Future<Empty> delete(core.String name) { 918 async.Future<Empty> delete(core.String name) {
880 var _url = null; 919 var _url = null;
881 var _queryParams = new core.Map(); 920 var _queryParams = new core.Map();
882 var _uploadMedia = null; 921 var _uploadMedia = null;
883 var _uploadOptions = null; 922 var _uploadOptions = null;
884 var _downloadOptions = commons.DownloadOptions.Metadata; 923 var _downloadOptions = commons.DownloadOptions.Metadata;
885 var _body = null; 924 var _body = null;
886 925
887 if (name == null) { 926 if (name == null) {
888 throw new core.ArgumentError("Parameter name is required."); 927 throw new core.ArgumentError("Parameter name is required.");
889 } 928 }
890 929
891 _url = 'v1/' + commons.Escaper.ecapeVariableReserved('$name'); 930 _url = 'v1/' + commons.Escaper.ecapeVariableReserved('$name');
892 931
893 var _response = _requester.request(_url, 932 var _response = _requester.request(_url, "DELETE",
894 "DELETE", 933 body: _body,
895 body: _body, 934 queryParams: _queryParams,
896 queryParams: _queryParams, 935 uploadOptions: _uploadOptions,
897 uploadOptions: _uploadOptions, 936 uploadMedia: _uploadMedia,
898 uploadMedia: _uploadMedia, 937 downloadOptions: _downloadOptions);
899 downloadOptions: _downloadOptions);
900 return _response.then((data) => new Empty.fromJson(data)); 938 return _response.then((data) => new Empty.fromJson(data));
901 } 939 }
902 940
903 /** 941 /// Gets the latest state of a long-running operation. Clients can use this
904 * Gets the latest state of a long-running operation. Clients can use this 942 /// method to poll the operation result at intervals as recommended by the
905 * method to poll the operation result at intervals as recommended by the API 943 /// API service.
906 * service. 944 ///
907 * 945 /// Request parameters:
908 * Request parameters: 946 ///
909 * 947 /// [name] - The name of the operation resource.
910 * [name] - The name of the operation resource. 948 /// Value must have pattern
911 * Value must have pattern "^projects/[^/]+/regions/[^/]+/operations/[^/]+$". 949 /// "^projects/[^/]+/regions/[^/]+/operations/[^/]+$".
912 * 950 ///
913 * Completes with a [Operation]. 951 /// Completes with a [Operation].
914 * 952 ///
915 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 953 /// Completes with a [commons.ApiRequestError] if the API endpoint returned
916 * error. 954 /// an error.
917 * 955 ///
918 * If the used [http.Client] completes with an error when making a REST call, 956 /// If the used [http.Client] completes with an error when making a REST
919 * this method will complete with the same error. 957 /// call, this method will complete with the same error.
920 */
921 async.Future<Operation> get(core.String name) { 958 async.Future<Operation> get(core.String name) {
922 var _url = null; 959 var _url = null;
923 var _queryParams = new core.Map(); 960 var _queryParams = new core.Map();
924 var _uploadMedia = null; 961 var _uploadMedia = null;
925 var _uploadOptions = null; 962 var _uploadOptions = null;
926 var _downloadOptions = commons.DownloadOptions.Metadata; 963 var _downloadOptions = commons.DownloadOptions.Metadata;
927 var _body = null; 964 var _body = null;
928 965
929 if (name == null) { 966 if (name == null) {
930 throw new core.ArgumentError("Parameter name is required."); 967 throw new core.ArgumentError("Parameter name is required.");
931 } 968 }
932 969
933 _url = 'v1/' + commons.Escaper.ecapeVariableReserved('$name'); 970 _url = 'v1/' + commons.Escaper.ecapeVariableReserved('$name');
934 971
935 var _response = _requester.request(_url, 972 var _response = _requester.request(_url, "GET",
936 "GET", 973 body: _body,
937 body: _body, 974 queryParams: _queryParams,
938 queryParams: _queryParams, 975 uploadOptions: _uploadOptions,
939 uploadOptions: _uploadOptions, 976 uploadMedia: _uploadMedia,
940 uploadMedia: _uploadMedia, 977 downloadOptions: _downloadOptions);
941 downloadOptions: _downloadOptions);
942 return _response.then((data) => new Operation.fromJson(data)); 978 return _response.then((data) => new Operation.fromJson(data));
943 } 979 }
944 980
945 /** 981 /// Lists operations that match the specified filter in the request. If the
946 * Lists operations that match the specified filter in the request. If the 982 /// server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the
947 * server doesn't support this method, it returns UNIMPLEMENTED.NOTE: the name 983 /// name binding allows API services to override the binding to use different
948 * binding allows API services to override the binding to use different 984 /// resource name schemes, such as users / * /operations. To override the
949 * resource name schemes, such as users / * /operations. To override the 985 /// binding, API services can add a binding such as "/v1/{name=users / *
950 * binding, API services can add a binding such as "/v1/{name=users / * 986 /// }/operations" to their service configuration. For backwards
951 * }/operations" to their service configuration. For backwards compatibility, 987 /// compatibility, the default name includes the operations collection id,
952 * the default name includes the operations collection id, however overriding 988 /// however overriding users must ensure the name binding is the parent
953 * users must ensure the name binding is the parent resource, without the 989 /// resource, without the operations collection id.
954 * operations collection id. 990 ///
955 * 991 /// Request parameters:
956 * Request parameters: 992 ///
957 * 993 /// [name] - The name of the operation's parent resource.
958 * [name] - The name of the operation's parent resource. 994 /// Value must have pattern "^projects/[^/]+/regions/[^/]+/operations$".
959 * Value must have pattern "^projects/[^/]+/regions/[^/]+/operations$". 995 ///
960 * 996 /// [pageToken] - The standard list page token.
961 * [filter] - The standard list filter. 997 ///
962 * 998 /// [pageSize] - The standard list page size.
963 * [pageToken] - The standard list page token. 999 ///
964 * 1000 /// [filter] - The standard list filter.
965 * [pageSize] - The standard list page size. 1001 ///
966 * 1002 /// Completes with a [ListOperationsResponse].
967 * Completes with a [ListOperationsResponse]. 1003 ///
968 * 1004 /// Completes with a [commons.ApiRequestError] if the API endpoint returned
969 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 1005 /// an error.
970 * error. 1006 ///
971 * 1007 /// If the used [http.Client] completes with an error when making a REST
972 * If the used [http.Client] completes with an error when making a REST call, 1008 /// call, this method will complete with the same error.
973 * this method will complete with the same error. 1009 async.Future<ListOperationsResponse> list(core.String name,
974 */ 1010 {core.String pageToken, core.int pageSize, core.String filter}) {
975 async.Future<ListOperationsResponse> list(core.String name, {core.String filte r, core.String pageToken, core.int pageSize}) {
976 var _url = null; 1011 var _url = null;
977 var _queryParams = new core.Map(); 1012 var _queryParams = new core.Map();
978 var _uploadMedia = null; 1013 var _uploadMedia = null;
979 var _uploadOptions = null; 1014 var _uploadOptions = null;
980 var _downloadOptions = commons.DownloadOptions.Metadata; 1015 var _downloadOptions = commons.DownloadOptions.Metadata;
981 var _body = null; 1016 var _body = null;
982 1017
983 if (name == null) { 1018 if (name == null) {
984 throw new core.ArgumentError("Parameter name is required."); 1019 throw new core.ArgumentError("Parameter name is required.");
985 } 1020 }
986 if (filter != null) {
987 _queryParams["filter"] = [filter];
988 }
989 if (pageToken != null) { 1021 if (pageToken != null) {
990 _queryParams["pageToken"] = [pageToken]; 1022 _queryParams["pageToken"] = [pageToken];
991 } 1023 }
992 if (pageSize != null) { 1024 if (pageSize != null) {
993 _queryParams["pageSize"] = ["${pageSize}"]; 1025 _queryParams["pageSize"] = ["${pageSize}"];
994 } 1026 }
1027 if (filter != null) {
1028 _queryParams["filter"] = [filter];
1029 }
995 1030
996 _url = 'v1/' + commons.Escaper.ecapeVariableReserved('$name'); 1031 _url = 'v1/' + commons.Escaper.ecapeVariableReserved('$name');
997 1032
998 var _response = _requester.request(_url, 1033 var _response = _requester.request(_url, "GET",
999 "GET", 1034 body: _body,
1000 body: _body, 1035 queryParams: _queryParams,
1001 queryParams: _queryParams, 1036 uploadOptions: _uploadOptions,
1002 uploadOptions: _uploadOptions, 1037 uploadMedia: _uploadMedia,
1003 uploadMedia: _uploadMedia, 1038 downloadOptions: _downloadOptions);
1004 downloadOptions: _downloadOptions);
1005 return _response.then((data) => new ListOperationsResponse.fromJson(data)); 1039 return _response.then((data) => new ListOperationsResponse.fromJson(data));
1006 } 1040 }
1007
1008 } 1041 }
1009 1042
1043 /// Specifies the type and number of accelerator cards attached to the
1044 /// instances of an instance group (see GPUs on Compute Engine).
1045 class AcceleratorConfig {
1046 /// The number of the accelerator cards of this type exposed to this
1047 /// instance.
1048 core.int acceleratorCount;
1010 1049
1011 1050 /// Full URL, partial URI, or short name of the accelerator type resource to
1012 /** 1051 /// expose to this instance. See Google Compute Engine AcceleratorTypes(
1013 * Specifies the type and number of accelerator cards attached to the instances 1052 /// /compute/docs/reference/beta/acceleratorTypes)Examples *
1014 * of an instance group (see GPUs on Compute Engine). 1053 /// https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-eas t1-a/acceleratorTypes/nvidia-tesla-k80
1015 */ 1054 /// *
1016 class AcceleratorConfig { 1055 /// projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80
1017 /** 1056 /// * nvidia-tesla-k80
1018 * The number of the accelerator cards of this type exposed to this instance.
1019 */
1020 core.int acceleratorCount;
1021 /**
1022 * Full URL, partial URI, or short name of the accelerator type resource to
1023 * expose to this instance. See Google Compute Engine AcceleratorTypes(
1024 * /compute/docs/reference/beta/acceleratorTypes)Examples *
1025 * https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east 1-a/acceleratorTypes/nvidia-tesla-k80
1026 * * projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80
1027 * * nvidia-tesla-k80
1028 */
1029 core.String acceleratorTypeUri; 1057 core.String acceleratorTypeUri;
1030 1058
1031 AcceleratorConfig(); 1059 AcceleratorConfig();
1032 1060
1033 AcceleratorConfig.fromJson(core.Map _json) { 1061 AcceleratorConfig.fromJson(core.Map _json) {
1034 if (_json.containsKey("acceleratorCount")) { 1062 if (_json.containsKey("acceleratorCount")) {
1035 acceleratorCount = _json["acceleratorCount"]; 1063 acceleratorCount = _json["acceleratorCount"];
1036 } 1064 }
1037 if (_json.containsKey("acceleratorTypeUri")) { 1065 if (_json.containsKey("acceleratorTypeUri")) {
1038 acceleratorTypeUri = _json["acceleratorTypeUri"]; 1066 acceleratorTypeUri = _json["acceleratorTypeUri"];
1039 } 1067 }
1040 } 1068 }
1041 1069
1042 core.Map<core.String, core.Object> toJson() { 1070 core.Map<core.String, core.Object> toJson() {
1043 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 1071 final core.Map<core.String, core.Object> _json =
1072 new core.Map<core.String, core.Object>();
1044 if (acceleratorCount != null) { 1073 if (acceleratorCount != null) {
1045 _json["acceleratorCount"] = acceleratorCount; 1074 _json["acceleratorCount"] = acceleratorCount;
1046 } 1075 }
1047 if (acceleratorTypeUri != null) { 1076 if (acceleratorTypeUri != null) {
1048 _json["acceleratorTypeUri"] = acceleratorTypeUri; 1077 _json["acceleratorTypeUri"] = acceleratorTypeUri;
1049 } 1078 }
1050 return _json; 1079 return _json;
1051 } 1080 }
1052 } 1081 }
1053 1082
1054 /** A request to cancel a job. */ 1083 /// A request to cancel a job.
1055 class CancelJobRequest { 1084 class CancelJobRequest {
1056
1057 CancelJobRequest(); 1085 CancelJobRequest();
1058 1086
1059 CancelJobRequest.fromJson(core.Map _json) { 1087 CancelJobRequest.fromJson(core.Map _json) {}
1060 }
1061 1088
1062 core.Map<core.String, core.Object> toJson() { 1089 core.Map<core.String, core.Object> toJson() {
1063 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 1090 final core.Map<core.String, core.Object> _json =
1091 new core.Map<core.String, core.Object>();
1064 return _json; 1092 return _json;
1065 } 1093 }
1066 } 1094 }
1067 1095
1068 /** 1096 /// Describes the identifying information, config, and status of a cluster of
1069 * Describes the identifying information, config, and status of a cluster of 1097 /// Google Compute Engine instances.
1070 * Google Compute Engine instances.
1071 */
1072 class Cluster { 1098 class Cluster {
1073 /** 1099 /// Required. The cluster name. Cluster names within a project must be
1074 * Required. The cluster name. Cluster names within a project must be unique. 1100 /// unique. Names of deleted clusters can be reused.
1075 * Names of deleted clusters can be reused.
1076 */
1077 core.String clusterName; 1101 core.String clusterName;
1078 /** 1102
1079 * Output-only. A cluster UUID (Unique Universal Identifier). Cloud Dataproc 1103 /// Output-only. A cluster UUID (Unique Universal Identifier). Cloud Dataproc
1080 * generates this value when it creates the cluster. 1104 /// generates this value when it creates the cluster.
1081 */
1082 core.String clusterUuid; 1105 core.String clusterUuid;
1083 /** 1106
1084 * Required. The cluster config. Note that Cloud Dataproc may set default 1107 /// Required. The cluster config. Note that Cloud Dataproc may set default
1085 * values, and values may change when clusters are updated. 1108 /// values, and values may change when clusters are updated.
1086 */
1087 ClusterConfig config; 1109 ClusterConfig config;
1088 /** 1110
1089 * Optional. The labels to associate with this cluster. Label keys must 1111 /// Optional. The labels to associate with this cluster. Label keys must
1090 * contain 1 to 63 characters, and must conform to RFC 1035 1112 /// contain 1 to 63 characters, and must conform to RFC 1035
1091 * (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if 1113 /// (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
1092 * present, must contain 1 to 63 characters, and must conform to RFC 1035 1114 /// if present, must contain 1 to 63 characters, and must conform to RFC 1035
1093 * (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be 1115 /// (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
1094 * associated with a cluster. 1116 /// associated with a cluster.
1095 */
1096 core.Map<core.String, core.String> labels; 1117 core.Map<core.String, core.String> labels;
1097 /** 1118
1098 * Contains cluster daemon metrics such as HDFS and YARN stats.Beta Feature: 1119 /// Contains cluster daemon metrics such as HDFS and YARN stats.Beta Feature:
1099 * This report is available for testing purposes only. It may be changed 1120 /// This report is available for testing purposes only. It may be changed
1100 * before final release. 1121 /// before final release.
1101 */
1102 ClusterMetrics metrics; 1122 ClusterMetrics metrics;
1103 /** 1123
1104 * Required. The Google Cloud Platform project ID that the cluster belongs to. 1124 /// Required. The Google Cloud Platform project ID that the cluster belongs
1105 */ 1125 /// to.
1106 core.String projectId; 1126 core.String projectId;
1107 /** Output-only. Cluster status. */ 1127
1128 /// Output-only. Cluster status.
1108 ClusterStatus status; 1129 ClusterStatus status;
1109 /** Output-only. The previous cluster status. */ 1130
1131 /// Output-only. The previous cluster status.
1110 core.List<ClusterStatus> statusHistory; 1132 core.List<ClusterStatus> statusHistory;
1111 1133
1112 Cluster(); 1134 Cluster();
1113 1135
1114 Cluster.fromJson(core.Map _json) { 1136 Cluster.fromJson(core.Map _json) {
1115 if (_json.containsKey("clusterName")) { 1137 if (_json.containsKey("clusterName")) {
1116 clusterName = _json["clusterName"]; 1138 clusterName = _json["clusterName"];
1117 } 1139 }
1118 if (_json.containsKey("clusterUuid")) { 1140 if (_json.containsKey("clusterUuid")) {
1119 clusterUuid = _json["clusterUuid"]; 1141 clusterUuid = _json["clusterUuid"];
1120 } 1142 }
1121 if (_json.containsKey("config")) { 1143 if (_json.containsKey("config")) {
1122 config = new ClusterConfig.fromJson(_json["config"]); 1144 config = new ClusterConfig.fromJson(_json["config"]);
1123 } 1145 }
1124 if (_json.containsKey("labels")) { 1146 if (_json.containsKey("labels")) {
1125 labels = _json["labels"]; 1147 labels = _json["labels"];
1126 } 1148 }
1127 if (_json.containsKey("metrics")) { 1149 if (_json.containsKey("metrics")) {
1128 metrics = new ClusterMetrics.fromJson(_json["metrics"]); 1150 metrics = new ClusterMetrics.fromJson(_json["metrics"]);
1129 } 1151 }
1130 if (_json.containsKey("projectId")) { 1152 if (_json.containsKey("projectId")) {
1131 projectId = _json["projectId"]; 1153 projectId = _json["projectId"];
1132 } 1154 }
1133 if (_json.containsKey("status")) { 1155 if (_json.containsKey("status")) {
1134 status = new ClusterStatus.fromJson(_json["status"]); 1156 status = new ClusterStatus.fromJson(_json["status"]);
1135 } 1157 }
1136 if (_json.containsKey("statusHistory")) { 1158 if (_json.containsKey("statusHistory")) {
1137 statusHistory = _json["statusHistory"].map((value) => new ClusterStatus.fr omJson(value)).toList(); 1159 statusHistory = _json["statusHistory"]
1160 .map((value) => new ClusterStatus.fromJson(value))
1161 .toList();
1138 } 1162 }
1139 } 1163 }
1140 1164
1141 core.Map<core.String, core.Object> toJson() { 1165 core.Map<core.String, core.Object> toJson() {
1142 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 1166 final core.Map<core.String, core.Object> _json =
1167 new core.Map<core.String, core.Object>();
1143 if (clusterName != null) { 1168 if (clusterName != null) {
1144 _json["clusterName"] = clusterName; 1169 _json["clusterName"] = clusterName;
1145 } 1170 }
1146 if (clusterUuid != null) { 1171 if (clusterUuid != null) {
1147 _json["clusterUuid"] = clusterUuid; 1172 _json["clusterUuid"] = clusterUuid;
1148 } 1173 }
1149 if (config != null) { 1174 if (config != null) {
1150 _json["config"] = (config).toJson(); 1175 _json["config"] = (config).toJson();
1151 } 1176 }
1152 if (labels != null) { 1177 if (labels != null) {
1153 _json["labels"] = labels; 1178 _json["labels"] = labels;
1154 } 1179 }
1155 if (metrics != null) { 1180 if (metrics != null) {
1156 _json["metrics"] = (metrics).toJson(); 1181 _json["metrics"] = (metrics).toJson();
1157 } 1182 }
1158 if (projectId != null) { 1183 if (projectId != null) {
1159 _json["projectId"] = projectId; 1184 _json["projectId"] = projectId;
1160 } 1185 }
1161 if (status != null) { 1186 if (status != null) {
1162 _json["status"] = (status).toJson(); 1187 _json["status"] = (status).toJson();
1163 } 1188 }
1164 if (statusHistory != null) { 1189 if (statusHistory != null) {
1165 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to List(); 1190 _json["statusHistory"] =
1191 statusHistory.map((value) => (value).toJson()).toList();
1166 } 1192 }
1167 return _json; 1193 return _json;
1168 } 1194 }
1169 } 1195 }
1170 1196
1171 /** The cluster config. */ 1197 /// The cluster config.
1172 class ClusterConfig { 1198 class ClusterConfig {
1173 /** 1199 /// Optional. A Google Cloud Storage staging bucket used for sharing
1174 * Optional. A Google Cloud Storage staging bucket used for sharing generated 1200 /// generated SSH keys and config. If you do not specify a staging bucket,
1175 * SSH keys and config. If you do not specify a staging bucket, Cloud Dataproc 1201 /// Cloud Dataproc will determine an appropriate Cloud Storage location (US,
1176 * will determine an appropriate Cloud Storage location (US, ASIA, or EU) for 1202 /// ASIA, or EU) for your cluster's staging bucket according to the Google
1177 * your cluster's staging bucket according to the Google Compute Engine zone 1203 /// Compute Engine zone where your cluster is deployed, and then it will
1178 * where your cluster is deployed, and then it will create and manage this 1204 /// create and manage this project-level, per-location bucket for you.
1179 * project-level, per-location bucket for you.
1180 */
1181 core.String configBucket; 1205 core.String configBucket;
1182 /** 1206
1183 * Required. The shared Google Compute Engine config settings for all 1207 /// Required. The shared Google Compute Engine config settings for all
1184 * instances in a cluster. 1208 /// instances in a cluster.
1185 */
1186 GceClusterConfig gceClusterConfig; 1209 GceClusterConfig gceClusterConfig;
1187 /** 1210
1188 * Optional. Commands to execute on each node after config is completed. By 1211 /// Optional. Commands to execute on each node after config is completed. By
1189 * default, executables are run on master and all worker nodes. You can test a 1212 /// default, executables are run on master and all worker nodes. You can test
1190 * node's role metadata to run an executable on a master or worker node, as 1213 /// a node's role metadata to run an executable on a master or worker node,
1191 * shown below using curl (you can also use wget): 1214 /// as shown below using curl (you can also use wget):
1192 * ROLE=$(curl -H Metadata-Flavor:Google 1215 /// ROLE=$(curl -H Metadata-Flavor:Google
1193 * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) 1216 /// http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)
1194 * if [[ "${ROLE}" == 'Master' ]]; then 1217 /// if [[ "${ROLE}" == 'Master' ]]; then
1195 * ... master specific actions ... 1218 /// ... master specific actions ...
1196 * else 1219 /// else
1197 * ... worker specific actions ... 1220 /// ... worker specific actions ...
1198 * fi 1221 /// fi
1199 */
1200 core.List<NodeInitializationAction> initializationActions; 1222 core.List<NodeInitializationAction> initializationActions;
1201 /** 1223
1202 * Optional. The Google Compute Engine config settings for the master instance 1224 /// Optional. The Google Compute Engine config settings for the master
1203 * in a cluster. 1225 /// instance in a cluster.
1204 */
1205 InstanceGroupConfig masterConfig; 1226 InstanceGroupConfig masterConfig;
1206 /** 1227
1207 * Optional. The Google Compute Engine config settings for additional worker 1228 /// Optional. The Google Compute Engine config settings for additional worker
1208 * instances in a cluster. 1229 /// instances in a cluster.
1209 */
1210 InstanceGroupConfig secondaryWorkerConfig; 1230 InstanceGroupConfig secondaryWorkerConfig;
1211 /** Optional. The config settings for software inside the cluster. */ 1231
1232 /// Optional. The config settings for software inside the cluster.
1212 SoftwareConfig softwareConfig; 1233 SoftwareConfig softwareConfig;
1213 /** 1234
1214 * Optional. The Google Compute Engine config settings for worker instances in 1235 /// Optional. The Google Compute Engine config settings for worker instances
1215 * a cluster. 1236 /// in a cluster.
1216 */
1217 InstanceGroupConfig workerConfig; 1237 InstanceGroupConfig workerConfig;
1218 1238
1219 ClusterConfig(); 1239 ClusterConfig();
1220 1240
1221 ClusterConfig.fromJson(core.Map _json) { 1241 ClusterConfig.fromJson(core.Map _json) {
1222 if (_json.containsKey("configBucket")) { 1242 if (_json.containsKey("configBucket")) {
1223 configBucket = _json["configBucket"]; 1243 configBucket = _json["configBucket"];
1224 } 1244 }
1225 if (_json.containsKey("gceClusterConfig")) { 1245 if (_json.containsKey("gceClusterConfig")) {
1226 gceClusterConfig = new GceClusterConfig.fromJson(_json["gceClusterConfig"] ); 1246 gceClusterConfig =
1247 new GceClusterConfig.fromJson(_json["gceClusterConfig"]);
1227 } 1248 }
1228 if (_json.containsKey("initializationActions")) { 1249 if (_json.containsKey("initializationActions")) {
1229 initializationActions = _json["initializationActions"].map((value) => new NodeInitializationAction.fromJson(value)).toList(); 1250 initializationActions = _json["initializationActions"]
1251 .map((value) => new NodeInitializationAction.fromJson(value))
1252 .toList();
1230 } 1253 }
1231 if (_json.containsKey("masterConfig")) { 1254 if (_json.containsKey("masterConfig")) {
1232 masterConfig = new InstanceGroupConfig.fromJson(_json["masterConfig"]); 1255 masterConfig = new InstanceGroupConfig.fromJson(_json["masterConfig"]);
1233 } 1256 }
1234 if (_json.containsKey("secondaryWorkerConfig")) { 1257 if (_json.containsKey("secondaryWorkerConfig")) {
1235 secondaryWorkerConfig = new InstanceGroupConfig.fromJson(_json["secondaryW orkerConfig"]); 1258 secondaryWorkerConfig =
1259 new InstanceGroupConfig.fromJson(_json["secondaryWorkerConfig"]);
1236 } 1260 }
1237 if (_json.containsKey("softwareConfig")) { 1261 if (_json.containsKey("softwareConfig")) {
1238 softwareConfig = new SoftwareConfig.fromJson(_json["softwareConfig"]); 1262 softwareConfig = new SoftwareConfig.fromJson(_json["softwareConfig"]);
1239 } 1263 }
1240 if (_json.containsKey("workerConfig")) { 1264 if (_json.containsKey("workerConfig")) {
1241 workerConfig = new InstanceGroupConfig.fromJson(_json["workerConfig"]); 1265 workerConfig = new InstanceGroupConfig.fromJson(_json["workerConfig"]);
1242 } 1266 }
1243 } 1267 }
1244 1268
1245 core.Map<core.String, core.Object> toJson() { 1269 core.Map<core.String, core.Object> toJson() {
1246 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 1270 final core.Map<core.String, core.Object> _json =
1271 new core.Map<core.String, core.Object>();
1247 if (configBucket != null) { 1272 if (configBucket != null) {
1248 _json["configBucket"] = configBucket; 1273 _json["configBucket"] = configBucket;
1249 } 1274 }
1250 if (gceClusterConfig != null) { 1275 if (gceClusterConfig != null) {
1251 _json["gceClusterConfig"] = (gceClusterConfig).toJson(); 1276 _json["gceClusterConfig"] = (gceClusterConfig).toJson();
1252 } 1277 }
1253 if (initializationActions != null) { 1278 if (initializationActions != null) {
1254 _json["initializationActions"] = initializationActions.map((value) => (val ue).toJson()).toList(); 1279 _json["initializationActions"] =
1280 initializationActions.map((value) => (value).toJson()).toList();
1255 } 1281 }
1256 if (masterConfig != null) { 1282 if (masterConfig != null) {
1257 _json["masterConfig"] = (masterConfig).toJson(); 1283 _json["masterConfig"] = (masterConfig).toJson();
1258 } 1284 }
1259 if (secondaryWorkerConfig != null) { 1285 if (secondaryWorkerConfig != null) {
1260 _json["secondaryWorkerConfig"] = (secondaryWorkerConfig).toJson(); 1286 _json["secondaryWorkerConfig"] = (secondaryWorkerConfig).toJson();
1261 } 1287 }
1262 if (softwareConfig != null) { 1288 if (softwareConfig != null) {
1263 _json["softwareConfig"] = (softwareConfig).toJson(); 1289 _json["softwareConfig"] = (softwareConfig).toJson();
1264 } 1290 }
1265 if (workerConfig != null) { 1291 if (workerConfig != null) {
1266 _json["workerConfig"] = (workerConfig).toJson(); 1292 _json["workerConfig"] = (workerConfig).toJson();
1267 } 1293 }
1268 return _json; 1294 return _json;
1269 } 1295 }
1270 } 1296 }
1271 1297
1272 /** 1298 /// Contains cluster daemon metrics, such as HDFS and YARN stats.Beta Feature:
1273 * Contains cluster daemon metrics, such as HDFS and YARN stats.Beta Feature: 1299 /// This report is available for testing purposes only. It may be changed
1274 * This report is available for testing purposes only. It may be changed before 1300 /// before final release.
1275 * final release.
1276 */
1277 class ClusterMetrics { 1301 class ClusterMetrics {
1278 /** The HDFS metrics. */ 1302 /// The HDFS metrics.
1279 core.Map<core.String, core.String> hdfsMetrics; 1303 core.Map<core.String, core.String> hdfsMetrics;
1280 /** The YARN metrics. */ 1304
1305 /// The YARN metrics.
1281 core.Map<core.String, core.String> yarnMetrics; 1306 core.Map<core.String, core.String> yarnMetrics;
1282 1307
1283 ClusterMetrics(); 1308 ClusterMetrics();
1284 1309
1285 ClusterMetrics.fromJson(core.Map _json) { 1310 ClusterMetrics.fromJson(core.Map _json) {
1286 if (_json.containsKey("hdfsMetrics")) { 1311 if (_json.containsKey("hdfsMetrics")) {
1287 hdfsMetrics = _json["hdfsMetrics"]; 1312 hdfsMetrics = _json["hdfsMetrics"];
1288 } 1313 }
1289 if (_json.containsKey("yarnMetrics")) { 1314 if (_json.containsKey("yarnMetrics")) {
1290 yarnMetrics = _json["yarnMetrics"]; 1315 yarnMetrics = _json["yarnMetrics"];
1291 } 1316 }
1292 } 1317 }
1293 1318
1294 core.Map<core.String, core.Object> toJson() { 1319 core.Map<core.String, core.Object> toJson() {
1295 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 1320 final core.Map<core.String, core.Object> _json =
1321 new core.Map<core.String, core.Object>();
1296 if (hdfsMetrics != null) { 1322 if (hdfsMetrics != null) {
1297 _json["hdfsMetrics"] = hdfsMetrics; 1323 _json["hdfsMetrics"] = hdfsMetrics;
1298 } 1324 }
1299 if (yarnMetrics != null) { 1325 if (yarnMetrics != null) {
1300 _json["yarnMetrics"] = yarnMetrics; 1326 _json["yarnMetrics"] = yarnMetrics;
1301 } 1327 }
1302 return _json; 1328 return _json;
1303 } 1329 }
1304 } 1330 }
1305 1331
1306 /** Metadata describing the operation. */ 1332 /// Metadata describing the operation.
1307 class ClusterOperationMetadata { 1333 class ClusterOperationMetadata {
1308 /** Output-only. Name of the cluster for the operation. */ 1334 /// Output-only. Name of the cluster for the operation.
1309 core.String clusterName; 1335 core.String clusterName;
1310 /** Output-only. Cluster UUID for the operation. */ 1336
1337 /// Output-only. Cluster UUID for the operation.
1311 core.String clusterUuid; 1338 core.String clusterUuid;
1312 /** Output-only. Short description of operation. */ 1339
1340 /// Output-only. Short description of operation.
1313 core.String description; 1341 core.String description;
1314 /** Output-only. Labels associated with the operation */ 1342
1343 /// Output-only. Labels associated with the operation
1315 core.Map<core.String, core.String> labels; 1344 core.Map<core.String, core.String> labels;
1316 /** Output-only. The operation type. */ 1345
1346 /// Output-only. The operation type.
1317 core.String operationType; 1347 core.String operationType;
1318 /** Output-only. Current operation status. */ 1348
1349 /// Output-only. Current operation status.
1319 ClusterOperationStatus status; 1350 ClusterOperationStatus status;
1320 /** Output-only. The previous operation status. */ 1351
1352 /// Output-only. The previous operation status.
1321 core.List<ClusterOperationStatus> statusHistory; 1353 core.List<ClusterOperationStatus> statusHistory;
1322 /** Output-only. Errors encountered during operation execution. */ 1354
1355 /// Output-only. Errors encountered during operation execution.
1323 core.List<core.String> warnings; 1356 core.List<core.String> warnings;
1324 1357
1325 ClusterOperationMetadata(); 1358 ClusterOperationMetadata();
1326 1359
1327 ClusterOperationMetadata.fromJson(core.Map _json) { 1360 ClusterOperationMetadata.fromJson(core.Map _json) {
1328 if (_json.containsKey("clusterName")) { 1361 if (_json.containsKey("clusterName")) {
1329 clusterName = _json["clusterName"]; 1362 clusterName = _json["clusterName"];
1330 } 1363 }
1331 if (_json.containsKey("clusterUuid")) { 1364 if (_json.containsKey("clusterUuid")) {
1332 clusterUuid = _json["clusterUuid"]; 1365 clusterUuid = _json["clusterUuid"];
1333 } 1366 }
1334 if (_json.containsKey("description")) { 1367 if (_json.containsKey("description")) {
1335 description = _json["description"]; 1368 description = _json["description"];
1336 } 1369 }
1337 if (_json.containsKey("labels")) { 1370 if (_json.containsKey("labels")) {
1338 labels = _json["labels"]; 1371 labels = _json["labels"];
1339 } 1372 }
1340 if (_json.containsKey("operationType")) { 1373 if (_json.containsKey("operationType")) {
1341 operationType = _json["operationType"]; 1374 operationType = _json["operationType"];
1342 } 1375 }
1343 if (_json.containsKey("status")) { 1376 if (_json.containsKey("status")) {
1344 status = new ClusterOperationStatus.fromJson(_json["status"]); 1377 status = new ClusterOperationStatus.fromJson(_json["status"]);
1345 } 1378 }
1346 if (_json.containsKey("statusHistory")) { 1379 if (_json.containsKey("statusHistory")) {
1347 statusHistory = _json["statusHistory"].map((value) => new ClusterOperation Status.fromJson(value)).toList(); 1380 statusHistory = _json["statusHistory"]
1381 .map((value) => new ClusterOperationStatus.fromJson(value))
1382 .toList();
1348 } 1383 }
1349 if (_json.containsKey("warnings")) { 1384 if (_json.containsKey("warnings")) {
1350 warnings = _json["warnings"]; 1385 warnings = _json["warnings"];
1351 } 1386 }
1352 } 1387 }
1353 1388
1354 core.Map<core.String, core.Object> toJson() { 1389 core.Map<core.String, core.Object> toJson() {
1355 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 1390 final core.Map<core.String, core.Object> _json =
1391 new core.Map<core.String, core.Object>();
1356 if (clusterName != null) { 1392 if (clusterName != null) {
1357 _json["clusterName"] = clusterName; 1393 _json["clusterName"] = clusterName;
1358 } 1394 }
1359 if (clusterUuid != null) { 1395 if (clusterUuid != null) {
1360 _json["clusterUuid"] = clusterUuid; 1396 _json["clusterUuid"] = clusterUuid;
1361 } 1397 }
1362 if (description != null) { 1398 if (description != null) {
1363 _json["description"] = description; 1399 _json["description"] = description;
1364 } 1400 }
1365 if (labels != null) { 1401 if (labels != null) {
1366 _json["labels"] = labels; 1402 _json["labels"] = labels;
1367 } 1403 }
1368 if (operationType != null) { 1404 if (operationType != null) {
1369 _json["operationType"] = operationType; 1405 _json["operationType"] = operationType;
1370 } 1406 }
1371 if (status != null) { 1407 if (status != null) {
1372 _json["status"] = (status).toJson(); 1408 _json["status"] = (status).toJson();
1373 } 1409 }
1374 if (statusHistory != null) { 1410 if (statusHistory != null) {
1375 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to List(); 1411 _json["statusHistory"] =
1412 statusHistory.map((value) => (value).toJson()).toList();
1376 } 1413 }
1377 if (warnings != null) { 1414 if (warnings != null) {
1378 _json["warnings"] = warnings; 1415 _json["warnings"] = warnings;
1379 } 1416 }
1380 return _json; 1417 return _json;
1381 } 1418 }
1382 } 1419 }
1383 1420
1384 /** The status of the operation. */ 1421 /// The status of the operation.
1385 class ClusterOperationStatus { 1422 class ClusterOperationStatus {
1386 /** Output-only.A message containing any operation metadata details. */ 1423 /// Output-only.A message containing any operation metadata details.
1387 core.String details; 1424 core.String details;
1388 /** Output-only. A message containing the detailed operation state. */ 1425
1426 /// Output-only. A message containing the detailed operation state.
1389 core.String innerState; 1427 core.String innerState;
1390 /** 1428
1391 * Output-only. A message containing the operation state. 1429 /// Output-only. A message containing the operation state.
1392 * Possible string values are: 1430 /// Possible string values are:
1393 * - "UNKNOWN" : Unused. 1431 /// - "UNKNOWN" : Unused.
1394 * - "PENDING" : The operation has been created. 1432 /// - "PENDING" : The operation has been created.
1395 * - "RUNNING" : The operation is running. 1433 /// - "RUNNING" : The operation is running.
1396 * - "DONE" : The operation is done; either cancelled or completed. 1434 /// - "DONE" : The operation is done; either cancelled or completed.
1397 */
1398 core.String state; 1435 core.String state;
1399 /** Output-only. The time this state was entered. */ 1436
1437 /// Output-only. The time this state was entered.
1400 core.String stateStartTime; 1438 core.String stateStartTime;
1401 1439
1402 ClusterOperationStatus(); 1440 ClusterOperationStatus();
1403 1441
1404 ClusterOperationStatus.fromJson(core.Map _json) { 1442 ClusterOperationStatus.fromJson(core.Map _json) {
1405 if (_json.containsKey("details")) { 1443 if (_json.containsKey("details")) {
1406 details = _json["details"]; 1444 details = _json["details"];
1407 } 1445 }
1408 if (_json.containsKey("innerState")) { 1446 if (_json.containsKey("innerState")) {
1409 innerState = _json["innerState"]; 1447 innerState = _json["innerState"];
1410 } 1448 }
1411 if (_json.containsKey("state")) { 1449 if (_json.containsKey("state")) {
1412 state = _json["state"]; 1450 state = _json["state"];
1413 } 1451 }
1414 if (_json.containsKey("stateStartTime")) { 1452 if (_json.containsKey("stateStartTime")) {
1415 stateStartTime = _json["stateStartTime"]; 1453 stateStartTime = _json["stateStartTime"];
1416 } 1454 }
1417 } 1455 }
1418 1456
1419 core.Map<core.String, core.Object> toJson() { 1457 core.Map<core.String, core.Object> toJson() {
1420 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 1458 final core.Map<core.String, core.Object> _json =
1459 new core.Map<core.String, core.Object>();
1421 if (details != null) { 1460 if (details != null) {
1422 _json["details"] = details; 1461 _json["details"] = details;
1423 } 1462 }
1424 if (innerState != null) { 1463 if (innerState != null) {
1425 _json["innerState"] = innerState; 1464 _json["innerState"] = innerState;
1426 } 1465 }
1427 if (state != null) { 1466 if (state != null) {
1428 _json["state"] = state; 1467 _json["state"] = state;
1429 } 1468 }
1430 if (stateStartTime != null) { 1469 if (stateStartTime != null) {
1431 _json["stateStartTime"] = stateStartTime; 1470 _json["stateStartTime"] = stateStartTime;
1432 } 1471 }
1433 return _json; 1472 return _json;
1434 } 1473 }
1435 } 1474 }
1436 1475
1437 /** The status of a cluster and its instances. */ 1476 /// The status of a cluster and its instances.
1438 class ClusterStatus { 1477 class ClusterStatus {
1439 /** Output-only. Optional details of cluster's state. */ 1478 /// Output-only. Optional details of cluster's state.
1440 core.String detail; 1479 core.String detail;
1441 /** 1480
1442 * Output-only. The cluster's state. 1481 /// Output-only. The cluster's state.
1443 * Possible string values are: 1482 /// Possible string values are:
1444 * - "UNKNOWN" : The cluster state is unknown. 1483 /// - "UNKNOWN" : The cluster state is unknown.
1445 * - "CREATING" : The cluster is being created and set up. It is not ready for 1484 /// - "CREATING" : The cluster is being created and set up. It is not ready
1446 * use. 1485 /// for use.
1447 * - "RUNNING" : The cluster is currently running and healthy. It is ready for 1486 /// - "RUNNING" : The cluster is currently running and healthy. It is ready
1448 * use. 1487 /// for use.
1449 * - "ERROR" : The cluster encountered an error. It is not ready for use. 1488 /// - "ERROR" : The cluster encountered an error. It is not ready for use.
1450 * - "DELETING" : The cluster is being deleted. It cannot be used. 1489 /// - "DELETING" : The cluster is being deleted. It cannot be used.
1451 * - "UPDATING" : The cluster is being updated. It continues to accept and 1490 /// - "UPDATING" : The cluster is being updated. It continues to accept and
1452 * process jobs. 1491 /// process jobs.
1453 */
1454 core.String state; 1492 core.String state;
1455 /** Output-only. Time when this state was entered. */ 1493
1494 /// Output-only. Time when this state was entered.
1456 core.String stateStartTime; 1495 core.String stateStartTime;
1457 /** 1496
1458 * Output-only. Additional state information that includes status reported by 1497 /// Output-only. Additional state information that includes status reported
1459 * the agent. 1498 /// by the agent.
1460 * Possible string values are: 1499 /// Possible string values are:
1461 * - "UNSPECIFIED" 1500 /// - "UNSPECIFIED"
1462 * - "UNHEALTHY" : The cluster is known to be in an unhealthy state (for 1501 /// - "UNHEALTHY" : The cluster is known to be in an unhealthy state (for
1463 * example, critical daemons are not running or HDFS capacity is 1502 /// example, critical daemons are not running or HDFS capacity is
1464 * exhausted).Applies to RUNNING state. 1503 /// exhausted).Applies to RUNNING state.
1465 * - "STALE_STATUS" : The agent-reported status is out of date (may occur if 1504 /// - "STALE_STATUS" : The agent-reported status is out of date (may occur if
1466 * Cloud Dataproc loses communication with Agent).Applies to RUNNING state. 1505 /// Cloud Dataproc loses communication with Agent).Applies to RUNNING state.
1467 */
1468 core.String substate; 1506 core.String substate;
1469 1507
1470 ClusterStatus(); 1508 ClusterStatus();
1471 1509
1472 ClusterStatus.fromJson(core.Map _json) { 1510 ClusterStatus.fromJson(core.Map _json) {
1473 if (_json.containsKey("detail")) { 1511 if (_json.containsKey("detail")) {
1474 detail = _json["detail"]; 1512 detail = _json["detail"];
1475 } 1513 }
1476 if (_json.containsKey("state")) { 1514 if (_json.containsKey("state")) {
1477 state = _json["state"]; 1515 state = _json["state"];
1478 } 1516 }
1479 if (_json.containsKey("stateStartTime")) { 1517 if (_json.containsKey("stateStartTime")) {
1480 stateStartTime = _json["stateStartTime"]; 1518 stateStartTime = _json["stateStartTime"];
1481 } 1519 }
1482 if (_json.containsKey("substate")) { 1520 if (_json.containsKey("substate")) {
1483 substate = _json["substate"]; 1521 substate = _json["substate"];
1484 } 1522 }
1485 } 1523 }
1486 1524
1487 core.Map<core.String, core.Object> toJson() { 1525 core.Map<core.String, core.Object> toJson() {
1488 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 1526 final core.Map<core.String, core.Object> _json =
1527 new core.Map<core.String, core.Object>();
1489 if (detail != null) { 1528 if (detail != null) {
1490 _json["detail"] = detail; 1529 _json["detail"] = detail;
1491 } 1530 }
1492 if (state != null) { 1531 if (state != null) {
1493 _json["state"] = state; 1532 _json["state"] = state;
1494 } 1533 }
1495 if (stateStartTime != null) { 1534 if (stateStartTime != null) {
1496 _json["stateStartTime"] = stateStartTime; 1535 _json["stateStartTime"] = stateStartTime;
1497 } 1536 }
1498 if (substate != null) { 1537 if (substate != null) {
1499 _json["substate"] = substate; 1538 _json["substate"] = substate;
1500 } 1539 }
1501 return _json; 1540 return _json;
1502 } 1541 }
1503 } 1542 }
1504 1543
1505 /** A request to collect cluster diagnostic information. */ 1544 /// A request to collect cluster diagnostic information.
1506 class DiagnoseClusterRequest { 1545 class DiagnoseClusterRequest {
1507
1508 DiagnoseClusterRequest(); 1546 DiagnoseClusterRequest();
1509 1547
1510 DiagnoseClusterRequest.fromJson(core.Map _json) { 1548 DiagnoseClusterRequest.fromJson(core.Map _json) {}
1511 }
1512 1549
1513 core.Map<core.String, core.Object> toJson() { 1550 core.Map<core.String, core.Object> toJson() {
1514 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 1551 final core.Map<core.String, core.Object> _json =
1552 new core.Map<core.String, core.Object>();
1515 return _json; 1553 return _json;
1516 } 1554 }
1517 } 1555 }
1518 1556
1519 /** The location of diagnostic output. */ 1557 /// The location of diagnostic output.
1520 class DiagnoseClusterResults { 1558 class DiagnoseClusterResults {
1521 /** 1559 /// Output-only. The Google Cloud Storage URI of the diagnostic output. The
1522 * Output-only. The Google Cloud Storage URI of the diagnostic output. The 1560 /// output report is a plain text file with a summary of collected
1523 * output report is a plain text file with a summary of collected diagnostics. 1561 /// diagnostics.
1524 */
1525 core.String outputUri; 1562 core.String outputUri;
1526 1563
1527 DiagnoseClusterResults(); 1564 DiagnoseClusterResults();
1528 1565
1529 DiagnoseClusterResults.fromJson(core.Map _json) { 1566 DiagnoseClusterResults.fromJson(core.Map _json) {
1530 if (_json.containsKey("outputUri")) { 1567 if (_json.containsKey("outputUri")) {
1531 outputUri = _json["outputUri"]; 1568 outputUri = _json["outputUri"];
1532 } 1569 }
1533 } 1570 }
1534 1571
1535 core.Map<core.String, core.Object> toJson() { 1572 core.Map<core.String, core.Object> toJson() {
1536 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 1573 final core.Map<core.String, core.Object> _json =
1574 new core.Map<core.String, core.Object>();
1537 if (outputUri != null) { 1575 if (outputUri != null) {
1538 _json["outputUri"] = outputUri; 1576 _json["outputUri"] = outputUri;
1539 } 1577 }
1540 return _json; 1578 return _json;
1541 } 1579 }
1542 } 1580 }
1543 1581
1544 /** Specifies the config of disk options for a group of VM instances. */ 1582 /// Specifies the config of disk options for a group of VM instances.
1545 class DiskConfig { 1583 class DiskConfig {
1546 /** Optional. Size in GB of the boot disk (default is 500GB). */ 1584 /// Optional. Size in GB of the boot disk (default is 500GB).
1547 core.int bootDiskSizeGb; 1585 core.int bootDiskSizeGb;
1548 /** 1586
1549 * Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are 1587 /// Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs
1550 * not attached, the boot disk is used to store runtime logs and HDFS 1588 /// are not attached, the boot disk is used to store runtime logs and HDFS
1551 * (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one 1589 /// (https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one
1552 * or more SSDs are attached, this runtime bulk data is spread across them, 1590 /// or more SSDs are attached, this runtime bulk data is spread across them,
1553 * and the boot disk contains only basic config and installed binaries. 1591 /// and the boot disk contains only basic config and installed binaries.
1554 */
1555 core.int numLocalSsds; 1592 core.int numLocalSsds;
1556 1593
1557 DiskConfig(); 1594 DiskConfig();
1558 1595
1559 DiskConfig.fromJson(core.Map _json) { 1596 DiskConfig.fromJson(core.Map _json) {
1560 if (_json.containsKey("bootDiskSizeGb")) { 1597 if (_json.containsKey("bootDiskSizeGb")) {
1561 bootDiskSizeGb = _json["bootDiskSizeGb"]; 1598 bootDiskSizeGb = _json["bootDiskSizeGb"];
1562 } 1599 }
1563 if (_json.containsKey("numLocalSsds")) { 1600 if (_json.containsKey("numLocalSsds")) {
1564 numLocalSsds = _json["numLocalSsds"]; 1601 numLocalSsds = _json["numLocalSsds"];
1565 } 1602 }
1566 } 1603 }
1567 1604
1568 core.Map<core.String, core.Object> toJson() { 1605 core.Map<core.String, core.Object> toJson() {
1569 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 1606 final core.Map<core.String, core.Object> _json =
1607 new core.Map<core.String, core.Object>();
1570 if (bootDiskSizeGb != null) { 1608 if (bootDiskSizeGb != null) {
1571 _json["bootDiskSizeGb"] = bootDiskSizeGb; 1609 _json["bootDiskSizeGb"] = bootDiskSizeGb;
1572 } 1610 }
1573 if (numLocalSsds != null) { 1611 if (numLocalSsds != null) {
1574 _json["numLocalSsds"] = numLocalSsds; 1612 _json["numLocalSsds"] = numLocalSsds;
1575 } 1613 }
1576 return _json; 1614 return _json;
1577 } 1615 }
1578 } 1616 }
1579 1617
1580 /** 1618 /// A generic empty message that you can re-use to avoid defining duplicated
1581 * A generic empty message that you can re-use to avoid defining duplicated 1619 /// empty messages in your APIs. A typical example is to use it as the request
1582 * empty messages in your APIs. A typical example is to use it as the request or 1620 /// or the response type of an API method. For instance:
1583 * the response type of an API method. For instance: 1621 /// service Foo {
1584 * service Foo { 1622 /// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
1585 * rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); 1623 /// }
1586 * } 1624 /// The JSON representation for Empty is empty JSON object {}.
1587 * The JSON representation for Empty is empty JSON object {}.
1588 */
1589 class Empty { 1625 class Empty {
1590
1591 Empty(); 1626 Empty();
1592 1627
1593 Empty.fromJson(core.Map _json) { 1628 Empty.fromJson(core.Map _json) {}
1594 }
1595 1629
1596 core.Map<core.String, core.Object> toJson() { 1630 core.Map<core.String, core.Object> toJson() {
1597 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 1631 final core.Map<core.String, core.Object> _json =
1632 new core.Map<core.String, core.Object>();
1598 return _json; 1633 return _json;
1599 } 1634 }
1600 } 1635 }
1601 1636
1602 /** 1637 /// Common config settings for resources of Google Compute Engine cluster
1603 * Common config settings for resources of Google Compute Engine cluster 1638 /// instances, applicable to all instances in the cluster.
1604 * instances, applicable to all instances in the cluster.
1605 */
1606 class GceClusterConfig { 1639 class GceClusterConfig {
1607 /** 1640 /// Optional. If true, all instances in the cluster will only have internal
1608 * Optional. If true, all instances in the cluster will only have internal IP 1641 /// IP addresses. By default, clusters are not restricted to internal IP
1609 * addresses. By default, clusters are not restricted to internal IP 1642 /// addresses, and will have ephemeral external IP addresses assigned to each
1610 * addresses, and will have ephemeral external IP addresses assigned to each 1643 /// instance. This internal_ip_only restriction can only be enabled for
1611 * instance. This internal_ip_only restriction can only be enabled for 1644 /// subnetwork enabled networks, and all off-cluster dependencies must be
1612 * subnetwork enabled networks, and all off-cluster dependencies must be 1645 /// configured to be accessible without external IP addresses.
1613 * configured to be accessible without external IP addresses.
1614 */
1615 core.bool internalIpOnly; 1646 core.bool internalIpOnly;
1616 /** 1647
1617 * The Google Compute Engine metadata entries to add to all instances (see 1648 /// The Google Compute Engine metadata entries to add to all instances (see
1618 * Project and instance metadata 1649 /// Project and instance metadata
1619 * (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_ and_instance_metadata)). 1650 /// (https://cloud.google.com/compute/docs/storing-retrieving-metadata#project _and_instance_metadata)).
1620 */
1621 core.Map<core.String, core.String> metadata; 1651 core.Map<core.String, core.String> metadata;
1622 /** 1652
1623 * Optional. The Google Compute Engine network to be used for machine 1653 /// Optional. The Google Compute Engine network to be used for machine
1624 * communications. Cannot be specified with subnetwork_uri. If neither 1654 /// communications. Cannot be specified with subnetwork_uri. If neither
1625 * network_uri nor subnetwork_uri is specified, the "default" network of the 1655 /// network_uri nor subnetwork_uri is specified, the "default" network of the
1626 * project is used, if it exists. Cannot be a "Custom Subnet Network" (see 1656 /// project is used, if it exists. Cannot be a "Custom Subnet Network" (see
1627 * Using Subnetworks for more information).A full URL, partial URI, or short 1657 /// Using Subnetworks for more information).A full URL, partial URI, or short
1628 * name are valid. Examples: 1658 /// name are valid. Examples:
1629 * https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/ default 1659 /// https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global /default
1630 * projects/[project_id]/regions/global/default 1660 /// projects/[project_id]/regions/global/default
1631 * default 1661 /// default
1632 */
1633 core.String networkUri; 1662 core.String networkUri;
1634 /** 1663
1635 * Optional. The service account of the instances. Defaults to the default 1664 /// Optional. The service account of the instances. Defaults to the default
1636 * Google Compute Engine service account. Custom service accounts need 1665 /// Google Compute Engine service account. Custom service accounts need
1637 * permissions equivalent to the folloing IAM roles: 1666 /// permissions equivalent to the folloing IAM roles:
1638 * roles/logging.logWriter 1667 /// roles/logging.logWriter
1639 * roles/storage.objectAdmin(see 1668 /// roles/storage.objectAdmin(see
1640 * https://cloud.google.com/compute/docs/access/service-accounts#custom_servic e_accounts 1669 /// https://cloud.google.com/compute/docs/access/service-accounts#custom_servi ce_accounts
1641 * for more information). Example: 1670 /// for more information). Example:
1642 * [account_id]@[project_id].iam.gserviceaccount.com 1671 /// [account_id]@[project_id].iam.gserviceaccount.com
1643 */
1644 core.String serviceAccount; 1672 core.String serviceAccount;
1645 /** 1673
1646 * Optional. The URIs of service account scopes to be included in Google 1674 /// Optional. The URIs of service account scopes to be included in Google
1647 * Compute Engine instances. The following base set of scopes is always 1675 /// Compute Engine instances. The following base set of scopes is always
1648 * included: 1676 /// included:
1649 * https://www.googleapis.com/auth/cloud.useraccounts.readonly 1677 /// https://www.googleapis.com/auth/cloud.useraccounts.readonly
1650 * https://www.googleapis.com/auth/devstorage.read_write 1678 /// https://www.googleapis.com/auth/devstorage.read_write
1651 * https://www.googleapis.com/auth/logging.writeIf no scopes are specified, 1679 /// https://www.googleapis.com/auth/logging.writeIf no scopes are specified,
1652 * the following defaults are also provided: 1680 /// the following defaults are also provided:
1653 * https://www.googleapis.com/auth/bigquery 1681 /// https://www.googleapis.com/auth/bigquery
1654 * https://www.googleapis.com/auth/bigtable.admin.table 1682 /// https://www.googleapis.com/auth/bigtable.admin.table
1655 * https://www.googleapis.com/auth/bigtable.data 1683 /// https://www.googleapis.com/auth/bigtable.data
1656 * https://www.googleapis.com/auth/devstorage.full_control 1684 /// https://www.googleapis.com/auth/devstorage.full_control
1657 */
1658 core.List<core.String> serviceAccountScopes; 1685 core.List<core.String> serviceAccountScopes;
1659 /** 1686
1660 * Optional. The Google Compute Engine subnetwork to be used for machine 1687 /// Optional. The Google Compute Engine subnetwork to be used for machine
1661 * communications. Cannot be specified with network_uri.A full URL, partial 1688 /// communications. Cannot be specified with network_uri.A full URL, partial
1662 * URI, or short name are valid. Examples: 1689 /// URI, or short name are valid. Examples:
1663 * https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east 1/sub0 1690 /// https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-eas t1/sub0
1664 * projects/[project_id]/regions/us-east1/sub0 1691 /// projects/[project_id]/regions/us-east1/sub0
1665 * sub0 1692 /// sub0
1666 */
1667 core.String subnetworkUri; 1693 core.String subnetworkUri;
1668 /** 1694
1669 * The Google Compute Engine tags to add to all instances (see Tagging 1695 /// The Google Compute Engine tags to add to all instances (see Tagging
1670 * instances). 1696 /// instances).
1671 */
1672 core.List<core.String> tags; 1697 core.List<core.String> tags;
1673 /** 1698
1674 * Optional. The zone where the Google Compute Engine cluster will be located. 1699 /// Optional. The zone where the Google Compute Engine cluster will be
1675 * On a create request, it is required in the "global" region. If omitted in a 1700 /// located. On a create request, it is required in the "global" region. If
1676 * non-global Cloud Dataproc region, the service will pick a zone in the 1701 /// omitted in a non-global Cloud Dataproc region, the service will pick a
1677 * corresponding Compute Engine region. On a get request, zone will always be 1702 /// zone in the corresponding Compute Engine region. On a get request, zone
1678 * present.A full URL, partial URI, or short name are valid. Examples: 1703 /// will always be present.A full URL, partial URI, or short name are valid.
1679 * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone] 1704 /// Examples:
1680 * projects/[project_id]/zones/[zone] 1705 /// https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]
1681 * us-central1-f 1706 /// projects/[project_id]/zones/[zone]
1682 */ 1707 /// us-central1-f
1683 core.String zoneUri; 1708 core.String zoneUri;
1684 1709
1685 GceClusterConfig(); 1710 GceClusterConfig();
1686 1711
1687 GceClusterConfig.fromJson(core.Map _json) { 1712 GceClusterConfig.fromJson(core.Map _json) {
1688 if (_json.containsKey("internalIpOnly")) { 1713 if (_json.containsKey("internalIpOnly")) {
1689 internalIpOnly = _json["internalIpOnly"]; 1714 internalIpOnly = _json["internalIpOnly"];
1690 } 1715 }
1691 if (_json.containsKey("metadata")) { 1716 if (_json.containsKey("metadata")) {
1692 metadata = _json["metadata"]; 1717 metadata = _json["metadata"];
(...skipping 12 matching lines...) Expand all
1705 } 1730 }
1706 if (_json.containsKey("tags")) { 1731 if (_json.containsKey("tags")) {
1707 tags = _json["tags"]; 1732 tags = _json["tags"];
1708 } 1733 }
1709 if (_json.containsKey("zoneUri")) { 1734 if (_json.containsKey("zoneUri")) {
1710 zoneUri = _json["zoneUri"]; 1735 zoneUri = _json["zoneUri"];
1711 } 1736 }
1712 } 1737 }
1713 1738
1714 core.Map<core.String, core.Object> toJson() { 1739 core.Map<core.String, core.Object> toJson() {
1715 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 1740 final core.Map<core.String, core.Object> _json =
1741 new core.Map<core.String, core.Object>();
1716 if (internalIpOnly != null) { 1742 if (internalIpOnly != null) {
1717 _json["internalIpOnly"] = internalIpOnly; 1743 _json["internalIpOnly"] = internalIpOnly;
1718 } 1744 }
1719 if (metadata != null) { 1745 if (metadata != null) {
1720 _json["metadata"] = metadata; 1746 _json["metadata"] = metadata;
1721 } 1747 }
1722 if (networkUri != null) { 1748 if (networkUri != null) {
1723 _json["networkUri"] = networkUri; 1749 _json["networkUri"] = networkUri;
1724 } 1750 }
1725 if (serviceAccount != null) { 1751 if (serviceAccount != null) {
1726 _json["serviceAccount"] = serviceAccount; 1752 _json["serviceAccount"] = serviceAccount;
1727 } 1753 }
1728 if (serviceAccountScopes != null) { 1754 if (serviceAccountScopes != null) {
1729 _json["serviceAccountScopes"] = serviceAccountScopes; 1755 _json["serviceAccountScopes"] = serviceAccountScopes;
1730 } 1756 }
1731 if (subnetworkUri != null) { 1757 if (subnetworkUri != null) {
1732 _json["subnetworkUri"] = subnetworkUri; 1758 _json["subnetworkUri"] = subnetworkUri;
1733 } 1759 }
1734 if (tags != null) { 1760 if (tags != null) {
1735 _json["tags"] = tags; 1761 _json["tags"] = tags;
1736 } 1762 }
1737 if (zoneUri != null) { 1763 if (zoneUri != null) {
1738 _json["zoneUri"] = zoneUri; 1764 _json["zoneUri"] = zoneUri;
1739 } 1765 }
1740 return _json; 1766 return _json;
1741 } 1767 }
1742 } 1768 }
1743 1769
1744 /** 1770 /// A Cloud Dataproc job for running Apache Hadoop MapReduce
1745 * A Cloud Dataproc job for running Apache Hadoop MapReduce 1771 /// (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapre duce-client-core/MapReduceTutorial.html)
1746 * (https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapred uce-client-core/MapReduceTutorial.html) 1772 /// jobs on Apache Hadoop YARN
1747 * jobs on Apache Hadoop YARN 1773 /// (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.htm l).
1748 * (https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html ).
1749 */
1750 class HadoopJob { 1774 class HadoopJob {
1751 /** 1775 /// Optional. HCFS URIs of archives to be extracted in the working directory
1752 * Optional. HCFS URIs of archives to be extracted in the working directory of 1776 /// of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz,
1753 * Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, 1777 /// .tgz, or .zip.
1754 * or .zip.
1755 */
1756 core.List<core.String> archiveUris; 1778 core.List<core.String> archiveUris;
1757 /** 1779
1758 * Optional. The arguments to pass to the driver. Do not include arguments, 1780 /// Optional. The arguments to pass to the driver. Do not include arguments,
1759 * such as -libjars or -Dfoo=bar, that can be set as job properties, since a 1781 /// such as -libjars or -Dfoo=bar, that can be set as job properties, since a
1760 * collision may occur that causes an incorrect job submission. 1782 /// collision may occur that causes an incorrect job submission.
1761 */
1762 core.List<core.String> args; 1783 core.List<core.String> args;
1763 /** 1784
1764 * Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to 1785 /// Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
1765 * the working directory of Hadoop drivers and distributed tasks. Useful for 1786 /// to the working directory of Hadoop drivers and distributed tasks. Useful
1766 * naively parallel tasks. 1787 /// for naively parallel tasks.
1767 */
1768 core.List<core.String> fileUris; 1788 core.List<core.String> fileUris;
1769 /** 1789
1770 * Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and 1790 /// Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and
1771 * tasks. 1791 /// tasks.
1772 */
1773 core.List<core.String> jarFileUris; 1792 core.List<core.String> jarFileUris;
1774 /** Optional. The runtime log config for job execution. */ 1793
1794 /// Optional. The runtime log config for job execution.
1775 LoggingConfig loggingConfig; 1795 LoggingConfig loggingConfig;
1776 /** 1796
1777 * The name of the driver's main class. The jar file containing the class must 1797 /// The name of the driver's main class. The jar file containing the class
1778 * be in the default CLASSPATH or specified in jar_file_uris. 1798 /// must be in the default CLASSPATH or specified in jar_file_uris.
1779 */
1780 core.String mainClass; 1799 core.String mainClass;
1781 /** 1800
1782 * The HCFS URI of the jar file containing the main class. Examples: 1801 /// The HCFS URI of the jar file containing the main class. Examples:
1783 * 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 1802 /// 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar'
1784 * 'hdfs:/tmp/test-samples/custom-wordcount.jar' 1803 /// 'hdfs:/tmp/test-samples/custom-wordcount.jar'
1785 * 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar' 1804 /// 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
1786 */
1787 core.String mainJarFileUri; 1805 core.String mainJarFileUri;
1788 /** 1806
1789 * Optional. A mapping of property names to values, used to configure Hadoop. 1807 /// Optional. A mapping of property names to values, used to configure
1790 * Properties that conflict with values set by the Cloud Dataproc API may be 1808 /// Hadoop. Properties that conflict with values set by the Cloud Dataproc
1791 * overwritten. Can include properties set in /etc/hadoop/conf / * -site and 1809 /// API may be overwritten. Can include properties set in /etc/hadoop/conf /
1792 * classes in user code. 1810 /// * -site and classes in user code.
1793 */
1794 core.Map<core.String, core.String> properties; 1811 core.Map<core.String, core.String> properties;
1795 1812
1796 HadoopJob(); 1813 HadoopJob();
1797 1814
1798 HadoopJob.fromJson(core.Map _json) { 1815 HadoopJob.fromJson(core.Map _json) {
1799 if (_json.containsKey("archiveUris")) { 1816 if (_json.containsKey("archiveUris")) {
1800 archiveUris = _json["archiveUris"]; 1817 archiveUris = _json["archiveUris"];
1801 } 1818 }
1802 if (_json.containsKey("args")) { 1819 if (_json.containsKey("args")) {
1803 args = _json["args"]; 1820 args = _json["args"];
(...skipping 12 matching lines...) Expand all
1816 } 1833 }
1817 if (_json.containsKey("mainJarFileUri")) { 1834 if (_json.containsKey("mainJarFileUri")) {
1818 mainJarFileUri = _json["mainJarFileUri"]; 1835 mainJarFileUri = _json["mainJarFileUri"];
1819 } 1836 }
1820 if (_json.containsKey("properties")) { 1837 if (_json.containsKey("properties")) {
1821 properties = _json["properties"]; 1838 properties = _json["properties"];
1822 } 1839 }
1823 } 1840 }
1824 1841
1825 core.Map<core.String, core.Object> toJson() { 1842 core.Map<core.String, core.Object> toJson() {
1826 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 1843 final core.Map<core.String, core.Object> _json =
1844 new core.Map<core.String, core.Object>();
1827 if (archiveUris != null) { 1845 if (archiveUris != null) {
1828 _json["archiveUris"] = archiveUris; 1846 _json["archiveUris"] = archiveUris;
1829 } 1847 }
1830 if (args != null) { 1848 if (args != null) {
1831 _json["args"] = args; 1849 _json["args"] = args;
1832 } 1850 }
1833 if (fileUris != null) { 1851 if (fileUris != null) {
1834 _json["fileUris"] = fileUris; 1852 _json["fileUris"] = fileUris;
1835 } 1853 }
1836 if (jarFileUris != null) { 1854 if (jarFileUris != null) {
1837 _json["jarFileUris"] = jarFileUris; 1855 _json["jarFileUris"] = jarFileUris;
1838 } 1856 }
1839 if (loggingConfig != null) { 1857 if (loggingConfig != null) {
1840 _json["loggingConfig"] = (loggingConfig).toJson(); 1858 _json["loggingConfig"] = (loggingConfig).toJson();
1841 } 1859 }
1842 if (mainClass != null) { 1860 if (mainClass != null) {
1843 _json["mainClass"] = mainClass; 1861 _json["mainClass"] = mainClass;
1844 } 1862 }
1845 if (mainJarFileUri != null) { 1863 if (mainJarFileUri != null) {
1846 _json["mainJarFileUri"] = mainJarFileUri; 1864 _json["mainJarFileUri"] = mainJarFileUri;
1847 } 1865 }
1848 if (properties != null) { 1866 if (properties != null) {
1849 _json["properties"] = properties; 1867 _json["properties"] = properties;
1850 } 1868 }
1851 return _json; 1869 return _json;
1852 } 1870 }
1853 } 1871 }
1854 1872
1855 /** 1873 /// A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/)
1856 * A Cloud Dataproc job for running Apache Hive (https://hive.apache.org/) 1874 /// queries on YARN.
1857 * queries on YARN.
1858 */
1859 class HiveJob { 1875 class HiveJob {
1860 /** 1876 /// Optional. Whether to continue executing queries if a query fails. The
1861 * Optional. Whether to continue executing queries if a query fails. The 1877 /// default value is false. Setting to true can be useful when executing
1862 * default value is false. Setting to true can be useful when executing 1878 /// independent parallel queries.
1863 * independent parallel queries.
1864 */
1865 core.bool continueOnFailure; 1879 core.bool continueOnFailure;
1866 /** 1880
1867 * Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server 1881 /// Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive
1868 * and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. 1882 /// server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
1869 */
1870 core.List<core.String> jarFileUris; 1883 core.List<core.String> jarFileUris;
1871 /** 1884
1872 * Optional. A mapping of property names and values, used to configure Hive. 1885 /// Optional. A mapping of property names and values, used to configure Hive.
1873 * Properties that conflict with values set by the Cloud Dataproc API may be 1886 /// Properties that conflict with values set by the Cloud Dataproc API may be
1874 * overwritten. Can include properties set in /etc/hadoop/conf / * -site.xml, 1887 /// overwritten. Can include properties set in /etc/hadoop/conf / *
1875 * /etc/hive/conf/hive-site.xml, and classes in user code. 1888 /// -site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.
1876 */
1877 core.Map<core.String, core.String> properties; 1889 core.Map<core.String, core.String> properties;
1878 /** The HCFS URI of the script that contains Hive queries. */ 1890
1891 /// The HCFS URI of the script that contains Hive queries.
1879 core.String queryFileUri; 1892 core.String queryFileUri;
1880 /** A list of queries. */ 1893
1894 /// A list of queries.
1881 QueryList queryList; 1895 QueryList queryList;
1882 /** 1896
1883 * Optional. Mapping of query variable names to values (equivalent to the Hive 1897 /// Optional. Mapping of query variable names to values (equivalent to the
1884 * command: SET name="value";). 1898 /// Hive command: SET name="value";).
1885 */
1886 core.Map<core.String, core.String> scriptVariables; 1899 core.Map<core.String, core.String> scriptVariables;
1887 1900
1888 HiveJob(); 1901 HiveJob();
1889 1902
1890 HiveJob.fromJson(core.Map _json) { 1903 HiveJob.fromJson(core.Map _json) {
1891 if (_json.containsKey("continueOnFailure")) { 1904 if (_json.containsKey("continueOnFailure")) {
1892 continueOnFailure = _json["continueOnFailure"]; 1905 continueOnFailure = _json["continueOnFailure"];
1893 } 1906 }
1894 if (_json.containsKey("jarFileUris")) { 1907 if (_json.containsKey("jarFileUris")) {
1895 jarFileUris = _json["jarFileUris"]; 1908 jarFileUris = _json["jarFileUris"];
1896 } 1909 }
1897 if (_json.containsKey("properties")) { 1910 if (_json.containsKey("properties")) {
1898 properties = _json["properties"]; 1911 properties = _json["properties"];
1899 } 1912 }
1900 if (_json.containsKey("queryFileUri")) { 1913 if (_json.containsKey("queryFileUri")) {
1901 queryFileUri = _json["queryFileUri"]; 1914 queryFileUri = _json["queryFileUri"];
1902 } 1915 }
1903 if (_json.containsKey("queryList")) { 1916 if (_json.containsKey("queryList")) {
1904 queryList = new QueryList.fromJson(_json["queryList"]); 1917 queryList = new QueryList.fromJson(_json["queryList"]);
1905 } 1918 }
1906 if (_json.containsKey("scriptVariables")) { 1919 if (_json.containsKey("scriptVariables")) {
1907 scriptVariables = _json["scriptVariables"]; 1920 scriptVariables = _json["scriptVariables"];
1908 } 1921 }
1909 } 1922 }
1910 1923
1911 core.Map<core.String, core.Object> toJson() { 1924 core.Map<core.String, core.Object> toJson() {
1912 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 1925 final core.Map<core.String, core.Object> _json =
1926 new core.Map<core.String, core.Object>();
1913 if (continueOnFailure != null) { 1927 if (continueOnFailure != null) {
1914 _json["continueOnFailure"] = continueOnFailure; 1928 _json["continueOnFailure"] = continueOnFailure;
1915 } 1929 }
1916 if (jarFileUris != null) { 1930 if (jarFileUris != null) {
1917 _json["jarFileUris"] = jarFileUris; 1931 _json["jarFileUris"] = jarFileUris;
1918 } 1932 }
1919 if (properties != null) { 1933 if (properties != null) {
1920 _json["properties"] = properties; 1934 _json["properties"] = properties;
1921 } 1935 }
1922 if (queryFileUri != null) { 1936 if (queryFileUri != null) {
1923 _json["queryFileUri"] = queryFileUri; 1937 _json["queryFileUri"] = queryFileUri;
1924 } 1938 }
1925 if (queryList != null) { 1939 if (queryList != null) {
1926 _json["queryList"] = (queryList).toJson(); 1940 _json["queryList"] = (queryList).toJson();
1927 } 1941 }
1928 if (scriptVariables != null) { 1942 if (scriptVariables != null) {
1929 _json["scriptVariables"] = scriptVariables; 1943 _json["scriptVariables"] = scriptVariables;
1930 } 1944 }
1931 return _json; 1945 return _json;
1932 } 1946 }
1933 } 1947 }
1934 1948
1935 /** 1949 /// Optional. The config settings for Google Compute Engine resources in an
1936 * Optional. The config settings for Google Compute Engine resources in an 1950 /// instance group, such as a master or worker group.
1937 * instance group, such as a master or worker group.
1938 */
1939 class InstanceGroupConfig { 1951 class InstanceGroupConfig {
1940 /** 1952 /// Optional. The Google Compute Engine accelerator configuration for these
1941 * Optional. The Google Compute Engine accelerator configuration for these 1953 /// instances.Beta Feature: This feature is still under development. It may
1942 * instances.Beta Feature: This feature is still under development. It may be 1954 /// be changed before final release.
1943 * changed before final release.
1944 */
1945 core.List<AcceleratorConfig> accelerators; 1955 core.List<AcceleratorConfig> accelerators;
1946 /** Optional. Disk option config settings. */ 1956
1957 /// Optional. Disk option config settings.
1947 DiskConfig diskConfig; 1958 DiskConfig diskConfig;
1948 /** 1959
1949 * Output-only. The Google Compute Engine image resource used for cluster 1960 /// Output-only. The Google Compute Engine image resource used for cluster
1950 * instances. Inferred from SoftwareConfig.image_version. 1961 /// instances. Inferred from SoftwareConfig.image_version.
1951 */
1952 core.String imageUri; 1962 core.String imageUri;
1953 /** 1963
1954 * Optional. The list of instance names. Cloud Dataproc derives the names from 1964 /// Optional. The list of instance names. Cloud Dataproc derives the names
1955 * cluster_name, num_instances, and the instance group if not set by user 1965 /// from cluster_name, num_instances, and the instance group if not set by
1956 * (recommended practice is to let Cloud Dataproc derive the name). 1966 /// user (recommended practice is to let Cloud Dataproc derive the name).
1957 */
1958 core.List<core.String> instanceNames; 1967 core.List<core.String> instanceNames;
1959 /** 1968
1960 * Optional. Specifies that this instance group contains preemptible 1969 /// Optional. Specifies that this instance group contains preemptible
1961 * instances. 1970 /// instances.
1962 */
1963 core.bool isPreemptible; 1971 core.bool isPreemptible;
1964 /** 1972
1965 * Optional. The Google Compute Engine machine type used for cluster 1973 /// Optional. The Google Compute Engine machine type used for cluster
1966 * instances.A full URL, partial URI, or short name are valid. Examples: 1974 /// instances.A full URL, partial URI, or short name are valid. Examples:
1967 * https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1- a/machineTypes/n1-standard-2 1975 /// https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1 -a/machineTypes/n1-standard-2
1968 * projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2 1976 /// projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2
1969 * n1-standard-2 1977 /// n1-standard-2
1970 */
1971 core.String machineTypeUri; 1978 core.String machineTypeUri;
1972 /** 1979
1973 * Output-only. The config for Google Compute Engine Instance Group Manager 1980 /// Output-only. The config for Google Compute Engine Instance Group Manager
1974 * that manages this group. This is only used for preemptible instance groups. 1981 /// that manages this group. This is only used for preemptible instance
1975 */ 1982 /// groups.
1976 ManagedGroupConfig managedGroupConfig; 1983 ManagedGroupConfig managedGroupConfig;
1977 /** 1984
1978 * Optional. The number of VM instances in the instance group. For master 1985 /// Optional. The number of VM instances in the instance group. For master
1979 * instance groups, must be set to 1. 1986 /// instance groups, must be set to 1.
1980 */
1981 core.int numInstances; 1987 core.int numInstances;
1982 1988
1983 InstanceGroupConfig(); 1989 InstanceGroupConfig();
1984 1990
1985 InstanceGroupConfig.fromJson(core.Map _json) { 1991 InstanceGroupConfig.fromJson(core.Map _json) {
1986 if (_json.containsKey("accelerators")) { 1992 if (_json.containsKey("accelerators")) {
1987 accelerators = _json["accelerators"].map((value) => new AcceleratorConfig. fromJson(value)).toList(); 1993 accelerators = _json["accelerators"]
1994 .map((value) => new AcceleratorConfig.fromJson(value))
1995 .toList();
1988 } 1996 }
1989 if (_json.containsKey("diskConfig")) { 1997 if (_json.containsKey("diskConfig")) {
1990 diskConfig = new DiskConfig.fromJson(_json["diskConfig"]); 1998 diskConfig = new DiskConfig.fromJson(_json["diskConfig"]);
1991 } 1999 }
1992 if (_json.containsKey("imageUri")) { 2000 if (_json.containsKey("imageUri")) {
1993 imageUri = _json["imageUri"]; 2001 imageUri = _json["imageUri"];
1994 } 2002 }
1995 if (_json.containsKey("instanceNames")) { 2003 if (_json.containsKey("instanceNames")) {
1996 instanceNames = _json["instanceNames"]; 2004 instanceNames = _json["instanceNames"];
1997 } 2005 }
1998 if (_json.containsKey("isPreemptible")) { 2006 if (_json.containsKey("isPreemptible")) {
1999 isPreemptible = _json["isPreemptible"]; 2007 isPreemptible = _json["isPreemptible"];
2000 } 2008 }
2001 if (_json.containsKey("machineTypeUri")) { 2009 if (_json.containsKey("machineTypeUri")) {
2002 machineTypeUri = _json["machineTypeUri"]; 2010 machineTypeUri = _json["machineTypeUri"];
2003 } 2011 }
2004 if (_json.containsKey("managedGroupConfig")) { 2012 if (_json.containsKey("managedGroupConfig")) {
2005 managedGroupConfig = new ManagedGroupConfig.fromJson(_json["managedGroupCo nfig"]); 2013 managedGroupConfig =
2014 new ManagedGroupConfig.fromJson(_json["managedGroupConfig"]);
2006 } 2015 }
2007 if (_json.containsKey("numInstances")) { 2016 if (_json.containsKey("numInstances")) {
2008 numInstances = _json["numInstances"]; 2017 numInstances = _json["numInstances"];
2009 } 2018 }
2010 } 2019 }
2011 2020
2012 core.Map<core.String, core.Object> toJson() { 2021 core.Map<core.String, core.Object> toJson() {
2013 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 2022 final core.Map<core.String, core.Object> _json =
2023 new core.Map<core.String, core.Object>();
2014 if (accelerators != null) { 2024 if (accelerators != null) {
2015 _json["accelerators"] = accelerators.map((value) => (value).toJson()).toLi st(); 2025 _json["accelerators"] =
2026 accelerators.map((value) => (value).toJson()).toList();
2016 } 2027 }
2017 if (diskConfig != null) { 2028 if (diskConfig != null) {
2018 _json["diskConfig"] = (diskConfig).toJson(); 2029 _json["diskConfig"] = (diskConfig).toJson();
2019 } 2030 }
2020 if (imageUri != null) { 2031 if (imageUri != null) {
2021 _json["imageUri"] = imageUri; 2032 _json["imageUri"] = imageUri;
2022 } 2033 }
2023 if (instanceNames != null) { 2034 if (instanceNames != null) {
2024 _json["instanceNames"] = instanceNames; 2035 _json["instanceNames"] = instanceNames;
2025 } 2036 }
2026 if (isPreemptible != null) { 2037 if (isPreemptible != null) {
2027 _json["isPreemptible"] = isPreemptible; 2038 _json["isPreemptible"] = isPreemptible;
2028 } 2039 }
2029 if (machineTypeUri != null) { 2040 if (machineTypeUri != null) {
2030 _json["machineTypeUri"] = machineTypeUri; 2041 _json["machineTypeUri"] = machineTypeUri;
2031 } 2042 }
2032 if (managedGroupConfig != null) { 2043 if (managedGroupConfig != null) {
2033 _json["managedGroupConfig"] = (managedGroupConfig).toJson(); 2044 _json["managedGroupConfig"] = (managedGroupConfig).toJson();
2034 } 2045 }
2035 if (numInstances != null) { 2046 if (numInstances != null) {
2036 _json["numInstances"] = numInstances; 2047 _json["numInstances"] = numInstances;
2037 } 2048 }
2038 return _json; 2049 return _json;
2039 } 2050 }
2040 } 2051 }
2041 2052
2042 /** A Cloud Dataproc job resource. */ 2053 /// A Cloud Dataproc job resource.
2043 class Job { 2054 class Job {
2044 /** 2055 /// Output-only. If present, the location of miscellaneous control files
2045 * Output-only. If present, the location of miscellaneous control files which 2056 /// which may be used as part of job setup and handling. If not present,
2046 * may be used as part of job setup and handling. If not present, control 2057 /// control files may be placed in the same location as driver_output_uri.
2047 * files may be placed in the same location as driver_output_uri.
2048 */
2049 core.String driverControlFilesUri; 2058 core.String driverControlFilesUri;
2050 /** 2059
2051 * Output-only. A URI pointing to the location of the stdout of the job's 2060 /// Output-only. A URI pointing to the location of the stdout of the job's
2052 * driver program. 2061 /// driver program.
2053 */
2054 core.String driverOutputResourceUri; 2062 core.String driverOutputResourceUri;
2055 /** Job is a Hadoop job. */ 2063
2064 /// Job is a Hadoop job.
2056 HadoopJob hadoopJob; 2065 HadoopJob hadoopJob;
2057 /** Job is a Hive job. */ 2066
2067 /// Job is a Hive job.
2058 HiveJob hiveJob; 2068 HiveJob hiveJob;
2059 /** 2069
2060 * Optional. The labels to associate with this job. Label keys must contain 1 2070 /// Optional. The labels to associate with this job. Label keys must contain
2061 * to 63 characters, and must conform to RFC 1035 2071 /// 1 to 63 characters, and must conform to RFC 1035
2062 * (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but, if 2072 /// (https://www.ietf.org/rfc/rfc1035.txt). Label values may be empty, but,
2063 * present, must contain 1 to 63 characters, and must conform to RFC 1035 2073 /// if present, must contain 1 to 63 characters, and must conform to RFC 1035
2064 * (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be 2074 /// (https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
2065 * associated with a job. 2075 /// associated with a job.
2066 */
2067 core.Map<core.String, core.String> labels; 2076 core.Map<core.String, core.String> labels;
2068 /** Job is a Pig job. */ 2077
2078 /// Job is a Pig job.
2069 PigJob pigJob; 2079 PigJob pigJob;
2070 /** 2080
2071 * Required. Job information, including how, when, and where to run the job. 2081 /// Required. Job information, including how, when, and where to run the job.
2072 */
2073 JobPlacement placement; 2082 JobPlacement placement;
2074 /** Job is a Pyspark job. */ 2083
2084 /// Job is a Pyspark job.
2075 PySparkJob pysparkJob; 2085 PySparkJob pysparkJob;
2076 /** 2086
2077 * Optional. The fully qualified reference to the job, which can be used to 2087 /// Optional. The fully qualified reference to the job, which can be used to
2078 * obtain the equivalent REST path of the job resource. If this property is 2088 /// obtain the equivalent REST path of the job resource. If this property is
2079 * not specified when a job is created, the server generates a 2089 /// not specified when a job is created, the server generates a
2080 * <code>job_id</code>. 2090 /// <code>job_id</code>.
2081 */
2082 JobReference reference; 2091 JobReference reference;
2083 /** Optional. Job scheduling configuration. */ 2092
2093 /// Optional. Job scheduling configuration.
2084 JobScheduling scheduling; 2094 JobScheduling scheduling;
2085 /** Job is a Spark job. */ 2095
2096 /// Job is a Spark job.
2086 SparkJob sparkJob; 2097 SparkJob sparkJob;
2087 /** Job is a SparkSql job. */ 2098
2099 /// Job is a SparkSql job.
2088 SparkSqlJob sparkSqlJob; 2100 SparkSqlJob sparkSqlJob;
2089 /** 2101
2090 * Output-only. The job status. Additional application-specific status 2102 /// Output-only. The job status. Additional application-specific status
2091 * information may be contained in the <code>type_job</code> and 2103 /// information may be contained in the <code>type_job</code> and
2092 * <code>yarn_applications</code> fields. 2104 /// <code>yarn_applications</code> fields.
2093 */
2094 JobStatus status; 2105 JobStatus status;
2095 /** Output-only. The previous job status. */ 2106
2107 /// Output-only. The previous job status.
2096 core.List<JobStatus> statusHistory; 2108 core.List<JobStatus> statusHistory;
2097 /** 2109
2098 * Output-only. The collection of YARN applications spun up by this job.Beta 2110 /// Output-only. The collection of YARN applications spun up by this job.Beta
2099 * Feature: This report is available for testing purposes only. It may be 2111 /// Feature: This report is available for testing purposes only. It may be
2100 * changed before final release. 2112 /// changed before final release.
2101 */
2102 core.List<YarnApplication> yarnApplications; 2113 core.List<YarnApplication> yarnApplications;
2103 2114
2104 Job(); 2115 Job();
2105 2116
2106 Job.fromJson(core.Map _json) { 2117 Job.fromJson(core.Map _json) {
2107 if (_json.containsKey("driverControlFilesUri")) { 2118 if (_json.containsKey("driverControlFilesUri")) {
2108 driverControlFilesUri = _json["driverControlFilesUri"]; 2119 driverControlFilesUri = _json["driverControlFilesUri"];
2109 } 2120 }
2110 if (_json.containsKey("driverOutputResourceUri")) { 2121 if (_json.containsKey("driverOutputResourceUri")) {
2111 driverOutputResourceUri = _json["driverOutputResourceUri"]; 2122 driverOutputResourceUri = _json["driverOutputResourceUri"];
(...skipping 25 matching lines...) Expand all
2137 if (_json.containsKey("sparkJob")) { 2148 if (_json.containsKey("sparkJob")) {
2138 sparkJob = new SparkJob.fromJson(_json["sparkJob"]); 2149 sparkJob = new SparkJob.fromJson(_json["sparkJob"]);
2139 } 2150 }
2140 if (_json.containsKey("sparkSqlJob")) { 2151 if (_json.containsKey("sparkSqlJob")) {
2141 sparkSqlJob = new SparkSqlJob.fromJson(_json["sparkSqlJob"]); 2152 sparkSqlJob = new SparkSqlJob.fromJson(_json["sparkSqlJob"]);
2142 } 2153 }
2143 if (_json.containsKey("status")) { 2154 if (_json.containsKey("status")) {
2144 status = new JobStatus.fromJson(_json["status"]); 2155 status = new JobStatus.fromJson(_json["status"]);
2145 } 2156 }
2146 if (_json.containsKey("statusHistory")) { 2157 if (_json.containsKey("statusHistory")) {
2147 statusHistory = _json["statusHistory"].map((value) => new JobStatus.fromJs on(value)).toList(); 2158 statusHistory = _json["statusHistory"]
2159 .map((value) => new JobStatus.fromJson(value))
2160 .toList();
2148 } 2161 }
2149 if (_json.containsKey("yarnApplications")) { 2162 if (_json.containsKey("yarnApplications")) {
2150 yarnApplications = _json["yarnApplications"].map((value) => new YarnApplic ation.fromJson(value)).toList(); 2163 yarnApplications = _json["yarnApplications"]
2164 .map((value) => new YarnApplication.fromJson(value))
2165 .toList();
2151 } 2166 }
2152 } 2167 }
2153 2168
2154 core.Map<core.String, core.Object> toJson() { 2169 core.Map<core.String, core.Object> toJson() {
2155 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 2170 final core.Map<core.String, core.Object> _json =
2171 new core.Map<core.String, core.Object>();
2156 if (driverControlFilesUri != null) { 2172 if (driverControlFilesUri != null) {
2157 _json["driverControlFilesUri"] = driverControlFilesUri; 2173 _json["driverControlFilesUri"] = driverControlFilesUri;
2158 } 2174 }
2159 if (driverOutputResourceUri != null) { 2175 if (driverOutputResourceUri != null) {
2160 _json["driverOutputResourceUri"] = driverOutputResourceUri; 2176 _json["driverOutputResourceUri"] = driverOutputResourceUri;
2161 } 2177 }
2162 if (hadoopJob != null) { 2178 if (hadoopJob != null) {
2163 _json["hadoopJob"] = (hadoopJob).toJson(); 2179 _json["hadoopJob"] = (hadoopJob).toJson();
2164 } 2180 }
2165 if (hiveJob != null) { 2181 if (hiveJob != null) {
(...skipping 20 matching lines...) Expand all
2186 if (sparkJob != null) { 2202 if (sparkJob != null) {
2187 _json["sparkJob"] = (sparkJob).toJson(); 2203 _json["sparkJob"] = (sparkJob).toJson();
2188 } 2204 }
2189 if (sparkSqlJob != null) { 2205 if (sparkSqlJob != null) {
2190 _json["sparkSqlJob"] = (sparkSqlJob).toJson(); 2206 _json["sparkSqlJob"] = (sparkSqlJob).toJson();
2191 } 2207 }
2192 if (status != null) { 2208 if (status != null) {
2193 _json["status"] = (status).toJson(); 2209 _json["status"] = (status).toJson();
2194 } 2210 }
2195 if (statusHistory != null) { 2211 if (statusHistory != null) {
2196 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to List(); 2212 _json["statusHistory"] =
2213 statusHistory.map((value) => (value).toJson()).toList();
2197 } 2214 }
2198 if (yarnApplications != null) { 2215 if (yarnApplications != null) {
2199 _json["yarnApplications"] = yarnApplications.map((value) => (value).toJson ()).toList(); 2216 _json["yarnApplications"] =
2217 yarnApplications.map((value) => (value).toJson()).toList();
2200 } 2218 }
2201 return _json; 2219 return _json;
2202 } 2220 }
2203 } 2221 }
2204 2222
2205 /** Cloud Dataproc job config. */ 2223 /// Cloud Dataproc job config.
2206 class JobPlacement { 2224 class JobPlacement {
2207 /** Required. The name of the cluster where the job will be submitted. */ 2225 /// Required. The name of the cluster where the job will be submitted.
2208 core.String clusterName; 2226 core.String clusterName;
2209 /** 2227
2210 * Output-only. A cluster UUID generated by the Cloud Dataproc service when 2228 /// Output-only. A cluster UUID generated by the Cloud Dataproc service when
2211 * the job is submitted. 2229 /// the job is submitted.
2212 */
2213 core.String clusterUuid; 2230 core.String clusterUuid;
2214 2231
2215 JobPlacement(); 2232 JobPlacement();
2216 2233
2217 JobPlacement.fromJson(core.Map _json) { 2234 JobPlacement.fromJson(core.Map _json) {
2218 if (_json.containsKey("clusterName")) { 2235 if (_json.containsKey("clusterName")) {
2219 clusterName = _json["clusterName"]; 2236 clusterName = _json["clusterName"];
2220 } 2237 }
2221 if (_json.containsKey("clusterUuid")) { 2238 if (_json.containsKey("clusterUuid")) {
2222 clusterUuid = _json["clusterUuid"]; 2239 clusterUuid = _json["clusterUuid"];
2223 } 2240 }
2224 } 2241 }
2225 2242
2226 core.Map<core.String, core.Object> toJson() { 2243 core.Map<core.String, core.Object> toJson() {
2227 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 2244 final core.Map<core.String, core.Object> _json =
2245 new core.Map<core.String, core.Object>();
2228 if (clusterName != null) { 2246 if (clusterName != null) {
2229 _json["clusterName"] = clusterName; 2247 _json["clusterName"] = clusterName;
2230 } 2248 }
2231 if (clusterUuid != null) { 2249 if (clusterUuid != null) {
2232 _json["clusterUuid"] = clusterUuid; 2250 _json["clusterUuid"] = clusterUuid;
2233 } 2251 }
2234 return _json; 2252 return _json;
2235 } 2253 }
2236 } 2254 }
2237 2255
2238 /** Encapsulates the full scoping used to reference a job. */ 2256 /// Encapsulates the full scoping used to reference a job.
2239 class JobReference { 2257 class JobReference {
2240 /** 2258 /// Optional. The job ID, which must be unique within the project. The job ID
2241 * Optional. The job ID, which must be unique within the project. The job ID 2259 /// is generated by the server upon job submission or provided by the user as
2242 * is generated by the server upon job submission or provided by the user as a 2260 /// a means to perform retries without creating duplicate jobs. The ID must
2243 * means to perform retries without creating duplicate jobs. The ID must 2261 /// contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or
2244 * contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens 2262 /// hyphens (-). The maximum length is 100 characters.
2245 * (-). The maximum length is 100 characters.
2246 */
2247 core.String jobId; 2263 core.String jobId;
2248 /** 2264
2249 * Required. The ID of the Google Cloud Platform project that the job belongs 2265 /// Required. The ID of the Google Cloud Platform project that the job
2250 * to. 2266 /// belongs to.
2251 */
2252 core.String projectId; 2267 core.String projectId;
2253 2268
2254 JobReference(); 2269 JobReference();
2255 2270
2256 JobReference.fromJson(core.Map _json) { 2271 JobReference.fromJson(core.Map _json) {
2257 if (_json.containsKey("jobId")) { 2272 if (_json.containsKey("jobId")) {
2258 jobId = _json["jobId"]; 2273 jobId = _json["jobId"];
2259 } 2274 }
2260 if (_json.containsKey("projectId")) { 2275 if (_json.containsKey("projectId")) {
2261 projectId = _json["projectId"]; 2276 projectId = _json["projectId"];
2262 } 2277 }
2263 } 2278 }
2264 2279
2265 core.Map<core.String, core.Object> toJson() { 2280 core.Map<core.String, core.Object> toJson() {
2266 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 2281 final core.Map<core.String, core.Object> _json =
2282 new core.Map<core.String, core.Object>();
2267 if (jobId != null) { 2283 if (jobId != null) {
2268 _json["jobId"] = jobId; 2284 _json["jobId"] = jobId;
2269 } 2285 }
2270 if (projectId != null) { 2286 if (projectId != null) {
2271 _json["projectId"] = projectId; 2287 _json["projectId"] = projectId;
2272 } 2288 }
2273 return _json; 2289 return _json;
2274 } 2290 }
2275 } 2291 }
2276 2292
2277 /** 2293 /// Job scheduling options.Beta Feature: These options are available for
2278 * Job scheduling options.Beta Feature: These options are available for testing 2294 /// testing purposes only. They may be changed before final release.
2279 * purposes only. They may be changed before final release.
2280 */
2281 class JobScheduling { 2295 class JobScheduling {
2282 /** 2296 /// Optional. Maximum number of times per hour a driver may be restarted as a
2283 * Optional. Maximum number of times per hour a driver may be restarted as a 2297 /// result of driver terminating with non-zero code before job is reported
2284 * result of driver terminating with non-zero code before job is reported 2298 /// failed.A job may be reported as thrashing if driver exits with non-zero
2285 * failed.A job may be reported as thrashing if driver exits with non-zero 2299 /// code 4 times within 10 minute window.Maximum value is 10.
2286 * code 4 times within 10 minute window.Maximum value is 10.
2287 */
2288 core.int maxFailuresPerHour; 2300 core.int maxFailuresPerHour;
2289 2301
2290 JobScheduling(); 2302 JobScheduling();
2291 2303
2292 JobScheduling.fromJson(core.Map _json) { 2304 JobScheduling.fromJson(core.Map _json) {
2293 if (_json.containsKey("maxFailuresPerHour")) { 2305 if (_json.containsKey("maxFailuresPerHour")) {
2294 maxFailuresPerHour = _json["maxFailuresPerHour"]; 2306 maxFailuresPerHour = _json["maxFailuresPerHour"];
2295 } 2307 }
2296 } 2308 }
2297 2309
2298 core.Map<core.String, core.Object> toJson() { 2310 core.Map<core.String, core.Object> toJson() {
2299 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 2311 final core.Map<core.String, core.Object> _json =
2312 new core.Map<core.String, core.Object>();
2300 if (maxFailuresPerHour != null) { 2313 if (maxFailuresPerHour != null) {
2301 _json["maxFailuresPerHour"] = maxFailuresPerHour; 2314 _json["maxFailuresPerHour"] = maxFailuresPerHour;
2302 } 2315 }
2303 return _json; 2316 return _json;
2304 } 2317 }
2305 } 2318 }
2306 2319
/// Cloud Dataproc job status.
class JobStatus {
  /// Output-only. Optional job state details, such as an error description if
  /// the state is <code>ERROR</code>.
  core.String details;

  /// Output-only. A state message specifying the overall job state.
  /// Possible string values are:
  /// - "STATE_UNSPECIFIED" : The job state is unknown.
  /// - "PENDING" : The job is pending; it has been submitted, but is not yet
  /// running.
  /// - "SETUP_DONE" : Job has been received by the service and completed
  /// initial setup; it will soon be submitted to the cluster.
  /// - "RUNNING" : The job is running on the cluster.
  /// - "CANCEL_PENDING" : A CancelJob request has been received, but is
  /// pending.
  /// - "CANCEL_STARTED" : Transient in-flight resources have been canceled,
  /// and the request to cancel the running job has been issued to the cluster.
  /// - "CANCELLED" : The job cancellation was successful.
  /// - "DONE" : The job has completed successfully.
  /// - "ERROR" : The job has completed, but encountered an error.
  /// - "ATTEMPT_FAILURE" : Job attempt has failed. The detail field contains
  /// failure details for this attempt. Applies to restartable jobs only.
  core.String state;

  /// Output-only. The time when this state was entered.
  core.String stateStartTime;

  /// Output-only. Additional state information, which includes status reported
  /// by the agent.
  /// Possible string values are:
  /// - "UNSPECIFIED"
  /// - "SUBMITTED" : The Job is submitted to the agent. Applies to RUNNING
  /// state.
  /// - "QUEUED" : The Job has been received and is awaiting execution (it may
  /// be waiting for a condition to be met). See the "details" field for the
  /// reason for the delay. Applies to RUNNING state.
  /// - "STALE_STATUS" : The agent-reported status is out of date, which may be
  /// caused by a loss of communication between the agent and Cloud Dataproc.
  /// If the agent does not send a timely update, the job will fail. Applies to
  /// RUNNING state.
  core.String substate;

  JobStatus();

  /// Builds a status from its JSON map form; keys that are absent leave the
  /// corresponding field null.
  JobStatus.fromJson(core.Map json) {
    if (json.containsKey("details")) details = json["details"];
    if (json.containsKey("state")) state = json["state"];
    if (json.containsKey("stateStartTime")) {
      stateStartTime = json["stateStartTime"];
    }
    if (json.containsKey("substate")) substate = json["substate"];
  }

  /// Serializes this status to a JSON map, omitting fields that are null.
  core.Map<core.String, core.Object> toJson() {
    final json = <core.String, core.Object>{};
    if (details != null) json["details"] = details;
    if (state != null) json["state"] = state;
    if (stateStartTime != null) json["stateStartTime"] = stateStartTime;
    if (substate != null) json["substate"] = substate;
    return json;
  }
}
2385 2398
/// The list of all clusters in a project.
class ListClustersResponse {
  /// Output-only. The clusters in the project.
  core.List<Cluster> clusters;

  /// Output-only. This token is included in the response if there are more
  /// results to fetch. To fetch additional results, provide this value as the
  /// page_token in a subsequent ListClustersRequest.
  core.String nextPageToken;

  ListClustersResponse();

  /// Builds a response from its JSON map form; absent keys leave fields null.
  ListClustersResponse.fromJson(core.Map json) {
    if (json.containsKey("clusters")) {
      clusters =
          json["clusters"].map((c) => new Cluster.fromJson(c)).toList();
    }
    if (json.containsKey("nextPageToken")) {
      nextPageToken = json["nextPageToken"];
    }
  }

  /// Serializes this response to a JSON map, omitting fields that are null.
  core.Map<core.String, core.Object> toJson() {
    final json = <core.String, core.Object>{};
    if (clusters != null) {
      json["clusters"] = clusters.map((c) => c.toJson()).toList();
    }
    if (nextPageToken != null) json["nextPageToken"] = nextPageToken;
    return json;
  }
}
2419 2434
/// A list of jobs in a project.
class ListJobsResponse {
  /// Output-only. Jobs list.
  core.List<Job> jobs;

  /// Optional. This token is included in the response if there are more
  /// results to fetch. To fetch additional results, provide this value as the
  /// page_token in a subsequent <code>ListJobsRequest</code>.
  core.String nextPageToken;

  ListJobsResponse();

  /// Builds a response from its JSON map form; absent keys leave fields null.
  ListJobsResponse.fromJson(core.Map json) {
    if (json.containsKey("jobs")) {
      jobs = json["jobs"].map((j) => new Job.fromJson(j)).toList();
    }
    if (json.containsKey("nextPageToken")) {
      nextPageToken = json["nextPageToken"];
    }
  }

  /// Serializes this response to a JSON map, omitting fields that are null.
  core.Map<core.String, core.Object> toJson() {
    final json = <core.String, core.Object>{};
    if (jobs != null) {
      json["jobs"] = jobs.map((j) => j.toJson()).toList();
    }
    if (nextPageToken != null) json["nextPageToken"] = nextPageToken;
    return json;
  }
}
2453 2468
/// The response message for Operations.ListOperations.
class ListOperationsResponse {
  /// The standard List next-page token.
  core.String nextPageToken;

  /// A list of operations that matches the specified filter in the request.
  core.List<Operation> operations;

  ListOperationsResponse();

  /// Builds a response from its JSON map form; absent keys leave fields null.
  ListOperationsResponse.fromJson(core.Map json) {
    if (json.containsKey("nextPageToken")) {
      nextPageToken = json["nextPageToken"];
    }
    if (json.containsKey("operations")) {
      operations =
          json["operations"].map((op) => new Operation.fromJson(op)).toList();
    }
  }

  /// Serializes this response to a JSON map, omitting fields that are null.
  core.Map<core.String, core.Object> toJson() {
    final json = <core.String, core.Object>{};
    if (nextPageToken != null) json["nextPageToken"] = nextPageToken;
    if (operations != null) {
      json["operations"] = operations.map((op) => op.toJson()).toList();
    }
    return json;
  }
}
2483 2503
/// The runtime logging config of the job.
class LoggingConfig {
  /// The per-package log levels for the driver. This may include "root"
  /// package name to configure rootLogger. Examples: 'com.google = FATAL',
  /// 'root = INFO', 'org.apache = DEBUG'
  core.Map<core.String, core.String> driverLogLevels;

  LoggingConfig();

  /// Builds a config from its JSON map form; an absent key leaves the field
  /// null.
  LoggingConfig.fromJson(core.Map json) {
    if (json.containsKey("driverLogLevels")) {
      driverLogLevels = json["driverLogLevels"];
    }
  }

  /// Serializes this config to a JSON map, omitting the field when null.
  core.Map<core.String, core.Object> toJson() {
    final json = <core.String, core.Object>{};
    if (driverLogLevels != null) json["driverLogLevels"] = driverLogLevels;
    return json;
  }
}
2509 2528
/// Specifies the resources used to actively manage an instance group.
class ManagedGroupConfig {
  /// Output-only. The name of the Instance Group Manager for this group.
  core.String instanceGroupManagerName;

  /// Output-only. The name of the Instance Template used for the Managed
  /// Instance Group.
  core.String instanceTemplateName;

  ManagedGroupConfig();

  /// Builds a config from its JSON map form; absent keys leave fields null.
  ManagedGroupConfig.fromJson(core.Map json) {
    if (json.containsKey("instanceGroupManagerName")) {
      instanceGroupManagerName = json["instanceGroupManagerName"];
    }
    if (json.containsKey("instanceTemplateName")) {
      instanceTemplateName = json["instanceTemplateName"];
    }
  }

  /// Serializes this config to a JSON map, omitting fields that are null.
  core.Map<core.String, core.Object> toJson() {
    final json = <core.String, core.Object>{};
    if (instanceGroupManagerName != null) {
      json["instanceGroupManagerName"] = instanceGroupManagerName;
    }
    if (instanceTemplateName != null) {
      json["instanceTemplateName"] = instanceTemplateName;
    }
    return json;
  }
}
2542 2561
/// Specifies an executable to run on a fully configured node and a timeout
/// period for executable completion.
class NodeInitializationAction {
  /// Required. Google Cloud Storage URI of executable file.
  core.String executableFile;

  /// Optional. Amount of time executable has to complete. Default is 10
  /// minutes. Cluster creation fails with an explanatory error message (the
  /// name of the executable that caused the error and the exceeded timeout
  /// period) if the executable is not completed at end of the timeout period.
  core.String executionTimeout;

  NodeInitializationAction();

  /// Builds an action from its JSON map form; absent keys leave fields null.
  NodeInitializationAction.fromJson(core.Map json) {
    if (json.containsKey("executableFile")) {
      executableFile = json["executableFile"];
    }
    if (json.containsKey("executionTimeout")) {
      executionTimeout = json["executionTimeout"];
    }
  }

  /// Serializes this action to a JSON map, omitting fields that are null.
  core.Map<core.String, core.Object> toJson() {
    final json = <core.String, core.Object>{};
    if (executableFile != null) json["executableFile"] = executableFile;
    if (executionTimeout != null) json["executionTimeout"] = executionTimeout;
    return json;
  }
}
2580 2597
/// This resource represents a long-running operation that is the result of a
/// network API call.
class Operation {
  /// If the value is false, it means the operation is still in progress. If
  /// true, the operation is completed, and either error or response is
  /// available.
  core.bool done;

  /// The error result of the operation in case of failure or cancellation.
  Status error;

  /// Service-specific metadata associated with the operation. It typically
  /// contains progress information and common metadata such as create time.
  /// Some services might not provide such metadata. Any method that returns a
  /// long-running operation should document the metadata type, if any.
  ///
  /// The values for Object must be JSON objects. It can consist of `num`,
  /// `String`, `bool` and `null` as well as `Map` and `List` values.
  core.Map<core.String, core.Object> metadata;

  /// The server-assigned name, which is only unique within the same service
  /// that originally returns it. If you use the default HTTP mapping, the name
  /// should have the format of operations/some/unique/name.
  core.String name;

  /// The normal response of the operation in case of success. If the original
  /// method returns no data on success, such as Delete, the response is
  /// google.protobuf.Empty. If the original method is standard
  /// Get/Create/Update, the response should be the resource. For other
  /// methods, the response should have the type XxxResponse, where Xxx is the
  /// original method name. For example, if the original method name is
  /// TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
  ///
  /// The values for Object must be JSON objects. It can consist of `num`,
  /// `String`, `bool` and `null` as well as `Map` and `List` values.
  core.Map<core.String, core.Object> response;

  Operation();

  /// Builds an operation from its JSON map form; absent keys leave fields
  /// null.
  Operation.fromJson(core.Map json) {
    if (json.containsKey("done")) done = json["done"];
    if (json.containsKey("error")) {
      error = new Status.fromJson(json["error"]);
    }
    if (json.containsKey("metadata")) metadata = json["metadata"];
    if (json.containsKey("name")) name = json["name"];
    if (json.containsKey("response")) response = json["response"];
  }

  /// Serializes this operation to a JSON map, omitting fields that are null.
  core.Map<core.String, core.Object> toJson() {
    final json = <core.String, core.Object>{};
    if (done != null) json["done"] = done;
    if (error != null) json["error"] = error.toJson();
    if (metadata != null) json["metadata"] = metadata;
    if (name != null) json["name"] = name;
    if (response != null) json["response"] = response;
    return json;
  }
}
2664 2676
/// A Cloud Dataproc job for running Apache Pig (https://pig.apache.org/)
/// queries on YARN.
class PigJob {
  /// Optional. Whether to continue executing queries if a query fails. The
  /// default value is false. Setting to true can be useful when executing
  /// independent parallel queries.
  core.bool continueOnFailure;

  /// Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig
  /// Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
  core.List<core.String> jarFileUris;

  /// Optional. The runtime log config for job execution.
  LoggingConfig loggingConfig;

  /// Optional. A mapping of property names to values, used to configure Pig.
  /// Properties that conflict with values set by the Cloud Dataproc API may be
  /// overwritten. Can include properties set in /etc/hadoop/conf / *
  /// -site.xml, /etc/pig/conf/pig.properties, and classes in user code.
  core.Map<core.String, core.String> properties;

  /// The HCFS URI of the script that contains the Pig queries.
  core.String queryFileUri;

  /// A list of queries.
  QueryList queryList;

  /// Optional. Mapping of query variable names to values (equivalent to the
  /// Pig command: name=[value]).
  core.Map<core.String, core.String> scriptVariables;

  PigJob();

  /// Builds a Pig job from its JSON map form; absent keys leave fields null.
  PigJob.fromJson(core.Map json) {
    if (json.containsKey("continueOnFailure")) {
      continueOnFailure = json["continueOnFailure"];
    }
    if (json.containsKey("jarFileUris")) jarFileUris = json["jarFileUris"];
    if (json.containsKey("loggingConfig")) {
      loggingConfig = new LoggingConfig.fromJson(json["loggingConfig"]);
    }
    if (json.containsKey("properties")) properties = json["properties"];
    if (json.containsKey("queryFileUri")) queryFileUri = json["queryFileUri"];
    if (json.containsKey("queryList")) {
      queryList = new QueryList.fromJson(json["queryList"]);
    }
    if (json.containsKey("scriptVariables")) {
      scriptVariables = json["scriptVariables"];
    }
  }

  /// Serializes this job to a JSON map, omitting fields that are null.
  core.Map<core.String, core.Object> toJson() {
    final json = <core.String, core.Object>{};
    if (continueOnFailure != null) {
      json["continueOnFailure"] = continueOnFailure;
    }
    if (jarFileUris != null) json["jarFileUris"] = jarFileUris;
    if (loggingConfig != null) json["loggingConfig"] = loggingConfig.toJson();
    if (properties != null) json["properties"] = properties;
    if (queryFileUri != null) json["queryFileUri"] = queryFileUri;
    if (queryList != null) json["queryList"] = queryList.toJson();
    if (scriptVariables != null) json["scriptVariables"] = scriptVariables;
    return json;
  }
}
2752 2761
2753 /** 2762 /// A Cloud Dataproc job for running Apache PySpark
2754 * A Cloud Dataproc job for running Apache PySpark 2763 /// (https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
2755 * (https://spark.apache.org/docs/0.9.0/python-programming-guide.html) 2764 /// applications on YARN.
2756 * applications on YARN.
2757 */
2758 class PySparkJob { 2765 class PySparkJob {
2759 /** 2766 /// Optional. HCFS URIs of archives to be extracted in the working directory
2760 * Optional. HCFS URIs of archives to be extracted in the working directory of 2767 /// of .jar, .tar, .tar.gz, .tgz, and .zip.
2761 * .jar, .tar, .tar.gz, .tgz, and .zip.
2762 */
2763 core.List<core.String> archiveUris; 2768 core.List<core.String> archiveUris;
2764 /** 2769
2765 * Optional. The arguments to pass to the driver. Do not include arguments, 2770 /// Optional. The arguments to pass to the driver. Do not include arguments,
2766 * such as --conf, that can be set as job properties, since a collision may 2771 /// such as --conf, that can be set as job properties, since a collision may
2767 * occur that causes an incorrect job submission. 2772 /// occur that causes an incorrect job submission.
2768 */
2769 core.List<core.String> args; 2773 core.List<core.String> args;
2770 /** 2774
2771 * Optional. HCFS URIs of files to be copied to the working directory of 2775 /// Optional. HCFS URIs of files to be copied to the working directory of
2772 * Python drivers and distributed tasks. Useful for naively parallel tasks. 2776 /// Python drivers and distributed tasks. Useful for naively parallel tasks.
2773 */
2774 core.List<core.String> fileUris; 2777 core.List<core.String> fileUris;
2775 /** 2778
2776 * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python 2779 /// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python
2777 * driver and tasks. 2780 /// driver and tasks.
2778 */
2779 core.List<core.String> jarFileUris; 2781 core.List<core.String> jarFileUris;
2780 /** Optional. The runtime log config for job execution. */ 2782
2783 /// Optional. The runtime log config for job execution.
2781 LoggingConfig loggingConfig; 2784 LoggingConfig loggingConfig;
2782 /** 2785
2783 * Required. The HCFS URI of the main Python file to use as the driver. Must 2786 /// Required. The HCFS URI of the main Python file to use as the driver. Must
2784 * be a .py file. 2787 /// be a .py file.
2785 */
2786 core.String mainPythonFileUri; 2788 core.String mainPythonFileUri;
2787 /** 2789
2788 * Optional. A mapping of property names to values, used to configure PySpark. 2790 /// Optional. A mapping of property names to values, used to configure
2789 * Properties that conflict with values set by the Cloud Dataproc API may be 2791 /// PySpark. Properties that conflict with values set by the Cloud Dataproc
2790 * overwritten. Can include properties set in 2792 /// API may be overwritten. Can include properties set in
2791 * /etc/spark/conf/spark-defaults.conf and classes in user code. 2793 /// /etc/spark/conf/spark-defaults.conf and classes in user code.
2792 */
2793 core.Map<core.String, core.String> properties; 2794 core.Map<core.String, core.String> properties;
2794 /** 2795
2795 * Optional. HCFS file URIs of Python files to pass to the PySpark framework. 2796 /// Optional. HCFS file URIs of Python files to pass to the PySpark
2796 * Supported file types: .py, .egg, and .zip. 2797 /// framework. Supported file types: .py, .egg, and .zip.
2797 */
2798 core.List<core.String> pythonFileUris; 2798 core.List<core.String> pythonFileUris;
2799 2799
2800 PySparkJob(); 2800 PySparkJob();
2801 2801
2802 PySparkJob.fromJson(core.Map _json) { 2802 PySparkJob.fromJson(core.Map _json) {
2803 if (_json.containsKey("archiveUris")) { 2803 if (_json.containsKey("archiveUris")) {
2804 archiveUris = _json["archiveUris"]; 2804 archiveUris = _json["archiveUris"];
2805 } 2805 }
2806 if (_json.containsKey("args")) { 2806 if (_json.containsKey("args")) {
2807 args = _json["args"]; 2807 args = _json["args"];
(...skipping 12 matching lines...) Expand all
2820 } 2820 }
2821 if (_json.containsKey("properties")) { 2821 if (_json.containsKey("properties")) {
2822 properties = _json["properties"]; 2822 properties = _json["properties"];
2823 } 2823 }
2824 if (_json.containsKey("pythonFileUris")) { 2824 if (_json.containsKey("pythonFileUris")) {
2825 pythonFileUris = _json["pythonFileUris"]; 2825 pythonFileUris = _json["pythonFileUris"];
2826 } 2826 }
2827 } 2827 }
2828 2828
2829 core.Map<core.String, core.Object> toJson() { 2829 core.Map<core.String, core.Object> toJson() {
2830 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 2830 final core.Map<core.String, core.Object> _json =
2831 new core.Map<core.String, core.Object>();
2831 if (archiveUris != null) { 2832 if (archiveUris != null) {
2832 _json["archiveUris"] = archiveUris; 2833 _json["archiveUris"] = archiveUris;
2833 } 2834 }
2834 if (args != null) { 2835 if (args != null) {
2835 _json["args"] = args; 2836 _json["args"] = args;
2836 } 2837 }
2837 if (fileUris != null) { 2838 if (fileUris != null) {
2838 _json["fileUris"] = fileUris; 2839 _json["fileUris"] = fileUris;
2839 } 2840 }
2840 if (jarFileUris != null) { 2841 if (jarFileUris != null) {
2841 _json["jarFileUris"] = jarFileUris; 2842 _json["jarFileUris"] = jarFileUris;
2842 } 2843 }
2843 if (loggingConfig != null) { 2844 if (loggingConfig != null) {
2844 _json["loggingConfig"] = (loggingConfig).toJson(); 2845 _json["loggingConfig"] = (loggingConfig).toJson();
2845 } 2846 }
2846 if (mainPythonFileUri != null) { 2847 if (mainPythonFileUri != null) {
2847 _json["mainPythonFileUri"] = mainPythonFileUri; 2848 _json["mainPythonFileUri"] = mainPythonFileUri;
2848 } 2849 }
2849 if (properties != null) { 2850 if (properties != null) {
2850 _json["properties"] = properties; 2851 _json["properties"] = properties;
2851 } 2852 }
2852 if (pythonFileUris != null) { 2853 if (pythonFileUris != null) {
2853 _json["pythonFileUris"] = pythonFileUris; 2854 _json["pythonFileUris"] = pythonFileUris;
2854 } 2855 }
2855 return _json; 2856 return _json;
2856 } 2857 }
2857 } 2858 }
2858 2859
2859 /** A list of queries to run on a cluster. */ 2860 /// A list of queries to run on a cluster.
2860 class QueryList { 2861 class QueryList {
2861 /** 2862 /// Required. The queries to execute. You do not need to terminate a query
2862 * Required. The queries to execute. You do not need to terminate a query with 2863 /// with a semicolon. Multiple queries can be specified in one string by
2863 * a semicolon. Multiple queries can be specified in one string by separating 2864 /// separating each with a semicolon. Here is an example of an Cloud Dataproc
2864 * each with a semicolon. Here is an example of an Cloud Dataproc API snippet 2865 /// API snippet that uses a QueryList to specify a HiveJob:
2865 * that uses a QueryList to specify a HiveJob: 2866 /// "hiveJob": {
2866 * "hiveJob": { 2867 /// "queryList": {
2867 * "queryList": { 2868 /// "queries": [
2868 * "queries": [ 2869 /// "query1",
2869 * "query1", 2870 /// "query2",
2870 * "query2", 2871 /// "query3;query4",
2871 * "query3;query4", 2872 /// ]
2872 * ] 2873 /// }
2873 * } 2874 /// }
2874 * }
2875 */
2876 core.List<core.String> queries; 2875 core.List<core.String> queries;
2877 2876
2878 QueryList(); 2877 QueryList();
2879 2878
2880 QueryList.fromJson(core.Map _json) { 2879 QueryList.fromJson(core.Map _json) {
2881 if (_json.containsKey("queries")) { 2880 if (_json.containsKey("queries")) {
2882 queries = _json["queries"]; 2881 queries = _json["queries"];
2883 } 2882 }
2884 } 2883 }
2885 2884
2886 core.Map<core.String, core.Object> toJson() { 2885 core.Map<core.String, core.Object> toJson() {
2887 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 2886 final core.Map<core.String, core.Object> _json =
2887 new core.Map<core.String, core.Object>();
2888 if (queries != null) { 2888 if (queries != null) {
2889 _json["queries"] = queries; 2889 _json["queries"] = queries;
2890 } 2890 }
2891 return _json; 2891 return _json;
2892 } 2892 }
2893 } 2893 }
2894 2894
2895 /** Specifies the selection and config of software inside the cluster. */ 2895 /// Specifies the selection and config of software inside the cluster.
2896 class SoftwareConfig { 2896 class SoftwareConfig {
2897 /** 2897 /// Optional. The version of software inside the cluster. It must match the
2898 * Optional. The version of software inside the cluster. It must match the 2898 /// regular expression [0-9]+\.[0-9]+. If unspecified, it defaults to the
2899 * regular expression [0-9]+\.[0-9]+. If unspecified, it defaults to the 2899 /// latest version (see Cloud Dataproc Versioning).
2900 * latest version (see Cloud Dataproc Versioning).
2901 */
2902 core.String imageVersion; 2900 core.String imageVersion;
2903 /** 2901
2904 * Optional. The properties to set on daemon config files.Property keys are 2902 /// Optional. The properties to set on daemon config files.Property keys are
2905 * specified in prefix:property format, such as core:fs.defaultFS. The 2903 /// specified in prefix:property format, such as core:fs.defaultFS. The
2906 * following are supported prefixes and their mappings: 2904 /// following are supported prefixes and their mappings:
2907 * capacity-scheduler: capacity-scheduler.xml 2905 /// capacity-scheduler: capacity-scheduler.xml
2908 * core: core-site.xml 2906 /// core: core-site.xml
2909 * distcp: distcp-default.xml 2907 /// distcp: distcp-default.xml
2910 * hdfs: hdfs-site.xml 2908 /// hdfs: hdfs-site.xml
2911 * hive: hive-site.xml 2909 /// hive: hive-site.xml
2912 * mapred: mapred-site.xml 2910 /// mapred: mapred-site.xml
2913 * pig: pig.properties 2911 /// pig: pig.properties
2914 * spark: spark-defaults.conf 2912 /// spark: spark-defaults.conf
2915 * yarn: yarn-site.xml 2913 /// yarn: yarn-site.xmlFor more information, see Cluster properties.
2916 */
2917 core.Map<core.String, core.String> properties; 2914 core.Map<core.String, core.String> properties;
2918 2915
2919 SoftwareConfig(); 2916 SoftwareConfig();
2920 2917
2921 SoftwareConfig.fromJson(core.Map _json) { 2918 SoftwareConfig.fromJson(core.Map _json) {
2922 if (_json.containsKey("imageVersion")) { 2919 if (_json.containsKey("imageVersion")) {
2923 imageVersion = _json["imageVersion"]; 2920 imageVersion = _json["imageVersion"];
2924 } 2921 }
2925 if (_json.containsKey("properties")) { 2922 if (_json.containsKey("properties")) {
2926 properties = _json["properties"]; 2923 properties = _json["properties"];
2927 } 2924 }
2928 } 2925 }
2929 2926
2930 core.Map<core.String, core.Object> toJson() { 2927 core.Map<core.String, core.Object> toJson() {
2931 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 2928 final core.Map<core.String, core.Object> _json =
2929 new core.Map<core.String, core.Object>();
2932 if (imageVersion != null) { 2930 if (imageVersion != null) {
2933 _json["imageVersion"] = imageVersion; 2931 _json["imageVersion"] = imageVersion;
2934 } 2932 }
2935 if (properties != null) { 2933 if (properties != null) {
2936 _json["properties"] = properties; 2934 _json["properties"] = properties;
2937 } 2935 }
2938 return _json; 2936 return _json;
2939 } 2937 }
2940 } 2938 }
2941 2939
2942 /** 2940 /// A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/)
2943 * A Cloud Dataproc job for running Apache Spark (http://spark.apache.org/) 2941 /// applications on YARN.
2944 * applications on YARN.
2945 */
2946 class SparkJob { 2942 class SparkJob {
2947 /** 2943 /// Optional. HCFS URIs of archives to be extracted in the working directory
2948 * Optional. HCFS URIs of archives to be extracted in the working directory of 2944 /// of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz,
2949 * Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, 2945 /// .tgz, and .zip.
2950 * and .zip.
2951 */
2952 core.List<core.String> archiveUris; 2946 core.List<core.String> archiveUris;
2953 /** 2947
2954 * Optional. The arguments to pass to the driver. Do not include arguments, 2948 /// Optional. The arguments to pass to the driver. Do not include arguments,
2955 * such as --conf, that can be set as job properties, since a collision may 2949 /// such as --conf, that can be set as job properties, since a collision may
2956 * occur that causes an incorrect job submission. 2950 /// occur that causes an incorrect job submission.
2957 */
2958 core.List<core.String> args; 2951 core.List<core.String> args;
2959 /** 2952
2960 * Optional. HCFS URIs of files to be copied to the working directory of Spark 2953 /// Optional. HCFS URIs of files to be copied to the working directory of
2961 * drivers and distributed tasks. Useful for naively parallel tasks. 2954 /// Spark drivers and distributed tasks. Useful for naively parallel tasks.
2962 */
2963 core.List<core.String> fileUris; 2955 core.List<core.String> fileUris;
2964 /** 2956
2965 * Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark 2957 /// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark
2966 * driver and tasks. 2958 /// driver and tasks.
2967 */
2968 core.List<core.String> jarFileUris; 2959 core.List<core.String> jarFileUris;
2969 /** Optional. The runtime log config for job execution. */ 2960
2961 /// Optional. The runtime log config for job execution.
2970 LoggingConfig loggingConfig; 2962 LoggingConfig loggingConfig;
2971 /** 2963
2972 * The name of the driver's main class. The jar file that contains the class 2964 /// The name of the driver's main class. The jar file that contains the class
2973 * must be in the default CLASSPATH or specified in jar_file_uris. 2965 /// must be in the default CLASSPATH or specified in jar_file_uris.
2974 */
2975 core.String mainClass; 2966 core.String mainClass;
2976 /** The HCFS URI of the jar file that contains the main class. */ 2967
2968 /// The HCFS URI of the jar file that contains the main class.
2977 core.String mainJarFileUri; 2969 core.String mainJarFileUri;
2978 /** 2970
2979 * Optional. A mapping of property names to values, used to configure Spark. 2971 /// Optional. A mapping of property names to values, used to configure Spark.
2980 * Properties that conflict with values set by the Cloud Dataproc API may be 2972 /// Properties that conflict with values set by the Cloud Dataproc API may be
2981 * overwritten. Can include properties set in 2973 /// overwritten. Can include properties set in
2982 * /etc/spark/conf/spark-defaults.conf and classes in user code. 2974 /// /etc/spark/conf/spark-defaults.conf and classes in user code.
2983 */
2984 core.Map<core.String, core.String> properties; 2975 core.Map<core.String, core.String> properties;
2985 2976
2986 SparkJob(); 2977 SparkJob();
2987 2978
2988 SparkJob.fromJson(core.Map _json) { 2979 SparkJob.fromJson(core.Map _json) {
2989 if (_json.containsKey("archiveUris")) { 2980 if (_json.containsKey("archiveUris")) {
2990 archiveUris = _json["archiveUris"]; 2981 archiveUris = _json["archiveUris"];
2991 } 2982 }
2992 if (_json.containsKey("args")) { 2983 if (_json.containsKey("args")) {
2993 args = _json["args"]; 2984 args = _json["args"];
(...skipping 12 matching lines...) Expand all
3006 } 2997 }
3007 if (_json.containsKey("mainJarFileUri")) { 2998 if (_json.containsKey("mainJarFileUri")) {
3008 mainJarFileUri = _json["mainJarFileUri"]; 2999 mainJarFileUri = _json["mainJarFileUri"];
3009 } 3000 }
3010 if (_json.containsKey("properties")) { 3001 if (_json.containsKey("properties")) {
3011 properties = _json["properties"]; 3002 properties = _json["properties"];
3012 } 3003 }
3013 } 3004 }
3014 3005
3015 core.Map<core.String, core.Object> toJson() { 3006 core.Map<core.String, core.Object> toJson() {
3016 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 3007 final core.Map<core.String, core.Object> _json =
3008 new core.Map<core.String, core.Object>();
3017 if (archiveUris != null) { 3009 if (archiveUris != null) {
3018 _json["archiveUris"] = archiveUris; 3010 _json["archiveUris"] = archiveUris;
3019 } 3011 }
3020 if (args != null) { 3012 if (args != null) {
3021 _json["args"] = args; 3013 _json["args"] = args;
3022 } 3014 }
3023 if (fileUris != null) { 3015 if (fileUris != null) {
3024 _json["fileUris"] = fileUris; 3016 _json["fileUris"] = fileUris;
3025 } 3017 }
3026 if (jarFileUris != null) { 3018 if (jarFileUris != null) {
3027 _json["jarFileUris"] = jarFileUris; 3019 _json["jarFileUris"] = jarFileUris;
3028 } 3020 }
3029 if (loggingConfig != null) { 3021 if (loggingConfig != null) {
3030 _json["loggingConfig"] = (loggingConfig).toJson(); 3022 _json["loggingConfig"] = (loggingConfig).toJson();
3031 } 3023 }
3032 if (mainClass != null) { 3024 if (mainClass != null) {
3033 _json["mainClass"] = mainClass; 3025 _json["mainClass"] = mainClass;
3034 } 3026 }
3035 if (mainJarFileUri != null) { 3027 if (mainJarFileUri != null) {
3036 _json["mainJarFileUri"] = mainJarFileUri; 3028 _json["mainJarFileUri"] = mainJarFileUri;
3037 } 3029 }
3038 if (properties != null) { 3030 if (properties != null) {
3039 _json["properties"] = properties; 3031 _json["properties"] = properties;
3040 } 3032 }
3041 return _json; 3033 return _json;
3042 } 3034 }
3043 } 3035 }
3044 3036
3045 /** 3037 /// A Cloud Dataproc job for running Apache Spark SQL
3046 * A Cloud Dataproc job for running Apache Spark SQL 3038 /// (http://spark.apache.org/sql/) queries.
3047 * (http://spark.apache.org/sql/) queries.
3048 */
3049 class SparkSqlJob { 3039 class SparkSqlJob {
3050 /** Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. */ 3040 /// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
3051 core.List<core.String> jarFileUris; 3041 core.List<core.String> jarFileUris;
3052 /** Optional. The runtime log config for job execution. */ 3042
3043 /// Optional. The runtime log config for job execution.
3053 LoggingConfig loggingConfig; 3044 LoggingConfig loggingConfig;
3054 /** 3045
3055 * Optional. A mapping of property names to values, used to configure Spark 3046 /// Optional. A mapping of property names to values, used to configure Spark
3056 * SQL's SparkConf. Properties that conflict with values set by the Cloud 3047 /// SQL's SparkConf. Properties that conflict with values set by the Cloud
3057 * Dataproc API may be overwritten. 3048 /// Dataproc API may be overwritten.
3058 */
3059 core.Map<core.String, core.String> properties; 3049 core.Map<core.String, core.String> properties;
3060 /** The HCFS URI of the script that contains SQL queries. */ 3050
3051 /// The HCFS URI of the script that contains SQL queries.
3061 core.String queryFileUri; 3052 core.String queryFileUri;
3062 /** A list of queries. */ 3053
3054 /// A list of queries.
3063 QueryList queryList; 3055 QueryList queryList;
3064 /** 3056
3065 * Optional. Mapping of query variable names to values (equivalent to the 3057 /// Optional. Mapping of query variable names to values (equivalent to the
3066 * Spark SQL command: SET name="value";). 3058 /// Spark SQL command: SET name="value";).
3067 */
3068 core.Map<core.String, core.String> scriptVariables; 3059 core.Map<core.String, core.String> scriptVariables;
3069 3060
3070 SparkSqlJob(); 3061 SparkSqlJob();
3071 3062
3072 SparkSqlJob.fromJson(core.Map _json) { 3063 SparkSqlJob.fromJson(core.Map _json) {
3073 if (_json.containsKey("jarFileUris")) { 3064 if (_json.containsKey("jarFileUris")) {
3074 jarFileUris = _json["jarFileUris"]; 3065 jarFileUris = _json["jarFileUris"];
3075 } 3066 }
3076 if (_json.containsKey("loggingConfig")) { 3067 if (_json.containsKey("loggingConfig")) {
3077 loggingConfig = new LoggingConfig.fromJson(_json["loggingConfig"]); 3068 loggingConfig = new LoggingConfig.fromJson(_json["loggingConfig"]);
3078 } 3069 }
3079 if (_json.containsKey("properties")) { 3070 if (_json.containsKey("properties")) {
3080 properties = _json["properties"]; 3071 properties = _json["properties"];
3081 } 3072 }
3082 if (_json.containsKey("queryFileUri")) { 3073 if (_json.containsKey("queryFileUri")) {
3083 queryFileUri = _json["queryFileUri"]; 3074 queryFileUri = _json["queryFileUri"];
3084 } 3075 }
3085 if (_json.containsKey("queryList")) { 3076 if (_json.containsKey("queryList")) {
3086 queryList = new QueryList.fromJson(_json["queryList"]); 3077 queryList = new QueryList.fromJson(_json["queryList"]);
3087 } 3078 }
3088 if (_json.containsKey("scriptVariables")) { 3079 if (_json.containsKey("scriptVariables")) {
3089 scriptVariables = _json["scriptVariables"]; 3080 scriptVariables = _json["scriptVariables"];
3090 } 3081 }
3091 } 3082 }
3092 3083
3093 core.Map<core.String, core.Object> toJson() { 3084 core.Map<core.String, core.Object> toJson() {
3094 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 3085 final core.Map<core.String, core.Object> _json =
3086 new core.Map<core.String, core.Object>();
3095 if (jarFileUris != null) { 3087 if (jarFileUris != null) {
3096 _json["jarFileUris"] = jarFileUris; 3088 _json["jarFileUris"] = jarFileUris;
3097 } 3089 }
3098 if (loggingConfig != null) { 3090 if (loggingConfig != null) {
3099 _json["loggingConfig"] = (loggingConfig).toJson(); 3091 _json["loggingConfig"] = (loggingConfig).toJson();
3100 } 3092 }
3101 if (properties != null) { 3093 if (properties != null) {
3102 _json["properties"] = properties; 3094 _json["properties"] = properties;
3103 } 3095 }
3104 if (queryFileUri != null) { 3096 if (queryFileUri != null) {
3105 _json["queryFileUri"] = queryFileUri; 3097 _json["queryFileUri"] = queryFileUri;
3106 } 3098 }
3107 if (queryList != null) { 3099 if (queryList != null) {
3108 _json["queryList"] = (queryList).toJson(); 3100 _json["queryList"] = (queryList).toJson();
3109 } 3101 }
3110 if (scriptVariables != null) { 3102 if (scriptVariables != null) {
3111 _json["scriptVariables"] = scriptVariables; 3103 _json["scriptVariables"] = scriptVariables;
3112 } 3104 }
3113 return _json; 3105 return _json;
3114 } 3106 }
3115 } 3107 }
3116 3108
3117 /** 3109 /// The Status type defines a logical error model that is suitable for
3118 * The Status type defines a logical error model that is suitable for different 3110 /// different programming environments, including REST APIs and RPC APIs. It is
3119 * programming environments, including REST APIs and RPC APIs. It is used by 3111 /// used by gRPC (https://github.com/grpc). The error model is designed to be:
3120 * gRPC (https://github.com/grpc). The error model is designed to be: 3112 /// Simple to use and understand for most users
3121 * Simple to use and understand for most users 3113 /// Flexible enough to meet unexpected needsOverviewThe Status message contains
3122 * Flexible enough to meet unexpected needsOverviewThe Status message contains 3114 /// three pieces of data: error code, error message, and error details. The
3123 * three pieces of data: error code, error message, and error details. The error 3115 /// error code should be an enum value of google.rpc.Code, but it may accept
3124 * code should be an enum value of google.rpc.Code, but it may accept additional 3116 /// additional error codes if needed. The error message should be a
3125 * error codes if needed. The error message should be a developer-facing English 3117 /// developer-facing English message that helps developers understand and
3126 * message that helps developers understand and resolve the error. If a 3118 /// resolve the error. If a localized user-facing error message is needed, put
3127 * localized user-facing error message is needed, put the localized message in 3119 /// the localized message in the error details or localize it in the client.
3128 * the error details or localize it in the client. The optional error details 3120 /// The optional error details may contain arbitrary information about the
3129 * may contain arbitrary information about the error. There is a predefined set 3121 /// error. There is a predefined set of error detail types in the package
3130 * of error detail types in the package google.rpc that can be used for common 3122 /// google.rpc that can be used for common error conditions.Language mappingThe
3131 * error conditions.Language mappingThe Status message is the logical 3123 /// Status message is the logical representation of the error model, but it is
3132 * representation of the error model, but it is not necessarily the actual wire 3124 /// not necessarily the actual wire format. When the Status message is exposed
3133 * format. When the Status message is exposed in different client libraries and 3125 /// in different client libraries and different wire protocols, it can be
3134 * different wire protocols, it can be mapped differently. For example, it will 3126 /// mapped differently. For example, it will likely be mapped to some
3135 * likely be mapped to some exceptions in Java, but more likely mapped to some 3127 /// exceptions in Java, but more likely mapped to some error codes in C.Other
3136 * error codes in C.Other usesThe error model and the Status message can be used 3128 /// usesThe error model and the Status message can be used in a variety of
3137 * in a variety of environments, either with or without APIs, to provide a 3129 /// environments, either with or without APIs, to provide a consistent
3138 * consistent developer experience across different environments.Example uses of 3130 /// developer experience across different environments.Example uses of this
3139 * this error model include: 3131 /// error model include:
3140 * Partial errors. If a service needs to return partial errors to the client, it 3132 /// Partial errors. If a service needs to return partial errors to the client,
3141 * may embed the Status in the normal response to indicate the partial errors. 3133 /// it may embed the Status in the normal response to indicate the partial
3142 * Workflow errors. A typical workflow has multiple steps. Each step may have a 3134 /// errors.
3143 * Status message for error reporting. 3135 /// Workflow errors. A typical workflow has multiple steps. Each step may have
3144 * Batch operations. If a client uses batch request and batch response, the 3136 /// a Status message for error reporting.
3145 * Status message should be used directly inside batch response, one for each 3137 /// Batch operations. If a client uses batch request and batch response, the
3146 * error sub-response. 3138 /// Status message should be used directly inside batch response, one for each
3147 * Asynchronous operations. If an API call embeds asynchronous operation results 3139 /// error sub-response.
3148 * in its response, the status of those operations should be represented 3140 /// Asynchronous operations. If an API call embeds asynchronous operation
3149 * directly using the Status message. 3141 /// results in its response, the status of those operations should be
3150 * Logging. If some API errors are stored in logs, the message Status could be 3142 /// represented directly using the Status message.
3151 * used directly after any stripping needed for security/privacy reasons. 3143 /// Logging. If some API errors are stored in logs, the message Status could be
3152 */ 3144 /// used directly after any stripping needed for security/privacy reasons.
3153 class Status { 3145 class Status {
3154 /** The status code, which should be an enum value of google.rpc.Code. */ 3146 /// The status code, which should be an enum value of google.rpc.Code.
3155 core.int code; 3147 core.int code;
3156 /** 3148
3157 * A list of messages that carry the error details. There is a common set of 3149 /// A list of messages that carry the error details. There is a common set of
3158 * message types for APIs to use. 3150 /// message types for APIs to use.
3159 * 3151 ///
3160 * The values for Object must be JSON objects. It can consist of `num`, 3152 /// The values for Object must be JSON objects. It can consist of `num`,
3161 * `String`, `bool` and `null` as well as `Map` and `List` values. 3153 /// `String`, `bool` and `null` as well as `Map` and `List` values.
3162 */
3163 core.List<core.Map<core.String, core.Object>> details; 3154 core.List<core.Map<core.String, core.Object>> details;
3164 /** 3155
3165 * A developer-facing error message, which should be in English. Any 3156 /// A developer-facing error message, which should be in English. Any
3166 * user-facing error message should be localized and sent in the 3157 /// user-facing error message should be localized and sent in the
3167 * google.rpc.Status.details field, or localized by the client. 3158 /// google.rpc.Status.details field, or localized by the client.
3168 */
3169 core.String message; 3159 core.String message;
3170 3160
3171 Status(); 3161 Status();
3172 3162
3173 Status.fromJson(core.Map _json) { 3163 Status.fromJson(core.Map _json) {
3174 if (_json.containsKey("code")) { 3164 if (_json.containsKey("code")) {
3175 code = _json["code"]; 3165 code = _json["code"];
3176 } 3166 }
3177 if (_json.containsKey("details")) { 3167 if (_json.containsKey("details")) {
3178 details = _json["details"]; 3168 details = _json["details"];
3179 } 3169 }
3180 if (_json.containsKey("message")) { 3170 if (_json.containsKey("message")) {
3181 message = _json["message"]; 3171 message = _json["message"];
3182 } 3172 }
3183 } 3173 }
3184 3174
3185 core.Map<core.String, core.Object> toJson() { 3175 core.Map<core.String, core.Object> toJson() {
3186 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 3176 final core.Map<core.String, core.Object> _json =
3177 new core.Map<core.String, core.Object>();
3187 if (code != null) { 3178 if (code != null) {
3188 _json["code"] = code; 3179 _json["code"] = code;
3189 } 3180 }
3190 if (details != null) { 3181 if (details != null) {
3191 _json["details"] = details; 3182 _json["details"] = details;
3192 } 3183 }
3193 if (message != null) { 3184 if (message != null) {
3194 _json["message"] = message; 3185 _json["message"] = message;
3195 } 3186 }
3196 return _json; 3187 return _json;
3197 } 3188 }
3198 } 3189 }
3199 3190
3200 /** A request to submit a job. */ 3191 /// A request to submit a job.
3201 class SubmitJobRequest { 3192 class SubmitJobRequest {
3202 /** Required. The job resource. */ 3193 /// Required. The job resource.
3203 Job job; 3194 Job job;
3204 3195
3205 SubmitJobRequest(); 3196 SubmitJobRequest();
3206 3197
3207 SubmitJobRequest.fromJson(core.Map _json) { 3198 SubmitJobRequest.fromJson(core.Map _json) {
3208 if (_json.containsKey("job")) { 3199 if (_json.containsKey("job")) {
3209 job = new Job.fromJson(_json["job"]); 3200 job = new Job.fromJson(_json["job"]);
3210 } 3201 }
3211 } 3202 }
3212 3203
3213 core.Map<core.String, core.Object> toJson() { 3204 core.Map<core.String, core.Object> toJson() {
3214 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 3205 final core.Map<core.String, core.Object> _json =
3206 new core.Map<core.String, core.Object>();
3215 if (job != null) { 3207 if (job != null) {
3216 _json["job"] = (job).toJson(); 3208 _json["job"] = (job).toJson();
3217 } 3209 }
3218 return _json; 3210 return _json;
3219 } 3211 }
3220 } 3212 }
3221 3213
3222 /** 3214 /// A YARN application created by a job. Application information is a subset of
3223 * A YARN application created by a job. Application information is a subset of 3215 /// <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>. Beta
3224 * <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.B eta 3216 /// Feature: This report is available for testing purposes only. It may be
3225 * Feature: This report is available for testing purposes only. It may be 3217 /// changed before final release.
3226 * changed before final release.
3227 */
3228 class YarnApplication { 3218 class YarnApplication {
3229 /** Required. The application name. */ 3219 /// Required. The application name.
3230 core.String name; 3220 core.String name;
3231 /** Required. The numerical progress of the application, from 1 to 100. */ 3221
3222 /// Required. The numerical progress of the application, from 1 to 100.
3232 core.double progress; 3223 core.double progress;
3233 /** 3224
3234 * Required. The application state. 3225 /// Required. The application state.
3235 * Possible string values are: 3226 /// Possible string values are:
3236 * - "STATE_UNSPECIFIED" : Status is unspecified. 3227 /// - "STATE_UNSPECIFIED" : Status is unspecified.
3237 * - "NEW" : Status is NEW. 3228 /// - "NEW" : Status is NEW.
3238 * - "NEW_SAVING" : Status is NEW_SAVING. 3229 /// - "NEW_SAVING" : Status is NEW_SAVING.
3239 * - "SUBMITTED" : Status is SUBMITTED. 3230 /// - "SUBMITTED" : Status is SUBMITTED.
3240 * - "ACCEPTED" : Status is ACCEPTED. 3231 /// - "ACCEPTED" : Status is ACCEPTED.
3241 * - "RUNNING" : Status is RUNNING. 3232 /// - "RUNNING" : Status is RUNNING.
3242 * - "FINISHED" : Status is FINISHED. 3233 /// - "FINISHED" : Status is FINISHED.
3243 * - "FAILED" : Status is FAILED. 3234 /// - "FAILED" : Status is FAILED.
3244 * - "KILLED" : Status is KILLED. 3235 /// - "KILLED" : Status is KILLED.
3245 */
3246 core.String state; 3236 core.String state;
3247 /** 3237
3248 * Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or 3238 /// Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or
3249 * TimelineServer that provides application-specific information. The URL uses 3239 /// TimelineServer that provides application-specific information. The URL
3250 * the internal hostname, and requires a proxy server for resolution and, 3240 /// uses the internal hostname, and requires a proxy server for resolution
3251 * possibly, access. 3241 /// and, possibly, access.
3252 */
3253 core.String trackingUrl; 3242 core.String trackingUrl;
3254 3243
3255 YarnApplication(); 3244 YarnApplication();
3256 3245
3257 YarnApplication.fromJson(core.Map _json) { 3246 YarnApplication.fromJson(core.Map _json) {
3258 if (_json.containsKey("name")) { 3247 if (_json.containsKey("name")) {
3259 name = _json["name"]; 3248 name = _json["name"];
3260 } 3249 }
3261 if (_json.containsKey("progress")) { 3250 if (_json.containsKey("progress")) {
3262 progress = _json["progress"]; 3251 progress = _json["progress"];
3263 } 3252 }
3264 if (_json.containsKey("state")) { 3253 if (_json.containsKey("state")) {
3265 state = _json["state"]; 3254 state = _json["state"];
3266 } 3255 }
3267 if (_json.containsKey("trackingUrl")) { 3256 if (_json.containsKey("trackingUrl")) {
3268 trackingUrl = _json["trackingUrl"]; 3257 trackingUrl = _json["trackingUrl"];
3269 } 3258 }
3270 } 3259 }
3271 3260
3272 core.Map<core.String, core.Object> toJson() { 3261 core.Map<core.String, core.Object> toJson() {
3273 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c ore.Object>(); 3262 final core.Map<core.String, core.Object> _json =
3263 new core.Map<core.String, core.Object>();
3274 if (name != null) { 3264 if (name != null) {
3275 _json["name"] = name; 3265 _json["name"] = name;
3276 } 3266 }
3277 if (progress != null) { 3267 if (progress != null) {
3278 _json["progress"] = progress; 3268 _json["progress"] = progress;
3279 } 3269 }
3280 if (state != null) { 3270 if (state != null) {
3281 _json["state"] = state; 3271 _json["state"] = state;
3282 } 3272 }
3283 if (trackingUrl != null) { 3273 if (trackingUrl != null) {
3284 _json["trackingUrl"] = trackingUrl; 3274 _json["trackingUrl"] = trackingUrl;
3285 } 3275 }
3286 return _json; 3276 return _json;
3287 } 3277 }
3288 } 3278 }
OLDNEW
« no previous file with comments | « generated/googleapis/lib/customsearch/v1.dart ('k') | generated/googleapis/lib/datastore/v1.dart » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698