OLD | NEW |
1 // This is a generated file (see the discoveryapis_generator project). | 1 // This is a generated file (see the discoveryapis_generator project). |
2 | 2 |
3 library googleapis_beta.dataflow.v1b3; | 3 library googleapis_beta.dataflow.v1b3; |
4 | 4 |
5 import 'dart:core' as core; | 5 import 'dart:core' as core; |
6 import 'dart:async' as async; | 6 import 'dart:async' as async; |
7 import 'dart:convert' as convert; | 7 import 'dart:convert' as convert; |
8 | 8 |
9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; | 9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; |
10 import 'package:http/http.dart' as http; | 10 import 'package:http/http.dart' as http; |
11 | 11 |
12 export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' show | 12 export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' show |
13 ApiRequestError, DetailedApiRequestError; | 13 ApiRequestError, DetailedApiRequestError; |
14 | 14 |
15 const core.String USER_AGENT = 'dart-api-client dataflow/v1b3'; | 15 const core.String USER_AGENT = 'dart-api-client dataflow/v1b3'; |
16 | 16 |
17 /** Google Dataflow API. */ | 17 /** Google Dataflow API. */ |
18 class DataflowApi { | 18 class DataflowApi { |
| 19 /** View and manage your data across Google Cloud Platform services */ |
| 20 static const CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"; |
| 21 |
| 22 /** View your email address */ |
| 23 static const UserinfoEmailScope = "https://www.googleapis.com/auth/userinfo.email"; |
| 24 |
19 | 25 |
20 final commons.ApiRequester _requester; | 26 final commons.ApiRequester _requester; |
21 | 27 |
| 28 ProjectsResourceApi get projects => new ProjectsResourceApi(_requester); |
| 29 |
22 DataflowApi(http.Client client, {core.String rootUrl: "https://dataflow.googleapis.com/", core.String servicePath: ""}) : | 30 DataflowApi(http.Client client, {core.String rootUrl: "https://dataflow.googleapis.com/", core.String servicePath: ""}) : |
23 _requester = new commons.ApiRequester(client, rootUrl, servicePath, USER_AGENT); | 31 _requester = new commons.ApiRequester(client, rootUrl, servicePath, USER_AGENT); |
24 } | 32 } |
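A minimal sketch of constructing this client, assuming the googleapis_auth package and the pub import path 'package:googleapis_beta/dataflow/v1b3.dart'; the service-account key below is a placeholder:

import 'dart:async';
import 'package:googleapis_auth/auth_io.dart' as auth;
import 'package:googleapis_beta/dataflow/v1b3.dart';

Future main() async {
  // Placeholder service-account key; substitute a real JSON key file's contents.
  var credentials = new auth.ServiceAccountCredentials.fromJson({
    "type": "service_account",
    "private_key_id": "...",
    "private_key": "...",
    "client_email": "...",
    "client_id": "..."
  });
  // Authorize for the scope declared on DataflowApi above.
  var client = await auth.clientViaServiceAccount(
      credentials, [DataflowApi.CloudPlatformScope]);
  var api = new DataflowApi(client);
  // ... call api.projects.jobs.* here (see the sketches below) ...
  client.close();
}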
| 33 |
| 34 |
| 35 class ProjectsResourceApi { |
| 36 final commons.ApiRequester _requester; |
| 37 |
| 38 ProjectsJobsResourceApi get jobs => new ProjectsJobsResourceApi(_requester); |
| 39 |
| 40 ProjectsResourceApi(commons.ApiRequester client) : |
| 41 _requester = client; |
| 42 } |
| 43 |
| 44 |
| 45 class ProjectsJobsResourceApi { |
| 46 final commons.ApiRequester _requester; |
| 47 |
| 48 ProjectsJobsMessagesResourceApi get messages => new ProjectsJobsMessagesResourceApi(_requester); |
| 49 ProjectsJobsWorkItemsResourceApi get workItems => new ProjectsJobsWorkItemsResourceApi(_requester); |
| 50 |
| 51 ProjectsJobsResourceApi(commons.ApiRequester client) : |
| 52 _requester = client; |
| 53 |
| 54 /** |
| 55 * Creates a dataflow job. |
| 56 * |
| 57 * [request] - The metadata request object. |
| 58 * |
| 59 * Request parameters: |
| 60 * |
| 61 * [projectId] - The project which owns the job. |
| 62 * |
| 63 * [view] - Level of information requested in response. |
| 64 * Possible string values are: |
| 65 * - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN. |
| 66 * - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY. |
| 67 * - "JOB_VIEW_ALL" : A JOB_VIEW_ALL. |
| 68 * |
| 69 * [replaceJobId] - DEPRECATED. This field is now on the Job message. |
| 70 * |
| 71 * Completes with a [Job]. |
| 72 * |
| 73 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
| 74 * error. |
| 75 * |
| 76 * If the used [http.Client] completes with an error when making a REST call, |
| 77 * this method will complete with the same error. |
| 78 */ |
| 79 async.Future<Job> create(Job request, core.String projectId, {core.String view, core.String replaceJobId}) { |
| 80 var _url = null; |
| 81 var _queryParams = new core.Map(); |
| 82 var _uploadMedia = null; |
| 83 var _uploadOptions = null; |
| 84 var _downloadOptions = commons.DownloadOptions.Metadata; |
| 85 var _body = null; |
| 86 |
| 87 if (request != null) { |
| 88 _body = convert.JSON.encode((request).toJson()); |
| 89 } |
| 90 if (projectId == null) { |
| 91 throw new core.ArgumentError("Parameter projectId is required."); |
| 92 } |
| 93 if (view != null) { |
| 94 _queryParams["view"] = [view]; |
| 95 } |
| 96 if (replaceJobId != null) { |
| 97 _queryParams["replaceJobId"] = [replaceJobId]; |
| 98 } |
| 99 |
| 100 _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs'; |
| 101 |
| 102 var _response = _requester.request(_url, |
| 103 "POST", |
| 104 body: _body, |
| 105 queryParams: _queryParams, |
| 106 uploadOptions: _uploadOptions, |
| 107 uploadMedia: _uploadMedia, |
| 108 downloadOptions: _downloadOptions); |
| 109 return _response.then((data) => new Job.fromJson(data)); |
| 110 } |
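A usage sketch of create, reusing the api instance from the first sketch; the project ID and job name are placeholders, and the field names come from the Job class later in this file:

var job = new Job()
  ..name = 'example-job' // must match [a-z]([-a-z0-9]{0,38}[a-z0-9])?
  ..type = 'JOB_TYPE_BATCH'
  ..environment = new Environment();
api.projects.jobs.create(job, 'my-project', view: 'JOB_VIEW_SUMMARY')
    .then((Job created) => print('created ${created.id}: ${created.currentState}'))
    .catchError((e) => print('create failed: $e'));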
| 111 |
| 112 /** |
| 113 * Gets the state of the specified dataflow job. |
| 114 * |
| 115 * Request parameters: |
| 116 * |
| 117 * [projectId] - The project which owns the job. |
| 118 * |
| 119 * [jobId] - Identifies a single job. |
| 120 * |
| 121 * [view] - Level of information requested in response. |
| 122 * Possible string values are: |
| 123 * - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN. |
| 124 * - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY. |
| 125 * - "JOB_VIEW_ALL" : A JOB_VIEW_ALL. |
| 126 * |
| 127 * Completes with a [Job]. |
| 128 * |
| 129 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
| 130 * error. |
| 131 * |
| 132 * If the used [http.Client] completes with an error when making a REST call, |
| 133 * this method will complete with the same error. |
| 134 */ |
| 135 async.Future<Job> get(core.String projectId, core.String jobId, {core.String view}) { |
| 136 var _url = null; |
| 137 var _queryParams = new core.Map(); |
| 138 var _uploadMedia = null; |
| 139 var _uploadOptions = null; |
| 140 var _downloadOptions = commons.DownloadOptions.Metadata; |
| 141 var _body = null; |
| 142 |
| 143 if (projectId == null) { |
| 144 throw new core.ArgumentError("Parameter projectId is required."); |
| 145 } |
| 146 if (jobId == null) { |
| 147 throw new core.ArgumentError("Parameter jobId is required."); |
| 148 } |
| 149 if (view != null) { |
| 150 _queryParams["view"] = [view]; |
| 151 } |
| 152 |
| 153 _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId'); |
| 154 |
| 155 var _response = _requester.request(_url, |
| 156 "GET", |
| 157 body: _body, |
| 158 queryParams: _queryParams, |
| 159 uploadOptions: _uploadOptions, |
| 160 uploadMedia: _uploadMedia, |
| 161 downloadOptions: _downloadOptions); |
| 162 return _response.then((data) => new Job.fromJson(data)); |
| 163 } |
| 164 |
| 165 /** |
| 166 * Request the job status. |
| 167 * |
| 168 * Request parameters: |
| 169 * |
| 170 * [projectId] - A project id. |
| 171 * |
| 172 * [jobId] - The job to get messages for. |
| 173 * |
| 174 * [startTime] - Return only metric data that has changed since this time. |
| 175 * Default is to return all information about all metrics for the job. |
| 176 * |
| 177 * Completes with a [JobMetrics]. |
| 178 * |
| 179 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
| 180 * error. |
| 181 * |
| 182 * If the used [http.Client] completes with an error when making a REST call, |
| 183 * this method will complete with the same error. |
| 184 */ |
| 185 async.Future<JobMetrics> getMetrics(core.String projectId, core.String jobId, {core.String startTime}) { |
| 186 var _url = null; |
| 187 var _queryParams = new core.Map(); |
| 188 var _uploadMedia = null; |
| 189 var _uploadOptions = null; |
| 190 var _downloadOptions = commons.DownloadOptions.Metadata; |
| 191 var _body = null; |
| 192 |
| 193 if (projectId == null) { |
| 194 throw new core.ArgumentError("Parameter projectId is required."); |
| 195 } |
| 196 if (jobId == null) { |
| 197 throw new core.ArgumentError("Parameter jobId is required."); |
| 198 } |
| 199 if (startTime != null) { |
| 200 _queryParams["startTime"] = [startTime]; |
| 201 } |
| 202 |
| 203 _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/metrics'; |
| 204 |
| 205 var _response = _requester.request(_url, |
| 206 "GET", |
| 207 body: _body, |
| 208 queryParams: _queryParams, |
| 209 uploadOptions: _uploadOptions, |
| 210 uploadMedia: _uploadMedia, |
| 211 downloadOptions: _downloadOptions); |
| 212 return _response.then((data) => new JobMetrics.fromJson(data)); |
| 213 } |
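Because startTime limits the response to metrics changed since that instant, feeding back the previous metricTime gives incremental polling. A sketch (api as in the first sketch; IDs are caller-supplied):

String since; // null on the first call, so all metrics are returned
Future pollMetrics(String projectId, String jobId) {
  return api.projects.jobs.getMetrics(projectId, jobId, startTime: since)
      .then((JobMetrics m) {
    since = m.metricTime; // next call returns only metrics changed since then
    if (m.metrics != null) m.metrics.forEach(print);
  });
}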
| 214 |
| 215 /** |
| 216 * List the jobs of a project |
| 217 * |
| 218 * Request parameters: |
| 219 * |
| 220 * [projectId] - The project which owns the jobs. |
| 221 * |
| 222 * [view] - Level of information requested in response. Default is SUMMARY. |
| 223 * Possible string values are: |
| 224 * - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN. |
| 225 * - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY. |
| 226 * - "JOB_VIEW_ALL" : A JOB_VIEW_ALL. |
| 227 * |
| 228 * [pageSize] - If there are many jobs, limit response to at most this many. |
| 229 * The actual number of jobs returned will be the lesser of max_responses and |
| 230 * an unspecified server-defined limit. |
| 231 * |
| 232 * [pageToken] - Set this to the 'next_page_token' field of a previous |
| 233 * response to request additional results in a long list. |
| 234 * |
| 235 * Completes with a [ListJobsResponse]. |
| 236 * |
| 237 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
| 238 * error. |
| 239 * |
| 240 * If the used [http.Client] completes with an error when making a REST call, |
| 241 * this method will complete with the same error. |
| 242 */ |
| 243 async.Future<ListJobsResponse> list(core.String projectId, {core.String view, core.int pageSize, core.String pageToken}) { |
| 244 var _url = null; |
| 245 var _queryParams = new core.Map(); |
| 246 var _uploadMedia = null; |
| 247 var _uploadOptions = null; |
| 248 var _downloadOptions = commons.DownloadOptions.Metadata; |
| 249 var _body = null; |
| 250 |
| 251 if (projectId == null) { |
| 252 throw new core.ArgumentError("Parameter projectId is required."); |
| 253 } |
| 254 if (view != null) { |
| 255 _queryParams["view"] = [view]; |
| 256 } |
| 257 if (pageSize != null) { |
| 258 _queryParams["pageSize"] = ["${pageSize}"]; |
| 259 } |
| 260 if (pageToken != null) { |
| 261 _queryParams["pageToken"] = [pageToken]; |
| 262 } |
| 263 |
| 264 _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs'; |
| 265 |
| 266 var _response = _requester.request(_url, |
| 267 "GET", |
| 268 body: _body, |
| 269 queryParams: _queryParams, |
| 270 uploadOptions: _uploadOptions, |
| 271 uploadMedia: _uploadMedia, |
| 272 downloadOptions: _downloadOptions); |
| 273 return _response.then((data) => new ListJobsResponse.fromJson(data)); |
| 274 } |
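Paging works by feeding the response's next page token back as pageToken until it is null. A sketch, assuming ListJobsResponse exposes jobs and nextPageToken fields (that class is not shown in this excerpt):

Future listAllJobs(String projectId) async {
  String token;
  do {
    var page = await api.projects.jobs.list(projectId,
        view: 'JOB_VIEW_SUMMARY', pageSize: 100, pageToken: token);
    if (page.jobs != null) {
      for (Job j in page.jobs) print('${j.id} ${j.currentState}');
    }
    token = page.nextPageToken;
  } while (token != null);
}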
| 275 |
| 276 /** |
| 277 * Updates the state of an existing dataflow job. |
| 278 * |
| 279 * [request] - The metadata request object. |
| 280 * |
| 281 * Request parameters: |
| 282 * |
| 283 * [projectId] - The project which owns the job. |
| 284 * |
| 285 * [jobId] - Identifies a single job. |
| 286 * |
| 287 * Completes with a [Job]. |
| 288 * |
| 289 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
| 290 * error. |
| 291 * |
| 292 * If the used [http.Client] completes with an error when making a REST call, |
| 293 * this method will complete with the same error. |
| 294 */ |
| 295 async.Future<Job> update(Job request, core.String projectId, core.String jobId) { |
| 296 var _url = null; |
| 297 var _queryParams = new core.Map(); |
| 298 var _uploadMedia = null; |
| 299 var _uploadOptions = null; |
| 300 var _downloadOptions = commons.DownloadOptions.Metadata; |
| 301 var _body = null; |
| 302 |
| 303 if (request != null) { |
| 304 _body = convert.JSON.encode((request).toJson()); |
| 305 } |
| 306 if (projectId == null) { |
| 307 throw new core.ArgumentError("Parameter projectId is required."); |
| 308 } |
| 309 if (jobId == null) { |
| 310 throw new core.ArgumentError("Parameter jobId is required."); |
| 311 } |
| 312 |
| 313 _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId'); |
| 314 |
| 315 var _response = _requester.request(_url, |
| 316 "PUT", |
| 317 body: _body, |
| 318 queryParams: _queryParams, |
| 319 uploadOptions: _uploadOptions, |
| 320 uploadMedia: _uploadMedia, |
| 321 downloadOptions: _downloadOptions); |
| 322 return _response.then((data) => new Job.fromJson(data)); |
| 323 } |
| 324 |
| 325 } |
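Per the requestedState documentation on the Job class below, update may irrevocably cancel a job by setting the requested state. A minimal sketch:

Future<Job> cancelJob(String projectId, String jobId) {
  var patch = new Job()..requestedState = 'JOB_STATE_CANCELLED';
  return api.projects.jobs.update(patch, projectId, jobId);
}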
| 326 |
| 327 |
| 328 class ProjectsJobsMessagesResourceApi { |
| 329 final commons.ApiRequester _requester; |
| 330 |
| 331 ProjectsJobsMessagesResourceApi(commons.ApiRequester client) : |
| 332 _requester = client; |
| 333 |
| 334 /** |
| 335 * Request the job status. |
| 336 * |
| 337 * Request parameters: |
| 338 * |
| 339 * [projectId] - A project id. |
| 340 * |
| 341 * [jobId] - The job to get messages about. |
| 342 * |
| 343 * [minimumImportance] - Filter to only get messages with importance >= level |
| 344 * Possible string values are: |
| 345 * - "JOB_MESSAGE_IMPORTANCE_UNKNOWN" : A JOB_MESSAGE_IMPORTANCE_UNKNOWN. |
| 346 * - "JOB_MESSAGE_DEBUG" : A JOB_MESSAGE_DEBUG. |
| 347 * - "JOB_MESSAGE_DETAILED" : A JOB_MESSAGE_DETAILED. |
| 348 * - "JOB_MESSAGE_BASIC" : A JOB_MESSAGE_BASIC. |
| 349 * - "JOB_MESSAGE_WARNING" : A JOB_MESSAGE_WARNING. |
| 350 * - "JOB_MESSAGE_ERROR" : A JOB_MESSAGE_ERROR. |
| 351 * |
| 352 * [pageSize] - If specified, determines the maximum number of messages to |
| 353 * return. If unspecified, the service may choose an appropriate default, or |
| 354 * may return an arbitrarily large number of results. |
| 355 * |
| 356 * [pageToken] - If supplied, this should be the value of next_page_token |
| 357 * returned by an earlier call. This will cause the next page of results to be |
| 358 * returned. |
| 359 * |
| 360 * [startTime] - If specified, return only messages with timestamps >= |
| 361 * start_time. The default is the job creation time (i.e. beginning of |
| 362 * messages). |
| 363 * |
| 364 * [endTime] - Return only messages with timestamps < end_time. The default is |
| 365 * now (i.e. return up to the latest messages available). |
| 366 * |
| 367 * Completes with a [ListJobMessagesResponse]. |
| 368 * |
| 369 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
| 370 * error. |
| 371 * |
| 372 * If the used [http.Client] completes with an error when making a REST call, |
| 373 * this method will complete with the same error. |
| 374 */ |
| 375 async.Future<ListJobMessagesResponse> list(core.String projectId, core.String jobId, {core.String minimumImportance, core.int pageSize, core.String pageToken, core.String startTime, core.String endTime}) { |
| 376 var _url = null; |
| 377 var _queryParams = new core.Map(); |
| 378 var _uploadMedia = null; |
| 379 var _uploadOptions = null; |
| 380 var _downloadOptions = commons.DownloadOptions.Metadata; |
| 381 var _body = null; |
| 382 |
| 383 if (projectId == null) { |
| 384 throw new core.ArgumentError("Parameter projectId is required."); |
| 385 } |
| 386 if (jobId == null) { |
| 387 throw new core.ArgumentError("Parameter jobId is required."); |
| 388 } |
| 389 if (minimumImportance != null) { |
| 390 _queryParams["minimumImportance"] = [minimumImportance]; |
| 391 } |
| 392 if (pageSize != null) { |
| 393 _queryParams["pageSize"] = ["${pageSize}"]; |
| 394 } |
| 395 if (pageToken != null) { |
| 396 _queryParams["pageToken"] = [pageToken]; |
| 397 } |
| 398 if (startTime != null) { |
| 399 _queryParams["startTime"] = [startTime]; |
| 400 } |
| 401 if (endTime != null) { |
| 402 _queryParams["endTime"] = [endTime]; |
| 403 } |
| 404 |
| 405 _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/messages'; |
| 406 |
| 407 var _response = _requester.request(_url, |
| 408 "GET", |
| 409 body: _body, |
| 410 queryParams: _queryParams, |
| 411 uploadOptions: _uploadOptions, |
| 412 uploadMedia: _uploadMedia, |
| 413 downloadOptions: _downloadOptions); |
| 414 return _response.then((data) => new ListJobMessagesResponse.fromJson(data)); |
| 415 } |
| 416 |
| 417 } |
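A sketch that prints a job's warnings and errors via the minimumImportance filter, assuming ListJobMessagesResponse exposes jobMessages and nextPageToken fields (that class is not shown in this excerpt):

Future printProblems(String projectId, String jobId) async {
  String token;
  do {
    var page = await api.projects.jobs.messages.list(projectId, jobId,
        minimumImportance: 'JOB_MESSAGE_WARNING', pageToken: token);
    if (page.jobMessages != null) {
      for (JobMessage m in page.jobMessages) {
        print('${m.time} [${m.messageImportance}] ${m.messageText}');
      }
    }
    token = page.nextPageToken;
  } while (token != null);
}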
| 418 |
| 419 |
| 420 class ProjectsJobsWorkItemsResourceApi { |
| 421 final commons.ApiRequester _requester; |
| 422 |
| 423 ProjectsJobsWorkItemsResourceApi(commons.ApiRequester client) : |
| 424 _requester = client; |
| 425 |
| 426 /** |
| 427 * Leases a dataflow WorkItem to run. |
| 428 * |
| 429 * [request] - The metadata request object. |
| 430 * |
| 431 * Request parameters: |
| 432 * |
| 433 * [projectId] - Identifies the project this worker belongs to. |
| 434 * |
| 435 * [jobId] - Identifies the workflow job this worker belongs to. |
| 436 * |
| 437 * Completes with a [LeaseWorkItemResponse]. |
| 438 * |
| 439 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
| 440 * error. |
| 441 * |
| 442 * If the used [http.Client] completes with an error when making a REST call, |
| 443 * this method will complete with the same error. |
| 444 */ |
| 445 async.Future<LeaseWorkItemResponse> lease(LeaseWorkItemRequest request, core.String projectId, core.String jobId) { |
| 446 var _url = null; |
| 447 var _queryParams = new core.Map(); |
| 448 var _uploadMedia = null; |
| 449 var _uploadOptions = null; |
| 450 var _downloadOptions = commons.DownloadOptions.Metadata; |
| 451 var _body = null; |
| 452 |
| 453 if (request != null) { |
| 454 _body = convert.JSON.encode((request).toJson()); |
| 455 } |
| 456 if (projectId == null) { |
| 457 throw new core.ArgumentError("Parameter projectId is required."); |
| 458 } |
| 459 if (jobId == null) { |
| 460 throw new core.ArgumentError("Parameter jobId is required."); |
| 461 } |
| 462 |
| 463 _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/workItems:lease'; |
| 464 |
| 465 var _response = _requester.request(_url, |
| 466 "POST", |
| 467 body: _body, |
| 468 queryParams: _queryParams, |
| 469 uploadOptions: _uploadOptions, |
| 470 uploadMedia: _uploadMedia, |
| 471 downloadOptions: _downloadOptions); |
| 472 return _response.then((data) => new LeaseWorkItemResponse.fromJson(data)); |
| 473 } |
| 474 |
| 475 /** |
| 476 * Reports the status of dataflow WorkItems leased by a worker. |
| 477 * |
| 478 * [request] - The metadata request object. |
| 479 * |
| 480 * Request parameters: |
| 481 * |
| 482 * [projectId] - The project which owns the WorkItem's job. |
| 483 * |
| 484 * [jobId] - The job which the WorkItem is part of. |
| 485 * |
| 486 * Completes with a [ReportWorkItemStatusResponse]. |
| 487 * |
| 488 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
| 489 * error. |
| 490 * |
| 491 * If the used [http.Client] completes with an error when making a REST call, |
| 492 * this method will complete with the same error. |
| 493 */ |
| 494 async.Future<ReportWorkItemStatusResponse> reportStatus(ReportWorkItemStatusRequest request, core.String projectId, core.String jobId) { |
| 495 var _url = null; |
| 496 var _queryParams = new core.Map(); |
| 497 var _uploadMedia = null; |
| 498 var _uploadOptions = null; |
| 499 var _downloadOptions = commons.DownloadOptions.Metadata; |
| 500 var _body = null; |
| 501 |
| 502 if (request != null) { |
| 503 _body = convert.JSON.encode((request).toJson()); |
| 504 } |
| 505 if (projectId == null) { |
| 506 throw new core.ArgumentError("Parameter projectId is required."); |
| 507 } |
| 508 if (jobId == null) { |
| 509 throw new core.ArgumentError("Parameter jobId is required."); |
| 510 } |
| 511 |
| 512 _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/workItems:reportStatus'; |
| 513 |
| 514 var _response = _requester.request(_url, |
| 515 "POST", |
| 516 body: _body, |
| 517 queryParams: _queryParams, |
| 518 uploadOptions: _uploadOptions, |
| 519 uploadMedia: _uploadMedia, |
| 520 downloadOptions: _downloadOptions); |
| 521 return _response.then((data) => new ReportWorkItemStatusResponse.fromJson(data)); |
| 522 } |
| 523 |
| 524 } |
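lease and reportStatus pair up into a worker's main loop: lease work, execute it, report status. A highly simplified sketch in which the worker ID, lease duration, and work-item type filter are placeholders and the actual execution and status fields are elided:

Future workerLoop(String projectId, String jobId) async {
  while (true) {
    var req = new LeaseWorkItemRequest()
      ..workerId = 'worker-0' // placeholder: typically the VM instance name
      ..currentWorkerTime = new DateTime.now().toUtc().toIso8601String()
      ..requestedLeaseDuration = '300s'
      ..workItemTypes = ['map_task']; // placeholder filter
    var lease = await api.projects.jobs.workItems.lease(req, projectId, jobId);
    if (lease.workItems == null || lease.workItems.isEmpty) break;
    for (var item in lease.workItems) {
      // ... execute the leased WorkItem, then report its status ...
      var status = new ReportWorkItemStatusRequest(); // status fields elided
      await api.projects.jobs.workItems.reportStatus(status, projectId, jobId);
    }
  }
}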
| 525 |
| 526 |
| 527 |
| 528 /** A progress measurement of a WorkItem by a worker. */ |
| 529 class ApproximateProgress { |
| 530 /** |
| 531 * Completion as percentage of the work, from 0.0 (beginning, nothing |
| 532 * complete), to 1.0 (end of the work range, entire WorkItem complete). |
| 533 */ |
| 534 core.double percentComplete; |
| 535 /** A Position within the work to represent a progress. */ |
| 536 Position position; |
| 537 /** Completion as an estimated time remaining. */ |
| 538 core.String remainingTime; |
| 539 |
| 540 ApproximateProgress(); |
| 541 |
| 542 ApproximateProgress.fromJson(core.Map _json) { |
| 543 if (_json.containsKey("percentComplete")) { |
| 544 percentComplete = _json["percentComplete"]; |
| 545 } |
| 546 if (_json.containsKey("position")) { |
| 547 position = new Position.fromJson(_json["position"]); |
| 548 } |
| 549 if (_json.containsKey("remainingTime")) { |
| 550 remainingTime = _json["remainingTime"]; |
| 551 } |
| 552 } |
| 553 |
| 554 core.Map toJson() { |
| 555 var _json = new core.Map(); |
| 556 if (percentComplete != null) { |
| 557 _json["percentComplete"] = percentComplete; |
| 558 } |
| 559 if (position != null) { |
| 560 _json["position"] = (position).toJson(); |
| 561 } |
| 562 if (remainingTime != null) { |
| 563 _json["remainingTime"] = remainingTime; |
| 564 } |
| 565 return _json; |
| 566 } |
| 567 } |
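Every model class in this file follows the same fromJson/toJson shape as ApproximateProgress above; a round-trip sketch:

var progress = new ApproximateProgress()
  ..percentComplete = 0.25
  ..remainingTime = '600s';
Map json = progress.toJson(); // {percentComplete: 0.25, remainingTime: 600s}
var copy = new ApproximateProgress.fromJson(json);
assert(copy.percentComplete == progress.percentComplete);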
| 568 |
| 569 /** Settings for WorkerPool autoscaling. */ |
| 570 class AutoscalingSettings { |
| 571 /** |
| 572 * The algorithm to use for autoscaling. |
| 573 * Possible string values are: |
| 574 * - "AUTOSCALING_ALGORITHM_UNKNOWN" : A AUTOSCALING_ALGORITHM_UNKNOWN. |
| 575 * - "AUTOSCALING_ALGORITHM_NONE" : A AUTOSCALING_ALGORITHM_NONE. |
| 576 * - "AUTOSCALING_ALGORITHM_BASIC" : A AUTOSCALING_ALGORITHM_BASIC. |
| 577 */ |
| 578 core.String algorithm; |
| 579 /** The maximum number of workers to cap scaling at. */ |
| 580 core.int maxNumWorkers; |
| 581 |
| 582 AutoscalingSettings(); |
| 583 |
| 584 AutoscalingSettings.fromJson(core.Map _json) { |
| 585 if (_json.containsKey("algorithm")) { |
| 586 algorithm = _json["algorithm"]; |
| 587 } |
| 588 if (_json.containsKey("maxNumWorkers")) { |
| 589 maxNumWorkers = _json["maxNumWorkers"]; |
| 590 } |
| 591 } |
| 592 |
| 593 core.Map toJson() { |
| 594 var _json = new core.Map(); |
| 595 if (algorithm != null) { |
| 596 _json["algorithm"] = algorithm; |
| 597 } |
| 598 if (maxNumWorkers != null) { |
| 599 _json["maxNumWorkers"] = maxNumWorkers; |
| 600 } |
| 601 return _json; |
| 602 } |
| 603 } |
| 604 |
| 605 /** All configuration data for a particular Computation. */ |
| 606 class ComputationTopology { |
| 607 /** The ID of the computation. */ |
| 608 core.String computationId; |
| 609 /** The inputs to the computation. */ |
| 610 core.List<StreamLocation> inputs; |
| 611 /** The key ranges processed by the computation. */ |
| 612 core.List<KeyRangeLocation> keyRanges; |
| 613 /** The outputs from the computation. */ |
| 614 core.List<StreamLocation> outputs; |
| 615 /** The state family values. */ |
| 616 core.List<StateFamilyConfig> stateFamilies; |
| 617 /** The system stage name. */ |
| 618 core.String systemStageName; |
| 619 /** The user stage name. */ |
| 620 core.String userStageName; |
| 621 |
| 622 ComputationTopology(); |
| 623 |
| 624 ComputationTopology.fromJson(core.Map _json) { |
| 625 if (_json.containsKey("computationId")) { |
| 626 computationId = _json["computationId"]; |
| 627 } |
| 628 if (_json.containsKey("inputs")) { |
| 629 inputs = _json["inputs"].map((value) => new StreamLocation.fromJson(value)
).toList(); |
| 630 } |
| 631 if (_json.containsKey("keyRanges")) { |
| 632 keyRanges = _json["keyRanges"].map((value) => new KeyRangeLocation.fromJso
n(value)).toList(); |
| 633 } |
| 634 if (_json.containsKey("outputs")) { |
| 635 outputs = _json["outputs"].map((value) => new StreamLocation.fromJson(valu
e)).toList(); |
| 636 } |
| 637 if (_json.containsKey("stateFamilies")) { |
| 638 stateFamilies = _json["stateFamilies"].map((value) => new StateFamilyConfi
g.fromJson(value)).toList(); |
| 639 } |
| 640 if (_json.containsKey("systemStageName")) { |
| 641 systemStageName = _json["systemStageName"]; |
| 642 } |
| 643 if (_json.containsKey("userStageName")) { |
| 644 userStageName = _json["userStageName"]; |
| 645 } |
| 646 } |
| 647 |
| 648 core.Map toJson() { |
| 649 var _json = new core.Map(); |
| 650 if (computationId != null) { |
| 651 _json["computationId"] = computationId; |
| 652 } |
| 653 if (inputs != null) { |
| 654 _json["inputs"] = inputs.map((value) => (value).toJson()).toList(); |
| 655 } |
| 656 if (keyRanges != null) { |
| 657 _json["keyRanges"] = keyRanges.map((value) => (value).toJson()).toList(); |
| 658 } |
| 659 if (outputs != null) { |
| 660 _json["outputs"] = outputs.map((value) => (value).toJson()).toList(); |
| 661 } |
| 662 if (stateFamilies != null) { |
| 663 _json["stateFamilies"] = stateFamilies.map((value) => (value).toJson()).to
List(); |
| 664 } |
| 665 if (systemStageName != null) { |
| 666 _json["systemStageName"] = systemStageName; |
| 667 } |
| 668 if (userStageName != null) { |
| 669 _json["userStageName"] = userStageName; |
| 670 } |
| 671 return _json; |
| 672 } |
| 673 } |
| 674 |
| 675 /** Identifies the location of a custom source. */ |
| 676 class CustomSourceLocation { |
| 677 /** Whether this source is stateful. */ |
| 678 core.bool stateful; |
| 679 |
| 680 CustomSourceLocation(); |
| 681 |
| 682 CustomSourceLocation.fromJson(core.Map _json) { |
| 683 if (_json.containsKey("stateful")) { |
| 684 stateful = _json["stateful"]; |
| 685 } |
| 686 } |
| 687 |
| 688 core.Map toJson() { |
| 689 var _json = new core.Map(); |
| 690 if (stateful != null) { |
| 691 _json["stateful"] = stateful; |
| 692 } |
| 693 return _json; |
| 694 } |
| 695 } |
| 696 |
| 697 /** Data disk assignment for a given VM instance. */ |
| 698 class DataDiskAssignment { |
| 699 /** |
| 700 * Mounted data disks. The order is important: a data disk's 0-based index in |
| 701 * this list defines which persistent directory the disk is mounted to, for |
| 702 * example the list of { "myproject-1014-104817-4c2-harness-0-disk-0" }, { |
| 703 * "myproject-1014-104817-4c2-harness-0-disk-1" }. |
| 704 */ |
| 705 core.List<core.String> dataDisks; |
| 706 /** |
| 707 * VM instance name the data disks are mounted to, for example |
| 708 * "myproject-1014-104817-4c2-harness-0". |
| 709 */ |
| 710 core.String vmInstance; |
| 711 |
| 712 DataDiskAssignment(); |
| 713 |
| 714 DataDiskAssignment.fromJson(core.Map _json) { |
| 715 if (_json.containsKey("dataDisks")) { |
| 716 dataDisks = _json["dataDisks"]; |
| 717 } |
| 718 if (_json.containsKey("vmInstance")) { |
| 719 vmInstance = _json["vmInstance"]; |
| 720 } |
| 721 } |
| 722 |
| 723 core.Map toJson() { |
| 724 var _json = new core.Map(); |
| 725 if (dataDisks != null) { |
| 726 _json["dataDisks"] = dataDisks; |
| 727 } |
| 728 if (vmInstance != null) { |
| 729 _json["vmInstance"] = vmInstance; |
| 730 } |
| 731 return _json; |
| 732 } |
| 733 } |
| 734 |
| 735 /** |
| 736 * Specification of one of the bundles produced as a result of splitting a |
| 737 * Source (e.g. when executing a SourceSplitRequest, or when splitting an active |
| 738 * task using WorkItemStatus.dynamic_source_split), relative to the source being |
| 739 * split. |
| 740 */ |
| 741 class DerivedSource { |
| 742 /** |
| 743 * What source to base the produced source on (if any). |
| 744 * Possible string values are: |
| 745 * - "SOURCE_DERIVATION_MODE_UNKNOWN" : A SOURCE_DERIVATION_MODE_UNKNOWN. |
| 746 * - "SOURCE_DERIVATION_MODE_INDEPENDENT" : A |
| 747 * SOURCE_DERIVATION_MODE_INDEPENDENT. |
| 748 * - "SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT" : A |
| 749 * SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT. |
| 750 * - "SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT" : A |
| 751 * SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT. |
| 752 */ |
| 753 core.String derivationMode; |
| 754 /** Specification of the source. */ |
| 755 Source source; |
| 756 |
| 757 DerivedSource(); |
| 758 |
| 759 DerivedSource.fromJson(core.Map _json) { |
| 760 if (_json.containsKey("derivationMode")) { |
| 761 derivationMode = _json["derivationMode"]; |
| 762 } |
| 763 if (_json.containsKey("source")) { |
| 764 source = new Source.fromJson(_json["source"]); |
| 765 } |
| 766 } |
| 767 |
| 768 core.Map toJson() { |
| 769 var _json = new core.Map(); |
| 770 if (derivationMode != null) { |
| 771 _json["derivationMode"] = derivationMode; |
| 772 } |
| 773 if (source != null) { |
| 774 _json["source"] = (source).toJson(); |
| 775 } |
| 776 return _json; |
| 777 } |
| 778 } |
| 779 |
| 780 /** Describes the data disk used by a workflow job. */ |
| 781 class Disk { |
| 782 /** |
| 783 * Disk storage type, as defined by Google Compute Engine. This must be a disk |
| 784 * type appropriate to the project and zone in which the workers will run. If |
| 785 * unknown or unspecified, the service will attempt to choose a reasonable |
| 786 * default. For example, the standard persistent disk type is a resource name |
| 787 * typically ending in "pd-standard". If SSD persistent disks are available, |
| 788 * the resource name typically ends with "pd-ssd". The actual valid values are |
| 789 * defined by the Google Compute Engine API, not by the Dataflow API; consult the |
| 790 * Google Compute Engine documentation for more information about determining |
| 791 * the set of available disk types for a particular project and zone. Google |
| 792 * Compute Engine Disk types are local to a particular project in a particular |
| 793 * zone, and so the resource name will typically look something like this: |
| 794 * compute.googleapis.com/projects/ |
| 795 * /zones//diskTypes/pd-standard |
| 796 */ |
| 797 core.String diskType; |
| 798 /** Directory in a VM where disk is mounted. */ |
| 799 core.String mountPoint; |
| 800 /** |
| 801 * Size of disk in GB. If zero or unspecified, the service will attempt to |
| 802 * choose a reasonable default. |
| 803 */ |
| 804 core.int sizeGb; |
| 805 |
| 806 Disk(); |
| 807 |
| 808 Disk.fromJson(core.Map _json) { |
| 809 if (_json.containsKey("diskType")) { |
| 810 diskType = _json["diskType"]; |
| 811 } |
| 812 if (_json.containsKey("mountPoint")) { |
| 813 mountPoint = _json["mountPoint"]; |
| 814 } |
| 815 if (_json.containsKey("sizeGb")) { |
| 816 sizeGb = _json["sizeGb"]; |
| 817 } |
| 818 } |
| 819 |
| 820 core.Map toJson() { |
| 821 var _json = new core.Map(); |
| 822 if (diskType != null) { |
| 823 _json["diskType"] = diskType; |
| 824 } |
| 825 if (mountPoint != null) { |
| 826 _json["mountPoint"] = mountPoint; |
| 827 } |
| 828 if (sizeGb != null) { |
| 829 _json["sizeGb"] = sizeGb; |
| 830 } |
| 831 return _json; |
| 832 } |
| 833 } |
| 834 |
| 835 /** |
| 836 * When a task splits using WorkItemStatus.dynamic_source_split, this message |
| 837 * describes the two parts of the split relative to the description of the |
| 838 * current task's input. |
| 839 */ |
| 840 class DynamicSourceSplit { |
| 841 /** |
| 842 * Primary part (continued to be processed by worker). Specified relative to |
| 843 * the previously-current source. Becomes current. |
| 844 */ |
| 845 DerivedSource primary; |
| 846 /** |
| 847 * Residual part (returned to the pool of work). Specified relative to the |
| 848 * previously-current source. |
| 849 */ |
| 850 DerivedSource residual; |
| 851 |
| 852 DynamicSourceSplit(); |
| 853 |
| 854 DynamicSourceSplit.fromJson(core.Map _json) { |
| 855 if (_json.containsKey("primary")) { |
| 856 primary = new DerivedSource.fromJson(_json["primary"]); |
| 857 } |
| 858 if (_json.containsKey("residual")) { |
| 859 residual = new DerivedSource.fromJson(_json["residual"]); |
| 860 } |
| 861 } |
| 862 |
| 863 core.Map toJson() { |
| 864 var _json = new core.Map(); |
| 865 if (primary != null) { |
| 866 _json["primary"] = (primary).toJson(); |
| 867 } |
| 868 if (residual != null) { |
| 869 _json["residual"] = (residual).toJson(); |
| 870 } |
| 871 return _json; |
| 872 } |
| 873 } |
| 874 |
| 875 /** Describes the environment in which a Dataflow Job runs. */ |
| 876 class Environment { |
| 877 /** |
| 878 * The type of cluster manager API to use. If unknown or unspecified, the |
| 879 * service will attempt to choose a reasonable default. This should be in the |
| 880 * form of the API service name, e.g. "compute.googleapis.com". |
| 881 */ |
| 882 core.String clusterManagerApiService; |
| 883 /** |
| 884 * The dataset for the current project where various workflow related tables |
| 885 * are stored. The supported resource type is: Google BigQuery: |
| 886 * bigquery.googleapis.com/{dataset} |
| 887 */ |
| 888 core.String dataset; |
| 889 /** The list of experiments to enable. */ |
| 890 core.List<core.String> experiments; |
| 891 /** |
| 892 * Experimental settings. |
| 893 * |
| 894 * The values for Object must be JSON objects. It can consist of `num`, |
| 895 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 896 */ |
| 897 core.Map<core.String, core.Object> internalExperiments; |
| 898 /** |
| 899 * The Dataflow SDK pipeline options specified by the user. These options are |
| 900 * passed through the service and are used to recreate the SDK pipeline |
| 901 * options on the worker in a language agnostic and platform independent way. |
| 902 * |
| 903 * The values for Object must be JSON objects. It can consist of `num`, |
| 904 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 905 */ |
| 906 core.Map<core.String, core.Object> sdkPipelineOptions; |
| 907 /** |
| 908 * The prefix of the resources the system should use for temporary storage. |
| 909 * The system will append the suffix "/temp-{JOBNAME}" to this resource prefix, |
| 910 * where {JOBNAME} is the value of the job_name field. The resulting bucket |
| 911 * and object prefix is used as the prefix of the resources used to store |
| 912 * temporary data needed during the job execution. NOTE: This will override |
| 913 * the value in taskrunner_settings. The supported resource type is: Google |
| 914 * Cloud Storage: storage.googleapis.com/{bucket}/{object} |
| 915 * bucket.storage.googleapis.com/{object} |
| 916 */ |
| 917 core.String tempStoragePrefix; |
| 918 /** |
| 919 * A description of the process that generated the request. |
| 920 * |
| 921 * The values for Object must be JSON objects. It can consist of `num`, |
| 922 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 923 */ |
| 924 core.Map<core.String, core.Object> userAgent; |
| 925 /** |
| 926 * A structure describing which components and their versions of the service |
| 927 * are required in order to run the job. |
| 928 * |
| 929 * The values for Object must be JSON objects. It can consist of `num`, |
| 930 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 931 */ |
| 932 core.Map<core.String, core.Object> version; |
| 933 /** |
| 934 * Worker pools. At least one "harness" worker pool must be specified in order |
| 935 * for the job to have workers. |
| 936 */ |
| 937 core.List<WorkerPool> workerPools; |
| 938 |
| 939 Environment(); |
| 940 |
| 941 Environment.fromJson(core.Map _json) { |
| 942 if (_json.containsKey("clusterManagerApiService")) { |
| 943 clusterManagerApiService = _json["clusterManagerApiService"]; |
| 944 } |
| 945 if (_json.containsKey("dataset")) { |
| 946 dataset = _json["dataset"]; |
| 947 } |
| 948 if (_json.containsKey("experiments")) { |
| 949 experiments = _json["experiments"]; |
| 950 } |
| 951 if (_json.containsKey("internalExperiments")) { |
| 952 internalExperiments = _json["internalExperiments"]; |
| 953 } |
| 954 if (_json.containsKey("sdkPipelineOptions")) { |
| 955 sdkPipelineOptions = _json["sdkPipelineOptions"]; |
| 956 } |
| 957 if (_json.containsKey("tempStoragePrefix")) { |
| 958 tempStoragePrefix = _json["tempStoragePrefix"]; |
| 959 } |
| 960 if (_json.containsKey("userAgent")) { |
| 961 userAgent = _json["userAgent"]; |
| 962 } |
| 963 if (_json.containsKey("version")) { |
| 964 version = _json["version"]; |
| 965 } |
| 966 if (_json.containsKey("workerPools")) { |
| 967 workerPools = _json["workerPools"].map((value) => new WorkerPool.fromJson(
value)).toList(); |
| 968 } |
| 969 } |
| 970 |
| 971 core.Map toJson() { |
| 972 var _json = new core.Map(); |
| 973 if (clusterManagerApiService != null) { |
| 974 _json["clusterManagerApiService"] = clusterManagerApiService; |
| 975 } |
| 976 if (dataset != null) { |
| 977 _json["dataset"] = dataset; |
| 978 } |
| 979 if (experiments != null) { |
| 980 _json["experiments"] = experiments; |
| 981 } |
| 982 if (internalExperiments != null) { |
| 983 _json["internalExperiments"] = internalExperiments; |
| 984 } |
| 985 if (sdkPipelineOptions != null) { |
| 986 _json["sdkPipelineOptions"] = sdkPipelineOptions; |
| 987 } |
| 988 if (tempStoragePrefix != null) { |
| 989 _json["tempStoragePrefix"] = tempStoragePrefix; |
| 990 } |
| 991 if (userAgent != null) { |
| 992 _json["userAgent"] = userAgent; |
| 993 } |
| 994 if (version != null) { |
| 995 _json["version"] = version; |
| 996 } |
| 997 if (workerPools != null) { |
| 998 _json["workerPools"] = workerPools.map((value) => (value).toJson()).toList
(); |
| 999 } |
| 1000 return _json; |
| 1001 } |
| 1002 } |
| 1003 |
| 1004 /** |
| 1005 * An instruction that copies its inputs (zero or more) to its (single) output. |
| 1006 */ |
| 1007 class FlattenInstruction { |
| 1008 /** Describes the inputs to the flatten instruction. */ |
| 1009 core.List<InstructionInput> inputs; |
| 1010 |
| 1011 FlattenInstruction(); |
| 1012 |
| 1013 FlattenInstruction.fromJson(core.Map _json) { |
| 1014 if (_json.containsKey("inputs")) { |
| 1015 inputs = _json["inputs"].map((value) => new InstructionInput.fromJson(valu
e)).toList(); |
| 1016 } |
| 1017 } |
| 1018 |
| 1019 core.Map toJson() { |
| 1020 var _json = new core.Map(); |
| 1021 if (inputs != null) { |
| 1022 _json["inputs"] = inputs.map((value) => (value).toJson()).toList(); |
| 1023 } |
| 1024 return _json; |
| 1025 } |
| 1026 } |
| 1027 |
| 1028 /** |
| 1029 * An input of an instruction, as a reference to an output of a producer |
| 1030 * instruction. |
| 1031 */ |
| 1032 class InstructionInput { |
| 1033 /** The output index (origin zero) within the producer. */ |
| 1034 core.int outputNum; |
| 1035 /** |
| 1036 * The index (origin zero) of the parallel instruction that produces the |
| 1037 * output to be consumed by this input. This index is relative to the list of |
| 1038 * instructions in this input's instruction's containing MapTask. |
| 1039 */ |
| 1040 core.int producerInstructionIndex; |
| 1041 |
| 1042 InstructionInput(); |
| 1043 |
| 1044 InstructionInput.fromJson(core.Map _json) { |
| 1045 if (_json.containsKey("outputNum")) { |
| 1046 outputNum = _json["outputNum"]; |
| 1047 } |
| 1048 if (_json.containsKey("producerInstructionIndex")) { |
| 1049 producerInstructionIndex = _json["producerInstructionIndex"]; |
| 1050 } |
| 1051 } |
| 1052 |
| 1053 core.Map toJson() { |
| 1054 var _json = new core.Map(); |
| 1055 if (outputNum != null) { |
| 1056 _json["outputNum"] = outputNum; |
| 1057 } |
| 1058 if (producerInstructionIndex != null) { |
| 1059 _json["producerInstructionIndex"] = producerInstructionIndex; |
| 1060 } |
| 1061 return _json; |
| 1062 } |
| 1063 } |
| 1064 |
| 1065 /** An output of an instruction. */ |
| 1066 class InstructionOutput { |
| 1067 /** |
| 1068 * The codec to use to encode data being written via this output. |
| 1069 * |
| 1070 * The values for Object must be JSON objects. It can consist of `num`, |
| 1071 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 1072 */ |
| 1073 core.Map<core.String, core.Object> codec; |
| 1074 /** The user-provided name of this output. */ |
| 1075 core.String name; |
| 1076 |
| 1077 InstructionOutput(); |
| 1078 |
| 1079 InstructionOutput.fromJson(core.Map _json) { |
| 1080 if (_json.containsKey("codec")) { |
| 1081 codec = _json["codec"]; |
| 1082 } |
| 1083 if (_json.containsKey("name")) { |
| 1084 name = _json["name"]; |
| 1085 } |
| 1086 } |
| 1087 |
| 1088 core.Map toJson() { |
| 1089 var _json = new core.Map(); |
| 1090 if (codec != null) { |
| 1091 _json["codec"] = codec; |
| 1092 } |
| 1093 if (name != null) { |
| 1094 _json["name"] = name; |
| 1095 } |
| 1096 return _json; |
| 1097 } |
| 1098 } |
| 1099 |
| 1100 /** Defines a job to be run by the Dataflow service. */ |
| 1101 class Job { |
| 1102 /** |
| 1103 * Client's unique identifier of the job, re-used by SDK across retried |
| 1104 * attempts. If this field is set, the service will ensure its uniqueness. |
| 1105 * That is, the request to create a job will fail if the service has knowledge |
| 1106 * of a previously submitted job with the same client's id and job name. The |
| 1107 * caller may, for example, use this field to ensure idempotence of job |
| 1108 * creation across retried attempts to create a job. By default, the field is |
| 1109 * empty and, in that case, the service ignores it. |
| 1110 */ |
| 1111 core.String clientRequestId; |
| 1112 /** |
| 1113 * Timestamp when job was initially created. Immutable, set by the Dataflow |
| 1114 * service. |
| 1115 */ |
| 1116 core.String createTime; |
| 1117 /** |
| 1118 * The current state of the job. Jobs are created in the JOB_STATE_STOPPED |
| 1119 * state unless otherwise specified. A job in the JOB_STATE_RUNNING state may |
| 1120 * asynchronously enter a terminal state. Once a job has reached a terminal |
| 1121 * state, no further state updates may be made. This field may be mutated by |
| 1122 * the Dataflow service; callers cannot mutate it. |
| 1123 * Possible string values are: |
| 1124 * - "JOB_STATE_UNKNOWN" : A JOB_STATE_UNKNOWN. |
| 1125 * - "JOB_STATE_STOPPED" : A JOB_STATE_STOPPED. |
| 1126 * - "JOB_STATE_RUNNING" : A JOB_STATE_RUNNING. |
| 1127 * - "JOB_STATE_DONE" : A JOB_STATE_DONE. |
| 1128 * - "JOB_STATE_FAILED" : A JOB_STATE_FAILED. |
| 1129 * - "JOB_STATE_CANCELLED" : A JOB_STATE_CANCELLED. |
| 1130 * - "JOB_STATE_UPDATED" : A JOB_STATE_UPDATED. |
| 1131 */ |
| 1132 core.String currentState; |
| 1133 /** The timestamp associated with the current state. */ |
| 1134 core.String currentStateTime; |
| 1135 /** Environment for the job. */ |
| 1136 Environment environment; |
| 1137 /** Information about how the Dataflow service will actually run the job. */ |
| 1138 JobExecutionInfo executionInfo; |
| 1139 /** |
| 1140 * The unique ID of this job. This field is set by the Dataflow service when |
| 1141 * the Job is created, and is immutable for the life of the Job. |
| 1142 */ |
| 1143 core.String id; |
| 1144 /** |
| 1145 * The user-specified Dataflow job name. Only one Job with a given name may |
| 1146 * exist in a project at any given time. If a caller attempts to create a Job |
| 1147 * with the same name as an already-existing Job, the attempt will return the |
| 1148 * existing Job. The name must match the regular expression |
| 1149 * [a-z]([-a-z0-9]{0,38}[a-z0-9])? |
| 1150 */ |
| 1151 core.String name; |
| 1152 /** The project which owns the job. */ |
| 1153 core.String projectId; |
| 1154 /** |
| 1155 * If this job is an update of an existing job, this field will be the ID of |
| 1156 * the job it replaced. When sending a CreateJobRequest, you can update a job |
| 1157 * by specifying it here. The job named here will be stopped, and its |
| 1158 * intermediate state transferred to this job. |
| 1159 */ |
| 1160 core.String replaceJobId; |
| 1161 /** |
| 1162 * If another job is an update of this job (and thus, this job is in |
| 1163 * JOB_STATE_UPDATED), this field will contain the ID of that job. |
| 1164 */ |
| 1165 core.String replacedByJobId; |
| 1166 /** |
| 1167 * The job's requested state. UpdateJob may be used to switch between the |
| 1168 * JOB_STATE_STOPPED and JOB_STATE_RUNNING states, by setting requested_state. |
| 1169 * UpdateJob may also be used to directly set a job's requested state to |
| 1170 * JOB_STATE_CANCELLED or JOB_STATE_DONE, irrevocably terminating the job if |
| 1171 * it has not already reached a terminal state. |
| 1172 * Possible string values are: |
| 1173 * - "JOB_STATE_UNKNOWN" : A JOB_STATE_UNKNOWN. |
| 1174 * - "JOB_STATE_STOPPED" : A JOB_STATE_STOPPED. |
| 1175 * - "JOB_STATE_RUNNING" : A JOB_STATE_RUNNING. |
| 1176 * - "JOB_STATE_DONE" : A JOB_STATE_DONE. |
| 1177 * - "JOB_STATE_FAILED" : A JOB_STATE_FAILED. |
| 1178 * - "JOB_STATE_CANCELLED" : A JOB_STATE_CANCELLED. |
| 1179 * - "JOB_STATE_UPDATED" : A JOB_STATE_UPDATED. |
| 1180 */ |
| 1181 core.String requestedState; |
| 1182 /** The top-level steps that constitute the entire job. */ |
| 1183 core.List<Step> steps; |
| 1184 /** |
| 1185 * Map of transform name prefixes of the job to be replaced to the |
| 1186 * corresponding name prefixes of the new job. |
| 1187 */ |
| 1188 core.Map<core.String, core.String> transformNameMapping; |
| 1189 /** |
| 1190 * The type of dataflow job. |
| 1191 * Possible string values are: |
| 1192 * - "JOB_TYPE_UNKNOWN" : A JOB_TYPE_UNKNOWN. |
| 1193 * - "JOB_TYPE_BATCH" : A JOB_TYPE_BATCH. |
| 1194 * - "JOB_TYPE_STREAMING" : A JOB_TYPE_STREAMING. |
| 1195 */ |
| 1196 core.String type; |
| 1197 |
| 1198 Job(); |
| 1199 |
| 1200 Job.fromJson(core.Map _json) { |
| 1201 if (_json.containsKey("clientRequestId")) { |
| 1202 clientRequestId = _json["clientRequestId"]; |
| 1203 } |
| 1204 if (_json.containsKey("createTime")) { |
| 1205 createTime = _json["createTime"]; |
| 1206 } |
| 1207 if (_json.containsKey("currentState")) { |
| 1208 currentState = _json["currentState"]; |
| 1209 } |
| 1210 if (_json.containsKey("currentStateTime")) { |
| 1211 currentStateTime = _json["currentStateTime"]; |
| 1212 } |
| 1213 if (_json.containsKey("environment")) { |
| 1214 environment = new Environment.fromJson(_json["environment"]); |
| 1215 } |
| 1216 if (_json.containsKey("executionInfo")) { |
| 1217 executionInfo = new JobExecutionInfo.fromJson(_json["executionInfo"]); |
| 1218 } |
| 1219 if (_json.containsKey("id")) { |
| 1220 id = _json["id"]; |
| 1221 } |
| 1222 if (_json.containsKey("name")) { |
| 1223 name = _json["name"]; |
| 1224 } |
| 1225 if (_json.containsKey("projectId")) { |
| 1226 projectId = _json["projectId"]; |
| 1227 } |
| 1228 if (_json.containsKey("replaceJobId")) { |
| 1229 replaceJobId = _json["replaceJobId"]; |
| 1230 } |
| 1231 if (_json.containsKey("replacedByJobId")) { |
| 1232 replacedByJobId = _json["replacedByJobId"]; |
| 1233 } |
| 1234 if (_json.containsKey("requestedState")) { |
| 1235 requestedState = _json["requestedState"]; |
| 1236 } |
| 1237 if (_json.containsKey("steps")) { |
| 1238 steps = _json["steps"].map((value) => new Step.fromJson(value)).toList(); |
| 1239 } |
| 1240 if (_json.containsKey("transformNameMapping")) { |
| 1241 transformNameMapping = _json["transformNameMapping"]; |
| 1242 } |
| 1243 if (_json.containsKey("type")) { |
| 1244 type = _json["type"]; |
| 1245 } |
| 1246 } |
| 1247 |
| 1248 core.Map toJson() { |
| 1249 var _json = new core.Map(); |
| 1250 if (clientRequestId != null) { |
| 1251 _json["clientRequestId"] = clientRequestId; |
| 1252 } |
| 1253 if (createTime != null) { |
| 1254 _json["createTime"] = createTime; |
| 1255 } |
| 1256 if (currentState != null) { |
| 1257 _json["currentState"] = currentState; |
| 1258 } |
| 1259 if (currentStateTime != null) { |
| 1260 _json["currentStateTime"] = currentStateTime; |
| 1261 } |
| 1262 if (environment != null) { |
| 1263 _json["environment"] = (environment).toJson(); |
| 1264 } |
| 1265 if (executionInfo != null) { |
| 1266 _json["executionInfo"] = (executionInfo).toJson(); |
| 1267 } |
| 1268 if (id != null) { |
| 1269 _json["id"] = id; |
| 1270 } |
| 1271 if (name != null) { |
| 1272 _json["name"] = name; |
| 1273 } |
| 1274 if (projectId != null) { |
| 1275 _json["projectId"] = projectId; |
| 1276 } |
| 1277 if (replaceJobId != null) { |
| 1278 _json["replaceJobId"] = replaceJobId; |
| 1279 } |
| 1280 if (replacedByJobId != null) { |
| 1281 _json["replacedByJobId"] = replacedByJobId; |
| 1282 } |
| 1283 if (requestedState != null) { |
| 1284 _json["requestedState"] = requestedState; |
| 1285 } |
| 1286 if (steps != null) { |
| 1287 _json["steps"] = steps.map((value) => (value).toJson()).toList(); |
| 1288 } |
| 1289 if (transformNameMapping != null) { |
| 1290 _json["transformNameMapping"] = transformNameMapping; |
| 1291 } |
| 1292 if (type != null) { |
| 1293 _json["type"] = type; |
| 1294 } |
| 1295 return _json; |
| 1296 } |
| 1297 } |
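As the replaceJobId and transformNameMapping comments above describe, a pipeline update is requested by creating a new job that names the running one; a sketch with placeholder transform-name prefixes (api as in the first sketch):

Future<Job> replaceRunningJob(String projectId, String runningJobId, Job newJob) {
  newJob
    ..replaceJobId = runningJobId // the running job is stopped; its state transfers
    ..transformNameMapping = {'oldPrefix': 'newPrefix'}; // placeholder prefixes
  return api.projects.jobs.create(newJob, projectId);
}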
| 1298 |
| 1299 /** |
| 1300 * Additional information about how a Dataflow job will be executed which isn’t |
| 1301 * contained in the submitted job. |
| 1302 */ |
| 1303 class JobExecutionInfo { |
| 1304 /** A mapping from each stage to the information about that stage. */ |
| 1305 core.Map<core.String, JobExecutionStageInfo> stages; |
| 1306 |
| 1307 JobExecutionInfo(); |
| 1308 |
| 1309 JobExecutionInfo.fromJson(core.Map _json) { |
| 1310 if (_json.containsKey("stages")) { |
| 1311 stages = commons.mapMap(_json["stages"], (item) => new JobExecutionStageInfo.fromJson(item)); |
| 1312 } |
| 1313 } |
| 1314 |
| 1315 core.Map toJson() { |
| 1316 var _json = new core.Map(); |
| 1317 if (stages != null) { |
| 1318 _json["stages"] = commons.mapMap(stages, (item) => (item).toJson()); |
| 1319 } |
| 1320 return _json; |
| 1321 } |
| 1322 } |
| 1323 |
| 1324 /** |
| 1325 * Contains information about how a particular |
| 1326 * [google.dataflow.v1beta3.Step][google.dataflow.v1beta3.Step] will be |
| 1327 * executed. |
| 1328 */ |
| 1329 class JobExecutionStageInfo { |
| 1330 /** |
| 1331 * The steps associated with the execution stage. Note that stages may have |
| 1332 * several steps, and that a given step might be run by more than one stage. |
| 1333 */ |
| 1334 core.List<core.String> stepName; |
| 1335 |
| 1336 JobExecutionStageInfo(); |
| 1337 |
| 1338 JobExecutionStageInfo.fromJson(core.Map _json) { |
| 1339 if (_json.containsKey("stepName")) { |
| 1340 stepName = _json["stepName"]; |
| 1341 } |
| 1342 } |
| 1343 |
| 1344 core.Map toJson() { |
| 1345 var _json = new core.Map(); |
| 1346 if (stepName != null) { |
| 1347 _json["stepName"] = stepName; |
| 1348 } |
| 1349 return _json; |
| 1350 } |
| 1351 } |
| 1352 |
| 1353 /** A particular message pertaining to a Dataflow job. */ |
| 1354 class JobMessage { |
| 1355 /** |
| 1356 * Identifies the message. This is automatically generated by the service; the |
| 1357 * caller should treat it as an opaque string. |
| 1358 */ |
| 1359 core.String id; |
| 1360 /** |
| 1361 * Importance level of the message. |
| 1362 * Possible string values are: |
| 1363 * - "JOB_MESSAGE_IMPORTANCE_UNKNOWN" : A JOB_MESSAGE_IMPORTANCE_UNKNOWN. |
| 1364 * - "JOB_MESSAGE_DEBUG" : A JOB_MESSAGE_DEBUG. |
| 1365 * - "JOB_MESSAGE_DETAILED" : A JOB_MESSAGE_DETAILED. |
| 1366 * - "JOB_MESSAGE_BASIC" : A JOB_MESSAGE_BASIC. |
| 1367 * - "JOB_MESSAGE_WARNING" : A JOB_MESSAGE_WARNING. |
| 1368 * - "JOB_MESSAGE_ERROR" : A JOB_MESSAGE_ERROR. |
| 1369 */ |
| 1370 core.String messageImportance; |
| 1371 /** The text of the message. */ |
| 1372 core.String messageText; |
| 1373 /** The timestamp of the message. */ |
| 1374 core.String time; |
| 1375 |
| 1376 JobMessage(); |
| 1377 |
| 1378 JobMessage.fromJson(core.Map _json) { |
| 1379 if (_json.containsKey("id")) { |
| 1380 id = _json["id"]; |
| 1381 } |
| 1382 if (_json.containsKey("messageImportance")) { |
| 1383 messageImportance = _json["messageImportance"]; |
| 1384 } |
| 1385 if (_json.containsKey("messageText")) { |
| 1386 messageText = _json["messageText"]; |
| 1387 } |
| 1388 if (_json.containsKey("time")) { |
| 1389 time = _json["time"]; |
| 1390 } |
| 1391 } |
| 1392 |
| 1393 core.Map toJson() { |
| 1394 var _json = new core.Map(); |
| 1395 if (id != null) { |
| 1396 _json["id"] = id; |
| 1397 } |
| 1398 if (messageImportance != null) { |
| 1399 _json["messageImportance"] = messageImportance; |
| 1400 } |
| 1401 if (messageText != null) { |
| 1402 _json["messageText"] = messageText; |
| 1403 } |
| 1404 if (time != null) { |
| 1405 _json["time"] = time; |
| 1406 } |
| 1407 return _json; |
| 1408 } |
| 1409 } |
| 1410 |
| 1411 /** |
| 1412 * JobMetrics contains a collection of metrics describing the detailed progress |
| 1413 * of a Dataflow job. Metrics correspond to user-defined and system-defined |
| 1414 * metrics in the job. This resource captures only the most recent values of |
| 1415 * each metric; time-series data can be queried for them (under the same metric |
| 1416 * names) from Cloud Monitoring. |
| 1417 */ |
| 1418 class JobMetrics { |
| 1419 /** Timestamp as of which metric values are current. */ |
| 1420 core.String metricTime; |
| 1421 /** All metrics for this job. */ |
| 1422 core.List<MetricUpdate> metrics; |
| 1423 |
| 1424 JobMetrics(); |
| 1425 |
| 1426 JobMetrics.fromJson(core.Map _json) { |
| 1427 if (_json.containsKey("metricTime")) { |
| 1428 metricTime = _json["metricTime"]; |
| 1429 } |
| 1430 if (_json.containsKey("metrics")) { |
| 1431 metrics = _json["metrics"].map((value) => new MetricUpdate.fromJson(value)
).toList(); |
| 1432 } |
| 1433 } |
| 1434 |
| 1435 core.Map toJson() { |
| 1436 var _json = new core.Map(); |
| 1437 if (metricTime != null) { |
| 1438 _json["metricTime"] = metricTime; |
| 1439 } |
| 1440 if (metrics != null) { |
| 1441 _json["metrics"] = metrics.map((value) => (value).toJson()).toList(); |
| 1442 } |
| 1443 return _json; |
| 1444 } |
| 1445 } |
| 1446 |
| 1447 /** |
| 1448 * Data disk assignment information for a specific key-range of a sharded |
| 1449 * computation. Currently we only support UTF-8 character splits to simplify |
| 1450 * encoding into JSON. |
| 1451 */ |
| 1452 class KeyRangeDataDiskAssignment { |
| 1453 /** |
| 1454 * The name of the data disk where data for this range is stored. This name is |
| 1455 * local to the Google Cloud Platform project and uniquely identifies the disk |
| 1456 * within that project, for example |
| 1457 * "myproject-1014-104817-4c2-harness-0-disk-1". |
| 1458 */ |
| 1459 core.String dataDisk; |
| 1460 /** The end (exclusive) of the key range. */ |
| 1461 core.String end; |
| 1462 /** The start (inclusive) of the key range. */ |
| 1463 core.String start; |
| 1464 |
| 1465 KeyRangeDataDiskAssignment(); |
| 1466 |
| 1467 KeyRangeDataDiskAssignment.fromJson(core.Map _json) { |
| 1468 if (_json.containsKey("dataDisk")) { |
| 1469 dataDisk = _json["dataDisk"]; |
| 1470 } |
| 1471 if (_json.containsKey("end")) { |
| 1472 end = _json["end"]; |
| 1473 } |
| 1474 if (_json.containsKey("start")) { |
| 1475 start = _json["start"]; |
| 1476 } |
| 1477 } |
| 1478 |
| 1479 core.Map toJson() { |
| 1480 var _json = new core.Map(); |
| 1481 if (dataDisk != null) { |
| 1482 _json["dataDisk"] = dataDisk; |
| 1483 } |
| 1484 if (end != null) { |
| 1485 _json["end"] = end; |
| 1486 } |
| 1487 if (start != null) { |
| 1488 _json["start"] = start; |
| 1489 } |
| 1490 return _json; |
| 1491 } |
| 1492 } |
| 1493 |
| 1494 /** |
| 1495 * Location information for a specific key-range of a sharded computation. |
| 1496 * Currently we only support UTF-8 character splits to simplify encoding into |
| 1497 * JSON. |
| 1498 */ |
| 1499 class KeyRangeLocation { |
| 1500 /** |
| 1501 * The name of the data disk where data for this range is stored. This name is |
| 1502 * local to the Google Cloud Platform project and uniquely identifies the disk |
| 1503 * within that project, for example |
| 1504 * "myproject-1014-104817-4c2-harness-0-disk-1". |
| 1505 */ |
| 1506 core.String dataDisk; |
| 1507 /** |
| 1508 * The physical location of this range assignment to be used for streaming |
| 1509 * computation cross-worker message delivery. |
| 1510 */ |
| 1511 core.String deliveryEndpoint; |
| 1512 /** The end (exclusive) of the key range. */ |
| 1513 core.String end; |
| 1514 /** |
| 1515 * The location of the persistent state for this range, as a persistent |
| 1516 * directory in the worker local filesystem. |
| 1517 */ |
| 1518 core.String persistentDirectory; |
| 1519 /** The start (inclusive) of the key range. */ |
| 1520 core.String start; |
| 1521 |
| 1522 KeyRangeLocation(); |
| 1523 |
| 1524 KeyRangeLocation.fromJson(core.Map _json) { |
| 1525 if (_json.containsKey("dataDisk")) { |
| 1526 dataDisk = _json["dataDisk"]; |
| 1527 } |
| 1528 if (_json.containsKey("deliveryEndpoint")) { |
| 1529 deliveryEndpoint = _json["deliveryEndpoint"]; |
| 1530 } |
| 1531 if (_json.containsKey("end")) { |
| 1532 end = _json["end"]; |
| 1533 } |
| 1534 if (_json.containsKey("persistentDirectory")) { |
| 1535 persistentDirectory = _json["persistentDirectory"]; |
| 1536 } |
| 1537 if (_json.containsKey("start")) { |
| 1538 start = _json["start"]; |
| 1539 } |
| 1540 } |
| 1541 |
| 1542 core.Map toJson() { |
| 1543 var _json = new core.Map(); |
| 1544 if (dataDisk != null) { |
| 1545 _json["dataDisk"] = dataDisk; |
| 1546 } |
| 1547 if (deliveryEndpoint != null) { |
| 1548 _json["deliveryEndpoint"] = deliveryEndpoint; |
| 1549 } |
| 1550 if (end != null) { |
| 1551 _json["end"] = end; |
| 1552 } |
| 1553 if (persistentDirectory != null) { |
| 1554 _json["persistentDirectory"] = persistentDirectory; |
| 1555 } |
| 1556 if (start != null) { |
| 1557 _json["start"] = start; |
| 1558 } |
| 1559 return _json; |
| 1560 } |
| 1561 } |
| 1562 |
| 1563 /** Request to lease WorkItems. */ |
| 1564 class LeaseWorkItemRequest { |
| 1565 /** The current timestamp at the worker. */ |
| 1566 core.String currentWorkerTime; |
| 1567 /** The initial lease period. */ |
| 1568 core.String requestedLeaseDuration; |
| 1569 /** Filter for WorkItem type. */ |
| 1570 core.List<core.String> workItemTypes; |
| 1571 /** |
| 1572 * Worker capabilities. WorkItems might be limited to workers with specific |
| 1573 * capabilities. |
| 1574 */ |
| 1575 core.List<core.String> workerCapabilities; |
| 1576 /** |
| 1577 * Identifies the worker leasing work -- typically the ID of the virtual |
| 1578 * machine running the worker. |
| 1579 */ |
| 1580 core.String workerId; |
| 1581 |
| 1582 LeaseWorkItemRequest(); |
| 1583 |
| 1584 LeaseWorkItemRequest.fromJson(core.Map _json) { |
| 1585 if (_json.containsKey("currentWorkerTime")) { |
| 1586 currentWorkerTime = _json["currentWorkerTime"]; |
| 1587 } |
| 1588 if (_json.containsKey("requestedLeaseDuration")) { |
| 1589 requestedLeaseDuration = _json["requestedLeaseDuration"]; |
| 1590 } |
| 1591 if (_json.containsKey("workItemTypes")) { |
| 1592 workItemTypes = _json["workItemTypes"]; |
| 1593 } |
| 1594 if (_json.containsKey("workerCapabilities")) { |
| 1595 workerCapabilities = _json["workerCapabilities"]; |
| 1596 } |
| 1597 if (_json.containsKey("workerId")) { |
| 1598 workerId = _json["workerId"]; |
| 1599 } |
| 1600 } |
| 1601 |
| 1602 core.Map toJson() { |
| 1603 var _json = new core.Map(); |
| 1604 if (currentWorkerTime != null) { |
| 1605 _json["currentWorkerTime"] = currentWorkerTime; |
| 1606 } |
| 1607 if (requestedLeaseDuration != null) { |
| 1608 _json["requestedLeaseDuration"] = requestedLeaseDuration; |
| 1609 } |
| 1610 if (workItemTypes != null) { |
| 1611 _json["workItemTypes"] = workItemTypes; |
| 1612 } |
| 1613 if (workerCapabilities != null) { |
| 1614 _json["workerCapabilities"] = workerCapabilities; |
| 1615 } |
| 1616 if (workerId != null) { |
| 1617 _json["workerId"] = workerId; |
| 1618 } |
| 1619 return _json; |
| 1620 } |
| 1621 } |
| 1622 |
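// Editorial sketch (not generated code): building a lease request by hand.
// The timestamp, duration, work item types, and worker id below are
// placeholders -- the specific filter strings are assumptions, not values
// documented in this file. Only fields that are set appear in toJson().
LeaseWorkItemRequest buildExampleLeaseRequest() {
  var request = new LeaseWorkItemRequest();
  request.currentWorkerTime = "2015-01-01T00:00:00Z"; // the worker's own clock
  request.requestedLeaseDuration = "300s"; // initial lease period
  request.workItemTypes = ["map_task", "seq_map_task"]; // hypothetical filter
  request.workerCapabilities = ["example-capability"]; // placeholder
  request.workerId = "myproject-1014-104817-4c2-harness-0"; // worker VM id
  return request;
}
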
| 1623 /** Response to a request to lease WorkItems. */ |
| 1624 class LeaseWorkItemResponse { |
| 1625 /** A list of the leased WorkItems. */ |
| 1626 core.List<WorkItem> workItems; |
| 1627 |
| 1628 LeaseWorkItemResponse(); |
| 1629 |
| 1630 LeaseWorkItemResponse.fromJson(core.Map _json) { |
| 1631 if (_json.containsKey("workItems")) { |
| 1632       workItems = _json["workItems"].map((value) => new WorkItem.fromJson(value)).toList();
| 1633 } |
| 1634 } |
| 1635 |
| 1636 core.Map toJson() { |
| 1637 var _json = new core.Map(); |
| 1638 if (workItems != null) { |
| 1639 _json["workItems"] = workItems.map((value) => (value).toJson()).toList(); |
| 1640 } |
| 1641 return _json; |
| 1642 } |
| 1643 } |
| 1644 |
| 1645 /** Response to a request to list job messages. */ |
| 1646 class ListJobMessagesResponse { |
| 1647 /** Messages in ascending timestamp order. */ |
| 1648 core.List<JobMessage> jobMessages; |
| 1649 /** The token to obtain the next page of results if there are more. */ |
| 1650 core.String nextPageToken; |
| 1651 |
| 1652 ListJobMessagesResponse(); |
| 1653 |
| 1654 ListJobMessagesResponse.fromJson(core.Map _json) { |
| 1655 if (_json.containsKey("jobMessages")) { |
| 1656       jobMessages = _json["jobMessages"].map((value) => new JobMessage.fromJson(value)).toList();
| 1657 } |
| 1658 if (_json.containsKey("nextPageToken")) { |
| 1659 nextPageToken = _json["nextPageToken"]; |
| 1660 } |
| 1661 } |
| 1662 |
| 1663 core.Map toJson() { |
| 1664 var _json = new core.Map(); |
| 1665 if (jobMessages != null) { |
| 1666 _json["jobMessages"] = jobMessages.map((value) => (value).toJson()).toList
(); |
| 1667 } |
| 1668 if (nextPageToken != null) { |
| 1669 _json["nextPageToken"] = nextPageToken; |
| 1670 } |
| 1671 return _json; |
| 1672 } |
| 1673 } |
| 1674 |
| 1675 /** |
| 1676 * Response to a request to list Dataflow jobs. This may be a partial response, |
| 1677 * depending on the page size in the ListJobsRequest. |
| 1678 */ |
| 1679 class ListJobsResponse { |
| 1680 /** A subset of the requested job information. */ |
| 1681 core.List<Job> jobs; |
| 1682 /** Set if there may be more results than fit in this response. */ |
| 1683 core.String nextPageToken; |
| 1684 |
| 1685 ListJobsResponse(); |
| 1686 |
| 1687 ListJobsResponse.fromJson(core.Map _json) { |
| 1688 if (_json.containsKey("jobs")) { |
| 1689 jobs = _json["jobs"].map((value) => new Job.fromJson(value)).toList(); |
| 1690 } |
| 1691 if (_json.containsKey("nextPageToken")) { |
| 1692 nextPageToken = _json["nextPageToken"]; |
| 1693 } |
| 1694 } |
| 1695 |
| 1696 core.Map toJson() { |
| 1697 var _json = new core.Map(); |
| 1698 if (jobs != null) { |
| 1699 _json["jobs"] = jobs.map((value) => (value).toJson()).toList(); |
| 1700 } |
| 1701 if (nextPageToken != null) { |
| 1702 _json["nextPageToken"] = nextPageToken; |
| 1703 } |
| 1704 return _json; |
| 1705 } |
| 1706 } |
| 1707 |
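// Editorial sketch (not generated code): draining a paginated job listing.
// `fetchPage` is a hypothetical callback standing in for the jobs.list call;
// only the nextPageToken contract documented above is assumed.
async.Future<core.List<Job>> listAllJobs(
    async.Future<ListJobsResponse> fetchPage(core.String pageToken)) async {
  var jobs = <Job>[];
  core.String token = null;
  do {
    ListJobsResponse page = await fetchPage(token);
    if (page.jobs != null) {
      jobs.addAll(page.jobs);
    }
    token = page.nextPageToken; // unset once there are no more results
  } while (token != null);
  return jobs;
}
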
| 1708 /** |
| 1709 * MapTask consists of an ordered set of instructions, each of which describes |
| 1710 * one particular low-level operation for the worker to perform in order to |
| 1711 * accomplish the MapTask's WorkItem. Each instruction must appear in the list |
| 1712  * before any instructions which depend on its output.
| 1713 */ |
| 1714 class MapTask { |
| 1715 /** The instructions in the MapTask. */ |
| 1716 core.List<ParallelInstruction> instructions; |
| 1717 /** |
| 1718 * System-defined name of the stage containing this MapTask. Unique across the |
| 1719 * workflow. |
| 1720 */ |
| 1721 core.String stageName; |
| 1722 /** System-defined name of this MapTask. Unique across the workflow. */ |
| 1723 core.String systemName; |
| 1724 |
| 1725 MapTask(); |
| 1726 |
| 1727 MapTask.fromJson(core.Map _json) { |
| 1728 if (_json.containsKey("instructions")) { |
| 1729       instructions = _json["instructions"].map((value) => new ParallelInstruction.fromJson(value)).toList();
| 1730 } |
| 1731 if (_json.containsKey("stageName")) { |
| 1732 stageName = _json["stageName"]; |
| 1733 } |
| 1734 if (_json.containsKey("systemName")) { |
| 1735 systemName = _json["systemName"]; |
| 1736 } |
| 1737 } |
| 1738 |
| 1739 core.Map toJson() { |
| 1740 var _json = new core.Map(); |
| 1741 if (instructions != null) { |
| 1742 _json["instructions"] = instructions.map((value) => (value).toJson()).toLi
st(); |
| 1743 } |
| 1744 if (stageName != null) { |
| 1745 _json["stageName"] = stageName; |
| 1746 } |
| 1747 if (systemName != null) { |
| 1748 _json["systemName"] = systemName; |
| 1749 } |
| 1750 return _json; |
| 1751 } |
| 1752 } |
| 1753 |
| 1754 /** |
| 1755 * Identifies a metric, by describing the source which generated the metric. |
| 1756 */ |
| 1757 class MetricStructuredName { |
| 1758 /** |
| 1759 * Zero or more labeled fields which identify the part of the job this metric |
| 1760 * is associated with, such as the name of a step or collection. For example, |
| 1761  * built-in counters associated with steps will have
| 1762  * context['step'] = <step-name>. Counters associated with PCollections
| 1763  * in the SDK will have
| 1764  * context['pcollection'] = <pcollection-name>.
| 1765 */ |
| 1766 core.Map<core.String, core.String> context; |
| 1767 /** Worker-defined metric name. */ |
| 1768 core.String name; |
| 1769 /** |
| 1770  * Origin (namespace) of metric name. May be blank for user-defined metrics;
| 1771 * will be "dataflow" for metrics defined by the Dataflow service or SDK. |
| 1772 */ |
| 1773 core.String origin; |
| 1774 |
| 1775 MetricStructuredName(); |
| 1776 |
| 1777 MetricStructuredName.fromJson(core.Map _json) { |
| 1778 if (_json.containsKey("context")) { |
| 1779 context = _json["context"]; |
| 1780 } |
| 1781 if (_json.containsKey("name")) { |
| 1782 name = _json["name"]; |
| 1783 } |
| 1784 if (_json.containsKey("origin")) { |
| 1785 origin = _json["origin"]; |
| 1786 } |
| 1787 } |
| 1788 |
| 1789 core.Map toJson() { |
| 1790 var _json = new core.Map(); |
| 1791 if (context != null) { |
| 1792 _json["context"] = context; |
| 1793 } |
| 1794 if (name != null) { |
| 1795 _json["name"] = name; |
| 1796 } |
| 1797 if (origin != null) { |
| 1798 _json["origin"] = origin; |
| 1799 } |
| 1800 return _json; |
| 1801 } |
| 1802 } |
| 1803 |
| 1804 /** Describes the state of a metric. */ |
| 1805 class MetricUpdate { |
| 1806 /** |
| 1807 * True if this metric is reported as the total cumulative aggregate value |
| 1808 * accumulated since the worker started working on this WorkItem. By default |
| 1809 * this is false, indicating that this metric is reported as a delta that is |
| 1810 * not associated with any WorkItem. |
| 1811 */ |
| 1812 core.bool cumulative; |
| 1813 /** |
| 1814 * Worker-computed aggregate value for internal use by the Dataflow service. |
| 1815 * |
| 1816 * The values for Object must be JSON objects. It can consist of `num`, |
| 1817 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 1818 */ |
| 1819 core.Object internal; |
| 1820 /** |
| 1821 * Metric aggregation kind. The possible metric aggregation kinds are "Sum", |
| 1822 * "Max", "Min", "Mean", "Set", "And", and "Or". The specified aggregation |
| 1823 * kind is case-insensitive. If omitted, this is not an aggregated value but |
| 1824 * instead a single metric sample value. |
| 1825 */ |
| 1826 core.String kind; |
| 1827 /** |
| 1828 * Worker-computed aggregate value for the "Mean" aggregation kind. This holds |
| 1829 * the count of the aggregated values and is used in combination with mean_sum |
| 1830 * above to obtain the actual mean aggregate value. The only possible value |
| 1831 * type is Long. |
| 1832 * |
| 1833 * The values for Object must be JSON objects. It can consist of `num`, |
| 1834 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 1835 */ |
| 1836 core.Object meanCount; |
| 1837 /** |
| 1838 * Worker-computed aggregate value for the "Mean" aggregation kind. This holds |
| 1839 * the sum of the aggregated values and is used in combination with mean_count |
| 1840 * below to obtain the actual mean aggregate value. The only possible value |
| 1841 * types are Long and Double. |
| 1842 * |
| 1843 * The values for Object must be JSON objects. It can consist of `num`, |
| 1844 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 1845 */ |
| 1846 core.Object meanSum; |
| 1847 /** Name of the metric. */ |
| 1848 MetricStructuredName name; |
| 1849 /** |
| 1850 * Worker-computed aggregate value for aggregation kinds "Sum", "Max", "Min", |
| 1851 * "And", and "Or". The possible value types are Long, Double, and Boolean. |
| 1852 * |
| 1853 * The values for Object must be JSON objects. It can consist of `num`, |
| 1854 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 1855 */ |
| 1856 core.Object scalar; |
| 1857 /** |
| 1858 * Worker-computed aggregate value for the "Set" aggregation kind. The only |
| 1859 * possible value type is a list of Values whose type can be Long, Double, or |
| 1860 * String, according to the metric's type. All Values in the list must be of |
| 1861 * the same type. |
| 1862 * |
| 1863 * The values for Object must be JSON objects. It can consist of `num`, |
| 1864 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 1865 */ |
| 1866 core.Object set; |
| 1867 /** |
| 1868 * Timestamp associated with the metric value. Optional when workers are |
| 1869 * reporting work progress; it will be filled in responses from the metrics |
| 1870 * API. |
| 1871 */ |
| 1872 core.String updateTime; |
| 1873 |
| 1874 MetricUpdate(); |
| 1875 |
| 1876 MetricUpdate.fromJson(core.Map _json) { |
| 1877 if (_json.containsKey("cumulative")) { |
| 1878 cumulative = _json["cumulative"]; |
| 1879 } |
| 1880 if (_json.containsKey("internal")) { |
| 1881 internal = _json["internal"]; |
| 1882 } |
| 1883 if (_json.containsKey("kind")) { |
| 1884 kind = _json["kind"]; |
| 1885 } |
| 1886 if (_json.containsKey("meanCount")) { |
| 1887 meanCount = _json["meanCount"]; |
| 1888 } |
| 1889 if (_json.containsKey("meanSum")) { |
| 1890 meanSum = _json["meanSum"]; |
| 1891 } |
| 1892 if (_json.containsKey("name")) { |
| 1893 name = new MetricStructuredName.fromJson(_json["name"]); |
| 1894 } |
| 1895 if (_json.containsKey("scalar")) { |
| 1896 scalar = _json["scalar"]; |
| 1897 } |
| 1898 if (_json.containsKey("set")) { |
| 1899 set = _json["set"]; |
| 1900 } |
| 1901 if (_json.containsKey("updateTime")) { |
| 1902 updateTime = _json["updateTime"]; |
| 1903 } |
| 1904 } |
| 1905 |
| 1906 core.Map toJson() { |
| 1907 var _json = new core.Map(); |
| 1908 if (cumulative != null) { |
| 1909 _json["cumulative"] = cumulative; |
| 1910 } |
| 1911 if (internal != null) { |
| 1912 _json["internal"] = internal; |
| 1913 } |
| 1914 if (kind != null) { |
| 1915 _json["kind"] = kind; |
| 1916 } |
| 1917 if (meanCount != null) { |
| 1918 _json["meanCount"] = meanCount; |
| 1919 } |
| 1920 if (meanSum != null) { |
| 1921 _json["meanSum"] = meanSum; |
| 1922 } |
| 1923 if (name != null) { |
| 1924 _json["name"] = (name).toJson(); |
| 1925 } |
| 1926 if (scalar != null) { |
| 1927 _json["scalar"] = scalar; |
| 1928 } |
| 1929 if (set != null) { |
| 1930 _json["set"] = set; |
| 1931 } |
| 1932 if (updateTime != null) { |
| 1933 _json["updateTime"] = updateTime; |
| 1934 } |
| 1935 return _json; |
| 1936 } |
| 1937 } |
| 1938 |
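// Editorial sketch (not generated code): reporting a "Mean" aggregate. Per
// the field docs above, the service derives the mean as meanSum / meanCount,
// so a worker that observed the values 2, 4 and 9 reports meanSum = 15 and
// meanCount = 3 (15 / 3 = 5). The metric name and step label are hypothetical.
MetricUpdate buildExampleMeanUpdate() {
  var name = new MetricStructuredName();
  name.origin = "dataflow";
  name.name = "element_latency_ms"; // hypothetical metric name
  name.context = {"step": "ParDo(Parse)"}; // hypothetical step label
  var update = new MetricUpdate();
  update.name = name;
  update.kind = "Mean"; // aggregation kind is case-insensitive
  update.cumulative = true; // total since the worker started this WorkItem
  update.meanSum = 15; // 2 + 4 + 9
  update.meanCount = 3; // three values observed
  return update;
}
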
| 1939 /** Describes mounted data disk. */ |
| 1940 class MountedDataDisk { |
| 1941 /** |
| 1942 * The name of the data disk. This name is local to the Google Cloud Platform |
| 1943 * project and uniquely identifies the disk within that project, for example |
| 1944 * "myproject-1014-104817-4c2-harness-0-disk-1". |
| 1945 */ |
| 1946 core.String dataDisk; |
| 1947 |
| 1948 MountedDataDisk(); |
| 1949 |
| 1950 MountedDataDisk.fromJson(core.Map _json) { |
| 1951 if (_json.containsKey("dataDisk")) { |
| 1952 dataDisk = _json["dataDisk"]; |
| 1953 } |
| 1954 } |
| 1955 |
| 1956 core.Map toJson() { |
| 1957 var _json = new core.Map(); |
| 1958 if (dataDisk != null) { |
| 1959 _json["dataDisk"] = dataDisk; |
| 1960 } |
| 1961 return _json; |
| 1962 } |
| 1963 } |
| 1964 |
| 1965 /** Information about an output of a multi-output DoFn. */ |
| 1966 class MultiOutputInfo { |
| 1967 /** |
| 1968 * The id of the tag the user code will emit to this output by; this should |
| 1969 * correspond to the tag of some SideInputInfo. |
| 1970 */ |
| 1971 core.String tag; |
| 1972 |
| 1973 MultiOutputInfo(); |
| 1974 |
| 1975 MultiOutputInfo.fromJson(core.Map _json) { |
| 1976 if (_json.containsKey("tag")) { |
| 1977 tag = _json["tag"]; |
| 1978 } |
| 1979 } |
| 1980 |
| 1981 core.Map toJson() { |
| 1982 var _json = new core.Map(); |
| 1983 if (tag != null) { |
| 1984 _json["tag"] = tag; |
| 1985 } |
| 1986 return _json; |
| 1987 } |
| 1988 } |
| 1989 |
| 1990 /** |
| 1991 * Packages that need to be installed in order for a worker to run the steps of |
| 1992 * the Dataflow job which will be assigned to its worker pool. This is the |
| 1993 * mechanism by which the SDK causes code to be loaded onto the workers. For |
| 1994 * example, the Dataflow Java SDK might use this to install jars containing the |
| 1995 * user's code and all of the various dependencies (libraries, data files, etc) |
| 1996 * required in order for that code to run. |
| 1997 */ |
| 1998 class Package { |
| 1999 /** |
| 2000 * The resource to read the package from. The supported resource type is: |
| 2001 * Google Cloud Storage: storage.googleapis.com/{bucket} |
| 2002 * bucket.storage.googleapis.com/ |
| 2003 */ |
| 2004 core.String location; |
| 2005 /** The name of the package. */ |
| 2006 core.String name; |
| 2007 |
| 2008 Package(); |
| 2009 |
| 2010 Package.fromJson(core.Map _json) { |
| 2011 if (_json.containsKey("location")) { |
| 2012 location = _json["location"]; |
| 2013 } |
| 2014 if (_json.containsKey("name")) { |
| 2015 name = _json["name"]; |
| 2016 } |
| 2017 } |
| 2018 |
| 2019 core.Map toJson() { |
| 2020 var _json = new core.Map(); |
| 2021 if (location != null) { |
| 2022 _json["location"] = location; |
| 2023 } |
| 2024 if (name != null) { |
| 2025 _json["name"] = name; |
| 2026 } |
| 2027 return _json; |
| 2028 } |
| 2029 } |
| 2030 |
| 2031 /** |
| 2032 * An instruction that does a ParDo operation. Takes one main input and zero or |
| 2033 * more side inputs, and produces zero or more outputs. Runs user code. |
| 2034 */ |
| 2035 class ParDoInstruction { |
| 2036 /** The input. */ |
| 2037 InstructionInput input; |
| 2038 /** Information about each of the outputs, if user_fn is a MultiDoFn. */ |
| 2039 core.List<MultiOutputInfo> multiOutputInfos; |
| 2040 /** The number of outputs. */ |
| 2041 core.int numOutputs; |
| 2042 /** Zero or more side inputs. */ |
| 2043 core.List<SideInputInfo> sideInputs; |
| 2044 /** |
| 2045 * The user function to invoke. |
| 2046 * |
| 2047 * The values for Object must be JSON objects. It can consist of `num`, |
| 2048 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 2049 */ |
| 2050 core.Map<core.String, core.Object> userFn; |
| 2051 |
| 2052 ParDoInstruction(); |
| 2053 |
| 2054 ParDoInstruction.fromJson(core.Map _json) { |
| 2055 if (_json.containsKey("input")) { |
| 2056 input = new InstructionInput.fromJson(_json["input"]); |
| 2057 } |
| 2058 if (_json.containsKey("multiOutputInfos")) { |
| 2059       multiOutputInfos = _json["multiOutputInfos"].map((value) => new MultiOutputInfo.fromJson(value)).toList();
| 2060 } |
| 2061 if (_json.containsKey("numOutputs")) { |
| 2062 numOutputs = _json["numOutputs"]; |
| 2063 } |
| 2064 if (_json.containsKey("sideInputs")) { |
| 2065       sideInputs = _json["sideInputs"].map((value) => new SideInputInfo.fromJson(value)).toList();
| 2066 } |
| 2067 if (_json.containsKey("userFn")) { |
| 2068 userFn = _json["userFn"]; |
| 2069 } |
| 2070 } |
| 2071 |
| 2072 core.Map toJson() { |
| 2073 var _json = new core.Map(); |
| 2074 if (input != null) { |
| 2075 _json["input"] = (input).toJson(); |
| 2076 } |
| 2077 if (multiOutputInfos != null) { |
| 2078 _json["multiOutputInfos"] = multiOutputInfos.map((value) => (value).toJson
()).toList(); |
| 2079 } |
| 2080 if (numOutputs != null) { |
| 2081 _json["numOutputs"] = numOutputs; |
| 2082 } |
| 2083 if (sideInputs != null) { |
| 2084 _json["sideInputs"] = sideInputs.map((value) => (value).toJson()).toList()
; |
| 2085 } |
| 2086 if (userFn != null) { |
| 2087 _json["userFn"] = userFn; |
| 2088 } |
| 2089 return _json; |
| 2090 } |
| 2091 } |
| 2092 |
| 2093 /** Describes a particular operation comprising a MapTask. */ |
| 2094 class ParallelInstruction { |
| 2095 /** Additional information for Flatten instructions. */ |
| 2096 FlattenInstruction flatten; |
| 2097 /** User-provided name of this operation. */ |
| 2098 core.String name; |
| 2099 /** Describes the outputs of the instruction. */ |
| 2100 core.List<InstructionOutput> outputs; |
| 2101 /** Additional information for ParDo instructions. */ |
| 2102 ParDoInstruction parDo; |
| 2103 /** Additional information for PartialGroupByKey instructions. */ |
| 2104 PartialGroupByKeyInstruction partialGroupByKey; |
| 2105 /** Additional information for Read instructions. */ |
| 2106 ReadInstruction read; |
| 2107 /** System-defined name of this operation. Unique across the workflow. */ |
| 2108 core.String systemName; |
| 2109 /** Additional information for Write instructions. */ |
| 2110 WriteInstruction write; |
| 2111 |
| 2112 ParallelInstruction(); |
| 2113 |
| 2114 ParallelInstruction.fromJson(core.Map _json) { |
| 2115 if (_json.containsKey("flatten")) { |
| 2116 flatten = new FlattenInstruction.fromJson(_json["flatten"]); |
| 2117 } |
| 2118 if (_json.containsKey("name")) { |
| 2119 name = _json["name"]; |
| 2120 } |
| 2121 if (_json.containsKey("outputs")) { |
| 2122       outputs = _json["outputs"].map((value) => new InstructionOutput.fromJson(value)).toList();
| 2123 } |
| 2124 if (_json.containsKey("parDo")) { |
| 2125 parDo = new ParDoInstruction.fromJson(_json["parDo"]); |
| 2126 } |
| 2127 if (_json.containsKey("partialGroupByKey")) { |
| 2128       partialGroupByKey = new PartialGroupByKeyInstruction.fromJson(_json["partialGroupByKey"]);
| 2129 } |
| 2130 if (_json.containsKey("read")) { |
| 2131 read = new ReadInstruction.fromJson(_json["read"]); |
| 2132 } |
| 2133 if (_json.containsKey("systemName")) { |
| 2134 systemName = _json["systemName"]; |
| 2135 } |
| 2136 if (_json.containsKey("write")) { |
| 2137 write = new WriteInstruction.fromJson(_json["write"]); |
| 2138 } |
| 2139 } |
| 2140 |
| 2141 core.Map toJson() { |
| 2142 var _json = new core.Map(); |
| 2143 if (flatten != null) { |
| 2144 _json["flatten"] = (flatten).toJson(); |
| 2145 } |
| 2146 if (name != null) { |
| 2147 _json["name"] = name; |
| 2148 } |
| 2149 if (outputs != null) { |
| 2150 _json["outputs"] = outputs.map((value) => (value).toJson()).toList(); |
| 2151 } |
| 2152 if (parDo != null) { |
| 2153 _json["parDo"] = (parDo).toJson(); |
| 2154 } |
| 2155 if (partialGroupByKey != null) { |
| 2156 _json["partialGroupByKey"] = (partialGroupByKey).toJson(); |
| 2157 } |
| 2158 if (read != null) { |
| 2159 _json["read"] = (read).toJson(); |
| 2160 } |
| 2161 if (systemName != null) { |
| 2162 _json["systemName"] = systemName; |
| 2163 } |
| 2164 if (write != null) { |
| 2165 _json["write"] = (write).toJson(); |
| 2166 } |
| 2167 return _json; |
| 2168 } |
| 2169 } |
| 2170 |
| 2171 /** |
| 2172 * An instruction that does a partial group-by-key. One input and one output. |
| 2173 */ |
| 2174 class PartialGroupByKeyInstruction { |
| 2175 /** Describes the input to the partial group-by-key instruction. */ |
| 2176 InstructionInput input; |
| 2177 /** |
| 2178 * The codec to use for interpreting an element in the input PTable. |
| 2179 * |
| 2180 * The values for Object must be JSON objects. It can consist of `num`, |
| 2181 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 2182 */ |
| 2183 core.Map<core.String, core.Object> inputElementCodec; |
| 2184 /** |
| 2185 * The value combining function to invoke. |
| 2186 * |
| 2187 * The values for Object must be JSON objects. It can consist of `num`, |
| 2188 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 2189 */ |
| 2190 core.Map<core.String, core.Object> valueCombiningFn; |
| 2191 |
| 2192 PartialGroupByKeyInstruction(); |
| 2193 |
| 2194 PartialGroupByKeyInstruction.fromJson(core.Map _json) { |
| 2195 if (_json.containsKey("input")) { |
| 2196 input = new InstructionInput.fromJson(_json["input"]); |
| 2197 } |
| 2198 if (_json.containsKey("inputElementCodec")) { |
| 2199 inputElementCodec = _json["inputElementCodec"]; |
| 2200 } |
| 2201 if (_json.containsKey("valueCombiningFn")) { |
| 2202 valueCombiningFn = _json["valueCombiningFn"]; |
| 2203 } |
| 2204 } |
| 2205 |
| 2206 core.Map toJson() { |
| 2207 var _json = new core.Map(); |
| 2208 if (input != null) { |
| 2209 _json["input"] = (input).toJson(); |
| 2210 } |
| 2211 if (inputElementCodec != null) { |
| 2212 _json["inputElementCodec"] = inputElementCodec; |
| 2213 } |
| 2214 if (valueCombiningFn != null) { |
| 2215 _json["valueCombiningFn"] = valueCombiningFn; |
| 2216 } |
| 2217 return _json; |
| 2218 } |
| 2219 } |
| 2220 |
| 2221 /** |
| 2222 * Position defines a position within a collection of data. The value can be |
| 2223 * either the end position, a key (used with ordered collections), a byte |
| 2224 * offset, or a record index. |
| 2225 */ |
| 2226 class Position { |
| 2227 /** Position is a byte offset. */ |
| 2228 core.String byteOffset; |
| 2229 /** |
| 2230 * Position is past all other positions. Also useful for the end position of |
| 2231 * an unbounded range. |
| 2232 */ |
| 2233 core.bool end; |
| 2234 /** Position is a string key, ordered lexicographically. */ |
| 2235 core.String key; |
| 2236 /** Position is a record index. */ |
| 2237 core.String recordIndex; |
| 2238 /** |
| 2239 * CloudPosition is a base64 encoded BatchShufflePosition (with FIXED |
| 2240 * sharding). |
| 2241 */ |
| 2242 core.String shufflePosition; |
| 2243 |
| 2244 Position(); |
| 2245 |
| 2246 Position.fromJson(core.Map _json) { |
| 2247 if (_json.containsKey("byteOffset")) { |
| 2248 byteOffset = _json["byteOffset"]; |
| 2249 } |
| 2250 if (_json.containsKey("end")) { |
| 2251 end = _json["end"]; |
| 2252 } |
| 2253 if (_json.containsKey("key")) { |
| 2254 key = _json["key"]; |
| 2255 } |
| 2256 if (_json.containsKey("recordIndex")) { |
| 2257 recordIndex = _json["recordIndex"]; |
| 2258 } |
| 2259 if (_json.containsKey("shufflePosition")) { |
| 2260 shufflePosition = _json["shufflePosition"]; |
| 2261 } |
| 2262 } |
| 2263 |
| 2264 core.Map toJson() { |
| 2265 var _json = new core.Map(); |
| 2266 if (byteOffset != null) { |
| 2267 _json["byteOffset"] = byteOffset; |
| 2268 } |
| 2269 if (end != null) { |
| 2270 _json["end"] = end; |
| 2271 } |
| 2272 if (key != null) { |
| 2273 _json["key"] = key; |
| 2274 } |
| 2275 if (recordIndex != null) { |
| 2276 _json["recordIndex"] = recordIndex; |
| 2277 } |
| 2278 if (shufflePosition != null) { |
| 2279 _json["shufflePosition"] = shufflePosition; |
| 2280 } |
| 2281 return _json; |
| 2282 } |
| 2283 } |
| 2284 |
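// Editorial sketch (not generated code): the class docs above describe the
// variants as alternatives, so a Position should set exactly one of them.
// Byte offsets and record indices are int64 values in the underlying
// protocol, which is why the generated client models them as strings.
Position byteOffsetPosition(core.int offset) {
  var position = new Position();
  position.byteOffset = "$offset"; // serializes as {"byteOffset": "<offset>"}
  return position;
}

Position endOfUnboundedRange() {
  var position = new Position();
  position.end = true; // past all other positions
  return position;
}
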
| 2285 /** |
| 2286 * Identifies a pubsub location to use for transferring data into or out of a |
| 2287 * streaming Dataflow job. |
| 2288 */ |
| 2289 class PubsubLocation { |
| 2290 /** Indicates whether the pipeline allows late-arriving data. */ |
| 2291 core.bool dropLateData; |
| 2292 /** |
| 2293 * If set, contains a pubsub label from which to extract record ids. If left |
| 2294 * empty, record deduplication will be strictly best effort. |
| 2295 */ |
| 2296 core.String idLabel; |
| 2297 /** |
| 2298  * A pubsub subscription, in the form of
| 2299  * "pubsub.googleapis.com/subscriptions/<project-id>/<subscription-name>".
| 2300 */ |
| 2301 core.String subscription; |
| 2302 /** |
| 2303 * If set, contains a pubsub label from which to extract record timestamps. If |
| 2304 * left empty, record timestamps will be generated upon arrival. |
| 2305 */ |
| 2306 core.String timestampLabel; |
| 2307 /** |
| 2308  * A pubsub topic, in the form of
| 2309  * "pubsub.googleapis.com/topics/<project-id>/<topic-name>".
| 2310 */ |
| 2311 core.String topic; |
| 2312 /** |
| 2313 * If set, specifies the pubsub subscription that will be used for tracking |
| 2314 * custom time timestamps for watermark estimation. |
| 2315 */ |
| 2316 core.String trackingSubscription; |
| 2317 |
| 2318 PubsubLocation(); |
| 2319 |
| 2320 PubsubLocation.fromJson(core.Map _json) { |
| 2321 if (_json.containsKey("dropLateData")) { |
| 2322 dropLateData = _json["dropLateData"]; |
| 2323 } |
| 2324 if (_json.containsKey("idLabel")) { |
| 2325 idLabel = _json["idLabel"]; |
| 2326 } |
| 2327 if (_json.containsKey("subscription")) { |
| 2328 subscription = _json["subscription"]; |
| 2329 } |
| 2330 if (_json.containsKey("timestampLabel")) { |
| 2331 timestampLabel = _json["timestampLabel"]; |
| 2332 } |
| 2333 if (_json.containsKey("topic")) { |
| 2334 topic = _json["topic"]; |
| 2335 } |
| 2336 if (_json.containsKey("trackingSubscription")) { |
| 2337 trackingSubscription = _json["trackingSubscription"]; |
| 2338 } |
| 2339 } |
| 2340 |
| 2341 core.Map toJson() { |
| 2342 var _json = new core.Map(); |
| 2343 if (dropLateData != null) { |
| 2344 _json["dropLateData"] = dropLateData; |
| 2345 } |
| 2346 if (idLabel != null) { |
| 2347 _json["idLabel"] = idLabel; |
| 2348 } |
| 2349 if (subscription != null) { |
| 2350 _json["subscription"] = subscription; |
| 2351 } |
| 2352 if (timestampLabel != null) { |
| 2353 _json["timestampLabel"] = timestampLabel; |
| 2354 } |
| 2355 if (topic != null) { |
| 2356 _json["topic"] = topic; |
| 2357 } |
| 2358 if (trackingSubscription != null) { |
| 2359 _json["trackingSubscription"] = trackingSubscription; |
| 2360 } |
| 2361 return _json; |
| 2362 } |
| 2363 } |
| 2364 |
| 2365 /** An instruction that reads records. Takes no inputs, produces one output. */ |
| 2366 class ReadInstruction { |
| 2367 /** The source to read from. */ |
| 2368 Source source; |
| 2369 |
| 2370 ReadInstruction(); |
| 2371 |
| 2372 ReadInstruction.fromJson(core.Map _json) { |
| 2373 if (_json.containsKey("source")) { |
| 2374 source = new Source.fromJson(_json["source"]); |
| 2375 } |
| 2376 } |
| 2377 |
| 2378 core.Map toJson() { |
| 2379 var _json = new core.Map(); |
| 2380 if (source != null) { |
| 2381 _json["source"] = (source).toJson(); |
| 2382 } |
| 2383 return _json; |
| 2384 } |
| 2385 } |
| 2386 |
| 2387 /** Request to report the status of WorkItems. */ |
| 2388 class ReportWorkItemStatusRequest { |
| 2389 /** The current timestamp at the worker. */ |
| 2390 core.String currentWorkerTime; |
| 2391 /** |
| 2392 * The order is unimportant, except that the order of the WorkItemServiceState |
| 2393 * messages in the ReportWorkItemStatusResponse corresponds to the order of |
| 2394 * WorkItemStatus messages here. |
| 2395 */ |
| 2396 core.List<WorkItemStatus> workItemStatuses; |
| 2397 /** |
| 2398 * The ID of the worker reporting the WorkItem status. If this does not match |
| 2399 * the ID of the worker which the Dataflow service believes currently has the |
| 2400 * lease on the WorkItem, the report will be dropped (with an error response). |
| 2401 */ |
| 2402 core.String workerId; |
| 2403 |
| 2404 ReportWorkItemStatusRequest(); |
| 2405 |
| 2406 ReportWorkItemStatusRequest.fromJson(core.Map _json) { |
| 2407 if (_json.containsKey("currentWorkerTime")) { |
| 2408 currentWorkerTime = _json["currentWorkerTime"]; |
| 2409 } |
| 2410 if (_json.containsKey("workItemStatuses")) { |
| 2411       workItemStatuses = _json["workItemStatuses"].map((value) => new WorkItemStatus.fromJson(value)).toList();
| 2412 } |
| 2413 if (_json.containsKey("workerId")) { |
| 2414 workerId = _json["workerId"]; |
| 2415 } |
| 2416 } |
| 2417 |
| 2418 core.Map toJson() { |
| 2419 var _json = new core.Map(); |
| 2420 if (currentWorkerTime != null) { |
| 2421 _json["currentWorkerTime"] = currentWorkerTime; |
| 2422 } |
| 2423 if (workItemStatuses != null) { |
| 2424 _json["workItemStatuses"] = workItemStatuses.map((value) => (value).toJson
()).toList(); |
| 2425 } |
| 2426 if (workerId != null) { |
| 2427 _json["workerId"] = workerId; |
| 2428 } |
| 2429 return _json; |
| 2430 } |
| 2431 } |
| 2432 |
| 2433 /** Response from a request to report the status of WorkItems. */ |
| 2434 class ReportWorkItemStatusResponse { |
| 2435 /** |
| 2436 * A set of messages indicating the service-side state for each WorkItem whose |
| 2437 * status was reported, in the same order as the WorkItemStatus messages in |
| 2438  * the ReportWorkItemStatusRequest which resulted in this response.
| 2439 */ |
| 2440 core.List<WorkItemServiceState> workItemServiceStates; |
| 2441 |
| 2442 ReportWorkItemStatusResponse(); |
| 2443 |
| 2444 ReportWorkItemStatusResponse.fromJson(core.Map _json) { |
| 2445 if (_json.containsKey("workItemServiceStates")) { |
| 2446       workItemServiceStates = _json["workItemServiceStates"].map((value) => new WorkItemServiceState.fromJson(value)).toList();
| 2447 } |
| 2448 } |
| 2449 |
| 2450 core.Map toJson() { |
| 2451 var _json = new core.Map(); |
| 2452 if (workItemServiceStates != null) { |
| 2453 _json["workItemServiceStates"] = workItemServiceStates.map((value) => (val
ue).toJson()).toList(); |
| 2454 } |
| 2455 return _json; |
| 2456 } |
| 2457 } |
| 2458 |
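// Editorial sketch (not generated code): the documented contract is purely
// positional -- the i-th WorkItemServiceState answers the i-th WorkItemStatus
// of the request -- so pairing them is an index walk. `handle` is a
// hypothetical callback.
void pairStatusesWithStates(
    ReportWorkItemStatusRequest request,
    ReportWorkItemStatusResponse response,
    void handle(WorkItemStatus status, WorkItemServiceState state)) {
  for (var i = 0; i < request.workItemStatuses.length; i++) {
    handle(request.workItemStatuses[i], response.workItemServiceStates[i]);
  }
}
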
| 2459 /** Describes a particular function to invoke. */ |
| 2460 class SeqMapTask { |
| 2461 /** Information about each of the inputs. */ |
| 2462 core.List<SideInputInfo> inputs; |
| 2463 /** The user-provided name of the SeqDo operation. */ |
| 2464 core.String name; |
| 2465 /** Information about each of the outputs. */ |
| 2466 core.List<SeqMapTaskOutputInfo> outputInfos; |
| 2467 /** |
| 2468 * System-defined name of the stage containing the SeqDo operation. Unique |
| 2469 * across the workflow. |
| 2470 */ |
| 2471 core.String stageName; |
| 2472 /** |
| 2473 * System-defined name of the SeqDo operation. Unique across the workflow. |
| 2474 */ |
| 2475 core.String systemName; |
| 2476 /** |
| 2477 * The user function to invoke. |
| 2478 * |
| 2479 * The values for Object must be JSON objects. It can consist of `num`, |
| 2480 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 2481 */ |
| 2482 core.Map<core.String, core.Object> userFn; |
| 2483 |
| 2484 SeqMapTask(); |
| 2485 |
| 2486 SeqMapTask.fromJson(core.Map _json) { |
| 2487 if (_json.containsKey("inputs")) { |
| 2488       inputs = _json["inputs"].map((value) => new SideInputInfo.fromJson(value)).toList();
| 2489 } |
| 2490 if (_json.containsKey("name")) { |
| 2491 name = _json["name"]; |
| 2492 } |
| 2493 if (_json.containsKey("outputInfos")) { |
| 2494       outputInfos = _json["outputInfos"].map((value) => new SeqMapTaskOutputInfo.fromJson(value)).toList();
| 2495 } |
| 2496 if (_json.containsKey("stageName")) { |
| 2497 stageName = _json["stageName"]; |
| 2498 } |
| 2499 if (_json.containsKey("systemName")) { |
| 2500 systemName = _json["systemName"]; |
| 2501 } |
| 2502 if (_json.containsKey("userFn")) { |
| 2503 userFn = _json["userFn"]; |
| 2504 } |
| 2505 } |
| 2506 |
| 2507 core.Map toJson() { |
| 2508 var _json = new core.Map(); |
| 2509 if (inputs != null) { |
| 2510 _json["inputs"] = inputs.map((value) => (value).toJson()).toList(); |
| 2511 } |
| 2512 if (name != null) { |
| 2513 _json["name"] = name; |
| 2514 } |
| 2515 if (outputInfos != null) { |
| 2516 _json["outputInfos"] = outputInfos.map((value) => (value).toJson()).toList
(); |
| 2517 } |
| 2518 if (stageName != null) { |
| 2519 _json["stageName"] = stageName; |
| 2520 } |
| 2521 if (systemName != null) { |
| 2522 _json["systemName"] = systemName; |
| 2523 } |
| 2524 if (userFn != null) { |
| 2525 _json["userFn"] = userFn; |
| 2526 } |
| 2527 return _json; |
| 2528 } |
| 2529 } |
| 2530 |
| 2531 /** Information about an output of a SeqMapTask. */ |
| 2532 class SeqMapTaskOutputInfo { |
| 2533 /** The sink to write the output value to. */ |
| 2534 Sink sink; |
| 2535 /** The id of the TupleTag the user code will tag the output value by. */ |
| 2536 core.String tag; |
| 2537 |
| 2538 SeqMapTaskOutputInfo(); |
| 2539 |
| 2540 SeqMapTaskOutputInfo.fromJson(core.Map _json) { |
| 2541 if (_json.containsKey("sink")) { |
| 2542 sink = new Sink.fromJson(_json["sink"]); |
| 2543 } |
| 2544 if (_json.containsKey("tag")) { |
| 2545 tag = _json["tag"]; |
| 2546 } |
| 2547 } |
| 2548 |
| 2549 core.Map toJson() { |
| 2550 var _json = new core.Map(); |
| 2551 if (sink != null) { |
| 2552 _json["sink"] = (sink).toJson(); |
| 2553 } |
| 2554 if (tag != null) { |
| 2555 _json["tag"] = tag; |
| 2556 } |
| 2557 return _json; |
| 2558 } |
| 2559 } |
| 2560 |
| 2561 /** A task which consists of a shell command for the worker to execute. */ |
| 2562 class ShellTask { |
| 2563 /** The shell command to run. */ |
| 2564 core.String command; |
| 2565 /** Exit code for the task. */ |
| 2566 core.int exitCode; |
| 2567 |
| 2568 ShellTask(); |
| 2569 |
| 2570 ShellTask.fromJson(core.Map _json) { |
| 2571 if (_json.containsKey("command")) { |
| 2572 command = _json["command"]; |
| 2573 } |
| 2574 if (_json.containsKey("exitCode")) { |
| 2575 exitCode = _json["exitCode"]; |
| 2576 } |
| 2577 } |
| 2578 |
| 2579 core.Map toJson() { |
| 2580 var _json = new core.Map(); |
| 2581 if (command != null) { |
| 2582 _json["command"] = command; |
| 2583 } |
| 2584 if (exitCode != null) { |
| 2585 _json["exitCode"] = exitCode; |
| 2586 } |
| 2587 return _json; |
| 2588 } |
| 2589 } |
| 2590 |
| 2591 /** Information about a side input of a DoFn or an input of a SeqDoFn. */ |
| 2592 class SideInputInfo { |
| 2593 /** |
| 2594 * How to interpret the source element(s) as a side input value. |
| 2595 * |
| 2596 * The values for Object must be JSON objects. It can consist of `num`, |
| 2597 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 2598 */ |
| 2599 core.Map<core.String, core.Object> kind; |
| 2600 /** |
| 2601 * The source(s) to read element(s) from to get the value of this side input. |
| 2602 * If more than one source, then the elements are taken from the sources, in |
| 2603 * the specified order if order matters. At least one source is required. |
| 2604 */ |
| 2605 core.List<Source> sources; |
| 2606 /** |
| 2607 * The id of the tag the user code will access this side input by; this should |
| 2608 * correspond to the tag of some MultiOutputInfo. |
| 2609 */ |
| 2610 core.String tag; |
| 2611 |
| 2612 SideInputInfo(); |
| 2613 |
| 2614 SideInputInfo.fromJson(core.Map _json) { |
| 2615 if (_json.containsKey("kind")) { |
| 2616 kind = _json["kind"]; |
| 2617 } |
| 2618 if (_json.containsKey("sources")) { |
| 2619       sources = _json["sources"].map((value) => new Source.fromJson(value)).toList();
| 2620 } |
| 2621 if (_json.containsKey("tag")) { |
| 2622 tag = _json["tag"]; |
| 2623 } |
| 2624 } |
| 2625 |
| 2626 core.Map toJson() { |
| 2627 var _json = new core.Map(); |
| 2628 if (kind != null) { |
| 2629 _json["kind"] = kind; |
| 2630 } |
| 2631 if (sources != null) { |
| 2632 _json["sources"] = sources.map((value) => (value).toJson()).toList(); |
| 2633 } |
| 2634 if (tag != null) { |
| 2635 _json["tag"] = tag; |
| 2636 } |
| 2637 return _json; |
| 2638 } |
| 2639 } |
| 2640 |
| 2641 /** A sink that records can be encoded and written to. */ |
| 2642 class Sink { |
| 2643 /** |
| 2644 * The codec to use to encode data written to the sink. |
| 2645 * |
| 2646 * The values for Object must be JSON objects. It can consist of `num`, |
| 2647 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 2648 */ |
| 2649 core.Map<core.String, core.Object> codec; |
| 2650 /** |
| 2651 * The sink to write to, plus its parameters. |
| 2652 * |
| 2653 * The values for Object must be JSON objects. It can consist of `num`, |
| 2654 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 2655 */ |
| 2656 core.Map<core.String, core.Object> spec; |
| 2657 |
| 2658 Sink(); |
| 2659 |
| 2660 Sink.fromJson(core.Map _json) { |
| 2661 if (_json.containsKey("codec")) { |
| 2662 codec = _json["codec"]; |
| 2663 } |
| 2664 if (_json.containsKey("spec")) { |
| 2665 spec = _json["spec"]; |
| 2666 } |
| 2667 } |
| 2668 |
| 2669 core.Map toJson() { |
| 2670 var _json = new core.Map(); |
| 2671 if (codec != null) { |
| 2672 _json["codec"] = codec; |
| 2673 } |
| 2674 if (spec != null) { |
| 2675 _json["spec"] = spec; |
| 2676 } |
| 2677 return _json; |
| 2678 } |
| 2679 } |
| 2680 |
| 2681 /** A source that records can be read and decoded from. */ |
| 2682 class Source { |
| 2683 /** |
| 2684 * While splitting, sources may specify the produced bundles as differences |
| 2685 * against another source, in order to save backend-side memory and allow |
| 2686 * bigger jobs. For details, see SourceSplitRequest. To support this use case, |
| 2687 * the full set of parameters of the source is logically obtained by taking |
| 2688 * the latest explicitly specified value of each parameter in the order: |
| 2689 * base_specs (later items win), spec (overrides anything in base_specs). |
| 2690 * |
| 2691 * The values for Object must be JSON objects. It can consist of `num`, |
| 2692 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 2693 */ |
| 2694 core.List<core.Map<core.String, core.Object>> baseSpecs; |
| 2695 /** |
| 2696 * The codec to use to decode data read from the source. |
| 2697 * |
| 2698 * The values for Object must be JSON objects. It can consist of `num`, |
| 2699 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 2700 */ |
| 2701 core.Map<core.String, core.Object> codec; |
| 2702 /** |
| 2703 * Setting this value to true hints to the framework that the source doesn't |
| 2704 * need splitting, and using SourceSplitRequest on it would yield |
| 2705 * SOURCE_SPLIT_OUTCOME_USE_CURRENT. E.g. a file splitter may set this to true |
| 2706 * when splitting a single file into a set of byte ranges of appropriate size, |
| 2707 * and set this to false when splitting a filepattern into individual files. |
| 2708 * However, for efficiency, a file splitter may decide to produce file |
| 2709 * subranges directly from the filepattern to avoid a splitting round-trip. |
| 2710 * See SourceSplitRequest for an overview of the splitting process. This field |
| 2711 * is meaningful only in the Source objects populated by the user (e.g. when |
| 2712 * filling in a DerivedSource). Source objects supplied by the framework to |
| 2713 * the user don't have this field populated. |
| 2714 */ |
| 2715 core.bool doesNotNeedSplitting; |
| 2716 /** |
| 2717 * Optionally, metadata for this source can be supplied right away, avoiding a |
| 2718 * SourceGetMetadataOperation roundtrip (see SourceOperationRequest). This |
| 2719 * field is meaningful only in the Source objects populated by the user (e.g. |
| 2720 * when filling in a DerivedSource). Source objects supplied by the framework |
| 2721 * to the user don't have this field populated. |
| 2722 */ |
| 2723 SourceMetadata metadata; |
| 2724 /** |
| 2725 * The source to read from, plus its parameters. |
| 2726 * |
| 2727 * The values for Object must be JSON objects. It can consist of `num`, |
| 2728 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 2729 */ |
| 2730 core.Map<core.String, core.Object> spec; |
| 2731 |
| 2732 Source(); |
| 2733 |
| 2734 Source.fromJson(core.Map _json) { |
| 2735 if (_json.containsKey("baseSpecs")) { |
| 2736 baseSpecs = _json["baseSpecs"]; |
| 2737 } |
| 2738 if (_json.containsKey("codec")) { |
| 2739 codec = _json["codec"]; |
| 2740 } |
| 2741 if (_json.containsKey("doesNotNeedSplitting")) { |
| 2742 doesNotNeedSplitting = _json["doesNotNeedSplitting"]; |
| 2743 } |
| 2744 if (_json.containsKey("metadata")) { |
| 2745 metadata = new SourceMetadata.fromJson(_json["metadata"]); |
| 2746 } |
| 2747 if (_json.containsKey("spec")) { |
| 2748 spec = _json["spec"]; |
| 2749 } |
| 2750 } |
| 2751 |
| 2752 core.Map toJson() { |
| 2753 var _json = new core.Map(); |
| 2754 if (baseSpecs != null) { |
| 2755 _json["baseSpecs"] = baseSpecs; |
| 2756 } |
| 2757 if (codec != null) { |
| 2758 _json["codec"] = codec; |
| 2759 } |
| 2760 if (doesNotNeedSplitting != null) { |
| 2761 _json["doesNotNeedSplitting"] = doesNotNeedSplitting; |
| 2762 } |
| 2763 if (metadata != null) { |
| 2764 _json["metadata"] = (metadata).toJson(); |
| 2765 } |
| 2766 if (spec != null) { |
| 2767 _json["spec"] = spec; |
| 2768 } |
| 2769 return _json; |
| 2770 } |
| 2771 } |
| 2772 |
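// Editorial sketch (not generated code): resolving the effective parameters
// of a Source per the baseSpecs rule documented above -- base_specs are
// applied in order (later items win), and spec overrides anything in
// base_specs. Merging key-wise captures "the latest explicitly specified
// value of each parameter".
core.Map<core.String, core.Object> effectiveSpec(Source source) {
  var merged = <core.String, core.Object>{};
  if (source.baseSpecs != null) {
    for (var base in source.baseSpecs) {
      merged.addAll(base); // later base specs overwrite earlier ones
    }
  }
  if (source.spec != null) {
    merged.addAll(source.spec); // spec wins over every base spec
  }
  return merged;
}
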
| 2773 /** DEPRECATED in favor of DynamicSourceSplit. */ |
| 2774 class SourceFork { |
| 2775 /** DEPRECATED */ |
| 2776 SourceSplitShard primary; |
| 2777 /** DEPRECATED */ |
| 2778 DerivedSource primarySource; |
| 2779 /** DEPRECATED */ |
| 2780 SourceSplitShard residual; |
| 2781 /** DEPRECATED */ |
| 2782 DerivedSource residualSource; |
| 2783 |
| 2784 SourceFork(); |
| 2785 |
| 2786 SourceFork.fromJson(core.Map _json) { |
| 2787 if (_json.containsKey("primary")) { |
| 2788 primary = new SourceSplitShard.fromJson(_json["primary"]); |
| 2789 } |
| 2790 if (_json.containsKey("primarySource")) { |
| 2791 primarySource = new DerivedSource.fromJson(_json["primarySource"]); |
| 2792 } |
| 2793 if (_json.containsKey("residual")) { |
| 2794 residual = new SourceSplitShard.fromJson(_json["residual"]); |
| 2795 } |
| 2796 if (_json.containsKey("residualSource")) { |
| 2797 residualSource = new DerivedSource.fromJson(_json["residualSource"]); |
| 2798 } |
| 2799 } |
| 2800 |
| 2801 core.Map toJson() { |
| 2802 var _json = new core.Map(); |
| 2803 if (primary != null) { |
| 2804 _json["primary"] = (primary).toJson(); |
| 2805 } |
| 2806 if (primarySource != null) { |
| 2807 _json["primarySource"] = (primarySource).toJson(); |
| 2808 } |
| 2809 if (residual != null) { |
| 2810 _json["residual"] = (residual).toJson(); |
| 2811 } |
| 2812 if (residualSource != null) { |
| 2813 _json["residualSource"] = (residualSource).toJson(); |
| 2814 } |
| 2815 return _json; |
| 2816 } |
| 2817 } |
| 2818 |
| 2819 /** A request to compute the SourceMetadata of a Source. */ |
| 2820 class SourceGetMetadataRequest { |
| 2821 /** Specification of the source whose metadata should be computed. */ |
| 2822 Source source; |
| 2823 |
| 2824 SourceGetMetadataRequest(); |
| 2825 |
| 2826 SourceGetMetadataRequest.fromJson(core.Map _json) { |
| 2827 if (_json.containsKey("source")) { |
| 2828 source = new Source.fromJson(_json["source"]); |
| 2829 } |
| 2830 } |
| 2831 |
| 2832 core.Map toJson() { |
| 2833 var _json = new core.Map(); |
| 2834 if (source != null) { |
| 2835 _json["source"] = (source).toJson(); |
| 2836 } |
| 2837 return _json; |
| 2838 } |
| 2839 } |
| 2840 |
| 2841 /** The result of a SourceGetMetadataOperation. */ |
| 2842 class SourceGetMetadataResponse { |
| 2843 /** The computed metadata. */ |
| 2844 SourceMetadata metadata; |
| 2845 |
| 2846 SourceGetMetadataResponse(); |
| 2847 |
| 2848 SourceGetMetadataResponse.fromJson(core.Map _json) { |
| 2849 if (_json.containsKey("metadata")) { |
| 2850 metadata = new SourceMetadata.fromJson(_json["metadata"]); |
| 2851 } |
| 2852 } |
| 2853 |
| 2854 core.Map toJson() { |
| 2855 var _json = new core.Map(); |
| 2856 if (metadata != null) { |
| 2857 _json["metadata"] = (metadata).toJson(); |
| 2858 } |
| 2859 return _json; |
| 2860 } |
| 2861 } |
| 2862 |
| 2863 /** |
| 2864 * Metadata about a Source useful for automatically optimizing and tuning the |
| 2865 * pipeline, etc. |
| 2866 */ |
| 2867 class SourceMetadata { |
| 2868 /** |
| 2869 * An estimate of the total size (in bytes) of the data that would be read |
| 2870 * from this source. This estimate is in terms of external storage size, |
| 2871 * before any decompression or other processing done by the reader. |
| 2872 */ |
| 2873 core.String estimatedSizeBytes; |
| 2874 /** |
| 2875 * Specifies that the size of this source is known to be infinite (this is a |
| 2876 * streaming source). |
| 2877 */ |
| 2878 core.bool infinite; |
| 2879 /** |
| 2880 * Whether this source is known to produce key/value pairs with the (encoded) |
| 2881 * keys in lexicographically sorted order. |
| 2882 */ |
| 2883 core.bool producesSortedKeys; |
| 2884 |
| 2885 SourceMetadata(); |
| 2886 |
| 2887 SourceMetadata.fromJson(core.Map _json) { |
| 2888 if (_json.containsKey("estimatedSizeBytes")) { |
| 2889 estimatedSizeBytes = _json["estimatedSizeBytes"]; |
| 2890 } |
| 2891 if (_json.containsKey("infinite")) { |
| 2892 infinite = _json["infinite"]; |
| 2893 } |
| 2894 if (_json.containsKey("producesSortedKeys")) { |
| 2895 producesSortedKeys = _json["producesSortedKeys"]; |
| 2896 } |
| 2897 } |
| 2898 |
| 2899 core.Map toJson() { |
| 2900 var _json = new core.Map(); |
| 2901 if (estimatedSizeBytes != null) { |
| 2902 _json["estimatedSizeBytes"] = estimatedSizeBytes; |
| 2903 } |
| 2904 if (infinite != null) { |
| 2905 _json["infinite"] = infinite; |
| 2906 } |
| 2907 if (producesSortedKeys != null) { |
| 2908 _json["producesSortedKeys"] = producesSortedKeys; |
| 2909 } |
| 2910 return _json; |
| 2911 } |
| 2912 } |
| 2913 |
| 2914 /** |
| 2915 * A work item that represents the different operations that can be performed on |
| 2916 * a user-defined Source specification. |
| 2917 */ |
| 2918 class SourceOperationRequest { |
| 2919 /** Information about a request to get metadata about a source. */ |
| 2920 SourceGetMetadataRequest getMetadata; |
| 2921 /** Information about a request to split a source. */ |
| 2922 SourceSplitRequest split; |
| 2923 |
| 2924 SourceOperationRequest(); |
| 2925 |
| 2926 SourceOperationRequest.fromJson(core.Map _json) { |
| 2927 if (_json.containsKey("getMetadata")) { |
| 2928 getMetadata = new SourceGetMetadataRequest.fromJson(_json["getMetadata"]); |
| 2929 } |
| 2930 if (_json.containsKey("split")) { |
| 2931 split = new SourceSplitRequest.fromJson(_json["split"]); |
| 2932 } |
| 2933 } |
| 2934 |
| 2935 core.Map toJson() { |
| 2936 var _json = new core.Map(); |
| 2937 if (getMetadata != null) { |
| 2938 _json["getMetadata"] = (getMetadata).toJson(); |
| 2939 } |
| 2940 if (split != null) { |
| 2941 _json["split"] = (split).toJson(); |
| 2942 } |
| 2943 return _json; |
| 2944 } |
| 2945 } |
| 2946 |
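// Editorial sketch (not generated code): a SourceOperationRequest carries one
// of the two operation payloads (treating them as mutually exclusive is an
// assumption, not stated in this file), so worker-side handling is a two-way
// dispatch. `performSplit` and `computeMetadata` are hypothetical callbacks.
SourceOperationResponse handleSourceOperation(
    SourceOperationRequest request,
    SourceSplitResponse performSplit(SourceSplitRequest splitRequest),
    SourceGetMetadataResponse computeMetadata(
        SourceGetMetadataRequest metadataRequest)) {
  var response = new SourceOperationResponse();
  if (request.split != null) {
    response.split = performSplit(request.split);
  } else if (request.getMetadata != null) {
    response.getMetadata = computeMetadata(request.getMetadata);
  }
  return response;
}
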
| 2947 /** |
| 2948 * The result of a SourceOperationRequest, specified in |
| 2949 * ReportWorkItemStatusRequest.source_operation when the work item is completed. |
| 2950 */ |
| 2951 class SourceOperationResponse { |
| 2952 /** A response to a request to get metadata about a source. */ |
| 2953 SourceGetMetadataResponse getMetadata; |
| 2954 /** A response to a request to split a source. */ |
| 2955 SourceSplitResponse split; |
| 2956 |
| 2957 SourceOperationResponse(); |
| 2958 |
| 2959 SourceOperationResponse.fromJson(core.Map _json) { |
| 2960 if (_json.containsKey("getMetadata")) { |
| 2961       getMetadata = new SourceGetMetadataResponse.fromJson(_json["getMetadata"]);
| 2962 } |
| 2963 if (_json.containsKey("split")) { |
| 2964 split = new SourceSplitResponse.fromJson(_json["split"]); |
| 2965 } |
| 2966 } |
| 2967 |
| 2968 core.Map toJson() { |
| 2969 var _json = new core.Map(); |
| 2970 if (getMetadata != null) { |
| 2971 _json["getMetadata"] = (getMetadata).toJson(); |
| 2972 } |
| 2973 if (split != null) { |
| 2974 _json["split"] = (split).toJson(); |
| 2975 } |
| 2976 return _json; |
| 2977 } |
| 2978 } |
| 2979 |
| 2980 /** |
| 2981 * Hints for splitting a Source into bundles (parts for parallel processing) |
| 2982 * using SourceSplitRequest. |
| 2983 */ |
| 2984 class SourceSplitOptions { |
| 2985 /** |
| 2986 * The source should be split into a set of bundles where the estimated size |
| 2987 * of each is approximately this many bytes. |
| 2988 */ |
| 2989 core.String desiredBundleSizeBytes; |
| 2990 /** DEPRECATED in favor of desired_bundle_size_bytes. */ |
| 2991 core.String desiredShardSizeBytes; |
| 2992 |
| 2993 SourceSplitOptions(); |
| 2994 |
| 2995 SourceSplitOptions.fromJson(core.Map _json) { |
| 2996 if (_json.containsKey("desiredBundleSizeBytes")) { |
| 2997 desiredBundleSizeBytes = _json["desiredBundleSizeBytes"]; |
| 2998 } |
| 2999 if (_json.containsKey("desiredShardSizeBytes")) { |
| 3000 desiredShardSizeBytes = _json["desiredShardSizeBytes"]; |
| 3001 } |
| 3002 } |
| 3003 |
| 3004 core.Map toJson() { |
| 3005 var _json = new core.Map(); |
| 3006 if (desiredBundleSizeBytes != null) { |
| 3007 _json["desiredBundleSizeBytes"] = desiredBundleSizeBytes; |
| 3008 } |
| 3009 if (desiredShardSizeBytes != null) { |
| 3010 _json["desiredShardSizeBytes"] = desiredShardSizeBytes; |
| 3011 } |
| 3012 return _json; |
| 3013 } |
| 3014 } |
| 3015 |
| 3016 /** |
| 3017 * Represents the operation to split a high-level Source specification into |
| 3018 * bundles (parts for parallel processing). At a high level, splitting of a |
| 3019 * source into bundles happens as follows: SourceSplitRequest is applied to the |
| 3020 * source. If it returns SOURCE_SPLIT_OUTCOME_USE_CURRENT, no further splitting |
| 3021 * happens and the source is used "as is". Otherwise, splitting is applied |
| 3022 * recursively to each produced DerivedSource. As an optimization, for any |
| 3023 * Source, if its does_not_need_splitting is true, the framework assumes that |
| 3024 * splitting this source would return SOURCE_SPLIT_OUTCOME_USE_CURRENT, and |
| 3025 * doesn't initiate a SourceSplitRequest. This applies both to the initial |
| 3026 * source being split and to bundles produced from it. |
| 3027 */ |
| 3028 class SourceSplitRequest { |
| 3029 /** Hints for tuning the splitting process. */ |
| 3030 SourceSplitOptions options; |
| 3031 /** Specification of the source to be split. */ |
| 3032 Source source; |
| 3033 |
| 3034 SourceSplitRequest(); |
| 3035 |
| 3036 SourceSplitRequest.fromJson(core.Map _json) { |
| 3037 if (_json.containsKey("options")) { |
| 3038 options = new SourceSplitOptions.fromJson(_json["options"]); |
| 3039 } |
| 3040 if (_json.containsKey("source")) { |
| 3041 source = new Source.fromJson(_json["source"]); |
| 3042 } |
| 3043 } |
| 3044 |
| 3045 core.Map toJson() { |
| 3046 var _json = new core.Map(); |
| 3047 if (options != null) { |
| 3048 _json["options"] = (options).toJson(); |
| 3049 } |
| 3050 if (source != null) { |
| 3051 _json["source"] = (source).toJson(); |
| 3052 } |
| 3053 return _json; |
| 3054 } |
| 3055 } |
| 3056 |
| 3057 /** The response to a SourceSplitRequest. */ |
| 3058 class SourceSplitResponse { |
| 3059 /** |
| 3060 * If outcome is SPLITTING_HAPPENED, then this is a list of bundles into which |
| 3061 * the source was split. Otherwise this field is ignored. This list can be |
| 3062 * empty, which means the source represents an empty input. |
| 3063 */ |
| 3064 core.List<DerivedSource> bundles; |
| 3065 /** |
| 3066 * Indicates whether splitting happened and produced a list of bundles. If |
| 3067 * this is USE_CURRENT_SOURCE_AS_IS, the current source should be processed |
| 3068 * "as is" without splitting. "bundles" is ignored in this case. If this is |
| 3069 * SPLITTING_HAPPENED, then "bundles" contains a list of bundles into which |
| 3070 * the source was split. |
| 3071 * Possible string values are: |
| 3072 * - "SOURCE_SPLIT_OUTCOME_UNKNOWN" : A SOURCE_SPLIT_OUTCOME_UNKNOWN. |
| 3073 * - "SOURCE_SPLIT_OUTCOME_USE_CURRENT" : A SOURCE_SPLIT_OUTCOME_USE_CURRENT. |
| 3074 * - "SOURCE_SPLIT_OUTCOME_SPLITTING_HAPPENED" : A |
| 3075 * SOURCE_SPLIT_OUTCOME_SPLITTING_HAPPENED. |
| 3076 */ |
| 3077 core.String outcome; |
| 3078 /** DEPRECATED in favor of bundles. */ |
| 3079 core.List<SourceSplitShard> shards; |
| 3080 |
| 3081 SourceSplitResponse(); |
| 3082 |
| 3083 SourceSplitResponse.fromJson(core.Map _json) { |
| 3084 if (_json.containsKey("bundles")) { |
| 3085       bundles = _json["bundles"].map((value) => new DerivedSource.fromJson(value)).toList();
| 3086 } |
| 3087 if (_json.containsKey("outcome")) { |
| 3088 outcome = _json["outcome"]; |
| 3089 } |
| 3090 if (_json.containsKey("shards")) { |
| 3091       shards = _json["shards"].map((value) => new SourceSplitShard.fromJson(value)).toList();
| 3092 } |
| 3093 } |
| 3094 |
| 3095 core.Map toJson() { |
| 3096 var _json = new core.Map(); |
| 3097 if (bundles != null) { |
| 3098 _json["bundles"] = bundles.map((value) => (value).toJson()).toList(); |
| 3099 } |
| 3100 if (outcome != null) { |
| 3101 _json["outcome"] = outcome; |
| 3102 } |
| 3103 if (shards != null) { |
| 3104 _json["shards"] = shards.map((value) => (value).toJson()).toList(); |
| 3105 } |
| 3106 return _json; |
| 3107 } |
| 3108 } |
| 3109 |
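// Editorial sketch (not generated code): consuming a SourceSplitResponse per
// the outcome contract documented above, using the enum strings listed there.
// `wrapCurrentSource` is a hypothetical callback that packages the unsplit
// source as a DerivedSource.
core.List<DerivedSource> bundlesToProcess(
    SourceSplitResponse response, DerivedSource wrapCurrentSource()) {
  if (response.outcome == "SOURCE_SPLIT_OUTCOME_SPLITTING_HAPPENED") {
    // An empty list is legitimate: the source represents an empty input.
    return response.bundles != null ? response.bundles : <DerivedSource>[];
  }
  // SOURCE_SPLIT_OUTCOME_USE_CURRENT: process the current source "as is".
  return <DerivedSource>[wrapCurrentSource()];
}
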
| 3110 /** DEPRECATED in favor of DerivedSource. */ |
| 3111 class SourceSplitShard { |
| 3112 /** |
| 3113 * DEPRECATED |
| 3114 * Possible string values are: |
| 3115 * - "SOURCE_DERIVATION_MODE_UNKNOWN" : A SOURCE_DERIVATION_MODE_UNKNOWN. |
| 3116 * - "SOURCE_DERIVATION_MODE_INDEPENDENT" : A |
| 3117 * SOURCE_DERIVATION_MODE_INDEPENDENT. |
| 3118 * - "SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT" : A |
| 3119 * SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT. |
| 3120 * - "SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT" : A |
| 3121 * SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT. |
| 3122 */ |
| 3123 core.String derivationMode; |
| 3124 /** DEPRECATED */ |
| 3125 Source source; |
| 3126 |
| 3127 SourceSplitShard(); |
| 3128 |
| 3129 SourceSplitShard.fromJson(core.Map _json) { |
| 3130 if (_json.containsKey("derivationMode")) { |
| 3131 derivationMode = _json["derivationMode"]; |
| 3132 } |
| 3133 if (_json.containsKey("source")) { |
| 3134 source = new Source.fromJson(_json["source"]); |
| 3135 } |
| 3136 } |
| 3137 |
| 3138 core.Map toJson() { |
| 3139 var _json = new core.Map(); |
| 3140 if (derivationMode != null) { |
| 3141 _json["derivationMode"] = derivationMode; |
| 3142 } |
| 3143 if (source != null) { |
| 3144 _json["source"] = (source).toJson(); |
| 3145 } |
| 3146 return _json; |
| 3147 } |
| 3148 } |
| 3149 |
| 3150 /** State family configuration. */ |
| 3151 class StateFamilyConfig { |
| 3152 /** If true, this family corresponds to a read operation. */ |
| 3153 core.bool isRead; |
| 3154 /** The state family value. */ |
| 3155 core.String stateFamily; |
| 3156 |
| 3157 StateFamilyConfig(); |
| 3158 |
| 3159 StateFamilyConfig.fromJson(core.Map _json) { |
| 3160 if (_json.containsKey("isRead")) { |
| 3161 isRead = _json["isRead"]; |
| 3162 } |
| 3163 if (_json.containsKey("stateFamily")) { |
| 3164 stateFamily = _json["stateFamily"]; |
| 3165 } |
| 3166 } |
| 3167 |
| 3168 core.Map toJson() { |
| 3169 var _json = new core.Map(); |
| 3170 if (isRead != null) { |
| 3171 _json["isRead"] = isRead; |
| 3172 } |
| 3173 if (stateFamily != null) { |
| 3174 _json["stateFamily"] = stateFamily; |
| 3175 } |
| 3176 return _json; |
| 3177 } |
| 3178 } |
| 3179 |
| 3180 /** |
| 3181 * The `Status` type defines a logical error model that is suitable for |
| 3182 * different programming environments, including REST APIs and RPC APIs. It is |
| 3183 * used by [gRPC](https://github.com/grpc). The error model is designed to be: - |
| 3184 * Simple to use and understand for most users - Flexible enough to meet |
| 3185 * unexpected needs # Overview The `Status` message contains three pieces of |
| 3186 * data: error code, error message, and error details. The error code should be |
| 3187 * an enum value of [google.rpc.Code][], but it may accept additional error |
| 3188 * codes if needed. The error message should be a developer-facing English |
| 3189 * message that helps developers *understand* and *resolve* the error. If a |
| 3190 * localized user-facing error message is needed, put the localized message in |
| 3191 * the error details or localize it in the client. The optional error details |
| 3192 * may contain arbitrary information about the error. There is a predefined set |
| 3193 * of error detail types in the package `google.rpc` which can be used for |
| 3194 * common error conditions. # Language mapping The `Status` message is the |
| 3195 * logical representation of the error model, but it is not necessarily the |
| 3196 * actual wire format. When the `Status` message is exposed in different client |
| 3197 * libraries and different wire protocols, it can be mapped differently. For |
| 3198 * example, it will likely be mapped to some exceptions in Java, but more likely |
| 3199 * mapped to some error codes in C. # Other uses The error model and the |
| 3200 * `Status` message can be used in a variety of environments, either with or |
| 3201 * without APIs, to provide a consistent developer experience across different |
| 3202 * environments. Example uses of this error model include: - Partial errors. If |
| 3203 * a service needs to return partial errors to the client, it may embed the |
| 3204 * `Status` in the normal response to indicate the partial errors. - Workflow |
| 3205 * errors. A typical workflow has multiple steps. Each step may have a `Status` |
| 3206 * message for error reporting purpose. - Batch operations. If a client uses |
| 3207 * batch request and batch response, the `Status` message should be used |
| 3208 * directly inside batch response, one for each error sub-response. - |
| 3209 * Asynchronous operations. If an API call embeds asynchronous operation results |
| 3210 * in its response, the status of those operations should be represented |
| 3211 * directly using the `Status` message. - Logging. If some API errors are stored |
| 3212 * in logs, the message `Status` could be used directly after any stripping |
| 3213 * needed for security/privacy reasons. |
| 3214 */ |
| 3215 class Status { |
| 3216 /** The status code, which should be an enum value of [google.rpc.Code][]. */ |
| 3217 core.int code; |
| 3218 /** |
| 3219 * A list of messages that carry the error details. There will be a common set |
| 3220 * of message types for APIs to use. |
| 3221 * |
| 3222 * The values for Object must be JSON objects. They can consist of `num`, |
| 3223 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 3224 */ |
| 3225 core.List<core.Map<core.String, core.Object>> details; |
| 3226 /** |
| 3227 * A developer-facing error message, which should be in English. Any |
| 3228 * user-facing error message should be localized and sent in the |
| 3229 * [google.rpc.Status.details][google.rpc.Status.details] field, or localized |
| 3230 * by the client. |
| 3231 */ |
| 3232 core.String message; |
| 3233 |
| 3234 Status(); |
| 3235 |
| 3236 Status.fromJson(core.Map _json) { |
| 3237 if (_json.containsKey("code")) { |
| 3238 code = _json["code"]; |
| 3239 } |
| 3240 if (_json.containsKey("details")) { |
| 3241 details = _json["details"]; |
| 3242 } |
| 3243 if (_json.containsKey("message")) { |
| 3244 message = _json["message"]; |
| 3245 } |
| 3246 } |
| 3247 |
| 3248 core.Map toJson() { |
| 3249 var _json = new core.Map(); |
| 3250 if (code != null) { |
| 3251 _json["code"] = code; |
| 3252 } |
| 3253 if (details != null) { |
| 3254 _json["details"] = details; |
| 3255 } |
| 3256 if (message != null) { |
| 3257 _json["message"] = message; |
| 3258 } |
| 3259 return _json; |
| 3260 } |
| 3261 } |
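// Usage sketch (editor's example, not part of the generated file): builds a
// Status as a worker might when reporting a failed WorkItem. The numeric code
// 13 (INTERNAL in google.rpc.Code) and the message text are illustrative
// values, not taken from the service.
Status makeInternalError(core.String details) {
  return new Status()
    ..code = 13 // INTERNAL; illustrative choice of google.rpc.Code value.
    ..message = 'Worker failure: $details';
}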
| 3262 |
| 3263 /** |
| 3264 * Defines a particular step within a Dataflow job. A job consists of multiple |
| 3265 * steps, each of which performs some specific operation as part of the overall |
| 3266 * job. Data is typically passed from one step to another as part of the job. |
| 3267 * Here's an example of a sequence of steps which together implement a |
| 3268 * Map-Reduce job: * Read a collection of data from some source, parsing the |
| 3269 * collection's elements. * Validate the elements. * Apply a user-defined |
| 3270 * function to map each element to some value and extract an element-specific |
| 3271 * key value. * Group elements with the same key into a single element with that |
| 3272 * key, transforming a multiply-keyed collection into a uniquely-keyed |
| 3273 * collection. * Write the elements out to some data sink. (Note that the |
| 3274 * Dataflow service may be used to run many different types of jobs, not just |
| 3275 * Map-Reduce). |
| 3276 */ |
| 3277 class Step { |
| 3278 /** The kind of step in the dataflow Job. */ |
| 3279 core.String kind; |
| 3280 /** |
| 3281 * Name identifying the step. This must be unique for each step with respect |
| 3282 * to all other steps in the dataflow Job. |
| 3283 */ |
| 3284 core.String name; |
| 3285 /** |
| 3286 * Named properties associated with the step. Each kind of predefined step has |
| 3287 * its own required set of properties. |
| 3288 * |
| 3289 * The values for Object must be JSON objects. They can consist of `num`, |
| 3290 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 3291 */ |
| 3292 core.Map<core.String, core.Object> properties; |
| 3293 |
| 3294 Step(); |
| 3295 |
| 3296 Step.fromJson(core.Map _json) { |
| 3297 if (_json.containsKey("kind")) { |
| 3298 kind = _json["kind"]; |
| 3299 } |
| 3300 if (_json.containsKey("name")) { |
| 3301 name = _json["name"]; |
| 3302 } |
| 3303 if (_json.containsKey("properties")) { |
| 3304 properties = _json["properties"]; |
| 3305 } |
| 3306 } |
| 3307 |
| 3308 core.Map toJson() { |
| 3309 var _json = new core.Map(); |
| 3310 if (kind != null) { |
| 3311 _json["kind"] = kind; |
| 3312 } |
| 3313 if (name != null) { |
| 3314 _json["name"] = name; |
| 3315 } |
| 3316 if (properties != null) { |
| 3317 _json["properties"] = properties; |
| 3318 } |
| 3319 return _json; |
| 3320 } |
| 3321 } |
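// Usage sketch (editor's example, not part of the generated file): assembles
// a minimal Step. The kind name "ParallelRead" and the property keys are
// hypothetical; each predefined step kind defines its own required properties.
Step exampleStep() {
  return new Step()
    ..kind = 'ParallelRead' // Hypothetical step kind.
    ..name = 'read-input'   // Must be unique among the job's steps.
    ..properties = <core.String, core.Object>{
        'format': 'text',
        'filepattern': 'gs://bucket/input-*' // Hypothetical input pattern.
      };
}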
| 3322 |
| 3323 /** |
| 3324 * Describes a stream of data, either as input to be processed or as output of a |
| 3325 * streaming Dataflow job. |
| 3326 */ |
| 3327 class StreamLocation { |
| 3328 /** The stream is a custom source. */ |
| 3329 CustomSourceLocation customSourceLocation; |
| 3330 /** The stream is a pubsub stream. */ |
| 3331 PubsubLocation pubsubLocation; |
| 3332 /** The stream is a streaming side input. */ |
| 3333 StreamingSideInputLocation sideInputLocation; |
| 3334 /** |
| 3335 * The stream is part of another computation within the current streaming |
| 3336 * Dataflow job. |
| 3337 */ |
| 3338 StreamingStageLocation streamingStageLocation; |
| 3339 |
| 3340 StreamLocation(); |
| 3341 |
| 3342 StreamLocation.fromJson(core.Map _json) { |
| 3343 if (_json.containsKey("customSourceLocation")) { |
| 3344 customSourceLocation = new CustomSourceLocation.fromJson(_json["customSourceLocation"]); |
| 3345 } |
| 3346 if (_json.containsKey("pubsubLocation")) { |
| 3347 pubsubLocation = new PubsubLocation.fromJson(_json["pubsubLocation"]); |
| 3348 } |
| 3349 if (_json.containsKey("sideInputLocation")) { |
| 3350 sideInputLocation = new StreamingSideInputLocation.fromJson(_json["sideInputLocation"]); |
| 3351 } |
| 3352 if (_json.containsKey("streamingStageLocation")) { |
| 3353 streamingStageLocation = new StreamingStageLocation.fromJson(_json["streamingStageLocation"]); |
| 3354 } |
| 3355 } |
| 3356 |
| 3357 core.Map toJson() { |
| 3358 var _json = new core.Map(); |
| 3359 if (customSourceLocation != null) { |
| 3360 _json["customSourceLocation"] = (customSourceLocation).toJson(); |
| 3361 } |
| 3362 if (pubsubLocation != null) { |
| 3363 _json["pubsubLocation"] = (pubsubLocation).toJson(); |
| 3364 } |
| 3365 if (sideInputLocation != null) { |
| 3366 _json["sideInputLocation"] = (sideInputLocation).toJson(); |
| 3367 } |
| 3368 if (streamingStageLocation != null) { |
| 3369 _json["streamingStageLocation"] = (streamingStageLocation).toJson(); |
| 3370 } |
| 3371 return _json; |
| 3372 } |
| 3373 } |
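// Usage sketch (editor's example, not part of the generated file):
// StreamLocation acts as a union type where exactly one field is expected to
// be set. This hypothetical helper picks out whichever variant is present.
core.String describeStream(StreamLocation loc) {
  if (loc.pubsubLocation != null) return 'pubsub stream';
  if (loc.customSourceLocation != null) return 'custom source';
  if (loc.sideInputLocation != null) return 'streaming side input';
  if (loc.streamingStageLocation != null) {
    return 'stage-to-stage stream ${loc.streamingStageLocation.streamId}';
  }
  return 'unknown stream location';
}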
| 3374 |
| 3375 /** |
| 3376 * Describes full or partial data disk assignment information of the computation |
| 3377 * ranges. |
| 3378 */ |
| 3379 class StreamingComputationRanges { |
| 3380 /** The ID of the computation. */ |
| 3381 core.String computationId; |
| 3382 /** Data disk assignments for ranges from this computation. */ |
| 3383 core.List<KeyRangeDataDiskAssignment> rangeAssignments; |
| 3384 |
| 3385 StreamingComputationRanges(); |
| 3386 |
| 3387 StreamingComputationRanges.fromJson(core.Map _json) { |
| 3388 if (_json.containsKey("computationId")) { |
| 3389 computationId = _json["computationId"]; |
| 3390 } |
| 3391 if (_json.containsKey("rangeAssignments")) { |
| 3392 rangeAssignments = _json["rangeAssignments"].map((value) => new KeyRangeDataDiskAssignment.fromJson(value)).toList(); |
| 3393 } |
| 3394 } |
| 3395 |
| 3396 core.Map toJson() { |
| 3397 var _json = new core.Map(); |
| 3398 if (computationId != null) { |
| 3399 _json["computationId"] = computationId; |
| 3400 } |
| 3401 if (rangeAssignments != null) { |
| 3402 _json["rangeAssignments"] = rangeAssignments.map((value) => (value).toJson()).toList(); |
| 3403 } |
| 3404 return _json; |
| 3405 } |
| 3406 } |
| 3407 |
| 3408 /** |
| 3409 * A task which describes what action should be performed for the specified |
| 3410 * streaming computation ranges. |
| 3411 */ |
| 3412 class StreamingComputationTask { |
| 3413 /** Contains ranges of a streaming computation this task should apply to. */ |
| 3414 core.List<StreamingComputationRanges> computationRanges; |
| 3415 /** Describes the set of data disks this task should apply to. */ |
| 3416 core.List<MountedDataDisk> dataDisks; |
| 3417 /** |
| 3418 * A type of streaming computation task. |
| 3419 * Possible string values are: |
| 3420 * - "STREAMING_COMPUTATION_TASK_UNKNOWN" : A |
| 3421 * STREAMING_COMPUTATION_TASK_UNKNOWN. |
| 3422 * - "STREAMING_COMPUTATION_TASK_STOP" : A STREAMING_COMPUTATION_TASK_STOP. |
| 3423 * - "STREAMING_COMPUTATION_TASK_START" : A STREAMING_COMPUTATION_TASK_START. |
| 3424 */ |
| 3425 core.String taskType; |
| 3426 |
| 3427 StreamingComputationTask(); |
| 3428 |
| 3429 StreamingComputationTask.fromJson(core.Map _json) { |
| 3430 if (_json.containsKey("computationRanges")) { |
| 3431 computationRanges = _json["computationRanges"].map((value) => new StreamingComputationRanges.fromJson(value)).toList(); |
| 3432 } |
| 3433 if (_json.containsKey("dataDisks")) { |
| 3434 dataDisks = _json["dataDisks"].map((value) => new MountedDataDisk.fromJson(value)).toList(); |
| 3435 } |
| 3436 if (_json.containsKey("taskType")) { |
| 3437 taskType = _json["taskType"]; |
| 3438 } |
| 3439 } |
| 3440 |
| 3441 core.Map toJson() { |
| 3442 var _json = new core.Map(); |
| 3443 if (computationRanges != null) { |
| 3444 _json["computationRanges"] = computationRanges.map((value) => (value).toJson()).toList(); |
| 3445 } |
| 3446 if (dataDisks != null) { |
| 3447 _json["dataDisks"] = dataDisks.map((value) => (value).toJson()).toList(); |
| 3448 } |
| 3449 if (taskType != null) { |
| 3450 _json["taskType"] = taskType; |
| 3451 } |
| 3452 return _json; |
| 3453 } |
| 3454 } |
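// Usage sketch (editor's example, not part of the generated file): decodes a
// StreamingComputationTask from a JSON map, as the worker harness might when
// it receives a START task. All literal values here are illustrative only.
void decodeTaskExample() {
  var task = new StreamingComputationTask.fromJson({
    'taskType': 'STREAMING_COMPUTATION_TASK_START',
    'computationRanges': [
      {'computationId': 'stage-1', 'rangeAssignments': []}
    ]
  });
  core.print('${task.taskType}: ${task.computationRanges.length} range set(s)');
}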
| 3455 |
| 3456 /** A task which initializes part of a streaming Dataflow job. */ |
| 3457 class StreamingSetupTask { |
| 3458 /** |
| 3459 * The TCP port on which the worker should listen for messages from other |
| 3460 * streaming computation workers. |
| 3461 */ |
| 3462 core.int receiveWorkPort; |
| 3463 /** The global topology of the streaming Dataflow job. */ |
| 3464 TopologyConfig streamingComputationTopology; |
| 3465 /** |
| 3466 * The TCP port used by the worker to communicate with the Dataflow worker |
| 3467 * harness. |
| 3468 */ |
| 3469 core.int workerHarnessPort; |
| 3470 |
| 3471 StreamingSetupTask(); |
| 3472 |
| 3473 StreamingSetupTask.fromJson(core.Map _json) { |
| 3474 if (_json.containsKey("receiveWorkPort")) { |
| 3475 receiveWorkPort = _json["receiveWorkPort"]; |
| 3476 } |
| 3477 if (_json.containsKey("streamingComputationTopology")) { |
| 3478 streamingComputationTopology = new TopologyConfig.fromJson(_json["streamingComputationTopology"]); |
| 3479 } |
| 3480 if (_json.containsKey("workerHarnessPort")) { |
| 3481 workerHarnessPort = _json["workerHarnessPort"]; |
| 3482 } |
| 3483 } |
| 3484 |
| 3485 core.Map toJson() { |
| 3486 var _json = new core.Map(); |
| 3487 if (receiveWorkPort != null) { |
| 3488 _json["receiveWorkPort"] = receiveWorkPort; |
| 3489 } |
| 3490 if (streamingComputationTopology != null) { |
| 3491 _json["streamingComputationTopology"] = (streamingComputationTopology).toJson(); |
| 3492 } |
| 3493 if (workerHarnessPort != null) { |
| 3494 _json["workerHarnessPort"] = workerHarnessPort; |
| 3495 } |
| 3496 return _json; |
| 3497 } |
| 3498 } |
| 3499 |
| 3500 /** Identifies the location of a streaming side input. */ |
| 3501 class StreamingSideInputLocation { |
| 3502 /** Identifies the state family where this side input is stored. */ |
| 3503 core.String stateFamily; |
| 3504 /** |
| 3505 * Identifies the particular side input within the streaming Dataflow job. |
| 3506 */ |
| 3507 core.String tag; |
| 3508 |
| 3509 StreamingSideInputLocation(); |
| 3510 |
| 3511 StreamingSideInputLocation.fromJson(core.Map _json) { |
| 3512 if (_json.containsKey("stateFamily")) { |
| 3513 stateFamily = _json["stateFamily"]; |
| 3514 } |
| 3515 if (_json.containsKey("tag")) { |
| 3516 tag = _json["tag"]; |
| 3517 } |
| 3518 } |
| 3519 |
| 3520 core.Map toJson() { |
| 3521 var _json = new core.Map(); |
| 3522 if (stateFamily != null) { |
| 3523 _json["stateFamily"] = stateFamily; |
| 3524 } |
| 3525 if (tag != null) { |
| 3526 _json["tag"] = tag; |
| 3527 } |
| 3528 return _json; |
| 3529 } |
| 3530 } |
| 3531 |
| 3532 /** |
| 3533 * Identifies the location of a streaming computation stage, for stage-to-stage |
| 3534 * communication. |
| 3535 */ |
| 3536 class StreamingStageLocation { |
| 3537 /** Identifies the particular stream within the streaming Dataflow job. */ |
| 3538 core.String streamId; |
| 3539 |
| 3540 StreamingStageLocation(); |
| 3541 |
| 3542 StreamingStageLocation.fromJson(core.Map _json) { |
| 3543 if (_json.containsKey("streamId")) { |
| 3544 streamId = _json["streamId"]; |
| 3545 } |
| 3546 } |
| 3547 |
| 3548 core.Map toJson() { |
| 3549 var _json = new core.Map(); |
| 3550 if (streamId != null) { |
| 3551 _json["streamId"] = streamId; |
| 3552 } |
| 3553 return _json; |
| 3554 } |
| 3555 } |
| 3556 |
| 3557 /** Taskrunner configuration settings. */ |
| 3558 class TaskRunnerSettings { |
| 3559 /** Also send taskrunner log info to stderr? */ |
| 3560 core.bool alsologtostderr; |
| 3561 /** Location on the worker for task-specific subdirectories. */ |
| 3562 core.String baseTaskDir; |
| 3563 /** |
| 3564 * The base URL for the taskrunner to use when accessing Google Cloud APIs. |
| 3565 * When workers access Google Cloud APIs, they logically do so via relative |
| 3566 * URLs. If this field is specified, it supplies the base URL to use for |
| 3567 * resolving these relative URLs. The normative algorithm used is defined by |
| 3568 * RFC 1808, "Relative Uniform Resource Locators". If not specified, the |
| 3569 * default value is "http://www.googleapis.com/" |
| 3570 */ |
| 3571 core.String baseUrl; |
| 3572 /** Store preprocessing commands in this file. */ |
| 3573 core.String commandlinesFileName; |
| 3574 /** Do we continue taskrunner if an exception is hit? */ |
| 3575 core.bool continueOnException; |
| 3576 /** API version of endpoint, e.g. "v1b3" */ |
| 3577 core.String dataflowApiVersion; |
| 3578 /** Command to launch the worker harness. */ |
| 3579 core.String harnessCommand; |
| 3580 /** Suggested backend language. */ |
| 3581 core.String languageHint; |
| 3582 /** Directory on the VM to store logs. */ |
| 3583 core.String logDir; |
| 3584 /** Send taskrunner log info to Google Compute Engine VM serial console? */ |
| 3585 core.bool logToSerialconsole; |
| 3586 /** |
| 3587 * Indicates where to put logs. If this is not specified, the logs will not be |
| 3588 * uploaded. The supported resource type is: Google Cloud Storage: |
| 3589 * storage.googleapis.com/{bucket}/{object} |
| 3590 * bucket.storage.googleapis.com/{object} |
| 3591 */ |
| 3592 core.String logUploadLocation; |
| 3593 /** |
| 3594 * OAuth2 scopes to be requested by the taskrunner in order to access the |
| 3595 * dataflow API. |
| 3596 */ |
| 3597 core.List<core.String> oauthScopes; |
| 3598 /** Settings to pass to the parallel worker harness. */ |
| 3599 WorkerSettings parallelWorkerSettings; |
| 3600 /** Streaming worker main class name. */ |
| 3601 core.String streamingWorkerMainClass; |
| 3602 /** |
| 3603 * The UNIX group ID on the worker VM to use for tasks launched by taskrunner; |
| 3604 * e.g. "wheel". |
| 3605 */ |
| 3606 core.String taskGroup; |
| 3607 /** |
| 3608 * The UNIX user ID on the worker VM to use for tasks launched by taskrunner; |
| 3609 * e.g. "root". |
| 3610 */ |
| 3611 core.String taskUser; |
| 3612 /** |
| 3613 * The prefix of the resources the taskrunner should use for temporary |
| 3614 * storage. The supported resource type is: Google Cloud Storage: |
| 3615 * storage.googleapis.com/{bucket}/{object} |
| 3616 * bucket.storage.googleapis.com/{object} |
| 3617 */ |
| 3618 core.String tempStoragePrefix; |
| 3619 /** ID string of VM. */ |
| 3620 core.String vmId; |
| 3621 /** Store the workflow in this file. */ |
| 3622 core.String workflowFileName; |
| 3623 |
| 3624 TaskRunnerSettings(); |
| 3625 |
| 3626 TaskRunnerSettings.fromJson(core.Map _json) { |
| 3627 if (_json.containsKey("alsologtostderr")) { |
| 3628 alsologtostderr = _json["alsologtostderr"]; |
| 3629 } |
| 3630 if (_json.containsKey("baseTaskDir")) { |
| 3631 baseTaskDir = _json["baseTaskDir"]; |
| 3632 } |
| 3633 if (_json.containsKey("baseUrl")) { |
| 3634 baseUrl = _json["baseUrl"]; |
| 3635 } |
| 3636 if (_json.containsKey("commandlinesFileName")) { |
| 3637 commandlinesFileName = _json["commandlinesFileName"]; |
| 3638 } |
| 3639 if (_json.containsKey("continueOnException")) { |
| 3640 continueOnException = _json["continueOnException"]; |
| 3641 } |
| 3642 if (_json.containsKey("dataflowApiVersion")) { |
| 3643 dataflowApiVersion = _json["dataflowApiVersion"]; |
| 3644 } |
| 3645 if (_json.containsKey("harnessCommand")) { |
| 3646 harnessCommand = _json["harnessCommand"]; |
| 3647 } |
| 3648 if (_json.containsKey("languageHint")) { |
| 3649 languageHint = _json["languageHint"]; |
| 3650 } |
| 3651 if (_json.containsKey("logDir")) { |
| 3652 logDir = _json["logDir"]; |
| 3653 } |
| 3654 if (_json.containsKey("logToSerialconsole")) { |
| 3655 logToSerialconsole = _json["logToSerialconsole"]; |
| 3656 } |
| 3657 if (_json.containsKey("logUploadLocation")) { |
| 3658 logUploadLocation = _json["logUploadLocation"]; |
| 3659 } |
| 3660 if (_json.containsKey("oauthScopes")) { |
| 3661 oauthScopes = _json["oauthScopes"]; |
| 3662 } |
| 3663 if (_json.containsKey("parallelWorkerSettings")) { |
| 3664 parallelWorkerSettings = new WorkerSettings.fromJson(_json["parallelWorkerSettings"]); |
| 3665 } |
| 3666 if (_json.containsKey("streamingWorkerMainClass")) { |
| 3667 streamingWorkerMainClass = _json["streamingWorkerMainClass"]; |
| 3668 } |
| 3669 if (_json.containsKey("taskGroup")) { |
| 3670 taskGroup = _json["taskGroup"]; |
| 3671 } |
| 3672 if (_json.containsKey("taskUser")) { |
| 3673 taskUser = _json["taskUser"]; |
| 3674 } |
| 3675 if (_json.containsKey("tempStoragePrefix")) { |
| 3676 tempStoragePrefix = _json["tempStoragePrefix"]; |
| 3677 } |
| 3678 if (_json.containsKey("vmId")) { |
| 3679 vmId = _json["vmId"]; |
| 3680 } |
| 3681 if (_json.containsKey("workflowFileName")) { |
| 3682 workflowFileName = _json["workflowFileName"]; |
| 3683 } |
| 3684 } |
| 3685 |
| 3686 core.Map toJson() { |
| 3687 var _json = new core.Map(); |
| 3688 if (alsologtostderr != null) { |
| 3689 _json["alsologtostderr"] = alsologtostderr; |
| 3690 } |
| 3691 if (baseTaskDir != null) { |
| 3692 _json["baseTaskDir"] = baseTaskDir; |
| 3693 } |
| 3694 if (baseUrl != null) { |
| 3695 _json["baseUrl"] = baseUrl; |
| 3696 } |
| 3697 if (commandlinesFileName != null) { |
| 3698 _json["commandlinesFileName"] = commandlinesFileName; |
| 3699 } |
| 3700 if (continueOnException != null) { |
| 3701 _json["continueOnException"] = continueOnException; |
| 3702 } |
| 3703 if (dataflowApiVersion != null) { |
| 3704 _json["dataflowApiVersion"] = dataflowApiVersion; |
| 3705 } |
| 3706 if (harnessCommand != null) { |
| 3707 _json["harnessCommand"] = harnessCommand; |
| 3708 } |
| 3709 if (languageHint != null) { |
| 3710 _json["languageHint"] = languageHint; |
| 3711 } |
| 3712 if (logDir != null) { |
| 3713 _json["logDir"] = logDir; |
| 3714 } |
| 3715 if (logToSerialconsole != null) { |
| 3716 _json["logToSerialconsole"] = logToSerialconsole; |
| 3717 } |
| 3718 if (logUploadLocation != null) { |
| 3719 _json["logUploadLocation"] = logUploadLocation; |
| 3720 } |
| 3721 if (oauthScopes != null) { |
| 3722 _json["oauthScopes"] = oauthScopes; |
| 3723 } |
| 3724 if (parallelWorkerSettings != null) { |
| 3725 _json["parallelWorkerSettings"] = (parallelWorkerSettings).toJson(); |
| 3726 } |
| 3727 if (streamingWorkerMainClass != null) { |
| 3728 _json["streamingWorkerMainClass"] = streamingWorkerMainClass; |
| 3729 } |
| 3730 if (taskGroup != null) { |
| 3731 _json["taskGroup"] = taskGroup; |
| 3732 } |
| 3733 if (taskUser != null) { |
| 3734 _json["taskUser"] = taskUser; |
| 3735 } |
| 3736 if (tempStoragePrefix != null) { |
| 3737 _json["tempStoragePrefix"] = tempStoragePrefix; |
| 3738 } |
| 3739 if (vmId != null) { |
| 3740 _json["vmId"] = vmId; |
| 3741 } |
| 3742 if (workflowFileName != null) { |
| 3743 _json["workflowFileName"] = workflowFileName; |
| 3744 } |
| 3745 return _json; |
| 3746 } |
| 3747 } |
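// Usage sketch (editor's example, not part of the generated file): fills in a
// few commonly used TaskRunnerSettings fields. All literal values (bucket
// name, user/group, log directory) are hypothetical placeholders.
TaskRunnerSettings exampleTaskRunnerSettings() {
  return new TaskRunnerSettings()
    ..taskUser = 'root'
    ..taskGroup = 'wheel'
    ..logDir = '/var/log/dataflow' // Hypothetical log directory.
    ..alsologtostderr = true
    ..tempStoragePrefix = 'storage.googleapis.com/my-bucket/tmp'
    ..parallelWorkerSettings = (new WorkerSettings()..reportingEnabled = true);
}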
| 3748 |
| 3749 /** |
| 3750 * Global topology of the streaming Dataflow job, including all computations and |
| 3751 * their sharded locations. |
| 3752 */ |
| 3753 class TopologyConfig { |
| 3754 /** The computations associated with a streaming Dataflow job. */ |
| 3755 core.List<ComputationTopology> computations; |
| 3756 /** The disks assigned to a streaming Dataflow job. */ |
| 3757 core.List<DataDiskAssignment> dataDiskAssignments; |
| 3758 /** Maps user stage names to stable computation names. */ |
| 3759 core.Map<core.String, core.String> userStageToComputationNameMap; |
| 3760 |
| 3761 TopologyConfig(); |
| 3762 |
| 3763 TopologyConfig.fromJson(core.Map _json) { |
| 3764 if (_json.containsKey("computations")) { |
| 3765 computations = _json["computations"].map((value) => new ComputationTopology.fromJson(value)).toList(); |
| 3766 } |
| 3767 if (_json.containsKey("dataDiskAssignments")) { |
| 3768 dataDiskAssignments = _json["dataDiskAssignments"].map((value) => new DataDiskAssignment.fromJson(value)).toList(); |
| 3769 } |
| 3770 if (_json.containsKey("userStageToComputationNameMap")) { |
| 3771 userStageToComputationNameMap = _json["userStageToComputationNameMap"]; |
| 3772 } |
| 3773 } |
| 3774 |
| 3775 core.Map toJson() { |
| 3776 var _json = new core.Map(); |
| 3777 if (computations != null) { |
| 3778 _json["computations"] = computations.map((value) => (value).toJson()).toList(); |
| 3779 } |
| 3780 if (dataDiskAssignments != null) { |
| 3781 _json["dataDiskAssignments"] = dataDiskAssignments.map((value) => (value).toJson()).toList(); |
| 3782 } |
| 3783 if (userStageToComputationNameMap != null) { |
| 3784 _json["userStageToComputationNameMap"] = userStageToComputationNameMap; |
| 3785 } |
| 3786 return _json; |
| 3787 } |
| 3788 } |
| 3789 |
| 3790 /** |
| 3791 * WorkItem represents basic information about a WorkItem to be executed in the |
| 3792 * cloud. |
| 3793 */ |
| 3794 class WorkItem { |
| 3795 /** Work item-specific configuration as an opaque blob. */ |
| 3796 core.String configuration; |
| 3797 /** Identifies this WorkItem. */ |
| 3798 core.String id; |
| 3799 /** The initial index to use when reporting the status of the WorkItem. */ |
| 3800 core.String initialReportIndex; |
| 3801 /** Identifies the workflow job this WorkItem belongs to. */ |
| 3802 core.String jobId; |
| 3803 /** Time when the lease on this [Work][] will expire. */ |
| 3804 core.String leaseExpireTime; |
| 3805 /** Additional information for MapTask WorkItems. */ |
| 3806 MapTask mapTask; |
| 3807 /** |
| 3808 * Any required packages that need to be fetched in order to execute this |
| 3809 * WorkItem. |
| 3810 */ |
| 3811 core.List<Package> packages; |
| 3812 /** Identifies the cloud project this WorkItem belongs to. */ |
| 3813 core.String projectId; |
| 3814 /** Recommended reporting interval. */ |
| 3815 core.String reportStatusInterval; |
| 3816 /** Additional information for SeqMapTask WorkItems. */ |
| 3817 SeqMapTask seqMapTask; |
| 3818 /** Additional information for ShellTask WorkItems. */ |
| 3819 ShellTask shellTask; |
| 3820 /** Additional information for source operation WorkItems. */ |
| 3821 SourceOperationRequest sourceOperationTask; |
| 3822 /** Additional information for StreamingComputationTask WorkItems. */ |
| 3823 StreamingComputationTask streamingComputationTask; |
| 3824 /** Additional information for StreamingSetupTask WorkItems. */ |
| 3825 StreamingSetupTask streamingSetupTask; |
| 3826 |
| 3827 WorkItem(); |
| 3828 |
| 3829 WorkItem.fromJson(core.Map _json) { |
| 3830 if (_json.containsKey("configuration")) { |
| 3831 configuration = _json["configuration"]; |
| 3832 } |
| 3833 if (_json.containsKey("id")) { |
| 3834 id = _json["id"]; |
| 3835 } |
| 3836 if (_json.containsKey("initialReportIndex")) { |
| 3837 initialReportIndex = _json["initialReportIndex"]; |
| 3838 } |
| 3839 if (_json.containsKey("jobId")) { |
| 3840 jobId = _json["jobId"]; |
| 3841 } |
| 3842 if (_json.containsKey("leaseExpireTime")) { |
| 3843 leaseExpireTime = _json["leaseExpireTime"]; |
| 3844 } |
| 3845 if (_json.containsKey("mapTask")) { |
| 3846 mapTask = new MapTask.fromJson(_json["mapTask"]); |
| 3847 } |
| 3848 if (_json.containsKey("packages")) { |
| 3849 packages = _json["packages"].map((value) => new Package.fromJson(value)).toList(); |
| 3850 } |
| 3851 if (_json.containsKey("projectId")) { |
| 3852 projectId = _json["projectId"]; |
| 3853 } |
| 3854 if (_json.containsKey("reportStatusInterval")) { |
| 3855 reportStatusInterval = _json["reportStatusInterval"]; |
| 3856 } |
| 3857 if (_json.containsKey("seqMapTask")) { |
| 3858 seqMapTask = new SeqMapTask.fromJson(_json["seqMapTask"]); |
| 3859 } |
| 3860 if (_json.containsKey("shellTask")) { |
| 3861 shellTask = new ShellTask.fromJson(_json["shellTask"]); |
| 3862 } |
| 3863 if (_json.containsKey("sourceOperationTask")) { |
| 3864 sourceOperationTask = new SourceOperationRequest.fromJson(_json["sourceOperationTask"]); |
| 3865 } |
| 3866 if (_json.containsKey("streamingComputationTask")) { |
| 3867 streamingComputationTask = new StreamingComputationTask.fromJson(_json["streamingComputationTask"]); |
| 3868 } |
| 3869 if (_json.containsKey("streamingSetupTask")) { |
| 3870 streamingSetupTask = new StreamingSetupTask.fromJson(_json["streamingSetupTask"]); |
| 3871 } |
| 3872 } |
| 3873 |
| 3874 core.Map toJson() { |
| 3875 var _json = new core.Map(); |
| 3876 if (configuration != null) { |
| 3877 _json["configuration"] = configuration; |
| 3878 } |
| 3879 if (id != null) { |
| 3880 _json["id"] = id; |
| 3881 } |
| 3882 if (initialReportIndex != null) { |
| 3883 _json["initialReportIndex"] = initialReportIndex; |
| 3884 } |
| 3885 if (jobId != null) { |
| 3886 _json["jobId"] = jobId; |
| 3887 } |
| 3888 if (leaseExpireTime != null) { |
| 3889 _json["leaseExpireTime"] = leaseExpireTime; |
| 3890 } |
| 3891 if (mapTask != null) { |
| 3892 _json["mapTask"] = (mapTask).toJson(); |
| 3893 } |
| 3894 if (packages != null) { |
| 3895 _json["packages"] = packages.map((value) => (value).toJson()).toList(); |
| 3896 } |
| 3897 if (projectId != null) { |
| 3898 _json["projectId"] = projectId; |
| 3899 } |
| 3900 if (reportStatusInterval != null) { |
| 3901 _json["reportStatusInterval"] = reportStatusInterval; |
| 3902 } |
| 3903 if (seqMapTask != null) { |
| 3904 _json["seqMapTask"] = (seqMapTask).toJson(); |
| 3905 } |
| 3906 if (shellTask != null) { |
| 3907 _json["shellTask"] = (shellTask).toJson(); |
| 3908 } |
| 3909 if (sourceOperationTask != null) { |
| 3910 _json["sourceOperationTask"] = (sourceOperationTask).toJson(); |
| 3911 } |
| 3912 if (streamingComputationTask != null) { |
| 3913 _json["streamingComputationTask"] = (streamingComputationTask).toJson(); |
| 3914 } |
| 3915 if (streamingSetupTask != null) { |
| 3916 _json["streamingSetupTask"] = (streamingSetupTask).toJson(); |
| 3917 } |
| 3918 return _json; |
| 3919 } |
| 3920 } |
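// Usage sketch (editor's example, not part of the generated file): a WorkItem
// carries exactly one task payload. This hypothetical dispatcher inspects
// which task-specific field is populated and routes accordingly.
core.String classifyWorkItem(WorkItem item) {
  if (item.mapTask != null) return 'map task';
  if (item.seqMapTask != null) return 'seq map task';
  if (item.shellTask != null) return 'shell task';
  if (item.sourceOperationTask != null) return 'source operation';
  if (item.streamingSetupTask != null) return 'streaming setup';
  if (item.streamingComputationTask != null) return 'streaming computation';
  return 'unknown work item type';
}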
| 3921 |
| 3922 /** |
| 3923 * The Dataflow service's idea of the current state of a WorkItem being |
| 3924 * processed by a worker. |
| 3925 */ |
| 3926 class WorkItemServiceState { |
| 3927 /** |
| 3928 * Other data returned by the service, specific to the particular worker |
| 3929 * harness. |
| 3930 * |
| 3931 * The values for Object must be JSON objects. They can consist of `num`, |
| 3932 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 3933 */ |
| 3934 core.Map<core.String, core.Object> harnessData; |
| 3935 /** Time at which the current lease will expire. */ |
| 3936 core.String leaseExpireTime; |
| 3937 /** |
| 3938 * The index value to use for the next report sent by the worker. Note: If the |
| 3939 * report call fails for whatever reason, the worker should reuse this index |
| 3940 * for subsequent report attempts. |
| 3941 */ |
| 3942 core.String nextReportIndex; |
| 3943 /** New recommended reporting interval. */ |
| 3944 core.String reportStatusInterval; |
| 3945 /** |
| 3946 * The progress point in the WorkItem where the Dataflow service suggests that |
| 3947 * the worker truncate the task. |
| 3948 */ |
| 3949 ApproximateProgress suggestedStopPoint; |
| 3950 /** Obsolete, always empty. */ |
| 3951 Position suggestedStopPosition; |
| 3952 |
| 3953 WorkItemServiceState(); |
| 3954 |
| 3955 WorkItemServiceState.fromJson(core.Map _json) { |
| 3956 if (_json.containsKey("harnessData")) { |
| 3957 harnessData = _json["harnessData"]; |
| 3958 } |
| 3959 if (_json.containsKey("leaseExpireTime")) { |
| 3960 leaseExpireTime = _json["leaseExpireTime"]; |
| 3961 } |
| 3962 if (_json.containsKey("nextReportIndex")) { |
| 3963 nextReportIndex = _json["nextReportIndex"]; |
| 3964 } |
| 3965 if (_json.containsKey("reportStatusInterval")) { |
| 3966 reportStatusInterval = _json["reportStatusInterval"]; |
| 3967 } |
| 3968 if (_json.containsKey("suggestedStopPoint")) { |
| 3969 suggestedStopPoint = new ApproximateProgress.fromJson(_json["suggestedStopPoint"]); |
| 3970 } |
| 3971 if (_json.containsKey("suggestedStopPosition")) { |
| 3972 suggestedStopPosition = new Position.fromJson(_json["suggestedStopPosition"]); |
| 3973 } |
| 3974 } |
| 3975 |
| 3976 core.Map toJson() { |
| 3977 var _json = new core.Map(); |
| 3978 if (harnessData != null) { |
| 3979 _json["harnessData"] = harnessData; |
| 3980 } |
| 3981 if (leaseExpireTime != null) { |
| 3982 _json["leaseExpireTime"] = leaseExpireTime; |
| 3983 } |
| 3984 if (nextReportIndex != null) { |
| 3985 _json["nextReportIndex"] = nextReportIndex; |
| 3986 } |
| 3987 if (reportStatusInterval != null) { |
| 3988 _json["reportStatusInterval"] = reportStatusInterval; |
| 3989 } |
| 3990 if (suggestedStopPoint != null) { |
| 3991 _json["suggestedStopPoint"] = (suggestedStopPoint).toJson(); |
| 3992 } |
| 3993 if (suggestedStopPosition != null) { |
| 3994 _json["suggestedStopPosition"] = (suggestedStopPosition).toJson(); |
| 3995 } |
| 3996 return _json; |
| 3997 } |
| 3998 } |
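// Usage sketch (editor's example, not part of the generated file): applies
// the report-index rule from the doc comment above. If the report call failed
// there is no response, so the worker must reuse its previous index; the
// helper name and null-for-failure convention are assumptions.
core.String chooseNextReportIndex(core.String lastIndex,
                                  WorkItemServiceState responseOrNull) {
  // Reuse the old index when the report failed so the retried report is
  // idempotent; otherwise adopt the index the service handed back.
  return responseOrNull == null ? lastIndex : responseOrNull.nextReportIndex;
}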
| 3999 |
| 4000 /** Conveys a worker's progress through the work described by a WorkItem. */ |
| 4001 class WorkItemStatus { |
| 4002 /** True if the WorkItem was completed (successfully or unsuccessfully). */ |
| 4003 core.bool completed; |
| 4004 /** See documentation of stop_position. */ |
| 4005 DynamicSourceSplit dynamicSourceSplit; |
| 4006 /** |
| 4007 * Specifies errors which occurred during processing. If errors are provided, |
| 4008 * and completed = true, then the WorkItem is considered to have failed. |
| 4009 */ |
| 4010 core.List<Status> errors; |
| 4011 /** Worker output metrics (counters) for this WorkItem. */ |
| 4012 core.List<MetricUpdate> metricUpdates; |
| 4013 /** The WorkItem's approximate progress. */ |
| 4014 ApproximateProgress progress; |
| 4015 /** |
| 4016 * The report index. When a WorkItem is leased, the lease will contain an |
| 4017 * initial report index. When a WorkItem's status is reported to the system, |
| 4018 * the report should be sent with that report index, and the response will |
| 4019 * contain the index the worker should use for the next report. Reports |
| 4020 * received with unexpected index values will be rejected by the service. In |
| 4021 * order to preserve idempotency, the worker should not alter the contents of |
| 4022 * a report, even if the worker must submit the same report multiple times |
| 4023 * before getting back a response. The worker should not submit a subsequent |
| 4024 * report until the response for the previous report has been received from |
| 4025 * the service. |
| 4026 */ |
| 4027 core.String reportIndex; |
| 4028 /** Amount of time the worker requests for its lease. */ |
| 4029 core.String requestedLeaseDuration; |
| 4030 /** DEPRECATED in favor of dynamic_source_split. */ |
| 4031 SourceFork sourceFork; |
| 4032 /** |
| 4033 * If the work item represented a SourceOperationRequest, and the work is |
| 4034 * completed, contains the result of the operation. |
| 4035 */ |
| 4036 SourceOperationResponse sourceOperationResponse; |
| 4037 /** |
| 4038 * A worker may split an active map task in two parts, "primary" and |
| 4039 * "residual", continuing to process the primary part and returning the |
| 4040 * residual part into the pool of available work. This event is called a |
| 4041 * "dynamic split" and is critical to the dynamic work rebalancing feature. |
| 4042 * The two obtained sub-tasks are called "parts" of the split. The parts, if |
| 4043 * concatenated, must represent the same input as would be read by the current |
| 4044 * task if the split did not happen. The exact way in which the original task |
| 4045 * is decomposed into the two parts is specified either as a position |
| 4046 * demarcating them (stop_position), or explicitly as two DerivedSources, if |
| 4047 * this task consumes a user-defined source type (dynamic_source_split). The |
| 4048 * "current" task is adjusted as a result of the split: after a task with |
| 4049 * range [A, B) sends a stop_position update at C, its range is considered to |
| 4050 * be [A, C), e.g.: * Progress should be interpreted relative to the new |
| 4051 * range, e.g. "75% completed" means "75% of [A, C) completed" * The worker |
| 4052 * should interpret proposed_stop_position relative to the new range, e.g. |
| 4053 * "split at 68%" should be interpreted as "split at 68% of [A, C)". * If the |
| 4054 * worker chooses to split again using stop_position, only stop_positions in |
| 4055 * [A, C) will be accepted. * Etc. dynamic_source_split has similar semantics: |
| 4056 * e.g., if a task with source S splits using dynamic_source_split into {P, R} |
| 4057 * (where P and R must be together equivalent to S), then subsequent progress |
| 4058 * and proposed_stop_position should be interpreted relative to P, and in a |
| 4059 * potential subsequent dynamic_source_split into {P', R'}, P' and R' must be |
| 4060 * together equivalent to P, etc. |
| 4061 */ |
| 4062 Position stopPosition; |
| 4063 /** Identifies the WorkItem. */ |
| 4064 core.String workItemId; |
| 4065 |
| 4066 WorkItemStatus(); |
| 4067 |
| 4068 WorkItemStatus.fromJson(core.Map _json) { |
| 4069 if (_json.containsKey("completed")) { |
| 4070 completed = _json["completed"]; |
| 4071 } |
| 4072 if (_json.containsKey("dynamicSourceSplit")) { |
| 4073 dynamicSourceSplit = new DynamicSourceSplit.fromJson(_json["dynamicSourceSplit"]); |
| 4074 } |
| 4075 if (_json.containsKey("errors")) { |
| 4076 errors = _json["errors"].map((value) => new Status.fromJson(value)).toList(); |
| 4077 } |
| 4078 if (_json.containsKey("metricUpdates")) { |
| 4079 metricUpdates = _json["metricUpdates"].map((value) => new MetricUpdate.fromJson(value)).toList(); |
| 4080 } |
| 4081 if (_json.containsKey("progress")) { |
| 4082 progress = new ApproximateProgress.fromJson(_json["progress"]); |
| 4083 } |
| 4084 if (_json.containsKey("reportIndex")) { |
| 4085 reportIndex = _json["reportIndex"]; |
| 4086 } |
| 4087 if (_json.containsKey("requestedLeaseDuration")) { |
| 4088 requestedLeaseDuration = _json["requestedLeaseDuration"]; |
| 4089 } |
| 4090 if (_json.containsKey("sourceFork")) { |
| 4091 sourceFork = new SourceFork.fromJson(_json["sourceFork"]); |
| 4092 } |
| 4093 if (_json.containsKey("sourceOperationResponse")) { |
| 4094 sourceOperationResponse = new SourceOperationResponse.fromJson(_json["sourceOperationResponse"]); |
| 4095 } |
| 4096 if (_json.containsKey("stopPosition")) { |
| 4097 stopPosition = new Position.fromJson(_json["stopPosition"]); |
| 4098 } |
| 4099 if (_json.containsKey("workItemId")) { |
| 4100 workItemId = _json["workItemId"]; |
| 4101 } |
| 4102 } |
| 4103 |
| 4104 core.Map toJson() { |
| 4105 var _json = new core.Map(); |
| 4106 if (completed != null) { |
| 4107 _json["completed"] = completed; |
| 4108 } |
| 4109 if (dynamicSourceSplit != null) { |
| 4110 _json["dynamicSourceSplit"] = (dynamicSourceSplit).toJson(); |
| 4111 } |
| 4112 if (errors != null) { |
| 4113 _json["errors"] = errors.map((value) => (value).toJson()).toList(); |
| 4114 } |
| 4115 if (metricUpdates != null) { |
| 4116 _json["metricUpdates"] = metricUpdates.map((value) => (value).toJson()).toList(); |
| 4117 } |
| 4118 if (progress != null) { |
| 4119 _json["progress"] = (progress).toJson(); |
| 4120 } |
| 4121 if (reportIndex != null) { |
| 4122 _json["reportIndex"] = reportIndex; |
| 4123 } |
| 4124 if (requestedLeaseDuration != null) { |
| 4125 _json["requestedLeaseDuration"] = requestedLeaseDuration; |
| 4126 } |
| 4127 if (sourceFork != null) { |
| 4128 _json["sourceFork"] = (sourceFork).toJson(); |
| 4129 } |
| 4130 if (sourceOperationResponse != null) { |
| 4131 _json["sourceOperationResponse"] = (sourceOperationResponse).toJson(); |
| 4132 } |
| 4133 if (stopPosition != null) { |
| 4134 _json["stopPosition"] = (stopPosition).toJson(); |
| 4135 } |
| 4136 if (workItemId != null) { |
| 4137 _json["workItemId"] = workItemId; |
| 4138 } |
| 4139 return _json; |
| 4140 } |
| 4141 } |
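// Usage sketch (editor's example, not part of the generated file): builds a
// minimal progress report for a WorkItem. Per the doc comment above, the
// report index must come from the lease (initialReportIndex) or from the
// previous WorkItemServiceState; the lease duration value is illustrative.
WorkItemStatus exampleProgressReport(WorkItem item, core.String reportIndex) {
  return new WorkItemStatus()
    ..workItemId = item.id
    ..reportIndex = reportIndex // Reuse unchanged if the report is retried.
    ..completed = false
    ..requestedLeaseDuration = '300s'; // Illustrative duration string.
}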
| 4142 |
| 4143 /** |
| 4144 * Describes one particular pool of Dataflow workers to be instantiated by the |
| 4145 * Dataflow service in order to perform the computations required by a job. Note |
| 4146 * that a workflow job may use multiple pools, in order to match the various |
| 4147 * computational requirements of the various stages of the job. |
| 4148 */ |
| 4149 class WorkerPool { |
| 4150 /** Settings for autoscaling of this WorkerPool. */ |
| 4151 AutoscalingSettings autoscalingSettings; |
| 4152 /** Data disks that are used by a VM in this workflow. */ |
| 4153 core.List<Disk> dataDisks; |
| 4154 /** |
| 4155 * The default package set to install. This allows the service to select a |
| 4156 * default set of packages which are useful to worker harnesses written in a |
| 4157 * particular language. |
| 4158 * Possible string values are: |
| 4159 * - "DEFAULT_PACKAGE_SET_UNKNOWN" : A DEFAULT_PACKAGE_SET_UNKNOWN. |
| 4160 * - "DEFAULT_PACKAGE_SET_NONE" : A DEFAULT_PACKAGE_SET_NONE. |
| 4161 * - "DEFAULT_PACKAGE_SET_JAVA" : A DEFAULT_PACKAGE_SET_JAVA. |
| 4162 * - "DEFAULT_PACKAGE_SET_PYTHON" : A DEFAULT_PACKAGE_SET_PYTHON. |
| 4163 */ |
| 4164 core.String defaultPackageSet; |
| 4165 /** |
| 4166 * Size of root disk for VMs, in GB. If zero or unspecified, the service will |
| 4167 * attempt to choose a reasonable default. |
| 4168 */ |
| 4169 core.int diskSizeGb; |
| 4170 /** Fully qualified source image for disks. */ |
| 4171 core.String diskSourceImage; |
| 4172 /** |
| 4173 * Type of root disk for VMs. If empty or unspecified, the service will |
| 4174 * attempt to choose a reasonable default. |
| 4175 */ |
| 4176 core.String diskType; |
| 4177 /** |
| 4178 * The kind of the worker pool; currently only 'harness' and 'shuffle' are |
| 4179 * supported. |
| 4180 */ |
| 4181 core.String kind; |
| 4182 /** |
| 4183 * Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service |
| 4184 * will attempt to choose a reasonable default. |
| 4185 */ |
| 4186 core.String machineType; |
| 4187 /** Metadata to set on the Google Compute Engine VMs. */ |
| 4188 core.Map<core.String, core.String> metadata; |
| 4189 /** |
| 4190 * Network to which VMs will be assigned. If empty or unspecified, the service |
| 4191 * will use the network "default". |
| 4192 */ |
| 4193 core.String network; |
| 4194 /** |
| 4195 * Number of Google Compute Engine workers in this pool needed to execute the |
| 4196 * job. If zero or unspecified, the service will attempt to choose a |
| 4197 * reasonable default. |
| 4198 */ |
| 4199 core.int numWorkers; |
| 4200 /** |
| 4201 * The action to take on host maintenance, as defined by the Google Compute |
| 4202 * Engine API. |
| 4203 */ |
| 4204 core.String onHostMaintenance; |
| 4205 /** Packages to be installed on workers. */ |
| 4206 core.List<Package> packages; |
| 4207 /** |
| 4208 * Extra arguments for this worker pool. |
| 4209 * |
| 4210 * The values for Object must be JSON objects. They can consist of `num`, |
| 4211 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 4212 */ |
| 4213 core.Map<core.String, core.Object> poolArgs; |
| 4214 /** |
| 4215 * Settings passed through to Google Compute Engine workers when using the |
| 4216 * standard Dataflow task runner. Users should ignore this field. |
| 4217 */ |
| 4218 TaskRunnerSettings taskrunnerSettings; |
| 4219 /** |
| 4220 * Sets the policy for determining when to turn down the worker pool. Allowed |
| 4221 * values are: TEARDOWN_ALWAYS, TEARDOWN_ON_SUCCESS, and TEARDOWN_NEVER. |
| 4222 * TEARDOWN_ALWAYS means workers are always torn down regardless of whether |
| 4223 * the job succeeds. TEARDOWN_ON_SUCCESS means workers are torn down if the |
| 4224 * job succeeds. TEARDOWN_NEVER means the workers are never torn down. If the |
| 4225 * workers are not torn down by the service, they will continue to run and use |
| 4226 * Google Compute Engine VM resources in the user's project until they are |
| 4227 * explicitly terminated by the user. Because of this, Google recommends using |
| 4228 * the TEARDOWN_ALWAYS policy except for small, manually supervised test jobs. |
| 4229 * If unknown or unspecified, the service will attempt to choose a reasonable |
| 4230 * default. |
| 4231 * Possible string values are: |
| 4232 * - "TEARDOWN_POLICY_UNKNOWN" : A TEARDOWN_POLICY_UNKNOWN. |
| 4233 * - "TEARDOWN_ALWAYS" : A TEARDOWN_ALWAYS. |
| 4234 * - "TEARDOWN_ON_SUCCESS" : A TEARDOWN_ON_SUCCESS. |
| 4235 * - "TEARDOWN_NEVER" : A TEARDOWN_NEVER. |
| 4236 */ |
| 4237 core.String teardownPolicy; |
| 4238 /** |
| 4239 * Zone to run the worker pools in (e.g. "us-central1-b"). If empty or |
| 4240 * unspecified, the service will attempt to choose a reasonable default. |
| 4241 */ |
| 4242 core.String zone; |
| 4243 |
| 4244 WorkerPool(); |
| 4245 |
| 4246 WorkerPool.fromJson(core.Map _json) { |
| 4247 if (_json.containsKey("autoscalingSettings")) { |
| 4248 autoscalingSettings = new AutoscalingSettings.fromJson(_json["autoscalingSettings"]); |
| 4249 } |
| 4250 if (_json.containsKey("dataDisks")) { |
| 4251 dataDisks = _json["dataDisks"].map((value) => new Disk.fromJson(value)).toList(); |
| 4252 } |
| 4253 if (_json.containsKey("defaultPackageSet")) { |
| 4254 defaultPackageSet = _json["defaultPackageSet"]; |
| 4255 } |
| 4256 if (_json.containsKey("diskSizeGb")) { |
| 4257 diskSizeGb = _json["diskSizeGb"]; |
| 4258 } |
| 4259 if (_json.containsKey("diskSourceImage")) { |
| 4260 diskSourceImage = _json["diskSourceImage"]; |
| 4261 } |
| 4262 if (_json.containsKey("diskType")) { |
| 4263 diskType = _json["diskType"]; |
| 4264 } |
| 4265 if (_json.containsKey("kind")) { |
| 4266 kind = _json["kind"]; |
| 4267 } |
| 4268 if (_json.containsKey("machineType")) { |
| 4269 machineType = _json["machineType"]; |
| 4270 } |
| 4271 if (_json.containsKey("metadata")) { |
| 4272 metadata = _json["metadata"]; |
| 4273 } |
| 4274 if (_json.containsKey("network")) { |
| 4275 network = _json["network"]; |
| 4276 } |
| 4277 if (_json.containsKey("numWorkers")) { |
| 4278 numWorkers = _json["numWorkers"]; |
| 4279 } |
| 4280 if (_json.containsKey("onHostMaintenance")) { |
| 4281 onHostMaintenance = _json["onHostMaintenance"]; |
| 4282 } |
| 4283 if (_json.containsKey("packages")) { |
| 4284 packages = _json["packages"].map((value) => new Package.fromJson(value)).toList(); |
| 4285 } |
| 4286 if (_json.containsKey("poolArgs")) { |
| 4287 poolArgs = _json["poolArgs"]; |
| 4288 } |
| 4289 if (_json.containsKey("taskrunnerSettings")) { |
| 4290 taskrunnerSettings = new TaskRunnerSettings.fromJson(_json["taskrunnerSettings"]); |
| 4291 } |
| 4292 if (_json.containsKey("teardownPolicy")) { |
| 4293 teardownPolicy = _json["teardownPolicy"]; |
| 4294 } |
| 4295 if (_json.containsKey("zone")) { |
| 4296 zone = _json["zone"]; |
| 4297 } |
| 4298 } |
| 4299 |
| 4300 core.Map toJson() { |
| 4301 var _json = new core.Map(); |
| 4302 if (autoscalingSettings != null) { |
| 4303 _json["autoscalingSettings"] = (autoscalingSettings).toJson(); |
| 4304 } |
| 4305 if (dataDisks != null) { |
| 4306 _json["dataDisks"] = dataDisks.map((value) => (value).toJson()).toList(); |
| 4307 } |
| 4308 if (defaultPackageSet != null) { |
| 4309 _json["defaultPackageSet"] = defaultPackageSet; |
| 4310 } |
| 4311 if (diskSizeGb != null) { |
| 4312 _json["diskSizeGb"] = diskSizeGb; |
| 4313 } |
| 4314 if (diskSourceImage != null) { |
| 4315 _json["diskSourceImage"] = diskSourceImage; |
| 4316 } |
| 4317 if (diskType != null) { |
| 4318 _json["diskType"] = diskType; |
| 4319 } |
| 4320 if (kind != null) { |
| 4321 _json["kind"] = kind; |
| 4322 } |
| 4323 if (machineType != null) { |
| 4324 _json["machineType"] = machineType; |
| 4325 } |
| 4326 if (metadata != null) { |
| 4327 _json["metadata"] = metadata; |
| 4328 } |
| 4329 if (network != null) { |
| 4330 _json["network"] = network; |
| 4331 } |
| 4332 if (numWorkers != null) { |
| 4333 _json["numWorkers"] = numWorkers; |
| 4334 } |
| 4335 if (onHostMaintenance != null) { |
| 4336 _json["onHostMaintenance"] = onHostMaintenance; |
| 4337 } |
| 4338 if (packages != null) { |
| 4339 _json["packages"] = packages.map((value) => (value).toJson()).toList(); |
| 4340 } |
| 4341 if (poolArgs != null) { |
| 4342 _json["poolArgs"] = poolArgs; |
| 4343 } |
| 4344 if (taskrunnerSettings != null) { |
| 4345 _json["taskrunnerSettings"] = (taskrunnerSettings).toJson(); |
| 4346 } |
| 4347 if (teardownPolicy != null) { |
| 4348 _json["teardownPolicy"] = teardownPolicy; |
| 4349 } |
| 4350 if (zone != null) { |
| 4351 _json["zone"] = zone; |
| 4352 } |
| 4353 return _json; |
| 4354 } |
| 4355 } |
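// Usage sketch (editor's example, not part of the generated file): configures
// a small harness pool. Machine type, zone, and worker count are placeholder
// values; unset fields are left for the service to default, as documented.
WorkerPool exampleWorkerPool() {
  return new WorkerPool()
    ..kind = 'harness'
    ..numWorkers = 3
    ..machineType = 'n1-standard-1'
    ..zone = 'us-central1-b'
    ..teardownPolicy = 'TEARDOWN_ALWAYS'; // Recommended for unattended jobs.
}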
| 4356 |
| 4357 /** Provides data to pass through to the worker harness. */ |
| 4358 class WorkerSettings { |
| 4359 /** |
| 4360 * The base URL for accessing Google Cloud APIs. When workers access Google |
| 4361 * Cloud APIs, they logically do so via relative URLs. If this field is |
| 4362 * specified, it supplies the base URL to use for resolving these relative |
| 4363 * URLs. The normative algorithm used is defined by RFC 1808, "Relative |
| 4364 * Uniform Resource Locators". If not specified, the default value is |
| 4365 * "http://www.googleapis.com/" |
| 4366 */ |
| 4367 core.String baseUrl; |
| 4368 /** Send work progress updates to service. */ |
| 4369 core.bool reportingEnabled; |
| 4370 /** |
| 4371 * The Dataflow service path relative to the root URL, for example, |
| 4372 * "dataflow/v1b3/projects". |
| 4373 */ |
| 4374 core.String servicePath; |
| 4375 /** |
| 4376 * The Shuffle service path relative to the root URL, for example, |
| 4377 * "shuffle/v1beta1". |
| 4378 */ |
| 4379 core.String shuffleServicePath; |
| 4380 /** |
| 4381 * The prefix of the resources the system should use for temporary storage. |
| 4382 * The supported resource type is: Google Cloud Storage: |
| 4383 * storage.googleapis.com/{bucket}/{object} |
| 4384 * bucket.storage.googleapis.com/{object} |
| 4385 */ |
| 4386 core.String tempStoragePrefix; |
| 4387 /** ID of the worker running this pipeline. */ |
| 4388 core.String workerId; |
| 4389 |
| 4390 WorkerSettings(); |
| 4391 |
| 4392 WorkerSettings.fromJson(core.Map _json) { |
| 4393 if (_json.containsKey("baseUrl")) { |
| 4394 baseUrl = _json["baseUrl"]; |
| 4395 } |
| 4396 if (_json.containsKey("reportingEnabled")) { |
| 4397 reportingEnabled = _json["reportingEnabled"]; |
| 4398 } |
| 4399 if (_json.containsKey("servicePath")) { |
| 4400 servicePath = _json["servicePath"]; |
| 4401 } |
| 4402 if (_json.containsKey("shuffleServicePath")) { |
| 4403 shuffleServicePath = _json["shuffleServicePath"]; |
| 4404 } |
| 4405 if (_json.containsKey("tempStoragePrefix")) { |
| 4406 tempStoragePrefix = _json["tempStoragePrefix"]; |
| 4407 } |
| 4408 if (_json.containsKey("workerId")) { |
| 4409 workerId = _json["workerId"]; |
| 4410 } |
| 4411 } |
| 4412 |
| 4413 core.Map toJson() { |
| 4414 var _json = new core.Map(); |
| 4415 if (baseUrl != null) { |
| 4416 _json["baseUrl"] = baseUrl; |
| 4417 } |
| 4418 if (reportingEnabled != null) { |
| 4419 _json["reportingEnabled"] = reportingEnabled; |
| 4420 } |
| 4421 if (servicePath != null) { |
| 4422 _json["servicePath"] = servicePath; |
| 4423 } |
| 4424 if (shuffleServicePath != null) { |
| 4425 _json["shuffleServicePath"] = shuffleServicePath; |
| 4426 } |
| 4427 if (tempStoragePrefix != null) { |
| 4428 _json["tempStoragePrefix"] = tempStoragePrefix; |
| 4429 } |
| 4430 if (workerId != null) { |
| 4431 _json["workerId"] = workerId; |
| 4432 } |
| 4433 return _json; |
| 4434 } |
| 4435 } |
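// Usage sketch (editor's example, not part of the generated file): round-trips
// WorkerSettings through the generated toJson/fromJson pair. The field values
// are placeholders.
void workerSettingsRoundTrip() {
  var settings = new WorkerSettings()
    ..servicePath = 'dataflow/v1b3/projects'
    ..reportingEnabled = true
    ..workerId = 'worker-0'; // Hypothetical worker ID.
  var copy = new WorkerSettings.fromJson(settings.toJson());
  core.print(copy.servicePath); // dataflow/v1b3/projects
}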
| 4436 |
| 4437 /** |
| 4438 * An instruction that writes records. Takes one input, produces no outputs. |
| 4439 */ |
| 4440 class WriteInstruction { |
| 4441 /** The input. */ |
| 4442 InstructionInput input; |
| 4443 /** The sink to write to. */ |
| 4444 Sink sink; |
| 4445 |
| 4446 WriteInstruction(); |
| 4447 |
| 4448 WriteInstruction.fromJson(core.Map _json) { |
| 4449 if (_json.containsKey("input")) { |
| 4450 input = new InstructionInput.fromJson(_json["input"]); |
| 4451 } |
| 4452 if (_json.containsKey("sink")) { |
| 4453 sink = new Sink.fromJson(_json["sink"]); |
| 4454 } |
| 4455 } |
| 4456 |
| 4457 core.Map toJson() { |
| 4458 var _json = new core.Map(); |
| 4459 if (input != null) { |
| 4460 _json["input"] = (input).toJson(); |
| 4461 } |
| 4462 if (sink != null) { |
| 4463 _json["sink"] = (sink).toJson(); |
| 4464 } |
| 4465 return _json; |
| 4466 } |
| 4467 } |
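// Usage sketch (editor's example, not part of the generated file): decodes a
// WriteInstruction and checks that both halves are present. The JSON payload
// is an illustrative skeleton, not a real service message; unknown keys are
// simply ignored by the generated fromJson constructors.
void writeInstructionExample() {
  var write = new WriteInstruction.fromJson({
    'input': {},
    'sink': {'spec': {}}
  });
  core.print(write.input != null && write.sink != null); // true
}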