| OLD | NEW |
| 1 // This is a generated file (see the discoveryapis_generator project). | 1 // This is a generated file (see the discoveryapis_generator project). |
| 2 | 2 |
| 3 library googleapis_beta.dataflow.v1b3; | 3 library googleapis_beta.dataflow.v1b3; |
| 4 | 4 |
| 5 import 'dart:core' as core; | 5 import 'dart:core' as core; |
| 6 import 'dart:async' as async; | 6 import 'dart:async' as async; |
| 7 import 'dart:convert' as convert; | 7 import 'dart:convert' as convert; |
| 8 | 8 |
| 9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; | 9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; |
| 10 import 'package:http/http.dart' as http; | 10 import 'package:http/http.dart' as http; |
| (...skipping 90 matching lines...) |
| 101 | 101 |
| 102 /** | 102 /** |
| 103 * Creates a Cloud Dataflow job. | 103 * Creates a Cloud Dataflow job. |
| 104 * | 104 * |
| 105 * [request] - The metadata request object. | 105 * [request] - The metadata request object. |
| 106 * | 106 * |
| 107 * Request parameters: | 107 * Request parameters: |
| 108 * | 108 * |
| 109 * [projectId] - The ID of the Cloud Platform project that the job belongs to. | 109 * [projectId] - The ID of the Cloud Platform project that the job belongs to. |
| 110 * | 110 * |
| 111 * [location] - The location that contains this job. |
| 112 * |
| 113 * [replaceJobId] - Deprecated. This field is now in the Job message. |
| 114 * |
| 111 * [view] - The level of information requested in response. | 115 * [view] - The level of information requested in response. |
| 112 * Possible string values are: | 116 * Possible string values are: |
| 113 * - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN. | 117 * - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN. |
| 114 * - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY. | 118 * - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY. |
| 115 * - "JOB_VIEW_ALL" : A JOB_VIEW_ALL. | 119 * - "JOB_VIEW_ALL" : A JOB_VIEW_ALL. |
| 116 * | 120 * |
| 117 * [replaceJobId] - Deprecated. This field is now in the Job message. | |
| 118 * | |
| 119 * [location] - The location that contains this job. | |
| 120 * | |
| 121 * Completes with a [Job]. | 121 * Completes with a [Job]. |
| 122 * | 122 * |
| 123 * Completes with a [commons.ApiRequestError] if the API endpoint returned an | 123 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
| 124 * error. | 124 * error. |
| 125 * | 125 * |
| 126 * If the used [http.Client] completes with an error when making a REST call, | 126 * If the used [http.Client] completes with an error when making a REST call, |
| 127 * this method will complete with the same error. | 127 * this method will complete with the same error. |
| 128 */ | 128 */ |
| 129 async.Future<Job> create(Job request, core.String projectId, {core.String view, core.String replaceJobId, core.String location}) { | 129 async.Future<Job> create(Job request, core.String projectId, {core.String location, core.String replaceJobId, core.String view}) { |
| 130 var _url = null; | 130 var _url = null; |
| 131 var _queryParams = new core.Map(); | 131 var _queryParams = new core.Map(); |
| 132 var _uploadMedia = null; | 132 var _uploadMedia = null; |
| 133 var _uploadOptions = null; | 133 var _uploadOptions = null; |
| 134 var _downloadOptions = commons.DownloadOptions.Metadata; | 134 var _downloadOptions = commons.DownloadOptions.Metadata; |
| 135 var _body = null; | 135 var _body = null; |
| 136 | 136 |
| 137 if (request != null) { | 137 if (request != null) { |
| 138 _body = convert.JSON.encode((request).toJson()); | 138 _body = convert.JSON.encode((request).toJson()); |
| 139 } | 139 } |
| 140 if (projectId == null) { | 140 if (projectId == null) { |
| 141 throw new core.ArgumentError("Parameter projectId is required."); | 141 throw new core.ArgumentError("Parameter projectId is required."); |
| 142 } | 142 } |
| 143 if (location != null) { |
| 144 _queryParams["location"] = [location]; |
| 145 } |
| 146 if (replaceJobId != null) { |
| 147 _queryParams["replaceJobId"] = [replaceJobId]; |
| 148 } |
| 143 if (view != null) { | 149 if (view != null) { |
| 144 _queryParams["view"] = [view]; | 150 _queryParams["view"] = [view]; |
| 145 } | 151 } |
| 146 if (replaceJobId != null) { | |
| 147 _queryParams["replaceJobId"] = [replaceJobId]; | |
| 148 } | |
| 149 if (location != null) { | |
| 150 _queryParams["location"] = [location]; | |
| 151 } | |
| 152 | 152 |
| 153 _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs'; | 153 _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs'; |
| 154 | 154 |
| 155 var _response = _requester.request(_url, | 155 var _response = _requester.request(_url, |
| 156 "POST", | 156 "POST", |
| 157 body: _body, | 157 body: _body, |
| 158 queryParams: _queryParams, | 158 queryParams: _queryParams, |
| 159 uploadOptions: _uploadOptions, | 159 uploadOptions: _uploadOptions, |
| 160 uploadMedia: _uploadMedia, | 160 uploadMedia: _uploadMedia, |
| 161 downloadOptions: _downloadOptions); | 161 downloadOptions: _downloadOptions); |
| (...skipping 60 matching lines...) |
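Only the order of `create`'s named optional parameters changes here, and Dart named arguments are order-independent, so existing call sites keep working. A minimal usage sketch, assuming an already-authorized `http.Client` (e.g. from package:googleapis_auth) and a placeholder project id:

```dart
import 'dart:async';

import 'package:googleapis_beta/dataflow/v1b3.dart' as dataflow;
import 'package:http/http.dart' as http;

Future<dataflow.Job> createExampleJob(http.Client authedClient) {
  // `authedClient` is assumed to already carry OAuth2 credentials.
  var api = new dataflow.DataflowApi(authedClient);
  var job = new dataflow.Job()..name = 'example-job'; // hypothetical job
  // Named arguments can be passed in any order, so this call is valid
  // against both the old and the new signature.
  return api.projects.jobs.create(job, 'my-project-id',
      location: 'us-central1', view: 'JOB_VIEW_SUMMARY');
}
```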
| 222 | 222 |
| 223 /** | 223 /** |
| 224 * Request the job status. | 224 * Request the job status. |
| 225 * | 225 * |
| 226 * Request parameters: | 226 * Request parameters: |
| 227 * | 227 * |
| 228 * [projectId] - A project id. | 228 * [projectId] - A project id. |
| 229 * | 229 * |
| 230 * [jobId] - The job to get messages for. | 230 * [jobId] - The job to get messages for. |
| 231 * | 231 * |
| 232 * [location] - The location which contains the job specified by job_id. |
| 233 * |
| 232 * [startTime] - Return only metric data that has changed since this time. | 234 * [startTime] - Return only metric data that has changed since this time. |
| 233 * Default is to return all information about all metrics for the job. | 235 * Default is to return all information about all metrics for the job. |
| 234 * | 236 * |
| 235 * [location] - The location which contains the job specified by job_id. | |
| 236 * | |
| 237 * Completes with a [JobMetrics]. | 237 * Completes with a [JobMetrics]. |
| 238 * | 238 * |
| 239 * Completes with a [commons.ApiRequestError] if the API endpoint returned an | 239 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
| 240 * error. | 240 * error. |
| 241 * | 241 * |
| 242 * If the used [http.Client] completes with an error when making a REST call, | 242 * If the used [http.Client] completes with an error when making a REST call, |
| 243 * this method will complete with the same error. | 243 * this method will complete with the same error. |
| 244 */ | 244 */ |
| 245 async.Future<JobMetrics> getMetrics(core.String projectId, core.String jobId, {core.String startTime, core.String location}) { | 245 async.Future<JobMetrics> getMetrics(core.String projectId, core.String jobId, {core.String location, core.String startTime}) { |
| 246 var _url = null; | 246 var _url = null; |
| 247 var _queryParams = new core.Map(); | 247 var _queryParams = new core.Map(); |
| 248 var _uploadMedia = null; | 248 var _uploadMedia = null; |
| 249 var _uploadOptions = null; | 249 var _uploadOptions = null; |
| 250 var _downloadOptions = commons.DownloadOptions.Metadata; | 250 var _downloadOptions = commons.DownloadOptions.Metadata; |
| 251 var _body = null; | 251 var _body = null; |
| 252 | 252 |
| 253 if (projectId == null) { | 253 if (projectId == null) { |
| 254 throw new core.ArgumentError("Parameter projectId is required."); | 254 throw new core.ArgumentError("Parameter projectId is required."); |
| 255 } | 255 } |
| 256 if (jobId == null) { | 256 if (jobId == null) { |
| 257 throw new core.ArgumentError("Parameter jobId is required."); | 257 throw new core.ArgumentError("Parameter jobId is required."); |
| 258 } | 258 } |
| 259 if (location != null) { |
| 260 _queryParams["location"] = [location]; |
| 261 } |
| 259 if (startTime != null) { | 262 if (startTime != null) { |
| 260 _queryParams["startTime"] = [startTime]; | 263 _queryParams["startTime"] = [startTime]; |
| 261 } | 264 } |
| 262 if (location != null) { | |
| 263 _queryParams["location"] = [location]; | |
| 264 } | |
| 265 | 265 |
| 266 _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/metrics'; | 266 _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/metrics'; |
| 267 | 267 |
| 268 var _response = _requester.request(_url, | 268 var _response = _requester.request(_url, |
| 269 "GET", | 269 "GET", |
| 270 body: _body, | 270 body: _body, |
| 271 queryParams: _queryParams, | 271 queryParams: _queryParams, |
| 272 uploadOptions: _uploadOptions, | 272 uploadOptions: _uploadOptions, |
| 273 uploadMedia: _uploadMedia, | 273 uploadMedia: _uploadMedia, |
| 274 downloadOptions: _downloadOptions); | 274 downloadOptions: _downloadOptions); |
| 275 return _response.then((data) => new JobMetrics.fromJson(data)); | 275 return _response.then((data) => new JobMetrics.fromJson(data)); |
| 276 } | 276 } |
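`getMetrics` picks up the same optional `location` query parameter. A short sketch under the same assumptions as the previous example (the guards in the body only send parameters that are non-null):

```dart
import 'dart:async';

import 'package:googleapis_beta/dataflow/v1b3.dart' as dataflow;

Future<dataflow.JobMetrics> fetchRecentMetrics(
    dataflow.DataflowApi api, String jobId) {
  // `location` and `startTime` are optional; omitting them returns all
  // metrics for the job, per the doc comment above.
  return api.projects.jobs.getMetrics('my-project-id', jobId,
      location: 'us-central1', startTime: '2017-01-01T00:00:00Z');
}
```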
| 277 | 277 |
| 278 /** | 278 /** |
| 279 * List the jobs of a project. | 279 * List the jobs of a project. |
| 280 * | 280 * |
| 281 * Request parameters: | 281 * Request parameters: |
| 282 * | 282 * |
| 283 * [projectId] - The project which owns the jobs. | 283 * [projectId] - The project which owns the jobs. |
| 284 * | 284 * |
| 285 * [filter] - The kind of filter to use. | 285 * [filter] - The kind of filter to use. |
| 286 * Possible string values are: | 286 * Possible string values are: |
| 287 * - "UNKNOWN" : A UNKNOWN. | 287 * - "UNKNOWN" : A UNKNOWN. |
| 288 * - "ALL" : A ALL. | 288 * - "ALL" : A ALL. |
| 289 * - "TERMINATED" : A TERMINATED. | 289 * - "TERMINATED" : A TERMINATED. |
| 290 * - "ACTIVE" : A ACTIVE. | 290 * - "ACTIVE" : A ACTIVE. |
| 291 * | 291 * |
| 292 * [location] - The location that contains this job. |
| 293 * |
| 294 * [pageToken] - Set this to the 'next_page_token' field of a previous |
| 295 * response |
| 296 * to request additional results in a long list. |
| 297 * |
| 298 * [pageSize] - If there are many jobs, limit response to at most this many. |
| 299 * The actual number of jobs returned will be the lesser of max_responses |
| 300 * and an unspecified server-defined limit. |
| 301 * |
| 292 * [view] - Level of information requested in response. Default is | 302 * [view] - Level of information requested in response. Default is |
| 293 * `JOB_VIEW_SUMMARY`. | 303 * `JOB_VIEW_SUMMARY`. |
| 294 * Possible string values are: | 304 * Possible string values are: |
| 295 * - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN. | 305 * - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN. |
| 296 * - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY. | 306 * - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY. |
| 297 * - "JOB_VIEW_ALL" : A JOB_VIEW_ALL. | 307 * - "JOB_VIEW_ALL" : A JOB_VIEW_ALL. |
| 298 * | 308 * |
| 299 * [pageSize] - If there are many jobs, limit response to at most this many. | |
| 300 * The actual number of jobs returned will be the lesser of max_responses and | |
| 301 * an unspecified server-defined limit. | |
| 302 * | |
| 303 * [pageToken] - Set this to the 'next_page_token' field of a previous | |
| 304 * response to request additional results in a long list. | |
| 305 * | |
| 306 * [location] - The location that contains this job. | |
| 307 * | |
| 308 * Completes with a [ListJobsResponse]. | 309 * Completes with a [ListJobsResponse]. |
| 309 * | 310 * |
| 310 * Completes with a [commons.ApiRequestError] if the API endpoint returned an | 311 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
| 311 * error. | 312 * error. |
| 312 * | 313 * |
| 313 * If the used [http.Client] completes with an error when making a REST call, | 314 * If the used [http.Client] completes with an error when making a REST call, |
| 314 * this method will complete with the same error. | 315 * this method will complete with the same error. |
| 315 */ | 316 */ |
| 316 async.Future<ListJobsResponse> list(core.String projectId, {core.String filter, core.String view, core.int pageSize, core.String pageToken, core.String location}) { | 317 async.Future<ListJobsResponse> list(core.String projectId, {core.String filter, core.String location, core.String pageToken, core.int pageSize, core.String view}) { |
| 317 var _url = null; | 318 var _url = null; |
| 318 var _queryParams = new core.Map(); | 319 var _queryParams = new core.Map(); |
| 319 var _uploadMedia = null; | 320 var _uploadMedia = null; |
| 320 var _uploadOptions = null; | 321 var _uploadOptions = null; |
| 321 var _downloadOptions = commons.DownloadOptions.Metadata; | 322 var _downloadOptions = commons.DownloadOptions.Metadata; |
| 322 var _body = null; | 323 var _body = null; |
| 323 | 324 |
| 324 if (projectId == null) { | 325 if (projectId == null) { |
| 325 throw new core.ArgumentError("Parameter projectId is required."); | 326 throw new core.ArgumentError("Parameter projectId is required."); |
| 326 } | 327 } |
| 327 if (filter != null) { | 328 if (filter != null) { |
| 328 _queryParams["filter"] = [filter]; | 329 _queryParams["filter"] = [filter]; |
| 329 } | 330 } |
| 331 if (location != null) { |
| 332 _queryParams["location"] = [location]; |
| 333 } |
| 334 if (pageToken != null) { |
| 335 _queryParams["pageToken"] = [pageToken]; |
| 336 } |
| 337 if (pageSize != null) { |
| 338 _queryParams["pageSize"] = ["${pageSize}"]; |
| 339 } |
| 330 if (view != null) { | 340 if (view != null) { |
| 331 _queryParams["view"] = [view]; | 341 _queryParams["view"] = [view]; |
| 332 } | 342 } |
| 333 if (pageSize != null) { | |
| 334 _queryParams["pageSize"] = ["${pageSize}"]; | |
| 335 } | |
| 336 if (pageToken != null) { | |
| 337 _queryParams["pageToken"] = [pageToken]; | |
| 338 } | |
| 339 if (location != null) { | |
| 340 _queryParams["location"] = [location]; | |
| 341 } | |
| 342 | 343 |
| 343 _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs'; | 344 _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs'; |
| 344 | 345 |
| 345 var _response = _requester.request(_url, | 346 var _response = _requester.request(_url, |
| 346 "GET", | 347 "GET", |
| 347 body: _body, | 348 body: _body, |
| 348 queryParams: _queryParams, | 349 queryParams: _queryParams, |
| 349 uploadOptions: _uploadOptions, | 350 uploadOptions: _uploadOptions, |
| 350 uploadMedia: _uploadMedia, | 351 uploadMedia: _uploadMedia, |
| 351 downloadOptions: _downloadOptions); | 352 downloadOptions: _downloadOptions); |
| (...skipping 181 matching lines...) |
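The `pageToken`/`next_page_token` contract documented above lends itself to a simple drain loop. A sketch with placeholder project id and page size:

```dart
import 'dart:async';

import 'package:googleapis_beta/dataflow/v1b3.dart' as dataflow;

Future<List<dataflow.Job>> listAllJobs(dataflow.DataflowApi api) async {
  var jobs = <dataflow.Job>[];
  String pageToken;
  do {
    // Feed each response's nextPageToken into the next request until
    // the server stops returning one.
    var page = await api.projects.jobs.list('my-project-id',
        filter: 'ACTIVE', pageSize: 50, pageToken: pageToken);
    jobs.addAll(page.jobs ?? <dataflow.Job>[]);
    pageToken = page.nextPageToken;
  } while (pageToken != null);
  return jobs;
}
```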
| 533 * | 534 * |
| 534 * [minimumImportance] - Filter to only get messages with importance >= level | 535 * [minimumImportance] - Filter to only get messages with importance >= level |
| 535 * Possible string values are: | 536 * Possible string values are: |
| 536 * - "JOB_MESSAGE_IMPORTANCE_UNKNOWN" : A JOB_MESSAGE_IMPORTANCE_UNKNOWN. | 537 * - "JOB_MESSAGE_IMPORTANCE_UNKNOWN" : A JOB_MESSAGE_IMPORTANCE_UNKNOWN. |
| 537 * - "JOB_MESSAGE_DEBUG" : A JOB_MESSAGE_DEBUG. | 538 * - "JOB_MESSAGE_DEBUG" : A JOB_MESSAGE_DEBUG. |
| 538 * - "JOB_MESSAGE_DETAILED" : A JOB_MESSAGE_DETAILED. | 539 * - "JOB_MESSAGE_DETAILED" : A JOB_MESSAGE_DETAILED. |
| 539 * - "JOB_MESSAGE_BASIC" : A JOB_MESSAGE_BASIC. | 540 * - "JOB_MESSAGE_BASIC" : A JOB_MESSAGE_BASIC. |
| 540 * - "JOB_MESSAGE_WARNING" : A JOB_MESSAGE_WARNING. | 541 * - "JOB_MESSAGE_WARNING" : A JOB_MESSAGE_WARNING. |
| 541 * - "JOB_MESSAGE_ERROR" : A JOB_MESSAGE_ERROR. | 542 * - "JOB_MESSAGE_ERROR" : A JOB_MESSAGE_ERROR. |
| 542 * | 543 * |
| 543 * [pageSize] - If specified, determines the maximum number of messages to | 544 * [location] - The location which contains the job specified by job_id. |
| 544 * return. If unspecified, the service may choose an appropriate default, or | 545 * |
| 545 * may return an arbitrarily large number of results. | 546 * [endTime] - Return only messages with timestamps < end_time. The default is |
| 547 * now |
| 548 * (i.e. return up to the latest messages available). |
| 549 * |
| 550 * [startTime] - If specified, return only messages with timestamps >= |
| 551 * start_time. |
| 552 * The default is the job creation time (i.e. beginning of messages). |
| 546 * | 553 * |
| 547 * [pageToken] - If supplied, this should be the value of next_page_token | 554 * [pageToken] - If supplied, this should be the value of next_page_token |
| 548 * returned by an earlier call. This will cause the next page of results to be | 555 * returned |
| 549 * returned. | 556 * by an earlier call. This will cause the next page of results to |
| 557 * be returned. |
| 550 * | 558 * |
| 551 * [startTime] - If specified, return only messages with timestamps >= | 559 * [pageSize] - If specified, determines the maximum number of messages to |
| 552 * start_time. The default is the job creation time (i.e. beginning of | 560 * return. If unspecified, the service may choose an appropriate |
| 553 * messages). | 561 * default, or may return an arbitrarily large number of results. |
| 554 * | |
| 555 * [endTime] - Return only messages with timestamps < end_time. The default is | |
| 556 * now (i.e. return up to the latest messages available). | |
| 557 * | |
| 558 * [location] - The location which contains the job specified by job_id. | |
| 559 * | 562 * |
| 560 * Completes with a [ListJobMessagesResponse]. | 563 * Completes with a [ListJobMessagesResponse]. |
| 561 * | 564 * |
| 562 * Completes with a [commons.ApiRequestError] if the API endpoint returned an | 565 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
| 563 * error. | 566 * error. |
| 564 * | 567 * |
| 565 * If the used [http.Client] completes with an error when making a REST call, | 568 * If the used [http.Client] completes with an error when making a REST call, |
| 566 * this method will complete with the same error. | 569 * this method will complete with the same error. |
| 567 */ | 570 */ |
| 568 async.Future<ListJobMessagesResponse> list(core.String projectId, core.String jobId, {core.String minimumImportance, core.int pageSize, core.String pageToken, core.String startTime, core.String endTime, core.String location}) { | 571 async.Future<ListJobMessagesResponse> list(core.String projectId, core.String jobId, {core.String minimumImportance, core.String location, core.String endTime, core.String startTime, core.String pageToken, core.int pageSize}) { |
| 569 var _url = null; | 572 var _url = null; |
| 570 var _queryParams = new core.Map(); | 573 var _queryParams = new core.Map(); |
| 571 var _uploadMedia = null; | 574 var _uploadMedia = null; |
| 572 var _uploadOptions = null; | 575 var _uploadOptions = null; |
| 573 var _downloadOptions = commons.DownloadOptions.Metadata; | 576 var _downloadOptions = commons.DownloadOptions.Metadata; |
| 574 var _body = null; | 577 var _body = null; |
| 575 | 578 |
| 576 if (projectId == null) { | 579 if (projectId == null) { |
| 577 throw new core.ArgumentError("Parameter projectId is required."); | 580 throw new core.ArgumentError("Parameter projectId is required."); |
| 578 } | 581 } |
| 579 if (jobId == null) { | 582 if (jobId == null) { |
| 580 throw new core.ArgumentError("Parameter jobId is required."); | 583 throw new core.ArgumentError("Parameter jobId is required."); |
| 581 } | 584 } |
| 582 if (minimumImportance != null) { | 585 if (minimumImportance != null) { |
| 583 _queryParams["minimumImportance"] = [minimumImportance]; | 586 _queryParams["minimumImportance"] = [minimumImportance]; |
| 584 } | 587 } |
| 588 if (location != null) { |
| 589 _queryParams["location"] = [location]; |
| 590 } |
| 591 if (endTime != null) { |
| 592 _queryParams["endTime"] = [endTime]; |
| 593 } |
| 594 if (startTime != null) { |
| 595 _queryParams["startTime"] = [startTime]; |
| 596 } |
| 597 if (pageToken != null) { |
| 598 _queryParams["pageToken"] = [pageToken]; |
| 599 } |
| 585 if (pageSize != null) { | 600 if (pageSize != null) { |
| 586 _queryParams["pageSize"] = ["${pageSize}"]; | 601 _queryParams["pageSize"] = ["${pageSize}"]; |
| 587 } | 602 } |
| 588 if (pageToken != null) { | |
| 589 _queryParams["pageToken"] = [pageToken]; | |
| 590 } | |
| 591 if (startTime != null) { | |
| 592 _queryParams["startTime"] = [startTime]; | |
| 593 } | |
| 594 if (endTime != null) { | |
| 595 _queryParams["endTime"] = [endTime]; | |
| 596 } | |
| 597 if (location != null) { | |
| 598 _queryParams["location"] = [location]; | |
| 599 } | |
| 600 | 603 |
| 601 _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/messages'; | 604 _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/messages'; |
| 602 | 605 |
| 603 var _response = _requester.request(_url, | 606 var _response = _requester.request(_url, |
| 604 "GET", | 607 "GET", |
| 605 body: _body, | 608 body: _body, |
| 606 queryParams: _queryParams, | 609 queryParams: _queryParams, |
| 607 uploadOptions: _uploadOptions, | 610 uploadOptions: _uploadOptions, |
| 608 uploadMedia: _uploadMedia, | 611 uploadMedia: _uploadMedia, |
| 609 downloadOptions: _downloadOptions); | 612 downloadOptions: _downloadOptions); |
| (...skipping 314 matching lines...) |
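The message listing combines an importance filter with a half-open time window (`startTime` inclusive, `endTime` exclusive). A sketch fetching one page of warnings and errors; values are placeholders:

```dart
import 'dart:async';

import 'package:googleapis_beta/dataflow/v1b3.dart' as dataflow;

Future<dataflow.ListJobMessagesResponse> recentWarnings(
    dataflow.DataflowApi api, String jobId) {
  // Unset parameters fall back to the server defaults described in the
  // doc comment (endTime defaults to now, startTime to job creation).
  return api.projects.jobs.messages.list('my-project-id', jobId,
      minimumImportance: 'JOB_MESSAGE_WARNING',
      startTime: '2017-01-01T00:00:00Z',
      pageSize: 100);
}
```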
| 924 * | 927 * |
| 925 * [location] - The location that contains this job. | 928 * [location] - The location that contains this job. |
| 926 * | 929 * |
| 927 * [filter] - The kind of filter to use. | 930 * [filter] - The kind of filter to use. |
| 928 * Possible string values are: | 931 * Possible string values are: |
| 929 * - "UNKNOWN" : A UNKNOWN. | 932 * - "UNKNOWN" : A UNKNOWN. |
| 930 * - "ALL" : A ALL. | 933 * - "ALL" : A ALL. |
| 931 * - "TERMINATED" : A TERMINATED. | 934 * - "TERMINATED" : A TERMINATED. |
| 932 * - "ACTIVE" : A ACTIVE. | 935 * - "ACTIVE" : A ACTIVE. |
| 933 * | 936 * |
| 937 * [pageToken] - Set this to the 'next_page_token' field of a previous |
| 938 * response |
| 939 * to request additional results in a long list. |
| 940 * |
| 941 * [pageSize] - If there are many jobs, limit response to at most this many. |
| 942 * The actual number of jobs returned will be the lesser of max_responses |
| 943 * and an unspecified server-defined limit. |
| 944 * |
| 934 * [view] - Level of information requested in response. Default is | 945 * [view] - Level of information requested in response. Default is |
| 935 * `JOB_VIEW_SUMMARY`. | 946 * `JOB_VIEW_SUMMARY`. |
| 936 * Possible string values are: | 947 * Possible string values are: |
| 937 * - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN. | 948 * - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN. |
| 938 * - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY. | 949 * - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY. |
| 939 * - "JOB_VIEW_ALL" : A JOB_VIEW_ALL. | 950 * - "JOB_VIEW_ALL" : A JOB_VIEW_ALL. |
| 940 * | 951 * |
| 941 * [pageSize] - If there are many jobs, limit response to at most this many. | |
| 942 * The actual number of jobs returned will be the lesser of max_responses and | |
| 943 * an unspecified server-defined limit. | |
| 944 * | |
| 945 * [pageToken] - Set this to the 'next_page_token' field of a previous | |
| 946 * response to request additional results in a long list. | |
| 947 * | |
| 948 * Completes with a [ListJobsResponse]. | 952 * Completes with a [ListJobsResponse]. |
| 949 * | 953 * |
| 950 * Completes with a [commons.ApiRequestError] if the API endpoint returned an | 954 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
| 951 * error. | 955 * error. |
| 952 * | 956 * |
| 953 * If the used [http.Client] completes with an error when making a REST call, | 957 * If the used [http.Client] completes with an error when making a REST call, |
| 954 * this method will complete with the same error. | 958 * this method will complete with the same error. |
| 955 */ | 959 */ |
| 956 async.Future<ListJobsResponse> list(core.String projectId, core.String location, {core.String filter, core.String view, core.int pageSize, core.String pageToken}) { | 960 async.Future<ListJobsResponse> list(core.String projectId, core.String location, {core.String filter, core.String pageToken, core.int pageSize, core.String view}) { |
| 957 var _url = null; | 961 var _url = null; |
| 958 var _queryParams = new core.Map(); | 962 var _queryParams = new core.Map(); |
| 959 var _uploadMedia = null; | 963 var _uploadMedia = null; |
| 960 var _uploadOptions = null; | 964 var _uploadOptions = null; |
| 961 var _downloadOptions = commons.DownloadOptions.Metadata; | 965 var _downloadOptions = commons.DownloadOptions.Metadata; |
| 962 var _body = null; | 966 var _body = null; |
| 963 | 967 |
| 964 if (projectId == null) { | 968 if (projectId == null) { |
| 965 throw new core.ArgumentError("Parameter projectId is required."); | 969 throw new core.ArgumentError("Parameter projectId is required."); |
| 966 } | 970 } |
| 967 if (location == null) { | 971 if (location == null) { |
| 968 throw new core.ArgumentError("Parameter location is required."); | 972 throw new core.ArgumentError("Parameter location is required."); |
| 969 } | 973 } |
| 970 if (filter != null) { | 974 if (filter != null) { |
| 971 _queryParams["filter"] = [filter]; | 975 _queryParams["filter"] = [filter]; |
| 972 } | 976 } |
| 977 if (pageToken != null) { |
| 978 _queryParams["pageToken"] = [pageToken]; |
| 979 } |
| 980 if (pageSize != null) { |
| 981 _queryParams["pageSize"] = ["${pageSize}"]; |
| 982 } |
| 973 if (view != null) { | 983 if (view != null) { |
| 974 _queryParams["view"] = [view]; | 984 _queryParams["view"] = [view]; |
| 975 } | 985 } |
| 976 if (pageSize != null) { | |
| 977 _queryParams["pageSize"] = ["${pageSize}"]; | |
| 978 } | |
| 979 if (pageToken != null) { | |
| 980 _queryParams["pageToken"] = [pageToken]; | |
| 981 } | |
| 982 | 986 |
| 983 _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/locations/' + commons.Escaper.ecapeVariable('$location') + '/jobs'; | 987 _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/locations/' + commons.Escaper.ecapeVariable('$location') + '/jobs'; |
| 984 | 988 |
| 985 var _response = _requester.request(_url, | 989 var _response = _requester.request(_url, |
| 986 "GET", | 990 "GET", |
| 987 body: _body, | 991 body: _body, |
| 988 queryParams: _queryParams, | 992 queryParams: _queryParams, |
| 989 uploadOptions: _uploadOptions, | 993 uploadOptions: _uploadOptions, |
| 990 uploadMedia: _uploadMedia, | 994 uploadMedia: _uploadMedia, |
| 991 downloadOptions: _downloadOptions); | 995 downloadOptions: _downloadOptions); |
| (...skipping 67 matching lines...) |
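Unlike the project-level `list` above, the locations-scoped variant takes `location` as a required positional argument and encodes it into the URL path instead of a query parameter. A one-call sketch with placeholder values:

```dart
import 'dart:async';

import 'package:googleapis_beta/dataflow/v1b3.dart' as dataflow;

Future<dataflow.ListJobsResponse> listJobsInLocation(
    dataflow.DataflowApi api) {
  // Hits v1b3/projects/my-project-id/locations/us-central1/jobs.
  return api.projects.locations.jobs.list('my-project-id', 'us-central1',
      filter: 'ACTIVE', pageSize: 50);
}
```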
| 1059 * Request the job status. | 1063 * Request the job status. |
| 1060 * | 1064 * |
| 1061 * Request parameters: | 1065 * Request parameters: |
| 1062 * | 1066 * |
| 1063 * [projectId] - A project id. | 1067 * [projectId] - A project id. |
| 1064 * | 1068 * |
| 1065 * [location] - The location which contains the job specified by job_id. | 1069 * [location] - The location which contains the job specified by job_id. |
| 1066 * | 1070 * |
| 1067 * [jobId] - The job to get messages about. | 1071 * [jobId] - The job to get messages about. |
| 1068 * | 1072 * |
| 1073 * [endTime] - Return only messages with timestamps < end_time. The default is |
| 1074 * now |
| 1075 * (i.e. return up to the latest messages available). |
| 1076 * |
| 1077 * [startTime] - If specified, return only messages with timestamps >= |
| 1078 * start_time. |
| 1079 * The default is the job creation time (i.e. beginning of messages). |
| 1080 * |
| 1081 * [pageToken] - If supplied, this should be the value of next_page_token |
| 1082 * returned |
| 1083 * by an earlier call. This will cause the next page of results to |
| 1084 * be returned. |
| 1085 * |
| 1086 * [pageSize] - If specified, determines the maximum number of messages to |
| 1087 * return. If unspecified, the service may choose an appropriate |
| 1088 * default, or may return an arbitrarily large number of results. |
| 1089 * |
| 1069 * [minimumImportance] - Filter to only get messages with importance >= level | 1090 * [minimumImportance] - Filter to only get messages with importance >= level |
| 1070 * Possible string values are: | 1091 * Possible string values are: |
| 1071 * - "JOB_MESSAGE_IMPORTANCE_UNKNOWN" : A JOB_MESSAGE_IMPORTANCE_UNKNOWN. | 1092 * - "JOB_MESSAGE_IMPORTANCE_UNKNOWN" : A JOB_MESSAGE_IMPORTANCE_UNKNOWN. |
| 1072 * - "JOB_MESSAGE_DEBUG" : A JOB_MESSAGE_DEBUG. | 1093 * - "JOB_MESSAGE_DEBUG" : A JOB_MESSAGE_DEBUG. |
| 1073 * - "JOB_MESSAGE_DETAILED" : A JOB_MESSAGE_DETAILED. | 1094 * - "JOB_MESSAGE_DETAILED" : A JOB_MESSAGE_DETAILED. |
| 1074 * - "JOB_MESSAGE_BASIC" : A JOB_MESSAGE_BASIC. | 1095 * - "JOB_MESSAGE_BASIC" : A JOB_MESSAGE_BASIC. |
| 1075 * - "JOB_MESSAGE_WARNING" : A JOB_MESSAGE_WARNING. | 1096 * - "JOB_MESSAGE_WARNING" : A JOB_MESSAGE_WARNING. |
| 1076 * - "JOB_MESSAGE_ERROR" : A JOB_MESSAGE_ERROR. | 1097 * - "JOB_MESSAGE_ERROR" : A JOB_MESSAGE_ERROR. |
| 1077 * | 1098 * |
| 1078 * [pageSize] - If specified, determines the maximum number of messages to | |
| 1079 * return. If unspecified, the service may choose an appropriate default, or | |
| 1080 * may return an arbitrarily large number of results. | |
| 1081 * | |
| 1082 * [pageToken] - If supplied, this should be the value of next_page_token | |
| 1083 * returned by an earlier call. This will cause the next page of results to be | |
| 1084 * returned. | |
| 1085 * | |
| 1086 * [startTime] - If specified, return only messages with timestamps >= | |
| 1087 * start_time. The default is the job creation time (i.e. beginning of | |
| 1088 * messages). | |
| 1089 * | |
| 1090 * [endTime] - Return only messages with timestamps < end_time. The default is | |
| 1091 * now (i.e. return up to the latest messages available). | |
| 1092 * | |
| 1093 * Completes with a [ListJobMessagesResponse]. | 1099 * Completes with a [ListJobMessagesResponse]. |
| 1094 * | 1100 * |
| 1095 * Completes with a [commons.ApiRequestError] if the API endpoint returned an | 1101 * Completes with a [commons.ApiRequestError] if the API endpoint returned an |
| 1096 * error. | 1102 * error. |
| 1097 * | 1103 * |
| 1098 * If the used [http.Client] completes with an error when making a REST call, | 1104 * If the used [http.Client] completes with an error when making a REST call, |
| 1099 * this method will complete with the same error. | 1105 * this method will complete with the same error. |
| 1100 */ | 1106 */ |
| 1101 async.Future<ListJobMessagesResponse> list(core.String projectId, core.String location, core.String jobId, {core.String minimumImportance, core.int pageSize, core.String pageToken, core.String startTime, core.String endTime}) { | 1107 async.Future<ListJobMessagesResponse> list(core.String projectId, core.String location, core.String jobId, {core.String endTime, core.String startTime, core.String pageToken, core.int pageSize, core.String minimumImportance}) { |
| 1102 var _url = null; | 1108 var _url = null; |
| 1103 var _queryParams = new core.Map(); | 1109 var _queryParams = new core.Map(); |
| 1104 var _uploadMedia = null; | 1110 var _uploadMedia = null; |
| 1105 var _uploadOptions = null; | 1111 var _uploadOptions = null; |
| 1106 var _downloadOptions = commons.DownloadOptions.Metadata; | 1112 var _downloadOptions = commons.DownloadOptions.Metadata; |
| 1107 var _body = null; | 1113 var _body = null; |
| 1108 | 1114 |
| 1109 if (projectId == null) { | 1115 if (projectId == null) { |
| 1110 throw new core.ArgumentError("Parameter projectId is required."); | 1116 throw new core.ArgumentError("Parameter projectId is required."); |
| 1111 } | 1117 } |
| 1112 if (location == null) { | 1118 if (location == null) { |
| 1113 throw new core.ArgumentError("Parameter location is required."); | 1119 throw new core.ArgumentError("Parameter location is required."); |
| 1114 } | 1120 } |
| 1115 if (jobId == null) { | 1121 if (jobId == null) { |
| 1116 throw new core.ArgumentError("Parameter jobId is required."); | 1122 throw new core.ArgumentError("Parameter jobId is required."); |
| 1117 } | 1123 } |
| 1124 if (endTime != null) { |
| 1125 _queryParams["endTime"] = [endTime]; |
| 1126 } |
| 1127 if (startTime != null) { |
| 1128 _queryParams["startTime"] = [startTime]; |
| 1129 } |
| 1130 if (pageToken != null) { |
| 1131 _queryParams["pageToken"] = [pageToken]; |
| 1132 } |
| 1133 if (pageSize != null) { |
| 1134 _queryParams["pageSize"] = ["${pageSize}"]; |
| 1135 } |
| 1118 if (minimumImportance != null) { | 1136 if (minimumImportance != null) { |
| 1119 _queryParams["minimumImportance"] = [minimumImportance]; | 1137 _queryParams["minimumImportance"] = [minimumImportance]; |
| 1120 } | 1138 } |
| 1121 if (pageSize != null) { | |
| 1122 _queryParams["pageSize"] = ["${pageSize}"]; | |
| 1123 } | |
| 1124 if (pageToken != null) { | |
| 1125 _queryParams["pageToken"] = [pageToken]; | |
| 1126 } | |
| 1127 if (startTime != null) { | |
| 1128 _queryParams["startTime"] = [startTime]; | |
| 1129 } | |
| 1130 if (endTime != null) { | |
| 1131 _queryParams["endTime"] = [endTime]; | |
| 1132 } | |
| 1133 | 1139 |
| 1134 _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/locations/' + commons.Escaper.ecapeVariable('$location') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/messages'; | 1140 _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/locations/' + commons.Escaper.ecapeVariable('$location') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/messages'; |
| 1135 | 1141 |
| 1136 var _response = _requester.request(_url, | 1142 var _response = _requester.request(_url, |
| 1137 "GET", | 1143 "GET", |
| 1138 body: _body, | 1144 body: _body, |
| 1139 queryParams: _queryParams, | 1145 queryParams: _queryParams, |
| 1140 uploadOptions: _uploadOptions, | 1146 uploadOptions: _uploadOptions, |
| 1141 uploadMedia: _uploadMedia, | 1147 uploadMedia: _uploadMedia, |
| 1142 downloadOptions: _downloadOptions); | 1148 downloadOptions: _downloadOptions); |
| (...skipping 214 matching lines...) |
| 1357 return _json; | 1363 return _json; |
| 1358 } | 1364 } |
| 1359 } | 1365 } |
| 1360 | 1366 |
| 1361 /** A progress measurement of a WorkItem by a worker. */ | 1367 /** A progress measurement of a WorkItem by a worker. */ |
| 1362 class ApproximateReportedProgress { | 1368 class ApproximateReportedProgress { |
| 1363 /** | 1369 /** |
| 1364 * Total amount of parallelism in the portion of input of this task that has | 1370 * Total amount of parallelism in the portion of input of this task that has |
| 1365 * already been consumed and is no longer active. In the first two examples | 1371 * already been consumed and is no longer active. In the first two examples |
| 1366 * above (see remaining_parallelism), the value should be 29 or 2 | 1372 * above (see remaining_parallelism), the value should be 29 or 2 |
| 1367 * respectively. The sum of remaining_parallelism and consumed_parallelism | 1373 * respectively. The sum of remaining_parallelism and consumed_parallelism |
| 1368 * should equal the total amount of parallelism in this work item. If | 1374 * should equal the total amount of parallelism in this work item. If |
| 1369 * specified, must be finite. | 1375 * specified, must be finite. |
| 1370 */ | 1376 */ |
| 1371 ReportedParallelism consumedParallelism; | 1377 ReportedParallelism consumedParallelism; |
| 1372 /** | 1378 /** |
| 1373 * Completion as fraction of the input consumed, from 0.0 (beginning, nothing | 1379 * Completion as fraction of the input consumed, from 0.0 (beginning, nothing |
| 1374 * consumed), to 1.0 (end of the input, entire input consumed). | 1380 * consumed), to 1.0 (end of the input, entire input consumed). |
| 1375 */ | 1381 */ |
| 1376 core.double fractionConsumed; | 1382 core.double fractionConsumed; |
| 1377 /** A Position within the work to represent a progress. */ | 1383 /** A Position within the work to represent a progress. */ |
| 1378 Position position; | 1384 Position position; |
| 1379 /** | 1385 /** |
| 1380 * Total amount of parallelism in the input of this task that remains, (i.e. | 1386 * Total amount of parallelism in the input of this task that remains, |
| 1381 * can be delegated to this task and any new tasks via dynamic splitting). | 1387 * (i.e. can be delegated to this task and any new tasks via dynamic |
| 1382 * Always at least 1 for non-finished work items and 0 for finished. "Amount | 1388 * splitting). Always at least 1 for non-finished work items and 0 for |
| 1383 * of parallelism" refers to how many non-empty parts of the input can be read | 1389 * finished. |
| 1384 * in parallel. This does not necessarily equal number of records. An input | 1390 * |
| 1385 * that can be read in parallel down to the individual records is called | 1391 * "Amount of parallelism" refers to how many non-empty parts of the input |
| 1386 * "perfectly splittable". An example of non-perfectly parallelizable input is | 1392 * can be read in parallel. This does not necessarily equal number |
| 1387 * a block-compressed file format where a block of records has to be read as a | 1393 * of records. An input that can be read in parallel down to the |
| 1388 * whole, but different blocks can be read in parallel. Examples: * If we are | 1394 * individual records is called "perfectly splittable". |
| 1389 * processing record #30 (starting at 1) out of 50 in a perfectly splittable | 1395 * An example of non-perfectly parallelizable input is a block-compressed |
| 1390 * 50-record input, this value should be 21 (20 remaining + 1 current). * If | 1396 * file format where a block of records has to be read as a whole, |
| 1391 * we are reading through block 3 in a block-compressed file consisting of 5 | 1397 * but different blocks can be read in parallel. |
| 1392 * blocks, this value should be 3 (since blocks 4 and 5 can be processed in | 1398 * |
| 1393 * parallel by new tasks via dynamic splitting and the current task remains | 1399 * Examples: |
| 1394 * processing block 3). * If we are reading through the last block in a | 1400 * * If we are processing record #30 (starting at 1) out of 50 in a perfectly |
| 1395 * block-compressed file, or reading or processing the last record in a | 1401 * splittable 50-record input, this value should be 21 (20 remaining + 1 |
| 1396 * perfectly splittable input, this value should be 1, because apart from the | 1402 * current). |
| 1397 * current task, no additional remainder can be split off. | 1403 * * If we are reading through block 3 in a block-compressed file consisting |
| 1404 * of 5 blocks, this value should be 3 (since blocks 4 and 5 can be |
| 1405 * processed in parallel by new tasks via dynamic splitting and the current |
| 1406 * task remains processing block 3). |
| 1407 * * If we are reading through the last block in a block-compressed file, |
| 1408 * or reading or processing the last record in a perfectly splittable |
| 1409 * input, this value should be 1, because apart from the current task, no |
| 1410 * additional remainder can be split off. |
| 1398 */ | 1411 */ |
| 1399 ReportedParallelism remainingParallelism; | 1412 ReportedParallelism remainingParallelism; |
| 1400 | 1413 |
| 1401 ApproximateReportedProgress(); | 1414 ApproximateReportedProgress(); |
| 1402 | 1415 |
| 1403 ApproximateReportedProgress.fromJson(core.Map _json) { | 1416 ApproximateReportedProgress.fromJson(core.Map _json) { |
| 1404 if (_json.containsKey("consumedParallelism")) { | 1417 if (_json.containsKey("consumedParallelism")) { |
| 1405 consumedParallelism = new ReportedParallelism.fromJson(_json["consumedParallelism"]); | 1418 consumedParallelism = new ReportedParallelism.fromJson(_json["consumedParallelism"]); |
| 1406 } | 1419 } |
| 1407 if (_json.containsKey("fractionConsumed")) { | 1420 if (_json.containsKey("fractionConsumed")) { |
| (...skipping 58 matching lines...) |
| 1466 } | 1479 } |
| 1467 return _json; | 1480 return _json; |
| 1468 } | 1481 } |
| 1469 } | 1482 } |
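The reflowed `remaining_parallelism` documentation above works through three numeric examples. A minimal sketch of the first one (record #30 of 50 in a perfectly splittable input); the values are illustrative only:

```dart
import 'package:googleapis_beta/dataflow/v1b3.dart' as dataflow;

// 29 records fully consumed; 20 remaining + 1 current = 21 remaining
// parallelism. consumed (29) + remaining (21) = 50, the input's total.
final progress = new dataflow.ApproximateReportedProgress()
  ..fractionConsumed = 29 / 50
  ..consumedParallelism = (new dataflow.ReportedParallelism()..value = 29.0)
  ..remainingParallelism = (new dataflow.ReportedParallelism()..value = 21.0);
```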
| 1470 | 1483 |
| 1471 /** Settings for WorkerPool autoscaling. */ | 1484 /** Settings for WorkerPool autoscaling. */ |
| 1472 class AutoscalingSettings { | 1485 class AutoscalingSettings { |
| 1473 /** | 1486 /** |
| 1474 * The algorithm to use for autoscaling. | 1487 * The algorithm to use for autoscaling. |
| 1475 * Possible string values are: | 1488 * Possible string values are: |
| 1476 * - "AUTOSCALING_ALGORITHM_UNKNOWN" : A AUTOSCALING_ALGORITHM_UNKNOWN. | 1489 * - "AUTOSCALING_ALGORITHM_UNKNOWN" : The algorithm is unknown, or |
| 1477 * - "AUTOSCALING_ALGORITHM_NONE" : A AUTOSCALING_ALGORITHM_NONE. | 1490 * unspecified. |
| 1478 * - "AUTOSCALING_ALGORITHM_BASIC" : A AUTOSCALING_ALGORITHM_BASIC. | 1491 * - "AUTOSCALING_ALGORITHM_NONE" : Disable autoscaling. |
| 1492 * - "AUTOSCALING_ALGORITHM_BASIC" : Increase worker count over time to reduce |
| 1493 * job execution time. |
| 1479 */ | 1494 */ |
| 1480 core.String algorithm; | 1495 core.String algorithm; |
| 1481 /** The maximum number of workers to cap scaling at. */ | 1496 /** The maximum number of workers to cap scaling at. */ |
| 1482 core.int maxNumWorkers; | 1497 core.int maxNumWorkers; |
| 1483 | 1498 |
| 1484 AutoscalingSettings(); | 1499 AutoscalingSettings(); |
| 1485 | 1500 |
| 1486 AutoscalingSettings.fromJson(core.Map _json) { | 1501 AutoscalingSettings.fromJson(core.Map _json) { |
| 1487 if (_json.containsKey("algorithm")) { | 1502 if (_json.containsKey("algorithm")) { |
| 1488 algorithm = _json["algorithm"]; | 1503 algorithm = _json["algorithm"]; |
| 1489 } | 1504 } |
| 1490 if (_json.containsKey("maxNumWorkers")) { | 1505 if (_json.containsKey("maxNumWorkers")) { |
| 1491 maxNumWorkers = _json["maxNumWorkers"]; | 1506 maxNumWorkers = _json["maxNumWorkers"]; |
| 1492 } | 1507 } |
| 1493 } | 1508 } |
| 1494 | 1509 |
| 1495 core.Map toJson() { | 1510 core.Map toJson() { |
| 1496 var _json = new core.Map(); | 1511 var _json = new core.Map(); |
| 1497 if (algorithm != null) { | 1512 if (algorithm != null) { |
| 1498 _json["algorithm"] = algorithm; | 1513 _json["algorithm"] = algorithm; |
| 1499 } | 1514 } |
| 1500 if (maxNumWorkers != null) { | 1515 if (maxNumWorkers != null) { |
| 1501 _json["maxNumWorkers"] = maxNumWorkers; | 1516 _json["maxNumWorkers"] = maxNumWorkers; |
| 1502 } | 1517 } |
| 1503 return _json; | 1518 return _json; |
| 1504 } | 1519 } |
| 1505 } | 1520 } |
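With the enum values now documented, constructing the settings reads naturally. A sketch using the documented string values:

```dart
import 'package:googleapis_beta/dataflow/v1b3.dart' as dataflow;

// Basic autoscaling (grow the pool to shorten execution), capped at
// 10 workers.
final autoscaling = new dataflow.AutoscalingSettings()
  ..algorithm = 'AUTOSCALING_ALGORITHM_BASIC'
  ..maxNumWorkers = 10;
```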
| 1506 | 1521 |
| 1522 /** Modeled after information exposed by /proc/stat. */ |
| 1523 class CPUTime { |
| 1524 /** |
| 1525 * Average CPU utilization rate (% non-idle cpu / second) since previous |
| 1526 * sample. |
| 1527 */ |
| 1528 core.double rate; |
| 1529 /** Timestamp of the measurement. */ |
| 1530 core.String timestamp; |
| 1531 /** |
| 1532 * Total active CPU time across all cores (ie., non-idle) in milliseconds |
| 1533 * since start-up. |
| 1534 */ |
| 1535 core.String totalMs; |
| 1536 |
| 1537 CPUTime(); |
| 1538 |
| 1539 CPUTime.fromJson(core.Map _json) { |
| 1540 if (_json.containsKey("rate")) { |
| 1541 rate = _json["rate"]; |
| 1542 } |
| 1543 if (_json.containsKey("timestamp")) { |
| 1544 timestamp = _json["timestamp"]; |
| 1545 } |
| 1546 if (_json.containsKey("totalMs")) { |
| 1547 totalMs = _json["totalMs"]; |
| 1548 } |
| 1549 } |
| 1550 |
| 1551 core.Map toJson() { |
| 1552 var _json = new core.Map(); |
| 1553 if (rate != null) { |
| 1554 _json["rate"] = rate; |
| 1555 } |
| 1556 if (timestamp != null) { |
| 1557 _json["timestamp"] = timestamp; |
| 1558 } |
| 1559 if (totalMs != null) { |
| 1560 _json["totalMs"] = totalMs; |
| 1561 } |
| 1562 return _json; |
| 1563 } |
| 1564 } |
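`CPUTime` is new in this revision. Note that `totalMs` is a `core.String` rather than an `int`: 64-bit integer fields travel as decimal strings in the JSON encoding. A round-trip sketch with made-up sample values:

```dart
import 'package:googleapis_beta/dataflow/v1b3.dart' as dataflow;

final cpu = new dataflow.CPUTime.fromJson({
  'rate': 0.85,                         // average non-idle CPU per second
  'timestamp': '2017-01-01T00:00:00Z',  // when the sample was taken
  'totalMs': '123456',                  // int64 serialized as a string
});
```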
| 1565 |
| 1507 /** All configuration data for a particular Computation. */ | 1566 /** All configuration data for a particular Computation. */ |
| 1508 class ComputationTopology { | 1567 class ComputationTopology { |
| 1509 /** The ID of the computation. */ | 1568 /** The ID of the computation. */ |
| 1510 core.String computationId; | 1569 core.String computationId; |
| 1511 /** The inputs to the computation. */ | 1570 /** The inputs to the computation. */ |
| 1512 core.List<StreamLocation> inputs; | 1571 core.List<StreamLocation> inputs; |
| 1513 /** The key ranges processed by the computation. */ | 1572 /** The key ranges processed by the computation. */ |
| 1514 core.List<KeyRangeLocation> keyRanges; | 1573 core.List<KeyRangeLocation> keyRanges; |
| 1515 /** The outputs from the computation. */ | 1574 /** The outputs from the computation. */ |
| 1516 core.List<StreamLocation> outputs; | 1575 core.List<StreamLocation> outputs; |
| (...skipping 93 matching lines...) |
| 1610 | 1669 |
| 1611 /** | 1670 /** |
| 1612 * CounterMetadata includes all static non-name non-value counter attributes. | 1671 * CounterMetadata includes all static non-name non-value counter attributes. |
| 1613 */ | 1672 */ |
| 1614 class CounterMetadata { | 1673 class CounterMetadata { |
| 1615 /** Human-readable description of the counter semantics. */ | 1674 /** Human-readable description of the counter semantics. */ |
| 1616 core.String description; | 1675 core.String description; |
| 1617 /** | 1676 /** |
| 1618 * Counter aggregation kind. | 1677 * Counter aggregation kind. |
| 1619 * Possible string values are: | 1678 * Possible string values are: |
| 1620 * - "INVALID" : A INVALID. | 1679 * - "INVALID" : Counter aggregation kind was not set. |
| 1621 * - "SUM" : A SUM. | 1680 * - "SUM" : Aggregated value is the sum of all contributed values. |
| 1622 * - "MAX" : A MAX. | 1681 * - "MAX" : Aggregated value is the max of all contributed values. |
| 1623 * - "MIN" : A MIN. | 1682 * - "MIN" : Aggregated value is the min of all contributed values. |
| 1624 * - "MEAN" : A MEAN. | 1683 * - "MEAN" : Aggregated value is the mean of all contributed values. |
| 1625 * - "OR" : A OR. | 1684 * - "OR" : Aggregated value represents the logical 'or' of all contributed |
| 1626 * - "AND" : A AND. | 1685 * values. |
| 1627 * - "SET" : A SET. | 1686 * - "AND" : Aggregated value represents the logical 'and' of all contributed |
| 1628 * - "DISTRIBUTION" : A DISTRIBUTION. | 1687 * values. |
| 1688 * - "SET" : Aggregated value is a set of unique contributed values. |
| 1689 * - "DISTRIBUTION" : Aggregated value captures statistics about a |
| 1690 * distribution. |
| 1629 */ | 1691 */ |
| 1630 core.String kind; | 1692 core.String kind; |
| 1631 /** A string referring to the unit type. */ | 1693 /** A string referring to the unit type. */ |
| 1632 core.String otherUnits; | 1694 core.String otherUnits; |
| 1633 /** | 1695 /** |
| 1634 * System defined Units, see above enum. | 1696 * System defined Units, see above enum. |
| 1635 * Possible string values are: | 1697 * Possible string values are: |
| 1636 * - "BYTES" : A BYTES. | 1698 * - "BYTES" : Counter returns a value in bytes. |
| 1637 * - "BYTES_PER_SEC" : A BYTES_PER_SEC. | 1699 * - "BYTES_PER_SEC" : Counter returns a value in bytes per second. |
| 1638 * - "MILLISECONDS" : A MILLISECONDS. | 1700 * - "MILLISECONDS" : Counter returns a value in milliseconds. |
| 1639 * - "MICROSECONDS" : A MICROSECONDS. | 1701 * - "MICROSECONDS" : Counter returns a value in microseconds. |
| 1640 * - "NANOSECONDS" : A NANOSECONDS. | 1702 * - "NANOSECONDS" : Counter returns a value in nanoseconds. |
| 1641 * - "TIMESTAMP_MSEC" : A TIMESTAMP_MSEC. | 1703 * - "TIMESTAMP_MSEC" : Counter returns a timestamp in milliseconds. |
| 1642 * - "TIMESTAMP_USEC" : A TIMESTAMP_USEC. | 1704 * - "TIMESTAMP_USEC" : Counter returns a timestamp in microseconds. |
| 1643 * - "TIMESTAMP_NSEC" : A TIMESTAMP_NSEC. | 1705 * - "TIMESTAMP_NSEC" : Counter returns a timestamp in nanoseconds. |
| 1644 */ | 1706 */ |
| 1645 core.String standardUnits; | 1707 core.String standardUnits; |
| 1646 | 1708 |
| 1647 CounterMetadata(); | 1709 CounterMetadata(); |
| 1648 | 1710 |
| 1649 CounterMetadata.fromJson(core.Map _json) { | 1711 CounterMetadata.fromJson(core.Map _json) { |
| 1650 if (_json.containsKey("description")) { | 1712 if (_json.containsKey("description")) { |
| 1651 description = _json["description"]; | 1713 description = _json["description"]; |
| 1652 } | 1714 } |
| 1653 if (_json.containsKey("kind")) { | 1715 if (_json.containsKey("kind")) { |
| (...skipping 31 matching lines...) |
| 1685 */ | 1747 */ |
| 1686 class CounterStructuredName { | 1748 class CounterStructuredName { |
| 1687 /** Name of the optimized step being executed by the workers. */ | 1749 /** Name of the optimized step being executed by the workers. */ |
| 1688 core.String componentStepName; | 1750 core.String componentStepName; |
| 1689 /** | 1751 /** |
| 1690 * Name of the stage. An execution step contains multiple component steps. | 1752 * Name of the stage. An execution step contains multiple component steps. |
| 1691 */ | 1753 */ |
| 1692 core.String executionStepName; | 1754 core.String executionStepName; |
| 1693 /** | 1755 /** |
| 1694 * Counter name. Not necessarily globally-unique, but unique within the | 1756 * Counter name. Not necessarily globally-unique, but unique within the |
| 1695 * context of the other fields. Required. | 1757 * context of the other fields. |
| 1758 * Required. |
| 1696 */ | 1759 */ |
| 1697 core.String name; | 1760 core.String name; |
| 1698 /** | 1761 /** |
| 1762 * One of the standard Origins defined above. |
| 1763 * Possible string values are: |
| 1764 * - "SYSTEM" : Counter was created by the Dataflow system. |
| 1765 * - "USER" : Counter was created by the user. |
| 1766 */ |
| 1767 core.String origin; |
| 1768 /** A string containing a more specific namespace of the counter's origin. */ |
| 1769 core.String originNamespace; |
| 1770 /** |
| 1699 * System generated name of the original step in the user's graph, before | 1771 * System generated name of the original step in the user's graph, before |
| 1700 * optimization. | 1772 * optimization. |
| 1701 */ | 1773 */ |
| 1702 core.String originalStepName; | 1774 core.String originalStepName; |
| 1703 /** A string containing the origin of the counter. */ | |
| 1704 core.String otherOrigin; | |
| 1705 /** | 1775 /** |
| 1706 * Portion of this counter, either key or value. | 1776 * Portion of this counter, either key or value. |
| 1707 * Possible string values are: | 1777 * Possible string values are: |
| 1708 * - "ALL" : A ALL. | 1778 * - "ALL" : Counter portion has not been set. |
| 1709 * - "KEY" : A KEY. | 1779 * - "KEY" : Counter reports a key. |
| 1710 * - "VALUE" : A VALUE. | 1780 * - "VALUE" : Counter reports a value. |
| 1711 */ | 1781 */ |
| 1712 core.String portion; | 1782 core.String portion; |
| 1713 /** | |
| 1714 * One of the standard Origins defined above. | |
| 1715 * Possible string values are: | |
| 1716 * - "DATAFLOW" : A DATAFLOW. | |
| 1717 * - "USER" : A USER. | |
| 1718 */ | |
| 1719 core.String standardOrigin; | |
| 1720 /** ID of a particular worker. */ | 1783 /** ID of a particular worker. */ |
| 1721 core.String workerId; | 1784 core.String workerId; |
| 1722 | 1785 |
| 1723 CounterStructuredName(); | 1786 CounterStructuredName(); |
| 1724 | 1787 |
| 1725 CounterStructuredName.fromJson(core.Map _json) { | 1788 CounterStructuredName.fromJson(core.Map _json) { |
| 1726 if (_json.containsKey("componentStepName")) { | 1789 if (_json.containsKey("componentStepName")) { |
| 1727 componentStepName = _json["componentStepName"]; | 1790 componentStepName = _json["componentStepName"]; |
| 1728 } | 1791 } |
| 1729 if (_json.containsKey("executionStepName")) { | 1792 if (_json.containsKey("executionStepName")) { |
| 1730 executionStepName = _json["executionStepName"]; | 1793 executionStepName = _json["executionStepName"]; |
| 1731 } | 1794 } |
| 1732 if (_json.containsKey("name")) { | 1795 if (_json.containsKey("name")) { |
| 1733 name = _json["name"]; | 1796 name = _json["name"]; |
| 1734 } | 1797 } |
| 1798 if (_json.containsKey("origin")) { |
| 1799 origin = _json["origin"]; |
| 1800 } |
| 1801 if (_json.containsKey("originNamespace")) { |
| 1802 originNamespace = _json["originNamespace"]; |
| 1803 } |
| 1735 if (_json.containsKey("originalStepName")) { | 1804 if (_json.containsKey("originalStepName")) { |
| 1736 originalStepName = _json["originalStepName"]; | 1805 originalStepName = _json["originalStepName"]; |
| 1737 } | 1806 } |
| 1738 if (_json.containsKey("otherOrigin")) { | |
| 1739 otherOrigin = _json["otherOrigin"]; | |
| 1740 } | |
| 1741 if (_json.containsKey("portion")) { | 1807 if (_json.containsKey("portion")) { |
| 1742 portion = _json["portion"]; | 1808 portion = _json["portion"]; |
| 1743 } | 1809 } |
| 1744 if (_json.containsKey("standardOrigin")) { | |
| 1745 standardOrigin = _json["standardOrigin"]; | |
| 1746 } | |
| 1747 if (_json.containsKey("workerId")) { | 1810 if (_json.containsKey("workerId")) { |
| 1748 workerId = _json["workerId"]; | 1811 workerId = _json["workerId"]; |
| 1749 } | 1812 } |
| 1750 } | 1813 } |
| 1751 | 1814 |
| 1752 core.Map toJson() { | 1815 core.Map toJson() { |
| 1753 var _json = new core.Map(); | 1816 var _json = new core.Map(); |
| 1754 if (componentStepName != null) { | 1817 if (componentStepName != null) { |
| 1755 _json["componentStepName"] = componentStepName; | 1818 _json["componentStepName"] = componentStepName; |
| 1756 } | 1819 } |
| 1757 if (executionStepName != null) { | 1820 if (executionStepName != null) { |
| 1758 _json["executionStepName"] = executionStepName; | 1821 _json["executionStepName"] = executionStepName; |
| 1759 } | 1822 } |
| 1760 if (name != null) { | 1823 if (name != null) { |
| 1761 _json["name"] = name; | 1824 _json["name"] = name; |
| 1762 } | 1825 } |
| 1826 if (origin != null) { |
| 1827 _json["origin"] = origin; |
| 1828 } |
| 1829 if (originNamespace != null) { |
| 1830 _json["originNamespace"] = originNamespace; |
| 1831 } |
| 1763 if (originalStepName != null) { | 1832 if (originalStepName != null) { |
| 1764 _json["originalStepName"] = originalStepName; | 1833 _json["originalStepName"] = originalStepName; |
| 1765 } | 1834 } |
| 1766 if (otherOrigin != null) { | |
| 1767 _json["otherOrigin"] = otherOrigin; | |
| 1768 } | |
| 1769 if (portion != null) { | 1835 if (portion != null) { |
| 1770 _json["portion"] = portion; | 1836 _json["portion"] = portion; |
| 1771 } | 1837 } |
| 1772 if (standardOrigin != null) { | |
| 1773 _json["standardOrigin"] = standardOrigin; | |
| 1774 } | |
| 1775 if (workerId != null) { | 1838 if (workerId != null) { |
| 1776 _json["workerId"] = workerId; | 1839 _json["workerId"] = workerId; |
| 1777 } | 1840 } |
| 1778 return _json; | 1841 return _json; |
| 1779 } | 1842 } |
| 1780 } | 1843 } |
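The field renames in this class (`standardOrigin` to `origin`, `otherOrigin` to `originNamespace`) are breaking for any caller that touched the old names. A sketch of the new shape, with illustrative values:

```dart
import 'package:googleapis_beta/dataflow/v1b3.dart' as dataflow;

// `origin` takes the documented enum value; `originNamespace` is the
// free-form namespace string (hypothetical here).
final counterName = new dataflow.CounterStructuredName()
  ..name = 'ElementCount'
  ..origin = 'SYSTEM'
  ..originNamespace = 'com.example.pipeline'
  ..portion = 'ALL';
```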
| 1781 | 1844 |
| 1782 /** | 1845 /** |
| 1783 * A single message which encapsulates structured name and metadata for a given | 1846 * A single message which encapsulates structured name and metadata for a given |
| 1784 * counter. | 1847 * counter. |
| (...skipping 25 matching lines...) |
| 1810 } | 1873 } |
| 1811 return _json; | 1874 return _json; |
| 1812 } | 1875 } |
| 1813 } | 1876 } |
| 1814 | 1877 |
| 1815 /** An update to a Counter sent from a worker. */ | 1878 /** An update to a Counter sent from a worker. */ |
| 1816 class CounterUpdate { | 1879 class CounterUpdate { |
| 1817 /** Boolean value for And, Or. */ | 1880 /** Boolean value for And, Or. */ |
| 1818 core.bool boolean; | 1881 core.bool boolean; |
| 1819 /** | 1882 /** |
| 1820 * True if this counter is reported as the total cumulative aggregate value | 1883 * True if this counter is reported as the total cumulative aggregate |
| 1821 * accumulated since the worker started working on this WorkItem. By default | 1884 * value accumulated since the worker started working on this WorkItem. |
| 1822 * this is false, indicating that this counter is reported as a delta. | 1885 * By default this is false, indicating that this counter is reported |
| 1886 * as a delta. |
| 1823 */ | 1887 */ |
| 1824 core.bool cumulative; | 1888 core.bool cumulative; |
| 1825 /** Distribution data */ | 1889 /** Distribution data */ |
| 1826 DistributionUpdate distribution; | 1890 DistributionUpdate distribution; |
| 1827 /** Floating point value for Sum, Max, Min. */ | 1891 /** Floating point value for Sum, Max, Min. */ |
| 1828 core.double floatingPoint; | 1892 core.double floatingPoint; |
| 1829 /** List of floating point numbers, for Set. */ | 1893 /** List of floating point numbers, for Set. */ |
| 1830 FloatingPointList floatingPointList; | 1894 FloatingPointList floatingPointList; |
| 1831 /** Floating point mean aggregation value for Mean. */ | 1895 /** Floating point mean aggregation value for Mean. */ |
| 1832 FloatingPointMean floatingPointMean; | 1896 FloatingPointMean floatingPointMean; |
| 1833 /** Integer value for Sum, Max, Min. */ | 1897 /** Integer value for Sum, Max, Min. */ |
| 1834 SplitInt64 integer; | 1898 SplitInt64 integer; |
| 1835 /** List of integers, for Set. */ | 1899 /** List of integers, for Set. */ |
| 1836 IntegerList integerList; | 1900 IntegerList integerList; |
| 1837 /** Integer mean aggregation value for Mean. */ | 1901 /** Integer mean aggregation value for Mean. */ |
| 1838 IntegerMean integerMean; | 1902 IntegerMean integerMean; |
| 1839 /** | 1903 /** |
| 1840 * Value for internally-defined counters used by the Dataflow service. | 1904 * Value for internally-defined counters used by the Dataflow service. |
| 1841 * | 1905 * |
| 1842 * The values for Object must be JSON objects. It can consist of `num`, | 1906 * The values for Object must be JSON objects. It can consist of `num`, |
| 1843 * `String`, `bool` and `null` as well as `Map` and `List` values. | 1907 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 1844 */ | 1908 */ |
| 1845 core.Object internal; | 1909 core.Object internal; |
| 1846 /** Counter name and aggregation type. */ | 1910 /** Counter name and aggregation type. */ |
| 1847 NameAndKind nameAndKind; | 1911 NameAndKind nameAndKind; |
| 1848 /** | 1912 /** |
| 1849 * The service-generated short identifier for this counter. The short_id -> | 1913 * The service-generated short identifier for this counter. |
| 1850 * (name, metadata) mapping is constant for the lifetime of a job. | 1914 * The short_id -> (name, metadata) mapping is constant for the lifetime of |
| 1915 * a job. |
| 1851 */ | 1916 */ |
| 1852 core.String shortId; | 1917 core.String shortId; |
| 1853 /** List of strings, for Set. */ | 1918 /** List of strings, for Set. */ |
| 1854 StringList stringList; | 1919 StringList stringList; |
| 1855 /** Counter structured name and metadata. */ | 1920 /** Counter structured name and metadata. */ |
| 1856 CounterStructuredNameAndMetadata structuredNameAndMetadata; | 1921 CounterStructuredNameAndMetadata structuredNameAndMetadata; |
| 1857 | 1922 |
| 1858 CounterUpdate(); | 1923 CounterUpdate(); |
| 1859 | 1924 |
| 1860 CounterUpdate.fromJson(core.Map _json) { | 1925 CounterUpdate.fromJson(core.Map _json) { |
| (...skipping 87 matching lines...) |
| 1948 } | 2013 } |
| 1949 return _json; | 2014 return _json; |
| 1950 } | 2015 } |
| 1951 } | 2016 } |
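As a usage sketch of the counter fields above (cumulative versus delta reporting, with one value slot per aggregation kind), a worker-side report might look like the following; the `NameAndKind` fields `name`/`kind` and the import path are assumptions from the published surface, and the values are invented:

  import 'package:googleapis_beta/dataflow/v1b3.dart' as dataflow;

  void main() {
    final update = new dataflow.CounterUpdate()
      ..cumulative = true          // total since the WorkItem started
      ..floatingPoint = 1536.0     // value slot used by Sum/Max/Min
      ..nameAndKind = (new dataflow.NameAndKind()
        ..name = 'bytes_written'
        ..kind = 'SUM');
    // A delta report would simply leave `cumulative` unset (default false).
    print(update.toJson());
  }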
| 1952 | 2017 |
| 1953 /** A request to create a Cloud Dataflow job from a template. */ | 2018 /** A request to create a Cloud Dataflow job from a template. */ |
| 1954 class CreateJobFromTemplateRequest { | 2019 class CreateJobFromTemplateRequest { |
| 1955 /** The runtime environment for the job. */ | 2020 /** The runtime environment for the job. */ |
| 1956 RuntimeEnvironment environment; | 2021 RuntimeEnvironment environment; |
| 1957 /** | 2022 /** |
| 1958 * Required. A Cloud Storage path to the template from which to create the | 2023 * Required. A Cloud Storage path to the template from which to |
| 1959 * job. Must be a valid Cloud Storage URL, beginning with `gs://`. | 2024 * create the job. |
| 2025 * Must be a valid Cloud Storage URL, beginning with `gs://`. |
| 1960 */ | 2026 */ |
| 1961 core.String gcsPath; | 2027 core.String gcsPath; |
| 1962 /** Required. The job name to use for the created job. */ | 2028 /** Required. The job name to use for the created job. */ |
| 1963 core.String jobName; | 2029 core.String jobName; |
| 1964 /** The runtime parameters to pass to the job. */ | 2030 /** The runtime parameters to pass to the job. */ |
| 1965 core.Map<core.String, core.String> parameters; | 2031 core.Map<core.String, core.String> parameters; |
| 1966 | 2032 |
| 1967 CreateJobFromTemplateRequest(); | 2033 CreateJobFromTemplateRequest(); |
| 1968 | 2034 |
| 1969 CreateJobFromTemplateRequest.fromJson(core.Map _json) { | 2035 CreateJobFromTemplateRequest.fromJson(core.Map _json) { |
| (...skipping 49 matching lines...) |
| 2019 } | 2085 } |
| 2020 return _json; | 2086 return _json; |
| 2021 } | 2087 } |
| 2022 } | 2088 } |
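A short sketch of building the request above; the gs:// prefix requirement and the required fields follow the field comments, while the bucket, template, and parameter names are hypothetical:

  import 'package:googleapis_beta/dataflow/v1b3.dart' as dataflow;

  final request = new dataflow.CreateJobFromTemplateRequest()
    ..gcsPath = 'gs://my-bucket/templates/wordcount'  // must begin with gs://
    ..jobName = 'wordcount-job-1'                     // required
    ..parameters = {'inputFile': 'gs://my-bucket/input.txt'}
    ..environment = new dataflow.RuntimeEnvironment();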
| 2023 | 2089 |
| 2024 /** Data disk assignment for a given VM instance. */ | 2090 /** Data disk assignment for a given VM instance. */ |
| 2025 class DataDiskAssignment { | 2091 class DataDiskAssignment { |
| 2026 /** | 2092 /** |
| 2027 * Mounted data disks. The order is important: a data disk's 0-based index in | 2093 * Mounted data disks. The order is important: a data disk's 0-based index in |
| 2028 * this list defines which persistent directory the disk is mounted to, for | 2094 * this list defines which persistent directory the disk is mounted to, for |
| 2029 * example the list of { "myproject-1014-104817-4c2-harness-0-disk-0" }, { | 2095 * example the list of { "myproject-1014-104817-4c2-harness-0-disk-0" }, |
| 2030 * "myproject-1014-104817-4c2-harness-0-disk-1" }. | 2096 * { "myproject-1014-104817-4c2-harness-0-disk-1" }. |
| 2031 */ | 2097 */ |
| 2032 core.List<core.String> dataDisks; | 2098 core.List<core.String> dataDisks; |
| 2033 /** | 2099 /** |
| 2034 * VM instance name the data disks are mounted to, for example | 2100 * VM instance name the data disks are mounted to, for example |
| 2035 * "myproject-1014-104817-4c2-harness-0". | 2101 * "myproject-1014-104817-4c2-harness-0". |
| 2036 */ | 2102 */ |
| 2037 core.String vmInstance; | 2103 core.String vmInstance; |
| 2038 | 2104 |
| 2039 DataDiskAssignment(); | 2105 DataDiskAssignment(); |
| 2040 | 2106 |
| (...skipping 12 matching lines...) |
| 2053 _json["dataDisks"] = dataDisks; | 2119 _json["dataDisks"] = dataDisks; |
| 2054 } | 2120 } |
| 2055 if (vmInstance != null) { | 2121 if (vmInstance != null) { |
| 2056 _json["vmInstance"] = vmInstance; | 2122 _json["vmInstance"] = vmInstance; |
| 2057 } | 2123 } |
| 2058 return _json; | 2124 return _json; |
| 2059 } | 2125 } |
| 2060 } | 2126 } |
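To make the ordering rule concrete: the 0-based index of each entry in `dataDisks` selects the persistent directory it is mounted under. A sketch using the example disk names from the comment above (same assumed import as the earlier sketches):

  import 'package:googleapis_beta/dataflow/v1b3.dart' as dataflow;

  final assignment = new dataflow.DataDiskAssignment()
    ..vmInstance = 'myproject-1014-104817-4c2-harness-0'
    ..dataDisks = [
      'myproject-1014-104817-4c2-harness-0-disk-0', // index 0 -> first directory
      'myproject-1014-104817-4c2-harness-0-disk-1', // index 1 -> second directory
    ];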
| 2061 | 2127 |
| 2062 /** | 2128 /** |
| 2063 * Specification of one of the bundles produced as a result of splitting a | 2129 * Specification of one of the bundles produced as a result of splitting |
| 2064 * Source (e.g. when executing a SourceSplitRequest, or when splitting an active | 2130 * a Source (e.g. when executing a SourceSplitRequest, or when |
| 2065 * task using WorkItemStatus.dynamic_source_split), relative to the source being | 2131 * splitting an active task using WorkItemStatus.dynamic_source_split), |
| 2066 * split. | 2132 * relative to the source being split. |
| 2067 */ | 2133 */ |
| 2068 class DerivedSource { | 2134 class DerivedSource { |
| 2069 /** | 2135 /** |
| 2070 * What source to base the produced source on (if any). | 2136 * What source to base the produced source on (if any). |
| 2071 * Possible string values are: | 2137 * Possible string values are: |
| 2072 * - "SOURCE_DERIVATION_MODE_UNKNOWN" : A SOURCE_DERIVATION_MODE_UNKNOWN. | 2138 * - "SOURCE_DERIVATION_MODE_UNKNOWN" : The source derivation is unknown, or |
| 2073 * - "SOURCE_DERIVATION_MODE_INDEPENDENT" : A | 2139 * unspecified. |
| 2074 * SOURCE_DERIVATION_MODE_INDEPENDENT. | 2140 * - "SOURCE_DERIVATION_MODE_INDEPENDENT" : Produce a completely independent |
| 2075 * - "SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT" : A | 2141 * Source with no base. |
| 2076 * SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT. | 2142 * - "SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT" : Produce a Source based on the |
| 2077 * - "SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT" : A | 2143 * Source being split. |
| 2078 * SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT. | 2144 * - "SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT" : Produce a Source based on |
| 2145 * the base of the Source being split. |
| 2079 */ | 2146 */ |
| 2080 core.String derivationMode; | 2147 core.String derivationMode; |
| 2081 /** Specification of the source. */ | 2148 /** Specification of the source. */ |
| 2082 Source source; | 2149 Source source; |
| 2083 | 2150 |
| 2084 DerivedSource(); | 2151 DerivedSource(); |
| 2085 | 2152 |
| 2086 DerivedSource.fromJson(core.Map _json) { | 2153 DerivedSource.fromJson(core.Map _json) { |
| 2087 if (_json.containsKey("derivationMode")) { | 2154 if (_json.containsKey("derivationMode")) { |
| 2088 derivationMode = _json["derivationMode"]; | 2155 derivationMode = _json["derivationMode"]; |
| (...skipping 11 matching lines...) |
| 2100 if (source != null) { | 2167 if (source != null) { |
| 2101 _json["source"] = (source).toJson(); | 2168 _json["source"] = (source).toJson(); |
| 2102 } | 2169 } |
| 2103 return _json; | 2170 return _json; |
| 2104 } | 2171 } |
| 2105 } | 2172 } |
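A sketch of the derivation modes in use; here the produced bundle is based on the source being split, per the enum documentation above, and the `Source` payload is left empty for brevity (its contents are outside this hunk):

  import 'package:googleapis_beta/dataflow/v1b3.dart' as dataflow;

  final derived = new dataflow.DerivedSource()
    ..derivationMode = 'SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT'
    ..source = new dataflow.Source();  // specification omitted in this sketch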
| 2106 | 2173 |
| 2107 /** Describes the data disk used by a workflow job. */ | 2174 /** Describes the data disk used by a workflow job. */ |
| 2108 class Disk { | 2175 class Disk { |
| 2109 /** | 2176 /** |
| 2110 * Disk storage type, as defined by Google Compute Engine. This must be a disk | 2177 * Disk storage type, as defined by Google Compute Engine. This |
| 2111 * type appropriate to the project and zone in which the workers will run. If | 2178 * must be a disk type appropriate to the project and zone in which |
| 2112 * unknown or unspecified, the service will attempt to choose a reasonable | 2179 * the workers will run. If unknown or unspecified, the service |
| 2113 * default. For example, the standard persistent disk type is a resource name | 2180 * will attempt to choose a reasonable default. |
| 2114 * typically ending in "pd-standard". If SSD persistent disks are available, | 2181 * |
| 2115 * the resource name typically ends with "pd-ssd". The actual valid values are | 2182 * For example, the standard persistent disk type is a resource name |
| 2116 * defined by the Google Compute Engine API, not by the Cloud Dataflow API; | 2185 * actual valid values are defined by the Google Compute Engine API, |
| 2117 * consult the Google Compute Engine documentation for more information about | 2184 * available, the resource name typically ends with "pd-ssd". The |
| 2118 * determining the set of available disk types for a particular project and | 2185 * actual valid values are defined the Google Compute Engine API, |
| 2119 * zone. Google Compute Engine Disk types are local to a particular project in | 2186 * not by the Cloud Dataflow API; consult the Google Compute Engine |
| 2120 * a particular zone, and so the resource name will typically look something | 2187 * documentation for more information about determining the set of |
| 2121 * like this: | 2188 * available disk types for a particular project and zone. |
| 2189 * |
| 2190 * Google Compute Engine Disk types are local to a particular |
| 2191 * project in a particular zone, and so the resource name will |
| 2192 * typically look something like this: |
| 2193 * |
| 2122 * compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-standard | 2194 * compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-standard |
| 2123 */ | 2195 */ |
| 2124 core.String diskType; | 2196 core.String diskType; |
| 2125 /** Directory in a VM where disk is mounted. */ | 2197 /** Directory in a VM where disk is mounted. */ |
| 2126 core.String mountPoint; | 2198 core.String mountPoint; |
| 2127 /** | 2199 /** |
| 2128 * Size of disk in GB. If zero or unspecified, the service will attempt to | 2200 * Size of disk in GB. If zero or unspecified, the service will |
| 2129 * choose a reasonable default. | 2201 * attempt to choose a reasonable default. |
| 2130 */ | 2202 */ |
| 2131 core.int sizeGb; | 2203 core.int sizeGb; |
| 2132 | 2204 |
| 2133 Disk(); | 2205 Disk(); |
| 2134 | 2206 |
| 2135 Disk.fromJson(core.Map _json) { | 2207 Disk.fromJson(core.Map _json) { |
| 2136 if (_json.containsKey("diskType")) { | 2208 if (_json.containsKey("diskType")) { |
| 2137 diskType = _json["diskType"]; | 2209 diskType = _json["diskType"]; |
| 2138 } | 2210 } |
| 2139 if (_json.containsKey("mountPoint")) { | 2211 if (_json.containsKey("mountPoint")) { |
| (...skipping 70 matching lines...) |
| 2210 _json["sum"] = (sum).toJson(); | 2282 _json["sum"] = (sum).toJson(); |
| 2211 } | 2283 } |
| 2212 if (sumOfSquares != null) { | 2284 if (sumOfSquares != null) { |
| 2213 _json["sumOfSquares"] = sumOfSquares; | 2285 _json["sumOfSquares"] = sumOfSquares; |
| 2214 } | 2286 } |
| 2215 return _json; | 2287 return _json; |
| 2216 } | 2288 } |
| 2217 } | 2289 } |
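Returning to the `Disk` message documented above: the disk type is a full Compute Engine resource name scoped to a project and zone. A sketch with placeholder project/zone values, under the same assumed import:

  import 'package:googleapis_beta/dataflow/v1b3.dart' as dataflow;

  final disk = new dataflow.Disk()
    ..sizeGb = 250
    ..mountPoint = '/mnt/pd0'  // directory in the VM where the disk is mounted
    ..diskType = 'compute.googleapis.com/projects/my-project/zones/'
        'us-central1-f/diskTypes/pd-standard';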
| 2218 | 2290 |
| 2219 /** | 2291 /** |
| 2220 * When a task splits using WorkItemStatus.dynamic_source_split, this message | 2292 * When a task splits using WorkItemStatus.dynamic_source_split, this |
| 2221 * describes the two parts of the split relative to the description of the | 2293 * message describes the two parts of the split relative to the |
| 2222 * current task's input. | 2294 * description of the current task's input. |
| 2223 */ | 2295 */ |
| 2224 class DynamicSourceSplit { | 2296 class DynamicSourceSplit { |
| 2225 /** | 2297 /** |
| 2226 * Primary part (continued to be processed by worker). Specified relative to | 2298 * Primary part (continued to be processed by worker). |
| 2227 * the previously-current source. Becomes current. | 2299 * Specified relative to the previously-current source. |
| 2300 * Becomes current. |
| 2228 */ | 2301 */ |
| 2229 DerivedSource primary; | 2302 DerivedSource primary; |
| 2230 /** | 2303 /** |
| 2231 * Residual part (returned to the pool of work). Specified relative to the | 2304 * Residual part (returned to the pool of work). |
| 2232 * previously-current source. | 2305 * Specified relative to the previously-current source. |
| 2233 */ | 2306 */ |
| 2234 DerivedSource residual; | 2307 DerivedSource residual; |
| 2235 | 2308 |
| 2236 DynamicSourceSplit(); | 2309 DynamicSourceSplit(); |
| 2237 | 2310 |
| 2238 DynamicSourceSplit.fromJson(core.Map _json) { | 2311 DynamicSourceSplit.fromJson(core.Map _json) { |
| 2239 if (_json.containsKey("primary")) { | 2312 if (_json.containsKey("primary")) { |
| 2240 primary = new DerivedSource.fromJson(_json["primary"]); | 2313 primary = new DerivedSource.fromJson(_json["primary"]); |
| 2241 } | 2314 } |
| 2242 if (_json.containsKey("residual")) { | 2315 if (_json.containsKey("residual")) { |
| 2243 residual = new DerivedSource.fromJson(_json["residual"]); | 2316 residual = new DerivedSource.fromJson(_json["residual"]); |
| 2244 } | 2317 } |
| 2245 } | 2318 } |
| 2246 | 2319 |
| 2247 core.Map toJson() { | 2320 core.Map toJson() { |
| 2248 var _json = new core.Map(); | 2321 var _json = new core.Map(); |
| 2249 if (primary != null) { | 2322 if (primary != null) { |
| 2250 _json["primary"] = (primary).toJson(); | 2323 _json["primary"] = (primary).toJson(); |
| 2251 } | 2324 } |
| 2252 if (residual != null) { | 2325 if (residual != null) { |
| 2253 _json["residual"] = (residual).toJson(); | 2326 _json["residual"] = (residual).toJson(); |
| 2254 } | 2327 } |
| 2255 return _json; | 2328 return _json; |
| 2256 } | 2329 } |
| 2257 } | 2330 } |
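A sketch of reporting a dynamic split: the primary part stays with the worker and becomes current, while the residual returns to the pool, both expressed relative to the previously-current source. The derivation mode chosen here is one option; a real worker would pick whichever mode matches how it derived each part:

  import 'package:googleapis_beta/dataflow/v1b3.dart' as dataflow;

  final split = new dataflow.DynamicSourceSplit()
    ..primary = (new dataflow.DerivedSource()
      ..derivationMode = 'SOURCE_DERIVATION_MODE_INDEPENDENT')
    ..residual = (new dataflow.DerivedSource()
      ..derivationMode = 'SOURCE_DERIVATION_MODE_INDEPENDENT');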
| 2258 | 2331 |
| 2259 /** Describes the environment in which a Dataflow Job runs. */ | 2332 /** Describes the environment in which a Dataflow Job runs. */ |
| 2260 class Environment { | 2333 class Environment { |
| 2261 /** | 2334 /** |
| 2262 * The type of cluster manager API to use. If unknown or unspecified, the | 2335 * The type of cluster manager API to use. If unknown or |
| 2263 * service will attempt to choose a reasonable default. This should be in the | 2336 * unspecified, the service will attempt to choose a reasonable |
| 2264 * form of the API service name, e.g. "compute.googleapis.com". | 2337 * default. This should be in the form of the API service name, |
| 2338 * e.g. "compute.googleapis.com". |
| 2265 */ | 2339 */ |
| 2266 core.String clusterManagerApiService; | 2340 core.String clusterManagerApiService; |
| 2267 /** | 2341 /** |
| 2268 * The dataset for the current project where various workflow related tables | 2342 * The dataset for the current project where various workflow |
| 2269 * are stored. The supported resource type is: Google BigQuery: | 2343 * related tables are stored. |
| 2270 * bigquery.googleapis.com/{dataset} | 2344 * |
| 2345 * The supported resource type is: |
| 2346 * |
| 2347 * Google BigQuery: |
| 2348 * bigquery.googleapis.com/{dataset} |
| 2271 */ | 2349 */ |
| 2272 core.String dataset; | 2350 core.String dataset; |
| 2273 /** The list of experiments to enable. */ | 2351 /** The list of experiments to enable. */ |
| 2274 core.List<core.String> experiments; | 2352 core.List<core.String> experiments; |
| 2275 /** | 2353 /** |
| 2276 * Experimental settings. | 2354 * Experimental settings. |
| 2277 * | 2355 * |
| 2278 * The values for Object must be JSON objects. It can consist of `num`, | 2356 * The values for Object must be JSON objects. It can consist of `num`, |
| 2279 * `String`, `bool` and `null` as well as `Map` and `List` values. | 2357 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 2280 */ | 2358 */ |
| 2281 core.Map<core.String, core.Object> internalExperiments; | 2359 core.Map<core.String, core.Object> internalExperiments; |
| 2282 /** | 2360 /** |
| 2283 * The Cloud Dataflow SDK pipeline options specified by the user. These | 2361 * The Cloud Dataflow SDK pipeline options specified by the user. These |
| 2284 * options are passed through the service and are used to recreate the SDK | 2362 * options are passed through the service and are used to recreate the |
| 2285 * pipeline options on the worker in a language-agnostic and | 2363 * SDK pipeline options on the worker in a language-agnostic and |
| 2286 * platform-independent way. | 2364 * platform-independent way. |
| 2287 * | 2365 * |
| 2288 * The values for Object must be JSON objects. It can consist of `num`, | 2366 * The values for Object must be JSON objects. It can consist of `num`, |
| 2289 * `String`, `bool` and `null` as well as `Map` and `List` values. | 2367 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 2290 */ | 2368 */ |
| 2291 core.Map<core.String, core.Object> sdkPipelineOptions; | 2369 core.Map<core.String, core.Object> sdkPipelineOptions; |
| 2292 /** Identity to run virtual machines as. Defaults to the default account. */ | 2370 /** Identity to run virtual machines as. Defaults to the default account. */ |
| 2293 core.String serviceAccountEmail; | 2371 core.String serviceAccountEmail; |
| 2294 /** | 2372 /** |
| 2295 * The prefix of the resources the system should use for temporary storage. | 2373 * The prefix of the resources the system should use for temporary |
| 2296 * The system will append the suffix "/temp-{JOBNAME}" to this resource prefix, | 2374 * storage. The system will append the suffix "/temp-{JOBNAME}" to |
| 2297 * where {JOBNAME} is the value of the job_name field. The resulting bucket | 2375 * this resource prefix, where {JOBNAME} is the value of the |
| 2298 * and object prefix is used as the prefix of the resources used to store | 2376 * job_name field. The resulting bucket and object prefix is used |
| 2299 * temporary data needed during the job execution. NOTE: This will override | 2377 * as the prefix of the resources used to store temporary data |
| 2300 * the value in taskrunner_settings. The supported resource type is: Google | 2378 * needed during the job execution. NOTE: This will override the |
| 2301 * Cloud Storage: storage.googleapis.com/{bucket}/{object} | 2379 * value in taskrunner_settings. |
| 2302 * bucket.storage.googleapis.com/{object} | 2380 * The supported resource type is: |
| 2381 * |
| 2382 * Google Cloud Storage: |
| 2383 * |
| 2384 * storage.googleapis.com/{bucket}/{object} |
| 2385 * bucket.storage.googleapis.com/{object} |
| 2303 */ | 2386 */ |
| 2304 core.String tempStoragePrefix; | 2387 core.String tempStoragePrefix; |
| 2305 /** | 2388 /** |
| 2306 * A description of the process that generated the request. | 2389 * A description of the process that generated the request. |
| 2307 * | 2390 * |
| 2308 * The values for Object must be JSON objects. It can consist of `num`, | 2391 * The values for Object must be JSON objects. It can consist of `num`, |
| 2309 * `String`, `bool` and `null` as well as `Map` and `List` values. | 2392 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 2310 */ | 2393 */ |
| 2311 core.Map<core.String, core.Object> userAgent; | 2394 core.Map<core.String, core.Object> userAgent; |
| 2312 /** | 2395 /** |
| 2313 * A structure describing which components and their versions of the service | 2396 * A structure describing which components and their versions of the service |
| 2314 * are required in order to run the job. | 2397 * are required in order to run the job. |
| 2315 * | 2398 * |
| 2316 * The values for Object must be JSON objects. It can consist of `num`, | 2399 * The values for Object must be JSON objects. It can consist of `num`, |
| 2317 * `String`, `bool` and `null` as well as `Map` and `List` values. | 2400 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 2318 */ | 2401 */ |
| 2319 core.Map<core.String, core.Object> version; | 2402 core.Map<core.String, core.Object> version; |
| 2320 /** | 2403 /** |
| 2321 * The worker pools. At least one "harness" worker pool must be specified in | 2404 * The worker pools. At least one "harness" worker pool must be |
| 2322 * order for the job to have workers. | 2405 * specified in order for the job to have workers. |
| 2323 */ | 2406 */ |
| 2324 core.List<WorkerPool> workerPools; | 2407 core.List<WorkerPool> workerPools; |
| 2325 | 2408 |
| 2326 Environment(); | 2409 Environment(); |
| 2327 | 2410 |
| 2328 Environment.fromJson(core.Map _json) { | 2411 Environment.fromJson(core.Map _json) { |
| 2329 if (_json.containsKey("clusterManagerApiService")) { | 2412 if (_json.containsKey("clusterManagerApiService")) { |
| 2330 clusterManagerApiService = _json["clusterManagerApiService"]; | 2413 clusterManagerApiService = _json["clusterManagerApiService"]; |
| 2331 } | 2414 } |
| 2332 if (_json.containsKey("dataset")) { | 2415 if (_json.containsKey("dataset")) { |
| (...skipping 154 matching lines...) |
| 2487 } | 2570 } |
| 2488 if (sum != null) { | 2571 if (sum != null) { |
| 2489 _json["sum"] = sum; | 2572 _json["sum"] = sum; |
| 2490 } | 2573 } |
| 2491 return _json; | 2574 return _json; |
| 2492 } | 2575 } |
| 2493 } | 2576 } |
| 2494 | 2577 |
| 2495 /** Request to get updated debug configuration for component. */ | 2578 /** Request to get updated debug configuration for component. */ |
| 2496 class GetDebugConfigRequest { | 2579 class GetDebugConfigRequest { |
| 2497 /** The internal component id for which debug configuration is requested. */ | 2580 /** |
| 2581 * The internal component id for which debug configuration is |
| 2582 * requested. |
| 2583 */ |
| 2498 core.String componentId; | 2584 core.String componentId; |
| 2499 /** The worker id, i.e., VM hostname. */ | 2585 /** The worker id, i.e., VM hostname. */ |
| 2500 core.String workerId; | 2586 core.String workerId; |
| 2501 | 2587 |
| 2502 GetDebugConfigRequest(); | 2588 GetDebugConfigRequest(); |
| 2503 | 2589 |
| 2504 GetDebugConfigRequest.fromJson(core.Map _json) { | 2590 GetDebugConfigRequest.fromJson(core.Map _json) { |
| 2505 if (_json.containsKey("componentId")) { | 2591 if (_json.containsKey("componentId")) { |
| 2506 componentId = _json["componentId"]; | 2592 componentId = _json["componentId"]; |
| 2507 } | 2593 } |
| (...skipping 30 matching lines...) |
| 2538 core.Map toJson() { | 2624 core.Map toJson() { |
| 2539 var _json = new core.Map(); | 2625 var _json = new core.Map(); |
| 2540 if (config != null) { | 2626 if (config != null) { |
| 2541 _json["config"] = config; | 2627 _json["config"] = config; |
| 2542 } | 2628 } |
| 2543 return _json; | 2629 return _json; |
| 2544 } | 2630 } |
| 2545 } | 2631 } |
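A sketch of the request/response pair above; the component id value is hypothetical, and the worker id is the VM hostname per the field comments. The service replies with `GetDebugConfigResponse.config`, the encoded debug configuration string:

  import 'package:googleapis_beta/dataflow/v1b3.dart' as dataflow;

  final debugRequest = new dataflow.GetDebugConfigRequest()
    ..componentId = 'com.example.MyDoFn'  // hypothetical component id
    ..workerId = 'myjob-harness-abc1';    // VM hostname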
| 2546 | 2632 |
| 2547 /** | 2633 /** |
| 2548 * An input of an instruction, as a reference to an output of a producer | 2634 * An input of an instruction, as a reference to an output of a |
| 2549 * instruction. | 2635 * producer instruction. |
| 2550 */ | 2636 */ |
| 2551 class InstructionInput { | 2637 class InstructionInput { |
| 2552 /** The output index (origin zero) within the producer. */ | 2638 /** The output index (origin zero) within the producer. */ |
| 2553 core.int outputNum; | 2639 core.int outputNum; |
| 2554 /** | 2640 /** |
| 2555 * The index (origin zero) of the parallel instruction that produces the | 2641 * The index (origin zero) of the parallel instruction that produces |
| 2556 * output to be consumed by this input. This index is relative to the list of | 2642 * the output to be consumed by this input. This index is relative |
| 2557 * instructions in this input's instruction's containing MapTask. | 2643 * to the list of instructions in this input's instruction's |
| 2644 * containing MapTask. |
| 2558 */ | 2645 */ |
| 2559 core.int producerInstructionIndex; | 2646 core.int producerInstructionIndex; |
| 2560 | 2647 |
| 2561 InstructionInput(); | 2648 InstructionInput(); |
| 2562 | 2649 |
| 2563 InstructionInput.fromJson(core.Map _json) { | 2650 InstructionInput.fromJson(core.Map _json) { |
| 2564 if (_json.containsKey("outputNum")) { | 2651 if (_json.containsKey("outputNum")) { |
| 2565 outputNum = _json["outputNum"]; | 2652 outputNum = _json["outputNum"]; |
| 2566 } | 2653 } |
| 2567 if (_json.containsKey("producerInstructionIndex")) { | 2654 if (_json.containsKey("producerInstructionIndex")) { |
| (...skipping 28 matching lines...) |
| 2596 * For system-generated byte and mean byte metrics, certain instructions | 2683 * For system-generated byte and mean byte metrics, certain instructions |
| 2597 * should only report the key size. | 2684 * should only report the key size. |
| 2598 */ | 2685 */ |
| 2599 core.bool onlyCountKeyBytes; | 2686 core.bool onlyCountKeyBytes; |
| 2600 /** | 2687 /** |
| 2601 * For system-generated byte and mean byte metrics, certain instructions | 2688 * For system-generated byte and mean byte metrics, certain instructions |
| 2602 * should only report the value size. | 2689 * should only report the value size. |
| 2603 */ | 2690 */ |
| 2604 core.bool onlyCountValueBytes; | 2691 core.bool onlyCountValueBytes; |
| 2605 /** | 2692 /** |
| 2606 * System-defined name for this output in the original workflow graph. Outputs | 2693 * System-defined name for this output in the original workflow graph. |
| 2607 * that do not contribute to an original instruction do not set this. | 2694 * Outputs that do not contribute to an original instruction do not set this. |
| 2608 */ | 2695 */ |
| 2609 core.String originalName; | 2696 core.String originalName; |
| 2610 /** System-defined name of this output. Unique across the workflow. */ | 2697 /** |
| 2698 * System-defined name of this output. |
| 2699 * Unique across the workflow. |
| 2700 */ |
| 2611 core.String systemName; | 2701 core.String systemName; |
| 2612 | 2702 |
| 2613 InstructionOutput(); | 2703 InstructionOutput(); |
| 2614 | 2704 |
| 2615 InstructionOutput.fromJson(core.Map _json) { | 2705 InstructionOutput.fromJson(core.Map _json) { |
| 2616 if (_json.containsKey("codec")) { | 2706 if (_json.containsKey("codec")) { |
| 2617 codec = _json["codec"]; | 2707 codec = _json["codec"]; |
| 2618 } | 2708 } |
| 2619 if (_json.containsKey("name")) { | 2709 if (_json.containsKey("name")) { |
| 2620 name = _json["name"]; | 2710 name = _json["name"]; |
| (...skipping 85 matching lines...) |
| 2706 _json["sum"] = (sum).toJson(); | 2796 _json["sum"] = (sum).toJson(); |
| 2707 } | 2797 } |
| 2708 return _json; | 2798 return _json; |
| 2709 } | 2799 } |
| 2710 } | 2800 } |
| 2711 | 2801 |
| 2712 /** Defines a job to be run by the Cloud Dataflow service. */ | 2802 /** Defines a job to be run by the Cloud Dataflow service. */ |
| 2713 class Job { | 2803 class Job { |
| 2714 /** | 2804 /** |
| 2715 * The client's unique identifier of the job, re-used across retried attempts. | 2805 * The client's unique identifier of the job, re-used across retried attempts. |
| 2716 * If this field is set, the service will ensure its uniqueness. The request | 2806 * If this field is set, the service will ensure its uniqueness. |
| 2717 * to create a job will fail if the service has knowledge of a previously | 2807 * The request to create a job will fail if the service has knowledge of a |
| 2718 * submitted job with the same client's ID and job name. The caller may use | 2808 * previously submitted job with the same client's ID and job name. |
| 2719 * this field to ensure idempotence of job creation across retried attempts to | 2809 * The caller may use this field to ensure idempotence of job |
| 2720 * create a job. By default, the field is empty and, in that case, the service | 2810 * creation across retried attempts to create a job. |
| 2721 * ignores it. | 2811 * By default, the field is empty and, in that case, the service ignores it. |
| 2722 */ | 2812 */ |
| 2723 core.String clientRequestId; | 2813 core.String clientRequestId; |
| 2724 /** | 2814 /** |
| 2725 * The timestamp when the job was initially created. Immutable and set by the | 2815 * The timestamp when the job was initially created. Immutable and set by the |
| 2726 * Cloud Dataflow service. | 2816 * Cloud Dataflow service. |
| 2727 */ | 2817 */ |
| 2728 core.String createTime; | 2818 core.String createTime; |
| 2729 /** | 2819 /** |
| 2730 * The current state of the job. Jobs are created in the `JOB_STATE_STOPPED` | 2820 * The current state of the job. |
| 2731 * state unless otherwise specified. A job in the `JOB_STATE_RUNNING` state | 2821 * |
| 2732 * may asynchronously enter a terminal state. After a job has reached a | 2822 * Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise |
| 2733 * terminal state, no further state updates may be made. This field may be | 2823 * specified. |
| 2734 * mutated by the Cloud Dataflow service; callers cannot mutate it. | 2824 * |
| 2825 * A job in the `JOB_STATE_RUNNING` state may asynchronously enter a |
| 2826 * terminal state. After a job has reached a terminal state, no |
| 2827 * further state updates may be made. |
| 2828 * |
| 2829 * This field may be mutated by the Cloud Dataflow service; |
| 2830 * callers cannot mutate it. |
| 2735 * Possible string values are: | 2831 * Possible string values are: |
| 2736 * - "JOB_STATE_UNKNOWN" : A JOB_STATE_UNKNOWN. | 2832 * - "JOB_STATE_UNKNOWN" : The job's run state isn't specified. |
| 2737 * - "JOB_STATE_STOPPED" : A JOB_STATE_STOPPED. | 2833 * - "JOB_STATE_STOPPED" : `JOB_STATE_STOPPED` indicates that the job has not |
| 2738 * - "JOB_STATE_RUNNING" : A JOB_STATE_RUNNING. | 2834 * yet started to run. |
| 2739 * - "JOB_STATE_DONE" : A JOB_STATE_DONE. | 2835 * - "JOB_STATE_RUNNING" : `JOB_STATE_RUNNING` indicates that the job is |
| 2740 * - "JOB_STATE_FAILED" : A JOB_STATE_FAILED. | 2836 * currently running. |
| 2741 * - "JOB_STATE_CANCELLED" : A JOB_STATE_CANCELLED. | 2837 * - "JOB_STATE_DONE" : `JOB_STATE_DONE` indicates that the job has |
| 2742 * - "JOB_STATE_UPDATED" : A JOB_STATE_UPDATED. | 2838 * successfully completed. |
| 2743 * - "JOB_STATE_DRAINING" : A JOB_STATE_DRAINING. | 2839 * This is a terminal job state. This state may be set by the Cloud Dataflow |
| 2744 * - "JOB_STATE_DRAINED" : A JOB_STATE_DRAINED. | 2840 * service, as a transition from `JOB_STATE_RUNNING`. It may also be set via a |
| 2841 * Cloud Dataflow `UpdateJob` call, if the job has not yet reached a terminal |
| 2842 * state. |
| 2843 * - "JOB_STATE_FAILED" : `JOB_STATE_FAILED` indicates that the job has |
| 2844 * failed. This is a |
| 2845 * terminal job state. This state may only be set by the Cloud Dataflow |
| 2846 * service, and only as a transition from `JOB_STATE_RUNNING`. |
| 2847 * - "JOB_STATE_CANCELLED" : `JOB_STATE_CANCELLED` indicates that the job has |
| 2848 * been explicitly |
| 2849 * cancelled. This is a terminal job state. This state may only be |
| 2850 * set via a Cloud Dataflow `UpdateJob` call, and only if the job has not |
| 2851 * yet reached another terminal state. |
| 2852 * - "JOB_STATE_UPDATED" : `JOB_STATE_UPDATED` indicates that the job was |
| 2853 * successfully updated, |
| 2854 * meaning that this job was stopped and another job was started, inheriting |
| 2855 * state from this one. This is a terminal job state. This state may only be |
| 2856 * set by the Cloud Dataflow service, and only as a transition from |
| 2857 * `JOB_STATE_RUNNING`. |
| 2858 * - "JOB_STATE_DRAINING" : `JOB_STATE_DRAINING` indicates that the job is in |
| 2859 * the process of draining. |
| 2860 * A draining job has stopped pulling from its input sources and is processing |
| 2861 * any data that remains in-flight. This state may be set via a Cloud Dataflow |
| 2862 * `UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. Jobs |
| 2863 * that are draining may only transition to `JOB_STATE_DRAINED`, |
| 2864 * `JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`. |
| 2865 * - "JOB_STATE_DRAINED" : `JOB_STATE_DRAINED` indicates that the job has been |
| 2866 * drained. |
| 2867 * A drained job has stopped pulling from its input sources and has processed |
| 2868 * any data that remained in-flight when draining was requested. |
| 2869 * This is a terminal state; it may only be set by the Cloud Dataflow |
| 2870 * service, and only as a transition from `JOB_STATE_DRAINING`. |
| 2745 */ | 2871 */ |
| 2746 core.String currentState; | 2872 core.String currentState; |
| 2747 /** The timestamp associated with the current state. */ | 2873 /** The timestamp associated with the current state. */ |
| 2748 core.String currentStateTime; | 2874 core.String currentStateTime; |
| 2749 /** The environment for the job. */ | 2875 /** The environment for the job. */ |
| 2750 Environment environment; | 2876 Environment environment; |
| 2751 /** Information about how the Cloud Dataflow service will run the job. */ | 2877 /** Information about how the Cloud Dataflow service will run the job. */ |
| 2752 JobExecutionInfo executionInfo; | 2878 JobExecutionInfo executionInfo; |
| 2753 /** | 2879 /** |
| 2754 * The unique ID of this job. This field is set by the Cloud Dataflow service | 2880 * The unique ID of this job. |
| 2755 * when the Job is created, and is immutable for the life of the job. | 2881 * |
| 2882 * This field is set by the Cloud Dataflow service when the Job is |
| 2883 * created, and is immutable for the life of the job. |
| 2756 */ | 2884 */ |
| 2757 core.String id; | 2885 core.String id; |
| 2758 /** | 2886 /** |
| 2759 * User-defined labels for this job. The labels map can contain no more than | 2887 * User-defined labels for this job. |
| 2760 * 64 entries. Entries of the labels map are UTF8 strings that comply with the | 2888 * |
| 2761 * following restrictions: * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} * | 2889 * The labels map can contain no more than 64 entries. Entries of the labels |
| 2762 * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} * Both keys and | 2890 * map are UTF8 strings that comply with the following restrictions: |
| 2763 * values are additionally constrained to be <= 128 bytes in size. | 2891 * |
| 2892 * * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62} |
| 2893 * * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63} |
| 2894 * * Both keys and values are additionally constrained to be <= 128 bytes in |
| 2895 * size. |
| 2764 */ | 2896 */ |
| 2765 core.Map<core.String, core.String> labels; | 2897 core.Map<core.String, core.String> labels; |
| 2766 /** The location that contains this job. */ | 2898 /** The location that contains this job. */ |
| 2767 core.String location; | 2899 core.String location; |
| 2768 /** | 2900 /** |
| 2769 * The user-specified Cloud Dataflow job name. Only one Job with a given name | 2901 * The user-specified Cloud Dataflow job name. |
| 2770 * may exist in a project at any given time. If a caller attempts to create a | 2902 * |
| 2771 * Job with the same name as an already-existing Job, the attempt returns the | 2903 * Only one Job with a given name may exist in a project at any |
| 2772 * existing Job. The name must match the regular expression | 2904 * given time. If a caller attempts to create a Job with the same |
| 2905 * name as an already-existing Job, the attempt returns the |
| 2906 * existing Job. |
| 2907 * |
| 2908 * The name must match the regular expression |
| 2773 * `[a-z]([-a-z0-9]{0,38}[a-z0-9])?` | 2909 * `[a-z]([-a-z0-9]{0,38}[a-z0-9])?` |
| 2774 */ | 2910 */ |
| 2775 core.String name; | 2911 core.String name; |
| 2776 /** The ID of the Cloud Platform project that the job belongs to. */ | 2912 /** The ID of the Cloud Platform project that the job belongs to. */ |
| 2777 core.String projectId; | 2913 core.String projectId; |
| 2778 /** | 2914 /** |
| 2779 * If this job is an update of an existing job, this field is the job ID of | 2915 * If this job is an update of an existing job, this field is the job ID |
| 2780 * the job it replaced. When sending a `CreateJobRequest`, you can update a | 2916 * of the job it replaced. |
| 2781 * job by specifying it here. The job named here is stopped, and its | 2917 * |
| 2782 * intermediate state is transferred to this job. | 2918 * When sending a `CreateJobRequest`, you can update a job by specifying it |
| 2919 * here. The job named here is stopped, and its intermediate state is |
| 2920 * transferred to this job. |
| 2783 */ | 2921 */ |
| 2784 core.String replaceJobId; | 2922 core.String replaceJobId; |
| 2785 /** | 2923 /** |
| 2786 * If another job is an update of this job (and thus, this job is in | 2924 * If another job is an update of this job (and thus, this job is in |
| 2787 * `JOB_STATE_UPDATED`), this field contains the ID of that job. | 2925 * `JOB_STATE_UPDATED`), this field contains the ID of that job. |
| 2788 */ | 2926 */ |
| 2789 core.String replacedByJobId; | 2927 core.String replacedByJobId; |
| 2790 /** | 2928 /** |
| 2791 * The job's requested state. `UpdateJob` may be used to switch between the | 2929 * The job's requested state. |
| 2792 * `JOB_STATE_STOPPED` and `JOB_STATE_RUNNING` states, by setting | 2930 * |
| 2793 * requested_state. `UpdateJob` may also be used to directly set a job's | 2931 * `UpdateJob` may be used to switch between the `JOB_STATE_STOPPED` and |
| 2794 * requested state to `JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably | 2932 * `JOB_STATE_RUNNING` states, by setting requested_state. `UpdateJob` may |
| 2795 * terminating the job if it has not already reached a terminal state. | 2933 * also be used to directly set a job's requested state to |
| 2934 * `JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably terminating the |
| 2935 * job if it has not already reached a terminal state. |
| 2796 * Possible string values are: | 2936 * Possible string values are: |
| 2797 * - "JOB_STATE_UNKNOWN" : A JOB_STATE_UNKNOWN. | 2937 * - "JOB_STATE_UNKNOWN" : The job's run state isn't specified. |
| 2798 * - "JOB_STATE_STOPPED" : A JOB_STATE_STOPPED. | 2938 * - "JOB_STATE_STOPPED" : `JOB_STATE_STOPPED` indicates that the job has not |
| 2799 * - "JOB_STATE_RUNNING" : A JOB_STATE_RUNNING. | 2939 * yet started to run. |
| 2800 * - "JOB_STATE_DONE" : A JOB_STATE_DONE. | 2940 * - "JOB_STATE_RUNNING" : `JOB_STATE_RUNNING` indicates that the job is |
| 2801 * - "JOB_STATE_FAILED" : A JOB_STATE_FAILED. | 2941 * currently running. |
| 2802 * - "JOB_STATE_CANCELLED" : A JOB_STATE_CANCELLED. | 2942 * - "JOB_STATE_DONE" : `JOB_STATE_DONE` indicates that the job has |
| 2803 * - "JOB_STATE_UPDATED" : A JOB_STATE_UPDATED. | 2943 * successfully completed. |
| 2804 * - "JOB_STATE_DRAINING" : A JOB_STATE_DRAINING. | 2944 * This is a terminal job state. This state may be set by the Cloud Dataflow |
| 2805 * - "JOB_STATE_DRAINED" : A JOB_STATE_DRAINED. | 2945 * service, as a transition from `JOB_STATE_RUNNING`. It may also be set via a |
| 2946 * Cloud Dataflow `UpdateJob` call, if the job has not yet reached a terminal |
| 2947 * state. |
| 2948 * - "JOB_STATE_FAILED" : `JOB_STATE_FAILED` indicates that the job has |
| 2949 * failed. This is a |
| 2950 * terminal job state. This state may only be set by the Cloud Dataflow |
| 2951 * service, and only as a transition from `JOB_STATE_RUNNING`. |
| 2952 * - "JOB_STATE_CANCELLED" : `JOB_STATE_CANCELLED` indicates that the job has |
| 2953 * been explicitly |
| 2954 * cancelled. This is a terminal job state. This state may only be |
| 2955 * set via a Cloud Dataflow `UpdateJob` call, and only if the job has not |
| 2956 * yet reached another terminal state. |
| 2957 * - "JOB_STATE_UPDATED" : `JOB_STATE_UPDATED` indicates that the job was |
| 2958 * successfully updated, |
| 2959 * meaning that this job was stopped and another job was started, inheriting |
| 2960 * state from this one. This is a terminal job state. This state may only be |
| 2961 * set by the Cloud Dataflow service, and only as a transition from |
| 2962 * `JOB_STATE_RUNNING`. |
| 2963 * - "JOB_STATE_DRAINING" : `JOB_STATE_DRAINING` indicates that the job is in |
| 2964 * the process of draining. |
| 2965 * A draining job has stopped pulling from its input sources and is processing |
| 2966 * any data that remains in-flight. This state may be set via a Cloud Dataflow |
| 2967 * `UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. Jobs |
| 2968 * that are draining may only transition to `JOB_STATE_DRAINED`, |
| 2969 * `JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`. |
| 2970 * - "JOB_STATE_DRAINED" : `JOB_STATE_DRAINED` indicates that the job has been |
| 2971 * drained. |
| 2972 * A drained job has stopped pulling from its input sources and has processed |
| 2973 * any data that remained in-flight when draining was requested. |
| 2974 * This is a terminal state; it may only be set by the Cloud Dataflow |
| 2975 * service, and only as a transition from `JOB_STATE_DRAINING`. |
| 2806 */ | 2976 */ |
| 2807 core.String requestedState; | 2977 core.String requestedState; |
| 2808 /** The top-level steps that constitute the entire job. */ | 2978 /** The top-level steps that constitute the entire job. */ |
| 2809 core.List<Step> steps; | 2979 core.List<Step> steps; |
| 2810 /** | 2980 /** |
| 2811 * A set of files the system should be aware of that are used for temporary | 2981 * A set of files the system should be aware of that are used |
| 2812 * storage. These temporary files will be removed on job completion. No | 2982 * for temporary storage. These temporary files will be |
| 2813 * duplicates are allowed. No file patterns are supported. The supported files | 2983 * removed on job completion. |
| 2814 * are: Google Cloud Storage: storage.googleapis.com/{bucket}/{object} | 2984 * No duplicates are allowed. |
| 2815 * bucket.storage.googleapis.com/{object} | 2985 * No file patterns are supported. |
| 2986 * |
| 2987 * The supported files are: |
| 2988 * |
| 2989 * Google Cloud Storage: |
| 2990 * |
| 2991 * storage.googleapis.com/{bucket}/{object} |
| 2992 * bucket.storage.googleapis.com/{object} |
| 2816 */ | 2993 */ |
| 2817 core.List<core.String> tempFiles; | 2994 core.List<core.String> tempFiles; |
| 2818 /** | 2995 /** |
| 2819 * The map of transform name prefixes of the job to be replaced to the | 2996 * The map of transform name prefixes of the job to be replaced to the |
| 2820 * corresponding name prefixes of the new job. | 2997 * corresponding name prefixes of the new job. |
| 2821 */ | 2998 */ |
| 2822 core.Map<core.String, core.String> transformNameMapping; | 2999 core.Map<core.String, core.String> transformNameMapping; |
| 2823 /** | 3000 /** |
| 2824 * The type of Cloud Dataflow job. | 3001 * The type of Cloud Dataflow job. |
| 2825 * Possible string values are: | 3002 * Possible string values are: |
| 2826 * - "JOB_TYPE_UNKNOWN" : A JOB_TYPE_UNKNOWN. | 3003 * - "JOB_TYPE_UNKNOWN" : The type of the job is unspecified, or unknown. |
| 2827 * - "JOB_TYPE_BATCH" : A JOB_TYPE_BATCH. | 3004 * - "JOB_TYPE_BATCH" : A batch job with a well-defined end point: data is |
| 2828 * - "JOB_TYPE_STREAMING" : A JOB_TYPE_STREAMING. | 3005 * read, data is |
| 3006 * processed, data is written, and the job is done. |
| 3007 * - "JOB_TYPE_STREAMING" : A continuously streaming job with no end: data is |
| 3008 * read, |
| 3009 * processed, and written continuously. |
| 2829 */ | 3010 */ |
| 2830 core.String type; | 3011 core.String type; |
| 2831 | 3012 |
| 2832 Job(); | 3013 Job(); |
| 2833 | 3014 |
| 2834 Job.fromJson(core.Map _json) { | 3015 Job.fromJson(core.Map _json) { |
| 2835 if (_json.containsKey("clientRequestId")) { | 3016 if (_json.containsKey("clientRequestId")) { |
| 2836 clientRequestId = _json["clientRequestId"]; | 3017 clientRequestId = _json["clientRequestId"]; |
| 2837 } | 3018 } |
| 2838 if (_json.containsKey("createTime")) { | 3019 if (_json.containsKey("createTime")) { |
| (...skipping 128 matching lines...) |
| 2967 core.Map toJson() { | 3148 core.Map toJson() { |
| 2968 var _json = new core.Map(); | 3149 var _json = new core.Map(); |
| 2969 if (stages != null) { | 3150 if (stages != null) { |
| 2970 _json["stages"] = commons.mapMap(stages, (item) => (item).toJson()); | 3151 _json["stages"] = commons.mapMap(stages, (item) => (item).toJson()); |
| 2971 } | 3152 } |
| 2972 return _json; | 3153 return _json; |
| 2973 } | 3154 } |
| 2974 } | 3155 } |
| 2975 | 3156 |
| 2976 /** | 3157 /** |
| 2977 * Contains information about how a particular google.dataflow.v1beta3.Step will | 3158 * Contains information about how a particular |
| 2978 * be executed. | 3159 * google.dataflow.v1beta3.Step will be executed. |
| 2979 */ | 3160 */ |
| 2980 class JobExecutionStageInfo { | 3161 class JobExecutionStageInfo { |
| 2981 /** | 3162 /** |
| 2982 * The steps associated with the execution stage. Note that stages may have | 3163 * The steps associated with the execution stage. |
| 2983 * several steps, and that a given step might be run by more than one stage. | 3164 * Note that stages may have several steps, and that a given step |
| 3165 * might be run by more than one stage. |
| 2984 */ | 3166 */ |
| 2985 core.List<core.String> stepName; | 3167 core.List<core.String> stepName; |
| 2986 | 3168 |
| 2987 JobExecutionStageInfo(); | 3169 JobExecutionStageInfo(); |
| 2988 | 3170 |
| 2989 JobExecutionStageInfo.fromJson(core.Map _json) { | 3171 JobExecutionStageInfo.fromJson(core.Map _json) { |
| 2990 if (_json.containsKey("stepName")) { | 3172 if (_json.containsKey("stepName")) { |
| 2991 stepName = _json["stepName"]; | 3173 stepName = _json["stepName"]; |
| 2992 } | 3174 } |
| 2993 } | 3175 } |
| 2994 | 3176 |
| 2995 core.Map toJson() { | 3177 core.Map toJson() { |
| 2996 var _json = new core.Map(); | 3178 var _json = new core.Map(); |
| 2997 if (stepName != null) { | 3179 if (stepName != null) { |
| 2998 _json["stepName"] = stepName; | 3180 _json["stepName"] = stepName; |
| 2999 } | 3181 } |
| 3000 return _json; | 3182 return _json; |
| 3001 } | 3183 } |
| 3002 } | 3184 } |
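Tying the two execution messages above together: `JobExecutionInfo.stages` (visible in the toJson() hunk further up) maps a stage name to a `JobExecutionStageInfo`, and a given step may appear under more than one stage. Stage and step names below are invented:

  import 'package:googleapis_beta/dataflow/v1b3.dart' as dataflow;

  final executionInfo = new dataflow.JobExecutionInfo()
    ..stages = {
      'S01': (new dataflow.JobExecutionStageInfo()
        ..stepName = ['ReadLines', 'ParseInput']),
      'S02': (new dataflow.JobExecutionStageInfo()
        ..stepName = ['ParseInput', 'FormatOutput']), // ParseInput shared with S01
    };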
| 3003 | 3185 |
| 3004 /** A particular message pertaining to a Dataflow job. */ | 3186 /** A particular message pertaining to a Dataflow job. */ |
| 3005 class JobMessage { | 3187 class JobMessage { |
| 3006 /** | 3188 /** |
| 3007 * Identifies the message. This is automatically generated by the service; the | 3189 * Identifies the message. This is automatically generated by the |
| 3008 * caller should treat it as an opaque string. | 3190 * service; the caller should treat it as an opaque string. |
| 3009 */ | 3191 */ |
| 3010 core.String id; | 3192 core.String id; |
| 3011 /** | 3193 /** |
| 3012 * Importance level of the message. | 3194 * Importance level of the message. |
| 3013 * Possible string values are: | 3195 * Possible string values are: |
| 3014 * - "JOB_MESSAGE_IMPORTANCE_UNKNOWN" : A JOB_MESSAGE_IMPORTANCE_UNKNOWN. | 3196 * - "JOB_MESSAGE_IMPORTANCE_UNKNOWN" : The message importance isn't |
| 3015 * - "JOB_MESSAGE_DEBUG" : A JOB_MESSAGE_DEBUG. | 3197 * specified, or is unknown. |
| 3016 * - "JOB_MESSAGE_DETAILED" : A JOB_MESSAGE_DETAILED. | 3198 * - "JOB_MESSAGE_DEBUG" : The message is at the 'debug' level: typically only |
| 3017 * - "JOB_MESSAGE_BASIC" : A JOB_MESSAGE_BASIC. | 3199 * useful for |
| 3018 * - "JOB_MESSAGE_WARNING" : A JOB_MESSAGE_WARNING. | 3200 * software engineers working on the code the job is running. |
| 3019 * - "JOB_MESSAGE_ERROR" : A JOB_MESSAGE_ERROR. | 3201 * Typically, Dataflow pipeline runners do not display log messages |
| 3202 * at this level by default. |
| 3203 * - "JOB_MESSAGE_DETAILED" : The message is at the 'detailed' level: somewhat |
| 3204 * verbose, but |
| 3205 * potentially useful to users. Typically, Dataflow pipeline |
| 3206 * runners do not display log messages at this level by default. |
| 3207 * These messages are displayed by default in the Dataflow |
| 3208 * monitoring UI. |
| 3209 * - "JOB_MESSAGE_BASIC" : The message is at the 'basic' level: useful for |
| 3210 * keeping |
| 3211 * track of the execution of a Dataflow pipeline. Typically, |
| 3212 * Dataflow pipeline runners display log messages at this level by |
| 3213 * default, and these messages are displayed by default in the |
| 3214 * Dataflow monitoring UI. |
| 3215 * - "JOB_MESSAGE_WARNING" : The message is at the 'warning' level: indicating |
| 3216 * a condition |
| 3217 * pertaining to a job which may require human intervention. |
| 3218 * Typically, Dataflow pipeline runners display log messages at this |
| 3219 * level by default, and these messages are displayed by default in |
| 3220 * the Dataflow monitoring UI. |
| 3221 * - "JOB_MESSAGE_ERROR" : The message is at the 'error' level: indicating a |
| 3222 * condition |
| 3223 * preventing a job from succeeding. Typically, Dataflow pipeline |
| 3224 * runners display log messages at this level by default, and these |
| 3225 * messages are displayed by default in the Dataflow monitoring UI. |
| 3020 */ | 3226 */ |
| 3021 core.String messageImportance; | 3227 core.String messageImportance; |
| 3022 /** The text of the message. */ | 3228 /** The text of the message. */ |
| 3023 core.String messageText; | 3229 core.String messageText; |
| 3024 /** The timestamp of the message. */ | 3230 /** The timestamp of the message. */ |
| 3025 core.String time; | 3231 core.String time; |
| 3026 | 3232 |
| 3027 JobMessage(); | 3233 JobMessage(); |
| 3028 | 3234 |
| 3029 JobMessage.fromJson(core.Map _json) { | 3235 JobMessage.fromJson(core.Map _json) { |
| (...skipping 25 matching lines...) |
| 3055 if (time != null) { | 3261 if (time != null) { |
| 3056 _json["time"] = time; | 3262 _json["time"] = time; |
| 3057 } | 3263 } |
| 3058 return _json; | 3264 return _json; |
| 3059 } | 3265 } |
| 3060 } | 3266 } |
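Given the importance ladder above, a client that only wants actionable messages can filter on the two highest levels. A small helper sketch, under the same assumed import:

  import 'package:googleapis_beta/dataflow/v1b3.dart' as dataflow;

  const severeLevels = const ['JOB_MESSAGE_WARNING', 'JOB_MESSAGE_ERROR'];

  // Keeps only messages that may require human attention.
  Iterable<dataflow.JobMessage> severeOnly(List<dataflow.JobMessage> messages) =>
      messages.where((m) => severeLevels.contains(m.messageImportance));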
| 3061 | 3267 |
| 3062 /** | 3268 /** |
| 3063 * JobMetrics contains a collection of metrics describing the detailed progress | 3269 * JobMetrics contains a collection of metrics describing the detailed progress |
| 3064 * of a Dataflow job. Metrics correspond to user-defined and system-defined | 3270 * of a Dataflow job. Metrics correspond to user-defined and system-defined |
| 3065 * metrics in the job. This resource captures only the most recent values of | 3271 * metrics in the job. |
| 3066 * each metric; time-series data can be queried for them (under the same metric | 3272 * |
| 3067 * names) from Cloud Monitoring. | 3273 * This resource captures only the most recent values of each metric; |
| 3274 * time-series data can be queried for them (under the same metric names) |
| 3275 * from Cloud Monitoring. |
| 3068 */ | 3276 */ |
| 3069 class JobMetrics { | 3277 class JobMetrics { |
| 3070 /** Timestamp as of which metric values are current. */ | 3278 /** Timestamp as of which metric values are current. */ |
| 3071 core.String metricTime; | 3279 core.String metricTime; |
| 3072 /** All metrics for this job. */ | 3280 /** All metrics for this job. */ |
| 3073 core.List<MetricUpdate> metrics; | 3281 core.List<MetricUpdate> metrics; |
| 3074 | 3282 |
| 3075 JobMetrics(); | 3283 JobMetrics(); |
| 3076 | 3284 |
| 3077 JobMetrics.fromJson(core.Map _json) { | 3285 JobMetrics.fromJson(core.Map _json) { |
| (...skipping 12 matching lines...) |
| 3090 } | 3298 } |
| 3091 if (metrics != null) { | 3299 if (metrics != null) { |
| 3092 _json["metrics"] = metrics.map((value) => (value).toJson()).toList(); | 3300 _json["metrics"] = metrics.map((value) => (value).toJson()).toList(); |
| 3093 } | 3301 } |
| 3094 return _json; | 3302 return _json; |
| 3095 } | 3303 } |
| 3096 } | 3304 } |
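Because JobMetrics carries only the most recent value of each metric, a snapshot reader just records `metricTime` and walks `metrics`; historical series come from Cloud Monitoring instead. A sketch:

  import 'package:googleapis_beta/dataflow/v1b3.dart' as dataflow;

  void printSnapshot(dataflow.JobMetrics jobMetrics) {
    print('Metric values as of ${jobMetrics.metricTime}:');
    for (var update in jobMetrics.metrics) {
      print(update.toJson()); // each MetricUpdate holds the latest value only
    }
  }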
| 3097 | 3305 |
| 3098 /** | 3306 /** |
| 3099 * Data disk assignment information for a specific key-range of a sharded | 3307 * Data disk assignment information for a specific key-range of a sharded |
| 3100 * computation. Currently we only support UTF-8 character splits to simplify | 3308 * computation. |
| 3101 * encoding into JSON. | 3309 * Currently we only support UTF-8 character splits to simplify encoding into |
| 3310 * JSON. |
| 3102 */ | 3311 */ |
| 3103 class KeyRangeDataDiskAssignment { | 3312 class KeyRangeDataDiskAssignment { |
| 3104 /** | 3313 /** |
| 3105 * The name of the data disk where data for this range is stored. This name is | 3314 * The name of the data disk where data for this range is stored. |
| 3106 * local to the Google Cloud Platform project and uniquely identifies the disk | 3315 * This name is local to the Google Cloud Platform project and uniquely |
| 3107 * within that project, for example | 3316 * identifies the disk within that project, for example |
| 3108 * "myproject-1014-104817-4c2-harness-0-disk-1". | 3317 * "myproject-1014-104817-4c2-harness-0-disk-1". |
| 3109 */ | 3318 */ |
| 3110 core.String dataDisk; | 3319 core.String dataDisk; |
| 3111 /** The end (exclusive) of the key range. */ | 3320 /** The end (exclusive) of the key range. */ |
| 3112 core.String end; | 3321 core.String end; |
| 3113 /** The start (inclusive) of the key range. */ | 3322 /** The start (inclusive) of the key range. */ |
| 3114 core.String start; | 3323 core.String start; |
| 3115 | 3324 |
| 3116 KeyRangeDataDiskAssignment(); | 3325 KeyRangeDataDiskAssignment(); |
| 3117 | 3326 |
| (...skipping 24 matching lines...) |
| 3142 } | 3351 } |
| 3143 } | 3352 } |
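A sketch of one key-range-to-disk entry, with the inclusive start and exclusive end the comments above describe; the keys are illustrative UTF-8 split points and the disk name reuses the documented example:

  import 'package:googleapis_beta/dataflow/v1b3.dart' as dataflow;

  final rangeAssignment = new dataflow.KeyRangeDataDiskAssignment()
    ..start = 'a'   // inclusive
    ..end = 'n'     // exclusive
    ..dataDisk = 'myproject-1014-104817-4c2-harness-0-disk-1';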
| 3144 | 3353 |
| 3145 /** | 3354 /** |
| 3146 * Location information for a specific key-range of a sharded computation. | 3355 * Location information for a specific key-range of a sharded computation. |
| 3147 * Currently we only support UTF-8 character splits to simplify encoding into | 3356 * Currently we only support UTF-8 character splits to simplify encoding into |
| 3148 * JSON. | 3357 * JSON. |
| 3149 */ | 3358 */ |
| 3150 class KeyRangeLocation { | 3359 class KeyRangeLocation { |
| 3151 /** | 3360 /** |
| 3152 * The name of the data disk where data for this range is stored. This name is | 3361 * The name of the data disk where data for this range is stored. |
| 3153 * local to the Google Cloud Platform project and uniquely identifies the disk | 3362 * This name is local to the Google Cloud Platform project and uniquely |
| 3154 * within that project, for example | 3363 * identifies the disk within that project, for example |
| 3155 * "myproject-1014-104817-4c2-harness-0-disk-1". | 3364 * "myproject-1014-104817-4c2-harness-0-disk-1". |
| 3156 */ | 3365 */ |
| 3157 core.String dataDisk; | 3366 core.String dataDisk; |
| 3158 /** | 3367 /** |
| 3159 * The physical location of this range assignment to be used for streaming | 3368 * The physical location of this range assignment to be used for |
| 3160 * computation cross-worker message delivery. | 3369 * streaming computation cross-worker message delivery. |
| 3161 */ | 3370 */ |
| 3162 core.String deliveryEndpoint; | 3371 core.String deliveryEndpoint; |
| 3163 /** The end (exclusive) of the key range. */ | 3372 /** The end (exclusive) of the key range. */ |
| 3164 core.String end; | 3373 core.String end; |
| 3165 /** | 3374 /** |
| 3166 * The location of the persistent state for this range, as a persistent | 3375 * The location of the persistent state for this range, as a |
| 3167 * directory in the worker local filesystem. | 3376 * persistent directory in the worker local filesystem. |
| 3168 */ | 3377 */ |
| 3169 core.String persistentDirectory; | 3378 core.String persistentDirectory; |
| 3170 /** The start (inclusive) of the key range. */ | 3379 /** The start (inclusive) of the key range. */ |
| 3171 core.String start; | 3380 core.String start; |
| 3172 | 3381 |
| 3173 KeyRangeLocation(); | 3382 KeyRangeLocation(); |
| 3174 | 3383 |
| 3175 KeyRangeLocation.fromJson(core.Map _json) { | 3384 KeyRangeLocation.fromJson(core.Map _json) { |
| 3176 if (_json.containsKey("dataDisk")) { | 3385 if (_json.containsKey("dataDisk")) { |
| 3177 dataDisk = _json["dataDisk"]; | 3386 dataDisk = _json["dataDisk"]; |
| (...skipping 42 matching lines...) |
| 3220 /** The initial lease period. */ | 3429 /** The initial lease period. */ |
| 3221 core.String requestedLeaseDuration; | 3430 core.String requestedLeaseDuration; |
| 3222 /** Filter for WorkItem type. */ | 3431 /** Filter for WorkItem type. */ |
| 3223 core.List<core.String> workItemTypes; | 3432 core.List<core.String> workItemTypes; |
| 3224 /** | 3433 /** |
| 3225 * Worker capabilities. WorkItems might be limited to workers with specific | 3434 * Worker capabilities. WorkItems might be limited to workers with specific |
| 3226 * capabilities. | 3435 * capabilities. |
| 3227 */ | 3436 */ |
| 3228 core.List<core.String> workerCapabilities; | 3437 core.List<core.String> workerCapabilities; |
| 3229 /** | 3438 /** |
| 3230 * Identifies the worker leasing work -- typically the ID of the virtual | 3439 * Identifies the worker leasing work -- typically the ID of the |
| 3231 * machine running the worker. | 3440 * virtual machine running the worker. |
| 3232 */ | 3441 */ |
| 3233 core.String workerId; | 3442 core.String workerId; |
| 3234 | 3443 |
| 3235 LeaseWorkItemRequest(); | 3444 LeaseWorkItemRequest(); |
| 3236 | 3445 |
| 3237 LeaseWorkItemRequest.fromJson(core.Map _json) { | 3446 LeaseWorkItemRequest.fromJson(core.Map _json) { |
| 3238 if (_json.containsKey("currentWorkerTime")) { | 3447 if (_json.containsKey("currentWorkerTime")) { |
| 3239 currentWorkerTime = _json["currentWorkerTime"]; | 3448 currentWorkerTime = _json["currentWorkerTime"]; |
| 3240 } | 3449 } |
| 3241 if (_json.containsKey("location")) { | 3450 if (_json.containsKey("location")) { |
| (...skipping 83 matching lines...) |
| 3325 _json["jobMessages"] = jobMessages.map((value) => (value).toJson()).toList
(); | 3534 _json["jobMessages"] = jobMessages.map((value) => (value).toJson()).toList
(); |
| 3326 } | 3535 } |
| 3327 if (nextPageToken != null) { | 3536 if (nextPageToken != null) { |
| 3328 _json["nextPageToken"] = nextPageToken; | 3537 _json["nextPageToken"] = nextPageToken; |
| 3329 } | 3538 } |
| 3330 return _json; | 3539 return _json; |
| 3331 } | 3540 } |
| 3332 } | 3541 } |
| 3333 | 3542 |
| 3334 /** | 3543 /** |
| 3335 * Response to a request to list Cloud Dataflow jobs. This may be a partial | 3544 * Response to a request to list Cloud Dataflow jobs. This may be a partial |
| 3336 * response, depending on the page size in the ListJobsRequest. | 3545 * response, depending on the page size in the ListJobsRequest. |
| 3337 */ | 3546 */ |
| 3338 class ListJobsResponse { | 3547 class ListJobsResponse { |
| 3339 /** Zero or more messages describing locations that failed to respond. */ | 3548 /** Zero or more messages describing locations that failed to respond. */ |
| 3340 core.List<FailedLocation> failedLocation; | 3549 core.List<FailedLocation> failedLocation; |
| 3341 /** A subset of the requested job information. */ | 3550 /** A subset of the requested job information. */ |
| 3342 core.List<Job> jobs; | 3551 core.List<Job> jobs; |
| 3343 /** Set if there may be more results than fit in this response. */ | 3552 /** Set if there may be more results than fit in this response. */ |
| 3344 core.String nextPageToken; | 3553 core.String nextPageToken; |
| 3345 | 3554 |
| (...skipping 20 matching lines...) |
| 3366 _json["jobs"] = jobs.map((value) => (value).toJson()).toList(); | 3575 _json["jobs"] = jobs.map((value) => (value).toJson()).toList(); |
| 3367 } | 3576 } |
| 3368 if (nextPageToken != null) { | 3577 if (nextPageToken != null) { |
| 3369 _json["nextPageToken"] = nextPageToken; | 3578 _json["nextPageToken"] = nextPageToken; |
| 3370 } | 3579 } |
| 3371 return _json; | 3580 return _json; |
| 3372 } | 3581 } |
| 3373 } | 3582 } |
| 3374 | 3583 |
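Because the response may be partial, callers should follow nextPageToken until it is absent; a hedged sketch, where `response` is assumed to be a ListJobsResponse decoded from one jobs.list call:

    for (var job in response.jobs) {
      // Inspect each returned Job here.
    }
    if (response.nextPageToken != null) {
      // More results exist: repeat the list call, passing this token as
      // the page token of the next request.
    }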
| 3375 /** | 3584 /** |
| 3376 * MapTask consists of an ordered set of instructions, each of which describes | 3585 * MapTask consists of an ordered set of instructions, each of which |
| 3377 * one particular low-level operation for the worker to perform in order to | 3586 * describes one particular low-level operation for the worker to |
| 3378 * accomplish the MapTask's WorkItem. Each instruction must appear in the list | 3587 * perform in order to accomplish the MapTask's WorkItem. |
| 3379 * before any instructions which depend on its output. | 3588 * |
| 3589 * Each instruction must appear in the list before any instructions which |
| 3590 * depend on its output. |
| 3380 */ | 3591 */ |
| 3381 class MapTask { | 3592 class MapTask { |
| 3382 /** The instructions in the MapTask. */ | 3593 /** The instructions in the MapTask. */ |
| 3383 core.List<ParallelInstruction> instructions; | 3594 core.List<ParallelInstruction> instructions; |
| 3384 /** | 3595 /** |
| 3385 * System-defined name of the stage containing this MapTask. Unique across the | 3596 * System-defined name of the stage containing this MapTask. |
| 3386 * workflow. | 3597 * Unique across the workflow. |
| 3387 */ | 3598 */ |
| 3388 core.String stageName; | 3599 core.String stageName; |
| 3389 /** System-defined name of this MapTask. Unique across the workflow. */ | 3600 /** |
| 3601 * System-defined name of this MapTask. |
| 3602 * Unique across the workflow. |
| 3603 */ |
| 3390 core.String systemName; | 3604 core.String systemName; |
| 3391 | 3605 |
| 3392 MapTask(); | 3606 MapTask(); |
| 3393 | 3607 |
| 3394 MapTask.fromJson(core.Map _json) { | 3608 MapTask.fromJson(core.Map _json) { |
| 3395 if (_json.containsKey("instructions")) { | 3609 if (_json.containsKey("instructions")) { |
| 3396 instructions = _json["instructions"].map((value) => new ParallelInstruction.fromJson(value)).toList(); | 3610 instructions = _json["instructions"].map((value) => new ParallelInstruction.fromJson(value)).toList(); |
| 3397 } | 3611 } |
| 3398 if (_json.containsKey("stageName")) { | 3612 if (_json.containsKey("stageName")) { |
| 3399 stageName = _json["stageName"]; | 3613 stageName = _json["stageName"]; |
| (...skipping 17 matching lines...) |
| 3417 return _json; | 3631 return _json; |
| 3418 } | 3632 } |
| 3419 } | 3633 } |
| 3420 | 3634 |
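The ordering rule above is a topological one: every instruction is listed before anything that consumes its output. A minimal sketch with hypothetical names:

    final task = new MapTask()
      ..stageName = "S01"        // hypothetical stage name
      ..systemName = "MapTask-1" // hypothetical task name
      ..instructions = [
        new ParallelInstruction()..systemName = "read-input",
        // Consumers of "read-input"'s output must appear after it:
        new ParallelInstruction()..systemName = "write-output",
      ];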
| 3421 /** | 3635 /** |
| 3422 * The metric short id is returned to the user alongside an offset into | 3636 * The metric short id is returned to the user alongside an offset into |
| 3423 * the ReportWorkItemStatusRequest. | 3637 * the ReportWorkItemStatusRequest. |
| 3424 */ | 3638 */ |
| 3425 class MetricShortId { | 3639 class MetricShortId { |
| 3426 /** | 3640 /** |
| 3427 * The index of the corresponding metric in the ReportWorkItemStatusRequest. | 3641 * The index of the corresponding metric in |
| 3428 * Required. | 3642 * the ReportWorkItemStatusRequest. Required. |
| 3429 */ | 3643 */ |
| 3430 core.int metricIndex; | 3644 core.int metricIndex; |
| 3431 /** The service-generated short identifier for the metric. */ | 3645 /** The service-generated short identifier for the metric. */ |
| 3432 core.String shortId; | 3646 core.String shortId; |
| 3433 | 3647 |
| 3434 MetricShortId(); | 3648 MetricShortId(); |
| 3435 | 3649 |
| 3436 MetricShortId.fromJson(core.Map _json) { | 3650 MetricShortId.fromJson(core.Map _json) { |
| 3437 if (_json.containsKey("metricIndex")) { | 3651 if (_json.containsKey("metricIndex")) { |
| 3438 metricIndex = _json["metricIndex"]; | 3652 metricIndex = _json["metricIndex"]; |
| 3439 } | 3653 } |
| 3440 if (_json.containsKey("shortId")) { | 3654 if (_json.containsKey("shortId")) { |
| 3441 shortId = _json["shortId"]; | 3655 shortId = _json["shortId"]; |
| 3442 } | 3656 } |
| 3443 } | 3657 } |
| 3444 | 3658 |
| 3445 core.Map toJson() { | 3659 core.Map toJson() { |
| 3446 var _json = new core.Map(); | 3660 var _json = new core.Map(); |
| 3447 if (metricIndex != null) { | 3661 if (metricIndex != null) { |
| 3448 _json["metricIndex"] = metricIndex; | 3662 _json["metricIndex"] = metricIndex; |
| 3449 } | 3663 } |
| 3450 if (shortId != null) { | 3664 if (shortId != null) { |
| 3451 _json["shortId"] = shortId; | 3665 _json["shortId"] = shortId; |
| 3452 } | 3666 } |
| 3453 return _json; | 3667 return _json; |
| 3454 } | 3668 } |
| 3455 } | 3669 } |
| 3456 | 3670 |
| 3457 /** | 3671 /** |
| 3458 * Identifies a metric by describing the source which generated the metric. | 3672 * Identifies a metric by describing the source which generated the |
| 3673 * metric. |
| 3459 */ | 3674 */ |
| 3460 class MetricStructuredName { | 3675 class MetricStructuredName { |
| 3461 /** | 3676 /** |
| 3462 * Zero or more labeled fields which identify the part of the job this metric | 3677 * Zero or more labeled fields which identify the part of the job this |
| 3463 * is associated with, such as the name of a step or collection. For example, | 3678 * metric is associated with, such as the name of a step or collection. |
| 3464 * built-in counters associated with steps will have context['step'] = . | 3679 * |
| 3465 * Counters associated with PCollections in the SDK will have | 3680 * For example, built-in counters associated with steps will have |
| 3466 * context['pcollection'] = | 3681 * context['step'] = <step-name>. Counters associated with PCollections |
| 3467 * . | 3682 * in the SDK will have context['pcollection'] = <pcollection-name>. |
| 3468 */ | 3683 */ |
| 3469 core.Map<core.String, core.String> context; | 3684 core.Map<core.String, core.String> context; |
| 3470 /** Worker-defined metric name. */ | 3685 /** Worker-defined metric name. */ |
| 3471 core.String name; | 3686 core.String name; |
| 3472 /** | 3687 /** |
| 3473 * Origin (namespace) of metric name. May be blank for user-defined metrics; | 3688 * Origin (namespace) of metric name. May be blank for user-defined metrics; |
| 3474 * will be "dataflow" for metrics defined by the Dataflow service or SDK. | 3689 * will be "dataflow" for metrics defined by the Dataflow service or SDK. |
| 3475 */ | 3690 */ |
| 3476 core.String origin; | 3691 core.String origin; |
| 3477 | 3692 |
| (...skipping 22 matching lines...) |
| 3500 if (origin != null) { | 3715 if (origin != null) { |
| 3501 _json["origin"] = origin; | 3716 _json["origin"] = origin; |
| 3502 } | 3717 } |
| 3503 return _json; | 3718 return _json; |
| 3504 } | 3719 } |
| 3505 } | 3720 } |
| 3506 | 3721 |
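A short sketch of the context map described above; the metric and step names are hypothetical:

    final metricName = new MetricStructuredName()
      ..origin = "dataflow"       // metric defined by the service or SDK
      ..name = "ElementCount"     // hypothetical metric name
      ..context = {"step": "s2"}; // built-in step counter: context['step'] = <step-name>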
| 3507 /** Describes the state of a metric. */ | 3722 /** Describes the state of a metric. */ |
| 3508 class MetricUpdate { | 3723 class MetricUpdate { |
| 3509 /** | 3724 /** |
| 3510 * True if this metric is reported as the total cumulative aggregate value | 3725 * True if this metric is reported as the total cumulative aggregate |
| 3511 * accumulated since the worker started working on this WorkItem. By default | 3726 * value accumulated since the worker started working on this WorkItem. |
| 3512 * this is false, indicating that this metric is reported as a delta that is | 3727 * By default this is false, indicating that this metric is reported |
| 3513 * not associated with any WorkItem. | 3728 * as a delta that is not associated with any WorkItem. |
| 3514 */ | 3729 */ |
| 3515 core.bool cumulative; | 3730 core.bool cumulative; |
| 3516 /** | 3731 /** |
| 3517 * Worker-computed aggregate value for internal use by the Dataflow service. | 3732 * Worker-computed aggregate value for internal use by the Dataflow |
| 3733 * service. |
| 3518 * | 3734 * |
| 3519 * The values for Object must be JSON objects. It can consist of `num`, | 3735 * The values for Object must be JSON objects. It can consist of `num`, |
| 3520 * `String`, `bool` and `null` as well as `Map` and `List` values. | 3736 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 3521 */ | 3737 */ |
| 3522 core.Object internal; | 3738 core.Object internal; |
| 3523 /** | 3739 /** |
| 3524 * Metric aggregation kind. The possible metric aggregation kinds are "Sum", | 3740 * Metric aggregation kind. The possible metric aggregation kinds are |
| 3525 * "Max", "Min", "Mean", "Set", "And", and "Or". The specified aggregation | 3741 * "Sum", "Max", "Min", "Mean", "Set", "And", and "Or". |
| 3526 * kind is case-insensitive. If omitted, this is not an aggregated value but | 3742 * The specified aggregation kind is case-insensitive. |
| 3527 * instead a single metric sample value. | 3743 * |
| 3744 * If omitted, this is not an aggregated value but instead |
| 3745 * a single metric sample value. |
| 3528 */ | 3746 */ |
| 3529 core.String kind; | 3747 core.String kind; |
| 3530 /** | 3748 /** |
| 3531 * Worker-computed aggregate value for the "Mean" aggregation kind. This holds | 3749 * Worker-computed aggregate value for the "Mean" aggregation kind. |
| 3532 * the count of the aggregated values and is used in combination with mean_sum | 3750 * This holds the count of the aggregated values and is used in combination |
| 3533 * above to obtain the actual mean aggregate value. The only possible value | 3751 * with mean_sum above to obtain the actual mean aggregate value. |
| 3534 * type is Long. | 3752 * The only possible value type is Long. |
| 3535 * | 3753 * |
| 3536 * The values for Object must be JSON objects. It can consist of `num`, | 3754 * The values for Object must be JSON objects. It can consist of `num`, |
| 3537 * `String`, `bool` and `null` as well as `Map` and `List` values. | 3755 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 3538 */ | 3756 */ |
| 3539 core.Object meanCount; | 3757 core.Object meanCount; |
| 3540 /** | 3758 /** |
| 3541 * Worker-computed aggregate value for the "Mean" aggregation kind. This holds | 3759 * Worker-computed aggregate value for the "Mean" aggregation kind. |
| 3542 * the sum of the aggregated values and is used in combination with mean_count | 3760 * This holds the sum of the aggregated values and is used in combination |
| 3543 * below to obtain the actual mean aggregate value. The only possible value | 3761 * with mean_count below to obtain the actual mean aggregate value. |
| 3544 * types are Long and Double. | 3762 * The only possible value types are Long and Double. |
| 3545 * | 3763 * |
| 3546 * The values for Object must be JSON objects. It can consist of `num`, | 3764 * The values for Object must be JSON objects. It can consist of `num`, |
| 3547 * `String`, `bool` and `null` as well as `Map` and `List` values. | 3765 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 3548 */ | 3766 */ |
| 3549 core.Object meanSum; | 3767 core.Object meanSum; |
| 3550 /** Name of the metric. */ | 3768 /** Name of the metric. */ |
| 3551 MetricStructuredName name; | 3769 MetricStructuredName name; |
| 3552 /** | 3770 /** |
| 3553 * Worker-computed aggregate value for aggregation kinds "Sum", "Max", "Min", | 3771 * Worker-computed aggregate value for aggregation kinds "Sum", "Max", "Min", |
| 3554 * "And", and "Or". The possible value types are Long, Double, and Boolean. | 3772 * "And", and "Or". The possible value types are Long, Double, and Boolean. |
| 3555 * | 3773 * |
| 3556 * The values for Object must be JSON objects. It can consist of `num`, | 3774 * The values for Object must be JSON objects. It can consist of `num`, |
| 3557 * `String`, `bool` and `null` as well as `Map` and `List` values. | 3775 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 3558 */ | 3776 */ |
| 3559 core.Object scalar; | 3777 core.Object scalar; |
| 3560 /** | 3778 /** |
| 3561 * Worker-computed aggregate value for the "Set" aggregation kind. The only | 3779 * Worker-computed aggregate value for the "Set" aggregation kind. The only |
| 3562 * possible value type is a list of Values whose type can be Long, Double, or | 3780 * possible value type is a list of Values whose type can be Long, Double, |
| 3563 * String, according to the metric's type. All Values in the list must be of | 3781 * or String, according to the metric's type. All Values in the list must |
| 3564 * the same type. | 3782 * be of the same type. |
| 3565 * | 3783 * |
| 3566 * The values for Object must be JSON objects. It can consist of `num`, | 3784 * The values for Object must be JSON objects. It can consist of `num`, |
| 3567 * `String`, `bool` and `null` as well as `Map` and `List` values. | 3785 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 3568 */ | 3786 */ |
| 3569 core.Object set; | 3787 core.Object set; |
| 3570 /** | 3788 /** |
| 3571 * Timestamp associated with the metric value. Optional when workers are | 3789 * Timestamp associated with the metric value. Optional when workers are |
| 3572 * reporting work progress; it will be filled in responses from the metrics | 3790 * reporting work progress; it will be filled in responses from the |
| 3573 * API. | 3791 * metrics API. |
| 3574 */ | 3792 */ |
| 3575 core.String updateTime; | 3793 core.String updateTime; |
| 3576 | 3794 |
| 3577 MetricUpdate(); | 3795 MetricUpdate(); |
| 3578 | 3796 |
| 3579 MetricUpdate.fromJson(core.Map _json) { | 3797 MetricUpdate.fromJson(core.Map _json) { |
| 3580 if (_json.containsKey("cumulative")) { | 3798 if (_json.containsKey("cumulative")) { |
| 3581 cumulative = _json["cumulative"]; | 3799 cumulative = _json["cumulative"]; |
| 3582 } | 3800 } |
| 3583 if (_json.containsKey("internal")) { | 3801 if (_json.containsKey("internal")) { |
| (...skipping 51 matching lines...) |
| 3635 if (updateTime != null) { | 3853 if (updateTime != null) { |
| 3636 _json["updateTime"] = updateTime; | 3854 _json["updateTime"] = updateTime; |
| 3637 } | 3855 } |
| 3638 return _json; | 3856 return _json; |
| 3639 } | 3857 } |
| 3640 } | 3858 } |
| 3641 | 3859 |
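For the "Mean" kind the service combines mean_sum and mean_count to recover the mean; a minimal sketch with hypothetical numbers:

    final update = new MetricUpdate()
      ..kind = "Mean"  // aggregation kind, case-insensitive
      ..meanSum = 300  // sum of the aggregated values (Long)
      ..meanCount = 4; // count of the aggregated values (Long)
    // The effective mean aggregate value is meanSum / meanCount, i.e. 75.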
| 3642 /** Describes a mounted data disk. */ | 3860 /** Describes a mounted data disk. */ |
| 3643 class MountedDataDisk { | 3861 class MountedDataDisk { |
| 3644 /** | 3862 /** |
| 3645 * The name of the data disk. This name is local to the Google Cloud Platform | 3863 * The name of the data disk. |
| 3646 * project and uniquely identifies the disk within that project, for example | 3864 * This name is local to the Google Cloud Platform project and uniquely |
| 3865 * identifies the disk within that project, for example |
| 3647 * "myproject-1014-104817-4c2-harness-0-disk-1". | 3866 * "myproject-1014-104817-4c2-harness-0-disk-1". |
| 3648 */ | 3867 */ |
| 3649 core.String dataDisk; | 3868 core.String dataDisk; |
| 3650 | 3869 |
| 3651 MountedDataDisk(); | 3870 MountedDataDisk(); |
| 3652 | 3871 |
| 3653 MountedDataDisk.fromJson(core.Map _json) { | 3872 MountedDataDisk.fromJson(core.Map _json) { |
| 3654 if (_json.containsKey("dataDisk")) { | 3873 if (_json.containsKey("dataDisk")) { |
| 3655 dataDisk = _json["dataDisk"]; | 3874 dataDisk = _json["dataDisk"]; |
| 3656 } | 3875 } |
| 3657 } | 3876 } |
| 3658 | 3877 |
| 3659 core.Map toJson() { | 3878 core.Map toJson() { |
| 3660 var _json = new core.Map(); | 3879 var _json = new core.Map(); |
| 3661 if (dataDisk != null) { | 3880 if (dataDisk != null) { |
| 3662 _json["dataDisk"] = dataDisk; | 3881 _json["dataDisk"] = dataDisk; |
| 3663 } | 3882 } |
| 3664 return _json; | 3883 return _json; |
| 3665 } | 3884 } |
| 3666 } | 3885 } |
| 3667 | 3886 |
| 3668 /** Information about an output of a multi-output DoFn. */ | 3887 /** Information about an output of a multi-output DoFn. */ |
| 3669 class MultiOutputInfo { | 3888 class MultiOutputInfo { |
| 3670 /** | 3889 /** |
| 3671 * The id of the tag the user code will emit to this output by; this should | 3890 * The id of the tag the user code will emit to this output by; this |
| 3672 * correspond to the tag of some SideInputInfo. | 3891 * should correspond to the tag of some SideInputInfo. |
| 3673 */ | 3892 */ |
| 3674 core.String tag; | 3893 core.String tag; |
| 3675 | 3894 |
| 3676 MultiOutputInfo(); | 3895 MultiOutputInfo(); |
| 3677 | 3896 |
| 3678 MultiOutputInfo.fromJson(core.Map _json) { | 3897 MultiOutputInfo.fromJson(core.Map _json) { |
| 3679 if (_json.containsKey("tag")) { | 3898 if (_json.containsKey("tag")) { |
| 3680 tag = _json["tag"]; | 3899 tag = _json["tag"]; |
| 3681 } | 3900 } |
| 3682 } | 3901 } |
| 3683 | 3902 |
| 3684 core.Map toJson() { | 3903 core.Map toJson() { |
| 3685 var _json = new core.Map(); | 3904 var _json = new core.Map(); |
| 3686 if (tag != null) { | 3905 if (tag != null) { |
| 3687 _json["tag"] = tag; | 3906 _json["tag"] = tag; |
| 3688 } | 3907 } |
| 3689 return _json; | 3908 return _json; |
| 3690 } | 3909 } |
| 3691 } | 3910 } |
| 3692 | 3911 |
| 3693 /** Basic metadata about a counter. */ | 3912 /** Basic metadata about a counter. */ |
| 3694 class NameAndKind { | 3913 class NameAndKind { |
| 3695 /** | 3914 /** |
| 3696 * Counter aggregation kind. | 3915 * Counter aggregation kind. |
| 3697 * Possible string values are: | 3916 * Possible string values are: |
| 3698 * - "INVALID" : A INVALID. | 3917 * - "INVALID" : Counter aggregation kind was not set. |
| 3699 * - "SUM" : A SUM. | 3918 * - "SUM" : Aggregated value is the sum of all contributed values. |
| 3700 * - "MAX" : A MAX. | 3919 * - "MAX" : Aggregated value is the max of all contributed values. |
| 3701 * - "MIN" : A MIN. | 3920 * - "MIN" : Aggregated value is the min of all contributed values. |
| 3702 * - "MEAN" : A MEAN. | 3921 * - "MEAN" : Aggregated value is the mean of all contributed values. |
| 3703 * - "OR" : A OR. | 3922 * - "OR" : Aggregated value represents the logical 'or' of all contributed |
| 3704 * - "AND" : A AND. | 3923 * values. |
| 3705 * - "SET" : A SET. | 3924 * - "AND" : Aggregated value represents the logical 'and' of all contributed |
| 3706 * - "DISTRIBUTION" : A DISTRIBUTION. | 3925 * values. |
| 3926 * - "SET" : Aggregated value is a set of unique contributed values. |
| 3927 * - "DISTRIBUTION" : Aggregated value captures statistics about a |
| 3928 * distribution. |
| 3707 */ | 3929 */ |
| 3708 core.String kind; | 3930 core.String kind; |
| 3709 /** Name of the counter. */ | 3931 /** Name of the counter. */ |
| 3710 core.String name; | 3932 core.String name; |
| 3711 | 3933 |
| 3712 NameAndKind(); | 3934 NameAndKind(); |
| 3713 | 3935 |
| 3714 NameAndKind.fromJson(core.Map _json) { | 3936 NameAndKind.fromJson(core.Map _json) { |
| 3715 if (_json.containsKey("kind")) { | 3937 if (_json.containsKey("kind")) { |
| 3716 kind = _json["kind"]; | 3938 kind = _json["kind"]; |
| 3717 } | 3939 } |
| 3718 if (_json.containsKey("name")) { | 3940 if (_json.containsKey("name")) { |
| 3719 name = _json["name"]; | 3941 name = _json["name"]; |
| 3720 } | 3942 } |
| 3721 } | 3943 } |
| 3722 | 3944 |
| 3723 core.Map toJson() { | 3945 core.Map toJson() { |
| 3724 var _json = new core.Map(); | 3946 var _json = new core.Map(); |
| 3725 if (kind != null) { | 3947 if (kind != null) { |
| 3726 _json["kind"] = kind; | 3948 _json["kind"] = kind; |
| 3727 } | 3949 } |
| 3728 if (name != null) { | 3950 if (name != null) { |
| 3729 _json["name"] = name; | 3951 _json["name"] = name; |
| 3730 } | 3952 } |
| 3731 return _json; | 3953 return _json; |
| 3732 } | 3954 } |
| 3733 } | 3955 } |
| 3734 | 3956 |
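A minimal sketch pairing a counter name with one of the kinds listed above; the counter name is hypothetical:

    final counter = new NameAndKind()
      ..name = "bytes_read" // hypothetical counter name
      ..kind = "SUM";       // aggregated value is the sum of all contributions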
| 3735 /** | 3957 /** |
| 3736 * The packages that must be installed in order for a worker to run the steps of | 3958 * The packages that must be installed in order for a worker to run the |
| 3737 * the Cloud Dataflow job that will be assigned to its worker pool. This is the | 3959 * steps of the Cloud Dataflow job that will be assigned to its worker |
| 3738 * mechanism by which the Cloud Dataflow SDK causes code to be loaded onto the | 3960 * pool. |
| 3739 * workers. For example, the Cloud Dataflow Java SDK might use this to install | 3961 * |
| 3740 * jars containing the user's code and all of the various dependencies | 3962 * This is the mechanism by which the Cloud Dataflow SDK causes code to |
| 3741 * (libraries, data files, etc.) required in order for that code to run. | 3963 * be loaded onto the workers. For example, the Cloud Dataflow Java SDK |
| 3964 * might use this to install jars containing the user's code and all of the |
| 3965 * various dependencies (libraries, data files, etc.) required in order |
| 3966 * for that code to run. |
| 3742 */ | 3967 */ |
| 3743 class Package { | 3968 class Package { |
| 3744 /** | 3969 /** |
| 3745 * The resource to read the package from. The supported resource type is: | 3970 * The resource to read the package from. The supported resource type is: |
| 3746 * Google Cloud Storage: storage.googleapis.com/{bucket} | 3971 * |
| 3747 * bucket.storage.googleapis.com/ | 3972 * Google Cloud Storage: |
| 3973 * |
| 3974 * storage.googleapis.com/{bucket} |
| 3975 * bucket.storage.googleapis.com/ |
| 3748 */ | 3976 */ |
| 3749 core.String location; | 3977 core.String location; |
| 3750 /** The name of the package. */ | 3978 /** The name of the package. */ |
| 3751 core.String name; | 3979 core.String name; |
| 3752 | 3980 |
| 3753 Package(); | 3981 Package(); |
| 3754 | 3982 |
| 3755 Package.fromJson(core.Map _json) { | 3983 Package.fromJson(core.Map _json) { |
| 3756 if (_json.containsKey("location")) { | 3984 if (_json.containsKey("location")) { |
| 3757 location = _json["location"]; | 3985 location = _json["location"]; |
| 3758 } | 3986 } |
| 3759 if (_json.containsKey("name")) { | 3987 if (_json.containsKey("name")) { |
| 3760 name = _json["name"]; | 3988 name = _json["name"]; |
| 3761 } | 3989 } |
| 3762 } | 3990 } |
| 3763 | 3991 |
| 3764 core.Map toJson() { | 3992 core.Map toJson() { |
| 3765 var _json = new core.Map(); | 3993 var _json = new core.Map(); |
| 3766 if (location != null) { | 3994 if (location != null) { |
| 3767 _json["location"] = location; | 3995 _json["location"] = location; |
| 3768 } | 3996 } |
| 3769 if (name != null) { | 3997 if (name != null) { |
| 3770 _json["name"] = name; | 3998 _json["name"] = name; |
| 3771 } | 3999 } |
| 3772 return _json; | 4000 return _json; |
| 3773 } | 4001 } |
| 3774 } | 4002 } |
| 3775 | 4003 |
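A hedged sketch using the Google Cloud Storage form documented above; the bucket, path, and jar name are hypothetical:

    final package = new Package()
      ..name = "pipeline-deps.jar"
      ..location = "storage.googleapis.com/my-bucket/staging/pipeline-deps.jar";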
| 3776 /** | 4004 /** |
| 3777 * An instruction that does a ParDo operation. Takes one main input and zero or | 4005 * An instruction that does a ParDo operation. |
| 3778 * more side inputs, and produces zero or more outputs. Runs user code. | 4006 * Takes one main input and zero or more side inputs, and produces |
| 4007 * zero or more outputs. |
| 4008 * Runs user code. |
| 3779 */ | 4009 */ |
| 3780 class ParDoInstruction { | 4010 class ParDoInstruction { |
| 3781 /** The input. */ | 4011 /** The input. */ |
| 3782 InstructionInput input; | 4012 InstructionInput input; |
| 3783 /** Information about each of the outputs, if user_fn is a MultiDoFn. */ | 4013 /** Information about each of the outputs, if user_fn is a MultiDoFn. */ |
| 3784 core.List<MultiOutputInfo> multiOutputInfos; | 4014 core.List<MultiOutputInfo> multiOutputInfos; |
| 3785 /** The number of outputs. */ | 4015 /** The number of outputs. */ |
| 3786 core.int numOutputs; | 4016 core.int numOutputs; |
| 3787 /** Zero or more side inputs. */ | 4017 /** Zero or more side inputs. */ |
| 3788 core.List<SideInputInfo> sideInputs; | 4018 core.List<SideInputInfo> sideInputs; |
| 3789 /** | 4019 /** |
| 3790 * The user function to invoke. | 4020 * The user function to invoke. |
| 3791 * | 4021 * |
| 3792 * The values for Object must be JSON objects. It can consist of `num`, | 4022 * The values for Object must be JSON objects. It can consist of `num`, |
| 3793 * `String`, `bool` and `null` as well as `Map` and `List` values. | 4023 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| (...skipping 50 matching lines...) |
| 3844 /** System-defined name for the operation in the original workflow graph. */ | 4074 /** System-defined name for the operation in the original workflow graph. */ |
| 3845 core.String originalName; | 4075 core.String originalName; |
| 3846 /** Describes the outputs of the instruction. */ | 4076 /** Describes the outputs of the instruction. */ |
| 3847 core.List<InstructionOutput> outputs; | 4077 core.List<InstructionOutput> outputs; |
| 3848 /** Additional information for ParDo instructions. */ | 4078 /** Additional information for ParDo instructions. */ |
| 3849 ParDoInstruction parDo; | 4079 ParDoInstruction parDo; |
| 3850 /** Additional information for PartialGroupByKey instructions. */ | 4080 /** Additional information for PartialGroupByKey instructions. */ |
| 3851 PartialGroupByKeyInstruction partialGroupByKey; | 4081 PartialGroupByKeyInstruction partialGroupByKey; |
| 3852 /** Additional information for Read instructions. */ | 4082 /** Additional information for Read instructions. */ |
| 3853 ReadInstruction read; | 4083 ReadInstruction read; |
| 3854 /** System-defined name of this operation. Unique across the workflow. */ | 4084 /** |
| 4085 * System-defined name of this operation. |
| 4086 * Unique across the workflow. |
| 4087 */ |
| 3855 core.String systemName; | 4088 core.String systemName; |
| 3856 /** Additional information for Write instructions. */ | 4089 /** Additional information for Write instructions. */ |
| 3857 WriteInstruction write; | 4090 WriteInstruction write; |
| 3858 | 4091 |
| 3859 ParallelInstruction(); | 4092 ParallelInstruction(); |
| 3860 | 4093 |
| 3861 ParallelInstruction.fromJson(core.Map _json) { | 4094 ParallelInstruction.fromJson(core.Map _json) { |
| 3862 if (_json.containsKey("flatten")) { | 4095 if (_json.containsKey("flatten")) { |
| 3863 flatten = new FlattenInstruction.fromJson(_json["flatten"]); | 4096 flatten = new FlattenInstruction.fromJson(_json["flatten"]); |
| 3864 } | 4097 } |
| (...skipping 50 matching lines...) |
| 3915 _json["systemName"] = systemName; | 4148 _json["systemName"] = systemName; |
| 3916 } | 4149 } |
| 3917 if (write != null) { | 4150 if (write != null) { |
| 3918 _json["write"] = (write).toJson(); | 4151 _json["write"] = (write).toJson(); |
| 3919 } | 4152 } |
| 3920 return _json; | 4153 return _json; |
| 3921 } | 4154 } |
| 3922 } | 4155 } |
| 3923 | 4156 |
| 3924 /** | 4157 /** |
| 3925 * An instruction that does a partial group-by-key. One input and one output. | 4158 * An instruction that does a partial group-by-key. |
| 4159 * One input and one output. |
| 3926 */ | 4160 */ |
| 3927 class PartialGroupByKeyInstruction { | 4161 class PartialGroupByKeyInstruction { |
| 3928 /** Describes the input to the partial group-by-key instruction. */ | 4162 /** Describes the input to the partial group-by-key instruction. */ |
| 3929 InstructionInput input; | 4163 InstructionInput input; |
| 3930 /** | 4164 /** |
| 3931 * The codec to use for interpreting an element in the input PTable. | 4165 * The codec to use for interpreting an element in the input PTable. |
| 3932 * | 4166 * |
| 3933 * The values for Object must be JSON objects. It can consist of `num`, | 4167 * The values for Object must be JSON objects. It can consist of `num`, |
| 3934 * `String`, `bool` and `null` as well as `Map` and `List` values. | 4168 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 3935 */ | 4169 */ |
| (...skipping 59 matching lines...) |
| 3995 _json["sideInputs"] = sideInputs.map((value) => (value).toJson()).toList()
; | 4229 _json["sideInputs"] = sideInputs.map((value) => (value).toJson()).toList()
; |
| 3996 } | 4230 } |
| 3997 if (valueCombiningFn != null) { | 4231 if (valueCombiningFn != null) { |
| 3998 _json["valueCombiningFn"] = valueCombiningFn; | 4232 _json["valueCombiningFn"] = valueCombiningFn; |
| 3999 } | 4233 } |
| 4000 return _json; | 4234 return _json; |
| 4001 } | 4235 } |
| 4002 } | 4236 } |
| 4003 | 4237 |
| 4004 /** | 4238 /** |
| 4005 * Position defines a position within a collection of data. The value can be | 4239 * Position defines a position within a collection of data. The value |
| 4006 * either the end position, a key (used with ordered collections), a byte | 4240 * can be either the end position, a key (used with ordered |
| 4007 * offset, or a record index. | 4241 * collections), a byte offset, or a record index. |
| 4008 */ | 4242 */ |
| 4009 class Position { | 4243 class Position { |
| 4010 /** Position is a byte offset. */ | 4244 /** Position is a byte offset. */ |
| 4011 core.String byteOffset; | 4245 core.String byteOffset; |
| 4012 /** CloudPosition is a concat position. */ | 4246 /** CloudPosition is a concat position. */ |
| 4013 ConcatPosition concatPosition; | 4247 ConcatPosition concatPosition; |
| 4014 /** | 4248 /** |
| 4015 * Position is past all other positions. Also useful for the end position of | 4249 * Position is past all other positions. Also useful for the end |
| 4016 * an unbounded range. | 4250 * position of an unbounded range. |
| 4017 */ | 4251 */ |
| 4018 core.bool end; | 4252 core.bool end; |
| 4019 /** Position is a string key, ordered lexicographically. */ | 4253 /** Position is a string key, ordered lexicographically. */ |
| 4020 core.String key; | 4254 core.String key; |
| 4021 /** Position is a record index. */ | 4255 /** Position is a record index. */ |
| 4022 core.String recordIndex; | 4256 core.String recordIndex; |
| 4023 /** | 4257 /** |
| 4024 * CloudPosition is a base64 encoded BatchShufflePosition (with FIXED | 4258 * CloudPosition is a base64 encoded BatchShufflePosition (with FIXED |
| 4025 * sharding). | 4259 * sharding). |
| 4026 */ | 4260 */ |
| (...skipping 40 matching lines...) |
| 4067 _json["recordIndex"] = recordIndex; | 4301 _json["recordIndex"] = recordIndex; |
| 4068 } | 4302 } |
| 4069 if (shufflePosition != null) { | 4303 if (shufflePosition != null) { |
| 4070 _json["shufflePosition"] = shufflePosition; | 4304 _json["shufflePosition"] = shufflePosition; |
| 4071 } | 4305 } |
| 4072 return _json; | 4306 return _json; |
| 4073 } | 4307 } |
| 4074 } | 4308 } |
| 4075 | 4309 |
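The fields above are alternatives: a Position carries exactly one kind of value. A minimal sketch (the key is hypothetical):

    final offsetPosition = new Position()..byteOffset = "1024"; // byte-based source
    final keyPosition = new Position()..key = "user-42";        // ordered collection
    final endPosition = new Position()..end = true;             // past all other positions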
| 4076 /** | 4310 /** |
| 4077 * Identifies a pubsub location to use for transferring data into or out of a | 4311 * Identifies a pubsub location to use for transferring data into or |
| 4078 * streaming Dataflow job. | 4312 * out of a streaming Dataflow job. |
| 4079 */ | 4313 */ |
| 4080 class PubsubLocation { | 4314 class PubsubLocation { |
| 4081 /** Indicates whether the pipeline allows late-arriving data. */ | 4315 /** Indicates whether the pipeline allows late-arriving data. */ |
| 4082 core.bool dropLateData; | 4316 core.bool dropLateData; |
| 4083 /** | 4317 /** |
| 4084 * If set, contains a pubsub label from which to extract record ids. If left | 4318 * If set, contains a pubsub label from which to extract record ids. |
| 4085 * empty, record deduplication will be strictly best effort. | 4319 * If left empty, record deduplication will be strictly best effort. |
| 4086 */ | 4320 */ |
| 4087 core.String idLabel; | 4321 core.String idLabel; |
| 4088 /** | 4322 /** |
| 4089 * A pubsub subscription, in the form of "pubsub.googleapis.com/subscriptions/ | 4323 * A pubsub subscription, in the form of |
| 4090 * /" | 4324 * "pubsub.googleapis.com/subscriptions/<project-id>/<subscription-name>" |
| 4091 */ | 4325 */ |
| 4092 core.String subscription; | 4326 core.String subscription; |
| 4093 /** | 4327 /** |
| 4094 * If set, contains a pubsub label from which to extract record timestamps. If | 4328 * If set, contains a pubsub label from which to extract record timestamps. |
| 4095 * left empty, record timestamps will be generated upon arrival. | 4329 * If left empty, record timestamps will be generated upon arrival. |
| 4096 */ | 4330 */ |
| 4097 core.String timestampLabel; | 4331 core.String timestampLabel; |
| 4098 /** | 4332 /** |
| 4099 * A pubsub topic, in the form of "pubsub.googleapis.com/topics/ | 4333 * A pubsub topic, in the form of |
| 4100 * /" | 4334 * "pubsub.googleapis.com/topics/<project-id>/<topic-name>" |
| 4101 */ | 4335 */ |
| 4102 core.String topic; | 4336 core.String topic; |
| 4103 /** | 4337 /** |
| 4104 * If set, specifies the pubsub subscription that will be used for tracking | 4338 * If set, specifies the pubsub subscription that will be used for tracking |
| 4105 * custom time timestamps for watermark estimation. | 4339 * custom time timestamps for watermark estimation. |
| 4106 */ | 4340 */ |
| 4107 core.String trackingSubscription; | 4341 core.String trackingSubscription; |
| 4108 /** If true, then the client has requested to get pubsub attributes. */ | 4342 /** If true, then the client has requested to get pubsub attributes. */ |
| 4109 core.bool withAttributes; | 4343 core.bool withAttributes; |
| 4110 | 4344 |
| (...skipping 43 matching lines...) |
| 4154 if (trackingSubscription != null) { | 4388 if (trackingSubscription != null) { |
| 4155 _json["trackingSubscription"] = trackingSubscription; | 4389 _json["trackingSubscription"] = trackingSubscription; |
| 4156 } | 4390 } |
| 4157 if (withAttributes != null) { | 4391 if (withAttributes != null) { |
| 4158 _json["withAttributes"] = withAttributes; | 4392 _json["withAttributes"] = withAttributes; |
| 4159 } | 4393 } |
| 4160 return _json; | 4394 return _json; |
| 4161 } | 4395 } |
| 4162 } | 4396 } |
| 4163 | 4397 |
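A hedged sketch using the documented subscription form; the project and subscription names are hypothetical:

    final pubsub = new PubsubLocation()
      ..subscription =
          "pubsub.googleapis.com/subscriptions/my-project/my-subscription"
      ..withAttributes = true // request pubsub attributes
      ..dropLateData = false; // keep late-arriving data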
| 4164 /** An instruction that reads records. Takes no inputs, produces one output. */ | 4398 /** |
| 4399 * An instruction that reads records. |
| 4400 * Takes no inputs, produces one output. |
| 4401 */ |
| 4165 class ReadInstruction { | 4402 class ReadInstruction { |
| 4166 /** The source to read from. */ | 4403 /** The source to read from. */ |
| 4167 Source source; | 4404 Source source; |
| 4168 | 4405 |
| 4169 ReadInstruction(); | 4406 ReadInstruction(); |
| 4170 | 4407 |
| 4171 ReadInstruction.fromJson(core.Map _json) { | 4408 ReadInstruction.fromJson(core.Map _json) { |
| 4172 if (_json.containsKey("source")) { | 4409 if (_json.containsKey("source")) { |
| 4173 source = new Source.fromJson(_json["source"]); | 4410 source = new Source.fromJson(_json["source"]); |
| 4174 } | 4411 } |
| 4175 } | 4412 } |
| 4176 | 4413 |
| 4177 core.Map toJson() { | 4414 core.Map toJson() { |
| 4178 var _json = new core.Map(); | 4415 var _json = new core.Map(); |
| 4179 if (source != null) { | 4416 if (source != null) { |
| 4180 _json["source"] = (source).toJson(); | 4417 _json["source"] = (source).toJson(); |
| 4181 } | 4418 } |
| 4182 return _json; | 4419 return _json; |
| 4183 } | 4420 } |
| 4184 } | 4421 } |
| 4185 | 4422 |
| 4186 /** Request to report the status of WorkItems. */ | 4423 /** Request to report the status of WorkItems. */ |
| 4187 class ReportWorkItemStatusRequest { | 4424 class ReportWorkItemStatusRequest { |
| 4188 /** The current timestamp at the worker. */ | 4425 /** The current timestamp at the worker. */ |
| 4189 core.String currentWorkerTime; | 4426 core.String currentWorkerTime; |
| 4190 /** The location which contains the WorkItem's job. */ | 4427 /** The location which contains the WorkItem's job. */ |
| 4191 core.String location; | 4428 core.String location; |
| 4192 /** | 4429 /** |
| 4193 * The order is unimportant, except that the order of the WorkItemServiceState | 4430 * The order is unimportant, except that the order of the |
| 4194 * messages in the ReportWorkItemStatusResponse corresponds to the order of | 4431 * WorkItemServiceState messages in the ReportWorkItemStatusResponse |
| 4195 * WorkItemStatus messages here. | 4432 * corresponds to the order of WorkItemStatus messages here. |
| 4196 */ | 4433 */ |
| 4197 core.List<WorkItemStatus> workItemStatuses; | 4434 core.List<WorkItemStatus> workItemStatuses; |
| 4198 /** | 4435 /** |
| 4199 * The ID of the worker reporting the WorkItem status. If this does not match | 4436 * The ID of the worker reporting the WorkItem status. If this |
| 4200 * the ID of the worker which the Dataflow service believes currently has the | 4437 * does not match the ID of the worker which the Dataflow service |
| 4201 * lease on the WorkItem, the report will be dropped (with an error response). | 4438 * believes currently has the lease on the WorkItem, the report |
| 4439 * will be dropped (with an error response). |
| 4202 */ | 4440 */ |
| 4203 core.String workerId; | 4441 core.String workerId; |
| 4204 | 4442 |
| 4205 ReportWorkItemStatusRequest(); | 4443 ReportWorkItemStatusRequest(); |
| 4206 | 4444 |
| 4207 ReportWorkItemStatusRequest.fromJson(core.Map _json) { | 4445 ReportWorkItemStatusRequest.fromJson(core.Map _json) { |
| 4208 if (_json.containsKey("currentWorkerTime")) { | 4446 if (_json.containsKey("currentWorkerTime")) { |
| 4209 currentWorkerTime = _json["currentWorkerTime"]; | 4447 currentWorkerTime = _json["currentWorkerTime"]; |
| 4210 } | 4448 } |
| 4211 if (_json.containsKey("location")) { | 4449 if (_json.containsKey("location")) { |
| (...skipping 21 matching lines...) |
| 4233 if (workerId != null) { | 4471 if (workerId != null) { |
| 4234 _json["workerId"] = workerId; | 4472 _json["workerId"] = workerId; |
| 4235 } | 4473 } |
| 4236 return _json; | 4474 return _json; |
| 4237 } | 4475 } |
| 4238 } | 4476 } |
| 4239 | 4477 |
| 4240 /** Response from a request to report the status of WorkItems. */ | 4478 /** Response from a request to report the status of WorkItems. */ |
| 4241 class ReportWorkItemStatusResponse { | 4479 class ReportWorkItemStatusResponse { |
| 4242 /** | 4480 /** |
| 4243 * A set of messages indicating the service-side state for each WorkItem whose | 4481 * A set of messages indicating the service-side state for each |
| 4244 * status was reported, in the same order as the WorkItemStatus messages in | 4482 * WorkItem whose status was reported, in the same order as the |
| 4245 * the ReportWorkItemStatusRequest which resulted in this response. | 4483 * WorkItemStatus messages in the ReportWorkItemStatusRequest which |
| 4484 * resulted in this response. |
| 4246 */ | 4485 */ |
| 4247 core.List<WorkItemServiceState> workItemServiceStates; | 4486 core.List<WorkItemServiceState> workItemServiceStates; |
| 4248 | 4487 |
| 4249 ReportWorkItemStatusResponse(); | 4488 ReportWorkItemStatusResponse(); |
| 4250 | 4489 |
| 4251 ReportWorkItemStatusResponse.fromJson(core.Map _json) { | 4490 ReportWorkItemStatusResponse.fromJson(core.Map _json) { |
| 4252 if (_json.containsKey("workItemServiceStates")) { | 4491 if (_json.containsKey("workItemServiceStates")) { |
| 4253 workItemServiceStates = _json["workItemServiceStates"].map((value) => new WorkItemServiceState.fromJson(value)).toList(); | 4492 workItemServiceStates = _json["workItemServiceStates"].map((value) => new WorkItemServiceState.fromJson(value)).toList(); |
| 4254 } | 4493 } |
| 4255 } | 4494 } |
| 4256 | 4495 |
| 4257 core.Map toJson() { | 4496 core.Map toJson() { |
| 4258 var _json = new core.Map(); | 4497 var _json = new core.Map(); |
| 4259 if (workItemServiceStates != null) { | 4498 if (workItemServiceStates != null) { |
| 4260 _json["workItemServiceStates"] = workItemServiceStates.map((value) => (val
ue).toJson()).toList(); | 4499 _json["workItemServiceStates"] = workItemServiceStates.map((value) => (val
ue).toJson()).toList(); |
| 4261 } | 4500 } |
| 4262 return _json; | 4501 return _json; |
| 4263 } | 4502 } |
| 4264 } | 4503 } |
| 4265 | 4504 |
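The order correspondence documented above means the i-th WorkItemServiceState answers the i-th reported WorkItemStatus; a sketch, assuming a `request`/`response` pair from one report call:

    for (var i = 0; i < response.workItemServiceStates.length; i++) {
      var status = request.workItemStatuses[i];      // what the worker reported
      var state = response.workItemServiceStates[i]; // the service's answer to it
      // Apply `state` to the work item described by `status`.
    }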
| 4266 /** | 4505 /** |
| 4267 * Represents the level of parallelism in a WorkItem's input, reported by the | 4506 * Represents the level of parallelism in a WorkItem's input, |
| 4268 * worker. | 4507 * reported by the worker. |
| 4269 */ | 4508 */ |
| 4270 class ReportedParallelism { | 4509 class ReportedParallelism { |
| 4271 /** | 4510 /** |
| 4272 * Specifies whether the parallelism is infinite. If true, "value" is ignored. | 4511 * Specifies whether the parallelism is infinite. If true, "value" is |
| 4273 * Infinite parallelism means the service will assume that the work item can | 4512 * ignored. |
| 4274 * always be split into more non-empty work items by dynamic splitting. This | 4513 * Infinite parallelism means the service will assume that the work item |
| 4275 * is a work-around for lack of support for infinity by the current JSON-based | 4514 * can always be split into more non-empty work items by dynamic splitting. |
| 4276 * Java RPC stack. | 4515 * This is a work-around for lack of support for infinity by the current |
| 4516 * JSON-based Java RPC stack. |
| 4277 */ | 4517 */ |
| 4278 core.bool isInfinite; | 4518 core.bool isInfinite; |
| 4279 /** Specifies the level of parallelism in case it is finite. */ | 4519 /** Specifies the level of parallelism in case it is finite. */ |
| 4280 core.double value; | 4520 core.double value; |
| 4281 | 4521 |
| 4282 ReportedParallelism(); | 4522 ReportedParallelism(); |
| 4283 | 4523 |
| 4284 ReportedParallelism.fromJson(core.Map _json) { | 4524 ReportedParallelism.fromJson(core.Map _json) { |
| 4285 if (_json.containsKey("isInfinite")) { | 4525 if (_json.containsKey("isInfinite")) { |
| 4286 isInfinite = _json["isInfinite"]; | 4526 isInfinite = _json["isInfinite"]; |
| (...skipping 11 matching lines...) |
| 4298 if (value != null) { | 4538 if (value != null) { |
| 4299 _json["value"] = value; | 4539 _json["value"] = value; |
| 4300 } | 4540 } |
| 4301 return _json; | 4541 return _json; |
| 4302 } | 4542 } |
| 4303 } | 4543 } |
| 4304 | 4544 |
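A short sketch of both cases; per the comment above, `value` is ignored once `isInfinite` is set:

    final unbounded = new ReportedParallelism()..isInfinite = true; // "value" ignored
    final bounded = new ReportedParallelism()..value = 42.0;        // finite parallelism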
| 4305 /** | 4545 /** |
| 4306 * Worker metrics exported from workers. This contains resource utilization | 4546 * Worker metrics exported from workers. This contains resource utilization |
| 4307 * metrics accumulated from a variety of sources. For more information, see | 4547 * metrics accumulated from a variety of sources. For more information, see |
| 4308 * go/df-resource-signals. Note that this proto closely follows the structure of | 4548 * go/df-resource-signals. |
| 4309 * its DFE siblings in its contents. | |
| 4310 */ | 4549 */ |
| 4311 class ResourceUtilizationReport { | 4550 class ResourceUtilizationReport { |
| 4312 /** | 4551 /** CPU utilization samples. */ |
| 4313 * Each Struct must parallel DFE worker metrics protos (eg., cpu_time metric | 4552 core.List<CPUTime> cpuTime; |
| 4314 * will have nested values “timestamp_ms, total_ms, rate”). | |
| 4315 * | |
| 4316 * The values for Object must be JSON objects. It can consist of `num`, | |
| 4317 * `String`, `bool` and `null` as well as `Map` and `List` values. | |
| 4318 */ | |
| 4319 core.List<core.Map<core.String, core.Object>> metrics; | |
| 4320 | 4553 |
| 4321 ResourceUtilizationReport(); | 4554 ResourceUtilizationReport(); |
| 4322 | 4555 |
| 4323 ResourceUtilizationReport.fromJson(core.Map _json) { | 4556 ResourceUtilizationReport.fromJson(core.Map _json) { |
| 4324 if (_json.containsKey("metrics")) { | 4557 if (_json.containsKey("cpuTime")) { |
| 4325 metrics = _json["metrics"]; | 4558 cpuTime = _json["cpuTime"].map((value) => new CPUTime.fromJson(value)).toList(); |
| 4326 } | 4559 } |
| 4327 } | 4560 } |
| 4328 | 4561 |
| 4329 core.Map toJson() { | 4562 core.Map toJson() { |
| 4330 var _json = new core.Map(); | 4563 var _json = new core.Map(); |
| 4331 if (metrics != null) { | 4564 if (cpuTime != null) { |
| 4332 _json["metrics"] = metrics; | 4565 _json["cpuTime"] = cpuTime.map((value) => (value).toJson()).toList(); |
| 4333 } | 4566 } |
| 4334 return _json; | 4567 return _json; |
| 4335 } | 4568 } |
| 4336 } | 4569 } |
| 4337 | 4570 |
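With this change the report carries typed CPUTime samples rather than raw metric maps; a hedged sketch, where `rawSamples` is an assumed list of CPU-time JSON maps gathered on the worker:

    final report = new ResourceUtilizationReport()
      ..cpuTime = rawSamples
          .map((sample) => new CPUTime.fromJson(sample))
          .toList();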
| 4338 /** Service-side response to WorkerMessage reporting resource utilization. */ | 4571 /** Service-side response to WorkerMessage reporting resource utilization. */ |
| 4339 class ResourceUtilizationReportResponse { | 4572 class ResourceUtilizationReportResponse { |
| 4340 | 4573 |
| 4341 ResourceUtilizationReportResponse(); | 4574 ResourceUtilizationReportResponse(); |
| 4342 | 4575 |
| 4343 ResourceUtilizationReportResponse.fromJson(core.Map _json) { | 4576 ResourceUtilizationReportResponse.fromJson(core.Map _json) { |
| 4344 } | 4577 } |
| 4345 | 4578 |
| 4346 core.Map toJson() { | 4579 core.Map toJson() { |
| 4347 var _json = new core.Map(); | 4580 var _json = new core.Map(); |
| 4348 return _json; | 4581 return _json; |
| 4349 } | 4582 } |
| 4350 } | 4583 } |
| 4351 | 4584 |
| 4352 /** The environment values to set at runtime. */ | 4585 /** The environment values to set at runtime. */ |
| 4353 class RuntimeEnvironment { | 4586 class RuntimeEnvironment { |
| 4354 /** | 4587 /** |
| 4355 * Whether to bypass the safety checks for the job's temporary directory. Use | 4588 * Whether to bypass the safety checks for the job's temporary directory. |
| 4356 * with caution. | 4589 * Use with caution. |
| 4357 */ | 4590 */ |
| 4358 core.bool bypassTempDirValidation; | 4591 core.bool bypassTempDirValidation; |
| 4359 /** | 4592 /** |
| 4360 * The maximum number of Google Compute Engine instances to be made available | 4593 * The maximum number of Google Compute Engine instances to be made |
| 4361 * to your pipeline during execution, from 1 to 1000. | 4594 * available to your pipeline during execution, from 1 to 1000. |
| 4362 */ | 4595 */ |
| 4363 core.int maxWorkers; | 4596 core.int maxWorkers; |
| 4364 /** The email address of the service account to run the job as. */ | 4597 /** The email address of the service account to run the job as. */ |
| 4365 core.String serviceAccountEmail; | 4598 core.String serviceAccountEmail; |
| 4366 /** | 4599 /** |
| 4367 * The Cloud Storage path to use for temporary files. Must be a valid Cloud | 4600 * The Cloud Storage path to use for temporary files. |
| 4368 * Storage URL, beginning with `gs://`. | 4601 * Must be a valid Cloud Storage URL, beginning with `gs://`. |
| 4369 */ | 4602 */ |
| 4370 core.String tempLocation; | 4603 core.String tempLocation; |
| 4371 /** | 4604 /** |
| 4372 * The Compute Engine [availability | 4605 * The Compute Engine [availability |
| 4373 * zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones) | 4606 * zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones) |
| 4374 * for launching worker instances to run your pipeline. | 4607 * for launching worker instances to run your pipeline. |
| 4375 */ | 4608 */ |
| 4376 core.String zone; | 4609 core.String zone; |
| 4377 | 4610 |
| 4378 RuntimeEnvironment(); | 4611 RuntimeEnvironment(); |
| (...skipping 68 matching lines...) |
| 4447 if (data != null) { | 4680 if (data != null) { |
| 4448 _json["data"] = data; | 4681 _json["data"] = data; |
| 4449 } | 4682 } |
| 4450 if (workerId != null) { | 4683 if (workerId != null) { |
| 4451 _json["workerId"] = workerId; | 4684 _json["workerId"] = workerId; |
| 4452 } | 4685 } |
| 4453 return _json; | 4686 return _json; |
| 4454 } | 4687 } |
| 4455 } | 4688 } |
| 4456 | 4689 |
| 4457 /** Response to a send capture request. Contains no fields. */ | 4690 /** |
| 4691 * Response to a send capture request. |
| 4692 * Contains no fields. |
| 4693 */ |
| 4458 class SendDebugCaptureResponse { | 4694 class SendDebugCaptureResponse { |
| 4459 | 4695 |
| 4460 SendDebugCaptureResponse(); | 4696 SendDebugCaptureResponse(); |
| 4461 | 4697 |
| 4462 SendDebugCaptureResponse.fromJson(core.Map _json) { | 4698 SendDebugCaptureResponse.fromJson(core.Map _json) { |
| 4463 } | 4699 } |
| 4464 | 4700 |
| 4465 core.Map toJson() { | 4701 core.Map toJson() { |
| 4466 var _json = new core.Map(); | 4702 var _json = new core.Map(); |
| 4467 return _json; | 4703 return _json; |
| (...skipping 46 matching lines...) |
| 4514 | 4750 |
| 4515 /** Describes a particular function to invoke. */ | 4751 /** Describes a particular function to invoke. */ |
| 4516 class SeqMapTask { | 4752 class SeqMapTask { |
| 4517 /** Information about each of the inputs. */ | 4753 /** Information about each of the inputs. */ |
| 4518 core.List<SideInputInfo> inputs; | 4754 core.List<SideInputInfo> inputs; |
| 4519 /** The user-provided name of the SeqDo operation. */ | 4755 /** The user-provided name of the SeqDo operation. */ |
| 4520 core.String name; | 4756 core.String name; |
| 4521 /** Information about each of the outputs. */ | 4757 /** Information about each of the outputs. */ |
| 4522 core.List<SeqMapTaskOutputInfo> outputInfos; | 4758 core.List<SeqMapTaskOutputInfo> outputInfos; |
| 4523 /** | 4759 /** |
| 4524 * System-defined name of the stage containing the SeqDo operation. Unique | 4760 * System-defined name of the stage containing the SeqDo operation. |
| 4525 * across the workflow. | 4761 * Unique across the workflow. |
| 4526 */ | 4762 */ |
| 4527 core.String stageName; | 4763 core.String stageName; |
| 4528 /** | 4764 /** |
| 4529 * System-defined name of the SeqDo operation. Unique across the workflow. | 4765 * System-defined name of the SeqDo operation. |
| 4766 * Unique across the workflow. |
| 4530 */ | 4767 */ |
| 4531 core.String systemName; | 4768 core.String systemName; |
| 4532 /** | 4769 /** |
| 4533 * The user function to invoke. | 4770 * The user function to invoke. |
| 4534 * | 4771 * |
| 4535 * The values for Object must be JSON objects. It can consist of `num`, | 4772 * The values for Object must be JSON objects. It can consist of `num`, |
| 4536 * `String`, `bool` and `null` as well as `Map` and `List` values. | 4773 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 4537 */ | 4774 */ |
| 4538 core.Map<core.String, core.Object> userFn; | 4775 core.Map<core.String, core.Object> userFn; |
| 4539 | 4776 |
| (...skipping 108 matching lines...) |
| 4648 class SideInputInfo { | 4885 class SideInputInfo { |
| 4649 /** | 4886 /** |
| 4650 * How to interpret the source element(s) as a side input value. | 4887 * How to interpret the source element(s) as a side input value. |
| 4651 * | 4888 * |
| 4652 * The values for Object must be JSON objects. It can consist of `num`, | 4889 * The values for Object must be JSON objects. It can consist of `num`, |
| 4653 * `String`, `bool` and `null` as well as `Map` and `List` values. | 4890 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 4654 */ | 4891 */ |
| 4655 core.Map<core.String, core.Object> kind; | 4892 core.Map<core.String, core.Object> kind; |
| 4656 /** | 4893 /** |
| 4657 * The source(s) to read element(s) from to get the value of this side input. | 4894 * The source(s) to read element(s) from to get the value of this side input. |
| 4658 * If more than one source, then the elements are taken from the sources, in | 4895 * If more than one source, then the elements are taken from the |
| 4659 * the specified order if order matters. At least one source is required. | 4896 * sources, in the specified order if order matters. |
| 4897 * At least one source is required. |
| 4660 */ | 4898 */ |
| 4661 core.List<Source> sources; | 4899 core.List<Source> sources; |
| 4662 /** | 4900 /** |
| 4663 * The id of the tag the user code will access this side input by; this should | 4901 * The id of the tag the user code will access this side input by; |
| 4664 * correspond to the tag of some MultiOutputInfo. | 4902 * this should correspond to the tag of some MultiOutputInfo. |
| 4665 */ | 4903 */ |
| 4666 core.String tag; | 4904 core.String tag; |
| 4667 | 4905 |
| 4668 SideInputInfo(); | 4906 SideInputInfo(); |
| 4669 | 4907 |
| 4670 SideInputInfo.fromJson(core.Map _json) { | 4908 SideInputInfo.fromJson(core.Map _json) { |
| 4671 if (_json.containsKey("kind")) { | 4909 if (_json.containsKey("kind")) { |
| 4672 kind = _json["kind"]; | 4910 kind = _json["kind"]; |
| 4673 } | 4911 } |
| 4674 if (_json.containsKey("sources")) { | 4912 if (_json.containsKey("sources")) { |
| (...skipping 55 matching lines...) |
| 4730 if (spec != null) { | 4968 if (spec != null) { |
| 4731 _json["spec"] = spec; | 4969 _json["spec"] = spec; |
| 4732 } | 4970 } |
| 4733 return _json; | 4971 return _json; |
| 4734 } | 4972 } |
| 4735 } | 4973 } |
| 4736 | 4974 |
| 4737 /** A source that records can be read and decoded from. */ | 4975 /** A source that records can be read and decoded from. */ |
| 4738 class Source { | 4976 class Source { |
| 4739 /** | 4977 /** |
| 4740 * While splitting, sources may specify the produced bundles as differences | 4978 * While splitting, sources may specify the produced bundles |
| 4741 * against another source, in order to save backend-side memory and allow | 4979 * as differences against another source, in order to save backend-side |
| 4742 * bigger jobs. For details, see SourceSplitRequest. To support this use case, | 4980 * memory and allow bigger jobs. For details, see SourceSplitRequest. |
| 4743 * the full set of parameters of the source is logically obtained by taking | 4981 * To support this use case, the full set of parameters of the source |
| 4744 * the latest explicitly specified value of each parameter in the order: | 4982 * is logically obtained by taking the latest explicitly specified value |
| 4983 * of each parameter in the order: |
| 4745 * base_specs (later items win), spec (overrides anything in base_specs). | 4984 * base_specs (later items win), spec (overrides anything in base_specs). |
| 4746 * | 4985 * |
| 4747 * The values for Object must be JSON objects. It can consist of `num`, | 4986 * The values for Object must be JSON objects. It can consist of `num`, |
| 4748 * `String`, `bool` and `null` as well as `Map` and `List` values. | 4987 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 4749 */ | 4988 */ |
| 4750 core.List<core.Map<core.String, core.Object>> baseSpecs; | 4989 core.List<core.Map<core.String, core.Object>> baseSpecs; |
| 4751 /** | 4990 /** |
| 4752 * The codec to use to decode data read from the source. | 4991 * The codec to use to decode data read from the source. |
| 4753 * | 4992 * |
| 4754 * The values for Object must be JSON objects. It can consist of `num`, | 4993 * The values for Object must be JSON objects. It can consist of `num`, |
| 4755 * `String`, `bool` and `null` as well as `Map` and `List` values. | 4994 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 4756 */ | 4995 */ |
| 4757 core.Map<core.String, core.Object> codec; | 4996 core.Map<core.String, core.Object> codec; |
| 4758 /** | 4997 /** |
| 4759 * Setting this value to true hints to the framework that the source doesn't | 4998 * Setting this value to true hints to the framework that the source |
| 4760 * need splitting, and using SourceSplitRequest on it would yield | 4999 * doesn't need splitting, and using SourceSplitRequest on it would |
| 4761 * SOURCE_SPLIT_OUTCOME_USE_CURRENT. E.g. a file splitter may set this to true | 5000 * yield SOURCE_SPLIT_OUTCOME_USE_CURRENT. |
| 4762 * when splitting a single file into a set of byte ranges of appropriate size, | 5001 * |
| 4763 * and set this to false when splitting a filepattern into individual files. | 5002 * E.g. a file splitter may set this to true when splitting a single file |
| 4764 * However, for efficiency, a file splitter may decide to produce file | 5003 * into a set of byte ranges of appropriate size, and set this |
| 4765 * subranges directly from the filepattern to avoid a splitting round-trip. | 5004 * to false when splitting a filepattern into individual files. |
| 4766 * See SourceSplitRequest for an overview of the splitting process. This field | 5005 * However, for efficiency, a file splitter may decide to produce |
| 4767 * is meaningful only in the Source objects populated by the user (e.g. when | 5006 * file subranges directly from the filepattern to avoid a splitting |
| 4768 * filling in a DerivedSource). Source objects supplied by the framework to | 5007 * round-trip. |
| 4769 * the user don't have this field populated. | 5008 * |
| 5009 * See SourceSplitRequest for an overview of the splitting process. |
| 5010 * |
| 5011 * This field is meaningful only in the Source objects populated |
| 5012 * by the user (e.g. when filling in a DerivedSource). |
| 5013 * Source objects supplied by the framework to the user don't have |
| 5014 * this field populated. |
| 4770 */ | 5015 */ |
| 4771 core.bool doesNotNeedSplitting; | 5016 core.bool doesNotNeedSplitting; |
| 4772 /** | 5017 /** |
| 4773 * Optionally, metadata for this source can be supplied right away, avoiding a | 5018 * Optionally, metadata for this source can be supplied right away, |
| 4774 * SourceGetMetadataOperation roundtrip (see SourceOperationRequest). This | 5019 * avoiding a SourceGetMetadataOperation roundtrip |
| 4775 * field is meaningful only in the Source objects populated by the user (e.g. | 5020 * (see SourceOperationRequest). |
| 4776 * when filling in a DerivedSource). Source objects supplied by the framework | 5021 * |
| 4777 * to the user don't have this field populated. | 5022 * This field is meaningful only in the Source objects populated |
| 5023 * by the user (e.g. when filling in a DerivedSource). |
| 5024 * Source objects supplied by the framework to the user don't have |
| 5025 * this field populated. |
| 4778 */ | 5026 */ |
| 4779 SourceMetadata metadata; | 5027 SourceMetadata metadata; |
| 4780 /** | 5028 /** |
| 4781 * The source to read from, plus its parameters. | 5029 * The source to read from, plus its parameters. |
| 4782 * | 5030 * |
| 4783 * The values for Object must be JSON objects. It can consist of `num`, | 5031 * The values for Object must be JSON objects. It can consist of `num`, |
| 4784 * `String`, `bool` and `null` as well as `Map` and `List` values. | 5032 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 4785 */ | 5033 */ |
| 4786 core.Map<core.String, core.Object> spec; | 5034 core.Map<core.String, core.Object> spec; |
| 4787 | 5035 |
| (...skipping 122 matching lines...) |
| 4910 core.Map toJson() { | 5158 core.Map toJson() { |
| 4911 var _json = new core.Map(); | 5159 var _json = new core.Map(); |
| 4912 if (metadata != null) { | 5160 if (metadata != null) { |
| 4913 _json["metadata"] = (metadata).toJson(); | 5161 _json["metadata"] = (metadata).toJson(); |
| 4914 } | 5162 } |
| 4915 return _json; | 5163 return _json; |
| 4916 } | 5164 } |
| 4917 } | 5165 } |
| 4918 | 5166 |
| 4919 /** | 5167 /** |
| 4920 * Metadata about a Source useful for automatically optimizing and tuning the | 5168 * Metadata about a Source useful for automatically optimizing |
| 4921 * pipeline, etc. | 5169 * and tuning the pipeline, etc. |
| 4922 */ | 5170 */ |
| 4923 class SourceMetadata { | 5171 class SourceMetadata { |
| 4924 /** | 5172 /** |
| 4925 * An estimate of the total size (in bytes) of the data that would be read | 5173 * An estimate of the total size (in bytes) of the data that would be |
| 4926 * from this source. This estimate is in terms of external storage size, | 5174 * read from this source. This estimate is in terms of external storage |
| 4927 * before any decompression or other processing done by the reader. | 5175 * size, before any decompression or other processing done by the reader. |
| 4928 */ | 5176 */ |
| 4929 core.String estimatedSizeBytes; | 5177 core.String estimatedSizeBytes; |
| 4930 /** | 5178 /** |
| 4931 * Specifies that the size of this source is known to be infinite (this is a | 5179 * Specifies that the size of this source is known to be infinite |
| 4932 * streaming source). | 5180 * (this is a streaming source). |
| 4933 */ | 5181 */ |
| 4934 core.bool infinite; | 5182 core.bool infinite; |
| 4935 /** | 5183 /** |
| 4936 * Whether this source is known to produce key/value pairs with the (encoded) | 5184 * Whether this source is known to produce key/value pairs with |
| 4937 * keys in lexicographically sorted order. | 5185 * the (encoded) keys in lexicographically sorted order. |
| 4938 */ | 5186 */ |
| 4939 core.bool producesSortedKeys; | 5187 core.bool producesSortedKeys; |
| 4940 | 5188 |
| 4941 SourceMetadata(); | 5189 SourceMetadata(); |
| 4942 | 5190 |
| 4943 SourceMetadata.fromJson(core.Map _json) { | 5191 SourceMetadata.fromJson(core.Map _json) { |
| 4944 if (_json.containsKey("estimatedSizeBytes")) { | 5192 if (_json.containsKey("estimatedSizeBytes")) { |
| 4945 estimatedSizeBytes = _json["estimatedSizeBytes"]; | 5193 estimatedSizeBytes = _json["estimatedSizeBytes"]; |
| 4946 } | 5194 } |
| 4947 if (_json.containsKey("infinite")) { | 5195 if (_json.containsKey("infinite")) { |
| (...skipping 13 matching lines...) |
| 4961 _json["infinite"] = infinite; | 5209 _json["infinite"] = infinite; |
| 4962 } | 5210 } |
| 4963 if (producesSortedKeys != null) { | 5211 if (producesSortedKeys != null) { |
| 4964 _json["producesSortedKeys"] = producesSortedKeys; | 5212 _json["producesSortedKeys"] = producesSortedKeys; |
| 4965 } | 5213 } |
| 4966 return _json; | 5214 return _json; |
| 4967 } | 5215 } |
| 4968 } | 5216 } |
| 4969 | 5217 |
| 4970 /** | 5218 /** |
| 4971 * A work item that represents the different operations that can be performed on | 5219 * A work item that represents the different operations that can be |
| 4972 * a user-defined Source specification. | 5220 * performed on a user-defined Source specification. |
| 4973 */ | 5221 */ |
| 4974 class SourceOperationRequest { | 5222 class SourceOperationRequest { |
| 4975 /** Information about a request to get metadata about a source. */ | 5223 /** Information about a request to get metadata about a source. */ |
| 4976 SourceGetMetadataRequest getMetadata; | 5224 SourceGetMetadataRequest getMetadata; |
| 4977 /** Information about a request to split a source. */ | 5225 /** Information about a request to split a source. */ |
| 4978 SourceSplitRequest split; | 5226 SourceSplitRequest split; |
| 4979 | 5227 |
| 4980 SourceOperationRequest(); | 5228 SourceOperationRequest(); |
| 4981 | 5229 |
| 4982 SourceOperationRequest.fromJson(core.Map _json) { | 5230 SourceOperationRequest.fromJson(core.Map _json) { |
| (...skipping 12 matching lines...) |
| 4995 } | 5243 } |
| 4996 if (split != null) { | 5244 if (split != null) { |
| 4997 _json["split"] = (split).toJson(); | 5245 _json["split"] = (split).toJson(); |
| 4998 } | 5246 } |
| 4999 return _json; | 5247 return _json; |
| 5000 } | 5248 } |
| 5001 } | 5249 } |
| 5002 | 5250 |
| 5003 /** | 5251 /** |
| 5004 * The result of a SourceOperationRequest, specified in | 5252 * The result of a SourceOperationRequest, specified in |
| 5005 * ReportWorkItemStatusRequest.source_operation when the work item is completed. | 5253 * ReportWorkItemStatusRequest.source_operation when the work item |
| 5254 * is completed. |
| 5006 */ | 5255 */ |
| 5007 class SourceOperationResponse { | 5256 class SourceOperationResponse { |
| 5008 /** A response to a request to get metadata about a source. */ | 5257 /** A response to a request to get metadata about a source. */ |
| 5009 SourceGetMetadataResponse getMetadata; | 5258 SourceGetMetadataResponse getMetadata; |
| 5010 /** A response to a request to split a source. */ | 5259 /** A response to a request to split a source. */ |
| 5011 SourceSplitResponse split; | 5260 SourceSplitResponse split; |
| 5012 | 5261 |
| 5013 SourceOperationResponse(); | 5262 SourceOperationResponse(); |
| 5014 | 5263 |
| 5015 SourceOperationResponse.fromJson(core.Map _json) { | 5264 SourceOperationResponse.fromJson(core.Map _json) { |
| (...skipping 11 matching lines...) |
| 5027 _json["getMetadata"] = (getMetadata).toJson(); | 5276 _json["getMetadata"] = (getMetadata).toJson(); |
| 5028 } | 5277 } |
| 5029 if (split != null) { | 5278 if (split != null) { |
| 5030 _json["split"] = (split).toJson(); | 5279 _json["split"] = (split).toJson(); |
| 5031 } | 5280 } |
| 5032 return _json; | 5281 return _json; |
| 5033 } | 5282 } |
| 5034 } | 5283 } |
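
The request/response pair behaves like a tagged union: exactly one of `getMetadata` or `split` is populated, and the response mirrors whichever field was set. A hedged sketch, assuming SourceGetMetadataRequest carries a `source` field for the source to inspect (that class falls outside this excerpt), with a hypothetical spec payload:

    import 'package:googleapis_beta/dataflow/v1b3.dart';

    void main() {
      // Exactly one of `getMetadata` or `split` should be populated;
      // the SourceOperationResponse mirrors whichever was set.
      var request = new SourceOperationRequest()
        ..getMetadata = (new SourceGetMetadataRequest()
          ..source = (new Source()..spec = {"@type": "ExampleSource"}));
      print(request.toJson());
    }
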
| 5035 | 5284 |
| 5036 /** | 5285 /** |
| 5037 * Hints for splitting a Source into bundles (parts for parallel processing) | 5286 * Hints for splitting a Source into bundles (parts for parallel |
| 5038 * using SourceSplitRequest. | 5287 * processing) using SourceSplitRequest. |
| 5039 */ | 5288 */ |
| 5040 class SourceSplitOptions { | 5289 class SourceSplitOptions { |
| 5041 /** | 5290 /** |
| 5042 * The source should be split into a set of bundles where the estimated size | 5291 * The source should be split into a set of bundles where the estimated size |
| 5043 * of each is approximately this many bytes. | 5292 * of each is approximately this many bytes. |
| 5044 */ | 5293 */ |
| 5045 core.String desiredBundleSizeBytes; | 5294 core.String desiredBundleSizeBytes; |
| 5046 /** DEPRECATED in favor of desired_bundle_size_bytes. */ | 5295 /** DEPRECATED in favor of desired_bundle_size_bytes. */ |
| 5047 core.String desiredShardSizeBytes; | 5296 core.String desiredShardSizeBytes; |
| 5048 | 5297 |
| (...skipping 14 matching lines...) |
| 5063 _json["desiredBundleSizeBytes"] = desiredBundleSizeBytes; | 5312 _json["desiredBundleSizeBytes"] = desiredBundleSizeBytes; |
| 5064 } | 5313 } |
| 5065 if (desiredShardSizeBytes != null) { | 5314 if (desiredShardSizeBytes != null) { |
| 5066 _json["desiredShardSizeBytes"] = desiredShardSizeBytes; | 5315 _json["desiredShardSizeBytes"] = desiredShardSizeBytes; |
| 5067 } | 5316 } |
| 5068 return _json; | 5317 return _json; |
| 5069 } | 5318 } |
| 5070 } | 5319 } |
| 5071 | 5320 |
| 5072 /** | 5321 /** |
| 5073 * Represents the operation to split a high-level Source specification into | 5322 * Represents the operation to split a high-level Source specification |
| 5074 * bundles (parts for parallel processing). At a high level, splitting of a | 5323 * into bundles (parts for parallel processing). |
| 5075 * source into bundles happens as follows: SourceSplitRequest is applied to the | 5324 * |
| 5076 * source. If it returns SOURCE_SPLIT_OUTCOME_USE_CURRENT, no further splitting | 5325 * At a high level, splitting of a source into bundles happens as follows: |
| 5077 * happens and the source is used "as is". Otherwise, splitting is applied | 5326 * SourceSplitRequest is applied to the source. If it returns |
| 5078 * recursively to each produced DerivedSource. As an optimization, for any | 5327 * SOURCE_SPLIT_OUTCOME_USE_CURRENT, no further splitting happens and the source |
| 5079 * Source, if its does_not_need_splitting is true, the framework assumes that | 5328 * is used "as is". Otherwise, splitting is applied recursively to each |
| 5080 * splitting this source would return SOURCE_SPLIT_OUTCOME_USE_CURRENT, and | 5329 * produced DerivedSource. |
| 5081 * doesn't initiate a SourceSplitRequest. This applies both to the initial | 5330 * |
| 5082 * source being split and to bundles produced from it. | 5331 * As an optimization, for any Source, if its does_not_need_splitting is |
| 5332 * true, the framework assumes that splitting this source would return |
| 5333 * SOURCE_SPLIT_OUTCOME_USE_CURRENT, and doesn't initiate a SourceSplitRequest. |
| 5334 * This applies both to the initial source being split and to bundles |
| 5335 * produced from it. |
| 5083 */ | 5336 */ |
| 5084 class SourceSplitRequest { | 5337 class SourceSplitRequest { |
| 5085 /** Hints for tuning the splitting process. */ | 5338 /** Hints for tuning the splitting process. */ |
| 5086 SourceSplitOptions options; | 5339 SourceSplitOptions options; |
| 5087 /** Specification of the source to be split. */ | 5340 /** Specification of the source to be split. */ |
| 5088 Source source; | 5341 Source source; |
| 5089 | 5342 |
| 5090 SourceSplitRequest(); | 5343 SourceSplitRequest(); |
| 5091 | 5344 |
| 5092 SourceSplitRequest.fromJson(core.Map _json) { | 5345 SourceSplitRequest.fromJson(core.Map _json) { |
| (...skipping 13 matching lines...) |
| 5106 if (source != null) { | 5359 if (source != null) { |
| 5107 _json["source"] = (source).toJson(); | 5360 _json["source"] = (source).toJson(); |
| 5108 } | 5361 } |
| 5109 return _json; | 5362 return _json; |
| 5110 } | 5363 } |
| 5111 } | 5364 } |
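
A sketch of requesting ~64 MB bundles; the source spec contents (`@type`, `filepattern`) and the bucket path are hypothetical, and `desiredBundleSizeBytes`, like other int64 fields, is a decimal string:

    import 'package:googleapis_beta/dataflow/v1b3.dart';

    void main() {
      // Hypothetical source spec; ask the splitter for ~64 MB bundles.
      var splitRequest = new SourceSplitRequest()
        ..source = (new Source()
          ..spec = {
            "@type": "ExampleTextSource",
            "filepattern": "gs://my-bucket/input-*"
          }
          ..doesNotNeedSplitting = false)
        ..options =
            (new SourceSplitOptions()..desiredBundleSizeBytes = "67108864");
      print(splitRequest.toJson());
    }
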
| 5112 | 5365 |
| 5113 /** The response to a SourceSplitRequest. */ | 5366 /** The response to a SourceSplitRequest. */ |
| 5114 class SourceSplitResponse { | 5367 class SourceSplitResponse { |
| 5115 /** | 5368 /** |
| 5116 * If outcome is SPLITTING_HAPPENED, then this is a list of bundles into which | 5369 * If outcome is SPLITTING_HAPPENED, then this is a list of bundles |
| 5117 * the source was split. Otherwise this field is ignored. This list can be | 5370 * into which the source was split. Otherwise this field is ignored. |
| 5118 * empty, which means the source represents an empty input. | 5371 * This list can be empty, which means the source represents an empty input. |
| 5119 */ | 5372 */ |
| 5120 core.List<DerivedSource> bundles; | 5373 core.List<DerivedSource> bundles; |
| 5121 /** | 5374 /** |
| 5122 * Indicates whether splitting happened and produced a list of bundles. If | 5375 * Indicates whether splitting happened and produced a list of bundles. |
| 5123 * this is USE_CURRENT_SOURCE_AS_IS, the current source should be processed | 5376 * If this is USE_CURRENT_SOURCE_AS_IS, the current source should |
| 5124 * "as is" without splitting. "bundles" is ignored in this case. If this is | 5377 * be processed "as is" without splitting. "bundles" is ignored in this case. |
| 5125 * SPLITTING_HAPPENED, then "bundles" contains a list of bundles into which | 5378 * If this is SPLITTING_HAPPENED, then "bundles" contains a list of |
| 5126 * the source was split. | 5379 * bundles into which the source was split. |
| 5127 * Possible string values are: | 5380 * Possible string values are: |
| 5128 * - "SOURCE_SPLIT_OUTCOME_UNKNOWN" : A SOURCE_SPLIT_OUTCOME_UNKNOWN. | 5381 * - "SOURCE_SPLIT_OUTCOME_UNKNOWN" : The source split outcome is unknown, or |
| 5129 * - "SOURCE_SPLIT_OUTCOME_USE_CURRENT" : A SOURCE_SPLIT_OUTCOME_USE_CURRENT. | 5382 * unspecified. |
| 5130 * - "SOURCE_SPLIT_OUTCOME_SPLITTING_HAPPENED" : A | 5383 * - "SOURCE_SPLIT_OUTCOME_USE_CURRENT" : The current source should be |
| 5131 * SOURCE_SPLIT_OUTCOME_SPLITTING_HAPPENED. | 5384 * processed "as is" without splitting. |
| 5385 * - "SOURCE_SPLIT_OUTCOME_SPLITTING_HAPPENED" : Splitting produced a list of |
| 5386 * bundles. |
| 5132 */ | 5387 */ |
| 5133 core.String outcome; | 5388 core.String outcome; |
| 5134 /** DEPRECATED in favor of bundles. */ | 5389 /** DEPRECATED in favor of bundles. */ |
| 5135 core.List<SourceSplitShard> shards; | 5390 core.List<SourceSplitShard> shards; |
| 5136 | 5391 |
| 5137 SourceSplitResponse(); | 5392 SourceSplitResponse(); |
| 5138 | 5393 |
| 5139 SourceSplitResponse.fromJson(core.Map _json) { | 5394 SourceSplitResponse.fromJson(core.Map _json) { |
| 5140 if (_json.containsKey("bundles")) { | 5395 if (_json.containsKey("bundles")) { |
| 5141 bundles = _json["bundles"].map((value) => new DerivedSource.fromJson(value)).toList(); | 5396 bundles = _json["bundles"].map((value) => new DerivedSource.fromJson(value)).toList(); |
| (...skipping 19 matching lines...) |
| 5161 } | 5416 } |
| 5162 return _json; | 5417 return _json; |
| 5163 } | 5418 } |
| 5164 } | 5419 } |
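
A sketch of consuming the response using the outcome values documented above; note that `bundles` is only meaningful when splitting happened, and an empty list means the source represents empty input:

    import 'package:googleapis_beta/dataflow/v1b3.dart';

    // The enum strings are the values documented above; `bundles` may be
    // null when no splitting happened.
    void consume(SourceSplitResponse response) {
      if (response.outcome == "SOURCE_SPLIT_OUTCOME_SPLITTING_HAPPENED") {
        // An empty bundle list is legal: the source had no input.
        print("split into ${response.bundles.length} bundle(s)");
      } else {
        print("process the current source as is");
      }
    }

    void main() {
      consume(new SourceSplitResponse()
        ..outcome = "SOURCE_SPLIT_OUTCOME_USE_CURRENT");
    }
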
| 5165 | 5420 |
| 5166 /** DEPRECATED in favor of DerivedSource. */ | 5421 /** DEPRECATED in favor of DerivedSource. */ |
| 5167 class SourceSplitShard { | 5422 class SourceSplitShard { |
| 5168 /** | 5423 /** |
| 5169 * DEPRECATED | 5424 * DEPRECATED |
| 5170 * Possible string values are: | 5425 * Possible string values are: |
| 5171 * - "SOURCE_DERIVATION_MODE_UNKNOWN" : A SOURCE_DERIVATION_MODE_UNKNOWN. | 5426 * - "SOURCE_DERIVATION_MODE_UNKNOWN" : The source derivation is unknown, or |
| 5172 * - "SOURCE_DERIVATION_MODE_INDEPENDENT" : A | 5427 * unspecified. |
| 5173 * SOURCE_DERIVATION_MODE_INDEPENDENT. | 5428 * - "SOURCE_DERIVATION_MODE_INDEPENDENT" : Produce a completely independent |
| 5174 * - "SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT" : A | 5429 * Source with no base. |
| 5175 * SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT. | 5430 * - "SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT" : Produce a Source based on the |
| 5176 * - "SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT" : A | 5431 * Source being split. |
| 5177 * SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT. | 5432 * - "SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT" : Produce a Source based on |
| 5433 * the base of the Source being split. |
| 5178 */ | 5434 */ |
| 5179 core.String derivationMode; | 5435 core.String derivationMode; |
| 5180 /** DEPRECATED */ | 5436 /** DEPRECATED */ |
| 5181 Source source; | 5437 Source source; |
| 5182 | 5438 |
| 5183 SourceSplitShard(); | 5439 SourceSplitShard(); |
| 5184 | 5440 |
| 5185 SourceSplitShard.fromJson(core.Map _json) { | 5441 SourceSplitShard.fromJson(core.Map _json) { |
| 5186 if (_json.containsKey("derivationMode")) { | 5442 if (_json.containsKey("derivationMode")) { |
| 5187 derivationMode = _json["derivationMode"]; | 5443 derivationMode = _json["derivationMode"]; |
| (...skipping 73 matching lines...) |
| 5261 } | 5517 } |
| 5262 if (stateFamily != null) { | 5518 if (stateFamily != null) { |
| 5263 _json["stateFamily"] = stateFamily; | 5519 _json["stateFamily"] = stateFamily; |
| 5264 } | 5520 } |
| 5265 return _json; | 5521 return _json; |
| 5266 } | 5522 } |
| 5267 } | 5523 } |
| 5268 | 5524 |
| 5269 /** | 5525 /** |
| 5270 * The `Status` type defines a logical error model that is suitable for | 5526 * The `Status` type defines a logical error model that is suitable for |
| 5271 * different programming environments, including REST APIs and RPC APIs. It is | 5527 * different |
| 5272 * used by [gRPC](https://github.com/grpc). The error model is designed to be: - | 5528 * programming environments, including REST APIs and RPC APIs. It is used by |
| 5273 * Simple to use and understand for most users - Flexible enough to meet | 5529 * [gRPC](https://github.com/grpc). The error model is designed to be: |
| 5274 * unexpected needs # Overview The `Status` message contains three pieces of | 5530 * |
| 5275 * data: error code, error message, and error details. The error code should be | 5531 * - Simple to use and understand for most users |
| 5276 * an enum value of google.rpc.Code, but it may accept additional error codes if | 5532 * - Flexible enough to meet unexpected needs |
| 5277 * needed. The error message should be a developer-facing English message that | 5533 * |
| 5278 * helps developers *understand* and *resolve* the error. If a localized | 5534 * # Overview |
| 5279 * user-facing error message is needed, put the localized message in the error | 5535 * |
| 5280 * details or localize it in the client. The optional error details may contain | 5536 * The `Status` message contains three pieces of data: error code, error |
| 5281 * arbitrary information about the error. There is a predefined set of error | 5537 * message, |
| 5282 * detail types in the package `google.rpc` which can be used for common error | 5538 * and error details. The error code should be an enum value of |
| 5283 * conditions. # Language mapping The `Status` message is the logical | 5539 * google.rpc.Code, but it may accept additional error codes if needed. The |
| 5284 * representation of the error model, but it is not necessarily the actual wire | 5540 * error message should be a developer-facing English message that helps |
| 5285 * format. When the `Status` message is exposed in different client libraries | 5541 * developers *understand* and *resolve* the error. If a localized user-facing |
| 5286 * and different wire protocols, it can be mapped differently. For example, it | 5542 * error message is needed, put the localized message in the error details or |
| 5287 * will likely be mapped to some exceptions in Java, but more likely mapped to | 5543 * localize it in the client. The optional error details may contain arbitrary |
| 5288 * some error codes in C. # Other uses The error model and the `Status` message | 5544 * information about the error. There is a predefined set of error detail types |
| 5289 * can be used in a variety of environments, either with or without APIs, to | 5545 * in the package `google.rpc` which can be used for common error conditions. |
| 5290 * provide a consistent developer experience across different environments. | 5546 * |
| 5291 * Example uses of this error model include: - Partial errors. If a service | 5547 * # Language mapping |
| 5292 * needs to return partial errors to the client, it may embed the `Status` in | 5548 * |
| 5293 * the normal response to indicate the partial errors. - Workflow errors. A | 5549 * The `Status` message is the logical representation of the error model, but it |
| 5294 * typical workflow has multiple steps. Each step may have a `Status` message | 5550 * is not necessarily the actual wire format. When the `Status` message is |
| 5295 * for error reporting purpose. - Batch operations. If a client uses batch | 5551 * exposed in different client libraries and different wire protocols, it can be |
| 5296 * request and batch response, the `Status` message should be used directly | 5552 * mapped differently. For example, it will likely be mapped to some exceptions |
| 5297 * inside batch response, one for each error sub-response. - Asynchronous | 5553 * in Java, but more likely mapped to some error codes in C. |
| 5298 * operations. If an API call embeds asynchronous operation results in its | 5554 * |
| 5299 * response, the status of those operations should be represented directly using | 5555 * # Other uses |
| 5300 * the `Status` message. - Logging. If some API errors are stored in logs, the | 5556 * |
| 5301 * message `Status` could be used directly after any stripping needed for | 5557 * The error model and the `Status` message can be used in a variety of |
| 5302 * security/privacy reasons. | 5558 * environments, either with or without APIs, to provide a |
| 5559 * consistent developer experience across different environments. |
| 5560 * |
| 5561 * Example uses of this error model include: |
| 5562 * |
| 5563 * - Partial errors. If a service needs to return partial errors to the client, |
| 5564 * it may embed the `Status` in the normal response to indicate the partial |
| 5565 * errors. |
| 5566 * |
| 5567 * - Workflow errors. A typical workflow has multiple steps. Each step may |
| 5568 * have a `Status` message for error reporting purpose. |
| 5569 * |
| 5570 * - Batch operations. If a client uses batch request and batch response, the |
| 5571 * `Status` message should be used directly inside batch response, one for |
| 5572 * each error sub-response. |
| 5573 * |
| 5574 * - Asynchronous operations. If an API call embeds asynchronous operation |
| 5575 * results in its response, the status of those operations should be |
| 5576 * represented directly using the `Status` message. |
| 5577 * |
| 5578 * - Logging. If some API errors are stored in logs, the message `Status` could |
| 5579 * be used directly after any stripping needed for security/privacy reasons. |
| 5303 */ | 5580 */ |
| 5304 class Status { | 5581 class Status { |
| 5305 /** The status code, which should be an enum value of google.rpc.Code. */ | 5582 /** The status code, which should be an enum value of google.rpc.Code. */ |
| 5306 core.int code; | 5583 core.int code; |
| 5307 /** | 5584 /** |
| 5308 * A list of messages that carry the error details. There will be a common set | 5585 * A list of messages that carry the error details. There will be a |
| 5309 * of message types for APIs to use. | 5586 * common set of message types for APIs to use. |
| 5310 * | 5587 * |
| 5311 * The values for Object must be JSON objects. It can consist of `num`, | 5588 * The values for Object must be JSON objects. It can consist of `num`, |
| 5312 * `String`, `bool` and `null` as well as `Map` and `List` values. | 5589 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 5313 */ | 5590 */ |
| 5314 core.List<core.Map<core.String, core.Object>> details; | 5591 core.List<core.Map<core.String, core.Object>> details; |
| 5315 /** | 5592 /** |
| 5316 * A developer-facing error message, which should be in English. Any | 5593 * A developer-facing error message, which should be in English. Any |
| 5317 * user-facing error message should be localized and sent in the | 5594 * user-facing error message should be localized and sent in the |
| 5318 * google.rpc.Status.details field, or localized by the client. | 5595 * google.rpc.Status.details field, or localized by the client. |
| 5319 */ | 5596 */ |
| (...skipping 22 matching lines...) |
| 5342 _json["details"] = details; | 5619 _json["details"] = details; |
| 5343 } | 5620 } |
| 5344 if (message != null) { | 5621 if (message != null) { |
| 5345 _json["message"] = message; | 5622 _json["message"] = message; |
| 5346 } | 5623 } |
| 5347 return _json; | 5624 return _json; |
| 5348 } | 5625 } |
| 5349 } | 5626 } |
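
A construction sketch; the message text is hypothetical, `code` follows google.rpc.Code (9 is FAILED_PRECONDITION), and the `@type` key is the usual convention for packing typed error details:

    import 'package:googleapis_beta/dataflow/v1b3.dart';

    void main() {
      // Hypothetical error; 9 == FAILED_PRECONDITION in google.rpc.Code.
      var status = new Status()
        ..code = 9
        ..message = "Worker pool quota exceeded."
        ..details = [
          {"@type": "type.googleapis.com/google.rpc.DebugInfo"}
        ];
      print(status.toJson());
    }
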
| 5350 | 5627 |
| 5351 /** | 5628 /** |
| 5352 * Defines a particular step within a Cloud Dataflow job. A job consists of | 5629 * Defines a particular step within a Cloud Dataflow job. |
| 5353 * multiple steps, each of which performs some specific operation as part of the | 5630 * |
| 5354 * overall job. Data is typically passed from one step to another as part of the | 5631 * A job consists of multiple steps, each of which performs some |
| 5355 * job. Here's an example of a sequence of steps which together implement a | 5632 * specific operation as part of the overall job. Data is typically |
| 5356 * Map-Reduce job: * Read a collection of data from some source, parsing the | 5633 * passed from one step to another as part of the job. |
| 5357 * collection's elements. * Validate the elements. * Apply a user-defined | 5634 * |
| 5358 * function to map each element to some value and extract an element-specific | 5635 * Here's an example of a sequence of steps which together implement a |
| 5359 * key value. * Group elements with the same key into a single element with that | 5636 * Map-Reduce job: |
| 5360 * key, transforming a multiply-keyed collection into a uniquely-keyed | 5637 * |
| 5361 * collection. * Write the elements out to some data sink. Note that the Cloud | 5638 * * Read a collection of data from some source, parsing the |
| 5362 * Dataflow service may be used to run many different types of jobs, not just | 5639 * collection's elements. |
| 5363 * Map-Reduce. | 5640 * |
| 5641 * * Validate the elements. |
| 5642 * |
| 5643 * * Apply a user-defined function to map each element to some value |
| 5644 * and extract an element-specific key value. |
| 5645 * |
| 5646 * * Group elements with the same key into a single element with |
| 5647 * that key, transforming a multiply-keyed collection into a |
| 5648 * uniquely-keyed collection. |
| 5649 * |
| 5650 * * Write the elements out to some data sink. |
| 5651 * |
| 5652 * Note that the Cloud Dataflow service may be used to run many different |
| 5653 * types of jobs, not just Map-Reduce. |
| 5364 */ | 5654 */ |
| 5365 class Step { | 5655 class Step { |
| 5366 /** The kind of step in the Cloud Dataflow job. */ | 5656 /** The kind of step in the Cloud Dataflow job. */ |
| 5367 core.String kind; | 5657 core.String kind; |
| 5368 /** | 5658 /** |
| 5369 * The name that identifies the step. This must be unique for each step with | 5659 * The name that identifies the step. This must be unique for each |
| 5370 * respect to all other steps in the Cloud Dataflow job. | 5660 * step with respect to all other steps in the Cloud Dataflow job. |
| 5371 */ | 5661 */ |
| 5372 core.String name; | 5662 core.String name; |
| 5373 /** | 5663 /** |
| 5374 * Named properties associated with the step. Each kind of predefined step has | 5664 * Named properties associated with the step. Each kind of |
| 5375 * its own required set of properties. | 5665 * predefined step has its own required set of properties. |
| 5376 * | 5666 * |
| 5377 * The values for Object must be JSON objects. It can consist of `num`, | 5667 * The values for Object must be JSON objects. It can consist of `num`, |
| 5378 * `String`, `bool` and `null` as well as `Map` and `List` values. | 5668 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 5379 */ | 5669 */ |
| 5380 core.Map<core.String, core.Object> properties; | 5670 core.Map<core.String, core.Object> properties; |
| 5381 | 5671 |
| 5382 Step(); | 5672 Step(); |
| 5383 | 5673 |
| 5384 Step.fromJson(core.Map _json) { | 5674 Step.fromJson(core.Map _json) { |
| 5385 if (_json.containsKey("kind")) { | 5675 if (_json.containsKey("kind")) { |
| (...skipping 16 matching lines...) |
| 5402 _json["name"] = name; | 5692 _json["name"] = name; |
| 5403 } | 5693 } |
| 5404 if (properties != null) { | 5694 if (properties != null) { |
| 5405 _json["properties"] = properties; | 5695 _json["properties"] = properties; |
| 5406 } | 5696 } |
| 5407 return _json; | 5697 return _json; |
| 5408 } | 5698 } |
| 5409 } | 5699 } |
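
A sketch of a hand-built step; the kind name and property keys are illustrative only, since each predefined kind defines its own required property set:

    import 'package:googleapis_beta/dataflow/v1b3.dart';

    void main() {
      // Illustrative kind and properties; real values depend on the
      // predefined step kind being used.
      var step = new Step()
        ..kind = "ParallelRead"
        ..name = "ReadInput"
        ..properties = {
          "format": "text",
          "filepattern": "gs://my-bucket/input-*"
        };
      print(step.toJson());
    }
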
| 5410 | 5700 |
| 5411 /** | 5701 /** |
| 5412 * Describes a stream of data, either as input to be processed or as output of a | 5702 * Describes a stream of data, either as input to be processed or as |
| 5413 * streaming Dataflow job. | 5703 * output of a streaming Dataflow job. |
| 5414 */ | 5704 */ |
| 5415 class StreamLocation { | 5705 class StreamLocation { |
| 5416 /** The stream is a custom source. */ | 5706 /** The stream is a custom source. */ |
| 5417 CustomSourceLocation customSourceLocation; | 5707 CustomSourceLocation customSourceLocation; |
| 5418 /** The stream is a pubsub stream. */ | 5708 /** The stream is a pubsub stream. */ |
| 5419 PubsubLocation pubsubLocation; | 5709 PubsubLocation pubsubLocation; |
| 5420 /** The stream is a streaming side input. */ | 5710 /** The stream is a streaming side input. */ |
| 5421 StreamingSideInputLocation sideInputLocation; | 5711 StreamingSideInputLocation sideInputLocation; |
| 5422 /** | 5712 /** |
| 5423 * The stream is part of another computation within the current streaming | 5713 * The stream is part of another computation within the current |
| 5424 * Dataflow job. | 5714 * streaming Dataflow job. |
| 5425 */ | 5715 */ |
| 5426 StreamingStageLocation streamingStageLocation; | 5716 StreamingStageLocation streamingStageLocation; |
| 5427 | 5717 |
| 5428 StreamLocation(); | 5718 StreamLocation(); |
| 5429 | 5719 |
| 5430 StreamLocation.fromJson(core.Map _json) { | 5720 StreamLocation.fromJson(core.Map _json) { |
| 5431 if (_json.containsKey("customSourceLocation")) { | 5721 if (_json.containsKey("customSourceLocation")) { |
| 5432 customSourceLocation = new CustomSourceLocation.fromJson(_json["customSourceLocation"]); | 5722 customSourceLocation = new CustomSourceLocation.fromJson(_json["customSourceLocation"]); |
| 5433 } | 5723 } |
| 5434 if (_json.containsKey("pubsubLocation")) { | 5724 if (_json.containsKey("pubsubLocation")) { |
| (...skipping 109 matching lines...) |
| 5544 * streaming computation ranges. | 5834 * streaming computation ranges. |
| 5545 */ | 5835 */ |
| 5546 class StreamingComputationTask { | 5836 class StreamingComputationTask { |
| 5547 /** Contains ranges of a streaming computation this task should apply to. */ | 5837 /** Contains ranges of a streaming computation this task should apply to. */ |
| 5548 core.List<StreamingComputationRanges> computationRanges; | 5838 core.List<StreamingComputationRanges> computationRanges; |
| 5549 /** Describes the set of data disks this task should apply to. */ | 5839 /** Describes the set of data disks this task should apply to. */ |
| 5550 core.List<MountedDataDisk> dataDisks; | 5840 core.List<MountedDataDisk> dataDisks; |
| 5551 /** | 5841 /** |
| 5552 * A type of streaming computation task. | 5842 * A type of streaming computation task. |
| 5553 * Possible string values are: | 5843 * Possible string values are: |
| 5554 * - "STREAMING_COMPUTATION_TASK_UNKNOWN" : A | 5844 * - "STREAMING_COMPUTATION_TASK_UNKNOWN" : The streaming computation task is |
| 5555 * STREAMING_COMPUTATION_TASK_UNKNOWN. | 5845 * unknown, or unspecified. |
| 5556 * - "STREAMING_COMPUTATION_TASK_STOP" : A STREAMING_COMPUTATION_TASK_STOP. | 5846 * - "STREAMING_COMPUTATION_TASK_STOP" : Stop processing specified streaming |
| 5557 * - "STREAMING_COMPUTATION_TASK_START" : A STREAMING_COMPUTATION_TASK_START. | 5847 * computation range(s). |
| 5848 * - "STREAMING_COMPUTATION_TASK_START" : Start processing specified streaming |
| 5849 * computation range(s). |
| 5558 */ | 5850 */ |
| 5559 core.String taskType; | 5851 core.String taskType; |
| 5560 | 5852 |
| 5561 StreamingComputationTask(); | 5853 StreamingComputationTask(); |
| 5562 | 5854 |
| 5563 StreamingComputationTask.fromJson(core.Map _json) { | 5855 StreamingComputationTask.fromJson(core.Map _json) { |
| 5564 if (_json.containsKey("computationRanges")) { | 5856 if (_json.containsKey("computationRanges")) { |
| 5565 computationRanges = _json["computationRanges"].map((value) => new StreamingComputationRanges.fromJson(value)).toList(); | 5857 computationRanges = _json["computationRanges"].map((value) => new StreamingComputationRanges.fromJson(value)).toList(); |
| 5566 } | 5858 } |
| 5567 if (_json.containsKey("dataDisks")) { | 5859 if (_json.containsKey("dataDisks")) { |
| (...skipping 49 matching lines...) |
| 5617 } | 5909 } |
| 5618 return _json; | 5910 return _json; |
| 5619 } | 5911 } |
| 5620 } | 5912 } |
| 5621 | 5913 |
| 5622 /** A task which initializes part of a streaming Dataflow job. */ | 5914 /** A task which initializes part of a streaming Dataflow job. */ |
| 5623 class StreamingSetupTask { | 5915 class StreamingSetupTask { |
| 5624 /** The user has requested drain. */ | 5916 /** The user has requested drain. */ |
| 5625 core.bool drain; | 5917 core.bool drain; |
| 5626 /** | 5918 /** |
| 5627 * The TCP port on which the worker should listen for messages from other | 5919 * The TCP port on which the worker should listen for messages from |
| 5628 * streaming computation workers. | 5920 * other streaming computation workers. |
| 5629 */ | 5921 */ |
| 5630 core.int receiveWorkPort; | 5922 core.int receiveWorkPort; |
| 5631 /** The global topology of the streaming Dataflow job. */ | 5923 /** The global topology of the streaming Dataflow job. */ |
| 5632 TopologyConfig streamingComputationTopology; | 5924 TopologyConfig streamingComputationTopology; |
| 5633 /** | 5925 /** |
| 5634 * The TCP port used by the worker to communicate with the Dataflow worker | 5926 * The TCP port used by the worker to communicate with the Dataflow |
| 5635 * harness. | 5927 * worker harness. |
| 5636 */ | 5928 */ |
| 5637 core.int workerHarnessPort; | 5929 core.int workerHarnessPort; |
| 5638 | 5930 |
| 5639 StreamingSetupTask(); | 5931 StreamingSetupTask(); |
| 5640 | 5932 |
| 5641 StreamingSetupTask.fromJson(core.Map _json) { | 5933 StreamingSetupTask.fromJson(core.Map _json) { |
| 5642 if (_json.containsKey("drain")) { | 5934 if (_json.containsKey("drain")) { |
| 5643 drain = _json["drain"]; | 5935 drain = _json["drain"]; |
| 5644 } | 5936 } |
| 5645 if (_json.containsKey("receiveWorkPort")) { | 5937 if (_json.containsKey("receiveWorkPort")) { |
| (...skipping 51 matching lines...) |
| 5697 _json["stateFamily"] = stateFamily; | 5989 _json["stateFamily"] = stateFamily; |
| 5698 } | 5990 } |
| 5699 if (tag != null) { | 5991 if (tag != null) { |
| 5700 _json["tag"] = tag; | 5992 _json["tag"] = tag; |
| 5701 } | 5993 } |
| 5702 return _json; | 5994 return _json; |
| 5703 } | 5995 } |
| 5704 } | 5996 } |
| 5705 | 5997 |
| 5706 /** | 5998 /** |
| 5707 * Identifies the location of a streaming computation stage, for stage-to-stage | 5999 * Identifies the location of a streaming computation stage, for |
| 5708 * communication. | 6000 * stage-to-stage communication. |
| 5709 */ | 6001 */ |
| 5710 class StreamingStageLocation { | 6002 class StreamingStageLocation { |
| 5711 /** Identifies the particular stream within the streaming Dataflow job. */ | 6003 /** |
| 6004 * Identifies the particular stream within the streaming Dataflow |
| 6005 * job. |
| 6006 */ |
| 5712 core.String streamId; | 6007 core.String streamId; |
| 5713 | 6008 |
| 5714 StreamingStageLocation(); | 6009 StreamingStageLocation(); |
| 5715 | 6010 |
| 5716 StreamingStageLocation.fromJson(core.Map _json) { | 6011 StreamingStageLocation.fromJson(core.Map _json) { |
| 5717 if (_json.containsKey("streamId")) { | 6012 if (_json.containsKey("streamId")) { |
| 5718 streamId = _json["streamId"]; | 6013 streamId = _json["streamId"]; |
| 5719 } | 6014 } |
| 5720 } | 6015 } |
| 5721 | 6016 |
| (...skipping 29 matching lines...) |
| 5751 } | 6046 } |
| 5752 | 6047 |
| 5753 /** Taskrunner configuration settings. */ | 6048 /** Taskrunner configuration settings. */ |
| 5754 class TaskRunnerSettings { | 6049 class TaskRunnerSettings { |
| 5755 /** Whether to also send taskrunner log info to stderr. */ | 6050 /** Whether to also send taskrunner log info to stderr. */ |
| 5756 core.bool alsologtostderr; | 6051 core.bool alsologtostderr; |
| 5757 /** The location on the worker for task-specific subdirectories. */ | 6052 /** The location on the worker for task-specific subdirectories. */ |
| 5758 core.String baseTaskDir; | 6053 core.String baseTaskDir; |
| 5759 /** | 6054 /** |
| 5760 * The base URL for the taskrunner to use when accessing Google Cloud APIs. | 6055 * The base URL for the taskrunner to use when accessing Google Cloud APIs. |
| 5761 * When workers access Google Cloud APIs, they logically do so via relative | 6056 * |
| 5762 * URLs. If this field is specified, it supplies the base URL to use for | 6057 * When workers access Google Cloud APIs, they logically do so via |
| 5763 * resolving these relative URLs. The normative algorithm used is defined by | 6058 * relative URLs. If this field is specified, it supplies the base |
| 5764 * RFC 1808, "Relative Uniform Resource Locators". If not specified, the | 6059 * URL to use for resolving these relative URLs. The normative |
| 5765 * default value is "http://www.googleapis.com/" | 6060 * algorithm used is defined by RFC 1808, "Relative Uniform Resource |
| 6061 * Locators". |
| 6062 * |
| 6063 * If not specified, the default value is "http://www.googleapis.com/" |
| 5766 */ | 6064 */ |
| 5767 core.String baseUrl; | 6065 core.String baseUrl; |
| 5768 /** The file to store preprocessing commands in. */ | 6066 /** The file to store preprocessing commands in. */ |
| 5769 core.String commandlinesFileName; | 6067 core.String commandlinesFileName; |
| 5770 /** Whether to continue taskrunner if an exception is hit. */ | 6068 /** Whether to continue taskrunner if an exception is hit. */ |
| 5771 core.bool continueOnException; | 6069 core.bool continueOnException; |
| 5772 /** The API version of endpoint, e.g. "v1b3" */ | 6070 /** The API version of endpoint, e.g. "v1b3" */ |
| 5773 core.String dataflowApiVersion; | 6071 core.String dataflowApiVersion; |
| 5774 /** The command to launch the worker harness. */ | 6072 /** The command to launch the worker harness. */ |
| 5775 core.String harnessCommand; | 6073 core.String harnessCommand; |
| 5776 /** The suggested backend language. */ | 6074 /** The suggested backend language. */ |
| 5777 core.String languageHint; | 6075 core.String languageHint; |
| 5778 /** The directory on the VM to store logs. */ | 6076 /** The directory on the VM to store logs. */ |
| 5779 core.String logDir; | 6077 core.String logDir; |
| 5780 /** | 6078 /** |
| 5781 * Whether to send taskrunner log info to Google Compute Engine VM serial | 6079 * Whether to send taskrunner log info to Google Compute Engine VM serial |
| 5782 * console. | 6080 * console. |
| 5783 */ | 6081 */ |
| 5784 core.bool logToSerialconsole; | 6082 core.bool logToSerialconsole; |
| 5785 /** | 6083 /** |
| 5786 * Indicates where to put logs. If this is not specified, the logs will not be | 6084 * Indicates where to put logs. If this is not specified, the logs |
| 5787 * uploaded. The supported resource type is: Google Cloud Storage: | 6085 * will not be uploaded. |
| 5788 * storage.googleapis.com/{bucket}/{object} | 6086 * |
| 5789 * bucket.storage.googleapis.com/{object} | 6087 * The supported resource type is: |
| 6088 * |
| 6089 * Google Cloud Storage: |
| 6090 * storage.googleapis.com/{bucket}/{object} |
| 6091 * bucket.storage.googleapis.com/{object} |
| 5790 */ | 6092 */ |
| 5791 core.String logUploadLocation; | 6093 core.String logUploadLocation; |
| 5792 /** | 6094 /** |
| 5793 * The OAuth2 scopes to be requested by the taskrunner in order to access the | 6095 * The OAuth2 scopes to be requested by the taskrunner in order to |
| 5794 * Cloud Dataflow API. | 6096 * access the Cloud Dataflow API. |
| 5795 */ | 6097 */ |
| 5796 core.List<core.String> oauthScopes; | 6098 core.List<core.String> oauthScopes; |
| 5797 /** The settings to pass to the parallel worker harness. */ | 6099 /** The settings to pass to the parallel worker harness. */ |
| 5798 WorkerSettings parallelWorkerSettings; | 6100 WorkerSettings parallelWorkerSettings; |
| 5799 /** The streaming worker main class name. */ | 6101 /** The streaming worker main class name. */ |
| 5800 core.String streamingWorkerMainClass; | 6102 core.String streamingWorkerMainClass; |
| 5801 /** | 6103 /** |
| 5802 * The UNIX group ID on the worker VM to use for tasks launched by taskrunner; | 6104 * The UNIX group ID on the worker VM to use for tasks launched by |
| 5803 * e.g. "wheel". | 6105 * taskrunner; e.g. "wheel". |
| 5804 */ | 6106 */ |
| 5805 core.String taskGroup; | 6107 core.String taskGroup; |
| 5806 /** | 6108 /** |
| 5807 * The UNIX user ID on the worker VM to use for tasks launched by taskrunner; | 6109 * The UNIX user ID on the worker VM to use for tasks launched by |
| 5808 * e.g. "root". | 6110 * taskrunner; e.g. "root". |
| 5809 */ | 6111 */ |
| 5810 core.String taskUser; | 6112 core.String taskUser; |
| 5811 /** | 6113 /** |
| 5812 * The prefix of the resources the taskrunner should use for temporary | 6114 * The prefix of the resources the taskrunner should use for |
| 5813 * storage. The supported resource type is: Google Cloud Storage: | 6115 * temporary storage. |
| 5814 * storage.googleapis.com/{bucket}/{object} | 6116 * |
| 5815 * bucket.storage.googleapis.com/{object} | 6117 * The supported resource type is: |
| 6118 * |
| 6119 * Google Cloud Storage: |
| 6120 * storage.googleapis.com/{bucket}/{object} |
| 6121 * bucket.storage.googleapis.com/{object} |
| 5816 */ | 6122 */ |
| 5817 core.String tempStoragePrefix; | 6123 core.String tempStoragePrefix; |
| 5818 /** The ID string of the VM. */ | 6124 /** The ID string of the VM. */ |
| 5819 core.String vmId; | 6125 core.String vmId; |
| 5820 /** The file to store the workflow in. */ | 6126 /** The file to store the workflow in. */ |
| 5821 core.String workflowFileName; | 6127 core.String workflowFileName; |
| 5822 | 6128 |
| 5823 TaskRunnerSettings(); | 6129 TaskRunnerSettings(); |
| 5824 | 6130 |
| 5825 TaskRunnerSettings.fromJson(core.Map _json) { | 6131 TaskRunnerSettings.fromJson(core.Map _json) { |
| (...skipping 113 matching lines...) |
| 5939 _json["vmId"] = vmId; | 6245 _json["vmId"] = vmId; |
| 5940 } | 6246 } |
| 5941 if (workflowFileName != null) { | 6247 if (workflowFileName != null) { |
| 5942 _json["workflowFileName"] = workflowFileName; | 6248 _json["workflowFileName"] = workflowFileName; |
| 5943 } | 6249 } |
| 5944 return _json; | 6250 return _json; |
| 5945 } | 6251 } |
| 5946 } | 6252 } |
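
A sketch populating a few of these settings; every value is illustrative, and the log location uses the Google Cloud Storage resource form documented above:

    import 'package:googleapis_beta/dataflow/v1b3.dart';

    void main() {
      // Illustrative values only; the defaults documented above apply
      // when a field is left unset (e.g. baseUrl).
      var settings = new TaskRunnerSettings()
        ..baseUrl = "http://www.googleapis.com/"
        ..taskUser = "root"
        ..taskGroup = "wheel"
        ..logUploadLocation = "storage.googleapis.com/my-bucket/logs"
        ..oauthScopes = ["https://www.googleapis.com/auth/cloud-platform"];
      print(settings.toJson());
    }
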
| 5947 | 6253 |
| 5948 /** | 6254 /** |
| 5949 * Global topology of the streaming Dataflow job, including all computations and | 6255 * Global topology of the streaming Dataflow job, including all |
| 5950 * their sharded locations. | 6256 * computations and their sharded locations. |
| 5951 */ | 6257 */ |
| 5952 class TopologyConfig { | 6258 class TopologyConfig { |
| 5953 /** The computations associated with a streaming Dataflow job. */ | 6259 /** The computations associated with a streaming Dataflow job. */ |
| 5954 core.List<ComputationTopology> computations; | 6260 core.List<ComputationTopology> computations; |
| 5955 /** The disks assigned to a streaming Dataflow job. */ | 6261 /** The disks assigned to a streaming Dataflow job. */ |
| 5956 core.List<DataDiskAssignment> dataDiskAssignments; | 6262 core.List<DataDiskAssignment> dataDiskAssignments; |
| 5957 /** The size (in bits) of keys that will be assigned to source messages. */ | 6263 /** The size (in bits) of keys that will be assigned to source messages. */ |
| 5958 core.int forwardingKeyBits; | 6264 core.int forwardingKeyBits; |
| 5959 /** Version number for persistent state. */ | 6265 /** Version number for persistent state. */ |
| 5960 core.int persistentStateVersion; | 6266 core.int persistentStateVersion; |
| (...skipping 35 matching lines...) |
| 5996 _json["persistentStateVersion"] = persistentStateVersion; | 6302 _json["persistentStateVersion"] = persistentStateVersion; |
| 5997 } | 6303 } |
| 5998 if (userStageToComputationNameMap != null) { | 6304 if (userStageToComputationNameMap != null) { |
| 5999 _json["userStageToComputationNameMap"] = userStageToComputationNameMap; | 6305 _json["userStageToComputationNameMap"] = userStageToComputationNameMap; |
| 6000 } | 6306 } |
| 6001 return _json; | 6307 return _json; |
| 6002 } | 6308 } |
| 6003 } | 6309 } |
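
A small illustrative topology, assuming the computation-name map is keyed by user stage name as the field name suggests:

    import 'package:googleapis_beta/dataflow/v1b3.dart';

    void main() {
      // Illustrative: 16-bit forwarding keys and one user stage mapped
      // to its system computation name.
      var topology = new TopologyConfig()
        ..forwardingKeyBits = 16
        ..persistentStateVersion = 1
        ..userStageToComputationNameMap = {"MyStage": "computation-1"};
      print(topology.toJson());
    }
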
| 6004 | 6310 |
| 6005 /** | 6311 /** |
| 6006 * WorkItem represents basic information about a WorkItem to be executed in the | 6312 * WorkItem represents basic information about a WorkItem to be executed |
| 6007 * cloud. | 6313 * in the cloud. |
| 6008 */ | 6314 */ |
| 6009 class WorkItem { | 6315 class WorkItem { |
| 6010 /** Work item-specific configuration as an opaque blob. */ | 6316 /** Work item-specific configuration as an opaque blob. */ |
| 6011 core.String configuration; | 6317 core.String configuration; |
| 6012 /** Identifies this WorkItem. */ | 6318 /** Identifies this WorkItem. */ |
| 6013 core.String id; | 6319 core.String id; |
| 6014 /** The initial index to use when reporting the status of the WorkItem. */ | 6320 /** The initial index to use when reporting the status of the WorkItem. */ |
| 6015 core.String initialReportIndex; | 6321 core.String initialReportIndex; |
| 6016 /** Identifies the workflow job this WorkItem belongs to. */ | 6322 /** Identifies the workflow job this WorkItem belongs to. */ |
| 6017 core.String jobId; | 6323 core.String jobId; |
| 6018 /** Time when the lease on this Work will expire. */ | 6324 /** Time when the lease on this Work will expire. */ |
| 6019 core.String leaseExpireTime; | 6325 core.String leaseExpireTime; |
| 6020 /** Additional information for MapTask WorkItems. */ | 6326 /** Additional information for MapTask WorkItems. */ |
| 6021 MapTask mapTask; | 6327 MapTask mapTask; |
| 6022 /** | 6328 /** |
| 6023 * Any required packages that need to be fetched in order to execute this | 6329 * Any required packages that need to be fetched in order to execute |
| 6024 * WorkItem. | 6330 * this WorkItem. |
| 6025 */ | 6331 */ |
| 6026 core.List<Package> packages; | 6332 core.List<Package> packages; |
| 6027 /** Identifies the cloud project this WorkItem belongs to. */ | 6333 /** Identifies the cloud project this WorkItem belongs to. */ |
| 6028 core.String projectId; | 6334 core.String projectId; |
| 6029 /** Recommended reporting interval. */ | 6335 /** Recommended reporting interval. */ |
| 6030 core.String reportStatusInterval; | 6336 core.String reportStatusInterval; |
| 6031 /** Additional information for SeqMapTask WorkItems. */ | 6337 /** Additional information for SeqMapTask WorkItems. */ |
| 6032 SeqMapTask seqMapTask; | 6338 SeqMapTask seqMapTask; |
| 6033 /** Additional information for ShellTask WorkItems. */ | 6339 /** Additional information for ShellTask WorkItems. */ |
| 6034 ShellTask shellTask; | 6340 ShellTask shellTask; |
| (...skipping 101 matching lines...) |
| 6136 _json["streamingConfigTask"] = (streamingConfigTask).toJson(); | 6442 _json["streamingConfigTask"] = (streamingConfigTask).toJson(); |
| 6137 } | 6443 } |
| 6138 if (streamingSetupTask != null) { | 6444 if (streamingSetupTask != null) { |
| 6139 _json["streamingSetupTask"] = (streamingSetupTask).toJson(); | 6445 _json["streamingSetupTask"] = (streamingSetupTask).toJson(); |
| 6140 } | 6446 } |
| 6141 return _json; | 6447 return _json; |
| 6142 } | 6448 } |
| 6143 } | 6449 } |
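
A sketch of worker-side dispatch over the task payloads; `streamingComputationTask` sits in the elided span above but is assumed to follow the same pattern as the other task fields:

    import 'package:googleapis_beta/dataflow/v1b3.dart';

    // A leased WorkItem carries exactly one task payload; the remaining
    // task fields stay null.
    String describe(WorkItem item) {
      if (item.mapTask != null) return "map task";
      if (item.seqMapTask != null) return "sequential map task";
      if (item.shellTask != null) return "shell task";
      if (item.streamingSetupTask != null) return "streaming setup";
      if (item.streamingComputationTask != null) return "streaming computation";
      if (item.streamingConfigTask != null) return "streaming config";
      return "unknown task type";
    }

    void main() {
      print(describe(new WorkItem()..shellTask = new ShellTask()));
    }
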
| 6144 | 6450 |
| 6145 /** | 6451 /** |
| 6146 * The Dataflow service's idea of the current state of a WorkItem being | 6452 * The Dataflow service's idea of the current state of a WorkItem |
| 6147 * processed by a worker. | 6453 * being processed by a worker. |
| 6148 */ | 6454 */ |
| 6149 class WorkItemServiceState { | 6455 class WorkItemServiceState { |
| 6150 /** | 6456 /** |
| 6151 * Other data returned by the service, specific to the particular worker | 6457 * Other data returned by the service, specific to the particular |
| 6152 * harness. | 6458 * worker harness. |
| 6153 * | 6459 * |
| 6154 * The values for Object must be JSON objects. It can consist of `num`, | 6460 * The values for Object must be JSON objects. It can consist of `num`, |
| 6155 * `String`, `bool` and `null` as well as `Map` and `List` values. | 6461 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 6156 */ | 6462 */ |
| 6157 core.Map<core.String, core.Object> harnessData; | 6463 core.Map<core.String, core.Object> harnessData; |
| 6158 /** Time at which the current lease will expire. */ | 6464 /** Time at which the current lease will expire. */ |
| 6159 core.String leaseExpireTime; | 6465 core.String leaseExpireTime; |
| 6160 /** | 6466 /** |
| 6161 * The short ids that workers should use in subsequent metric updates. Workers | 6467 * The short ids that workers should use in subsequent metric updates. |
| 6162 * should strive to use short ids whenever possible, but it is ok to request | 6468 * Workers should strive to use short ids whenever possible, but it is ok |
| 6163 * the short_id again if a worker lost track of it (e.g. if the worker is | 6469 * to request the short_id again if a worker lost track of it |
| 6164 * recovering from a crash). NOTE: it is possible that the response may have | 6470 * (e.g. if the worker is recovering from a crash). |
| 6165 * short ids for a subset of the metrics. | 6471 * NOTE: it is possible that the response may have short ids for a subset |
| 6472 * of the metrics. |
| 6166 */ | 6473 */ |
| 6167 core.List<MetricShortId> metricShortId; | 6474 core.List<MetricShortId> metricShortId; |
| 6168 /** | 6475 /** |
| 6169 * The index value to use for the next report sent by the worker. Note: If the | 6476 * The index value to use for the next report sent by the worker. |
| 6170 * report call fails for whatever reason, the worker should reuse this index | 6477 * Note: If the report call fails for whatever reason, the worker should |
| 6171 * for subsequent report attempts. | 6478 * reuse this index for subsequent report attempts. |
| 6172 */ | 6479 */ |
| 6173 core.String nextReportIndex; | 6480 core.String nextReportIndex; |
| 6174 /** New recommended reporting interval. */ | 6481 /** New recommended reporting interval. */ |
| 6175 core.String reportStatusInterval; | 6482 core.String reportStatusInterval; |
| 6176 /** | 6483 /** |
| 6177 * The progress point in the WorkItem where the Dataflow service suggests that | 6484 * The progress point in the WorkItem where the Dataflow service |
| 6178 * the worker truncate the task. | 6485 * suggests that the worker truncate the task. |
| 6179 */ | 6486 */ |
| 6180 ApproximateSplitRequest splitRequest; | 6487 ApproximateSplitRequest splitRequest; |
| 6181 /** DEPRECATED in favor of split_request. */ | 6488 /** DEPRECATED in favor of split_request. */ |
| 6182 ApproximateProgress suggestedStopPoint; | 6489 ApproximateProgress suggestedStopPoint; |
| 6183 /** Obsolete, always empty. */ | 6490 /** Obsolete, always empty. */ |
| 6184 Position suggestedStopPosition; | 6491 Position suggestedStopPosition; |
| 6185 | 6492 |
| 6186 WorkItemServiceState(); | 6493 WorkItemServiceState(); |
| 6187 | 6494 |
| 6188 WorkItemServiceState.fromJson(core.Map _json) { | 6495 WorkItemServiceState.fromJson(core.Map _json) { |
| (...skipping 55 matching lines...) |
| 6244 | 6551 |
| 6245 /** Conveys a worker's progress through the work described by a WorkItem. */ | 6552 /** Conveys a worker's progress through the work described by a WorkItem. */ |
| 6246 class WorkItemStatus { | 6553 class WorkItemStatus { |
| 6247 /** True if the WorkItem was completed (successfully or unsuccessfully). */ | 6554 /** True if the WorkItem was completed (successfully or unsuccessfully). */ |
| 6248 core.bool completed; | 6555 core.bool completed; |
| 6249 /** Worker output counters for this WorkItem. */ | 6556 /** Worker output counters for this WorkItem. */ |
| 6250 core.List<CounterUpdate> counterUpdates; | 6557 core.List<CounterUpdate> counterUpdates; |
| 6251 /** See documentation of stop_position. */ | 6558 /** See documentation of stop_position. */ |
| 6252 DynamicSourceSplit dynamicSourceSplit; | 6559 DynamicSourceSplit dynamicSourceSplit; |
| 6253 /** | 6560 /** |
| 6254 * Specifies errors which occurred during processing. If errors are provided, | 6561 * Specifies errors which occurred during processing. If errors are |
| 6255 * and completed = true, then the WorkItem is considered to have failed. | 6562 * provided, and completed = true, then the WorkItem is considered |
| 6563 * to have failed. |
| 6256 */ | 6564 */ |
| 6257 core.List<Status> errors; | 6565 core.List<Status> errors; |
| 6258 /** DEPRECATED in favor of counter_updates. */ | 6566 /** DEPRECATED in favor of counter_updates. */ |
| 6259 core.List<MetricUpdate> metricUpdates; | 6567 core.List<MetricUpdate> metricUpdates; |
| 6260 /** DEPRECATED in favor of reported_progress. */ | 6568 /** DEPRECATED in favor of reported_progress. */ |
| 6261 ApproximateProgress progress; | 6569 ApproximateProgress progress; |
| 6262 /** | 6570 /** |
| 6263 * The report index. When a WorkItem is leased, the lease will contain an | 6571 * The report index. When a WorkItem is leased, the lease will |
| 6264 * initial report index. When a WorkItem's status is reported to the system, | 6572 * contain an initial report index. When a WorkItem's status is |
| 6265 * the report should be sent with that report index, and the response will | 6573 * reported to the system, the report should be sent with |
| 6266 * contain the index the worker should use for the next report. Reports | 6574 * that report index, and the response will contain the index the |
| 6267 * received with unexpected index values will be rejected by the service. In | 6575 * worker should use for the next report. Reports received with |
| 6268 * order to preserve idempotency, the worker should not alter the contents of | 6576 * unexpected index values will be rejected by the service. |
| 6269 * a report, even if the worker must submit the same report multiple times | 6577 * |
| 6270 * before getting back a response. The worker should not submit a subsequent | 6578 * In order to preserve idempotency, the worker should not alter the |
| 6271 * report until the response for the previous report has been received from | 6579 * contents of a report, even if the worker must submit the same |
| 6272 * the service. | 6580 * report multiple times before getting back a response. The worker |
| 6581 * should not submit a subsequent report until the response for the |
| | 6582 * previous report has been received from the service. |
| 6273 */ | 6583 */ |
| 6274 core.String reportIndex; | 6584 core.String reportIndex; |
| 6275 /** The worker's progress through this WorkItem. */ | 6585 /** The worker's progress through this WorkItem. */ |
| 6276 ApproximateReportedProgress reportedProgress; | 6586 ApproximateReportedProgress reportedProgress; |
| 6277 /** Amount of time the worker requests for its lease. */ | 6587 /** Amount of time the worker requests for its lease. */ |
| 6278 core.String requestedLeaseDuration; | 6588 core.String requestedLeaseDuration; |
| 6279 /** DEPRECATED in favor of dynamic_source_split. */ | 6589 /** DEPRECATED in favor of dynamic_source_split. */ |
| 6280 SourceFork sourceFork; | 6590 SourceFork sourceFork; |
| 6281 /** | 6591 /** |
| 6282 * If the work item represented a SourceOperationRequest, and the work is | 6592 * If the work item represented a SourceOperationRequest, and the work |
| 6283 * completed, contains the result of the operation. | 6593 * is completed, contains the result of the operation. |
| 6284 */ | 6594 */ |
| 6285 SourceOperationResponse sourceOperationResponse; | 6595 SourceOperationResponse sourceOperationResponse; |
| 6286 /** | 6596 /** |
| 6287 * A worker may split an active map task in two parts, "primary" and | 6597 * A worker may split an active map task in two parts, "primary" and |
| 6288 * "residual", continuing to process the primary part and returning the | 6598 * "residual", continuing to process the primary part and returning the |
| 6289 * residual part into the pool of available work. This event is called a | 6599 * residual part into the pool of available work. |
| 6290 * "dynamic split" and is critical to the dynamic work rebalancing feature. | 6600 * This event is called a "dynamic split" and is critical to the dynamic |
| 6291 * The two obtained sub-tasks are called "parts" of the split. The parts, if | 6601 * work rebalancing feature. The two obtained sub-tasks are called |
| 6292 * concatenated, must represent the same input as would be read by the current | 6602 * "parts" of the split. |
| 6293 * task if the split did not happen. The exact way in which the original task | 6603 * The parts, if concatenated, must represent the same input as would |
| 6294 * is decomposed into the two parts is specified either as a position | 6604 * be read by the current task if the split did not happen. |
| 6295 * demarcating them (stop_position), or explicitly as two DerivedSources, if | 6605 * The exact way in which the original task is decomposed into the two |
| 6296 * this task consumes a user-defined source type (dynamic_source_split). The | 6606 * parts is specified either as a position demarcating them |
| 6297 * "current" task is adjusted as a result of the split: after a task with | 6607 * (stop_position), or explicitly as two DerivedSources, if this |
| 6298 * range [A, B) sends a stop_position update at C, its range is considered to | 6608 * task consumes a user-defined source type (dynamic_source_split). |
| 6299 * be [A, C), e.g.: * Progress should be interpreted relative to the new | 6609 * |
| 6300 * range, e.g. "75% completed" means "75% of [A, C) completed" * The worker | 6610 * The "current" task is adjusted as a result of the split: after a task |
| 6301 * should interpret proposed_stop_position relative to the new range, e.g. | 6611 * with range [A, B) sends a stop_position update at C, its range is |
| 6302 * "split at 68%" should be interpreted as "split at 68% of [A, C)". * If the | 6612 * considered to be [A, C), e.g.: |
| 6303 * worker chooses to split again using stop_position, only stop_positions in | 6613 * * Progress should be interpreted relative to the new range, e.g. |
| 6304 * [A, C) will be accepted. * Etc. dynamic_source_split has similar semantics: | 6614 * "75% completed" means "75% of [A, C) completed" |
| 6305 * e.g., if a task with source S splits using dynamic_source_split into {P, R} | 6615 * * The worker should interpret proposed_stop_position relative to the |
| 6306 * (where P and R must be together equivalent to S), then subsequent progress | 6616 * new range, e.g. "split at 68%" should be interpreted as |
| 6307 * and proposed_stop_position should be interpreted relative to P, and in a | 6617 * "split at 68% of [A, C)". |
| 6308 * potential subsequent dynamic_source_split into {P', R'}, P' and R' must be | 6618 * * If the worker chooses to split again using stop_position, only |
| 6309 * together equivalent to P, etc. | 6619 * stop_positions in [A, C) will be accepted. |
| 6620 * * Etc. |
| 6621 * dynamic_source_split has similar semantics: e.g., if a task with |
| 6622 * source S splits using dynamic_source_split into {P, R} |
| 6623 * (where P and R must be together equivalent to S), then subsequent |
| 6624 * progress and proposed_stop_position should be interpreted relative |
| 6625 * to P, and in a potential subsequent dynamic_source_split into {P', R'}, |
| 6626 * P' and R' must be together equivalent to P, etc. |
| 6310 */ | 6627 */ |
| 6311 Position stopPosition; | 6628 Position stopPosition; |
| 6312 /** Identifies the WorkItem. */ | 6629 /** Identifies the WorkItem. */ |
| 6313 core.String workItemId; | 6630 core.String workItemId; |
| 6314 | 6631 |
| 6315 WorkItemStatus(); | 6632 WorkItemStatus(); |
| 6316 | 6633 |
| 6317 WorkItemStatus.fromJson(core.Map _json) { | 6634 WorkItemStatus.fromJson(core.Map _json) { |
| 6318 if (_json.containsKey("completed")) { | 6635 if (_json.containsKey("completed")) { |
| 6319 completed = _json["completed"]; | 6636 completed = _json["completed"]; |
| (...skipping 75 matching lines...) |
| 6395 _json["stopPosition"] = (stopPosition).toJson(); | 6712 _json["stopPosition"] = (stopPosition).toJson(); |
| 6396 } | 6713 } |
| 6397 if (workItemId != null) { | 6714 if (workItemId != null) { |
| 6398 _json["workItemId"] = workItemId; | 6715 _json["workItemId"] = workItemId; |
| 6399 } | 6716 } |
| 6400 return _json; | 6717 return _json; |
| 6401 } | 6718 } |
| 6402 } | 6719 } |
| 6403 | 6720 |
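A sketch of the report-index discipline documented on `reportIndex` above: the worker sends a report with its current index, retries with an unaltered payload and the same index until a response arrives, and only then advances to the index the service hands back. The `reportUntilAcked` helper is hypothetical, and its `send` parameter stands in for the actual reportStatus RPC:

import 'dart:async';
import 'package:googleapis_beta/dataflow/v1b3.dart';

Future<String> reportUntilAcked(WorkItemStatus status, String index,
    Future<WorkItemServiceState> send(WorkItemStatus s)) async {
  status.reportIndex = index;
  while (true) {
    try {
      // The response carries the index to use for the *next* report.
      var state = await send(status);
      return state.nextReportIndex;
    } catch (_) {
      // Retry with the same index and the same, unmodified contents;
      // this is what lets the service deduplicate repeated reports.
    }
  }
}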
| 6404 /** | 6721 /** |
| 6405 * WorkerHealthReport contains information about the health of a worker. The VM | 6722 * WorkerHealthReport contains information about the health of a worker. |
| 6406 * should be identified by the labels attached to the WorkerMessage that this | 6723 * |
| 6407 * health ping belongs to. | 6724 * The VM should be identified by the labels attached to the WorkerMessage that |
| 6725 * this health ping belongs to. |
| 6408 */ | 6726 */ |
| 6409 class WorkerHealthReport { | 6727 class WorkerHealthReport { |
| 6410 /** | 6728 /** |
| 6411 * The pods running on the worker. See: | 6729 * The pods running on the worker. See: |
| 6412 * http://kubernetes.io/v1.1/docs/api-reference/v1/definitions.html#_v1_pod | 6730 * http://kubernetes.io/v1.1/docs/api-reference/v1/definitions.html#_v1_pod |
| 6731 * |
| 6413 * This field is used by the worker to send the status of the individual | 6732 * This field is used by the worker to send the status of the individual |
| 6414 * containers running on each worker. | 6733 * containers running on each worker. |
| 6415 * | 6734 * |
| 6416 * The values for Object must be JSON objects. They can consist of `num`, | 6735 * The values for Object must be JSON objects. They can consist of `num`, |
| 6417 * `String`, `bool` and `null` as well as `Map` and `List` values. | 6736 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 6418 */ | 6737 */ |
| 6419 core.List<core.Map<core.String, core.Object>> pods; | 6738 core.List<core.Map<core.String, core.Object>> pods; |
| 6420 /** | 6739 /** |
| 6421 * The interval at which the worker is sending health reports. The default | 6740 * The interval at which the worker is sending health reports. |
| 6422 * value of 0 should be interpreted as the field is not being explicitly set | 6741 * The default value of 0 should be interpreted as the field is not being |
| 6423 * by the worker. | 6742 * explicitly set by the worker. |
| 6424 */ | 6743 */ |
| 6425 core.String reportInterval; | 6744 core.String reportInterval; |
| 6426 /** Whether the VM is healthy. */ | 6745 /** Whether the VM is healthy. */ |
| 6427 core.bool vmIsHealthy; | 6746 core.bool vmIsHealthy; |
| 6428 /** The time the VM was booted. */ | 6747 /** The time the VM was booted. */ |
| 6429 core.String vmStartupTime; | 6748 core.String vmStartupTime; |
| 6430 | 6749 |
| 6431 WorkerHealthReport(); | 6750 WorkerHealthReport(); |
| 6432 | 6751 |
| 6433 WorkerHealthReport.fromJson(core.Map _json) { | 6752 WorkerHealthReport.fromJson(core.Map _json) { |
| (...skipping 23 matching lines...) |
| 6457 _json["vmIsHealthy"] = vmIsHealthy; | 6776 _json["vmIsHealthy"] = vmIsHealthy; |
| 6458 } | 6777 } |
| 6459 if (vmStartupTime != null) { | 6778 if (vmStartupTime != null) { |
| 6460 _json["vmStartupTime"] = vmStartupTime; | 6779 _json["vmStartupTime"] = vmStartupTime; |
| 6461 } | 6780 } |
| 6462 return _json; | 6781 return _json; |
| 6463 } | 6782 } |
| 6464 } | 6783 } |
| 6465 | 6784 |
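A sketch of filling in the health ping described above; timestamps and durations travel as strings in these bindings, and the pod map here is illustrative rather than a real Kubernetes pod object:

import 'package:googleapis_beta/dataflow/v1b3.dart';

WorkerHealthReport buildHealthReport() {
  return new WorkerHealthReport()
    ..vmIsHealthy = true
    ..vmStartupTime = '2017-01-01T00:00:00Z'  // illustrative boot time
    // Leaving this unset (zero) would mean "interval not explicitly set".
    ..reportInterval = '30s'
    ..pods = [
      {'name': 'worker-harness', 'status': 'Running'}  // illustrative
    ];
}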
| 6466 /** | 6785 /** |
| 6467 * WorkerHealthReportResponse contains information returned to the worker in | 6786 * WorkerHealthReportResponse contains information returned to the worker |
| 6468 * response to a health ping. | 6787 * in response to a health ping. |
| 6469 */ | 6788 */ |
| 6470 class WorkerHealthReportResponse { | 6789 class WorkerHealthReportResponse { |
| 6471 /** | 6790 /** |
| 6472 * A positive value indicates the worker should change its reporting interval | 6791 * A positive value indicates the worker should change its reporting interval |
| 6473 * to the specified value. The default value of zero means no change in report | 6792 * to the specified value. |
| 6474 * rate is requested by the server. | 6793 * |
| 6794 * The default value of zero means no change in report rate is requested by |
| 6795 * the server. |
| 6475 */ | 6796 */ |
| 6476 core.String reportInterval; | 6797 core.String reportInterval; |
| 6477 | 6798 |
| 6478 WorkerHealthReportResponse(); | 6799 WorkerHealthReportResponse(); |
| 6479 | 6800 |
| 6480 WorkerHealthReportResponse.fromJson(core.Map _json) { | 6801 WorkerHealthReportResponse.fromJson(core.Map _json) { |
| 6481 if (_json.containsKey("reportInterval")) { | 6802 if (_json.containsKey("reportInterval")) { |
| 6482 reportInterval = _json["reportInterval"]; | 6803 reportInterval = _json["reportInterval"]; |
| 6483 } | 6804 } |
| 6484 } | 6805 } |
| 6485 | 6806 |
| 6486 core.Map toJson() { | 6807 core.Map toJson() { |
| 6487 var _json = new core.Map(); | 6808 var _json = new core.Map(); |
| 6488 if (reportInterval != null) { | 6809 if (reportInterval != null) { |
| 6489 _json["reportInterval"] = reportInterval; | 6810 _json["reportInterval"] = reportInterval; |
| 6490 } | 6811 } |
| 6491 return _json; | 6812 return _json; |
| 6492 } | 6813 } |
| 6493 } | 6814 } |
| 6494 | 6815 |
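On the worker side, the response's interval should only be honored when it is positive, since zero means no change is requested. A sketch; `currentInterval` is assumed worker-side state, not part of the API:

import 'package:googleapis_beta/dataflow/v1b3.dart';

String applyHealthResponse(
    WorkerHealthReportResponse resp, String currentInterval) {
  // Zero (e.g. "0s", assuming the JSON duration form) or unset means the
  // server is not requesting a change in report rate.
  if (resp.reportInterval != null && resp.reportInterval != '0s') {
    return resp.reportInterval;
  }
  return currentInterval;
}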
| 6495 /** WorkerMessage provides information to the backend about a worker. */ | 6816 /** WorkerMessage provides information to the backend about a worker. */ |
| 6496 class WorkerMessage { | 6817 class WorkerMessage { |
| 6497 /** | 6818 /** |
| 6498 * Labels are used to group WorkerMessages. For example, a worker_message | 6819 * Labels are used to group WorkerMessages. |
| 6499 * about a particular container might have the labels: { "JOB_ID": | 6820 * For example, a worker_message about a particular container |
| 6500 * "2015-04-22", "WORKER_ID": "wordcount-vm-2015…" "CONTAINER_TYPE": "worker", | 6821 * might have the labels: |
| 6501 * "CONTAINER_ID": "ac1234def"} Label tags typically correspond to Label enum | 6822 * { "JOB_ID": "2015-04-22", |
| 6502 * values. However, for ease of development other strings can be used as tags. | 6823 * "WORKER_ID": "wordcount-vm-2015…" |
| 6503 * LABEL_UNSPECIFIED should not be used here. | 6824 * "CONTAINER_TYPE": "worker", |
| 6825 * "CONTAINER_ID": "ac1234def"} |
| 6826 * Label tags typically correspond to Label enum values. However, for ease |
| 6827 * of development other strings can be used as tags. LABEL_UNSPECIFIED should |
| 6828 * not be used here. |
| 6504 */ | 6829 */ |
| 6505 core.Map<core.String, core.String> labels; | 6830 core.Map<core.String, core.String> labels; |
| 6506 /** The timestamp of the worker_message. */ | 6831 /** The timestamp of the worker_message. */ |
| 6507 core.String time; | 6832 core.String time; |
| 6508 /** The health of a worker. */ | 6833 /** The health of a worker. */ |
| 6509 WorkerHealthReport workerHealthReport; | 6834 WorkerHealthReport workerHealthReport; |
| 6510 /** A worker message code. */ | 6835 /** A worker message code. */ |
| 6511 WorkerMessageCode workerMessageCode; | 6836 WorkerMessageCode workerMessageCode; |
| 6512 /** Resource metrics reported by workers. */ | 6837 /** Resource metrics reported by workers. */ |
| 6513 ResourceUtilizationReport workerMetrics; | 6838 ResourceUtilizationReport workerMetrics; |
| (...skipping 34 matching lines...) |
| 6548 } | 6873 } |
| 6549 if (workerMetrics != null) { | 6874 if (workerMetrics != null) { |
| 6550 _json["workerMetrics"] = (workerMetrics).toJson(); | 6875 _json["workerMetrics"] = (workerMetrics).toJson(); |
| 6551 } | 6876 } |
| 6552 return _json; | 6877 return _json; |
| 6553 } | 6878 } |
| 6554 } | 6879 } |
| 6555 | 6880 |
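A sketch of composing a WorkerMessage that carries a health report, following the label conventions quoted in the `labels` doc above (the label values are the illustrative ones from that example; the worker name is hypothetical):

import 'package:googleapis_beta/dataflow/v1b3.dart';

WorkerMessage healthPing(WorkerHealthReport report) {
  return new WorkerMessage()
    ..labels = {
      'JOB_ID': '2015-04-22',         // illustrative values from the docs
      'WORKER_ID': 'wordcount-vm-1',  // hypothetical worker name
      'CONTAINER_TYPE': 'worker',
      'CONTAINER_ID': 'ac1234def',
    }
    ..time = new DateTime.now().toUtc().toIso8601String()
    ..workerHealthReport = report;
}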
| 6556 /** | 6881 /** |
| 6557 * A message code is used to report status and error messages to the service. | 6882 * A message code is used to report status and error messages to the service. |
| 6558 * The message codes are intended to be machine readable. The service will take | 6883 * The message codes are intended to be machine readable. The service will |
| 6559 * care of translating these into user understandable messages if necessary. | 6884 * take care of translating these into user understandable messages if |
| 6560 * Example use cases: 1. Worker processes reporting successful startup. 2. | 6885 * necessary. |
| 6561 * Worker processes reporting specific errors (e.g. package staging failure). | 6886 * |
| 6887 * Example use cases: |
| 6888 * 1. Worker processes reporting successful startup. |
| 6889 * 2. Worker processes reporting specific errors (e.g. package staging |
| 6890 * failure). |
| 6562 */ | 6891 */ |
| 6563 class WorkerMessageCode { | 6892 class WorkerMessageCode { |
| 6564 /** | 6893 /** |
| 6565 * The code is a string intended for consumption by a machine that identifies | 6894 * The code is a string intended for consumption by a machine that identifies |
| 6566 * the type of message being sent. Examples: 1. "HARNESS_STARTED" might be | 6895 * the type of message being sent. |
| 6567 * used to indicate the worker harness has started. 2. "GCS_DOWNLOAD_ERROR" | 6896 * Examples: |
| 6568 * might be used to indicate an error downloading a GCS file as part of the | 6897 * 1. "HARNESS_STARTED" might be used to indicate the worker harness has |
| 6569 * boot process of one of the worker containers. This is a string and not an | 6898 * started. |
| 6570 * enum to make it easy to add new codes without waiting for an API change. | 6899 * 2. "GCS_DOWNLOAD_ERROR" might be used to indicate an error downloading |
| 6900 * a GCS file as part of the boot process of one of the worker containers. |
| 6901 * |
| 6902 * This is a string and not an enum to make it easy to add new codes without |
| 6903 * waiting for an API change. |
| 6571 */ | 6904 */ |
| 6572 core.String code; | 6905 core.String code; |
| 6573 /** | 6906 /** |
| 6574 * Parameters contains specific information about the code. This is a struct | 6907 * Parameters contains specific information about the code. |
| 6575 * to allow parameters of different types. Examples: 1. For a | 6908 * |
| 6576 * "HARNESS_STARTED" message parameters might provide the name of the worker | 6909 * This is a struct to allow parameters of different types. |
| 6577 * and additional data like timing information. 2. For a "GCS_DOWNLOAD_ERROR" | 6910 * |
| 6578 * parameters might contain fields listing the GCS objects being downloaded | 6911 * Examples: |
| 6579 * and fields containing errors. In general, complex data structures should be | 6915 * the GCS objects being downloaded and fields containing errors. |
| 6580 * avoided. If a worker needs to send a specific and complicated data | 6913 * of the worker and additional data like timing information. |
| 6581 * structure then please consider defining a new proto and adding it to the | 6914 * 2. For a "GCS_DOWNLOAD_ERROR" parameters might contain fields listing |
| 6582 * data oneof in WorkerMessageResponse. Conventions: Parameters should only be | 6915 * the GCS objects being downloaded and fields containing errors. |
| 6583 * used for information that isn't typically passed as a label. hostname and | 6916 * |
| 6584 * other worker identifiers should almost always be passed as labels since | 6917 * In general, complex data structures should be avoided. If a worker |
| 6585 * they will be included on most messages. | 6918 * needs to send a specific and complicated data structure then please |
| 6919 * consider defining a new proto and adding it to the data oneof in |
| 6920 * WorkerMessageResponse. |
| 6921 * |
| 6922 * Conventions: |
| 6923 * Parameters should only be used for information that isn't typically passed |
| 6924 * as a label. |
| 6925 * hostname and other worker identifiers should almost always be passed |
| 6926 * as labels since they will be included on most messages. |
| 6586 * | 6927 * |
| 6587 * The values for Object must be JSON objects. They can consist of `num`, | 6928 * The values for Object must be JSON objects. They can consist of `num`, |
| 6588 * `String`, `bool` and `null` as well as `Map` and `List` values. | 6929 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 6589 */ | 6930 */ |
| 6590 core.Map<core.String, core.Object> parameters; | 6931 core.Map<core.String, core.Object> parameters; |
| 6591 | 6932 |
| 6592 WorkerMessageCode(); | 6933 WorkerMessageCode(); |
| 6593 | 6934 |
| 6594 WorkerMessageCode.fromJson(core.Map _json) { | 6935 WorkerMessageCode.fromJson(core.Map _json) { |
| 6595 if (_json.containsKey("code")) { | 6936 if (_json.containsKey("code")) { |
| (...skipping 43 matching lines...) |
| 6639 _json["workerHealthReportResponse"] = (workerHealthReportResponse).toJson(
); | 6980 _json["workerHealthReportResponse"] = (workerHealthReportResponse).toJson(
); |
| 6640 } | 6981 } |
| 6641 if (workerMetricsResponse != null) { | 6982 if (workerMetricsResponse != null) { |
| 6642 _json["workerMetricsResponse"] = (workerMetricsResponse).toJson(); | 6983 _json["workerMetricsResponse"] = (workerMetricsResponse).toJson(); |
| 6643 } | 6984 } |
| 6644 return _json; | 6985 return _json; |
| 6645 } | 6986 } |
| 6646 } | 6987 } |
| 6647 | 6988 |
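Following the code and parameter conventions above, a startup message might be built as below (a sketch; "HARNESS_STARTED" is the example code from the docs, and the parameter keys are hypothetical):

import 'package:googleapis_beta/dataflow/v1b3.dart';

WorkerMessageCode harnessStarted() {
  return new WorkerMessageCode()
    // A plain string, not an enum, so new codes need no API change.
    ..code = 'HARNESS_STARTED'
    ..parameters = {
      'worker': 'wordcount-vm-1',  // hypothetical keys; kept simple,
      'startupSeconds': 4.2,       // per the "avoid complex data" advice
    };
}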
| 6648 /** | 6989 /** |
| 6649 * Describes one particular pool of Cloud Dataflow workers to be instantiated by | 6990 * Describes one particular pool of Cloud Dataflow workers to be |
| 6650 * the Cloud Dataflow service in order to perform the computations required by a | 6991 * instantiated by the Cloud Dataflow service in order to perform the |
| 6651 * job. Note that a workflow job may use multiple pools, in order to match the | 6992 * computations required by a job. Note that a workflow job may use |
| 6652 * various computational requirements of the various stages of the job. | 6993 * multiple pools, in order to match the various computational |
| 6994 * requirements of the various stages of the job. |
| 6653 */ | 6995 */ |
| 6654 class WorkerPool { | 6996 class WorkerPool { |
| 6655 /** Settings for autoscaling of this WorkerPool. */ | 6997 /** Settings for autoscaling of this WorkerPool. */ |
| 6656 AutoscalingSettings autoscalingSettings; | 6998 AutoscalingSettings autoscalingSettings; |
| 6657 /** Data disks that are used by a VM in this workflow. */ | 6999 /** Data disks that are used by a VM in this workflow. */ |
| 6658 core.List<Disk> dataDisks; | 7000 core.List<Disk> dataDisks; |
| 6659 /** | 7001 /** |
| 6660 * The default package set to install. This allows the service to select a | 7002 * The default package set to install. This allows the service to |
| 6661 * default set of packages which are useful to worker harnesses written in a | 7003 * select a default set of packages which are useful to worker |
| 6662 * particular language. | 7004 * harnesses written in a particular language. |
| 6663 * Possible string values are: | 7005 * Possible string values are: |
| 6664 * - "DEFAULT_PACKAGE_SET_UNKNOWN" : A DEFAULT_PACKAGE_SET_UNKNOWN. | 7006 * - "DEFAULT_PACKAGE_SET_UNKNOWN" : The default set of packages to stage is |
| 6665 * - "DEFAULT_PACKAGE_SET_NONE" : A DEFAULT_PACKAGE_SET_NONE. | 7007 * unknown, or unspecified. |
| 6666 * - "DEFAULT_PACKAGE_SET_JAVA" : A DEFAULT_PACKAGE_SET_JAVA. | 7008 * - "DEFAULT_PACKAGE_SET_NONE" : Indicates that no packages should be staged |
| 6667 * - "DEFAULT_PACKAGE_SET_PYTHON" : A DEFAULT_PACKAGE_SET_PYTHON. | 7009 * at the worker unless |
| 7010 * explicitly specified by the job. |
| 7011 * - "DEFAULT_PACKAGE_SET_JAVA" : Stage packages typically useful to workers |
| 7012 * written in Java. |
| 7013 * - "DEFAULT_PACKAGE_SET_PYTHON" : Stage pacakges typically useful to workers |
| 7014 * written in Python. |
| 6668 */ | 7015 */ |
| 6669 core.String defaultPackageSet; | 7016 core.String defaultPackageSet; |
| 6670 /** | 7017 /** |
| 6671 * Size of root disk for VMs, in GB. If zero or unspecified, the service will | 7018 * Size of root disk for VMs, in GB. If zero or unspecified, the service will |
| 6672 * attempt to choose a reasonable default. | 7019 * attempt to choose a reasonable default. |
| 6673 */ | 7020 */ |
| 6674 core.int diskSizeGb; | 7021 core.int diskSizeGb; |
| 6675 /** Fully qualified source image for disks. */ | 7022 /** Fully qualified source image for disks. */ |
| 6676 core.String diskSourceImage; | 7023 core.String diskSourceImage; |
| 6677 /** | 7024 /** |
| 6678 * Type of root disk for VMs. If empty or unspecified, the service will | 7025 * Type of root disk for VMs. If empty or unspecified, the service will |
| 6679 * attempt to choose a reasonable default. | 7026 * attempt to choose a reasonable default. |
| 6680 */ | 7027 */ |
| 6681 core.String diskType; | 7028 core.String diskType; |
| 6682 /** | 7029 /** |
| 6683 * Configuration for VM IPs. | 7030 * Configuration for VM IPs. |
| 6684 * Possible string values are: | 7031 * Possible string values are: |
| 6685 * - "WORKER_IP_UNSPECIFIED" : A WORKER_IP_UNSPECIFIED. | 7032 * - "WORKER_IP_UNSPECIFIED" : The configuration is unknown, or unspecified. |
| 6686 * - "WORKER_IP_PUBLIC" : A WORKER_IP_PUBLIC. | 7033 * - "WORKER_IP_PUBLIC" : Workers should have public IP addresses. |
| 6687 * - "WORKER_IP_PRIVATE" : A WORKER_IP_PRIVATE. | 7034 * - "WORKER_IP_PRIVATE" : Workers should have private IP addresses. |
| 6688 */ | 7035 */ |
| 6689 core.String ipConfiguration; | 7036 core.String ipConfiguration; |
| 6690 /** | 7037 /** |
| 6691 * The kind of the worker pool; currently only `harness` and `shuffle` are | 7038 * The kind of the worker pool; currently only `harness` and `shuffle` |
| 6692 * supported. | 7039 * are supported. |
| 6693 */ | 7040 */ |
| 6694 core.String kind; | 7041 core.String kind; |
| 6695 /** | 7042 /** |
| 6696 * Machine type (e.g. "n1-standard-1"). If empty or unspecified, the service | 7043 * Machine type (e.g. "n1-standard-1"). If empty or unspecified, the |
| 6697 * will attempt to choose a reasonable default. | 7044 * service will attempt to choose a reasonable default. |
| 6698 */ | 7045 */ |
| 6699 core.String machineType; | 7046 core.String machineType; |
| 6700 /** Metadata to set on the Google Compute Engine VMs. */ | 7047 /** Metadata to set on the Google Compute Engine VMs. */ |
| 6701 core.Map<core.String, core.String> metadata; | 7048 core.Map<core.String, core.String> metadata; |
| 6702 /** | 7049 /** |
| 6703 * Network to which VMs will be assigned. If empty or unspecified, the service | 7050 * Network to which VMs will be assigned. If empty or unspecified, |
| 6704 * will use the network "default". | 7051 * the service will use the network "default". |
| 6705 */ | 7052 */ |
| 6706 core.String network; | 7053 core.String network; |
| 6707 /** | 7054 /** |
| 6708 * The number of threads per worker harness. If empty or unspecified, the | 7055 * The number of threads per worker harness. If empty or unspecified, the |
| 6709 * service will choose a number of threads (according to the number of cores | 7056 * service will choose a number of threads (according to the number of cores |
| 6710 * on the selected machine type for batch, or 1 by convention for streaming). | 7057 * on the selected machine type for batch, or 1 by convention for streaming). |
| 6711 */ | 7058 */ |
| 6712 core.int numThreadsPerWorker; | 7059 core.int numThreadsPerWorker; |
| 6713 /** | 7060 /** |
| 6714 * Number of Google Compute Engine workers in this pool needed to execute the | 7061 * Number of Google Compute Engine workers in this pool needed to |
| 6715 * job. If zero or unspecified, the service will attempt to choose a | 7062 * execute the job. If zero or unspecified, the service will |
| 6716 * reasonable default. | 7063 * attempt to choose a reasonable default. |
| 6717 */ | 7064 */ |
| 6718 core.int numWorkers; | 7065 core.int numWorkers; |
| 6719 /** | 7066 /** |
| 6720 * The action to take on host maintenance, as defined by the Google Compute | 7067 * The action to take on host maintenance, as defined by the Google |
| 6721 * Engine API. | 7068 * Compute Engine API. |
| 6722 */ | 7069 */ |
| 6723 core.String onHostMaintenance; | 7070 core.String onHostMaintenance; |
| 6724 /** Packages to be installed on workers. */ | 7071 /** Packages to be installed on workers. */ |
| 6725 core.List<Package> packages; | 7072 core.List<Package> packages; |
| 6726 /** | 7073 /** |
| 6727 * Extra arguments for this worker pool. | 7074 * Extra arguments for this worker pool. |
| 6728 * | 7075 * |
| 6729 * The values for Object must be JSON objects. They can consist of `num`, | 7076 * The values for Object must be JSON objects. They can consist of `num`, |
| 6730 * `String`, `bool` and `null` as well as `Map` and `List` values. | 7077 * `String`, `bool` and `null` as well as `Map` and `List` values. |
| 6731 */ | 7078 */ |
| 6732 core.Map<core.String, core.Object> poolArgs; | 7079 core.Map<core.String, core.Object> poolArgs; |
| 6733 /** | 7080 /** |
| 6734 * Subnetwork to which VMs will be assigned, if desired. Expected to be of the | 7081 * Subnetwork to which VMs will be assigned, if desired. Expected to be of |
| 6735 * form "regions/REGION/subnetworks/SUBNETWORK". | 7082 * the form "regions/REGION/subnetworks/SUBNETWORK". |
| 6736 */ | 7083 */ |
| 6737 core.String subnetwork; | 7084 core.String subnetwork; |
| 6738 /** | 7085 /** |
| 6739 * Settings passed through to Google Compute Engine workers when using the | 7086 * Settings passed through to Google Compute Engine workers when |
| 6740 * standard Dataflow task runner. Users should ignore this field. | 7087 * using the standard Dataflow task runner. Users should ignore |
| 7088 * this field. |
| 6741 */ | 7089 */ |
| 6742 TaskRunnerSettings taskrunnerSettings; | 7090 TaskRunnerSettings taskrunnerSettings; |
| 6743 /** | 7091 /** |
| 6744 * Sets the policy for determining when to turn down the worker pool. Allowed | 7092 * Sets the policy for determining when to turn down the worker pool. |
| 6745 * values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and `TEARDOWN_NEVER`. | 7093 * Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and |
| 7094 * `TEARDOWN_NEVER`. |
| 6746 * `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether | 7095 * `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether |
| 6747 * the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down if the | 7096 * the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down |
| 6748 * job succeeds. `TEARDOWN_NEVER` means the workers are never torn down. If | 7097 * if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn |
| 6749 * the workers are not torn down by the service, they will continue to run and | 7098 * down. |
| 6750 * use Google Compute Engine VM resources in the user's project until they are | 7099 * |
| 6751 * explicitly terminated by the user. Because of this, Google recommends using | 7100 * If the workers are not torn down by the service, they will |
| 6752 * the `TEARDOWN_ALWAYS` policy except for small, manually supervised test | 7101 * continue to run and use Google Compute Engine VM resources in the |
| 6753 * jobs. If unknown or unspecified, the service will attempt to choose a | 7102 * user's project until they are explicitly terminated by the user. |
| 6754 * reasonable default. | 7103 * Because of this, Google recommends using the `TEARDOWN_ALWAYS` |
| 7104 * policy except for small, manually supervised test jobs. |
| 7105 * |
| 7106 * If unknown or unspecified, the service will attempt to choose a reasonable |
| 7107 * default. |
| 6755 * Possible string values are: | 7108 * Possible string values are: |
| 6756 * - "TEARDOWN_POLICY_UNKNOWN" : A TEARDOWN_POLICY_UNKNOWN. | 7109 * - "TEARDOWN_POLICY_UNKNOWN" : The teardown policy isn't specified, or is |
| 6757 * - "TEARDOWN_ALWAYS" : A TEARDOWN_ALWAYS. | 7110 * unknown. |
| 6758 * - "TEARDOWN_ON_SUCCESS" : A TEARDOWN_ON_SUCCESS. | 7111 * - "TEARDOWN_ALWAYS" : Always teardown the resource. |
| 6759 * - "TEARDOWN_NEVER" : A TEARDOWN_NEVER. | 7112 * - "TEARDOWN_ON_SUCCESS" : Teardown the resource on success. This is useful |
| 7113 * for debugging |
| 7114 * failures. |
| 7115 * - "TEARDOWN_NEVER" : Never teardown the resource. This is useful for |
| 7116 * debugging and |
| 7117 * development. |
| 6760 */ | 7118 */ |
| 6761 core.String teardownPolicy; | 7119 core.String teardownPolicy; |
| 6762 /** | 7120 /** |
| 6763 * Required. Docker container image that executes the Cloud Dataflow worker | 7121 * Required. Docker container image that executes the Cloud Dataflow worker |
| 6764 * harness, residing in Google Container Registry. | 7122 * harness, residing in Google Container Registry. |
| 6765 */ | 7123 */ |
| 6766 core.String workerHarnessContainerImage; | 7124 core.String workerHarnessContainerImage; |
| 6767 /** | 7125 /** |
| 6768 * Zone to run the worker pools in. If empty or unspecified, the service will | 7126 * Zone to run the worker pools in. If empty or unspecified, the service |
| 6769 * attempt to choose a reasonable default. | 7127 * will attempt to choose a reasonable default. |
| 6770 */ | 7128 */ |
| 6771 core.String zone; | 7129 core.String zone; |
| 6772 | 7130 |
| 6773 WorkerPool(); | 7131 WorkerPool(); |
| 6774 | 7132 |
| 6775 WorkerPool.fromJson(core.Map _json) { | 7133 WorkerPool.fromJson(core.Map _json) { |
| 6776 if (_json.containsKey("autoscalingSettings")) { | 7134 if (_json.containsKey("autoscalingSettings")) { |
| 6777 autoscalingSettings = new AutoscalingSettings.fromJson(_json["autoscalingSettings"]); | 7135 autoscalingSettings = new AutoscalingSettings.fromJson(_json["autoscalingSettings"]); |
| 6778 } | 7136 } |
| 6779 if (_json.containsKey("dataDisks")) { | 7137 if (_json.containsKey("dataDisks")) { |
| (...skipping 123 matching lines...) |
| 6903 if (zone != null) { | 7261 if (zone != null) { |
| 6904 _json["zone"] = zone; | 7262 _json["zone"] = zone; |
| 6905 } | 7263 } |
| 6906 return _json; | 7264 return _json; |
| 6907 } | 7265 } |
| 6908 } | 7266 } |
| 6909 | 7267 |
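Pulling the documented fields together, a minimal harness pool might be configured as below (a sketch; the container image path is illustrative, and every unset field falls back to a service-chosen default as described above):

import 'package:googleapis_beta/dataflow/v1b3.dart';

WorkerPool buildHarnessPool() {
  return new WorkerPool()
    ..kind = 'harness'               // only `harness` and `shuffle` exist
    ..machineType = 'n1-standard-1'
    ..numWorkers = 3
    ..defaultPackageSet = 'DEFAULT_PACKAGE_SET_JAVA'
    // Recommended except for small, manually supervised test jobs.
    ..teardownPolicy = 'TEARDOWN_ALWAYS'
    ..workerHarnessContainerImage =
        'gcr.io/example/worker-harness:latest';  // illustrative image
}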
| 6910 /** Provides data to pass through to the worker harness. */ | 7268 /** Provides data to pass through to the worker harness. */ |
| 6911 class WorkerSettings { | 7269 class WorkerSettings { |
| 6912 /** | 7270 /** |
| 6913 * The base URL for accessing Google Cloud APIs. When workers access Google | 7271 * The base URL for accessing Google Cloud APIs. |
| 6914 * Cloud APIs, they logically do so via relative URLs. If this field is | 7272 * |
| 6915 * specified, it supplies the base URL to use for resolving these relative | 7273 * When workers access Google Cloud APIs, they logically do so via |
| 6916 * URLs. The normative algorithm used is defined by RFC 1808, "Relative | 7274 * relative URLs. If this field is specified, it supplies the base |
| 6917 * Uniform Resource Locators". If not specified, the default value is | 7275 * URL to use for resolving these relative URLs. The normative |
| 6918 * "http://www.googleapis.com/" | 7276 * algorithm used is defined by RFC 1808, "Relative Uniform Resource |
| 7277 * Locators". |
| 7278 * |
| 7279 * If not specified, the default value is "http://www.googleapis.com/" |
| 6919 */ | 7280 */ |
| 6920 core.String baseUrl; | 7281 core.String baseUrl; |
| 6921 /** Whether to send work progress updates to the service. */ | 7282 /** Whether to send work progress updates to the service. */ |
| 6922 core.bool reportingEnabled; | 7283 core.bool reportingEnabled; |
| 6923 /** | 7284 /** |
| 6924 * The Cloud Dataflow service path relative to the root URL, for example, | 7285 * The Cloud Dataflow service path relative to the root URL, for example, |
| 6925 * "dataflow/v1b3/projects". | 7286 * "dataflow/v1b3/projects". |
| 6926 */ | 7287 */ |
| 6927 core.String servicePath; | 7288 core.String servicePath; |
| 6928 /** | 7289 /** |
| 6929 * The Shuffle service path relative to the root URL, for example, | 7290 * The Shuffle service path relative to the root URL, for example, |
| 6930 * "shuffle/v1beta1". | 7291 * "shuffle/v1beta1". |
| 6931 */ | 7292 */ |
| 6932 core.String shuffleServicePath; | 7293 core.String shuffleServicePath; |
| 6933 /** | 7294 /** |
| 6934 * The prefix of the resources the system should use for temporary storage. | 7295 * The prefix of the resources the system should use for temporary |
| 6935 * The supported resource type is: Google Cloud Storage: | 7296 * storage. |
| 6936 * storage.googleapis.com/{bucket}/{object} | 7297 * |
| 6937 * bucket.storage.googleapis.com/{object} | 7298 * The supported resource type is: |
| 7299 * |
| 7300 * Google Cloud Storage: |
| 7301 * |
| 7302 * storage.googleapis.com/{bucket}/{object} |
| 7303 * bucket.storage.googleapis.com/{object} |
| 6938 */ | 7304 */ |
| 6939 core.String tempStoragePrefix; | 7305 core.String tempStoragePrefix; |
| 6940 /** The ID of the worker running this pipeline. */ | 7306 /** The ID of the worker running this pipeline. */ |
| 6941 core.String workerId; | 7307 core.String workerId; |
| 6942 | 7308 |
| 6943 WorkerSettings(); | 7309 WorkerSettings(); |
| 6944 | 7310 |
| 6945 WorkerSettings.fromJson(core.Map _json) { | 7311 WorkerSettings.fromJson(core.Map _json) { |
| 6946 if (_json.containsKey("baseUrl")) { | 7312 if (_json.containsKey("baseUrl")) { |
| 6947 baseUrl = _json["baseUrl"]; | 7313 baseUrl = _json["baseUrl"]; |
| (...skipping 33 matching lines...) |
| 6981 _json["tempStoragePrefix"] = tempStoragePrefix; | 7347 _json["tempStoragePrefix"] = tempStoragePrefix; |
| 6982 } | 7348 } |
| 6983 if (workerId != null) { | 7349 if (workerId != null) { |
| 6984 _json["workerId"] = workerId; | 7350 _json["workerId"] = workerId; |
| 6985 } | 7351 } |
| 6986 return _json; | 7352 return _json; |
| 6987 } | 7353 } |
| 6988 } | 7354 } |
| 6989 | 7355 |
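The `baseUrl` doc above describes RFC 1808-style relative resolution. Dart's Uri.resolve implements the successor rules (RFC 3986) and can serve as a sketch of the same idea, including the documented default (the `resolveServicePath` helper is hypothetical):

import 'package:googleapis_beta/dataflow/v1b3.dart';

Uri resolveServicePath(WorkerSettings settings) {
  // Fall back to the default named in the field docs when baseUrl is unset.
  var base = Uri.parse(settings.baseUrl ?? 'http://www.googleapis.com/');
  // e.g. "dataflow/v1b3/projects" resolved against the base URL.
  return base.resolve(settings.servicePath);
}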
| 6990 /** | 7356 /** |
| 6991 * An instruction that writes records. Takes one input, produces no outputs. | 7357 * An instruction that writes records. |
| 7358 * Takes one input, produces no outputs. |
| 6992 */ | 7359 */ |
| 6993 class WriteInstruction { | 7360 class WriteInstruction { |
| 6994 /** The input. */ | 7361 /** The input. */ |
| 6995 InstructionInput input; | 7362 InstructionInput input; |
| 6996 /** The sink to write to. */ | 7363 /** The sink to write to. */ |
| 6997 Sink sink; | 7364 Sink sink; |
| 6998 | 7365 |
| 6999 WriteInstruction(); | 7366 WriteInstruction(); |
| 7000 | 7367 |
| 7001 WriteInstruction.fromJson(core.Map _json) { | 7368 WriteInstruction.fromJson(core.Map _json) { |
| 7002 if (_json.containsKey("input")) { | 7369 if (_json.containsKey("input")) { |
| 7003 input = new InstructionInput.fromJson(_json["input"]); | 7370 input = new InstructionInput.fromJson(_json["input"]); |
| 7004 } | 7371 } |
| 7005 if (_json.containsKey("sink")) { | 7372 if (_json.containsKey("sink")) { |
| 7006 sink = new Sink.fromJson(_json["sink"]); | 7373 sink = new Sink.fromJson(_json["sink"]); |
| 7007 } | 7374 } |
| 7008 } | 7375 } |
| 7009 | 7376 |
| 7010 core.Map toJson() { | 7377 core.Map toJson() { |
| 7011 var _json = new core.Map(); | 7378 var _json = new core.Map(); |
| 7012 if (input != null) { | 7379 if (input != null) { |
| 7013 _json["input"] = (input).toJson(); | 7380 _json["input"] = (input).toJson(); |
| 7014 } | 7381 } |
| 7015 if (sink != null) { | 7382 if (sink != null) { |
| 7016 _json["sink"] = (sink).toJson(); | 7383 _json["sink"] = (sink).toJson(); |
| 7017 } | 7384 } |
| 7018 return _json; | 7385 return _json; |
| 7019 } | 7386 } |
| 7020 } | 7387 } |
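Like every message class in this file, WriteInstruction round-trips through its generated fromJson/toJson pair; a small self-contained sketch:

import 'dart:convert';
import 'package:googleapis_beta/dataflow/v1b3.dart';

void main() {
  var write = new WriteInstruction()
    ..input = new InstructionInput()
    ..sink = new Sink();
  // toJson yields a plain Map suitable for JSON encoding...
  var wire = JSON.encode(write.toJson());
  // ...and fromJson reconstructs an equivalent object on the far side.
  var roundTripped = new WriteInstruction.fromJson(JSON.decode(wire));
  print(roundTripped.sink != null);  // true
}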