| Index: generated/googleapis_beta/lib/dataflow/v1b3.dart
|
| diff --git a/generated/googleapis_beta/lib/dataflow/v1b3.dart b/generated/googleapis_beta/lib/dataflow/v1b3.dart
|
| index 44c7280a4659b806997bbd1ac1be8c95061ba4f8..984143adab4704c490b22c6c7f03d7d922802ad6 100644
|
| --- a/generated/googleapis_beta/lib/dataflow/v1b3.dart
|
| +++ b/generated/googleapis_beta/lib/dataflow/v1b3.dart
|
| @@ -9,63 +9,67 @@ import 'dart:convert' as convert;
|
| import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons;
|
| import 'package:http/http.dart' as http;
|
|
|
| -export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' show
|
| - ApiRequestError, DetailedApiRequestError;
|
| +export 'package:_discoveryapis_commons/_discoveryapis_commons.dart'
|
| + show ApiRequestError, DetailedApiRequestError;
|
|
|
| const core.String USER_AGENT = 'dart-api-client dataflow/v1b3';
|
|
|
| -/** Manages Google Cloud Dataflow projects on Google Cloud Platform. */
|
| +/// Manages Google Cloud Dataflow projects on Google Cloud Platform.
|
| class DataflowApi {
|
| - /** View and manage your data across Google Cloud Platform services */
|
| - static const CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform";
|
| + /// View and manage your data across Google Cloud Platform services
|
| + static const CloudPlatformScope =
|
| + "https://www.googleapis.com/auth/cloud-platform";
|
|
|
| - /** View and manage your Google Compute Engine resources */
|
| + /// View and manage your Google Compute Engine resources
|
| static const ComputeScope = "https://www.googleapis.com/auth/compute";
|
|
|
| - /** View your Google Compute Engine resources */
|
| - static const ComputeReadonlyScope = "https://www.googleapis.com/auth/compute.readonly";
|
| -
|
| - /** View your email address */
|
| - static const UserinfoEmailScope = "https://www.googleapis.com/auth/userinfo.email";
|
| + /// View your Google Compute Engine resources
|
| + static const ComputeReadonlyScope =
|
| + "https://www.googleapis.com/auth/compute.readonly";
|
|
|
| + /// View your email address
|
| + static const UserinfoEmailScope =
|
| + "https://www.googleapis.com/auth/userinfo.email";
|
|
|
| final commons.ApiRequester _requester;
|
|
|
| ProjectsResourceApi get projects => new ProjectsResourceApi(_requester);
|
|
|
| - DataflowApi(http.Client client, {core.String rootUrl: "https://dataflow.googleapis.com/", core.String servicePath: ""}) :
|
| - _requester = new commons.ApiRequester(client, rootUrl, servicePath, USER_AGENT);
|
| + DataflowApi(http.Client client,
|
| + {core.String rootUrl: "https://dataflow.googleapis.com/",
|
| + core.String servicePath: ""})
|
| + : _requester =
|
| + new commons.ApiRequester(client, rootUrl, servicePath, USER_AGENT);
|
| }
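
For orientation, a minimal sketch of constructing the reformatted client, assuming the `googleapis_auth` package supplies the authenticated `http.Client` (the credential JSON and the `clientViaServiceAccount` helper are outside this diff):

```dart
import 'dart:async';

import 'package:googleapis_auth/auth_io.dart' as auth;
import 'package:googleapis_beta/dataflow/v1b3.dart' as dataflow;

Future main() async {
  // Service-account JSON elided; any googleapis_auth flow that yields an
  // authenticated http.Client works here.
  final credentials = new auth.ServiceAccountCredentials.fromJson(r'''...''');
  final client = await auth.clientViaServiceAccount(
      credentials, [dataflow.DataflowApi.CloudPlatformScope]);
  final api = new dataflow.DataflowApi(client);
  // Resource groups hang off the root object: projects, jobs, templates, ...
  print(api.projects.jobs);
  client.close();
}
```
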
|
|
|
| -
|
| class ProjectsResourceApi {
|
| final commons.ApiRequester _requester;
|
|
|
| ProjectsJobsResourceApi get jobs => new ProjectsJobsResourceApi(_requester);
|
| - ProjectsLocationsResourceApi get locations => new ProjectsLocationsResourceApi(_requester);
|
| - ProjectsTemplatesResourceApi get templates => new ProjectsTemplatesResourceApi(_requester);
|
| -
|
| - ProjectsResourceApi(commons.ApiRequester client) :
|
| - _requester = client;
|
| -
|
| - /**
|
| - * Send a worker_message to the service.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - The project to send the WorkerMessages to.
|
| - *
|
| - * Completes with a [SendWorkerMessagesResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<SendWorkerMessagesResponse> workerMessages(SendWorkerMessagesRequest request, core.String projectId) {
|
| + ProjectsLocationsResourceApi get locations =>
|
| + new ProjectsLocationsResourceApi(_requester);
|
| + ProjectsTemplatesResourceApi get templates =>
|
| + new ProjectsTemplatesResourceApi(_requester);
|
| +
|
| + ProjectsResourceApi(commons.ApiRequester client) : _requester = client;
|
| +
|
| + /// Send a worker_message to the service.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - The project to send the WorkerMessages to.
|
| + ///
|
| + /// Completes with a [SendWorkerMessagesResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<SendWorkerMessagesResponse> workerMessages(
|
| + SendWorkerMessagesRequest request, core.String projectId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -80,72 +84,77 @@ class ProjectsResourceApi {
|
| throw new core.ArgumentError("Parameter projectId is required.");
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/WorkerMessages';
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/WorkerMessages';
|
|
|
| - var _response = _requester.request(_url,
|
| - "POST",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| - return _response.then((data) => new SendWorkerMessagesResponse.fromJson(data));
|
| + var _response = _requester.request(_url, "POST",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| + return _response
|
| + .then((data) => new SendWorkerMessagesResponse.fromJson(data));
|
| }
|
| -
|
| }
|
|
|
| -
|
| class ProjectsJobsResourceApi {
|
| final commons.ApiRequester _requester;
|
|
|
| - ProjectsJobsDebugResourceApi get debug => new ProjectsJobsDebugResourceApi(_requester);
|
| - ProjectsJobsMessagesResourceApi get messages => new ProjectsJobsMessagesResourceApi(_requester);
|
| - ProjectsJobsWorkItemsResourceApi get workItems => new ProjectsJobsWorkItemsResourceApi(_requester);
|
| -
|
| - ProjectsJobsResourceApi(commons.ApiRequester client) :
|
| - _requester = client;
|
| -
|
| - /**
|
| - * List the jobs of a project across all regions.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - The project which owns the jobs.
|
| - *
|
| - * [filter] - The kind of filter to use.
|
| - * Possible string values are:
|
| - * - "UNKNOWN" : A UNKNOWN.
|
| - * - "ALL" : A ALL.
|
| - * - "TERMINATED" : A TERMINATED.
|
| - * - "ACTIVE" : A ACTIVE.
|
| - *
|
| - * [location] - The location that contains this job.
|
| - *
|
| - * [pageToken] - Set this to the 'next_page_token' field of a previous
|
| - * response
|
| - * to request additional results in a long list.
|
| - *
|
| - * [pageSize] - If there are many jobs, limit response to at most this many.
|
| - * The actual number of jobs returned will be the lesser of max_responses
|
| - * and an unspecified server-defined limit.
|
| - *
|
| - * [view] - Level of information requested in response. Default is
|
| - * `JOB_VIEW_SUMMARY`.
|
| - * Possible string values are:
|
| - * - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN.
|
| - * - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY.
|
| - * - "JOB_VIEW_ALL" : A JOB_VIEW_ALL.
|
| - * - "JOB_VIEW_DESCRIPTION" : A JOB_VIEW_DESCRIPTION.
|
| - *
|
| - * Completes with a [ListJobsResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<ListJobsResponse> aggregated(core.String projectId, {core.String filter, core.String location, core.String pageToken, core.int pageSize, core.String view}) {
|
| + ProjectsJobsDebugResourceApi get debug =>
|
| + new ProjectsJobsDebugResourceApi(_requester);
|
| + ProjectsJobsMessagesResourceApi get messages =>
|
| + new ProjectsJobsMessagesResourceApi(_requester);
|
| + ProjectsJobsWorkItemsResourceApi get workItems =>
|
| + new ProjectsJobsWorkItemsResourceApi(_requester);
|
| +
|
| + ProjectsJobsResourceApi(commons.ApiRequester client) : _requester = client;
|
| +
|
| + /// List the jobs of a project across all regions.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - The project which owns the jobs.
|
| + ///
|
| + /// [view] - Level of information requested in response. Default is
|
| + /// `JOB_VIEW_SUMMARY`.
|
| + /// Possible string values are:
|
| + /// - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN.
|
| + /// - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY.
|
| + /// - "JOB_VIEW_ALL" : A JOB_VIEW_ALL.
|
| + /// - "JOB_VIEW_DESCRIPTION" : A JOB_VIEW_DESCRIPTION.
|
| + ///
|
| + /// [filter] - The kind of filter to use.
|
| + /// Possible string values are:
|
| + /// - "UNKNOWN" : A UNKNOWN.
|
| + /// - "ALL" : A ALL.
|
| + /// - "TERMINATED" : A TERMINATED.
|
| + /// - "ACTIVE" : A ACTIVE.
|
| + ///
|
| + /// [location] - The location that contains this job.
|
| + ///
|
| + /// [pageToken] - Set this to the 'next_page_token' field of a previous
|
| + /// response
|
| + /// to request additional results in a long list.
|
| + ///
|
| + /// [pageSize] - If there are many jobs, limit response to at most this many.
|
| + /// The actual number of jobs returned will be the lesser of max_responses
|
| + /// and an unspecified server-defined limit.
|
| + ///
|
| + /// Completes with a [ListJobsResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<ListJobsResponse> aggregated(core.String projectId,
|
| + {core.String view,
|
| + core.String filter,
|
| + core.String location,
|
| + core.String pageToken,
|
| + core.int pageSize}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -156,6 +165,9 @@ class ProjectsJobsResourceApi {
|
| if (projectId == null) {
|
| throw new core.ArgumentError("Parameter projectId is required.");
|
| }
|
| + if (view != null) {
|
| + _queryParams["view"] = [view];
|
| + }
|
| if (filter != null) {
|
| _queryParams["filter"] = [filter];
|
| }
|
| @@ -168,51 +180,49 @@ class ProjectsJobsResourceApi {
|
| if (pageSize != null) {
|
| _queryParams["pageSize"] = ["${pageSize}"];
|
| }
|
| - if (view != null) {
|
| - _queryParams["view"] = [view];
|
| - }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs:aggregated';
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/jobs:aggregated';
|
|
|
| - var _response = _requester.request(_url,
|
| - "GET",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "GET",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new ListJobsResponse.fromJson(data));
|
| }
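
A sketch of paging through `aggregated`, following the `pageToken`/`next_page_token` contract in the doc comment above; `ListJobsResponse.jobs` and `nextPageToken` are assumptions about the response class, which lies outside this excerpt (the `api` variable is the client from the earlier sketch):

```dart
Future<List<dataflow.Job>> allActiveJobs(
    dataflow.DataflowApi api, String projectId) async {
  final jobs = <dataflow.Job>[];
  String pageToken;
  do {
    // 'ACTIVE' is one of the documented filter values; pageSize caps a page.
    final page = await api.projects.jobs.aggregated(projectId,
        filter: 'ACTIVE', pageSize: 100, pageToken: pageToken);
    jobs.addAll(page.jobs ?? <dataflow.Job>[]);
    pageToken = page.nextPageToken;
  } while (pageToken != null);
  return jobs;
}
```
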
|
|
|
| - /**
|
| - * Creates a Cloud Dataflow job.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - The ID of the Cloud Platform project that the job belongs to.
|
| - *
|
| - * [location] - The location that contains this job.
|
| - *
|
| - * [replaceJobId] - Deprecated. This field is now in the Job message.
|
| - *
|
| - * [view] - The level of information requested in response.
|
| - * Possible string values are:
|
| - * - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN.
|
| - * - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY.
|
| - * - "JOB_VIEW_ALL" : A JOB_VIEW_ALL.
|
| - * - "JOB_VIEW_DESCRIPTION" : A JOB_VIEW_DESCRIPTION.
|
| - *
|
| - * Completes with a [Job].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<Job> create(Job request, core.String projectId, {core.String location, core.String replaceJobId, core.String view}) {
|
| + /// Creates a Cloud Dataflow job.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - The ID of the Cloud Platform project that the job belongs
|
| + /// to.
|
| + ///
|
| + /// [location] - The location that contains this job.
|
| + ///
|
| + /// [replaceJobId] - Deprecated. This field is now in the Job message.
|
| + ///
|
| + /// [view] - The level of information requested in response.
|
| + /// Possible string values are:
|
| + /// - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN.
|
| + /// - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY.
|
| + /// - "JOB_VIEW_ALL" : A JOB_VIEW_ALL.
|
| + /// - "JOB_VIEW_DESCRIPTION" : A JOB_VIEW_DESCRIPTION.
|
| + ///
|
| + /// Completes with a [Job].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<Job> create(Job request, core.String projectId,
|
| + {core.String location, core.String replaceJobId, core.String view}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -236,45 +246,46 @@ class ProjectsJobsResourceApi {
|
| _queryParams["view"] = [view];
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs';
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/jobs';
|
|
|
| - var _response = _requester.request(_url,
|
| - "POST",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "POST",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new Job.fromJson(data));
|
| }
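
And a sketch of submitting a job through `create`; the `Job` fields set here (`name`, `type`) and the `JOB_TYPE_BATCH` value are assumptions about the message classes further down the file, not something this hunk shows:

```dart
Future<dataflow.Job> submitJob(dataflow.DataflowApi api, String projectId) {
  final request = new dataflow.Job()
    ..name = 'example-job' // assumed Job field
    ..type = 'JOB_TYPE_BATCH'; // assumed Job field/value
  // view picks how much of the created Job the response echoes back.
  return api.projects.jobs
      .create(request, projectId, view: 'JOB_VIEW_SUMMARY');
}
```
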
|
|
|
| - /**
|
| - * Gets the state of the specified Cloud Dataflow job.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - The ID of the Cloud Platform project that the job belongs to.
|
| - *
|
| - * [jobId] - The job ID.
|
| - *
|
| - * [location] - The location that contains this job.
|
| - *
|
| - * [view] - The level of information requested in response.
|
| - * Possible string values are:
|
| - * - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN.
|
| - * - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY.
|
| - * - "JOB_VIEW_ALL" : A JOB_VIEW_ALL.
|
| - * - "JOB_VIEW_DESCRIPTION" : A JOB_VIEW_DESCRIPTION.
|
| - *
|
| - * Completes with a [Job].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<Job> get(core.String projectId, core.String jobId, {core.String location, core.String view}) {
|
| + /// Gets the state of the specified Cloud Dataflow job.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - The ID of the Cloud Platform project that the job belongs
|
| + /// to.
|
| + ///
|
| + /// [jobId] - The job ID.
|
| + ///
|
| + /// [location] - The location that contains this job.
|
| + ///
|
| + /// [view] - The level of information requested in response.
|
| + /// Possible string values are:
|
| + /// - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN.
|
| + /// - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY.
|
| + /// - "JOB_VIEW_ALL" : A JOB_VIEW_ALL.
|
| + /// - "JOB_VIEW_DESCRIPTION" : A JOB_VIEW_DESCRIPTION.
|
| + ///
|
| + /// Completes with a [Job].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<Job> get(core.String projectId, core.String jobId,
|
| + {core.String location, core.String view}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -295,41 +306,42 @@ class ProjectsJobsResourceApi {
|
| _queryParams["view"] = [view];
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId');
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/jobs/' +
|
| + commons.Escaper.ecapeVariable('$jobId');
|
|
|
| - var _response = _requester.request(_url,
|
| - "GET",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "GET",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new Job.fromJson(data));
|
| }
|
|
|
| - /**
|
| - * Request the job status.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - A project id.
|
| - *
|
| - * [jobId] - The job to get messages for.
|
| - *
|
| - * [location] - The location which contains the job specified by job_id.
|
| - *
|
| - * [startTime] - Return only metric data that has changed since this time.
|
| - * Default is to return all information about all metrics for the job.
|
| - *
|
| - * Completes with a [JobMetrics].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<JobMetrics> getMetrics(core.String projectId, core.String jobId, {core.String location, core.String startTime}) {
|
| + /// Request the job status.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - A project id.
|
| + ///
|
| + /// [jobId] - The job to get messages for.
|
| + ///
|
| + /// [location] - The location which contains the job specified by job_id.
|
| + ///
|
| + /// [startTime] - Return only metric data that has changed since this time.
|
| + /// Default is to return all information about all metrics for the job.
|
| + ///
|
| + /// Completes with a [JobMetrics].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<JobMetrics> getMetrics(core.String projectId, core.String jobId,
|
| + {core.String location, core.String startTime}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -350,59 +362,65 @@ class ProjectsJobsResourceApi {
|
| _queryParams["startTime"] = [startTime];
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/metrics';
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/jobs/' +
|
| + commons.Escaper.ecapeVariable('$jobId') +
|
| + '/metrics';
|
|
|
| - var _response = _requester.request(_url,
|
| - "GET",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "GET",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new JobMetrics.fromJson(data));
|
| }
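
A sketch of incremental polling with `getMetrics`, passing the previous response's timestamp as `startTime` so each call returns only changed metrics; `JobMetrics.metricTime` and `metrics` are assumed response fields:

```dart
Future<Null> pollMetrics(
    dataflow.DataflowApi api, String projectId, String jobId) async {
  String since; // null on the first call: return all metrics
  for (var i = 0; i < 3; i++) {
    final snapshot = await api.projects.jobs
        .getMetrics(projectId, jobId, startTime: since);
    print('got ${snapshot.metrics?.length ?? 0} metric updates');
    since = snapshot.metricTime; // assumed field: server snapshot time
    await new Future.delayed(const Duration(seconds: 30));
  }
}
```
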
|
|
|
| - /**
|
| - * List the jobs of a project in a given region.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - The project which owns the jobs.
|
| - *
|
| - * [view] - Level of information requested in response. Default is
|
| - * `JOB_VIEW_SUMMARY`.
|
| - * Possible string values are:
|
| - * - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN.
|
| - * - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY.
|
| - * - "JOB_VIEW_ALL" : A JOB_VIEW_ALL.
|
| - * - "JOB_VIEW_DESCRIPTION" : A JOB_VIEW_DESCRIPTION.
|
| - *
|
| - * [filter] - The kind of filter to use.
|
| - * Possible string values are:
|
| - * - "UNKNOWN" : A UNKNOWN.
|
| - * - "ALL" : A ALL.
|
| - * - "TERMINATED" : A TERMINATED.
|
| - * - "ACTIVE" : A ACTIVE.
|
| - *
|
| - * [location] - The location that contains this job.
|
| - *
|
| - * [pageToken] - Set this to the 'next_page_token' field of a previous
|
| - * response
|
| - * to request additional results in a long list.
|
| - *
|
| - * [pageSize] - If there are many jobs, limit response to at most this many.
|
| - * The actual number of jobs returned will be the lesser of max_responses
|
| - * and an unspecified server-defined limit.
|
| - *
|
| - * Completes with a [ListJobsResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<ListJobsResponse> list(core.String projectId, {core.String view, core.String filter, core.String location, core.String pageToken, core.int pageSize}) {
|
| + /// List the jobs of a project in a given region.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - The project which owns the jobs.
|
| + ///
|
| + /// [filter] - The kind of filter to use.
|
| + /// Possible string values are:
|
| + /// - "UNKNOWN" : A UNKNOWN.
|
| + /// - "ALL" : A ALL.
|
| + /// - "TERMINATED" : A TERMINATED.
|
| + /// - "ACTIVE" : A ACTIVE.
|
| + ///
|
| + /// [location] - The location that contains this job.
|
| + ///
|
| + /// [pageToken] - Set this to the 'next_page_token' field of a previous
|
| + /// response
|
| + /// to request additional results in a long list.
|
| + ///
|
| + /// [pageSize] - If there are many jobs, limit response to at most this many.
|
| + /// The actual number of jobs returned will be the lesser of max_responses
|
| + /// and an unspecified server-defined limit.
|
| + ///
|
| + /// [view] - Level of information requested in response. Default is
|
| + /// `JOB_VIEW_SUMMARY`.
|
| + /// Possible string values are:
|
| + /// - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN.
|
| + /// - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY.
|
| + /// - "JOB_VIEW_ALL" : A JOB_VIEW_ALL.
|
| + /// - "JOB_VIEW_DESCRIPTION" : A JOB_VIEW_DESCRIPTION.
|
| + ///
|
| + /// Completes with a [ListJobsResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<ListJobsResponse> list(core.String projectId,
|
| + {core.String filter,
|
| + core.String location,
|
| + core.String pageToken,
|
| + core.int pageSize,
|
| + core.String view}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -413,9 +431,6 @@ class ProjectsJobsResourceApi {
|
| if (projectId == null) {
|
| throw new core.ArgumentError("Parameter projectId is required.");
|
| }
|
| - if (view != null) {
|
| - _queryParams["view"] = [view];
|
| - }
|
| if (filter != null) {
|
| _queryParams["filter"] = [filter];
|
| }
|
| @@ -428,41 +443,46 @@ class ProjectsJobsResourceApi {
|
| if (pageSize != null) {
|
| _queryParams["pageSize"] = ["${pageSize}"];
|
| }
|
| + if (view != null) {
|
| + _queryParams["view"] = [view];
|
| + }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs';
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/jobs';
|
|
|
| - var _response = _requester.request(_url,
|
| - "GET",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "GET",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new ListJobsResponse.fromJson(data));
|
| }
|
|
|
| - /**
|
| - * Updates the state of an existing Cloud Dataflow job.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - The ID of the Cloud Platform project that the job belongs to.
|
| - *
|
| - * [jobId] - The job ID.
|
| - *
|
| - * [location] - The location that contains this job.
|
| - *
|
| - * Completes with a [Job].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<Job> update(Job request, core.String projectId, core.String jobId, {core.String location}) {
|
| + /// Updates the state of an existing Cloud Dataflow job.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - The ID of the Cloud Platform project that the job belongs
|
| + /// to.
|
| + ///
|
| + /// [jobId] - The job ID.
|
| + ///
|
| + /// [location] - The location that contains this job.
|
| + ///
|
| + /// Completes with a [Job].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<Job> update(
|
| + Job request, core.String projectId, core.String jobId,
|
| + {core.String location}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -483,47 +503,46 @@ class ProjectsJobsResourceApi {
|
| _queryParams["location"] = [location];
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId');
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/jobs/' +
|
| + commons.Escaper.ecapeVariable('$jobId');
|
|
|
| - var _response = _requester.request(_url,
|
| - "PUT",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "PUT",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new Job.fromJson(data));
|
| }
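
A sketch of the usual cancel flow built on `get` + `update`: fetch the job, set its requested state, and PUT it back. `requestedState` and `JOB_STATE_CANCELLED` come from the wider Dataflow API and are not shown in this diff:

```dart
Future<dataflow.Job> cancelJob(
    dataflow.DataflowApi api, String projectId, String jobId) async {
  final job = await api.projects.jobs.get(projectId, jobId);
  job.requestedState = 'JOB_STATE_CANCELLED'; // assumed field/value
  return api.projects.jobs.update(job, projectId, jobId);
}
```
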
|
| -
|
| }
|
|
|
| -
|
| class ProjectsJobsDebugResourceApi {
|
| final commons.ApiRequester _requester;
|
|
|
| - ProjectsJobsDebugResourceApi(commons.ApiRequester client) :
|
| - _requester = client;
|
| -
|
| - /**
|
| - * Get encoded debug configuration for component. Not cacheable.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - The project id.
|
| - *
|
| - * [jobId] - The job id.
|
| - *
|
| - * Completes with a [GetDebugConfigResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<GetDebugConfigResponse> getConfig(GetDebugConfigRequest request, core.String projectId, core.String jobId) {
|
| + ProjectsJobsDebugResourceApi(commons.ApiRequester client)
|
| + : _requester = client;
|
| +
|
| + /// Get encoded debug configuration for component. Not cacheable.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - The project id.
|
| + ///
|
| + /// [jobId] - The job id.
|
| + ///
|
| + /// Completes with a [GetDebugConfigResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<GetDebugConfigResponse> getConfig(
|
| + GetDebugConfigRequest request, core.String projectId, core.String jobId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -541,38 +560,42 @@ class ProjectsJobsDebugResourceApi {
|
| throw new core.ArgumentError("Parameter jobId is required.");
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/debug/getConfig';
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/jobs/' +
|
| + commons.Escaper.ecapeVariable('$jobId') +
|
| + '/debug/getConfig';
|
|
|
| - var _response = _requester.request(_url,
|
| - "POST",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "POST",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new GetDebugConfigResponse.fromJson(data));
|
| }
|
|
|
| - /**
|
| - * Send encoded debug capture data for component.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - The project id.
|
| - *
|
| - * [jobId] - The job id.
|
| - *
|
| - * Completes with a [SendDebugCaptureResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<SendDebugCaptureResponse> sendCapture(SendDebugCaptureRequest request, core.String projectId, core.String jobId) {
|
| + /// Send encoded debug capture data for component.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - The project id.
|
| + ///
|
| + /// [jobId] - The job id.
|
| + ///
|
| + /// Completes with a [SendDebugCaptureResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<SendDebugCaptureResponse> sendCapture(
|
| + SendDebugCaptureRequest request,
|
| + core.String projectId,
|
| + core.String jobId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -590,73 +613,81 @@ class ProjectsJobsDebugResourceApi {
|
| throw new core.ArgumentError("Parameter jobId is required.");
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/debug/sendCapture';
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/jobs/' +
|
| + commons.Escaper.ecapeVariable('$jobId') +
|
| + '/debug/sendCapture';
|
|
|
| - var _response = _requester.request(_url,
|
| - "POST",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| - return _response.then((data) => new SendDebugCaptureResponse.fromJson(data));
|
| + var _response = _requester.request(_url, "POST",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| + return _response
|
| + .then((data) => new SendDebugCaptureResponse.fromJson(data));
|
| }
|
| -
|
| }
|
|
|
| -
|
| class ProjectsJobsMessagesResourceApi {
|
| final commons.ApiRequester _requester;
|
|
|
| - ProjectsJobsMessagesResourceApi(commons.ApiRequester client) :
|
| - _requester = client;
|
| -
|
| - /**
|
| - * Request the job status.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - A project id.
|
| - *
|
| - * [jobId] - The job to get messages about.
|
| - *
|
| - * [location] - The location which contains the job specified by job_id.
|
| - *
|
| - * [endTime] - Return only messages with timestamps < end_time. The default is
|
| - * now
|
| - * (i.e. return up to the latest messages available).
|
| - *
|
| - * [startTime] - If specified, return only messages with timestamps >=
|
| - * start_time.
|
| - * The default is the job creation time (i.e. beginning of messages).
|
| - *
|
| - * [pageToken] - If supplied, this should be the value of next_page_token
|
| - * returned
|
| - * by an earlier call. This will cause the next page of results to
|
| - * be returned.
|
| - *
|
| - * [pageSize] - If specified, determines the maximum number of messages to
|
| - * return. If unspecified, the service may choose an appropriate
|
| - * default, or may return an arbitrarily large number of results.
|
| - *
|
| - * [minimumImportance] - Filter to only get messages with importance >= level
|
| - * Possible string values are:
|
| - * - "JOB_MESSAGE_IMPORTANCE_UNKNOWN" : A JOB_MESSAGE_IMPORTANCE_UNKNOWN.
|
| - * - "JOB_MESSAGE_DEBUG" : A JOB_MESSAGE_DEBUG.
|
| - * - "JOB_MESSAGE_DETAILED" : A JOB_MESSAGE_DETAILED.
|
| - * - "JOB_MESSAGE_BASIC" : A JOB_MESSAGE_BASIC.
|
| - * - "JOB_MESSAGE_WARNING" : A JOB_MESSAGE_WARNING.
|
| - * - "JOB_MESSAGE_ERROR" : A JOB_MESSAGE_ERROR.
|
| - *
|
| - * Completes with a [ListJobMessagesResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<ListJobMessagesResponse> list(core.String projectId, core.String jobId, {core.String location, core.String endTime, core.String startTime, core.String pageToken, core.int pageSize, core.String minimumImportance}) {
|
| + ProjectsJobsMessagesResourceApi(commons.ApiRequester client)
|
| + : _requester = client;
|
| +
|
| + /// Request the job status.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - A project id.
|
| + ///
|
| + /// [jobId] - The job to get messages about.
|
| + ///
|
| + /// [location] - The location which contains the job specified by job_id.
|
| + ///
|
| + /// [endTime] - Return only messages with timestamps < end_time. The default
|
| + /// is now
|
| + /// (i.e. return up to the latest messages available).
|
| + ///
|
| + /// [pageToken] - If supplied, this should be the value of next_page_token
|
| + /// returned
|
| + /// by an earlier call. This will cause the next page of results to
|
| + /// be returned.
|
| + ///
|
| + /// [startTime] - If specified, return only messages with timestamps >=
|
| + /// start_time.
|
| + /// The default is the job creation time (i.e. beginning of messages).
|
| + ///
|
| + /// [pageSize] - If specified, determines the maximum number of messages to
|
| + /// return. If unspecified, the service may choose an appropriate
|
| + /// default, or may return an arbitrarily large number of results.
|
| + ///
|
| + /// [minimumImportance] - Filter to only get messages with importance >=
|
| + /// level
|
| + /// Possible string values are:
|
| + /// - "JOB_MESSAGE_IMPORTANCE_UNKNOWN" : A JOB_MESSAGE_IMPORTANCE_UNKNOWN.
|
| + /// - "JOB_MESSAGE_DEBUG" : A JOB_MESSAGE_DEBUG.
|
| + /// - "JOB_MESSAGE_DETAILED" : A JOB_MESSAGE_DETAILED.
|
| + /// - "JOB_MESSAGE_BASIC" : A JOB_MESSAGE_BASIC.
|
| + /// - "JOB_MESSAGE_WARNING" : A JOB_MESSAGE_WARNING.
|
| + /// - "JOB_MESSAGE_ERROR" : A JOB_MESSAGE_ERROR.
|
| + ///
|
| + /// Completes with a [ListJobMessagesResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<ListJobMessagesResponse> list(
|
| + core.String projectId, core.String jobId,
|
| + {core.String location,
|
| + core.String endTime,
|
| + core.String pageToken,
|
| + core.String startTime,
|
| + core.int pageSize,
|
| + core.String minimumImportance}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -676,12 +707,12 @@ class ProjectsJobsMessagesResourceApi {
|
| if (endTime != null) {
|
| _queryParams["endTime"] = [endTime];
|
| }
|
| - if (startTime != null) {
|
| - _queryParams["startTime"] = [startTime];
|
| - }
|
| if (pageToken != null) {
|
| _queryParams["pageToken"] = [pageToken];
|
| }
|
| + if (startTime != null) {
|
| + _queryParams["startTime"] = [startTime];
|
| + }
|
| if (pageSize != null) {
|
| _queryParams["pageSize"] = ["${pageSize}"];
|
| }
|
| @@ -689,47 +720,47 @@ class ProjectsJobsMessagesResourceApi {
|
| _queryParams["minimumImportance"] = [minimumImportance];
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/messages';
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/jobs/' +
|
| + commons.Escaper.ecapeVariable('$jobId') +
|
| + '/messages';
|
|
|
| - var _response = _requester.request(_url,
|
| - "GET",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "GET",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new ListJobMessagesResponse.fromJson(data));
|
| }
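
A sketch of tailing a job's log with the `list` method above, filtering on `minimumImportance` and advancing `startTime` past the newest message seen; `jobMessages`, `time`, and `messageText` are assumed fields on the response and `JobMessage` classes:

```dart
Future<Null> tailMessages(
    dataflow.DataflowApi api, String projectId, String jobId) async {
  String after; // defaults to job creation time on the first call
  while (true) {
    final page = await api.projects.jobs.messages.list(projectId, jobId,
        minimumImportance: 'JOB_MESSAGE_WARNING', startTime: after);
    for (var message in page.jobMessages ?? <dataflow.JobMessage>[]) {
      print('${message.time} ${message.messageText}'); // assumed fields
      after = message.time;
    }
    await new Future.delayed(const Duration(seconds: 10));
  }
}
```
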
|
| -
|
| }
|
|
|
| -
|
| class ProjectsJobsWorkItemsResourceApi {
|
| final commons.ApiRequester _requester;
|
|
|
| - ProjectsJobsWorkItemsResourceApi(commons.ApiRequester client) :
|
| - _requester = client;
|
| -
|
| - /**
|
| - * Leases a dataflow WorkItem to run.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Identifies the project this worker belongs to.
|
| - *
|
| - * [jobId] - Identifies the workflow job this worker belongs to.
|
| - *
|
| - * Completes with a [LeaseWorkItemResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<LeaseWorkItemResponse> lease(LeaseWorkItemRequest request, core.String projectId, core.String jobId) {
|
| + ProjectsJobsWorkItemsResourceApi(commons.ApiRequester client)
|
| + : _requester = client;
|
| +
|
| + /// Leases a dataflow WorkItem to run.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Identifies the project this worker belongs to.
|
| + ///
|
| + /// [jobId] - Identifies the workflow job this worker belongs to.
|
| + ///
|
| + /// Completes with a [LeaseWorkItemResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<LeaseWorkItemResponse> lease(
|
| + LeaseWorkItemRequest request, core.String projectId, core.String jobId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -747,38 +778,42 @@ class ProjectsJobsWorkItemsResourceApi {
|
| throw new core.ArgumentError("Parameter jobId is required.");
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/workItems:lease';
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/jobs/' +
|
| + commons.Escaper.ecapeVariable('$jobId') +
|
| + '/workItems:lease';
|
|
|
| - var _response = _requester.request(_url,
|
| - "POST",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "POST",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new LeaseWorkItemResponse.fromJson(data));
|
| }
|
|
|
| - /**
|
| - * Reports the status of dataflow WorkItems leased by a worker.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - The project which owns the WorkItem's job.
|
| - *
|
| - * [jobId] - The job which the WorkItem is part of.
|
| - *
|
| - * Completes with a [ReportWorkItemStatusResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<ReportWorkItemStatusResponse> reportStatus(ReportWorkItemStatusRequest request, core.String projectId, core.String jobId) {
|
| + /// Reports the status of dataflow WorkItems leased by a worker.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - The project which owns the WorkItem's job.
|
| + ///
|
| + /// [jobId] - The job which the WorkItem is part of.
|
| + ///
|
| + /// Completes with a [ReportWorkItemStatusResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<ReportWorkItemStatusResponse> reportStatus(
|
| + ReportWorkItemStatusRequest request,
|
| + core.String projectId,
|
| + core.String jobId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -796,50 +831,55 @@ class ProjectsJobsWorkItemsResourceApi {
|
| throw new core.ArgumentError("Parameter jobId is required.");
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/workItems:reportStatus';
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/jobs/' +
|
| + commons.Escaper.ecapeVariable('$jobId') +
|
| + '/workItems:reportStatus';
|
|
|
| - var _response = _requester.request(_url,
|
| - "POST",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| - return _response.then((data) => new ReportWorkItemStatusResponse.fromJson(data));
|
| + var _response = _requester.request(_url, "POST",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| + return _response
|
| + .then((data) => new ReportWorkItemStatusResponse.fromJson(data));
|
| }
|
| -
|
| }
|
|
|
| -
|
| class ProjectsLocationsResourceApi {
|
| final commons.ApiRequester _requester;
|
|
|
| - ProjectsLocationsJobsResourceApi get jobs => new ProjectsLocationsJobsResourceApi(_requester);
|
| - ProjectsLocationsTemplatesResourceApi get templates => new ProjectsLocationsTemplatesResourceApi(_requester);
|
| -
|
| - ProjectsLocationsResourceApi(commons.ApiRequester client) :
|
| - _requester = client;
|
| -
|
| - /**
|
| - * Send a worker_message to the service.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - The project to send the WorkerMessages to.
|
| - *
|
| - * [location] - The location which contains the job
|
| - *
|
| - * Completes with a [SendWorkerMessagesResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<SendWorkerMessagesResponse> workerMessages(SendWorkerMessagesRequest request, core.String projectId, core.String location) {
|
| + ProjectsLocationsJobsResourceApi get jobs =>
|
| + new ProjectsLocationsJobsResourceApi(_requester);
|
| + ProjectsLocationsTemplatesResourceApi get templates =>
|
| + new ProjectsLocationsTemplatesResourceApi(_requester);
|
| +
|
| + ProjectsLocationsResourceApi(commons.ApiRequester client)
|
| + : _requester = client;
|
| +
|
| + /// Send a worker_message to the service.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - The project to send the WorkerMessages to.
|
| + ///
|
| + /// [location] - The location which contains the job
|
| + ///
|
| + /// Completes with a [SendWorkerMessagesResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<SendWorkerMessagesResponse> workerMessages(
|
| + SendWorkerMessagesRequest request,
|
| + core.String projectId,
|
| + core.String location) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -857,60 +897,66 @@ class ProjectsLocationsResourceApi {
|
| throw new core.ArgumentError("Parameter location is required.");
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/locations/' + commons.Escaper.ecapeVariable('$location') + '/WorkerMessages';
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/locations/' +
|
| + commons.Escaper.ecapeVariable('$location') +
|
| + '/WorkerMessages';
|
|
|
| - var _response = _requester.request(_url,
|
| - "POST",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| - return _response.then((data) => new SendWorkerMessagesResponse.fromJson(data));
|
| + var _response = _requester.request(_url, "POST",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| + return _response
|
| + .then((data) => new SendWorkerMessagesResponse.fromJson(data));
|
| }
|
| -
|
| }
|
|
|
| -
|
| class ProjectsLocationsJobsResourceApi {
|
| final commons.ApiRequester _requester;
|
|
|
| - ProjectsLocationsJobsDebugResourceApi get debug => new ProjectsLocationsJobsDebugResourceApi(_requester);
|
| - ProjectsLocationsJobsMessagesResourceApi get messages => new ProjectsLocationsJobsMessagesResourceApi(_requester);
|
| - ProjectsLocationsJobsWorkItemsResourceApi get workItems => new ProjectsLocationsJobsWorkItemsResourceApi(_requester);
|
| -
|
| - ProjectsLocationsJobsResourceApi(commons.ApiRequester client) :
|
| - _requester = client;
|
| -
|
| - /**
|
| - * Creates a Cloud Dataflow job.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - The ID of the Cloud Platform project that the job belongs to.
|
| - *
|
| - * [location] - The location that contains this job.
|
| - *
|
| - * [replaceJobId] - Deprecated. This field is now in the Job message.
|
| - *
|
| - * [view] - The level of information requested in response.
|
| - * Possible string values are:
|
| - * - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN.
|
| - * - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY.
|
| - * - "JOB_VIEW_ALL" : A JOB_VIEW_ALL.
|
| - * - "JOB_VIEW_DESCRIPTION" : A JOB_VIEW_DESCRIPTION.
|
| - *
|
| - * Completes with a [Job].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<Job> create(Job request, core.String projectId, core.String location, {core.String replaceJobId, core.String view}) {
|
| + ProjectsLocationsJobsDebugResourceApi get debug =>
|
| + new ProjectsLocationsJobsDebugResourceApi(_requester);
|
| + ProjectsLocationsJobsMessagesResourceApi get messages =>
|
| + new ProjectsLocationsJobsMessagesResourceApi(_requester);
|
| + ProjectsLocationsJobsWorkItemsResourceApi get workItems =>
|
| + new ProjectsLocationsJobsWorkItemsResourceApi(_requester);
|
| +
|
| + ProjectsLocationsJobsResourceApi(commons.ApiRequester client)
|
| + : _requester = client;
|
| +
|
| + /// Creates a Cloud Dataflow job.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - The ID of the Cloud Platform project that the job belongs
|
| + /// to.
|
| + ///
|
| + /// [location] - The location that contains this job.
|
| + ///
|
| + /// [replaceJobId] - Deprecated. This field is now in the Job message.
|
| + ///
|
| + /// [view] - The level of information requested in response.
|
| + /// Possible string values are:
|
| + /// - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN.
|
| + /// - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY.
|
| + /// - "JOB_VIEW_ALL" : A JOB_VIEW_ALL.
|
| + /// - "JOB_VIEW_DESCRIPTION" : A JOB_VIEW_DESCRIPTION.
|
| + ///
|
| + /// Completes with a [Job].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<Job> create(
|
| + Job request, core.String projectId, core.String location,
|
| + {core.String replaceJobId, core.String view}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -934,45 +980,49 @@ class ProjectsLocationsJobsResourceApi {
|
| _queryParams["view"] = [view];
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/locations/' + commons.Escaper.ecapeVariable('$location') + '/jobs';
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/locations/' +
|
| + commons.Escaper.ecapeVariable('$location') +
|
| + '/jobs';
|
|
|
| - var _response = _requester.request(_url,
|
| - "POST",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "POST",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new Job.fromJson(data));
|
| }
|
|
|
| - /**
|
| - * Gets the state of the specified Cloud Dataflow job.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - The ID of the Cloud Platform project that the job belongs to.
|
| - *
|
| - * [location] - The location that contains this job.
|
| - *
|
| - * [jobId] - The job ID.
|
| - *
|
| - * [view] - The level of information requested in response.
|
| - * Possible string values are:
|
| - * - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN.
|
| - * - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY.
|
| - * - "JOB_VIEW_ALL" : A JOB_VIEW_ALL.
|
| - * - "JOB_VIEW_DESCRIPTION" : A JOB_VIEW_DESCRIPTION.
|
| - *
|
| - * Completes with a [Job].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<Job> get(core.String projectId, core.String location, core.String jobId, {core.String view}) {
|
| + /// Gets the state of the specified Cloud Dataflow job.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - The ID of the Cloud Platform project that the job belongs
|
| + /// to.
|
| + ///
|
| + /// [location] - The location that contains this job.
|
| + ///
|
| + /// [jobId] - The job ID.
|
| + ///
|
| + /// [view] - The level of information requested in response.
|
| + /// Possible string values are:
|
| + /// - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN.
|
| + /// - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY.
|
| + /// - "JOB_VIEW_ALL" : A JOB_VIEW_ALL.
|
| + /// - "JOB_VIEW_DESCRIPTION" : A JOB_VIEW_DESCRIPTION.
|
| + ///
|
| + /// Completes with a [Job].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<Job> get(
|
| + core.String projectId, core.String location, core.String jobId,
|
| + {core.String view}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -993,41 +1043,45 @@ class ProjectsLocationsJobsResourceApi {
|
| _queryParams["view"] = [view];
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/locations/' + commons.Escaper.ecapeVariable('$location') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId');
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/locations/' +
|
| + commons.Escaper.ecapeVariable('$location') +
|
| + '/jobs/' +
|
| + commons.Escaper.ecapeVariable('$jobId');
|
|
|
| - var _response = _requester.request(_url,
|
| - "GET",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "GET",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new Job.fromJson(data));
|
| }
|
|
|
| - /**
|
| - * Request the job status.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - A project id.
|
| - *
|
| - * [location] - The location which contains the job specified by job_id.
|
| - *
|
| - * [jobId] - The job to get messages for.
|
| - *
|
| - * [startTime] - Return only metric data that has changed since this time.
|
| - * Default is to return all information about all metrics for the job.
|
| - *
|
| - * Completes with a [JobMetrics].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<JobMetrics> getMetrics(core.String projectId, core.String location, core.String jobId, {core.String startTime}) {
|
| + /// Request the metrics for a given job.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - A project id.
|
| + ///
|
| + /// [location] - The location which contains the job specified by job_id.
|
| + ///
|
| + /// [jobId] - The job to get messages for.
|
| + ///
|
| + /// [startTime] - Return only metric data that has changed since this time.
|
| + /// Default is to return all information about all metrics for the job.
|
| + ///
|
| + /// Completes with a [JobMetrics].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<JobMetrics> getMetrics(
|
| + core.String projectId, core.String location, core.String jobId,
|
| + {core.String startTime}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -1048,59 +1102,67 @@ class ProjectsLocationsJobsResourceApi {
|
| _queryParams["startTime"] = [startTime];
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/locations/' + commons.Escaper.ecapeVariable('$location') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/metrics';
|
| -
|
| - var _response = _requester.request(_url,
|
| - "GET",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/locations/' +
|
| + commons.Escaper.ecapeVariable('$location') +
|
| + '/jobs/' +
|
| + commons.Escaper.ecapeVariable('$jobId') +
|
| + '/metrics';
|
| +
|
| + var _response = _requester.request(_url, "GET",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new JobMetrics.fromJson(data));
|
| }
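|
| // Editor's sketch (imports and assumptions as in the `get` example above):
|
| // fetching only metric data that changed since an RFC 3339 timestamp via
|
| // the [startTime] parameter.
|
| void exampleGetMetrics(DataflowApi api) {
|
|   api.projects.locations.jobs
|
|       .getMetrics('my-project', 'us-central1', 'my-job-id',
|
|           startTime: '2017-10-01T00:00:00Z')
|
|       .then((JobMetrics metrics) => print(metrics.toJson()));
|
| }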
|
|
|
| - /**
|
| - * List the jobs of a project in a given region.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - The project which owns the jobs.
|
| - *
|
| - * [location] - The location that contains this job.
|
| - *
|
| - * [filter] - The kind of filter to use.
|
| - * Possible string values are:
|
| - * - "UNKNOWN" : A UNKNOWN.
|
| - * - "ALL" : A ALL.
|
| - * - "TERMINATED" : A TERMINATED.
|
| - * - "ACTIVE" : A ACTIVE.
|
| - *
|
| - * [pageToken] - Set this to the 'next_page_token' field of a previous
|
| - * response
|
| - * to request additional results in a long list.
|
| - *
|
| - * [pageSize] - If there are many jobs, limit response to at most this many.
|
| - * The actual number of jobs returned will be the lesser of max_responses
|
| - * and an unspecified server-defined limit.
|
| - *
|
| - * [view] - Level of information requested in response. Default is
|
| - * `JOB_VIEW_SUMMARY`.
|
| - * Possible string values are:
|
| - * - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN.
|
| - * - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY.
|
| - * - "JOB_VIEW_ALL" : A JOB_VIEW_ALL.
|
| - * - "JOB_VIEW_DESCRIPTION" : A JOB_VIEW_DESCRIPTION.
|
| - *
|
| - * Completes with a [ListJobsResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<ListJobsResponse> list(core.String projectId, core.String location, {core.String filter, core.String pageToken, core.int pageSize, core.String view}) {
|
| + /// List the jobs of a project in a given region.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - The project which owns the jobs.
|
| + ///
|
| + /// [location] - The location that contains this job.
|
| + ///
|
| + /// [pageToken] - Set this to the 'next_page_token' field of a previous
|
| + /// response
|
| + /// to request additional results in a long list.
|
| + ///
|
| + /// [pageSize] - If there are many jobs, limit response to at most this many.
|
| + /// The actual number of jobs returned will be the lesser of max_responses
|
| + /// and an unspecified server-defined limit.
|
| + ///
|
| + /// [view] - Level of information requested in response. Default is
|
| + /// `JOB_VIEW_SUMMARY`.
|
| + /// Possible string values are:
|
| + /// - "JOB_VIEW_UNKNOWN" : A JOB_VIEW_UNKNOWN.
|
| + /// - "JOB_VIEW_SUMMARY" : A JOB_VIEW_SUMMARY.
|
| + /// - "JOB_VIEW_ALL" : A JOB_VIEW_ALL.
|
| + /// - "JOB_VIEW_DESCRIPTION" : A JOB_VIEW_DESCRIPTION.
|
| + ///
|
| + /// [filter] - The kind of filter to use.
|
| + /// Possible string values are:
|
| + /// - "UNKNOWN" : A UNKNOWN.
|
| + /// - "ALL" : A ALL.
|
| + /// - "TERMINATED" : A TERMINATED.
|
| + /// - "ACTIVE" : A ACTIVE.
|
| + ///
|
| + /// Completes with a [ListJobsResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<ListJobsResponse> list(
|
| + core.String projectId, core.String location,
|
| + {core.String pageToken,
|
| + core.int pageSize,
|
| + core.String view,
|
| + core.String filter}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -1114,9 +1176,6 @@ class ProjectsLocationsJobsResourceApi {
|
| if (location == null) {
|
| throw new core.ArgumentError("Parameter location is required.");
|
| }
|
| - if (filter != null) {
|
| - _queryParams["filter"] = [filter];
|
| - }
|
| if (pageToken != null) {
|
| _queryParams["pageToken"] = [pageToken];
|
| }
|
| @@ -1126,41 +1185,47 @@ class ProjectsLocationsJobsResourceApi {
|
| if (view != null) {
|
| _queryParams["view"] = [view];
|
| }
|
| + if (filter != null) {
|
| + _queryParams["filter"] = [filter];
|
| + }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/locations/' + commons.Escaper.ecapeVariable('$location') + '/jobs';
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/locations/' +
|
| + commons.Escaper.ecapeVariable('$location') +
|
| + '/jobs';
|
|
|
| - var _response = _requester.request(_url,
|
| - "GET",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "GET",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new ListJobsResponse.fromJson(data));
|
| }
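|
| // Editor's sketch (imports as above): walking the job list one page at a
|
| // time. The `jobs` and `nextPageToken` fields on ListJobsResponse are
|
| // assumptions inferred from the [pageToken] documentation; they are not
|
| // shown in this hunk.
|
| void exampleListActiveJobs(DataflowApi api, {String pageToken}) {
|
|   api.projects.locations.jobs
|
|       .list('my-project', 'us-central1',
|
|           filter: 'ACTIVE', pageSize: 50, pageToken: pageToken)
|
|       .then((ListJobsResponse response) {
|
|     (response.jobs ?? <Job>[]).forEach((Job job) => print(job.toJson()));
|
|     if (response.nextPageToken != null) {
|
|       exampleListActiveJobs(api, pageToken: response.nextPageToken);
|
|     }
|
|   });
|
| }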
|
|
|
| - /**
|
| - * Updates the state of an existing Cloud Dataflow job.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - The ID of the Cloud Platform project that the job belongs to.
|
| - *
|
| - * [location] - The location that contains this job.
|
| - *
|
| - * [jobId] - The job ID.
|
| - *
|
| - * Completes with a [Job].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<Job> update(Job request, core.String projectId, core.String location, core.String jobId) {
|
| + /// Updates the state of an existing Cloud Dataflow job.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - The ID of the Cloud Platform project that the job belongs
|
| + /// to.
|
| + ///
|
| + /// [location] - The location that contains this job.
|
| + ///
|
| + /// [jobId] - The job ID.
|
| + ///
|
| + /// Completes with a [Job].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<Job> update(Job request, core.String projectId,
|
| + core.String location, core.String jobId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -1181,49 +1246,50 @@ class ProjectsLocationsJobsResourceApi {
|
| throw new core.ArgumentError("Parameter jobId is required.");
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/locations/' + commons.Escaper.ecapeVariable('$location') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId');
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/locations/' +
|
| + commons.Escaper.ecapeVariable('$location') +
|
| + '/jobs/' +
|
| + commons.Escaper.ecapeVariable('$jobId');
|
|
|
| - var _response = _requester.request(_url,
|
| - "PUT",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "PUT",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new Job.fromJson(data));
|
| }
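|
| // Editor's sketch: in practice `update` is mostly used to request a state
|
| // change such as cancellation. The `requestedState` field on Job is an
|
| // assumption from the Dataflow v1b3 surface; it is not shown in this hunk.
|
| void exampleCancelJob(DataflowApi api) {
|
|   final job = new Job()..requestedState = 'JOB_STATE_CANCELLED';
|
|   api.projects.locations.jobs
|
|       .update(job, 'my-project', 'us-central1', 'my-job-id')
|
|       .then((Job updated) => print(updated.toJson()));
|
| }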
|
| -
|
| }
|
|
|
| -
|
| class ProjectsLocationsJobsDebugResourceApi {
|
| final commons.ApiRequester _requester;
|
|
|
| - ProjectsLocationsJobsDebugResourceApi(commons.ApiRequester client) :
|
| - _requester = client;
|
| -
|
| - /**
|
| - * Get encoded debug configuration for component. Not cacheable.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - The project id.
|
| - *
|
| - * [location] - The location which contains the job specified by job_id.
|
| - *
|
| - * [jobId] - The job id.
|
| - *
|
| - * Completes with a [GetDebugConfigResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<GetDebugConfigResponse> getConfig(GetDebugConfigRequest request, core.String projectId, core.String location, core.String jobId) {
|
| + ProjectsLocationsJobsDebugResourceApi(commons.ApiRequester client)
|
| + : _requester = client;
|
| +
|
| + /// Get encoded debug configuration for component. Not cacheable.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - The project id.
|
| + ///
|
| + /// [location] - The location which contains the job specified by job_id.
|
| + ///
|
| + /// [jobId] - The job id.
|
| + ///
|
| + /// Completes with a [GetDebugConfigResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<GetDebugConfigResponse> getConfig(GetDebugConfigRequest request,
|
| + core.String projectId, core.String location, core.String jobId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -1244,40 +1310,47 @@ class ProjectsLocationsJobsDebugResourceApi {
|
| throw new core.ArgumentError("Parameter jobId is required.");
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/locations/' + commons.Escaper.ecapeVariable('$location') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/debug/getConfig';
|
| -
|
| - var _response = _requester.request(_url,
|
| - "POST",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/locations/' +
|
| + commons.Escaper.ecapeVariable('$location') +
|
| + '/jobs/' +
|
| + commons.Escaper.ecapeVariable('$jobId') +
|
| + '/debug/getConfig';
|
| +
|
| + var _response = _requester.request(_url, "POST",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new GetDebugConfigResponse.fromJson(data));
|
| }
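|
| // Editor's sketch: requesting debug configuration for a worker component.
|
| // The `debug` getter on the jobs resource and the GetDebugConfigRequest
|
| // fields (`componentId`, `workerId`) are assumptions from the v1b3
|
| // surface; this RPC is part of the worker protocol.
|
| void exampleGetDebugConfig(DataflowApi api) {
|
|   final request = new GetDebugConfigRequest()
|
|     ..componentId = 'my-component'
|
|     ..workerId = 'worker-0';
|
|   api.projects.locations.jobs.debug
|
|       .getConfig(request, 'my-project', 'us-central1', 'my-job-id')
|
|       .then((GetDebugConfigResponse response) => print(response.toJson()));
|
| }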
|
|
|
| - /**
|
| - * Send encoded debug capture data for component.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - The project id.
|
| - *
|
| - * [location] - The location which contains the job specified by job_id.
|
| - *
|
| - * [jobId] - The job id.
|
| - *
|
| - * Completes with a [SendDebugCaptureResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<SendDebugCaptureResponse> sendCapture(SendDebugCaptureRequest request, core.String projectId, core.String location, core.String jobId) {
|
| + /// Send encoded debug capture data for component.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - The project id.
|
| + ///
|
| + /// [location] - The location which contains the job specified by job_id.
|
| + ///
|
| + /// [jobId] - The job id.
|
| + ///
|
| + /// Completes with a [SendDebugCaptureResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<SendDebugCaptureResponse> sendCapture(
|
| + SendDebugCaptureRequest request,
|
| + core.String projectId,
|
| + core.String location,
|
| + core.String jobId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -1298,73 +1371,82 @@ class ProjectsLocationsJobsDebugResourceApi {
|
| throw new core.ArgumentError("Parameter jobId is required.");
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/locations/' + commons.Escaper.ecapeVariable('$location') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/debug/sendCapture';
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/locations/' +
|
| + commons.Escaper.ecapeVariable('$location') +
|
| + '/jobs/' +
|
| + commons.Escaper.ecapeVariable('$jobId') +
|
| + '/debug/sendCapture';
|
|
|
| - var _response = _requester.request(_url,
|
| - "POST",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| - return _response.then((data) => new SendDebugCaptureResponse.fromJson(data));
|
| + var _response = _requester.request(_url, "POST",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| + return _response
|
| + .then((data) => new SendDebugCaptureResponse.fromJson(data));
|
| }
|
| -
|
| }
|
|
|
| -
|
| class ProjectsLocationsJobsMessagesResourceApi {
|
| final commons.ApiRequester _requester;
|
|
|
| - ProjectsLocationsJobsMessagesResourceApi(commons.ApiRequester client) :
|
| - _requester = client;
|
| -
|
| - /**
|
| - * Request the job status.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - A project id.
|
| - *
|
| - * [location] - The location which contains the job specified by job_id.
|
| - *
|
| - * [jobId] - The job to get messages about.
|
| - *
|
| - * [endTime] - Return only messages with timestamps < end_time. The default is
|
| - * now
|
| - * (i.e. return up to the latest messages available).
|
| - *
|
| - * [startTime] - If specified, return only messages with timestamps >=
|
| - * start_time.
|
| - * The default is the job creation time (i.e. beginning of messages).
|
| - *
|
| - * [pageToken] - If supplied, this should be the value of next_page_token
|
| - * returned
|
| - * by an earlier call. This will cause the next page of results to
|
| - * be returned.
|
| - *
|
| - * [pageSize] - If specified, determines the maximum number of messages to
|
| - * return. If unspecified, the service may choose an appropriate
|
| - * default, or may return an arbitrarily large number of results.
|
| - *
|
| - * [minimumImportance] - Filter to only get messages with importance >= level
|
| - * Possible string values are:
|
| - * - "JOB_MESSAGE_IMPORTANCE_UNKNOWN" : A JOB_MESSAGE_IMPORTANCE_UNKNOWN.
|
| - * - "JOB_MESSAGE_DEBUG" : A JOB_MESSAGE_DEBUG.
|
| - * - "JOB_MESSAGE_DETAILED" : A JOB_MESSAGE_DETAILED.
|
| - * - "JOB_MESSAGE_BASIC" : A JOB_MESSAGE_BASIC.
|
| - * - "JOB_MESSAGE_WARNING" : A JOB_MESSAGE_WARNING.
|
| - * - "JOB_MESSAGE_ERROR" : A JOB_MESSAGE_ERROR.
|
| - *
|
| - * Completes with a [ListJobMessagesResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<ListJobMessagesResponse> list(core.String projectId, core.String location, core.String jobId, {core.String endTime, core.String startTime, core.String pageToken, core.int pageSize, core.String minimumImportance}) {
|
| + ProjectsLocationsJobsMessagesResourceApi(commons.ApiRequester client)
|
| + : _requester = client;
|
| +
|
| + /// Request the messages for a given job.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - A project id.
|
| + ///
|
| + /// [location] - The location which contains the job specified by job_id.
|
| + ///
|
| + /// [jobId] - The job to get messages about.
|
| + ///
|
| + /// [endTime] - Return only messages with timestamps < end_time. The default
|
| + /// is now
|
| + /// (i.e. return up to the latest messages available).
|
| + ///
|
| + /// [pageToken] - If supplied, this should be the value of next_page_token
|
| + /// returned
|
| + /// by an earlier call. This will cause the next page of results to
|
| + /// be returned.
|
| + ///
|
| + /// [startTime] - If specified, return only messages with timestamps >=
|
| + /// start_time.
|
| + /// The default is the job creation time (i.e. beginning of messages).
|
| + ///
|
| + /// [pageSize] - If specified, determines the maximum number of messages to
|
| + /// return. If unspecified, the service may choose an appropriate
|
| + /// default, or may return an arbitrarily large number of results.
|
| + ///
|
| + /// [minimumImportance] - Filter to only get messages with importance >=
|
| + /// level.
|
| + /// Possible string values are:
|
| + /// - "JOB_MESSAGE_IMPORTANCE_UNKNOWN" : A JOB_MESSAGE_IMPORTANCE_UNKNOWN.
|
| + /// - "JOB_MESSAGE_DEBUG" : A JOB_MESSAGE_DEBUG.
|
| + /// - "JOB_MESSAGE_DETAILED" : A JOB_MESSAGE_DETAILED.
|
| + /// - "JOB_MESSAGE_BASIC" : A JOB_MESSAGE_BASIC.
|
| + /// - "JOB_MESSAGE_WARNING" : A JOB_MESSAGE_WARNING.
|
| + /// - "JOB_MESSAGE_ERROR" : A JOB_MESSAGE_ERROR.
|
| + ///
|
| + /// Completes with a [ListJobMessagesResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<ListJobMessagesResponse> list(
|
| + core.String projectId, core.String location, core.String jobId,
|
| + {core.String endTime,
|
| + core.String pageToken,
|
| + core.String startTime,
|
| + core.int pageSize,
|
| + core.String minimumImportance}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -1384,12 +1466,12 @@ class ProjectsLocationsJobsMessagesResourceApi {
|
| if (endTime != null) {
|
| _queryParams["endTime"] = [endTime];
|
| }
|
| - if (startTime != null) {
|
| - _queryParams["startTime"] = [startTime];
|
| - }
|
| if (pageToken != null) {
|
| _queryParams["pageToken"] = [pageToken];
|
| }
|
| + if (startTime != null) {
|
| + _queryParams["startTime"] = [startTime];
|
| + }
|
| if (pageSize != null) {
|
| _queryParams["pageSize"] = ["${pageSize}"];
|
| }
|
| @@ -1397,49 +1479,51 @@ class ProjectsLocationsJobsMessagesResourceApi {
|
| _queryParams["minimumImportance"] = [minimumImportance];
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/locations/' + commons.Escaper.ecapeVariable('$location') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/messages';
|
| -
|
| - var _response = _requester.request(_url,
|
| - "GET",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/locations/' +
|
| + commons.Escaper.ecapeVariable('$location') +
|
| + '/jobs/' +
|
| + commons.Escaper.ecapeVariable('$jobId') +
|
| + '/messages';
|
| +
|
| + var _response = _requester.request(_url, "GET",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new ListJobMessagesResponse.fromJson(data));
|
| }
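|
| // Editor's sketch (assumptions as above, including a `messages` getter on
|
| // the jobs resource): fetching only warnings and errors emitted since a
|
| // given time, using the enum values documented above.
|
| void exampleListMessages(DataflowApi api) {
|
|   api.projects.locations.jobs.messages
|
|       .list('my-project', 'us-central1', 'my-job-id',
|
|           minimumImportance: 'JOB_MESSAGE_WARNING',
|
|           startTime: '2017-10-01T00:00:00Z')
|
|       .then((ListJobMessagesResponse response) => print(response.toJson()));
|
| }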
|
| -
|
| }
|
|
|
| -
|
| class ProjectsLocationsJobsWorkItemsResourceApi {
|
| final commons.ApiRequester _requester;
|
|
|
| - ProjectsLocationsJobsWorkItemsResourceApi(commons.ApiRequester client) :
|
| - _requester = client;
|
| -
|
| - /**
|
| - * Leases a dataflow WorkItem to run.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Identifies the project this worker belongs to.
|
| - *
|
| - * [location] - The location which contains the WorkItem's job.
|
| - *
|
| - * [jobId] - Identifies the workflow job this worker belongs to.
|
| - *
|
| - * Completes with a [LeaseWorkItemResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<LeaseWorkItemResponse> lease(LeaseWorkItemRequest request, core.String projectId, core.String location, core.String jobId) {
|
| + ProjectsLocationsJobsWorkItemsResourceApi(commons.ApiRequester client)
|
| + : _requester = client;
|
| +
|
| + /// Leases a dataflow WorkItem to run.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Identifies the project this worker belongs to.
|
| + ///
|
| + /// [location] - The location which contains the WorkItem's job.
|
| + ///
|
| + /// [jobId] - Identifies the workflow job this worker belongs to.
|
| + ///
|
| + /// Completes with a [LeaseWorkItemResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<LeaseWorkItemResponse> lease(LeaseWorkItemRequest request,
|
| + core.String projectId, core.String location, core.String jobId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -1460,40 +1544,47 @@ class ProjectsLocationsJobsWorkItemsResourceApi {
|
| throw new core.ArgumentError("Parameter jobId is required.");
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/locations/' + commons.Escaper.ecapeVariable('$location') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/workItems:lease';
|
| -
|
| - var _response = _requester.request(_url,
|
| - "POST",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/locations/' +
|
| + commons.Escaper.ecapeVariable('$location') +
|
| + '/jobs/' +
|
| + commons.Escaper.ecapeVariable('$jobId') +
|
| + '/workItems:lease';
|
| +
|
| + var _response = _requester.request(_url, "POST",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new LeaseWorkItemResponse.fromJson(data));
|
| }
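|
| // Editor's sketch: how a worker might lease work items. The `workItems`
|
| // getter and the LeaseWorkItemRequest fields are assumptions from the v1b3
|
| // surface; ordinary pipeline code never calls this worker-protocol RPC.
|
| void exampleLeaseWorkItem(DataflowApi api) {
|
|   final request = new LeaseWorkItemRequest()
|
|     ..workerId = 'worker-0'
|
|     ..workItemTypes = ['map_task']
|
|     ..requestedLeaseDuration = '300s';
|
|   api.projects.locations.jobs.workItems
|
|       .lease(request, 'my-project', 'us-central1', 'my-job-id')
|
|       .then((LeaseWorkItemResponse response) => print(response.toJson()));
|
| }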
|
|
|
| - /**
|
| - * Reports the status of dataflow WorkItems leased by a worker.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - The project which owns the WorkItem's job.
|
| - *
|
| - * [location] - The location which contains the WorkItem's job.
|
| - *
|
| - * [jobId] - The job which the WorkItem is part of.
|
| - *
|
| - * Completes with a [ReportWorkItemStatusResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<ReportWorkItemStatusResponse> reportStatus(ReportWorkItemStatusRequest request, core.String projectId, core.String location, core.String jobId) {
|
| + /// Reports the status of dataflow WorkItems leased by a worker.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - The project which owns the WorkItem's job.
|
| + ///
|
| + /// [location] - The location which contains the WorkItem's job.
|
| + ///
|
| + /// [jobId] - The job which the WorkItem is part of.
|
| + ///
|
| + /// Completes with a [ReportWorkItemStatusResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<ReportWorkItemStatusResponse> reportStatus(
|
| + ReportWorkItemStatusRequest request,
|
| + core.String projectId,
|
| + core.String location,
|
| + core.String jobId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -1514,48 +1605,51 @@ class ProjectsLocationsJobsWorkItemsResourceApi {
|
| throw new core.ArgumentError("Parameter jobId is required.");
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/locations/' + commons.Escaper.ecapeVariable('$location') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/workItems:reportStatus';
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/locations/' +
|
| + commons.Escaper.ecapeVariable('$location') +
|
| + '/jobs/' +
|
| + commons.Escaper.ecapeVariable('$jobId') +
|
| + '/workItems:reportStatus';
|
|
|
| - var _response = _requester.request(_url,
|
| - "POST",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| - return _response.then((data) => new ReportWorkItemStatusResponse.fromJson(data));
|
| + var _response = _requester.request(_url, "POST",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| + return _response
|
| + .then((data) => new ReportWorkItemStatusResponse.fromJson(data));
|
| }
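|
| // Editor's sketch, same caveats as the `lease` example: reporting a leased
|
| // work item as completed. WorkItemStatus and its fields are assumptions
|
| // from the v1b3 surface.
|
| void exampleReportStatus(DataflowApi api) {
|
|   final status = new WorkItemStatus()
|
|     ..workItemId = 'work-item-1'
|
|     ..reportIndex = '0'
|
|     ..completed = true;
|
|   final request = new ReportWorkItemStatusRequest()
|
|     ..workerId = 'worker-0'
|
|     ..workItemStatuses = [status];
|
|   api.projects.locations.jobs.workItems
|
|       .reportStatus(request, 'my-project', 'us-central1', 'my-job-id')
|
|       .then((ReportWorkItemStatusResponse r) => print(r.toJson()));
|
| }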
|
| -
|
| }
|
|
|
| -
|
| class ProjectsLocationsTemplatesResourceApi {
|
| final commons.ApiRequester _requester;
|
|
|
| - ProjectsLocationsTemplatesResourceApi(commons.ApiRequester client) :
|
| - _requester = client;
|
| -
|
| - /**
|
| - * Creates a Cloud Dataflow job from a template.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Required. The ID of the Cloud Platform project that the job
|
| - * belongs to.
|
| - *
|
| - * [location] - The location to which to direct the request.
|
| - *
|
| - * Completes with a [Job].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<Job> create(CreateJobFromTemplateRequest request, core.String projectId, core.String location) {
|
| + ProjectsLocationsTemplatesResourceApi(commons.ApiRequester client)
|
| + : _requester = client;
|
| +
|
| + /// Creates a Cloud Dataflow job from a template.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Required. The ID of the Cloud Platform project that the job
|
| + /// belongs to.
|
| + ///
|
| + /// [location] - The location to which to direct the request.
|
| + ///
|
| + /// Completes with a [Job].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<Job> create(CreateJobFromTemplateRequest request,
|
| + core.String projectId, core.String location) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -1573,45 +1667,48 @@ class ProjectsLocationsTemplatesResourceApi {
|
| throw new core.ArgumentError("Parameter location is required.");
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/locations/' + commons.Escaper.ecapeVariable('$location') + '/templates';
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/locations/' +
|
| + commons.Escaper.ecapeVariable('$location') +
|
| + '/templates';
|
|
|
| - var _response = _requester.request(_url,
|
| - "POST",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "POST",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new Job.fromJson(data));
|
| }
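|
| // Editor's sketch: creating a job from a template staged in Cloud Storage.
|
| // The CreateJobFromTemplateRequest fields (`jobName`, `gcsPath`,
|
| // `parameters`) are assumptions from the v1b3 surface; the paths are
|
| // illustrative.
|
| void exampleCreateFromTemplate(DataflowApi api) {
|
|   final request = new CreateJobFromTemplateRequest()
|
|     ..jobName = 'my-wordcount'
|
|     ..gcsPath = 'gs://my-bucket/templates/wordcount'
|
|     ..parameters = {'inputFile': 'gs://my-bucket/input.txt'};
|
|   api.projects.locations.templates
|
|       .create(request, 'my-project', 'us-central1')
|
|       .then((Job job) => print(job.toJson()));
|
| }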
|
|
|
| - /**
|
| - * Get the template associated with a template.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Required. The ID of the Cloud Platform project that the job
|
| - * belongs to.
|
| - *
|
| - * [location] - The location to which to direct the request.
|
| - *
|
| - * [view] - The view to retrieve. Defaults to METADATA_ONLY.
|
| - * Possible string values are:
|
| - * - "METADATA_ONLY" : A METADATA_ONLY.
|
| - *
|
| - * [gcsPath] - Required. A Cloud Storage path to the template from which to
|
| - * create the job.
|
| - * Must be a valid Cloud Storage URL, beginning with `gs://`.
|
| - *
|
| - * Completes with a [GetTemplateResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<GetTemplateResponse> get(core.String projectId, core.String location, {core.String view, core.String gcsPath}) {
|
| + /// Get the metadata of the template at the given Cloud Storage path.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Required. The ID of the Cloud Platform project that the job
|
| + /// belongs to.
|
| + ///
|
| + /// [location] - The location to which to direct the request.
|
| + ///
|
| + /// [view] - The view to retrieve. Defaults to METADATA_ONLY.
|
| + /// Possible string values are:
|
| + /// - "METADATA_ONLY" : A METADATA_ONLY.
|
| + ///
|
| + /// [gcsPath] - Required. A Cloud Storage path to the template from which to
|
| + /// create the job.
|
| + /// Must be a valid Cloud Storage URL, beginning with `gs://`.
|
| + ///
|
| + /// Completes with a [GetTemplateResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<GetTemplateResponse> get(
|
| + core.String projectId, core.String location,
|
| + {core.String view, core.String gcsPath}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -1632,48 +1729,51 @@ class ProjectsLocationsTemplatesResourceApi {
|
| _queryParams["gcsPath"] = [gcsPath];
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/locations/' + commons.Escaper.ecapeVariable('$location') + '/templates:get';
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/locations/' +
|
| + commons.Escaper.ecapeVariable('$location') +
|
| + '/templates:get';
|
|
|
| - var _response = _requester.request(_url,
|
| - "GET",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "GET",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new GetTemplateResponse.fromJson(data));
|
| }
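|
| // Editor's sketch: reading template metadata before a launch. Note that
|
| // [gcsPath] is documented as required even though it is declared as an
|
| // optional named parameter.
|
| void exampleGetTemplate(DataflowApi api) {
|
|   api.projects.locations.templates
|
|       .get('my-project', 'us-central1',
|
|           gcsPath: 'gs://my-bucket/templates/wordcount')
|
|       .then((GetTemplateResponse response) => print(response.toJson()));
|
| }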
|
|
|
| - /**
|
| - * Launch a template.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Required. The ID of the Cloud Platform project that the job
|
| - * belongs to.
|
| - *
|
| - * [location] - The location to which to direct the request.
|
| - *
|
| - * [gcsPath] - Required. A Cloud Storage path to the template from which to
|
| - * create
|
| - * the job.
|
| - * Must be valid Cloud Storage URL, beginning with 'gs://'.
|
| - *
|
| - * [validateOnly] - If true, the request is validated but not actually
|
| - * executed.
|
| - * Defaults to false.
|
| - *
|
| - * Completes with a [LaunchTemplateResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<LaunchTemplateResponse> launch(LaunchTemplateParameters request, core.String projectId, core.String location, {core.String gcsPath, core.bool validateOnly}) {
|
| + /// Launch a template.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Required. The ID of the Cloud Platform project that the job
|
| + /// belongs to.
|
| + ///
|
| + /// [location] - The location to which to direct the request.
|
| + ///
|
| + /// [validateOnly] - If true, the request is validated but not actually
|
| + /// executed.
|
| + /// Defaults to false.
|
| + ///
|
| + /// [gcsPath] - Required. A Cloud Storage path to the template from which to
|
| + /// create
|
| + /// the job.
|
| + /// Must be a valid Cloud Storage URL, beginning with 'gs://'.
|
| + ///
|
| + /// Completes with a [LaunchTemplateResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<LaunchTemplateResponse> launch(LaunchTemplateParameters request,
|
| + core.String projectId, core.String location,
|
| + {core.bool validateOnly, core.String gcsPath}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -1690,53 +1790,53 @@ class ProjectsLocationsTemplatesResourceApi {
|
| if (location == null) {
|
| throw new core.ArgumentError("Parameter location is required.");
|
| }
|
| - if (gcsPath != null) {
|
| - _queryParams["gcsPath"] = [gcsPath];
|
| - }
|
| if (validateOnly != null) {
|
| _queryParams["validateOnly"] = ["${validateOnly}"];
|
| }
|
| + if (gcsPath != null) {
|
| + _queryParams["gcsPath"] = [gcsPath];
|
| + }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/locations/' + commons.Escaper.ecapeVariable('$location') + '/templates:launch';
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/locations/' +
|
| + commons.Escaper.ecapeVariable('$location') +
|
| + '/templates:launch';
|
|
|
| - var _response = _requester.request(_url,
|
| - "POST",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "POST",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new LaunchTemplateResponse.fromJson(data));
|
| }
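|
| // Editor's sketch: a dry run of a template launch via [validateOnly]. The
|
| // LaunchTemplateParameters fields (`jobName`, `parameters`) are
|
| // assumptions from the v1b3 surface.
|
| void exampleLaunchTemplate(DataflowApi api) {
|
|   final params = new LaunchTemplateParameters()
|
|     ..jobName = 'my-wordcount'
|
|     ..parameters = {'inputFile': 'gs://my-bucket/input.txt'};
|
|   api.projects.locations.templates
|
|       .launch(params, 'my-project', 'us-central1',
|
|           gcsPath: 'gs://my-bucket/templates/wordcount', validateOnly: true)
|
|       .then((LaunchTemplateResponse response) => print(response.toJson()));
|
| }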
|
| -
|
| }
|
|
|
| -
|
| class ProjectsTemplatesResourceApi {
|
| final commons.ApiRequester _requester;
|
|
|
| - ProjectsTemplatesResourceApi(commons.ApiRequester client) :
|
| - _requester = client;
|
| -
|
| - /**
|
| - * Creates a Cloud Dataflow job from a template.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Required. The ID of the Cloud Platform project that the job
|
| - * belongs to.
|
| - *
|
| - * Completes with a [Job].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<Job> create(CreateJobFromTemplateRequest request, core.String projectId) {
|
| + ProjectsTemplatesResourceApi(commons.ApiRequester client)
|
| + : _requester = client;
|
| +
|
| + /// Creates a Cloud Dataflow job from a template.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Required. The ID of the Cloud Platform project that the job
|
| + /// belongs to.
|
| + ///
|
| + /// Completes with a [Job].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<Job> create(
|
| + CreateJobFromTemplateRequest request, core.String projectId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -1751,45 +1851,45 @@ class ProjectsTemplatesResourceApi {
|
| throw new core.ArgumentError("Parameter projectId is required.");
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/templates';
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/templates';
|
|
|
| - var _response = _requester.request(_url,
|
| - "POST",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "POST",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new Job.fromJson(data));
|
| }
|
|
|
| - /**
|
| - * Get the template associated with a template.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Required. The ID of the Cloud Platform project that the job
|
| - * belongs to.
|
| - *
|
| - * [location] - The location to which to direct the request.
|
| - *
|
| - * [view] - The view to retrieve. Defaults to METADATA_ONLY.
|
| - * Possible string values are:
|
| - * - "METADATA_ONLY" : A METADATA_ONLY.
|
| - *
|
| - * [gcsPath] - Required. A Cloud Storage path to the template from which to
|
| - * create the job.
|
| - * Must be a valid Cloud Storage URL, beginning with `gs://`.
|
| - *
|
| - * Completes with a [GetTemplateResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<GetTemplateResponse> get(core.String projectId, {core.String location, core.String view, core.String gcsPath}) {
|
| + /// Get the metadata of the template at the given Cloud Storage path.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Required. The ID of the Cloud Platform project that the job
|
| + /// belongs to.
|
| + ///
|
| + /// [view] - The view to retrieve. Defaults to METADATA_ONLY.
|
| + /// Possible string values are:
|
| + /// - "METADATA_ONLY" : A METADATA_ONLY.
|
| + ///
|
| + /// [gcsPath] - Required. A Cloud Storage path to the template from which to
|
| + /// create the job.
|
| + /// Must be a valid Cloud Storage URL, beginning with `gs://`.
|
| + ///
|
| + /// [location] - The location to which to direct the request.
|
| + ///
|
| + /// Completes with a [GetTemplateResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<GetTemplateResponse> get(core.String projectId,
|
| + {core.String view, core.String gcsPath, core.String location}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -1800,58 +1900,59 @@ class ProjectsTemplatesResourceApi {
|
| if (projectId == null) {
|
| throw new core.ArgumentError("Parameter projectId is required.");
|
| }
|
| - if (location != null) {
|
| - _queryParams["location"] = [location];
|
| - }
|
| if (view != null) {
|
| _queryParams["view"] = [view];
|
| }
|
| if (gcsPath != null) {
|
| _queryParams["gcsPath"] = [gcsPath];
|
| }
|
| + if (location != null) {
|
| + _queryParams["location"] = [location];
|
| + }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/templates:get';
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/templates:get';
|
|
|
| - var _response = _requester.request(_url,
|
| - "GET",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "GET",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new GetTemplateResponse.fromJson(data));
|
| }
|
|
|
| - /**
|
| - * Launch a template.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Required. The ID of the Cloud Platform project that the job
|
| - * belongs to.
|
| - *
|
| - * [location] - The location to which to direct the request.
|
| - *
|
| - * [validateOnly] - If true, the request is validated but not actually
|
| - * executed.
|
| - * Defaults to false.
|
| - *
|
| - * [gcsPath] - Required. A Cloud Storage path to the template from which to
|
| - * create
|
| - * the job.
|
| - * Must be valid Cloud Storage URL, beginning with 'gs://'.
|
| - *
|
| - * Completes with a [LaunchTemplateResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<LaunchTemplateResponse> launch(LaunchTemplateParameters request, core.String projectId, {core.String location, core.bool validateOnly, core.String gcsPath}) {
|
| + /// Launch a template.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Required. The ID of the Cloud Platform project that the job
|
| + /// belongs to.
|
| + ///
|
| + /// [location] - The location to which to direct the request.
|
| + ///
|
| + /// [validateOnly] - If true, the request is validated but not actually
|
| + /// executed.
|
| + /// Defaults to false.
|
| + ///
|
| + /// [gcsPath] - Required. A Cloud Storage path to the template from which to
|
| + /// create
|
| + /// the job.
|
| + /// Must be a valid Cloud Storage URL, beginning with 'gs://'.
|
| + ///
|
| + /// Completes with a [LaunchTemplateResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<LaunchTemplateResponse> launch(
|
| + LaunchTemplateParameters request, core.String projectId,
|
| + {core.String location, core.bool validateOnly, core.String gcsPath}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -1875,31 +1976,30 @@ class ProjectsTemplatesResourceApi {
|
| _queryParams["gcsPath"] = [gcsPath];
|
| }
|
|
|
| - _url = 'v1b3/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/templates:launch';
|
| + _url = 'v1b3/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/templates:launch';
|
|
|
| - var _response = _requester.request(_url,
|
| - "POST",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "POST",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new LaunchTemplateResponse.fromJson(data));
|
| }
|
| -
|
| }
|
|
|
| -
|
| -
|
| -/**
|
| - * Obsolete in favor of ApproximateReportedProgress and ApproximateSplitRequest.
|
| - */
|
| +/// Obsolete in favor of ApproximateReportedProgress and
|
| +/// ApproximateSplitRequest.
|
| class ApproximateProgress {
|
| - /** Obsolete. */
|
| + /// Obsolete.
|
| core.double percentComplete;
|
| - /** Obsolete. */
|
| +
|
| + /// Obsolete.
|
| Position position;
|
| - /** Obsolete. */
|
| +
|
| + /// Obsolete.
|
| core.String remainingTime;
|
|
|
| ApproximateProgress();
|
| @@ -1917,7 +2017,8 @@ class ApproximateProgress {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (percentComplete != null) {
|
| _json["percentComplete"] = percentComplete;
|
| }
|
| @@ -1931,58 +2032,58 @@ class ApproximateProgress {
|
| }
|
| }
|
|
|
| -/** A progress measurement of a WorkItem by a worker. */
|
| +/// A progress measurement of a WorkItem by a worker.
|
| class ApproximateReportedProgress {
|
| - /**
|
| - * Total amount of parallelism in the portion of input of this task that has
|
| - * already been consumed and is no longer active. In the first two examples
|
| - * above (see remaining_parallelism), the value should be 29 or 2
|
| - * respectively. The sum of remaining_parallelism and consumed_parallelism
|
| - * should equal the total amount of parallelism in this work item. If
|
| - * specified, must be finite.
|
| - */
|
| + /// Total amount of parallelism in the portion of input of this task that has
|
| + /// already been consumed and is no longer active. In the first two examples
|
| + /// above (see remaining_parallelism), the value should be 29 or 2
|
| + /// respectively. The sum of remaining_parallelism and consumed_parallelism
|
| + /// should equal the total amount of parallelism in this work item. If
|
| + /// specified, must be finite.
|
| ReportedParallelism consumedParallelism;
|
| - /**
|
| - * Completion as fraction of the input consumed, from 0.0 (beginning, nothing
|
| - * consumed), to 1.0 (end of the input, entire input consumed).
|
| - */
|
| +
|
| + /// Completion as fraction of the input consumed, from 0.0 (beginning,
|
| + /// nothing
|
| + /// consumed), to 1.0 (end of the input, entire input consumed).
|
| core.double fractionConsumed;
|
| - /** A Position within the work to represent a progress. */
|
| +
|
| + /// A Position within the work to represent progress.
|
| Position position;
|
| - /**
|
| - * Total amount of parallelism in the input of this task that remains,
|
| - * (i.e. can be delegated to this task and any new tasks via dynamic
|
| - * splitting). Always at least 1 for non-finished work items and 0 for
|
| - * finished.
|
| - *
|
| - * "Amount of parallelism" refers to how many non-empty parts of the input
|
| - * can be read in parallel. This does not necessarily equal number
|
| - * of records. An input that can be read in parallel down to the
|
| - * individual records is called "perfectly splittable".
|
| - * An example of non-perfectly parallelizable input is a block-compressed
|
| - * file format where a block of records has to be read as a whole,
|
| - * but different blocks can be read in parallel.
|
| - *
|
| - * Examples:
|
| - * * If we are processing record #30 (starting at 1) out of 50 in a perfectly
|
| - * splittable 50-record input, this value should be 21 (20 remaining + 1
|
| - * current).
|
| - * * If we are reading through block 3 in a block-compressed file consisting
|
| - * of 5 blocks, this value should be 3 (since blocks 4 and 5 can be
|
| - * processed in parallel by new tasks via dynamic splitting and the current
|
| - * task remains processing block 3).
|
| - * * If we are reading through the last block in a block-compressed file,
|
| - * or reading or processing the last record in a perfectly splittable
|
| - * input, this value should be 1, because apart from the current task, no
|
| - * additional remainder can be split off.
|
| - */
|
| +
|
| + /// Total amount of parallelism in the input of this task that remains,
|
| + /// (i.e. can be delegated to this task and any new tasks via dynamic
|
| + /// splitting). Always at least 1 for non-finished work items and 0 for
|
| + /// finished.
|
| + ///
|
| + /// "Amount of parallelism" refers to how many non-empty parts of the input
|
| + /// can be read in parallel. This does not necessarily equal number
|
| + /// of records. An input that can be read in parallel down to the
|
| + /// individual records is called "perfectly splittable".
|
| + /// An example of non-perfectly parallelizable input is a block-compressed
|
| + /// file format where a block of records has to be read as a whole,
|
| + /// but different blocks can be read in parallel.
|
| + ///
|
| + /// Examples:
|
| + /// * If we are processing record #30 (starting at 1) out of 50 in a
|
| + /// perfectly
|
| + /// splittable 50-record input, this value should be 21 (20 remaining + 1
|
| + /// current).
|
| + /// * If we are reading through block 3 in a block-compressed file consisting
|
| + /// of 5 blocks, this value should be 3 (since blocks 4 and 5 can be
|
| + /// processed in parallel by new tasks via dynamic splitting and the current
|
| + /// task remains processing block 3).
|
| + /// * If we are reading through the last block in a block-compressed file,
|
| + /// or reading or processing the last record in a perfectly splittable
|
| + /// input, this value should be 1, because apart from the current task, no
|
| + /// additional remainder can be split off.
|
| ReportedParallelism remainingParallelism;
|
|
|
| ApproximateReportedProgress();
|
|
|
| ApproximateReportedProgress.fromJson(core.Map _json) {
|
| if (_json.containsKey("consumedParallelism")) {
|
| - consumedParallelism = new ReportedParallelism.fromJson(_json["consumedParallelism"]);
|
| + consumedParallelism =
|
| + new ReportedParallelism.fromJson(_json["consumedParallelism"]);
|
| }
|
| if (_json.containsKey("fractionConsumed")) {
|
| fractionConsumed = _json["fractionConsumed"];
|
| @@ -1991,12 +2092,14 @@ class ApproximateReportedProgress {
|
| position = new Position.fromJson(_json["position"]);
|
| }
|
| if (_json.containsKey("remainingParallelism")) {
|
| - remainingParallelism = new ReportedParallelism.fromJson(_json["remainingParallelism"]);
|
| + remainingParallelism =
|
| + new ReportedParallelism.fromJson(_json["remainingParallelism"]);
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (consumedParallelism != null) {
|
| _json["consumedParallelism"] = (consumedParallelism).toJson();
|
| }
|
| @@ -2013,16 +2116,14 @@ class ApproximateReportedProgress {
|
| }
|
| }
|
|
|
| -/**
|
| - * A suggestion by the service to the worker to dynamically split the WorkItem.
|
| - */
|
| +/// A suggestion by the service to the worker to dynamically split the
|
| +/// WorkItem.
|
| class ApproximateSplitRequest {
|
| - /**
|
| - * A fraction at which to split the work item, from 0.0 (beginning of the
|
| - * input) to 1.0 (end of the input).
|
| - */
|
| + /// A fraction at which to split the work item, from 0.0 (beginning of the
|
| + /// input) to 1.0 (end of the input).
|
| core.double fractionConsumed;
|
| - /** A Position at which to split the work item. */
|
| +
|
| + /// A Position at which to split the work item.
|
| Position position;
|
|
|
| ApproximateSplitRequest();
|
| @@ -2037,7 +2138,8 @@ class ApproximateSplitRequest {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (fractionConsumed != null) {
|
| _json["fractionConsumed"] = fractionConsumed;
|
| }
|
| @@ -2048,49 +2150,46 @@ class ApproximateSplitRequest {
|
| }
|
| }
|
|
|
| -/**
|
| - * A structured message reporting an autoscaling decision made by the Dataflow
|
| - * service.
|
| - */
|
| +/// A structured message reporting an autoscaling decision made by the Dataflow
|
| +/// service.
|
| class AutoscalingEvent {
|
| - /** The current number of workers the job has. */
|
| + /// The current number of workers the job has.
|
| core.String currentNumWorkers;
|
| - /**
|
| - * A message describing why the system decided to adjust the current
|
| - * number of workers, why it failed, or why the system decided to
|
| - * not make any changes to the number of workers.
|
| - */
|
| +
|
| + /// A message describing why the system decided to adjust the current
|
| + /// number of workers, why it failed, or why the system decided to
|
| + /// not make any changes to the number of workers.
|
| StructuredMessage description;
|
| - /**
|
| - * The type of autoscaling event to report.
|
| - * Possible string values are:
|
| - * - "TYPE_UNKNOWN" : Default type for the enum. Value should never be
|
| - * returned.
|
| - * - "TARGET_NUM_WORKERS_CHANGED" : The TARGET_NUM_WORKERS_CHANGED type should
|
| - * be used when the target
|
| - * worker pool size has changed at the start of an actuation. An event
|
| - * should always be specified as TARGET_NUM_WORKERS_CHANGED if it reflects
|
| - * a change in the target_num_workers.
|
| - * - "CURRENT_NUM_WORKERS_CHANGED" : The CURRENT_NUM_WORKERS_CHANGED type
|
| - * should be used when actual worker
|
| - * pool size has been changed, but the target_num_workers has not changed.
|
| - * - "ACTUATION_FAILURE" : The ACTUATION_FAILURE type should be used when we
|
| - * want to report
|
| - * an error to the user indicating why the current number of workers
|
| - * in the pool could not be changed.
|
| - * Displayed in the current status and history widgets.
|
| - * - "NO_CHANGE" : Used when we want to report to the user a reason why we are
|
| - * not currently adjusting the number of workers.
|
| - * Should specify both target_num_workers, current_num_workers and a
|
| - * decision_message.
|
| - */
|
| +
|
| + /// The type of autoscaling event to report.
|
| + /// Possible string values are:
|
| + /// - "TYPE_UNKNOWN" : Default type for the enum. Value should never be
|
| + /// returned.
|
| + /// - "TARGET_NUM_WORKERS_CHANGED" : The TARGET_NUM_WORKERS_CHANGED type
|
| + /// should be used when the target
|
| + /// worker pool size has changed at the start of an actuation. An event
|
| + /// should always be specified as TARGET_NUM_WORKERS_CHANGED if it reflects
|
| + /// a change in the target_num_workers.
|
| + /// - "CURRENT_NUM_WORKERS_CHANGED" : The CURRENT_NUM_WORKERS_CHANGED type
|
| + /// should be used when actual worker
|
| + /// pool size has been changed, but the target_num_workers has not changed.
|
| + /// - "ACTUATION_FAILURE" : The ACTUATION_FAILURE type should be used when we
|
| + /// want to report
|
| + /// an error to the user indicating why the current number of workers
|
| + /// in the pool could not be changed.
|
| + /// Displayed in the current status and history widgets.
|
| + /// - "NO_CHANGE" : Used when we want to report to the user a reason why we
|
| + /// are
|
| + /// not currently adjusting the number of workers.
|
| + /// Should specify both target_num_workers, current_num_workers and a
|
| + /// decision_message.
|
| core.String eventType;
|
| - /** The target number of workers the worker pool wants to resize to use. */
|
| +
|
| + /// The target number of workers the worker pool wants to resize to use.
|
| core.String targetNumWorkers;
|
| - /**
|
| - * The time this event was emitted to indicate a new target or current
|
| - * num_workers value.
|
| - */
|
| +
|
| + /// The time this event was emitted to indicate a new target or current
|
| + /// num_workers value.
|
| core.String time;
|
|
|
| AutoscalingEvent();
|
| @@ -2114,7 +2213,8 @@ class AutoscalingEvent {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (currentNumWorkers != null) {
|
| _json["currentNumWorkers"] = currentNumWorkers;
|
| }
|
| @@ -2134,19 +2234,18 @@ class AutoscalingEvent {
|
| }
|
| }
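|
| // A minimal sketch of consuming the event types documented above, with a
| // hand-written stand-in for a decoded response map.
| final json = {
|   'eventType': 'ACTUATION_FAILURE',
|   'time': '2017-01-01T00:00:00Z'
| };
| final event = new AutoscalingEvent.fromJson(json);
| final note = event.eventType == 'ACTUATION_FAILURE'
|     ? 'resize failed at ${event.time}'
|     : 'workers: ${event.currentNumWorkers} -> ${event.targetNumWorkers}';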
|
|
|
| -/** Settings for WorkerPool autoscaling. */
|
| +/// Settings for WorkerPool autoscaling.
|
| class AutoscalingSettings {
|
| - /**
|
| - * The algorithm to use for autoscaling.
|
| - * Possible string values are:
|
| - * - "AUTOSCALING_ALGORITHM_UNKNOWN" : The algorithm is unknown, or
|
| - * unspecified.
|
| - * - "AUTOSCALING_ALGORITHM_NONE" : Disable autoscaling.
|
| - * - "AUTOSCALING_ALGORITHM_BASIC" : Increase worker count over time to reduce
|
| - * job execution time.
|
| - */
|
| + /// The algorithm to use for autoscaling.
|
| + /// Possible string values are:
|
| + /// - "AUTOSCALING_ALGORITHM_UNKNOWN" : The algorithm is unknown, or
|
| + /// unspecified.
|
| + /// - "AUTOSCALING_ALGORITHM_NONE" : Disable autoscaling.
|
| + /// - "AUTOSCALING_ALGORITHM_BASIC" : Increase worker count over time to
|
| + /// reduce job execution time.
|
| core.String algorithm;
|
| - /** The maximum number of workers to cap scaling at. */
|
| +
|
| + /// The maximum number of workers to cap scaling at.
|
| core.int maxNumWorkers;
|
|
|
| AutoscalingSettings();
|
| @@ -2161,7 +2260,8 @@ class AutoscalingSettings {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (algorithm != null) {
|
| _json["algorithm"] = algorithm;
|
| }
|
| @@ -2172,19 +2272,17 @@ class AutoscalingSettings {
|
| }
|
| }
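|
| // A minimal sketch: enable basic autoscaling with a ten-worker ceiling,
| // using the enum string documented above.
| final settings = new AutoscalingSettings()
|   ..algorithm = 'AUTOSCALING_ALGORITHM_BASIC'
|   ..maxNumWorkers = 10;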
|
|
|
| -/** Modeled after information exposed by /proc/stat. */
|
| +/// Modeled after information exposed by /proc/stat.
|
| class CPUTime {
|
| - /**
|
| - * Average CPU utilization rate (% non-idle cpu / second) since previous
|
| - * sample.
|
| - */
|
| + /// Average CPU utilization rate (% non-idle cpu / second) since previous
|
| + /// sample.
|
| core.double rate;
|
| - /** Timestamp of the measurement. */
|
| +
|
| + /// Timestamp of the measurement.
|
| core.String timestamp;
|
| - /**
|
| - * Total active CPU time across all cores (ie., non-idle) in milliseconds
|
| - * since start-up.
|
| - */
|
| +
|
| + /// Total active CPU time across all cores (i.e., non-idle) in milliseconds
|
| + /// since start-up.
|
| core.String totalMs;
|
|
|
| CPUTime();
|
| @@ -2202,7 +2300,8 @@ class CPUTime {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (rate != null) {
|
| _json["rate"] = rate;
|
| }
|
| @@ -2216,21 +2315,17 @@ class CPUTime {
|
| }
|
| }
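|
| // A hedged sketch of deriving a utilization figure from two samples, per
| // the field semantics above: `totalMs` is an int64 encoded as a string,
| // and `timestamp` is assumed to be RFC 3339.
| double utilization(CPUTime prev, CPUTime curr) {
|   final activeMs = int.parse(curr.totalMs) - int.parse(prev.totalMs);
|   final elapsed = DateTime.parse(curr.timestamp)
|       .difference(DateTime.parse(prev.timestamp));
|   return activeMs / elapsed.inMilliseconds; // non-idle CPU per wall second
| }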
|
|
|
| -/**
|
| - * Description of an interstitial value between transforms in an execution
|
| - * stage.
|
| - */
|
| +/// Description of an interstitial value between transforms in an execution
|
| +/// stage.
|
| class ComponentSource {
|
| - /** Dataflow service generated name for this source. */
|
| + /// Dataflow service generated name for this source.
|
| core.String name;
|
| - /**
|
| - * User name for the original user transform or collection with which this
|
| - * source is most closely associated.
|
| - */
|
| +
|
| + /// User name for the original user transform or collection with which this
|
| + /// source is most closely associated.
|
| core.String originalTransformOrCollection;
|
| - /**
|
| - * Human-readable name for this transform; may be user or system generated.
|
| - */
|
| +
|
| + /// Human-readable name for this transform; may be user or system generated.
|
| core.String userName;
|
|
|
| ComponentSource();
|
| @@ -2248,7 +2343,8 @@ class ComponentSource {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (name != null) {
|
| _json["name"] = name;
|
| }
|
| @@ -2262,18 +2358,16 @@ class ComponentSource {
|
| }
|
| }
|
|
|
| -/** Description of a transform executed as part of an execution stage. */
|
| +/// Description of a transform executed as part of an execution stage.
|
| class ComponentTransform {
|
| - /** Dataflow service generated name for this source. */
|
| + /// Dataflow service generated name for this source.
|
| core.String name;
|
| - /**
|
| - * User name for the original user transform with which this transform is
|
| - * most closely associated.
|
| - */
|
| +
|
| + /// User name for the original user transform with which this transform is
|
| + /// most closely associated.
|
| core.String originalTransform;
|
| - /**
|
| - * Human-readable name for this transform; may be user or system generated.
|
| - */
|
| +
|
| + /// Human-readable name for this transform; may be user or system generated.
|
| core.String userName;
|
|
|
| ComponentTransform();
|
| @@ -2291,7 +2385,8 @@ class ComponentTransform {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (name != null) {
|
| _json["name"] = name;
|
| }
|
| @@ -2305,19 +2400,24 @@ class ComponentTransform {
|
| }
|
| }
|
|
|
| -/** All configuration data for a particular Computation. */
|
| +/// All configuration data for a particular Computation.
|
| class ComputationTopology {
|
| - /** The ID of the computation. */
|
| + /// The ID of the computation.
|
| core.String computationId;
|
| - /** The inputs to the computation. */
|
| +
|
| + /// The inputs to the computation.
|
| core.List<StreamLocation> inputs;
|
| - /** The key ranges processed by the computation. */
|
| +
|
| + /// The key ranges processed by the computation.
|
| core.List<KeyRangeLocation> keyRanges;
|
| - /** The outputs from the computation. */
|
| +
|
| + /// The outputs from the computation.
|
| core.List<StreamLocation> outputs;
|
| - /** The state family values. */
|
| +
|
| + /// The state family values.
|
| core.List<StateFamilyConfig> stateFamilies;
|
| - /** The system stage name. */
|
| +
|
| + /// The system stage name.
|
| core.String systemStageName;
|
|
|
| ComputationTopology();
|
| @@ -2327,16 +2427,24 @@ class ComputationTopology {
|
| computationId = _json["computationId"];
|
| }
|
| if (_json.containsKey("inputs")) {
|
| - inputs = _json["inputs"].map((value) => new StreamLocation.fromJson(value)).toList();
|
| + inputs = _json["inputs"]
|
| + .map((value) => new StreamLocation.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("keyRanges")) {
|
| - keyRanges = _json["keyRanges"].map((value) => new KeyRangeLocation.fromJson(value)).toList();
|
| + keyRanges = _json["keyRanges"]
|
| + .map((value) => new KeyRangeLocation.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("outputs")) {
|
| - outputs = _json["outputs"].map((value) => new StreamLocation.fromJson(value)).toList();
|
| + outputs = _json["outputs"]
|
| + .map((value) => new StreamLocation.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("stateFamilies")) {
|
| - stateFamilies = _json["stateFamilies"].map((value) => new StateFamilyConfig.fromJson(value)).toList();
|
| + stateFamilies = _json["stateFamilies"]
|
| + .map((value) => new StateFamilyConfig.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("systemStageName")) {
|
| systemStageName = _json["systemStageName"];
|
| @@ -2344,7 +2452,8 @@ class ComputationTopology {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (computationId != null) {
|
| _json["computationId"] = computationId;
|
| }
|
| @@ -2358,7 +2467,8 @@ class ComputationTopology {
|
| _json["outputs"] = outputs.map((value) => (value).toJson()).toList();
|
| }
|
| if (stateFamilies != null) {
|
| - _json["stateFamilies"] = stateFamilies.map((value) => (value).toJson()).toList();
|
| + _json["stateFamilies"] =
|
| + stateFamilies.map((value) => (value).toJson()).toList();
|
| }
|
| if (systemStageName != null) {
|
| _json["systemStageName"] = systemStageName;
|
| @@ -2367,15 +2477,14 @@ class ComputationTopology {
|
| }
|
| }
|
|
|
| -/**
|
| - * A position that encapsulates an inner position and an index for the inner
|
| - * position. A ConcatPosition can be used by a reader of a source that
|
| - * encapsulates a set of other sources.
|
| - */
|
| +/// A position that encapsulates an inner position and an index for the inner
|
| +/// position. A ConcatPosition can be used by a reader of a source that
|
| +/// encapsulates a set of other sources.
|
| class ConcatPosition {
|
| - /** Index of the inner source. */
|
| + /// Index of the inner source.
|
| core.int index;
|
| - /** Position within the inner source. */
|
| +
|
| + /// Position within the inner source.
|
| Position position;
|
|
|
| ConcatPosition();
|
| @@ -2390,7 +2499,8 @@ class ConcatPosition {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (index != null) {
|
| _json["index"] = index;
|
| }
|
| @@ -2401,43 +2511,40 @@ class ConcatPosition {
|
| }
|
| }
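|
| // A minimal sketch: a position inside the third sub-source of a
| // concatenated source. The `recordIndex` field on Position is an
| // assumption; this hunk does not show Position's fields.
| final concat = new ConcatPosition()
|   ..index = 2 // 0-based index of the inner source
|   ..position = (new Position()..recordIndex = '42');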
|
|
|
| -/**
|
| - * CounterMetadata includes all static non-name non-value counter attributes.
|
| - */
|
| +/// CounterMetadata includes all static non-name non-value counter attributes.
|
| class CounterMetadata {
|
| - /** Human-readable description of the counter semantics. */
|
| + /// Human-readable description of the counter semantics.
|
| core.String description;
|
| - /**
|
| - * Counter aggregation kind.
|
| - * Possible string values are:
|
| - * - "INVALID" : Counter aggregation kind was not set.
|
| - * - "SUM" : Aggregated value is the sum of all contributed values.
|
| - * - "MAX" : Aggregated value is the max of all contributed values.
|
| - * - "MIN" : Aggregated value is the min of all contributed values.
|
| - * - "MEAN" : Aggregated value is the mean of all contributed values.
|
| - * - "OR" : Aggregated value represents the logical 'or' of all contributed
|
| - * values.
|
| - * - "AND" : Aggregated value represents the logical 'and' of all contributed
|
| - * values.
|
| - * - "SET" : Aggregated value is a set of unique contributed values.
|
| - * - "DISTRIBUTION" : Aggregated value captures statistics about a
|
| - * distribution.
|
| - */
|
| +
|
| + /// Counter aggregation kind.
|
| + /// Possible string values are:
|
| + /// - "INVALID" : Counter aggregation kind was not set.
|
| + /// - "SUM" : Aggregated value is the sum of all contributed values.
|
| + /// - "MAX" : Aggregated value is the max of all contributed values.
|
| + /// - "MIN" : Aggregated value is the min of all contributed values.
|
| + /// - "MEAN" : Aggregated value is the mean of all contributed values.
|
| + /// - "OR" : Aggregated value represents the logical 'or' of all contributed
|
| + /// values.
|
| + /// - "AND" : Aggregated value represents the logical 'and' of all
|
| + /// contributed values.
|
| + /// - "SET" : Aggregated value is a set of unique contributed values.
|
| + /// - "DISTRIBUTION" : Aggregated value captures statistics about a
|
| + /// distribution.
|
| core.String kind;
|
| - /** A string referring to the unit type. */
|
| +
|
| + /// A string referring to the unit type.
|
| core.String otherUnits;
|
| - /**
|
| - * System defined Units, see above enum.
|
| - * Possible string values are:
|
| - * - "BYTES" : Counter returns a value in bytes.
|
| - * - "BYTES_PER_SEC" : Counter returns a value in bytes per second.
|
| - * - "MILLISECONDS" : Counter returns a value in milliseconds.
|
| - * - "MICROSECONDS" : Counter returns a value in microseconds.
|
| - * - "NANOSECONDS" : Counter returns a value in nanoseconds.
|
| - * - "TIMESTAMP_MSEC" : Counter returns a timestamp in milliseconds.
|
| - * - "TIMESTAMP_USEC" : Counter returns a timestamp in microseconds.
|
| - * - "TIMESTAMP_NSEC" : Counter returns a timestamp in nanoseconds.
|
| - */
|
| +
|
| + /// System defined Units, see above enum.
|
| + /// Possible string values are:
|
| + /// - "BYTES" : Counter returns a value in bytes.
|
| + /// - "BYTES_PER_SEC" : Counter returns a value in bytes per second.
|
| + /// - "MILLISECONDS" : Counter returns a value in milliseconds.
|
| + /// - "MICROSECONDS" : Counter returns a value in microseconds.
|
| + /// - "NANOSECONDS" : Counter returns a value in nanoseconds.
|
| + /// - "TIMESTAMP_MSEC" : Counter returns a timestamp in milliseconds.
|
| + /// - "TIMESTAMP_USEC" : Counter returns a timestamp in microseconds.
|
| + /// - "TIMESTAMP_NSEC" : Counter returns a timestamp in nanoseconds.
|
| core.String standardUnits;
|
|
|
| CounterMetadata();
|
| @@ -2458,7 +2565,8 @@ class CounterMetadata {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (description != null) {
|
| _json["description"] = description;
|
| }
|
| @@ -2475,46 +2583,50 @@ class CounterMetadata {
|
| }
|
| }
|
|
|
| -/**
|
| - * Identifies a counter within a per-job namespace. Counters whose structured
|
| - * names are the same get merged into a single value for the job.
|
| - */
|
| +/// Identifies a counter within a per-job namespace. Counters whose structured
|
| +/// names are the same get merged into a single value for the job.
|
| class CounterStructuredName {
|
| - /** Name of the optimized step being executed by the workers. */
|
| + /// Name of the optimized step being executed by the workers.
|
| core.String componentStepName;
|
| - /**
|
| - * Name of the stage. An execution step contains multiple component steps.
|
| - */
|
| +
|
| + /// Name of the stage. An execution step contains multiple component steps.
|
| core.String executionStepName;
|
| - /**
|
| - * Counter name. Not necessarily globally-unique, but unique within the
|
| - * context of the other fields.
|
| - * Required.
|
| - */
|
| +
|
| + /// Counter name. Not necessarily globally-unique, but unique within the
|
| + /// context of the other fields.
|
| + /// Required.
|
| core.String name;
|
| - /**
|
| - * One of the standard Origins defined above.
|
| - * Possible string values are:
|
| - * - "SYSTEM" : Counter was created by the Dataflow system.
|
| - * - "USER" : Counter was created by the user.
|
| - */
|
| +
|
| + /// One of the standard Origins defined above.
|
| + /// Possible string values are:
|
| + /// - "SYSTEM" : Counter was created by the Dataflow system.
|
| + /// - "USER" : Counter was created by the user.
|
| core.String origin;
|
| - /** A string containing a more specific namespace of the counter's origin. */
|
| +
|
| + /// A string containing a more specific namespace of the counter's origin.
|
| core.String originNamespace;
|
| - /**
|
| - * System generated name of the original step in the user's graph, before
|
| - * optimization.
|
| - */
|
| +
|
| + /// The GroupByKey step name from the original graph.
|
| + core.String originalShuffleStepName;
|
| +
|
| + /// System generated name of the original step in the user's graph, before
|
| + /// optimization.
|
| core.String originalStepName;
|
| - /**
|
| - * Portion of this counter, either key or value.
|
| - * Possible string values are:
|
| - * - "ALL" : Counter portion has not been set.
|
| - * - "KEY" : Counter reports a key.
|
| - * - "VALUE" : Counter reports a value.
|
| - */
|
| +
|
| + /// Portion of this counter, either key or value.
|
| + /// Possible string values are:
|
| + /// - "ALL" : Counter portion has not been set.
|
| + /// - "KEY" : Counter reports a key.
|
| + /// - "VALUE" : Counter reports a value.
|
| core.String portion;
|
| - /** ID of a particular worker. */
|
| +
|
| + /// ID of a side input being read from/written to. Side inputs are identified
|
| + /// by a pair of (reader, input_index). The reader is usually equal to the
|
| + /// original name, but it may be different, if a ParDo emits its Iterator /
|
| + /// Map side input object.
|
| + SideInputId sideInput;
|
| +
|
| + /// ID of a particular worker.
|
| core.String workerId;
|
|
|
| CounterStructuredName();
|
| @@ -2535,19 +2647,26 @@ class CounterStructuredName {
|
| if (_json.containsKey("originNamespace")) {
|
| originNamespace = _json["originNamespace"];
|
| }
|
| + if (_json.containsKey("originalShuffleStepName")) {
|
| + originalShuffleStepName = _json["originalShuffleStepName"];
|
| + }
|
| if (_json.containsKey("originalStepName")) {
|
| originalStepName = _json["originalStepName"];
|
| }
|
| if (_json.containsKey("portion")) {
|
| portion = _json["portion"];
|
| }
|
| + if (_json.containsKey("sideInput")) {
|
| + sideInput = new SideInputId.fromJson(_json["sideInput"]);
|
| + }
|
| if (_json.containsKey("workerId")) {
|
| workerId = _json["workerId"];
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (componentStepName != null) {
|
| _json["componentStepName"] = componentStepName;
|
| }
|
| @@ -2563,12 +2682,18 @@ class CounterStructuredName {
|
| if (originNamespace != null) {
|
| _json["originNamespace"] = originNamespace;
|
| }
|
| + if (originalShuffleStepName != null) {
|
| + _json["originalShuffleStepName"] = originalShuffleStepName;
|
| + }
|
| if (originalStepName != null) {
|
| _json["originalStepName"] = originalStepName;
|
| }
|
| if (portion != null) {
|
| _json["portion"] = portion;
|
| }
|
| + if (sideInput != null) {
|
| + _json["sideInput"] = (sideInput).toJson();
|
| + }
|
| if (workerId != null) {
|
| _json["workerId"] = workerId;
|
| }
|
| @@ -2576,14 +2701,14 @@ class CounterStructuredName {
|
| }
|
| }
|
|
|
| -/**
|
| - * A single message which encapsulates structured name and metadata for a given
|
| - * counter.
|
| - */
|
| +/// A single message which encapsulates structured name and metadata for a
|
| +/// given counter.
|
| class CounterStructuredNameAndMetadata {
|
| - /** Metadata associated with a counter */
|
| + /// Metadata associated with a counter
|
| CounterMetadata metadata;
|
| - /** Structured name of the counter. */
|
| +
|
| + /// Structured name of the counter.
|
| CounterStructuredName name;
|
|
|
| CounterStructuredNameAndMetadata();
|
| @@ -2598,7 +2723,8 @@ class CounterStructuredNameAndMetadata {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (metadata != null) {
|
| _json["metadata"] = (metadata).toJson();
|
| }
|
| @@ -2609,49 +2735,56 @@ class CounterStructuredNameAndMetadata {
|
| }
|
| }
|
|
|
| -/** An update to a Counter sent from a worker. */
|
| +/// An update to a Counter sent from a worker.
|
| class CounterUpdate {
|
| - /** Boolean value for And, Or. */
|
| + /// Boolean value for And, Or.
|
| core.bool boolean;
|
| - /**
|
| - * True if this counter is reported as the total cumulative aggregate
|
| - * value accumulated since the worker started working on this WorkItem.
|
| - * By default this is false, indicating that this counter is reported
|
| - * as a delta.
|
| - */
|
| +
|
| + /// True if this counter is reported as the total cumulative aggregate
|
| + /// value accumulated since the worker started working on this WorkItem.
|
| + /// By default this is false, indicating that this counter is reported
|
| + /// as a delta.
|
| core.bool cumulative;
|
| - /** Distribution data */
|
| +
|
| + /// Distribution data
|
| DistributionUpdate distribution;
|
| - /** Floating point value for Sum, Max, Min. */
|
| +
|
| + /// Floating point value for Sum, Max, Min.
|
| core.double floatingPoint;
|
| - /** List of floating point numbers, for Set. */
|
| +
|
| + /// List of floating point numbers, for Set.
|
| FloatingPointList floatingPointList;
|
| - /** Floating point mean aggregation value for Mean. */
|
| +
|
| + /// Floating point mean aggregation value for Mean.
|
| FloatingPointMean floatingPointMean;
|
| - /** Integer value for Sum, Max, Min. */
|
| +
|
| + /// Integer value for Sum, Max, Min.
|
| SplitInt64 integer;
|
| - /** List of integers, for Set. */
|
| +
|
| + /// List of integers, for Set.
|
| IntegerList integerList;
|
| - /** Integer mean aggregation value for Mean. */
|
| +
|
| + /// Integer mean aggregation value for Mean.
|
| IntegerMean integerMean;
|
| - /**
|
| - * Value for internally-defined counters used by the Dataflow service.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// Value for internally-defined counters used by the Dataflow service.
|
| + ///
|
| + /// The values for Object must be JSON objects. It can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Object internal;
|
| - /** Counter name and aggregation type. */
|
| +
|
| + /// Counter name and aggregation type.
|
| NameAndKind nameAndKind;
|
| - /**
|
| - * The service-generated short identifier for this counter.
|
| - * The short_id -> (name, metadata) mapping is constant for the lifetime of
|
| - * a job.
|
| - */
|
| +
|
| + /// The service-generated short identifier for this counter.
|
| + /// The short_id -> (name, metadata) mapping is constant for the lifetime of
|
| + /// a job.
|
| core.String shortId;
|
| - /** List of strings, for Set. */
|
| +
|
| + /// List of strings, for Set.
|
| StringList stringList;
|
| - /** Counter structured name and metadata. */
|
| +
|
| + /// Counter structured name and metadata.
|
| CounterStructuredNameAndMetadata structuredNameAndMetadata;
|
|
|
| CounterUpdate();
|
| @@ -2670,10 +2803,12 @@ class CounterUpdate {
|
| floatingPoint = _json["floatingPoint"];
|
| }
|
| if (_json.containsKey("floatingPointList")) {
|
| - floatingPointList = new FloatingPointList.fromJson(_json["floatingPointList"]);
|
| + floatingPointList =
|
| + new FloatingPointList.fromJson(_json["floatingPointList"]);
|
| }
|
| if (_json.containsKey("floatingPointMean")) {
|
| - floatingPointMean = new FloatingPointMean.fromJson(_json["floatingPointMean"]);
|
| + floatingPointMean =
|
| + new FloatingPointMean.fromJson(_json["floatingPointMean"]);
|
| }
|
| if (_json.containsKey("integer")) {
|
| integer = new SplitInt64.fromJson(_json["integer"]);
|
| @@ -2697,12 +2832,14 @@ class CounterUpdate {
|
| stringList = new StringList.fromJson(_json["stringList"]);
|
| }
|
| if (_json.containsKey("structuredNameAndMetadata")) {
|
| - structuredNameAndMetadata = new CounterStructuredNameAndMetadata.fromJson(_json["structuredNameAndMetadata"]);
|
| + structuredNameAndMetadata = new CounterStructuredNameAndMetadata.fromJson(
|
| + _json["structuredNameAndMetadata"]);
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (boolean != null) {
|
| _json["boolean"] = boolean;
|
| }
|
| @@ -2749,21 +2886,23 @@ class CounterUpdate {
|
| }
|
| }
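|
| // A minimal sketch of a cumulative SUM counter, built from the classes in
| // this diff: CounterStructuredName(AndMetadata) and CounterMetadata.
| final update = new CounterUpdate()
|   ..cumulative = true // total since the WorkItem started, not a delta
|   ..floatingPoint = 1500.0
|   ..structuredNameAndMetadata = (new CounterStructuredNameAndMetadata()
|     ..name = (new CounterStructuredName()
|       ..name = 'elements-read'
|       ..origin = 'USER')
|     ..metadata = (new CounterMetadata()..kind = 'SUM'));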
|
|
|
| -/** A request to create a Cloud Dataflow job from a template. */
|
| +/// A request to create a Cloud Dataflow job from a template.
|
| class CreateJobFromTemplateRequest {
|
| - /** The runtime environment for the job. */
|
| + /// The runtime environment for the job.
|
| RuntimeEnvironment environment;
|
| - /**
|
| - * Required. A Cloud Storage path to the template from which to
|
| - * create the job.
|
| - * Must be a valid Cloud Storage URL, beginning with `gs://`.
|
| - */
|
| +
|
| + /// Required. A Cloud Storage path to the template from which to
|
| + /// create the job.
|
| + /// Must be a valid Cloud Storage URL, beginning with `gs://`.
|
| core.String gcsPath;
|
| - /** Required. The job name to use for the created job. */
|
| +
|
| + /// Required. The job name to use for the created job.
|
| core.String jobName;
|
| - /** The location to which to direct the request. */
|
| +
|
| + /// The location to which to direct the request.
|
| core.String location;
|
| - /** The runtime parameters to pass to the job. */
|
| +
|
| + /// The runtime parameters to pass to the job.
|
| core.Map<core.String, core.String> parameters;
|
|
|
| CreateJobFromTemplateRequest();
|
| @@ -2787,7 +2926,8 @@ class CreateJobFromTemplateRequest {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (environment != null) {
|
| _json["environment"] = (environment).toJson();
|
| }
|
| @@ -2807,9 +2947,9 @@ class CreateJobFromTemplateRequest {
|
| }
|
| }
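|
| // A minimal sketch of a template-launch request; the gs:// path and the
| // parameter names are placeholders.
| final request = new CreateJobFromTemplateRequest()
|   ..gcsPath = 'gs://my-bucket/templates/wordcount' // must begin with gs://
|   ..jobName = 'wordcount-1'
|   ..parameters = <String, String>{'inputFile': 'gs://my-bucket/input.txt'};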
|
|
|
| -/** Identifies the location of a custom souce. */
|
| +/// Identifies the location of a custom source.
|
| class CustomSourceLocation {
|
| - /** Whether this source is stateful. */
|
| + /// Whether this source is stateful.
|
| core.bool stateful;
|
|
|
| CustomSourceLocation();
|
| @@ -2821,7 +2961,8 @@ class CustomSourceLocation {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (stateful != null) {
|
| _json["stateful"] = stateful;
|
| }
|
| @@ -2829,19 +2970,16 @@ class CustomSourceLocation {
|
| }
|
| }
|
|
|
| -/** Data disk assignment for a given VM instance. */
|
| +/// Data disk assignment for a given VM instance.
|
| class DataDiskAssignment {
|
| - /**
|
| - * Mounted data disks. The order is important a data disk's 0-based index in
|
| - * this list defines which persistent directory the disk is mounted to, for
|
| - * example the list of { "myproject-1014-104817-4c2-harness-0-disk-0" },
|
| - * { "myproject-1014-104817-4c2-harness-0-disk-1" }.
|
| - */
|
| + /// Mounted data disks. The order is important: a data disk's 0-based index in
|
| + /// this list defines which persistent directory the disk is mounted to, for
|
| + /// example the list of { "myproject-1014-104817-4c2-harness-0-disk-0" },
|
| + /// { "myproject-1014-104817-4c2-harness-0-disk-1" }.
|
| core.List<core.String> dataDisks;
|
| - /**
|
| - * VM instance name the data disks mounted to, for example
|
| - * "myproject-1014-104817-4c2-harness-0".
|
| - */
|
| +
|
| + /// VM instance name the data disks are mounted to, for example
|
| + /// "myproject-1014-104817-4c2-harness-0".
|
| core.String vmInstance;
|
|
|
| DataDiskAssignment();
|
| @@ -2856,7 +2994,8 @@ class DataDiskAssignment {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (dataDisks != null) {
|
| _json["dataDisks"] = dataDisks;
|
| }
|
| @@ -2867,27 +3006,24 @@ class DataDiskAssignment {
|
| }
|
| }
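|
| // A minimal sketch of the ordering rule documented above: a disk's
| // 0-based index in `dataDisks` selects the persistent directory it is
| // mounted to.
| String diskForDirectory(DataDiskAssignment assignment, int index) =>
|     assignment.dataDisks[index]; // index 1 -> "...-harness-0-disk-1"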
|
|
|
| -/**
|
| - * Specification of one of the bundles produced as a result of splitting
|
| - * a Source (e.g. when executing a SourceSplitRequest, or when
|
| - * splitting an active task using WorkItemStatus.dynamic_source_split),
|
| - * relative to the source being split.
|
| - */
|
| +/// Specification of one of the bundles produced as a result of splitting
|
| +/// a Source (e.g. when executing a SourceSplitRequest, or when
|
| +/// splitting an active task using WorkItemStatus.dynamic_source_split),
|
| +/// relative to the source being split.
|
| class DerivedSource {
|
| - /**
|
| - * What source to base the produced source on (if any).
|
| - * Possible string values are:
|
| - * - "SOURCE_DERIVATION_MODE_UNKNOWN" : The source derivation is unknown, or
|
| - * unspecified.
|
| - * - "SOURCE_DERIVATION_MODE_INDEPENDENT" : Produce a completely independent
|
| - * Source with no base.
|
| - * - "SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT" : Produce a Source based on the
|
| - * Source being split.
|
| - * - "SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT" : Produce a Source based on
|
| - * the base of the Source being split.
|
| - */
|
| + /// What source to base the produced source on (if any).
|
| + /// Possible string values are:
|
| + /// - "SOURCE_DERIVATION_MODE_UNKNOWN" : The source derivation is unknown, or
|
| + /// unspecified.
|
| + /// - "SOURCE_DERIVATION_MODE_INDEPENDENT" : Produce a completely independent
|
| + /// Source with no base.
|
| + /// - "SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT" : Produce a Source based on
|
| + /// the Source being split.
|
| + /// - "SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT" : Produce a Source based on
|
| + /// the base of the Source being split.
|
| core.String derivationMode;
|
| - /** Specification of the source. */
|
| +
|
| + /// Specification of the source.
|
| Source source;
|
|
|
| DerivedSource();
|
| @@ -2902,7 +3038,8 @@ class DerivedSource {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (derivationMode != null) {
|
| _json["derivationMode"] = derivationMode;
|
| }
|
| @@ -2913,35 +3050,33 @@ class DerivedSource {
|
| }
|
| }
|
|
|
| -/** Describes the data disk used by a workflow job. */
|
| +/// Describes the data disk used by a workflow job.
|
| class Disk {
|
| - /**
|
| - * Disk storage type, as defined by Google Compute Engine. This
|
| - * must be a disk type appropriate to the project and zone in which
|
| - * the workers will run. If unknown or unspecified, the service
|
| - * will attempt to choose a reasonable default.
|
| - *
|
| - * For example, the standard persistent disk type is a resource name
|
| - * typically ending in "pd-standard". If SSD persistent disks are
|
| - * available, the resource name typically ends with "pd-ssd". The
|
| - * actual valid values are defined the Google Compute Engine API,
|
| - * not by the Cloud Dataflow API; consult the Google Compute Engine
|
| - * documentation for more information about determining the set of
|
| - * available disk types for a particular project and zone.
|
| - *
|
| - * Google Compute Engine Disk types are local to a particular
|
| - * project in a particular zone, and so the resource name will
|
| - * typically look something like this:
|
| - *
|
| - * compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-standard
|
| - */
|
| + /// Disk storage type, as defined by Google Compute Engine. This
|
| + /// must be a disk type appropriate to the project and zone in which
|
| + /// the workers will run. If unknown or unspecified, the service
|
| + /// will attempt to choose a reasonable default.
|
| + ///
|
| + /// For example, the standard persistent disk type is a resource name
|
| + /// typically ending in "pd-standard". If SSD persistent disks are
|
| + /// available, the resource name typically ends with "pd-ssd". The
|
| + /// actual valid values are defined by the Google Compute Engine API,
|
| + /// not by the Cloud Dataflow API; consult the Google Compute Engine
|
| + /// documentation for more information about determining the set of
|
| + /// available disk types for a particular project and zone.
|
| + ///
|
| + /// Google Compute Engine Disk types are local to a particular
|
| + /// project in a particular zone, and so the resource name will
|
| + /// typically look something like this:
|
| + ///
|
| + /// compute.googleapis.com/projects/project-id/zones/zone/diskTypes/pd-standard
|
| core.String diskType;
|
| - /** Directory in a VM where disk is mounted. */
|
| +
|
| + /// Directory in a VM where disk is mounted.
|
| core.String mountPoint;
|
| - /**
|
| - * Size of disk in GB. If zero or unspecified, the service will
|
| - * attempt to choose a reasonable default.
|
| - */
|
| +
|
| + /// Size of disk in GB. If zero or unspecified, the service will
|
| + /// attempt to choose a reasonable default.
|
| core.int sizeGb;
|
|
|
| Disk();
|
| @@ -2959,7 +3094,8 @@ class Disk {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (diskType != null) {
|
| _json["diskType"] = diskType;
|
| }
|
| @@ -2973,47 +3109,52 @@ class Disk {
|
| }
|
| }
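|
| // A minimal sketch of a standard persistent disk, using the resource-name
| // form from the doc comment; project and zone are placeholders. A sizeGb
| // of 0 lets the service choose a default.
| final disk = new Disk()
|   ..diskType = 'compute.googleapis.com/projects/my-project/zones/'
|       'us-central1-f/diskTypes/pd-standard'
|   ..mountPoint = '/mnt/dataflow'
|   ..sizeGb = 0;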
|
|
|
| -/** Data provided with a pipeline or transform to provide descriptive info. */
|
| +/// Data provided with a pipeline or transform to provide descriptive info.
|
| class DisplayData {
|
| - /** Contains value if the data is of a boolean type. */
|
| + /// Contains value if the data is of a boolean type.
|
| core.bool boolValue;
|
| - /** Contains value if the data is of duration type. */
|
| +
|
| + /// Contains value if the data is of duration type.
|
| core.String durationValue;
|
| - /** Contains value if the data is of float type. */
|
| +
|
| + /// Contains value if the data is of float type.
|
| core.double floatValue;
|
| - /** Contains value if the data is of int64 type. */
|
| +
|
| + /// Contains value if the data is of int64 type.
|
| core.String int64Value;
|
| - /** Contains value if the data is of java class type. */
|
| +
|
| + /// Contains value if the data is of java class type.
|
| core.String javaClassValue;
|
| - /**
|
| - * The key identifying the display data.
|
| - * This is intended to be used as a label for the display data
|
| - * when viewed in a dax monitoring system.
|
| - */
|
| +
|
| + /// The key identifying the display data.
|
| + /// This is intended to be used as a label for the display data
|
| + /// when viewed in a dax monitoring system.
|
| core.String key;
|
| - /** An optional label to display in a dax UI for the element. */
|
| +
|
| + /// An optional label to display in a dax UI for the element.
|
| core.String label;
|
| - /**
|
| - * The namespace for the key. This is usually a class name or programming
|
| - * language namespace (i.e. python module) which defines the display data.
|
| - * This allows a dax monitoring system to specially handle the data
|
| - * and perform custom rendering.
|
| - */
|
| +
|
| + /// The namespace for the key. This is usually a class name or programming
|
| + /// language namespace (i.e. python module) which defines the display data.
|
| + /// This allows a dax monitoring system to specially handle the data
|
| + /// and perform custom rendering.
|
| core.String namespace;
|
| - /**
|
| - * A possible additional shorter value to display.
|
| - * For example a java_class_name_value of com.mypackage.MyDoFn
|
| - * will be stored with MyDoFn as the short_str_value and
|
| - * com.mypackage.MyDoFn as the java_class_name value.
|
| - * short_str_value can be displayed and java_class_name_value
|
| - * will be displayed as a tooltip.
|
| - */
|
| +
|
| + /// A possible additional shorter value to display.
|
| + /// For example a java_class_name_value of com.mypackage.MyDoFn
|
| + /// will be stored with MyDoFn as the short_str_value and
|
| + /// com.mypackage.MyDoFn as the java_class_name value.
|
| + /// short_str_value can be displayed and java_class_name_value
|
| + /// will be displayed as a tooltip.
|
| core.String shortStrValue;
|
| - /** Contains value if the data is of string type. */
|
| +
|
| + /// Contains value if the data is of string type.
|
| core.String strValue;
|
| - /** Contains value if the data is of timestamp type. */
|
| +
|
| + /// Contains value if the data is of timestamp type.
|
| core.String timestampValue;
|
| - /** An optional full URL. */
|
| +
|
| + /// An optional full URL.
|
| core.String url;
|
|
|
| DisplayData();
|
| @@ -3058,7 +3199,8 @@ class DisplayData {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (boolValue != null) {
|
| _json["boolValue"] = boolValue;
|
| }
|
| @@ -3099,25 +3241,27 @@ class DisplayData {
|
| }
|
| }
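|
| // A minimal sketch of the short/long pairing described above: the short
| // value is displayed, and the java class value serves as its tooltip.
| final display = new DisplayData()
|   ..namespace = 'com.mypackage'
|   ..key = 'fn'
|   ..javaClassValue = 'com.mypackage.MyDoFn'
|   ..shortStrValue = 'MyDoFn';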
|
|
|
| -/** A metric value representing a distribution. */
|
| +/// A metric value representing a distribution.
|
| class DistributionUpdate {
|
| - /** The count of the number of elements present in the distribution. */
|
| + /// The count of the number of elements present in the distribution.
|
| SplitInt64 count;
|
| - /**
|
| - * (Optional) Logarithmic histogram of values.
|
| - * Each log may be in no more than one bucket. Order does not matter.
|
| - */
|
| +
|
| + /// (Optional) Logarithmic histogram of values.
|
| + /// Each log may be in no more than one bucket. Order does not matter.
|
| core.List<LogBucket> logBuckets;
|
| - /** The maximum value present in the distribution. */
|
| +
|
| + /// The maximum value present in the distribution.
|
| SplitInt64 max;
|
| - /** The minimum value present in the distribution. */
|
| +
|
| + /// The minimum value present in the distribution.
|
| SplitInt64 min;
|
| - /**
|
| - * Use an int64 since we'd prefer the added precision. If overflow is a common
|
| - * problem we can detect it and use an additional int64 or a double.
|
| - */
|
| +
|
| + /// Use an int64 since we'd prefer the added precision. If overflow is a
|
| + /// common problem, we can detect it and use an additional int64 or a
|
| + /// double.
|
| SplitInt64 sum;
|
| - /** Use a double since the sum of squares is likely to overflow int64. */
|
| +
|
| + /// Use a double since the sum of squares is likely to overflow int64.
|
| core.double sumOfSquares;
|
|
|
| DistributionUpdate();
|
| @@ -3127,7 +3271,9 @@ class DistributionUpdate {
|
| count = new SplitInt64.fromJson(_json["count"]);
|
| }
|
| if (_json.containsKey("logBuckets")) {
|
| - logBuckets = _json["logBuckets"].map((value) => new LogBucket.fromJson(value)).toList();
|
| + logBuckets = _json["logBuckets"]
|
| + .map((value) => new LogBucket.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("max")) {
|
| max = new SplitInt64.fromJson(_json["max"]);
|
| @@ -3144,12 +3290,14 @@ class DistributionUpdate {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (count != null) {
|
| _json["count"] = (count).toJson();
|
| }
|
| if (logBuckets != null) {
|
| - _json["logBuckets"] = logBuckets.map((value) => (value).toJson()).toList();
|
| + _json["logBuckets"] =
|
| + logBuckets.map((value) => (value).toJson()).toList();
|
| }
|
| if (max != null) {
|
| _json["max"] = (max).toJson();
|
| @@ -3167,22 +3315,17 @@ class DistributionUpdate {
|
| }
|
| }
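|
| // A hedged sketch of recovering summary statistics from the fields above,
| // assuming `count` and `sum` have already been decoded from SplitInt64
| // into plain ints.
| double mean(int sum, int count) => sum / count;
|
| double variance(int sum, int count, double sumOfSquares) {
|   final m = sum / count;
|   return sumOfSquares / count - m * m; // E[x^2] - (E[x])^2
| }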
|
|
|
| -/**
|
| - * When a task splits using WorkItemStatus.dynamic_source_split, this
|
| - * message describes the two parts of the split relative to the
|
| - * description of the current task's input.
|
| - */
|
| +/// When a task splits using WorkItemStatus.dynamic_source_split, this
|
| +/// message describes the two parts of the split relative to the
|
| +/// description of the current task's input.
|
| class DynamicSourceSplit {
|
| - /**
|
| - * Primary part (continued to be processed by worker).
|
| - * Specified relative to the previously-current source.
|
| - * Becomes current.
|
| - */
|
| + /// Primary part (continued to be processed by worker).
|
| + /// Specified relative to the previously-current source.
|
| + /// Becomes current.
|
| DerivedSource primary;
|
| - /**
|
| - * Residual part (returned to the pool of work).
|
| - * Specified relative to the previously-current source.
|
| - */
|
| +
|
| + /// Residual part (returned to the pool of work).
|
| + /// Specified relative to the previously-current source.
|
| DerivedSource residual;
|
|
|
| DynamicSourceSplit();
|
| @@ -3197,7 +3340,8 @@ class DynamicSourceSplit {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (primary != null) {
|
| _json["primary"] = (primary).toJson();
|
| }
|
| @@ -3208,81 +3352,74 @@ class DynamicSourceSplit {
|
| }
|
| }
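|
| // A minimal sketch: both halves of a dynamic split are described relative
| // to the source being split. Source's own fields are not shown in this
| // hunk, so the sub-sources are left unconfigured.
| final sourceSplit = new DynamicSourceSplit()
|   ..primary = (new DerivedSource()
|     ..derivationMode = 'SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT'
|     ..source = new Source())
|   ..residual = (new DerivedSource()
|     ..derivationMode = 'SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT'
|     ..source = new Source());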
|
|
|
| -/** Describes the environment in which a Dataflow Job runs. */
|
| +/// Describes the environment in which a Dataflow Job runs.
|
| class Environment {
|
| - /**
|
| - * The type of cluster manager API to use. If unknown or
|
| - * unspecified, the service will attempt to choose a reasonable
|
| - * default. This should be in the form of the API service name,
|
| - * e.g. "compute.googleapis.com".
|
| - */
|
| + /// The type of cluster manager API to use. If unknown or
|
| + /// unspecified, the service will attempt to choose a reasonable
|
| + /// default. This should be in the form of the API service name,
|
| + /// e.g. "compute.googleapis.com".
|
| core.String clusterManagerApiService;
|
| - /**
|
| - * The dataset for the current project where various workflow
|
| - * related tables are stored.
|
| - *
|
| - * The supported resource type is:
|
| - *
|
| - * Google BigQuery:
|
| - * bigquery.googleapis.com/{dataset}
|
| - */
|
| +
|
| + /// The dataset for the current project where various workflow
|
| + /// related tables are stored.
|
| + ///
|
| + /// The supported resource type is:
|
| + ///
|
| + /// Google BigQuery:
|
| + /// bigquery.googleapis.com/{dataset}
|
| core.String dataset;
|
| - /** The list of experiments to enable. */
|
| +
|
| + /// The list of experiments to enable.
|
| core.List<core.String> experiments;
|
| - /**
|
| - * Experimental settings.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// Experimental settings.
|
| + ///
|
| + /// The values for Object must be JSON objects. It can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Map<core.String, core.Object> internalExperiments;
|
| - /**
|
| - * The Cloud Dataflow SDK pipeline options specified by the user. These
|
| - * options are passed through the service and are used to recreate the
|
| - * SDK pipeline options on the worker in a language agnostic and platform
|
| - * independent way.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// The Cloud Dataflow SDK pipeline options specified by the user. These
|
| + /// options are passed through the service and are used to recreate the
|
| + /// SDK pipeline options on the worker in a language agnostic and platform
|
| + /// independent way.
|
| + ///
|
| + /// The values for Object must be JSON objects. It can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Map<core.String, core.Object> sdkPipelineOptions;
|
| - /** Identity to run virtual machines as. Defaults to the default account. */
|
| +
|
| + /// Identity to run virtual machines as. Defaults to the default account.
|
| core.String serviceAccountEmail;
|
| - /**
|
| - * The prefix of the resources the system should use for temporary
|
| - * storage. The system will append the suffix "/temp-{JOBNAME} to
|
| - * this resource prefix, where {JOBNAME} is the value of the
|
| - * job_name field. The resulting bucket and object prefix is used
|
| - * as the prefix of the resources used to store temporary data
|
| - * needed during the job execution. NOTE: This will override the
|
| - * value in taskrunner_settings.
|
| - * The supported resource type is:
|
| - *
|
| - * Google Cloud Storage:
|
| - *
|
| - * storage.googleapis.com/{bucket}/{object}
|
| - * bucket.storage.googleapis.com/{object}
|
| - */
|
| +
|
| + /// The prefix of the resources the system should use for temporary
|
| + /// storage. The system will append the suffix "/temp-{JOBNAME}" to
|
| + /// this resource prefix, where {JOBNAME} is the value of the
|
| + /// job_name field. The resulting bucket and object prefix is used
|
| + /// as the prefix of the resources used to store temporary data
|
| + /// needed during the job execution. NOTE: This will override the
|
| + /// value in taskrunner_settings.
|
| + /// The supported resource type is:
|
| + ///
|
| + /// Google Cloud Storage:
|
| + ///
|
| + /// storage.googleapis.com/{bucket}/{object}
|
| + /// bucket.storage.googleapis.com/{object}
|
| core.String tempStoragePrefix;
|
| - /**
|
| - * A description of the process that generated the request.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// A description of the process that generated the request.
|
| + ///
|
| + /// The values for Object must be JSON objects. It can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Map<core.String, core.Object> userAgent;
|
| - /**
|
| - * A structure describing which components and their versions of the service
|
| - * are required in order to run the job.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// A structure describing which components and their versions of the service
|
| + /// are required in order to run the job.
|
| + ///
|
| + /// The values for Object must be JSON objects. It can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Map<core.String, core.Object> version;
|
| - /**
|
| - * The worker pools. At least one "harness" worker pool must be
|
| - * specified in order for the job to have workers.
|
| - */
|
| +
|
| + /// The worker pools. At least one "harness" worker pool must be
|
| + /// specified in order for the job to have workers.
|
| core.List<WorkerPool> workerPools;
|
|
|
| Environment();
|
| @@ -3316,12 +3453,15 @@ class Environment {
|
| version = _json["version"];
|
| }
|
| if (_json.containsKey("workerPools")) {
|
| - workerPools = _json["workerPools"].map((value) => new WorkerPool.fromJson(value)).toList();
|
| + workerPools = _json["workerPools"]
|
| + .map((value) => new WorkerPool.fromJson(value))
|
| + .toList();
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (clusterManagerApiService != null) {
|
| _json["clusterManagerApiService"] = clusterManagerApiService;
|
| }
|
| @@ -3350,69 +3490,77 @@ class Environment {
|
| _json["version"] = version;
|
| }
|
| if (workerPools != null) {
|
| - _json["workerPools"] = workerPools.map((value) => (value).toJson()).toList();
|
| + _json["workerPools"] =
|
| + workerPools.map((value) => (value).toJson()).toList();
|
| }
|
| return _json;
|
| }
|
| }
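|
| // A minimal sketch of the temp-prefix rule in the doc comment above: the
| // service appends "/temp-{JOBNAME}" to the configured prefix. Bucket and
| // job names are placeholders.
| final env = new Environment()
|   ..tempStoragePrefix = 'storage.googleapis.com/my-bucket/staging';
| final effectiveTempPrefix = '${env.tempStoragePrefix}/temp-my-job';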
|
|
|
| -/** A message describing the state of a particular execution stage. */
|
| +/// A message describing the state of a particular execution stage.
|
| class ExecutionStageState {
|
| - /** The time at which the stage transitioned to this state. */
|
| + /// The time at which the stage transitioned to this state.
|
| core.String currentStateTime;
|
| - /** The name of the execution stage. */
|
| +
|
| + /// The name of the execution stage.
|
| core.String executionStageName;
|
| - /**
|
| - * Executions stage states allow the same set of values as JobState.
|
| - * Possible string values are:
|
| - * - "JOB_STATE_UNKNOWN" : The job's run state isn't specified.
|
| - * - "JOB_STATE_STOPPED" : `JOB_STATE_STOPPED` indicates that the job has not
|
| - * yet started to run.
|
| - * - "JOB_STATE_RUNNING" : `JOB_STATE_RUNNING` indicates that the job is
|
| - * currently running.
|
| - * - "JOB_STATE_DONE" : `JOB_STATE_DONE` indicates that the job has
|
| - * successfully completed.
|
| - * This is a terminal job state. This state may be set by the Cloud Dataflow
|
| - * service, as a transition from `JOB_STATE_RUNNING`. It may also be set via a
|
| - * Cloud Dataflow `UpdateJob` call, if the job has not yet reached a terminal
|
| - * state.
|
| - * - "JOB_STATE_FAILED" : `JOB_STATE_FAILED` indicates that the job has
|
| - * failed. This is a
|
| - * terminal job state. This state may only be set by the Cloud Dataflow
|
| - * service, and only as a transition from `JOB_STATE_RUNNING`.
|
| - * - "JOB_STATE_CANCELLED" : `JOB_STATE_CANCELLED` indicates that the job has
|
| - * been explicitly
|
| - * cancelled. This is a terminal job state. This state may only be
|
| - * set via a Cloud Dataflow `UpdateJob` call, and only if the job has not
|
| - * yet reached another terminal state.
|
| - * - "JOB_STATE_UPDATED" : `JOB_STATE_UPDATED` indicates that the job was
|
| - * successfully updated,
|
| - * meaning that this job was stopped and another job was started, inheriting
|
| - * state from this one. This is a terminal job state. This state may only be
|
| - * set by the Cloud Dataflow service, and only as a transition from
|
| - * `JOB_STATE_RUNNING`.
|
| - * - "JOB_STATE_DRAINING" : `JOB_STATE_DRAINING` indicates that the job is in
|
| - * the process of draining.
|
| - * A draining job has stopped pulling from its input sources and is processing
|
| - * any data that remains in-flight. This state may be set via a Cloud Dataflow
|
| - * `UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. Jobs
|
| - * that are draining may only transition to `JOB_STATE_DRAINED`,
|
| - * `JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`.
|
| - * - "JOB_STATE_DRAINED" : `JOB_STATE_DRAINED` indicates that the job has been
|
| - * drained.
|
| - * A drained job terminated by stopping pulling from its input sources and
|
| - * processing any data that remained in-flight when draining was requested.
|
| - * This state is a terminal state, may only be set by the Cloud Dataflow
|
| - * service, and only as a transition from `JOB_STATE_DRAINING`.
|
| - * - "JOB_STATE_PENDING" : 'JOB_STATE_PENDING' indicates that the job has been
|
| - * created but is not yet
|
| - * running. Jobs that are pending may only transition to `JOB_STATE_RUNNING`,
|
| - * or `JOB_STATE_FAILED`.
|
| - * - "JOB_STATE_CANCELLING" : 'JOB_STATE_CANCELLING' indicates that the job
|
| - * has been explicitly cancelled
|
| - * and is in the process of stopping. Jobs that are cancelling may only
|
| - * transition to 'JOB_STATE_CANCELLED' or 'JOB_STATE_FAILED'.
|
| - */
|
| +
|
| + /// Execution stage states allow the same set of values as JobState.
|
| + /// Possible string values are:
|
| + /// - "JOB_STATE_UNKNOWN" : The job's run state isn't specified.
|
| + /// - "JOB_STATE_STOPPED" : `JOB_STATE_STOPPED` indicates that the job has
|
| + /// not
|
| + /// yet started to run.
|
| + /// - "JOB_STATE_RUNNING" : `JOB_STATE_RUNNING` indicates that the job is
|
| + /// currently running.
|
| + /// - "JOB_STATE_DONE" : `JOB_STATE_DONE` indicates that the job has
|
| + /// successfully completed.
|
| + /// This is a terminal job state. This state may be set by the Cloud
|
| + /// Dataflow service, as a transition from `JOB_STATE_RUNNING`. It may
|
| + /// also be set via a Cloud Dataflow `UpdateJob` call, if the job has not
|
| + /// yet reached a terminal state.
|
| + /// - "JOB_STATE_FAILED" : `JOB_STATE_FAILED` indicates that the job has
|
| + /// failed. This is a
|
| + /// terminal job state. This state may only be set by the Cloud Dataflow
|
| + /// service, and only as a transition from `JOB_STATE_RUNNING`.
|
| + /// - "JOB_STATE_CANCELLED" : `JOB_STATE_CANCELLED` indicates that the job
|
| + /// has been explicitly
|
| + /// cancelled. This is a terminal job state. This state may only be
|
| + /// set via a Cloud Dataflow `UpdateJob` call, and only if the job has not
|
| + /// yet reached another terminal state.
|
| + /// - "JOB_STATE_UPDATED" : `JOB_STATE_UPDATED` indicates that the job was
|
| + /// successfully updated,
|
| + /// meaning that this job was stopped and another job was started, inheriting
|
| + /// state from this one. This is a terminal job state. This state may only be
|
| + /// set by the Cloud Dataflow service, and only as a transition from
|
| + /// `JOB_STATE_RUNNING`.
|
| + /// - "JOB_STATE_DRAINING" : `JOB_STATE_DRAINING` indicates that the job is
|
| + /// in the process of draining.
|
| + /// A draining job has stopped pulling from its input sources and is
|
| + /// processing any data that remains in-flight. This state may be set via a
|
| + /// Cloud Dataflow
|
| + /// `UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. Jobs
|
| + /// that are draining may only transition to `JOB_STATE_DRAINED`,
|
| + /// `JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`.
|
| + /// - "JOB_STATE_DRAINED" : `JOB_STATE_DRAINED` indicates that the job has
|
| + /// been drained.
|
| + /// A drained job terminated by stopping pulling from its input sources and
|
| + /// processing any data that remained in-flight when draining was requested.
|
| + /// This state is a terminal state, may only be set by the Cloud Dataflow
|
| + /// service, and only as a transition from `JOB_STATE_DRAINING`.
|
| + /// - "JOB_STATE_PENDING" : 'JOB_STATE_PENDING' indicates that the job has
|
| + /// been created but is not yet
|
| + /// running. Jobs that are pending may only transition to `JOB_STATE_RUNNING`,
|
| + /// or `JOB_STATE_FAILED`.
|
| + /// - "JOB_STATE_CANCELLING" : 'JOB_STATE_CANCELLING' indicates that the job
|
| + /// has been explicitly cancelled
|
| + /// and is in the process of stopping. Jobs that are cancelling may only
|
| + /// transition to 'JOB_STATE_CANCELLED' or 'JOB_STATE_FAILED'.
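|
| + ///
|
| + /// A minimal sketch (hypothetical `stageState` instance) for spotting
|
| + /// terminal states:
|
| + ///
|
| + ///     const terminal = const [
|
| + ///       'JOB_STATE_DONE', 'JOB_STATE_FAILED', 'JOB_STATE_CANCELLED',
|
| + ///       'JOB_STATE_UPDATED', 'JOB_STATE_DRAINED',
|
| + ///     ];
|
| + ///     final done = terminal.contains(stageState.executionStageState);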
|
| core.String executionStageState;
|
|
|
| ExecutionStageState();
|
| @@ -3430,7 +3578,8 @@ class ExecutionStageState {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (currentStateTime != null) {
|
| _json["currentStateTime"] = currentStateTime;
|
| }
|
| @@ -3444,57 +3593,63 @@ class ExecutionStageState {
|
| }
|
| }
|
|
|
| -/**
|
| - * Description of the composing transforms, names/ids, and input/outputs of a
|
| - * stage of execution. Some composing transforms and sources may have been
|
| - * generated by the Dataflow service during execution planning.
|
| - */
|
| +/// Description of the composing transforms, names/ids, and input/outputs of a
|
| +/// stage of execution. Some composing transforms and sources may have been
|
| +/// generated by the Dataflow service during execution planning.
|
| class ExecutionStageSummary {
|
| - /**
|
| - * Collections produced and consumed by component transforms of this stage.
|
| - */
|
| + /// Collections produced and consumed by component transforms of this stage.
|
| core.List<ComponentSource> componentSource;
|
| - /** Transforms that comprise this execution stage. */
|
| +
|
| + /// Transforms that comprise this execution stage.
|
| core.List<ComponentTransform> componentTransform;
|
| - /** Dataflow service generated id for this stage. */
|
| +
|
| + /// Dataflow service generated id for this stage.
|
| core.String id;
|
| - /** Input sources for this stage. */
|
| +
|
| + /// Input sources for this stage.
|
| core.List<StageSource> inputSource;
|
| - /**
|
| - * Type of tranform this stage is executing.
|
| - * Possible string values are:
|
| - * - "UNKNOWN_KIND" : Unrecognized transform type.
|
| - * - "PAR_DO_KIND" : ParDo transform.
|
| - * - "GROUP_BY_KEY_KIND" : Group By Key transform.
|
| - * - "FLATTEN_KIND" : Flatten transform.
|
| - * - "READ_KIND" : Read transform.
|
| - * - "WRITE_KIND" : Write transform.
|
| - * - "CONSTANT_KIND" : Constructs from a constant value, such as with
|
| - * Create.of.
|
| - * - "SINGLETON_KIND" : Creates a Singleton view of a collection.
|
| - * - "SHUFFLE_KIND" : Opening or closing a shuffle session, often as part of a
|
| - * GroupByKey.
|
| - */
|
| +
|
| + /// Type of transform this stage is executing.
|
| + /// Possible string values are:
|
| + /// - "UNKNOWN_KIND" : Unrecognized transform type.
|
| + /// - "PAR_DO_KIND" : ParDo transform.
|
| + /// - "GROUP_BY_KEY_KIND" : Group By Key transform.
|
| + /// - "FLATTEN_KIND" : Flatten transform.
|
| + /// - "READ_KIND" : Read transform.
|
| + /// - "WRITE_KIND" : Write transform.
|
| + /// - "CONSTANT_KIND" : Constructs from a constant value, such as with
|
| + /// Create.of.
|
| + /// - "SINGLETON_KIND" : Creates a Singleton view of a collection.
|
| + /// - "SHUFFLE_KIND" : Opening or closing a shuffle session, often as part of
|
| + /// a GroupByKey.
|
| core.String kind;
|
| - /** Dataflow service generated name for this stage. */
|
| +
|
| + /// Dataflow service generated name for this stage.
|
| core.String name;
|
| - /** Output sources for this stage. */
|
| +
|
| + /// Output sources for this stage.
|
| core.List<StageSource> outputSource;
|
|
|
| ExecutionStageSummary();
|
|
|
| ExecutionStageSummary.fromJson(core.Map _json) {
|
| if (_json.containsKey("componentSource")) {
|
| - componentSource = _json["componentSource"].map((value) => new ComponentSource.fromJson(value)).toList();
|
| + componentSource = _json["componentSource"]
|
| + .map((value) => new ComponentSource.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("componentTransform")) {
|
| - componentTransform = _json["componentTransform"].map((value) => new ComponentTransform.fromJson(value)).toList();
|
| + componentTransform = _json["componentTransform"]
|
| + .map((value) => new ComponentTransform.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("id")) {
|
| id = _json["id"];
|
| }
|
| if (_json.containsKey("inputSource")) {
|
| - inputSource = _json["inputSource"].map((value) => new StageSource.fromJson(value)).toList();
|
| + inputSource = _json["inputSource"]
|
| + .map((value) => new StageSource.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("kind")) {
|
| kind = _json["kind"];
|
| @@ -3503,23 +3658,29 @@ class ExecutionStageSummary {
|
| name = _json["name"];
|
| }
|
| if (_json.containsKey("outputSource")) {
|
| - outputSource = _json["outputSource"].map((value) => new StageSource.fromJson(value)).toList();
|
| + outputSource = _json["outputSource"]
|
| + .map((value) => new StageSource.fromJson(value))
|
| + .toList();
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (componentSource != null) {
|
| - _json["componentSource"] = componentSource.map((value) => (value).toJson()).toList();
|
| + _json["componentSource"] =
|
| + componentSource.map((value) => (value).toJson()).toList();
|
| }
|
| if (componentTransform != null) {
|
| - _json["componentTransform"] = componentTransform.map((value) => (value).toJson()).toList();
|
| + _json["componentTransform"] =
|
| + componentTransform.map((value) => (value).toJson()).toList();
|
| }
|
| if (id != null) {
|
| _json["id"] = id;
|
| }
|
| if (inputSource != null) {
|
| - _json["inputSource"] = inputSource.map((value) => (value).toJson()).toList();
|
| + _json["inputSource"] =
|
| + inputSource.map((value) => (value).toJson()).toList();
|
| }
|
| if (kind != null) {
|
| _json["kind"] = kind;
|
| @@ -3528,15 +3689,16 @@ class ExecutionStageSummary {
|
| _json["name"] = name;
|
| }
|
| if (outputSource != null) {
|
| - _json["outputSource"] = outputSource.map((value) => (value).toJson()).toList();
|
| + _json["outputSource"] =
|
| + outputSource.map((value) => (value).toJson()).toList();
|
| }
|
| return _json;
|
| }
|
| }
|
|
|
| -/** Indicates which location failed to respond to a request for data. */
|
| +/// Indicates which location failed to respond to a request for data.
|
| class FailedLocation {
|
| - /** The name of the failed location. */
|
| + /// The name of the failed location.
|
| core.String name;
|
|
|
| FailedLocation();
|
| @@ -3548,7 +3710,8 @@ class FailedLocation {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (name != null) {
|
| _json["name"] = name;
|
| }
|
| @@ -3556,23 +3719,25 @@ class FailedLocation {
|
| }
|
| }
|
|
|
| -/**
|
| - * An instruction that copies its inputs (zero or more) to its (single) output.
|
| - */
|
| +/// An instruction that copies its inputs (zero or more) to its (single)
|
| +/// output.
|
| class FlattenInstruction {
|
| - /** Describes the inputs to the flatten instruction. */
|
| + /// Describes the inputs to the flatten instruction.
|
| core.List<InstructionInput> inputs;
|
|
|
| FlattenInstruction();
|
|
|
| FlattenInstruction.fromJson(core.Map _json) {
|
| if (_json.containsKey("inputs")) {
|
| - inputs = _json["inputs"].map((value) => new InstructionInput.fromJson(value)).toList();
|
| + inputs = _json["inputs"]
|
| + .map((value) => new InstructionInput.fromJson(value))
|
| + .toList();
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (inputs != null) {
|
| _json["inputs"] = inputs.map((value) => (value).toJson()).toList();
|
| }
|
| @@ -3580,9 +3745,9 @@ class FlattenInstruction {
|
| }
|
| }
|
|
|
| -/** A metric value representing a list of floating point numbers. */
|
| +/// A metric value representing a list of floating point numbers.
|
| class FloatingPointList {
|
| - /** Elements of the list. */
|
| + /// Elements of the list.
|
| core.List<core.double> elements;
|
|
|
| FloatingPointList();
|
| @@ -3594,7 +3759,8 @@ class FloatingPointList {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (elements != null) {
|
| _json["elements"] = elements;
|
| }
|
| @@ -3602,11 +3768,12 @@ class FloatingPointList {
|
| }
|
| }
|
|
|
| -/** A representation of a floating point mean metric contribution. */
|
| +/// A representation of a floating point mean metric contribution.
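|
| +///
|
| +/// The mean itself is not stored; it is recovered as `sum / count`.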
|
| class FloatingPointMean {
|
| - /** The number of values being aggregated. */
|
| + /// The number of values being aggregated.
|
| SplitInt64 count;
|
| - /** The sum of all values being aggregated. */
|
| +
|
| + /// The sum of all values being aggregated.
|
| core.double sum;
|
|
|
| FloatingPointMean();
|
| @@ -3621,7 +3788,8 @@ class FloatingPointMean {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (count != null) {
|
| _json["count"] = (count).toJson();
|
| }
|
| @@ -3632,16 +3800,16 @@ class FloatingPointMean {
|
| }
|
| }
|
|
|
| -/** Request to get updated debug configuration for component. */
|
| +/// Request to get updated debug configuration for component.
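|
| +///
|
| +/// A minimal request sketch (all values hypothetical):
|
| +///
|
| +///     final req = new GetDebugConfigRequest()
|
| +///       ..componentId = 'my-component'
|
| +///       ..location = 'us-central1'
|
| +///       ..workerId = 'myproject-1014-104817-4c2-harness-0';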
|
| class GetDebugConfigRequest {
|
| - /**
|
| - * The internal component id for which debug configuration is
|
| - * requested.
|
| - */
|
| + /// The internal component id for which debug configuration is
|
| + /// requested.
|
| core.String componentId;
|
| - /** The location which contains the job specified by job_id. */
|
| +
|
| + /// The location which contains the job specified by job_id.
|
| core.String location;
|
| - /** The worker id, i.e., VM hostname. */
|
| +
|
| + /// The worker id, i.e., VM hostname.
|
| core.String workerId;
|
|
|
| GetDebugConfigRequest();
|
| @@ -3659,7 +3827,8 @@ class GetDebugConfigRequest {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (componentId != null) {
|
| _json["componentId"] = componentId;
|
| }
|
| @@ -3673,9 +3842,9 @@ class GetDebugConfigRequest {
|
| }
|
| }
|
|
|
| -/** Response to a get debug configuration request. */
|
| +/// Response to a get debug configuration request.
|
| class GetDebugConfigResponse {
|
| - /** The encoded debug configuration for the requested component. */
|
| + /// The encoded debug configuration for the requested component.
|
| core.String config;
|
|
|
| GetDebugConfigResponse();
|
| @@ -3687,7 +3856,8 @@ class GetDebugConfigResponse {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (config != null) {
|
| _json["config"] = config;
|
| }
|
| @@ -3695,17 +3865,14 @@ class GetDebugConfigResponse {
|
| }
|
| }
|
|
|
| -/** The response to a GetTemplate request. */
|
| +/// The response to a GetTemplate request.
|
| class GetTemplateResponse {
|
| - /**
|
| - * The template metadata describing the template name, available
|
| - * parameters, etc.
|
| - */
|
| + /// The template metadata describing the template name, available
|
| + /// parameters, etc.
|
| TemplateMetadata metadata;
|
| - /**
|
| - * The status of the get template request. Any problems with the
|
| - * request will be indicated in the error_details.
|
| - */
|
| +
|
| + /// The status of the get template request. Any problems with the
|
| + /// request will be indicated in the error_details.
|
| Status status;
|
|
|
| GetTemplateResponse();
|
| @@ -3720,7 +3887,8 @@ class GetTemplateResponse {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (metadata != null) {
|
| _json["metadata"] = (metadata).toJson();
|
| }
|
| @@ -3731,19 +3899,16 @@ class GetTemplateResponse {
|
| }
|
| }
|
|
|
| -/**
|
| - * An input of an instruction, as a reference to an output of a
|
| - * producer instruction.
|
| - */
|
| +/// An input of an instruction, as a reference to an output of a
|
| +/// producer instruction.
|
| class InstructionInput {
|
| - /** The output index (origin zero) within the producer. */
|
| + /// The output index (origin zero) within the producer.
|
| core.int outputNum;
|
| - /**
|
| - * The index (origin zero) of the parallel instruction that produces
|
| - * the output to be consumed by this input. This index is relative
|
| - * to the list of instructions in this input's instruction's
|
| - * containing MapTask.
|
| - */
|
| +
|
| + /// The index (origin zero) of the parallel instruction that produces
|
| + /// the output to be consumed by this input. This index is relative
|
| + /// to the list of instructions in this input's instruction's
|
| + /// containing MapTask.
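|
| + ///
|
| + /// For example, to consume the first output of the third instruction in
|
| + /// the containing MapTask (indices are hypothetical):
|
| + ///
|
| + ///     final input = new InstructionInput()
|
| + ///       ..producerInstructionIndex = 2
|
| + ///       ..outputNum = 0;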
|
| core.int producerInstructionIndex;
|
|
|
| InstructionInput();
|
| @@ -3758,7 +3923,8 @@ class InstructionInput {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (outputNum != null) {
|
| _json["outputNum"] = outputNum;
|
| }
|
| @@ -3769,36 +3935,32 @@ class InstructionInput {
|
| }
|
| }
|
|
|
| -/** An output of an instruction. */
|
| +/// An output of an instruction.
|
| class InstructionOutput {
|
| - /**
|
| - * The codec to use to encode data being written via this output.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| + /// The codec to use to encode data being written via this output.
|
| + ///
|
| + /// The values for Object must be JSON objects. They can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Map<core.String, core.Object> codec;
|
| - /** The user-provided name of this output. */
|
| +
|
| + /// The user-provided name of this output.
|
| core.String name;
|
| - /**
|
| - * For system-generated byte and mean byte metrics, certain instructions
|
| - * should only report the key size.
|
| - */
|
| +
|
| + /// For system-generated byte and mean byte metrics, certain instructions
|
| + /// should only report the key size.
|
| core.bool onlyCountKeyBytes;
|
| - /**
|
| - * For system-generated byte and mean byte metrics, certain instructions
|
| - * should only report the value size.
|
| - */
|
| +
|
| + /// For system-generated byte and mean byte metrics, certain instructions
|
| + /// should only report the value size.
|
| core.bool onlyCountValueBytes;
|
| - /**
|
| - * System-defined name for this output in the original workflow graph.
|
| - * Outputs that do not contribute to an original instruction do not set this.
|
| - */
|
| +
|
| + /// System-defined name for this output in the original workflow graph.
|
| + /// Outputs that do not contribute to an original instruction do not set
|
| + /// this.
|
| core.String originalName;
|
| - /**
|
| - * System-defined name of this output.
|
| - * Unique across the workflow.
|
| - */
|
| +
|
| + /// System-defined name of this output.
|
| + /// Unique across the workflow.
|
| core.String systemName;
|
|
|
| InstructionOutput();
|
| @@ -3825,7 +3987,8 @@ class InstructionOutput {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (codec != null) {
|
| _json["codec"] = codec;
|
| }
|
| @@ -3848,21 +4011,24 @@ class InstructionOutput {
|
| }
|
| }
|
|
|
| -/** A metric value representing a list of integers. */
|
| +/// A metric value representing a list of integers.
|
| class IntegerList {
|
| - /** Elements of the list. */
|
| + /// Elements of the list.
|
| core.List<SplitInt64> elements;
|
|
|
| IntegerList();
|
|
|
| IntegerList.fromJson(core.Map _json) {
|
| if (_json.containsKey("elements")) {
|
| - elements = _json["elements"].map((value) => new SplitInt64.fromJson(value)).toList();
|
| + elements = _json["elements"]
|
| + .map((value) => new SplitInt64.fromJson(value))
|
| + .toList();
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (elements != null) {
|
| _json["elements"] = elements.map((value) => (value).toJson()).toList();
|
| }
|
| @@ -3870,11 +4036,12 @@ class IntegerList {
|
| }
|
| }
|
|
|
| -/** A representation of an integer mean metric contribution. */
|
| +/// A representation of an integer mean metric contribution.
|
| class IntegerMean {
|
| - /** The number of values being aggregated. */
|
| + /// The number of values being aggregated.
|
| SplitInt64 count;
|
| - /** The sum of all values being aggregated. */
|
| +
|
| + /// The sum of all values being aggregated.
|
| SplitInt64 sum;
|
|
|
| IntegerMean();
|
| @@ -3889,7 +4056,8 @@ class IntegerMean {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (count != null) {
|
| _json["count"] = (count).toJson();
|
| }
|
| @@ -3900,243 +4068,252 @@ class IntegerMean {
|
| }
|
| }
|
|
|
| -/** Defines a job to be run by the Cloud Dataflow service. */
|
| +/// Defines a job to be run by the Cloud Dataflow service.
|
| class Job {
|
| - /**
|
| - * The client's unique identifier of the job, re-used across retried attempts.
|
| - * If this field is set, the service will ensure its uniqueness.
|
| - * The request to create a job will fail if the service has knowledge of a
|
| - * previously submitted job with the same client's ID and job name.
|
| - * The caller may use this field to ensure idempotence of job
|
| - * creation across retried attempts to create a job.
|
| - * By default, the field is empty and, in that case, the service ignores it.
|
| - */
|
| + /// The client's unique identifier of the job, re-used across retried
|
| + /// attempts.
|
| + /// If this field is set, the service will ensure its uniqueness.
|
| + /// The request to create a job will fail if the service has knowledge of a
|
| + /// previously submitted job with the same client's ID and job name.
|
| + /// The caller may use this field to ensure idempotence of job
|
| + /// creation across retried attempts to create a job.
|
| + /// By default, the field is empty and, in that case, the service ignores it.
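|
| + ///
|
| + /// For example, a caller might pin one hypothetical identifier across
|
| + /// retries of the same create request:
|
| + ///
|
| + ///     job.clientRequestId = 'create-attempt-7f3a';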
|
| core.String clientRequestId;
|
| - /**
|
| - * The timestamp when the job was initially created. Immutable and set by the
|
| - * Cloud Dataflow service.
|
| - */
|
| +
|
| + /// The timestamp when the job was initially created. Immutable and set by
|
| + /// the Cloud Dataflow service.
|
| core.String createTime;
|
| - /**
|
| - * The current state of the job.
|
| - *
|
| - * Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise
|
| - * specified.
|
| - *
|
| - * A job in the `JOB_STATE_RUNNING` state may asynchronously enter a
|
| - * terminal state. After a job has reached a terminal state, no
|
| - * further state updates may be made.
|
| - *
|
| - * This field may be mutated by the Cloud Dataflow service;
|
| - * callers cannot mutate it.
|
| - * Possible string values are:
|
| - * - "JOB_STATE_UNKNOWN" : The job's run state isn't specified.
|
| - * - "JOB_STATE_STOPPED" : `JOB_STATE_STOPPED` indicates that the job has not
|
| - * yet started to run.
|
| - * - "JOB_STATE_RUNNING" : `JOB_STATE_RUNNING` indicates that the job is
|
| - * currently running.
|
| - * - "JOB_STATE_DONE" : `JOB_STATE_DONE` indicates that the job has
|
| - * successfully completed.
|
| - * This is a terminal job state. This state may be set by the Cloud Dataflow
|
| - * service, as a transition from `JOB_STATE_RUNNING`. It may also be set via a
|
| - * Cloud Dataflow `UpdateJob` call, if the job has not yet reached a terminal
|
| - * state.
|
| - * - "JOB_STATE_FAILED" : `JOB_STATE_FAILED` indicates that the job has
|
| - * failed. This is a
|
| - * terminal job state. This state may only be set by the Cloud Dataflow
|
| - * service, and only as a transition from `JOB_STATE_RUNNING`.
|
| - * - "JOB_STATE_CANCELLED" : `JOB_STATE_CANCELLED` indicates that the job has
|
| - * been explicitly
|
| - * cancelled. This is a terminal job state. This state may only be
|
| - * set via a Cloud Dataflow `UpdateJob` call, and only if the job has not
|
| - * yet reached another terminal state.
|
| - * - "JOB_STATE_UPDATED" : `JOB_STATE_UPDATED` indicates that the job was
|
| - * successfully updated,
|
| - * meaning that this job was stopped and another job was started, inheriting
|
| - * state from this one. This is a terminal job state. This state may only be
|
| - * set by the Cloud Dataflow service, and only as a transition from
|
| - * `JOB_STATE_RUNNING`.
|
| - * - "JOB_STATE_DRAINING" : `JOB_STATE_DRAINING` indicates that the job is in
|
| - * the process of draining.
|
| - * A draining job has stopped pulling from its input sources and is processing
|
| - * any data that remains in-flight. This state may be set via a Cloud Dataflow
|
| - * `UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. Jobs
|
| - * that are draining may only transition to `JOB_STATE_DRAINED`,
|
| - * `JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`.
|
| - * - "JOB_STATE_DRAINED" : `JOB_STATE_DRAINED` indicates that the job has been
|
| - * drained.
|
| - * A drained job terminated by stopping pulling from its input sources and
|
| - * processing any data that remained in-flight when draining was requested.
|
| - * This state is a terminal state, may only be set by the Cloud Dataflow
|
| - * service, and only as a transition from `JOB_STATE_DRAINING`.
|
| - * - "JOB_STATE_PENDING" : 'JOB_STATE_PENDING' indicates that the job has been
|
| - * created but is not yet
|
| - * running. Jobs that are pending may only transition to `JOB_STATE_RUNNING`,
|
| - * or `JOB_STATE_FAILED`.
|
| - * - "JOB_STATE_CANCELLING" : 'JOB_STATE_CANCELLING' indicates that the job
|
| - * has been explicitly cancelled
|
| - * and is in the process of stopping. Jobs that are cancelling may only
|
| - * transition to 'JOB_STATE_CANCELLED' or 'JOB_STATE_FAILED'.
|
| - */
|
| +
|
| + /// The current state of the job.
|
| + ///
|
| + /// Jobs are created in the `JOB_STATE_STOPPED` state unless otherwise
|
| + /// specified.
|
| + ///
|
| + /// A job in the `JOB_STATE_RUNNING` state may asynchronously enter a
|
| + /// terminal state. After a job has reached a terminal state, no
|
| + /// further state updates may be made.
|
| + ///
|
| + /// This field may be mutated by the Cloud Dataflow service;
|
| + /// callers cannot mutate it.
|
| + /// Possible string values are:
|
| + /// - "JOB_STATE_UNKNOWN" : The job's run state isn't specified.
|
| + /// - "JOB_STATE_STOPPED" : `JOB_STATE_STOPPED` indicates that the job has
|
| + /// not
|
| + /// yet started to run.
|
| + /// - "JOB_STATE_RUNNING" : `JOB_STATE_RUNNING` indicates that the job is
|
| + /// currently running.
|
| + /// - "JOB_STATE_DONE" : `JOB_STATE_DONE` indicates that the job has
|
| + /// successfully completed.
|
| + /// This is a terminal job state. This state may be set by the Cloud
|
| + /// Dataflow
|
| + /// service, as a transition from `JOB_STATE_RUNNING`. It may also be set via
|
| + /// a
|
| + /// Cloud Dataflow `UpdateJob` call, if the job has not yet reached a
|
| + /// terminal
|
| + /// state.
|
| + /// - "JOB_STATE_FAILED" : `JOB_STATE_FAILED` indicates that the job has
|
| + /// failed. This is a
|
| + /// terminal job state. This state may only be set by the Cloud Dataflow
|
| + /// service, and only as a transition from `JOB_STATE_RUNNING`.
|
| + /// - "JOB_STATE_CANCELLED" : `JOB_STATE_CANCELLED` indicates that the job
|
| + /// has been explicitly
|
| + /// cancelled. This is a terminal job state. This state may only be
|
| + /// set via a Cloud Dataflow `UpdateJob` call, and only if the job has not
|
| + /// yet reached another terminal state.
|
| + /// - "JOB_STATE_UPDATED" : `JOB_STATE_UPDATED` indicates that the job was
|
| + /// successfully updated,
|
| + /// meaning that this job was stopped and another job was started, inheriting
|
| + /// state from this one. This is a terminal job state. This state may only be
|
| + /// set by the Cloud Dataflow service, and only as a transition from
|
| + /// `JOB_STATE_RUNNING`.
|
| + /// - "JOB_STATE_DRAINING" : `JOB_STATE_DRAINING` indicates that the job is
|
| + /// in the process of draining.
|
| + /// A draining job has stopped pulling from its input sources and is
|
| + /// processing any data that remains in-flight. This state may be set via a
|
| + /// Cloud Dataflow
|
| + /// `UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. Jobs
|
| + /// that are draining may only transition to `JOB_STATE_DRAINED`,
|
| + /// `JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`.
|
| + /// - "JOB_STATE_DRAINED" : `JOB_STATE_DRAINED` indicates that the job has
|
| + /// been drained.
|
| + /// A drained job terminated by stopping pulling from its input sources and
|
| + /// processing any data that remained in-flight when draining was requested.
|
| + /// This state is a terminal state, may only be set by the Cloud Dataflow
|
| + /// service, and only as a transition from `JOB_STATE_DRAINING`.
|
| + /// - "JOB_STATE_PENDING" : 'JOB_STATE_PENDING' indicates that the job has
|
| + /// been created but is not yet
|
| + /// running. Jobs that are pending may only transition to `JOB_STATE_RUNNING`,
|
| + /// or `JOB_STATE_FAILED`.
|
| + /// - "JOB_STATE_CANCELLING" : 'JOB_STATE_CANCELLING' indicates that the job
|
| + /// has been explicitly cancelled
|
| + /// and is in the process of stopping. Jobs that are cancelling may only
|
| + /// transition to 'JOB_STATE_CANCELLED' or 'JOB_STATE_FAILED'.
|
| core.String currentState;
|
| - /** The timestamp associated with the current state. */
|
| +
|
| + /// The timestamp associated with the current state.
|
| core.String currentStateTime;
|
| - /** The environment for the job. */
|
| +
|
| + /// The environment for the job.
|
| Environment environment;
|
| - /** Deprecated. */
|
| +
|
| + /// Deprecated.
|
| JobExecutionInfo executionInfo;
|
| - /**
|
| - * The unique ID of this job.
|
| - *
|
| - * This field is set by the Cloud Dataflow service when the Job is
|
| - * created, and is immutable for the life of the job.
|
| - */
|
| +
|
| + /// The unique ID of this job.
|
| + ///
|
| + /// This field is set by the Cloud Dataflow service when the Job is
|
| + /// created, and is immutable for the life of the job.
|
| core.String id;
|
| - /**
|
| - * User-defined labels for this job.
|
| - *
|
| - * The labels map can contain no more than 64 entries. Entries of the labels
|
| - * map are UTF8 strings that comply with the following restrictions:
|
| - *
|
| - * * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62}
|
| - * * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
|
| - * * Both keys and values are additionally constrained to be <= 128 bytes in
|
| - * size.
|
| - */
|
| +
|
| + /// User-defined labels for this job.
|
| + ///
|
| + /// The labels map can contain no more than 64 entries. Entries of the labels
|
| + /// map are UTF8 strings that comply with the following restrictions:
|
| + ///
|
| + /// * Keys must conform to regexp: \p{Ll}\p{Lo}{0,62}
|
| + /// * Values must conform to regexp: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
|
| + /// * Both keys and values are additionally constrained to be <= 128 bytes in
|
| + /// size.
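|
| + ///
|
| + /// For example (hypothetical labels that satisfy the rules above):
|
| + ///
|
| + ///     job.labels = {'team': 'dataeng', 'env': 'prod'};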
|
| core.Map<core.String, core.String> labels;
|
| - /** The location that contains this job. */
|
| +
|
| + /// The location that contains this job.
|
| core.String location;
|
| - /**
|
| - * The user-specified Cloud Dataflow job name.
|
| - *
|
| - * Only one Job with a given name may exist in a project at any
|
| - * given time. If a caller attempts to create a Job with the same
|
| - * name as an already-existing Job, the attempt returns the
|
| - * existing Job.
|
| - *
|
| - * The name must match the regular expression
|
| - * `[a-z]([-a-z0-9]{0,38}[a-z0-9])?`
|
| - */
|
| +
|
| + /// The user-specified Cloud Dataflow job name.
|
| + ///
|
| + /// Only one Job with a given name may exist in a project at any
|
| + /// given time. If a caller attempts to create a Job with the same
|
| + /// name as an already-existing Job, the attempt returns the
|
| + /// existing Job.
|
| + ///
|
| + /// The name must match the regular expression
|
| + /// `[a-z]([-a-z0-9]{0,38}[a-z0-9])?`
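|
| + ///
|
| + /// For example, `wordcount-2017-10` matches; `WordCount` does not.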
|
| core.String name;
|
| - /**
|
| - * Preliminary field: The format of this data may change at any time.
|
| - * A description of the user pipeline and stages through which it is executed.
|
| - * Created by Cloud Dataflow service. Only retrieved with
|
| - * JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL.
|
| - */
|
| +
|
| + /// Preliminary field: The format of this data may change at any time.
|
| + /// A description of the user pipeline and stages through which it is
|
| + /// executed.
|
| + /// Created by Cloud Dataflow service. Only retrieved with
|
| + /// JOB_VIEW_DESCRIPTION or JOB_VIEW_ALL.
|
| PipelineDescription pipelineDescription;
|
| - /** The ID of the Cloud Platform project that the job belongs to. */
|
| +
|
| + /// The ID of the Cloud Platform project that the job belongs to.
|
| core.String projectId;
|
| - /**
|
| - * If this job is an update of an existing job, this field is the job ID
|
| - * of the job it replaced.
|
| - *
|
| - * When sending a `CreateJobRequest`, you can update a job by specifying it
|
| - * here. The job named here is stopped, and its intermediate state is
|
| - * transferred to this job.
|
| - */
|
| +
|
| + /// If this job is an update of an existing job, this field is the job ID
|
| + /// of the job it replaced.
|
| + ///
|
| + /// When sending a `CreateJobRequest`, you can update a job by specifying it
|
| + /// here. The job named here is stopped, and its intermediate state is
|
| + /// transferred to this job.
|
| core.String replaceJobId;
|
| - /**
|
| - * If another job is an update of this job (and thus, this job is in
|
| - * `JOB_STATE_UPDATED`), this field contains the ID of that job.
|
| - */
|
| +
|
| + /// If another job is an update of this job (and thus, this job is in
|
| + /// `JOB_STATE_UPDATED`), this field contains the ID of that job.
|
| core.String replacedByJobId;
|
| - /**
|
| - * The job's requested state.
|
| - *
|
| - * `UpdateJob` may be used to switch between the `JOB_STATE_STOPPED` and
|
| - * `JOB_STATE_RUNNING` states, by setting requested_state. `UpdateJob` may
|
| - * also be used to directly set a job's requested state to
|
| - * `JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably terminating the
|
| - * job if it has not already reached a terminal state.
|
| - * Possible string values are:
|
| - * - "JOB_STATE_UNKNOWN" : The job's run state isn't specified.
|
| - * - "JOB_STATE_STOPPED" : `JOB_STATE_STOPPED` indicates that the job has not
|
| - * yet started to run.
|
| - * - "JOB_STATE_RUNNING" : `JOB_STATE_RUNNING` indicates that the job is
|
| - * currently running.
|
| - * - "JOB_STATE_DONE" : `JOB_STATE_DONE` indicates that the job has
|
| - * successfully completed.
|
| - * This is a terminal job state. This state may be set by the Cloud Dataflow
|
| - * service, as a transition from `JOB_STATE_RUNNING`. It may also be set via a
|
| - * Cloud Dataflow `UpdateJob` call, if the job has not yet reached a terminal
|
| - * state.
|
| - * - "JOB_STATE_FAILED" : `JOB_STATE_FAILED` indicates that the job has
|
| - * failed. This is a
|
| - * terminal job state. This state may only be set by the Cloud Dataflow
|
| - * service, and only as a transition from `JOB_STATE_RUNNING`.
|
| - * - "JOB_STATE_CANCELLED" : `JOB_STATE_CANCELLED` indicates that the job has
|
| - * been explicitly
|
| - * cancelled. This is a terminal job state. This state may only be
|
| - * set via a Cloud Dataflow `UpdateJob` call, and only if the job has not
|
| - * yet reached another terminal state.
|
| - * - "JOB_STATE_UPDATED" : `JOB_STATE_UPDATED` indicates that the job was
|
| - * successfully updated,
|
| - * meaning that this job was stopped and another job was started, inheriting
|
| - * state from this one. This is a terminal job state. This state may only be
|
| - * set by the Cloud Dataflow service, and only as a transition from
|
| - * `JOB_STATE_RUNNING`.
|
| - * - "JOB_STATE_DRAINING" : `JOB_STATE_DRAINING` indicates that the job is in
|
| - * the process of draining.
|
| - * A draining job has stopped pulling from its input sources and is processing
|
| - * any data that remains in-flight. This state may be set via a Cloud Dataflow
|
| - * `UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. Jobs
|
| - * that are draining may only transition to `JOB_STATE_DRAINED`,
|
| - * `JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`.
|
| - * - "JOB_STATE_DRAINED" : `JOB_STATE_DRAINED` indicates that the job has been
|
| - * drained.
|
| - * A drained job terminated by stopping pulling from its input sources and
|
| - * processing any data that remained in-flight when draining was requested.
|
| - * This state is a terminal state, may only be set by the Cloud Dataflow
|
| - * service, and only as a transition from `JOB_STATE_DRAINING`.
|
| - * - "JOB_STATE_PENDING" : 'JOB_STATE_PENDING' indicates that the job has been
|
| - * created but is not yet
|
| - * running. Jobs that are pending may only transition to `JOB_STATE_RUNNING`,
|
| - * or `JOB_STATE_FAILED`.
|
| - * - "JOB_STATE_CANCELLING" : 'JOB_STATE_CANCELLING' indicates that the job
|
| - * has been explicitly cancelled
|
| - * and is in the process of stopping. Jobs that are cancelling may only
|
| - * transition to 'JOB_STATE_CANCELLED' or 'JOB_STATE_FAILED'.
|
| - */
|
| +
|
| + /// The job's requested state.
|
| + ///
|
| + /// `UpdateJob` may be used to switch between the `JOB_STATE_STOPPED` and
|
| + /// `JOB_STATE_RUNNING` states, by setting requested_state. `UpdateJob` may
|
| + /// also be used to directly set a job's requested state to
|
| + /// `JOB_STATE_CANCELLED` or `JOB_STATE_DONE`, irrevocably terminating the
|
| + /// job if it has not already reached a terminal state.
|
| + /// Possible string values are:
|
| + /// - "JOB_STATE_UNKNOWN" : The job's run state isn't specified.
|
| + /// - "JOB_STATE_STOPPED" : `JOB_STATE_STOPPED` indicates that the job has
|
| + /// not
|
| + /// yet started to run.
|
| + /// - "JOB_STATE_RUNNING" : `JOB_STATE_RUNNING` indicates that the job is
|
| + /// currently running.
|
| + /// - "JOB_STATE_DONE" : `JOB_STATE_DONE` indicates that the job has
|
| + /// successfully completed.
|
| + /// This is a terminal job state. This state may be set by the Cloud
|
| + /// Dataflow
|
| + /// service, as a transition from `JOB_STATE_RUNNING`. It may also be set via
|
| + /// a
|
| + /// Cloud Dataflow `UpdateJob` call, if the job has not yet reached a
|
| + /// terminal
|
| + /// state.
|
| + /// - "JOB_STATE_FAILED" : `JOB_STATE_FAILED` indicates that the job has
|
| + /// failed. This is a
|
| + /// terminal job state. This state may only be set by the Cloud Dataflow
|
| + /// service, and only as a transition from `JOB_STATE_RUNNING`.
|
| + /// - "JOB_STATE_CANCELLED" : `JOB_STATE_CANCELLED` indicates that the job
|
| + /// has been explicitly
|
| + /// cancelled. This is a terminal job state. This state may only be
|
| + /// set via a Cloud Dataflow `UpdateJob` call, and only if the job has not
|
| + /// yet reached another terminal state.
|
| + /// - "JOB_STATE_UPDATED" : `JOB_STATE_UPDATED` indicates that the job was
|
| + /// successfully updated,
|
| + /// meaning that this job was stopped and another job was started, inheriting
|
| + /// state from this one. This is a terminal job state. This state may only be
|
| + /// set by the Cloud Dataflow service, and only as a transition from
|
| + /// `JOB_STATE_RUNNING`.
|
| + /// - "JOB_STATE_DRAINING" : `JOB_STATE_DRAINING` indicates that the job is
|
| + /// in the process of draining.
|
| + /// A draining job has stopped pulling from its input sources and is
|
| + /// processing any data that remains in-flight. This state may be set via a
|
| + /// Cloud Dataflow
|
| + /// `UpdateJob` call, but only as a transition from `JOB_STATE_RUNNING`. Jobs
|
| + /// that are draining may only transition to `JOB_STATE_DRAINED`,
|
| + /// `JOB_STATE_CANCELLED`, or `JOB_STATE_FAILED`.
|
| + /// - "JOB_STATE_DRAINED" : `JOB_STATE_DRAINED` indicates that the job has
|
| + /// been drained.
|
| + /// A drained job terminated by stopping pulling from its input sources and
|
| + /// processing any data that remained in-flight when draining was requested.
|
| + /// This state is a terminal state, may only be set by the Cloud Dataflow
|
| + /// service, and only as a transition from `JOB_STATE_DRAINING`.
|
| + /// - "JOB_STATE_PENDING" : 'JOB_STATE_PENDING' indicates that the job has
|
| + /// been created but is not yet
|
| + /// running. Jobs that are pending may only transition to `JOB_STATE_RUNNING`,
|
| + /// or `JOB_STATE_FAILED`.
|
| + /// - "JOB_STATE_CANCELLING" : 'JOB_STATE_CANCELLING' indicates that the job
|
| + /// has been explicitly cancelled
|
| + /// and is in the process of stopping. Jobs that are cancelling may only
|
| + /// transition to 'JOB_STATE_CANCELLED' or 'JOB_STATE_FAILED'.
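|
| + ///
|
| + /// A minimal cancellation sketch, assuming the generated
|
| + /// `projects.jobs.update` method and hypothetical `projectId`/`jobId`:
|
| + ///
|
| + ///     final update = new Job()..requestedState = 'JOB_STATE_CANCELLED';
|
| + ///     await api.projects.jobs.update(update, projectId, jobId);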
|
| core.String requestedState;
|
| - /**
|
| - * This field may be mutated by the Cloud Dataflow service;
|
| - * callers cannot mutate it.
|
| - */
|
| +
|
| + /// This field may be mutated by the Cloud Dataflow service;
|
| + /// callers cannot mutate it.
|
| core.List<ExecutionStageState> stageStates;
|
| - /** The top-level steps that constitute the entire job. */
|
| +
|
| + /// The top-level steps that constitute the entire job.
|
| core.List<Step> steps;
|
| - /**
|
| - * A set of files the system should be aware of that are used
|
| - * for temporary storage. These temporary files will be
|
| - * removed on job completion.
|
| - * No duplicates are allowed.
|
| - * No file patterns are supported.
|
| - *
|
| - * The supported files are:
|
| - *
|
| - * Google Cloud Storage:
|
| - *
|
| - * storage.googleapis.com/{bucket}/{object}
|
| - * bucket.storage.googleapis.com/{object}
|
| - */
|
| +
|
| + /// A set of files the system should be aware of that are used
|
| + /// for temporary storage. These temporary files will be
|
| + /// removed on job completion.
|
| + /// No duplicates are allowed.
|
| + /// No file patterns are supported.
|
| + ///
|
| + /// The supported files are:
|
| + ///
|
| + /// Google Cloud Storage:
|
| + ///
|
| + /// storage.googleapis.com/{bucket}/{object}
|
| + /// bucket.storage.googleapis.com/{object}
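|
| + ///
|
| + /// For example (hypothetical bucket and object):
|
| + ///
|
| + ///     job.tempFiles = ['storage.googleapis.com/my-bucket/tmp/setup.tar'];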
|
| core.List<core.String> tempFiles;
|
| - /**
|
| - * The map of transform name prefixes of the job to be replaced to the
|
| - * corresponding name prefixes of the new job.
|
| - */
|
| +
|
| + /// The map of transform name prefixes of the job to be replaced to the
|
| + /// corresponding name prefixes of the new job.
|
| core.Map<core.String, core.String> transformNameMapping;
|
| - /**
|
| - * The type of Cloud Dataflow job.
|
| - * Possible string values are:
|
| - * - "JOB_TYPE_UNKNOWN" : The type of the job is unspecified, or unknown.
|
| - * - "JOB_TYPE_BATCH" : A batch job with a well-defined end point: data is
|
| - * read, data is
|
| - * processed, data is written, and the job is done.
|
| - * - "JOB_TYPE_STREAMING" : A continuously streaming job with no end: data is
|
| - * read,
|
| - * processed, and written continuously.
|
| - */
|
| +
|
| + /// The type of Cloud Dataflow job.
|
| + /// Possible string values are:
|
| + /// - "JOB_TYPE_UNKNOWN" : The type of the job is unspecified, or unknown.
|
| + /// - "JOB_TYPE_BATCH" : A batch job with a well-defined end point: data is
|
| + /// read, data is processed, data is written, and the job is done.
|
| + /// - "JOB_TYPE_STREAMING" : A continuously streaming job with no end: data
|
| + /// is read, processed, and written continuously.
|
| core.String type;
|
|
|
| Job();
|
| @@ -4173,7 +4350,8 @@ class Job {
|
| name = _json["name"];
|
| }
|
| if (_json.containsKey("pipelineDescription")) {
|
| - pipelineDescription = new PipelineDescription.fromJson(_json["pipelineDescription"]);
|
| + pipelineDescription =
|
| + new PipelineDescription.fromJson(_json["pipelineDescription"]);
|
| }
|
| if (_json.containsKey("projectId")) {
|
| projectId = _json["projectId"];
|
| @@ -4188,7 +4366,9 @@ class Job {
|
| requestedState = _json["requestedState"];
|
| }
|
| if (_json.containsKey("stageStates")) {
|
| - stageStates = _json["stageStates"].map((value) => new ExecutionStageState.fromJson(value)).toList();
|
| + stageStates = _json["stageStates"]
|
| + .map((value) => new ExecutionStageState.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("steps")) {
|
| steps = _json["steps"].map((value) => new Step.fromJson(value)).toList();
|
| @@ -4205,7 +4385,8 @@ class Job {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (clientRequestId != null) {
|
| _json["clientRequestId"] = clientRequestId;
|
| }
|
| @@ -4252,7 +4433,8 @@ class Job {
|
| _json["requestedState"] = requestedState;
|
| }
|
| if (stageStates != null) {
|
| - _json["stageStates"] = stageStates.map((value) => (value).toJson()).toList();
|
| + _json["stageStates"] =
|
| + stageStates.map((value) => (value).toJson()).toList();
|
| }
|
| if (steps != null) {
|
| _json["steps"] = steps.map((value) => (value).toJson()).toList();
|
| @@ -4270,41 +4452,42 @@ class Job {
|
| }
|
| }
|
|
|
| -/**
|
| - * Additional information about how a Cloud Dataflow job will be executed that
|
| - * isn't contained in the submitted job.
|
| - */
|
| +/// Additional information about how a Cloud Dataflow job will be executed that
|
| +/// isn't contained in the submitted job.
|
| class JobExecutionInfo {
|
| - /** A mapping from each stage to the information about that stage. */
|
| + /// A mapping from each stage to the information about that stage.
|
| core.Map<core.String, JobExecutionStageInfo> stages;
|
|
|
| JobExecutionInfo();
|
|
|
| JobExecutionInfo.fromJson(core.Map _json) {
|
| if (_json.containsKey("stages")) {
|
| - stages = commons.mapMap<core.Map<core.String, core.Object>, JobExecutionStageInfo>(_json["stages"], (core.Map<core.String, core.Object> item) => new JobExecutionStageInfo.fromJson(item));
|
| + stages = commons
|
| + .mapMap<core.Map<core.String, core.Object>, JobExecutionStageInfo>(
|
| + _json["stages"],
|
| + (core.Map<core.String, core.Object> item) =>
|
| + new JobExecutionStageInfo.fromJson(item));
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (stages != null) {
|
| - _json["stages"] = commons.mapMap<JobExecutionStageInfo, core.Map<core.String, core.Object>>(stages, (JobExecutionStageInfo item) => (item).toJson());
|
| + _json["stages"] = commons
|
| + .mapMap<JobExecutionStageInfo, core.Map<core.String, core.Object>>(
|
| + stages, (JobExecutionStageInfo item) => (item).toJson());
|
| }
|
| return _json;
|
| }
|
| }
|
|
|
| -/**
|
| - * Contains information about how a particular
|
| - * google.dataflow.v1beta3.Step will be executed.
|
| - */
|
| +/// Contains information about how a particular
|
| +/// google.dataflow.v1beta3.Step will be executed.
|
| class JobExecutionStageInfo {
|
| - /**
|
| - * The steps associated with the execution stage.
|
| - * Note that stages may have several steps, and that a given step
|
| - * might be run by more than one stage.
|
| - */
|
| + /// The steps associated with the execution stage.
|
| + /// Note that stages may have several steps, and that a given step
|
| + /// might be run by more than one stage.
|
| core.List<core.String> stepName;
|
|
|
| JobExecutionStageInfo();
|
| @@ -4316,7 +4499,8 @@ class JobExecutionStageInfo {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (stepName != null) {
|
| _json["stepName"] = stepName;
|
| }
|
| @@ -4324,48 +4508,49 @@ class JobExecutionStageInfo {
|
| }
|
| }
|
|
|
| -/** A particular message pertaining to a Dataflow job. */
|
| +/// A particular message pertaining to a Dataflow job.
|
| class JobMessage {
|
| - /** Deprecated. */
|
| + /// Deprecated.
|
| core.String id;
|
| - /**
|
| - * Importance level of the message.
|
| - * Possible string values are:
|
| - * - "JOB_MESSAGE_IMPORTANCE_UNKNOWN" : The message importance isn't
|
| - * specified, or is unknown.
|
| - * - "JOB_MESSAGE_DEBUG" : The message is at the 'debug' level: typically only
|
| - * useful for
|
| - * software engineers working on the code the job is running.
|
| - * Typically, Dataflow pipeline runners do not display log messages
|
| - * at this level by default.
|
| - * - "JOB_MESSAGE_DETAILED" : The message is at the 'detailed' level: somewhat
|
| - * verbose, but
|
| - * potentially useful to users. Typically, Dataflow pipeline
|
| - * runners do not display log messages at this level by default.
|
| - * These messages are displayed by default in the Dataflow
|
| - * monitoring UI.
|
| - * - "JOB_MESSAGE_BASIC" : The message is at the 'basic' level: useful for
|
| - * keeping
|
| - * track of the execution of a Dataflow pipeline. Typically,
|
| - * Dataflow pipeline runners display log messages at this level by
|
| - * default, and these messages are displayed by default in the
|
| - * Dataflow monitoring UI.
|
| - * - "JOB_MESSAGE_WARNING" : The message is at the 'warning' level: indicating
|
| - * a condition
|
| - * pertaining to a job which may require human intervention.
|
| - * Typically, Dataflow pipeline runners display log messages at this
|
| - * level by default, and these messages are displayed by default in
|
| - * the Dataflow monitoring UI.
|
| - * - "JOB_MESSAGE_ERROR" : The message is at the 'error' level: indicating a
|
| - * condition
|
| - * preventing a job from succeeding. Typically, Dataflow pipeline
|
| - * runners display log messages at this level by default, and these
|
| - * messages are displayed by default in the Dataflow monitoring UI.
|
| - */
|
| +
|
| + /// Importance level of the message.
|
| + /// Possible string values are:
|
| + /// - "JOB_MESSAGE_IMPORTANCE_UNKNOWN" : The message importance isn't
|
| + /// specified, or is unknown.
|
| + /// - "JOB_MESSAGE_DEBUG" : The message is at the 'debug' level: typically
|
| + /// only useful for software engineers working on the code the job is running.
|
| + /// Typically, Dataflow pipeline runners do not display log messages
|
| + /// at this level by default.
|
| + /// - "JOB_MESSAGE_DETAILED" : The message is at the 'detailed' level:
|
| + /// somewhat verbose, but
|
| + /// potentially useful to users. Typically, Dataflow pipeline
|
| + /// runners do not display log messages at this level by default.
|
| + /// These messages are displayed by default in the Dataflow
|
| + /// monitoring UI.
|
| + /// - "JOB_MESSAGE_BASIC" : The message is at the 'basic' level: useful for
|
| + /// keeping track of the execution of a Dataflow pipeline. Typically,
|
| + /// Dataflow pipeline runners display log messages at this level by
|
| + /// default, and these messages are displayed by default in the
|
| + /// Dataflow monitoring UI.
|
| + /// - "JOB_MESSAGE_WARNING" : The message is at the 'warning' level:
|
| + /// indicating a condition
|
| + /// pertaining to a job which may require human intervention.
|
| + /// Typically, Dataflow pipeline runners display log messages at this
|
| + /// level by default, and these messages are displayed by default in
|
| + /// the Dataflow monitoring UI.
|
| + /// - "JOB_MESSAGE_ERROR" : The message is at the 'error' level: indicating a
|
| + /// condition preventing a job from succeeding. Typically, Dataflow pipeline
|
| + /// runners display log messages at this level by default, and these
|
| + /// messages are displayed by default in the Dataflow monitoring UI.
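|
| + ///
|
| + /// A filtering sketch (hypothetical `messages` list):
|
| + ///
|
| + ///     final errors = messages
|
| + ///         .where((m) => m.messageImportance == 'JOB_MESSAGE_ERROR');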
|
| core.String messageImportance;
|
| - /** The text of the message. */
|
| +
|
| + /// The text of the message.
|
| core.String messageText;
|
| - /** The timestamp of the message. */
|
| +
|
| + /// The timestamp of the message.
|
| core.String time;
|
|
|
| JobMessage();
|
| @@ -4386,7 +4571,8 @@ class JobMessage {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (id != null) {
|
| _json["id"] = id;
|
| }
|
| @@ -4403,19 +4589,18 @@ class JobMessage {
|
| }
|
| }
|
|
|
| -/**
|
| - * JobMetrics contains a collection of metrics descibing the detailed progress
|
| - * of a Dataflow job. Metrics correspond to user-defined and system-defined
|
| - * metrics in the job.
|
| - *
|
| - * This resource captures only the most recent values of each metric;
|
| - * time-series data can be queried for them (under the same metric names)
|
| - * from Cloud Monitoring.
|
| - */
|
| +/// JobMetrics contains a collection of metrics describing the detailed progress
|
| +/// of a Dataflow job. Metrics correspond to user-defined and system-defined
|
| +/// metrics in the job.
|
| +///
|
| +/// This resource captures only the most recent values of each metric;
|
| +/// time-series data can be queried for them (under the same metric names)
|
| +/// from Cloud Monitoring.
|
| class JobMetrics {
|
| - /** Timestamp as of which metric values are current. */
|
| + /// Timestamp as of which metric values are current.
|
| core.String metricTime;
|
| - /** All metrics for this job. */
|
| +
|
| + /// All metrics for this job.
|
| core.List<MetricUpdate> metrics;
|
|
|
| JobMetrics();
|
| @@ -4425,12 +4610,15 @@ class JobMetrics {
|
| metricTime = _json["metricTime"];
|
| }
|
| if (_json.containsKey("metrics")) {
|
| - metrics = _json["metrics"].map((value) => new MetricUpdate.fromJson(value)).toList();
|
| + metrics = _json["metrics"]
|
| + .map((value) => new MetricUpdate.fromJson(value))
|
| + .toList();
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (metricTime != null) {
|
| _json["metricTime"] = metricTime;
|
| }
|
| @@ -4441,23 +4629,21 @@ class JobMetrics {
|
| }
|
| }
|
|
|
| -/**
|
| - * Data disk assignment information for a specific key-range of a sharded
|
| - * computation.
|
| - * Currently we only support UTF-8 character splits to simplify encoding into
|
| - * JSON.
|
| - */
|
| +/// Data disk assignment information for a specific key-range of a sharded
|
| +/// computation.
|
| +/// Currently we only support UTF-8 character splits to simplify encoding into
|
| +/// JSON.
|
| class KeyRangeDataDiskAssignment {
|
| - /**
|
| - * The name of the data disk where data for this range is stored.
|
| - * This name is local to the Google Cloud Platform project and uniquely
|
| - * identifies the disk within that project, for example
|
| - * "myproject-1014-104817-4c2-harness-0-disk-1".
|
| - */
|
| + /// The name of the data disk where data for this range is stored.
|
| + /// This name is local to the Google Cloud Platform project and uniquely
|
| + /// identifies the disk within that project, for example
|
| + /// "myproject-1014-104817-4c2-harness-0-disk-1".
|
| core.String dataDisk;
|
| - /** The end (exclusive) of the key range. */
|
| +
|
| + /// The end (exclusive) of the key range.
|
| core.String end;
|
| - /** The start (inclusive) of the key range. */
|
| +
|
| + /// The start (inclusive) of the key range.
|
| core.String start;
|
|
|
| KeyRangeDataDiskAssignment();
|
| @@ -4475,7 +4661,8 @@ class KeyRangeDataDiskAssignment {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (dataDisk != null) {
|
| _json["dataDisk"] = dataDisk;
|
| }
|
| @@ -4489,32 +4676,28 @@ class KeyRangeDataDiskAssignment {
|
| }
|
| }
|
|
|
| -/**
|
| - * Location information for a specific key-range of a sharded computation.
|
| - * Currently we only support UTF-8 character splits to simplify encoding into
|
| - * JSON.
|
| - */
|
| +/// Location information for a specific key-range of a sharded computation.
|
| +/// Currently we only support UTF-8 character splits to simplify encoding into
|
| +/// JSON.
|
| class KeyRangeLocation {
|
| - /**
|
| - * The name of the data disk where data for this range is stored.
|
| - * This name is local to the Google Cloud Platform project and uniquely
|
| - * identifies the disk within that project, for example
|
| - * "myproject-1014-104817-4c2-harness-0-disk-1".
|
| - */
|
| + /// The name of the data disk where data for this range is stored.
|
| + /// This name is local to the Google Cloud Platform project and uniquely
|
| + /// identifies the disk within that project, for example
|
| + /// "myproject-1014-104817-4c2-harness-0-disk-1".
|
| core.String dataDisk;
|
| - /**
|
| - * The physical location of this range assignment to be used for
|
| - * streaming computation cross-worker message delivery.
|
| - */
|
| +
|
| + /// The physical location of this range assignment to be used for
|
| + /// streaming computation cross-worker message delivery.
|
| core.String deliveryEndpoint;
|
| - /**
|
| - * DEPRECATED. The location of the persistent state for this range, as a
|
| - * persistent directory in the worker local filesystem.
|
| - */
|
| +
|
| + /// DEPRECATED. The location of the persistent state for this range, as a
|
| + /// persistent directory in the worker local filesystem.
|
| core.String deprecatedPersistentDirectory;
|
| - /** The end (exclusive) of the key range. */
|
| +
|
| + /// The end (exclusive) of the key range.
|
| core.String end;
|
| - /** The start (inclusive) of the key range. */
|
| +
|
| + /// The start (inclusive) of the key range.
|
| core.String start;
|
|
|
| KeyRangeLocation();
|
| @@ -4538,7 +4721,8 @@ class KeyRangeLocation {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (dataDisk != null) {
|
| _json["dataDisk"] = dataDisk;
|
| }
|
| @@ -4558,13 +4742,15 @@ class KeyRangeLocation {
|
| }
|
| }
|
|
|
| -/** Parameters to provide to the template being launched. */
|
| +/// Parameters to provide to the template being launched.
|
| class LaunchTemplateParameters {
|
| - /** The runtime environment for the job. */
|
| + /// The runtime environment for the job.
|
| RuntimeEnvironment environment;
|
| - /** Required. The job name to use for the created job. */
|
| +
|
| + /// Required. The job name to use for the created job.
|
| core.String jobName;
|
| - /** The runtime parameters to pass to the job. */
|
| +
|
| + /// The runtime parameters to pass to the job.
|
| core.Map<core.String, core.String> parameters;
|
|
|
| LaunchTemplateParameters();
|
| @@ -4582,7 +4768,8 @@ class LaunchTemplateParameters {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (environment != null) {
|
| _json["environment"] = (environment).toJson();
|
| }
|
| @@ -4596,12 +4783,10 @@ class LaunchTemplateParameters {
|
| }
|
| }
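|
| // A hedged sketch of filling in launch parameters for a template; the
|
| // parameter keys below are template-specific and purely illustrative.
|
| final launchParams = new LaunchTemplateParameters()
|
|   ..jobName = 'my-template-job-1'
|
|   ..parameters = {
|
|     'inputFile': 'gs://my-bucket/input.txt',
|
|     'output': 'gs://my-bucket/output',
|
|   };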
|
|
|
| -/** Response to the request to launch a template. */
|
| +/// Response to the request to launch a template.
|
| class LaunchTemplateResponse {
|
| - /**
|
| - * The job that was launched, if the request was not a dry run and
|
| - * the job was successfully launched.
|
| - */
|
| + /// The job that was launched, if the request was not a dry run and
|
| + /// the job was successfully launched.
|
| Job job;
|
|
|
| LaunchTemplateResponse();
|
| @@ -4613,7 +4798,8 @@ class LaunchTemplateResponse {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (job != null) {
|
| _json["job"] = (job).toJson();
|
| }
|
| @@ -4621,25 +4807,26 @@ class LaunchTemplateResponse {
|
| }
|
| }
|
|
|
| -/** Request to lease WorkItems. */
|
| +/// Request to lease WorkItems.
|
| class LeaseWorkItemRequest {
|
| - /** The current timestamp at the worker. */
|
| + /// The current timestamp at the worker.
|
| core.String currentWorkerTime;
|
| - /** The location which contains the WorkItem's job. */
|
| +
|
| + /// The location which contains the WorkItem's job.
|
| core.String location;
|
| - /** The initial lease period. */
|
| +
|
| + /// The initial lease period.
|
| core.String requestedLeaseDuration;
|
| - /** Filter for WorkItem type. */
|
| +
|
| + /// Filter for WorkItem type.
|
| core.List<core.String> workItemTypes;
|
| - /**
|
| - * Worker capabilities. WorkItems might be limited to workers with specific
|
| - * capabilities.
|
| - */
|
| +
|
| + /// Worker capabilities. WorkItems might be limited to workers with specific
|
| + /// capabilities.
|
| core.List<core.String> workerCapabilities;
|
| - /**
|
| - * Identifies the worker leasing work -- typically the ID of the
|
| - * virtual machine running the worker.
|
| - */
|
| +
|
| + /// Identifies the worker leasing work -- typically the ID of the
|
| + /// virtual machine running the worker.
|
| core.String workerId;
|
|
|
| LeaseWorkItemRequest();
|
| @@ -4666,7 +4853,8 @@ class LeaseWorkItemRequest {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (currentWorkerTime != null) {
|
| _json["currentWorkerTime"] = currentWorkerTime;
|
| }
|
| @@ -4689,21 +4877,24 @@ class LeaseWorkItemRequest {
|
| }
|
| }
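|
| // A hedged sketch of a worker-side lease request using the fields
|
| // documented above; the location, lease duration format, and work item
|
| // type string are illustrative assumptions, not values fixed by this file.
|
| final lease = new LeaseWorkItemRequest()
|
|   ..currentWorkerTime = new DateTime.now().toUtc().toIso8601String()
|
|   ..location = 'us-central1'
|
|   ..requestedLeaseDuration = '300s'
|
|   ..workItemTypes = ['map_task']
|
|   ..workerId = 'myproject-1014-104817-4c2-harness-0';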
|
|
|
| -/** Response to a request to lease WorkItems. */
|
| +/// Response to a request to lease WorkItems.
|
| class LeaseWorkItemResponse {
|
| - /** A list of the leased WorkItems. */
|
| + /// A list of the leased WorkItems.
|
| core.List<WorkItem> workItems;
|
|
|
| LeaseWorkItemResponse();
|
|
|
| LeaseWorkItemResponse.fromJson(core.Map _json) {
|
| if (_json.containsKey("workItems")) {
|
| - workItems = _json["workItems"].map((value) => new WorkItem.fromJson(value)).toList();
|
| + workItems = _json["workItems"]
|
| + .map((value) => new WorkItem.fromJson(value))
|
| + .toList();
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (workItems != null) {
|
| _json["workItems"] = workItems.map((value) => (value).toJson()).toList();
|
| }
|
| @@ -4711,23 +4902,29 @@ class LeaseWorkItemResponse {
|
| }
|
| }
|
|
|
| -/** Response to a request to list job messages. */
|
| +/// Response to a request to list job messages.
|
| class ListJobMessagesResponse {
|
| - /** Autoscaling events in ascending timestamp order. */
|
| + /// Autoscaling events in ascending timestamp order.
|
| core.List<AutoscalingEvent> autoscalingEvents;
|
| - /** Messages in ascending timestamp order. */
|
| +
|
| + /// Messages in ascending timestamp order.
|
| core.List<JobMessage> jobMessages;
|
| - /** The token to obtain the next page of results if there are more. */
|
| +
|
| + /// The token to obtain the next page of results if there are more.
|
| core.String nextPageToken;
|
|
|
| ListJobMessagesResponse();
|
|
|
| ListJobMessagesResponse.fromJson(core.Map _json) {
|
| if (_json.containsKey("autoscalingEvents")) {
|
| - autoscalingEvents = _json["autoscalingEvents"].map((value) => new AutoscalingEvent.fromJson(value)).toList();
|
| + autoscalingEvents = _json["autoscalingEvents"]
|
| + .map((value) => new AutoscalingEvent.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("jobMessages")) {
|
| - jobMessages = _json["jobMessages"].map((value) => new JobMessage.fromJson(value)).toList();
|
| + jobMessages = _json["jobMessages"]
|
| + .map((value) => new JobMessage.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("nextPageToken")) {
|
| nextPageToken = _json["nextPageToken"];
|
| @@ -4735,12 +4932,15 @@ class ListJobMessagesResponse {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (autoscalingEvents != null) {
|
| - _json["autoscalingEvents"] = autoscalingEvents.map((value) => (value).toJson()).toList();
|
| + _json["autoscalingEvents"] =
|
| + autoscalingEvents.map((value) => (value).toJson()).toList();
|
| }
|
| if (jobMessages != null) {
|
| - _json["jobMessages"] = jobMessages.map((value) => (value).toJson()).toList();
|
| + _json["jobMessages"] =
|
| + jobMessages.map((value) => (value).toJson()).toList();
|
| }
|
| if (nextPageToken != null) {
|
| _json["nextPageToken"] = nextPageToken;
|
| @@ -4749,23 +4949,25 @@ class ListJobMessagesResponse {
|
| }
|
| }
|
|
|
| -/**
|
| - * Response to a request to list Cloud Dataflow jobs. This may be a partial
|
| - * response, depending on the page size in the ListJobsRequest.
|
| - */
|
| +/// Response to a request to list Cloud Dataflow jobs. This may be a partial
|
| +/// response, depending on the page size in the ListJobsRequest.
|
| class ListJobsResponse {
|
| - /** Zero or more messages describing locations that failed to respond. */
|
| + /// Zero or more messages describing locations that failed to respond.
|
| core.List<FailedLocation> failedLocation;
|
| - /** A subset of the requested job information. */
|
| +
|
| + /// A subset of the requested job information.
|
| core.List<Job> jobs;
|
| - /** Set if there may be more results than fit in this response. */
|
| +
|
| + /// Set if there may be more results than fit in this response.
|
| core.String nextPageToken;
|
|
|
| ListJobsResponse();
|
|
|
| ListJobsResponse.fromJson(core.Map _json) {
|
| if (_json.containsKey("failedLocation")) {
|
| - failedLocation = _json["failedLocation"].map((value) => new FailedLocation.fromJson(value)).toList();
|
| + failedLocation = _json["failedLocation"]
|
| + .map((value) => new FailedLocation.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("jobs")) {
|
| jobs = _json["jobs"].map((value) => new Job.fromJson(value)).toList();
|
| @@ -4776,9 +4978,11 @@ class ListJobsResponse {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (failedLocation != null) {
|
| - _json["failedLocation"] = failedLocation.map((value) => (value).toJson()).toList();
|
| + _json["failedLocation"] =
|
| + failedLocation.map((value) => (value).toJson()).toList();
|
| }
|
| if (jobs != null) {
|
| _json["jobs"] = jobs.map((value) => (value).toJson()).toList();
|
| @@ -4790,20 +4994,19 @@ class ListJobsResponse {
|
| }
|
| }
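|
| // A pagination sketch draining nextPageToken, assuming a DataflowApi
|
| // instance, the projects.jobs.list method defined earlier in this file
|
| // (whose optional pageToken parameter is assumed here), and dart:async
|
| // in scope.
|
| Future<List<Job>> listAllJobs(DataflowApi api, String projectId) async {
|
|   final all = <Job>[];
|
|   String pageToken;
|
|   do {
|
|     ListJobsResponse page =
|
|         await api.projects.jobs.list(projectId, pageToken: pageToken);
|
|     all.addAll(page.jobs ?? <Job>[]);
|
|     pageToken = page.nextPageToken;
|
|   } while (pageToken != null);
|
|   return all;
|
| }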
|
|
|
| -/** Bucket of values for Distribution's logarithmic histogram. */
|
| +/// Bucket of values for Distribution's logarithmic histogram.
|
| class LogBucket {
|
| - /** Number of values in this bucket. */
|
| + /// Number of values in this bucket.
|
| core.String count;
|
| - /**
|
| - * floor(log2(value)); defined to be zero for nonpositive values.
|
| - * log(-1) = 0
|
| - * log(0) = 0
|
| - * log(1) = 0
|
| - * log(2) = 1
|
| - * log(3) = 1
|
| - * log(4) = 2
|
| - * log(5) = 2
|
| - */
|
| +
|
| + /// floor(log2(value)); defined to be zero for nonpositive values.
|
| + /// log(-1) = 0
|
| + /// log(0) = 0
|
| + /// log(1) = 0
|
| + /// log(2) = 1
|
| + /// log(3) = 1
|
| + /// log(4) = 2
|
| + /// log(5) = 2
|
| core.int log;
|
|
|
| LogBucket();
|
| @@ -4818,7 +5021,8 @@ class LogBucket {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (count != null) {
|
| _json["count"] = count;
|
| }
|
| @@ -4829,33 +5033,31 @@ class LogBucket {
|
| }
|
| }
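|
| // A sketch of the bucketing rule documented on `log` above:
|
| // floor(log2(value)), clamped to zero for nonpositive values. For a
|
| // positive integer v, floor(log2(v)) is simply v.bitLength - 1.
|
| int logBucketIndex(int value) => value <= 1 ? 0 : value.bitLength - 1;
|
| // logBucketIndex(1) == 0, logBucketIndex(3) == 1, logBucketIndex(5) == 2.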
|
|
|
| -/**
|
| - * MapTask consists of an ordered set of instructions, each of which
|
| - * describes one particular low-level operation for the worker to
|
| - * perform in order to accomplish the MapTask's WorkItem.
|
| - *
|
| - * Each instruction must appear in the list before any instructions which
|
| - * depends on its output.
|
| - */
|
| +/// MapTask consists of an ordered set of instructions, each of which
|
| +/// describes one particular low-level operation for the worker to
|
| +/// perform in order to accomplish the MapTask's WorkItem.
|
| +///
|
| +/// Each instruction must appear in the list before any instructions which
|
| +/// depend on its output.
|
| class MapTask {
|
| - /** The instructions in the MapTask. */
|
| + /// The instructions in the MapTask.
|
| core.List<ParallelInstruction> instructions;
|
| - /**
|
| - * System-defined name of the stage containing this MapTask.
|
| - * Unique across the workflow.
|
| - */
|
| +
|
| + /// System-defined name of the stage containing this MapTask.
|
| + /// Unique across the workflow.
|
| core.String stageName;
|
| - /**
|
| - * System-defined name of this MapTask.
|
| - * Unique across the workflow.
|
| - */
|
| +
|
| + /// System-defined name of this MapTask.
|
| + /// Unique across the workflow.
|
| core.String systemName;
|
|
|
| MapTask();
|
|
|
| MapTask.fromJson(core.Map _json) {
|
| if (_json.containsKey("instructions")) {
|
| - instructions = _json["instructions"].map((value) => new ParallelInstruction.fromJson(value)).toList();
|
| + instructions = _json["instructions"]
|
| + .map((value) => new ParallelInstruction.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("stageName")) {
|
| stageName = _json["stageName"];
|
| @@ -4866,9 +5068,11 @@ class MapTask {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (instructions != null) {
|
| - _json["instructions"] = instructions.map((value) => (value).toJson()).toList();
|
| + _json["instructions"] =
|
| + instructions.map((value) => (value).toJson()).toList();
|
| }
|
| if (stageName != null) {
|
| _json["stageName"] = stageName;
|
| @@ -4880,17 +5084,14 @@ class MapTask {
|
| }
|
| }
|
|
|
| -/**
|
| - * The metric short id is returned to the user alongside an offset into
|
| - * ReportWorkItemStatusRequest
|
| - */
|
| +/// The metric short id is returned to the user alongside an offset into
|
| +/// ReportWorkItemStatusRequest.
|
| class MetricShortId {
|
| - /**
|
| - * The index of the corresponding metric in
|
| - * the ReportWorkItemStatusRequest. Required.
|
| - */
|
| + /// The index of the corresponding metric in
|
| + /// the ReportWorkItemStatusRequest. Required.
|
| core.int metricIndex;
|
| - /** The service-generated short identifier for the metric. */
|
| +
|
| + /// The service-generated short identifier for the metric.
|
| core.String shortId;
|
|
|
| MetricShortId();
|
| @@ -4905,7 +5106,8 @@ class MetricShortId {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (metricIndex != null) {
|
| _json["metricIndex"] = metricIndex;
|
| }
|
| @@ -4916,26 +5118,22 @@ class MetricShortId {
|
| }
|
| }
|
|
|
| -/**
|
| - * Identifies a metric, by describing the source which generated the
|
| - * metric.
|
| - */
|
| +/// Identifies a metric by describing the source which generated the
|
| +/// metric.
|
| class MetricStructuredName {
|
| - /**
|
| - * Zero or more labeled fields which identify the part of the job this
|
| - * metric is associated with, such as the name of a step or collection.
|
| - *
|
| - * For example, built-in counters associated with steps will have
|
| - * context['step'] = <step-name>. Counters associated with PCollections
|
| - * in the SDK will have context['pcollection'] = <pcollection-name>.
|
| - */
|
| + /// Zero or more labeled fields which identify the part of the job this
|
| + /// metric is associated with, such as the name of a step or collection.
|
| + ///
|
| + /// For example, built-in counters associated with steps will have
|
| + /// context['step'] = <step-name>. Counters associated with PCollections
|
| + /// in the SDK will have context['pcollection'] = <pcollection-name>.
|
| core.Map<core.String, core.String> context;
|
| - /** Worker-defined metric name. */
|
| +
|
| + /// Worker-defined metric name.
|
| core.String name;
|
| - /**
|
| - * Origin (namespace) of metric name. May be blank for user-define metrics;
|
| - * will be "dataflow" for metrics defined by the Dataflow service or SDK.
|
| - */
|
| +
|
| + /// Origin (namespace) of metric name. May be blank for user-defined metrics;
|
| + /// will be "dataflow" for metrics defined by the Dataflow service or SDK.
|
| core.String origin;
|
|
|
| MetricStructuredName();
|
| @@ -4953,7 +5151,8 @@ class MetricStructuredName {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (context != null) {
|
| _json["context"] = context;
|
| }
|
| @@ -4967,84 +5166,76 @@ class MetricStructuredName {
|
| }
|
| }
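|
| // A sketch of the context conventions described above for a built-in
|
| // per-step counter; the step and counter names are illustrative.
|
| final metricName = new MetricStructuredName()
|
|   ..origin = 'dataflow'
|
|   ..name = 'ElementCount'
|
|   ..context = {'step': 'Read/MyReadStep'};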
|
|
|
| -/** Describes the state of a metric. */
|
| +/// Describes the state of a metric.
|
| class MetricUpdate {
|
| - /**
|
| - * True if this metric is reported as the total cumulative aggregate
|
| - * value accumulated since the worker started working on this WorkItem.
|
| - * By default this is false, indicating that this metric is reported
|
| - * as a delta that is not associated with any WorkItem.
|
| - */
|
| + /// True if this metric is reported as the total cumulative aggregate
|
| + /// value accumulated since the worker started working on this WorkItem.
|
| + /// By default this is false, indicating that this metric is reported
|
| + /// as a delta that is not associated with any WorkItem.
|
| core.bool cumulative;
|
| - /**
|
| - * A struct value describing properties of a distribution of numeric values.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// A struct value describing properties of a distribution of numeric values.
|
| + ///
|
| + /// The values for Object must be JSON objects. It can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Object distribution;
|
| - /**
|
| - * Worker-computed aggregate value for internal use by the Dataflow
|
| - * service.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// Worker-computed aggregate value for internal use by the Dataflow
|
| + /// service.
|
| + ///
|
| + /// The values for Object must be JSON objects. It can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Object internal;
|
| - /**
|
| - * Metric aggregation kind. The possible metric aggregation kinds are
|
| - * "Sum", "Max", "Min", "Mean", "Set", "And", "Or", and "Distribution".
|
| - * The specified aggregation kind is case-insensitive.
|
| - *
|
| - * If omitted, this is not an aggregated value but instead
|
| - * a single metric sample value.
|
| - */
|
| +
|
| + /// Metric aggregation kind. The possible metric aggregation kinds are
|
| + /// "Sum", "Max", "Min", "Mean", "Set", "And", "Or", and "Distribution".
|
| + /// The specified aggregation kind is case-insensitive.
|
| + ///
|
| + /// If omitted, this is not an aggregated value but instead
|
| + /// a single metric sample value.
|
| core.String kind;
|
| - /**
|
| - * Worker-computed aggregate value for the "Mean" aggregation kind.
|
| - * This holds the count of the aggregated values and is used in combination
|
| - * with mean_sum above to obtain the actual mean aggregate value.
|
| - * The only possible value type is Long.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// Worker-computed aggregate value for the "Mean" aggregation kind.
|
| + /// This holds the count of the aggregated values and is used in combination
|
| + /// with mean_sum above to obtain the actual mean aggregate value.
|
| + /// The only possible value type is Long.
|
| + ///
|
| + /// The values for Object must be JSON objects. It can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Object meanCount;
|
| - /**
|
| - * Worker-computed aggregate value for the "Mean" aggregation kind.
|
| - * This holds the sum of the aggregated values and is used in combination
|
| - * with mean_count below to obtain the actual mean aggregate value.
|
| - * The only possible value types are Long and Double.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// Worker-computed aggregate value for the "Mean" aggregation kind.
|
| + /// This holds the sum of the aggregated values and is used in combination
|
| + /// with mean_count below to obtain the actual mean aggregate value.
|
| + /// The only possible value types are Long and Double.
|
| + ///
|
| + /// The values for Object must be JSON objects. It can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Object meanSum;
|
| - /** Name of the metric. */
|
| +
|
| + /// Name of the metric.
|
| MetricStructuredName name;
|
| - /**
|
| - * Worker-computed aggregate value for aggregation kinds "Sum", "Max", "Min",
|
| - * "And", and "Or". The possible value types are Long, Double, and Boolean.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// Worker-computed aggregate value for aggregation kinds "Sum", "Max",
|
| + /// "Min", "And", and "Or". The possible value types are Long, Double,
|
| + /// and Boolean.
|
| + ///
|
| + /// The values for Object must be JSON objects. It can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Object scalar;
|
| - /**
|
| - * Worker-computed aggregate value for the "Set" aggregation kind. The only
|
| - * possible value type is a list of Values whose type can be Long, Double,
|
| - * or String, according to the metric's type. All Values in the list must
|
| - * be of the same type.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// Worker-computed aggregate value for the "Set" aggregation kind. The only
|
| + /// possible value type is a list of Values whose type can be Long, Double,
|
| + /// or String, according to the metric's type. All Values in the list must
|
| + /// be of the same type.
|
| + ///
|
| + /// The values for Object must be JSON objects. It can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Object set;
|
| - /**
|
| - * Timestamp associated with the metric value. Optional when workers are
|
| - * reporting work progress; it will be filled in responses from the
|
| - * metrics API.
|
| - */
|
| +
|
| + /// Timestamp associated with the metric value. Optional when workers are
|
| + /// reporting work progress; it will be filled in responses from the
|
| + /// metrics API.
|
| core.String updateTime;
|
|
|
| MetricUpdate();
|
| @@ -5083,7 +5274,8 @@ class MetricUpdate {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (cumulative != null) {
|
| _json["cumulative"] = cumulative;
|
| }
|
| @@ -5118,14 +5310,12 @@ class MetricUpdate {
|
| }
|
| }
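|
| // A sketch of how the meanSum / meanCount pair documented above combines
|
| // into the actual mean; the numbers are illustrative.
|
| final meanUpdate = new MetricUpdate()
|
|   ..kind = 'Mean'
|
|   ..cumulative = true
|
|   ..meanSum = 42
|
|   ..meanCount = 6;
|
| final mean =
|
|     (meanUpdate.meanSum as num) / (meanUpdate.meanCount as num); // 7.0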
|
|
|
| -/** Describes mounted data disk. */
|
| +/// Describes mounted data disk.
|
| class MountedDataDisk {
|
| - /**
|
| - * The name of the data disk.
|
| - * This name is local to the Google Cloud Platform project and uniquely
|
| - * identifies the disk within that project, for example
|
| - * "myproject-1014-104817-4c2-harness-0-disk-1".
|
| - */
|
| + /// The name of the data disk.
|
| + /// This name is local to the Google Cloud Platform project and uniquely
|
| + /// identifies the disk within that project, for example
|
| + /// "myproject-1014-104817-4c2-harness-0-disk-1".
|
| core.String dataDisk;
|
|
|
| MountedDataDisk();
|
| @@ -5137,7 +5327,8 @@ class MountedDataDisk {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (dataDisk != null) {
|
| _json["dataDisk"] = dataDisk;
|
| }
|
| @@ -5145,12 +5336,10 @@ class MountedDataDisk {
|
| }
|
| }
|
|
|
| -/** Information about an output of a multi-output DoFn. */
|
| +/// Information about an output of a multi-output DoFn.
|
| class MultiOutputInfo {
|
| - /**
|
| - * The id of the tag the user code will emit to this output by; this
|
| - * should correspond to the tag of some SideInputInfo.
|
| - */
|
| + /// The id of the tag the user code will emit to this output by; this
|
| + /// should correspond to the tag of some SideInputInfo.
|
| core.String tag;
|
|
|
| MultiOutputInfo();
|
| @@ -5162,7 +5351,8 @@ class MultiOutputInfo {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (tag != null) {
|
| _json["tag"] = tag;
|
| }
|
| @@ -5170,26 +5360,25 @@ class MultiOutputInfo {
|
| }
|
| }
|
|
|
| -/** Basic metadata about a counter. */
|
| +/// Basic metadata about a counter.
|
| class NameAndKind {
|
| - /**
|
| - * Counter aggregation kind.
|
| - * Possible string values are:
|
| - * - "INVALID" : Counter aggregation kind was not set.
|
| - * - "SUM" : Aggregated value is the sum of all contributed values.
|
| - * - "MAX" : Aggregated value is the max of all contributed values.
|
| - * - "MIN" : Aggregated value is the min of all contributed values.
|
| - * - "MEAN" : Aggregated value is the mean of all contributed values.
|
| - * - "OR" : Aggregated value represents the logical 'or' of all contributed
|
| - * values.
|
| - * - "AND" : Aggregated value represents the logical 'and' of all contributed
|
| - * values.
|
| - * - "SET" : Aggregated value is a set of unique contributed values.
|
| - * - "DISTRIBUTION" : Aggregated value captures statistics about a
|
| - * distribution.
|
| - */
|
| + /// Counter aggregation kind.
|
| + /// Possible string values are:
|
| + /// - "INVALID" : Counter aggregation kind was not set.
|
| + /// - "SUM" : Aggregated value is the sum of all contributed values.
|
| + /// - "MAX" : Aggregated value is the max of all contributed values.
|
| + /// - "MIN" : Aggregated value is the min of all contributed values.
|
| + /// - "MEAN" : Aggregated value is the mean of all contributed values.
|
| + /// - "OR" : Aggregated value represents the logical 'or' of all contributed
|
| + /// values.
|
| + /// - "AND" : Aggregated value represents the logical 'and' of all
|
| + /// contributed values.
|
| + /// - "SET" : Aggregated value is a set of unique contributed values.
|
| + /// - "DISTRIBUTION" : Aggregated value captures statistics about a
|
| + /// distribution.
|
| core.String kind;
|
| - /** Name of the counter. */
|
| +
|
| + /// Name of the counter.
|
| core.String name;
|
|
|
| NameAndKind();
|
| @@ -5204,7 +5393,8 @@ class NameAndKind {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (kind != null) {
|
| _json["kind"] = kind;
|
| }
|
| @@ -5215,28 +5405,25 @@ class NameAndKind {
|
| }
|
| }
|
|
|
| -/**
|
| - * The packages that must be installed in order for a worker to run the
|
| - * steps of the Cloud Dataflow job that will be assigned to its worker
|
| - * pool.
|
| - *
|
| - * This is the mechanism by which the Cloud Dataflow SDK causes code to
|
| - * be loaded onto the workers. For example, the Cloud Dataflow Java SDK
|
| - * might use this to install jars containing the user's code and all of the
|
| - * various dependencies (libraries, data files, etc.) required in order
|
| - * for that code to run.
|
| - */
|
| +/// The packages that must be installed in order for a worker to run the
|
| +/// steps of the Cloud Dataflow job that will be assigned to its worker
|
| +/// pool.
|
| +///
|
| +/// This is the mechanism by which the Cloud Dataflow SDK causes code to
|
| +/// be loaded onto the workers. For example, the Cloud Dataflow Java SDK
|
| +/// might use this to install jars containing the user's code and all of the
|
| +/// various dependencies (libraries, data files, etc.) required in order
|
| +/// for that code to run.
|
| class Package {
|
| - /**
|
| - * The resource to read the package from. The supported resource type is:
|
| - *
|
| - * Google Cloud Storage:
|
| - *
|
| - * storage.googleapis.com/{bucket}
|
| - * bucket.storage.googleapis.com/
|
| - */
|
| + /// The resource to read the package from. The supported resource type is:
|
| + ///
|
| + /// Google Cloud Storage:
|
| + ///
|
| + /// storage.googleapis.com/{bucket}
|
| + /// bucket.storage.googleapis.com/
|
| core.String location;
|
| - /** The name of the package. */
|
| +
|
| + /// The name of the package.
|
| core.String name;
|
|
|
| Package();
|
| @@ -5251,7 +5438,8 @@ class Package {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (location != null) {
|
| _json["location"] = location;
|
| }
|
| @@ -5262,27 +5450,27 @@ class Package {
|
| }
|
| }
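|
| // A sketch of a staged package in the Cloud Storage location form shown
|
| // above; the bucket and object names are illustrative.
|
| final stagedPackage = new Package()
|
|   ..name = 'dataflow-worker.jar'
|
|   ..location = 'storage.googleapis.com/my-bucket/staging/dataflow-worker.jar';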
|
|
|
| -/**
|
| - * An instruction that does a ParDo operation.
|
| - * Takes one main input and zero or more side inputs, and produces
|
| - * zero or more outputs.
|
| - * Runs user code.
|
| - */
|
| +/// An instruction that does a ParDo operation.
|
| +/// Takes one main input and zero or more side inputs, and produces
|
| +/// zero or more outputs.
|
| +/// Runs user code.
|
| class ParDoInstruction {
|
| - /** The input. */
|
| + /// The input.
|
| InstructionInput input;
|
| - /** Information about each of the outputs, if user_fn is a MultiDoFn. */
|
| +
|
| + /// Information about each of the outputs, if user_fn is a MultiDoFn.
|
| core.List<MultiOutputInfo> multiOutputInfos;
|
| - /** The number of outputs. */
|
| +
|
| + /// The number of outputs.
|
| core.int numOutputs;
|
| - /** Zero or more side inputs. */
|
| +
|
| + /// Zero or more side inputs.
|
| core.List<SideInputInfo> sideInputs;
|
| - /**
|
| - * The user function to invoke.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// The user function to invoke.
|
| + ///
|
| + /// The values for Object must be JSON objects. It can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Map<core.String, core.Object> userFn;
|
|
|
| ParDoInstruction();
|
| @@ -5292,13 +5480,17 @@ class ParDoInstruction {
|
| input = new InstructionInput.fromJson(_json["input"]);
|
| }
|
| if (_json.containsKey("multiOutputInfos")) {
|
| - multiOutputInfos = _json["multiOutputInfos"].map((value) => new MultiOutputInfo.fromJson(value)).toList();
|
| + multiOutputInfos = _json["multiOutputInfos"]
|
| + .map((value) => new MultiOutputInfo.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("numOutputs")) {
|
| numOutputs = _json["numOutputs"];
|
| }
|
| if (_json.containsKey("sideInputs")) {
|
| - sideInputs = _json["sideInputs"].map((value) => new SideInputInfo.fromJson(value)).toList();
|
| + sideInputs = _json["sideInputs"]
|
| + .map((value) => new SideInputInfo.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("userFn")) {
|
| userFn = _json["userFn"];
|
| @@ -5306,18 +5498,21 @@ class ParDoInstruction {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (input != null) {
|
| _json["input"] = (input).toJson();
|
| }
|
| if (multiOutputInfos != null) {
|
| - _json["multiOutputInfos"] = multiOutputInfos.map((value) => (value).toJson()).toList();
|
| + _json["multiOutputInfos"] =
|
| + multiOutputInfos.map((value) => (value).toJson()).toList();
|
| }
|
| if (numOutputs != null) {
|
| _json["numOutputs"] = numOutputs;
|
| }
|
| if (sideInputs != null) {
|
| - _json["sideInputs"] = sideInputs.map((value) => (value).toJson()).toList();
|
| + _json["sideInputs"] =
|
| + sideInputs.map((value) => (value).toJson()).toList();
|
| }
|
| if (userFn != null) {
|
| _json["userFn"] = userFn;
|
| @@ -5326,28 +5521,34 @@ class ParDoInstruction {
|
| }
|
| }
|
|
|
| -/** Describes a particular operation comprising a MapTask. */
|
| +/// Describes a particular operation comprising a MapTask.
|
| class ParallelInstruction {
|
| - /** Additional information for Flatten instructions. */
|
| + /// Additional information for Flatten instructions.
|
| FlattenInstruction flatten;
|
| - /** User-provided name of this operation. */
|
| +
|
| + /// User-provided name of this operation.
|
| core.String name;
|
| - /** System-defined name for the operation in the original workflow graph. */
|
| +
|
| + /// System-defined name for the operation in the original workflow graph.
|
| core.String originalName;
|
| - /** Describes the outputs of the instruction. */
|
| +
|
| + /// Describes the outputs of the instruction.
|
| core.List<InstructionOutput> outputs;
|
| - /** Additional information for ParDo instructions. */
|
| +
|
| + /// Additional information for ParDo instructions.
|
| ParDoInstruction parDo;
|
| - /** Additional information for PartialGroupByKey instructions. */
|
| +
|
| + /// Additional information for PartialGroupByKey instructions.
|
| PartialGroupByKeyInstruction partialGroupByKey;
|
| - /** Additional information for Read instructions. */
|
| +
|
| + /// Additional information for Read instructions.
|
| ReadInstruction read;
|
| - /**
|
| - * System-defined name of this operation.
|
| - * Unique across the workflow.
|
| - */
|
| +
|
| + /// System-defined name of this operation.
|
| + /// Unique across the workflow.
|
| core.String systemName;
|
| - /** Additional information for Write instructions. */
|
| +
|
| + /// Additional information for Write instructions.
|
| WriteInstruction write;
|
|
|
| ParallelInstruction();
|
| @@ -5363,13 +5564,16 @@ class ParallelInstruction {
|
| originalName = _json["originalName"];
|
| }
|
| if (_json.containsKey("outputs")) {
|
| - outputs = _json["outputs"].map((value) => new InstructionOutput.fromJson(value)).toList();
|
| + outputs = _json["outputs"]
|
| + .map((value) => new InstructionOutput.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("parDo")) {
|
| parDo = new ParDoInstruction.fromJson(_json["parDo"]);
|
| }
|
| if (_json.containsKey("partialGroupByKey")) {
|
| - partialGroupByKey = new PartialGroupByKeyInstruction.fromJson(_json["partialGroupByKey"]);
|
| + partialGroupByKey =
|
| + new PartialGroupByKeyInstruction.fromJson(_json["partialGroupByKey"]);
|
| }
|
| if (_json.containsKey("read")) {
|
| read = new ReadInstruction.fromJson(_json["read"]);
|
| @@ -5383,7 +5587,8 @@ class ParallelInstruction {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (flatten != null) {
|
| _json["flatten"] = (flatten).toJson();
|
| }
|
| @@ -5415,16 +5620,15 @@ class ParallelInstruction {
|
| }
|
| }
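|
| // A sketch of a two-instruction MapTask obeying the ordering rule
|
| // documented on MapTask above (producers precede consumers); the names
|
| // are illustrative and real instructions carry fully populated payloads.
|
| final readStep = new ParallelInstruction()
|
|   ..systemName = 's1'
|
|   ..read = new ReadInstruction();
|
| final parDoStep = new ParallelInstruction()
|
|   ..systemName = 's2'
|
|   ..parDo = new ParDoInstruction();
|
| final mapTask = new MapTask()
|
|   ..stageName = 'F12'
|
|   ..instructions = [readStep, parDoStep];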
|
|
|
| -/** Structured data associated with this message. */
|
| +/// Structured data associated with this message.
|
| class Parameter {
|
| - /** Key or name for this parameter. */
|
| + /// Key or name for this parameter.
|
| core.String key;
|
| - /**
|
| - * Value for this parameter.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// Value for this parameter.
|
| + ///
|
| + /// The values for Object must be JSON objects. It can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Object value;
|
|
|
| Parameter();
|
| @@ -5439,7 +5643,8 @@ class Parameter {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (key != null) {
|
| _json["key"] = key;
|
| }
|
| @@ -5450,17 +5655,21 @@ class Parameter {
|
| }
|
| }
|
|
|
| -/** Metadata for a specific parameter. */
|
| +/// Metadata for a specific parameter.
|
| class ParameterMetadata {
|
| - /** Required. The help text to display for the parameter. */
|
| + /// Required. The help text to display for the parameter.
|
| core.String helpText;
|
| - /** Optional. Whether the parameter is optional. Defaults to false. */
|
| +
|
| + /// Optional. Whether the parameter is optional. Defaults to false.
|
| core.bool isOptional;
|
| - /** Required. The label to display for the parameter. */
|
| +
|
| + /// Required. The label to display for the parameter.
|
| core.String label;
|
| - /** Required. The name of the parameter. */
|
| +
|
| + /// Required. The name of the parameter.
|
| core.String name;
|
| - /** Optional. Regexes that the parameter must match. */
|
| +
|
| + /// Optional. Regexes that the parameter must match.
|
| core.List<core.String> regexes;
|
|
|
| ParameterMetadata();
|
| @@ -5484,7 +5693,8 @@ class ParameterMetadata {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (helpText != null) {
|
| _json["helpText"] = helpText;
|
| }
|
| @@ -5504,38 +5714,34 @@ class ParameterMetadata {
|
| }
|
| }
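|
| // A validation sketch for ParameterMetadata.regexes; requiring every
|
| // listed pattern to match is an assumption about the intended semantics,
|
| // not something this file states.
|
| bool matchesAllRegexes(ParameterMetadata meta, String value) =>
|
|     (meta.regexes ?? const <String>[])
|
|         .every((pattern) => new RegExp(pattern).hasMatch(value));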
|
|
|
| -/**
|
| - * An instruction that does a partial group-by-key.
|
| - * One input and one output.
|
| - */
|
| +/// An instruction that does a partial group-by-key.
|
| +/// One input and one output.
|
| class PartialGroupByKeyInstruction {
|
| - /** Describes the input to the partial group-by-key instruction. */
|
| + /// Describes the input to the partial group-by-key instruction.
|
| InstructionInput input;
|
| - /**
|
| - * The codec to use for interpreting an element in the input PTable.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// The codec to use for interpreting an element in the input PTable.
|
| + ///
|
| + /// The values for Object must be JSON objects. It can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Map<core.String, core.Object> inputElementCodec;
|
| - /**
|
| - * If this instruction includes a combining function this is the name of the
|
| - * intermediate store between the GBK and the CombineValues.
|
| - */
|
| +
|
| + /// If this instruction includes a combining function, this is the name of the
|
| + /// intermediate store between the GBK and the CombineValues.
|
| core.String originalCombineValuesInputStoreName;
|
| - /**
|
| - * If this instruction includes a combining function, this is the name of the
|
| - * CombineValues instruction lifted into this instruction.
|
| - */
|
| +
|
| + /// If this instruction includes a combining function, this is the name of
|
| + /// the CombineValues instruction lifted into this instruction.
|
| core.String originalCombineValuesStepName;
|
| - /** Zero or more side inputs. */
|
| +
|
| + /// Zero or more side inputs.
|
| core.List<SideInputInfo> sideInputs;
|
| - /**
|
| - * The value combining function to invoke.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// The value combining function to invoke.
|
| + ///
|
| + /// The values for Object must be JSON objects. It can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Map<core.String, core.Object> valueCombiningFn;
|
|
|
| PartialGroupByKeyInstruction();
|
| @@ -5548,13 +5754,16 @@ class PartialGroupByKeyInstruction {
|
| inputElementCodec = _json["inputElementCodec"];
|
| }
|
| if (_json.containsKey("originalCombineValuesInputStoreName")) {
|
| - originalCombineValuesInputStoreName = _json["originalCombineValuesInputStoreName"];
|
| + originalCombineValuesInputStoreName =
|
| + _json["originalCombineValuesInputStoreName"];
|
| }
|
| if (_json.containsKey("originalCombineValuesStepName")) {
|
| originalCombineValuesStepName = _json["originalCombineValuesStepName"];
|
| }
|
| if (_json.containsKey("sideInputs")) {
|
| - sideInputs = _json["sideInputs"].map((value) => new SideInputInfo.fromJson(value)).toList();
|
| + sideInputs = _json["sideInputs"]
|
| + .map((value) => new SideInputInfo.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("valueCombiningFn")) {
|
| valueCombiningFn = _json["valueCombiningFn"];
|
| @@ -5562,7 +5771,8 @@ class PartialGroupByKeyInstruction {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (input != null) {
|
| _json["input"] = (input).toJson();
|
| }
|
| @@ -5570,13 +5780,15 @@ class PartialGroupByKeyInstruction {
|
| _json["inputElementCodec"] = inputElementCodec;
|
| }
|
| if (originalCombineValuesInputStoreName != null) {
|
| - _json["originalCombineValuesInputStoreName"] = originalCombineValuesInputStoreName;
|
| + _json["originalCombineValuesInputStoreName"] =
|
| + originalCombineValuesInputStoreName;
|
| }
|
| if (originalCombineValuesStepName != null) {
|
| _json["originalCombineValuesStepName"] = originalCombineValuesStepName;
|
| }
|
| if (sideInputs != null) {
|
| - _json["sideInputs"] = sideInputs.map((value) => (value).toJson()).toList();
|
| + _json["sideInputs"] =
|
| + sideInputs.map((value) => (value).toJson()).toList();
|
| }
|
| if (valueCombiningFn != null) {
|
| _json["valueCombiningFn"] = valueCombiningFn;
|
| @@ -5585,73 +5797,82 @@ class PartialGroupByKeyInstruction {
|
| }
|
| }
|
|
|
| -/**
|
| - * A descriptive representation of submitted pipeline as well as the executed
|
| - * form. This data is provided by the Dataflow service for ease of visualizing
|
| - * the pipeline and interpretting Dataflow provided metrics.
|
| - */
|
| +/// A descriptive representation of a submitted pipeline as well as the
|
| +/// executed form. This data is provided by the Dataflow service for ease
|
| +/// of visualizing the pipeline and interpreting Dataflow provided metrics.
|
| class PipelineDescription {
|
| - /** Pipeline level display data. */
|
| + /// Pipeline level display data.
|
| core.List<DisplayData> displayData;
|
| - /** Description of each stage of execution of the pipeline. */
|
| +
|
| + /// Description of each stage of execution of the pipeline.
|
| core.List<ExecutionStageSummary> executionPipelineStage;
|
| - /**
|
| - * Description of each transform in the pipeline and collections between them.
|
| - */
|
| +
|
| + /// Description of each transform in the pipeline and collections between
|
| + /// them.
|
| core.List<TransformSummary> originalPipelineTransform;
|
|
|
| PipelineDescription();
|
|
|
| PipelineDescription.fromJson(core.Map _json) {
|
| if (_json.containsKey("displayData")) {
|
| - displayData = _json["displayData"].map((value) => new DisplayData.fromJson(value)).toList();
|
| + displayData = _json["displayData"]
|
| + .map((value) => new DisplayData.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("executionPipelineStage")) {
|
| - executionPipelineStage = _json["executionPipelineStage"].map((value) => new ExecutionStageSummary.fromJson(value)).toList();
|
| + executionPipelineStage = _json["executionPipelineStage"]
|
| + .map((value) => new ExecutionStageSummary.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("originalPipelineTransform")) {
|
| - originalPipelineTransform = _json["originalPipelineTransform"].map((value) => new TransformSummary.fromJson(value)).toList();
|
| + originalPipelineTransform = _json["originalPipelineTransform"]
|
| + .map((value) => new TransformSummary.fromJson(value))
|
| + .toList();
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (displayData != null) {
|
| - _json["displayData"] = displayData.map((value) => (value).toJson()).toList();
|
| + _json["displayData"] =
|
| + displayData.map((value) => (value).toJson()).toList();
|
| }
|
| if (executionPipelineStage != null) {
|
| - _json["executionPipelineStage"] = executionPipelineStage.map((value) => (value).toJson()).toList();
|
| + _json["executionPipelineStage"] =
|
| + executionPipelineStage.map((value) => (value).toJson()).toList();
|
| }
|
| if (originalPipelineTransform != null) {
|
| - _json["originalPipelineTransform"] = originalPipelineTransform.map((value) => (value).toJson()).toList();
|
| + _json["originalPipelineTransform"] =
|
| + originalPipelineTransform.map((value) => (value).toJson()).toList();
|
| }
|
| return _json;
|
| }
|
| }
|
|
|
| -/**
|
| - * Position defines a position within a collection of data. The value
|
| - * can be either the end position, a key (used with ordered
|
| - * collections), a byte offset, or a record index.
|
| - */
|
| +/// Position defines a position within a collection of data. The value
|
| +/// can be either the end position, a key (used with ordered
|
| +/// collections), a byte offset, or a record index.
|
| class Position {
|
| - /** Position is a byte offset. */
|
| + /// Position is a byte offset.
|
| core.String byteOffset;
|
| - /** CloudPosition is a concat position. */
|
| +
|
| + /// CloudPosition is a concat position.
|
| ConcatPosition concatPosition;
|
| - /**
|
| - * Position is past all other positions. Also useful for the end
|
| - * position of an unbounded range.
|
| - */
|
| +
|
| + /// Position is past all other positions. Also useful for the end
|
| + /// position of an unbounded range.
|
| core.bool end;
|
| - /** Position is a string key, ordered lexicographically. */
|
| +
|
| + /// Position is a string key, ordered lexicographically.
|
| core.String key;
|
| - /** Position is a record index. */
|
| +
|
| + /// Position is a record index.
|
| core.String recordIndex;
|
| - /**
|
| - * CloudPosition is a base64 encoded BatchShufflePosition (with FIXED
|
| - * sharding).
|
| - */
|
| +
|
| + /// CloudPosition is a base64 encoded BatchShufflePosition (with FIXED
|
| + /// sharding).
|
| core.String shufflePosition;
|
|
|
| Position();
|
| @@ -5678,7 +5899,8 @@ class Position {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (byteOffset != null) {
|
| _json["byteOffset"] = byteOffset;
|
| }
|
| @@ -5701,39 +5923,33 @@ class Position {
|
| }
|
| }
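|
| // A sketch of Position as a union type: populate exactly one of the
|
| // fields documented above for any given position.
|
| final offsetPosition = new Position()..byteOffset = '1024';
|
| final endPosition = new Position()..end = true; // past all other positions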
|
|
|
| -/**
|
| - * Identifies a pubsub location to use for transferring data into or
|
| - * out of a streaming Dataflow job.
|
| - */
|
| +/// Identifies a pubsub location to use for transferring data into or
|
| +/// out of a streaming Dataflow job.
|
| class PubsubLocation {
|
| - /** Indicates whether the pipeline allows late-arriving data. */
|
| + /// Indicates whether the pipeline allows late-arriving data.
|
| core.bool dropLateData;
|
| - /**
|
| - * If set, contains a pubsub label from which to extract record ids.
|
| - * If left empty, record deduplication will be strictly best effort.
|
| - */
|
| +
|
| + /// If set, contains a pubsub label from which to extract record ids.
|
| + /// If left empty, record deduplication will be strictly best effort.
|
| core.String idLabel;
|
| - /**
|
| - * A pubsub subscription, in the form of
|
| - * "pubsub.googleapis.com/subscriptions/<project-id>/<subscription-name>"
|
| - */
|
| +
|
| + /// A pubsub subscription, in the form of
|
| + /// "pubsub.googleapis.com/subscriptions/<project-id>/<subscription-name>"
|
| core.String subscription;
|
| - /**
|
| - * If set, contains a pubsub label from which to extract record timestamps.
|
| - * If left empty, record timestamps will be generated upon arrival.
|
| - */
|
| +
|
| + /// If set, contains a pubsub label from which to extract record timestamps.
|
| + /// If left empty, record timestamps will be generated upon arrival.
|
| core.String timestampLabel;
|
| - /**
|
| - * A pubsub topic, in the form of
|
| - * "pubsub.googleapis.com/topics/<project-id>/<topic-name>"
|
| - */
|
| +
|
| + /// A pubsub topic, in the form of
|
| + /// "pubsub.googleapis.com/topics/<project-id>/<topic-name>"
|
| core.String topic;
|
| - /**
|
| - * If set, specifies the pubsub subscription that will be used for tracking
|
| - * custom time timestamps for watermark estimation.
|
| - */
|
| +
|
| + /// If set, specifies the pubsub subscription that will be used for tracking
|
| + /// custom time timestamps for watermark estimation.
|
| core.String trackingSubscription;
|
| - /** If true, then the client has requested to get pubsub attributes. */
|
| +
|
| + /// If true, then the client has requested to get pubsub attributes.
|
| core.bool withAttributes;
|
|
|
| PubsubLocation();
|
| @@ -5763,7 +5979,8 @@ class PubsubLocation {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (dropLateData != null) {
|
| _json["dropLateData"] = dropLateData;
|
| }
|
| @@ -5789,12 +6006,10 @@ class PubsubLocation {
|
| }
|
| }
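|
| // A sketch of a streaming Pub/Sub source using the name formats documented
|
| // above; the project, topic, and label values are illustrative.
|
| final pubsubSource = new PubsubLocation()
|
|   ..topic = 'pubsub.googleapis.com/topics/my-project/my-topic'
|
|   ..idLabel = 'record_id' // extract record ids for deduplication
|
|   ..withAttributes = true;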
|
|
|
| -/**
|
| - * An instruction that reads records.
|
| - * Takes no inputs, produces one output.
|
| - */
|
| +/// An instruction that reads records.
|
| +/// Takes no inputs, produces one output.
|
| class ReadInstruction {
|
| - /** The source to read from. */
|
| + /// The source to read from.
|
| Source source;
|
|
|
| ReadInstruction();
|
| @@ -5806,7 +6021,8 @@ class ReadInstruction {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (source != null) {
|
| _json["source"] = (source).toJson();
|
| }
|
| @@ -5814,24 +6030,23 @@ class ReadInstruction {
|
| }
|
| }
|
|
|
| -/** Request to report the status of WorkItems. */
|
| +/// Request to report the status of WorkItems.
|
| class ReportWorkItemStatusRequest {
|
| - /** The current timestamp at the worker. */
|
| + /// The current timestamp at the worker.
|
| core.String currentWorkerTime;
|
| - /** The location which contains the WorkItem's job. */
|
| +
|
| + /// The location which contains the WorkItem's job.
|
| core.String location;
|
| - /**
|
| - * The order is unimportant, except that the order of the
|
| - * WorkItemServiceState messages in the ReportWorkItemStatusResponse
|
| - * corresponds to the order of WorkItemStatus messages here.
|
| - */
|
| +
|
| + /// The order is unimportant, except that the order of the
|
| + /// WorkItemServiceState messages in the ReportWorkItemStatusResponse
|
| + /// corresponds to the order of WorkItemStatus messages here.
|
| core.List<WorkItemStatus> workItemStatuses;
|
| - /**
|
| - * The ID of the worker reporting the WorkItem status. If this
|
| - * does not match the ID of the worker which the Dataflow service
|
| - * believes currently has the lease on the WorkItem, the report
|
| - * will be dropped (with an error response).
|
| - */
|
| +
|
| + /// The ID of the worker reporting the WorkItem status. If this
|
| + /// does not match the ID of the worker which the Dataflow service
|
| + /// believes currently has the lease on the WorkItem, the report
|
| + /// will be dropped (with an error response).
|
| core.String workerId;
|
|
|
| ReportWorkItemStatusRequest();
|
| @@ -5844,7 +6059,9 @@ class ReportWorkItemStatusRequest {
|
| location = _json["location"];
|
| }
|
| if (_json.containsKey("workItemStatuses")) {
|
| - workItemStatuses = _json["workItemStatuses"].map((value) => new WorkItemStatus.fromJson(value)).toList();
|
| + workItemStatuses = _json["workItemStatuses"]
|
| + .map((value) => new WorkItemStatus.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("workerId")) {
|
| workerId = _json["workerId"];
|
| @@ -5852,7 +6069,8 @@ class ReportWorkItemStatusRequest {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (currentWorkerTime != null) {
|
| _json["currentWorkerTime"] = currentWorkerTime;
|
| }
|
| @@ -5860,7 +6078,8 @@ class ReportWorkItemStatusRequest {
|
| _json["location"] = location;
|
| }
|
| if (workItemStatuses != null) {
|
| - _json["workItemStatuses"] = workItemStatuses.map((value) => (value).toJson()).toList();
|
| + _json["workItemStatuses"] =
|
| + workItemStatuses.map((value) => (value).toJson()).toList();
|
| }
|
| if (workerId != null) {
|
| _json["workerId"] = workerId;
|
| @@ -5869,48 +6088,47 @@ class ReportWorkItemStatusRequest {
|
| }
|
| }
|
|
|
| -/** Response from a request to report the status of WorkItems. */
|
| +/// Response from a request to report the status of WorkItems.
|
| class ReportWorkItemStatusResponse {
|
| - /**
|
| - * A set of messages indicating the service-side state for each
|
| - * WorkItem whose status was reported, in the same order as the
|
| - * WorkItemStatus messages in the ReportWorkItemStatusRequest which
|
| - * resulting in this response.
|
| - */
|
| + /// A set of messages indicating the service-side state for each
|
| + /// WorkItem whose status was reported, in the same order as the
|
| + /// WorkItemStatus messages in the ReportWorkItemStatusRequest which
|
| + /// resulted in this response.
|
| core.List<WorkItemServiceState> workItemServiceStates;
|
|
|
| ReportWorkItemStatusResponse();
|
|
|
| ReportWorkItemStatusResponse.fromJson(core.Map _json) {
|
| if (_json.containsKey("workItemServiceStates")) {
|
| - workItemServiceStates = _json["workItemServiceStates"].map((value) => new WorkItemServiceState.fromJson(value)).toList();
|
| + workItemServiceStates = _json["workItemServiceStates"]
|
| + .map((value) => new WorkItemServiceState.fromJson(value))
|
| + .toList();
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (workItemServiceStates != null) {
|
| - _json["workItemServiceStates"] = workItemServiceStates.map((value) => (value).toJson()).toList();
|
| + _json["workItemServiceStates"] =
|
| + workItemServiceStates.map((value) => (value).toJson()).toList();
|
| }
|
| return _json;
|
| }
|
| }
|
|
|
| -/**
|
| - * Represents the level of parallelism in a WorkItem's input,
|
| - * reported by the worker.
|
| - */
|
| +/// Represents the level of parallelism in a WorkItem's input,
|
| +/// reported by the worker.
|
| class ReportedParallelism {
|
| - /**
|
| - * Specifies whether the parallelism is infinite. If true, "value" is
|
| - * ignored.
|
| - * Infinite parallelism means the service will assume that the work item
|
| - * can always be split into more non-empty work items by dynamic splitting.
|
| - * This is a work-around for lack of support for infinity by the current
|
| - * JSON-based Java RPC stack.
|
| - */
|
| + /// Specifies whether the parallelism is infinite. If true, "value" is
|
| + /// ignored.
|
| + /// Infinite parallelism means the service will assume that the work item
|
| + /// can always be split into more non-empty work items by dynamic splitting.
|
| + /// This is a work-around for lack of support for infinity by the current
|
| + /// JSON-based Java RPC stack.
|
| core.bool isInfinite;
|
| - /** Specifies the level of parallelism in case it is finite. */
|
| +
|
| + /// Specifies the level of parallelism in case it is finite.
|
| core.double value;
|
|
|
| ReportedParallelism();
|
| @@ -5925,7 +6143,8 @@ class ReportedParallelism {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (isInfinite != null) {
|
| _json["isInfinite"] = isInfinite;
|
| }
|
| @@ -5936,25 +6155,25 @@ class ReportedParallelism {
|
| }
|
| }
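|
| A short sketch of the two cases described above (values illustrative):
|
|   // Finite parallelism: report it directly.
|   final finite = new ReportedParallelism()..value = 64.0;
|   // Infinite parallelism: the JSON stack cannot carry Infinity, so the
|   // flag is set and "value" is left unset (it would be ignored anyway).
|   final infinite = new ReportedParallelism()..isInfinite = true;
|   print(finite.toJson());   // {value: 64.0}
|   print(infinite.toJson()); // {isInfinite: true}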
|
|
|
| -/**
|
| - * Worker metrics exported from workers. This contains resource utilization
|
| - * metrics accumulated from a variety of sources. For more information, see
|
| - * go/df-resource-signals.
|
| - */
|
| +/// Worker metrics exported from workers. This contains resource utilization
|
| +/// metrics accumulated from a variety of sources. For more information, see
|
| +/// go/df-resource-signals.
|
| class ResourceUtilizationReport {
|
| - /** CPU utilization samples. */
|
| + /// CPU utilization samples.
|
| core.List<CPUTime> cpuTime;
|
|
|
| ResourceUtilizationReport();
|
|
|
| ResourceUtilizationReport.fromJson(core.Map _json) {
|
| if (_json.containsKey("cpuTime")) {
|
| - cpuTime = _json["cpuTime"].map((value) => new CPUTime.fromJson(value)).toList();
|
| + cpuTime =
|
| + _json["cpuTime"].map((value) => new CPUTime.fromJson(value)).toList();
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (cpuTime != null) {
|
| _json["cpuTime"] = cpuTime.map((value) => (value).toJson()).toList();
|
| }
|
| @@ -5962,49 +6181,43 @@ class ResourceUtilizationReport {
|
| }
|
| }
|
|
|
| -/** Service-side response to WorkerMessage reporting resource utilization. */
|
| +/// Service-side response to WorkerMessage reporting resource utilization.
|
| class ResourceUtilizationReportResponse {
|
| -
|
| ResourceUtilizationReportResponse();
|
|
|
| - ResourceUtilizationReportResponse.fromJson(core.Map _json) {
|
| - }
|
| + ResourceUtilizationReportResponse.fromJson(core.Map _json) {}
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| return _json;
|
| }
|
| }
|
|
|
| -/** The environment values to set at runtime. */
|
| +/// The environment values to set at runtime.
|
| class RuntimeEnvironment {
|
| - /**
|
| - * Whether to bypass the safety checks for the job's temporary directory.
|
| - * Use with caution.
|
| - */
|
| + /// Whether to bypass the safety checks for the job's temporary directory.
|
| + /// Use with caution.
|
| core.bool bypassTempDirValidation;
|
| - /**
|
| - * The machine type to use for the job. Defaults to the value from the
|
| - * template if not specified.
|
| - */
|
| +
|
| + /// The machine type to use for the job. Defaults to the value from the
|
| + /// template if not specified.
|
| core.String machineType;
|
| - /**
|
| - * The maximum number of Google Compute Engine instances to be made
|
| - * available to your pipeline during execution, from 1 to 1000.
|
| - */
|
| +
|
| + /// The maximum number of Google Compute Engine instances to be made
|
| + /// available to your pipeline during execution, from 1 to 1000.
|
| core.int maxWorkers;
|
| - /** The email address of the service account to run the job as. */
|
| +
|
| + /// The email address of the service account to run the job as.
|
| core.String serviceAccountEmail;
|
| - /**
|
| - * The Cloud Storage path to use for temporary files.
|
| - * Must be a valid Cloud Storage URL, beginning with `gs://`.
|
| - */
|
| +
|
| + /// The Cloud Storage path to use for temporary files.
|
| + /// Must be a valid Cloud Storage URL, beginning with `gs://`.
|
| core.String tempLocation;
|
| - /**
|
| - * The Compute Engine [availability
|
| - * zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones)
|
| - * for launching worker instances to run your pipeline.
|
| - */
|
| +
|
| + /// The Compute Engine [availability
|
| + /// zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones)
|
| + /// for launching worker instances to run your pipeline.
|
| core.String zone;
|
|
|
| RuntimeEnvironment();
|
| @@ -6031,7 +6244,8 @@ class RuntimeEnvironment {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (bypassTempDirValidation != null) {
|
| _json["bypassTempDirValidation"] = bypassTempDirValidation;
|
| }
|
| @@ -6054,15 +6268,18 @@ class RuntimeEnvironment {
|
| }
|
| }
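|
| As a sketch, a caller launching a template might populate the environment
| like this (bucket, project, and zone values are placeholders, not defaults):
|
|   final env = new RuntimeEnvironment()
|     ..tempLocation = 'gs://my-bucket/temp' // must begin with gs://
|     ..maxWorkers = 10                      // valid range is 1 to 1000
|     ..zone = 'us-central1-f'
|     ..serviceAccountEmail = 'runner@my-project.iam.gserviceaccount.com';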
|
|
|
| -/** Request to send encoded debug information. */
|
| +/// Request to send encoded debug information.
|
| class SendDebugCaptureRequest {
|
| - /** The internal component id for which debug information is sent. */
|
| + /// The internal component id for which debug information is sent.
|
| core.String componentId;
|
| - /** The encoded debug information. */
|
| +
|
| + /// The encoded debug information.
|
| core.String data;
|
| - /** The location which contains the job specified by job_id. */
|
| +
|
| + /// The location which contains the job specified by job_id.
|
| core.String location;
|
| - /** The worker id, i.e., VM hostname. */
|
| +
|
| + /// The worker id, i.e., VM hostname.
|
| core.String workerId;
|
|
|
| SendDebugCaptureRequest();
|
| @@ -6083,7 +6300,8 @@ class SendDebugCaptureRequest {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (componentId != null) {
|
| _json["componentId"] = componentId;
|
| }
|
| @@ -6100,28 +6318,26 @@ class SendDebugCaptureRequest {
|
| }
|
| }
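|
| A hedged sketch of assembling such a request; the component and worker ids
| are invented, and the payload format is whatever the capture agent emits:
|
|   final debug = new SendDebugCaptureRequest()
|     ..componentId = 'harness'            // hypothetical component id
|     ..workerId = 'wordcount-1-harness-0' // the VM hostname
|     ..location = 'us-central1'
|     ..data = '<opaque encoded payload>';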
|
|
|
| -/**
|
| - * Response to a send capture request.
|
| - * nothing
|
| - */
|
| +/// Response to a send capture request.
|
| +/// The response carries no information.
|
| class SendDebugCaptureResponse {
|
| -
|
| SendDebugCaptureResponse();
|
|
|
| - SendDebugCaptureResponse.fromJson(core.Map _json) {
|
| - }
|
| + SendDebugCaptureResponse.fromJson(core.Map _json) {}
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| return _json;
|
| }
|
| }
|
|
|
| -/** A request for sending worker messages to the service. */
|
| +/// A request for sending worker messages to the service.
|
| class SendWorkerMessagesRequest {
|
| - /** The location which contains the job */
|
| + /// The location which contains the job.
|
| core.String location;
|
| - /** The WorkerMessages to send. */
|
| +
|
| + /// The WorkerMessages to send.
|
| core.List<WorkerMessage> workerMessages;
|
|
|
| SendWorkerMessagesRequest();
|
| @@ -6131,81 +6347,92 @@ class SendWorkerMessagesRequest {
|
| location = _json["location"];
|
| }
|
| if (_json.containsKey("workerMessages")) {
|
| - workerMessages = _json["workerMessages"].map((value) => new WorkerMessage.fromJson(value)).toList();
|
| + workerMessages = _json["workerMessages"]
|
| + .map((value) => new WorkerMessage.fromJson(value))
|
| + .toList();
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (location != null) {
|
| _json["location"] = location;
|
| }
|
| if (workerMessages != null) {
|
| - _json["workerMessages"] = workerMessages.map((value) => (value).toJson()).toList();
|
| + _json["workerMessages"] =
|
| + workerMessages.map((value) => (value).toJson()).toList();
|
| }
|
| return _json;
|
| }
|
| }
|
|
|
| -/** The response to the worker messages. */
|
| +/// The response to the worker messages.
|
| class SendWorkerMessagesResponse {
|
| - /** The servers response to the worker messages. */
|
| + /// The server's response to the worker messages.
|
| core.List<WorkerMessageResponse> workerMessageResponses;
|
|
|
| SendWorkerMessagesResponse();
|
|
|
| SendWorkerMessagesResponse.fromJson(core.Map _json) {
|
| if (_json.containsKey("workerMessageResponses")) {
|
| - workerMessageResponses = _json["workerMessageResponses"].map((value) => new WorkerMessageResponse.fromJson(value)).toList();
|
| + workerMessageResponses = _json["workerMessageResponses"]
|
| + .map((value) => new WorkerMessageResponse.fromJson(value))
|
| + .toList();
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (workerMessageResponses != null) {
|
| - _json["workerMessageResponses"] = workerMessageResponses.map((value) => (value).toJson()).toList();
|
| + _json["workerMessageResponses"] =
|
| + workerMessageResponses.map((value) => (value).toJson()).toList();
|
| }
|
| return _json;
|
| }
|
| }
|
|
|
| -/** Describes a particular function to invoke. */
|
| +/// Describes a particular function to invoke.
|
| class SeqMapTask {
|
| - /** Information about each of the inputs. */
|
| + /// Information about each of the inputs.
|
| core.List<SideInputInfo> inputs;
|
| - /** The user-provided name of the SeqDo operation. */
|
| +
|
| + /// The user-provided name of the SeqDo operation.
|
| core.String name;
|
| - /** Information about each of the outputs. */
|
| +
|
| + /// Information about each of the outputs.
|
| core.List<SeqMapTaskOutputInfo> outputInfos;
|
| - /**
|
| - * System-defined name of the stage containing the SeqDo operation.
|
| - * Unique across the workflow.
|
| - */
|
| +
|
| + /// System-defined name of the stage containing the SeqDo operation.
|
| + /// Unique across the workflow.
|
| core.String stageName;
|
| - /**
|
| - * System-defined name of the SeqDo operation.
|
| - * Unique across the workflow.
|
| - */
|
| +
|
| + /// System-defined name of the SeqDo operation.
|
| + /// Unique across the workflow.
|
| core.String systemName;
|
| - /**
|
| - * The user function to invoke.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// The user function to invoke.
|
| + ///
|
| + /// The values for Object must be JSON objects. They can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Map<core.String, core.Object> userFn;
|
|
|
| SeqMapTask();
|
|
|
| SeqMapTask.fromJson(core.Map _json) {
|
| if (_json.containsKey("inputs")) {
|
| - inputs = _json["inputs"].map((value) => new SideInputInfo.fromJson(value)).toList();
|
| + inputs = _json["inputs"]
|
| + .map((value) => new SideInputInfo.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("name")) {
|
| name = _json["name"];
|
| }
|
| if (_json.containsKey("outputInfos")) {
|
| - outputInfos = _json["outputInfos"].map((value) => new SeqMapTaskOutputInfo.fromJson(value)).toList();
|
| + outputInfos = _json["outputInfos"]
|
| + .map((value) => new SeqMapTaskOutputInfo.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("stageName")) {
|
| stageName = _json["stageName"];
|
| @@ -6219,7 +6446,8 @@ class SeqMapTask {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (inputs != null) {
|
| _json["inputs"] = inputs.map((value) => (value).toJson()).toList();
|
| }
|
| @@ -6227,7 +6455,8 @@ class SeqMapTask {
|
| _json["name"] = name;
|
| }
|
| if (outputInfos != null) {
|
| - _json["outputInfos"] = outputInfos.map((value) => (value).toJson()).toList();
|
| + _json["outputInfos"] =
|
| + outputInfos.map((value) => (value).toJson()).toList();
|
| }
|
| if (stageName != null) {
|
| _json["stageName"] = stageName;
|
| @@ -6242,11 +6471,12 @@ class SeqMapTask {
|
| }
|
| }
|
|
|
| -/** Information about an output of a SeqMapTask. */
|
| +/// Information about an output of a SeqMapTask.
|
| class SeqMapTaskOutputInfo {
|
| - /** The sink to write the output value to. */
|
| + /// The sink to write the output value to.
|
| Sink sink;
|
| - /** The id of the TupleTag the user code will tag the output value by. */
|
| +
|
| + /// The id of the TupleTag the user code will tag the output value by.
|
| core.String tag;
|
|
|
| SeqMapTaskOutputInfo();
|
| @@ -6261,7 +6491,8 @@ class SeqMapTaskOutputInfo {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (sink != null) {
|
| _json["sink"] = (sink).toJson();
|
| }
|
| @@ -6272,11 +6503,12 @@ class SeqMapTaskOutputInfo {
|
| }
|
| }
|
|
|
| -/** A task which consists of a shell command for the worker to execute. */
|
| +/// A task which consists of a shell command for the worker to execute.
|
| class ShellTask {
|
| - /** The shell command to run. */
|
| + /// The shell command to run.
|
| core.String command;
|
| - /** Exit code for the task. */
|
| +
|
| + /// Exit code for the task.
|
| core.int exitCode;
|
|
|
| ShellTask();
|
| @@ -6291,7 +6523,8 @@ class ShellTask {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (command != null) {
|
| _json["command"] = command;
|
| }
|
| @@ -6302,26 +6535,55 @@ class ShellTask {
|
| }
|
| }
|
|
|
| -/** Information about a side input of a DoFn or an input of a SeqDoFn. */
|
| +/// Uniquely identifies a side input.
|
| +class SideInputId {
|
| + /// The step that receives and usually consumes this side input.
|
| + core.String declaringStepName;
|
| +
|
| + /// The index of the side input, from the list of non_parallel_inputs.
|
| + core.int inputIndex;
|
| +
|
| + SideInputId();
|
| +
|
| + SideInputId.fromJson(core.Map _json) {
|
| + if (_json.containsKey("declaringStepName")) {
|
| + declaringStepName = _json["declaringStepName"];
|
| + }
|
| + if (_json.containsKey("inputIndex")) {
|
| + inputIndex = _json["inputIndex"];
|
| + }
|
| + }
|
| +
|
| + core.Map<core.String, core.Object> toJson() {
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| + if (declaringStepName != null) {
|
| + _json["declaringStepName"] = declaringStepName;
|
| + }
|
| + if (inputIndex != null) {
|
| + _json["inputIndex"] = inputIndex;
|
| + }
|
| + return _json;
|
| + }
|
| +}
|
| +
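|
| A quick round-trip of the newly added class (the step name is illustrative):
|
|   final id = new SideInputId()
|     ..declaringStepName = 's5' // the consuming step
|     ..inputIndex = 0;          // position in non_parallel_inputs
|   final copy = new SideInputId.fromJson(id.toJson());
|   assert(copy.declaringStepName == 's5' && copy.inputIndex == 0);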
|
| +/// Information about a side input of a DoFn or an input of a SeqDoFn.
|
| class SideInputInfo {
|
| - /**
|
| - * How to interpret the source element(s) as a side input value.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| + /// How to interpret the source element(s) as a side input value.
|
| + ///
|
| + /// The values for Object must be JSON objects. They can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Map<core.String, core.Object> kind;
|
| - /**
|
| - * The source(s) to read element(s) from to get the value of this side input.
|
| - * If more than one source, then the elements are taken from the
|
| - * sources, in the specified order if order matters.
|
| - * At least one source is required.
|
| - */
|
| +
|
| + /// The source(s) to read element(s) from to get the value of this side
|
| + /// input.
|
| + /// If more than one source, then the elements are taken from the
|
| + /// sources, in the specified order if order matters.
|
| + /// At least one source is required.
|
| core.List<Source> sources;
|
| - /**
|
| - * The id of the tag the user code will access this side input by;
|
| - * this should correspond to the tag of some MultiOutputInfo.
|
| - */
|
| +
|
| + /// The id of the tag the user code will access this side input by;
|
| + /// this should correspond to the tag of some MultiOutputInfo.
|
| core.String tag;
|
|
|
| SideInputInfo();
|
| @@ -6331,7 +6593,8 @@ class SideInputInfo {
|
| kind = _json["kind"];
|
| }
|
| if (_json.containsKey("sources")) {
|
| - sources = _json["sources"].map((value) => new Source.fromJson(value)).toList();
|
| + sources =
|
| + _json["sources"].map((value) => new Source.fromJson(value)).toList();
|
| }
|
| if (_json.containsKey("tag")) {
|
| tag = _json["tag"];
|
| @@ -6339,7 +6602,8 @@ class SideInputInfo {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (kind != null) {
|
| _json["kind"] = kind;
|
| }
|
| @@ -6353,21 +6617,18 @@ class SideInputInfo {
|
| }
|
| }
|
|
|
| -/** A sink that records can be encoded and written to. */
|
| +/// A sink that records can be encoded and written to.
|
| class Sink {
|
| - /**
|
| - * The codec to use to encode data written to the sink.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| + /// The codec to use to encode data written to the sink.
|
| + ///
|
| + /// The values for Object must be JSON objects. They can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Map<core.String, core.Object> codec;
|
| - /**
|
| - * The sink to write to, plus its parameters.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// The sink to write to, plus its parameters.
|
| + ///
|
| + /// The values for Object must be JSON objects. They can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Map<core.String, core.Object> spec;
|
|
|
| Sink();
|
| @@ -6382,7 +6643,8 @@ class Sink {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (codec != null) {
|
| _json["codec"] = codec;
|
| }
|
| @@ -6393,65 +6655,59 @@ class Sink {
|
| }
|
| }
|
|
|
| -/** A source that records can be read and decoded from. */
|
| +/// A source that records can be read and decoded from.
|
| class Source {
|
| - /**
|
| - * While splitting, sources may specify the produced bundles
|
| - * as differences against another source, in order to save backend-side
|
| - * memory and allow bigger jobs. For details, see SourceSplitRequest.
|
| - * To support this use case, the full set of parameters of the source
|
| - * is logically obtained by taking the latest explicitly specified value
|
| - * of each parameter in the order:
|
| - * base_specs (later items win), spec (overrides anything in base_specs).
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| + /// While splitting, sources may specify the produced bundles
|
| + /// as differences against another source, in order to save backend-side
|
| + /// memory and allow bigger jobs. For details, see SourceSplitRequest.
|
| + /// To support this use case, the full set of parameters of the source
|
| + /// is logically obtained by taking the latest explicitly specified value
|
| + /// of each parameter in the order:
|
| + /// base_specs (later items win), spec (overrides anything in base_specs).
|
| + ///
|
| + /// The values for Object must be JSON objects. They can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.List<core.Map<core.String, core.Object>> baseSpecs;
|
| - /**
|
| - * The codec to use to decode data read from the source.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// The codec to use to decode data read from the source.
|
| + ///
|
| + /// The values for Object must be JSON objects. They can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Map<core.String, core.Object> codec;
|
| - /**
|
| - * Setting this value to true hints to the framework that the source
|
| - * doesn't need splitting, and using SourceSplitRequest on it would
|
| - * yield SOURCE_SPLIT_OUTCOME_USE_CURRENT.
|
| - *
|
| - * E.g. a file splitter may set this to true when splitting a single file
|
| - * into a set of byte ranges of appropriate size, and set this
|
| - * to false when splitting a filepattern into individual files.
|
| - * However, for efficiency, a file splitter may decide to produce
|
| - * file subranges directly from the filepattern to avoid a splitting
|
| - * round-trip.
|
| - *
|
| - * See SourceSplitRequest for an overview of the splitting process.
|
| - *
|
| - * This field is meaningful only in the Source objects populated
|
| - * by the user (e.g. when filling in a DerivedSource).
|
| - * Source objects supplied by the framework to the user don't have
|
| - * this field populated.
|
| - */
|
| +
|
| + /// Setting this value to true hints to the framework that the source
|
| + /// doesn't need splitting, and using SourceSplitRequest on it would
|
| + /// yield SOURCE_SPLIT_OUTCOME_USE_CURRENT.
|
| + ///
|
| + /// E.g. a file splitter may set this to true when splitting a single file
|
| + /// into a set of byte ranges of appropriate size, and set this
|
| + /// to false when splitting a filepattern into individual files.
|
| + /// However, for efficiency, a file splitter may decide to produce
|
| + /// file subranges directly from the filepattern to avoid a splitting
|
| + /// round-trip.
|
| + ///
|
| + /// See SourceSplitRequest for an overview of the splitting process.
|
| + ///
|
| + /// This field is meaningful only in the Source objects populated
|
| + /// by the user (e.g. when filling in a DerivedSource).
|
| + /// Source objects supplied by the framework to the user don't have
|
| + /// this field populated.
|
| core.bool doesNotNeedSplitting;
|
| - /**
|
| - * Optionally, metadata for this source can be supplied right away,
|
| - * avoiding a SourceGetMetadataOperation roundtrip
|
| - * (see SourceOperationRequest).
|
| - *
|
| - * This field is meaningful only in the Source objects populated
|
| - * by the user (e.g. when filling in a DerivedSource).
|
| - * Source objects supplied by the framework to the user don't have
|
| - * this field populated.
|
| - */
|
| +
|
| + /// Optionally, metadata for this source can be supplied right away,
|
| + /// avoiding a SourceGetMetadataOperation roundtrip
|
| + /// (see SourceOperationRequest).
|
| + ///
|
| + /// This field is meaningful only in the Source objects populated
|
| + /// by the user (e.g. when filling in a DerivedSource).
|
| + /// Source objects supplied by the framework to the user don't have
|
| + /// this field populated.
|
| SourceMetadata metadata;
|
| - /**
|
| - * The source to read from, plus its parameters.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// The source to read from, plus its parameters.
|
| + ///
|
| + /// The values for Object must be JSON objects. They can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Map<core.String, core.Object> spec;
|
|
|
| Source();
|
| @@ -6475,7 +6731,8 @@ class Source {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (baseSpecs != null) {
|
| _json["baseSpecs"] = baseSpecs;
|
| }
|
| @@ -6495,15 +6752,18 @@ class Source {
|
| }
|
| }
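|
| The layering rule above (later base_specs win; spec overrides everything)
| can be made concrete with a small helper, a sketch rather than part of the
| generated API:
|
|   Map<String, Object> effectiveSpec(Source s) {
|     final merged = <String, Object>{};
|     if (s.baseSpecs != null) {
|       for (final base in s.baseSpecs) merged.addAll(base); // later items win
|     }
|     if (s.spec != null) merged.addAll(s.spec); // spec wins over base_specs
|     return merged;
|   }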
|
|
|
| -/** DEPRECATED in favor of DynamicSourceSplit. */
|
| +/// DEPRECATED in favor of DynamicSourceSplit.
|
| class SourceFork {
|
| - /** DEPRECATED */
|
| + /// DEPRECATED
|
| SourceSplitShard primary;
|
| - /** DEPRECATED */
|
| +
|
| + /// DEPRECATED
|
| DerivedSource primarySource;
|
| - /** DEPRECATED */
|
| +
|
| + /// DEPRECATED
|
| SourceSplitShard residual;
|
| - /** DEPRECATED */
|
| +
|
| + /// DEPRECATED
|
| DerivedSource residualSource;
|
|
|
| SourceFork();
|
| @@ -6524,7 +6784,8 @@ class SourceFork {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (primary != null) {
|
| _json["primary"] = (primary).toJson();
|
| }
|
| @@ -6541,9 +6802,9 @@ class SourceFork {
|
| }
|
| }
|
|
|
| -/** A request to compute the SourceMetadata of a Source. */
|
| +/// A request to compute the SourceMetadata of a Source.
|
| class SourceGetMetadataRequest {
|
| - /** Specification of the source whose metadata should be computed. */
|
| + /// Specification of the source whose metadata should be computed.
|
| Source source;
|
|
|
| SourceGetMetadataRequest();
|
| @@ -6555,7 +6816,8 @@ class SourceGetMetadataRequest {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (source != null) {
|
| _json["source"] = (source).toJson();
|
| }
|
| @@ -6563,9 +6825,9 @@ class SourceGetMetadataRequest {
|
| }
|
| }
|
|
|
| -/** The result of a SourceGetMetadataOperation. */
|
| +/// The result of a SourceGetMetadataOperation.
|
| class SourceGetMetadataResponse {
|
| - /** The computed metadata. */
|
| + /// The computed metadata.
|
| SourceMetadata metadata;
|
|
|
| SourceGetMetadataResponse();
|
| @@ -6577,7 +6839,8 @@ class SourceGetMetadataResponse {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (metadata != null) {
|
| _json["metadata"] = (metadata).toJson();
|
| }
|
| @@ -6585,26 +6848,20 @@ class SourceGetMetadataResponse {
|
| }
|
| }
|
|
|
| -/**
|
| - * Metadata about a Source useful for automatically optimizing
|
| - * and tuning the pipeline, etc.
|
| - */
|
| +/// Metadata about a Source useful for automatically optimizing
|
| +/// and tuning the pipeline, etc.
|
| class SourceMetadata {
|
| - /**
|
| - * An estimate of the total size (in bytes) of the data that would be
|
| - * read from this source. This estimate is in terms of external storage
|
| - * size, before any decompression or other processing done by the reader.
|
| - */
|
| + /// An estimate of the total size (in bytes) of the data that would be
|
| + /// read from this source. This estimate is in terms of external storage
|
| + /// size, before any decompression or other processing done by the reader.
|
| core.String estimatedSizeBytes;
|
| - /**
|
| - * Specifies that the size of this source is known to be infinite
|
| - * (this is a streaming source).
|
| - */
|
| +
|
| + /// Specifies that the size of this source is known to be infinite
|
| + /// (this is a streaming source).
|
| core.bool infinite;
|
| - /**
|
| - * Whether this source is known to produce key/value pairs with
|
| - * the (encoded) keys in lexicographically sorted order.
|
| - */
|
| +
|
| + /// Whether this source is known to produce key/value pairs with
|
| + /// the (encoded) keys in lexicographically sorted order.
|
| core.bool producesSortedKeys;
|
|
|
| SourceMetadata();
|
| @@ -6622,7 +6879,8 @@ class SourceMetadata {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (estimatedSizeBytes != null) {
|
| _json["estimatedSizeBytes"] = estimatedSizeBytes;
|
| }
|
| @@ -6636,14 +6894,13 @@ class SourceMetadata {
|
| }
|
| }
|
|
|
| -/**
|
| - * A work item that represents the different operations that can be
|
| - * performed on a user-defined Source specification.
|
| - */
|
| +/// A work item that represents the different operations that can be
|
| +/// performed on a user-defined Source specification.
|
| class SourceOperationRequest {
|
| - /** Information about a request to get metadata about a source. */
|
| + /// Information about a request to get metadata about a source.
|
| SourceGetMetadataRequest getMetadata;
|
| - /** Information about a request to split a source. */
|
| +
|
| + /// Information about a request to split a source.
|
| SourceSplitRequest split;
|
|
|
| SourceOperationRequest();
|
| @@ -6658,7 +6915,8 @@ class SourceOperationRequest {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (getMetadata != null) {
|
| _json["getMetadata"] = (getMetadata).toJson();
|
| }
|
| @@ -6669,22 +6927,22 @@ class SourceOperationRequest {
|
| }
|
| }
|
|
|
| -/**
|
| - * The result of a SourceOperationRequest, specified in
|
| - * ReportWorkItemStatusRequest.source_operation when the work item
|
| - * is completed.
|
| - */
|
| +/// The result of a SourceOperationRequest, specified in
|
| +/// ReportWorkItemStatusRequest.source_operation when the work item
|
| +/// is completed.
|
| class SourceOperationResponse {
|
| - /** A response to a request to get metadata about a source. */
|
| + /// A response to a request to get metadata about a source.
|
| SourceGetMetadataResponse getMetadata;
|
| - /** A response to a request to split a source. */
|
| +
|
| + /// A response to a request to split a source.
|
| SourceSplitResponse split;
|
|
|
| SourceOperationResponse();
|
|
|
| SourceOperationResponse.fromJson(core.Map _json) {
|
| if (_json.containsKey("getMetadata")) {
|
| - getMetadata = new SourceGetMetadataResponse.fromJson(_json["getMetadata"]);
|
| + getMetadata =
|
| + new SourceGetMetadataResponse.fromJson(_json["getMetadata"]);
|
| }
|
| if (_json.containsKey("split")) {
|
| split = new SourceSplitResponse.fromJson(_json["split"]);
|
| @@ -6692,7 +6950,8 @@ class SourceOperationResponse {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (getMetadata != null) {
|
| _json["getMetadata"] = (getMetadata).toJson();
|
| }
|
| @@ -6703,17 +6962,14 @@ class SourceOperationResponse {
|
| }
|
| }
|
|
|
| -/**
|
| - * Hints for splitting a Source into bundles (parts for parallel
|
| - * processing) using SourceSplitRequest.
|
| - */
|
| +/// Hints for splitting a Source into bundles (parts for parallel
|
| +/// processing) using SourceSplitRequest.
|
| class SourceSplitOptions {
|
| - /**
|
| - * The source should be split into a set of bundles where the estimated size
|
| - * of each is approximately this many bytes.
|
| - */
|
| + /// The source should be split into a set of bundles where the estimated size
|
| + /// of each is approximately this many bytes.
|
| core.String desiredBundleSizeBytes;
|
| - /** DEPRECATED in favor of desired_bundle_size_bytes. */
|
| +
|
| + /// DEPRECATED in favor of desired_bundle_size_bytes.
|
| core.String desiredShardSizeBytes;
|
|
|
| SourceSplitOptions();
|
| @@ -6728,7 +6984,8 @@ class SourceSplitOptions {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (desiredBundleSizeBytes != null) {
|
| _json["desiredBundleSizeBytes"] = desiredBundleSizeBytes;
|
| }
|
| @@ -6739,26 +6996,27 @@ class SourceSplitOptions {
|
| }
|
| }
|
|
|
| -/**
|
| - * Represents the operation to split a high-level Source specification
|
| - * into bundles (parts for parallel processing).
|
| - *
|
| - * At a high level, splitting of a source into bundles happens as follows:
|
| - * SourceSplitRequest is applied to the source. If it returns
|
| - * SOURCE_SPLIT_OUTCOME_USE_CURRENT, no further splitting happens and the source
|
| - * is used "as is". Otherwise, splitting is applied recursively to each
|
| - * produced DerivedSource.
|
| - *
|
| - * As an optimization, for any Source, if its does_not_need_splitting is
|
| - * true, the framework assumes that splitting this source would return
|
| - * SOURCE_SPLIT_OUTCOME_USE_CURRENT, and doesn't initiate a SourceSplitRequest.
|
| - * This applies both to the initial source being split and to bundles
|
| - * produced from it.
|
| - */
|
| +/// Represents the operation to split a high-level Source specification
|
| +/// into bundles (parts for parallel processing).
|
| +///
|
| +/// At a high level, splitting of a source into bundles happens as follows:
|
| +/// SourceSplitRequest is applied to the source. If it returns
|
| +/// SOURCE_SPLIT_OUTCOME_USE_CURRENT, no further splitting happens and the
|
| +/// source
|
| +/// is used "as is". Otherwise, splitting is applied recursively to each
|
| +/// produced DerivedSource.
|
| +///
|
| +/// As an optimization, for any Source, if its does_not_need_splitting is
|
| +/// true, the framework assumes that splitting this source would return
|
| +/// SOURCE_SPLIT_OUTCOME_USE_CURRENT, and doesn't initiate a
|
| +/// SourceSplitRequest.
|
| +/// This applies both to the initial source being split and to bundles
|
| +/// produced from it.
|
| class SourceSplitRequest {
|
| - /** Hints for tuning the splitting process. */
|
| + /// Hints for tuning the splitting process.
|
| SourceSplitOptions options;
|
| - /** Specification of the source to be split. */
|
| +
|
| + /// Specification of the source to be split.
|
| Source source;
|
|
|
| SourceSplitRequest();
|
| @@ -6773,7 +7031,8 @@ class SourceSplitRequest {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (options != null) {
|
| _json["options"] = (options).toJson();
|
| }
|
| @@ -6784,48 +7043,52 @@ class SourceSplitRequest {
|
| }
|
| }
|
|
|
| -/** The response to a SourceSplitRequest. */
|
| +/// The response to a SourceSplitRequest.
|
| class SourceSplitResponse {
|
| - /**
|
| - * If outcome is SPLITTING_HAPPENED, then this is a list of bundles
|
| - * into which the source was split. Otherwise this field is ignored.
|
| - * This list can be empty, which means the source represents an empty input.
|
| - */
|
| + /// If outcome is SPLITTING_HAPPENED, then this is a list of bundles
|
| + /// into which the source was split. Otherwise this field is ignored.
|
| + /// This list can be empty, which means the source represents an empty input.
|
| core.List<DerivedSource> bundles;
|
| - /**
|
| - * Indicates whether splitting happened and produced a list of bundles.
|
| - * If this is USE_CURRENT_SOURCE_AS_IS, the current source should
|
| - * be processed "as is" without splitting. "bundles" is ignored in this case.
|
| - * If this is SPLITTING_HAPPENED, then "bundles" contains a list of
|
| - * bundles into which the source was split.
|
| - * Possible string values are:
|
| - * - "SOURCE_SPLIT_OUTCOME_UNKNOWN" : The source split outcome is unknown, or
|
| - * unspecified.
|
| - * - "SOURCE_SPLIT_OUTCOME_USE_CURRENT" : The current source should be
|
| - * processed "as is" without splitting.
|
| - * - "SOURCE_SPLIT_OUTCOME_SPLITTING_HAPPENED" : Splitting produced a list of
|
| - * bundles.
|
| - */
|
| +
|
| + /// Indicates whether splitting happened and produced a list of bundles.
|
| + /// If this is USE_CURRENT_SOURCE_AS_IS, the current source should
|
| + /// be processed "as is" without splitting. "bundles" is ignored in this
|
| + /// case.
|
| + /// If this is SPLITTING_HAPPENED, then "bundles" contains a list of
|
| + /// bundles into which the source was split.
|
| + /// Possible string values are:
|
| + /// - "SOURCE_SPLIT_OUTCOME_UNKNOWN" : The source split outcome is unknown,
|
| + /// or unspecified.
|
| + /// - "SOURCE_SPLIT_OUTCOME_USE_CURRENT" : The current source should be
|
| + /// processed "as is" without splitting.
|
| + /// - "SOURCE_SPLIT_OUTCOME_SPLITTING_HAPPENED" : Splitting produced a list
|
| + /// of bundles.
|
| core.String outcome;
|
| - /** DEPRECATED in favor of bundles. */
|
| +
|
| + /// DEPRECATED in favor of bundles.
|
| core.List<SourceSplitShard> shards;
|
|
|
| SourceSplitResponse();
|
|
|
| SourceSplitResponse.fromJson(core.Map _json) {
|
| if (_json.containsKey("bundles")) {
|
| - bundles = _json["bundles"].map((value) => new DerivedSource.fromJson(value)).toList();
|
| + bundles = _json["bundles"]
|
| + .map((value) => new DerivedSource.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("outcome")) {
|
| outcome = _json["outcome"];
|
| }
|
| if (_json.containsKey("shards")) {
|
| - shards = _json["shards"].map((value) => new SourceSplitShard.fromJson(value)).toList();
|
| + shards = _json["shards"]
|
| + .map((value) => new SourceSplitShard.fromJson(value))
|
| + .toList();
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (bundles != null) {
|
| _json["bundles"] = bundles.map((value) => (value).toJson()).toList();
|
| }
|
| @@ -6839,22 +7102,21 @@ class SourceSplitResponse {
|
| }
|
| }
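|
| Putting the request/response pair together, the recursive process described
| in the SourceSplitRequest docs looks roughly like this; performSplit stands
| in for the worker-side call, and DerivedSource is assumed to expose the
| produced source through a `source` field:
|
|   List<Source> splitFully(
|       Source source, SourceSplitResponse performSplit(Source s)) {
|     if (source.doesNotNeedSplitting == true) return [source];
|     final response = performSplit(source);
|     if (response.outcome != 'SOURCE_SPLIT_OUTCOME_SPLITTING_HAPPENED') {
|       return [source]; // processed "as is", no further splitting
|     }
|     return response.bundles
|         .expand((bundle) => splitFully(bundle.source, performSplit))
|         .toList();
|   }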
|
|
|
| -/** DEPRECATED in favor of DerivedSource. */
|
| +/// DEPRECATED in favor of DerivedSource.
|
| class SourceSplitShard {
|
| - /**
|
| - * DEPRECATED
|
| - * Possible string values are:
|
| - * - "SOURCE_DERIVATION_MODE_UNKNOWN" : The source derivation is unknown, or
|
| - * unspecified.
|
| - * - "SOURCE_DERIVATION_MODE_INDEPENDENT" : Produce a completely independent
|
| - * Source with no base.
|
| - * - "SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT" : Produce a Source based on the
|
| - * Source being split.
|
| - * - "SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT" : Produce a Source based on
|
| - * the base of the Source being split.
|
| - */
|
| + /// DEPRECATED
|
| + /// Possible string values are:
|
| + /// - "SOURCE_DERIVATION_MODE_UNKNOWN" : The source derivation is unknown, or
|
| + /// unspecified.
|
| + /// - "SOURCE_DERIVATION_MODE_INDEPENDENT" : Produce a completely independent
|
| + /// Source with no base.
|
| + /// - "SOURCE_DERIVATION_MODE_CHILD_OF_CURRENT" : Produce a Source based on
|
| + /// the Source being split.
|
| + /// - "SOURCE_DERIVATION_MODE_SIBLING_OF_CURRENT" : Produce a Source based on
|
| + /// the base of the Source being split.
|
| core.String derivationMode;
|
| - /** DEPRECATED */
|
| +
|
| + /// DEPRECATED
|
| Source source;
|
|
|
| SourceSplitShard();
|
| @@ -6869,7 +7131,8 @@ class SourceSplitShard {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (derivationMode != null) {
|
| _json["derivationMode"] = derivationMode;
|
| }
|
| @@ -6880,14 +7143,13 @@ class SourceSplitShard {
|
| }
|
| }
|
|
|
| -/**
|
| - * A representation of an int64, n, that is immune to precision loss when
|
| - * encoded in JSON.
|
| - */
|
| +/// A representation of an int64, n, that is immune to precision loss when
|
| +/// encoded in JSON.
|
| class SplitInt64 {
|
| - /** The high order bits, including the sign: n >> 32. */
|
| + /// The high order bits, including the sign: n >> 32.
|
| core.int highBits;
|
| - /** The low order bits: n & 0xffffffff. */
|
| +
|
| + /// The low order bits: n & 0xffffffff.
|
| core.int lowBits;
|
|
|
| SplitInt64();
|
| @@ -6902,7 +7164,8 @@ class SplitInt64 {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (highBits != null) {
|
| _json["highBits"] = highBits;
|
| }
|
| @@ -6913,18 +7176,19 @@ class SplitInt64 {
|
| }
|
| }
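|
| A worked example of the encoding, following the field docs above:
|
|   final n = 0x123456789A;
|   final split = new SplitInt64()
|     ..highBits = n >> 32         // 0x12, carries the sign
|     ..lowBits = n & 0xffffffff;  // 0x3456789A, always non-negative
|   final restored = (split.highBits << 32) + split.lowBits;
|   assert(restored == n); // also holds for negative n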
|
|
|
| -/** Description of an input or output of an execution stage. */
|
| +/// Description of an input or output of an execution stage.
|
| class StageSource {
|
| - /** Dataflow service generated name for this source. */
|
| + /// Dataflow service generated name for this source.
|
| core.String name;
|
| - /**
|
| - * User name for the original user transform or collection with which this
|
| - * source is most closely associated.
|
| - */
|
| +
|
| + /// User name for the original user transform or collection with which this
|
| + /// source is most closely associated.
|
| core.String originalTransformOrCollection;
|
| - /** Size of the source, if measurable. */
|
| +
|
| + /// Size of the source, if measurable.
|
| core.String sizeBytes;
|
| - /** Human-readable name for this source; may be user or system generated. */
|
| +
|
| + /// Human-readable name for this source; may be user or system generated.
|
| core.String userName;
|
|
|
| StageSource();
|
| @@ -6945,7 +7209,8 @@ class StageSource {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (name != null) {
|
| _json["name"] = name;
|
| }
|
| @@ -6962,11 +7227,12 @@ class StageSource {
|
| }
|
| }
|
|
|
| -/** State family configuration. */
|
| +/// State family configuration.
|
| class StateFamilyConfig {
|
| - /** If true, this family corresponds to a read operation. */
|
| + /// If true, this family corresponds to a read operation.
|
| core.bool isRead;
|
| - /** The state family value. */
|
| +
|
| + /// The state family value.
|
| core.String stateFamily;
|
|
|
| StateFamilyConfig();
|
| @@ -6981,7 +7247,8 @@ class StateFamilyConfig {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (isRead != null) {
|
| _json["isRead"] = isRead;
|
| }
|
| @@ -6992,78 +7259,81 @@ class StateFamilyConfig {
|
| }
|
| }
|
|
|
| -/**
|
| - * The `Status` type defines a logical error model that is suitable for
|
| - * different
|
| - * programming environments, including REST APIs and RPC APIs. It is used by
|
| - * [gRPC](https://github.com/grpc). The error model is designed to be:
|
| - *
|
| - * - Simple to use and understand for most users
|
| - * - Flexible enough to meet unexpected needs
|
| - *
|
| - * # Overview
|
| - *
|
| - * The `Status` message contains three pieces of data: error code, error
|
| - * message,
|
| - * and error details. The error code should be an enum value of
|
| - * google.rpc.Code, but it may accept additional error codes if needed. The
|
| - * error message should be a developer-facing English message that helps
|
| - * developers *understand* and *resolve* the error. If a localized user-facing
|
| - * error message is needed, put the localized message in the error details or
|
| - * localize it in the client. The optional error details may contain arbitrary
|
| - * information about the error. There is a predefined set of error detail types
|
| - * in the package `google.rpc` that can be used for common error conditions.
|
| - *
|
| - * # Language mapping
|
| - *
|
| - * The `Status` message is the logical representation of the error model, but it
|
| - * is not necessarily the actual wire format. When the `Status` message is
|
| - * exposed in different client libraries and different wire protocols, it can be
|
| - * mapped differently. For example, it will likely be mapped to some exceptions
|
| - * in Java, but more likely mapped to some error codes in C.
|
| - *
|
| - * # Other uses
|
| - *
|
| - * The error model and the `Status` message can be used in a variety of
|
| - * environments, either with or without APIs, to provide a
|
| - * consistent developer experience across different environments.
|
| - *
|
| - * Example uses of this error model include:
|
| - *
|
| - * - Partial errors. If a service needs to return partial errors to the client,
|
| - * it may embed the `Status` in the normal response to indicate the partial
|
| - * errors.
|
| - *
|
| - * - Workflow errors. A typical workflow has multiple steps. Each step may
|
| - * have a `Status` message for error reporting.
|
| - *
|
| - * - Batch operations. If a client uses batch request and batch response, the
|
| - * `Status` message should be used directly inside batch response, one for
|
| - * each error sub-response.
|
| - *
|
| - * - Asynchronous operations. If an API call embeds asynchronous operation
|
| - * results in its response, the status of those operations should be
|
| - * represented directly using the `Status` message.
|
| - *
|
| - * - Logging. If some API errors are stored in logs, the message `Status` could
|
| - * be used directly after any stripping needed for security/privacy reasons.
|
| - */
|
| +/// The `Status` type defines a logical error model that is suitable for
|
| +/// different
|
| +/// programming environments, including REST APIs and RPC APIs. It is used by
|
| +/// [gRPC](https://github.com/grpc). The error model is designed to be:
|
| +///
|
| +/// - Simple to use and understand for most users
|
| +/// - Flexible enough to meet unexpected needs
|
| +///
|
| +/// # Overview
|
| +///
|
| +/// The `Status` message contains three pieces of data: error code, error
|
| +/// message,
|
| +/// and error details. The error code should be an enum value of
|
| +/// google.rpc.Code, but it may accept additional error codes if needed. The
|
| +/// error message should be a developer-facing English message that helps
|
| +/// developers *understand* and *resolve* the error. If a localized user-facing
|
| +/// error message is needed, put the localized message in the error details or
|
| +/// localize it in the client. The optional error details may contain arbitrary
|
| +/// information about the error. There is a predefined set of error detail
|
| +/// types
|
| +/// in the package `google.rpc` that can be used for common error conditions.
|
| +///
|
| +/// # Language mapping
|
| +///
|
| +/// The `Status` message is the logical representation of the error model, but
|
| +/// it
|
| +/// is not necessarily the actual wire format. When the `Status` message is
|
| +/// exposed in different client libraries and different wire protocols, it can
|
| +/// be
|
| +/// mapped differently. For example, it will likely be mapped to some
|
| +/// exceptions
|
| +/// in Java, but more likely mapped to some error codes in C.
|
| +///
|
| +/// # Other uses
|
| +///
|
| +/// The error model and the `Status` message can be used in a variety of
|
| +/// environments, either with or without APIs, to provide a
|
| +/// consistent developer experience across different environments.
|
| +///
|
| +/// Example uses of this error model include:
|
| +///
|
| +/// - Partial errors. If a service needs to return partial errors to the
|
| +/// client,
|
| +/// it may embed the `Status` in the normal response to indicate the partial
|
| +/// errors.
|
| +///
|
| +/// - Workflow errors. A typical workflow has multiple steps. Each step may
|
| +/// have a `Status` message for error reporting.
|
| +///
|
| +/// - Batch operations. If a client uses batch request and batch response, the
|
| +/// `Status` message should be used directly inside batch response, one for
|
| +/// each error sub-response.
|
| +///
|
| +/// - Asynchronous operations. If an API call embeds asynchronous operation
|
| +/// results in its response, the status of those operations should be
|
| +/// represented directly using the `Status` message.
|
| +///
|
| +/// - Logging. If some API errors are stored in logs, the message `Status`
|
| +/// could
|
| +/// be used directly after any stripping needed for security/privacy reasons.
|
| class Status {
|
| - /** The status code, which should be an enum value of google.rpc.Code. */
|
| + /// The status code, which should be an enum value of google.rpc.Code.
|
| core.int code;
|
| - /**
|
| - * A list of messages that carry the error details. There is a common set of
|
| - * message types for APIs to use.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// A list of messages that carry the error details. There is a common set
|
| + /// of
|
| + /// message types for APIs to use.
|
| + ///
|
| + /// The values for Object must be JSON objects. They can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.List<core.Map<core.String, core.Object>> details;
|
| - /**
|
| - * A developer-facing error message, which should be in English. Any
|
| - * user-facing error message should be localized and sent in the
|
| - * google.rpc.Status.details field, or localized by the client.
|
| - */
|
| +
|
| + /// A developer-facing error message, which should be in English. Any
|
| + /// user-facing error message should be localized and sent in the
|
| + /// google.rpc.Status.details field, or localized by the client.
|
| core.String message;
|
|
|
| Status();
|
| @@ -7081,7 +7351,8 @@ class Status {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (code != null) {
|
| _json["code"] = code;
|
| }
|
| @@ -7095,49 +7366,45 @@ class Status {
|
| }
|
| }
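|
| An illustrative sketch (5 is google.rpc.Code.NOT_FOUND; the detail entry
| mimics the proto3 `Any` JSON shape with its "@type" key):
|
|   final status = new Status()
|     ..code = 5
|     ..message = 'The requested job was not found.'
|     ..details = [
|       {'@type': 'type.googleapis.com/google.rpc.ResourceInfo'}
|     ];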
|
|
|
| -/**
|
| - * Defines a particular step within a Cloud Dataflow job.
|
| - *
|
| - * A job consists of multiple steps, each of which performs some
|
| - * specific operation as part of the overall job. Data is typically
|
| - * passed from one step to another as part of the job.
|
| - *
|
| - * Here's an example of a sequence of steps which together implement a
|
| - * Map-Reduce job:
|
| - *
|
| - * * Read a collection of data from some source, parsing the
|
| - * collection's elements.
|
| - *
|
| - * * Validate the elements.
|
| - *
|
| - * * Apply a user-defined function to map each element to some value
|
| - * and extract an element-specific key value.
|
| - *
|
| - * * Group elements with the same key into a single element with
|
| - * that key, transforming a multiply-keyed collection into a
|
| - * uniquely-keyed collection.
|
| - *
|
| - * * Write the elements out to some data sink.
|
| - *
|
| - * Note that the Cloud Dataflow service may be used to run many different
|
| - * types of jobs, not just Map-Reduce.
|
| - */
|
| +/// Defines a particular step within a Cloud Dataflow job.
|
| +///
|
| +/// A job consists of multiple steps, each of which performs some
|
| +/// specific operation as part of the overall job. Data is typically
|
| +/// passed from one step to another as part of the job.
|
| +///
|
| +/// Here's an example of a sequence of steps which together implement a
|
| +/// Map-Reduce job:
|
| +///
|
| +/// * Read a collection of data from some source, parsing the
|
| +/// collection's elements.
|
| +///
|
| +/// * Validate the elements.
|
| +///
|
| +/// * Apply a user-defined function to map each element to some value
|
| +/// and extract an element-specific key value.
|
| +///
|
| +/// * Group elements with the same key into a single element with
|
| +/// that key, transforming a multiply-keyed collection into a
|
| +/// uniquely-keyed collection.
|
| +///
|
| +/// * Write the elements out to some data sink.
|
| +///
|
| +/// Note that the Cloud Dataflow service may be used to run many different
|
| +/// types of jobs, not just Map-Reduce.
|
| class Step {
|
| - /** The kind of step in the Cloud Dataflow job. */
|
| + /// The kind of step in the Cloud Dataflow job.
|
| core.String kind;
|
| - /**
|
| - * The name that identifies the step. This must be unique for each
|
| - * step with respect to all other steps in the Cloud Dataflow job.
|
| - */
|
| +
|
| + /// The name that identifies the step. This must be unique for each
|
| + /// step with respect to all other steps in the Cloud Dataflow job.
|
| core.String name;
|
| - /**
|
| - * Named properties associated with the step. Each kind of
|
| - * predefined step has its own required set of properties.
|
| - * Must be provided on Create. Only retrieved with JOB_VIEW_ALL.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// Named properties associated with the step. Each kind of
|
| + /// predefined step has its own required set of properties.
|
| + /// Must be provided on Create. Only retrieved with JOB_VIEW_ALL.
|
| + ///
|
| + /// The values for Object must be JSON objects. It can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Map<core.String, core.Object> properties;
|
|
|
| Step();
|
| @@ -7155,7 +7422,8 @@ class Step {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (kind != null) {
|
| _json["kind"] = kind;
|
| }
|
| @@ -7169,42 +7437,45 @@ class Step {
|
| }
|
| }
|
|
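| To make the Map-Reduce description above concrete, a hypothetical step
| (the kind string and property names are illustrative, not values defined
| by this API):
|
|     final step = new Step()
|       ..kind = "ParallelRead"
|       ..name = "ReadInput"
|       ..properties = {"format": "text"};
|     // name must be unique within the job; the required properties
|     // depend on the kind of step.
|     final json = step.toJson();
|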
|
| -/**
|
| - * Describes a stream of data, either as input to be processed or as
|
| - * output of a streaming Dataflow job.
|
| - */
|
| +/// Describes a stream of data, either as input to be processed or as
|
| +/// output of a streaming Dataflow job.
|
| class StreamLocation {
|
| - /** The stream is a custom source. */
|
| + /// The stream is a custom source.
|
| CustomSourceLocation customSourceLocation;
|
| - /** The stream is a pubsub stream. */
|
| +
|
| + /// The stream is a pubsub stream.
|
| PubsubLocation pubsubLocation;
|
| - /** The stream is a streaming side input. */
|
| +
|
| + /// The stream is a streaming side input.
|
| StreamingSideInputLocation sideInputLocation;
|
| - /**
|
| - * The stream is part of another computation within the current
|
| - * streaming Dataflow job.
|
| - */
|
| +
|
| + /// The stream is part of another computation within the current
|
| + /// streaming Dataflow job.
|
| StreamingStageLocation streamingStageLocation;
|
|
|
| StreamLocation();
|
|
|
| StreamLocation.fromJson(core.Map _json) {
|
| if (_json.containsKey("customSourceLocation")) {
|
| - customSourceLocation = new CustomSourceLocation.fromJson(_json["customSourceLocation"]);
|
| + customSourceLocation =
|
| + new CustomSourceLocation.fromJson(_json["customSourceLocation"]);
|
| }
|
| if (_json.containsKey("pubsubLocation")) {
|
| pubsubLocation = new PubsubLocation.fromJson(_json["pubsubLocation"]);
|
| }
|
| if (_json.containsKey("sideInputLocation")) {
|
| - sideInputLocation = new StreamingSideInputLocation.fromJson(_json["sideInputLocation"]);
|
| + sideInputLocation =
|
| + new StreamingSideInputLocation.fromJson(_json["sideInputLocation"]);
|
| }
|
| if (_json.containsKey("streamingStageLocation")) {
|
| - streamingStageLocation = new StreamingStageLocation.fromJson(_json["streamingStageLocation"]);
|
| + streamingStageLocation =
|
| + new StreamingStageLocation.fromJson(_json["streamingStageLocation"]);
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (customSourceLocation != null) {
|
| _json["customSourceLocation"] = (customSourceLocation).toJson();
|
| }
|
| @@ -7221,15 +7492,18 @@ class StreamLocation {
|
| }
|
| }
|
|
|
| -/** Configuration information for a single streaming computation. */
|
| +/// Configuration information for a single streaming computation.
|
| class StreamingComputationConfig {
|
| - /** Unique identifier for this computation. */
|
| + /// Unique identifier for this computation.
|
| core.String computationId;
|
| - /** Instructions that comprise the computation. */
|
| +
|
| + /// Instructions that comprise the computation.
|
| core.List<ParallelInstruction> instructions;
|
| - /** Stage name of this computation. */
|
| +
|
| + /// Stage name of this computation.
|
| core.String stageName;
|
| - /** System defined name for this computation. */
|
| +
|
| + /// System defined name for this computation.
|
| core.String systemName;
|
|
|
| StreamingComputationConfig();
|
| @@ -7239,7 +7513,9 @@ class StreamingComputationConfig {
|
| computationId = _json["computationId"];
|
| }
|
| if (_json.containsKey("instructions")) {
|
| - instructions = _json["instructions"].map((value) => new ParallelInstruction.fromJson(value)).toList();
|
| + instructions = _json["instructions"]
|
| + .map((value) => new ParallelInstruction.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("stageName")) {
|
| stageName = _json["stageName"];
|
| @@ -7250,12 +7526,14 @@ class StreamingComputationConfig {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (computationId != null) {
|
| _json["computationId"] = computationId;
|
| }
|
| if (instructions != null) {
|
| - _json["instructions"] = instructions.map((value) => (value).toJson()).toList();
|
| + _json["instructions"] =
|
| + instructions.map((value) => (value).toJson()).toList();
|
| }
|
| if (stageName != null) {
|
| _json["stageName"] = stageName;
|
| @@ -7267,14 +7545,14 @@ class StreamingComputationConfig {
|
| }
|
| }
|
|
|
| -/**
|
| - * Describes full or partial data disk assignment information of the computation
|
| - * ranges.
|
| - */
|
| + /// Describes full or partial data disk assignment information of the
|
| + /// computation ranges.
|
| class StreamingComputationRanges {
|
| - /** The ID of the computation. */
|
| + /// The ID of the computation.
|
| core.String computationId;
|
| - /** Data disk assignments for ranges from this computation. */
|
| +
|
| + /// Data disk assignments for ranges from this computation.
|
| core.List<KeyRangeDataDiskAssignment> rangeAssignments;
|
|
|
| StreamingComputationRanges();
|
| @@ -7284,51 +7562,57 @@ class StreamingComputationRanges {
|
| computationId = _json["computationId"];
|
| }
|
| if (_json.containsKey("rangeAssignments")) {
|
| - rangeAssignments = _json["rangeAssignments"].map((value) => new KeyRangeDataDiskAssignment.fromJson(value)).toList();
|
| + rangeAssignments = _json["rangeAssignments"]
|
| + .map((value) => new KeyRangeDataDiskAssignment.fromJson(value))
|
| + .toList();
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (computationId != null) {
|
| _json["computationId"] = computationId;
|
| }
|
| if (rangeAssignments != null) {
|
| - _json["rangeAssignments"] = rangeAssignments.map((value) => (value).toJson()).toList();
|
| + _json["rangeAssignments"] =
|
| + rangeAssignments.map((value) => (value).toJson()).toList();
|
| }
|
| return _json;
|
| }
|
| }
|
|
|
| -/**
|
| - * A task which describes what action should be performed for the specified
|
| - * streaming computation ranges.
|
| - */
|
| +/// A task which describes what action should be performed for the specified
|
| +/// streaming computation ranges.
|
| class StreamingComputationTask {
|
| - /** Contains ranges of a streaming computation this task should apply to. */
|
| + /// Contains ranges of a streaming computation this task should apply to.
|
| core.List<StreamingComputationRanges> computationRanges;
|
| - /** Describes the set of data disks this task should apply to. */
|
| +
|
| + /// Describes the set of data disks this task should apply to.
|
| core.List<MountedDataDisk> dataDisks;
|
| - /**
|
| - * A type of streaming computation task.
|
| - * Possible string values are:
|
| - * - "STREAMING_COMPUTATION_TASK_UNKNOWN" : The streaming computation task is
|
| - * unknown, or unspecified.
|
| - * - "STREAMING_COMPUTATION_TASK_STOP" : Stop processing specified streaming
|
| - * computation range(s).
|
| - * - "STREAMING_COMPUTATION_TASK_START" : Start processing specified streaming
|
| - * computation range(s).
|
| - */
|
| +
|
| + /// A type of streaming computation task.
|
| + /// Possible string values are:
|
| + /// - "STREAMING_COMPUTATION_TASK_UNKNOWN" : The streaming computation task
|
| + /// is unknown, or unspecified.
|
| + /// - "STREAMING_COMPUTATION_TASK_STOP" : Stop processing specified streaming
|
| + /// computation range(s).
|
| + /// - "STREAMING_COMPUTATION_TASK_START" : Start processing specified
|
| + /// streaming computation range(s).
|
| core.String taskType;
|
|
|
| StreamingComputationTask();
|
|
|
| StreamingComputationTask.fromJson(core.Map _json) {
|
| if (_json.containsKey("computationRanges")) {
|
| - computationRanges = _json["computationRanges"].map((value) => new StreamingComputationRanges.fromJson(value)).toList();
|
| + computationRanges = _json["computationRanges"]
|
| + .map((value) => new StreamingComputationRanges.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("dataDisks")) {
|
| - dataDisks = _json["dataDisks"].map((value) => new MountedDataDisk.fromJson(value)).toList();
|
| + dataDisks = _json["dataDisks"]
|
| + .map((value) => new MountedDataDisk.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("taskType")) {
|
| taskType = _json["taskType"];
|
| @@ -7336,9 +7620,11 @@ class StreamingComputationTask {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (computationRanges != null) {
|
| - _json["computationRanges"] = computationRanges.map((value) => (value).toJson()).toList();
|
| + _json["computationRanges"] =
|
| + computationRanges.map((value) => (value).toJson()).toList();
|
| }
|
| if (dataDisks != null) {
|
| _json["dataDisks"] = dataDisks.map((value) => (value).toJson()).toList();
|
| @@ -7350,32 +7636,32 @@ class StreamingComputationTask {
|
| }
|
| }
|
|
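| A sketch of how a worker might act on `taskType`, dispatching on the
| string values documented above:
|
|     void handle(StreamingComputationTask task) {
|       switch (task.taskType) {
|         case "STREAMING_COMPUTATION_TASK_START":
|           // Begin processing task.computationRanges on task.dataDisks.
|           break;
|         case "STREAMING_COMPUTATION_TASK_STOP":
|           // Stop processing the listed ranges.
|           break;
|         default:
|           // STREAMING_COMPUTATION_TASK_UNKNOWN or an unrecognized value.
|           break;
|       }
|     }
|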
|
| -/**
|
| - * A task that carries configuration information for streaming computations.
|
| - */
|
| +/// A task that carries configuration information for streaming computations.
|
| class StreamingConfigTask {
|
| - /** Set of computation configuration information. */
|
| + /// Set of computation configuration information.
|
| core.List<StreamingComputationConfig> streamingComputationConfigs;
|
| - /** Map from user step names to state families. */
|
| +
|
| + /// Map from user step names to state families.
|
| core.Map<core.String, core.String> userStepToStateFamilyNameMap;
|
| - /**
|
| - * If present, the worker must use this endpoint to communicate with Windmill
|
| - * Service dispatchers, otherwise the worker must continue to use whatever
|
| - * endpoint it had been using.
|
| - */
|
| +
|
| + /// If present, the worker must use this endpoint to communicate with
|
| + /// Windmill Service dispatchers, otherwise the worker must continue to
|
| + /// use whatever endpoint it had been using.
|
| core.String windmillServiceEndpoint;
|
| - /**
|
| - * If present, the worker must use this port to communicate with Windmill
|
| - * Service dispatchers. Only applicable when windmill_service_endpoint is
|
| - * specified.
|
| - */
|
| +
|
| + /// If present, the worker must use this port to communicate with Windmill
|
| + /// Service dispatchers. Only applicable when windmill_service_endpoint is
|
| + /// specified.
|
| core.String windmillServicePort;
|
|
|
| StreamingConfigTask();
|
|
|
| StreamingConfigTask.fromJson(core.Map _json) {
|
| if (_json.containsKey("streamingComputationConfigs")) {
|
| - streamingComputationConfigs = _json["streamingComputationConfigs"].map((value) => new StreamingComputationConfig.fromJson(value)).toList();
|
| + streamingComputationConfigs = _json["streamingComputationConfigs"]
|
| + .map((value) => new StreamingComputationConfig.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("userStepToStateFamilyNameMap")) {
|
| userStepToStateFamilyNameMap = _json["userStepToStateFamilyNameMap"];
|
| @@ -7389,9 +7675,11 @@ class StreamingConfigTask {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (streamingComputationConfigs != null) {
|
| - _json["streamingComputationConfigs"] = streamingComputationConfigs.map((value) => (value).toJson()).toList();
|
| + _json["streamingComputationConfigs"] =
|
| + streamingComputationConfigs.map((value) => (value).toJson()).toList();
|
| }
|
| if (userStepToStateFamilyNameMap != null) {
|
| _json["userStepToStateFamilyNameMap"] = userStepToStateFamilyNameMap;
|
| @@ -7406,21 +7694,20 @@ class StreamingConfigTask {
|
| }
|
| }
|
|
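| The endpoint fields encode a fallback rule: switch dispatchers only when
| the task supplies an endpoint, and apply the port only alongside it. A
| sketch (the `endpoint`/`port` variables are hypothetical worker state):
|
|     String endpoint = "windmill.example";  // whatever was in use before
|     String port = "443";
|     void applyConfig(StreamingConfigTask task) {
|       if (task.windmillServiceEndpoint != null) {
|         endpoint = task.windmillServiceEndpoint;
|         if (task.windmillServicePort != null) {
|           port = task.windmillServicePort;  // only valid with an endpoint
|         }
|       }
|       // Otherwise keep using the previous endpoint, as documented above.
|     }
|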
|
| -/** A task which initializes part of a streaming Dataflow job. */
|
| +/// A task which initializes part of a streaming Dataflow job.
|
| class StreamingSetupTask {
|
| - /** The user has requested drain. */
|
| + /// The user has requested drain.
|
| core.bool drain;
|
| - /**
|
| - * The TCP port on which the worker should listen for messages from
|
| - * other streaming computation workers.
|
| - */
|
| +
|
| + /// The TCP port on which the worker should listen for messages from
|
| + /// other streaming computation workers.
|
| core.int receiveWorkPort;
|
| - /** The global topology of the streaming Dataflow job. */
|
| +
|
| + /// The global topology of the streaming Dataflow job.
|
| TopologyConfig streamingComputationTopology;
|
| - /**
|
| - * The TCP port used by the worker to communicate with the Dataflow
|
| - * worker harness.
|
| - */
|
| +
|
| + /// The TCP port used by the worker to communicate with the Dataflow
|
| + /// worker harness.
|
| core.int workerHarnessPort;
|
|
|
| StreamingSetupTask();
|
| @@ -7433,7 +7720,8 @@ class StreamingSetupTask {
|
| receiveWorkPort = _json["receiveWorkPort"];
|
| }
|
| if (_json.containsKey("streamingComputationTopology")) {
|
| - streamingComputationTopology = new TopologyConfig.fromJson(_json["streamingComputationTopology"]);
|
| + streamingComputationTopology =
|
| + new TopologyConfig.fromJson(_json["streamingComputationTopology"]);
|
| }
|
| if (_json.containsKey("workerHarnessPort")) {
|
| workerHarnessPort = _json["workerHarnessPort"];
|
| @@ -7441,7 +7729,8 @@ class StreamingSetupTask {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (drain != null) {
|
| _json["drain"] = drain;
|
| }
|
| @@ -7449,7 +7738,8 @@ class StreamingSetupTask {
|
| _json["receiveWorkPort"] = receiveWorkPort;
|
| }
|
| if (streamingComputationTopology != null) {
|
| - _json["streamingComputationTopology"] = (streamingComputationTopology).toJson();
|
| + _json["streamingComputationTopology"] =
|
| + (streamingComputationTopology).toJson();
|
| }
|
| if (workerHarnessPort != null) {
|
| _json["workerHarnessPort"] = workerHarnessPort;
|
| @@ -7458,13 +7748,12 @@ class StreamingSetupTask {
|
| }
|
| }
|
|
|
| -/** Identifies the location of a streaming side input. */
|
| +/// Identifies the location of a streaming side input.
|
| class StreamingSideInputLocation {
|
| - /** Identifies the state family where this side input is stored. */
|
| + /// Identifies the state family where this side input is stored.
|
| core.String stateFamily;
|
| - /**
|
| - * Identifies the particular side input within the streaming Dataflow job.
|
| - */
|
| +
|
| + /// Identifies the particular side input within the streaming Dataflow job.
|
| core.String tag;
|
|
|
| StreamingSideInputLocation();
|
| @@ -7479,7 +7768,8 @@ class StreamingSideInputLocation {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (stateFamily != null) {
|
| _json["stateFamily"] = stateFamily;
|
| }
|
| @@ -7490,15 +7780,11 @@ class StreamingSideInputLocation {
|
| }
|
| }
|
|
|
| -/**
|
| - * Identifies the location of a streaming computation stage, for
|
| - * stage-to-stage communication.
|
| - */
|
| +/// Identifies the location of a streaming computation stage, for
|
| +/// stage-to-stage communication.
|
| class StreamingStageLocation {
|
| - /**
|
| - * Identifies the particular stream within the streaming Dataflow
|
| - * job.
|
| - */
|
| + /// Identifies the particular stream within the streaming Dataflow
|
| + /// job.
|
| core.String streamId;
|
|
|
| StreamingStageLocation();
|
| @@ -7510,7 +7796,8 @@ class StreamingStageLocation {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (streamId != null) {
|
| _json["streamId"] = streamId;
|
| }
|
| @@ -7518,9 +7805,9 @@ class StreamingStageLocation {
|
| }
|
| }
|
|
|
| -/** A metric value representing a list of strings. */
|
| +/// A metric value representing a list of strings.
|
| class StringList {
|
| - /** Elements of the list. */
|
| + /// Elements of the list.
|
| core.List<core.String> elements;
|
|
|
| StringList();
|
| @@ -7532,7 +7819,8 @@ class StringList {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (elements != null) {
|
| _json["elements"] = elements;
|
| }
|
| @@ -7540,20 +7828,19 @@ class StringList {
|
| }
|
| }
|
|
|
| -/**
|
| - * A rich message format, including a human readable string, a key for
|
| - * identifying the message, and structured data associated with the message for
|
| - * programmatic consumption.
|
| - */
|
| + /// A rich message format, including a human readable string, a key for
|
| + /// identifying the message, and structured data associated with the
|
| + /// message for programmatic consumption.
|
| class StructuredMessage {
|
| - /**
|
| - * Idenfier for this message type. Used by external systems to
|
| - * internationalize or personalize message.
|
| - */
|
| + /// Identifier for this message type. Used by external systems to
|
| + /// internationalize or personalize message.
|
| core.String messageKey;
|
| - /** Human-readable version of message. */
|
| +
|
| + /// Human-readable version of message.
|
| core.String messageText;
|
| - /** The structured data associated with this message. */
|
| +
|
| + /// The structured data associated with this message.
|
| core.List<Parameter> parameters;
|
|
|
| StructuredMessage();
|
| @@ -7566,12 +7853,15 @@ class StructuredMessage {
|
| messageText = _json["messageText"];
|
| }
|
| if (_json.containsKey("parameters")) {
|
| - parameters = _json["parameters"].map((value) => new Parameter.fromJson(value)).toList();
|
| + parameters = _json["parameters"]
|
| + .map((value) => new Parameter.fromJson(value))
|
| + .toList();
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (messageKey != null) {
|
| _json["messageKey"] = messageKey;
|
| }
|
| @@ -7579,91 +7869,96 @@ class StructuredMessage {
|
| _json["messageText"] = messageText;
|
| }
|
| if (parameters != null) {
|
| - _json["parameters"] = parameters.map((value) => (value).toJson()).toList();
|
| + _json["parameters"] =
|
| + parameters.map((value) => (value).toJson()).toList();
|
| }
|
| return _json;
|
| }
|
| }
|
|
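| For example, a hypothetical message (the key is invented for
| illustration; real keys are assigned by the service):
|
|     final msg = new StructuredMessage()
|       ..messageKey = "pool.startup.failure"
|       ..messageText = "Worker pool failed to start.";
|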
|
| -/** Taskrunner configuration settings. */
|
| +/// Taskrunner configuration settings.
|
| class TaskRunnerSettings {
|
| - /** Whether to also send taskrunner log info to stderr. */
|
| + /// Whether to also send taskrunner log info to stderr.
|
| core.bool alsologtostderr;
|
| - /** The location on the worker for task-specific subdirectories. */
|
| +
|
| + /// The location on the worker for task-specific subdirectories.
|
| core.String baseTaskDir;
|
| - /**
|
| - * The base URL for the taskrunner to use when accessing Google Cloud APIs.
|
| - *
|
| - * When workers access Google Cloud APIs, they logically do so via
|
| - * relative URLs. If this field is specified, it supplies the base
|
| - * URL to use for resolving these relative URLs. The normative
|
| - * algorithm used is defined by RFC 1808, "Relative Uniform Resource
|
| - * Locators".
|
| - *
|
| - * If not specified, the default value is "http://www.googleapis.com/"
|
| - */
|
| +
|
| + /// The base URL for the taskrunner to use when accessing Google Cloud APIs.
|
| + ///
|
| + /// When workers access Google Cloud APIs, they logically do so via
|
| + /// relative URLs. If this field is specified, it supplies the base
|
| + /// URL to use for resolving these relative URLs. The normative
|
| + /// algorithm used is defined by RFC 1808, "Relative Uniform Resource
|
| + /// Locators".
|
| + ///
|
| + /// If not specified, the default value is "http://www.googleapis.com/"
|
| core.String baseUrl;
|
| - /** The file to store preprocessing commands in. */
|
| +
|
| + /// The file to store preprocessing commands in.
|
| core.String commandlinesFileName;
|
| - /** Whether to continue taskrunner if an exception is hit. */
|
| +
|
| + /// Whether to continue taskrunner if an exception is hit.
|
| core.bool continueOnException;
|
| - /** The API version of endpoint, e.g. "v1b3" */
|
| +
|
| + /// The API version of the endpoint, e.g. "v1b3"
|
| core.String dataflowApiVersion;
|
| - /** The command to launch the worker harness. */
|
| +
|
| + /// The command to launch the worker harness.
|
| core.String harnessCommand;
|
| - /** The suggested backend language. */
|
| +
|
| + /// The suggested backend language.
|
| core.String languageHint;
|
| - /** The directory on the VM to store logs. */
|
| +
|
| + /// The directory on the VM to store logs.
|
| core.String logDir;
|
| - /**
|
| - * Whether to send taskrunner log info to Google Compute Engine VM serial
|
| - * console.
|
| - */
|
| +
|
| + /// Whether to send taskrunner log info to Google Compute Engine VM serial
|
| + /// console.
|
| core.bool logToSerialconsole;
|
| - /**
|
| - * Indicates where to put logs. If this is not specified, the logs
|
| - * will not be uploaded.
|
| - *
|
| - * The supported resource type is:
|
| - *
|
| - * Google Cloud Storage:
|
| - * storage.googleapis.com/{bucket}/{object}
|
| - * bucket.storage.googleapis.com/{object}
|
| - */
|
| +
|
| + /// Indicates where to put logs. If this is not specified, the logs
|
| + /// will not be uploaded.
|
| + ///
|
| + /// The supported resource type is:
|
| + ///
|
| + /// Google Cloud Storage:
|
| + /// storage.googleapis.com/{bucket}/{object}
|
| + /// bucket.storage.googleapis.com/{object}
|
| core.String logUploadLocation;
|
| - /**
|
| - * The OAuth2 scopes to be requested by the taskrunner in order to
|
| - * access the Cloud Dataflow API.
|
| - */
|
| +
|
| + /// The OAuth2 scopes to be requested by the taskrunner in order to
|
| + /// access the Cloud Dataflow API.
|
| core.List<core.String> oauthScopes;
|
| - /** The settings to pass to the parallel worker harness. */
|
| +
|
| + /// The settings to pass to the parallel worker harness.
|
| WorkerSettings parallelWorkerSettings;
|
| - /** The streaming worker main class name. */
|
| +
|
| + /// The streaming worker main class name.
|
| core.String streamingWorkerMainClass;
|
| - /**
|
| - * The UNIX group ID on the worker VM to use for tasks launched by
|
| - * taskrunner; e.g. "wheel".
|
| - */
|
| +
|
| + /// The UNIX group ID on the worker VM to use for tasks launched by
|
| + /// taskrunner; e.g. "wheel".
|
| core.String taskGroup;
|
| - /**
|
| - * The UNIX user ID on the worker VM to use for tasks launched by
|
| - * taskrunner; e.g. "root".
|
| - */
|
| +
|
| + /// The UNIX user ID on the worker VM to use for tasks launched by
|
| + /// taskrunner; e.g. "root".
|
| core.String taskUser;
|
| - /**
|
| - * The prefix of the resources the taskrunner should use for
|
| - * temporary storage.
|
| - *
|
| - * The supported resource type is:
|
| - *
|
| - * Google Cloud Storage:
|
| - * storage.googleapis.com/{bucket}/{object}
|
| - * bucket.storage.googleapis.com/{object}
|
| - */
|
| +
|
| + /// The prefix of the resources the taskrunner should use for
|
| + /// temporary storage.
|
| + ///
|
| + /// The supported resource type is:
|
| + ///
|
| + /// Google Cloud Storage:
|
| + /// storage.googleapis.com/{bucket}/{object}
|
| + /// bucket.storage.googleapis.com/{object}
|
| core.String tempStoragePrefix;
|
| - /** The ID string of the VM. */
|
| +
|
| + /// The ID string of the VM.
|
| core.String vmId;
|
| - /** The file to store the workflow in. */
|
| +
|
| + /// The file to store the workflow in.
|
| core.String workflowFileName;
|
|
|
| TaskRunnerSettings();
|
| @@ -7706,7 +8001,8 @@ class TaskRunnerSettings {
|
| oauthScopes = _json["oauthScopes"];
|
| }
|
| if (_json.containsKey("parallelWorkerSettings")) {
|
| - parallelWorkerSettings = new WorkerSettings.fromJson(_json["parallelWorkerSettings"]);
|
| + parallelWorkerSettings =
|
| + new WorkerSettings.fromJson(_json["parallelWorkerSettings"]);
|
| }
|
| if (_json.containsKey("streamingWorkerMainClass")) {
|
| streamingWorkerMainClass = _json["streamingWorkerMainClass"];
|
| @@ -7729,7 +8025,8 @@ class TaskRunnerSettings {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (alsologtostderr != null) {
|
| _json["alsologtostderr"] = alsologtostderr;
|
| }
|
| @@ -7791,13 +8088,15 @@ class TaskRunnerSettings {
|
| }
|
| }
|
|
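| The RFC 1808 resolution described for `baseUrl` closely matches what
| Dart's core `Uri.resolve` implements, so the behaviour can be sketched
| directly:
|
|     final settings = new TaskRunnerSettings()
|       ..baseUrl = "http://www.googleapis.com/";  // the documented default
|     final resolved =
|         Uri.parse(settings.baseUrl).resolve("dataflow/v1b3/jobs");
|     print(resolved);  // http://www.googleapis.com/dataflow/v1b3/jobs
|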
|
| -/** Metadata describing a template. */
|
| +/// Metadata describing a template.
|
| class TemplateMetadata {
|
| - /** Optional. A description of the template. */
|
| + /// Optional. A description of the template.
|
| core.String description;
|
| - /** Required. The name of the template. */
|
| +
|
| + /// Required. The name of the template.
|
| core.String name;
|
| - /** The parameters for the template. */
|
| +
|
| + /// The parameters for the template.
|
| core.List<ParameterMetadata> parameters;
|
|
|
| TemplateMetadata();
|
| @@ -7810,12 +8109,15 @@ class TemplateMetadata {
|
| name = _json["name"];
|
| }
|
| if (_json.containsKey("parameters")) {
|
| - parameters = _json["parameters"].map((value) => new ParameterMetadata.fromJson(value)).toList();
|
| + parameters = _json["parameters"]
|
| + .map((value) => new ParameterMetadata.fromJson(value))
|
| + .toList();
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (description != null) {
|
| _json["description"] = description;
|
| }
|
| @@ -7823,36 +8125,43 @@ class TemplateMetadata {
|
| _json["name"] = name;
|
| }
|
| if (parameters != null) {
|
| - _json["parameters"] = parameters.map((value) => (value).toJson()).toList();
|
| + _json["parameters"] =
|
| + parameters.map((value) => (value).toJson()).toList();
|
| }
|
| return _json;
|
| }
|
| }
|
|
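| A sketch showing the required/optional split documented above:
|
|     final metadata = new TemplateMetadata()
|       ..name = "WordCount"                        // required
|       ..description = "Counts words in a file.";  // optional
|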
|
| -/**
|
| - * Global topology of the streaming Dataflow job, including all
|
| - * computations and their sharded locations.
|
| - */
|
| +/// Global topology of the streaming Dataflow job, including all
|
| +/// computations and their sharded locations.
|
| class TopologyConfig {
|
| - /** The computations associated with a streaming Dataflow job. */
|
| + /// The computations associated with a streaming Dataflow job.
|
| core.List<ComputationTopology> computations;
|
| - /** The disks assigned to a streaming Dataflow job. */
|
| +
|
| + /// The disks assigned to a streaming Dataflow job.
|
| core.List<DataDiskAssignment> dataDiskAssignments;
|
| - /** The size (in bits) of keys that will be assigned to source messages. */
|
| +
|
| + /// The size (in bits) of keys that will be assigned to source messages.
|
| core.int forwardingKeyBits;
|
| - /** Version number for persistent state. */
|
| +
|
| + /// Version number for persistent state.
|
| core.int persistentStateVersion;
|
| - /** Maps user stage names to stable computation names. */
|
| +
|
| + /// Maps user stage names to stable computation names.
|
| core.Map<core.String, core.String> userStageToComputationNameMap;
|
|
|
| TopologyConfig();
|
|
|
| TopologyConfig.fromJson(core.Map _json) {
|
| if (_json.containsKey("computations")) {
|
| - computations = _json["computations"].map((value) => new ComputationTopology.fromJson(value)).toList();
|
| + computations = _json["computations"]
|
| + .map((value) => new ComputationTopology.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("dataDiskAssignments")) {
|
| - dataDiskAssignments = _json["dataDiskAssignments"].map((value) => new DataDiskAssignment.fromJson(value)).toList();
|
| + dataDiskAssignments = _json["dataDiskAssignments"]
|
| + .map((value) => new DataDiskAssignment.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("forwardingKeyBits")) {
|
| forwardingKeyBits = _json["forwardingKeyBits"];
|
| @@ -7866,12 +8175,15 @@ class TopologyConfig {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (computations != null) {
|
| - _json["computations"] = computations.map((value) => (value).toJson()).toList();
|
| + _json["computations"] =
|
| + computations.map((value) => (value).toJson()).toList();
|
| }
|
| if (dataDiskAssignments != null) {
|
| - _json["dataDiskAssignments"] = dataDiskAssignments.map((value) => (value).toJson()).toList();
|
| + _json["dataDiskAssignments"] =
|
| + dataDiskAssignments.map((value) => (value).toJson()).toList();
|
| }
|
| if (forwardingKeyBits != null) {
|
| _json["forwardingKeyBits"] = forwardingKeyBits;
|
| @@ -7886,40 +8198,45 @@ class TopologyConfig {
|
| }
|
| }
|
|
|
| -/** Description of the type, names/ids, and input/outputs for a transform. */
|
| +/// Description of the type, names/ids, and input/outputs for a transform.
|
| class TransformSummary {
|
| - /** Transform-specific display data. */
|
| + /// Transform-specific display data.
|
| core.List<DisplayData> displayData;
|
| - /** SDK generated id of this transform instance. */
|
| +
|
| + /// SDK generated id of this transform instance.
|
| core.String id;
|
| - /** User names for all collection inputs to this transform. */
|
| +
|
| + /// User names for all collection inputs to this transform.
|
| core.List<core.String> inputCollectionName;
|
| - /**
|
| - * Type of transform.
|
| - * Possible string values are:
|
| - * - "UNKNOWN_KIND" : Unrecognized transform type.
|
| - * - "PAR_DO_KIND" : ParDo transform.
|
| - * - "GROUP_BY_KEY_KIND" : Group By Key transform.
|
| - * - "FLATTEN_KIND" : Flatten transform.
|
| - * - "READ_KIND" : Read transform.
|
| - * - "WRITE_KIND" : Write transform.
|
| - * - "CONSTANT_KIND" : Constructs from a constant value, such as with
|
| - * Create.of.
|
| - * - "SINGLETON_KIND" : Creates a Singleton view of a collection.
|
| - * - "SHUFFLE_KIND" : Opening or closing a shuffle session, often as part of a
|
| - * GroupByKey.
|
| - */
|
| +
|
| + /// Type of transform.
|
| + /// Possible string values are:
|
| + /// - "UNKNOWN_KIND" : Unrecognized transform type.
|
| + /// - "PAR_DO_KIND" : ParDo transform.
|
| + /// - "GROUP_BY_KEY_KIND" : Group By Key transform.
|
| + /// - "FLATTEN_KIND" : Flatten transform.
|
| + /// - "READ_KIND" : Read transform.
|
| + /// - "WRITE_KIND" : Write transform.
|
| + /// - "CONSTANT_KIND" : Constructs from a constant value, such as with
|
| + /// Create.of.
|
| + /// - "SINGLETON_KIND" : Creates a Singleton view of a collection.
|
| + /// - "SHUFFLE_KIND" : Opening or closing a shuffle session, often as part of
|
| + /// a GroupByKey.
|
| core.String kind;
|
| - /** User provided name for this transform instance. */
|
| +
|
| + /// User provided name for this transform instance.
|
| core.String name;
|
| - /** User names for all collection outputs to this transform. */
|
| +
|
| + /// User names for all collection outputs to this transform.
|
| core.List<core.String> outputCollectionName;
|
|
|
| TransformSummary();
|
|
|
| TransformSummary.fromJson(core.Map _json) {
|
| if (_json.containsKey("displayData")) {
|
| - displayData = _json["displayData"].map((value) => new DisplayData.fromJson(value)).toList();
|
| + displayData = _json["displayData"]
|
| + .map((value) => new DisplayData.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("id")) {
|
| id = _json["id"];
|
| @@ -7939,9 +8256,11 @@ class TransformSummary {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (displayData != null) {
|
| - _json["displayData"] = displayData.map((value) => (value).toJson()).toList();
|
| + _json["displayData"] =
|
| + displayData.map((value) => (value).toJson()).toList();
|
| }
|
| if (id != null) {
|
| _json["id"] = id;
|
| @@ -7962,43 +8281,53 @@ class TransformSummary {
|
| }
|
| }
|
|
|
| -/**
|
| - * WorkItem represents basic information about a WorkItem to be executed
|
| - * in the cloud.
|
| - */
|
| +/// WorkItem represents basic information about a WorkItem to be executed
|
| +/// in the cloud.
|
| class WorkItem {
|
| - /** Work item-specific configuration as an opaque blob. */
|
| + /// Work item-specific configuration as an opaque blob.
|
| core.String configuration;
|
| - /** Identifies this WorkItem. */
|
| +
|
| + /// Identifies this WorkItem.
|
| core.String id;
|
| - /** The initial index to use when reporting the status of the WorkItem. */
|
| +
|
| + /// The initial index to use when reporting the status of the WorkItem.
|
| core.String initialReportIndex;
|
| - /** Identifies the workflow job this WorkItem belongs to. */
|
| +
|
| + /// Identifies the workflow job this WorkItem belongs to.
|
| core.String jobId;
|
| - /** Time when the lease on this Work will expire. */
|
| +
|
| + /// Time when the lease on this Work will expire.
|
| core.String leaseExpireTime;
|
| - /** Additional information for MapTask WorkItems. */
|
| +
|
| + /// Additional information for MapTask WorkItems.
|
| MapTask mapTask;
|
| - /**
|
| - * Any required packages that need to be fetched in order to execute
|
| - * this WorkItem.
|
| - */
|
| +
|
| + /// Any required packages that need to be fetched in order to execute
|
| + /// this WorkItem.
|
| core.List<Package> packages;
|
| - /** Identifies the cloud project this WorkItem belongs to. */
|
| +
|
| + /// Identifies the cloud project this WorkItem belongs to.
|
| core.String projectId;
|
| - /** Recommended reporting interval. */
|
| +
|
| + /// Recommended reporting interval.
|
| core.String reportStatusInterval;
|
| - /** Additional information for SeqMapTask WorkItems. */
|
| +
|
| + /// Additional information for SeqMapTask WorkItems.
|
| SeqMapTask seqMapTask;
|
| - /** Additional information for ShellTask WorkItems. */
|
| +
|
| + /// Additional information for ShellTask WorkItems.
|
| ShellTask shellTask;
|
| - /** Additional information for source operation WorkItems. */
|
| +
|
| + /// Additional information for source operation WorkItems.
|
| SourceOperationRequest sourceOperationTask;
|
| - /** Additional information for StreamingComputationTask WorkItems. */
|
| +
|
| + /// Additional information for StreamingComputationTask WorkItems.
|
| StreamingComputationTask streamingComputationTask;
|
| - /** Additional information for StreamingConfigTask WorkItems. */
|
| +
|
| + /// Additional information for StreamingConfigTask WorkItems.
|
| StreamingConfigTask streamingConfigTask;
|
| - /** Additional information for StreamingSetupTask WorkItems. */
|
| +
|
| + /// Additional information for StreamingSetupTask WorkItems.
|
| StreamingSetupTask streamingSetupTask;
|
|
|
| WorkItem();
|
| @@ -8023,7 +8352,9 @@ class WorkItem {
|
| mapTask = new MapTask.fromJson(_json["mapTask"]);
|
| }
|
| if (_json.containsKey("packages")) {
|
| - packages = _json["packages"].map((value) => new Package.fromJson(value)).toList();
|
| + packages = _json["packages"]
|
| + .map((value) => new Package.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("projectId")) {
|
| projectId = _json["projectId"];
|
| @@ -8038,21 +8369,26 @@ class WorkItem {
|
| shellTask = new ShellTask.fromJson(_json["shellTask"]);
|
| }
|
| if (_json.containsKey("sourceOperationTask")) {
|
| - sourceOperationTask = new SourceOperationRequest.fromJson(_json["sourceOperationTask"]);
|
| + sourceOperationTask =
|
| + new SourceOperationRequest.fromJson(_json["sourceOperationTask"]);
|
| }
|
| if (_json.containsKey("streamingComputationTask")) {
|
| - streamingComputationTask = new StreamingComputationTask.fromJson(_json["streamingComputationTask"]);
|
| + streamingComputationTask = new StreamingComputationTask.fromJson(
|
| + _json["streamingComputationTask"]);
|
| }
|
| if (_json.containsKey("streamingConfigTask")) {
|
| - streamingConfigTask = new StreamingConfigTask.fromJson(_json["streamingConfigTask"]);
|
| + streamingConfigTask =
|
| + new StreamingConfigTask.fromJson(_json["streamingConfigTask"]);
|
| }
|
| if (_json.containsKey("streamingSetupTask")) {
|
| - streamingSetupTask = new StreamingSetupTask.fromJson(_json["streamingSetupTask"]);
|
| + streamingSetupTask =
|
| + new StreamingSetupTask.fromJson(_json["streamingSetupTask"]);
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (configuration != null) {
|
| _json["configuration"] = configuration;
|
| }
|
| @@ -8102,46 +8438,43 @@ class WorkItem {
|
| }
|
| }
|
|
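| A leased WorkItem carries one of several task payloads; a worker can
| dispatch on whichever field is non-null (a sketch of that pattern, not
| prescribed by the API surface itself):
|
|     void dispatch(WorkItem item) {
|       if (item.mapTask != null) {
|         // Execute the MapTask.
|       } else if (item.streamingSetupTask != null) {
|         // Initialize part of a streaming job.
|       } else if (item.sourceOperationTask != null) {
|         // Perform the requested source operation.
|       }
|       // ...and similarly for seqMapTask, shellTask, and the other
|       // streaming task fields.
|     }
|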
|
| -/**
|
| - * The Dataflow service's idea of the current state of a WorkItem
|
| - * being processed by a worker.
|
| - */
|
| +/// The Dataflow service's idea of the current state of a WorkItem
|
| +/// being processed by a worker.
|
| class WorkItemServiceState {
|
| - /**
|
| - * Other data returned by the service, specific to the particular
|
| - * worker harness.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| + /// Other data returned by the service, specific to the particular
|
| + /// worker harness.
|
| + ///
|
| + /// The values for Object must be JSON objects. It can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Map<core.String, core.Object> harnessData;
|
| - /** Time at which the current lease will expire. */
|
| +
|
| + /// Time at which the current lease will expire.
|
| core.String leaseExpireTime;
|
| - /**
|
| - * The short ids that workers should use in subsequent metric updates.
|
| - * Workers should strive to use short ids whenever possible, but it is ok
|
| - * to request the short_id again if a worker lost track of it
|
| - * (e.g. if the worker is recovering from a crash).
|
| - * NOTE: it is possible that the response may have short ids for a subset
|
| - * of the metrics.
|
| - */
|
| +
|
| + /// The short ids that workers should use in subsequent metric updates.
|
| + /// Workers should strive to use short ids whenever possible, but it is ok
|
| + /// to request the short_id again if a worker lost track of it
|
| + /// (e.g. if the worker is recovering from a crash).
|
| + /// NOTE: it is possible that the response may have short ids for a subset
|
| + /// of the metrics.
|
| core.List<MetricShortId> metricShortId;
|
| - /**
|
| - * The index value to use for the next report sent by the worker.
|
| - * Note: If the report call fails for whatever reason, the worker should
|
| - * reuse this index for subsequent report attempts.
|
| - */
|
| +
|
| + /// The index value to use for the next report sent by the worker.
|
| + /// Note: If the report call fails for whatever reason, the worker should
|
| + /// reuse this index for subsequent report attempts.
|
| core.String nextReportIndex;
|
| - /** New recommended reporting interval. */
|
| +
|
| + /// New recommended reporting interval.
|
| core.String reportStatusInterval;
|
| - /**
|
| - * The progress point in the WorkItem where the Dataflow service
|
| - * suggests that the worker truncate the task.
|
| - */
|
| +
|
| + /// The progress point in the WorkItem where the Dataflow service
|
| + /// suggests that the worker truncate the task.
|
| ApproximateSplitRequest splitRequest;
|
| - /** DEPRECATED in favor of split_request. */
|
| +
|
| + /// DEPRECATED in favor of split_request.
|
| ApproximateProgress suggestedStopPoint;
|
| - /** Obsolete, always empty. */
|
| +
|
| + /// Obsolete, always empty.
|
| Position suggestedStopPosition;
|
|
|
| WorkItemServiceState();
|
| @@ -8154,7 +8487,9 @@ class WorkItemServiceState {
|
| leaseExpireTime = _json["leaseExpireTime"];
|
| }
|
| if (_json.containsKey("metricShortId")) {
|
| - metricShortId = _json["metricShortId"].map((value) => new MetricShortId.fromJson(value)).toList();
|
| + metricShortId = _json["metricShortId"]
|
| + .map((value) => new MetricShortId.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("nextReportIndex")) {
|
| nextReportIndex = _json["nextReportIndex"];
|
| @@ -8163,18 +8498,22 @@ class WorkItemServiceState {
|
| reportStatusInterval = _json["reportStatusInterval"];
|
| }
|
| if (_json.containsKey("splitRequest")) {
|
| - splitRequest = new ApproximateSplitRequest.fromJson(_json["splitRequest"]);
|
| + splitRequest =
|
| + new ApproximateSplitRequest.fromJson(_json["splitRequest"]);
|
| }
|
| if (_json.containsKey("suggestedStopPoint")) {
|
| - suggestedStopPoint = new ApproximateProgress.fromJson(_json["suggestedStopPoint"]);
|
| + suggestedStopPoint =
|
| + new ApproximateProgress.fromJson(_json["suggestedStopPoint"]);
|
| }
|
| if (_json.containsKey("suggestedStopPosition")) {
|
| - suggestedStopPosition = new Position.fromJson(_json["suggestedStopPosition"]);
|
| + suggestedStopPosition =
|
| + new Position.fromJson(_json["suggestedStopPosition"]);
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (harnessData != null) {
|
| _json["harnessData"] = harnessData;
|
| }
|
| @@ -8182,7 +8521,8 @@ class WorkItemServiceState {
|
| _json["leaseExpireTime"] = leaseExpireTime;
|
| }
|
| if (metricShortId != null) {
|
| - _json["metricShortId"] = metricShortId.map((value) => (value).toJson()).toList();
|
| + _json["metricShortId"] =
|
| + metricShortId.map((value) => (value).toJson()).toList();
|
| }
|
| if (nextReportIndex != null) {
|
| _json["nextReportIndex"] = nextReportIndex;
|
| @@ -8203,84 +8543,88 @@ class WorkItemServiceState {
|
| }
|
| }
|
|
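| A sketch of consuming the service state per the field docs: cache any
| metric short ids (the response may cover only a subset of metrics) and
| adopt the next report index:
|
|     final cachedShortIds = <MetricShortId>[];
|     String nextIndex;
|     void onServiceState(WorkItemServiceState state) {
|       if (state.metricShortId != null) {
|         cachedShortIds.addAll(state.metricShortId);  // merge, don't replace
|       }
|       nextIndex = state.nextReportIndex;  // reuse even if a report fails
|     }
|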
|
| -/** Conveys a worker's progress through the work described by a WorkItem. */
|
| +/// Conveys a worker's progress through the work described by a WorkItem.
|
| class WorkItemStatus {
|
| - /** True if the WorkItem was completed (successfully or unsuccessfully). */
|
| + /// True if the WorkItem was completed (successfully or unsuccessfully).
|
| core.bool completed;
|
| - /** Worker output counters for this WorkItem. */
|
| +
|
| + /// Worker output counters for this WorkItem.
|
| core.List<CounterUpdate> counterUpdates;
|
| - /** See documentation of stop_position. */
|
| +
|
| + /// See documentation of stop_position.
|
| DynamicSourceSplit dynamicSourceSplit;
|
| - /**
|
| - * Specifies errors which occurred during processing. If errors are
|
| - * provided, and completed = true, then the WorkItem is considered
|
| - * to have failed.
|
| - */
|
| +
|
| + /// Specifies errors which occurred during processing. If errors are
|
| + /// provided, and completed = true, then the WorkItem is considered
|
| + /// to have failed.
|
| core.List<Status> errors;
|
| - /** DEPRECATED in favor of counter_updates. */
|
| +
|
| + /// DEPRECATED in favor of counter_updates.
|
| core.List<MetricUpdate> metricUpdates;
|
| - /** DEPRECATED in favor of reported_progress. */
|
| +
|
| + /// DEPRECATED in favor of reported_progress.
|
| ApproximateProgress progress;
|
| - /**
|
| - * The report index. When a WorkItem is leased, the lease will
|
| - * contain an initial report index. When a WorkItem's status is
|
| - * reported to the system, the report should be sent with
|
| - * that report index, and the response will contain the index the
|
| - * worker should use for the next report. Reports received with
|
| - * unexpected index values will be rejected by the service.
|
| - *
|
| - * In order to preserve idempotency, the worker should not alter the
|
| - * contents of a report, even if the worker must submit the same
|
| - * report multiple times before getting back a response. The worker
|
| - * should not submit a subsequent report until the response for the
|
| - * previous report had been received from the service.
|
| - */
|
| +
|
| + /// The report index. When a WorkItem is leased, the lease will
|
| + /// contain an initial report index. When a WorkItem's status is
|
| + /// reported to the system, the report should be sent with
|
| + /// that report index, and the response will contain the index the
|
| + /// worker should use for the next report. Reports received with
|
| + /// unexpected index values will be rejected by the service.
|
| + ///
|
| + /// In order to preserve idempotency, the worker should not alter the
|
| + /// contents of a report, even if the worker must submit the same
|
| + /// report multiple times before getting back a response. The worker
|
| + /// should not submit a subsequent report until the response for the
|
| + /// previous report has been received from the service.
|
| core.String reportIndex;
|
| - /** The worker's progress through this WorkItem. */
|
| +
|
| + /// The worker's progress through this WorkItem.
|
| ApproximateReportedProgress reportedProgress;
|
| - /** Amount of time the worker requests for its lease. */
|
| +
|
| + /// Amount of time the worker requests for its lease.
|
| core.String requestedLeaseDuration;
|
| - /** DEPRECATED in favor of dynamic_source_split. */
|
| +
|
| + /// DEPRECATED in favor of dynamic_source_split.
|
| SourceFork sourceFork;
|
| - /**
|
| - * If the work item represented a SourceOperationRequest, and the work
|
| - * is completed, contains the result of the operation.
|
| - */
|
| +
|
| + /// If the work item represented a SourceOperationRequest, and the work
|
| + /// is completed, contains the result of the operation.
|
| SourceOperationResponse sourceOperationResponse;
|
| - /**
|
| - * A worker may split an active map task in two parts, "primary" and
|
| - * "residual", continuing to process the primary part and returning the
|
| - * residual part into the pool of available work.
|
| - * This event is called a "dynamic split" and is critical to the dynamic
|
| - * work rebalancing feature. The two obtained sub-tasks are called
|
| - * "parts" of the split.
|
| - * The parts, if concatenated, must represent the same input as would
|
| - * be read by the current task if the split did not happen.
|
| - * The exact way in which the original task is decomposed into the two
|
| - * parts is specified either as a position demarcating them
|
| - * (stop_position), or explicitly as two DerivedSources, if this
|
| - * task consumes a user-defined source type (dynamic_source_split).
|
| - *
|
| - * The "current" task is adjusted as a result of the split: after a task
|
| - * with range [A, B) sends a stop_position update at C, its range is
|
| - * considered to be [A, C), e.g.:
|
| - * * Progress should be interpreted relative to the new range, e.g.
|
| - * "75% completed" means "75% of [A, C) completed"
|
| - * * The worker should interpret proposed_stop_position relative to the
|
| - * new range, e.g. "split at 68%" should be interpreted as
|
| - * "split at 68% of [A, C)".
|
| - * * If the worker chooses to split again using stop_position, only
|
| - * stop_positions in [A, C) will be accepted.
|
| - * * Etc.
|
| - * dynamic_source_split has similar semantics: e.g., if a task with
|
| - * source S splits using dynamic_source_split into {P, R}
|
| - * (where P and R must be together equivalent to S), then subsequent
|
| - * progress and proposed_stop_position should be interpreted relative
|
| - * to P, and in a potential subsequent dynamic_source_split into {P', R'},
|
| - * P' and R' must be together equivalent to P, etc.
|
| - */
|
| +
|
| + /// A worker may split an active map task in two parts, "primary" and
|
| + /// "residual", continuing to process the primary part and returning the
|
| + /// residual part into the pool of available work.
|
| + /// This event is called a "dynamic split" and is critical to the dynamic
|
| + /// work rebalancing feature. The two obtained sub-tasks are called
|
| + /// "parts" of the split.
|
| + /// The parts, if concatenated, must represent the same input as would
|
| + /// be read by the current task if the split did not happen.
|
| + /// The exact way in which the original task is decomposed into the two
|
| + /// parts is specified either as a position demarcating them
|
| + /// (stop_position), or explicitly as two DerivedSources, if this
|
| + /// task consumes a user-defined source type (dynamic_source_split).
|
| + ///
|
| + /// The "current" task is adjusted as a result of the split: after a task
|
| + /// with range [A, B) sends a stop_position update at C, its range is
|
| + /// considered to be [A, C), e.g.:
|
| + /// * Progress should be interpreted relative to the new range, e.g.
|
| + /// "75% completed" means "75% of [A, C) completed"
|
| + /// * The worker should interpret proposed_stop_position relative to the
|
| + /// new range, e.g. "split at 68%" should be interpreted as
|
| + /// "split at 68% of [A, C)".
|
| + /// * If the worker chooses to split again using stop_position, only
|
| + /// stop_positions in [A, C) will be accepted.
|
| + /// * Etc.
|
| + /// dynamic_source_split has similar semantics: e.g., if a task with
|
| + /// source S splits using dynamic_source_split into {P, R}
|
| + /// (where P and R must be together equivalent to S), then subsequent
|
| + /// progress and proposed_stop_position should be interpreted relative
|
| + /// to P, and in a potential subsequent dynamic_source_split into {P', R'},
|
| + /// P' and R' must be together equivalent to P, etc.
|
| Position stopPosition;
|
| - /** Identifies the WorkItem. */
|
| +
|
| + /// Identifies the WorkItem.
|
| core.String workItemId;
|
|
|
| WorkItemStatus();
|
| @@ -8290,16 +8634,22 @@ class WorkItemStatus {
|
| completed = _json["completed"];
|
| }
|
| if (_json.containsKey("counterUpdates")) {
|
| - counterUpdates = _json["counterUpdates"].map((value) => new CounterUpdate.fromJson(value)).toList();
|
| + counterUpdates = _json["counterUpdates"]
|
| + .map((value) => new CounterUpdate.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("dynamicSourceSplit")) {
|
| - dynamicSourceSplit = new DynamicSourceSplit.fromJson(_json["dynamicSourceSplit"]);
|
| + dynamicSourceSplit =
|
| + new DynamicSourceSplit.fromJson(_json["dynamicSourceSplit"]);
|
| }
|
| if (_json.containsKey("errors")) {
|
| - errors = _json["errors"].map((value) => new Status.fromJson(value)).toList();
|
| + errors =
|
| + _json["errors"].map((value) => new Status.fromJson(value)).toList();
|
| }
|
| if (_json.containsKey("metricUpdates")) {
|
| - metricUpdates = _json["metricUpdates"].map((value) => new MetricUpdate.fromJson(value)).toList();
|
| + metricUpdates = _json["metricUpdates"]
|
| + .map((value) => new MetricUpdate.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("progress")) {
|
| progress = new ApproximateProgress.fromJson(_json["progress"]);
|
| @@ -8308,7 +8658,8 @@ class WorkItemStatus {
|
| reportIndex = _json["reportIndex"];
|
| }
|
| if (_json.containsKey("reportedProgress")) {
|
| - reportedProgress = new ApproximateReportedProgress.fromJson(_json["reportedProgress"]);
|
| + reportedProgress =
|
| + new ApproximateReportedProgress.fromJson(_json["reportedProgress"]);
|
| }
|
| if (_json.containsKey("requestedLeaseDuration")) {
|
| requestedLeaseDuration = _json["requestedLeaseDuration"];
|
| @@ -8317,7 +8668,8 @@ class WorkItemStatus {
|
| sourceFork = new SourceFork.fromJson(_json["sourceFork"]);
|
| }
|
| if (_json.containsKey("sourceOperationResponse")) {
|
| - sourceOperationResponse = new SourceOperationResponse.fromJson(_json["sourceOperationResponse"]);
|
| + sourceOperationResponse = new SourceOperationResponse.fromJson(
|
| + _json["sourceOperationResponse"]);
|
| }
|
| if (_json.containsKey("stopPosition")) {
|
| stopPosition = new Position.fromJson(_json["stopPosition"]);
|
| @@ -8328,12 +8680,14 @@ class WorkItemStatus {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (completed != null) {
|
| _json["completed"] = completed;
|
| }
|
| if (counterUpdates != null) {
|
| - _json["counterUpdates"] = counterUpdates.map((value) => (value).toJson()).toList();
|
| + _json["counterUpdates"] =
|
| + counterUpdates.map((value) => (value).toJson()).toList();
|
| }
|
| if (dynamicSourceSplit != null) {
|
| _json["dynamicSourceSplit"] = (dynamicSourceSplit).toJson();
|
| @@ -8342,7 +8696,8 @@ class WorkItemStatus {
|
| _json["errors"] = errors.map((value) => (value).toJson()).toList();
|
| }
|
| if (metricUpdates != null) {
|
| - _json["metricUpdates"] = metricUpdates.map((value) => (value).toJson()).toList();
|
| + _json["metricUpdates"] =
|
| + metricUpdates.map((value) => (value).toJson()).toList();
|
| }
|
| if (progress != null) {
|
| _json["progress"] = (progress).toJson();
|
| @@ -8372,33 +8727,31 @@ class WorkItemStatus {
|
| }
|
| }
|
|
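| The report-index contract above can be sketched as follows: send the
| current index unchanged (even on retry), and advance only once the
| service responds (`sendReport` is a hypothetical transport call, not
| part of this library):
|
|     // Hypothetical transport call; a real worker would POST the status.
|     Future<WorkItemServiceState> sendReport(WorkItemStatus status) {
|       throw new UnimplementedError();
|     }
|
|     reportStatus(WorkItem item, WorkItemStatus status) async {
|       String index = item.initialReportIndex;
|       status.reportIndex = index;  // a retry resends this unchanged
|       WorkItemServiceState state = await sendReport(status);
|       // Only a response from the service advances the index.
|       index = state.nextReportIndex;
|     }
|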
|
| -/**
|
| - * WorkerHealthReport contains information about the health of a worker.
|
| - *
|
| - * The VM should be identified by the labels attached to the WorkerMessage that
|
| - * this health ping belongs to.
|
| - */
|
| +/// WorkerHealthReport contains information about the health of a worker.
|
| +///
|
| + /// The VM should be identified by the labels attached to the
|
| + /// WorkerMessage that this health ping belongs to.
|
| class WorkerHealthReport {
|
| - /**
|
| - * The pods running on the worker. See:
|
| - * http://kubernetes.io/v1.1/docs/api-reference/v1/definitions.html#_v1_pod
|
| - *
|
| - * This field is used by the worker to send the status of the indvidual
|
| - * containers running on each worker.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| + /// The pods running on the worker. See:
|
| + /// http://kubernetes.io/v1.1/docs/api-reference/v1/definitions.html#_v1_pod
|
| + ///
|
| +  /// This field is used by the worker to send the status of the individual
|
| + /// containers running on each worker.
|
| + ///
|
| +  /// The values for Object must be JSON objects. They can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.List<core.Map<core.String, core.Object>> pods;
|
| - /**
|
| - * The interval at which the worker is sending health reports.
|
| - * The default value of 0 should be interpreted as the field is not being
|
| - * explicitly set by the worker.
|
| - */
|
| +
|
| + /// The interval at which the worker is sending health reports.
|
| + /// The default value of 0 should be interpreted as the field is not being
|
| + /// explicitly set by the worker.
|
| core.String reportInterval;
|
| - /** Whether the VM is healthy. */
|
| +
|
| + /// Whether the VM is healthy.
|
| core.bool vmIsHealthy;
|
| - /** The time the VM was booted. */
|
| +
|
| + /// The time the VM was booted.
|
| core.String vmStartupTime;
|
|
|
| WorkerHealthReport();
|
| @@ -8419,7 +8772,8 @@ class WorkerHealthReport {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (pods != null) {
|
| _json["pods"] = pods;
|
| }
|
| @@ -8436,18 +8790,15 @@ class WorkerHealthReport {
|
| }
|
| }
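|
| A minimal sketch of populating this report on the worker side; the
|
| timestamp and interval values are assumptions, not defaults:
|
|     final health = new WorkerHealthReport()
|
|       ..vmIsHealthy = true
|
|       ..vmStartupTime = '2017-09-21T08:00:00Z' // hypothetical boot time
|
|       ..reportInterval = '30s'; // duration fields travel as strings
|
|     final payload = health.toJson(); // null fields are omitted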
|
|
|
| -/**
|
| - * WorkerHealthReportResponse contains information returned to the worker
|
| - * in response to a health ping.
|
| - */
|
| +/// WorkerHealthReportResponse contains information returned to the worker
|
| +/// in response to a health ping.
|
| class WorkerHealthReportResponse {
|
| - /**
|
| - * A positive value indicates the worker should change its reporting interval
|
| - * to the specified value.
|
| - *
|
| - * The default value of zero means no change in report rate is requested by
|
| - * the server.
|
| - */
|
| + /// A positive value indicates the worker should change its reporting
|
| +  /// interval to the specified value.
|
| + ///
|
| + /// The default value of zero means no change in report rate is requested by
|
| + /// the server.
|
| core.String reportInterval;
|
|
|
| WorkerHealthReportResponse();
|
| @@ -8459,7 +8810,8 @@ class WorkerHealthReportResponse {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (reportInterval != null) {
|
| _json["reportInterval"] = reportInterval;
|
| }
|
| @@ -8467,30 +8819,36 @@ class WorkerHealthReportResponse {
|
| }
|
| }
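|
| A hedged sketch of honoring the interval semantics described above, given
|
| a decoded response map `json`; the zero-duration spelling '0s' is an
|
| assumption:
|
|     final resp = new WorkerHealthReportResponse.fromJson(json);
|
|     if (resp.reportInterval != null && resp.reportInterval != '0s') {
|
|       // A positive value: switch the local report timer to this interval.
|
|     }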
|
|
|
| -/** WorkerMessage provides information to the backend about a worker. */
|
| +/// WorkerMessage provides information to the backend about a worker.
|
| class WorkerMessage {
|
| - /**
|
| - * Labels are used to group WorkerMessages.
|
| - * For example, a worker_message about a particular container
|
| - * might have the labels:
|
| - * { "JOB_ID": "2015-04-22",
|
| - * "WORKER_ID": "wordcount-vm-2015…"
|
| - * "CONTAINER_TYPE": "worker",
|
| - * "CONTAINER_ID": "ac1234def"}
|
| - * Label tags typically correspond to Label enum values. However, for ease
|
| - * of development other strings can be used as tags. LABEL_UNSPECIFIED should
|
| - * not be used here.
|
| - */
|
| + /// Labels are used to group WorkerMessages.
|
| + /// For example, a worker_message about a particular container
|
| + /// might have the labels:
|
| + /// { "JOB_ID": "2015-04-22",
|
| + /// "WORKER_ID": "wordcount-vm-2015…"
|
| + /// "CONTAINER_TYPE": "worker",
|
| + /// "CONTAINER_ID": "ac1234def"}
|
| + /// Label tags typically correspond to Label enum values. However, for ease
|
| + /// of development other strings can be used as tags. LABEL_UNSPECIFIED
|
| +  /// should not be used here.
|
| core.Map<core.String, core.String> labels;
|
| - /** The timestamp of the worker_message. */
|
| +
|
| + /// The timestamp of the worker_message.
|
| core.String time;
|
| - /** The health of a worker. */
|
| +
|
| + /// The health of a worker.
|
| WorkerHealthReport workerHealthReport;
|
| - /** A worker message code. */
|
| +
|
| + /// A worker message code.
|
| WorkerMessageCode workerMessageCode;
|
| - /** Resource metrics reported by workers. */
|
| +
|
| + /// Resource metrics reported by workers.
|
| ResourceUtilizationReport workerMetrics;
|
|
|
| + /// Shutdown notice by workers.
|
| + WorkerShutdownNotice workerShutdownNotice;
|
| +
|
| WorkerMessage();
|
|
|
| WorkerMessage.fromJson(core.Map _json) {
|
| @@ -8501,18 +8859,26 @@ class WorkerMessage {
|
| time = _json["time"];
|
| }
|
| if (_json.containsKey("workerHealthReport")) {
|
| - workerHealthReport = new WorkerHealthReport.fromJson(_json["workerHealthReport"]);
|
| + workerHealthReport =
|
| + new WorkerHealthReport.fromJson(_json["workerHealthReport"]);
|
| }
|
| if (_json.containsKey("workerMessageCode")) {
|
| - workerMessageCode = new WorkerMessageCode.fromJson(_json["workerMessageCode"]);
|
| + workerMessageCode =
|
| + new WorkerMessageCode.fromJson(_json["workerMessageCode"]);
|
| }
|
| if (_json.containsKey("workerMetrics")) {
|
| - workerMetrics = new ResourceUtilizationReport.fromJson(_json["workerMetrics"]);
|
| + workerMetrics =
|
| + new ResourceUtilizationReport.fromJson(_json["workerMetrics"]);
|
| + }
|
| + if (_json.containsKey("workerShutdownNotice")) {
|
| + workerShutdownNotice =
|
| + new WorkerShutdownNotice.fromJson(_json["workerShutdownNotice"]);
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (labels != null) {
|
| _json["labels"] = labels;
|
| }
|
| @@ -8528,60 +8894,60 @@ class WorkerMessage {
|
| if (workerMetrics != null) {
|
| _json["workerMetrics"] = (workerMetrics).toJson();
|
| }
|
| + if (workerShutdownNotice != null) {
|
| + _json["workerShutdownNotice"] = (workerShutdownNotice).toJson();
|
| + }
|
| return _json;
|
| }
|
| }
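|
| A sketch assembling a WorkerMessage with label keys taken from the doc
|
| comment above and the newly added shutdown-notice field; the timestamp is
|
| illustrative:
|
|     final msg = new WorkerMessage()
|
|       ..labels = {'JOB_ID': '2015-04-22', 'CONTAINER_TYPE': 'worker'}
|
|       ..time = '2017-09-21T08:05:00Z'
|
|       ..workerShutdownNotice =
|
|           (new WorkerShutdownNotice()..reason = 'PREEMPTION');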
|
|
|
| -/**
|
| - * A message code is used to report status and error messages to the service.
|
| - * The message codes are intended to be machine readable. The service will
|
| - * take care of translating these into user understandable messages if
|
| - * necessary.
|
| - *
|
| - * Example use cases:
|
| - * 1. Worker processes reporting successful startup.
|
| - * 2. Worker processes reporting specific errors (e.g. package staging
|
| - * failure).
|
| - */
|
| +/// A message code is used to report status and error messages to the service.
|
| +/// The message codes are intended to be machine readable. The service will
|
| +/// take care of translating these into user understandable messages if
|
| +/// necessary.
|
| +///
|
| +/// Example use cases:
|
| +/// 1. Worker processes reporting successful startup.
|
| +/// 2. Worker processes reporting specific errors (e.g. package staging
|
| +/// failure).
|
| class WorkerMessageCode {
|
| - /**
|
| - * The code is a string intended for consumption by a machine that identifies
|
| - * the type of message being sent.
|
| - * Examples:
|
| - * 1. "HARNESS_STARTED" might be used to indicate the worker harness has
|
| - * started.
|
| - * 2. "GCS_DOWNLOAD_ERROR" might be used to indicate an error downloading
|
| - * a GCS file as part of the boot process of one of the worker containers.
|
| - *
|
| - * This is a string and not an enum to make it easy to add new codes without
|
| - * waiting for an API change.
|
| - */
|
| + /// The code is a string intended for consumption by a machine that
|
| +  /// identifies the type of message being sent.
|
| + /// Examples:
|
| + /// 1. "HARNESS_STARTED" might be used to indicate the worker harness has
|
| + /// started.
|
| + /// 2. "GCS_DOWNLOAD_ERROR" might be used to indicate an error downloading
|
| + /// a GCS file as part of the boot process of one of the worker containers.
|
| + ///
|
| + /// This is a string and not an enum to make it easy to add new codes without
|
| + /// waiting for an API change.
|
| core.String code;
|
| - /**
|
| - * Parameters contains specific information about the code.
|
| - *
|
| - * This is a struct to allow parameters of different types.
|
| - *
|
| - * Examples:
|
| - * 1. For a "HARNESS_STARTED" message parameters might provide the name
|
| - * of the worker and additional data like timing information.
|
| - * 2. For a "GCS_DOWNLOAD_ERROR" parameters might contain fields listing
|
| - * the GCS objects being downloaded and fields containing errors.
|
| - *
|
| - * In general complex data structures should be avoided. If a worker
|
| - * needs to send a specific and complicated data structure then please
|
| - * consider defining a new proto and adding it to the data oneof in
|
| - * WorkerMessageResponse.
|
| - *
|
| - * Conventions:
|
| - * Parameters should only be used for information that isn't typically passed
|
| - * as a label.
|
| - * hostname and other worker identifiers should almost always be passed
|
| - * as labels since they will be included on most messages.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// Parameters contains specific information about the code.
|
| + ///
|
| + /// This is a struct to allow parameters of different types.
|
| + ///
|
| + /// Examples:
|
| + /// 1. For a "HARNESS_STARTED" message parameters might provide the name
|
| + /// of the worker and additional data like timing information.
|
| + /// 2. For a "GCS_DOWNLOAD_ERROR" parameters might contain fields listing
|
| + /// the GCS objects being downloaded and fields containing errors.
|
| + ///
|
| + /// In general complex data structures should be avoided. If a worker
|
| + /// needs to send a specific and complicated data structure then please
|
| + /// consider defining a new proto and adding it to the data oneof in
|
| + /// WorkerMessageResponse.
|
| + ///
|
| + /// Conventions:
|
| + /// Parameters should only be used for information that isn't typically
|
| +  /// passed as a label.
|
| + /// hostname and other worker identifiers should almost always be passed
|
| + /// as labels since they will be included on most messages.
|
| + ///
|
| +  /// The values for Object must be JSON objects. They can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Map<core.String, core.Object> parameters;
|
|
|
| WorkerMessageCode();
|
| @@ -8596,7 +8962,8 @@ class WorkerMessageCode {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (code != null) {
|
| _json["code"] = code;
|
| }
|
| @@ -8607,189 +8974,195 @@ class WorkerMessageCode {
|
| }
|
| }
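|
| A sketch of a startup report; the code string is the doc comment's own
|
| example, while the parameter keys are hypothetical:
|
|     final code = new WorkerMessageCode()
|
|       ..code = 'HARNESS_STARTED'
|
|       ..parameters = {'workerName': 'wordcount-vm-1', 'startupMs': 1200};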
|
|
|
| -/**
|
| - * A worker_message response allows the server to pass information to the
|
| - * sender.
|
| - */
|
| +/// A worker_message response allows the server to pass information to the
|
| +/// sender.
|
| class WorkerMessageResponse {
|
| - /** The service's response to a worker's health report. */
|
| + /// The service's response to a worker's health report.
|
| WorkerHealthReportResponse workerHealthReportResponse;
|
| - /** Service's response to reporting worker metrics (currently empty). */
|
| +
|
| + /// Service's response to reporting worker metrics (currently empty).
|
| ResourceUtilizationReportResponse workerMetricsResponse;
|
|
|
| + /// Service's response to shutdown notice (currently empty).
|
| + WorkerShutdownNoticeResponse workerShutdownNoticeResponse;
|
| +
|
| WorkerMessageResponse();
|
|
|
| WorkerMessageResponse.fromJson(core.Map _json) {
|
| if (_json.containsKey("workerHealthReportResponse")) {
|
| - workerHealthReportResponse = new WorkerHealthReportResponse.fromJson(_json["workerHealthReportResponse"]);
|
| + workerHealthReportResponse = new WorkerHealthReportResponse.fromJson(
|
| + _json["workerHealthReportResponse"]);
|
| }
|
| if (_json.containsKey("workerMetricsResponse")) {
|
| - workerMetricsResponse = new ResourceUtilizationReportResponse.fromJson(_json["workerMetricsResponse"]);
|
| + workerMetricsResponse = new ResourceUtilizationReportResponse.fromJson(
|
| + _json["workerMetricsResponse"]);
|
| + }
|
| + if (_json.containsKey("workerShutdownNoticeResponse")) {
|
| + workerShutdownNoticeResponse = new WorkerShutdownNoticeResponse.fromJson(
|
| + _json["workerShutdownNoticeResponse"]);
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (workerHealthReportResponse != null) {
|
| - _json["workerHealthReportResponse"] = (workerHealthReportResponse).toJson();
|
| + _json["workerHealthReportResponse"] =
|
| + (workerHealthReportResponse).toJson();
|
| }
|
| if (workerMetricsResponse != null) {
|
| _json["workerMetricsResponse"] = (workerMetricsResponse).toJson();
|
| }
|
| + if (workerShutdownNoticeResponse != null) {
|
| + _json["workerShutdownNoticeResponse"] =
|
| + (workerShutdownNoticeResponse).toJson();
|
| + }
|
| return _json;
|
| }
|
| }
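|
| A sketch of inspecting which sub-responses the service populated, given a
|
| decoded map `json`; per the comments above, the metrics and shutdown
|
| acknowledgements currently carry no fields:
|
|     final resp = new WorkerMessageResponse.fromJson(json);
|
|     if (resp.workerShutdownNoticeResponse != null) {
|
|       // The shutdown notice was acknowledged; there is no payload to read.
|
|     }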
|
|
|
| -/**
|
| - * Describes one particular pool of Cloud Dataflow workers to be
|
| - * instantiated by the Cloud Dataflow service in order to perform the
|
| - * computations required by a job. Note that a workflow job may use
|
| - * multiple pools, in order to match the various computational
|
| - * requirements of the various stages of the job.
|
| - */
|
| +/// Describes one particular pool of Cloud Dataflow workers to be
|
| +/// instantiated by the Cloud Dataflow service in order to perform the
|
| +/// computations required by a job. Note that a workflow job may use
|
| +/// multiple pools, in order to match the various computational
|
| +/// requirements of the various stages of the job.
|
| class WorkerPool {
|
| - /** Settings for autoscaling of this WorkerPool. */
|
| + /// Settings for autoscaling of this WorkerPool.
|
| AutoscalingSettings autoscalingSettings;
|
| - /** Data disks that are used by a VM in this workflow. */
|
| +
|
| + /// Data disks that are used by a VM in this workflow.
|
| core.List<Disk> dataDisks;
|
| - /**
|
| - * The default package set to install. This allows the service to
|
| - * select a default set of packages which are useful to worker
|
| - * harnesses written in a particular language.
|
| - * Possible string values are:
|
| - * - "DEFAULT_PACKAGE_SET_UNKNOWN" : The default set of packages to stage is
|
| - * unknown, or unspecified.
|
| - * - "DEFAULT_PACKAGE_SET_NONE" : Indicates that no packages should be staged
|
| - * at the worker unless
|
| - * explicitly specified by the job.
|
| - * - "DEFAULT_PACKAGE_SET_JAVA" : Stage packages typically useful to workers
|
| - * written in Java.
|
| - * - "DEFAULT_PACKAGE_SET_PYTHON" : Stage pacakges typically useful to workers
|
| - * written in Python.
|
| - */
|
| +
|
| + /// The default package set to install. This allows the service to
|
| + /// select a default set of packages which are useful to worker
|
| + /// harnesses written in a particular language.
|
| + /// Possible string values are:
|
| + /// - "DEFAULT_PACKAGE_SET_UNKNOWN" : The default set of packages to stage is
|
| + /// unknown, or unspecified.
|
| + /// - "DEFAULT_PACKAGE_SET_NONE" : Indicates that no packages should be
|
| +  /// staged at the worker unless explicitly specified by the job.
|
| + /// - "DEFAULT_PACKAGE_SET_JAVA" : Stage packages typically useful to workers
|
| + /// written in Java.
|
| +  /// - "DEFAULT_PACKAGE_SET_PYTHON" : Stage packages typically useful to
|
| + /// workers written in Python.
|
| core.String defaultPackageSet;
|
| - /**
|
| - * Size of root disk for VMs, in GB. If zero or unspecified, the service will
|
| - * attempt to choose a reasonable default.
|
| - */
|
| +
|
| + /// Size of root disk for VMs, in GB. If zero or unspecified, the service
|
| +  /// will attempt to choose a reasonable default.
|
| core.int diskSizeGb;
|
| - /** Fully qualified source image for disks. */
|
| +
|
| + /// Fully qualified source image for disks.
|
| core.String diskSourceImage;
|
| - /**
|
| - * Type of root disk for VMs. If empty or unspecified, the service will
|
| - * attempt to choose a reasonable default.
|
| - */
|
| +
|
| + /// Type of root disk for VMs. If empty or unspecified, the service will
|
| + /// attempt to choose a reasonable default.
|
| core.String diskType;
|
| - /**
|
| - * Configuration for VM IPs.
|
| - * Possible string values are:
|
| - * - "WORKER_IP_UNSPECIFIED" : The configuration is unknown, or unspecified.
|
| - * - "WORKER_IP_PUBLIC" : Workers should have public IP addresses.
|
| - * - "WORKER_IP_PRIVATE" : Workers should have private IP addresses.
|
| - */
|
| +
|
| + /// Configuration for VM IPs.
|
| + /// Possible string values are:
|
| + /// - "WORKER_IP_UNSPECIFIED" : The configuration is unknown, or unspecified.
|
| + /// - "WORKER_IP_PUBLIC" : Workers should have public IP addresses.
|
| + /// - "WORKER_IP_PRIVATE" : Workers should have private IP addresses.
|
| core.String ipConfiguration;
|
| - /**
|
| - * The kind of the worker pool; currently only `harness` and `shuffle`
|
| - * are supported.
|
| - */
|
| +
|
| + /// The kind of the worker pool; currently only `harness` and `shuffle`
|
| + /// are supported.
|
| core.String kind;
|
| - /**
|
| - * Machine type (e.g. "n1-standard-1"). If empty or unspecified, the
|
| - * service will attempt to choose a reasonable default.
|
| - */
|
| +
|
| + /// Machine type (e.g. "n1-standard-1"). If empty or unspecified, the
|
| + /// service will attempt to choose a reasonable default.
|
| core.String machineType;
|
| - /** Metadata to set on the Google Compute Engine VMs. */
|
| +
|
| + /// Metadata to set on the Google Compute Engine VMs.
|
| core.Map<core.String, core.String> metadata;
|
| - /**
|
| - * Network to which VMs will be assigned. If empty or unspecified,
|
| - * the service will use the network "default".
|
| - */
|
| +
|
| + /// Network to which VMs will be assigned. If empty or unspecified,
|
| + /// the service will use the network "default".
|
| core.String network;
|
| - /**
|
| - * The number of threads per worker harness. If empty or unspecified, the
|
| - * service will choose a number of threads (according to the number of cores
|
| - * on the selected machine type for batch, or 1 by convention for streaming).
|
| - */
|
| +
|
| + /// The number of threads per worker harness. If empty or unspecified, the
|
| + /// service will choose a number of threads (according to the number of cores
|
| + /// on the selected machine type for batch, or 1 by convention for
|
| + /// streaming).
|
| core.int numThreadsPerWorker;
|
| - /**
|
| - * Number of Google Compute Engine workers in this pool needed to
|
| - * execute the job. If zero or unspecified, the service will
|
| - * attempt to choose a reasonable default.
|
| - */
|
| +
|
| + /// Number of Google Compute Engine workers in this pool needed to
|
| + /// execute the job. If zero or unspecified, the service will
|
| + /// attempt to choose a reasonable default.
|
| core.int numWorkers;
|
| - /**
|
| - * The action to take on host maintenance, as defined by the Google
|
| - * Compute Engine API.
|
| - */
|
| +
|
| + /// The action to take on host maintenance, as defined by the Google
|
| + /// Compute Engine API.
|
| core.String onHostMaintenance;
|
| - /** Packages to be installed on workers. */
|
| +
|
| + /// Packages to be installed on workers.
|
| core.List<Package> packages;
|
| - /**
|
| - * Extra arguments for this worker pool.
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| +
|
| + /// Extra arguments for this worker pool.
|
| + ///
|
| +  /// The values for Object must be JSON objects. They can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Map<core.String, core.Object> poolArgs;
|
| - /**
|
| - * Subnetwork to which VMs will be assigned, if desired. Expected to be of
|
| - * the form "regions/REGION/subnetworks/SUBNETWORK".
|
| - */
|
| +
|
| + /// Subnetwork to which VMs will be assigned, if desired. Expected to be of
|
| + /// the form "regions/REGION/subnetworks/SUBNETWORK".
|
| core.String subnetwork;
|
| - /**
|
| - * Settings passed through to Google Compute Engine workers when
|
| - * using the standard Dataflow task runner. Users should ignore
|
| - * this field.
|
| - */
|
| +
|
| + /// Settings passed through to Google Compute Engine workers when
|
| + /// using the standard Dataflow task runner. Users should ignore
|
| + /// this field.
|
| TaskRunnerSettings taskrunnerSettings;
|
| - /**
|
| - * Sets the policy for determining when to turndown worker pool.
|
| - * Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and
|
| - * `TEARDOWN_NEVER`.
|
| - * `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether
|
| - * the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down
|
| - * if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn
|
| - * down.
|
| - *
|
| - * If the workers are not torn down by the service, they will
|
| - * continue to run and use Google Compute Engine VM resources in the
|
| - * user's project until they are explicitly terminated by the user.
|
| - * Because of this, Google recommends using the `TEARDOWN_ALWAYS`
|
| - * policy except for small, manually supervised test jobs.
|
| - *
|
| - * If unknown or unspecified, the service will attempt to choose a reasonable
|
| - * default.
|
| - * Possible string values are:
|
| - * - "TEARDOWN_POLICY_UNKNOWN" : The teardown policy isn't specified, or is
|
| - * unknown.
|
| - * - "TEARDOWN_ALWAYS" : Always teardown the resource.
|
| - * - "TEARDOWN_ON_SUCCESS" : Teardown the resource on success. This is useful
|
| - * for debugging
|
| - * failures.
|
| - * - "TEARDOWN_NEVER" : Never teardown the resource. This is useful for
|
| - * debugging and
|
| - * development.
|
| - */
|
| +
|
| +  /// Sets the policy for determining when to turn down the worker pool.
|
| + /// Allowed values are: `TEARDOWN_ALWAYS`, `TEARDOWN_ON_SUCCESS`, and
|
| + /// `TEARDOWN_NEVER`.
|
| +  /// `TEARDOWN_ALWAYS` means workers are always torn down regardless of whether
|
| +  /// the job succeeds. `TEARDOWN_ON_SUCCESS` means workers are torn down
|
| + /// if the job succeeds. `TEARDOWN_NEVER` means the workers are never torn
|
| + /// down.
|
| + ///
|
| + /// If the workers are not torn down by the service, they will
|
| + /// continue to run and use Google Compute Engine VM resources in the
|
| + /// user's project until they are explicitly terminated by the user.
|
| + /// Because of this, Google recommends using the `TEARDOWN_ALWAYS`
|
| + /// policy except for small, manually supervised test jobs.
|
| + ///
|
| + /// If unknown or unspecified, the service will attempt to choose a
|
| +  /// reasonable default.
|
| + /// Possible string values are:
|
| + /// - "TEARDOWN_POLICY_UNKNOWN" : The teardown policy isn't specified, or is
|
| + /// unknown.
|
| + /// - "TEARDOWN_ALWAYS" : Always teardown the resource.
|
| + /// - "TEARDOWN_ON_SUCCESS" : Teardown the resource on success. This is
|
| +  /// useful for debugging failures.
|
| + /// - "TEARDOWN_NEVER" : Never teardown the resource. This is useful for
|
| +  /// debugging and development.
|
| core.String teardownPolicy;
|
| - /**
|
| - * Required. Docker container image that executes the Cloud Dataflow worker
|
| - * harness, residing in Google Container Registry.
|
| - */
|
| +
|
| + /// Required. Docker container image that executes the Cloud Dataflow worker
|
| + /// harness, residing in Google Container Registry.
|
| core.String workerHarnessContainerImage;
|
| - /**
|
| - * Zone to run the worker pools in. If empty or unspecified, the service
|
| - * will attempt to choose a reasonable default.
|
| - */
|
| +
|
| + /// Zone to run the worker pools in. If empty or unspecified, the service
|
| + /// will attempt to choose a reasonable default.
|
| core.String zone;
|
|
|
| WorkerPool();
|
|
|
| WorkerPool.fromJson(core.Map _json) {
|
| if (_json.containsKey("autoscalingSettings")) {
|
| - autoscalingSettings = new AutoscalingSettings.fromJson(_json["autoscalingSettings"]);
|
| + autoscalingSettings =
|
| + new AutoscalingSettings.fromJson(_json["autoscalingSettings"]);
|
| }
|
| if (_json.containsKey("dataDisks")) {
|
| - dataDisks = _json["dataDisks"].map((value) => new Disk.fromJson(value)).toList();
|
| + dataDisks =
|
| + _json["dataDisks"].map((value) => new Disk.fromJson(value)).toList();
|
| }
|
| if (_json.containsKey("defaultPackageSet")) {
|
| defaultPackageSet = _json["defaultPackageSet"];
|
| @@ -8828,7 +9201,9 @@ class WorkerPool {
|
| onHostMaintenance = _json["onHostMaintenance"];
|
| }
|
| if (_json.containsKey("packages")) {
|
| - packages = _json["packages"].map((value) => new Package.fromJson(value)).toList();
|
| + packages = _json["packages"]
|
| + .map((value) => new Package.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("poolArgs")) {
|
| poolArgs = _json["poolArgs"];
|
| @@ -8837,7 +9212,8 @@ class WorkerPool {
|
| subnetwork = _json["subnetwork"];
|
| }
|
| if (_json.containsKey("taskrunnerSettings")) {
|
| - taskrunnerSettings = new TaskRunnerSettings.fromJson(_json["taskrunnerSettings"]);
|
| + taskrunnerSettings =
|
| + new TaskRunnerSettings.fromJson(_json["taskrunnerSettings"]);
|
| }
|
| if (_json.containsKey("teardownPolicy")) {
|
| teardownPolicy = _json["teardownPolicy"];
|
| @@ -8851,7 +9227,8 @@ class WorkerPool {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (autoscalingSettings != null) {
|
| _json["autoscalingSettings"] = (autoscalingSettings).toJson();
|
| }
|
| @@ -8919,45 +9296,42 @@ class WorkerPool {
|
| }
|
| }
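|
| A sketch of a small batch pool; the machine type and teardown policy are
|
| the doc comments' own examples, the zone and worker count are hypothetical:
|
|     final pool = new WorkerPool()
|
|       ..kind = 'harness'
|
|       ..numWorkers = 3
|
|       ..machineType = 'n1-standard-1'
|
|       ..teardownPolicy = 'TEARDOWN_ALWAYS' // recommended outside test jobs
|
|       ..zone = 'us-central1-f';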
|
|
|
| -/** Provides data to pass through to the worker harness. */
|
| +/// Provides data to pass through to the worker harness.
|
| class WorkerSettings {
|
| - /**
|
| - * The base URL for accessing Google Cloud APIs.
|
| - *
|
| - * When workers access Google Cloud APIs, they logically do so via
|
| - * relative URLs. If this field is specified, it supplies the base
|
| - * URL to use for resolving these relative URLs. The normative
|
| - * algorithm used is defined by RFC 1808, "Relative Uniform Resource
|
| - * Locators".
|
| - *
|
| - * If not specified, the default value is "http://www.googleapis.com/"
|
| - */
|
| + /// The base URL for accessing Google Cloud APIs.
|
| + ///
|
| + /// When workers access Google Cloud APIs, they logically do so via
|
| + /// relative URLs. If this field is specified, it supplies the base
|
| + /// URL to use for resolving these relative URLs. The normative
|
| + /// algorithm used is defined by RFC 1808, "Relative Uniform Resource
|
| + /// Locators".
|
| + ///
|
| + /// If not specified, the default value is "http://www.googleapis.com/"
|
| core.String baseUrl;
|
| - /** Whether to send work progress updates to the service. */
|
| +
|
| + /// Whether to send work progress updates to the service.
|
| core.bool reportingEnabled;
|
| - /**
|
| - * The Cloud Dataflow service path relative to the root URL, for example,
|
| - * "dataflow/v1b3/projects".
|
| - */
|
| +
|
| + /// The Cloud Dataflow service path relative to the root URL, for example,
|
| + /// "dataflow/v1b3/projects".
|
| core.String servicePath;
|
| - /**
|
| - * The Shuffle service path relative to the root URL, for example,
|
| - * "shuffle/v1beta1".
|
| - */
|
| +
|
| + /// The Shuffle service path relative to the root URL, for example,
|
| + /// "shuffle/v1beta1".
|
| core.String shuffleServicePath;
|
| - /**
|
| - * The prefix of the resources the system should use for temporary
|
| - * storage.
|
| - *
|
| - * The supported resource type is:
|
| - *
|
| - * Google Cloud Storage:
|
| - *
|
| - * storage.googleapis.com/{bucket}/{object}
|
| - * bucket.storage.googleapis.com/{object}
|
| - */
|
| +
|
| + /// The prefix of the resources the system should use for temporary
|
| + /// storage.
|
| + ///
|
| + /// The supported resource type is:
|
| + ///
|
| + /// Google Cloud Storage:
|
| + ///
|
| + /// storage.googleapis.com/{bucket}/{object}
|
| + /// bucket.storage.googleapis.com/{object}
|
| core.String tempStoragePrefix;
|
| - /** The ID of the worker running this pipeline. */
|
| +
|
| + /// The ID of the worker running this pipeline.
|
| core.String workerId;
|
|
|
| WorkerSettings();
|
| @@ -8984,7 +9358,8 @@ class WorkerSettings {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (baseUrl != null) {
|
| _json["baseUrl"] = baseUrl;
|
| }
|
| @@ -9007,14 +9382,54 @@ class WorkerSettings {
|
| }
|
| }
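|
| A sketch using the documented default base URL and the path forms given
|
| above; the bucket name is an assumption:
|
|     final settings = new WorkerSettings()
|
|       ..baseUrl = 'http://www.googleapis.com/'
|
|       ..servicePath = 'dataflow/v1b3/projects'
|
|       ..tempStoragePrefix = 'storage.googleapis.com/my-bucket/tmp';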
|
|
|
| -/**
|
| - * An instruction that writes records.
|
| - * Takes one input, produces no outputs.
|
| - */
|
| +/// Shutdown notification from workers. This is to be sent by the shutdown
|
| +/// script of the worker VM so that the backend knows that the VM is being
|
| +/// shut down.
|
| +class WorkerShutdownNotice {
|
| + /// Optional reason to be attached for the shutdown notice.
|
| +  /// For example: "PREEMPTION" would indicate the VM is being shut down
|
| +  /// because of preemption. Other possible reasons may be added in the future.
|
| + core.String reason;
|
| +
|
| + WorkerShutdownNotice();
|
| +
|
| + WorkerShutdownNotice.fromJson(core.Map _json) {
|
| + if (_json.containsKey("reason")) {
|
| + reason = _json["reason"];
|
| + }
|
| + }
|
| +
|
| + core.Map<core.String, core.Object> toJson() {
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| + if (reason != null) {
|
| + _json["reason"] = reason;
|
| + }
|
| + return _json;
|
| + }
|
| +}
|
| +
|
| +/// Service-side response to WorkerMessage issuing shutdown notice.
|
| +class WorkerShutdownNoticeResponse {
|
| + WorkerShutdownNoticeResponse();
|
| +
|
| + WorkerShutdownNoticeResponse.fromJson(core.Map _json) {}
|
| +
|
| + core.Map<core.String, core.Object> toJson() {
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| + return _json;
|
| + }
|
| +}
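|
| A sketch of the shutdown handshake these two new classes carry;
|
| "PREEMPTION" is the reason string given in the doc comment above:
|
|     // Shutdown-script side: the notice rides inside a WorkerMessage.
|
|     final notice = new WorkerShutdownNotice()..reason = 'PREEMPTION';
|
|     // Service side: the acknowledgement deserializes to an empty object.
|
|     final ack = new WorkerShutdownNoticeResponse.fromJson({});
|
|     assert(ack.toJson().isEmpty);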
|
| +
|
| +/// An instruction that writes records.
|
| +/// Takes one input, produces no outputs.
|
| class WriteInstruction {
|
| - /** The input. */
|
| + /// The input.
|
| InstructionInput input;
|
| - /** The sink to write to. */
|
| +
|
| + /// The sink to write to.
|
| Sink sink;
|
|
|
| WriteInstruction();
|
| @@ -9029,7 +9444,8 @@ class WriteInstruction {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (input != null) {
|
| _json["input"] = (input).toJson();
|
| }
|
|
|