| Index: generated/googleapis/lib/bigquery/v2.dart
|
| diff --git a/generated/googleapis/lib/bigquery/v2.dart b/generated/googleapis/lib/bigquery/v2.dart
|
| index 11c817e019d2f2796b7884116034a117f7e37808..1de1e9a2d2d1a4f098a37c9e1d8201f55f0c1cac 100644
|
| --- a/generated/googleapis/lib/bigquery/v2.dart
|
| +++ b/generated/googleapis/lib/bigquery/v2.dart
|
| @@ -10,36 +10,47 @@ import 'dart:convert' as convert;
|
| import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons;
|
| import 'package:http/http.dart' as http;
|
|
|
| -export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' show
|
| - ApiRequestError, DetailedApiRequestError, Media, UploadOptions,
|
| - ResumableUploadOptions, DownloadOptions, PartialDownloadOptions,
|
| - ByteRange;
|
| +export 'package:_discoveryapis_commons/_discoveryapis_commons.dart'
|
| + show
|
| + ApiRequestError,
|
| + DetailedApiRequestError,
|
| + Media,
|
| + UploadOptions,
|
| + ResumableUploadOptions,
|
| + DownloadOptions,
|
| + PartialDownloadOptions,
|
| + ByteRange;
|
|
|
| const core.String USER_AGENT = 'dart-api-client bigquery/v2';
|
|
|
| -/** A data platform for customers to create, manage, share and query data. */
|
| +/// A data platform for customers to create, manage, share and query data.
|
| class BigqueryApi {
|
| - /** View and manage your data in Google BigQuery */
|
| + /// View and manage your data in Google BigQuery
|
| static const BigqueryScope = "https://www.googleapis.com/auth/bigquery";
|
|
|
| - /** Insert data into Google BigQuery */
|
| - static const BigqueryInsertdataScope = "https://www.googleapis.com/auth/bigquery.insertdata";
|
| + /// Insert data into Google BigQuery
|
| + static const BigqueryInsertdataScope =
|
| + "https://www.googleapis.com/auth/bigquery.insertdata";
|
|
|
| - /** View and manage your data across Google Cloud Platform services */
|
| - static const CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform";
|
| + /// View and manage your data across Google Cloud Platform services
|
| + static const CloudPlatformScope =
|
| + "https://www.googleapis.com/auth/cloud-platform";
|
|
|
| - /** View your data across Google Cloud Platform services */
|
| - static const CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only";
|
| + /// View your data across Google Cloud Platform services
|
| + static const CloudPlatformReadOnlyScope =
|
| + "https://www.googleapis.com/auth/cloud-platform.read-only";
|
|
|
| - /** Manage your data and permissions in Google Cloud Storage */
|
| - static const DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control";
|
| + /// Manage your data and permissions in Google Cloud Storage
|
| + static const DevstorageFullControlScope =
|
| + "https://www.googleapis.com/auth/devstorage.full_control";
|
|
|
| - /** View your data in Google Cloud Storage */
|
| - static const DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only";
|
| -
|
| - /** Manage your data in Google Cloud Storage */
|
| - static const DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write";
|
| + /// View your data in Google Cloud Storage
|
| + static const DevstorageReadOnlyScope =
|
| + "https://www.googleapis.com/auth/devstorage.read_only";
|
|
|
| + /// Manage your data in Google Cloud Storage
|
| + static const DevstorageReadWriteScope =
|
| + "https://www.googleapis.com/auth/devstorage.read_write";
|
|
|
| final commons.ApiRequester _requester;
|
|
|
| @@ -49,39 +60,40 @@ class BigqueryApi {
|
| TabledataResourceApi get tabledata => new TabledataResourceApi(_requester);
|
| TablesResourceApi get tables => new TablesResourceApi(_requester);
|
|
|
| - BigqueryApi(http.Client client, {core.String rootUrl: "https://www.googleapis.com/", core.String servicePath: "bigquery/v2/"}) :
|
| - _requester = new commons.ApiRequester(client, rootUrl, servicePath, USER_AGENT);
|
| + BigqueryApi(http.Client client,
|
| + {core.String rootUrl: "https://www.googleapis.com/",
|
| + core.String servicePath: "bigquery/v2/"})
|
| + : _requester =
|
| + new commons.ApiRequester(client, rootUrl, servicePath, USER_AGENT);
|
| }
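The reformatted constructor keeps the same rootUrl and servicePath defaults. For reference, a minimal wiring sketch; package:googleapis_auth, the key-file path, and the scope choice are assumptions outside this diff, and the later sketches reuse this `api` instance from inside an async function:

    import 'dart:io' show File;

    import 'package:googleapis/bigquery/v2.dart' as bq;
    import 'package:googleapis_auth/auth_io.dart' as auth;

    main() async {
      // Load a service-account key (placeholder path) and build an
      // authenticated client restricted to the BigQuery scope.
      var credentials = new auth.ServiceAccountCredentials.fromJson(
          new File('service-account.json').readAsStringSync());
      var client = await auth.clientViaServiceAccount(
          credentials, [bq.BigqueryApi.BigqueryScope]);
      var api = new bq.BigqueryApi(client);

      // The resource getters declared above are now usable.
      var projects = await api.projects.list(maxResults: 10);
      projects.projects.forEach((p) => print(p.id));

      client.close();
    }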
|
|
|
| -
|
| class DatasetsResourceApi {
|
| final commons.ApiRequester _requester;
|
|
|
| - DatasetsResourceApi(commons.ApiRequester client) :
|
| - _requester = client;
|
| -
|
| - /**
|
| - * Deletes the dataset specified by the datasetId value. Before you can delete
|
| - * a dataset, you must delete all its tables, either manually or by specifying
|
| - * deleteContents. Immediately after deletion, you can create another dataset
|
| - * with the same name.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Project ID of the dataset being deleted
|
| - *
|
| - * [datasetId] - Dataset ID of dataset being deleted
|
| - *
|
| - * [deleteContents] - If True, delete all the tables in the dataset. If False
|
| - * and the dataset contains tables, the request will fail. Default is False
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future delete(core.String projectId, core.String datasetId, {core.bool deleteContents}) {
|
| + DatasetsResourceApi(commons.ApiRequester client) : _requester = client;
|
| +
|
| + /// Deletes the dataset specified by the datasetId value. Before you can
|
| + /// delete a dataset, you must delete all its tables, either manually or by
|
| + /// specifying deleteContents. Immediately after deletion, you can create
|
| + /// another dataset with the same name.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Project ID of the dataset being deleted
|
| + ///
|
| + /// [datasetId] - Dataset ID of dataset being deleted
|
| + ///
|
| + /// [deleteContents] - If True, delete all the tables in the dataset. If
|
| + /// False and the dataset contains tables, the request will fail. Default is
|
| + /// False
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future delete(core.String projectId, core.String datasetId,
|
| + {core.bool deleteContents}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -101,35 +113,35 @@ class DatasetsResourceApi {
|
|
|
| _downloadOptions = null;
|
|
|
| - _url = 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/datasets/' + commons.Escaper.ecapeVariable('$datasetId');
|
| -
|
| - var _response = _requester.request(_url,
|
| - "DELETE",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + _url = 'projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/datasets/' +
|
| + commons.Escaper.ecapeVariable('$datasetId');
|
| +
|
| + var _response = _requester.request(_url, "DELETE",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => null);
|
| }
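The rewrapped signature keeps deleteContents as an optional named parameter; a usage sketch with placeholder identifiers (`api` is the instance from the earlier sketch):

    // Drop a dataset along with any tables it still contains.
    await api.datasets.delete('my-project', 'scratch_dataset',
        deleteContents: true);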
|
|
|
| - /**
|
| - * Returns the dataset specified by datasetID.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Project ID of the requested dataset
|
| - *
|
| - * [datasetId] - Dataset ID of the requested dataset
|
| - *
|
| - * Completes with a [Dataset].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| + /// Returns the dataset specified by datasetID.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Project ID of the requested dataset
|
| + ///
|
| + /// [datasetId] - Dataset ID of the requested dataset
|
| + ///
|
| + /// Completes with a [Dataset].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| async.Future<Dataset> get(core.String projectId, core.String datasetId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| @@ -145,35 +157,35 @@ class DatasetsResourceApi {
|
| throw new core.ArgumentError("Parameter datasetId is required.");
|
| }
|
|
|
| - _url = 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/datasets/' + commons.Escaper.ecapeVariable('$datasetId');
|
| + _url = 'projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/datasets/' +
|
| + commons.Escaper.ecapeVariable('$datasetId');
|
|
|
| - var _response = _requester.request(_url,
|
| - "GET",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "GET",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new Dataset.fromJson(data));
|
| }
|
|
|
| - /**
|
| - * Creates a new empty dataset.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Project ID of the new dataset
|
| - *
|
| - * Completes with a [Dataset].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| + /// Creates a new empty dataset.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Project ID of the new dataset
|
| + ///
|
| + /// Completes with a [Dataset].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| async.Future<Dataset> insert(Dataset request, core.String projectId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| @@ -189,47 +201,50 @@ class DatasetsResourceApi {
|
| throw new core.ArgumentError("Parameter projectId is required.");
|
| }
|
|
|
| - _url = 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/datasets';
|
| + _url =
|
| + 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/datasets';
|
|
|
| - var _response = _requester.request(_url,
|
| - "POST",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "POST",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new Dataset.fromJson(data));
|
| }
|
|
|
| - /**
|
| - * Lists all datasets in the specified project to which you have been granted
|
| - * the READER dataset role.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Project ID of the datasets to be listed
|
| - *
|
| - * [all] - Whether to list all datasets, including hidden ones
|
| - *
|
| - * [filter] - An expression for filtering the results of the request by label.
|
| - * The syntax is "labels.<name>[:<value>]". Multiple filters can be ANDed
|
| - * together by connecting with a space. Example: "labels.department:receiving
|
| - * labels.active". See Filtering datasets using labels for details.
|
| - *
|
| - * [maxResults] - The maximum number of results to return
|
| - *
|
| - * [pageToken] - Page token, returned by a previous call, to request the next
|
| - * page of results
|
| - *
|
| - * Completes with a [DatasetList].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<DatasetList> list(core.String projectId, {core.bool all, core.String filter, core.int maxResults, core.String pageToken}) {
|
| + /// Lists all datasets in the specified project to which you have been
|
| + /// granted the READER dataset role.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Project ID of the datasets to be listed
|
| + ///
|
| + /// [all] - Whether to list all datasets, including hidden ones
|
| + ///
|
| + /// [filter] - An expression for filtering the results of the request by
|
| + /// label. The syntax is "labels.<name>[:<value>]". Multiple filters can be
|
| + /// ANDed together by connecting with a space. Example:
|
| + /// "labels.department:receiving labels.active". See Filtering datasets using
|
| + /// labels for details.
|
| + ///
|
| + /// [maxResults] - The maximum number of results to return
|
| + ///
|
| + /// [pageToken] - Page token, returned by a previous call, to request the
|
| + /// next page of results
|
| + ///
|
| + /// Completes with a [DatasetList].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<DatasetList> list(core.String projectId,
|
| + {core.bool all,
|
| + core.String filter,
|
| + core.int maxResults,
|
| + core.String pageToken}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -253,41 +268,40 @@ class DatasetsResourceApi {
|
| _queryParams["pageToken"] = [pageToken];
|
| }
|
|
|
| - _url = 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/datasets';
|
| + _url =
|
| + 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/datasets';
|
|
|
| - var _response = _requester.request(_url,
|
| - "GET",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "GET",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new DatasetList.fromJson(data));
|
| }
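A sketch of paging through the results this method returns; the label filter and identifiers are illustrative:

    // Walk every dataset carrying the department:receiving label.
    String pageToken;
    do {
      var page = await api.datasets.list('my-project',
          filter: 'labels.department:receiving',
          maxResults: 100,
          pageToken: pageToken);
      page.datasets?.forEach((d) => print(d.id));
      pageToken = page.nextPageToken;
    } while (pageToken != null);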
|
|
|
| - /**
|
| - * Updates information in an existing dataset. The update method replaces the
|
| - * entire dataset resource, whereas the patch method only replaces fields that
|
| - * are provided in the submitted dataset resource. This method supports patch
|
| - * semantics.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Project ID of the dataset being updated
|
| - *
|
| - * [datasetId] - Dataset ID of the dataset being updated
|
| - *
|
| - * Completes with a [Dataset].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<Dataset> patch(Dataset request, core.String projectId, core.String datasetId) {
|
| + /// Updates information in an existing dataset. The update method replaces
|
| + /// the entire dataset resource, whereas the patch method only replaces
|
| + /// fields that are provided in the submitted dataset resource. This method
|
| + /// supports patch semantics.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Project ID of the dataset being updated
|
| + ///
|
| + /// [datasetId] - Dataset ID of the dataset being updated
|
| + ///
|
| + /// Completes with a [Dataset].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<Dataset> patch(
|
| + Dataset request, core.String projectId, core.String datasetId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -305,40 +319,41 @@ class DatasetsResourceApi {
|
| throw new core.ArgumentError("Parameter datasetId is required.");
|
| }
|
|
|
| - _url = 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/datasets/' + commons.Escaper.ecapeVariable('$datasetId');
|
| + _url = 'projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/datasets/' +
|
| + commons.Escaper.ecapeVariable('$datasetId');
|
|
|
| - var _response = _requester.request(_url,
|
| - "PATCH",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "PATCH",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new Dataset.fromJson(data));
|
| }
|
|
|
| - /**
|
| - * Updates information in an existing dataset. The update method replaces the
|
| - * entire dataset resource, whereas the patch method only replaces fields that
|
| - * are provided in the submitted dataset resource.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Project ID of the dataset being updated
|
| - *
|
| - * [datasetId] - Dataset ID of the dataset being updated
|
| - *
|
| - * Completes with a [Dataset].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<Dataset> update(Dataset request, core.String projectId, core.String datasetId) {
|
| + /// Updates information in an existing dataset. The update method replaces
|
| + /// the entire dataset resource, whereas the patch method only replaces
|
| + /// fields that are provided in the submitted dataset resource.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Project ID of the dataset being updated
|
| + ///
|
| + /// [datasetId] - Dataset ID of the dataset being updated
|
| + ///
|
| + /// Completes with a [Dataset].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<Dataset> update(
|
| + Dataset request, core.String projectId, core.String datasetId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -356,47 +371,45 @@ class DatasetsResourceApi {
|
| throw new core.ArgumentError("Parameter datasetId is required.");
|
| }
|
|
|
| - _url = 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/datasets/' + commons.Escaper.ecapeVariable('$datasetId');
|
| + _url = 'projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/datasets/' +
|
| + commons.Escaper.ecapeVariable('$datasetId');
|
|
|
| - var _response = _requester.request(_url,
|
| - "PUT",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "PUT",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new Dataset.fromJson(data));
|
| }
|
| -
|
| }
|
|
|
| -
|
| class JobsResourceApi {
|
| final commons.ApiRequester _requester;
|
|
|
| - JobsResourceApi(commons.ApiRequester client) :
|
| - _requester = client;
|
| -
|
| - /**
|
| - * Requests that a job be cancelled. This call will return immediately, and
|
| - * the client will need to poll for the job status to see if the cancel
|
| - * completed successfully. Cancelled jobs may still incur costs.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - [Required] Project ID of the job to cancel
|
| - *
|
| - * [jobId] - [Required] Job ID of the job to cancel
|
| - *
|
| - * Completes with a [JobCancelResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<JobCancelResponse> cancel(core.String projectId, core.String jobId) {
|
| + JobsResourceApi(commons.ApiRequester client) : _requester = client;
|
| +
|
| + /// Requests that a job be cancelled. This call will return immediately, and
|
| + /// the client will need to poll for the job status to see if the cancel
|
| + /// completed successfully. Cancelled jobs may still incur costs.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - [Required] Project ID of the job to cancel
|
| + ///
|
| + /// [jobId] - [Required] Job ID of the job to cancel
|
| + ///
|
| + /// Completes with a [JobCancelResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<JobCancelResponse> cancel(
|
| + core.String projectId, core.String jobId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -411,37 +424,38 @@ class JobsResourceApi {
|
| throw new core.ArgumentError("Parameter jobId is required.");
|
| }
|
|
|
| - _url = 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId') + '/cancel';
|
| + _url = 'projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/jobs/' +
|
| + commons.Escaper.ecapeVariable('$jobId') +
|
| + '/cancel';
|
|
|
| - var _response = _requester.request(_url,
|
| - "POST",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "POST",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new JobCancelResponse.fromJson(data));
|
| }
|
|
|
| - /**
|
| - * Returns information about a specific job. Job information is available for
|
| - * a six month period after creation. Requires that you're the person who ran
|
| - * the job, or have the Is Owner project role.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - [Required] Project ID of the requested job
|
| - *
|
| - * [jobId] - [Required] Job ID of the requested job
|
| - *
|
| - * Completes with a [Job].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| + /// Returns information about a specific job. Job information is available
|
| + /// for a six month period after creation. Requires that you're the person
|
| + /// who ran the job, or have the Is Owner project role.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - [Required] Project ID of the requested job
|
| + ///
|
| + /// [jobId] - [Required] Job ID of the requested job
|
| + ///
|
| + /// Completes with a [Job].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| async.Future<Job> get(core.String projectId, core.String jobId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| @@ -457,47 +471,53 @@ class JobsResourceApi {
|
| throw new core.ArgumentError("Parameter jobId is required.");
|
| }
|
|
|
| - _url = 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs/' + commons.Escaper.ecapeVariable('$jobId');
|
| + _url = 'projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/jobs/' +
|
| + commons.Escaper.ecapeVariable('$jobId');
|
|
|
| - var _response = _requester.request(_url,
|
| - "GET",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "GET",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new Job.fromJson(data));
|
| }
|
|
|
| - /**
|
| - * Retrieves the results of a query job.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - [Required] Project ID of the query job
|
| - *
|
| - * [jobId] - [Required] Job ID of the query job
|
| - *
|
| - * [maxResults] - Maximum number of results to read
|
| - *
|
| - * [pageToken] - Page token, returned by a previous call, to request the next
|
| - * page of results
|
| - *
|
| - * [startIndex] - Zero-based index of the starting row
|
| - *
|
| - * [timeoutMs] - How long to wait for the query to complete, in milliseconds,
|
| - * before returning. Default is 10 seconds. If the timeout passes before the
|
| - * job completes, the 'jobComplete' field in the response will be false
|
| - *
|
| - * Completes with a [GetQueryResultsResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<GetQueryResultsResponse> getQueryResults(core.String projectId, core.String jobId, {core.int maxResults, core.String pageToken, core.String startIndex, core.int timeoutMs}) {
|
| + /// Retrieves the results of a query job.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - [Required] Project ID of the query job
|
| + ///
|
| + /// [jobId] - [Required] Job ID of the query job
|
| + ///
|
| + /// [maxResults] - Maximum number of results to read
|
| + ///
|
| + /// [pageToken] - Page token, returned by a previous call, to request the
|
| + /// next page of results
|
| + ///
|
| + /// [startIndex] - Zero-based index of the starting row
|
| + ///
|
| + /// [timeoutMs] - How long to wait for the query to complete, in
|
| + /// milliseconds, before returning. Default is 10 seconds. If the timeout
|
| + /// passes before the job completes, the 'jobComplete' field in the response
|
| + /// will be false
|
| + ///
|
| + /// Completes with a [GetQueryResultsResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<GetQueryResultsResponse> getQueryResults(
|
| + core.String projectId, core.String jobId,
|
| + {core.int maxResults,
|
| + core.String pageToken,
|
| + core.String startIndex,
|
| + core.int timeoutMs}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -524,41 +544,44 @@ class JobsResourceApi {
|
| _queryParams["timeoutMs"] = ["${timeoutMs}"];
|
| }
|
|
|
| - _url = 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/queries/' + commons.Escaper.ecapeVariable('$jobId');
|
| + _url = 'projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/queries/' +
|
| + commons.Escaper.ecapeVariable('$jobId');
|
|
|
| - var _response = _requester.request(_url,
|
| - "GET",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "GET",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new GetQueryResultsResponse.fromJson(data));
|
| }
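Because the documented timeoutMs default is 10 seconds, callers normally poll until jobComplete becomes true before reading rows; a sketch (jobId comes from a previously started query job):

    // Poll for completion, then read the first page of rows.
    var results;
    do {
      results = await api.jobs.getQueryResults('my-project', jobId,
          timeoutMs: 10000, maxResults: 500);
    } while (results.jobComplete != true);
    for (var row in results.rows) {
      print(row.f.map((cell) => cell.v).toList());
    }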
|
|
|
| - /**
|
| - * Starts a new asynchronous job. Requires the Can View project role.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Project ID of the project that will be billed for the job
|
| - *
|
| - * [uploadMedia] - The media to upload.
|
| - *
|
| - * [uploadOptions] - Options for the media upload. Streaming Media without the
|
| - * length being known ahead of time is only supported via resumable uploads.
|
| - *
|
| - * Completes with a [Job].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<Job> insert(Job request, core.String projectId, {commons.UploadOptions uploadOptions : commons.UploadOptions.Default, commons.Media uploadMedia}) {
|
| + /// Starts a new asynchronous job. Requires the Can View project role.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Project ID of the project that will be billed for the job
|
| + ///
|
| + /// [uploadMedia] - The media to upload.
|
| + ///
|
| + /// [uploadOptions] - Options for the media upload. Streaming Media without
|
| + /// the length being known ahead of time is only supported via resumable
|
| + /// uploads.
|
| + ///
|
| + /// Completes with a [Job].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<Job> insert(Job request, core.String projectId,
|
| + {commons.UploadOptions uploadOptions: commons.UploadOptions.Default,
|
| + commons.Media uploadMedia}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -573,63 +596,69 @@ class JobsResourceApi {
|
| throw new core.ArgumentError("Parameter projectId is required.");
|
| }
|
|
|
| - _uploadMedia = uploadMedia;
|
| - _uploadOptions = uploadOptions;
|
| + _uploadMedia = uploadMedia;
|
| + _uploadOptions = uploadOptions;
|
|
|
| if (_uploadMedia == null) {
|
| - _url = 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs';
|
| + _url =
|
| + 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs';
|
| } else if (_uploadOptions is commons.ResumableUploadOptions) {
|
| - _url = '/resumable/upload/bigquery/v2/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs';
|
| + _url = '/resumable/upload/bigquery/v2/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/jobs';
|
| } else {
|
| - _url = '/upload/bigquery/v2/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs';
|
| + _url = '/upload/bigquery/v2/projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/jobs';
|
| }
|
|
|
| -
|
| - var _response = _requester.request(_url,
|
| - "POST",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "POST",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new Job.fromJson(data));
|
| }
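When uploadMedia is omitted the first URL branch above is taken; a sketch of starting an asynchronous query job that way, using the Job message classes generated later in this file:

    // Start an asynchronous query job (no media upload involved).
    var job = new bq.Job()
      ..configuration = (new bq.JobConfiguration()
        ..query = (new bq.JobConfigurationQuery()
          ..query = 'SELECT COUNT(*) FROM `my-project.my_dataset.events`'
          ..useLegacySql = false));
    job = await api.jobs.insert(job, 'my-project');
    print(job.jobReference.jobId);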
|
|
|
| - /**
|
| - * Lists all jobs that you started in the specified project. Job information
|
| - * is available for a six month period after creation. The job list is sorted
|
| - * in reverse chronological order, by job creation time. Requires the Can View
|
| - * project role, or the Is Owner project role if you set the allUsers
|
| - * property.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Project ID of the jobs to list
|
| - *
|
| - * [allUsers] - Whether to display jobs owned by all users in the project.
|
| - * Default false
|
| - *
|
| - * [maxResults] - Maximum number of results to return
|
| - *
|
| - * [pageToken] - Page token, returned by a previous call, to request the next
|
| - * page of results
|
| - *
|
| - * [projection] - Restrict information returned to a set of selected fields
|
| - * Possible string values are:
|
| - * - "full" : Includes all job data
|
| - * - "minimal" : Does not include the job configuration
|
| - *
|
| - * [stateFilter] - Filter for job state
|
| - *
|
| - * Completes with a [JobList].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<JobList> list(core.String projectId, {core.bool allUsers, core.int maxResults, core.String pageToken, core.String projection, core.List<core.String> stateFilter}) {
|
| + /// Lists all jobs that you started in the specified project. Job information
|
| + /// is available for a six month period after creation. The job list is
|
| + /// sorted in reverse chronological order, by job creation time. Requires the
|
| + /// Can View project role, or the Is Owner project role if you set the
|
| + /// allUsers property.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Project ID of the jobs to list
|
| + ///
|
| + /// [allUsers] - Whether to display jobs owned by all users in the project.
|
| + /// Default false
|
| + ///
|
| + /// [maxResults] - Maximum number of results to return
|
| + ///
|
| + /// [pageToken] - Page token, returned by a previous call, to request the
|
| + /// next page of results
|
| + ///
|
| + /// [projection] - Restrict information returned to a set of selected fields
|
| + /// Possible string values are:
|
| + /// - "full" : Includes all job data
|
| + /// - "minimal" : Does not include the job configuration
|
| + ///
|
| + /// [stateFilter] - Filter for job state
|
| + ///
|
| + /// Completes with a [JobList].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<JobList> list(core.String projectId,
|
| + {core.bool allUsers,
|
| + core.int maxResults,
|
| + core.String pageToken,
|
| + core.String projection,
|
| + core.List<core.String> stateFilter}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -658,35 +687,33 @@ class JobsResourceApi {
|
|
|
| _url = 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/jobs';
|
|
|
| - var _response = _requester.request(_url,
|
| - "GET",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "GET",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new JobList.fromJson(data));
|
| }
|
|
|
| - /**
|
| - * Runs a BigQuery SQL query synchronously and returns query results if the
|
| - * query completes within a specified timeout.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Project ID of the project billed for the query
|
| - *
|
| - * Completes with a [QueryResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<QueryResponse> query(QueryRequest request, core.String projectId) {
|
| + /// Runs a BigQuery SQL query synchronously and returns query results if the
|
| + /// query completes within a specified timeout.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Project ID of the project billed for the query
|
| + ///
|
| + /// Completes with a [QueryResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<QueryResponse> query(
|
| + QueryRequest request, core.String projectId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -701,45 +728,81 @@ class JobsResourceApi {
|
| throw new core.ArgumentError("Parameter projectId is required.");
|
| }
|
|
|
| - _url = 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/queries';
|
| + _url =
|
| + 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/queries';
|
|
|
| - var _response = _requester.request(_url,
|
| - "POST",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "POST",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new QueryResponse.fromJson(data));
|
| }
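The synchronous path uses the QueryRequest message generated later in this file; if the query outruns the timeout, jobComplete is false and the caller falls back to getQueryResults above. A sketch:

    // Run a short query synchronously and print the single cell it returns.
    var response = await api.jobs.query(
        new bq.QueryRequest()
          ..query = 'SELECT 1 AS answer'
          ..useLegacySql = false
          ..timeoutMs = 10000,
        'my-project');
    if (response.jobComplete == true) {
      print(response.rows.first.f.first.v);
    }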
|
| -
|
| }
|
|
|
| -
|
| class ProjectsResourceApi {
|
| final commons.ApiRequester _requester;
|
|
|
| - ProjectsResourceApi(commons.ApiRequester client) :
|
| - _requester = client;
|
| -
|
| - /**
|
| - * Lists all projects to which you have been granted any project role.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [maxResults] - Maximum number of results to return
|
| - *
|
| - * [pageToken] - Page token, returned by a previous call, to request the next
|
| - * page of results
|
| - *
|
| - * Completes with a [ProjectList].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| + ProjectsResourceApi(commons.ApiRequester client) : _requester = client;
|
| +
|
| + /// Returns the email address of the service account for your project used
|
| + /// for interactions with Google Cloud KMS.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Project ID for which the service account is requested.
|
| + ///
|
| + /// Completes with a [GetServiceAccountResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<GetServiceAccountResponse> getServiceAccount(
|
| + core.String projectId) {
|
| + var _url = null;
|
| + var _queryParams = new core.Map();
|
| + var _uploadMedia = null;
|
| + var _uploadOptions = null;
|
| + var _downloadOptions = commons.DownloadOptions.Metadata;
|
| + var _body = null;
|
| +
|
| + if (projectId == null) {
|
| + throw new core.ArgumentError("Parameter projectId is required.");
|
| + }
|
| +
|
| + _url = 'projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/serviceAccount';
|
| +
|
| + var _response = _requester.request(_url, "GET",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| + return _response
|
| + .then((data) => new GetServiceAccountResponse.fromJson(data));
|
| + }
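The newly added getServiceAccount endpoint is a plain GET; a sketch of fetching the service-account email it reports for Cloud KMS interactions:

    // Look up the BigQuery service account for this project.
    var account = await api.projects.getServiceAccount('my-project');
    print(account.email);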
|
| +
|
| + /// Lists all projects to which you have been granted any project role.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [maxResults] - Maximum number of results to return
|
| + ///
|
| + /// [pageToken] - Page token, returned by a previous call, to request the
|
| + /// next page of results
|
| + ///
|
| + /// Completes with a [ProjectList].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| async.Future<ProjectList> list({core.int maxResults, core.String pageToken}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| @@ -757,48 +820,46 @@ class ProjectsResourceApi {
|
|
|
| _url = 'projects';
|
|
|
| - var _response = _requester.request(_url,
|
| - "GET",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "GET",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new ProjectList.fromJson(data));
|
| }
|
| -
|
| }
|
|
|
| -
|
| class TabledataResourceApi {
|
| final commons.ApiRequester _requester;
|
|
|
| - TabledataResourceApi(commons.ApiRequester client) :
|
| - _requester = client;
|
| -
|
| - /**
|
| - * Streams data into BigQuery one record at a time without needing to run a
|
| - * load job. Requires the WRITER dataset role.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Project ID of the destination table.
|
| - *
|
| - * [datasetId] - Dataset ID of the destination table.
|
| - *
|
| - * [tableId] - Table ID of the destination table.
|
| - *
|
| - * Completes with a [TableDataInsertAllResponse].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<TableDataInsertAllResponse> insertAll(TableDataInsertAllRequest request, core.String projectId, core.String datasetId, core.String tableId) {
|
| + TabledataResourceApi(commons.ApiRequester client) : _requester = client;
|
| +
|
| + /// Streams data into BigQuery one record at a time without needing to run a
|
| + /// load job. Requires the WRITER dataset role.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Project ID of the destination table.
|
| + ///
|
| + /// [datasetId] - Dataset ID of the destination table.
|
| + ///
|
| + /// [tableId] - Table ID of the destination table.
|
| + ///
|
| + /// Completes with a [TableDataInsertAllResponse].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<TableDataInsertAllResponse> insertAll(
|
| + TableDataInsertAllRequest request,
|
| + core.String projectId,
|
| + core.String datasetId,
|
| + core.String tableId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -819,49 +880,58 @@ class TabledataResourceApi {
|
| throw new core.ArgumentError("Parameter tableId is required.");
|
| }
|
|
|
| - _url = 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/datasets/' + commons.Escaper.ecapeVariable('$datasetId') + '/tables/' + commons.Escaper.ecapeVariable('$tableId') + '/insertAll';
|
| -
|
| - var _response = _requester.request(_url,
|
| - "POST",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| - return _response.then((data) => new TableDataInsertAllResponse.fromJson(data));
|
| - }
|
| -
|
| - /**
|
| - * Retrieves table data from a specified set of rows. Requires the READER
|
| - * dataset role.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Project ID of the table to read
|
| - *
|
| - * [datasetId] - Dataset ID of the table to read
|
| - *
|
| - * [tableId] - Table ID of the table to read
|
| - *
|
| - * [maxResults] - Maximum number of results to return
|
| - *
|
| - * [pageToken] - Page token, returned by a previous call, identifying the
|
| - * result set
|
| - *
|
| - * [selectedFields] - List of fields to return (comma-separated). If
|
| - * unspecified, all fields are returned
|
| - *
|
| - * [startIndex] - Zero-based index of the starting row to read
|
| - *
|
| - * Completes with a [TableDataList].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<TableDataList> list(core.String projectId, core.String datasetId, core.String tableId, {core.int maxResults, core.String pageToken, core.String selectedFields, core.String startIndex}) {
|
| + _url = 'projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/datasets/' +
|
| + commons.Escaper.ecapeVariable('$datasetId') +
|
| + '/tables/' +
|
| + commons.Escaper.ecapeVariable('$tableId') +
|
| + '/insertAll';
|
| +
|
| + var _response = _requester.request(_url, "POST",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| + return _response
|
| + .then((data) => new TableDataInsertAllResponse.fromJson(data));
|
| + }
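A sketch of the streaming-insert call; the rows are built through the generated fromJson constructors so the per-row insertId and json fields follow whatever shape TableDataInsertAllRequestRows defines later in this file, and insertId keeps retries idempotent:

    // Stream two rows into an existing table.
    var request = new bq.TableDataInsertAllRequest()
      ..rows = [
        new bq.TableDataInsertAllRequestRows.fromJson(
            {'insertId': 'row-1', 'json': {'name': 'alice', 'score': 10}}),
        new bq.TableDataInsertAllRequestRows.fromJson(
            {'insertId': 'row-2', 'json': {'name': 'bob', 'score': 7}}),
      ];
    var response = await api.tabledata
        .insertAll(request, 'my-project', 'my_dataset', 'events');
    response.insertErrors?.forEach((e) => print('row ${e.index}: ${e.errors}'));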
|
| +
|
| + /// Retrieves table data from a specified set of rows. Requires the READER
|
| + /// dataset role.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Project ID of the table to read
|
| + ///
|
| + /// [datasetId] - Dataset ID of the table to read
|
| + ///
|
| + /// [tableId] - Table ID of the table to read
|
| + ///
|
| + /// [maxResults] - Maximum number of results to return
|
| + ///
|
| + /// [pageToken] - Page token, returned by a previous call, identifying the
|
| + /// result set
|
| + ///
|
| + /// [selectedFields] - List of fields to return (comma-separated). If
|
| + /// unspecified, all fields are returned
|
| + ///
|
| + /// [startIndex] - Zero-based index of the starting row to read
|
| + ///
|
| + /// Completes with a [TableDataList].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<TableDataList> list(
|
| + core.String projectId, core.String datasetId, core.String tableId,
|
| + {core.int maxResults,
|
| + core.String pageToken,
|
| + core.String selectedFields,
|
| + core.String startIndex}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -891,46 +961,47 @@ class TabledataResourceApi {
|
| _queryParams["startIndex"] = [startIndex];
|
| }
|
|
|
| - _url = 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/datasets/' + commons.Escaper.ecapeVariable('$datasetId') + '/tables/' + commons.Escaper.ecapeVariable('$tableId') + '/data';
|
| -
|
| - var _response = _requester.request(_url,
|
| - "GET",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + _url = 'projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/datasets/' +
|
| + commons.Escaper.ecapeVariable('$datasetId') +
|
| + '/tables/' +
|
| + commons.Escaper.ecapeVariable('$tableId') +
|
| + '/data';
|
| +
|
| + var _response = _requester.request(_url, "GET",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new TableDataList.fromJson(data));
|
| }
|
| -
|
| }
|
|
|
| -
|
| class TablesResourceApi {
|
| final commons.ApiRequester _requester;
|
|
|
| - TablesResourceApi(commons.ApiRequester client) :
|
| - _requester = client;
|
| -
|
| - /**
|
| - * Deletes the table specified by tableId from the dataset. If the table
|
| - * contains data, all the data will be deleted.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Project ID of the table to delete
|
| - *
|
| - * [datasetId] - Dataset ID of the table to delete
|
| - *
|
| - * [tableId] - Table ID of the table to delete
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future delete(core.String projectId, core.String datasetId, core.String tableId) {
|
| + TablesResourceApi(commons.ApiRequester client) : _requester = client;
|
| +
|
| + /// Deletes the table specified by tableId from the dataset. If the table
|
| + /// contains data, all the data will be deleted.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Project ID of the table to delete
|
| + ///
|
| + /// [datasetId] - Dataset ID of the table to delete
|
| + ///
|
| + /// [tableId] - Table ID of the table to delete
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future delete(
|
| + core.String projectId, core.String datasetId, core.String tableId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -950,43 +1021,47 @@ class TablesResourceApi {
|
|
|
| _downloadOptions = null;
|
|
|
| - _url = 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/datasets/' + commons.Escaper.ecapeVariable('$datasetId') + '/tables/' + commons.Escaper.ecapeVariable('$tableId');
|
| -
|
| - var _response = _requester.request(_url,
|
| - "DELETE",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + _url = 'projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/datasets/' +
|
| + commons.Escaper.ecapeVariable('$datasetId') +
|
| + '/tables/' +
|
| + commons.Escaper.ecapeVariable('$tableId');
|
| +
|
| + var _response = _requester.request(_url, "DELETE",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => null);
|
| }
|
|
|
| - /**
|
| - * Gets the specified table resource by table ID. This method does not return
|
| - * the data in the table, it only returns the table resource, which describes
|
| - * the structure of this table.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Project ID of the requested table
|
| - *
|
| - * [datasetId] - Dataset ID of the requested table
|
| - *
|
| - * [tableId] - Table ID of the requested table
|
| - *
|
| - * [selectedFields] - List of fields to return (comma-separated). If
|
| - * unspecified, all fields are returned
|
| - *
|
| - * Completes with a [Table].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<Table> get(core.String projectId, core.String datasetId, core.String tableId, {core.String selectedFields}) {
|
| + /// Gets the specified table resource by table ID. This method does not
|
| + /// return the data in the table, it only returns the table resource, which
|
| + /// describes the structure of this table.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Project ID of the requested table
|
| + ///
|
| + /// [datasetId] - Dataset ID of the requested table
|
| + ///
|
| + /// [tableId] - Table ID of the requested table
|
| + ///
|
| + /// [selectedFields] - List of fields to return (comma-separated). If
|
| + /// unspecified, all fields are returned
|
| + ///
|
| + /// Completes with a [Table].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<Table> get(
|
| + core.String projectId, core.String datasetId, core.String tableId,
|
| + {core.String selectedFields}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -1007,38 +1082,41 @@ class TablesResourceApi {
|
| _queryParams["selectedFields"] = [selectedFields];
|
| }
|
|
|
| - _url = 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/datasets/' + commons.Escaper.ecapeVariable('$datasetId') + '/tables/' + commons.Escaper.ecapeVariable('$tableId');
|
| + _url = 'projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/datasets/' +
|
| + commons.Escaper.ecapeVariable('$datasetId') +
|
| + '/tables/' +
|
| + commons.Escaper.ecapeVariable('$tableId');
|
|
|
| - var _response = _requester.request(_url,
|
| - "GET",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "GET",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new Table.fromJson(data));
|
| }
|
|
|
| - /**
|
| - * Creates a new, empty table in the dataset.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Project ID of the new table
|
| - *
|
| - * [datasetId] - Dataset ID of the new table
|
| - *
|
| - * Completes with a [Table].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<Table> insert(Table request, core.String projectId, core.String datasetId) {
|
| + /// Creates a new, empty table in the dataset.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Project ID of the new table
|
| + ///
|
| + /// [datasetId] - Dataset ID of the new table
|
| + ///
|
| + /// Completes with a [Table].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<Table> insert(
|
| + Table request, core.String projectId, core.String datasetId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -1056,42 +1134,44 @@ class TablesResourceApi {
|
| throw new core.ArgumentError("Parameter datasetId is required.");
|
| }
|
|
|
| - _url = 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/datasets/' + commons.Escaper.ecapeVariable('$datasetId') + '/tables';
|
| + _url = 'projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/datasets/' +
|
| + commons.Escaper.ecapeVariable('$datasetId') +
|
| + '/tables';
|
|
|
| - var _response = _requester.request(_url,
|
| - "POST",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "POST",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new Table.fromJson(data));
|
| }
|
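| A sketch of creating a table through the `insert` call above, under the same
| assumptions; the schema is illustrative only.
|
|     Future<bigquery.Table> createEventsTable(bigquery.BigqueryApi api) {
|       final table = new bigquery.Table()
|         ..tableReference = (new bigquery.TableReference()
|           ..projectId = 'my-project'
|           ..datasetId = 'my_dataset'
|           ..tableId = 'events')
|         ..schema = (new bigquery.TableSchema()
|           ..fields = [
|             new bigquery.TableFieldSchema()
|               ..name = 'ts'
|               ..type = 'TIMESTAMP',
|             new bigquery.TableFieldSchema()
|               ..name = 'payload'
|               ..type = 'STRING'
|           ]);
|       // POSTs to projects/<projectId>/datasets/<datasetId>/tables.
|       return api.tables.insert(table, 'my-project', 'my_dataset');
|     }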
|
|
| - /**
|
| - * Lists all tables in the specified dataset. Requires the READER dataset
|
| - * role.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Project ID of the tables to list
|
| - *
|
| - * [datasetId] - Dataset ID of the tables to list
|
| - *
|
| - * [maxResults] - Maximum number of results to return
|
| - *
|
| - * [pageToken] - Page token, returned by a previous call, to request the next
|
| - * page of results
|
| - *
|
| - * Completes with a [TableList].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<TableList> list(core.String projectId, core.String datasetId, {core.int maxResults, core.String pageToken}) {
|
| + /// Lists all tables in the specified dataset. Requires the READER dataset
|
| + /// role.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Project ID of the tables to list
|
| + ///
|
| + /// [datasetId] - Dataset ID of the tables to list
|
| + ///
|
| + /// [maxResults] - Maximum number of results to return
|
| + ///
|
| + /// [pageToken] - Page token, returned by a previous call, to request the
|
| + /// next page of results
|
| + ///
|
| + /// Completes with a [TableList].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<TableList> list(core.String projectId, core.String datasetId,
|
| + {core.int maxResults, core.String pageToken}) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -1112,43 +1192,45 @@ class TablesResourceApi {
|
| _queryParams["pageToken"] = [pageToken];
|
| }
|
|
|
| - _url = 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/datasets/' + commons.Escaper.ecapeVariable('$datasetId') + '/tables';
|
| + _url = 'projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/datasets/' +
|
| + commons.Escaper.ecapeVariable('$datasetId') +
|
| + '/tables';
|
|
|
| - var _response = _requester.request(_url,
|
| - "GET",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "GET",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new TableList.fromJson(data));
|
| }
|
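| A sketch of paging through `list` with `pageToken`; the element type is
| assumed to be `TableListTables`, following the library's naming pattern
| (compare `DatasetListDatasets` further down).
|
|     Future<List<bigquery.TableListTables>> listAllTables(
|         bigquery.BigqueryApi api, String projectId, String datasetId) async {
|       final tables = <bigquery.TableListTables>[];
|       String pageToken;
|       do {
|         final page = await api.tables.list(projectId, datasetId,
|             maxResults: 50, pageToken: pageToken);
|         if (page.tables != null) tables.addAll(page.tables);
|         pageToken = page.nextPageToken;
|       } while (pageToken != null);
|       return tables;
|     }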
|
|
| - /**
|
| - * Updates information in an existing table. The update method replaces the
|
| - * entire table resource, whereas the patch method only replaces fields that
|
| - * are provided in the submitted table resource. This method supports patch
|
| - * semantics.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Project ID of the table to update
|
| - *
|
| - * [datasetId] - Dataset ID of the table to update
|
| - *
|
| - * [tableId] - Table ID of the table to update
|
| - *
|
| - * Completes with a [Table].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<Table> patch(Table request, core.String projectId, core.String datasetId, core.String tableId) {
|
| + /// Updates information in an existing table. The update method replaces the
|
| + /// entire table resource, whereas the patch method only replaces fields that
|
| + /// are provided in the submitted table resource. This method supports patch
|
| + /// semantics.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Project ID of the table to update
|
| + ///
|
| + /// [datasetId] - Dataset ID of the table to update
|
| + ///
|
| + /// [tableId] - Table ID of the table to update
|
| + ///
|
| + /// Completes with a [Table].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<Table> patch(Table request, core.String projectId,
|
| + core.String datasetId, core.String tableId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -1169,42 +1251,45 @@ class TablesResourceApi {
|
| throw new core.ArgumentError("Parameter tableId is required.");
|
| }
|
|
|
| - _url = 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/datasets/' + commons.Escaper.ecapeVariable('$datasetId') + '/tables/' + commons.Escaper.ecapeVariable('$tableId');
|
| + _url = 'projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/datasets/' +
|
| + commons.Escaper.ecapeVariable('$datasetId') +
|
| + '/tables/' +
|
| + commons.Escaper.ecapeVariable('$tableId');
|
|
|
| - var _response = _requester.request(_url,
|
| - "PATCH",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "PATCH",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new Table.fromJson(data));
|
| }
|
|
|
| - /**
|
| - * Updates information in an existing table. The update method replaces the
|
| - * entire table resource, whereas the patch method only replaces fields that
|
| - * are provided in the submitted table resource.
|
| - *
|
| - * [request] - The metadata request object.
|
| - *
|
| - * Request parameters:
|
| - *
|
| - * [projectId] - Project ID of the table to update
|
| - *
|
| - * [datasetId] - Dataset ID of the table to update
|
| - *
|
| - * [tableId] - Table ID of the table to update
|
| - *
|
| - * Completes with a [Table].
|
| - *
|
| - * Completes with a [commons.ApiRequestError] if the API endpoint returned an
|
| - * error.
|
| - *
|
| - * If the used [http.Client] completes with an error when making a REST call,
|
| - * this method will complete with the same error.
|
| - */
|
| - async.Future<Table> update(Table request, core.String projectId, core.String datasetId, core.String tableId) {
|
| + /// Updates information in an existing table. The update method replaces the
|
| + /// entire table resource, whereas the patch method only replaces fields that
|
| + /// are provided in the submitted table resource.
|
| + ///
|
| + /// [request] - The metadata request object.
|
| + ///
|
| + /// Request parameters:
|
| + ///
|
| + /// [projectId] - Project ID of the table to update
|
| + ///
|
| + /// [datasetId] - Dataset ID of the table to update
|
| + ///
|
| + /// [tableId] - Table ID of the table to update
|
| + ///
|
| + /// Completes with a [Table].
|
| + ///
|
| + /// Completes with a [commons.ApiRequestError] if the API endpoint returned
|
| + /// an error.
|
| + ///
|
| + /// If the used [http.Client] completes with an error when making a REST
|
| + /// call, this method will complete with the same error.
|
| + async.Future<Table> update(Table request, core.String projectId,
|
| + core.String datasetId, core.String tableId) {
|
| var _url = null;
|
| var _queryParams = new core.Map();
|
| var _uploadMedia = null;
|
| @@ -1225,71 +1310,68 @@ class TablesResourceApi {
|
| throw new core.ArgumentError("Parameter tableId is required.");
|
| }
|
|
|
| - _url = 'projects/' + commons.Escaper.ecapeVariable('$projectId') + '/datasets/' + commons.Escaper.ecapeVariable('$datasetId') + '/tables/' + commons.Escaper.ecapeVariable('$tableId');
|
| + _url = 'projects/' +
|
| + commons.Escaper.ecapeVariable('$projectId') +
|
| + '/datasets/' +
|
| + commons.Escaper.ecapeVariable('$datasetId') +
|
| + '/tables/' +
|
| + commons.Escaper.ecapeVariable('$tableId');
|
|
|
| - var _response = _requester.request(_url,
|
| - "PUT",
|
| - body: _body,
|
| - queryParams: _queryParams,
|
| - uploadOptions: _uploadOptions,
|
| - uploadMedia: _uploadMedia,
|
| - downloadOptions: _downloadOptions);
|
| + var _response = _requester.request(_url, "PUT",
|
| + body: _body,
|
| + queryParams: _queryParams,
|
| + uploadOptions: _uploadOptions,
|
| + uploadMedia: _uploadMedia,
|
| + downloadOptions: _downloadOptions);
|
| return _response.then((data) => new Table.fromJson(data));
|
| }
|
| -
|
| }
|
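| To illustrate the patch/update distinction documented above, a sketch with the
| same placeholder IDs: `patch` replaces only the fields present in the request,
| while `update` replaces the whole table resource.
|
|     Future updateTableMetadata(bigquery.BigqueryApi api) async {
|       // patch: only the submitted fields change on the server.
|       await api.tables.patch(
|           new bigquery.Table()..description = 'Click events, partitioned by day',
|           'my-project',
|           'my_dataset',
|           'events');
|
|       // update: replaces the entire resource, so read-modify-write.
|       final current = await api.tables.get('my-project', 'my_dataset', 'events');
|       current.friendlyName = 'Events';
|       await api.tables.update(current, 'my-project', 'my_dataset', 'events');
|     }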
|
|
| -
|
| -
|
| class BigtableColumn {
|
| - /**
|
| - * [Optional] The encoding of the values when the type is not STRING.
|
| - * Acceptable encoding values are: TEXT - indicates values are alphanumeric
|
| - * text strings. BINARY - indicates values are encoded using HBase
|
| - * Bytes.toBytes family of functions. 'encoding' can also be set at the column
|
| - * family level. However, the setting at this level takes precedence if
|
| - * 'encoding' is set at both levels.
|
| - */
|
| + /// [Optional] The encoding of the values when the type is not STRING.
|
| + /// Acceptable encoding values are: TEXT - indicates values are alphanumeric
|
| + /// text strings. BINARY - indicates values are encoded using HBase
|
| + /// Bytes.toBytes family of functions. 'encoding' can also be set at the
|
| + /// column family level. However, the setting at this level takes precedence
|
| + /// if 'encoding' is set at both levels.
|
| core.String encoding;
|
| - /**
|
| - * [Optional] If the qualifier is not a valid BigQuery field identifier i.e.
|
| - * does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided
|
| - * as the column field name and is used as field name in queries.
|
| - */
|
| +
|
| + /// [Optional] If the qualifier is not a valid BigQuery field identifier i.e.
|
| + /// does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided
|
| + /// as the column field name and is used as field name in queries.
|
| core.String fieldName;
|
| - /**
|
| - * [Optional] If this is set, only the latest version of value in this column
|
| - * are exposed. 'onlyReadLatest' can also be set at the column family level.
|
| - * However, the setting at this level takes precedence if 'onlyReadLatest' is
|
| - * set at both levels.
|
| - */
|
| +
|
| + /// [Optional] If this is set, only the latest version of value in this
|
| + /// column is exposed. 'onlyReadLatest' can also be set at the column family
|
| + /// level. However, the setting at this level takes precedence if
|
| + /// 'onlyReadLatest' is set at both levels.
|
| core.bool onlyReadLatest;
|
| - /**
|
| - * [Required] Qualifier of the column. Columns in the parent column family
|
| - * that has this exact qualifier are exposed as . field. If the qualifier is
|
| - * valid UTF-8 string, it can be specified in the qualifier_string field.
|
| - * Otherwise, a base-64 encoded value must be set to qualifier_encoded. The
|
| - * column field name is the same as the column qualifier. However, if the
|
| - * qualifier is not a valid BigQuery field identifier i.e. does not match
|
| - * [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as field_name.
|
| - */
|
| +
|
| + /// [Required] Qualifier of the column. Columns in the parent column family
|
| + /// that have this exact qualifier are exposed as . field. If the qualifier is
|
| + /// a valid UTF-8 string, it can be specified in the qualifier_string field.
|
| + /// Otherwise, a base-64 encoded value must be set to qualifier_encoded. The
|
| + /// column field name is the same as the column qualifier. However, if the
|
| + /// qualifier is not a valid BigQuery field identifier i.e. does not match
|
| + /// [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as field_name.
|
| core.String qualifierEncoded;
|
| core.List<core.int> get qualifierEncodedAsBytes {
|
| return convert.BASE64.decode(qualifierEncoded);
|
| }
|
|
|
| void set qualifierEncodedAsBytes(core.List<core.int> _bytes) {
|
| - qualifierEncoded = convert.BASE64.encode(_bytes).replaceAll("/", "_").replaceAll("+", "-");
|
| + qualifierEncoded =
|
| + convert.BASE64.encode(_bytes).replaceAll("/", "_").replaceAll("+", "-");
|
| }
|
| +
|
| core.String qualifierString;
|
| - /**
|
| - * [Optional] The type to convert the value in cells of this column. The
|
| - * values are expected to be encoded using HBase Bytes.toBytes function when
|
| - * using the BINARY encoding value. Following BigQuery types are allowed
|
| - * (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN Default type is
|
| - * BYTES. 'type' can also be set at the column family level. However, the
|
| - * setting at this level takes precedence if 'type' is set at both levels.
|
| - */
|
| +
|
| + /// [Optional] The type to convert the value in cells of this column. The
|
| + /// values are expected to be encoded using HBase Bytes.toBytes function when
|
| + /// using the BINARY encoding value. Following BigQuery types are allowed
|
| + /// (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN Default type is
|
| + /// BYTES. 'type' can also be set at the column family level. However, the
|
| + /// setting at this level takes precedence if 'type' is set at both levels.
|
| core.String type;
|
|
|
| BigtableColumn();
|
| @@ -1316,7 +1398,8 @@ class BigtableColumn {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (encoding != null) {
|
| _json["encoding"] = encoding;
|
| }
|
| @@ -1340,46 +1423,44 @@ class BigtableColumn {
|
| }
|
|
|
| class BigtableColumnFamily {
|
| - /**
|
| - * [Optional] Lists of columns that should be exposed as individual fields as
|
| - * opposed to a list of (column name, value) pairs. All columns whose
|
| - * qualifier matches a qualifier in this list can be accessed as .. Other
|
| - * columns can be accessed as a list through .Column field.
|
| - */
|
| + /// [Optional] Lists of columns that should be exposed as individual fields
|
| + /// as opposed to a list of (column name, value) pairs. All columns whose
|
| + /// qualifier matches a qualifier in this list can be accessed as .. Other
|
| + /// columns can be accessed as a list through .Column field.
|
| core.List<BigtableColumn> columns;
|
| - /**
|
| - * [Optional] The encoding of the values when the type is not STRING.
|
| - * Acceptable encoding values are: TEXT - indicates values are alphanumeric
|
| - * text strings. BINARY - indicates values are encoded using HBase
|
| - * Bytes.toBytes family of functions. This can be overridden for a specific
|
| - * column by listing that column in 'columns' and specifying an encoding for
|
| - * it.
|
| - */
|
| +
|
| + /// [Optional] The encoding of the values when the type is not STRING.
|
| + /// Acceptable encoding values are: TEXT - indicates values are alphanumeric
|
| + /// text strings. BINARY - indicates values are encoded using HBase
|
| + /// Bytes.toBytes family of functions. This can be overridden for a specific
|
| + /// column by listing that column in 'columns' and specifying an encoding for
|
| + /// it.
|
| core.String encoding;
|
| - /** Identifier of the column family. */
|
| +
|
| + /// Identifier of the column family.
|
| core.String familyId;
|
| - /**
|
| - * [Optional] If this is set only the latest version of value are exposed for
|
| - * all columns in this column family. This can be overridden for a specific
|
| - * column by listing that column in 'columns' and specifying a different
|
| - * setting for that column.
|
| - */
|
| +
|
| + /// [Optional] If this is set, only the latest version of value is exposed
|
| + /// for all columns in this column family. This can be overridden for a
|
| + /// specific column by listing that column in 'columns' and specifying a
|
| + /// different setting for that column.
|
| core.bool onlyReadLatest;
|
| - /**
|
| - * [Optional] The type to convert the value in cells of this column family.
|
| - * The values are expected to be encoded using HBase Bytes.toBytes function
|
| - * when using the BINARY encoding value. Following BigQuery types are allowed
|
| - * (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN Default type is
|
| - * BYTES. This can be overridden for a specific column by listing that column
|
| - * in 'columns' and specifying a type for it.
|
| - */
|
| +
|
| + /// [Optional] The type to convert the value in cells of this column family.
|
| + /// The values are expected to be encoded using HBase Bytes.toBytes function
|
| + /// when using the BINARY encoding value. Following BigQuery types are
|
| + /// allowed (case-sensitive) - BYTES STRING INTEGER FLOAT BOOLEAN Default
|
| + /// type is BYTES. This can be overridden for a specific column by listing
|
| + /// that column in 'columns' and specifying a type for it.
|
| core.String type;
|
|
|
| BigtableColumnFamily();
|
|
|
| BigtableColumnFamily.fromJson(core.Map _json) {
|
| if (_json.containsKey("columns")) {
|
| - columns = _json["columns"].map((value) => new BigtableColumn.fromJson(value)).toList();
|
| + columns = _json["columns"]
|
| + .map((value) => new BigtableColumn.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("encoding")) {
|
| encoding = _json["encoding"];
|
| @@ -1396,7 +1477,8 @@ class BigtableColumnFamily {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (columns != null) {
|
| _json["columns"] = columns.map((value) => (value).toJson()).toList();
|
| }
|
| @@ -1417,39 +1499,38 @@ class BigtableColumnFamily {
|
| }
|
|
|
| class BigtableOptions {
|
| - /**
|
| - * [Optional] List of column families to expose in the table schema along with
|
| - * their types. This list restricts the column families that can be referenced
|
| - * in queries and specifies their value types. You can use this list to do
|
| - * type conversions - see the 'type' field for more details. If you leave this
|
| - * list empty, all column families are present in the table schema and their
|
| - * values are read as BYTES. During a query only the column families
|
| - * referenced in that query are read from Bigtable.
|
| - */
|
| + /// [Optional] List of column families to expose in the table schema along
|
| + /// with their types. This list restricts the column families that can be
|
| + /// referenced in queries and specifies their value types. You can use this
|
| + /// list to do type conversions - see the 'type' field for more details. If
|
| + /// you leave this list empty, all column families are present in the table
|
| + /// schema and their values are read as BYTES. During a query only the column
|
| + /// families referenced in that query are read from Bigtable.
|
| core.List<BigtableColumnFamily> columnFamilies;
|
| - /**
|
| - * [Optional] If field is true, then the column families that are not
|
| - * specified in columnFamilies list are not exposed in the table schema.
|
| - * Otherwise, they are read with BYTES type values. The default value is
|
| - * false.
|
| - */
|
| +
|
| + /// [Optional] If field is true, then the column families that are not
|
| + /// specified in columnFamilies list are not exposed in the table schema.
|
| + /// Otherwise, they are read with BYTES type values. The default value is
|
| + /// false.
|
| core.bool ignoreUnspecifiedColumnFamilies;
|
| - /**
|
| - * [Optional] If field is true, then the rowkey column families will be read
|
| - * and converted to string. Otherwise they are read with BYTES type values and
|
| - * users need to manually cast them with CAST if necessary. The default value
|
| - * is false.
|
| - */
|
| +
|
| + /// [Optional] If field is true, then the rowkey column families will be read
|
| + /// and converted to string. Otherwise they are read with BYTES type values
|
| + /// and users need to manually cast them with CAST if necessary. The default
|
| + /// value is false.
|
| core.bool readRowkeyAsString;
|
|
|
| BigtableOptions();
|
|
|
| BigtableOptions.fromJson(core.Map _json) {
|
| if (_json.containsKey("columnFamilies")) {
|
| - columnFamilies = _json["columnFamilies"].map((value) => new BigtableColumnFamily.fromJson(value)).toList();
|
| + columnFamilies = _json["columnFamilies"]
|
| + .map((value) => new BigtableColumnFamily.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("ignoreUnspecifiedColumnFamilies")) {
|
| - ignoreUnspecifiedColumnFamilies = _json["ignoreUnspecifiedColumnFamilies"];
|
| + ignoreUnspecifiedColumnFamilies =
|
| + _json["ignoreUnspecifiedColumnFamilies"];
|
| }
|
| if (_json.containsKey("readRowkeyAsString")) {
|
| readRowkeyAsString = _json["readRowkeyAsString"];
|
| @@ -1457,12 +1538,15 @@ class BigtableOptions {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (columnFamilies != null) {
|
| - _json["columnFamilies"] = columnFamilies.map((value) => (value).toJson()).toList();
|
| + _json["columnFamilies"] =
|
| + columnFamilies.map((value) => (value).toJson()).toList();
|
| }
|
| if (ignoreUnspecifiedColumnFamilies != null) {
|
| - _json["ignoreUnspecifiedColumnFamilies"] = ignoreUnspecifiedColumnFamilies;
|
| + _json["ignoreUnspecifiedColumnFamilies"] =
|
| + ignoreUnspecifiedColumnFamilies;
|
| }
|
| if (readRowkeyAsString != null) {
|
| _json["readRowkeyAsString"] = readRowkeyAsString;
|
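| A sketch that ties the three Bigtable classes above together; family and
| column names are placeholders.
|
|     bigquery.BigtableOptions clickstreamOptions() {
|       return new bigquery.BigtableOptions()
|         ..readRowkeyAsString = true
|         ..ignoreUnspecifiedColumnFamilies = true
|         ..columnFamilies = [
|           new bigquery.BigtableColumnFamily()
|             ..familyId = 'stats'
|             ..type = 'INTEGER' // family-level default for its columns
|             ..columns = [
|               new bigquery.BigtableColumn()
|                 ..qualifierString = 'click-count'
|                 // 'click-count' is not a valid BigQuery identifier,
|                 // so an explicit field name is required.
|                 ..fieldName = 'click_count'
|             ]
|         ];
|     }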
| @@ -1472,49 +1556,42 @@ class BigtableOptions {
|
| }
|
|
|
| class CsvOptions {
|
| - /**
|
| - * [Optional] Indicates if BigQuery should accept rows that are missing
|
| - * trailing optional columns. If true, BigQuery treats missing trailing
|
| - * columns as null values. If false, records with missing trailing columns are
|
| - * treated as bad records, and if there are too many bad records, an invalid
|
| - * error is returned in the job result. The default value is false.
|
| - */
|
| + /// [Optional] Indicates if BigQuery should accept rows that are missing
|
| + /// trailing optional columns. If true, BigQuery treats missing trailing
|
| + /// columns as null values. If false, records with missing trailing columns
|
| + /// are treated as bad records, and if there are too many bad records, an
|
| + /// invalid error is returned in the job result. The default value is false.
|
| core.bool allowJaggedRows;
|
| - /**
|
| - * [Optional] Indicates if BigQuery should allow quoted data sections that
|
| - * contain newline characters in a CSV file. The default value is false.
|
| - */
|
| +
|
| + /// [Optional] Indicates if BigQuery should allow quoted data sections that
|
| + /// contain newline characters in a CSV file. The default value is false.
|
| core.bool allowQuotedNewlines;
|
| - /**
|
| - * [Optional] The character encoding of the data. The supported values are
|
| - * UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data
|
| - * after the raw, binary data has been split using the values of the quote and
|
| - * fieldDelimiter properties.
|
| - */
|
| +
|
| + /// [Optional] The character encoding of the data. The supported values are
|
| + /// UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the
|
| + /// data after the raw, binary data has been split using the values of the
|
| + /// quote and fieldDelimiter properties.
|
| core.String encoding;
|
| - /**
|
| - * [Optional] The separator for fields in a CSV file. BigQuery converts the
|
| - * string to ISO-8859-1 encoding, and then uses the first byte of the encoded
|
| - * string to split the data in its raw, binary state. BigQuery also supports
|
| - * the escape sequence "\t" to specify a tab separator. The default value is a
|
| - * comma (',').
|
| - */
|
| +
|
| + /// [Optional] The separator for fields in a CSV file. BigQuery converts the
|
| + /// string to ISO-8859-1 encoding, and then uses the first byte of the
|
| + /// encoded string to split the data in its raw, binary state. BigQuery also
|
| + /// supports the escape sequence "\t" to specify a tab separator. The default
|
| + /// value is a comma (',').
|
| core.String fieldDelimiter;
|
| - /**
|
| - * [Optional] The value that is used to quote data sections in a CSV file.
|
| - * BigQuery converts the string to ISO-8859-1 encoding, and then uses the
|
| - * first byte of the encoded string to split the data in its raw, binary
|
| - * state. The default value is a double-quote ('"'). If your data does not
|
| - * contain quoted sections, set the property value to an empty string. If your
|
| - * data contains quoted newline characters, you must also set the
|
| - * allowQuotedNewlines property to true.
|
| - */
|
| +
|
| + /// [Optional] The value that is used to quote data sections in a CSV file.
|
| + /// BigQuery converts the string to ISO-8859-1 encoding, and then uses the
|
| + /// first byte of the encoded string to split the data in its raw, binary
|
| + /// state. The default value is a double-quote ('"'). If your data does not
|
| + /// contain quoted sections, set the property value to an empty string. If
|
| + /// your data contains quoted newline characters, you must also set the
|
| + /// allowQuotedNewlines property to true.
|
| core.String quote;
|
| - /**
|
| - * [Optional] The number of rows at the top of a CSV file that BigQuery will
|
| - * skip when reading the data. The default value is 0. This property is useful
|
| - * if you have header rows in the file that should be skipped.
|
| - */
|
| +
|
| + /// [Optional] The number of rows at the top of a CSV file that BigQuery will
|
| + /// skip when reading the data. The default value is 0. This property is
|
| + /// useful if you have header rows in the file that should be skipped.
|
| core.String skipLeadingRows;
|
|
|
| CsvOptions();
|
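| A sketch applying the CSV options above to an external data source; the Cloud
| Storage URI and option values are placeholders.
|
|     bigquery.ExternalDataConfiguration tsvExport() {
|       final csv = new bigquery.CsvOptions()
|         ..fieldDelimiter = '\t' // tab-separated input
|         ..quote = '"'
|         ..allowQuotedNewlines = true
|         ..allowJaggedRows = true // missing trailing columns become NULL
|         ..skipLeadingRows = '1'; // skip the header row
|       return new bigquery.ExternalDataConfiguration()
|         ..sourceFormat = 'CSV'
|         ..sourceUris = ['gs://my-bucket/exports/*.tsv']
|         ..csvOptions = csv;
|     }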
| @@ -1541,7 +1618,8 @@ class CsvOptions {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (allowJaggedRows != null) {
|
| _json["allowJaggedRows"] = allowJaggedRows;
|
| }
|
| @@ -1565,38 +1643,34 @@ class CsvOptions {
|
| }
|
|
|
| class DatasetAccess {
|
| - /**
|
| - * [Pick one] A domain to grant access to. Any users signed in with the domain
|
| - * specified will be granted the specified access. Example: "example.com".
|
| - */
|
| + /// [Pick one] A domain to grant access to. Any users signed in with the
|
| + /// domain specified will be granted the specified access. Example:
|
| + /// "example.com".
|
| core.String domain;
|
| - /** [Pick one] An email address of a Google Group to grant access to. */
|
| +
|
| + /// [Pick one] An email address of a Google Group to grant access to.
|
| core.String groupByEmail;
|
| - /**
|
| - * [Required] Describes the rights granted to the user specified by the other
|
| - * member of the access object. The following string values are supported:
|
| - * READER, WRITER, OWNER.
|
| - */
|
| +
|
| + /// [Required] Describes the rights granted to the user specified by the
|
| + /// other member of the access object. The following string values are
|
| + /// supported: READER, WRITER, OWNER.
|
| core.String role;
|
| - /**
|
| - * [Pick one] A special group to grant access to. Possible values include:
|
| - * projectOwners: Owners of the enclosing project. projectReaders: Readers of
|
| - * the enclosing project. projectWriters: Writers of the enclosing project.
|
| - * allAuthenticatedUsers: All authenticated BigQuery users.
|
| - */
|
| +
|
| + /// [Pick one] A special group to grant access to. Possible values include:
|
| + /// projectOwners: Owners of the enclosing project. projectReaders: Readers
|
| + /// of the enclosing project. projectWriters: Writers of the enclosing
|
| + /// project. allAuthenticatedUsers: All authenticated BigQuery users.
|
| core.String specialGroup;
|
| - /**
|
| - * [Pick one] An email address of a user to grant access to. For example:
|
| - * fred@example.com.
|
| - */
|
| +
|
| + /// [Pick one] An email address of a user to grant access to. For example:
|
| + /// fred@example.com.
|
| core.String userByEmail;
|
| - /**
|
| - * [Pick one] A view from a different dataset to grant access to. Queries
|
| - * executed against that view will have read access to tables in this dataset.
|
| - * The role field is not required when this field is set. If that view is
|
| - * updated by any user, access to the view needs to be granted again via an
|
| - * update operation.
|
| - */
|
| +
|
| + /// [Pick one] A view from a different dataset to grant access to. Queries
|
| + /// executed against that view will have read access to tables in this
|
| + /// dataset. The role field is not required when this field is set. If that
|
| + /// view is updated by any user, access to the view needs to be granted again
|
| + /// via an update operation.
|
| TableReference view;
|
|
|
| DatasetAccess();
|
| @@ -1623,7 +1697,8 @@ class DatasetAccess {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (domain != null) {
|
| _json["domain"] = domain;
|
| }
|
| @@ -1647,85 +1722,84 @@ class DatasetAccess {
|
| }
|
|
|
| class Dataset {
|
| - /**
|
| - * [Optional] An array of objects that define dataset access for one or more
|
| - * entities. You can set this property when inserting or updating a dataset in
|
| - * order to control who is allowed to access the data. If unspecified at
|
| - * dataset creation time, BigQuery adds default dataset access for the
|
| - * following entities: access.specialGroup: projectReaders; access.role:
|
| - * READER; access.specialGroup: projectWriters; access.role: WRITER;
|
| - * access.specialGroup: projectOwners; access.role: OWNER; access.userByEmail:
|
| - * [dataset creator email]; access.role: OWNER;
|
| - */
|
| + /// [Optional] An array of objects that define dataset access for one or more
|
| + /// entities. You can set this property when inserting or updating a dataset
|
| + /// in order to control who is allowed to access the data. If unspecified at
|
| + /// dataset creation time, BigQuery adds default dataset access for the
|
| + /// following entities: access.specialGroup: projectReaders; access.role:
|
| + /// READER; access.specialGroup: projectWriters; access.role: WRITER;
|
| + /// access.specialGroup: projectOwners; access.role: OWNER;
|
| + /// access.userByEmail: [dataset creator email]; access.role: OWNER;
|
| core.List<DatasetAccess> access;
|
| - /**
|
| - * [Output-only] The time when this dataset was created, in milliseconds since
|
| - * the epoch.
|
| - */
|
| +
|
| + /// [Output-only] The time when this dataset was created, in milliseconds
|
| + /// since the epoch.
|
| core.String creationTime;
|
| - /** [Required] A reference that identifies the dataset. */
|
| +
|
| + /// [Required] A reference that identifies the dataset.
|
| DatasetReference datasetReference;
|
| - /**
|
| - * [Optional] The default lifetime of all tables in the dataset, in
|
| - * milliseconds. The minimum value is 3600000 milliseconds (one hour). Once
|
| - * this property is set, all newly-created tables in the dataset will have an
|
| - * expirationTime property set to the creation time plus the value in this
|
| - * property, and changing the value will only affect new tables, not existing
|
| - * ones. When the expirationTime for a given table is reached, that table will
|
| - * be deleted automatically. If a table's expirationTime is modified or
|
| - * removed before the table expires, or if you provide an explicit
|
| - * expirationTime when creating a table, that value takes precedence over the
|
| - * default expiration time indicated by this property.
|
| - */
|
| +
|
| + /// [Optional] The default lifetime of all tables in the dataset, in
|
| + /// milliseconds. The minimum value is 3600000 milliseconds (one hour). Once
|
| + /// this property is set, all newly-created tables in the dataset will have
|
| + /// an expirationTime property set to the creation time plus the value in
|
| + /// this property, and changing the value will only affect new tables, not
|
| + /// existing ones. When the expirationTime for a given table is reached, that
|
| + /// table will be deleted automatically. If a table's expirationTime is
|
| + /// modified or removed before the table expires, or if you provide an
|
| + /// explicit expirationTime when creating a table, that value takes
|
| + /// precedence over the default expiration time indicated by this property.
|
| core.String defaultTableExpirationMs;
|
| - /** [Optional] A user-friendly description of the dataset. */
|
| +
|
| + /// [Optional] A user-friendly description of the dataset.
|
| core.String description;
|
| - /** [Output-only] A hash of the resource. */
|
| +
|
| + /// [Output-only] A hash of the resource.
|
| core.String etag;
|
| - /** [Optional] A descriptive name for the dataset. */
|
| +
|
| + /// [Optional] A descriptive name for the dataset.
|
| core.String friendlyName;
|
| - /**
|
| - * [Output-only] The fully-qualified unique name of the dataset in the format
|
| - * projectId:datasetId. The dataset name without the project name is given in
|
| - * the datasetId field. When creating a new dataset, leave this field blank,
|
| - * and instead specify the datasetId field.
|
| - */
|
| +
|
| + /// [Output-only] The fully-qualified unique name of the dataset in the
|
| + /// format projectId:datasetId. The dataset name without the project name is
|
| + /// given in the datasetId field. When creating a new dataset, leave this
|
| + /// field blank, and instead specify the datasetId field.
|
| core.String id;
|
| - /** [Output-only] The resource type. */
|
| +
|
| + /// [Output-only] The resource type.
|
| core.String kind;
|
| - /**
|
| - * The labels associated with this dataset. You can use these to organize and
|
| - * group your datasets. You can set this property when inserting or updating a
|
| - * dataset. See Labeling Datasets for more information.
|
| - */
|
| +
|
| + /// The labels associated with this dataset. You can use these to organize
|
| + /// and group your datasets. You can set this property when inserting or
|
| + /// updating a dataset. See Labeling Datasets for more information.
|
| core.Map<core.String, core.String> labels;
|
| - /**
|
| - * [Output-only] The date when this dataset or any of its tables was last
|
| - * modified, in milliseconds since the epoch.
|
| - */
|
| +
|
| + /// [Output-only] The date when this dataset or any of its tables was last
|
| + /// modified, in milliseconds since the epoch.
|
| core.String lastModifiedTime;
|
| - /**
|
| - * The geographic location where the dataset should reside. Possible values
|
| - * include EU and US. The default value is US.
|
| - */
|
| +
|
| + /// The geographic location where the dataset should reside. Possible values
|
| + /// include EU and US. The default value is US.
|
| core.String location;
|
| - /**
|
| - * [Output-only] A URL that can be used to access the resource again. You can
|
| - * use this URL in Get or Update requests to the resource.
|
| - */
|
| +
|
| + /// [Output-only] A URL that can be used to access the resource again. You
|
| + /// can use this URL in Get or Update requests to the resource.
|
| core.String selfLink;
|
|
|
| Dataset();
|
|
|
| Dataset.fromJson(core.Map _json) {
|
| if (_json.containsKey("access")) {
|
| - access = _json["access"].map((value) => new DatasetAccess.fromJson(value)).toList();
|
| + access = _json["access"]
|
| + .map((value) => new DatasetAccess.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("creationTime")) {
|
| creationTime = _json["creationTime"];
|
| }
|
| if (_json.containsKey("datasetReference")) {
|
| - datasetReference = new DatasetReference.fromJson(_json["datasetReference"]);
|
| + datasetReference =
|
| + new DatasetReference.fromJson(_json["datasetReference"]);
|
| }
|
| if (_json.containsKey("defaultTableExpirationMs")) {
|
| defaultTableExpirationMs = _json["defaultTableExpirationMs"];
|
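| A sketch that exercises several Dataset fields documented above (access
| entries, labels, location, default table expiration). Identifiers are
| placeholders, and `datasets.insert` comes from the same library but is
| outside this hunk.
|
|     Future<bigquery.Dataset> createAnalyticsDataset(bigquery.BigqueryApi api) {
|       final dataset = new bigquery.Dataset()
|         ..datasetReference = (new bigquery.DatasetReference()
|           ..projectId = 'my-project'
|           ..datasetId = 'analytics')
|         ..location = 'EU'
|         ..defaultTableExpirationMs = '${7 * 24 * 60 * 60 * 1000}' // one week
|         ..labels = {'team': 'data-eng'}
|         ..access = [
|           new bigquery.DatasetAccess()
|             ..role = 'READER'
|             ..groupByEmail = 'analysts@example.com'
|         ];
|       return api.datasets.insert(dataset, 'my-project');
|     }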
| @@ -1760,7 +1834,8 @@ class Dataset {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (access != null) {
|
| _json["access"] = access.map((value) => (value).toJson()).toList();
|
| }
|
| @@ -1805,31 +1880,30 @@ class Dataset {
|
| }
|
|
|
| class DatasetListDatasets {
|
| - /**
|
| - * The dataset reference. Use this property to access specific parts of the
|
| - * dataset's ID, such as project ID or dataset ID.
|
| - */
|
| + /// The dataset reference. Use this property to access specific parts of the
|
| + /// dataset's ID, such as project ID or dataset ID.
|
| DatasetReference datasetReference;
|
| - /** A descriptive name for the dataset, if one exists. */
|
| +
|
| + /// A descriptive name for the dataset, if one exists.
|
| core.String friendlyName;
|
| - /** The fully-qualified, unique, opaque ID of the dataset. */
|
| +
|
| + /// The fully-qualified, unique, opaque ID of the dataset.
|
| core.String id;
|
| - /**
|
| - * The resource type. This property always returns the value
|
| - * "bigquery#dataset".
|
| - */
|
| +
|
| + /// The resource type. This property always returns the value
|
| + /// "bigquery#dataset".
|
| core.String kind;
|
| - /**
|
| - * The labels associated with this dataset. You can use these to organize and
|
| - * group your datasets.
|
| - */
|
| +
|
| + /// The labels associated with this dataset. You can use these to organize
|
| + /// and group your datasets.
|
| core.Map<core.String, core.String> labels;
|
|
|
| DatasetListDatasets();
|
|
|
| DatasetListDatasets.fromJson(core.Map _json) {
|
| if (_json.containsKey("datasetReference")) {
|
| - datasetReference = new DatasetReference.fromJson(_json["datasetReference"]);
|
| + datasetReference =
|
| + new DatasetReference.fromJson(_json["datasetReference"]);
|
| }
|
| if (_json.containsKey("friendlyName")) {
|
| friendlyName = _json["friendlyName"];
|
| @@ -1846,7 +1920,8 @@ class DatasetListDatasets {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (datasetReference != null) {
|
| _json["datasetReference"] = (datasetReference).toJson();
|
| }
|
| @@ -1867,34 +1942,31 @@ class DatasetListDatasets {
|
| }
|
|
|
| class DatasetList {
|
| - /**
|
| - * An array of the dataset resources in the project. Each resource contains
|
| - * basic information. For full information about a particular dataset
|
| - * resource, use the Datasets: get method. This property is omitted when there
|
| - * are no datasets in the project.
|
| - */
|
| + /// An array of the dataset resources in the project. Each resource contains
|
| + /// basic information. For full information about a particular dataset
|
| + /// resource, use the Datasets: get method. This property is omitted when
|
| + /// there are no datasets in the project.
|
| core.List<DatasetListDatasets> datasets;
|
| - /**
|
| - * A hash value of the results page. You can use this property to determine if
|
| - * the page has changed since the last request.
|
| - */
|
| +
|
| + /// A hash value of the results page. You can use this property to determine
|
| + /// if the page has changed since the last request.
|
| core.String etag;
|
| - /**
|
| - * The list type. This property always returns the value
|
| - * "bigquery#datasetList".
|
| - */
|
| +
|
| + /// The list type. This property always returns the value
|
| + /// "bigquery#datasetList".
|
| core.String kind;
|
| - /**
|
| - * A token that can be used to request the next results page. This property is
|
| - * omitted on the final results page.
|
| - */
|
| +
|
| + /// A token that can be used to request the next results page. This property
|
| + /// is omitted on the final results page.
|
| core.String nextPageToken;
|
|
|
| DatasetList();
|
|
|
| DatasetList.fromJson(core.Map _json) {
|
| if (_json.containsKey("datasets")) {
|
| - datasets = _json["datasets"].map((value) => new DatasetListDatasets.fromJson(value)).toList();
|
| + datasets = _json["datasets"]
|
| + .map((value) => new DatasetListDatasets.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("etag")) {
|
| etag = _json["etag"];
|
| @@ -1908,7 +1980,8 @@ class DatasetList {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (datasets != null) {
|
| _json["datasets"] = datasets.map((value) => (value).toJson()).toList();
|
| }
|
| @@ -1926,13 +1999,12 @@ class DatasetList {
|
| }
|
|
|
| class DatasetReference {
|
| - /**
|
| - * [Required] A unique ID for this dataset, without the project name. The ID
|
| - * must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).
|
| - * The maximum length is 1,024 characters.
|
| - */
|
| + /// [Required] A unique ID for this dataset, without the project name. The ID
|
| + /// must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).
|
| + /// The maximum length is 1,024 characters.
|
| core.String datasetId;
|
| - /** [Optional] The ID of the project containing this dataset. */
|
| +
|
| + /// [Optional] The ID of the project containing this dataset.
|
| core.String projectId;
|
|
|
| DatasetReference();
|
| @@ -1947,7 +2019,8 @@ class DatasetReference {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (datasetId != null) {
|
| _json["datasetId"] = datasetId;
|
| }
|
| @@ -1958,17 +2031,42 @@ class DatasetReference {
|
| }
|
| }
|
|
|
| +class EncryptionConfiguration {
|
| + /// [Optional] Describes the Cloud KMS encryption key that will be used to
|
| + /// protect the destination BigQuery table. The BigQuery Service Account
|
| + /// associated with your project requires access to this encryption key.
|
| + core.String kmsKeyName;
|
| +
|
| + EncryptionConfiguration();
|
| +
|
| + EncryptionConfiguration.fromJson(core.Map _json) {
|
| + if (_json.containsKey("kmsKeyName")) {
|
| + kmsKeyName = _json["kmsKeyName"];
|
| + }
|
| + }
|
| +
|
| + core.Map<core.String, core.Object> toJson() {
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| + if (kmsKeyName != null) {
|
| + _json["kmsKeyName"] = kmsKeyName;
|
| + }
|
| + return _json;
|
| + }
|
| +}
|
| +
|
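| The newly added EncryptionConfiguration carries a single Cloud KMS key name.
| A construction sketch; the key resource name is a placeholder.
|
|     void printKmsConfig() {
|       final config = new bigquery.EncryptionConfiguration()
|         ..kmsKeyName =
|             'projects/my-project/locations/us/keyRings/bq/cryptoKeys/tables';
|       // Serializes to {"kmsKeyName": "projects/my-project/..."}; the project's
|       // BigQuery service account must have access to this key.
|       print(config.toJson());
|     }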
| class ErrorProto {
|
| - /**
|
| - * Debugging information. This property is internal to Google and should not
|
| - * be used.
|
| - */
|
| + /// Debugging information. This property is internal to Google and should not
|
| + /// be used.
|
| core.String debugInfo;
|
| - /** Specifies where the error occurred, if present. */
|
| +
|
| + /// Specifies where the error occurred, if present.
|
| core.String location;
|
| - /** A human-readable description of the error. */
|
| +
|
| + /// A human-readable description of the error.
|
| core.String message;
|
| - /** A short error code that summarizes the error. */
|
| +
|
| + /// A short error code that summarizes the error.
|
| core.String reason;
|
|
|
| ErrorProto();
|
| @@ -1989,7 +2087,8 @@ class ErrorProto {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (debugInfo != null) {
|
| _json["debugInfo"] = debugInfo;
|
| }
|
| @@ -2007,60 +2106,77 @@ class ErrorProto {
|
| }
|
|
|
| class ExplainQueryStage {
|
| - /** Milliseconds the average shard spent on CPU-bound tasks. */
|
| + /// Milliseconds the average shard spent on CPU-bound tasks.
|
| core.String computeMsAvg;
|
| - /** Milliseconds the slowest shard spent on CPU-bound tasks. */
|
| +
|
| + /// Milliseconds the slowest shard spent on CPU-bound tasks.
|
| core.String computeMsMax;
|
| - /** Relative amount of time the average shard spent on CPU-bound tasks. */
|
| +
|
| + /// Relative amount of time the average shard spent on CPU-bound tasks.
|
| core.double computeRatioAvg;
|
| - /** Relative amount of time the slowest shard spent on CPU-bound tasks. */
|
| +
|
| + /// Relative amount of time the slowest shard spent on CPU-bound tasks.
|
| core.double computeRatioMax;
|
| - /** Unique ID for stage within plan. */
|
| +
|
| + /// Unique ID for stage within plan.
|
| core.String id;
|
| - /** Human-readable name for stage. */
|
| +
|
| + /// Human-readable name for stage.
|
| core.String name;
|
| - /** Milliseconds the average shard spent reading input. */
|
| +
|
| + /// Milliseconds the average shard spent reading input.
|
| core.String readMsAvg;
|
| - /** Milliseconds the slowest shard spent reading input. */
|
| +
|
| + /// Milliseconds the slowest shard spent reading input.
|
| core.String readMsMax;
|
| - /** Relative amount of time the average shard spent reading input. */
|
| +
|
| + /// Relative amount of time the average shard spent reading input.
|
| core.double readRatioAvg;
|
| - /** Relative amount of time the slowest shard spent reading input. */
|
| +
|
| + /// Relative amount of time the slowest shard spent reading input.
|
| core.double readRatioMax;
|
| - /** Number of records read into the stage. */
|
| +
|
| + /// Number of records read into the stage.
|
| core.String recordsRead;
|
| - /** Number of records written by the stage. */
|
| +
|
| + /// Number of records written by the stage.
|
| core.String recordsWritten;
|
| - /** Total number of bytes written to shuffle. */
|
| +
|
| + /// Total number of bytes written to shuffle.
|
| core.String shuffleOutputBytes;
|
| - /** Total number of bytes written to shuffle and spilled to disk. */
|
| +
|
| + /// Total number of bytes written to shuffle and spilled to disk.
|
| core.String shuffleOutputBytesSpilled;
|
| - /** Current status for the stage. */
|
| +
|
| + /// Current status for the stage.
|
| core.String status;
|
| - /**
|
| - * List of operations within the stage in dependency order (approximately
|
| - * chronological).
|
| - */
|
| +
|
| + /// List of operations within the stage in dependency order (approximately
|
| + /// chronological).
|
| core.List<ExplainQueryStep> steps;
|
| - /** Milliseconds the average shard spent waiting to be scheduled. */
|
| +
|
| + /// Milliseconds the average shard spent waiting to be scheduled.
|
| core.String waitMsAvg;
|
| - /** Milliseconds the slowest shard spent waiting to be scheduled. */
|
| +
|
| + /// Milliseconds the slowest shard spent waiting to be scheduled.
|
| core.String waitMsMax;
|
| - /**
|
| - * Relative amount of time the average shard spent waiting to be scheduled.
|
| - */
|
| +
|
| + /// Relative amount of time the average shard spent waiting to be scheduled.
|
| core.double waitRatioAvg;
|
| - /**
|
| - * Relative amount of time the slowest shard spent waiting to be scheduled.
|
| - */
|
| +
|
| + /// Relative amount of time the slowest shard spent waiting to be scheduled.
|
| core.double waitRatioMax;
|
| - /** Milliseconds the average shard spent on writing output. */
|
| +
|
| + /// Milliseconds the average shard spent on writing output.
|
| core.String writeMsAvg;
|
| - /** Milliseconds the slowest shard spent on writing output. */
|
| +
|
| + /// Milliseconds the slowest shard spent on writing output.
|
| core.String writeMsMax;
|
| - /** Relative amount of time the average shard spent on writing output. */
|
| +
|
| + /// Relative amount of time the average shard spent on writing output.
|
| core.double writeRatioAvg;
|
| - /** Relative amount of time the slowest shard spent on writing output. */
|
| +
|
| + /// Relative amount of time the slowest shard spent on writing output.
|
| core.double writeRatioMax;
|
|
|
| ExplainQueryStage();
|
| @@ -2112,7 +2228,9 @@ class ExplainQueryStage {
|
| status = _json["status"];
|
| }
|
| if (_json.containsKey("steps")) {
|
| - steps = _json["steps"].map((value) => new ExplainQueryStep.fromJson(value)).toList();
|
| + steps = _json["steps"]
|
| + .map((value) => new ExplainQueryStep.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("waitMsAvg")) {
|
| waitMsAvg = _json["waitMsAvg"];
|
| @@ -2141,7 +2259,8 @@ class ExplainQueryStage {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (computeMsAvg != null) {
|
| _json["computeMsAvg"] = computeMsAvg;
|
| }
|
| @@ -2219,9 +2338,10 @@ class ExplainQueryStage {
|
| }
|
|
|
| class ExplainQueryStep {
|
| - /** Machine-readable operation type. */
|
| + /// Machine-readable operation type.
|
| core.String kind;
|
| - /** Human-readable stage descriptions. */
|
| +
|
| + /// Human-readable stage descriptions.
|
| core.List<core.String> substeps;
|
|
|
| ExplainQueryStep();
|
| @@ -2236,7 +2356,8 @@ class ExplainQueryStep {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (kind != null) {
|
| _json["kind"] = kind;
|
| }
|
| @@ -2248,67 +2369,63 @@ class ExplainQueryStep {
|
| }
|
|
|
| class ExternalDataConfiguration {
|
| - /**
|
| - * Try to detect schema and format options automatically. Any option specified
|
| - * explicitly will be honored.
|
| - */
|
| + /// Try to detect schema and format options automatically. Any option
|
| + /// specified explicitly will be honored.
|
| core.bool autodetect;
|
| - /** [Optional] Additional options if sourceFormat is set to BIGTABLE. */
|
| +
|
| + /// [Optional] Additional options if sourceFormat is set to BIGTABLE.
|
| BigtableOptions bigtableOptions;
|
| - /**
|
| - * [Optional] The compression type of the data source. Possible values include
|
| - * GZIP and NONE. The default value is NONE. This setting is ignored for
|
| - * Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.
|
| - */
|
| +
|
| + /// [Optional] The compression type of the data source. Possible values
|
| + /// include GZIP and NONE. The default value is NONE. This setting is ignored
|
| + /// for Google Cloud Bigtable, Google Cloud Datastore backups and Avro
|
| + /// formats.
|
| core.String compression;
|
| - /** Additional properties to set if sourceFormat is set to CSV. */
|
| +
|
| + /// Additional properties to set if sourceFormat is set to CSV.
|
| CsvOptions csvOptions;
|
| - /** [Optional] Additional options if sourceFormat is set to GOOGLE_SHEETS. */
|
| +
|
| + /// [Optional] Additional options if sourceFormat is set to GOOGLE_SHEETS.
|
| GoogleSheetsOptions googleSheetsOptions;
|
| - /**
|
| - * [Optional] Indicates if BigQuery should allow extra values that are not
|
| - * represented in the table schema. If true, the extra values are ignored. If
|
| - * false, records with extra columns are treated as bad records, and if there
|
| - * are too many bad records, an invalid error is returned in the job result.
|
| - * The default value is false. The sourceFormat property determines what
|
| - * BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values
|
| - * that don't match any column names Google Cloud Bigtable: This setting is
|
| - * ignored. Google Cloud Datastore backups: This setting is ignored. Avro:
|
| - * This setting is ignored.
|
| - */
|
| +
|
| + /// [Optional] Indicates if BigQuery should allow extra values that are not
|
| + /// represented in the table schema. If true, the extra values are ignored.
|
| + /// If false, records with extra columns are treated as bad records, and if
|
| + /// there are too many bad records, an invalid error is returned in the job
|
| + /// result. The default value is false. The sourceFormat property determines
|
| + /// what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named
|
| + /// values that don't match any column names Google Cloud Bigtable: This
|
| + /// setting is ignored. Google Cloud Datastore backups: This setting is
|
| + /// ignored. Avro: This setting is ignored.
|
| core.bool ignoreUnknownValues;
|
| - /**
|
| - * [Optional] The maximum number of bad records that BigQuery can ignore when
|
| - * reading data. If the number of bad records exceeds this value, an invalid
|
| - * error is returned in the job result. The default value is 0, which requires
|
| - * that all records are valid. This setting is ignored for Google Cloud
|
| - * Bigtable, Google Cloud Datastore backups and Avro formats.
|
| - */
|
| +
|
| + /// [Optional] The maximum number of bad records that BigQuery can ignore
|
| + /// when reading data. If the number of bad records exceeds this value, an
|
| + /// invalid error is returned in the job result. The default value is 0,
|
| + /// which requires that all records are valid. This setting is ignored for
|
| + /// Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats.
|
| core.int maxBadRecords;
|
| - /**
|
| - * [Optional] The schema for the data. Schema is required for CSV and JSON
|
| - * formats. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore
|
| - * backups, and Avro formats.
|
| - */
|
| +
|
| + /// [Optional] The schema for the data. Schema is required for CSV and JSON
|
| + /// formats. Schema is disallowed for Google Cloud Bigtable, Cloud Datastore
|
| + /// backups, and Avro formats.
|
| TableSchema schema;
|
| - /**
|
| - * [Required] The data format. For CSV files, specify "CSV". For Google
|
| - * sheets, specify "GOOGLE_SHEETS". For newline-delimited JSON, specify
|
| - * "NEWLINE_DELIMITED_JSON". For Avro files, specify "AVRO". For Google Cloud
|
| - * Datastore backups, specify "DATASTORE_BACKUP". [Beta] For Google Cloud
|
| - * Bigtable, specify "BIGTABLE".
|
| - */
|
| +
|
| + /// [Required] The data format. For CSV files, specify "CSV". For Google
|
| + /// Sheets, specify "GOOGLE_SHEETS". For newline-delimited JSON, specify
|
| + /// "NEWLINE_DELIMITED_JSON". For Avro files, specify "AVRO". For Google
|
| + /// Cloud Datastore backups, specify "DATASTORE_BACKUP". [Beta] For Google
|
| + /// Cloud Bigtable, specify "BIGTABLE".
|
| core.String sourceFormat;
|
| - /**
|
| - * [Required] The fully-qualified URIs that point to your data in Google
|
| - * Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard
|
| - * character and it must come after the 'bucket' name. Size limits related to
|
| - * load jobs apply to external data sources. For Google Cloud Bigtable URIs:
|
| - * Exactly one URI can be specified and it has be a fully specified and valid
|
| - * HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore
|
| - * backups, exactly one URI can be specified. Also, the '*' wildcard character
|
| - * is not allowed.
|
| - */
|
| +
|
| + /// [Required] The fully-qualified URIs that point to your data in Google
|
| + /// Cloud. For Google Cloud Storage URIs: Each URI can contain one '*'
|
| + /// wildcard character and it must come after the 'bucket' name. Size limits
|
| + /// related to load jobs apply to external data sources. For Google Cloud
|
| + /// Bigtable URIs: Exactly one URI can be specified and it has to be a fully
|
| + /// specified and valid HTTPS URL for a Google Cloud Bigtable table. For
|
| + /// Google Cloud Datastore backups, exactly one URI can be specified. Also,
|
| + /// the '*' wildcard character is not allowed.
|
| core.List<core.String> sourceUris;
|
|
|
| ExternalDataConfiguration();
|
| @@ -2327,7 +2444,8 @@ class ExternalDataConfiguration {
|
| csvOptions = new CsvOptions.fromJson(_json["csvOptions"]);
|
| }
|
| if (_json.containsKey("googleSheetsOptions")) {
|
| - googleSheetsOptions = new GoogleSheetsOptions.fromJson(_json["googleSheetsOptions"]);
|
| + googleSheetsOptions =
|
| + new GoogleSheetsOptions.fromJson(_json["googleSheetsOptions"]);
|
| }
|
| if (_json.containsKey("ignoreUnknownValues")) {
|
| ignoreUnknownValues = _json["ignoreUnknownValues"];
|
| @@ -2347,7 +2465,8 @@ class ExternalDataConfiguration {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (autodetect != null) {
|
| _json["autodetect"] = autodetect;
|
| }
|
| @@ -2383,59 +2502,56 @@ class ExternalDataConfiguration {
|
| }
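|
| A minimal usage sketch for this class: an ExternalDataConfiguration describing
| CSV files in Cloud Storage, using only the fields documented above. The bucket
| and object paths are placeholders.
|
|   final csvSource = new ExternalDataConfiguration()
|     ..sourceFormat = "CSV"
|     ..autodetect = true
|     ..compression = "NONE"
|     ..ignoreUnknownValues = false
|     ..maxBadRecords = 10
|     ..sourceUris = ["gs://example-bucket/exports/*.csv"];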
|
|
|
| class GetQueryResultsResponse {
|
| - /** Whether the query result was fetched from the query cache. */
|
| + /// Whether the query result was fetched from the query cache.
|
| core.bool cacheHit;
|
| - /**
|
| - * [Output-only] The first errors or warnings encountered during the running
|
| - * of the job. The final message includes the number of errors that caused the
|
| - * process to stop. Errors here do not necessarily mean that the job has
|
| - * completed or was unsuccessful.
|
| - */
|
| +
|
| + /// [Output-only] The first errors or warnings encountered during the running
|
| + /// of the job. The final message includes the number of errors that caused
|
| + /// the process to stop. Errors here do not necessarily mean that the job has
|
| + /// completed or was unsuccessful.
|
| core.List<ErrorProto> errors;
|
| - /** A hash of this response. */
|
| +
|
| + /// A hash of this response.
|
| core.String etag;
|
| - /**
|
| - * Whether the query has completed or not. If rows or totalRows are present,
|
| - * this will always be true. If this is false, totalRows will not be
|
| - * available.
|
| - */
|
| +
|
| + /// Whether the query has completed or not. If rows or totalRows are present,
|
| + /// this will always be true. If this is false, totalRows will not be
|
| + /// available.
|
| core.bool jobComplete;
|
| - /**
|
| - * Reference to the BigQuery Job that was created to run the query. This field
|
| - * will be present even if the original request timed out, in which case
|
| - * GetQueryResults can be used to read the results once the query has
|
| - * completed. Since this API only returns the first page of results,
|
| - * subsequent pages can be fetched via the same mechanism (GetQueryResults).
|
| - */
|
| +
|
| + /// Reference to the BigQuery Job that was created to run the query. This
|
| + /// field will be present even if the original request timed out, in which
|
| + /// case GetQueryResults can be used to read the results once the query has
|
| + /// completed. Since this API only returns the first page of results,
|
| + /// subsequent pages can be fetched via the same mechanism (GetQueryResults).
|
| JobReference jobReference;
|
| - /** The resource type of the response. */
|
| +
|
| + /// The resource type of the response.
|
| core.String kind;
|
| - /**
|
| - * [Output-only] The number of rows affected by a DML statement. Present only
|
| - * for DML statements INSERT, UPDATE or DELETE.
|
| - */
|
| +
|
| + /// [Output-only] The number of rows affected by a DML statement. Present
|
| + /// only for DML statements INSERT, UPDATE or DELETE.
|
| core.String numDmlAffectedRows;
|
| - /** A token used for paging results. */
|
| +
|
| + /// A token used for paging results.
|
| core.String pageToken;
|
| - /**
|
| - * An object with as many results as can be contained within the maximum
|
| - * permitted reply size. To get any additional rows, you can call
|
| - * GetQueryResults and specify the jobReference returned above. Present only
|
| - * when the query completes successfully.
|
| - */
|
| +
|
| + /// An object with as many results as can be contained within the maximum
|
| + /// permitted reply size. To get any additional rows, you can call
|
| + /// GetQueryResults and specify the jobReference returned above. Present only
|
| + /// when the query completes successfully.
|
| core.List<TableRow> rows;
|
| - /**
|
| - * The schema of the results. Present only when the query completes
|
| - * successfully.
|
| - */
|
| +
|
| + /// The schema of the results. Present only when the query completes
|
| + /// successfully.
|
| TableSchema schema;
|
| - /** The total number of bytes processed for this query. */
|
| +
|
| + /// The total number of bytes processed for this query.
|
| core.String totalBytesProcessed;
|
| - /**
|
| - * The total number of rows in the complete query result set, which can be
|
| - * more than the number of rows in this single page of results. Present only
|
| - * when the query completes successfully.
|
| - */
|
| +
|
| + /// The total number of rows in the complete query result set, which can be
|
| + /// more than the number of rows in this single page of results. Present only
|
| + /// when the query completes successfully.
|
| core.String totalRows;
|
|
|
| GetQueryResultsResponse();
|
| @@ -2445,7 +2561,9 @@ class GetQueryResultsResponse {
|
| cacheHit = _json["cacheHit"];
|
| }
|
| if (_json.containsKey("errors")) {
|
| - errors = _json["errors"].map((value) => new ErrorProto.fromJson(value)).toList();
|
| + errors = _json["errors"]
|
| + .map((value) => new ErrorProto.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("etag")) {
|
| etag = _json["etag"];
|
| @@ -2466,7 +2584,8 @@ class GetQueryResultsResponse {
|
| pageToken = _json["pageToken"];
|
| }
|
| if (_json.containsKey("rows")) {
|
| - rows = _json["rows"].map((value) => new TableRow.fromJson(value)).toList();
|
| + rows =
|
| + _json["rows"].map((value) => new TableRow.fromJson(value)).toList();
|
| }
|
| if (_json.containsKey("schema")) {
|
| schema = new TableSchema.fromJson(_json["schema"]);
|
| @@ -2480,7 +2599,8 @@ class GetQueryResultsResponse {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (cacheHit != null) {
|
| _json["cacheHit"] = cacheHit;
|
| }
|
| @@ -2521,20 +2641,49 @@ class GetQueryResultsResponse {
|
| }
|
| }
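|
| A sketch of consuming this response once the query is done, assuming TableRow
| exposes its cells as 'f' with each value in 'v' (as elsewhere in this library):
|
|   void printQueryResults(GetQueryResultsResponse response) {
|     if (!response.jobComplete) {
|       // Not ready yet; call GetQueryResults again with response.jobReference.
|       return;
|     }
|     print("totalRows=${response.totalRows} cacheHit=${response.cacheHit}");
|     for (var row in response.rows) {
|       print(row.f.map((cell) => cell.v).join(", "));
|     }
|   }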
|
|
|
| +class GetServiceAccountResponse {
|
| + /// The service account email address.
|
| + core.String email;
|
| +
|
| + /// The resource type of the response.
|
| + core.String kind;
|
| +
|
| + GetServiceAccountResponse();
|
| +
|
| + GetServiceAccountResponse.fromJson(core.Map _json) {
|
| + if (_json.containsKey("email")) {
|
| + email = _json["email"];
|
| + }
|
| + if (_json.containsKey("kind")) {
|
| + kind = _json["kind"];
|
| + }
|
| + }
|
| +
|
| + core.Map<core.String, core.Object> toJson() {
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| + if (email != null) {
|
| + _json["email"] = email;
|
| + }
|
| + if (kind != null) {
|
| + _json["kind"] = kind;
|
| + }
|
| + return _json;
|
| + }
|
| +}
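|
| A sketch of fetching this new response type, assuming this change also adds a
| matching getServiceAccount(projectId) method on the projects resource; the
| project id is a placeholder:
|
|   void printServiceAccount(BigqueryApi api) {
|     api.projects.getServiceAccount("my-project").then(
|         (GetServiceAccountResponse response) {
|       print("Grant ${response.email} access to the Cloud KMS key used for "
|           "destinationEncryptionConfiguration.");
|     });
|   }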
|
| +
|
| class GoogleSheetsOptions {
|
| - /**
|
| - * [Optional] The number of rows at the top of a sheet that BigQuery will skip
|
| - * when reading the data. The default value is 0. This property is useful if
|
| - * you have header rows that should be skipped. When autodetect is on,
|
| - * behavior is the following: * skipLeadingRows unspecified - Autodetect tries
|
| - * to detect headers in the first row. If they are not detected, the row is
|
| - * read as data. Otherwise data is read starting from the second row. *
|
| - * skipLeadingRows is 0 - Instructs autodetect that there are no headers and
|
| - * data should be read starting from the first row. * skipLeadingRows = N > 0
|
| - * - Autodetect skips N-1 rows and tries to detect headers in row N. If
|
| - * headers are not detected, row N is just skipped. Otherwise row N is used to
|
| - * extract column names for the detected schema.
|
| - */
|
| + /// [Optional] The number of rows at the top of a sheet that BigQuery will
|
| + /// skip when reading the data. The default value is 0. This property is
|
| + /// useful if you have header rows that should be skipped. When autodetect is
|
| + /// on, behavior is the following: * skipLeadingRows unspecified - Autodetect
|
| + /// tries to detect headers in the first row. If they are not detected, the
|
| + /// row is read as data. Otherwise data is read starting from the second row.
|
| + /// * skipLeadingRows is 0 - Instructs autodetect that there are no headers
|
| + /// and data should be read starting from the first row. * skipLeadingRows =
|
| + /// N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N.
|
| + /// If headers are not detected, row N is just skipped. Otherwise row N is
|
| + /// used to extract column names for the detected schema.
|
| core.String skipLeadingRows;
|
|
|
| GoogleSheetsOptions();
|
| @@ -2546,7 +2695,8 @@ class GoogleSheetsOptions {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (skipLeadingRows != null) {
|
| _json["skipLeadingRows"] = skipLeadingRows;
|
| }
|
| @@ -2555,29 +2705,33 @@ class GoogleSheetsOptions {
|
| }
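|
| A sketch combining GoogleSheetsOptions with the ExternalDataConfiguration class
| above; the spreadsheet URL is a placeholder. With skipLeadingRows = "1",
| autodetect looks for headers in row 1 and, if found, uses them as column names.
|
|   final sheetSource = new ExternalDataConfiguration()
|     ..sourceFormat = "GOOGLE_SHEETS"
|     ..autodetect = true
|     ..googleSheetsOptions = (new GoogleSheetsOptions()..skipLeadingRows = "1")
|     ..sourceUris = ["https://docs.google.com/spreadsheets/d/EXAMPLE_SHEET_ID"];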
|
|
|
| class Job {
|
| - /** [Required] Describes the job configuration. */
|
| + /// [Required] Describes the job configuration.
|
| JobConfiguration configuration;
|
| - /** [Output-only] A hash of this resource. */
|
| +
|
| + /// [Output-only] A hash of this resource.
|
| core.String etag;
|
| - /** [Output-only] Opaque ID field of the job */
|
| +
|
| + /// [Output-only] Opaque ID field of the job
|
| core.String id;
|
| - /** [Optional] Reference describing the unique-per-user name of the job. */
|
| +
|
| + /// [Optional] Reference describing the unique-per-user name of the job.
|
| JobReference jobReference;
|
| - /** [Output-only] The type of the resource. */
|
| +
|
| + /// [Output-only] The type of the resource.
|
| core.String kind;
|
| - /** [Output-only] A URL that can be used to access this resource again. */
|
| +
|
| + /// [Output-only] A URL that can be used to access this resource again.
|
| core.String selfLink;
|
| - /**
|
| - * [Output-only] Information about the job, including starting time and ending
|
| - * time of the job.
|
| - */
|
| +
|
| + /// [Output-only] Information about the job, including starting time and
|
| + /// ending time of the job.
|
| JobStatistics statistics;
|
| - /**
|
| - * [Output-only] The status of this job. Examine this value when polling an
|
| - * asynchronous job to see if the job is complete.
|
| - */
|
| +
|
| + /// [Output-only] The status of this job. Examine this value when polling an
|
| + /// asynchronous job to see if the job is complete.
|
| JobStatus status;
|
| - /** [Output-only] Email address of the user who ran the job. */
|
| +
|
| + /// [Output-only] Email address of the user who ran the job.
|
| core.String userEmail;
|
|
|
| Job();
|
| @@ -2613,7 +2767,8 @@ class Job {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (configuration != null) {
|
| _json["configuration"] = (configuration).toJson();
|
| }
|
| @@ -2646,9 +2801,10 @@ class Job {
|
| }
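|
| A sketch of polling a Job until it finishes, assuming the jobs.get(projectId,
| jobId) method and the JobStatus.state field provided elsewhere in this library:
|
|   import 'dart:async';
|
|   Future<Job> waitForDone(BigqueryApi api, String projectId, String jobId) async {
|     while (true) {
|       Job job = await api.jobs.get(projectId, jobId);
|       // "DONE" is the terminal state; check job.status.errorResult for failures.
|       if (job.status.state == "DONE") return job;
|       await new Future.delayed(const Duration(seconds: 1));
|     }
|   }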
|
|
|
| class JobCancelResponse {
|
| - /** The final state of the job. */
|
| + /// The final state of the job.
|
| Job job;
|
| - /** The resource type of the response. */
|
| +
|
| + /// The resource type of the response.
|
| core.String kind;
|
|
|
| JobCancelResponse();
|
| @@ -2663,7 +2819,8 @@ class JobCancelResponse {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (job != null) {
|
| _json["job"] = (job).toJson();
|
| }
|
| @@ -2675,29 +2832,30 @@ class JobCancelResponse {
|
| }
|
|
|
| class JobConfiguration {
|
| - /** [Pick one] Copies a table. */
|
| + /// [Pick one] Copies a table.
|
| JobConfigurationTableCopy copy;
|
| - /**
|
| - * [Optional] If set, don't actually run this job. A valid query will return a
|
| - * mostly empty response with some processing statistics, while an invalid
|
| - * query will return the same error it would if it wasn't a dry run. Behavior
|
| - * of non-query jobs is undefined.
|
| - */
|
| +
|
| + /// [Optional] If set, don't actually run this job. A valid query will return
|
| + /// a mostly empty response with some processing statistics, while an invalid
|
| + /// query will return the same error it would if it wasn't a dry run.
|
| + /// Behavior of non-query jobs is undefined.
|
| core.bool dryRun;
|
| - /** [Pick one] Configures an extract job. */
|
| +
|
| + /// [Pick one] Configures an extract job.
|
| JobConfigurationExtract extract;
|
| - /**
|
| - * [Experimental] The labels associated with this job. You can use these to
|
| - * organize and group your jobs. Label keys and values can be no longer than
|
| - * 63 characters, can only contain lowercase letters, numeric characters,
|
| - * underscores and dashes. International characters are allowed. Label values
|
| - * are optional. Label keys must start with a letter and each label in the
|
| - * list must have a different key.
|
| - */
|
| +
|
| + /// [Experimental] The labels associated with this job. You can use these to
|
| + /// organize and group your jobs. Label keys and values can be no longer than
|
| + /// 63 characters, can only contain lowercase letters, numeric characters,
|
| + /// underscores and dashes. International characters are allowed. Label
|
| + /// values are optional. Label keys must start with a letter and each label
|
| + /// in the list must have a different key.
|
| core.Map<core.String, core.String> labels;
|
| - /** [Pick one] Configures a load job. */
|
| +
|
| + /// [Pick one] Configures a load job.
|
| JobConfigurationLoad load;
|
| - /** [Pick one] Configures a query job. */
|
| +
|
| + /// [Pick one] Configures a query job.
|
| JobConfigurationQuery query;
|
|
|
| JobConfiguration();
|
| @@ -2724,7 +2882,8 @@ class JobConfiguration {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (copy != null) {
|
| _json["copy"] = (copy).toJson();
|
| }
|
| @@ -2748,39 +2907,33 @@ class JobConfiguration {
|
| }
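|
| A sketch of a dry-run JobConfiguration wrapping a query, using only fields
| documented above; the dataset and table names are placeholders:
|
|   final dryRunConfig = new JobConfiguration()
|     ..dryRun = true
|     ..labels = {"team": "analytics", "env": "test"}
|     ..query = (new JobConfigurationQuery()
|       ..query = "SELECT COUNT(*) FROM my_dataset.events"
|       ..useLegacySql = false);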
|
|
|
| class JobConfigurationExtract {
|
| - /**
|
| - * [Optional] The compression type to use for exported files. Possible values
|
| - * include GZIP and NONE. The default value is NONE.
|
| - */
|
| + /// [Optional] The compression type to use for exported files. Possible
|
| + /// values include GZIP and NONE. The default value is NONE.
|
| core.String compression;
|
| - /**
|
| - * [Optional] The exported file format. Possible values include CSV,
|
| - * NEWLINE_DELIMITED_JSON and AVRO. The default value is CSV. Tables with
|
| - * nested or repeated fields cannot be exported as CSV.
|
| - */
|
| +
|
| + /// [Optional] The exported file format. Possible values include CSV,
|
| + /// NEWLINE_DELIMITED_JSON and AVRO. The default value is CSV. Tables with
|
| + /// nested or repeated fields cannot be exported as CSV.
|
| core.String destinationFormat;
|
| - /**
|
| - * [Pick one] DEPRECATED: Use destinationUris instead, passing only one URI as
|
| - * necessary. The fully-qualified Google Cloud Storage URI where the extracted
|
| - * table should be written.
|
| - */
|
| +
|
| + /// [Pick one] DEPRECATED: Use destinationUris instead, passing only one URI
|
| + /// as necessary. The fully-qualified Google Cloud Storage URI where the
|
| + /// extracted table should be written.
|
| core.String destinationUri;
|
| - /**
|
| - * [Pick one] A list of fully-qualified Google Cloud Storage URIs where the
|
| - * extracted table should be written.
|
| - */
|
| +
|
| + /// [Pick one] A list of fully-qualified Google Cloud Storage URIs where the
|
| + /// extracted table should be written.
|
| core.List<core.String> destinationUris;
|
| - /**
|
| - * [Optional] Delimiter to use between fields in the exported data. Default is
|
| - * ','
|
| - */
|
| +
|
| + /// [Optional] Delimiter to use between fields in the exported data. Default
|
| + /// is ','
|
| core.String fieldDelimiter;
|
| - /**
|
| - * [Optional] Whether to print out a header row in the results. Default is
|
| - * true.
|
| - */
|
| +
|
| + /// [Optional] Whether to print out a header row in the results. Default is
|
| + /// true.
|
| core.bool printHeader;
|
| - /** [Required] A reference to the table being exported. */
|
| +
|
| + /// [Required] A reference to the table being exported.
|
| TableReference sourceTable;
|
|
|
| JobConfigurationExtract();
|
| @@ -2810,7 +2963,8 @@ class JobConfigurationExtract {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (compression != null) {
|
| _json["compression"] = compression;
|
| }
|
| @@ -2837,164 +2991,150 @@ class JobConfigurationExtract {
|
| }
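|
| A sketch of an extract configuration, assuming the usual projectId/datasetId/
| tableId fields on TableReference; the names and URIs are placeholders:
|
|   final extract = new JobConfigurationExtract()
|     ..sourceTable = (new TableReference()
|       ..projectId = "my-project"
|       ..datasetId = "my_dataset"
|       ..tableId = "events")
|     ..destinationFormat = "NEWLINE_DELIMITED_JSON"
|     ..compression = "GZIP"
|     ..destinationUris = ["gs://example-bucket/export/events-*.json.gz"];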
|
|
|
| class JobConfigurationLoad {
|
| - /**
|
| - * [Optional] Accept rows that are missing trailing optional columns. The
|
| - * missing values are treated as nulls. If false, records with missing
|
| - * trailing columns are treated as bad records, and if there are too many bad
|
| - * records, an invalid error is returned in the job result. The default value
|
| - * is false. Only applicable to CSV, ignored for other formats.
|
| - */
|
| + /// [Optional] Accept rows that are missing trailing optional columns. The
|
| + /// missing values are treated as nulls. If false, records with missing
|
| + /// trailing columns are treated as bad records, and if there are too many
|
| + /// bad records, an invalid error is returned in the job result. The default
|
| + /// value is false. Only applicable to CSV, ignored for other formats.
|
| core.bool allowJaggedRows;
|
| - /**
|
| - * Indicates if BigQuery should allow quoted data sections that contain
|
| - * newline characters in a CSV file. The default value is false.
|
| - */
|
| +
|
| + /// Indicates if BigQuery should allow quoted data sections that contain
|
| + /// newline characters in a CSV file. The default value is false.
|
| core.bool allowQuotedNewlines;
|
| - /**
|
| - * Indicates if we should automatically infer the options and schema for CSV
|
| - * and JSON sources.
|
| - */
|
| +
|
| + /// Indicates if we should automatically infer the options and schema for CSV
|
| + /// and JSON sources.
|
| core.bool autodetect;
|
| - /**
|
| - * [Optional] Specifies whether the job is allowed to create new tables. The
|
| - * following values are supported: CREATE_IF_NEEDED: If the table does not
|
| - * exist, BigQuery creates the table. CREATE_NEVER: The table must already
|
| - * exist. If it does not, a 'notFound' error is returned in the job result.
|
| - * The default value is CREATE_IF_NEEDED. Creation, truncation and append
|
| - * actions occur as one atomic update upon job completion.
|
| - */
|
| +
|
| + /// [Optional] Specifies whether the job is allowed to create new tables. The
|
| + /// following values are supported: CREATE_IF_NEEDED: If the table does not
|
| + /// exist, BigQuery creates the table. CREATE_NEVER: The table must already
|
| + /// exist. If it does not, a 'notFound' error is returned in the job result.
|
| + /// The default value is CREATE_IF_NEEDED. Creation, truncation and append
|
| + /// actions occur as one atomic update upon job completion.
|
| core.String createDisposition;
|
| - /** [Required] The destination table to load the data into. */
|
| +
|
| + /// [Experimental] Custom encryption configuration (e.g., Cloud KMS keys).
|
| + EncryptionConfiguration destinationEncryptionConfiguration;
|
| +
|
| + /// [Required] The destination table to load the data into.
|
| TableReference destinationTable;
|
| - /**
|
| - * [Optional] The character encoding of the data. The supported values are
|
| - * UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the data
|
| - * after the raw, binary data has been split using the values of the quote and
|
| - * fieldDelimiter properties.
|
| - */
|
| +
|
| + /// [Optional] The character encoding of the data. The supported values are
|
| + /// UTF-8 or ISO-8859-1. The default value is UTF-8. BigQuery decodes the
|
| + /// data after the raw, binary data has been split using the values of the
|
| + /// quote and fieldDelimiter properties.
|
| core.String encoding;
|
| - /**
|
| - * [Optional] The separator for fields in a CSV file. The separator can be any
|
| - * ISO-8859-1 single-byte character. To use a character in the range 128-255,
|
| - * you must encode the character as UTF8. BigQuery converts the string to
|
| - * ISO-8859-1 encoding, and then uses the first byte of the encoded string to
|
| - * split the data in its raw, binary state. BigQuery also supports the escape
|
| - * sequence "\t" to specify a tab separator. The default value is a comma
|
| - * (',').
|
| - */
|
| +
|
| + /// [Optional] The separator for fields in a CSV file. The separator can be
|
| + /// any ISO-8859-1 single-byte character. To use a character in the range
|
| + /// 128-255, you must encode the character as UTF8. BigQuery converts the
|
| + /// string to ISO-8859-1 encoding, and then uses the first byte of the
|
| + /// encoded string to split the data in its raw, binary state. BigQuery also
|
| + /// supports the escape sequence "\t" to specify a tab separator. The default
|
| + /// value is a comma (',').
|
| core.String fieldDelimiter;
|
| - /**
|
| - * [Optional] Indicates if BigQuery should allow extra values that are not
|
| - * represented in the table schema. If true, the extra values are ignored. If
|
| - * false, records with extra columns are treated as bad records, and if there
|
| - * are too many bad records, an invalid error is returned in the job result.
|
| - * The default value is false. The sourceFormat property determines what
|
| - * BigQuery treats as an extra value: CSV: Trailing columns JSON: Named values
|
| - * that don't match any column names
|
| - */
|
| +
|
| + /// [Optional] Indicates if BigQuery should allow extra values that are not
|
| + /// represented in the table schema. If true, the extra values are ignored.
|
| + /// If false, records with extra columns are treated as bad records, and if
|
| + /// there are too many bad records, an invalid error is returned in the job
|
| + /// result. The default value is false. The sourceFormat property determines
|
| + /// what BigQuery treats as an extra value: CSV: Trailing columns JSON: Named
|
| + /// values that don't match any column names
|
| core.bool ignoreUnknownValues;
|
| - /**
|
| - * [Optional] The maximum number of bad records that BigQuery can ignore when
|
| - * running the job. If the number of bad records exceeds this value, an
|
| - * invalid error is returned in the job result. The default value is 0, which
|
| - * requires that all records are valid.
|
| - */
|
| +
|
| + /// [Optional] The maximum number of bad records that BigQuery can ignore
|
| + /// when running the job. If the number of bad records exceeds this value, an
|
| + /// invalid error is returned in the job result. The default value is 0,
|
| + /// which requires that all records are valid.
|
| core.int maxBadRecords;
|
| - /**
|
| - * [Optional] Specifies a string that represents a null value in a CSV file.
|
| - * For example, if you specify "\N", BigQuery interprets "\N" as a null value
|
| - * when loading a CSV file. The default value is the empty string. If you set
|
| - * this property to a custom value, BigQuery throws an error if an empty
|
| - * string is present for all data types except for STRING and BYTE. For STRING
|
| - * and BYTE columns, BigQuery interprets the empty string as an empty value.
|
| - */
|
| +
|
| + /// [Optional] Specifies a string that represents a null value in a CSV file.
|
| + /// For example, if you specify "\N", BigQuery interprets "\N" as a null
|
| + /// value when loading a CSV file. The default value is the empty string. If
|
| + /// you set this property to a custom value, BigQuery throws an error if an
|
| + /// empty string is present for all data types except for STRING and BYTE.
|
| + /// For STRING and BYTE columns, BigQuery interprets the empty string as an
|
| + /// empty value.
|
| core.String nullMarker;
|
| - /**
|
| - * If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity
|
| - * properties to load into BigQuery from a Cloud Datastore backup. Property
|
| - * names are case sensitive and must be top-level properties. If no properties
|
| - * are specified, BigQuery loads all properties. If any named property isn't
|
| - * found in the Cloud Datastore backup, an invalid error is returned in the
|
| - * job result.
|
| - */
|
| +
|
| + /// If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity
|
| + /// properties to load into BigQuery from a Cloud Datastore backup. Property
|
| + /// names are case sensitive and must be top-level properties. If no
|
| + /// properties are specified, BigQuery loads all properties. If any named
|
| + /// property isn't found in the Cloud Datastore backup, an invalid error is
|
| + /// returned in the job result.
|
| core.List<core.String> projectionFields;
|
| - /**
|
| - * [Optional] The value that is used to quote data sections in a CSV file.
|
| - * BigQuery converts the string to ISO-8859-1 encoding, and then uses the
|
| - * first byte of the encoded string to split the data in its raw, binary
|
| - * state. The default value is a double-quote ('"'). If your data does not
|
| - * contain quoted sections, set the property value to an empty string. If your
|
| - * data contains quoted newline characters, you must also set the
|
| - * allowQuotedNewlines property to true.
|
| - */
|
| +
|
| + /// [Optional] The value that is used to quote data sections in a CSV file.
|
| + /// BigQuery converts the string to ISO-8859-1 encoding, and then uses the
|
| + /// first byte of the encoded string to split the data in its raw, binary
|
| + /// state. The default value is a double-quote ('"'). If your data does not
|
| + /// contain quoted sections, set the property value to an empty string. If
|
| + /// your data contains quoted newline characters, you must also set the
|
| + /// allowQuotedNewlines property to true.
|
| core.String quote;
|
| - /**
|
| - * [Optional] The schema for the destination table. The schema can be omitted
|
| - * if the destination table already exists, or if you're loading data from
|
| - * Google Cloud Datastore.
|
| - */
|
| +
|
| + /// [Optional] The schema for the destination table. The schema can be
|
| + /// omitted if the destination table already exists, or if you're loading
|
| + /// data from Google Cloud Datastore.
|
| TableSchema schema;
|
| - /**
|
| - * [Deprecated] The inline schema. For CSV schemas, specify as
|
| - * "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER,
|
| - * baz:FLOAT".
|
| - */
|
| +
|
| + /// [Deprecated] The inline schema. For CSV schemas, specify as
|
| + /// "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER,
|
| + /// baz:FLOAT".
|
| core.String schemaInline;
|
| - /** [Deprecated] The format of the schemaInline property. */
|
| +
|
| + /// [Deprecated] The format of the schemaInline property.
|
| core.String schemaInlineFormat;
|
| - /**
|
| - * [Experimental] Allows the schema of the desitination table to be updated as
|
| - * a side effect of the load job if a schema is autodetected or supplied in
|
| - * the job configuration. Schema update options are supported in two cases:
|
| - * when writeDisposition is WRITE_APPEND; when writeDisposition is
|
| - * WRITE_TRUNCATE and the destination table is a partition of a table,
|
| - * specified by partition decorators. For normal tables, WRITE_TRUNCATE will
|
| - * always overwrite the schema. One or more of the following values are
|
| - * specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the
|
| - * schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the
|
| - * original schema to nullable.
|
| - */
|
| +
|
| + /// [Experimental] Allows the schema of the destination table to be updated
|
| + /// as a side effect of the load job if a schema is autodetected or supplied
|
| + /// in the job configuration. Schema update options are supported in two
|
| + /// cases: when writeDisposition is WRITE_APPEND; when writeDisposition is
|
| + /// WRITE_TRUNCATE and the destination table is a partition of a table,
|
| + /// specified by partition decorators. For normal tables, WRITE_TRUNCATE will
|
| + /// always overwrite the schema. One or more of the following values are
|
| + /// specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the
|
| + /// schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the
|
| + /// original schema to nullable.
|
| core.List<core.String> schemaUpdateOptions;
|
| - /**
|
| - * [Optional] The number of rows at the top of a CSV file that BigQuery will
|
| - * skip when loading the data. The default value is 0. This property is useful
|
| - * if you have header rows in the file that should be skipped.
|
| - */
|
| +
|
| + /// [Optional] The number of rows at the top of a CSV file that BigQuery will
|
| + /// skip when loading the data. The default value is 0. This property is
|
| + /// useful if you have header rows in the file that should be skipped.
|
| core.int skipLeadingRows;
|
| - /**
|
| - * [Optional] The format of the data files. For CSV files, specify "CSV". For
|
| - * datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON,
|
| - * specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". The default
|
| - * value is CSV.
|
| - */
|
| +
|
| + /// [Optional] The format of the data files. For CSV files, specify "CSV".
|
| + /// For datastore backups, specify "DATASTORE_BACKUP". For newline-delimited
|
| + /// JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". The
|
| + /// default value is CSV.
|
| core.String sourceFormat;
|
| - /**
|
| - * [Required] The fully-qualified URIs that point to your data in Google
|
| - * Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard
|
| - * character and it must come after the 'bucket' name. Size limits related to
|
| - * load jobs apply to external data sources. For Google Cloud Bigtable URIs:
|
| - * Exactly one URI can be specified and it has be a fully specified and valid
|
| - * HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore
|
| - * backups: Exactly one URI can be specified. Also, the '*' wildcard character
|
| - * is not allowed.
|
| - */
|
| +
|
| + /// [Required] The fully-qualified URIs that point to your data in Google
|
| + /// Cloud. For Google Cloud Storage URIs: Each URI can contain one '*'
|
| + /// wildcard character and it must come after the 'bucket' name. Size limits
|
| + /// related to load jobs apply to external data sources. For Google Cloud
|
| + /// Bigtable URIs: Exactly one URI can be specified and it has to be a fully
|
| + /// specified and valid HTTPS URL for a Google Cloud Bigtable table. For
|
| + /// Google Cloud Datastore backups: Exactly one URI can be specified. Also,
|
| + /// the '*' wildcard character is not allowed.
|
| core.List<core.String> sourceUris;
|
| - /**
|
| - * [Experimental] If specified, configures time-based partitioning for the
|
| - * destination table.
|
| - */
|
| +
|
| + /// [Experimental] If specified, configures time-based partitioning for the
|
| + /// destination table.
|
| TimePartitioning timePartitioning;
|
| - /**
|
| - * [Optional] Specifies the action that occurs if the destination table
|
| - * already exists. The following values are supported: WRITE_TRUNCATE: If the
|
| - * table already exists, BigQuery overwrites the table data. WRITE_APPEND: If
|
| - * the table already exists, BigQuery appends the data to the table.
|
| - * WRITE_EMPTY: If the table already exists and contains data, a 'duplicate'
|
| - * error is returned in the job result. The default value is WRITE_APPEND.
|
| - * Each action is atomic and only occurs if BigQuery is able to complete the
|
| - * job successfully. Creation, truncation and append actions occur as one
|
| - * atomic update upon job completion.
|
| - */
|
| +
|
| + /// [Optional] Specifies the action that occurs if the destination table
|
| + /// already exists. The following values are supported: WRITE_TRUNCATE: If
|
| + /// the table already exists, BigQuery overwrites the table data.
|
| + /// WRITE_APPEND: If the table already exists, BigQuery appends the data to
|
| + /// the table. WRITE_EMPTY: If the table already exists and contains data, a
|
| + /// 'duplicate' error is returned in the job result. The default value is
|
| + /// WRITE_APPEND. Each action is atomic and only occurs if BigQuery is able
|
| + /// to complete the job successfully. Creation, truncation and append actions
|
| + /// occur as one atomic update upon job completion.
|
| core.String writeDisposition;
|
|
|
| JobConfigurationLoad();
|
| @@ -3012,6 +3152,10 @@ class JobConfigurationLoad {
|
| if (_json.containsKey("createDisposition")) {
|
| createDisposition = _json["createDisposition"];
|
| }
|
| + if (_json.containsKey("destinationEncryptionConfiguration")) {
|
| + destinationEncryptionConfiguration = new EncryptionConfiguration.fromJson(
|
| + _json["destinationEncryptionConfiguration"]);
|
| + }
|
| if (_json.containsKey("destinationTable")) {
|
| destinationTable = new TableReference.fromJson(_json["destinationTable"]);
|
| }
|
| @@ -3058,7 +3202,8 @@ class JobConfigurationLoad {
|
| sourceUris = _json["sourceUris"];
|
| }
|
| if (_json.containsKey("timePartitioning")) {
|
| - timePartitioning = new TimePartitioning.fromJson(_json["timePartitioning"]);
|
| + timePartitioning =
|
| + new TimePartitioning.fromJson(_json["timePartitioning"]);
|
| }
|
| if (_json.containsKey("writeDisposition")) {
|
| writeDisposition = _json["writeDisposition"];
|
| @@ -3066,7 +3211,8 @@ class JobConfigurationLoad {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (allowJaggedRows != null) {
|
| _json["allowJaggedRows"] = allowJaggedRows;
|
| }
|
| @@ -3079,6 +3225,10 @@ class JobConfigurationLoad {
|
| if (createDisposition != null) {
|
| _json["createDisposition"] = createDisposition;
|
| }
|
| + if (destinationEncryptionConfiguration != null) {
|
| + _json["destinationEncryptionConfiguration"] =
|
| + (destinationEncryptionConfiguration).toJson();
|
| + }
|
| if (destinationTable != null) {
|
| _json["destinationTable"] = (destinationTable).toJson();
|
| }
|
| @@ -3135,128 +3285,118 @@ class JobConfigurationLoad {
|
| }
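|
| A sketch of a load configuration exercising the new
| destinationEncryptionConfiguration field, assuming EncryptionConfiguration
| exposes a kmsKeyName field; all resource names are placeholders:
|
|   final load = new JobConfigurationLoad()
|     ..sourceFormat = "CSV"
|     ..skipLeadingRows = 1
|     ..fieldDelimiter = ","
|     ..writeDisposition = "WRITE_APPEND"
|     ..sourceUris = ["gs://example-bucket/staging/*.csv"]
|     ..destinationTable = (new TableReference()
|       ..projectId = "my-project"
|       ..datasetId = "my_dataset"
|       ..tableId = "events")
|     ..destinationEncryptionConfiguration = (new EncryptionConfiguration()
|       ..kmsKeyName =
|           "projects/my-project/locations/us/keyRings/my-ring/cryptoKeys/my-key");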
|
|
|
| class JobConfigurationQuery {
|
| - /**
|
| - * [Optional] If true and query uses legacy SQL dialect, allows the query to
|
| - * produce arbitrarily large result tables at a slight cost in performance.
|
| - * Requires destinationTable to be set. For standard SQL queries, this flag is
|
| - * ignored and large results are always allowed. However, you must still set
|
| - * destinationTable when result size exceeds the allowed maximum response
|
| - * size.
|
| - */
|
| + /// [Optional] If true and query uses legacy SQL dialect, allows the query to
|
| + /// produce arbitrarily large result tables at a slight cost in performance.
|
| + /// Requires destinationTable to be set. For standard SQL queries, this flag
|
| + /// is ignored and large results are always allowed. However, you must still
|
| + /// set destinationTable when result size exceeds the allowed maximum
|
| + /// response size.
|
| core.bool allowLargeResults;
|
| - /**
|
| - * [Optional] Specifies whether the job is allowed to create new tables. The
|
| - * following values are supported: CREATE_IF_NEEDED: If the table does not
|
| - * exist, BigQuery creates the table. CREATE_NEVER: The table must already
|
| - * exist. If it does not, a 'notFound' error is returned in the job result.
|
| - * The default value is CREATE_IF_NEEDED. Creation, truncation and append
|
| - * actions occur as one atomic update upon job completion.
|
| - */
|
| +
|
| + /// [Optional] Specifies whether the job is allowed to create new tables. The
|
| + /// following values are supported: CREATE_IF_NEEDED: If the table does not
|
| + /// exist, BigQuery creates the table. CREATE_NEVER: The table must already
|
| + /// exist. If it does not, a 'notFound' error is returned in the job result.
|
| + /// The default value is CREATE_IF_NEEDED. Creation, truncation and append
|
| + /// actions occur as one atomic update upon job completion.
|
| core.String createDisposition;
|
| - /**
|
| - * [Optional] Specifies the default dataset to use for unqualified table names
|
| - * in the query.
|
| - */
|
| +
|
| + /// [Optional] Specifies the default dataset to use for unqualified table
|
| + /// names in the query.
|
| DatasetReference defaultDataset;
|
| - /**
|
| - * [Optional] Describes the table where the query results should be stored. If
|
| - * not present, a new table will be created to store the results. This
|
| - * property must be set for large results that exceed the maximum response
|
| - * size.
|
| - */
|
| +
|
| + /// [Experimental] Custom encryption configuration (e.g., Cloud KMS keys).
|
| + EncryptionConfiguration destinationEncryptionConfiguration;
|
| +
|
| + /// [Optional] Describes the table where the query results should be stored.
|
| + /// If not present, a new table will be created to store the results. This
|
| + /// property must be set for large results that exceed the maximum response
|
| + /// size.
|
| TableReference destinationTable;
|
| - /**
|
| - * [Optional] If true and query uses legacy SQL dialect, flattens all nested
|
| - * and repeated fields in the query results. allowLargeResults must be true if
|
| - * this is set to false. For standard SQL queries, this flag is ignored and
|
| - * results are never flattened.
|
| - */
|
| +
|
| + /// [Optional] If true and query uses legacy SQL dialect, flattens all nested
|
| + /// and repeated fields in the query results. allowLargeResults must be true
|
| + /// if this is set to false. For standard SQL queries, this flag is ignored
|
| + /// and results are never flattened.
|
| core.bool flattenResults;
|
| - /**
|
| - * [Optional] Limits the billing tier for this job. Queries that have resource
|
| - * usage beyond this tier will fail (without incurring a charge). If
|
| - * unspecified, this will be set to your project default.
|
| - */
|
| +
|
| + /// [Optional] Limits the billing tier for this job. Queries that have
|
| + /// resource usage beyond this tier will fail (without incurring a charge).
|
| + /// If unspecified, this will be set to your project default.
|
| core.int maximumBillingTier;
|
| - /**
|
| - * [Optional] Limits the bytes billed for this job. Queries that will have
|
| - * bytes billed beyond this limit will fail (without incurring a charge). If
|
| - * unspecified, this will be set to your project default.
|
| - */
|
| +
|
| + /// [Optional] Limits the bytes billed for this job. Queries that will have
|
| + /// bytes billed beyond this limit will fail (without incurring a charge). If
|
| + /// unspecified, this will be set to your project default.
|
| core.String maximumBytesBilled;
|
| - /**
|
| - * Standard SQL only. Set to POSITIONAL to use positional (?) query parameters
|
| - * or to NAMED to use named (@myparam) query parameters in this query.
|
| - */
|
| +
|
| + /// Standard SQL only. Set to POSITIONAL to use positional (?) query
|
| + /// parameters or to NAMED to use named (@myparam) query parameters in this
|
| + /// query.
|
| core.String parameterMode;
|
| - /** [Deprecated] This property is deprecated. */
|
| +
|
| + /// [Deprecated] This property is deprecated.
|
| core.bool preserveNulls;
|
| - /**
|
| - * [Optional] Specifies a priority for the query. Possible values include
|
| - * INTERACTIVE and BATCH. The default value is INTERACTIVE.
|
| - */
|
| +
|
| + /// [Optional] Specifies a priority for the query. Possible values include
|
| + /// INTERACTIVE and BATCH. The default value is INTERACTIVE.
|
| core.String priority;
|
| - /**
|
| - * [Required] SQL query text to execute. The useLegacySql field can be used to
|
| - * indicate whether the query uses legacy SQL or standard SQL.
|
| - */
|
| +
|
| + /// [Required] SQL query text to execute. The useLegacySql field can be used
|
| + /// to indicate whether the query uses legacy SQL or standard SQL.
|
| core.String query;
|
| - /** Query parameters for standard SQL queries. */
|
| +
|
| + /// Query parameters for standard SQL queries.
|
| core.List<QueryParameter> queryParameters;
|
| - /**
|
| - * [Experimental] Allows the schema of the destination table to be updated as
|
| - * a side effect of the query job. Schema update options are supported in two
|
| - * cases: when writeDisposition is WRITE_APPEND; when writeDisposition is
|
| - * WRITE_TRUNCATE and the destination table is a partition of a table,
|
| - * specified by partition decorators. For normal tables, WRITE_TRUNCATE will
|
| - * always overwrite the schema. One or more of the following values are
|
| - * specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the
|
| - * schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the
|
| - * original schema to nullable.
|
| - */
|
| +
|
| + /// [Experimental] Allows the schema of the destination table to be updated
|
| + /// as a side effect of the query job. Schema update options are supported in
|
| + /// two cases: when writeDisposition is WRITE_APPEND; when writeDisposition
|
| + /// is WRITE_TRUNCATE and the destination table is a partition of a table,
|
| + /// specified by partition decorators. For normal tables, WRITE_TRUNCATE will
|
| + /// always overwrite the schema. One or more of the following values are
|
| + /// specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the
|
| + /// schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the
|
| + /// original schema to nullable.
|
| core.List<core.String> schemaUpdateOptions;
|
| - /**
|
| - * [Optional] If querying an external data source outside of BigQuery,
|
| - * describes the data format, location and other properties of the data
|
| - * source. By defining these properties, the data source can then be queried
|
| - * as if it were a standard BigQuery table.
|
| - */
|
| +
|
| + /// [Optional] If querying an external data source outside of BigQuery,
|
| + /// describes the data format, location and other properties of the data
|
| + /// source. By defining these properties, the data source can then be queried
|
| + /// as if it were a standard BigQuery table.
|
| core.Map<core.String, ExternalDataConfiguration> tableDefinitions;
|
| - /**
|
| - * [Experimental] If specified, configures time-based partitioning for the
|
| - * destination table.
|
| - */
|
| +
|
| + /// [Experimental] If specified, configures time-based partitioning for the
|
| + /// destination table.
|
| TimePartitioning timePartitioning;
|
| - /**
|
| - * Specifies whether to use BigQuery's legacy SQL dialect for this query. The
|
| - * default value is true. If set to false, the query will use BigQuery's
|
| - * standard SQL: https://cloud.google.com/bigquery/sql-reference/ When
|
| - * useLegacySql is set to false, the value of flattenResults is ignored; query
|
| - * will be run as if flattenResults is false.
|
| - */
|
| +
|
| + /// Specifies whether to use BigQuery's legacy SQL dialect for this query.
|
| + /// The default value is true. If set to false, the query will use BigQuery's
|
| + /// standard SQL: https://cloud.google.com/bigquery/sql-reference/ When
|
| + /// useLegacySql is set to false, the value of flattenResults is ignored;
|
| + /// query will be run as if flattenResults is false.
|
| core.bool useLegacySql;
|
| - /**
|
| - * [Optional] Whether to look for the result in the query cache. The query
|
| - * cache is a best-effort cache that will be flushed whenever tables in the
|
| - * query are modified. Moreover, the query cache is only available when a
|
| - * query does not have a destination table specified. The default value is
|
| - * true.
|
| - */
|
| +
|
| + /// [Optional] Whether to look for the result in the query cache. The query
|
| + /// cache is a best-effort cache that will be flushed whenever tables in the
|
| + /// query are modified. Moreover, the query cache is only available when a
|
| + /// query does not have a destination table specified. The default value is
|
| + /// true.
|
| core.bool useQueryCache;
|
| - /** Describes user-defined function resources used in the query. */
|
| +
|
| + /// Describes user-defined function resources used in the query.
|
| core.List<UserDefinedFunctionResource> userDefinedFunctionResources;
|
| - /**
|
| - * [Optional] Specifies the action that occurs if the destination table
|
| - * already exists. The following values are supported: WRITE_TRUNCATE: If the
|
| - * table already exists, BigQuery overwrites the table data and uses the
|
| - * schema from the query result. WRITE_APPEND: If the table already exists,
|
| - * BigQuery appends the data to the table. WRITE_EMPTY: If the table already
|
| - * exists and contains data, a 'duplicate' error is returned in the job
|
| - * result. The default value is WRITE_EMPTY. Each action is atomic and only
|
| - * occurs if BigQuery is able to complete the job successfully. Creation,
|
| - * truncation and append actions occur as one atomic update upon job
|
| - * completion.
|
| - */
|
| +
|
| + /// [Optional] Specifies the action that occurs if the destination table
|
| + /// already exists. The following values are supported: WRITE_TRUNCATE: If
|
| + /// the table already exists, BigQuery overwrites the table data and uses the
|
| + /// schema from the query result. WRITE_APPEND: If the table already exists,
|
| + /// BigQuery appends the data to the table. WRITE_EMPTY: If the table already
|
| + /// exists and contains data, a 'duplicate' error is returned in the job
|
| + /// result. The default value is WRITE_EMPTY. Each action is atomic and only
|
| + /// occurs if BigQuery is able to complete the job successfully. Creation,
|
| + /// truncation and append actions occur as one atomic update upon job
|
| + /// completion.
|
| core.String writeDisposition;
|
|
|
| JobConfigurationQuery();
|
| @@ -3271,6 +3411,10 @@ class JobConfigurationQuery {
|
| if (_json.containsKey("defaultDataset")) {
|
| defaultDataset = new DatasetReference.fromJson(_json["defaultDataset"]);
|
| }
|
| + if (_json.containsKey("destinationEncryptionConfiguration")) {
|
| + destinationEncryptionConfiguration = new EncryptionConfiguration.fromJson(
|
| + _json["destinationEncryptionConfiguration"]);
|
| + }
|
| if (_json.containsKey("destinationTable")) {
|
| destinationTable = new TableReference.fromJson(_json["destinationTable"]);
|
| }
|
| @@ -3296,16 +3440,23 @@ class JobConfigurationQuery {
|
| query = _json["query"];
|
| }
|
| if (_json.containsKey("queryParameters")) {
|
| - queryParameters = _json["queryParameters"].map((value) => new QueryParameter.fromJson(value)).toList();
|
| + queryParameters = _json["queryParameters"]
|
| + .map((value) => new QueryParameter.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("schemaUpdateOptions")) {
|
| schemaUpdateOptions = _json["schemaUpdateOptions"];
|
| }
|
| if (_json.containsKey("tableDefinitions")) {
|
| - tableDefinitions = commons.mapMap<core.Map<core.String, core.Object>, ExternalDataConfiguration>(_json["tableDefinitions"], (core.Map<core.String, core.Object> item) => new ExternalDataConfiguration.fromJson(item));
|
| + tableDefinitions = commons.mapMap<core.Map<core.String, core.Object>,
|
| + ExternalDataConfiguration>(
|
| + _json["tableDefinitions"],
|
| + (core.Map<core.String, core.Object> item) =>
|
| + new ExternalDataConfiguration.fromJson(item));
|
| }
|
| if (_json.containsKey("timePartitioning")) {
|
| - timePartitioning = new TimePartitioning.fromJson(_json["timePartitioning"]);
|
| + timePartitioning =
|
| + new TimePartitioning.fromJson(_json["timePartitioning"]);
|
| }
|
| if (_json.containsKey("useLegacySql")) {
|
| useLegacySql = _json["useLegacySql"];
|
| @@ -3314,7 +3465,9 @@ class JobConfigurationQuery {
|
| useQueryCache = _json["useQueryCache"];
|
| }
|
| if (_json.containsKey("userDefinedFunctionResources")) {
|
| - userDefinedFunctionResources = _json["userDefinedFunctionResources"].map((value) => new UserDefinedFunctionResource.fromJson(value)).toList();
|
| + userDefinedFunctionResources = _json["userDefinedFunctionResources"]
|
| + .map((value) => new UserDefinedFunctionResource.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("writeDisposition")) {
|
| writeDisposition = _json["writeDisposition"];
|
| @@ -3322,7 +3475,8 @@ class JobConfigurationQuery {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (allowLargeResults != null) {
|
| _json["allowLargeResults"] = allowLargeResults;
|
| }
|
| @@ -3332,6 +3486,10 @@ class JobConfigurationQuery {
|
| if (defaultDataset != null) {
|
| _json["defaultDataset"] = (defaultDataset).toJson();
|
| }
|
| + if (destinationEncryptionConfiguration != null) {
|
| + _json["destinationEncryptionConfiguration"] =
|
| + (destinationEncryptionConfiguration).toJson();
|
| + }
|
| if (destinationTable != null) {
|
| _json["destinationTable"] = (destinationTable).toJson();
|
| }
|
| @@ -3357,13 +3515,16 @@ class JobConfigurationQuery {
|
| _json["query"] = query;
|
| }
|
| if (queryParameters != null) {
|
| - _json["queryParameters"] = queryParameters.map((value) => (value).toJson()).toList();
|
| + _json["queryParameters"] =
|
| + queryParameters.map((value) => (value).toJson()).toList();
|
| }
|
| if (schemaUpdateOptions != null) {
|
| _json["schemaUpdateOptions"] = schemaUpdateOptions;
|
| }
|
| if (tableDefinitions != null) {
|
| - _json["tableDefinitions"] = commons.mapMap<ExternalDataConfiguration, core.Map<core.String, core.Object>>(tableDefinitions, (ExternalDataConfiguration item) => (item).toJson());
|
| + _json["tableDefinitions"] = commons.mapMap<ExternalDataConfiguration,
|
| + core.Map<core.String, core.Object>>(tableDefinitions,
|
| + (ExternalDataConfiguration item) => (item).toJson());
|
| }
|
| if (timePartitioning != null) {
|
| _json["timePartitioning"] = (timePartitioning).toJson();
|
| @@ -3375,7 +3536,9 @@ class JobConfigurationQuery {
|
| _json["useQueryCache"] = useQueryCache;
|
| }
|
| if (userDefinedFunctionResources != null) {
|
| - _json["userDefinedFunctionResources"] = userDefinedFunctionResources.map((value) => (value).toJson()).toList();
|
| + _json["userDefinedFunctionResources"] = userDefinedFunctionResources
|
| + .map((value) => (value).toJson())
|
| + .toList();
|
| }
|
| if (writeDisposition != null) {
|
| _json["writeDisposition"] = writeDisposition;
|
| @@ -3385,32 +3548,35 @@ class JobConfigurationQuery {
|
| }
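|
| A sketch of a standard-SQL query configuration that writes its result to a
| destination table; the project, dataset and table names are placeholders:
|
|   final queryConfig = new JobConfigurationQuery()
|     ..useLegacySql = false
|     ..query = "SELECT user_id, COUNT(*) AS events "
|         "FROM my_dataset.events GROUP BY user_id"
|     ..maximumBytesBilled = "1000000000"
|     ..writeDisposition = "WRITE_TRUNCATE"
|     ..destinationTable = (new TableReference()
|       ..projectId = "my-project"
|       ..datasetId = "my_dataset"
|       ..tableId = "events_by_user");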
|
|
|
| class JobConfigurationTableCopy {
|
| - /**
|
| - * [Optional] Specifies whether the job is allowed to create new tables. The
|
| - * following values are supported: CREATE_IF_NEEDED: If the table does not
|
| - * exist, BigQuery creates the table. CREATE_NEVER: The table must already
|
| - * exist. If it does not, a 'notFound' error is returned in the job result.
|
| - * The default value is CREATE_IF_NEEDED. Creation, truncation and append
|
| - * actions occur as one atomic update upon job completion.
|
| - */
|
| + /// [Optional] Specifies whether the job is allowed to create new tables. The
|
| + /// following values are supported: CREATE_IF_NEEDED: If the table does not
|
| + /// exist, BigQuery creates the table. CREATE_NEVER: The table must already
|
| + /// exist. If it does not, a 'notFound' error is returned in the job result.
|
| + /// The default value is CREATE_IF_NEEDED. Creation, truncation and append
|
| + /// actions occur as one atomic update upon job completion.
|
| core.String createDisposition;
|
| - /** [Required] The destination table */
|
| +
|
| + /// [Experimental] Custom encryption configuration (e.g., Cloud KMS keys).
|
| + EncryptionConfiguration destinationEncryptionConfiguration;
|
| +
|
| + /// [Required] The destination table.
|
| TableReference destinationTable;
|
| - /** [Pick one] Source table to copy. */
|
| +
|
| + /// [Pick one] Source table to copy.
|
| TableReference sourceTable;
|
| - /** [Pick one] Source tables to copy. */
|
| +
|
| + /// [Pick one] Source tables to copy.
|
| core.List<TableReference> sourceTables;
|
| - /**
|
| - * [Optional] Specifies the action that occurs if the destination table
|
| - * already exists. The following values are supported: WRITE_TRUNCATE: If the
|
| - * table already exists, BigQuery overwrites the table data. WRITE_APPEND: If
|
| - * the table already exists, BigQuery appends the data to the table.
|
| - * WRITE_EMPTY: If the table already exists and contains data, a 'duplicate'
|
| - * error is returned in the job result. The default value is WRITE_EMPTY. Each
|
| - * action is atomic and only occurs if BigQuery is able to complete the job
|
| - * successfully. Creation, truncation and append actions occur as one atomic
|
| - * update upon job completion.
|
| - */
|
| +
|
| + /// [Optional] Specifies the action that occurs if the destination table
|
| + /// already exists. The following values are supported: WRITE_TRUNCATE: If
|
| + /// the table already exists, BigQuery overwrites the table data.
|
| + /// WRITE_APPEND: If the table already exists, BigQuery appends the data to
|
| + /// the table. WRITE_EMPTY: If the table already exists and contains data, a
|
| + /// 'duplicate' error is returned in the job result. The default value is
|
| + /// WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is able to
|
| + /// complete the job successfully. Creation, truncation and append actions
|
| + /// occur as one atomic update upon job completion.
|
| core.String writeDisposition;
|
|
|
| JobConfigurationTableCopy();
|
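| A minimal sketch of how application code might fill in this copy
| configuration; the TableReference fields (projectId, datasetId, tableId) are
| assumed from their definition elsewhere in this file, and the resulting
| object would normally be wrapped in a Job's configuration and submitted
| through the jobs resource.
|
|   final copy = new JobConfigurationTableCopy()
|     ..sourceTable = (new TableReference()
|       ..projectId = 'my-project'
|       ..datasetId = 'src_dataset'
|       ..tableId = 'src_table')
|     ..destinationTable = (new TableReference()
|       ..projectId = 'my-project'
|       ..datasetId = 'dst_dataset'
|       ..tableId = 'dst_table')
|     ..createDisposition = 'CREATE_IF_NEEDED'
|     ..writeDisposition = 'WRITE_TRUNCATE';
|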
| @@ -3419,6 +3585,10 @@ class JobConfigurationTableCopy {
|
| if (_json.containsKey("createDisposition")) {
|
| createDisposition = _json["createDisposition"];
|
| }
|
| + if (_json.containsKey("destinationEncryptionConfiguration")) {
|
| + destinationEncryptionConfiguration = new EncryptionConfiguration.fromJson(
|
| + _json["destinationEncryptionConfiguration"]);
|
| + }
|
| if (_json.containsKey("destinationTable")) {
|
| destinationTable = new TableReference.fromJson(_json["destinationTable"]);
|
| }
|
| @@ -3426,7 +3596,9 @@ class JobConfigurationTableCopy {
|
| sourceTable = new TableReference.fromJson(_json["sourceTable"]);
|
| }
|
| if (_json.containsKey("sourceTables")) {
|
| - sourceTables = _json["sourceTables"].map((value) => new TableReference.fromJson(value)).toList();
|
| + sourceTables = _json["sourceTables"]
|
| + .map((value) => new TableReference.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("writeDisposition")) {
|
| writeDisposition = _json["writeDisposition"];
|
| @@ -3434,10 +3606,15 @@ class JobConfigurationTableCopy {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (createDisposition != null) {
|
| _json["createDisposition"] = createDisposition;
|
| }
|
| + if (destinationEncryptionConfiguration != null) {
|
| + _json["destinationEncryptionConfiguration"] =
|
| + (destinationEncryptionConfiguration).toJson();
|
| + }
|
| if (destinationTable != null) {
|
| _json["destinationTable"] = (destinationTable).toJson();
|
| }
|
| @@ -3445,7 +3622,8 @@ class JobConfigurationTableCopy {
|
| _json["sourceTable"] = (sourceTable).toJson();
|
| }
|
| if (sourceTables != null) {
|
| - _json["sourceTables"] = sourceTables.map((value) => (value).toJson()).toList();
|
| + _json["sourceTables"] =
|
| + sourceTables.map((value) => (value).toJson()).toList();
|
| }
|
| if (writeDisposition != null) {
|
| _json["writeDisposition"] = writeDisposition;
|
| @@ -3455,29 +3633,33 @@ class JobConfigurationTableCopy {
|
| }
|
|
|
| class JobListJobs {
|
| - /** [Full-projection-only] Specifies the job configuration. */
|
| + /// [Full-projection-only] Specifies the job configuration.
|
| JobConfiguration configuration;
|
| - /** A result object that will be present only if the job has failed. */
|
| +
|
| + /// A result object that will be present only if the job has failed.
|
| ErrorProto errorResult;
|
| - /** Unique opaque ID of the job. */
|
| +
|
| + /// Unique opaque ID of the job.
|
| core.String id;
|
| - /** Job reference uniquely identifying the job. */
|
| +
|
| + /// Job reference uniquely identifying the job.
|
| JobReference jobReference;
|
| - /** The resource type. */
|
| +
|
| + /// The resource type.
|
| core.String kind;
|
| - /**
|
| - * Running state of the job. When the state is DONE, errorResult can be
|
| - * checked to determine whether the job succeeded or failed.
|
| - */
|
| +
|
| + /// Running state of the job. When the state is DONE, errorResult can be
|
| + /// checked to determine whether the job succeeded or failed.
|
| core.String state;
|
| - /**
|
| - * [Output-only] Information about the job, including starting time and ending
|
| - * time of the job.
|
| - */
|
| +
|
| + /// [Output-only] Information about the job, including starting time and
|
| + /// ending time of the job.
|
| JobStatistics statistics;
|
| - /** [Full-projection-only] Describes the state of the job. */
|
| +
|
| + /// [Full-projection-only] Describes the state of the job.
|
| JobStatus status;
|
| - /** [Full-projection-only] Email address of the user who ran the job. */
|
| +
|
| + /// [Full-projection-only] Email address of the user who ran the job.
|
| core.String userEmail;
|
|
|
| JobListJobs();
|
| @@ -3513,7 +3695,8 @@ class JobListJobs {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (configuration != null) {
|
| _json["configuration"] = (configuration).toJson();
|
| }
|
| @@ -3546,13 +3729,16 @@ class JobListJobs {
|
| }
|
|
|
| class JobList {
|
| - /** A hash of this page of results. */
|
| + /// A hash of this page of results.
|
| core.String etag;
|
| - /** List of jobs that were requested. */
|
| +
|
| + /// List of jobs that were requested.
|
| core.List<JobListJobs> jobs;
|
| - /** The resource type of the response. */
|
| +
|
| + /// The resource type of the response.
|
| core.String kind;
|
| - /** A token to request the next page of results. */
|
| +
|
| + /// A token to request the next page of results.
|
| core.String nextPageToken;
|
|
|
| JobList();
|
| @@ -3562,7 +3748,9 @@ class JobList {
|
| etag = _json["etag"];
|
| }
|
| if (_json.containsKey("jobs")) {
|
| - jobs = _json["jobs"].map((value) => new JobListJobs.fromJson(value)).toList();
|
| + jobs = _json["jobs"]
|
| + .map((value) => new JobListJobs.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("kind")) {
|
| kind = _json["kind"];
|
| @@ -3573,7 +3761,8 @@ class JobList {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (etag != null) {
|
| _json["etag"] = etag;
|
| }
|
| @@ -3591,13 +3780,12 @@ class JobList {
|
| }
|
|
|
| class JobReference {
|
| - /**
|
| - * [Required] The ID of the job. The ID must contain only letters (a-z, A-Z),
|
| - * numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024
|
| - * characters.
|
| - */
|
| + /// [Required] The ID of the job. The ID must contain only letters (a-z,
|
| + /// A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length
|
| + /// is 1,024 characters.
|
| core.String jobId;
|
| - /** [Required] The ID of the project containing this job. */
|
| +
|
| + /// [Required] The ID of the project containing this job.
|
| core.String projectId;
|
|
|
| JobReference();
|
| @@ -3612,7 +3800,8 @@ class JobReference {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (jobId != null) {
|
| _json["jobId"] = jobId;
|
| }
|
| @@ -3624,32 +3813,30 @@ class JobReference {
|
| }
|
|
|
| class JobStatistics {
|
| - /**
|
| - * [Output-only] Creation time of this job, in milliseconds since the epoch.
|
| - * This field will be present on all jobs.
|
| - */
|
| + /// [Output-only] Creation time of this job, in milliseconds since the epoch.
|
| + /// This field will be present on all jobs.
|
| core.String creationTime;
|
| - /**
|
| - * [Output-only] End time of this job, in milliseconds since the epoch. This
|
| - * field will be present whenever a job is in the DONE state.
|
| - */
|
| +
|
| + /// [Output-only] End time of this job, in milliseconds since the epoch. This
|
| + /// field will be present whenever a job is in the DONE state.
|
| core.String endTime;
|
| - /** [Output-only] Statistics for an extract job. */
|
| +
|
| + /// [Output-only] Statistics for an extract job.
|
| JobStatistics4 extract;
|
| - /** [Output-only] Statistics for a load job. */
|
| +
|
| + /// [Output-only] Statistics for a load job.
|
| JobStatistics3 load;
|
| - /** [Output-only] Statistics for a query job. */
|
| +
|
| + /// [Output-only] Statistics for a query job.
|
| JobStatistics2 query;
|
| - /**
|
| - * [Output-only] Start time of this job, in milliseconds since the epoch. This
|
| - * field will be present when the job transitions from the PENDING state to
|
| - * either RUNNING or DONE.
|
| - */
|
| +
|
| + /// [Output-only] Start time of this job, in milliseconds since the epoch.
|
| + /// This field will be present when the job transitions from the PENDING
|
| + /// state to either RUNNING or DONE.
|
| core.String startTime;
|
| - /**
|
| - * [Output-only] [Deprecated] Use the bytes processed in the query statistics
|
| - * instead.
|
| - */
|
| +
|
| + /// [Output-only] [Deprecated] Use the bytes processed in the query
|
| + /// statistics instead.
|
| core.String totalBytesProcessed;
|
|
|
| JobStatistics();
|
| @@ -3679,7 +3866,8 @@ class JobStatistics {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (creationTime != null) {
|
| _json["creationTime"] = creationTime;
|
| }
|
| @@ -3706,39 +3894,38 @@ class JobStatistics {
|
| }
|
|
|
| class JobStatistics2 {
|
| - /** [Output-only] Billing tier for the job. */
|
| + /// [Output-only] Billing tier for the job.
|
| core.int billingTier;
|
| - /**
|
| - * [Output-only] Whether the query result was fetched from the query cache.
|
| - */
|
| +
|
| + /// [Output-only] Whether the query result was fetched from the query cache.
|
| core.bool cacheHit;
|
| - /**
|
| - * [Output-only] The number of rows affected by a DML statement. Present only
|
| - * for DML statements INSERT, UPDATE or DELETE.
|
| - */
|
| +
|
| + /// [Output-only] The number of rows affected by a DML statement. Present
|
| + /// only for DML statements INSERT, UPDATE or DELETE.
|
| core.String numDmlAffectedRows;
|
| - /** [Output-only] Describes execution plan for the query. */
|
| +
|
| + /// [Output-only] Describes execution plan for the query.
|
| core.List<ExplainQueryStage> queryPlan;
|
| - /**
|
| - * [Output-only, Experimental] Referenced tables for the job. Queries that
|
| - * reference more than 50 tables will not have a complete list.
|
| - */
|
| +
|
| + /// [Output-only, Experimental] Referenced tables for the job. Queries that
|
| + /// reference more than 50 tables will not have a complete list.
|
| core.List<TableReference> referencedTables;
|
| - /**
|
| - * [Output-only, Experimental] The schema of the results. Present only for
|
| - * successful dry run of non-legacy SQL queries.
|
| - */
|
| +
|
| + /// [Output-only, Experimental] The schema of the results. Present only for
|
| + /// successful dry run of non-legacy SQL queries.
|
| TableSchema schema;
|
| - /** [Output-only, Experimental] The type of query statement, if valid. */
|
| +
|
| + /// [Output-only, Experimental] The type of query statement, if valid.
|
| core.String statementType;
|
| - /** [Output-only] Total bytes billed for the job. */
|
| +
|
| + /// [Output-only] Total bytes billed for the job.
|
| core.String totalBytesBilled;
|
| - /** [Output-only] Total bytes processed for the job. */
|
| +
|
| + /// [Output-only] Total bytes processed for the job.
|
| core.String totalBytesProcessed;
|
| - /**
|
| - * [Output-only, Experimental] Standard SQL only: list of undeclared query
|
| - * parameters detected during a dry run validation.
|
| - */
|
| +
|
| + /// [Output-only, Experimental] Standard SQL only: list of undeclared query
|
| + /// parameters detected during a dry run validation.
|
| core.List<QueryParameter> undeclaredQueryParameters;
|
|
|
| JobStatistics2();
|
| @@ -3754,10 +3941,14 @@ class JobStatistics2 {
|
| numDmlAffectedRows = _json["numDmlAffectedRows"];
|
| }
|
| if (_json.containsKey("queryPlan")) {
|
| - queryPlan = _json["queryPlan"].map((value) => new ExplainQueryStage.fromJson(value)).toList();
|
| + queryPlan = _json["queryPlan"]
|
| + .map((value) => new ExplainQueryStage.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("referencedTables")) {
|
| - referencedTables = _json["referencedTables"].map((value) => new TableReference.fromJson(value)).toList();
|
| + referencedTables = _json["referencedTables"]
|
| + .map((value) => new TableReference.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("schema")) {
|
| schema = new TableSchema.fromJson(_json["schema"]);
|
| @@ -3772,12 +3963,15 @@ class JobStatistics2 {
|
| totalBytesProcessed = _json["totalBytesProcessed"];
|
| }
|
| if (_json.containsKey("undeclaredQueryParameters")) {
|
| - undeclaredQueryParameters = _json["undeclaredQueryParameters"].map((value) => new QueryParameter.fromJson(value)).toList();
|
| + undeclaredQueryParameters = _json["undeclaredQueryParameters"]
|
| + .map((value) => new QueryParameter.fromJson(value))
|
| + .toList();
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (billingTier != null) {
|
| _json["billingTier"] = billingTier;
|
| }
|
| @@ -3791,7 +3985,8 @@ class JobStatistics2 {
|
| _json["queryPlan"] = queryPlan.map((value) => (value).toJson()).toList();
|
| }
|
| if (referencedTables != null) {
|
| - _json["referencedTables"] = referencedTables.map((value) => (value).toJson()).toList();
|
| + _json["referencedTables"] =
|
| + referencedTables.map((value) => (value).toJson()).toList();
|
| }
|
| if (schema != null) {
|
| _json["schema"] = (schema).toJson();
|
| @@ -3806,33 +4001,32 @@ class JobStatistics2 {
|
| _json["totalBytesProcessed"] = totalBytesProcessed;
|
| }
|
| if (undeclaredQueryParameters != null) {
|
| - _json["undeclaredQueryParameters"] = undeclaredQueryParameters.map((value) => (value).toJson()).toList();
|
| + _json["undeclaredQueryParameters"] =
|
| + undeclaredQueryParameters.map((value) => (value).toJson()).toList();
|
| }
|
| return _json;
|
| }
|
| }
|
|
|
| class JobStatistics3 {
|
| - /**
|
| - * [Output-only] The number of bad records encountered. Note that if the job
|
| - * has failed because of more bad records encountered than the maximum allowed
|
| - * in the load job configuration, then this number can be less than the total
|
| - * number of bad records present in the input data.
|
| - */
|
| + /// [Output-only] The number of bad records encountered. Note that if the job
|
| + /// has failed because of more bad records encountered than the maximum
|
| + /// allowed in the load job configuration, then this number can be less than
|
| + /// the total number of bad records present in the input data.
|
| core.String badRecords;
|
| - /** [Output-only] Number of bytes of source data in a load job. */
|
| +
|
| + /// [Output-only] Number of bytes of source data in a load job.
|
| core.String inputFileBytes;
|
| - /** [Output-only] Number of source files in a load job. */
|
| +
|
| + /// [Output-only] Number of source files in a load job.
|
| core.String inputFiles;
|
| - /**
|
| - * [Output-only] Size of the loaded data in bytes. Note that while a load job
|
| - * is in the running state, this value may change.
|
| - */
|
| +
|
| + /// [Output-only] Size of the loaded data in bytes. Note that while a load
|
| + /// job is in the running state, this value may change.
|
| core.String outputBytes;
|
| - /**
|
| - * [Output-only] Number of rows imported in a load job. Note that while an
|
| - * import job is in the running state, this value may change.
|
| - */
|
| +
|
| + /// [Output-only] Number of rows imported in a load job. Note that while an
|
| + /// import job is in the running state, this value may change.
|
| core.String outputRows;
|
|
|
| JobStatistics3();
|
| @@ -3856,7 +4050,8 @@ class JobStatistics3 {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (badRecords != null) {
|
| _json["badRecords"] = badRecords;
|
| }
|
| @@ -3877,11 +4072,9 @@ class JobStatistics3 {
|
| }
|
|
|
| class JobStatistics4 {
|
| - /**
|
| - * [Output-only] Number of files per destination URI or URI pattern specified
|
| - * in the extract configuration. These values will be in the same order as the
|
| - * URIs specified in the 'destinationUris' field.
|
| - */
|
| + /// [Output-only] Number of files per destination URI or URI pattern
|
| + /// specified in the extract configuration. These values will be in the same
|
| + /// order as the URIs specified in the 'destinationUris' field.
|
| core.List<core.String> destinationUriFileCounts;
|
|
|
| JobStatistics4();
|
| @@ -3893,7 +4086,8 @@ class JobStatistics4 {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (destinationUriFileCounts != null) {
|
| _json["destinationUriFileCounts"] = destinationUriFileCounts;
|
| }
|
| @@ -3902,19 +4096,17 @@ class JobStatistics4 {
|
| }
|
|
|
| class JobStatus {
|
| - /**
|
| - * [Output-only] Final error result of the job. If present, indicates that the
|
| - * job has completed and was unsuccessful.
|
| - */
|
| + /// [Output-only] Final error result of the job. If present, indicates that
|
| + /// the job has completed and was unsuccessful.
|
| ErrorProto errorResult;
|
| - /**
|
| - * [Output-only] The first errors encountered during the running of the job.
|
| - * The final message includes the number of errors that caused the process to
|
| - * stop. Errors here do not necessarily mean that the job has completed or was
|
| - * unsuccessful.
|
| - */
|
| +
|
| + /// [Output-only] The first errors encountered during the running of the job.
|
| + /// The final message includes the number of errors that caused the process
|
| + /// to stop. Errors here do not necessarily mean that the job has completed
|
| + /// or was unsuccessful.
|
| core.List<ErrorProto> errors;
|
| - /** [Output-only] Running state of the job. */
|
| +
|
| + /// [Output-only] Running state of the job.
|
| core.String state;
|
|
|
| JobStatus();
|
| @@ -3924,7 +4116,9 @@ class JobStatus {
|
| errorResult = new ErrorProto.fromJson(_json["errorResult"]);
|
| }
|
| if (_json.containsKey("errors")) {
|
| - errors = _json["errors"].map((value) => new ErrorProto.fromJson(value)).toList();
|
| + errors = _json["errors"]
|
| + .map((value) => new ErrorProto.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("state")) {
|
| state = _json["state"];
|
| @@ -3932,7 +4126,8 @@ class JobStatus {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (errorResult != null) {
|
| _json["errorResult"] = (errorResult).toJson();
|
| }
|
| @@ -3946,9 +4141,8 @@ class JobStatus {
|
| }
|
| }
|
|
|
| -/** Represents a single JSON object. */
|
| -class JsonObject
|
| - extends collection.MapBase<core.String, core.Object> {
|
| +/// Represents a single JSON object.
|
| +class JsonObject extends collection.MapBase<core.String, core.Object> {
|
| final core.Map _innerMap = {};
|
|
|
| JsonObject();
|
| @@ -3960,15 +4154,15 @@ class JsonObject
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = <core.String, core.Object>{};
|
| + final core.Map<core.String, core.Object> _json =
|
| + <core.String, core.Object>{};
|
| this.forEach((core.String key, value) {
|
| _json[key] = value;
|
| });
|
| return _json;
|
| }
|
|
|
| - core.Object operator [](core.Object key)
|
| - => _innerMap[key];
|
| + core.Object operator [](core.Object key) => _innerMap[key];
|
|
|
| operator []=(core.String key, core.Object value) {
|
| _innerMap[key] = value;
|
| @@ -3984,15 +4178,19 @@ class JsonObject
|
| }
|
|
|
| class ProjectListProjects {
|
| - /** A descriptive name for this project. */
|
| + /// A descriptive name for this project.
|
| core.String friendlyName;
|
| - /** An opaque ID of this project. */
|
| +
|
| + /// An opaque ID of this project.
|
| core.String id;
|
| - /** The resource type. */
|
| +
|
| + /// The resource type.
|
| core.String kind;
|
| - /** The numeric ID of this project. */
|
| +
|
| + /// The numeric ID of this project.
|
| core.String numericId;
|
| - /** A unique reference to this project. */
|
| +
|
| + /// A unique reference to this project.
|
| ProjectReference projectReference;
|
|
|
| ProjectListProjects();
|
| @@ -4011,12 +4209,14 @@ class ProjectListProjects {
|
| numericId = _json["numericId"];
|
| }
|
| if (_json.containsKey("projectReference")) {
|
| - projectReference = new ProjectReference.fromJson(_json["projectReference"]);
|
| + projectReference =
|
| + new ProjectReference.fromJson(_json["projectReference"]);
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (friendlyName != null) {
|
| _json["friendlyName"] = friendlyName;
|
| }
|
| @@ -4037,15 +4237,19 @@ class ProjectListProjects {
|
| }
|
|
|
| class ProjectList {
|
| - /** A hash of the page of results */
|
| + /// A hash of the page of results
|
| core.String etag;
|
| - /** The type of list. */
|
| +
|
| + /// The type of list.
|
| core.String kind;
|
| - /** A token to request the next page of results. */
|
| +
|
| + /// A token to request the next page of results.
|
| core.String nextPageToken;
|
| - /** Projects to which you have at least READ access. */
|
| +
|
| + /// Projects to which you have at least READ access.
|
| core.List<ProjectListProjects> projects;
|
| - /** The total number of projects in the list. */
|
| +
|
| + /// The total number of projects in the list.
|
| core.int totalItems;
|
|
|
| ProjectList();
|
| @@ -4061,7 +4265,9 @@ class ProjectList {
|
| nextPageToken = _json["nextPageToken"];
|
| }
|
| if (_json.containsKey("projects")) {
|
| - projects = _json["projects"].map((value) => new ProjectListProjects.fromJson(value)).toList();
|
| + projects = _json["projects"]
|
| + .map((value) => new ProjectListProjects.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("totalItems")) {
|
| totalItems = _json["totalItems"];
|
| @@ -4069,7 +4275,8 @@ class ProjectList {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (etag != null) {
|
| _json["etag"] = etag;
|
| }
|
| @@ -4090,10 +4297,8 @@ class ProjectList {
|
| }
|
|
|
| class ProjectReference {
|
| - /**
|
| - * [Required] ID of the project. Can be either the numeric ID or the assigned
|
| - * ID of the project.
|
| - */
|
| + /// [Required] ID of the project. Can be either the numeric ID or the
|
| + /// assigned ID of the project.
|
| core.String projectId;
|
|
|
| ProjectReference();
|
| @@ -4105,7 +4310,8 @@ class ProjectReference {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (projectId != null) {
|
| _json["projectId"] = projectId;
|
| }
|
| @@ -4114,14 +4320,14 @@ class ProjectReference {
|
| }
|
|
|
| class QueryParameter {
|
| - /**
|
| - * [Optional] If unset, this is a positional parameter. Otherwise, should be
|
| - * unique within a query.
|
| - */
|
| + /// [Optional] If unset, this is a positional parameter. Otherwise, should be
|
| + /// unique within a query.
|
| core.String name;
|
| - /** [Required] The type of this parameter. */
|
| +
|
| + /// [Required] The type of this parameter.
|
| QueryParameterType parameterType;
|
| - /** [Required] The value of this parameter. */
|
| +
|
| + /// [Required] The value of this parameter.
|
| QueryParameterValue parameterValue;
|
|
|
| QueryParameter();
|
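| A minimal sketch of a single named STRING parameter built from this class
| together with the QueryParameterType and QueryParameterValue classes defined
| just below (the literal values are illustrative only):
|
|   final corpus = new QueryParameter()
|     ..name = 'corpus'
|     ..parameterType = (new QueryParameterType()..type = 'STRING')
|     ..parameterValue = (new QueryParameterValue()..value = 'romeoandjuliet');
|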
| @@ -4134,12 +4340,14 @@ class QueryParameter {
|
| parameterType = new QueryParameterType.fromJson(_json["parameterType"]);
|
| }
|
| if (_json.containsKey("parameterValue")) {
|
| - parameterValue = new QueryParameterValue.fromJson(_json["parameterValue"]);
|
| + parameterValue =
|
| + new QueryParameterValue.fromJson(_json["parameterValue"]);
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (name != null) {
|
| _json["name"] = name;
|
| }
|
| @@ -4154,11 +4362,13 @@ class QueryParameter {
|
| }
|
|
|
| class QueryParameterTypeStructTypes {
|
| - /** [Optional] Human-oriented description of the field. */
|
| + /// [Optional] Human-oriented description of the field.
|
| core.String description;
|
| - /** [Optional] The name of this field. */
|
| +
|
| + /// [Optional] The name of this field.
|
| core.String name;
|
| - /** [Required] The type of this field. */
|
| +
|
| + /// [Required] The type of this field.
|
| QueryParameterType type;
|
|
|
| QueryParameterTypeStructTypes();
|
| @@ -4176,7 +4386,8 @@ class QueryParameterTypeStructTypes {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (description != null) {
|
| _json["description"] = description;
|
| }
|
| @@ -4191,14 +4402,14 @@ class QueryParameterTypeStructTypes {
|
| }
|
|
|
| class QueryParameterType {
|
| - /** [Optional] The type of the array's elements, if this is an array. */
|
| + /// [Optional] The type of the array's elements, if this is an array.
|
| QueryParameterType arrayType;
|
| - /**
|
| - * [Optional] The types of the fields of this struct, in order, if this is a
|
| - * struct.
|
| - */
|
| +
|
| + /// [Optional] The types of the fields of this struct, in order, if this is a
|
| + /// struct.
|
| core.List<QueryParameterTypeStructTypes> structTypes;
|
| - /** [Required] The top level type of this field. */
|
| +
|
| + /// [Required] The top level type of this field.
|
| core.String type;
|
|
|
| QueryParameterType();
|
| @@ -4208,7 +4419,9 @@ class QueryParameterType {
|
| arrayType = new QueryParameterType.fromJson(_json["arrayType"]);
|
| }
|
| if (_json.containsKey("structTypes")) {
|
| - structTypes = _json["structTypes"].map((value) => new QueryParameterTypeStructTypes.fromJson(value)).toList();
|
| + structTypes = _json["structTypes"]
|
| + .map((value) => new QueryParameterTypeStructTypes.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("type")) {
|
| type = _json["type"];
|
| @@ -4216,12 +4429,14 @@ class QueryParameterType {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (arrayType != null) {
|
| _json["arrayType"] = (arrayType).toJson();
|
| }
|
| if (structTypes != null) {
|
| - _json["structTypes"] = structTypes.map((value) => (value).toJson()).toList();
|
| + _json["structTypes"] =
|
| + structTypes.map((value) => (value).toJson()).toList();
|
| }
|
| if (type != null) {
|
| _json["type"] = type;
|
| @@ -4231,24 +4446,30 @@ class QueryParameterType {
|
| }
|
|
|
| class QueryParameterValue {
|
| - /** [Optional] The array values, if this is an array type. */
|
| + /// [Optional] The array values, if this is an array type.
|
| core.List<QueryParameterValue> arrayValues;
|
| - /**
|
| - * [Optional] The struct field values, in order of the struct type's
|
| - * declaration.
|
| - */
|
| +
|
| + /// [Optional] The struct field values, in order of the struct type's
|
| + /// declaration.
|
| core.Map<core.String, QueryParameterValue> structValues;
|
| - /** [Optional] The value of this value, if a simple scalar type. */
|
| +
|
| + /// [Optional] The value of this value, if a simple scalar type.
|
| core.String value;
|
|
|
| QueryParameterValue();
|
|
|
| QueryParameterValue.fromJson(core.Map _json) {
|
| if (_json.containsKey("arrayValues")) {
|
| - arrayValues = _json["arrayValues"].map((value) => new QueryParameterValue.fromJson(value)).toList();
|
| + arrayValues = _json["arrayValues"]
|
| + .map((value) => new QueryParameterValue.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("structValues")) {
|
| - structValues = commons.mapMap<core.Map<core.String, core.Object>, QueryParameterValue>(_json["structValues"], (core.Map<core.String, core.Object> item) => new QueryParameterValue.fromJson(item));
|
| + structValues = commons
|
| + .mapMap<core.Map<core.String, core.Object>, QueryParameterValue>(
|
| + _json["structValues"],
|
| + (core.Map<core.String, core.Object> item) =>
|
| + new QueryParameterValue.fromJson(item));
|
| }
|
| if (_json.containsKey("value")) {
|
| value = _json["value"];
|
| @@ -4256,12 +4477,16 @@ class QueryParameterValue {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (arrayValues != null) {
|
| - _json["arrayValues"] = arrayValues.map((value) => (value).toJson()).toList();
|
| + _json["arrayValues"] =
|
| + arrayValues.map((value) => (value).toJson()).toList();
|
| }
|
| if (structValues != null) {
|
| - _json["structValues"] = commons.mapMap<QueryParameterValue, core.Map<core.String, core.Object>>(structValues, (QueryParameterValue item) => (item).toJson());
|
| + _json["structValues"] = commons
|
| + .mapMap<QueryParameterValue, core.Map<core.String, core.Object>>(
|
| + structValues, (QueryParameterValue item) => (item).toJson());
|
| }
|
| if (value != null) {
|
| _json["value"] = value;
|
| @@ -4271,67 +4496,62 @@ class QueryParameterValue {
|
| }
|
|
|
| class QueryRequest {
|
| - /**
|
| - * [Optional] Specifies the default datasetId and projectId to assume for any
|
| - * unqualified table names in the query. If not set, all table names in the
|
| - * query string must be qualified in the format 'datasetId.tableId'.
|
| - */
|
| + /// [Optional] Specifies the default datasetId and projectId to assume for
|
| + /// any unqualified table names in the query. If not set, all table names in
|
| + /// the query string must be qualified in the format 'datasetId.tableId'.
|
| DatasetReference defaultDataset;
|
| - /**
|
| - * [Optional] If set to true, BigQuery doesn't run the job. Instead, if the
|
| - * query is valid, BigQuery returns statistics about the job such as how many
|
| - * bytes would be processed. If the query is invalid, an error returns. The
|
| - * default value is false.
|
| - */
|
| +
|
| + /// [Optional] If set to true, BigQuery doesn't run the job. Instead, if the
|
| + /// query is valid, BigQuery returns statistics about the job such as how
|
| + /// many bytes would be processed. If the query is invalid, an error returns.
|
| + /// The default value is false.
|
| core.bool dryRun;
|
| - /** The resource type of the request. */
|
| +
|
| + /// The resource type of the request.
|
| core.String kind;
|
| - /**
|
| - * [Optional] The maximum number of rows of data to return per page of
|
| - * results. Setting this flag to a small value such as 1000 and then paging
|
| - * through results might improve reliability when the query result set is
|
| - * large. In addition to this limit, responses are also limited to 10 MB. By
|
| - * default, there is no maximum row count, and only the byte limit applies.
|
| - */
|
| +
|
| + /// [Optional] The maximum number of rows of data to return per page of
|
| + /// results. Setting this flag to a small value such as 1000 and then paging
|
| + /// through results might improve reliability when the query result set is
|
| + /// large. In addition to this limit, responses are also limited to 10 MB. By
|
| + /// default, there is no maximum row count, and only the byte limit applies.
|
| core.int maxResults;
|
| - /**
|
| - * Standard SQL only. Set to POSITIONAL to use positional (?) query parameters
|
| - * or to NAMED to use named (@myparam) query parameters in this query.
|
| - */
|
| +
|
| + /// Standard SQL only. Set to POSITIONAL to use positional (?) query
|
| + /// parameters or to NAMED to use named (@myparam) query parameters in this
|
| + /// query.
|
| core.String parameterMode;
|
| - /** [Deprecated] This property is deprecated. */
|
| +
|
| + /// [Deprecated] This property is deprecated.
|
| core.bool preserveNulls;
|
| - /**
|
| - * [Required] A query string, following the BigQuery query syntax, of the
|
| - * query to execute. Example: "SELECT count(f1) FROM
|
| - * [myProjectId:myDatasetId.myTableId]".
|
| - */
|
| +
|
| + /// [Required] A query string, following the BigQuery query syntax, of the
|
| + /// query to execute. Example: "SELECT count(f1) FROM
|
| + /// [myProjectId:myDatasetId.myTableId]".
|
| core.String query;
|
| - /** Query parameters for Standard SQL queries. */
|
| +
|
| + /// Query parameters for Standard SQL queries.
|
| core.List<QueryParameter> queryParameters;
|
| - /**
|
| - * [Optional] How long to wait for the query to complete, in milliseconds,
|
| - * before the request times out and returns. Note that this is only a timeout
|
| - * for the request, not the query. If the query takes longer to run than the
|
| - * timeout value, the call returns without any results and with the
|
| - * 'jobComplete' flag set to false. You can call GetQueryResults() to wait for
|
| - * the query to complete and read the results. The default value is 10000
|
| - * milliseconds (10 seconds).
|
| - */
|
| +
|
| + /// [Optional] How long to wait for the query to complete, in milliseconds,
|
| + /// before the request times out and returns. Note that this is only a
|
| + /// timeout for the request, not the query. If the query takes longer to run
|
| + /// than the timeout value, the call returns without any results and with the
|
| + /// 'jobComplete' flag set to false. You can call GetQueryResults() to wait
|
| + /// for the query to complete and read the results. The default value is
|
| + /// 10000 milliseconds (10 seconds).
|
| core.int timeoutMs;
|
| - /**
|
| - * Specifies whether to use BigQuery's legacy SQL dialect for this query. The
|
| - * default value is true. If set to false, the query will use BigQuery's
|
| - * standard SQL: https://cloud.google.com/bigquery/sql-reference/ When
|
| - * useLegacySql is set to false, the value of flattenResults is ignored; query
|
| - * will be run as if flattenResults is false.
|
| - */
|
| +
|
| + /// Specifies whether to use BigQuery's legacy SQL dialect for this query.
|
| + /// The default value is true. If set to false, the query will use BigQuery's
|
| + /// standard SQL: https://cloud.google.com/bigquery/sql-reference/ When
|
| + /// useLegacySql is set to false, the value of flattenResults is ignored;
|
| + /// query will be run as if flattenResults is false.
|
| core.bool useLegacySql;
|
| - /**
|
| - * [Optional] Whether to look for the result in the query cache. The query
|
| - * cache is a best-effort cache that will be flushed whenever tables in the
|
| - * query are modified. The default value is true.
|
| - */
|
| +
|
| + /// [Optional] Whether to look for the result in the query cache. The query
|
| + /// cache is a best-effort cache that will be flushed whenever tables in the
|
| + /// query are modified. The default value is true.
|
| core.bool useQueryCache;
|
|
|
| QueryRequest();
|
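| A minimal sketch of a parameterized standard-SQL request built from these
| fields, reusing the corpus parameter sketched earlier; the generated
| jobs.query(request, projectId) method, an authenticated http client, and an
| enclosing async function are assumed here.
|
|   final api = new BigqueryApi(client);
|   final request = new QueryRequest()
|     ..query = 'SELECT word FROM `bigquery-public-data.samples.shakespeare` '
|         'WHERE corpus = @corpus LIMIT 10'
|     ..useLegacySql = false
|     ..parameterMode = 'NAMED'
|     ..queryParameters = [corpus]
|     ..maxResults = 1000
|     ..timeoutMs = 10000;
|   final QueryResponse response = await api.jobs.query(request, 'my-project');
|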
| @@ -4359,7 +4579,9 @@ class QueryRequest {
|
| query = _json["query"];
|
| }
|
| if (_json.containsKey("queryParameters")) {
|
| - queryParameters = _json["queryParameters"].map((value) => new QueryParameter.fromJson(value)).toList();
|
| + queryParameters = _json["queryParameters"]
|
| + .map((value) => new QueryParameter.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("timeoutMs")) {
|
| timeoutMs = _json["timeoutMs"];
|
| @@ -4373,7 +4595,8 @@ class QueryRequest {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (defaultDataset != null) {
|
| _json["defaultDataset"] = (defaultDataset).toJson();
|
| }
|
| @@ -4396,7 +4619,8 @@ class QueryRequest {
|
| _json["query"] = query;
|
| }
|
| if (queryParameters != null) {
|
| - _json["queryParameters"] = queryParameters.map((value) => (value).toJson()).toList();
|
| + _json["queryParameters"] =
|
| + queryParameters.map((value) => (value).toJson()).toList();
|
| }
|
| if (timeoutMs != null) {
|
| _json["timeoutMs"] = timeoutMs;
|
| @@ -4412,59 +4636,53 @@ class QueryRequest {
|
| }
|
|
|
| class QueryResponse {
|
| - /** Whether the query result was fetched from the query cache. */
|
| + /// Whether the query result was fetched from the query cache.
|
| core.bool cacheHit;
|
| - /**
|
| - * [Output-only] The first errors or warnings encountered during the running
|
| - * of the job. The final message includes the number of errors that caused the
|
| - * process to stop. Errors here do not necessarily mean that the job has
|
| - * completed or was unsuccessful.
|
| - */
|
| +
|
| + /// [Output-only] The first errors or warnings encountered during the running
|
| + /// of the job. The final message includes the number of errors that caused
|
| + /// the process to stop. Errors here do not necessarily mean that the job has
|
| + /// completed or was unsuccessful.
|
| core.List<ErrorProto> errors;
|
| - /**
|
| - * Whether the query has completed or not. If rows or totalRows are present,
|
| - * this will always be true. If this is false, totalRows will not be
|
| - * available.
|
| - */
|
| +
|
| + /// Whether the query has completed or not. If rows or totalRows are present,
|
| + /// this will always be true. If this is false, totalRows will not be
|
| + /// available.
|
| core.bool jobComplete;
|
| - /**
|
| - * Reference to the Job that was created to run the query. This field will be
|
| - * present even if the original request timed out, in which case
|
| - * GetQueryResults can be used to read the results once the query has
|
| - * completed. Since this API only returns the first page of results,
|
| - * subsequent pages can be fetched via the same mechanism (GetQueryResults).
|
| - */
|
| +
|
| + /// Reference to the Job that was created to run the query. This field will
|
| + /// be present even if the original request timed out, in which case
|
| + /// GetQueryResults can be used to read the results once the query has
|
| + /// completed. Since this API only returns the first page of results,
|
| + /// subsequent pages can be fetched via the same mechanism (GetQueryResults).
|
| JobReference jobReference;
|
| - /** The resource type. */
|
| +
|
| + /// The resource type.
|
| core.String kind;
|
| - /**
|
| - * [Output-only] The number of rows affected by a DML statement. Present only
|
| - * for DML statements INSERT, UPDATE or DELETE.
|
| - */
|
| +
|
| + /// [Output-only] The number of rows affected by a DML statement. Present
|
| + /// only for DML statements INSERT, UPDATE or DELETE.
|
| core.String numDmlAffectedRows;
|
| - /** A token used for paging results. */
|
| +
|
| + /// A token used for paging results.
|
| core.String pageToken;
|
| - /**
|
| - * An object with as many results as can be contained within the maximum
|
| - * permitted reply size. To get any additional rows, you can call
|
| - * GetQueryResults and specify the jobReference returned above.
|
| - */
|
| +
|
| + /// An object with as many results as can be contained within the maximum
|
| + /// permitted reply size. To get any additional rows, you can call
|
| + /// GetQueryResults and specify the jobReference returned above.
|
| core.List<TableRow> rows;
|
| - /**
|
| - * The schema of the results. Present only when the query completes
|
| - * successfully.
|
| - */
|
| +
|
| + /// The schema of the results. Present only when the query completes
|
| + /// successfully.
|
| TableSchema schema;
|
| - /**
|
| - * The total number of bytes processed for this query. If this query was a dry
|
| - * run, this is the number of bytes that would be processed if the query were
|
| - * run.
|
| - */
|
| +
|
| + /// The total number of bytes processed for this query. If this query was a
|
| + /// dry run, this is the number of bytes that would be processed if the query
|
| + /// were run.
|
| core.String totalBytesProcessed;
|
| - /**
|
| - * The total number of rows in the complete query result set, which can be
|
| - * more than the number of rows in this single page of results.
|
| - */
|
| +
|
| + /// The total number of rows in the complete query result set, which can be
|
| + /// more than the number of rows in this single page of results.
|
| core.String totalRows;
|
|
|
| QueryResponse();
|
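| Continuing the earlier query sketch: when more rows remain, the jobReference
| and pageToken above can be fed back into the generated jobs.getQueryResults
| method to fetch subsequent pages. Its exact parameter names are assumed here.
|
|   var token = response.pageToken;
|   while (token != null) {
|     final more = await api.jobs.getQueryResults(
|         response.jobReference.projectId, response.jobReference.jobId,
|         pageToken: token);
|     // ...process more.rows here...
|     token = more.pageToken;
|   }
|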
| @@ -4474,7 +4692,9 @@ class QueryResponse {
|
| cacheHit = _json["cacheHit"];
|
| }
|
| if (_json.containsKey("errors")) {
|
| - errors = _json["errors"].map((value) => new ErrorProto.fromJson(value)).toList();
|
| + errors = _json["errors"]
|
| + .map((value) => new ErrorProto.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("jobComplete")) {
|
| jobComplete = _json["jobComplete"];
|
| @@ -4492,7 +4712,8 @@ class QueryResponse {
|
| pageToken = _json["pageToken"];
|
| }
|
| if (_json.containsKey("rows")) {
|
| - rows = _json["rows"].map((value) => new TableRow.fromJson(value)).toList();
|
| + rows =
|
| + _json["rows"].map((value) => new TableRow.fromJson(value)).toList();
|
| }
|
| if (_json.containsKey("schema")) {
|
| schema = new TableSchema.fromJson(_json["schema"]);
|
| @@ -4506,7 +4727,8 @@ class QueryResponse {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (cacheHit != null) {
|
| _json["cacheHit"] = cacheHit;
|
| }
|
| @@ -4545,21 +4767,17 @@ class QueryResponse {
|
| }
|
|
|
| class Streamingbuffer {
|
| - /**
|
| - * [Output-only] A lower-bound estimate of the number of bytes currently in
|
| - * the streaming buffer.
|
| - */
|
| + /// [Output-only] A lower-bound estimate of the number of bytes currently in
|
| + /// the streaming buffer.
|
| core.String estimatedBytes;
|
| - /**
|
| - * [Output-only] A lower-bound estimate of the number of rows currently in the
|
| - * streaming buffer.
|
| - */
|
| +
|
| + /// [Output-only] A lower-bound estimate of the number of rows currently in
|
| + /// the streaming buffer.
|
| core.String estimatedRows;
|
| - /**
|
| - * [Output-only] Contains the timestamp of the oldest entry in the streaming
|
| - * buffer, in milliseconds since the epoch, if the streaming buffer is
|
| - * available.
|
| - */
|
| +
|
| + /// [Output-only] Contains the timestamp of the oldest entry in the streaming
|
| + /// buffer, in milliseconds since the epoch, if the streaming buffer is
|
| + /// available.
|
| core.String oldestEntryTime;
|
|
|
| Streamingbuffer();
|
| @@ -4577,7 +4795,8 @@ class Streamingbuffer {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (estimatedBytes != null) {
|
| _json["estimatedBytes"] = estimatedBytes;
|
| }
|
| @@ -4592,92 +4811,92 @@ class Streamingbuffer {
|
| }
|
|
|
| class Table {
|
| - /**
|
| - * [Output-only] The time when this table was created, in milliseconds since
|
| - * the epoch.
|
| - */
|
| + /// [Output-only] The time when this table was created, in milliseconds since
|
| + /// the epoch.
|
| core.String creationTime;
|
| - /** [Optional] A user-friendly description of this table. */
|
| +
|
| + /// [Optional] A user-friendly description of this table.
|
| core.String description;
|
| - /** [Output-only] A hash of this resource. */
|
| +
|
| + /// [Experimental] Custom encryption configuration (e.g., Cloud KMS keys).
|
| + EncryptionConfiguration encryptionConfiguration;
|
| +
|
| + /// [Output-only] A hash of this resource.
|
| core.String etag;
|
| - /**
|
| - * [Optional] The time when this table expires, in milliseconds since the
|
| - * epoch. If not present, the table will persist indefinitely. Expired tables
|
| - * will be deleted and their storage reclaimed.
|
| - */
|
| +
|
| + /// [Optional] The time when this table expires, in milliseconds since the
|
| + /// epoch. If not present, the table will persist indefinitely. Expired
|
| + /// tables will be deleted and their storage reclaimed.
|
| core.String expirationTime;
|
| - /**
|
| - * [Optional] Describes the data format, location, and other properties of a
|
| - * table stored outside of BigQuery. By defining these properties, the data
|
| - * source can then be queried as if it were a standard BigQuery table.
|
| - */
|
| +
|
| + /// [Optional] Describes the data format, location, and other properties of a
|
| + /// table stored outside of BigQuery. By defining these properties, the data
|
| + /// source can then be queried as if it were a standard BigQuery table.
|
| ExternalDataConfiguration externalDataConfiguration;
|
| - /** [Optional] A descriptive name for this table. */
|
| +
|
| + /// [Optional] A descriptive name for this table.
|
| core.String friendlyName;
|
| - /** [Output-only] An opaque ID uniquely identifying the table. */
|
| +
|
| + /// [Output-only] An opaque ID uniquely identifying the table.
|
| core.String id;
|
| - /** [Output-only] The type of the resource. */
|
| +
|
| + /// [Output-only] The type of the resource.
|
| core.String kind;
|
| - /**
|
| - * [Experimental] The labels associated with this table. You can use these to
|
| - * organize and group your tables. Label keys and values can be no longer than
|
| - * 63 characters, can only contain lowercase letters, numeric characters,
|
| - * underscores and dashes. International characters are allowed. Label values
|
| - * are optional. Label keys must start with a letter and each label in the
|
| - * list must have a different key.
|
| - */
|
| +
|
| + /// [Experimental] The labels associated with this table. You can use these
|
| + /// to organize and group your tables. Label keys and values can be no longer
|
| + /// than 63 characters, can only contain lowercase letters, numeric
|
| + /// characters, underscores and dashes. International characters are allowed.
|
| + /// Label values are optional. Label keys must start with a letter and each
|
| + /// label in the list must have a different key.
|
| core.Map<core.String, core.String> labels;
|
| - /**
|
| - * [Output-only] The time when this table was last modified, in milliseconds
|
| - * since the epoch.
|
| - */
|
| +
|
| + /// [Output-only] The time when this table was last modified, in milliseconds
|
| + /// since the epoch.
|
| core.String lastModifiedTime;
|
| - /**
|
| - * [Output-only] The geographic location where the table resides. This value
|
| - * is inherited from the dataset.
|
| - */
|
| +
|
| + /// [Output-only] The geographic location where the table resides. This value
|
| + /// is inherited from the dataset.
|
| core.String location;
|
| - /**
|
| - * [Output-only] The size of this table in bytes, excluding any data in the
|
| - * streaming buffer.
|
| - */
|
| +
|
| + /// [Output-only] The size of this table in bytes, excluding any data in the
|
| + /// streaming buffer.
|
| core.String numBytes;
|
| - /**
|
| - * [Output-only] The number of bytes in the table that are considered
|
| - * "long-term storage".
|
| - */
|
| +
|
| + /// [Output-only] The number of bytes in the table that are considered
|
| + /// "long-term storage".
|
| core.String numLongTermBytes;
|
| - /**
|
| - * [Output-only] The number of rows of data in this table, excluding any data
|
| - * in the streaming buffer.
|
| - */
|
| +
|
| + /// [Output-only] The number of rows of data in this table, excluding any
|
| + /// data in the streaming buffer.
|
| core.String numRows;
|
| - /** [Optional] Describes the schema of this table. */
|
| +
|
| + /// [Optional] Describes the schema of this table.
|
| TableSchema schema;
|
| - /** [Output-only] A URL that can be used to access this resource again. */
|
| +
|
| + /// [Output-only] A URL that can be used to access this resource again.
|
| core.String selfLink;
|
| - /**
|
| - * [Output-only] Contains information regarding this table's streaming buffer,
|
| - * if one is present. This field will be absent if the table is not being
|
| - * streamed to or if there is no data in the streaming buffer.
|
| - */
|
| +
|
| + /// [Output-only] Contains information regarding this table's streaming
|
| + /// buffer, if one is present. This field will be absent if the table is not
|
| + /// being streamed to or if there is no data in the streaming buffer.
|
| Streamingbuffer streamingBuffer;
|
| - /** [Required] Reference describing the ID of this table. */
|
| +
|
| + /// [Required] Reference describing the ID of this table.
|
| TableReference tableReference;
|
| - /**
|
| - * [Experimental] If specified, configures time-based partitioning for this
|
| - * table.
|
| - */
|
| +
|
| + /// [Experimental] If specified, configures time-based partitioning for this
|
| + /// table.
|
| TimePartitioning timePartitioning;
|
| - /**
|
| - * [Output-only] Describes the table type. The following values are supported:
|
| - * TABLE: A normal BigQuery table. VIEW: A virtual table defined by a SQL
|
| - * query. EXTERNAL: A table that references data stored in an external storage
|
| - * system, such as Google Cloud Storage. The default value is TABLE.
|
| - */
|
| +
|
| + /// [Output-only] Describes the table type. The following values are
|
| + /// supported: TABLE: A normal BigQuery table. VIEW: A virtual table defined
|
| + /// by a SQL query. EXTERNAL: A table that references data stored in an
|
| + /// external storage system, such as Google Cloud Storage. The default value
|
| + /// is TABLE.
|
| core.String type;
|
| - /** [Optional] The view definition. */
|
| +
|
| + /// [Optional] The view definition.
|
| ViewDefinition view;
|
|
|
| Table();
|
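| A minimal sketch of creating a table that uses the new encryption field; the
| EncryptionConfiguration.kmsKeyName property and the generated
| tables.insert(request, projectId, datasetId) method are assumed from their
| definitions elsewhere in this file.
|
|   final table = new Table()
|     ..tableReference = (new TableReference()
|       ..projectId = 'my-project'
|       ..datasetId = 'my_dataset'
|       ..tableId = 'secure_table')
|     ..encryptionConfiguration = (new EncryptionConfiguration()
|       ..kmsKeyName =
|           'projects/my-project/locations/us/keyRings/kr/cryptoKeys/key');
|   await api.tables.insert(table, 'my-project', 'my_dataset');
|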
| @@ -4689,6 +4908,10 @@ class Table {
|
| if (_json.containsKey("description")) {
|
| description = _json["description"];
|
| }
|
| + if (_json.containsKey("encryptionConfiguration")) {
|
| + encryptionConfiguration = new EncryptionConfiguration.fromJson(
|
| + _json["encryptionConfiguration"]);
|
| + }
|
| if (_json.containsKey("etag")) {
|
| etag = _json["etag"];
|
| }
|
| @@ -4696,7 +4919,8 @@ class Table {
|
| expirationTime = _json["expirationTime"];
|
| }
|
| if (_json.containsKey("externalDataConfiguration")) {
|
| - externalDataConfiguration = new ExternalDataConfiguration.fromJson(_json["externalDataConfiguration"]);
|
| + externalDataConfiguration = new ExternalDataConfiguration.fromJson(
|
| + _json["externalDataConfiguration"]);
|
| }
|
| if (_json.containsKey("friendlyName")) {
|
| friendlyName = _json["friendlyName"];
|
| @@ -4738,7 +4962,8 @@ class Table {
|
| tableReference = new TableReference.fromJson(_json["tableReference"]);
|
| }
|
| if (_json.containsKey("timePartitioning")) {
|
| - timePartitioning = new TimePartitioning.fromJson(_json["timePartitioning"]);
|
| + timePartitioning =
|
| + new TimePartitioning.fromJson(_json["timePartitioning"]);
|
| }
|
| if (_json.containsKey("type")) {
|
| type = _json["type"];
|
| @@ -4749,13 +4974,17 @@ class Table {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (creationTime != null) {
|
| _json["creationTime"] = creationTime;
|
| }
|
| if (description != null) {
|
| _json["description"] = description;
|
| }
|
| + if (encryptionConfiguration != null) {
|
| + _json["encryptionConfiguration"] = (encryptionConfiguration).toJson();
|
| + }
|
| if (etag != null) {
|
| _json["etag"] = etag;
|
| }
|
| @@ -4818,12 +5047,10 @@ class Table {
|
| }
|
|
|
| class TableCell {
|
| - /**
|
| - *
|
| - *
|
| - * The values for Object must be JSON objects. It can consist of `num`,
|
| - * `String`, `bool` and `null` as well as `Map` and `List` values.
|
| - */
|
| + ///
|
| + ///
|
| + /// The values for Object must be JSON objects. It can consist of `num`,
|
| + /// `String`, `bool` and `null` as well as `Map` and `List` values.
|
| core.Object v;
|
|
|
| TableCell();
|
| @@ -4835,7 +5062,8 @@ class TableCell {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (v != null) {
|
| _json["v"] = v;
|
| }
|
| @@ -4844,15 +5072,12 @@ class TableCell {
|
| }
|
|
|
| class TableDataInsertAllRequestRows {
|
| - /**
|
| - * [Optional] A unique ID for each row. BigQuery uses this property to detect
|
| - * duplicate insertion requests on a best-effort basis.
|
| - */
|
| + /// [Optional] A unique ID for each row. BigQuery uses this property to
|
| + /// detect duplicate insertion requests on a best-effort basis.
|
| core.String insertId;
|
| - /**
|
| - * [Required] A JSON object that contains a row of data. The object's
|
| - * properties and values must match the destination table's schema.
|
| - */
|
| +
|
| + /// [Required] A JSON object that contains a row of data. The object's
|
| + /// properties and values must match the destination table's schema.
|
| JsonObject json;
|
|
|
| TableDataInsertAllRequestRows();
|
| @@ -4867,7 +5092,8 @@ class TableDataInsertAllRequestRows {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (insertId != null) {
|
| _json["insertId"] = insertId;
|
| }
|
| @@ -4879,30 +5105,28 @@ class TableDataInsertAllRequestRows {
|
| }
|
|
|
| class TableDataInsertAllRequest {
|
| - /**
|
| - * [Optional] Accept rows that contain values that do not match the schema.
|
| - * The unknown values are ignored. Default is false, which treats unknown
|
| - * values as errors.
|
| - */
|
| + /// [Optional] Accept rows that contain values that do not match the schema.
|
| + /// The unknown values are ignored. Default is false, which treats unknown
|
| + /// values as errors.
|
| core.bool ignoreUnknownValues;
|
| - /** The resource type of the response. */
|
| +
|
| + /// The resource type of the response.
|
| core.String kind;
|
| - /** The rows to insert. */
|
| +
|
| + /// The rows to insert.
|
| core.List<TableDataInsertAllRequestRows> rows;
|
| - /**
|
| - * [Optional] Insert all valid rows of a request, even if invalid rows exist.
|
| - * The default value is false, which causes the entire request to fail if any
|
| - * invalid rows exist.
|
| - */
|
| +
|
| + /// [Optional] Insert all valid rows of a request, even if invalid rows
|
| + /// exist. The default value is false, which causes the entire request to
|
| + /// fail if any invalid rows exist.
|
| core.bool skipInvalidRows;
|
| - /**
|
| - * [Experimental] If specified, treats the destination table as a base
|
| - * template, and inserts the rows into an instance table named
|
| - * "{destination}{templateSuffix}". BigQuery will manage creation of the
|
| - * instance table, using the schema of the base template table. See
|
| - * https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
|
| - * for considerations when working with templates tables.
|
| - */
|
| +
|
| + /// [Experimental] If specified, treats the destination table as a base
|
| + /// template, and inserts the rows into an instance table named
|
| + /// "{destination}{templateSuffix}". BigQuery will manage creation of the
|
| + /// instance table, using the schema of the base template table. See
|
| + /// https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
|
| + /// for considerations when working with template tables.
|
| core.String templateSuffix;
|
|
|
| TableDataInsertAllRequest();
|
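|
| The request/response pair above is what streaming inserts consume and return. A
| minimal sketch, assuming an authenticated `BigqueryApi api`, a
| `tabledata.insertAll(request, projectId, datasetId, tableId)` method as generated
| elsewhere in this library, and placeholder project/dataset/table IDs:
|
|   // Inside an async function; IDs below are placeholders.
|   final request = new TableDataInsertAllRequest()
|     ..skipInvalidRows = true
|     ..rows = [
|       new TableDataInsertAllRequestRows()
|         ..insertId = 'row-1' // best-effort de-duplication key
|         ..json = new JsonObject.fromJson({'name': 'alice', 'visits': 3}),
|     ];
|   final response = await api.tabledata
|       .insertAll(request, 'my-project', 'my_dataset', 'my_table');
|   if (response.insertErrors != null) {
|     for (var e in response.insertErrors) {
|       print('row ${e.index}: ${e.errors.map((p) => p.message).join(", ")}');
|     }
|   }
|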
| @@ -4915,7 +5139,9 @@ class TableDataInsertAllRequest {
|
| kind = _json["kind"];
|
| }
|
| if (_json.containsKey("rows")) {
|
| - rows = _json["rows"].map((value) => new TableDataInsertAllRequestRows.fromJson(value)).toList();
|
| + rows = _json["rows"]
|
| + .map((value) => new TableDataInsertAllRequestRows.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("skipInvalidRows")) {
|
| skipInvalidRows = _json["skipInvalidRows"];
|
| @@ -4926,7 +5152,8 @@ class TableDataInsertAllRequest {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (ignoreUnknownValues != null) {
|
| _json["ignoreUnknownValues"] = ignoreUnknownValues;
|
| }
|
| @@ -4947,16 +5174,19 @@ class TableDataInsertAllRequest {
|
| }
|
|
|
| class TableDataInsertAllResponseInsertErrors {
|
| - /** Error information for the row indicated by the index property. */
|
| + /// Error information for the row indicated by the index property.
|
| core.List<ErrorProto> errors;
|
| - /** The index of the row that error applies to. */
|
| +
|
| + /// The index of the row that the error applies to.
|
| core.int index;
|
|
|
| TableDataInsertAllResponseInsertErrors();
|
|
|
| TableDataInsertAllResponseInsertErrors.fromJson(core.Map _json) {
|
| if (_json.containsKey("errors")) {
|
| - errors = _json["errors"].map((value) => new ErrorProto.fromJson(value)).toList();
|
| + errors = _json["errors"]
|
| + .map((value) => new ErrorProto.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("index")) {
|
| index = _json["index"];
|
| @@ -4964,7 +5194,8 @@ class TableDataInsertAllResponseInsertErrors {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (errors != null) {
|
| _json["errors"] = errors.map((value) => (value).toJson()).toList();
|
| }
|
| @@ -4976,16 +5207,20 @@ class TableDataInsertAllResponseInsertErrors {
|
| }
|
|
|
| class TableDataInsertAllResponse {
|
| - /** An array of errors for rows that were not inserted. */
|
| + /// An array of errors for rows that were not inserted.
|
| core.List<TableDataInsertAllResponseInsertErrors> insertErrors;
|
| - /** The resource type of the response. */
|
| +
|
| + /// The resource type of the response.
|
| core.String kind;
|
|
|
| TableDataInsertAllResponse();
|
|
|
| TableDataInsertAllResponse.fromJson(core.Map _json) {
|
| if (_json.containsKey("insertErrors")) {
|
| - insertErrors = _json["insertErrors"].map((value) => new TableDataInsertAllResponseInsertErrors.fromJson(value)).toList();
|
| + insertErrors = _json["insertErrors"]
|
| + .map((value) =>
|
| + new TableDataInsertAllResponseInsertErrors.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("kind")) {
|
| kind = _json["kind"];
|
| @@ -4993,9 +5228,11 @@ class TableDataInsertAllResponse {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (insertErrors != null) {
|
| - _json["insertErrors"] = insertErrors.map((value) => (value).toJson()).toList();
|
| + _json["insertErrors"] =
|
| + insertErrors.map((value) => (value).toJson()).toList();
|
| }
|
| if (kind != null) {
|
| _json["kind"] = kind;
|
| @@ -5005,19 +5242,21 @@ class TableDataInsertAllResponse {
|
| }
|
|
|
| class TableDataList {
|
| - /** A hash of this page of results. */
|
| + /// A hash of this page of results.
|
| core.String etag;
|
| - /** The resource type of the response. */
|
| +
|
| + /// The resource type of the response.
|
| core.String kind;
|
| - /**
|
| - * A token used for paging results. Providing this token instead of the
|
| - * startIndex parameter can help you retrieve stable results when an
|
| - * underlying table is changing.
|
| - */
|
| +
|
| + /// A token used for paging results. Providing this token instead of the
|
| + /// startIndex parameter can help you retrieve stable results when an
|
| + /// underlying table is changing.
|
| core.String pageToken;
|
| - /** Rows of results. */
|
| +
|
| + /// Rows of results.
|
| core.List<TableRow> rows;
|
| - /** The total number of rows in the complete table. */
|
| +
|
| + /// The total number of rows in the complete table.
|
| core.String totalRows;
|
|
|
| TableDataList();
|
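|
| The pageToken/rows/totalRows trio above supports stable paging while the underlying
| table changes. A rough sketch, assuming a
| `tabledata.list(projectId, datasetId, tableId, {maxResults, pageToken})` method as
| generated elsewhere in this library and placeholder IDs:
|
|   // Inside an async function: walk the table 500 rows at a time via pageToken.
|   String token;
|   do {
|     TableDataList page = await api.tabledata.list(
|         'my-project', 'my_dataset', 'events',
|         maxResults: 500, pageToken: token);
|     if (page.rows != null) {
|       for (var row in page.rows) {
|         print(row.f.map((cell) => cell.v).toList());
|       }
|     }
|     token = page.pageToken;
|   } while (token != null);
|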
| @@ -5033,7 +5272,8 @@ class TableDataList {
|
| pageToken = _json["pageToken"];
|
| }
|
| if (_json.containsKey("rows")) {
|
| - rows = _json["rows"].map((value) => new TableRow.fromJson(value)).toList();
|
| + rows =
|
| + _json["rows"].map((value) => new TableRow.fromJson(value)).toList();
|
| }
|
| if (_json.containsKey("totalRows")) {
|
| totalRows = _json["totalRows"];
|
| @@ -5041,7 +5281,8 @@ class TableDataList {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (etag != null) {
|
| _json["etag"] = etag;
|
| }
|
| @@ -5062,33 +5303,27 @@ class TableDataList {
|
| }
|
|
|
| class TableFieldSchema {
|
| - /**
|
| - * [Optional] The field description. The maximum length is 1,024 characters.
|
| - */
|
| + /// [Optional] The field description. The maximum length is 1,024 characters.
|
| core.String description;
|
| - /**
|
| - * [Optional] Describes the nested schema fields if the type property is set
|
| - * to RECORD.
|
| - */
|
| +
|
| + /// [Optional] Describes the nested schema fields if the type property is set
|
| + /// to RECORD.
|
| core.List<TableFieldSchema> fields;
|
| - /**
|
| - * [Optional] The field mode. Possible values include NULLABLE, REQUIRED and
|
| - * REPEATED. The default value is NULLABLE.
|
| - */
|
| +
|
| + /// [Optional] The field mode. Possible values include NULLABLE, REQUIRED and
|
| + /// REPEATED. The default value is NULLABLE.
|
| core.String mode;
|
| - /**
|
| - * [Required] The field name. The name must contain only letters (a-z, A-Z),
|
| - * numbers (0-9), or underscores (_), and must start with a letter or
|
| - * underscore. The maximum length is 128 characters.
|
| - */
|
| +
|
| + /// [Required] The field name. The name must contain only letters (a-z, A-Z),
|
| + /// numbers (0-9), or underscores (_), and must start with a letter or
|
| + /// underscore. The maximum length is 128 characters.
|
| core.String name;
|
| - /**
|
| - * [Required] The field data type. Possible values include STRING, BYTES,
|
| - * INTEGER, INT64 (same as INTEGER), FLOAT, FLOAT64 (same as FLOAT), BOOLEAN,
|
| - * BOOL (same as BOOLEAN), TIMESTAMP, DATE, TIME, DATETIME, RECORD (where
|
| - * RECORD indicates that the field contains a nested schema) or STRUCT (same
|
| - * as RECORD).
|
| - */
|
| +
|
| + /// [Required] The field data type. Possible values include STRING, BYTES,
|
| + /// INTEGER, INT64 (same as INTEGER), FLOAT, FLOAT64 (same as FLOAT),
|
| + /// BOOLEAN, BOOL (same as BOOLEAN), TIMESTAMP, DATE, TIME, DATETIME, RECORD
|
| + /// (where RECORD indicates that the field contains a nested schema) or
|
| + /// STRUCT (same as RECORD).
|
| core.String type;
|
|
|
| TableFieldSchema();
|
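|
| Taken together, these attributes are what a caller sets when assembling a schema by
| hand; a sketch with purely illustrative column names:
|
|   // A REQUIRED integer column plus a REPEATED RECORD column with two leaf fields.
|   final schema = new TableSchema()
|     ..fields = [
|       new TableFieldSchema()
|         ..name = 'user_id'
|         ..type = 'INT64'
|         ..mode = 'REQUIRED',
|       new TableFieldSchema()
|         ..name = 'address'
|         ..type = 'RECORD'
|         ..mode = 'REPEATED'
|         ..fields = [
|           new TableFieldSchema()
|             ..name = 'city'
|             ..type = 'STRING',
|           new TableFieldSchema()
|             ..name = 'zip'
|             ..type = 'STRING',
|         ],
|     ];
|   // The schema would then typically be attached to a Table (table.schema = schema)
|   // before an insert or update call.
|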
| @@ -5098,7 +5333,9 @@ class TableFieldSchema {
|
| description = _json["description"];
|
| }
|
| if (_json.containsKey("fields")) {
|
| - fields = _json["fields"].map((value) => new TableFieldSchema.fromJson(value)).toList();
|
| + fields = _json["fields"]
|
| + .map((value) => new TableFieldSchema.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("mode")) {
|
| mode = _json["mode"];
|
| @@ -5112,7 +5349,8 @@ class TableFieldSchema {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (description != null) {
|
| _json["description"] = description;
|
| }
|
| @@ -5132,11 +5370,9 @@ class TableFieldSchema {
|
| }
|
| }
|
|
|
| -/** Additional details for a view. */
|
| +/// Additional details for a view.
|
| class TableListTablesView {
|
| - /**
|
| - * True if view is defined in legacy SQL dialect, false if in standard SQL.
|
| - */
|
| + /// True if view is defined in legacy SQL dialect, false if in standard SQL.
|
| core.bool useLegacySql;
|
|
|
| TableListTablesView();
|
| @@ -5148,7 +5384,8 @@ class TableListTablesView {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (useLegacySql != null) {
|
| _json["useLegacySql"] = useLegacySql;
|
| }
|
| @@ -5157,24 +5394,29 @@ class TableListTablesView {
|
| }
|
|
|
| class TableListTables {
|
| - /** The user-friendly name for this table. */
|
| + /// The user-friendly name for this table.
|
| core.String friendlyName;
|
| - /** An opaque ID of the table */
|
| +
|
| + /// An opaque ID of the table.
|
| core.String id;
|
| - /** The resource type. */
|
| +
|
| + /// The resource type.
|
| core.String kind;
|
| - /**
|
| - * [Experimental] The labels associated with this table. You can use these to
|
| - * organize and group your tables.
|
| - */
|
| +
|
| + /// [Experimental] The labels associated with this table. You can use these
|
| + /// to organize and group your tables.
|
| core.Map<core.String, core.String> labels;
|
| - /** A reference uniquely identifying the table. */
|
| +
|
| + /// A reference uniquely identifying the table.
|
| TableReference tableReference;
|
| - /** [Experimental] The time-based partitioning for this table. */
|
| +
|
| + /// [Experimental] The time-based partitioning for this table.
|
| TimePartitioning timePartitioning;
|
| - /** The type of table. Possible values are: TABLE, VIEW. */
|
| +
|
| + /// The type of table. Possible values are: TABLE, VIEW.
|
| core.String type;
|
| - /** Additional details for a view. */
|
| +
|
| + /// Additional details for a view.
|
| TableListTablesView view;
|
|
|
| TableListTables();
|
| @@ -5196,7 +5438,8 @@ class TableListTables {
|
| tableReference = new TableReference.fromJson(_json["tableReference"]);
|
| }
|
| if (_json.containsKey("timePartitioning")) {
|
| - timePartitioning = new TimePartitioning.fromJson(_json["timePartitioning"]);
|
| + timePartitioning =
|
| + new TimePartitioning.fromJson(_json["timePartitioning"]);
|
| }
|
| if (_json.containsKey("type")) {
|
| type = _json["type"];
|
| @@ -5207,7 +5450,8 @@ class TableListTables {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (friendlyName != null) {
|
| _json["friendlyName"] = friendlyName;
|
| }
|
| @@ -5237,15 +5481,19 @@ class TableListTables {
|
| }
|
|
|
| class TableList {
|
| - /** A hash of this page of results. */
|
| + /// A hash of this page of results.
|
| core.String etag;
|
| - /** The type of list. */
|
| +
|
| + /// The type of list.
|
| core.String kind;
|
| - /** A token to request the next page of results. */
|
| +
|
| + /// A token to request the next page of results.
|
| core.String nextPageToken;
|
| - /** Tables in the requested dataset. */
|
| +
|
| + /// Tables in the requested dataset.
|
| core.List<TableListTables> tables;
|
| - /** The total number of tables in the dataset. */
|
| +
|
| + /// The total number of tables in the dataset.
|
| core.int totalItems;
|
|
|
| TableList();
|
| @@ -5261,7 +5509,9 @@ class TableList {
|
| nextPageToken = _json["nextPageToken"];
|
| }
|
| if (_json.containsKey("tables")) {
|
| - tables = _json["tables"].map((value) => new TableListTables.fromJson(value)).toList();
|
| + tables = _json["tables"]
|
| + .map((value) => new TableListTables.fromJson(value))
|
| + .toList();
|
| }
|
| if (_json.containsKey("totalItems")) {
|
| totalItems = _json["totalItems"];
|
| @@ -5269,7 +5519,8 @@ class TableList {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (etag != null) {
|
| _json["etag"] = etag;
|
| }
|
| @@ -5290,15 +5541,15 @@ class TableList {
|
| }
|
|
|
| class TableReference {
|
| - /** [Required] The ID of the dataset containing this table. */
|
| + /// [Required] The ID of the dataset containing this table.
|
| core.String datasetId;
|
| - /** [Required] The ID of the project containing this table. */
|
| +
|
| + /// [Required] The ID of the project containing this table.
|
| core.String projectId;
|
| - /**
|
| - * [Required] The ID of the table. The ID must contain only letters (a-z,
|
| - * A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024
|
| - * characters.
|
| - */
|
| +
|
| + /// [Required] The ID of the table. The ID must contain only letters (a-z,
|
| + /// A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024
|
| + /// characters.
|
| core.String tableId;
|
|
|
| TableReference();
|
| @@ -5316,7 +5567,8 @@ class TableReference {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (datasetId != null) {
|
| _json["datasetId"] = datasetId;
|
| }
|
| @@ -5331,10 +5583,8 @@ class TableReference {
|
| }
|
|
|
| class TableRow {
|
| - /**
|
| - * Represents a single row in the result set, consisting of one or more
|
| - * fields.
|
| - */
|
| + /// Represents a single row in the result set, consisting of one or more
|
| + /// fields.
|
| core.List<TableCell> f;
|
|
|
| TableRow();
|
| @@ -5346,7 +5596,8 @@ class TableRow {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (f != null) {
|
| _json["f"] = f.map((value) => (value).toJson()).toList();
|
| }
|
| @@ -5355,19 +5606,22 @@ class TableRow {
|
| }
|
|
|
| class TableSchema {
|
| - /** Describes the fields in a table. */
|
| + /// Describes the fields in a table.
|
| core.List<TableFieldSchema> fields;
|
|
|
| TableSchema();
|
|
|
| TableSchema.fromJson(core.Map _json) {
|
| if (_json.containsKey("fields")) {
|
| - fields = _json["fields"].map((value) => new TableFieldSchema.fromJson(value)).toList();
|
| + fields = _json["fields"]
|
| + .map((value) => new TableFieldSchema.fromJson(value))
|
| + .toList();
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (fields != null) {
|
| _json["fields"] = fields.map((value) => (value).toJson()).toList();
|
| }
|
| @@ -5376,15 +5630,18 @@ class TableSchema {
|
| }
|
|
|
| class TimePartitioning {
|
| - /**
|
| - * [Optional] Number of milliseconds for which to keep the storage for a
|
| - * partition.
|
| - */
|
| + /// [Optional] Number of milliseconds for which to keep the storage for a
|
| + /// partition.
|
| core.String expirationMs;
|
| - /**
|
| - * [Required] The only type supported is DAY, which will generate one
|
| - * partition per day based on data loading time.
|
| - */
|
| +
|
| + /// [Experimental] [Optional] If not set, the table is partitioned by pseudo
|
| + /// column '_PARTITIONTIME'; if set, the table is partitioned by this field.
|
| + /// The field must be a top-level TIMESTAMP or DATE field. Its mode must be
|
| + /// NULLABLE or REQUIRED.
|
| + core.String field;
|
| +
|
| + /// [Required] The only type supported is DAY, which will generate one
|
| + /// partition per day.
|
| core.String type;
|
|
|
| TimePartitioning();
|
| @@ -5393,16 +5650,23 @@ class TimePartitioning {
|
| if (_json.containsKey("expirationMs")) {
|
| expirationMs = _json["expirationMs"];
|
| }
|
| + if (_json.containsKey("field")) {
|
| + field = _json["field"];
|
| + }
|
| if (_json.containsKey("type")) {
|
| type = _json["type"];
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (expirationMs != null) {
|
| _json["expirationMs"] = expirationMs;
|
| }
|
| + if (field != null) {
|
| + _json["field"] = field;
|
| + }
|
| if (type != null) {
|
| _json["type"] = type;
|
| }
|
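|
| The new `field` property makes column-based partitioning expressible. A rough
| sketch, assuming the Table and TableReference classes above, a
| `tables.insert(request, projectId, datasetId)` method as generated elsewhere in
| this library, and placeholder names:
|
|   // Partition by an `event_time` TIMESTAMP column instead of _PARTITIONTIME,
|   // expiring partitions after roughly 30 days. Runs inside an async function.
|   final table = new Table()
|     ..tableReference = (new TableReference()
|       ..projectId = 'my-project'
|       ..datasetId = 'my_dataset'
|       ..tableId = 'events')
|     ..timePartitioning = (new TimePartitioning()
|       ..type = 'DAY'
|       ..field = 'event_time'
|       ..expirationMs = '2592000000');
|   await api.tables.insert(table, 'my-project', 'my_dataset');
|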
| @@ -5411,16 +5675,13 @@ class TimePartitioning {
|
| }
|
|
|
| class UserDefinedFunctionResource {
|
| - /**
|
| - * [Pick one] An inline resource that contains code for a user-defined
|
| - * function (UDF). Providing a inline code resource is equivalent to providing
|
| - * a URI for a file containing the same code.
|
| - */
|
| + /// [Pick one] An inline resource that contains code for a user-defined
|
| + /// function (UDF). Providing an inline code resource is equivalent to
|
| + /// providing a URI for a file containing the same code.
|
| core.String inlineCode;
|
| - /**
|
| - * [Pick one] A code resource to load from a Google Cloud Storage URI
|
| - * (gs://bucket/path).
|
| - */
|
| +
|
| + /// [Pick one] A code resource to load from a Google Cloud Storage URI
|
| + /// (gs://bucket/path).
|
| core.String resourceUri;
|
|
|
| UserDefinedFunctionResource();
|
| @@ -5435,7 +5696,8 @@ class UserDefinedFunctionResource {
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (inlineCode != null) {
|
| _json["inlineCode"] = inlineCode;
|
| }
|
| @@ -5447,16 +5709,16 @@ class UserDefinedFunctionResource {
|
| }
|
|
|
| class ViewDefinition {
|
| - /** [Required] A query that BigQuery executes when the view is referenced. */
|
| + /// [Required] A query that BigQuery executes when the view is referenced.
|
| core.String query;
|
| - /**
|
| - * Specifies whether to use BigQuery's legacy SQL for this view. The default
|
| - * value is true. If set to false, the view will use BigQuery's standard SQL:
|
| - * https://cloud.google.com/bigquery/sql-reference/ Queries and views that
|
| - * reference this view must use the same flag value.
|
| - */
|
| +
|
| + /// Specifies whether to use BigQuery's legacy SQL for this view. The default
|
| + /// value is true. If set to false, the view will use BigQuery's standard
|
| + /// SQL: https://cloud.google.com/bigquery/sql-reference/ Queries and views
|
| + /// that reference this view must use the same flag value.
|
| core.bool useLegacySql;
|
| - /** Describes user-defined function resources used in the query. */
|
| +
|
| + /// Describes user-defined function resources used in the query.
|
| core.List<UserDefinedFunctionResource> userDefinedFunctionResources;
|
|
|
| ViewDefinition();
|
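|
| Because useLegacySql defaults to true, a standard-SQL view has to opt out
| explicitly. A rough sketch, assuming the Table class exposes a `view` property and
| a `tables.insert` method as generated elsewhere in this library; names are
| placeholders.
|
|   // A view is a Table whose view property carries the query; inside an async function.
|   final view = new Table()
|     ..tableReference = (new TableReference()
|       ..projectId = 'my-project'
|       ..datasetId = 'my_dataset'
|       ..tableId = 'daily_counts')
|     ..view = (new ViewDefinition()
|       ..useLegacySql = false
|       ..query = 'SELECT DATE(event_time) AS day, COUNT(*) AS n '
|           'FROM `my-project.my_dataset.events` GROUP BY day');
|   await api.tables.insert(view, 'my-project', 'my_dataset');
|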
| @@ -5469,12 +5731,15 @@ class ViewDefinition {
|
| useLegacySql = _json["useLegacySql"];
|
| }
|
| if (_json.containsKey("userDefinedFunctionResources")) {
|
| - userDefinedFunctionResources = _json["userDefinedFunctionResources"].map((value) => new UserDefinedFunctionResource.fromJson(value)).toList();
|
| + userDefinedFunctionResources = _json["userDefinedFunctionResources"]
|
| + .map((value) => new UserDefinedFunctionResource.fromJson(value))
|
| + .toList();
|
| }
|
| }
|
|
|
| core.Map<core.String, core.Object> toJson() {
|
| - final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>();
|
| + final core.Map<core.String, core.Object> _json =
|
| + new core.Map<core.String, core.Object>();
|
| if (query != null) {
|
| _json["query"] = query;
|
| }
|
| @@ -5482,7 +5747,9 @@ class ViewDefinition {
|
| _json["useLegacySql"] = useLegacySql;
|
| }
|
| if (userDefinedFunctionResources != null) {
|
| - _json["userDefinedFunctionResources"] = userDefinedFunctionResources.map((value) => (value).toJson()).toList();
|
| + _json["userDefinedFunctionResources"] = userDefinedFunctionResources
|
| + .map((value) => (value).toJson())
|
| + .toList();
|
| }
|
| return _json;
|
| }
|
|
|