Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(483)

Side by Side Diff: generated/googleapis/lib/dataproc/v1.dart

Issue 2485703002: Api-roll 42: 2016-11-08 (Closed)
Patch Set: Created 4 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // This is a generated file (see the discoveryapis_generator project). 1 // This is a generated file (see the discoveryapis_generator project).
2 2
3 library googleapis.dataproc.v1; 3 library googleapis.dataproc.v1;
4 4
5 import 'dart:core' as core; 5 import 'dart:core' as core;
6 import 'dart:async' as async; 6 import 'dart:async' as async;
7 import 'dart:convert' as convert; 7 import 'dart:convert' as convert;
8 8
9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; 9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons;
10 import 'package:http/http.dart' as http; 10 import 'package:http/http.dart' as http;
11 11
12 export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' show 12 export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' show
13 ApiRequestError, DetailedApiRequestError; 13 ApiRequestError, DetailedApiRequestError;
14 14
15 const core.String USER_AGENT = 'dart-api-client dataproc/v1'; 15 const core.String USER_AGENT = 'dart-api-client dataproc/v1';
16 16
17 /** Manages Hadoop-based clusters and jobs on Google Cloud Platform. */ 17 /**
18 * An API for managing Hadoop-based clusters and jobs on Google Cloud Platform.
19 */
18 class DataprocApi { 20 class DataprocApi {
19 /** View and manage your data across Google Cloud Platform services */ 21 /** View and manage your data across Google Cloud Platform services */
20 static const CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"; 22 static const CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform";
21 23
22 24
23 final commons.ApiRequester _requester; 25 final commons.ApiRequester _requester;
24 26
25 ProjectsResourceApi get projects => new ProjectsResourceApi(_requester); 27 ProjectsResourceApi get projects => new ProjectsResourceApi(_requester);
26 28
27 DataprocApi(http.Client client, {core.String rootUrl: "https://dataproc.googleapis.com/", core.String servicePath: ""}) : 29 DataprocApi(http.Client client, {core.String rootUrl: "https://dataproc.googleapis.com/", core.String servicePath: ""}) :
(...skipping 243 matching lines...) Expand 10 before | Expand all | Expand 10 after
271 * Lists all regions/{region}/clusters in a project. 273 * Lists all regions/{region}/clusters in a project.
272 * 274 *
273 * Request parameters: 275 * Request parameters:
274 * 276 *
275 * [projectId] - [Required] The ID of the Google Cloud Platform project that 277 * [projectId] - [Required] The ID of the Google Cloud Platform project that
276 * the cluster belongs to. 278 * the cluster belongs to.
277 * 279 *
278 * [region] - [Required] The Cloud Dataproc region in which to handle the 280 * [region] - [Required] The Cloud Dataproc region in which to handle the
279 * request. 281 * request.
280 * 282 *
281 * [pageSize] - The standard List page size. 283 * [filter] - [Optional] A filter constraining the clusters to list. Filters
284 * are case-sensitive and have the following syntax: field:value [field:value]
285 * ... or field = value [AND [field = value]] ... where **field** is one of
286 * `status.state`, `clusterName`, or `labels.[KEY]`, and `[KEY]` is a label
287 * key. **value** can be `*` to match all values. `status.state` can be one of
288 * the following: `ACTIVE`, `INACTIVE`, `CREATING`, `RUNNING`, `ERROR`,
289 * `DELETING`, or `UPDATING`. `ACTIVE` contains the `CREATING`, `UPDATING`,
290 * and `RUNNING` states. `INACTIVE` contains the `DELETING` and `ERROR`
291 * states. `clusterName` is the name of the cluster provided at creation time.
292 * Only the logical `AND` operator is supported; space-separated items are
293 * treated as having an implicit `AND` operator. Example valid filters are:
294 * status.state:ACTIVE clusterName:mycluster labels.env:staging \
295 * labels.starred:* and status.state = ACTIVE AND clusterName = mycluster \
296 * AND labels.env = staging AND labels.starred = *
282 * 297 *
283 * [pageToken] - The standard List page token. 298 * [pageSize] - [Optional] The standard List page size.
299 *
300 * [pageToken] - [Optional] The standard List page token.
284 * 301 *
285 * Completes with a [ListClustersResponse]. 302 * Completes with a [ListClustersResponse].
286 * 303 *
287 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 304 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
288 * error. 305 * error.
289 * 306 *
290 * If the used [http.Client] completes with an error when making a REST call, 307 * If the used [http.Client] completes with an error when making a REST call,
291 * this method will complete with the same error. 308 * this method will complete with the same error.
292 */ 309 */
293 async.Future<ListClustersResponse> list(core.String projectId, core.String region, {core.int pageSize, core.String pageToken}) { 310 async.Future<ListClustersResponse> list(core.String projectId, core.String region, {core.String filter, core.int pageSize, core.String pageToken}) {
294 var _url = null; 311 var _url = null;
295 var _queryParams = new core.Map(); 312 var _queryParams = new core.Map();
296 var _uploadMedia = null; 313 var _uploadMedia = null;
297 var _uploadOptions = null; 314 var _uploadOptions = null;
298 var _downloadOptions = commons.DownloadOptions.Metadata; 315 var _downloadOptions = commons.DownloadOptions.Metadata;
299 var _body = null; 316 var _body = null;
300 317
301 if (projectId == null) { 318 if (projectId == null) {
302 throw new core.ArgumentError("Parameter projectId is required."); 319 throw new core.ArgumentError("Parameter projectId is required.");
303 } 320 }
304 if (region == null) { 321 if (region == null) {
305 throw new core.ArgumentError("Parameter region is required."); 322 throw new core.ArgumentError("Parameter region is required.");
306 } 323 }
324 if (filter != null) {
325 _queryParams["filter"] = [filter];
326 }
307 if (pageSize != null) { 327 if (pageSize != null) {
308 _queryParams["pageSize"] = ["${pageSize}"]; 328 _queryParams["pageSize"] = ["${pageSize}"];
309 } 329 }
310 if (pageToken != null) { 330 if (pageToken != null) {
311 _queryParams["pageToken"] = [pageToken]; 331 _queryParams["pageToken"] = [pageToken];
312 } 332 }
313 333
314 _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters'; 334 _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/clusters';
315 335
316 var _response = _requester.request(_url, 336 var _response = _requester.request(_url,
(...skipping 264 matching lines...) Expand 10 before | Expand all | Expand 10 after
581 * 601 *
582 * [pageSize] - [Optional] The number of results to return in each response. 602 * [pageSize] - [Optional] The number of results to return in each response.
583 * 603 *
584 * [pageToken] - [Optional] The page token, returned by a previous call, to 604 * [pageToken] - [Optional] The page token, returned by a previous call, to
585 * request the next page of results. 605 * request the next page of results.
586 * 606 *
587 * [clusterName] - [Optional] If set, the returned jobs list includes only 607 * [clusterName] - [Optional] If set, the returned jobs list includes only
588 * jobs that were submitted to the named cluster. 608 * jobs that were submitted to the named cluster.
589 * 609 *
590 * [jobStateMatcher] - [Optional] Specifies enumerated categories of jobs to 610 * [jobStateMatcher] - [Optional] Specifies enumerated categories of jobs to
591 * list. 611 * list (default = match ALL jobs).
592 * Possible string values are: 612 * Possible string values are:
593 * - "ALL" : A ALL. 613 * - "ALL" : A ALL.
594 * - "ACTIVE" : A ACTIVE. 614 * - "ACTIVE" : A ACTIVE.
595 * - "NON_ACTIVE" : A NON_ACTIVE. 615 * - "NON_ACTIVE" : A NON_ACTIVE.
596 * 616 *
617 * [filter] - [Optional] A filter constraining the jobs to list. Filters are
618 * case-sensitive and have the following syntax: field:value] ... or [field =
619 * value] AND [field [= value]] ... where **field** is `status.state` or
620 * `labels.[KEY]`, and `[KEY]` is a label key. **value** can be `*` to match
621 * all values. `status.state` can be either `ACTIVE` or `INACTIVE`. Only the
622 * logical `AND` operator is supported; space-separated items are treated as
623 * having an implicit `AND` operator. Example valid filters are:
624 * status.state:ACTIVE labels.env:staging labels.starred:* and status.state =
625 * ACTIVE AND labels.env = staging AND labels.starred = *
626 *
597 * Completes with a [ListJobsResponse]. 627 * Completes with a [ListJobsResponse].
598 * 628 *
599 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 629 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
600 * error. 630 * error.
601 * 631 *
602 * If the used [http.Client] completes with an error when making a REST call, 632 * If the used [http.Client] completes with an error when making a REST call,
603 * this method will complete with the same error. 633 * this method will complete with the same error.
604 */ 634 */
605 async.Future<ListJobsResponse> list(core.String projectId, core.String region, {core.int pageSize, core.String pageToken, core.String clusterName, core.String jobStateMatcher}) { 635 async.Future<ListJobsResponse> list(core.String projectId, core.String region, {core.int pageSize, core.String pageToken, core.String clusterName, core.String jobStateMatcher, core.String filter}) {
606 var _url = null; 636 var _url = null;
607 var _queryParams = new core.Map(); 637 var _queryParams = new core.Map();
608 var _uploadMedia = null; 638 var _uploadMedia = null;
609 var _uploadOptions = null; 639 var _uploadOptions = null;
610 var _downloadOptions = commons.DownloadOptions.Metadata; 640 var _downloadOptions = commons.DownloadOptions.Metadata;
611 var _body = null; 641 var _body = null;
612 642
613 if (projectId == null) { 643 if (projectId == null) {
614 throw new core.ArgumentError("Parameter projectId is required."); 644 throw new core.ArgumentError("Parameter projectId is required.");
615 } 645 }
616 if (region == null) { 646 if (region == null) {
617 throw new core.ArgumentError("Parameter region is required."); 647 throw new core.ArgumentError("Parameter region is required.");
618 } 648 }
619 if (pageSize != null) { 649 if (pageSize != null) {
620 _queryParams["pageSize"] = ["${pageSize}"]; 650 _queryParams["pageSize"] = ["${pageSize}"];
621 } 651 }
622 if (pageToken != null) { 652 if (pageToken != null) {
623 _queryParams["pageToken"] = [pageToken]; 653 _queryParams["pageToken"] = [pageToken];
624 } 654 }
625 if (clusterName != null) { 655 if (clusterName != null) {
626 _queryParams["clusterName"] = [clusterName]; 656 _queryParams["clusterName"] = [clusterName];
627 } 657 }
628 if (jobStateMatcher != null) { 658 if (jobStateMatcher != null) {
629 _queryParams["jobStateMatcher"] = [jobStateMatcher]; 659 _queryParams["jobStateMatcher"] = [jobStateMatcher];
630 } 660 }
661 if (filter != null) {
662 _queryParams["filter"] = [filter];
663 }
631 664
632 _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/jobs'; 665 _url = 'v1/projects/' + commons.Escaper.ecapeVariable('$projectId') + '/regions/' + commons.Escaper.ecapeVariable('$region') + '/jobs';
633 666
634 var _response = _requester.request(_url, 667 var _response = _requester.request(_url,
635 "GET", 668 "GET",
636 body: _body, 669 body: _body,
637 queryParams: _queryParams, 670 queryParams: _queryParams,
638 uploadOptions: _uploadOptions, 671 uploadOptions: _uploadOptions,
639 uploadMedia: _uploadMedia, 672 uploadMedia: _uploadMedia,
640 downloadOptions: _downloadOptions); 673 downloadOptions: _downloadOptions);
(...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after
700 733
701 ProjectsRegionsOperationsResourceApi(commons.ApiRequester client) : 734 ProjectsRegionsOperationsResourceApi(commons.ApiRequester client) :
702 _requester = client; 735 _requester = client;
703 736
704 /** 737 /**
705 * Starts asynchronous cancellation on a long-running operation. The server 738 * Starts asynchronous cancellation on a long-running operation. The server
706 * makes a best effort to cancel the operation, but success is not guaranteed. 739 * makes a best effort to cancel the operation, but success is not guaranteed.
707 * If the server doesn't support this method, it returns 740 * If the server doesn't support this method, it returns
708 * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or 741 * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or
709 * other methods to check whether the cancellation succeeded or whether the 742 * other methods to check whether the cancellation succeeded or whether the
710 * operation completed despite cancellation. 743 * operation completed despite cancellation. On successful cancellation, the
744 * operation is not deleted; instead, it becomes an operation with an
745 * Operation.error value with a google.rpc.Status.code of 1, corresponding to
746 * `Code.CANCELLED`.
711 * 747 *
712 * Request parameters: 748 * Request parameters:
713 * 749 *
714 * [name] - The name of the operation resource to be cancelled. 750 * [name] - The name of the operation resource to be cancelled.
715 * Value must have pattern "^projects/[^/] * / regions/[^/] * / 751 * Value must have pattern "^projects/[^/]+/regions/[^/]+/operations/[^/]+$".
716 * operations/[^/]*$".
717 * 752 *
718 * Completes with a [Empty]. 753 * Completes with a [Empty].
719 * 754 *
720 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 755 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
721 * error. 756 * error.
722 * 757 *
723 * If the used [http.Client] completes with an error when making a REST call, 758 * If the used [http.Client] completes with an error when making a REST call,
724 * this method will complete with the same error. 759 * this method will complete with the same error.
725 */ 760 */
726 async.Future<Empty> cancel(core.String name) { 761 async.Future<Empty> cancel(core.String name) {
(...skipping 22 matching lines...) Expand all
749 784
750 /** 785 /**
751 * Deletes a long-running operation. This method indicates that the client is 786 * Deletes a long-running operation. This method indicates that the client is
752 * no longer interested in the operation result. It does not cancel the 787 * no longer interested in the operation result. It does not cancel the
753 * operation. If the server doesn't support this method, it returns 788 * operation. If the server doesn't support this method, it returns
754 * `google.rpc.Code.UNIMPLEMENTED`. 789 * `google.rpc.Code.UNIMPLEMENTED`.
755 * 790 *
756 * Request parameters: 791 * Request parameters:
757 * 792 *
758 * [name] - The name of the operation resource to be deleted. 793 * [name] - The name of the operation resource to be deleted.
759 * Value must have pattern "^projects/[^/] * / regions/[^/] * / 794 * Value must have pattern "^projects/[^/]+/regions/[^/]+/operations/[^/]+$".
760 * operations/[^/]*$".
761 * 795 *
762 * Completes with a [Empty]. 796 * Completes with a [Empty].
763 * 797 *
764 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 798 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
765 * error. 799 * error.
766 * 800 *
767 * If the used [http.Client] completes with an error when making a REST call, 801 * If the used [http.Client] completes with an error when making a REST call,
768 * this method will complete with the same error. 802 * this method will complete with the same error.
769 */ 803 */
770 async.Future<Empty> delete(core.String name) { 804 async.Future<Empty> delete(core.String name) {
(...skipping 21 matching lines...) Expand all
792 } 826 }
793 827
794 /** 828 /**
795 * Gets the latest state of a long-running operation. Clients can use this 829 * Gets the latest state of a long-running operation. Clients can use this
796 * method to poll the operation result at intervals as recommended by the API 830 * method to poll the operation result at intervals as recommended by the API
797 * service. 831 * service.
798 * 832 *
799 * Request parameters: 833 * Request parameters:
800 * 834 *
801 * [name] - The name of the operation resource. 835 * [name] - The name of the operation resource.
802 * Value must have pattern "^projects/[^/] * / regions/[^/] * / 836 * Value must have pattern "^projects/[^/]+/regions/[^/]+/operations/[^/]+$".
803 * operations/[^/]*$".
804 * 837 *
805 * Completes with a [Operation]. 838 * Completes with a [Operation].
806 * 839 *
807 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 840 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
808 * error. 841 * error.
809 * 842 *
810 * If the used [http.Client] completes with an error when making a REST call, 843 * If the used [http.Client] completes with an error when making a REST call,
811 * this method will complete with the same error. 844 * this method will complete with the same error.
812 */ 845 */
813 async.Future<Operation> get(core.String name) { 846 async.Future<Operation> get(core.String name) {
(...skipping 22 matching lines...) Expand all
836 869
837 /** 870 /**
838 * Lists operations that match the specified filter in the request. If the 871 * Lists operations that match the specified filter in the request. If the
839 * server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the 872 * server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the
840 * `name` binding below allows API services to override the binding to use 873 * `name` binding below allows API services to override the binding to use
841 * different resource name schemes, such as `users / * /operations`. 874 * different resource name schemes, such as `users / * /operations`.
842 * 875 *
843 * Request parameters: 876 * Request parameters:
844 * 877 *
845 * [name] - The name of the operation collection. 878 * [name] - The name of the operation collection.
846 * Value must have pattern "^projects/[^/] * / regions/[^/] * / operations$". 879 * Value must have pattern "^projects/[^/]+/regions/[^/]+/operations$".
847 * 880 *
848 * [filter] - The standard list filter. 881 * [filter] - The standard list filter.
849 * 882 *
850 * [pageSize] - The standard list page size. 883 * [pageSize] - The standard list page size.
851 * 884 *
852 * [pageToken] - The standard list page token. 885 * [pageToken] - The standard list page token.
853 * 886 *
854 * Completes with a [ListOperationsResponse]. 887 * Completes with a [ListOperationsResponse].
855 * 888 *
856 * Completes with a [commons.ApiRequestError] if the API endpoint returned an 889 * Completes with a [commons.ApiRequestError] if the API endpoint returned an
(...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after
924 * [Output-only] A cluster UUID (Unique Universal Identifier). Cloud Dataproc 957 * [Output-only] A cluster UUID (Unique Universal Identifier). Cloud Dataproc
925 * generates this value when it creates the cluster. 958 * generates this value when it creates the cluster.
926 */ 959 */
927 core.String clusterUuid; 960 core.String clusterUuid;
928 /** 961 /**
929 * [Required] The cluster config. Note that Cloud Dataproc may set default 962 * [Required] The cluster config. Note that Cloud Dataproc may set default
930 * values, and values may change when clusters are updated. 963 * values, and values may change when clusters are updated.
931 */ 964 */
932 ClusterConfig config; 965 ClusterConfig config;
933 /** 966 /**
967 * [Optional] The labels to associate with this cluster. Label **keys** must
968 * contain 1 to 63 characters, and must conform to [RFC
969 * 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty,
970 * but, if present, must contain 1 to 63 characters, and must conform to [RFC
971 * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
972 * associated with a cluster.
973 */
974 core.Map<core.String, core.String> labels;
975 /**
976 * Contains cluster daemon metrics such as HDFS and YARN stats. **Beta
977 * Feature**: This report is available for testing purposes only. It may be
978 * changed before final release.
979 */
980 ClusterMetrics metrics;
981 /**
934 * [Required] The Google Cloud Platform project ID that the cluster belongs 982 * [Required] The Google Cloud Platform project ID that the cluster belongs
935 * to. 983 * to.
936 */ 984 */
937 core.String projectId; 985 core.String projectId;
938 /** [Output-only] Cluster status. */ 986 /** [Output-only] Cluster status. */
939 ClusterStatus status; 987 ClusterStatus status;
940 /** [Output-only] The previous cluster status. */ 988 /** [Output-only] The previous cluster status. */
941 core.List<ClusterStatus> statusHistory; 989 core.List<ClusterStatus> statusHistory;
942 990
943 Cluster(); 991 Cluster();
944 992
945 Cluster.fromJson(core.Map _json) { 993 Cluster.fromJson(core.Map _json) {
946 if (_json.containsKey("clusterName")) { 994 if (_json.containsKey("clusterName")) {
947 clusterName = _json["clusterName"]; 995 clusterName = _json["clusterName"];
948 } 996 }
949 if (_json.containsKey("clusterUuid")) { 997 if (_json.containsKey("clusterUuid")) {
950 clusterUuid = _json["clusterUuid"]; 998 clusterUuid = _json["clusterUuid"];
951 } 999 }
952 if (_json.containsKey("config")) { 1000 if (_json.containsKey("config")) {
953 config = new ClusterConfig.fromJson(_json["config"]); 1001 config = new ClusterConfig.fromJson(_json["config"]);
954 } 1002 }
1003 if (_json.containsKey("labels")) {
1004 labels = _json["labels"];
1005 }
1006 if (_json.containsKey("metrics")) {
1007 metrics = new ClusterMetrics.fromJson(_json["metrics"]);
1008 }
955 if (_json.containsKey("projectId")) { 1009 if (_json.containsKey("projectId")) {
956 projectId = _json["projectId"]; 1010 projectId = _json["projectId"];
957 } 1011 }
958 if (_json.containsKey("status")) { 1012 if (_json.containsKey("status")) {
959 status = new ClusterStatus.fromJson(_json["status"]); 1013 status = new ClusterStatus.fromJson(_json["status"]);
960 } 1014 }
961 if (_json.containsKey("statusHistory")) { 1015 if (_json.containsKey("statusHistory")) {
962 statusHistory = _json["statusHistory"].map((value) => new ClusterStatus.fromJson(value)).toList(); 1016 statusHistory = _json["statusHistory"].map((value) => new ClusterStatus.fromJson(value)).toList();
963 } 1017 }
964 } 1018 }
965 1019
966 core.Map toJson() { 1020 core.Map toJson() {
967 var _json = new core.Map(); 1021 var _json = new core.Map();
968 if (clusterName != null) { 1022 if (clusterName != null) {
969 _json["clusterName"] = clusterName; 1023 _json["clusterName"] = clusterName;
970 } 1024 }
971 if (clusterUuid != null) { 1025 if (clusterUuid != null) {
972 _json["clusterUuid"] = clusterUuid; 1026 _json["clusterUuid"] = clusterUuid;
973 } 1027 }
974 if (config != null) { 1028 if (config != null) {
975 _json["config"] = (config).toJson(); 1029 _json["config"] = (config).toJson();
976 } 1030 }
1031 if (labels != null) {
1032 _json["labels"] = labels;
1033 }
1034 if (metrics != null) {
1035 _json["metrics"] = (metrics).toJson();
1036 }
977 if (projectId != null) { 1037 if (projectId != null) {
978 _json["projectId"] = projectId; 1038 _json["projectId"] = projectId;
979 } 1039 }
980 if (status != null) { 1040 if (status != null) {
981 _json["status"] = (status).toJson(); 1041 _json["status"] = (status).toJson();
982 } 1042 }
983 if (statusHistory != null) { 1043 if (statusHistory != null) {
984 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).toList(); 1044 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).toList();
985 } 1045 }
986 return _json; 1046 return _json;
(...skipping 13 matching lines...) Expand all
1000 core.String configBucket; 1060 core.String configBucket;
1001 /** 1061 /**
1002 * [Required] The shared Google Compute Engine config settings for all 1062 * [Required] The shared Google Compute Engine config settings for all
1003 * instances in a cluster. 1063 * instances in a cluster.
1004 */ 1064 */
1005 GceClusterConfig gceClusterConfig; 1065 GceClusterConfig gceClusterConfig;
1006 /** 1066 /**
1007 * [Optional] Commands to execute on each node after config is completed. By 1067 * [Optional] Commands to execute on each node after config is completed. By
1008 * default, executables are run on master and all worker nodes. You can test a 1068 * default, executables are run on master and all worker nodes. You can test a
1009 * node's role metadata to run an executable on a master or worker node, as 1069 * node's role metadata to run an executable on a master or worker node, as
1010 * shown below: ROLE=$(/usr/share/google/get_metadata_value attributes/role) 1070 * shown below using `curl` (you can also use `wget`): ROLE=$(curl -H
1011 * if [[ "${ROLE}" == 'Master' ]]; then ... master specific actions ... else 1071 * Metadata-Flavor:Google
1012 * ... worker specific actions ... fi 1072 * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[
1073 * "${ROLE}" == 'Master' ]]; then ... master specific actions ... else ...
1074 * worker specific actions ... fi
1013 */ 1075 */
1014 core.List<NodeInitializationAction> initializationActions; 1076 core.List<NodeInitializationAction> initializationActions;
1015 /** 1077 /**
1016 * [Optional] The Google Compute Engine config settings for the master 1078 * [Optional] The Google Compute Engine config settings for the master
1017 * instance in a cluster. 1079 * instance in a cluster.
1018 */ 1080 */
1019 InstanceGroupConfig masterConfig; 1081 InstanceGroupConfig masterConfig;
1020 /** 1082 /**
1021 * [Optional] The Google Compute Engine config settings for additional worker 1083 * [Optional] The Google Compute Engine config settings for additional worker
1022 * instances in a cluster. 1084 * instances in a cluster.
(...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after
1076 if (softwareConfig != null) { 1138 if (softwareConfig != null) {
1077 _json["softwareConfig"] = (softwareConfig).toJson(); 1139 _json["softwareConfig"] = (softwareConfig).toJson();
1078 } 1140 }
1079 if (workerConfig != null) { 1141 if (workerConfig != null) {
1080 _json["workerConfig"] = (workerConfig).toJson(); 1142 _json["workerConfig"] = (workerConfig).toJson();
1081 } 1143 }
1082 return _json; 1144 return _json;
1083 } 1145 }
1084 } 1146 }
1085 1147
1148 /**
1149 * Contains cluster daemon metrics, such as HDFS and YARN stats. **Beta
1150 * Feature**: This report is available for testing purposes only. It may be
1151 * changed before final release.
1152 */
1153 class ClusterMetrics {
1154 /** The HDFS metrics. */
1155 core.Map<core.String, core.String> hdfsMetrics;
1156 /** The YARN metrics. */
1157 core.Map<core.String, core.String> yarnMetrics;
1158
1159 ClusterMetrics();
1160
1161 ClusterMetrics.fromJson(core.Map _json) {
1162 if (_json.containsKey("hdfsMetrics")) {
1163 hdfsMetrics = _json["hdfsMetrics"];
1164 }
1165 if (_json.containsKey("yarnMetrics")) {
1166 yarnMetrics = _json["yarnMetrics"];
1167 }
1168 }
1169
1170 core.Map toJson() {
1171 var _json = new core.Map();
1172 if (hdfsMetrics != null) {
1173 _json["hdfsMetrics"] = hdfsMetrics;
1174 }
1175 if (yarnMetrics != null) {
1176 _json["yarnMetrics"] = yarnMetrics;
1177 }
1178 return _json;
1179 }
1180 }
1181
1086 /** Metadata describing the operation. */ 1182 /** Metadata describing the operation. */
1087 class ClusterOperationMetadata { 1183 class ClusterOperationMetadata {
1088 /** Name of the cluster for the operation. */ 1184 /** [Output-only] Name of the cluster for the operation. */
1089 core.String clusterName; 1185 core.String clusterName;
1090 /** Cluster UUId for the operation. */ 1186 /** [Output-only] Cluster UUID for the operation. */
1091 core.String clusterUuid; 1187 core.String clusterUuid;
1092 /** [Output-only] Short description of operation. */ 1188 /** [Output-only] Short description of operation. */
1093 core.String description; 1189 core.String description;
1190 /** [Output-only] labels associated with the operation */
1191 core.Map<core.String, core.String> labels;
1094 /** [Output-only] The operation type. */ 1192 /** [Output-only] The operation type. */
1095 core.String operationType; 1193 core.String operationType;
1096 /** [Output-only] Current operation status. */ 1194 /** [Output-only] Current operation status. */
1097 ClusterOperationStatus status; 1195 ClusterOperationStatus status;
1098 /** [Output-only] The previous operation status. */ 1196 /** [Output-only] The previous operation status. */
1099 core.List<ClusterOperationStatus> statusHistory; 1197 core.List<ClusterOperationStatus> statusHistory;
1100 1198
1101 ClusterOperationMetadata(); 1199 ClusterOperationMetadata();
1102 1200
1103 ClusterOperationMetadata.fromJson(core.Map _json) { 1201 ClusterOperationMetadata.fromJson(core.Map _json) {
1104 if (_json.containsKey("clusterName")) { 1202 if (_json.containsKey("clusterName")) {
1105 clusterName = _json["clusterName"]; 1203 clusterName = _json["clusterName"];
1106 } 1204 }
1107 if (_json.containsKey("clusterUuid")) { 1205 if (_json.containsKey("clusterUuid")) {
1108 clusterUuid = _json["clusterUuid"]; 1206 clusterUuid = _json["clusterUuid"];
1109 } 1207 }
1110 if (_json.containsKey("description")) { 1208 if (_json.containsKey("description")) {
1111 description = _json["description"]; 1209 description = _json["description"];
1112 } 1210 }
1211 if (_json.containsKey("labels")) {
1212 labels = _json["labels"];
1213 }
1113 if (_json.containsKey("operationType")) { 1214 if (_json.containsKey("operationType")) {
1114 operationType = _json["operationType"]; 1215 operationType = _json["operationType"];
1115 } 1216 }
1116 if (_json.containsKey("status")) { 1217 if (_json.containsKey("status")) {
1117 status = new ClusterOperationStatus.fromJson(_json["status"]); 1218 status = new ClusterOperationStatus.fromJson(_json["status"]);
1118 } 1219 }
1119 if (_json.containsKey("statusHistory")) { 1220 if (_json.containsKey("statusHistory")) {
1120 statusHistory = _json["statusHistory"].map((value) => new ClusterOperationStatus.fromJson(value)).toList(); 1221 statusHistory = _json["statusHistory"].map((value) => new ClusterOperationStatus.fromJson(value)).toList();
1121 } 1222 }
1122 } 1223 }
1123 1224
1124 core.Map toJson() { 1225 core.Map toJson() {
1125 var _json = new core.Map(); 1226 var _json = new core.Map();
1126 if (clusterName != null) { 1227 if (clusterName != null) {
1127 _json["clusterName"] = clusterName; 1228 _json["clusterName"] = clusterName;
1128 } 1229 }
1129 if (clusterUuid != null) { 1230 if (clusterUuid != null) {
1130 _json["clusterUuid"] = clusterUuid; 1231 _json["clusterUuid"] = clusterUuid;
1131 } 1232 }
1132 if (description != null) { 1233 if (description != null) {
1133 _json["description"] = description; 1234 _json["description"] = description;
1134 } 1235 }
1236 if (labels != null) {
1237 _json["labels"] = labels;
1238 }
1135 if (operationType != null) { 1239 if (operationType != null) {
1136 _json["operationType"] = operationType; 1240 _json["operationType"] = operationType;
1137 } 1241 }
1138 if (status != null) { 1242 if (status != null) {
1139 _json["status"] = (status).toJson(); 1243 _json["status"] = (status).toJson();
1140 } 1244 }
1141 if (statusHistory != null) { 1245 if (statusHistory != null) {
1142 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to List(); 1246 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to List();
1143 } 1247 }
1144 return _json; 1248 return _json;
1145 } 1249 }
1146 } 1250 }
1147 1251
1148 /** The status of the operation. */ 1252 /** The status of the operation. */
1149 class ClusterOperationStatus { 1253 class ClusterOperationStatus {
1150 /** A message containing any operation metadata details. */ 1254 /** [Output-only]A message containing any operation metadata details. */
1151 core.String details; 1255 core.String details;
1152 /** A message containing the detailed operation state. */ 1256 /** [Output-only] A message containing the detailed operation state. */
1153 core.String innerState; 1257 core.String innerState;
1154 /** 1258 /**
1155 * A message containing the operation state. 1259 * [Output-only] A message containing the operation state.
1156 * Possible string values are: 1260 * Possible string values are:
1157 * - "UNKNOWN" : A UNKNOWN. 1261 * - "UNKNOWN" : A UNKNOWN.
1158 * - "PENDING" : A PENDING. 1262 * - "PENDING" : A PENDING.
1159 * - "RUNNING" : A RUNNING. 1263 * - "RUNNING" : A RUNNING.
1160 * - "DONE" : A DONE. 1264 * - "DONE" : A DONE.
1161 */ 1265 */
1162 core.String state; 1266 core.String state;
1163 /** The time this state was entered. */ 1267 /** [Output-only] The time this state was entered. */
1164 core.String stateStartTime; 1268 core.String stateStartTime;
1165 1269
1166 ClusterOperationStatus(); 1270 ClusterOperationStatus();
1167 1271
1168 ClusterOperationStatus.fromJson(core.Map _json) { 1272 ClusterOperationStatus.fromJson(core.Map _json) {
1169 if (_json.containsKey("details")) { 1273 if (_json.containsKey("details")) {
1170 details = _json["details"]; 1274 details = _json["details"];
1171 } 1275 }
1172 if (_json.containsKey("innerState")) { 1276 if (_json.containsKey("innerState")) {
1173 innerState = _json["innerState"]; 1277 innerState = _json["innerState"];
(...skipping 19 matching lines...) Expand all
1193 } 1297 }
1194 if (stateStartTime != null) { 1298 if (stateStartTime != null) {
1195 _json["stateStartTime"] = stateStartTime; 1299 _json["stateStartTime"] = stateStartTime;
1196 } 1300 }
1197 return _json; 1301 return _json;
1198 } 1302 }
1199 } 1303 }
1200 1304
1201 /** The status of a cluster and its instances. */ 1305 /** The status of a cluster and its instances. */
1202 class ClusterStatus { 1306 class ClusterStatus {
1203 /** Optional details of cluster's state. */ 1307 /** [Output-only] Optional details of cluster's state. */
1204 core.String detail; 1308 core.String detail;
1205 /** 1309 /**
1206 * The cluster's state. 1310 * [Output-only] The cluster's state.
1207 * Possible string values are: 1311 * Possible string values are:
1208 * - "UNKNOWN" : A UNKNOWN. 1312 * - "UNKNOWN" : A UNKNOWN.
1209 * - "CREATING" : A CREATING. 1313 * - "CREATING" : A CREATING.
1210 * - "RUNNING" : A RUNNING. 1314 * - "RUNNING" : A RUNNING.
1211 * - "ERROR" : A ERROR. 1315 * - "ERROR" : A ERROR.
1212 * - "DELETING" : A DELETING. 1316 * - "DELETING" : A DELETING.
1213 * - "UPDATING" : A UPDATING. 1317 * - "UPDATING" : A UPDATING.
1214 */ 1318 */
1215 core.String state; 1319 core.String state;
1216 /** Time when this state was entered. */ 1320 /** [Output-only] Time when this state was entered. */
1217 core.String stateStartTime; 1321 core.String stateStartTime;
1218 1322
1219 ClusterStatus(); 1323 ClusterStatus();
1220 1324
1221 ClusterStatus.fromJson(core.Map _json) { 1325 ClusterStatus.fromJson(core.Map _json) {
1222 if (_json.containsKey("detail")) { 1326 if (_json.containsKey("detail")) {
1223 detail = _json["detail"]; 1327 detail = _json["detail"];
1224 } 1328 }
1225 if (_json.containsKey("state")) { 1329 if (_json.containsKey("state")) {
1226 state = _json["state"]; 1330 state = _json["state"];
(...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after
1280 1384
1281 core.Map toJson() { 1385 core.Map toJson() {
1282 var _json = new core.Map(); 1386 var _json = new core.Map();
1283 return _json; 1387 return _json;
1284 } 1388 }
1285 } 1389 }
1286 1390
1287 /** The location of diagnostic output. */ 1391 /** The location of diagnostic output. */
1288 class DiagnoseClusterResults { 1392 class DiagnoseClusterResults {
1289 /** 1393 /**
1290 * [Output-only] The Google Cloud Storage URI of the diagnostic output. This 1394 * [Output-only] The Google Cloud Storage URI of the diagnostic output. The
1291 * is a plain text file with a summary of collected diagnostics. 1395 * output report is a plain text file with a summary of collected diagnostics.
1292 */ 1396 */
1293 core.String outputUri; 1397 core.String outputUri;
1294 1398
1295 DiagnoseClusterResults(); 1399 DiagnoseClusterResults();
1296 1400
1297 DiagnoseClusterResults.fromJson(core.Map _json) { 1401 DiagnoseClusterResults.fromJson(core.Map _json) {
1298 if (_json.containsKey("outputUri")) { 1402 if (_json.containsKey("outputUri")) {
1299 outputUri = _json["outputUri"]; 1403 outputUri = _json["outputUri"];
1300 } 1404 }
1301 } 1405 }
1302 1406
1303 core.Map toJson() { 1407 core.Map toJson() {
1304 var _json = new core.Map(); 1408 var _json = new core.Map();
1305 if (outputUri != null) { 1409 if (outputUri != null) {
1306 _json["outputUri"] = outputUri; 1410 _json["outputUri"] = outputUri;
1307 } 1411 }
1308 return _json; 1412 return _json;
1309 } 1413 }
1310 } 1414 }
1311 1415
1312 /** Specifies the config of disk options for a group of VM instances. */ 1416 /** Specifies the config of disk options for a group of VM instances. */
1313 class DiskConfig { 1417 class DiskConfig {
1314 /** [Optional] Size in GB of the boot disk (default is 500GB). */ 1418 /** [Optional] Size in GB of the boot disk (default is 500GB). */
1315 core.int bootDiskSizeGb; 1419 core.int bootDiskSizeGb;
1316 /** 1420 /**
1317 * [Optional] Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are 1421 * [Optional] Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are
1318 * not attached, the boot disk is used to store runtime logs and HDFS data. If 1422 * not attached, the boot disk is used to store runtime logs and
1423 * [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If
1319 * one or more SSDs are attached, this runtime bulk data is spread across 1424 * one or more SSDs are attached, this runtime bulk data is spread across
1320 * them, and the boot disk contains only basic config and installed binaries. 1425 * them, and the boot disk contains only basic config and installed binaries.
1321 */ 1426 */
1322 core.int numLocalSsds; 1427 core.int numLocalSsds;
1323 1428
1324 DiskConfig(); 1429 DiskConfig();
1325 1430
1326 DiskConfig.fromJson(core.Map _json) { 1431 DiskConfig.fromJson(core.Map _json) {
1327 if (_json.containsKey("bootDiskSizeGb")) { 1432 if (_json.containsKey("bootDiskSizeGb")) {
1328 bootDiskSizeGb = _json["bootDiskSizeGb"]; 1433 bootDiskSizeGb = _json["bootDiskSizeGb"];
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
1362 var _json = new core.Map(); 1467 var _json = new core.Map();
1363 return _json; 1468 return _json;
1364 } 1469 }
1365 } 1470 }
1366 1471
1367 /** 1472 /**
1368 * Common config settings for resources of Google Compute Engine cluster 1473 * Common config settings for resources of Google Compute Engine cluster
1369 * instances, applicable to all instances in the cluster. 1474 * instances, applicable to all instances in the cluster.
1370 */ 1475 */
1371 class GceClusterConfig { 1476 class GceClusterConfig {
1372 /** The Google Compute Engine metadata entries to add to all instances. */ 1477 /**
1478 * [Optional] If true, all instances in the cluster will only have internal IP
1479 * addresses. By default, clusters are not restricted to internal IP
1480 * addresses, and will have ephemeral external IP addresses assigned to each
1481 * instance. This `internal_ip_only` restriction can only be enabled for
1482 * subnetwork enabled networks, and all off-cluster dependencies must be
1483 * configured to be accessible without external IP addresses.
1484 */
1485 core.bool internalIpOnly;
1486 /**
1487 * The Google Compute Engine metadata entries to add to all instances (see
1488 * [Project and instance
1489 * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata #project_and_instance_metadata)).
1490 */
1373 core.Map<core.String, core.String> metadata; 1491 core.Map<core.String, core.String> metadata;
1374 /** 1492 /**
1375 * The Google Compute Engine network to be used for machine communications. 1493 * [Optional] The Google Compute Engine network to be used for machine
1376 * Cannot be specified with subnetwork_uri. If neither network_uri nor 1494 * communications. Cannot be specified with subnetwork_uri. If neither
1377 * subnetwork_uri is specified, the "default" network of the project is used, 1495 * `network_uri` nor `subnetwork_uri` is specified, the "default" network of
1378 * if it exists. Cannot be a "Custom Subnet Network" (see 1496 * the project is used, if it exists. Cannot be a "Custom Subnet Network" (see
1379 * https://cloud.google.com/compute/docs/subnetworks for more information). 1497 * [Using Subnetworks](/compute/docs/subnetworks) for more information).
1380 * Example: 1498 * Example:
1381 * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global /default`. 1499 * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global /default`.
1382 */ 1500 */
1383 core.String networkUri; 1501 core.String networkUri;
1384 /** 1502 /**
1385 * The URIs of service account scopes to be included in Google Compute Engine 1503 * [Optional] The URIs of service account scopes to be included in Google
1386 * instances. The following base set of scopes is always included: * 1504 * Compute Engine instances. The following base set of scopes is always
1387 * https://www.googleapis.com/auth/cloud.useraccounts.readonly * 1505 * included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly *
1388 * https://www.googleapis.com/auth/devstorage.read_write * 1506 * https://www.googleapis.com/auth/devstorage.read_write *
1389 * https://www.googleapis.com/auth/logging.write If no scopes are specfied, 1507 * https://www.googleapis.com/auth/logging.write If no scopes are specified,
1390 * the following defaults are also provided: * 1508 * the following defaults are also provided: *
1391 * https://www.googleapis.com/auth/bigquery * 1509 * https://www.googleapis.com/auth/bigquery *
1392 * https://www.googleapis.com/auth/bigtable.admin.table * 1510 * https://www.googleapis.com/auth/bigtable.admin.table *
1393 * https://www.googleapis.com/auth/bigtable.data * 1511 * https://www.googleapis.com/auth/bigtable.data *
1394 * https://www.googleapis.com/auth/devstorage.full_control 1512 * https://www.googleapis.com/auth/devstorage.full_control
1395 */ 1513 */
1396 core.List<core.String> serviceAccountScopes; 1514 core.List<core.String> serviceAccountScopes;
1397 /** 1515 /**
1398 * The Google Compute Engine subnetwork to be used for machine communications. 1516 * [Optional] The Google Compute Engine subnetwork to be used for machine
1399 * Cannot be specified with network_uri. Example: 1517 * communications. Cannot be specified with network_uri. Example:
1400 * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-eas t1/sub0`. 1518 * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-eas t1/sub0`.
1401 */ 1519 */
1402 core.String subnetworkUri; 1520 core.String subnetworkUri;
1403 /** The Google Compute Engine tags to add to all instances. */ 1521 /**
1522 * The Google Compute Engine tags to add to all instances (see [Tagging
1523 * instances](/compute/docs/label-or-tag-resources#tags)).
1524 */
1404 core.List<core.String> tags; 1525 core.List<core.String> tags;
1405 /** 1526 /**
1406 * [Required] The zone where the Google Compute Engine cluster will be 1527 * [Required] The zone where the Google Compute Engine cluster will be
1407 * located. Example: 1528 * located. Example:
1408 * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`. 1529 * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`.
1409 */ 1530 */
1410 core.String zoneUri; 1531 core.String zoneUri;
1411 1532
1412 GceClusterConfig(); 1533 GceClusterConfig();
1413 1534
1414 GceClusterConfig.fromJson(core.Map _json) { 1535 GceClusterConfig.fromJson(core.Map _json) {
1536 if (_json.containsKey("internalIpOnly")) {
1537 internalIpOnly = _json["internalIpOnly"];
1538 }
1415 if (_json.containsKey("metadata")) { 1539 if (_json.containsKey("metadata")) {
1416 metadata = _json["metadata"]; 1540 metadata = _json["metadata"];
1417 } 1541 }
1418 if (_json.containsKey("networkUri")) { 1542 if (_json.containsKey("networkUri")) {
1419 networkUri = _json["networkUri"]; 1543 networkUri = _json["networkUri"];
1420 } 1544 }
1421 if (_json.containsKey("serviceAccountScopes")) { 1545 if (_json.containsKey("serviceAccountScopes")) {
1422 serviceAccountScopes = _json["serviceAccountScopes"]; 1546 serviceAccountScopes = _json["serviceAccountScopes"];
1423 } 1547 }
1424 if (_json.containsKey("subnetworkUri")) { 1548 if (_json.containsKey("subnetworkUri")) {
1425 subnetworkUri = _json["subnetworkUri"]; 1549 subnetworkUri = _json["subnetworkUri"];
1426 } 1550 }
1427 if (_json.containsKey("tags")) { 1551 if (_json.containsKey("tags")) {
1428 tags = _json["tags"]; 1552 tags = _json["tags"];
1429 } 1553 }
1430 if (_json.containsKey("zoneUri")) { 1554 if (_json.containsKey("zoneUri")) {
1431 zoneUri = _json["zoneUri"]; 1555 zoneUri = _json["zoneUri"];
1432 } 1556 }
1433 } 1557 }
1434 1558
1435 core.Map toJson() { 1559 core.Map toJson() {
1436 var _json = new core.Map(); 1560 var _json = new core.Map();
1561 if (internalIpOnly != null) {
1562 _json["internalIpOnly"] = internalIpOnly;
1563 }
1437 if (metadata != null) { 1564 if (metadata != null) {
1438 _json["metadata"] = metadata; 1565 _json["metadata"] = metadata;
1439 } 1566 }
1440 if (networkUri != null) { 1567 if (networkUri != null) {
1441 _json["networkUri"] = networkUri; 1568 _json["networkUri"] = networkUri;
1442 } 1569 }
1443 if (serviceAccountScopes != null) { 1570 if (serviceAccountScopes != null) {
1444 _json["serviceAccountScopes"] = serviceAccountScopes; 1571 _json["serviceAccountScopes"] = serviceAccountScopes;
1445 } 1572 }
1446 if (subnetworkUri != null) { 1573 if (subnetworkUri != null) {
1447 _json["subnetworkUri"] = subnetworkUri; 1574 _json["subnetworkUri"] = subnetworkUri;
1448 } 1575 }
1449 if (tags != null) { 1576 if (tags != null) {
1450 _json["tags"] = tags; 1577 _json["tags"] = tags;
1451 } 1578 }
1452 if (zoneUri != null) { 1579 if (zoneUri != null) {
1453 _json["zoneUri"] = zoneUri; 1580 _json["zoneUri"] = zoneUri;
1454 } 1581 }
1455 return _json; 1582 return _json;
1456 } 1583 }
1457 } 1584 }
1458 1585
1459 /** A Cloud Dataproc job for running Hadoop MapReduce jobs on YARN. */ 1586 /**
1587 * A Cloud Dataproc job for running [Apache Hadoop
1588 * MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/had oop-mapreduce-client-core/MapReduceTutorial.html)
1589 * jobs on [Apache Hadoop
1590 * YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN .html).
1591 */
1460 class HadoopJob { 1592 class HadoopJob {
1461 /** 1593 /**
1462 * [Optional] HCFS URIs of archives to be extracted in the working directory 1594 * [Optional] HCFS URIs of archives to be extracted in the working directory
1463 * of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, 1595 * of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz,
1464 * .tgz, or .zip. 1596 * .tgz, or .zip.
1465 */ 1597 */
1466 core.List<core.String> archiveUris; 1598 core.List<core.String> archiveUris;
1467 /** 1599 /**
1468 * [Optional] The arguments to pass to the driver. Do not include arguments, 1600 * [Optional] The arguments to pass to the driver. Do not include arguments,
1469 * such as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since 1601 * such as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since
(...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after
1555 if (mainJarFileUri != null) { 1687 if (mainJarFileUri != null) {
1556 _json["mainJarFileUri"] = mainJarFileUri; 1688 _json["mainJarFileUri"] = mainJarFileUri;
1557 } 1689 }
1558 if (properties != null) { 1690 if (properties != null) {
1559 _json["properties"] = properties; 1691 _json["properties"] = properties;
1560 } 1692 }
1561 return _json; 1693 return _json;
1562 } 1694 }
1563 } 1695 }
1564 1696
1565 /** A Cloud Dataproc job for running Hive queries on YARN. */ 1697 /**
1698 * A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/)
1699 * queries on YARN.
1700 */
1566 class HiveJob { 1701 class HiveJob {
1567 /** 1702 /**
1568 * [Optional] Whether to continue executing queries if a query fails. The 1703 * [Optional] Whether to continue executing queries if a query fails. The
1569 * default value is `false`. Setting to `true` can be useful when executing 1704 * default value is `false`. Setting to `true` can be useful when executing
1570 * independent parallel queries. 1705 * independent parallel queries.
1571 */ 1706 */
1572 core.bool continueOnFailure; 1707 core.bool continueOnFailure;
1573 /** 1708 /**
1574 * [Optional] HCFS URIs of jar files to add to the CLASSPATH of the Hive 1709 * [Optional] HCFS URIs of jar files to add to the CLASSPATH of the Hive
1575 * server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. 1710 * server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after
1633 _json["queryList"] = (queryList).toJson(); 1768 _json["queryList"] = (queryList).toJson();
1634 } 1769 }
1635 if (scriptVariables != null) { 1770 if (scriptVariables != null) {
1636 _json["scriptVariables"] = scriptVariables; 1771 _json["scriptVariables"] = scriptVariables;
1637 } 1772 }
1638 return _json; 1773 return _json;
1639 } 1774 }
1640 } 1775 }
1641 1776
1642 /** 1777 /**
1643 * The config settings for Google Compute Engine resources in an instance group, 1778 * [Optional] The config settings for Google Compute Engine resources in an
1644 * such as a master or worker group. 1779 * instance group, such as a master or worker group.
1645 */ 1780 */
1646 class InstanceGroupConfig { 1781 class InstanceGroupConfig {
1647 /** Disk option config settings. */ 1782 /** [Optional] Disk option config settings. */
1648 DiskConfig diskConfig; 1783 DiskConfig diskConfig;
1649 /** 1784 /**
1650 * [Output-only] The Google Compute Engine image resource used for cluster 1785 * [Output-only] The Google Compute Engine image resource used for cluster
1651 * instances. Inferred from `SoftwareConfig.image_version`. 1786 * instances. Inferred from `SoftwareConfig.image_version`.
1652 */ 1787 */
1653 core.String imageUri; 1788 core.String imageUri;
1654 /** 1789 /**
1655 * The list of instance names. Cloud Dataproc derives the names from 1790 * [Optional] The list of instance names. Cloud Dataproc derives the names
1656 * `cluster_name`, `num_instances`, and the instance group if not set by user 1791 * from `cluster_name`, `num_instances`, and the instance group if not set by
1657 * (recommended practice is to let Cloud Dataproc derive the name). 1792 * user (recommended practice is to let Cloud Dataproc derive the name).
1658 */ 1793 */
1659 core.List<core.String> instanceNames; 1794 core.List<core.String> instanceNames;
1660 /** Specifies that this instance group contains Preemptible Instances. */ 1795 /**
1796 * [Optional] Specifies that this instance group contains preemptible
1797 * instances.
1798 */
1661 core.bool isPreemptible; 1799 core.bool isPreemptible;
1662 /** 1800 /**
1663 * The Google Compute Engine machine type used for cluster instances. Example: 1801 * [Required] The Google Compute Engine machine type used for cluster
1802 * instances. Example:
1664 * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1 -a/machineTypes/n1-standard-2`. 1803 * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1 -a/machineTypes/n1-standard-2`.
1665 */ 1804 */
1666 core.String machineTypeUri; 1805 core.String machineTypeUri;
1667 /** 1806 /**
1668 * [Output-only] The config for Google Compute Engine Instance Group Manager 1807 * [Output-only] The config for Google Compute Engine Instance Group Manager
1669 * that manages this group. This is only used for preemptible instance groups. 1808 * that manages this group. This is only used for preemptible instance groups.
1670 */ 1809 */
1671 ManagedGroupConfig managedGroupConfig; 1810 ManagedGroupConfig managedGroupConfig;
1672 /** 1811 /**
1673 * The number of VM instances in the instance group. For master instance 1812 * [Required] The number of VM instances in the instance group. For master
1674 * groups, must be set to 1. 1813 * instance groups, must be set to 1.
1675 */ 1814 */
1676 core.int numInstances; 1815 core.int numInstances;
1677 1816
1678 InstanceGroupConfig(); 1817 InstanceGroupConfig();
1679 1818
1680 InstanceGroupConfig.fromJson(core.Map _json) { 1819 InstanceGroupConfig.fromJson(core.Map _json) {
1681 if (_json.containsKey("diskConfig")) { 1820 if (_json.containsKey("diskConfig")) {
1682 diskConfig = new DiskConfig.fromJson(_json["diskConfig"]); 1821 diskConfig = new DiskConfig.fromJson(_json["diskConfig"]);
1683 } 1822 }
1684 if (_json.containsKey("imageUri")) { 1823 if (_json.containsKey("imageUri")) {
(...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after
1738 core.String driverControlFilesUri; 1877 core.String driverControlFilesUri;
1739 /** 1878 /**
1740 * [Output-only] A URI pointing to the location of the stdout of the job's 1879 * [Output-only] A URI pointing to the location of the stdout of the job's
1741 * driver program. 1880 * driver program.
1742 */ 1881 */
1743 core.String driverOutputResourceUri; 1882 core.String driverOutputResourceUri;
1744 /** Job is a Hadoop job. */ 1883 /** Job is a Hadoop job. */
1745 HadoopJob hadoopJob; 1884 HadoopJob hadoopJob;
1746 /** Job is a Hive job. */ 1885 /** Job is a Hive job. */
1747 HiveJob hiveJob; 1886 HiveJob hiveJob;
1887 /**
1888 * [Optional] The labels to associate with this job. Label **keys** must
1889 * contain 1 to 63 characters, and must conform to [RFC
1890 * 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty,
1891 * but, if present, must contain 1 to 63 characters, and must conform to [RFC
1892 * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
1893 * associated with a job.
1894 */
1895 core.Map<core.String, core.String> labels;
1748 /** Job is a Pig job. */ 1896 /** Job is a Pig job. */
1749 PigJob pigJob; 1897 PigJob pigJob;
1750 /** 1898 /**
1751 * [Required] Job information, including how, when, and where to run the job. 1899 * [Required] Job information, including how, when, and where to run the job.
1752 */ 1900 */
1753 JobPlacement placement; 1901 JobPlacement placement;
1754 /** Job is a Pyspark job. */ 1902 /** Job is a Pyspark job. */
1755 PySparkJob pysparkJob; 1903 PySparkJob pysparkJob;
1756 /** 1904 /**
1757 * [Optional] The fully qualified reference to the job, which can be used to 1905 * [Optional] The fully qualified reference to the job, which can be used to
1758 * obtain the equivalent REST path of the job resource. If this property is 1906 * obtain the equivalent REST path of the job resource. If this property is
1759 * not specified when a job is created, the server generates a job_id. 1907 * not specified when a job is created, the server generates a job_id.
1760 */ 1908 */
1761 JobReference reference; 1909 JobReference reference;
1762 /** Job is a Spark job. */ 1910 /** Job is a Spark job. */
1763 SparkJob sparkJob; 1911 SparkJob sparkJob;
1764 /** Job is a SparkSql job. */ 1912 /** Job is a SparkSql job. */
1765 SparkSqlJob sparkSqlJob; 1913 SparkSqlJob sparkSqlJob;
1766 /** 1914 /**
1767 * [Output-only] The job status. Additional application-specific status 1915 * [Output-only] The job status. Additional application-specific status
1768 * information may be contained in the type_job and yarn_applications fields. 1916 * information may be contained in the type_job and yarn_applications fields.
1769 */ 1917 */
1770 JobStatus status; 1918 JobStatus status;
1771 /** [Output-only] The previous job status. */ 1919 /** [Output-only] The previous job status. */
1772 core.List<JobStatus> statusHistory; 1920 core.List<JobStatus> statusHistory;
1921 /**
1922 * [Output-only] The collection of YARN applications spun up by this job.
1923 * **Beta** Feature: This report is available for testing purposes only. It
1924 * may be changed before final release.
1925 */
1926 core.List<YarnApplication> yarnApplications;
1773 1927
1774 Job(); 1928 Job();
1775 1929
1776 Job.fromJson(core.Map _json) { 1930 Job.fromJson(core.Map _json) {
1777 if (_json.containsKey("driverControlFilesUri")) { 1931 if (_json.containsKey("driverControlFilesUri")) {
1778 driverControlFilesUri = _json["driverControlFilesUri"]; 1932 driverControlFilesUri = _json["driverControlFilesUri"];
1779 } 1933 }
1780 if (_json.containsKey("driverOutputResourceUri")) { 1934 if (_json.containsKey("driverOutputResourceUri")) {
1781 driverOutputResourceUri = _json["driverOutputResourceUri"]; 1935 driverOutputResourceUri = _json["driverOutputResourceUri"];
1782 } 1936 }
1783 if (_json.containsKey("hadoopJob")) { 1937 if (_json.containsKey("hadoopJob")) {
1784 hadoopJob = new HadoopJob.fromJson(_json["hadoopJob"]); 1938 hadoopJob = new HadoopJob.fromJson(_json["hadoopJob"]);
1785 } 1939 }
1786 if (_json.containsKey("hiveJob")) { 1940 if (_json.containsKey("hiveJob")) {
1787 hiveJob = new HiveJob.fromJson(_json["hiveJob"]); 1941 hiveJob = new HiveJob.fromJson(_json["hiveJob"]);
1788 } 1942 }
1943 if (_json.containsKey("labels")) {
1944 labels = _json["labels"];
1945 }
1789 if (_json.containsKey("pigJob")) { 1946 if (_json.containsKey("pigJob")) {
1790 pigJob = new PigJob.fromJson(_json["pigJob"]); 1947 pigJob = new PigJob.fromJson(_json["pigJob"]);
1791 } 1948 }
1792 if (_json.containsKey("placement")) { 1949 if (_json.containsKey("placement")) {
1793 placement = new JobPlacement.fromJson(_json["placement"]); 1950 placement = new JobPlacement.fromJson(_json["placement"]);
1794 } 1951 }
1795 if (_json.containsKey("pysparkJob")) { 1952 if (_json.containsKey("pysparkJob")) {
1796 pysparkJob = new PySparkJob.fromJson(_json["pysparkJob"]); 1953 pysparkJob = new PySparkJob.fromJson(_json["pysparkJob"]);
1797 } 1954 }
1798 if (_json.containsKey("reference")) { 1955 if (_json.containsKey("reference")) {
1799 reference = new JobReference.fromJson(_json["reference"]); 1956 reference = new JobReference.fromJson(_json["reference"]);
1800 } 1957 }
1801 if (_json.containsKey("sparkJob")) { 1958 if (_json.containsKey("sparkJob")) {
1802 sparkJob = new SparkJob.fromJson(_json["sparkJob"]); 1959 sparkJob = new SparkJob.fromJson(_json["sparkJob"]);
1803 } 1960 }
1804 if (_json.containsKey("sparkSqlJob")) { 1961 if (_json.containsKey("sparkSqlJob")) {
1805 sparkSqlJob = new SparkSqlJob.fromJson(_json["sparkSqlJob"]); 1962 sparkSqlJob = new SparkSqlJob.fromJson(_json["sparkSqlJob"]);
1806 } 1963 }
1807 if (_json.containsKey("status")) { 1964 if (_json.containsKey("status")) {
1808 status = new JobStatus.fromJson(_json["status"]); 1965 status = new JobStatus.fromJson(_json["status"]);
1809 } 1966 }
1810 if (_json.containsKey("statusHistory")) { 1967 if (_json.containsKey("statusHistory")) {
1811 statusHistory = _json["statusHistory"].map((value) => new JobStatus.fromJs on(value)).toList(); 1968 statusHistory = _json["statusHistory"].map((value) => new JobStatus.fromJs on(value)).toList();
1812 } 1969 }
1970 if (_json.containsKey("yarnApplications")) {
1971 yarnApplications = _json["yarnApplications"].map((value) => new YarnApplic ation.fromJson(value)).toList();
1972 }
1813 } 1973 }
1814 1974
1815 core.Map toJson() { 1975 core.Map toJson() {
1816 var _json = new core.Map(); 1976 var _json = new core.Map();
1817 if (driverControlFilesUri != null) { 1977 if (driverControlFilesUri != null) {
1818 _json["driverControlFilesUri"] = driverControlFilesUri; 1978 _json["driverControlFilesUri"] = driverControlFilesUri;
1819 } 1979 }
1820 if (driverOutputResourceUri != null) { 1980 if (driverOutputResourceUri != null) {
1821 _json["driverOutputResourceUri"] = driverOutputResourceUri; 1981 _json["driverOutputResourceUri"] = driverOutputResourceUri;
1822 } 1982 }
1823 if (hadoopJob != null) { 1983 if (hadoopJob != null) {
1824 _json["hadoopJob"] = (hadoopJob).toJson(); 1984 _json["hadoopJob"] = (hadoopJob).toJson();
1825 } 1985 }
1826 if (hiveJob != null) { 1986 if (hiveJob != null) {
1827 _json["hiveJob"] = (hiveJob).toJson(); 1987 _json["hiveJob"] = (hiveJob).toJson();
1828 } 1988 }
1989 if (labels != null) {
1990 _json["labels"] = labels;
1991 }
1829 if (pigJob != null) { 1992 if (pigJob != null) {
1830 _json["pigJob"] = (pigJob).toJson(); 1993 _json["pigJob"] = (pigJob).toJson();
1831 } 1994 }
1832 if (placement != null) { 1995 if (placement != null) {
1833 _json["placement"] = (placement).toJson(); 1996 _json["placement"] = (placement).toJson();
1834 } 1997 }
1835 if (pysparkJob != null) { 1998 if (pysparkJob != null) {
1836 _json["pysparkJob"] = (pysparkJob).toJson(); 1999 _json["pysparkJob"] = (pysparkJob).toJson();
1837 } 2000 }
1838 if (reference != null) { 2001 if (reference != null) {
1839 _json["reference"] = (reference).toJson(); 2002 _json["reference"] = (reference).toJson();
1840 } 2003 }
1841 if (sparkJob != null) { 2004 if (sparkJob != null) {
1842 _json["sparkJob"] = (sparkJob).toJson(); 2005 _json["sparkJob"] = (sparkJob).toJson();
1843 } 2006 }
1844 if (sparkSqlJob != null) { 2007 if (sparkSqlJob != null) {
1845 _json["sparkSqlJob"] = (sparkSqlJob).toJson(); 2008 _json["sparkSqlJob"] = (sparkSqlJob).toJson();
1846 } 2009 }
1847 if (status != null) { 2010 if (status != null) {
1848 _json["status"] = (status).toJson(); 2011 _json["status"] = (status).toJson();
1849 } 2012 }
1850 if (statusHistory != null) { 2013 if (statusHistory != null) {
1851 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to List(); 2014 _json["statusHistory"] = statusHistory.map((value) => (value).toJson()).to List();
1852 } 2015 }
2016 if (yarnApplications != null) {
2017 _json["yarnApplications"] = yarnApplications.map((value) => (value).toJson ()).toList();
2018 }
1853 return _json; 2019 return _json;
1854 } 2020 }
1855 } 2021 }
1856 2022
1857 /** Cloud Dataproc job config. */ 2023 /** Cloud Dataproc job config. */
1858 class JobPlacement { 2024 class JobPlacement {
1859 /** [Required] The name of the cluster where the job will be submitted. */ 2025 /** [Required] The name of the cluster where the job will be submitted. */
1860 core.String clusterName; 2026 core.String clusterName;
1861 /** 2027 /**
1862 * [Output-only] A cluster UUID generated by the Cloud Dataproc service when 2028 * [Output-only] A cluster UUID generated by the Cloud Dataproc service when
(...skipping 20 matching lines...) Expand all
1883 if (clusterUuid != null) { 2049 if (clusterUuid != null) {
1884 _json["clusterUuid"] = clusterUuid; 2050 _json["clusterUuid"] = clusterUuid;
1885 } 2051 }
1886 return _json; 2052 return _json;
1887 } 2053 }
1888 } 2054 }
1889 2055
1890 /** Encapsulates the full scoping used to reference a job. */ 2056 /** Encapsulates the full scoping used to reference a job. */
1891 class JobReference { 2057 class JobReference {
1892 /** 2058 /**
1893 * [Required] The job ID, which must be unique within the project. The job ID 2059 * [Optional] The job ID, which must be unique within the project. The job ID
1894 * is generated by the server upon job submission or provided by the user as a 2060 * is generated by the server upon job submission or provided by the user as a
1895 * means to perform retries without creating duplicate jobs. The ID must 2061 * means to perform retries without creating duplicate jobs. The ID must
1896 * contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens 2062 * contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or hyphens
1897 * (-). The maximum length is 512 characters. 2063 * (-). The maximum length is 512 characters.
1898 */ 2064 */
1899 core.String jobId; 2065 core.String jobId;
1900 /** 2066 /**
1901 * [Required] The ID of the Google Cloud Platform project that the job belongs 2067 * [Required] The ID of the Google Cloud Platform project that the job belongs
1902 * to. 2068 * to.
1903 */ 2069 */
(...skipping 18 matching lines...) Expand all
1922 if (projectId != null) { 2088 if (projectId != null) {
1923 _json["projectId"] = projectId; 2089 _json["projectId"] = projectId;
1924 } 2090 }
1925 return _json; 2091 return _json;
1926 } 2092 }
1927 } 2093 }
1928 2094
1929 /** Cloud Dataproc job status. */ 2095 /** Cloud Dataproc job status. */
1930 class JobStatus { 2096 class JobStatus {
1931 /** 2097 /**
1932 * [Optional] Job state details, such as an error description if the state is 2098 * [Output-only] Optional job state details, such as an error description if
1933 * ERROR. 2099 * the state is ERROR.
1934 */ 2100 */
1935 core.String details; 2101 core.String details;
1936 /** 2102 /**
1937 * [Required] A state message specifying the overall job state. 2103 * [Output-only] A state message specifying the overall job state.
1938 * Possible string values are: 2104 * Possible string values are:
1939 * - "STATE_UNSPECIFIED" : A STATE_UNSPECIFIED. 2105 * - "STATE_UNSPECIFIED" : A STATE_UNSPECIFIED.
1940 * - "PENDING" : A PENDING. 2106 * - "PENDING" : A PENDING.
1941 * - "SETUP_DONE" : A SETUP_DONE. 2107 * - "SETUP_DONE" : A SETUP_DONE.
1942 * - "RUNNING" : A RUNNING. 2108 * - "RUNNING" : A RUNNING.
1943 * - "CANCEL_PENDING" : A CANCEL_PENDING. 2109 * - "CANCEL_PENDING" : A CANCEL_PENDING.
1944 * - "CANCEL_STARTED" : A CANCEL_STARTED. 2110 * - "CANCEL_STARTED" : A CANCEL_STARTED.
1945 * - "CANCELLED" : A CANCELLED. 2111 * - "CANCELLED" : A CANCELLED.
1946 * - "DONE" : A DONE. 2112 * - "DONE" : A DONE.
1947 * - "ERROR" : A ERROR. 2113 * - "ERROR" : A ERROR.
(...skipping 29 matching lines...) Expand all
1977 } 2143 }
1978 return _json; 2144 return _json;
1979 } 2145 }
1980 } 2146 }
1981 2147
1982 /** The list of all clusters in a project. */ 2148 /** The list of all clusters in a project. */
1983 class ListClustersResponse { 2149 class ListClustersResponse {
1984 /** [Output-only] The clusters in the project. */ 2150 /** [Output-only] The clusters in the project. */
1985 core.List<Cluster> clusters; 2151 core.List<Cluster> clusters;
1986 /** 2152 /**
1987 * [Optional] This token is included in the response if there are more results 2153 * [Output-only] This token is included in the response if there are more
1988 * to fetch. To fetch additional results, provide this value as the 2154 * results to fetch. To fetch additional results, provide this value as the
1989 * `page_token` in a subsequent ListClustersRequest. 2155 * `page_token` in a subsequent ListClustersRequest.
1990 */ 2156 */
1991 core.String nextPageToken; 2157 core.String nextPageToken;
1992 2158
1993 ListClustersResponse(); 2159 ListClustersResponse();
1994 2160
1995 ListClustersResponse.fromJson(core.Map _json) { 2161 ListClustersResponse.fromJson(core.Map _json) {
1996 if (_json.containsKey("clusters")) { 2162 if (_json.containsKey("clusters")) {
1997 clusters = _json["clusters"].map((value) => new Cluster.fromJson(value)).t oList(); 2163 clusters = _json["clusters"].map((value) => new Cluster.fromJson(value)).t oList();
1998 } 2164 }
(...skipping 179 matching lines...) Expand 10 before | Expand all | Expand 10 after
2178 * This resource represents a long-running operation that is the result of a 2344 * This resource represents a long-running operation that is the result of a
2179 * network API call. 2345 * network API call.
2180 */ 2346 */
2181 class Operation { 2347 class Operation {
2182 /** 2348 /**
2183 * If the value is `false`, it means the operation is still in progress. If 2349 * If the value is `false`, it means the operation is still in progress. If
2184 * true, the operation is completed, and either `error` or `response` is 2350 * true, the operation is completed, and either `error` or `response` is
2185 * available. 2351 * available.
2186 */ 2352 */
2187 core.bool done; 2353 core.bool done;
2188 /** The error result of the operation in case of failure. */ 2354 /** The error result of the operation in case of failure or cancellation. */
2189 Status error; 2355 Status error;
2190 /** 2356 /**
2191 * Service-specific metadata associated with the operation. It typically 2357 * Service-specific metadata associated with the operation. It typically
2192 * contains progress information and common metadata such as create time. Some 2358 * contains progress information and common metadata such as create time. Some
2193 * services might not provide such metadata. Any method that returns a 2359 * services might not provide such metadata. Any method that returns a
2194 * long-running operation should document the metadata type, if any. 2360 * long-running operation should document the metadata type, if any.
2195 * 2361 *
2196 * The values for Object must be JSON objects. It can consist of `num`, 2362 * The values for Object must be JSON objects. It can consist of `num`,
2197 * `String`, `bool` and `null` as well as `Map` and `List` values. 2363 * `String`, `bool` and `null` as well as `Map` and `List` values.
2198 */ 2364 */
(...skipping 222 matching lines...) Expand 10 before | Expand all | Expand 10 after
2421 if (state != null) { 2587 if (state != null) {
2422 _json["state"] = state; 2588 _json["state"] = state;
2423 } 2589 }
2424 if (stateStartTime != null) { 2590 if (stateStartTime != null) {
2425 _json["stateStartTime"] = stateStartTime; 2591 _json["stateStartTime"] = stateStartTime;
2426 } 2592 }
2427 return _json; 2593 return _json;
2428 } 2594 }
2429 } 2595 }
2430 2596
2431 /** A Cloud Dataproc job for running Pig queries on YARN. */ 2597 /**
2598 * A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/)
2599 * queries on YARN.
2600 */
2432 class PigJob { 2601 class PigJob {
2433 /** 2602 /**
2434 * [Optional] Whether to continue executing queries if a query fails. The 2603 * [Optional] Whether to continue executing queries if a query fails. The
2435 * default value is `false`. Setting to `true` can be useful when executing 2604 * default value is `false`. Setting to `true` can be useful when executing
2436 * independent parallel queries. 2605 * independent parallel queries.
2437 */ 2606 */
2438 core.bool continueOnFailure; 2607 core.bool continueOnFailure;
2439 /** 2608 /**
2440 * [Optional] HCFS URIs of jar files to add to the CLASSPATH of the Pig Client 2609 * [Optional] HCFS URIs of jar files to add to the CLASSPATH of the Pig Client
2441 * and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. 2610 * and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
(...skipping 64 matching lines...) Expand 10 before | Expand all | Expand 10 after
2506 if (queryList != null) { 2675 if (queryList != null) {
2507 _json["queryList"] = (queryList).toJson(); 2676 _json["queryList"] = (queryList).toJson();
2508 } 2677 }
2509 if (scriptVariables != null) { 2678 if (scriptVariables != null) {
2510 _json["scriptVariables"] = scriptVariables; 2679 _json["scriptVariables"] = scriptVariables;
2511 } 2680 }
2512 return _json; 2681 return _json;
2513 } 2682 }
2514 } 2683 }
2515 2684
2516 /** A Cloud Dataproc job for running PySpark applications on YARN. */ 2685 /**
2686 * A Cloud Dataproc job for running [Apache
2687 * PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
2688 * applications on YARN.
2689 */
2517 class PySparkJob { 2690 class PySparkJob {
2518 /** 2691 /**
2519 * [Optional] HCFS URIs of archives to be extracted in the working directory 2692 * [Optional] HCFS URIs of archives to be extracted in the working directory
2520 * of .jar, .tar, .tar.gz, .tgz, and .zip. 2693 * of .jar, .tar, .tar.gz, .tgz, and .zip.
2521 */ 2694 */
2522 core.List<core.String> archiveUris; 2695 core.List<core.String> archiveUris;
2523 /** 2696 /**
2524 * [Optional] The arguments to pass to the driver. Do not include arguments, 2697 * [Optional] The arguments to pass to the driver. Do not include arguments,
2525 * such as `--conf`, that can be set as job properties, since a collision may 2698 * such as `--conf`, that can be set as job properties, since a collision may
2526 * occur that causes an incorrect job submission. 2699 * occur that causes an incorrect job submission.
(...skipping 150 matching lines...) Expand 10 before | Expand all | Expand 10 after
2677 if (imageVersion != null) { 2850 if (imageVersion != null) {
2678 _json["imageVersion"] = imageVersion; 2851 _json["imageVersion"] = imageVersion;
2679 } 2852 }
2680 if (properties != null) { 2853 if (properties != null) {
2681 _json["properties"] = properties; 2854 _json["properties"] = properties;
2682 } 2855 }
2683 return _json; 2856 return _json;
2684 } 2857 }
2685 } 2858 }
2686 2859
2687 /** A Cloud Dataproc job for running Spark applications on YARN. */ 2860 /**
2861 * A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/)
2862 * applications on YARN.
2863 */
2688 class SparkJob { 2864 class SparkJob {
2689 /** 2865 /**
2690 * [Optional] HCFS URIs of archives to be extracted in the working directory 2866 * [Optional] HCFS URIs of archives to be extracted in the working directory
2691 * of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz, 2867 * of Spark drivers and tasks. Supported file types: .jar, .tar, .tar.gz,
2692 * .tgz, and .zip. 2868 * .tgz, and .zip.
2693 */ 2869 */
2694 core.List<core.String> archiveUris; 2870 core.List<core.String> archiveUris;
2695 /** 2871 /**
2696 * [Optional] The arguments to pass to the driver. Do not include arguments, 2872 * [Optional] The arguments to pass to the driver. Do not include arguments,
2697 * such as `--conf`, that can be set as job properties, since a collision may 2873 * such as `--conf`, that can be set as job properties, since a collision may
(...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after
2777 if (mainJarFileUri != null) { 2953 if (mainJarFileUri != null) {
2778 _json["mainJarFileUri"] = mainJarFileUri; 2954 _json["mainJarFileUri"] = mainJarFileUri;
2779 } 2955 }
2780 if (properties != null) { 2956 if (properties != null) {
2781 _json["properties"] = properties; 2957 _json["properties"] = properties;
2782 } 2958 }
2783 return _json; 2959 return _json;
2784 } 2960 }
2785 } 2961 }
2786 2962
2787 /** A Cloud Dataproc job for running Spark SQL queries. */ 2963 /**
2964 * A Cloud Dataproc job for running [Apache Spark
2965 * SQL](http://spark.apache.org/sql/) queries.
2966 */
2788 class SparkSqlJob { 2967 class SparkSqlJob {
2789 /** [Optional] HCFS URIs of jar files to be added to the Spark CLASSPATH. */ 2968 /** [Optional] HCFS URIs of jar files to be added to the Spark CLASSPATH. */
2790 core.List<core.String> jarFileUris; 2969 core.List<core.String> jarFileUris;
2791 /** [Optional] The runtime log config for job execution. */ 2970 /** [Optional] The runtime log config for job execution. */
2792 LoggingConfig loggingConfig; 2971 LoggingConfig loggingConfig;
2793 /** 2972 /**
2794 * [Optional] A mapping of property names to values, used to configure Spark 2973 * [Optional] A mapping of property names to values, used to configure Spark
2795 * SQL's SparkConf. Properties that conflict with values set by the Cloud 2974 * SQL's SparkConf. Properties that conflict with values set by the Cloud
2796 * Dataproc API may be overwritten. 2975 * Dataproc API may be overwritten.
2797 */ 2976 */
(...skipping 151 matching lines...) Expand 10 before | Expand all | Expand 10 after
2949 } 3128 }
2950 3129
2951 core.Map toJson() { 3130 core.Map toJson() {
2952 var _json = new core.Map(); 3131 var _json = new core.Map();
2953 if (job != null) { 3132 if (job != null) {
2954 _json["job"] = (job).toJson(); 3133 _json["job"] = (job).toJson();
2955 } 3134 }
2956 return _json; 3135 return _json;
2957 } 3136 }
2958 } 3137 }
3138
/**
 * A YARN application created by a job.
 *
 * Application information is a subset of
 * org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto.
 *
 * **Beta Feature**: This report is available for testing purposes only. It
 * may be changed before final release.
 */
class YarnApplication {
  /** [Required] The application name. */
  core.String name;

  /** [Required] The numerical progress of the application, from 1 to 100. */
  core.double progress;

  /**
   * [Required] The application state.
   * Possible string values are:
   * - "STATE_UNSPECIFIED" : A STATE_UNSPECIFIED.
   * - "NEW" : A NEW.
   * - "NEW_SAVING" : A NEW_SAVING.
   * - "SUBMITTED" : A SUBMITTED.
   * - "ACCEPTED" : A ACCEPTED.
   * - "RUNNING" : A RUNNING.
   * - "FINISHED" : A FINISHED.
   * - "FAILED" : A FAILED.
   * - "KILLED" : A KILLED.
   */
  core.String state;

  /**
   * [Optional] The HTTP URL of the ApplicationMaster, HistoryServer, or
   * TimelineServer that provides application-specific information. The URL
   * uses the internal hostname, and requires a proxy server for resolution
   * and, possibly, access.
   */
  core.String trackingUrl;

  YarnApplication();

  /// Populates each field from the matching key of [json], leaving a field
  /// null when its key is absent.
  YarnApplication.fromJson(core.Map json) {
    if (json.containsKey("name")) name = json["name"];
    if (json.containsKey("progress")) progress = json["progress"];
    if (json.containsKey("state")) state = json["state"];
    if (json.containsKey("trackingUrl")) trackingUrl = json["trackingUrl"];
  }

  /// Serializes this object to a JSON map, omitting null fields.
  core.Map toJson() {
    final json = new core.Map();
    if (name != null) json["name"] = name;
    if (progress != null) json["progress"] = progress;
    if (state != null) json["state"] = state;
    if (trackingUrl != null) json["trackingUrl"] = trackingUrl;
    return json;
  }
}
OLDNEW
« no previous file with comments | « generated/googleapis/lib/content/v2sandbox.dart ('k') | generated/googleapis/lib/deploymentmanager/v2.dart » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698