Chromium Code Reviews

Unified Diff: generated/googleapis_beta/lib/genomics/v1beta2.dart

Issue 698403003: Api roll 5: 2014-11-05 (Closed)
Base URL: git@github.com:dart-lang/googleapis.git@master
Patch Set: Created 6 years, 1 month ago
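For reviewers trying out the rolled API, a minimal usage sketch of the renamed v1beta2 surface follows. The names are taken from this diff; the plain http.Client and the empty SearchReadGroupSetsRequest are illustrative only — a real call needs an OAuth2-authorized client and populated search criteria.

import "package:http/http.dart" as http;
import "package:googleapis_beta/genomics/v1beta2.dart";

main() {
  // Illustrative only: production code would pass an OAuth2-authorized client.
  var client = new http.Client();
  var api = new GenomicsApi(client);

  // v1beta's "readsets" resource is now "readgroupsets"; search() takes a
  // SearchReadGroupSetsRequest and completes with a SearchReadGroupSetsResponse.
  api.readgroupsets.search(new SearchReadGroupSetsRequest()).then((response) {
    print(response);
    client.close();
  });
}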
Index: generated/googleapis_beta/lib/genomics/v1beta2.dart
diff --git a/generated/googleapis_beta/lib/genomics/v1beta.dart b/generated/googleapis_beta/lib/genomics/v1beta2.dart
similarity index 61%
copy from generated/googleapis_beta/lib/genomics/v1beta.dart
copy to generated/googleapis_beta/lib/genomics/v1beta2.dart
index ef44652c806d243bdd7430cf2d0a99b5c056124b..a4ab8bbfed2451fb299089c15b4b97bc8fa9420a 100644
--- a/generated/googleapis_beta/lib/genomics/v1beta.dart
+++ b/generated/googleapis_beta/lib/genomics/v1beta2.dart
@@ -1,4 +1,4 @@
-library googleapis_beta.genomics.v1beta;
+library googleapis_beta.genomics.v1beta2;
import "dart:core" as core;
import "dart:collection" as collection;
@@ -30,85 +30,19 @@ class GenomicsApi {
final common_internal.ApiRequester _requester;
- BeaconsResourceApi get beacons => new BeaconsResourceApi(_requester);
CallsetsResourceApi get callsets => new CallsetsResourceApi(_requester);
DatasetsResourceApi get datasets => new DatasetsResourceApi(_requester);
ExperimentalResourceApi get experimental => new ExperimentalResourceApi(_requester);
JobsResourceApi get jobs => new JobsResourceApi(_requester);
+ ReadgroupsetsResourceApi get readgroupsets => new ReadgroupsetsResourceApi(_requester);
ReadsResourceApi get reads => new ReadsResourceApi(_requester);
- ReadsetsResourceApi get readsets => new ReadsetsResourceApi(_requester);
+ ReferencesResourceApi get references => new ReferencesResourceApi(_requester);
+ ReferencesetsResourceApi get referencesets => new ReferencesetsResourceApi(_requester);
VariantsResourceApi get variants => new VariantsResourceApi(_requester);
VariantsetsResourceApi get variantsets => new VariantsetsResourceApi(_requester);
GenomicsApi(http.Client client) :
- _requester = new common_internal.ApiRequester(client, "https://www.googleapis.com/", "genomics/v1beta/");
-}
-
-
-/** Not documented yet. */
-class BeaconsResourceApi {
- final common_internal.ApiRequester _requester;
-
- BeaconsResourceApi(common_internal.ApiRequester client) :
- _requester = client;
-
- /**
- * This is an experimental API that provides a Global Alliance for Genomics
- * and Health Beacon. It may change at any time.
- *
- * Request parameters:
- *
- * [variantSetId] - The ID of the variant set to query over. It must be
- * public. Private variant sets will return an unauthorized exception.
- *
- * [allele] - Required. The allele to look for ('A', 'C', 'G' or 'T').
- *
- * [position] - Required. The 0-based position to query.
- *
- * [referenceName] - Required. The reference to query over.
- *
- * Completes with a [Beacon].
- *
- * Completes with a [common.ApiRequestError] if the API endpoint returned an
- * error.
- *
- * If the used [http.Client] completes with an error when making a REST call,
- * this method will complete with the same error.
- */
- async.Future<Beacon> get(core.String variantSetId, {core.String allele, core.String position, core.String referenceName}) {
- var _url = null;
- var _queryParams = new core.Map();
- var _uploadMedia = null;
- var _uploadOptions = null;
- var _downloadOptions = common.DownloadOptions.Metadata;
- var _body = null;
-
- if (variantSetId == null) {
- throw new core.ArgumentError("Parameter variantSetId is required.");
- }
- if (allele != null) {
- _queryParams["allele"] = [allele];
- }
- if (position != null) {
- _queryParams["position"] = [position];
- }
- if (referenceName != null) {
- _queryParams["referenceName"] = [referenceName];
- }
-
-
- _url = 'beacons/' + common_internal.Escaper.ecapeVariable('$variantSetId');
-
- var _response = _requester.request(_url,
- "GET",
- body: _body,
- queryParams: _queryParams,
- uploadOptions: _uploadOptions,
- uploadMedia: _uploadMedia,
- downloadOptions: _downloadOptions);
- return _response.then((data) => new Beacon.fromJson(data));
- }
-
+ _requester = new common_internal.ApiRequester(client, "https://www.googleapis.com/", "genomics/v1beta2/");
}
@@ -164,7 +98,7 @@ class CallsetsResourceApi {
*
* Request parameters:
*
- * [callSetId] - The ID of the callset to be deleted.
+ * [callSetId] - The ID of the call set to be deleted.
*
* Completes with a [common.ApiRequestError] if the API endpoint returned an
* error.
@@ -203,7 +137,7 @@ class CallsetsResourceApi {
*
* Request parameters:
*
- * [callSetId] - The ID of the callset.
+ * [callSetId] - The ID of the call set.
*
* Completes with a [CallSet].
*
@@ -245,7 +179,7 @@ class CallsetsResourceApi {
*
* Request parameters:
*
- * [callSetId] - The ID of the callset to be updated.
+ * [callSetId] - The ID of the call set to be updated.
*
* Completes with a [CallSet].
*
@@ -286,6 +220,8 @@ class CallsetsResourceApi {
/**
* Gets a list of call sets matching the criteria.
*
+ * Implements GlobalAllianceApi.searchCallSets.
+ *
* [request] - The metadata request object.
*
* Request parameters:
@@ -330,7 +266,7 @@ class CallsetsResourceApi {
*
* Request parameters:
*
- * [callSetId] - The ID of the callset to be updated.
+ * [callSetId] - The ID of the call set to be updated.
*
* Completes with a [CallSet].
*
@@ -502,15 +438,15 @@ class DatasetsResourceApi {
*
* Request parameters:
*
- * [maxResults] - The maximum number of results returned by this request.
+ * [pageSize] - The maximum number of results returned by this request.
*
* [pageToken] - The continuation token, which is used to page through large
* result sets. To get the next page of results, set this parameter to the
* value of nextPageToken from the previous response.
*
- * [projectId] - Only return datasets which belong to this Google Developers
- * Console project. Only accepts project numbers. Returns all public projects
- * if no project number is specified.
+ * [projectNumber] - Only return datasets which belong to this Google
+ * Developers Console project. Only accepts project numbers. Returns all
+ * public projects if no project number is specified.
*
* Completes with a [ListDatasetsResponse].
*
@@ -520,7 +456,7 @@ class DatasetsResourceApi {
* If the used [http.Client] completes with an error when making a REST call,
* this method will complete with the same error.
*/
- async.Future<ListDatasetsResponse> list({core.String maxResults, core.String pageToken, core.String projectId}) {
+ async.Future<ListDatasetsResponse> list({core.int pageSize, core.String pageToken, core.String projectNumber}) {
var _url = null;
var _queryParams = new core.Map();
var _uploadMedia = null;
@@ -528,14 +464,14 @@ class DatasetsResourceApi {
var _downloadOptions = common.DownloadOptions.Metadata;
var _body = null;
- if (maxResults != null) {
- _queryParams["maxResults"] = [maxResults];
+ if (pageSize != null) {
+ _queryParams["pageSize"] = ["${pageSize}"];
}
if (pageToken != null) {
_queryParams["pageToken"] = [pageToken];
}
- if (projectId != null) {
- _queryParams["projectId"] = [projectId];
+ if (projectNumber != null) {
+ _queryParams["projectNumber"] = [projectNumber];
}
@@ -878,28 +814,24 @@ class JobsResourceApi {
/** Not documented yet. */
-class ReadsResourceApi {
+class ReadgroupsetsResourceApi {
final common_internal.ApiRequester _requester;
- ReadsResourceApi(common_internal.ApiRequester client) :
+ ReadgroupsetsCoveragebucketsResourceApi get coveragebuckets => new ReadgroupsetsCoveragebucketsResourceApi(_requester);
+
+ ReadgroupsetsResourceApi(common_internal.ApiRequester client) :
_requester = client;
/**
- * Gets a list of reads for one or more readsets. Reads search operates over a
- * genomic coordinate space of reference sequence & position defined over the
- * reference sequences to which the requested readsets are aligned. If a
- * target positional range is specified, search returns all reads whose
- * alignment to the reference genome overlap the range. A query which
- * specifies only readset IDs yields all reads in those readsets, including
- * unmapped reads. All reads returned (including reads on subsequent pages)
- * are ordered by genomic coordinate (reference sequence & position). Reads
- * with equivalent genomic coordinates are returned in a deterministic order.
+ * Aligns read data from existing read group sets or files from Google Cloud
+ * Storage. See the alignment and variant calling documentation for more
+ * details.
*
* [request] - The metadata request object.
*
* Request parameters:
*
- * Completes with a [SearchReadsResponse].
+ * Completes with a [AlignReadGroupSetsResponse].
*
* Completes with a [common.ApiRequestError] if the API endpoint returned an
* error.
@@ -907,7 +839,7 @@ class ReadsResourceApi {
* If the used [http.Client] completes with an error when making a REST call,
* this method will complete with the same error.
*/
- async.Future<SearchReadsResponse> search(SearchReadsRequest request) {
+ async.Future<AlignReadGroupSetsResponse> align(AlignReadGroupSetsRequest request) {
var _url = null;
var _queryParams = new core.Map();
var _uploadMedia = null;
@@ -920,7 +852,7 @@ class ReadsResourceApi {
}
- _url = 'reads/search';
+ _url = 'readgroupsets/align';
var _response = _requester.request(_url,
"POST",
@@ -929,28 +861,59 @@ class ReadsResourceApi {
uploadOptions: _uploadOptions,
uploadMedia: _uploadMedia,
downloadOptions: _downloadOptions);
- return _response.then((data) => new SearchReadsResponse.fromJson(data));
+ return _response.then((data) => new AlignReadGroupSetsResponse.fromJson(data));
}
-}
+ /**
+ * Calls variants on read data from existing read group sets or files from
+ * Google Cloud Storage. See the alignment and variant calling documentation
+ * for more details.
+ *
+ * [request] - The metadata request object.
+ *
+ * Request parameters:
+ *
+ * Completes with a [CallReadGroupSetsResponse].
+ *
+ * Completes with a [common.ApiRequestError] if the API endpoint returned an
+ * error.
+ *
+ * If the used [http.Client] completes with an error when making a REST call,
+ * this method will complete with the same error.
+ */
+ async.Future<CallReadGroupSetsResponse> call(CallReadGroupSetsRequest request) {
+ var _url = null;
+ var _queryParams = new core.Map();
+ var _uploadMedia = null;
+ var _uploadOptions = null;
+ var _downloadOptions = common.DownloadOptions.Metadata;
+ var _body = null;
+ if (request != null) {
+ _body = convert.JSON.encode((request).toJson());
+ }
-/** Not documented yet. */
-class ReadsetsResourceApi {
- final common_internal.ApiRequester _requester;
- ReadsetsCoveragebucketsResourceApi get coveragebuckets => new ReadsetsCoveragebucketsResourceApi(_requester);
+ _url = 'readgroupsets/call';
- ReadsetsResourceApi(common_internal.ApiRequester client) :
- _requester = client;
+ var _response = _requester.request(_url,
+ "POST",
+ body: _body,
+ queryParams: _queryParams,
+ uploadOptions: _uploadOptions,
+ uploadMedia: _uploadMedia,
+ downloadOptions: _downloadOptions);
+ return _response.then((data) => new CallReadGroupSetsResponse.fromJson(data));
+ }
/**
- * Deletes a readset.
+ * Deletes a read group set.
*
* Request parameters:
*
- * [readsetId] - The ID of the readset to be deleted. The caller must have
- * WRITE permissions to the dataset associated with this readset.
+ * [readGroupSetId] - The ID of the read group set to be deleted. The caller
+ * must have WRITE permissions to the dataset associated with this read group
+ * set.
*
* Completes with a [common.ApiRequestError] if the API endpoint returned an
* error.
@@ -958,7 +921,7 @@ class ReadsetsResourceApi {
* If the used [http.Client] completes with an error when making a REST call,
* this method will complete with the same error.
*/
- async.Future delete(core.String readsetId) {
+ async.Future delete(core.String readGroupSetId) {
var _url = null;
var _queryParams = new core.Map();
var _uploadMedia = null;
@@ -966,13 +929,13 @@ class ReadsetsResourceApi {
var _downloadOptions = common.DownloadOptions.Metadata;
var _body = null;
- if (readsetId == null) {
- throw new core.ArgumentError("Parameter readsetId is required.");
+ if (readGroupSetId == null) {
+ throw new core.ArgumentError("Parameter readGroupSetId is required.");
}
_downloadOptions = null;
- _url = 'readsets/' + common_internal.Escaper.ecapeVariable('$readsetId');
+ _url = 'readgroupsets/' + common_internal.Escaper.ecapeVariable('$readGroupSetId');
var _response = _requester.request(_url,
"DELETE",
@@ -985,17 +948,18 @@ class ReadsetsResourceApi {
}
/**
- * Exports readsets to a BAM file in Google Cloud Storage. Note that currently
- * there may be some differences between exported BAM files and the original
- * BAM file at the time of import. In particular, comments in the input file
- * header will not be preserved, and some custom tags will be converted to
- * strings.
+ * Exports read group sets to a BAM file in Google Cloud Storage.
+ *
+ * Note that currently there may be some differences between exported BAM
+ * files and the original BAM file at the time of import. In particular,
+ * comments in the input file header will not be preserved, and some custom
+ * tags will be converted to strings.
*
* [request] - The metadata request object.
*
* Request parameters:
*
- * Completes with a [ExportReadsetsResponse].
+ * Completes with a [ExportReadGroupSetsResponse].
*
* Completes with a [common.ApiRequestError] if the API endpoint returned an
* error.
@@ -1003,7 +967,7 @@ class ReadsetsResourceApi {
* If the used [http.Client] completes with an error when making a REST call,
* this method will complete with the same error.
*/
- async.Future<ExportReadsetsResponse> export(ExportReadsetsRequest request) {
+ async.Future<ExportReadGroupSetsResponse> export(ExportReadGroupSetsRequest request) {
var _url = null;
var _queryParams = new core.Map();
var _uploadMedia = null;
@@ -1016,7 +980,7 @@ class ReadsetsResourceApi {
}
- _url = 'readsets/export';
+ _url = 'readgroupsets/export';
var _response = _requester.request(_url,
"POST",
@@ -1025,17 +989,17 @@ class ReadsetsResourceApi {
uploadOptions: _uploadOptions,
uploadMedia: _uploadMedia,
downloadOptions: _downloadOptions);
- return _response.then((data) => new ExportReadsetsResponse.fromJson(data));
+ return _response.then((data) => new ExportReadGroupSetsResponse.fromJson(data));
}
/**
- * Gets a readset by ID.
+ * Gets a read group set by ID.
*
* Request parameters:
*
- * [readsetId] - The ID of the readset.
+ * [readGroupSetId] - The ID of the read group set.
*
- * Completes with a [Readset].
+ * Completes with a [ReadGroupSet].
*
* Completes with a [common.ApiRequestError] if the API endpoint returned an
* error.
@@ -1043,7 +1007,7 @@ class ReadsetsResourceApi {
* If the used [http.Client] completes with an error when making a REST call,
* this method will complete with the same error.
*/
- async.Future<Readset> get(core.String readsetId) {
+ async.Future<ReadGroupSet> get(core.String readGroupSetId) {
var _url = null;
var _queryParams = new core.Map();
var _uploadMedia = null;
@@ -1051,12 +1015,12 @@ class ReadsetsResourceApi {
var _downloadOptions = common.DownloadOptions.Metadata;
var _body = null;
- if (readsetId == null) {
- throw new core.ArgumentError("Parameter readsetId is required.");
+ if (readGroupSetId == null) {
+ throw new core.ArgumentError("Parameter readGroupSetId is required.");
}
- _url = 'readsets/' + common_internal.Escaper.ecapeVariable('$readsetId');
+ _url = 'readgroupsets/' + common_internal.Escaper.ecapeVariable('$readGroupSetId');
var _response = _requester.request(_url,
"GET",
@@ -1065,20 +1029,22 @@ class ReadsetsResourceApi {
uploadOptions: _uploadOptions,
uploadMedia: _uploadMedia,
downloadOptions: _downloadOptions);
- return _response.then((data) => new Readset.fromJson(data));
+ return _response.then((data) => new ReadGroupSet.fromJson(data));
}
/**
- * Creates readsets by asynchronously importing the provided information. Note
- * that currently comments in the input file header are not imported and some
- * custom tags will be converted to strings, rather than preserving tag types.
- * The caller must have WRITE permissions to the dataset.
+ * Creates read group sets by asynchronously importing the provided
+ * information.
+ *
+ * Note that currently comments in the input file header are not imported and
+ * some custom tags will be converted to strings, rather than preserving tag
+ * types. The caller must have WRITE permissions to the dataset.
*
* [request] - The metadata request object.
*
* Request parameters:
*
- * Completes with a [ImportReadsetsResponse].
+ * Completes with a [ImportReadGroupSetsResponse].
*
* Completes with a [common.ApiRequestError] if the API endpoint returned an
* error.
@@ -1086,7 +1052,7 @@ class ReadsetsResourceApi {
* If the used [http.Client] completes with an error when making a REST call,
* this method will complete with the same error.
*/
- async.Future<ImportReadsetsResponse> import(ImportReadsetsRequest request) {
+ async.Future<ImportReadGroupSetsResponse> import(ImportReadGroupSetsRequest request) {
var _url = null;
var _queryParams = new core.Map();
var _uploadMedia = null;
@@ -1099,7 +1065,7 @@ class ReadsetsResourceApi {
}
- _url = 'readsets/import';
+ _url = 'readgroupsets/import';
var _response = _requester.request(_url,
"POST",
@@ -1108,20 +1074,21 @@ class ReadsetsResourceApi {
uploadOptions: _uploadOptions,
uploadMedia: _uploadMedia,
downloadOptions: _downloadOptions);
- return _response.then((data) => new ImportReadsetsResponse.fromJson(data));
+ return _response.then((data) => new ImportReadGroupSetsResponse.fromJson(data));
}
/**
- * Updates a readset. This method supports patch semantics.
+ * Updates a read group set. This method supports patch semantics.
*
* [request] - The metadata request object.
*
* Request parameters:
*
- * [readsetId] - The ID of the readset to be updated. The caller must have
- * WRITE permissions to the dataset associated with this readset.
+ * [readGroupSetId] - The ID of the read group set to be updated. The caller
+ * must have WRITE permissions to the dataset associated with this read group
+ * set.
*
- * Completes with a [Readset].
+ * Completes with a [ReadGroupSet].
*
* Completes with a [common.ApiRequestError] if the API endpoint returned an
* error.
@@ -1129,7 +1096,7 @@ class ReadsetsResourceApi {
* If the used [http.Client] completes with an error when making a REST call,
* this method will complete with the same error.
*/
- async.Future<Readset> patch(Readset request, core.String readsetId) {
+ async.Future<ReadGroupSet> patch(ReadGroupSet request, core.String readGroupSetId) {
var _url = null;
var _queryParams = new core.Map();
var _uploadMedia = null;
@@ -1140,12 +1107,12 @@ class ReadsetsResourceApi {
if (request != null) {
_body = convert.JSON.encode((request).toJson());
}
- if (readsetId == null) {
- throw new core.ArgumentError("Parameter readsetId is required.");
+ if (readGroupSetId == null) {
+ throw new core.ArgumentError("Parameter readGroupSetId is required.");
}
- _url = 'readsets/' + common_internal.Escaper.ecapeVariable('$readsetId');
+ _url = 'readgroupsets/' + common_internal.Escaper.ecapeVariable('$readGroupSetId');
var _response = _requester.request(_url,
"PATCH",
@@ -1154,17 +1121,19 @@ class ReadsetsResourceApi {
uploadOptions: _uploadOptions,
uploadMedia: _uploadMedia,
downloadOptions: _downloadOptions);
- return _response.then((data) => new Readset.fromJson(data));
+ return _response.then((data) => new ReadGroupSet.fromJson(data));
}
/**
- * Gets a list of readsets matching the criteria.
+ * Searches for read group sets matching the criteria.
+ *
+ * Implements GlobalAllianceApi.searchReadGroupSets.
*
* [request] - The metadata request object.
*
* Request parameters:
*
- * Completes with a [SearchReadsetsResponse].
+ * Completes with a [SearchReadGroupSetsResponse].
*
* Completes with a [common.ApiRequestError] if the API endpoint returned an
* error.
@@ -1172,7 +1141,7 @@ class ReadsetsResourceApi {
* If the used [http.Client] completes with an error when making a REST call,
* this method will complete with the same error.
*/
- async.Future<SearchReadsetsResponse> search(SearchReadsetsRequest request) {
+ async.Future<SearchReadGroupSetsResponse> search(SearchReadGroupSetsRequest request) {
var _url = null;
var _queryParams = new core.Map();
var _uploadMedia = null;
@@ -1185,7 +1154,7 @@ class ReadsetsResourceApi {
}
- _url = 'readsets/search';
+ _url = 'readgroupsets/search';
var _response = _requester.request(_url,
"POST",
@@ -1194,20 +1163,21 @@ class ReadsetsResourceApi {
uploadOptions: _uploadOptions,
uploadMedia: _uploadMedia,
downloadOptions: _downloadOptions);
- return _response.then((data) => new SearchReadsetsResponse.fromJson(data));
+ return _response.then((data) => new SearchReadGroupSetsResponse.fromJson(data));
}
/**
- * Updates a readset.
+ * Updates a read group set.
*
* [request] - The metadata request object.
*
* Request parameters:
*
- * [readsetId] - The ID of the readset to be updated. The caller must have
- * WRITE permissions to the dataset associated with this readset.
+ * [readGroupSetId] - The ID of the read group set to be updated. The caller
+ * must have WRITE permissions to the dataset associated with this read group
+ * set.
*
- * Completes with a [Readset].
+ * Completes with a [ReadGroupSet].
*
* Completes with a [common.ApiRequestError] if the API endpoint returned an
* error.
@@ -1215,7 +1185,7 @@ class ReadsetsResourceApi {
* If the used [http.Client] completes with an error when making a REST call,
* this method will complete with the same error.
*/
- async.Future<Readset> update(Readset request, core.String readsetId) {
+ async.Future<ReadGroupSet> update(ReadGroupSet request, core.String readGroupSetId) {
var _url = null;
var _queryParams = new core.Map();
var _uploadMedia = null;
@@ -1226,12 +1196,12 @@ class ReadsetsResourceApi {
if (request != null) {
_body = convert.JSON.encode((request).toJson());
}
- if (readsetId == null) {
- throw new core.ArgumentError("Parameter readsetId is required.");
+ if (readGroupSetId == null) {
+ throw new core.ArgumentError("Parameter readGroupSetId is required.");
}
- _url = 'readsets/' + common_internal.Escaper.ecapeVariable('$readsetId');
+ _url = 'readgroupsets/' + common_internal.Escaper.ecapeVariable('$readGroupSetId');
var _response = _requester.request(_url,
"PUT",
@@ -1240,48 +1210,50 @@ class ReadsetsResourceApi {
uploadOptions: _uploadOptions,
uploadMedia: _uploadMedia,
downloadOptions: _downloadOptions);
- return _response.then((data) => new Readset.fromJson(data));
+ return _response.then((data) => new ReadGroupSet.fromJson(data));
}
}
/** Not documented yet. */
-class ReadsetsCoveragebucketsResourceApi {
+class ReadgroupsetsCoveragebucketsResourceApi {
final common_internal.ApiRequester _requester;
- ReadsetsCoveragebucketsResourceApi(common_internal.ApiRequester client) :
+ ReadgroupsetsCoveragebucketsResourceApi(common_internal.ApiRequester client) :
_requester = client;
/**
- * Lists fixed width coverage buckets for a readset, each of which correspond
- * to a range of a reference sequence. Each bucket summarizes coverage
- * information across its corresponding genomic range. Coverage is defined as
- * the number of reads which are aligned to a given base in the reference
- * sequence. Coverage buckets are available at various bucket widths, enabling
- * various coverage "zoom levels". The caller must have READ permissions for
- * the target readset.
+ * Lists fixed width coverage buckets for a read group set, each of which
+ * correspond to a range of a reference sequence. Each bucket summarizes
+ * coverage information across its corresponding genomic range.
+ *
+ * Coverage is defined as the number of reads which are aligned to a given
+ * base in the reference sequence. Coverage buckets are available at several
+ * precomputed bucket widths, enabling retrieval of various coverage 'zoom
+ * levels'. The caller must have READ permissions for the target read group
+ * set.
*
* Request parameters:
*
- * [readsetId] - Required. The ID of the readset over which coverage is
- * requested.
+ * [readGroupSetId] - Required. The ID of the read group set over which
+ * coverage is requested.
*
- * [maxResults] - The maximum number of results to return in a single page. If
+ * [pageSize] - The maximum number of results to return in a single page. If
* unspecified, defaults to 1024. The maximum value is 2048.
*
* [pageToken] - The continuation token, which is used to page through large
* result sets. To get the next page of results, set this parameter to the
* value of nextPageToken from the previous response.
*
- * [range_sequenceEnd] - The end position of the range on the reference,
- * 1-based exclusive. If specified, sequenceName must also be specified.
+ * [range_end] - The end position of the range on the reference, 0-based
+ * exclusive. If specified, referenceName must also be specified.
*
- * [range_sequenceName] - The reference sequence name, for example chr1, 1, or
- * chrX.
+ * [range_referenceName] - The reference sequence name, for example chr1, 1,
+ * or chrX.
*
- * [range_sequenceStart] - The start position of the range on the reference,
- * 1-based inclusive. If specified, sequenceName must also be specified.
+ * [range_start] - The start position of the range on the reference, 0-based
+ * inclusive. If specified, referenceName must also be specified.
*
* [targetBucketWidth] - The desired width of each reported coverage bucket in
* base pairs. This will be rounded down to the nearest precomputed bucket
@@ -1298,7 +1270,7 @@ class ReadsetsCoveragebucketsResourceApi {
* If the used [http.Client] completes with an error when making a REST call,
* this method will complete with the same error.
*/
- async.Future<ListCoverageBucketsResponse> list(core.String readsetId, {core.String maxResults, core.String pageToken, core.String range_sequenceEnd, core.String range_sequenceName, core.String range_sequenceStart, core.String targetBucketWidth}) {
+ async.Future<ListCoverageBucketsResponse> list(core.String readGroupSetId, {core.int pageSize, core.String pageToken, core.String range_end, core.String range_referenceName, core.String range_start, core.String targetBucketWidth}) {
var _url = null;
var _queryParams = new core.Map();
var _uploadMedia = null;
@@ -1306,30 +1278,30 @@ class ReadsetsCoveragebucketsResourceApi {
var _downloadOptions = common.DownloadOptions.Metadata;
var _body = null;
- if (readsetId == null) {
- throw new core.ArgumentError("Parameter readsetId is required.");
+ if (readGroupSetId == null) {
+ throw new core.ArgumentError("Parameter readGroupSetId is required.");
}
- if (maxResults != null) {
- _queryParams["maxResults"] = [maxResults];
+ if (pageSize != null) {
+ _queryParams["pageSize"] = ["${pageSize}"];
}
if (pageToken != null) {
_queryParams["pageToken"] = [pageToken];
}
- if (range_sequenceEnd != null) {
- _queryParams["range.sequenceEnd"] = [range_sequenceEnd];
+ if (range_end != null) {
+ _queryParams["range.end"] = [range_end];
}
- if (range_sequenceName != null) {
- _queryParams["range.sequenceName"] = [range_sequenceName];
+ if (range_referenceName != null) {
+ _queryParams["range.referenceName"] = [range_referenceName];
}
- if (range_sequenceStart != null) {
- _queryParams["range.sequenceStart"] = [range_sequenceStart];
+ if (range_start != null) {
+ _queryParams["range.start"] = [range_start];
}
if (targetBucketWidth != null) {
_queryParams["targetBucketWidth"] = [targetBucketWidth];
}
- _url = 'readsets/' + common_internal.Escaper.ecapeVariable('$readsetId') + '/coveragebuckets';
+ _url = 'readgroupsets/' + common_internal.Escaper.ecapeVariable('$readGroupSetId') + '/coveragebuckets';
var _response = _requester.request(_url,
"GET",
@@ -1345,20 +1317,34 @@ class ReadsetsCoveragebucketsResourceApi {
/** Not documented yet. */
-class VariantsResourceApi {
+class ReadsResourceApi {
final common_internal.ApiRequester _requester;
- VariantsResourceApi(common_internal.ApiRequester client) :
+ ReadsResourceApi(common_internal.ApiRequester client) :
_requester = client;
/**
- * Creates a new variant.
+ * Gets a list of reads for one or more read group sets. Reads search operates
+ * over a genomic coordinate space of reference sequence & position defined
+ * over the reference sequences to which the requested read group sets are
+ * aligned.
+ *
+ * If a target positional range is specified, search returns all reads whose
+ * alignment to the reference genome overlap the range. A query which
+ * specifies only read group set IDs yields all reads in those read group
+ * sets, including unmapped reads.
+ *
+ * All reads returned (including reads on subsequent pages) are ordered by
+ * genomic coordinate (reference sequence & position). Reads with equivalent
+ * genomic coordinates are returned in a deterministic order.
+ *
+ * Implements GlobalAllianceApi.searchReads.
*
* [request] - The metadata request object.
*
* Request parameters:
*
- * Completes with a [Variant].
+ * Completes with a [SearchReadsResponse].
*
* Completes with a [common.ApiRequestError] if the API endpoint returned an
* error.
@@ -1366,7 +1352,7 @@ class VariantsResourceApi {
* If the used [http.Client] completes with an error when making a REST call,
* this method will complete with the same error.
*/
- async.Future<Variant> create(Variant request) {
+ async.Future<SearchReadsResponse> search(SearchReadsRequest request) {
var _url = null;
var _queryParams = new core.Map();
var _uploadMedia = null;
@@ -1379,7 +1365,7 @@ class VariantsResourceApi {
}
- _url = 'variants';
+ _url = 'reads/search';
var _response = _requester.request(_url,
"POST",
@@ -1388,15 +1374,31 @@ class VariantsResourceApi {
uploadOptions: _uploadOptions,
uploadMedia: _uploadMedia,
downloadOptions: _downloadOptions);
- return _response.then((data) => new Variant.fromJson(data));
+ return _response.then((data) => new SearchReadsResponse.fromJson(data));
}
+}
+
+
+/** Not documented yet. */
+class ReferencesResourceApi {
+ final common_internal.ApiRequester _requester;
+
+ ReferencesBasesResourceApi get bases => new ReferencesBasesResourceApi(_requester);
+
+ ReferencesResourceApi(common_internal.ApiRequester client) :
+ _requester = client;
+
/**
- * Deletes a variant.
+ * Gets a reference.
+ *
+ * Implements GlobalAllianceApi.getReference.
*
* Request parameters:
*
- * [variantId] - The ID of the variant to be deleted.
+ * [referenceId] - The ID of the reference.
+ *
+ * Completes with a [Reference].
*
* Completes with a [common.ApiRequestError] if the API endpoint returned an
* error.
@@ -1404,7 +1406,7 @@ class VariantsResourceApi {
* If the used [http.Client] completes with an error when making a REST call,
* this method will complete with the same error.
*/
- async.Future delete(core.String variantId) {
+ async.Future<Reference> get(core.String referenceId) {
var _url = null;
var _queryParams = new core.Map();
var _uploadMedia = null;
@@ -1412,32 +1414,33 @@ class VariantsResourceApi {
var _downloadOptions = common.DownloadOptions.Metadata;
var _body = null;
- if (variantId == null) {
- throw new core.ArgumentError("Parameter variantId is required.");
+ if (referenceId == null) {
+ throw new core.ArgumentError("Parameter referenceId is required.");
}
- _downloadOptions = null;
- _url = 'variants/' + common_internal.Escaper.ecapeVariable('$variantId');
+ _url = 'references/' + common_internal.Escaper.ecapeVariable('$referenceId');
var _response = _requester.request(_url,
- "DELETE",
+ "GET",
body: _body,
queryParams: _queryParams,
uploadOptions: _uploadOptions,
uploadMedia: _uploadMedia,
downloadOptions: _downloadOptions);
- return _response.then((data) => null);
+ return _response.then((data) => new Reference.fromJson(data));
}
/**
- * Exports variant data to an external destination.
+ * Searches for references which match the given criteria.
+ *
+ * Implements GlobalAllianceApi.searchReferences.
*
* [request] - The metadata request object.
*
* Request parameters:
*
- * Completes with a [ExportVariantsResponse].
+ * Completes with a [SearchReferencesResponse].
*
* Completes with a [common.ApiRequestError] if the API endpoint returned an
* error.
@@ -1445,7 +1448,7 @@ class VariantsResourceApi {
* If the used [http.Client] completes with an error when making a REST call,
* this method will complete with the same error.
*/
- async.Future<ExportVariantsResponse> export(ExportVariantsRequest request) {
+ async.Future<SearchReferencesResponse> search(SearchReferencesRequest request) {
var _url = null;
var _queryParams = new core.Map();
var _uploadMedia = null;
@@ -1458,7 +1461,7 @@ class VariantsResourceApi {
}
- _url = 'variants/export';
+ _url = 'references/search';
var _response = _requester.request(_url,
"POST",
@@ -1467,17 +1470,41 @@ class VariantsResourceApi {
uploadOptions: _uploadOptions,
uploadMedia: _uploadMedia,
downloadOptions: _downloadOptions);
- return _response.then((data) => new ExportVariantsResponse.fromJson(data));
+ return _response.then((data) => new SearchReferencesResponse.fromJson(data));
}
+}
+
+
+/** Not documented yet. */
+class ReferencesBasesResourceApi {
+ final common_internal.ApiRequester _requester;
+
+ ReferencesBasesResourceApi(common_internal.ApiRequester client) :
+ _requester = client;
+
/**
- * Gets a variant by ID.
+ * Lists the bases in a reference, optionally restricted to a range.
+ *
+ * Implements GlobalAllianceApi.getReferenceBases.
*
* Request parameters:
*
- * [variantId] - The ID of the variant.
+ * [referenceId] - The ID of the reference.
*
- * Completes with a [Variant].
+ * [end] - The end position (0-based, exclusive) of this query. Defaults to
+ * the length of this reference.
+ *
+ * [pageSize] - Specifies the maximum number of bases to return in a single
+ * page.
+ *
+ * [pageToken] - The continuation token, which is used to page through large
+ * result sets. To get the next page of results, set this parameter to the
+ * value of nextPageToken from the previous response.
+ *
+ * [start] - The start position (0-based) of this query. Defaults to 0.
+ *
+ * Completes with a [ListBasesResponse].
*
* Completes with a [common.ApiRequestError] if the API endpoint returned an
* error.
@@ -1485,7 +1512,7 @@ class VariantsResourceApi {
* If the used [http.Client] completes with an error when making a REST call,
* this method will complete with the same error.
*/
- async.Future<Variant> get(core.String variantId) {
+ async.Future<ListBasesResponse> list(core.String referenceId, {core.String end, core.int pageSize, core.String pageToken, core.String start}) {
var _url = null;
var _queryParams = new core.Map();
var _uploadMedia = null;
@@ -1493,12 +1520,24 @@ class VariantsResourceApi {
var _downloadOptions = common.DownloadOptions.Metadata;
var _body = null;
- if (variantId == null) {
- throw new core.ArgumentError("Parameter variantId is required.");
+ if (referenceId == null) {
+ throw new core.ArgumentError("Parameter referenceId is required.");
+ }
+ if (end != null) {
+ _queryParams["end"] = [end];
+ }
+ if (pageSize != null) {
+ _queryParams["pageSize"] = ["${pageSize}"];
+ }
+ if (pageToken != null) {
+ _queryParams["pageToken"] = [pageToken];
+ }
+ if (start != null) {
+ _queryParams["start"] = [start];
}
- _url = 'variants/' + common_internal.Escaper.ecapeVariable('$variantId');
+ _url = 'references/' + common_internal.Escaper.ecapeVariable('$referenceId') + '/bases';
var _response = _requester.request(_url,
"GET",
@@ -1507,24 +1546,29 @@ class VariantsResourceApi {
uploadOptions: _uploadOptions,
uploadMedia: _uploadMedia,
downloadOptions: _downloadOptions);
- return _response.then((data) => new Variant.fromJson(data));
+ return _response.then((data) => new ListBasesResponse.fromJson(data));
}
+}
+
+
+/** Not documented yet. */
+class ReferencesetsResourceApi {
+ final common_internal.ApiRequester _requester;
+
+ ReferencesetsResourceApi(common_internal.ApiRequester client) :
+ _requester = client;
+
/**
- * Creates variant data by asynchronously importing the provided information.
- * The variants for import will be merged with any existing data and each
- * other according to the behavior of mergeVariants. In particular, this means
- * for merged VCF variants that have conflicting INFO fields, some data will
- * be arbitrarily discarded. As a special case, for single-sample VCF files,
- * QUAL and FILTER fields will be moved to the call level; these are sometimes
- * interpreted in a call-specific context. Imported VCF headers are appended
- * to the metadata already in a VariantSet.
+ * Gets a reference set.
*
- * [request] - The metadata request object.
+ * Implements GlobalAllianceApi.getReferenceSet.
*
* Request parameters:
*
- * Completes with a [ImportVariantsResponse].
+ * [referenceSetId] - The ID of the reference set.
+ *
+ * Completes with a [ReferenceSet].
*
* Completes with a [common.ApiRequestError] if the API endpoint returned an
* error.
@@ -1532,7 +1576,7 @@ class VariantsResourceApi {
* If the used [http.Client] completes with an error when making a REST call,
* this method will complete with the same error.
*/
- async.Future<ImportVariantsResponse> import(ImportVariantsRequest request) {
+ async.Future<ReferenceSet> get(core.String referenceSetId) {
var _url = null;
var _queryParams = new core.Map();
var _uploadMedia = null;
@@ -1540,31 +1584,33 @@ class VariantsResourceApi {
var _downloadOptions = common.DownloadOptions.Metadata;
var _body = null;
- if (request != null) {
- _body = convert.JSON.encode((request).toJson());
+ if (referenceSetId == null) {
+ throw new core.ArgumentError("Parameter referenceSetId is required.");
}
- _url = 'variants/import';
+ _url = 'referencesets/' + common_internal.Escaper.ecapeVariable('$referenceSetId');
var _response = _requester.request(_url,
- "POST",
+ "GET",
body: _body,
queryParams: _queryParams,
uploadOptions: _uploadOptions,
uploadMedia: _uploadMedia,
downloadOptions: _downloadOptions);
- return _response.then((data) => new ImportVariantsResponse.fromJson(data));
+ return _response.then((data) => new ReferenceSet.fromJson(data));
}
/**
- * Gets a list of variants matching the criteria.
+ * Searches for reference sets which match the given criteria.
+ *
+ * Implements GlobalAllianceApi.searchReferenceSets.
*
* [request] - The metadata request object.
*
* Request parameters:
*
- * Completes with a [SearchVariantsResponse].
+ * Completes with a [SearchReferenceSetsResponse].
*
* Completes with a [common.ApiRequestError] if the API endpoint returned an
* error.
@@ -1572,7 +1618,7 @@ class VariantsResourceApi {
* If the used [http.Client] completes with an error when making a REST call,
* this method will complete with the same error.
*/
- async.Future<SearchVariantsResponse> search(SearchVariantsRequest request) {
+ async.Future<SearchReferenceSetsResponse> search(SearchReferenceSetsRequest request) {
var _url = null;
var _queryParams = new core.Map();
var _uploadMedia = null;
@@ -1585,7 +1631,7 @@ class VariantsResourceApi {
}
- _url = 'variants/search';
+ _url = 'referencesets/search';
var _response = _requester.request(_url,
"POST",
@@ -1594,19 +1640,26 @@ class VariantsResourceApi {
uploadOptions: _uploadOptions,
uploadMedia: _uploadMedia,
downloadOptions: _downloadOptions);
- return _response.then((data) => new SearchVariantsResponse.fromJson(data));
+ return _response.then((data) => new SearchReferenceSetsResponse.fromJson(data));
}
+}
+
+
+/** Not documented yet. */
+class VariantsResourceApi {
+ final common_internal.ApiRequester _requester;
+
+ VariantsResourceApi(common_internal.ApiRequester client) :
+ _requester = client;
+
/**
- * Updates a variant's names and info fields. All other modifications are
- * silently ignored. Returns the modified variant without its calls.
+ * Creates a new variant.
*
* [request] - The metadata request object.
*
* Request parameters:
*
- * [variantId] - The ID of the variant to be updated.
- *
* Completes with a [Variant].
*
* Completes with a [common.ApiRequestError] if the API endpoint returned an
@@ -1615,7 +1668,7 @@ class VariantsResourceApi {
* If the used [http.Client] completes with an error when making a REST call,
* this method will complete with the same error.
*/
- async.Future<Variant> update(Variant request, core.String variantId) {
+ async.Future<Variant> create(Variant request) {
var _url = null;
var _queryParams = new core.Map();
var _uploadMedia = null;
@@ -1626,15 +1679,12 @@ class VariantsResourceApi {
if (request != null) {
_body = convert.JSON.encode((request).toJson());
}
- if (variantId == null) {
- throw new core.ArgumentError("Parameter variantId is required.");
- }
- _url = 'variants/' + common_internal.Escaper.ecapeVariable('$variantId');
+ _url = 'variants';
var _response = _requester.request(_url,
- "PUT",
+ "POST",
body: _body,
queryParams: _queryParams,
uploadOptions: _uploadOptions,
@@ -1643,23 +1693,12 @@ class VariantsResourceApi {
return _response.then((data) => new Variant.fromJson(data));
}
-}
-
-
-/** Not documented yet. */
-class VariantsetsResourceApi {
- final common_internal.ApiRequester _requester;
-
- VariantsetsResourceApi(common_internal.ApiRequester client) :
- _requester = client;
-
/**
- * Deletes the contents of a variant set. The variant set object is not
- * deleted.
+ * Deletes a variant.
*
* Request parameters:
*
- * [variantSetId] - The ID of the variant set to be deleted.
+ * [variantId] - The ID of the variant to be deleted.
*
* Completes with a [common.ApiRequestError] if the API endpoint returned an
* error.
@@ -1667,7 +1706,7 @@ class VariantsetsResourceApi {
* If the used [http.Client] completes with an error when making a REST call,
* this method will complete with the same error.
*/
- async.Future delete(core.String variantSetId) {
+ async.Future delete(core.String variantId) {
var _url = null;
var _queryParams = new core.Map();
var _uploadMedia = null;
@@ -1675,13 +1714,13 @@ class VariantsetsResourceApi {
var _downloadOptions = common.DownloadOptions.Metadata;
var _body = null;
- if (variantSetId == null) {
- throw new core.ArgumentError("Parameter variantSetId is required.");
+ if (variantId == null) {
+ throw new core.ArgumentError("Parameter variantId is required.");
}
_downloadOptions = null;
- _url = 'variantsets/' + common_internal.Escaper.ecapeVariable('$variantSetId');
+ _url = 'variants/' + common_internal.Escaper.ecapeVariable('$variantId');
var _response = _requester.request(_url,
"DELETE",
@@ -1694,13 +1733,13 @@ class VariantsetsResourceApi {
}
/**
- * Gets a variant set by ID.
+ * Gets a variant by ID.
*
* Request parameters:
*
- * [variantSetId] - Required. The ID of the variant set.
+ * [variantId] - The ID of the variant.
*
- * Completes with a [VariantSet].
+ * Completes with a [Variant].
*
* Completes with a [common.ApiRequestError] if the API endpoint returned an
* error.
@@ -1708,7 +1747,7 @@ class VariantsetsResourceApi {
* If the used [http.Client] completes with an error when making a REST call,
* this method will complete with the same error.
*/
- async.Future<VariantSet> get(core.String variantSetId) {
+ async.Future<Variant> get(core.String variantId) {
var _url = null;
var _queryParams = new core.Map();
var _uploadMedia = null;
@@ -1716,7 +1755,232 @@ class VariantsetsResourceApi {
var _downloadOptions = common.DownloadOptions.Metadata;
var _body = null;
- if (variantSetId == null) {
+ if (variantId == null) {
+ throw new core.ArgumentError("Parameter variantId is required.");
+ }
+
+
+ _url = 'variants/' + common_internal.Escaper.ecapeVariable('$variantId');
+
+ var _response = _requester.request(_url,
+ "GET",
+ body: _body,
+ queryParams: _queryParams,
+ uploadOptions: _uploadOptions,
+ uploadMedia: _uploadMedia,
+ downloadOptions: _downloadOptions);
+ return _response.then((data) => new Variant.fromJson(data));
+ }
+
+ /**
+ * Gets a list of variants matching the criteria.
+ *
+ * Implements GlobalAllianceApi.searchVariants.
+ *
+ * [request] - The metadata request object.
+ *
+ * Request parameters:
+ *
+ * Completes with a [SearchVariantsResponse].
+ *
+ * Completes with a [common.ApiRequestError] if the API endpoint returned an
+ * error.
+ *
+ * If the used [http.Client] completes with an error when making a REST call,
+ * this method will complete with the same error.
+ */
+ async.Future<SearchVariantsResponse> search(SearchVariantsRequest request) {
+ var _url = null;
+ var _queryParams = new core.Map();
+ var _uploadMedia = null;
+ var _uploadOptions = null;
+ var _downloadOptions = common.DownloadOptions.Metadata;
+ var _body = null;
+
+ if (request != null) {
+ _body = convert.JSON.encode((request).toJson());
+ }
+
+
+ _url = 'variants/search';
+
+ var _response = _requester.request(_url,
+ "POST",
+ body: _body,
+ queryParams: _queryParams,
+ uploadOptions: _uploadOptions,
+ uploadMedia: _uploadMedia,
+ downloadOptions: _downloadOptions);
+ return _response.then((data) => new SearchVariantsResponse.fromJson(data));
+ }
+
+ /**
+ * Updates a variant's names and info fields. All other modifications are
+ * silently ignored. Returns the modified variant without its calls.
+ *
+ * [request] - The metadata request object.
+ *
+ * Request parameters:
+ *
+ * [variantId] - The ID of the variant to be updated.
+ *
+ * Completes with a [Variant].
+ *
+ * Completes with a [common.ApiRequestError] if the API endpoint returned an
+ * error.
+ *
+ * If the used [http.Client] completes with an error when making a REST call,
+ * this method will complete with the same error.
+ */
+ async.Future<Variant> update(Variant request, core.String variantId) {
+ var _url = null;
+ var _queryParams = new core.Map();
+ var _uploadMedia = null;
+ var _uploadOptions = null;
+ var _downloadOptions = common.DownloadOptions.Metadata;
+ var _body = null;
+
+ if (request != null) {
+ _body = convert.JSON.encode((request).toJson());
+ }
+ if (variantId == null) {
+ throw new core.ArgumentError("Parameter variantId is required.");
+ }
+
+
+ _url = 'variants/' + common_internal.Escaper.ecapeVariable('$variantId');
+
+ var _response = _requester.request(_url,
+ "PUT",
+ body: _body,
+ queryParams: _queryParams,
+ uploadOptions: _uploadOptions,
+ uploadMedia: _uploadMedia,
+ downloadOptions: _downloadOptions);
+ return _response.then((data) => new Variant.fromJson(data));
+ }
+
+}
+
+
+/** Not documented yet. */
+class VariantsetsResourceApi {
+ final common_internal.ApiRequester _requester;
+
+ VariantsetsResourceApi(common_internal.ApiRequester client) :
+ _requester = client;
+
+ /**
+ * Deletes the contents of a variant set. The variant set object is not
+ * deleted.
+ *
+ * Request parameters:
+ *
+ * [variantSetId] - The ID of the variant set to be deleted.
+ *
+ * Completes with a [common.ApiRequestError] if the API endpoint returned an
+ * error.
+ *
+ * If the used [http.Client] completes with an error when making a REST call,
+ * this method will complete with the same error.
+ */
+ async.Future delete(core.String variantSetId) {
+ var _url = null;
+ var _queryParams = new core.Map();
+ var _uploadMedia = null;
+ var _uploadOptions = null;
+ var _downloadOptions = common.DownloadOptions.Metadata;
+ var _body = null;
+
+ if (variantSetId == null) {
+ throw new core.ArgumentError("Parameter variantSetId is required.");
+ }
+
+ _downloadOptions = null;
+
+ _url = 'variantsets/' + common_internal.Escaper.ecapeVariable('$variantSetId');
+
+ var _response = _requester.request(_url,
+ "DELETE",
+ body: _body,
+ queryParams: _queryParams,
+ uploadOptions: _uploadOptions,
+ uploadMedia: _uploadMedia,
+ downloadOptions: _downloadOptions);
+ return _response.then((data) => null);
+ }
+
+ /**
+ * Exports variant set data to an external destination.
+ *
+ * [request] - The metadata request object.
+ *
+ * Request parameters:
+ *
+ * [variantSetId] - Required. The ID of the variant set that contains variant
+ * data which should be exported. The caller must have READ access to this
+ * variant set.
+ *
+ * Completes with a [ExportVariantSetResponse].
+ *
+ * Completes with a [common.ApiRequestError] if the API endpoint returned an
+ * error.
+ *
+ * If the used [http.Client] completes with an error when making a REST call,
+ * this method will complete with the same error.
+ */
+ async.Future<ExportVariantSetResponse> export(ExportVariantSetRequest request, core.String variantSetId) {
+ var _url = null;
+ var _queryParams = new core.Map();
+ var _uploadMedia = null;
+ var _uploadOptions = null;
+ var _downloadOptions = common.DownloadOptions.Metadata;
+ var _body = null;
+
+ if (request != null) {
+ _body = convert.JSON.encode((request).toJson());
+ }
+ if (variantSetId == null) {
+ throw new core.ArgumentError("Parameter variantSetId is required.");
+ }
+
+
+ _url = 'variantsets/' + common_internal.Escaper.ecapeVariable('$variantSetId') + '/export';
+
+ var _response = _requester.request(_url,
+ "POST",
+ body: _body,
+ queryParams: _queryParams,
+ uploadOptions: _uploadOptions,
+ uploadMedia: _uploadMedia,
+ downloadOptions: _downloadOptions);
+ return _response.then((data) => new ExportVariantSetResponse.fromJson(data));
+ }
+
+ /**
+ * Gets a variant set by ID.
+ *
+ * Request parameters:
+ *
+ * [variantSetId] - Required. The ID of the variant set.
+ *
+ * Completes with a [VariantSet].
+ *
+ * Completes with a [common.ApiRequestError] if the API endpoint returned an
+ * error.
+ *
+ * If the used [http.Client] completes with an error when making a REST call,
+ * this method will complete with the same error.
+ */
+ async.Future<VariantSet> get(core.String variantSetId) {
+ var _url = null;
+ var _queryParams = new core.Map();
+ var _uploadMedia = null;
+ var _uploadOptions = null;
+ var _downloadOptions = common.DownloadOptions.Metadata;
+ var _body = null;
+
+ if (variantSetId == null) {
throw new core.ArgumentError("Parameter variantSetId is required.");
}
@@ -1734,6 +1998,60 @@ class VariantsetsResourceApi {
}
/**
+ * Creates variant data by asynchronously importing the provided information.
+ *
+ * The variants for import will be merged with any existing data and each
+ * other according to the behavior of mergeVariants. In particular, this means
+ * for merged VCF variants that have conflicting INFO fields, some data will
+ * be arbitrarily discarded. As a special case, for single-sample VCF files,
+ * QUAL and FILTER fields will be moved to the call level; these are sometimes
+ * interpreted in a call-specific context. Imported VCF headers are appended
+ * to the metadata already in a variant set.
+ *
+ * [request] - The metadata request object.
+ *
+ * Request parameters:
+ *
+ * [variantSetId] - Required. The variant set to which variant data should be
+ * imported.
+ *
+ * Completes with a [ImportVariantsResponse].
+ *
+ * Completes with a [common.ApiRequestError] if the API endpoint returned an
+ * error.
+ *
+ * If the used [http.Client] completes with an error when making a REST call,
+ * this method will complete with the same error.
+ */
+ async.Future<ImportVariantsResponse> importVariants(ImportVariantsRequest request, core.String variantSetId) {
+ var _url = null;
+ var _queryParams = new core.Map();
+ var _uploadMedia = null;
+ var _uploadOptions = null;
+ var _downloadOptions = common.DownloadOptions.Metadata;
+ var _body = null;
+
+ if (request != null) {
+ _body = convert.JSON.encode((request).toJson());
+ }
+ if (variantSetId == null) {
+ throw new core.ArgumentError("Parameter variantSetId is required.");
+ }
+
+
+ _url = 'variantsets/' + common_internal.Escaper.ecapeVariable('$variantSetId') + '/importVariants';
+
+ var _response = _requester.request(_url,
+ "POST",
+ body: _body,
+ queryParams: _queryParams,
+ uploadOptions: _uploadOptions,
+ uploadMedia: _uploadMedia,
+ downloadOptions: _downloadOptions);
+ return _response.then((data) => new ImportVariantsResponse.fromJson(data));
+ }
+
+ /**
* Merges the given variants with existing variants. Each variant will be
* merged with an existing variant that matches its reference sequence, start,
* end, reference bases, and alternative bases. If no such variant exists, a
@@ -1786,8 +2104,7 @@ class VariantsetsResourceApi {
/**
* Updates a variant set's metadata. All other modifications are silently
- * ignored. Returns the modified variant set. This method supports patch
- * semantics.
+ * ignored. This method supports patch semantics.
*
* [request] - The metadata request object.
*
@@ -1834,6 +2151,8 @@ class VariantsetsResourceApi {
/**
* Returns a list of all variant sets matching search criteria.
*
+ * Implements GlobalAllianceApi.searchVariantSets.
+ *
* [request] - The metadata request object.
*
* Request parameters:
@@ -1873,7 +2192,7 @@ class VariantsetsResourceApi {
/**
* Updates a variant set's metadata. All other modifications are silently
- * ignored. Returns the modified variant set.
+ * ignored.
*
* [request] - The metadata request object.
*
@@ -1921,27 +2240,107 @@ class VariantsetsResourceApi {
-/**
- * A beacon represents whether any variant call in a variant set has a specific
- * allele at a particular position.
- */
-class Beacon {
- /** True if the allele exists on any variant call, false otherwise. */
- core.bool exists;
+/** The read group set align request. */
+class AlignReadGroupSetsRequest {
+ /**
+ * The BAM source files for alignment. Exactly one of readGroupSetIds,
+ * bamSourceUris, interleavedFastqSource or pairedFastqSource must be
+ * provided. The caller must have READ permissions for these files.
+ */
+ core.List<core.String> bamSourceUris;
+ /**
+ * Required. The ID of the dataset the newly aligned read group sets will
+ * belong to. The caller must have WRITE permissions to this dataset.
+ */
+ core.String datasetId;
- Beacon();
+ /**
+ * The interleaved FASTQ source files for alignment, where both members of
+ * each pair of reads are found on consecutive records within the same FASTQ
+ * file. Exactly one of readGroupSetIds, bamSourceUris, interleavedFastqSource
+ * or pairedFastqSource must be provided.
+ */
+ InterleavedFastqSource interleavedFastqSource;
+
+ /**
+ * The paired end FASTQ source files for alignment, where each member of a
+ * pair of reads are found in separate files. Exactly one of readGroupSetIds,
+ * bamSourceUris, interleavedFastqSource or pairedFastqSource must be
+ * provided.
+ */
+ PairedFastqSource pairedFastqSource;
+
+ /**
+ * The IDs of the read group sets which will be aligned. New read group sets
+ * will be generated to hold the aligned data, the originals will not be
+ * modified. The caller must have READ permissions for these read group sets.
+ * Exactly one of readGroupSetIds, bamSourceUris, interleavedFastqSource or
+ * pairedFastqSource must be provided.
+ */
+ core.List<core.String> readGroupSetIds;
+
+
+ AlignReadGroupSetsRequest();
- Beacon.fromJson(core.Map _json) {
- if (_json.containsKey("exists")) {
- exists = _json["exists"];
+ AlignReadGroupSetsRequest.fromJson(core.Map _json) {
+ if (_json.containsKey("bamSourceUris")) {
+ bamSourceUris = _json["bamSourceUris"];
+ }
+ if (_json.containsKey("datasetId")) {
+ datasetId = _json["datasetId"];
+ }
+ if (_json.containsKey("interleavedFastqSource")) {
+ interleavedFastqSource = new InterleavedFastqSource.fromJson(_json["interleavedFastqSource"]);
+ }
+ if (_json.containsKey("pairedFastqSource")) {
+ pairedFastqSource = new PairedFastqSource.fromJson(_json["pairedFastqSource"]);
+ }
+ if (_json.containsKey("readGroupSetIds")) {
+ readGroupSetIds = _json["readGroupSetIds"];
}
}
core.Map toJson() {
var _json = new core.Map();
- if (exists != null) {
- _json["exists"] = exists;
+ if (bamSourceUris != null) {
+ _json["bamSourceUris"] = bamSourceUris;
+ }
+ if (datasetId != null) {
+ _json["datasetId"] = datasetId;
+ }
+ if (interleavedFastqSource != null) {
+ _json["interleavedFastqSource"] = (interleavedFastqSource).toJson();
+ }
+ if (pairedFastqSource != null) {
+ _json["pairedFastqSource"] = (pairedFastqSource).toJson();
+ }
+ if (readGroupSetIds != null) {
+ _json["readGroupSetIds"] = readGroupSetIds;
+ }
+ return _json;
+ }
+}
+
+
+/** The read group set align response. */
+class AlignReadGroupSetsResponse {
+ /** A job ID that can be used to get status information. */
+ core.String jobId;
+
+
+ AlignReadGroupSetsResponse();
+
+ AlignReadGroupSetsResponse.fromJson(core.Map _json) {
+ if (_json.containsKey("jobId")) {
+ jobId = _json["jobId"];
+ }
+ }
+
+ core.Map toJson() {
+ var _json = new core.Map();
+ if (jobId != null) {
+ _json["jobId"] = jobId;
}
return _json;
}
@@ -1949,9 +2348,9 @@ class Beacon {
/**
- * A Call represents the determination of genotype with respect to a particular
+ * A call represents the determination of genotype with respect to a particular
* variant. It may include associated information such as quality and phasing.
- * For example, a Call might assign a probability of 0.32 to the occurrence of a
+ * For example, a call might assign a probability of 0.32 to the occurrence of a
* SNP named rs1234 in a call set with the name NA12345.
*/
class Call {
@@ -2041,7 +2440,87 @@ class Call {
}
-/** A CallSet is a collection of variant calls. It belongs to a variant set. */
+/** The read group set call request. */
+class CallReadGroupSetsRequest {
+ /**
+ * Required. The ID of the dataset the called variants will belong to. The
+ * caller must have WRITE permissions to this dataset.
+ */
+ core.String datasetId;
+
+ /**
+ * The IDs of the read group sets which will be called. The caller must have
+ * READ permissions for these read group sets. One of readGroupSetIds or
+ * sourceUris must be provided.
+ */
+ core.List<core.String> readGroupSetIds;
+
+ /**
+ * A list of URIs pointing at BAM files in Google Cloud Storage which will be
+ * called. FASTQ files are not allowed. The caller must have READ permissions
+ * for these files. One of readGroupSetIds or sourceUris must be provided.
+ */
+ core.List<core.String> sourceUris;
+
+
+ CallReadGroupSetsRequest();
+
+ CallReadGroupSetsRequest.fromJson(core.Map _json) {
+ if (_json.containsKey("datasetId")) {
+ datasetId = _json["datasetId"];
+ }
+ if (_json.containsKey("readGroupSetIds")) {
+ readGroupSetIds = _json["readGroupSetIds"];
+ }
+ if (_json.containsKey("sourceUris")) {
+ sourceUris = _json["sourceUris"];
+ }
+ }
+
+ core.Map toJson() {
+ var _json = new core.Map();
+ if (datasetId != null) {
+ _json["datasetId"] = datasetId;
+ }
+ if (readGroupSetIds != null) {
+ _json["readGroupSetIds"] = readGroupSetIds;
+ }
+ if (sourceUris != null) {
+ _json["sourceUris"] = sourceUris;
+ }
+ return _json;
+ }
+}
+
+
+/** The read group set call response. */
+class CallReadGroupSetsResponse {
+ /** A job ID that can be used to get status information. */
+ core.String jobId;
+
+
+ CallReadGroupSetsResponse();
+
+ CallReadGroupSetsResponse.fromJson(core.Map _json) {
+ if (_json.containsKey("jobId")) {
+ jobId = _json["jobId"];
+ }
+ }
+
+ core.Map toJson() {
+ var _json = new core.Map();
+ if (jobId != null) {
+ _json["jobId"] = jobId;
+ }
+ return _json;
+ }
+}
+
+
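These generated model classes all follow the same serialization pattern: a no-argument constructor plus fromJson/toJson, where toJson emits only fields that are non-null. A short sketch, assuming the package import path shown; the dataset ID and bucket URI are placeholders.

import "package:googleapis_beta/genomics/v1beta2.dart";

void main() {
  var request = new CallReadGroupSetsRequest()
    ..datasetId = "my-dataset-id"
    ..sourceUris = ["gs://my-bucket/sample.bam"];

  // Only datasetId and sourceUris appear in the map; readGroupSetIds is
  // omitted because it was never set.
  Map json = request.toJson();

  var copy = new CallReadGroupSetsRequest.fromJson(json);
  print(copy.sourceUris); // [gs://my-bucket/sample.bam]
}
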
+/**
+ * A call set is a collection of variant calls, typically for one sample. It
+ * belongs to a variant set.
+ */
class CallSet {
/** The date this call set was created in milliseconds from the epoch. */
core.String created;
@@ -2049,10 +2528,10 @@ class CallSet {
/** The Google generated ID of the call set, immutable. */
core.String id;
- /** A map of additional callset information. */
+ /** A map of additional call set information. */
core.Map<core.String, core.List<core.String>> info;
- /** The callset name. */
+ /** The call set name. */
core.String name;
/** The sample ID this call set corresponds to. */
@@ -2110,6 +2589,64 @@ class CallSet {
}
+/** A single CIGAR operation. */
+class CigarUnit {
+ /**
+ * Not documented yet.
+ * Possible string values are:
+ * - "ALIGNMENT_MATCH"
+ * - "CLIP_HARD"
+ * - "CLIP_SOFT"
+ * - "DELETE"
+ * - "INSERT"
+ * - "PAD"
+ * - "SEQUENCE_MATCH"
+ * - "SEQUENCE_MISMATCH"
+ * - "SKIP"
+ */
+ core.String operation;
+
+ /** The number of bases that the operation runs for. Required. */
+ core.String operationLength;
+
+ /**
+ * referenceSequence is only used at mismatches (SEQUENCE_MISMATCH) and
+ * deletions (DELETE). Filling this field replaces SAM's MD tag. If the
+ * relevant information is not available, this field is unset.
+ */
+ core.String referenceSequence;
+
+
+ CigarUnit();
+
+ CigarUnit.fromJson(core.Map _json) {
+ if (_json.containsKey("operation")) {
+ operation = _json["operation"];
+ }
+ if (_json.containsKey("operationLength")) {
+ operationLength = _json["operationLength"];
+ }
+ if (_json.containsKey("referenceSequence")) {
+ referenceSequence = _json["referenceSequence"];
+ }
+ }
+
+ core.Map toJson() {
+ var _json = new core.Map();
+ if (operation != null) {
+ _json["operation"] = operation;
+ }
+ if (operationLength != null) {
+ _json["operationLength"] = operationLength;
+ }
+ if (referenceSequence != null) {
+ _json["referenceSequence"] = referenceSequence;
+ }
+ return _json;
+ }
+}
+
+
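The operation values are enum names rather than single-letter SAM operators. A hedged sketch of rendering them back into a SAM-style CIGAR string; the letter mapping follows the usual SAM CIGAR convention, which is an assumption here and not stated in this file, and the package import path is assumed.

import "package:googleapis_beta/genomics/v1beta2.dart";

// Conventional SAM operator letters for each operation value (assumed
// standard SAM semantics).
const Map<String, String> samOperators = const {
  "ALIGNMENT_MATCH": "M",
  "INSERT": "I",
  "DELETE": "D",
  "SKIP": "N",
  "CLIP_SOFT": "S",
  "CLIP_HARD": "H",
  "PAD": "P",
  "SEQUENCE_MATCH": "=",
  "SEQUENCE_MISMATCH": "X"
};

/// Renders a list of CigarUnits as a SAM-style CIGAR string, e.g. "36M1D40M".
String toSamCigar(List<CigarUnit> cigar) {
  var buffer = new StringBuffer();
  for (var unit in cigar) {
    buffer.write(unit.operationLength); // operationLength is a string, e.g. "36"
    buffer.write(samOperators[unit.operation]);
  }
  return buffer.toString();
}
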
/**
* A bucket over which read coverage has been precomputed. A bucket corresponds
* to a specific range of the reference sequence.
@@ -2122,7 +2659,7 @@ class CoverageBucket {
core.double meanCoverage;
/** The genomic coordinate range spanned by this bucket. */
- GenomicRange range;
+ Range range;
CoverageBucket();
@@ -2132,7 +2669,7 @@ class CoverageBucket {
meanCoverage = _json["meanCoverage"];
}
if (_json.containsKey("range")) {
- range = new GenomicRange.fromJson(_json["range"]);
+ range = new Range.fromJson(_json["range"]);
}
}
@@ -2166,7 +2703,7 @@ class Dataset {
/**
* The Google Developers Console project number that this dataset belongs to.
*/
- core.String projectId;
+ core.String projectNumber;
Dataset();
@@ -2181,8 +2718,8 @@ class Dataset {
if (_json.containsKey("name")) {
name = _json["name"];
}
- if (_json.containsKey("projectId")) {
- projectId = _json["projectId"];
+ if (_json.containsKey("projectNumber")) {
+ projectNumber = _json["projectNumber"];
}
}
@@ -2197,8 +2734,8 @@ class Dataset {
if (name != null) {
_json["name"] = name;
}
- if (projectId != null) {
- _json["projectId"] = projectId;
+ if (projectNumber != null) {
+ _json["projectNumber"] = projectNumber;
}
return _json;
}
@@ -2208,16 +2745,14 @@ class Dataset {
/** The job creation request. */
class ExperimentalCreateJobRequest {
/**
- * Specifies whether or not to run the alignment pipeline. At least one of
- * align or callVariants must be provided.
+ * Specifies whether or not to run the alignment pipeline. Either align or
+ * callVariants must be set.
*/
core.bool align;
/**
- * Specifies whether or not to run the variant calling pipeline. If specified,
- * alignment will be performed first and the aligned BAMs will passed as input
- * to the variant caller. At least one of align or callVariants must be
- * provided.
+ * Specifies whether or not to run the variant calling pipeline. Either align
+ * or callVariants must be set.
*/
core.bool callVariants;
@@ -2237,7 +2772,7 @@ class ExperimentalCreateJobRequest {
/**
* Required. The Google Cloud Project ID with which to associate the request.
*/
- core.String projectId;
+ core.String projectNumber;
/**
* A list of Google Cloud Storage URIs of data files to operate upon. These
@@ -2263,8 +2798,8 @@ class ExperimentalCreateJobRequest {
if (_json.containsKey("pairedSourceUris")) {
pairedSourceUris = _json["pairedSourceUris"];
}
- if (_json.containsKey("projectId")) {
- projectId = _json["projectId"];
+ if (_json.containsKey("projectNumber")) {
+ projectNumber = _json["projectNumber"];
}
if (_json.containsKey("sourceUris")) {
sourceUris = _json["sourceUris"];
@@ -2285,8 +2820,8 @@ class ExperimentalCreateJobRequest {
if (pairedSourceUris != null) {
_json["pairedSourceUris"] = pairedSourceUris;
}
- if (projectId != null) {
- _json["projectId"] = projectId;
+ if (projectNumber != null) {
+ _json["projectNumber"] = projectNumber;
}
if (sourceUris != null) {
_json["sourceUris"] = sourceUris;
@@ -2320,8 +2855,8 @@ class ExperimentalCreateJobResponse {
}
-/** The readset export request. */
-class ExportReadsetsRequest {
+/** The read group set export request. */
+class ExportReadGroupSetsRequest {
/**
* A Google Cloud Storage URI where the exported BAM file will be created. The
* currently authenticated user must have write access to the new file
@@ -2330,10 +2865,10 @@ class ExportReadsetsRequest {
core.String exportUri;
/** The Google Developers Console project number that owns this export. */
- core.String projectId;
+ core.String projectNumber;
- /** The IDs of the readsets to export. */
- core.List<core.String> readsetIds;
+ /** The IDs of the read group sets to export. */
+ core.List<core.String> readGroupSetIds;
/**
* The reference names to export. If this is not specified, all reference
@@ -2343,17 +2878,17 @@ class ExportReadsetsRequest {
core.List<core.String> referenceNames;
- ExportReadsetsRequest();
+ ExportReadGroupSetsRequest();
- ExportReadsetsRequest.fromJson(core.Map _json) {
+ ExportReadGroupSetsRequest.fromJson(core.Map _json) {
if (_json.containsKey("exportUri")) {
exportUri = _json["exportUri"];
}
- if (_json.containsKey("projectId")) {
- projectId = _json["projectId"];
+ if (_json.containsKey("projectNumber")) {
+ projectNumber = _json["projectNumber"];
}
- if (_json.containsKey("readsetIds")) {
- readsetIds = _json["readsetIds"];
+ if (_json.containsKey("readGroupSetIds")) {
+ readGroupSetIds = _json["readGroupSetIds"];
}
if (_json.containsKey("referenceNames")) {
referenceNames = _json["referenceNames"];
@@ -2365,11 +2900,11 @@ class ExportReadsetsRequest {
if (exportUri != null) {
_json["exportUri"] = exportUri;
}
- if (projectId != null) {
- _json["projectId"] = projectId;
+ if (projectNumber != null) {
+ _json["projectNumber"] = projectNumber;
}
- if (readsetIds != null) {
- _json["readsetIds"] = readsetIds;
+ if (readGroupSetIds != null) {
+ _json["readGroupSetIds"] = readGroupSetIds;
}
if (referenceNames != null) {
_json["referenceNames"] = referenceNames;
@@ -2379,15 +2914,15 @@ class ExportReadsetsRequest {
}
-/** The readset export response. */
-class ExportReadsetsResponse {
+/** The read group set export response. */
+class ExportReadGroupSetsResponse {
/** A job ID that can be used to get status information. */
core.String jobId;
- ExportReadsetsResponse();
+ ExportReadGroupSetsResponse();
- ExportReadsetsResponse.fromJson(core.Map _json) {
+ ExportReadGroupSetsResponse.fromJson(core.Map _json) {
if (_json.containsKey("jobId")) {
jobId = _json["jobId"];
}
@@ -2404,7 +2939,7 @@ class ExportReadsetsResponse {
/** The variant data export request. */
-class ExportVariantsRequest {
+class ExportVariantSetRequest {
/**
* The BigQuery dataset to export data to. Note that this is distinct from the
* Genomics concept of "dataset".
@@ -2426,7 +2961,7 @@ class ExportVariantsRequest {
/**
* The format for the exported data.
* Possible string values are:
- * - "bigquery"
+ * - "BIGQUERY"
*/
core.String format;
@@ -2435,18 +2970,12 @@ class ExportVariantsRequest {
* The caller must have WRITE access to this project. This project will also
* own the resulting export job.
*/
- core.String projectId;
-
- /**
- * Required. The ID of the variant set that contains variant data which should
- * be exported. The caller must have READ access to this variant set.
- */
- core.String variantSetId;
+ core.String projectNumber;
- ExportVariantsRequest();
+ ExportVariantSetRequest();
- ExportVariantsRequest.fromJson(core.Map _json) {
+ ExportVariantSetRequest.fromJson(core.Map _json) {
if (_json.containsKey("bigqueryDataset")) {
bigqueryDataset = _json["bigqueryDataset"];
}
@@ -2459,11 +2988,8 @@ class ExportVariantsRequest {
if (_json.containsKey("format")) {
format = _json["format"];
}
- if (_json.containsKey("projectId")) {
- projectId = _json["projectId"];
- }
- if (_json.containsKey("variantSetId")) {
- variantSetId = _json["variantSetId"];
+ if (_json.containsKey("projectNumber")) {
+ projectNumber = _json["projectNumber"];
}
}
@@ -2475,229 +3001,139 @@ class ExportVariantsRequest {
if (bigqueryTable != null) {
_json["bigqueryTable"] = bigqueryTable;
}
- if (callSetIds != null) {
- _json["callSetIds"] = callSetIds;
- }
- if (format != null) {
- _json["format"] = format;
- }
- if (projectId != null) {
- _json["projectId"] = projectId;
- }
- if (variantSetId != null) {
- _json["variantSetId"] = variantSetId;
- }
- return _json;
- }
-}
-
-
-/** The variant data export response. */
-class ExportVariantsResponse {
- /** A job ID that can be used to get status information. */
- core.String jobId;
-
-
- ExportVariantsResponse();
-
- ExportVariantsResponse.fromJson(core.Map _json) {
- if (_json.containsKey("jobId")) {
- jobId = _json["jobId"];
- }
- }
-
- core.Map toJson() {
- var _json = new core.Map();
- if (jobId != null) {
- _json["jobId"] = jobId;
- }
- return _json;
- }
-}
-
-
-/**
- * An inclusive, exclusive genomic coordinate range over a reference sequence.
- */
-class GenomicRange {
- /**
- * The end position of the range on the reference, 1-based exclusive. If
- * specified, sequenceName must also be specified.
- */
- core.String sequenceEnd;
-
- /** The reference sequence name, for example chr1, 1, or chrX. */
- core.String sequenceName;
-
- /**
- * The start position of the range on the reference, 1-based inclusive. If
- * specified, sequenceName must also be specified.
- */
- core.String sequenceStart;
-
-
- GenomicRange();
-
- GenomicRange.fromJson(core.Map _json) {
- if (_json.containsKey("sequenceEnd")) {
- sequenceEnd = _json["sequenceEnd"];
- }
- if (_json.containsKey("sequenceName")) {
- sequenceName = _json["sequenceName"];
- }
- if (_json.containsKey("sequenceStart")) {
- sequenceStart = _json["sequenceStart"];
- }
- }
-
- core.Map toJson() {
- var _json = new core.Map();
- if (sequenceEnd != null) {
- _json["sequenceEnd"] = sequenceEnd;
+ if (callSetIds != null) {
+ _json["callSetIds"] = callSetIds;
}
- if (sequenceName != null) {
- _json["sequenceName"] = sequenceName;
+ if (format != null) {
+ _json["format"] = format;
}
- if (sequenceStart != null) {
- _json["sequenceStart"] = sequenceStart;
+ if (projectNumber != null) {
+ _json["projectNumber"] = projectNumber;
}
return _json;
}
}
-/** Not documented yet. */
-class Header {
- /** (SO) Sorting order of alignments. */
- core.String sortingOrder;
-
- /** (VN) BAM format version. */
- core.String version;
+/** The variant data export response. */
+class ExportVariantSetResponse {
+ /** A job ID that can be used to get status information. */
+ core.String jobId;
- Header();
+ ExportVariantSetResponse();
- Header.fromJson(core.Map _json) {
- if (_json.containsKey("sortingOrder")) {
- sortingOrder = _json["sortingOrder"];
- }
- if (_json.containsKey("version")) {
- version = _json["version"];
+ ExportVariantSetResponse.fromJson(core.Map _json) {
+ if (_json.containsKey("jobId")) {
+ jobId = _json["jobId"];
}
}
core.Map toJson() {
var _json = new core.Map();
- if (sortingOrder != null) {
- _json["sortingOrder"] = sortingOrder;
- }
- if (version != null) {
- _json["version"] = version;
+ if (jobId != null) {
+ _json["jobId"] = jobId;
}
return _json;
}
}
-/** The header section of the BAM/SAM file. */
-class HeaderSection {
- /** (@CO) One-line text comments. */
- core.List<core.String> comments;
+/** Not documented yet. */
+class FastqMetadata {
+ /** Optionally specifies the library name for alignment from FASTQ. */
+ core.String libraryName;
/**
- * [Deprecated] This field is deprecated and will no longer be populated.
- * Please use filename instead.
+ * Optionally specifies the platform name for alignment from FASTQ. For
+ * example: CAPILLARY, LS454, ILLUMINA, SOLID, HELICOS, IONTORRENT, PACBIO.
*/
- core.String fileUri;
-
- /** The name of the file from which this data was imported. */
- core.String filename;
-
- /** (@HD) The header line. */
- core.List<Header> headers;
+ core.String platformName;
- /** (@PG) Programs. */
- core.List<Program> programs;
+ /**
+ * Optionally specifies the platform unit for alignment from FASTQ. For
+ * example: flowcell-barcode.lane for Illumina or slide for SOLID.
+ */
+ core.String platformUnit;
- /** (@RG) Read group. */
- core.List<ReadGroup> readGroups;
+ /** Optionally specifies the read group name for alignment from FASTQ. */
+ core.String readGroupName;
- /** (@SQ) Reference sequence dictionary. */
- core.List<ReferenceSequence> refSequences;
+ /** Optionally specifies the sample name for alignment from FASTQ. */
+ core.String sampleName;
- HeaderSection();
+ FastqMetadata();
- HeaderSection.fromJson(core.Map _json) {
- if (_json.containsKey("comments")) {
- comments = _json["comments"];
+ FastqMetadata.fromJson(core.Map _json) {
+ if (_json.containsKey("libraryName")) {
+ libraryName = _json["libraryName"];
}
- if (_json.containsKey("fileUri")) {
- fileUri = _json["fileUri"];
+ if (_json.containsKey("platformName")) {
+ platformName = _json["platformName"];
}
- if (_json.containsKey("filename")) {
- filename = _json["filename"];
- }
- if (_json.containsKey("headers")) {
- headers = _json["headers"].map((value) => new Header.fromJson(value)).toList();
- }
- if (_json.containsKey("programs")) {
- programs = _json["programs"].map((value) => new Program.fromJson(value)).toList();
+ if (_json.containsKey("platformUnit")) {
+ platformUnit = _json["platformUnit"];
}
- if (_json.containsKey("readGroups")) {
- readGroups = _json["readGroups"].map((value) => new ReadGroup.fromJson(value)).toList();
+ if (_json.containsKey("readGroupName")) {
+ readGroupName = _json["readGroupName"];
}
- if (_json.containsKey("refSequences")) {
- refSequences = _json["refSequences"].map((value) => new ReferenceSequence.fromJson(value)).toList();
+ if (_json.containsKey("sampleName")) {
+ sampleName = _json["sampleName"];
}
}
core.Map toJson() {
var _json = new core.Map();
- if (comments != null) {
- _json["comments"] = comments;
- }
- if (fileUri != null) {
- _json["fileUri"] = fileUri;
- }
- if (filename != null) {
- _json["filename"] = filename;
+ if (libraryName != null) {
+ _json["libraryName"] = libraryName;
}
- if (headers != null) {
- _json["headers"] = headers.map((value) => (value).toJson()).toList();
+ if (platformName != null) {
+ _json["platformName"] = platformName;
}
- if (programs != null) {
- _json["programs"] = programs.map((value) => (value).toJson()).toList();
+ if (platformUnit != null) {
+ _json["platformUnit"] = platformUnit;
}
- if (readGroups != null) {
- _json["readGroups"] = readGroups.map((value) => (value).toJson()).toList();
+ if (readGroupName != null) {
+ _json["readGroupName"] = readGroupName;
}
- if (refSequences != null) {
- _json["refSequences"] = refSequences.map((value) => (value).toJson()).toList();
+ if (sampleName != null) {
+ _json["sampleName"] = sampleName;
}
return _json;
}
}
-/** The readset import request. */
-class ImportReadsetsRequest {
+/** The read group set import request. */
+class ImportReadGroupSetsRequest {
/**
- * Required. The ID of the dataset these readsets will belong to. The caller
- * must have WRITE permissions to this dataset.
+ * Required. The ID of the dataset these read group sets will belong to. The
+ * caller must have WRITE permissions to this dataset.
*/
core.String datasetId;
+ /**
+   * The reference set to which the imported read group sets are aligned, if
+   * any. The reference names of this reference set must be a superset of those
+ * found in the imported file headers. If no reference set id is provided, a
+ * best effort is made to associate with a matching reference set.
+ */
+ core.String referenceSetId;
+
/** A list of URIs pointing at BAM files in Google Cloud Storage. */
core.List<core.String> sourceUris;
- ImportReadsetsRequest();
+ ImportReadGroupSetsRequest();
- ImportReadsetsRequest.fromJson(core.Map _json) {
+ ImportReadGroupSetsRequest.fromJson(core.Map _json) {
if (_json.containsKey("datasetId")) {
datasetId = _json["datasetId"];
}
+ if (_json.containsKey("referenceSetId")) {
+ referenceSetId = _json["referenceSetId"];
+ }
if (_json.containsKey("sourceUris")) {
sourceUris = _json["sourceUris"];
}
@@ -2708,6 +3144,9 @@ class ImportReadsetsRequest {
if (datasetId != null) {
_json["datasetId"] = datasetId;
}
+ if (referenceSetId != null) {
+ _json["referenceSetId"] = referenceSetId;
+ }
if (sourceUris != null) {
_json["sourceUris"] = sourceUris;
}
@@ -2716,15 +3155,15 @@ class ImportReadsetsRequest {
}
-/** The readset import response. */
-class ImportReadsetsResponse {
+/** The read group set import response. */
+class ImportReadGroupSetsResponse {
/** A job ID that can be used to get status information. */
core.String jobId;
- ImportReadsetsResponse();
+ ImportReadGroupSetsResponse();
- ImportReadsetsResponse.fromJson(core.Map _json) {
+ ImportReadGroupSetsResponse.fromJson(core.Map _json) {
if (_json.containsKey("jobId")) {
jobId = _json["jobId"];
}
@@ -2745,8 +3184,8 @@ class ImportVariantsRequest {
/**
* The format of the variant data being imported.
* Possible string values are:
- * - "completeGenomics"
- * - "vcf"
+ * - "COMPLETE_GENOMICS"
+ * - "VCF"
*/
core.String format;
@@ -2756,9 +3195,6 @@ class ImportVariantsRequest {
*/
core.List<core.String> sourceUris;
- /** Required. The variant set to which variant data should be imported. */
- core.String variantSetId;
-
ImportVariantsRequest();
@@ -2769,9 +3205,6 @@ class ImportVariantsRequest {
if (_json.containsKey("sourceUris")) {
sourceUris = _json["sourceUris"];
}
- if (_json.containsKey("variantSetId")) {
- variantSetId = _json["variantSetId"];
- }
}
core.Map toJson() {
@@ -2782,9 +3215,6 @@ class ImportVariantsRequest {
if (sourceUris != null) {
_json["sourceUris"] = sourceUris;
}
- if (variantSetId != null) {
- _json["variantSetId"] = variantSetId;
- }
return _json;
}
}
@@ -2814,6 +3244,46 @@ class ImportVariantsResponse {
}
+/** Describes an interleaved FASTQ file source for alignment. */
+class InterleavedFastqSource {
+ /**
+ * Optionally specifies the metadata to be associated with the final aligned
+ * read group set.
+ */
+ FastqMetadata metadata;
+
+ /**
+ * A list of URIs pointing at interleaved FASTQ files in Google Cloud Storage
+ * which will be aligned. The caller must have READ permissions for these
+ * files.
+ */
+ core.List<core.String> sourceUris;
+
+
+ InterleavedFastqSource();
+
+ InterleavedFastqSource.fromJson(core.Map _json) {
+ if (_json.containsKey("metadata")) {
+ metadata = new FastqMetadata.fromJson(_json["metadata"]);
+ }
+ if (_json.containsKey("sourceUris")) {
+ sourceUris = _json["sourceUris"];
+ }
+ }
+
+ core.Map toJson() {
+ var _json = new core.Map();
+ if (metadata != null) {
+ _json["metadata"] = (metadata).toJson();
+ }
+ if (sourceUris != null) {
+ _json["sourceUris"] = sourceUris;
+ }
+ return _json;
+ }
+}
+
+
/**
* A Job represents an ongoing process that can be monitored for status
* information.
@@ -2823,7 +3293,7 @@ class Job {
core.String created;
/** A more detailed description of this job's current status. */
- core.String description;
+ core.String detailedStatus;
/** Any errors that occurred during processing. */
core.List<core.String> errors;
@@ -2840,7 +3310,7 @@ class Job {
/**
* The Google Developers Console project number to which this job belongs.
*/
- core.String projectId;
+ core.String projectNumber;
/** A summarized representation of the original service request. */
JobRequest request;
@@ -2848,13 +3318,13 @@ class Job {
/**
* The status of this job.
* Possible string values are:
- * - "canceled"
- * - "failure"
- * - "new"
- * - "pending"
- * - "running"
- * - "success"
- * - "unknownStatus"
+ * - "CANCELED"
+ * - "FAILURE"
+ * - "NEW"
+ * - "PENDING"
+ * - "RUNNING"
+ * - "SUCCESS"
+ * - "UNKNOWN_STATUS"
*/
core.String status;
@@ -2868,8 +3338,8 @@ class Job {
if (_json.containsKey("created")) {
created = _json["created"];
}
- if (_json.containsKey("description")) {
- description = _json["description"];
+ if (_json.containsKey("detailedStatus")) {
+ detailedStatus = _json["detailedStatus"];
}
if (_json.containsKey("errors")) {
errors = _json["errors"];
@@ -2880,8 +3350,8 @@ class Job {
if (_json.containsKey("importedIds")) {
importedIds = _json["importedIds"];
}
- if (_json.containsKey("projectId")) {
- projectId = _json["projectId"];
+ if (_json.containsKey("projectNumber")) {
+ projectNumber = _json["projectNumber"];
}
if (_json.containsKey("request")) {
request = new JobRequest.fromJson(_json["request"]);
@@ -2899,8 +3369,8 @@ class Job {
if (created != null) {
_json["created"] = created;
}
- if (description != null) {
- _json["description"] = description;
+ if (detailedStatus != null) {
+ _json["detailedStatus"] = detailedStatus;
}
if (errors != null) {
_json["errors"] = errors;
@@ -2911,8 +3381,8 @@ class Job {
if (importedIds != null) {
_json["importedIds"] = importedIds;
}
- if (projectId != null) {
- _json["projectId"] = projectId;
+ if (projectNumber != null) {
+ _json["projectNumber"] = projectNumber;
}
if (request != null) {
_json["request"] = (request).toJson();
@@ -2945,12 +3415,14 @@ class JobRequest {
/**
* The original request type.
* Possible string values are:
- * - "experimentalCreateJob"
- * - "exportReadsets"
- * - "exportVariants"
- * - "importReadsets"
- * - "importVariants"
- * - "unknownType"
+ * - "ALIGN_READSETS"
+ * - "CALL_READSETS"
+ * - "EXPERIMENTAL_CREATE_JOB"
+ * - "EXPORT_READSETS"
+ * - "EXPORT_VARIANTS"
+ * - "IMPORT_READSETS"
+ * - "IMPORT_VARIANTS"
+ * - "UNKNOWN_TYPE"
*/
core.String type;
@@ -2985,6 +3457,106 @@ class JobRequest {
}
+/**
+ * A linear alignment can be represented by one CIGAR string. Describes the
+ * mapped position and local alignment of the read to the reference.
+ */
+class LinearAlignment {
+ /**
+ * Represents the local alignment of this sequence (alignment matches, indels,
+ * etc) against the reference.
+ */
+ core.List<CigarUnit> cigar;
+
+ /**
+ * The mapping quality of this alignment. Represents how likely the read maps
+ * to this position as opposed to other locations.
+ */
+ core.int mappingQuality;
+
+ /** The position of this alignment. */
+ Position position;
+
+
+ LinearAlignment();
+
+ LinearAlignment.fromJson(core.Map _json) {
+ if (_json.containsKey("cigar")) {
+ cigar = _json["cigar"].map((value) => new CigarUnit.fromJson(value)).toList();
+ }
+ if (_json.containsKey("mappingQuality")) {
+ mappingQuality = _json["mappingQuality"];
+ }
+ if (_json.containsKey("position")) {
+ position = new Position.fromJson(_json["position"]);
+ }
+ }
+
+ core.Map toJson() {
+ var _json = new core.Map();
+ if (cigar != null) {
+ _json["cigar"] = cigar.map((value) => (value).toJson()).toList();
+ }
+ if (mappingQuality != null) {
+ _json["mappingQuality"] = mappingQuality;
+ }
+ if (position != null) {
+ _json["position"] = (position).toJson();
+ }
+ return _json;
+ }
+}
+
+
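A hedged sketch of working with a linear alignment: the reference end coordinate can be derived by adding the lengths of the CIGAR operations that consume reference bases (matches, mismatches, deletions, and skips, per the usual SAM convention; that convention is an assumption, not stated in this file). Coordinates and operation lengths are int64 values carried as strings, hence the parsing.

import "package:googleapis_beta/genomics/v1beta2.dart";

// Operations that advance the reference coordinate (assumed SAM semantics).
const List<String> referenceConsumingOps = const [
  "ALIGNMENT_MATCH", "SEQUENCE_MATCH", "SEQUENCE_MISMATCH", "DELETE", "SKIP"
];

/// Returns the 0-based exclusive end position of the alignment on the
/// reference, given its 0-based start position and CIGAR.
int alignmentEnd(LinearAlignment alignment) {
  int end = int.parse(alignment.position.position);
  for (var unit in alignment.cigar) {
    if (referenceConsumingOps.contains(unit.operation)) {
      end += int.parse(unit.operationLength);
    }
  }
  return end;
}
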
+/** Not documented yet. */
+class ListBasesResponse {
+ /**
+ * The continuation token, which is used to page through large result sets.
+ * Provide this value in a subsequent request to return the next page of
+ * results. This field will be empty if there aren't any additional results.
+ */
+ core.String nextPageToken;
+
+ /**
+ * The offset position (0-based) of the given sequence from the start of this
+ * Reference. This value will differ for each page in a paginated request.
+ */
+ core.String offset;
+
+ /** A substring of the bases that make up this reference. */
+ core.String sequence;
+
+
+ ListBasesResponse();
+
+ ListBasesResponse.fromJson(core.Map _json) {
+ if (_json.containsKey("nextPageToken")) {
+ nextPageToken = _json["nextPageToken"];
+ }
+ if (_json.containsKey("offset")) {
+ offset = _json["offset"];
+ }
+ if (_json.containsKey("sequence")) {
+ sequence = _json["sequence"];
+ }
+ }
+
+ core.Map toJson() {
+ var _json = new core.Map();
+ if (nextPageToken != null) {
+ _json["nextPageToken"] = nextPageToken;
+ }
+ if (offset != null) {
+ _json["offset"] = offset;
+ }
+ if (sequence != null) {
+ _json["sequence"] = sequence;
+ }
+ return _json;
+ }
+}
+
+
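A small sketch of consuming this paginated response: each page carries a slice of the sequence plus its offset, and nextPageToken drives the next request; an empty token means the listing is complete. The helper name is illustrative and the page-fetching call itself is omitted.

import "package:googleapis_beta/genomics/v1beta2.dart";

/// Appends one page of bases to [bases] and returns the token to pass to the
/// next request, or null when there are no further pages.
String appendBasesPage(StringBuffer bases, ListBasesResponse page) {
  bases.write(page.sequence);
  bool hasMore =
      page.nextPageToken != null && page.nextPageToken.isNotEmpty;
  return hasMore ? page.nextPageToken : null;
}
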
/** Not documented yet. */
class ListCoverageBucketsResponse {
/**
@@ -3134,12 +3706,12 @@ class Metadata {
* The type of data. Possible types include: Integer, Float, Flag, Character,
* and String.
* Possible string values are:
- * - "character"
- * - "flag"
- * - "float"
- * - "integer"
- * - "string"
- * - "unknownType"
+ * - "CHARACTER"
+ * - "FLAG"
+ * - "FLOAT"
+ * - "INTEGER"
+ * - "STRING"
+ * - "UNKNOWN_TYPE"
*/
core.String type;
@@ -3201,60 +3773,159 @@ class Metadata {
}
-/** Not documented yet. */
-class Program {
- /** (CL) Command line. */
- core.String commandLine;
-
- /** (ID) Program record identifier. */
- core.String id;
+/** Describes a paired-end FASTQ file source for alignment. */
+class PairedFastqSource {
+ /**
+ * A list of URIs pointing at paired end FASTQ files in Google Cloud Storage
+ * which will be aligned. The first of each paired file should be specified
+ * here, in an order that matches the second of each paired file specified in
+ * secondSourceUris. For example: firstSourceUris: [file1_1.fq, file2_1.fq],
+ * secondSourceUris: [file1_2.fq, file2_2.fq]. The caller must have READ
+ * permissions for these files.
+ */
+ core.List<core.String> firstSourceUris;
- /** (PN) Program name. */
- core.String name;
+ /**
+ * Optionally specifies the metadata to be associated with the final aligned
+ * read group set.
+ */
+ FastqMetadata metadata;
- /** (PP) Previous program ID. */
- core.String prevProgramId;
+ /**
+ * A list of URIs pointing at paired end FASTQ files in Google Cloud Storage
+ * which will be aligned. The second of each paired file should be specified
+ * here, in an order that matches the first of each paired file specified in
+ * firstSourceUris. For example: firstSourceUris: [file1_1.fq, file2_1.fq],
+ * secondSourceUris: [file1_2.fq, file2_2.fq]. The caller must have READ
+ * permissions for these files.
+ */
+ core.List<core.String> secondSourceUris;
- /** (VN) Program version. */
- core.String version;
+ PairedFastqSource();
- Program();
+ PairedFastqSource.fromJson(core.Map _json) {
+ if (_json.containsKey("firstSourceUris")) {
+ firstSourceUris = _json["firstSourceUris"];
+ }
+ if (_json.containsKey("metadata")) {
+ metadata = new FastqMetadata.fromJson(_json["metadata"]);
+ }
+ if (_json.containsKey("secondSourceUris")) {
+ secondSourceUris = _json["secondSourceUris"];
+ }
+ }
- Program.fromJson(core.Map _json) {
- if (_json.containsKey("commandLine")) {
- commandLine = _json["commandLine"];
+ core.Map toJson() {
+ var _json = new core.Map();
+ if (firstSourceUris != null) {
+ _json["firstSourceUris"] = firstSourceUris;
}
- if (_json.containsKey("id")) {
- id = _json["id"];
+ if (metadata != null) {
+ _json["metadata"] = (metadata).toJson();
}
- if (_json.containsKey("name")) {
- name = _json["name"];
+ if (secondSourceUris != null) {
+ _json["secondSourceUris"] = secondSourceUris;
}
- if (_json.containsKey("prevProgramId")) {
- prevProgramId = _json["prevProgramId"];
+ return _json;
+ }
+}
+
+
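A minimal sketch of the pairing rule above: firstSourceUris and secondSourceUris must be index-aligned so that each second file is the mate of the first file at the same position. The bucket paths and sample name are placeholders, and the package import path is assumed.

import "package:googleapis_beta/genomics/v1beta2.dart";

PairedFastqSource buildPairedSource() {
  return new PairedFastqSource()
    ..firstSourceUris = [
      "gs://my-bucket/sample1_1.fq",
      "gs://my-bucket/sample2_1.fq"
    ]
    ..secondSourceUris = [
      "gs://my-bucket/sample1_2.fq", // mate of sample1_1.fq
      "gs://my-bucket/sample2_2.fq"  // mate of sample2_1.fq
    ]
    ..metadata = (new FastqMetadata()..sampleName = "NA12345");
}
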
+/**
+ * An abstraction for referring to a genomic position, in relation to some
+ * already known reference. For now, represents a genomic position as a
+ * reference name, a base number on that reference (0-based), and a
+ * determination of forward or reverse strand.
+ */
+class Position {
+ /**
+ * The 0-based offset from the start of the forward strand for that reference.
+ */
+ core.String position;
+
+ /** The name of the reference in whatever reference set is being used. */
+ core.String referenceName;
+
+ /**
+ * Whether this position is on the reverse strand, as opposed to the forward
+ * strand.
+ */
+ core.bool reverseStrand;
+
+
+ Position();
+
+ Position.fromJson(core.Map _json) {
+ if (_json.containsKey("position")) {
+ position = _json["position"];
}
- if (_json.containsKey("version")) {
- version = _json["version"];
+ if (_json.containsKey("referenceName")) {
+ referenceName = _json["referenceName"];
+ }
+ if (_json.containsKey("reverseStrand")) {
+ reverseStrand = _json["reverseStrand"];
}
}
core.Map toJson() {
var _json = new core.Map();
- if (commandLine != null) {
- _json["commandLine"] = commandLine;
+ if (position != null) {
+ _json["position"] = position;
}
- if (id != null) {
- _json["id"] = id;
+ if (referenceName != null) {
+ _json["referenceName"] = referenceName;
}
- if (name != null) {
- _json["name"] = name;
+ if (reverseStrand != null) {
+ _json["reverseStrand"] = reverseStrand;
+ }
+ return _json;
+ }
+}
+
+
+/** A 0-based half-open genomic coordinate range over a reference sequence. */
+class Range {
+ /**
+ * The end position of the range on the reference, 0-based exclusive. If
+ * specified, referenceName must also be specified.
+ */
+ core.String end;
+
+ /** The reference sequence name, for example chr1, 1, or chrX. */
+ core.String referenceName;
+
+ /**
+ * The start position of the range on the reference, 0-based inclusive. If
+ * specified, referenceName must also be specified.
+ */
+ core.String start;
+
+
+ Range();
+
+ Range.fromJson(core.Map _json) {
+ if (_json.containsKey("end")) {
+ end = _json["end"];
+ }
+ if (_json.containsKey("referenceName")) {
+ referenceName = _json["referenceName"];
+ }
+ if (_json.containsKey("start")) {
+ start = _json["start"];
+ }
+ }
+
+ core.Map toJson() {
+ var _json = new core.Map();
+ if (end != null) {
+ _json["end"] = end;
}
- if (prevProgramId != null) {
- _json["prevProgramId"] = prevProgramId;
+ if (referenceName != null) {
+ _json["referenceName"] = referenceName;
}
- if (version != null) {
- _json["version"] = version;
+ if (start != null) {
+ _json["start"] = start;
}
return _json;
}
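A small sketch combining the two types above: Range is 0-based and half-open, so a Position falls inside it when start <= position < end on the same reference. Coordinates are int64 values carried as strings, hence the parsing; the sketch assumes a fully populated Range and the package import path shown.

import "package:googleapis_beta/genomics/v1beta2.dart";

bool rangeContains(Range range, Position position) {
  if (range.referenceName != position.referenceName) return false;
  int start = int.parse(range.start);
  int end = int.parse(range.end); // exclusive
  int pos = int.parse(position.position);
  return pos >= start && pos < end;
}
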
@@ -3262,345 +3933,527 @@ class Program {
/**
- * A Read is a group of bases that may or may not have been aligned to a
- * reference. It contains quality information and other metadata.
+ * A read alignment describes a linear alignment of a string of DNA to a
+ * reference sequence, in addition to metadata about the fragment (the molecule
+ * of DNA sequenced) and the read (the bases which were read by the sequencer).
+ * A read is equivalent to a line in a SAM file. A read belongs to exactly one
+ * read group and exactly one read group set.
*/
class Read {
/**
- * The originalBases after the cigar field has been applied. Deletions are
- * represented with '-' and insertions are omitted.
+ * The quality of the read sequence contained in this alignment record.
+ * alignedSequence and alignedQuality may be shorter than the full read
+ * sequence and quality. This will occur if the alignment is part of a
+ * chimeric alignment, or if the read was trimmed. When this occurs, the CIGAR
+ * for this read will begin/end with a hard clip operator that will indicate
+ * the length of the excised sequence.
*/
- core.String alignedBases;
+ core.List<core.int> alignedQuality;
/**
- * Represents the quality of each base in this read. Each character represents
- * one base. To get the quality, take the ASCII value of the character and
- * subtract 33. (QUAL)
+ * The bases of the read sequence contained in this alignment record.
+ * alignedSequence and alignedQuality may be shorter than the full read
+ * sequence and quality. This will occur if the alignment is part of a
+ * chimeric alignment, or if the read was trimmed. When this occurs, the CIGAR
+ * for this read will begin/end with a hard clip operator that will indicate
+ * the length of the excised sequence.
*/
- core.String baseQuality;
+ core.String alignedSequence;
/**
- * A condensed representation of how this read matches up to the reference.
- * (CIGAR)
+ * The linear alignment for this alignment record. This field will be null if
+ * the read is unmapped.
*/
- core.String cigar;
+ LinearAlignment alignment;
+
+ /** The fragment is a PCR or optical duplicate (SAM flag 0x400) */
+ core.bool duplicateFragment;
+
+ /** SAM flag 0x200 */
+ core.bool failedVendorQualityChecks;
+
+ /** The observed length of the fragment, equivalent to TLEN in SAM. */
+ core.int fragmentLength;
+
+ /** The fragment name. Equivalent to QNAME (query template name) in SAM. */
+ core.String fragmentName;
/**
- * Each bit of this number has a different meaning if enabled. See the full
- * BAM spec for more details. (FLAG)
+ * The unique ID for this read. This is a generated unique ID, not to be
+ * confused with fragmentName.
*/
- core.int flags;
-
- /** The Google generated ID of the read, immutable. */
core.String id;
+ /** A map of additional read alignment information. */
+ core.Map<core.String, core.List<core.String>> info;
+
/**
- * A score up to 255 that represents how likely this read's aligned position
- * is to be correct. A higher value is better. (MAPQ)
+ * The mapping of the primary alignment of the (readNumber+1)%numberReads read
+ * in the fragment. It replaces mate position and mate strand in SAM.
*/
- core.int mappingQuality;
+ Position nextMatePosition;
- /** The 1-based start position of the paired read. (PNEXT) */
- core.int matePosition;
+ /** The number of reads in the fragment (extension to SAM flag 0x1). */
+ core.int numberReads;
/**
- * The name of the sequence that the paired read is aligned to. This is
- * usually the same as referenceSequenceName. (RNEXT)
+ * The orientation and the distance between reads from the fragment are
+ * consistent with the sequencing protocol (extension to SAM flag 0x2)
*/
- core.String mateReferenceSequenceName;
+ core.bool properPlacement;
/**
- * The name of the read. When imported from a BAM file, this is the query
- * template name. (QNAME)
+ * The ID of the read group this read belongs to. (Every read must belong to
+ * exactly one read group.)
*/
- core.String name;
-
- /** The list of bases that this read represents (such as "CATCGA"). (SEQ) */
- core.String originalBases;
+ core.String readGroupId;
/**
- * The 1-based start position of the aligned read. If the first base starts at
- * the very beginning of the reference sequence, then the position would be
- * '1'. (POS)
+ * The ID of the read group set this read belongs to. (Every read must belong
+ * to exactly one read group set.)
*/
- core.int position;
-
- /** The ID of the readset this read belongs to. */
- core.String readsetId;
+ core.String readGroupSetId;
/**
- * The name of the sequence that this read is aligned to. This would be, for
- * example, 'X' for the X Chromosome or '20' for Chromosome 20. (RNAME)
+ * The read number in sequencing. 0-based and less than numberReads. This
+ * field replaces SAM flag 0x40 and 0x80.
*/
- core.String referenceSequenceName;
+ core.int readNumber;
- /** A map of additional read information. (TAG) */
- core.Map<core.String, core.List<core.String>> tags;
+ /**
+ * Whether this alignment is secondary. Equivalent to SAM flag 0x100. A
+ * secondary alignment represents an alternative to the primary alignment for
+ * this read. Aligners may return secondary alignments if a read can map
+ * ambiguously to multiple coordinates in the genome. By convention, each read
+ * has one and only one alignment where both secondaryAlignment and
+ * supplementaryAlignment are false.
+ */
+ core.bool secondaryAlignment;
/**
- * Length of the original piece of DNA that produced both this read and the
- * paired read. (TLEN)
+ * Whether this alignment is supplementary. Equivalent to SAM flag 0x800.
+ * Supplementary alignments are used in the representation of a chimeric
+ * alignment. In a chimeric alignment, a read is split into multiple linear
+ * alignments that map to different reference contigs. The first linear
+ * alignment in the read will be designated as the representative alignment;
+ * the remaining linear alignments will be designated as supplementary
+ * alignments. These alignments may have different mapping quality scores. In
+ * each linear alignment in a chimeric alignment, the read will be hard
+ * clipped. The alignedSequence and alignedQuality fields in the alignment
+ * record will only represent the bases for its respective linear alignment.
*/
- core.int templateLength;
+ core.bool supplementaryAlignment;
Read();
Read.fromJson(core.Map _json) {
- if (_json.containsKey("alignedBases")) {
- alignedBases = _json["alignedBases"];
+ if (_json.containsKey("alignedQuality")) {
+ alignedQuality = _json["alignedQuality"];
}
- if (_json.containsKey("baseQuality")) {
- baseQuality = _json["baseQuality"];
+ if (_json.containsKey("alignedSequence")) {
+ alignedSequence = _json["alignedSequence"];
}
- if (_json.containsKey("cigar")) {
- cigar = _json["cigar"];
+ if (_json.containsKey("alignment")) {
+ alignment = new LinearAlignment.fromJson(_json["alignment"]);
}
- if (_json.containsKey("flags")) {
- flags = _json["flags"];
+ if (_json.containsKey("duplicateFragment")) {
+ duplicateFragment = _json["duplicateFragment"];
+ }
+ if (_json.containsKey("failedVendorQualityChecks")) {
+ failedVendorQualityChecks = _json["failedVendorQualityChecks"];
+ }
+ if (_json.containsKey("fragmentLength")) {
+ fragmentLength = _json["fragmentLength"];
+ }
+ if (_json.containsKey("fragmentName")) {
+ fragmentName = _json["fragmentName"];
}
if (_json.containsKey("id")) {
id = _json["id"];
}
- if (_json.containsKey("mappingQuality")) {
- mappingQuality = _json["mappingQuality"];
- }
- if (_json.containsKey("matePosition")) {
- matePosition = _json["matePosition"];
+ if (_json.containsKey("info")) {
+ info = _json["info"];
}
- if (_json.containsKey("mateReferenceSequenceName")) {
- mateReferenceSequenceName = _json["mateReferenceSequenceName"];
+ if (_json.containsKey("nextMatePosition")) {
+ nextMatePosition = new Position.fromJson(_json["nextMatePosition"]);
}
- if (_json.containsKey("name")) {
- name = _json["name"];
+ if (_json.containsKey("numberReads")) {
+ numberReads = _json["numberReads"];
}
- if (_json.containsKey("originalBases")) {
- originalBases = _json["originalBases"];
+ if (_json.containsKey("properPlacement")) {
+ properPlacement = _json["properPlacement"];
}
- if (_json.containsKey("position")) {
- position = _json["position"];
+ if (_json.containsKey("readGroupId")) {
+ readGroupId = _json["readGroupId"];
}
- if (_json.containsKey("readsetId")) {
- readsetId = _json["readsetId"];
+ if (_json.containsKey("readGroupSetId")) {
+ readGroupSetId = _json["readGroupSetId"];
}
- if (_json.containsKey("referenceSequenceName")) {
- referenceSequenceName = _json["referenceSequenceName"];
+ if (_json.containsKey("readNumber")) {
+ readNumber = _json["readNumber"];
}
- if (_json.containsKey("tags")) {
- tags = _json["tags"];
+ if (_json.containsKey("secondaryAlignment")) {
+ secondaryAlignment = _json["secondaryAlignment"];
}
- if (_json.containsKey("templateLength")) {
- templateLength = _json["templateLength"];
+ if (_json.containsKey("supplementaryAlignment")) {
+ supplementaryAlignment = _json["supplementaryAlignment"];
}
}
core.Map toJson() {
var _json = new core.Map();
- if (alignedBases != null) {
- _json["alignedBases"] = alignedBases;
+ if (alignedQuality != null) {
+ _json["alignedQuality"] = alignedQuality;
}
- if (baseQuality != null) {
- _json["baseQuality"] = baseQuality;
+ if (alignedSequence != null) {
+ _json["alignedSequence"] = alignedSequence;
}
- if (cigar != null) {
- _json["cigar"] = cigar;
+ if (alignment != null) {
+ _json["alignment"] = (alignment).toJson();
+ }
+ if (duplicateFragment != null) {
+ _json["duplicateFragment"] = duplicateFragment;
+ }
+ if (failedVendorQualityChecks != null) {
+ _json["failedVendorQualityChecks"] = failedVendorQualityChecks;
+ }
+ if (fragmentLength != null) {
+ _json["fragmentLength"] = fragmentLength;
}
- if (flags != null) {
- _json["flags"] = flags;
+ if (fragmentName != null) {
+ _json["fragmentName"] = fragmentName;
}
if (id != null) {
_json["id"] = id;
}
- if (mappingQuality != null) {
- _json["mappingQuality"] = mappingQuality;
- }
- if (matePosition != null) {
- _json["matePosition"] = matePosition;
+ if (info != null) {
+ _json["info"] = info;
}
- if (mateReferenceSequenceName != null) {
- _json["mateReferenceSequenceName"] = mateReferenceSequenceName;
+ if (nextMatePosition != null) {
+ _json["nextMatePosition"] = (nextMatePosition).toJson();
}
- if (name != null) {
- _json["name"] = name;
+ if (numberReads != null) {
+ _json["numberReads"] = numberReads;
}
- if (originalBases != null) {
- _json["originalBases"] = originalBases;
+ if (properPlacement != null) {
+ _json["properPlacement"] = properPlacement;
}
- if (position != null) {
- _json["position"] = position;
+ if (readGroupId != null) {
+ _json["readGroupId"] = readGroupId;
}
- if (readsetId != null) {
- _json["readsetId"] = readsetId;
+ if (readGroupSetId != null) {
+ _json["readGroupSetId"] = readGroupSetId;
}
- if (referenceSequenceName != null) {
- _json["referenceSequenceName"] = referenceSequenceName;
+ if (readNumber != null) {
+ _json["readNumber"] = readNumber;
}
- if (tags != null) {
- _json["tags"] = tags;
+ if (secondaryAlignment != null) {
+ _json["secondaryAlignment"] = secondaryAlignment;
}
- if (templateLength != null) {
- _json["templateLength"] = templateLength;
+ if (supplementaryAlignment != null) {
+ _json["supplementaryAlignment"] = supplementaryAlignment;
}
return _json;
}
}
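Following the flag semantics above, a short sketch (helper name illustrative) of picking out a read's one primary, mapped alignment record: alignment is null for unmapped reads, and the primary record is the one where both secondaryAlignment and supplementaryAlignment are false.

import "package:googleapis_beta/genomics/v1beta2.dart";

bool isPrimaryMappedAlignment(Read read) {
  bool secondary = read.secondaryAlignment == true;
  bool supplementary = read.supplementaryAlignment == true;
  return read.alignment != null && !secondary && !supplementary;
}
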
-/** Not documented yet. */
+/**
+ * A read group is all the data that's processed the same way by the sequencer.
+ */
class ReadGroup {
- /** (DT) Date the run was produced (ISO8601 date or date/time). */
- core.String date;
+ /** The ID of the dataset this read group belongs to. */
+ core.String datasetId;
- /** (DS) Description. */
+ /** A free-form text description of this read group. */
core.String description;
+ /** The experiment used to generate this read group. */
+ ReadGroupExperiment experiment;
+
/**
- * (FO) Flow order. The array of nucleotide bases that correspond to the
- * nucleotides used for each flow of each read.
+ * The generated unique read group ID. Note: This is different than the @RG ID
+ * field in the SAM spec. For that value, see the name field.
*/
- core.String flowOrder;
-
- /** (ID) Read group identifier. */
core.String id;
+ /** A map of additional read group information. */
+ core.Map<core.String, core.List<core.String>> info;
+
/**
- * (KS) The array of nucleotide bases that correspond to the key sequence of
- * each read.
+ * The read group name. This corresponds to the @RG ID field in the SAM spec.
*/
- core.String keySequence;
-
- /** (LS) Library. */
- core.String library;
-
- /** (PU) Platform unit. */
- core.String platformUnit;
+ core.String name;
- /** (PI) Predicted median insert size. */
+ /**
+   * The predicted insert size of this read group. The insert size is the length
+   * of the sequenced DNA fragment from end-to-end, not including the adapters.
+ */
core.int predictedInsertSize;
- /** (PG) Programs used for processing the read group. */
- core.String processingProgram;
-
- /** (SM) Sample. */
- core.String sample;
+ /**
+ * The programs used to generate this read group. Programs are always
+ * identical for all read groups within a read group set. For this reason,
+ * only the first read group in a returned set will have this field populated.
+ */
+ core.List<ReadGroupProgram> programs;
- /** (CN) Name of sequencing center producing the read. */
- core.String sequencingCenterName;
+ /**
+ * The reference set the reads in this read group are aligned to. Required if
+ * there are any read alignments.
+ */
+ core.String referenceSetId;
- /** (PL) Platform/technology used to produce the reads. */
- core.String sequencingTechnology;
+ /**
+ * The sample this read group's data was generated from. Note: This is not an
+ * actual ID within this repository, but rather an identifier for a sample
+ * which may be meaningful to some external system.
+ */
+ core.String sampleId;
ReadGroup();
ReadGroup.fromJson(core.Map _json) {
- if (_json.containsKey("date")) {
- date = _json["date"];
+ if (_json.containsKey("datasetId")) {
+ datasetId = _json["datasetId"];
}
if (_json.containsKey("description")) {
description = _json["description"];
}
- if (_json.containsKey("flowOrder")) {
- flowOrder = _json["flowOrder"];
+ if (_json.containsKey("experiment")) {
+ experiment = new ReadGroupExperiment.fromJson(_json["experiment"]);
}
if (_json.containsKey("id")) {
id = _json["id"];
}
- if (_json.containsKey("keySequence")) {
- keySequence = _json["keySequence"];
- }
- if (_json.containsKey("library")) {
- library = _json["library"];
+ if (_json.containsKey("info")) {
+ info = _json["info"];
}
- if (_json.containsKey("platformUnit")) {
- platformUnit = _json["platformUnit"];
+ if (_json.containsKey("name")) {
+ name = _json["name"];
}
if (_json.containsKey("predictedInsertSize")) {
predictedInsertSize = _json["predictedInsertSize"];
}
- if (_json.containsKey("processingProgram")) {
- processingProgram = _json["processingProgram"];
- }
- if (_json.containsKey("sample")) {
- sample = _json["sample"];
+ if (_json.containsKey("programs")) {
+ programs = _json["programs"].map((value) => new ReadGroupProgram.fromJson(value)).toList();
}
- if (_json.containsKey("sequencingCenterName")) {
- sequencingCenterName = _json["sequencingCenterName"];
+ if (_json.containsKey("referenceSetId")) {
+ referenceSetId = _json["referenceSetId"];
}
- if (_json.containsKey("sequencingTechnology")) {
- sequencingTechnology = _json["sequencingTechnology"];
+ if (_json.containsKey("sampleId")) {
+ sampleId = _json["sampleId"];
}
}
core.Map toJson() {
var _json = new core.Map();
- if (date != null) {
- _json["date"] = date;
+ if (datasetId != null) {
+ _json["datasetId"] = datasetId;
}
if (description != null) {
_json["description"] = description;
}
- if (flowOrder != null) {
- _json["flowOrder"] = flowOrder;
+ if (experiment != null) {
+ _json["experiment"] = (experiment).toJson();
}
if (id != null) {
_json["id"] = id;
}
- if (keySequence != null) {
- _json["keySequence"] = keySequence;
+ if (info != null) {
+ _json["info"] = info;
+ }
+ if (name != null) {
+ _json["name"] = name;
+ }
+ if (predictedInsertSize != null) {
+ _json["predictedInsertSize"] = predictedInsertSize;
+ }
+ if (programs != null) {
+ _json["programs"] = programs.map((value) => (value).toJson()).toList();
+ }
+ if (referenceSetId != null) {
+ _json["referenceSetId"] = referenceSetId;
+ }
+ if (sampleId != null) {
+ _json["sampleId"] = sampleId;
+ }
+ return _json;
+ }
+}
+
+
+/** Not documented yet. */
+class ReadGroupExperiment {
+ /**
+ * The instrument model used as part of this experiment. This maps to
+ * sequencing technology in BAM.
+ */
+ core.String instrumentModel;
+
+ /**
+ * The library used as part of this experiment. Note: This is not an actual ID
+ * within this repository, but rather an identifier for a library which may be
+ * meaningful to some external system.
+ */
+ core.String libraryId;
+
+ /**
+ * The platform unit used as part of this experiment e.g.
+   * flowcell-barcode.lane for Illumina or slide for SOLiD. Corresponds to the
+   * @RG PU field in the SAM spec.
+   */
+ core.String platformUnit;
+
+ /** The sequencing center used as part of this experiment. */
+ core.String sequencingCenter;
+
+
+ ReadGroupExperiment();
+
+ ReadGroupExperiment.fromJson(core.Map _json) {
+ if (_json.containsKey("instrumentModel")) {
+ instrumentModel = _json["instrumentModel"];
+ }
+ if (_json.containsKey("libraryId")) {
+ libraryId = _json["libraryId"];
+ }
+ if (_json.containsKey("platformUnit")) {
+ platformUnit = _json["platformUnit"];
+ }
+ if (_json.containsKey("sequencingCenter")) {
+ sequencingCenter = _json["sequencingCenter"];
+ }
+ }
+
+ core.Map toJson() {
+ var _json = new core.Map();
+ if (instrumentModel != null) {
+ _json["instrumentModel"] = instrumentModel;
}
- if (library != null) {
- _json["library"] = library;
+ if (libraryId != null) {
+ _json["libraryId"] = libraryId;
}
if (platformUnit != null) {
_json["platformUnit"] = platformUnit;
}
- if (predictedInsertSize != null) {
- _json["predictedInsertSize"] = predictedInsertSize;
+ if (sequencingCenter != null) {
+ _json["sequencingCenter"] = sequencingCenter;
+ }
+ return _json;
+ }
+}
+
+
+/** Not documented yet. */
+class ReadGroupProgram {
+ /** The command line used to run this program. */
+ core.String commandLine;
+
+ /**
+ * The user specified locally unique ID of the program. Used along with
+ * prevProgramId to define an ordering between programs.
+ */
+ core.String id;
+
+ /** The name of the program. */
+ core.String name;
+
+ /** The ID of the program run before this one. */
+ core.String prevProgramId;
+
+ /** The version of the program run. */
+ core.String version;
+
+
+ ReadGroupProgram();
+
+ ReadGroupProgram.fromJson(core.Map _json) {
+ if (_json.containsKey("commandLine")) {
+ commandLine = _json["commandLine"];
}
- if (processingProgram != null) {
- _json["processingProgram"] = processingProgram;
+ if (_json.containsKey("id")) {
+ id = _json["id"];
+ }
+ if (_json.containsKey("name")) {
+ name = _json["name"];
+ }
+ if (_json.containsKey("prevProgramId")) {
+ prevProgramId = _json["prevProgramId"];
+ }
+ if (_json.containsKey("version")) {
+ version = _json["version"];
+ }
+ }
+
+ core.Map toJson() {
+ var _json = new core.Map();
+ if (commandLine != null) {
+ _json["commandLine"] = commandLine;
+ }
+ if (id != null) {
+ _json["id"] = id;
}
- if (sample != null) {
- _json["sample"] = sample;
+ if (name != null) {
+ _json["name"] = name;
}
- if (sequencingCenterName != null) {
- _json["sequencingCenterName"] = sequencingCenterName;
+ if (prevProgramId != null) {
+ _json["prevProgramId"] = prevProgramId;
}
- if (sequencingTechnology != null) {
- _json["sequencingTechnology"] = sequencingTechnology;
+ if (version != null) {
+ _json["version"] = version;
}
return _json;
}
}
-/** A Readset is a collection of Reads. */
-class Readset {
- /** The ID of the dataset this readset belongs to. */
+/**
+ * A read group set is a logical collection of read groups, which are
+ * collections of reads produced by a sequencer. A read group set typically
+ * models reads corresponding to one sample, sequenced one way, and aligned one
+ * way.
+ * - A read group set belongs to one dataset.
+ * - A read group belongs to one read group set.
+ * - A read belongs to one read group.
+ */
+class ReadGroupSet {
+ /** The dataset ID. */
core.String datasetId;
/**
- * File information from the original BAM import. See the BAM format
- * specification for additional information on each field.
+ * The filename of the original source file for this read group set, if any.
*/
- core.List<HeaderSection> fileData;
+ core.String filename;
- /** The Google generated ID of the readset, immutable. */
+ /** The read group set ID. */
core.String id;
- /** The readset name, typically this is the sample name. */
+ /**
+ * The read group set name. By default this will be initialized to the sample
+ * name of the sequenced data contained in this set.
+ */
core.String name;
+ /**
+ * The read groups in this set. There are typically 1-10 read groups in a read
+ * group set.
+ */
+ core.List<ReadGroup> readGroups;
+
+ /** The reference set the reads in this read group set are aligned to. */
+ core.String referenceSetId;
- Readset();
- Readset.fromJson(core.Map _json) {
+ ReadGroupSet();
+
+ ReadGroupSet.fromJson(core.Map _json) {
if (_json.containsKey("datasetId")) {
datasetId = _json["datasetId"];
}
- if (_json.containsKey("fileData")) {
- fileData = _json["fileData"].map((value) => new HeaderSection.fromJson(value)).toList();
+ if (_json.containsKey("filename")) {
+ filename = _json["filename"];
}
if (_json.containsKey("id")) {
id = _json["id"];
@@ -3608,6 +4461,12 @@ class Readset {
if (_json.containsKey("name")) {
name = _json["name"];
}
+ if (_json.containsKey("readGroups")) {
+ readGroups = _json["readGroups"].map((value) => new ReadGroup.fromJson(value)).toList();
+ }
+ if (_json.containsKey("referenceSetId")) {
+ referenceSetId = _json["referenceSetId"];
+ }
}
core.Map toJson() {
@@ -3615,15 +4474,117 @@ class Readset {
if (datasetId != null) {
_json["datasetId"] = datasetId;
}
- if (fileData != null) {
- _json["fileData"] = fileData.map((value) => (value).toJson()).toList();
+ if (filename != null) {
+ _json["filename"] = filename;
+ }
+ if (id != null) {
+ _json["id"] = id;
+ }
+ if (name != null) {
+ _json["name"] = name;
+ }
+ if (readGroups != null) {
+ _json["readGroups"] = readGroups.map((value) => (value).toJson()).toList();
+ }
+ if (referenceSetId != null) {
+ _json["referenceSetId"] = referenceSetId;
+ }
+ return _json;
+ }
+}
+
+
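A hedged sketch of the containment hierarchy above: a read points back to its read group set and read group by ID, so its ReadGroup can be resolved from the set's readGroups list. The helper returns null when the read belongs to a different set or the group is not present; the import path is assumed.

import "package:googleapis_beta/genomics/v1beta2.dart";

ReadGroup readGroupFor(ReadGroupSet readGroupSet, Read read) {
  if (read.readGroupSetId != readGroupSet.id) return null;
  return readGroupSet.readGroups.firstWhere(
      (group) => group.id == read.readGroupId,
      orElse: () => null);
}
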
+/**
+ * A reference is a canonical assembled DNA sequence, intended to act as a
+ * reference coordinate space for other genomic annotations. A single reference
+ * might represent the human chromosome 1 or mitochondrial DNA, for instance. A
+ * reference belongs to one or more reference sets.
+ */
+class Reference {
+ /** The Google generated immutable ID of the reference. */
+ core.String id;
+
+ /** The length of this reference's sequence. */
+ core.String length;
+
+ /**
+ * MD5 of the upper-case sequence excluding all whitespace characters (this is
+ * equivalent to SQ:M5 in SAM). This value is represented in lower case
+ * hexadecimal format.
+ */
+ core.String md5checksum;
+
+ /** The name of this reference, for example 22. */
+ core.String name;
+
+ /**
+ * ID from http://www.ncbi.nlm.nih.gov/taxonomy (e.g. 9606->human) if not
+ * specified by the containing reference set.
+ */
+ core.int ncbiTaxonId;
+
+ /**
+ * All known corresponding accession IDs in INSDC (GenBank/ENA/DDBJ) ideally
+ * with a version number, for example GCF_000001405.26.
+ */
+ core.List<core.String> sourceAccessions;
+
+ /**
+ * The URI from which the sequence was obtained. Specifies a FASTA format
+ * file/string with one name, sequence pair.
+ */
+ core.String sourceURI;
+
+
+ Reference();
+
+ Reference.fromJson(core.Map _json) {
+ if (_json.containsKey("id")) {
+ id = _json["id"];
+ }
+ if (_json.containsKey("length")) {
+ length = _json["length"];
+ }
+ if (_json.containsKey("md5checksum")) {
+ md5checksum = _json["md5checksum"];
}
+ if (_json.containsKey("name")) {
+ name = _json["name"];
+ }
+ if (_json.containsKey("ncbiTaxonId")) {
+ ncbiTaxonId = _json["ncbiTaxonId"];
+ }
+ if (_json.containsKey("sourceAccessions")) {
+ sourceAccessions = _json["sourceAccessions"];
+ }
+ if (_json.containsKey("sourceURI")) {
+ sourceURI = _json["sourceURI"];
+ }
+ }
+
+ core.Map toJson() {
+ var _json = new core.Map();
if (id != null) {
_json["id"] = id;
}
+ if (length != null) {
+ _json["length"] = length;
+ }
+ if (md5checksum != null) {
+ _json["md5checksum"] = md5checksum;
+ }
if (name != null) {
_json["name"] = name;
}
+ if (ncbiTaxonId != null) {
+ _json["ncbiTaxonId"] = ncbiTaxonId;
+ }
+ if (sourceAccessions != null) {
+ _json["sourceAccessions"] = sourceAccessions;
+ }
+ if (sourceURI != null) {
+ _json["sourceURI"] = sourceURI;
+ }
return _json;
}
}
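
A minimal sketch (not part of the generated diff) of how a Reference.md5checksum could be derived as described above: drop all whitespace, upper-case the sequence, and take the MD5 in lowercase hex. It assumes the current package:crypto API; the input sequence is made up.

import "dart:convert" show utf8;
import "package:crypto/crypto.dart" show md5;

String referenceMd5checksum(String sequence) {
  // Remove every whitespace character and upper-case the remaining bases.
  var normalized = sequence.replaceAll(new RegExp(r"\s+"), "").toUpperCase();
  // Digest.toString() renders the checksum as lowercase hexadecimal.
  return md5.convert(utf8.encode(normalized)).toString();
}

void main() {
  print(referenceMd5checksum("acgt acgt\nACGTACGT"));
}
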
@@ -3668,50 +4629,83 @@ class ReferenceBound {
}
-/** Not documented yet. */
-class ReferenceSequence {
- /** (AS) Genome assembly identifier. */
+/**
+ * A reference set is a set of references which typically comprise a reference
+ * assembly for a species, such as GRCh38 which is representative of the human
+ * genome. A reference set defines a common coordinate space for comparing
+ * reference-aligned experimental data. A reference set contains 1 or more
+ * references.
+ */
+class ReferenceSet {
+ /** Public ID of this reference set, such as GRCh37. */
core.String assemblyId;
- /** (LN) Reference sequence length. */
- core.int length;
+ /** Optional free text description of this reference set. */
+ core.String description;
+
+ /** The Google generated immutable ID of the reference set. */
+ core.String id;
/**
- * (M5) MD5 checksum of the sequence in the uppercase, excluding spaces but
- * including pads as *.
+ * Order-independent MD5 checksum which identifies this reference set. The
+ * checksum is computed by sorting all lower case hexadecimal
+ * reference.md5checksum strings (for all references in this set) in ascending
+ * lexicographic order, concatenating, and taking the MD5 of that value. The
+ * resulting value is represented in lower case hexadecimal format.
*/
- core.String md5Checksum;
+ core.String md5checksum;
- /** (SN) Reference sequence name. */
- core.String name;
+ /**
+ * ID from http://www.ncbi.nlm.nih.gov/taxonomy (e.g. 9606->human) indicating
+ * the species which this assembly is intended to model. Note that contained
+ * references may specify a different ncbiTaxonId, as assemblies may contain
+ * reference sequences which do not belong to the modeled species, e.g. EBV in
+ * a human reference genome.
+ */
+ core.int ncbiTaxonId;
+
+ /**
+ * The IDs of the reference objects that are part of this set.
+ * Reference.md5checksum must be unique within this set.
+ */
+ core.List<core.String> referenceIds;
- /** (SP) Species. */
- core.String species;
+ /**
+ * All known corresponding accession IDs in INSDC (GenBank/ENA/DDBJ) ideally
+ * with a version number, for example NC_000001.11.
+ */
+ core.List<core.String> sourceAccessions;
- /** (UR) URI of the sequence. */
- core.String uri;
+ /** The URI from which the references were obtained. */
+ core.String sourceURI;
- ReferenceSequence();
+ ReferenceSet();
- ReferenceSequence.fromJson(core.Map _json) {
+ ReferenceSet.fromJson(core.Map _json) {
if (_json.containsKey("assemblyId")) {
assemblyId = _json["assemblyId"];
}
- if (_json.containsKey("length")) {
- length = _json["length"];
+ if (_json.containsKey("description")) {
+ description = _json["description"];
+ }
+ if (_json.containsKey("id")) {
+ id = _json["id"];
}
- if (_json.containsKey("md5Checksum")) {
- md5Checksum = _json["md5Checksum"];
+ if (_json.containsKey("md5checksum")) {
+ md5checksum = _json["md5checksum"];
}
- if (_json.containsKey("name")) {
- name = _json["name"];
+ if (_json.containsKey("ncbiTaxonId")) {
+ ncbiTaxonId = _json["ncbiTaxonId"];
}
- if (_json.containsKey("species")) {
- species = _json["species"];
+ if (_json.containsKey("referenceIds")) {
+ referenceIds = _json["referenceIds"];
}
- if (_json.containsKey("uri")) {
- uri = _json["uri"];
+ if (_json.containsKey("sourceAccessions")) {
+ sourceAccessions = _json["sourceAccessions"];
+ }
+ if (_json.containsKey("sourceURI")) {
+ sourceURI = _json["sourceURI"];
}
}
@@ -3720,20 +4714,26 @@ class ReferenceSequence {
if (assemblyId != null) {
_json["assemblyId"] = assemblyId;
}
- if (length != null) {
- _json["length"] = length;
+ if (description != null) {
+ _json["description"] = description;
}
- if (md5Checksum != null) {
- _json["md5Checksum"] = md5Checksum;
+ if (id != null) {
+ _json["id"] = id;
}
- if (name != null) {
- _json["name"] = name;
+ if (md5checksum != null) {
+ _json["md5checksum"] = md5checksum;
+ }
+ if (ncbiTaxonId != null) {
+ _json["ncbiTaxonId"] = ncbiTaxonId;
}
- if (species != null) {
- _json["species"] = species;
+ if (referenceIds != null) {
+ _json["referenceIds"] = referenceIds;
}
- if (uri != null) {
- _json["uri"] = uri;
+ if (sourceAccessions != null) {
+ _json["sourceAccessions"] = sourceAccessions;
+ }
+ if (sourceURI != null) {
+ _json["sourceURI"] = sourceURI;
}
return _json;
}
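
A minimal sketch (not part of the generated diff) of the order-independent ReferenceSet.md5checksum described above: sort the per-reference checksums (already lowercase hex), concatenate them, and hash the result. Again assumes the current package:crypto API; the input values are placeholders, not real checksums.

import "dart:convert" show utf8;
import "package:crypto/crypto.dart" show md5;

String referenceSetMd5checksum(List<String> referenceMd5checksums) {
  // Ascending lexicographic sort makes the result independent of input order.
  var sorted = new List<String>.from(referenceMd5checksums)..sort();
  return md5.convert(utf8.encode(sorted.join())).toString();
}

void main() {
  // Both calls print the same checksum regardless of input ordering.
  print(referenceSetMd5checksum(["aaa111", "bbb222"]));
  print(referenceSetMd5checksum(["bbb222", "aaa111"]));
}
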
@@ -3856,7 +4856,7 @@ class SearchJobsRequest {
* Specifies the number of results to return in a single page. Defaults to
* 128. The maximum value is 256.
*/
- core.String maxResults;
+ core.int pageSize;
/**
* The continuation token which is used to page through large result sets. To
@@ -3866,7 +4866,7 @@ class SearchJobsRequest {
core.String pageToken;
/** Required. Only return jobs which belong to this Google Developers Console project. */
- core.String projectId;
+ core.String projectNumber;
/** Only return jobs which have a matching status. */
core.List<core.String> status;
@@ -3881,14 +4881,14 @@ class SearchJobsRequest {
if (_json.containsKey("createdBefore")) {
createdBefore = _json["createdBefore"];
}
- if (_json.containsKey("maxResults")) {
- maxResults = _json["maxResults"];
+ if (_json.containsKey("pageSize")) {
+ pageSize = _json["pageSize"];
}
if (_json.containsKey("pageToken")) {
pageToken = _json["pageToken"];
}
- if (_json.containsKey("projectId")) {
- projectId = _json["projectId"];
+ if (_json.containsKey("projectNumber")) {
+ projectNumber = _json["projectNumber"];
}
if (_json.containsKey("status")) {
status = _json["status"];
@@ -3903,14 +4903,14 @@ class SearchJobsRequest {
if (createdBefore != null) {
_json["createdBefore"] = createdBefore;
}
- if (maxResults != null) {
- _json["maxResults"] = maxResults;
+ if (pageSize != null) {
+ _json["pageSize"] = pageSize;
}
if (pageToken != null) {
_json["pageToken"] = pageToken;
}
- if (projectId != null) {
- _json["projectId"] = projectId;
+ if (projectNumber != null) {
+ _json["projectNumber"] = projectNumber;
}
if (status != null) {
_json["status"] = status;
@@ -3957,13 +4957,120 @@ class SearchJobsResponse {
}
+/** The read group set search request. */
+class SearchReadGroupSetsRequest {
+ /**
+ * Restricts this query to read group sets within the given datasets. At least
+ * one ID must be provided.
+ */
+ core.List<core.String> datasetIds;
+
+ /**
+ * Only return read group sets for which a substring of the name matches this
+ * string.
+ */
+ core.String name;
+
+ /**
+ * Specifies the number of results to return in a single page. If unspecified, it
+ * will default to 128. The maximum value is 1024.
+ */
+ core.int pageSize;
+
+ /**
+ * The continuation token, which is used to page through large result sets. To
+ * get the next page of results, set this parameter to the value of
+ * nextPageToken from the previous response.
+ */
+ core.String pageToken;
+
+
+ SearchReadGroupSetsRequest();
+
+ SearchReadGroupSetsRequest.fromJson(core.Map _json) {
+ if (_json.containsKey("datasetIds")) {
+ datasetIds = _json["datasetIds"];
+ }
+ if (_json.containsKey("name")) {
+ name = _json["name"];
+ }
+ if (_json.containsKey("pageSize")) {
+ pageSize = _json["pageSize"];
+ }
+ if (_json.containsKey("pageToken")) {
+ pageToken = _json["pageToken"];
+ }
+ }
+
+ core.Map toJson() {
+ var _json = new core.Map();
+ if (datasetIds != null) {
+ _json["datasetIds"] = datasetIds;
+ }
+ if (name != null) {
+ _json["name"] = name;
+ }
+ if (pageSize != null) {
+ _json["pageSize"] = pageSize;
+ }
+ if (pageToken != null) {
+ _json["pageToken"] = pageToken;
+ }
+ return _json;
+ }
+}
+
+
+/** The read group set search response. */
+class SearchReadGroupSetsResponse {
+ /**
+ * The continuation token, which is used to page through large result sets.
+ * Provide this value in a subsequent request to return the next page of
+ * results. This field will be empty if there aren't any additional results.
+ */
+ core.String nextPageToken;
+
+ /** The list of matching read group sets. */
+ core.List<ReadGroupSet> readGroupSets;
+
+
+ SearchReadGroupSetsResponse();
+
+ SearchReadGroupSetsResponse.fromJson(core.Map _json) {
+ if (_json.containsKey("nextPageToken")) {
+ nextPageToken = _json["nextPageToken"];
+ }
+ if (_json.containsKey("readGroupSets")) {
+ readGroupSets = _json["readGroupSets"].map((value) => new ReadGroupSet.fromJson(value)).toList();
+ }
+ }
+
+ core.Map toJson() {
+ var _json = new core.Map();
+ if (nextPageToken != null) {
+ _json["nextPageToken"] = nextPageToken;
+ }
+ if (readGroupSets != null) {
+ _json["readGroupSets"] = readGroupSets.map((value) => (value).toJson()).toList();
+ }
+ return _json;
+ }
+}
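
Illustrative sketch (not part of the generated diff): building a SearchReadGroupSetsRequest and listing the matching sets, assuming the generated ReadgroupsetsResourceApi exposes a search(request) method like the other resources in this file. The dataset ID is a placeholder.

import "dart:async";
import "package:googleapis_beta/genomics/v1beta2.dart";

Future printReadGroupSets(GenomicsApi api) async {
  var request = new SearchReadGroupSetsRequest()
    ..datasetIds = ["datasetId-456"] // placeholder dataset ID
    ..name = "NA12878"
    ..pageSize = 50;
  // Assumes readgroupsets.search(request) exists in the generated API.
  var response = await api.readgroupsets.search(request);
  for (var readGroupSet in response.readGroupSets) {
    print("${readGroupSet.id}\t${readGroupSet.name}");
  }
}
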
+
+
/** The read search request. */
class SearchReadsRequest {
/**
+ * The end position of the range on the reference, 0-based exclusive. If
+ * specified, referenceName must also be specified.
+ */
+ core.String end;
+
+ /**
* Specifies number of results to return in a single page. If unspecified, it
* will default to 256. The maximum value is 2048.
*/
- core.String maxResults;
+ core.int pageSize;
/**
* The continuation token, which is used to page through large result sets. To
@@ -3973,76 +5080,81 @@ class SearchReadsRequest {
core.String pageToken;
/**
- * The readsets within which to search for reads. At least one readset ID must
- * be provided. All specified readsets must be aligned against a common set of
- * reference sequences; this defines the genomic coordinates for the query.
+ * The IDs of the read groups within which to search for reads. All specified
+ * read groups must belong to the same read group sets. Must specify one of
+ * readGroupSetIds or readGroupIds.
*/
- core.List<core.String> readsetIds;
+ core.List<core.String> readGroupIds;
/**
- * The end position (1-based, inclusive) of the target range. If specified,
- * sequenceName must also be specified. Defaults to the end of the target
- * reference sequence, if any.
+ * The IDs of the read group sets within which to search for reads. All
+ * specified read group sets must be aligned against a common set of reference
+ * sequences; this defines the genomic coordinates for the query. Must specify
+ * one of readGroupSetIds or readGroupIds.
*/
- core.String sequenceEnd;
+ core.List<core.String> readGroupSetIds;
/**
- * Restricts the results to a particular reference sequence such as 1, chr1,
- * or X. The set of valid references sequences depends on the readsets
- * specified. If set to *, only unmapped Reads are returned.
+ * The reference sequence name, for example chr1, 1, or chrX. If set to *,
+ * only unmapped reads are returned.
*/
- core.String sequenceName;
+ core.String referenceName;
/**
- * The start position (1-based, inclusive) of the target range. If specified,
- * sequenceName must also be specified. Defaults to the start of the target
- * reference sequence, if any.
+ * The start position of the range on the reference, 0-based inclusive. If
+ * specified, referenceName must also be specified.
*/
- core.String sequenceStart;
+ core.String start;
SearchReadsRequest();
SearchReadsRequest.fromJson(core.Map _json) {
- if (_json.containsKey("maxResults")) {
- maxResults = _json["maxResults"];
+ if (_json.containsKey("end")) {
+ end = _json["end"];
+ }
+ if (_json.containsKey("pageSize")) {
+ pageSize = _json["pageSize"];
}
if (_json.containsKey("pageToken")) {
pageToken = _json["pageToken"];
}
- if (_json.containsKey("readsetIds")) {
- readsetIds = _json["readsetIds"];
+ if (_json.containsKey("readGroupIds")) {
+ readGroupIds = _json["readGroupIds"];
}
- if (_json.containsKey("sequenceEnd")) {
- sequenceEnd = _json["sequenceEnd"];
+ if (_json.containsKey("readGroupSetIds")) {
+ readGroupSetIds = _json["readGroupSetIds"];
}
- if (_json.containsKey("sequenceName")) {
- sequenceName = _json["sequenceName"];
+ if (_json.containsKey("referenceName")) {
+ referenceName = _json["referenceName"];
}
- if (_json.containsKey("sequenceStart")) {
- sequenceStart = _json["sequenceStart"];
+ if (_json.containsKey("start")) {
+ start = _json["start"];
}
}
core.Map toJson() {
var _json = new core.Map();
- if (maxResults != null) {
- _json["maxResults"] = maxResults;
+ if (end != null) {
+ _json["end"] = end;
+ }
+ if (pageSize != null) {
+ _json["pageSize"] = pageSize;
}
if (pageToken != null) {
_json["pageToken"] = pageToken;
}
- if (readsetIds != null) {
- _json["readsetIds"] = readsetIds;
+ if (readGroupIds != null) {
+ _json["readGroupIds"] = readGroupIds;
}
- if (sequenceEnd != null) {
- _json["sequenceEnd"] = sequenceEnd;
+ if (readGroupSetIds != null) {
+ _json["readGroupSetIds"] = readGroupSetIds;
}
- if (sequenceName != null) {
- _json["sequenceName"] = sequenceName;
+ if (referenceName != null) {
+ _json["referenceName"] = referenceName;
}
- if (sequenceStart != null) {
- _json["sequenceStart"] = sequenceStart;
+ if (start != null) {
+ _json["start"] = start;
}
return _json;
}
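
Illustrative sketch (not part of the generated diff): the v1beta2 read search uses 0-based, half-open coordinates (start inclusive, end exclusive) instead of the 1-based sequenceStart/sequenceEnd in v1beta, and start/end are strings because they carry 64-bit values. The read group set ID below is a placeholder.

import "package:googleapis_beta/genomics/v1beta2.dart";

void main() {
  var request = new SearchReadsRequest()
    ..readGroupSetIds = ["readGroupSetId-123"] // placeholder ID
    ..referenceName = "chr20"
    ..start = "0"    // 0-based inclusive
    ..end = "1000"   // 0-based exclusive, i.e. the first 1,000 bases
    ..pageSize = 256;
  print(request.toJson());
}
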
@@ -4052,62 +5164,165 @@ class SearchReadsRequest {
/** The read search response. */
class SearchReadsResponse {
/**
+ * The list of matching alignments sorted by mapped genomic coordinate, if
+ * any, ascending in position within the same reference. Unmapped reads, which
+ * have no position, are returned last and are further sorted in ascending
+ * lexicographic order by fragment name.
+ */
+ core.List<Read> alignments;
+
+ /**
* The continuation token, which is used to page through large result sets.
* Provide this value in a subsequent request to return the next page of
* results. This field will be empty if there aren't any additional results.
*/
core.String nextPageToken;
- /**
- * The list of matching Reads. The resulting Reads are sorted by position; the
- * smallest positions are returned first. Unmapped reads, which have no
- * position, are returned last and are further sorted alphabetically by name.
- */
- core.List<Read> reads;
-
SearchReadsResponse();
SearchReadsResponse.fromJson(core.Map _json) {
+ if (_json.containsKey("alignments")) {
+ alignments = _json["alignments"].map((value) => new Read.fromJson(value)).toList();
+ }
if (_json.containsKey("nextPageToken")) {
nextPageToken = _json["nextPageToken"];
}
- if (_json.containsKey("reads")) {
- reads = _json["reads"].map((value) => new Read.fromJson(value)).toList();
- }
}
core.Map toJson() {
var _json = new core.Map();
+ if (alignments != null) {
+ _json["alignments"] = alignments.map((value) => (value).toJson()).toList();
+ }
if (nextPageToken != null) {
_json["nextPageToken"] = nextPageToken;
}
- if (reads != null) {
- _json["reads"] = reads.map((value) => (value).toJson()).toList();
+ return _json;
+ }
+}
+
+
+/** Not documented yet. */
+class SearchReferenceSetsRequest {
+ /**
+ * If present, return reference sets for which the accession matches any of
+ * these strings. Best to give a version number, for example GCF_000001405.26.
+ * If only the main accession number is given then all records with that main
+ * accession will be returned, regardless of version. Note that different
+ * versions will have different sequences.
+ */
+ core.List<core.String> accessions;
+
+ /**
+ * If present, return reference sets for which the md5checksum matches. See
+ * ReferenceSet.md5checksum for details.
+ */
+ core.List<core.String> md5checksums;
+
+ /** Specifies the maximum number of results to return in a single page. */
+ core.int pageSize;
+
+ /**
+ * The continuation token, which is used to page through large result sets. To
+ * get the next page of results, set this parameter to the value of
+ * nextPageToken from the previous response.
+ */
+ core.String pageToken;
+
+
+ SearchReferenceSetsRequest();
+
+ SearchReferenceSetsRequest.fromJson(core.Map _json) {
+ if (_json.containsKey("accessions")) {
+ accessions = _json["accessions"];
+ }
+ if (_json.containsKey("md5checksums")) {
+ md5checksums = _json["md5checksums"];
+ }
+ if (_json.containsKey("pageSize")) {
+ pageSize = _json["pageSize"];
+ }
+ if (_json.containsKey("pageToken")) {
+ pageToken = _json["pageToken"];
+ }
+ }
+
+ core.Map toJson() {
+ var _json = new core.Map();
+ if (accessions != null) {
+ _json["accessions"] = accessions;
+ }
+ if (md5checksums != null) {
+ _json["md5checksums"] = md5checksums;
+ }
+ if (pageSize != null) {
+ _json["pageSize"] = pageSize;
+ }
+ if (pageToken != null) {
+ _json["pageToken"] = pageToken;
}
return _json;
}
}
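
Illustrative sketch (not part of the generated diff): paging through read search results with nextPageToken, assuming the generated ReadsResourceApi exposes a search(request) method.

import "dart:async";
import "package:googleapis_beta/genomics/v1beta2.dart";

Future<int> countAlignments(GenomicsApi api, SearchReadsRequest request) async {
  var total = 0;
  var pageToken = request.pageToken;
  do {
    request.pageToken = pageToken;
    // Assumes reads.search(request) exists in the generated API.
    var response = await api.reads.search(request);
    if (response.alignments != null) {
      total += response.alignments.length;
    }
    pageToken = response.nextPageToken;
  } while (pageToken != null && pageToken.isNotEmpty);
  return total;
}
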
-/** The readset search request. */
-class SearchReadsetsRequest {
+/** Not documented yet. */
+class SearchReferenceSetsResponse {
/**
- * Restricts this query to readsets within the given datasets. At least one ID
- * must be provided.
+ * The continuation token, which is used to page through large result sets.
+ * Provide this value in a subsequent request to return the next page of
+ * results. This field will be empty if there aren't any additional results.
*/
- core.List<core.String> datasetIds;
+ core.String nextPageToken;
+
+ /** The matching reference sets. */
+ core.List<ReferenceSet> referenceSets;
+
+
+ SearchReferenceSetsResponse();
+
+ SearchReferenceSetsResponse.fromJson(core.Map _json) {
+ if (_json.containsKey("nextPageToken")) {
+ nextPageToken = _json["nextPageToken"];
+ }
+ if (_json.containsKey("referenceSets")) {
+ referenceSets = _json["referenceSets"].map((value) => new ReferenceSet.fromJson(value)).toList();
+ }
+ }
+
+ core.Map toJson() {
+ var _json = new core.Map();
+ if (nextPageToken != null) {
+ _json["nextPageToken"] = nextPageToken;
+ }
+ if (referenceSets != null) {
+ _json["referenceSets"] = referenceSets.map((value) => (value).toJson()).toList();
+ }
+ return _json;
+ }
+}
+
+/** Not documented yet. */
+class SearchReferencesRequest {
/**
- * Specifies number of results to return in a single page. If unspecified, it
- * will default to 128. The maximum value is 1024.
+ * If present, return references for which the accession matches this string.
+ * Best to give a version number, for example GCF_000001405.26. If only the
+ * main accession number is given then all records with that main accession
+ * will be returned, regardless of version. Note that different versions will
+ * have different sequences.
*/
- core.String maxResults;
+ core.List<core.String> accessions;
/**
- * Only return readsets for which a substring of the name matches this string.
+ * If present, return references for which the md5checksum matches. See
+ * Reference.md5checksum for construction details.
*/
- core.String name;
+ core.List<core.String> md5checksums;
+
+ /** Specifies the maximum number of results to return in a single page. */
+ core.int pageSize;
/**
* The continuation token, which is used to page through large result sets. To
@@ -4116,45 +5331,54 @@ class SearchReadsetsRequest {
*/
core.String pageToken;
+ /** If present, return only references which belong to this reference set. */
+ core.String referenceSetId;
- SearchReadsetsRequest();
- SearchReadsetsRequest.fromJson(core.Map _json) {
- if (_json.containsKey("datasetIds")) {
- datasetIds = _json["datasetIds"];
+ SearchReferencesRequest();
+
+ SearchReferencesRequest.fromJson(core.Map _json) {
+ if (_json.containsKey("accessions")) {
+ accessions = _json["accessions"];
}
- if (_json.containsKey("maxResults")) {
- maxResults = _json["maxResults"];
+ if (_json.containsKey("md5checksums")) {
+ md5checksums = _json["md5checksums"];
}
- if (_json.containsKey("name")) {
- name = _json["name"];
+ if (_json.containsKey("pageSize")) {
+ pageSize = _json["pageSize"];
}
if (_json.containsKey("pageToken")) {
pageToken = _json["pageToken"];
}
+ if (_json.containsKey("referenceSetId")) {
+ referenceSetId = _json["referenceSetId"];
+ }
}
core.Map toJson() {
var _json = new core.Map();
- if (datasetIds != null) {
- _json["datasetIds"] = datasetIds;
+ if (accessions != null) {
+ _json["accessions"] = accessions;
}
- if (maxResults != null) {
- _json["maxResults"] = maxResults;
+ if (md5checksums != null) {
+ _json["md5checksums"] = md5checksums;
}
- if (name != null) {
- _json["name"] = name;
+ if (pageSize != null) {
+ _json["pageSize"] = pageSize;
}
if (pageToken != null) {
_json["pageToken"] = pageToken;
}
+ if (referenceSetId != null) {
+ _json["referenceSetId"] = referenceSetId;
+ }
return _json;
}
}
-/** The readset search response. */
-class SearchReadsetsResponse {
+/** Not documented yet. */
+class SearchReferencesResponse {
/**
* The continuation token, which is used to page through large result sets.
* Provide this value in a subsequent request to return the next page of
@@ -4162,18 +5386,18 @@ class SearchReadsetsResponse {
*/
core.String nextPageToken;
- /** The list of matching Readsets. */
- core.List<Readset> readsets;
+ /** The matching references. */
+ core.List<Reference> references;
- SearchReadsetsResponse();
+ SearchReferencesResponse();
- SearchReadsetsResponse.fromJson(core.Map _json) {
+ SearchReferencesResponse.fromJson(core.Map _json) {
if (_json.containsKey("nextPageToken")) {
nextPageToken = _json["nextPageToken"];
}
- if (_json.containsKey("readsets")) {
- readsets = _json["readsets"].map((value) => new Readset.fromJson(value)).toList();
+ if (_json.containsKey("references")) {
+ references = _json["references"].map((value) => new Reference.fromJson(value)).toList();
}
}
@@ -4182,8 +5406,8 @@ class SearchReadsetsResponse {
if (nextPageToken != null) {
_json["nextPageToken"] = nextPageToken;
}
- if (readsets != null) {
- _json["readsets"] = readsets.map((value) => (value).toJson()).toList();
+ if (references != null) {
+ _json["references"] = references.map((value) => (value).toJson()).toList();
}
return _json;
}
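
Illustrative sketch (not part of the generated diff): listing the references that belong to one reference set, assuming the generated ReferencesResourceApi exposes a search(request) method. The reference set ID is supplied by the caller.

import "dart:async";
import "package:googleapis_beta/genomics/v1beta2.dart";

Future printReferences(GenomicsApi api, String referenceSetId) async {
  var request = new SearchReferencesRequest()
    ..referenceSetId = referenceSetId
    ..pageSize = 100;
  // Assumes references.search(request) exists in the generated API.
  var response = await api.references.search(request);
  for (var reference in response.references) {
    print("${reference.name}\t${reference.length}\t${reference.md5checksum}");
  }
}
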
@@ -4431,9 +5655,13 @@ class SearchVariantsResponse {
/**
- * A Variant represents a change in DNA sequence relative to some reference. For
- * example, a Variant could represent a SNP or an insertion. Variants belong to
- * a variant set.
+ * A variant represents a change in DNA sequence relative to a reference
+ * sequence. For example, a variant could represent a SNP or an insertion.
+ * Variants belong to a variant set. Each of the calls on a variant represents a
+ * determination of genotype with respect to that variant. For example, a call
+ * might assign a probability of 0.32 to the occurrence of a SNP named rs1234 in a
+ * sample named NA12345. A call belongs to a call set, which contains related
+ * calls typically from one sample.
*/
class Variant {
/** The bases that appear instead of the reference bases. */
@@ -4586,8 +5814,8 @@ class Variant {
/**
- * A VariantSet represents a collection of Variants and their summary
- * statistics.
+ * A variant set is a collection of call sets and variants. It contains summary
+ * statistics of those contents. A variant set belongs to a dataset.
*/
class VariantSet {
/** The dataset to which this variant set belongs. Immutable. */
