OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. |
| 4 |
| 5 library gcloud.datastore_impl; |
| 6 |
| 7 import 'dart:async'; |
| 8 |
| 9 import 'package:http/http.dart' as http; |
| 10 |
| 11 import '../datastore.dart' as datastore; |
| 12 import '../common.dart' show Page; |
| 13 import 'package:googleapis_beta/datastore/v1beta2.dart' as api; |
| 14 |
/// Datastore-backed transaction handle.
///
/// Wraps the opaque transaction identifier string handed back by the Cloud
/// Datastore API's `beginTransaction` call; the id is later passed back to
/// the API on commit/rollback/read operations.
class TransactionImpl implements datastore.Transaction {
  /// Opaque transaction id as returned by the API.
  final String data;

  TransactionImpl(this.data);
}
| 19 |
/// Implementation of [datastore.Datastore] on top of the Google Cloud
/// Datastore v1beta2 REST API.
///
/// Converts between the package-level datastore model types and the
/// generated `googleapis_beta` API message types.
class DatastoreImpl implements datastore.Datastore {
  /// OAuth2 scopes required for talking to the Datastore API.
  static const List<String> SCOPES = const <String>[
    api.DatastoreApi.DatastoreScope,
    api.DatastoreApi.UserinfoEmailScope,
  ];

  final api.DatastoreApi _api;
  final String _project;

  DatastoreImpl(http.Client client, this._project)
      : _api = new api.DatastoreApi(client);

  /// Converts a [datastore.Key] to its API representation.
  ///
  /// The partition id is populated with the configured project (dataset) id
  /// and the key partition's namespace.
  api.Key _convertDatastore2ApiKey(datastore.Key key) {
    var apiKey = new api.Key();

    apiKey.partitionId = new api.PartitionId()
        ..datasetId = _project
        ..namespace = key.partition.namespace;

    apiKey.path = key.elements.map((datastore.KeyElement element) {
      var part = new api.KeyPathElement();
      part.kind = element.kind;
      // Integer ids are transmitted as decimal strings; string ids go into
      // the `name` field instead.
      if (element.id is int) {
        part.id = '${element.id}';
      } else if (element.id is String) {
        part.name = element.id;
      }
      return part;
    }).toList();

    return apiKey;
  }

  /// Converts an API key back into a [datastore.Key].
  ///
  /// Throws a [datastore.DatastoreError] if a path element carries neither an
  /// allocated numeric id nor a name.
  static datastore.Key _convertApi2DatastoreKey(api.Key key) {
    var elements = key.path.map((api.KeyPathElement element) {
      if (element.id != null) {
        return new datastore.KeyElement(element.kind, int.parse(element.id));
      } else if (element.name != null) {
        return new datastore.KeyElement(element.kind, element.name);
      } else {
        throw new datastore.DatastoreError(
            'Invalid server response: Expected allocated name/id.');
      }
    }).toList();

    var partition;
    if (key.partitionId != null) {
      partition = new datastore.Partition(key.partitionId.namespace);
      // TODO: assert projectId.
    }
    return new datastore.Key(elements, partition: partition);
  }

  /// Structural equality of two API keys (partition id + full path).
  bool _compareApiKey(api.Key a, api.Key b) {
    if (a.path.length != b.path.length) return false;

    // FIXME(Issue #2): Is this comparison working correctly?
    if (a.partitionId != null) {
      if (b.partitionId == null) return false;
      if (a.partitionId.datasetId != b.partitionId.datasetId) return false;
      if (a.partitionId.namespace != b.partitionId.namespace) return false;
    } else {
      if (b.partitionId != null) return false;
    }

    for (int i = 0; i < a.path.length; i++) {
      if (a.path[i].id != b.path[i].id ||
          a.path[i].name != b.path[i].name ||
          a.path[i].kind != b.path[i].kind) return false;
    }
    return true;
  }

  /// Converts a single API [api.Value] (a list element) to a Dart value.
  ///
  /// Nested lists, blob keys and entity values are rejected. Returns `null`
  /// if no supported value field is set.
  static _convertApi2DatastorePropertyValue(api.Value value) {
    if (value.booleanValue != null)
      return value.booleanValue;
    else if (value.integerValue != null)
      return int.parse(value.integerValue);
    else if (value.doubleValue != null)
      return value.doubleValue;
    else if (value.stringValue != null)
      return value.stringValue;
    else if (value.dateTimeValue != null)
      return value.dateTimeValue;
    else if (value.blobValue != null)
      return new datastore.BlobValue(value.blobValueAsBytes);
    else if (value.keyValue != null)
      return _convertApi2DatastoreKey(value.keyValue);
    else if (value.listValue != null)
      // FIXME(Issue #3): Consistently handle exceptions.
      throw new Exception('Cannot have lists inside lists.');
    else if (value.blobKeyValue != null)
      throw new UnsupportedError('Blob keys are not supported.');
    else if (value.entityValue != null)
      throw new UnsupportedError('Entity values are not supported.');
    return null;
  }

  /// Converts a Dart property value to an API [api.Value].
  ///
  /// [indexed] controls the `indexed` flag on scalar values. Lists are only
  /// allowed at the top level ([lists] is `false` for recursive element
  /// conversion, since the API does not support nested lists).
  api.Value _convertDatastore2ApiPropertyValue(
      value, bool indexed, {bool lists: true}) {
    var apiValue = new api.Value()
        ..indexed = indexed;
    if (value == null) {
      return apiValue;
    } else if (value is bool) {
      return apiValue
          ..booleanValue = value;
    } else if (value is int) {
      // Integers are transmitted as decimal strings.
      return apiValue
          ..integerValue = '$value';
    } else if (value is double) {
      return apiValue
          ..doubleValue = value;
    } else if (value is String) {
      return apiValue
          ..stringValue = value;
    } else if (value is DateTime) {
      return apiValue
          ..dateTimeValue = value;
    } else if (value is datastore.BlobValue) {
      return apiValue
          ..blobValueAsBytes = value.bytes;
    } else if (value is datastore.Key) {
      return apiValue
          ..keyValue = _convertDatastore2ApiKey(value);
    } else if (value is List) {
      if (!lists) {
        // FIXME(Issue #3): Consistently handle exceptions.
        throw new Exception('List values are not allowed.');
      }

      convertItem(i)
          => _convertDatastore2ApiPropertyValue(i, indexed, lists: false);

      // NOTE(review): the `indexed` flag is only set on the list *elements*,
      // not on the outer list value itself — presumably intentional for the
      // v1beta2 wire format; confirm before changing.
      return new api.Value()
          ..listValue = value.map(convertItem).toList();
    } else {
      throw new UnsupportedError(
          'Types ${value.runtimeType} cannot be used for serializing.');
    }
  }

  /// Converts an API [api.Property] to a Dart value.
  ///
  /// List properties are converted element-wise; blob keys and entity values
  /// are rejected. Returns `null` if no supported value field is set.
  static _convertApi2DatastoreProperty(api.Property property) {
    if (property.booleanValue != null)
      return property.booleanValue;
    else if (property.integerValue != null)
      return int.parse(property.integerValue);
    else if (property.doubleValue != null)
      return property.doubleValue;
    else if (property.stringValue != null)
      return property.stringValue;
    else if (property.dateTimeValue != null)
      return property.dateTimeValue;
    else if (property.blobValue != null)
      return new datastore.BlobValue(property.blobValueAsBytes);
    else if (property.keyValue != null)
      return _convertApi2DatastoreKey(property.keyValue);
    else if (property.listValue != null)
      return
          property.listValue.map(_convertApi2DatastorePropertyValue).toList();
    else if (property.blobKeyValue != null)
      throw new UnsupportedError('Blob keys are not supported.');
    else if (property.entityValue != null)
      throw new UnsupportedError('Entity values are not supported.');
    return null;
  }

  /// Converts a Dart property value to an API [api.Property].
  ///
  /// Mirrors [_convertDatastore2ApiPropertyValue] but produces a property
  /// message. Returns `null` for a `null` [value].
  api.Property _convertDatastore2ApiProperty(
      value, bool indexed, {bool lists: true}) {
    // Check for null before allocating: the original allocated an
    // api.Property up-front and then discarded it on the null path.
    if (value == null) {
      return null;
    }
    var apiProperty = new api.Property()
        ..indexed = indexed;
    if (value is bool) {
      return apiProperty
          ..booleanValue = value;
    } else if (value is int) {
      return apiProperty
          ..integerValue = '$value';
    } else if (value is double) {
      return apiProperty
          ..doubleValue = value;
    } else if (value is String) {
      return apiProperty
          ..stringValue = value;
    } else if (value is DateTime) {
      return apiProperty
          ..dateTimeValue = value;
    } else if (value is datastore.BlobValue) {
      return apiProperty
          ..blobValueAsBytes = value.bytes;
    } else if (value is datastore.Key) {
      return apiProperty
          ..keyValue = _convertDatastore2ApiKey(value);
    } else if (value is List) {
      if (!lists) {
        // FIXME(Issue #3): Consistently handle exceptions.
        throw new Exception('List values are not allowed.');
      }
      convertItem(i)
          => _convertDatastore2ApiPropertyValue(i, indexed, lists: false);
      // NOTE(review): like the value conversion above, the outer list
      // property does not carry the `indexed` flag — only its elements do.
      return new api.Property()..listValue = value.map(convertItem).toList();
    } else {
      throw new UnsupportedError(
          'Types ${value.runtimeType} cannot be used for serializing.');
    }
  }

  /// Converts an API entity (key + properties) into a [datastore.Entity],
  /// reconstructing which property names were unindexed.
  static datastore.Entity _convertApi2DatastoreEntity(api.Entity entity) {
    var unindexedProperties = new Set();
    var properties = {};

    if (entity.properties != null) {
      entity.properties.forEach((String name, api.Property property) {
        properties[name] = _convertApi2DatastoreProperty(property);
        if (property.indexed == false) {
          // TODO(Issue #4): Should we support mixed indexed/non-indexed list
          // values?
          if (property.listValue != null) {
            if (property.listValue.length > 0) {
              // All list elements must agree on their indexed flag; a mixed
              // list cannot be represented in the datastore model.
              var firstIndexed = property.listValue.first.indexed;
              for (int i = 1; i < property.listValue.length; i++) {
                if (property.listValue[i].indexed != firstIndexed) {
                  throw new Exception('Some list entries are indexed and some '
                                      'are not. This is currently not supported.');
                }
              }
              if (firstIndexed == false) {
                unindexedProperties.add(name);
              }
            }
          } else {
            unindexedProperties.add(name);
          }
        }
      });
    }
    return new datastore.Entity(_convertApi2DatastoreKey(entity.key),
                                properties,
                                unIndexedProperties: unindexedProperties);
  }

  /// Converts a [datastore.Entity] (key + properties + unindexed set) to
  /// its API representation.
  api.Entity _convertDatastore2ApiEntity(datastore.Entity entity) {
    var apiEntity = new api.Entity();

    apiEntity.key = _convertDatastore2ApiKey(entity.key);
    apiEntity.properties = {};
    if (entity.properties != null) {
      for (var key in entity.properties.keys) {
        var value = entity.properties[key];
        // A property is indexed unless it is listed in unIndexedProperties.
        bool indexed = false;
        if (entity.unIndexedProperties != null) {
          indexed = !entity.unIndexedProperties.contains(key);
        }
        var property = _convertDatastore2ApiPropertyValue(value, indexed);
        apiEntity.properties[key] = property;
      }
    }
    return apiEntity;
  }

  /// Maps datastore filter relations to their API operator names.
  static Map<datastore.FilterRelation, String> relationMapping = const {
    datastore.FilterRelation.LessThan: 'LESS_THAN',
    datastore.FilterRelation.LessThanOrEqual: 'LESS_THAN_OR_EQUAL',
    datastore.FilterRelation.Equal: 'EQUAL',
    datastore.FilterRelation.GreatherThan: 'GREATER_THAN',
    datastore.FilterRelation.GreatherThanOrEqual: 'GREATER_THAN_OR_EQUAL',
    // TODO(Issue #5): IN operator not supported currently.
  };

  /// Converts a [datastore.Filter] into an API property filter.
  ///
  /// `IN` filters are only supported with a single-element list and are
  /// rewritten to an `EQUAL` filter on that element.
  api.Filter _convertDatastore2ApiFilter(datastore.Filter filter) {
    var pf = new api.PropertyFilter();
    var operator = relationMapping[filter.relation];
    // FIXME(Issue #5): Is this OK?
    if (filter.relation == datastore.FilterRelation.In) {
      operator = 'EQUAL';
    }

    if (operator == null) {
      throw new ArgumentError('Unknown filter relation: ${filter.relation}.');
    }
    pf.operator = operator;
    pf.property = new api.PropertyReference()..name = filter.name;

    // FIXME(Issue #5): Is this OK?
    var value = filter.value;
    if (filter.relation == datastore.FilterRelation.In) {
      if (value is List && value.length == 1) {
        value = value.first;
      } else {
        throw new ArgumentError('List values not supported');
      }
    }

    pf.value = _convertDatastore2ApiPropertyValue(value, true, lists: false);
    return new api.Filter()..propertyFilter = pf;
  }

  /// Builds a `HAS_ANCESTOR` filter on `__key__` for the given ancestor key.
  api.Filter _convertDatastoreAncestorKey2ApiFilter(datastore.Key key) {
    var pf = new api.PropertyFilter();
    pf.operator = 'HAS_ANCESTOR';
    pf.property = new api.PropertyReference()..name = '__key__';
    pf.value = new api.Value()..keyValue = _convertDatastore2ApiKey(key);
    return new api.Filter()..propertyFilter = pf;
  }

  /// Combines property filters and an optional ancestor filter into a single
  /// `AND` composite filter. Returns `null` if there is nothing to filter on.
  api.Filter _convertDatastore2ApiFilters(List<datastore.Filter> filters,
                                          datastore.Key ancestorKey) {
    if ((filters == null || filters.length == 0) && ancestorKey == null) {
      return null;
    }

    var compFilter = new api.CompositeFilter();
    if (filters != null) {
      compFilter.filters = filters.map(_convertDatastore2ApiFilter).toList();
    }
    if (ancestorKey != null) {
      var filter = _convertDatastoreAncestorKey2ApiFilter(ancestorKey);
      if (compFilter.filters == null) {
        compFilter.filters = [filter];
      } else {
        compFilter.filters.add(filter);
      }
    }
    compFilter.operator = 'AND';
    return new api.Filter()..compositeFilter = compFilter;
  }

  /// Converts a [datastore.Order] into an API property order.
  api.PropertyOrder _convertDatastore2ApiOrder(datastore.Order order) {
    var property = new api.PropertyReference()..name = order.propertyName;
    var direction = order.direction == datastore.OrderDirection.Ascending
        ? 'ASCENDING' : 'DESCENDING';
    return new api.PropertyOrder()
        ..direction = direction
        ..property = property;
  }

  /// Converts a list of orders; passes `null` through unchanged.
  List<api.PropertyOrder> _convertDatastore2ApiOrders(
      List<datastore.Order> orders) {
    if (orders == null) return null;

    return orders.map(_convertDatastore2ApiOrder).toList();
  }

  /// Translates API-level errors into datastore-level error types.
  ///
  /// 400 → [datastore.ApplicationError], 409 → transaction contention,
  /// 412 → missing index. Everything else is re-raised unchanged, with the
  /// original stack trace preserved.
  static Future _handleError(error, stack) {
    if (error is api.DetailedApiRequestError) {
      if (error.status == 400) {
        return new Future.error(
            new datastore.ApplicationError(error.message), stack);
      } else if (error.status == 409) {
        // NOTE: This is reported as:
        //   "too much contention on these datastore entities"
        // TODO:
        return new Future.error(new datastore.TransactionAbortedError(), stack);
      } else if (error.status == 412) {
        return new Future.error(new datastore.NeedIndexError(), stack);
      }
    }
    return new Future.error(error, stack);
  }

  /// Allocates server-generated ids for incomplete [keys].
  Future<List<datastore.Key>> allocateIds(List<datastore.Key> keys) {
    var request = new api.AllocateIdsRequest();
    request.keys = keys.map(_convertDatastore2ApiKey).toList();
    return _api.datasets.allocateIds(request, _project).then((response) {
      return response.keys.map(_convertApi2DatastoreKey).toList();
    }, onError: _handleError);
  }

  /// Starts a new serializable transaction and wraps its id in a
  /// [TransactionImpl].
  Future<datastore.Transaction> beginTransaction(
      {bool crossEntityGroup: false}) {
    var request = new api.BeginTransactionRequest();
    // TODO: Should this be made configurable?
    request.isolationLevel = 'SERIALIZABLE';
    return _api.datasets.beginTransaction(request, _project).then((result) {
      return new TransactionImpl(result.transaction);
    }, onError: _handleError);
  }

  /// Commits a mutation of upserts ([inserts]), auto-id inserts
  /// ([autoIdInserts]) and [deletes], optionally inside [transaction].
  ///
  /// The returned [datastore.CommitResult] carries the allocated keys for
  /// [autoIdInserts] (or `null` if there were none).
  Future<datastore.CommitResult> commit({List<datastore.Entity> inserts,
                                         List<datastore.Entity> autoIdInserts,
                                         List<datastore.Key> deletes,
                                         datastore.Transaction transaction}) {
    var request = new api.CommitRequest();

    if (transaction != null) {
      request.mode = 'TRANSACTIONAL';
      request.transaction = (transaction as TransactionImpl).data;
    } else {
      request.mode = 'NON_TRANSACTIONAL';
    }

    request.mutation = new api.Mutation();
    if (inserts != null) {
      request.mutation.upsert = new List(inserts.length);
      for (int i = 0; i < inserts.length; i++) {
        request.mutation.upsert[i] = _convertDatastore2ApiEntity(inserts[i]);
      }
    }
    if (autoIdInserts != null) {
      request.mutation.insertAutoId = new List(autoIdInserts.length);
      for (int i = 0; i < autoIdInserts.length; i++) {
        request.mutation.insertAutoId[i] =
            _convertDatastore2ApiEntity(autoIdInserts[i]);
      }
    }
    if (deletes != null) {
      request.mutation.delete = new List(deletes.length);
      for (int i = 0; i < deletes.length; i++) {
        request.mutation.delete[i] = _convertDatastore2ApiKey(deletes[i]);
      }
    }
    return _api.datasets.commit(request, _project).then((result) {
      var keys;
      if (autoIdInserts != null && autoIdInserts.length > 0) {
        keys = result
            .mutationResult
            .insertAutoIdKeys
            .map(_convertApi2DatastoreKey).toList();
      }
      return new datastore.CommitResult(keys);
    }, onError: _handleError);
  }

  /// Looks up [keys], optionally inside [transaction].
  ///
  /// The result list is positionally aligned with [keys]; entries for keys
  /// the server reports as missing are `null`. Throws a
  /// [datastore.DatastoreError] if the server deferred any key or returned
  /// a key in neither `found` nor `missing`.
  Future<List<datastore.Entity>> lookup(List<datastore.Key> keys,
                                        {datastore.Transaction transaction}) {
    var apiKeys = keys.map(_convertDatastore2ApiKey).toList();
    var request = new api.LookupRequest();
    request.keys = apiKeys;
    if (transaction != null) {
      // TODO: Make readOptions more configurable.
      request.readOptions = new api.ReadOptions();
      request.readOptions.transaction = (transaction as TransactionImpl).data;
    }
    return _api.datasets.lookup(request, _project).then((response) {
      if (response.deferred != null && response.deferred.length > 0) {
        throw new datastore.DatastoreError(
            'Could not successfully look up all keys due to resource '
            'constraints.');
      }

      // NOTE: This is worst-case O(n^2)!
      // Maybe we can optimize this somehow. But the API says:
      //   message LookupResponse {
      //     // The order of results in these fields is undefined and has no
      //     // relation to the order of the keys in the input.
      //
      //     // Entities found as ResultType.FULL entities.
      //     repeated EntityResult found = 1;
      //
      //     // Entities not found as ResultType.KEY_ONLY entities.
      //     repeated EntityResult missing = 2;
      //
      //     // A list of keys that were not looked up due to resource
      //     // constraints.
      //     repeated Key deferred = 3;
      //   }
      var entities = new List(apiKeys.length);
      for (int i = 0; i < apiKeys.length; i++) {
        var apiKey = apiKeys[i];

        bool found = false;

        if (response.found != null) {
          for (var result in response.found) {
            if (_compareApiKey(apiKey, result.entity.key)) {
              entities[i] = _convertApi2DatastoreEntity(result.entity);
              found = true;
              break;
            }
          }
        }

        if (found) continue;

        if (response.missing != null) {
          for (var result in response.missing) {
            if (_compareApiKey(apiKey, result.entity.key)) {
              entities[i] = null;
              found = true;
              break;
            }
          }
        }

        if (!found) {
          throw new datastore.DatastoreError('Invalid server response: '
              'Tried to lookup ${apiKey.toJson()} but entity was neither in '
              'missing nor in found.');
        }
      }
      return entities;
    }, onError: _handleError);
  }

  /// Runs [query] and returns the first result page.
  ///
  /// Paging (including the `limit`) is handled by [QueryPageImpl.runQuery].
  Future<Page<datastore.Entity>> query(
      datastore.Query query, {datastore.Partition partition,
                              datastore.Transaction transaction}) {
    // NOTE: We explicitly do not set 'limit' here, since this is handled by
    // QueryPageImpl.runQuery.
    var apiQuery = new api.Query()
        ..filter = _convertDatastore2ApiFilters(query.filters,
                                                query.ancestorKey)
        ..order = _convertDatastore2ApiOrders(query.orders)
        ..offset = query.offset;

    if (query.kind != null) {
      apiQuery.kinds = [new api.KindExpression()..name = query.kind];
    }

    var request = new api.RunQueryRequest();
    request.query = apiQuery;
    if (transaction != null) {
      // TODO: Make readOptions more configurable.
      request.readOptions = new api.ReadOptions();
      request.readOptions.transaction = (transaction as TransactionImpl).data;
    }
    if (partition != null) {
      request.partitionId = new api.PartitionId()
          ..namespace = partition.namespace;
    }

    return QueryPageImpl.runQuery(_api, _project, request, query.limit)
        .catchError(_handleError);
  }

  /// Rolls back [transaction].
  Future rollback(datastore.Transaction transaction) {
    // TODO: Handle [transaction]
    var request = new api.RollbackRequest()
        ..transaction = (transaction as TransactionImpl).data;
    return _api.datasets.rollback(request, _project).catchError(_handleError);
  }
}
| 552 |
/// One page of query results, with support for fetching the next page via
/// the previous batch's end cursor.
class QueryPageImpl implements Page<datastore.Entity> {
  /// Maximum number of entities requested per API round trip.
  static const int MAX_ENTITIES_PER_RESPONSE = 2000;

  final api.DatastoreApi _api;
  final String _project;
  final api.RunQueryRequest _nextRequest;
  final List<datastore.Entity> _entities;
  final bool _isLast;

  // This might be `null` in which case we request as many as we can get.
  final int _remainingNumberOfEntities;

  QueryPageImpl(this._api, this._project,
                this._nextRequest, this._entities,
                this._isLast, this._remainingNumberOfEntities);

  /// Executes [request] and wraps one result batch in a [QueryPageImpl].
  ///
  /// [limit] is the number of entities still wanted overall (`null` for
  /// unlimited); [batchSize] caps a single round trip and defaults to
  /// [MAX_ENTITIES_PER_RESPONSE]. Validates the server's offset/limit
  /// behavior and prepares [request] for the follow-up call (cursor set,
  /// offset reset to 0) when more batches remain.
  static Future<QueryPageImpl> runQuery(api.DatastoreApi api,
                                        String project,
                                        api.RunQueryRequest request,
                                        int limit,
                                        {int batchSize}) {
    int batchLimit = batchSize;
    if (batchLimit == null) {
      batchLimit = MAX_ENTITIES_PER_RESPONSE;
    }
    if (limit != null && limit < batchLimit) {
      batchLimit = limit;
    }

    request.query.limit = batchLimit;

    return api.datasets.runQuery(request, project).then((response) {
      var returnedEntities = const [];

      var batch = response.batch;
      if (batch.entityResults != null) {
        returnedEntities = batch.entityResults
            .map((result) => result.entity)
            .map(DatastoreImpl._convertApi2DatastoreEntity)
            .toList();
      }

      // This check is only necessary for the first request/response pair
      // (if offset was supplied).
      if (request.query.offset != null &&
          request.query.offset > 0 &&
          request.query.offset != response.batch.skippedResults) {
        throw new datastore.DatastoreError(
            'Server did not skip over the specified ${request.query.offset} '
            'entities.');
      }

      if (limit != null && returnedEntities.length > limit) {
        throw new datastore.DatastoreError(
            'Server returned more entities than the limit for the request '
            '(${request.query.limit}) was.');
      }


      // FIXME: TODO: Big hack!
      // It looks like Apiary/Atlas is currently broken.
      /*
      if (limit != null &&
          returnedEntities.length < batchLimit &&
          response.batch.moreResults == 'MORE_RESULTS_AFTER_LIMIT') {
        throw new datastore.DatastoreError(
            'Server returned response with less entities then the limit was, '
            'but signals there are more results after the limit.');
      }
      */

      // In case a limit was specified, we need to subtract the number of
      // entities we already got.
      // (the checks above guarantee that this subtraction is >= 0).
      int remainingEntities;
      if (limit != null) {
        remainingEntities = limit - returnedEntities.length;
      }

      // If the server signals there are more entities and we either have no
      // limit or our limit has not been reached, we set `moreBatches` to
      // `true`.
      bool moreBatches =
          (remainingEntities == null || remainingEntities > 0) &&
          response.batch.moreResults == 'MORE_RESULTS_AFTER_LIMIT';

      bool gotAll = limit != null && remainingEntities == 0;
      bool noMore = response.batch.moreResults == 'NO_MORE_RESULTS';
      bool isLast = gotAll || noMore;

      // As a sanity check, we assert that `moreBatches XOR isLast`.
      // NOTE(review): a moreResults value other than the two handled above
      // (e.g. 'NOT_FINISHED') would trip this assert — confirm against the
      // API's enum before relying on it.
      assert(isLast != moreBatches);

      // FIXME: TODO: Big hack!
      // It looks like Apiary/Atlas is currently broken.
      if (moreBatches && returnedEntities.length == 0) {
        print('Warning: Api to Google Cloud Datastore returned bogus response. '
              'Trying a workaround.');
        isLast = true;
        moreBatches = false;
      }

      if (!isLast && response.batch.endCursor == null) {
        throw new datastore.DatastoreError(
            'Server did not supply an end cursor, even though the query '
            'is not done.');
      }

      if (isLast) {
        return new QueryPageImpl(
            api, project, request, returnedEntities, true, null);
      } else {
        // NOTE: We reuse the old RunQueryRequest object here.

        // The offset will be 0 from now on, since the first request will have
        // skipped over the first `offset` results.
        request.query.offset = 0;

        // Furthermore we set the startCursor to the endCursor of the previous
        // result batch, so we can continue where we left off.
        request.query.startCursor = batch.endCursor;

        return new QueryPageImpl(
            api, project, request, returnedEntities, false, remainingEntities);
      }
    });
  }

  /// Whether this is the final page of results.
  bool get isLast => _isLast;

  /// The entities contained in this page.
  List<datastore.Entity> get items => _entities;

  /// Fetches the next page; throws [ArgumentError] when called on the last
  /// page.
  Future<Page<datastore.Entity>> next({int pageSize}) {
    // NOTE: We do not respect [pageSize] here, the only mechanism we can
    // really use is `query.limit`, but this is user-specified when making
    // the query.
    if (isLast) {
      return new Future.sync(() {
        throw new ArgumentError('Cannot call next() on last page.');
      });
    }

    return QueryPageImpl.runQuery(
        _api, _project, _nextRequest, _remainingNumberOfEntities)
        .catchError(DatastoreImpl._handleError);
  }
}
OLD | NEW |