OLD | NEW |
---|---|
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 patch class HashMap<K, V> { | 5 patch class HashMap<K, V> { |
6 /* patch */ factory HashMap({ bool equals(K key1, K key2), | 6 /* patch */ factory HashMap({ bool equals(K key1, K key2), |
7 int hashCode(K key), | 7 int hashCode(K key), |
8 bool isValidKey(potentialKey) }) { | 8 bool isValidKey(potentialKey) }) { |
9 if (isValidKey == null) { | 9 if (isValidKey == null) { |
10 if (hashCode == null) { | 10 if (hashCode == null) { |
(...skipping 412 matching lines...) | |
423 final HashMap _map; | 423 final HashMap _map; |
424 _HashMapIterable(this._map); | 424 _HashMapIterable(this._map); |
425 int get length => _map.length; | 425 int get length => _map.length; |
426 bool get isEmpty => _map.isEmpty; | 426 bool get isEmpty => _map.isEmpty; |
427 bool get isNotEmpty => _map.isNotEmpty; | 427 bool get isNotEmpty => _map.isNotEmpty; |
428 } | 428 } |
429 | 429 |
430 class _HashMapKeyIterable<K> extends _HashMapIterable<K> { | 430 class _HashMapKeyIterable<K> extends _HashMapIterable<K> { |
431 _HashMapKeyIterable(HashMap map) : super(map); | 431 _HashMapKeyIterable(HashMap map) : super(map); |
432 Iterator<K> get iterator => new _HashMapKeyIterator<K>(_map); | 432 Iterator<K> get iterator => new _HashMapKeyIterator<K>(_map); |
433 bool contains(K key) => _map.containsKey(key); | 433 bool contains(Object key) => _map.containsKey(key); |
434 void forEach(void action(K key)) { | 434 void forEach(void action(K key)) { |
435 _map.forEach((K key, _) { | 435 _map.forEach((K key, _) { |
436 action(key); | 436 action(key); |
437 }); | 437 }); |
438 } | 438 } |
439 } | 439 } |
440 | 440 |
441 class _HashMapValueIterable<V> extends _HashMapIterable<V> { | 441 class _HashMapValueIterable<V> extends _HashMapIterable<V> { |
442 _HashMapValueIterable(HashMap map) : super(map); | 442 _HashMapValueIterable(HashMap map) : super(map); |
443 Iterator<V> get iterator => new _HashMapValueIterator<V>(_map); | 443 Iterator<V> get iterator => new _HashMapValueIterator<V>(_map); |
444 bool contains(V value) => _map.containsValue(value); | 444 bool contains(Object value) => _map.containsValue(value); |
445 void forEach(void action(V value)) { | 445 void forEach(void action(V value)) { |
446 _map.forEach((_, V value) { | 446 _map.forEach((_, V value) { |
447 action(value); | 447 action(value); |
448 }); | 448 }); |
449 } | 449 } |
450 } | 450 } |
451 | 451 |
452 abstract class _HashMapIterator<E> implements Iterator<E> { | 452 abstract class _HashMapIterator<E> implements Iterator<E> { |
453 final HashMap _map; | 453 final HashMap _map; |
454 final int _stamp; | 454 final int _stamp; |
(...skipping 42 matching lines...) | |
497 | 497 |
498 class _HashMapValueIterator<V> extends _HashMapIterator<V> { | 498 class _HashMapValueIterator<V> extends _HashMapIterator<V> { |
499 _HashMapValueIterator(HashMap map) : super(map); | 499 _HashMapValueIterator(HashMap map) : super(map); |
500 V get current { | 500 V get current { |
501 _HashMapEntry entry = _entry; | 501 _HashMapEntry entry = _entry; |
502 return (entry == null) ? null : entry.value; | 502 return (entry == null) ? null : entry.value; |
503 } | 503 } |
504 } | 504 } |
505 | 505 |
506 patch class HashSet<E> { | 506 patch class HashSet<E> { |
507 /* patch */ factory HashSet({ bool equals(E e1, E e2), | |
508 int hashCode(E e), | |
509 bool isValidKey(potentialKey) }) { | |
510 if (isValidKey == null) { | |
511 if (hashCode == null) { | |
512 if (equals == null) { | |
513 return new _HashSet<E>(); | |
514 } | |
515 if (identical(identical, equals)) { | |
516 return new _IdentityHashSet<E>(); | |
517 } | |
518 _hashCode = _defaultHashCode; | |
519 } else if (equals == null) { | |
520 _equals = _defaultEquals; | |
521 } | |
522 isValidKey = new _TypeTest<E>().test; | |
523 } else { | |
524 if (hashCode == null) hashCode = _defaultHashCode; | |
525 if (equals == null) equals = _defaultEquals; | |
526 } | |
527 return new _CustomHashSet<E>(equals, hashCode, isValidKey); | |
528 } | |
529 } | |
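For reference, a minimal usage sketch (not part of this CL) of the factory dispatch above: passing `equals`/`hashCode` routes construction to _CustomHashSet, while omitting both yields the plain _HashSet. It assumes only the public dart:collection API.

  import 'dart:collection';

  void main() {
    // Case-insensitive set; the factory wraps it in a _CustomHashSet.
    var set = new HashSet<String>(
        equals: (a, b) => a.toLowerCase() == b.toLowerCase(),
        hashCode: (s) => s.toLowerCase().hashCode);
    set.add('Dart');
    print(set.contains('DART'));  // true
    print(new HashSet<int>()..add(1));  // default path: plain _HashSet
  }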
530 | |
531 class _HashSet<E> extends _HashSetBase<E> implements HashSet<E> { | |
507 static const int _INITIAL_CAPACITY = 8; | 532 static const int _INITIAL_CAPACITY = 8; |
508 final _HashTable<E> _table; | 533 |
509 | 534 List<_HashSetEntry> _buckets = new List(_INITIAL_CAPACITY); |
510 /* patch */ HashSet() : _table = new _HashTable(_INITIAL_CAPACITY) { | 535 int _elementCount = 0; |
511 _table._container = this; | 536 int _modificationCount = 0; |
512 } | 537 |
513 | 538 bool _equals(e1, e2) => e1 == e2; |
514 factory HashSet.from(Iterable<E> iterable) { | 539 int _hashCode(e) => e.hashCode; |
515 return new HashSet<E>()..addAll(iterable); | |
516 } | |
517 | 540 |
518 // Iterable. | 541 // Iterable. |
519 /* patch */ Iterator<E> get iterator => new _HashTableKeyIterator<E>(_table); | 542 Iterator<E> get iterator => new _HashSetIterator<E>(this); |
520 | 543 |
521 /* patch */ int get length => _table._elementCount; | 544 int get length => _elementCount; |
522 | 545 |
523 /* patch */ bool get isEmpty => _table._elementCount == 0; | 546 bool get isEmpty => _elementCount == 0; |
524 | 547 |
525 /* patch */ bool get isNotEmpty => !isEmpty; | 548 bool get isNotEmpty => _elementCount != 0; |
526 | 549 |
527 /* patch */ bool contains(Object object) => _table._get(object) >= 0; | 550 bool contains(Object object) { |
528 | 551 int index = _hashCode(object) & (_buckets.length - 1); |
529 // Collection. | 552 _HashSetEntry entry = _buckets[index]; |
530 /* patch */ void add(E element) { | 553 while (entry != null) { |
531 _table._put(element); | 554 if (_equals(entry.key, object)) return true; |
532 _table._checkCapacity(); | 555 entry = entry.next; |
533 } | 556 } |
534 | 557 return false; |
535 /* patch */ void addAll(Iterable<E> objects) { | 558 } |
559 | |
560 // Set | |
kasperl 2013/09/18 09:07:07: Terminate with . Remove newline after comment?
Lasse Reichstein Nielsen 2013/09/18 11:35:41: As a "separator", I like the newline after. It app
561 | |
562 void _add(E element) { | |
563 int hashCode = _hashCode(element); | |
564 int index = hashCode & (_buckets.length - 1); | |
565 _HashSetEntry entry = _buckets[index]; | |
566 while (entry != null) { | |
567 if (_equals(entry.key, element)) return; | |
568 entry = entry.next; | |
569 } | |
570 _addEntry(element, hashCode, index); | |
571 } | |
572 | |
573 void add(E element) { | |
574 _add(element); | |
Florian Schneider 2013/09/18 09:07:19: Why do you need add and _add when add is just a ca
Lasse Reichstein Nielsen 2013/09/18 11:35:41: Good point. It's old habit. I prefer not to make
575 } | |
576 | |
577 void addAll(Iterable<E> objects) { | |
578 int ctr = 0; | |
536 for (E object in objects) { | 579 for (E object in objects) { |
537 _table._put(object); | 580 ctr++; |
538 _table._checkCapacity(); | 581 _add(object); |
539 } | 582 } |
540 } | 583 } |
541 | 584 |
542 /* patch */ bool remove(Object object) { | 585 bool _remove(Object object, int hashCode) { |
543 int offset = _table._remove(object); | 586 int index = hashCode & (_buckets.length - 1); |
544 _table._checkCapacity(); | 587 _HashSetEntry entry = _buckets[index]; |
545 return offset >= 0; | 588 _HashSetEntry previous = null; |
546 } | 589 while (entry != null) { |
547 | 590 if (_equals(entry.key, object)) { |
548 /* patch */ void removeAll(Iterable<Object> objectsToRemove) { | 591 _HashSetEntry next = entry.remove(); |
592 if (previous == null) { | |
593 _buckets[index] = next; | |
594 } else { | |
595 previous.next = next; | |
596 } | |
597 _elementCount--; | |
598 _modificationCount = | |
599 (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; | |
600 return true; | |
601 } | |
602 previous = entry; | |
603 entry = entry.next; | |
604 } | |
605 return false; | |
606 } | |
607 | |
608 bool remove(Object object) => _remove(object, _hashCode(object)); | |
609 | |
610 void removeAll(Iterable<Object> objectsToRemove) { | |
549 for (Object object in objectsToRemove) { | 611 for (Object object in objectsToRemove) { |
550 _table._remove(object); | 612 _remove(object, _hashCode(object)); |
551 _table._checkCapacity(); | |
552 } | 613 } |
553 } | 614 } |
554 | 615 |
555 void _filterWhere(bool test(E element), bool removeMatching) { | 616 void _filterWhere(bool test(E element), bool removeMatching) { |
556 int entrySize = _table._entrySize; | 617 int length = _buckets.length; |
557 int length = _table._table.length; | 618 for (int index = 0; index < length; index++) { |
558 for (int offset = 0; offset < length; offset += entrySize) { | 619 _HashSetEntry entry = _buckets[index]; |
559 Object entry = _table._table[offset]; | 620 _HashSetEntry previous = null; |
560 if (!_table._isFree(entry)) { | 621 while (entry != null) { |
561 E key = identical(entry, _NULL) ? null : entry; | 622 int modificationCount = _modificationCount; |
562 int modificationCount = _table._modificationCount; | 623 bool testResult = test(entry.key); |
563 bool shouldRemove = (removeMatching == test(key)); | 624 if (modificationCount != _modificationCount) { |
564 _table._checkModification(modificationCount); | 625 throw new ConcurrentModificationError(this); |
565 if (shouldRemove) { | 626 } |
566 _table._deleteEntry(offset); | 627 if (testResult == removeMatching) { |
567 } | 628 _HashSetEntry next = entry.remove(); |
568 } | 629 if (previous == null) { |
569 } | 630 _buckets[index] = next; |
570 _table._checkCapacity(); | 631 } else { |
571 } | 632 previous.next = next; |
572 | 633 } |
573 /* patch */ void removeWhere(bool test(E element)) { | 634 _elementCount--; |
635 _modificationCount = | |
636 (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; | |
637 entry = next; | |
638 } else { | |
639 previous = entry; | |
640 entry = entry.next; | |
641 } | |
642 } | |
643 } | |
644 } | |
645 | |
646 void removeWhere(bool test(E element)) { | |
574 _filterWhere(test, true); | 647 _filterWhere(test, true); |
575 } | 648 } |
576 | 649 |
577 /* patch */ void retainWhere(bool test(E element)) { | 650 void retainWhere(bool test(E element)) { |
578 _filterWhere(test, false); | 651 _filterWhere(test, false); |
579 } | 652 } |
580 | 653 |
581 /* patch */ void clear() { | 654 void clear() { |
582 _table._clear(); | 655 _elementCount = 0; |
583 } | 656 _buckets = new List(_INITIAL_CAPACITY); |
657 _modificationCount++; | |
658 } | |
659 | |
660 void _addEntry(E key, int hashCode, int index) { | |
661 _buckets[index] = new _HashSetEntry(key, hashCode, _buckets[index]); | |
662 int newElements = _elementCount + 1; | |
663 _elementCount = newElements; | |
664 int length = _buckets.length; | |
665 // If we end up with more than 75% non-empty entries, we | |
666 // resize the backing store. | |
667 if ((newElements << 2) > ((length << 1) + length)) _resize(); | |
668 _modificationCount = (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; | |
669 } | |
670 | |
671 void _resize() { | |
672 int oldLength = _buckets.length; | |
673 int newLength = oldLength << 1; | |
674 List oldBuckets = _buckets; | |
675 List newBuckets = new List(newLength); | |
676 for (int i = 0; i < oldLength; i++) { | |
677 _HashSetEntry entry = oldBuckets[i]; | |
678 while (entry != null) { | |
679 _HashSetEntry next = entry.next; | |
680 int newIndex = entry.hashCode & (newLength - 1); | |
681 entry.next = newBuckets[newIndex]; | |
682 newBuckets[newIndex] = entry; | |
683 entry = next; | |
684 } | |
685 } | |
686 _buckets = newBuckets; | |
687 } | |
688 | |
689 HashSet<E> _newSet() => new _HashSet<E>(); | |
690 } | |
691 | |
692 class _IdentityHashSet<E> extends _HashSet<E> { | |
693 bool _equals(e1, e2) => identical(e1, e2); | |
694 HashSet<E> _newSet() => new _IdentityHashSet<E>(); | |
695 } | |
696 | |
697 class _CustomHashSet<E> extends _HashSet<E> { | |
698 final _Equality<E> _equality; | |
699 final _Hasher<E> _hasher; | |
700 final _Predicate _validKey; | |
701 _CustomHashSet(this._equality, this._hasher, this._validKey); | |
702 | |
703 bool remove(Object element) { | |
704 if (!_validKey(element)) return false; | |
705 return super.remove(element); | |
706 } | |
707 | |
708 bool contains(Object element) { | |
709 if (!_validKey(element)) return false; | |
710 return super.contains(element); | |
711 } | |
712 | |
713 bool containsAll(Iterable<Object> elements) { | |
714 for (Object element in elements) { | |
715 if (!_validKey(element) || !this.contains(element)) return false; | |
716 } | |
717 return true; | |
718 } | |
719 | |
720 void removeAll(Iterable<Object> elements) { | |
721 for (Object element in elements) { | |
722 if (_validKey(element)) { | |
723 super._remove(element, _hasher(element)); | |
724 } | |
725 } | |
726 } | |
727 | |
728 void retainAll(Iterable<Object> elements) { | |
729 super.retainAll(elements.where(_validKey)); | |
730 } | |
731 | |
732 bool _equals(e1, e2) => _equality(e1, e2); | |
733 int _hashCode(e) => _hasher(e); | |
734 | |
735 HashSet<E> _newSet() => new _CustomHashSet<E>(_equality, _hasher, _validKey); | |
736 } | |
737 | |
738 class _HashSetEntry { | |
739 final key; | |
740 final int hashCode; | |
741 _HashSetEntry next; | |
742 _HashSetEntry(this.key, this.hashCode, this.next); | |
743 | |
744 _HashSetEntry remove() { | |
745 _HashSetEntry result = next; | |
746 next = null; | |
747 return result; | |
748 } | |
749 } | |
750 | |
751 class _HashSetIterator<E> implements Iterator<E> { | |
752 final _HashSet _set; | |
753 final int _modificationCount; | |
754 int _index = 0; | |
755 _HashSetEntry _next = null; | |
756 E _current = null; | |
757 | |
758 _HashSetIterator(_HashSet hashSet) | |
759 : _set = hashSet, _modificationCount = hashSet._modificationCount; | |
760 | |
761 bool moveNext() { | |
762 if (_modificationCount != _set._modificationCount) { | |
763 throw new ConcurrentModificationError(_set); | |
764 } | |
765 if (_next != null) { | |
766 _current = _next.key; | |
767 _next = _next.next; | |
768 return true; | |
769 } | |
770 List<_HashSetEntry> buckets = _set._buckets; | |
771 while (_index < buckets.length) { | |
772 _next = buckets[_index]; | |
773 _index = _index + 1; | |
774 if (_next != null) { | |
775 _current = _next.key; | |
776 _next = _next.next; | |
777 return true; | |
778 } | |
779 } | |
780 _current = null; | |
781 return false; | |
782 } | |
783 | |
784 E get current => _current; | |
584 } | 785 } |
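A usage sketch (assuming standard HashSet behavior, not code from this CL) of the modification-count guard in moveNext above: mutating the set during iteration makes the stored stamp disagree with the set's count, so the next moveNext throws.

  import 'dart:collection';

  void main() {
    var set = new HashSet<int>()..addAll([1, 2, 3]);
    try {
      for (var e in set) {
        set.remove(e);  // bumps the set's modification count
      }
    } on ConcurrentModificationError {
      print('caught ConcurrentModificationError');
    }
  }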
585 | 786 |
586 class _LinkedHashMapEntry extends _HashMapEntry { | 787 class _LinkedHashMapEntry extends _HashMapEntry { |
788 /// Double-linked list of entries of a linked hash map. | |
789 /// The _LinkedHashMap itself is the head of the list, so the type is "var". | |
790 /// Both are initialized to `this` when the map is created. | |
587 var _nextEntry; | 791 var _nextEntry; |
588 var _previousEntry; | 792 var _previousEntry; |
589 _LinkedHashMapEntry(key, value, int hashCode, _LinkedHashMapEntry next, | 793 _LinkedHashMapEntry(key, value, int hashCode, _LinkedHashMapEntry next, |
590 this._previousEntry, this._nextEntry) | 794 this._previousEntry, this._nextEntry) |
591 : super(key, value, hashCode, next) { | 795 : super(key, value, hashCode, next) { |
592 _previousEntry._nextEntry = this; | 796 _previousEntry._nextEntry = this; |
593 _nextEntry._previousEntry = this; | 797 _nextEntry._previousEntry = this; |
594 } | 798 } |
595 } | 799 } |
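A standalone sketch (illustrative names, not the CL's classes) of the sentinel pattern used by _LinkedHashMapEntry above: the collection object itself serves as the head entry, and an empty list is the head linked to itself.

  class Node {
    var _nextEntry, _previousEntry;
  }

  class SentinelList extends Node {
    SentinelList() {
      _nextEntry = _previousEntry = this;  // empty list: head points at itself
    }

    bool get isEmpty => identical(_nextEntry, this);

    void addLast(Node node) {
      // Splice the node in just before the head, i.e. at the end of the list.
      node._previousEntry = _previousEntry;
      node._nextEntry = this;
      _previousEntry._nextEntry = node;
      _previousEntry = node;
    }
  }

  void main() {
    var list = new SentinelList();
    print(list.isEmpty);  // true
    list.addLast(new Node());
    print(list.isEmpty);  // false
  }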
596 | 800 |
597 class _LinkedHashMapKeyIterable<K> extends IterableBase<K> { | 801 class _LinkedHashMapKeyIterable<K> extends IterableBase<K> { |
598 LinkedHashMap<K, dynamic> _map; | 802 LinkedHashMap<K, dynamic> _map; |
599 _LinkedHashMapKeyIterable(this._map); | 803 _LinkedHashMapKeyIterable(this._map); |
600 Iterator<K> get iterator => new _LinkedHashMapKeyIterator<K>(_map); | 804 Iterator<K> get iterator => new _LinkedHashMapKeyIterator<K>(_map); |
601 bool contains(K key) => _map.containsKey(key); | 805 bool contains(Object key) => _map.containsKey(key); |
602 bool get isEmpty => _map.isEmpty; | 806 bool get isEmpty => _map.isEmpty; |
603 bool get isNotEmpty => _map.isNotEmpty; | 807 bool get isNotEmpty => _map.isNotEmpty; |
604 int get length => _map.length; | 808 int get length => _map.length; |
605 } | 809 } |
606 | 810 |
607 class _LinkedHashMapValueIterable<V> extends IterableBase<V> { | 811 class _LinkedHashMapValueIterable<V> extends IterableBase<V> { |
608 LinkedHashMap<dynamic, V> _map; | 812 LinkedHashMap<dynamic, V> _map; |
609 _LinkedHashMapValueIterable(this._map); | 813 _LinkedHashMapValueIterable(this._map); |
610 Iterator<V> get iterator => new _LinkedHashMapValueIterator<V>(_map); | 814 Iterator<V> get iterator => new _LinkedHashMapValueIterator<V>(_map); |
611 bool contains(V value) => _map.containsValue(value); | 815 bool contains(Object value) => _map.containsValue(value); |
612 bool get isEmpty => _map.isEmpty; | 816 bool get isEmpty => _map.isEmpty; |
613 bool get isNotEmpty => _map.isNotEmpty; | 817 bool get isNotEmpty => _map.isNotEmpty; |
614 int get length => _map.length; | 818 int get length => _map.length; |
615 } | 819 } |
616 | 820 |
617 abstract class _LinkedHashMapIterator<T> implements Iterator<T> { | 821 abstract class _LinkedHashMapIterator<T> implements Iterator<T> { |
618 final LinkedHashMap _map; | 822 final LinkedHashMap _map; |
619 var _next; | 823 var _next; |
620 T _current; | 824 T _current; |
621 int _modificationCount; | 825 int _modificationCount; |
(...skipping 30 matching lines...) | |
652 class _LinkedHashMapValueIterator<V> extends _LinkedHashMapIterator<V> { | 856 class _LinkedHashMapValueIterator<V> extends _LinkedHashMapIterator<V> { |
653 _LinkedHashMapValueIterator(LinkedHashMap map) : super(map); | 857 _LinkedHashMapValueIterator(LinkedHashMap map) : super(map); |
654 V _getValue(_LinkedHashMapEntry entry) => entry.value; | 858 V _getValue(_LinkedHashMapEntry entry) => entry.value; |
655 } | 859 } |
656 | 860 |
657 | 861 |
658 /** | 862 /** |
659 * A hash-based map that iterates keys and values in key insertion order. | 863 * A hash-based map that iterates keys and values in key insertion order. |
660 */ | 864 */ |
661 patch class LinkedHashMap<K, V> { | 865 patch class LinkedHashMap<K, V> { |
866 /// Holds a double-linked list of entries in insertion order. | |
867 /// The fields have the same name as the ones in [_LinkedHashMapEntry], | |
868 /// and this map is itself used as the head entry of the list. | |
869 /// Set to `this` when initialized, representing the empty list (containing | |
870 /// only the head entry itself). | |
662 var _nextEntry; | 871 var _nextEntry; |
663 var _previousEntry; | 872 var _previousEntry; |
664 | 873 |
665 /* patch */ factory LinkedHashMap({ bool equals(K key1, K key2), | 874 /* patch */ factory LinkedHashMap({ bool equals(K key1, K key2), |
666 int hashCode(K key), | 875 int hashCode(K key), |
667 bool isValidKey(potentialKey) }) { | 876 bool isValidKey(potentialKey) }) { |
668 if (isValidKey == null) { | 877 if (isValidKey == null) { |
669 if (hashCode == null) { | 878 if (hashCode == null) { |
670 if (equals == null) { | 879 if (equals == null) { |
671 return new _LinkedHashMap<K, V>(); | 880 return new _LinkedHashMap<K, V>(); |
(...skipping 59 matching lines...) | |
731 } | 940 } |
732 | 941 |
733 void _addEntry(List buckets, int index, int length, | 942 void _addEntry(List buckets, int index, int length, |
734 K key, V value, int hashCode) { | 943 K key, V value, int hashCode) { |
735 _HashMapEntry entry = | 944 _HashMapEntry entry = |
736 new _LinkedHashMapEntry(key, value, hashCode, buckets[index], | 945 new _LinkedHashMapEntry(key, value, hashCode, buckets[index], |
737 _previousEntry, this); | 946 _previousEntry, this); |
738 buckets[index] = entry; | 947 buckets[index] = entry; |
739 int newElements = _elementCount + 1; | 948 int newElements = _elementCount + 1; |
740 _elementCount = newElements; | 949 _elementCount = newElements; |
950 | |
741 // If we end up with more than 75% non-empty entries, we | 951 // If we end up with more than 75% non-empty entries, we |
742 // resize the backing store. | 952 // resize the backing store. |
743 if ((newElements << 2) > ((length << 1) + length)) _resize(); | 953 if ((newElements << 2) > ((length << 1) + length)) _resize(); |
744 _modificationCount = (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; | 954 _modificationCount = (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; |
745 } | 955 } |
746 | 956 |
747 void _removeEntry(_LinkedHashMapEntry entry, | 957 void _removeEntry(_LinkedHashMapEntry entry, |
748 _HashMapEntry previousInBucket, | 958 _HashMapEntry previousInBucket, |
749 int bucketIndex) { | 959 int bucketIndex) { |
750 var previousInChain = entry._previousEntry; | 960 var previousInChain = entry._previousEntry; |
(...skipping 30 matching lines...) | |
781 with _LinkedHashMapMixin<K, V> { | 991 with _LinkedHashMapMixin<K, V> { |
782 _LinkedCustomHashMap(bool equals(K key1, K key2), | 992 _LinkedCustomHashMap(bool equals(K key1, K key2), |
783 int hashCode(K key), | 993 int hashCode(K key), |
784 bool isValidKey(potentialKey)) | 994 bool isValidKey(potentialKey)) |
785 : super(equals, hashCode, isValidKey) { | 995 : super(equals, hashCode, isValidKey) { |
786 _nextEntry = _previousEntry = this; | 996 _nextEntry = _previousEntry = this; |
787 } | 997 } |
788 } | 998 } |
789 | 999 |
790 | 1000 |
791 patch class LinkedHashSet<E> extends _HashSetBase<E> { | 1001 patch class LinkedHashSet<E> { |
792 static const int _INITIAL_CAPACITY = 8; | 1002 /* patch */ factory LinkedHashSet({ bool equals(E e1, E e2), |
793 _LinkedHashTable<E> _table; | 1003 int hashCode(E e), |
794 | 1004 bool isValidKey(potentialKey) }) { |
795 /* patch */ LinkedHashSet() { | 1005 if (isValidKey == null) { |
796 _table = new _LinkedHashTable(_INITIAL_CAPACITY); | 1006 if (hashCode == null) { |
797 _table._container = this; | 1007 if (equals == null) { |
1008 return new _LinkedHashSet<E>(); | |
1009 } | |
1010 if (identical(identical, equals)) { | |
1011 return new _LinkedIdentityHashSet<E>(); | |
1012 } | |
1013 _hashCode = _defaultHashCode; | |
1014 } else if (equals == null) { | |
1015 _equals = _defaultEquals; | |
1016 } | |
1017 isValidKey = new _TypeTest<E>().test; | |
1018 } else { | |
1019 if (hashCode == null) hashCode = _defaultHashCode; | |
1020 if (equals == null) equals = _defaultEquals; | |
1021 } | |
1022 return new _LinkedCustomHashSet<E>(equals, hashCode, isValidKey); | |
1023 } | |
1024 } | |
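For reference, a usage sketch (not part of this CL) of the LinkedHashSet factory above; it mirrors the HashSet dispatch, but the resulting set also iterates in insertion order. Only the public dart:collection API is assumed.

  import 'dart:collection';

  void main() {
    var set = new LinkedHashSet<String>(
        equals: (a, b) => a.toLowerCase() == b.toLowerCase(),
        hashCode: (s) => s.toLowerCase().hashCode);
    set..add('b')..add('A')..add('B');  // 'B' equals 'b', so it is ignored
    print(set.toList());  // [b, A] -- insertion order preserved
  }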
1025 | |
1026 class _LinkedHashSetEntry extends _HashSetEntry { | |
1027 /// Links this element into a double-linked list of elements of a hash set. | |
1028 /// The hash set object itself is used as the head entry of the list, so | |
1029 /// the field is typed as "var". | |
1030 /// Both links are initialized to `this` when the object is created. | |
1031 var _nextEntry; | |
1032 var _previousEntry; | |
1033 _LinkedHashSetEntry(var key, int hashCode, _LinkedHashSetEntry next, | |
1034 this._previousEntry, this._nextEntry) | |
1035 : super(key, hashCode, next) { | |
1036 _previousEntry._nextEntry = _nextEntry._previousEntry = this; | |
1037 } | |
1038 | |
1039 _LinkedHashSetEntry remove() { | |
1040 _previousEntry._nextEntry = _nextEntry; | |
1041 _nextEntry._previousEntry = _previousEntry; | |
1042 _nextEntry = _previousEntry = this; | |
1043 return super.remove(); | |
1044 } | |
1045 } | |
1046 | |
1047 class _LinkedHashSet<E> extends _HashSet<E> | |
1048 implements LinkedHashSet<E> { | |
1049 /// Holds a double linked list of the element entries of the set in | |
1050 /// insertion order. | |
1051 /// The fields have the same names as the ones in [_LinkedHashSetEntry], | |
1052 /// allowing this object to be used as the head entry of the list. | |
1053 /// The fields are initialized to `this` when created, representing the | |
1054 /// empty list that only contains the head entry. | |
1055 var _nextEntry; | |
1056 var _previousEntry; | |
1057 | |
1058 _LinkedHashSet() { | |
1059 _nextEntry = _previousEntry = this; | |
798 } | 1060 } |
799 | 1061 |
800 // Iterable. | 1062 // Iterable. |
801 /* patch */ Iterator<E> get iterator { | 1063 Iterator<E> get iterator => new _LinkedHashSetIterator<E>(this); |
802 return new _LinkedHashTableKeyIterator<E>(_table); | 1064 |
803 } | 1065 void forEach(void action(E element)) { |
804 | 1066 var cursor = _nextEntry; |
805 /* patch */ int get length => _table._elementCount; | 1067 int modificationCount = _modificationCount; |
806 | 1068 while (!identical(cursor, this)) { |
807 /* patch */ bool get isEmpty => _table._elementCount == 0; | 1069 _LinkedHashSetEntry entry = cursor; |
808 | 1070 action(entry.key); |
809 /* patch */ bool get isNotEmpty => !isEmpty; | 1071 if (_modificationCount != modificationCount) { |
810 | 1072 throw new ConcurrentModificationError(this); |
811 /* patch */ bool contains(Object object) => _table._get(object) >= 0; | 1073 } |
812 | 1074 cursor = entry._nextEntry; |
813 /* patch */ void forEach(void action(E element)) { | 1075 } |
814 int offset = _table._next(_LinkedHashTable._HEAD_OFFSET); | 1076 } |
815 int modificationCount = _table._modificationCount; | 1077 |
816 while (offset != _LinkedHashTable._HEAD_OFFSET) { | 1078 E get first { |
817 E key = _table._key(offset); | 1079 if (identical(_nextEntry, this)) { |
818 action(key); | |
819 _table._checkModification(modificationCount); | |
820 offset = _table._next(offset); | |
821 } | |
822 } | |
823 | |
824 /* patch */ E get first { | |
825 int firstOffset = _table._next(_LinkedHashTable._HEAD_OFFSET); | |
826 if (firstOffset == _LinkedHashTable._HEAD_OFFSET) { | |
827 throw new StateError("No elements"); | 1080 throw new StateError("No elements"); |
828 } | 1081 } |
829 return _table._key(firstOffset); | 1082 _LinkedHashSetEntry entry = _nextEntry; |
830 } | 1083 return entry.key; |
831 | 1084 } |
832 /* patch */ E get last { | 1085 |
833 int lastOffset = _table._prev(_LinkedHashTable._HEAD_OFFSET); | 1086 E get last { |
834 if (lastOffset == _LinkedHashTable._HEAD_OFFSET) { | 1087 if (identical(_previousEntry, this)) { |
835 throw new StateError("No elements"); | 1088 throw new StateError("No elements"); |
836 } | 1089 } |
837 return _table._key(lastOffset); | 1090 _LinkedHashSetEntry entry = _previousEntry; |
838 } | 1091 return entry.key; |
839 | 1092 } |
840 // Collection. | 1093 |
1094 // Set. | |
841 void _filterWhere(bool test(E element), bool removeMatching) { | 1095 void _filterWhere(bool test(E element), bool removeMatching) { |
842 int entrySize = _table._entrySize; | 1096 var cursor = _nextEntry; |
843 int length = _table._table.length; | 1097 while (!identical(cursor, this)) { |
844 int offset = _table._next(_LinkedHashTable._HEAD_OFFSET); | 1098 _LinkedHashSetEntry entry = cursor; |
845 while (offset != _LinkedHashTable._HEAD_OFFSET) { | 1099 int modificationCount = _modificationCount; |
846 E key = _table._key(offset); | 1100 bool testResult = test(entry.key); |
847 int nextOffset = _table._next(offset); | 1101 if (modificationCount != _modificationCount) { |
848 int modificationCount = _table._modificationCount; | 1102 throw new ConcurrentModificationError(this); |
849 bool shouldRemove = (removeMatching == test(key)); | 1103 } |
850 _table._checkModification(modificationCount); | 1104 cursor = entry._nextEntry; |
851 if (shouldRemove) { | 1105 if (testResult == removeMatching) { |
852 _table._deleteEntry(offset); | 1106 _remove(entry.key, entry.hashCode); |
853 } | 1107 } |
854 offset = nextOffset; | 1108 } |
855 } | 1109 } |
856 _table._checkCapacity(); | 1110 |
857 } | 1111 void _addEntry(E key, int hashCode, int index) { |
858 | 1112 _buckets[index] = |
859 /* patch */ void add(E element) { | 1113 new _LinkedHashSetEntry(key, hashCode, _buckets[index], |
860 _table._put(element); | 1114 _previousEntry, this); |
861 _table._checkCapacity(); | 1115 int newElements = _elementCount + 1; |
862 } | 1116 _elementCount = newElements; |
863 | 1117 int length = _buckets.length; |
864 /* patch */ void addAll(Iterable<E> objects) { | 1118 // If we end up with more than 75% non-empty entries, we |
865 for (E object in objects) { | 1119 // resize the backing store. |
866 _table._put(object); | 1120 if ((newElements << 2) > ((length << 1) + length)) _resize(); |
867 _table._checkCapacity(); | 1121 _modificationCount = (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; |
868 } | 1122 } |
869 } | 1123 |
870 | 1124 void clear() { |
871 /* patch */ bool remove(Object object) { | 1125 _nextEntry = _previousEntry = this; |
872 int offset = _table._remove(object); | 1126 super.clear(); |
873 if (offset >= 0) { | 1127 } |
874 _table._checkCapacity(); | 1128 |
875 return true; | 1129 HashSet<E> _newSet() => new _LinkedHashSet<E>(); |
876 } | 1130 } |
877 return false; | 1131 |
878 } | 1132 class _LinkedIdentityHashSet<E> extends _LinkedHashSet<E> { |
879 | 1133 bool _equals(e1, e2) => identical(e1, e2); |
880 /* patch */ void removeAll(Iterable objectsToRemove) { | 1134 HashSet<E> _newSet() => new _LinkedIdentityHashSet<E>(); |
881 for (Object object in objectsToRemove) { | 1135 } |
882 if (_table._remove(object) >= 0) { | 1136 |
883 _table._checkCapacity(); | 1137 class _LinkedCustomHashSet<E> extends _LinkedHashSet<E> { |
884 } | 1138 final _Equality<E> _equality; |
885 } | 1139 final _Hasher<E> _hasher; |
886 } | 1140 final _Predicate _validKey; |
887 | 1141 |
888 /* patch */ void removeWhere(bool test(E element)) { | 1142 _LinkedCustomHashSet(this._equality, this._hasher, bool validKey(object)) |
889 _filterWhere(test, true); | 1143 : _validKey = (validKey != null) ? validKey : new _TypeTest<E>().test; |
890 } | 1144 |
891 | 1145 bool _equals(e1, e2) => _equality(e1, e2); |
892 /* patch */ void retainWhere(bool test(E element)) { | 1146 |
893 _filterWhere(test, false); | 1147 int _hashCode(e) => _hasher(e); |
894 } | 1148 |
895 | 1149 bool contains(Object o) { |
896 /* patch */ void clear() { | 1150 if (!_validKey(o)) return false; |
897 _table._clear(); | 1151 return super.contains(o); |
898 } | 1152 } |
899 } | 1153 |
900 | 1154 bool remove(Object o) { |
901 class _DeadEntry { | 1155 if (!_validKey(o)) return false; |
902 const _DeadEntry(); | 1156 return super.remove(o); |
903 } | 1157 } |
904 | 1158 |
905 class _NullKey { | 1159 bool containsAll(Iterable<Object> elements) { |
906 const _NullKey(); | 1160 for (Object element in elements) { |
907 int get hashCode => null.hashCode; | 1161 if (!_validKey(element) || !this.contains(element)) return false; |
908 } | 1162 } |
909 | 1163 return true; |
910 const _TOMBSTONE = const _DeadEntry(); | 1164 } |
911 const _NULL = const _NullKey(); | 1165 |
912 | 1166 void removeAll(Iterable<Object> elements) { |
913 class _HashTable<K> { | 1167 for (Object element in elements) { |
914 /** | 1168 if (_validKey(element)) { |
915 * Table of entries with [_entrySize] slots per entry. | 1169 super._remove(element, _hasher(element)); |
916 * | 1170 } |
917 * Capacity in entries must be factor of two. | 1171 } |
918 */ | 1172 } |
919 List _table; | 1173 |
920 /** Current capacity. Always equal to [:_table.length ~/ _entrySize:]. */ | 1174 void retainAll(Iterable<Object> elements) { |
921 int _capacity; | 1175 // Override to not waste time on a linked set for the contains. |
922 /** Count of occupied entries, including deleted ones. */ | 1176 Set retainSet = new _CustomHashSet(_equality, _hasher, _validKey); |
923 int _entryCount = 0; | 1177 retainSet.addAll(elements.where(_validKey)); |
924 /** Count of deleted entries. */ | 1178 super.retainWhere(retainSet.contains); |
925 int _deletedCount = 0; | 1179 } |
926 /** Counter incremented when table is modified. */ | 1180 |
927 int _modificationCount = 0; | 1181 HashSet<E> _newSet() => |
928 /** If set, used as the source object for [ConcurrentModificationError]s. */ | 1182 new _LinkedCustomHashSet<E>(_equality, _hasher, _validKey); |
929 Object _container; | 1183 } |
930 | 1184 |
931 _HashTable(int initialCapacity) : _capacity = initialCapacity { | 1185 class _LinkedHashSetIterator<E> implements Iterator<E> { |
932 _table = _createTable(initialCapacity); | 1186 final _LinkedHashSet _set; |
933 } | |
934 | |
935 /** Reads key from table. Converts _NULL marker to null. */ | |
936 Object _key(offset) { | |
937 assert(!_isFree(_table[offset])); | |
938 Object key = _table[offset]; | |
939 if (!identical(key, _NULL)) return key; | |
940 return null; | |
941 } | |
942 | |
943 /** Writes key to table. Converts null to _NULL marker. */ | |
944 void _setKey(int offset, Object key) { | |
945 if (key == null) key = _NULL; | |
946 _table[offset] = key; | |
947 } | |
948 | |
949 int get _elementCount => _entryCount - _deletedCount; | |
950 | |
951 /** Size of each entry. */ | |
952 int get _entrySize => 1; | |
953 | |
954 void _checkModification(int expectedModificationCount) { | |
955 if (_modificationCount != expectedModificationCount) { | |
956 throw new ConcurrentModificationError(_container); | |
957 } | |
958 } | |
959 | |
960 void _recordModification() { | |
961 // Value cycles after 2^30 modifications. If you keep hold of an | |
962 // iterator for that long, you might miss a modification detection, | |
963 // and iteration can go sour. Don't do that. | |
964 _modificationCount = (_modificationCount + 1) & (0x3FFFFFFF); | |
965 } | |
966 | |
967 /** | |
968 * Create an empty table. | |
969 */ | |
970 List _createTable(int capacity) { | |
971 List table = new List(capacity * _entrySize); | |
972 return table; | |
973 } | |
974 | |
975 /** First table probe. */ | |
976 int _firstProbe(int hashCode, int capacity) { | |
977 return hashCode & (capacity - 1); | |
978 } | |
979 | |
980 /** Following table probes. */ | |
981 int _nextProbe(int previousIndex, int probeCount, int capacity) { | |
982 // When capacity is a power of 2, this probing algorithm (the triangular | |
983 // number sequence modulo capacity) is guaranteed to hit all indices exactly | |
984 // once before repeating. | |
985 return (previousIndex + probeCount) & (capacity - 1); | |
986 } | |
987 | |
988 /** Whether an object is a free-marker (either tombstone or free). */ | |
989 bool _isFree(Object marker) => | |
990 marker == null || identical(marker, _TOMBSTONE); | |
991 | |
992 /** | |
993 * Look up the offset for an object in the table. | |
994 * | |
995 * Finds the offset of the object in the table, if it is there, | |
996 * or the first free offset for its hashCode. | |
997 */ | |
998 int _probeForAdd(int hashCode, Object object) { | |
999 int entrySize = _entrySize; | |
1000 int index = _firstProbe(hashCode, _capacity); | |
1001 int firstTombstone = -1; | |
1002 int probeCount = 0; | |
1003 while (true) { | |
1004 int offset = index * entrySize; | |
1005 Object entry = _table[offset]; | |
1006 if (identical(entry, _TOMBSTONE)) { | |
1007 if (firstTombstone < 0) firstTombstone = offset; | |
1008 } else if (entry == null) { | |
1009 if (firstTombstone < 0) return offset; | |
1010 return firstTombstone; | |
1011 } else if (identical(_NULL, entry) ? _equals(null, object) | |
1012 : _equals(entry, object)) { | |
1013 return offset; | |
1014 } | |
1015 // The _nextProbe is designed so that it hits | |
1016 // every index eventually. | |
1017 index = _nextProbe(index, ++probeCount, _capacity); | |
1018 } | |
1019 } | |
1020 | |
1021 /** | |
1022 * Look up the offset for an object in the table. | |
1023 * | |
1024 * If the object is in the table, its offset is returned. | |
1025 * | |
1026 * If the object is not in the table, a negative value is returned. | |
1027 */ | |
1028 int _probeForLookup(int hashCode, Object object) { | |
1029 int entrySize = _entrySize; | |
1030 int index = _firstProbe(hashCode, _capacity); | |
1031 int probeCount = 0; | |
1032 while (true) { | |
1033 int offset = index * entrySize; | |
1034 Object entry = _table[offset]; | |
1035 if (entry == null) { | |
1036 return -1; | |
1037 } else if (!identical(_TOMBSTONE, entry)) { | |
1038 if (identical(_NULL, entry) ? _equals(null, object) | |
1039 : _equals(entry, object)) { | |
1040 return offset; | |
1041 } | |
1042 } | |
1043 // The _nextProbe is designed so that it hits | |
1044 // every index eventually. | |
1045 index = _nextProbe(index, ++probeCount, _capacity); | |
1046 } | |
1047 } | |
1048 | |
1049 // Override the following two to change equality/hashCode computations | |
1050 | |
1051 /** | |
1052 * Compare two object for equality. | |
1053 * | |
1054 * The first object is the one already in the table, | |
1055 * and the second is the one being searched for. | |
1056 */ | |
1057 bool _equals(Object element, Object other) { | |
1058 return element == other; | |
1059 } | |
1060 | |
1061 /** | |
1062 * Compute hash-code for an object. | |
1063 */ | |
1064 int _hashCodeOf(Object object) => object.hashCode; | |
1065 | |
1066 /** | |
1067 * Ensure that the table isn't too full for its own good. | |
1068 * | |
1069 * Call this after adding an element. | |
1070 */ | |
1071 int _checkCapacity() { | |
1072 // Compute everything in multiples of entrySize to avoid division. | |
1073 int freeCount = _capacity - _entryCount; | |
1074 if (freeCount * 4 < _capacity || | |
1075 freeCount < _deletedCount) { | |
1076 // Less than 25% free or more deleted entries than free entries. | |
1077 _grow(_entryCount - _deletedCount); | |
1078 } | |
1079 } | |
1080 | |
1081 void _grow(int contentCount) { | |
1082 int capacity = _capacity; | |
1083 // Don't grow to less than twice the needed capacity. | |
1084 int minCapacity = contentCount * 2; | |
1085 while (capacity < minCapacity) { | |
1086 capacity *= 2; | |
1087 } | |
1088 // Reset to another table and add all existing elements. | |
1089 List oldTable = _table; | |
1090 _table = _createTable(capacity); | |
1091 _capacity = capacity; | |
1092 _entryCount = 0; | |
1093 _deletedCount = 0; | |
1094 _addAllEntries(oldTable); | |
1095 _recordModification(); | |
1096 } | |
1097 | |
1098 /** | |
1099 * Copies all non-free entries from the old table to the new empty table. | |
1100 */ | |
1101 void _addAllEntries(List oldTable) { | |
1102 for (int i = 0; i < oldTable.length; i += _entrySize) { | |
1103 Object object = oldTable[i]; | |
1104 if (!_isFree(object)) { | |
1105 int toOffset = _put(object); | |
1106 _copyEntry(oldTable, i, toOffset); | |
1107 } | |
1108 } | |
1109 } | |
1110 | |
1111 /** | |
1112 * Copies everything but the key element from one entry to another. | |
1113 * | |
1114 * Called while growing the base array. | |
1115 * | |
1116 * Override this if any non-key fields need copying. | |
1117 */ | |
1118 void _copyEntry(List fromTable, int fromOffset, int toOffset) {} | |
1119 | |
1120 // The following three methods are for simple get/set/remove operations. | |
1121 // They only affect the key of an entry. The remaining fields must be | |
1122 // filled by the caller. | |
1123 | |
1124 /** | |
1125 * Returns the offset of a key in [_table], or negative if it's not there. | |
1126 */ | |
1127 int _get(Object key) { | |
1128 return _probeForLookup(_hashCodeOf(key), key); | |
1129 } | |
1130 | |
1131 /** | |
1132 * Puts the key into the table and returns its offset into [_table]. | |
1133 * | |
1134 * If [_entrySize] is greater than 1, the caller should fill the | |
1135 * remaining fields. | |
1136 * | |
1137 * Remember to call [_checkCapacity] after using this method. | |
1138 */ | |
1139 int _put(K key) { | |
1140 int offset = _probeForAdd(_hashCodeOf(key), key); | |
1141 Object oldEntry = _table[offset]; | |
1142 if (oldEntry == null) { | |
1143 _entryCount++; | |
1144 } else if (identical(oldEntry, _TOMBSTONE)) { | |
1145 _deletedCount--; | |
1146 } else { | |
1147 return offset; | |
1148 } | |
1149 _setKey(offset, key); | |
1150 _recordModification(); | |
1151 return offset; | |
1152 } | |
1153 | |
1154 /** | |
1155 * Removes a key from the table and returns its offset into [_table]. | |
1156 * | |
1157 * Returns null if the key was not in the table. | |
1158 * If [_entrySize] is greater than 1, the caller should clean up the | |
1159 * remaining fields. | |
1160 */ | |
1161 int _remove(Object key) { | |
1162 int offset = _probeForLookup(_hashCodeOf(key), key); | |
1163 if (offset >= 0) { | |
1164 _deleteEntry(offset); | |
1165 } | |
1166 return offset; | |
1167 } | |
1168 | |
1169 /** Clears the table completely, leaving it empty. */ | |
1170 void _clear() { | |
1171 if (_elementCount == 0) return; | |
1172 for (int i = 0; i < _table.length; i++) { | |
1173 _table[i] = null; | |
1174 } | |
1175 _entryCount = _deletedCount = 0; | |
1176 _recordModification(); | |
1177 } | |
1178 | |
1179 /** Clears an entry in the table. */ | |
1180 void _deleteEntry(int offset) { | |
1181 assert(!_isFree(_table[offset])); | |
1182 _setKey(offset, _TOMBSTONE); | |
1183 _deletedCount++; | |
1184 _recordModification(); | |
1185 } | |
1186 } | |
1187 | |
1188 /** | |
1189 * Generic iterable based on a [_HashTable]. | |
1190 */ | |
1191 abstract class _HashTableIterable<E> extends IterableBase<E> { | |
1192 final _HashTable _hashTable; | |
1193 _HashTableIterable(this._hashTable); | |
1194 | |
1195 Iterator<E> get iterator; | |
1196 | |
1197 /** | |
1198 * Return the iterated value for a given entry. | |
1199 */ | |
1200 E _valueAt(int offset, Object key); | |
1201 | |
1202 int get length => _hashTable._elementCount; | |
1203 | |
1204 bool get isEmpty => _hashTable._elementCount == 0; | |
1205 | |
1206 void forEach(void action(E element)) { | |
1207 int entrySize = _hashTable._entrySize; | |
1208 List table = _hashTable._table; | |
1209 int modificationCount = _hashTable._modificationCount; | |
1210 for (int offset = 0; offset < table.length; offset += entrySize) { | |
1211 Object entry = table[offset]; | |
1212 if (!_hashTable._isFree(entry)) { | |
1213 E value = _valueAt(offset, entry); | |
1214 action(value); | |
1215 } | |
1216 _hashTable._checkModification(modificationCount); | |
1217 } | |
1218 } | |
1219 } | |
1220 | |
1221 abstract class _HashTableIterator<E> implements Iterator<E> { | |
1222 final _HashTable _hashTable; | |
1223 final int _modificationCount; | 1187 final int _modificationCount; |
1224 /** Location right after last found element. */ | 1188 var _next; |
1225 int _offset = 0; | 1189 E _current; |
1226 E _current = null; | 1190 |
1227 | 1191 _LinkedHashSetIterator(_LinkedHashSet hashSet) |
1228 _HashTableIterator(_HashTable hashTable) | 1192 : _set = hashSet, |
1229 : _hashTable = hashTable, | 1193 _modificationCount = hashSet._modificationCount, |
1230 _modificationCount = hashTable._modificationCount; | 1194 _next = hashSet._nextEntry; |
1231 | 1195 |
1232 bool moveNext() { | 1196 bool moveNext() { |
1233 _hashTable._checkModification(_modificationCount); | 1197 if (_modificationCount != _set._modificationCount) { |
1234 | 1198 throw new ConcurrentModificationError(_set); |
1235 List table = _hashTable._table; | 1199 } |
1236 int entrySize = _hashTable._entrySize; | 1200 if (identical(_set, _next)) { |
1237 | |
1238 while (_offset < table.length) { | |
1239 int currentOffset = _offset; | |
1240 Object entry = table[currentOffset]; | |
1241 _offset = currentOffset + entrySize; | |
1242 if (!_hashTable._isFree(entry)) { | |
1243 _current = _valueAt(currentOffset, entry); | |
1244 return true; | |
1245 } | |
1246 } | |
1247 _current = null; | |
1248 return false; | |
1249 } | |
1250 | |
1251 E get current => _current; | |
1252 | |
1253 E _valueAt(int offset, Object key); | |
1254 } | |
1255 | |
1256 class _HashTableKeyIterable<K> extends _HashTableIterable<K> { | |
1257 _HashTableKeyIterable(_HashTable<K> hashTable) : super(hashTable); | |
1258 | |
1259 Iterator<K> get iterator => new _HashTableKeyIterator<K>(_hashTable); | |
1260 | |
1261 K _valueAt(int offset, Object key) { | |
1262 if (identical(key, _NULL)) return null; | |
1263 return key; | |
1264 } | |
1265 | |
1266 bool contains(Object value) => _hashTable._get(value) >= 0; | |
1267 } | |
1268 | |
1269 class _HashTableKeyIterator<K> extends _HashTableIterator<K> { | |
1270 _HashTableKeyIterator(_HashTable hashTable) : super(hashTable); | |
1271 | |
1272 K _valueAt(int offset, Object key) { | |
1273 if (identical(key, _NULL)) return null; | |
1274 return key; | |
1275 } | |
1276 } | |
1277 | |
1278 class _HashTableValueIterable<V> extends _HashTableIterable<V> { | |
1279 final int _entryIndex; | |
1280 | |
1281 _HashTableValueIterable(_HashTable hashTable, this._entryIndex) | |
1282 : super(hashTable); | |
1283 | |
1284 Iterator<V> get iterator { | |
1285 return new _HashTableValueIterator<V>(_hashTable, _entryIndex); | |
1286 } | |
1287 | |
1288 V _valueAt(int offset, Object key) => _hashTable._table[offset + _entryIndex]; | |
1289 } | |
1290 | |
1291 class _HashTableValueIterator<V> extends _HashTableIterator<V> { | |
1292 final int _entryIndex; | |
1293 | |
1294 _HashTableValueIterator(_HashTable hashTable, this._entryIndex) | |
1295 : super(hashTable); | |
1296 | |
1297 V _valueAt(int offset, Object key) => _hashTable._table[offset + _entryIndex]; | |
1298 } | |
1299 | |
1300 class _HashMapTable<K, V> extends _HashTable<K> { | |
1301 static const int _INITIAL_CAPACITY = 8; | |
1302 static const int _VALUE_INDEX = 1; | |
1303 | |
1304 _HashMapTable() : super(_INITIAL_CAPACITY); | |
1305 | |
1306 int get _entrySize => 2; | |
1307 | |
1308 V _value(int offset) => _table[offset + _VALUE_INDEX]; | |
1309 void _setValue(int offset, V value) { _table[offset + _VALUE_INDEX] = value; } | |
1310 | |
1311 _copyEntry(List fromTable, int fromOffset, int toOffset) { | |
1312 _table[toOffset + _VALUE_INDEX] = fromTable[fromOffset + _VALUE_INDEX]; | |
1313 } | |
1314 } | |
1315 | |
1316 /** Unique marker object for the head of a linked list of entries. */ | |
1317 class _LinkedHashTableHeadMarker { | |
1318 const _LinkedHashTableHeadMarker(); | |
1319 } | |
1320 | |
1321 const _LinkedHashTableHeadMarker _HEAD_MARKER = | |
1322 const _LinkedHashTableHeadMarker(); | |
1323 | |
1324 class _LinkedHashTable<K> extends _HashTable<K> { | |
1325 static const _NEXT_INDEX = 1; | |
1326 static const _PREV_INDEX = 2; | |
1327 static const _HEAD_OFFSET = 0; | |
1328 | |
1329 _LinkedHashTable(int initialCapacity) : super(initialCapacity); | |
1330 | |
1331 int get _entrySize => 3; | |
1332 | |
1333 List _createTable(int capacity) { | |
1334 List result = new List(capacity * _entrySize); | |
1335 result[_HEAD_OFFSET] = _HEAD_MARKER; | |
1336 result[_HEAD_OFFSET + _NEXT_INDEX] = _HEAD_OFFSET; | |
1337 result[_HEAD_OFFSET + _PREV_INDEX] = _HEAD_OFFSET; | |
1338 return result; | |
1339 } | |
1340 | |
1341 int _next(int offset) => _table[offset + _NEXT_INDEX]; | |
1342 void _setNext(int offset, int to) { _table[offset + _NEXT_INDEX] = to; } | |
1343 | |
1344 int _prev(int offset) => _table[offset + _PREV_INDEX]; | |
1345 void _setPrev(int offset, int to) { _table[offset + _PREV_INDEX] = to; } | |
1346 | |
1347 void _linkLast(int offset) { | |
1348 // Add entry at offset at end of double-linked list. | |
1349 int last = _prev(_HEAD_OFFSET); | |
1350 _setNext(offset, _HEAD_OFFSET); | |
1351 _setPrev(offset, last); | |
1352 _setNext(last, offset); | |
1353 _setPrev(_HEAD_OFFSET, offset); | |
1354 } | |
1355 | |
1356 void _unlink(int offset) { | |
1357 assert(offset != _HEAD_OFFSET); | |
1358 int next = _next(offset); | |
1359 int prev = _prev(offset); | |
1360 _setNext(offset, null); | |
1361 _setPrev(offset, null); | |
1362 _setNext(prev, next); | |
1363 _setPrev(next, prev); | |
1364 } | |
1365 | |
1366 /** | |
1367 * Copies all non-free entries from the old table to the new empty table. | |
1368 */ | |
1369 void _addAllEntries(List oldTable) { | |
1370 int offset = oldTable[_HEAD_OFFSET + _NEXT_INDEX]; | |
1371 while (offset != _HEAD_OFFSET) { | |
1372 Object object = oldTable[offset]; | |
1373 int nextOffset = oldTable[offset + _NEXT_INDEX]; | |
1374 int toOffset = _put(object); | |
1375 _copyEntry(oldTable, offset, toOffset); | |
1376 offset = nextOffset; | |
1377 } | |
1378 } | |
1379 | |
1380 void _clear() { | |
1381 if (_elementCount == 0) return; | |
1382 _setNext(_HEAD_OFFSET, _HEAD_OFFSET); | |
1383 _setPrev(_HEAD_OFFSET, _HEAD_OFFSET); | |
1384 for (int i = _entrySize; i < _table.length; i++) { | |
1385 _table[i] = null; | |
1386 } | |
1387 _entryCount = _deletedCount = 0; | |
1388 _recordModification(); | |
1389 } | |
1390 | |
1391 int _put(K key) { | |
1392 int offset = _probeForAdd(_hashCodeOf(key), key); | |
1393 Object oldEntry = _table[offset]; | |
1394 if (identical(oldEntry, _TOMBSTONE)) { | |
1395 _deletedCount--; | |
1396 } else if (oldEntry == null) { | |
1397 _entryCount++; | |
1398 } else { | |
1399 return offset; | |
1400 } | |
1401 _recordModification(); | |
1402 _setKey(offset, key); | |
1403 _linkLast(offset); | |
1404 return offset; | |
1405 } | |
1406 | |
1407 void _deleteEntry(int offset) { | |
1408 _unlink(offset); | |
1409 _setKey(offset, _TOMBSTONE); | |
1410 _deletedCount++; | |
1411 _recordModification(); | |
1412 } | |
1413 } | |
1414 | |
1415 class _LinkedHashTableKeyIterable<K> extends IterableBase<K> { | |
1416 final _LinkedHashTable<K> _table; | |
1417 _LinkedHashTableKeyIterable(this._table); | |
1418 Iterator<K> get iterator => new _LinkedHashTableKeyIterator<K>(_table); | |
1419 | |
1420 bool contains(Object value) => _table._get(value) >= 0; | |
1421 | |
1422 int get length => _table._elementCount; | |
1423 } | |
1424 | |
1425 class _LinkedHashTableKeyIterator<K> extends _LinkedHashTableIterator<K> { | |
1426 _LinkedHashTableKeyIterator(_LinkedHashTable<K> hashTable): super(hashTable); | |
1427 | |
1428 K _getCurrent(int offset) => _hashTable._key(offset); | |
1429 } | |
1430 | |
1431 class _LinkedHashTableValueIterable<V> extends IterableBase<V> { | |
1432 final _LinkedHashTable _hashTable; | |
1433 final int _valueIndex; | |
1434 _LinkedHashTableValueIterable(this._hashTable, this._valueIndex); | |
1435 Iterator<V> get iterator => | |
1436 new _LinkedHashTableValueIterator<V>(_hashTable, _valueIndex); | |
1437 int get length => _hashTable._elementCount; | |
1438 } | |
1439 | |
1440 class _LinkedHashTableValueIterator<V> extends _LinkedHashTableIterator<V> { | |
1441 final int _valueIndex; | |
1442 | |
1443 _LinkedHashTableValueIterator(_LinkedHashTable hashTable, this._valueIndex) | |
1444 : super(hashTable); | |
1445 | |
1446 V _getCurrent(int offset) => _hashTable._table[offset + _valueIndex]; | |
1447 } | |
1448 | |
1449 abstract class _LinkedHashTableIterator<T> implements Iterator<T> { | |
1450 final _LinkedHashTable _hashTable; | |
1451 final int _modificationCount; | |
1452 int _offset; | |
1453 T _current; | |
1454 | |
1455 _LinkedHashTableIterator(_LinkedHashTable table) | |
1456 : _hashTable = table, | |
1457 _modificationCount = table._modificationCount, | |
1458 _offset = table._next(_LinkedHashTable._HEAD_OFFSET); | |
1459 | |
1460 bool moveNext() { | |
1461 _hashTable._checkModification(_modificationCount); | |
1462 if (_offset == _LinkedHashTable._HEAD_OFFSET) { | |
1463 _current = null; | 1201 _current = null; |
1464 return false; | 1202 return false; |
1465 } | 1203 } |
1466 _current = _getCurrent(_offset); | 1204 _LinkedHashSetEntry entry = _next; |
1467 _offset = _hashTable._next(_offset); | 1205 _current = entry.key; |
1206 _next = entry._nextEntry; | |
1468 return true; | 1207 return true; |
1469 } | 1208 } |
1470 | 1209 |
1471 T _getCurrent(int offset); | 1210 E get current => _current; |
1472 | 1211 } |
1473 T get current => _current; | |
1474 } | |
1475 | |
1476 class _LinkedHashMapTable<K, V> extends _LinkedHashTable<K> { | |
1477 static const int _INITIAL_CAPACITY = 8; | |
1478 static const int _VALUE_INDEX = 3; | |
1479 | |
1480 int get _entrySize => 4; | |
1481 | |
1482 _LinkedHashMapTable() : super(_INITIAL_CAPACITY); | |
1483 | |
1484 V _value(int offset) => _table[offset + _VALUE_INDEX]; | |
1485 void _setValue(int offset, V value) { _table[offset + _VALUE_INDEX] = value; } | |
1486 | |
1487 _copyEntry(List oldTable, int fromOffset, int toOffset) { | |
1488 _table[toOffset + _VALUE_INDEX] = oldTable[fromOffset + _VALUE_INDEX]; | |
1489 } | |
1490 } | |