OLD | NEW |
---|---|
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 patch class HashMap<K, V> { | 5 patch class HashMap<K, V> { |
6 /* patch */ factory HashMap({ bool equals(K key1, K key2), | 6 /* patch */ factory HashMap({ bool equals(K key1, K key2), |
7 int hashCode(K key), | 7 int hashCode(K key), |
8 bool isValidKey(potentialKey) }) { | 8 bool isValidKey(potentialKey) }) { |
9 if (isValidKey == null) { | 9 if (isValidKey == null) { |
10 if (hashCode == null) { | 10 if (hashCode == null) { |
(...skipping 20 matching lines...) | |
31 } | 31 } |
32 | 32 |
33 const int _MODIFICATION_COUNT_MASK = 0x3fffffff; | 33 const int _MODIFICATION_COUNT_MASK = 0x3fffffff; |
34 | 34 |
35 class _HashMap<K, V> implements HashMap<K, V> { | 35 class _HashMap<K, V> implements HashMap<K, V> { |
36 static const int _INITIAL_CAPACITY = 8; | 36 static const int _INITIAL_CAPACITY = 8; |
37 | 37 |
38 Type get runtimeType => HashMap; | 38 Type get runtimeType => HashMap; |
39 | 39 |
40 int _elementCount = 0; | 40 int _elementCount = 0; |
41 List<_HashMapEntry> _buckets = new List(_INITIAL_CAPACITY); | 41 List<_HashMapEntry> _buckets = new List<_HashMapEntry>(_INITIAL_CAPACITY); |
Ivan Posva 2013/09/11 17:47:48: Have you had anybody from the VM team review these
Lasse Reichstein Nielsen 2013/09/12 05:09:36: No. I assume you would want to avoid the type par
Ivan Posva 2013/09/24 21:16:47: Yes. And it seems that they have been remove by no
42 int _modificationCount = 0; | 42 int _modificationCount = 0; |
43 | 43 |
44 int get length => _elementCount; | 44 int get length => _elementCount; |
45 bool get isEmpty => _elementCount == 0; | 45 bool get isEmpty => _elementCount == 0; |
46 bool get isNotEmpty => _elementCount != 0; | 46 bool get isNotEmpty => _elementCount != 0; |
47 | 47 |
48 Iterable<K> get keys => new _HashMapKeyIterable<K>(this); | 48 Iterable<K> get keys => new _HashMapKeyIterable<K>(this); |
49 Iterable<V> get values => new _HashMapValueIterable<V>(this); | 49 Iterable<V> get values => new _HashMapValueIterable<V>(this); |
50 | 50 |
51 bool containsKey(Object key) { | 51 bool containsKey(Object key) { |
(...skipping 111 matching lines...) | |
163 return entry.value; | 163 return entry.value; |
164 } | 164 } |
165 previous = entry; | 165 previous = entry; |
166 entry = next; | 166 entry = next; |
167 } | 167 } |
168 return null; | 168 return null; |
169 } | 169 } |
170 | 170 |
171 void clear() { | 171 void clear() { |
172 _elementCount = 0; | 172 _elementCount = 0; |
173 _buckets = new List(_INITIAL_CAPACITY); | 173 _buckets = new List<_HashMapEntry>(_INITIAL_CAPACITY); |
174 _modificationCount = (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; | 174 _modificationCount = (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; |
175 } | 175 } |
176 | 176 |
177 void _removeEntry(_HashMapEntry entry, | 177 void _removeEntry(_HashMapEntry entry, |
178 _HashMapEntry previousInBucket, | 178 _HashMapEntry previousInBucket, |
179 int bucketIndex) { | 179 int bucketIndex) { |
180 if (previousInBucket == null) { | 180 if (previousInBucket == null) { |
181 _buckets[bucketIndex] = entry.next; | 181 _buckets[bucketIndex] = entry.next; |
182 } else { | 182 } else { |
183 previousInBucket.next = entry.next; | 183 previousInBucket.next = entry.next; |
(...skipping 10 matching lines...) | |
194 // If we end up with more than 75% non-empty entries, we | 194 // If we end up with more than 75% non-empty entries, we |
195 // resize the backing store. | 195 // resize the backing store. |
196 if ((newElements << 2) > ((length << 1) + length)) _resize(); | 196 if ((newElements << 2) > ((length << 1) + length)) _resize(); |
197 _modificationCount = (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; | 197 _modificationCount = (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; |
198 } | 198 } |
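The shift expression in `_addEntry` above is the 75% load-factor check written with shifts instead of division: `(newElements << 2)` is `4 * newElements` and `((length << 1) + length)` is `3 * length`, so the test is `4 * n > 3 * len`, i.e. the table is more than three-quarters full. A minimal standalone sketch of the same test (illustrative names, not part of this CL):

    bool overThreeQuartersFull(int newElements, int length) {
      // 4 * newElements > 3 * length  <=>  newElements / length > 0.75
      return (newElements << 2) > ((length << 1) + length);
    }

    void main() {
      print(overThreeQuartersFull(7, 8));  // true: 7/8 > 0.75, so _resize()
      print(overThreeQuartersFull(6, 8));  // false: 6/8 == 0.75, no resize
    }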
199 | 199 |
200 void _resize() { | 200 void _resize() { |
201 List oldBuckets = _buckets; | 201 List oldBuckets = _buckets; |
202 int oldLength = oldBuckets.length; | 202 int oldLength = oldBuckets.length; |
203 int newLength = oldLength << 1; | 203 int newLength = oldLength << 1; |
204 List newBuckets = new List(newLength); | 204 List newBuckets = new List<_HashMapEntry>(newLength); |
205 for (int i = 0; i < oldLength; i++) { | 205 for (int i = 0; i < oldLength; i++) { |
206 _HashMapEntry entry = oldBuckets[i]; | 206 _HashMapEntry entry = oldBuckets[i]; |
207 while (entry != null) { | 207 while (entry != null) { |
208 _HashMapEntry next = entry.next; | 208 _HashMapEntry next = entry.next; |
209 int hashCode = entry.hashCode; | 209 int hashCode = entry.hashCode; |
210 int index = hashCode & (newLength - 1); | 210 int index = hashCode & (newLength - 1); |
211 entry.next = newBuckets[index]; | 211 entry.next = newBuckets[index]; |
212 newBuckets[index] = entry; | 212 newBuckets[index] = entry; |
213 entry = next; | 213 entry = next; |
214 } | 214 } |
(...skipping 282 matching lines...) | |
497 | 497 |
498 class _HashMapValueIterator<V> extends _HashMapIterator<V> { | 498 class _HashMapValueIterator<V> extends _HashMapIterator<V> { |
499 _HashMapValueIterator(HashMap map) : super(map); | 499 _HashMapValueIterator(HashMap map) : super(map); |
500 V get current { | 500 V get current { |
501 _HashMapEntry entry = _entry; | 501 _HashMapEntry entry = _entry; |
502 return (entry == null) ? null : entry.value; | 502 return (entry == null) ? null : entry.value; |
503 } | 503 } |
504 } | 504 } |
505 | 505 |
506 patch class HashSet<E> { | 506 patch class HashSet<E> { |
507 /* patch */ factory HashSet({ bool equals(E e1, E e2), | |
508 int hashCode(E e), | |
509 bool isValidKey(potentialKey) }) { | |
510 if (isValidKey == null) { | |
511 if (hashCode == null) { | |
512 if (equals == null) { | |
513 return new _HashSet<E>(); | |
514 } | |
515 if (identical(identical, equals)) { | |
516 return new _IdentityHashSet<E>(); | |
517 } | |
518 hashCode = _defaultHashCode; | |
519 } else if (equals == null) { | |
520 equals = _defaultEquals; | |
521 } | |
522 isValidKey = new _TypeTest<E>().test; | |
523 } else { | |
524 if (hashCode == null) hashCode = _defaultHashCode; | |
525 if (equals == null) equals = _defaultEquals; | |
526 } | |
527 return new _CustomHashSet<E>(equals, hashCode, isValidKey); | |
528 } | |
529 } | |
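The new factory dispatches to one of three implementations: no callbacks gives `_HashSet`, `equals: identical` gives `_IdentityHashSet`, and any other combination is normalized and handed to `_CustomHashSet`. A caller-side sketch of what each branch produces (assumed usage, not taken from this CL):

    import 'dart:collection';

    void main() {
      // Default == / hashCode: plain _HashSet.
      var plain = new HashSet<int>();

      // equals: identical selects the identity-based implementation.
      var byIdentity = new HashSet<Object>(equals: identical);

      // Custom callbacks: a case-insensitive string set via _CustomHashSet.
      var caseInsensitive = new HashSet<String>(
          equals: (a, b) => a.toLowerCase() == b.toLowerCase(),
          hashCode: (s) => s.toLowerCase().hashCode);
      caseInsensitive..add('Dart')..add('dart');
      print(caseInsensitive.length);  // 1
    }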
530 | |
531 class _HashSet<E> extends _HashSetBase<E> implements HashSet<E> { | |
507 static const int _INITIAL_CAPACITY = 8; | 532 static const int _INITIAL_CAPACITY = 8; |
508 final _HashTable<E> _table; | 533 |
509 | 534 List<_HashSetEntry> _buckets = new List<_HashSetEntry>(_INITIAL_CAPACITY); |
510 /* patch */ HashSet() : _table = new _HashTable(_INITIAL_CAPACITY) { | 535 int _elementCount = 0; |
511 _table._container = this; | 536 int _modificationCount = 0; |
512 } | 537 |
513 | 538 bool _equals(e1, e2) => e1 == e2; |
514 factory HashSet.from(Iterable<E> iterable) { | 539 int _hashCode(e) => e.hashCode; |
515 return new HashSet<E>()..addAll(iterable); | |
516 } | |
517 | 540 |
518 // Iterable. | 541 // Iterable. |
519 /* patch */ Iterator<E> get iterator => new _HashTableKeyIterator<E>(_table); | 542 Iterator<E> get iterator => new _HashSetIterator<E>(this); |
520 | 543 |
521 /* patch */ int get length => _table._elementCount; | 544 int get length => _elementCount; |
522 | 545 |
523 /* patch */ bool get isEmpty => _table._elementCount == 0; | 546 bool get isEmpty => _elementCount == 0; |
524 | 547 |
525 /* patch */ bool get isNotEmpty => !isEmpty; | 548 bool get isNotEmpty => _elementCount != 0; |
526 | 549 |
527 /* patch */ bool contains(Object object) => _table._get(object) >= 0; | 550 bool contains(Object object) { |
528 | 551 int index = _hashCode(object) & (_buckets.length - 1); |
529 // Collection. | 552 _HashSetEntry entry = _buckets[index]; |
530 /* patch */ void add(E element) { | 553 while (entry != null) { |
531 _table._put(element); | 554 if (_equals(entry.key, object)) return true; |
532 _table._checkCapacity(); | 555 entry = entry.next; |
533 } | 556 } |
534 | 557 return false; |
535 /* patch */ void addAll(Iterable<E> objects) { | 558 } |
559 | |
560 // Set | |
561 | |
562 void _add(E element) { | |
563 int hashCode = _hashCode(element); | |
564 int index = hashCode & (_buckets.length - 1); | |
565 _HashSetEntry entry = _buckets[index]; | |
566 while (entry != null) { | |
567 if (_equals(entry.key, element)) return; | |
568 entry = entry.next; | |
569 } | |
570 _addEntry(element, hashCode, index); | |
571 } | |
572 | |
573 void add(E element) { | |
574 _add(element); | |
575 } | |
576 | |
577 void addAll(Iterable<E> objects) { | |
578 int ctr = 0; | |
536 for (E object in objects) { | 579 for (E object in objects) { |
537 _table._put(object); | 580 ctr++; |
538 _table._checkCapacity(); | 581 _add(object); |
539 } | 582 } |
540 } | 583 } |
541 | 584 |
542 /* patch */ bool remove(Object object) { | 585 bool _remove(Object object, int hashCode) { |
543 int offset = _table._remove(object); | 586 int index = hashCode & (_buckets.length - 1); |
544 _table._checkCapacity(); | 587 _HashSetEntry entry = _buckets[index]; |
545 return offset >= 0; | 588 _HashSetEntry previous = null; |
546 } | 589 while (entry != null) { |
547 | 590 if (_equals(entry.key, object)) { |
548 /* patch */ void removeAll(Iterable<Object> objectsToRemove) { | 591 _HashSetEntry next = entry.remove(); |
592 if (previous == null) { | |
593 _buckets[index] = next; | |
594 } else { | |
595 previous.next = next; | |
596 } | |
597 _elementCount--; | |
598 _modificationCount = | |
599 (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; | |
600 return true; | |
601 } | |
602 previous = entry; | |
603 entry = entry.next; | |
604 } | |
605 return false; | |
606 } | |
607 | |
608 bool remove(Object object) => _remove(object, _hashCode(object)); | |
609 | |
610 void removeAll(Iterable<Object> objectsToRemove) { | |
549 for (Object object in objectsToRemove) { | 611 for (Object object in objectsToRemove) { |
550 _table._remove(object); | 612 _remove(object, _hashCode(object)); |
551 _table._checkCapacity(); | |
552 } | 613 } |
553 } | 614 } |
554 | 615 |
555 void _filterWhere(bool test(E element), bool removeMatching) { | 616 void _filterWhere(bool test(E element), bool removeMatching) { |
556 int entrySize = _table._entrySize; | 617 int length = _buckets.length; |
557 int length = _table._table.length; | 618 for (int index = 0; index < length; index++) { |
558 for (int offset = 0; offset < length; offset += entrySize) { | 619 _HashSetEntry entry = _buckets[index]; |
559 Object entry = _table._table[offset]; | 620 _HashSetEntry previous = null; |
560 if (!_table._isFree(entry)) { | 621 while (entry != null) { |
561 E key = identical(entry, _NULL) ? null : entry; | 622 int modificationCount = _modificationCount; |
562 int modificationCount = _table._modificationCount; | 623 bool testResult = test(entry.key); |
563 bool shouldRemove = (removeMatching == test(key)); | 624 if (modificationCount != _modificationCount) { |
564 _table._checkModification(modificationCount); | 625 throw new ConcurrentModificationError(this); |
565 if (shouldRemove) { | 626 } |
566 _table._deleteEntry(offset); | 627 if (testResult == removeMatching) { |
567 } | 628 _HashSetEntry next = entry.remove(); |
568 } | 629 if (previous == null) { |
569 } | 630 _buckets[index] = next; |
570 _table._checkCapacity(); | 631 } else { |
571 } | 632 previous.next = next; |
572 | 633 } |
573 /* patch */ void removeWhere(bool test(E element)) { | 634 _elementCount--; |
635 _modificationCount = | |
636 (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; | |
637 entry = next; | |
638 } else { | |
639 previous = entry; | |
640 entry = entry.next; | |
641 } | |
642 } | |
643 } | |
644 } | |
645 | |
646 void removeWhere(bool test(E element)) { | |
574 _filterWhere(test, true); | 647 _filterWhere(test, true); |
575 } | 648 } |
576 | 649 |
577 /* patch */ void retainWhere(bool test(E element)) { | 650 void retainWhere(bool test(E element)) { |
578 _filterWhere(test, false); | 651 _filterWhere(test, false); |
579 } | 652 } |
580 | 653 |
581 /* patch */ void clear() { | 654 void clear() { |
582 _table._clear(); | 655 _elementCount = 0; |
583 } | 656 _buckets = new List<HashSetEntry>(_INITIAL_CAPACITY); |
657 _modificationCount++; | |
658 } | |
659 | |
660 void _addEntry(E key, int hashCode, int index) { | |
661 _buckets[index] = new _HashSetEntry(key, hashCode, _buckets[index]); | |
662 int newElements = _elementCount + 1; | |
663 _elementCount = newElements; | |
664 int length = _buckets.length; | |
665 // If we end up with more than 75% non-empty entries, we | |
666 // resize the backing store. | |
667 if ((newElements << 2) > ((length << 1) + length)) _resize(); | |
668 _modificationCount = (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; | |
669 } | |
670 | |
671 void _resize() { | |
672 int oldLength = _buckets.length; | |
673 int newLength = oldLength << 1; | |
674 List oldBuckets = _buckets; | |
675 List newBuckets = new List<_HashSetEntry>(newLength); | |
676 for (int i = 0; i < oldLength; i++) { | |
677 _HashSetEntry entry = oldBuckets[i]; | |
678 while (entry != null) { | |
679 _HashSetEntry next = entry.next; | |
680 int newIndex = entry.hashCode & (newLength - 1); | |
681 entry.next = newBuckets[newIndex]; | |
682 newBuckets[newIndex] = entry; | |
683 entry = next; | |
684 } | |
685 } | |
686 _buckets = newBuckets; | |
687 } | |
688 | |
689 HashSet<E> _newSet() => new _HashSet<E>(); | |
690 } | |
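`_resize` doubles the bucket array and redistributes entries with `hashCode & (newLength - 1)`. Because the bucket count is always a power of two, that mask selects the same bucket as a modulo would, without a division. A small sketch of the equivalence (illustrative, not part of this CL):

    void main() {
      const int length = 16;        // bucket count, always a power of two
      const int mask = length - 1;  // 0x0F
      for (int hash in [3, 19, 12345, 0x7FFFFFFF]) {
        print((hash & mask) == (hash % length));  // true for each value
      }
    }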
691 | |
692 class _IdentityHashSet<E> extends _HashSet<E> { | |
693 bool _equals(e1, e2) => identical(e1, e2); | |
694 HashSet<E> _newSet() => new _IdentityHashSet<E>(); | |
695 } | |
696 | |
697 class _CustomHashSet<E> extends _HashSet<E> { | |
698 final _Equality<E> _equality; | |
699 final _Hasher<E> _hasher; | |
700 final _Predicate _validKey; | |
701 _CustomHashSet(this._equality, this._hasher, this._validKey); | |
702 | |
703 E operator[](Object key) { | |
704 if (!_validKey(key)) return null; | |
705 return super[key]; | |
706 } | |
707 | |
708 bool remove(Object key) { | |
709 if (!_validKey(key)) return false; | |
710 return super.remove(key); | |
711 } | |
712 | |
713 bool containsKey(Object key) { | |
714 if (!_validKey(key)) return false; | |
715 return super.containsKey(key); | |
716 } | |
717 | |
718 bool _equals(e1, e2) => _equality(e1, e2); | |
719 int _hashCode(e) => _hasher(e); | |
720 | |
721 HashSet<E> _newSet() => new _CustomHashSet<E>(_equality, _hasher, _validKey); | |
722 } | |
723 | |
724 class _HashSetEntry { | |
725 final key; | |
726 final int hashCode; | |
727 _HashSetEntry next; | |
728 _HashSetEntry(this.key, this.hashCode, this.next); | |
729 | |
730 _HashSetEntry remove() { | |
731 _HashSetEntry result = next; | |
732 next = null; | |
733 return result; | |
734 } | |
735 } | |
736 | |
737 class _HashSetIterator<E> implements Iterator<E> { | |
738 final _HashSet _set; | |
739 final int _modificationCount; | |
740 int _index = 0; | |
741 _HashSetEntry _next = null; | |
742 E _current = null; | |
743 | |
744 _HashSetIterator(_HashSet hashSet) | |
745 : _set = hashSet, _modificationCount = hashSet._modificationCount; | |
746 | |
747 bool moveNext() { | |
748 if (_modificationCount != _set._modificationCount) { | |
749 throw new ConcurrentModificationError(_set); | |
750 } | |
751 if (_next != null) { | |
752 _current = _next.key; | |
753 _next = _next.next; | |
754 return true; | |
755 } | |
756 List<_HashSetEntry> buckets = _set._buckets; | |
757 while (_index < buckets.length) { | |
758 _next = buckets[_index]; | |
759 _index = _index + 1; | |
760 if (_next != null) { | |
761 _current = _next.key; | |
762 _next = _next.next; | |
763 return true; | |
764 } | |
765 } | |
766 _current = null; | |
767 return false; | |
768 } | |
769 | |
770 E get current => _current; | |
584 } | 771 } |
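`_HashSetIterator` captures `_modificationCount` when it is created and re-checks it on every `moveNext`, so any structural change to the set during iteration fails fast instead of producing a stale or partial walk. A caller-side sketch of that failure mode (assumed usage, not from this CL):

    import 'dart:collection';

    void main() {
      var set = new HashSet<int>()..addAll([1, 2, 3]);
      try {
        for (var element in set) {
          if (element.isOdd) set.remove(element);  // mutates during iteration
        }
      } on ConcurrentModificationError {
        print('modification during iteration was detected');
      }
    }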
585 | 772 |
586 class _LinkedHashMapEntry extends _HashMapEntry { | 773 class _LinkedHashMapEntry extends _HashMapEntry { |
774 /// Double-linked list of entries of a linked hash map. | |
775 /// The _LinkedHashMap itself is the head of the list, so the type is "var". | |
776 /// Both are initialized to `this` when initialized. | |
587 var _nextEntry; | 777 var _nextEntry; |
588 var _previousEntry; | 778 var _previousEntry; |
589 _LinkedHashMapEntry(key, value, int hashCode, _LinkedHashMapEntry next, | 779 _LinkedHashMapEntry(key, value, int hashCode, _LinkedHashMapEntry next, |
590 this._previousEntry, this._nextEntry) | 780 this._previousEntry, this._nextEntry) |
591 : super(key, value, hashCode, next) { | 781 : super(key, value, hashCode, next) { |
592 _previousEntry._nextEntry = this; | 782 _previousEntry._nextEntry = this; |
593 _nextEntry._previousEntry = this; | 783 _nextEntry._previousEntry = this; |
594 } | 784 } |
595 } | 785 } |
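`_LinkedHashMapEntry` keeps the insertion-order chain as a circular doubly-linked list whose head is the map object itself, so an empty chain is one whose head points back at itself and linking or unlinking needs no null checks. A minimal standalone sketch of that sentinel-head pattern (illustrative classes, not the CL's types):

    class _Node {
      var _nextEntry;
      var _previousEntry;
    }

    class _Chain extends _Node {
      _Chain() {
        _nextEntry = _previousEntry = this;  // empty list: head links to itself
      }

      bool get isEmpty => identical(_nextEntry, this);

      void addLast(_Node node) {
        node._previousEntry = _previousEntry;  // old last element
        node._nextEntry = this;                // head closes the circle
        _previousEntry._nextEntry = node;
        _previousEntry = node;
      }
    }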
596 | 786 |
(...skipping 55 matching lines...) | |
652 class _LinkedHashMapValueIterator<V> extends _LinkedHashMapIterator<V> { | 842 class _LinkedHashMapValueIterator<V> extends _LinkedHashMapIterator<V> { |
653 _LinkedHashMapValueIterator(LinkedHashMap map) : super(map); | 843 _LinkedHashMapValueIterator(LinkedHashMap map) : super(map); |
654 V _getValue(_LinkedHashMapEntry entry) => entry.value; | 844 V _getValue(_LinkedHashMapEntry entry) => entry.value; |
655 } | 845 } |
656 | 846 |
657 | 847 |
658 /** | 848 /** |
659 * A hash-based map that iterates keys and values in key insertion order. | 849 * A hash-based map that iterates keys and values in key insertion order. |
660 */ | 850 */ |
661 patch class LinkedHashMap<K, V> { | 851 patch class LinkedHashMap<K, V> { |
852 /// Holds a double-linked list of entries in insertion order. | |
853 /// The fields have the same name as the ones in [_LinkedHashMapEntry], | |
854 /// and this map is itself used as the head entry of the list. | |
855 /// Set to `this` when initialized, representing the empty list (containing | |
856 /// only the head entry itself). | |
662 var _nextEntry; | 857 var _nextEntry; |
663 var _previousEntry; | 858 var _previousEntry; |
664 | 859 |
665 /* patch */ factory LinkedHashMap({ bool equals(K key1, K key2), | 860 /* patch */ factory LinkedHashMap({ bool equals(K key1, K key2), |
666 int hashCode(K key), | 861 int hashCode(K key), |
667 bool isValidKey(potentialKey) }) { | 862 bool isValidKey(potentialKey) }) { |
668 if (isValidKey == null) { | 863 if (isValidKey == null) { |
669 if (hashCode == null) { | 864 if (hashCode == null) { |
670 if (equals == null) { | 865 if (equals == null) { |
671 return new _LinkedHashMap<K, V>(); | 866 return new _LinkedHashMap<K, V>(); |
(...skipping 59 matching lines...) | |
731 } | 926 } |
732 | 927 |
733 void _addEntry(List buckets, int index, int length, | 928 void _addEntry(List buckets, int index, int length, |
734 K key, V value, int hashCode) { | 929 K key, V value, int hashCode) { |
735 _HashMapEntry entry = | 930 _HashMapEntry entry = |
736 new _LinkedHashMapEntry(key, value, hashCode, buckets[index], | 931 new _LinkedHashMapEntry(key, value, hashCode, buckets[index], |
737 _previousEntry, this); | 932 _previousEntry, this); |
738 buckets[index] = entry; | 933 buckets[index] = entry; |
739 int newElements = _elementCount + 1; | 934 int newElements = _elementCount + 1; |
740 _elementCount = newElements; | 935 _elementCount = newElements; |
936 | |
741 // If we end up with more than 75% non-empty entries, we | 937 // If we end up with more than 75% non-empty entries, we |
742 // resize the backing store. | 938 // resize the backing store. |
743 if ((newElements << 2) > ((length << 1) + length)) _resize(); | 939 if ((newElements << 2) > ((length << 1) + length)) _resize(); |
744 _modificationCount = (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; | 940 _modificationCount = (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; |
745 } | 941 } |
746 | 942 |
747 void _removeEntry(_LinkedHashMapEntry entry, | 943 void _removeEntry(_LinkedHashMapEntry entry, |
748 _HashMapEntry previousInBucket, | 944 _HashMapEntry previousInBucket, |
749 int bucketIndex) { | 945 int bucketIndex) { |
750 var previousInChain = entry._previousEntry; | 946 var previousInChain = entry._previousEntry; |
(...skipping 30 matching lines...) | |
781 with _LinkedHashMapMixin<K, V> { | 977 with _LinkedHashMapMixin<K, V> { |
782 _LinkedCustomHashMap(bool equals(K key1, K key2), | 978 _LinkedCustomHashMap(bool equals(K key1, K key2), |
783 int hashCode(K key), | 979 int hashCode(K key), |
784 bool isValidKey(potentialKey)) | 980 bool isValidKey(potentialKey)) |
785 : super(equals, hashCode, isValidKey) { | 981 : super(equals, hashCode, isValidKey) { |
786 _nextEntry = _previousEntry = this; | 982 _nextEntry = _previousEntry = this; |
787 } | 983 } |
788 } | 984 } |
789 | 985 |
790 | 986 |
791 patch class LinkedHashSet<E> extends _HashSetBase<E> { | 987 patch class LinkedHashSet<E> { |
792 static const int _INITIAL_CAPACITY = 8; | 988 /* patch */ factory LinkedHashSet({ bool equals(E e1, E e2), |
793 _LinkedHashTable<E> _table; | 989 int hashCode(E e), |
990 bool isValidKey(potentialKey) }) { | |
991 if (isValidKey == null) { | |
992 if (hashCode == null) { | |
993 if (equals == null) { | |
994 return new _LinkedHashSet<E>(); | |
995 } | |
996 if (identical(identical, equals)) { | |
997 return new _LinkedIdentityHashSet<E>(); | |
998 } | |
999 hashCode = _defaultHashCode; | |
1000 } else if (equals == null) { | |
1001 equals = _defaultEquals; | |
1002 } | |
1003 isValidKey = new _TypeTest<E>().test; | |
1004 } else { | |
1005 if (hashCode == null) hashCode = _defaultHashCode; | |
1006 if (equals == null) equals = _defaultEquals; | |
1007 } | |
1008 return new _LinkedCustomHashSet<E>(equals, hashCode, isValidKey); | |
1009 } | |
1010 } | |
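Like the map patch above, the `LinkedHashSet` factory only chooses between `_LinkedHashSet`, `_LinkedIdentityHashSet` and `_LinkedCustomHashSet`; the ordering guarantee comes from the linked entry chain, not from the factory. A caller-side sketch of that guarantee (assumed usage, not from this CL):

    import 'dart:collection';

    void main() {
      var set = new LinkedHashSet<String>()..addAll(['b', 'a', 'c', 'a']);
      print(set.toList());  // [b, a, c] -- iteration follows insertion order
    }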
794 | 1011 |
795 /* patch */ LinkedHashSet() { | 1012 class _LinkedHashSetEntry extends _HashSetEntry { |
796 _table = new _LinkedHashTable(_INITIAL_CAPACITY); | 1013 /// Links this element into a double-linked list of elements of a hash set. |
797 _table._container = this; | 1014 /// The hash set object itself is used as the head entry of the list, so |
1015 /// the field is typed as "var". | |
1016 /// Both links are initialized to `this` when the object is created. | |
1017 var _nextEntry; | |
1018 var _previousEntry; | |
1019 _LinkedHashSetEntry(var key, int hashCode, _LinkedHashSetEntry next, | |
1020 this._previousEntry, this._nextEntry) | |
1021 : super(key, hashCode, next) { | |
1022 _previousEntry._nextEntry = _nextEntry._previousEntry = this; | |
1023 } | |
1024 | |
1025 _LinkedHashSetEntry remove() { | |
1026 _previousEntry._nextEntry = _nextEntry; | |
1027 _nextEntry._previousEntry = _previousEntry; | |
1028 _nextEntry = _previousEntry = this; | |
1029 return super.remove(); | |
1030 } | |
1031 } | |
1032 | |
1033 class _LinkedHashSet<E> extends _HashSet<E> | |
1034 implements LinkedHashSet<E> { | |
1035 /// Holds a double linked list of the element entries of the set in | |
1036 /// insertion order. | |
1037 /// The fields have the same names as the ones in [_LinkedHashSetEntry], | |
1038 /// allowing this object to be used as the head entry of the list. | |
1039 /// The fields are initialized to `this` when created, representing the | |
1040 /// empty list that only contains the head entry. | |
1041 var _nextEntry; | |
1042 var _previousEntry; | |
1043 | |
1044 _LinkedHashSet() { | |
1045 _nextEntry = _previousEntry = this; | |
798 } | 1046 } |
799 | 1047 |
800 // Iterable. | 1048 // Iterable. |
801 /* patch */ Iterator<E> get iterator { | 1049 Iterator<E> get iterator => new _LinkedHashSetIterator<E>(this); |
802 return new _LinkedHashTableKeyIterator<E>(_table); | |
803 } | |
804 | 1050 |
805 /* patch */ int get length => _table._elementCount; | 1051 void forEach(void action(E element)) { |
806 | 1052 var cursor = _nextEntry; |
807 /* patch */ bool get isEmpty => _table._elementCount == 0; | 1053 int modificationCount = _modificationCount; |
808 | 1054 while (!identical(cursor, this)) { |
809 /* patch */ bool get isNotEmpty => !isEmpty; | 1055 _LinkedHashSetEntry entry = cursor; |
810 | 1056 action(entry.key); |
811 /* patch */ bool contains(Object object) => _table._get(object) >= 0; | 1057 if (_modificationCount != modificationCount) { |
812 | 1058 throw new ConcurrentModificationError(this); |
813 /* patch */ void forEach(void action(E element)) { | 1059 } |
814 int offset = _table._next(_LinkedHashTable._HEAD_OFFSET); | 1060 cursor = entry._nextEntry; |
815 int modificationCount = _table._modificationCount; | |
816 while (offset != _LinkedHashTable._HEAD_OFFSET) { | |
817 E key = _table._key(offset); | |
818 action(key); | |
819 _table._checkModification(modificationCount); | |
820 offset = _table._next(offset); | |
821 } | 1061 } |
822 } | 1062 } |
823 | 1063 |
824 /* patch */ E get first { | 1064 E get first { |
825 int firstOffset = _table._next(_LinkedHashTable._HEAD_OFFSET); | 1065 if (identical(_nextEntry, this)) { |
826 if (firstOffset == _LinkedHashTable._HEAD_OFFSET) { | |
827 throw new StateError("No elements"); | 1066 throw new StateError("No elements"); |
828 } | 1067 } |
829 return _table._key(firstOffset); | 1068 _LinkedHashSetEntry entry = _nextEntry; |
1069 return entry.key; | |
830 } | 1070 } |
831 | 1071 |
832 /* patch */ E get last { | 1072 E get last { |
833 int lastOffset = _table._prev(_LinkedHashTable._HEAD_OFFSET); | 1073 if (identical(_previousEntry, this)) { |
834 if (lastOffset == _LinkedHashTable._HEAD_OFFSET) { | |
835 throw new StateError("No elements"); | 1074 throw new StateError("No elements"); |
836 } | 1075 } |
837 return _table._key(lastOffset); | 1076 _LinkedHashSetEntry entry = _previousEntry; |
1077 return entry.key; | |
838 } | 1078 } |
839 | 1079 |
840 // Collection. | 1080 // Set. |
841 void _filterWhere(bool test(E element), bool removeMatching) { | 1081 void _filterWhere(bool test(E element), bool removeMatching) { |
842 int entrySize = _table._entrySize; | 1082 var cursor = _nextEntry; |
843 int length = _table._table.length; | 1083 while (!identical(cursor, this)) { |
844 int offset = _table._next(_LinkedHashTable._HEAD_OFFSET); | 1084 _LinkedHashSetEntry entry = cursor; |
845 while (offset != _LinkedHashTable._HEAD_OFFSET) { | 1085 int modificationCount = _modificationCount; |
846 E key = _table._key(offset); | 1086 bool testResult = test(entry.key); |
847 int nextOffset = _table._next(offset); | 1087 if (modificationCount != _modificationCount) { |
848 int modificationCount = _table._modificationCount; | 1088 throw new ConcurrentModificationError(this); |
849 bool shouldRemove = (removeMatching == test(key)); | |
850 _table._checkModification(modificationCount); | |
851 if (shouldRemove) { | |
852 _table._deleteEntry(offset); | |
853 } | 1089 } |
854 offset = nextOffset; | 1090 cursor = entry._nextEntry; |
855 } | 1091 if (testResult == removeMatching) { |
856 _table._checkCapacity(); | 1092 _remove(entry.key, entry.hashCode); |
857 } | |
858 | |
859 /* patch */ void add(E element) { | |
860 _table._put(element); | |
861 _table._checkCapacity(); | |
862 } | |
863 | |
864 /* patch */ void addAll(Iterable<E> objects) { | |
865 for (E object in objects) { | |
866 _table._put(object); | |
867 _table._checkCapacity(); | |
868 } | |
869 } | |
870 | |
871 /* patch */ bool remove(Object object) { | |
872 int offset = _table._remove(object); | |
873 if (offset >= 0) { | |
874 _table._checkCapacity(); | |
875 return true; | |
876 } | |
877 return false; | |
878 } | |
879 | |
880 /* patch */ void removeAll(Iterable objectsToRemove) { | |
881 for (Object object in objectsToRemove) { | |
882 if (_table._remove(object) >= 0) { | |
883 _table._checkCapacity(); | |
884 } | 1093 } |
885 } | 1094 } |
886 } | 1095 } |
887 | 1096 |
888 /* patch */ void removeWhere(bool test(E element)) { | 1097 void _addEntry(E key, int hashCode, int index) { |
889 _filterWhere(test, true); | 1098 _buckets[index] = |
1099 new _LinkedHashSetEntry(key, hashCode, _buckets[index], | |
1100 _previousEntry, this); | |
1101 int newElements = _elementCount + 1; | |
1102 _elementCount = newElements; | |
1103 int length = _buckets.length; | |
1104 // If we end up with more than 75% non-empty entries, we | |
1105 // resize the backing store. | |
1106 if ((newElements << 2) > ((length << 1) + length)) _resize(); | |
1107 _modificationCount = (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; | |
890 } | 1108 } |
891 | 1109 |
892 /* patch */ void retainWhere(bool test(E element)) { | 1110 HashSet<E> _newSet() => new _LinkedHashSet<E>(); |
893 _filterWhere(test, false); | 1111 } |
1112 | |
1113 class _LinkedIdentityHashSet<E> extends _LinkedHashSet<E> { | |
1114 bool _equals(e1, e2) => identical(e1, e2); | |
1115 HashSet<E> _newSet() => new _LinkedIdentityHashSet<E>(); | |
1116 } | |
1117 | |
1118 class _LinkedCustomHashSet<E> extends _LinkedHashSet<E> { | |
1119 final _Equality<E> _equality; | |
1120 final _Hasher<E> _hasher; | |
1121 final _Predicate _validKey; | |
1122 | |
1123 _LinkedCustomHashSet(this._equality, this._hasher, bool validKey(object)) | |
1124 : _validKey = (validKey != null) ? validKey : new _TypeTest<E>().test; | |
1125 | |
1126 bool _equals(e1, e2) => _equality(e1, e2); | |
1127 | |
1128 int _hashCode(e) => _hasher(e); | |
1129 | |
1130 bool contains(Object o) { | |
1131 if (!_validKey(o)) return false; | |
1132 return super.contains(o); | |
894 } | 1133 } |
895 | 1134 |
896 /* patch */ void clear() { | 1135 bool remove(Object o) { |
897 _table._clear(); | 1136 if (!_validKey(o)) return false; |
1137 return super.remove(o); | |
898 } | 1138 } |
1139 | |
1140 E operator[](Object o) { | |
1141 if (!_validKey(o)) return null; | |
1142 return super[o]; | |
1143 } | |
1144 | |
1145 HashSet<E> _newSet() => | |
1146 new _LinkedCustomHashSet<E>(_equality, _hasher, _validKey); | |
899 } | 1147 } |
900 | 1148 |
901 class _DeadEntry { | 1149 class _LinkedHashSetIterator<E> implements Iterator<E> { |
902 const _DeadEntry(); | 1150 final _LinkedHashSet _set; |
903 } | 1151 final int _modificationCount; |
1152 var _next; | |
1153 E _current; | |
904 | 1154 |
905 class _NullKey { | 1155 _LinkedHashSetIterator(_LinkedHashSet hashSet) |
906 const _NullKey(); | 1156 : _set = hashSet, |
907 int get hashCode => null.hashCode; | 1157 _modificationCount = hashSet._modificationCount, |
908 } | 1158 _next = hashSet._nextEntry; |
909 | |
910 const _TOMBSTONE = const _DeadEntry(); | |
911 const _NULL = const _NullKey(); | |
912 | |
913 class _HashTable<K> { | |
914 /** | |
915 * Table of entries with [_entrySize] slots per entry. | |
916 * | |
917 * Capacity in entries must be factor of two. | |
918 */ | |
919 List _table; | |
920 /** Current capacity. Always equal to [:_table.length ~/ _entrySize:]. */ | |
921 int _capacity; | |
922 /** Count of occupied entries, including deleted ones. */ | |
923 int _entryCount = 0; | |
924 /** Count of deleted entries. */ | |
925 int _deletedCount = 0; | |
926 /** Counter incremented when table is modified. */ | |
927 int _modificationCount = 0; | |
928 /** If set, used as the source object for [ConcurrentModificationError]s. */ | |
929 Object _container; | |
930 | |
931 _HashTable(int initialCapacity) : _capacity = initialCapacity { | |
932 _table = _createTable(initialCapacity); | |
933 } | |
934 | |
935 /** Reads key from table. Converts _NULL marker to null. */ | |
936 Object _key(offset) { | |
937 assert(!_isFree(_table[offset])); | |
938 Object key = _table[offset]; | |
939 if (!identical(key, _NULL)) return key; | |
940 return null; | |
941 } | |
942 | |
943 /** Writes key to table. Converts null to _NULL marker. */ | |
944 void _setKey(int offset, Object key) { | |
945 if (key == null) key = _NULL; | |
946 _table[offset] = key; | |
947 } | |
948 | |
949 int get _elementCount => _entryCount - _deletedCount; | |
950 | |
951 /** Size of each entry. */ | |
952 int get _entrySize => 1; | |
953 | |
954 void _checkModification(int expectedModificationCount) { | |
955 if (_modificationCount != expectedModificationCount) { | |
956 throw new ConcurrentModificationError(_container); | |
957 } | |
958 } | |
959 | |
960 void _recordModification() { | |
961 // Value cycles after 2^30 modifications. If you keep hold of an | |
962 // iterator for that long, you might miss a modification detection, | |
963 // and iteration can go sour. Don't do that. | |
964 _modificationCount = (_modificationCount + 1) & (0x3FFFFFFF); | |
965 } | |
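The comment above explains why the counter is masked: `0x3FFFFFFF` is `2^30 - 1`, so the counter stays a cheap small integer and simply wraps around instead of growing without bound. A tiny sketch of the wrap-around (illustrative, not part of the file):

    void main() {
      const int mask = 0x3FFFFFFF;  // 2^30 - 1
      int count = mask;             // largest value the counter takes
      count = (count + 1) & mask;
      print(count);                 // 0 -- the counter wrapped around
    }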
966 | |
967 /** | |
968 * Create an empty table. | |
969 */ | |
970 List _createTable(int capacity) { | |
971 List table = new List(capacity * _entrySize); | |
972 return table; | |
973 } | |
974 | |
975 /** First table probe. */ | |
976 int _firstProbe(int hashCode, int capacity) { | |
977 return hashCode & (capacity - 1); | |
978 } | |
979 | |
980 /** Following table probes. */ | |
981 int _nextProbe(int previousIndex, int probeCount, int capacity) { | |
982 // When capacity is a power of 2, this probing algorithm (the triangular | |
983 // number sequence modulo capacity) is guaranteed to hit all indices exactly | |
984 // once before repeating. | |
985 return (previousIndex + probeCount) & (capacity - 1); | |
986 } | |
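The comment claims that the triangular-number probe sequence visits every index exactly once per cycle when the capacity is a power of two. A quick empirical check of that claim (illustrative, not part of the file):

    void main() {
      const int capacity = 16;  // any power of two
      int index = 7;            // arbitrary first probe
      var seen = new Set<int>();
      for (int probeCount = 1; seen.length < capacity; probeCount++) {
        seen.add(index);
        index = (index + probeCount) & (capacity - 1);
      }
      print(seen.length);       // 16 -- every bucket index was reached
    }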
987 | |
988 /** Whether an object is a free-marker (either tombstone or free). */ | |
989 bool _isFree(Object marker) => | |
990 marker == null || identical(marker, _TOMBSTONE); | |
991 | |
992 /** | |
993 * Look up the offset for an object in the table. | |
994 * | |
995 * Finds the offset of the object in the table, if it is there, | |
996 * or the first free offset for its hashCode. | |
997 */ | |
998 int _probeForAdd(int hashCode, Object object) { | |
999 int entrySize = _entrySize; | |
1000 int index = _firstProbe(hashCode, _capacity); | |
1001 int firstTombstone = -1; | |
1002 int probeCount = 0; | |
1003 while (true) { | |
1004 int offset = index * entrySize; | |
1005 Object entry = _table[offset]; | |
1006 if (identical(entry, _TOMBSTONE)) { | |
1007 if (firstTombstone < 0) firstTombstone = offset; | |
1008 } else if (entry == null) { | |
1009 if (firstTombstone < 0) return offset; | |
1010 return firstTombstone; | |
1011 } else if (identical(_NULL, entry) ? _equals(null, object) | |
1012 : _equals(entry, object)) { | |
1013 return offset; | |
1014 } | |
1015 // The _nextProbe is designed so that it hits | |
1016 // every index eventually. | |
1017 index = _nextProbe(index, ++probeCount, _capacity); | |
1018 } | |
1019 } | |
1020 | |
1021 /** | |
1022 * Look up the offset for an object in the table. | |
1023 * | |
1024 * If the object is in the table, its offset is returned. | |
1025 * | |
1026 * If the object is not in the table, a negative value is returned. | |
1027 */ | |
1028 int _probeForLookup(int hashCode, Object object) { | |
1029 int entrySize = _entrySize; | |
1030 int index = _firstProbe(hashCode, _capacity); | |
1031 int probeCount = 0; | |
1032 while (true) { | |
1033 int offset = index * entrySize; | |
1034 Object entry = _table[offset]; | |
1035 if (entry == null) { | |
1036 return -1; | |
1037 } else if (!identical(_TOMBSTONE, entry)) { | |
1038 if (identical(_NULL, entry) ? _equals(null, object) | |
1039 : _equals(entry, object)) { | |
1040 return offset; | |
1041 } | |
1042 } | |
1043 // The _nextProbe is designed so that it hits | |
1044 // every index eventually. | |
1045 index = _nextProbe(index, ++probeCount, _capacity); | |
1046 } | |
1047 } | |
1048 | |
1049 // Override the following two to change equality/hashCode computations | |
1050 | |
1051 /** | |
1052 * Compare two object for equality. | |
1053 * | |
1054 * The first object is the one already in the table, | |
1055 * and the second is the one being searched for. | |
1056 */ | |
1057 bool _equals(Object element, Object other) { | |
1058 return element == other; | |
1059 } | |
1060 | |
1061 /** | |
1062 * Compute hash-code for an object. | |
1063 */ | |
1064 int _hashCodeOf(Object object) => object.hashCode; | |
1065 | |
1066 /** | |
1067 * Ensure that the table isn't too full for its own good. | |
1068 * | |
1069 * Call this after adding an element. | |
1070 */ | |
1071 int _checkCapacity() { | |
1072 // Compute everything in multiples of entrySize to avoid division. | |
1073 int freeCount = _capacity - _entryCount; | |
1074 if (freeCount * 4 < _capacity || | |
1075 freeCount < _deletedCount) { | |
1076 // Less than 25% free or more deleted entries than free entries. | |
1077 _grow(_entryCount - _deletedCount); | |
1078 } | |
1079 } | |
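`_checkCapacity` grows the table when the free slots drop below 25% of the capacity, or when tombstones (deleted entries) outnumber the free slots; both conditions are expressed without division. The same condition as a named predicate (illustrative, not part of the file):

    bool needsGrow(int capacity, int entryCount, int deletedCount) {
      int freeCount = capacity - entryCount;
      return freeCount * 4 < capacity     // less than 25% of the slots are free
          || freeCount < deletedCount;    // more tombstones than free slots
    }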
1080 | |
1081 void _grow(int contentCount) { | |
1082 int capacity = _capacity; | |
1083 // Don't grow to less than twice the needed capacity. | |
1084 int minCapacity = contentCount * 2; | |
1085 while (capacity < minCapacity) { | |
1086 capacity *= 2; | |
1087 } | |
1088 // Reset to another table and add all existing elements. | |
1089 List oldTable = _table; | |
1090 _table = _createTable(capacity); | |
1091 _capacity = capacity; | |
1092 _entryCount = 0; | |
1093 _deletedCount = 0; | |
1094 _addAllEntries(oldTable); | |
1095 _recordModification(); | |
1096 } | |
1097 | |
1098 /** | |
1099 * Copies all non-free entries from the old table to the new empty table. | |
1100 */ | |
1101 void _addAllEntries(List oldTable) { | |
1102 for (int i = 0; i < oldTable.length; i += _entrySize) { | |
1103 Object object = oldTable[i]; | |
1104 if (!_isFree(object)) { | |
1105 int toOffset = _put(object); | |
1106 _copyEntry(oldTable, i, toOffset); | |
1107 } | |
1108 } | |
1109 } | |
1110 | |
1111 /** | |
1112 * Copies everything but the key element from one entry to another. | |
1113 * | |
1114 * Called while growing the base array. | |
1115 * | |
1116 * Override this if any non-key fields need copying. | |
1117 */ | |
1118 void _copyEntry(List fromTable, int fromOffset, int toOffset) {} | |
1119 | |
1120 // The following three methods are for simple get/set/remove operations. | |
1121 // They only affect the key of an entry. The remaining fields must be | |
1122 // filled by the caller. | |
1123 | |
1124 /** | |
1125 * Returns the offset of a key in [_table], or negative if it's not there. | |
1126 */ | |
1127 int _get(Object key) { | |
1128 return _probeForLookup(_hashCodeOf(key), key); | |
1129 } | |
1130 | |
1131 /** | |
1132 * Puts the key into the table and returns its offset into [_table]. | |
1133 * | |
1134 * If [_entrySize] is greater than 1, the caller should fill the | |
1135 * remaining fields. | |
1136 * | |
1137 * Remember to call [_checkCapacity] after using this method. | |
1138 */ | |
1139 int _put(K key) { | |
1140 int offset = _probeForAdd(_hashCodeOf(key), key); | |
1141 Object oldEntry = _table[offset]; | |
1142 if (oldEntry == null) { | |
1143 _entryCount++; | |
1144 } else if (identical(oldEntry, _TOMBSTONE)) { | |
1145 _deletedCount--; | |
1146 } else { | |
1147 return offset; | |
1148 } | |
1149 _setKey(offset, key); | |
1150 _recordModification(); | |
1151 return offset; | |
1152 } | |
1153 | |
1154 /** | |
1155 * Removes a key from the table and returns its offset into [_table]. | |
1156 * | |
1157 * Returns a negative value if the key was not in the table. | |
1158 * If [_entrySize] is greater than 1, the caller should clean up the | |
1159 * remaining fields. | |
1160 */ | |
1161 int _remove(Object key) { | |
1162 int offset = _probeForLookup(_hashCodeOf(key), key); | |
1163 if (offset >= 0) { | |
1164 _deleteEntry(offset); | |
1165 } | |
1166 return offset; | |
1167 } | |
1168 | |
1169 /** Clears the table completely, leaving it empty. */ | |
1170 void _clear() { | |
1171 if (_elementCount == 0) return; | |
1172 for (int i = 0; i < _table.length; i++) { | |
1173 _table[i] = null; | |
1174 } | |
1175 _entryCount = _deletedCount = 0; | |
1176 _recordModification(); | |
1177 } | |
1178 | |
1179 /** Clears an entry in the table. */ | |
1180 void _deleteEntry(int offset) { | |
1181 assert(!_isFree(_table[offset])); | |
1182 _setKey(offset, _TOMBSTONE); | |
1183 _deletedCount++; | |
1184 _recordModification(); | |
1185 } | |
1186 } | |
1187 | |
1188 /** | |
1189 * Generic iterable based on a [_HashTable]. | |
1190 */ | |
1191 abstract class _HashTableIterable<E> extends IterableBase<E> { | |
1192 final _HashTable _hashTable; | |
1193 _HashTableIterable(this._hashTable); | |
1194 | |
1195 Iterator<E> get iterator; | |
1196 | |
1197 /** | |
1198 * Return the iterated value for a given entry. | |
1199 */ | |
1200 E _valueAt(int offset, Object key); | |
1201 | |
1202 int get length => _hashTable._elementCount; | |
1203 | |
1204 bool get isEmpty => _hashTable._elementCount == 0; | |
1205 | |
1206 void forEach(void action(E element)) { | |
1207 int entrySize = _hashTable._entrySize; | |
1208 List table = _hashTable._table; | |
1209 int modificationCount = _hashTable._modificationCount; | |
1210 for (int offset = 0; offset < table.length; offset += entrySize) { | |
1211 Object entry = table[offset]; | |
1212 if (!_hashTable._isFree(entry)) { | |
1213 E value = _valueAt(offset, entry); | |
1214 action(value); | |
1215 } | |
1216 _hashTable._checkModification(modificationCount); | |
1217 } | |
1218 } | |
1219 } | |
1220 | |
1221 abstract class _HashTableIterator<E> implements Iterator<E> { | |
1222 final _HashTable _hashTable; | |
1223 final int _modificationCount; | |
1224 /** Location right after last found element. */ | |
1225 int _offset = 0; | |
1226 E _current = null; | |
1227 | |
1228 _HashTableIterator(_HashTable hashTable) | |
1229 : _hashTable = hashTable, | |
1230 _modificationCount = hashTable._modificationCount; | |
1231 | 1159 |
1232 bool moveNext() { | 1160 bool moveNext() { |
1233 _hashTable._checkModification(_modificationCount); | 1161 if (_modificationCount != _set._modificationCount) { |
1234 | 1162 throw new ConcurrentModificationError(_set); |
1235 List table = _hashTable._table; | |
1236 int entrySize = _hashTable._entrySize; | |
1237 | |
1238 while (_offset < table.length) { | |
1239 int currentOffset = _offset; | |
1240 Object entry = table[currentOffset]; | |
1241 _offset = currentOffset + entrySize; | |
1242 if (!_hashTable._isFree(entry)) { | |
1243 _current = _valueAt(currentOffset, entry); | |
1244 return true; | |
1245 } | |
1246 } | 1163 } |
1247 _current = null; | 1164 if (identical(_set, _next)) { |
1248 return false; | 1165 _current = null; |
1166 return false; | |
1167 } | |
1168 _LinkedHashSetEntry entry = _next; | |
1169 _current = entry.key; | |
1170 _next = entry._nextEntry; | |
1171 return true; | |
1249 } | 1172 } |
1250 | 1173 |
1251 E get current => _current; | 1174 E get current => _current; |
1252 | |
1253 E _valueAt(int offset, Object key); | |
1254 } | 1175 } |
1255 | |
1256 class _HashTableKeyIterable<K> extends _HashTableIterable<K> { | |
1257 _HashTableKeyIterable(_HashTable<K> hashTable) : super(hashTable); | |
1258 | |
1259 Iterator<K> get iterator => new _HashTableKeyIterator<K>(_hashTable); | |
1260 | |
1261 K _valueAt(int offset, Object key) { | |
1262 if (identical(key, _NULL)) return null; | |
1263 return key; | |
1264 } | |
1265 | |
1266 bool contains(Object value) => _hashTable._get(value) >= 0; | |
1267 } | |
1268 | |
1269 class _HashTableKeyIterator<K> extends _HashTableIterator<K> { | |
1270 _HashTableKeyIterator(_HashTable hashTable) : super(hashTable); | |
1271 | |
1272 K _valueAt(int offset, Object key) { | |
1273 if (identical(key, _NULL)) return null; | |
1274 return key; | |
1275 } | |
1276 } | |
1277 | |
1278 class _HashTableValueIterable<V> extends _HashTableIterable<V> { | |
1279 final int _entryIndex; | |
1280 | |
1281 _HashTableValueIterable(_HashTable hashTable, this._entryIndex) | |
1282 : super(hashTable); | |
1283 | |
1284 Iterator<V> get iterator { | |
1285 return new _HashTableValueIterator<V>(_hashTable, _entryIndex); | |
1286 } | |
1287 | |
1288 V _valueAt(int offset, Object key) => _hashTable._table[offset + _entryIndex]; | |
1289 } | |
1290 | |
1291 class _HashTableValueIterator<V> extends _HashTableIterator<V> { | |
1292 final int _entryIndex; | |
1293 | |
1294 _HashTableValueIterator(_HashTable hashTable, this._entryIndex) | |
1295 : super(hashTable); | |
1296 | |
1297 V _valueAt(int offset, Object key) => _hashTable._table[offset + _entryIndex]; | |
1298 } | |
1299 | |
1300 class _HashMapTable<K, V> extends _HashTable<K> { | |
1301 static const int _INITIAL_CAPACITY = 8; | |
1302 static const int _VALUE_INDEX = 1; | |
1303 | |
1304 _HashMapTable() : super(_INITIAL_CAPACITY); | |
1305 | |
1306 int get _entrySize => 2; | |
1307 | |
1308 V _value(int offset) => _table[offset + _VALUE_INDEX]; | |
1309 void _setValue(int offset, V value) { _table[offset + _VALUE_INDEX] = value; } | |
1310 | |
1311 _copyEntry(List fromTable, int fromOffset, int toOffset) { | |
1312 _table[toOffset + _VALUE_INDEX] = fromTable[fromOffset + _VALUE_INDEX]; | |
1313 } | |
1314 } | |
1315 | |
1316 /** Unique marker object for the head of a linked list of entries. */ | |
1317 class _LinkedHashTableHeadMarker { | |
1318 const _LinkedHashTableHeadMarker(); | |
1319 } | |
1320 | |
1321 const _LinkedHashTableHeadMarker _HEAD_MARKER = | |
1322 const _LinkedHashTableHeadMarker(); | |
1323 | |
1324 class _LinkedHashTable<K> extends _HashTable<K> { | |
1325 static const _NEXT_INDEX = 1; | |
1326 static const _PREV_INDEX = 2; | |
1327 static const _HEAD_OFFSET = 0; | |
1328 | |
1329 _LinkedHashTable(int initialCapacity) : super(initialCapacity); | |
1330 | |
1331 int get _entrySize => 3; | |
1332 | |
1333 List _createTable(int capacity) { | |
1334 List result = new List(capacity * _entrySize); | |
1335 result[_HEAD_OFFSET] = _HEAD_MARKER; | |
1336 result[_HEAD_OFFSET + _NEXT_INDEX] = _HEAD_OFFSET; | |
1337 result[_HEAD_OFFSET + _PREV_INDEX] = _HEAD_OFFSET; | |
1338 return result; | |
1339 } | |
1340 | |
1341 int _next(int offset) => _table[offset + _NEXT_INDEX]; | |
1342 void _setNext(int offset, int to) { _table[offset + _NEXT_INDEX] = to; } | |
1343 | |
1344 int _prev(int offset) => _table[offset + _PREV_INDEX]; | |
1345 void _setPrev(int offset, int to) { _table[offset + _PREV_INDEX] = to; } | |
1346 | |
1347 void _linkLast(int offset) { | |
1348 // Add entry at offset at end of double-linked list. | |
1349 int last = _prev(_HEAD_OFFSET); | |
1350 _setNext(offset, _HEAD_OFFSET); | |
1351 _setPrev(offset, last); | |
1352 _setNext(last, offset); | |
1353 _setPrev(_HEAD_OFFSET, offset); | |
1354 } | |
1355 | |
1356 void _unlink(int offset) { | |
1357 assert(offset != _HEAD_OFFSET); | |
1358 int next = _next(offset); | |
1359 int prev = _prev(offset); | |
1360 _setNext(offset, null); | |
1361 _setPrev(offset, null); | |
1362 _setNext(prev, next); | |
1363 _setPrev(next, prev); | |
1364 } | |
1365 | |
1366 /** | |
1367 * Copies all non-free entries from the old table to the new empty table. | |
1368 */ | |
1369 void _addAllEntries(List oldTable) { | |
1370 int offset = oldTable[_HEAD_OFFSET + _NEXT_INDEX]; | |
1371 while (offset != _HEAD_OFFSET) { | |
1372 Object object = oldTable[offset]; | |
1373 int nextOffset = oldTable[offset + _NEXT_INDEX]; | |
1374 int toOffset = _put(object); | |
1375 _copyEntry(oldTable, offset, toOffset); | |
1376 offset = nextOffset; | |
1377 } | |
1378 } | |
1379 | |
1380 void _clear() { | |
1381 if (_elementCount == 0) return; | |
1382 _setNext(_HEAD_OFFSET, _HEAD_OFFSET); | |
1383 _setPrev(_HEAD_OFFSET, _HEAD_OFFSET); | |
1384 for (int i = _entrySize; i < _table.length; i++) { | |
1385 _table[i] = null; | |
1386 } | |
1387 _entryCount = _deletedCount = 0; | |
1388 _recordModification(); | |
1389 } | |
1390 | |
1391 int _put(K key) { | |
1392 int offset = _probeForAdd(_hashCodeOf(key), key); | |
1393 Object oldEntry = _table[offset]; | |
1394 if (identical(oldEntry, _TOMBSTONE)) { | |
1395 _deletedCount--; | |
1396 } else if (oldEntry == null) { | |
1397 _entryCount++; | |
1398 } else { | |
1399 return offset; | |
1400 } | |
1401 _recordModification(); | |
1402 _setKey(offset, key); | |
1403 _linkLast(offset); | |
1404 return offset; | |
1405 } | |
1406 | |
1407 void _deleteEntry(int offset) { | |
1408 _unlink(offset); | |
1409 _setKey(offset, _TOMBSTONE); | |
1410 _deletedCount++; | |
1411 _recordModification(); | |
1412 } | |
1413 } | |
1414 | |
1415 class _LinkedHashTableKeyIterable<K> extends IterableBase<K> { | |
1416 final _LinkedHashTable<K> _table; | |
1417 _LinkedHashTableKeyIterable(this._table); | |
1418 Iterator<K> get iterator => new _LinkedHashTableKeyIterator<K>(_table); | |
1419 | |
1420 bool contains(Object value) => _table._get(value) >= 0; | |
1421 | |
1422 int get length => _table._elementCount; | |
1423 } | |
1424 | |
1425 class _LinkedHashTableKeyIterator<K> extends _LinkedHashTableIterator<K> { | |
1426 _LinkedHashTableKeyIterator(_LinkedHashTable<K> hashTable): super(hashTable); | |
1427 | |
1428 K _getCurrent(int offset) => _hashTable._key(offset); | |
1429 } | |
1430 | |
1431 class _LinkedHashTableValueIterable<V> extends IterableBase<V> { | |
1432 final _LinkedHashTable _hashTable; | |
1433 final int _valueIndex; | |
1434 _LinkedHashTableValueIterable(this._hashTable, this._valueIndex); | |
1435 Iterator<V> get iterator => | |
1436 new _LinkedHashTableValueIterator<V>(_hashTable, _valueIndex); | |
1437 int get length => _hashTable._elementCount; | |
1438 } | |
1439 | |
1440 class _LinkedHashTableValueIterator<V> extends _LinkedHashTableIterator<V> { | |
1441 final int _valueIndex; | |
1442 | |
1443 _LinkedHashTableValueIterator(_LinkedHashTable hashTable, this._valueIndex) | |
1444 : super(hashTable); | |
1445 | |
1446 V _getCurrent(int offset) => _hashTable._table[offset + _valueIndex]; | |
1447 } | |
1448 | |
1449 abstract class _LinkedHashTableIterator<T> implements Iterator<T> { | |
1450 final _LinkedHashTable _hashTable; | |
1451 final int _modificationCount; | |
1452 int _offset; | |
1453 T _current; | |
1454 | |
1455 _LinkedHashTableIterator(_LinkedHashTable table) | |
1456 : _hashTable = table, | |
1457 _modificationCount = table._modificationCount, | |
1458 _offset = table._next(_LinkedHashTable._HEAD_OFFSET); | |
1459 | |
1460 bool moveNext() { | |
1461 _hashTable._checkModification(_modificationCount); | |
1462 if (_offset == _LinkedHashTable._HEAD_OFFSET) { | |
1463 _current = null; | |
1464 return false; | |
1465 } | |
1466 _current = _getCurrent(_offset); | |
1467 _offset = _hashTable._next(_offset); | |
1468 return true; | |
1469 } | |
1470 | |
1471 T _getCurrent(int offset); | |
1472 | |
1473 T get current => _current; | |
1474 } | |
1475 | |
1476 class _LinkedHashMapTable<K, V> extends _LinkedHashTable<K> { | |
1477 static const int _INITIAL_CAPACITY = 8; | |
1478 static const int _VALUE_INDEX = 3; | |
1479 | |
1480 int get _entrySize => 4; | |
1481 | |
1482 _LinkedHashMapTable() : super(_INITIAL_CAPACITY); | |
1483 | |
1484 V _value(int offset) => _table[offset + _VALUE_INDEX]; | |
1485 void _setValue(int offset, V value) { _table[offset + _VALUE_INDEX] = value; } | |
1486 | |
1487 _copyEntry(List oldTable, int fromOffset, int toOffset) { | |
1488 _table[toOffset + _VALUE_INDEX] = oldTable[fromOffset + _VALUE_INDEX]; | |
1489 } | |
1490 } | |