OLD | NEW |
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 patch class HashMap<K, V> { | 5 patch class HashMap<K, V> { |
6 /* patch */ factory HashMap({ bool equals(K key1, K key2), | 6 /* patch */ factory HashMap({ bool equals(K key1, K key2), |
7 int hashCode(K key), | 7 int hashCode(K key), |
8 bool isValidKey(potentialKey) }) { | 8 bool isValidKey(potentialKey) }) { |
9 if (isValidKey == null) { | 9 if (isValidKey == null) { |
10 if (hashCode == null) { | 10 if (hashCode == null) { |
(...skipping 20 matching lines...) |
31 } | 31 } |
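
A usage sketch (not taken from the patch), assuming the HashMap factory's elided body mirrors the HashSet and LinkedHashMap factories shown further down: the named parameters decide which private implementation is returned.

  var plain = new HashMap<String, int>();                 // default equality and hashCode
  var byIdentity = new HashMap<String, int>(
      equals: identical);                                 // identity-based implementation
  var caseless = new HashMap<String, int>(                // custom equality and hashCode
      equals: (a, b) => a.toLowerCase() == b.toLowerCase(),
      hashCode: (s) => s.toLowerCase().hashCode);
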
32 | 32 |
33 const int _MODIFICATION_COUNT_MASK = 0x3fffffff; | 33 const int _MODIFICATION_COUNT_MASK = 0x3fffffff; |
34 | 34 |
35 class _HashMap<K, V> implements HashMap<K, V> { | 35 class _HashMap<K, V> implements HashMap<K, V> { |
36 static const int _INITIAL_CAPACITY = 8; | 36 static const int _INITIAL_CAPACITY = 8; |
37 | 37 |
38 Type get runtimeType => HashMap; | 38 Type get runtimeType => HashMap; |
39 | 39 |
40 int _elementCount = 0; | 40 int _elementCount = 0; |
41 List<_HashMapEntry> _buckets = new List<_HashMapEntry>(_INITIAL_CAPACITY); | 41 List<_HashMapEntry> _buckets = new List(_INITIAL_CAPACITY); |
42 int _modificationCount = 0; | 42 int _modificationCount = 0; |
43 | 43 |
44 int get length => _elementCount; | 44 int get length => _elementCount; |
45 bool get isEmpty => _elementCount == 0; | 45 bool get isEmpty => _elementCount == 0; |
46 bool get isNotEmpty => _elementCount != 0; | 46 bool get isNotEmpty => _elementCount != 0; |
47 | 47 |
48 Iterable<K> get keys => new _HashMapKeyIterable<K>(this); | 48 Iterable<K> get keys => new _HashMapKeyIterable<K>(this); |
49 Iterable<V> get values => new _HashMapValueIterable<V>(this); | 49 Iterable<V> get values => new _HashMapValueIterable<V>(this); |
50 | 50 |
51 bool containsKey(Object key) { | 51 bool containsKey(Object key) { |
(...skipping 111 matching lines...) |
163 return entry.value; | 163 return entry.value; |
164 } | 164 } |
165 previous = entry; | 165 previous = entry; |
166 entry = next; | 166 entry = next; |
167 } | 167 } |
168 return null; | 168 return null; |
169 } | 169 } |
170 | 170 |
171 void clear() { | 171 void clear() { |
172 _elementCount = 0; | 172 _elementCount = 0; |
173 _buckets = new List<_HashMapEntry>(_INITIAL_CAPACITY); | 173 _buckets = new List(_INITIAL_CAPACITY); |
174 _modificationCount = (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; | 174 _modificationCount = (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; |
175 } | 175 } |
176 | 176 |
177 void _removeEntry(_HashMapEntry entry, | 177 void _removeEntry(_HashMapEntry entry, |
178 _HashMapEntry previousInBucket, | 178 _HashMapEntry previousInBucket, |
179 int bucketIndex) { | 179 int bucketIndex) { |
180 if (previousInBucket == null) { | 180 if (previousInBucket == null) { |
181 _buckets[bucketIndex] = entry.next; | 181 _buckets[bucketIndex] = entry.next; |
182 } else { | 182 } else { |
183 previousInBucket.next = entry.next; | 183 previousInBucket.next = entry.next; |
(...skipping 10 matching lines...) |
194 // If we end up with more than 75% non-empty entries, we | 194 // If we end up with more than 75% non-empty entries, we |
195 // resize the backing store. | 195 // resize the backing store. |
196 if ((newElements << 2) > ((length << 1) + length)) _resize(); | 196 if ((newElements << 2) > ((length << 1) + length)) _resize(); |
197 _modificationCount = (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; | 197 _modificationCount = (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; |
198 } | 198 } |
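
The shift arithmetic in the 75% check above, restated as a hypothetical helper (a sketch, not part of the file):

  // (newElements << 2) == newElements * 4, and (length << 1) + length == length * 3,
  // so the test reads: newElements * 4 > length * 3, i.e. newElements > 0.75 * length.
  bool overThreeQuartersFull(int newElements, int bucketCount) =>
      (newElements << 2) > ((bucketCount << 1) + bucketCount);

  // With the initial 8 buckets, the first _resize happens when the 7th entry
  // is added: 7 * 4 = 28 > 24 = 8 * 3.
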
199 | 199 |
200 void _resize() { | 200 void _resize() { |
201 List oldBuckets = _buckets; | 201 List oldBuckets = _buckets; |
202 int oldLength = oldBuckets.length; | 202 int oldLength = oldBuckets.length; |
203 int newLength = oldLength << 1; | 203 int newLength = oldLength << 1; |
204 List newBuckets = new List<_HashMapEntry>(newLength); | 204 List newBuckets = new List(newLength); |
205 for (int i = 0; i < oldLength; i++) { | 205 for (int i = 0; i < oldLength; i++) { |
206 _HashMapEntry entry = oldBuckets[i]; | 206 _HashMapEntry entry = oldBuckets[i]; |
207 while (entry != null) { | 207 while (entry != null) { |
208 _HashMapEntry next = entry.next; | 208 _HashMapEntry next = entry.next; |
209 int hashCode = entry.hashCode; | 209 int hashCode = entry.hashCode; |
210 int index = hashCode & (newLength - 1); | 210 int index = hashCode & (newLength - 1); |
211 entry.next = newBuckets[index]; | 211 entry.next = newBuckets[index]; |
212 newBuckets[index] = entry; | 212 newBuckets[index] = entry; |
213 entry = next; | 213 entry = next; |
214 } | 214 } |
(...skipping 282 matching lines...) |
497 | 497 |
498 class _HashMapValueIterator<V> extends _HashMapIterator<V> { | 498 class _HashMapValueIterator<V> extends _HashMapIterator<V> { |
499 _HashMapValueIterator(HashMap map) : super(map); | 499 _HashMapValueIterator(HashMap map) : super(map); |
500 V get current { | 500 V get current { |
501 _HashMapEntry entry = _entry; | 501 _HashMapEntry entry = _entry; |
502 return (entry == null) ? null : entry.value; | 502 return (entry == null) ? null : entry.value; |
503 } | 503 } |
504 } | 504 } |
505 | 505 |
506 patch class HashSet<E> { | 506 patch class HashSet<E> { |
507 /* patch */ factory HashSet({ bool equals(E e1, E e2), | 507 static const int _INITIAL_CAPACITY = 8; |
508 int hashCode(E e), | 508 final _HashTable<E> _table; |
509 bool isValidKey(potentialKey) }) { | 509 |
510 if (isValidKey == null) { | 510 /* patch */ HashSet() : _table = new _HashTable(_INITIAL_CAPACITY) { |
511 if (hashCode == null) { | 511 _table._container = this; |
512 if (equals == null) { | |
513 return new _HashSet<E>(); | |
514 } | |
515 if (identical(identical, equals)) { | |
516 return new _IdentityHashSet<E>(); | |
517 } | |
518 hashCode = _defaultHashCode; | |
519 } else if (equals == null) { | |
520 equals = _defaultEquals; | |
521 } | |
522 isValidKey = new _TypeTest<E>().test; | |
523 } else { | |
524 if (hashCode == null) hashCode = _defaultHashCode; | |
525 if (equals == null) equals = _defaultEquals; | |
526 } | |
527 return new _CustomHashSet<E>(equals, hashCode, isValidKey); | |
528 } | 512 } |
529 } | |
530 | 513 |
531 class _HashSet<E> extends _HashSetBase<E> implements HashSet<E> { | 514 factory HashSet.from(Iterable<E> iterable) { |
532 static const int _INITIAL_CAPACITY = 8; | 515 return new HashSet<E>()..addAll(iterable); |
533 | 516 } |
534 List<_HashSetEntry> _buckets = new List<_HashSetEntry>(_INITIAL_CAPACITY); | |
535 int _elementCount = 0; | |
536 int _modificationCount = 0; | |
537 | |
538 bool _equals(e1, e2) => e1 == e2; | |
539 int _hashCode(e) => e.hashCode; | |
540 | 517 |
541 // Iterable. | 518 // Iterable. |
542 Iterator<E> get iterator => new _HashSetIterator<E>(this); | 519 /* patch */ Iterator<E> get iterator => new _HashTableKeyIterator<E>(_table); |
543 | 520 |
544 int get length => _elementCount; | 521 /* patch */ int get length => _table._elementCount; |
545 | 522 |
546 bool get isEmpty => _elementCount == 0; | 523 /* patch */ bool get isEmpty => _table._elementCount == 0; |
547 | 524 |
548 bool get isNotEmpty => _elementCount != 0; | 525 /* patch */ bool get isNotEmpty => !isEmpty; |
549 | 526 |
550 bool contains(Object object) { | 527 /* patch */ bool contains(Object object) => _table._get(object) >= 0; |
551 int index = _hashCode(object) & (_buckets.length - 1); | 528 |
552 _HashSetEntry entry = _buckets[index]; | 529 // Collection. |
553 while (entry != null) { | 530 /* patch */ void add(E element) { |
554 if (_equals(entry.key, object)) return true; | 531 _table._put(element); |
555 entry = entry.next; | 532 _table._checkCapacity(); |
556 } | |
557 return false; | |
558 } | 533 } |
559 | 534 |
560 // Set | 535 /* patch */ void addAll(Iterable<E> objects) { |
561 | |
562 void _add(E element) { | |
563 int hashCode = _hashCode(element); | |
564 int index = hashCode & (_buckets.length - 1); | |
565 _HashSetEntry entry = _buckets[index]; | |
566 while (entry != null) { | |
567 if (_equals(entry.key, element)) return; | |
568 entry = entry.next; | |
569 } | |
570 _addEntry(element, hashCode, index); | |
571 } | |
572 | |
573 void add(E element) { | |
574 _add(element); | |
575 } | |
576 | |
577 void addAll(Iterable<E> objects) { | |
578 int ctr = 0; | |
579 for (E object in objects) { | 536 for (E object in objects) { |
580 ctr++; | 537 _table._put(object); |
581 _add(object); | 538 _table._checkCapacity(); |
582 } | 539 } |
583 } | 540 } |
584 | 541 |
585 bool _remove(Object object, int hashCode) { | 542 /* patch */ bool remove(Object object) { |
586 int index = hashCode & (_buckets.length - 1); | 543 int offset = _table._remove(object); |
587 _HashSetEntry entry = _buckets[index]; | 544 _table._checkCapacity(); |
588 _HashSetEntry previous = null; | 545 return offset >= 0; |
589 while (entry != null) { | |
590 if (_equals(entry.key, object)) { | |
591 _HashSetEntry next = entry.remove(); | |
592 if (previous == null) { | |
593 _buckets[index] = next; | |
594 } else { | |
595 previous.next = next; | |
596 } | |
597 _elementCount--; | |
598 _modificationCount = | |
599 (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; | |
600 return true; | |
601 } | |
602 previous = entry; | |
603 entry = entry.next; | |
604 } | |
605 return false; | |
606 } | 546 } |
607 | 547 |
608 bool remove(Object object) => _remove(object, _hashCode(object)); | 548 /* patch */ void removeAll(Iterable<Object> objectsToRemove) { |
609 | |
610 void removeAll(Iterable<Object> objectsToRemove) { | |
611 for (Object object in objectsToRemove) { | 549 for (Object object in objectsToRemove) { |
612 _remove(object, _hashCode(object)); | 550 _table._remove(object); |
| 551 _table._checkCapacity(); |
613 } | 552 } |
614 } | 553 } |
615 | 554 |
616 void _filterWhere(bool test(E element), bool removeMatching) { | 555 void _filterWhere(bool test(E element), bool removeMatching) { |
617 int length = _buckets.length; | 556 int entrySize = _table._entrySize; |
618 for (int index = 0; index < length; index++) { | 557 int length = _table._table.length; |
619 _HashSetEntry entry = _buckets[index]; | 558 for (int offset = 0; offset < length; offset += entrySize) { |
620 _HashSetEntry previous = null; | 559 Object entry = _table._table[offset]; |
621 while (entry != null) { | 560 if (!_table._isFree(entry)) { |
622 int modificationCount = _modificationCount; | 561 E key = identical(entry, _NULL) ? null : entry; |
623 bool testResult = test(entry.key); | 562 int modificationCount = _table._modificationCount; |
624 if (modificationCount != _modificationCount) { | 563 bool shouldRemove = (removeMatching == test(key)); |
625 throw new ConcurrentModificationError(this); | 564 _table._checkModification(modificationCount); |
626 } | 565 if (shouldRemove) { |
627 if (testResult == removeMatching) { | 566 _table._deleteEntry(offset); |
628 _HashSetEntry next = entry.remove(); | |
629 if (previous == null) { | |
630 _buckets[index] = next; | |
631 } else { | |
632 previous.next = next; | |
633 } | |
634 _elementCount--; | |
635 _modificationCount = | |
636 (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; | |
637 entry = next; | |
638 } else { | |
639 previous = entry; | |
640 entry = entry.next; | |
641 } | 567 } |
642 } | 568 } |
643 } | 569 } |
| 570 _table._checkCapacity(); |
644 } | 571 } |
645 | 572 |
646 void removeWhere(bool test(E element)) { | 573 /* patch */ void removeWhere(bool test(E element)) { |
647 _filterWhere(test, true); | 574 _filterWhere(test, true); |
648 } | 575 } |
649 | 576 |
650 void retainWhere(bool test(E element)) { | 577 /* patch */ void retainWhere(bool test(E element)) { |
651 _filterWhere(test, false); | 578 _filterWhere(test, false); |
652 } | 579 } |
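
Both patched methods funnel into _filterWhere above; only the sense of the boolean differs. A short usage sketch:

  var set = new HashSet<int>()..addAll([1, 2, 3, 4, 5]);
  set.removeWhere((e) => e.isEven);   // removes 2 and 4, leaving {1, 3, 5}
  set.retainWhere((e) => e > 2);      // keeps only {3, 5}
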
653 | 580 |
654 void clear() { | 581 /* patch */ void clear() { |
655 _elementCount = 0; | 582 _table._clear(); |
656 _buckets = new List<_HashSetEntry>(_INITIAL_CAPACITY); | |
657 _modificationCount++; | |
658 } | |
659 | |
660 void _addEntry(E key, int hashCode, int index) { | |
661 _buckets[index] = new _HashSetEntry(key, hashCode, _buckets[index]); | |
662 int newElements = _elementCount + 1; | |
663 _elementCount = newElements; | |
664 int length = _buckets.length; | |
665 // If we end up with more than 75% non-empty entries, we | |
666 // resize the backing store. | |
667 if ((newElements << 2) > ((length << 1) + length)) _resize(); | |
668 _modificationCount = (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; | |
669 } | |
670 | |
671 void _resize() { | |
672 int oldLength = _buckets.length; | |
673 int newLength = oldLength << 1; | |
674 List oldBuckets = _buckets; | |
675 List newBuckets = new List<_HashSetEntry>(newLength); | |
676 for (int i = 0; i < oldLength; i++) { | |
677 _HashSetEntry entry = oldBuckets[i]; | |
678 while (entry != null) { | |
679 _HashSetEntry next = entry.next; | |
680 int newIndex = entry.hashCode & (newLength - 1); | |
681 entry.next = newBuckets[newIndex]; | |
682 newBuckets[newIndex] = entry; | |
683 entry = next; | |
684 } | |
685 } | |
686 _buckets = newBuckets; | |
687 } | |
688 | |
689 HashSet<E> _newSet() => new _HashSet<E>(); | |
690 } | |
691 | |
692 class _IdentityHashSet<E> extends _HashSet<E> { | |
693 bool _equals(e1, e2) => identical(e1, e2); | |
694 HashSet<E> _newSet() => new _IdentityHashSet<E>(); | |
695 } | |
696 | |
697 class _CustomHashSet<E> extends _HashSet<E> { | |
698 final _Equality<E> _equality; | |
699 final _Hasher<E> _hasher; | |
700 final _Predicate _validKey; | |
701 _CustomHashSet(this._equality, this._hasher, this._validKey); | |
702 | |
703 E operator[](Object key) { | |
704 if (!_validKey(key)) return null; | |
705 return super[key]; | |
706 } | |
707 | |
708 bool remove(Object key) { | |
709 if (!_validKey(key)) return false; | |
710 return super.remove(key); | |
711 } | |
712 | |
713 bool containsKey(Object key) { | |
714 if (!_validKey(key)) return false; | |
715 return super.containsKey(key); | |
716 } | |
717 | |
718 bool _equals(e1, e2) => _equality(e1, e2); | |
719 int _hashCode(e) => _hasher(e); | |
720 | |
721 HashSet<E> _newSet() => new _CustomHashSet<E>(_equality, _hasher, _validKey); | |
722 } | |
723 | |
724 class _HashSetEntry { | |
725 final key; | |
726 final int hashCode; | |
727 _HashSetEntry next; | |
728 _HashSetEntry(this.key, this.hashCode, this.next); | |
729 | |
730 _HashSetEntry remove() { | |
731 _HashSetEntry result = next; | |
732 next = null; | |
733 return result; | |
734 } | 583 } |
735 } | 584 } |
736 | 585 |
737 class _HashSetIterator<E> implements Iterator<E> { | |
738 final _HashSet _set; | |
739 final int _modificationCount; | |
740 int _index = 0; | |
741 _HashSetEntry _next = null; | |
742 E _current = null; | |
743 | |
744 _HashSetIterator(_HashSet hashSet) | |
745 : _set = hashSet, _modificationCount = hashSet._modificationCount; | |
746 | |
747 bool moveNext() { | |
748 if (_modificationCount != _set._modificationCount) { | |
749 throw new ConcurrentModificationError(_set); | |
750 } | |
751 if (_next != null) { | |
752 _current = _next.key; | |
753 _next = _next.next; | |
754 return true; | |
755 } | |
756 List<_HashSetEntry> buckets = _set._buckets; | |
757 while (_index < buckets.length) { | |
758 _next = buckets[_index]; | |
759 _index = _index + 1; | |
760 if (_next != null) { | |
761 _current = _next.key; | |
762 _next = _next.next; | |
763 return true; | |
764 } | |
765 } | |
766 _current = null; | |
767 return false; | |
768 } | |
769 | |
770 E get current => _current; | |
771 } | |
772 | |
773 class _LinkedHashMapEntry extends _HashMapEntry { | 586 class _LinkedHashMapEntry extends _HashMapEntry { |
774 /// Double-linked list of entries of a linked hash map. | |
775 /// The _LinkedHashMap itself is the head of the list, so the type is "var". | |
776 /// Both are initialized to `this` when initialized. | |
777 var _nextEntry; | 587 var _nextEntry; |
778 var _previousEntry; | 588 var _previousEntry; |
779 _LinkedHashMapEntry(key, value, int hashCode, _LinkedHashMapEntry next, | 589 _LinkedHashMapEntry(key, value, int hashCode, _LinkedHashMapEntry next, |
780 this._previousEntry, this._nextEntry) | 590 this._previousEntry, this._nextEntry) |
781 : super(key, value, hashCode, next) { | 591 : super(key, value, hashCode, next) { |
782 _previousEntry._nextEntry = this; | 592 _previousEntry._nextEntry = this; |
783 _nextEntry._previousEntry = this; | 593 _nextEntry._previousEntry = this; |
784 } | 594 } |
785 } | 595 } |
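
The removed doc comment above describes the insertion-order list these fields maintain: the map (or set) object itself acts as the sentinel head, so an empty container links to itself. A sketch of that invariant, using a hypothetical check:

  // head._nextEntry -> first inserted entry -> ... -> last entry -> head
  bool isEmptyChain(dynamic head) =>
      identical(head._nextEntry, head) && identical(head._previousEntry, head);
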
786 | 596 |
(...skipping 55 matching lines...) |
842 class _LinkedHashMapValueIterator<V> extends _LinkedHashMapIterator<V> { | 652 class _LinkedHashMapValueIterator<V> extends _LinkedHashMapIterator<V> { |
843 _LinkedHashMapValueIterator(LinkedHashMap map) : super(map); | 653 _LinkedHashMapValueIterator(LinkedHashMap map) : super(map); |
844 V _getValue(_LinkedHashMapEntry entry) => entry.value; | 654 V _getValue(_LinkedHashMapEntry entry) => entry.value; |
845 } | 655 } |
846 | 656 |
847 | 657 |
848 /** | 658 /** |
849 * A hash-based map that iterates keys and values in key insertion order. | 659 * A hash-based map that iterates keys and values in key insertion order. |
850 */ | 660 */ |
851 patch class LinkedHashMap<K, V> { | 661 patch class LinkedHashMap<K, V> { |
852 /// Holds a double-linked list of entries in insertion order. | |
853 /// The fields have the same name as the ones in [_LinkedHashMapEntry], | |
854 /// and this map is itself used as the head entry of the list. | |
855 /// Set to `this` when initialized, representing the empty list (containing | |
856 /// only the head entry itself). | |
857 var _nextEntry; | 662 var _nextEntry; |
858 var _previousEntry; | 663 var _previousEntry; |
859 | 664 |
860 /* patch */ factory LinkedHashMap({ bool equals(K key1, K key2), | 665 /* patch */ factory LinkedHashMap({ bool equals(K key1, K key2), |
861 int hashCode(K key), | 666 int hashCode(K key), |
862 bool isValidKey(potentialKey) }) { | 667 bool isValidKey(potentialKey) }) { |
863 if (isValidKey == null) { | 668 if (isValidKey == null) { |
864 if (hashCode == null) { | 669 if (hashCode == null) { |
865 if (equals == null) { | 670 if (equals == null) { |
866 return new _LinkedHashMap<K, V>(); | 671 return new _LinkedHashMap<K, V>(); |
(...skipping 59 matching lines...) |
926 } | 731 } |
927 | 732 |
928 void _addEntry(List buckets, int index, int length, | 733 void _addEntry(List buckets, int index, int length, |
929 K key, V value, int hashCode) { | 734 K key, V value, int hashCode) { |
930 _HashMapEntry entry = | 735 _HashMapEntry entry = |
931 new _LinkedHashMapEntry(key, value, hashCode, buckets[index], | 736 new _LinkedHashMapEntry(key, value, hashCode, buckets[index], |
932 _previousEntry, this); | 737 _previousEntry, this); |
933 buckets[index] = entry; | 738 buckets[index] = entry; |
934 int newElements = _elementCount + 1; | 739 int newElements = _elementCount + 1; |
935 _elementCount = newElements; | 740 _elementCount = newElements; |
936 | |
937 // If we end up with more than 75% non-empty entries, we | 741 // If we end up with more than 75% non-empty entries, we |
938 // resize the backing store. | 742 // resize the backing store. |
939 if ((newElements << 2) > ((length << 1) + length)) _resize(); | 743 if ((newElements << 2) > ((length << 1) + length)) _resize(); |
940 _modificationCount = (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; | 744 _modificationCount = (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; |
941 } | 745 } |
942 | 746 |
943 void _removeEntry(_LinkedHashMapEntry entry, | 747 void _removeEntry(_LinkedHashMapEntry entry, |
944 _HashMapEntry previousInBucket, | 748 _HashMapEntry previousInBucket, |
945 int bucketIndex) { | 749 int bucketIndex) { |
946 var previousInChain = entry._previousEntry; | 750 var previousInChain = entry._previousEntry; |
(...skipping 30 matching lines...) |
977 with _LinkedHashMapMixin<K, V> { | 781 with _LinkedHashMapMixin<K, V> { |
978 _LinkedCustomHashMap(bool equals(K key1, K key2), | 782 _LinkedCustomHashMap(bool equals(K key1, K key2), |
979 int hashCode(K key), | 783 int hashCode(K key), |
980 bool isValidKey(potentialKey)) | 784 bool isValidKey(potentialKey)) |
981 : super(equals, hashCode, isValidKey) { | 785 : super(equals, hashCode, isValidKey) { |
982 _nextEntry = _previousEntry = this; | 786 _nextEntry = _previousEntry = this; |
983 } | 787 } |
984 } | 788 } |
985 | 789 |
986 | 790 |
987 patch class LinkedHashSet<E> { | 791 patch class LinkedHashSet<E> extends _HashSetBase<E> { |
988 /* patch */ factory LinkedHashSet({ bool equals(E e1, E e2), | 792 static const int _INITIAL_CAPACITY = 8; |
989 int hashCode(E e), | 793 _LinkedHashTable<E> _table; |
990 bool isValidKey(potentialKey) }) { | 794 |
991 if (isValidKey == null) { | 795 /* patch */ LinkedHashSet() { |
992 if (hashCode == null) { | 796 _table = new _LinkedHashTable(_INITIAL_CAPACITY); |
993 if (equals == null) { | 797 _table._container = this; |
994 return new _LinkedHashSet<E>(); | 798 } |
| 799 |
| 800 // Iterable. |
| 801 /* patch */ Iterator<E> get iterator { |
| 802 return new _LinkedHashTableKeyIterator<E>(_table); |
| 803 } |
| 804 |
| 805 /* patch */ int get length => _table._elementCount; |
| 806 |
| 807 /* patch */ bool get isEmpty => _table._elementCount == 0; |
| 808 |
| 809 /* patch */ bool get isNotEmpty => !isEmpty; |
| 810 |
| 811 /* patch */ bool contains(Object object) => _table._get(object) >= 0; |
| 812 |
| 813 /* patch */ void forEach(void action(E element)) { |
| 814 int offset = _table._next(_LinkedHashTable._HEAD_OFFSET); |
| 815 int modificationCount = _table._modificationCount; |
| 816 while (offset != _LinkedHashTable._HEAD_OFFSET) { |
| 817 E key = _table._key(offset); |
| 818 action(key); |
| 819 _table._checkModification(modificationCount); |
| 820 offset = _table._next(offset); |
| 821 } |
| 822 } |
| 823 |
| 824 /* patch */ E get first { |
| 825 int firstOffset = _table._next(_LinkedHashTable._HEAD_OFFSET); |
| 826 if (firstOffset == _LinkedHashTable._HEAD_OFFSET) { |
| 827 throw new StateError("No elements"); |
| 828 } |
| 829 return _table._key(firstOffset); |
| 830 } |
| 831 |
| 832 /* patch */ E get last { |
| 833 int lastOffset = _table._prev(_LinkedHashTable._HEAD_OFFSET); |
| 834 if (lastOffset == _LinkedHashTable._HEAD_OFFSET) { |
| 835 throw new StateError("No elements"); |
| 836 } |
| 837 return _table._key(lastOffset); |
| 838 } |
| 839 |
| 840 // Collection. |
| 841 void _filterWhere(bool test(E element), bool removeMatching) { |
| 842 int entrySize = _table._entrySize; |
| 843 int length = _table._table.length; |
| 844 int offset = _table._next(_LinkedHashTable._HEAD_OFFSET); |
| 845 while (offset != _LinkedHashTable._HEAD_OFFSET) { |
| 846 E key = _table._key(offset); |
| 847 int nextOffset = _table._next(offset); |
| 848 int modificationCount = _table._modificationCount; |
| 849 bool shouldRemove = (removeMatching == test(key)); |
| 850 _table._checkModification(modificationCount); |
| 851 if (shouldRemove) { |
| 852 _table._deleteEntry(offset); |
| 853 } |
| 854 offset = nextOffset; |
| 855 } |
| 856 _table._checkCapacity(); |
| 857 } |
| 858 |
| 859 /* patch */ void add(E element) { |
| 860 _table._put(element); |
| 861 _table._checkCapacity(); |
| 862 } |
| 863 |
| 864 /* patch */ void addAll(Iterable<E> objects) { |
| 865 for (E object in objects) { |
| 866 _table._put(object); |
| 867 _table._checkCapacity(); |
| 868 } |
| 869 } |
| 870 |
| 871 /* patch */ bool remove(Object object) { |
| 872 int offset = _table._remove(object); |
| 873 if (offset >= 0) { |
| 874 _table._checkCapacity(); |
| 875 return true; |
| 876 } |
| 877 return false; |
| 878 } |
| 879 |
| 880 /* patch */ void removeAll(Iterable objectsToRemove) { |
| 881 for (Object object in objectsToRemove) { |
| 882 if (_table._remove(object) >= 0) { |
| 883 _table._checkCapacity(); |
| 884 } |
| 885 } |
| 886 } |
| 887 |
| 888 /* patch */ void removeWhere(bool test(E element)) { |
| 889 _filterWhere(test, true); |
| 890 } |
| 891 |
| 892 /* patch */ void retainWhere(bool test(E element)) { |
| 893 _filterWhere(test, false); |
| 894 } |
| 895 |
| 896 /* patch */ void clear() { |
| 897 _table._clear(); |
| 898 } |
| 899 } |
| 900 |
| 901 class _DeadEntry { |
| 902 const _DeadEntry(); |
| 903 } |
| 904 |
| 905 class _NullKey { |
| 906 const _NullKey(); |
| 907 int get hashCode => null.hashCode; |
| 908 } |
| 909 |
| 910 const _TOMBSTONE = const _DeadEntry(); |
| 911 const _NULL = const _NullKey(); |
| 912 |
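
A key slot in the open-addressing tables below holds one of four things. A hypothetical helper spelling out how the two markers are read, mirroring _isFree and _key in _HashTable:

  String describeKeySlot(Object slot) {
    if (slot == null) return 'free, never used';
    if (identical(slot, _TOMBSTONE)) return 'deleted (tombstone)';
    if (identical(slot, _NULL)) return 'occupied by the null key';
    return 'occupied by key: $slot';
  }
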
| 913 class _HashTable<K> { |
| 914 /** |
| 915 * Table of entries with [_entrySize] slots per entry. |
| 916 * |
| 917 * Capacity in entries must be a power of two. |
| 918 */ |
| 919 List _table; |
| 920 /** Current capacity. Always equal to [:_table.length ~/ _entrySize:]. */ |
| 921 int _capacity; |
| 922 /** Count of occupied entries, including deleted ones. */ |
| 923 int _entryCount = 0; |
| 924 /** Count of deleted entries. */ |
| 925 int _deletedCount = 0; |
| 926 /** Counter incremented when table is modified. */ |
| 927 int _modificationCount = 0; |
| 928 /** If set, used as the source object for [ConcurrentModificationError]s. */ |
| 929 Object _container; |
| 930 |
| 931 _HashTable(int initialCapacity) : _capacity = initialCapacity { |
| 932 _table = _createTable(initialCapacity); |
| 933 } |
| 934 |
| 935 /** Reads key from table. Converts _NULL marker to null. */ |
| 936 Object _key(offset) { |
| 937 assert(!_isFree(_table[offset])); |
| 938 Object key = _table[offset]; |
| 939 if (!identical(key, _NULL)) return key; |
| 940 return null; |
| 941 } |
| 942 |
| 943 /** Writes key to table. Converts null to _NULL marker. */ |
| 944 void _setKey(int offset, Object key) { |
| 945 if (key == null) key = _NULL; |
| 946 _table[offset] = key; |
| 947 } |
| 948 |
| 949 int get _elementCount => _entryCount - _deletedCount; |
| 950 |
| 951 /** Size of each entry. */ |
| 952 int get _entrySize => 1; |
| 953 |
| 954 void _checkModification(int expectedModificationCount) { |
| 955 if (_modificationCount != expectedModificationCount) { |
| 956 throw new ConcurrentModificationError(_container); |
| 957 } |
| 958 } |
| 959 |
| 960 void _recordModification() { |
| 961 // Value cycles after 2^30 modifications. If you keep hold of an |
| 962 // iterator for that long, you might miss a modification detection, |
| 963 // and iteration can go sour. Don't do that. |
| 964 _modificationCount = (_modificationCount + 1) & (0x3FFFFFFF); |
| 965 } |
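
A sketch of the protocol these two methods implement, as used by the iterators further down and by the _filterWhere loops (table stands for any _HashTable):

  int snapshot = table._modificationCount;
  // ... run user code that may add or remove entries ...
  table._checkModification(snapshot);  // throws ConcurrentModificationError
                                       // if the table changed in between
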
| 966 |
| 967 /** |
| 968 * Create an empty table. |
| 969 */ |
| 970 List _createTable(int capacity) { |
| 971 List table = new List(capacity * _entrySize); |
| 972 return table; |
| 973 } |
| 974 |
| 975 /** First table probe. */ |
| 976 int _firstProbe(int hashCode, int capacity) { |
| 977 return hashCode & (capacity - 1); |
| 978 } |
| 979 |
| 980 /** Following table probes. */ |
| 981 int _nextProbe(int previousIndex, int probeCount, int capacity) { |
| 982 // When capacity is a power of 2, this probing algorithm (the triangular |
| 983 // number sequence modulo capacity) is guaranteed to hit all indices exactly |
| 984 // once before repeating. |
| 985 return (previousIndex + probeCount) & (capacity - 1); |
| 986 } |
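
A small sketch checking the claim in the comment above: with a power-of-two capacity, the triangular-number sequence visits every index once before repeating (probeSequence is a hypothetical standalone helper):

  Set<int> probeSequence(int capacity, int start) {
    var seen = new Set<int>();
    int index = start & (capacity - 1);                 // _firstProbe
    int probeCount = 0;
    while (seen.add(index)) {                           // stops at the first repeat
      probeCount++;
      index = (index + probeCount) & (capacity - 1);    // _nextProbe
    }
    return seen;  // for capacity 8 this always contains all 8 indices
  }
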
| 987 |
| 988 /** Whether an object is a free-marker (either tombstone or free). */ |
| 989 bool _isFree(Object marker) => |
| 990 marker == null || identical(marker, _TOMBSTONE); |
| 991 |
| 992 /** |
| 993 * Look up the offset for an object in the table. |
| 994 * |
| 995 * Finds the offset of the object in the table, if it is there, |
| 996 * or the first free offset for its hashCode. |
| 997 */ |
| 998 int _probeForAdd(int hashCode, Object object) { |
| 999 int entrySize = _entrySize; |
| 1000 int index = _firstProbe(hashCode, _capacity); |
| 1001 int firstTombstone = -1; |
| 1002 int probeCount = 0; |
| 1003 while (true) { |
| 1004 int offset = index * entrySize; |
| 1005 Object entry = _table[offset]; |
| 1006 if (identical(entry, _TOMBSTONE)) { |
| 1007 if (firstTombstone < 0) firstTombstone = offset; |
| 1008 } else if (entry == null) { |
| 1009 if (firstTombstone < 0) return offset; |
| 1010 return firstTombstone; |
| 1011 } else if (identical(_NULL, entry) ? _equals(null, object) |
| 1012 : _equals(entry, object)) { |
| 1013 return offset; |
| 1014 } |
| 1015 // The _nextProbe is designed so that it hits |
| 1016 // every index eventually. |
| 1017 index = _nextProbe(index, ++probeCount, _capacity); |
| 1018 } |
| 1019 } |
| 1020 |
| 1021 /** |
| 1022 * Look up the offset for an object in the table. |
| 1023 * |
| 1024 * If the object is in the table, its offset is returned. |
| 1025 * |
| 1026 * If the object is not in the table, a negative value is returned. |
| 1027 */ |
| 1028 int _probeForLookup(int hashCode, Object object) { |
| 1029 int entrySize = _entrySize; |
| 1030 int index = _firstProbe(hashCode, _capacity); |
| 1031 int probeCount = 0; |
| 1032 while (true) { |
| 1033 int offset = index * entrySize; |
| 1034 Object entry = _table[offset]; |
| 1035 if (entry == null) { |
| 1036 return -1; |
| 1037 } else if (!identical(_TOMBSTONE, entry)) { |
| 1038 if (identical(_NULL, entry) ? _equals(null, object) |
| 1039 : _equals(entry, object)) { |
| 1040 return offset; |
995 } | 1041 } |
996 if (identical(identical, equals)) { | |
997 return new _LinkedIdentityHashSet<E>(); | |
998 } | |
999 hashCode = _defaultHashCode; | |
1000 } else if (equals == null) { | |
1001 equals = _defaultEquals; | |
1002 } | 1042 } |
1003 isValidKey = new _TypeTest<E>().test; | 1043 // The _nextProbe is designed so that it hits |
| 1044 // every index eventually. |
| 1045 index = _nextProbe(index, ++probeCount, _capacity); |
| 1046 } |
| 1047 } |
| 1048 |
| 1049 // Override the following two to change equality/hashCode computations |
| 1050 |
| 1051 /** |
| 1052 * Compare two object for equality. |
| 1053 * |
| 1054 * The first object is the one already in the table, |
| 1055 * and the second is the one being searched for. |
| 1056 */ |
| 1057 bool _equals(Object element, Object other) { |
| 1058 return element == other; |
| 1059 } |
| 1060 |
| 1061 /** |
| 1062 * Compute hash-code for an object. |
| 1063 */ |
| 1064 int _hashCodeOf(Object object) => object.hashCode; |
| 1065 |
| 1066 /** |
| 1067 * Ensure that the table isn't too full for its own good. |
| 1068 * |
| 1069 * Call this after adding an element. |
| 1070 */ |
| 1071 void _checkCapacity() { |
| 1072 // Compute everything in multiples of entrySize to avoid division. |
| 1073 int freeCount = _capacity - _entryCount; |
| 1074 if (freeCount * 4 < _capacity || |
| 1075 freeCount < _deletedCount) { |
| 1076 // Less than 25% free or more deleted entries than free entries. |
| 1077 _grow(_entryCount - _deletedCount); |
| 1078 } |
| 1079 } |
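
The policy above, restated as a hypothetical predicate with a worked example:

  bool shouldGrow(int capacity, int entryCount, int deletedCount) {
    int freeCount = capacity - entryCount;
    return freeCount * 4 < capacity      // less than 25% of the slots are free
        || freeCount < deletedCount;     // tombstones outnumber free slots
  }

  // Example with capacity 8 and no tombstones: adding the 7th entry leaves one
  // free slot, and 1 * 4 < 8, so _grow runs and the capacity doubles to 16.
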
| 1080 |
| 1081 void _grow(int contentCount) { |
| 1082 int capacity = _capacity; |
| 1083 // Don't grow to less than twice the needed capacity. |
| 1084 int minCapacity = contentCount * 2; |
| 1085 while (capacity < minCapacity) { |
| 1086 capacity *= 2; |
| 1087 } |
| 1088 // Reset to another table and add all existing elements. |
| 1089 List oldTable = _table; |
| 1090 _table = _createTable(capacity); |
| 1091 _capacity = capacity; |
| 1092 _entryCount = 0; |
| 1093 _deletedCount = 0; |
| 1094 _addAllEntries(oldTable); |
| 1095 _recordModification(); |
| 1096 } |
| 1097 |
| 1098 /** |
| 1099 * Copies all non-free entries from the old table to the new empty table. |
| 1100 */ |
| 1101 void _addAllEntries(List oldTable) { |
| 1102 for (int i = 0; i < oldTable.length; i += _entrySize) { |
| 1103 Object object = oldTable[i]; |
| 1104 if (!_isFree(object)) { |
| 1105 int toOffset = _put(object); |
| 1106 _copyEntry(oldTable, i, toOffset); |
| 1107 } |
| 1108 } |
| 1109 } |
| 1110 |
| 1111 /** |
| 1112 * Copies everything but the key element from one entry to another. |
| 1113 * |
| 1114 * Called while growing the base array. |
| 1115 * |
| 1116 * Override this if any non-key fields need copying. |
| 1117 */ |
| 1118 void _copyEntry(List fromTable, int fromOffset, int toOffset) {} |
| 1119 |
| 1120 // The following three methods are for simple get/set/remove operations. |
| 1121 // They only affect the key of an entry. The remaining fields must be |
| 1122 // filled by the caller. |
| 1123 |
| 1124 /** |
| 1125 * Returns the offset of a key in [_table], or negative if it's not there. |
| 1126 */ |
| 1127 int _get(Object key) { |
| 1128 return _probeForLookup(_hashCodeOf(key), key); |
| 1129 } |
| 1130 |
| 1131 /** |
| 1132 * Puts the key into the table and returns its offset into [_table]. |
| 1133 * |
| 1134 * If [_entrySize] is greater than 1, the caller should fill the |
| 1135 * remaining fields. |
| 1136 * |
| 1137 * Remember to call [_checkCapacity] after using this method. |
| 1138 */ |
| 1139 int _put(K key) { |
| 1140 int offset = _probeForAdd(_hashCodeOf(key), key); |
| 1141 Object oldEntry = _table[offset]; |
| 1142 if (oldEntry == null) { |
| 1143 _entryCount++; |
| 1144 } else if (identical(oldEntry, _TOMBSTONE)) { |
| 1145 _deletedCount--; |
1004 } else { | 1146 } else { |
1005 if (hashCode == null) hashCode = _defaultHashCode; | 1147 return offset; |
1006 if (equals == null) equals = _defaultEquals; | 1148 } |
1007 } | 1149 _setKey(offset, key); |
1008 return new _LinkedCustomHashSet<E>(equals, hashCode, isValidKey); | 1150 _recordModification(); |
1009 } | 1151 return offset; |
1010 } | 1152 } |
1011 | 1153 |
1012 class _LinkedHashSetEntry extends _HashSetEntry { | 1154 /** |
1013 /// Links this element into a double-linked list of elements of a hash set. | 1155 * Removes a key from the table and returns its offset into [_table]. |
1014 /// The hash set object itself is used as the head entry of the list, so | 1156 * |
1015 /// the field is typed as "var". | 1157 * Returns a negative value if the key was not in the table. |
1016 /// Both links are initialized to `this` when the object is created. | 1158 * If [_entrySize] is greater than 1, the caller should clean up the |
1017 var _nextEntry; | 1159 * remaining fields. |
1018 var _previousEntry; | 1160 */ |
1019 _LinkedHashSetEntry(var key, int hashCode, _LinkedHashSetEntry next, | 1161 int _remove(Object key) { |
1020 this._previousEntry, this._nextEntry) | 1162 int offset = _probeForLookup(_hashCodeOf(key), key); |
1021 : super(key, hashCode, next) { | 1163 if (offset >= 0) { |
1022 _previousEntry._nextEntry = _nextEntry._previousEntry = this; | 1164 _deleteEntry(offset); |
1023 } | 1165 } |
1024 | 1166 return offset; |
1025 _LinkedHashSetEntry remove() { | 1167 } |
1026 _previousEntry._nextEntry = _nextEntry; | 1168 |
1027 _nextEntry._previousEntry = _previousEntry; | 1169 /** Clears the table completely, leaving it empty. */ |
1028 _nextEntry = _previousEntry = this; | 1170 void _clear() { |
1029 return super.remove(); | 1171 if (_elementCount == 0) return; |
1030 } | 1172 for (int i = 0; i < _table.length; i++) { |
1031 } | 1173 _table[i] = null; |
1032 | 1174 } |
1033 class _LinkedHashSet<E> extends _HashSet<E> | 1175 _entryCount = _deletedCount = 0; |
1034 implements LinkedHashSet<E> { | 1176 _recordModification(); |
1035 /// Holds a double linked list of the element entries of the set in | 1177 } |
1036 /// insertion order. | 1178 |
1037 /// The fields have the same names as the ones in [_LinkedHashSetEntry], | 1179 /** Clears an entry in the table. */ |
1038 /// allowing this object to be used as the head entry of the list. | 1180 void _deleteEntry(int offset) { |
1039 /// The fields are initialized to `this` when created, representing the | 1181 assert(!_isFree(_table[offset])); |
1040 /// empty list that only contains the head entry. | 1182 _setKey(offset, _TOMBSTONE); |
1041 var _nextEntry; | 1183 _deletedCount++; |
1042 var _previousEntry; | 1184 _recordModification(); |
1043 | 1185 } |
1044 _LinkedHashSet() { | 1186 } |
1045 _nextEntry = _previousEntry = this; | 1187 |
1046 } | 1188 /** |
1047 | 1189 * Generic iterable based on a [_HashTable]. |
1048 // Iterable. | 1190 */ |
1049 Iterator<E> get iterator => new _LinkedHashSetIterator<E>(this); | 1191 abstract class _HashTableIterable<E> extends IterableBase<E> { |
| 1192 final _HashTable _hashTable; |
| 1193 _HashTableIterable(this._hashTable); |
| 1194 |
| 1195 Iterator<E> get iterator; |
| 1196 |
| 1197 /** |
| 1198 * Return the iterated value for a given entry. |
| 1199 */ |
| 1200 E _valueAt(int offset, Object key); |
| 1201 |
| 1202 int get length => _hashTable._elementCount; |
| 1203 |
| 1204 bool get isEmpty => _hashTable._elementCount == 0; |
1050 | 1205 |
1051 void forEach(void action(E element)) { | 1206 void forEach(void action(E element)) { |
1052 var cursor = _nextEntry; | 1207 int entrySize = _hashTable._entrySize; |
1053 int modificationCount = _modificationCount; | 1208 List table = _hashTable._table; |
1054 while (!identical(cursor, this)) { | 1209 int modificationCount = _hashTable._modificationCount; |
1055 _LinkedHashSetEntry entry = cursor; | 1210 for (int offset = 0; offset < table.length; offset += entrySize) { |
1056 action(entry.key); | 1211 Object entry = table[offset]; |
1057 if (_modificationCount != modificationCount) { | 1212 if (!_hashTable._isFree(entry)) { |
1058 throw new ConcurrentModificationError(this); | 1213 E value = _valueAt(offset, entry); |
| 1214 action(value); |
1059 } | 1215 } |
1060 cursor = entry._nextEntry; | 1216 _hashTable._checkModification(modificationCount); |
1061 } | 1217 } |
1062 } | 1218 } |
1063 | 1219 } |
1064 E get first { | 1220 |
1065 if (identical(_nextEntry, this)) { | 1221 abstract class _HashTableIterator<E> implements Iterator<E> { |
1066 throw new StateError("No elements"); | 1222 final _HashTable _hashTable; |
1067 } | 1223 final int _modificationCount; |
1068 _LinkedHashSetEntry entry = _nextEntry; | 1224 /** Location right after last found element. */ |
1069 return entry.key; | 1225 int _offset = 0; |
1070 } | 1226 E _current = null; |
1071 | 1227 |
1072 E get last { | 1228 _HashTableIterator(_HashTable hashTable) |
1073 if (identical(_previousEntry, this)) { | 1229 : _hashTable = hashTable, |
1074 throw new StateError("No elements"); | 1230 _modificationCount = hashTable._modificationCount; |
1075 } | 1231 |
1076 _LinkedHashSetEntry entry = _previousEntry; | 1232 bool moveNext() { |
1077 return entry.key; | 1233 _hashTable._checkModification(_modificationCount); |
1078 } | 1234 |
1079 | 1235 List table = _hashTable._table; |
1080 // Set. | 1236 int entrySize = _hashTable._entrySize; |
1081 void _filterWhere(bool test(E element), bool removeMatching) { | 1237 |
1082 var cursor = _nextEntry; | 1238 while (_offset < table.length) { |
1083 while (!identical(cursor, this)) { | 1239 int currentOffset = _offset; |
1084 _LinkedHashSetEntry entry = cursor; | 1240 Object entry = table[currentOffset]; |
1085 int modificationCount = _modificationCount; | 1241 _offset = currentOffset + entrySize; |
1086 bool testResult = test(entry.key); | 1242 if (!_hashTable._isFree(entry)) { |
1087 if (modificationCount != _modificationCount) { | 1243 _current = _valueAt(currentOffset, entry); |
1088 throw new ConcurrentModificationError(this); | 1244 return true; |
1089 } | 1245 } |
1090 cursor = entry._nextEntry; | 1246 } |
1091 if (testResult == removeMatching) { | 1247 _current = null; |
1092 _remove(entry.key, entry.hashCode); | 1248 return false; |
1093 } | 1249 } |
1094 } | 1250 |
1095 } | 1251 E get current => _current; |
1096 | 1252 |
1097 void _addEntry(E key, int hashCode, int index) { | 1253 E _valueAt(int offset, Object key); |
1098 _buckets[index] = | 1254 } |
1099 new _LinkedHashSetEntry(key, hashCode, _buckets[index], | 1255 |
1100 _previousEntry, this); | 1256 class _HashTableKeyIterable<K> extends _HashTableIterable<K> { |
1101 int newElements = _elementCount + 1; | 1257 _HashTableKeyIterable(_HashTable<K> hashTable) : super(hashTable); |
1102 _elementCount = newElements; | 1258 |
1103 int length = _buckets.length; | 1259 Iterator<K> get iterator => new _HashTableKeyIterator<K>(_hashTable); |
1104 // If we end up with more than 75% non-empty entries, we | 1260 |
1105 // resize the backing store. | 1261 K _valueAt(int offset, Object key) { |
1106 if ((newElements << 2) > ((length << 1) + length)) _resize(); | 1262 if (identical(key, _NULL)) return null; |
1107 _modificationCount = (_modificationCount + 1) & _MODIFICATION_COUNT_MASK; | 1263 return key; |
1108 } | 1264 } |
1109 | 1265 |
1110 HashSet<E> _newSet() => new _LinkedHashSet<E>(); | 1266 bool contains(Object value) => _hashTable._get(value) >= 0; |
1111 } | 1267 } |
1112 | 1268 |
1113 class _LinkedIdentityHashSet<E> extends _LinkedHashSet<E> { | 1269 class _HashTableKeyIterator<K> extends _HashTableIterator<K> { |
1114 bool _equals(e1, e2) => identical(e1, e2); | 1270 _HashTableKeyIterator(_HashTable hashTable) : super(hashTable); |
1115 HashSet<E> _newSet() => new _LinkedIdentityHashSet<E>(); | 1271 |
1116 } | 1272 K _valueAt(int offset, Object key) { |
1117 | 1273 if (identical(key, _NULL)) return null; |
1118 class _LinkedCustomHashSet<E> extends _LinkedHashSet<E> { | 1274 return key; |
1119 final _Equality<E> _equality; | 1275 } |
1120 final _Hasher<E> _hasher; | 1276 } |
1121 final _Predicate _validKey; | 1277 |
1122 | 1278 class _HashTableValueIterable<V> extends _HashTableIterable<V> { |
1123 _LinkedCustomHashSet(this._equality, this._hasher, bool validKey(object)) | 1279 final int _entryIndex; |
1124 : _validKey = (validKey != null) ? validKey : new _TypeTest<E>().test; | 1280 |
1125 | 1281 _HashTableValueIterable(_HashTable hashTable, this._entryIndex) |
1126 bool _equals(e1, e2) => _equality(e1, e2); | 1282 : super(hashTable); |
1127 | 1283 |
1128 int _hashCode(e) => _hasher(e); | 1284 Iterator<V> get iterator { |
1129 | 1285 return new _HashTableValueIterator<V>(_hashTable, _entryIndex); |
1130 bool contains(Object o) { | 1286 } |
1131 if (!_validKey(o)) return false; | 1287 |
1132 return super.contains(o); | 1288 V _valueAt(int offset, Object key) => _hashTable._table[offset + _entryIndex]; |
1133 } | 1289 } |
1134 | 1290 |
1135 bool remove(Object o) { | 1291 class _HashTableValueIterator<V> extends _HashTableIterator<V> { |
1136 if (!_validKey(o)) return false; | 1292 final int _entryIndex; |
1137 return super.remove(o); | 1293 |
1138 } | 1294 _HashTableValueIterator(_HashTable hashTable, this._entryIndex) |
1139 | 1295 : super(hashTable); |
1140 E operator[](Object o) { | 1296 |
1141 if (!_validKey(o)) return null; | 1297 V _valueAt(int offset, Object key) => _hashTable._table[offset + _entryIndex]; |
1142 return super[o]; | 1298 } |
1143 } | 1299 |
1144 | 1300 class _HashMapTable<K, V> extends _HashTable<K> { |
1145 HashSet<E> _newSet() => | 1301 static const int _INITIAL_CAPACITY = 8; |
1146 new _LinkedCustomHashSet<E>(_equality, _hasher, _validKey); | 1302 static const int _VALUE_INDEX = 1; |
1147 } | 1303 |
1148 | 1304 _HashMapTable() : super(_INITIAL_CAPACITY); |
1149 class _LinkedHashSetIterator<E> implements Iterator<E> { | 1305 |
1150 final _LinkedHashSet _set; | 1306 int get _entrySize => 2; |
| 1307 |
| 1308 V _value(int offset) => _table[offset + _VALUE_INDEX]; |
| 1309 void _setValue(int offset, V value) { _table[offset + _VALUE_INDEX] = value; } |
| 1310 |
| 1311 _copyEntry(List fromTable, int fromOffset, int toOffset) { |
| 1312 _table[toOffset + _VALUE_INDEX] = fromTable[fromOffset + _VALUE_INDEX]; |
| 1313 } |
| 1314 } |
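
Layout sketch for the map table above (entrySize == 2): keys and values share one flat list, so entry i occupies slots 2*i (key) and 2*i + 1 (value). A hypothetical dump loop, reading the table the same way _HashTableIterable.forEach above does (map stands for a _HashMapTable instance):

  // _table: [ key0, value0, key1, value1, ..., key(n-1), value(n-1) ]
  for (int offset = 0; offset < map._table.length; offset += 2) {
    var key = map._table[offset];
    if (!map._isFree(key)) {
      print('${identical(key, _NULL) ? null : key} => ${map._table[offset + 1]}');
    }
  }
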
| 1315 |
| 1316 /** Unique marker object for the head of a linked list of entries. */ |
| 1317 class _LinkedHashTableHeadMarker { |
| 1318 const _LinkedHashTableHeadMarker(); |
| 1319 } |
| 1320 |
| 1321 const _LinkedHashTableHeadMarker _HEAD_MARKER = |
| 1322 const _LinkedHashTableHeadMarker(); |
| 1323 |
| 1324 class _LinkedHashTable<K> extends _HashTable<K> { |
| 1325 static const _NEXT_INDEX = 1; |
| 1326 static const _PREV_INDEX = 2; |
| 1327 static const _HEAD_OFFSET = 0; |
| 1328 |
| 1329 _LinkedHashTable(int initialCapacity) : super(initialCapacity); |
| 1330 |
| 1331 int get _entrySize => 3; |
| 1332 |
| 1333 List _createTable(int capacity) { |
| 1334 List result = new List(capacity * _entrySize); |
| 1335 result[_HEAD_OFFSET] = _HEAD_MARKER; |
| 1336 result[_HEAD_OFFSET + _NEXT_INDEX] = _HEAD_OFFSET; |
| 1337 result[_HEAD_OFFSET + _PREV_INDEX] = _HEAD_OFFSET; |
| 1338 return result; |
| 1339 } |
| 1340 |
| 1341 int _next(int offset) => _table[offset + _NEXT_INDEX]; |
| 1342 void _setNext(int offset, int to) { _table[offset + _NEXT_INDEX] = to; } |
| 1343 |
| 1344 int _prev(int offset) => _table[offset + _PREV_INDEX]; |
| 1345 void _setPrev(int offset, int to) { _table[offset + _PREV_INDEX] = to; } |
| 1346 |
| 1347 void _linkLast(int offset) { |
| 1348 // Add entry at offset at end of double-linked list. |
| 1349 int last = _prev(_HEAD_OFFSET); |
| 1350 _setNext(offset, _HEAD_OFFSET); |
| 1351 _setPrev(offset, last); |
| 1352 _setNext(last, offset); |
| 1353 _setPrev(_HEAD_OFFSET, offset); |
| 1354 } |
| 1355 |
| 1356 void _unlink(int offset) { |
| 1357 assert(offset != _HEAD_OFFSET); |
| 1358 int next = _next(offset); |
| 1359 int prev = _prev(offset); |
| 1360 _setNext(offset, null); |
| 1361 _setPrev(offset, null); |
| 1362 _setNext(prev, next); |
| 1363 _setPrev(next, prev); |
| 1364 } |
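
With entrySize == 3 every entry carries its key plus two offsets, and offset 0 is a reserved head marked by _HEAD_MARKER, so insertion-order traversal just follows the _NEXT_INDEX slots. This is the same loop LinkedHashSet.forEach above and the linked iterators below use (table stands for any _LinkedHashTable):

  int offset = table._next(_LinkedHashTable._HEAD_OFFSET);
  while (offset != _LinkedHashTable._HEAD_OFFSET) {
    print(table._key(offset));
    offset = table._next(offset);
  }
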
| 1365 |
| 1366 /** |
| 1367 * Copies all non-free entries from the old table to the new empty table. |
| 1368 */ |
| 1369 void _addAllEntries(List oldTable) { |
| 1370 int offset = oldTable[_HEAD_OFFSET + _NEXT_INDEX]; |
| 1371 while (offset != _HEAD_OFFSET) { |
| 1372 Object object = oldTable[offset]; |
| 1373 int nextOffset = oldTable[offset + _NEXT_INDEX]; |
| 1374 int toOffset = _put(object); |
| 1375 _copyEntry(oldTable, offset, toOffset); |
| 1376 offset = nextOffset; |
| 1377 } |
| 1378 } |
| 1379 |
| 1380 void _clear() { |
| 1381 if (_elementCount == 0) return; |
| 1382 _setNext(_HEAD_OFFSET, _HEAD_OFFSET); |
| 1383 _setPrev(_HEAD_OFFSET, _HEAD_OFFSET); |
| 1384 for (int i = _entrySize; i < _table.length; i++) { |
| 1385 _table[i] = null; |
| 1386 } |
| 1387 _entryCount = _deletedCount = 0; |
| 1388 _recordModification(); |
| 1389 } |
| 1390 |
| 1391 int _put(K key) { |
| 1392 int offset = _probeForAdd(_hashCodeOf(key), key); |
| 1393 Object oldEntry = _table[offset]; |
| 1394 if (identical(oldEntry, _TOMBSTONE)) { |
| 1395 _deletedCount--; |
| 1396 } else if (oldEntry == null) { |
| 1397 _entryCount++; |
| 1398 } else { |
| 1399 return offset; |
| 1400 } |
| 1401 _recordModification(); |
| 1402 _setKey(offset, key); |
| 1403 _linkLast(offset); |
| 1404 return offset; |
| 1405 } |
| 1406 |
| 1407 void _deleteEntry(int offset) { |
| 1408 _unlink(offset); |
| 1409 _setKey(offset, _TOMBSTONE); |
| 1410 _deletedCount++; |
| 1411 _recordModification(); |
| 1412 } |
| 1413 } |
| 1414 |
| 1415 class _LinkedHashTableKeyIterable<K> extends IterableBase<K> { |
| 1416 final _LinkedHashTable<K> _table; |
| 1417 _LinkedHashTableKeyIterable(this._table); |
| 1418 Iterator<K> get iterator => new _LinkedHashTableKeyIterator<K>(_table); |
| 1419 |
| 1420 bool contains(Object value) => _table._get(value) >= 0; |
| 1421 |
| 1422 int get length => _table._elementCount; |
| 1423 } |
| 1424 |
| 1425 class _LinkedHashTableKeyIterator<K> extends _LinkedHashTableIterator<K> { |
| 1426 _LinkedHashTableKeyIterator(_LinkedHashTable<K> hashTable): super(hashTable); |
| 1427 |
| 1428 K _getCurrent(int offset) => _hashTable._key(offset); |
| 1429 } |
| 1430 |
| 1431 class _LinkedHashTableValueIterable<V> extends IterableBase<V> { |
| 1432 final _LinkedHashTable _hashTable; |
| 1433 final int _valueIndex; |
| 1434 _LinkedHashTableValueIterable(this._hashTable, this._valueIndex); |
| 1435 Iterator<V> get iterator => |
| 1436 new _LinkedHashTableValueIterator<V>(_hashTable, _valueIndex); |
| 1437 int get length => _hashTable._elementCount; |
| 1438 } |
| 1439 |
| 1440 class _LinkedHashTableValueIterator<V> extends _LinkedHashTableIterator<V> { |
| 1441 final int _valueIndex; |
| 1442 |
| 1443 _LinkedHashTableValueIterator(_LinkedHashTable hashTable, this._valueIndex) |
| 1444 : super(hashTable); |
| 1445 |
| 1446 V _getCurrent(int offset) => _hashTable._table[offset + _valueIndex]; |
| 1447 } |
| 1448 |
| 1449 abstract class _LinkedHashTableIterator<T> implements Iterator<T> { |
| 1450 final _LinkedHashTable _hashTable; |
1151 final int _modificationCount; | 1451 final int _modificationCount; |
1152 var _next; | 1452 int _offset; |
1153 E _current; | 1453 T _current; |
1154 | 1454 |
1155 _LinkedHashSetIterator(_LinkedHashSet hashSet) | 1455 _LinkedHashTableIterator(_LinkedHashTable table) |
1156 : _set = hashSet, | 1456 : _hashTable = table, |
1157 _modificationCount = hashSet._modificationCount, | 1457 _modificationCount = table._modificationCount, |
1158 _next = hashSet._nextEntry; | 1458 _offset = table._next(_LinkedHashTable._HEAD_OFFSET); |
1159 | 1459 |
1160 bool moveNext() { | 1460 bool moveNext() { |
1161 if (_modificationCount != _set._modificationCount) { | 1461 _hashTable._checkModification(_modificationCount); |
1162 throw new ConcurrentModificationError(_set); | 1462 if (_offset == _LinkedHashTable._HEAD_OFFSET) { |
1163 } | |
1164 if (identical(_set, _next)) { | |
1165 _current = null; | 1463 _current = null; |
1166 return false; | 1464 return false; |
1167 } | 1465 } |
1168 _LinkedHashSetEntry entry = _next; | 1466 _current = _getCurrent(_offset); |
1169 _current = entry.key; | 1467 _offset = _hashTable._next(_offset); |
1170 _next = entry._nextEntry; | |
1171 return true; | 1468 return true; |
1172 } | 1469 } |
1173 | 1470 |
1174 E get current => _current; | 1471 T _getCurrent(int offset); |
1175 } | 1472 |
| 1473 T get current => _current; |
| 1474 } |
| 1475 |
| 1476 class _LinkedHashMapTable<K, V> extends _LinkedHashTable<K> { |
| 1477 static const int _INITIAL_CAPACITY = 8; |
| 1478 static const int _VALUE_INDEX = 3; |
| 1479 |
| 1480 int get _entrySize => 4; |
| 1481 |
| 1482 _LinkedHashMapTable() : super(_INITIAL_CAPACITY); |
| 1483 |
| 1484 V _value(int offset) => _table[offset + _VALUE_INDEX]; |
| 1485 void _setValue(int offset, V value) { _table[offset + _VALUE_INDEX] = value; } |
| 1486 |
| 1487 _copyEntry(List oldTable, int fromOffset, int toOffset) { |
| 1488 _table[toOffset + _VALUE_INDEX] = oldTable[fromOffset + _VALUE_INDEX]; |
| 1489 } |
| 1490 } |