// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_STUB_CACHE_H_
#define V8_STUB_CACHE_H_

#include "src/macro-assembler.h"

namespace v8 {
namespace internal {


// The stub cache is used for megamorphic property accesses.
// It maps (map, name, type) to property access handlers. The cache does not
// need explicit invalidation when a prototype chain is modified, since the
// handlers verify the chain.
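//
// A sketched example of the intended flow: once an access site like `o.x`
// has seen too many receiver maps, its inline cache goes megamorphic and
// probes this cache with the receiver's map and the name "x". A hit jumps
// directly to the cached handler; a miss falls through to the runtime,
// which compiles a handler and records it here via Set().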


class SCTableReference {
 public:
  Address address() const { return address_; }

 private:
  explicit SCTableReference(Address address) : address_(address) {}

  Address address_;

  friend class StubCache;
};


class StubCache {
 public:
  struct Entry {
    Name* key;
    Code* value;
    Map* map;
  };

  void Initialize();
  // Access the cache entry for hash(name, map).
  Code* Set(Name* name, Map* map, Code* code);
  Code* Get(Name* name, Map* map, Code::Flags flags);
  // Clear the lookup table (during mark-compact collection).
  void Clear();
  // Collect all maps that match the name and flags.
  void CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
                           Code::Flags flags, Handle<Context> native_context,
                           Zone* zone);
  // Generate code for probing the stub cache table.
  // Arguments extra, extra2 and extra3 may be used to pass additional scratch
  // registers. Set to no_reg if not needed.
  void GenerateProbe(MacroAssembler* masm, Code::Flags flags,
                     Register receiver, Register name, Register scratch,
                     Register extra, Register extra2 = no_reg,
                     Register extra3 = no_reg);

  enum Table { kPrimary, kSecondary };

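  // The following accessors expose the raw table columns so that generated
  // code can address them (via external references) when GenerateProbe()
  // emits the inline probing sequence.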
  SCTableReference key_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->key));
  }

  SCTableReference map_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->map));
  }

  SCTableReference value_reference(StubCache::Table table) {
    return SCTableReference(
        reinterpret_cast<Address>(&first_entry(table)->value));
  }

  StubCache::Entry* first_entry(StubCache::Table table) {
    switch (table) {
      case StubCache::kPrimary:
        return StubCache::primary_;
      case StubCache::kSecondary:
        return StubCache::secondary_;
    }
    UNREACHABLE();
    return NULL;
  }

  Isolate* isolate() { return isolate_; }

  // Setting the entry size such that the index is shifted by Name::kHashShift
  // is convenient; shifting down the length field (to extract the hash code)
  // automatically discards the hash bit field.
  static const int kCacheIndexShift = Name::kHashShift;

 private:
  explicit StubCache(Isolate* isolate);

  // The stub cache has a primary and secondary level. The two levels have
  // different hashing algorithms in order to avoid simultaneous collisions
  // in both caches. Unlike a probing strategy (quadratic or otherwise), the
  // update strategy is fairly clear and simple: any existing entry in the
  // primary cache is moved to the secondary cache, and secondary cache
  // entries are overwritten.
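  //
  // A minimal sketch of that update protocol (the authoritative logic lives
  // in StubCache::Set(); old_flags below stands for the evicted entry's
  // flags):
  //
  //   int offset = PrimaryOffset(name, flags, map);
  //   Entry* primary = entry(primary_, offset);
  //   if (primary->value is a real handler)
  //     *entry(secondary_, SecondaryOffset(primary->key, old_flags, offset)) =
  //         *primary;
  //   *primary = {name, code, map};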

  // Hash algorithm for the primary table. This algorithm is replicated in
  // assembler for every architecture. Returns an index into the table that
  // is scaled by 1 << kCacheIndexShift.
  static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) {
    STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
    // Compute the hash of the name (use entire hash field).
    DCHECK(name->HasHashCode());
    uint32_t field = name->hash_field();
    // Using only the low bits in 64-bit mode is unlikely to increase the
    // risk of collision even if the heap is spread over an area larger than
    // 4 GB (and not at all if it isn't).
    uint32_t map_low32bits =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
    // We always set the in_loop bit to zero when generating the lookup code,
    // so do it here too so that the hash codes match.
    uint32_t iflags =
        (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
    // Base the offset on a simple combination of name, flags, and map.
    uint32_t key = (map_low32bits + field) ^ iflags;
    return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
  }
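
  // An illustrative check of the scaling (assuming Name::kHashShift == 2):
  // the mask (kPrimaryTableSize - 1) << kCacheIndexShift is 0x1FFC, which
  // keeps bits [2, 13) of the key, i.e. a table index in the range
  // [0, kPrimaryTableSize) pre-scaled by 1 << kCacheIndexShift, which is
  // exactly the form that entry() below expects.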

  // Hash algorithm for the secondary table. This algorithm is replicated in
  // assembler for every architecture. Returns an index into the table that
  // is scaled by 1 << kCacheIndexShift.
  static int SecondaryOffset(Name* name, Code::Flags flags, int seed) {
    // Use the seed from the primary cache in the secondary cache.
    uint32_t name_low32bits =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
    // We always set the in_loop bit to zero when generating the lookup code,
    // so do it here too so that the hash codes match.
    uint32_t iflags =
        (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
    uint32_t key = (seed - name_low32bits) + iflags;
    return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
  }

  // Compute the entry for a given offset in exactly the same way as
  // we do in generated code. We generate a hash code that already
  // ends in Name::kHashShift zero bits, and then multiply it so that it
  // becomes a multiple of sizeof(Entry). This makes it easier to avoid
  // making mistakes in the hashed offset computations.
  static Entry* entry(Entry* table, int offset) {
    const int multiplier = sizeof(*table) >> Name::kHashShift;
    return reinterpret_cast<Entry*>(reinterpret_cast<Address>(table) +
                                    offset * multiplier);
  }
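
  // A worked instance of that arithmetic (illustrative; the constants depend
  // on the target): with 64-bit pointers sizeof(Entry) is 24, so assuming
  // Name::kHashShift == 2 the multiplier is 24 >> 2 == 6. An offset of
  // index << 2 then yields table + index * 24, the address of entry number
  // index.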

  static const int kPrimaryTableBits = 11;
  static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
  static const int kSecondaryTableBits = 9;
  static const int kSecondaryTableSize = (1 << kSecondaryTableBits);

  Entry primary_[kPrimaryTableSize];
  Entry secondary_[kSecondaryTableSize];
  Isolate* isolate_;

  friend class Isolate;
  friend class SCTableReference;

  DISALLOW_COPY_AND_ASSIGN(StubCache);
};
}
}  // namespace v8::internal

#endif  // V8_STUB_CACHE_H_