| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_STUB_CACHE_H_ | 5 #ifndef V8_STUB_CACHE_H_ |
| 6 #define V8_STUB_CACHE_H_ | 6 #define V8_STUB_CACHE_H_ |
| 7 | 7 |
| 8 #include "src/macro-assembler.h" | 8 #include "src/macro-assembler.h" |
| 9 | 9 |
| 10 namespace v8 { | 10 namespace v8 { |
| (...skipping 23 matching lines...) |
| 34 public: | 34 public: |
| 35 struct Entry { | 35 struct Entry { |
| 36 Name* key; | 36 Name* key; |
| 37 Code* value; | 37 Code* value; |
| 38 Map* map; | 38 Map* map; |
| 39 }; | 39 }; |
| 40 | 40 |
| 41 void Initialize(); | 41 void Initialize(); |
| 42 // Access cache for entry hash(name, map). | 42 // Access cache for entry hash(name, flags, map). |
| 43 Code* Set(Name* name, Map* map, Code* code); | 43 Code* Set(Name* name, Map* map, Code* code); |
| 44 Code* Get(Name* name, Map* map); | 44 Code* Get(Name* name, Map* map, Code::Flags flags); |
| 45 // Clear the lookup table (at mark-compact collection). | 45 // Clear the lookup table (at mark-compact collection). |
| 46 void Clear(); | 46 void Clear(); |
| 47 // Collect all maps that match the name. | 47 // Collect all maps that match the name and flags. |
| 48 void CollectMatchingMaps(SmallMapList* types, Handle<Name> name, | 48 void CollectMatchingMaps(SmallMapList* types, Handle<Name> name, |
| 49 Handle<Context> native_context, Zone* zone); | 49 Code::Flags flags, Handle<Context> native_context, |
| 50 Zone* zone); |
| 50 // Generate code for probing the stub cache table. | 51 // Generate code for probing the stub cache table. |
| 51 // Arguments extra, extra2 and extra3 may be used to pass additional scratch | 52 // Arguments extra, extra2 and extra3 may be used to pass additional scratch |
| 52 // registers. Set to no_reg if not needed. | 53 // registers. Set to no_reg if not needed. |
| 53 // On a hit the probe tail-calls into the cached code object. | 54 // On a hit the probe tail-calls into the cached code object. |
| 54 void GenerateProbe(MacroAssembler* masm, Register receiver, Register name, | 55 void GenerateProbe(MacroAssembler* masm, Register receiver, Register name, |
| 55 Register scratch, Register extra, Register extra2 = no_reg, | 56 Register scratch, Register extra, Register extra2 = no_reg, |
| 56 Register extra3 = no_reg); | 57 Register extra3 = no_reg); |
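A minimal caller-side sketch of the updated interface (the surrounding IC code here is hypothetical; flags stands for whatever Code::Flags the IC computed for its handler kind, since after this change only Get() takes flags while Set() reads them off the code object):

    // Hypothetical IC miss path: install the handler, then hit the cache on
    // the next lookup for the same (name, map, flags) triple.
    stub_cache->Set(*name, receiver->map(), *handler);
    Code* cached = stub_cache->Get(*name, receiver->map(), flags);
    if (cached != NULL) {
      // Hit: dispatch to the cached handler instead of the generic miss path.
    }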
| 57 | 58 |
| 58 enum Table { kPrimary, kSecondary }; | 59 enum Table { kPrimary, kSecondary }; |
| 59 | 60 |
| (...skipping 29 matching lines...) |
| 89 // Setting the entry size such that the index is shifted by Name::kHashShift | 90 // Setting the entry size such that the index is shifted by Name::kHashShift |
| 90 // is convenient; shifting down the hash field (to extract the hash code) | 91 // is convenient; shifting down the hash field (to extract the hash code) |
| 91 // automatically discards the hash bit field. | 92 // automatically discards the hash bit field. |
| 92 static const int kCacheIndexShift = Name::kHashShift; | 93 static const int kCacheIndexShift = Name::kHashShift; |
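The convenience claimed here can be checked in isolation. The following standalone sketch assumes Name::kHashShift == 2 (an assumption; the authoritative value lives in Name) and reuses the primary-table constants declared just below:

    // Illustration of the index masking. The low kCacheIndexShift bits of
    // hash_field() are flag bits, not hash bits; the mask below never lets
    // them reach the index, and the result is already a multiple of
    // 1 << kCacheIndexShift, ready for entry() to scale.
    #include <cstdint>
    #include <cstdio>

    int main() {
      const int kCacheIndexShift = 2;         // assumed value of Name::kHashShift
      const int kPrimaryTableSize = 1 << 11;  // kPrimaryTableBits == 11
      uint32_t hash_field = 0xDEADBEEFu;      // example hash field
      uint32_t offset =
          hash_field & ((kPrimaryTableSize - 1) << kCacheIndexShift);
      printf("offset = 0x%x (multiple of %d)\n", offset, 1 << kCacheIndexShift);
      return 0;
    }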
| 93 | 94 |
| 94 static const int kPrimaryTableBits = 11; | 95 static const int kPrimaryTableBits = 11; |
| 95 static const int kPrimaryTableSize = (1 << kPrimaryTableBits); | 96 static const int kPrimaryTableSize = (1 << kPrimaryTableBits); |
| 96 static const int kSecondaryTableBits = 9; | 97 static const int kSecondaryTableBits = 9; |
| 97 static const int kSecondaryTableSize = (1 << kSecondaryTableBits); | 98 static const int kSecondaryTableSize = (1 << kSecondaryTableBits); |
| 98 | 99 |
| 99 static int PrimaryOffsetForTesting(Name* name, Map* map) { | 100 static int PrimaryOffsetForTesting(Name* name, Code::Flags flags, Map* map) { |
| 100 return PrimaryOffset(name, map); | 101 return PrimaryOffset(name, flags, map); |
| 101 } | 102 } |
| 102 | 103 |
| 103 static int SecondaryOffsetForTesting(Name* name, int seed) { | 104 static int SecondaryOffsetForTesting(Name* name, Code::Flags flags, |
| 104 return SecondaryOffset(name, seed); | 105 int seed) { |
| 106 return SecondaryOffset(name, flags, seed); |
| 105 } | 107 } |
| 106 | 108 |
| 107 // The constructor is made public only for the purposes of testing. | 109 // The constructor is made public only for the purposes of testing. |
| 108 StubCache(Isolate* isolate, Code::Kind ic_kind); | 110 StubCache(Isolate* isolate, Code::Kind ic_kind); |
| 109 | 111 |
| 110 private: | 112 private: |
| 111 // The stub cache has a primary and secondary level. The two levels have | 113 // The stub cache has a primary and secondary level. The two levels have |
| 112 // different hashing algorithms in order to avoid simultaneous collisions | 114 // different hashing algorithms in order to avoid simultaneous collisions |
| 113 // in both caches. Unlike a probing strategy (quadratic or otherwise), the | 115 // in both caches. Unlike a probing strategy (quadratic or otherwise), the |
| 114 // update strategy is fairly clear and simple: Any existing entry | 116 // update strategy is fairly clear and simple: Any existing entry |
| 115 // in the primary cache is moved to the secondary cache, and secondary cache | 117 // in the primary cache is moved to the secondary cache, and secondary cache |
| 116 // entries are overwritten. | 118 // entries are overwritten. |
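A simplified sketch of this update strategy, roughly as Set() might implement it against the declarations in this header (not the exact body of this change; the real implementation also guards against demoting placeholder entries):

    Code* StubCache::Set(Name* name, Map* map, Code* code) {
      Code::Flags flags = code->flags();
      int primary_offset = PrimaryOffset(name, flags, map);
      Entry* primary = entry(primary_, primary_offset);
      // Any live primary entry is demoted to the secondary table, and the
      // secondary slot it hashes to is simply overwritten.
      if (primary->value != NULL) {
        int secondary_offset = SecondaryOffset(
            primary->key, primary->value->flags(), primary_offset);
        Entry* secondary = entry(secondary_, secondary_offset);
        *secondary = *primary;
      }
      // The new entry always lands in the primary table.
      primary->key = name;
      primary->value = code;
      primary->map = map;
      return code;
    }

Note the seeding: the secondary hash reuses the primary offset as its seed, per the comment in SecondaryOffset() below.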
| 117 | 119 |
| 118 // Hash algorithm for the primary table. This algorithm is replicated in | 120 // Hash algorithm for the primary table. This algorithm is replicated in |
| 119 // assembler for every architecture. Returns an index into the table that | 121 // assembler for every architecture. Returns an index into the table that |
| 120 // is scaled by 1 << kCacheIndexShift. | 122 // is scaled by 1 << kCacheIndexShift. |
| 121 static int PrimaryOffset(Name* name, Map* map) { | 123 static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) { |
| 122 STATIC_ASSERT(kCacheIndexShift == Name::kHashShift); | 124 STATIC_ASSERT(kCacheIndexShift == Name::kHashShift); |
| 123 // Compute the hash of the name (use entire hash field). | 125 // Compute the hash of the name (use entire hash field). |
| 124 DCHECK(name->HasHashCode()); | 126 DCHECK(name->HasHashCode()); |
| 125 uint32_t field = name->hash_field(); | 127 uint32_t field = name->hash_field(); |
| 126 // Using only the low bits in 64-bit mode is unlikely to increase the | 128 // Using only the low bits in 64-bit mode is unlikely to increase the |
| 127 // risk of collision even if the heap is spread over an area larger than | 129 // risk of collision even if the heap is spread over an area larger than |
| 128 // 4Gb (and not at all if it isn't). | 130 // 4Gb (and not at all if it isn't). |
| 129 uint32_t map_low32bits = | 131 uint32_t map_low32bits = |
| 130 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)); | 132 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)); |
| 131 // Base the offset on a simple combination of name and map. | 133 // We always set the in_loop bit to zero when generating the lookup code |
| 132 uint32_t key = map_low32bits + field; | 134 // so do it here too, so the hash codes match. |
| 135 uint32_t iflags = |
| 136 (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup); |
| 137 // Base the offset on a simple combination of name, flags, and map. |
| 138 uint32_t key = (map_low32bits + field) ^ iflags; |
| 133 return key & ((kPrimaryTableSize - 1) << kCacheIndexShift); | 139 return key & ((kPrimaryTableSize - 1) << kCacheIndexShift); |
| 134 } | 140 } |
| 135 | 141 |
| 136 // Hash algorithm for the secondary table. This algorithm is replicated in | 142 // Hash algorithm for the secondary table. This algorithm is replicated in |
| 137 // assembler for every architecture. Returns an index into the table that | 143 // assembler for every architecture. Returns an index into the table that |
| 138 // is scaled by 1 << kCacheIndexShift. | 144 // is scaled by 1 << kCacheIndexShift. |
| 139 static int SecondaryOffset(Name* name, int seed) { | 145 static int SecondaryOffset(Name* name, Code::Flags flags, int seed) { |
| 140 // Use the seed from the primary cache in the secondary cache. | 146 // Use the seed from the primary cache in the secondary cache. |
| 141 uint32_t name_low32bits = | 147 uint32_t name_low32bits = |
| 142 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)); | 148 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)); |
| 143 uint32_t key = (seed - name_low32bits); | 149 // We always set the in_loop bit to zero when generating the lookup code |
| 150 // so do it here too, so the hash codes match. |
| 151 uint32_t iflags = |
| 152 (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup); |
| 153 uint32_t key = (seed - name_low32bits) + iflags; |
| 144 return key & ((kSecondaryTableSize - 1) << kCacheIndexShift); | 154 return key & ((kSecondaryTableSize - 1) << kCacheIndexShift); |
| 145 } | 155 } |
| 146 | 156 |
| 147 // Compute the entry for a given offset in exactly the same way as | 157 // Compute the entry for a given offset in exactly the same way as |
| 148 // we do in generated code. We generate a hash code that already | 158 // we do in generated code. We generate a hash code that already |
| 149 // ends in Name::kHashShift zeros. Then we multiply it so it is a multiple | 159 // ends in Name::kHashShift zeros. Then we multiply it so it is a multiple |
| 150 // of sizeof(Entry). This makes it easier to avoid making mistakes | 160 // of sizeof(Entry). This makes it easier to avoid making mistakes |
| 151 // in the hashed offset computations. | 161 // in the hashed offset computations. |
| 152 static Entry* entry(Entry* table, int offset) { | 162 static Entry* entry(Entry* table, int offset) { |
| 153 const int multiplier = sizeof(*table) >> Name::kHashShift; | 163 const int multiplier = sizeof(*table) >> Name::kHashShift; |
| 154 return reinterpret_cast<Entry*>(reinterpret_cast<Address>(table) + | 164 return reinterpret_cast<Entry*>(reinterpret_cast<Address>(table) + |
| 155 offset * multiplier); | 165 offset * multiplier); |
| 156 } | 166 } |
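As a worked example of this scaling (sizes assumed for a 64-bit build, with Name::kHashShift taken to be 2): Entry holds three pointers, so sizeof(Entry) is 24 bytes and the multiplier is 24 >> 2 == 6. Every offset produced by the hash functions above already ends in two zero bits, i.e. is a multiple of 4, so offset * 6 is a multiple of 24 and always lands on an Entry boundary.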
| 157 | 167 |
| 158 private: | 168 private: |
| 159 Entry primary_[kPrimaryTableSize]; | 169 Entry primary_[kPrimaryTableSize]; |
| 160 Entry secondary_[kSecondaryTableSize]; | 170 Entry secondary_[kSecondaryTableSize]; |
| 161 Isolate* isolate_; | 171 Isolate* isolate_; |
| 162 Code::Kind ic_kind_; | 172 Code::Kind ic_kind_; |
| 163 | 173 |
| 164 friend class Isolate; | 174 friend class Isolate; |
| 165 friend class SCTableReference; | 175 friend class SCTableReference; |
| 166 | 176 |
| 167 DISALLOW_COPY_AND_ASSIGN(StubCache); | 177 DISALLOW_COPY_AND_ASSIGN(StubCache); |
| 168 }; | 178 }; |
| 169 } // namespace internal | 179 } // namespace internal |
| 170 } // namespace v8 | 180 } // namespace v8 |
| 171 | 181 |
| 172 #endif // V8_STUB_CACHE_H_ | 182 #endif // V8_STUB_CACHE_H_ |