Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(270)

Side by Side Diff: src/ic/stub-cache.h

Issue 2167493003: [ic] [stubs] Don't use Code::flags in megamorphic stub cache hash computations. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@stub-cache-fix
Patch Set: Rebasing Created 4 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/ic/s390/stub-cache-s390.cc ('k') | src/ic/stub-cache.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #ifndef V8_STUB_CACHE_H_ 5 #ifndef V8_STUB_CACHE_H_
6 #define V8_STUB_CACHE_H_ 6 #define V8_STUB_CACHE_H_
7 7
8 #include "src/macro-assembler.h" 8 #include "src/macro-assembler.h"
9 9
10 namespace v8 { 10 namespace v8 {
(...skipping 23 matching lines...) Expand all
34 public: 34 public:
35 struct Entry { 35 struct Entry {
36 Name* key; 36 Name* key;
37 Code* value; 37 Code* value;
38 Map* map; 38 Map* map;
39 }; 39 };
40 40
41 void Initialize(); 41 void Initialize();
42 // Access cache for entry hash(name, map). 42 // Access cache for entry hash(name, map).
43 Code* Set(Name* name, Map* map, Code* code); 43 Code* Set(Name* name, Map* map, Code* code);
44 Code* Get(Name* name, Map* map, Code::Flags flags); 44 Code* Get(Name* name, Map* map);
45 // Clear the lookup table (@ mark compact collection). 45 // Clear the lookup table (@ mark compact collection).
46 void Clear(); 46 void Clear();
47 // Collect all maps that match the name and flags. 47 // Collect all maps that match the name.
48 void CollectMatchingMaps(SmallMapList* types, Handle<Name> name, 48 void CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
49 Code::Flags flags, Handle<Context> native_context, 49 Handle<Context> native_context, Zone* zone);
50 Zone* zone);
51 // Generate code for probing the stub cache table. 50 // Generate code for probing the stub cache table.
52 // Arguments extra, extra2 and extra3 may be used to pass additional scratch 51 // Arguments extra, extra2 and extra3 may be used to pass additional scratch
53 // registers. Set to no_reg if not needed. 52 // registers. Set to no_reg if not needed.
54 // If leave_frame is true, then exit a frame before the tail call. 53 // If leave_frame is true, then exit a frame before the tail call.
55 void GenerateProbe(MacroAssembler* masm, Register receiver, Register name, 54 void GenerateProbe(MacroAssembler* masm, Register receiver, Register name,
56 Register scratch, Register extra, Register extra2 = no_reg, 55 Register scratch, Register extra, Register extra2 = no_reg,
57 Register extra3 = no_reg); 56 Register extra3 = no_reg);
58 57
59 enum Table { kPrimary, kSecondary }; 58 enum Table { kPrimary, kSecondary };
60 59
(...skipping 29 matching lines...) Expand all
90 // Setting the entry size such that the index is shifted by Name::kHashShift 89 // Setting the entry size such that the index is shifted by Name::kHashShift
91 // is convenient; shifting down the length field (to extract the hash code) 90 // is convenient; shifting down the length field (to extract the hash code)
92 // automatically discards the hash bit field. 91 // automatically discards the hash bit field.
93 static const int kCacheIndexShift = Name::kHashShift; 92 static const int kCacheIndexShift = Name::kHashShift;
94 93
95 static const int kPrimaryTableBits = 11; 94 static const int kPrimaryTableBits = 11;
96 static const int kPrimaryTableSize = (1 << kPrimaryTableBits); 95 static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
97 static const int kSecondaryTableBits = 9; 96 static const int kSecondaryTableBits = 9;
98 static const int kSecondaryTableSize = (1 << kSecondaryTableBits); 97 static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
99 98
100 static int PrimaryOffsetForTesting(Name* name, Code::Flags flags, Map* map) { 99 // Some magic numbers used in primary and secondary hash computations.
101 return PrimaryOffset(name, flags, map); 100 static const int kPrimaryMagic = 0x3d532433;
101 static const int kSecondaryMagic = 0xb16b00b5;
102
103 static int PrimaryOffsetForTesting(Name* name, Map* map) {
104 return PrimaryOffset(name, map);
102 } 105 }
103 106
104 static int SecondaryOffsetForTesting(Name* name, Code::Flags flags, 107 static int SecondaryOffsetForTesting(Name* name, int seed) {
105 int seed) { 108 return SecondaryOffset(name, seed);
106 return SecondaryOffset(name, flags, seed);
107 } 109 }
108 110
109 // The constructor is made public only for the purposes of testing. 111 // The constructor is made public only for the purposes of testing.
110 StubCache(Isolate* isolate, Code::Kind ic_kind); 112 StubCache(Isolate* isolate, Code::Kind ic_kind);
111 113
112 private: 114 private:
113 // The stub cache has a primary and secondary level. The two levels have 115 // The stub cache has a primary and secondary level. The two levels have
114 // different hashing algorithms in order to avoid simultaneous collisions 116 // different hashing algorithms in order to avoid simultaneous collisions
115 // in both caches. Unlike a probing strategy (quadratic or otherwise) the 117 // in both caches. Unlike a probing strategy (quadratic or otherwise) the
116 // update strategy on updates is fairly clear and simple: Any existing entry 118 // update strategy on updates is fairly clear and simple: Any existing entry
117 // in the primary cache is moved to the secondary cache, and secondary cache 119 // in the primary cache is moved to the secondary cache, and secondary cache
118 // entries are overwritten. 120 // entries are overwritten.
119 121
120 // Hash algorithm for the primary table. This algorithm is replicated in 122 // Hash algorithm for the primary table. This algorithm is replicated in
121 // assembler for every architecture. Returns an index into the table that 123 // assembler for every architecture. Returns an index into the table that
122 // is scaled by 1 << kCacheIndexShift. 124 // is scaled by 1 << kCacheIndexShift.
123 static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) { 125 static int PrimaryOffset(Name* name, Map* map) {
124 STATIC_ASSERT(kCacheIndexShift == Name::kHashShift); 126 STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
125 // Compute the hash of the name (use entire hash field). 127 // Compute the hash of the name (use entire hash field).
126 DCHECK(name->HasHashCode()); 128 DCHECK(name->HasHashCode());
127 uint32_t field = name->hash_field(); 129 uint32_t field = name->hash_field();
128 // Using only the low bits in 64-bit mode is unlikely to increase the 130 // Using only the low bits in 64-bit mode is unlikely to increase the
129 // risk of collision even if the heap is spread over an area larger than 131 // risk of collision even if the heap is spread over an area larger than
130 // 4Gb (and not at all if it isn't). 132 // 4Gb (and not at all if it isn't).
131 uint32_t map_low32bits = 133 uint32_t map_low32bits =
132 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)); 134 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
133 // We always set the in_loop bit to zero when generating the lookup code 135 // Base the offset on a simple combination of name and map.
134 // so do it here too so the hash codes match. 136 uint32_t key = (map_low32bits + field) ^ kPrimaryMagic;
135 uint32_t iflags =
136 (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
137 // Base the offset on a simple combination of name, flags, and map.
138 uint32_t key = (map_low32bits + field) ^ iflags;
139 return key & ((kPrimaryTableSize - 1) << kCacheIndexShift); 137 return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
140 } 138 }
141 139
142 // Hash algorithm for the secondary table. This algorithm is replicated in 140 // Hash algorithm for the secondary table. This algorithm is replicated in
143 // assembler for every architecture. Returns an index into the table that 141 // assembler for every architecture. Returns an index into the table that
144 // is scaled by 1 << kCacheIndexShift. 142 // is scaled by 1 << kCacheIndexShift.
145 static int SecondaryOffset(Name* name, Code::Flags flags, int seed) { 143 static int SecondaryOffset(Name* name, int seed) {
146 // Use the seed from the primary cache in the secondary cache. 144 // Use the seed from the primary cache in the secondary cache.
147 uint32_t name_low32bits = 145 uint32_t name_low32bits =
148 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)); 146 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
149 // We always set the in_loop bit to zero when generating the lookup code 147 uint32_t key = (seed - name_low32bits) + kSecondaryMagic;
150 // so do it here too so the hash codes match.
151 uint32_t iflags =
152 (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
153 uint32_t key = (seed - name_low32bits) + iflags;
154 return key & ((kSecondaryTableSize - 1) << kCacheIndexShift); 148 return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
155 } 149 }
156 150
157 // Compute the entry for a given offset in exactly the same way as 151 // Compute the entry for a given offset in exactly the same way as
158 // we do in generated code. We generate a hash code that already 152 // we do in generated code. We generate a hash code that already
159 // ends in Name::kHashShift 0s. Then we multiply it so it is a multiple 153 // ends in Name::kHashShift 0s. Then we multiply it so it is a multiple
160 // of sizeof(Entry). This makes it easier to avoid making mistakes 154 // of sizeof(Entry). This makes it easier to avoid making mistakes
161 // in the hashed offset computations. 155 // in the hashed offset computations.
162 static Entry* entry(Entry* table, int offset) { 156 static Entry* entry(Entry* table, int offset) {
163 const int multiplier = sizeof(*table) >> Name::kHashShift; 157 const int multiplier = sizeof(*table) >> Name::kHashShift;
164 return reinterpret_cast<Entry*>(reinterpret_cast<Address>(table) + 158 return reinterpret_cast<Entry*>(reinterpret_cast<Address>(table) +
165 offset * multiplier); 159 offset * multiplier);
166 } 160 }
167 161
168 private: 162 private:
169 Entry primary_[kPrimaryTableSize]; 163 Entry primary_[kPrimaryTableSize];
170 Entry secondary_[kSecondaryTableSize]; 164 Entry secondary_[kSecondaryTableSize];
171 Isolate* isolate_; 165 Isolate* isolate_;
172 Code::Kind ic_kind_; 166 Code::Kind ic_kind_;
173 167
174 friend class Isolate; 168 friend class Isolate;
175 friend class SCTableReference; 169 friend class SCTableReference;
176 170
177 DISALLOW_COPY_AND_ASSIGN(StubCache); 171 DISALLOW_COPY_AND_ASSIGN(StubCache);
178 }; 172 };
179 } // namespace internal 173 } // namespace internal
180 } // namespace v8 174 } // namespace v8
181 175
182 #endif // V8_STUB_CACHE_H_ 176 #endif // V8_STUB_CACHE_H_
OLDNEW
« no previous file with comments | « src/ic/s390/stub-cache-s390.cc ('k') | src/ic/stub-cache.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698