Chromium Code Reviews

Unified Diff: src/stub-cache.h

Issue 401613003: Unravel kHeapObjectTagSize from the stub cache. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 5 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #ifndef V8_STUB_CACHE_H_
 #define V8_STUB_CACHE_H_

 #include "src/allocation.h"
 #include "src/arguments.h"
 #include "src/code-stubs.h"
(...skipping 165 matching lines...)
   // These constants describe the structure of the interceptor arguments on the
   // stack. The arguments are pushed by the (platform-specific)
   // PushInterceptorArguments and read by LoadPropertyWithInterceptorOnly and
   // LoadWithInterceptor.
   static const int kInterceptorArgsNameIndex = 0;
   static const int kInterceptorArgsInfoIndex = 1;
   static const int kInterceptorArgsThisIndex = 2;
   static const int kInterceptorArgsHolderIndex = 3;
   static const int kInterceptorArgsLength = 4;

+  // Setting the entry size such that the index is shifted by Name::kHashShift
+  // is convenient; shifting down the length field (to extract the hash code)
+  // automatically discards the hash bit field.
+  static const int kCacheIndexShift = Name::kHashShift;
+
  private:
   explicit StubCache(Isolate* isolate);

   // The stub cache has a primary and secondary level. The two levels have
   // different hashing algorithms in order to avoid simultaneous collisions
   // in both caches. Unlike a probing strategy (quadratic or otherwise) the
   // update strategy on updates is fairly clear and simple: Any existing entry
   // in the primary cache is moved to the secondary cache, and secondary cache
   // entries are overwritten.

   // Hash algorithm for the primary table. This algorithm is replicated in
   // assembler for every architecture. Returns an index into the table that
-  // is scaled by 1 << kHeapObjectTagSize.
+  // is scaled by 1 << kCacheIndexShift.
   static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) {
-    // This works well because the heap object tag size and the hash
-    // shift are equal. Shifting down the length field to get the
-    // hash code would effectively throw away two bits of the hash
-    // code.
-    STATIC_ASSERT(kHeapObjectTagSize == Name::kHashShift);
+    STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
     // Compute the hash of the name (use entire hash field).
     ASSERT(name->HasHashCode());
     uint32_t field = name->hash_field();
     // Using only the low bits in 64-bit mode is unlikely to increase the
     // risk of collision even if the heap is spread over an area larger than
     // 4Gb (and not at all if it isn't).
     uint32_t map_low32bits =
         static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
     // We always set the in_loop bit to zero when generating the lookup code
     // so do it here too so the hash codes match.
     uint32_t iflags =
         (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
     // Base the offset on a simple combination of name, flags, and map.
     uint32_t key = (map_low32bits + field) ^ iflags;
-    return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
+    return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
   }

   // Hash algorithm for the secondary table. This algorithm is replicated in
   // assembler for every architecture. Returns an index into the table that
-  // is scaled by 1 << kHeapObjectTagSize.
+  // is scaled by 1 << kCacheIndexShift.
   static int SecondaryOffset(Name* name, Code::Flags flags, int seed) {
     // Use the seed from the primary cache in the secondary cache.
     uint32_t name_low32bits =
         static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
     // We always set the in_loop bit to zero when generating the lookup code
     // so do it here too so the hash codes match.
     uint32_t iflags =
         (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
     uint32_t key = (seed - name_low32bits) + iflags;
-    return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
+    return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
   }

   // Compute the entry for a given offset in exactly the same way as
   // we do in generated code. We generate an hash code that already
   // ends in Name::kHashShift 0s. Then we multiply it so it is a multiple
   // of sizeof(Entry). This makes it easier to avoid making mistakes
   // in the hashed offset computations.
   static Entry* entry(Entry* table, int offset) {
     const int multiplier = sizeof(*table) >> Name::kHashShift;
     return reinterpret_cast<Entry*>(
(...skipping 579 matching lines...)
   Handle<JSFunction> constant_function_;
   bool is_simple_api_call_;
   Handle<FunctionTemplateInfo> expected_receiver_type_;
   Handle<CallHandlerInfo> api_call_info_;
 };


 } } // namespace v8::internal

 #endif // V8_STUB_CACHE_H_
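A note for readers skimming the diff: the comment above PrimaryOffset describes the two-level design and its update strategy. The following is a minimal sketch of that strategy only, with simplified stand-in types and hash functions rather than the code under review; an entry evicted from the primary table is demoted into the secondary table, and secondary entries are overwritten outright.

    // Minimal sketch of the two-level update strategy described in the diff.
    // Entry layout and hash functions here are illustrative, not V8's.
    #include <cstdint>

    struct Entry {
      const void* key;   // in V8 this is really a (Name, flags, Map) triple
      void* value;
    };

    template <int kPrimarySize, int kSecondarySize>
    class TwoLevelCache {
      static_assert((kPrimarySize & (kPrimarySize - 1)) == 0, "power of two");
      static_assert((kSecondarySize & (kSecondarySize - 1)) == 0, "power of two");

     public:
      void Set(const void* key, void* value) {
        Entry& primary = primary_[PrimaryIndex(key)];
        if (primary.key != nullptr) {
          // Demote whatever currently occupies the primary slot; the
          // secondary slot it lands in is overwritten, never probed further.
          secondary_[SecondaryIndex(primary.key)] = primary;
        }
        primary = {key, value};
      }

      void* Get(const void* key) const {
        const Entry& p = primary_[PrimaryIndex(key)];
        if (p.key == key) return p.value;   // keys assumed unique pointers
        const Entry& s = secondary_[SecondaryIndex(key)];
        if (s.key == key) return s.value;
        return nullptr;  // miss: the caller falls back to the slow path
      }

     private:
      // Different mixing per level, so a pair of keys that collides in the
      // primary table is unlikely to collide again in the secondary table.
      static int PrimaryIndex(const void* key) {
        uint32_t k = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key));
        return static_cast<int>((k * 0x9E3779B9u) >> 16) & (kPrimarySize - 1);
      }
      static int SecondaryIndex(const void* key) {
        uint32_t k = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key));
        return static_cast<int>(k ^ (k >> 7)) & (kSecondarySize - 1);
      }

      Entry primary_[kPrimarySize] = {};
      Entry secondary_[kSecondarySize] = {};
    };

Usage would be along the lines of `TwoLevelCache<2048, 512> cache; cache.Set(name, code);`, with a `Get` miss falling through to the runtime lookup, which mirrors the shape of the real stub cache without reproducing it.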
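On the new kCacheIndexShift constant: the added comment says that scaling entries by Name::kHashShift is convenient because the mask in PrimaryOffset/SecondaryOffset zeroes the low kCacheIndexShift bits and so discards the hash bit field automatically. The snippet below isolates just that masking step (the real functions also mix in the map pointer and code flags); the field layout and constants are assumptions for illustration, not necessarily V8's exact values.

    // Illustration only: assume a 32-bit hash field whose low kHashShift bits
    // are flag bits and whose upper bits are the hash proper.
    #include <cassert>
    #include <cstdint>

    int main() {
      const int kHashShift = 2;                 // stand-in for Name::kHashShift
      const int kCacheIndexShift = kHashShift;  // the equality the patch asserts
      const uint32_t kPrimaryTableSize = 2048;  // assumed power of two

      uint32_t hash = 0x00ABCDEF;               // the hash proper
      uint32_t flag_bits = 0x3;                 // low bit field, not hash data
      uint32_t hash_field = (hash << kHashShift) | flag_bits;

      // The mask keeps the low kCacheIndexShift bits clear, so the flag bits
      // never influence which cache slot is chosen.
      uint32_t offset =
          hash_field & ((kPrimaryTableSize - 1) << kCacheIndexShift);
      assert(offset == ((hash & (kPrimaryTableSize - 1)) << kCacheIndexShift));
      return 0;
    }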
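Similarly, the comment above entry() (whose body is folded out of this view) explains that the offset already ends in Name::kHashShift zero bits and only needs to be multiplied up to a multiple of sizeof(Entry). The check below shows why multiplying by the visible multiplier, sizeof(Entry) >> Name::kHashShift, lands every offset on an Entry boundary; the constants are illustrative, not V8's real ones.

    // Illustration only: offset * (sizeof(Entry) >> kHashShift) equals
    // index * sizeof(Entry) whenever offset == index << kHashShift and
    // sizeof(Entry) is a multiple of 1 << kHashShift.
    #include <cassert>
    #include <cstdint>

    int main() {
      const int kHashShift = 2;    // stand-in for Name::kHashShift
      const uint32_t kEntrySize = 12;  // stand-in for sizeof(Entry)
      assert(kEntrySize % (1u << kHashShift) == 0);

      for (uint32_t index = 0; index < 2048; ++index) {
        uint32_t offset = index << kHashShift;       // as PrimaryOffset returns
        uint32_t multiplier = kEntrySize >> kHashShift;  // as entry() computes
        uint32_t byte_offset = offset * multiplier;
        assert(byte_offset == index * kEntrySize);   // an Entry boundary
      }
      return 0;
    }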