| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 316 matching lines...) |
| 327 explicit StubCache(Isolate* isolate); | 327 explicit StubCache(Isolate* isolate); |
| 328 | 328 |
| 329 friend class Isolate; | 329 friend class Isolate; |
| 330 friend class SCTableReference; | 330 friend class SCTableReference; |
| 331 static const int kPrimaryTableSize = 2048; | 331 static const int kPrimaryTableSize = 2048; |
| 332 static const int kSecondaryTableSize = 512; | 332 static const int kSecondaryTableSize = 512; |
| 333 Entry primary_[kPrimaryTableSize]; | 333 Entry primary_[kPrimaryTableSize]; |
| 334 Entry secondary_[kSecondaryTableSize]; | 334 Entry secondary_[kSecondaryTableSize]; |
| 335 | 335 |
| 336 // Computes the hashed offsets for primary and secondary caches. | 336 // Computes the hashed offsets for primary and secondary caches. |
| 337 RLYSTC int PrimaryOffset(String* name, Code::Flags flags, Map* map) { | 337 static int PrimaryOffset(String* name, Code::Flags flags, Map* map) { |
| 338 // This works well because the heap object tag size and the hash | 338 // This works well because the heap object tag size and the hash |
| 339 // shift are equal. Shifting down the length field to get the | 339 // shift are equal. Shifting down the length field to get the |
| 340 // hash code would effectively throw away two bits of the hash | 340 // hash code would effectively throw away two bits of the hash |
| 341 // code. | 341 // code. |
| 342 ASSERT(kHeapObjectTagSize == String::kHashShift); | 342 ASSERT(kHeapObjectTagSize == String::kHashShift); |
| 343 // Compute the hash of the name (use entire hash field). | 343 // Compute the hash of the name (use entire hash field). |
| 344 ASSERT(name->HasHashCode()); | 344 ASSERT(name->HasHashCode()); |
| 345 uint32_t field = name->hash_field(); | 345 uint32_t field = name->hash_field(); |
| 346 // Using only the low bits in 64-bit mode is unlikely to increase the | 346 // Using only the low bits in 64-bit mode is unlikely to increase the |
| 347 // risk of collision even if the heap is spread over an area larger than | 347 // risk of collision even if the heap is spread over an area larger than |
| 348 // 4Gb (and not at all if it isn't). | 348 // 4Gb (and not at all if it isn't). |
| 349 uint32_t map_low32bits = | 349 uint32_t map_low32bits = |
| 350 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)); | 350 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)); |
| 351 // We always set the in_loop bit to zero when generating the lookup code | 351 // We always set the in_loop bit to zero when generating the lookup code |
| 352 // so do it here too so the hash codes match. | 352 // so do it here too so the hash codes match. |
| 353 uint32_t iflags = | 353 uint32_t iflags = |
| 354 (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup); | 354 (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup); |
| 355 // Base the offset on a simple combination of name, flags, and map. | 355 // Base the offset on a simple combination of name, flags, and map. |
| 356 uint32_t key = (map_low32bits + field) ^ iflags; | 356 uint32_t key = (map_low32bits + field) ^ iflags; |
| 357 return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize); | 357 return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize); |
| 358 } | 358 } |
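The primary hash above is easy to check in isolation. Below is a minimal standalone sketch of the same computation, not V8 code: kHeapObjectTagSize and kPrimaryTableSize are copied from this header, while the hash field, map address bits, flags value, and the kFlagsNotUsedInLookup mask are made-up illustration values.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const int kHeapObjectTagSize = 2;            // matches the ASSERT above
  const int kPrimaryTableSize = 2048;          // from this header
  const uint32_t kFlagsNotUsedInLookup = 0x7;  // hypothetical mask value

  uint32_t hash_field = 0x12345678;  // made-up name->hash_field()
  uint32_t map_low32  = 0xdeadbeef;  // made-up low 32 bits of the map address
  uint32_t flags      = 0x00000035;  // made-up Code::Flags value

  // Same combination as PrimaryOffset: add, xor, then mask.
  uint32_t iflags = flags & ~kFlagsNotUsedInLookup;
  uint32_t key = (map_low32 + hash_field) ^ iflags;
  int offset = key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);

  // The mask leaves kHeapObjectTagSize low zero bits, so the offset is
  // already a byte offset scaled by 4 and under kPrimaryTableSize * 4.
  printf("primary offset: %d\n", offset);
  return 0;
}
```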
| 359 | 359 |
| 360 RLYSTC int SecondaryOffset(String* name, Code::Flags flags, int seed) { | 360 static int SecondaryOffset(String* name, Code::Flags flags, int seed) { |
| 361 // Use the seed from the primary cache in the secondary cache. | 361 // Use the seed from the primary cache in the secondary cache. |
| 362 uint32_t string_low32bits = | 362 uint32_t string_low32bits = |
| 363 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)); | 363 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)); |
| 364 // We always set the in_loop bit to zero when generating the lookup code | 364 // We always set the in_loop bit to zero when generating the lookup code |
| 365 // so do it here too so the hash codes match. | 365 // so do it here too so the hash codes match. |
| 366 uint32_t iflags = | 366 uint32_t iflags = |
| 367 (static_cast<uint32_t>(flags) & ~Code::kFlagsICInLoopMask); | 367 (static_cast<uint32_t>(flags) & ~Code::kFlagsICInLoopMask); |
| 368 uint32_t key = seed - string_low32bits + iflags; | 368 uint32_t key = seed - string_low32bits + iflags; |
| 369 return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize); | 369 return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize); |
| 370 } | 370 } |
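Likewise, a sketch of the secondary probe, chained off the primary offset as the code above does. The seed, name address bits, flags, and the kFlagsICInLoopMask value are again invented for illustration.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const int kHeapObjectTagSize = 2;          // matches the ASSERT above
  const int kSecondaryTableSize = 512;       // from this header
  const uint32_t kFlagsICInLoopMask = 0x10;  // hypothetical mask value

  int seed = 0x1a4;                  // e.g. a primary offset computed earlier
  uint32_t name_low32 = 0xcafe1000;  // made-up low 32 bits of the name address
  uint32_t flags      = 0x00000035;  // made-up Code::Flags value

  // Same combination as SecondaryOffset: subtract, add, then mask.
  uint32_t iflags = flags & ~kFlagsICInLoopMask;
  uint32_t key = seed - name_low32 + iflags;
  int offset = key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
  printf("secondary offset: %d\n", offset);
  return 0;
}
```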
| 371 | 371 |
| 372 // Compute the entry for a given offset in exactly the same way as | 372 // Compute the entry for a given offset in exactly the same way as |
| 373 // we do in generated code. We generate a hash code that already | 373 // we do in generated code. We generate a hash code that already |
| 374 // ends in String::kHashShift 0s. Then we shift it so it is a multiple | 374 // ends in String::kHashShift 0s. Then we shift it so it is a multiple |
| 375 // of sizeof(Entry). This makes it easier to avoid making mistakes | 375 // of sizeof(Entry). This makes it easier to avoid making mistakes |
| 376 // in the hashed offset computations. | 376 // in the hashed offset computations. |
| 377 RLYSTC Entry* entry(Entry* table, int offset) { | 377 static Entry* entry(Entry* table, int offset) { |
| 378 const int shift_amount = kPointerSizeLog2 + 1 - String::kHashShift; | 378 const int shift_amount = kPointerSizeLog2 + 1 - String::kHashShift; |
| 379 return reinterpret_cast<Entry*>( | 379 return reinterpret_cast<Entry*>( |
| 380 reinterpret_cast<Address>(table) + (offset << shift_amount)); | 380 reinterpret_cast<Address>(table) + (offset << shift_amount)); |
| 381 } | 381 } |
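The shift here only works because of the alignment the hashes guarantee. A small sketch of the arithmetic, assuming a 64-bit build (kPointerSizeLog2 == 3), String::kHashShift == 2 as the ASSERT above requires, and an Entry of two pointers, so sizeof(Entry) == 16 and shift_amount == 2:

```cpp
#include <cstdint>
#include <cstdio>

struct Entry { void* key; void* value; };  // stand-in for StubCache::Entry

int main() {
  const int kPointerSizeLog2 = 3;  // assumption: 64-bit pointers
  const int kHashShift = 2;        // assumption: String::kHashShift == 2
  const int shift_amount = kPointerSizeLog2 + 1 - kHashShift;

  Entry table[2048] = {};
  int offset = 0x1a4;  // a hashed offset; its low two bits are already zero

  // Same pointer arithmetic as entry(): shifting turns a 4-byte-aligned
  // offset into a 16-byte-aligned one, i.e. a multiple of sizeof(Entry).
  Entry* e = reinterpret_cast<Entry*>(
      reinterpret_cast<uintptr_t>(table) + (offset << shift_amount));
  printf("index: %td\n", e - table);  // 0x1a4 * 4 / 16 == 0x1a4 / 4 == 105
  return 0;
}
```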
| 382 | 382 |
| 383 Isolate* isolate_; | 383 Isolate* isolate_; |
| 384 | 384 |
| 385 DISALLOW_COPY_AND_ASSIGN(StubCache); | 385 DISALLOW_COPY_AND_ASSIGN(StubCache); |
| 386 }; | 386 }; |
| 387 | 387 |
| (...skipping 509 matching lines...) |
| 897 private: | 897 private: |
| 898 MaybeObject* GetCode(); | 898 MaybeObject* GetCode(); |
| 899 | 899 |
| 900 StrictModeFlag strict_mode_; | 900 StrictModeFlag strict_mode_; |
| 901 }; | 901 }; |
| 902 | 902 |
| 903 | 903 |
| 904 } } // namespace v8::internal | 904 } } // namespace v8::internal |
| 905 | 905 |
| 906 #endif // V8_STUB_CACHE_H_ | 906 #endif // V8_STUB_CACHE_H_ |