| OLD | NEW |
| 1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/lookup-cache.h" | 5 #include "src/lookup-cache.h" |
| 6 | 6 |
| 7 #include "src/objects-inl.h" | 7 #include "src/objects-inl.h" |
| 8 | 8 |
| 9 namespace v8 { | 9 namespace v8 { |
| 10 namespace internal { | 10 namespace internal { |
| 11 | 11 |
| 12 void DescriptorLookupCache::Clear() { | 12 void DescriptorLookupCache::Clear() { |
| 13 for (int index = 0; index < kLength; index++) keys_[index].source = NULL; | 13 for (int index = 0; index < kLength; index++) keys_[index].source = NULL; |
| 14 } | 14 } |
| 15 | 15 |
| 16 int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) { | |
| 17 DisallowHeapAllocation no_gc; | |
| 18 // Uses only lower 32 bits if pointers are larger. | |
| 19 uintptr_t addr_hash = | |
| 20 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift; | |
| 21 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask); | |
| 22 } | |
| 23 | |
| 24 int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) { | |
| 25 DisallowHeapAllocation no_gc; | |
| 26 int index = (Hash(map, name) & kHashMask); | |
| 27 for (int i = 0; i < kEntriesPerBucket; i++) { | |
| 28 Key& key = keys_[index + i]; | |
| 29 if ((key.map == *map) && key.name->Equals(*name)) { | |
| 30 return field_offsets_[index + i]; | |
| 31 } | |
| 32 } | |
| 33 return kNotFound; | |
| 34 } | |
| 35 | |
| 36 void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name, | |
| 37 int field_offset) { | |
| 38 DisallowHeapAllocation no_gc; | |
| 39 if (!name->IsUniqueName()) { | |
| 40 if (!StringTable::InternalizeStringIfExists(name->GetIsolate(), | |
| 41 Handle<String>::cast(name)) | |
| 42 .ToHandle(&name)) { | |
| 43 return; | |
| 44 } | |
| 45 } | |
| 46 // This cache is cleared only between mark compact passes, so we expect the | |
| 47 // cache to only contain old space names. | |
| 48 DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name)); | |
| 49 | |
| 50 int index = (Hash(map, name) & kHashMask); | |
| 51 // After a GC there will be free slots, so we use them in order (this may | |
| 52 // help to get the most frequently used one in position 0). | |
| 53 for (int i = 0; i < kEntriesPerBucket; i++) { | |
| 54 Key& key = keys_[index + i]; | |
| 55 Object* free_entry_indicator = NULL; | |
| 56 if (key.map == free_entry_indicator) { | |
| 57 key.map = *map; | |
| 58 key.name = *name; | |
| 59 field_offsets_[index + i] = field_offset; | |
| 60 return; | |
| 61 } | |
| 62 } | |
| 63 // No free entry found in this bucket, so we move them all down one and | |
| 64 // put the new entry at position zero. | |
| 65 for (int i = kEntriesPerBucket - 1; i > 0; i--) { | |
| 66 Key& key = keys_[index + i]; | |
| 67 Key& key2 = keys_[index + i - 1]; | |
| 68 key = key2; | |
| 69 field_offsets_[index + i] = field_offsets_[index + i - 1]; | |
| 70 } | |
| 71 | |
| 72 // Write the new first entry. | |
| 73 Key& key = keys_[index]; | |
| 74 key.map = *map; | |
| 75 key.name = *name; | |
| 76 field_offsets_[index] = field_offset; | |
| 77 } | |
| 78 | |
| 79 void KeyedLookupCache::Clear() { | |
| 80 for (int index = 0; index < kLength; index++) keys_[index].map = NULL; | |
| 81 } | |
| 82 | |
| 83 } // namespace internal | 16 } // namespace internal |
| 84 } // namespace v8 | 17 } // namespace v8 |
| OLD | NEW |