Index: src/objects.cc |
=================================================================== |
--- src/objects.cc (revision 5318) |
+++ src/objects.cc (working copy) |
@@ -2098,6 +2098,124 @@ |
} |
+// Returns true if normalized maps for |object| may be shared via this cache. |
+bool NormalizedMapCache::IsCacheable(JSObject* object) { |
+  // Caching for global objects is not worth it (there are too few of them). |
+  return !object->IsGlobalObject(); |
+} |
+ |
+ |
+// Returns a normalized map for |obj| created with |mode|. For cacheable |
+// objects a previously created map with matching layout (see CheckHit) is |
+// reused; on a miss a fresh copy is made and stored at the hashed slot. |
+// Uncacheable (global) objects always get a fresh, unshared copy. |
+// May return a Failure object if allocation fails. Counters::normalized_maps |
+// counts only maps actually created here — cache hits return early. |
+Object* NormalizedMapCache::Get(JSObject* obj, PropertyNormalizationMode mode) { |
+  Object* result; |
+ |
+  Map* fast = obj->map(); |
+  if (!IsCacheable(obj)) { |
+    result = fast->CopyNormalized(mode); |
+    if (result->IsFailure()) return result; |
+  } else { |
+    // A map can only ever live at the slot derived from its hash, so a |
+    // single probe decides hit or miss. |
+    int index = Hash(fast) % kEntries; |
+    result = get(index); |
+ |
+    if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) { |
+#ifdef DEBUG |
+      if (FLAG_enable_slow_asserts) { |
+        // Make sure that the new slow map has exactly the same hash as the |
+        // original fast map. This way we can use hash to check if a slow map |
+        // is already in the hash (see Contains method). |
+        ASSERT(Hash(fast) == Hash(Map::cast(result))); |
+        // The cached map should match newly created normalized map bit-by-bit. |
+        Object* fresh = fast->CopyNormalized(mode); |
+        if (!fresh->IsFailure()) { |
+          // Copy the unused byte so that the assertion below works. |
+          Map::cast(fresh)->address()[Map::kUnusedOffset] = |
+              Map::cast(result)->address()[Map::kUnusedOffset]; |
+          ASSERT(memcmp(Map::cast(fresh)->address(), |
+                        Map::cast(result)->address(), |
+                        Map::kSize) == 0); |
+        } |
+      } |
+#endif |
+      return result; |
+    } |
+ |
+    // Cache miss: create the normalized map and overwrite whatever occupied |
+    // the slot before. |
+    result = fast->CopyNormalized(mode); |
+    if (result->IsFailure()) return result; |
+    set(index, result); |
+  } |
+  Counters::normalized_maps.Increment(); |
+ |
+  return result; |
+} |
+ |
+ |
+// Returns true if |map| is currently held by this cache. Correct only under |
+// the invariant (slow-asserted in Get) that a slow map has the same hash as |
+// the fast map it was created from, so one probe suffices. |
+bool NormalizedMapCache::Contains(Map* map) { |
+  // If the map is present in the cache it can only be at one place: |
+  // at the index calculated from the hash. We assume that a slow map has the |
+  // same hash as a fast map it has been generated from. |
+  int index = Hash(map) % kEntries; |
+  return get(index) == map; |
+} |
+ |
+ |
+// Drops every cached map by overwriting all entries with undefined. |
+void NormalizedMapCache::Clear() { |
+  int entries = length(); |
+  for (int i = 0; i != entries; i++) { |
+    set_undefined(i); |
+  } |
+} |
+ |
+ |
+// Hashes |fast| for cache lookup. Only three fields participate, so maps |
+// that differ elsewhere can collide; CheckHit disambiguates on lookup. The |
+// hash must be identical for a fast map and any normalized copy of it |
+// (CopyNormalized preserves all three hashed fields) — Contains relies on |
+// that. |
+int NormalizedMapCache::Hash(Map* fast) { |
+  // For performance reasons we only hash the 3 most variable fields of a map: |
+  // constructor, prototype and bit_field2. |
+ |
+  // Shift away the tag. |
+  int hash = (static_cast<uint32_t>( |
+      reinterpret_cast<uintptr_t>(fast->constructor())) >> 2); |
+ |
+  // XOR-ing the prototype and constructor directly yields too many zero bits |
+  // when the two pointers are close (which is fairly common). |
+  // To avoid this we shift the prototype 4 bits relatively to the constructor. |
+  hash ^= (static_cast<uint32_t>( |
+      reinterpret_cast<uintptr_t>(fast->prototype())) << 2); |
+ |
+  return hash ^ (hash >> 16) ^ fast->bit_field2(); |
+} |
+ |
+ |
+// Returns true if the cached map |slow| is equivalent to what |
+// CopyNormalized(mode) would produce for |fast|: same constructor, |
+// prototype, instance type and bit fields, and an in-object property count |
+// that matches after applying |mode| (CLEAR_INOBJECT_PROPERTIES expects 0). |
+bool NormalizedMapCache::CheckHit(Map* slow, |
+                                  Map* fast, |
+                                  PropertyNormalizationMode mode) { |
+#ifdef DEBUG |
+  slow->NormalizedMapVerify(); |
+#endif |
+  return |
+    slow->constructor() == fast->constructor() && |
+    slow->prototype() == fast->prototype() && |
+    slow->inobject_properties() == ((mode == CLEAR_INOBJECT_PROPERTIES) ? |
+                                    0 : |
+                                    fast->inobject_properties()) && |
+    slow->instance_type() == fast->instance_type() && |
+    slow->bit_field() == fast->bit_field() && |
+    slow->bit_field2() == fast->bit_field2(); |
+} |
+ |
+ |
+// Records |code| under |name| in this object's map code cache. If the |
+// current map is a shared normalized map (i.e. still present in the |
+// NormalizedMapCache), it is first swapped for a private identical copy so |
+// that mutating the code cache never affects other objects sharing the map. |
+// May return a Failure object if allocation fails. |
+Object* JSObject::UpdateMapCodeCache(String* name, Code* code) { |
+  if (!HasFastProperties() && |
+      NormalizedMapCache::IsCacheable(this) && |
+      Top::context()->global_context()->normalized_map_cache()-> |
+          Contains(map())) { |
+    // Replace the map with the identical copy that can be safely modified. |
+    Object* obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES); |
+    if (obj->IsFailure()) return obj; |
+    Counters::normalized_maps.Increment(); |
+ |
+    set_map(Map::cast(obj)); |
+  } |
+  return map()->UpdateCodeCache(name, code); |
+} |
+ |
+ |
Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode, |
int expected_additional_properties) { |
if (!HasFastProperties()) return this; |
@@ -2162,28 +2280,22 @@ |
int index = map()->instance_descriptors()->NextEnumerationIndex(); |
dictionary->SetNextEnumerationIndex(index); |
- // Allocate new map. |
- obj = map()->CopyDropDescriptors(); |
+ obj = Top::context()->global_context()-> |
+ normalized_map_cache()->Get(this, mode); |
if (obj->IsFailure()) return obj; |
Map* new_map = Map::cast(obj); |
- // Clear inobject properties if needed by adjusting the instance size and |
- // putting in a filler object instead of the inobject properties. |
- if (mode == CLEAR_INOBJECT_PROPERTIES && map()->inobject_properties() > 0) { |
- int instance_size_delta = map()->inobject_properties() * kPointerSize; |
- int new_instance_size = map()->instance_size() - instance_size_delta; |
- new_map->set_inobject_properties(0); |
- new_map->set_instance_size(new_instance_size); |
- new_map->set_visitor_id(StaticVisitorBase::GetVisitorId(new_map)); |
- Heap::CreateFillerObjectAt(this->address() + new_instance_size, |
- instance_size_delta); |
- } |
- new_map->set_unused_property_fields(0); |
- |
// We have now successfully allocated all the necessary objects. |
// Changes can now be made with the guarantee that all of them take effect. |
+ |
+ // Resize the object in the heap if necessary. |
+ int new_instance_size = new_map->instance_size(); |
+ int instance_size_delta = map()->instance_size() - new_instance_size; |
+ ASSERT(instance_size_delta >= 0); |
+ Heap::CreateFillerObjectAt(this->address() + new_instance_size, |
+ instance_size_delta); |
+ |
set_map(new_map); |
- map()->set_instance_descriptors(Heap::empty_descriptor_array()); |
set_properties(dictionary); |
@@ -3083,6 +3195,33 @@ |
} |
+// Allocates a copy of this map for use by a normalized object. Only the |
+// prototype, constructor and both bit fields are copied over; with |
+// CLEAR_INOBJECT_PROPERTIES the instance size is additionally shrunk to drop |
+// the in-object property slots. May return a Failure object if allocation |
+// fails. Must preserve every field hashed by NormalizedMapCache::Hash. |
+Object* Map::CopyNormalized(PropertyNormalizationMode mode) { |
+  int new_instance_size = instance_size(); |
+  if (mode == CLEAR_INOBJECT_PROPERTIES) { |
+    new_instance_size -= inobject_properties() * kPointerSize; |
+  } |
+ |
+  Object* result = Heap::AllocateMap(instance_type(), new_instance_size); |
+  if (result->IsFailure()) return result; |
+ |
+  // NOTE(review): when clearing, the count is left at whatever AllocateMap |
+  // initializes it to — presumably zero; confirm against Heap::AllocateMap. |
+  if (mode != CLEAR_INOBJECT_PROPERTIES) { |
+    Map::cast(result)->set_inobject_properties(inobject_properties()); |
+  } |
+ |
+  Map::cast(result)->set_prototype(prototype()); |
+  Map::cast(result)->set_constructor(constructor()); |
+ |
+  Map::cast(result)->set_bit_field(bit_field()); |
+  Map::cast(result)->set_bit_field2(bit_field2()); |
+ |
+#ifdef DEBUG |
+  Map::cast(result)->NormalizedMapVerify(); |
+#endif |
+ |
+  return result; |
+} |
+ |
+ |
Object* Map::CopyDropTransitions() { |
Object* new_map = CopyDropDescriptors(); |
if (new_map->IsFailure()) return new_map; |