Chromium Code Reviews

Index: src/objects.cc
diff --git a/src/objects.cc b/src/objects.cc
index 1daf1ebc0297df1a7e89257ed80d2a2d853841c8..fc43b7dfc2faa82c4b33d0aae66cd5ae0d54f366 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -4658,48 +4658,26 @@ PropertyAttributes JSObject::GetElementAttributeWithoutInterceptor(
 }


-Handle<Map> NormalizedMapCache::Get(Handle<NormalizedMapCache> cache,
-                                    Handle<Map> fast_map,
-                                    PropertyNormalizationMode mode) {
-  int index = fast_map->Hash() % kEntries;
-  Handle<Object> result = handle(cache->get(index), cache->GetIsolate());
-  if (result->IsMap() &&
-      Handle<Map>::cast(result)->EquivalentToForNormalization(
-          *fast_map, mode)) {
-#ifdef VERIFY_HEAP
-    if (FLAG_verify_heap) {
-      Handle<Map>::cast(result)->SharedMapVerify();
-    }
-#endif
-#ifdef ENABLE_SLOW_ASSERTS
-    if (FLAG_enable_slow_asserts) {
-      // The cached map should match newly created normalized map bit-by-bit,
-      // except for the code cache, which can contain some ics which can be
-      // applied to the shared map.
-      Handle<Map> fresh = Map::CopyNormalized(
-          fast_map, mode, SHARED_NORMALIZED_MAP);
+Handle<NormalizedMapCache> NormalizedMapCache::New(Isolate* isolate) {
+  Handle<FixedArray> array(
+      isolate->factory()->NewFixedArray(kEntries, TENURED));
+  return Handle<NormalizedMapCache>::cast(array);
+}
-      ASSERT(memcmp(fresh->address(),
-                    Handle<Map>::cast(result)->address(),
-                    Map::kCodeCacheOffset) == 0);
-      STATIC_ASSERT(Map::kDependentCodeOffset ==
-                    Map::kCodeCacheOffset + kPointerSize);
-      int offset = Map::kDependentCodeOffset + kPointerSize;
-      ASSERT(memcmp(fresh->address() + offset,
-                    Handle<Map>::cast(result)->address() + offset,
-                    Map::kSize - offset) == 0);
-    }
-#endif
-    return Handle<Map>::cast(result);
-  }
-  Isolate* isolate = cache->GetIsolate();
-  Handle<Map> map = Map::CopyNormalized(fast_map, mode, SHARED_NORMALIZED_MAP);
-  ASSERT(map->is_dictionary_map());
-  cache->set(index, *map);
-  isolate->counters()->normalized_maps()->Increment();
+MaybeHandle<Map> NormalizedMapCache::Get(Handle<Map> fast_map) {
+  DisallowHeapAllocation no_gc;
+  Object* value = FixedArray::get(GetIndex(fast_map));
+  if (!value->IsMap()) return MaybeHandle<Map>();
+  return handle(Map::cast(value));
+}
-  return map;
+
+void NormalizedMapCache::Set(Handle<Map> fast_map,
+                             Handle<Map> normalized_map) {
+  DisallowHeapAllocation no_gc;
+  ASSERT(normalized_map->is_dictionary_map());
+  FixedArray::set(GetIndex(fast_map), *normalized_map);
 }
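
Note on the new API shape: Get() now returns MaybeHandle<Map> instead of always producing a map, so a cache miss is represented in the type system and every caller must branch before touching the result. As a rough standalone illustration of that contract (self-contained toy code with invented names, not V8's actual MaybeHandle), consider:

#include <cstdio>

// Hypothetical stand-in for v8::internal::MaybeHandle; illustration only.
template <typename T>
class MaybeHandleLike {
 public:
  MaybeHandleLike() : ptr_(nullptr) {}             // empty => cache miss
  explicit MaybeHandleLike(T* ptr) : ptr_(ptr) {}  // non-empty => cache hit
  // Returns false on a miss, so the value is only usable on the hit path.
  bool ToHandle(T** out) const {
    if (ptr_ == nullptr) return false;
    *out = ptr_;
    return true;
  }

 private:
  T* ptr_;
};

struct FakeMap { int id; };

int main() {
  FakeMap cached{42};
  MaybeHandleLike<FakeMap> hit(&cached);
  MaybeHandleLike<FakeMap> miss;

  FakeMap* result = nullptr;
  if (hit.ToHandle(&result)) {
    std::printf("hit: map %d\n", result->id);
  }
  if (!miss.ToHandle(&result)) {
    std::printf("miss: create a fresh normalized map instead\n");
  }
  return 0;
}

The ToHandle() shape makes the miss path explicit at the call site; Map::Normalize() below consumes the cache in exactly this way.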
@@ -4732,6 +4710,7 @@ void JSObject::NormalizeProperties(Handle<JSObject> object,
   Isolate* isolate = object->GetIsolate();
   HandleScope scope(isolate);
   Handle<Map> map(object->map());
+  Handle<Map> new_map = Map::Normalize(map, mode);

   // Allocate new content.
   int real_size = map->NumberOfOwnDescriptors();
@@ -4786,12 +4765,6 @@ void JSObject::NormalizeProperties(Handle<JSObject> object,
   // Copy the next enumeration index from instance descriptor.
   dictionary->SetNextEnumerationIndex(real_size + 1);

-  Handle<NormalizedMapCache> cache(
-      isolate->context()->native_context()->normalized_map_cache());
-  Handle<Map> new_map = NormalizedMapCache::Get(
-      cache, handle(object->map()), mode);
-  ASSERT(new_map->is_dictionary_map());
-
   // From here on we cannot fail and we shouldn't GC anymore.
   DisallowHeapAllocation no_allocation;
@@ -4810,8 +4783,6 @@ void JSObject::NormalizeProperties(Handle<JSObject> object,
   // the left-over space to avoid races with the sweeper thread.
   object->synchronized_set_map(*new_map);

-  map->NotifyLeafMapLayoutChange();
-
   object->set_properties(*dictionary);

   isolate->counters()->props_to_dictionary()->Increment();
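
Taken together, these hunks reorder NormalizeProperties() so that everything that can allocate (Map::Normalize() and the dictionary) happens before the DisallowHeapAllocation region; the commit phase then only installs pointers and can neither fail nor trigger GC. A toy sketch of that prepare-then-commit discipline (hypothetical NoAllocationScope guard, not V8's DisallowHeapAllocation):

#include <cassert>
#include <memory>
#include <vector>

// Hypothetical guard, loosely modeled on DisallowHeapAllocation; toy only.
struct NoAllocationScope {
  NoAllocationScope() { active = true; }
  ~NoAllocationScope() { active = false; }
  static bool active;
};
bool NoAllocationScope::active = false;

struct Object {
  std::unique_ptr<std::vector<int>> properties;
};

int main() {
  Object object;

  // Phase 1: do every allocation up front (each one could fail or "GC").
  auto dictionary = std::make_unique<std::vector<int>>(16);

  // Phase 2: commit. From here on we cannot fail and must not allocate.
  NoAllocationScope no_gc;
  assert(NoAllocationScope::active);
  object.properties = std::move(dictionary);  // pointer installs only
  return 0;
}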
@@ -7238,6 +7209,51 @@ Handle<Map> Map::RawCopy(Handle<Map> map, int instance_size) {
 }


+Handle<Map> Map::Normalize(Handle<Map> fast_map,
+                           PropertyNormalizationMode mode) {
+  ASSERT(!fast_map->is_dictionary_map());
+
+  Isolate* isolate = fast_map->GetIsolate();
+  Handle<NormalizedMapCache> cache(
+      isolate->context()->native_context()->normalized_map_cache());
+
+  Handle<Map> new_map;
+  if (cache->Get(fast_map).ToHandle(&new_map) &&
+      new_map->EquivalentToForNormalization(*fast_map, mode)) {
    Toon Verwaest  2014/05/01 11:24:34:
        Seems like EquivalentToForNormalization should be
    Igor Sheludko  2014/05/02 09:47:38:
        Done.
+#ifdef VERIFY_HEAP
+    if (FLAG_verify_heap) {
+      new_map->SharedMapVerify();
+    }
+#endif
+#ifdef ENABLE_SLOW_ASSERTS
+    if (FLAG_enable_slow_asserts) {
+      // The cached map should match newly created normalized map bit-by-bit,
+      // except for the code cache, which can contain some ics which can be
+      // applied to the shared map.
+      Handle<Map> fresh = Map::CopyNormalized(
+          fast_map, mode, SHARED_NORMALIZED_MAP);
+
+      ASSERT(memcmp(fresh->address(),
+                    new_map->address(),
+                    Map::kCodeCacheOffset) == 0);
+      STATIC_ASSERT(Map::kDependentCodeOffset ==
+                    Map::kCodeCacheOffset + kPointerSize);
+      int offset = Map::kDependentCodeOffset + kPointerSize;
+      ASSERT(memcmp(fresh->address() + offset,
+                    new_map->address() + offset,
+                    Map::kSize - offset) == 0);
+    }
+#endif
+  } else {
+    new_map = Map::CopyNormalized(fast_map, mode, SHARED_NORMALIZED_MAP);
+    cache->Set(fast_map, new_map);
+    isolate->counters()->normalized_maps()->Increment();
+  }
+  fast_map->NotifyLeafMapLayoutChange();
+  return new_map;
+}
+
+
 Handle<Map> Map::CopyNormalized(Handle<Map> map,
                                 PropertyNormalizationMode mode,
                                 NormalizedMapSharingMode sharing) {
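
With this change, Map::Normalize() owns the whole lookup, validate, or create-and-cache flow that used to live in NormalizedMapCache::Get(). Stripped of V8 types, the control flow is a direct-mapped cache: one slot per hash value, validated on a hit and overwritten on a miss. A minimal standalone sketch with invented names:

#include <array>
#include <cstdio>
#include <string>

constexpr int kEntries = 64;  // mirrors the cache-size constant above

struct Entry {
  bool used;
  int key_hash;
  std::string value;
};

std::array<Entry, kEntries> cache;  // one slot per hash bucket

std::string Normalize(int key_hash) {
  Entry& slot = cache[key_hash % kEntries];      // single-slot probe
  if (slot.used && slot.key_hash == key_hash) {  // "equivalent" check
    return slot.value;                           // cache hit: reuse
  }
  std::string fresh = "normalized:" + std::to_string(key_hash);
  slot = Entry{true, key_hash, fresh};           // cache miss: overwrite slot
  return fresh;
}

int main() {
  std::printf("%s\n", Normalize(7).c_str());  // miss: creates the entry
  std::printf("%s\n", Normalize(7).c_str());  // hit: reuses the entry
  return 0;
}

A direct-mapped cache can evict a still-useful entry on a hash collision, but since a miss here only costs one extra CopyNormalized() plus a Set(), the single-probe lookup is a reasonable trade.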