Chromium Code Reviews
| Index: src/objects.cc |
| diff --git a/src/objects.cc b/src/objects.cc |
| index dd2d9b68c1826f6a6ed4931e155ac4ea8e89d82f..57ed20fae116d15170e38c497dffe5838e5e2439 100644 |
| --- a/src/objects.cc |
| +++ b/src/objects.cc |
| @@ -1715,10 +1715,10 @@ MaybeObject* JSObject::AddFastPropertyUsingMap(Map* new_map, |
| if (map()->unused_property_fields() == 0) { |
| int new_unused = new_map->unused_property_fields(); |
| FixedArray* values; |
| - { MaybeObject* maybe_values = |
| - properties()->CopySize(properties()->length() + new_unused + 1); |
| - if (!maybe_values->To(&values)) return maybe_values; |
| - } |
| + MaybeObject* maybe_values = |
| + properties()->CopySize(properties()->length() + new_unused + 1); |
| + if (!maybe_values->To(&values)) return maybe_values; |
| + |
| set_properties(values); |
| } |
| set_map(new_map); |
| @@ -1775,6 +1775,7 @@ MaybeObject* JSObject::AddFastProperty(Name* name, |
| // Allocate new instance descriptors with (name, index) added |
| FieldDescriptor new_field(name, index, attributes, 0); |
| + new_field.SetStorageType(value->RequiredStorage()); |
| ASSERT(index < map()->inobject_properties() || |
| (index - map()->inobject_properties()) < properties()->length() || |
| @@ -2028,7 +2029,6 @@ MaybeObject* JSObject::ConvertTransitionToMapTransition( |
| // TODO(verwaest): From here on we lose existing map transitions, causing |
| // invalid back pointers. This will change once we can store multiple |
| // transitions with the same key. |
| - |
| bool owned_descriptors = old_map->owns_descriptors(); |
| if (owned_descriptors || |
| old_target->instance_descriptors() == old_map->instance_descriptors()) { |
| @@ -2049,6 +2049,8 @@ MaybeObject* JSObject::ConvertTransitionToMapTransition( |
| old_map->set_owns_descriptors(false); |
| } |
| + old_target->InvalidateTransitionTree(); |
danno (2013/04/24 15:23:00): nit: How about InvalidateMapTransitionTree()?
| + |
| old_map->SetTransition(transition_index, new_map); |
| new_map->SetBackPointer(old_map); |
| return result; |
| @@ -2068,6 +2070,7 @@ MaybeObject* JSObject::ConvertDescriptorToField(Name* name, |
| int index = map()->NextFreePropertyIndex(); |
| FieldDescriptor new_field(name, index, attributes, 0); |
| + new_field.SetStorageType(new_value->RequiredStorage()); |
| // Make a new map for the object. |
| Map* new_map; |
| @@ -2096,6 +2099,341 @@ MaybeObject* JSObject::ConvertDescriptorToField(Name* name, |
| } |
| +enum RightTrimMode { FROM_GC, FROM_MUTATOR }; |
| + |
| + |
| +static void ZapEndOfFixedArray(Address new_end, int to_trim) { |
| + // If we are doing a big trim in old space then we zap the space. |
| + Object** zap = reinterpret_cast<Object**>(new_end); |
| + zap++; // Header of filler must be at least one word so skip that. |
| + for (int i = 1; i < to_trim; i++) { |
| + *zap++ = Smi::FromInt(0); |
| + } |
| +} |
| + |
| + |
| +template<RightTrimMode trim_mode> |
| +static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) { |
| + ASSERT(elms->map() != HEAP->fixed_cow_array_map()); |
| + // For now this trick is only applied to fixed arrays in new and paged space. |
| + ASSERT(!HEAP->lo_space()->Contains(elms)); |
| + |
| + const int len = elms->length(); |
| + |
| + ASSERT(to_trim < len); |
| + |
| + Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim); |
| + |
| + if (trim_mode != FROM_GC || Heap::ShouldZapGarbage()) { |
| + ZapEndOfFixedArray(new_end, to_trim); |
| + } |
| + |
| + int size_delta = to_trim * kPointerSize; |
| + |
| + // Technically in new space this write might be omitted (except for |
| + // debug mode which iterates through the heap), but to play safer |
| + // we still do it. |
| + heap->CreateFillerObjectAt(new_end, size_delta); |
| + |
| + elms->set_length(len - to_trim); |
| + |
| + // Maintain marking consistency for IncrementalMarking. |
| + if (Marking::IsBlack(Marking::MarkBitFrom(elms))) { |
| + if (trim_mode == FROM_GC) { |
| + MemoryChunk::IncrementLiveBytesFromGC(elms->address(), -size_delta); |
| + } else { |
| + MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta); |
| + } |
| + } |
| +} |
| + |
| + |
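As an editorial aside on the trimming helper above: below is a minimal stand-alone sketch of the same in-place right-trim idea, using a hypothetical ToyArray instead of V8's FixedArray/Heap API (the names ToyArray and RightTrim are made up for illustration). As in the real helper, the freed tail is zapped, a marker stands in for the filler header, and the length is shortened in place so the object never moves.

    // Illustrative analogue only; not V8 API.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct ToyArray {
      int length;                  // number of live slots
      std::vector<intptr_t> slot;  // backing storage, never reallocated here
    };

    static void RightTrim(ToyArray* a, int to_trim) {
      const int len = a->length;
      // First freed word plays the role of the filler header.
      a->slot[len - to_trim] = -1;
      for (int i = 1; i < to_trim; i++) {  // zap the rest of the tail
        a->slot[len - to_trim + i] = 0;
      }
      a->length = len - to_trim;           // shrink in place
    }

    int main() {
      ToyArray a{6, {10, 11, 12, 13, 14, 15}};
      RightTrim(&a, 2);
      std::printf("new length: %d\n", a.length);  // prints 4
    }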
| +MaybeObject* JSObject::MigrateToMap(Map* new_map) { |
| + Heap* heap = GetHeap(); |
| + Map* old_map = map(); |
| + int number_of_fields = new_map->NumberOfFields(); |
| + |
| + // Nothing to do if no functions were converted to fields. |
| + if (old_map->NumberOfFields() == number_of_fields && |
| + (old_map->inobject_properties() == new_map->inobject_properties() || |
| + number_of_fields < new_map->inobject_properties())) { |
| + ASSERT(map()->inobject_properties() == new_map->inobject_properties() || |
| + new_map->NumberOfFields() <= new_map->inobject_properties()); |
| + ASSERT(map()->unused_property_fields() == |
| + (new_map->unused_property_fields() + |
| + map()->inobject_properties() - |
| + new_map->inobject_properties())); |
| + set_map(new_map); |
| + return this; |
| + } |
| + |
| + int inobject = new_map->inobject_properties(); |
| + int total_size = number_of_fields + new_map->unused_property_fields(); |
| + int external = total_size - inobject; |
| + FixedArray* array; |
| + MaybeObject* maybe_array = heap->AllocateFixedArray(total_size); |
| + if (!maybe_array->To(&array)) return maybe_array; |
| + |
| + DescriptorArray* old_descriptors = old_map->instance_descriptors(); |
| + DescriptorArray* new_descriptors = new_map->instance_descriptors(); |
| + int descriptors = new_map->NumberOfOwnDescriptors(); |
| + |
| + for (int i = 0; i < descriptors; i++) { |
| + PropertyDetails details = new_descriptors->GetDetails(i); |
| + if (details.type() != FIELD) continue; |
| + PropertyDetails old_details = old_descriptors->GetDetails(i); |
| + ASSERT(old_details.type() == CONSTANT_FUNCTION || |
| + old_details.type() == FIELD); |
| + Object* value = old_details.type() == CONSTANT_FUNCTION |
| + ? old_descriptors->GetValue(i) |
| + : FastPropertyAt(old_descriptors->GetFieldIndex(i)); |
| + int target_index = new_descriptors->GetFieldIndex(i) - inobject; |
| + if (target_index < 0) target_index += total_size; |
| + array->set(target_index, value); |
| + } |
| + |
| + // From here on we cannot fail anymore. |
| + |
| + // Copy (real) inobject properties. |
| + int limit = Min(inobject, number_of_fields); |
| + for (int i = 0; i < limit; i++) { |
| + FastPropertyAtPut(i, array->get(external + i)); |
| + } |
| + |
| + // Create filler object past the new instance size. |
| + int new_instance_size = new_map->instance_size(); |
| + int instance_size_delta = old_map->instance_size() - new_instance_size; |
| + ASSERT(instance_size_delta >= 0); |
| + Address address = this->address() + new_instance_size; |
| + heap->CreateFillerObjectAt(address, instance_size_delta); |
| + |
| + // If there are properties in the new backing store, trim it to the correct |
| + // size and install the backing store into the object. |
| + if (external > 0) { |
| + RightTrimFixedArray<FROM_MUTATOR>(heap, array, inobject); |
| + set_properties(array); |
| + } |
| + |
| + set_map(new_map); |
| + |
| + return this; |
| +} |
| + |
| + |
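The index arithmetic in the copy loop above (target index = field index minus inobject, with negative values wrapping to the end of the staging array) is easy to lose track of: out-of-object fields land at the front of the temporary array, in-object fields are parked at the back, so the array can later be right-trimmed into the new properties backing store. A hypothetical, self-contained sketch of just that mapping (StagingIndex and the sample sizes are made up for illustration):

    #include <cassert>

    static int StagingIndex(int field_index, int inobject, int total_size) {
      int target = field_index - inobject;
      if (target < 0) target += total_size;  // in-object fields wrap to the end
      return target;
    }

    int main() {
      const int inobject = 2, total_size = 5, external = total_size - inobject;
      // Fields 0 and 1 are in-object, fields 2..4 live in the properties array.
      assert(StagingIndex(0, inobject, total_size) == external + 0);
      assert(StagingIndex(1, inobject, total_size) == external + 1);
      assert(StagingIndex(2, inobject, total_size) == 0);
      assert(StagingIndex(4, inobject, total_size) == 2);
    }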
| +MaybeObject* JSObject::GeneralizeFieldStorage(int modify_index, |
| + StorageType new_storage) { |
| + Map* new_map; |
| + MaybeObject* maybe_new_map = |
| + map()->GeneralizeStorage(modify_index, new_storage); |
| + if (!maybe_new_map->To(&new_map)) return maybe_new_map; |
| + ASSERT(map() != new_map || new_map->FindRootMap()->is_invalid_transition()); |
| + |
| + return MigrateToMap(new_map); |
| +} |
| + |
| + |
| +int Map::NumberOfFields() { |
| + DescriptorArray* descriptors = instance_descriptors(); |
| + int result = 0; |
| + for (int i = 0; i < NumberOfOwnDescriptors(); i++) { |
| + if (descriptors->GetDetails(i).type() == FIELD) result++; |
| + } |
| + return result; |
| +} |
| + |
| + |
| +MaybeObject* Map::CopyGeneralizeStorage(int modify_index, |
| + StorageType new_storage) { |
| + Map* new_map; |
| + MaybeObject* maybe_map = this->Copy(); |
| + if (!maybe_map->To(&new_map)) return maybe_map; |
| + |
| + new_map->instance_descriptors()->InitializeStorageTypes(TAGGED); |
| + return new_map; |
| +} |
| + |
| + |
| +void Map::InvalidateTransitionTree() { |
| + if (is_invalid_transition()) return; |
| + if (HasTransitionArray()) { |
| + TransitionArray* transitions = this->transitions(); |
| + for (int i = 0; i < transitions->number_of_transitions(); i++) { |
| + transitions->GetTarget(i)->InvalidateTransitionTree(); |
| + } |
| + } |
| + invalidate_transition(); |
| + dependent_code()->DeoptimizeDependentCodeGroup( |
| + GetIsolate(), DependentCode::kFieldTransitionGroup); |
| + dependent_code()->DeoptimizeDependentCodeGroup( |
| + GetIsolate(), DependentCode::kPrototypeCheckGroup); |
| +} |
| + |
| + |
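For readers skimming the review, a minimal stand-alone analogue of the depth-first walk InvalidateTransitionTree performs, with a hypothetical ToyMap in place of v8::internal::Map; deoptimization of dependent code is only noted in a comment.

    #include <vector>

    struct ToyMap {
      bool invalid = false;
      std::vector<ToyMap*> transitions;  // child maps reached by transitions
    };

    static void InvalidateTree(ToyMap* map) {
      if (map->invalid) return;          // already poisoned, stop early
      for (ToyMap* target : map->transitions) InvalidateTree(target);
      map->invalid = true;               // mark after visiting the subtree
      // In the real code, dependent code is also deoptimized here.
    }

    int main() {
      ToyMap root, child_a, child_b;
      root.transitions = {&child_a, &child_b};
      InvalidateTree(&root);
      return root.invalid && child_a.invalid && child_b.invalid ? 0 : 1;
    }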
| +void Map::InvalidateTarget(Name* key, DescriptorArray* new_descriptors) { |
| + if (HasTransitionArray()) { |
| + TransitionArray* transitions = this->transitions(); |
| + int transition = transitions->Search(key); |
| + if (transition != TransitionArray::kNotFound) { |
| + transitions->GetTarget(transition)->InvalidateTransitionTree(); |
| + } |
| + } |
| + |
| + // Don't overwrite the empty descriptor array. |
| + if (NumberOfOwnDescriptors() == 0) return; |
| + |
| + DescriptorArray* to_replace = instance_descriptors(); |
| + Map* current = this; |
| + while (current->instance_descriptors() == to_replace) { |
| + current->SetEnumLength(Map::kInvalidEnumCache); |
| + current->set_instance_descriptors(new_descriptors); |
| + Object* next = current->GetBackPointer(); |
| + if (next->IsUndefined()) break; |
| + current = Map::cast(next); |
| + } |
| + |
| + set_owns_descriptors(false); |
| +} |
| + |
| + |
| +bool Map::TransitionsTo(Map* other) { |
| + if (!HasTransitionArray()) return false; |
| + |
| + int descriptor = other->LastAdded(); |
| + Name* name = other->instance_descriptors()->GetKey(descriptor); |
| + |
| + TransitionArray* transitions = this->transitions(); |
| + int transition = transitions->Search(name); |
| + if (transition == TransitionArray::kNotFound) return false; |
| + |
| + return transitions->GetTarget(transition) == other; |
| +} |
| + |
| + |
| +Map* Map::FindRootMap() { |
| + Map* result = this; |
| + while (true) { |
| + Object* back = result->GetBackPointer(); |
| + if (back->IsUndefined()) return result; |
| + result = Map::cast(back); |
| + } |
| +} |
| + |
| + |
| +Map* Map::FindUpdatedMap(int length, DescriptorArray* descriptors) { |
| + // This can only be called on roots of transition trees. |
| + ASSERT(GetBackPointer()->IsUndefined()); |
| + |
| + Map* current = this; |
| + |
| + for (int i = 0; i < length; i++) { |
| + if (!current->HasTransitionArray()) break; |
| + Name* name = descriptors->GetKey(i); |
| + TransitionArray* transitions = current->transitions(); |
| + int transition = transitions->Search(name); |
| + if (transition == TransitionArray::kNotFound) break; |
| + current = transitions->GetTarget(transition); |
| + } |
| + |
| + return current; |
| +} |
| + |
| + |
| +Map* Map::FindLastMatchMap(DescriptorArray* descriptors) { |
| + // This can only be called on roots of transition trees. |
| + ASSERT(GetBackPointer()->IsUndefined()); |
| + |
| + Map* current = this; |
| + int length = descriptors->number_of_descriptors(); |
| + |
| + for (int i = 0; i < length; i++) { |
| + if (!current->HasTransitionArray()) break; |
| + Name* name = descriptors->GetKey(i); |
| + TransitionArray* transitions = current->transitions(); |
| + int transition = transitions->Search(name); |
| + if (transition == TransitionArray::kNotFound) break; |
| + |
| + Map* next = transitions->GetTarget(transition); |
| + DescriptorArray* next_descriptors = next->instance_descriptors(); |
| + |
| + if (next_descriptors->GetValue(i) != descriptors->GetValue(i)) break; |
| + |
| + PropertyDetails details = descriptors->GetDetails(i); |
| + PropertyDetails next_details = next_descriptors->GetDetails(i); |
| + if (details.type() != next_details.type()) break; |
| + if (details.attributes() != next_details.attributes()) break; |
| + if (details.storage_type() != next_details.storage_type()) break; |
| + ASSERT(!details.IsDeleted()); |
| + ASSERT(!next_details.IsDeleted()); |
| + |
| + current = next; |
| + } |
| + return current; |
| +} |
| + |
| + |
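FindUpdatedMap and FindLastMatchMap both walk the transition tree from the root, one descriptor key at a time, stopping at the first key with no matching transition (FindLastMatchMap additionally compares descriptor values and details). A hypothetical sketch of that walk, with std::map standing in for TransitionArray::Search:

    #include <map>
    #include <string>
    #include <vector>

    struct ToyMap {
      std::map<std::string, ToyMap*> transitions;
    };

    static ToyMap* FindUpdated(ToyMap* root,
                               const std::vector<std::string>& keys) {
      ToyMap* current = root;
      for (const std::string& key : keys) {
        auto it = current->transitions.find(key);
        if (it == current->transitions.end()) break;  // no transition: stop
        current = it->second;
      }
      return current;  // deepest map reachable along the given property keys
    }

    int main() {
      ToyMap root, a, ab;
      root.transitions["a"] = &a;
      a.transitions["b"] = &ab;
      return FindUpdated(&root, {"a", "b", "c"}) == &ab ? 0 : 1;
    }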
| +MaybeObject* Map::GeneralizeStorage(int modify_index, StorageType new_storage) { |
| + Map* old_map = this; |
| + DescriptorArray* old_descriptors = old_map->instance_descriptors(); |
| + StorageType old_storage = |
| + old_descriptors->GetDetails(modify_index).storage_type(); |
| + |
| + if (old_storage == ANY) { |
| + old_descriptors->SetStorageType(modify_index, new_storage); |
| + return this; |
| + } |
| + |
| + int descriptors = old_map->NumberOfOwnDescriptors(); |
| + Map* root_map = old_map->FindRootMap(); |
| + |
| + if (!old_map->EquivalentToForTransition(root_map)) { |
| + return CopyGeneralizeStorage(modify_index, new_storage); |
| + } |
| + |
| + Map* updated = root_map->FindUpdatedMap(descriptors, old_descriptors); |
| + // Check the state of the root map. |
| + DescriptorArray* updated_descriptors = updated->instance_descriptors(); |
| + |
| + DescriptorArray* new_descriptors; |
| + MaybeObject* maybe_descriptors = updated_descriptors->Merge( |
| + root_map->NumberOfOwnDescriptors(), |
| + updated->NumberOfOwnDescriptors(), |
| + descriptors, |
| + old_descriptors); |
| + if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors; |
| + |
| + if (IsMoreGeneralStorageType( |
| + new_storage, |
| + new_descriptors->GetDetails(modify_index).storage_type())) { |
| + new_descriptors->SetStorageType(modify_index, new_storage); |
| + } |
| + |
| + Map* split_map = root_map->FindLastMatchMap(new_descriptors); |
| + |
| + int descriptor = split_map->NumberOfOwnDescriptors(); |
| + // Check whether |split_map| matches what we were looking for. If so, return |
| + // it. |
| + if (descriptors == descriptor) return split_map; |
| + |
| + split_map->InvalidateTarget( |
| + old_descriptors->GetKey(descriptor), new_descriptors); |
| + |
| + Map* new_map = split_map; |
| + // Add missing transitions. |
| + for (; descriptor < descriptors; descriptor++) { |
| + MaybeObject* maybe_map = new_map->CopyInstallDescriptors( |
| + descriptor, new_descriptors); |
| + if (!maybe_map->To(&new_map)) { |
| + // Create a handle for the last created map to ensure it stays alive |
| + // during GC. Its descriptor array is too large, but it will be |
| + // overwritten during retry anyway. |
| + Handle<Map>(new_map); |
| + return maybe_map; |
| + } |
| + } |
| + |
| + new_map->set_owns_descriptors(true); |
| + return new_map; |
| +} |
| + |
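GeneralizeStorage only relies on an ordering between storage types via IsMoreGeneralStorageType, with ANY acting as "not yet constrained" (see the early-out above). The actual StorageType lattice is not visible in this hunk, so the values and ordering in this toy are assumptions for illustration only:

    // Assumed lattice: TAGGED generalizes SMI; ANY means "unconstrained".
    enum ToyStorage { TOY_ANY, TOY_SMI, TOY_TAGGED };

    static bool IsMoreGeneral(ToyStorage a, ToyStorage b) {
      if (a == b) return false;
      return a == TOY_TAGGED;
    }

    static ToyStorage Generalize(ToyStorage a, ToyStorage b) {
      return IsMoreGeneral(a, b) ? a : b;  // keep the more general of the pair
    }

    int main() {
      return Generalize(TOY_SMI, TOY_TAGGED) == TOY_TAGGED ? 0 : 1;
    }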
| MaybeObject* JSObject::SetPropertyWithInterceptor( |
| Name* name, |
| @@ -2391,55 +2729,6 @@ MaybeObject* JSObject::SetPropertyViaPrototypes( |
| } |
| -enum RightTrimMode { FROM_GC, FROM_MUTATOR }; |
| - |
| - |
| -static void ZapEndOfFixedArray(Address new_end, int to_trim) { |
| - // If we are doing a big trim in old space then we zap the space. |
| - Object** zap = reinterpret_cast<Object**>(new_end); |
| - zap++; // Header of filler must be at least one word so skip that. |
| - for (int i = 1; i < to_trim; i++) { |
| - *zap++ = Smi::FromInt(0); |
| - } |
| -} |
| - |
| - |
| -template<RightTrimMode trim_mode> |
| -static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) { |
| - ASSERT(elms->map() != HEAP->fixed_cow_array_map()); |
| - // For now this trick is only applied to fixed arrays in new and paged space. |
| - ASSERT(!HEAP->lo_space()->Contains(elms)); |
| - |
| - const int len = elms->length(); |
| - |
| - ASSERT(to_trim < len); |
| - |
| - Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim); |
| - |
| - if (trim_mode != FROM_GC || Heap::ShouldZapGarbage()) { |
| - ZapEndOfFixedArray(new_end, to_trim); |
| - } |
| - |
| - int size_delta = to_trim * kPointerSize; |
| - |
| - // Technically in new space this write might be omitted (except for |
| - // debug mode which iterates through the heap), but to play safer |
| - // we still do it. |
| - heap->CreateFillerObjectAt(new_end, size_delta); |
| - |
| - elms->set_length(len - to_trim); |
| - |
| - // Maintain marking consistency for IncrementalMarking. |
| - if (Marking::IsBlack(Marking::MarkBitFrom(elms))) { |
| - if (trim_mode == FROM_GC) { |
| - MemoryChunk::IncrementLiveBytesFromGC(elms->address(), -size_delta); |
| - } else { |
| - MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta); |
| - } |
| - } |
| -} |
| - |
| - |
| void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) { |
| Handle<DescriptorArray> descriptors(map->instance_descriptors()); |
| if (slack <= descriptors->NumberOfSlackDescriptors()) return; |
| @@ -3103,18 +3392,17 @@ MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(const char* name, |
| } |
| -void JSObject::AddFastPropertyUsingMap(Handle<JSObject> object, |
| - Handle<Map> map) { |
| +void JSObject::TransitionToMap(Handle<JSObject> object, Handle<Map> map) { |
| CALL_HEAP_FUNCTION_VOID( |
| object->GetIsolate(), |
| - object->AddFastPropertyUsingMap(*map)); |
| + object->TransitionToMap(*map)); |
| } |
| -void JSObject::TransitionToMap(Handle<JSObject> object, Handle<Map> map) { |
| +void JSObject::MigrateInstance(Handle<JSObject> object) { |
| CALL_HEAP_FUNCTION_VOID( |
| object->GetIsolate(), |
| - object->TransitionToMap(*map)); |
| + object->MigrateInstance()); |
| } |
| @@ -3206,10 +3494,17 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup, |
| case NORMAL: |
| result = lookup->holder()->SetNormalizedProperty(lookup, *value); |
| break; |
| - case FIELD: |
| + case FIELD: { |
| + StorageType storage_type = lookup->storage_type(); |
| + if (!value->FitsStorage(storage_type)) { |
| + MaybeObject* maybe_failure = GeneralizeFieldStorage( |
| + lookup->GetDescriptorIndex(), value->RequiredStorage()); |
| + if (maybe_failure->IsFailure()) return maybe_failure; |
| + } |
| result = lookup->holder()->FastPropertyAtPut( |
| lookup->GetFieldIndex().field_index(), *value); |
| break; |
| + } |
| case CONSTANT_FUNCTION: |
| // Only replace the function if necessary. |
| if (*value == lookup->GetConstantFunction()) return *value; |
| @@ -3236,6 +3531,16 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup, |
| if (details.type() == FIELD) { |
| if (attributes == details.attributes()) { |
| + if (!value->FitsStorage(details.storage_type())) { |
| + MaybeObject* maybe_map = transition_map->GeneralizeStorage( |
| + descriptor, value->RequiredStorage()); |
| + if (!maybe_map->To(&transition_map)) return maybe_map; |
| + Object* back = transition_map->GetBackPointer(); |
| + if (back->IsMap()) { |
| + MaybeObject* maybe_failure = MigrateToMap(Map::cast(back)); |
| + if (maybe_failure->IsFailure()) return maybe_failure; |
| + } |
| + } |
| int field_index = descriptors->GetFieldIndex(descriptor); |
| result = lookup->holder()->AddFastPropertyUsingMap( |
| transition_map, *name, *value, field_index); |
| @@ -3370,10 +3675,17 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes( |
| result = self->SetNormalizedProperty(*name, *value, details); |
| break; |
| } |
| - case FIELD: |
| + case FIELD: { |
| + StorageType storage_type = lookup.storage_type(); |
| + if (!value->FitsStorage(storage_type)) { |
| + MaybeObject* maybe_failure = GeneralizeFieldStorage( |
| + lookup.GetDescriptorIndex(), value->RequiredStorage()); |
| + if (maybe_failure->IsFailure()) return maybe_failure; |
| + } |
| result = self->FastPropertyAtPut( |
| lookup.GetFieldIndex().field_index(), *value); |
| break; |
| + } |
| case CONSTANT_FUNCTION: |
| // Only replace the function if necessary. |
| if (*value != lookup.GetConstantFunction()) { |
| @@ -3396,6 +3708,16 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes( |
| if (details.type() == FIELD) { |
| if (attributes == details.attributes()) { |
| + if (!value->FitsStorage(details.storage_type())) { |
| + MaybeObject* maybe_map = transition_map->GeneralizeStorage( |
| + descriptor, value->RequiredStorage()); |
| + if (!maybe_map->To(&transition_map)) return maybe_map; |
| + Object* back = transition_map->GetBackPointer(); |
| + if (back->IsMap()) { |
| + MaybeObject* maybe_failure = MigrateToMap(Map::cast(back)); |
| + if (maybe_failure->IsFailure()) return maybe_failure; |
| + } |
| + } |
| int field_index = descriptors->GetFieldIndex(descriptor); |
| result = self->AddFastPropertyUsingMap( |
| transition_map, *name, *value, field_index); |
| @@ -4918,16 +5240,6 @@ int Map::NumberOfDescribedProperties(DescriptorFlag which, |
| } |
| -int Map::PropertyIndexFor(Name* name) { |
| - DescriptorArray* descs = instance_descriptors(); |
| - int limit = NumberOfOwnDescriptors(); |
| - for (int i = 0; i < limit; i++) { |
| - if (name->Equals(descs->GetKey(i))) return descs->GetFieldIndex(i); |
| - } |
| - return -1; |
| -} |
| - |
| - |
| int Map::NextFreePropertyIndex() { |
| int max_index = -1; |
| int number_of_own_descriptors = NumberOfOwnDescriptors(); |
| @@ -5778,6 +6090,7 @@ MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors, |
| (descriptor_index == descriptors->number_of_descriptors() - 1) |
| ? SIMPLE_TRANSITION |
| : FULL_TRANSITION; |
| + ASSERT(name == descriptors->GetKey(descriptor_index)); |
| MaybeObject* maybe_transitions = AddTransition(name, result, simple_flag); |
| if (!maybe_transitions->To(&transitions)) return maybe_transitions; |
| @@ -5789,6 +6102,43 @@ MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors, |
| } |
| +MaybeObject* Map::CopyInstallDescriptors(int new_descriptor, |
| + DescriptorArray* descriptors) { |
| + ASSERT(descriptors->IsSortedNoDuplicates()); |
| + |
| + Map* result; |
| + MaybeObject* maybe_result = CopyDropDescriptors(); |
| + if (!maybe_result->To(&result)) return maybe_result; |
| + |
| + result->InitializeDescriptors(descriptors); |
| + result->SetNumberOfOwnDescriptors(new_descriptor + 1); |
| + |
| + int unused_property_fields = this->unused_property_fields(); |
| + if (descriptors->GetDetails(new_descriptor).type() == FIELD) { |
| + unused_property_fields = this->unused_property_fields() - 1; |
| + if (unused_property_fields < 0) { |
| + unused_property_fields += JSObject::kFieldsAdded; |
| + } |
| + } |
| + |
| + result->set_unused_property_fields(unused_property_fields); |
| + result->set_owns_descriptors(false); |
| + |
| + if (CanHaveMoreTransitions()) { |
| + Name* name = descriptors->GetKey(new_descriptor); |
| + TransitionArray* transitions; |
| + MaybeObject* maybe_transitions = |
| + AddTransition(name, result, SIMPLE_TRANSITION); |
| + if (!maybe_transitions->To(&transitions)) return maybe_transitions; |
| + |
| + set_transitions(transitions); |
| + result->SetBackPointer(this); |
| + } |
| + |
| + return result; |
| +} |
| + |
| + |
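The unused_property_fields bookkeeping above wraps below zero by JSObject::kFieldsAdded: consuming the last spare slot means the next instance accounts for a backing store grown by a whole batch. A tiny sketch of that arithmetic, with the batch size of 3 assumed purely for illustration (the real constant is whatever kFieldsAdded is in this tree):

    static int NextUnused(int unused, bool adds_field, int fields_added_batch) {
      if (!adds_field) return unused;
      int next = unused - 1;
      if (next < 0) next += fields_added_batch;  // backing store was grown
      return next;
    }

    int main() {
      return NextUnused(0, true, 3) == 2 ? 0 : 1;  // 0 spare -> grow -> 2 spare
    }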
| MaybeObject* Map::CopyAsElementsKind(ElementsKind kind, TransitionFlag flag) { |
| if (flag == INSERT_TRANSITION) { |
| ASSERT(!HasElementsTransition() || |
| @@ -5857,6 +6207,8 @@ MaybeObject* Map::CopyWithPreallocatedFieldDescriptors() { |
| descriptors->CopyUpTo(number_of_own_descriptors); |
| if (!maybe_descriptors->To(&new_descriptors)) return maybe_descriptors; |
| + new_descriptors->InitializeStorageTypes(TAGGED); |
| + |
| return CopyReplaceDescriptors(new_descriptors, NULL, OMIT_TRANSITION, 0); |
| } |
| @@ -6812,6 +7164,105 @@ void DescriptorArray::CopyFrom(int dst_index, |
| } |
| +MaybeObject* DescriptorArray::Merge(int verbatim, |
| + int valid, |
| + int new_size, |
| + DescriptorArray* other) { |
| + ASSERT(verbatim <= valid); |
| + ASSERT(valid <= new_size); |
| + |
| + DescriptorArray* result; |
| + // Allocate a new descriptor array large enough to hold the required |
| + // descriptors, with minimally the exact same size as this descriptor array. |
| + MaybeObject* maybe_descriptors = DescriptorArray::Allocate( |
| + new_size, Max(new_size, number_of_descriptors()) - new_size); |
| + if (!maybe_descriptors->To(&result)) return maybe_descriptors; |
| + ASSERT(result->length() > length() || |
| + result->NumberOfSlackDescriptors() > 0 || |
| + result->number_of_descriptors() == other->number_of_descriptors()); |
| + ASSERT(result->number_of_descriptors() == new_size); |
| + |
| + DescriptorArray::WhitenessWitness witness(result); |
| + |
| + int descriptor; |
| + |
| + int current_offset = 0; |
| + for (descriptor = 0; descriptor < verbatim; descriptor++) { |
| + if (GetDetails(descriptor).type() == FIELD) current_offset++; |
| + result->CopyFrom(descriptor, this, descriptor, witness); |
| + } |
| + |
| + for (; descriptor < valid; descriptor++) { |
| + Name* key = GetKey(descriptor); |
| + PropertyDetails details = GetDetails(descriptor); |
| + PropertyDetails other_details = other->GetDetails(descriptor); |
| + ASSERT(details.attributes() == other_details.attributes()); |
| + |
| + if (details.type() == FIELD) { |
| + ASSERT(other_details.type() != CALLBACKS); |
| + FieldDescriptor d(key, |
| + current_offset++, |
| + details.attributes(), |
| + descriptor + 1); |
| + StorageType storage = |
| + IsMoreGeneralStorageType( |
| + other_details.storage_type(), details.storage_type()) |
| + ? other_details.storage_type() : details.storage_type(); |
| + d.SetStorageType(storage); |
| + result->Set(descriptor, &d, witness); |
| + } else if (other_details.type() == FIELD) { |
| + FieldDescriptor d(key, |
| + current_offset++, |
| + details.attributes(), |
| + descriptor + 1); |
| + StorageType storage = |
| + IsMoreGeneralStorageType( |
| + other_details.storage_type(), details.storage_type()) |
| + ? other_details.storage_type() : details.storage_type(); |
| + d.SetStorageType(storage); |
| + result->Set(descriptor, &d, witness); |
| + } else if (other_details.type() == CONSTANT_FUNCTION) { |
| + Object* value = GetValue(descriptor); |
| + Object* other_value = other->GetValue(descriptor); |
| + if (details.type() == CONSTANT_FUNCTION && value != other_value) { |
| + FieldDescriptor d(key, |
| + current_offset++, |
| + details.attributes(), |
| + descriptor + 1); |
| + d.SetStorageType(TAGGED); |
| + result->Set(descriptor, &d, witness); |
| + } else { |
| + ConstantFunctionDescriptor d(key, |
| + JSFunction::cast(other_value), |
| + details.attributes(), |
| + descriptor + 1); |
| + result->Set(descriptor, &d, witness); |
| + } |
| + } else { |
| + ASSERT(other_details.type() != FIELD); |
| + result->CopyFrom(descriptor, other, descriptor, witness); |
| + } |
| + } |
| + |
| + for (; descriptor < new_size; descriptor++) { |
| + PropertyDetails details = other->GetDetails(descriptor); |
| + if (details.type() == FIELD) { |
| + Name* key = other->GetKey(descriptor); |
| + FieldDescriptor d(key, |
| + current_offset++, |
| + details.attributes(), |
| + descriptor + 1); |
| + result->Set(descriptor, &d, witness); |
| + } else { |
| + result->CopyFrom(descriptor, other, descriptor, witness); |
| + } |
| + } |
| + |
| + result->Sort(); |
| + return result; |
| +} |
| + |
| + |
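DescriptorArray::Merge proceeds in three segments: indices below |verbatim| are copied from the receiver unchanged, indices up to |valid| are unified pairwise (fields win over constant functions and the more general storage type is kept), and the remaining indices come from |other|. A stand-alone sketch of that shape, with a hypothetical ToyDescriptor that reduces "more general" to a max over ints:

    #include <algorithm>
    #include <vector>

    struct ToyDescriptor {
      int storage;  // larger value = more general, an assumption for this toy
    };

    static std::vector<ToyDescriptor> Merge(
        const std::vector<ToyDescriptor>& left,
        const std::vector<ToyDescriptor>& right,
        int verbatim, int valid) {
      std::vector<ToyDescriptor> result;
      int i = 0;
      for (; i < verbatim; i++) result.push_back(left[i]);       // copy as-is
      for (; i < valid; i++) {                                    // unify pair
        result.push_back({std::max(left[i].storage, right[i].storage)});
      }
      for (; i < static_cast<int>(right.size()); i++) {           // tail
        result.push_back(right[i]);
      }
      return result;
    }

    int main() {
      std::vector<ToyDescriptor> left = {{0}, {2}, {1}};
      std::vector<ToyDescriptor> right = {{0}, {1}, {2}, {1}};
      std::vector<ToyDescriptor> merged = Merge(left, right, 1, 3);
      return merged.size() == 4 && merged[1].storage == 2 ? 0 : 1;
    }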
| // We need the whiteness witness since sort will reshuffle the entries in the |
| // descriptor array. If the descriptor array were to be black, the shuffling |
| // would move a slot that was already recorded as pointing into an evacuation |
| @@ -8173,19 +8624,28 @@ int Map::Hash() { |
| } |
| +static bool CheckEquivalent(Map* first, Map* second) { |
| + return |
| + first->constructor() == second->constructor() && |
| + first->prototype() == second->prototype() && |
| + first->instance_type() == second->instance_type() && |
| + first->bit_field() == second->bit_field() && |
| + first->bit_field2() == second->bit_field2() && |
| + first->is_observed() == second->is_observed() && |
| + first->function_with_prototype() == second->function_with_prototype(); |
| +} |
| + |
| + |
| +bool Map::EquivalentToForTransition(Map* other) { |
| + return CheckEquivalent(this, other); |
| +} |
| + |
| + |
| bool Map::EquivalentToForNormalization(Map* other, |
| PropertyNormalizationMode mode) { |
| - return |
| - constructor() == other->constructor() && |
| - prototype() == other->prototype() && |
| - inobject_properties() == ((mode == CLEAR_INOBJECT_PROPERTIES) ? |
| - 0 : |
| - other->inobject_properties()) && |
| - instance_type() == other->instance_type() && |
| - bit_field() == other->bit_field() && |
| - bit_field2() == other->bit_field2() && |
| - is_observed() == other->is_observed() && |
| - function_with_prototype() == other->function_with_prototype(); |
| + int properties = mode == CLEAR_INOBJECT_PROPERTIES |
| + ? 0 : other->inobject_properties(); |
| + return CheckEquivalent(this, other) && inobject_properties() == properties; |
| } |
| @@ -13922,6 +14382,9 @@ MaybeObject* NameDictionary::TransformPropertiesToFastFor( |
| current_offset++, |
| details.attributes(), |
| enumeration_index); |
| + // TODO(verwaest): Support storage types in the boilerplate. |
| + // d.SetStorageType(value->RequiredStorage()); |
| + d.SetStorageType(TAGGED); |
| descriptors->Set(enumeration_index - 1, &d, witness); |
| } else if (type == CALLBACKS) { |
| CallbacksDescriptor d(key, |