OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 2517 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2528 slow->SharedMapVerify(); | 2528 slow->SharedMapVerify(); |
2529 #endif | 2529 #endif |
2530 return | 2530 return |
2531 slow->constructor() == fast->constructor() && | 2531 slow->constructor() == fast->constructor() && |
2532 slow->prototype() == fast->prototype() && | 2532 slow->prototype() == fast->prototype() && |
2533 slow->inobject_properties() == ((mode == CLEAR_INOBJECT_PROPERTIES) ? | 2533 slow->inobject_properties() == ((mode == CLEAR_INOBJECT_PROPERTIES) ? |
2534 0 : | 2534 0 : |
2535 fast->inobject_properties()) && | 2535 fast->inobject_properties()) && |
2536 slow->instance_type() == fast->instance_type() && | 2536 slow->instance_type() == fast->instance_type() && |
2537 slow->bit_field() == fast->bit_field() && | 2537 slow->bit_field() == fast->bit_field() && |
| 2538 slow->bit_field3() == fast->bit_field3() && |
2538 (slow->bit_field2() & ~(1<<Map::kIsShared)) == fast->bit_field2(); | 2539 (slow->bit_field2() & ~(1<<Map::kIsShared)) == fast->bit_field2(); |
2539 } | 2540 } |
2540 | 2541 |
2541 | 2542 |
2542 MaybeObject* JSObject::UpdateMapCodeCache(String* name, Code* code) { | 2543 MaybeObject* JSObject::UpdateMapCodeCache(String* name, Code* code) { |
2543 if (map()->is_shared()) { | 2544 if (map()->is_shared()) { |
2544 // Fast case maps are never marked as shared. | 2545 // Fast case maps are never marked as shared. |
2545 ASSERT(!HasFastProperties()); | 2546 ASSERT(!HasFastProperties()); |
2546 // Replace the map with an identical copy that can be safely modified. | 2547 // Replace the map with an identical copy that can be safely modified. |
2547 Object* obj; | 2548 Object* obj; |
(...skipping 102 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2650 // Changes can now be made with the guarantee that all of them take effect. | 2651 // Changes can now be made with the guarantee that all of them take effect. |
2651 | 2652 |
2652 // Resize the object in the heap if necessary. | 2653 // Resize the object in the heap if necessary. |
2653 int new_instance_size = new_map->instance_size(); | 2654 int new_instance_size = new_map->instance_size(); |
2654 int instance_size_delta = map_of_this->instance_size() - new_instance_size; | 2655 int instance_size_delta = map_of_this->instance_size() - new_instance_size; |
2655 ASSERT(instance_size_delta >= 0); | 2656 ASSERT(instance_size_delta >= 0); |
2656 current_heap->CreateFillerObjectAt(this->address() + new_instance_size, | 2657 current_heap->CreateFillerObjectAt(this->address() + new_instance_size, |
2657 instance_size_delta); | 2658 instance_size_delta); |
2658 | 2659 |
2659 set_map(new_map); | 2660 set_map(new_map); |
2660 new_map->set_instance_descriptors(current_heap->empty_descriptor_array()); | 2661 new_map->clear_instance_descriptors(); |
2661 | 2662 |
2662 set_properties(dictionary); | 2663 set_properties(dictionary); |
2663 | 2664 |
2664 current_heap->isolate()->counters()->props_to_dictionary()->Increment(); | 2665 current_heap->isolate()->counters()->props_to_dictionary()->Increment(); |
2665 | 2666 |
2666 #ifdef DEBUG | 2667 #ifdef DEBUG |
2667 if (FLAG_trace_normalization) { | 2668 if (FLAG_trace_normalization) { |
2668 PrintF("Object properties have been normalized:\n"); | 2669 PrintF("Object properties have been normalized:\n"); |
2669 Print(); | 2670 Print(); |
2670 } | 2671 } |
(...skipping 969 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3640 heap->AllocateMap(instance_type(), instance_size()); | 3641 heap->AllocateMap(instance_type(), instance_size()); |
3641 if (!maybe_result->ToObject(&result)) return maybe_result; | 3642 if (!maybe_result->ToObject(&result)) return maybe_result; |
3642 } | 3643 } |
3643 Map::cast(result)->set_prototype(prototype()); | 3644 Map::cast(result)->set_prototype(prototype()); |
3644 Map::cast(result)->set_constructor(constructor()); | 3645 Map::cast(result)->set_constructor(constructor()); |
3645 // Don't copy descriptors, so map transitions always remain a forest. | 3646 // Don't copy descriptors, so map transitions always remain a forest. |
3646 // If we retained the same descriptors we would have two maps | 3647 // If we retained the same descriptors we would have two maps |
3647 // pointing to the same transition which is bad because the garbage | 3648 // pointing to the same transition which is bad because the garbage |
3648 // collector relies on being able to reverse pointers from transitions | 3649 // collector relies on being able to reverse pointers from transitions |
3649 // to maps. If properties need to be retained use CopyDropTransitions. | 3650 // to maps. If properties need to be retained use CopyDropTransitions. |
3650 Map::cast(result)->set_instance_descriptors( | 3651 Map::cast(result)->clear_instance_descriptors(); |
3651 heap->empty_descriptor_array()); | |
3652 // Please note instance_type and instance_size are set when allocated. | 3652 // Please note instance_type and instance_size are set when allocated. |
3653 Map::cast(result)->set_inobject_properties(inobject_properties()); | 3653 Map::cast(result)->set_inobject_properties(inobject_properties()); |
3654 Map::cast(result)->set_unused_property_fields(unused_property_fields()); | 3654 Map::cast(result)->set_unused_property_fields(unused_property_fields()); |
3655 | 3655 |
3656 // If the map has pre-allocated properties always start out with a descriptor | 3656 // If the map has pre-allocated properties always start out with a descriptor |
3657 // array describing these properties. | 3657 // array describing these properties. |
3658 if (pre_allocated_property_fields() > 0) { | 3658 if (pre_allocated_property_fields() > 0) { |
3659 ASSERT(constructor()->IsJSFunction()); | 3659 ASSERT(constructor()->IsJSFunction()); |
3660 JSFunction* ctor = JSFunction::cast(constructor()); | 3660 JSFunction* ctor = JSFunction::cast(constructor()); |
3661 Object* descriptors; | 3661 Object* descriptors; |
3662 { MaybeObject* maybe_descriptors = | 3662 { MaybeObject* maybe_descriptors = |
3663 ctor->initial_map()->instance_descriptors()->RemoveTransitions(); | 3663 ctor->initial_map()->instance_descriptors()->RemoveTransitions(); |
3664 if (!maybe_descriptors->ToObject(&descriptors)) return maybe_descriptors; | 3664 if (!maybe_descriptors->ToObject(&descriptors)) return maybe_descriptors; |
3665 } | 3665 } |
3666 Map::cast(result)->set_instance_descriptors( | 3666 Map::cast(result)->set_instance_descriptors( |
3667 DescriptorArray::cast(descriptors)); | 3667 DescriptorArray::cast(descriptors)); |
3668 Map::cast(result)->set_pre_allocated_property_fields( | 3668 Map::cast(result)->set_pre_allocated_property_fields( |
3669 pre_allocated_property_fields()); | 3669 pre_allocated_property_fields()); |
3670 } | 3670 } |
3671 Map::cast(result)->set_bit_field(bit_field()); | 3671 Map::cast(result)->set_bit_field(bit_field()); |
3672 Map::cast(result)->set_bit_field2(bit_field2()); | 3672 Map::cast(result)->set_bit_field2(bit_field2()); |
| 3673 Map::cast(result)->set_bit_field3(bit_field3()); |
3673 Map::cast(result)->set_is_shared(false); | 3674 Map::cast(result)->set_is_shared(false); |
3674 Map::cast(result)->ClearCodeCache(heap); | 3675 Map::cast(result)->ClearCodeCache(heap); |
3675 return result; | 3676 return result; |
3676 } | 3677 } |
3677 | 3678 |
3678 | 3679 |
3679 MaybeObject* Map::CopyNormalized(PropertyNormalizationMode mode, | 3680 MaybeObject* Map::CopyNormalized(PropertyNormalizationMode mode, |
3680 NormalizedMapSharingMode sharing) { | 3681 NormalizedMapSharingMode sharing) { |
3681 int new_instance_size = instance_size(); | 3682 int new_instance_size = instance_size(); |
3682 if (mode == CLEAR_INOBJECT_PROPERTIES) { | 3683 if (mode == CLEAR_INOBJECT_PROPERTIES) { |
3683 new_instance_size -= inobject_properties() * kPointerSize; | 3684 new_instance_size -= inobject_properties() * kPointerSize; |
3684 } | 3685 } |
3685 | 3686 |
3686 Object* result; | 3687 Object* result; |
3687 { MaybeObject* maybe_result = | 3688 { MaybeObject* maybe_result = |
3688 GetHeap()->AllocateMap(instance_type(), new_instance_size); | 3689 GetHeap()->AllocateMap(instance_type(), new_instance_size); |
3689 if (!maybe_result->ToObject(&result)) return maybe_result; | 3690 if (!maybe_result->ToObject(&result)) return maybe_result; |
3690 } | 3691 } |
3691 | 3692 |
3692 if (mode != CLEAR_INOBJECT_PROPERTIES) { | 3693 if (mode != CLEAR_INOBJECT_PROPERTIES) { |
3693 Map::cast(result)->set_inobject_properties(inobject_properties()); | 3694 Map::cast(result)->set_inobject_properties(inobject_properties()); |
3694 } | 3695 } |
3695 | 3696 |
3696 Map::cast(result)->set_prototype(prototype()); | 3697 Map::cast(result)->set_prototype(prototype()); |
3697 Map::cast(result)->set_constructor(constructor()); | 3698 Map::cast(result)->set_constructor(constructor()); |
3698 | 3699 |
3699 Map::cast(result)->set_bit_field(bit_field()); | 3700 Map::cast(result)->set_bit_field(bit_field()); |
3700 Map::cast(result)->set_bit_field2(bit_field2()); | 3701 Map::cast(result)->set_bit_field2(bit_field2()); |
| 3702 Map::cast(result)->set_bit_field3(bit_field3()); |
3701 | 3703 |
3702 Map::cast(result)->set_is_shared(sharing == SHARED_NORMALIZED_MAP); | 3704 Map::cast(result)->set_is_shared(sharing == SHARED_NORMALIZED_MAP); |
3703 | 3705 |
3704 #ifdef DEBUG | 3706 #ifdef DEBUG |
3705 if (Map::cast(result)->is_shared()) { | 3707 if (Map::cast(result)->is_shared()) { |
3706 Map::cast(result)->SharedMapVerify(); | 3708 Map::cast(result)->SharedMapVerify(); |
3707 } | 3709 } |
3708 #endif | 3710 #endif |
3709 | 3711 |
3710 return result; | 3712 return result; |
(...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3766 ASSERT(!code_cache()->IsFixedArray()); | 3768 ASSERT(!code_cache()->IsFixedArray()); |
3767 CodeCache::cast(code_cache())->RemoveByIndex(name, code, index); | 3769 CodeCache::cast(code_cache())->RemoveByIndex(name, code, index); |
3768 } | 3770 } |
3769 | 3771 |
3770 | 3772 |
3771 void Map::TraverseTransitionTree(TraverseCallback callback, void* data) { | 3773 void Map::TraverseTransitionTree(TraverseCallback callback, void* data) { |
3772 Map* current = this; | 3774 Map* current = this; |
3773 Map* meta_map = heap()->meta_map(); | 3775 Map* meta_map = heap()->meta_map(); |
3774 while (current != meta_map) { | 3776 while (current != meta_map) { |
3775 DescriptorArray* d = reinterpret_cast<DescriptorArray*>( | 3777 DescriptorArray* d = reinterpret_cast<DescriptorArray*>( |
3776 *RawField(current, Map::kInstanceDescriptorsOffset)); | 3778 *RawField(current, Map::kInstanceDescriptorsOrBitField3Offset)); |
3777 if (d == heap()->empty_descriptor_array()) { | 3779 if (d->IsEmpty()) { |
3778 Map* prev = current->map(); | 3780 Map* prev = current->map(); |
3779 current->set_map(meta_map); | 3781 current->set_map(meta_map); |
3780 callback(current, data); | 3782 callback(current, data); |
3781 current = prev; | 3783 current = prev; |
3782 continue; | 3784 continue; |
3783 } | 3785 } |
3784 | 3786 |
3785 FixedArray* contents = reinterpret_cast<FixedArray*>( | 3787 FixedArray* contents = reinterpret_cast<FixedArray*>( |
3786 d->get(DescriptorArray::kContentArrayIndex)); | 3788 d->get(DescriptorArray::kContentArrayIndex)); |
3787 Object** map_or_index_field = RawField(contents, HeapObject::kMapOffset); | 3789 Object** map_or_index_field = RawField(contents, HeapObject::kMapOffset); |
(...skipping 450 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4238 if (!maybe_array->ToObject(&array)) return maybe_array; | 4240 if (!maybe_array->ToObject(&array)) return maybe_array; |
4239 } | 4241 } |
4240 // Do not use DescriptorArray::cast on incomplete object. | 4242 // Do not use DescriptorArray::cast on incomplete object. |
4241 FixedArray* result = FixedArray::cast(array); | 4243 FixedArray* result = FixedArray::cast(array); |
4242 | 4244 |
4243 // Allocate the content array and set it in the descriptor array. | 4245 // Allocate the content array and set it in the descriptor array. |
4244 { MaybeObject* maybe_array = | 4246 { MaybeObject* maybe_array = |
4245 heap->AllocateFixedArray(number_of_descriptors << 1); | 4247 heap->AllocateFixedArray(number_of_descriptors << 1); |
4246 if (!maybe_array->ToObject(&array)) return maybe_array; | 4248 if (!maybe_array->ToObject(&array)) return maybe_array; |
4247 } | 4249 } |
| 4250 result->set(kBitField3StorageIndex, Smi::FromInt(0)); |
4248 result->set(kContentArrayIndex, array); | 4251 result->set(kContentArrayIndex, array); |
4249 result->set(kEnumerationIndexIndex, | 4252 result->set(kEnumerationIndexIndex, |
4250 Smi::FromInt(PropertyDetails::kInitialIndex)); | 4253 Smi::FromInt(PropertyDetails::kInitialIndex)); |
4251 return result; | 4254 return result; |
4252 } | 4255 } |
4253 | 4256 |
4254 | 4257 |
4255 void DescriptorArray::SetEnumCache(FixedArray* bridge_storage, | 4258 void DescriptorArray::SetEnumCache(FixedArray* bridge_storage, |
4256 FixedArray* new_cache) { | 4259 FixedArray* new_cache) { |
4257 ASSERT(bridge_storage->length() >= kEnumCacheBridgeLength); | 4260 ASSERT(bridge_storage->length() >= kEnumCacheBridgeLength); |
(...skipping 1436 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5694 *RawField(target, kPrototypeOffset) = this; | 5697 *RawField(target, kPrototypeOffset) = this; |
5695 } | 5698 } |
5696 } | 5699 } |
5697 } | 5700 } |
5698 | 5701 |
5699 | 5702 |
5700 void Map::ClearNonLiveTransitions(Heap* heap, Object* real_prototype) { | 5703 void Map::ClearNonLiveTransitions(Heap* heap, Object* real_prototype) { |
5701 // Live DescriptorArray objects will be marked, so we must use | 5704 // Live DescriptorArray objects will be marked, so we must use |
5702 // low-level accessors to get and modify their data. | 5705 // low-level accessors to get and modify their data. |
5703 DescriptorArray* d = reinterpret_cast<DescriptorArray*>( | 5706 DescriptorArray* d = reinterpret_cast<DescriptorArray*>( |
5704 *RawField(this, Map::kInstanceDescriptorsOffset)); | 5707 *RawField(this, Map::kInstanceDescriptorsOrBitField3Offset)); |
5705 if (d == heap->raw_unchecked_empty_descriptor_array()) return; | 5708 if (d->IsEmpty()) return; |
5706 Smi* NullDescriptorDetails = | 5709 Smi* NullDescriptorDetails = |
5707 PropertyDetails(NONE, NULL_DESCRIPTOR).AsSmi(); | 5710 PropertyDetails(NONE, NULL_DESCRIPTOR).AsSmi(); |
5708 FixedArray* contents = reinterpret_cast<FixedArray*>( | 5711 FixedArray* contents = reinterpret_cast<FixedArray*>( |
5709 d->get(DescriptorArray::kContentArrayIndex)); | 5712 d->get(DescriptorArray::kContentArrayIndex)); |
5710 ASSERT(contents->length() >= 2); | 5713 ASSERT(contents->length() >= 2); |
5711 for (int i = 0; i < contents->length(); i += 2) { | 5714 for (int i = 0; i < contents->length(); i += 2) { |
5712 // If the pair (value, details) is a map transition, | 5715 // If the pair (value, details) is a map transition, |
5713 // check if the target is live. If not, null the descriptor. | 5716 // check if the target is live. If not, null the descriptor. |
5714 // Also drop the back pointer for that map transition, so that this | 5717 // Also drop the back pointer for that map transition, so that this |
5715 // map is not reached again by following a back pointer from a | 5718 // map is not reached again by following a back pointer from a |
(...skipping 4775 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
10491 if (break_point_objects()->IsUndefined()) return 0; | 10494 if (break_point_objects()->IsUndefined()) return 0; |
10492 // Single break point. | 10495 // Single break point. |
10493 if (!break_point_objects()->IsFixedArray()) return 1; | 10496 if (!break_point_objects()->IsFixedArray()) return 1; |
10494 // Multiple break points. | 10497 // Multiple break points. |
10495 return FixedArray::cast(break_point_objects())->length(); | 10498 return FixedArray::cast(break_point_objects())->length(); |
10496 } | 10499 } |
10497 #endif | 10500 #endif |
10498 | 10501 |
10499 | 10502 |
10500 } } // namespace v8::internal | 10503 } } // namespace v8::internal |
OLD | NEW |