OLD | NEW |
1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/objects.h" | 5 #include "src/objects.h" |
6 | 6 |
7 #include <cmath> | 7 #include <cmath> |
8 #include <iomanip> | 8 #include <iomanip> |
9 #include <sstream> | 9 #include <sstream> |
10 | 10 |
(...skipping 2865 matching lines...) |
2876 // * Copy inobject properties from the backing store back into the object. | 2876 // * Copy inobject properties from the backing store back into the object. |
2877 // * Trim the difference in instance size of the object. This also cleanly | 2877 // * Trim the difference in instance size of the object. This also cleanly |
2878 // frees inobject properties that moved to the backing store. | 2878 // frees inobject properties that moved to the backing store. |
2879 // * If there are properties left in the backing store, trim off the space used | 2879 // * If there are properties left in the backing store, trim off the space used |
2880 // to temporarily store the inobject properties. | 2880 // to temporarily store the inobject properties. |
2881 // * If there are properties left in the backing store, install the backing | 2881 // * If there are properties left in the backing store, install the backing |
2882 // store. | 2882 // store. |
2883 void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) { | 2883 void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) { |
2884 Isolate* isolate = object->GetIsolate(); | 2884 Isolate* isolate = object->GetIsolate(); |
2885 Handle<Map> old_map(object->map()); | 2885 Handle<Map> old_map(object->map()); |
2886 int old_number_of_fields; | 2886 // In case of a regular transition. |
2887 int number_of_fields = new_map->NumberOfFields(); | 2887 if (new_map->GetBackPointer() == *old_map) { |
2888 int inobject = new_map->GetInObjectProperties(); | 2888 // If the map does not add named properties, simply set the map. |
2889 int unused = new_map->unused_property_fields(); | 2889 if (old_map->NumberOfOwnDescriptors() == |
2890 | 2890 new_map->NumberOfOwnDescriptors()) { |
2891 // Nothing to do if no functions were converted to fields and no smis were | |
2892 // converted to doubles. | |
2893 if (!old_map->InstancesNeedRewriting(*new_map, number_of_fields, inobject, | |
2894 unused, &old_number_of_fields)) { | |
2895 object->synchronized_set_map(*new_map); | |
2896 return; | |
2897 } | |
2898 | |
2899 int total_size = number_of_fields + unused; | |
2900 int external = total_size - inobject; | |
2901 | |
2902 if (number_of_fields != old_number_of_fields && | |
2903 new_map->GetBackPointer() == *old_map) { | |
2904 PropertyDetails details = new_map->GetLastDescriptorDetails(); | |
2905 | |
2906 if (old_map->unused_property_fields() > 0) { | |
2907 if (details.representation().IsDouble()) { | |
2908 FieldIndex index = | |
2909 FieldIndex::ForDescriptor(*new_map, new_map->LastAdded()); | |
2910 if (new_map->IsUnboxedDoubleField(index)) { | |
2911 object->RawFastDoublePropertyAtPut(index, 0); | |
2912 } else { | |
2913 Handle<Object> value = isolate->factory()->NewHeapNumber(0, MUTABLE); | |
2914 object->RawFastPropertyAtPut(index, *value); | |
2915 } | |
2916 } | |
2917 object->synchronized_set_map(*new_map); | 2891 object->synchronized_set_map(*new_map); |
2918 return; | 2892 return; |
2919 } | 2893 } |
2920 | 2894 |
2921 DCHECK(number_of_fields == old_number_of_fields + 1); | 2895 PropertyDetails details = new_map->GetLastDescriptorDetails(); |
| 2896 // Either new_map adds a kDescriptor property, or a kField property for |
| 2897 // which there is still space, and which does not require a mutable double |
| 2898 // box (an out-of-object double). |
| 2899 if (details.location() == kDescriptor || |
| 2900 (old_map->unused_property_fields() > 0 && |
| 2901 ((FLAG_unbox_double_fields && object->properties()->length() == 0) || |
| 2902 !details.representation().IsDouble()))) { |
| 2903 object->synchronized_set_map(*new_map); |
| 2904 return; |
| 2905 } |
| 2906 |
| 2907 // If there is still space in the object, we need to allocate a mutable |
| 2908 // double box. |
| 2909 if (old_map->unused_property_fields() > 0) { |
| 2910 FieldIndex index = |
| 2911 FieldIndex::ForDescriptor(*new_map, new_map->LastAdded()); |
| 2912 DCHECK(details.representation().IsDouble()); |
| 2913 DCHECK(!new_map->IsUnboxedDoubleField(index)); |
| 2914 Handle<Object> value = isolate->factory()->NewHeapNumber(0, MUTABLE); |
| 2915 object->RawFastPropertyAtPut(index, *value); |
| 2916 object->synchronized_set_map(*new_map); |
| 2917 return; |
| 2918 } |
| 2919 |
2922 // This migration is a transition from a map that has run out of property | 2920 // This migration is a transition from a map that has run out of property |
2923 // space. Therefore it could be done by extending the backing store. | 2921 // space. Extend the backing store. |
2924 int grow_by = external - object->properties()->length(); | 2922 int grow_by = new_map->unused_property_fields() + 1; |
2925 Handle<FixedArray> old_storage = handle(object->properties(), isolate); | 2923 Handle<FixedArray> old_storage = handle(object->properties(), isolate); |
2926 Handle<FixedArray> new_storage = | 2924 Handle<FixedArray> new_storage = |
2927 isolate->factory()->CopyFixedArrayAndGrow(old_storage, grow_by); | 2925 isolate->factory()->CopyFixedArrayAndGrow(old_storage, grow_by); |
2928 | 2926 |
2929 // Properly initialize newly added property. | 2927 // Properly initialize newly added property. |
2930 Handle<Object> value; | 2928 Handle<Object> value; |
2931 if (details.representation().IsDouble()) { | 2929 if (details.representation().IsDouble()) { |
2932 value = isolate->factory()->NewHeapNumber(0, MUTABLE); | 2930 value = isolate->factory()->NewHeapNumber(0, MUTABLE); |
2933 } else { | 2931 } else { |
2934 value = isolate->factory()->uninitialized_value(); | 2932 value = isolate->factory()->uninitialized_value(); |
2935 } | 2933 } |
2936 DCHECK(details.type() == DATA); | 2934 DCHECK_EQ(DATA, details.type()); |
2937 int target_index = details.field_index() - inobject; | 2935 int target_index = details.field_index() - new_map->GetInObjectProperties(); |
2938 DCHECK(target_index >= 0); // Must be a backing store index. | 2936 DCHECK(target_index >= 0); // Must be a backing store index. |
2939 new_storage->set(target_index, *value); | 2937 new_storage->set(target_index, *value); |
2940 | 2938 |
2941 // From here on we cannot fail and we shouldn't GC anymore. | 2939 // From here on we cannot fail and we shouldn't GC anymore. |
2942 DisallowHeapAllocation no_allocation; | 2940 DisallowHeapAllocation no_allocation; |
2943 | 2941 |
2944 // Set the new property value and do the map transition. | 2942 // Set the new property value and do the map transition. |
2945 object->set_properties(*new_storage); | 2943 object->set_properties(*new_storage); |
2946 object->synchronized_set_map(*new_map); | 2944 object->synchronized_set_map(*new_map); |
2947 return; | 2945 return; |
2948 } | 2946 } |
| 2947 |
| 2948 int old_number_of_fields; |
| 2949 int number_of_fields = new_map->NumberOfFields(); |
| 2950 int inobject = new_map->GetInObjectProperties(); |
| 2951 int unused = new_map->unused_property_fields(); |
| 2952 |
| 2953 // Nothing to do if no functions were converted to fields and no smis were |
| 2954 // converted to doubles. |
| 2955 if (!old_map->InstancesNeedRewriting(*new_map, number_of_fields, inobject, |
| 2956 unused, &old_number_of_fields)) { |
| 2957 object->synchronized_set_map(*new_map); |
| 2958 return; |
| 2959 } |
| 2960 |
| 2961 int total_size = number_of_fields + unused; |
| 2962 int external = total_size - inobject; |
| 2963 |
2949 Handle<FixedArray> array = isolate->factory()->NewFixedArray(total_size); | 2964 Handle<FixedArray> array = isolate->factory()->NewFixedArray(total_size); |
2950 | 2965 |
2951 Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors()); | 2966 Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors()); |
2952 Handle<DescriptorArray> new_descriptors(new_map->instance_descriptors()); | 2967 Handle<DescriptorArray> new_descriptors(new_map->instance_descriptors()); |
2953 int old_nof = old_map->NumberOfOwnDescriptors(); | 2968 int old_nof = old_map->NumberOfOwnDescriptors(); |
2954 int new_nof = new_map->NumberOfOwnDescriptors(); | 2969 int new_nof = new_map->NumberOfOwnDescriptors(); |
2955 | 2970 |
2956 // This method only supports generalizing instances to at least the same | 2971 // This method only supports generalizing instances to at least the same |
2957 // number of properties. | 2972 // number of properties. |
2958 DCHECK(old_nof <= new_nof); | 2973 DCHECK(old_nof <= new_nof); |
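
Editor's note: the new regular-transition branch above replaces the old "number_of_fields != old_number_of_fields" special case with a chain of early returns. As a rough illustration only, here is a minimal standalone C++ sketch of that decision chain. MapModel, Decide, and the flag/length parameters are hypothetical stand-ins for the Map state, FLAG_unbox_double_fields, and object->properties()->length() that the real code consults; this is not V8 API.

// Standalone sketch (not V8 API) of the regular-transition fast path added
// to MigrateFastToFast above.
#include <cstdio>

namespace sketch {

// Hypothetical, simplified view of the Map state the fast path reads.
struct MapModel {
  int number_of_own_descriptors;
  int unused_property_fields;       // in-object slots still free
  bool last_property_is_field;      // kField vs. kDescriptor location
  bool last_representation_double;  // last descriptor has Double representation
};

enum class Action {
  kJustSetMap,          // no layout change needed, only swap the map pointer
  kAllocateMutableBox,  // reserve a mutable heap-number box for a double field
  kExtendBackingStore   // grow the out-of-object property array
};

// Mirrors the chain of early returns in the new regular-transition branch.
Action Decide(const MapModel& old_map, const MapModel& new_map,
              bool unbox_double_fields, int out_of_object_length) {
  // No named property added: only the map pointer changes.
  if (old_map.number_of_own_descriptors == new_map.number_of_own_descriptors)
    return Action::kJustSetMap;

  // Either a kDescriptor property, or a field that still fits in-object and
  // does not need an out-of-object mutable double box.
  if (!new_map.last_property_is_field ||
      (old_map.unused_property_fields > 0 &&
       ((unbox_double_fields && out_of_object_length == 0) ||
        !new_map.last_representation_double)))
    return Action::kJustSetMap;

  // Space is left in the object, but the new double field needs a mutable box.
  if (old_map.unused_property_fields > 0) return Action::kAllocateMutableBox;

  // Out of in-object space: grow the backing store by unused + 1 slots.
  return Action::kExtendBackingStore;
}

}  // namespace sketch

int main() {
  sketch::MapModel old_map{3, 0, true, false};
  sketch::MapModel new_map{4, 0, true, false};
  std::printf("action = %d\n",
              static_cast<int>(sketch::Decide(old_map, new_map, true, 0)));
}

In this model, a transition that only adds a kDescriptor property or that still has a free in-object slot reduces to a single synchronized map swap; only the last case has to allocate and therefore reaches the backing-store growth code in the hunk above.
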
(...skipping 17011 matching lines...) |
19970 if (cell->value() != *new_value) { | 19985 if (cell->value() != *new_value) { |
19971 cell->set_value(*new_value); | 19986 cell->set_value(*new_value); |
19972 Isolate* isolate = cell->GetIsolate(); | 19987 Isolate* isolate = cell->GetIsolate(); |
19973 cell->dependent_code()->DeoptimizeDependentCodeGroup( | 19988 cell->dependent_code()->DeoptimizeDependentCodeGroup( |
19974 isolate, DependentCode::kPropertyCellChangedGroup); | 19989 isolate, DependentCode::kPropertyCellChangedGroup); |
19975 } | 19990 } |
19976 } | 19991 } |
19977 | 19992 |
19978 } // namespace internal | 19993 } // namespace internal |
19979 } // namespace v8 | 19994 } // namespace v8 |
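
Editor's note: the final hunk only writes the property cell and deoptimizes dependent code when the stored value actually changes. A minimal standalone sketch of that change-then-invalidate pattern, with Cell and its callback list as hypothetical stand-ins for PropertyCell and its DependentCode group, might look like this; it is an illustration, not the V8 implementation.

// Standalone sketch (not V8 API): invalidate dependents only on a real change.
#include <cstdio>
#include <functional>
#include <string>
#include <vector>

struct Cell {
  std::string value;
  // Callbacks standing in for "deoptimize dependent code".
  std::vector<std::function<void()>> dependents;

  void SetValue(const std::string& new_value) {
    // Storing an identical value must not trigger any deoptimization.
    if (value == new_value) return;
    value = new_value;
    for (auto& invalidate : dependents) invalidate();
    dependents.clear();  // in this model, dependents re-register if needed
  }
};

int main() {
  Cell cell{"fast"};
  cell.dependents.push_back([] { std::printf("deoptimizing dependent code\n"); });
  cell.SetValue("fast");  // no change, no invalidation
  cell.SetValue("slow");  // value changed, dependents invalidated
}
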