OLD | NEW |
---|---|
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <iomanip> | 5 #include <iomanip> |
6 #include <sstream> | 6 #include <sstream> |
7 | 7 |
8 #include "src/v8.h" | 8 #include "src/v8.h" |
9 | 9 |
10 #include "src/accessors.h" | 10 #include "src/accessors.h" |
(...skipping 1898 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1909 // For slow-to-fast migrations JSObject::TransformToFastProperties() | 1909 // For slow-to-fast migrations JSObject::TransformToFastProperties() |
1910 // must be used instead. | 1910 // must be used instead. |
1911 CHECK(new_map->is_dictionary_map()); | 1911 CHECK(new_map->is_dictionary_map()); |
1912 | 1912 |
1913 // Slow-to-slow migration is trivial. | 1913 // Slow-to-slow migration is trivial. |
1914 object->set_map(*new_map); | 1914 object->set_map(*new_map); |
1915 } | 1915 } |
1916 } | 1916 } |
1917 | 1917 |
1918 | 1918 |
1919 // Returns true if during migration from |old_map| to |new_map| "tagged" | |
1920 // inobject fields are going to be replaced with unboxed double fields. | |
1921 static bool ShouldClearSlotsRecorded(Map* old_map, Map* new_map, | |
1922 int new_number_of_fields) { | |
1923 DisallowHeapAllocation no_gc; | |
1924 int inobject = new_map->inobject_properties(); | |
1925 DCHECK(inobject <= old_map->inobject_properties()); | |
1926 | |
1927 int limit = Min(inobject, new_number_of_fields); | |
1928 for (int i = 0; i < limit; i++) { | |
1929 FieldIndex index = FieldIndex::ForPropertyIndex(new_map, i); | |
1930 if (new_map->IsUnboxedDoubleField(index) && | |
1931 !old_map->IsUnboxedDoubleField(index)) { | |
1932 return true; | |
1933 } | |
1934 } | |
1935 return false; | |
1936 } | |
1937 | |
1938 | |
1939 static void RemoveOldToOldSlotsRecorded(Heap* heap, JSObject* object, | |
1940 FieldIndex index) { | |
1941 DisallowHeapAllocation no_gc; | |
1942 | |
1943 Object* old_value = object->RawFastPropertyAt(index); | |
1944 if (old_value->IsHeapObject()) { | |
1945 HeapObject* ho = HeapObject::cast(old_value); | |
1946 if (heap->InNewSpace(ho)) { | |
1947 // At this point there must be no old-to-new slots recorded for this | |
1948 // object. | |
1949 SLOW_DCHECK( | |
1950 !heap->store_buffer()->CellIsInStoreBuffer(reinterpret_cast<Address>( | |
1951 HeapObject::RawField(object, index.offset())))); | |
1952 } else { | |
1953 Page* p = Page::FromAddress(reinterpret_cast<Address>(ho)); | |
1954 if (p->IsEvacuationCandidate()) { | |
1955 Object** slot = HeapObject::RawField(object, index.offset()); | |
1956 SlotsBuffer::RemoveSlot(p->slots_buffer(), slot); | |
1957 } | |
1958 } | |
1959 } | |
1960 } | |
1961 | |
1962 | |
1919 // To migrate a fast instance to a fast map: | 1963 // To migrate a fast instance to a fast map: |
1920 // - First check whether the instance needs to be rewritten. If not, simply | 1964 // - First check whether the instance needs to be rewritten. If not, simply |
1921 // change the map. | 1965 // change the map. |
1922 // - Otherwise, allocate a fixed array large enough to hold all fields, in | 1966 // - Otherwise, allocate a fixed array large enough to hold all fields, in |
1923 // addition to unused space. | 1967 // addition to unused space. |
1924 // - Copy all existing properties in, in the following order: backing store | 1968 // - Copy all existing properties in, in the following order: backing store |
1925 // properties, unused fields, inobject properties. | 1969 // properties, unused fields, inobject properties. |
1926 // - If all allocation succeeded, commit the state atomically: | 1970 // - If all allocation succeeded, commit the state atomically: |
1927 // * Copy inobject properties from the backing store back into the object. | 1971 // * Copy inobject properties from the backing store back into the object. |
1928 // * Trim the difference in instance size of the object. This also cleanly | 1972 // * Trim the difference in instance size of the object. This also cleanly |
(...skipping 133 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
2062 value = isolate->factory()->uninitialized_value(); | 2106 value = isolate->factory()->uninitialized_value(); |
2063 } | 2107 } |
2064 int target_index = new_descriptors->GetFieldIndex(i) - inobject; | 2108 int target_index = new_descriptors->GetFieldIndex(i) - inobject; |
2065 if (target_index < 0) target_index += total_size; | 2109 if (target_index < 0) target_index += total_size; |
2066 array->set(target_index, *value); | 2110 array->set(target_index, *value); |
2067 } | 2111 } |
2068 | 2112 |
2069 // From here on we cannot fail and we shouldn't GC anymore. | 2113 // From here on we cannot fail and we shouldn't GC anymore. |
2070 DisallowHeapAllocation no_allocation; | 2114 DisallowHeapAllocation no_allocation; |
2071 | 2115 |
2116 Heap* heap = isolate->heap(); | |
2117 | |
2118 // If we are going to put an unboxed double to the field that used to | |
2119 // contain HeapObject we should ensure that this slot is removed from | |
2120 // both StoreBuffer and SlotsBuffer. | |
Hannes Payer (out of office)
2015/03/03 09:48:53
"or respective" => "and"
Igor Sheludko
2015/03/04 14:54:20
Done.
| |
2121 bool clear_slots_recorded = | |
2122 FLAG_unbox_double_fields && !heap->InNewSpace(object->address()) && | |
2123 ShouldClearSlotsRecorded(*old_map, *new_map, number_of_fields); | |
2124 if (clear_slots_recorded) { | |
2125 Address obj_address = object->address(); | |
2126 Address end_address = obj_address + old_map->instance_size(); | |
Hannes Payer (out of office)
2015/03/03 09:48:53
Why are you removing all the pointers?
Igor Sheludko
2015/03/04 14:54:20
Because in the next loop all the values will be wr
| |
2127 heap->store_buffer()->RemoveSlots(obj_address, end_address); | |
2128 } | |
2129 | |
2072 // Copy (real) inobject properties. If necessary, stop at number_of_fields to | 2130 // Copy (real) inobject properties. If necessary, stop at number_of_fields to |
2073 // avoid overwriting |one_pointer_filler_map|. | 2131 // avoid overwriting |one_pointer_filler_map|. |
2074 int limit = Min(inobject, number_of_fields); | 2132 int limit = Min(inobject, number_of_fields); |
2075 for (int i = 0; i < limit; i++) { | 2133 for (int i = 0; i < limit; i++) { |
2076 FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i); | 2134 FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i); |
2077 Object* value = array->get(external + i); | 2135 Object* value = array->get(external + i); |
2078 // Can't use JSObject::FastPropertyAtPut() because proper map was not set | |
2079 // yet. | |
2080 if (new_map->IsUnboxedDoubleField(index)) { | 2136 if (new_map->IsUnboxedDoubleField(index)) { |
2081 DCHECK(value->IsMutableHeapNumber()); | 2137 DCHECK(value->IsMutableHeapNumber()); |
2138 if (clear_slots_recorded && !old_map->IsUnboxedDoubleField(index)) { | |
2139 RemoveOldToOldSlotsRecorded(heap, *object, index); | |
2140 } | |
2082 object->RawFastDoublePropertyAtPut(index, | 2141 object->RawFastDoublePropertyAtPut(index, |
2083 HeapNumber::cast(value)->value()); | 2142 HeapNumber::cast(value)->value()); |
2084 } else { | 2143 } else { |
2085 object->RawFastPropertyAtPut(index, value); | 2144 object->RawFastPropertyAtPut(index, value); |
2086 } | 2145 } |
2087 } | 2146 } |
2088 | 2147 |
2089 Heap* heap = isolate->heap(); | |
2090 | |
2091 // If there are properties in the new backing store, trim it to the correct | 2148 // If there are properties in the new backing store, trim it to the correct |
2092 // size and install the backing store into the object. | 2149 // size and install the backing store into the object. |
2093 if (external > 0) { | 2150 if (external > 0) { |
2094 heap->RightTrimFixedArray<Heap::FROM_MUTATOR>(*array, inobject); | 2151 heap->RightTrimFixedArray<Heap::FROM_MUTATOR>(*array, inobject); |
2095 object->set_properties(*array); | 2152 object->set_properties(*array); |
2096 } | 2153 } |
2097 | 2154 |
2098 // Create filler object past the new instance size. | 2155 // Create filler object past the new instance size. |
2099 int new_instance_size = new_map->instance_size(); | 2156 int new_instance_size = new_map->instance_size(); |
2100 int instance_size_delta = old_map->instance_size() - new_instance_size; | 2157 int instance_size_delta = old_map->instance_size() - new_instance_size; |
(...skipping 15056 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
17157 CompilationInfo* info) { | 17214 CompilationInfo* info) { |
17158 Handle<DependentCode> codes = DependentCode::InsertCompilationInfo( | 17215 Handle<DependentCode> codes = DependentCode::InsertCompilationInfo( |
17159 handle(cell->dependent_code(), info->isolate()), | 17216 handle(cell->dependent_code(), info->isolate()), |
17160 DependentCode::kPropertyCellChangedGroup, info->object_wrapper()); | 17217 DependentCode::kPropertyCellChangedGroup, info->object_wrapper()); |
17161 if (*codes != cell->dependent_code()) cell->set_dependent_code(*codes); | 17218 if (*codes != cell->dependent_code()) cell->set_dependent_code(*codes); |
17162 info->dependencies(DependentCode::kPropertyCellChangedGroup)->Add( | 17219 info->dependencies(DependentCode::kPropertyCellChangedGroup)->Add( |
17163 cell, info->zone()); | 17220 cell, info->zone()); |
17164 } | 17221 } |
17165 | 17222 |
17166 } } // namespace v8::internal | 17223 } } // namespace v8::internal |
OLD | NEW |