Chromium Code Reviews

Unified Diff: src/heap/heap.cc

Issue 2578233003: [heap] Explicitly clear mark bits when writing filler for left and right trimming. (Closed)
Patch Set: turn off for landing (created 4 years ago)
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/heap.h"

 #include "src/accessors.h"
 #include "src/api.h"
 #include "src/ast/context-slot-cache.h"
 #include "src/base/bits.h"
(...skipping 1151 matching lines...)
         for (int i = 0; i < num_maps; i++) {
           // The deserializer will update the skip list.
           AllocationResult allocation = map_space()->AllocateRawUnaligned(
               Map::kSize, PagedSpace::IGNORE_SKIP_LIST);
           HeapObject* free_space = nullptr;
           if (allocation.To(&free_space)) {
             // Mark with a free list node, in case we have a GC before
             // deserializing.
             Address free_space_address = free_space->address();
             CreateFillerObjectAt(free_space_address, Map::kSize,
-                                 ClearRecordedSlots::kNo, ClearBlackArea::kNo);
+                                 ClearRecordedSlots::kNo);
             maps->Add(free_space_address);
           } else {
             perform_gc = true;
             break;
           }
         }
       } else if (space == LO_SPACE) {
         // Just check that we can allocate during deserialization.
         DCHECK_EQ(1, reservation->length());
         perform_gc = !CanExpandOldGeneration(reservation->at(0).size);
(...skipping 10 matching lines...)
             // The deserializer will update the skip list.
             allocation = paged_space(space)->AllocateRawUnaligned(
                 size, PagedSpace::IGNORE_SKIP_LIST);
           }
           HeapObject* free_space = nullptr;
           if (allocation.To(&free_space)) {
             // Mark with a free list node, in case we have a GC before
             // deserializing.
             Address free_space_address = free_space->address();
             CreateFillerObjectAt(free_space_address, size,
-                                 ClearRecordedSlots::kNo, ClearBlackArea::kNo);
+                                 ClearRecordedSlots::kNo);
             DCHECK(space < SerializerDeserializer::kNumberOfPreallocatedSpaces);
             chunk.start = free_space_address;
             chunk.end = free_space_address + size;
           } else {
             perform_gc = true;
             break;
           }
         }
       }
     }
     if (perform_gc) {
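
Both hunks above follow the same pattern: memory reserved for the deserializer is immediately stamped with a filler ("a free list node"), so a GC that walks the heap before deserialization finishes only ever sees well-formed objects. A minimal stand-alone sketch of that pattern, assuming hypothetical Arena, Reservation, and WriteFiller names (none of these are V8 APIs):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

struct Reservation {
  uintptr_t start;
  size_t size;
};

class Arena {
 public:
  // Tries to carve `size` bytes out of a fixed buffer; fails when full.
  std::optional<uintptr_t> AllocateRaw(size_t size) {
    if (used_ + size > buffer_.size()) return std::nullopt;
    uintptr_t addr = reinterpret_cast<uintptr_t>(buffer_.data()) + used_;
    used_ += size;
    return addr;
  }

 private:
  std::vector<uint8_t> buffer_ = std::vector<uint8_t>(1 << 20);
  size_t used_ = 0;
};

// Stand-in for CreateFillerObjectAt: stamp the range so a heap walker sees
// a well-formed "free" object instead of uninitialized memory.
void WriteFiller(uintptr_t addr, size_t size) {
  std::fill_n(reinterpret_cast<uint8_t*>(addr), size, 0);
}

// Reserve one chunk per requested size, filling each one immediately.
bool ReserveChunks(Arena* arena, const std::vector<size_t>& sizes,
                   std::vector<Reservation>* out) {
  for (size_t size : sizes) {
    std::optional<uintptr_t> addr = arena->AllocateRaw(size);
    if (!addr) return false;  // caller would perform a GC and retry
    WriteFiller(*addr, size);
    out->push_back({*addr, size});
  }
  return true;
}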
(...skipping 879 matching lines...)
   HeapObject* obj = nullptr;
   {
     AllocationAlignment align = double_align ? kDoubleAligned : kWordAligned;
     AllocationResult allocation = AllocateRaw(size, space, align);
     if (!allocation.To(&obj)) return allocation;
   }
 #ifdef DEBUG
   MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
   DCHECK(chunk->owner()->identity() == space);
 #endif
-  CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo,
-                       ClearBlackArea::kNo);
+  CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo);
   return obj;
 }


 const Heap::StringTypeTable Heap::string_type_table[] = {
 #define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
   { type, size, k##camel_name##MapRootIndex }             \
   ,
     STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
 #undef STRING_TYPE_ELEMENT
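
The string_type_table above is an X-macro table: STRING_TYPE_LIST expands the locally defined STRING_TYPE_ELEMENT once per string type, so the table can never drift out of sync with the type list. A tiny self-contained illustration of the same pattern, using a made-up COLOR_LIST rather than V8's macros:

// An X-macro list: the macro V is applied to every entry.
#define COLOR_LIST(V) \
  V(Red, 1)           \
  V(Green, 2)         \
  V(Blue, 3)

struct ColorEntry {
  const char* name;
  int id;
};

// Expanding the list with a one-off element macro builds the table, just
// as STRING_TYPE_LIST(STRING_TYPE_ELEMENT) does above.
const ColorEntry kColors[] = {
#define COLOR_ELEMENT(name, id) {#name, id},
    COLOR_LIST(COLOR_ELEMENT)
#undef COLOR_ELEMENT
};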
(...skipping 941 matching lines...)
   instance->set_osr_loop_nesting_level(0);
   instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
   instance->set_constant_pool(constant_pool);
   instance->set_handler_table(empty_fixed_array());
   instance->set_source_position_table(empty_byte_array());
   CopyBytes(instance->GetFirstBytecodeAddress(), raw_bytecodes, length);

   return result;
 }

-void Heap::CreateFillerObjectAt(Address addr, int size, ClearRecordedSlots mode,
-                                ClearBlackArea black_area_mode) {
+void Heap::CreateFillerObjectAt(Address addr, int size,
+                                ClearRecordedSlots mode) {
   if (size == 0) return;
   HeapObject* filler = HeapObject::FromAddress(addr);
   if (size == kPointerSize) {
     filler->set_map_no_write_barrier(
         reinterpret_cast<Map*>(root(kOnePointerFillerMapRootIndex)));
   } else if (size == 2 * kPointerSize) {
     filler->set_map_no_write_barrier(
         reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex)));
   } else {
     DCHECK_GT(size, 2 * kPointerSize);
     filler->set_map_no_write_barrier(
         reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)));
     FreeSpace::cast(filler)->nobarrier_set_size(size);
   }
   if (mode == ClearRecordedSlots::kYes) {
     ClearRecordedSlotRange(addr, addr + size);
   }

-  // If the location where the filler is created is within a black area we have
-  // to clear the mark bits of the filler space.
-  if (black_area_mode == ClearBlackArea::kYes &&
-      incremental_marking()->black_allocation() &&
-      Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(addr))) {
-    Page* page = Page::FromAddress(addr);
-    page->markbits()->ClearRange(page->AddressToMarkbitIndex(addr),
-                                 page->AddressToMarkbitIndex(addr + size));
-  }
-
   // At this point, we may be deserializing the heap from a snapshot, and
   // none of the maps have been created yet and are NULL.
   DCHECK((filler->map() == NULL && !deserialization_complete_) ||
          filler->map()->IsMap());
 }
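
With the ClearBlackArea parameter gone, CreateFillerObjectAt is back to a single job: stamping a dead region with a filler map so the heap stays iterable. Its three-way size dispatch can be modeled in isolation; the sketch below uses hypothetical FillerKind and MakeFiller names, not V8 types:

#include <cassert>
#include <cstddef>

constexpr size_t kWordSize = sizeof(void*);

enum class FillerKind { kOneWord, kTwoWord, kFreeSpace };

struct Filler {
  FillerKind kind;
  size_t size;  // self-describing length, only needed for kFreeSpace
};

// Mirrors the dispatch above: the fixed one- and two-word fillers carry no
// length field, so anything larger needs a FreeSpace record that does.
Filler MakeFiller(size_t size) {
  assert(size > 0);
  if (size == kWordSize) return {FillerKind::kOneWord, size};
  if (size == 2 * kWordSize) return {FillerKind::kTwoWord, size};
  assert(size > 2 * kWordSize);  // same invariant as the DCHECK_GT above
  return {FillerKind::kFreeSpace, size};
}

After this patch, clearing mark bits is the explicit responsibility of the trimming call sites below, which know that the filler they create may lie inside a black area.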


 bool Heap::CanMoveObjectStart(HeapObject* object) {
   if (!FLAG_move_object_start) return false;

(...skipping 54 matching lines...)
   // a black area.
   if (!incremental_marking()->black_allocation() ||
       !Marking::IsBlack(ObjectMarking::MarkBitFrom(new_start))) {
     IncrementalMarking::TransferMark(this, old_start, new_start);
   }

   // Technically in new space this write might be omitted (except for
   // debug mode which iterates through the heap), but to play safer
   // we still do it.
   CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
+
+  // Clear the mark bits of the black area that belongs now to the filler.
+  // This is an optimization. The sweeper will release black fillers anyway.
+  if (incremental_marking()->black_allocation() &&
+      Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(old_start))) {
+    Page* page = Page::FromAddress(old_start);
+    page->markbits()->ClearRange(
+        page->AddressToMarkbitIndex(old_start),
+        page->AddressToMarkbitIndex(old_start + bytes_to_trim));
+  }

   // Initialize header of the trimmed array. Since left trimming is only
   // performed on pages which are not concurrently swept creating a filler
   // object does not require synchronization.
   DCHECK(CanMoveObjectStart(object));
   Object** former_start = HeapObject::RawField(object, 0);
   int new_start_index = elements_to_trim * (element_size / kPointerSize);
   former_start[new_start_index] = map;
   former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);

   FixedArrayBase* new_object =
(...skipping 51 matching lines...)
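
The mark-bit clearing added in this hunk turns a pair of addresses into bit indices on the page's marking bitmap and clears that range, so the filler left behind by the trim no longer counts as black. A rough stand-alone model of such a bitmap (MarkBitmap is hypothetical; V8's real Bitmap::ClearRange works on whole cells at a time rather than looping bit by bit):

#include <cstddef>
#include <cstdint>
#include <vector>

class MarkBitmap {
 public:
  explicit MarkBitmap(size_t word_count) : bits_((word_count + 63) / 64, 0) {}

  // One mark bit per heap word: index = byte offset / word size.
  static size_t AddressToIndex(uintptr_t page_start, uintptr_t addr) {
    return (addr - page_start) / sizeof(void*);
  }

  // Clears bits [begin, end); the simplest correct formulation.
  void ClearRange(size_t begin, size_t end) {
    for (size_t i = begin; i < end; i++) {
      bits_[i / 64] &= ~(uint64_t{1} << (i % 64));
    }
  }

 private:
  std::vector<uint64_t> bits_;
};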
   // Technically in new space this write might be omitted (except for
   // debug mode which iterates through the heap), but to play safer
   // we still do it.
   // We do not create a filler for objects in large object space.
   // TODO(hpayer): We should shrink the large object page if the size
   // of the object changed significantly.
   if (!lo_space()->Contains(object)) {
     CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
   }

+  // Clear the mark bits of the black area that belongs now to the filler.
+  // This is an optimization. The sweeper will release black fillers anyway.
+  if (incremental_marking()->black_allocation() &&
+      Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(new_end))) {
+    Page* page = Page::FromAddress(new_end);
+    page->markbits()->ClearRange(
+        page->AddressToMarkbitIndex(new_end),
+        page->AddressToMarkbitIndex(new_end + bytes_to_trim));
+  }
+
   // Initialize header of the trimmed array. We are storing the new length
   // using release store after creating a filler for the left-over space to
   // avoid races with the sweeper thread.
   object->synchronized_set_length(len - elements_to_trim);

   // Maintain consistency of live bytes during incremental marking
   AdjustLiveBytes(object, -bytes_to_trim);

   // Notify the heap profiler of change in object layout. The array may not be
   // moved during GC, and size has to be adjusted nevertheless.
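
The release-store comment in this hunk pins down the ordering that makes right trimming safe next to a concurrent sweeper: write the filler over the vacated tail first, then publish the shorter length. Sketched with standard atomics (ArrayHeader and RightTrim are illustrative only, not V8 code):

#include <atomic>
#include <cstddef>

struct ArrayHeader {
  std::atomic<int> length;
};

// The release store pairs with an acquire load in the sweeper, so the
// sweeper can only observe the new length after the filler write.
void RightTrim(ArrayHeader* array, int elements_to_trim, void* new_end,
               size_t bytes_to_trim, void (*write_filler)(void*, size_t)) {
  write_filler(new_end, bytes_to_trim);  // make the tail iterable first
  int old_len = array->length.load(std::memory_order_relaxed);
  array->length.store(old_len - elements_to_trim, std::memory_order_release);
}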
(...skipping 3239 matching lines...)
 }


 // static
 int Heap::GetStaticVisitorIdForMap(Map* map) {
   return StaticVisitorBase::GetVisitorId(map);
 }

 }  // namespace internal
 }  // namespace v8