| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
| 9 #include "src/code-stubs.h" | 9 #include "src/code-stubs.h" |
| 10 #include "src/compilation-cache.h" | 10 #include "src/compilation-cache.h" |
| (...skipping 23 matching lines...) |
| 34 | 34 |
| 35 | 35 |
| 36 // ------------------------------------------------------------------------- | 36 // ------------------------------------------------------------------------- |
| 37 // MarkCompactCollector | 37 // MarkCompactCollector |
| 38 | 38 |
| 39 MarkCompactCollector::MarkCompactCollector(Heap* heap) | 39 MarkCompactCollector::MarkCompactCollector(Heap* heap) |
| 40 : // NOLINT | 40 : // NOLINT |
| 41 #ifdef DEBUG | 41 #ifdef DEBUG |
| 42 state_(IDLE), | 42 state_(IDLE), |
| 43 #endif | 43 #endif |
| 44 sweep_precisely_(false), | |
| 45 reduce_memory_footprint_(false), | 44 reduce_memory_footprint_(false), |
| 46 abort_incremental_marking_(false), | 45 abort_incremental_marking_(false), |
| 47 marking_parity_(ODD_MARKING_PARITY), | 46 marking_parity_(ODD_MARKING_PARITY), |
| 48 compacting_(false), | 47 compacting_(false), |
| 49 was_marked_incrementally_(false), | 48 was_marked_incrementally_(false), |
| 50 sweeping_in_progress_(false), | 49 sweeping_in_progress_(false), |
| 51 pending_sweeper_jobs_semaphore_(0), | 50 pending_sweeper_jobs_semaphore_(0), |
| 52 sequential_sweeping_(false), | 51 sequential_sweeping_(false), |
| 53 migration_slots_buffer_(NULL), | 52 migration_slots_buffer_(NULL), |
| 54 heap_(heap), | 53 heap_(heap), |
| (...skipping 138 matching lines...) |
| 193 while (current < limit) { | 192 while (current < limit) { |
| 194 HeapObject* object = HeapObject::FromAddress(current); | 193 HeapObject* object = HeapObject::FromAddress(current); |
| 195 object->Iterate(&visitor); | 194 object->Iterate(&visitor); |
| 196 current += object->Size(); | 195 current += object->Size(); |
| 197 } | 196 } |
| 198 } | 197 } |
| 199 } | 198 } |
| 200 | 199 |
| 201 | 200 |
| 202 static void VerifyEvacuation(Heap* heap, PagedSpace* space) { | 201 static void VerifyEvacuation(Heap* heap, PagedSpace* space) { |
| 203 if (!space->swept_precisely()) return; | |
| 204 if (FLAG_use_allocation_folding && | 202 if (FLAG_use_allocation_folding && |
| 205 (space == heap->old_pointer_space() || space == heap->old_data_space())) { | 203 (space == heap->old_pointer_space() || space == heap->old_data_space())) { |
| 206 return; | 204 return; |
| 207 } | 205 } |
| 208 PageIterator it(space); | 206 PageIterator it(space); |
| 209 | 207 |
| 210 while (it.has_next()) { | 208 while (it.has_next()) { |
| 211 Page* p = it.next(); | 209 Page* p = it.next(); |
| 212 if (p->IsEvacuationCandidate()) continue; | 210 if (p->IsEvacuationCandidate()) continue; |
| 213 VerifyEvacuation(p); | 211 VerifyEvacuation(p); |
| (...skipping 2905 matching lines...) |
| 3119 | 3117 |
| 3120 heap_->IncrementYoungSurvivorsCounter(survivors_size); | 3118 heap_->IncrementYoungSurvivorsCounter(survivors_size); |
| 3121 new_space->set_age_mark(new_space->top()); | 3119 new_space->set_age_mark(new_space->top()); |
| 3122 } | 3120 } |
| 3123 | 3121 |
| 3124 | 3122 |
| 3125 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) { | 3123 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) { |
| 3126 AlwaysAllocateScope always_allocate(isolate()); | 3124 AlwaysAllocateScope always_allocate(isolate()); |
| 3127 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3125 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 3128 DCHECK(p->IsEvacuationCandidate() && !p->WasSwept()); | 3126 DCHECK(p->IsEvacuationCandidate() && !p->WasSwept()); |
| 3129 p->MarkSweptPrecisely(); | 3127 p->SetWasSwept(); |
| 3130 | 3128 |
| 3131 int offsets[16]; | 3129 int offsets[16]; |
| 3132 | 3130 |
| 3133 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { | 3131 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { |
| 3134 Address cell_base = it.CurrentCellBase(); | 3132 Address cell_base = it.CurrentCellBase(); |
| 3135 MarkBit::CellType* cell = it.CurrentCell(); | 3133 MarkBit::CellType* cell = it.CurrentCell(); |
| 3136 | 3134 |
| 3137 if (*cell == 0) continue; | 3135 if (*cell == 0) continue; |
| 3138 | 3136 |
| 3139 int live_objects = MarkWordToObjectStarts(*cell, offsets); | 3137 int live_objects = MarkWordToObjectStarts(*cell, offsets); |
| (...skipping 143 matching lines...) |
| 3283 if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) { | 3281 if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) { |
| 3284 DCHECK(free_list == NULL); | 3282 DCHECK(free_list == NULL); |
| 3285 return space->Free(start, size); | 3283 return space->Free(start, size); |
| 3286 } else { | 3284 } else { |
| 3287 // TODO(hpayer): account for wasted bytes in concurrent sweeping too. | 3285 // TODO(hpayer): account for wasted bytes in concurrent sweeping too. |
| 3288 return size - free_list->Free(start, size); | 3286 return size - free_list->Free(start, size); |
| 3289 } | 3287 } |
| 3290 } | 3288 } |
| 3291 | 3289 |
| 3292 | 3290 |
| 3293 // Sweep a space precisely. After this has been done the space can | 3291 // Sweeps a page. After sweeping, the page can be iterated. |
| 3294 // be iterated precisely, hitting only the live objects. Code space | |
| 3295 // is always swept precisely because we want to be able to iterate | |
| 3296 // over it. Map space is swept precisely, because it is not compacted. | |
| 3297 // Slots in live objects pointing into evacuation candidates are updated | 3292 // Slots in live objects pointing into evacuation candidates are updated |
| 3298 // if requested. | 3293 // if requested. |
| 3299 // Returns the size of the biggest continuous freed memory chunk in bytes. | 3294 // Returns the size of the biggest continuous freed memory chunk in bytes. |
| 3300 template <SweepingMode sweeping_mode, | 3295 template <SweepingMode sweeping_mode, |
| 3301 MarkCompactCollector::SweepingParallelism parallelism, | 3296 MarkCompactCollector::SweepingParallelism parallelism, |
| 3302 SkipListRebuildingMode skip_list_mode, | 3297 SkipListRebuildingMode skip_list_mode, |
| 3303 FreeSpaceTreatmentMode free_space_mode> | 3298 FreeSpaceTreatmentMode free_space_mode> |
| 3304 static int SweepPrecisely(PagedSpace* space, FreeList* free_list, Page* p, | 3299 static int Sweep(PagedSpace* space, FreeList* free_list, Page* p, |
| 3305 ObjectVisitor* v) { | 3300 ObjectVisitor* v) { |
| 3306 DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept()); | 3301 DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept()); |
| 3307 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST, | 3302 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST, |
| 3308 space->identity() == CODE_SPACE); | 3303 space->identity() == CODE_SPACE); |
| 3309 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); | 3304 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); |
| 3310 DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD || | 3305 DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD || |
| 3311 sweeping_mode == SWEEP_ONLY); | 3306 sweeping_mode == SWEEP_ONLY); |
| 3312 | 3307 |
| 3313 Address free_start = p->area_start(); | 3308 Address free_start = p->area_start(); |
| 3314 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); | 3309 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); |
| 3315 int offsets[16]; | 3310 int offsets[16]; |
| (...skipping 61 matching lines...) |
| 3377 } | 3372 } |
| 3378 #endif | 3373 #endif |
| 3379 } | 3374 } |
| 3380 p->ResetLiveBytes(); | 3375 p->ResetLiveBytes(); |
| 3381 | 3376 |
| 3382 if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) { | 3377 if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) { |
| 3383 // When concurrent sweeping is active, the page will be marked after | 3378 // When concurrent sweeping is active, the page will be marked after |
| 3384 // sweeping by the main thread. | 3379 // sweeping by the main thread. |
| 3385 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE); | 3380 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE); |
| 3386 } else { | 3381 } else { |
| 3387 p->MarkSweptPrecisely(); | 3382 p->SetWasSwept(); |
| 3388 } | 3383 } |
| 3389 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); | 3384 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); |
| 3390 } | 3385 } |
| 3391 | 3386 |
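
For orientation: both the old SweepPrecisely and the renamed Sweep walk a page's mark bitmap cell by cell, free the gap in front of each live object, and report the largest freed chunk. A simplified sketch of that loop follows; every type and helper in it is a stand-in rather than V8's real API, and __builtin_ctz assumes GCC or Clang.

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    // Simplified stand-ins; none of these are the real V8 types.
    constexpr size_t kPointerSize = sizeof(void*);
    constexpr int kBitsPerCell = 32;  // one mark bit per pointer-sized word

    struct SimplePage {
      uintptr_t area_start;
      uintptr_t area_end;
      const uint32_t* mark_cells;  // each cell covers 32 consecutive words
      size_t num_cells;
    };

    // Stand-in helpers: fixed two-word objects, no real free list.
    static size_t ObjectSizeAt(uintptr_t) { return 2 * kPointerSize; }
    static void FreeBlock(uintptr_t, size_t) {}

    // Frees every gap between live objects on a page and returns the largest
    // contiguous freed chunk, the quantity Sweep() reports to its caller.
    static size_t SweepPageSketch(const SimplePage& p) {
      uintptr_t free_start = p.area_start;
      size_t max_freed = 0;
      for (size_t i = 0; i < p.num_cells; i++) {
        uint32_t cell = p.mark_cells[i];
        uintptr_t cell_base = p.area_start + i * kBitsPerCell * kPointerSize;
        while (cell != 0) {
          int bit = __builtin_ctz(cell);  // lowest set bit: next live object
          cell &= cell - 1;               // clear that bit
          uintptr_t live = cell_base + bit * kPointerSize;
          if (live > free_start) {
            FreeBlock(free_start, live - free_start);
            max_freed =
                std::max(max_freed, static_cast<size_t>(live - free_start));
          }
          free_start = live + ObjectSizeAt(live);
        }
      }
      if (p.area_end > free_start) {
        FreeBlock(free_start, p.area_end - free_start);
        max_freed =
            std::max(max_freed, static_cast<size_t>(p.area_end - free_start));
      }
      return max_freed;
    }

The real Sweep additionally rebuilds the skip list, zaps or visits live objects, and routes frees through Free<mode>(), all selected at compile time by the template parameters shown above.
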
| 3392 | 3387 |
| 3393 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) { | 3388 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) { |
| 3394 Page* p = Page::FromAddress(code->address()); | 3389 Page* p = Page::FromAddress(code->address()); |
| 3395 | 3390 |
| 3396 if (p->IsEvacuationCandidate() || p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { | 3391 if (p->IsEvacuationCandidate() || p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { |
| 3397 return false; | 3392 return false; |
| (...skipping 216 matching lines...) |
| 3614 } else { | 3609 } else { |
| 3615 if (FLAG_gc_verbose) { | 3610 if (FLAG_gc_verbose) { |
| 3616 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", | 3611 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", |
| 3617 reinterpret_cast<intptr_t>(p)); | 3612 reinterpret_cast<intptr_t>(p)); |
| 3618 } | 3613 } |
| 3619 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3614 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 3620 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); | 3615 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); |
| 3621 | 3616 |
| 3622 switch (space->identity()) { | 3617 switch (space->identity()) { |
| 3623 case OLD_DATA_SPACE: | 3618 case OLD_DATA_SPACE: |
| 3624 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p); | 3619 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, |
| 3620 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, |
| 3621 &updating_visitor); |
| 3625 break; | 3622 break; |
| 3626 case OLD_POINTER_SPACE: | 3623 case OLD_POINTER_SPACE: |
| 3627 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, | 3624 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, |
| 3628 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>( | 3625 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, |
| 3629 space, NULL, p, &updating_visitor); | 3626 &updating_visitor); |
| 3630 break; | 3627 break; |
| 3631 case CODE_SPACE: | 3628 case CODE_SPACE: |
| 3632 if (FLAG_zap_code_space) { | 3629 if (FLAG_zap_code_space) { |
| 3633 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, | 3630 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, |
| 3634 REBUILD_SKIP_LIST, ZAP_FREE_SPACE>( | 3631 REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p, |
| 3635 space, NULL, p, &updating_visitor); | 3632 &updating_visitor); |
| 3636 } else { | 3633 } else { |
| 3637 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, | 3634 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, |
| 3638 REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>( | 3635 REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, |
| 3639 space, NULL, p, &updating_visitor); | 3636 &updating_visitor); |
| 3640 } | 3637 } |
| 3641 break; | 3638 break; |
| 3642 default: | 3639 default: |
| 3643 UNREACHABLE(); | 3640 UNREACHABLE(); |
| 3644 break; | 3641 break; |
| 3645 } | 3642 } |
| 3646 } | 3643 } |
| 3647 } | 3644 } |
| 3648 } | 3645 } |
| 3649 | 3646 |
| (...skipping 462 matching lines...) |
| 4112 for (int i = 0; i < objects_in_these_8_words; i++) { | 4109 for (int i = 0; i < objects_in_these_8_words; i++) { |
| 4113 starts[objects++] = offset + table[1 + i]; | 4110 starts[objects++] = offset + table[1 + i]; |
| 4114 } | 4111 } |
| 4115 } | 4112 } |
| 4116 offset += 8; | 4113 offset += 8; |
| 4117 } | 4114 } |
| 4118 return objects; | 4115 return objects; |
| 4119 } | 4116 } |
| 4120 | 4117 |
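
The visible tail of MarkWordToObjectStarts drains each mark word a byte at a time through a precomputed table (objects_in_these_8_words, table[1 + i]). A naive bit-scan version of the same contract, shown only as a sketch of what those table lookups compute (__builtin_ctz assumes GCC or Clang):

    #include <cstdint>

    // For each set bit in a 32-bit mark word, record its bit index, which is
    // the word offset of an object start. Returns the number of live objects
    // found: at most 16, since no two consecutive bits may be set. The real
    // code batches this with the per-byte lookup table seen above.
    static int MarkWordToObjectStartsNaive(uint32_t cell, int* starts) {
      int objects = 0;
      while (cell != 0) {
        int bit = __builtin_ctz(cell);  // index of the lowest set bit
        starts[objects++] = bit;
        cell &= cell - 1;               // clear the lowest set bit
      }
      return objects;
    }
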
| 4121 | 4118 |
| 4122 static inline Address DigestFreeStart(Address approximate_free_start, | |
| 4123 uint32_t free_start_cell) { | |
| 4124 DCHECK(free_start_cell != 0); | |
| 4125 | |
| 4126 // No consecutive 1 bits. | |
| 4127 DCHECK((free_start_cell & (free_start_cell << 1)) == 0); | |
| 4128 | |
| 4129 int offsets[16]; | |
| 4130 uint32_t cell = free_start_cell; | |
| 4131 int offset_of_last_live; | |
| 4132 if ((cell & 0x80000000u) != 0) { | |
| 4133 // This case would overflow below. | |
| 4134 offset_of_last_live = 31; | |
| 4135 } else { | |
| 4136 // Remove all but one bit, the most significant. This is an optimization | |
| 4137 // that may or may not be worthwhile. | |
| 4138 cell |= cell >> 16; | |
| 4139 cell |= cell >> 8; | |
| 4140 cell |= cell >> 4; | |
| 4141 cell |= cell >> 2; | |
| 4142 cell |= cell >> 1; | |
| 4143 cell = (cell + 1) >> 1; | |
| 4144 int live_objects = MarkWordToObjectStarts(cell, offsets); | |
| 4145 DCHECK(live_objects == 1); | |
| 4146 offset_of_last_live = offsets[live_objects - 1]; | |
| 4147 } | |
| 4148 Address last_live_start = | |
| 4149 approximate_free_start + offset_of_last_live * kPointerSize; | |
| 4150 HeapObject* last_live = HeapObject::FromAddress(last_live_start); | |
| 4151 Address free_start = last_live_start + last_live->Size(); | |
| 4152 return free_start; | |
| 4153 } | |
| 4154 | |
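DigestFreeStart reduces a cell to its most significant set bit, so that MarkWordToObjectStarts then reports exactly one object: the last live one in the cell. The smear trick in isolation, as a sketch with a worked example:

    #include <cassert>
    #include <cstdint>

    // Most-significant-set-bit isolation, as in DigestFreeStart. The
    // original special-cases 0x80000000u because the +1 below would
    // overflow.
    static uint32_t HighestSetBit(uint32_t cell) {
      assert(cell != 0 && (cell & 0x80000000u) == 0);
      cell |= cell >> 16;      // smear every set bit downward...
      cell |= cell >> 8;
      cell |= cell >> 4;
      cell |= cell >> 2;
      cell |= cell >> 1;       // ...cell is now 2^k - 1, k = MSB index + 1
      return (cell + 1) >> 1;  // 2^k, halved back to the MSB itself
    }
    // Worked example: 0x00480000 smears to 0x007FFFFF; +1 gives 0x00800000;
    // >>1 yields 0x00400000, the highest set bit of the input.
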
| 4155 | |
| 4156 static inline Address StartOfLiveObject(Address block_address, uint32_t cell) { | |
| 4157 DCHECK(cell != 0); | |
| 4158 | |
| 4159 // No consecutive 1 bits. | |
| 4160 DCHECK((cell & (cell << 1)) == 0); | |
| 4161 | |
| 4162 int offsets[16]; | |
| 4163 if (cell == 0x80000000u) { // Avoid overflow below. | |
| 4164 return block_address + 31 * kPointerSize; | |
| 4165 } | |
| 4166 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1; | |
| 4167 DCHECK((first_set_bit & cell) == first_set_bit); | |
| 4168 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets); | |
| 4169 DCHECK(live_objects == 1); | |
| 4170 USE(live_objects); | |
| 4171 return block_address + offsets[0] * kPointerSize; | |
| 4172 } | |
| 4173 | |
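StartOfLiveObject does the complementary reduction, isolating the lowest set bit, i.e. the first object start in the cell. The same trick in isolation (a sketch; the overflow special case mirrors the one in the function above):

    #include <cassert>
    #include <cstdint>

    // Lowest-set-bit isolation, as in StartOfLiveObject; equivalent to the
    // classic cell & -cell. The original special-cases 0x80000000u because
    // the +1 below would wrap to zero.
    static uint32_t LowestSetBit(uint32_t cell) {
      assert(cell != 0 && cell != 0x80000000u);
      // cell ^ (cell - 1) sets the lowest set bit plus every bit below it.
      // Example: 0b101000 ^ 0b100111 = 0b001111; +1 = 0b010000;
      // >>1 = 0b001000, the lowest set bit of the input.
      return ((cell ^ (cell - 1)) + 1) >> 1;
    }
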
| 4174 | |
| 4175 // Force instantiation of templatized SweepConservatively method for | |
| 4176 // SWEEP_ON_MAIN_THREAD mode. | |
| 4177 template int MarkCompactCollector::SweepConservatively< | |
| 4178 MarkCompactCollector::SWEEP_ON_MAIN_THREAD>(PagedSpace*, FreeList*, Page*); | |
| 4179 | |
| 4180 | |
| 4181 // Force instantiation of templatized SweepConservatively method for | |
| 4182 // SWEEP_IN_PARALLEL mode. | |
| 4183 template int MarkCompactCollector::SweepConservatively< | |
| 4184 MarkCompactCollector::SWEEP_IN_PARALLEL>(PagedSpace*, FreeList*, Page*); | |
| 4185 | |
| 4186 | |
| 4187 // Sweeps a space conservatively. After this has been done the larger free | |
| 4188 // spaces have been put on the free list and the smaller ones have been | |
| 4189 // ignored and left untouched. A free space is always either ignored or put | |
| 4190 // on the free list, never split up into two parts. This is important | |
| 4191 // because it means that any FreeSpace maps left actually describe a region of | |
| 4192 // memory that can be ignored when scanning. Dead objects other than free | |
| 4193 // spaces will not contain the free space map. | |
| 4194 template <MarkCompactCollector::SweepingParallelism mode> | |
| 4195 int MarkCompactCollector::SweepConservatively(PagedSpace* space, | |
| 4196 FreeList* free_list, Page* p) { | |
| 4197 DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept()); | |
| 4198 DCHECK( | |
| 4199 (mode == MarkCompactCollector::SWEEP_IN_PARALLEL && free_list != NULL) || | |
| 4200 (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD && | |
| 4201 free_list == NULL)); | |
| 4202 | |
| 4203 intptr_t freed_bytes = 0; | |
| 4204 intptr_t max_freed_bytes = 0; | |
| 4205 size_t size = 0; | |
| 4206 | |
| 4207 // Skip over all the dead objects at the start of the page and mark them free. | |
| 4208 Address cell_base = 0; | |
| 4209 MarkBit::CellType* cell = NULL; | |
| 4210 MarkBitCellIterator it(p); | |
| 4211 for (; !it.Done(); it.Advance()) { | |
| 4212 cell_base = it.CurrentCellBase(); | |
| 4213 cell = it.CurrentCell(); | |
| 4214 if (*cell != 0) break; | |
| 4215 } | |
| 4216 | |
| 4217 if (it.Done()) { | |
| 4218 size = p->area_end() - p->area_start(); | |
| 4219 freed_bytes = | |
| 4220 Free<mode>(space, free_list, p->area_start(), static_cast<int>(size)); | |
| 4221 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | |
| 4222 DCHECK_EQ(0, p->LiveBytes()); | |
| 4223 if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) { | |
| 4224 // When concurrent sweeping is active, the page will be marked after | |
| 4225 // sweeping by the main thread. | |
| 4226 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE); | |
| 4227 } else { | |
| 4228 p->MarkSweptConservatively(); | |
| 4229 } | |
| 4230 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); | |
| 4231 } | |
| 4232 | |
| 4233 // Grow the size of the start-of-page free space a little to get up to the | |
| 4234 // first live object. | |
| 4235 Address free_end = StartOfLiveObject(cell_base, *cell); | |
| 4236 // Free the first free space. | |
| 4237 size = free_end - p->area_start(); | |
| 4238 freed_bytes = | |
| 4239 Free<mode>(space, free_list, p->area_start(), static_cast<int>(size)); | |
| 4240 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | |
| 4241 | |
| 4242 // The start of the current free area is represented in undigested form by | |
| 4243 // the address of the last 32-word section that contained a live object and | |
| 4244 // the marking bitmap for that cell, which describes where the live object | |
| 4245 // started. Unless we find a large free space in the bitmap we will not | |
| 4246 // digest this pair into a real address. We start the iteration here at the | |
| 4247 // first word in the marking bitmap that indicates a live object. | |
| 4248 Address free_start = cell_base; | |
| 4249 MarkBit::CellType free_start_cell = *cell; | |
| 4250 | |
| 4251 for (; !it.Done(); it.Advance()) { | |
| 4252 cell_base = it.CurrentCellBase(); | |
| 4253 cell = it.CurrentCell(); | |
| 4254 if (*cell != 0) { | |
| 4255 // We have a live object. Check approximately whether it is more than 32 | |
| 4256 // words since the last live object. | |
| 4257 if (cell_base - free_start > 32 * kPointerSize) { | |
| 4258 free_start = DigestFreeStart(free_start, free_start_cell); | |
| 4259 if (cell_base - free_start > 32 * kPointerSize) { | |
| 4260 // Now that we know the exact start of the free space it still looks | |
| 4261 // like we have a large enough free space to be worth bothering with, | |
| 4262 // so now we need to find the start of the first live object at the | |
| 4263 // end of the free space. | |
| 4264 free_end = StartOfLiveObject(cell_base, *cell); | |
| 4265 freed_bytes = Free<mode>(space, free_list, free_start, | |
| 4266 static_cast<int>(free_end - free_start)); | |
| 4267 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | |
| 4268 } | |
| 4269 } | |
| 4270 // Update our undigested record of where the current free area started. | |
| 4271 free_start = cell_base; | |
| 4272 free_start_cell = *cell; | |
| 4273 // Clear marking bits for current cell. | |
| 4274 *cell = 0; | |
| 4275 } | |
| 4276 } | |
| 4277 | |
| 4278 // Handle the free space at the end of the page. | |
| 4279 if (cell_base - free_start > 32 * kPointerSize) { | |
| 4280 free_start = DigestFreeStart(free_start, free_start_cell); | |
| 4281 freed_bytes = Free<mode>(space, free_list, free_start, | |
| 4282 static_cast<int>(p->area_end() - free_start)); | |
| 4283 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | |
| 4284 } | |
| 4285 | |
| 4286 p->ResetLiveBytes(); | |
| 4287 if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) { | |
| 4288 // When concurrent sweeping is active, the page will be marked after | |
| 4289 // sweeping by the main thread. | |
| 4290 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE); | |
| 4291 } else { | |
| 4292 p->MarkSweptConservatively(); | |
| 4293 } | |
| 4294 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); | |
| 4295 } | |
| 4296 | |
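The heart of the deleted conservative sweeper is its two-stage gap test: compare against the 32-word threshold using the cheap, cell-aligned approximate start first, and digest the exact start only when that passes. A condensed sketch, with the deleted helpers left as declarations:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kPointerSize = sizeof(void*);
    constexpr size_t kGapThreshold = 32 * kPointerSize;  // one cell's span

    // Declarations standing in for the deleted helpers above.
    uintptr_t DigestFreeStartAt(uintptr_t approximate_start);  // exact start
    uintptr_t StartOfLiveObjectAt(uintptr_t cell_base);        // gap end
    void FreeGap(uintptr_t start, size_t size);                // free-list hook

    // A gap is freed only when it is provably larger than a cell, and then
    // always whole. Small gaps stay untouched, never split, so any FreeSpace
    // filler met while scanning covers exactly the region it claims.
    void MaybeFreeGap(uintptr_t approximate_start, uintptr_t cell_base) {
      if (cell_base - approximate_start > kGapThreshold) {
        uintptr_t exact_start = DigestFreeStartAt(approximate_start);
        if (cell_base - exact_start > kGapThreshold) {
          FreeGap(exact_start, StartOfLiveObjectAt(cell_base) - exact_start);
        }
      }
    }
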
| 4297 | |
| 4298 int MarkCompactCollector::SweepInParallel(PagedSpace* space, | 4119 int MarkCompactCollector::SweepInParallel(PagedSpace* space, |
| 4299 int required_freed_bytes) { | 4120 int required_freed_bytes) { |
| 4300 int max_freed = 0; | 4121 int max_freed = 0; |
| 4301 int max_freed_overall = 0; | 4122 int max_freed_overall = 0; |
| 4302 PageIterator it(space); | 4123 PageIterator it(space); |
| 4303 while (it.has_next()) { | 4124 while (it.has_next()) { |
| 4304 Page* p = it.next(); | 4125 Page* p = it.next(); |
| 4305 max_freed = SweepInParallel(p, space); | 4126 max_freed = SweepInParallel(p, space); |
| 4306 DCHECK(max_freed >= 0); | 4127 DCHECK(max_freed >= 0); |
| 4307 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) { | 4128 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) { |
| 4308 return max_freed; | 4129 return max_freed; |
| 4309 } | 4130 } |
| 4310 max_freed_overall = Max(max_freed, max_freed_overall); | 4131 max_freed_overall = Max(max_freed, max_freed_overall); |
| 4311 if (p == space->end_of_unswept_pages()) break; | 4132 if (p == space->end_of_unswept_pages()) break; |
| 4312 } | 4133 } |
| 4313 return max_freed_overall; | 4134 return max_freed_overall; |
| 4314 } | 4135 } |
| 4315 | 4136 |
| 4316 | 4137 |
| 4317 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) { | 4138 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) { |
| 4318 int max_freed = 0; | 4139 int max_freed = 0; |
| 4319 if (page->TryParallelSweeping()) { | 4140 if (page->TryParallelSweeping()) { |
| 4320 FreeList* free_list = space == heap()->old_pointer_space() | 4141 FreeList* free_list = space == heap()->old_pointer_space() |
| 4321 ? free_list_old_pointer_space_.get() | 4142 ? free_list_old_pointer_space_.get() |
| 4322 : free_list_old_data_space_.get(); | 4143 : free_list_old_data_space_.get(); |
| 4323 FreeList private_free_list(space); | 4144 FreeList private_free_list(space); |
| 4324 if (space->swept_precisely()) { | 4145 max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, |
| 4325 max_freed = SweepPrecisely<SWEEP_ONLY, SWEEP_IN_PARALLEL, | 4146 IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL); |
| 4326 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>( | |
| 4327 space, &private_free_list, page, NULL); | |
| 4328 } else { | |
| 4329 max_freed = SweepConservatively<SWEEP_IN_PARALLEL>( | |
| 4330 space, &private_free_list, page); | |
| 4331 } | |
| 4332 free_list->Concatenate(&private_free_list); | 4147 free_list->Concatenate(&private_free_list); |
| 4333 } | 4148 } |
| 4334 return max_freed; | 4149 return max_freed; |
| 4335 } | 4150 } |
| 4336 | 4151 |
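
SweepInParallel pairs the TryParallelSweeping claim with a thread-private FreeList that is merged into the shared one in a single Concatenate call, so sweeping itself runs unsynchronized. The generic shape of that pattern, using standard-library stand-ins rather than V8's FreeList:

    #include <list>
    #include <mutex>

    // Workers accumulate results without synchronization and pay for the
    // lock only once per page, in the final O(1) splice.
    struct SharedFreeList {
      std::mutex mu;
      std::list<void*> blocks;

      void Concatenate(std::list<void*>* private_blocks) {
        std::lock_guard<std::mutex> lock(mu);
        blocks.splice(blocks.end(), *private_blocks);  // empties the source
      }
    };

    void SweepPageParallel(SharedFreeList* shared /*, Page* page */) {
      std::list<void*> private_blocks;
      // ... sweep the page, pushing each freed block onto private_blocks ...
      shared->Concatenate(&private_blocks);  // single synchronized hand-off
    }
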
| 4337 | 4152 |
| 4338 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { | 4153 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { |
| 4339 space->set_swept_precisely(sweeper == PRECISE || | |
| 4340 sweeper == CONCURRENT_PRECISE || | |
| 4341 sweeper == PARALLEL_PRECISE); | |
| 4342 space->ClearStats(); | 4154 space->ClearStats(); |
| 4343 | 4155 |
| 4344 // We defensively initialize end_of_unswept_pages_ here with the first page | 4156 // We defensively initialize end_of_unswept_pages_ here with the first page |
| 4345 // of the pages list. | 4157 // of the pages list. |
| 4346 space->set_end_of_unswept_pages(space->FirstPage()); | 4158 space->set_end_of_unswept_pages(space->FirstPage()); |
| 4347 | 4159 |
| 4348 PageIterator it(space); | 4160 PageIterator it(space); |
| 4349 | 4161 |
| 4350 int pages_swept = 0; | 4162 int pages_swept = 0; |
| 4351 bool unused_page_present = false; | 4163 bool unused_page_present = false; |
| 4352 bool parallel_sweeping_active = false; | 4164 bool parallel_sweeping_active = false; |
| 4353 | 4165 |
| 4354 while (it.has_next()) { | 4166 while (it.has_next()) { |
| 4355 Page* p = it.next(); | 4167 Page* p = it.next(); |
| 4356 DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE); | 4168 DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE); |
| 4357 | 4169 |
| 4358 // Clear sweeping flags indicating that marking bits are still intact. | 4170 // Clear sweeping flags indicating that marking bits are still intact. |
| 4359 p->ClearSweptPrecisely(); | 4171 p->ClearWasSwept(); |
| 4360 p->ClearSweptConservatively(); | |
| 4361 | 4172 |
| 4362 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) || | 4173 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) || |
| 4363 p->IsEvacuationCandidate()) { | 4174 p->IsEvacuationCandidate()) { |
| 4364 // Will be processed in EvacuateNewSpaceAndCandidates. | 4175 // Will be processed in EvacuateNewSpaceAndCandidates. |
| 4365 DCHECK(evacuation_candidates_.length() > 0); | 4176 DCHECK(evacuation_candidates_.length() > 0); |
| 4366 continue; | 4177 continue; |
| 4367 } | 4178 } |
| 4368 | 4179 |
| 4369 // One unused page is kept, all further are released before sweeping them. | 4180 // One unused page is kept, all further are released before sweeping them. |
| 4370 if (p->LiveBytes() == 0) { | 4181 if (p->LiveBytes() == 0) { |
| 4371 if (unused_page_present) { | 4182 if (unused_page_present) { |
| 4372 if (FLAG_gc_verbose) { | 4183 if (FLAG_gc_verbose) { |
| 4373 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n", | 4184 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n", |
| 4374 reinterpret_cast<intptr_t>(p)); | 4185 reinterpret_cast<intptr_t>(p)); |
| 4375 } | 4186 } |
| 4376 // Adjust unswept free bytes because releasing a page expects said | 4187 // Adjust unswept free bytes because releasing a page expects said |
| 4377 // counter to be accurate for unswept pages. | 4188 // counter to be accurate for unswept pages. |
| 4378 space->IncreaseUnsweptFreeBytes(p); | 4189 space->IncreaseUnsweptFreeBytes(p); |
| 4379 space->ReleasePage(p); | 4190 space->ReleasePage(p); |
| 4380 continue; | 4191 continue; |
| 4381 } | 4192 } |
| 4382 unused_page_present = true; | 4193 unused_page_present = true; |
| 4383 } | 4194 } |
| 4384 | 4195 |
| 4385 switch (sweeper) { | 4196 switch (sweeper) { |
| 4386 case CONCURRENT_CONSERVATIVE: | 4197 case CONCURRENT_SWEEPING: |
| 4387 case PARALLEL_CONSERVATIVE: { | 4198 case PARALLEL_SWEEPING: |
| 4388 if (!parallel_sweeping_active) { | 4199 if (!parallel_sweeping_active) { |
| 4389 if (FLAG_gc_verbose) { | 4200 if (FLAG_gc_verbose) { |
| 4390 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", | 4201 PrintF("Sweeping 0x%" V8PRIxPTR ".\n", |
| 4391 reinterpret_cast<intptr_t>(p)); | 4202 reinterpret_cast<intptr_t>(p)); |
| 4392 } | 4203 } |
| 4393 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p); | 4204 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, |
| 4205 IGNORE_FREE_SPACE>(space, NULL, p, NULL); |
| 4394 pages_swept++; | 4206 pages_swept++; |
| 4395 parallel_sweeping_active = true; | 4207 parallel_sweeping_active = true; |
| 4396 } else { | 4208 } else { |
| 4397 if (FLAG_gc_verbose) { | 4209 if (FLAG_gc_verbose) { |
| 4398 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n", | 4210 PrintF("Sweeping 0x%" V8PRIxPTR " in parallel.\n", |
| 4399 reinterpret_cast<intptr_t>(p)); | 4211 reinterpret_cast<intptr_t>(p)); |
| 4400 } | 4212 } |
| 4401 p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING); | 4213 p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING); |
| 4402 space->IncreaseUnsweptFreeBytes(p); | 4214 space->IncreaseUnsweptFreeBytes(p); |
| 4403 } | 4215 } |
| 4404 space->set_end_of_unswept_pages(p); | 4216 space->set_end_of_unswept_pages(p); |
| 4405 break; | 4217 break; |
| 4406 } | 4218 case SEQUENTIAL_SWEEPING: { |
| 4407 case CONCURRENT_PRECISE: | |
| 4408 case PARALLEL_PRECISE: | |
| 4409 if (!parallel_sweeping_active) { | |
| 4410 if (FLAG_gc_verbose) { | |
| 4411 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n", | |
| 4412 reinterpret_cast<intptr_t>(p)); | |
| 4413 } | |
| 4414 SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, | |
| 4415 IGNORE_FREE_SPACE>(space, NULL, p, NULL); | |
| 4416 pages_swept++; | |
| 4417 parallel_sweeping_active = true; | |
| 4418 } else { | |
| 4419 if (FLAG_gc_verbose) { | |
| 4420 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n", | |
| 4421 reinterpret_cast<intptr_t>(p)); | |
| 4422 } | |
| 4423 p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING); | |
| 4424 space->IncreaseUnsweptFreeBytes(p); | |
| 4425 } | |
| 4426 space->set_end_of_unswept_pages(p); | |
| 4427 break; | |
| 4428 case PRECISE: { | |
| 4429 if (FLAG_gc_verbose) { | 4219 if (FLAG_gc_verbose) { |
| 4430 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n", | 4220 PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p)); |
| 4431 reinterpret_cast<intptr_t>(p)); | |
| 4432 } | 4221 } |
| 4433 if (space->identity() == CODE_SPACE && FLAG_zap_code_space) { | 4222 if (space->identity() == CODE_SPACE && FLAG_zap_code_space) { |
| 4434 SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, | 4223 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, |
| 4435 ZAP_FREE_SPACE>(space, NULL, p, NULL); | 4224 ZAP_FREE_SPACE>(space, NULL, p, NULL); |
| 4436 } else if (space->identity() == CODE_SPACE) { | 4225 } else if (space->identity() == CODE_SPACE) { |
| 4437 SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, | 4226 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, |
| 4438 IGNORE_FREE_SPACE>(space, NULL, p, NULL); | 4227 IGNORE_FREE_SPACE>(space, NULL, p, NULL); |
| 4439 } else { | 4228 } else { |
| 4440 SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, | 4229 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, |
| 4441 IGNORE_FREE_SPACE>(space, NULL, p, NULL); | 4230 IGNORE_FREE_SPACE>(space, NULL, p, NULL); |
| 4442 } | 4231 } |
| 4443 pages_swept++; | 4232 pages_swept++; |
| 4444 break; | 4233 break; |
| 4445 } | 4234 } |
| 4446 default: { UNREACHABLE(); } | 4235 default: { UNREACHABLE(); } |
| 4447 } | 4236 } |
| 4448 } | 4237 } |
| 4449 | 4238 |
| 4450 if (FLAG_gc_verbose) { | 4239 if (FLAG_gc_verbose) { |
| 4451 PrintF("SweepSpace: %s (%d pages swept)\n", | 4240 PrintF("SweepSpace: %s (%d pages swept)\n", |
| 4452 AllocationSpaceName(space->identity()), pages_swept); | 4241 AllocationSpaceName(space->identity()), pages_swept); |
| 4453 } | 4242 } |
| 4454 | 4243 |
| 4455 // Give pages that are queued to be freed back to the OS. | 4244 // Give pages that are queued to be freed back to the OS. |
| 4456 heap()->FreeQueuedChunks(); | 4245 heap()->FreeQueuedChunks(); |
| 4457 } | 4246 } |
| 4458 | 4247 |
| 4459 | 4248 |
| 4460 static bool ShouldStartSweeperThreads(MarkCompactCollector::SweeperType type) { | 4249 static bool ShouldStartSweeperThreads(MarkCompactCollector::SweeperType type) { |
| 4461 return type == MarkCompactCollector::PARALLEL_CONSERVATIVE || | 4250 return type == MarkCompactCollector::PARALLEL_SWEEPING || |
| 4462 type == MarkCompactCollector::CONCURRENT_CONSERVATIVE || | 4251 type == MarkCompactCollector::CONCURRENT_SWEEPING; |
| 4463 type == MarkCompactCollector::PARALLEL_PRECISE || | |
| 4464 type == MarkCompactCollector::CONCURRENT_PRECISE; | |
| 4465 } | 4252 } |
| 4466 | 4253 |
| 4467 | 4254 |
| 4468 static bool ShouldWaitForSweeperThreads( | 4255 static bool ShouldWaitForSweeperThreads( |
| 4469 MarkCompactCollector::SweeperType type) { | 4256 MarkCompactCollector::SweeperType type) { |
| 4470 return type == MarkCompactCollector::PARALLEL_CONSERVATIVE || | 4257 return type == MarkCompactCollector::PARALLEL_SWEEPING; |
| 4471 type == MarkCompactCollector::PARALLEL_PRECISE; | |
| 4472 } | 4258 } |
| 4473 | 4259 |
| 4474 | 4260 |
| 4475 void MarkCompactCollector::SweepSpaces() { | 4261 void MarkCompactCollector::SweepSpaces() { |
| 4476 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP); | 4262 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP); |
| 4477 double start_time = 0.0; | 4263 double start_time = 0.0; |
| 4478 if (FLAG_print_cumulative_gc_stat) { | 4264 if (FLAG_print_cumulative_gc_stat) { |
| 4479 start_time = base::OS::TimeCurrentMillis(); | 4265 start_time = base::OS::TimeCurrentMillis(); |
| 4480 } | 4266 } |
| 4481 | 4267 |
| 4482 #ifdef DEBUG | 4268 #ifdef DEBUG |
| 4483 state_ = SWEEP_SPACES; | 4269 state_ = SWEEP_SPACES; |
| 4484 #endif | 4270 #endif |
| 4485 SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE; | 4271 SweeperType how_to_sweep = CONCURRENT_SWEEPING; |
| 4486 if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE; | 4272 if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_SWEEPING; |
| 4487 if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE; | 4273 if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_SWEEPING; |
| 4488 if (FLAG_always_precise_sweeping && FLAG_parallel_sweeping) { | |
| 4489 how_to_sweep = PARALLEL_PRECISE; | |
| 4490 } | |
| 4491 if (FLAG_always_precise_sweeping && FLAG_concurrent_sweeping) { | |
| 4492 how_to_sweep = CONCURRENT_PRECISE; | |
| 4493 } | |
| 4494 if (sweep_precisely_) how_to_sweep = PRECISE; | |
| 4495 | 4274 |
| 4496 MoveEvacuationCandidatesToEndOfPagesList(); | 4275 MoveEvacuationCandidatesToEndOfPagesList(); |
| 4497 | 4276 |
| 4498 // Noncompacting collections simply sweep the spaces to clear the mark | 4277 // Noncompacting collections simply sweep the spaces to clear the mark |
| 4499 // bits and free the nonlive blocks (for old and map spaces). We sweep | 4278 // bits and free the nonlive blocks (for old and map spaces). We sweep |
| 4500 // the map space last because freeing non-live maps overwrites them and | 4279 // the map space last because freeing non-live maps overwrites them and |
| 4501 // the other spaces rely on possibly non-live maps to get the sizes for | 4280 // the other spaces rely on possibly non-live maps to get the sizes for |
| 4502 // non-live objects. | 4281 // non-live objects. |
| 4503 { | 4282 { |
| 4504 GCTracer::Scope sweep_scope(heap()->tracer(), | 4283 GCTracer::Scope sweep_scope(heap()->tracer(), |
| (...skipping 10 matching lines...) |
| 4515 | 4294 |
| 4516 if (ShouldWaitForSweeperThreads(how_to_sweep)) { | 4295 if (ShouldWaitForSweeperThreads(how_to_sweep)) { |
| 4517 EnsureSweepingCompleted(); | 4296 EnsureSweepingCompleted(); |
| 4518 } | 4297 } |
| 4519 } | 4298 } |
| 4520 RemoveDeadInvalidatedCode(); | 4299 RemoveDeadInvalidatedCode(); |
| 4521 | 4300 |
| 4522 { | 4301 { |
| 4523 GCTracer::Scope sweep_scope(heap()->tracer(), | 4302 GCTracer::Scope sweep_scope(heap()->tracer(), |
| 4524 GCTracer::Scope::MC_SWEEP_CODE); | 4303 GCTracer::Scope::MC_SWEEP_CODE); |
| 4525 SweepSpace(heap()->code_space(), PRECISE); | 4304 SweepSpace(heap()->code_space(), SEQUENTIAL_SWEEPING); |
| 4526 } | 4305 } |
| 4527 | 4306 |
| 4528 { | 4307 { |
| 4529 GCTracer::Scope sweep_scope(heap()->tracer(), | 4308 GCTracer::Scope sweep_scope(heap()->tracer(), |
| 4530 GCTracer::Scope::MC_SWEEP_CELL); | 4309 GCTracer::Scope::MC_SWEEP_CELL); |
| 4531 SweepSpace(heap()->cell_space(), PRECISE); | 4310 SweepSpace(heap()->cell_space(), SEQUENTIAL_SWEEPING); |
| 4532 SweepSpace(heap()->property_cell_space(), PRECISE); | 4311 SweepSpace(heap()->property_cell_space(), SEQUENTIAL_SWEEPING); |
| 4533 } | 4312 } |
| 4534 | 4313 |
| 4535 EvacuateNewSpaceAndCandidates(); | 4314 EvacuateNewSpaceAndCandidates(); |
| 4536 | 4315 |
| 4537 // ClearNonLiveTransitions depends on precise sweeping of map space to | 4316 // ClearNonLiveTransitions depends on precise sweeping of map space to |
| 4538 // detect whether unmarked map became dead in this collection or in one | 4317 // detect whether unmarked map became dead in this collection or in one |
| 4539 // of the previous ones. | 4318 // of the previous ones. |
| 4540 { | 4319 { |
| 4541 GCTracer::Scope sweep_scope(heap()->tracer(), | 4320 GCTracer::Scope sweep_scope(heap()->tracer(), |
| 4542 GCTracer::Scope::MC_SWEEP_MAP); | 4321 GCTracer::Scope::MC_SWEEP_MAP); |
| 4543 SweepSpace(heap()->map_space(), PRECISE); | 4322 SweepSpace(heap()->map_space(), SEQUENTIAL_SWEEPING); |
| 4544 } | 4323 } |
| 4545 | 4324 |
| 4546 // Deallocate unmarked objects and clear marked bits for marked objects. | 4325 // Deallocate unmarked objects and clear marked bits for marked objects. |
| 4547 heap_->lo_space()->FreeUnmarkedObjects(); | 4326 heap_->lo_space()->FreeUnmarkedObjects(); |
| 4548 | 4327 |
| 4549 // Deallocate evacuated candidate pages. | 4328 // Deallocate evacuated candidate pages. |
| 4550 ReleaseEvacuationCandidates(); | 4329 ReleaseEvacuationCandidates(); |
| 4551 | 4330 |
| 4552 if (FLAG_print_cumulative_gc_stat) { | 4331 if (FLAG_print_cumulative_gc_stat) { |
| 4553 heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() - | 4332 heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() - |
| 4554 start_time); | 4333 start_time); |
| 4555 } | 4334 } |
| 4556 } | 4335 } |
| 4557 | 4336 |
| 4558 | 4337 |
| 4559 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) { | 4338 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) { |
| 4560 PageIterator it(space); | 4339 PageIterator it(space); |
| 4561 while (it.has_next()) { | 4340 while (it.has_next()) { |
| 4562 Page* p = it.next(); | 4341 Page* p = it.next(); |
| 4563 if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) { | 4342 if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) { |
| 4564 p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE); | 4343 p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE); |
| 4565 if (space->swept_precisely()) { | 4344 p->SetWasSwept(); |
| 4566 p->MarkSweptPrecisely(); | |
| 4567 } else { | |
| 4568 p->MarkSweptConservatively(); | |
| 4569 } | |
| 4570 } | 4345 } |
| 4571 DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE); | 4346 DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE); |
| 4572 } | 4347 } |
| 4573 } | 4348 } |
| 4574 | 4349 |
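The parallel_sweeping() values threaded through this file (SWEEPING_PENDING set in SweepSpace, SWEEPING_FINALIZE set in Sweep, SWEEPING_DONE restored here) form a small per-page state machine, with TryParallelSweeping as the claim step. A sketch with std::atomic standing in for the real synchronization; treating the claim as a compare-and-swap is an assumption about TryParallelSweeping:

    #include <atomic>

    // State names mirror MemoryChunk's parallel-sweeping states as used in
    // this file; the atomic claim below is an assumed implementation.
    enum ParallelSweepingState {
      SWEEPING_DONE,         // swept and finalized (or never queued)
      SWEEPING_FINALIZE,     // swept by a worker; main thread must finalize
      SWEEPING_IN_PROGRESS,  // claimed by exactly one sweeper
      SWEEPING_PENDING       // queued by SweepSpace, awaiting a claim
    };

    struct PageSketch {
      std::atomic<ParallelSweepingState> state{SWEEPING_DONE};

      // At most one caller wins the right to sweep a pending page.
      bool TryParallelSweeping() {
        ParallelSweepingState expected = SWEEPING_PENDING;
        return state.compare_exchange_strong(expected, SWEEPING_IN_PROGRESS);
      }

      // ParallelSweepSpaceComplete's finalize step, per page.
      void FinalizeIfNeeded() {
        if (state.load() == SWEEPING_FINALIZE) {
          state.store(SWEEPING_DONE);
          // ...then SetWasSwept(), as in the code above.
        }
      }
    };
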
| 4575 | 4350 |
| 4576 void MarkCompactCollector::ParallelSweepSpacesComplete() { | 4351 void MarkCompactCollector::ParallelSweepSpacesComplete() { |
| 4577 ParallelSweepSpaceComplete(heap()->old_pointer_space()); | 4352 ParallelSweepSpaceComplete(heap()->old_pointer_space()); |
| 4578 ParallelSweepSpaceComplete(heap()->old_data_space()); | 4353 ParallelSweepSpaceComplete(heap()->old_data_space()); |
| 4579 } | 4354 } |
| (...skipping 199 matching lines...) |
| 4779 SlotsBuffer* buffer = *buffer_address; | 4554 SlotsBuffer* buffer = *buffer_address; |
| 4780 while (buffer != NULL) { | 4555 while (buffer != NULL) { |
| 4781 SlotsBuffer* next_buffer = buffer->next(); | 4556 SlotsBuffer* next_buffer = buffer->next(); |
| 4782 DeallocateBuffer(buffer); | 4557 DeallocateBuffer(buffer); |
| 4783 buffer = next_buffer; | 4558 buffer = next_buffer; |
| 4784 } | 4559 } |
| 4785 *buffer_address = NULL; | 4560 *buffer_address = NULL; |
| 4786 } | 4561 } |
| 4787 } | 4562 } |
| 4788 } // namespace v8::internal | 4563 } // namespace v8::internal |