Chromium Code Reviews

Side by Side Diff: src/mark-compact.cc

Issue 398333002: Concurrent/parallel precise sweeping. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 5 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/v8.h" 5 #include "src/v8.h"
6 6
7 #include "src/base/atomicops.h" 7 #include "src/base/atomicops.h"
8 #include "src/code-stubs.h" 8 #include "src/code-stubs.h"
9 #include "src/compilation-cache.h" 9 #include "src/compilation-cache.h"
10 #include "src/cpu-profiler.h" 10 #include "src/cpu-profiler.h"
(...skipping 190 matching lines...)
201 current += object->Size(); 201 current += object->Size();
202 } 202 }
203 } 203 }
204 } 204 }
205 205
206 206
207 static void VerifyEvacuation(PagedSpace* space) { 207 static void VerifyEvacuation(PagedSpace* space) {
208 // TODO(hpayer): Bring back VerifyEvacuation for parallel-concurrently 208 // TODO(hpayer): Bring back VerifyEvacuation for parallel-concurrently
209 // swept pages. 209 // swept pages.
210 if ((FLAG_concurrent_sweeping || FLAG_parallel_sweeping) && 210 if ((FLAG_concurrent_sweeping || FLAG_parallel_sweeping) &&
211 !space->is_iterable()) return; 211 !space->swept_precisely()) return;
212 PageIterator it(space); 212 PageIterator it(space);
213 213
214 while (it.has_next()) { 214 while (it.has_next()) {
215 Page* p = it.next(); 215 Page* p = it.next();
216 if (p->IsEvacuationCandidate()) continue; 216 if (p->IsEvacuationCandidate()) continue;
217 VerifyEvacuation(p->area_start(), p->area_end()); 217 VerifyEvacuation(p->area_start(), p->area_end());
218 } 218 }
219 } 219 }
220 220
221 221
(...skipping 1822 matching lines...)
2044 } 2044 }
2045 *cells = 0; 2045 *cells = 0;
2046 } 2046 }
2047 return survivors_size; 2047 return survivors_size;
2048 } 2048 }
2049 2049
2050 2050
2051 static void DiscoverGreyObjectsInSpace(Heap* heap, 2051 static void DiscoverGreyObjectsInSpace(Heap* heap,
2052 MarkingDeque* marking_deque, 2052 MarkingDeque* marking_deque,
2053 PagedSpace* space) { 2053 PagedSpace* space) {
2054 if (space->is_iterable()) { 2054 if (space->swept_precisely()) {
2055 HeapObjectIterator it(space); 2055 HeapObjectIterator it(space);
2056 DiscoverGreyObjectsWithIterator(heap, marking_deque, &it); 2056 DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
2057 } else { 2057 } else {
2058 PageIterator it(space); 2058 PageIterator it(space);
2059 while (it.has_next()) { 2059 while (it.has_next()) {
2060 Page* p = it.next(); 2060 Page* p = it.next();
2061 DiscoverGreyObjectsOnPage(marking_deque, p); 2061 DiscoverGreyObjectsOnPage(marking_deque, p);
2062 if (marking_deque->IsFull()) return; 2062 if (marking_deque->IsFull()) return;
2063 } 2063 }
2064 } 2064 }
(...skipping 1060 matching lines...)
3125 } 3125 }
3126 3126
3127 3127
3128 void MarkCompactCollector::EvacuatePages() { 3128 void MarkCompactCollector::EvacuatePages() {
3129 int npages = evacuation_candidates_.length(); 3129 int npages = evacuation_candidates_.length();
3130 for (int i = 0; i < npages; i++) { 3130 for (int i = 0; i < npages; i++) {
3131 Page* p = evacuation_candidates_[i]; 3131 Page* p = evacuation_candidates_[i];
3132 ASSERT(p->IsEvacuationCandidate() || 3132 ASSERT(p->IsEvacuationCandidate() ||
3133 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3133 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3134 ASSERT(static_cast<int>(p->parallel_sweeping()) == 3134 ASSERT(static_cast<int>(p->parallel_sweeping()) ==
3135 MemoryChunk::PARALLEL_SWEEPING_DONE); 3135 MemoryChunk::SWEEPING_DONE);
3136 if (p->IsEvacuationCandidate()) { 3136 if (p->IsEvacuationCandidate()) {
3137 // During compaction we might have to request a new page. 3137 // During compaction we might have to request a new page.
3138 // Check that the space still has room for that. 3138 // Check that the space still has room for that.
3139 if (static_cast<PagedSpace*>(p->owner())->CanExpand()) { 3139 if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
3140 EvacuateLiveObjectsFromPage(p); 3140 EvacuateLiveObjectsFromPage(p);
3141 } else { 3141 } else {
3142 // Without room for expansion evacuation is not guaranteed to succeed. 3142 // Without room for expansion evacuation is not guaranteed to succeed.
3143 // Pessimistically abandon unevacuated pages. 3143 // Pessimistically abandon unevacuated pages.
3144 for (int j = i; j < npages; j++) { 3144 for (int j = i; j < npages; j++) {
3145 Page* page = evacuation_candidates_[j]; 3145 Page* page = evacuation_candidates_[j];
(...skipping 75 matching lines...)
3221 IGNORE_SKIP_LIST 3221 IGNORE_SKIP_LIST
3222 }; 3222 };
3223 3223
3224 3224
3225 enum FreeSpaceTreatmentMode { 3225 enum FreeSpaceTreatmentMode {
3226 IGNORE_FREE_SPACE, 3226 IGNORE_FREE_SPACE,
3227 ZAP_FREE_SPACE 3227 ZAP_FREE_SPACE
3228 }; 3228 };
3229 3229
3230 3230
3231 template<MarkCompactCollector::SweepingParallelism mode>
3232 static intptr_t Free(PagedSpace* space,
3233 FreeList* free_list,
3234 Address start,
3235 int size) {
3236 if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
3237 ASSERT(free_list == NULL);
3238 return space->Free(start, size);
3239 } else {
3240 return size - free_list->Free(start, size);
3241 }
3242 }
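
Note on the new templated Free() helper above: the template parameter decides at compile time where reclaimed memory goes. On the main thread it is returned to the space directly; a parallel sweeper instead accumulates it on a thread-private free list and reports how many bytes became allocatable. A minimal standalone sketch of that dispatch, using hypothetical stand-ins (SpaceStub, FreeListStub) rather than V8's PagedSpace and FreeList:

#include <stdio.h>
#include <stddef.h>

// Hypothetical stand-ins for illustration only; these are not V8's classes.
enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };

struct FreeListStub {
  // Mirrors how the patch uses FreeList::Free: the return value is assumed to
  // be the number of bytes that could not be reused, so the caller subtracts
  // it from the block size.
  int Free(char* /*start*/, int /*size*/) { return 0; }
};

struct SpaceStub {
  int Free(char* /*start*/, int size) { return size; }
};

template <SweepingParallelism mode>
static int FreeBlock(SpaceStub* space, FreeListStub* free_list,
                     char* start, int size) {
  if (mode == SWEEP_ON_MAIN_THREAD) {
    return space->Free(start, size);             // free_list is NULL here
  } else {
    return size - free_list->Free(start, size);  // usable bytes freed
  }
}

int main() {
  SpaceStub space;
  FreeListStub free_list;
  char block[64];
  printf("main thread: %d\n",
         FreeBlock<SWEEP_ON_MAIN_THREAD>(&space, NULL, block, 64));
  printf("parallel:    %d\n",
         FreeBlock<SWEEP_IN_PARALLEL>(&space, &free_list, block, 64));
  return 0;
}
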
3243
3244
3231 // Sweep a space precisely. After this has been done the space can 3245 // Sweep a space precisely. After this has been done the space can
3232 // be iterated precisely, hitting only the live objects. Code space 3246 // be iterated precisely, hitting only the live objects. Code space
3233 // is always swept precisely because we want to be able to iterate 3247 // is always swept precisely because we want to be able to iterate
3234 // over it. Map space is swept precisely, because it is not compacted. 3248 // over it. Map space is swept precisely, because it is not compacted.
3235 // Slots in live objects pointing into evacuation candidates are updated 3249 // Slots in live objects pointing into evacuation candidates are updated
3236 // if requested. 3250 // if requested.
3237 template<SweepingMode sweeping_mode, 3251 template<SweepingMode sweeping_mode,
3252 MarkCompactCollector::SweepingParallelism parallelism,
3238 SkipListRebuildingMode skip_list_mode, 3253 SkipListRebuildingMode skip_list_mode,
3239 FreeSpaceTreatmentMode free_space_mode> 3254 FreeSpaceTreatmentMode free_space_mode>
3240 static void SweepPrecisely(PagedSpace* space, 3255 static int SweepPrecisely(PagedSpace* space,
3256 FreeList* free_list,
3241 Page* p, 3257 Page* p,
3242 ObjectVisitor* v) { 3258 ObjectVisitor* v) {
3243 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); 3259 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
3244 ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST, 3260 ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3245 space->identity() == CODE_SPACE); 3261 space->identity() == CODE_SPACE);
3246 ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); 3262 ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3263 ASSERT(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD ||
3264 sweeping_mode == SWEEP_ONLY);
3247 3265
3248 double start_time = 0.0; 3266 double start_time = 0.0;
3249 if (FLAG_print_cumulative_gc_stat) { 3267 if (FLAG_print_cumulative_gc_stat) {
3250 start_time = base::OS::TimeCurrentMillis(); 3268 start_time = base::OS::TimeCurrentMillis();
3251 } 3269 }
3252 3270
3253 p->MarkSweptPrecisely(); 3271 if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
3272 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
3273 } else {
3274 p->MarkSweptPrecisely();
3275 }
3254 3276
3255 Address free_start = p->area_start(); 3277 Address free_start = p->area_start();
3256 ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); 3278 ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3257 int offsets[16]; 3279 int offsets[16];
3258 3280
3259 SkipList* skip_list = p->skip_list(); 3281 SkipList* skip_list = p->skip_list();
3260 int curr_region = -1; 3282 int curr_region = -1;
3261 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) { 3283 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3262 skip_list->Clear(); 3284 skip_list->Clear();
3263 } 3285 }
3264 3286
3287 intptr_t freed_bytes = 0;
3288 intptr_t max_freed_bytes = 0;
3289
3265 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { 3290 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3266 Address cell_base = it.CurrentCellBase(); 3291 Address cell_base = it.CurrentCellBase();
3267 MarkBit::CellType* cell = it.CurrentCell(); 3292 MarkBit::CellType* cell = it.CurrentCell();
3268 int live_objects = MarkWordToObjectStarts(*cell, offsets); 3293 int live_objects = MarkWordToObjectStarts(*cell, offsets);
3269 int live_index = 0; 3294 int live_index = 0;
3270 for ( ; live_objects != 0; live_objects--) { 3295 for ( ; live_objects != 0; live_objects--) {
3271 Address free_end = cell_base + offsets[live_index++] * kPointerSize; 3296 Address free_end = cell_base + offsets[live_index++] * kPointerSize;
3272 if (free_end != free_start) { 3297 if (free_end != free_start) {
3298 int size = static_cast<int>(free_end - free_start);
3273 if (free_space_mode == ZAP_FREE_SPACE) { 3299 if (free_space_mode == ZAP_FREE_SPACE) {
3274 memset(free_start, 0xcc, static_cast<int>(free_end - free_start)); 3300 memset(free_start, 0xcc, size);
3275 } 3301 }
3276 space->Free(free_start, static_cast<int>(free_end - free_start)); 3302 freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3303 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3277 #ifdef ENABLE_GDB_JIT_INTERFACE 3304 #ifdef ENABLE_GDB_JIT_INTERFACE
3278 if (FLAG_gdbjit && space->identity() == CODE_SPACE) { 3305 if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3279 GDBJITInterface::RemoveCodeRange(free_start, free_end); 3306 GDBJITInterface::RemoveCodeRange(free_start, free_end);
3280 } 3307 }
3281 #endif 3308 #endif
3282 } 3309 }
3283 HeapObject* live_object = HeapObject::FromAddress(free_end); 3310 HeapObject* live_object = HeapObject::FromAddress(free_end);
3284 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object))); 3311 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
3285 Map* map = live_object->map(); 3312 Map* map = live_object->map();
3286 int size = live_object->SizeFromMap(map); 3313 int size = live_object->SizeFromMap(map);
(...skipping 10 matching lines...)
3297 skip_list->AddObject(free_end, size); 3324 skip_list->AddObject(free_end, size);
3298 curr_region = new_region_end; 3325 curr_region = new_region_end;
3299 } 3326 }
3300 } 3327 }
3301 free_start = free_end + size; 3328 free_start = free_end + size;
3302 } 3329 }
3303 // Clear marking bits for current cell. 3330 // Clear marking bits for current cell.
3304 *cell = 0; 3331 *cell = 0;
3305 } 3332 }
3306 if (free_start != p->area_end()) { 3333 if (free_start != p->area_end()) {
3334 int size = static_cast<int>(p->area_end() - free_start);
3307 if (free_space_mode == ZAP_FREE_SPACE) { 3335 if (free_space_mode == ZAP_FREE_SPACE) {
3308 memset(free_start, 0xcc, static_cast<int>(p->area_end() - free_start)); 3336 memset(free_start, 0xcc, size);
3309 } 3337 }
3310 space->Free(free_start, static_cast<int>(p->area_end() - free_start)); 3338 freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3339 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3311 #ifdef ENABLE_GDB_JIT_INTERFACE 3340 #ifdef ENABLE_GDB_JIT_INTERFACE
3312 if (FLAG_gdbjit && space->identity() == CODE_SPACE) { 3341 if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3313 GDBJITInterface::RemoveCodeRange(free_start, p->area_end()); 3342 GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
3314 } 3343 }
3315 #endif 3344 #endif
3316 } 3345 }
3317 p->ResetLiveBytes(); 3346 p->ResetLiveBytes();
3318 if (FLAG_print_cumulative_gc_stat) { 3347 if (FLAG_print_cumulative_gc_stat) {
3319 space->heap()->AddSweepingTime(base::OS::TimeCurrentMillis() - start_time); 3348 space->heap()->AddSweepingTime(base::OS::TimeCurrentMillis() - start_time);
3320 } 3349 }
3350 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
3321 } 3351 }
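
SweepPrecisely() now also reports how much memory a caller can count on allocating afterwards: it tracks the largest contiguous block freed on the page (max_freed_bytes) and returns it through FreeList::GuaranteedAllocatable(), so SweepInParallel() can stop once a single page yields enough space. A small standalone illustration of the max-gap bookkeeping, with made-up object offsets rather than real mark bitmap data:

#include <stdio.h>

// Hypothetical live-object layout for illustration; not V8 data structures.
struct LiveObject { int offset; int size; };

static int MaxFreedBytes(const LiveObject* live, int count, int area_size) {
  int free_start = 0;
  int max_freed = 0;
  for (int i = 0; i < count; i++) {
    int gap = live[i].offset - free_start;   // free gap before this object
    if (gap > max_freed) max_freed = gap;
    free_start = live[i].offset + live[i].size;
  }
  int tail = area_size - free_start;         // free gap after the last object
  return tail > max_freed ? tail : max_freed;
}

int main() {
  // Two live objects on a 4096-byte area leave gaps of 128, 1920 and 1536 bytes.
  LiveObject live[] = { { 128, 256 }, { 2304, 256 } };
  printf("largest contiguous free block: %d bytes\n",
         MaxFreedBytes(live, 2, 4096));  // prints 1920
  return 0;
}
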
3322 3352
3323 3353
3324 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) { 3354 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
3325 Page* p = Page::FromAddress(code->address()); 3355 Page* p = Page::FromAddress(code->address());
3326 3356
3327 if (p->IsEvacuationCandidate() || 3357 if (p->IsEvacuationCandidate() ||
3328 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { 3358 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3329 return false; 3359 return false;
3330 } 3360 }
(...skipping 215 matching lines...)
3546 } 3576 }
3547 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); 3577 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3548 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); 3578 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
3549 3579
3550 switch (space->identity()) { 3580 switch (space->identity()) {
3551 case OLD_DATA_SPACE: 3581 case OLD_DATA_SPACE:
3552 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p); 3582 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
3553 break; 3583 break;
3554 case OLD_POINTER_SPACE: 3584 case OLD_POINTER_SPACE:
3555 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, 3585 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
3586 SWEEP_ON_MAIN_THREAD,
3556 IGNORE_SKIP_LIST, 3587 IGNORE_SKIP_LIST,
3557 IGNORE_FREE_SPACE>( 3588 IGNORE_FREE_SPACE>(
3558 space, p, &updating_visitor); 3589 space, NULL, p, &updating_visitor);
3559 break; 3590 break;
3560 case CODE_SPACE: 3591 case CODE_SPACE:
3561 if (FLAG_zap_code_space) { 3592 if (FLAG_zap_code_space) {
3562 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, 3593 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
3594 SWEEP_ON_MAIN_THREAD,
3563 REBUILD_SKIP_LIST, 3595 REBUILD_SKIP_LIST,
3564 ZAP_FREE_SPACE>( 3596 ZAP_FREE_SPACE>(
3565 space, p, &updating_visitor); 3597 space, NULL, p, &updating_visitor);
3566 } else { 3598 } else {
3567 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, 3599 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
3600 SWEEP_ON_MAIN_THREAD,
3568 REBUILD_SKIP_LIST, 3601 REBUILD_SKIP_LIST,
3569 IGNORE_FREE_SPACE>( 3602 IGNORE_FREE_SPACE>(
3570 space, p, &updating_visitor); 3603 space, NULL, p, &updating_visitor);
3571 } 3604 }
3572 break; 3605 break;
3573 default: 3606 default:
3574 UNREACHABLE(); 3607 UNREACHABLE();
3575 break; 3608 break;
3576 } 3609 }
3577 } 3610 }
3578 } 3611 }
3579 } 3612 }
3580 3613
(...skipping 351 matching lines...)
3932 } 3965 }
3933 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1; 3966 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
3934 ASSERT((first_set_bit & cell) == first_set_bit); 3967 ASSERT((first_set_bit & cell) == first_set_bit);
3935 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets); 3968 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
3936 ASSERT(live_objects == 1); 3969 ASSERT(live_objects == 1);
3937 USE(live_objects); 3970 USE(live_objects);
3938 return block_address + offsets[0] * kPointerSize; 3971 return block_address + offsets[0] * kPointerSize;
3939 } 3972 }
3940 3973
3941 3974
3942 template<MarkCompactCollector::SweepingParallelism mode>
3943 static intptr_t Free(PagedSpace* space,
3944 FreeList* free_list,
3945 Address start,
3946 int size) {
3947 if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
3948 return space->Free(start, size);
3949 } else {
3950 return size - free_list->Free(start, size);
3951 }
3952 }
3953
3954
3955 // Force instantiation of templatized SweepConservatively method for 3975 // Force instantiation of templatized SweepConservatively method for
3956 // SWEEP_ON_MAIN_THREAD mode. 3976 // SWEEP_ON_MAIN_THREAD mode.
3957 template intptr_t MarkCompactCollector:: 3977 template intptr_t MarkCompactCollector::
3958 SweepConservatively<MarkCompactCollector::SWEEP_ON_MAIN_THREAD>( 3978 SweepConservatively<MarkCompactCollector::SWEEP_ON_MAIN_THREAD>(
3959 PagedSpace*, FreeList*, Page*); 3979 PagedSpace*, FreeList*, Page*);
3960 3980
3961 3981
3962 // Force instantiation of templatized SweepConservatively method for 3982 // Force instantiation of templatized SweepConservatively method for
3963 // SWEEP_IN_PARALLEL mode. 3983 // SWEEP_IN_PARALLEL mode.
3964 template intptr_t MarkCompactCollector:: 3984 template intptr_t MarkCompactCollector::
3965 SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>( 3985 SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>(
3966 PagedSpace*, FreeList*, Page*); 3986 PagedSpace*, FreeList*, Page*);
3967 3987
3968 3988
3969 // Sweeps a space conservatively. After this has been done the larger free 3989 // Sweeps a space conservatively. After this has been done the larger free
3970 // spaces have been put on the free list and the smaller ones have been 3990 // spaces have been put on the free list and the smaller ones have been
3971 // ignored and left untouched. A free space is always either ignored or put 3991 // ignored and left untouched. A free space is always either ignored or put
3972 // on the free list, never split up into two parts. This is important 3992 // on the free list, never split up into two parts. This is important
3973 // because it means that any FreeSpace maps left actually describe a region of 3993 // because it means that any FreeSpace maps left actually describe a region of
3974 // memory that can be ignored when scanning. Dead objects other than free 3994 // memory that can be ignored when scanning. Dead objects other than free
3975 // spaces will not contain the free space map. 3995 // spaces will not contain the free space map.
3976 template<MarkCompactCollector::SweepingParallelism mode> 3996 template<MarkCompactCollector::SweepingParallelism mode>
3977 intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, 3997 int MarkCompactCollector::SweepConservatively(PagedSpace* space,
3978 FreeList* free_list, 3998 FreeList* free_list,
3979 Page* p) { 3999 Page* p) {
3980 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); 4000 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
3981 ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL && 4001 ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
3982 free_list != NULL) || 4002 free_list != NULL) ||
3983 (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD && 4003 (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD &&
3984 free_list == NULL)); 4004 free_list == NULL));
3985 4005
3986 // When parallel sweeping is active, the page will be marked after 4006 // When parallel sweeping is active, the page will be marked after
3987 // sweeping by the main thread. 4007 // sweeping by the main thread.
3988 if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) { 4008 if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) {
3989 p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE); 4009 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
3990 } else { 4010 } else {
3991 p->MarkSweptConservatively(); 4011 p->MarkSweptConservatively();
3992 } 4012 }
3993 4013
3994 intptr_t freed_bytes = 0; 4014 intptr_t freed_bytes = 0;
3995 intptr_t max_freed_bytes = 0; 4015 intptr_t max_freed_bytes = 0;
3996 size_t size = 0; 4016 size_t size = 0;
3997 4017
3998 // Skip over all the dead objects at the start of the page and mark them free. 4018 // Skip over all the dead objects at the start of the page and mark them free.
3999 Address cell_base = 0; 4019 Address cell_base = 0;
(...skipping 76 matching lines...)
4076 int required_freed_bytes) { 4096 int required_freed_bytes) {
4077 PageIterator it(space); 4097 PageIterator it(space);
4078 FreeList* free_list = space == heap()->old_pointer_space() 4098 FreeList* free_list = space == heap()->old_pointer_space()
4079 ? free_list_old_pointer_space_.get() 4099 ? free_list_old_pointer_space_.get()
4080 : free_list_old_data_space_.get(); 4100 : free_list_old_data_space_.get();
4081 FreeList private_free_list(space); 4101 FreeList private_free_list(space);
4082 int max_freed = 0; 4102 int max_freed = 0;
4083 int max_freed_overall = 0; 4103 int max_freed_overall = 0;
4084 while (it.has_next()) { 4104 while (it.has_next()) {
4085 Page* p = it.next(); 4105 Page* p = it.next();
4086
4087 if (p->TryParallelSweeping()) { 4106 if (p->TryParallelSweeping()) {
4088 max_freed = static_cast<int>(SweepConservatively<SWEEP_IN_PARALLEL>( 4107 if (space->swept_precisely()) {
4089 space, &private_free_list, p)); 4108 max_freed = SweepPrecisely<SWEEP_ONLY,
4109 SWEEP_IN_PARALLEL,
4110 IGNORE_SKIP_LIST,
4111 IGNORE_FREE_SPACE>(
4112 space, &private_free_list, p, NULL);
4113 } else {
4114 max_freed = SweepConservatively<SWEEP_IN_PARALLEL>(
4115 space, &private_free_list, p);
4116 }
4090 ASSERT(max_freed >= 0); 4117 ASSERT(max_freed >= 0);
4091 free_list->Concatenate(&private_free_list); 4118 free_list->Concatenate(&private_free_list);
4092 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) { 4119 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
4093 return max_freed; 4120 return max_freed;
4094 } 4121 }
4095 max_freed_overall = Max(max_freed, max_freed_overall); 4122 max_freed_overall = Max(max_freed, max_freed_overall);
4096 } 4123 }
4097 if (p == space->end_of_unswept_pages()) break; 4124 if (p == space->end_of_unswept_pages()) break;
4098 } 4125 }
4099 return max_freed_overall; 4126 return max_freed_overall;
4100 } 4127 }
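
SweepInParallel() above now dispatches per space: precisely swept spaces get the SWEEP_ONLY/SWEEP_IN_PARALLEL instantiation of SweepPrecisely(), the rest keep conservative sweeping. Either way the sweeper frees into a thread-private free list and merges it into the shared list once per page, keeping contention on the shared list coarse-grained. A simplified sketch of that private-list-then-concatenate pattern follows; FreeListStub is hypothetical, and whatever synchronization the real shared-list Concatenate needs is left out:

#include <stddef.h>

// Hypothetical singly linked free list; not V8's FreeList.
struct FreeNode { char* start; int size; FreeNode* next; };

struct FreeListStub {
  FreeNode* head;
  FreeListStub() : head(NULL) {}

  void Free(FreeNode* node) {          // called many times while sweeping a page
    node->next = head;
    head = node;
  }

  // Splice another list into this one; the real shared list would have to
  // synchronize this step, which is omitted here.
  void Concatenate(FreeListStub* other) {
    while (other->head != NULL) {
      FreeNode* node = other->head;
      other->head = node->next;
      Free(node);
    }
  }
};

int main() {
  FreeListStub shared, private_list;
  FreeNode a = { NULL, 32, NULL };
  FreeNode b = { NULL, 64, NULL };
  private_list.Free(&a);               // sweeper frees into its private list
  private_list.Free(&b);
  shared.Concatenate(&private_list);   // one coarse merge per page
  return 0;
}
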
4101 4128
4102 4129
4103 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { 4130 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
4104 space->set_is_iterable(sweeper == PRECISE); 4131 space->set_swept_precisely(sweeper == PRECISE ||
4132 sweeper == CONCURRENT_PRECISE ||
4133 sweeper == PARALLEL_PRECISE);
4105 space->ClearStats(); 4134 space->ClearStats();
4106 4135
4107 // We defensively initialize end_of_unswept_pages_ here with the first page 4136 // We defensively initialize end_of_unswept_pages_ here with the first page
4108 // of the pages list. 4137 // of the pages list.
4109 space->set_end_of_unswept_pages(space->FirstPage()); 4138 space->set_end_of_unswept_pages(space->FirstPage());
4110 4139
4111 PageIterator it(space); 4140 PageIterator it(space);
4112 4141
4113 int pages_swept = 0; 4142 int pages_swept = 0;
4114 bool unused_page_present = false; 4143 bool unused_page_present = false;
4115 bool parallel_sweeping_active = false; 4144 bool parallel_sweeping_active = false;
4116 4145
4117 while (it.has_next()) { 4146 while (it.has_next()) {
4118 Page* p = it.next(); 4147 Page* p = it.next();
4119 ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE); 4148 ASSERT(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
4120 4149
4121 // Clear sweeping flags indicating that marking bits are still intact. 4150 // Clear sweeping flags indicating that marking bits are still intact.
4122 p->ClearSweptPrecisely(); 4151 p->ClearSweptPrecisely();
4123 p->ClearSweptConservatively(); 4152 p->ClearSweptConservatively();
4124 4153
4125 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) || 4154 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
4126 p->IsEvacuationCandidate()) { 4155 p->IsEvacuationCandidate()) {
4127 // Will be processed in EvacuateNewSpaceAndCandidates. 4156 // Will be processed in EvacuateNewSpaceAndCandidates.
4128 ASSERT(evacuation_candidates_.length() > 0); 4157 ASSERT(evacuation_candidates_.length() > 0);
4129 continue; 4158 continue;
(...skipping 24 matching lines...)
4154 reinterpret_cast<intptr_t>(p)); 4183 reinterpret_cast<intptr_t>(p));
4155 } 4184 }
4156 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p); 4185 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
4157 pages_swept++; 4186 pages_swept++;
4158 parallel_sweeping_active = true; 4187 parallel_sweeping_active = true;
4159 } else { 4188 } else {
4160 if (FLAG_gc_verbose) { 4189 if (FLAG_gc_verbose) {
4161 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n", 4190 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
4162 reinterpret_cast<intptr_t>(p)); 4191 reinterpret_cast<intptr_t>(p));
4163 } 4192 }
4164 p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_PENDING); 4193 p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
4165 space->IncreaseUnsweptFreeBytes(p); 4194 space->IncreaseUnsweptFreeBytes(p);
4166 } 4195 }
4167 space->set_end_of_unswept_pages(p); 4196 space->set_end_of_unswept_pages(p);
4168 break; 4197 break;
4169 } 4198 }
4199 case CONCURRENT_PRECISE:
4200 case PARALLEL_PRECISE:
4201 if (!parallel_sweeping_active) {
4202 if (FLAG_gc_verbose) {
4203 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
4204 reinterpret_cast<intptr_t>(p));
4205 }
4206 SweepPrecisely<SWEEP_ONLY,
4207 SWEEP_ON_MAIN_THREAD,
4208 IGNORE_SKIP_LIST,
4209 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
4210 pages_swept++;
4211 parallel_sweeping_active = true;
4212 } else {
4213 if (FLAG_gc_verbose) {
4214 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
4215 reinterpret_cast<intptr_t>(p));
4216 }
4217 p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
4218 space->IncreaseUnsweptFreeBytes(p);
4219 }
4220 space->set_end_of_unswept_pages(p);
4221 break;
4170 case PRECISE: { 4222 case PRECISE: {
4171 if (FLAG_gc_verbose) { 4223 if (FLAG_gc_verbose) {
4172 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n", 4224 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
4173 reinterpret_cast<intptr_t>(p)); 4225 reinterpret_cast<intptr_t>(p));
4174 } 4226 }
4175 if (space->identity() == CODE_SPACE && FLAG_zap_code_space) { 4227 if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
4176 SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, ZAP_FREE_SPACE>( 4228 SweepPrecisely<SWEEP_ONLY,
4177 space, p, NULL); 4229 SWEEP_ON_MAIN_THREAD,
4230 REBUILD_SKIP_LIST,
4231 ZAP_FREE_SPACE>(space, NULL, p, NULL);
4178 } else if (space->identity() == CODE_SPACE) { 4232 } else if (space->identity() == CODE_SPACE) {
4179 SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>( 4233 SweepPrecisely<SWEEP_ONLY,
4180 space, p, NULL); 4234 SWEEP_ON_MAIN_THREAD,
4235 REBUILD_SKIP_LIST,
4236 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
4181 } else { 4237 } else {
4182 SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>( 4238 SweepPrecisely<SWEEP_ONLY,
4183 space, p, NULL); 4239 SWEEP_ON_MAIN_THREAD,
4240 IGNORE_SKIP_LIST,
4241 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
4184 } 4242 }
4185 pages_swept++; 4243 pages_swept++;
4186 break; 4244 break;
4187 } 4245 }
4188 default: { 4246 default: {
4189 UNREACHABLE(); 4247 UNREACHABLE();
4190 } 4248 }
4191 } 4249 }
4192 } 4250 }
4193 4251
4194 if (FLAG_gc_verbose) { 4252 if (FLAG_gc_verbose) {
4195 PrintF("SweepSpace: %s (%d pages swept)\n", 4253 PrintF("SweepSpace: %s (%d pages swept)\n",
4196 AllocationSpaceName(space->identity()), 4254 AllocationSpaceName(space->identity()),
4197 pages_swept); 4255 pages_swept);
4198 } 4256 }
4199 4257
4200 // Give pages that are queued to be freed back to the OS. 4258 // Give pages that are queued to be freed back to the OS.
4201 heap()->FreeQueuedChunks(); 4259 heap()->FreeQueuedChunks();
4202 } 4260 }
4203 4261
4204 4262
4263 static bool ShouldStartSweeperThreads(MarkCompactCollector::SweeperType type) {
4264 return type == MarkCompactCollector::PARALLEL_CONSERVATIVE ||
4265 type == MarkCompactCollector::CONCURRENT_CONSERVATIVE ||
4266 type == MarkCompactCollector::PARALLEL_PRECISE ||
4267 type == MarkCompactCollector::CONCURRENT_PRECISE;
4268 }
4269
4270
4271 static bool ShouldWaitForSweeperThreads(
4272 MarkCompactCollector::SweeperType type) {
4273 return type == MarkCompactCollector::PARALLEL_CONSERVATIVE ||
4274 type == MarkCompactCollector::PARALLEL_PRECISE;
4275 }
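
The two helpers above separate "does this mode use sweeper threads at all" from "does the main thread block until they finish": both the parallel and concurrent variants start sweeper threads, but only the parallel variants wait synchronously, while the concurrent variants let sweeping complete alongside the mutator. Combined with the flag handling in SweepSpaces() below, the selection can be summarized by this standalone sketch (the names match the patch; the free function itself is illustrative only):

enum SweeperType {
  PARALLEL_CONSERVATIVE,
  CONCURRENT_CONSERVATIVE,
  PARALLEL_PRECISE,
  CONCURRENT_PRECISE,
  PRECISE
};

// Mirrors the precedence in SweepSpaces(): concurrent wins over parallel,
// --always-precise-sweeping upgrades the chosen mode to its precise variant,
// and sweep_precisely_ forces main-thread PRECISE sweeping.
static SweeperType ChooseSweeperType(bool parallel_sweeping,
                                     bool concurrent_sweeping,
                                     bool always_precise_sweeping,
                                     bool sweep_precisely) {
  SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE;
  if (parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
  if (concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
  if (always_precise_sweeping && parallel_sweeping) how_to_sweep = PARALLEL_PRECISE;
  if (always_precise_sweeping && concurrent_sweeping) how_to_sweep = CONCURRENT_PRECISE;
  if (sweep_precisely) how_to_sweep = PRECISE;
  return how_to_sweep;
}
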
4276
4277
4205 void MarkCompactCollector::SweepSpaces() { 4278 void MarkCompactCollector::SweepSpaces() {
4206 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); 4279 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
4207 #ifdef DEBUG 4280 #ifdef DEBUG
4208 state_ = SWEEP_SPACES; 4281 state_ = SWEEP_SPACES;
4209 #endif 4282 #endif
4210 SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE; 4283 SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE;
4211 if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE; 4284 if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
4212 if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE; 4285 if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
4213 4286 if (FLAG_always_precise_sweeping && FLAG_parallel_sweeping) {
4287 how_to_sweep = PARALLEL_PRECISE;
4288 }
4289 if (FLAG_always_precise_sweeping && FLAG_concurrent_sweeping) {
4290 how_to_sweep = CONCURRENT_PRECISE;
4291 }
4214 if (sweep_precisely_) how_to_sweep = PRECISE; 4292 if (sweep_precisely_) how_to_sweep = PRECISE;
4215 4293
4216 MoveEvacuationCandidatesToEndOfPagesList(); 4294 MoveEvacuationCandidatesToEndOfPagesList();
4217 4295
4218 // Noncompacting collections simply sweep the spaces to clear the mark 4296 // Noncompacting collections simply sweep the spaces to clear the mark
4219 // bits and free the nonlive blocks (for old and map spaces). We sweep 4297 // bits and free the nonlive blocks (for old and map spaces). We sweep
4220 // the map space last because freeing non-live maps overwrites them and 4298 // the map space last because freeing non-live maps overwrites them and
4221 // the other spaces rely on possibly non-live maps to get the sizes for 4299 // the other spaces rely on possibly non-live maps to get the sizes for
4222 // non-live objects. 4300 // non-live objects.
4223 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_OLDSPACE); 4301 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_OLDSPACE);
4224 { SequentialSweepingScope scope(this); 4302 { SequentialSweepingScope scope(this);
4225 SweepSpace(heap()->old_pointer_space(), how_to_sweep); 4303 SweepSpace(heap()->old_pointer_space(), how_to_sweep);
4226 SweepSpace(heap()->old_data_space(), how_to_sweep); 4304 SweepSpace(heap()->old_data_space(), how_to_sweep);
4227 } 4305 }
4228 4306
4229 if (how_to_sweep == PARALLEL_CONSERVATIVE || 4307 if (ShouldStartSweeperThreads(how_to_sweep)) {
4230 how_to_sweep == CONCURRENT_CONSERVATIVE) {
4231 StartSweeperThreads(); 4308 StartSweeperThreads();
4232 } 4309 }
4233 4310
4234 if (how_to_sweep == PARALLEL_CONSERVATIVE) { 4311 if (ShouldWaitForSweeperThreads(how_to_sweep)) {
4235 EnsureSweepingCompleted(); 4312 EnsureSweepingCompleted();
4236 } 4313 }
4237 } 4314 }
4238 RemoveDeadInvalidatedCode(); 4315 RemoveDeadInvalidatedCode();
4239 4316
4240 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_CODE); 4317 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_CODE);
4241 SweepSpace(heap()->code_space(), PRECISE); 4318 SweepSpace(heap()->code_space(), PRECISE);
4242 } 4319 }
4243 4320
4244 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_CELL); 4321 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_CELL);
(...skipping 15 matching lines...)
4260 4337
4261 // Deallocate evacuated candidate pages. 4338 // Deallocate evacuated candidate pages.
4262 ReleaseEvacuationCandidates(); 4339 ReleaseEvacuationCandidates();
4263 } 4340 }
4264 4341
4265 4342
4266 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) { 4343 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
4267 PageIterator it(space); 4344 PageIterator it(space);
4268 while (it.has_next()) { 4345 while (it.has_next()) {
4269 Page* p = it.next(); 4346 Page* p = it.next();
4270 if (p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_FINALIZE) { 4347 if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) {
4271 p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_DONE); 4348 p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE);
4272 p->MarkSweptConservatively(); 4349 if (space->swept_precisely()) {
4350 p->MarkSweptPrecisely();
4351 } else {
4352 p->MarkSweptConservatively();
4353 }
4273 } 4354 }
4274 ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE); 4355 ASSERT(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
4275 } 4356 }
4276 } 4357 }
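
ParallelSweepSpaceComplete() above is the main-thread half of the per-page handshake: SweepSpace() queues pages as SWEEPING_PENDING, a sweeper thread claims a page via TryParallelSweeping(), sweeps it and leaves it in SWEEPING_FINALIZE, and the main thread later flips it back to SWEEPING_DONE, marking the page swept precisely or conservatively according to the space. A hypothetical model of that handshake follows; the atomic compare-and-swap claim and the in-progress state are assumptions about TryParallelSweeping, not taken from this patch:

#include <atomic>

// Hypothetical page state machine; the state names mirror the patch, the rest
// is assumed for illustration.
enum ParallelSweepingState {
  SWEEPING_DONE,         // finalized (or never queued)
  SWEEPING_FINALIZE,     // swept by a sweeper thread, awaiting main-thread finalize
  SWEEPING_IN_PROGRESS,  // assumed intermediate state while a sweeper owns the page
  SWEEPING_PENDING       // queued by SweepSpace(), waiting for a sweeper
};

struct PageStub {
  std::atomic<int> state;
  PageStub() : state(SWEEPING_DONE) {}

  bool TryParallelSweeping() {  // sweeper thread: claim PENDING -> IN_PROGRESS
    int expected = SWEEPING_PENDING;
    return state.compare_exchange_strong(expected, SWEEPING_IN_PROGRESS);
  }
  void FinishSweeping() { state = SWEEPING_FINALIZE; }  // sweeper thread, when done
  void Finalize() { state = SWEEPING_DONE; }            // main thread, in
                                                        // ParallelSweepSpaceComplete()
};
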
4277 4358
4278 4359
4279 void MarkCompactCollector::ParallelSweepSpacesComplete() { 4360 void MarkCompactCollector::ParallelSweepSpacesComplete() {
4280 ParallelSweepSpaceComplete(heap()->old_pointer_space()); 4361 ParallelSweepSpaceComplete(heap()->old_pointer_space());
4281 ParallelSweepSpaceComplete(heap()->old_data_space()); 4362 ParallelSweepSpaceComplete(heap()->old_data_space());
4282 } 4363 }
4283 4364
4284 4365
(...skipping 210 matching lines...)
4495 while (buffer != NULL) { 4576 while (buffer != NULL) {
4496 SlotsBuffer* next_buffer = buffer->next(); 4577 SlotsBuffer* next_buffer = buffer->next();
4497 DeallocateBuffer(buffer); 4578 DeallocateBuffer(buffer);
4498 buffer = next_buffer; 4579 buffer = next_buffer;
4499 } 4580 }
4500 *buffer_address = NULL; 4581 *buffer_address = NULL;
4501 } 4582 }
4502 4583
4503 4584
4504 } } // namespace v8::internal 4585 } } // namespace v8::internal
