Chromium Code Reviews

Side by Side Diff: src/mark-compact.cc

Issue 398333002: Concurrent/parallel precise sweeping. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 5 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/v8.h" 5 #include "src/v8.h"
6 6
7 #include "src/base/atomicops.h" 7 #include "src/base/atomicops.h"
8 #include "src/code-stubs.h" 8 #include "src/code-stubs.h"
9 #include "src/compilation-cache.h" 9 #include "src/compilation-cache.h"
10 #include "src/cpu-profiler.h" 10 #include "src/cpu-profiler.h"
(...skipping 190 matching lines...)
201 current += object->Size(); 201 current += object->Size();
202 } 202 }
203 } 203 }
204 } 204 }
205 205
206 206
207 static void VerifyEvacuation(PagedSpace* space) { 207 static void VerifyEvacuation(PagedSpace* space) {
208 // TODO(hpayer): Bring back VerifyEvacuation for parallel-concurrently 208 // TODO(hpayer): Bring back VerifyEvacuation for parallel-concurrently
209 // swept pages. 209 // swept pages.
210 if ((FLAG_concurrent_sweeping || FLAG_parallel_sweeping) && 210 if ((FLAG_concurrent_sweeping || FLAG_parallel_sweeping) &&
211 !space->is_iterable()) return; 211 !space->swept_precisely()) return;
212 PageIterator it(space); 212 PageIterator it(space);
213 213
214 while (it.has_next()) { 214 while (it.has_next()) {
215 Page* p = it.next(); 215 Page* p = it.next();
216 if (p->IsEvacuationCandidate()) continue; 216 if (p->IsEvacuationCandidate()) continue;
217 VerifyEvacuation(p->area_start(), p->area_end()); 217 VerifyEvacuation(p->area_start(), p->area_end());
218 } 218 }
219 } 219 }
220 220
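The swept_precisely() check above (renamed from is_iterable() in this patch) is what makes the early return necessary: only a precisely swept page consists purely of live objects and sized free-space fillers, so a linear walk that reads each size header visits exactly the live objects. A minimal sketch of that invariant, using a hypothetical flat word-array layout (WalkPage is illustrative, not V8 code):

  #include <cstdio>

  // Fake page layout: each entry is a size header in words; positive = live
  // object, negative = free-space filler left behind by a precise sweep.
  static void WalkPage(const int* words, int n_words) {
    int pos = 0;
    while (pos < n_words) {
      int size = words[pos];
      if (size > 0) printf("live object at word %d, size %d\n", pos, size);
      pos += (size > 0) ? size : -size;  // advance by the recorded size
    }
  }

  int main() {
    const int page[] = {3, 0, 0, -2, 0, 4, 0, 0, 0};  // live(3) | filler(2) | live(4)
    WalkPage(page, 9);
    return 0;
  }

A conservatively swept page gives no such guarantee, which is why both VerifyEvacuation and the grey-object discovery below fall back to per-page handling in that case.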
221 221
(...skipping 1822 matching lines...)
2044 } 2044 }
2045 *cells = 0; 2045 *cells = 0;
2046 } 2046 }
2047 return survivors_size; 2047 return survivors_size;
2048 } 2048 }
2049 2049
2050 2050
2051 static void DiscoverGreyObjectsInSpace(Heap* heap, 2051 static void DiscoverGreyObjectsInSpace(Heap* heap,
2052 MarkingDeque* marking_deque, 2052 MarkingDeque* marking_deque,
2053 PagedSpace* space) { 2053 PagedSpace* space) {
2054 if (space->is_iterable()) { 2054 if (space->swept_precisely()) {
2055 HeapObjectIterator it(space); 2055 HeapObjectIterator it(space);
2056 DiscoverGreyObjectsWithIterator(heap, marking_deque, &it); 2056 DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
2057 } else { 2057 } else {
2058 PageIterator it(space); 2058 PageIterator it(space);
2059 while (it.has_next()) { 2059 while (it.has_next()) {
2060 Page* p = it.next(); 2060 Page* p = it.next();
2061 DiscoverGreyObjectsOnPage(marking_deque, p); 2061 DiscoverGreyObjectsOnPage(marking_deque, p);
2062 if (marking_deque->IsFull()) return; 2062 if (marking_deque->IsFull()) return;
2063 } 2063 }
2064 } 2064 }
(...skipping 1060 matching lines...)
3125 } 3125 }
3126 3126
3127 3127
3128 void MarkCompactCollector::EvacuatePages() { 3128 void MarkCompactCollector::EvacuatePages() {
3129 int npages = evacuation_candidates_.length(); 3129 int npages = evacuation_candidates_.length();
3130 for (int i = 0; i < npages; i++) { 3130 for (int i = 0; i < npages; i++) {
3131 Page* p = evacuation_candidates_[i]; 3131 Page* p = evacuation_candidates_[i];
3132 ASSERT(p->IsEvacuationCandidate() || 3132 ASSERT(p->IsEvacuationCandidate() ||
3133 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3133 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3134 ASSERT(static_cast<int>(p->parallel_sweeping()) == 3134 ASSERT(static_cast<int>(p->parallel_sweeping()) ==
3135 MemoryChunk::PARALLEL_SWEEPING_DONE); 3135 MemoryChunk::SWEEPING_DONE);
3136 if (p->IsEvacuationCandidate()) { 3136 if (p->IsEvacuationCandidate()) {
3137 // During compaction we might have to request a new page. 3137 // During compaction we might have to request a new page.
3138 // Check that the space still has room for that. 3138 // Check that the space still has room for that.
3139 if (static_cast<PagedSpace*>(p->owner())->CanExpand()) { 3139 if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
3140 EvacuateLiveObjectsFromPage(p); 3140 EvacuateLiveObjectsFromPage(p);
3141 } else { 3141 } else {
3142 // Without room for expansion evacuation is not guaranteed to succeed. 3142 // Without room for expansion evacuation is not guaranteed to succeed.
3143 // Pessimistically abandon unevacuated pages. 3143 // Pessimistically abandon unevacuated pages.
3144 for (int j = i; j < npages; j++) { 3144 for (int j = i; j < npages; j++) {
3145 Page* page = evacuation_candidates_[j]; 3145 Page* page = evacuation_candidates_[j];
(...skipping 75 matching lines...)
3221 IGNORE_SKIP_LIST 3221 IGNORE_SKIP_LIST
3222 }; 3222 };
3223 3223
3224 3224
3225 enum FreeSpaceTreatmentMode { 3225 enum FreeSpaceTreatmentMode {
3226 IGNORE_FREE_SPACE, 3226 IGNORE_FREE_SPACE,
3227 ZAP_FREE_SPACE 3227 ZAP_FREE_SPACE
3228 }; 3228 };
3229 3229
3230 3230
3231 template<MarkCompactCollector::SweepingParallelism mode>
3232 static intptr_t Free(PagedSpace* space,
3233 FreeList* free_list,
3234 Address start,
3235 int size) {
3236 if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
3237 return space->Free(start, size);
Jarin 2014/07/18 08:31:29 Could we ASSERT(free_list == NULL) here?
Hannes Payer (out of office) 2014/07/18 10:47:20 Done.
3238 } else {
3239 return size - free_list->Free(start, size);
3240 }
3241 }
3242
3243
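The new Free<mode> helper above selects its behavior at compile time: the main-thread path hands the block back to the space's own accounting, while the parallel path records it on a caller-supplied, thread-private free list and reports how many bytes became reusable. A self-contained sketch of the same dispatch pattern, using hypothetical stand-in types (FakeSpace, FakeFreeList, FreeBlock) and including, as an assumption about the follow-up promised in the thread above, the ASSERT(free_list == NULL) Jarin asked for:

  #include <cassert>
  #include <cstdint>

  enum Mode { MAIN_THREAD, IN_PARALLEL };

  struct FakeFreeList {
    // Stand-in for FreeList::Free: assumed to return the bytes it could not
    // reuse (too small for any list), so "size - wasted" is the usable amount.
    intptr_t Free(intptr_t /*start*/, int size) { return size < 32 ? size : 0; }
  };

  struct FakeSpace {
    intptr_t freed = 0;
    intptr_t Free(intptr_t /*start*/, int size) { freed += size; return size; }
  };

  template <Mode mode>
  static intptr_t FreeBlock(FakeSpace* space, FakeFreeList* free_list,
                            intptr_t start, int size) {
    if (mode == MAIN_THREAD) {
      assert(free_list == nullptr);  // the check suggested in the review above
      return space->Free(start, size);
    }
    return size - free_list->Free(start, size);
  }

  int main() {
    FakeSpace space;
    FakeFreeList list;
    intptr_t a = FreeBlock<MAIN_THREAD>(&space, nullptr, 0x1000, 128);
    intptr_t b = FreeBlock<IN_PARALLEL>(&space, &list, 0x2000, 128);
    return (a == 128 && b == 128) ? 0 : 1;
  }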
3231 // Sweep a space precisely. After this has been done the space can 3244 // Sweep a space precisely. After this has been done the space can
3232 // be iterated precisely, hitting only the live objects. Code space 3245 // be iterated precisely, hitting only the live objects. Code space
3233 // is always swept precisely because we want to be able to iterate 3246 // is always swept precisely because we want to be able to iterate
3234 // over it. Map space is swept precisely, because it is not compacted. 3247 // over it. Map space is swept precisely, because it is not compacted.
3235 // Slots in live objects pointing into evacuation candidates are updated 3248 // Slots in live objects pointing into evacuation candidates are updated
3236 // if requested. 3249 // if requested.
3237 template<SweepingMode sweeping_mode, 3250 template<SweepingMode sweeping_mode,
3251 MarkCompactCollector::SweepingParallelism parallelism,
3238 SkipListRebuildingMode skip_list_mode, 3252 SkipListRebuildingMode skip_list_mode,
3239 FreeSpaceTreatmentMode free_space_mode> 3253 FreeSpaceTreatmentMode free_space_mode>
3240 static void SweepPrecisely(PagedSpace* space, 3254 static int SweepPrecisely(PagedSpace* space,
3255 FreeList* free_list,
3241 Page* p, 3256 Page* p,
3242 ObjectVisitor* v) { 3257 ObjectVisitor* v) {
3243 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); 3258 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
3244 ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST, 3259 ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3245 space->identity() == CODE_SPACE); 3260 space->identity() == CODE_SPACE);
3246 ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); 3261 ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3262 ASSERT(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD ||
3263 sweeping_mode == SWEEP_ONLY);
3247 3264
3248 double start_time = 0.0; 3265 double start_time = 0.0;
3249 if (FLAG_print_cumulative_gc_stat) { 3266 if (FLAG_print_cumulative_gc_stat) {
3250 start_time = base::OS::TimeCurrentMillis(); 3267 start_time = base::OS::TimeCurrentMillis();
3251 } 3268 }
3252 3269
3253 p->MarkSweptPrecisely(); 3270 if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
3271 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
3272 } else {
3273 p->MarkSweptPrecisely();
3274 }
3254 3275
3255 Address free_start = p->area_start(); 3276 Address free_start = p->area_start();
3256 ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); 3277 ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3257 int offsets[16]; 3278 int offsets[16];
3258 3279
3259 SkipList* skip_list = p->skip_list(); 3280 SkipList* skip_list = p->skip_list();
3260 int curr_region = -1; 3281 int curr_region = -1;
3261 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) { 3282 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3262 skip_list->Clear(); 3283 skip_list->Clear();
3263 } 3284 }
3264 3285
3286 intptr_t freed_bytes = 0;
3287 intptr_t max_freed_bytes = 0;
3288
3265 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { 3289 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3266 Address cell_base = it.CurrentCellBase(); 3290 Address cell_base = it.CurrentCellBase();
3267 MarkBit::CellType* cell = it.CurrentCell(); 3291 MarkBit::CellType* cell = it.CurrentCell();
3268 int live_objects = MarkWordToObjectStarts(*cell, offsets); 3292 int live_objects = MarkWordToObjectStarts(*cell, offsets);
3269 int live_index = 0; 3293 int live_index = 0;
3270 for ( ; live_objects != 0; live_objects--) { 3294 for ( ; live_objects != 0; live_objects--) {
3271 Address free_end = cell_base + offsets[live_index++] * kPointerSize; 3295 Address free_end = cell_base + offsets[live_index++] * kPointerSize;
3272 if (free_end != free_start) { 3296 if (free_end != free_start) {
3297 int size = static_cast<int>(free_end - free_start);
3273 if (free_space_mode == ZAP_FREE_SPACE) { 3298 if (free_space_mode == ZAP_FREE_SPACE) {
3274 memset(free_start, 0xcc, static_cast<int>(free_end - free_start)); 3299 memset(free_start, 0xcc, size);
3275 } 3300 }
3276 space->Free(free_start, static_cast<int>(free_end - free_start)); 3301 freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3302 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3277 #ifdef ENABLE_GDB_JIT_INTERFACE 3303 #ifdef ENABLE_GDB_JIT_INTERFACE
3278 if (FLAG_gdbjit && space->identity() == CODE_SPACE) { 3304 if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3279 GDBJITInterface::RemoveCodeRange(free_start, free_end); 3305 GDBJITInterface::RemoveCodeRange(free_start, free_end);
3280 } 3306 }
3281 #endif 3307 #endif
3282 } 3308 }
3283 HeapObject* live_object = HeapObject::FromAddress(free_end); 3309 HeapObject* live_object = HeapObject::FromAddress(free_end);
3284 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object))); 3310 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
3285 Map* map = live_object->map(); 3311 Map* map = live_object->map();
3286 int size = live_object->SizeFromMap(map); 3312 int size = live_object->SizeFromMap(map);
(...skipping 10 matching lines...)
3297 skip_list->AddObject(free_end, size); 3323 skip_list->AddObject(free_end, size);
3298 curr_region = new_region_end; 3324 curr_region = new_region_end;
3299 } 3325 }
3300 } 3326 }
3301 free_start = free_end + size; 3327 free_start = free_end + size;
3302 } 3328 }
3303 // Clear marking bits for current cell. 3329 // Clear marking bits for current cell.
3304 *cell = 0; 3330 *cell = 0;
3305 } 3331 }
3306 if (free_start != p->area_end()) { 3332 if (free_start != p->area_end()) {
3333 int size = static_cast<int>(p->area_end() - free_start);
3307 if (free_space_mode == ZAP_FREE_SPACE) { 3334 if (free_space_mode == ZAP_FREE_SPACE) {
3308 memset(free_start, 0xcc, static_cast<int>(p->area_end() - free_start)); 3335 memset(free_start, 0xcc, size);
3309 } 3336 }
3310 space->Free(free_start, static_cast<int>(p->area_end() - free_start)); 3337 freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3338 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3311 #ifdef ENABLE_GDB_JIT_INTERFACE 3339 #ifdef ENABLE_GDB_JIT_INTERFACE
3312 if (FLAG_gdbjit && space->identity() == CODE_SPACE) { 3340 if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3313 GDBJITInterface::RemoveCodeRange(free_start, p->area_end()); 3341 GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
3314 } 3342 }
3315 #endif 3343 #endif
3316 } 3344 }
3317 p->ResetLiveBytes(); 3345 p->ResetLiveBytes();
3318 if (FLAG_print_cumulative_gc_stat) { 3346 if (FLAG_print_cumulative_gc_stat) {
3319 space->heap()->AddSweepingTime(base::OS::TimeCurrentMillis() - start_time); 3347 space->heap()->AddSweepingTime(base::OS::TimeCurrentMillis() - start_time);
3320 } 3348 }
3349 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
3321 } 3350 }
3322 3351
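For reference, the offsets[] array that MarkWordToObjectStarts fills in above encodes which words of a 32-word cell begin a live object; the gap between the end of one live object and the start of the next is what gets freed (or zapped). A simplified, standalone decoder illustrating that mapping (DecodeObjectStarts is illustrative, not V8's actual routine):

  #include <cstdint>
  #include <cstdio>

  // Each set bit k in a mark-word cell means a live object starts at
  // cell_base + k * kPointerSize.
  static int DecodeObjectStarts(uint32_t cell, int offsets[32]) {
    int count = 0;
    for (int bit = 0; bit < 32; ++bit) {
      if (cell & (1u << bit)) offsets[count++] = bit;
    }
    return count;
  }

  int main() {
    int offsets[32];
    int live = DecodeObjectStarts((1u << 0) | (1u << 5), offsets);  // bits 0 and 5 set
    printf("%d live objects, second starts at word offset %d\n", live, offsets[1]);
    return 0;
  }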
3323 3352
3324 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) { 3353 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
3325 Page* p = Page::FromAddress(code->address()); 3354 Page* p = Page::FromAddress(code->address());
3326 3355
3327 if (p->IsEvacuationCandidate() || 3356 if (p->IsEvacuationCandidate() ||
3328 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { 3357 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3329 return false; 3358 return false;
3330 } 3359 }
(...skipping 215 matching lines...)
3546 } 3575 }
3547 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); 3576 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3548 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); 3577 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
3549 3578
3550 switch (space->identity()) { 3579 switch (space->identity()) {
3551 case OLD_DATA_SPACE: 3580 case OLD_DATA_SPACE:
3552 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p); 3581 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
3553 break; 3582 break;
3554 case OLD_POINTER_SPACE: 3583 case OLD_POINTER_SPACE:
3555 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, 3584 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
3585 SWEEP_ON_MAIN_THREAD,
3556 IGNORE_SKIP_LIST, 3586 IGNORE_SKIP_LIST,
3557 IGNORE_FREE_SPACE>( 3587 IGNORE_FREE_SPACE>(
3558 space, p, &updating_visitor); 3588 space, NULL, p, &updating_visitor);
3559 break; 3589 break;
3560 case CODE_SPACE: 3590 case CODE_SPACE:
3561 if (FLAG_zap_code_space) { 3591 if (FLAG_zap_code_space) {
3562 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, 3592 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
3593 SWEEP_ON_MAIN_THREAD,
3563 REBUILD_SKIP_LIST, 3594 REBUILD_SKIP_LIST,
3564 ZAP_FREE_SPACE>( 3595 ZAP_FREE_SPACE>(
3565 space, p, &updating_visitor); 3596 space, NULL, p, &updating_visitor);
3566 } else { 3597 } else {
3567 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, 3598 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
3599 SWEEP_ON_MAIN_THREAD,
3568 REBUILD_SKIP_LIST, 3600 REBUILD_SKIP_LIST,
3569 IGNORE_FREE_SPACE>( 3601 IGNORE_FREE_SPACE>(
3570 space, p, &updating_visitor); 3602 space, NULL, p, &updating_visitor);
3571 } 3603 }
3572 break; 3604 break;
3573 default: 3605 default:
3574 UNREACHABLE(); 3606 UNREACHABLE();
3575 break; 3607 break;
3576 } 3608 }
3577 } 3609 }
3578 } 3610 }
3579 } 3611 }
3580 3612
(...skipping 351 matching lines...)
3932 } 3964 }
3933 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1; 3965 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
3934 ASSERT((first_set_bit & cell) == first_set_bit); 3966 ASSERT((first_set_bit & cell) == first_set_bit);
3935 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets); 3967 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
3936 ASSERT(live_objects == 1); 3968 ASSERT(live_objects == 1);
3937 USE(live_objects); 3969 USE(live_objects);
3938 return block_address + offsets[0] * kPointerSize; 3970 return block_address + offsets[0] * kPointerSize;
3939 } 3971 }
3940 3972
3941 3973
3942 template<MarkCompactCollector::SweepingParallelism mode>
3943 static intptr_t Free(PagedSpace* space,
3944 FreeList* free_list,
3945 Address start,
3946 int size) {
3947 if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
3948 return space->Free(start, size);
3949 } else {
3950 return size - free_list->Free(start, size);
3951 }
3952 }
3953
3954
3955 // Force instantiation of templatized SweepConservatively method for 3974 // Force instantiation of templatized SweepConservatively method for
3956 // SWEEP_ON_MAIN_THREAD mode. 3975 // SWEEP_ON_MAIN_THREAD mode.
3957 template intptr_t MarkCompactCollector:: 3976 template intptr_t MarkCompactCollector::
3958 SweepConservatively<MarkCompactCollector::SWEEP_ON_MAIN_THREAD>( 3977 SweepConservatively<MarkCompactCollector::SWEEP_ON_MAIN_THREAD>(
3959 PagedSpace*, FreeList*, Page*); 3978 PagedSpace*, FreeList*, Page*);
3960 3979
3961 3980
3962 // Force instantiation of templatized SweepConservatively method for 3981 // Force instantiation of templatized SweepConservatively method for
3963 // SWEEP_IN_PARALLEL mode. 3982 // SWEEP_IN_PARALLEL mode.
3964 template intptr_t MarkCompactCollector:: 3983 template intptr_t MarkCompactCollector::
3965 SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>( 3984 SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>(
3966 PagedSpace*, FreeList*, Page*); 3985 PagedSpace*, FreeList*, Page*);
3967 3986
3968 3987
3969 // Sweeps a space conservatively. After this has been done the larger free 3988 // Sweeps a space conservatively. After this has been done the larger free
3970 // spaces have been put on the free list and the smaller ones have been 3989 // spaces have been put on the free list and the smaller ones have been
3971 // ignored and left untouched. A free space is always either ignored or put 3990 // ignored and left untouched. A free space is always either ignored or put
3972 // on the free list, never split up into two parts. This is important 3991 // on the free list, never split up into two parts. This is important
3973 // because it means that any FreeSpace maps left actually describe a region of 3992 // because it means that any FreeSpace maps left actually describe a region of
3974 // memory that can be ignored when scanning. Dead objects other than free 3993 // memory that can be ignored when scanning. Dead objects other than free
3975 // spaces will not contain the free space map. 3994 // spaces will not contain the free space map.
3976 template<MarkCompactCollector::SweepingParallelism mode> 3995 template<MarkCompactCollector::SweepingParallelism mode>
3977 intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, 3996 int MarkCompactCollector::SweepConservatively(PagedSpace* space,
3978 FreeList* free_list, 3997 FreeList* free_list,
3979 Page* p) { 3998 Page* p) {
3980 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); 3999 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
3981 ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL && 4000 ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
3982 free_list != NULL) || 4001 free_list != NULL) ||
3983 (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD && 4002 (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD &&
3984 free_list == NULL)); 4003 free_list == NULL));
3985 4004
3986 // When parallel sweeping is active, the page will be marked after 4005 // When parallel sweeping is active, the page will be marked after
3987 // sweeping by the main thread. 4006 // sweeping by the main thread.
3988 if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) { 4007 if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) {
3989 p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE); 4008 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
3990 } else { 4009 } else {
3991 p->MarkSweptConservatively(); 4010 p->MarkSweptConservatively();
3992 } 4011 }
3993 4012
3994 intptr_t freed_bytes = 0; 4013 intptr_t freed_bytes = 0;
3995 intptr_t max_freed_bytes = 0; 4014 intptr_t max_freed_bytes = 0;
3996 size_t size = 0; 4015 size_t size = 0;
3997 4016
3998 // Skip over all the dead objects at the start of the page and mark them free. 4017 // Skip over all the dead objects at the start of the page and mark them free.
3999 Address cell_base = 0; 4018 Address cell_base = 0;
(...skipping 76 matching lines...)
4076 int required_freed_bytes) { 4095 int required_freed_bytes) {
4077 PageIterator it(space); 4096 PageIterator it(space);
4078 FreeList* free_list = space == heap()->old_pointer_space() 4097 FreeList* free_list = space == heap()->old_pointer_space()
4079 ? free_list_old_pointer_space_.get() 4098 ? free_list_old_pointer_space_.get()
4080 : free_list_old_data_space_.get(); 4099 : free_list_old_data_space_.get();
4081 FreeList private_free_list(space); 4100 FreeList private_free_list(space);
4082 int max_freed = 0; 4101 int max_freed = 0;
4083 int max_freed_overall = 0; 4102 int max_freed_overall = 0;
4084 while (it.has_next()) { 4103 while (it.has_next()) {
4085 Page* p = it.next(); 4104 Page* p = it.next();
4086
4087 if (p->TryParallelSweeping()) { 4105 if (p->TryParallelSweeping()) {
4088 max_freed = static_cast<int>(SweepConservatively<SWEEP_IN_PARALLEL>( 4106 if (space->swept_precisely()) {
4089 space, &private_free_list, p)); 4107 max_freed = SweepPrecisely<SWEEP_ONLY,
4108 SWEEP_IN_PARALLEL,
4109 IGNORE_SKIP_LIST,
4110 IGNORE_FREE_SPACE>(
4111 space, &private_free_list, p, NULL);
4112 } else {
4113 max_freed = SweepConservatively<SWEEP_IN_PARALLEL>(
4114 space, &private_free_list, p);
4115 }
4090 ASSERT(max_freed >= 0); 4116 ASSERT(max_freed >= 0);
4091 free_list->Concatenate(&private_free_list); 4117 free_list->Concatenate(&private_free_list);
4092 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) { 4118 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
4093 return max_freed; 4119 return max_freed;
4094 } 4120 }
4095 max_freed_overall = Max(max_freed, max_freed_overall); 4121 max_freed_overall = Max(max_freed, max_freed_overall);
4096 } 4122 }
4097 if (p == space->end_of_unswept_pages()) break; 4123 if (p == space->end_of_unswept_pages()) break;
4098 } 4124 }
4099 return max_freed_overall; 4125 return max_freed_overall;
4100 } 4126 }
4101 4127
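SweepInParallel above has each sweeper accumulate freed blocks into a private FreeList and merge it into the space's shared list once per page, which limits how often the shared list has to be synchronized; it also returns early as soon as one page yields required_freed_bytes. A minimal sketch of that accumulate-then-concatenate pattern, with stand-in types (Block, SharedFreeList, SweepOnePage) rather than V8's FreeList:

  #include <cstdint>
  #include <mutex>
  #include <vector>

  struct Block { intptr_t start; int size; };

  struct SharedFreeList {
    std::mutex mu;
    std::vector<Block> blocks;
    // Merge a thread-private list under the lock, once per swept page.
    void Concatenate(std::vector<Block>* priv) {
      std::lock_guard<std::mutex> lock(mu);
      blocks.insert(blocks.end(), priv->begin(), priv->end());
      priv->clear();
    }
  };

  // Returns the largest block freed from this "page" of dead regions.
  static int SweepOnePage(const std::vector<Block>& dead, std::vector<Block>* priv) {
    int max_freed = 0;
    for (const Block& b : dead) {
      priv->push_back(b);
      if (b.size > max_freed) max_freed = b.size;
    }
    return max_freed;
  }

  int main() {
    SharedFreeList shared;
    std::vector<Block> priv;
    int max_freed = SweepOnePage({{0x1000, 64}, {0x2000, 256}}, &priv);
    shared.Concatenate(&priv);
    return max_freed == 256 ? 0 : 1;
  }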
4102 4128
4103 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { 4129 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
4104 space->set_is_iterable(sweeper == PRECISE); 4130 space->set_swept_precisely(sweeper == PRECISE);
Jarin 2014/07/18 08:31:29 I think the flag should be also set if sweeper is
Hannes Payer (out of office) 2014/07/18 10:47:20 Good catch! Done.
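Given the reply above, the follow-up patch set presumably extends the condition to the new precise sweeper types, along the lines of this hypothetical version (not part of the diff shown here):

  space->set_swept_precisely(sweeper == PRECISE || sweeper == PARALLEL_PRECISE || sweeper == CONCURRENT_PRECISE);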
4105 space->ClearStats(); 4131 space->ClearStats();
4106 4132
4107 // We defensively initialize end_of_unswept_pages_ here with the first page 4133 // We defensively initialize end_of_unswept_pages_ here with the first page
4108 // of the pages list. 4134 // of the pages list.
4109 space->set_end_of_unswept_pages(space->FirstPage()); 4135 space->set_end_of_unswept_pages(space->FirstPage());
4110 4136
4111 PageIterator it(space); 4137 PageIterator it(space);
4112 4138
4113 int pages_swept = 0; 4139 int pages_swept = 0;
4114 bool unused_page_present = false; 4140 bool unused_page_present = false;
4115 bool parallel_sweeping_active = false; 4141 bool parallel_sweeping_active = false;
4116 4142
4117 while (it.has_next()) { 4143 while (it.has_next()) {
4118 Page* p = it.next(); 4144 Page* p = it.next();
4119 ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE); 4145 ASSERT(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
4120 4146
4121 // Clear sweeping flags indicating that marking bits are still intact. 4147 // Clear sweeping flags indicating that marking bits are still intact.
4122 p->ClearSweptPrecisely(); 4148 p->ClearSweptPrecisely();
4123 p->ClearSweptConservatively(); 4149 p->ClearSweptConservatively();
4124 4150
4125 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) || 4151 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
4126 p->IsEvacuationCandidate()) { 4152 p->IsEvacuationCandidate()) {
4127 // Will be processed in EvacuateNewSpaceAndCandidates. 4153 // Will be processed in EvacuateNewSpaceAndCandidates.
4128 ASSERT(evacuation_candidates_.length() > 0); 4154 ASSERT(evacuation_candidates_.length() > 0);
4129 continue; 4155 continue;
(...skipping 24 matching lines...)
4154 reinterpret_cast<intptr_t>(p)); 4180 reinterpret_cast<intptr_t>(p));
4155 } 4181 }
4156 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p); 4182 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
4157 pages_swept++; 4183 pages_swept++;
4158 parallel_sweeping_active = true; 4184 parallel_sweeping_active = true;
4159 } else { 4185 } else {
4160 if (FLAG_gc_verbose) { 4186 if (FLAG_gc_verbose) {
4161 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n", 4187 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
4162 reinterpret_cast<intptr_t>(p)); 4188 reinterpret_cast<intptr_t>(p));
4163 } 4189 }
4164 p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_PENDING); 4190 p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
4165 space->IncreaseUnsweptFreeBytes(p); 4191 space->IncreaseUnsweptFreeBytes(p);
4166 } 4192 }
4167 space->set_end_of_unswept_pages(p); 4193 space->set_end_of_unswept_pages(p);
4168 break; 4194 break;
4169 } 4195 }
4196 case CONCURRENT_PRECISE:
4197 case PARALLEL_PRECISE:
4198 if (!parallel_sweeping_active) {
4199 if (FLAG_gc_verbose) {
4200 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
4201 reinterpret_cast<intptr_t>(p));
4202 }
4203 SweepPrecisely<SWEEP_ONLY,
4204 SWEEP_ON_MAIN_THREAD,
4205 IGNORE_SKIP_LIST,
4206 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
4207 pages_swept++;
4208 parallel_sweeping_active = true;
4209 } else {
4210 if (FLAG_gc_verbose) {
4211 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
4212 reinterpret_cast<intptr_t>(p));
4213 }
4214 p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
4215 space->IncreaseUnsweptFreeBytes(p);
4216 }
4217 space->set_end_of_unswept_pages(p);
4218 break;
4170 case PRECISE: { 4219 case PRECISE: {
4171 if (FLAG_gc_verbose) { 4220 if (FLAG_gc_verbose) {
4172 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n", 4221 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
4173 reinterpret_cast<intptr_t>(p)); 4222 reinterpret_cast<intptr_t>(p));
4174 } 4223 }
4175 if (space->identity() == CODE_SPACE && FLAG_zap_code_space) { 4224 if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
4176 SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, ZAP_FREE_SPACE>( 4225 SweepPrecisely<SWEEP_ONLY,
4177 space, p, NULL); 4226 SWEEP_ON_MAIN_THREAD,
4227 REBUILD_SKIP_LIST,
4228 ZAP_FREE_SPACE>(space, NULL, p, NULL);
4178 } else if (space->identity() == CODE_SPACE) { 4229 } else if (space->identity() == CODE_SPACE) {
4179 SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>( 4230 SweepPrecisely<SWEEP_ONLY,
4180 space, p, NULL); 4231 SWEEP_ON_MAIN_THREAD,
4232 REBUILD_SKIP_LIST,
4233 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
4181 } else { 4234 } else {
4182 SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>( 4235 SweepPrecisely<SWEEP_ONLY,
4183 space, p, NULL); 4236 SWEEP_ON_MAIN_THREAD,
4237 IGNORE_SKIP_LIST,
4238 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
4184 } 4239 }
4185 pages_swept++; 4240 pages_swept++;
4186 break; 4241 break;
4187 } 4242 }
4188 default: { 4243 default: {
4189 UNREACHABLE(); 4244 UNREACHABLE();
4190 } 4245 }
4191 } 4246 }
4192 } 4247 }
4193 4248
4194 if (FLAG_gc_verbose) { 4249 if (FLAG_gc_verbose) {
4195 PrintF("SweepSpace: %s (%d pages swept)\n", 4250 PrintF("SweepSpace: %s (%d pages swept)\n",
4196 AllocationSpaceName(space->identity()), 4251 AllocationSpaceName(space->identity()),
4197 pages_swept); 4252 pages_swept);
4198 } 4253 }
4199 4254
4200 // Give pages that are queued to be freed back to the OS. 4255 // Give pages that are queued to be freed back to the OS.
4201 heap()->FreeQueuedChunks(); 4256 heap()->FreeQueuedChunks();
4202 } 4257 }
4203 4258
4204 4259
4260 static bool ShouldStartSweeperThreads(MarkCompactCollector::SweeperType type) {
4261 return type == MarkCompactCollector::PARALLEL_CONSERVATIVE ||
4262 type == MarkCompactCollector::CONCURRENT_CONSERVATIVE ||
4263 type == MarkCompactCollector::PARALLEL_PRECISE ||
4264 type == MarkCompactCollector::CONCURRENT_PRECISE;
4265 }
4266
4267
4268 static bool ShouldWaitForSweeperThreads(
4269 MarkCompactCollector::SweeperType type) {
4270 return type == MarkCompactCollector::PARALLEL_CONSERVATIVE ||
4271 type == MarkCompactCollector::PARALLEL_PRECISE;
4272 }
4273
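The two new predicates above capture the split this patch introduces: every threaded mode, parallel or concurrent, conservative or precise, starts sweeper threads, but only the PARALLEL_* modes block on them before the GC pause ends; the CONCURRENT_* modes let sweeping overlap the mutator. A rough sketch of that control flow, with std::thread and the StartsThreads/WaitsForThreads helpers standing in for V8's sweeper-thread machinery:

  #include <thread>
  #include <vector>

  enum SweeperType { PRECISE, PARALLEL_CONSERVATIVE, CONCURRENT_CONSERVATIVE,
                     PARALLEL_PRECISE, CONCURRENT_PRECISE };

  static bool StartsThreads(SweeperType t) { return t != PRECISE; }
  static bool WaitsForThreads(SweeperType t) {
    return t == PARALLEL_CONSERVATIVE || t == PARALLEL_PRECISE;
  }

  int main() {
    SweeperType how = CONCURRENT_PRECISE;
    std::vector<std::thread> sweepers;
    if (StartsThreads(how)) sweepers.emplace_back([] { /* sweep pages off the main thread */ });
    if (WaitsForThreads(how)) {
      for (auto& t : sweepers) t.join();  // PARALLEL_*: sweeping finishes inside the pause
    }
    // ... the mutator would resume here; CONCURRENT_* modes finalize sweeping later ...
    for (auto& t : sweepers) if (t.joinable()) t.join();  // stand-in for that later finalization
    return 0;
  }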
4274
4205 void MarkCompactCollector::SweepSpaces() { 4275 void MarkCompactCollector::SweepSpaces() {
4206 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); 4276 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
4207 #ifdef DEBUG 4277 #ifdef DEBUG
4208 state_ = SWEEP_SPACES; 4278 state_ = SWEEP_SPACES;
4209 #endif 4279 #endif
4210 SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE; 4280 SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE;
4211 if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE; 4281 if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
4212 if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE; 4282 if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
4213 4283 if (FLAG_always_precise_sweeping && FLAG_parallel_sweeping) {
4284 how_to_sweep = PARALLEL_PRECISE;
4285 }
4286 if (FLAG_always_precise_sweeping && FLAG_concurrent_sweeping) {
4287 how_to_sweep = CONCURRENT_PRECISE;
4288 }
4214 if (sweep_precisely_) how_to_sweep = PRECISE; 4289 if (sweep_precisely_) how_to_sweep = PRECISE;
Jarin 2014/07/18 08:31:29 I am wondering whether we want sweep in parallel h
Hannes Payer (out of office) 2014/07/18 10:47:20 I will address this comment in a separate cl.
4215 4290
4216 MoveEvacuationCandidatesToEndOfPagesList(); 4291 MoveEvacuationCandidatesToEndOfPagesList();
4217 4292
4218 // Noncompacting collections simply sweep the spaces to clear the mark 4293 // Noncompacting collections simply sweep the spaces to clear the mark
4219 // bits and free the nonlive blocks (for old and map spaces). We sweep 4294 // bits and free the nonlive blocks (for old and map spaces). We sweep
4220 // the map space last because freeing non-live maps overwrites them and 4295 // the map space last because freeing non-live maps overwrites them and
4221 // the other spaces rely on possibly non-live maps to get the sizes for 4296 // the other spaces rely on possibly non-live maps to get the sizes for
4222 // non-live objects. 4297 // non-live objects.
4223 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_OLDSPACE); 4298 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_OLDSPACE);
4224 { SequentialSweepingScope scope(this); 4299 { SequentialSweepingScope scope(this);
4225 SweepSpace(heap()->old_pointer_space(), how_to_sweep); 4300 SweepSpace(heap()->old_pointer_space(), how_to_sweep);
4226 SweepSpace(heap()->old_data_space(), how_to_sweep); 4301 SweepSpace(heap()->old_data_space(), how_to_sweep);
4227 } 4302 }
4228 4303
4229 if (how_to_sweep == PARALLEL_CONSERVATIVE || 4304 if (ShouldStartSweeperThreads(how_to_sweep)) {
4230 how_to_sweep == CONCURRENT_CONSERVATIVE) {
4231 StartSweeperThreads(); 4305 StartSweeperThreads();
4232 } 4306 }
4233 4307
4234 if (how_to_sweep == PARALLEL_CONSERVATIVE) { 4308 if (ShouldWaitForSweeperThreads(how_to_sweep)) {
4235 EnsureSweepingCompleted(); 4309 EnsureSweepingCompleted();
4236 } 4310 }
4237 } 4311 }
4238 RemoveDeadInvalidatedCode(); 4312 RemoveDeadInvalidatedCode();
4239 4313
4240 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_CODE); 4314 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_CODE);
4241 SweepSpace(heap()->code_space(), PRECISE); 4315 SweepSpace(heap()->code_space(), PRECISE);
4242 } 4316 }
4243 4317
4244 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_CELL); 4318 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_CELL);
(...skipping 15 matching lines...)
4260 4334
4261 // Deallocate evacuated candidate pages. 4335 // Deallocate evacuated candidate pages.
4262 ReleaseEvacuationCandidates(); 4336 ReleaseEvacuationCandidates();
4263 } 4337 }
4264 4338
4265 4339
4266 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) { 4340 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
4267 PageIterator it(space); 4341 PageIterator it(space);
4268 while (it.has_next()) { 4342 while (it.has_next()) {
4269 Page* p = it.next(); 4343 Page* p = it.next();
4270 if (p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_FINALIZE) { 4344 if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) {
4271 p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_DONE); 4345 p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE);
4272 p->MarkSweptConservatively(); 4346 if (space->swept_precisely()) {
4347 p->MarkSweptPrecisely();
4348 } else {
4349 p->MarkSweptConservatively();
4350 }
4273 } 4351 }
4274 ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE); 4352 ASSERT(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
4275 } 4353 }
4276 } 4354 }
4277 4355
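The SWEEPING_* values used throughout (renamed from PARALLEL_SWEEPING_* in this patch) form a small per-page state machine: SweepSpace marks pages SWEEPING_PENDING, a sweeper claims a pending page via TryParallelSweeping, the sweep routine leaves it in SWEEPING_FINALIZE, and ParallelSweepSpaceComplete above moves it back to SWEEPING_DONE and marks it swept. A simplified model of the claim step, assuming an atomic compare-and-swap and an intermediate in-progress state (FakePage and TryClaim are illustrative; the real MemoryChunk API may differ):

  #include <atomic>
  #include <cstdio>

  enum SweepingState { SWEEPING_DONE, SWEEPING_FINALIZE, SWEEPING_IN_PROGRESS, SWEEPING_PENDING };

  struct FakePage {
    std::atomic<int> state{SWEEPING_DONE};
    // Only one sweeper may take a pending page.
    bool TryClaim() {
      int expected = SWEEPING_PENDING;
      return state.compare_exchange_strong(expected, SWEEPING_IN_PROGRESS);
    }
  };

  int main() {
    FakePage p;
    p.state = SWEEPING_PENDING;                  // main thread queues the page (SweepSpace)
    printf("first claim: %d\n", p.TryClaim());   // 1: one sweeper wins the page
    printf("second claim: %d\n", p.TryClaim());  // 0: a second claim fails
    p.state = SWEEPING_FINALIZE;                 // sweeper finished (SweepPrecisely/Conservatively)
    p.state = SWEEPING_DONE;                     // main thread finalizes and marks the page swept
    return 0;
  }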
4278 4356
4279 void MarkCompactCollector::ParallelSweepSpacesComplete() { 4357 void MarkCompactCollector::ParallelSweepSpacesComplete() {
4280 ParallelSweepSpaceComplete(heap()->old_pointer_space()); 4358 ParallelSweepSpaceComplete(heap()->old_pointer_space());
4281 ParallelSweepSpaceComplete(heap()->old_data_space()); 4359 ParallelSweepSpaceComplete(heap()->old_data_space());
4282 } 4360 }
4283 4361
4284 4362
(...skipping 210 matching lines...)
4495 while (buffer != NULL) { 4573 while (buffer != NULL) {
4496 SlotsBuffer* next_buffer = buffer->next(); 4574 SlotsBuffer* next_buffer = buffer->next();
4497 DeallocateBuffer(buffer); 4575 DeallocateBuffer(buffer);
4498 buffer = next_buffer; 4576 buffer = next_buffer;
4499 } 4577 }
4500 *buffer_address = NULL; 4578 *buffer_address = NULL;
4501 } 4579 }
4502 4580
4503 4581
4504 } } // namespace v8::internal 4582 } } // namespace v8::internal