OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 43 matching lines...) |
54 #endif | 54 #endif |
55 marking_parity_(ODD_MARKING_PARITY), | 55 marking_parity_(ODD_MARKING_PARITY), |
56 was_marked_incrementally_(false), | 56 was_marked_incrementally_(false), |
57 evacuation_(false), | 57 evacuation_(false), |
58 heap_(heap), | 58 heap_(heap), |
59 marking_deque_memory_(NULL), | 59 marking_deque_memory_(NULL), |
60 marking_deque_memory_committed_(0), | 60 marking_deque_memory_committed_(0), |
61 code_flusher_(nullptr), | 61 code_flusher_(nullptr), |
62 have_code_to_deoptimize_(false), | 62 have_code_to_deoptimize_(false), |
63 compacting_(false), | 63 compacting_(false), |
64 sweeping_in_progress_(false), | 64 pending_compaction_tasks_semaphore_(0), |
65 pending_sweeper_tasks_semaphore_(0), | 65 sweeper_(heap) { |
66 pending_compaction_tasks_semaphore_(0) { | |
67 } | 66 } |
68 | 67 |
69 #ifdef VERIFY_HEAP | 68 #ifdef VERIFY_HEAP |
70 class VerifyMarkingVisitor : public ObjectVisitor { | 69 class VerifyMarkingVisitor : public ObjectVisitor { |
71 public: | 70 public: |
72 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} | 71 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} |
73 | 72 |
74 void VisitPointers(Object** start, Object** end) override { | 73 void VisitPointers(Object** start, Object** end) override { |
75 for (Object** current = start; current < end; current++) { | 74 for (Object** current = start; current < end; current++) { |
76 if ((*current)->IsHeapObject()) { | 75 if ((*current)->IsHeapObject()) { |
(...skipping 375 matching lines...) |
452 Marking::MarkWhite(Marking::MarkBitFrom(obj)); | 451 Marking::MarkWhite(Marking::MarkBitFrom(obj)); |
453 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); | 452 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); |
454 chunk->ResetProgressBar(); | 453 chunk->ResetProgressBar(); |
455 chunk->ResetLiveBytes(); | 454 chunk->ResetLiveBytes(); |
456 if (chunk->IsFlagSet(Page::BLACK_PAGE)) { | 455 if (chunk->IsFlagSet(Page::BLACK_PAGE)) { |
457 chunk->ClearFlag(Page::BLACK_PAGE); | 456 chunk->ClearFlag(Page::BLACK_PAGE); |
458 } | 457 } |
459 } | 458 } |
460 } | 459 } |
461 | 460 |
462 | 461 class MarkCompactCollector::Sweeper::SweeperTask : public v8::Task { |
463 class MarkCompactCollector::SweeperTask : public v8::Task { | |
464 public: | 462 public: |
465 SweeperTask(Heap* heap, AllocationSpace space_to_start) | 463 SweeperTask(Sweeper* sweeper, base::Semaphore* pending_sweeper_tasks, |
466 : heap_(heap), space_to_start_(space_to_start) {} | 464 AllocationSpace space_to_start) |
| 465 : sweeper_(sweeper), |
| 466 pending_sweeper_tasks_(pending_sweeper_tasks), |
| 467 space_to_start_(space_to_start) {} |
467 | 468 |
468 virtual ~SweeperTask() {} | 469 virtual ~SweeperTask() {} |
469 | 470 |
470 private: | 471 private: |
471 // v8::Task overrides. | 472 // v8::Task overrides. |
472 void Run() override { | 473 void Run() override { |
473 DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE); | 474 DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE); |
474 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE); | 475 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE); |
475 const int offset = space_to_start_ - FIRST_PAGED_SPACE; | 476 const int offset = space_to_start_ - FIRST_PAGED_SPACE; |
476 const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; | 477 const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; |
477 for (int i = 0; i < num_spaces; i++) { | 478 for (int i = 0; i < num_spaces; i++) { |
478 const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces); | 479 const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces); |
479 DCHECK_GE(space_id, FIRST_PAGED_SPACE); | 480 DCHECK_GE(space_id, FIRST_PAGED_SPACE); |
480 DCHECK_LE(space_id, LAST_PAGED_SPACE); | 481 DCHECK_LE(space_id, LAST_PAGED_SPACE); |
481 heap_->mark_compact_collector()->SweepInParallel( | 482 sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0); |
482 heap_->paged_space(space_id), 0); | |
483 } | 483 } |
484 heap_->mark_compact_collector()->pending_sweeper_tasks_semaphore_.Signal(); | 484 pending_sweeper_tasks_->Signal(); |
485 } | 485 } |
486 | 486 |
487 Heap* heap_; | 487 Sweeper* sweeper_; |
| 488 base::Semaphore* pending_sweeper_tasks_; |
488 AllocationSpace space_to_start_; | 489 AllocationSpace space_to_start_; |
489 | 490 |
490 DISALLOW_COPY_AND_ASSIGN(SweeperTask); | 491 DISALLOW_COPY_AND_ASSIGN(SweeperTask); |
491 }; | 492 }; |
492 | 493 |
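
The `(i + offset) % num_spaces` rotation in SweeperTask::Run() makes each task start at its own space and still visit all of them, so concurrently started tasks mostly begin work on different spaces. A minimal standalone sketch of that traversal; the space ids here are illustrative stand-ins, not V8's real AllocationSpace values:

    #include <cstdio>

    int main() {
      const int kFirstPagedSpace = 1;  // stand-in for FIRST_PAGED_SPACE
      const int kLastPagedSpace = 3;   // stand-in for LAST_PAGED_SPACE
      const int num_spaces = kLastPagedSpace - kFirstPagedSpace + 1;
      for (int start = kFirstPagedSpace; start <= kLastPagedSpace; start++) {
        const int offset = start - kFirstPagedSpace;
        std::printf("task starting at space %d sweeps:", start);
        for (int i = 0; i < num_spaces; i++) {
          std::printf(" %d", kFirstPagedSpace + ((i + offset) % num_spaces));
        }
        std::printf("\n");
      }
      return 0;
    }
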
493 | 494 void MarkCompactCollector::Sweeper::StartSweeping() { |
494 void MarkCompactCollector::StartSweeperThreads() { | 495 sweeping_in_progress_ = true; |
495 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 496 ForAllSweepingSpaces([this](AllocationSpace space) { |
496 new SweeperTask(heap(), OLD_SPACE), v8::Platform::kShortRunningTask); | 497 std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(), |
497 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 498 [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); }); |
498 new SweeperTask(heap(), CODE_SPACE), v8::Platform::kShortRunningTask); | 499 }); |
499 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 500 if (FLAG_concurrent_sweeping) { |
500 new SweeperTask(heap(), MAP_SPACE), v8::Platform::kShortRunningTask); | 501 ForAllSweepingSpaces([this](AllocationSpace space) { |
| 502 if (space == NEW_SPACE) return; |
| 503 StartSweepingHelper(space); |
| 504 }); |
| 505 } |
501 } | 506 } |
502 | 507 |
| 508 void MarkCompactCollector::Sweeper::StartSweepingHelper( |
| 509 AllocationSpace space_to_start) { |
| 510 num_sweeping_tasks_++; |
| 511 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
| 512 new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start), |
| 513 v8::Platform::kShortRunningTask); |
| 514 } |
503 | 515 |
504 void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) { | 516 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted( |
| 517 Page* page) { |
505 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); | 518 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); |
506 if (!page->SweepingDone()) { | 519 if (!page->SweepingDone()) { |
507 SweepInParallel(page, owner); | 520 ParallelSweepPage(page, owner); |
508 if (!page->SweepingDone()) { | 521 if (!page->SweepingDone()) { |
509 // We were not able to sweep that page, i.e., a concurrent | 522 // We were not able to sweep that page, i.e., a concurrent |
510 // sweeper thread currently owns this page. Wait for the sweeper | 523 // sweeper thread currently owns this page. Wait for the sweeper |
511 // thread to be done with this page. | 524 // thread to be done with this page. |
512 page->WaitUntilSweepingCompleted(); | 525 page->WaitUntilSweepingCompleted(); |
513 } | 526 } |
514 } | 527 } |
515 } | 528 } |
516 | 529 |
517 | |
518 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) { | 530 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) { |
519 if (FLAG_concurrent_sweeping && !IsSweepingCompleted()) { | 531 if (FLAG_concurrent_sweeping && !sweeper().IsSweepingCompleted()) { |
520 SweepInParallel(heap()->paged_space(space->identity()), 0); | 532 sweeper().ParallelSweepSpace(space->identity(), 0); |
521 space->RefillFreeList(); | 533 space->RefillFreeList(); |
522 } | 534 } |
523 } | 535 } |
524 | 536 |
| 537 Page* MarkCompactCollector::Sweeper::GetSweptPageSafe(PagedSpace* space) { |
| 538 base::LockGuard<base::Mutex> guard(&mutex_); |
| 539 SweptList& list = swept_list_[space->identity()]; |
| 540 if (list.length() > 0) { |
| 541 return list.RemoveLast(); |
| 542 } |
| 543 return nullptr; |
| 544 } |
525 | 545 |
526 void MarkCompactCollector::EnsureSweepingCompleted() { | 546 void MarkCompactCollector::Sweeper::EnsureCompleted() { |
527 DCHECK(sweeping_in_progress_ == true); | 547 DCHECK(sweeping_in_progress_ == true); |
528 | 548 |
529 // If sweeping is not completed or not running at all, we try to complete it | 549 // If sweeping is not completed or not running at all, we try to complete it |
530 // here. | 550 // here. |
531 if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) { | 551 if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) { |
532 SweepInParallel(heap()->paged_space(OLD_SPACE), 0); | 552 ForAllSweepingSpaces( |
533 SweepInParallel(heap()->paged_space(CODE_SPACE), 0); | 553 [this](AllocationSpace space) { ParallelSweepSpace(space, 0); }); |
534 SweepInParallel(heap()->paged_space(MAP_SPACE), 0); | |
535 } | 554 } |
536 | 555 |
537 if (FLAG_concurrent_sweeping) { | 556 if (FLAG_concurrent_sweeping) { |
538 pending_sweeper_tasks_semaphore_.Wait(); | 557 while (num_sweeping_tasks_ > 0) { |
539 pending_sweeper_tasks_semaphore_.Wait(); | 558 pending_sweeper_tasks_semaphore_.Wait(); |
540 pending_sweeper_tasks_semaphore_.Wait(); | 559 num_sweeping_tasks_--; |
| 560 } |
541 } | 561 } |
542 | 562 |
543 ParallelSweepSpacesComplete(); | 563 DCHECK(sweeping_list_[NEW_SPACE].empty()); |
| 564 ForAllSweepingSpaces([this](AllocationSpace space) { |
| 565 DCHECK_NULL(tmp_late_sweeping_list_[space]); |
| 566 sweeping_list_[space].clear(); |
| 567 if (late_sweeping_list_[space] != nullptr) { |
| 568 delete late_sweeping_list_[space]; |
| 569 } |
| 570 }); |
| 571 |
544 sweeping_in_progress_ = false; | 572 sweeping_in_progress_ = false; |
| 573 } |
| 574 |
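
Where the OLD code waited on the semaphore exactly three times (once per hard-coded space), EnsureCompleted() now drains one signal per started task, which stays correct as the number of tasks changes. A minimal model of the pattern, with C++20 std::counting_semaphore standing in for v8::base::Semaphore:

    #include <semaphore>
    #include <thread>
    #include <vector>

    int main() {
      std::counting_semaphore<> pending_tasks(0);
      const int num_sweeping_tasks = 3;
      std::vector<std::thread> tasks;
      for (int i = 0; i < num_sweeping_tasks; i++) {
        // Each task signals exactly once when its sweeping work is done.
        tasks.emplace_back([&pending_tasks] { pending_tasks.release(); });
      }
      // Drain one signal per started task, mirroring the while loop above.
      for (int i = 0; i < num_sweeping_tasks; i++) pending_tasks.acquire();
      for (std::thread& t : tasks) t.join();
      return 0;
    }
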
| 575 void MarkCompactCollector::EnsureSweepingCompleted() { |
| 576 DCHECK(sweeper().sweeping_in_progress() == true); |
| 577 |
| 578 sweeper().EnsureCompleted(); |
545 heap()->old_space()->RefillFreeList(); | 579 heap()->old_space()->RefillFreeList(); |
546 heap()->code_space()->RefillFreeList(); | 580 heap()->code_space()->RefillFreeList(); |
547 heap()->map_space()->RefillFreeList(); | 581 heap()->map_space()->RefillFreeList(); |
548 | 582 |
549 #ifdef VERIFY_HEAP | 583 #ifdef VERIFY_HEAP |
550 if (FLAG_verify_heap && !evacuation()) { | 584 if (FLAG_verify_heap && !evacuation()) { |
551 VerifyEvacuation(heap_); | 585 VerifyEvacuation(heap_); |
552 } | 586 } |
553 #endif | 587 #endif |
554 } | 588 } |
555 | 589 |
556 | 590 bool MarkCompactCollector::Sweeper::IsSweepingCompleted() { |
557 bool MarkCompactCollector::IsSweepingCompleted() { | |
558 if (!pending_sweeper_tasks_semaphore_.WaitFor( | 591 if (!pending_sweeper_tasks_semaphore_.WaitFor( |
559 base::TimeDelta::FromSeconds(0))) { | 592 base::TimeDelta::FromSeconds(0))) { |
560 return false; | 593 return false; |
561 } | 594 } |
562 pending_sweeper_tasks_semaphore_.Signal(); | 595 pending_sweeper_tasks_semaphore_.Signal(); |
563 return true; | 596 return true; |
564 } | 597 } |
565 | 598 |
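
IsSweepingCompleted() probes the semaphore with a zero timeout and, on success, immediately signals it back so the check leaves the observable count unchanged. The same try-then-restore idiom in a self-contained C++20 sketch (again with std::counting_semaphore as a stand-in):

    #include <semaphore>

    // True if a completion signal is pending, without consuming it:
    // take a token non-blockingly, then put it straight back.
    bool ProbeCompletionSignal(std::counting_semaphore<>& pending_tasks) {
      if (!pending_tasks.try_acquire()) return false;
      pending_tasks.release();
      return true;
    }

    int main() {
      std::counting_semaphore<> pending_tasks(0);
      if (ProbeCompletionSignal(pending_tasks)) return 1;  // nothing signaled
      pending_tasks.release();  // a task finishes
      return ProbeCompletionSignal(pending_tasks) ? 0 : 1;
    }
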
566 | |
567 void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) { | 599 void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) { |
568 // This is only used when resizing an object. | 600 // This is only used when resizing an object. |
569 DCHECK(MemoryChunk::FromAddress(old_start) == | 601 DCHECK(MemoryChunk::FromAddress(old_start) == |
570 MemoryChunk::FromAddress(new_start)); | 602 MemoryChunk::FromAddress(new_start)); |
571 | 603 |
572 if (!heap->incremental_marking()->IsMarking() || | 604 if (!heap->incremental_marking()->IsMarking() || |
573 Page::FromAddress(old_start)->IsFlagSet(Page::BLACK_PAGE)) | 605 Page::FromAddress(old_start)->IsFlagSet(Page::BLACK_PAGE)) |
574 return; | 606 return; |
575 | 607 |
576 // If the mark doesn't move, we don't check the color of the object. | 608 // If the mark doesn't move, we don't check the color of the object. |
(...skipping 2585 matching lines...) |
3162 HeapObject* heap_object = HeapObject::cast(object); | 3194 HeapObject* heap_object = HeapObject::cast(object); |
3163 MapWord map_word = heap_object->map_word(); | 3195 MapWord map_word = heap_object->map_word(); |
3164 if (map_word.IsForwardingAddress()) { | 3196 if (map_word.IsForwardingAddress()) { |
3165 return map_word.ToForwardingAddress(); | 3197 return map_word.ToForwardingAddress(); |
3166 } | 3198 } |
3167 } | 3199 } |
3168 return object; | 3200 return object; |
3169 } | 3201 } |
3170 }; | 3202 }; |
3171 | 3203 |
3172 enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS }; | 3204 template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode, |
3173 | 3205 MarkCompactCollector::Sweeper::SweepingParallelism parallelism, |
3174 enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST }; | 3206 MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode, |
3175 | 3207 MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode> |
3176 enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE }; | 3208 int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p, |
3177 | 3209 ObjectVisitor* v) { |
3178 // Sweeps a page. After sweeping the page can be iterated. | |
3179 // Slots in live objects pointing into evacuation candidates are updated | |
3180 // if requested. | |
3181 // Returns the size of the biggest continuous freed memory chunk in bytes. | |
3182 template <SweepingMode sweeping_mode, | |
3183 MarkCompactCollector::SweepingParallelism parallelism, | |
3184 SkipListRebuildingMode skip_list_mode, | |
3185 FreeSpaceTreatmentMode free_space_mode> | |
3186 static int Sweep(PagedSpace* space, Page* p, ObjectVisitor* v) { | |
3187 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone()); | 3210 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone()); |
3188 DCHECK(!p->IsFlagSet(Page::BLACK_PAGE)); | 3211 DCHECK(!p->IsFlagSet(Page::BLACK_PAGE)); |
3189 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST, | 3212 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST, |
3190 space->identity() == CODE_SPACE); | 3213 space->identity() == CODE_SPACE); |
3191 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); | 3214 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); |
3192 DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD || | 3215 DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY); |
3193 sweeping_mode == SWEEP_ONLY); | |
3194 | 3216 |
3195 Address free_start = p->area_start(); | 3217 Address free_start = p->area_start(); |
3196 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); | 3218 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); |
3197 | 3219 |
3198 // If we use the skip list for code space pages, we have to lock the skip | 3220 // If we use the skip list for code space pages, we have to lock the skip |
3199 // list because it could be accessed concurrently by the runtime or the | 3221 // list because it could be accessed concurrently by the runtime or the |
3200 // deoptimizer. | 3222 // deoptimizer. |
3201 SkipList* skip_list = p->skip_list(); | 3223 SkipList* skip_list = p->skip_list(); |
3202 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) { | 3224 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) { |
3203 skip_list->Clear(); | 3225 skip_list->Clear(); |
(...skipping 41 matching lines...) |
3245 if (free_space_mode == ZAP_FREE_SPACE) { | 3267 if (free_space_mode == ZAP_FREE_SPACE) { |
3246 memset(free_start, 0xcc, size); | 3268 memset(free_start, 0xcc, size); |
3247 } | 3269 } |
3248 freed_bytes = space->UnaccountedFree(free_start, size); | 3270 freed_bytes = space->UnaccountedFree(free_start, size); |
3249 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | 3271 max_freed_bytes = Max(freed_bytes, max_freed_bytes); |
3250 } | 3272 } |
3251 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); | 3273 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); |
3252 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); | 3274 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); |
3253 } | 3275 } |
3254 | 3276 |
3255 | |
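
The NEW side drops the OLD documentation above RawSweep ("Sweeps a page. After sweeping the page can be iterated. ... Returns the size of the biggest continuous freed memory chunk in bytes."); the contract itself is unchanged. The four template knobs select a specialized sweep at compile time, as the call sites later in this file show. A toy sketch of that compile-time mode selection, using the mode names from the OLD enums:

    // Mode enums as they appeared on the OLD side of this diff.
    enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
    enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
    enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };

    // Each mode combination compiles to its own specialization, so the
    // per-object inner loop pays no runtime branching for the modes.
    template <SweepingMode mode, SkipListRebuildingMode skip_list,
              FreeSpaceTreatmentMode free_space>
    int RawSweepSketch() {
      int work = 0;
      if (free_space == ZAP_FREE_SPACE) work += 1;    // folded at compile time
      if (skip_list == REBUILD_SKIP_LIST) work += 2;  // folded at compile time
      return work;
    }

    int main() {
      return RawSweepSketch<SWEEP_ONLY, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>();
    }
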
3256 void MarkCompactCollector::InvalidateCode(Code* code) { | 3277 void MarkCompactCollector::InvalidateCode(Code* code) { |
3257 if (heap_->incremental_marking()->IsCompacting() && | 3278 if (heap_->incremental_marking()->IsCompacting() && |
3258 !ShouldSkipEvacuationSlotRecording(code)) { | 3279 !ShouldSkipEvacuationSlotRecording(code)) { |
3259 DCHECK(compacting_); | 3280 DCHECK(compacting_); |
3260 | 3281 |
3261 // If the object is white then no slots were recorded on it yet. | 3282 // If the object is white then no slots were recorded on it yet. |
3262 MarkBit mark_bit = Marking::MarkBitFrom(code); | 3283 MarkBit mark_bit = Marking::MarkBitFrom(code); |
3263 if (Marking::IsWhite(mark_bit)) return; | 3284 if (Marking::IsWhite(mark_bit)) return; |
3264 | 3285 |
3265 // Ignore all slots that might have been recorded in the body of the | 3286 // Ignore all slots that might have been recorded in the body of the |
(...skipping 76 matching lines...) |
3342 LiveObjectIterator<kBlackObjects> it(page); | 3363 LiveObjectIterator<kBlackObjects> it(page); |
3343 HeapObject* object = NULL; | 3364 HeapObject* object = NULL; |
3344 while ((object = it.Next()) != NULL) { | 3365 while ((object = it.Next()) != NULL) { |
3345 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); | 3366 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); |
3346 Map* map = object->synchronized_map(); | 3367 Map* map = object->synchronized_map(); |
3347 int size = object->SizeFromMap(map); | 3368 int size = object->SizeFromMap(map); |
3348 object->IterateBody(map->instance_type(), size, visitor); | 3369 object->IterateBody(map->instance_type(), size, visitor); |
3349 } | 3370 } |
3350 } | 3371 } |
3351 | 3372 |
| 3373 void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space, |
| 3374 Page* page) { |
| 3375 base::LockGuard<base::Mutex> guard(&mutex_); |
| 3376 swept_list_[space->identity()].Add(page); |
| 3377 } |
3352 | 3378 |
3353 void MarkCompactCollector::SweepAbortedPages() { | 3379 void MarkCompactCollector::SweepAbortedPages() { |
3354 // Second pass on aborted pages. | 3380 // Second pass on aborted pages. |
3355 for (Page* p : evacuation_candidates_) { | 3381 for (Page* p : evacuation_candidates_) { |
3356 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { | 3382 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { |
3357 p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED); | 3383 p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED); |
3358 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); | 3384 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); |
3359 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3385 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
3360 switch (space->identity()) { | 3386 switch (space->identity()) { |
3361 case OLD_SPACE: | 3387 case OLD_SPACE: |
3362 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, | 3388 Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, |
3363 IGNORE_FREE_SPACE>(space, p, nullptr); | 3389 Sweeper::IGNORE_SKIP_LIST, |
| 3390 Sweeper::IGNORE_FREE_SPACE>(space, p, nullptr); |
3364 break; | 3391 break; |
3365 case CODE_SPACE: | 3392 case CODE_SPACE: |
3366 if (FLAG_zap_code_space) { | 3393 if (FLAG_zap_code_space) { |
3367 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, | 3394 Sweeper::RawSweep< |
3368 ZAP_FREE_SPACE>(space, p, nullptr); | 3395 Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, |
| 3396 Sweeper::REBUILD_SKIP_LIST, Sweeper::ZAP_FREE_SPACE>(space, p, |
| 3397 nullptr); |
3369 } else { | 3398 } else { |
3370 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, | 3399 Sweeper::RawSweep< |
3371 IGNORE_FREE_SPACE>(space, p, nullptr); | 3400 Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, |
| 3401 Sweeper::REBUILD_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>( |
| 3402 space, p, nullptr); |
3372 } | 3403 } |
3373 break; | 3404 break; |
3374 default: | 3405 default: |
3375 UNREACHABLE(); | 3406 UNREACHABLE(); |
3376 break; | 3407 break; |
3377 } | 3408 } |
3378 { | 3409 sweeper().AddSweptPageSafe(space, p); |
3379 base::LockGuard<base::Mutex> guard(&swept_pages_mutex_); | |
3380 swept_pages(space->identity())->Add(p); | |
3381 } | |
3382 } | 3410 } |
3383 } | 3411 } |
3384 } | 3412 } |
3385 | 3413 |
3386 | 3414 |
3387 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { | 3415 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { |
3388 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); | 3416 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); |
3389 Heap::RelocationLock relocation_lock(heap()); | 3417 Heap::RelocationLock relocation_lock(heap()); |
3390 | 3418 |
3391 { | 3419 { |
(...skipping 23 matching lines...) |
3415 // EvacuateNewSpaceAndCandidates iterates over new space objects and for | 3443 // EvacuateNewSpaceAndCandidates iterates over new space objects and for |
3416 // ArrayBuffers either re-registers them as live or promotes them. This is | 3444 // ArrayBuffers either re-registers them as live or promotes them. This is |
3417 // needed to properly free them. | 3445 // needed to properly free them. |
3418 heap()->array_buffer_tracker()->FreeDead(false); | 3446 heap()->array_buffer_tracker()->FreeDead(false); |
3419 | 3447 |
3420 // Deallocate evacuated candidate pages. | 3448 // Deallocate evacuated candidate pages. |
3421 ReleaseEvacuationCandidates(); | 3449 ReleaseEvacuationCandidates(); |
3422 } | 3450 } |
3423 | 3451 |
3424 #ifdef VERIFY_HEAP | 3452 #ifdef VERIFY_HEAP |
3425 if (FLAG_verify_heap && !sweeping_in_progress_) { | 3453 if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) { |
3426 VerifyEvacuation(heap()); | 3454 VerifyEvacuation(heap()); |
3427 } | 3455 } |
3428 #endif | 3456 #endif |
3429 } | 3457 } |
3430 | 3458 |
3431 template <PointerDirection direction> | 3459 template <PointerDirection direction> |
3432 class PointerUpdateJobTraits { | 3460 class PointerUpdateJobTraits { |
3433 public: | 3461 public: |
3434 typedef int PerPageData; // Per page data is not used in this job. | 3462 typedef int PerPageData; // Per page data is not used in this job. |
3435 typedef PointersUpdatingVisitor* PerTaskData; | 3463 typedef PointersUpdatingVisitor* PerTaskData; |
(...skipping 166 matching lines...) |
3602 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3630 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
3603 p->ResetLiveBytes(); | 3631 p->ResetLiveBytes(); |
3604 CHECK(p->SweepingDone()); | 3632 CHECK(p->SweepingDone()); |
3605 space->ReleasePage(p); | 3633 space->ReleasePage(p); |
3606 } | 3634 } |
3607 evacuation_candidates_.Rewind(0); | 3635 evacuation_candidates_.Rewind(0); |
3608 compacting_ = false; | 3636 compacting_ = false; |
3609 heap()->FreeQueuedChunks(); | 3637 heap()->FreeQueuedChunks(); |
3610 } | 3638 } |
3611 | 3639 |
3612 | 3640 int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity, |
3613 int MarkCompactCollector::SweepInParallel(PagedSpace* space, | 3641 int required_freed_bytes, |
3614 int required_freed_bytes, | 3642 int max_pages) { |
3615 int max_pages) { | |
3616 int max_freed = 0; | 3643 int max_freed = 0; |
3617 int max_freed_overall = 0; | 3644 int pages_freed = 0; |
3618 int page_count = 0; | 3645 ParallelSweepList(sweeping_list_[identity], identity, required_freed_bytes, |
3619 for (Page* p : sweeping_list(space)) { | 3646 max_pages, &max_freed, &pages_freed); |
3620 max_freed = SweepInParallel(p, space); | 3647 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) |
3621 DCHECK(max_freed >= 0); | 3648 return max_freed; |
3622 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) { | 3649 if (max_pages > 0 && pages_freed >= max_pages) return max_freed; |
3623 return max_freed; | 3650 SweepingList* late_list = GetLateSweepingListSafe(identity); |
3624 } | 3651 if (late_list != nullptr) { |
3625 max_freed_overall = Max(max_freed, max_freed_overall); | 3652 ParallelSweepList(*late_list, identity, required_freed_bytes, max_pages, |
3626 page_count++; | 3653 &max_freed, &pages_freed); |
3627 if (max_pages > 0 && page_count >= max_pages) { | |
3628 break; | |
3629 } | |
3630 } | 3654 } |
3631 return max_freed_overall; | 3655 return max_freed; |
3632 } | 3656 } |
3633 | 3657 |
| 3658 void MarkCompactCollector::Sweeper::ParallelSweepList( |
| 3659 SweepingList& list, AllocationSpace out_space, int required_freed_bytes, |
| 3660 int max_pages, int* max_freed, int* pages_freed) { |
| 3661 for (Page* p : list) { |
| 3662 int freed = ParallelSweepPage(p, heap_->paged_space(out_space)); |
| 3663 *pages_freed += 1; |
| 3664 DCHECK_GE(freed, 0); |
| 3665 *max_freed = Max(*max_freed, freed); |
| 3666 if (required_freed_bytes > 0 && *max_freed >= required_freed_bytes) |
| 3667 return; |
| 3668 if (max_pages > 0 && *pages_freed >= max_pages) return; |
| 3669 } |
| 3670 } |
3634 | 3671 |
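
ParallelSweepList's early-exit contract: stop once any single page has yielded at least required_freed_bytes, or once max_pages pages have been swept; passing 0 disables the respective limit. A toy model with the per-page freed sizes precomputed (illustrative values only):

    #include <algorithm>
    #include <vector>

    void SweepListSketch(const std::vector<int>& per_page_freed,
                         int required_freed_bytes, int max_pages,
                         int* max_freed, int* pages_freed) {
      for (int freed : per_page_freed) {
        *pages_freed += 1;
        *max_freed = std::max(*max_freed, freed);
        if (required_freed_bytes > 0 && *max_freed >= required_freed_bytes)
          return;
        if (max_pages > 0 && *pages_freed >= max_pages) return;
      }
    }

    int main() {
      int max_freed = 0, pages_freed = 0;
      // Stops after the second page: 400 >= 300 requested bytes.
      SweepListSketch({100, 400, 900, 50}, 300, 0, &max_freed, &pages_freed);
      return (max_freed == 400 && pages_freed == 2) ? 0 : 1;
    }
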
3635 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) { | 3672 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page, |
| 3673 PagedSpace* space) { |
3636 int max_freed = 0; | 3674 int max_freed = 0; |
3637 if (page->mutex()->TryLock()) { | 3675 if (page->mutex()->TryLock()) { |
3638 // If this page was already swept in the meantime, we can return here. | 3676 // If this page was already swept in the meantime, we can return here. |
3639 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) { | 3677 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) { |
3640 page->mutex()->Unlock(); | 3678 page->mutex()->Unlock(); |
3641 return 0; | 3679 return 0; |
3642 } | 3680 } |
3643 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); | 3681 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); |
3644 if (space->identity() == OLD_SPACE) { | 3682 if (space->identity() == OLD_SPACE) { |
3645 max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, | 3683 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, |
3646 IGNORE_FREE_SPACE>(space, page, NULL); | 3684 IGNORE_FREE_SPACE>(space, page, NULL); |
3647 } else if (space->identity() == CODE_SPACE) { | 3685 } else if (space->identity() == CODE_SPACE) { |
3648 max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST, | 3686 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST, |
3649 IGNORE_FREE_SPACE>(space, page, NULL); | 3687 IGNORE_FREE_SPACE>(space, page, NULL); |
3650 } else { | 3688 } else { |
3651 max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, | 3689 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, |
3652 IGNORE_FREE_SPACE>(space, page, NULL); | 3690 IGNORE_FREE_SPACE>(space, page, NULL); |
3653 } | 3691 } |
3654 { | 3692 { |
3655 base::LockGuard<base::Mutex> guard(&swept_pages_mutex_); | 3693 base::LockGuard<base::Mutex> guard(&mutex_); |
3656 swept_pages(space->identity())->Add(page); | 3694 swept_list_[space->identity()].Add(page); |
3657 } | 3695 } |
3658 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone); | 3696 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone); |
3659 page->mutex()->Unlock(); | 3697 page->mutex()->Unlock(); |
3660 } | 3698 } |
3661 return max_freed; | 3699 return max_freed; |
3662 } | 3700 } |
3663 | 3701 |
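
The TryLock-and-recheck in ParallelSweepPage() is what lets concurrent SweeperTasks and the main thread's SweepOrWaitUntilSweepingCompleted() race for the same page safely: whoever takes the page mutex sweeps it, and the state is re-checked under the lock because another thread may have finished the page in the meantime. A minimal sketch of that claiming protocol (stand-in types, not V8's):

    #include <mutex>

    enum class SweepState { kPending, kInProgress, kDone };

    bool TryClaimAndSweep(std::mutex& page_mutex, SweepState& state) {
      if (!page_mutex.try_lock()) return false;  // another thread owns the page
      if (state != SweepState::kPending) {       // already swept meanwhile
        page_mutex.unlock();
        return false;
      }
      state = SweepState::kInProgress;
      // ... sweep the page ...
      state = SweepState::kDone;
      page_mutex.unlock();
      return true;
    }

    int main() {
      std::mutex page_mutex;
      SweepState state = SweepState::kPending;
      return TryClaimAndSweep(page_mutex, state) ? 0 : 1;
    }
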
| 3702 void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) { |
| 3703 DCHECK(!sweeping_in_progress_); |
| 3704 PrepareToBeSweptPage(space, page); |
| 3705 sweeping_list_[space].push_back(page); |
| 3706 } |
| 3707 |
| 3708 void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space, |
| 3709 Page* page) { |
| 3710 DCHECK(sweeping_in_progress_); |
| 3711 PrepareToBeSweptPage(space, page); |
| 3712 if (tmp_late_sweeping_list_[space] == nullptr) { |
| 3713 tmp_late_sweeping_list_[space] = new SweepingList(); |
| 3714 } |
| 3715 tmp_late_sweeping_list_[space]->push_back(page); |
| 3716 } |
| 3717 |
| 3718 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space, |
| 3719 Page* page) { |
| 3720 page->concurrent_sweeping_state().SetValue(Page::kSweepingPending); |
| 3721 int to_sweep = page->area_size() - page->LiveBytes(); |
| 3722 heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep); |
| 3723 } |
| 3724 |
| 3725 void MarkCompactCollector::Sweeper::CommitLateList(AllocationSpace space) { |
| 3726 DCHECK(sweeping_in_progress_); |
| 3727 base::LockGuard<base::Mutex> guard(&mutex_); |
| 3728 DCHECK_NULL(late_sweeping_list_[space]); |
| 3729 late_sweeping_list_[space] = tmp_late_sweeping_list_[space]; |
| 3730 } |
| 3731 |
| 3732 MarkCompactCollector::Sweeper::SweepingList* |
| 3733 MarkCompactCollector::Sweeper::GetLateSweepingListSafe(AllocationSpace space) { |
| 3734 base::LockGuard<base::Mutex> guard(&mutex_); |
| 3735 return late_sweeping_list_[space]; |
| 3736 } |
3664 | 3737 |
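
AddLatePage() appends to a private tmp_late_sweeping_list_ without taking the mutex (only the main thread adds late pages), and CommitLateList() then publishes the whole list in one locked step, so GetLateSweepingListSafe() on a sweeper thread observes either no list or a complete one. A simplified model of that hand-off; the names and types are illustrative, not V8's:

    #include <mutex>
    #include <vector>

    struct LateListSlot {
      std::mutex mutex;
      std::vector<int>* published = nullptr;  // int stands in for Page*
    };

    // Main thread: build privately, then publish in one locked step.
    void CommitLateList(LateListSlot& slot, std::vector<int>* tmp_list) {
      std::lock_guard<std::mutex> guard(slot.mutex);
      slot.published = tmp_list;
    }

    // Sweeper thread: sees either nullptr or the complete list.
    std::vector<int>* GetLateListSafe(LateListSlot& slot) {
      std::lock_guard<std::mutex> guard(slot.mutex);
      return slot.published;
    }

    int main() {
      LateListSlot slot;
      std::vector<int> tmp_list = {1, 2, 3};  // late pages added unlocked
      CommitLateList(slot, &tmp_list);
      return GetLateListSafe(slot) == &tmp_list ? 0 : 1;
    }
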
3665 void MarkCompactCollector::StartSweepSpace(PagedSpace* space) { | 3738 void MarkCompactCollector::StartSweepSpace(PagedSpace* space) { |
3666 space->ClearStats(); | 3739 space->ClearStats(); |
3667 | 3740 |
3668 PageIterator it(space); | 3741 PageIterator it(space); |
3669 | 3742 |
3670 int will_be_swept = 0; | 3743 int will_be_swept = 0; |
3671 bool unused_page_present = false; | 3744 bool unused_page_present = false; |
3672 | 3745 |
3673 while (it.has_next()) { | 3746 while (it.has_next()) { |
(...skipping 15 matching lines...) |
3689 // TODO(hpayer): Free unused memory of last black page. | 3762 // TODO(hpayer): Free unused memory of last black page. |
3690 continue; | 3763 continue; |
3691 } | 3764 } |
3692 | 3765 |
3693 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { | 3766 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { |
3694 // We need to sweep the page to get it into an iterable state again. Note | 3767 // We need to sweep the page to get it into an iterable state again. Note |
3695 // that this adds unusable memory into the free list, which is later | 3768 // that this adds unusable memory into the free list, which is later |
3696 // dropped again. Since we only use the flag for | 3769 // dropped again. Since we only use the flag for |
3697 // testing, this is fine. | 3770 // testing, this is fine. |
3698 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); | 3771 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); |
3699 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, | 3772 Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, |
3700 IGNORE_FREE_SPACE>(space, p, nullptr); | 3773 Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>( |
| 3774 space, p, nullptr); |
3701 continue; | 3775 continue; |
3702 } | 3776 } |
3703 | 3777 |
3704 // One unused page is kept, all further are released before sweeping them. | 3778 // One unused page is kept, all further are released before sweeping them. |
3705 if (p->LiveBytes() == 0) { | 3779 if (p->LiveBytes() == 0) { |
3706 if (unused_page_present) { | 3780 if (unused_page_present) { |
3707 if (FLAG_gc_verbose) { | 3781 if (FLAG_gc_verbose) { |
3708 PrintIsolate(isolate(), "sweeping: released page: %p", p); | 3782 PrintIsolate(isolate(), "sweeping: released page: %p", p); |
3709 } | 3783 } |
3710 space->ReleasePage(p); | 3784 space->ReleasePage(p); |
3711 continue; | 3785 continue; |
3712 } | 3786 } |
3713 unused_page_present = true; | 3787 unused_page_present = true; |
3714 } | 3788 } |
3715 | 3789 |
3716 p->concurrent_sweeping_state().SetValue(Page::kSweepingPending); | 3790 sweeper().AddPage(space->identity(), p); |
3717 sweeping_list(space).push_back(p); | |
3718 int to_sweep = p->area_size() - p->LiveBytes(); | |
3719 space->accounting_stats_.ShrinkSpace(to_sweep); | |
3720 will_be_swept++; | 3791 will_be_swept++; |
3721 } | 3792 } |
3722 | 3793 |
3723 if (FLAG_gc_verbose) { | 3794 if (FLAG_gc_verbose) { |
3724 PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d", | 3795 PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d", |
3725 AllocationSpaceName(space->identity()), will_be_swept); | 3796 AllocationSpaceName(space->identity()), will_be_swept); |
3726 } | 3797 } |
3727 std::sort(sweeping_list(space).begin(), sweeping_list(space).end(), | |
3728 [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); }); | |
3729 } | 3798 } |
3730 | 3799 |
3731 | 3800 |
3732 void MarkCompactCollector::SweepSpaces() { | 3801 void MarkCompactCollector::SweepSpaces() { |
3733 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP); | 3802 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP); |
3734 double start_time = 0.0; | 3803 double start_time = 0.0; |
3735 if (FLAG_print_cumulative_gc_stat) { | 3804 if (FLAG_print_cumulative_gc_stat) { |
3736 start_time = heap_->MonotonicallyIncreasingTimeInMs(); | 3805 start_time = heap_->MonotonicallyIncreasingTimeInMs(); |
3737 } | 3806 } |
3738 | 3807 |
3739 #ifdef DEBUG | 3808 #ifdef DEBUG |
3740 state_ = SWEEP_SPACES; | 3809 state_ = SWEEP_SPACES; |
3741 #endif | 3810 #endif |
3742 | 3811 |
3743 { | 3812 { |
3744 sweeping_in_progress_ = true; | |
3745 { | 3813 { |
3746 GCTracer::Scope sweep_scope(heap()->tracer(), | 3814 GCTracer::Scope sweep_scope(heap()->tracer(), |
3747 GCTracer::Scope::MC_SWEEP_OLD); | 3815 GCTracer::Scope::MC_SWEEP_OLD); |
3748 StartSweepSpace(heap()->old_space()); | 3816 StartSweepSpace(heap()->old_space()); |
3749 } | 3817 } |
3750 { | 3818 { |
3751 GCTracer::Scope sweep_scope(heap()->tracer(), | 3819 GCTracer::Scope sweep_scope(heap()->tracer(), |
3752 GCTracer::Scope::MC_SWEEP_CODE); | 3820 GCTracer::Scope::MC_SWEEP_CODE); |
3753 StartSweepSpace(heap()->code_space()); | 3821 StartSweepSpace(heap()->code_space()); |
3754 } | 3822 } |
3755 { | 3823 { |
3756 GCTracer::Scope sweep_scope(heap()->tracer(), | 3824 GCTracer::Scope sweep_scope(heap()->tracer(), |
3757 GCTracer::Scope::MC_SWEEP_MAP); | 3825 GCTracer::Scope::MC_SWEEP_MAP); |
3758 StartSweepSpace(heap()->map_space()); | 3826 StartSweepSpace(heap()->map_space()); |
3759 } | 3827 } |
3760 if (FLAG_concurrent_sweeping) { | 3828 sweeper().StartSweeping(); |
3761 StartSweeperThreads(); | |
3762 } | |
3763 } | 3829 } |
3764 | 3830 |
3765 // Deallocate unmarked large objects. | 3831 // Deallocate unmarked large objects. |
3766 heap_->lo_space()->FreeUnmarkedObjects(); | 3832 heap_->lo_space()->FreeUnmarkedObjects(); |
3767 | 3833 |
3768 if (FLAG_print_cumulative_gc_stat) { | 3834 if (FLAG_print_cumulative_gc_stat) { |
3769 heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() - | 3835 heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() - |
3770 start_time); | 3836 start_time); |
3771 } | 3837 } |
3772 } | 3838 } |
3773 | 3839 |
3774 | |
3775 void MarkCompactCollector::ParallelSweepSpacesComplete() { | |
3776 sweeping_list(heap()->old_space()).clear(); | |
3777 sweeping_list(heap()->code_space()).clear(); | |
3778 sweeping_list(heap()->map_space()).clear(); | |
3779 } | |
3780 | |
3781 Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); } | 3840 Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); } |
3782 | 3841 |
3783 | 3842 |
3784 void MarkCompactCollector::Initialize() { | 3843 void MarkCompactCollector::Initialize() { |
3785 MarkCompactMarkingVisitor::Initialize(); | 3844 MarkCompactMarkingVisitor::Initialize(); |
3786 IncrementalMarking::Initialize(); | 3845 IncrementalMarking::Initialize(); |
3787 } | 3846 } |
3788 | 3847 |
3789 void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* host, Address slot, | 3848 void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* host, Address slot, |
3790 Code* target) { | 3849 Code* target) { |
(...skipping 15 matching lines...) |
3806 MarkBit mark_bit = Marking::MarkBitFrom(host); | 3865 MarkBit mark_bit = Marking::MarkBitFrom(host); |
3807 if (Marking::IsBlack(mark_bit)) { | 3866 if (Marking::IsBlack(mark_bit)) { |
3808 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); | 3867 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); |
3809 RecordRelocSlot(host, &rinfo, target); | 3868 RecordRelocSlot(host, &rinfo, target); |
3810 } | 3869 } |
3811 } | 3870 } |
3812 } | 3871 } |
3813 | 3872 |
3814 } // namespace internal | 3873 } // namespace internal |
3815 } // namespace v8 | 3874 } // namespace v8 |