Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(105)

Side by Side Diff: src/heap/mark-compact.cc

Issue 1354613002: [heap] Cleanup: Align naming of parallel sweeping with parallel compaction. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Fix initialization order in constructor Created 5 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/heap/mark-compact.h ('k') | src/heap/spaces.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/mark-compact.h" 5 #include "src/heap/mark-compact.h"
6 6
7 #include "src/base/atomicops.h" 7 #include "src/base/atomicops.h"
8 #include "src/base/bits.h" 8 #include "src/base/bits.h"
9 #include "src/code-stubs.h" 9 #include "src/code-stubs.h"
10 #include "src/compilation-cache.h" 10 #include "src/compilation-cache.h"
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
45 45
46 // ------------------------------------------------------------------------- 46 // -------------------------------------------------------------------------
47 // MarkCompactCollector 47 // MarkCompactCollector
48 48
49 MarkCompactCollector::MarkCompactCollector(Heap* heap) 49 MarkCompactCollector::MarkCompactCollector(Heap* heap)
50 : // NOLINT 50 : // NOLINT
51 #ifdef DEBUG 51 #ifdef DEBUG
52 state_(IDLE), 52 state_(IDLE),
53 #endif 53 #endif
54 marking_parity_(ODD_MARKING_PARITY), 54 marking_parity_(ODD_MARKING_PARITY),
55 compacting_(false),
56 was_marked_incrementally_(false), 55 was_marked_incrementally_(false),
57 sweeping_in_progress_(false),
58 parallel_compaction_in_progress_(false),
59 pending_sweeper_jobs_semaphore_(0),
60 pending_compaction_tasks_semaphore_(0),
61 concurrent_compaction_tasks_active_(0),
62 evacuation_(false), 56 evacuation_(false),
63 slots_buffer_allocator_(nullptr), 57 slots_buffer_allocator_(nullptr),
64 migration_slots_buffer_(nullptr), 58 migration_slots_buffer_(nullptr),
65 heap_(heap), 59 heap_(heap),
66 marking_deque_memory_(NULL), 60 marking_deque_memory_(NULL),
67 marking_deque_memory_committed_(0), 61 marking_deque_memory_committed_(0),
68 code_flusher_(NULL), 62 code_flusher_(NULL),
69 have_code_to_deoptimize_(false) { 63 have_code_to_deoptimize_(false),
64 compacting_(false),
65 sweeping_in_progress_(false),
66 compaction_in_progress_(false),
67 pending_sweeper_tasks_semaphore_(0),
68 pending_compaction_tasks_semaphore_(0),
69 concurrent_compaction_tasks_active_(0) {
70 } 70 }
71 71
72 #ifdef VERIFY_HEAP 72 #ifdef VERIFY_HEAP
73 class VerifyMarkingVisitor : public ObjectVisitor { 73 class VerifyMarkingVisitor : public ObjectVisitor {
74 public: 74 public:
75 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} 75 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
76 76
77 void VisitPointers(Object** start, Object** end) { 77 void VisitPointers(Object** start, Object** end) {
78 for (Object** current = start; current < end; current++) { 78 for (Object** current = start; current < end; current++) {
79 if ((*current)->IsHeapObject()) { 79 if ((*current)->IsHeapObject()) {
(...skipping 420 matching lines...) Expand 10 before | Expand all | Expand 10 after
500 class MarkCompactCollector::SweeperTask : public v8::Task { 500 class MarkCompactCollector::SweeperTask : public v8::Task {
501 public: 501 public:
502 SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {} 502 SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}
503 503
504 virtual ~SweeperTask() {} 504 virtual ~SweeperTask() {}
505 505
506 private: 506 private:
507 // v8::Task overrides. 507 // v8::Task overrides.
508 void Run() override { 508 void Run() override {
509 heap_->mark_compact_collector()->SweepInParallel(space_, 0); 509 heap_->mark_compact_collector()->SweepInParallel(space_, 0);
510 heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal(); 510 heap_->mark_compact_collector()->pending_sweeper_tasks_semaphore_.Signal();
511 } 511 }
512 512
513 Heap* heap_; 513 Heap* heap_;
514 PagedSpace* space_; 514 PagedSpace* space_;
515 515
516 DISALLOW_COPY_AND_ASSIGN(SweeperTask); 516 DISALLOW_COPY_AND_ASSIGN(SweeperTask);
517 }; 517 };
518 518
519 519
520 void MarkCompactCollector::StartSweeperThreads() { 520 void MarkCompactCollector::StartSweeperThreads() {
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after
552 552
553 // If sweeping is not completed or not running at all, we try to complete it 553 // If sweeping is not completed or not running at all, we try to complete it
554 // here. 554 // here.
555 if (!heap()->concurrent_sweeping_enabled() || !IsSweepingCompleted()) { 555 if (!heap()->concurrent_sweeping_enabled() || !IsSweepingCompleted()) {
556 SweepInParallel(heap()->paged_space(OLD_SPACE), 0); 556 SweepInParallel(heap()->paged_space(OLD_SPACE), 0);
557 SweepInParallel(heap()->paged_space(CODE_SPACE), 0); 557 SweepInParallel(heap()->paged_space(CODE_SPACE), 0);
558 SweepInParallel(heap()->paged_space(MAP_SPACE), 0); 558 SweepInParallel(heap()->paged_space(MAP_SPACE), 0);
559 } 559 }
560 560
561 if (heap()->concurrent_sweeping_enabled()) { 561 if (heap()->concurrent_sweeping_enabled()) {
562 pending_sweeper_jobs_semaphore_.Wait(); 562 pending_sweeper_tasks_semaphore_.Wait();
563 pending_sweeper_jobs_semaphore_.Wait(); 563 pending_sweeper_tasks_semaphore_.Wait();
564 pending_sweeper_jobs_semaphore_.Wait(); 564 pending_sweeper_tasks_semaphore_.Wait();
565 } 565 }
566 566
567 ParallelSweepSpacesComplete(); 567 ParallelSweepSpacesComplete();
568 sweeping_in_progress_ = false; 568 sweeping_in_progress_ = false;
569 RefillFreeList(heap()->paged_space(OLD_SPACE)); 569 RefillFreeList(heap()->paged_space(OLD_SPACE));
570 RefillFreeList(heap()->paged_space(CODE_SPACE)); 570 RefillFreeList(heap()->paged_space(CODE_SPACE));
571 RefillFreeList(heap()->paged_space(MAP_SPACE)); 571 RefillFreeList(heap()->paged_space(MAP_SPACE));
572 heap()->paged_space(OLD_SPACE)->ResetUnsweptFreeBytes(); 572 heap()->paged_space(OLD_SPACE)->ResetUnsweptFreeBytes();
573 heap()->paged_space(CODE_SPACE)->ResetUnsweptFreeBytes(); 573 heap()->paged_space(CODE_SPACE)->ResetUnsweptFreeBytes();
574 heap()->paged_space(MAP_SPACE)->ResetUnsweptFreeBytes(); 574 heap()->paged_space(MAP_SPACE)->ResetUnsweptFreeBytes();
575 575
576 #ifdef VERIFY_HEAP 576 #ifdef VERIFY_HEAP
577 if (FLAG_verify_heap && !evacuation()) { 577 if (FLAG_verify_heap && !evacuation()) {
578 VerifyEvacuation(heap_); 578 VerifyEvacuation(heap_);
579 } 579 }
580 #endif 580 #endif
581 } 581 }
582 582
583 583
584 bool MarkCompactCollector::IsSweepingCompleted() { 584 bool MarkCompactCollector::IsSweepingCompleted() {
585 if (!pending_sweeper_jobs_semaphore_.WaitFor( 585 if (!pending_sweeper_tasks_semaphore_.WaitFor(
586 base::TimeDelta::FromSeconds(0))) { 586 base::TimeDelta::FromSeconds(0))) {
587 return false; 587 return false;
588 } 588 }
589 pending_sweeper_jobs_semaphore_.Signal(); 589 pending_sweeper_tasks_semaphore_.Signal();
590 return true; 590 return true;
591 } 591 }
592 592
593 593
594 void MarkCompactCollector::RefillFreeList(PagedSpace* space) { 594 void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
595 FreeList* free_list; 595 FreeList* free_list;
596 596
597 if (space == heap()->old_space()) { 597 if (space == heap()->old_space()) {
598 free_list = free_list_old_space_.get(); 598 free_list = free_list_old_space_.get();
599 } else if (space == heap()->code_space()) { 599 } else if (space == heap()->code_space()) {
(...skipping 1958 matching lines...) Expand 10 before | Expand all | Expand 10 after
2558 } 2558 }
2559 heap()->set_encountered_weak_cells(Smi::FromInt(0)); 2559 heap()->set_encountered_weak_cells(Smi::FromInt(0));
2560 } 2560 }
2561 2561
2562 2562
2563 void MarkCompactCollector::RecordMigratedSlot( 2563 void MarkCompactCollector::RecordMigratedSlot(
2564 Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer) { 2564 Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer) {
2565 // When parallel compaction is in progress, store and slots buffer entries 2565 // When parallel compaction is in progress, store and slots buffer entries
2566 // require synchronization. 2566 // require synchronization.
2567 if (heap_->InNewSpace(value)) { 2567 if (heap_->InNewSpace(value)) {
2568 if (parallel_compaction_in_progress_) { 2568 if (compaction_in_progress_) {
2569 heap_->store_buffer()->MarkSynchronized(slot); 2569 heap_->store_buffer()->MarkSynchronized(slot);
2570 } else { 2570 } else {
2571 heap_->store_buffer()->Mark(slot); 2571 heap_->store_buffer()->Mark(slot);
2572 } 2572 }
2573 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) { 2573 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2574 SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer, 2574 SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
2575 reinterpret_cast<Object**>(slot), 2575 reinterpret_cast<Object**>(slot),
2576 SlotsBuffer::IGNORE_OVERFLOW); 2576 SlotsBuffer::IGNORE_OVERFLOW);
2577 } 2577 }
2578 } 2578 }
(...skipping 802 matching lines...) Expand 10 before | Expand all | Expand 10 after
3381 for (int i = 0; i < num_tasks; i++) { 3381 for (int i = 0; i < num_tasks; i++) {
3382 compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap()); 3382 compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
3383 } 3383 }
3384 3384
3385 compaction_spaces_for_tasks[0]->Get(OLD_SPACE)->MoveOverFreeMemory( 3385 compaction_spaces_for_tasks[0]->Get(OLD_SPACE)->MoveOverFreeMemory(
3386 heap()->old_space()); 3386 heap()->old_space());
3387 compaction_spaces_for_tasks[0] 3387 compaction_spaces_for_tasks[0]
3388 ->Get(CODE_SPACE) 3388 ->Get(CODE_SPACE)
3389 ->MoveOverFreeMemory(heap()->code_space()); 3389 ->MoveOverFreeMemory(heap()->code_space());
3390 3390
3391 parallel_compaction_in_progress_ = true; 3391 compaction_in_progress_ = true;
3392 // Kick off parallel tasks. 3392 // Kick off parallel tasks.
3393 for (int i = 1; i < num_tasks; i++) { 3393 for (int i = 1; i < num_tasks; i++) {
3394 concurrent_compaction_tasks_active_++; 3394 concurrent_compaction_tasks_active_++;
3395 V8::GetCurrentPlatform()->CallOnBackgroundThread( 3395 V8::GetCurrentPlatform()->CallOnBackgroundThread(
3396 new CompactionTask(heap(), compaction_spaces_for_tasks[i]), 3396 new CompactionTask(heap(), compaction_spaces_for_tasks[i]),
3397 v8::Platform::kShortRunningTask); 3397 v8::Platform::kShortRunningTask);
3398 } 3398 }
3399 3399
3400 // Contribute in main thread. Counter and signal are in principle not needed. 3400 // Contribute in main thread. Counter and signal are in principle not needed.
3401 concurrent_compaction_tasks_active_++; 3401 concurrent_compaction_tasks_active_++;
(...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after
3464 } 3464 }
3465 } 3465 }
3466 } 3466 }
3467 3467
3468 3468
3469 void MarkCompactCollector::WaitUntilCompactionCompleted() { 3469 void MarkCompactCollector::WaitUntilCompactionCompleted() {
3470 while (concurrent_compaction_tasks_active_ > 0) { 3470 while (concurrent_compaction_tasks_active_ > 0) {
3471 pending_compaction_tasks_semaphore_.Wait(); 3471 pending_compaction_tasks_semaphore_.Wait();
3472 concurrent_compaction_tasks_active_--; 3472 concurrent_compaction_tasks_active_--;
3473 } 3473 }
3474 parallel_compaction_in_progress_ = false; 3474 compaction_in_progress_ = false;
3475 } 3475 }
3476 3476
3477 3477
3478 void MarkCompactCollector::EvacuatePages( 3478 void MarkCompactCollector::EvacuatePages(
3479 CompactionSpaceCollection* compaction_spaces, 3479 CompactionSpaceCollection* compaction_spaces,
3480 SlotsBuffer** evacuation_slots_buffer) { 3480 SlotsBuffer** evacuation_slots_buffer) {
3481 for (int i = 0; i < evacuation_candidates_.length(); i++) { 3481 for (int i = 0; i < evacuation_candidates_.length(); i++) {
3482 Page* p = evacuation_candidates_[i]; 3482 Page* p = evacuation_candidates_[i];
3483 DCHECK(p->IsEvacuationCandidate() || 3483 DCHECK(p->IsEvacuationCandidate() ||
3484 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3484 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3485 DCHECK(static_cast<int>(p->parallel_sweeping()) == 3485 DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) ==
3486 MemoryChunk::SWEEPING_DONE); 3486 MemoryChunk::kSweepingDone);
3487 if (p->parallel_compaction_state().TrySetValue( 3487 if (p->parallel_compaction_state().TrySetValue(
3488 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) { 3488 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
3489 if (p->IsEvacuationCandidate()) { 3489 if (p->IsEvacuationCandidate()) {
3490 DCHECK_EQ(p->parallel_compaction_state().Value(), 3490 DCHECK_EQ(p->parallel_compaction_state().Value(),
3491 MemoryChunk::kCompactingInProgress); 3491 MemoryChunk::kCompactingInProgress);
3492 if (EvacuateLiveObjectsFromPage( 3492 if (EvacuateLiveObjectsFromPage(
3493 p, compaction_spaces->Get(p->owner()->identity()), 3493 p, compaction_spaces->Get(p->owner()->identity()),
3494 evacuation_slots_buffer)) { 3494 evacuation_slots_buffer)) {
3495 p->parallel_compaction_state().SetValue( 3495 p->parallel_compaction_state().SetValue(
3496 MemoryChunk::kCompactingFinalize); 3496 MemoryChunk::kCompactingFinalize);
(...skipping 123 matching lines...) Expand 10 before | Expand all | Expand 10 after
3620 memset(free_start, 0xcc, size); 3620 memset(free_start, 0xcc, size);
3621 } 3621 }
3622 freed_bytes = Free<parallelism>(space, free_list, free_start, size); 3622 freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3623 max_freed_bytes = Max(freed_bytes, max_freed_bytes); 3623 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3624 } 3624 }
3625 p->ResetLiveBytes(); 3625 p->ResetLiveBytes();
3626 3626
3627 if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) { 3627 if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
3628 // When concurrent sweeping is active, the page will be marked after 3628 // When concurrent sweeping is active, the page will be marked after
3629 // sweeping by the main thread. 3629 // sweeping by the main thread.
3630 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE); 3630 p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingFinalize);
3631 } else { 3631 } else {
3632 p->SetWasSwept(); 3632 p->SetWasSwept();
3633 } 3633 }
3634 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); 3634 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
3635 } 3635 }
3636 3636
3637 3637
3638 void MarkCompactCollector::InvalidateCode(Code* code) { 3638 void MarkCompactCollector::InvalidateCode(Code* code) {
3639 if (heap_->incremental_marking()->IsCompacting() && 3639 if (heap_->incremental_marking()->IsCompacting() &&
3640 !ShouldSkipEvacuationSlotRecording(code)) { 3640 !ShouldSkipEvacuationSlotRecording(code)) {
(...skipping 628 matching lines...) Expand 10 before | Expand all | Expand 10 after
4269 if (p == space->end_of_unswept_pages()) break; 4269 if (p == space->end_of_unswept_pages()) break;
4270 } 4270 }
4271 return max_freed_overall; 4271 return max_freed_overall;
4272 } 4272 }
4273 4273
4274 4274
4275 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) { 4275 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
4276 int max_freed = 0; 4276 int max_freed = 0;
4277 if (page->TryLock()) { 4277 if (page->TryLock()) {
4278 // If this page was already swept in the meantime, we can return here. 4278 // If this page was already swept in the meantime, we can return here.
4279 if (page->parallel_sweeping() != MemoryChunk::SWEEPING_PENDING) { 4279 if (page->parallel_sweeping_state().Value() !=
4280 MemoryChunk::kSweepingPending) {
4280 page->mutex()->Unlock(); 4281 page->mutex()->Unlock();
4281 return 0; 4282 return 0;
4282 } 4283 }
4283 page->set_parallel_sweeping(MemoryChunk::SWEEPING_IN_PROGRESS); 4284 page->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingInProgress);
4284 FreeList* free_list; 4285 FreeList* free_list;
4285 FreeList private_free_list(space); 4286 FreeList private_free_list(space);
4286 if (space->identity() == OLD_SPACE) { 4287 if (space->identity() == OLD_SPACE) {
4287 free_list = free_list_old_space_.get(); 4288 free_list = free_list_old_space_.get();
4288 max_freed = 4289 max_freed =
4289 Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, 4290 Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
4290 IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL); 4291 IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
4291 } else if (space->identity() == CODE_SPACE) { 4292 } else if (space->identity() == CODE_SPACE) {
4292 free_list = free_list_code_space_.get(); 4293 free_list = free_list_code_space_.get();
4293 max_freed = 4294 max_freed =
(...skipping 20 matching lines...) Expand all
4314 space->set_end_of_unswept_pages(space->FirstPage()); 4315 space->set_end_of_unswept_pages(space->FirstPage());
4315 4316
4316 PageIterator it(space); 4317 PageIterator it(space);
4317 4318
4318 int pages_swept = 0; 4319 int pages_swept = 0;
4319 bool unused_page_present = false; 4320 bool unused_page_present = false;
4320 bool parallel_sweeping_active = false; 4321 bool parallel_sweeping_active = false;
4321 4322
4322 while (it.has_next()) { 4323 while (it.has_next()) {
4323 Page* p = it.next(); 4324 Page* p = it.next();
4324 DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE); 4325 DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
4325 4326
4326 // Clear sweeping flags indicating that marking bits are still intact. 4327 // Clear sweeping flags indicating that marking bits are still intact.
4327 p->ClearWasSwept(); 4328 p->ClearWasSwept();
4328 4329
4329 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) || 4330 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
4330 p->IsEvacuationCandidate()) { 4331 p->IsEvacuationCandidate()) {
4331 // Will be processed in EvacuateNewSpaceAndCandidates. 4332 // Will be processed in EvacuateNewSpaceAndCandidates.
4332 DCHECK(evacuation_candidates_.length() > 0); 4333 DCHECK(evacuation_candidates_.length() > 0);
4333 continue; 4334 continue;
4334 } 4335 }
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
4368 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, 4369 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
4369 IGNORE_FREE_SPACE>(space, NULL, p, NULL); 4370 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
4370 } 4371 }
4371 pages_swept++; 4372 pages_swept++;
4372 parallel_sweeping_active = true; 4373 parallel_sweeping_active = true;
4373 } else { 4374 } else {
4374 if (FLAG_gc_verbose) { 4375 if (FLAG_gc_verbose) {
4375 PrintF("Sweeping 0x%" V8PRIxPTR " in parallel.\n", 4376 PrintF("Sweeping 0x%" V8PRIxPTR " in parallel.\n",
4376 reinterpret_cast<intptr_t>(p)); 4377 reinterpret_cast<intptr_t>(p));
4377 } 4378 }
4378 p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING); 4379 p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingPending);
4379 space->IncreaseUnsweptFreeBytes(p); 4380 space->IncreaseUnsweptFreeBytes(p);
4380 } 4381 }
4381 space->set_end_of_unswept_pages(p); 4382 space->set_end_of_unswept_pages(p);
4382 break; 4383 break;
4383 case SEQUENTIAL_SWEEPING: { 4384 case SEQUENTIAL_SWEEPING: {
4384 if (FLAG_gc_verbose) { 4385 if (FLAG_gc_verbose) {
4385 PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p)); 4386 PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p));
4386 } 4387 }
4387 if (space->identity() == CODE_SPACE) { 4388 if (space->identity() == CODE_SPACE) {
4388 if (FLAG_zap_code_space) { 4389 if (FLAG_zap_code_space) {
(...skipping 86 matching lines...) Expand 10 before | Expand all | Expand 10 after
4475 VerifyEvacuation(heap()); 4476 VerifyEvacuation(heap());
4476 } 4477 }
4477 #endif 4478 #endif
4478 } 4479 }
4479 4480
4480 4481
4481 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) { 4482 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
4482 PageIterator it(space); 4483 PageIterator it(space);
4483 while (it.has_next()) { 4484 while (it.has_next()) {
4484 Page* p = it.next(); 4485 Page* p = it.next();
4485 if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) { 4486 if (p->parallel_sweeping_state().Value() ==
4486 p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE); 4487 MemoryChunk::kSweepingFinalize) {
4488 p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingDone);
4487 p->SetWasSwept(); 4489 p->SetWasSwept();
4488 } 4490 }
4489 DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE); 4491 DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
4490 } 4492 }
4491 } 4493 }
4492 4494
4493 4495
4494 void MarkCompactCollector::ParallelSweepSpacesComplete() { 4496 void MarkCompactCollector::ParallelSweepSpacesComplete() {
4495 ParallelSweepSpaceComplete(heap()->old_space()); 4497 ParallelSweepSpaceComplete(heap()->old_space());
4496 ParallelSweepSpaceComplete(heap()->code_space()); 4498 ParallelSweepSpaceComplete(heap()->code_space());
4497 ParallelSweepSpaceComplete(heap()->map_space()); 4499 ParallelSweepSpaceComplete(heap()->map_space());
4498 } 4500 }
4499 4501
(...skipping 84 matching lines...) Expand 10 before | Expand all | Expand 10 after
4584 MarkBit mark_bit = Marking::MarkBitFrom(host); 4586 MarkBit mark_bit = Marking::MarkBitFrom(host);
4585 if (Marking::IsBlack(mark_bit)) { 4587 if (Marking::IsBlack(mark_bit)) {
4586 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); 4588 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
4587 RecordRelocSlot(&rinfo, target); 4589 RecordRelocSlot(&rinfo, target);
4588 } 4590 }
4589 } 4591 }
4590 } 4592 }
4591 4593
4592 } // namespace internal 4594 } // namespace internal
4593 } // namespace v8 4595 } // namespace v8
OLDNEW
« no previous file with comments | « src/heap/mark-compact.h ('k') | src/heap/spaces.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698