Chromium Code Reviews

Diff: src/heap/mark-compact.cc

Issue 1382003002: [heap] Divide available memory upon compaction tasks (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@counters
Patch Set: Rework tests (created 5 years, 2 months ago)
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/mark-compact.h"

#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/sys-info.h"
#include "src/code-stubs.h"
(...skipping 590 matching lines...)
    // Any PagedSpace might invoke RefillFreeLists, so we need to make sure
    // to only refill them for the old space.
    return;
  }

  intptr_t added = space->free_list()->Concatenate(free_list);
  space->accounting_stats_.IncreaseCapacity(added);
}


+void MarkCompactCollector::RefillFreeList(CompactionSpace* space) {
ulan 2015/10/09 11:48:48: Will this function be used in another CL?
Michael Lippautz 2015/10/09 12:20:12: The function should already be used as PagedSpace:…
+  FreeList* free_list = nullptr;
+  if (space->identity() == OLD_SPACE) {
+    free_list = free_list_old_space_.get();
+  } else if (space->identity() == CODE_SPACE) {
+    free_list = free_list_code_space_.get();
+  } else {
+    UNREACHABLE();
+  }
+
+  intptr_t kWantedMemory = 500 * KB;
ulan 2015/10/09 11:48:48: This constant is duplicated below. Let's unify the…
Michael Lippautz 2015/10/09 12:20:13: Unified as MarkCompactCollector::kCompactionMemory…
+  intptr_t refilled = 0;
+  while (refilled < kWantedMemory) {
+    FreeSpace* node = free_list->TryRemoveMemory(kWantedMemory - refilled);
+    if (node == nullptr) return;
+    refilled += node->size();
+    space->AddMemory(node->address(), node->size());
+  }
+}
+
+
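As an aside on the constant discussion above: a minimal standalone sketch of the refill idea, assuming the duplicated 500 * KB literal ends up as a single class-level constant. The identifier in the reply is truncated, so kCompactionMemoryWanted is used here only as a hypothetical name, and the deque stands in for V8's FreeList; nothing below is V8 code.

// refill_sketch.cc -- illustrative only; build: g++ -std=c++11 refill_sketch.cc
#include <cstdint>
#include <cstdio>
#include <deque>

namespace {

constexpr intptr_t KB = 1024;
// Hypothetical shared constant replacing the two 500 * KB literals.
constexpr intptr_t kCompactionMemoryWanted = 500 * KB;

// Moves free-list nodes into a task-local budget until roughly
// kCompactionMemoryWanted bytes have been handed over, mirroring the
// shape of RefillFreeList() above (node sizes stand in for FreeSpace*).
intptr_t Refill(std::deque<intptr_t>* free_list_node_sizes) {
  intptr_t refilled = 0;
  while (refilled < kCompactionMemoryWanted) {
    if (free_list_node_sizes->empty()) break;  // nothing left to hand out
    refilled += free_list_node_sizes->front();
    free_list_node_sizes->pop_front();
  }
  return refilled;
}

}  // namespace

int main() {
  std::deque<intptr_t> free_list = {300 * KB, 150 * KB, 120 * KB, 90 * KB};
  std::printf("refilled %ld bytes\n", static_cast<long>(Refill(&free_list)));
  return 0;
}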
void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
  // This is only used when resizing an object.
  DCHECK(MemoryChunk::FromAddress(old_start) ==
         MemoryChunk::FromAddress(new_start));

  if (!heap->incremental_marking()->IsMarking()) return;

  // If the mark doesn't move, we don't check the color of the object.
  // It doesn't matter whether the object is black, since it hasn't changed
  // size, so the adjustment to the live data count will be zero anyway.
(...skipping 2769 matching lines...)

  const int num_tasks = NumberOfParallelCompactionTasks();

  // Set up compaction spaces.
  CompactionSpaceCollection** compaction_spaces_for_tasks =
      new CompactionSpaceCollection*[num_tasks];
  for (int i = 0; i < num_tasks; i++) {
    compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
  }

-  compaction_spaces_for_tasks[0]->Get(OLD_SPACE)->MoveOverFreeMemory(
-      heap()->old_space());
-  compaction_spaces_for_tasks[0]
-      ->Get(CODE_SPACE)
-      ->MoveOverFreeMemory(heap()->code_space());
+  const intptr_t kWantedMemory = 500 * KB;
Michael Lippautz 2015/10/09 11:08:59: We could branch in a fast case here for 1 compact…
+  heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
+                                                  num_tasks, kWantedMemory);
+  heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
+                                                   num_tasks, kWantedMemory);

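The DivideUponCompactionSpaces() implementation lives in src/heap/spaces.cc and is not part of this file's diff. As a rough model of what the calls above are intended to do, the sketch below splits a space's free memory across per-task budgets, handing each task at most kWantedMemory; the single-task fast path is only an assumption based on the truncated comment above, and none of the names below are V8 APIs.

// divide_sketch.cc -- simplified model; build: g++ -std=c++11 divide_sketch.cc
#include <cstdint>
#include <vector>

namespace {

constexpr intptr_t KB = 1024;

// Splits up to wanted_per_task bytes per task out of `free_bytes` and
// records the share in each task's budget. With a single task, all
// available memory is simply moved over (the assumed "fast case").
void DivideUponTasks(intptr_t* free_bytes, std::vector<intptr_t>* budgets,
                     intptr_t wanted_per_task) {
  if (budgets->size() == 1) {
    (*budgets)[0] += *free_bytes;
    *free_bytes = 0;
    return;
  }
  for (intptr_t& budget : *budgets) {
    intptr_t share =
        *free_bytes < wanted_per_task ? *free_bytes : wanted_per_task;
    budget += share;
    *free_bytes -= share;
    if (*free_bytes == 0) break;
  }
}

}  // namespace

int main() {
  intptr_t old_space_free = 1800 * KB;
  std::vector<intptr_t> budgets(4, 0);  // one budget per compaction task
  DivideUponTasks(&old_space_free, &budgets, 500 * KB);
  // budgets now hold {500 KB, 500 KB, 500 KB, 300 KB}; old_space_free == 0.
  return 0;
}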
  compaction_in_progress_ = true;
  // Kick off parallel tasks.
  for (int i = 1; i < num_tasks; i++) {
    concurrent_compaction_tasks_active_++;
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new CompactionTask(heap(), compaction_spaces_for_tasks[i]),
        v8::Platform::kShortRunningTask);
  }

  // Contribute in main thread. Counter and signal are in principle not needed.
-  concurrent_compaction_tasks_active_++;
  EvacuatePages(compaction_spaces_for_tasks[0], &migration_slots_buffer_);
-  pending_compaction_tasks_semaphore_.Signal();

  WaitUntilCompactionCompleted();

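A simplified model of the kick-off/contribute/wait pattern above, using std::thread and a condition variable instead of V8's platform tasks and semaphore. It illustrates why the counter increment and signal around the main thread's own EvacuatePages() call can be dropped: only background tasks are counted, and the main thread does its share inline and then waits for that count to reach zero. All names below are illustrative, not V8's.

// parallel_sketch.cc -- build: g++ -std=c++11 -pthread parallel_sketch.cc
#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

namespace {

std::mutex tasks_mutex;
std::condition_variable done_cv;
int active_background_tasks = 0;  // counts background tasks only

void EvacuateShare(int task_id) {
  (void)task_id;  // compaction work for this task's spaces would happen here
}

void BackgroundTask(int task_id) {
  EvacuateShare(task_id);
  std::lock_guard<std::mutex> lock(tasks_mutex);
  if (--active_background_tasks == 0) done_cv.notify_one();
}

}  // namespace

int main() {
  const int num_tasks = 4;
  std::vector<std::thread> threads;
  // Kick off background tasks for ids 1..num_tasks-1; the counter is
  // bumped only for them, matching the removal of the extra
  // increment/signal around the main thread's contribution.
  for (int i = 1; i < num_tasks; i++) {
    {
      std::lock_guard<std::mutex> lock(tasks_mutex);
      active_background_tasks++;
    }
    threads.emplace_back(BackgroundTask, i);
  }

  EvacuateShare(0);  // main thread contributes its own share inline

  // Wait until all background tasks have finished.
  std::unique_lock<std::mutex> lock(tasks_mutex);
  done_cv.wait(lock, [] { return active_background_tasks == 0; });
  lock.unlock();

  for (std::thread& t : threads) t.join();
  return 0;
}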
  // Merge back memory (compacted and unused) from compaction spaces.
  for (int i = 0; i < num_tasks; i++) {
    heap()->old_space()->MergeCompactionSpace(
        compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
    heap()->code_space()->MergeCompactionSpace(
        compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
    delete compaction_spaces_for_tasks[i];
(...skipping 1172 matching lines...)
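For completeness, a small model of the merge-back step above: each task-local compaction space returns both its compacted memory and any unused budget to the owning space before the per-task collections are deleted. SpaceModel and MergeBack are illustrative stand-ins, not V8 types.

// merge_sketch.cc -- simplified model; build: g++ -std=c++11 merge_sketch.cc
#include <cstdint>
#include <vector>

namespace {

struct SpaceModel {
  intptr_t capacity = 0;    // memory owned by the space
  intptr_t free_bytes = 0;  // unused memory on its free list
};

// Moves both the compacted memory and any unused budget of a task-local
// space back into the owner, mirroring the MergeCompactionSpace() calls.
void MergeBack(SpaceModel* owner, SpaceModel* task_local) {
  owner->capacity += task_local->capacity;
  owner->free_bytes += task_local->free_bytes;
  task_local->capacity = 0;
  task_local->free_bytes = 0;
}

}  // namespace

int main() {
  SpaceModel old_space;
  std::vector<SpaceModel> task_spaces(4);
  // ... tasks would evacuate into their local spaces here ...
  for (SpaceModel& space : task_spaces) MergeBack(&old_space, &space);
  task_spaces.clear();  // corresponds to deleting the per-task collections
  return 0;
}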
    MarkBit mark_bit = Marking::MarkBitFrom(host);
    if (Marking::IsBlack(mark_bit)) {
      RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
      RecordRelocSlot(&rinfo, target);
    }
  }
}

}  // namespace internal
}  // namespace v8
