Chromium Code Reviews

Unified Diff: src/heap/mark-compact.cc

Issue 1577853007: [heap] Parallel newspace evacuation, semispace copy, and compaction \o/ (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Rebase (2 changes factored out) and addressed comments. Created 4 years, 11 months ago
Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 8338c5c38b9cb4650249d84c62e62ee6eeea9884..aff1e03f3e4935a6865947d33a21187dad0878d8 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -320,9 +320,7 @@ void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() {
{
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER);
- int number_of_pages = evacuation_candidates_.length();
- for (int i = 0; i < number_of_pages; i++) {
- Page* p = evacuation_candidates_[i];
+ for (Page* p : evacuation_candidates_) {
SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
}
}
@@ -831,9 +829,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
void MarkCompactCollector::AbortCompaction() {
if (compacting_) {
- int npages = evacuation_candidates_.length();
- for (int i = 0; i < npages; i++) {
- Page* p = evacuation_candidates_[i];
+ for (Page* p : evacuation_candidates_) {
slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
p->ClearEvacuationCandidate();
p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
@@ -1548,8 +1544,11 @@ class MarkCompactCollector::HeapObjectVisitor {
class MarkCompactCollector::EvacuateVisitorBase
: public MarkCompactCollector::HeapObjectVisitor {
public:
- EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer)
- : heap_(heap), evacuation_slots_buffer_(evacuation_slots_buffer) {}
+ EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer,
+ CompactionSpaceCollection* compaction_spaces)
+ : heap_(heap),
+ evacuation_slots_buffer_(evacuation_slots_buffer),
+ compaction_spaces_(compaction_spaces) {}
bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
HeapObject** target_object) {
@@ -1559,7 +1558,7 @@ class MarkCompactCollector::EvacuateVisitorBase
if (allocation.To(target_object)) {
heap_->mark_compact_collector()->MigrateObject(
*target_object, object, size, target_space->identity(),
- evacuation_slots_buffer_);
+ evacuation_slots_buffer_, compaction_spaces_->local_store_buffer());
return true;
}
return false;
@@ -1568,6 +1567,7 @@ class MarkCompactCollector::EvacuateVisitorBase
protected:
Heap* heap_;
SlotsBuffer** evacuation_slots_buffer_;
+ CompactionSpaceCollection* compaction_spaces_;
};
@@ -1579,20 +1579,22 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
explicit EvacuateNewSpaceVisitor(Heap* heap,
SlotsBuffer** evacuation_slots_buffer,
- HashMap* local_pretenuring_feedback)
- : EvacuateVisitorBase(heap, evacuation_slots_buffer),
+ CompactionSpaceCollection* compaction_spaces)
+ : EvacuateVisitorBase(heap, evacuation_slots_buffer, compaction_spaces),
buffer_(LocalAllocationBuffer::InvalidBuffer()),
space_to_allocate_(NEW_SPACE),
promoted_size_(0),
semispace_copied_size_(0),
- local_pretenuring_feedback_(local_pretenuring_feedback) {}
+ local_pretenuring_feedback_(
+ compaction_spaces->local_pretenuring_feedback()) {}
bool Visit(HeapObject* object) override {
heap_->UpdateAllocationSite(object, local_pretenuring_feedback_);
int size = object->Size();
HeapObject* target_object = nullptr;
if (heap_->ShouldBePromoted(object->address(), size) &&
- TryEvacuateObject(heap_->old_space(), object, &target_object)) {
+ TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
+ &target_object)) {
// If we end up needing more special cases, we should factor this out.
if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
heap_->array_buffer_tracker()->Promote(
@@ -1605,7 +1607,8 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
AllocationSpace space = AllocateTargetObject(object, &target);
heap_->mark_compact_collector()->MigrateObject(
HeapObject::cast(target), object, size, space,
- (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_);
+ (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_,
+ compaction_spaces_->local_store_buffer());
if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
}
@@ -1677,8 +1680,8 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
inline AllocationResult AllocateInOldSpace(int size_in_bytes,
AllocationAlignment alignment) {
- AllocationResult allocation =
- heap_->old_space()->AllocateRaw(size_in_bytes, alignment);
+ AllocationResult allocation = compaction_spaces_->Get(OLD_SPACE)
+ ->AllocateRaw(size_in_bytes, alignment);
if (allocation.IsRetry()) {
FatalProcessOutOfMemory(
"MarkCompactCollector: semi-space copy, fallback in old gen\n");
@@ -1725,8 +1728,7 @@ class MarkCompactCollector::EvacuateOldSpaceVisitor final
EvacuateOldSpaceVisitor(Heap* heap,
CompactionSpaceCollection* compaction_spaces,
SlotsBuffer** evacuation_slots_buffer)
- : EvacuateVisitorBase(heap, evacuation_slots_buffer),
- compaction_spaces_(compaction_spaces) {}
+ : EvacuateVisitorBase(heap, evacuation_slots_buffer, compaction_spaces) {}
bool Visit(HeapObject* object) override {
CompactionSpace* target_space = compaction_spaces_->Get(
@@ -1740,7 +1742,6 @@ class MarkCompactCollector::EvacuateOldSpaceVisitor final
}
private:
- CompactionSpaceCollection* compaction_spaces_;
};
@@ -2550,12 +2551,13 @@ void MarkCompactCollector::AbortTransitionArrays() {
void MarkCompactCollector::RecordMigratedSlot(
- Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer) {
+ Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer,
+ LocalStoreBuffer* local_store_buffer) {
// When parallel compaction is in progress, store and slots buffer entries
// require synchronization.
if (heap_->InNewSpace(value)) {
if (compaction_in_progress_) {
- heap_->store_buffer()->MarkSynchronized(slot);
+ local_store_buffer->Record(slot);
} else {
heap_->store_buffer()->Mark(slot);
}
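
The synchronization story changes here: instead of going through the synchronized entry point of the shared store buffer from every evacuation task (MarkSynchronized), each task records new-space slots into a task-local buffer, and the main thread drains those buffers into the shared store buffer after the tasks have joined (see local_store_buffer()->Process(heap()->store_buffer()) in EvacuatePagesInParallel below). A minimal, self-contained sketch of that record-locally/process-sequentially pattern, not V8's actual LocalStoreBuffer implementation:

#include <cstdint>
#include <vector>

using Address = uintptr_t;

// Stand-in for the shared, main-thread-owned store buffer.
class SharedStoreBuffer {
 public:
  void Mark(Address slot) { slots_.push_back(slot); }  // main thread only
 private:
  std::vector<Address> slots_;
};

// Per-task buffer: workers append without any locking during evacuation ...
class TaskLocalStoreBuffer {
 public:
  void Record(Address slot) { slots_.push_back(slot); }
  // ... and the main thread merges the entries once the tasks have joined.
  void Process(SharedStoreBuffer* shared) {
    for (Address slot : slots_) shared->Mark(slot);
    slots_.clear();
  }
 private:
  std::vector<Address> slots_;
};
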
@@ -2637,19 +2639,23 @@ void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
class RecordMigratedSlotVisitor final : public ObjectVisitor {
public:
RecordMigratedSlotVisitor(MarkCompactCollector* collector,
- SlotsBuffer** evacuation_slots_buffer)
+ SlotsBuffer** evacuation_slots_buffer,
+ LocalStoreBuffer* local_store_buffer)
: collector_(collector),
- evacuation_slots_buffer_(evacuation_slots_buffer) {}
+ evacuation_slots_buffer_(evacuation_slots_buffer),
+ local_store_buffer_(local_store_buffer) {}
V8_INLINE void VisitPointer(Object** p) override {
collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p),
- evacuation_slots_buffer_);
+ evacuation_slots_buffer_,
+ local_store_buffer_);
}
V8_INLINE void VisitPointers(Object** start, Object** end) override {
while (start < end) {
collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start),
- evacuation_slots_buffer_);
+ evacuation_slots_buffer_,
+ local_store_buffer_);
++start;
}
}
@@ -2665,6 +2671,7 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
private:
MarkCompactCollector* collector_;
SlotsBuffer** evacuation_slots_buffer_;
+ LocalStoreBuffer* local_store_buffer_;
};
@@ -2682,9 +2689,10 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
// pointer iteration. This is an issue if the store buffer overflows and we
// have to scan the entire old space, including dead objects, looking for
// pointers to new space.
-void MarkCompactCollector::MigrateObject(
- HeapObject* dst, HeapObject* src, int size, AllocationSpace dest,
- SlotsBuffer** evacuation_slots_buffer) {
+void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
+ int size, AllocationSpace dest,
+ SlotsBuffer** evacuation_slots_buffer,
+ LocalStoreBuffer* local_store_buffer) {
Address dst_addr = dst->address();
Address src_addr = src->address();
DCHECK(heap()->AllowedToBeMigrated(src, dest));
@@ -2695,7 +2703,8 @@ void MarkCompactCollector::MigrateObject(
DCHECK(IsAligned(size, kPointerSize));
heap()->MoveBlock(dst->address(), src->address(), size);
- RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer);
+ RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer,
+ local_store_buffer);
dst->IterateBody(&visitor);
} else if (dest == CODE_SPACE) {
DCHECK_CODEOBJECT_SIZE(size, heap()->code_space());
@@ -3057,53 +3066,19 @@ void MarkCompactCollector::VerifyIsSlotInLiveObject(Address slot,
void MarkCompactCollector::EvacuateNewSpacePrologue() {
- // There are soft limits in the allocation code, designed trigger a mark
- // sweep collection by failing allocations. But since we are already in
- // a mark-sweep allocation, there is no sense in trying to trigger one.
- AlwaysAllocateScope scope(isolate());
-
NewSpace* new_space = heap()->new_space();
-
- // Store allocation range before flipping semispaces.
- Address from_bottom = new_space->bottom();
- Address from_top = new_space->top();
-
- // Flip the semispaces. After flipping, to space is empty, from space has
- // live objects.
- new_space->Flip();
- new_space->ResetAllocationInfo();
-
- newspace_evacuation_candidates_.Clear();
- NewSpacePageIterator it(from_bottom, from_top);
+ NewSpacePageIterator it(new_space->bottom(), new_space->top());
+ // Append the list of new space pages to be processed.
while (it.has_next()) {
newspace_evacuation_candidates_.Add(it.next());
}
+ new_space->Flip();
+ new_space->ResetAllocationInfo();
}
-HashMap* MarkCompactCollector::EvacuateNewSpaceInParallel() {
- HashMap* local_pretenuring_feedback = new HashMap(
- HashMap::PointersMatch, kInitialLocalPretenuringFeedbackCapacity);
- EvacuateNewSpaceVisitor new_space_visitor(heap(), &migration_slots_buffer_,
- local_pretenuring_feedback);
- // First pass: traverse all objects in inactive semispace, remove marks,
- // migrate live objects and write forwarding addresses. This stage puts
- // new entries in the store buffer and may cause some pages to be marked
- // scan-on-scavenge.
- for (int i = 0; i < newspace_evacuation_candidates_.length(); i++) {
- NewSpacePage* p =
- reinterpret_cast<NewSpacePage*>(newspace_evacuation_candidates_[i]);
- bool ok = VisitLiveObjects(p, &new_space_visitor, kClearMarkbits);
- USE(ok);
- DCHECK(ok);
- }
- heap_->IncrementPromotedObjectsSize(new_space_visitor.promoted_size());
- heap_->IncrementSemiSpaceCopiedObjectSize(
- new_space_visitor.semispace_copied_size());
- heap_->IncrementYoungSurvivorsCounter(
- new_space_visitor.promoted_size() +
- new_space_visitor.semispace_copied_size());
- return local_pretenuring_feedback;
+void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
+ newspace_evacuation_candidates_.Rewind(0);
}
@@ -3114,7 +3089,8 @@ void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
}
-int MarkCompactCollector::NumberOfParallelCompactionTasks() {
+int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
+ intptr_t live_bytes) {
if (!FLAG_parallel_compaction) return 1;
// Compute the number of needed tasks based on a target compaction time, the
// profiled compaction speed and marked live memory.
@@ -3122,44 +3098,48 @@ int MarkCompactCollector::NumberOfParallelCompactionTasks() {
// The number of parallel compaction tasks is limited by:
// - #evacuation pages
// - (#cores - 1)
- // - a hard limit
const double kTargetCompactionTimeInMs = 1;
- const int kMaxCompactionTasks = 8;
+ const int kNumSweepingTasks = 3;
intptr_t compaction_speed =
heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
- if (compaction_speed == 0) return 1;
- intptr_t live_bytes = 0;
- for (Page* page : evacuation_candidates_) {
- live_bytes += page->LiveBytes();
+ const int cores =
+ Max(1, base::SysInfo::NumberOfProcessors() - kNumSweepingTasks - 1);
+ int tasks;
+ if (compaction_speed > 0) {
+ tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
+ compaction_speed / kTargetCompactionTimeInMs);
+ } else {
+ tasks = pages;
}
-
- const int cores = Max(1, base::SysInfo::NumberOfProcessors() - 1);
- const int tasks =
- 1 + static_cast<int>(static_cast<double>(live_bytes) / compaction_speed /
- kTargetCompactionTimeInMs);
- const int tasks_capped_pages = Min(evacuation_candidates_.length(), tasks);
+ const int tasks_capped_pages = Min(pages, tasks);
const int tasks_capped_cores = Min(cores, tasks_capped_pages);
- const int tasks_capped_hard = Min(kMaxCompactionTasks, tasks_capped_cores);
- return tasks_capped_hard;
+ return tasks_capped_cores;
}
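
To make the new heuristic concrete (numbers invented for illustration): on a machine reporting 8 processors, cores = Max(1, 8 - 3 - 1) = 4. With a profiled compaction speed of 2 MB/ms and 10 MB of live bytes on the candidate pages, tasks = 1 + 10 / 2 / 1 = 6; capping by the page count (say 20) leaves 6, and capping by cores gives 4 tasks. Without a speed sample yet, the fallback is one task per page before the same caps apply. A standalone version of the same arithmetic, for experimenting with the constants outside of V8:

#include <algorithm>
#include <cstdint>

// Mirrors the heuristic in the hunk above; the constants are copied from the
// diff, but this is an illustration, not the V8 function itself.
int EstimateCompactionTasks(int pages, int64_t live_bytes, int num_processors,
                            int64_t compaction_speed_bytes_per_ms) {
  const double kTargetCompactionTimeInMs = 1;
  const int kNumSweepingTasks = 3;
  const int cores = std::max(1, num_processors - kNumSweepingTasks - 1);
  int tasks;
  if (compaction_speed_bytes_per_ms > 0) {
    tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
                                 compaction_speed_bytes_per_ms /
                                 kTargetCompactionTimeInMs);
  } else {
    tasks = pages;  // no profiling data yet: assume one task per page
  }
  const int tasks_capped_pages = std::min(pages, tasks);
  return std::min(cores, tasks_capped_pages);
}

// EstimateCompactionTasks(20, 10 << 20, 8, 2 << 20) == 4, matching the example above.
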
void MarkCompactCollector::EvacuatePagesInParallel() {
- const int num_pages = evacuation_candidates_.length();
- if (num_pages == 0) return;
+ int num_pages = 0;
+ intptr_t live_bytes = 0;
+ for (Page* page : evacuation_candidates_) {
+ num_pages++;
+ live_bytes += page->LiveBytes();
+ }
+ for (NewSpacePage* page : newspace_evacuation_candidates_) {
+ num_pages++;
+ live_bytes += page->LiveBytes();
+ }
+ DCHECK_GE(num_pages, 1);
+
// Used for trace summary.
- intptr_t live_bytes = 0;
intptr_t compaction_speed = 0;
if (FLAG_trace_fragmentation) {
- for (Page* page : evacuation_candidates_) {
- live_bytes += page->LiveBytes();
- }
compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
}
- const int num_tasks = NumberOfParallelCompactionTasks();
+
+ const int num_tasks = NumberOfParallelCompactionTasks(num_pages, live_bytes);
// Set up compaction spaces.
CompactionSpaceCollection** compaction_spaces_for_tasks =
@@ -3182,7 +3162,8 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
double compaction_duration = 0.0;
intptr_t compacted_memory = 0;
- // Merge back memory (compacted and unused) from compaction spaces.
+ // Merge back memory (compacted and unused) from compaction spaces and update
+ // pretenuring feedback.
for (int i = 0; i < num_tasks; i++) {
heap()->old_space()->MergeCompactionSpace(
compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
@@ -3190,15 +3171,24 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
compacted_memory += compaction_spaces_for_tasks[i]->bytes_compacted();
compaction_duration += compaction_spaces_for_tasks[i]->duration();
+ heap()->MergeAllocationSitePretenuringFeedback(
+ *compaction_spaces_for_tasks[i]->local_pretenuring_feedback());
+ compaction_spaces_for_tasks[i]->local_store_buffer()->Process(
+ heap()->store_buffer());
delete compaction_spaces_for_tasks[i];
}
delete[] compaction_spaces_for_tasks;
heap()->tracer()->AddCompactionEvent(compaction_duration, compacted_memory);
// Finalize sequentially.
+ for (NewSpacePage* p : newspace_evacuation_candidates_) {
+ DCHECK_EQ(p->parallel_compaction_state().Value(),
+ MemoryChunk::kCompactingFinalize);
+ p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
+ }
+
int abandoned_pages = 0;
- for (int i = 0; i < num_pages; i++) {
- Page* p = evacuation_candidates_[i];
+ for (Page* p : evacuation_candidates_) {
switch (p->parallel_compaction_state().Value()) {
case MemoryChunk::ParallelCompactingState::kCompactingAborted:
// We have partially compacted the page, i.e., some objects may have
@@ -3231,7 +3221,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
break;
default:
- // We should not observe kCompactingInProgress, or kCompactingDone.
+ // MemoryChunk::kCompactingInProgress.
UNREACHABLE();
}
p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
@@ -3280,42 +3270,72 @@ void MarkCompactCollector::WaitUntilCompactionCompleted(uint32_t* task_ids,
}
-void MarkCompactCollector::EvacuatePages(
- CompactionSpaceCollection* compaction_spaces,
- SlotsBuffer** evacuation_slots_buffer) {
- EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces,
- evacuation_slots_buffer);
- for (int i = 0; i < evacuation_candidates_.length(); i++) {
- Page* p = evacuation_candidates_[i];
- DCHECK(p->IsEvacuationCandidate() ||
- p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
- DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) ==
- MemoryChunk::kSweepingDone);
- if (p->parallel_compaction_state().TrySetValue(
- MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
- if (p->IsEvacuationCandidate()) {
- DCHECK_EQ(p->parallel_compaction_state().Value(),
- MemoryChunk::kCompactingInProgress);
- double start = heap()->MonotonicallyIncreasingTimeInMs();
- intptr_t live_bytes = p->LiveBytes();
+bool MarkCompactCollector::EvacuateSinglePage(
+ MemoryChunk* p, HeapObjectVisitor* visitor,
+ CompactionSpaceCollection* compaction_spaces) {
+ bool aborted = false;
+ if (p->parallel_compaction_state().TrySetValue(
+ MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
+ if (p->IsEvacuationCandidate() || p->InNewSpace()) {
+ DCHECK_EQ(p->parallel_compaction_state().Value(),
+ MemoryChunk::kCompactingInProgress);
+ int saved_live_bytes = p->LiveBytes();
+ double evacuation_time;
+ bool success;
+ {
AlwaysAllocateScope always_allocate(isolate());
- if (VisitLiveObjects(p, &visitor, kClearMarkbits)) {
- p->ResetLiveBytes();
- p->parallel_compaction_state().SetValue(
- MemoryChunk::kCompactingFinalize);
- compaction_spaces->ReportCompactionProgress(
- heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes);
- } else {
- p->parallel_compaction_state().SetValue(
- MemoryChunk::kCompactingAborted);
- }
+ TimedScope timed_scope(heap(), &evacuation_time);
+ success = VisitLiveObjects(p, visitor, kClearMarkbits);
+ }
+ if (success) {
+ compaction_spaces->ReportCompactionProgress(evacuation_time,
+ saved_live_bytes);
+ p->parallel_compaction_state().SetValue(
+ MemoryChunk::kCompactingFinalize);
} else {
- // There could be popular pages in the list of evacuation candidates
- // which we do compact.
- p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
+ p->parallel_compaction_state().SetValue(
+ MemoryChunk::kCompactingAborted);
+ aborted = true;
}
+ } else {
+ // There could be popular pages in the list of evacuation candidates
+ // which we do compact.
+ p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
}
}
+ return !aborted;
+}
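
EvacuateSinglePage is shared between new-space and old-space pages and may be called by several evacuation tasks; the TrySetValue on parallel_compaction_state is what guarantees that only the task winning the kCompactingDone -> kCompactingInProgress transition processes a page, leaving it in kCompactingFinalize (success) or kCompactingAborted (allocation failure) for the sequential finalization loop earlier in the diff. A reduced sketch of that claim-and-finalize pattern using std::atomic (V8 uses its own AtomicValue wrapper, so this only illustrates the idea):

#include <atomic>

enum CompactingState : int {
  kCompactingDone,        // unclaimed, or fully finalized
  kCompactingInProgress,  // claimed by exactly one evacuation task
  kCompactingFinalize,    // evacuated; awaiting sequential finalization
  kCompactingAborted      // evacuation failed; page must be re-swept
};

struct PageState {
  std::atomic<int> compaction_state{kCompactingDone};
};

// Returns true iff this task won the claim and may evacuate the page.
bool TryClaimPage(PageState* page) {
  int expected = kCompactingDone;
  return page->compaction_state.compare_exchange_strong(expected,
                                                        kCompactingInProgress);
}

void FinishPage(PageState* page, bool evacuation_succeeded) {
  page->compaction_state.store(evacuation_succeeded ? kCompactingFinalize
                                                    : kCompactingAborted);
}
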
+
+
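
The manual start/stop timing from the old EvacuatePages is replaced by a TimedScope that writes the elapsed time into evacuation_time when it leaves scope. The general RAII shape, assuming a standalone timer rather than V8's TimedScope:

#include <chrono>

// Writes the elapsed milliseconds into *result when the scope ends.
class ScopedTimerMs {
 public:
  explicit ScopedTimerMs(double* result)
      : result_(result), start_(std::chrono::steady_clock::now()) {}
  ~ScopedTimerMs() {
    *result_ = std::chrono::duration<double, std::milli>(
                   std::chrono::steady_clock::now() - start_)
                   .count();
  }

 private:
  double* result_;
  std::chrono::steady_clock::time_point start_;
};
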
+void MarkCompactCollector::EvacuatePages(
+ CompactionSpaceCollection* compaction_spaces,
+ SlotsBuffer** evacuation_slots_buffer) {
+ EvacuateOldSpaceVisitor old_space_visitor(heap(), compaction_spaces,
+ evacuation_slots_buffer);
+ EvacuateNewSpaceVisitor new_space_visitor(heap(), evacuation_slots_buffer,
+ compaction_spaces);
+ for (NewSpacePage* p : newspace_evacuation_candidates_) {
+ DCHECK(p->InNewSpace());
+ DCHECK_EQ(static_cast<int>(p->parallel_sweeping_state().Value()),
+ MemoryChunk::kSweepingDone);
+ bool success = EvacuateSinglePage(p, &new_space_visitor, compaction_spaces);
+ DCHECK(success);
+ USE(success);
+ }
+ for (Page* p : evacuation_candidates_) {
+ DCHECK(p->IsEvacuationCandidate() ||
+ p->IsFlagSet(MemoryChunk::RESCAN_ON_EVACUATION));
+ DCHECK_EQ(static_cast<int>(p->parallel_sweeping_state().Value()),
+ MemoryChunk::kSweepingDone);
+ EvacuateSinglePage(p, &old_space_visitor, compaction_spaces);
+ }
+
+ heap()->IncrementPromotedObjectsSize(new_space_visitor.promoted_size());
+ heap()->IncrementSemiSpaceCopiedObjectSize(
+ new_space_visitor.semispace_copied_size());
+ heap()->IncrementYoungSurvivorsCounter(
+ new_space_visitor.promoted_size() +
+ new_space_visitor.semispace_copied_size());
}
@@ -3468,9 +3488,7 @@ void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
Address end_slot) {
// Remove entries by replacing them with an old-space slot containing a smi
// that is located in an unmovable page.
- int npages = evacuation_candidates_.length();
- for (int i = 0; i < npages; i++) {
- Page* p = evacuation_candidates_[i];
+ for (Page* p : evacuation_candidates_) {
DCHECK(p->IsEvacuationCandidate() ||
p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
if (p->IsEvacuationCandidate()) {
@@ -3550,8 +3568,7 @@ void MarkCompactCollector::VisitLiveObjectsBody(Page* page,
void MarkCompactCollector::SweepAbortedPages() {
// Second pass on aborted pages.
- for (int i = 0; i < evacuation_candidates_.length(); i++) {
- Page* p = evacuation_candidates_[i];
+ for (Page* p : evacuation_candidates_) {
if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
@@ -3582,26 +3599,15 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
Heap::RelocationLock relocation_lock(heap());
- HashMap* local_pretenuring_feedback = nullptr;
{
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_NEW_SPACE);
EvacuationScope evacuation_scope(this);
- EvacuateNewSpacePrologue();
- local_pretenuring_feedback = EvacuateNewSpaceInParallel();
- heap_->new_space()->set_age_mark(heap_->new_space()->top());
- }
- {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_EVACUATE_CANDIDATES);
- EvacuationScope evacuation_scope(this);
+ EvacuateNewSpacePrologue();
EvacuatePagesInParallel();
- }
-
- {
- heap_->MergeAllocationSitePretenuringFeedback(*local_pretenuring_feedback);
- delete local_pretenuring_feedback;
+ EvacuateNewSpaceEpilogue();
+ heap()->new_space()->set_age_mark(heap()->new_space()->top());
}
UpdatePointersAfterEvacuation();
@@ -3678,13 +3684,11 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
}
- int npages = evacuation_candidates_.length();
{
GCTracer::Scope gc_scope(
heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
- for (int i = 0; i < npages; i++) {
- Page* p = evacuation_candidates_[i];
+ for (Page* p : evacuation_candidates_) {
DCHECK(p->IsEvacuationCandidate() ||
p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
@@ -3758,9 +3762,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
- int npages = evacuation_candidates_.length();
- for (int i = 0; i < npages; i++) {
- Page* p = evacuation_candidates_[i];
+ for (Page* p : evacuation_candidates_) {
if (!p->IsEvacuationCandidate()) continue;
p->Unlink();
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
@@ -3770,9 +3772,7 @@ void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
void MarkCompactCollector::ReleaseEvacuationCandidates() {
- int npages = evacuation_candidates_.length();
- for (int i = 0; i < npages; i++) {
- Page* p = evacuation_candidates_[i];
+ for (Page* p : evacuation_candidates_) {
if (!p->IsEvacuationCandidate()) continue;
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
space->Free(p->area_start(), p->area_size());
@@ -3789,7 +3789,9 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
int MarkCompactCollector::SweepInParallel(PagedSpace* space,
- int required_freed_bytes) {
+ int required_freed_bytes,
+ int max_pages) {
Hannes Payer (out of office) 2016/01/18 11:46:34 We should optimize for sweeping as many pages as possible ...
Michael Lippautz 2016/01/19 14:56:52 As discussed offline, this will go in separately:
+ int page_count = 0;
int max_freed = 0;
int max_freed_overall = 0;
PageIterator it(space);
@@ -3797,11 +3799,15 @@ int MarkCompactCollector::SweepInParallel(PagedSpace* space,
Page* p = it.next();
max_freed = SweepInParallel(p, space);
DCHECK(max_freed >= 0);
- if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
+ if ((required_freed_bytes > 0) && (max_freed >= required_freed_bytes)) {
return max_freed;
}
max_freed_overall = Max(max_freed, max_freed_overall);
if (p == space->end_of_unswept_pages()) break;
+ page_count++;
+ if ((max_pages > 0) && (page_count == max_pages)) {
+ return max_freed;
+ }
}
return max_freed_overall;
}
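
The new max_pages argument bounds how many pages a single call will sweep, complementing the existing required_freed_bytes early-exit; passing 0 for either keeps the old unbounded behaviour. A toy model of the resulting control flow, to make the interaction of the two caps explicit (not V8 code; freed_per_page stands in for the per-page sweep results):

#include <algorithm>
#include <vector>

int SweepSliceModel(const std::vector<int>& freed_per_page,
                    int required_freed_bytes, int max_pages) {
  int page_count = 0;
  int max_freed_overall = 0;
  for (int max_freed : freed_per_page) {
    // Early exit once a single page frees enough contiguous memory.
    if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
      return max_freed;
    }
    max_freed_overall = std::max(max_freed, max_freed_overall);
    // Early exit once the requested number of pages has been swept.
    page_count++;
    if (max_pages > 0 && page_count == max_pages) {
      return max_freed;
    }
  }
  return max_freed_overall;
}

// E.g. SweepSliceModel({8, 64, 256}, 0, 1) sweeps only the first page and
// returns 8, while SweepSliceModel({8, 64, 256}, 0, 0) sweeps all pages and
// returns 256.
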