Chromium Code Reviews

Unified Diff: src/heap/mark-compact.cc

Issue 1896883003: Revert of 🏄 [heap] Add page evacuation mode for new->old (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 4 years, 8 months ago
Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 5c11e513fec63760f1dcf7f2382de257a1737720..e1eaba0b92cf0a92bb6ceff2bd10a5dd6decdda8 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -1666,7 +1666,7 @@
semispace_copied_size_(0),
local_pretenuring_feedback_(local_pretenuring_feedback) {}
- inline bool Visit(HeapObject* object) override {
+ bool Visit(HeapObject* object) override {
heap_->UpdateAllocationSite<Heap::kCached>(object,
local_pretenuring_feedback_);
int size = object->Size();
@@ -1798,33 +1798,6 @@
HashMap* local_pretenuring_feedback_;
};
-class MarkCompactCollector::EvacuateNewSpacePageVisitor final
- : public MarkCompactCollector::HeapObjectVisitor {
- public:
- EvacuateNewSpacePageVisitor() : promoted_size_(0) {}
-
- static void MoveToOldSpace(NewSpacePage* page, PagedSpace* owner) {
- page->heap()->new_space()->ReplaceWithEmptyPage(page);
- Page* new_page = Page::Convert(page, owner);
- new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
- }
-
- inline bool Visit(HeapObject* object) {
- if (V8_UNLIKELY(object->IsJSArrayBuffer())) {
- object->GetHeap()->array_buffer_tracker()->Promote(
- JSArrayBuffer::cast(object));
- }
- RecordMigratedSlotVisitor visitor;
- object->IterateBodyFast(&visitor);
- promoted_size_ += object->Size();
- return true;
- }
-
- intptr_t promoted_size() { return promoted_size_; }
-
- private:
- intptr_t promoted_size_;
-};
class MarkCompactCollector::EvacuateOldSpaceVisitor final
: public MarkCompactCollector::EvacuateVisitorBase {
@@ -1833,7 +1806,7 @@
CompactionSpaceCollection* compaction_spaces)
: EvacuateVisitorBase(heap, compaction_spaces) {}
- inline bool Visit(HeapObject* object) override {
+ bool Visit(HeapObject* object) override {
CompactionSpace* target_space = compaction_spaces_->Get(
Page::FromAddress(object->address())->owner()->identity());
HeapObject* target_object = nullptr;
@@ -3048,17 +3021,9 @@
newspace_evacuation_candidates_.Rewind(0);
}
+
class MarkCompactCollector::Evacuator : public Malloced {
public:
- // NewSpacePages with more live bytes than this threshold qualify for fast
- // evacuation.
- static int PageEvacuationThreshold() {
- if (FLAG_page_promotion)
- return FLAG_page_promotion_threshold * NewSpacePage::kAllocatableMemory /
- 100;
- return NewSpacePage::kAllocatableMemory + kPointerSize;
- }
-
explicit Evacuator(MarkCompactCollector* collector)
: collector_(collector),
compaction_spaces_(collector->heap()),
@@ -3066,7 +3031,6 @@
kInitialLocalPretenuringFeedbackCapacity),
new_space_visitor_(collector->heap(), &compaction_spaces_,
&local_pretenuring_feedback_),
- new_space_page_visitor(),
old_space_visitor_(collector->heap(), &compaction_spaces_),
duration_(0.0),
bytes_compacted_(0) {}
@@ -3080,32 +3044,17 @@
CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
private:
- enum EvacuationMode {
- kObjectsNewToOld,
- kPageNewToOld,
- kObjectsOldToOld,
- };
-
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
- inline Heap* heap() { return collector_->heap(); }
-
- inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
- // Note: The order of checks is important in this function.
- if (chunk->InNewSpace()) return kObjectsNewToOld;
- if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
- return kPageNewToOld;
- DCHECK(chunk->IsEvacuationCandidate());
- return kObjectsOldToOld;
- }
+ Heap* heap() { return collector_->heap(); }
void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
duration_ += duration;
bytes_compacted_ += bytes_compacted;
}
- template <IterationMode mode, class Visitor>
- inline bool EvacuateSinglePage(MemoryChunk* p, Visitor* visitor);
+ template <IterationMode mode>
+ inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor);
MarkCompactCollector* collector_;
@@ -3115,7 +3064,6 @@
// Visitors for the corresponding spaces.
EvacuateNewSpaceVisitor new_space_visitor_;
- EvacuateNewSpacePageVisitor new_space_page_visitor;
EvacuateOldSpaceVisitor old_space_visitor_;
// Book keeping info.
@@ -3123,18 +3071,17 @@
intptr_t bytes_compacted_;
};
-template <MarkCompactCollector::IterationMode mode, class Visitor>
-bool MarkCompactCollector::Evacuator::EvacuateSinglePage(MemoryChunk* p,
- Visitor* visitor) {
+template <MarkCompactCollector::IterationMode mode>
+bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
+ MemoryChunk* p, HeapObjectVisitor* visitor) {
bool success = false;
- DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() ||
- p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
+ DCHECK(p->IsEvacuationCandidate() || p->InNewSpace());
int saved_live_bytes = p->LiveBytes();
double evacuation_time;
{
AlwaysAllocateScope always_allocate(heap()->isolate());
TimedScope timed_scope(&evacuation_time);
- success = collector_->VisitLiveObjects<Visitor>(p, visitor, mode);
+ success = collector_->VisitLiveObjects(p, visitor, mode);
}
if (FLAG_trace_evacuation) {
const char age_mark_tag =
@@ -3146,9 +3093,8 @@
: '#';
PrintIsolate(heap()->isolate(),
"evacuation[%p]: page=%p new_space=%d age_mark_tag=%c "
- "page_evacuation=%d executable=%d live_bytes=%d time=%f\n",
+ "executable=%d live_bytes=%d time=%f\n",
this, p, p->InNewSpace(), age_mark_tag,
- p->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION),
p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
evacuation_time);
}
@@ -3159,38 +3105,30 @@
}
bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
- bool result = false;
- DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
- NewSpacePage::kSweepingDone);
- switch (ComputeEvacuationMode(chunk)) {
- case kObjectsNewToOld:
- result = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_);
- DCHECK(result);
- USE(result);
- break;
- case kPageNewToOld:
- result = EvacuateSinglePage<kKeepMarking>(chunk, &new_space_page_visitor);
- DCHECK(result);
- USE(result);
- break;
- case kObjectsOldToOld:
- result = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_);
- if (!result) {
- // Aborted compaction page. We can record slots here to have them
- // processed in parallel later on.
- EvacuateRecordOnlyVisitor record_visitor(chunk->owner()->identity());
- result = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor);
- DCHECK(result);
- USE(result);
- // We need to return failure here to indicate that we want this page
- // added to the sweeper.
- return false;
- }
- break;
- default:
- UNREACHABLE();
- }
- return result;
+ bool success = false;
+ if (chunk->InNewSpace()) {
+ DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
+ NewSpacePage::kSweepingDone);
+ success = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_);
+ DCHECK(success);
+ USE(success);
+ } else {
+ DCHECK(chunk->IsEvacuationCandidate());
+ DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone);
+ success = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_);
+ if (!success) {
+ // Aborted compaction page. We can record slots here to have them
+ // processed in parallel later on.
+ EvacuateRecordOnlyVisitor record_visitor(chunk->owner()->identity());
+ success = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor);
+ DCHECK(success);
+ USE(success);
+ // We need to return failure here to indicate that we want this page added
+ // to the sweeper.
+ return false;
+ }
+ }
+ return success;
}
void MarkCompactCollector::Evacuator::Finalize() {
@@ -3198,14 +3136,12 @@
heap()->code_space()->MergeCompactionSpace(
compaction_spaces_.Get(CODE_SPACE));
heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
- heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
- new_space_page_visitor.promoted_size());
+ heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size());
heap()->IncrementSemiSpaceCopiedObjectSize(
new_space_visitor_.semispace_copied_size());
heap()->IncrementYoungSurvivorsCounter(
new_space_visitor_.promoted_size() +
- new_space_visitor_.semispace_copied_size() +
- new_space_page_visitor.promoted_size());
+ new_space_visitor_.semispace_copied_size());
heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
}
@@ -3255,14 +3191,6 @@
bool success, PerPageData data) {
if (chunk->InNewSpace()) {
DCHECK(success);
- } else if (chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
- DCHECK(success);
- Page* p = static_cast<Page*>(chunk);
- p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
- p->ForAllFreeListCategories(
- [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
- heap->mark_compact_collector()->sweeper().AddLatePage(
- p->owner()->identity(), p);
} else {
Page* p = static_cast<Page*>(chunk);
if (success) {
@@ -3292,15 +3220,8 @@
live_bytes += page->LiveBytes();
job.AddPage(page, &abandoned_pages);
}
- const Address age_mark = heap()->new_space()->age_mark();
for (NewSpacePage* page : newspace_evacuation_candidates_) {
live_bytes += page->LiveBytes();
- if (!page->NeverEvacuate() &&
- (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
- page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
- !page->Contains(age_mark)) {
- EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space());
- }
job.AddPage(page, &abandoned_pages);
}
DCHECK_GE(job.NumberOfPages(), 1);
@@ -3460,8 +3381,9 @@
}
#endif // VERIFY_HEAP
-template <class Visitor>
-bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
+
+bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
+ HeapObjectVisitor* visitor,
IterationMode mode) {
#ifdef VERIFY_HEAP
VerifyAllBlackObjects(page);
@@ -3622,8 +3544,12 @@
static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) {
MapWord map_word = object->map_word();
- // There could still be stale pointers in large object space, map space,
- // and old space for pages that have been promoted.
+ // Since we only filter invalid slots in old space, the store buffer can
+ // still contain stale pointers in large object and in map spaces. Ignore
+ // these pointers here.
+ DCHECK(map_word.IsForwardingAddress() ||
+ !object->GetHeap()->old_space()->Contains(
+ reinterpret_cast<Address>(address)));
if (map_word.IsForwardingAddress()) {
// Update the corresponding slot.
*address = map_word.ToForwardingAddress();
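
For context, the fast new->old page promotion criterion that this revert removes is spread across two deleted hunks above (Evacuator::PageEvacuationThreshold() and the candidate-selection loop in EvacuatePagesInParallel). Below is a minimal sketch that consolidates that deleted logic in one place; it assumes the V8-internal types and flags visible in the diff (NewSpacePage, MemoryChunk, FLAG_page_promotion, FLAG_page_promotion_threshold), and the helper name ShouldPromoteNewSpacePage is illustrative only, not an actual V8 function.

// Sketch only: reconstructs the deleted new->old page promotion check.
// ShouldPromoteNewSpacePage is a hypothetical helper, not part of V8.
static bool ShouldPromoteNewSpacePage(NewSpacePage* page, Address age_mark) {
  // The threshold is a percentage of the page's allocatable area; with the
  // flag off it sits just above the maximum so no page ever qualifies.
  const int threshold =
      FLAG_page_promotion
          ? FLAG_page_promotion_threshold * NewSpacePage::kAllocatableMemory /
                100
          : NewSpacePage::kAllocatableMemory + kPointerSize;
  // A page was moved wholesale to old space only if it is not pinned, carries
  // enough live bytes, lies entirely below the age mark, and does not itself
  // contain the age mark.
  return !page->NeverEvacuate() && (page->LiveBytes() > threshold) &&
         page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
         !page->Contains(age_mark);
}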
