Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(502)

Unified Diff: src/mark-compact.cc

Issue 7302003: Support slots recording for compaction during incremental marking. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/gc
Patch Set: Created 9 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: src/mark-compact.cc
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 25d1faa48ceb5ef61d68b9f484abbd5a4f6326c0..5445aea1653a310ce6f7c39a3dca545fc77607d8 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -57,6 +57,8 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT
#ifdef DEBUG
state_(IDLE),
#endif
+ sweep_precisely_(false),
+ compacting_(false),
tracer_(NULL),
#ifdef DEBUG
live_young_objects_size_(0),
@@ -154,7 +156,6 @@ class VerifyEvacuationVisitor: public ObjectVisitor {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
if (MarkCompactCollector::IsOnEvacuationCandidate(object)) {
- HEAP->TracePathToObject(source_);
CHECK(false);
}
}
@@ -225,6 +226,26 @@ void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
}
+bool MarkCompactCollector::StartCompaction() {
+ // Try to put the collector into compacting mode by selecting evacuation
+ // candidate pages. Returns whether compaction is active afterwards.
+ //
+ // Don't start compaction if we are in the middle of an incremental
+ // marking cycle: no slots were collected during that marking, so
+ // pointers into evacuated pages could not be updated afterwards.
+ if (!compacting_ && !heap_->incremental_marking()->IsMarking()) {
+ // Discard any leftovers from a previous compaction cycle.
+ slots_buffer_.Clear();
+ evacuation_candidates_.Rewind(0);
+
+ CollectEvacuationCandidates(heap()->old_pointer_space());
+ CollectEvacuationCandidates(heap()->old_data_space());
+
+ // Make sure no further allocation happens on candidate pages.
+ heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
+ heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
+
+ compacting_ = evacuation_candidates_.length() > 0;
+ }
+
+ return compacting_;
+}
+
+
void MarkCompactCollector::CollectGarbage() {
// Make sure that Prepare() has been called. The individual steps below will
// update the state as they proceed.
@@ -370,6 +391,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
}
+#if 0
static void ClearEvacuationCandidates(PagedSpace* space) {
Erik Corry 2011/07/04 11:04:11 Commented code
Vyacheslav Egorov (Chromium) 2011/08/05 12:50:28 Done.
ASSERT(space->identity() == OLD_POINTER_SPACE ||
space->identity() == OLD_DATA_SPACE);
@@ -380,6 +402,7 @@ static void ClearEvacuationCandidates(PagedSpace* space) {
p->ClearEvacuationCandidate();
}
}
+#endif
void MarkCompactCollector::Prepare(GCTracer* tracer) {
@@ -410,18 +433,7 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
}
#endif
- if (!FLAG_never_compact) {
- slots_buffer_.Clear();
- evacuation_candidates_.Rewind(0);
-
- if (!heap()->incremental_marking()->IsMarking()) {
- CollectEvacuationCandidates(heap()->old_pointer_space());
- CollectEvacuationCandidates(heap()->old_data_space());
- } else {
- ClearEvacuationCandidates(heap()->old_pointer_space());
- ClearEvacuationCandidates(heap()->old_data_space());
- }
- }
+ if (!FLAG_never_compact) StartCompaction();
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
@@ -2314,6 +2326,21 @@ void MarkCompactCollector::EvacuatePages() {
}
+// Weak-reference retainer used after evacuation: it never drops an
+// object, but redirects references to evacuated objects to their new
+// location via the forwarding address left in the map word.
+class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+ virtual Object* RetainAs(Object* object) {
+ if (object->IsHeapObject()) {
+ HeapObject* heap_object = HeapObject::cast(object);
+ MapWord map_word = heap_object->map_word();
+ // An evacuated object leaves a forwarding pointer in its map word.
+ if (map_word.IsForwardingAddress()) {
+ return map_word.ToForwardingAddress();
+ }
+ }
+ // Smis and objects that were not moved are retained unchanged.
+ return object;
+ }
+};
+
+
void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
EvacuateNewSpace();
EvacuatePages();
@@ -2343,7 +2370,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
&Heap::ScavengeStoreBufferCallback);
heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
}
- slots_buffer_.Iterate(&updating_visitor);
+ slots_buffer_.Update();
// Update pointers from cells.
HeapObjectIterator cell_iterator(heap_->cell_space());
@@ -2370,6 +2397,9 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
// Update JSFunction pointers from the runtime profiler.
heap_->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
+ EvacuationWeakObjectRetainer evacuation_object_retainer;
+ heap()->ProcessWeakReferences(&evacuation_object_retainer);
+
#ifdef DEBUG
if (FLAG_verify_heap) {
VerifyEvacuation(heap_);
@@ -2377,16 +2407,17 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
#endif
int npages = evacuation_candidates_.length();
+ ASSERT(compacting_ == (npages > 0));
for (int i = 0; i < npages; i++) {
Page* p = evacuation_candidates_[i];
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
space->Free(p->ObjectAreaStart(), Page::kObjectAreaSize);
p->set_scan_on_scavenge(false);
-
- // We are not clearing evacuation candidate flag here
- // because it is required to notify lazy sweeper to skip
- // these pages.
+ p->ClearEvacuationCandidate();
+ p->SetFlag(MemoryChunk::EVACUATED);
+ p->ClearFlag(MemoryChunk::WAS_SWEPT_CONSERVATIVELY);
}
+ compacting_ = false;
}
@@ -3058,19 +3089,34 @@ void SlotsBuffer::Add(Object** slot) {
}
-void SlotsBuffer::Iterate(ObjectVisitor* visitor) {
+// If *slot points to an evacuated object, rewrite the slot to the
+// object's forwarding address. Slots holding smis or objects that were
+// not moved are left untouched.
+static inline void UpdateSlot(Object** slot) {
+ Object* obj = *slot;
+ // Smis never move; only heap objects can carry a forwarding address.
+ if (!obj->IsHeapObject()) return;
+
+ HeapObject* heap_obj = HeapObject::cast(obj);
+
+ MapWord map_word = heap_obj->map_word();
+ if (map_word.IsForwardingAddress()) {
+ // Only objects on evacuation candidate pages should have been moved,
+ // and the new location must not itself be a candidate.
+ ASSERT(MarkCompactCollector::IsOnEvacuationCandidate(*slot));
+ *slot = map_word.ToForwardingAddress();
+ ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(*slot));
+ }
+}
+
+
+void SlotsBuffer::Update() {
+ // Walk every recorded slot and redirect it to the forwarding address
+ // of the object it refers to (see UpdateSlot above).
if (buffer_idx_ < 0) return;
for (int buffer_index = 0; buffer_index < buffer_idx_; ++buffer_index) {
ObjectSlot* buffer = buffers_[buffer_index];
for (int slot_idx = 0; slot_idx < kBufferSize; ++slot_idx) {
- visitor->VisitPointer(buffer[slot_idx]);
+ UpdateSlot(buffer[slot_idx]);
}
}
+ // The last buffer is only partially filled: idx_ slots are in use.
ObjectSlot* last_buffer = buffers_[buffer_idx_];
for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
- visitor->VisitPointer(last_buffer[slot_idx]);
+ UpdateSlot(last_buffer[slot_idx]);
}
}

Powered by Google App Engine
This is Rietveld 408576698