Chromium Code Reviews

Unified Diff: src/mark-compact.cc

Issue 2101002: Cardmarking writebarrier. (Closed)
Patch Set: fixed review comments Created 10 years, 7 months ago
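For context on the issue title: a card-marking write barrier replaces per-slot remembered sets with one dirty bit per fixed-size region ("card") of old-space memory. Every pointer store dirties the card holding the slot, and the scavenger scans only dirty cards. The sketch below is a minimal standalone illustration of that general technique; the names (CardTable, kCardBits, RecordWrite) and the card size are assumptions for illustration only and are not the V8 API touched by this patch.

// Minimal standalone sketch of a card-marking write barrier.
// All names here (CardTable, kCardBits, ...) are hypothetical.
#include <cstdint>
#include <vector>

constexpr uintptr_t kCardBits = 9;                       // 512-byte cards (illustrative).
constexpr uintptr_t kCardSize = uintptr_t{1} << kCardBits;

class CardTable {
 public:
  CardTable(uintptr_t heap_base, size_t heap_size)
      : base_(heap_base), dirty_((heap_size >> kCardBits) + 1, 0) {}

  // Write barrier: called after every pointer store into old space.
  void RecordWrite(uintptr_t slot) { dirty_[Index(slot)] = 1; }

  // Scavenge support: visit only the cards that were dirtied.
  template <typename Visitor>
  void VisitDirtyCards(Visitor visit) {
    for (size_t i = 0; i < dirty_.size(); ++i) {
      if (!dirty_[i]) continue;
      // Clear the mark; the visitor may call RecordWrite again if the
      // card still holds pointers that must be revisited later.
      dirty_[i] = 0;
      visit(base_ + (i << kCardBits), kCardSize);
    }
  }

 private:
  size_t Index(uintptr_t addr) const { return (addr - base_) >> kCardBits; }
  uintptr_t base_;
  std::vector<uint8_t> dirty_;
};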
Index: src/mark-compact.cc
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index e3cc6ab6cbcefb6e99fa8374a71eb0755a90e177..ce80287ab1112f2b45f5135aafcafc96825ce199 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -83,9 +83,6 @@ void MarkCompactCollector::CollectGarbage() {
UpdatePointers();
RelocateObjects();
-
- RebuildRSets();
-
} else {
SweepSpaces();
}
@@ -120,14 +117,6 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
compacting_collection_ = false;
if (FLAG_collect_maps) CreateBackPointers();
-#ifdef DEBUG
- if (compacting_collection_) {
- // We will write bookkeeping information to the remembered set area
- // starting now.
- Page::set_rset_state(Page::NOT_IN_USE);
- }
-#endif
-
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
space != NULL; space = spaces.next()) {
@@ -149,7 +138,7 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
void MarkCompactCollector::Finish() {
#ifdef DEBUG
- ASSERT(state_ == SWEEP_SPACES || state_ == REBUILD_RSETS);
+ ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
state_ = IDLE;
#endif
// The stub cache is not traversed during GC; clear the cache to
@@ -243,8 +232,8 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
}
// Since we don't have the object's start, it is impossible to update the
- // remembered set. Therefore, we only replace the string with its left
- // substring when the remembered set does not change.
+ // page dirty marks. Therefore, we only replace the string with its left
+ // substring when page dirty marks do not change.
Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;
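A hedged restatement of the condition guarded above: the visitor only sees a slot, not the holding object's start, so it cannot dirty the right region; the replacement is therefore allowed only when it cannot introduce a new old-to-new pointer. The helper below is purely illustrative (hypothetical name, not V8 code):

// Illustrative restatement of the short-circuit condition above.
// A slot may be redirected from the cons string to its left substring
// without touching dirty marks only if the rewrite cannot create a new
// old-to-new pointer that the marks would have to record.
bool SafeToShortCircuit(bool cons_string_in_new_space, bool first_in_new_space) {
  // If the cons string lives in old space but its first component lives in
  // new space, rewriting the slot could require a dirty mark we cannot set
  // (we do not know the holder's start), so the short circuit is skipped.
  return cons_string_in_new_space || !first_in_new_space;
}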
@@ -774,6 +763,7 @@ void MarkCompactCollector::SweepLargeObjectSpace() {
Heap::lo_space()->FreeUnmarkedObjects();
}
+
// Safe to use during marking phase only.
bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
MapWord metamap = object->map_word();
@@ -781,6 +771,7 @@ bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
return metamap.ToMap()->instance_type() == MAP_TYPE;
}
+
void MarkCompactCollector::ClearNonLiveTransitions() {
HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
// Iterate over the map space, setting map transitions that go from
@@ -1076,13 +1067,18 @@ void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
// first word of object without any encoding. If object is dead we are writing
// NULL as a forwarding address.
// The second pass updates pointers to new space in all spaces. It is possible
-// to encounter pointers to dead objects during traversal of remembered set for
-// map space because remembered set bits corresponding to dead maps are cleared
-// later during map space sweeping.
-static void MigrateObject(Address dst, Address src, int size) {
- Heap::CopyBlock(reinterpret_cast<Object**>(dst),
- reinterpret_cast<Object**>(src),
- size);
+// to encounter pointers to dead objects during traversal of dirty regions;
+// we should clear them to avoid encountering them again during the next
+// dirty regions iteration.
+static void MigrateObject(Address dst,
+ Address src,
+ int size,
+ bool to_old_space) {
+ if (to_old_space) {
+ Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
+ } else {
+ Heap::CopyBlock(dst, src, size);
+ }
Memory::Address_at(src) = dst;
}
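The new to_old_space flag decides whether the copy must also dirty the destination's region marks, because a promoted object may carry new-space pointers that later dirty-region passes have to find. Below is a minimal standalone sketch of that split; Heap::CopyBlockToOldSpaceAndUpdateRegionMarks is approximated by hypothetical helpers (MarkRegionsDirty plus a raw memcpy), and the region size is an assumption, not the real constant.

#include <cstdint>
#include <cstring>

constexpr uintptr_t kRegionSize = 256;  // Illustrative region (card) granularity.

// Hypothetical helper: dirty every region overlapping [dst, dst + size).
void MarkRegionsDirty(uint8_t* region_marks, uintptr_t base, uintptr_t dst, int size) {
  for (uintptr_t a = dst & ~(kRegionSize - 1); a < dst + size; a += kRegionSize) {
    region_marks[(a - base) / kRegionSize] = 1;
  }
}

// Sketch of MigrateObject's split: copy the body, and additionally dirty the
// destination regions only when the target is the pointer-carrying old space.
void MigrateObjectSketch(uint8_t* region_marks, uintptr_t heap_base,
                         char* dst, char* src, int size, bool to_old_space) {
  std::memcpy(dst, src, size);
  if (to_old_space) {
    MarkRegionsDirty(region_marks, heap_base, reinterpret_cast<uintptr_t>(dst), size);
  }
  // As in the patch, the source's first word records the forwarding address.
  *reinterpret_cast<char**>(src) = dst;
}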
@@ -1129,6 +1125,7 @@ class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
}
};
+
// Visitor for updating pointers from live objects in old spaces to new space.
// It can encounter pointers to dead objects in new space when traversing map
// space (see comment for MigrateObject).
@@ -1140,10 +1137,13 @@ static void UpdatePointerToNewGen(HeapObject** p) {
Address new_addr = Memory::Address_at(old_addr);
- // Object pointed by *p is dead. Update is not required.
- if (new_addr == NULL) return;
-
- *p = HeapObject::FromAddress(new_addr);
+ if (new_addr == NULL) {
+    // We encountered a pointer to a dead object. Clear it so we will
+    // not visit it again during the next iteration of dirty regions.
+ *p = NULL;
+ } else {
+ *p = HeapObject::FromAddress(new_addr);
+ }
}
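The updated callback is deliberately destructive: a slot whose target died is overwritten with NULL so the next dirty-region pass does not revisit a stale pointer. A standalone sketch of the same policy, using an explicit forwarding map in place of the forwarding word V8 stores in the object's first field (all names hypothetical):

#include <cstddef>
#include <unordered_map>

// 'forwarding' maps a moved object's old address to its new address, or to
// nullptr if the object did not survive. It is a hypothetical stand-in for
// the forwarding word written by MigrateObject above.
void UpdateSlotSketch(void** slot,
                      const std::unordered_map<void*, void*>& forwarding) {
  auto it = forwarding.find(*slot);
  if (it == forwarding.end()) return;      // Target did not move; nothing to do.
  if (it->second == nullptr) {
    // Target is dead: clear the slot so the next dirty-region iteration
    // does not visit this stale pointer again.
    *slot = nullptr;
  } else {
    *slot = it->second;                    // Redirect to the relocated object.
  }
}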
@@ -1161,8 +1161,7 @@ static bool TryPromoteObject(HeapObject* object, int object_size) {
result = Heap::lo_space()->AllocateRawFixedArray(object_size);
if (!result->IsFailure()) {
HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(), object->address(), object_size);
- Heap::UpdateRSet(target);
+ MigrateObject(target->address(), object->address(), object_size, true);
return true;
}
} else {
@@ -1173,10 +1172,10 @@ static bool TryPromoteObject(HeapObject* object, int object_size) {
result = target_space->AllocateRaw(object_size);
if (!result->IsFailure()) {
HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(), object->address(), object_size);
- if (target_space == Heap::old_pointer_space()) {
- Heap::UpdateRSet(target);
- }
+ MigrateObject(target->address(),
+ object->address(),
+ object_size,
+ target_space == Heap::old_pointer_space());
return true;
}
}
@@ -1216,14 +1215,16 @@ static void SweepNewSpace(NewSpace* space) {
continue;
}
- // Promotion either failed or not required.
- // Copy the content of the object.
+      // Promotion failed. Just migrate the object to the other semispace.
Object* target = space->AllocateRaw(size);
// Allocation cannot fail at this point: semispaces are of equal size.
ASSERT(!target->IsFailure());
- MigrateObject(HeapObject::cast(target)->address(), current, size);
+ MigrateObject(HeapObject::cast(target)->address(),
+ current,
+ size,
+ false);
} else {
size = object->Size();
Memory::Address_at(current) = NULL;
@@ -1249,9 +1250,12 @@ static void SweepNewSpace(NewSpace* space) {
Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
// Update pointers in old spaces.
- Heap::IterateRSet(Heap::old_pointer_space(), &UpdatePointerToNewGen);
- Heap::IterateRSet(Heap::map_space(), &UpdatePointerToNewGen);
- Heap::lo_space()->IterateRSet(&UpdatePointerToNewGen);
+ Heap::IterateDirtyRegions(Heap::old_pointer_space(),
+ &Heap::IteratePointersInDirtyRegion,
+ &UpdatePointerToNewGen,
+ Heap::WATERMARK_SHOULD_BE_VALID);
+
+ Heap::lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
// Update pointers from cells.
HeapObjectIterator cell_iterator(Heap::cell_space());
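Pointer updating now walks per-page dirty marks instead of a remembered set: each marked region is scanned, and the mark is kept only if the region still contains pointers worth revisiting. A rough standalone sketch of such a loop, assuming one dirty byte per fixed-size region; the names and signature are illustrative and do not match Heap::IterateDirtyRegions.

#include <cstddef>
#include <cstdint>
#include <vector>

constexpr uintptr_t kRegionSize = 256;  // Illustrative granularity.

// visit_region scans one region and returns true if it still contains
// pointers that must be revisited (for example, old-to-new pointers).
template <typename VisitRegion>
void IterateDirtyRegionsSketch(std::vector<uint8_t>& dirty_marks,
                               uintptr_t page_start,
                               VisitRegion visit_region) {
  for (size_t i = 0; i < dirty_marks.size(); ++i) {
    if (!dirty_marks[i]) continue;
    uintptr_t region_start = page_start + i * kRegionSize;
    // Keep the mark only if the scan found pointers worth keeping;
    // otherwise drop it so future passes skip the region.
    dirty_marks[i] = visit_region(region_start, region_start + kRegionSize) ? 1 : 0;
  }
}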
@@ -1317,7 +1321,10 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
MarkCompactCollector::tracer()->decrement_marked_count();
if (!is_previous_alive) { // Transition from free to live.
- dealloc(free_start, static_cast<int>(current - free_start), true);
+ dealloc(free_start,
+ static_cast<int>(current - free_start),
+ true,
+ false);
is_previous_alive = true;
}
} else {
@@ -1347,8 +1354,18 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// without putting anything into free list.
int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
if (size_in_bytes > 0) {
- dealloc(free_start, size_in_bytes, false);
+ dealloc(free_start, size_in_bytes, false, true);
+ } else {
+#ifdef DEBUG
+ MemoryAllocator::ZapBlock(p->ObjectAreaStart(),
+ Page::kObjectAreaSize);
+#endif
}
+ } else {
+#ifdef DEBUG
+ MemoryAllocator::ZapBlock(p->ObjectAreaStart(),
+ Page::kObjectAreaSize);
+#endif
}
} else {
// This page is not empty. Sequence of empty pages ended on the previous
@@ -1361,7 +1378,9 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// If there is a free ending area on one of the previous pages we have
// to deallocate that area and put it on the free list.
if (last_free_size > 0) {
- dealloc(last_free_start, last_free_size, true);
+ Page::FromAddress(last_free_start)->
+ SetAllocationWatermark(last_free_start);
+ dealloc(last_free_start, last_free_size, true, true);
last_free_start = NULL;
last_free_size = 0;
}
@@ -1392,7 +1411,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// There was a free ending area on the previous page.
// Deallocate it without putting it into freelist and move allocation
// top to the beginning of this free area.
- dealloc(last_free_start, last_free_size, false);
+ dealloc(last_free_start, last_free_size, false, true);
new_allocation_top = last_free_start;
}
@@ -1415,34 +1434,36 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
int size_in_bytes,
- bool add_to_freelist) {
- Heap::ClearRSetRange(start, size_in_bytes);
+ bool add_to_freelist,
+ bool last_on_page) {
Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateOldDataBlock(Address start,
int size_in_bytes,
- bool add_to_freelist) {
+ bool add_to_freelist,
+ bool last_on_page) {
Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateCodeBlock(Address start,
int size_in_bytes,
- bool add_to_freelist) {
+ bool add_to_freelist,
+ bool last_on_page) {
Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateMapBlock(Address start,
int size_in_bytes,
- bool add_to_freelist) {
+ bool add_to_freelist,
+ bool last_on_page) {
// Objects in map space are assumed to have size Map::kSize and a
// valid map in their first word. Thus, we break the free block up into
// chunks and free them separately.
ASSERT(size_in_bytes % Map::kSize == 0);
- Heap::ClearRSetRange(start, size_in_bytes);
Address end = start + size_in_bytes;
for (Address a = start; a < end; a += Map::kSize) {
Heap::map_space()->Free(a, add_to_freelist);
@@ -1452,13 +1473,13 @@ void MarkCompactCollector::DeallocateMapBlock(Address start,
void MarkCompactCollector::DeallocateCellBlock(Address start,
int size_in_bytes,
- bool add_to_freelist) {
+ bool add_to_freelist,
+ bool last_on_page) {
// Free-list elements in cell space are assumed to have a fixed size.
// We break the free block into chunks and add them to the free list
// individually.
int size = Heap::cell_space()->object_size_in_bytes();
ASSERT(size_in_bytes % size == 0);
- Heap::ClearRSetRange(start, size_in_bytes);
Address end = start + size_in_bytes;
for (Address a = start; a < end; a += size) {
Heap::cell_space()->Free(a, add_to_freelist);
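Both the map-space and cell-space deallocators above rely on the same idea: because those spaces hold only objects of a single size, a contiguous free block can be split into object-sized chunks and each chunk handed to the free list individually. A minimal standalone sketch of that chunking, with a hypothetical FreeList type standing in for the real spaces:

#include <cassert>
#include <cstdint>
#include <vector>

// Hypothetical fixed-size free list standing in for Heap::map_space()/cell_space().
struct FreeList {
  std::vector<uintptr_t> entries;
  void Free(uintptr_t addr) { entries.push_back(addr); }
};

// Split a free block into object-sized chunks and hand each to the free list,
// mirroring DeallocateMapBlock / DeallocateCellBlock above.
void DeallocateFixedSizeBlock(FreeList* free_list, uintptr_t start,
                              int size_in_bytes, int object_size) {
  assert(size_in_bytes % object_size == 0);
  uintptr_t end = start + size_in_bytes;
  for (uintptr_t a = start; a < end; a += static_cast<uintptr_t>(object_size)) {
    free_list->Free(a);
  }
}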
@@ -1557,20 +1578,6 @@ class MapCompact {
GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
}
- void FinishMapSpace() {
- // Iterate through to space and finish move.
- MapIterator it;
- HeapObject* o = it.next();
- for (; o != first_map_to_evacuate_; o = it.next()) {
- ASSERT(o != NULL);
- Map* map = reinterpret_cast<Map*>(o);
- ASSERT(!map->IsMarked());
- ASSERT(!map->IsOverflowed());
- ASSERT(map->IsMap());
- Heap::UpdateRSet(map);
- }
- }
-
void UpdateMapPointersInPagedSpace(PagedSpace* space) {
ASSERT(space != Heap::map_space());
@@ -1663,9 +1670,9 @@ class MapCompact {
ASSERT(Map::kSize % 4 == 0);
- Heap::CopyBlock(reinterpret_cast<Object**>(vacant_map->address()),
- reinterpret_cast<Object**>(map_to_evacuate->address()),
- Map::kSize);
+ Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(vacant_map->address(),
+ map_to_evacuate->address(),
+ Map::kSize);
ASSERT(vacant_map->IsMap()); // Due to memcpy above.
@@ -1748,6 +1755,12 @@ void MarkCompactCollector::SweepSpaces() {
SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
SweepNewSpace(Heap::new_space());
SweepSpace(Heap::map_space(), &DeallocateMapBlock);
+
+ Heap::IterateDirtyRegions(Heap::map_space(),
+ &Heap::IteratePointersInDirtyMapsRegion,
+ &UpdatePointerToNewGen,
+ Heap::WATERMARK_SHOULD_BE_VALID);
+
int live_maps_size = Heap::map_space()->Size();
int live_maps = live_maps_size / Map::kSize;
ASSERT(live_map_objects_size_ == live_maps_size);
@@ -1758,7 +1771,6 @@ void MarkCompactCollector::SweepSpaces() {
map_compact.CompactMaps();
map_compact.UpdateMapPointersInRoots();
- map_compact.FinishMapSpace();
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
space != NULL; space = spaces.next()) {
@@ -2031,9 +2043,8 @@ Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
Page* forwarded_page = Page::FromAddress(first_forwarded);
int forwarded_offset = forwarded_page->Offset(first_forwarded);
- // Find end of allocation of in the page of first_forwarded.
- Address mc_top = forwarded_page->mc_relocation_top;
- int mc_top_offset = forwarded_page->Offset(mc_top);
+ // Find end of allocation in the page of first_forwarded.
+ int mc_top_offset = forwarded_page->AllocationWatermarkOffset();
// Check if current object's forward pointer is in the same page
// as the first live object's forwarding pointer
@@ -2050,7 +2061,7 @@ Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
offset += Page::kObjectStartOffset;
ASSERT_PAGE_OFFSET(offset);
- ASSERT(next_page->OffsetToAddress(offset) < next_page->mc_relocation_top);
+ ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());
return next_page->OffsetToAddress(offset);
}
@@ -2095,16 +2106,12 @@ void MarkCompactCollector::RelocateObjects() {
// Flip from and to spaces
Heap::new_space()->Flip();
+ Heap::new_space()->MCCommitRelocationInfo();
+
// Set age_mark to bottom in to space
Address mark = Heap::new_space()->bottom();
Heap::new_space()->set_age_mark(mark);
- Heap::new_space()->MCCommitRelocationInfo();
-#ifdef DEBUG
- // It is safe to write to the remembered sets as remembered sets on a
- // page-by-page basis after committing the m-c forwarding pointer.
- Page::set_rset_state(Page::IN_USE);
-#endif
PagedSpaces spaces;
for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
space->MCCommitRelocationInfo();
@@ -2131,9 +2138,9 @@ int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
if (new_addr != old_addr) {
// Move contents.
- Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
- reinterpret_cast<Object**>(old_addr),
- Map::kSize);
+ Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ old_addr,
+ Map::kSize);
}
#ifdef DEBUG
@@ -2190,9 +2197,13 @@ int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
if (new_addr != old_addr) {
// Move contents.
- Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
- reinterpret_cast<Object**>(old_addr),
- obj_size);
+ if (space == Heap::old_data_space()) {
+ Heap::MoveBlock(new_addr, old_addr, obj_size);
+ } else {
+ Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ old_addr,
+ obj_size);
+ }
}
ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
@@ -2237,9 +2248,7 @@ int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
if (new_addr != old_addr) {
// Move contents.
- Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
- reinterpret_cast<Object**>(old_addr),
- obj_size);
+ Heap::MoveBlock(new_addr, old_addr, obj_size);
}
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
@@ -2275,9 +2284,13 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
#endif
// New and old addresses cannot overlap.
- Heap::CopyBlock(reinterpret_cast<Object**>(new_addr),
- reinterpret_cast<Object**>(old_addr),
- obj_size);
+ if (Heap::InNewSpace(HeapObject::FromAddress(new_addr))) {
+ Heap::CopyBlock(new_addr, old_addr, obj_size);
+ } else {
+ Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ old_addr,
+ obj_size);
+ }
#ifdef DEBUG
if (FLAG_gc_verbose) {
@@ -2294,18 +2307,6 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
}
-// -------------------------------------------------------------------------
-// Phase 5: rebuild remembered sets
-
-void MarkCompactCollector::RebuildRSets() {
-#ifdef DEBUG
- ASSERT(state_ == RELOCATE_OBJECTS);
- state_ = REBUILD_RSETS;
-#endif
- Heap::RebuildRSets();
-}
-
-
void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (obj->IsCode()) {