Chromium Code Reviews

Unified Diff: src/heap.cc

Issue 6309012: * Complete new store buffer on ia32. The store buffer now covers... (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/gc/
Patch Set: '' Created 9 years, 11 months ago
Index: src/heap.cc
===================================================================
--- src/heap.cc (revision 6183)
+++ src/heap.cc (working copy)
@@ -971,6 +971,8 @@
CheckNewSpaceExpansionCriteria();
+ StoreBuffer::Verify();
Vyacheslav Egorov (Chromium) 2011/01/21 18:18:18 we should move verification of storebuffer into He
Erik Corry 2011/01/24 13:56:00 Done.
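A note on the resolution above: the verification call presumably ends up inside the existing debug-only heap verification pass rather than at the top of the scavenge. A minimal sketch of that placement; the surrounding Heap::Verify body is elided, and HasBeenSetup() is an assumed precondition check:

    #ifdef DEBUG
    void Heap::Verify() {
      ASSERT(HasBeenSetup());
      // ... verification of the individual spaces elided ...
      // Checking the store buffer here means every caller of Heap::Verify
      // gets the check, not just the scavenge path.
      StoreBuffer::Verify();
    }
    #endif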
+
// Flip the semispaces. After flipping, to space is empty, from space has
// live objects.
new_space_.Flip();
@@ -1000,6 +1002,10 @@
// Copy roots.
IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
+#ifdef DEBUG
+ StoreBuffer::Clean();
Vyacheslav Egorov (Chromium) 2011/01/21 18:18:18 Why do we do this only in debug? Comment?
Erik Corry 2011/01/24 13:56:00 When we actually use the store buffer info it will
+#endif
+
// Copy objects reachable from the old generation. By definition,
// there are no intergenerational pointers in code or data spaces.
IterateDirtyRegions(old_pointer_space_,
@@ -4058,20 +4064,21 @@
bool Heap::IteratePointersInDirtyRegion(Address start,
Address end,
ObjectSlotCallback copy_object_func) {
- Address slot_address = start;
bool pointers_to_new_space_found = false;
- while (slot_address < end) {
+ for (Address slot_address = start;
+ slot_address < end;
+ slot_address += kPointerSize) {
Object** slot = reinterpret_cast<Object**>(slot_address);
if (Heap::InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
copy_object_func(reinterpret_cast<HeapObject**>(slot));
if (Heap::InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
+ StoreBuffer::Mark(reinterpret_cast<Address>(slot));
pointers_to_new_space_found = true;
}
}
- slot_address += kPointerSize;
}
return pointers_to_new_space_found;
}
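For readers new to this patch: StoreBuffer::Mark records the address of a slot that has just been observed to hold a new-space pointer, replacing the per-page region marks used by the old card-marking barrier. The buffer's layout is not shown in this file; the sketch below assumes a simple append-only array of slot addresses, with top_ and limit_ as hypothetical member names:

    void StoreBuffer::Mark(Address slot) {
      // Record the slot, not the object it points to: the next scavenge
      // re-reads the slot and updates it if the target object has moved.
      *top_++ = reinterpret_cast<Object**>(slot);
      // Hypothetical overflow handling: deduplicate in place. A real
      // implementation needs a fallback for a genuinely full buffer.
      if (top_ == limit_) SortUniq();
    }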
@@ -4176,10 +4183,6 @@
Address end,
ObjectSlotCallback callback) {
Address slot_address = start;
- Page* page = Page::FromAddress(start);
-
- uint32_t marks = page->GetRegionMarks();
-
while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
if (Heap::InFromSpace(*slot)) {
@@ -4187,13 +4190,11 @@
callback(reinterpret_cast<HeapObject**>(slot));
if (Heap::InNewSpace(*slot)) {
ASSERT((*slot)->IsHeapObject());
- marks |= page->GetRegionMaskForAddress(slot_address);
+ StoreBuffer::Mark(reinterpret_cast<Address>(slot));
}
}
slot_address += kPointerSize;
}
-
- page->SetRegionMarks(marks);
}
@@ -4203,109 +4204,186 @@
Address area_end,
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback copy_object_func) {
-#ifndef ENABLE_CARDMARKING_WRITE_BARRIER
ASSERT(marks == Page::kAllRegionsDirtyMarks);
visit_dirty_region(area_start, area_end, copy_object_func);
return Page::kAllRegionsDirtyMarks;
-#else
- uint32_t newmarks = 0;
- uint32_t mask = 1;
+}
- if (area_start >= area_end) {
- return newmarks;
- }
- Address region_start = area_start;
+#ifdef DEBUG
+// Check that the store buffer contains all intergenerational pointers by
+// scanning a page and ensuring that all pointers to young space are in the
+// store buffer.
+void Heap::OldPointerSpaceCheckStoreBuffer(
+ ExpectedPageWatermarkState watermark_state) {
+ OldSpace* space = old_pointer_space();
+ PageIterator pages(space, PageIterator::PAGES_IN_USE);
- // area_start does not necessarily coincide with start of the first region.
- // Thus to calculate the beginning of the next region we have to align
- // area_start by Page::kRegionSize.
- Address second_region =
- reinterpret_cast<Address>(
- reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
- ~Page::kRegionAlignmentMask);
+ space->free_list()->Zap();
Vyacheslav Egorov (Chromium) 2011/01/21 18:18:18 Why do we need to Zap free_list? Can we zap it wi
Erik Corry 2011/01/24 13:56:00 We zap the free list so that we can walk the whole
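The point of the zap, per the reply above, is that the check below walks pages word by word, and stale pointers left in free-list memory would otherwise trigger false store-buffer misses. A sketch of the idea only; the single-list shape, the node accessors, and the filler value are all assumptions, and the reviewer asks (here and again below) for a recognizable special value:

    void FreeList::Zap() {
      FreeListNode* node = head_;
      while (node != NULL) {
        FreeListNode* next = node->next();  // Read the link before clobbering it.
        int size = node->Size();
        // Fill the node with a filler word that scans as a smi, so the
        // word-by-word page walk never misreads free memory as a pointer.
        for (Address a = node->address(); a < node->address() + size;
             a += kPointerSize) {
          Memory::Object_at(a) = Smi::FromInt(0x0f0f0f0f);
        }
        node = next;
      }
    }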
- // Next region might be beyond area_end.
- Address region_end = Min(second_region, area_end);
+ StoreBuffer::SortUniq();
Vyacheslav Egorov (Chromium) 2011/01/21 18:18:18 Verifying StoreBuffer has an unfortunate side-effe
Erik Corry 2011/01/24 13:56:00 I think we will be sorting and uniqifying on each
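Why the ordering matters: the check loops below walk each page in address order while advancing a single cursor through the store buffer, which is only correct if the buffer is sorted and duplicate-free. A sketch of what SortUniq presumably amounts to, with start_ and top_ as assumed member names (and <algorithm> for std::sort and std::unique):

    void StoreBuffer::SortUniq() {
      // Sort the recorded slot addresses and drop duplicates so that
      // verification can do a single merge-style pass over each page.
      std::sort(start_, top_);
      top_ = std::unique(start_, top_);
    }

As the reviewer notes, doing this during verification mutates the buffer; the reply suggests sorting and deduplicating will happen on each scavenge anyway.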
- if (marks & mask) {
- if (visit_dirty_region(region_start, region_end, copy_object_func)) {
- newmarks |= mask;
+ while (pages.has_next()) {
+ Page* page = pages.next();
+ Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
+
+ // Do not try to visit pointers beyond page allocation watermark.
+ // Page can contain garbage pointers there.
+ Address end;
+
+ if (watermark_state == WATERMARK_SHOULD_BE_VALID ||
+ page->IsWatermarkValid()) {
+ end = page->AllocationWatermark();
+ } else {
+ end = page->CachedAllocationWatermark();
}
- }
- mask <<= 1;
- // Iterate subsequent regions which fully lay inside [area_start, area_end[.
- region_start = region_end;
- region_end = region_start + Page::kRegionSize;
+ Object*** store_buffer_position = StoreBuffer::Start();
+ Object*** store_buffer_top = StoreBuffer::Top();
- while (region_end <= area_end) {
- if (marks & mask) {
- if (visit_dirty_region(region_start, region_end, copy_object_func)) {
- newmarks |= mask;
+ Object** limit = reinterpret_cast<Object**>(end);
+ for ( ; current < limit; current++) {
+ Object* o = *current;
+ if (o->IsSmi()) continue;
Vyacheslav Egorov (Chromium) 2011/01/21 18:18:18 InNewSpace(Object* o) can't return true for a smi.
Erik Corry 2011/01/24 13:56:00 Done.
+ // We have to check that the pointer does not point into new space
+ // without trying to cast it to a heap object since the hash field of
+ // a string can contain values like 1 and 3 which are tagged null
+ // pointers.
+ if (!InNewSpace(o)) continue;
+ while (*store_buffer_position < current) {
+ store_buffer_position++;
+ ASSERT(store_buffer_position < store_buffer_top);
}
+ if (*store_buffer_position != current) {
+ Object** obj_start = current;
+ while (!(*obj_start)->IsMap()) obj_start--;
+ UNREACHABLE();
+ }
}
+ }
+}
- region_start = region_end;
- region_end = region_start + Page::kRegionSize;
- mask <<= 1;
+void Heap::MapSpaceCheckStoreBuffer(
+ ExpectedPageWatermarkState watermark_state) {
+ MapSpace* space = map_space();
+ PageIterator pages(space, PageIterator::PAGES_IN_USE);
+
+ space->free_list()->Zap();
Vyacheslav Egorov (Chromium) 2011/01/21 18:18:18 Can we zap it with a special value to detect cases
Erik Corry 2011/01/24 13:56:00 Done.
+
+ StoreBuffer::SortUniq();
+
+ while (pages.has_next()) {
+ Page* page = pages.next();
+
+ // Do not try to visit pointers beyond page allocation watermark.
+ // Page can contain garbage pointers there.
+ Address end;
+
+ if (watermark_state == WATERMARK_SHOULD_BE_VALID ||
+ page->IsWatermarkValid()) {
+ end = page->AllocationWatermark();
+ } else {
+ end = page->CachedAllocationWatermark();
+ }
+
+ Address map_aligned_current = MapStartAlign(page->ObjectAreaStart());
Vyacheslav Egorov (Chromium) 2011/01/21 18:18:18 both ObjectAreaStart and watermark should be align
Erik Corry 2011/01/24 13:56:00 Done.
+ Address map_aligned_end = MapEndAlign(end);
+
+ Object*** store_buffer_position = StoreBuffer::Start();
+ Object*** store_buffer_top = StoreBuffer::Top();
+
+ for ( ;
+ map_aligned_current < map_aligned_end;
+ map_aligned_current += Map::kSize) {
+ ASSERT(!Heap::InNewSpace(Memory::Object_at(map_aligned_current)));
+ ASSERT(Memory::Object_at(map_aligned_current)->IsMap());
+
+ Object** current = reinterpret_cast<Object**>(
+ map_aligned_current + Map::kPointerFieldsBeginOffset);
+ Object** limit = reinterpret_cast<Object**>(
+ map_aligned_current + Map::kPointerFieldsEndOffset);
+
+ for ( ; current < limit; current++) {
Vyacheslav Egorov (Chromium) 2011/01/21 18:18:18 This loop looks familiar. I saw a similar one above
Erik Corry 2011/01/24 13:56:00 Done.
+ Object* o = *current;
+ if (o->IsSmi()) continue;
Vyacheslav Egorov (Chromium) 2011/01/21 18:18:18 InNewSpace(Object* o) can't return true for a smi.
Erik Corry 2011/01/24 13:56:00 Done.
+ HeapObject* heap_object = HeapObject::cast(o);
+ if (!InNewSpace(heap_object)) continue;
+ while (*store_buffer_position < current) {
+ store_buffer_position++;
+ ASSERT(store_buffer_position < store_buffer_top);
+ }
+ ASSERT(*store_buffer_position == current);
+ }
+ }
}
+}
- if (region_start != area_end) {
- // A small piece of area left uniterated because area_end does not coincide
- // with region end. Check whether region covering last part of area is
- // dirty.
- if (marks & mask) {
- if (visit_dirty_region(region_start, area_end, copy_object_func)) {
- newmarks |= mask;
+
+void Heap::LargeObjectSpaceCheckStoreBuffer() {
+ LargeObjectIterator it(lo_space());
+ for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
+ // We only have code, sequential strings, or fixed arrays in large
+ // object space, and only fixed arrays can possibly contain pointers to
+ // the young generation.
+ if (object->IsFixedArray()) {
+ Object*** store_buffer_position = StoreBuffer::Start();
+ Object*** store_buffer_top = StoreBuffer::Top();
+ Object** current = reinterpret_cast<Object**>(object->address());
+ Object** limit =
+ reinterpret_cast<Object**>(object->address() + object->Size());
+ for ( ; current < limit; current++) {
Vyacheslav Egorov (Chromium) 2011/01/21 18:18:18 This loop looks familiar. Consider moving it to s
Erik Corry 2011/01/24 13:56:00 Done.
+ Object* o = *current;
+ if (o->IsSmi()) continue;
+ HeapObject* heap_object = HeapObject::cast(o);
+ if (!InNewSpace(heap_object)) continue;
+ while (*store_buffer_position < current) {
+ store_buffer_position++;
+ ASSERT(store_buffer_position < store_buffer_top);
+ }
+ ASSERT(*store_buffer_position == current);
}
}
}
+}
- return newmarks;
+
#endif
-}
-
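The same scan-and-match loop appears three times in this patch (old pointer space, map space, large object space), and the reviewer asks for it to be factored into a shared helper; the replies say "Done". One possible shape for that helper, with the buffer cursor passed by reference so it persists across calls on the same page; the name and signature are hypothetical:

    static void CheckStoreBufferForRange(Object** current,
                                         Object** limit,
                                         Object***& store_buffer_position,
                                         Object*** store_buffer_top) {
      for ( ; current < limit; current++) {
        Object* o = *current;
        // InNewSpace never dereferences its argument, so it is safe on raw
        // words such as string hash fields, and it is false for smis; no
        // separate smi check is needed, as the reviewer points out.
        if (!Heap::InNewSpace(o)) continue;
        // The buffer was sorted by SortUniq, so a merge-style walk finds
        // the matching entry in one forward pass.
        while (*store_buffer_position < current) {
          store_buffer_position++;
          ASSERT(store_buffer_position < store_buffer_top);
        }
        // Every slot holding a new-space pointer must have been recorded.
        ASSERT(*store_buffer_position == current);
      }
    }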
void Heap::IterateDirtyRegions(
PagedSpace* space,
DirtyRegionCallback visit_dirty_region,
ObjectSlotCallback copy_object_func,
ExpectedPageWatermarkState expected_page_watermark_state) {
- PageIterator it(space, PageIterator::PAGES_IN_USE);
+ PageIterator pages(space, PageIterator::PAGES_IN_USE);
- while (it.has_next()) {
- Page* page = it.next();
- uint32_t marks = page->GetRegionMarks();
+ while (pages.has_next()) {
+ Page* page = pages.next();
+ Address start = page->ObjectAreaStart();
- if (marks != Page::kAllRegionsCleanMarks) {
- Address start = page->ObjectAreaStart();
+ // Do not try to visit pointers beyond page allocation watermark.
+ // Page can contain garbage pointers there.
+ Address end;
- // Do not try to visit pointers beyond page allocation watermark.
- // Page can contain garbage pointers there.
- Address end;
+ if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
+ page->IsWatermarkValid()) {
+ end = page->AllocationWatermark();
+ } else {
+ end = page->CachedAllocationWatermark();
+ }
- if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
- page->IsWatermarkValid()) {
- end = page->AllocationWatermark();
- } else {
- end = page->CachedAllocationWatermark();
- }
+ ASSERT(space == old_pointer_space_ ||
+ (space == map_space_ &&
+ ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
- ASSERT(space == old_pointer_space_ ||
- (space == map_space_ &&
- ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
+ IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
+ start,
+ end,
+ visit_dirty_region,
+ copy_object_func);
- page->SetRegionMarks(IterateDirtyRegions(marks,
- start,
- end,
- visit_dirty_region,
- copy_object_func));
- }
-
// Mark page watermark as invalid to maintain watermark validity invariant.
// See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
page->InvalidateWatermark(true);
