Index: src/spaces.cc
diff --git a/src/spaces.cc b/src/spaces.cc
index 6b6d926e257153f35de2453e9359cb9faf772f0a..50f3572ba58237bfc2b436c45887c1d845924721 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -41,6 +41,7 @@ namespace internal {
          && (info).top <= (space).high()              \
          && (info).limit == (space).high())
 
+intptr_t Page::watermark_invalidated_mark_ = Page::WATERMARK_INVALIDATED;
 
 // ----------------------------------------------------------------------------
 // HeapObjectIterator
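The new `watermark_invalidated_mark_` static supports constant-time, heap-wide invalidation of allocation watermarks: instead of visiting every page at the start of a collection, the collector flips the global value that each page's flag is compared against, which toggles the validity of all pages at once. A minimal sketch of the idea, assuming a simplified layout (in the real code the flag is a single bit of the page flags word, flipped by a helper along the lines of `FlipMeaningOfInvalidatedWatermarkFlag()`):

```cpp
#include <cassert>
#include <cstdint>

// Simplified model of the flip trick; illustrative only.
struct Page {
  static intptr_t watermark_invalidated_mark_;  // global "invalid" value
  intptr_t watermark_flag_ = 0;                 // per-page flag

  // The cached watermark may be trusted only while the page's flag
  // differs from the global invalidated value.
  bool IsWatermarkValid() const {
    return watermark_flag_ != watermark_invalidated_mark_;
  }

  void InvalidateWatermark(bool value) {
    watermark_flag_ = value ? watermark_invalidated_mark_
                            : ~watermark_invalidated_mark_;
  }

  // One store toggles the validity of every page in the heap;
  // no page has to be visited.
  static void FlipMeaningOfInvalidatedWatermarkFlag() {
    watermark_invalidated_mark_ = ~watermark_invalidated_mark_;
  }
};

intptr_t Page::watermark_invalidated_mark_ = 1;

int main() {
  Page p;
  p.InvalidateWatermark(false);  // explicitly mark valid
  assert(p.IsWatermarkValid());
  Page::FlipMeaningOfInvalidatedWatermarkFlag();
  assert(!p.IsWatermarkValid());  // invalidated without touching the page
  return 0;
}
```

This is why `InitializePagesInChunk` below can mark fresh pages invalid with a single call per page, and why the GC prologue never needs a whole-heap walk.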
@@ -139,13 +140,6 @@ PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
 
 
 // -----------------------------------------------------------------------------
-// Page
-
-#ifdef DEBUG
-Page::RSetState Page::rset_state_ = Page::IN_USE;
-#endif
-
-// -----------------------------------------------------------------------------
 // CodeRange
 
 List<CodeRange::FreeBlock> CodeRange::free_list_(0);
@@ -524,7 +518,10 @@ Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
   for (int i = 0; i < pages_in_chunk; i++) {
     Page* p = Page::FromAddress(page_addr);
     p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
+    p->InvalidateWatermark(true);
     p->SetIsLargeObjectPage(false);
+    p->SetAllocationWatermark(p->ObjectAreaStart());
+    p->SetCachedAllocationWatermark(p->ObjectAreaStart());
     page_addr += Page::kPageSize;
   }
 
@@ -681,6 +678,7 @@ Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
     p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
     page_addr += Page::kPageSize;
 
+    p->InvalidateWatermark(true);
     if (p->WasInUseBeforeMC()) {
       *last_page_in_use = p;
     }
@@ -744,10 +742,10 @@ bool PagedSpace::Setup(Address start, size_t size) {
   accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
   ASSERT(Capacity() <= max_capacity_);
 
-  // Sequentially initialize remembered sets in the newly allocated
+  // Sequentially clear region marks in the newly allocated
   // pages and cache the current last page in the space.
   for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
-    p->ClearRSet();
+    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
     last_page_ = p;
   }
 
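`SetRegionMarks(Page::kAllRegionsCleanMarks)` is the replacement for the old `ClearRSet()`: instead of a word-granular remembered set, each page now carries one 32-bit bitmap in which each bit marks a fixed-size region of the object area as possibly containing pointers into the young generation. A self-contained sketch of that layout; the constants are illustrative (the real values derive from `Page::kPageSize`):

```cpp
#include <cassert>
#include <cstdint>

// Illustrative: an 8K page split into 32 dirty regions of 256 bytes.
constexpr uintptr_t kPageSize = 8 * 1024;
constexpr int kRegionsPerPage = 32;
constexpr uintptr_t kRegionSize = kPageSize / kRegionsPerPage;
constexpr uint32_t kAllRegionsCleanMarks = 0x0;

struct Page {
  uintptr_t start;                                 // page base address
  uint32_t dirty_regions = kAllRegionsCleanMarks;  // one bit per region

  int GetRegionNumberForAddress(uintptr_t addr) const {
    return static_cast<int>((addr - start) / kRegionSize);
  }
  // Called by the write barrier when a pointer is stored at addr.
  void MarkRegionDirty(uintptr_t addr) {
    dirty_regions |= 1u << GetRegionNumberForAddress(addr);
  }
  bool IsRegionDirty(uintptr_t addr) const {
    return (dirty_regions >> GetRegionNumberForAddress(addr)) & 1u;
  }
  void SetRegionMarks(uint32_t marks) { dirty_regions = marks; }
  uint32_t GetRegionMarks() const { return dirty_regions; }
};

int main() {
  Page p{0x10000};
  p.MarkRegionDirty(0x10000 + 300);  // store lands in region 1
  assert(p.IsRegionDirty(0x10000 + 256));
  assert(!p.IsRegionDirty(0x10000));
  p.SetRegionMarks(kAllRegionsCleanMarks);  // what Setup() does per page
  assert(p.GetRegionMarks() == 0);
  return 0;
}
```

The trade-off is coarser precision (a scavenge rescans a whole region per dirty bit) in exchange for a much smaller, simpler side structure. It is also why per-page allocation watermarks appear throughout this patch: they bound how much of each page the dirty-region scan has to consider.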
@@ -794,10 +792,10 @@ void PagedSpace::Unprotect() {
 #endif
 
 
-void PagedSpace::ClearRSet() {
+void PagedSpace::MarkAllPagesClean() {
   PageIterator it(this, PageIterator::ALL_PAGES);
   while (it.has_next()) {
-    it.next()->ClearRSet();
+    it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
   }
 }
 
@@ -900,7 +898,8 @@ HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
   // of forwarding addresses is as an offset in terms of live bytes, so we
   // need quick access to the allocation top of each page to decode
   // forwarding addresses.
-  current_page->mc_relocation_top = mc_forwarding_info_.top;
+  current_page->SetAllocationWatermark(mc_forwarding_info_.top);
+  current_page->next_page()->InvalidateWatermark(true);
   SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
   return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
 }
@@ -928,10 +927,10 @@ bool PagedSpace::Expand(Page* last_page) {
 
   MemoryAllocator::SetNextPage(last_page, p);
 
-  // Sequentially clear remembered set of new pages and and cache the
+  // Sequentially clear region marks of new pages and cache the
   // new last page in the space.
   while (p->is_valid()) {
-    p->ClearRSet();
+    p->SetRegionMarks(Page::kAllRegionsCleanMarks);
     last_page_ = p;
     p = p->next_page();
   }
@@ -1030,16 +1029,11 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
     if (above_allocation_top) {
       // We don't care what's above the allocation top.
     } else {
-      // Unless this is the last page in the space containing allocated
-      // objects, the allocation top should be at a constant offset from the
-      // object area end.
       Address top = current_page->AllocationTop();
       if (current_page == top_page) {
         ASSERT(top == allocation_info_.top);
         // The next page will be above the allocation top.
         above_allocation_top = true;
-      } else {
-        ASSERT(top == PageAllocationLimit(current_page));
       }
 
       // It should be packed with objects from the bottom to the top.
@@ -1060,8 +1054,8 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
         object->Verify();
 
         // All the interior pointers should be contained in the heap and
-        // have their remembered set bits set if required as determined
-        // by the visitor.
+        // the page regions covering intergenerational references should be
+        // marked dirty.
         int size = object->Size();
         object->IterateBody(map->instance_type(), size, visitor);
 
@@ -1634,7 +1628,7 @@ void FreeListNode::set_size(int size_in_bytes) {
   // If the block is too small (eg, one or two words), to hold both a size
   // field and a next pointer, we give it a filler map that gives it the
   // correct size.
-  if (size_in_bytes > ByteArray::kAlignedSize) {
+  if (size_in_bytes > ByteArray::kHeaderSize) {
     set_map(Heap::raw_unchecked_byte_array_map());
     // Can't use ByteArray::cast because it fails during deserialization.
     ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
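The threshold in `set_size` is the smallest block that can carry a byte-array header (map word plus length word) and still leave room for the free-list link; one- and two-word blocks get fixed-size filler maps instead. A hedged sketch of the classification, assuming the 32-bit layout of the era (`kHeaderSize` = map + length = 8 bytes; names here are illustrative, not V8's):

```cpp
#include <cassert>

constexpr int kPointerSize = 4;  // assumed 32-bit heap
constexpr int kByteArrayHeaderSize = 2 * kPointerSize;

enum FreeBlockKind { kOneWordFiller, kTwoWordFiller, kByteArrayBlock };

// Mirrors the branch above: only blocks strictly larger than the
// byte-array header can encode their own size and a next pointer.
FreeBlockKind ClassifyFreeBlock(int size_in_bytes) {
  assert(size_in_bytes >= kPointerSize && size_in_bytes % kPointerSize == 0);
  if (size_in_bytes > kByteArrayHeaderSize) return kByteArrayBlock;
  return size_in_bytes == kPointerSize ? kOneWordFiller : kTwoWordFiller;
}

int main() {
  assert(ClassifyFreeBlock(4) == kOneWordFiller);
  assert(ClassifyFreeBlock(8) == kTwoWordFiller);
  assert(ClassifyFreeBlock(12) == kByteArrayBlock);
  return 0;
}
```

The patch switches the comparison to `kHeaderSize`, the true minimum that a self-describing free block needs.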
@@ -1907,15 +1901,14 @@ void OldSpace::MCCommitRelocationInfo() {
     Page* p = it.next();
     // Space below the relocation pointer is allocated.
     computed_size +=
-        static_cast<int>(p->mc_relocation_top - p->ObjectAreaStart());
+        static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
     if (it.has_next()) {
-      // Free the space at the top of the page.  We cannot use
-      // p->mc_relocation_top after the call to Free (because Free will clear
-      // remembered set bits).
+      // Free the space at the top of the page.
       int extra_size =
-          static_cast<int>(p->ObjectAreaEnd() - p->mc_relocation_top);
+          static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
       if (extra_size > 0) {
-        int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
+        int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
+                                           extra_size);
         // The bytes we have just "freed" to add to the free list were
         // already accounted as available.
         accounting_stats_.WasteBytes(wasted_bytes);
@@ -1963,7 +1956,10 @@ void PagedSpace::FreePages(Page* prev, Page* last) {
 
   // Clean them up.
   do {
-    first->ClearRSet();
+    first->InvalidateWatermark(true);
+    first->SetAllocationWatermark(first->ObjectAreaStart());
+    first->SetCachedAllocationWatermark(first->ObjectAreaStart());
+    first->SetRegionMarks(Page::kAllRegionsCleanMarks);
     first = first->next_page();
   } while (first != NULL);
 
@@ -2003,6 +1999,7 @@ void PagedSpace::PrepareForMarkCompact(bool will_compact) {
         // Current allocation top points to a page which is now in the middle
         // of page list. We should move allocation top forward to the new last
         // used page so various object iterators will continue to work properly.
+        last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
 
         int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
                                              last_in_use->AllocationTop());
@@ -2035,6 +2032,7 @@ void PagedSpace::PrepareForMarkCompact(bool will_compact) {
           int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
                                                p->ObjectAreaStart());
 
+          p->SetAllocationWatermark(p->ObjectAreaStart());
           Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes);
         }
       }
@@ -2066,6 +2064,7 @@ bool PagedSpace::ReserveSpace(int bytes) {
     if (!reserved_page->is_valid()) return false;
   }
   ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
+  TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
   SetAllocationInfo(&allocation_info_,
                     TopPageOf(allocation_info_)->next_page());
   return true;
@@ -2100,7 +2099,15 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
     accounting_stats_.WasteBytes(wasted_bytes);
     if (!result->IsFailure()) {
       accounting_stats_.AllocateBytes(size_in_bytes);
-      return HeapObject::cast(result);
+
+      HeapObject* obj = HeapObject::cast(result);
+      Page* p = Page::FromAddress(obj->address());
+
+      if (obj->address() >= p->AllocationWatermark()) {
+        p->SetAllocationWatermark(obj->address() + size_in_bytes);
+      }
+
+      return obj;
     }
   }
 
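The new tail of `SlowAllocateRaw` keeps the per-page watermark a monotone high-water mark over free-list allocations: a free-list hit may land in a hole below the current watermark (no update needed), but an allocation at or beyond it must push the watermark up so that dirty-region scanning continues to cover every initialized object. A small sketch of just that invariant, with a simplified `Page` and illustrative names:

```cpp
#include <cassert>
#include <cstdint>

struct Page {
  uintptr_t watermark;  // allocation high-water mark for this page
  uintptr_t AllocationWatermark() const { return watermark; }
  void SetAllocationWatermark(uintptr_t a) { watermark = a; }
};

// Mirrors the logic added to OldSpace/FixedSpace::SlowAllocateRaw.
void OnFreeListAllocation(Page* p, uintptr_t addr, int size_in_bytes) {
  // Only raise the mark, never lower it: lowering would leave objects
  // (and their recorded dirty regions) above the watermark unscanned.
  if (addr >= p->AllocationWatermark()) {
    p->SetAllocationWatermark(addr + size_in_bytes);
  }
}

int main() {
  Page p{0x1100};
  OnFreeListAllocation(&p, 0x1000, 32);  // hole below the mark: unchanged
  assert(p.AllocationWatermark() == 0x1100);
  OnFreeListAllocation(&p, 0x1100, 32);  // at the mark: push it up
  assert(p.AllocationWatermark() == 0x1120);
  return 0;
}
```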
@@ -2123,6 +2130,7 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
 
 
 void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
+  current_page->SetAllocationWatermark(allocation_info_.top);
   int free_size =
       static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
   if (free_size > 0) {
@@ -2133,6 +2141,7 @@ void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
 
 
 void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
+  current_page->SetAllocationWatermark(allocation_info_.top);
   int free_size =
       static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
   // In the fixed space free list all the free list items have the right size.
@@ -2152,6 +2161,7 @@ void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
 HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
                                          int size_in_bytes) {
   ASSERT(current_page->next_page()->is_valid());
+  current_page->next_page()->InvalidateWatermark(true);
   PutRestOfCurrentPageOnFreeList(current_page);
   SetAllocationInfo(&allocation_info_, current_page->next_page());
   return AllocateLinearly(&allocation_info_, size_in_bytes);
@@ -2296,160 +2306,12 @@ void OldSpace::ReportStatistics() {
   PrintF("  capacity: %d, waste: %d, available: %d, %%%d\n",
          Capacity(), Waste(), Available(), pct);
 
-  // Report remembered set statistics.
-  int rset_marked_pointers = 0;
-  int rset_marked_arrays = 0;
-  int rset_marked_array_elements = 0;
-  int cross_gen_pointers = 0;
-  int cross_gen_array_elements = 0;
-
-  PageIterator page_it(this, PageIterator::PAGES_IN_USE);
-  while (page_it.has_next()) {
-    Page* p = page_it.next();
-
-    for (Address rset_addr = p->RSetStart();
-         rset_addr < p->RSetEnd();
-         rset_addr += kIntSize) {
-      int rset = Memory::int_at(rset_addr);
-      if (rset != 0) {
-        // Bits were set
-        int intoff =
-            static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
-        int bitoff = 0;
-        for (; bitoff < kBitsPerInt; ++bitoff) {
-          if ((rset & (1 << bitoff)) != 0) {
-            int bitpos = intoff*kBitsPerByte + bitoff;
-            Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
-            Object** obj = reinterpret_cast<Object**>(slot);
-            if (*obj == Heap::raw_unchecked_fixed_array_map()) {
-              rset_marked_arrays++;
-              FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));
-
-              rset_marked_array_elements += fa->length();
-              // Manually inline FixedArray::IterateBody
-              Address elm_start = slot + FixedArray::kHeaderSize;
-              Address elm_stop = elm_start + fa->length() * kPointerSize;
-              for (Address elm_addr = elm_start;
-                   elm_addr < elm_stop; elm_addr += kPointerSize) {
-                // Filter non-heap-object pointers
-                Object** elm_p = reinterpret_cast<Object**>(elm_addr);
-                if (Heap::InNewSpace(*elm_p))
-                  cross_gen_array_elements++;
-              }
-            } else {
-              rset_marked_pointers++;
-              if (Heap::InNewSpace(*obj))
-                cross_gen_pointers++;
-            }
-          }
-        }
-      }
-    }
-  }
-
-  pct = rset_marked_pointers == 0 ?
-        0 : cross_gen_pointers * 100 / rset_marked_pointers;
-  PrintF("  rset-marked pointers %d, to-new-space %d (%%%d)\n",
-            rset_marked_pointers, cross_gen_pointers, pct);
-  PrintF("  rset_marked arrays %d, ", rset_marked_arrays);
-  PrintF("  elements %d, ", rset_marked_array_elements);
-  pct = rset_marked_array_elements == 0 ? 0
-           : cross_gen_array_elements * 100 / rset_marked_array_elements;
-  PrintF("  pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct);
-  PrintF("  total rset-marked bits %d\n",
-            (rset_marked_pointers + rset_marked_arrays));
-  pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0
-        : (cross_gen_pointers + cross_gen_array_elements) * 100 /
-          (rset_marked_pointers + rset_marked_array_elements);
-  PrintF("  total rset pointers %d, true cross generation ones %d (%%%d)\n",
-         (rset_marked_pointers + rset_marked_array_elements),
-         (cross_gen_pointers + cross_gen_array_elements),
-         pct);
-
   ClearHistograms();
   HeapObjectIterator obj_it(this);
   for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
     CollectHistogramInfo(obj);
   ReportHistogram(true);
 }
-
-
-// Dump the range of remembered set words between [start, end) corresponding
-// to the pointers starting at object_p.  The allocation_top is an object
-// pointer which should not be read past.  This is important for large object
-// pages, where some bits in the remembered set range do not correspond to
-// allocated addresses.
-static void PrintRSetRange(Address start, Address end, Object** object_p,
-                           Address allocation_top) {
-  Address rset_address = start;
-
-  // If the range starts on on odd numbered word (eg, for large object extra
-  // remembered set ranges), print some spaces.
-  if ((reinterpret_cast<uintptr_t>(start) / kIntSize) % 2 == 1) {
-    PrintF("                                    ");
-  }
-
-  // Loop over all the words in the range.
-  while (rset_address < end) {
-    uint32_t rset_word = Memory::uint32_at(rset_address);
-    int bit_position = 0;
-
-    // Loop over all the bits in the word.
-    while (bit_position < kBitsPerInt) {
-      if (object_p == reinterpret_cast<Object**>(allocation_top)) {
-        // Print a bar at the allocation pointer.
-        PrintF("|");
-      } else if (object_p > reinterpret_cast<Object**>(allocation_top)) {
-        // Do not dereference object_p past the allocation pointer.
-        PrintF("#");
-      } else if ((rset_word & (1 << bit_position)) == 0) {
-        // Print a dot for zero bits.
-        PrintF(".");
-      } else if (Heap::InNewSpace(*object_p)) {
-        // Print an X for one bits for pointers to new space.
-        PrintF("X");
-      } else {
-        // Print a circle for one bits for pointers to old space.
-        PrintF("o");
-      }
-
-      // Print a space after every 8th bit except the last.
-      if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) {
-        PrintF(" ");
-      }
-
-      // Advance to next bit.
-      bit_position++;
-      object_p++;
-    }
-
-    // Print a newline after every odd numbered word, otherwise a space.
-    if ((reinterpret_cast<uintptr_t>(rset_address) / kIntSize) % 2 == 1) {
-      PrintF("\n");
-    } else {
-      PrintF(" ");
-    }
-
-    // Advance to next remembered set word.
-    rset_address += kIntSize;
-  }
-}
-
-
-void PagedSpace::DoPrintRSet(const char* space_name) {
-  PageIterator it(this, PageIterator::PAGES_IN_USE);
-  while (it.has_next()) {
-    Page* p = it.next();
-    PrintF("%s page 0x%x:\n", space_name, p);
-    PrintRSetRange(p->RSetStart(), p->RSetEnd(),
-                   reinterpret_cast<Object**>(p->ObjectAreaStart()),
-                   p->AllocationTop());
-    PrintF("\n");
-  }
-}
-
-
-void OldSpace::PrintRSet() { DoPrintRSet("old"); }
 #endif
 
 // -----------------------------------------------------------------------------
@@ -2499,6 +2361,7 @@ void FixedSpace::MCCommitRelocationInfo() {
     if (it.has_next()) {
       accounting_stats_.WasteBytes(
           static_cast<int>(page->ObjectAreaEnd() - page_top));
+      page->SetAllocationWatermark(page_top);
     }
   }
 
@@ -2528,7 +2391,14 @@ HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
     Object* result = free_list_.Allocate();
     if (!result->IsFailure()) {
      accounting_stats_.AllocateBytes(size_in_bytes);
-      return HeapObject::cast(result);
+      HeapObject* obj = HeapObject::cast(result);
+      Page* p = Page::FromAddress(obj->address());
+
+      if (obj->address() >= p->AllocationWatermark()) {
+        p->SetAllocationWatermark(obj->address() + size_in_bytes);
+      }
+
+      return obj;
     }
   }
 
@@ -2558,6 +2428,8 @@ HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
   ASSERT(current_page->next_page()->is_valid());
   ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
   ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
+  current_page->next_page()->InvalidateWatermark(true);
+  current_page->SetAllocationWatermark(allocation_info_.top);
   accounting_stats_.WasteBytes(page_extra_);
   SetAllocationInfo(&allocation_info_, current_page->next_page());
   return AllocateLinearly(&allocation_info_, size_in_bytes);
@@ -2570,51 +2442,12 @@ void FixedSpace::ReportStatistics() {
   PrintF("  capacity: %d, waste: %d, available: %d, %%%d\n",
          Capacity(), Waste(), Available(), pct);
 
-  // Report remembered set statistics.
-  int rset_marked_pointers = 0;
-  int cross_gen_pointers = 0;
-
-  PageIterator page_it(this, PageIterator::PAGES_IN_USE);
-  while (page_it.has_next()) {
-    Page* p = page_it.next();
-
-    for (Address rset_addr = p->RSetStart();
-         rset_addr < p->RSetEnd();
-         rset_addr += kIntSize) {
-      int rset = Memory::int_at(rset_addr);
-      if (rset != 0) {
-        // Bits were set
-        int intoff =
-            static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
-        int bitoff = 0;
-        for (; bitoff < kBitsPerInt; ++bitoff) {
-          if ((rset & (1 << bitoff)) != 0) {
-            int bitpos = intoff*kBitsPerByte + bitoff;
-            Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
-            Object** obj = reinterpret_cast<Object**>(slot);
-            rset_marked_pointers++;
-            if (Heap::InNewSpace(*obj))
-              cross_gen_pointers++;
-          }
-        }
-      }
-    }
-  }
-
-  pct = rset_marked_pointers == 0 ?
-          0 : cross_gen_pointers * 100 / rset_marked_pointers;
-  PrintF("  rset-marked pointers %d, to-new-space %d (%%%d)\n",
-            rset_marked_pointers, cross_gen_pointers, pct);
-
   ClearHistograms();
   HeapObjectIterator obj_it(this);
   for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
     CollectHistogramInfo(obj);
   ReportHistogram(false);
 }
-
-
-void FixedSpace::PrintRSet() { DoPrintRSet(name_); }
 #endif
 
 
@@ -2793,8 +2626,7 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
   chunk->set_size(chunk_size);
   first_chunk_ = chunk;
 
-  // Set the object address and size in the page header and clear its
-  // remembered set.
+  // Initialize page header.
   Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
   Address object_address = page->ObjectAreaStart();
   // Clear the low order bit of the second word in the page to flag it as a
@@ -2802,13 +2634,7 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
   // low order bit should already be clear.
   ASSERT((chunk_size & 0x1) == 0);
   page->SetIsLargeObjectPage(true);
-  page->ClearRSet();
-  int extra_bytes = requested_size - object_size;
-  if (extra_bytes > 0) {
-    // The extra memory for the remembered set should be cleared.
-    memset(object_address + object_size, 0, extra_bytes);
-  }
-
+  page->SetRegionMarks(Page::kAllRegionsCleanMarks);
   return HeapObject::FromAddress(object_address);
 }
 
@@ -2823,8 +2649,7 @@ Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
 
 Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
   ASSERT(0 < size_in_bytes);
-  int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes);
-  return AllocateRawInternal(size_in_bytes + extra_rset_bytes,
+  return AllocateRawInternal(size_in_bytes,
                              size_in_bytes,
                              NOT_EXECUTABLE);
 }
@@ -2851,59 +2676,61 @@ Object* LargeObjectSpace::FindObject(Address a) {
   return Failure::Exception();
 }
 
-
-void LargeObjectSpace::ClearRSet() {
-  ASSERT(Page::is_rset_in_use());
-
-  LargeObjectIterator it(this);
-  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
-    // We only have code, sequential strings, or fixed arrays in large
-    // object space, and only fixed arrays need remembered set support.
-    if (object->IsFixedArray()) {
-      // Clear the normal remembered set region of the page;
-      Page* page = Page::FromAddress(object->address());
-      page->ClearRSet();
-
-      // Clear the extra remembered set.
-      int size = object->Size();
-      int extra_rset_bytes = ExtraRSetBytesFor(size);
-      memset(object->address() + size, 0, extra_rset_bytes);
-    }
-  }
-}
-
-
-void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
-  ASSERT(Page::is_rset_in_use());
-
-  static void* lo_rset_histogram = StatsTable::CreateHistogram(
-      "V8.RSetLO",
-      0,
-      // Keeping this histogram's buckets the same as the paged space histogram.
-      Page::kObjectAreaSize / kPointerSize,
-      30);
-
+void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
   LargeObjectIterator it(this);
   for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
     // We only have code, sequential strings, or fixed arrays in large
     // object space, and only fixed arrays can possibly contain pointers to
     // the young generation.
     if (object->IsFixedArray()) {
-      // Iterate the normal page remembered set range.
       Page* page = Page::FromAddress(object->address());
-      Address object_end = object->address() + object->Size();
-      int count = Heap::IterateRSetRange(page->ObjectAreaStart(),
-                                         Min(page->ObjectAreaEnd(), object_end),
-                                         page->RSetStart(),
-                                         copy_object_func);
-
-      // Iterate the extra array elements.
-      if (object_end > page->ObjectAreaEnd()) {
-        count += Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
-                                        object_end, copy_object_func);
-      }
-      if (lo_rset_histogram != NULL) {
-        StatsTable::AddHistogramSample(lo_rset_histogram, count);
+      uint32_t marks = page->GetRegionMarks();
+      uint32_t newmarks = Page::kAllRegionsCleanMarks;
+
+      if (marks != Page::kAllRegionsCleanMarks) {
+        // On a large page a single dirty mark covers several regions
+        // (region numbers alias modulo 32), so we treat the large page as
+        // a sequence of normal pages of size Page::kPageSize that share
+        // the same dirty marks and iterate dirty regions on each in turn.
+        Address start = object->address();
+        Address end = page->ObjectAreaEnd();
+        Address object_end = start + object->Size();
+
+        // Iterate regions of the first normal page covering the object.
+        uint32_t first_region_number = page->GetRegionNumberForAddress(start);
+        newmarks |=
+            Heap::IterateDirtyRegions(marks >> first_region_number,
+                                      start,
+                                      end,
+                                      &Heap::IteratePointersInDirtyRegion,
+                                      copy_object) << first_region_number;
+
+        start = end;
+        end = start + Page::kPageSize;
+        while (end <= object_end) {
+          // Iterate next 32 regions.
+          newmarks |=
+              Heap::IterateDirtyRegions(marks,
+                                        start,
+                                        end,
+                                        &Heap::IteratePointersInDirtyRegion,
+                                        copy_object);
+          start = end;
+          end = start + Page::kPageSize;
+        }
+
+        if (start != object_end) {
+          // Iterate the last piece of the object, which is shorter than
+          // Page::kPageSize.
+          newmarks |=
+              Heap::IterateDirtyRegions(marks,
+                                        start,
+                                        object_end,
+                                        &Heap::IteratePointersInDirtyRegion,
+                                        copy_object);
+        }
+
+        page->SetRegionMarks(newmarks);
       }
     }
   }
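A large page carries only one 32-bit mark word no matter how many page-sized strides the object spans, so region numbers alias modulo 32; the loop above therefore processes the object one `Page::kPageSize` stride at a time, reusing the same marks for each stride (with the first stride shifted by the object's starting region). A self-contained sketch of the aliasing that folds the per-stride calls into one loop; the real `Heap::IterateDirtyRegions` has a different signature (it also takes the per-region visitor used above and the object-slot callback):

```cpp
#include <cassert>
#include <cstdint>

constexpr uintptr_t kPageSize = 8 * 1024;
constexpr uintptr_t kRegionSize = kPageSize / 32;

// Visit each dirty region of [start, end); 'visit' returns true if the
// region still holds a new-space pointer and must stay dirty.
template <typename Visit>
uint32_t IterateDirtyRegions(uint32_t marks, uintptr_t start,
                             uintptr_t end, Visit visit) {
  uint32_t newmarks = 0;
  for (uintptr_t a = start; a < end; a += kRegionSize) {
    // Region numbers wrap every 32 regions, i.e. every kPageSize bytes.
    uint32_t bit = 1u << ((a / kRegionSize) & 31);
    if ((marks & bit) != 0 && visit(a, a + kRegionSize)) newmarks |= bit;
  }
  return newmarks;
}

int main() {
  // With only bit 0 dirty, both region 0 of the first stride and region
  // 0 of the second stride (at address kPageSize) are visited.
  uint32_t kept = IterateDirtyRegions(
      1u, 0, 2 * kPageSize,
      [](uintptr_t s, uintptr_t) { return s == kPageSize; });
  assert(kept == 1u);  // the second stride kept the shared bit dirty
  return 0;
}
```

This also explains why `newmarks` is accumulated across all strides before the single `SetRegionMarks(newmarks)` write-back: a bit must survive if any stride that shares it still holds an intergenerational pointer.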
@@ -2995,7 +2822,7 @@ void LargeObjectSpace::Verify() {
     } else if (object->IsFixedArray()) {
       // We loop over fixed arrays ourselves, rather then using the visitor,
       // because the visitor doesn't support the start/offset iteration
-      // needed for IsRSetSet.
+      // needed for IsRegionDirty.
       FixedArray* array = FixedArray::cast(object);
       for (int j = 0; j < array->length(); j++) {
         Object* element = array->get(j);
@@ -3004,8 +2831,11 @@ void LargeObjectSpace::Verify() {
           ASSERT(Heap::Contains(element_object));
           ASSERT(element_object->map()->IsMap());
           if (Heap::InNewSpace(element_object)) {
-            ASSERT(Page::IsRSetSet(object->address(),
-                                   FixedArray::kHeaderSize + j * kPointerSize));
+            Address array_addr = object->address();
+            Address element_addr = array_addr + FixedArray::kHeaderSize +
+                j * kPointerSize;
+
+            ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
           }
         }
       }
@@ -3046,33 +2876,6 @@ void LargeObjectSpace::CollectCodeStatistics() {
     }
   }
 }
-
-
-void LargeObjectSpace::PrintRSet() {
-  LargeObjectIterator it(this);
-  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
-    if (object->IsFixedArray()) {
-      Page* page = Page::FromAddress(object->address());
-
-      Address allocation_top = object->address() + object->Size();
-      PrintF("large page 0x%x:\n", page);
-      PrintRSetRange(page->RSetStart(), page->RSetEnd(),
-                     reinterpret_cast<Object**>(object->address()),
-                     allocation_top);
-      int extra_array_bytes = object->Size() - Page::kObjectAreaSize;
-      int extra_rset_bits = RoundUp(extra_array_bytes / kPointerSize,
-                                    kBitsPerInt);
-      PrintF("------------------------------------------------------------"
             "-----------\n");
-      PrintRSetRange(allocation_top,
-                     allocation_top + extra_rset_bits / kBitsPerByte,
-                     reinterpret_cast<Object**>(object->address()
-                                                + Page::kObjectAreaSize),
-                     allocation_top);
-      PrintF("\n");
-    }
-  }
-}
 #endif  // DEBUG
 
 } }  // namespace v8::internal