Chromium Code Reviews

Issue 254603002: Remove lazy sweeping. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge

Index: src/spaces.cc
diff --git a/src/spaces.cc b/src/spaces.cc
index 5a7bc4b1af0d6c95634182f9d06d7c133b9dd905..1755fa5459065e3975986cdb6d6ac0462ac72eae 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -953,7 +953,6 @@ PagedSpace::PagedSpace(Heap* heap,
     : Space(heap, id, executable),
       free_list_(this),
       was_swept_conservatively_(false),
-      first_unswept_page_(Page::FromAddress(NULL)),
       unswept_free_bytes_(0) {
   if (id == CODE_SPACE) {
     area_size_ = heap->isolate()->memory_allocator()->
@@ -1131,14 +1130,6 @@ void PagedSpace::ReleasePage(Page* page, bool unlink) {
   ASSERT(page->LiveBytes() == 0);
   ASSERT(AreaSize() == page->area_size());

-  // Adjust list of unswept pages if the page is the head of the list.
-  if (first_unswept_page_ == page) {
-    first_unswept_page_ = page->next_page();
-    if (first_unswept_page_ == anchor()) {
-      first_unswept_page_ = Page::FromAddress(NULL);
-    }
-  }
-
   if (page->WasSwept()) {
     intptr_t size = free_list_.EvictFreeListItems(page);
     accounting_stats_.AllocateBytes(size);
@@ -2555,24 +2546,8 @@ void PagedSpace::PrepareForMarkCompact() {
   // on the first allocation after the sweep.
   EmptyAllocationInfo();

-  // Stop lazy sweeping and clear marking bits for unswept pages.
-  if (first_unswept_page_ != NULL) {
-    Page* p = first_unswept_page_;
-    do {
-      // Do not use ShouldBeSweptLazily predicate here.
-      // New evacuation candidates were selected but they still have
-      // to be swept before collection starts.
-      if (!p->WasSwept()) {
-        Bitmap::Clear(p);
-        if (FLAG_gc_verbose) {
-          PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
-                 reinterpret_cast<intptr_t>(p));
-        }
-      }
-      p = p->next_page();
-    } while (p != anchor());
-  }
-  first_unswept_page_ = Page::FromAddress(NULL);
+  // This counter will be increased for pages which will be swept by the
+  // sweeper threads.
   unswept_free_bytes_ = 0;

   // Clear the free list before a full GC---it will be rebuilt afterward.
@@ -2581,7 +2556,8 @@ void PagedSpace::PrepareForMarkCompact() {

 intptr_t PagedSpace::SizeOfObjects() {
-  ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0));
+  ASSERT(heap()->mark_compact_collector()->IsConcurrentSweepingInProgress() ||
+         (unswept_free_bytes_ == 0));
   return Size() - unswept_free_bytes_ - (limit() - top());
 }
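For context, the assertion above guards a simple accounting identity: Size() counts the full object area of all pages the space owns, unswept_free_bytes_ tracks free bytes still sitting on pages the sweeper threads have not processed, and (limit() - top()) is the unused tail of the current linear allocation area. A minimal sketch of that identity, using hypothetical stand-in parameters rather than the real PagedSpace fields:

  #include <stdint.h>

  // Sketch only: once concurrent sweeping finishes, every page has been
  // swept, so unswept_free_bytes must have dropped back to zero, which is
  // exactly what the new ASSERT checks.
  intptr_t SizeOfObjectsSketch(intptr_t size, intptr_t unswept_free_bytes,
                               intptr_t top, intptr_t limit) {
    // Live bytes = total committed object area, minus free bytes on pages
    // still awaiting sweeping, minus the unused allocation tail.
    return size - unswept_free_bytes - (limit - top);
  }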
@@ -2595,39 +2571,6 @@ void PagedSpace::RepairFreeListsAfterBoot() {
 }

-bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
-  if (IsLazySweepingComplete()) return true;
-
-  intptr_t freed_bytes = 0;
-  Page* p = first_unswept_page_;
-  do {
-    Page* next_page = p->next_page();
-    if (ShouldBeSweptLazily(p)) {
-      if (FLAG_gc_verbose) {
-        PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
-               reinterpret_cast<intptr_t>(p));
-      }
-      DecreaseUnsweptFreeBytes(p);
-      freed_bytes +=
-          MarkCompactCollector::
-              SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
-                  this, NULL, p);
-    }
-    p = next_page;
-  } while (p != anchor() && freed_bytes < bytes_to_sweep);
-
-  if (p == anchor()) {
-    first_unswept_page_ = Page::FromAddress(NULL);
-  } else {
-    first_unswept_page_ = p;
-  }
-
-  heap()->FreeQueuedChunks();
-
-  return IsLazySweepingComplete();
-}
-
-
 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
   if (allocation_info_.top() >= allocation_info_.limit()) return;
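The deleted AdvanceSweeper was the heart of lazy sweeping: rather than sweeping everything at GC time or on background threads, the mutator swept pages itself, a bounded number of bytes at a time, whenever allocation needed space. A stripped-down sketch of that protocol, with stand-in types rather than the real V8 classes:

  #include <stdint.h>

  // Stand-in for a page on the unswept list; not the real v8::internal::Page.
  struct PageStub {
    PageStub* next;
    intptr_t reclaimable_bytes;
    bool swept;
  };

  // The mutator thread sweeps pages itself until it has reclaimed at least
  // bytes_to_sweep bytes, then resumes allocation. Returns the bytes freed;
  // the caller retries its free-list allocation with them.
  intptr_t AdvanceLazySweep(PageStub* first_unswept, intptr_t bytes_to_sweep) {
    intptr_t freed = 0;
    for (PageStub* p = first_unswept; p != nullptr && freed < bytes_to_sweep;
         p = p->next) {
      if (p->swept) continue;
      freed += p->reclaimable_bytes;  // Sweeping yields the page's free space.
      p->swept = true;
    }
    return freed;
  }

With concurrent sweeping, this whole bounded-walk bookkeeping (first_unswept_page_, ShouldBeSweptLazily) becomes unnecessary, which is why the patch can delete the function outright.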
@@ -2656,28 +2599,18 @@ bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
       }
       return false;
     }
-    return true;
-  } else {
-    return AdvanceSweeper(size_in_bytes);
   }
+  return true;
 }


 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.

+  EnsureSweeperProgress(size_in_bytes);
-  // If there are unswept pages advance lazy sweeper a bounded number of times
-  // until we find a size_in_bytes contiguous piece of memory
-  const int kMaxSweepingTries = 5;
-  bool sweeping_complete = false;
-
-  for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
-    sweeping_complete = EnsureSweeperProgress(size_in_bytes);
-
-    // Retry the free list allocation.
-    HeapObject* object = free_list_.Allocate(size_in_bytes);
-    if (object != NULL) return object;
-  }
+  // Retry the free list allocation.
+  HeapObject* object = free_list_.Allocate(size_in_bytes);
+  if (object != NULL) return object;
Michael Starzinger
2014/04/25 07:57:14
As discussed offline: We should stick with the old

Hannes Payer (out of office)
2014/04/25 09:40:54
Done.
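The thread above refers to the bounded retry loop deleted in this hunk; per the agreement, that shape was restored, with EnsureSweeperProgress() now driving the concurrent sweeper instead of the removed lazy one. The updated patch set is not shown here, so the following is only a sketch of the agreed-on control flow, with stub classes in place of the real V8 types:

  // Stand-ins for the V8 types involved; only the control flow matters here.
  class FreeListStub {
   public:
    void* Allocate(int size_in_bytes);  // Returns nullptr on failure.
  };

  class PagedSpaceStub {
   public:
    bool EnsureSweeperProgress(long size_in_bytes);  // True when done sweeping.
    FreeListStub* free_list();
  };

  // Sketch, not the landed code: retry the allocation a bounded number of
  // times, asking the sweeper for progress between attempts.
  void* SlowAllocateRawSketch(PagedSpaceStub* space, int size_in_bytes) {
    const int kMaxSweepingTries = 5;  // Bound taken from the removed loop.
    for (int i = 0; i < kMaxSweepingTries; i++) {
      bool complete = space->EnsureSweeperProgress(size_in_bytes);
      // Retry the free list allocation after each round of sweeper progress.
      void* object = space->free_list()->Allocate(size_in_bytes);
      if (object != nullptr) return object;
      if (complete) break;  // Nothing left to sweep; let the caller expand.
    }
    return nullptr;  // Caller falls through to space expansion or a full GC.
  }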
   // Free list allocation failed and there is no next page. Fail if we have
   // hit the old generation size limit that should cause a garbage

@@ -2693,16 +2626,6 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
     return free_list_.Allocate(size_in_bytes);
   }
Michael Starzinger
2014/04/25 07:57:14
As discussed offline: Likewise for the last-ditch

Hannes Payer (out of office)
2014/04/25 09:40:54
Done. The last ditch is different now, it will wai
-  // Last ditch, sweep all the remaining pages to try to find space. This may
-  // cause a pause.
-  if (!IsLazySweepingComplete()) {
-    EnsureSweeperProgress(kMaxInt);
-
-    // Retry the free list allocation.
-    HeapObject* object = free_list_.Allocate(size_in_bytes);
-    if (object != NULL) return object;
-  }
-
   // Finally, fail.
   return NULL;
 }
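The reply above indicates the last ditch changed character rather than disappearing: with no lazy sweeper left to push, the remaining option before failing is to wait for the concurrent sweeper threads and retry the free list once more. A sketch of that shape, again with stand-in types and an assumed blocking wait rather than the exact API of the landed patch:

  // Stand-ins for the V8 types; the point is the blocking wait.
  class CollectorStub {
   public:
    bool IsConcurrentSweepingInProgress();
    void WaitUntilSweepingCompleted();  // Blocks the mutator; may pause.
  };

  class SpaceStub {
   public:
    CollectorStub* collector();
    void* AllocateFromFreeList(int size_in_bytes);  // nullptr on failure.
  };

  // Last ditch, sketched: block until the sweeper threads are done, then make
  // one final free-list attempt before giving up and returning nullptr (at
  // which point the caller fails the allocation and triggers a GC).
  void* LastDitchSketch(SpaceStub* space, int size_in_bytes) {
    if (space->collector()->IsConcurrentSweepingInProgress()) {
      space->collector()->WaitUntilSweepingCompleted();
      return space->AllocateFromFreeList(size_in_bytes);
    }
    return nullptr;
  }

The trade-off is the same one the removed comment named: waiting here can still cause a pause, but the sweeping work itself now happens off the mutator thread instead of inside the allocation slow path.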