Chromium Code Reviews

| Index: src/spaces.cc |
| diff --git a/src/spaces.cc b/src/spaces.cc |
| index f7c67129c8169931cf033a5ca4a20891be42efe0..55274dabd85589b36f0153fa7f6dc8548627e77a 100644 |
| --- a/src/spaces.cc |
| +++ b/src/spaces.cc |
| @@ -1617,7 +1617,6 @@ void FreeListNode::set_size(Heap* heap, int size_in_bytes) { |
| // If the block is too small (e.g., one or two words) to hold both a size |
| // field and a next pointer, we give it a filler map that gives it the |
| // correct size. |
| - // TODO(gc) ISOLATES MERGE cleanup HEAP macro usage |
| if (size_in_bytes > FreeSpace::kHeaderSize) { |
| set_map(heap->raw_unchecked_free_space_map()); |
| // Can't use FreeSpace::cast because it fails during deserialization. |
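The comment above describes a compact encoding: blocks big enough for a FreeSpace header carry an explicit size field and a next pointer, while one- and two-word blocks get a filler map whose type alone encodes their size, so every free block still looks like a heap object whose size can be recovered. A minimal standalone sketch of the idea; MapKind, FreeBlock, and kWordSize are illustrative stand-ins for V8's real filler and free-space maps, not its actual declarations:

    constexpr int kWordSize = sizeof(void*);

    // Stand-in for the real map pointers (filler maps, free_space_map).
    enum class MapKind { kOneWordFiller, kTwoWordFiller, kFreeSpace };

    struct FreeBlock {
      MapKind map;      // A filler map encodes the size by its kind alone.
      int size;         // Only meaningful when map == kFreeSpace.
      FreeBlock* next;  // Only meaningful when map == kFreeSpace.

      void set_size(int size_in_bytes) {
        // One- and two-word blocks cannot hold both a size field and a
        // next pointer, so their map alone records the size.
        if (size_in_bytes == kWordSize) {
          map = MapKind::kOneWordFiller;
        } else if (size_in_bytes == 2 * kWordSize) {
          map = MapKind::kTwoWordFiller;
        } else {
          map = MapKind::kFreeSpace;
          size = size_in_bytes;
          next = nullptr;
        }
      }

      int Size() const {
        switch (map) {
          case MapKind::kOneWordFiller: return kWordSize;
          case MapKind::kTwoWordFiller: return 2 * kWordSize;
          case MapKind::kFreeSpace:     return size;
        }
        return 0;
      }
    };

The payoff is that heap iteration never needs special cases: any scanner that walks objects by map and size also walks free blocks correctly.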
| @@ -1738,6 +1737,75 @@ int FreeList::Free(Address start, int size_in_bytes) { |
| } |
| +FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) { |
| + FreeListNode* node = *list; |
| + |
| + if (node == NULL) return NULL; |
| + |
| + while (node != NULL && |
| + Page::FromAddress(node->address())->IsEvacuationCandidate()) { |
Erik Corry (2011/07/04 11:04:11):
It would be nicer to not put the free list entries
Vyacheslav Egorov (Chromium) (2011/08/05 12:50:28):
Evacuation decision is made late.
| + available_ -= node->Size(); |
| + node = node->next(); |
| + } |
| + |
| + if (node != NULL) { |
| + *node_size = node->Size(); |
| + *list = node->next(); |
| + } else { |
| + *list = NULL; |
| + } |
| + |
| + return node; |
| +} |
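As the exchange above notes, the evacuation decision is made late, after entries have already gone onto the free lists, so candidates are filtered out lazily at allocation time rather than being kept off the lists up front. A self-contained sketch of the same skip-and-unlink pattern; Node and its on_evacuation_candidate flag are simplified stand-ins for FreeListNode and the page-level IsEvacuationCandidate() check:

    struct Node {
      int size;
      bool on_evacuation_candidate;  // Stand-in for the page-level check.
      Node* next;
    };

    // Pops the first usable node, dropping any that now sit on evacuation
    // candidates; mirrors FreeList::PickNodeFromList above.
    Node* PickNodeFromList(Node** list, int* node_size, int* available) {
      Node* node = *list;
      while (node != nullptr && node->on_evacuation_candidate) {
        *available -= node->size;  // These bytes no longer back allocations.
        node = node->next;
      }
      if (node != nullptr) {
        *node_size = node->size;
        *list = node->next;
      } else {
        *list = nullptr;
      }
      return node;
    }

Unlinking as a side effect of the scan means each dead entry is paid for at most once, and the accounting in available stays consistent with what the list can actually deliver.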
| + |
| + |
| +FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) { |
| + FreeListNode* node = NULL; |
| + |
| + if (size_in_bytes <= kSmallAllocationMax) { |
| + node = PickNodeFromList(&small_list_, node_size); |
| + if (node != NULL) return node; |
| + } |
| + |
| + if (size_in_bytes <= kMediumAllocationMax) { |
| + node = PickNodeFromList(&medium_list_, node_size); |
| + if (node != NULL) return node; |
| + } |
| + |
| + if (size_in_bytes <= kLargeAllocationMax) { |
| + node = PickNodeFromList(&large_list_, node_size); |
| + if (node != NULL) return node; |
| + } |
| + |
| + for (FreeListNode** cur = &huge_list_; |
| + *cur != NULL; |
| + cur = (*cur)->next_address()) { |
| + |
| + FreeListNode* cur_node = *cur; |
| + while (cur_node != NULL && |
| + Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) { |
| + available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size(); |
| + cur_node = cur_node->next(); |
| + } |
| + |
| + *cur = cur_node; |
| + if (cur_node == NULL) break; |
| + |
| + ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map()); |
| + FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur); |
| + int size = cur_as_free_space->Size(); |
| + if (size >= size_in_bytes) { |
| + // Large enough node found. Unlink it from the list. |
| + node = *cur; |
| + *node_size = size; |
| + *cur = node->next(); |
| + break; |
| + } |
| + } |
| + |
| + return node; |
| +} |
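FindNodeFor tries the size classes in increasing order: each sized list guarantees its nodes can satisfy the request, so a successful pop is O(1), and only the unbounded huge list needs a first-fit scan with an explicit size check. A compilable sketch of that cascade, with hypothetical class bounds and the evacuation filtering omitted for brevity:

    struct Node { int size; Node* next; };

    static Node* PopFirst(Node** list) {
      Node* n = *list;
      if (n != nullptr) *list = n->next;
      return n;
    }

    // Hypothetical class bounds, for illustration only.
    constexpr int kSmallMax = 255;
    constexpr int kMediumMax = 2047;
    constexpr int kLargeMax = 16383;

    // Mirrors the cascade in FindNodeFor: sized lists pop in O(1); only
    // the huge list is scanned first-fit with an explicit size check.
    Node* FindNodeFor(int bytes, Node** small, Node** medium, Node** large,
                      Node** huge, int* node_size) {
      if (bytes <= kSmallMax) {
        if (Node* n = PopFirst(small)) { *node_size = n->size; return n; }
      }
      if (bytes <= kMediumMax) {
        if (Node* n = PopFirst(medium)) { *node_size = n->size; return n; }
      }
      if (bytes <= kLargeMax) {
        if (Node* n = PopFirst(large)) { *node_size = n->size; return n; }
      }
      for (Node** cur = huge; *cur != nullptr; cur = &(*cur)->next) {
        if ((*cur)->size >= bytes) {
          Node* n = *cur;
          *node_size = n->size;
          *cur = n->next;
          return n;
        }
      }
      return nullptr;
    }

Note that a fallback pop from a larger class can return far more memory than was requested; the Allocate path below decides what to do with the remainder.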
| + |
Erik Corry (2011/07/04 11:04:11):
Missing blank line
Vyacheslav Egorov (Chromium) (2011/08/05 12:50:28):
Done.
| // Allocation on the old space free list. If it succeeds then a new linear |
| // allocation space has been set up with the top and limit of the space. If |
| // the allocation fails then NULL is returned, and the caller can perform a GC |
| @@ -1749,38 +1817,9 @@ HeapObject* FreeList::Allocate(int size_in_bytes) { |
| // Don't free list allocate if there is linear space available. |
| ASSERT(owner_->limit() - owner_->top() < size_in_bytes); |
| - FreeListNode* new_node = NULL; |
| int new_node_size = 0; |
| - |
| - if (size_in_bytes <= kSmallAllocationMax && small_list_ != NULL) { |
| - new_node = small_list_; |
| - new_node_size = new_node->Size(); |
| - small_list_ = new_node->next(); |
| - } else if (size_in_bytes <= kMediumAllocationMax && medium_list_ != NULL) { |
| - new_node = medium_list_; |
| - new_node_size = new_node->Size(); |
| - medium_list_ = new_node->next(); |
| - } else if (size_in_bytes <= kLargeAllocationMax && large_list_ != NULL) { |
| - new_node = large_list_; |
| - new_node_size = new_node->Size(); |
| - large_list_ = new_node->next(); |
| - } else { |
| - for (FreeListNode** cur = &huge_list_; |
| - *cur != NULL; |
| - cur = (*cur)->next_address()) { |
| - ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map()); |
| - FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur); |
| - int size = cur_as_free_space->Size(); |
| - if (size >= size_in_bytes) { |
| - // Large enough node found. Unlink it from the list. |
| - new_node = *cur; |
| - new_node_size = size; |
| - *cur = new_node->next(); |
| - break; |
| - } |
| - } |
| - if (new_node == NULL) return NULL; |
| - } |
| + FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); |
| + if (new_node == NULL) return NULL; |
| available_ -= new_node_size; |
| ASSERT(IsVeryLong() || available_ == SumFreeLists()); |
| @@ -1790,8 +1829,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) { |
| // skipped when scanning the heap. This also puts it back in the free list |
| // if it is big enough. |
| owner_->Free(owner_->top(), old_linear_size); |
| - // TODO(gc) ISOLATES MERGE |
| - HEAP->incremental_marking()->Step(size_in_bytes - old_linear_size); |
| + owner_->heap()->incremental_marking()->Step(size_in_bytes - old_linear_size); |
| ASSERT(new_node_size - size_in_bytes >= 0); // New linear size. |
| @@ -1801,8 +1839,9 @@ HeapObject* FreeList::Allocate(int size_in_bytes) { |
| // a little of this again immediately - see below. |
| owner_->Allocate(new_node_size); |
| - if (new_node_size - size_in_bytes > kThreshold && |
| - HEAP->incremental_marking()->IsMarkingIncomplete() && |
| + int bytes_left = new_node_size - size_in_bytes; |
| + if (bytes_left > kThreshold && |
| + owner_->heap()->incremental_marking()->IsMarkingIncomplete() && |
| FLAG_incremental_marking_steps) { |
| int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); |
| // We don't want to give too large linear areas to the allocator while |
| @@ -1812,7 +1851,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) { |
| new_node_size - size_in_bytes - linear_size); |
| owner_->SetTop(new_node->address() + size_in_bytes, |
| new_node->address() + size_in_bytes + linear_size); |
| - } else { |
| + } else if (bytes_left > 0) { |
| // Normally we give the rest of the node to the allocator as its new |
| // linear allocation area. |
| owner_->SetTop(new_node->address() + size_in_bytes, |
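Allocate then splits the node returned by FindNodeFor: the first size_in_bytes back the object, and the remainder becomes the new linear allocation area, except that while incremental marking is incomplete, remainders above kThreshold are capped so allocation returns to the runtime often enough to drive marking steps. A sketch of just that split arithmetic; Free, SetTop, and kThreshold here are placeholder stand-ins for the owning space's interface:

    #include <cstdint>

    using Address = uintptr_t;

    // Placeholder stand-ins for the owning space's methods.
    static void Free(Address start, int size_in_bytes) {
      (void)start; (void)size_in_bytes;  // Would return bytes to the free list.
    }
    static void SetTop(Address top, Address limit) {
      (void)top; (void)limit;  // Would install the linear allocation area.
    }

    constexpr int kThreshold = 1024;  // Illustrative cap only.

    // The object takes the first size_in_bytes of the node; the remainder
    // becomes the linear area, capped while marking still has work left.
    static void SplitNode(Address node_start, int node_size, int size_in_bytes,
                          bool marking_incomplete) {
      int bytes_left = node_size - size_in_bytes;
      if (bytes_left > kThreshold && marking_incomplete) {
        int linear_size = kThreshold;  // Real code rounds to object alignment.
        // Return the excess beyond the capped linear area to the free list.
        Free(node_start + size_in_bytes + linear_size,
             bytes_left - linear_size);
        SetTop(node_start + size_in_bytes,
               node_start + size_in_bytes + linear_size);
      } else if (bytes_left > 0) {
        // Otherwise the whole remainder becomes the new linear area.
        SetTop(node_start + size_in_bytes, node_start + node_size);
      }
    }

With an exact fit there is no remainder to hand to the allocator, which is what the new bytes_left > 0 guard in the patch captures.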
| @@ -1895,18 +1934,6 @@ intptr_t FreeList::SumFreeLists() { |
| // ----------------------------------------------------------------------------- |
| // OldSpace implementation |
| -void OldSpace::PrepareForMarkCompact() { |
| - // Call prepare of the super class. |
| - PagedSpace::PrepareForMarkCompact(); |
| - |
| - // Stop lazy sweeping for this space. |
| - first_unswept_page_ = last_unswept_page_ = Page::FromAddress(NULL); |
| - |
| - // Clear the free list before a full GC---it will be rebuilt afterward. |
| - free_list_.Reset(); |
| -} |
| - |
| - |
| bool NewSpace::ReserveSpace(int bytes) { |
| // We can't reliably unpack a partial snapshot that needs more new space |
| // than the minimum NewSpace size. |
| @@ -1925,6 +1952,19 @@ void PagedSpace::PrepareForMarkCompact() { |
| int old_linear_size = limit() - top(); |
| Free(top(), old_linear_size); |
| SetTop(NULL, NULL); |
| + |
| + // Stop lazy sweeping for the space. |
| + first_unswept_page_ = last_unswept_page_ = Page::FromAddress(NULL); |
| + |
| + // Clear the free list before a full GC---it will be rebuilt afterward. |
| + free_list_.Reset(); |
| + |
| + // Clear EVACUATED flag from all pages. |
| + PageIterator it(this); |
| + while (it.has_next()) { |
| + Page* page = it.next(); |
| + page->ClearFlag(MemoryChunk::EVACUATED); |
| + } |
| } |
| @@ -1966,7 +2006,10 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) { |
| Page* p = first_unswept_page_; |
| do { |
| Page* next_page = p->next_page(); |
| - freed_bytes += MarkCompactCollector::SweepConservatively(this, p); |
| + // Evacuation candidates were swept by the evacuator. |
| + if (!p->WasEvacuated()) { |
| + freed_bytes += MarkCompactCollector::SweepConservatively(this, p); |
| + } |
| p = next_page; |
| } while (p != last && freed_bytes < bytes_to_sweep); |
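Because the evacuator sweeps the pages it evacuates, the lazy sweeper must now skip those pages or their free space would be counted twice. A standalone sketch of the budgeted sweep loop; PageRec and SweepConservatively are simplified stand-ins, and like the original do-while it assumes at least one unswept page:

    struct PageRec {
      bool was_evacuated;  // Stand-in for the EVACUATED page flag.
      int free_bytes;      // Bytes a sweep of this page would recover.
      PageRec* next;
    };

    // Stand-in for MarkCompactCollector::SweepConservatively.
    static int SweepConservatively(PageRec* p) {
      return p->free_bytes;  // Placeholder: real code rebuilds free-list entries.
    }

    // Mirrors AdvanceSweeper: sweep pages until the byte budget is met,
    // skipping pages the evacuator already swept.
    static long AdvanceSweeper(PageRec** first_unswept, PageRec* last,
                               long bytes_to_sweep) {
      long freed_bytes = 0;
      PageRec* p = *first_unswept;
      do {
        PageRec* next_page = p->next;
        if (!p->was_evacuated) {
          freed_bytes += SweepConservatively(p);
        }
        p = next_page;
      } while (p != last && freed_bytes < bytes_to_sweep);
      *first_unswept = p;  // Resume here on the next incremental step.
      return freed_bytes;
    }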
| @@ -1984,6 +2027,16 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) { |
| } |
| +void PagedSpace::EvictEvacuationCandidatesFromFreeLists() { |
| + if (allocation_info_.top >= allocation_info_.limit) return; |
| + |
| + if (Page::FromAddress(allocation_info_.top)->IsEvacuationCandidate()) { |
| + allocation_info_.top = NULL; |
| + allocation_info_.limit = NULL; |
| + } |
| +} |
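Despite its name, the body shown here guards only the linear allocation area; the free-list entries themselves are filtered lazily in PickNodeFromList above. If the current top/limit window lies on a page just chosen for evacuation, it is dropped so the next allocation takes the slow path instead of bump-allocating into a doomed page. A sketch of the check, with AllocationInfo and the page lookup simplified:

    #include <cstdint>

    using Address = uintptr_t;

    struct AllocationInfo { Address top; Address limit; };

    // Stand-in for Page::FromAddress(top)->IsEvacuationCandidate().
    static bool IsOnEvacuationCandidate(Address addr) {
      (void)addr;
      return false;  // Placeholder: real code consults the page's flags.
    }

    // Mirrors EvictEvacuationCandidatesFromFreeLists: a non-empty linear
    // area on a candidate page is abandoned outright.
    static void EvictLinearArea(AllocationInfo* info) {
      if (info->top >= info->limit) return;  // No linear area to evict.
      if (IsOnEvacuationCandidate(info->top)) {
        info->top = 0;
        info->limit = 0;
      }
    }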
| + |
| + |
| HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { |
| // Allocation in this space has failed. |