Chromium Code Reviews

Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 50a213e8ae4f582a3a684b4639c73894616fb851..ed8bba69e444275779ed217cdf50785bee81205c 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -982,6 +982,46 @@ void PagedSpace::TearDown() {
 }
+void PagedSpace::AddMemory(Address start, intptr_t size) {
+  accounting_stats_.ExpandSpace(static_cast<int>(size));
+  Free(start, static_cast<int>(size));
+}
+
+
+FreeSpace* PagedSpace::TryRemoveMemory(intptr_t size_in_bytes) {
+  FreeSpace* space = free_list()->TryRemoveMemory(size_in_bytes);
+  if (space != nullptr) {
+    accounting_stats_.DecreaseCapacity(space->size());
+  }
+  return space;
+}
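AddMemory() and TryRemoveMemory() above are two halves of one contract:
removing a free block decreases the donor space's capacity, while adding it
elsewhere increases the receiver's, so moving memory never changes the total.
A minimal sketch of how a caller might pair them (MoveFreeBlock is a
hypothetical helper, not part of this patch):

  void MoveFreeBlock(PagedSpace* from, PagedSpace* to, intptr_t size) {
    // TryRemoveMemory() already decreased {from}'s capacity.
    FreeSpace* node = from->TryRemoveMemory(size);
    if (node != nullptr) {
      // AddMemory() increases {to}'s capacity and re-frees the block there.
      to->AddMemory(node->address(), node->size());
    }
  }

This capacity-neutral move is exactly what DivideUponCompactionSpaces() below
performs in a loop.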
+
+
+void PagedSpace::DivideUponCompactionSpaces(CompactionSpaceCollection** other,
+                                            int num, intptr_t limit) {
+  DCHECK_GT(num, 0);
+  DCHECK(other != nullptr);
+
+  if (limit == 0) limit = std::numeric_limits<intptr_t>::max();
+
+  EmptyAllocationInfo();
+
+  int index = 0;
+  FreeSpace* node = nullptr;
+  // We evenly distribute existing memory among compaction spaces, i.e., while
+  // iterating over them in round-robin fashion, we check whether they have
+  // reached the {limit} of available bytes and try to add some memory if not.
+  for (CompactionSpace* space = other[index]->Get(identity());
ulan, 2015/10/09 11:48:48:
Let's simplify the "for" statement so that the cor…

Michael Lippautz, 2015/10/09 12:20:13:
Done, rewrote the loop. The intention is summariz…
+       (space->free_list()->available() < limit) &&
+           ((node = TryRemoveMemory(limit - space->free_list()->available())) !=
+            nullptr);
+       space = other[++index % num]->Get(identity())) {
+    CHECK(space->identity() == identity());
+    space->AddMemory(node->address(), node->size());
+  }
+}
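Per the review exchange above, the "for" header packs the limit check, the
TryRemoveMemory() call, and the round-robin advance into one statement. One
possible simplification (a sketch only; the actual rewrite mentioned in the
reply is truncated in this snapshot) hoists the advance into a "while" body:

  int index = 0;
  FreeSpace* node = nullptr;
  CompactionSpace* space = other[index]->Get(identity());
  // Stop once the current space holds {limit} available bytes, or once the
  // owning space has no free memory left to hand out.
  while ((space->free_list()->available() < limit) &&
         ((node = TryRemoveMemory(limit - space->free_list()->available())) !=
          nullptr)) {
    CHECK(space->identity() == identity());
    space->AddMemory(node->address(), node->size());
    space = other[++index % num]->Get(identity());
  }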
+
+
 void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
   DCHECK(identity() == other->identity());
   // Destroy the linear allocation space of {other}. This is needed to
@@ -994,8 +1034,7 @@ void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
   intptr_t added = free_list_.Concatenate(other->free_list());
   // Moved memory is not recorded as allocated memory, but rather increases and
-  // decreases capacity of the corresponding spaces. Used size and waste size
-  // are maintained by the receiving space upon allocating and freeing blocks.
+  // decreases capacity of the corresponding spaces.
   other->accounting_stats_.DecreaseCapacity(added);
   accounting_stats_.IncreaseCapacity(added);
 }
@@ -1004,16 +1043,19 @@ void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
   // Unmerged fields:
   //   area_size_
-  //   allocation_info_
-  //   end_of_unswept_pages_
-  //   unswept_free_bytes_
   //   anchor_
   MoveOverFreeMemory(other);
   // Update and clear accounting statistics.
   accounting_stats_.Merge(other->accounting_stats_);
-  other->accounting_stats_.Reset();
+  other->accounting_stats_.Clear();
+
+  // The linear allocation area of {other} should be destroyed now.
+  DCHECK(other->top() == nullptr);
+  DCHECK(other->limit() == nullptr);
+
+  DCHECK(other->end_of_unswept_pages_ == nullptr);
   AccountCommitted(other->CommittedMemory());
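The Merge()/Clear() pair above moves the compaction space's accounting into
the receiving space and then zeroes the donor. Roughly (an illustrative
stand-in with assumed field names, not V8's actual AllocationStats):

  struct Stats {
    intptr_t capacity = 0, size = 0, waste = 0;
    void Merge(const Stats& other) {
      capacity += other.capacity;  // Receiver absorbs the donor's counters.
      size += other.size;
      waste += other.waste;
    }
    void Clear() { *this = Stats(); }  // Donor must not be counted twice.
  };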
@@ -2392,6 +2434,30 @@ FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
 }
+FreeSpace* FreeList::TryRemoveMemory(intptr_t size_in_bytes) {
ulan, 2015/10/09 11:48:48:
Could you please comment the postconditions of thi…

Michael Lippautz, 2015/10/09 12:20:13:
Done. I added a high-level description to the .h f…
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  FreeSpace* node = nullptr;
+  int node_size = 0;
+  // Try to find a node that fits exactly.
+  node = FindNodeFor(static_cast<int>(size_in_bytes), &node_size);
+  // If no node could be found, get as much memory as possible.
+  if (node == nullptr) node = FindNodeIn(kHuge, &node_size);
+  if (node == nullptr) node = FindNodeIn(kLarge, &node_size);
+  if (node == nullptr) node = FindNodeIn(kMedium, &node_size);
+  if (node == nullptr) node = FindNodeIn(kSmall, &node_size);
+  if (node != nullptr) {
+    // Give back leftovers that were not required by {size_in_bytes}.
Michael Lippautz, 2015/10/09 11:08:59:
This could potentially increase fragmentation, but…
+    intptr_t aligned_size = RoundUp(size_in_bytes, kPointerSize);
+    intptr_t left_over = node_size - aligned_size;
+    if (left_over > 0) {
+      Free(node->address() + aligned_size, static_cast<int>(left_over));
+      node->set_size(static_cast<int>(aligned_size));
+    }
+  }
+  return node;
+}
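A worked example of the split above (sizes are hypothetical; 64-bit build
where kPointerSize == 8):

  //   size_in_bytes = 100  ->  aligned_size = RoundUp(100, 8) = 104
  //   node_size     = 256  ->  left_over    = 256 - 104       = 152
  // The trailing 152 bytes go back onto the free list via Free(), and the
  // node is shrunk to 104 bytes via set_size(), so the caller never gets
  // more than the pointer-aligned request.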
+
+
 // Allocation on the old space free list. If it succeeds then a new linear
 // allocation space has been set up with the top and limit of the space. If
 // the allocation fails then NULL is returned, and the caller can perform a GC
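For context, once such a linear allocation area is set up, allocations
degenerate to bumping a pointer until the limit is hit. A minimal sketch
(simplified; AllocateLinear is a hypothetical name, not the exact V8 fast
path, and Address is assumed to be a byte pointer):

  Address AllocateLinear(Address* top, Address limit, int size_in_bytes) {
    // Fast path: bump {top} while the request still fits below {limit}.
    if (*top + size_in_bytes > limit) return nullptr;
    Address result = *top;
    *top += size_in_bytes;
    return result;
  }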