Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 8fdca694a178d41f836533d43890cc612c896137..da97649a87291a495d3d57683adc51afb2d841d4 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -924,7 +924,7 @@ bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
 void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
   MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
   if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
-    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
+    static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
   }
   chunk->IncrementLiveBytes(by);
 }
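With the unswept-free-bytes counter gone, a mutator-side live-bytes increment on a not-yet-swept page is charged straight to the owning space's allocation accounting via PagedSpace::Allocate(by), instead of decrementing the separate unswept_free_bytes_ counter. A minimal sketch of that bookkeeping, using a simplified stand-in class rather than V8's real PagedSpace and AllocationStats:

    // Hedged sketch: PagedSpaceSketch is a stand-in, not V8's PagedSpace.
    #include <cassert>
    #include <cstdint>

    class PagedSpaceSketch {
     public:
      explicit PagedSpaceSketch(intptr_t capacity) : capacity_(capacity) {}

      // Mirrors the role of PagedSpace::Allocate(int): book bytes as allocated.
      void Allocate(intptr_t bytes) {
        size_ += bytes;
        assert(size_ <= capacity_);
      }

      intptr_t size() const { return size_; }

     private:
      intptr_t capacity_;
      intptr_t size_ = 0;
    };

    int main() {
      PagedSpaceSketch space(1 << 20);
      // Old scheme: IncrementUnsweptFreeBytes(-by). New scheme: the same
      // growth goes through the regular allocation path.
      space.Allocate(128);
      assert(space.size() == 128);
      return 0;
    }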
@@ -954,7 +954,6 @@ PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                        Executability executable)
     : Space(heap, space, executable),
       free_list_(this),
-      unswept_free_bytes_(0),
       end_of_unswept_pages_(NULL) {
   area_size_ = MemoryAllocator::PageAreaSize(space);
   accounting_stats_.Clear();
@@ -992,13 +991,13 @@ void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
 
   // Move over the free list. Concatenate makes sure that the source free list
   // gets properly reset after moving over all nodes.
-  intptr_t freed_bytes = free_list_.Concatenate(other->free_list());
+  intptr_t added = free_list_.Concatenate(other->free_list());
 
   // Moved memory is not recorded as allocated memory, but rather increases and
   // decreases capacity of the corresponding spaces. Used size and waste size
   // are maintained by the receiving space upon allocating and freeing blocks.
-  other->accounting_stats_.DecreaseCapacity(freed_bytes);
-  accounting_stats_.IncreaseCapacity(freed_bytes);
+  other->accounting_stats_.DecreaseCapacity(added);
+  accounting_stats_.IncreaseCapacity(added);
 }
 
 
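Because FreeList::Concatenate (updated in a later hunk) now returns usable plus wasted bytes, the `added` value here covers every byte the source space gives up, keeping the capacity transfer between the two spaces exact; see the sketch after that hunk.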
@@ -1139,7 +1138,7 @@ void PagedSpace::ReleasePage(Page* page) {
     accounting_stats_.AllocateBytes(size);
     DCHECK_EQ(AreaSize(), static_cast<int>(size));
   } else {
-    DecreaseUnsweptFreeBytes(page);
+    accounting_stats_.DeallocateBytes(page->area_size());
   }
 
   if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
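The else branch handles a page that has not been swept yet. Under the new accounting, such a page's entire usable area is still booked as allocated, so releasing it deallocates page->area_size() in one step rather than adjusting the removed counter. A hedged sketch of that invariant, with a simplified struct standing in for the real accounting_stats_ (which also handles capacity shrinking separately):

    // Sketch only: SpaceStatsSketch is an assumption-laden stand-in for the
    // real accounting in V8.
    #include <cassert>
    #include <cstdint>

    struct SpaceStatsSketch {
      intptr_t capacity = 0;
      intptr_t size = 0;  // bytes currently booked as allocated

      void AddUnsweptPage(intptr_t area) {
        capacity += area;
        size += area;  // an unswept page's whole area counts as allocated
      }
      void ReleaseUnsweptPage(intptr_t area) {
        size -= area;      // mirrors accounting_stats_.DeallocateBytes(area)
        capacity -= area;  // the page leaves the space entirely
        assert(size >= 0 && capacity >= 0);
      }
    };

    int main() {
      SpaceStatsSketch stats;
      stats.AddUnsweptPage(500 * 1024);  // hypothetical page area
      stats.ReleaseUnsweptPage(500 * 1024);
      assert(stats.size == 0 && stats.capacity == 0);
      return 0;
    }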
@@ -2245,7 +2244,8 @@ FreeList::FreeList(PagedSpace* owner)
 
 
 intptr_t FreeList::Concatenate(FreeList* other) {
-  intptr_t free_bytes = 0;
+  intptr_t usable_bytes = 0;
+  intptr_t wasted_bytes = 0;
 
   // This is safe (not going to deadlock) since Concatenate operations
   // are never performed on the same free lists at the same time in
@@ -2254,17 +2254,18 @@ intptr_t FreeList::Concatenate(FreeList* other) {
   if (!owner()->is_local()) mutex_.Lock();
   if (!other->owner()->is_local()) other->mutex()->Lock();
 
-  wasted_bytes_ += other->wasted_bytes_;
+  wasted_bytes = other->wasted_bytes_;
+  wasted_bytes_ += wasted_bytes;
   other->wasted_bytes_ = 0;
 
-  free_bytes += small_list_.Concatenate(other->small_list());
-  free_bytes += medium_list_.Concatenate(other->medium_list());
-  free_bytes += large_list_.Concatenate(other->large_list());
-  free_bytes += huge_list_.Concatenate(other->huge_list());
+  usable_bytes += small_list_.Concatenate(other->small_list());
+  usable_bytes += medium_list_.Concatenate(other->medium_list());
+  usable_bytes += large_list_.Concatenate(other->large_list());
+  usable_bytes += huge_list_.Concatenate(other->huge_list());
 
   if (!other->owner()->is_local()) other->mutex()->Unlock();
   if (!owner()->is_local()) mutex_.Unlock();
-  return free_bytes;
+  return usable_bytes + wasted_bytes;
 }
 
 
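The return value now explicitly includes both components: bytes that ended up on the receiving free list (usable_bytes) and bytes recorded as waste (wasted_bytes). Callers such as MoveOverFreeMemory above use this total to move capacity between spaces, so dropping either component would make the two spaces' books disagree. A small self-contained illustration with hypothetical numbers and a simplified free list, not V8's real FreeList:

    // Illustration only; FreeListSketch is a stand-in for V8's FreeList.
    #include <cstdint>
    #include <cstdio>

    struct FreeListSketch {
      intptr_t usable = 0;  // bytes linked into the free list
      intptr_t wasted = 0;  // fragments too small to link in

      // Returns every byte that changed ownership, usable and wasted alike.
      intptr_t Concatenate(FreeListSketch* other) {
        intptr_t moved = other->usable + other->wasted;
        usable += other->usable;
        wasted += other->wasted;
        other->usable = other->wasted = 0;
        return moved;
      }
    };

    int main() {
      FreeListSketch receiver, source;
      source.usable = 96;
      source.wasted = 16;
      // Both spaces must shift the same 112 bytes of capacity; that only
      // holds if wasted bytes are part of the returned total.
      std::printf("%ld\n", static_cast<long>(receiver.Concatenate(&source)));
      return 0;
    }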
@@ -2549,20 +2550,13 @@ void PagedSpace::PrepareForMarkCompact() {
   // on the first allocation after the sweep.
   EmptyAllocationInfo();
 
-  // This counter will be increased for pages which will be swept by the
-  // sweeper threads.
-  unswept_free_bytes_ = 0;
-
   // Clear the free list before a full GC---it will be rebuilt afterward.
   free_list_.Reset();
 }
 
 
 intptr_t PagedSpace::SizeOfObjects() {
-  DCHECK(!FLAG_concurrent_sweeping ||
-         heap()->mark_compact_collector()->sweeping_in_progress() ||
-         (unswept_free_bytes_ == 0));
-  const intptr_t size = Size() - unswept_free_bytes_ - (limit() - top());
+  const intptr_t size = Size() - (limit() - top());
   DCHECK_GE(size, 0);
   USE(size);
   return size;
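With unswept_free_bytes_ gone, SizeOfObjects() no longer needs the DCHECK tying the counter to the sweeper's state: the allocated size already accounts for unswept pages, and the only correction left is the unused tail of the current linear allocation buffer, limit() - top(). A worked example with made-up numbers:

    // Hypothetical values; the real top()/limit() are addresses in a page.
    #include <cassert>
    #include <cstdint>

    int main() {
      intptr_t size = 1 << 20;  // Size(): bytes booked as allocated
      intptr_t top = 0x9000;    // next allocation address
      intptr_t limit = 0xa000;  // end of the linear allocation buffer
      // SizeOfObjects() == Size() - (limit() - top())
      intptr_t size_of_objects = size - (limit - top);
      assert(size_of_objects == (1 << 20) - 0x1000);
      return 0;
    }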