Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 1a2ff27c2b6826434fbdbc0d6ca0aee9cc5adac7..1a0d0bbecc622e3eb595cbf3442fdd7727d6b520 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -1028,10 +1028,6 @@ bool PagedSpace::Expand() {
 
   intptr_t size = AreaSize();
 
-  if (anchor_.next_page() == &anchor_) {
-    size = SizeOfFirstPage();
-  }
-
   Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
                                                                 executable());
   if (p == NULL) return false;
@@ -1044,50 +1040,6 @@ bool PagedSpace::Expand() {
 }
 
 
-intptr_t PagedSpace::SizeOfFirstPage() {
-  // If the snapshot contains a custom script, all size guarantees are off.
-  if (Snapshot::EmbedsScript()) return AreaSize();
-  // If using an ool constant pool then transfer the constant pool allowance
-  // from the code space to the old pointer space.
-  static const int constant_pool_delta = FLAG_enable_ool_constant_pool ? 48 : 0;
-  int size = 0;
-  switch (identity()) {
-    case OLD_POINTER_SPACE:
-      size = (128 + constant_pool_delta) * kPointerSize * KB;
-      break;
-    case OLD_DATA_SPACE:
-      size = 192 * KB;
-      break;
-    case MAP_SPACE:
-      size = 16 * kPointerSize * KB;
-      break;
-    case CELL_SPACE:
-      size = 16 * kPointerSize * KB;
-      break;
-    case PROPERTY_CELL_SPACE:
-      size = 8 * kPointerSize * KB;
-      break;
-    case CODE_SPACE: {
-      CodeRange* code_range = heap()->isolate()->code_range();
-      if (code_range != NULL && code_range->valid()) {
-        // When code range exists, code pages are allocated in a special way
-        // (from the reserved code range). That part of the code is not yet
-        // upgraded to handle small pages.
-        size = AreaSize();
-      } else {
-        size = RoundUp((480 - constant_pool_delta) * KB *
-                           FullCodeGenerator::kBootCodeSizeMultiplier / 100,
-                       kPointerSize);
-      }
-      break;
-    }
-    default:
-      UNREACHABLE();
-  }
-  return Min(size, AreaSize());
-}
-
-
 int PagedSpace::CountTotalPages() {
   PageIterator it(this);
   int count = 0;
@@ -2082,79 +2034,6 @@ size_t NewSpace::CommittedPhysicalMemory() {
 // -----------------------------------------------------------------------------
 // Free lists for old object spaces implementation
 
-void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
-  DCHECK(size_in_bytes > 0);
-  DCHECK(IsAligned(size_in_bytes, kPointerSize));
-
-  // We write a map and possibly size information to the block. If the block
-  // is big enough to be a FreeSpace with at least one extra word (the next
-  // pointer), we set its map to be the free space map and its size to an
-  // appropriate array length for the desired size from HeapObject::Size().
-  // If the block is too small (eg, one or two words), to hold both a size
-  // field and a next pointer, we give it a filler map that gives it the
-  // correct size.
-  if (size_in_bytes > FreeSpace::kHeaderSize) {
-    // Can't use FreeSpace::cast because it fails during deserialization.
-    // We have to set the size first with a release store before we store
-    // the map because a concurrent store buffer scan on scavenge must not
-    // observe a map with an invalid size.
-    FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
-    this_as_free_space->nobarrier_set_size(size_in_bytes);
-    synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
-  } else if (size_in_bytes == kPointerSize) {
-    set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
-  } else if (size_in_bytes == 2 * kPointerSize) {
-    set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
-  } else {
-    UNREACHABLE();
-  }
-  // We would like to DCHECK(Size() == size_in_bytes) but this would fail during
-  // deserialization because the free space map is not done yet.
-}
-
-
-FreeListNode* FreeListNode::next() {
-  DCHECK(IsFreeListNode(this));
-  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
-    DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
-    return reinterpret_cast<FreeListNode*>(
-        Memory::Address_at(address() + kNextOffset));
-  } else {
-    return reinterpret_cast<FreeListNode*>(
-        Memory::Address_at(address() + kPointerSize));
-  }
-}
-
-
-FreeListNode** FreeListNode::next_address() {
-  DCHECK(IsFreeListNode(this));
-  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
-    DCHECK(Size() >= kNextOffset + kPointerSize);
-    return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
-  } else {
-    return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
-  }
-}
-
-
-void FreeListNode::set_next(FreeListNode* next) {
-  DCHECK(IsFreeListNode(this));
-  // While we are booting the VM the free space map will actually be null. So
-  // we have to make sure that we don't try to use it for anything at that
-  // stage.
-  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
-    DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
-    base::NoBarrier_Store(
-        reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
-        reinterpret_cast<base::AtomicWord>(next));
-  } else {
-    base::NoBarrier_Store(
-        reinterpret_cast<base::AtomicWord*>(address() + kPointerSize),
-        reinterpret_cast<base::AtomicWord>(next));
-  }
-}
-
-
 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
   intptr_t free_bytes = 0;
   if (category->top() != NULL) {
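Note on the block deleted above: FreeListNode had to branch on the map because the "next" pointer lived at a different offset depending on whether the block was a full FreeSpace (map word, size word, then next) or a one/two-word filler (next stored right after the map). After this change, only blocks big enough to be real FreeSpace objects ever reach a free list (smaller blocks become plain fillers via CreateFillerObjectAt and are dropped; see FreeList::Free below), so FreeSpace can expose size and next directly. A minimal standalone sketch of the idea; the struct layout and names are illustrative assumptions, not V8's actual object model:

    #include <cassert>
    #include <cstdint>

    // Illustrative only: a free block that is always large enough to hold a
    // header (map + size) plus an explicit next pointer, so readers never
    // need to branch on the map to locate "next".
    struct FreeSpaceSketch {
      uintptr_t map;          // stands in for the free-space map word
      int size;               // block size in bytes
      FreeSpaceSketch* next;  // explicit successor on the free list

      // Head insertion, as FreeListCategory::Free does after this change.
      static void Push(FreeSpaceSketch** top, FreeSpaceSketch* node) {
        node->next = *top;
        *top = node;
      }
    };

    int main() {
      FreeSpaceSketch a{0, 32, nullptr}, b{0, 64, nullptr};
      FreeSpaceSketch* top = nullptr;
      FreeSpaceSketch::Push(&top, &a);
      FreeSpaceSketch::Push(&top, &b);
      assert(top == &b && top->next == &a);  // LIFO list, uniform layout
      return 0;
    }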
@@ -2188,11 +2067,11 @@ void FreeListCategory::Reset() {
 
 intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
   int sum = 0;
-  FreeListNode* t = top();
-  FreeListNode** n = &t;
+  FreeSpace* t = top();
+  FreeSpace** n = &t;
   while (*n != NULL) {
     if (Page::FromAddress((*n)->address()) == p) {
-      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
+      FreeSpace* free_space = *n;
       sum += free_space->Size();
       *n = (*n)->next();
     } else {
@@ -2209,7 +2088,7 @@ intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
 
 
 bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
-  FreeListNode* node = top();
+  FreeSpace* node = top();
   while (node != NULL) {
     if (Page::FromAddress(node->address()) == p) return true;
     node = node->next();
@@ -2218,20 +2097,20 @@ bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
 }
 
 
-FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) {
-  FreeListNode* node = top();
+FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
+  FreeSpace* node = top();
 
   if (node == NULL) return NULL;
 
   while (node != NULL &&
          Page::FromAddress(node->address())->IsEvacuationCandidate()) {
-    available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
+    available_ -= node->Size();
     node = node->next();
   }
 
   if (node != NULL) {
     set_top(node->next());
-    *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
+    *node_size = node->Size();
     available_ -= *node_size;
   } else {
     set_top(NULL);
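PickNodeFromList pops from the head but lazily skips nodes whose pages are evacuation candidates, discounting their bytes from available_ as it goes. A small sketch of that filtered pop, with a stand-in boolean for the IsEvacuationCandidate() page-flag check:

    #include <cstddef>

    struct Node {
      Node* next;
      int size;
      bool on_evacuation_candidate;  // stand-in for the page-flag check
    };

    // Pop the first usable node, dropping (and un-accounting) any leading
    // nodes on pages scheduled for evacuation, as PickNodeFromList does.
    Node* PickNode(Node** top, int* available, int* node_size) {
      Node* node = *top;
      while (node != nullptr && node->on_evacuation_candidate) {
        *available -= node->size;  // these bytes will move; stop counting them
        node = node->next;
      }
      if (node != nullptr) {
        *top = node->next;
        *node_size = node->size;
        *available -= *node_size;
      } else {
        *top = nullptr;
        *node_size = 0;
      }
      return node;
    }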
@@ -2245,9 +2124,9 @@ FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) {
 }
 
 
-FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
-                                                 int* node_size) {
-  FreeListNode* node = PickNodeFromList(node_size);
+FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
+                                              int* node_size) {
+  FreeSpace* node = PickNodeFromList(node_size);
   if (node != NULL && *node_size < size_in_bytes) {
     Free(node, *node_size);
     *node_size = 0;
@@ -2257,18 +2136,19 @@ FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
 }
 
 
-void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
-  node->set_next(top());
-  set_top(node);
+void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) {
+  DCHECK_LE(FreeList::kSmallListMin, size_in_bytes);
+  free_space->set_next(top());
+  set_top(free_space);
   if (end_ == NULL) {
-    end_ = node;
+    end_ = free_space;
   }
   available_ += size_in_bytes;
 }
 
 
 void FreeListCategory::RepairFreeList(Heap* heap) {
-  FreeListNode* n = top();
+  FreeSpace* n = top();
   while (n != NULL) {
     Map** map_location = reinterpret_cast<Map**>(n->address());
     if (*map_location == NULL) {
@@ -2307,8 +2187,8 @@ void FreeList::Reset() {
 int FreeList::Free(Address start, int size_in_bytes) {
   if (size_in_bytes == 0) return 0;
 
-  FreeListNode* node = FreeListNode::FromAddress(start);
-  node->set_size(heap_, size_in_bytes);
+  heap_->CreateFillerObjectAt(start, size_in_bytes);
+
   Page* page = Page::FromAddress(start);
 
   // Early return to drop too-small blocks on the floor.
@@ -2317,19 +2197,20 @@ int FreeList::Free(Address start, int size_in_bytes) {
     return size_in_bytes;
   }
 
+  FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
   // Insert other blocks at the head of a free list of the appropriate
   // magnitude.
   if (size_in_bytes <= kSmallListMax) {
-    small_list_.Free(node, size_in_bytes);
+    small_list_.Free(free_space, size_in_bytes);
     page->add_available_in_small_free_list(size_in_bytes);
   } else if (size_in_bytes <= kMediumListMax) {
-    medium_list_.Free(node, size_in_bytes);
+    medium_list_.Free(free_space, size_in_bytes);
     page->add_available_in_medium_free_list(size_in_bytes);
   } else if (size_in_bytes <= kLargeListMax) {
-    large_list_.Free(node, size_in_bytes);
+    large_list_.Free(free_space, size_in_bytes);
     page->add_available_in_large_free_list(size_in_bytes);
   } else {
-    huge_list_.Free(node, size_in_bytes);
+    huge_list_.Free(free_space, size_in_bytes);
    page->add_available_in_huge_free_list(size_in_bytes);
   }
 
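FreeList::Free now casts the freshly written filler to FreeSpace and threads it onto one of four category lists chosen by size. A compilable sketch of that segregated dispatch follows; the threshold values are placeholders, not the real kSmallListMax/kMediumListMax/kLargeListMax, which are defined elsewhere in V8:

    struct Node {
      Node* next = nullptr;
      int size = 0;
    };

    // Segregated free list with head insertion per size class, mirroring the
    // small/medium/large/huge split above. Threshold values are assumptions.
    class SegregatedFreeList {
     public:
      void Free(Node* node, int size_in_bytes) {
        node->size = size_in_bytes;
        Node** head = &huge_;
        if (size_in_bytes <= kSmallMax) {
          head = &small_;
        } else if (size_in_bytes <= kMediumMax) {
          head = &medium_;
        } else if (size_in_bytes <= kLargeMax) {
          head = &large_;
        }
        node->next = *head;  // push at the head of the chosen list
        *head = node;
      }

     private:
      static const int kSmallMax = 256;    // placeholder, not V8's value
      static const int kMediumMax = 2048;  // placeholder
      static const int kLargeMax = 16384;  // placeholder
      Node* small_ = nullptr;
      Node* medium_ = nullptr;
      Node* large_ = nullptr;
      Node* huge_ = nullptr;
    };

Head insertion keeps frees O(1); the cost shows up in FindNodeFor below, which may have to walk the huge list to find a block that fits.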
@@ -2338,8 +2219,8 @@ int FreeList::Free(Address start, int size_in_bytes) {
 }
 
 
-FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
-  FreeListNode* node = NULL;
+FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
+  FreeSpace* node = NULL;
   Page* page = NULL;
 
   if (size_in_bytes <= kSmallAllocationMax) {
@@ -2376,13 +2257,13 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
   }
 
   int huge_list_available = huge_list_.available();
-  FreeListNode* top_node = huge_list_.top();
-  for (FreeListNode** cur = &top_node; *cur != NULL;
+  FreeSpace* top_node = huge_list_.top();
+  for (FreeSpace** cur = &top_node; *cur != NULL;
       cur = (*cur)->next_address()) {
-    FreeListNode* cur_node = *cur;
+    FreeSpace* cur_node = *cur;
     while (cur_node != NULL &&
            Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
-      int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
+      int size = cur_node->Size();
       huge_list_available -= size;
       page = Page::FromAddress(cur_node->address());
       page->add_available_in_huge_free_list(-size);
@@ -2395,9 +2276,7 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
       break;
     }
 
-    DCHECK((*cur)->map() == heap_->raw_unchecked_free_space_map());
-    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
-    int size = cur_as_free_space->Size();
+    int size = cur_node->Size();
     if (size >= size_in_bytes) {
       // Large enough node found. Unlink it from the list.
       node = *cur;
@@ -2470,7 +2349,7 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
                                                       old_linear_size);
 
   int new_node_size = 0;
-  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
+  FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
   if (new_node == NULL) {
     owner_->SetTopAndLimit(NULL, NULL);
     return NULL;
@@ -2564,11 +2443,10 @@ void FreeList::RepairLists(Heap* heap) {
 #ifdef DEBUG
 intptr_t FreeListCategory::SumFreeList() {
   intptr_t sum = 0;
-  FreeListNode* cur = top();
+  FreeSpace* cur = top();
   while (cur != NULL) {
     DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
-    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
-    sum += cur_as_free_space->nobarrier_size();
+    sum += cur->nobarrier_size();
     cur = cur->next();
   }
   return sum;
@@ -2580,7 +2458,7 @@ static const int kVeryLongFreeList = 500;
 
 int FreeListCategory::FreeListLength() {
   int length = 0;
-  FreeListNode* cur = top();
+  FreeSpace* cur = top();
   while (cur != NULL) {
     length++;
     cur = cur->next();
@@ -2641,7 +2519,19 @@ intptr_t PagedSpace::SizeOfObjects() {
 // on the heap. If there was already a free list then the elements on it
 // were created with the wrong FreeSpaceMap (normally NULL), so we need to
 // fix them.
-void PagedSpace::RepairFreeListsAfterBoot() { free_list_.RepairLists(heap()); }
+void PagedSpace::RepairFreeListsAfterDeserialization() {
+  free_list_.RepairLists(heap());
+  // Each page may have a small free space that is not tracked by a free list.
+  // Update the maps for those free space objects.
+  PageIterator iterator(this);
+  while (iterator.has_next()) {
+    Page* page = iterator.next();
+    int size = static_cast<int>(page->non_available_small_blocks());
+    if (size == 0) continue;
+    Address address = page->OffsetToAddress(Page::kPageSize - size);
+    heap()->CreateFillerObjectAt(address, size);
+  }
+}
 
 
 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
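The new RepairFreeListsAfterDeserialization above also repairs the untracked block at the tail of each page: non_available_small_blocks() bytes sit flush against the page end, so their start address is the page offset Page::kPageSize - size. A toy version of that arithmetic, using an assumed page size rather than V8's real constant:

    #include <cassert>
    #include <cstdint>

    const uintptr_t kPageSizeSketch = 1u << 20;  // assumed 1 MB page

    // Mirrors page->OffsetToAddress(Page::kPageSize - size): the untracked
    // free block of `size` bytes ends exactly at the page boundary.
    uintptr_t UntrackedBlockStart(uintptr_t page_base, int size) {
      return page_base + (kPageSizeSketch - static_cast<uintptr_t>(size));
    }

    int main() {
      const uintptr_t page_base = 0x40000000;  // hypothetical page base
      const int size = 48;                     // bytes not on any free list
      uintptr_t start = UntrackedBlockStart(page_base, size);
      assert(start + size == page_base + kPageSizeSketch);  // ends at page end
      return 0;
    }

With the start address in hand, CreateFillerObjectAt can stamp a valid filler map onto those bytes, which is what the loop in the hunk does for every page.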