| OLD | NEW |
| 1 // Copyright 2006-2010 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. | 11 // with the distribution. |
| (...skipping 23 matching lines...) |
| 35 namespace v8 { | 35 namespace v8 { |
| 36 namespace internal { | 36 namespace internal { |
| 37 | 37 |
| 38 // For contiguous spaces, top should be in the space (or at the end) and limit | 38 // For contiguous spaces, top should be in the space (or at the end) and limit |
| 39 // should be the end of the space. | 39 // should be the end of the space. |
| 40 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ | 40 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ |
| 41 ASSERT((space).low() <= (info).top \ | 41 ASSERT((space).low() <= (info).top \ |
| 42 && (info).top <= (space).high() \ | 42 && (info).top <= (space).high() \ |
| 43 && (info).limit == (space).high()) | 43 && (info).limit == (space).high()) |
| 44 | 44 |
| 45 intptr_t Page::watermark_invalidated_mark_ = 1 << Page::WATERMARK_INVALIDATED; | |
| 46 | |
| 47 // ---------------------------------------------------------------------------- | 45 // ---------------------------------------------------------------------------- |
| 48 // HeapObjectIterator | 46 // HeapObjectIterator |
| 49 | 47 |
| 50 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { | 48 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { |
| 51 Initialize(space->bottom(), space->top(), NULL); | 49 // You can't actually iterate over the anchor page. It is not a real page, |
| 50 // just an anchor for the double linked page list. Initialize as if we have |
| 51 // reached the end of the anchor page, then the first iteration will move on |
| 52 // to the first page. |
| 53 Initialize(space, |
| 54 NULL, |
| 55 NULL, |
| 56 kAllPagesInSpace, |
| 57 NULL); |
| 52 } | 58 } |
| 53 | 59 |
| 54 | 60 |
| 55 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, | 61 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, |
| 56 HeapObjectCallback size_func) { | 62 HeapObjectCallback size_func) { |
| 57 Initialize(space->bottom(), space->top(), size_func); | 63 // You can't actually iterate over the anchor page. It is not a real page, |
| 64 // just an anchor for the double linked page list. Initialize the current |
| 65 // address and end as NULL, then the first iteration will move on |
| 66 // to the first page. |
| 67 Initialize(space, |
| 68 NULL, |
| 69 NULL, |
| 70 kAllPagesInSpace, |
| 71 size_func); |
| 58 } | 72 } |
| 59 | 73 |
| 60 | 74 |
| 61 HeapObjectIterator::HeapObjectIterator(Page* page, | 75 HeapObjectIterator::HeapObjectIterator(Page* page, |
| 62 HeapObjectCallback size_func) { | 76 HeapObjectCallback size_func) { |
| 63 Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func); | 77 Initialize(page->owner(), |
| 78 page->ObjectAreaStart(), |
| 79 page->ObjectAreaEnd(), |
| 80 kOnePageOnly, |
| 81 size_func); |
| 82 ASSERT(!page->IsFlagSet(Page::WAS_SWEPT_CONSERVATIVELY)); |
| 64 } | 83 } |
| 65 | 84 |
| 66 | 85 |
| 67 void HeapObjectIterator::Initialize(Address cur, Address end, | 86 void HeapObjectIterator::Initialize(PagedSpace* space, |
| 87 Address cur, Address end, |
| 88 HeapObjectIterator::PageMode mode, |
| 68 HeapObjectCallback size_f) { | 89 HeapObjectCallback size_f) { |
| 90 space_ = space; |
| 69 cur_addr_ = cur; | 91 cur_addr_ = cur; |
| 70 end_addr_ = end; | 92 cur_end_ = end; |
| 71 end_page_ = Page::FromAllocationTop(end); | 93 page_mode_ = mode; |
| 72 size_func_ = size_f; | 94 size_func_ = size_f; |
| 73 Page* p = Page::FromAllocationTop(cur_addr_); | |
| 74 cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop(); | |
| 75 | 95 |
| 76 if (!p->IsFlagSet(Page::IS_CONTINUOUS)) { | 96 // You must call Heap::EnsureHeapIsIterable before creating a heap object |
| 77 ASSERT(IncrementalMarking::state() == IncrementalMarking::STOPPED); | 97 // iterator. |
| 78 cur_addr_ = Marking::FirstLiveObject(cur_addr_, cur_limit_); | 98 ASSERT(IncrementalMarking::state() == IncrementalMarking::STOPPED); |
| 79 if (cur_addr_ > cur_limit_) cur_addr_ = cur_limit_; | |
| 80 } | |
| 81 | 99 |
| 82 #ifdef DEBUG | 100 #ifdef DEBUG |
| 83 Verify(); | 101 Verify(); |
| 84 #endif | 102 #endif |
| 85 } | 103 } |
| 86 | 104 |
| 87 | 105 |
| 88 HeapObject* HeapObjectIterator::FromNextPage() { | 106 // We have hit the end of the page and should advance to the next block of |
| 89 if (cur_addr_ == end_addr_) return NULL; | 107 // objects. This happens at the end of the page. |
| 90 | 108 bool HeapObjectIterator::AdvanceToNextPage() { |
| 91 Page* cur_page = Page::FromAllocationTop(cur_addr_); | 109 ASSERT(cur_addr_ == cur_end_); |
| 110 if (page_mode_ == kOnePageOnly) return false; |
| 111 Page* cur_page; |
| 112 if (cur_addr_ == NULL) { |
| 113 cur_page = space_->anchor(); |
| 114 } else { |
| 115 cur_page = Page::FromAddress(cur_addr_ - 1); |
| 116 ASSERT(cur_addr_ == cur_page->ObjectAreaEnd()); |
| 117 } |
| 92 cur_page = cur_page->next_page(); | 118 cur_page = cur_page->next_page(); |
| 93 ASSERT(cur_page->is_valid()); | 119 if (cur_page == space_->anchor()) return false; |
| 94 | |
| 95 cur_addr_ = cur_page->ObjectAreaStart(); | 120 cur_addr_ = cur_page->ObjectAreaStart(); |
| 96 cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop(); | 121 cur_end_ = cur_page->ObjectAreaEnd(); |
| 97 | 122 ASSERT(!cur_page->IsFlagSet(Page::WAS_SWEPT_CONSERVATIVELY)); |
| 98 if (!cur_page->IsFlagSet(Page::IS_CONTINUOUS)) { | |
| 99 ASSERT(IncrementalMarking::state() == IncrementalMarking::STOPPED); | |
| 100 cur_addr_ = Marking::FirstLiveObject(cur_addr_, cur_limit_); | |
| 101 if (cur_addr_ > cur_limit_) cur_addr_ = cur_limit_; | |
| 102 } | |
| 103 | |
| 104 if (cur_addr_ == end_addr_) return NULL; | |
| 105 ASSERT(cur_addr_ < cur_limit_); | |
| 106 #ifdef DEBUG | |
| 107 Verify(); | |
| 108 #endif | |
| 109 return FromCurrentPage(); | |
| 110 } | |
| 111 | |
| 112 | |
| 113 void HeapObjectIterator::AdvanceUsingMarkbits() { | |
| 114 ASSERT(IncrementalMarking::state() == IncrementalMarking::STOPPED); | 123 ASSERT(IncrementalMarking::state() == IncrementalMarking::STOPPED); |
| 115 HeapObject* obj = HeapObject::FromAddress(cur_addr_); | 124 return true; |
| 116 int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj); | |
| 117 ASSERT_OBJECT_SIZE(obj_size); | |
| 118 cur_addr_ = Marking::NextLiveObject(obj, | |
| 119 obj_size, | |
| 120 cur_limit_); | |
| 121 if (cur_addr_ > cur_limit_) cur_addr_ = cur_limit_; | |
| 122 } | 125 } |
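The new iterator leans on a sentinel ("anchor") page that is not a real page, just the fixed head of a circular doubly linked page list. As a minimal, self-contained sketch of that pattern (stand-in names, not the V8 types): iteration begins at the node after the anchor and stops once the walk cycles back to it, which mirrors how AdvanceToNextPage treats space_->anchor().

```cpp
// Minimal sketch of a circular page list with a sentinel anchor.
// The anchor is not a real page; iteration starts after it and stops
// when the walk comes back around to it.
#include <cstdio>

struct PageNode {
  PageNode* next = this;   // A fresh node is linked to itself, like the anchor.
  PageNode* prev = this;
  int id = -1;             // -1 marks the anchor; real pages get an id.
};

void InsertAtEnd(PageNode* anchor, PageNode* page) {
  page->prev = anchor->prev;
  page->next = anchor;
  anchor->prev->next = page;
  anchor->prev = page;
}

int main() {
  PageNode anchor;                       // Plays the role of the space's anchor page.
  PageNode pages[3];
  for (int i = 0; i < 3; i++) {
    pages[i].id = i;
    InsertAtEnd(&anchor, &pages[i]);
  }
  // Iterate: begin at the node after the anchor, stop when we are back at it.
  for (PageNode* p = anchor.next; p != &anchor; p = p->next) {
    std::printf("visiting page %d\n", p->id);
  }
  return 0;
}
```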
| 123 | 126 |
| 124 | 127 |
| 125 #ifdef DEBUG | 128 #ifdef DEBUG |
| 126 void HeapObjectIterator::Verify() { | 129 void HeapObjectIterator::Verify() { |
| 127 Page* p = Page::FromAllocationTop(cur_addr_); | 130 // TODO(gc): We should do something here. |
| 128 ASSERT(p == Page::FromAllocationTop(cur_limit_)); | |
| 129 ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_)); | |
| 130 } | 131 } |
| 131 #endif | 132 #endif |
| 132 | 133 |
| 133 | 134 |
| 134 // ----------------------------------------------------------------------------- | 135 // ----------------------------------------------------------------------------- |
| 135 // PageIterator | |
| 136 | |
| 137 PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) { | |
| 138 prev_page_ = NULL; | |
| 139 switch (mode) { | |
| 140 case PAGES_IN_USE: | |
| 141 stop_page_ = space->AllocationTopPage(); | |
| 142 break; | |
| 143 case ALL_PAGES: | |
| 144 #ifdef DEBUG | |
| 145 // Verify that the cached last page in the space is actually the | |
| 146 // last page. | |
| 147 for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) { | |
| 148 if (!p->next_page()->is_valid()) { | |
| 149 ASSERT(space->last_page_ == p); | |
| 150 } | |
| 151 } | |
| 152 #endif | |
| 153 stop_page_ = space->last_page_; | |
| 154 break; | |
| 155 } | |
| 156 } | |
| 157 | |
| 158 | |
| 159 // ----------------------------------------------------------------------------- | |
| 160 // CodeRange | 136 // CodeRange |
| 161 | 137 |
| 162 List<CodeRange::FreeBlock> CodeRange::free_list_(0); | 138 List<CodeRange::FreeBlock> CodeRange::free_list_(0); |
| 163 List<CodeRange::FreeBlock> CodeRange::allocation_list_(0); | 139 List<CodeRange::FreeBlock> CodeRange::allocation_list_(0); |
| 164 int CodeRange::current_allocation_block_index_ = 0; | 140 int CodeRange::current_allocation_block_index_ = 0; |
| 165 VirtualMemory* CodeRange::code_range_ = NULL; | 141 VirtualMemory* CodeRange::code_range_ = NULL; |
| 166 | 142 |
| 167 | 143 |
| 168 bool CodeRange::Setup(const size_t requested) { | 144 bool CodeRange::Setup(const size_t requested) { |
| 169 ASSERT(code_range_ == NULL); | 145 ASSERT(code_range_ == NULL); |
| (...skipping 236 matching lines...) |
| 406 executable == EXECUTABLE)) { | 382 executable == EXECUTABLE)) { |
| 407 VirtualMemory::ReleaseRegion(base, *allocated_size); | 383 VirtualMemory::ReleaseRegion(base, *allocated_size); |
| 408 size_ -= *allocated_size; | 384 size_ -= *allocated_size; |
| 409 return NULL; | 385 return NULL; |
| 410 } | 386 } |
| 411 | 387 |
| 412 return base; | 388 return base; |
| 413 } | 389 } |
| 414 | 390 |
| 415 | 391 |
| 392 void Page::InitializeAsAnchor(PagedSpace* owner) { |
| 393 owner_ = owner; |
| 394 set_prev_page(this); |
| 395 set_next_page(this); |
| 396 } |
| 397 |
| 398 |
| 399 void MemoryChunk::InsertAfter(MemoryChunk* other) { |
| 400 next_chunk_ = other->next_chunk_; |
| 401 prev_chunk_ = other; |
| 402 other->next_chunk_->prev_chunk_ = this; |
| 403 other->next_chunk_ = this; |
| 404 } |
| 405 |
| 406 |
| 407 void MemoryChunk::Unlink() { |
| 408 next_chunk_->prev_chunk_ = prev_chunk_; |
| 409 prev_chunk_->next_chunk_ = next_chunk_; |
| 410 prev_chunk_ = NULL; |
| 411 next_chunk_ = NULL; |
| 412 } |
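As a companion to the list helpers added above, here is a hedged sketch (again with stand-in types rather than MemoryChunk) of the two constant-time pointer rewires they perform: splicing a node in directly after another, and unlinking it while keeping the ring closed.

```cpp
// Sketch of O(1) splice and removal on a circular doubly linked list,
// analogous in shape to the InsertAfter/Unlink pair in the patch.
#include <cassert>

struct Chunk {
  Chunk* next = this;
  Chunk* prev = this;

  void InsertAfter(Chunk* other) {       // Link *this directly after `other`.
    next = other->next;
    prev = other;
    other->next->prev = this;
    other->next = this;
  }
  void Unlink() {                        // Remove *this; neighbours reconnect.
    next->prev = prev;
    prev->next = next;
    next = nullptr;
    prev = nullptr;
  }
};

int main() {
  Chunk anchor, a, b;
  a.InsertAfter(&anchor);                // anchor <-> a
  b.InsertAfter(&a);                     // anchor <-> a <-> b
  a.Unlink();                            // anchor <-> b
  assert(anchor.next == &b && b.next == &anchor);
  assert(a.next == nullptr && a.prev == nullptr);
  return 0;
}
```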
| 413 |
| 414 |
| 416 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, | 415 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, |
| 417 Executability executable, | 416 Executability executable, |
| 418 Space* owner) { | 417 Space* owner) { |
| 419 size_t chunk_size = MemoryChunk::kBodyOffset + body_size; | 418 size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size; |
| 420 Address base = NULL; | 419 Address base = NULL; |
| 421 if (executable == EXECUTABLE) { | 420 if (executable == EXECUTABLE) { |
| 422 // Check executable memory limit. | 421 // Check executable memory limit. |
| 423 if (size_executable_ + chunk_size > capacity_executable_) { | 422 if (size_executable_ + chunk_size > capacity_executable_) { |
| 424 LOG(StringEvent("MemoryAllocator::AllocateRawMemory", | 423 LOG(StringEvent("MemoryAllocator::AllocateRawMemory", |
| 425 "V8 Executable Allocation capacity exceeded")); | 424 "V8 Executable Allocation capacity exceeded")); |
| 426 return NULL; | 425 return NULL; |
| 427 } | 426 } |
| 428 | 427 |
| 429 // Allocate executable memory either from code range or from the | 428 // Allocate executable memory either from code range or from the |
| (...skipping 37 matching lines...) |
| 467 return MemoryChunk::Initialize(base, chunk_size, executable, owner); | 466 return MemoryChunk::Initialize(base, chunk_size, executable, owner); |
| 468 } | 467 } |
| 469 | 468 |
| 470 | 469 |
| 471 Page* MemoryAllocator::AllocatePage(PagedSpace* owner, | 470 Page* MemoryAllocator::AllocatePage(PagedSpace* owner, |
| 472 Executability executable) { | 471 Executability executable) { |
| 473 MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner); | 472 MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner); |
| 474 | 473 |
| 475 if (chunk == NULL) return NULL; | 474 if (chunk == NULL) return NULL; |
| 476 | 475 |
| 477 return Page::Initialize(chunk); | 476 return Page::Initialize(chunk, executable, owner); |
| 478 } | 477 } |
| 479 | 478 |
| 480 | 479 |
| 481 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, | 480 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, |
| 482 Executability executable, | 481 Executability executable, |
| 483 Space* owner) { | 482 Space* owner) { |
| 484 MemoryChunk* chunk = AllocateChunk(object_size, executable, owner); | 483 MemoryChunk* chunk = AllocateChunk(object_size, executable, owner); |
| 485 if (chunk == NULL) return NULL; | 484 if (chunk == NULL) return NULL; |
| 486 return LargePage::Initialize(chunk); | 485 return LargePage::Initialize(chunk); |
| 487 } | 486 } |
| (...skipping 94 matching lines...) |
| 582 capacity_, size_, static_cast<int>(pct*100)); | 581 capacity_, size_, static_cast<int>(pct*100)); |
| 583 } | 582 } |
| 584 #endif | 583 #endif |
| 585 | 584 |
| 586 // ----------------------------------------------------------------------------- | 585 // ----------------------------------------------------------------------------- |
| 587 // PagedSpace implementation | 586 // PagedSpace implementation |
| 588 | 587 |
| 589 PagedSpace::PagedSpace(intptr_t max_capacity, | 588 PagedSpace::PagedSpace(intptr_t max_capacity, |
| 590 AllocationSpace id, | 589 AllocationSpace id, |
| 591 Executability executable) | 590 Executability executable) |
| 592 : Space(id, executable) { | 591 : Space(id, executable), |
| 592 free_list_(this), |
| 593 was_swept_conservatively_(false) { |
| 593 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) | 594 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) |
| 594 * Page::kObjectAreaSize; | 595 * Page::kObjectAreaSize; |
| 595 accounting_stats_.Clear(); | 596 accounting_stats_.Clear(); |
| 596 | 597 |
| 597 allocation_info_.top = NULL; | 598 allocation_info_.top = NULL; |
| 598 allocation_info_.limit = NULL; | 599 allocation_info_.limit = NULL; |
| 600 |
| 601 anchor_.InitializeAsAnchor(this); |
| 599 } | 602 } |
| 600 | 603 |
| 601 | 604 |
| 602 bool PagedSpace::Setup() { | 605 bool PagedSpace::Setup() { |
| 603 if (HasBeenSetup()) return false; | |
| 604 | |
| 605 // Maximum space capacity can not be less than single page size. | |
| 606 if (max_capacity_ < Page::kPageSize) return false; | |
| 607 | |
| 608 first_page_ = MemoryAllocator::AllocatePage(this, executable()); | |
| 609 if (!first_page_->is_valid()) return false; | |
| 610 | |
| 611 // We are sure that the first page is valid and that we have at least one | |
| 612 // page. | |
| 613 accounting_stats_.ExpandSpace(Page::kObjectAreaSize); | |
| 614 ASSERT(Capacity() <= max_capacity_); | |
| 615 | |
| 616 last_page_ = first_page_; | |
| 617 ASSERT(!last_page_->next_page()->is_valid()); | |
| 618 | |
| 619 // Use first_page_ for allocation. | |
| 620 SetAllocationInfo(&allocation_info_, first_page_); | |
| 621 | |
| 622 return true; | 606 return true; |
| 623 } | 607 } |
| 624 | 608 |
| 625 | 609 |
| 626 bool PagedSpace::HasBeenSetup() { | 610 bool PagedSpace::HasBeenSetup() { |
| 627 return (Capacity() > 0); | 611 return true; |
| 628 } | 612 } |
| 629 | 613 |
| 630 | 614 |
| 631 void PagedSpace::TearDown() { | 615 void PagedSpace::TearDown() { |
| 632 Page* next = NULL; | 616 PageIterator iterator(this); |
| 633 for (Page* p = first_page_; p->is_valid(); p = next) { | 617 while (iterator.has_next()) { |
| 634 next = p->next_page(); | 618 MemoryAllocator::Free(iterator.next()); |
| 635 MemoryAllocator::Free(p); | |
| 636 } | 619 } |
| 637 first_page_ = NULL; | 620 anchor_.set_next_page(&anchor_); |
| 638 last_page_ = NULL; | 621 anchor_.set_prev_page(&anchor_); |
| 639 accounting_stats_.Clear(); | 622 accounting_stats_.Clear(); |
| 640 } | 623 } |
| 641 | 624 |
| 642 | 625 |
| 643 #ifdef ENABLE_HEAP_PROTECTION | 626 #ifdef ENABLE_HEAP_PROTECTION |
| 644 | 627 |
| 645 void PagedSpace::Protect() { | 628 void PagedSpace::Protect() { |
| 646 Page* page = first_page_; | 629 Page* page = first_page_; |
| 647 while (page->is_valid()) { | 630 while (page->is_valid()) { |
| 648 MemoryAllocator::ProtectChunkFromPage(page); | 631 MemoryAllocator::ProtectChunkFromPage(page); |
| 649 page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page(); | 632 page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page(); |
| 650 } | 633 } |
| 651 } | 634 } |
| 652 | 635 |
| 653 | 636 |
| 654 void PagedSpace::Unprotect() { | 637 void PagedSpace::Unprotect() { |
| 655 Page* page = first_page_; | 638 Page* page = first_page_; |
| 656 while (page->is_valid()) { | 639 while (page->is_valid()) { |
| 657 MemoryAllocator::UnprotectChunkFromPage(page); | 640 MemoryAllocator::UnprotectChunkFromPage(page); |
| 658 page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page(); | 641 page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page(); |
| 659 } | 642 } |
| 660 } | 643 } |
| 661 | 644 |
| 662 #endif | 645 #endif |
| 663 | 646 |
| 664 | 647 |
| 665 MaybeObject* PagedSpace::FindObject(Address addr) { | 648 MaybeObject* PagedSpace::FindObject(Address addr) { |
| 666 // Note: this function can only be called before or after mark-compact GC | 649 // Note: this function can only be called on precisely swept spaces. |
| 667 // because it accesses map pointers. | |
| 668 ASSERT(!MarkCompactCollector::in_use()); | 650 ASSERT(!MarkCompactCollector::in_use()); |
| 669 | 651 |
| 670 if (!Contains(addr)) return Failure::Exception(); | 652 if (!Contains(addr)) return Failure::Exception(); |
| 671 | 653 |
| 672 Page* p = Page::FromAddress(addr); | 654 Page* p = Page::FromAddress(addr); |
| 673 ASSERT(IsUsed(p)); | 655 HeapObjectIterator it(p, NULL); |
| 674 Address cur = p->ObjectAreaStart(); | 656 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
| 675 Address end = p->AllocationTop(); | 657 Address cur = obj->address(); |
| 676 while (cur < end) { | |
| 677 HeapObject* obj = HeapObject::FromAddress(cur); | |
| 678 Address next = cur + obj->Size(); | 658 Address next = cur + obj->Size(); |
| 679 if ((cur <= addr) && (addr < next)) return obj; | 659 if ((cur <= addr) && (addr < next)) return obj; |
| 680 cur = next; | |
| 681 } | 660 } |
| 682 | 661 |
| 683 UNREACHABLE(); | 662 UNREACHABLE(); |
| 684 return Failure::Exception(); | 663 return Failure::Exception(); |
| 685 } | 664 } |
| 686 | 665 |
| 687 | 666 |
| 688 bool PagedSpace::IsUsed(Page* page) { | 667 void PagedSpace::SetAllocationInfo(Address top, Address limit) { |
| 689 PageIterator it(this, PageIterator::PAGES_IN_USE); | 668 Free(allocation_info_.top, allocation_info_.limit - allocation_info_.top); |
| 690 while (it.has_next()) { | 669 allocation_info_.top = top; |
| 691 if (page == it.next()) return true; | 670 allocation_info_.limit = limit; |
| 692 } | 671 ASSERT(allocation_info_.VerifyPagedAllocation()); |
| 693 return false; | |
| 694 } | |
| 695 | |
| 696 | |
| 697 void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) { | |
| 698 alloc_info->top = p->ObjectAreaStart(); | |
| 699 alloc_info->limit = p->ObjectAreaEnd(); | |
| 700 ASSERT(alloc_info->VerifyPagedAllocation()); | |
| 701 } | 672 } |
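The allocation_info_ top/limit pair is a bump-pointer window, and the rewritten SetAllocationInfo above retires whatever is left of the old window before installing a new one. A simplified sketch of that idea, with hypothetical names and a callback standing in for the space's Free():

```cpp
// Sketch of bump-pointer (linear) allocation within a [top, limit) window.
// When a new window is installed, the unused tail of the old one is handed
// back to a free-space callback, loosely like the patched SetAllocationInfo.
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct LinearArea {
  uint8_t* top = nullptr;
  uint8_t* limit = nullptr;

  void* Allocate(size_t size) {
    if (static_cast<size_t>(limit - top) < size) return nullptr;  // Needs slow path.
    void* result = top;
    top += size;                     // Bump the pointer; no per-object bookkeeping.
    return result;
  }

  // Install a new window, reporting the leftover of the old one.
  template <typename FreeFn>
  void SetArea(uint8_t* new_top, uint8_t* new_limit, FreeFn free_leftover) {
    if (top != nullptr) free_leftover(top, static_cast<size_t>(limit - top));
    top = new_top;
    limit = new_limit;
  }
};

int main() {
  static uint8_t arena[256];
  LinearArea area;
  area.SetArea(arena, arena + sizeof(arena),
               [](uint8_t*, size_t) { /* nothing to retire yet */ });
  void* a = area.Allocate(64);
  void* b = area.Allocate(64);
  std::printf("a=%p b=%p remaining=%d\n", a, b,
              static_cast<int>(area.limit - area.top));
  return 0;
}
```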
| 702 | 673 |
| 703 | 674 |
| 704 bool PagedSpace::Expand() { | 675 bool PagedSpace::Expand() { |
| 705 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); | 676 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); |
| 706 ASSERT(Capacity() % Page::kObjectAreaSize == 0); | 677 ASSERT(Capacity() % Page::kObjectAreaSize == 0); |
| 707 | 678 |
| 708 if (Capacity() == max_capacity_) return false; | 679 if (Capacity() == max_capacity_) return false; |
| 709 | 680 |
| 710 ASSERT(Capacity() < max_capacity_); | 681 ASSERT(Capacity() < max_capacity_); |
| 711 // Last page must be valid and its next page is invalid. | |
| 712 ASSERT(last_page_->is_valid() && !last_page_->next_page()->is_valid()); | |
| 713 | 682 |
| 714 // We are going to exceed capacity for this space. | 683 // Are we going to exceed capacity for this space? |
| 715 if ((Capacity() + Page::kPageSize) > max_capacity_) return false; | 684 if ((Capacity() + Page::kPageSize) > max_capacity_) return false; |
| 716 | 685 |
| 717 Page* p = MemoryAllocator::AllocatePage(this, executable()); | 686 Page* p = MemoryAllocator::AllocatePage(this, executable()); |
| 718 if (!p->is_valid()) return false; | 687 if (p == NULL) return false; |
| 719 | 688 |
| 720 accounting_stats_.ExpandSpace(Page::kObjectAreaSize); | |
| 721 ASSERT(Capacity() <= max_capacity_); | 689 ASSERT(Capacity() <= max_capacity_); |
| 722 | 690 |
| 723 last_page_->set_next_page(p); | 691 p->InsertAfter(anchor_.prev_page()); |
| 724 last_page_ = p; | |
| 725 | |
| 726 ASSERT(!last_page_->next_page()->is_valid()); | |
| 727 | 692 |
| 728 return true; | 693 return true; |
| 729 } | 694 } |
| 730 | 695 |
| 731 | 696 |
| 732 #ifdef DEBUG | 697 #ifdef DEBUG |
| 733 int PagedSpace::CountTotalPages() { | 698 int PagedSpace::CountTotalPages() { |
| 699 PageIterator it(this); |
| 734 int count = 0; | 700 int count = 0; |
| 735 for (Page* p = first_page_; p->is_valid(); p = p->next_page()) { | 701 while (it.has_next()) { |
| 702 it.next(); |
| 736 count++; | 703 count++; |
| 737 } | 704 } |
| 738 return count; | 705 return count; |
| 739 } | 706 } |
| 740 #endif | 707 #endif |
| 741 | 708 |
| 742 | 709 |
| 743 void PagedSpace::Shrink() { | 710 void PagedSpace::Shrink() { |
| 744 Page* top_page = AllocationTopPage(); | |
| 745 ASSERT(top_page->is_valid()); | |
| 746 | |
| 747 // TODO(gc) release half of pages? | 711 // TODO(gc) release half of pages? |
| 748 if (top_page->next_page()->is_valid()) { | |
| 749 int pages_freed = 0; | |
| 750 Page* page = top_page->next_page(); | |
| 751 Page* next_page; | |
| 752 while (page->is_valid()) { | |
| 753 next_page = page->next_page(); | |
| 754 MemoryAllocator::Free(page); | |
| 755 pages_freed++; | |
| 756 page = next_page; | |
| 757 } | |
| 758 top_page->set_next_page(Page::FromAddress(NULL)); | |
| 759 last_page_ = top_page; | |
| 760 | |
| 761 accounting_stats_.ShrinkSpace(pages_freed * Page::kObjectAreaSize); | |
| 762 ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize); | |
| 763 } | |
| 764 } | 712 } |
| 765 | 713 |
| 766 | 714 |
| 767 bool PagedSpace::EnsureCapacity(int capacity) { | 715 bool PagedSpace::EnsureCapacity(int capacity) { |
| 768 while (Capacity() < capacity) { | 716 while (Capacity() < capacity) { |
| 769 // Expand the space until it has the required capacity or expansion fails. | 717 // Expand the space until it has the required capacity or expansion fails. |
| 770 if (!Expand()) return false; | 718 if (!Expand()) return false; |
| 771 } | 719 } |
| 772 return true; | 720 return true; |
| 773 } | 721 } |
| 774 | 722 |
| 775 | 723 |
| 776 #ifdef DEBUG | 724 #ifdef DEBUG |
| 777 void PagedSpace::Print() { } | 725 void PagedSpace::Print() { } |
| 778 #endif | 726 #endif |
| 779 | 727 |
| 780 | 728 |
| 781 #ifdef DEBUG | 729 #ifdef DEBUG |
| 782 // We do not assume that the PageIterator works, because it depends on the | |
| 783 // invariants we are checking during verification. | |
| 784 void PagedSpace::Verify(ObjectVisitor* visitor) { | 730 void PagedSpace::Verify(ObjectVisitor* visitor) { |
| 785 // The allocation pointer should be valid, and it should be in a page in the | 731 // We can only iterate over the pages if they were swept precisely. |
| 786 // space. | 732 if (was_swept_conservatively_) return; |
| 787 ASSERT(allocation_info_.VerifyPagedAllocation()); | |
| 788 Page* top_page = Page::FromAllocationTop(allocation_info_.top); | |
| 789 | 733 |
| 790 // Loop over all the pages. | 734 bool allocation_pointer_found_in_space = |
| 791 bool above_allocation_top = false; | 735 (allocation_info_.top != allocation_info_.limit); |
| 792 Page* current_page = first_page_; | 736 PageIterator page_iterator(this); |
| 793 while (current_page->is_valid()) { | 737 while (page_iterator.has_next()) { |
| 794 if (above_allocation_top) { | 738 Page* page = page_iterator.next(); |
| 795 // We don't care what's above the allocation top. | 739 ASSERT(page->owner() == this); |
| 796 } else { | 740 if (page == Page::FromAllocationTop(allocation_info_.top)) { |
| 797 Address top = current_page->AllocationTop(); | 741 allocation_pointer_found_in_space = true; |
| 798 if (current_page == top_page) { | 742 } |
| 799 ASSERT(top == allocation_info_.top); | 743 ASSERT(!page->IsFlagSet(MemoryChunk::WAS_SWEPT_CONSERVATIVELY)); |
| 800 // The next page will be above the allocation top. | 744 HeapObjectIterator it(page, NULL); |
| 801 above_allocation_top = true; | 745 Address end_of_previous_object = page->ObjectAreaStart(); |
| 802 } | 746 Address top = page->ObjectAreaEnd(); |
| 747 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { |
| 748 ASSERT(end_of_previous_object <= object->address()); |
| 803 | 749 |
| 804 HeapObjectIterator it(current_page, NULL); | 750 // The first word should be a map, and we expect all map pointers to |
| 805 Address end_of_previous_object = current_page->ObjectAreaStart(); | 751 // be in map space. |
| 806 for (HeapObject* object = it.next(); object != NULL; object = it.next()) { | 752 Map* map = object->map(); |
| 807 ASSERT(end_of_previous_object <= object->address()); | 753 ASSERT(map->IsMap()); |
| 754 ASSERT(Heap::map_space()->Contains(map)); |
| 808 | 755 |
| 809 // The first word should be a map, and we expect all map pointers to | 756 // Perform space-specific object verification. |
| 810 // be in map space. | 757 VerifyObject(object); |
| 811 Map* map = object->map(); | |
| 812 ASSERT(map->IsMap()); | |
| 813 ASSERT(Heap::map_space()->Contains(map)); | |
| 814 | 758 |
| 815 // Perform space-specific object verification. | 759 // The object itself should look OK. |
| 816 VerifyObject(object); | 760 object->Verify(); |
| 817 | 761 |
| 818 // The object itself should look OK. | 762 // All the interior pointers should be contained in the heap. |
| 819 object->Verify(); | 763 int size = object->Size(); |
| 764 object->IterateBody(map->instance_type(), size, visitor); |
| 820 | 765 |
| 821 // All the interior pointers should be contained in the heap and | 766 ASSERT(object->address() + size <= top); |
| 822 // have page regions covering intergenerational references should be | 767 end_of_previous_object = object->address() + size; |
| 823 // marked dirty. | |
| 824 int size = object->Size(); | |
| 825 object->IterateBody(map->instance_type(), size, visitor); | |
| 826 | |
| 827 ASSERT(object->address() + size <= top); | |
| 828 end_of_previous_object = object->address() + size; | |
| 829 } | |
| 830 } | 768 } |
| 831 | |
| 832 current_page = current_page->next_page(); | |
| 833 } | 769 } |
| 834 } | 770 } |
| 835 #endif | 771 #endif |
| 836 | 772 |
| 837 | 773 |
| 838 // ----------------------------------------------------------------------------- | 774 // ----------------------------------------------------------------------------- |
| 839 // NewSpace implementation | 775 // NewSpace implementation |
| 840 | 776 |
| 841 | 777 |
| 842 bool NewSpace::Setup(int maximum_semispace_capacity) { | 778 bool NewSpace::Setup(int maximum_semispace_capacity) { |
| (...skipping 563 matching lines...) |
| 1406 } else if (size_in_bytes == 2 * kPointerSize) { | 1342 } else if (size_in_bytes == 2 * kPointerSize) { |
| 1407 set_map(Heap::raw_unchecked_two_pointer_filler_map()); | 1343 set_map(Heap::raw_unchecked_two_pointer_filler_map()); |
| 1408 } else { | 1344 } else { |
| 1409 UNREACHABLE(); | 1345 UNREACHABLE(); |
| 1410 } | 1346 } |
| 1411 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during | 1347 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during |
| 1412 // deserialization because the byte array map is not done yet. | 1348 // deserialization because the byte array map is not done yet. |
| 1413 } | 1349 } |
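The filler maps assigned above exist so that freed gaps still look like objects with a readable size, which is what keeps a linear heap walk possible. A rough, non-V8 sketch of that trick, using a tag byte and size header instead of real maps:

```cpp
// Sketch: free gaps carry a small header with their size so a linear walk
// over a region can step across them exactly like live objects.
#include <cstdint>
#include <cstdio>
#include <cstring>

constexpr uint32_t kLiveTag = 0x1;
constexpr uint32_t kFreeTag = 0x2;   // Plays the role of a filler/byte-array map.

struct Header {
  uint32_t tag;
  uint32_t size;   // Total size of the block, header included.
};

void WriteBlock(uint8_t* at, uint32_t tag, uint32_t size) {
  Header h{tag, size};
  std::memcpy(at, &h, sizeof(h));
}

int main() {
  static uint8_t region[96];
  WriteBlock(region + 0, kLiveTag, 32);
  WriteBlock(region + 32, kFreeTag, 16);   // A "filler" marking freed space.
  WriteBlock(region + 48, kLiveTag, 48);

  for (uint32_t offset = 0; offset < sizeof(region);) {
    Header h;
    std::memcpy(&h, region + offset, sizeof(h));
    if (h.tag == kLiveTag) std::printf("live object at %u, size %u\n", offset, h.size);
    // Free blocks are skipped the same way, just not reported.
    offset += h.size;
  }
  return 0;
}
```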
| 1414 | 1350 |
| 1415 | 1351 |
| 1416 Address FreeListNode::next() { | 1352 FreeListNode* FreeListNode::next() { |
| 1417 ASSERT(IsFreeListNode(this)); | 1353 ASSERT(IsFreeListNode(this)); |
| 1418 if (map() == Heap::raw_unchecked_byte_array_map()) { | 1354 if (map() == Heap::raw_unchecked_byte_array_map()) { |
| 1419 ASSERT(Size() >= kNextOffset + kPointerSize); | 1355 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize); |
| 1420 return Memory::Address_at(address() + kNextOffset); | 1356 return reinterpret_cast<FreeListNode*>( |
| 1357 Memory::Address_at(address() + kNextOffset)); |
| 1421 } else { | 1358 } else { |
| 1422 return Memory::Address_at(address() + kPointerSize); | 1359 return reinterpret_cast<FreeListNode*>( |
| 1360 Memory::Address_at(address() + kPointerSize)); |
| 1423 } | 1361 } |
| 1424 } | 1362 } |
| 1425 | 1363 |
| 1426 | 1364 |
| 1427 void FreeListNode::set_next(Address next) { | 1365 FreeListNode** FreeListNode::next_address() { |
| 1428 ASSERT(IsFreeListNode(this)); | 1366 ASSERT(IsFreeListNode(this)); |
| 1429 if (map() == Heap::raw_unchecked_byte_array_map()) { | 1367 if (map() == Heap::raw_unchecked_byte_array_map()) { |
| 1430 ASSERT(Size() >= kNextOffset + kPointerSize); | 1368 ASSERT(Size() >= kNextOffset + kPointerSize); |
| 1431 Memory::Address_at(address() + kNextOffset) = next; | 1369 return reinterpret_cast<FreeListNode**>(address() + kNextOffset); |
| 1432 } else { | 1370 } else { |
| 1433 Memory::Address_at(address() + kPointerSize) = next; | 1371 return reinterpret_cast<FreeListNode**>(address() + kPointerSize); |
| 1434 } | 1372 } |
| 1435 } | 1373 } |
| 1436 | 1374 |
| 1437 | 1375 |
| 1438 OldSpaceFreeList::OldSpaceFreeList(AllocationSpace owner) : owner_(owner) { | 1376 void FreeListNode::set_next(FreeListNode* next) { |
| 1377 ASSERT(IsFreeListNode(this)); |
| 1378 // While we are booting the VM the byte array map will actually be null. So |
| 1379 // we have to make sure that we don't try to use it for anything at that |
| 1380 // stage. |
| 1381 if (map() == Heap::raw_unchecked_byte_array_map()) { |
| 1382 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize); |
| 1383 Memory::Address_at(address() + kNextOffset) = |
| 1384 reinterpret_cast<Address>(next); |
| 1385 } else { |
| 1386 Memory::Address_at(address() + kPointerSize) = |
| 1387 reinterpret_cast<Address>(next); |
| 1388 } |
| 1389 } |
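FreeListNode keeps its next pointer inside the freed memory itself, at an offset that depends on whether the block is large enough to masquerade as a byte array. A hedged sketch of that intrusive-list idea, independent of the V8 object layouts:

```cpp
// Sketch of an intrusive free list: the link lives inside the free block,
// so the allocator needs no side table to chain free memory together.
#include <cstddef>
#include <cstdio>
#include <new>

struct FreeBlock {
  size_t size;        // Size of the whole free block.
  FreeBlock* next;    // Stored in the block's own (otherwise unused) memory.
};

int main() {
  alignas(FreeBlock) static unsigned char arena[3 * 64];
  FreeBlock* head = nullptr;

  // Thread three 64-byte blocks of the arena onto the list.
  for (int i = 0; i < 3; i++) {
    FreeBlock* block = new (arena + i * 64) FreeBlock{64, head};
    head = block;
  }

  // Walk the list; each hop reads the pointer stored inside free memory.
  for (FreeBlock* b = head; b != nullptr; b = b->next) {
    std::printf("free block of %zu bytes at %p\n", b->size, static_cast<void*>(b));
  }
  return 0;
}
```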
| 1390 |
| 1391 |
| 1392 OldSpaceFreeList::OldSpaceFreeList(PagedSpace* owner) : owner_(owner) { |
| 1439 Reset(); | 1393 Reset(); |
| 1440 } | 1394 } |
| 1441 | 1395 |
| 1442 | 1396 |
| 1443 void OldSpaceFreeList::Reset() { | 1397 void OldSpaceFreeList::Reset() { |
| 1444 available_ = 0; | 1398 available_ = 0; |
| 1445 for (int i = 0; i < kFreeListsLength; i++) { | 1399 small_list_ = NULL; |
| 1446 free_[i].head_node_ = NULL; | 1400 medium_list_ = NULL; |
| 1447 } | 1401 large_list_ = NULL; |
| 1448 needs_rebuild_ = false; | 1402 huge_list_ = NULL; |
| 1449 finger_ = kHead; | |
| 1450 free_[kHead].next_size_ = kEnd; | |
| 1451 } | |
| 1452 | |
| 1453 | |
| 1454 void OldSpaceFreeList::RebuildSizeList() { | |
| 1455 ASSERT(needs_rebuild_); | |
| 1456 int cur = kHead; | |
| 1457 for (int i = cur + 1; i < kFreeListsLength; i++) { | |
| 1458 if (free_[i].head_node_ != NULL) { | |
| 1459 free_[cur].next_size_ = i; | |
| 1460 cur = i; | |
| 1461 } | |
| 1462 } | |
| 1463 free_[cur].next_size_ = kEnd; | |
| 1464 needs_rebuild_ = false; | |
| 1465 } | 1403 } |
| 1466 | 1404 |
| 1467 | 1405 |
| 1468 int OldSpaceFreeList::Free(Address start, int size_in_bytes) { | 1406 int OldSpaceFreeList::Free(Address start, int size_in_bytes) { |
| 1469 #ifdef DEBUG | 1407 #ifdef DEBUG |
| 1470 MemoryAllocator::ZapBlock(start, size_in_bytes); | 1408 MemoryAllocator::ZapBlock(start, size_in_bytes); |
| 1471 #endif | 1409 #endif |
| 1410 if (size_in_bytes == 0) return 0; |
| 1472 FreeListNode* node = FreeListNode::FromAddress(start); | 1411 FreeListNode* node = FreeListNode::FromAddress(start); |
| 1473 node->set_size(size_in_bytes); | 1412 node->set_size(size_in_bytes); |
| 1474 | 1413 |
| 1475 // We don't use the freelists in compacting mode. This makes it more like a | 1414 // Early return to drop too-small blocks on the floor. |
| 1476 // GC that only has mark-sweep-compact and doesn't have a mark-sweep | 1415 if (size_in_bytes < kSmallListMin) return size_in_bytes; |
| 1477 // collector. | 1416 |
| 1478 if (FLAG_always_compact) { | 1417 // Insert other blocks at the head of a free list of the appropriate |
| 1479 return size_in_bytes; | 1418 // magnitude. |
| 1419 if (size_in_bytes <= kSmallListMax) { |
| 1420 node->set_next(small_list_); |
| 1421 small_list_ = node; |
| 1422 } else if (size_in_bytes <= kMediumListMax) { |
| 1423 node->set_next(medium_list_); |
| 1424 medium_list_ = node; |
| 1425 } else if (size_in_bytes <= kLargeListMax) { |
| 1426 node->set_next(large_list_); |
| 1427 large_list_ = node; |
| 1428 } else { |
| 1429 node->set_next(huge_list_); |
| 1430 huge_list_ = node; |
| 1480 } | 1431 } |
| 1481 | |
| 1482 // Early return to drop too-small blocks on the floor (one or two word | |
| 1483 // blocks cannot hold a map pointer, a size field, and a pointer to the | |
| 1484 // next block in the free list). | |
| 1485 if (size_in_bytes < kMinBlockSize) { | |
| 1486 return size_in_bytes; | |
| 1487 } | |
| 1488 | |
| 1489 // Insert other blocks at the head of an exact free list. | |
| 1490 int index = size_in_bytes >> kPointerSizeLog2; | |
| 1491 node->set_next(free_[index].head_node_); | |
| 1492 free_[index].head_node_ = node->address(); | |
| 1493 available_ += size_in_bytes; | 1432 available_ += size_in_bytes; |
| 1494 needs_rebuild_ = true; | 1433 ASSERT(available_ == SumFreeLists()); |
| 1495 return 0; | 1434 return 0; |
| 1496 } | 1435 } |
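The rewritten Free above routes a block to one of four lists by magnitude instead of the old exact-size array. A small sketch of that size-class bucketing, with made-up thresholds rather than the patch's kSmallListMax/kMediumListMax/kLargeListMax values:

```cpp
// Sketch of segregated free lists: blocks are filed into a handful of
// buckets by size class, so both Free and Allocate touch only one list.
#include <cstdio>

struct Node { int size; Node* next; };

struct SegregatedFreeList {
  // Hypothetical class boundaries (bytes); the real constants differ.
  static constexpr int kSmallMax = 32;
  static constexpr int kMediumMax = 256;
  static constexpr int kLargeMax = 4096;

  Node* small_ = nullptr;
  Node* medium_ = nullptr;
  Node* large_ = nullptr;
  Node* huge_ = nullptr;

  Node** BucketFor(int size) {
    if (size <= kSmallMax) return &small_;
    if (size <= kMediumMax) return &medium_;
    if (size <= kLargeMax) return &large_;
    return &huge_;
  }

  void Free(Node* node) {             // Push onto the head of the right bucket.
    Node** bucket = BucketFor(node->size);
    node->next = *bucket;
    *bucket = node;
  }
};

int main() {
  SegregatedFreeList list;
  Node a{24, nullptr}, b{200, nullptr}, c{5000, nullptr};
  list.Free(&a);
  list.Free(&b);
  list.Free(&c);
  std::printf("small head=%d, medium head=%d, huge head=%d\n",
              list.small_->size, list.medium_->size, list.huge_->size);
  return 0;
}
```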
| 1497 | 1436 |
| 1498 | 1437 |
| 1499 MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) { | 1438 // Allocation on the old space free list. If it succeeds then a new linear |
| 1439 // allocation space has been set up with the top and limit of the space. If |
| 1440 // the allocation fails then NULL is returned, and the caller can perform a GC |
| 1441 // or allocate a new page before retrying. |
| 1442 HeapObject* OldSpaceFreeList::Allocate(int size_in_bytes) { |
| 1500 ASSERT(0 < size_in_bytes); | 1443 ASSERT(0 < size_in_bytes); |
| 1501 ASSERT(size_in_bytes <= kMaxBlockSize); | 1444 ASSERT(size_in_bytes <= kMaxBlockSize); |
| 1502 ASSERT(IsAligned(size_in_bytes, kPointerSize)); | 1445 ASSERT(IsAligned(size_in_bytes, kPointerSize)); |
| 1446 // Don't free list allocate if there is linear space available. |
| 1447 ASSERT(owner_->limit() - owner_->top() < size_in_bytes); |
| 1503 | 1448 |
| 1504 if (needs_rebuild_) RebuildSizeList(); | 1449 FreeListNode* new_node = NULL; |
| 1505 int index = size_in_bytes >> kPointerSizeLog2; | 1450 int new_node_size = 0; |
| 1506 // Check for a perfect fit. | 1451 |
| 1507 if (free_[index].head_node_ != NULL) { | 1452 if (size_in_bytes <= kSmallAllocationMax && small_list_ != NULL) { |
| 1508 FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_); | 1453 new_node = small_list_; |
| 1509 // If this was the last block of its size, remove the size. | 1454 new_node_size = new_node->Size(); |
| 1510 if ((free_[index].head_node_ = node->next()) == NULL) RemoveSize(index); | 1455 small_list_ = new_node->next(); |
| 1511 available_ -= size_in_bytes; | 1456 } else if (size_in_bytes <= kMediumAllocationMax && medium_list_ != NULL) { |
| 1512 *wasted_bytes = 0; | 1457 new_node = medium_list_; |
| 1513 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. | 1458 new_node_size = new_node->Size(); |
| 1514 return node; | 1459 medium_list_ = new_node->next(); |
| 1460 } else if (size_in_bytes <= kLargeAllocationMax && large_list_ != NULL) { |
| 1461 new_node = large_list_; |
| 1462 new_node_size = new_node->Size(); |
| 1463 large_list_ = new_node->next(); |
| 1464 } else { |
| 1465 for (FreeListNode** cur = &huge_list_; |
| 1466 *cur != NULL; |
| 1467 cur = (*cur)->next_address()) { |
| 1468 ASSERT((*cur)->map() == Heap::raw_unchecked_byte_array_map()); |
| 1469 ByteArray* cur_as_byte_array = reinterpret_cast<ByteArray*>(*cur); |
| 1470 int size = cur_as_byte_array->Size(); |
| 1471 if (size >= size_in_bytes) { |
| 1472 // Large enough node found. Unlink it from the list. |
| 1473 new_node = *cur; |
| 1474 new_node_size = size; |
| 1475 *cur = new_node->next(); |
| 1476 break; |
| 1477 } |
| 1478 } |
| 1479 if (new_node == NULL) return NULL; |
| 1515 } | 1480 } |
| 1516 // Search the size list for the best fit. | 1481 |
| 1517 int prev = finger_ < index ? finger_ : kHead; | 1482 available_ -= new_node_size; |
| 1518 int cur = FindSize(index, &prev); | 1483 ASSERT(available_ == SumFreeLists()); |
| 1519 ASSERT(index < cur); | 1484 |
| 1520 if (cur == kEnd) { | 1485 int old_linear_size = owner_->limit() - owner_->top(); |
| 1521 // No large enough size in list. | 1486 // Mark the old linear allocation area with a byte array so it can be |
| 1522 *wasted_bytes = 0; | 1487 // skipped when scanning the heap. This also puts it back in the free list |
| 1523 return Failure::RetryAfterGC(owner_); | 1488 // if it is big enough. |
| 1489 owner_->Free(owner_->top(), old_linear_size); |
| 1490 IncrementalMarking::Step(size_in_bytes - old_linear_size); |
| 1491 |
| 1492 ASSERT(new_node_size - size_in_bytes >= 0); // New linear size. |
| 1493 |
| 1494 const int kThreshold = IncrementalMarking::kAllocatedThreshold; |
| 1495 |
| 1496 if (new_node_size - size_in_bytes > kThreshold && |
| 1497 IncrementalMarking::state() == IncrementalMarking::MARKING) { |
| 1498 // We don't want to give too large linear areas to the allocator while |
| 1499 // incremental marking is going on, because we won't check again whether |
| 1500 // we want to do another increment until the linear area is used up. |
| 1501 owner_->Free(new_node->address() + size_in_bytes + kThreshold, |
| 1502 new_node_size - size_in_bytes - kThreshold); |
| 1503 owner_->SetTop(new_node->address() + size_in_bytes, |
| 1504 new_node->address() + size_in_bytes + kThreshold); |
| 1505 } else { |
| 1506 // Normally we give the rest of the node to the allocator as its new |
| 1507 // linear allocation area. |
| 1508 owner_->SetTop(new_node->address() + size_in_bytes, |
| 1509 new_node->address() + new_node_size); |
| 1524 } | 1510 } |
| 1525 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. | |
| 1526 int rem = cur - index; | |
| 1527 int rem_bytes = rem << kPointerSizeLog2; | |
| 1528 FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_); | |
| 1529 ASSERT(cur_node->Size() == (cur << kPointerSizeLog2)); | |
| 1530 FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ + | |
| 1531 size_in_bytes); | |
| 1532 // Distinguish the cases prev < rem < cur and rem <= prev < cur | |
| 1533 // to avoid many redundant tests and calls to Insert/RemoveSize. | |
| 1534 if (prev < rem) { | |
| 1535 // Simple case: insert rem between prev and cur. | |
| 1536 finger_ = prev; | |
| 1537 free_[prev].next_size_ = rem; | |
| 1538 // If this was the last block of size cur, remove the size. | |
| 1539 if ((free_[cur].head_node_ = cur_node->next()) == NULL) { | |
| 1540 free_[rem].next_size_ = free_[cur].next_size_; | |
| 1541 } else { | |
| 1542 free_[rem].next_size_ = cur; | |
| 1543 } | |
| 1544 // Add the remainder block. | |
| 1545 rem_node->set_size(rem_bytes); | |
| 1546 rem_node->set_next(free_[rem].head_node_); | |
| 1547 free_[rem].head_node_ = rem_node->address(); | |
| 1548 } else { | |
| 1549 // If this was the last block of size cur, remove the size. | |
| 1550 if ((free_[cur].head_node_ = cur_node->next()) == NULL) { | |
| 1551 finger_ = prev; | |
| 1552 free_[prev].next_size_ = free_[cur].next_size_; | |
| 1553 } | |
| 1554 if (rem_bytes < kMinBlockSize) { | |
| 1555 // Too-small remainder is wasted. | |
| 1556 rem_node->set_size(rem_bytes); | |
| 1557 available_ -= size_in_bytes + rem_bytes; | |
| 1558 *wasted_bytes = rem_bytes; | |
| 1559 return cur_node; | |
| 1560 } | |
| 1561 // Add the remainder block and, if needed, insert its size. | |
| 1562 rem_node->set_size(rem_bytes); | |
| 1563 rem_node->set_next(free_[rem].head_node_); | |
| 1564 free_[rem].head_node_ = rem_node->address(); | |
| 1565 if (rem_node->next() == NULL) InsertSize(rem); | |
| 1566 } | |
| 1567 available_ -= size_in_bytes; | |
| 1568 *wasted_bytes = 0; | |
| 1569 return cur_node; | |
| 1570 } | |
| 1571 | 1511 |
| 1572 | 1512 return new_node; |
| 1573 void OldSpaceFreeList::MarkNodes() { | |
| 1574 for (int i = 0; i < kFreeListsLength; i++) { | |
| 1575 Address cur_addr = free_[i].head_node_; | |
| 1576 while (cur_addr != NULL) { | |
| 1577 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr); | |
| 1578 cur_addr = cur_node->next(); | |
| 1579 IntrusiveMarking::SetMark(cur_node); | |
| 1580 } | |
| 1581 } | |
| 1582 } | 1513 } |
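The new Allocate pulls one node off a bucket, carves the requested object out of its front, and turns the remainder into the next linear allocation window, capped while incremental marking runs so the marker gets another step soon. A sketch of just that splitting arithmetic, under those assumptions:

```cpp
// Sketch of carving an allocation out of a larger free node: the first
// `requested` bytes become the object, the tail becomes the new linear
// allocation window, optionally capped while marking is active.
#include <cassert>
#include <cstdio>

struct SplitResult {
  int object_offset;          // Where the object starts inside the node.
  int window_begin;           // New linear area [window_begin, window_end).
  int window_end;
  int returned_to_free_list;  // Bytes handed straight back to the free list.
};

SplitResult Split(int node_size, int requested, bool marking, int cap) {
  assert(node_size >= requested);
  int remainder = node_size - requested;
  SplitResult r{0, requested, requested + remainder, 0};
  if (marking && remainder > cap) {
    // Keep the window small during incremental marking; the excess goes
    // back to the free list so bump allocation runs out sooner and the
    // marker is consulted again.
    r.window_end = requested + cap;
    r.returned_to_free_list = remainder - cap;
  }
  return r;
}

int main() {
  SplitResult r = Split(/*node_size=*/1024, /*requested=*/128,
                        /*marking=*/true, /*cap=*/256);
  std::printf("object at %d, window [%d, %d), returned %d bytes\n",
              r.object_offset, r.window_begin, r.window_end,
              r.returned_to_free_list);
  return 0;
}
```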
| 1583 | 1514 |
| 1584 | 1515 |
| 1585 #ifdef DEBUG | 1516 #ifdef DEBUG |
| 1586 bool OldSpaceFreeList::Contains(FreeListNode* node) { | 1517 intptr_t OldSpaceFreeList::SumFreeList(FreeListNode* cur) { |
| 1587 for (int i = 0; i < kFreeListsLength; i++) { | 1518 intptr_t sum = 0; |
| 1588 Address cur_addr = free_[i].head_node_; | 1519 while (cur != NULL) { |
| 1589 while (cur_addr != NULL) { | 1520 ASSERT(cur->map() == Heap::raw_unchecked_byte_array_map()); |
| 1590 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr); | 1521 ByteArray* cur_as_byte_array = reinterpret_cast<ByteArray*>(cur); |
| 1591 if (cur_node == node) return true; | 1522 sum += cur_as_byte_array->Size(); |
| 1592 cur_addr = cur_node->next(); | 1523 cur = cur->next(); |
| 1593 } | |
| 1594 } | 1524 } |
| 1595 return false; | 1525 return sum; |
| 1596 } | 1526 } |
| 1597 | 1527 |
| 1598 | 1528 |
| 1599 void FreeListNode::Zap() { | 1529 intptr_t OldSpaceFreeList::SumFreeLists() { |
| 1600 if (IsByteArray()) { | 1530 intptr_t sum = SumFreeList(small_list_); |
| 1601 ByteArray* ba = ByteArray::cast(this); | 1531 sum += SumFreeList(medium_list_); |
| 1602 // Skip map, length and next pointer. | 1532 sum += SumFreeList(large_list_); |
| 1603 Address payload_start = ba->GetDataStartAddress() + kPointerSize; | 1533 sum += SumFreeList(huge_list_); |
| 1604 Address payload_end = ba->address() + ba->Size(); | 1534 return sum; |
| 1605 for (Address cur = payload_start; | |
| 1606 cur < payload_end; | |
| 1607 cur += kPointerSize) { | |
| 1608 *reinterpret_cast<uintptr_t*>(cur) = kFreeListZapValue; | |
| 1609 } | |
| 1610 } | |
| 1611 } | |
| 1612 | |
| 1613 | |
| 1614 void OldSpaceFreeList::Zap() { | |
| 1615 for (int i = 0; i < kFreeListsLength; i++) { | |
| 1616 Address cur_addr = free_[i].head_node_; | |
| 1617 while (cur_addr != NULL) { | |
| 1618 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr); | |
| 1619 cur_node->Zap(); | |
| 1620 cur_addr = cur_node->next(); | |
| 1621 } | |
| 1622 } | |
| 1623 } | |
| 1624 | |
| 1625 | |
| 1626 void FixedSizeFreeList::Zap() { | |
| 1627 Address cur_addr = head_; | |
| 1628 while (cur_addr != NULL && cur_addr != tail_) { | |
| 1629 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr); | |
| 1630 cur_node->Zap(); | |
| 1631 cur_addr = cur_node->next(); | |
| 1632 } | |
| 1633 } | 1535 } |
| 1634 #endif | 1536 #endif |
| 1635 | 1537 |
| 1636 | 1538 |
| 1637 FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size) | |
| 1638 : owner_(owner), object_size_(object_size) { | |
| 1639 Reset(); | |
| 1640 } | |
| 1641 | |
| 1642 | |
| 1643 void FixedSizeFreeList::Reset() { | |
| 1644 available_ = 0; | |
| 1645 head_ = tail_ = NULL; | |
| 1646 } | |
| 1647 | |
| 1648 | |
| 1649 void FixedSizeFreeList::Free(Address start) { | |
| 1650 #ifdef DEBUG | |
| 1651 MemoryAllocator::ZapBlock(start, object_size_); | |
| 1652 #endif | |
| 1653 // We only use the freelists with mark-sweep. | |
| 1654 ASSERT(!MarkCompactCollector::IsCompacting()); | |
| 1655 FreeListNode* node = FreeListNode::FromAddress(start); | |
| 1656 node->set_size(object_size_); | |
| 1657 node->set_next(NULL); | |
| 1658 if (head_ == NULL) { | |
| 1659 tail_ = head_ = node->address(); | |
| 1660 } else { | |
| 1661 FreeListNode::FromAddress(tail_)->set_next(node->address()); | |
| 1662 tail_ = node->address(); | |
| 1663 } | |
| 1664 available_ += object_size_; | |
| 1665 } | |
| 1666 | |
| 1667 | |
| 1668 MaybeObject* FixedSizeFreeList::Allocate() { | |
| 1669 if (head_ == NULL) { | |
| 1670 return Failure::RetryAfterGC(owner_); | |
| 1671 } | |
| 1672 | |
| 1673 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. | |
| 1674 FreeListNode* node = FreeListNode::FromAddress(head_); | |
| 1675 head_ = node->next(); | |
| 1676 available_ -= object_size_; | |
| 1677 return node; | |
| 1678 } | |
| 1679 | |
| 1680 | |
| 1681 void FixedSizeFreeList::MarkNodes() { | |
| 1682 Address cur_addr = head_; | |
| 1683 while (cur_addr != NULL && cur_addr != tail_) { | |
| 1684 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr); | |
| 1685 cur_addr = cur_node->next(); | |
| 1686 IntrusiveMarking::SetMark(cur_node); | |
| 1687 } | |
| 1688 } | |
| 1689 | |
| 1690 | |
| 1691 // ----------------------------------------------------------------------------- | 1539 // ----------------------------------------------------------------------------- |
| 1692 // OldSpace implementation | 1540 // OldSpace implementation |
| 1693 | 1541 |
| 1694 void OldSpace::PrepareForMarkCompact(bool will_compact) { | 1542 void OldSpace::PrepareForMarkCompact(bool will_compact) { |
| 1695 ASSERT(!will_compact); | 1543 ASSERT(!will_compact); |
| 1696 // Call prepare of the super class. | 1544 // Call prepare of the super class. |
| 1697 PagedSpace::PrepareForMarkCompact(will_compact); | 1545 PagedSpace::PrepareForMarkCompact(will_compact); |
| 1698 | 1546 |
| 1699 // During a non-compacting collection, everything below the linear | |
| 1700 // allocation pointer is considered allocated (everything above is | |
| 1701 // available) and we will rediscover available and wasted bytes during | |
| 1702 // the collection. | |
| 1703 accounting_stats_.AllocateBytes(free_list_.available()); | |
| 1704 accounting_stats_.FillWastedBytes(Waste()); | |
| 1705 | |
| 1706 // Clear the free list before a full GC---it will be rebuilt afterward. | 1547 // Clear the free list before a full GC---it will be rebuilt afterward. |
| 1707 free_list_.Reset(); | 1548 free_list_.Reset(); |
| 1708 } | 1549 } |
| 1709 | 1550 |
| 1710 | 1551 |
| 1711 bool NewSpace::ReserveSpace(int bytes) { | 1552 bool NewSpace::ReserveSpace(int bytes) { |
| 1712 // We can't reliably unpack a partial snapshot that needs more new space | 1553 // We can't reliably unpack a partial snapshot that needs more new space |
| 1713 // space than the minimum NewSpace size. | 1554 // space than the minimum NewSpace size. |
| 1714 ASSERT(bytes <= InitialCapacity()); | 1555 ASSERT(bytes <= InitialCapacity()); |
| 1715 Address limit = allocation_info_.limit; | 1556 Address limit = allocation_info_.limit; |
| 1716 Address top = allocation_info_.top; | 1557 Address top = allocation_info_.top; |
| 1717 return limit - top >= bytes; | 1558 return limit - top >= bytes; |
| 1718 } | 1559 } |
| 1719 | 1560 |
| 1720 | 1561 |
| 1721 void PagedSpace::FreePages(Page* prev, Page* last) { | |
| 1722 if (last == AllocationTopPage()) { | |
| 1723 // Pages are already at the end of used pages. | |
| 1724 // Just mark them as continuous. | |
| 1725 Page* p = prev == NULL ? first_page_ : prev->next_page(); | |
| 1726 Page* end_page = last->next_page(); | |
| 1727 do { | |
| 1728 p->SetFlag(Page::IS_CONTINUOUS); | |
| 1729 p = p->next_page(); | |
| 1730 } while (p != end_page); | |
| 1731 return; | |
| 1732 } | |
| 1733 | |
| 1734 Page* first = NULL; | |
| 1735 | |
| 1736 // Remove pages from the list. | |
| 1737 if (prev == NULL) { | |
| 1738 first = first_page_; | |
| 1739 first_page_ = last->next_page(); | |
| 1740 } else { | |
| 1741 first = prev->next_page(); | |
| 1742 prev->set_next_page(last->next_page()); | |
| 1743 } | |
| 1744 | |
| 1745 // Attach it after the last page. | |
| 1746 last_page_->set_next_page(first); | |
| 1747 last_page_ = last; | |
| 1748 last->set_next_page(NULL); | |
| 1749 | |
| 1750 // Clean them up. | |
| 1751 do { | |
| 1752 first->InvalidateWatermark(true); | |
| 1753 first->SetAllocationWatermark(first->ObjectAreaStart()); | |
| 1754 first->SetCachedAllocationWatermark(first->ObjectAreaStart()); | |
| 1755 first->SetFlag(Page::IS_CONTINUOUS); | |
| 1756 first->markbits()->Clear(); | |
| 1757 first = first->next_page(); | |
| 1758 } while (first->is_valid()); | |
| 1759 } | |
| 1760 | |
| 1761 | |
| 1762 void PagedSpace::PrepareForMarkCompact(bool will_compact) { | 1562 void PagedSpace::PrepareForMarkCompact(bool will_compact) { |
| 1763 ASSERT(!will_compact); | 1563 ASSERT(!will_compact); |
| 1764 } | 1564 } |
| 1765 | 1565 |
| 1766 | 1566 |
| 1767 bool PagedSpace::ReserveSpace(int bytes) { | 1567 bool PagedSpace::ReserveSpace(int size_in_bytes) { |
| 1768 Address limit = allocation_info_.limit; | 1568 ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize); |
| 1769 Address top = allocation_info_.top; | 1569 ASSERT(size_in_bytes == RoundUp(size_in_bytes, kPointerSize)); |
| 1770 if (limit - top >= bytes) return true; | 1570 Address current_top = allocation_info_.top; |
| 1571 Address new_top = current_top + size_in_bytes; |
| 1572 if (new_top <= allocation_info_.limit) return true; |
| 1771 | 1573 |
| 1772 // There wasn't enough space in the current page. Lets put the rest | 1574 HeapObject* new_area = free_list_.Allocate(size_in_bytes); |
| 1773 // of the page on the free list and start a fresh page. | 1575 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); |
| 1774 PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_)); | 1576 if (new_area == NULL) return false; |
| 1775 | 1577 |
| 1776 Page* reserved_page = TopPageOf(allocation_info_); | 1578 int old_linear_size = limit() - top(); |
| 1777 int bytes_left_to_reserve = bytes; | 1579 // Mark the old linear allocation area with a byte array so it can be |
| 1778 while (bytes_left_to_reserve > 0) { | 1580 // skipped when scanning the heap. This also puts it back in the free list |
| 1779 if (!reserved_page->next_page()->is_valid()) { | 1581 // if it is big enough. |
| 1780 if (Heap::OldGenerationAllocationLimitReached()) return false; | 1582 Free(top(), old_linear_size); |
| 1781 Expand(); | 1583 |
| 1782 } | 1584 SetTop(new_area->address(), new_area->address() + size_in_bytes); |
| 1783 bytes_left_to_reserve -= Page::kPageSize; | |
| 1784 reserved_page = reserved_page->next_page(); | |
| 1785 if (!reserved_page->is_valid()) return false; | |
| 1786 } | |
| 1787 ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid()); | |
| 1788 TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true); | |
| 1789 SetAllocationInfo(&allocation_info_, | |
| 1790 TopPageOf(allocation_info_)->next_page()); | |
| 1791 return true; | 1585 return true; |
| 1792 } | 1586 } |
| 1793 | 1587 |
| 1794 | 1588 |
| 1795 // You have to call this last, since the implementation from PagedSpace | 1589 // You have to call this last, since the implementation from PagedSpace |
| 1796 // doesn't know that memory was 'promised' to large object space. | 1590 // doesn't know that memory was 'promised' to large object space. |
| 1797 bool LargeObjectSpace::ReserveSpace(int bytes) { | 1591 bool LargeObjectSpace::ReserveSpace(int bytes) { |
| 1798 return Heap::OldGenerationSpaceAvailable() >= bytes; | 1592 return Heap::OldGenerationSpaceAvailable() >= bytes; |
| 1799 } | 1593 } |
| 1800 | 1594 |
| 1801 | 1595 |
| 1802 // Slow case for normal allocation. Try in order: (1) allocate in the next | 1596 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { |
| 1803 // page in the space, (2) allocate off the space's free list, (3) expand the | 1597 // Allocation in this space has failed. |
| 1804 // space, (4) fail. | |
| 1805 HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) { | |
| 1806 // Linear allocation in this space has failed. If there is another page | |
| 1807 // in the space, move to that page and allocate there. This allocation | |
| 1808 // should succeed (size_in_bytes should not be greater than a page's | |
| 1809 // object area size). | |
| 1810 Page* current_page = TopPageOf(allocation_info_); | |
| 1811 if (current_page->next_page()->is_valid()) { | |
| 1812 return AllocateInNextPage(current_page, size_in_bytes); | |
| 1813 } | |
| 1814 | |
| 1815 // There is no next page in this space. Try free list allocation unless that | |
| 1816 // is currently forbidden. | |
| 1817 if (!Heap::linear_allocation()) { | |
| 1818 int wasted_bytes; | |
| 1819 Object* result; | |
| 1820 MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes); | |
| 1821 accounting_stats_.WasteBytes(wasted_bytes); | |
| 1822 if (maybe->ToObject(&result)) { | |
| 1823 accounting_stats_.AllocateBytes(size_in_bytes); | |
| 1824 | |
| 1825 HeapObject* obj = HeapObject::cast(result); | |
| 1826 Page* p = Page::FromAddress(obj->address()); | |
| 1827 | |
| 1828 if (obj->address() >= p->AllocationWatermark()) { | |
| 1829 // There should be no hole between the allocation watermark | |
| 1830 // and allocated object address. | |
| 1831 // Memory above the allocation watermark was not swept and | |
| 1832 // might contain garbage pointers to new space. | |
| 1833 ASSERT(obj->address() == p->AllocationWatermark()); | |
| 1834 p->SetAllocationWatermark(obj->address() + size_in_bytes); | |
| 1835 } | |
| 1836 | |
| 1837 if (!p->IsFlagSet(Page::IS_CONTINUOUS)) { | |
| 1838 // This page is not continuous so we have to mark objects | |
| 1839 // that should be visited by HeapObjectIterator. | |
| 1840 ASSERT(!Marking::IsMarked(obj)); | |
| 1841 Marking::SetMark(obj); | |
| 1842 } | |
| 1843 | |
| 1844 return obj; | |
| 1845 } | |
| 1846 } | |
| 1847 | 1598 |
| 1848 // Free list allocation failed and there is no next page. Fail if we have | 1599 // Free list allocation failed and there is no next page. Fail if we have |
| 1849 // hit the old generation size limit that should cause a garbage | 1600 // hit the old generation size limit that should cause a garbage |
| 1850 // collection. | 1601 // collection. |
| 1851 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) { | 1602 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) { |
| 1852 return NULL; | 1603 return NULL; |
| 1853 } | 1604 } |
| 1854 | 1605 |
| 1855 // Try to expand the space and allocate in the new next page. | 1606 // Try to expand the space and allocate in the new next page. |
| 1856 ASSERT(!current_page->next_page()->is_valid()); | |
| 1857 if (Expand()) { | 1607 if (Expand()) { |
| 1858 return AllocateInNextPage(current_page, size_in_bytes); | 1608 return free_list_.Allocate(size_in_bytes); |
| 1859 } | 1609 } |
| 1860 | 1610 |
| 1861 // Finally, fail. | 1611 // Finally, fail. |
| 1862 return NULL; | 1612 return NULL; |
| 1863 } | 1613 } |
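SlowAllocateRaw captures the fallback order once bump allocation fails: try the free list, respect the old-generation limit so a GC can be triggered, otherwise grow the space and retry, and only then give up. A compact sketch of that ordering with placeholder hooks rather than the real heap interfaces:

```cpp
// Sketch of the slow-path ordering: free list first, then a GC-pressure
// check, then expansion, then failure. The hooks are placeholders.
#include <cstdio>
#include <functional>

void* SlowAllocate(int size,
                   const std::function<void*(int)>& free_list_allocate,
                   const std::function<bool()>& gc_limit_reached,
                   const std::function<bool()>& expand) {
  if (void* result = free_list_allocate(size)) return result;
  if (gc_limit_reached()) return nullptr;   // Let the caller trigger a GC.
  if (expand()) return free_list_allocate(size);
  return nullptr;                           // Out of options: fail the allocation.
}

int main() {
  static char backing[64];
  bool expanded = false;
  void* p = SlowAllocate(
      32,
      [&](int) -> void* { return expanded ? backing : nullptr; },
      [] { return false; },
      [&] { expanded = true; return true; });
  std::printf("allocation %s\n",
              p != nullptr ? "succeeded after expansion" : "failed");
  return 0;
}
```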
| 1864 | 1614 |
| 1865 | 1615 |
| 1866 void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) { | |
| 1867 current_page->SetAllocationWatermark(allocation_info_.top); | |
| 1868 int free_size = | |
| 1869 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top); | |
| 1870 if (free_size > 0) { | |
| 1871 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size); | |
| 1872 accounting_stats_.WasteBytes(wasted_bytes); | |
| 1873 } | |
| 1874 } | |
| 1875 | |
| 1876 | |
| 1877 void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) { | |
| 1878 current_page->SetAllocationWatermark(allocation_info_.top); | |
| 1879 int free_size = | |
| 1880 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top); | |
| 1881 // In the fixed space free list, every item has the space's fixed object size. | |
| 1882 // We use up the rest of the page while preserving this invariant. | |
| 1883 while (free_size >= object_size_in_bytes_) { | |
| 1884 free_list_.Free(allocation_info_.top); | |
| 1885 allocation_info_.top += object_size_in_bytes_; | |
| 1886 free_size -= object_size_in_bytes_; | |
| 1887 accounting_stats_.WasteBytes(object_size_in_bytes_); | |
| 1888 } | |
| 1889 } | |
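
Because every free-list node in a fixed space has exactly the object size, the unused tail of a page is not freed as one variable-sized block but consumed in object-sized chunks, with any smaller remainder wasted. A standalone sketch of that chunking, using invented names (FixedFreeList, PutRestOfPageOnFreeList), might look like this:

    // Standalone sketch (not V8 code): carve the unused tail of a page into
    // fixed-size free-list nodes so every node has exactly object_size bytes.
    #include <cstddef>
    #include <vector>

    struct FixedFreeList {
      std::vector<void*> nodes;   // LIFO list of equally sized blocks
      void Free(void* block) { nodes.push_back(block); }
      void* Allocate() {
        if (nodes.empty()) return NULL;
        void* block = nodes.back();
        nodes.pop_back();
        return block;
      }
    };

    // Put the bytes in [top, page_end) on the free list in object_size chunks.
    // Any tail smaller than object_size is simply left unused.
    inline size_t PutRestOfPageOnFreeList(char* top, char* page_end,
                                          size_t object_size,
                                          FixedFreeList* free_list) {
      size_t counted = 0;
      while (static_cast<size_t>(page_end - top) >= object_size) {
        free_list->Free(top);
        top += object_size;
        counted += object_size;   // mirrors accounting_stats_.WasteBytes() above
      }
      return counted;
    }

FixedSpace::DeallocateBlock further down applies the same idea to an arbitrary block whose size is already a multiple of the object size.
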
| 1890 | |
| 1891 | |
| 1892 // Add the block at the top of the page to the space's free list, set the | |
| 1893 // allocation info to the next page (which is assumed to exist), and allocate | |
| 1894 // linearly there. | |
| 1895 HeapObject* OldSpace::AllocateInNextPage(Page* current_page, | |
| 1896 int size_in_bytes) { | |
| 1897 ASSERT(current_page->next_page()->is_valid()); | |
| 1898 Page* next_page = current_page->next_page(); | |
| 1899 next_page->ClearGCFields(); | |
| 1900 PutRestOfCurrentPageOnFreeList(current_page); | |
| 1901 SetAllocationInfo(&allocation_info_, next_page); | |
| 1902 return AllocateLinearly(&allocation_info_, size_in_bytes); | |
| 1903 } | |
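
AllocateInNextPage retires the current page, moves the allocation info to the next page, and resumes bump-pointer allocation there. The bump-pointer step is the whole fast path; ignoring alignment and watermarks, an AllocateLinearly-style helper reduces to a sketch like this (names invented):

    // Standalone sketch (not V8 code): bump-pointer ("linear") allocation
    // within one page. alloc_top and alloc_limit stand in for AllocationInfo.
    #include <cstddef>

    inline void* AllocateLinearlySketch(char** alloc_top, char* alloc_limit,
                                        size_t size_in_bytes) {
      if (static_cast<size_t>(alloc_limit - *alloc_top) < size_in_bytes) {
        return NULL;                 // caller falls back to the slow path
      }
      void* result = *alloc_top;
      *alloc_top += size_in_bytes;   // the only work on the fast path
      return result;
    }
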
| 1904 | |
| 1905 | |
| 1906 void OldSpace::DeallocateBlock(Address start, | |
| 1907 int size_in_bytes, | |
| 1908 bool add_to_freelist) { | |
| 1909 Free(start, size_in_bytes, add_to_freelist); | |
| 1910 } | |
| 1911 | |
| 1912 | |
| 1913 #ifdef DEBUG | 1616 #ifdef DEBUG |
| 1914 struct CommentStatistic { | 1617 struct CommentStatistic { |
| 1915 const char* comment; | 1618 const char* comment; |
| 1916 int size; | 1619 int size; |
| 1917 int count; | 1620 int count; |
| 1918 void Clear() { | 1621 void Clear() { |
| 1919 comment = NULL; | 1622 comment = NULL; |
| 1920 size = 0; | 1623 size = 0; |
| 1921 count = 0; | 1624 count = 0; |
| 1922 } | 1625 } |
| (...skipping 88 matching lines...) |
| 2011 } | 1714 } |
| 2012 EnterComment(comment_txt, flat_delta); | 1715 EnterComment(comment_txt, flat_delta); |
| 2013 } | 1716 } |
| 2014 | 1717 |
| 2015 | 1718 |
| 2016 // Collects code size statistics: | 1719 // Collects code size statistics: |
| 2017 // - by code kind | 1720 // - by code kind |
| 2018 // - by code comment | 1721 // - by code comment |
| 2019 void PagedSpace::CollectCodeStatistics() { | 1722 void PagedSpace::CollectCodeStatistics() { |
| 2020 HeapObjectIterator obj_it(this); | 1723 HeapObjectIterator obj_it(this); |
| 2021 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { | 1724 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) { |
| 2022 if (obj->IsCode()) { | 1725 if (obj->IsCode()) { |
| 2023 Code* code = Code::cast(obj); | 1726 Code* code = Code::cast(obj); |
| 2024 code_kind_statistics[code->kind()] += code->Size(); | 1727 code_kind_statistics[code->kind()] += code->Size(); |
| 2025 RelocIterator it(code); | 1728 RelocIterator it(code); |
| 2026 int delta = 0; | 1729 int delta = 0; |
| 2027 const byte* prev_pc = code->instruction_start(); | 1730 const byte* prev_pc = code->instruction_start(); |
| 2028 while (!it.done()) { | 1731 while (!it.done()) { |
| 2029 if (it.rinfo()->rmode() == RelocInfo::COMMENT) { | 1732 if (it.rinfo()->rmode() == RelocInfo::COMMENT) { |
| 2030 delta += static_cast<int>(it.rinfo()->pc() - prev_pc); | 1733 delta += static_cast<int>(it.rinfo()->pc() - prev_pc); |
| 2031 CollectCommentStatistics(&it); | 1734 CollectCommentStatistics(&it); |
| 2032 prev_pc = it.rinfo()->pc(); | 1735 prev_pc = it.rinfo()->pc(); |
| 2033 } | 1736 } |
| 2034 it.next(); | 1737 it.next(); |
| 2035 } | 1738 } |
| 2036 | 1739 |
| 2037 ASSERT(code->instruction_start() <= prev_pc && | 1740 ASSERT(code->instruction_start() <= prev_pc && |
| 2038 prev_pc <= code->instruction_end()); | 1741 prev_pc <= code->instruction_end()); |
| 2039 delta += static_cast<int>(code->instruction_end() - prev_pc); | 1742 delta += static_cast<int>(code->instruction_end() - prev_pc); |
| 2040 EnterComment("NoComment", delta); | 1743 EnterComment("NoComment", delta); |
| 2041 } | 1744 } |
| 2042 } | 1745 } |
| 2043 } | 1746 } |
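
The loop above charges each stretch of instruction bytes either to the comment region that covers it (via CollectCommentStatistics, which lives in the elided part of the diff) or to a catch-all "NoComment" bucket for the gaps in between and the tail after the last comment. Reduced to plain offsets instead of a RelocIterator, and with invented types, the bookkeeping looks roughly like this:

    // Standalone sketch (not V8 code): split an instruction stream of
    // code_size bytes into per-comment buckets. Regions are assumed to be
    // sorted by offset, non-overlapping, and contained within code_size.
    #include <cstddef>
    #include <map>
    #include <string>
    #include <vector>

    struct CommentRegion {
      size_t offset;      // where the commented region starts
      size_t length;      // how many instruction bytes it covers
      std::string text;
    };

    inline std::map<std::string, size_t> CollectCommentSizes(
        size_t code_size, const std::vector<CommentRegion>& regions) {
      std::map<std::string, size_t> sizes;
      size_t prev_pc = 0;
      size_t no_comment = 0;
      for (size_t i = 0; i < regions.size(); ++i) {
        no_comment += regions[i].offset - prev_pc;       // gap before the region
        sizes[regions[i].text] += regions[i].length;     // the commented bytes
        prev_pc = regions[i].offset + regions[i].length;
      }
      no_comment += code_size - prev_pc;                 // tail after last region
      sizes["NoComment"] += no_comment;
      return sizes;
    }

The real code also handles nested comments and per-kind code totals; this sketch only shows how uncommented bytes end up in the "NoComment" bucket.
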
| 2044 | 1747 |
| 2045 | 1748 |
| 2046 void OldSpace::ReportStatistics() { | 1749 void PagedSpace::ReportStatistics() { |
| 2047 int pct = static_cast<int>(Available() * 100 / Capacity()); | 1750 int pct = static_cast<int>(Available() * 100 / Capacity()); |
| 2048 PrintF(" capacity: %" V8_PTR_PREFIX "d" | 1751 PrintF(" capacity: %" V8_PTR_PREFIX "d" |
| 2049 ", waste: %" V8_PTR_PREFIX "d" | 1752 ", waste: %" V8_PTR_PREFIX "d" |
| 2050 ", available: %" V8_PTR_PREFIX "d, %%%d\n", | 1753 ", available: %" V8_PTR_PREFIX "d, %%%d\n", |
| 2051 Capacity(), Waste(), Available(), pct); | 1754 Capacity(), Waste(), Available(), pct); |
| 2052 | 1755 |
| 2053 ClearHistograms(); | 1756 ClearHistograms(); |
| 2054 HeapObjectIterator obj_it(this); | 1757 HeapObjectIterator obj_it(this); |
| 2055 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) | 1758 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) |
| 2056 CollectHistogramInfo(obj); | 1759 CollectHistogramInfo(obj); |
| 2057 ReportHistogram(true); | 1760 ReportHistogram(true); |
| 2058 } | 1761 } |
| 2059 #endif | 1762 #endif |
| 2060 | 1763 |
| 2061 // ----------------------------------------------------------------------------- | 1764 // ----------------------------------------------------------------------------- |
| 2062 // FixedSpace implementation | 1765 // FixedSpace implementation |
| 2063 | 1766 |
| 2064 void FixedSpace::PrepareForMarkCompact(bool will_compact) { | 1767 void FixedSpace::PrepareForMarkCompact(bool will_compact) { |
| 2065 // Call prepare of the super class. | 1768 // Call prepare of the super class. |
| 2066 PagedSpace::PrepareForMarkCompact(will_compact); | 1769 PagedSpace::PrepareForMarkCompact(will_compact); |
| 2067 | 1770 |
| 2068 ASSERT(!will_compact); | 1771 ASSERT(!will_compact); |
| 2069 | 1772 |
| 2070 // During a non-compacting collection, everything below the linear | 1773 // During a non-compacting collection, everything below the linear |
| 2071 // allocation pointer except wasted top-of-page blocks is considered | 1774 // allocation pointer except wasted top-of-page blocks is considered |
| 2072 // allocated and we will rediscover available bytes during the | 1775 // allocated and we will rediscover available bytes during the |
| 2073 // collection. | 1776 // collection. |
| 2074 accounting_stats_.AllocateBytes(free_list_.available()); | 1777 accounting_stats_.AllocateBytes(free_list_.available()); |
| 2075 | 1778 |
| 2076 // Clear the free list before a full GC---it will be rebuilt afterward. | 1779 // Clear the free list before a full GC---it will be rebuilt afterward. |
| 2077 free_list_.Reset(); | 1780 free_list_.Reset(); |
| 2078 } | 1781 } |
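
Counting the free list's bytes as allocated before clearing it means the space reports no available bytes during the collection; the sweeper then rediscovers free memory and rebuilds the list. A toy version of that bookkeeping, with invented names and an assumed identity available = capacity - allocated - waste, is:

    // Standalone sketch (not V8 code): the accounting move made before a
    // non-compacting full GC. Free-list bytes are reclassified as allocated,
    // so "available" drops accordingly until sweeping rediscovers them.
    #include <cassert>
    #include <cstddef>

    struct AllocationStatsSketch {
      size_t capacity;
      size_t allocated;
      size_t waste;

      size_t Available() const { return capacity - allocated - waste; }
      void AllocateBytes(size_t bytes) { allocated += bytes; }
    };

    inline void PrepareForMarkCompactSketch(AllocationStatsSketch* stats,
                                            size_t free_list_available) {
      stats->AllocateBytes(free_list_available);  // free-list bytes now "allocated"
      // ... a free_list.Reset() would follow; the GC rebuilds the list.
      assert(stats->allocated + stats->waste <= stats->capacity);
    }
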
| 2079 | 1782 |
| 2080 | 1783 |
| 2081 // Slow case for normal allocation. Try in order: (1) allocate in the next | |
| 2082 // page in the space, (2) allocate off the space's free list, (3) expand the | |
| 2083 // space, (4) fail. | |
| 2084 HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) { | |
| 2085 ASSERT_EQ(object_size_in_bytes_, size_in_bytes); | |
| 2086 // Linear allocation in this space has failed. If there is another page | |
| 2087 // in the space, move to that page and allocate there. This allocation | |
| 2088 // should succeed. | |
| 2089 Page* current_page = TopPageOf(allocation_info_); | |
| 2090 if (current_page->next_page()->is_valid()) { | |
| 2091 return AllocateInNextPage(current_page, size_in_bytes); | |
| 2092 } | |
| 2093 | |
| 2094 // There is no next page in this space. Try free list allocation unless | |
| 2095 // that is currently forbidden. The fixed space free list implicitly assumes | |
| 2096 // that all free blocks are of the fixed size. | |
| 2097 if (!Heap::linear_allocation()) { | |
| 2098 Object* result; | |
| 2099 MaybeObject* maybe = free_list_.Allocate(); | |
| 2100 if (maybe->ToObject(&result)) { | |
| 2101 accounting_stats_.AllocateBytes(size_in_bytes); | |
| 2102 HeapObject* obj = HeapObject::cast(result); | |
| 2103 Page* p = Page::FromAddress(obj->address()); | |
| 2104 | |
| 2105 if (obj->address() >= p->AllocationWatermark()) { | |
| 2106 // There should be no hole between the allocation watermark | |
| 2107 // and allocated object address. | |
| 2108 // Memory above the allocation watermark was not swept and | |
| 2109 // might contain garbage pointers to new space. | |
| 2110 ASSERT(obj->address() == p->AllocationWatermark()); | |
| 2111 p->SetAllocationWatermark(obj->address() + size_in_bytes); | |
| 2112 } | |
| 2113 | |
| 2114 return obj; | |
| 2115 } | |
| 2116 } | |
| 2117 | |
| 2118 // Free list allocation failed and there is no next page. Fail if we have | |
| 2119 // hit the old generation size limit that should cause a garbage | |
| 2120 // collection. | |
| 2121 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) { | |
| 2122 return NULL; | |
| 2123 } | |
| 2124 | |
| 2125 // Try to expand the space and allocate in the new next page. | |
| 2126 ASSERT(!current_page->next_page()->is_valid()); | |
| 2127 if (Expand()) { | |
| 2128 return AllocateInNextPage(current_page, size_in_bytes); | |
| 2129 } | |
| 2130 | |
| 2131 // Finally, fail. | |
| 2132 return NULL; | |
| 2133 } | |
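
Returning NULL from these slow paths when the old-generation limit has been hit is not an out-of-memory report; it is the signal that lets the caller turn the failed allocation into a garbage collection and retry. A hedged sketch of that contract, with an invented HeapSketch type standing in for V8's Heap, is:

    // Standalone sketch (not V8 code): interpreting NULL from the slow
    // allocation path as "collect, then retry once".
    #include <cstddef>
    #include <cstdlib>

    struct HeapSketch {
      bool always_allocate;
      size_t old_gen_size;
      size_t old_gen_limit;

      bool LimitReached() const {
        return !always_allocate && old_gen_size >= old_gen_limit;
      }

      // Stand-in for the space's slow path: refuse once the soft limit is
      // hit, otherwise hand out plain malloc'ed memory (never freed here).
      void* SlowAllocateRaw(size_t size) {
        if (LimitReached()) return NULL;
        old_gen_size += size;
        return std::malloc(size);
      }

      void CollectGarbage() {
        // A real collector frees and compacts; the sketch just raises the
        // limit above the current size so the retry can succeed.
        old_gen_limit = old_gen_size + old_gen_size / 2 + 4096;
      }
    };

    inline void* AllocateWithRetry(HeapSketch* heap, size_t size) {
      void* result = heap->SlowAllocateRaw(size);
      if (result != NULL) return result;
      heap->CollectGarbage();               // NULL was the request for a GC
      return heap->SlowAllocateRaw(size);   // one retry; NULL again means OOM
    }
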
| 2134 | |
| 2135 | |
| 2136 // Move to the next page (which is assumed to exist) and allocate there. | |
| 2137 // The top-of-page block is always wasted, because it is too small to hold | |
| 2138 // a map. | |
| 2139 HeapObject* FixedSpace::AllocateInNextPage(Page* current_page, | |
| 2140 int size_in_bytes) { | |
| 2141 ASSERT(current_page->next_page()->is_valid()); | |
| 2142 ASSERT(allocation_info_.top == PageAllocationLimit(current_page)); | |
| 2143 ASSERT_EQ(object_size_in_bytes_, size_in_bytes); | |
| 2144 Page* next_page = current_page->next_page(); | |
| 2145 next_page->ClearGCFields(); | |
| 2146 current_page->SetAllocationWatermark(allocation_info_.top); | |
| 2147 accounting_stats_.WasteBytes(page_extra_); | |
| 2148 SetAllocationInfo(&allocation_info_, next_page); | |
| 2149 return AllocateLinearly(&allocation_info_, size_in_bytes); | |
| 2150 } | |
| 2151 | |
| 2152 | |
| 2153 void FixedSpace::DeallocateBlock(Address start, | |
| 2154 int size_in_bytes, | |
| 2155 bool add_to_freelist) { | |
| 2156 // Free-list elements in fixed space are assumed to have a fixed size. | |
| 2157 // We break the free block into chunks and add them to the free list | |
| 2158 // individually. | |
| 2159 int size = object_size_in_bytes(); | |
| 2160 ASSERT(size_in_bytes % size == 0); | |
| 2161 Address end = start + size_in_bytes; | |
| 2162 for (Address a = start; a < end; a += size) { | |
| 2163 Free(a, add_to_freelist); | |
| 2164 } | |
| 2165 } | |
| 2166 | |
| 2167 | |
| 2168 #ifdef DEBUG | |
| 2169 void FixedSpace::ReportStatistics() { | |
| 2170 int pct = static_cast<int>(Available() * 100 / Capacity()); | |
| 2171 PrintF(" capacity: %" V8_PTR_PREFIX "d" | |
| 2172 ", waste: %" V8_PTR_PREFIX "d" | |
| 2173 ", available: %" V8_PTR_PREFIX "d, %%%d\n", | |
| 2174 Capacity(), Waste(), Available(), pct); | |
| 2175 | |
| 2176 ClearHistograms(); | |
| 2177 HeapObjectIterator obj_it(this); | |
| 2178 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) | |
| 2179 CollectHistogramInfo(obj); | |
| 2180 ReportHistogram(false); | |
| 2181 } | |
| 2182 #endif | |
| 2183 | |
| 2184 | |
| 2185 // ----------------------------------------------------------------------------- | 1784 // ----------------------------------------------------------------------------- |
| 2186 // MapSpace implementation | 1785 // MapSpace implementation |
| 2187 | 1786 |
| 2188 void MapSpace::PrepareForMarkCompact(bool will_compact) { | 1787 void MapSpace::PrepareForMarkCompact(bool will_compact) { |
| 2189 // Call prepare of the super class. | 1788 // Call prepare of the super class. |
| 2190 FixedSpace::PrepareForMarkCompact(will_compact); | 1789 FixedSpace::PrepareForMarkCompact(will_compact); |
| 2191 } | 1790 } |
| 2192 | 1791 |
| 2193 | 1792 |
| 2194 #ifdef DEBUG | 1793 #ifdef DEBUG |
| (...skipping 317 matching lines...) |
| 2512 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { | 2111 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { |
| 2513 if (obj->IsCode()) { | 2112 if (obj->IsCode()) { |
| 2514 Code* code = Code::cast(obj); | 2113 Code* code = Code::cast(obj); |
| 2515 code_kind_statistics[code->kind()] += code->Size(); | 2114 code_kind_statistics[code->kind()] += code->Size(); |
| 2516 } | 2115 } |
| 2517 } | 2116 } |
| 2518 } | 2117 } |
| 2519 #endif // DEBUG | 2118 #endif // DEBUG |
| 2520 | 2119 |
| 2521 } } // namespace v8::internal | 2120 } } // namespace v8::internal |