Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2006-2010 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2010 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 50 Initialize(space->bottom(), space->top(), NULL); | 50 Initialize(space->bottom(), space->top(), NULL); |
| 51 } | 51 } |
| 52 | 52 |
| 53 | 53 |
| 54 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, | 54 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, |
| 55 HeapObjectCallback size_func) { | 55 HeapObjectCallback size_func) { |
| 56 Initialize(space->bottom(), space->top(), size_func); | 56 Initialize(space->bottom(), space->top(), size_func); |
| 57 } | 57 } |
| 58 | 58 |
| 59 | 59 |
| 60 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) { | |
| 61 Initialize(start, space->top(), NULL); | |
| 62 } | |
| 63 | |
| 64 | |
| 65 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start, | |
| 66 HeapObjectCallback size_func) { | |
| 67 Initialize(start, space->top(), size_func); | |
| 68 } | |
| 69 | |
| 70 | |
| 71 HeapObjectIterator::HeapObjectIterator(Page* page, | 60 HeapObjectIterator::HeapObjectIterator(Page* page, |
| 72 HeapObjectCallback size_func) { | 61 HeapObjectCallback size_func) { |
| 73 Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func); | 62 Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func); |
| 74 } | 63 } |
| 75 | 64 |
| 76 | 65 |
| 77 void HeapObjectIterator::Initialize(Address cur, Address end, | 66 void HeapObjectIterator::Initialize(Address cur, Address end, |
| 78 HeapObjectCallback size_f) { | 67 HeapObjectCallback size_f) { |
| 79 cur_addr_ = cur; | 68 cur_addr_ = cur; |
| 80 end_addr_ = end; | 69 end_addr_ = end; |
| 81 end_page_ = Page::FromAllocationTop(end); | 70 end_page_ = Page::FromAllocationTop(end); |
| 82 size_func_ = size_f; | 71 size_func_ = size_f; |
| 83 Page* p = Page::FromAllocationTop(cur_addr_); | 72 Page* p = Page::FromAllocationTop(cur_addr_); |
| 84 cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop(); | 73 cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop(); |
| 85 | 74 |
| 75 if (!p->IsFlagSet(Page::IS_CONTINIOUS)) { | |
| 76 cur_addr_ = Marking::FirstLiveObject(cur_addr_, cur_limit_); | |
| 77 if (cur_addr_ > cur_limit_) cur_addr_ = cur_limit_; | |
| 78 } | |
| 79 | |
| 86 #ifdef DEBUG | 80 #ifdef DEBUG |
| 87 Verify(); | 81 Verify(); |
| 88 #endif | 82 #endif |
| 89 } | 83 } |
| 90 | 84 |
| 91 | 85 |
| 92 HeapObject* HeapObjectIterator::FromNextPage() { | 86 HeapObject* HeapObjectIterator::FromNextPage() { |
| 93 if (cur_addr_ == end_addr_) return NULL; | 87 if (cur_addr_ == end_addr_) return NULL; |
| 94 | 88 |
| 95 Page* cur_page = Page::FromAllocationTop(cur_addr_); | 89 Page* cur_page = Page::FromAllocationTop(cur_addr_); |
| 96 cur_page = cur_page->next_page(); | 90 cur_page = cur_page->next_page(); |
| 97 ASSERT(cur_page->is_valid()); | 91 ASSERT(cur_page->is_valid()); |
| 98 | 92 |
| 99 cur_addr_ = cur_page->ObjectAreaStart(); | 93 cur_addr_ = cur_page->ObjectAreaStart(); |
| 100 cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop(); | 94 cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop(); |
| 101 | 95 |
| 96 if (!cur_page->IsFlagSet(Page::IS_CONTINIOUS)) { | |
| 97 cur_addr_ = Marking::FirstLiveObject(cur_addr_, cur_limit_); | |
| 98 if (cur_addr_ > cur_limit_) cur_addr_ = cur_limit_; | |
| 99 } | |
| 100 | |
| 102 if (cur_addr_ == end_addr_) return NULL; | 101 if (cur_addr_ == end_addr_) return NULL; |
| 103 ASSERT(cur_addr_ < cur_limit_); | 102 ASSERT(cur_addr_ < cur_limit_); |
| 104 #ifdef DEBUG | 103 #ifdef DEBUG |
| 105 Verify(); | 104 Verify(); |
| 106 #endif | 105 #endif |
| 107 return FromCurrentPage(); | 106 return FromCurrentPage(); |
| 108 } | 107 } |
| 109 | 108 |
| 110 | 109 |
| 110 void HeapObjectIterator::AdvanceUsingMarkbits() { | |
| 111 HeapObject* obj = HeapObject::FromAddress(cur_addr_); | |
| 112 int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj); | |
| 113 ASSERT_OBJECT_SIZE(obj_size); | |
| 114 cur_addr_ = Marking::NextLiveObject(obj, | |
| 115 obj_size, | |
| 116 cur_limit_); | |
| 117 if (cur_addr_ > cur_limit_) cur_addr_ = cur_limit_; | |
| 118 } | |
| 119 | |
| 120 | |
| 111 #ifdef DEBUG | 121 #ifdef DEBUG |
| 112 void HeapObjectIterator::Verify() { | 122 void HeapObjectIterator::Verify() { |
| 113 Page* p = Page::FromAllocationTop(cur_addr_); | 123 Page* p = Page::FromAllocationTop(cur_addr_); |
| 114 ASSERT(p == Page::FromAllocationTop(cur_limit_)); | 124 ASSERT(p == Page::FromAllocationTop(cur_limit_)); |
| 115 ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_)); | 125 ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_)); |
| 116 } | 126 } |
| 117 #endif | 127 #endif |
| 118 | 128 |
| 119 | 129 |
| 120 // ----------------------------------------------------------------------------- | 130 // ----------------------------------------------------------------------------- |
| (...skipping 659 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 780 if (above_allocation_top) { | 790 if (above_allocation_top) { |
| 781 // We don't care what's above the allocation top. | 791 // We don't care what's above the allocation top. |
| 782 } else { | 792 } else { |
| 783 Address top = current_page->AllocationTop(); | 793 Address top = current_page->AllocationTop(); |
| 784 if (current_page == top_page) { | 794 if (current_page == top_page) { |
| 785 ASSERT(top == allocation_info_.top); | 795 ASSERT(top == allocation_info_.top); |
| 786 // The next page will be above the allocation top. | 796 // The next page will be above the allocation top. |
| 787 above_allocation_top = true; | 797 above_allocation_top = true; |
| 788 } | 798 } |
| 789 | 799 |
| 790 // It should be packed with objects from the bottom to the top. | 800 HeapObjectIterator it(current_page, NULL); |
| 791 Address current = current_page->ObjectAreaStart(); | 801 Address end_of_previous_object = current_page->ObjectAreaStart(); |
| 792 while (current < top) { | 802 for(HeapObject* object = it.next(); object != NULL; object = it.next()) { |
| 793 HeapObject* object = HeapObject::FromAddress(current); | 803 ASSERT(end_of_previous_object <= object->address()); |
| 794 | 804 |
| 795 // The first word should be a map, and we expect all map pointers to | 805 // The first word should be a map, and we expect all map pointers to |
| 796 // be in map space. | 806 // be in map space. |
| 797 Map* map = object->map(); | 807 Map* map = object->map(); |
| 798 ASSERT(map->IsMap()); | 808 ASSERT(map->IsMap()); |
| 799 ASSERT(Heap::map_space()->Contains(map)); | 809 ASSERT(Heap::map_space()->Contains(map)); |
| 800 | 810 |
| 801 // Perform space-specific object verification. | 811 // Perform space-specific object verification. |
| 802 VerifyObject(object); | 812 VerifyObject(object); |
| 803 | 813 |
| 814 if (object->IsCodeCache() && ((uint32_t*)object->address())[2] == 0x2) { | |
| 815 current_page->PrintMarkbits(); | |
| 816 } | |
| 817 | |
| 804 // The object itself should look OK. | 818 // The object itself should look OK. |
| 805 object->Verify(); | 819 object->Verify(); |
| 806 | 820 |
| 807 // All the interior pointers should be contained in the heap and | 821 // All the interior pointers should be contained in the heap and |
| 808 // have page regions covering intergenerational references should be | 822 // have page regions covering intergenerational references should be |
| 809 // marked dirty. | 823 // marked dirty. |
| 810 int size = object->Size(); | 824 int size = object->Size(); |
| 811 object->IterateBody(map->instance_type(), size, visitor); | 825 object->IterateBody(map->instance_type(), size, visitor); |
| 812 | 826 |
| 813 current += size; | 827 ASSERT(object->address() + size <= top); |
| 828 end_of_previous_object = object->address() + size; | |
| 814 } | 829 } |
| 815 | |
| 816 // The allocation pointer should not be in the middle of an object. | |
| 817 ASSERT(current == top); | |
| 818 } | 830 } |
| 819 | 831 |
| 820 current_page = current_page->next_page(); | 832 current_page = current_page->next_page(); |
| 821 } | 833 } |
| 822 } | 834 } |
| 823 #endif | 835 #endif |
| 824 | 836 |
| 825 | 837 |
| 826 // ----------------------------------------------------------------------------- | 838 // ----------------------------------------------------------------------------- |
| 827 // NewSpace implementation | 839 // NewSpace implementation |
| (...skipping 837 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1665 ASSERT(bytes <= InitialCapacity()); | 1677 ASSERT(bytes <= InitialCapacity()); |
| 1666 Address limit = allocation_info_.limit; | 1678 Address limit = allocation_info_.limit; |
| 1667 Address top = allocation_info_.top; | 1679 Address top = allocation_info_.top; |
| 1668 return limit - top >= bytes; | 1680 return limit - top >= bytes; |
| 1669 } | 1681 } |
| 1670 | 1682 |
| 1671 | 1683 |
| 1672 void PagedSpace::FreePages(Page* prev, Page* last) { | 1684 void PagedSpace::FreePages(Page* prev, Page* last) { |
| 1673 if (last == AllocationTopPage()) { | 1685 if (last == AllocationTopPage()) { |
| 1674 // Pages are already at the end of used pages. | 1686 // Pages are already at the end of used pages. |
| 1687 // Just clean them. | |
|
> **Erik Corry** — 2011/01/19 13:46:48:
> How are they cleaned?
>
> **Vyacheslav Egorov (Chromium)** — 2011/01/20 16:40:21:
> Done.
| |
| 1688 Page* p = prev == NULL ? first_page_ : prev->next_page(); | |
| 1689 Page* end_page = last->next_page(); | |
| 1690 do { | |
| 1691 p->SetFlag(Page::IS_CONTINIOUS); | |
| 1692 p = p->next_page(); | |
| 1693 } while (p != end_page); | |
| 1675 return; | 1694 return; |
| 1676 } | 1695 } |
| 1677 | 1696 |
| 1678 Page* first = NULL; | 1697 Page* first = NULL; |
| 1679 | 1698 |
| 1680 // Remove pages from the list. | 1699 // Remove pages from the list. |
| 1681 if (prev == NULL) { | 1700 if (prev == NULL) { |
| 1682 first = first_page_; | 1701 first = first_page_; |
| 1683 first_page_ = last->next_page(); | 1702 first_page_ = last->next_page(); |
| 1684 } else { | 1703 } else { |
| 1685 first = prev->next_page(); | 1704 first = prev->next_page(); |
| 1686 prev->set_next_page(last->next_page()); | 1705 prev->set_next_page(last->next_page()); |
| 1687 } | 1706 } |
| 1688 | 1707 |
| 1689 // Attach it after the last page. | 1708 // Attach it after the last page. |
| 1690 last_page_->set_next_page(first); | 1709 last_page_->set_next_page(first); |
| 1691 last_page_ = last; | 1710 last_page_ = last; |
| 1692 last->set_next_page(NULL); | 1711 last->set_next_page(NULL); |
| 1693 | 1712 |
| 1694 // Clean them up. | 1713 // Clean them up. |
| 1695 do { | 1714 do { |
| 1696 first->InvalidateWatermark(true); | 1715 first->InvalidateWatermark(true); |
| 1697 first->SetAllocationWatermark(first->ObjectAreaStart()); | 1716 first->SetAllocationWatermark(first->ObjectAreaStart()); |
| 1698 first->SetCachedAllocationWatermark(first->ObjectAreaStart()); | 1717 first->SetCachedAllocationWatermark(first->ObjectAreaStart()); |
| 1699 first->SetRegionMarks(Page::kAllRegionsCleanMarks); | 1718 first->SetRegionMarks(Page::kAllRegionsCleanMarks); |
| 1719 first->SetFlag(Page::IS_CONTINIOUS); | |
| 1700 first->markbits()->Clear(); | 1720 first->markbits()->Clear(); |
| 1701 first = first->next_page(); | 1721 first = first->next_page(); |
| 1702 } while (first != NULL); | 1722 } while (first->is_valid()); |
| 1703 } | 1723 } |
| 1704 | 1724 |
| 1705 | 1725 |
| 1706 void PagedSpace::PrepareForMarkCompact(bool will_compact) { | 1726 void PagedSpace::PrepareForMarkCompact(bool will_compact) { |
| 1707 ASSERT(!will_compact); | 1727 ASSERT(!will_compact); |
| 1708 } | 1728 } |
| 1709 | 1729 |
| 1710 | 1730 |
| 1711 bool PagedSpace::ReserveSpace(int bytes) { | 1731 bool PagedSpace::ReserveSpace(int bytes) { |
| 1712 Address limit = allocation_info_.limit; | 1732 Address limit = allocation_info_.limit; |
| (...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1771 | 1791 |
| 1772 if (obj->address() >= p->AllocationWatermark()) { | 1792 if (obj->address() >= p->AllocationWatermark()) { |
| 1773 // There should be no hole between the allocation watermark | 1793 // There should be no hole between the allocation watermark |
| 1774 // and allocated object address. | 1794 // and allocated object address. |
| 1775 // Memory above the allocation watermark was not swept and | 1795 // Memory above the allocation watermark was not swept and |
| 1776 // might contain garbage pointers to new space. | 1796 // might contain garbage pointers to new space. |
| 1777 ASSERT(obj->address() == p->AllocationWatermark()); | 1797 ASSERT(obj->address() == p->AllocationWatermark()); |
| 1778 p->SetAllocationWatermark(obj->address() + size_in_bytes); | 1798 p->SetAllocationWatermark(obj->address() + size_in_bytes); |
| 1779 } | 1799 } |
| 1780 | 1800 |
| 1801 if (!p->IsFlagSet(Page::IS_CONTINIOUS)) { | |
| 1802 // This page is not continious so we have to mark objects | |
| 1803 // that should be visited by HeapObjectIterator. | |
| 1804 ASSERT(!Marking::IsMarked(obj)); | |
| 1805 Marking::SetMark(obj); | |
| 1806 } | |
| 1807 | |
| 1781 return obj; | 1808 return obj; |
| 1782 } | 1809 } |
| 1783 } | 1810 } |
| 1784 | 1811 |
| 1785 // Free list allocation failed and there is no next page. Fail if we have | 1812 // Free list allocation failed and there is no next page. Fail if we have |
| 1786 // hit the old generation size limit that should cause a garbage | 1813 // hit the old generation size limit that should cause a garbage |
| 1787 // collection. | 1814 // collection. |
| 1788 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) { | 1815 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) { |
| 1789 return NULL; | 1816 return NULL; |
| 1790 } | 1817 } |
| (...skipping 670 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 2461 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { | 2488 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { |
| 2462 if (obj->IsCode()) { | 2489 if (obj->IsCode()) { |
| 2463 Code* code = Code::cast(obj); | 2490 Code* code = Code::cast(obj); |
| 2464 code_kind_statistics[code->kind()] += code->Size(); | 2491 code_kind_statistics[code->kind()] += code->Size(); |
| 2465 } | 2492 } |
| 2466 } | 2493 } |
| 2467 } | 2494 } |
| 2468 #endif // DEBUG | 2495 #endif // DEBUG |
| 2469 | 2496 |
| 2470 } } // namespace v8::internal | 2497 } } // namespace v8::internal |
| OLD | NEW |