OLD | NEW |
---|---|
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 74 matching lines...) | |
85 bool HeapObjectIterator::HasNextInNextPage() { | 85 bool HeapObjectIterator::HasNextInNextPage() { |
86 if (cur_addr_ == end_addr_) return false; | 86 if (cur_addr_ == end_addr_) return false; |
87 | 87 |
88 Page* cur_page = Page::FromAllocationTop(cur_addr_); | 88 Page* cur_page = Page::FromAllocationTop(cur_addr_); |
89 cur_page = cur_page->next_page(); | 89 cur_page = cur_page->next_page(); |
90 ASSERT(cur_page->is_valid()); | 90 ASSERT(cur_page->is_valid()); |
91 | 91 |
92 cur_addr_ = cur_page->ObjectAreaStart(); | 92 cur_addr_ = cur_page->ObjectAreaStart(); |
93 cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop(); | 93 cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop(); |
94 | 94 |
95 if (cur_addr_ == end_addr_) return false; | |
95 ASSERT(cur_addr_ < cur_limit_); | 96 ASSERT(cur_addr_ < cur_limit_); |
96 #ifdef DEBUG | 97 #ifdef DEBUG |
97 Verify(); | 98 Verify(); |
98 #endif | 99 #endif |
99 return true; | 100 return true; |
100 } | 101 } |
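Note on the new early return (NEW line 95): after stepping to the next page, cur_addr_ can equal end_addr_ when iteration ends exactly at the start of a freshly grown page — plausibly a page left empty by the new ReserveSpace() path added below — and the old code would then trip ASSERT(cur_addr_ < cur_limit_). A minimal driving loop, assuming the has_next()/next() interface HeapObjectIterator declares in spaces.h (illustrative only, not part of this CL):

// Illustrative sketch; assumes the HeapObjectIterator interface from
// spaces.h. The new guard keeps this loop from asserting when the space
// was grown but nothing has been allocated on the trailing page yet.
HeapObjectIterator it(Heap::old_pointer_space());
while (it.has_next()) {
  HeapObject* obj = it.next();
  // ... visit obj ...
}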
101 | 102 |
102 | 103 |
103 #ifdef DEBUG | 104 #ifdef DEBUG |
104 void HeapObjectIterator::Verify() { | 105 void HeapObjectIterator::Verify() { |
(...skipping 1710 matching lines...) | |
1815 } | 1816 } |
1816 } | 1817 } |
1817 } | 1818 } |
1818 | 1819 |
1819 // Make sure the computed size - based on the used portion of the pages in | 1820 // Make sure the computed size - based on the used portion of the pages in |
1820 // use - matches the size obtained while computing forwarding addresses. | 1821 // use - matches the size obtained while computing forwarding addresses. |
1821 ASSERT(computed_size == Size()); | 1822 ASSERT(computed_size == Size()); |
1822 } | 1823 } |
1823 | 1824 |
1824 | 1825 |
1826 bool NewSpace::ReserveSpace(int bytes) { | |
1827 // We can't reliably unpack a partial snapshot that needs more new space | |
1828 // than the minimum NewSpace size. | |
1829 ASSERT(bytes <= InitialCapacity()); | |
1830 Address limit = allocation_info_.limit; | |
1831 Address top = allocation_info_.top; | |
1832 return limit - top >= bytes; | |
1833 } | |
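NewSpace::ReserveSpace() only checks the linear gap between top and limit, relying on the ASSERT that no more than InitialCapacity() is ever requested. A hypothetical caller sketch — the actual call site is not shown in this CL, and every name here other than ReserveSpace is an assumption:

// Hypothetical caller (not part of this CL): reserve everything up front so
// that later raw allocations during snapshot unpacking cannot fail midway.
if (!Heap::new_space()->ReserveSpace(new_space_bytes)) {
  V8::FatalProcessOutOfMemory("snapshot deserialization");
}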
1834 | |
1835 | |
1836 bool PagedSpace::ReserveSpace(int bytes) { | |
1837 Address limit = allocation_info_.limit; | |
1838 Address top = allocation_info_.top; | |
1839 if (limit - top >= bytes) return true; | |
1840 | |
1841 // There wasn't enough space in the current page. Let's put the rest | |
1842 // of the page on the free list and start a fresh page. | |
1843 PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_)); | |
1844 | |
1845 Page* reserved_page = TopPageOf(allocation_info_); | |
1846 int bytes_left_to_reserve = bytes; | |
1847 while (bytes_left_to_reserve > 0) { | |
1848 if (!reserved_page->next_page()->is_valid()) { | |
1849 if (Heap::OldGenerationAllocationLimitReached()) return false; | |
1850 Expand(reserved_page); | |
1851 } | |
1852 bytes_left_to_reserve -= Page::kPageSize; | |
Mads Ager (chromium), 2010/01/12 14:17:49:
Do we need some slack here? Allocating just enoug… | |
1853 reserved_page = reserved_page->next_page(); | |
1854 if (!reserved_page->is_valid()) return false; | |
1855 } | |
1856 ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid()); | |
1857 SetAllocationInfo(&allocation_info_, | |
1858 TopPageOf(allocation_info_)->next_page()); | |
1859 return true; | |
1860 } | |
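On the slack question raised in the comment above: the loop subtracts a full Page::kPageSize per page, but a page's usable payload is only Page::kObjectAreaSize (the page header and remembered set take the rest), and the first page contributes only limit - top. One more conservative accounting, as a sketch — an assumption-labeled illustration, not part of this CL:

// Sketch only: round up by usable object area rather than raw page size,
// since an object cannot span pages. Assumes Page::kObjectAreaSize is the
// usable payload per page; one way to add the slack asked about above.
static int PagesNeededFor(int bytes) {
  return (bytes + Page::kObjectAreaSize - 1) / Page::kObjectAreaSize;
}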
1861 | |
1862 | |
1863 // You have to call this last, since the implementation from PagedSpace | |
1864 // doesn't know that memory was 'promised' to large object space. | |
1865 bool LargeObjectSpace::ReserveSpace(int bytes) { | |
1866 // We add a slack-factor of 2 in order to have space for the remembered | |
Mads Ager (chromium), 2010/01/12 14:17:49:
Is a factor of 2 enough? I would guess that this… | |
1867 // set and a series of large-object allocations that are only just larger | |
1868 // than the page size. | |
1869 return Heap::OldGenerationSpaceAvailable() >= bytes * 2; | |
1870 } | |
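The factor of 2 can be read as rounding headroom: a large object occupies whole pages plus remembered-set/header overhead, so an allocation only just larger than one page consumes nearly twice the requested bytes. Worked numbers under an assumed page size (illustrative, not from the CL):

// Illustrative arithmetic behind 'bytes * 2' (assumed 8 KB page size):
//   requested: 8392 bytes (just over one page)
//   consumed : 2 full pages = 16384 bytes, plus remembered-set overhead
//   ratio    : 16384 / 8392 ~ 1.95
// The review comment above questions whether this heuristic always holds.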
1871 | |
1872 | |
1825 // Slow case for normal allocation. Try in order: (1) allocate in the next | 1873 // Slow case for normal allocation. Try in order: (1) allocate in the next |
1826 // page in the space, (2) allocate off the space's free list, (3) expand the | 1874 // page in the space, (2) allocate off the space's free list, (3) expand the |
1827 // space, (4) fail. | 1875 // space, (4) fail. |
1828 HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) { | 1876 HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) { |
1829 // Linear allocation in this space has failed. If there is another page | 1877 // Linear allocation in this space has failed. If there is another page |
1830 // in the space, move to that page and allocate there. This allocation | 1878 // in the space, move to that page and allocate there. This allocation |
1831 // should succeed (size_in_bytes should not be greater than a page's | 1879 // should succeed (size_in_bytes should not be greater than a page's |
1832 // object area size). | 1880 // object area size). |
1833 Page* current_page = TopPageOf(allocation_info_); | 1881 Page* current_page = TopPageOf(allocation_info_); |
1834 if (current_page->next_page()->is_valid()) { | 1882 if (current_page->next_page()->is_valid()) { |
(...skipping 23 matching lines...) | |
1858 ASSERT(!current_page->next_page()->is_valid()); | 1906 ASSERT(!current_page->next_page()->is_valid()); |
1859 if (Expand(current_page)) { | 1907 if (Expand(current_page)) { |
1860 return AllocateInNextPage(current_page, size_in_bytes); | 1908 return AllocateInNextPage(current_page, size_in_bytes); |
1861 } | 1909 } |
1862 | 1910 |
1863 // Finally, fail. | 1911 // Finally, fail. |
1864 return NULL; | 1912 return NULL; |
1865 } | 1913 } |
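For reference, the slow path condensed into its four steps — this restates the function's own comment; step (2) sits in the 23 elided lines above:

// (1) another page already exists  -> AllocateInNextPage(current_page, n)
// (2) free list has a large-enough block (elided above) -> use it
// (3) Expand(current_page) succeeds -> AllocateInNextPage(current_page, n)
// (4) otherwise                     -> return NULL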
1866 | 1914 |
1867 | 1915 |
1916 void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) { | |
1917 int free_size = | |
1918 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top); | |
1919 if (free_size > 0) { | |
1920 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size); | |
1921 accounting_stats_.WasteBytes(wasted_bytes); | |
1922 } | |
1923 } | |
1924 | |
1925 | |
1926 void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) { | |
1927 int free_size = | |
1928 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top); | |
1929 // In the fixed space free list all the free list items have the right size. | |
1930 // We use up the rest of the page while preserving this invariant. | |
1931 while (free_size >= object_size_in_bytes_) { | |
1932 free_list_.Free(allocation_info_.top); | |
1933 allocation_info_.top += object_size_in_bytes_; | |
1934 free_size -= object_size_in_bytes_; | |
1935 accounting_stats_.WasteBytes(object_size_in_bytes_); | |
1936 } | |
1937 } | |
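A worked example of the invariant being preserved (illustrative numbers, not from the CL):

// With object_size_in_bytes_ == 24 and 100 bytes left before
// ObjectAreaEnd(): the loop frees four 24-byte chunks onto the fixed-size
// free list (96 bytes) and advances top past each one; the final 4 bytes
// are smaller than one object, so they stay off the free list and the
// all-entries-one-size invariant holds.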
1938 | |
1939 | |
1868 // Add the block at the top of the page to the space's free list, set the | 1940 // Add the block at the top of the page to the space's free list, set the |
1869 // allocation info to the next page (assumed to be one), and allocate | 1941 // allocation info to the next page (assumed to be one), and allocate |
1870 // linearly there. | 1942 // linearly there. |
1871 HeapObject* OldSpace::AllocateInNextPage(Page* current_page, | 1943 HeapObject* OldSpace::AllocateInNextPage(Page* current_page, |
1872 int size_in_bytes) { | 1944 int size_in_bytes) { |
1873 ASSERT(current_page->next_page()->is_valid()); | 1945 ASSERT(current_page->next_page()->is_valid()); |
1874 // Add the block at the top of this page to the free list. | 1946 PutRestOfCurrentPageOnFreeList(current_page); |
1875 int free_size = | |
1876 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top); | |
1877 if (free_size > 0) { | |
1878 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size); | |
1879 accounting_stats_.WasteBytes(wasted_bytes); | |
1880 } | |
1881 SetAllocationInfo(&allocation_info_, current_page->next_page()); | 1947 SetAllocationInfo(&allocation_info_, current_page->next_page()); |
1882 return AllocateLinearly(&allocation_info_, size_in_bytes); | 1948 return AllocateLinearly(&allocation_info_, size_in_bytes); |
1883 } | 1949 } |
1884 | 1950 |
1885 | 1951 |
1886 #ifdef DEBUG | 1952 #ifdef DEBUG |
1887 struct CommentStatistic { | 1953 struct CommentStatistic { |
1888 const char* comment; | 1954 const char* comment; |
1889 int size; | 1955 int size; |
1890 int count; | 1956 int count; |
(...skipping 901 matching lines...) | |
2792 reinterpret_cast<Object**>(object->address() | 2858 reinterpret_cast<Object**>(object->address() |
2793 + Page::kObjectAreaSize), | 2859 + Page::kObjectAreaSize), |
2794 allocation_top); | 2860 allocation_top); |
2795 PrintF("\n"); | 2861 PrintF("\n"); |
2796 } | 2862 } |
2797 } | 2863 } |
2798 } | 2864 } |
2799 #endif // DEBUG | 2865 #endif // DEBUG |
2800 | 2866 |
2801 } } // namespace v8::internal | 2867 } } // namespace v8::internal |