OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1978 matching lines...)
1989 first->SetRegionMarks(Page::kAllRegionsCleanMarks); | 1989 first->SetRegionMarks(Page::kAllRegionsCleanMarks); |
1990 first = first->next_page(); | 1990 first = first->next_page(); |
1991 } while (first != NULL); | 1991 } while (first != NULL); |
1992 | 1992 |
1993 // Order of pages in this space might no longer be consistent with | 1993 // Order of pages in this space might no longer be consistent with |
1994 // order of pages in chunks. | 1994 // order of pages in chunks. |
1995 page_list_is_chunk_ordered_ = false; | 1995 page_list_is_chunk_ordered_ = false; |
1996 } | 1996 } |
1997 | 1997 |
1998 | 1998 |
| 1999 void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) { |
| 2000 const bool add_to_freelist = true; |
| 2001 |
| 2002 // Mark pages as used or unused so that unused pages can be |
| 2003 // filled properly after reordering. |
| 2004 PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES); |
| 2005 Page* last_in_use = AllocationTopPage(); |
| 2006 bool in_use = true; |
| 2007 |
| 2008 while (all_pages_iterator.has_next()) { |
| 2009 Page* p = all_pages_iterator.next(); |
| 2010 p->SetWasInUseBeforeMC(in_use); |
| 2011 if (p == last_in_use) { |
| 2012 // We passed the page containing the allocation top. All |
| 2013 // subsequent pages are unused. |
| 2014 in_use = false; |
| 2015 } |
| 2016 } |
| 2017 |
| 2018 if (page_list_is_chunk_ordered_) return; |
| 2019 |
| 2020 Page* new_last_in_use = Page::FromAddress(NULL); |
| 2021 MemoryAllocator::RelinkPageListInChunkOrder(this, |
| 2022 &first_page_, |
| 2023 &last_page_, |
| 2024 &new_last_in_use); |
| 2025 ASSERT(new_last_in_use->is_valid()); |
| 2026 |
| 2027 if (new_last_in_use != last_in_use) { |
| 2028 // The current allocation top points to a page which is now in the |
| 2029 // middle of the page list. Move it forward to the new last used |
| 2030 // page so that object iterators continue to work properly. |
| 2031 int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) - |
| 2032 last_in_use->AllocationTop()); |
| 2033 |
| 2034 last_in_use->SetAllocationWatermark(last_in_use->AllocationTop()); |
| 2035 if (size_in_bytes > 0) { |
| 2036 Address start = last_in_use->AllocationTop(); |
| 2037 if (deallocate_blocks) { |
| 2038 accounting_stats_.AllocateBytes(size_in_bytes); |
| 2039 DeallocateBlock(start, size_in_bytes, add_to_freelist); |
| 2040 } else { |
| 2041 Heap::CreateFillerObjectAt(start, size_in_bytes); |
| 2042 } |
| 2043 } |
| 2044 |
| 2045 // The new last-in-use page was in the middle of the list before |
| 2046 // sorting, so it is full. |
| 2047 SetTop(new_last_in_use->AllocationTop()); |
| 2048 |
| 2049 ASSERT(AllocationTopPage() == new_last_in_use); |
| 2050 ASSERT(AllocationTopPage()->WasInUseBeforeMC()); |
| 2051 } |
| 2052 |
| 2053 PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE); |
| 2054 while (pages_in_use_iterator.has_next()) { |
| 2055 Page* p = pages_in_use_iterator.next(); |
| 2056 if (!p->WasInUseBeforeMC()) { |
| 2057 // An empty page lies in the middle of a sequence of used pages. |
| 2058 // Allocate it as a whole and deallocate it immediately. |
| 2059 int size_in_bytes = static_cast<int>(PageAllocationLimit(p) - |
| 2060 p->ObjectAreaStart()); |
| 2061 |
| 2062 p->SetAllocationWatermark(p->ObjectAreaStart()); |
| 2063 Address start = p->ObjectAreaStart(); |
| 2064 if (deallocate_blocks) { |
| 2065 accounting_stats_.AllocateBytes(size_in_bytes); |
| 2066 DeallocateBlock(start, size_in_bytes, add_to_freelist); |
| 2067 } else { |
| 2068 Heap::CreateFillerObjectAt(start, size_in_bytes); |
| 2069 } |
| 2070 } |
| 2071 } |
| 2072 |
| 2073 page_list_is_chunk_ordered_ = true; |
| 2074 } |
| 2075 |
| 2076 |
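Note: the new RelinkPageListInChunkOrder() above factors the relinking logic out of PrepareForMarkCompact() below and makes the treatment of unused space a parameter: with deallocate_blocks true, the bytes are returned to the free list through DeallocateBlock(); with false, they are covered by filler objects so heap iterators keep working. A minimal sketch of the initial flagging walk, using a toy Page type rather than V8's real classes:

#include <cassert>
#include <cstdio>

struct Page {
  Page* next;
  bool was_in_use_before_mc;
};

// Mirrors the loop at the top of RelinkPageListInChunkOrder(): every page
// up to and including the allocation-top page is flagged as used, every
// page after it as unused.
void FlagPagesUpToAllocationTop(Page* first, Page* allocation_top_page) {
  bool in_use = true;
  for (Page* p = first; p != NULL; p = p->next) {
    p->was_in_use_before_mc = in_use;
    if (p == allocation_top_page) in_use = false;
  }
}

int main() {
  Page pages[4] = {};
  for (int i = 0; i < 3; i++) pages[i].next = &pages[i + 1];
  FlagPagesUpToAllocationTop(&pages[0], &pages[1]);  // top sits on page 1
  assert(pages[0].was_in_use_before_mc && pages[1].was_in_use_before_mc);
  assert(!pages[2].was_in_use_before_mc && !pages[3].was_in_use_before_mc);
  std::printf("pages 0-1 flagged used, pages 2-3 flagged unused\n");
  return 0;
}
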
1999 void PagedSpace::PrepareForMarkCompact(bool will_compact) { | 2077 void PagedSpace::PrepareForMarkCompact(bool will_compact) { |
2000 if (will_compact) { | 2078 if (will_compact) { |
2001 // MarkCompact collector relies on WAS_IN_USE_BEFORE_MC page flag | 2079 RelinkPageListInChunkOrder(false); |
2002 // to skip unused pages. Update flag value for all pages in space. | |
2003 PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES); | |
2004 Page* last_in_use = AllocationTopPage(); | |
2005 bool in_use = true; | |
2006 | |
2007 while (all_pages_iterator.has_next()) { | |
2008 Page* p = all_pages_iterator.next(); | |
2009 p->SetWasInUseBeforeMC(in_use); | |
2010 if (p == last_in_use) { | |
2011 // We passed a page containing allocation top. All consequent | |
2012 // pages are not used. | |
2013 in_use = false; | |
2014 } | |
2015 } | |
2016 | |
2017 if (!page_list_is_chunk_ordered_) { | |
2018 Page* new_last_in_use = Page::FromAddress(NULL); | |
2019 MemoryAllocator::RelinkPageListInChunkOrder(this, | |
2020 &first_page_, | |
2021 &last_page_, | |
2022 &new_last_in_use); | |
2023 ASSERT(new_last_in_use->is_valid()); | |
2024 | |
2025 if (new_last_in_use != last_in_use) { | |
2026 // Current allocation top points to a page which is now in the middle | |
2027 // of page list. We should move allocation top forward to the new last | |
2028 // used page so various object iterators will continue to work properly. | |
2029 last_in_use->SetAllocationWatermark(last_in_use->AllocationTop()); | |
2030 | |
2031 int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) - | |
2032 last_in_use->AllocationTop()); | |
2033 | |
2034 if (size_in_bytes > 0) { | |
2035 // There is still some space left on this page. Create a fake | |
2036 // object which will occupy all free space on this page. | |
2037 // Otherwise iterators would not be able to scan this page | |
2038 // correctly. | |
2039 | |
2040 Heap::CreateFillerObjectAt(last_in_use->AllocationTop(), | |
2041 size_in_bytes); | |
2042 } | |
2043 | |
2044 // New last in use page was in the middle of the list before | |
2045 // sorting so it full. | |
2046 SetTop(new_last_in_use->AllocationTop()); | |
2047 | |
2048 ASSERT(AllocationTopPage() == new_last_in_use); | |
2049 ASSERT(AllocationTopPage()->WasInUseBeforeMC()); | |
2050 } | |
2051 | |
2052 PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE); | |
2053 while (pages_in_use_iterator.has_next()) { | |
2054 Page* p = pages_in_use_iterator.next(); | |
2055 if (!p->WasInUseBeforeMC()) { | |
2056 // Empty page is in the middle of a sequence of used pages. | |
2057 // Create a fake object which will occupy all free space on this page. | |
2058 // Otherwise iterators would not be able to scan this page correctly. | |
2059 int size_in_bytes = static_cast<int>(PageAllocationLimit(p) - | |
2060 p->ObjectAreaStart()); | |
2061 | |
2062 p->SetAllocationWatermark(p->ObjectAreaStart()); | |
2063 Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes); | |
2064 } | |
2065 } | |
2066 | |
2067 page_list_is_chunk_ordered_ = true; | |
2068 } | |
2069 } | 2080 } |
2070 } | 2081 } |
2071 | 2082 |
2072 | 2083 |
2073 bool PagedSpace::ReserveSpace(int bytes) { | 2084 bool PagedSpace::ReserveSpace(int bytes) { |
2074 Address limit = allocation_info_.limit; | 2085 Address limit = allocation_info_.limit; |
2075 Address top = allocation_info_.top; | 2086 Address top = allocation_info_.top; |
2076 if (limit - top >= bytes) return true; | 2087 if (limit - top >= bytes) return true; |
2077 | 2088 |
2078 // There wasn't enough space in the current page. Let's put the rest | 2089 // There wasn't enough space in the current page. Let's put the rest |
(...skipping 115 matching lines...)
2194 int size_in_bytes) { | 2205 int size_in_bytes) { |
2195 ASSERT(current_page->next_page()->is_valid()); | 2206 ASSERT(current_page->next_page()->is_valid()); |
2196 Page* next_page = current_page->next_page(); | 2207 Page* next_page = current_page->next_page(); |
2197 next_page->ClearGCFields(); | 2208 next_page->ClearGCFields(); |
2198 PutRestOfCurrentPageOnFreeList(current_page); | 2209 PutRestOfCurrentPageOnFreeList(current_page); |
2199 SetAllocationInfo(&allocation_info_, next_page); | 2210 SetAllocationInfo(&allocation_info_, next_page); |
2200 return AllocateLinearly(&allocation_info_, size_in_bytes); | 2211 return AllocateLinearly(&allocation_info_, size_in_bytes); |
2201 } | 2212 } |
2202 | 2213 |
2203 | 2214 |
| 2215 void OldSpace::DeallocateBlock(Address start, |
| 2216 int size_in_bytes, |
| 2217 bool add_to_freelist) { |
| 2218 Free(start, size_in_bytes, add_to_freelist); |
| 2219 } |
| 2220 |
| 2221 |
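Note: DeallocateBlock() is the per-space hook that RelinkPageListInChunkOrder() invokes when deallocate_blocks is true. OldSpace manages a free list of variable-sized blocks, so the whole range can be returned with a single Free() call; FixedSpace (further down) must split the range into cells first. A sketch of the intended dispatch, assuming the hook is declared virtual on PagedSpace in spaces.h, which is outside this section:

#include <cstdio>

typedef char* Address;

// Hypothetical slice of the hierarchy; only the hook is shown.
struct PagedSpaceSketch {
  virtual ~PagedSpaceSketch() {}
  virtual void DeallocateBlock(Address start, int size_in_bytes,
                               bool add_to_freelist) = 0;
};

struct OldSpaceSketch : public PagedSpaceSketch {
  // Variable-sized free list: one entry covers the whole range.
  virtual void DeallocateBlock(Address start, int size_in_bytes,
                               bool add_to_freelist) {
    std::printf("free one block of %d bytes\n", size_in_bytes);
  }
};

struct FixedSpaceSketch : public PagedSpaceSketch {
  // Fixed-size free list: the range is split into equal cells first
  // (see FixedSpace::DeallocateBlock further down).
  virtual void DeallocateBlock(Address start, int size_in_bytes,
                               bool add_to_freelist) {
    std::printf("split %d bytes into fixed-size cells\n", size_in_bytes);
  }
};

int main() {
  char block[24];
  OldSpaceSketch old_space;
  FixedSpaceSketch fixed_space;
  PagedSpaceSketch* spaces[2] = { &old_space, &fixed_space };
  for (int i = 0; i < 2; i++) {
    spaces[i]->DeallocateBlock(block, static_cast<int>(sizeof(block)), true);
  }
  return 0;
}
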
2204 #ifdef DEBUG | 2222 #ifdef DEBUG |
2205 struct CommentStatistic { | 2223 struct CommentStatistic { |
2206 const char* comment; | 2224 const char* comment; |
2207 int size; | 2225 int size; |
2208 int count; | 2226 int count; |
2209 void Clear() { | 2227 void Clear() { |
2210 comment = NULL; | 2228 comment = NULL; |
2211 size = 0; | 2229 size = 0; |
2212 count = 0; | 2230 count = 0; |
2213 } | 2231 } |
(...skipping 254 matching lines...)
2468 ASSERT_EQ(object_size_in_bytes_, size_in_bytes); | 2486 ASSERT_EQ(object_size_in_bytes_, size_in_bytes); |
2469 Page* next_page = current_page->next_page(); | 2487 Page* next_page = current_page->next_page(); |
2470 next_page->ClearGCFields(); | 2488 next_page->ClearGCFields(); |
2471 current_page->SetAllocationWatermark(allocation_info_.top); | 2489 current_page->SetAllocationWatermark(allocation_info_.top); |
2472 accounting_stats_.WasteBytes(page_extra_); | 2490 accounting_stats_.WasteBytes(page_extra_); |
2473 SetAllocationInfo(&allocation_info_, next_page); | 2491 SetAllocationInfo(&allocation_info_, next_page); |
2474 return AllocateLinearly(&allocation_info_, size_in_bytes); | 2492 return AllocateLinearly(&allocation_info_, size_in_bytes); |
2475 } | 2493 } |
2476 | 2494 |
2477 | 2495 |
| 2496 void FixedSpace::DeallocateBlock(Address start, |
| 2497 int size_in_bytes, |
| 2498 bool add_to_freelist) { |
| 2499 // Free-list elements in fixed space are assumed to have a fixed size. |
| 2500 // We break the free block into chunks and add them to the free list |
| 2501 // individually. |
| 2502 int size = object_size_in_bytes(); |
| 2503 ASSERT(size_in_bytes % size == 0); |
| 2504 Address end = start + size_in_bytes; |
| 2505 for (Address a = start; a < end; a += size) { |
| 2506 Free(a, add_to_freelist); |
| 2507 } |
| 2508 } |
| 2509 |
| 2510 |
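Note: a worked instance of the chunking arithmetic above, assuming a hypothetical 32-byte cell size in place of object_size_in_bytes(): a 96-byte block passes the ASSERT and produces exactly three free-list entries.

#include <cassert>
#include <cstdio>

int main() {
  const int kCellSize = 32;   // hypothetical object_size_in_bytes()
  const int kBlockSize = 96;  // the size_in_bytes being deallocated
  assert(kBlockSize % kCellSize == 0);  // mirrors the ASSERT above

  // One free-list entry per cell: Free(start), Free(start + 32),
  // Free(start + 64).
  for (int offset = 0; offset < kBlockSize; offset += kCellSize) {
    std::printf("Free(start + %d)\n", offset);
  }
  return 0;
}
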
2478 #ifdef DEBUG | 2511 #ifdef DEBUG |
2479 void FixedSpace::ReportStatistics() { | 2512 void FixedSpace::ReportStatistics() { |
2480 int pct = Available() * 100 / Capacity(); | 2513 int pct = Available() * 100 / Capacity(); |
2481 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n", | 2514 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n", |
2482 Capacity(), Waste(), Available(), pct); | 2515 Capacity(), Waste(), Available(), pct); |
2483 | 2516 |
2484 ClearHistograms(); | 2517 ClearHistograms(); |
2485 HeapObjectIterator obj_it(this); | 2518 HeapObjectIterator obj_it(this); |
2486 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) | 2519 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) |
2487 CollectHistogramInfo(obj); | 2520 CollectHistogramInfo(obj); |
(...skipping 433 matching lines...)
2921 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { | 2954 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { |
2922 if (obj->IsCode()) { | 2955 if (obj->IsCode()) { |
2923 Code* code = Code::cast(obj); | 2956 Code* code = Code::cast(obj); |
2924 code_kind_statistics[code->kind()] += code->Size(); | 2957 code_kind_statistics[code->kind()] += code->Size(); |
2925 } | 2958 } |
2926 } | 2959 } |
2927 } | 2960 } |
2928 #endif // DEBUG | 2961 #endif // DEBUG |
2929 | 2962 |
2930 } } // namespace v8::internal | 2963 } } // namespace v8::internal |