OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 648 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
659 } | 659 } |
660 | 660 |
661 if (last_page != NULL) { | 661 if (last_page != NULL) { |
662 *last_page = last; | 662 *last_page = last; |
663 } | 663 } |
664 } | 664 } |
665 | 665 |
666 | 666 |
667 Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id, | 667 Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id, |
668 Address chunk_start, | 668 Address chunk_start, |
669 int chunk_size, | 669 size_t chunk_size, |
670 Page* prev, | 670 Page* prev, |
671 Page** last_page_in_use) { | 671 Page** last_page_in_use) { |
672 Address page_addr = RoundUp(chunk_start, Page::kPageSize); | 672 Address page_addr = RoundUp(chunk_start, Page::kPageSize); |
673 int pages_in_chunk = PagesInChunk(chunk_start, chunk_size); | 673 int pages_in_chunk = PagesInChunk(chunk_start, chunk_size); |
674 | 674 |
675 if (prev->is_valid()) { | 675 if (prev->is_valid()) { |
676 SetNextPage(prev, Page::FromAddress(page_addr)); | 676 SetNextPage(prev, Page::FromAddress(page_addr)); |
677 } | 677 } |
678 | 678 |
679 for (int i = 0; i < pages_in_chunk; i++) { | 679 for (int i = 0; i < pages_in_chunk; i++) { |
(...skipping 1305 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1985 Page* p = all_pages_iterator.next(); | 1985 Page* p = all_pages_iterator.next(); |
1986 p->SetWasInUseBeforeMC(in_use); | 1986 p->SetWasInUseBeforeMC(in_use); |
1987 if (p == last_in_use) { | 1987 if (p == last_in_use) { |
1988 // We passed a page containing allocation top. All subsequent | 1988 // We passed a page containing allocation top. All subsequent |
1989 // pages are unused. | 1989 // pages are unused. |
1990 in_use = false; | 1990 in_use = false; |
1991 } | 1991 } |
1992 } | 1992 } |
1993 | 1993 |
1994 if (!page_list_is_chunk_ordered_) { | 1994 if (!page_list_is_chunk_ordered_) { |
1995 Page* new_last_in_use = NULL; | 1995 Page* new_last_in_use = Page::FromAddress(NULL); |
1996 MemoryAllocator::RelinkPageListInChunkOrder(this, | 1996 MemoryAllocator::RelinkPageListInChunkOrder(this, |
1997 &first_page_, | 1997 &first_page_, |
1998 &last_page_, | 1998 &last_page_, |
1999 &new_last_in_use); | 1999 &new_last_in_use); |
2000 ASSERT(new_last_in_use != NULL); | 2000 ASSERT(new_last_in_use->is_valid()); |
2001 | 2001 |
2002 if (new_last_in_use != last_in_use) { | 2002 if (new_last_in_use != last_in_use) { |
2003 // Current allocation top points to a page which is now in the middle | 2003 // Current allocation top points to a page which is now in the middle |
2004 // of page list. We should move allocation top forward to the new last | 2004 // of page list. We should move allocation top forward to the new last |
2005 // used page so various object iterators will continue to work properly. | 2005 // used page so various object iterators will continue to work properly. |
2006 | 2006 |
2007 int size_in_bytes = | 2007 int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) - |
2008 PageAllocationLimit(last_in_use) - last_in_use->AllocationTop(); | 2008 last_in_use->AllocationTop()); |
2009 | 2009 |
2010 if (size_in_bytes > 0) { | 2010 if (size_in_bytes > 0) { |
2011 // There is still some space left on this page. Create a fake | 2011 // There is still some space left on this page. Create a fake |
2012 // object which will occupy all free space on this page. | 2012 // object which will occupy all free space on this page. |
2013 // Otherwise iterators would not be able to scan this page | 2013 // Otherwise iterators would not be able to scan this page |
2014 // correctly. | 2014 // correctly. |
2015 | 2015 |
2016 FreeListNode* node = | 2016 Heap::CreateFillerObjectAt(last_in_use->AllocationTop(), |
2017 FreeListNode::FromAddress(last_in_use->AllocationTop()); | 2017 size_in_bytes); |
2018 node->set_size(size_in_bytes); | |
2019 } | 2018 } |
2020 | 2019 |
2021 // New last in use page was in the middle of the list before | 2020 // New last in use page was in the middle of the list before |
2022 // sorting, so it is full. | 2021 // sorting, so it is full. |
2023 SetTop(new_last_in_use->AllocationTop()); | 2022 SetTop(new_last_in_use->AllocationTop()); |
2024 | 2023 |
2025 ASSERT(AllocationTopPage() == new_last_in_use); | 2024 ASSERT(AllocationTopPage() == new_last_in_use); |
2026 ASSERT(AllocationTopPage()->WasInUseBeforeMC()); | 2025 ASSERT(AllocationTopPage()->WasInUseBeforeMC()); |
2027 } | 2026 } |
2028 | 2027 |
2029 PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE); | 2028 PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE); |
2030 while (pages_in_use_iterator.has_next()) { | 2029 while (pages_in_use_iterator.has_next()) { |
2031 Page* p = pages_in_use_iterator.next(); | 2030 Page* p = pages_in_use_iterator.next(); |
2032 if (!p->WasInUseBeforeMC()) { | 2031 if (!p->WasInUseBeforeMC()) { |
2033 // Empty page is in the middle of a sequence of used pages. | 2032 // Empty page is in the middle of a sequence of used pages. |
2034 // Create a fake object which will occupy all free space on this page. | 2033 // Create a fake object which will occupy all free space on this page. |
2035 // Otherwise iterators would not be able to scan this page correctly. | 2034 // Otherwise iterators would not be able to scan this page correctly. |
2036 FreeListNode* node = | 2035 int size_in_bytes = static_cast<int>(PageAllocationLimit(p) - |
2037 FreeListNode::FromAddress(p->ObjectAreaStart()); | 2036 p->ObjectAreaStart()); |
2038 node->set_size(PageAllocationLimit(p) - p->ObjectAreaStart()); | 2037 |
| 2038 Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes); |
2039 } | 2039 } |
2040 } | 2040 } |
2041 | 2041 |
2042 page_list_is_chunk_ordered_ = true; | 2042 page_list_is_chunk_ordered_ = true; |
2043 } | 2043 } |
2044 } | 2044 } |
2045 } | 2045 } |
2046 | 2046 |
2047 | 2047 |
2048 bool PagedSpace::ReserveSpace(int bytes) { | 2048 bool PagedSpace::ReserveSpace(int bytes) { |
(...skipping 1020 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3069 reinterpret_cast<Object**>(object->address() | 3069 reinterpret_cast<Object**>(object->address() |
3070 + Page::kObjectAreaSize), | 3070 + Page::kObjectAreaSize), |
3071 allocation_top); | 3071 allocation_top); |
3072 PrintF("\n"); | 3072 PrintF("\n"); |
3073 } | 3073 } |
3074 } | 3074 } |
3075 } | 3075 } |
3076 #endif // DEBUG | 3076 #endif // DEBUG |
3077 | 3077 |
3078 } } // namespace v8::internal | 3078 } } // namespace v8::internal |
OLD | NEW |