Chromium Code Reviews

Unified Diff: src/spaces.cc

Issue 1700005: Fix bugs introduced by r4475:... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 8 months ago
--- src/spaces.cc (old)
+++ src/spaces.cc (new)
@@ -1,10 +1,10 @@
 // Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 1021 matching lines...)
@@ -1032,21 +1032,21 @@
     } else {
       // Unless this is the last page in the space containing allocated
       // objects, the allocation top should be at a constant offset from the
       // object area end.
       Address top = current_page->AllocationTop();
       if (current_page == top_page) {
         ASSERT(top == allocation_info_.top);
         // The next page will be above the allocation top.
         above_allocation_top = true;
       } else {
-        ASSERT(top == current_page->ObjectAreaEnd() - page_extra_);
+        ASSERT(top == PageAllocationLimit(current_page));
       }
 
       // It should be packed with objects from the bottom to the top.
       Address current = current_page->ObjectAreaStart();
       while (current < top) {
         HeapObject* object = HeapObject::FromAddress(current);
 
         // The first word should be a map, and we expect all map pointers to
         // be in map space.
         Map* map = object->map();
(...skipping 917 matching lines...)
@@ -1970,68 +1970,80 @@
   // Order of pages in this space might no longer be consistent with
   // order of pages in chunks.
   page_list_is_chunk_ordered_ = false;
 }
 
 
 void PagedSpace::PrepareForMarkCompact(bool will_compact) {
   if (will_compact) {
     // MarkCompact collector relies on WAS_IN_USE_BEFORE_MC page flag
     // to skip unused pages. Update flag value for all pages in space.
-    PageIterator it(this, PageIterator::ALL_PAGES);
+    PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
     Page* last_in_use = AllocationTopPage();
     bool in_use = true;
 
-    while (it.has_next()) {
-      Page* p = it.next();
+    while (all_pages_iterator.has_next()) {
+      Page* p = all_pages_iterator.next();
       p->SetWasInUseBeforeMC(in_use);
       if (p == last_in_use) {
         // We passed a page containing allocation top. All consequent
         // pages are not used.
         in_use = false;
       }
     }
 
     if (!page_list_is_chunk_ordered_) {
       Page* new_last_in_use = NULL;
       MemoryAllocator::RelinkPageListInChunkOrder(this,
                                                   &first_page_,
                                                   &last_page_,
                                                   &new_last_in_use);
       ASSERT(new_last_in_use != NULL);
 
       if (new_last_in_use != last_in_use) {
         // Current allocation top points to a page which is now in the middle
         // of page list. We should move allocation top forward to the new last
         // used page so various object iterators will continue to work properly.
 
         int size_in_bytes =
-            last_in_use->ObjectAreaEnd() - last_in_use->AllocationTop();
+            PageAllocationLimit(last_in_use) - last_in_use->AllocationTop();
 
         if (size_in_bytes > 0) {
           // There is still some space left on this page. Create a fake
           // object which will occupy all free space on this page.
           // Otherwise iterators would not be able to scan this page
           // correctly.
 
           FreeListNode* node =
               FreeListNode::FromAddress(last_in_use->AllocationTop());
-          node->set_size(last_in_use->ObjectAreaEnd() -
-                         last_in_use->AllocationTop());
+          node->set_size(size_in_bytes);
         }
 
         // New last in use page was in the middle of the list before
         // sorting so it full.
-        SetTop(new_last_in_use->AllocationTop(),
-               new_last_in_use->AllocationTop());
+        SetTop(new_last_in_use->AllocationTop());
 
         ASSERT(AllocationTopPage() == new_last_in_use);
+        ASSERT(AllocationTopPage()->WasInUseBeforeMC());
+      }
+
+      PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
+      while (pages_in_use_iterator.has_next()) {
+        Page* p = pages_in_use_iterator.next();
+        if (!p->WasInUseBeforeMC()) {
+          // Empty page is in the middle of a sequence of used pages.
+          // Create a fake object which will occupy all free space on this page.
+          // Otherwise iterators would not be able to scan this page correctly.
+          FreeListNode* node =
+              FreeListNode::FromAddress(p->ObjectAreaStart());
+          node->set_size(PageAllocationLimit(p) - p->ObjectAreaStart());
+        }
       }
 
       page_list_is_chunk_ordered_ = true;
     }
   }
 }
 
 
 bool PagedSpace::ReserveSpace(int bytes) {
   Address limit = allocation_info_.limit;
(...skipping 499 matching lines...)
@@ -2537,21 +2549,21 @@
   return NULL;
 }
 
 
 // Move to the next page (there is assumed to be one) and allocate there.
 // The top of page block is always wasted, because it is too small to hold a
 // map.
 HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
                                            int size_in_bytes) {
   ASSERT(current_page->next_page()->is_valid());
-  ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == page_extra_);
+  ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
   ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
   accounting_stats_.WasteBytes(page_extra_);
   SetAllocationInfo(&allocation_info_, current_page->next_page());
   return AllocateLinearly(&allocation_info_, size_in_bytes);
 }
 
 
 #ifdef DEBUG
 void FixedSpace::ReportStatistics() {
   int pct = Available() * 100 / Capacity();
(...skipping 499 matching lines...)
@@ -3057,10 +3069,10 @@
                      reinterpret_cast<Object**>(object->address()
                                                 + Page::kObjectAreaSize),
                      allocation_top);
       PrintF("\n");
     }
   }
 }
 #endif  // DEBUG
 
 } }  // namespace v8::internal
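
For readers coming to this review without the rest of src/spaces.cc, here is a minimal, self-contained sketch of the two patterns the patch converges on: folding the repeated ObjectAreaEnd() - page_extra_ computation into a single PageAllocationLimit() helper, and covering the unused tail of a page with a filler so object-by-object iterators never walk past valid objects. Everything below (Page, FixedSpaceSketch, the made-up 8 KB page in main) is an assumption for illustration, not the real V8 types; the real classes live in spaces.h and use a FreeListNode as the filler.

// Sketch only: not the real V8 classes. All names and values here except
// PageAllocationLimit, ObjectAreaStart/ObjectAreaEnd, AllocationTop and
// page_extra_ are invented for this example.
#include <cassert>
#include <cstdint>
#include <cstdio>

using Address = std::uintptr_t;

struct Page {
  Address object_area_start;
  Address object_area_end;
  Address allocation_top;

  Address ObjectAreaStart() const { return object_area_start; }
  Address ObjectAreaEnd() const { return object_area_end; }
  Address AllocationTop() const { return allocation_top; }
};

class FixedSpaceSketch {
 public:
  explicit FixedSpaceSketch(int page_extra) : page_extra_(page_extra) {}

  // Pattern 1: compute the usable end of a page in exactly one place, so
  // every assert and size computation agrees on where allocation stops.
  Address PageAllocationLimit(const Page* p) const {
    return p->ObjectAreaEnd() - page_extra_;
  }

  // Pattern 2: if a page that is not completely full sits in the middle of
  // the iterated page list, its unused tail must be covered by a filler
  // "object" so iterators that walk object by object stay in bounds.
  // This sketch only reports the filler; V8 writes a FreeListNode there.
  void CoverUnusedTail(const Page* p) const {
    int size_in_bytes =
        static_cast<int>(PageAllocationLimit(p) - p->AllocationTop());
    if (size_in_bytes > 0) {
      std::printf("would place a %d-byte filler at %p\n", size_in_bytes,
                  reinterpret_cast<void*>(p->AllocationTop()));
    }
  }

 private:
  int page_extra_;  // Bytes wasted at the end of every page (0 in old space).
};

int main() {
  // A made-up 8 KB page whose last 32 bytes are unusable (page_extra_ == 32)
  // and whose allocation top stopped 192 bytes short of the object area end.
  Page page{0x10000, 0x10000 + 8192, 0x10000 + 8000};
  FixedSpaceSketch space(32);
  assert(space.PageAllocationLimit(&page) == page.ObjectAreaEnd() - 32);
  space.CoverUnusedTail(&page);  // 8192 - 32 - 8000 = 160 bytes to cover.
  return 0;
}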
