Chromium Code Reviews

Unified Diff: src/spaces.cc

Issue 1683001: Put empty pages discovered during sweeping to the end of the list of pages... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 8 months ago
 // Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 506 matching lines...)
   size_t chunk_size = chunks_[chunk_id].size();
   Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
   ASSERT(pages_in_chunk <=
          ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
 #endif

   Address page_addr = low;
   for (int i = 0; i < pages_in_chunk; i++) {
     Page* p = Page::FromAddress(page_addr);
     p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
-    p->is_normal_page = 1;
+    p->SetIsLargeObjectPage(false);
     page_addr += Page::kPageSize;
   }

   // Set the next page of the last page to 0.
   Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
   last_page->opaque_header = OffsetFrom(0) | chunk_id;

   return Page::FromAddress(low);
 }
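The opaque_header assignments above pack two values into one word: pages are Page::kPageSize-aligned, so the low bits of the next page's address are always zero and can carry the chunk id, and the last page in a chunk stores 0 as the next address. A standalone sketch of that encoding, with hypothetical names and an illustrative 8 KB alignment (not V8 code):

#include <cassert>
#include <cstdint>

constexpr uintptr_t kPageSize = 8 * 1024;          // assumed page alignment
constexpr uintptr_t kChunkIdMask = kPageSize - 1;  // low bits carry the chunk id

struct SketchPage {
  uintptr_t opaque_header;  // next page address | chunk id

  uintptr_t NextAddress() const { return opaque_header & ~kChunkIdMask; }
  int ChunkId() const { return static_cast<int>(opaque_header & kChunkIdMask); }
};

int main() {
  SketchPage p;
  p.opaque_header = (3 * kPageSize) | 42;  // next page at 3 * kPageSize, chunk 42
  assert(p.NextAddress() == 3 * kPageSize);
  assert(p.ChunkId() == 42);
  p.opaque_header = 0 | 42;                // last page in chunk: next address is 0
  assert(p.NextAddress() == 0 && p.ChunkId() == 42);
  return 0;
}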

(...skipping 23 matching lines...)
     first_page = GetNextPage(FindLastPageInSameChunk(first_page));

     // Free the current chunk.
     DeleteChunk(chunk_id);
   }

   return page_to_return;
 }


+void MemoryAllocator::FreeAllPages(PagedSpace* space) {
+  for (int i = 0, length = chunks_.length(); i < length; i++) {
+    if (chunks_[i].owner() == space) {
+      DeleteChunk(i);
+    }
+  }
+}
+
+
 void MemoryAllocator::DeleteChunk(int chunk_id) {
   ASSERT(IsValidChunk(chunk_id));

   ChunkInfo& c = chunks_[chunk_id];

   // We cannot free a chunk contained in the initial chunk because it was not
   // allocated with AllocateRawMemory. Instead we uncommit the virtual
   // memory.
   if (InInitialChunk(c.address())) {
     // TODO(1240712): VirtualMemory::Uncommit has a return value which
(...skipping 34 matching lines...)

 #ifdef DEBUG
 void MemoryAllocator::ReportStatistics() {
   float pct = static_cast<float>(capacity_ - size_) / capacity_;
   PrintF("  capacity: %d, used: %d, available: %%%d\n\n",
          capacity_, size_, static_cast<int>(pct*100));
 }
 #endif


+void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
+                                                 Page** first_page,
+                                                 Page** last_page,
+                                                 Page** last_page_in_use) {
+  Page* first = NULL;
+  Page* last = NULL;
+
+  for (int i = 0, length = chunks_.length(); i < length; i++) {
+    ChunkInfo& chunk = chunks_[i];
+
+    if (chunk.owner() == space) {
+      if (first == NULL) {
+        Address low = RoundUp(chunk.address(), Page::kPageSize);
+        first = Page::FromAddress(low);
+      }
+      last = RelinkPagesInChunk(i,
+                                chunk.address(),
+                                chunk.size(),
+                                last,
+                                last_page_in_use);
+    }
+  }
+
+  if (first_page != NULL) {
+    *first_page = first;
+  }
+
+  if (last_page != NULL) {
+    *last_page = last;
+  }
+}
+
+
+Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
+                                          Address chunk_start,
+                                          int chunk_size,
+                                          Page* prev,
+                                          Page** last_page_in_use) {
+  Address page_addr = RoundUp(chunk_start, Page::kPageSize);
+  int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);
+
+  if (prev->is_valid()) {
+    SetNextPage(prev, Page::FromAddress(page_addr));
+  }
+
+  for (int i = 0; i < pages_in_chunk; i++) {
+    Page* p = Page::FromAddress(page_addr);
+    p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
+    page_addr += Page::kPageSize;
+
+    if (p->WasInUseBeforeMC()) {
+      *last_page_in_use = p;
+    }
+  }
+
+  // Set the next page of the last page to 0.
+  Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
+  last_page->opaque_header = OffsetFrom(0) | chunk_id;
+
+  if (last_page->WasInUseBeforeMC()) {
+    *last_page_in_use = last_page;
+  }
+
+  return last_page;
+}
+
+
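The two functions added above rebuild the page list so that it again follows chunk allocation order. Stripped of V8's page-header encoding, the core operation is an append per chunk; a standalone sketch with hypothetical types (not V8 code):

#include <vector>

struct PageNode { PageNode* next = nullptr; };
struct ChunkSketch { std::vector<PageNode*> pages; };  // pages in address order

// Rebuild the page list by walking chunks in allocation order and splicing
// each chunk's pages back-to-back; returns the new head of the list.
PageNode* RelinkInChunkOrder(std::vector<ChunkSketch>& chunks) {
  PageNode* first = nullptr;
  PageNode* last = nullptr;
  for (ChunkSketch& c : chunks) {
    for (PageNode* p : c.pages) {
      if (last != nullptr) last->next = p; else first = p;
      last = p;
    }
  }
  if (last != nullptr) last->next = nullptr;  // terminate the relinked list
  return first;
}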
 // -----------------------------------------------------------------------------
 // PagedSpace implementation

 PagedSpace::PagedSpace(int max_capacity,
                        AllocationSpace id,
                        Executability executable)
     : Space(id, executable) {
   max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
                   * Page::kObjectAreaSize;
   accounting_stats_.Clear();
(...skipping 35 matching lines...)
   // Sequentially initialize remembered sets in the newly allocated
   // pages and cache the current last page in the space.
   for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
     p->ClearRSet();
     last_page_ = p;
   }

   // Use first_page_ for allocation.
   SetAllocationInfo(&allocation_info_, first_page_);

+  page_list_is_chunk_ordered_ = true;
+
   return true;
 }


 bool PagedSpace::HasBeenSetup() {
   return (Capacity() > 0);
 }


 void PagedSpace::TearDown() {
-  first_page_ = MemoryAllocator::FreePages(first_page_);
-  ASSERT(!first_page_->is_valid());
-
+  MemoryAllocator::FreeAllPages(this);
+  first_page_ = NULL;
   accounting_stats_.Clear();
 }


 #ifdef ENABLE_HEAP_PROTECTION

 void PagedSpace::Protect() {
   Page* page = first_page_;
   while (page->is_valid()) {
     MemoryAllocator::ProtectChunkFromPage(page);
(...skipping 164 matching lines...)
   int count = 0;
   for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
     count++;
   }
   return count;
 }
 #endif


 void PagedSpace::Shrink() {
+  if (!page_list_is_chunk_ordered_) {
+    // We cannot shrink the space if the page list is not chunk-ordered
+    // (see the comment on class MemoryAllocator for the definition).
+    return;
+  }
+
   // Release half of free pages.
   Page* top_page = AllocationTopPage();
   ASSERT(top_page->is_valid());

   // Count the number of pages we would like to free.
   int pages_to_free = 0;
   for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
     pages_to_free++;
   }

(...skipping 888 matching lines...)
   head_ = node->next();
   available_ -= object_size_;
   return node;
 }


 // -----------------------------------------------------------------------------
 // OldSpace implementation

 void OldSpace::PrepareForMarkCompact(bool will_compact) {
+  // Call the superclass's prepare first.
+  PagedSpace::PrepareForMarkCompact(will_compact);
+
   if (will_compact) {
     // Reset relocation info. During a compacting collection, everything in
     // the space is considered 'available' and we will rediscover live data
     // and waste during the collection.
     MCResetRelocationInfo();
     ASSERT(Available() == Capacity());
   } else {
     // During a non-compacting collection, everything below the linear
     // allocation pointer is considered allocated (everything above is
     // available) and we will rediscover available and wasted bytes during
(...skipping 50 matching lines...)
 bool NewSpace::ReserveSpace(int bytes) {
   // We can't reliably unpack a partial snapshot that needs more new space
   // than the minimum NewSpace size.
   ASSERT(bytes <= InitialCapacity());
   Address limit = allocation_info_.limit;
   Address top = allocation_info_.top;
   return limit - top >= bytes;
 }


+void PagedSpace::FreePages(Page* prev, Page* last) {
+  if (last == AllocationTopPage()) {
+    // Pages are already at the end of used pages.
+    return;
+  }
+
+  Page* first = NULL;
+
+  // Remove pages from the list.
+  if (prev == NULL) {
+    first = first_page_;
+    first_page_ = last->next_page();
+  } else {
+    first = prev->next_page();
+    MemoryAllocator::SetNextPage(prev, last->next_page());
+  }
+
+  // Attach it after the last page.
+  MemoryAllocator::SetNextPage(last_page_, first);
+  last_page_ = last;
+  MemoryAllocator::SetNextPage(last, NULL);
+
+  // Clean them up.
+  do {
+    first->ClearRSet();
+    first = first->next_page();
+  } while (first != NULL);
+
+  // Order of pages in this space might no longer be consistent with
+  // order of pages in chunks.
+  page_list_is_chunk_ordered_ = false;
+}
+
+
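PagedSpace::FreePages above is the heart of this change: pages emptied during sweeping are not returned to the allocator but spliced to the tail of the page list, behind the allocation top. On a plain singly linked list the same splice looks like this (a hypothetical sketch, not V8 code):

struct Node { int id; Node* next; };

// Move the sublist (prev, ..., last] to the tail of the list.
// `head` and `tail` are updated in place; prev == nullptr means the
// sublist starts at the head.
void MoveSublistToEnd(Node** head, Node** tail, Node* prev, Node* last) {
  if (last == *tail) return;  // sublist is already at the end
  Node* first = (prev == nullptr) ? *head : prev->next;
  // Unlink the sublist from its current position.
  if (prev == nullptr) *head = last->next; else prev->next = last->next;
  // Reattach it after the current tail and terminate the list.
  (*tail)->next = first;
  *tail = last;
  last->next = nullptr;
}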
+void PagedSpace::PrepareForMarkCompact(bool will_compact) {
+  if (will_compact) {
+    // The MarkCompact collector relies on the WAS_IN_USE_BEFORE_MC page flag
+    // to skip unused pages. Update the flag value for all pages in the space.
+    PageIterator it(this, PageIterator::ALL_PAGES);
+    Page* last_in_use = AllocationTopPage();
+    bool in_use = true;
+
+    while (it.has_next()) {
+      Page* p = it.next();
+      p->SetWasInUseBeforeMC(in_use);
+      if (p == last_in_use) {
+        // We passed the page containing the allocation top. All subsequent
+        // pages are not used.
+        in_use = false;
+      }
+    }
+
+    if (!page_list_is_chunk_ordered_) {
+      Page* new_last_in_use = NULL;
+      MemoryAllocator::RelinkPageListInChunkOrder(this,
+                                                  &first_page_,
+                                                  &last_page_,
+                                                  &new_last_in_use);
+      ASSERT(new_last_in_use != NULL);
+
+      if (new_last_in_use != last_in_use) {
+        // The current allocation top points to a page which is now in the
+        // middle of the page list. We should move the allocation top forward
+        // to the new last used page so various object iterators will continue
+        // to work properly.
+
+        int size_in_bytes =
+            last_in_use->ObjectAreaEnd() - last_in_use->AllocationTop();
+
+        if (size_in_bytes > 0) {
+          // There is still some space left on this page. Create a fake
+          // object which will occupy all the free space on this page.
+          // Otherwise iterators would not be able to scan this page
+          // correctly.
+          FreeListNode* node =
+              FreeListNode::FromAddress(last_in_use->AllocationTop());
+          node->set_size(last_in_use->ObjectAreaEnd() -
+                         last_in_use->AllocationTop());
+        }
+
+        // The new last-in-use page was in the middle of the list before
+        // sorting, so it is full.
+        SetTop(new_last_in_use->AllocationTop(),
+               new_last_in_use->AllocationTop());
+
+        ASSERT(AllocationTopPage() == new_last_in_use);
+      }
+
+      page_list_is_chunk_ordered_ = true;
+    }
+  }
+}
+
+
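When relinking puts a different page under the allocation top, the leftover space on the old top page is covered with a free-list node so that linear iterators still see a well-formed run of objects. The filler idea in isolation (a sketch with a hypothetical layout, not V8 code):

#include <cstddef>
#include <cstdint>

// A filler object whose first word records its own size, so an iterator
// can skip over it; assumes the gap is at least one word wide.
struct FreeSpaceFiller {
  size_t size_in_bytes;
};

// Cover [top, end) with a filler so object iteration can step over the gap.
void FillGap(uint8_t* top, uint8_t* end) {
  if (end - top < static_cast<ptrdiff_t>(sizeof(FreeSpaceFiller))) return;
  auto* filler = reinterpret_cast<FreeSpaceFiller*>(top);
  filler->size_in_bytes = static_cast<size_t>(end - top);
  // An iterator positioned at `top` reads the size and jumps straight to `end`.
}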
 bool PagedSpace::ReserveSpace(int bytes) {
   Address limit = allocation_info_.limit;
   Address top = allocation_info_.top;
   if (limit - top >= bytes) return true;

   // There wasn't enough space in the current page. Let's put the rest
   // of the page on the free list and start a fresh page.
   PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));

   Page* reserved_page = TopPageOf(allocation_info_);
(...skipping 391 matching lines...)
 }


 void OldSpace::PrintRSet() { DoPrintRSet("old"); }
 #endif

 // -----------------------------------------------------------------------------
 // FixedSpace implementation

 void FixedSpace::PrepareForMarkCompact(bool will_compact) {
+  // Call the superclass's prepare first.
+  PagedSpace::PrepareForMarkCompact(will_compact);
+
   if (will_compact) {
     // Reset relocation info.
     MCResetRelocationInfo();

     // During a compacting collection, everything in the space is considered
     // 'available' (set by the call to MCResetRelocationInfo) and we will
     // rediscover live and wasted bytes during the collection.
     ASSERT(Available() == Capacity());
   } else {
     // During a non-compacting collection, everything below the linear
(...skipping 322 matching lines...)
   first_chunk_ = chunk;

   // Set the object address and size in the page header and clear its
   // remembered set.
   Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
   Address object_address = page->ObjectAreaStart();
   // Clear the low order bit of the second word in the page to flag it as a
   // large object page. If the chunk_size happened to be written there, its
   // low order bit should already be clear.
   ASSERT((chunk_size & 0x1) == 0);
-  page->is_normal_page &= ~0x1;
+  page->SetIsLargeObjectPage(true);
   page->ClearRSet();
   int extra_bytes = requested_size - object_size;
   if (extra_bytes > 0) {
     // The extra memory for the remembered set should be cleared.
     memset(object_address + object_size, 0, extra_bytes);
   }

   return HeapObject::FromAddress(object_address);
 }
(...skipping 254 matching lines...)
                reinterpret_cast<Object**>(object->address()
                                           + Page::kObjectAreaSize),
                allocation_top);
       PrintF("\n");
     }
   }
 }
 #endif  // DEBUG

 } }  // namespace v8::internal