| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 743 matching lines...) |
| 754 int count = 0; | 754 int count = 0; |
| 755 while (it.has_next()) { | 755 while (it.has_next()) { |
| 756 it.next(); | 756 it.next(); |
| 757 count++; | 757 count++; |
| 758 } | 758 } |
| 759 return count; | 759 return count; |
| 760 } | 760 } |
| 761 #endif | 761 #endif |
| 762 | 762 |
| 763 | 763 |
| 764 void PagedSpace::Shrink() { | 764 void PagedSpace::ReleasePage(Page* page) { |
| 765 // TODO(1614) Not implemented. | 765 ASSERT(page->LiveBytes() == 0); |
| 766 page->Unlink(); |
| 767 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { |
| 768 heap()->isolate()->memory_allocator()->Free(page); |
| 769 } else { |
| 770 heap()->QueueMemoryChunkForFree(page); |
| 771 } |
| 772 |
| 773 ASSERT(Capacity() > 0); |
| 774 ASSERT(Capacity() % Page::kObjectAreaSize == 0); |
| 775 accounting_stats_.ShrinkSpace(Page::kObjectAreaSize); |
| 766 } | 776 } |
| 767 | 777 |
| 768 | 778 |
| 779 void PagedSpace::ReleaseAllUnusedPages() { |
| 780 PageIterator it(this); |
| 781 while (it.has_next()) { |
| 782 Page* page = it.next(); |
| 783 if (page->LiveBytes() == 0) { |
| 784 ReleasePage(page); |
| 785 } |
| 786 } |
| 787 heap()->FreeQueuedChunks(); |
| 788 } |
| 789 |
| 790 |
| 769 #ifdef DEBUG | 791 #ifdef DEBUG |
| 770 void PagedSpace::Print() { } | 792 void PagedSpace::Print() { } |
| 771 #endif | 793 #endif |
| 772 | 794 |
| 773 | 795 |
| 774 #ifdef DEBUG | 796 #ifdef DEBUG |
| 775 void PagedSpace::Verify(ObjectVisitor* visitor) { | 797 void PagedSpace::Verify(ObjectVisitor* visitor) { |
| 776 // We can only iterate over the pages if they were swept precisely. | 798 // We can only iterate over the pages if they were swept precisely. |
| 777 if (was_swept_conservatively_) return; | 799 if (was_swept_conservatively_) return; |
| 778 | 800 |
| (...skipping 861 matching lines...) |
| 1640 | 1662 |
| 1641 void FreeList::Reset() { | 1663 void FreeList::Reset() { |
| 1642 available_ = 0; | 1664 available_ = 0; |
| 1643 small_list_ = NULL; | 1665 small_list_ = NULL; |
| 1644 medium_list_ = NULL; | 1666 medium_list_ = NULL; |
| 1645 large_list_ = NULL; | 1667 large_list_ = NULL; |
| 1646 huge_list_ = NULL; | 1668 huge_list_ = NULL; |
| 1647 } | 1669 } |
| 1648 | 1670 |
| 1649 | 1671 |
| 1650 int PagedSpace::FreeOrUnmapPage(Page* page, Address start, int size_in_bytes) { | |
| 1651 Heap* heap = page->heap(); | |
| 1652 // TODO(gc): When we count the live bytes per page we can free empty pages | |
| 1653 // instead of sweeping. At that point this if should be turned into an | |
| 1654 // ASSERT that the area to be freed cannot be the entire page. | |
| 1655 if (size_in_bytes == Page::kObjectAreaSize && | |
| 1656 heap->ShouldWeGiveBackAPageToTheOS()) { | |
| 1657 page->Unlink(); | |
| 1658 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { | |
| 1659 heap->isolate()->memory_allocator()->Free(page); | |
| 1660 } else { | |
| 1661 heap->QueueMemoryChunkForFree(page); | |
| 1662 } | |
| 1663 return 0; | |
| 1664 } | |
| 1665 return Free(start, size_in_bytes); | |
| 1666 } | |
| 1667 | |
| 1668 | |
| 1669 int FreeList::Free(Address start, int size_in_bytes) { | 1672 int FreeList::Free(Address start, int size_in_bytes) { |
| 1670 if (size_in_bytes == 0) return 0; | 1673 if (size_in_bytes == 0) return 0; |
| 1671 FreeListNode* node = FreeListNode::FromAddress(start); | 1674 FreeListNode* node = FreeListNode::FromAddress(start); |
| 1672 node->set_size(heap_, size_in_bytes); | 1675 node->set_size(heap_, size_in_bytes); |
| 1673 | 1676 |
| 1674 // Early return to drop too-small blocks on the floor. | 1677 // Early return to drop too-small blocks on the floor. |
| 1675 if (size_in_bytes < kSmallListMin) return size_in_bytes; | 1678 if (size_in_bytes < kSmallListMin) return size_in_bytes; |
| 1676 | 1679 |
| 1677 // Insert other blocks at the head of a free list of the appropriate | 1680 // Insert other blocks at the head of a free list of the appropriate |
| 1678 // magnitude. | 1681 // magnitude. |
| (...skipping 232 matching lines...) |
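FreeList::Free() above feeds freed memory into size-segregated lists: blocks smaller than kSmallListMin are dropped on the floor and reported back as waste, while everything else is pushed onto the head of the list for its size class. The following minimal sketch shows that scheme with made-up class boundaries and a FreeNode passed in explicitly rather than built inside the freed memory; it is not the actual V8 FreeList.

// Minimal sketch of a size-segregated free list in the style of Free() above.
#include <cstddef>
#include <cstdio>

struct FreeNode {
  size_t size;
  FreeNode* next;
};

class SegregatedFreeList {
 public:
  // Returns the number of bytes that could not be reused (the "waste"):
  // too-small blocks are dropped entirely, mirroring the early return above.
  size_t Free(FreeNode* node, size_t size_in_bytes) {
    if (size_in_bytes < kSmallMin) return size_in_bytes;  // dropped on the floor
    node->size = size_in_bytes;
    FreeNode** head = ListFor(size_in_bytes);
    node->next = *head;                 // insert at the head of the size class
    *head = node;
    available_ += size_in_bytes;
    return 0;
  }

  size_t available() const { return available_; }

 private:
  // Hypothetical size-class boundaries, not V8's constants.
  static const size_t kSmallMin = 32;
  static const size_t kMediumMin = 256;
  static const size_t kLargeMin = 2048;

  FreeNode** ListFor(size_t size) {
    if (size < kMediumMin) return &small_;
    if (size < kLargeMin) return &medium_;
    return &large_;
  }

  FreeNode* small_ = nullptr;
  FreeNode* medium_ = nullptr;
  FreeNode* large_ = nullptr;
  size_t available_ = 0;
};

int main() {
  SegregatedFreeList list;
  FreeNode a, b;
  list.Free(&a, 16);    // too small: wasted, never linked
  list.Free(&b, 512);   // goes onto the medium list
  std::printf("available: %zu\n", list.available());  // expect 512
  return 0;
}

Insertion at the head keeps Free() O(1); an allocator built on top of this would search the smallest size class that can satisfy a request.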
| 1911 // We don't have a linear allocation area while sweeping. It will be restored | 1914 // We don't have a linear allocation area while sweeping. It will be restored |
| 1912 // on the first allocation after the sweep. | 1915 // on the first allocation after the sweep. |
| 1913 // Mark the old linear allocation area with a free space map so it can be | 1916 // Mark the old linear allocation area with a free space map so it can be |
| 1914 // skipped when scanning the heap. | 1917 // skipped when scanning the heap. |
| 1915 int old_linear_size = static_cast<int>(limit() - top()); | 1918 int old_linear_size = static_cast<int>(limit() - top()); |
| 1916 Free(top(), old_linear_size); | 1919 Free(top(), old_linear_size); |
| 1917 SetTop(NULL, NULL); | 1920 SetTop(NULL, NULL); |
| 1918 | 1921 |
| 1919 // Stop lazy sweeping and clear marking bits for unswept pages. | 1922 // Stop lazy sweeping and clear marking bits for unswept pages. |
| 1920 if (first_unswept_page_ != NULL) { | 1923 if (first_unswept_page_ != NULL) { |
| 1921 Page* last = last_unswept_page_->next_page(); | 1924 Page* last = last_unswept_page_; |
| 1922 Page* p = first_unswept_page_; | 1925 Page* p = first_unswept_page_; |
| 1923 do { | 1926 do { |
| 1924 // Do not use ShouldBeSweptLazily predicate here. | 1927 // Do not use ShouldBeSweptLazily predicate here. |
| 1925 // New evacuation candidates were selected but they still have | 1928 // New evacuation candidates were selected but they still have |
| 1926 // to be swept before collection starts. | 1929 // to be swept before collection starts. |
| 1927 if (!p->WasSwept()) { | 1930 if (!p->WasSwept()) { |
| 1928 Bitmap::Clear(p); | 1931 Bitmap::Clear(p); |
| 1929 if (FLAG_gc_verbose) { | 1932 if (FLAG_gc_verbose) { |
| 1930 PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n", | 1933 PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n", |
| 1931 reinterpret_cast<intptr_t>(p)); | 1934 reinterpret_cast<intptr_t>(p)); |
| (...skipping 36 matching lines...) |
| 1968 // doesn't know that memory was 'promised' to large object space. | 1971 // doesn't know that memory was 'promised' to large object space. |
| 1969 bool LargeObjectSpace::ReserveSpace(int bytes) { | 1972 bool LargeObjectSpace::ReserveSpace(int bytes) { |
| 1970 return heap()->OldGenerationSpaceAvailable() >= bytes; | 1973 return heap()->OldGenerationSpaceAvailable() >= bytes; |
| 1971 } | 1974 } |
| 1972 | 1975 |
| 1973 | 1976 |
| 1974 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) { | 1977 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) { |
| 1975 if (IsSweepingComplete()) return true; | 1978 if (IsSweepingComplete()) return true; |
| 1976 | 1979 |
| 1977 intptr_t freed_bytes = 0; | 1980 intptr_t freed_bytes = 0; |
| 1978 Page* last = last_unswept_page_->next_page(); | 1981 Page* last = last_unswept_page_; |
| 1979 Page* p = first_unswept_page_; | 1982 Page* p = first_unswept_page_; |
| 1980 do { | 1983 do { |
| 1981 Page* next_page = p->next_page(); | 1984 Page* next_page = p->next_page(); |
| 1982 if (ShouldBeSweptLazily(p)) { | 1985 if (ShouldBeSweptLazily(p)) { |
| 1983 if (FLAG_gc_verbose) { | 1986 if (FLAG_gc_verbose) { |
| 1984 PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n", | 1987 PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n", |
| 1985 reinterpret_cast<intptr_t>(p)); | 1988 reinterpret_cast<intptr_t>(p)); |
| 1986 } | 1989 } |
| 1987 freed_bytes += MarkCompactCollector::SweepConservatively(this, p); | 1990 freed_bytes += MarkCompactCollector::SweepConservatively(this, p); |
| 1988 } | 1991 } |
| (...skipping 524 matching lines...) |
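AdvanceSweeper() above performs budgeted lazy sweeping: starting at first_unswept_page_, it sweeps one page at a time until at least bytes_to_sweep bytes have been freed or the last unswept page has been processed. Here is a simplified sketch of that loop under stated assumptions: an index cursor over a vector stands in for V8's page links, and SweepPage() stands in for MarkCompactCollector::SweepConservatively().

// Sketch of a budgeted lazy-sweeping advance, simplified from the loop above.
#include <cstdio>
#include <vector>

struct Page {
  long reclaimable_bytes;  // bytes a sweep of this page would free
  bool swept;
};

// Stand-in for the conservative per-page sweep; returns the bytes freed.
long SweepPage(Page* p) {
  p->swept = true;
  return p->reclaimable_bytes;
}

// Returns true once no unswept pages remain; `cursor` tracks the next page
// to sweep, playing the role of first_unswept_page_.
bool AdvanceSweeper(std::vector<Page*>& pages, size_t& cursor,
                    long bytes_to_sweep) {
  long freed = 0;
  while (cursor < pages.size() && freed < bytes_to_sweep) {
    freed += SweepPage(pages[cursor]);
    ++cursor;
  }
  return cursor == pages.size();
}

int main() {
  Page a{100, false}, b{300, false}, c{50, false};
  std::vector<Page*> pages = {&a, &b, &c};
  size_t cursor = 0;
  bool done = AdvanceSweeper(pages, cursor, 250);  // sweeps a and b, stops
  std::printf("done: %d, next unswept index: %zu\n", done, cursor);
  return 0;
}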
| 2513 object->ShortPrint(); | 2516 object->ShortPrint(); |
| 2514 PrintF("\n"); | 2517 PrintF("\n"); |
| 2515 } | 2518 } |
| 2516 printf(" --------------------------------------\n"); | 2519 printf(" --------------------------------------\n"); |
| 2517 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 2520 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 2518 } | 2521 } |
| 2519 | 2522 |
| 2520 #endif // DEBUG | 2523 #endif // DEBUG |
| 2521 | 2524 |
| 2522 } } // namespace v8::internal | 2525 } } // namespace v8::internal |