Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
| 9 #include "src/code-stubs.h" | 9 #include "src/code-stubs.h" |
| 10 #include "src/compilation-cache.h" | 10 #include "src/compilation-cache.h" |
| (...skipping 623 matching lines...) | |
| 634 case LO_SPACE: | 634 case LO_SPACE: |
| 635 return "LO_SPACE"; | 635 return "LO_SPACE"; |
| 636 default: | 636 default: |
| 637 UNREACHABLE(); | 637 UNREACHABLE(); |
| 638 } | 638 } |
| 639 | 639 |
| 640 return NULL; | 640 return NULL; |
| 641 } | 641 } |
| 642 | 642 |
| 643 | 643 |
| 644 // Returns zero for pages that have so little fragmentation that it is not | |
| 645 // worth defragmenting them. Otherwise a positive integer that gives an | |
| 646 // estimate of fragmentation on an arbitrary scale. | |
| 647 static int FreeListFragmentation(PagedSpace* space, Page* p) { | |
| 648 // If page was not swept then there are no free list items on it. | |
| 649 if (!p->WasSwept()) { | |
| 650 if (FLAG_trace_fragmentation_verbose) { | |
| 651 PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p), | |
| 652 AllocationSpaceName(space->identity()), p->LiveBytes()); | |
| 653 } | |
| 654 return FLAG_always_compact ? 1 : 0; | |
| 655 } | |
| 656 | |
| 657 PagedSpace::SizeStats sizes; | |
| 658 space->ObtainFreeListStatistics(p, &sizes); | |
| 659 | |
| 660 intptr_t ratio; | |
| 661 intptr_t ratio_threshold; | |
| 662 intptr_t area_size = space->AreaSize(); | |
| 663 if (space->identity() == CODE_SPACE) { | |
| 664 ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / area_size; | |
| 665 ratio_threshold = 10; | |
| 666 } else { | |
| 667 ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / area_size; | |
| 668 ratio_threshold = 15; | |
| 669 } | |
| 670 | |
| 671 if (FLAG_trace_fragmentation_verbose) { | |
| 672 PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n", | |
| 673 reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()), | |
| 674 static_cast<int>(sizes.small_size_), | |
| 675 static_cast<double>(sizes.small_size_ * 100) / area_size, | |
| 676 static_cast<int>(sizes.medium_size_), | |
| 677 static_cast<double>(sizes.medium_size_ * 100) / area_size, | |
| 678 static_cast<int>(sizes.large_size_), | |
| 679 static_cast<double>(sizes.large_size_ * 100) / area_size, | |
| 680 static_cast<int>(sizes.huge_size_), | |
| 681 static_cast<double>(sizes.huge_size_ * 100) / area_size, | |
| 682 (ratio > ratio_threshold) ? "[fragmented]" : ""); | |
| 683 } | |
| 684 | |
| 685 if (FLAG_always_compact && sizes.Total() != area_size) { | |
| 686 return 1; | |
| 687 } | |
| 688 | |
| 689 if (ratio <= ratio_threshold) return 0; // Not fragmented. | |
| 690 | |
| 691 return static_cast<int>(ratio - ratio_threshold); | |
| 692 } | |
| 693 | |
| 694 | |
| 695 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) { | 644 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) { |
| 696 DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE); | 645 DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE); |
| 697 | 646 |
| 698 static const int kMaxMaxEvacuationCandidates = 1000; | |
| 699 int number_of_pages = space->CountTotalPages(); | 647 int number_of_pages = space->CountTotalPages(); |
| 700 int max_evacuation_candidates = | 648 int area_size = space->AreaSize(); |
| 701 static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1); | |
| 702 | 649 |
| 703 if (FLAG_stress_compaction || FLAG_always_compact) { | 650 // Pairs of (live_bytes_in_page, page). |
| 704 max_evacuation_candidates = kMaxMaxEvacuationCandidates; | 651 std::vector<std::pair<int, Page*> > pages; |
| 705 } | 652 pages.reserve(number_of_pages); |
| 706 | |
| 707 class Candidate { | |
| 708 public: | |
| 709 Candidate() : fragmentation_(0), page_(NULL) {} | |
| 710 Candidate(int f, Page* p) : fragmentation_(f), page_(p) {} | |
| 711 | |
| 712 int fragmentation() { return fragmentation_; } | |
| 713 Page* page() { return page_; } | |
| 714 | |
| 715 private: | |
| 716 int fragmentation_; | |
| 717 Page* page_; | |
| 718 }; | |
| 719 | |
| 720 enum CompactionMode { COMPACT_FREE_LISTS, REDUCE_MEMORY_FOOTPRINT }; | |
| 721 | |
| 722 CompactionMode mode = COMPACT_FREE_LISTS; | |
| 723 | |
| 724 intptr_t reserved = number_of_pages * space->AreaSize(); | |
| 725 intptr_t over_reserved = reserved - space->SizeOfObjects(); | |
| 726 static const intptr_t kFreenessThreshold = 50; | |
| 727 | |
| 728 if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) { | |
| 729 // If reduction of memory footprint was requested, we are aggressive | |
| 730 // about choosing pages to free. We expect that half-empty pages | |
| 731 // are easier to compact so slightly bump the limit. | |
| 732 mode = REDUCE_MEMORY_FOOTPRINT; | |
| 733 max_evacuation_candidates += 2; | |
| 734 } | |
| 735 | |
| 736 | |
| 737 if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) { | |
| 738 // If over-usage is very high (more than a third of the space), we | |
| 739 // try to free all mostly empty pages. We expect that almost empty | |
| 740 // pages are even easier to compact so bump the limit even more. | |
| 741 mode = REDUCE_MEMORY_FOOTPRINT; | |
| 742 max_evacuation_candidates *= 2; | |
| 743 } | |
| 744 | |
| 745 if (FLAG_always_compact) { | |
| 746 max_evacuation_candidates = kMaxMaxEvacuationCandidates; | |
| 747 } | |
| 748 | |
| 749 if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) { | |
| 750 PrintF( | |
| 751 "Estimated over reserved memory: %.1f / %.1f MB (threshold %d), " | |
| 752 "evacuation candidate limit: %d\n", | |
| 753 static_cast<double>(over_reserved) / MB, | |
| 754 static_cast<double>(reserved) / MB, | |
| 755 static_cast<int>(kFreenessThreshold), max_evacuation_candidates); | |
| 756 } | |
| 757 | |
| 758 intptr_t estimated_release = 0; | |
| 759 | |
| 760 if (FLAG_trace_fragmentation && | |
| 761 max_evacuation_candidates >= kMaxMaxEvacuationCandidates) { | |
| 762 PrintF("Hit max page compaction limit of %d pages\n", | |
| 763 kMaxMaxEvacuationCandidates); | |
| 764 } | |
| 765 max_evacuation_candidates = | |
| 766 Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates); | |
| 767 | |
| 768 std::vector<Candidate> candidates(max_evacuation_candidates); | |
| 769 | |
| 770 int count = 0; | |
| 771 int fragmentation = 0; | |
| 772 int page_number = 0; | |
| 773 int least_index = -1; | |
| 774 | 653 |
| 775 PageIterator it(space); | 654 PageIterator it(space); |
| 776 while (it.has_next()) { | 655 while (it.has_next()) { |
| 777 Page* p = it.next(); | 656 Page* p = it.next(); |
| 778 if (p->NeverEvacuate()) continue; | 657 if (p->NeverEvacuate()) continue; |
| 779 if (p->IsFlagSet(Page::POPULAR_PAGE)) { | 658 if (p->IsFlagSet(Page::POPULAR_PAGE)) { |
| 780 // This page had slots buffer overflow on previous GC, skip it. | 659 // This page had slots buffer overflow on previous GC, skip it. |
| 781 p->ClearFlag(Page::POPULAR_PAGE); | 660 p->ClearFlag(Page::POPULAR_PAGE); |
| 782 continue; | 661 continue; |
| 783 } | 662 } |
| 784 | |
| 785 // Invariant: Evacuation candidates are just created when marking is | 663 // Invariant: Evacuation candidates are just created when marking is |
| 786 // started. At the end of a GC all evacuation candidates are cleared and | 664 // started. At the end of a GC all evacuation candidates are cleared and |
| 787 // their slot buffers are released. | 665 // their slot buffers are released. |
| 788 CHECK(!p->IsEvacuationCandidate()); | 666 CHECK(!p->IsEvacuationCandidate()); |
| 789 CHECK(p->slots_buffer() == NULL); | 667 CHECK(p->slots_buffer() == NULL); |
| 668 DCHECK(p->area_size() == area_size); | |
| 669 int live_bytes = | |
| 670 p->WasSwept() ? p->LiveBytesFromFreeList() : p->LiveBytes(); | |
| 671 pages.push_back(std::make_pair(live_bytes, p)); | |
| 672 } | |
| 790 | 673 |
| 791 if (FLAG_stress_compaction) { | 674 int candidate_count = 0; |
| 792 if (FLAG_manual_evacuation_candidates_selection) { | 675 int total_live_bytes = 0; |
| 793 if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) { | 676 |
| 794 p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING); | 677 bool reduce_memory = |
| 795 fragmentation = 1; | 678 reduce_memory_footprint_ || |
| 796 } | 679 heap()->HasLowAllocationRate( |
| 797 } else { | 680 heap() |
| 798 unsigned int counter = space->heap()->ms_count(); | 681 ->tracer() |
| 799 if ((counter & 1) == (page_number & 1)) fragmentation = 1; | 682 ->CurrentNewSpaceAllocationThroughputInBytesPerMillisecond()); |
|
Hannes Payer (out of office)
2015/05/27 08:49:30
That should now be the overall allocation throughput.
ulan
2015/05/27 11:33:20
Done.
| |
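The exchange above concerns the signal feeding `reduce_memory` in the new hunk: an explicit footprint request, or a low allocation rate reported by the GC tracer. A minimal standalone sketch of that decision follows; the threshold constant and the free-function form of `HasLowAllocationRate` are invented stand-ins (V8's real heuristic lives on `Heap` with its own tuning):

```cpp
#include <cstdio>

// Hypothetical cutoff; V8's actual low-allocation-rate heuristic is
// tuned internally and not reproduced here.
constexpr double kLowThroughputBytesPerMs = 1024.0;

bool HasLowAllocationRate(double throughput_bytes_per_ms) {
  return throughput_bytes_per_ms < kLowThroughputBytesPerMs;
}

// Mirrors the shape of the diff: either an explicit request or an idle
// mutator flips candidate selection into memory-reduction mode.
bool ShouldReduceMemory(bool reduce_memory_footprint_requested,
                        double throughput_bytes_per_ms) {
  return reduce_memory_footprint_requested ||
         HasLowAllocationRate(throughput_bytes_per_ms);
}

int main() {
  std::printf("idle mutator: %s\n",
              ShouldReduceMemory(false, 10.0) ? "reduce" : "normal");
  std::printf("busy mutator: %s\n",
              ShouldReduceMemory(false, 1.0e6) ? "reduce" : "normal");
}
```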
| 800 page_number++; | 683 |
| 684 if (FLAG_stress_compaction || FLAG_manual_evacuation_candidates_selection) { | |
| 685 for (size_t i = 0; i < pages.size(); i++) { | |
| 686 Page* p = pages[i].second; | |
| 687 if (((i % 2 == 0) && FLAG_stress_compaction) || | |
| 688 p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) { | |
| 689 candidate_count++; | |
| 690 total_live_bytes += pages[i].first; | |
| 691 p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING); | |
| 692 AddEvacuationCandidate(p); | |
| 801 } | 693 } |
| 802 } else if (mode == REDUCE_MEMORY_FOOTPRINT && !FLAG_always_compact) { | 694 } |
| 803 // Don't try to release too many pages. | 695 } else { |
| 804 if (estimated_release >= over_reserved) { | 696 const int kTargetFragmentationPercent = 50; |
|
Hannes Payer (out of office)
2015/05/27 08:49:30
Don't you wanna use the FLAG_target_fragmentation flags here?
ulan
2015/05/27 11:33:20
I removed the flags to be consistent with the "*ForReduceMemory" constants.
| |
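For reference, the two constant pairs discussed above set how aggressive selection is: reduce-memory mode lowers the free-space bar (20% instead of 50%) and enlarges the evacuation budget (12 pages' worth instead of 4). A worked example of the `free_bytes_threshold` arithmetic from the new hunk; the 1 MB area size is a round stand-in for `space->AreaSize()`, not V8's exact page area:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  // Stand-in for space->AreaSize(); real V8 page areas are slightly
  // under 1 MB, this round number is just for the arithmetic.
  const intptr_t area_size = 1024 * 1024;

  // Normal mode: only pages that are at least half free qualify.
  const int kTargetFragmentationPercent = 50;
  // Reduce-memory mode: pages with just 20% free already qualify.
  const int kTargetFragmentationPercentForReduceMemory = 20;

  // Same expression as the diff: percent * (area_size / 100).
  intptr_t normal_threshold =
      kTargetFragmentationPercent * (area_size / 100);
  intptr_t reduce_threshold =
      kTargetFragmentationPercentForReduceMemory * (area_size / 100);

  std::printf("normal: page needs >= %ld free bytes\n",
              static_cast<long>(normal_threshold));
  std::printf("reduce-memory: page needs >= %ld free bytes\n",
              static_cast<long>(reduce_threshold));
  return 0;
}
```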
| 805 continue; | 697 const int kMaxEvacuatedBytes = 4 * Page::kPageSize; |
| 698 | |
| 699 const int kTargetFragmentationPercentForReduceMemory = 20; | |
| 700 const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize; | |
| 701 | |
| 702 int max_evacuated_bytes; | |
| 703 int target_fragmentation_percent; | |
| 704 | |
| 705 if (reduce_memory) { | |
| 706 target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory; | |
| 707 max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory; | |
| 708 } else { | |
| 709 target_fragmentation_percent = kTargetFragmentationPercent; | |
| 710 max_evacuated_bytes = kMaxEvacuatedBytes; | |
| 711 } | |
| 712 intptr_t free_bytes_threshold = | |
| 713 target_fragmentation_percent * (area_size / 100); | |
| 714 | |
| 715 // Sort pages from the most free to the least free, then select | |
| 716 // the first n pages for evacuation such that: | |
| 717 // - the total size of evacuated objects does not exceed the specified | |
| 718 // limit. | |
| 719 // - fragmentation of (n+1)-th page does not exceed the specified limit. | |
| 720 std::sort(pages.begin(), pages.end()); | |
| 721 for (size_t i = 0; i < pages.size(); i++) { | |
| 722 int live_bytes = pages[i].first; | |
| 723 int free_bytes = area_size - live_bytes; | |
| 724 if (FLAG_always_compact || | |
| 725 (free_bytes >= free_bytes_threshold && | |
| 726 total_live_bytes + live_bytes <= max_evacuated_bytes)) { | |
| 727 candidate_count++; | |
| 728 total_live_bytes += live_bytes; | |
| 806 } | 729 } |
| 807 | 730 if (FLAG_trace_fragmentation_verbose) { |
| 808 intptr_t free_bytes = 0; | 731 PrintF( |
| 809 | 732 "Page in %s: %d KB free [fragmented if this >= %d KB], " |
| 810 if (!p->WasSwept()) { | 733 "sum of live bytes in fragmented pages %d KB [max is %d KB]\n", |
| 811 free_bytes = (p->area_size() - p->LiveBytes()); | 734 AllocationSpaceName(space->identity()), |
| 812 } else { | 735 static_cast<int>(free_bytes / KB), |
| 813 PagedSpace::SizeStats sizes; | 736 static_cast<int>(free_bytes_threshold / KB), |
| 814 space->ObtainFreeListStatistics(p, &sizes); | 737 static_cast<int>(total_live_bytes / KB), |
| 815 free_bytes = sizes.Total(); | 738 static_cast<int>(max_evacuated_bytes / KB)); |
| 816 } | 739 } |
| 817 | |
| 818 int free_pct = static_cast<int>(free_bytes * 100) / p->area_size(); | |
| 819 | |
| 820 if (free_pct >= kFreenessThreshold) { | |
| 821 estimated_release += free_bytes; | |
| 822 fragmentation = free_pct; | |
| 823 } else { | |
| 824 fragmentation = 0; | |
| 825 } | |
| 826 | |
| 827 if (FLAG_trace_fragmentation_verbose) { | |
| 828 PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p), | |
| 829 AllocationSpaceName(space->identity()), | |
| 830 static_cast<int>(free_bytes), | |
| 831 static_cast<double>(free_bytes * 100) / p->area_size(), | |
| 832 (fragmentation > 0) ? "[fragmented]" : ""); | |
| 833 } | |
| 834 } else { | |
| 835 fragmentation = FreeListFragmentation(space, p); | |
| 836 } | 740 } |
|
Hannes Payer (out of office)
2015/05/27 08:49:30
This piece needs a comment.
ulan
2015/05/27 11:33:20
Done.
| |
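The comment ulan added (NEW lines 715-719) describes the selection policy: sort pages most-free first, then take a prefix while each page is fragmented enough and the cumulative live-byte budget holds. A self-contained sketch of that policy, with invented page sizes and live-byte counts purely for illustration:

```cpp
#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

int main() {
  const int kAreaSize = 500 * 1024;              // stand-in page area
  const int kTargetFragmentationPercent = 50;    // normal mode
  const int kMaxEvacuatedBytes = 4 * kAreaSize;  // evacuation budget

  // Pairs of (live_bytes, page id); the counts are made up.
  std::vector<std::pair<int, int>> pages = {
      {400 * 1024, 0}, {50 * 1024, 1}, {200 * 1024, 2}, {10 * 1024, 3}};

  const int free_bytes_threshold =
      kTargetFragmentationPercent * (kAreaSize / 100);

  // Ascending sort on live bytes == most free page first.
  std::sort(pages.begin(), pages.end());

  int total_live_bytes = 0;
  std::vector<int> candidates;
  for (const auto& page : pages) {
    const int live_bytes = page.first;
    const int free_bytes = kAreaSize - live_bytes;
    // Take the page only if it is fragmented enough and moving its live
    // objects still fits in the budget.
    if (free_bytes >= free_bytes_threshold &&
        total_live_bytes + live_bytes <= kMaxEvacuatedBytes) {
      total_live_bytes += live_bytes;
      candidates.push_back(page.second);
    }
  }
  std::printf("selected %zu candidates, %d live bytes to move\n",
              candidates.size(), total_live_bytes);
  return 0;
}
```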
| 837 | 741 int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size; |
| 838 if (fragmentation != 0) { | 742 int estimated_released_pages = candidate_count - estimated_new_pages; |
|
Hannes Payer (out of office)
2015/05/27 08:49:30
estimated_released_pages can be negative. That is
ulan
2015/05/27 11:33:20
I think estimated_new_pages <= candidate_count always holds.
| |
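To make ulan's reply concrete (assuming, as the DCHECK above enforces, that every page's area equals `area_size` and live bytes never exceed it): each candidate contributes `live_bytes <= area_size`, so `total_live_bytes <= candidate_count * area_size`, and therefore `estimated_new_pages = ceil(total_live_bytes / area_size) <= candidate_count`. Hence `estimated_released_pages` is never negative, and only the `== 0` case, where compaction would release nothing, needs the guard that resets `candidate_count`.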
| 839 if (count < max_evacuation_candidates) { | 743 // Avoid (compact -> expand) cycles. |
| 840 candidates[count++] = Candidate(fragmentation, p); | 744 if (estimated_released_pages == 0) candidate_count = 0; |
| 841 } else { | 745 for (int i = 0; i < candidate_count; i++) { |
| 842 if (least_index == -1) { | 746 AddEvacuationCandidate(pages[i].second); |
| 843 for (int i = 0; i < max_evacuation_candidates; i++) { | |
| 844 if (least_index == -1 || | |
| 845 candidates[i].fragmentation() < | |
| 846 candidates[least_index].fragmentation()) { | |
| 847 least_index = i; | |
| 848 } | |
| 849 } | |
| 850 } | |
| 851 if (candidates[least_index].fragmentation() < fragmentation) { | |
| 852 candidates[least_index] = Candidate(fragmentation, p); | |
| 853 least_index = -1; | |
| 854 } | |
| 855 } | |
| 856 } | 747 } |
| 857 } | 748 } |
| 858 | 749 |
| 859 for (int i = 0; i < count; i++) { | 750 if (FLAG_trace_fragmentation) { |
| 860 AddEvacuationCandidate(candidates[i].page()); | 751 PrintF( |
| 861 } | 752 "Collected %d evacuation candidates [%d KB live] for space %s " |
| 862 | 753 "[mode %s]\n", |
| 863 if (count > 0 && FLAG_trace_fragmentation) { | 754 candidate_count, static_cast<int>(total_live_bytes / KB), |
| 864 PrintF("Collected %d evacuation candidates for space %s\n", count, | 755 AllocationSpaceName(space->identity()), |
| 865 AllocationSpaceName(space->identity())); | 756 (reduce_memory ? "reduce memory footprint" : "normal")); |
| 866 } | 757 } |
| 867 } | 758 } |
| 868 | 759 |
| 869 | 760 |
| 870 void MarkCompactCollector::AbortCompaction() { | 761 void MarkCompactCollector::AbortCompaction() { |
| 871 if (compacting_) { | 762 if (compacting_) { |
| 872 int npages = evacuation_candidates_.length(); | 763 int npages = evacuation_candidates_.length(); |
| 873 for (int i = 0; i < npages; i++) { | 764 for (int i = 0; i < npages; i++) { |
| 874 Page* p = evacuation_candidates_[i]; | 765 Page* p = evacuation_candidates_[i]; |
| 875 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); | 766 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); |
| (...skipping 3882 matching lines...) | |
| 4758 SlotsBuffer* buffer = *buffer_address; | 4649 SlotsBuffer* buffer = *buffer_address; |
| 4759 while (buffer != NULL) { | 4650 while (buffer != NULL) { |
| 4760 SlotsBuffer* next_buffer = buffer->next(); | 4651 SlotsBuffer* next_buffer = buffer->next(); |
| 4761 DeallocateBuffer(buffer); | 4652 DeallocateBuffer(buffer); |
| 4762 buffer = next_buffer; | 4653 buffer = next_buffer; |
| 4763 } | 4654 } |
| 4764 *buffer_address = NULL; | 4655 *buffer_address = NULL; |
| 4765 } | 4656 } |
| 4766 } | 4657 } |
| 4767 } // namespace v8::internal | 4658 } // namespace v8::internal |
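For context on the final hunk: `DeallocateChain` walks a singly linked chain of slot buffers, freeing each node and then nulling the caller's head pointer. A minimal standalone model of that walk; the `SlotsBuffer` layout and use of `delete` here are invented for illustration (V8 routes deallocation through its `SlotsBufferAllocator`):

```cpp
// Toy stand-in for V8's SlotsBuffer: a singly linked chain of buffers.
struct SlotsBuffer {
  SlotsBuffer* next;
  // ... slot storage would live here ...
};

// Mirrors the shape of DeallocateChain: save each node's successor
// before freeing the node, then clear the caller's head pointer.
void DeallocateChain(SlotsBuffer** buffer_address) {
  SlotsBuffer* buffer = *buffer_address;
  while (buffer != nullptr) {
    SlotsBuffer* next_buffer = buffer->next;
    delete buffer;
    buffer = next_buffer;
  }
  *buffer_address = nullptr;
}

int main() {
  SlotsBuffer* head = new SlotsBuffer{new SlotsBuffer{nullptr}};
  DeallocateChain(&head);
  return head == nullptr ? 0 : 1;  // head is nulled after the call
}
```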