Chromium Code Reviews

Unified Diff: src/heap/mark-compact.cc

Issue 2406363002: [heap] Use size_t in free list and evacuation candidate selection. (Closed)
Patch Set: fix typo (created 4 years, 2 months ago)
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/mark-compact.h"

 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/sys-info.h"
 #include "src/code-stubs.h"
(...skipping 575 matching lines...)
586 return "MAP_SPACE"; 586 return "MAP_SPACE";
587 case LO_SPACE: 587 case LO_SPACE:
588 return "LO_SPACE"; 588 return "LO_SPACE";
589 default: 589 default:
590 UNREACHABLE(); 590 UNREACHABLE();
591 } 591 }
592 592
593 return NULL; 593 return NULL;
594 } 594 }
595 595
-
 void MarkCompactCollector::ComputeEvacuationHeuristics(
-    int area_size, int* target_fragmentation_percent,
-    int* max_evacuated_bytes) {
+    size_t area_size, int* target_fragmentation_percent,
+    size_t* max_evacuated_bytes) {
   // For the memory-reducing and optimize-for-memory modes we define both
   // constants directly.
   const int kTargetFragmentationPercentForReduceMemory = 20;
-  const int kMaxEvacuatedBytesForReduceMemory = 12 * MB;
+  const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
   const int kTargetFragmentationPercentForOptimizeMemory = 20;
-  const int kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
+  const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;

   // For regular mode (which is latency critical) we define less aggressive
   // defaults to start with, and switch to a trace-based approach (using the
   // compaction speed) as soon as we have enough samples.
   const int kTargetFragmentationPercent = 70;
-  const int kMaxEvacuatedBytes = 4 * MB;
+  const size_t kMaxEvacuatedBytes = 4 * MB;
   // Time to take for a single area (= payload of a page). Used as soon as
   // there exist enough compaction speed samples.
   const float kTargetMsPerArea = .5;

   if (heap()->ShouldReduceMemory()) {
     *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
     *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
   } else if (heap()->ShouldOptimizeForMemoryUsage()) {
     *target_fragmentation_percent =
         kTargetFragmentationPercentForOptimizeMemory;
(...skipping 18 matching lines...)
     }
     *max_evacuated_bytes = kMaxEvacuatedBytes;
   }
 }
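The heuristics above boil down to a small decision table. A minimal standalone sketch follows; the bool parameters are hypothetical stand-ins for the heap()->ShouldReduceMemory() and heap()->ShouldOptimizeForMemoryUsage() queries, the trace-based branch is omitted, and only the constants come from the patch:

#include <cstddef>

constexpr size_t MB = 1024 * 1024;  // mirrors V8's MB constant

struct EvacuationLimits {
  int target_fragmentation_percent;
  size_t max_evacuated_bytes;
};

// Hypothetical free-function version of the mode dispatch above.
EvacuationLimits ComputeLimits(bool reduce_memory, bool optimize_for_memory) {
  if (reduce_memory) return {20, 12 * MB};       // aggressive target, 12 MB quota
  if (optimize_for_memory) return {20, 6 * MB};  // aggressive target, 6 MB quota
  // Latency-critical default, used until enough compaction-speed samples
  // exist for the trace-based estimate (elided here).
  return {70, 4 * MB};
}

Note that only the byte quotas change type in this patch; the fragmentation targets stay int because they are percentages, not sizes.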


 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);

   int number_of_pages = space->CountTotalPages();
-  int area_size = space->AreaSize();
+  size_t area_size = space->AreaSize();

   // Pairs of (live_bytes_in_page, page).
-  typedef std::pair<int, Page*> LiveBytesPagePair;
+  typedef std::pair<size_t, Page*> LiveBytesPagePair;
   std::vector<LiveBytesPagePair> pages;
   pages.reserve(number_of_pages);

   for (Page* p : *space) {
     if (p->NeverEvacuate()) continue;
     // Invariant: Evacuation candidates are only created when marking has
     // started, which means that sweeping has finished. Furthermore, at the
     // end of a GC all evacuation candidates are cleared and their slot
     // buffers are released.
     CHECK(!p->IsEvacuationCandidate());
     CHECK_NULL(p->old_to_old_slots());
     CHECK_NULL(p->typed_old_to_old_slots());
     CHECK(p->SweepingDone());
     DCHECK(p->area_size() == area_size);
     pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
   }

   int candidate_count = 0;
-  int total_live_bytes = 0;
+  size_t total_live_bytes = 0;

   const bool reduce_memory = heap()->ShouldReduceMemory();
   if (FLAG_manual_evacuation_candidates_selection) {
     for (size_t i = 0; i < pages.size(); i++) {
       Page* p = pages[i].second;
       if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
         candidate_count++;
         total_live_bytes += pages[i].first;
         p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
         AddEvacuationCandidate(p);
(...skipping 15 matching lines...)
     // candidate, or not:
     // * Target fragmentation: How fragmented is a page, i.e., what is the
     //   ratio between live bytes and the capacity of this page (= area).
     // * Evacuation quota: A global quota determining how many bytes should
     //   be compacted.
     //
     // The algorithm sorts all pages by live bytes and then iterates through
     // them, starting with the page with the most free memory, adding them to
     // the set of evacuation candidates as long as both conditions
     // (fragmentation and quota) hold.
-    int max_evacuated_bytes;
+    size_t max_evacuated_bytes;
     int target_fragmentation_percent;
     ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
                                 &max_evacuated_bytes);

-    const intptr_t free_bytes_threshold =
+    const size_t free_bytes_threshold =
         target_fragmentation_percent * (area_size / 100);

     // Sort pages from the most free to the least free, then select
     // the first n pages for evacuation such that:
     // - the total size of evacuated objects does not exceed the specified
     //   limit.
     // - fragmentation of the (n+1)-th page does not exceed the specified limit.
     std::sort(pages.begin(), pages.end(),
               [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
                 return a.first < b.first;
               });
     for (size_t i = 0; i < pages.size(); i++) {
-      int live_bytes = pages[i].first;
-      int free_bytes = area_size - live_bytes;
+      size_t live_bytes = pages[i].first;
+      DCHECK_GE(area_size, live_bytes);
+      size_t free_bytes = area_size - live_bytes;
       if (FLAG_always_compact ||
           ((free_bytes >= free_bytes_threshold) &&
            ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
         candidate_count++;
         total_live_bytes += live_bytes;
       }
       if (FLAG_trace_fragmentation_verbose) {
         PrintIsolate(isolate(),
-                     "compaction-selection-page: space=%s free_bytes_page=%d "
+                     "compaction-selection-page: space=%s free_bytes_page=%zu "
                      "fragmentation_limit_kb=%" V8PRIdPTR
-                     " fragmentation_limit_percent=%d sum_compaction_kb=%d "
-                     "compaction_limit_kb=%d\n",
+                     " fragmentation_limit_percent=%d sum_compaction_kb=%zu "
+                     "compaction_limit_kb=%zu\n",
                      AllocationSpaceName(space->identity()), free_bytes / KB,
                      free_bytes_threshold / KB, target_fragmentation_percent,
                      total_live_bytes / KB, max_evacuated_bytes / KB);
       }
     }
     // How many pages we will allocate for the evacuated objects in the
     // worst case: ceil(total_live_bytes / area_size)
-    int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size;
+    int estimated_new_pages =
+        static_cast<int>((total_live_bytes + area_size - 1) / area_size);
     DCHECK_LE(estimated_new_pages, candidate_count);
     int estimated_released_pages = candidate_count - estimated_new_pages;
     // Avoid (compact -> expand) cycles.
     if ((estimated_released_pages == 0) && !FLAG_always_compact) {
       candidate_count = 0;
     }
     for (int i = 0; i < candidate_count; i++) {
       AddEvacuationCandidate(pages[i].second);
     }
   }

   if (FLAG_trace_fragmentation) {
     PrintIsolate(isolate(),
                  "compaction-selection: space=%s reduce_memory=%d pages=%d "
-                 "total_live_bytes=%d\n",
+                 "total_live_bytes=%zu\n",
                  AllocationSpaceName(space->identity()), reduce_memory,
                  candidate_count, total_live_bytes / KB);
   }
 }
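The selection logic reads well as a standalone function. The sketch below is a simplified, hypothetical reconstruction of the greedy loop above: pages are reduced to their live-byte counts, the FLAG_* branches and tracing are dropped, and it returns how many of the sorted pages would become candidates.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical distillation of CollectEvacuationCandidates' greedy selection.
size_t SelectCandidates(std::vector<size_t> live_bytes_per_page,
                        size_t area_size, int target_fragmentation_percent,
                        size_t max_evacuated_bytes) {
  // Most free memory first is the same as fewest live bytes first.
  std::sort(live_bytes_per_page.begin(), live_bytes_per_page.end());

  const size_t free_bytes_threshold =
      target_fragmentation_percent * (area_size / 100);

  size_t candidate_count = 0;
  size_t total_live_bytes = 0;
  for (size_t live_bytes : live_bytes_per_page) {
    assert(area_size >= live_bytes);  // guards the unsigned subtraction
    const size_t free_bytes = area_size - live_bytes;
    if (free_bytes >= free_bytes_threshold &&
        total_live_bytes + live_bytes <= max_evacuated_bytes) {
      candidate_count++;
      total_live_bytes += live_bytes;
    }
  }

  // Worst-case number of fresh pages the evacuated objects will need:
  // ceil(total_live_bytes / area_size), written with the add-then-divide
  // idiom because integer division truncates.
  const size_t estimated_new_pages =
      (total_live_bytes + area_size - 1) / area_size;
  // If no page would actually be released, compaction would only shuffle
  // memory around (a compact -> expand cycle), so select nothing.
  if (estimated_new_pages == candidate_count) candidate_count = 0;
  return candidate_count;
}

Note also that the trace output switches from %d to %zu, the portable printf conversion for size_t, which matches the new types without casts.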


 void MarkCompactCollector::AbortCompaction() {
   if (compacting_) {
     RememberedSet<OLD_TO_OLD>::ClearAll(heap());
     for (Page* p : evacuation_candidates_) {
(...skipping 2594 matching lines...)
   intptr_t freed_bytes = 0;
   intptr_t max_freed_bytes = 0;
   int curr_region = -1;

   LiveObjectIterator<kBlackObjects> it(p);
   HeapObject* object = NULL;
   while ((object = it.Next()) != NULL) {
     DCHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
     Address free_end = object->address();
     if (free_end != free_start) {
-      int size = static_cast<int>(free_end - free_start);
+      CHECK_GT(free_end, free_start);
+      size_t size = static_cast<size_t>(free_end - free_start);
       if (free_space_mode == ZAP_FREE_SPACE) {
         memset(free_start, 0xcc, size);
       }
       if (free_list_mode == REBUILD_FREE_LIST) {
         freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
             free_start, size);
         max_freed_bytes = Max(freed_bytes, max_freed_bytes);
       } else {
-        p->heap()->CreateFillerObjectAt(free_start, size,
+        p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                         ClearRecordedSlots::kNo);
       }
     }
     Map* map = object->synchronized_map();
     int size = object->SizeFromMap(map);
     if (rebuild_skip_list) {
       int new_region_start = SkipList::RegionNumber(free_end);
       int new_region_end =
           SkipList::RegionNumber(free_end + size - kPointerSize);
       if (new_region_start != curr_region || new_region_end != curr_region) {
         skip_list->AddObject(free_end, size);
         curr_region = new_region_end;
       }
     }
     free_start = free_end + size;
   }

   // Clear the mark bits of that page and reset the live bytes count.
   p->ClearLiveness();

   if (free_start != p->area_end()) {
-    int size = static_cast<int>(p->area_end() - free_start);
+    CHECK_GT(p->area_end(), free_start);
+    size_t size = static_cast<size_t>(p->area_end() - free_start);
     if (free_space_mode == ZAP_FREE_SPACE) {
       memset(free_start, 0xcc, size);
     }
     if (free_list_mode == REBUILD_FREE_LIST) {
       freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
           free_start, size);
       max_freed_bytes = Max(freed_bytes, max_freed_bytes);
     } else {
-      p->heap()->CreateFillerObjectAt(free_start, size,
+      p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                       ClearRecordedSlots::kNo);
     }
   }
   p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
   if (free_list_mode == IGNORE_FREE_LIST) return 0;
   return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
 }
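The int to size_t switch in the sweeper is why the new CHECK_GT guards appear: an Address difference cast to an unsigned type wraps around instead of going negative if the operands are ever reversed. A minimal sketch of the guarded pattern, using char* as a stand-in for V8's Address and a plain assert for CHECK_GT:

#include <cassert>
#include <cstddef>

// Hypothetical helper: size of a free range, guarded against reversal.
size_t FreeRangeSize(const char* free_start, const char* free_end) {
  // With the old int cast, a swapped pair produced an easy-to-spot negative
  // size; with size_t it would silently become a huge bogus value, so the
  // ordering is checked before the subtraction.
  assert(free_end > free_start);
  return static_cast<size_t>(free_end - free_start);
}

The static_cast<int>(size) at the CreateFillerObjectAt call sites survives because that API still takes an int length, judging by the new casts; the narrowing is now explicit at the call boundary rather than implicit at the subtraction.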

 void MarkCompactCollector::InvalidateCode(Code* code) {
   Page* page = Page::FromAddress(code->address());
(...skipping 433 matching lines...)
                                                     Page* page) {
   DCHECK(sweeping_in_progress_);
   PrepareToBeSweptPage(space, page);
   late_pages_ = true;
   AddSweepingPageSafe(space, page);
 }

 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
                                                          Page* page) {
   page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
-  int to_sweep = page->area_size() - page->LiveBytes();
+  DCHECK_GE(page->area_size(), static_cast<size_t>(page->LiveBytes()));
+  size_t to_sweep = page->area_size() - page->LiveBytes();
   if (space != NEW_SPACE)
     heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
 }
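Same theme in PrepareToBeSweptPage: once to_sweep is a size_t, a live-bytes accounting bug no longer produces a negative value but a near-SIZE_MAX one, hence the DCHECK_GE before the subtraction. A tiny self-contained demonstration of the wrap-around, with hypothetical values:

#include <cstddef>
#include <cstdio>

int main() {
  size_t area_size = 16384;   // hypothetical page payload
  size_t live_bytes = 16385;  // one byte too many: an accounting bug
  size_t to_sweep = area_size - live_bytes;  // wraps around
  // Prints 18446744073709551615 on an LP64 target instead of -1.
  std::printf("to_sweep = %zu\n", to_sweep);
  return 0;
}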

 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
     AllocationSpace space) {
   base::LockGuard<base::Mutex> guard(&mutex_);
   Page* page = nullptr;
   if (!sweeping_list_[space].empty()) {
     page = sweeping_list_[space].front();
(...skipping 126 matching lines...)
       // The target is always in old space, so we don't have to record the
       // slot in the old-to-new remembered set.
       DCHECK(!heap()->InNewSpace(target));
       RecordRelocSlot(host, &rinfo, target);
     }
   }
 }

 }  // namespace internal
 }  // namespace v8
