Chromium Code Reviews | OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
| 4 | 4 |
| 5 #include "vm/pages.h" | 5 #include "vm/pages.h" |
| 6 | 6 |
| 7 #include "platform/assert.h" | 7 #include "platform/assert.h" |
| 8 #include "vm/compiler_stats.h" | 8 #include "vm/compiler_stats.h" |
| 9 #include "vm/gc_marker.h" | 9 #include "vm/gc_marker.h" |
| 10 #include "vm/gc_sweeper.h" | 10 #include "vm/gc_sweeper.h" |
| 11 #include "vm/lockers.h" | 11 #include "vm/lockers.h" |
| 12 #include "vm/object.h" | 12 #include "vm/object.h" |
| 13 #include "vm/os_thread.h" | 13 #include "vm/os_thread.h" |
| 14 #include "vm/thread_registry.h" | 14 #include "vm/safepoint.h" |
| 15 #include "vm/verified_memory.h" | 15 #include "vm/verified_memory.h" |
| 16 #include "vm/virtual_memory.h" | 16 #include "vm/virtual_memory.h" |
| 17 | 17 |
| 18 namespace dart { | 18 namespace dart { |
| 19 | 19 |
| 20 DEFINE_FLAG(int, heap_growth_rate, 0, | 20 DEFINE_FLAG(int, heap_growth_rate, 0, |
| 21 "The max number of pages the heap can grow at a time"); | 21 "The max number of pages the heap can grow at a time"); |
| 22 DEFINE_FLAG(int, old_gen_growth_space_ratio, 20, | 22 DEFINE_FLAG(int, old_gen_growth_space_ratio, 20, |
| 23 "The desired maximum percentage of free space after old gen GC"); | 23 "The desired maximum percentage of free space after old gen GC"); |
| 24 DEFINE_FLAG(int, old_gen_growth_time_ratio, 3, | 24 DEFINE_FLAG(int, old_gen_growth_time_ratio, 3, |
| (...skipping 759 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 784 if (page->type() == HeapPage::kExecutable) { | 784 if (page->type() == HeapPage::kExecutable) { |
| 785 page->WriteProtect(read_only); | 785 page->WriteProtect(read_only); |
| 786 } | 786 } |
| 787 page = page->next(); | 787 page = page->next(); |
| 788 } | 788 } |
| 789 } | 789 } |
| 790 } | 790 } |
| 791 | 791 |
| 792 | 792 |
| 793 void PageSpace::MarkSweep(bool invoke_api_callbacks) { | 793 void PageSpace::MarkSweep(bool invoke_api_callbacks) { |
| 794 Thread* thread = Thread::Current(); | |
| 794 Isolate* isolate = heap_->isolate(); | 795 Isolate* isolate = heap_->isolate(); |
| 795 ASSERT(isolate == Isolate::Current()); | 796 ASSERT(isolate == Isolate::Current()); |
| 796 | 797 |
| 797 // Wait for pending tasks to complete and then account for the driver task. | 798 // Wait for pending tasks to complete and then account for the driver task. |
| 798 { | 799 { |
| 799 MonitorLocker locker(tasks_lock()); | 800 MonitorLocker locker(tasks_lock()); |
| 800 while (tasks() > 0) { | 801 while (tasks() > 0) { |
| 801 locker.Wait(); | 802 locker.WaitWithSafepointCheck(thread); |
| 802 } | 803 } |
| 803 set_tasks(1); | 804 set_tasks(1); |
| 804 } | 805 } |
| 805 // Ensure that all threads for this isolate are at a safepoint (either stopped | 806 // Ensure that all threads for this isolate are at a safepoint (either |
| 806 // or in native code). If two threads are racing at this point, the loser | 807 // stopped or in native code). If two threads are racing at this point, the |
| 807 // will continue with its collection after waiting for the winner to complete. | 808 // loser will continue with its collection after waiting for the winner to |
| 809 // complete. | |
| 808 // TODO(koda): Consider moving SafepointThreads into allocation failure/retry | 810 // TODO(koda): Consider moving SafepointThreads into allocation failure/retry |
|
zra
2016/01/08 23:32:07
Remove TODO?
siva
2016/01/12 21:26:22
Done.
| |
| 809 // logic to avoid needless collections. | 811 // logic to avoid needless collections. |
| 810 isolate->thread_registry()->SafepointThreads(); | 812 { |
| 813 SafepointOperationScope safepoint_scope(thread); | |
| 811 | 814 |
| 812 // Perform various cleanup that relies on no tasks interfering. | 815 // Perform various cleanup that relies on no tasks interfering. |
| 813 isolate->class_table()->FreeOldTables(); | 816 isolate->class_table()->FreeOldTables(); |
| 814 | 817 |
| 815 NoSafepointScope no_safepoints; | 818 NoSafepointScope no_safepoints; |
| 816 | 819 |
| 817 if (FLAG_print_free_list_before_gc) { | 820 if (FLAG_print_free_list_before_gc) { |
| 818 OS::Print("Data Freelist (before GC):\n"); | 821 OS::Print("Data Freelist (before GC):\n"); |
| 819 freelist_[HeapPage::kData].Print(); | 822 freelist_[HeapPage::kData].Print(); |
| 820 OS::Print("Executable Freelist (before GC):\n"); | 823 OS::Print("Executable Freelist (before GC):\n"); |
| 821 freelist_[HeapPage::kExecutable].Print(); | 824 freelist_[HeapPage::kExecutable].Print(); |
| 822 } | 825 } |
| 823 | 826 |
| 824 if (FLAG_verify_before_gc) { | |
| 825 OS::PrintErr("Verifying before marking..."); | |
| 826 heap_->VerifyGC(); | |
| 827 OS::PrintErr(" done.\n"); | |
| 828 } | |
| 829 | |
| 830 const int64_t start = OS::GetCurrentTimeMicros(); | |
| 831 | |
| 832 // Make code pages writable. | |
| 833 WriteProtectCode(false); | |
| 834 | |
| 835 // Save old value before GCMarker visits the weak persistent handles. | |
| 836 SpaceUsage usage_before = GetCurrentUsage(); | |
| 837 | |
| 838 // Mark all reachable old-gen objects. | |
| 839 bool collect_code = FLAG_collect_code && ShouldCollectCode(); | |
| 840 GCMarker marker(heap_); | |
| 841 marker.MarkObjects(isolate, this, invoke_api_callbacks, collect_code); | |
| 842 usage_.used_in_words = marker.marked_words(); | |
| 843 | |
| 844 int64_t mid1 = OS::GetCurrentTimeMicros(); | |
| 845 | |
| 846 // Abandon the remainder of the bump allocation block. | |
| 847 AbandonBumpAllocation(); | |
| 848 // Reset the freelists and setup sweeping. | |
| 849 freelist_[HeapPage::kData].Reset(); | |
| 850 freelist_[HeapPage::kExecutable].Reset(); | |
| 851 | |
| 852 int64_t mid2 = OS::GetCurrentTimeMicros(); | |
| 853 int64_t mid3 = 0; | |
| 854 | |
| 855 { | |
| 856 if (FLAG_verify_before_gc) { | 827 if (FLAG_verify_before_gc) { |
| 857 OS::PrintErr("Verifying before sweeping..."); | 828 OS::PrintErr("Verifying before marking..."); |
| 858 heap_->VerifyGC(kAllowMarked); | 829 heap_->VerifyGC(); |
| 859 OS::PrintErr(" done.\n"); | 830 OS::PrintErr(" done.\n"); |
| 860 } | 831 } |
| 861 GCSweeper sweeper; | |
| 862 | 832 |
| 863 // During stop-the-world phases we should use bulk lock when adding elements | 833 const int64_t start = OS::GetCurrentTimeMicros(); |
| 864 // to the free list. | |
| 865 MutexLocker mld(freelist_[HeapPage::kData].mutex()); | |
| 866 MutexLocker mle(freelist_[HeapPage::kExecutable].mutex()); | |
| 867 | 834 |
| 868 // Large and executable pages are always swept immediately. | 835 // Make code pages writable. |
| 869 HeapPage* prev_page = NULL; | 836 WriteProtectCode(false); |
| 870 HeapPage* page = large_pages_; | 837 |
| 871 while (page != NULL) { | 838 // Save old value before GCMarker visits the weak persistent handles. |
| 872 HeapPage* next_page = page->next(); | 839 SpaceUsage usage_before = GetCurrentUsage(); |
| 873 const intptr_t words_to_end = sweeper.SweepLargePage(page); | 840 |
| 874 if (words_to_end == 0) { | 841 // Mark all reachable old-gen objects. |
| 875 FreeLargePage(page, prev_page); | 842 bool collect_code = FLAG_collect_code && ShouldCollectCode(); |
| 876 } else { | 843 GCMarker marker(heap_); |
| 877 TruncateLargePage(page, words_to_end << kWordSizeLog2); | 844 marker.MarkObjects(isolate, this, invoke_api_callbacks, collect_code); |
| 878 prev_page = page; | 845 usage_.used_in_words = marker.marked_words(); |
| 846 | |
| 847 int64_t mid1 = OS::GetCurrentTimeMicros(); | |
| 848 | |
| 849 // Abandon the remainder of the bump allocation block. | |
| 850 AbandonBumpAllocation(); | |
| 851 // Reset the freelists and setup sweeping. | |
| 852 freelist_[HeapPage::kData].Reset(); | |
| 853 freelist_[HeapPage::kExecutable].Reset(); | |
| 854 | |
| 855 int64_t mid2 = OS::GetCurrentTimeMicros(); | |
| 856 int64_t mid3 = 0; | |
| 857 | |
| 858 { | |
| 859 if (FLAG_verify_before_gc) { | |
| 860 OS::PrintErr("Verifying before sweeping..."); | |
| 861 heap_->VerifyGC(kAllowMarked); | |
| 862 OS::PrintErr(" done.\n"); | |
| 879 } | 863 } |
| 880 // Advance to the next page. | 864 GCSweeper sweeper; |
| 881 page = next_page; | |
| 882 } | |
| 883 | 865 |
| 884 prev_page = NULL; | 866 // During stop-the-world phases we should use bulk lock when adding |
| 885 page = exec_pages_; | 867 // elements to the free list. |
| 886 FreeList* freelist = &freelist_[HeapPage::kExecutable]; | 868 MutexLocker mld(freelist_[HeapPage::kData].mutex()); |
| 887 while (page != NULL) { | 869 MutexLocker mle(freelist_[HeapPage::kExecutable].mutex()); |
| 888 HeapPage* next_page = page->next(); | |
| 889 bool page_in_use = sweeper.SweepPage(page, freelist, true); | |
| 890 if (page_in_use) { | |
| 891 prev_page = page; | |
| 892 } else { | |
| 893 FreePage(page, prev_page); | |
| 894 } | |
| 895 // Advance to the next page. | |
| 896 page = next_page; | |
| 897 } | |
| 898 | 870 |
| 899 mid3 = OS::GetCurrentTimeMicros(); | 871 // Large and executable pages are always swept immediately. |
| 900 | 872 HeapPage* prev_page = NULL; |
| 901 if (!FLAG_concurrent_sweep) { | 873 HeapPage* page = large_pages_; |
| 902 // Sweep all regular sized pages now. | |
| 903 prev_page = NULL; | |
| 904 page = pages_; | |
| 905 while (page != NULL) { | 874 while (page != NULL) { |
| 906 HeapPage* next_page = page->next(); | 875 HeapPage* next_page = page->next(); |
| 907 bool page_in_use = sweeper.SweepPage( | 876 const intptr_t words_to_end = sweeper.SweepLargePage(page); |
| 908 page, &freelist_[page->type()], true); | 877 if (words_to_end == 0) { |
| 878 FreeLargePage(page, prev_page); | |
| 879 } else { | |
| 880 TruncateLargePage(page, words_to_end << kWordSizeLog2); | |
| 881 prev_page = page; | |
| 882 } | |
| 883 // Advance to the next page. | |
| 884 page = next_page; | |
| 885 } | |
| 886 | |
| 887 prev_page = NULL; | |
| 888 page = exec_pages_; | |
| 889 FreeList* freelist = &freelist_[HeapPage::kExecutable]; | |
| 890 while (page != NULL) { | |
| 891 HeapPage* next_page = page->next(); | |
| 892 bool page_in_use = sweeper.SweepPage(page, freelist, true); | |
| 909 if (page_in_use) { | 893 if (page_in_use) { |
| 910 prev_page = page; | 894 prev_page = page; |
| 911 } else { | 895 } else { |
| 912 FreePage(page, prev_page); | 896 FreePage(page, prev_page); |
| 913 } | 897 } |
| 914 // Advance to the next page. | 898 // Advance to the next page. |
| 915 page = next_page; | 899 page = next_page; |
| 916 } | 900 } |
| 917 if (FLAG_verify_after_gc) { | 901 |
| 918 OS::PrintErr("Verifying after sweeping..."); | 902 mid3 = OS::GetCurrentTimeMicros(); |
| 919 heap_->VerifyGC(kForbidMarked); | 903 |
| 920 OS::PrintErr(" done.\n"); | 904 if (!FLAG_concurrent_sweep) { |
| 905 // Sweep all regular sized pages now. | |
| 906 prev_page = NULL; | |
| 907 page = pages_; | |
| 908 while (page != NULL) { | |
| 909 HeapPage* next_page = page->next(); | |
| 910 bool page_in_use = sweeper.SweepPage( | |
| 911 page, &freelist_[page->type()], true); | |
| 912 if (page_in_use) { | |
| 913 prev_page = page; | |
| 914 } else { | |
| 915 FreePage(page, prev_page); | |
| 916 } | |
| 917 // Advance to the next page. | |
| 918 page = next_page; | |
| 919 } | |
| 920 if (FLAG_verify_after_gc) { | |
| 921 OS::PrintErr("Verifying after sweeping..."); | |
| 922 heap_->VerifyGC(kForbidMarked); | |
| 923 OS::PrintErr(" done.\n"); | |
| 924 } | |
| 925 } else { | |
| 926 // Start the concurrent sweeper task now. | |
| 927 GCSweeper::SweepConcurrent( | |
| 928 isolate, pages_, pages_tail_, &freelist_[HeapPage::kData]); | |
| 921 } | 929 } |
| 922 } else { | 930 } |
| 923 // Start the concurrent sweeper task now. | 931 |
| 924 GCSweeper::SweepConcurrent( | 932 // Make code pages read-only. |
| 925 isolate, pages_, pages_tail_, &freelist_[HeapPage::kData]); | 933 WriteProtectCode(true); |
| 934 | |
| 935 int64_t end = OS::GetCurrentTimeMicros(); | |
| 936 | |
| 937 // Record signals for growth control. Include size of external allocations. | |
| 938 page_space_controller_.EvaluateGarbageCollection(usage_before, | |
| 939 GetCurrentUsage(), | |
| 940 start, end); | |
| 941 | |
| 942 heap_->RecordTime(kMarkObjects, mid1 - start); | |
| 943 heap_->RecordTime(kResetFreeLists, mid2 - mid1); | |
| 944 heap_->RecordTime(kSweepPages, mid3 - mid2); | |
| 945 heap_->RecordTime(kSweepLargePages, end - mid3); | |
| 946 | |
| 947 if (FLAG_print_free_list_after_gc) { | |
| 948 OS::Print("Data Freelist (after GC):\n"); | |
| 949 freelist_[HeapPage::kData].Print(); | |
| 950 OS::Print("Executable Freelist (after GC):\n"); | |
| 951 freelist_[HeapPage::kExecutable].Print(); | |
| 952 } | |
| 953 | |
| 954 UpdateMaxUsed(); | |
| 955 if (heap_ != NULL) { | |
| 956 heap_->UpdateGlobalMaxUsed(); | |
| 926 } | 957 } |
| 927 } | 958 } |
| 928 | 959 |
| 929 // Make code pages read-only. | |
| 930 WriteProtectCode(true); | |
| 931 | |
| 932 int64_t end = OS::GetCurrentTimeMicros(); | |
| 933 | |
| 934 // Record signals for growth control. Include size of external allocations. | |
| 935 page_space_controller_.EvaluateGarbageCollection(usage_before, | |
| 936 GetCurrentUsage(), | |
| 937 start, end); | |
| 938 | |
| 939 heap_->RecordTime(kMarkObjects, mid1 - start); | |
| 940 heap_->RecordTime(kResetFreeLists, mid2 - mid1); | |
| 941 heap_->RecordTime(kSweepPages, mid3 - mid2); | |
| 942 heap_->RecordTime(kSweepLargePages, end - mid3); | |
| 943 | |
| 944 if (FLAG_print_free_list_after_gc) { | |
| 945 OS::Print("Data Freelist (after GC):\n"); | |
| 946 freelist_[HeapPage::kData].Print(); | |
| 947 OS::Print("Executable Freelist (after GC):\n"); | |
| 948 freelist_[HeapPage::kExecutable].Print(); | |
| 949 } | |
| 950 | |
| 951 UpdateMaxUsed(); | |
| 952 if (heap_ != NULL) { | |
| 953 heap_->UpdateGlobalMaxUsed(); | |
| 954 } | |
| 955 | |
| 956 isolate->thread_registry()->ResumeAllThreads(); | |
| 957 | |
| 958 // Done, reset the task count. | 960 // Done, reset the task count. |
| 959 { | 961 { |
| 960 MonitorLocker ml(tasks_lock()); | 962 MonitorLocker ml(tasks_lock()); |
| 961 set_tasks(tasks() - 1); | 963 set_tasks(tasks() - 1); |
| 962 ml.Notify(); | 964 ml.Notify(); |
| 963 } | 965 } |
| 964 } | 966 } |
| 965 | 967 |
| 966 | 968 |
| 967 uword PageSpace::TryAllocateDataBumpInternal(intptr_t size, | 969 uword PageSpace::TryAllocateDataBumpInternal(intptr_t size, |
| (...skipping 247 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1215 return 0; | 1217 return 0; |
| 1216 } else { | 1218 } else { |
| 1217 ASSERT(total_time >= gc_time); | 1219 ASSERT(total_time >= gc_time); |
| 1218 int result = static_cast<int>((static_cast<double>(gc_time) / | 1220 int result = static_cast<int>((static_cast<double>(gc_time) / |
| 1219 static_cast<double>(total_time)) * 100); | 1221 static_cast<double>(total_time)) * 100); |
| 1220 return result; | 1222 return result; |
| 1221 } | 1223 } |
| 1222 } | 1224 } |
| 1223 | 1225 |
| 1224 } // namespace dart | 1226 } // namespace dart |
| OLD | NEW |