| OLD | NEW |
| 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
| 4 | 4 |
| 5 #include "vm/pages.h" | 5 #include "vm/pages.h" |
| 6 | 6 |
| 7 #include "platform/assert.h" | 7 #include "platform/assert.h" |
| 8 #include "vm/compiler_stats.h" | 8 #include "vm/compiler_stats.h" |
| 9 #include "vm/gc_marker.h" | 9 #include "vm/gc_marker.h" |
| 10 #include "vm/gc_sweeper.h" | 10 #include "vm/gc_sweeper.h" |
| 11 #include "vm/lockers.h" | 11 #include "vm/lockers.h" |
| 12 #include "vm/object.h" | 12 #include "vm/object.h" |
| 13 #include "vm/os_thread.h" | 13 #include "vm/os_thread.h" |
| 14 #include "vm/thread_registry.h" | 14 #include "vm/safepoint.h" |
| 15 #include "vm/verified_memory.h" | 15 #include "vm/verified_memory.h" |
| 16 #include "vm/virtual_memory.h" | 16 #include "vm/virtual_memory.h" |
| 17 | 17 |
| 18 namespace dart { | 18 namespace dart { |
| 19 | 19 |
| 20 DEFINE_FLAG(int, heap_growth_rate, 0, | 20 DEFINE_FLAG(int, heap_growth_rate, 0, |
| 21 "The max number of pages the heap can grow at a time"); | 21 "The max number of pages the heap can grow at a time"); |
| 22 DEFINE_FLAG(int, old_gen_growth_space_ratio, 20, | 22 DEFINE_FLAG(int, old_gen_growth_space_ratio, 20, |
| 23 "The desired maximum percentage of free space after old gen GC"); | 23 "The desired maximum percentage of free space after old gen GC"); |
| 24 DEFINE_FLAG(int, old_gen_growth_time_ratio, 3, | 24 DEFINE_FLAG(int, old_gen_growth_time_ratio, 3, |
| (...skipping 759 matching lines...) |
| 784 if (page->type() == HeapPage::kExecutable) { | 784 if (page->type() == HeapPage::kExecutable) { |
| 785 page->WriteProtect(read_only); | 785 page->WriteProtect(read_only); |
| 786 } | 786 } |
| 787 page = page->next(); | 787 page = page->next(); |
| 788 } | 788 } |
| 789 } | 789 } |
| 790 } | 790 } |
| 791 | 791 |
| 792 | 792 |
| 793 void PageSpace::MarkSweep(bool invoke_api_callbacks) { | 793 void PageSpace::MarkSweep(bool invoke_api_callbacks) { |
| | 794 Thread* thread = Thread::Current(); |
| 794 Isolate* isolate = heap_->isolate(); | 795 Isolate* isolate = heap_->isolate(); |
| 795 ASSERT(isolate == Isolate::Current()); | 796 ASSERT(isolate == Isolate::Current()); |
| 796 | 797 |
| 797 // Wait for pending tasks to complete and then account for the driver task. | 798 // Wait for pending tasks to complete and then account for the driver task. |
| 798 { | 799 { |
| 799 MonitorLocker locker(tasks_lock()); | 800 MonitorLocker locker(tasks_lock()); |
| 800 while (tasks() > 0) { | 801 while (tasks() > 0) { |
| 801 locker.Wait(); | 802 locker.WaitWithSafepointCheck(thread); |
| 802 } | 803 } |
| 803 set_tasks(1); | 804 set_tasks(1); |
| 804 } | 805 } |
| 805 // Ensure that all threads for this isolate are at a safepoint (either stopped | 806 // Ensure that all threads for this isolate are at a safepoint (either |
| 806 // or in native code). If two threads are racing at this point, the loser | 807 // stopped or in native code). We have guards around Newgen GC and oldgen GC |
| 807 // will continue with its collection after waiting for the winner to complete. | 808 // to ensure that if two threads are racing to collect at the same time the |
| 808 // TODO(koda): Consider moving SafepointThreads into allocation failure/retry | 809 // loser skips collection and goes straight to allocation. |
| 809 // logic to avoid needless collections. | 810 { |
| 810 isolate->thread_registry()->SafepointThreads(); | 811 SafepointOperationScope safepoint_scope(thread); |
| 811 | 812 |
| 812 // Perform various cleanup that relies on no tasks interfering. | 813 // Perform various cleanup that relies on no tasks interfering. |
| 813 isolate->class_table()->FreeOldTables(); | 814 isolate->class_table()->FreeOldTables(); |
| 814 | 815 |
| 815 NoSafepointScope no_safepoints; | 816 NoSafepointScope no_safepoints; |
| 816 | 817 |
| 817 if (FLAG_print_free_list_before_gc) { | 818 if (FLAG_print_free_list_before_gc) { |
| 818 OS::Print("Data Freelist (before GC):\n"); | 819 OS::Print("Data Freelist (before GC):\n"); |
| 819 freelist_[HeapPage::kData].Print(); | 820 freelist_[HeapPage::kData].Print(); |
| 820 OS::Print("Executable Freelist (before GC):\n"); | 821 OS::Print("Executable Freelist (before GC):\n"); |
| 821 freelist_[HeapPage::kExecutable].Print(); | 822 freelist_[HeapPage::kExecutable].Print(); |
| 822 } | 823 } |
| 823 | 824 |
| 824 if (FLAG_verify_before_gc) { | |
| 825 OS::PrintErr("Verifying before marking..."); | |
| 826 heap_->VerifyGC(); | |
| 827 OS::PrintErr(" done.\n"); | |
| 828 } | |
| 829 | |
| 830 const int64_t start = OS::GetCurrentTimeMicros(); | |
| 831 | |
| 832 // Make code pages writable. | |
| 833 WriteProtectCode(false); | |
| 834 | |
| 835 // Save old value before GCMarker visits the weak persistent handles. | |
| 836 SpaceUsage usage_before = GetCurrentUsage(); | |
| 837 | |
| 838 // Mark all reachable old-gen objects. | |
| 839 bool collect_code = FLAG_collect_code && ShouldCollectCode(); | |
| 840 GCMarker marker(heap_); | |
| 841 marker.MarkObjects(isolate, this, invoke_api_callbacks, collect_code); | |
| 842 usage_.used_in_words = marker.marked_words(); | |
| 843 | |
| 844 int64_t mid1 = OS::GetCurrentTimeMicros(); | |
| 845 | |
| 846 // Abandon the remainder of the bump allocation block. | |
| 847 AbandonBumpAllocation(); | |
| 848 // Reset the freelists and setup sweeping. | |
| 849 freelist_[HeapPage::kData].Reset(); | |
| 850 freelist_[HeapPage::kExecutable].Reset(); | |
| 851 | |
| 852 int64_t mid2 = OS::GetCurrentTimeMicros(); | |
| 853 int64_t mid3 = 0; | |
| 854 | |
| 855 { | |
| 856 if (FLAG_verify_before_gc) { | 825 if (FLAG_verify_before_gc) { |
| 857 OS::PrintErr("Verifying before sweeping..."); | 826 OS::PrintErr("Verifying before marking..."); |
| 858 heap_->VerifyGC(kAllowMarked); | 827 heap_->VerifyGC(); |
| 859 OS::PrintErr(" done.\n"); | 828 OS::PrintErr(" done.\n"); |
| 860 } | 829 } |
| 861 GCSweeper sweeper; | |
| 862 | 830 |
| 863 // During stop-the-world phases we should use bulk lock when adding elements | 831 const int64_t start = OS::GetCurrentTimeMicros(); |
| 864 // to the free list. | |
| 865 MutexLocker mld(freelist_[HeapPage::kData].mutex()); | |
| 866 MutexLocker mle(freelist_[HeapPage::kExecutable].mutex()); | |
| 867 | 832 |
| 868 // Large and executable pages are always swept immediately. | 833 // Make code pages writable. |
| 869 HeapPage* prev_page = NULL; | 834 WriteProtectCode(false); |
| 870 HeapPage* page = large_pages_; | 835 |
| 871 while (page != NULL) { | 836 // Save old value before GCMarker visits the weak persistent handles. |
| 872 HeapPage* next_page = page->next(); | 837 SpaceUsage usage_before = GetCurrentUsage(); |
| 873 const intptr_t words_to_end = sweeper.SweepLargePage(page); | 838 |
| 874 if (words_to_end == 0) { | 839 // Mark all reachable old-gen objects. |
| 875 FreeLargePage(page, prev_page); | 840 bool collect_code = FLAG_collect_code && ShouldCollectCode(); |
| 876 } else { | 841 GCMarker marker(heap_); |
| 877 TruncateLargePage(page, words_to_end << kWordSizeLog2); | 842 marker.MarkObjects(isolate, this, invoke_api_callbacks, collect_code); |
| 878 prev_page = page; | 843 usage_.used_in_words = marker.marked_words(); |
| | 844 |
| | 845 int64_t mid1 = OS::GetCurrentTimeMicros(); |
| | 846 |
| | 847 // Abandon the remainder of the bump allocation block. |
| | 848 AbandonBumpAllocation(); |
| | 849 // Reset the freelists and setup sweeping. |
| | 850 freelist_[HeapPage::kData].Reset(); |
| | 851 freelist_[HeapPage::kExecutable].Reset(); |
| | 852 |
| | 853 int64_t mid2 = OS::GetCurrentTimeMicros(); |
| | 854 int64_t mid3 = 0; |
| | 855 |
| | 856 { |
| | 857 if (FLAG_verify_before_gc) { |
| | 858 OS::PrintErr("Verifying before sweeping..."); |
| | 859 heap_->VerifyGC(kAllowMarked); |
| | 860 OS::PrintErr(" done.\n"); |
| 879 } | 861 } |
| 880 // Advance to the next page. | 862 GCSweeper sweeper; |
| 881 page = next_page; | |
| 882 } | |
| 883 | 863 |
| 884 prev_page = NULL; | 864 // During stop-the-world phases we should use bulk lock when adding |
| 885 page = exec_pages_; | 865 // elements to the free list. |
| 886 FreeList* freelist = &freelist_[HeapPage::kExecutable]; | 866 MutexLocker mld(freelist_[HeapPage::kData].mutex()); |
| 887 while (page != NULL) { | 867 MutexLocker mle(freelist_[HeapPage::kExecutable].mutex()); |
| 888 HeapPage* next_page = page->next(); | |
| 889 bool page_in_use = sweeper.SweepPage(page, freelist, true); | |
| 890 if (page_in_use) { | |
| 891 prev_page = page; | |
| 892 } else { | |
| 893 FreePage(page, prev_page); | |
| 894 } | |
| 895 // Advance to the next page. | |
| 896 page = next_page; | |
| 897 } | |
| 898 | 868 |
| 899 mid3 = OS::GetCurrentTimeMicros(); | 869 // Large and executable pages are always swept immediately. |
| 900 | 870 HeapPage* prev_page = NULL; |
| 901 if (!FLAG_concurrent_sweep) { | 871 HeapPage* page = large_pages_; |
| 902 // Sweep all regular sized pages now. | |
| 903 prev_page = NULL; | |
| 904 page = pages_; | |
| 905 while (page != NULL) { | 872 while (page != NULL) { |
| 906 HeapPage* next_page = page->next(); | 873 HeapPage* next_page = page->next(); |
| 907 bool page_in_use = sweeper.SweepPage( | 874 const intptr_t words_to_end = sweeper.SweepLargePage(page); |
| 908 page, &freelist_[page->type()], true); | 875 if (words_to_end == 0) { |
| | 876 FreeLargePage(page, prev_page); |
| | 877 } else { |
| | 878 TruncateLargePage(page, words_to_end << kWordSizeLog2); |
| | 879 prev_page = page; |
| | 880 } |
| | 881 // Advance to the next page. |
| | 882 page = next_page; |
| | 883 } |
| | 884 |
| | 885 prev_page = NULL; |
| | 886 page = exec_pages_; |
| | 887 FreeList* freelist = &freelist_[HeapPage::kExecutable]; |
| | 888 while (page != NULL) { |
| | 889 HeapPage* next_page = page->next(); |
| | 890 bool page_in_use = sweeper.SweepPage(page, freelist, true); |
| 909 if (page_in_use) { | 891 if (page_in_use) { |
| 910 prev_page = page; | 892 prev_page = page; |
| 911 } else { | 893 } else { |
| 912 FreePage(page, prev_page); | 894 FreePage(page, prev_page); |
| 913 } | 895 } |
| 914 // Advance to the next page. | 896 // Advance to the next page. |
| 915 page = next_page; | 897 page = next_page; |
| 916 } | 898 } |
| 917 if (FLAG_verify_after_gc) { | 899 |
| 918 OS::PrintErr("Verifying after sweeping..."); | 900 mid3 = OS::GetCurrentTimeMicros(); |
| 919 heap_->VerifyGC(kForbidMarked); | 901 |
| 920 OS::PrintErr(" done.\n"); | 902 if (!FLAG_concurrent_sweep) { |
| | 903 // Sweep all regular sized pages now. |
| | 904 prev_page = NULL; |
| | 905 page = pages_; |
| | 906 while (page != NULL) { |
| | 907 HeapPage* next_page = page->next(); |
| | 908 bool page_in_use = sweeper.SweepPage( |
| | 909 page, &freelist_[page->type()], true); |
| | 910 if (page_in_use) { |
| | 911 prev_page = page; |
| | 912 } else { |
| | 913 FreePage(page, prev_page); |
| | 914 } |
| | 915 // Advance to the next page. |
| | 916 page = next_page; |
| | 917 } |
| | 918 if (FLAG_verify_after_gc) { |
| | 919 OS::PrintErr("Verifying after sweeping..."); |
| | 920 heap_->VerifyGC(kForbidMarked); |
| | 921 OS::PrintErr(" done.\n"); |
| | 922 } |
| | 923 } else { |
| | 924 // Start the concurrent sweeper task now. |
| | 925 GCSweeper::SweepConcurrent( |
| | 926 isolate, pages_, pages_tail_, &freelist_[HeapPage::kData]); |
| 921 } | 927 } |
| 922 } else { | 928 } |
| 923 // Start the concurrent sweeper task now. | 929 |
| 924 GCSweeper::SweepConcurrent( | 930 // Make code pages read-only. |
| 925 isolate, pages_, pages_tail_, &freelist_[HeapPage::kData]); | 931 WriteProtectCode(true); |
| | 932 |
| | 933 int64_t end = OS::GetCurrentTimeMicros(); |
| | 934 |
| | 935 // Record signals for growth control. Include size of external allocations. |
| | 936 page_space_controller_.EvaluateGarbageCollection(usage_before, |
| | 937 GetCurrentUsage(), |
| | 938 start, end); |
| | 939 |
| | 940 heap_->RecordTime(kMarkObjects, mid1 - start); |
| | 941 heap_->RecordTime(kResetFreeLists, mid2 - mid1); |
| | 942 heap_->RecordTime(kSweepPages, mid3 - mid2); |
| | 943 heap_->RecordTime(kSweepLargePages, end - mid3); |
| | 944 |
| | 945 if (FLAG_print_free_list_after_gc) { |
| | 946 OS::Print("Data Freelist (after GC):\n"); |
| | 947 freelist_[HeapPage::kData].Print(); |
| | 948 OS::Print("Executable Freelist (after GC):\n"); |
| | 949 freelist_[HeapPage::kExecutable].Print(); |
| | 950 } |
| | 951 |
| | 952 UpdateMaxUsed(); |
| | 953 if (heap_ != NULL) { |
| | 954 heap_->UpdateGlobalMaxUsed(); |
| 926 } | 955 } |
| 927 } | 956 } |
| 928 | 957 |
| 929 // Make code pages read-only. | |
| 930 WriteProtectCode(true); | |
| 931 | |
| 932 int64_t end = OS::GetCurrentTimeMicros(); | |
| 933 | |
| 934 // Record signals for growth control. Include size of external allocations. | |
| 935 page_space_controller_.EvaluateGarbageCollection(usage_before, | |
| 936 GetCurrentUsage(), | |
| 937 start, end); | |
| 938 | |
| 939 heap_->RecordTime(kMarkObjects, mid1 - start); | |
| 940 heap_->RecordTime(kResetFreeLists, mid2 - mid1); | |
| 941 heap_->RecordTime(kSweepPages, mid3 - mid2); | |
| 942 heap_->RecordTime(kSweepLargePages, end - mid3); | |
| 943 | |
| 944 if (FLAG_print_free_list_after_gc) { | |
| 945 OS::Print("Data Freelist (after GC):\n"); | |
| 946 freelist_[HeapPage::kData].Print(); | |
| 947 OS::Print("Executable Freelist (after GC):\n"); | |
| 948 freelist_[HeapPage::kExecutable].Print(); | |
| 949 } | |
| 950 | |
| 951 UpdateMaxUsed(); | |
| 952 if (heap_ != NULL) { | |
| 953 heap_->UpdateGlobalMaxUsed(); | |
| 954 } | |
| 955 | |
| 956 isolate->thread_registry()->ResumeAllThreads(); | |
| 957 | |
| 958 // Done, reset the task count. | 958 // Done, reset the task count. |
| 959 { | 959 { |
| 960 MonitorLocker ml(tasks_lock()); | 960 MonitorLocker ml(tasks_lock()); |
| 961 set_tasks(tasks() - 1); | 961 set_tasks(tasks() - 1); |
| 962 ml.Notify(); | 962 ml.Notify(); |
| 963 } | 963 } |
| 964 } | 964 } |
| 965 | 965 |
| 966 | 966 |
| 967 uword PageSpace::TryAllocateDataBumpInternal(intptr_t size, | 967 uword PageSpace::TryAllocateDataBumpInternal(intptr_t size, |
| (...skipping 247 matching lines...) |
| 1215 return 0; | 1215 return 0; |
| 1216 } else { | 1216 } else { |
| 1217 ASSERT(total_time >= gc_time); | 1217 ASSERT(total_time >= gc_time); |
| 1218 int result = static_cast<int>((static_cast<double>(gc_time) / | 1218 int result = static_cast<int>((static_cast<double>(gc_time) / |
| 1219 static_cast<double>(total_time)) * 100); | 1219 static_cast<double>(total_time)) * 100); |
| 1220 return result; | 1220 return result; |
| 1221 } | 1221 } |
| 1222 } | 1222 } |
| 1223 | 1223 |
| 1224 } // namespace dart | 1224 } // namespace dart |
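
The first functional change in MarkSweep is the wait loop: plain locker.Wait() becomes locker.WaitWithSafepointCheck(thread). While the driver thread is parked waiting for outstanding tasks to finish, a concurrent safepoint operation must still be able to proceed; a plain wait would leave the safepoint requester blocked on a thread that is merely parked on the tasks monitor. A minimal sketch of that idea, using standard C++ primitives and hypothetical Thread/Monitor types rather than the VM's real classes:

#include <atomic>
#include <condition_variable>
#include <mutex>

struct Thread {
  // Read by a safepoint requester deciding whether this thread still
  // needs to be brought to a stop.
  std::atomic<bool> at_safepoint{false};
};

class Monitor {
 public:
  // Park the thread in a safepoint-visible state for the duration of the
  // wait, then restore it before the caller touches the heap again.
  void WaitWithSafepointCheck(Thread* t, std::unique_lock<std::mutex>& lock) {
    t->at_safepoint.store(true);   // safepoint operations may now proceed
    cv_.wait(lock);                // may block across an entire GC
    t->at_safepoint.store(false);  // back to a normal, heap-accessing state
  }
  void Notify() { cv_.notify_one(); }

 private:
  std::condition_variable cv_;
};

The caller's shape is unchanged from the diff: loop on the predicate (tasks() > 0) and re-check it after every wakeup, since condition-variable waits can wake spuriously. The matching bookkeeping at the end of the function (decrement tasks and Notify) is what releases the next waiter.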
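The other structural change is that the manually paired SafepointThreads() / ResumeAllThreads() calls become a single SafepointOperationScope whose destructor resumes the world, so no early return or added exit path inside the collection can leave the other threads stopped. A sketch of the RAII shape, with a stub Registry standing in for the VM's real thread bookkeeping:

// Hypothetical stand-in for the VM's thread registry.
struct Registry {
  void SafepointThreads() { /* bring every mutator thread to a stop */ }
  void ResumeAllThreads() { /* let them run again */ }
};

class SafepointOperationScope {
 public:
  explicit SafepointOperationScope(Registry* r) : registry_(r) {
    registry_->SafepointThreads();  // stop-the-world begins
  }
  ~SafepointOperationScope() {
    registry_->ResumeAllThreads();  // guaranteed on every exit path
  }

  // Non-copyable: one stop must pair with exactly one resume.
  SafepointOperationScope(const SafepointOperationScope&) = delete;
  SafepointOperationScope& operator=(const SafepointOperationScope&) = delete;

 private:
  Registry* const registry_;
};

void CollectLike(Registry* r) {
  SafepointOperationScope scope(r);  // world stopped here
  // ... mark, sweep, verify ...
}                                    // world resumed here, even on early exit

This is also why the body of MarkSweep shifts one indentation level to the right in the new column: it now lives inside the scope's braces.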
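WriteProtectCode(false) ... WriteProtectCode(true) brackets the collection: executable pages are made writable only while the collector needs to update them (for instance when code objects are collected), and flipped back to read-only before mutators resume. A sketch of the underlying protection flip using POSIX mprotect; the VM goes through its own VirtualMemory abstraction, and addr must be page-aligned:

#include <cstddef>
#include <sys/mman.h>

// Flip one executable page between read+execute and read+write.
// Returns true on success; addr must be page-aligned.
inline bool WriteProtectCodePage(void* addr, size_t size, bool read_only) {
  const int prot =
      read_only ? (PROT_READ | PROT_EXEC) : (PROT_READ | PROT_WRITE);
  return mprotect(addr, size, prot) == 0;
}

Keeping code pages W^X this way means a bug that stomps on code while mutators run faults immediately instead of silently executing garbage.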
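The large-page loop shows the sweep policy for oversized allocations: SweepLargePage returns the number of words up to the end of the live data, or 0 if the page's object died. A dead page is unlinked and freed; a surviving page is truncated to that boundary, with words_to_end << kWordSizeLog2 converting words to bytes. A self-contained sketch with stand-in types (the real TruncateLargePage returns the unused tail to the OS rather than just adjusting a field):

#include <cstdint>

constexpr intptr_t kWordSizeLog2 = 3;  // 8-byte words on 64-bit targets

struct LargePage {
  LargePage* next;
  intptr_t size_in_bytes;
};

// Sweep one page; returns the page if it survives, nullptr if it was freed.
LargePage* SweepOneLargePage(LargePage* page, LargePage* prev,
                             LargePage** head, intptr_t words_to_end) {
  if (words_to_end == 0) {
    // The object on this page is dead: unlink the page and release it.
    (prev != nullptr ? prev->next : *head) = page->next;
    delete page;
    return nullptr;
  }
  // The object survived: keep the page, shrunk to the end of the live data.
  page->size_in_bytes = words_to_end << kWordSizeLog2;
  return page;
}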
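The unchanged hunk at the bottom computes the percentage of wall-clock time spent in GC for the growth controller: an integer truncation of gc_time / total_time * 100, with a guard just above the visible lines that returns 0 when there is no elapsed time to divide by. For example, 25 ms of collection inside a 1000 ms window yields 2, truncated from 2.5. The same arithmetic, extracted so it can be run directly:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Integer percentage of total_time spent in gc_time, truncating exactly as
// the VM code does; the total_time == 0 check mirrors the early return 0.
int GcTimePercent(int64_t gc_time, int64_t total_time) {
  if (total_time == 0) return 0;
  assert(total_time >= gc_time);
  return static_cast<int>(
      (static_cast<double>(gc_time) / static_cast<double>(total_time)) * 100);
}

int main() {
  std::printf("%d\n", GcTimePercent(25, 1000));  // prints 2
  return 0;
}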