| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 645 matching lines...) |
| 656 json->beginArray("heaps"); | 656 json->beginArray("heaps"); |
| 657 SNAPSHOT_HEAP(General1); | 657 SNAPSHOT_HEAP(General1); |
| 658 SNAPSHOT_HEAP(General2); | 658 SNAPSHOT_HEAP(General2); |
| 659 SNAPSHOT_HEAP(General3); | 659 SNAPSHOT_HEAP(General3); |
| 660 SNAPSHOT_HEAP(General4); | 660 SNAPSHOT_HEAP(General4); |
| 661 SNAPSHOT_HEAP(CollectionBacking); | 661 SNAPSHOT_HEAP(CollectionBacking); |
| 662 FOR_EACH_TYPED_HEAP(SNAPSHOT_HEAP); | 662 FOR_EACH_TYPED_HEAP(SNAPSHOT_HEAP); |
| 663 json->endArray(); | 663 json->endArray(); |
| 664 #undef SNAPSHOT_HEAP | 664 #undef SNAPSHOT_HEAP |
| 665 | 665 |
| 666 json->setInteger("allocatedSpace", m_stats.totalAllocatedSpace()); | 666 json->setInteger("allocatedSpace", Heap::allocatedSpace()); |
| 667 json->setInteger("objectSpace", m_stats.totalObjectSpace()); | 667 json->setInteger("objectSpace", Heap::allocatedObjectSize()); |
| 668 json->setInteger("pageCount", info.pageCount); | 668 json->setInteger("pageCount", info.pageCount); |
| 669 json->setInteger("freeSize", info.freeSize); | 669 json->setInteger("freeSize", info.freeSize); |
| 670 | 670 |
| 671 Vector<String> classNameVector(info.classTags.size()); | 671 Vector<String> classNameVector(info.classTags.size()); |
| 672 for (HashMap<const GCInfo*, size_t>::iterator it = info.classTags.begin(); it != info.classTags.end(); ++it) | 672 for (HashMap<const GCInfo*, size_t>::iterator it = info.classTags.begin(); it != info.classTags.end(); ++it) |
| 673 classNameVector[it->value] = it->key->m_className; | 673 classNameVector[it->value] = it->key->m_className; |
| 674 | 674 |
| 675 size_t liveSize = 0; | 675 size_t liveSize = 0; |
| 676 size_t deadSize = 0; | 676 size_t deadSize = 0; |
| 677 json->beginArray("classes"); | 677 json->beginArray("classes"); |
| (...skipping 46 matching lines...) |
| 724 AtomicallyInitializedStatic(PersistentNode*, anchor = new PersistentAnchor); | 724 AtomicallyInitializedStatic(PersistentNode*, anchor = new PersistentAnchor); |
| 725 return anchor; | 725 return anchor; |
| 726 } | 726 } |
| 727 | 727 |
| 728 Mutex& ThreadState::globalRootsMutex() | 728 Mutex& ThreadState::globalRootsMutex() |
| 729 { | 729 { |
| 730 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); | 730 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); |
| 731 return mutex; | 731 return mutex; |
| 732 } | 732 } |
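AtomicallyInitializedStatic gives a once-only, thread-safe initialized static; the object is heap-allocated and deliberately leaked so the anchor and mutex stay valid for the life of the process. A minimal sketch of the same pattern in standard C++11, assuming thread-safe function-local statics (hypothetical name, not Blink's macro):

#include <mutex>

// Sketch only: the initializer below runs exactly once even under
// concurrent callers, and the mutex is never destroyed, so it stays
// usable during thread shutdown, mirroring the Blink pattern above.
std::mutex& globalRootsMutexSketch()
{
    static std::mutex& mutex = *new std::mutex;
    return mutex;
}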
| 733 | 733 |
| 734 // Trigger garbage collection on a 50% increase in size, but not for | 734 bool ThreadState::shouldGC() |
| 735 // less than 512kbytes. | |
| 736 bool ThreadState::increasedEnoughToGC(size_t newSize, size_t oldSize) | |
| 737 { | 735 { |
| 738 if (newSize < 1 << 19) | 736 // Do not GC during sweeping. We allow allocation during finalization, |
| 737 // but those allocations are not allowed to lead to nested GCs. |
| 738 if (m_sweepInProgress) |
| 739 return false; | 739 return false; |
| 740 size_t limit = oldSize + (oldSize >> 1); | 740 |
| 741 return newSize > limit; | 741 // Trigger garbage collection on a 50% increase in size, |
| 742 // but not for less than 512 KB. |
| 743 if (Heap::allocatedObjectSize() < 1 << 19) |
| 744 return false; |
| 745 size_t limit = Heap::liveObjectSize() + Heap::liveObjectSize() / 2; |
| 746 return Heap::allocatedObjectSize() > limit; |
| 742 } | 747 } |
| 743 | 748 |
| 744 // FIXME: The heuristics are local for a thread at this | |
| 745 // point. Consider using heuristics that take memory for all threads | |
| 746 // into account. | |
| 747 bool ThreadState::shouldGC() | |
| 748 { | |
| 749 // Do not GC during sweeping. We allow allocation during | |
| 750 // finalization, but those allocations are not allowed | |
| 751 // to lead to nested garbage collections. | |
| 752 return !m_sweepInProgress && increasedEnoughToGC(m_stats.totalObjectSpace(), m_statsAfterLastGC.totalObjectSpace()); | |
| 753 } | |
| 754 | |
| 755 // Trigger conservative garbage collection on a 100% increase in size, | |
| 756 // but not for less than 4Mbytes. If the system currently has a low | |
| 757 // collection rate, then require a 300% increase in size. | |
| 758 bool ThreadState::increasedEnoughToForceConservativeGC(size_t newSize, size_t oldSize) | |
| 759 { | |
| 760 if (newSize < 1 << 22) | |
| 761 return false; | |
| 762 size_t limit = (m_lowCollectionRate ? 4 : 2) * oldSize; | |
| 763 return newSize > limit; | |
| 764 } | |
| 765 | |
| 766 // FIXME: The heuristics are local for a thread at this | |
| 767 // point. Consider using heuristics that take memory for all threads | |
| 768 // into account. | |
| 769 bool ThreadState::shouldForceConservativeGC() | 749 bool ThreadState::shouldForceConservativeGC() |
| 770 { | 750 { |
| 771 // Do not GC during sweeping. We allow allocation during | 751 // Do not GC during sweeping. We allow allocation during finalization, |
| 772 // finalization, but those allocations are not allowed | 752 // but those allocations are not allowed to lead to nested GCs. |
| 773 // to lead to nested garbage collections. | 753 if (m_sweepInProgress) |
| 774 return !m_sweepInProgress && increasedEnoughToForceConservativeGC(m_stats.totalObjectSpace(), m_statsAfterLastGC.totalObjectSpace()); | 754 return false; |
| 755 |
| 756 // Trigger conservative garbage collection on a 100% increase in size, |
| 757 // but not for less than 4Mbytes. If the system currently has a low |
| 758 // collection rate, then require a 300% increase in size. |
| 759 if (Heap::allocatedObjectSize() < 1 << 22) |
| 760 return false; |
| 761 size_t limit = (m_lowCollectionRate ? 4 : 2) * Heap::liveObjectSize(); |
| 762 return Heap::allocatedObjectSize() > limit; |
| 775 } | 763 } |
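The two trigger predicates above share one shape: a size floor plus a growth factor over the live object size. A self-contained sketch under that reading (hypothetical free functions; in the real code the counters come from Heap and the rate flag from ThreadState):

#include <cstddef>

// Trigger a GC on a 50% increase over live size, but not below 512 KB
// (1 << 19 bytes) of allocated objects.
bool shouldTriggerGC(size_t allocatedObjectSize, size_t liveObjectSize)
{
    if (allocatedObjectSize < (static_cast<size_t>(1) << 19))
        return false;
    return allocatedObjectSize > liveObjectSize + liveObjectSize / 2;
}

// Force a conservative GC on a 100% increase (300% when the recent
// collection rate was low), but not below 4 MB (1 << 22 bytes).
bool shouldForceConservativeGC(size_t allocatedObjectSize, size_t liveObjectSize, bool lowCollectionRate)
{
    if (allocatedObjectSize < (static_cast<size_t>(1) << 22))
        return false;
    size_t limit = (lowCollectionRate ? 4 : 2) * liveObjectSize;
    return allocatedObjectSize > limit;
}

For example, with 8 MB live and a normal collection rate, a conservative GC is forced once the allocated object size exceeds 16 MB; at a low collection rate the threshold stretches to 32 MB.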
| 776 | 764 |
| 777 bool ThreadState::sweepRequested() | 765 bool ThreadState::sweepRequested() |
| 778 { | 766 { |
| 779 ASSERT(isAnyThreadInGC() || checkThread()); | 767 ASSERT(isAnyThreadInGC() || checkThread()); |
| 780 return m_sweepRequested; | 768 return m_sweepRequested; |
| 781 } | 769 } |
| 782 | 770 |
| 783 void ThreadState::setSweepRequested() | 771 void ThreadState::setSweepRequested() |
| 784 { | 772 { |
| (...skipping 112 matching lines...) |
| 897 | 885 |
| 898 BaseHeapPage* ThreadState::heapPageFromAddress(Address address) | 886 BaseHeapPage* ThreadState::heapPageFromAddress(Address address) |
| 899 { | 887 { |
| 900 for (int i = 0; i < NumberOfHeaps; i++) { | 888 for (int i = 0; i < NumberOfHeaps; i++) { |
| 901 if (BaseHeapPage* page = m_heaps[i]->heapPageFromAddress(address)) | 889 if (BaseHeapPage* page = m_heaps[i]->heapPageFromAddress(address)) |
| 902 return page; | 890 return page; |
| 903 } | 891 } |
| 904 return 0; | 892 return 0; |
| 905 } | 893 } |
| 906 | 894 |
| 907 void ThreadState::getStats(HeapStats& stats) | 895 size_t ThreadState::objectPayloadSizeForTesting() |
| 908 { | |
| 909 stats = m_stats; | |
| 910 } | |
| 911 | |
| 912 void ThreadState::getStatsForTesting(HeapStats& stats) | |
| 913 { | 896 { |
| 914 ASSERT(isConsistentForSweeping()); | 897 ASSERT(isConsistentForSweeping()); |
| 898 size_t objectPayloadSize = 0; |
| 915 for (int i = 0; i < NumberOfHeaps; i++) | 899 for (int i = 0; i < NumberOfHeaps; i++) |
| 916 m_heaps[i]->getStatsForTesting(stats); | 900 objectPayloadSize += m_heaps[i]->objectPayloadSizeForTesting(); |
| 901 return objectPayloadSize; |
| 917 } | 902 } |
| 918 | 903 |
| 919 bool ThreadState::stopThreads() | 904 bool ThreadState::stopThreads() |
| 920 { | 905 { |
| 921 return s_safePointBarrier->parkOthers(); | 906 return s_safePointBarrier->parkOthers(); |
| 922 } | 907 } |
| 923 | 908 |
| 924 void ThreadState::resumeThreads() | 909 void ThreadState::resumeThreads() |
| 925 { | 910 { |
| 926 s_safePointBarrier->resumeOthers(); | 911 s_safePointBarrier->resumeOthers(); |
| (...skipping 108 matching lines...) |
| 1035 { | 1020 { |
| 1036 TRACE_EVENT0("blink_gc", "ThreadState::waitUntilSweepersDone"); | 1021 TRACE_EVENT0("blink_gc", "ThreadState::waitUntilSweepersDone"); |
| 1037 MutexLocker locker(m_sweepMutex); | 1022 MutexLocker locker(m_sweepMutex); |
| 1038 while (m_numberOfSweeperTasks > 0) | 1023 while (m_numberOfSweeperTasks > 0) |
| 1039 m_sweepThreadCondition.wait(m_sweepMutex); | 1024 m_sweepThreadCondition.wait(m_sweepMutex); |
| 1040 } | 1025 } |
| 1041 | 1026 |
| 1042 | 1027 |
| 1043 class SweepNonFinalizedHeapTask final : public WebThread::Task { | 1028 class SweepNonFinalizedHeapTask final : public WebThread::Task { |
| 1044 public: | 1029 public: |
| 1045 SweepNonFinalizedHeapTask(ThreadState* state, BaseHeap* heap, HeapStats* stats) | 1030 SweepNonFinalizedHeapTask(ThreadState* state, BaseHeap* heap) |
| 1046 : m_threadState(state) | 1031 : m_threadState(state) |
| 1047 , m_heap(heap) | 1032 , m_heap(heap) |
| 1048 , m_stats(stats) | |
| 1049 { | 1033 { |
| 1050 m_threadState->registerSweepingTask(); | 1034 m_threadState->registerSweepingTask(); |
| 1051 } | 1035 } |
| 1052 | 1036 |
| 1053 virtual ~SweepNonFinalizedHeapTask() | 1037 virtual ~SweepNonFinalizedHeapTask() |
| 1054 { | 1038 { |
| 1055 m_threadState->unregisterSweepingTask(); | 1039 m_threadState->unregisterSweepingTask(); |
| 1056 } | 1040 } |
| 1057 | 1041 |
| 1058 virtual void run() | 1042 virtual void run() |
| 1059 { | 1043 { |
| 1060 TRACE_EVENT0("blink_gc", "ThreadState::sweepNonFinalizedHeaps"); | 1044 TRACE_EVENT0("blink_gc", "ThreadState::sweepNonFinalizedHeaps"); |
| 1061 m_heap->sweep(m_stats); | 1045 m_heap->sweep(); |
| 1062 } | 1046 } |
| 1063 | 1047 |
| 1064 private: | 1048 private: |
| 1065 ThreadState* m_threadState; | 1049 ThreadState* m_threadState; |
| 1066 BaseHeap* m_heap; | 1050 BaseHeap* m_heap; |
| 1067 HeapStats* m_stats; | |
| 1068 }; | 1051 }; |
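The task registers itself with the thread state in its constructor and unregisters in its destructor, which is what waitUntilSweepersDone() above blocks on. A minimal standalone sketch of that rendezvous using standard primitives (hypothetical class; Blink uses WTF's Mutex and ThreadCondition):

#include <condition_variable>
#include <mutex>

class SweeperRendezvous {
public:
    // Called from a sweep task's constructor on the owner thread.
    void registerSweepingTask()
    {
        std::lock_guard<std::mutex> locker(m_mutex);
        ++m_numberOfSweeperTasks;
    }
    // Called from a sweep task's destructor; wakes the waiting owner.
    void unregisterSweepingTask()
    {
        std::lock_guard<std::mutex> locker(m_mutex);
        --m_numberOfSweeperTasks;
        m_condition.notify_all();
    }
    // Called by the owner thread; returns once all tasks have finished.
    void waitUntilSweepersDone()
    {
        std::unique_lock<std::mutex> locker(m_mutex);
        m_condition.wait(locker, [this] { return m_numberOfSweeperTasks == 0; });
    }
private:
    std::mutex m_mutex;
    std::condition_variable m_condition;
    int m_numberOfSweeperTasks = 0;
};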
| 1069 | 1052 |
| 1070 void ThreadState::performPendingSweep() | 1053 void ThreadState::performPendingSweep() |
| 1071 { | 1054 { |
| 1072 if (!sweepRequested()) | 1055 if (!sweepRequested()) |
| 1073 return; | 1056 return; |
| 1074 | 1057 |
| 1075 #if ENABLE(GC_PROFILE_HEAP) | 1058 #if ENABLE(GC_PROFILE_HEAP) |
| 1076 // We snapshot the heap prior to sweeping to get numbers for both resources | 1059 // We snapshot the heap prior to sweeping to get numbers for both resources |
| 1077 // that have been allocated since the last GC and for resources that are | 1060 // that have been allocated since the last GC and for resources that are |
| 1078 // going to be freed. | 1061 // going to be freed. |
| 1079 bool gcTracingEnabled; | 1062 bool gcTracingEnabled; |
| 1080 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled); | 1063 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled); |
| 1081 if (gcTracingEnabled && m_stats.totalObjectSpace() > 0) | 1064 if (gcTracingEnabled) |
| 1082 snapshot(); | 1065 snapshot(); |
| 1083 #endif | 1066 #endif |
| 1084 | 1067 |
| 1085 TRACE_EVENT0("blink_gc", "ThreadState::performPendingSweep"); | 1068 TRACE_EVENT0("blink_gc", "ThreadState::performPendingSweep"); |
| 1086 | 1069 |
| 1087 double timeStamp = WTF::currentTimeMS(); | 1070 double timeStamp = WTF::currentTimeMS(); |
| 1088 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); | 1071 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); |
| 1089 if (isMainThread()) { | 1072 if (isMainThread()) { |
| 1090 ScriptForbiddenScope::enter(); | 1073 ScriptForbiddenScope::enter(); |
| 1091 TRACE_EVENT_SET_SAMPLING_STATE("blink", "BlinkGCSweeping"); | 1074 TRACE_EVENT_SET_SAMPLING_STATE("blink", "BlinkGCSweeping"); |
| 1092 } | 1075 } |
| 1093 | 1076 |
| 1094 size_t objectSpaceBeforeSweep = m_stats.totalObjectSpace(); | 1077 size_t allocatedObjectSizeBeforeSweeping = Heap::allocatedObjectSize(); |
| 1095 { | 1078 { |
| 1096 NoSweepScope scope(this); | 1079 NoSweepScope scope(this); |
| 1097 | 1080 |
| 1098 // Disallow allocation during weak processing. | 1081 // Disallow allocation during weak processing. |
| 1099 enterNoAllocationScope(); | 1082 enterNoAllocationScope(); |
| 1100 { | 1083 { |
| 1101 TRACE_EVENT0("blink_gc", "ThreadState::threadLocalWeakProcessing"); | 1084 TRACE_EVENT0("blink_gc", "ThreadState::threadLocalWeakProcessing"); |
| 1102 // Perform thread-specific weak processing. | 1085 // Perform thread-specific weak processing. |
| 1103 while (popAndInvokeWeakPointerCallback(Heap::s_markingVisitor)) { } | 1086 while (popAndInvokeWeakPointerCallback(Heap::s_markingVisitor)) { } |
| 1104 } | 1087 } |
| 1105 { | 1088 { |
| 1106 TRACE_EVENT0("blink_gc", "ThreadState::invokePreFinalizers"); | 1089 TRACE_EVENT0("blink_gc", "ThreadState::invokePreFinalizers"); |
| 1107 invokePreFinalizers(*Heap::s_markingVisitor); | 1090 invokePreFinalizers(*Heap::s_markingVisitor); |
| 1108 } | 1091 } |
| 1109 leaveNoAllocationScope(); | 1092 leaveNoAllocationScope(); |
| 1110 | 1093 |
| 1111 // Perform sweeping and finalization. | 1094 // Perform sweeping and finalization. |
| 1112 performPendingSweepInParallel(); | 1095 performPendingSweepInParallel(); |
| 1113 } | 1096 } |
| 1114 | 1097 |
| 1115 clearGCRequested(); | 1098 clearGCRequested(); |
| 1116 clearSweepRequested(); | 1099 clearSweepRequested(); |
| 1117 // If we collected less than 50% of objects, record that the | 1100 |
| 1118 // collection rate is low which we use to determine when to | 1101 // If we collected less than 50% of objects, record that the collection rate |
| 1119 // perform the next GC. | 1102 // is low which we use to determine when to perform the next GC. |
| 1120 setLowCollectionRate(m_stats.totalObjectSpace() > (objectSpaceBeforeSweep / 2)); | 1103 // FIXME: m_lowCollectionRate should be available in non-main threads. |
| | 1104 if (isMainThread()) |
| | 1105 m_lowCollectionRate = Heap::liveObjectSize() > (allocatedObjectSizeBeforeSweeping / 2); |
| 1121 | 1106 |
| 1122 if (Platform::current()) { | 1107 if (Platform::current()) { |
| 1123 Platform::current()->histogramCustomCounts("BlinkGC.PerformPendingSweep", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); | 1108 Platform::current()->histogramCustomCounts("BlinkGC.PerformPendingSweep", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); |
| 1124 } | 1109 } |
| 1125 | 1110 |
| 1126 if (isMainThread()) { | 1111 if (isMainThread()) { |
| 1127 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState); | 1112 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState); |
| 1128 ScriptForbiddenScope::exit(); | 1113 ScriptForbiddenScope::exit(); |
| 1129 } | 1114 } |
| 1130 } | 1115 } |
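The collection-rate bookkeeping near the end of performPendingSweep() reduces to a single comparison: the rate is considered low when more than half of the pre-sweep allocated object size survives the sweep. A sketch with hypothetical names:

#include <cstddef>

// True when the GC reclaimed less than 50% of what was allocated, which
// stretches the next conservative-GC threshold from 100% to 300% growth.
bool isLowCollectionRate(size_t liveSizeAfterSweep, size_t allocatedSizeBeforeSweep)
{
    return liveSizeAfterSweep > allocatedSizeBeforeSweep / 2;
}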
| 1131 | 1116 |
| 1132 void ThreadState::performPendingSweepInParallel() | 1117 void ThreadState::performPendingSweepInParallel() |
| 1133 { | 1118 { |
| 1134 // Sweeping will recalculate the stats | |
| 1135 m_stats.clear(); | |
| 1136 | |
| 1137 // Sweep the non-finalized heap pages on multiple threads. | 1119 // Sweep the non-finalized heap pages on multiple threads. |
| 1138 // Attempt to load-balance by having the sweeper thread sweep as | 1120 // Attempt to load-balance by having the sweeper thread sweep as |
| 1139 // close to half of the pages as possible. | 1121 // close to half of the pages as possible. |
| 1140 int nonFinalizedPages = 0; | 1122 int nonFinalizedPages = 0; |
| 1141 for (int i = 0; i < NumberOfNonFinalizedHeaps; i++) | 1123 for (int i = 0; i < NumberOfNonFinalizedHeaps; i++) |
| 1142 nonFinalizedPages += m_heaps[FirstNonFinalizedHeap + i]->normalPageCount(); | 1124 nonFinalizedPages += m_heaps[FirstNonFinalizedHeap + i]->normalPageCount(); |
| 1143 | 1125 |
| 1144 int finalizedPages = 0; | 1126 int finalizedPages = 0; |
| 1145 for (int i = 0; i < NumberOfFinalizedHeaps; i++) | 1127 for (int i = 0; i < NumberOfFinalizedHeaps; i++) |
| 1146 finalizedPages += m_heaps[FirstFinalizedHeap + i]->normalPageCount(); | 1128 finalizedPages += m_heaps[FirstFinalizedHeap + i]->normalPageCount(); |
| 1147 | 1129 |
| 1148 int pagesToSweepInParallel = nonFinalizedPages < finalizedPages ? nonFinalizedPages : ((nonFinalizedPages + finalizedPages) / 2); | 1130 int pagesToSweepInParallel = nonFinalizedPages < finalizedPages ? nonFinalizedPages : ((nonFinalizedPages + finalizedPages) / 2); |
| 1149 | 1131 |
| 1150 // Start the sweeper thread for the non finalized heaps. No | 1132 // Start the sweeper thread for the non finalized heaps. No |
| 1151 // finalizers need to run and therefore the pages can be | 1133 // finalizers need to run and therefore the pages can be |
| 1152 // swept on other threads. | 1134 // swept on other threads. |
| 1153 static const int minNumberOfPagesForParallelSweep = 10; | 1135 static const int minNumberOfPagesForParallelSweep = 10; |
| 1154 HeapStats heapStatsVector[NumberOfNonFinalizedHeaps]; | |
| 1155 OwnPtr<BaseHeap> splitOffHeaps[NumberOfNonFinalizedHeaps]; | 1136 OwnPtr<BaseHeap> splitOffHeaps[NumberOfNonFinalizedHeaps]; |
| 1156 for (int i = 0; i < NumberOfNonFinalizedHeaps && pagesToSweepInParallel > 0; i++) { | 1137 for (int i = 0; i < NumberOfNonFinalizedHeaps && pagesToSweepInParallel > 0; i++) { |
| 1157 BaseHeap* heap = m_heaps[FirstNonFinalizedHeap + i]; | 1138 BaseHeap* heap = m_heaps[FirstNonFinalizedHeap + i]; |
| 1158 int pageCount = heap->normalPageCount(); | 1139 int pageCount = heap->normalPageCount(); |
| 1159 // Only use the sweeper thread if it exists and there are | 1140 // Only use the sweeper thread if it exists and there are |
| 1160 // pages to sweep. | 1141 // pages to sweep. |
| 1161 if (m_sweeperThread && pageCount > minNumberOfPagesForParallelSweep) { | 1142 if (m_sweeperThread && pageCount > minNumberOfPagesForParallelSweep) { |
| 1162 // Create a new thread heap instance to make sure that the | 1143 // Create a new thread heap instance to make sure that the |
| 1163 // state modified while sweeping is separate for the | 1144 // state modified while sweeping is separate for the |
| 1164 // sweeper thread and the owner thread. | 1145 // sweeper thread and the owner thread. |
| 1165 int pagesToSplitOff = std::min(pageCount, pagesToSweepInParallel); | 1146 int pagesToSplitOff = std::min(pageCount, pagesToSweepInParallel); |
| 1166 pagesToSweepInParallel -= pagesToSplitOff; | 1147 pagesToSweepInParallel -= pagesToSplitOff; |
| 1167 splitOffHeaps[i] = heap->split(pagesToSplitOff); | 1148 splitOffHeaps[i] = heap->split(pagesToSplitOff); |
| 1168 HeapStats* stats = &heapStatsVector[i]; | 1149 m_sweeperThread->postTask(new SweepNonFinalizedHeapTask(this, splitOffHeaps[i].get())); |
| 1169 m_sweeperThread->postTask(new SweepNonFinalizedHeapTask(this, splitOffHeaps[i].get(), stats)); | |
| 1170 } | 1150 } |
| 1171 } | 1151 } |
| 1172 | 1152 |
| 1173 { | 1153 { |
| 1174 // Sweep the remainder of the non-finalized pages (or all of them | 1154 // Sweep the remainder of the non-finalized pages (or all of them |
| 1175 // if there is no sweeper thread). | 1155 // if there is no sweeper thread). |
| 1176 TRACE_EVENT0("blink_gc", "ThreadState::sweepNonFinalizedHeaps"); | 1156 TRACE_EVENT0("blink_gc", "ThreadState::sweepNonFinalizedHeaps"); |
| 1177 for (int i = 0; i < NumberOfNonFinalizedHeaps; i++) { | 1157 for (int i = 0; i < NumberOfNonFinalizedHeaps; i++) { |
| 1178 HeapStats stats; | 1158 m_heaps[FirstNonFinalizedHeap + i]->sweep(); |
| 1179 m_heaps[FirstNonFinalizedHeap + i]->sweep(&stats); | |
| 1180 m_stats.add(&stats); | |
| 1181 } | 1159 } |
| 1182 } | 1160 } |
| 1183 | 1161 |
| 1184 { | 1162 { |
| 1185 // Sweep the finalized pages. | 1163 // Sweep the finalized pages. |
| 1186 TRACE_EVENT0("blink_gc", "ThreadState::sweepFinalizedHeaps"); | 1164 TRACE_EVENT0("blink_gc", "ThreadState::sweepFinalizedHeaps"); |
| 1187 for (int i = 0; i < NumberOfFinalizedHeaps; i++) { | 1165 for (int i = 0; i < NumberOfFinalizedHeaps; i++) { |
| 1188 HeapStats stats; | 1166 m_heaps[FirstFinalizedHeap + i]->sweep(); |
| 1189 m_heaps[FirstFinalizedHeap + i]->sweep(&stats); | |
| 1190 m_stats.add(&stats); | |
| 1191 } | 1167 } |
| 1192 } | 1168 } |
| 1193 | 1169 |
| 1194 // Wait for the sweeper threads and update the heap stats with the | |
| 1195 // stats for the heap portions swept by those threads. | |
| 1196 waitUntilSweepersDone(); | 1170 waitUntilSweepersDone(); |
| 1197 for (int i = 0; i < NumberOfNonFinalizedHeaps; i++) { | 1171 for (int i = 0; i < NumberOfNonFinalizedHeaps; i++) { |
| 1198 m_stats.add(&heapStatsVector[i]); | |
| 1199 if (splitOffHeaps[i]) | 1172 if (splitOffHeaps[i]) |
| 1200 m_heaps[FirstNonFinalizedHeap + i]->merge(splitOffHeaps[i].release()); | 1173 m_heaps[FirstNonFinalizedHeap + i]->merge(splitOffHeaps[i].release()); |
| 1201 } | 1174 } |
| 1202 | 1175 |
| 1203 for (int i = 0; i < NumberOfHeaps; i++) | 1176 for (int i = 0; i < NumberOfHeaps; i++) |
| 1204 m_heaps[i]->postSweepProcessing(); | 1177 m_heaps[i]->postSweepProcessing(); |
| 1205 | |
| 1206 getStats(m_statsAfterLastGC); | |
| 1207 } | 1178 } |
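performPendingSweepInParallel() splits pages off into private BaseHeap instances, posts those to the sweeper thread, sweeps the rest (plus all finalized pages) on the owner thread, then merges the split-off heaps back after waitUntilSweepersDone(). The page split itself is equivalent to taking the smaller of the non-finalized page count and half of all pages, since finalized pages must stay on the owner thread. A sketch with worked numbers (hypothetical function):

#include <algorithm>

int pagesForSweeperThread(int nonFinalizedPages, int finalizedPages)
{
    // Equivalent to the ternary in the code above: if there are fewer
    // non-finalized than finalized pages, hand over all of them;
    // otherwise hand over half of the total page count.
    return std::min(nonFinalizedPages, (nonFinalizedPages + finalizedPages) / 2);
}

// 30 non-finalized + 10 finalized pages: the sweeper thread takes 20 (half of 40).
// 10 non-finalized + 30 finalized pages: the sweeper thread takes all 10 non-finalized.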
| 1208 | 1179 |
| 1209 void ThreadState::addInterruptor(Interruptor* interruptor) | 1180 void ThreadState::addInterruptor(Interruptor* interruptor) |
| 1210 { | 1181 { |
| 1211 SafePointScope scope(HeapPointersOnStack, SafePointScope::AllowNesting); | 1182 SafePointScope scope(HeapPointersOnStack, SafePointScope::AllowNesting); |
| 1212 | 1183 |
| 1213 { | 1184 { |
| 1214 MutexLocker locker(threadAttachMutex()); | 1185 MutexLocker locker(threadAttachMutex()); |
| 1215 m_interruptors.append(interruptor); | 1186 m_interruptors.append(interruptor); |
| 1216 } | 1187 } |
| (...skipping 60 matching lines...) |
| 1277 return gcInfo; | 1248 return gcInfo; |
| 1278 } | 1249 } |
| 1279 } | 1250 } |
| 1280 if (needLockForIteration) | 1251 if (needLockForIteration) |
| 1281 threadAttachMutex().unlock(); | 1252 threadAttachMutex().unlock(); |
| 1282 return 0; | 1253 return 0; |
| 1283 } | 1254 } |
| 1284 #endif | 1255 #endif |
| 1285 | 1256 |
| 1286 } // namespace blink | 1257 } // namespace blink |