Chromium Code Reviews

Unified Diff: Source/platform/heap/ThreadState.cpp

Issue 738773003: Revert of Oilpan: Refactor the way we calculate heap statistics (Closed)
Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 6 years, 1 month ago
 /*
  * Copyright (C) 2013 Google Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
  * met:
  *
  *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above
(...skipping 645 matching lines...)
     json->beginArray("heaps");
     SNAPSHOT_HEAP(General1);
     SNAPSHOT_HEAP(General2);
     SNAPSHOT_HEAP(General3);
     SNAPSHOT_HEAP(General4);
     SNAPSHOT_HEAP(CollectionBacking);
     FOR_EACH_TYPED_HEAP(SNAPSHOT_HEAP);
     json->endArray();
 #undef SNAPSHOT_HEAP
 
-    json->setInteger("allocatedSpace", Heap::allocatedSpace());
-    json->setInteger("objectSpace", Heap::allocatedObjectSize());
+    json->setInteger("allocatedSpace", m_stats.totalAllocatedSpace());
+    json->setInteger("objectSpace", m_stats.totalObjectSpace());
     json->setInteger("pageCount", info.pageCount);
     json->setInteger("freeSize", info.freeSize);
 
     Vector<String> classNameVector(info.classTags.size());
     for (HashMap<const GCInfo*, size_t>::iterator it = info.classTags.begin(); it != info.classTags.end(); ++it)
         classNameVector[it->value] = it->key->m_className;
 
     size_t liveSize = 0;
     size_t deadSize = 0;
     json->beginArray("classes");
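Note on the classTags loop above: the map assigns each GCInfo a dense index, and the snapshot inverts it into a vector indexed by tag so class names can be emitted in tag order. A minimal standalone sketch of the same inversion, using std::unordered_map in place of Blink's HashMap (names here are illustrative, not from the patch):

    #include <cassert>
    #include <cstddef>
    #include <string>
    #include <unordered_map>
    #include <vector>

    int main()
    {
        // Each class name gets a dense tag (0..n-1) as it is first seen.
        std::unordered_map<std::string, std::size_t> classTags;
        classTags["Node"] = 0;
        classTags["Element"] = 1;
        classTags["Document"] = 2;

        // Invert the map: tag -> class name, mirroring classNameVector above.
        std::vector<std::string> classNameVector(classTags.size());
        for (const auto& entry : classTags)
            classNameVector[entry.second] = entry.first;

        assert(classNameVector[1] == "Element");
        return 0;
    }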
(...skipping 46 matching lines...)
     AtomicallyInitializedStatic(PersistentNode*, anchor = new PersistentAnchor);
     return anchor;
 }
 
 Mutex& ThreadState::globalRootsMutex()
 {
     AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
     return mutex;
 }
 
+// Trigger garbage collection on a 50% increase in size, but not for
+// less than 512kbytes.
+bool ThreadState::increasedEnoughToGC(size_t newSize, size_t oldSize)
+{
+    if (newSize < 1 << 19)
+        return false;
+    size_t limit = oldSize + (oldSize >> 1);
+    return newSize > limit;
+}
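Note on the constants: 1 << 19 is 512 KB, and oldSize + (oldSize >> 1) is oldSize * 1.5, i.e. the 50% growth limit the comment describes. A minimal standalone sketch of the heuristic with worked values (a paraphrase for illustration, not code from this patch):

    #include <cassert>
    #include <cstddef>

    // Same shape as increasedEnoughToGC above: collect when the heap has
    // grown by more than 50% since the last GC, subject to a 512 KB floor.
    static bool increasedEnoughToGC(std::size_t newSize, std::size_t oldSize)
    {
        if (newSize < (static_cast<std::size_t>(1) << 19)) // 512 KB floor
            return false;
        std::size_t limit = oldSize + (oldSize >> 1); // oldSize * 1.5
        return newSize > limit;
    }

    int main()
    {
        assert(!increasedEnoughToGC(400 * 1024, 100 * 1024)); // below the 512 KB floor
        assert(!increasedEnoughToGC(900 * 1024, 600 * 1024)); // exactly 1.5x: no GC yet
        assert(increasedEnoughToGC(901 * 1024, 600 * 1024));  // past 1.5x: trigger GC
        return 0;
    }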
+
+// FIXME: The heuristics are local for a thread at this
+// point. Consider using heuristics that take memory for all threads
+// into account.
 bool ThreadState::shouldGC()
 {
-    // Do not GC during sweeping. We allow allocation during finalization,
-    // but those allocations are not allowed to lead to nested GCs.
-    if (m_sweepInProgress)
-        return false;
-
-    // Trigger garbage collection on a 50% increase in size,
-    // but not for less than 512 KB.
-    if (Heap::allocatedObjectSize() < 1 << 19)
-        return false;
-    size_t limit = Heap::markedObjectSize() + Heap::markedObjectSize() / 2;
-    return Heap::allocatedObjectSize() > limit;
+    // Do not GC during sweeping. We allow allocation during
+    // finalization, but those allocations are not allowed
+    // to lead to nested garbage collections.
+    return !m_sweepInProgress && increasedEnoughToGC(m_stats.totalObjectSpace(), m_statsAfterLastGC.totalObjectSpace());
 }
 
+// Trigger conservative garbage collection on a 100% increase in size,
+// but not for less than 4Mbytes. If the system currently has a low
+// collection rate, then require a 300% increase in size.
+bool ThreadState::increasedEnoughToForceConservativeGC(size_t newSize, size_t oldSize)
+{
+    if (newSize < 1 << 22)
+        return false;
+    size_t limit = (m_lowCollectionRate ? 4 : 2) * oldSize;
+    return newSize > limit;
+}
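Similarly, 1 << 22 is 4 MB, and the multiplier is 2 (a 100% increase) normally or 4 (a 300% increase) after a low-yield collection. A standalone sketch with worked values, passing the low-collection-rate flag explicitly for illustration (not code from this patch):

    #include <cassert>
    #include <cstddef>

    static bool enoughToForceConservativeGC(std::size_t newSize, std::size_t oldSize, bool lowCollectionRate)
    {
        if (newSize < (static_cast<std::size_t>(1) << 22)) // 4 MB floor
            return false;
        std::size_t limit = (lowCollectionRate ? 4 : 2) * oldSize;
        return newSize > limit;
    }

    int main()
    {
        const std::size_t MB = 1024 * 1024;
        assert(!enoughToForceConservativeGC(3 * MB, 1 * MB, false)); // below the 4 MB floor
        assert(enoughToForceConservativeGC(5 * MB, 2 * MB, false));  // more than doubled: trigger
        assert(!enoughToForceConservativeGC(5 * MB, 2 * MB, true));  // low rate requires > 4x
        assert(enoughToForceConservativeGC(9 * MB, 2 * MB, true));   // more than 4x: trigger
        return 0;
    }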
+
+// FIXME: The heuristics are local for a thread at this
+// point. Consider using heuristics that take memory for all threads
+// into account.
 bool ThreadState::shouldForceConservativeGC()
 {
-    // Do not GC during sweeping. We allow allocation during finalization,
-    // but those allocations are not allowed to lead to nested GCs.
-    if (m_sweepInProgress)
-        return false;
-
-    // Trigger conservative garbage collection on a 100% increase in size,
-    // but not for less than 4Mbytes. If the system currently has a low
-    // collection rate, then require a 300% increase in size.
-    if (Heap::allocatedObjectSize() < 1 << 22)
-        return false;
-    size_t limit = (m_lowCollectionRate ? 4 : 2) * Heap::markedObjectSize();
-    return Heap::allocatedObjectSize() > limit;
+    // Do not GC during sweeping. We allow allocation during
+    // finalization, but those allocations are not allowed
+    // to lead to nested garbage collections.
+    return !m_sweepInProgress && increasedEnoughToForceConservativeGC(m_stats.totalObjectSpace(), m_statsAfterLastGC.totalObjectSpace());
 }
 
 bool ThreadState::sweepRequested()
 {
     ASSERT(isAnyThreadInGC() || checkThread());
     return m_sweepRequested;
 }
 
 void ThreadState::setSweepRequested()
 {
(...skipping 112 matching lines...)
 
 BaseHeapPage* ThreadState::heapPageFromAddress(Address address)
 {
     for (int i = 0; i < NumberOfHeaps; i++) {
         if (BaseHeapPage* page = m_heaps[i]->heapPageFromAddress(address))
             return page;
     }
     return 0;
 }
 
-size_t ThreadState::objectPayloadSizeForTesting()
+void ThreadState::getStats(HeapStats& stats)
+{
+    stats = m_stats;
+}
+
+void ThreadState::getStatsForTesting(HeapStats& stats)
 {
     ASSERT(isConsistentForSweeping());
-    size_t objectPayloadSize = 0;
     for (int i = 0; i < NumberOfHeaps; i++)
-        objectPayloadSize += m_heaps[i]->objectPayloadSizeForTesting();
-    return objectPayloadSize;
+        m_heaps[i]->getStatsForTesting(stats);
 }
 
 bool ThreadState::stopThreads()
 {
     return s_safePointBarrier->parkOthers();
 }
 
 void ThreadState::resumeThreads()
 {
     s_safePointBarrier->resumeOthers();
(...skipping 108 matching lines...)
 {
     TRACE_EVENT0("blink_gc", "ThreadState::waitUntilSweepersDone");
     MutexLocker locker(m_sweepMutex);
     while (m_numberOfSweeperTasks > 0)
         m_sweepThreadCondition.wait(m_sweepMutex);
 }
 
 
 class SweepNonFinalizedHeapTask final : public WebThread::Task {
 public:
-    SweepNonFinalizedHeapTask(ThreadState* state, BaseHeap* heap)
+    SweepNonFinalizedHeapTask(ThreadState* state, BaseHeap* heap, HeapStats* stats)
         : m_threadState(state)
         , m_heap(heap)
+        , m_stats(stats)
     {
         m_threadState->registerSweepingTask();
     }
 
     virtual ~SweepNonFinalizedHeapTask()
     {
         m_threadState->unregisterSweepingTask();
     }
 
     virtual void run()
     {
         TRACE_EVENT0("blink_gc", "ThreadState::sweepNonFinalizedHeaps");
-        m_heap->sweep();
+        m_heap->sweep(m_stats);
     }
 
 private:
     ThreadState* m_threadState;
     BaseHeap* m_heap;
+    HeapStats* m_stats;
 };
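The task takes a HeapStats* because each sweeper task writes into its own pre-allocated slot (the heapStatsVector below), so concurrently running sweeps never touch shared statistics; the owner thread reads the slots only after waitUntilSweepersDone(). A minimal sketch of that one-accumulator-per-worker pattern, using std::thread and a toy Stats type as stand-ins for the WebThread/HeapStats machinery in this patch:

    #include <cstddef>
    #include <iostream>
    #include <thread>
    #include <vector>

    struct Stats {
        std::size_t objectSpace = 0;
        void add(const Stats& other) { objectSpace += other.objectSpace; }
    };

    int main()
    {
        const int workerCount = 4;
        // One pre-allocated slot per worker; no locking is needed because
        // each worker writes only to its own slot.
        std::vector<Stats> slots(workerCount);
        std::vector<std::thread> workers;
        for (int i = 0; i < workerCount; i++) {
            workers.emplace_back([&slots, i] {
                slots[i].objectSpace = (i + 1) * 1000; // stand-in for sweeping work
            });
        }

        // The equivalent of waitUntilSweepersDone(): join before reading slots.
        for (auto& worker : workers)
            worker.join();

        Stats total;
        for (const Stats& slot : slots)
            total.add(slot);
        std::cout << "total object space: " << total.objectSpace << "\n"; // 10000
        return 0;
    }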
 
 void ThreadState::performPendingSweep()
 {
     if (!sweepRequested())
         return;
 
 #if ENABLE(GC_PROFILE_HEAP)
     // We snapshot the heap prior to sweeping to get numbers for both resources
     // that have been allocated since the last GC and for resources that are
     // going to be freed.
     bool gcTracingEnabled;
     TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled);
-    if (gcTracingEnabled)
+    if (gcTracingEnabled && m_stats.totalObjectSpace() > 0)
         snapshot();
 #endif
 
     TRACE_EVENT0("blink_gc", "ThreadState::performPendingSweep");
 
     double timeStamp = WTF::currentTimeMS();
     const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE();
     if (isMainThread()) {
         ScriptForbiddenScope::enter();
         TRACE_EVENT_SET_SAMPLING_STATE("blink", "BlinkGCSweeping");
     }
 
-    size_t allocatedObjectSizeBeforeSweeping = Heap::allocatedObjectSize();
+    size_t objectSpaceBeforeSweep = m_stats.totalObjectSpace();
     {
         NoSweepScope scope(this);
 
         // Disallow allocation during weak processing.
         enterNoAllocationScope();
         {
             TRACE_EVENT0("blink_gc", "ThreadState::threadLocalWeakProcessing");
             // Perform thread-specific weak processing.
             while (popAndInvokeWeakPointerCallback(Heap::s_markingVisitor)) { }
         }
         {
             TRACE_EVENT0("blink_gc", "ThreadState::invokePreFinalizers");
             invokePreFinalizers(*Heap::s_markingVisitor);
         }
         leaveNoAllocationScope();
 
         // Perform sweeping and finalization.
         performPendingSweepInParallel();
     }
 
     clearGCRequested();
     clearSweepRequested();
-
-    // If we collected less than 50% of objects, record that the collection rate
-    // is low which we use to determine when to perform the next GC.
-    // FIXME: We should make m_lowCollectionRate available in non-main threads.
-    // FIXME: Heap::markedObjectSize() may not be accurate because other threads
-    // may not have finished sweeping.
-    if (isMainThread())
-        m_lowCollectionRate = Heap::markedObjectSize() > (allocatedObjectSizeBeforeSweeping / 2);
+    // If we collected less than 50% of objects, record that the
+    // collection rate is low which we use to determine when to
+    // perform the next GC.
+    setLowCollectionRate(m_stats.totalObjectSpace() > (objectSpaceBeforeSweep / 2));
 
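The condition reads: if more than half of the pre-sweep object space survived, then less than 50% was collected, so the rate is low and the next conservative GC needs a 300% rather than 100% increase (see increasedEnoughToForceConservativeGC above). A worked restatement of the condition (illustrative values, not from the patch):

    #include <cassert>
    #include <cstddef>

    int main()
    {
        const std::size_t MB = 1024 * 1024;
        std::size_t objectSpaceBeforeSweep = 10 * MB;
        std::size_t objectSpaceAfterSweep = 6 * MB; // sweeping freed only 4 MB
        bool lowCollectionRate = objectSpaceAfterSweep > (objectSpaceBeforeSweep / 2);
        assert(lowCollectionRate); // 6 MB > 5 MB: less than half collected
        return 0;
    }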
     if (Platform::current()) {
         Platform::current()->histogramCustomCounts("BlinkGC.PerformPendingSweep", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
     }
 
     if (isMainThread()) {
         TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState);
         ScriptForbiddenScope::exit();
     }
 }
 
 void ThreadState::performPendingSweepInParallel()
 {
+    // Sweeping will recalculate the stats
+    m_stats.clear();
+
     // Sweep the non-finalized heap pages on multiple threads.
     // Attempt to load-balance by having the sweeper thread sweep as
     // close to half of the pages as possible.
     int nonFinalizedPages = 0;
     for (int i = 0; i < NumberOfNonFinalizedHeaps; i++)
         nonFinalizedPages += m_heaps[FirstNonFinalizedHeap + i]->normalPageCount();
 
     int finalizedPages = 0;
     for (int i = 0; i < NumberOfFinalizedHeaps; i++)
         finalizedPages += m_heaps[FirstFinalizedHeap + i]->normalPageCount();
 
     int pagesToSweepInParallel = nonFinalizedPages < finalizedPages ? nonFinalizedPages : ((nonFinalizedPages + finalizedPages) / 2);
 
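Only non-finalized pages may be swept off-thread (finalizers must run on the owner thread), so that count is a hard cap; otherwise the split aims for half of all pages so the two threads finish at about the same time. A worked restatement of the expression above (illustrative values, not from the patch):

    #include <cassert>

    // Pages to hand to the sweeper thread: all non-finalized pages if they
    // are the minority, otherwise half of the grand total.
    static int pagesToSweepInParallel(int nonFinalizedPages, int finalizedPages)
    {
        return nonFinalizedPages < finalizedPages
            ? nonFinalizedPages
            : (nonFinalizedPages + finalizedPages) / 2;
    }

    int main()
    {
        // Few non-finalized pages: the sweeper thread takes all of them.
        assert(pagesToSweepInParallel(10, 90) == 10);
        // Plenty of them: the sweeper takes half the total (90 pages),
        // leaving the owner thread 10 non-finalized + 80 finalized = 90 pages.
        assert(pagesToSweepInParallel(100, 80) == 90);
        return 0;
    }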
     // Start the sweeper thread for the non finalized heaps. No
     // finalizers need to run and therefore the pages can be
     // swept on other threads.
     static const int minNumberOfPagesForParallelSweep = 10;
+    HeapStats heapStatsVector[NumberOfNonFinalizedHeaps];
     OwnPtr<BaseHeap> splitOffHeaps[NumberOfNonFinalizedHeaps];
     for (int i = 0; i < NumberOfNonFinalizedHeaps && pagesToSweepInParallel > 0; i++) {
         BaseHeap* heap = m_heaps[FirstNonFinalizedHeap + i];
         int pageCount = heap->normalPageCount();
         // Only use the sweeper thread if it exists and there are
         // pages to sweep.
         if (m_sweeperThread && pageCount > minNumberOfPagesForParallelSweep) {
             // Create a new thread heap instance to make sure that the
             // state modified while sweeping is separate for the
             // sweeper thread and the owner thread.
             int pagesToSplitOff = std::min(pageCount, pagesToSweepInParallel);
             pagesToSweepInParallel -= pagesToSplitOff;
             splitOffHeaps[i] = heap->split(pagesToSplitOff);
-            m_sweeperThread->postTask(new SweepNonFinalizedHeapTask(this, splitOffHeaps[i].get()));
+            HeapStats* stats = &heapStatsVector[i];
+            m_sweeperThread->postTask(new SweepNonFinalizedHeapTask(this, splitOffHeaps[i].get(), stats));
         }
     }
 
     {
         // Sweep the remainder of the non-finalized pages (or all of them
         // if there is no sweeper thread).
         TRACE_EVENT0("blink_gc", "ThreadState::sweepNonFinalizedHeaps");
         for (int i = 0; i < NumberOfNonFinalizedHeaps; i++) {
-            m_heaps[FirstNonFinalizedHeap + i]->sweep();
+            HeapStats stats;
+            m_heaps[FirstNonFinalizedHeap + i]->sweep(&stats);
+            m_stats.add(&stats);
         }
     }
 
     {
         // Sweep the finalized pages.
         TRACE_EVENT0("blink_gc", "ThreadState::sweepFinalizedHeaps");
         for (int i = 0; i < NumberOfFinalizedHeaps; i++) {
-            m_heaps[FirstFinalizedHeap + i]->sweep();
+            HeapStats stats;
+            m_heaps[FirstFinalizedHeap + i]->sweep(&stats);
+            m_stats.add(&stats);
         }
     }
 
+    // Wait for the sweeper threads and update the heap stats with the
+    // stats for the heap portions swept by those threads.
     waitUntilSweepersDone();
     for (int i = 0; i < NumberOfNonFinalizedHeaps; i++) {
+        m_stats.add(&heapStatsVector[i]);
         if (splitOffHeaps[i])
             m_heaps[FirstNonFinalizedHeap + i]->merge(splitOffHeaps[i].release());
     }
 
     for (int i = 0; i < NumberOfHeaps; i++)
         m_heaps[i]->postSweepProcessing();
+
+    getStats(m_statsAfterLastGC);
 }
 
 
 void ThreadState::addInterruptor(Interruptor* interruptor)
 {
     SafePointScope scope(HeapPointersOnStack, SafePointScope::AllowNesting);
 
     {
         MutexLocker locker(threadAttachMutex());
         m_interruptors.append(interruptor);
     }
(...skipping 60 matching lines...)
             return gcInfo;
         }
     }
     if (needLockForIteration)
         threadAttachMutex().unlock();
     return 0;
 }
 #endif
 
 } // namespace blink