OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 820 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
831 } | 831 } |
832 | 832 |
833 void ThreadState::flushHeapDoesNotContainCacheIfNeeded() | 833 void ThreadState::flushHeapDoesNotContainCacheIfNeeded() |
834 { | 834 { |
835 if (m_shouldFlushHeapDoesNotContainCache) { | 835 if (m_shouldFlushHeapDoesNotContainCache) { |
836 Heap::flushHeapDoesNotContainCache(); | 836 Heap::flushHeapDoesNotContainCache(); |
837 m_shouldFlushHeapDoesNotContainCache = false; | 837 m_shouldFlushHeapDoesNotContainCache = false; |
838 } | 838 } |
839 } | 839 } |
840 | 840 |
841 void ThreadState::makeConsistentForSweeping() | 841 void ThreadState::makeConsistentForSweeping(GCType gcType) |
842 { | 842 { |
843 ASSERT(isInGC()); | 843 ASSERT(isInGC()); |
844 TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForSweeping"); | 844 TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForSweeping"); |
845 for (int i = 0; i < NumberOfHeaps; ++i) | 845 for (int i = 0; i < NumberOfHeaps; ++i) |
846 m_heaps[i]->makeConsistentForSweeping(); | 846 m_heaps[i]->makeConsistentForSweeping(gcType); |
847 } | 847 } |
848 | 848 |
849 void ThreadState::preGC() | 849 void ThreadState::preGC(GCType gcType) |
850 { | 850 { |
851 ASSERT(!isInGC()); | 851 ASSERT(!isInGC()); |
852 setGCState(GCRunning); | 852 setGCState(GCRunning); |
853 makeConsistentForSweeping(); | 853 makeConsistentForSweeping(gcType); |
854 prepareRegionTree(); | 854 prepareRegionTree(); |
855 flushHeapDoesNotContainCacheIfNeeded(); | 855 flushHeapDoesNotContainCacheIfNeeded(); |
856 clearHeapAges(); | 856 clearHeapAges(); |
857 } | 857 } |
858 | 858 |
859 void ThreadState::postGC(GCType gcType) | 859 void ThreadState::postGC(GCType gcType) |
860 { | 860 { |
861 ASSERT(isInGC()); | 861 ASSERT(isInGC()); |
862 | 862 |
863 #if ENABLE(GC_PROFILING) | 863 #if ENABLE(GC_PROFILING) |
864 // We snapshot the heap prior to sweeping to get numbers for both resources | 864 // We snapshot the heap prior to sweeping to get numbers for both resources |
865 // that have been allocated since the last GC and for resources that are | 865 // that have been allocated since the last GC and for resources that are |
866 // going to be freed. | 866 // going to be freed. |
867 bool gcTracingEnabled; | 867 bool gcTracingEnabled; |
868 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled); | 868 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled); |
869 | 869 |
870 if (gcTracingEnabled) { | 870 if (gcTracingEnabled) { |
871 bool disabledByDefaultGCTracingEnabled; | 871 bool disabledByDefaultGCTracingEnabled; |
872 TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("blink_gc"), &disabledByDefaultGCTracingEnabled); | 872 TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("blink_gc"), &disabledByDefaultGCTracingEnabled); |
873 | 873 |
874 snapshot(); | 874 snapshot(); |
875 if (disabledByDefaultGCTracingEnabled) | 875 if (disabledByDefaultGCTracingEnabled) |
876 collectAndReportMarkSweepStats(); | 876 collectAndReportMarkSweepStats(); |
877 incrementMarkedObjectsAge(); | 877 incrementMarkedObjectsAge(); |
878 } | 878 } |
879 #endif | 879 #endif |
880 | 880 |
881 setGCState(gcType == GCWithSweep ? EagerSweepScheduled : LazySweepScheduled); | |
882 for (int i = 0; i < NumberOfHeaps; i++) | 881 for (int i = 0; i < NumberOfHeaps; i++) |
883 m_heaps[i]->prepareForSweep(); | 882 m_heaps[i]->prepareForSweep(); |
883 | |
884 if (gcType == GCWithSweep) { | |
885 setGCState(EagerSweepScheduled); | |
886 } else if (gcType == GCWithoutSweep) { | |
887 setGCState(LazySweepScheduled); | |
888 } else { | |
889 takeSnapshot(); | |
ssid
2015/05/27 12:43:47
Sorry, I just realized. This takeSnapshot() should
haraken
2015/05/27 13:48:10
prepareForSweep() needs to be called, because the
| |
890 // This unmarks all marked objects and marks all unmarked objects dead. | |
891 makeConsistentForSweeping(gcType); | |
892 // Force setting NoGCScheduled to circumvent checkThread() | |
893 // in setGCState(). | |
894 m_gcState = NoGCScheduled; | |
895 } | |
884 } | 896 } |
885 | 897 |
886 void ThreadState::preSweep() | 898 void ThreadState::preSweep() |
887 { | 899 { |
888 checkThread(); | 900 checkThread(); |
889 if (gcState() != EagerSweepScheduled && gcState() != LazySweepScheduled) | 901 if (gcState() != EagerSweepScheduled && gcState() != LazySweepScheduled) |
890 return; | 902 return; |
891 | 903 |
892 { | 904 { |
893 if (isMainThread()) | 905 if (isMainThread()) |
(...skipping 364 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1258 m_vectorBackingHeapIndex = heapIndexOfVectorHeapLeastRecentlyExpanded(Vector1HeapIndex, Vector4HeapIndex); | 1270 m_vectorBackingHeapIndex = heapIndexOfVectorHeapLeastRecentlyExpanded(Vector1HeapIndex, Vector4HeapIndex); |
1259 } | 1271 } |
1260 | 1272 |
1261 void ThreadState::promptlyFreed(size_t gcInfoIndex) | 1273 void ThreadState::promptlyFreed(size_t gcInfoIndex) |
1262 { | 1274 { |
1263 size_t entryIndex = gcInfoIndex & likelyToBePromptlyFreedArrayMask; | 1275 size_t entryIndex = gcInfoIndex & likelyToBePromptlyFreedArrayMask; |
1264 // See the comment in vectorBackingHeap() for why this is +3. | 1276 // See the comment in vectorBackingHeap() for why this is +3. |
1265 m_likelyToBePromptlyFreed[entryIndex] += 3; | 1277 m_likelyToBePromptlyFreed[entryIndex] += 3; |
1266 } | 1278 } |
1267 | 1279 |
1280 void ThreadState::takeSnapshot() | |
1281 { | |
1282 ASSERT(isInGC()); | |
1283 // TODO(ssid): Implement this. | |
1284 } | |
1285 | |
1268 #if ENABLE(GC_PROFILING) | 1286 #if ENABLE(GC_PROFILING) |
1269 const GCInfo* ThreadState::findGCInfoFromAllThreads(Address address) | 1287 const GCInfo* ThreadState::findGCInfoFromAllThreads(Address address) |
1270 { | 1288 { |
1271 bool needLockForIteration = !ThreadState::current()->isInGC(); | 1289 bool needLockForIteration = !ThreadState::current()->isInGC(); |
1272 if (needLockForIteration) | 1290 if (needLockForIteration) |
1273 threadAttachMutex().lock(); | 1291 threadAttachMutex().lock(); |
1274 | 1292 |
1275 for (ThreadState* state : attachedThreads()) { | 1293 for (ThreadState* state : attachedThreads()) { |
1276 if (const GCInfo* gcInfo = state->findGCInfo(address)) { | 1294 if (const GCInfo* gcInfo = state->findGCInfo(address)) { |
1277 if (needLockForIteration) | 1295 if (needLockForIteration) |
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1348 json->beginArray(it->key.ascii().data()); | 1366 json->beginArray(it->key.ascii().data()); |
1349 for (size_t age = 0; age <= maxHeapObjectAge; ++age) | 1367 for (size_t age = 0; age <= maxHeapObjectAge; ++age) |
1350 json->pushInteger(it->value.ages[age]); | 1368 json->pushInteger(it->value.ages[age]); |
1351 json->endArray(); | 1369 json->endArray(); |
1352 } | 1370 } |
1353 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release()); | 1371 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release()); |
1354 } | 1372 } |
1355 #endif | 1373 #endif |
1356 | 1374 |
1357 } // namespace blink | 1375 } // namespace blink |
OLD | NEW |