OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 270 matching lines...)
281 , m_atSafePoint(false) | 281 , m_atSafePoint(false) |
282 , m_interruptors() | 282 , m_interruptors() |
283 , m_gcRequested(false) | 283 , m_gcRequested(false) |
284 , m_forcePreciseGCForTesting(false) | 284 , m_forcePreciseGCForTesting(false) |
285 , m_sweepRequested(0) | 285 , m_sweepRequested(0) |
286 , m_sweepInProgress(false) | 286 , m_sweepInProgress(false) |
287 , m_noAllocationCount(0) | 287 , m_noAllocationCount(0) |
288 , m_inGC(false) | 288 , m_inGC(false) |
289 , m_heapContainsCache(adoptPtr(new HeapContainsCache())) | 289 , m_heapContainsCache(adoptPtr(new HeapContainsCache())) |
290 , m_isTerminating(false) | 290 , m_isTerminating(false) |
291 , m_lowCollectionRate(false) | |
291 #if defined(ADDRESS_SANITIZER) | 292 #if defined(ADDRESS_SANITIZER) |
292 , m_asanFakeStack(__asan_get_current_fake_stack()) | 293 , m_asanFakeStack(__asan_get_current_fake_stack()) |
293 #endif | 294 #endif |
294 { | 295 { |
295 ASSERT(!**s_threadSpecific); | 296 ASSERT(!**s_threadSpecific); |
296 **s_threadSpecific = this; | 297 **s_threadSpecific = this; |
297 | 298 |
298 m_stats.clear(); | 299 m_stats.clear(); |
299 m_statsAfterLastGC.clear(); | 300 m_statsAfterLastGC.clear(); |
300 // First allocate the general heap, second iterate through to | 301 // First allocate the general heap, second iterate through to |
(...skipping 295 matching lines...)
596 } | 597 } |
597 | 598 |
598 Mutex& ThreadState::globalRootsMutex() | 599 Mutex& ThreadState::globalRootsMutex() |
599 { | 600 { |
600 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); | 601 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); |
601 return mutex; | 602 return mutex; |
602 } | 603 } |
603 | 604 |
604 // Trigger garbage collection on a 50% increase in size, but not for | 605 // Trigger garbage collection on a 50% increase in size, but not for |
605 // less than 512kbytes. | 606 // less than 512kbytes. |
606 static bool increasedEnoughToGC(size_t newSize, size_t oldSize) | 607 bool ThreadState::increasedEnoughToGC(size_t newSize, size_t oldSize) |
607 { | 608 { |
608 if (newSize < 1 << 19) | 609 if (newSize < 1 << 19) |
609 return false; | 610 return false; |
610 return newSize > oldSize + (oldSize >> 1); | 611 size_t limit = oldSize + (oldSize >> 1); |
612 return newSize > limit; | |
611 } | 613 } |
612 | 614 |
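For reference, the heuristic above triggers a normal GC only once the heap passes a 512KB floor (1 << 19 bytes) and has grown 50% past its post-GC size; `oldSize + (oldSize >> 1)` is 1.5x in integer math. A minimal standalone sketch of the same arithmetic (the function name and driver values are illustrative, not Blink API):

```cpp
#include <cassert>
#include <cstddef>

// Sketch of the growth heuristic in the diff above: trigger a GC
// only once the heap exceeds 512KB (1 << 19 bytes) AND has grown by
// at least 50% since the last collection.
static bool increasedEnoughToGCSketch(std::size_t newSize, std::size_t oldSize)
{
    if (newSize < (static_cast<std::size_t>(1) << 19))
        return false; // below the 512KB floor, never trigger
    return newSize > oldSize + (oldSize >> 1); // 1.5x growth limit
}

int main()
{
    assert(!increasedEnoughToGCSketch(400 * 1024, 100 * 1024)); // under the floor
    assert(!increasedEnoughToGCSketch(700 * 1024, 500 * 1024)); // only +40%
    assert(increasedEnoughToGCSketch(800 * 1024, 500 * 1024));  // +60%, triggers
    return 0;
}
```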
613 // FIXME: The heuristics are local for a thread at this | 615 // FIXME: The heuristics are local for a thread at this |
614 // point. Consider using heuristics that take memory for all threads | 616 // point. Consider using heuristics that take memory for all threads |
615 // into account. | 617 // into account. |
616 bool ThreadState::shouldGC() | 618 bool ThreadState::shouldGC() |
617 { | 619 { |
618 // Do not GC during sweeping. We allow allocation during | 620 // Do not GC during sweeping. We allow allocation during |
619 // finalization, but those allocations are not allowed | 621 // finalization, but those allocations are not allowed |
620 // to lead to nested garbage collections. | 622 // to lead to nested garbage collections. |
621 return !m_sweepInProgress && increasedEnoughToGC(m_stats.totalObjectSpace(), m_statsAfterLastGC.totalObjectSpace()); | 623 return !m_sweepInProgress && increasedEnoughToGC(m_stats.totalObjectSpace(), m_statsAfterLastGC.totalObjectSpace()); |
622 } | 624 } |
623 | 625 |
624 // Trigger conservative garbage collection on a 100% increase in size, | 626 // Trigger conservative garbage collection on a 100% increase in size, |
625 // but not for less than 4Mbytes. | 627 // but not for less than 4Mbytes. |
Erik Corry 2014/08/11 10:24:06:
// If we are in low collection rate mode, then req
Mads Ager (chromium) 2014/08/11 12:21:31:
Done.
626 static bool increasedEnoughToForceConservativeGC(size_t newSize, size_t oldSize) | 628 bool ThreadState::increasedEnoughToForceConservativeGC(size_t newSize, size_t oldSize) |
627 { | 629 { |
628 if (newSize < 1 << 22) | 630 if (newSize < 1 << 22) |
629 return false; | 631 return false; |
630 return newSize > 2 * oldSize; | 632 size_t limit = (m_lowCollectionRate ? 4 : 2) * oldSize; |
633 return newSize > limit; | |
631 } | 634 } |
632 | 635 |
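The conservative-GC heuristic keeps its 4MB floor (1 << 22 bytes) but, with this patch, raises the required growth from 2x to 4x when the previous cycle reclaimed little memory. A hedged sketch of the changed logic, with the `m_lowCollectionRate` member modeled as a plain parameter (names and driver values are illustrative):

```cpp
#include <cassert>
#include <cstddef>

// Sketch of the conservative-GC heuristic after this change: below
// 4MB (1 << 22 bytes) never force a GC; above it, require a 100%
// increase normally, or a 300% increase (4x) in low-collection-rate
// mode.
static bool increasedEnoughToForceConservativeGCSketch(
    std::size_t newSize, std::size_t oldSize, bool lowCollectionRate)
{
    if (newSize < (static_cast<std::size_t>(1) << 22))
        return false; // below the 4MB floor
    std::size_t limit = (lowCollectionRate ? 4 : 2) * oldSize;
    return newSize > limit;
}

int main()
{
    const std::size_t mb = 1024 * 1024;
    assert(increasedEnoughToForceConservativeGCSketch(5 * mb, 2 * mb, false)); // 2.5x > 2x
    assert(!increasedEnoughToForceConservativeGCSketch(5 * mb, 2 * mb, true)); // 2.5x < 4x
    assert(increasedEnoughToForceConservativeGCSketch(9 * mb, 2 * mb, true));  // 4.5x > 4x
    return 0;
}
```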
633 // FIXME: The heuristics are local for a thread at this | 636 // FIXME: The heuristics are local for a thread at this |
634 // point. Consider using heuristics that take memory for all threads | 637 // point. Consider using heuristics that take memory for all threads |
635 // into account. | 638 // into account. |
636 bool ThreadState::shouldForceConservativeGC() | 639 bool ThreadState::shouldForceConservativeGC() |
637 { | 640 { |
638 // Do not GC during sweeping. We allow allocation during | 641 // Do not GC during sweeping. We allow allocation during |
639 // finalization, but those allocations are not allowed | 642 // finalization, but those allocations are not allowed |
640 // to lead to nested garbage collections. | 643 // to lead to nested garbage collections. |
(...skipping 250 matching lines...)
891 TRACE_EVENT_SET_SAMPLING_STATE("blink", "BlinkGCSweeping"); | 894 TRACE_EVENT_SET_SAMPLING_STATE("blink", "BlinkGCSweeping"); |
892 } | 895 } |
893 | 896 |
894 m_sweepInProgress = true; | 897 m_sweepInProgress = true; |
895 // Disallow allocation during weak processing. | 898 // Disallow allocation during weak processing. |
896 enterNoAllocationScope(); | 899 enterNoAllocationScope(); |
897 // Perform thread-specific weak processing. | 900 // Perform thread-specific weak processing. |
898 while (popAndInvokeWeakPointerCallback(Heap::s_markingVisitor)) { } | 901 while (popAndInvokeWeakPointerCallback(Heap::s_markingVisitor)) { } |
899 leaveNoAllocationScope(); | 902 leaveNoAllocationScope(); |
900 // Perform sweeping and finalization. | 903 // Perform sweeping and finalization. |
904 size_t objectSpaceBeforeSweep = m_stats.totalObjectSpace(); | |
901 m_stats.clear(); // Sweeping will recalculate the stats | 905 m_stats.clear(); // Sweeping will recalculate the stats |
902 for (int i = 0; i < NumberOfHeaps; i++) | 906 for (int i = 0; i < NumberOfHeaps; i++) |
903 m_heaps[i]->sweep(); | 907 m_heaps[i]->sweep(); |
904 getStats(m_statsAfterLastGC); | 908 getStats(m_statsAfterLastGC); |
905 m_sweepInProgress = false; | 909 m_sweepInProgress = false; |
906 clearGCRequested(); | 910 clearGCRequested(); |
907 clearSweepRequested(); | 911 clearSweepRequested(); |
912 // If we collected less than 50% of objects, record that the | |
913 // collection rate is low which we use to determine when to | |
914 // perform the next GC. | |
915 setLowCollectionRate(m_stats.totalObjectSpace() > (objectSpaceBeforeSweep >> 1)); | |
908 | 916 |
909 if (blink::Platform::current()) { | 917 if (blink::Platform::current()) { |
910 blink::Platform::current()->histogramCustomCounts("BlinkGC.PerformPendingSweep", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); | 918 blink::Platform::current()->histogramCustomCounts("BlinkGC.PerformPendingSweep", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); |
911 } | 919 } |
912 | 920 |
913 if (isMainThread()) { | 921 if (isMainThread()) { |
914 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState); | 922 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState); |
915 ScriptForbiddenScope::exit(); | 923 ScriptForbiddenScope::exit(); |
916 } | 924 } |
917 } | 925 } |
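The added lines in this hunk snapshot object space before sweeping and flag a low collection rate when more than half of it survives the sweep. A small sketch of that check as a free-standing helper (the helper name and driver values are assumptions for illustration):

```cpp
#include <cstddef>
#include <cstdio>

// Sketch of the rate check added at the end of performPendingSweep:
// if more than half of the pre-sweep object space survives, less
// than 50% was collected, so the next conservative GC will demand
// 4x growth instead of 2x.
static bool collectionRateWasLow(std::size_t beforeSweep, std::size_t afterSweep)
{
    return afterSweep > (beforeSweep >> 1);
}

int main()
{
    const std::size_t mb = 1024 * 1024;
    std::printf("%d\n", collectionRateWasLow(8 * mb, 5 * mb)); // 1: low rate
    std::printf("%d\n", collectionRateWasLow(8 * mb, 3 * mb)); // 0: healthy rate
    return 0;
}
```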
(...skipping 48 matching lines...)
966 threadAttachMutex().unlock(); | 974 threadAttachMutex().unlock(); |
967 return gcInfo; | 975 return gcInfo; |
968 } | 976 } |
969 } | 977 } |
970 if (needLockForIteration) | 978 if (needLockForIteration) |
971 threadAttachMutex().unlock(); | 979 threadAttachMutex().unlock(); |
972 return 0; | 980 return 0; |
973 } | 981 } |
974 #endif | 982 #endif |
975 } | 983 } |