| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 270 matching lines...) |
| 281 , m_atSafePoint(false) | 281 , m_atSafePoint(false) |
| 282 , m_interruptors() | 282 , m_interruptors() |
| 283 , m_gcRequested(false) | 283 , m_gcRequested(false) |
| 284 , m_forcePreciseGCForTesting(false) | 284 , m_forcePreciseGCForTesting(false) |
| 285 , m_sweepRequested(0) | 285 , m_sweepRequested(0) |
| 286 , m_sweepInProgress(false) | 286 , m_sweepInProgress(false) |
| 287 , m_noAllocationCount(0) | 287 , m_noAllocationCount(0) |
| 288 , m_inGC(false) | 288 , m_inGC(false) |
| 289 , m_heapContainsCache(adoptPtr(new HeapContainsCache())) | 289 , m_heapContainsCache(adoptPtr(new HeapContainsCache())) |
| 290 , m_isTerminating(false) | 290 , m_isTerminating(false) |
| 291 , m_lowCollectionRate(false) |
| 291 #if defined(ADDRESS_SANITIZER) | 292 #if defined(ADDRESS_SANITIZER) |
| 292 , m_asanFakeStack(__asan_get_current_fake_stack()) | 293 , m_asanFakeStack(__asan_get_current_fake_stack()) |
| 293 #endif | 294 #endif |
| 294 { | 295 { |
| 295 ASSERT(!**s_threadSpecific); | 296 ASSERT(!**s_threadSpecific); |
| 296 **s_threadSpecific = this; | 297 **s_threadSpecific = this; |
| 297 | 298 |
| 298 m_stats.clear(); | 299 m_stats.clear(); |
| 299 m_statsAfterLastGC.clear(); | 300 m_statsAfterLastGC.clear(); |
| 300 // First allocate the general heap, second iterate through to | 301 // First allocate the general heap, second iterate through to |
| (...skipping 295 matching lines...) |
| 596 } | 597 } |
| 597 | 598 |
| 598 Mutex& ThreadState::globalRootsMutex() | 599 Mutex& ThreadState::globalRootsMutex() |
| 599 { | 600 { |
| 600 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); | 601 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); |
| 601 return mutex; | 602 return mutex; |
| 602 } | 603 } |
| 603 | 604 |
| 604 // Trigger garbage collection on a 50% increase in size, but not for | 605 // Trigger garbage collection on a 50% increase in size, but not for |
| 605 // less than 512kbytes. | 606 // less than 512kbytes. |
| 606 static bool increasedEnoughToGC(size_t newSize, size_t oldSize) | 607 bool ThreadState::increasedEnoughToGC(size_t newSize, size_t oldSize) |
| 607 { | 608 { |
| 608 if (newSize < 1 << 19) | 609 if (newSize < 1 << 19) |
| 609 return false; | 610 return false; |
| 610 return newSize > oldSize + (oldSize >> 1); | 611 size_t limit = oldSize + (oldSize >> 1); |
| 612 return newSize > limit; |
| 611 } | 613 } |
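
For anyone checking the arithmetic of the heuristic above, here is a minimal standalone sketch of the same growth check; wouldTriggerGC is a hypothetical name standing in for ThreadState::increasedEnoughToGC, with the thresholds copied from the patch (never below 512KB, i.e. 1 << 19 bytes, and only on more than 50% growth since the last GC):

#include <cstddef>
#include <cstdio>

// Hypothetical restatement of the heuristic: never trigger below 512KB;
// otherwise trigger once the heap has grown by more than 50%.
static bool wouldTriggerGC(size_t newSize, size_t oldSize)
{
    if (newSize < static_cast<size_t>(1) << 19)
        return false;
    size_t limit = oldSize + (oldSize >> 1); // oldSize * 1.5 in integer arithmetic
    return newSize > limit;
}

int main()
{
    std::printf("%d\n", wouldTriggerGC(600 * 1024, 500 * 1024)); // 0: only 20% growth
    std::printf("%d\n", wouldTriggerGC(900 * 1024, 500 * 1024)); // 1: 80% growth
    return 0;
}
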
| 612 | 614 |
| 613 // FIXME: The heuristics are local for a thread at this | 615 // FIXME: The heuristics are local for a thread at this |
| 614 // point. Consider using heuristics that take memory for all threads | 616 // point. Consider using heuristics that take memory for all threads |
| 615 // into account. | 617 // into account. |
| 616 bool ThreadState::shouldGC() | 618 bool ThreadState::shouldGC() |
| 617 { | 619 { |
| 618 // Do not GC during sweeping. We allow allocation during | 620 // Do not GC during sweeping. We allow allocation during |
| 619 // finalization, but those allocations are not allowed | 621 // finalization, but those allocations are not allowed |
| 620 // to lead to nested garbage collections. | 622 // to lead to nested garbage collections. |
| 621 return !m_sweepInProgress && increasedEnoughToGC(m_stats.totalObjectSpace(), m_statsAfterLastGC.totalObjectSpace()); | 623 return !m_sweepInProgress && increasedEnoughToGC(m_stats.totalObjectSpace(), m_statsAfterLastGC.totalObjectSpace()); |
| 622 } | 624 } |
| 623 | 625 |
| 624 // Trigger conservative garbage collection on a 100% increase in size, | 626 // Trigger conservative garbage collection on a 100% increase in size, |
| 625 // but not for less than 4Mbytes. | 627 // but not for less than 4Mbytes. If the system currently has a low |
| 626 static bool increasedEnoughToForceConservativeGC(size_t newSize, size_t oldSize) | 628 // collection rate, then require a 300% increase in size. |
| 629 bool ThreadState::increasedEnoughToForceConservativeGC(size_t newSize, size_t oldSize) |
| 627 { | 630 { |
| 628 if (newSize < 1 << 22) | 631 if (newSize < 1 << 22) |
| 629 return false; | 632 return false; |
| 630 return newSize > 2 * oldSize; | 633 size_t limit = (m_lowCollectionRate ? 4 : 2) * oldSize; |
| 634 return newSize > limit; |
| 631 } | 635 } |
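
The conservative threshold can be restated the same way. In the sketch below, wouldForceConservativeGC is a hypothetical stand-in for the method above, with m_lowCollectionRate passed in as a parameter instead of read from the ThreadState member: below 4MB (1 << 22 bytes) a conservative GC is never forced; above that, the heap must have doubled (100% growth), or quadrupled (300% growth) when the previous sweep reclaimed little.

#include <cstddef>

// Hypothetical restatement of the conservative-GC threshold with the
// low-collection-rate flag as an explicit argument.
static bool wouldForceConservativeGC(size_t newSize, size_t oldSize, bool lowCollectionRate)
{
    if (newSize < static_cast<size_t>(1) << 22)
        return false;
    // 2 * oldSize means "doubled" (100% growth); 4 * oldSize "quadrupled" (300%).
    size_t limit = (lowCollectionRate ? 4 : 2) * oldSize;
    return newSize > limit;
}

For example, with the flag set, a heap that was 8MB after the last GC must exceed 32MB before a conservative GC is forced, rather than 16MB.
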
| 632 | 636 |
| 633 // FIXME: The heuristics are local for a thread at this | 637 // FIXME: The heuristics are local for a thread at this |
| 634 // point. Consider using heuristics that take memory for all threads | 638 // point. Consider using heuristics that take memory for all threads |
| 635 // into account. | 639 // into account. |
| 636 bool ThreadState::shouldForceConservativeGC() | 640 bool ThreadState::shouldForceConservativeGC() |
| 637 { | 641 { |
| 638 // Do not GC during sweeping. We allow allocation during | 642 // Do not GC during sweeping. We allow allocation during |
| 639 // finalization, but those allocations are not allowed | 643 // finalization, but those allocations are not allowed |
| 640 // to lead to nested garbage collections. | 644 // to lead to nested garbage collections. |
| (...skipping 250 matching lines...) |
| 891 TRACE_EVENT_SET_SAMPLING_STATE("blink", "BlinkGCSweeping"); | 895 TRACE_EVENT_SET_SAMPLING_STATE("blink", "BlinkGCSweeping"); |
| 892 } | 896 } |
| 893 | 897 |
| 894 m_sweepInProgress = true; | 898 m_sweepInProgress = true; |
| 895 // Disallow allocation during weak processing. | 899 // Disallow allocation during weak processing. |
| 896 enterNoAllocationScope(); | 900 enterNoAllocationScope(); |
| 897 // Perform thread-specific weak processing. | 901 // Perform thread-specific weak processing. |
| 898 while (popAndInvokeWeakPointerCallback(Heap::s_markingVisitor)) { } | 902 while (popAndInvokeWeakPointerCallback(Heap::s_markingVisitor)) { } |
| 899 leaveNoAllocationScope(); | 903 leaveNoAllocationScope(); |
| 900 // Perform sweeping and finalization. | 904 // Perform sweeping and finalization. |
| 905 size_t objectSpaceBeforeSweep = m_stats.totalObjectSpace(); |
| 901 m_stats.clear(); // Sweeping will recalculate the stats | 906 m_stats.clear(); // Sweeping will recalculate the stats |
| 902 for (int i = 0; i < NumberOfHeaps; i++) | 907 for (int i = 0; i < NumberOfHeaps; i++) |
| 903 m_heaps[i]->sweep(); | 908 m_heaps[i]->sweep(); |
| 904 getStats(m_statsAfterLastGC); | 909 getStats(m_statsAfterLastGC); |
| 905 m_sweepInProgress = false; | 910 m_sweepInProgress = false; |
| 906 clearGCRequested(); | 911 clearGCRequested(); |
| 907 clearSweepRequested(); | 912 clearSweepRequested(); |
| 913 // If we collected less than 50% of objects, record that the |
| 914 // collection rate is low which we use to determine when to |
| 915 // perform the next GC. |
| 916 setLowCollectionRate(m_stats.totalObjectSpace() > (objectSpaceBeforeSweep >> 1)); |
| 908 | 917 |
| 909 if (blink::Platform::current()) { | 918 if (blink::Platform::current()) { |
| 910 blink::Platform::current()->histogramCustomCounts("BlinkGC.PerformPendingSweep", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); | 919 blink::Platform::current()->histogramCustomCounts("BlinkGC.PerformPendingSweep", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); |
| 911 } | 920 } |
| 912 | 921 |
| 913 if (isMainThread()) { | 922 if (isMainThread()) { |
| 914 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState); | 923 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState); |
| 915 ScriptForbiddenScope::exit(); | 924 ScriptForbiddenScope::exit(); |
| 916 } | 925 } |
| 917 } | 926 } |
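
The rate check added near the end of performPendingSweep above reduces to a single comparison: the sweep collected less than 50% of the object space exactly when more than half of it survived. A hypothetical isLowCollectionRate helper makes the relation explicit:

#include <cstddef>

// Hypothetical mirror of the check in performPendingSweep: less than 50%
// of the object space was collected iff more than half of it survived.
static bool isLowCollectionRate(size_t spaceBeforeSweep, size_t spaceAfterSweep)
{
    return spaceAfterSweep > (spaceBeforeSweep >> 1);
}

For example, sweeping a 10MB object space down to 6MB collects only 40%, so 6MB > 5MB marks the rate as low, and the next conservative GC waits for a 300% rather than a 100% size increase.
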
| (...skipping 48 matching lines...) |
| 966 threadAttachMutex().unlock(); | 975 threadAttachMutex().unlock(); |
| 967 return gcInfo; | 976 return gcInfo; |
| 968 } | 977 } |
| 969 } | 978 } |
| 970 if (needLockForIteration) | 979 if (needLockForIteration) |
| 971 threadAttachMutex().unlock(); | 980 threadAttachMutex().unlock(); |
| 972 return 0; | 981 return 0; |
| 973 } | 982 } |
| 974 #endif | 983 #endif |
| 975 } | 984 } |