OLD | NEW |
(Empty) | |
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #ifndef HeapCompact_h |
| 6 #define HeapCompact_h |
| 7 |
| 8 #include "platform/PlatformExport.h" |
| 9 #include "platform/heap/BlinkGC.h" |
| 10 #include "wtf/DataLog.h" |
| 11 #include "wtf/PtrUtil.h" |
| 12 #include "wtf/ThreadingPrimitives.h" |
| 13 #include "wtf/Vector.h" |
| 14 |
| 15 #include <bitset> |
| 16 #include <utility> |
| 17 |
| 18 // Compaction-specific debug switches: |
| 19 |
| 20 // Set to 0 to prevent compaction GCs, disabling the heap compaction feature. |
| 21 #define ENABLE_HEAP_COMPACTION 1 |
| 22 |
| 23 // Emit debug info during compaction. |
| 24 #define DEBUG_HEAP_COMPACTION 0 |
| 25 |
| 26 // Emit stats on freelist occupancy. |
| 27 // 0 - disabled, 1 - minimal, 2 - verbose. |
| 28 #define DEBUG_HEAP_FREELIST 0 |
| 29 |
| 30 // Log the amount of time spent compacting. |
| 31 #define DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME 0 |
| 32 |
| 33 // Compact during all idle + precise GCs; for debugging. |
| 34 #define STRESS_TEST_HEAP_COMPACTION 0 |
| 35 |
| 36 namespace blink { |
| 37 |
| 38 class NormalPageArena; |
| 39 class BasePage; |
| 40 class ThreadState; |
| 41 |
| 42 class PLATFORM_EXPORT HeapCompact final { |
| 43 public: |
| 44 static std::unique_ptr<HeapCompact> create() { |
| 45 return WTF::wrapUnique(new HeapCompact); |
| 46 } |
| 47  |
| 48 ~HeapCompact(); |
| 49  |
| 50 // Determine if a GC for the given type and reason should also perform |
| 51 // additional heap compaction. |
| 52 // |
| 53 bool shouldCompact(ThreadState*, BlinkGC::GCType, BlinkGC::GCReason); |
| 54  |
| 55 // Compaction should be performed as part of the ongoing GC, initialize |
| 56 // the heap compaction pass. Returns the appropriate visitor type to |
| 57 // use when running the marking phase. |
| 58 BlinkGC::GCType initialize(ThreadState*); |
| 59  |
| 60 // Returns true if the ongoing GC will perform compaction. |
| 61 bool isCompacting() const { return m_doCompact; } |
| 62  |
| 63 // Returns true if the ongoing GC will perform compaction for the given |
| 64 // heap arena. |
| 65 bool isCompactingArena(int arenaIndex) const { |
| 66 return m_doCompact && (m_compactableArenas & (0x1u << arenaIndex)); |
| 67 } |
| 68  |
| 69 // Returns |true| if the ongoing GC may compact the given arena/sub-heap. |
| 70 static bool isCompactableArena(int arenaIndex) { |
| 71 return arenaIndex >= BlinkGC::Vector1ArenaIndex && |
| 72 arenaIndex <= BlinkGC::HashTableArenaIndex; |
| 73 } |
| 74  |
| 75 // See |Heap::registerMovingObjectReference()| documentation. |
| 76 void registerMovingObjectReference(MovableReference* slot); |
| 77  |
| 78 // See |Heap::registerMovingObjectCallback()| documentation. |
| 79 void registerMovingObjectCallback(MovableReference, |
| 80 MovingObjectCallback, |
| 81 void* callbackData); |
| 82  |
| 83 // Thread signalling that a compaction pass is starting or has |
| 84 // completed. |
| 85 // |
| 86 // A thread participating in a heap GC will wait on the completion |
| 87 // of compaction across all threads. No thread can be allowed to |
| 88 // potentially access another thread's heap arenas while they're |
| 89 // still being compacted. |
| 90 void startThreadCompaction(); |
| 91 void finishThreadCompaction(); |
| 92  |
| 93 // Perform any relocation post-processing after having completed compacting |
| 94 // the given arena. The number of pages that were freed together with the |
| 95 // total size (in bytes) of freed heap storage, are passed in as arguments. |
| 96 void finishedArenaCompaction(NormalPageArena*, |
| 97 size_t freedPages, |
| 98 size_t freedSize); |
| 99  |
| 100 // Register the heap page as containing live objects that will all be |
| 101 // compacted. Registration happens as part of making the arenas ready |
| 102 // for a GC. |
| 103 void addCompactingPage(BasePage*); |
| 104  |
| 105 // Notify heap compaction that the object at |from| has been relocated |
| 106 // to |to|. (Called by the sweep compaction pass.) |
| 107 void relocate(Address from, Address to); |
| 108  |
| 109 // For unit testing only: arrange for a compaction GC to be triggered |
| 110 // next time a non-conservative GC is run. Sets the compact-next flag |
| 111 // to the new value, returning the old value. |
| 112 static bool scheduleCompactionGCForTesting(bool); |
| 113  |
| 114 private: |
| 115 class MovableObjectFixups; |
| 116  |
| 117 HeapCompact(); |
| 118  |
| 119 // Sample the amount of fragmentation and heap memory currently residing |
| 120 // on the freelists of the arenas we're able to compact. The computed |
| 121 // numbers will be subsequently used to determine if a heap compaction |
| 122 // is in order (shouldCompact().) |
| 123 void updateHeapResidency(ThreadState*); |
| 124  |
| 125 // Parameters controlling when compaction should be done: |
| 126  |
| 127 // Number of GCs that must have passed since last compaction GC. |
| 128 static const int kGCCountSinceLastCompactionThreshold = 10; |
| 129  |
| 130 // Freelist size threshold that must be exceeded before compaction |
| 131 // should be considered. |
| 132 static const size_t kFreeListSizeThreshold = 512 * 1024; |
| 133  |
| 134 MovableObjectFixups& fixups(); |
| 135  |
| 136 std::unique_ptr<MovableObjectFixups> m_fixups; |
| 137  |
| 138 // Set to |true| when a compacting sweep will go ahead. |
| 139 bool m_doCompact; |
| 140 size_t m_gcCountSinceLastCompaction; |
| 141  |
| 142 // Lock protecting finishedThreadCompaction() signalling. |
| 143 Mutex m_mutex; |
| 144  |
| 145 // All threads performing a GC must synchronize on completion |
| 146 // of all heap compactions. Not doing so risks one thread resuming |
| 147 // the mutator, which could perform cross-thread access to a heap |
| 148 // that's still in the process of being compacted. |
| 149 ThreadCondition m_finished; |
| 150  |
| 151 // Number of heap threads participating in the compaction. |
| 152 int m_threadCount; |
| 153  |
| 154 // Last reported freelist size, across all compactable arenas. |
| 155 size_t m_freeListSize; |
| 156  |
| 157 // If compacting, i'th heap arena will be compacted |
| 158 // if corresponding bit is set. Indexes are in |
| 159 // the range of BlinkGC::ArenaIndices. |
| 160 unsigned m_compactableArenas; |
| 161  |
| 162 // Stats, number of (complete) pages freed/decommitted + |
| 163 // bytes freed (which will include partial pages.) |
| 164 size_t m_freedPages; |
| 165 size_t m_freedSize; |
| 166  |
| 167 #if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME |
| 168 double m_startCompactionTimeMS; |
| 169 #endif |
| 170  |
| 171 static bool s_forceCompactionGC; |
| 172 }; |
| 173 |
| 174 } // namespace blink |
| 175 |
| 176 // Logging macros activated by the debug switches defined at the top of |
| 177 // this header; each expands to a no-op statement when its switch is off. |
| 178 #define LOG_HEAP_COMPACTION_INTERNAL(msg, ...) dataLogF(msg, ##__VA_ARGS__) |
| 179  |
| 180 #if DEBUG_HEAP_COMPACTION |
| 181 #define LOG_HEAP_COMPACTION(msg, ...) \ |
| 182 LOG_HEAP_COMPACTION_INTERNAL(msg, ##__VA_ARGS__) |
| 183 #else |
| 184 #define LOG_HEAP_COMPACTION(msg, ...) \ |
| 185 do { \ |
| 186 } while (0) |
| 187 #endif |
| 188  |
| 189 #if DEBUG_HEAP_FREELIST |
| 190 #define LOG_HEAP_FREELIST(msg, ...) \ |
| 191 LOG_HEAP_COMPACTION_INTERNAL(msg, ##__VA_ARGS__) |
| 192 #else |
| 193 #define LOG_HEAP_FREELIST(msg, ...) \ |
| 194 do { \ |
| 195 } while (0) |
| 196 #endif |
| 197  |
| 198 #if DEBUG_HEAP_FREELIST == 2 |
| 199 #define LOG_HEAP_FREELIST_VERBOSE(msg, ...) \ |
| 200 LOG_HEAP_COMPACTION_INTERNAL(msg, ##__VA_ARGS__) |
| 201 #else |
| 202 #define LOG_HEAP_FREELIST_VERBOSE(msg, ...) \ |
| 203 do { \ |
| 204 } while (0) |
| 205 #endif |
| 206  |
| 207 #endif // HeapCompact_h |
OLD | NEW |