OLD | NEW |
---|---|
(Empty) | |
1 // Copyright 2016 Opera Software AS. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #ifndef HeapCompact_h | |
6 #define HeapCompact_h | |
7 | |
8 #include "platform/PlatformExport.h" | |
9 #include "platform/heap/BlinkGC.h" | |
10 #include "wtf/PtrUtil.h" | |
11 #include "wtf/ThreadingPrimitives.h" | |
12 #include "wtf/Vector.h" | |
13 | |
14 #include <bitset> | |
15 #include <utility> | |
16 | |
17 // Global dev/debug switches: | |
18 | |
19 // Set to 0 to prevent compaction GCs, disabling the heap compaction feature. | |
20 #define ENABLE_HEAP_COMPACTION 1 | |
21 | |
22 // Emit debug info during compaction. | |
23 #define DEBUG_HEAP_COMPACTION 0 | |
24 | |
25 // Emit stats on freelist occupancy. | |
26 // 0 - disabled, 1 - minimal, 2 - verbose. | |
27 #define DEBUG_HEAP_FREELIST 0 | |
28 | |
29 // Log the amount of time spent compacting. | |
30 #define DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME 0 | |
31 | |
32 // Compact during all idle + precise GCs; for debugging. | |
33 #define STRESS_TEST_HEAP_COMPACTION 0 | |
34 | |
35 #if OS(WIN) | |
36 // TODO: use standard logging facilities. | |
37 #define LOG_HEAP_COMPACTION_INTERNAL(msg, ...) \ | |
38 do { \ | |
39 char output[512]; \ | |
40     snprintf(output, sizeof(output), msg, ##__VA_ARGS__); \ | |
41 OutputDebugStringA(output); \ | |
42 } while (0) | |
43 #else | |
44 #define LOG_HEAP_COMPACTION_INTERNAL(msg, ...) \ | |
45 fprintf(stderr, msg, ##__VA_ARGS__) | |
46 #endif | |
47 | |
48 #if DEBUG_HEAP_COMPACTION | |
49 #define LOG_HEAP_COMPACTION(msg, ...) \ | |
50 LOG_HEAP_COMPACTION_INTERNAL(msg, ##__VA_ARGS__) | |
51 #else | |
52 #define LOG_HEAP_COMPACTION(msg, ...) \ | |
53 do { \ | |
54 } while (0) | |
55 #endif | |
56 | |
57 #if DEBUG_HEAP_FREELIST | |
58 #define LOG_HEAP_FREELIST(msg, ...) \ | |
59 LOG_HEAP_COMPACTION_INTERNAL(msg, ##__VA_ARGS__) | |
60 #else | |
61 #define LOG_HEAP_FREELIST(msg, ...) \ | |
62 do { \ | |
63 } while (0) | |
64 #endif | |
65 | |
66 #if DEBUG_HEAP_FREELIST == 2 | |
67 #define LOG_HEAP_FREELIST_VERBOSE(msg, ...) \ | |
68 LOG_HEAP_COMPACTION_INTERNAL(msg, ##__VA_ARGS__) | |
69 #else | |
70 #define LOG_HEAP_FREELIST_VERBOSE(msg, ...) \ | |
71 do { \ | |
72 } while (0) | |
73 #endif | |
haraken
2016/12/02 12:43:20
Consider cleaning up these macros before landing this.
sof
2016/12/04 14:55:38
Added TODO as an additional reminder to do so.
| |
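One possible direction for the cleanup suggested above, purely as a sketch (LOG_HEAP_NOOP is not part of the patch, just an illustration): the identical no-op fallbacks could share a single helper macro.

  #define LOG_HEAP_NOOP(...) \
    do {                     \
    } while (0)

  #if DEBUG_HEAP_COMPACTION
  #define LOG_HEAP_COMPACTION(msg, ...) \
    LOG_HEAP_COMPACTION_INTERNAL(msg, ##__VA_ARGS__)
  #else
  #define LOG_HEAP_COMPACTION(msg, ...) LOG_HEAP_NOOP(msg, ##__VA_ARGS__)
  #endif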
74 | |
75 namespace blink { | |
76 | |
77 class NormalPageArena; | |
78 class BasePage; | |
79 class ThreadHeap; | |
80 class ThreadState; | |
81 | |
82 class PLATFORM_EXPORT HeapCompact final { | |
83 public: | |
84 static std::unique_ptr<HeapCompact> create() { | |
85 return std::unique_ptr<HeapCompact>(new HeapCompact); | |
haraken
2016/12/02 12:43:20
wrapUnique
sof
2016/12/04 14:55:37
Done, not sure I "get" the benefits of wrapUnique().
| |
86 } | |
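For reference, a minimal sketch of the change the reviewer suggests, assuming WTF::wrapUnique from wtf/PtrUtil.h (already included above); wrapUnique deduces the pointee type instead of spelling out the std::unique_ptr construction by hand:

  static std::unique_ptr<HeapCompact> create() {
    return WTF::wrapUnique(new HeapCompact);
  }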
87 | |
88 ~HeapCompact(); | |
89 | |
90 // Check if a GC for the given type and reason should perform additional | |
91 // heap compaction once it has run. | |
92 // | |
93 // If deemed worthy, heap compaction is implicitly initialized and set up. | |
94 void checkIfCompacting(ThreadHeap*, | |
95 Visitor*, | |
96 BlinkGC::GCType, | |
97 BlinkGC::GCReason); | |
98 | |
99 // Returns true if the ongoing GC will perform compaction. | |
100 bool isCompacting() const { return m_doCompact; } | |
101 | |
102 // Returns true if the ongoing GC will perform compaction. | |
haraken
2016/12/02 12:43:20
Update the comment.
sof
2016/12/04 14:55:37
Done.
| |
103 bool isCompactingArena(int arenaIndex) const { | |
104 return m_doCompact && (m_compactableHeaps & (0x1u << arenaIndex)); | |
haraken
2016/12/02 12:43:20
Avoid hard-coding 0x1u.
sof
2016/12/04 14:55:37
That one-liner & idiom is as clear as can be; I do
| |
105 } | |
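Purely as an illustration of the reviewer's point about the hard-coded 0x1u (arenaBit() is a hypothetical helper, not part of this patch), the mask could be given a name:

  static unsigned arenaBit(int arenaIndex) { return 1u << arenaIndex; }

  bool isCompactingArena(int arenaIndex) const {
    return m_doCompact && (m_compactableHeaps & arenaBit(arenaIndex));
  }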
106 | |
107 // Returns |true| if the ongoing GC may compact the given arena/sub-heap. | |
108 static bool isCompactableArena(int arenaIndex) { | |
109 return arenaIndex >= BlinkGC::Vector1ArenaIndex && | |
110 arenaIndex <= BlinkGC::HashTableArenaIndex; | |
111 } | |
112 | |
113 // See |Heap::registerMovingObjectReference()| documentation. | |
114 void registerMovingObjectReference(MovableReference* slot); | |
115 | |
116 // See |Heap::registerMovingObjectCallback()| documentation. | |
117 void registerMovingObjectCallback(MovableReference, | |
118 MovingObjectCallback, | |
119 void* callbackData); | |
120 | |
121 // Register |slot| as containing a reference to the interior of a movable | |
122 // object. | |
123 // | |
124 // |registerMovingObjectReference()| handles the common case of holding | |
125 // an external reference to a backing store object. |registerRelocation()| | |
126 // handles the relocation of external references into backing store | |
127   // objects -- something not currently done nor needed by the Blink codebase, | |
128   // but kept open as a possibility, until further notice. | |
129 void registerRelocation(MovableReference* slot); | |
130 | |
131   // Signal that the compaction pass is being started or finished by some | |
132 // ThreadState. | |
133 void startCompacting(ThreadState*); | |
134 void finishedCompacting(ThreadState*); | |
135 | |
136 // Perform any relocation post-processing after having completed compacting | |
137 // the given sub heap. Pass along the number of pages that were freed from | |
138 // the arena, along with their total size. | |
139 void finishedArenaCompaction(NormalPageArena*, | |
140 size_t freedPages, | |
141 size_t freedSize); | |
142 | |
143 // Record the main thread's compactable freelist residency (in bytes), | |
144   // along with overall size. Sizes are relative to the compactable | |
145 // sub-heaps, and not a total count. Along with the totals, per-heap | |
146 // numbers are also provided. | |
147 // | |
148 // The recording is done after the decision has been made on whether | |
149 // or not to compact during the _current_ GC. If compacting, the size | |
150 // sampling will be ignored and the internal counters are reset. | |
151 // | |
152 // However, if not compacting, the values will be consulted the next time | |
153   // a GC runs and decides whether or not to compact. | |
154 void setHeapResidency( | |
155 size_t liveSize, | |
156 size_t freeSize, | |
haraken
2016/12/02 12:43:20
totalArenaSize
totalFreeListSize
to be consistent
sof
2016/12/04 14:55:37
Done.
| |
157 const Vector<std::pair<size_t, size_t>>& heapResidencies); | |
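To make the parameters concrete, a hypothetical caller-side sketch; the per-arena pair is assumed here to be (free list size, arena size), and arenaFreeListSize()/arenaAllocatedSize() are invented helpers, not part of this patch:

  Vector<std::pair<size_t, size_t>> residencies;
  size_t totalLiveSize = 0;
  size_t totalFreeSize = 0;
  for (int arena = BlinkGC::Vector1ArenaIndex;
       arena <= BlinkGC::HashTableArenaIndex; ++arena) {
    size_t freeSize = arenaFreeListSize(arena);    // assumed helper
    size_t arenaSize = arenaAllocatedSize(arena);  // assumed helper
    residencies.append(std::make_pair(freeSize, arenaSize));
    totalFreeSize += freeSize;
    totalLiveSize += arenaSize - freeSize;
  }
  heapCompact->setHeapResidency(totalLiveSize, totalFreeSize, residencies);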
158 | |
159 // Register the heap page as containing live objects that will all be | |
160 // compacted. When the GC is compacting, that is. | |
161 void addCompactablePage(BasePage*); | |
162 | |
163   // Notify heap compaction that the object at |from| has been moved to |to|. | |
164 // (Called by the sweep compaction pass.) | |
165 void movedObject(Address from, Address to); | |
166 | |
167 // For unit testing only: arrange for a compaction GC to be triggered | |
168 // next time a non-conservative GC is run. Sets the compact-next flag | |
169   // to the new value, returning the old one. | |
170 static bool scheduleCompactionGCForTesting(bool); | |
haraken
2016/12/02 12:43:20
Shall we simply forbid the heap compaction on conservative GCs?
sof
2016/12/04 14:55:38
Yes, we absolutely must disable compaction during conservative GCs.
| |
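A hedged sketch of how a unit test might use this hook; preciselyCollectGarbage() is assumed to be the usual heap-test helper that runs a precise (non-conservative) GC, and is not part of this header:

  // Force the next precise GC to also compact, then restore the previous flag.
  bool previous = HeapCompact::scheduleCompactionGCForTesting(true);
  preciselyCollectGarbage();  // assumed test helper
  HeapCompact::scheduleCompactionGCForTesting(previous);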
171 | |
172 private: | |
173 class MovableObjectFixups; | |
174 | |
175 HeapCompact(); | |
176 | |
177 // Parameters controlling when compaction should be done: | |
178 | |
179 // Number of GCs that must have passed since last compaction GC. | |
180 static const int kCompactIntervalThreshold = 10; | |
haraken
2016/12/02 12:43:20
kGCIntervalThresholdSinceLastCompaction (c.f., m_gcCountSinceLastCompaction)
sof
2016/12/04 14:55:37
kGCCountSinceLastCompactionThreshold
| |
181 | |
182 // Freelist size threshold that must be exceeded before compaction | |
183 // should be considered. | |
184 static const size_t kFreeThreshold = 512 * 1024; | |
haraken
2016/12/02 12:43:20
kFreeListSizeThreshold
sof
2016/12/04 14:55:37
Done.
| |
185 | |
186 MovableObjectFixups& fixups(); | |
187 | |
188 std::unique_ptr<MovableObjectFixups> m_fixups; | |
189 | |
190 // Set to |true| when a compacting sweep will go ahead. | |
191 bool m_doCompact; | |
192 size_t m_gcCountSinceLastCompaction; | |
193 | |
194 Mutex m_mutex; | |
195 | |
196 // All threads performing a GC must synchronize on completion | |
197 // of all heap compactions. Not doing so risks one thread resuming | |
198 // the mutator, which could perform cross-thread access to a heap | |
199 // that's still in the process of being compacted. | |
200 ThreadCondition m_finished; | |
201 | |
202 // Number of heap threads participating. | |
203 int m_threadCount; | |
haraken
2016/12/02 12:43:19
m_mutex => m_threadSynchronizationMutex
m_finished
sof
2016/12/04 14:55:38
"threadSynchronization" is implied for these abstractions.
| |
204 | |
205 // Last reported freelist size, across all heaps. | |
206 size_t m_freeListAllocations; | |
207   // If compacting, the i'th sub heap will be compacted | |
208   // if its corresponding bit is set. | |
209 unsigned m_compactableHeaps; | |
haraken
2016/12/02 12:43:20
m_compactableHeaps => m_compactingArenas ? (c.f.,
sof
2016/12/04 14:55:37
Done, replaced the use of (sub)heaps with arenas,
| |
210 | |
211   // Stats: number of (complete) pages freed/decommitted + | |
212 // bytes freed (which will include partial pages.) | |
213 size_t m_freedPages; | |
214 size_t m_freedSize; | |
215 | |
216 #if DEBUG_LOG_HEAP_COMPACTION_RUNNING_TIME | |
217 int m_startCompaction; | |
218 double m_startCompactionTimeMS; | |
219 #endif | |
220 | |
221 static bool s_forceCompactionGC; | |
222 }; | |
223 | |
224 } // namespace blink | |
225 | |
226 #endif // HeapCompact_h | |