Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(256)

Side by Side Diff: third_party/WebKit/Source/platform/heap/Heap.h

Issue 1477023003: Refactor the Heap into ThreadHeap to prepare for per thread heaps Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 118 matching lines...) Expand 10 before | Expand all | Expand 10 after
129 static_assert(IsGarbageCollectedType<T>::value, "only objects deriving from GarbageCollected can be used."); 129 static_assert(IsGarbageCollectedType<T>::value, "only objects deriving from GarbageCollected can be used.");
130 BasePage* page = pageFromObject(objectPointer); 130 BasePage* page = pageFromObject(objectPointer);
131 if (page->hasBeenSwept()) 131 if (page->hasBeenSwept())
132 return false; 132 return false;
133 ASSERT(page->heap()->threadState()->isSweepingInProgress()); 133 ASSERT(page->heap()->threadState()->isSweepingInProgress());
134 134
135 return !Heap::isHeapObjectAlive(const_cast<T*>(objectPointer)); 135 return !Heap::isHeapObjectAlive(const_cast<T*>(objectPointer));
136 } 136 }
137 137
138 // Push a trace callback on the marking stack. 138 // Push a trace callback on the marking stack.
139 static void pushTraceCallback(void* containerObject, TraceCallback); 139 static void pushTraceCallback(void* containerObject, TraceCallback, ThreadState*);
haraken 2016/01/07 08:06:22 This change is okay in this CL, but ideally these
140 140
141 // Push a trace callback on the post-marking callback stack. These 141 // Push a trace callback on the post-marking callback stack. These
142 // callbacks are called after normal marking (including ephemeron 142 // callbacks are called after normal marking (including ephemeron
143 // iteration). 143 // iteration).
144 static void pushPostMarkingCallback(void*, TraceCallback); 144 static void pushPostMarkingCallback(void*, TraceCallback);
145 145
146 // Add a weak pointer callback to the weak callback work list. General 146 // Add a weak pointer callback to the weak callback work list. General
147 // object pointer callbacks are added to a thread local weak callback work 147 // object pointer callbacks are added to a thread local weak callback work
148 // list and the callback is called on the thread that owns the object, with 148 // list and the callback is called on the thread that owns the object, with
149 // the closure pointer as an argument. Most of the time, the closure and 149 // the closure pointer as an argument. Most of the time, the closure and
150 // the containerObject can be the same thing, but the containerObject is 150 // the containerObject can be the same thing, but the containerObject is
151 // constrained to be on the heap, since the heap is used to identify the 151 // constrained to be on the heap, since the heap is used to identify the
152 // correct thread. 152 // correct thread.
153 static void pushThreadLocalWeakCallback(void* closure, void* containerObject, WeakCallback); 153 static void pushThreadLocalWeakCallback(void* closure, void* containerObject, WeakCallback);
154 154
155 // Similar to the more general pushThreadLocalWeakCallback, but cell 155 // Similar to the more general pushThreadLocalWeakCallback, but cell
156 // pointer callbacks are added to a static callback work list and the weak 156 // pointer callbacks are added to a static callback work list and the weak
157 // callback is performed on the thread performing garbage collection. This 157 // callback is performed on the thread performing garbage collection. This
158 // is OK because cells are just cleared and no deallocation can happen. 158 // is OK because cells are just cleared and no deallocation can happen.
159 static void pushGlobalWeakCallback(void** cell, WeakCallback); 159 static void pushGlobalWeakCallback(void** cell, WeakCallback, ThreadState*);
160 160
161 // Pop the top of a marking stack and call the callback with the visitor 161 // Pop the top of a marking stack and call the callback with the visitor
162 // and the object. Returns false when there is nothing more to do. 162 // and the object. Returns false when there is nothing more to do.
163 static bool popAndInvokeTraceCallback(Visitor*); 163 static bool popAndInvokeTraceCallback(Visitor*);
164 164
165 // Remove an item from the post-marking callback stack and call 165 // Remove an item from the post-marking callback stack and call
166 // the callback with the visitor and the object pointer. Returns 166 // the callback with the visitor and the object pointer. Returns
167 // false when there is nothing more to do. 167 // false when there is nothing more to do.
168 static bool popAndInvokePostMarkingCallback(Visitor*); 168 static bool popAndInvokePostMarkingCallback(Visitor*);
169 169
170 // Remove an item from the weak callback work list and call the callback 170 // Remove an item from the weak callback work list and call the callback
171 // with the visitor and the closure pointer. Returns false when there is 171 // with the visitor and the closure pointer. Returns false when there is
172 // nothing more to do. 172 // nothing more to do.
173 static bool popAndInvokeGlobalWeakCallback(Visitor*); 173 static bool popAndInvokeGlobalWeakCallback(Visitor*);
174 174
175 // Register an ephemeron table for fixed-point iteration. 175 // Register an ephemeron table for fixed-point iteration.
176 static void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback); 176 static void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback, ThreadState*);
177 #if ENABLE(ASSERT) 177 #if ENABLE(ASSERT)
178 static bool weakTableRegistered(const void*); 178 static bool weakTableRegistered(const void*, ThreadState*);
179 #endif 179 #endif
180 180
181 static inline size_t allocationSizeFromSize(size_t size) 181 static inline size_t allocationSizeFromSize(size_t size)
182 { 182 {
183 // Check the size before computing the actual allocation size. The 183 // Check the size before computing the actual allocation size. The
184 // allocation size calculation can overflow for large sizes and the check 184 // allocation size calculation can overflow for large sizes and the check
185 // therefore has to happen before any calculation on the size. 185 // therefore has to happen before any calculation on the size.
186 RELEASE_ASSERT(size < maxHeapObjectSize); 186 RELEASE_ASSERT(size < maxHeapObjectSize);
187 187
188 // Add space for header. 188 // Add space for header.
189 size_t allocationSize = size + sizeof(HeapObjectHeader); 189 size_t allocationSize = size + sizeof(HeapObjectHeader);
190 // Align size with allocation granularity. 190 // Align size with allocation granularity.
191 allocationSize = (allocationSize + allocationMask) & ~allocationMask; 191 allocationSize = (allocationSize + allocationMask) & ~allocationMask;
192 return allocationSize; 192 return allocationSize;
193 } 193 }
194 static Address allocateOnHeapIndex(ThreadState*, size_t, int heapIndex, size_t gcInfoIndex); 194 static Address allocateOnHeapIndex(ThreadState*, size_t, int heapIndex, size_t gcInfoIndex);
195 template<typename T> static Address allocate(size_t, bool eagerlySweep = false); 195 template<typename T> static Address allocate(size_t, bool eagerlySweep = false);
196 template<typename T> static Address reallocate(void* previous, size_t); 196 template<typename T> static Address reallocate(void* previous, size_t);
197 197
198 static const char* gcReasonString(BlinkGC::GCReason); 198 static const char* gcReasonString(BlinkGC::GCReason);
199 static void collectGarbage(BlinkGC::StackState, BlinkGC::GCType, BlinkGC::GCReason); 199 static void collectGarbage(BlinkGC::StackState, BlinkGC::GCType, BlinkGC::GCReason);
200 static void collectGarbageForTerminatingThread(ThreadState*); 200 static void collectGarbageForTerminatingThread(ThreadState*);
201 static void collectGarbageForIsolatedThread(ThreadState*);
201 static void collectAllGarbage(); 202 static void collectAllGarbage();
202 203
203 static void processMarkingStack(Visitor*); 204 static void processMarkingStack(Visitor*);
204 static void postMarkingProcessing(Visitor*); 205 static void postMarkingProcessing(Visitor*);
205 static void globalWeakProcessing(Visitor*); 206 static void globalWeakProcessing(Visitor*);
206 static void setForcePreciseGCForTesting(); 207 static void setForcePreciseGCForTesting();
207 208
208 static void preGC(); 209 static void preGC();
209 static void postGC(BlinkGC::GCType); 210 static void postGC(BlinkGC::GCType);
210 211
211 // Conservatively checks whether an address is a pointer in any of the 212 // Conservatively checks whether an address is a pointer in any of the
212 // thread heaps. If so marks the object pointed to as live. 213 // thread heaps. If so marks the object pointed to as live.
213 static Address checkAndMarkPointer(Visitor*, Address); 214 static Address checkAndMarkPointer(Visitor*, Address);
214 215
215 static size_t objectPayloadSizeForTesting(); 216 static size_t objectPayloadSizeForTesting();
216 217
217 static void flushHeapDoesNotContainCache();
218
219 static FreePagePool* freePagePool() { return s_freePagePool; } 218 static FreePagePool* freePagePool() { return s_freePagePool; }
220 static OrphanedPagePool* orphanedPagePool() { return s_orphanedPagePool; } 219 static OrphanedPagePool* orphanedPagePool() { return s_orphanedPagePool; }
221 220
222 // This look-up uses the region search tree and a negative contains cache to 221 // This look-up uses the region search tree and a negative contains cache to
223 // provide an efficient mapping from arbitrary addresses to the containing 222 // provide an efficient mapping from arbitrary addresses to the containing
224 // heap-page if one exists. 223 // heap-page if one exists.
225 static BasePage* lookup(Address); 224 static BasePage* lookup(Address, ThreadState*);
226 static void addPageMemoryRegion(PageMemoryRegion*); 225 static void addPageMemoryRegion(PageMemoryRegion*);
227 static void removePageMemoryRegion(PageMemoryRegion*); 226 static void removePageMemoryRegion(PageMemoryRegion*);
228 227
229 static const GCInfo* gcInfo(size_t gcInfoIndex) 228 static const GCInfo* gcInfo(size_t gcInfoIndex)
230 { 229 {
231 ASSERT(gcInfoIndex >= 1); 230 ASSERT(gcInfoIndex >= 1);
232 ASSERT(gcInfoIndex < GCInfoTable::maxIndex); 231 ASSERT(gcInfoIndex < GCInfoTable::maxIndex);
233 ASSERT(s_gcInfoTable); 232 ASSERT(s_gcInfoTable);
234 const GCInfo* info = s_gcInfoTable[gcInfoIndex]; 233 const GCInfo* info = s_gcInfoTable[gcInfoIndex];
235 ASSERT(info); 234 ASSERT(info);
236 return info; 235 return info;
237 } 236 }
238 237
239 static void setMarkedObjectSizeAtLastCompleteSweep(size_t size) { releaseStore(&s_markedObjectSizeAtLastCompleteSweep, size); }
240 static size_t markedObjectSizeAtLastCompleteSweep() { return acquireLoad(&s_markedObjectSizeAtLastCompleteSweep); }
241 static void increaseAllocatedObjectSize(size_t delta) { atomicAdd(&s_allocatedObjectSize, static_cast<long>(delta)); }
242 static void decreaseAllocatedObjectSize(size_t delta) { atomicSubtract(&s_allocatedObjectSize, static_cast<long>(delta)); }
243 static size_t allocatedObjectSize() { return acquireLoad(&s_allocatedObjectSize); }
244 static void increaseMarkedObjectSize(size_t delta) { atomicAdd(&s_markedObjectSize, static_cast<long>(delta)); }
245 static size_t markedObjectSize() { return acquireLoad(&s_markedObjectSize); }
246 static void increaseAllocatedSpace(size_t delta) { atomicAdd(&s_allocatedSpace, static_cast<long>(delta)); }
247 static void decreaseAllocatedSpace(size_t delta) { atomicSubtract(&s_allocatedSpace, static_cast<long>(delta)); }
248 static size_t allocatedSpace() { return acquireLoad(&s_allocatedSpace); }
249 static size_t objectSizeAtLastGC() { return acquireLoad(&s_objectSizeAtLastGC); }
250 static void increaseWrapperCount(size_t delta) { atomicAdd(&s_wrapperCount, static_cast<long>(delta)); }
251 static void decreaseWrapperCount(size_t delta) { atomicSubtract(&s_wrapperCount, static_cast<long>(delta)); }
252 static size_t wrapperCount() { return acquireLoad(&s_wrapperCount); }
253 static size_t wrapperCountAtLastGC() { return acquireLoad(&s_wrapperCountAtLastGC); }
254 static void increaseCollectedWrapperCount(size_t delta) { atomicAdd(&s_collectedWrapperCount, static_cast<long>(delta)); }
255 static size_t collectedWrapperCount() { return acquireLoad(&s_collectedWrapperCount); }
256 static size_t partitionAllocSizeAtLastGC() { return acquireLoad(&s_partitionAllocSizeAtLastGC); }
257
258 static double estimatedMarkingTime();
259 static void reportMemoryUsageHistogram();
260 static void reportMemoryUsageForTracing();
261
262 #if ENABLE(ASSERT)
263 static uint16_t gcGeneration() { return s_gcGeneration; }
264 #endif
265
266 private: 238 private:
267 // A RegionTree is a simple binary search tree of PageMemoryRegions sorted
268 // by base addresses.
269 class RegionTree {
270 public:
271 explicit RegionTree(PageMemoryRegion* region) : m_region(region), m_left(nullptr), m_right(nullptr) { }
272 ~RegionTree()
273 {
274 delete m_left;
275 delete m_right;
276 }
277 PageMemoryRegion* lookup(Address);
278 static void add(RegionTree*, RegionTree**);
279 static void remove(PageMemoryRegion*, RegionTree**);
280 private:
281 PageMemoryRegion* m_region;
282 RegionTree* m_left;
283 RegionTree* m_right;
284 };
285
286 // Reset counters that track live and allocated-since-last-GC sizes.
287 static void resetHeapCounters();
288
289 static int heapIndexForObjectSize(size_t); 239 static int heapIndexForObjectSize(size_t);
290 static bool isNormalHeapIndex(int); 240 static bool isNormalHeapIndex(int);
291 241
292 static CallbackStack* s_markingStack;
293 static CallbackStack* s_postMarkingCallbackStack;
294 static CallbackStack* s_globalWeakCallbackStack;
295 static CallbackStack* s_ephemeronStack;
296 static HeapDoesNotContainCache* s_heapDoesNotContainCache;
297 static bool s_shutdownCalled; 242 static bool s_shutdownCalled;
298 static FreePagePool* s_freePagePool; 243 static FreePagePool* s_freePagePool;
299 static OrphanedPagePool* s_orphanedPagePool; 244 static OrphanedPagePool* s_orphanedPagePool;
300 static RegionTree* s_regionTree;
301 static size_t s_allocatedSpace;
302 static size_t s_allocatedObjectSize;
303 static size_t s_objectSizeAtLastGC;
304 static size_t s_markedObjectSize;
305 static size_t s_markedObjectSizeAtLastCompleteSweep;
306 static size_t s_wrapperCount;
307 static size_t s_wrapperCountAtLastGC;
308 static size_t s_collectedWrapperCount;
309 static size_t s_partitionAllocSizeAtLastGC;
310 static double s_estimatedMarkingTimePerByte;
311 #if ENABLE(ASSERT)
312 static uint16_t s_gcGeneration;
313 #endif
314 245
315 friend class ThreadState; 246 friend class ThreadState;
316 }; 247 };
317 248
318 template<typename T> 249 template<typename T>
319 struct IsEagerlyFinalizedType { 250 struct IsEagerlyFinalizedType {
320 private: 251 private:
321 typedef char YesType; 252 typedef char YesType;
322 struct NoType { 253 struct NoType {
323 char padding[8]; 254 char padding[8];
(...skipping 179 matching lines...) Expand 10 before | Expand all | Expand 10 after
503 void VisitorHelper<Derived>::handleWeakCell(Visitor* self, void* object) 434 void VisitorHelper<Derived>::handleWeakCell(Visitor* self, void* object)
504 { 435 {
505 T** cell = reinterpret_cast<T**>(object); 436 T** cell = reinterpret_cast<T**>(object);
506 if (*cell && !ObjectAliveTrait<T>::isHeapObjectAlive(*cell)) 437 if (*cell && !ObjectAliveTrait<T>::isHeapObjectAlive(*cell))
507 *cell = nullptr; 438 *cell = nullptr;
508 } 439 }
509 440
510 } // namespace blink 441 } // namespace blink
511 442
512 #endif // Heap_h 443 #endif // Heap_h
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698