Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(240)

Side by Side Diff: third_party/WebKit/Source/platform/heap/Heap.h

Issue 1919773002: Revert of Prepare for multiple ThreadHeaps (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: rebased Created 4 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 89 matching lines...) Expand 10 before | Expand all | Expand 10 after
100 { 100 {
101 static_assert(sizeof(T), "T must be fully defined"); 101 static_assert(sizeof(T), "T must be fully defined");
102 return object->isHeapObjectAlive(); 102 return object->isHeapObjectAlive();
103 } 103 }
104 }; 104 };
105 105
106 class PLATFORM_EXPORT ProcessHeap { 106 class PLATFORM_EXPORT ProcessHeap {
107 STATIC_ONLY(ProcessHeap); 107 STATIC_ONLY(ProcessHeap);
108 public: 108 public:
109 static void init(); 109 static void init();
110 static void shutdown();
111 110
112 static CrossThreadPersistentRegion& crossThreadPersistentRegion(); 111 static CrossThreadPersistentRegion& crossThreadPersistentRegion();
113 112
114 static bool isLowEndDevice() { return s_isLowEndDevice; } 113 static bool isLowEndDevice() { return s_isLowEndDevice; }
115 static void increaseTotalAllocatedObjectSize(size_t delta) { atomicAdd(&s_totalAllocatedObjectSize, static_cast<long>(delta)); } 114 static void increaseTotalAllocatedObjectSize(size_t delta) { atomicAdd(&s_totalAllocatedObjectSize, static_cast<long>(delta)); }
116 static void decreaseTotalAllocatedObjectSize(size_t delta) { atomicSubtract(&s_totalAllocatedObjectSize, static_cast<long>(delta)); } 115 static void decreaseTotalAllocatedObjectSize(size_t delta) { atomicSubtract(&s_totalAllocatedObjectSize, static_cast<long>(delta)); }
117 static size_t totalAllocatedObjectSize() { return acquireLoad(&s_totalAllocatedObjectSize); } 116 static size_t totalAllocatedObjectSize() { return acquireLoad(&s_totalAllocatedObjectSize); }
118 static void increaseTotalMarkedObjectSize(size_t delta) { atomicAdd(&s_totalMarkedObjectSize, static_cast<long>(delta)); } 117 static void increaseTotalMarkedObjectSize(size_t delta) { atomicAdd(&s_totalMarkedObjectSize, static_cast<long>(delta)); }
119 static void decreaseTotalMarkedObjectSize(size_t delta) { atomicSubtract(&s_totalMarkedObjectSize, static_cast<long>(delta)); }
120 static size_t totalMarkedObjectSize() { return acquireLoad(&s_totalMarkedObjectSize); } 118 static size_t totalMarkedObjectSize() { return acquireLoad(&s_totalMarkedObjectSize); }
121 static void increaseTotalAllocatedSpace(size_t delta) { atomicAdd(&s_totalAllocatedSpace, static_cast<long>(delta)); } 119 static void increaseTotalAllocatedSpace(size_t delta) { atomicAdd(&s_totalAllocatedSpace, static_cast<long>(delta)); }
122 static void decreaseTotalAllocatedSpace(size_t delta) { atomicSubtract(&s_totalAllocatedSpace, static_cast<long>(delta)); } 120 static void decreaseTotalAllocatedSpace(size_t delta) { atomicSubtract(&s_totalAllocatedSpace, static_cast<long>(delta)); }
123 static size_t totalAllocatedSpace() { return acquireLoad(&s_totalAllocatedSpace); } 121 static size_t totalAllocatedSpace() { return acquireLoad(&s_totalAllocatedSpace); }
124 static void resetHeapCounters(); 122 static void resetHeapCounters();
125 123
126 private: 124 private:
127 static bool s_shutdownComplete;
128 static bool s_isLowEndDevice; 125 static bool s_isLowEndDevice;
129 static size_t s_totalAllocatedSpace; 126 static size_t s_totalAllocatedSpace;
130 static size_t s_totalAllocatedObjectSize; 127 static size_t s_totalAllocatedObjectSize;
131 static size_t s_totalMarkedObjectSize; 128 static size_t s_totalMarkedObjectSize;
132 129
133 friend class ThreadState; 130 friend class ThreadState;
134 }; 131 };
135 132
136 // Stats for the heap. 133 // Stats for the heap.
137 class ThreadHeapStats { 134 class ThreadHeapStats {
(...skipping 29 matching lines...) Expand all
167 size_t m_objectSizeAtLastGC; 164 size_t m_objectSizeAtLastGC;
168 size_t m_markedObjectSize; 165 size_t m_markedObjectSize;
169 size_t m_markedObjectSizeAtLastCompleteSweep; 166 size_t m_markedObjectSizeAtLastCompleteSweep;
170 size_t m_wrapperCount; 167 size_t m_wrapperCount;
171 size_t m_wrapperCountAtLastGC; 168 size_t m_wrapperCountAtLastGC;
172 size_t m_collectedWrapperCount; 169 size_t m_collectedWrapperCount;
173 size_t m_partitionAllocSizeAtLastGC; 170 size_t m_partitionAllocSizeAtLastGC;
174 double m_estimatedMarkingTimePerByte; 171 double m_estimatedMarkingTimePerByte;
175 }; 172 };
176 173
177 using ThreadStateSet = HashSet<ThreadState*>;
178
179 class PLATFORM_EXPORT ThreadHeap { 174 class PLATFORM_EXPORT ThreadHeap {
175 STATIC_ONLY(ThreadHeap);
180 public: 176 public:
181 ThreadHeap(); 177 static void init();
182 ~ThreadHeap(); 178 static void shutdown();
183
184 // Returns true for main thread's heap.
185 // TODO(keishi): Per-thread-heap will return false.
186 bool isMainThreadHeap() { return this == ThreadHeap::mainThreadHeap(); }
187 static ThreadHeap* mainThreadHeap() { return s_mainThreadHeap; }
188 179
189 #if ENABLE(ASSERT) 180 #if ENABLE(ASSERT)
190 bool isAtSafePoint(); 181 static BasePage* findPageFromAddress(Address);
191 BasePage* findPageFromAddress(Address); 182 static BasePage* findPageFromAddress(const void* pointer) { return findPageFromAddress(reinterpret_cast<Address>(const_cast<void*>(pointer))); }
192 #endif 183 #endif
193 184
194 template<typename T> 185 template<typename T>
195 static inline bool isHeapObjectAlive(T* object) 186 static inline bool isHeapObjectAlive(T* object)
196 { 187 {
197 static_assert(sizeof(T), "T must be fully defined"); 188 static_assert(sizeof(T), "T must be fully defined");
198 // The strongification of collections relies on the fact that once a 189 // The strongification of collections relies on the fact that once a
199 // collection has been strongified, there is no way that it can contain 190 // collection has been strongified, there is no way that it can contain
200 // non-live entries, so no entries will be removed. Since you can't set 191 // non-live entries, so no entries will be removed. Since you can't set
201 // the mark bit on a null pointer, that means that null pointers are 192 // the mark bit on a null pointer, that means that null pointers are
(...skipping 16 matching lines...) Expand all
218 static inline bool isHeapObjectAlive(const UntracedMember<T>& member) 209 static inline bool isHeapObjectAlive(const UntracedMember<T>& member)
219 { 210 {
220 return isHeapObjectAlive(member.get()); 211 return isHeapObjectAlive(member.get());
221 } 212 }
222 template<typename T> 213 template<typename T>
223 static inline bool isHeapObjectAlive(const T*& ptr) 214 static inline bool isHeapObjectAlive(const T*& ptr)
224 { 215 {
225 return isHeapObjectAlive(ptr); 216 return isHeapObjectAlive(ptr);
226 } 217 }
227 218
228 RecursiveMutex& threadAttachMutex() { return m_threadAttachMutex; }
229 const ThreadStateSet& threads() const { return m_threads; }
230 ThreadHeapStats& heapStats() { return m_stats; }
231 SafePointBarrier* safePointBarrier() { return m_safePointBarrier.get(); }
232 CallbackStack* markingStack() const { return m_markingStack.get(); }
233 CallbackStack* postMarkingCallbackStack() const { return m_postMarkingCallbackStack.get(); }
234 CallbackStack* globalWeakCallbackStack() const { return m_globalWeakCallbackStack.get(); }
235 CallbackStack* ephemeronStack() const { return m_ephemeronStack.get(); }
236
237 void attach(ThreadState*);
238 void detach(ThreadState*);
239 void lockThreadAttachMutex();
240 void unlockThreadAttachMutex();
241 bool park();
242 void resume();
243
244 void visitPersistentRoots(Visitor*);
245 void visitStackRoots(Visitor*);
246 void checkAndPark(ThreadState*, SafePointAwareMutexLocker*);
247 void enterSafePoint(ThreadState*);
248 void leaveSafePoint(ThreadState*, SafePointAwareMutexLocker*);
249
250 // Add a weak pointer callback to the weak callback work list. General
251 // object pointer callbacks are added to a thread local weak callback work
252 // list and the callback is called on the thread that owns the object, with
253 // the closure pointer as an argument. Most of the time, the closure and
254 // the containerObject can be the same thing, but the containerObject is
255 // constrained to be on the heap, since the heap is used to identify the
256 // correct thread.
257 void pushThreadLocalWeakCallback(void* closure, void* containerObject, WeakCallback);
258
259 static RecursiveMutex& allHeapsMutex();
260 static HashSet<ThreadHeap*>& allHeaps();
261
262 // Is the finalizable GC object still alive, but slated for lazy sweeping? 219 // Is the finalizable GC object still alive, but slated for lazy sweeping?
263 // If a lazy sweep is in progress, returns true if the object was found 220 // If a lazy sweep is in progress, returns true if the object was found
264 // to be not reachable during the marking phase, but it has yet to be swept 221 // to be not reachable during the marking phase, but it has yet to be swept
265 // and finalized. The predicate returns false in all other cases. 222 // and finalized. The predicate returns false in all other cases.
266 // 223 //
267 // Holding a reference to an already-dead object is not a valid state 224 // Holding a reference to an already-dead object is not a valid state
268 // to be in; willObjectBeLazilySwept() has undefined behavior if passed 225 // to be in; willObjectBeLazilySwept() has undefined behavior if passed
269 // such a reference. 226 // such a reference.
270 template<typename T> 227 template<typename T>
271 NO_LAZY_SWEEP_SANITIZE_ADDRESS 228 NO_LAZY_SWEEP_SANITIZE_ADDRESS
272 static bool willObjectBeLazilySwept(const T* objectPointer) 229 static bool willObjectBeLazilySwept(const T* objectPointer)
273 { 230 {
274 static_assert(IsGarbageCollectedType<T>::value, "only objects deriving from GarbageCollected can be used."); 231 static_assert(IsGarbageCollectedType<T>::value, "only objects deriving from GarbageCollected can be used.");
275 BasePage* page = pageFromObject(objectPointer); 232 BasePage* page = pageFromObject(objectPointer);
276 if (page->hasBeenSwept()) 233 if (page->hasBeenSwept())
277 return false; 234 return false;
278 ASSERT(page->arena()->getThreadState()->isSweepingInProgress()); 235 ASSERT(page->arena()->getThreadState()->isSweepingInProgress());
279 236
280 return !ThreadHeap::isHeapObjectAlive(const_cast<T*>(objectPointer)); 237 return !ThreadHeap::isHeapObjectAlive(const_cast<T*>(objectPointer));
281 } 238 }
282 239
283 // Push a trace callback on the marking stack. 240 // Push a trace callback on the marking stack.
284 void pushTraceCallback(void* containerObject, TraceCallback); 241 static void pushTraceCallback(void* containerObject, TraceCallback);
285 242
286 // Push a trace callback on the post-marking callback stack. These 243 // Push a trace callback on the post-marking callback stack. These
287 // callbacks are called after normal marking (including ephemeron 244 // callbacks are called after normal marking (including ephemeron
288 // iteration). 245 // iteration).
289 void pushPostMarkingCallback(void*, TraceCallback); 246 static void pushPostMarkingCallback(void*, TraceCallback);
247
248 // Add a weak pointer callback to the weak callback work list. General
249 // object pointer callbacks are added to a thread local weak callback work
250 // list and the callback is called on the thread that owns the object, with
251 // the closure pointer as an argument. Most of the time, the closure and
252 // the containerObject can be the same thing, but the containerObject is
253 // constrained to be on the heap, since the heap is used to identify the
254 // correct thread.
255 static void pushThreadLocalWeakCallback(void* closure, void* containerObject, WeakCallback);
290 256
291 // Similar to the more general pushThreadLocalWeakCallback, but cell 257 // Similar to the more general pushThreadLocalWeakCallback, but cell
292 // pointer callbacks are added to a static callback work list and the weak 258 // pointer callbacks are added to a static callback work list and the weak
293 // callback is performed on the thread performing garbage collection. This 259 // callback is performed on the thread performing garbage collection. This
294 // is OK because cells are just cleared and no deallocation can happen. 260 // is OK because cells are just cleared and no deallocation can happen.
295 void pushGlobalWeakCallback(void** cell, WeakCallback); 261 static void pushGlobalWeakCallback(void** cell, WeakCallback);
296 262
297 // Pop the top of a marking stack and call the callback with the visitor 263 // Pop the top of a marking stack and call the callback with the visitor
298 // and the object. Returns false when there is nothing more to do. 264 // and the object. Returns false when there is nothing more to do.
299 bool popAndInvokeTraceCallback(Visitor*); 265 static bool popAndInvokeTraceCallback(Visitor*);
300 266
301 // Remove an item from the post-marking callback stack and call 267 // Remove an item from the post-marking callback stack and call
302 // the callback with the visitor and the object pointer. Returns 268 // the callback with the visitor and the object pointer. Returns
303 // false when there is nothing more to do. 269 // false when there is nothing more to do.
304 bool popAndInvokePostMarkingCallback(Visitor*); 270 static bool popAndInvokePostMarkingCallback(Visitor*);
305 271
306 // Remove an item from the weak callback work list and call the callback 272 // Remove an item from the weak callback work list and call the callback
307 // with the visitor and the closure pointer. Returns false when there is 273 // with the visitor and the closure pointer. Returns false when there is
308 // nothing more to do. 274 // nothing more to do.
309 bool popAndInvokeGlobalWeakCallback(Visitor*); 275 static bool popAndInvokeGlobalWeakCallback(Visitor*);
310 276
311 // Register an ephemeron table for fixed-point iteration. 277 // Register an ephemeron table for fixed-point iteration.
312 void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback); 278 static void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback);
313 #if ENABLE(ASSERT) 279 #if ENABLE(ASSERT)
314 bool weakTableRegistered(const void*); 280 static bool weakTableRegistered(const void*);
315 #endif 281 #endif
316 282
317 BlinkGC::GCReason lastGCReason() { return m_lastGCReason; }
318 RegionTree* getRegionTree() { return m_regionTree.get(); }
319
320 static inline size_t allocationSizeFromSize(size_t size) 283 static inline size_t allocationSizeFromSize(size_t size)
321 { 284 {
322 // Check the size before computing the actual allocation size. The 285 // Check the size before computing the actual allocation size. The
323 // allocation size calculation can overflow for large sizes and the chec k 286 // allocation size calculation can overflow for large sizes and the chec k
324 // therefore has to happen before any calculation on the size. 287 // therefore has to happen before any calculation on the size.
325 RELEASE_ASSERT(size < maxHeapObjectSize); 288 RELEASE_ASSERT(size < maxHeapObjectSize);
326 289
327 // Add space for header. 290 // Add space for header.
328 size_t allocationSize = size + sizeof(HeapObjectHeader); 291 size_t allocationSize = size + sizeof(HeapObjectHeader);
329 // Align size with allocation granularity. 292 // Align size with allocation granularity.
330 allocationSize = (allocationSize + allocationMask) & ~allocationMask; 293 allocationSize = (allocationSize + allocationMask) & ~allocationMask;
331 return allocationSize; 294 return allocationSize;
332 } 295 }
333 static Address allocateOnArenaIndex(ThreadState*, size_t, int arenaIndex, size_t gcInfoIndex, const char* typeName); 296 static Address allocateOnArenaIndex(ThreadState*, size_t, int arenaIndex, size_t gcInfoIndex, const char* typeName);
334 template<typename T> static Address allocate(size_t, bool eagerlySweep = false); 297 template<typename T> static Address allocate(size_t, bool eagerlySweep = false);
335 template<typename T> static Address reallocate(void* previous, size_t); 298 template<typename T> static Address reallocate(void* previous, size_t);
336 299
337 static const char* gcReasonString(BlinkGC::GCReason); 300 static const char* gcReasonString(BlinkGC::GCReason);
338 static void collectGarbage(BlinkGC::StackState, BlinkGC::GCType, BlinkGC::GC Reason); 301 static void collectGarbage(BlinkGC::StackState, BlinkGC::GCType, BlinkGC::GC Reason);
339 static void collectGarbageForTerminatingThread(ThreadState*); 302 static void collectGarbageForTerminatingThread(ThreadState*);
340 static void collectAllGarbage(); 303 static void collectAllGarbage();
341 304
342 void processMarkingStack(Visitor*); 305 static void processMarkingStack(Visitor*);
343 void postMarkingProcessing(Visitor*); 306 static void postMarkingProcessing(Visitor*);
344 void globalWeakProcessing(Visitor*); 307 static void globalWeakProcessing(Visitor*);
308 static void setForcePreciseGCForTesting();
345 309
346 void preGC(); 310 static void preGC();
347 void postGC(BlinkGC::GCType); 311 static void postGC(BlinkGC::GCType);
348 312
349 // Conservatively checks whether an address is a pointer in any of the 313 // Conservatively checks whether an address is a pointer in any of the
350 // thread heaps. If so marks the object pointed to as live. 314 // thread heaps. If so marks the object pointed to as live.
351 Address checkAndMarkPointer(Visitor*, Address); 315 static Address checkAndMarkPointer(Visitor*, Address);
352 316
353 size_t objectPayloadSizeForTesting(); 317 static size_t objectPayloadSizeForTesting();
354 318
355 void flushHeapDoesNotContainCache(); 319 static void flushHeapDoesNotContainCache();
356 320
357 FreePagePool* getFreePagePool() { return m_freePagePool.get(); } 321 static FreePagePool* getFreePagePool() { return s_freePagePool; }
358 OrphanedPagePool* getOrphanedPagePool() { return m_orphanedPagePool.get(); } 322 static OrphanedPagePool* getOrphanedPagePool() { return s_orphanedPagePool; }
359 323
360 // This look-up uses the region search tree and a negative contains cache to 324 // This look-up uses the region search tree and a negative contains cache to
361 // provide an efficient mapping from arbitrary addresses to the containing 325 // provide an efficient mapping from arbitrary addresses to the containing
362 // heap-page if one exists. 326 // heap-page if one exists.
363 BasePage* lookupPageForAddress(Address); 327 static BasePage* lookup(Address);
328 static RegionTree* getRegionTree();
364 329
365 static const GCInfo* gcInfo(size_t gcInfoIndex) 330 static const GCInfo* gcInfo(size_t gcInfoIndex)
366 { 331 {
367 ASSERT(gcInfoIndex >= 1); 332 ASSERT(gcInfoIndex >= 1);
368 ASSERT(gcInfoIndex < GCInfoTable::maxIndex); 333 ASSERT(gcInfoIndex < GCInfoTable::maxIndex);
369 ASSERT(s_gcInfoTable); 334 ASSERT(s_gcInfoTable);
370 const GCInfo* info = s_gcInfoTable[gcInfoIndex]; 335 const GCInfo* info = s_gcInfoTable[gcInfoIndex];
371 ASSERT(info); 336 ASSERT(info);
372 return info; 337 return info;
373 } 338 }
374 339
340 static ThreadHeapStats& heapStats();
341
342 static double estimatedMarkingTime();
375 static void reportMemoryUsageHistogram(); 343 static void reportMemoryUsageHistogram();
376 static void reportMemoryUsageForTracing(); 344 static void reportMemoryUsageForTracing();
345 static BlinkGC::GCReason lastGCReason() { return s_lastGCReason; }
377 346
378 private: 347 private:
379 // Reset counters that track live and allocated-since-last-GC sizes. 348 // Reset counters that track live and allocated-since-last-GC sizes.
380 void resetHeapCounters(); 349 static void resetHeapCounters();
381 350
382 static int arenaIndexForObjectSize(size_t); 351 static int arenaIndexForObjectSize(size_t);
383 static bool isNormalArenaIndex(int); 352 static bool isNormalArenaIndex(int);
384 353
385 void decommitCallbackStacks(); 354 static void decommitCallbackStacks();
386 355
387 RecursiveMutex m_threadAttachMutex; 356 static CallbackStack* s_markingStack;
388 ThreadStateSet m_threads; 357 static CallbackStack* s_postMarkingCallbackStack;
389 ThreadHeapStats m_stats; 358 static CallbackStack* s_globalWeakCallbackStack;
390 OwnPtr<RegionTree> m_regionTree; 359 static CallbackStack* s_ephemeronStack;
391 OwnPtr<HeapDoesNotContainCache> m_heapDoesNotContainCache; 360 static HeapDoesNotContainCache* s_heapDoesNotContainCache;
392 OwnPtr<SafePointBarrier> m_safePointBarrier; 361 static FreePagePool* s_freePagePool;
393 OwnPtr<FreePagePool> m_freePagePool; 362 static OrphanedPagePool* s_orphanedPagePool;
394 OwnPtr<OrphanedPagePool> m_orphanedPagePool; 363 static BlinkGC::GCReason s_lastGCReason;
395 OwnPtr<CallbackStack> m_markingStack;
396 OwnPtr<CallbackStack> m_postMarkingCallbackStack;
397 OwnPtr<CallbackStack> m_globalWeakCallbackStack;
398 OwnPtr<CallbackStack> m_ephemeronStack;
399 BlinkGC::GCReason m_lastGCReason;
400
401 static ThreadHeap* s_mainThreadHeap;
402 364
403 friend class ThreadState; 365 friend class ThreadState;
404 }; 366 };
405 367
406 template<typename T> 368 template<typename T>
407 struct IsEagerlyFinalizedType { 369 struct IsEagerlyFinalizedType {
408 STATIC_ONLY(IsEagerlyFinalizedType); 370 STATIC_ONLY(IsEagerlyFinalizedType);
409 private: 371 private:
410 typedef char YesType; 372 typedef char YesType;
411 struct NoType { 373 struct NoType {
(...skipping 180 matching lines...) Expand 10 before | Expand all | Expand 10 after
592 void VisitorHelper<Derived>::handleWeakCell(Visitor* self, void* object) 554 void VisitorHelper<Derived>::handleWeakCell(Visitor* self, void* object)
593 { 555 {
594 T** cell = reinterpret_cast<T**>(object); 556 T** cell = reinterpret_cast<T**>(object);
595 if (*cell && !ObjectAliveTrait<T>::isHeapObjectAlive(*cell)) 557 if (*cell && !ObjectAliveTrait<T>::isHeapObjectAlive(*cell))
596 *cell = nullptr; 558 *cell = nullptr;
597 } 559 }
598 560
599 } // namespace blink 561 } // namespace blink
600 562
601 #endif // Heap_h 563 #endif // Heap_h
OLDNEW
« no previous file with comments | « third_party/WebKit/Source/platform/exported/Platform.cpp ('k') | third_party/WebKit/Source/platform/heap/Heap.cpp » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698