Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 89 matching lines...) | |
| 100 { | 100 { |
| 101 static_assert(sizeof(T), "T must be fully defined"); | 101 static_assert(sizeof(T), "T must be fully defined"); |
| 102 return object->isHeapObjectAlive(); | 102 return object->isHeapObjectAlive(); |
| 103 } | 103 } |
| 104 }; | 104 }; |
| 105 | 105 |
| 106 class PLATFORM_EXPORT ProcessHeap { | 106 class PLATFORM_EXPORT ProcessHeap { |
| 107 STATIC_ONLY(ProcessHeap); | 107 STATIC_ONLY(ProcessHeap); |
| 108 public: | 108 public: |
| 109 static void init(); | 109 static void init(); |
| 110 static void shutdown(); | |
| 110 | 111 |
| 111 static CrossThreadPersistentRegion& crossThreadPersistentRegion(); | 112 static CrossThreadPersistentRegion& crossThreadPersistentRegion(); |
| 112 | 113 |
| 113 static bool isLowEndDevice() { return s_isLowEndDevice; } | 114 static bool isLowEndDevice() { return s_isLowEndDevice; } |
| 114 static void increaseTotalAllocatedObjectSize(size_t delta) { atomicAdd(&s_totalAllocatedObjectSize, static_cast<long>(delta)); } | 115 static void increaseTotalAllocatedObjectSize(size_t delta) { atomicAdd(&s_totalAllocatedObjectSize, static_cast<long>(delta)); } |
| 115 static void decreaseTotalAllocatedObjectSize(size_t delta) { atomicSubtract(&s_totalAllocatedObjectSize, static_cast<long>(delta)); } | 116 static void decreaseTotalAllocatedObjectSize(size_t delta) { atomicSubtract(&s_totalAllocatedObjectSize, static_cast<long>(delta)); } |
| 116 static size_t totalAllocatedObjectSize() { return acquireLoad(&s_totalAllocatedObjectSize); } | 117 static size_t totalAllocatedObjectSize() { return acquireLoad(&s_totalAllocatedObjectSize); } |
| 117 static void increaseTotalMarkedObjectSize(size_t delta) { atomicAdd(&s_totalMarkedObjectSize, static_cast<long>(delta)); } | 118 static void increaseTotalMarkedObjectSize(size_t delta) { atomicAdd(&s_totalMarkedObjectSize, static_cast<long>(delta)); } |
| 118 static size_t totalMarkedObjectSize() { return acquireLoad(&s_totalMarkedObjectSize); } | 119 static size_t totalMarkedObjectSize() { return acquireLoad(&s_totalMarkedObjectSize); } |
| 119 static void increaseTotalAllocatedSpace(size_t delta) { atomicAdd(&s_totalAllocatedSpace, static_cast<long>(delta)); } | 120 static void increaseTotalAllocatedSpace(size_t delta) { atomicAdd(&s_totalAllocatedSpace, static_cast<long>(delta)); } |
| 120 static void decreaseTotalAllocatedSpace(size_t delta) { atomicSubtract(&s_totalAllocatedSpace, static_cast<long>(delta)); } | 121 static void decreaseTotalAllocatedSpace(size_t delta) { atomicSubtract(&s_totalAllocatedSpace, static_cast<long>(delta)); } |
| 121 static size_t totalAllocatedSpace() { return acquireLoad(&s_totalAllocatedSpace); } | 122 static size_t totalAllocatedSpace() { return acquireLoad(&s_totalAllocatedSpace); } |
| 122 static void resetHeapCounters(); | 123 static void resetHeapCounters(); |
| 123 | 124 |
| 124 private: | 125 private: |
| 126 static bool s_shutdownComplete; | |
| 125 static bool s_isLowEndDevice; | 127 static bool s_isLowEndDevice; |
| 126 static size_t s_totalAllocatedSpace; | 128 static size_t s_totalAllocatedSpace; |
| 127 static size_t s_totalAllocatedObjectSize; | 129 static size_t s_totalAllocatedObjectSize; |
| 128 static size_t s_totalMarkedObjectSize; | 130 static size_t s_totalMarkedObjectSize; |
| 129 | 131 |
| 130 friend class ThreadState; | 132 friend class ThreadState; |
| 131 }; | 133 }; |
| 132 | 134 |
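The ProcessHeap counters above pair atomicAdd/atomicSubtract writers with an acquireLoad reader (WTF atomics). A minimal portable sketch of the same pattern, with std::atomic standing in for the WTF primitives (the class and member names below are illustrative, not Blink's):

```cpp
#include <atomic>
#include <cstddef>

// Sketch of the ProcessHeap counter pattern, with std::atomic standing in
// for WTF's atomicAdd/atomicSubtract/acquireLoad.
class HeapCounters {
public:
    static void increase(size_t delta) { s_total.fetch_add(delta, std::memory_order_relaxed); }
    static void decrease(size_t delta) { s_total.fetch_sub(delta, std::memory_order_relaxed); }
    // acquireLoad corresponds to a load with acquire ordering.
    static size_t total() { return s_total.load(std::memory_order_acquire); }

private:
    static std::atomic<size_t> s_total;
};

std::atomic<size_t> HeapCounters::s_total{0};
```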
| 133 // Stats for the heap. | 135 // Stats for the heap. |
| 134 class ThreadHeapStats { | 136 class ThreadHeapStats { |
| (...skipping 29 matching lines...) | |
| 164 size_t m_objectSizeAtLastGC; | 166 size_t m_objectSizeAtLastGC; |
| 165 size_t m_markedObjectSize; | 167 size_t m_markedObjectSize; |
| 166 size_t m_markedObjectSizeAtLastCompleteSweep; | 168 size_t m_markedObjectSizeAtLastCompleteSweep; |
| 167 size_t m_wrapperCount; | 169 size_t m_wrapperCount; |
| 168 size_t m_wrapperCountAtLastGC; | 170 size_t m_wrapperCountAtLastGC; |
| 169 size_t m_collectedWrapperCount; | 171 size_t m_collectedWrapperCount; |
| 170 size_t m_partitionAllocSizeAtLastGC; | 172 size_t m_partitionAllocSizeAtLastGC; |
| 171 double m_estimatedMarkingTimePerByte; | 173 double m_estimatedMarkingTimePerByte; |
| 172 }; | 174 }; |
| 173 | 175 |
| 176 using ThreadStateSet = HashSet<ThreadState*>; | |
| 177 | |
| 174 class PLATFORM_EXPORT ThreadHeap { | 178 class PLATFORM_EXPORT ThreadHeap { |
| 175 STATIC_ONLY(ThreadHeap); | |
| 176 public: | 179 public: |
| 177 static void init(); | 180 ThreadHeap(); |
| 178 static void shutdown(); | 181 ~ThreadHeap(); |
| 182 | |
| 183 // Returns true for main thread's heap. | |
| 184 // TODO(keishi): Per-thread-heap will return false. | |
| 185 bool isMain() { return this == ThreadHeap::main(); } | |
| 186 static ThreadHeap* main() { return s_mainThreadHeap; } | |
haraken 2016/04/21 11:48:25: isMain => isMainThreadHeap; main => mainThreadHeap
keishi 2016/04/22 06:09:58: Done.
| 179 | 187 |
| 180 #if ENABLE(ASSERT) | 188 #if ENABLE(ASSERT) |
| 181 static BasePage* findPageFromAddress(Address); | 189 bool isAtSafePoint(); |
| 182 static BasePage* findPageFromAddress(const void* pointer) { return findPageFromAddress(reinterpret_cast<Address>(const_cast<void*>(pointer))); } | 190 BasePage* findPageFromAddress(Address); |
| 183 #endif | 191 #endif |
| 184 | 192 |
| 185 template<typename T> | 193 template<typename T> |
| 186 static inline bool isHeapObjectAlive(T* object) | 194 static inline bool isHeapObjectAlive(T* object) |
| 187 { | 195 { |
| 188 static_assert(sizeof(T), "T must be fully defined"); | 196 static_assert(sizeof(T), "T must be fully defined"); |
| 189 // The strongification of collections relies on the fact that once a | 197 // The strongification of collections relies on the fact that once a |
| 190 // collection has been strongified, there is no way that it can contain | 198 // collection has been strongified, there is no way that it can contain |
| 191 // non-live entries, so no entries will be removed. Since you can't set | 199 // non-live entries, so no entries will be removed. Since you can't set |
| 192 // the mark bit on a null pointer, that means that null pointers are | 200 // the mark bit on a null pointer, that means that null pointers are |
| (...skipping 16 matching lines...) Expand all Loading... | |
| 209 static inline bool isHeapObjectAlive(const UntracedMember<T>& member) | 217 static inline bool isHeapObjectAlive(const UntracedMember<T>& member) |
| 210 { | 218 { |
| 211 return isHeapObjectAlive(member.get()); | 219 return isHeapObjectAlive(member.get()); |
| 212 } | 220 } |
| 213 template<typename T> | 221 template<typename T> |
| 214 static inline bool isHeapObjectAlive(const T*& ptr) | 222 static inline bool isHeapObjectAlive(const T*& ptr) |
| 215 { | 223 { |
| 216 return isHeapObjectAlive(ptr); | 224 return isHeapObjectAlive(ptr); |
| 217 } | 225 } |
| 218 | 226 |
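The overload set above lets weak-processing code test liveness uniformly across raw pointers, Member<T>, WeakMember<T>, and UntracedMember<T>. A hypothetical helper built on it (the function and field names are assumptions, not Blink code):

```cpp
// Hypothetical: clear a cached pointer once the GC has determined its target
// is dead. Works for any pointer type accepted by the overloads above.
template <typename T>
void clearIfDead(T*& field)
{
    if (field && !ThreadHeap::isHeapObjectAlive(field))
        field = nullptr;
}
```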
| 227 RecursiveMutex& threadAttachMutex() { return m_threadAttachMutex; } | |
| 228 const ThreadStateSet& threads() const { return m_threads; } | |
| 229 ThreadHeapStats& heapStats() { return m_stats; } | |
| 230 SafePointBarrier* safePointBarrier() { return m_safePointBarrier.get(); } | |
| 231 CallbackStack* markingStack() const { return m_markingStack.get(); } | |
| 232 CallbackStack* postMarkingCallbackStack() const { return m_postMarkingCallbackStack.get(); } | |
| 233 CallbackStack* globalWeakCallbackStack() const { return m_globalWeakCallbackStack.get(); } | |
| 234 CallbackStack* ephemeronStack() const { return m_ephemeronStack.get(); } | |
| 235 | |
| 236 void attach(ThreadState*); | |
| 237 void detach(ThreadState*); | |
| 238 void lockThreadAttachMutex(); | |
| 239 void unlockThreadAttachMutex(); | |
| 240 bool park(); | |
| 241 void resume(); | |
| 242 | |
| 243 void visitPersistentRoots(Visitor*); | |
| 244 void visitStackRoots(Visitor*); | |
| 245 void checkAndPark(ThreadState*, SafePointAwareMutexLocker*); | |
| 246 void enterSafePoint(ThreadState*); | |
| 247 void leaveSafePoint(ThreadState*, SafePointAwareMutexLocker*); | |
| 248 | |
| 249 // Add a weak pointer callback to the weak callback work list. General | |
| 250 // object pointer callbacks are added to a thread local weak callback work | |
| 251 // list and the callback is called on the thread that owns the object, with | |
| 252 // the closure pointer as an argument. Most of the time, the closure and | |
| 253 // the containerObject can be the same thing, but the containerObject is | |
| 254 // constrained to be on the heap, since the heap is used to identify the | |
| 255 // correct thread. | |
| 256 void pushThreadLocalWeakCallback(void* closure, void* containerObject, WeakCallback); | |
| 257 | |
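The closure/containerObject split described in the comment above can be made concrete with a hypothetical registration. The types, the callback body, the exact WeakCallback signature, and the visitor->heap() accessor are all assumptions for illustration:

```cpp
// Hypothetical: a heap object registers a thread-local weak callback during
// tracing. 'this' serves as both closure and containerObject (the common
// case): the container locates the owning thread, the closure is echoed back.
class NodeCache : public GarbageCollected<NodeCache> {
public:
    void trace(Visitor* visitor)
    {
        // visitor->heap() is assumed here for reaching the owning ThreadHeap.
        visitor->heap()->pushThreadLocalWeakCallback(this, this, clearDeadEntry);
    }
    static void clearDeadEntry(Visitor*, void* closure)
    {
        NodeCache* self = static_cast<NodeCache*>(closure);
        if (self->m_entry && !ThreadHeap::isHeapObjectAlive(self->m_entry))
            self->m_entry = nullptr;
    }

private:
    Node* m_entry = nullptr; // deliberately untraced, hence weak
};
```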
| 258 static RecursiveMutex& allHeapsMutex(); | |
| 259 static HashSet<ThreadHeap*>& allHeaps(); | |
| 260 | |
| 219 // Is the finalizable GC object still alive, but slated for lazy sweeping? | 261 // Is the finalizable GC object still alive, but slated for lazy sweeping? |
| 220 // If a lazy sweep is in progress, returns true if the object was found | 262 // If a lazy sweep is in progress, returns true if the object was found |
| 221 // to be not reachable during the marking phase, but it has yet to be swept | 263 // to be not reachable during the marking phase, but it has yet to be swept |
| 222 // and finalized. The predicate returns false in all other cases. | 264 // and finalized. The predicate returns false in all other cases. |
| 223 // | 265 // |
| 224 // Holding a reference to an already-dead object is not a valid state | 266 // Holding a reference to an already-dead object is not a valid state |
| 225 // to be in; willObjectBeLazilySwept() has undefined behavior if passed | 267 // to be in; willObjectBeLazilySwept() has undefined behavior if passed |
| 226 // such a reference. | 268 // such a reference. |
| 227 template<typename T> | 269 template<typename T> |
| 228 NO_LAZY_SWEEP_SANITIZE_ADDRESS | 270 NO_LAZY_SWEEP_SANITIZE_ADDRESS |
| 229 static bool willObjectBeLazilySwept(const T* objectPointer) | 271 static bool willObjectBeLazilySwept(const T* objectPointer) |
| 230 { | 272 { |
| 231 static_assert(IsGarbageCollectedType<T>::value, "only objects deriving from GarbageCollected can be used."); | 273 static_assert(IsGarbageCollectedType<T>::value, "only objects deriving from GarbageCollected can be used."); |
| 232 BasePage* page = pageFromObject(objectPointer); | 274 BasePage* page = pageFromObject(objectPointer); |
| 233 if (page->hasBeenSwept()) | 275 if (page->hasBeenSwept()) |
| 234 return false; | 276 return false; |
| 235 ASSERT(page->arena()->getThreadState()->isSweepingInProgress()); | 277 ASSERT(page->arena()->getThreadState()->isSweepingInProgress()); |
| 236 | 278 |
| 237 return !ThreadHeap::isHeapObjectAlive(const_cast<T*>(objectPointer)); | 279 return !ThreadHeap::isHeapObjectAlive(const_cast<T*>(objectPointer)); |
| 238 } | 280 } |
| 239 | 281 |
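A typical (hypothetical) caller of willObjectBeLazilySwept is a finalizer that must not call back into a peer object condemned by the same GC cycle; every name below is illustrative:

```cpp
// Hypothetical eagerly-finalized observer: the peer's memory is still intact
// during lazy sweeping, but if it was found unreachable there is no point
// (and no safety) in notifying it.
class Watcher : public GarbageCollectedFinalized<Watcher> {
public:
    ~Watcher()
    {
        if (m_peer && !ThreadHeap::willObjectBeLazilySwept(m_peer))
            m_peer->watcherDestroyed(this);
    }

private:
    Peer* m_peer; // illustrative
};
```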
| 240 // Push a trace callback on the marking stack. | 282 // Push a trace callback on the marking stack. |
| 241 static void pushTraceCallback(void* containerObject, TraceCallback); | 283 void pushTraceCallback(void* containerObject, TraceCallback); |
| 242 | 284 |
| 243 // Push a trace callback on the post-marking callback stack. These | 285 // Push a trace callback on the post-marking callback stack. These |
| 244 // callbacks are called after normal marking (including ephemeron | 286 // callbacks are called after normal marking (including ephemeron |
| 245 // iteration). | 287 // iteration). |
| 246 static void pushPostMarkingCallback(void*, TraceCallback); | 288 void pushPostMarkingCallback(void*, TraceCallback); |
| 247 | |
| 248 // Add a weak pointer callback to the weak callback work list. General | |
| 249 // object pointer callbacks are added to a thread local weak callback work | |
| 250 // list and the callback is called on the thread that owns the object, with | |
| 251 // the closure pointer as an argument. Most of the time, the closure and | |
| 252 // the containerObject can be the same thing, but the containerObject is | |
| 253 // constrained to be on the heap, since the heap is used to identify the | |
| 254 // correct thread. | |
| 255 static void pushThreadLocalWeakCallback(void* closure, void* containerObject, WeakCallback); | |
| 256 | 289 |
| 257 // Similar to the more general pushThreadLocalWeakCallback, but cell | 290 // Similar to the more general pushThreadLocalWeakCallback, but cell |
| 258 // pointer callbacks are added to a static callback work list and the weak | 291 // pointer callbacks are added to a static callback work list and the weak |
| 259 // callback is performed on the thread performing garbage collection. This | 292 // callback is performed on the thread performing garbage collection. This |
| 260 // is OK because cells are just cleared and no deallocation can happen. | 293 // is OK because cells are just cleared and no deallocation can happen. |
| 261 static void pushGlobalWeakCallback(void** cell, WeakCallback); | 294 void pushGlobalWeakCallback(void** cell, WeakCallback); |
| 262 | 295 |
| 263 // Pop the top of a marking stack and call the callback with the visitor | 296 // Pop the top of a marking stack and call the callback with the visitor |
| 264 // and the object. Returns false when there is nothing more to do. | 297 // and the object. Returns false when there is nothing more to do. |
| 265 static bool popAndInvokeTraceCallback(Visitor*); | 298 bool popAndInvokeTraceCallback(Visitor*); |
| 266 | 299 |
| 267 // Remove an item from the post-marking callback stack and call | 300 // Remove an item from the post-marking callback stack and call |
| 268 // the callback with the visitor and the object pointer. Returns | 301 // the callback with the visitor and the object pointer. Returns |
| 269 // false when there is nothing more to do. | 302 // false when there is nothing more to do. |
| 270 static bool popAndInvokePostMarkingCallback(Visitor*); | 303 bool popAndInvokePostMarkingCallback(Visitor*); |
| 271 | 304 |
| 272 // Remove an item from the weak callback work list and call the callback | 305 // Remove an item from the weak callback work list and call the callback |
| 273 // with the visitor and the closure pointer. Returns false when there is | 306 // with the visitor and the closure pointer. Returns false when there is |
| 274 // nothing more to do. | 307 // nothing more to do. |
| 275 static bool popAndInvokeGlobalWeakCallback(Visitor*); | 308 bool popAndInvokeGlobalWeakCallback(Visitor*); |
| 276 | 309 |
| 277 // Register an ephemeron table for fixed-point iteration. | 310 // Register an ephemeron table for fixed-point iteration. |
| 278 static void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback); | 311 void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback); |
| 279 #if ENABLE(ASSERT) | 312 #if ENABLE(ASSERT) |
| 280 static bool weakTableRegistered(const void*); | 313 bool weakTableRegistered(const void*); |
| 281 #endif | 314 #endif |
| 282 | 315 |
| 316 BlinkGC::GCReason lastGCReason() { return m_lastGCReason; } | |
| 317 RegionTree* getRegionTree() { return m_regionTree.get(); } | |
| 318 | |
| 283 static inline size_t allocationSizeFromSize(size_t size) | 319 static inline size_t allocationSizeFromSize(size_t size) |
| 284 { | 320 { |
| 285 // Check the size before computing the actual allocation size. The | 321 // Check the size before computing the actual allocation size. The |
| 286 // allocation size calculation can overflow for large sizes and the check | 322 // allocation size calculation can overflow for large sizes and the check |
| 287 // therefore has to happen before any calculation on the size. | 323 // therefore has to happen before any calculation on the size. |
| 288 RELEASE_ASSERT(size < maxHeapObjectSize); | 324 RELEASE_ASSERT(size < maxHeapObjectSize); |
| 289 | 325 |
| 290 // Add space for header. | 326 // Add space for header. |
| 291 size_t allocationSize = size + sizeof(HeapObjectHeader); | 327 size_t allocationSize = size + sizeof(HeapObjectHeader); |
| 292 // Align size with allocation granularity. | 328 // Align size with allocation granularity. |
| 293 allocationSize = (allocationSize + allocationMask) & ~allocationMask; | 329 allocationSize = (allocationSize + allocationMask) & ~allocationMask; |
| 294 return allocationSize; | 330 return allocationSize; |
| 295 } | 331 } |
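For concreteness, here is the arithmetic from allocationSizeFromSize with assumed constants (an 8-byte HeapObjectHeader and 8-byte allocation granularity, so allocationMask == 7; the real values are defined elsewhere in Blink):

```cpp
#include <cstddef>

constexpr size_t kHeaderSize = 8;     // assumed sizeof(HeapObjectHeader)
constexpr size_t kAllocationMask = 7; // assumed granularity - 1

constexpr size_t allocationSizeFromSize(size_t size)
{
    size_t allocationSize = size + kHeaderSize;                   // 20 -> 28
    return (allocationSize + kAllocationMask) & ~kAllocationMask; // 28 -> 32
}

static_assert(allocationSizeFromSize(20) == 32, "20-byte payload occupies 32 bytes");
```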
| 296 static Address allocateOnArenaIndex(ThreadState*, size_t, int arenaIndex, size_t gcInfoIndex, const char* typeName); | 332 static Address allocateOnArenaIndex(ThreadState*, size_t, int arenaIndex, size_t gcInfoIndex, const char* typeName); |
| 297 template<typename T> static Address allocate(size_t, bool eagerlySweep = false); | 333 template<typename T> static Address allocate(size_t, bool eagerlySweep = false); |
| 298 template<typename T> static Address reallocate(void* previous, size_t); | 334 template<typename T> static Address reallocate(void* previous, size_t); |
| 299 | 335 |
| 300 static const char* gcReasonString(BlinkGC::GCReason); | 336 static const char* gcReasonString(BlinkGC::GCReason); |
| 301 static void collectGarbage(BlinkGC::StackState, BlinkGC::GCType, BlinkGC::GCReason); | 337 static void collectGarbage(BlinkGC::StackState, BlinkGC::GCType, BlinkGC::GCReason); |
| 302 static void collectGarbageForTerminatingThread(ThreadState*); | 338 static void collectGarbageForTerminatingThread(ThreadState*); |
| 303 static void collectAllGarbage(); | 339 static void collectAllGarbage(); |
| 304 | 340 |
| 305 static void processMarkingStack(Visitor*); | 341 void processMarkingStack(Visitor*); |
| 306 static void postMarkingProcessing(Visitor*); | 342 void postMarkingProcessing(Visitor*); |
| 307 static void globalWeakProcessing(Visitor*); | 343 void globalWeakProcessing(Visitor*); |
| 308 static void setForcePreciseGCForTesting(); | |
| 309 | 344 |
| 310 static void preGC(); | 345 void preGC(); |
| 311 static void postGC(BlinkGC::GCType); | 346 void postGC(BlinkGC::GCType); |
| 312 | 347 |
| 313 // Conservatively checks whether an address is a pointer in any of the | 348 // Conservatively checks whether an address is a pointer in any of the |
| 314 // thread heaps. If so marks the object pointed to as live. | 349 // thread heaps. If so marks the object pointed to as live. |
| 315 static Address checkAndMarkPointer(Visitor*, Address); | 350 Address checkAndMarkPointer(Visitor*, Address); |
| 316 | 351 |
| 317 static size_t objectPayloadSizeForTesting(); | 352 size_t objectPayloadSizeForTesting(); |
| 318 | 353 |
| 319 static void flushHeapDoesNotContainCache(); | 354 void flushHeapDoesNotContainCache(); |
| 320 | 355 |
| 321 static FreePagePool* getFreePagePool() { return s_freePagePool; } | 356 FreePagePool* getFreePagePool() { return m_freePagePool.get(); } |
| 322 static OrphanedPagePool* getOrphanedPagePool() { return s_orphanedPagePool; } | 357 OrphanedPagePool* getOrphanedPagePool() { return m_orphanedPagePool.get(); } |
| 323 | 358 |
| 324 // This look-up uses the region search tree and a negative contains cache to | 359 // This look-up uses the region search tree and a negative contains cache to |
| 325 // provide an efficient mapping from arbitrary addresses to the containing | 360 // provide an efficient mapping from arbitrary addresses to the containing |
| 326 // heap-page if one exists. | 361 // heap-page if one exists. |
| 327 static BasePage* lookup(Address); | 362 BasePage* lookupPageForAddress(Address); |
| 328 static RegionTree* getRegionTree(); | |
| 329 | 363 |
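Schematically, the two-tier lookup described above consults the negative cache first and only then walks the region search tree; every method name in this sketch is an assumption about the helpers' interfaces, not the CL's actual code:

```cpp
// Sketch of lookupPageForAddress: a negative ("heap does not contain") cache
// short-circuits repeat misses before the region search tree is consulted.
BasePage* lookupSketch(Address address)
{
    if (m_heapDoesNotContainCache->lookup(address))
        return nullptr;                      // remembered miss
    if (BasePage* page = m_regionTree->lookup(address))
        return page;                         // address is inside a heap page
    m_heapDoesNotContainCache->add(address); // record the miss for next time
    return nullptr;
}
```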
| 330 static const GCInfo* gcInfo(size_t gcInfoIndex) | 364 static const GCInfo* gcInfo(size_t gcInfoIndex) |
| 331 { | 365 { |
| 332 ASSERT(gcInfoIndex >= 1); | 366 ASSERT(gcInfoIndex >= 1); |
| 333 ASSERT(gcInfoIndex < GCInfoTable::maxIndex); | 367 ASSERT(gcInfoIndex < GCInfoTable::maxIndex); |
| 334 ASSERT(s_gcInfoTable); | 368 ASSERT(s_gcInfoTable); |
| 335 const GCInfo* info = s_gcInfoTable[gcInfoIndex]; | 369 const GCInfo* info = s_gcInfoTable[gcInfoIndex]; |
| 336 ASSERT(info); | 370 ASSERT(info); |
| 337 return info; | 371 return info; |
| 338 } | 372 } |
| 339 | 373 |
| 340 static ThreadHeapStats& heapStats(); | |
| 341 | |
| 342 static double estimatedMarkingTime(); | |
| 343 static void reportMemoryUsageHistogram(); | 374 static void reportMemoryUsageHistogram(); |
| 344 static void reportMemoryUsageForTracing(); | 375 static void reportMemoryUsageForTracing(); |
| 345 static BlinkGC::GCReason lastGCReason() { return s_lastGCReason; } | |
| 346 | 376 |
| 347 private: | 377 private: |
| 348 // Reset counters that track live and allocated-since-last-GC sizes. | 378 // Reset counters that track live and allocated-since-last-GC sizes. |
| 349 static void resetHeapCounters(); | 379 void resetHeapCounters(); |
| 350 | 380 |
| 351 static int arenaIndexForObjectSize(size_t); | 381 static int arenaIndexForObjectSize(size_t); |
| 352 static bool isNormalArenaIndex(int); | 382 static bool isNormalArenaIndex(int); |
| 353 | 383 |
| 354 static void decommitCallbackStacks(); | 384 void decommitCallbackStacks(); |
| 355 | 385 |
| 356 static CallbackStack* s_markingStack; | 386 RecursiveMutex m_threadAttachMutex; |
| 357 static CallbackStack* s_postMarkingCallbackStack; | 387 ThreadStateSet m_threads; |
| 358 static CallbackStack* s_globalWeakCallbackStack; | 388 ThreadHeapStats m_stats; |
| 359 static CallbackStack* s_ephemeronStack; | 389 OwnPtr<RegionTree> m_regionTree; |
| 360 static HeapDoesNotContainCache* s_heapDoesNotContainCache; | 390 OwnPtr<HeapDoesNotContainCache> m_heapDoesNotContainCache; |
| 361 static FreePagePool* s_freePagePool; | 391 OwnPtr<SafePointBarrier> m_safePointBarrier; |
| 362 static OrphanedPagePool* s_orphanedPagePool; | 392 OwnPtr<FreePagePool> m_freePagePool; |
| 363 static BlinkGC::GCReason s_lastGCReason; | 393 OwnPtr<OrphanedPagePool> m_orphanedPagePool; |
| 394 OwnPtr<CallbackStack> m_markingStack; | |
| 395 OwnPtr<CallbackStack> m_postMarkingCallbackStack; | |
| 396 OwnPtr<CallbackStack> m_globalWeakCallbackStack; | |
| 397 OwnPtr<CallbackStack> m_ephemeronStack; | |
| 398 BlinkGC::GCReason m_lastGCReason; | |
| 399 | |
| 400 static ThreadHeap* s_mainThreadHeap; | |
| 364 | 401 |
| 365 friend class ThreadState; | 402 friend class ThreadState; |
| 366 }; | 403 }; |
| 367 | 404 |
| 368 template<typename T> | 405 template<typename T> |
| 369 struct IsEagerlyFinalizedType { | 406 struct IsEagerlyFinalizedType { |
| 370 STATIC_ONLY(IsEagerlyFinalizedType); | 407 STATIC_ONLY(IsEagerlyFinalizedType); |
| 371 private: | 408 private: |
| 372 typedef char YesType; | 409 typedef char YesType; |
| 373 struct NoType { | 410 struct NoType { |
| (...skipping 180 matching lines...) | |
| 554 void VisitorHelper<Derived>::handleWeakCell(Visitor* self, void* object) | 591 void VisitorHelper<Derived>::handleWeakCell(Visitor* self, void* object) |
| 555 { | 592 { |
| 556 T** cell = reinterpret_cast<T**>(object); | 593 T** cell = reinterpret_cast<T**>(object); |
| 557 if (*cell && !ObjectAliveTrait<T>::isHeapObjectAlive(*cell)) | 594 if (*cell && !ObjectAliveTrait<T>::isHeapObjectAlive(*cell)) |
| 558 *cell = nullptr; | 595 *cell = nullptr; |
| 559 } | 596 } |
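handleWeakCell is the kind of cell callback pushGlobalWeakCallback expects: the GC hands back the cell's address and dead targets are nulled, never deallocated, which is why it may run on the collecting thread. A hedged sketch of registering one (the Node type and the WeakCallback signature are assumptions):

```cpp
// Sketch: register a single weak cell so the GC clears it when the target
// dies. Clearing never frees memory, so this is safe on the GC thread.
void registerWeakNodeCell(ThreadHeap* heap, Node** cell)
{
    heap->pushGlobalWeakCallback(
        reinterpret_cast<void**>(cell),
        [](Visitor*, void* object) {
            Node** c = reinterpret_cast<Node**>(object);
            if (*c && !ThreadHeap::isHeapObjectAlive(*c))
                *c = nullptr;
        });
}
```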
| 560 | 597 |
| 561 } // namespace blink | 598 } // namespace blink |
| 562 | 599 |
| 563 #endif // Heap_h | 600 #endif // Heap_h |