Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 14 matching lines...) Expand all Loading... | |
| 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 29 */ | 29 */ |
| 30 | 30 |
| 31 #ifndef Heap_h | 31 #ifndef Heap_h |
| 32 #define Heap_h | 32 #define Heap_h |
| 33 | 33 |
| 34 #include "platform/PlatformExport.h" | 34 #include "platform/PlatformExport.h" |
| 35 #include "platform/heap/CallbackStack.h" | |
| 35 #include "platform/heap/GCInfo.h" | 36 #include "platform/heap/GCInfo.h" |
| 36 #include "platform/heap/HeapPage.h" | 37 #include "platform/heap/HeapPage.h" |
| 37 #include "platform/heap/PageMemory.h" | 38 #include "platform/heap/PageMemory.h" |
| 38 #include "platform/heap/ThreadState.h" | 39 #include "platform/heap/ThreadState.h" |
| 39 #include "platform/heap/Visitor.h" | 40 #include "platform/heap/Visitor.h" |
| 40 #include "wtf/AddressSanitizer.h" | 41 #include "wtf/AddressSanitizer.h" |
| 41 #include "wtf/Assertions.h" | 42 #include "wtf/Assertions.h" |
| 42 #include "wtf/Atomics.h" | 43 #include "wtf/Atomics.h" |
| 43 #include "wtf/Forward.h" | 44 #include "wtf/Forward.h" |
| 44 | 45 |
| (...skipping 19 matching lines...) Expand all Loading... | |
| 64 template<typename T> | 65 template<typename T> |
| 65 class ObjectAliveTrait<T, true> { | 66 class ObjectAliveTrait<T, true> { |
| 66 public: | 67 public: |
| 67 static bool isHeapObjectAlive(T* object) | 68 static bool isHeapObjectAlive(T* object) |
| 68 { | 69 { |
| 69 static_assert(sizeof(T), "T must be fully defined"); | 70 static_assert(sizeof(T), "T must be fully defined"); |
| 70 return object->isHeapObjectAlive(); | 71 return object->isHeapObjectAlive(); |
| 71 } | 72 } |
| 72 }; | 73 }; |
| 73 | 74 |
| 75 class PLATFORM_EXPORT GCData { | |
| 76 public: | |
| 77 GCData(ThreadState*, BlinkGC::StackState, BlinkGC::GCType); | |
| 78 | |
| 79 bool parkAllThreads(BlinkGC::StackState, BlinkGC::GCType); | |
| 80 | |
| 81 ThreadState* threadState() const { return m_state; } | |
| 82 Visitor* visitor() const { return m_visitor.get(); } | |
| 83 CallbackStack* markingStack() const { return m_markingStack.get(); } | |
| 84 CallbackStack* postMarkingCallbackStack() const { return m_postMarkingCallbackStack.get(); } | |
| 85 CallbackStack* globalWeakCallbackStack() const { return m_globalWeakCallbackStack.get(); } | |
| 86 CallbackStack* ephemeronStack() const { return m_ephemeronStack.get(); } | |
| 87 | |
| 88 private: | |
| 89 ThreadState* m_state; | |
| 90 OwnPtr<Visitor> m_visitor; | |
| 91 OwnPtr<CallbackStack> m_markingStack; | |
| 92 OwnPtr<CallbackStack> m_postMarkingCallbackStack; | |
| 93 OwnPtr<CallbackStack> m_globalWeakCallbackStack; | |
| 94 OwnPtr<CallbackStack> m_ephemeronStack; | |
| 95 }; | |
| 96 | |
| 74 class PLATFORM_EXPORT Heap { | 97 class PLATFORM_EXPORT Heap { |
| 75 public: | 98 public: |
| 76 static void init(); | 99 static void init(); |
| 77 static void shutdown(); | 100 static void shutdown(); |
| 78 static void doShutdown(); | 101 static void doShutdown(); |
| 79 | 102 |
| 80 static CrossThreadPersistentRegion& crossThreadPersistentRegion(); | 103 static CrossThreadPersistentRegion& crossThreadPersistentRegion(); |
| 81 | 104 |
| 82 #if ENABLE(ASSERT) | 105 #if ENABLE(ASSERT) |
| 83 static BasePage* findPageFromAddress(Address); | 106 static BasePage* findPageFromAddress(Address); |
| (...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 133 static_assert(IsGarbageCollectedType<T>::value, "only objects deriving from GarbageCollected can be used."); | 156 static_assert(IsGarbageCollectedType<T>::value, "only objects deriving from GarbageCollected can be used."); |
| 134 BasePage* page = pageFromObject(objectPointer); | 157 BasePage* page = pageFromObject(objectPointer); |
| 135 if (page->hasBeenSwept()) | 158 if (page->hasBeenSwept()) |
| 136 return false; | 159 return false; |
| 137 ASSERT(page->heap()->threadState()->isSweepingInProgress()); | 160 ASSERT(page->heap()->threadState()->isSweepingInProgress()); |
| 138 | 161 |
| 139 return !Heap::isHeapObjectAlive(const_cast<T*>(objectPointer)); | 162 return !Heap::isHeapObjectAlive(const_cast<T*>(objectPointer)); |
| 140 } | 163 } |
| 141 | 164 |
| 142 // Push a trace callback on the marking stack. | 165 // Push a trace callback on the marking stack. |
| 143 static void pushTraceCallback(void* containerObject, TraceCallback); | 166 static void pushTraceCallback(void* containerObject, TraceCallback, GCData*);
|
haraken
2016/01/28 15:52:49
Nit: These push methods should be members of GCData
keishi
2016/02/29 06:02:32
I moved the callback stack related methods to Visitor
| |
| 144 | 167 |
| 145 // Push a trace callback on the post-marking callback stack. These | 168 // Push a trace callback on the post-marking callback stack. These |
| 146 // callbacks are called after normal marking (including ephemeron | 169 // callbacks are called after normal marking (including ephemeron |
| 147 // iteration). | 170 // iteration). |
| 148 static void pushPostMarkingCallback(void*, TraceCallback); | 171 static void pushPostMarkingCallback(void*, TraceCallback, GCData*); |
| 149 | 172 |
| 150 // Add a weak pointer callback to the weak callback work list. General | 173 // Add a weak pointer callback to the weak callback work list. General |
| 151 // object pointer callbacks are added to a thread local weak callback work | 174 // object pointer callbacks are added to a thread local weak callback work |
| 152 // list and the callback is called on the thread that owns the object, with | 175 // list and the callback is called on the thread that owns the object, with |
| 153 // the closure pointer as an argument. Most of the time, the closure and | 176 // the closure pointer as an argument. Most of the time, the closure and |
| 154 // the containerObject can be the same thing, but the containerObject is | 177 // the containerObject can be the same thing, but the containerObject is |
| 155 // constrained to be on the heap, since the heap is used to identify the | 178 // constrained to be on the heap, since the heap is used to identify the |
| 156 // correct thread. | 179 // correct thread. |
| 157 static void pushThreadLocalWeakCallback(void* closure, void* containerObject, WeakCallback); | 180 static void pushThreadLocalWeakCallback(void* closure, void* containerObject, WeakCallback); |
| 158 | 181 |
| 159 // Similar to the more general pushThreadLocalWeakCallback, but cell | 182 // Similar to the more general pushThreadLocalWeakCallback, but cell |
| 160 // pointer callbacks are added to a static callback work list and the weak | 183 // pointer callbacks are added to a static callback work list and the weak |
| 161 // callback is performed on the thread performing garbage collection. This | 184 // callback is performed on the thread performing garbage collection. This |
| 162 // is OK because cells are just cleared and no deallocation can happen. | 185 // is OK because cells are just cleared and no deallocation can happen. |
| 163 static void pushGlobalWeakCallback(void** cell, WeakCallback); | 186 static void pushGlobalWeakCallback(void** cell, WeakCallback, GCData*); |
| 164 | 187 |
| 165 // Pop the top of a marking stack and call the callback with the visitor | 188 // Pop the top of a marking stack and call the callback with the visitor |
| 166 // and the object. Returns false when there is nothing more to do. | 189 // and the object. Returns false when there is nothing more to do. |
| 167 static bool popAndInvokeTraceCallback(Visitor*); | 190 static bool popAndInvokeTraceCallback(Visitor*); |
| 168 | 191 |
| 169 // Remove an item from the post-marking callback stack and call | 192 // Remove an item from the post-marking callback stack and call |
| 170 // the callback with the visitor and the object pointer. Returns | 193 // the callback with the visitor and the object pointer. Returns |
| 171 // false when there is nothing more to do. | 194 // false when there is nothing more to do. |
| 172 static bool popAndInvokePostMarkingCallback(Visitor*); | 195 static bool popAndInvokePostMarkingCallback(Visitor*); |
| 173 | 196 |
| 174 // Remove an item from the weak callback work list and call the callback | 197 // Remove an item from the weak callback work list and call the callback |
| 175 // with the visitor and the closure pointer. Returns false when there is | 198 // with the visitor and the closure pointer. Returns false when there is |
| 176 // nothing more to do. | 199 // nothing more to do. |
| 177 static bool popAndInvokeGlobalWeakCallback(Visitor*); | 200 static bool popAndInvokeGlobalWeakCallback(Visitor*); |
| 178 | 201 |
| 179 // Register an ephemeron table for fixed-point iteration. | 202 // Register an ephemeron table for fixed-point iteration. |
| 180 static void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback); | 203 static void registerWeakTable(void* containerObject, EphemeronCallback, EphemeronCallback, GCData*); |
| 181 #if ENABLE(ASSERT) | 204 #if ENABLE(ASSERT) |
| 182 static bool weakTableRegistered(const void*); | 205 static bool weakTableRegistered(const void*, GCData*); |
| 183 #endif | 206 #endif |
| 184 | 207 |
| 185 static inline size_t allocationSizeFromSize(size_t size) | 208 static inline size_t allocationSizeFromSize(size_t size) |
| 186 { | 209 { |
| 187 // Check the size before computing the actual allocation size. The | 210 // Check the size before computing the actual allocation size. The |
| 188 // allocation size calculation can overflow for large sizes and the check | 211 // allocation size calculation can overflow for large sizes and the check |
| 189 // therefore has to happen before any calculation on the size. | 212 // therefore has to happen before any calculation on the size. |
| 190 RELEASE_ASSERT(size < maxHeapObjectSize); | 213 RELEASE_ASSERT(size < maxHeapObjectSize); |
| 191 | 214 |
| 192 // Add space for header. | 215 // Add space for header. |
| (...skipping 18 matching lines...) Expand all Loading... | |
| 211 | 234 |
| 212 static void preGC(); | 235 static void preGC(); |
| 213 static void postGC(BlinkGC::GCType); | 236 static void postGC(BlinkGC::GCType); |
| 214 | 237 |
| 215 // Conservatively checks whether an address is a pointer in any of the | 238 // Conservatively checks whether an address is a pointer in any of the |
| 216 // thread heaps. If so marks the object pointed to as live. | 239 // thread heaps. If so marks the object pointed to as live. |
| 217 static Address checkAndMarkPointer(Visitor*, Address); | 240 static Address checkAndMarkPointer(Visitor*, Address); |
| 218 | 241 |
| 219 static size_t objectPayloadSizeForTesting(); | 242 static size_t objectPayloadSizeForTesting(); |
| 220 | 243 |
| 221 static void flushHeapDoesNotContainCache(); | 244 static void increaseTotalAllocatedObjectSize(size_t delta) { atomicAdd(&s_totalAllocatedObjectSize, static_cast<long>(delta)); } |
| 222 | 245 static void decreaseTotalAllocatedObjectSize(size_t delta) { atomicSubtract(&s_totalAllocatedObjectSize, static_cast<long>(delta)); } |
| 223 static FreePagePool* freePagePool() { return s_freePagePool; } | 246 static size_t totalAllocatedObjectSize() { return acquireLoad(&s_totalAllocatedObjectSize); } |
| 224 static OrphanedPagePool* orphanedPagePool() { return s_orphanedPagePool; } | 247 static void increaseTotalMarkedObjectSize(size_t delta) { atomicAdd(&s_totalMarkedObjectSize, static_cast<long>(delta)); } |
| 225 | 248 static size_t totalMarkedObjectSize() { return acquireLoad(&s_totalMarkedObjectSize); } |
| 226 // This look-up uses the region search tree and a negative contains cache to | 249 static void increaseTotalAllocatedSpace(size_t delta) { atomicAdd(&s_totalAllocatedSpace, static_cast<long>(delta)); } |
| 227 // provide an efficient mapping from arbitrary addresses to the containing | 250 static void decreaseTotalAllocatedSpace(size_t delta) { atomicSubtract(&s_totalAllocatedSpace, static_cast<long>(delta)); } |
| 228 // heap-page if one exists. | 251 static size_t totalAllocatedSpace() { return acquireLoad(&s_totalAllocatedSpace); } |
| 229 static BasePage* lookup(Address); | |
| 230 static void addPageMemoryRegion(PageMemoryRegion*); | |
| 231 static void removePageMemoryRegion(PageMemoryRegion*); | |
| 232 | 252 |
| 233 static const GCInfo* gcInfo(size_t gcInfoIndex) | 253 static const GCInfo* gcInfo(size_t gcInfoIndex) |
| 234 { | 254 { |
| 235 ASSERT(gcInfoIndex >= 1); | 255 ASSERT(gcInfoIndex >= 1); |
| 236 ASSERT(gcInfoIndex < GCInfoTable::maxIndex); | 256 ASSERT(gcInfoIndex < GCInfoTable::maxIndex); |
| 237 ASSERT(s_gcInfoTable); | 257 ASSERT(s_gcInfoTable); |
| 238 const GCInfo* info = s_gcInfoTable[gcInfoIndex]; | 258 const GCInfo* info = s_gcInfoTable[gcInfoIndex]; |
| 239 ASSERT(info); | 259 ASSERT(info); |
| 240 return info; | 260 return info; |
| 241 } | 261 } |
| 242 | 262 |
| 243 static void setMarkedObjectSizeAtLastCompleteSweep(size_t size) { releaseStore(&s_markedObjectSizeAtLastCompleteSweep, size); } | |
| 244 static size_t markedObjectSizeAtLastCompleteSweep() { return acquireLoad(&s_markedObjectSizeAtLastCompleteSweep); } | |
| 245 static void increaseAllocatedObjectSize(size_t delta) { atomicAdd(&s_allocatedObjectSize, static_cast<long>(delta)); } | |
| 246 static void decreaseAllocatedObjectSize(size_t delta) { atomicSubtract(&s_allocatedObjectSize, static_cast<long>(delta)); } | |
| 247 static size_t allocatedObjectSize() { return acquireLoad(&s_allocatedObjectSize); } | |
| 248 static void increaseMarkedObjectSize(size_t delta) { atomicAdd(&s_markedObjectSize, static_cast<long>(delta)); } | |
| 249 static size_t markedObjectSize() { return acquireLoad(&s_markedObjectSize); } | |
| 250 static void increaseAllocatedSpace(size_t delta) { atomicAdd(&s_allocatedSpace, static_cast<long>(delta)); } | |
| 251 static void decreaseAllocatedSpace(size_t delta) { atomicSubtract(&s_allocatedSpace, static_cast<long>(delta)); } | |
| 252 static size_t allocatedSpace() { return acquireLoad(&s_allocatedSpace); } | |
| 253 static size_t objectSizeAtLastGC() { return acquireLoad(&s_objectSizeAtLastGC); } | |
| 254 static void increaseWrapperCount(size_t delta) { atomicAdd(&s_wrapperCount, static_cast<long>(delta)); } | |
| 255 static void decreaseWrapperCount(size_t delta) { atomicSubtract(&s_wrapperCount, static_cast<long>(delta)); } | |
| 256 static size_t wrapperCount() { return acquireLoad(&s_wrapperCount); } | |
| 257 static size_t wrapperCountAtLastGC() { return acquireLoad(&s_wrapperCountAtLastGC); } | |
| 258 static void increaseCollectedWrapperCount(size_t delta) { atomicAdd(&s_collectedWrapperCount, static_cast<long>(delta)); } | |
| 259 static size_t collectedWrapperCount() { return acquireLoad(&s_collectedWrapperCount); } | |
| 260 static size_t partitionAllocSizeAtLastGC() { return acquireLoad(&s_partitionAllocSizeAtLastGC); } | |
| 261 | |
| 262 static double estimatedMarkingTime(); | |
| 263 static void reportMemoryUsageHistogram(); | 263 static void reportMemoryUsageHistogram(); |
| 264 static void reportMemoryUsageForTracing(); | 264 static void reportMemoryUsageForTracing(); |
| 265 | 265 |
| 266 #if ENABLE(ASSERT) | |
| 267 static uint16_t gcGeneration() { return s_gcGeneration; } | |
| 268 #endif | |
| 269 | |
| 270 private: | 266 private: |
| 271 // Reset counters that track live and allocated-since-last-GC sizes. | 267 // Reset counters that track live and allocated-since-last-GC sizes. |
| 272 static void resetHeapCounters(); | 268 static void resetHeapCounters(); |
| 273 | 269 |
| 274 static int heapIndexForObjectSize(size_t); | 270 static int heapIndexForObjectSize(size_t); |
| 275 static bool isNormalHeapIndex(int); | 271 static bool isNormalHeapIndex(int); |
| 276 | 272 |
| 277 static CallbackStack* s_markingStack; | |
| 278 static CallbackStack* s_postMarkingCallbackStack; | |
| 279 static CallbackStack* s_globalWeakCallbackStack; | |
| 280 static CallbackStack* s_ephemeronStack; | |
| 281 static HeapDoesNotContainCache* s_heapDoesNotContainCache; | |
| 282 static bool s_shutdownCalled; | 273 static bool s_shutdownCalled; |
| 283 static FreePagePool* s_freePagePool; | 274 static bool s_doShutdownDone; |
| 284 static OrphanedPagePool* s_orphanedPagePool; | 275 |
| 285 static RegionTree* s_regionTree; | 276 // Stats for the entire Oilpan heap. |
| 286 static size_t s_allocatedSpace; | 277 static size_t s_totalAllocatedSpace; |
| 287 static size_t s_allocatedObjectSize; | 278 static size_t s_totalAllocatedObjectSize; |
| 288 static size_t s_objectSizeAtLastGC; | 279 static size_t s_totalMarkedObjectSize; |
| 289 static size_t s_markedObjectSize; | |
| 290 static size_t s_markedObjectSizeAtLastCompleteSweep; | |
| 291 static size_t s_wrapperCount; | |
| 292 static size_t s_wrapperCountAtLastGC; | |
| 293 static size_t s_collectedWrapperCount; | |
| 294 static size_t s_partitionAllocSizeAtLastGC; | |
| 295 static double s_estimatedMarkingTimePerByte; | |
| 296 #if ENABLE(ASSERT) | |
| 297 static uint16_t s_gcGeneration; | |
| 298 #endif | |
| 299 | 280 |
| 300 friend class ThreadState; | 281 friend class ThreadState; |
| 282 friend class MultiThreadGCGroup; | |
| 301 }; | 283 }; |
| 302 | 284 |
| 303 template<typename T> | 285 template<typename T> |
| 304 struct IsEagerlyFinalizedType { | 286 struct IsEagerlyFinalizedType { |
| 305 private: | 287 private: |
| 306 typedef char YesType; | 288 typedef char YesType; |
| 307 struct NoType { | 289 struct NoType { |
| 308 char padding[8]; | 290 char padding[8]; |
| 309 }; | 291 }; |
| 310 | 292 |
| (...skipping 177 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 488 void VisitorHelper<Derived>::handleWeakCell(Visitor* self, void* object) | 470 void VisitorHelper<Derived>::handleWeakCell(Visitor* self, void* object) |
| 489 { | 471 { |
| 490 T** cell = reinterpret_cast<T**>(object); | 472 T** cell = reinterpret_cast<T**>(object); |
| 491 if (*cell && !ObjectAliveTrait<T>::isHeapObjectAlive(*cell)) | 473 if (*cell && !ObjectAliveTrait<T>::isHeapObjectAlive(*cell)) |
| 492 *cell = nullptr; | 474 *cell = nullptr; |
| 493 } | 475 } |
| 494 | 476 |
| 495 } // namespace blink | 477 } // namespace blink |
| 496 | 478 |
| 497 #endif // Heap_h | 479 #endif // Heap_h |
| OLD | NEW |