Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 46 | 46 |
| 47 namespace v8 { | 47 namespace v8 { |
| 48 class Isolate; | 48 class Isolate; |
| 49 }; | 49 }; |
| 50 | 50 |
| 51 namespace blink { | 51 namespace blink { |
| 52 | 52 |
| 53 class BasePage; | 53 class BasePage; |
| 54 class CallbackStack; | 54 class CallbackStack; |
| 55 class CrossThreadPersistentRegion; | 55 class CrossThreadPersistentRegion; |
| 56 class FreePagePool; | |
| 56 struct GCInfo; | 57 struct GCInfo; |
| 57 class GarbageCollectedMixinConstructorMarker; | 58 class GarbageCollectedMixinConstructorMarker; |
| 59 class HeapDoesNotContainCache; | |
| 58 class HeapObjectHeader; | 60 class HeapObjectHeader; |
| 61 class OrphanedPagePool; | |
| 59 class PersistentNode; | 62 class PersistentNode; |
| 60 class PersistentRegion; | 63 class PersistentRegion; |
| 64 class XThreadPersistentRegion; | |
| 61 class BaseHeap; | 65 class BaseHeap; |
| 62 class SafePointAwareMutexLocker; | 66 class SafePointAwareMutexLocker; |
| 63 class SafePointBarrier; | 67 class SafePointBarrier; |
| 64 class ThreadState; | 68 class ThreadState; |
| 65 class Visitor; | 69 class Visitor; |
| 70 class PageMemoryRegion; | |
| 66 | 71 |
| 67 // Declare that a class has a pre-finalizer. The pre-finalizer is called | 72 // Declare that a class has a pre-finalizer. The pre-finalizer is called |
| 68 // before any object gets swept, so it is safe to touch on-heap objects | 73 // before any object gets swept, so it is safe to touch on-heap objects |
| 69 // that may be collected in the same GC cycle. If you cannot avoid touching | 74 // that may be collected in the same GC cycle. If you cannot avoid touching |
| 70 // on-heap objects in a destructor (which is not allowed), you can consider | 75 // on-heap objects in a destructor (which is not allowed), you can consider |
| 71 // using the pre-finalizer. The only restriction is that the pre-finalizer | 76 // using the pre-finalizer. The only restriction is that the pre-finalizer |
| 72 // must not resurrect dead objects (e.g., store unmarked objects into | 77 // must not resurrect dead objects (e.g., store unmarked objects into |
| 73 // Members etc). The pre-finalizer is called on the thread that registered | 78 // Members etc). The pre-finalizer is called on the thread that registered |
| 74 // the pre-finalizer. | 79 // the pre-finalizer. |
| 75 // | 80 // |
| (...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 117 #define WILL_BE_USING_PRE_FINALIZER(Class, method) USING_PRE_FINALIZER(Class, me thod) | 122 #define WILL_BE_USING_PRE_FINALIZER(Class, method) USING_PRE_FINALIZER(Class, me thod) |
| 118 #else | 123 #else |
| 119 #define WILL_BE_USING_PRE_FINALIZER(Class, method) | 124 #define WILL_BE_USING_PRE_FINALIZER(Class, method) |
| 120 #endif | 125 #endif |
| 121 | 126 |
| 122 class PLATFORM_EXPORT ThreadState { | 127 class PLATFORM_EXPORT ThreadState { |
| 123 WTF_MAKE_NONCOPYABLE(ThreadState); | 128 WTF_MAKE_NONCOPYABLE(ThreadState); |
| 124 public: | 129 public: |
| 125 typedef std::pair<void*, PreFinalizerCallback> PreFinalizer; | 130 typedef std::pair<void*, PreFinalizerCallback> PreFinalizer; |
| 126 | 131 |
| 132 // Heap stats that concern a GC. The main thread's GCHeapStats contains stat s for all threads in Heap::attachedThreads(). | |
| 133 class GCHeapStats { | |
|
haraken
2016/01/07 08:06:22
Can we make this change (i.e., move a bunch of sta
| |
| 134 public: | |
| 135 GCHeapStats(); | |
| 136 void setMarkedObjectSizeAtLastCompleteSweep(size_t size) { releaseStore( &m_markedObjectSizeAtLastCompleteSweep, size); } | |
| 137 size_t markedObjectSizeAtLastCompleteSweep() { return acquireLoad(&m_mar kedObjectSizeAtLastCompleteSweep); } | |
| 138 void increaseAllocatedObjectSize(size_t delta) { atomicAdd(&m_allocatedO bjectSize, static_cast<long>(delta)); } | |
| 139 void decreaseAllocatedObjectSize(size_t delta) { atomicSubtract(&m_alloc atedObjectSize, static_cast<long>(delta)); } | |
| 140 size_t allocatedObjectSize() { return acquireLoad(&m_allocatedObjectSize ); } | |
| 141 void increaseMarkedObjectSize(size_t delta) { atomicAdd(&m_markedObjectS ize, static_cast<long>(delta)); } | |
| 142 size_t markedObjectSize() { return acquireLoad(&m_markedObjectSize); } | |
| 143 void increaseAllocatedSpace(size_t delta) { atomicAdd(&m_allocatedSpace, static_cast<long>(delta)); } | |
| 144 void decreaseAllocatedSpace(size_t delta) { atomicSubtract(&m_allocatedS pace, static_cast<long>(delta)); } | |
| 145 size_t allocatedSpace() { return acquireLoad(&m_allocatedSpace); } | |
| 146 size_t objectSizeAtLastGC() { return acquireLoad(&m_objectSizeAtLastGC); } | |
| 147 void increaseWrapperCount(size_t delta) { atomicAdd(&m_wrapperCount, sta tic_cast<long>(delta)); } | |
| 148 void decreaseWrapperCount(size_t delta) { atomicSubtract(&m_wrapperCount , static_cast<long>(delta)); } | |
| 149 size_t wrapperCount() { return acquireLoad(&m_wrapperCount); } | |
| 150 size_t wrapperCountAtLastGC() { return acquireLoad(&m_wrapperCountAtLast GC); } | |
| 151 void increaseCollectedWrapperCount(size_t delta) { atomicAdd(&m_collecte dWrapperCount, static_cast<long>(delta)); } | |
| 152 size_t collectedWrapperCount() { return acquireLoad(&m_collectedWrapperC ount); } | |
| 153 size_t partitionAllocSizeAtLastGC() { return acquireLoad(&m_partitionAll ocSizeAtLastGC); } | |
| 154 void setEstimatedMarkingTimePerByte(double estimatedMarkingTimePerByte) { m_estimatedMarkingTimePerByte = estimatedMarkingTimePerByte; } | |
| 155 double estimatedMarkingTimePerByte() const { return m_estimatedMarkingTi mePerByte; } | |
| 156 double estimatedMarkingTime(); | |
| 157 void reset(); | |
| 158 | |
| 159 #if ENABLE(ASSERT) | |
| 160 void incrementGcGeneration() | |
| 161 { | |
| 162 if (++m_gcGeneration == 0) | |
| 163 m_gcGeneration = 1; | |
| 164 } | |
| 165 uint16_t gcGeneration() { return m_gcGeneration; } | |
| 166 #endif | |
| 167 | |
| 168 private: | |
| 169 size_t m_allocatedSpace; | |
| 170 size_t m_allocatedObjectSize; | |
| 171 size_t m_objectSizeAtLastGC; | |
| 172 size_t m_markedObjectSize; | |
| 173 size_t m_markedObjectSizeAtLastCompleteSweep; | |
| 174 size_t m_wrapperCount; | |
| 175 size_t m_wrapperCountAtLastGC; | |
| 176 size_t m_collectedWrapperCount; | |
| 177 size_t m_partitionAllocSizeAtLastGC; | |
| 178 double m_estimatedMarkingTimePerByte; | |
| 179 #if ENABLE(ASSERT) | |
| 180 uint16_t m_gcGeneration; | |
| 181 #endif | |
| 182 }; | |
| 183 | |
| 184 // A RegionTree is a simple binary search tree of PageMemoryRegions sorted | |
| 185 // by base addresses. | |
| 186 class RegionTree { | |
|
haraken
2016/01/07 08:06:22
Let's move out the RegionTree to a separate file.
| |
| 187 public: | |
| 188 explicit RegionTree(PageMemoryRegion* region) : m_region(region), m_left (nullptr), m_right(nullptr) { } | |
| 189 ~RegionTree() | |
| 190 { | |
| 191 delete m_left; | |
| 192 delete m_right; | |
| 193 } | |
| 194 PageMemoryRegion* lookup(Address); | |
| 195 static void add(RegionTree*, RegionTree**); | |
| 196 static void remove(PageMemoryRegion*, RegionTree**); | |
| 197 private: | |
| 198 PageMemoryRegion* m_region; | |
| 199 RegionTree* m_left; | |
| 200 RegionTree* m_right; | |
| 201 }; | |
| 202 | |
| 127 // See setGCState() for possible state transitions. | 203 // See setGCState() for possible state transitions. |
| 128 enum GCState { | 204 enum GCState { |
| 129 NoGCScheduled, | 205 NoGCScheduled, |
| 130 IdleGCScheduled, | 206 IdleGCScheduled, |
| 131 PreciseGCScheduled, | 207 PreciseGCScheduled, |
| 132 FullGCScheduled, | 208 FullGCScheduled, |
| 133 PageNavigationGCScheduled, | 209 PageNavigationGCScheduled, |
| 134 GCRunning, | 210 GCRunning, |
| 135 EagerSweepScheduled, | 211 EagerSweepScheduled, |
| 136 LazySweepScheduled, | 212 LazySweepScheduled, |
| 137 Sweeping, | 213 Sweeping, |
| 138 SweepingAndIdleGCScheduled, | 214 SweepingAndIdleGCScheduled, |
| 139 SweepingAndPreciseGCScheduled, | 215 SweepingAndPreciseGCScheduled, |
| 140 }; | 216 }; |
| 141 | 217 |
| 218 enum PerThreadHeapState { | |
| 219 PerThreadHeapEnabled, | |
| 220 PerThreadHeapDisabled | |
| 221 }; | |
| 222 | |
| 142 // The NoAllocationScope class is used in debug mode to catch unwanted | 223 // The NoAllocationScope class is used in debug mode to catch unwanted |
| 143 // allocations. E.g. allocations during GC. | 224 // allocations. E.g. allocations during GC. |
| 144 class NoAllocationScope final { | 225 class NoAllocationScope final { |
| 145 public: | 226 public: |
| 146 explicit NoAllocationScope(ThreadState* state) : m_state(state) | 227 explicit NoAllocationScope(ThreadState* state) : m_state(state) |
| 147 { | 228 { |
| 148 m_state->enterNoAllocationScope(); | 229 m_state->enterNoAllocationScope(); |
| 149 } | 230 } |
| 150 ~NoAllocationScope() | 231 ~NoAllocationScope() |
| 151 { | 232 { |
| (...skipping 12 matching lines...) Expand all Loading... | |
| 164 } | 245 } |
| 165 ~SweepForbiddenScope() | 246 ~SweepForbiddenScope() |
| 166 { | 247 { |
| 167 ASSERT(m_state->m_sweepForbidden); | 248 ASSERT(m_state->m_sweepForbidden); |
| 168 m_state->m_sweepForbidden = false; | 249 m_state->m_sweepForbidden = false; |
| 169 } | 250 } |
| 170 private: | 251 private: |
| 171 ThreadState* m_state; | 252 ThreadState* m_state; |
| 172 }; | 253 }; |
| 173 | 254 |
| 174 // The set of ThreadStates for all threads attached to the Blink | 255 // The set of ThreadStates that are bound to the main thread's GC. |
| 175 // garbage collector. | |
| 176 using AttachedThreadStateSet = HashSet<ThreadState*>; | 256 using AttachedThreadStateSet = HashSet<ThreadState*>; |
| 177 static AttachedThreadStateSet& attachedThreads(); | 257 static AttachedThreadStateSet& attachedThreads(); |
| 178 static RecursiveMutex& threadAttachMutex(); | 258 static RecursiveMutex& threadAttachMutex(); |
| 179 static void lockThreadAttachMutex(); | 259 static void lockThreadAttachMutex(); |
| 180 static void unlockThreadAttachMutex(); | 260 static void unlockThreadAttachMutex(); |
| 181 | 261 |
| 182 // Initialize threading infrastructure. Should be called from the main | 262 // Initialize threading infrastructure. Should be called from the main |
| 183 // thread. | 263 // thread. |
| 184 static void init(); | 264 static void init(); |
| 185 static void shutdown(); | 265 static void shutdown(); |
| 186 static void shutdownHeapIfNecessary(); | 266 static void shutdownHeapIfNecessary(); |
| 187 bool isTerminating() { return m_isTerminating; } | 267 bool isTerminating() { return m_isTerminating; } |
| 188 | 268 |
| 189 static void attachMainThread(); | 269 static void attachMainThread(); |
| 190 static void detachMainThread(); | 270 static void detachMainThread(); |
| 191 | 271 |
| 192 // Trace all persistent roots, called when marking the managed heap objects. | 272 // Trace all persistent roots, called when marking the managed heap objects. |
| 193 static void visitPersistentRoots(Visitor*); | 273 static void visitPersistentRoots(Visitor*); |
| 194 | 274 |
| 195 // Trace all objects found on the stack, used when doing conservative GCs. | 275 // Trace all objects found on the stack, used when doing conservative GCs. |
| 196 static void visitStackRoots(Visitor*); | 276 static void visitStackRoots(Visitor*); |
| 197 | 277 |
| 198 // Associate ThreadState object with the current thread. After this | 278 // Associate ThreadState object with the current thread. After this |
| 199 // call thread can start using the garbage collected heap infrastructure. | 279 // call thread can start using the garbage collected heap infrastructure. |
| 200 // It also has to periodically check for safepoints. | 280 // It also has to periodically check for safepoints. |
| 201 static void attach(); | 281 static void attach(PerThreadHeapState); |
| 202 | 282 |
| 203 // Disassociate attached ThreadState from the current thread. The thread | 283 // Disassociate attached ThreadState from the current thread. The thread |
| 204 // can no longer use the garbage collected heap after this call. | 284 // can no longer use the garbage collected heap after this call. |
| 205 static void detach(); | 285 static void detach(); |
| 206 | 286 |
| 207 static ThreadState* current() | 287 static ThreadState* current() |
| 208 { | 288 { |
| 209 #if defined(__GLIBC__) || OS(ANDROID) || OS(FREEBSD) | 289 #if defined(__GLIBC__) || OS(ANDROID) || OS(FREEBSD) |
| 210 // TLS lookup is fast in these platforms. | 290 // TLS lookup is fast in these platforms. |
| 211 return **s_threadSpecific; | 291 return **s_threadSpecific; |
| (...skipping 10 matching lines...) Expand all Loading... | |
| 222 } | 302 } |
| 223 // TLS lookup is slow. | 303 // TLS lookup is slow. |
| 224 return **s_threadSpecific; | 304 return **s_threadSpecific; |
| 225 #endif | 305 #endif |
| 226 } | 306 } |
| 227 | 307 |
| 228 static ThreadState* mainThreadState() | 308 static ThreadState* mainThreadState() |
| 229 { | 309 { |
| 230 return reinterpret_cast<ThreadState*>(s_mainThreadStateStorage); | 310 return reinterpret_cast<ThreadState*>(s_mainThreadStateStorage); |
| 231 } | 311 } |
| 312 static ThreadState* forObject(const void*); | |
| 313 | |
| 314 GCHeapStats* heapStats() const { return m_heapStats; } | |
| 315 | |
| 316 CallbackStack* markingStack() const { return m_markingStack.get(); } | |
|
haraken
2016/01/07 08:06:22
Can we move these methods before landing this CL?
| |
| 317 CallbackStack* postMarkingCallbackStack() const { return m_postMarkingCallba ckStack.get(); } | |
| 318 CallbackStack* globalWeakCallbackStack() const { return m_globalWeakCallback Stack.get(); } | |
| 319 CallbackStack* ephemeronStack() const { return m_ephemeronStack.get(); } | |
| 320 | |
| 321 static void increaseTotalAllocatedObjectSize(size_t delta) { atomicAdd(&s_to talAllocatedObjectSize, static_cast<long>(delta)); } | |
| 322 static void decreaseTotalAllocatedObjectSize(size_t delta) { atomicSubtract( &s_totalAllocatedObjectSize, static_cast<long>(delta)); } | |
| 323 static size_t totalAllocatedObjectSize() { return acquireLoad(&s_totalAlloca tedObjectSize); } | |
| 324 static void increaseTotalMarkedObjectSize(size_t delta) { atomicAdd(&s_total MarkedObjectSize, static_cast<long>(delta)); } | |
| 325 static size_t totalMarkedObjectSize() { return acquireLoad(&s_totalMarkedObj ectSize); } | |
| 326 static void increaseTotalAllocatedSpace(size_t delta) { atomicAdd(&s_totalAllocatedSpace, static_cast<long>(delta)); } | |
| 327 static void decreaseTotalAllocatedSpace(size_t delta) { atomicSubtract(&s_to talAllocatedSpace, static_cast<long>(delta)); } | |
| 328 static size_t totalAllocatedSpace() { return acquireLoad(&s_totalAllocatedSp ace); } | |
| 329 | |
| 330 size_t markedObjectSizeAtLastCompleteSweep() { return m_heapStats->markedObj ectSizeAtLastCompleteSweep(); } | |
| 331 void increaseAllocatedObjectSize(size_t delta) | |
| 332 { | |
| 333 m_heapStats->increaseAllocatedObjectSize(delta); | |
| 334 increaseTotalAllocatedObjectSize(delta); | |
| 335 } | |
| 336 void decreaseAllocatedObjectSize(size_t delta) | |
| 337 { | |
| 338 m_heapStats->decreaseAllocatedObjectSize(delta); | |
| 339 decreaseTotalAllocatedObjectSize(delta); | |
| 340 } | |
| 341 size_t allocatedObjectSize() { return m_heapStats->allocatedObjectSize(); } | |
| 342 void increaseMarkedObjectSize(size_t delta) | |
| 343 { | |
| 344 m_heapStats->increaseMarkedObjectSize(delta); | |
| 345 increaseTotalMarkedObjectSize(delta); | |
| 346 } | |
| 347 size_t markedObjectSize() { return m_heapStats->markedObjectSize(); } | |
| 348 void increaseAllocatedSpace(size_t delta) | |
| 349 { | |
| 350 m_heapStats->increaseAllocatedSpace(delta); | |
| 351 increaseTotalAllocatedSpace(delta); | |
| 352 } | |
| 353 void decreaseAllocatedSpace(size_t delta) | |
| 354 { | |
| 355 m_heapStats->decreaseAllocatedSpace(delta); | |
| 356 decreaseTotalAllocatedSpace(delta); | |
| 357 } | |
| 358 size_t allocatedSpace() { return m_heapStats->allocatedSpace(); } | |
| 359 size_t objectSizeAtLastGC() { return m_heapStats->objectSizeAtLastGC(); } | |
| 360 void increaseWrapperCount(size_t delta) { m_heapStats->increaseWrapperCount( delta); } | |
| 361 void decreaseWrapperCount(size_t delta) { m_heapStats->decreaseWrapperCount( delta); } | |
| 362 size_t wrapperCount() { return m_heapStats->wrapperCount(); } | |
| 363 size_t wrapperCountAtLastGC() { return m_heapStats->wrapperCountAtLastGC(); } | |
| 364 void increaseCollectedWrapperCount(size_t delta) { m_heapStats->increaseColl ectedWrapperCount(delta); } | |
| 365 size_t collectedWrapperCount() { return m_heapStats->collectedWrapperCount() ; } | |
| 366 size_t partitionAllocSizeAtLastGC() { return m_heapStats->partitionAllocSize AtLastGC(); } | |
| 367 void setEstimatedMarkingTimePerByte(double estimatedMarkingTimePerByte) { m_ heapStats->setEstimatedMarkingTimePerByte(estimatedMarkingTimePerByte); } | |
| 368 | |
| 369 #if ENABLE(ASSERT) | |
| 370 void incrementGcGeneration() { m_heapStats->incrementGcGeneration(); } | |
| 371 uint16_t gcGeneration() { return m_heapStats->gcGeneration(); } | |
| 372 #endif | |
| 373 | |
| 374 HeapDoesNotContainCache* heapDoesNotContainCache() const { return m_heapDoes NotContainCache.get(); } | |
| 375 RegionTree* regionTree() const { return m_regionTree; } | |
| 376 void setRegionTree(RegionTree* tree) { m_regionTree = tree; } | |
| 377 | |
| 378 double estimatedMarkingTime(); | |
| 379 void reportMemoryUsageHistogram(); | |
| 380 void reportMemoryUsageForTracing(); | |
| 381 | |
| 382 void flushHeapDoesNotContainCache(); | |
| 383 | |
| 384 // Reset counters that track live and allocated-since-last-GC sizes. | |
| 385 void resetHeapCounters(); | |
| 232 | 386 |
| 233 bool isMainThread() const { return this == mainThreadState(); } | 387 bool isMainThread() const { return this == mainThreadState(); } |
| 234 #if ENABLE(ASSERT) | 388 #if ENABLE(ASSERT) |
| 235 bool checkThread() const { return m_thread == currentThread(); } | 389 bool checkThread() const { return m_thread == currentThread(); } |
| 236 #endif | 390 #endif |
| 237 | 391 |
| 238 void performIdleGC(double deadlineSeconds); | 392 void performIdleGC(double deadlineSeconds); |
| 239 void performIdleLazySweep(double deadlineSeconds); | 393 void performIdleLazySweep(double deadlineSeconds); |
| 240 | 394 |
| 241 void scheduleIdleGC(); | 395 void scheduleIdleGC(); |
| (...skipping 118 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 360 #if ENABLE(ASSERT) | 514 #if ENABLE(ASSERT) |
| 361 // Infrastructure to determine if an address is within one of the | 515 // Infrastructure to determine if an address is within one of the |
| 362 // address ranges for the Blink heap. If the address is in the Blink | 516 // address ranges for the Blink heap. If the address is in the Blink |
| 363 // heap the containing heap page is returned. | 517 // heap the containing heap page is returned. |
| 364 BasePage* findPageFromAddress(Address); | 518 BasePage* findPageFromAddress(Address); |
| 365 BasePage* findPageFromAddress(const void* pointer) { return findPageFromAddr ess(reinterpret_cast<Address>(const_cast<void*>(pointer))); } | 519 BasePage* findPageFromAddress(const void* pointer) { return findPageFromAddr ess(reinterpret_cast<Address>(const_cast<void*>(pointer))); } |
| 366 #endif | 520 #endif |
| 367 | 521 |
| 368 // A region of PersistentNodes allocated on the given thread. | 522 // A region of PersistentNodes allocated on the given thread. |
| 369 PersistentRegion* persistentRegion() const { return m_persistentRegion.get() ; } | 523 PersistentRegion* persistentRegion() const { return m_persistentRegion.get() ; } |
| 524 XThreadPersistentRegion* xThreadPersistentRegion() const { return m_xThreadP ersistentRegion.get(); } | |
| 370 // A region of PersistentNodes not owned by any particular thread. | 525 // A region of PersistentNodes not owned by any particular thread. |
| 371 static CrossThreadPersistentRegion& crossThreadPersistentRegion(); | 526 static CrossThreadPersistentRegion& crossThreadPersistentRegion(); |
|
haraken
2016/01/07 08:06:22
Not related to your CL, this should be moved to Heap
| |
| 372 | 527 |
| 373 // Visit local thread stack and trace all pointers conservatively. | 528 // Visit local thread stack and trace all pointers conservatively. |
| 374 void visitStack(Visitor*); | 529 void visitStack(Visitor*); |
| 375 | 530 |
| 376 // Visit the asan fake stack frame corresponding to a slot on the | 531 // Visit the asan fake stack frame corresponding to a slot on the |
| 377 // real machine stack if there is one. | 532 // real machine stack if there is one. |
| 378 void visitAsanFakeStackForPointer(Visitor*, Address); | 533 void visitAsanFakeStackForPointer(Visitor*, Address); |
| 379 | 534 |
| 380 // Visit all persistents allocated on this thread. | 535 // Visit all persistents allocated on this thread. |
| 381 void visitPersistents(Visitor*); | 536 void visitPersistents(Visitor*); |
| (...skipping 120 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 502 } | 657 } |
| 503 void allocationPointAdjusted(int heapIndex); | 658 void allocationPointAdjusted(int heapIndex); |
| 504 void promptlyFreed(size_t gcInfoIndex); | 659 void promptlyFreed(size_t gcInfoIndex); |
| 505 | 660 |
| 506 void accumulateSweepingTime(double time) { m_accumulatedSweepingTime += time ; } | 661 void accumulateSweepingTime(double time) { m_accumulatedSweepingTime += time ; } |
| 507 | 662 |
| 508 #if OS(WIN) && COMPILER(MSVC) | 663 #if OS(WIN) && COMPILER(MSVC) |
| 509 size_t threadStackSize(); | 664 size_t threadStackSize(); |
| 510 #endif | 665 #endif |
| 511 | 666 |
| 667 bool perThreadHeapEnabled() { return m_perThreadHeapEnabled; } | |
| 668 void removeFromRegionTree(PageMemoryRegion*); | |
| 669 | |
| 512 #if defined(LEAK_SANITIZER) | 670 #if defined(LEAK_SANITIZER) |
| 513 void registerStaticPersistentNode(PersistentNode*); | 671 void registerStaticPersistentNode(PersistentNode*); |
| 514 void releaseStaticPersistentNodes(); | 672 void releaseStaticPersistentNodes(); |
| 515 | 673 |
| 516 void enterStaticReferenceRegistrationDisabledScope(); | 674 void enterStaticReferenceRegistrationDisabledScope(); |
| 517 void leaveStaticReferenceRegistrationDisabledScope(); | 675 void leaveStaticReferenceRegistrationDisabledScope(); |
| 518 #endif | 676 #endif |
| 519 | 677 |
| 520 private: | 678 private: |
| 521 enum SnapshotType { | 679 enum SnapshotType { |
| 522 HeapSnapshot, | 680 HeapSnapshot, |
| 523 FreelistSnapshot | 681 FreelistSnapshot |
| 524 }; | 682 }; |
| 525 | 683 |
| 526 ThreadState(); | 684 ThreadState(PerThreadHeapState); |
|
haraken
2016/01/07 08:06:22
Add explicit.
| |
| 527 ~ThreadState(); | 685 ~ThreadState(); |
| 528 | 686 |
| 529 NO_SANITIZE_ADDRESS void copyStackUntilSafePointScope(); | 687 NO_SANITIZE_ADDRESS void copyStackUntilSafePointScope(); |
| 530 void clearSafePointScopeMarker() | 688 void clearSafePointScopeMarker() |
| 531 { | 689 { |
| 532 m_safePointStackCopy.clear(); | 690 m_safePointStackCopy.clear(); |
| 533 m_safePointScopeMarker = nullptr; | 691 m_safePointScopeMarker = nullptr; |
| 534 } | 692 } |
| 535 | 693 |
| 536 // shouldSchedule{Precise,Idle}GC and shouldForceConservativeGC | 694 // shouldSchedule{Precise,Idle}GC and shouldForceConservativeGC |
| (...skipping 74 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 611 // because it will introduce global constructor and destructor. | 769 // because it will introduce global constructor and destructor. |
| 612 // We would like to manage lifetime of the ThreadState attached | 770 // We would like to manage lifetime of the ThreadState attached |
| 613 // to the main thread explicitly instead and still use normal | 771 // to the main thread explicitly instead and still use normal |
| 614 // constructor and destructor for the ThreadState class. | 772 // constructor and destructor for the ThreadState class. |
| 615 // For this we reserve static storage for the main ThreadState | 773 // For this we reserve static storage for the main ThreadState |
| 616 // and lazily construct ThreadState in it using placement new. | 774 // and lazily construct ThreadState in it using placement new. |
| 617 static uint8_t s_mainThreadStateStorage[]; | 775 static uint8_t s_mainThreadStateStorage[]; |
| 618 | 776 |
| 619 ThreadIdentifier m_thread; | 777 ThreadIdentifier m_thread; |
| 620 OwnPtr<PersistentRegion> m_persistentRegion; | 778 OwnPtr<PersistentRegion> m_persistentRegion; |
| 779 OwnPtr<XThreadPersistentRegion> m_xThreadPersistentRegion; | |
| 621 BlinkGC::StackState m_stackState; | 780 BlinkGC::StackState m_stackState; |
| 622 #if OS(WIN) && COMPILER(MSVC) | 781 #if OS(WIN) && COMPILER(MSVC) |
| 623 size_t m_threadStackSize; | 782 size_t m_threadStackSize; |
| 624 #endif | 783 #endif |
| 625 intptr_t* m_startOfStack; | 784 intptr_t* m_startOfStack; |
| 626 intptr_t* m_endOfStack; | 785 intptr_t* m_endOfStack; |
| 627 | 786 |
| 628 void* m_safePointScopeMarker; | 787 void* m_safePointScopeMarker; |
| 629 Vector<Address> m_safePointStackCopy; | 788 Vector<Address> m_safePointStackCopy; |
| 630 bool m_atSafePoint; | 789 bool m_atSafePoint; |
| (...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 668 size_t m_disabledStaticPersistentsRegistration; | 827 size_t m_disabledStaticPersistentsRegistration; |
| 669 #endif | 828 #endif |
| 670 | 829 |
| 671 // Ideally we want to allocate an array of size |gcInfoTableMax| but it will | 830 // Ideally we want to allocate an array of size |gcInfoTableMax| but it will |
| 672 // waste memory. Thus we limit the array size to 2^8 and share one entry | 831 // waste memory. Thus we limit the array size to 2^8 and share one entry |
| 673 // with multiple types of vectors. This won't be an issue in practice, | 832 // with multiple types of vectors. This won't be an issue in practice, |
| 674 // since there will be less than 2^8 types of objects in common cases. | 833 // since there will be less than 2^8 types of objects in common cases. |
| 675 static const int likelyToBePromptlyFreedArraySize = (1 << 8); | 834 static const int likelyToBePromptlyFreedArraySize = (1 << 8); |
| 676 static const int likelyToBePromptlyFreedArrayMask = likelyToBePromptlyFreedA rraySize - 1; | 835 static const int likelyToBePromptlyFreedArrayMask = likelyToBePromptlyFreedA rraySize - 1; |
| 677 OwnPtr<int[]> m_likelyToBePromptlyFreed; | 836 OwnPtr<int[]> m_likelyToBePromptlyFreed; |
| 837 | |
| 838 // Per thread heap enabled ThreadStates will have their own GCHeapStats. | |
| 839 // Others will use the main thread's. | |
| 840 GCHeapStats* m_heapStats; | |
|
haraken
2016/01/07 08:06:22
How about using an OwnPtr?
| |
| 841 OwnPtr<CallbackStack> m_markingStack; | |
| 842 OwnPtr<CallbackStack> m_postMarkingCallbackStack; | |
| 843 OwnPtr<CallbackStack> m_globalWeakCallbackStack; | |
| 844 OwnPtr<CallbackStack> m_ephemeronStack; | |
| 845 OwnPtr<HeapDoesNotContainCache> m_heapDoesNotContainCache; | |
| 846 RegionTree* m_regionTree; | |
|
haraken
2016/01/07 08:06:22
How about using an OwnPtr?
| |
| 847 bool m_perThreadHeapEnabled; | |
| 848 | |
| 849 // Stats for the entire Oilpan heap. | |
| 850 static size_t s_totalAllocatedSpace; | |
| 851 static size_t s_totalAllocatedObjectSize; | |
| 852 static size_t s_totalMarkedObjectSize; | |
|
haraken
2016/01/07 08:06:22
Given that these variables keep track of the stats
| |
| 678 }; | 853 }; |
| 679 | 854 |
| 680 template<ThreadAffinity affinity> class ThreadStateFor; | 855 template<ThreadAffinity affinity> class ThreadStateFor; |
| 681 | 856 |
| 682 template<> class ThreadStateFor<MainThreadOnly> { | 857 template<> class ThreadStateFor<MainThreadOnly> { |
| 683 public: | 858 public: |
| 684 static ThreadState* state() | 859 static ThreadState* state() |
| 685 { | 860 { |
| 686 // This specialization must only be used from the main thread. | 861 // This specialization must only be used from the main thread. |
| 687 ASSERT(ThreadState::current()->isMainThread()); | 862 ASSERT(ThreadState::current()->isMainThread()); |
| 688 return ThreadState::mainThreadState(); | 863 return ThreadState::mainThreadState(); |
| 689 } | 864 } |
| 690 }; | 865 }; |
| 691 | 866 |
| 692 template<> class ThreadStateFor<AnyThread> { | 867 template<> class ThreadStateFor<AnyThread> { |
| 693 public: | 868 public: |
| 694 static ThreadState* state() { return ThreadState::current(); } | 869 static ThreadState* state() { return ThreadState::current(); } |
| 695 }; | 870 }; |
| 696 | 871 |
| 697 } // namespace blink | 872 } // namespace blink |
| 698 | 873 |
| 699 #endif // ThreadState_h | 874 #endif // ThreadState_h |
| OLD | NEW |