OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 99 matching lines...)
110 GCInfoTable::init(); | 110 GCInfoTable::init(); |
111 CallbackStackMemoryPool::instance().initialize(); | 111 CallbackStackMemoryPool::instance().initialize(); |
112 } | 112 } |
113 | 113 |
114 void ProcessHeap::resetHeapCounters() { | 114 void ProcessHeap::resetHeapCounters() { |
115 s_totalAllocatedObjectSize = 0; | 115 s_totalAllocatedObjectSize = 0; |
116 s_totalMarkedObjectSize = 0; | 116 s_totalMarkedObjectSize = 0; |
117 } | 117 } |
118 | 118 |
119 void ProcessHeap::shutdown() { | 119 void ProcessHeap::shutdown() { |
120 ASSERT(!s_shutdownComplete); | 120 DCHECK(!s_shutdownComplete); |
121 | 121 |
122 { | 122 { |
123 // The main thread must be the last thread that gets detached. | 123 // The main thread must be the last thread that gets detached. |
124 MutexLocker locker(ThreadHeap::allHeapsMutex()); | 124 MutexLocker locker(ThreadHeap::allHeapsMutex()); |
125 RELEASE_ASSERT(ThreadHeap::allHeaps().isEmpty()); | 125 CHECK(ThreadHeap::allHeaps().isEmpty()); |
126 } | 126 } |
127 | 127 |
128 CallbackStackMemoryPool::instance().shutdown(); | 128 CallbackStackMemoryPool::instance().shutdown(); |
129 GCInfoTable::shutdown(); | 129 GCInfoTable::shutdown(); |
130 ASSERT(ProcessHeap::totalAllocatedSpace() == 0); | 130 DCHECK_EQ(ProcessHeap::totalAllocatedSpace(), 0UL); |
131 s_shutdownComplete = true; | 131 s_shutdownComplete = true; |
132 } | 132 } |
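
A note on the assertion macros this change swaps in: DCHECK and its DCHECK_EQ /
DCHECK_IS_ON() relatives are compiled in only for builds with DCHECKs enabled,
while CHECK is enforced in every build. That is why the hard invariant that all
heaps are detached before process-heap shutdown becomes CHECK rather than
DCHECK. A minimal sketch of the distinction (heavily simplified; the real
macros live in base/logging.h and do considerably more):

    #include <cstdio>
    #include <cstdlib>

    // Sketch of the debug-only vs. always-on split (assumption: modeled
    // loosely on base/logging.h).
    #if !defined(NDEBUG)
    #define DCHECK_IS_ON() 1
    #else
    #define DCHECK_IS_ON() 0
    #endif

    #define CHECK(cond)                                      \
      do {                                                   \
        if (!(cond)) {                                       \
          std::fprintf(stderr, "CHECK failed: %s\n", #cond); \
          std::abort();                                      \
        }                                                    \
      } while (0)

    #if DCHECK_IS_ON()
    #define DCHECK(cond) CHECK(cond)  // fatal in DCHECK-enabled builds
    #else
    #define DCHECK(cond) ((void)0)    // compiled out otherwise
    #endif
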
133 | 133 |
134 CrossThreadPersistentRegion& ProcessHeap::crossThreadPersistentRegion() { | 134 CrossThreadPersistentRegion& ProcessHeap::crossThreadPersistentRegion() { |
135 DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion, persistentRegion, | 135 DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion, persistentRegion, |
136 new CrossThreadPersistentRegion()); | 136 new CrossThreadPersistentRegion()); |
137 return persistentRegion; | 137 return persistentRegion; |
138 } | 138 } |
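
DEFINE_THREAD_SAFE_STATIC_LOCAL builds a lazily initialized, deliberately
leaked singleton that any thread may touch. A rough functional equivalent
using a C++11 function-local static (an approximation of what the macro
achieves, not its actual expansion):

    // C++11 runs the initializer of a function-local static exactly once,
    // even under concurrent first calls; allocating with new and never
    // deleting avoids exit-time destructor ordering problems.
    struct Region {};  // hypothetical stand-in for CrossThreadPersistentRegion

    Region& regionSingleton() {
      static Region* instance = new Region();  // intentionally leaked
      return *instance;
    }
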
139 | 139 |
140 bool ProcessHeap::s_shutdownComplete = false; | 140 bool ProcessHeap::s_shutdownComplete = false; |
(...skipping 92 matching lines...)
233 DEFINE_STATIC_LOCAL(HashSet<ThreadHeap*>, heaps, ()); | 233 DEFINE_STATIC_LOCAL(HashSet<ThreadHeap*>, heaps, ()); |
234 return heaps; | 234 return heaps; |
235 } | 235 } |
236 | 236 |
237 void ThreadHeap::attach(ThreadState* thread) { | 237 void ThreadHeap::attach(ThreadState* thread) { |
238 MutexLocker locker(m_threadAttachMutex); | 238 MutexLocker locker(m_threadAttachMutex); |
239 m_threads.add(thread); | 239 m_threads.add(thread); |
240 } | 240 } |
241 | 241 |
242 void ThreadHeap::detach(ThreadState* thread) { | 242 void ThreadHeap::detach(ThreadState* thread) { |
243 ASSERT(ThreadState::current() == thread); | 243 DCHECK_EQ(ThreadState::current(), thread); |
244 bool isLastThread = false; | 244 bool isLastThread = false; |
245 { | 245 { |
246 // Grab the threadAttachMutex to ensure only one thread can shut down at | 246 // Grab the threadAttachMutex to ensure only one thread can shut down at |
247 // a time and that no other thread can do a global GC. It also allows | 247 // a time and that no other thread can do a global GC. It also allows |
248 // safe iteration of the m_threads set, which happens as part of | 248 // safe iteration of the m_threads set, which happens as part of |
249 // thread-local GC asserts. We enter a safepoint while waiting for the | 249 // thread-local GC asserts. We enter a safepoint while waiting for the |
250 // lock to avoid a deadlock where another thread has already requested | 250 // lock to avoid a deadlock where another thread has already requested |
251 // GC. | 251 // GC. |
252 SafePointAwareMutexLocker locker(m_threadAttachMutex, | 252 SafePointAwareMutexLocker locker(m_threadAttachMutex, |
253 BlinkGC::NoHeapPointersOnStack); | 253 BlinkGC::NoHeapPointersOnStack); |
254 thread->runTerminationGC(); | 254 thread->runTerminationGC(); |
255 ASSERT(m_threads.contains(thread)); | 255 DCHECK(m_threads.contains(thread)); |
256 m_threads.remove(thread); | 256 m_threads.remove(thread); |
257 isLastThread = m_threads.isEmpty(); | 257 isLastThread = m_threads.isEmpty(); |
258 } | 258 } |
259 // The last thread being detached should be the owning thread, which would | 259 // The last thread being detached should be the owning thread, which would |
260 // be the main thread for the mainThreadHeap and a per-thread-heap-enabled | 260 // be the main thread for the mainThreadHeap and a per-thread-heap-enabled |
261 // thread otherwise. | 261 // thread otherwise. |
262 if (isLastThread) | 262 if (isLastThread) |
263 DCHECK(thread->threadHeapMode() == BlinkGC::PerThreadHeapMode || | 263 DCHECK(thread->threadHeapMode() == BlinkGC::PerThreadHeapMode || |
264 thread->isMainThread()); | 264 thread->isMainThread()); |
265 if (thread->isMainThread()) | 265 if (thread->isMainThread()) |
266 DCHECK_EQ(heapStats().allocatedSpace(), 0u); | 266 DCHECK_EQ(heapStats().allocatedSpace(), 0u); |
267 if (isLastThread) | 267 if (isLastThread) |
268 delete this; | 268 delete this; |
269 } | 269 } |
270 | 270 |
271 bool ThreadHeap::park() { | 271 bool ThreadHeap::park() { |
272 return m_safePointBarrier->parkOthers(); | 272 return m_safePointBarrier->parkOthers(); |
273 } | 273 } |
274 | 274 |
275 void ThreadHeap::resume() { | 275 void ThreadHeap::resume() { |
276 m_safePointBarrier->resumeOthers(); | 276 m_safePointBarrier->resumeOthers(); |
277 } | 277 } |
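
park() and resume() expose the safepoint barrier: park() blocks until every
other attached thread has stopped at a safepoint (returning false if that
cannot be arranged), and resume() releases them. A hypothetical caller,
assuming only that pairing (doGlobalOperation is not part of this CL):

    // Hypothetical usage sketch: a stop-the-world section bracketed by
    // the safepoint barrier.
    void doGlobalOperation(ThreadHeap& heap) {
      if (!heap.park())  // wait for other threads to reach safepoints
        return;          // could not park everyone; give up safely
      // All other heap threads are now parked; it is safe to inspect
      // their heaps without racing against mutators.
      heap.resume();     // let the parked threads continue
    }
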
278 | 278 |
279 #if ENABLE(ASSERT) | 279 #if DCHECK_IS_ON() |
280 BasePage* ThreadHeap::findPageFromAddress(Address address) { | 280 BasePage* ThreadHeap::findPageFromAddress(Address address) { |
281 MutexLocker locker(m_threadAttachMutex); | 281 MutexLocker locker(m_threadAttachMutex); |
282 for (ThreadState* state : m_threads) { | 282 for (ThreadState* state : m_threads) { |
283 if (BasePage* page = state->findPageFromAddress(address)) | 283 if (BasePage* page = state->findPageFromAddress(address)) |
284 return page; | 284 return page; |
285 } | 285 } |
286 return nullptr; | 286 return nullptr; |
287 } | 287 } |
288 | 288 |
289 bool ThreadHeap::isAtSafePoint() { | 289 bool ThreadHeap::isAtSafePoint() { |
290 MutexLocker locker(m_threadAttachMutex); | 290 MutexLocker locker(m_threadAttachMutex); |
291 for (ThreadState* state : m_threads) { | 291 for (ThreadState* state : m_threads) { |
292 if (!state->isAtSafePoint()) | 292 if (!state->isAtSafePoint()) |
293 return false; | 293 return false; |
294 } | 294 } |
295 return true; | 295 return true; |
296 } | 296 } |
297 #endif | 297 #endif |
298 | 298 |
299 Address ThreadHeap::checkAndMarkPointer(Visitor* visitor, Address address) { | 299 Address ThreadHeap::checkAndMarkPointer(Visitor* visitor, Address address) { |
300 ASSERT(ThreadState::current()->isInGC()); | 300 DCHECK(ThreadState::current()->isInGC()); |
301 | 301 |
302 #if !ENABLE(ASSERT) | 302 #if !DCHECK_IS_ON() |
303 if (m_heapDoesNotContainCache->lookup(address)) | 303 if (m_heapDoesNotContainCache->lookup(address)) |
304 return nullptr; | 304 return nullptr; |
305 #endif | 305 #endif |
306 | 306 |
307 if (BasePage* page = lookupPageForAddress(address)) { | 307 if (BasePage* page = lookupPageForAddress(address)) { |
308 ASSERT(page->contains(address)); | 308 DCHECK(page->contains(address)); |
309 ASSERT(!page->orphaned()); | 309 DCHECK(!page->orphaned()); |
310 ASSERT(!m_heapDoesNotContainCache->lookup(address)); | 310 DCHECK(!m_heapDoesNotContainCache->lookup(address)); |
311 DCHECK(&visitor->heap() == &page->arena()->getThreadState()->heap()); | 311 DCHECK_EQ(&visitor->heap(), &page->arena()->getThreadState()->heap()); |
312 page->checkAndMarkPointer(visitor, address); | 312 page->checkAndMarkPointer(visitor, address); |
313 return address; | 313 return address; |
314 } | 314 } |
315 | 315 |
316 #if !ENABLE(ASSERT) | 316 #if !DCHECK_IS_ON() |
317 m_heapDoesNotContainCache->addEntry(address); | 317 m_heapDoesNotContainCache->addEntry(address); |
318 #else | 318 #else |
319 if (!m_heapDoesNotContainCache->lookup(address)) | 319 if (!m_heapDoesNotContainCache->lookup(address)) |
320 m_heapDoesNotContainCache->addEntry(address); | 320 m_heapDoesNotContainCache->addEntry(address); |
321 #endif | 321 #endif |
322 return nullptr; | 322 return nullptr; |
323 } | 323 } |
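
checkAndMarkPointer sits on the conservative stack-scanning hot path, so
release builds consult a negative cache of addresses already known to lie
outside the heap before doing the page lookup; with DCHECK_IS_ON() the
shortcut is disabled so the cache's answers can be cross-checked against the
real lookup. A self-contained sketch of such a cache (hypothetical,
direct-mapped; the real HeapDoesNotContainCache differs in detail):

    #include <array>
    #include <cstddef>
    #include <cstdint>

    // Remembers addresses whose page lookup already failed, so repeated
    // misses skip the region-tree walk. Direct-mapped and lossy: a
    // collision only costs one extra full lookup.
    class NotInHeapCache {
     public:
      bool lookup(const void* addr) const {
        return slots_[indexFor(addr)] == reinterpret_cast<std::uintptr_t>(addr);
      }
      void addEntry(const void* addr) {
        slots_[indexFor(addr)] = reinterpret_cast<std::uintptr_t>(addr);
      }

     private:
      static const std::size_t kSize = 1024;
      static std::size_t indexFor(const void* addr) {
        // Ignore the low page-offset bits before hashing into the table.
        return (reinterpret_cast<std::uintptr_t>(addr) >> 12) % kSize;
      }
      std::array<std::uintptr_t, kSize> slots_{};
    };
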
324 | 324 |
325 void ThreadHeap::pushTraceCallback(void* object, TraceCallback callback) { | 325 void ThreadHeap::pushTraceCallback(void* object, TraceCallback callback) { |
326 ASSERT(ThreadState::current()->isInGC()); | 326 DCHECK(ThreadState::current()->isInGC()); |
327 | 327 |
328 // Trace should never reach an orphaned page. | 328 // Trace should never reach an orphaned page. |
329 ASSERT(!getOrphanedPagePool()->contains(object)); | 329 DCHECK(!getOrphanedPagePool()->contains(object)); |
330 CallbackStack::Item* slot = m_markingStack->allocateEntry(); | 330 CallbackStack::Item* slot = m_markingStack->allocateEntry(); |
331 *slot = CallbackStack::Item(object, callback); | 331 *slot = CallbackStack::Item(object, callback); |
332 } | 332 } |
333 | 333 |
334 bool ThreadHeap::popAndInvokeTraceCallback(Visitor* visitor) { | 334 bool ThreadHeap::popAndInvokeTraceCallback(Visitor* visitor) { |
335 CallbackStack::Item* item = m_markingStack->pop(); | 335 CallbackStack::Item* item = m_markingStack->pop(); |
336 if (!item) | 336 if (!item) |
337 return false; | 337 return false; |
338 item->call(visitor); | 338 item->call(visitor); |
339 return true; | 339 return true; |
340 } | 340 } |
341 | 341 |
342 void ThreadHeap::pushPostMarkingCallback(void* object, TraceCallback callback) { | 342 void ThreadHeap::pushPostMarkingCallback(void* object, TraceCallback callback) { |
343 ASSERT(ThreadState::current()->isInGC()); | 343 DCHECK(ThreadState::current()->isInGC()); |
344 | 344 |
345 // Trace should never reach an orphaned page. | 345 // Trace should never reach an orphaned page. |
346 ASSERT(!getOrphanedPagePool()->contains(object)); | 346 DCHECK(!getOrphanedPagePool()->contains(object)); |
347 CallbackStack::Item* slot = m_postMarkingCallbackStack->allocateEntry(); | 347 CallbackStack::Item* slot = m_postMarkingCallbackStack->allocateEntry(); |
348 *slot = CallbackStack::Item(object, callback); | 348 *slot = CallbackStack::Item(object, callback); |
349 } | 349 } |
350 | 350 |
351 bool ThreadHeap::popAndInvokePostMarkingCallback(Visitor* visitor) { | 351 bool ThreadHeap::popAndInvokePostMarkingCallback(Visitor* visitor) { |
352 if (CallbackStack::Item* item = m_postMarkingCallbackStack->pop()) { | 352 if (CallbackStack::Item* item = m_postMarkingCallbackStack->pop()) { |
353 item->call(visitor); | 353 item->call(visitor); |
354 return true; | 354 return true; |
355 } | 355 } |
356 return false; | 356 return false; |
357 } | 357 } |
358 | 358 |
359 void ThreadHeap::pushGlobalWeakCallback(void** cell, WeakCallback callback) { | 359 void ThreadHeap::pushGlobalWeakCallback(void** cell, WeakCallback callback) { |
360 ASSERT(ThreadState::current()->isInGC()); | 360 DCHECK(ThreadState::current()->isInGC()); |
361 | 361 |
362 // Trace should never reach an orphaned page. | 362 // Trace should never reach an orphaned page. |
363 ASSERT(!getOrphanedPagePool()->contains(cell)); | 363 DCHECK(!getOrphanedPagePool()->contains(cell)); |
364 CallbackStack::Item* slot = m_globalWeakCallbackStack->allocateEntry(); | 364 CallbackStack::Item* slot = m_globalWeakCallbackStack->allocateEntry(); |
365 *slot = CallbackStack::Item(cell, callback); | 365 *slot = CallbackStack::Item(cell, callback); |
366 } | 366 } |
367 | 367 |
368 void ThreadHeap::pushThreadLocalWeakCallback(void* closure, | 368 void ThreadHeap::pushThreadLocalWeakCallback(void* closure, |
369 void* object, | 369 void* object, |
370 WeakCallback callback) { | 370 WeakCallback callback) { |
371 ASSERT(ThreadState::current()->isInGC()); | 371 DCHECK(ThreadState::current()->isInGC()); |
372 | 372 |
373 // Trace should never reach an orphaned page. | 373 // Trace should never reach an orphaned page. |
374 ASSERT(!getOrphanedPagePool()->contains(object)); | 374 DCHECK(!getOrphanedPagePool()->contains(object)); |
375 ThreadState* state = pageFromObject(object)->arena()->getThreadState(); | 375 ThreadState* state = pageFromObject(object)->arena()->getThreadState(); |
376 state->pushThreadLocalWeakCallback(closure, callback); | 376 state->pushThreadLocalWeakCallback(closure, callback); |
377 } | 377 } |
378 | 378 |
379 bool ThreadHeap::popAndInvokeGlobalWeakCallback(Visitor* visitor) { | 379 bool ThreadHeap::popAndInvokeGlobalWeakCallback(Visitor* visitor) { |
380 if (CallbackStack::Item* item = m_globalWeakCallbackStack->pop()) { | 380 if (CallbackStack::Item* item = m_globalWeakCallbackStack->pop()) { |
381 item->call(visitor); | 381 item->call(visitor); |
382 return true; | 382 return true; |
383 } | 383 } |
384 return false; | 384 return false; |
385 } | 385 } |
386 | 386 |
387 void ThreadHeap::registerWeakTable(void* table, | 387 void ThreadHeap::registerWeakTable(void* table, |
388 EphemeronCallback iterationCallback, | 388 EphemeronCallback iterationCallback, |
389 EphemeronCallback iterationDoneCallback) { | 389 EphemeronCallback iterationDoneCallback) { |
390 ASSERT(ThreadState::current()->isInGC()); | 390 DCHECK(ThreadState::current()->isInGC()); |
391 | 391 |
392 // Trace should never reach an orphaned page. | 392 // Trace should never reach an orphaned page. |
393 ASSERT(!getOrphanedPagePool()->contains(table)); | 393 DCHECK(!getOrphanedPagePool()->contains(table)); |
394 CallbackStack::Item* slot = m_ephemeronStack->allocateEntry(); | 394 CallbackStack::Item* slot = m_ephemeronStack->allocateEntry(); |
395 *slot = CallbackStack::Item(table, iterationCallback); | 395 *slot = CallbackStack::Item(table, iterationCallback); |
396 | 396 |
397 // Register a post-marking callback to tell the tables that | 397 // Register a post-marking callback to tell the tables that |
398 // ephemeron iteration is complete. | 398 // ephemeron iteration is complete. |
399 pushPostMarkingCallback(table, iterationDoneCallback); | 399 pushPostMarkingCallback(table, iterationDoneCallback); |
400 } | 400 } |
401 | 401 |
402 #if ENABLE(ASSERT) | 402 #if DCHECK_IS_ON() |
403 bool ThreadHeap::weakTableRegistered(const void* table) { | 403 bool ThreadHeap::weakTableRegistered(const void* table) { |
404 ASSERT(m_ephemeronStack); | 404 DCHECK(m_ephemeronStack); |
405 return m_ephemeronStack->hasCallbackForObject(table); | 405 return m_ephemeronStack->hasCallbackForObject(table); |
406 } | 406 } |
407 #endif | 407 #endif |
408 | 408 |
409 void ThreadHeap::commitCallbackStacks() { | 409 void ThreadHeap::commitCallbackStacks() { |
410 m_markingStack->commit(); | 410 m_markingStack->commit(); |
411 m_postMarkingCallbackStack->commit(); | 411 m_postMarkingCallbackStack->commit(); |
412 m_globalWeakCallbackStack->commit(); | 412 m_globalWeakCallbackStack->commit(); |
413 m_ephemeronStack->commit(); | 413 m_ephemeronStack->commit(); |
414 } | 414 } |
(...skipping 18 matching lines...)
433 } | 433 } |
434 | 434 |
435 void ThreadHeap::decommitCallbackStacks() { | 435 void ThreadHeap::decommitCallbackStacks() { |
436 m_markingStack->decommit(); | 436 m_markingStack->decommit(); |
437 m_postMarkingCallbackStack->decommit(); | 437 m_postMarkingCallbackStack->decommit(); |
438 m_globalWeakCallbackStack->decommit(); | 438 m_globalWeakCallbackStack->decommit(); |
439 m_ephemeronStack->decommit(); | 439 m_ephemeronStack->decommit(); |
440 } | 440 } |
441 | 441 |
442 void ThreadHeap::preGC() { | 442 void ThreadHeap::preGC() { |
443 ASSERT(!ThreadState::current()->isInGC()); | 443 DCHECK(!ThreadState::current()->isInGC()); |
444 for (ThreadState* state : m_threads) | 444 for (ThreadState* state : m_threads) |
445 state->preGC(); | 445 state->preGC(); |
446 } | 446 } |
447 | 447 |
448 void ThreadHeap::postGC(BlinkGC::GCType gcType) { | 448 void ThreadHeap::postGC(BlinkGC::GCType gcType) { |
449 ASSERT(ThreadState::current()->isInGC()); | 449 DCHECK(ThreadState::current()->isInGC()); |
450 for (ThreadState* state : m_threads) | 450 for (ThreadState* state : m_threads) |
451 state->postGC(gcType); | 451 state->postGC(gcType); |
452 } | 452 } |
453 | 453 |
454 void ThreadHeap::processMarkingStack(Visitor* visitor) { | 454 void ThreadHeap::processMarkingStack(Visitor* visitor) { |
455 // Ephemeron fixed point loop. | 455 // Ephemeron fixed point loop. |
456 do { | 456 do { |
457 { | 457 { |
458 // Iteratively mark all objects that are reachable from the objects | 458 // Iteratively mark all objects that are reachable from the objects |
459 // currently pushed onto the marking stack. | 459 // currently pushed onto the marking stack. |
(...skipping 19 matching lines...)
479 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup | 479 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup |
480 // (specifically to clear the queued bits for weak hash tables), and | 480 // (specifically to clear the queued bits for weak hash tables), and |
481 // 2. the markNoTracing callbacks on collection backings to mark them | 481 // 2. the markNoTracing callbacks on collection backings to mark them |
482 // if they are only reachable from their front objects. | 482 // if they are only reachable from their front objects. |
483 while (popAndInvokePostMarkingCallback(visitor)) { | 483 while (popAndInvokePostMarkingCallback(visitor)) { |
484 } | 484 } |
485 | 485 |
486 // Post-marking callbacks should not trace any objects and | 486 // Post-marking callbacks should not trace any objects and |
487 // therefore the marking stack should be empty after the | 487 // therefore the marking stack should be empty after the |
488 // post-marking callbacks. | 488 // post-marking callbacks. |
489 ASSERT(m_markingStack->isEmpty()); | 489 DCHECK(m_markingStack->isEmpty()); |
490 } | 490 } |
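
The loop above alternates transitive marking with re-running the registered
ephemeron callbacks until neither produces new work: an ephemeron's value may
be marked only once its key is live, and marking that value can in turn make
other keys live. A simplified, self-contained sketch of that fixed point
(hypothetical callback types; the real code drains CallbackStacks):

    #include <deque>
    #include <functional>

    using TraceFn = std::function<void()>;

    // Drain the marking stack, then give every ephemeron a chance to
    // enqueue values whose keys became live; repeat until the ephemeron
    // pass pushes nothing new.
    void processMarkingStackSketch(std::deque<TraceFn>& markingStack,
                                   const std::deque<TraceFn>& ephemerons) {
      do {
        while (!markingStack.empty()) {
          TraceFn trace = markingStack.front();
          markingStack.pop_front();
          trace();  // tracing may push more entries onto markingStack
        }
        for (const TraceFn& iterate : ephemerons)
          iterate();  // may push newly reachable values
      } while (!markingStack.empty());
    }
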
491 | 491 |
492 void ThreadHeap::globalWeakProcessing(Visitor* visitor) { | 492 void ThreadHeap::globalWeakProcessing(Visitor* visitor) { |
493 TRACE_EVENT0("blink_gc", "ThreadHeap::globalWeakProcessing"); | 493 TRACE_EVENT0("blink_gc", "ThreadHeap::globalWeakProcessing"); |
494 double startTime = WTF::currentTimeMS(); | 494 double startTime = WTF::currentTimeMS(); |
495 | 495 |
496 // Call weak callbacks on objects that may now be pointing to dead objects. | 496 // Call weak callbacks on objects that may now be pointing to dead objects. |
497 while (popAndInvokeGlobalWeakCallback(visitor)) { | 497 while (popAndInvokeGlobalWeakCallback(visitor)) { |
498 } | 498 } |
499 | 499 |
500 // It is not permitted to trace pointers of live objects in the weak | 500 // It is not permitted to trace pointers of live objects in the weak |
501 // callback phase, so the marking stack should still be empty here. | 501 // callback phase, so the marking stack should still be empty here. |
502 ASSERT(m_markingStack->isEmpty()); | 502 DCHECK(m_markingStack->isEmpty()); |
503 | 503 |
504 double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime; | 504 double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime; |
505 DEFINE_THREAD_SAFE_STATIC_LOCAL( | 505 DEFINE_THREAD_SAFE_STATIC_LOCAL( |
506 CustomCountHistogram, globalWeakTimeHistogram, | 506 CustomCountHistogram, globalWeakTimeHistogram, |
507 new CustomCountHistogram("BlinkGC.TimeForGlobalWeakProcessing", 1, | 507 new CustomCountHistogram("BlinkGC.TimeForGlobalWeakProcessing", 1, |
508 10 * 1000, 50)); | 508 10 * 1000, 50)); |
509 globalWeakTimeHistogram.count(timeForGlobalWeakProcessing); | 509 globalWeakTimeHistogram.count(timeForGlobalWeakProcessing); |
510 } | 510 } |
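
globalWeakProcessing times the weak-callback drain and feeds the duration into
a lazily created, process-wide histogram (via the same leaky thread-safe
static pattern as crossThreadPersistentRegion). The shape of that
measure-and-report step, with standard-library stand-ins for
WTF::currentTimeMS() and CustomCountHistogram::count():

    #include <chrono>
    #include <cstdio>

    // Measure a phase and report its duration; printf stands in for the
    // histogram, steady_clock for WTF::currentTimeMS().
    void timedPhase(void (*phase)()) {
      const auto start = std::chrono::steady_clock::now();
      phase();
      const auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(
                          std::chrono::steady_clock::now() - start)
                          .count();
      std::printf("BlinkGC.TimeForGlobalWeakProcessing: %lld ms\n",
                  static_cast<long long>(ms));
    }
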
511 | 511 |
512 void ThreadHeap::reportMemoryUsageHistogram() { | 512 void ThreadHeap::reportMemoryUsageHistogram() { |
(...skipping 94 matching lines...)
607 state->makeConsistentForGC(); | 607 state->makeConsistentForGC(); |
608 objectPayloadSize += state->objectPayloadSizeForTesting(); | 608 objectPayloadSize += state->objectPayloadSizeForTesting(); |
609 state->setGCState(ThreadState::EagerSweepScheduled); | 609 state->setGCState(ThreadState::EagerSweepScheduled); |
610 state->setGCState(ThreadState::Sweeping); | 610 state->setGCState(ThreadState::Sweeping); |
611 state->setGCState(ThreadState::NoGCScheduled); | 611 state->setGCState(ThreadState::NoGCScheduled); |
612 } | 612 } |
613 return objectPayloadSize; | 613 return objectPayloadSize; |
614 } | 614 } |
615 | 615 |
616 void ThreadHeap::visitPersistentRoots(Visitor* visitor) { | 616 void ThreadHeap::visitPersistentRoots(Visitor* visitor) { |
617 ASSERT(ThreadState::current()->isInGC()); | 617 DCHECK(ThreadState::current()->isInGC()); |
618 TRACE_EVENT0("blink_gc", "ThreadHeap::visitPersistentRoots"); | 618 TRACE_EVENT0("blink_gc", "ThreadHeap::visitPersistentRoots"); |
619 ProcessHeap::crossThreadPersistentRegion().tracePersistentNodes(visitor); | 619 ProcessHeap::crossThreadPersistentRegion().tracePersistentNodes(visitor); |
620 | 620 |
621 for (ThreadState* state : m_threads) | 621 for (ThreadState* state : m_threads) |
622 state->visitPersistents(visitor); | 622 state->visitPersistents(visitor); |
623 } | 623 } |
624 | 624 |
625 void ThreadHeap::visitStackRoots(Visitor* visitor) { | 625 void ThreadHeap::visitStackRoots(Visitor* visitor) { |
626 ASSERT(ThreadState::current()->isInGC()); | 626 DCHECK(ThreadState::current()->isInGC()); |
627 TRACE_EVENT0("blink_gc", "ThreadHeap::visitStackRoots"); | 627 TRACE_EVENT0("blink_gc", "ThreadHeap::visitStackRoots"); |
628 for (ThreadState* state : m_threads) | 628 for (ThreadState* state : m_threads) |
629 state->visitStack(visitor); | 629 state->visitStack(visitor); |
630 } | 630 } |
631 | 631 |
632 void ThreadHeap::checkAndPark(ThreadState* threadState, | 632 void ThreadHeap::checkAndPark(ThreadState* threadState, |
633 SafePointAwareMutexLocker* locker) { | 633 SafePointAwareMutexLocker* locker) { |
634 m_safePointBarrier->checkAndPark(threadState, locker); | 634 m_safePointBarrier->checkAndPark(threadState, locker); |
635 } | 635 } |
636 | 636 |
637 void ThreadHeap::enterSafePoint(ThreadState* threadState) { | 637 void ThreadHeap::enterSafePoint(ThreadState* threadState) { |
638 m_safePointBarrier->enterSafePoint(threadState); | 638 m_safePointBarrier->enterSafePoint(threadState); |
639 } | 639 } |
640 | 640 |
641 void ThreadHeap::leaveSafePoint(ThreadState* threadState, | 641 void ThreadHeap::leaveSafePoint(ThreadState* threadState, |
642 SafePointAwareMutexLocker* locker) { | 642 SafePointAwareMutexLocker* locker) { |
643 m_safePointBarrier->leaveSafePoint(threadState, locker); | 643 m_safePointBarrier->leaveSafePoint(threadState, locker); |
644 } | 644 } |
645 | 645 |
646 BasePage* ThreadHeap::lookupPageForAddress(Address address) { | 646 BasePage* ThreadHeap::lookupPageForAddress(Address address) { |
647 ASSERT(ThreadState::current()->isInGC()); | 647 DCHECK(ThreadState::current()->isInGC()); |
648 if (PageMemoryRegion* region = m_regionTree->lookup(address)) { | 648 if (PageMemoryRegion* region = m_regionTree->lookup(address)) { |
649 BasePage* page = region->pageFromAddress(address); | 649 BasePage* page = region->pageFromAddress(address); |
650 return page && !page->orphaned() ? page : nullptr; | 650 return page && !page->orphaned() ? page : nullptr; |
651 } | 651 } |
652 return nullptr; | 652 return nullptr; |
653 } | 653 } |
654 | 654 |
655 void ThreadHeap::resetHeapCounters() { | 655 void ThreadHeap::resetHeapCounters() { |
656 ASSERT(ThreadState::current()->isInGC()); | 656 DCHECK(ThreadState::current()->isInGC()); |
657 | 657 |
658 ThreadHeap::reportMemoryUsageForTracing(); | 658 ThreadHeap::reportMemoryUsageForTracing(); |
659 | 659 |
660 ProcessHeap::decreaseTotalAllocatedObjectSize(m_stats.allocatedObjectSize()); | 660 ProcessHeap::decreaseTotalAllocatedObjectSize(m_stats.allocatedObjectSize()); |
661 ProcessHeap::decreaseTotalMarkedObjectSize(m_stats.markedObjectSize()); | 661 ProcessHeap::decreaseTotalMarkedObjectSize(m_stats.markedObjectSize()); |
662 | 662 |
663 m_stats.reset(); | 663 m_stats.reset(); |
664 for (ThreadState* state : m_threads) | 664 for (ThreadState* state : m_threads) |
665 state->resetHeapCounters(); | 665 state->resetHeapCounters(); |
666 } | 666 } |
667 | 667 |
668 ThreadHeap* ThreadHeap::s_mainThreadHeap = nullptr; | 668 ThreadHeap* ThreadHeap::s_mainThreadHeap = nullptr; |
669 | 669 |
670 } // namespace blink | 670 } // namespace blink |