OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 204 matching lines...)
215 | 215 |
216 #if defined(LEAK_SANITIZER) | 216 #if defined(LEAK_SANITIZER) |
217 // If LSan is about to perform leak detection, release all the registered | 217 // If LSan is about to perform leak detection, release all the registered |
218 // static Persistent<> root references to global caches that Blink keeps, | 218 // static Persistent<> root references to global caches that Blink keeps, |
219 // followed by GCs to clear out everything they referred to. | 219 // followed by GCs to clear out everything they referred to. |
220 // | 220 // |
221 // This is not needed for caches over non-Oilpan objects, as they're | 221 // This is not needed for caches over non-Oilpan objects, as they're |
222 // not scanned by LSan due to being held in non-global storage | 222 // not scanned by LSan due to being held in non-global storage |
223 // ("static" references inside functions/methods.) | 223 // ("static" references inside functions/methods.) |
224 releaseStaticPersistentNodes(); | 224 releaseStaticPersistentNodes(); |
225 ThreadHeap::collectAllGarbage(); | 225 Heap::collectAllGarbage(); |
226 #endif | 226 #endif |
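
The LSan block above depends on every static Persistent<> having registered a release hook beforehand. A minimal standalone sketch of that registration pattern, using a hypothetical StaticRootRegistry in place of Blink's actual persistent-region machinery:

    #include <functional>
    #include <utility>
    #include <vector>

    // Hypothetical stand-in for the registry behind releaseStaticPersistentNodes().
    class StaticRootRegistry {
    public:
        // Each static Persistent<> registers a closure that clears it.
        void add(std::function<void()> release) { m_releasers.push_back(std::move(release)); }

        // Called once before LSan scans: drop every static root so the
        // following GCs can reclaim the caches those roots kept alive.
        void releaseAll()
        {
            for (auto& release : m_releasers)
                release();
            m_releasers.clear();
        }

    private:
        std::vector<std::function<void()>> m_releasers;
    };
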
227 | 227 |
228 // Finish sweeping before shutting down V8. Otherwise, some destructor | 228 // Finish sweeping before shutting down V8. Otherwise, some destructor |
229 // may access V8 and cause crashes. | 229 // may access V8 and cause crashes. |
230 completeSweep(); | 230 completeSweep(); |
231 | 231 |
232 // It is unsafe to trigger GCs after this point because some | 232 // It is unsafe to trigger GCs after this point because some |
233 // destructor may access already-detached V8 and cause crashes. | 233 // destructor may access already-detached V8 and cause crashes. |
234 // Also, it is useless, so we forbid GCs. | 234 // Also, it is useless, so we forbid GCs. |
235 enterGCForbiddenScope(); | 235 enterGCForbiddenScope(); |
(...skipping 62 matching lines...)
298 prepareForThreadStateTermination(); | 298 prepareForThreadStateTermination(); |
299 | 299 |
300 ProcessHeap::crossThreadPersistentRegion().prepareForThreadStateTermination(this); | 300 ProcessHeap::crossThreadPersistentRegion().prepareForThreadStateTermination(this); |
301 | 301 |
302 // Do thread local GCs as long as the count of thread local Persistents | 302 // Do thread local GCs as long as the count of thread local Persistents |
303 // changes and is above zero. | 303 // changes and is above zero. |
304 int oldCount = -1; | 304 int oldCount = -1; |
305 int currentCount = getPersistentRegion()->numberOfPersistents(); | 305 int currentCount = getPersistentRegion()->numberOfPersistents(); |
306 ASSERT(currentCount >= 0); | 306 ASSERT(currentCount >= 0); |
307 while (currentCount != oldCount) { | 307 while (currentCount != oldCount) { |
308 ThreadHeap::collectGarbageForTerminatingThread(this); | 308 Heap::collectGarbageForTerminatingThread(this); |
309 oldCount = currentCount; | 309 oldCount = currentCount; |
310 currentCount = getPersistentRegion()->numberOfPersistents(); | 310 currentCount = getPersistentRegion()->numberOfPersistents(); |
311 } | 311 } |
312 // We should not have any persistents left when getting to this point, | 312 // We should not have any persistents left when getting to this point, |
313 // if we do, it is probably a bug, so adding a debug ASSERT to catch this. | 313 // if we do, it is probably a bug, so adding a debug ASSERT to catch this. |
314 ASSERT(!currentCount); | 314 ASSERT(!currentCount); |
315 // All pre-finalizers should be consumed. | 315 // All pre-finalizers should be consumed. |
316 ASSERT(m_orderedPreFinalizers.isEmpty()); | 316 ASSERT(m_orderedPreFinalizers.isEmpty()); |
317 RELEASE_ASSERT(gcState() == NoGCScheduled); | 317 RELEASE_ASSERT(gcState() == NoGCScheduled); |
318 | 318 |
(...skipping 45 matching lines...)
364 m_asanFakeStack, maybeFakeFrame, | 364 m_asanFakeStack, maybeFakeFrame, |
365 reinterpret_cast<void**>(&fakeFrameStart), | 365 reinterpret_cast<void**>(&fakeFrameStart), |
366 reinterpret_cast<void**>(&fakeFrameEnd))); | 366 reinterpret_cast<void**>(&fakeFrameEnd))); |
367 if (realFrameForFakeFrame) { | 367 if (realFrameForFakeFrame) { |
368 // This is a fake frame from the asan fake stack. | 368 // This is a fake frame from the asan fake stack. |
369 if (realFrameForFakeFrame > end && start > realFrameForFakeFrame) { | 369 if (realFrameForFakeFrame > end && start > realFrameForFakeFrame) { |
370 // The real stack address for the asan fake frame is | 370 // The real stack address for the asan fake frame is |
371 // within the stack range that we need to scan so we need | 371 // within the stack range that we need to scan so we need |
372 // to visit the values in the fake frame. | 372 // to visit the values in the fake frame. |
373 for (Address* p = fakeFrameStart; p < fakeFrameEnd; ++p) | 373 for (Address* p = fakeFrameStart; p < fakeFrameEnd; ++p) |
374 ThreadHeap::checkAndMarkPointer(visitor, *p); | 374 Heap::checkAndMarkPointer(visitor, *p); |
375 } | 375 } |
376 } | 376 } |
377 #endif | 377 #endif |
378 } | 378 } |
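
The fake-frame translation above is built on compiler-rt's public ASan interface. A self-contained sketch of the same check, assuming a hypothetical scanWord() hook in place of the real conservative-marking call; because the stack grows down, start > end, and a real frame inside the scanned range satisfies start > frame > end:

    #include <sanitizer/asan_interface.h>

    void scanWord(void* candidate); // Hypothetical conservative-scan hook.

    // If |candidate| points into an ASan fake frame whose real frame lies
    // inside the scanned stack range, visit the fake frame's words too.
    void scanMaybeFakeFrame(void* fakeStack, void* candidate, void* start, void* end)
    {
        void* frameBegin = nullptr;
        void* frameEnd = nullptr;
        void* realFrame = __asan_addr_is_in_fake_stack(fakeStack, candidate, &frameBegin, &frameEnd);
        if (!realFrame)
            return; // Not a fake-stack address.
        if (realFrame > end && start > realFrame) {
            for (void** p = static_cast<void**>(frameBegin); p < static_cast<void**>(frameEnd); ++p)
                scanWord(*p);
        }
    }
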
379 | 379 |
380 NO_SANITIZE_ADDRESS | 380 NO_SANITIZE_ADDRESS |
381 void ThreadState::visitStack(Visitor* visitor) | 381 void ThreadState::visitStack(Visitor* visitor) |
382 { | 382 { |
383 if (m_stackState == BlinkGC::NoHeapPointersOnStack) | 383 if (m_stackState == BlinkGC::NoHeapPointersOnStack) |
384 return; | 384 return; |
(...skipping 15 matching lines...)
400 for (; current < start; ++current) { | 400 for (; current < start; ++current) { |
401 Address ptr = *current; | 401 Address ptr = *current; |
402 #if defined(MEMORY_SANITIZER) | 402 #if defined(MEMORY_SANITIZER) |
403 // |ptr| may be uninitialized by design. Mark it as initialized to keep | 403 // |ptr| may be uninitialized by design. Mark it as initialized to keep |
404 // MSan from complaining. | 404 // MSan from complaining. |
405 // Note: it may be tempting to get rid of |ptr| and simply use |current| | 405 // Note: it may be tempting to get rid of |ptr| and simply use |current| |
406 // here, but that would be incorrect. We intentionally use a local | 406 // here, but that would be incorrect. We intentionally use a local |
407 // variable because we don't want to unpoison the original stack. | 407 // variable because we don't want to unpoison the original stack. |
408 __msan_unpoison(&ptr, sizeof(ptr)); | 408 __msan_unpoison(&ptr, sizeof(ptr)); |
409 #endif | 409 #endif |
410 ThreadHeap::checkAndMarkPointer(visitor, ptr); | 410 Heap::checkAndMarkPointer(visitor, ptr); |
411 visitAsanFakeStackForPointer(visitor, ptr); | 411 visitAsanFakeStackForPointer(visitor, ptr); |
412 } | 412 } |
413 | 413 |
414 for (Address ptr : m_safePointStackCopy) { | 414 for (Address ptr : m_safePointStackCopy) { |
415 #if defined(MEMORY_SANITIZER) | 415 #if defined(MEMORY_SANITIZER) |
416 // See the comment above. | 416 // See the comment above. |
417 __msan_unpoison(&ptr, sizeof(ptr)); | 417 __msan_unpoison(&ptr, sizeof(ptr)); |
418 #endif | 418 #endif |
419 ThreadHeap::checkAndMarkPointer(visitor, ptr); | 419 Heap::checkAndMarkPointer(visitor, ptr); |
420 visitAsanFakeStackForPointer(visitor, ptr); | 420 visitAsanFakeStackForPointer(visitor, ptr); |
421 } | 421 } |
422 } | 422 } |
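
The MSan idiom above is subtle enough to deserve an isolated example: copy the possibly-uninitialized word first, then unpoison only the copy, so the original stack slot keeps its poisoned shadow state. A sketch, with examineWord() as a hypothetical consumer:

    #include <sanitizer/msan_interface.h>

    void examineWord(void* ptr); // Hypothetical consumer of the copied value.

    void inspectStackSlot(void** current)
    {
        void* ptr = *current; // The copy may be "uninitialized" by design.
        // Unpoison &ptr, not current: the real stack stays poisoned, so
        // genuine uses of uninitialized memory are still caught later.
        __msan_unpoison(&ptr, sizeof(ptr));
        examineWord(ptr);
    }
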
423 | 423 |
424 void ThreadState::visitPersistents(Visitor* visitor) | 424 void ThreadState::visitPersistents(Visitor* visitor) |
425 { | 425 { |
426 m_persistentRegion->tracePersistentNodes(visitor); | 426 m_persistentRegion->tracePersistentNodes(visitor); |
427 if (m_traceDOMWrappers) { | 427 if (m_traceDOMWrappers) { |
428 TRACE_EVENT0("blink_gc", "V8GCController::traceDOMWrappers"); | 428 TRACE_EVENT0("blink_gc", "V8GCController::traceDOMWrappers"); |
429 m_traceDOMWrappers(m_isolate, visitor); | 429 m_traceDOMWrappers(m_isolate, visitor); |
(...skipping 14 matching lines...)
444 *slot = CallbackStack::Item(object, callback); | 444 *slot = CallbackStack::Item(object, callback); |
445 } | 445 } |
446 | 446 |
447 bool ThreadState::popAndInvokeThreadLocalWeakCallback(Visitor* visitor) | 447 bool ThreadState::popAndInvokeThreadLocalWeakCallback(Visitor* visitor) |
448 { | 448 { |
449 ASSERT(checkThread()); | 449 ASSERT(checkThread()); |
450 // For weak processing we should never reach orphaned pages since orphaned | 450 // For weak processing we should never reach orphaned pages since orphaned |
451 // pages are not traced and thus objects on those pages are never | 451 // pages are not traced and thus objects on those pages are never |
452 // registered as objects on orphaned pages. We cannot assert this here since | 452 // registered as objects on orphaned pages. We cannot assert this here since |
453 // we might have an off-heap collection. We assert it in | 453 // we might have an off-heap collection. We assert it in |
454 // ThreadHeap::pushThreadLocalWeakCallback. | 454 // Heap::pushThreadLocalWeakCallback. |
455 if (CallbackStack::Item* item = m_threadLocalWeakCallbackStack->pop()) { | 455 if (CallbackStack::Item* item = m_threadLocalWeakCallbackStack->pop()) { |
456 // Note that the thread-local weak processing can be called for | 456 // Note that the thread-local weak processing can be called for |
457 // an already dead object (for which isHeapObjectAlive(object) can | 457 // an already dead object (for which isHeapObjectAlive(object) can |
458 // return false). This can happen in the following scenario: | 458 // return false). This can happen in the following scenario: |
459 // | 459 // |
460 // 1) Marking runs. A weak callback for an object X is registered | 460 // 1) Marking runs. A weak callback for an object X is registered |
461 // to the thread that created the object X (say, thread P). | 461 // to the thread that created the object X (say, thread P). |
462 // 2) Marking finishes. All other threads are resumed. | 462 // 2) Marking finishes. All other threads are resumed. |
463 // 3) The object X becomes unreachable. | 463 // 3) The object X becomes unreachable. |
464 // 4) A next GC hits before the thread P wakes up. | 464 // 4) A next GC hits before the thread P wakes up. |
(...skipping 45 matching lines...)
510 | 510 |
511 if (isMainThread()) { | 511 if (isMainThread()) { |
512 double timeForThreadLocalWeakProcessing = WTF::currentTimeMS() - startTime; | 512 double timeForThreadLocalWeakProcessing = WTF::currentTimeMS() - startTime; |
513 DEFINE_STATIC_LOCAL(CustomCountHistogram, timeForWeakHistogram, ("BlinkGC.TimeForThreadLocalWeakProcessing", 1, 10 * 1000, 50)); | 513 DEFINE_STATIC_LOCAL(CustomCountHistogram, timeForWeakHistogram, ("BlinkGC.TimeForThreadLocalWeakProcessing", 1, 10 * 1000, 50)); |
514 timeForWeakHistogram.count(timeForThreadLocalWeakProcessing); | 514 timeForWeakHistogram.count(timeForThreadLocalWeakProcessing); |
515 } | 515 } |
516 } | 516 } |
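
The scenario spelled out in the comment above is why a thread-local weak callback must tolerate an already-dead receiver. A hedged sketch of such a callback (WeakCache is a hypothetical class; the guard mirrors the isHeapObjectAlive() check the comment refers to):

    class Visitor;                 // Blink's GC visitor (opaque here).
    bool isHeapObjectAlive(void*); // Stand-in for the liveness check named above.

    // Hypothetical cache whose weak callback may fire after the object died
    // in a later GC, before its owning thread resumed.
    class WeakCache {
    public:
        static void clearWeakEntries(Visitor* visitor, void* object)
        {
            WeakCache* cache = static_cast<WeakCache*>(object);
            // The receiver itself may already be dead; bail out rather than
            // touch reclaimed memory.
            if (!isHeapObjectAlive(cache))
                return;
            cache->removeDeadEntries(visitor);
        }

    private:
        void removeDeadEntries(Visitor*);
    };
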
517 | 517 |
518 size_t ThreadState::totalMemorySize() | 518 size_t ThreadState::totalMemorySize() |
519 { | 519 { |
520 return ThreadHeap::heapStats().allocatedObjectSize() + ThreadHeap::heapStats().markedObjectSize() + WTF::Partitions::totalSizeOfCommittedPages(); | 520 return Heap::heapStats().allocatedObjectSize() + Heap::heapStats().markedObjectSize() + WTF::Partitions::totalSizeOfCommittedPages(); |
521 } | 521 } |
522 | 522 |
523 size_t ThreadState::estimatedLiveSize(size_t estimationBaseSize, size_t sizeAtLastGC) | 523 size_t ThreadState::estimatedLiveSize(size_t estimationBaseSize, size_t sizeAtLastGC) |
524 { | 524 { |
525 if (ThreadHeap::heapStats().wrapperCountAtLastGC() == 0) { | 525 if (Heap::heapStats().wrapperCountAtLastGC() == 0) { |
526 // We'll reach here only before hitting the first GC. | 526 // We'll reach here only before hitting the first GC. |
527 return 0; | 527 return 0; |
528 } | 528 } |
529 | 529 |
530 // (estimated size) = (estimation base size) - (heap size at the last GC) / (# of persistent handles at the last GC) * (# of persistent handles collected since the last GC); | 530 // (estimated size) = (estimation base size) - (heap size at the last GC) / (# of persistent handles at the last GC) * (# of persistent handles collected since the last GC); |
531 size_t sizeRetainedByCollectedPersistents = static_cast<size_t>(1.0 * sizeAtLastGC / ThreadHeap::heapStats().wrapperCountAtLastGC() * ThreadHeap::heapStats().collectedWrapperCount()); | 531 size_t sizeRetainedByCollectedPersistents = static_cast<size_t>(1.0 * sizeAtLastGC / Heap::heapStats().wrapperCountAtLastGC() * Heap::heapStats().collectedWrapperCount()); |
532 if (estimationBaseSize < sizeRetainedByCollectedPersistents) | 532 if (estimationBaseSize < sizeRetainedByCollectedPersistents) |
533 return 0; | 533 return 0; |
534 return estimationBaseSize - sizeRetainedByCollectedPersistents; | 534 return estimationBaseSize - sizeRetainedByCollectedPersistents; |
535 } | 535 } |
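
A worked instance of the estimate above, with assumed numbers: if the heap held 100 MB at the last GC with 1000 wrappers alive, and 250 wrappers have been collected since, the collected persistents are credited with retaining 100 MB / 1000 * 250 = 25 MB; an estimation base of 90 MB then yields an estimated live size of 90 - 25 = 65 MB. If the credit exceeded the base, the early return above clamps the estimate to 0.
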
536 | 536 |
537 double ThreadState::heapGrowingRate() | 537 double ThreadState::heapGrowingRate() |
538 { | 538 { |
539 size_t currentSize = ThreadHeap::heapStats().allocatedObjectSize() + ThreadHeap::heapStats().markedObjectSize(); | 539 size_t currentSize = Heap::heapStats().allocatedObjectSize() + Heap::heapStats().markedObjectSize(); |
540 size_t estimatedSize = estimatedLiveSize(ThreadHeap::heapStats().markedObjectSizeAtLastCompleteSweep(), ThreadHeap::heapStats().markedObjectSizeAtLastCompleteSweep()); | 540 size_t estimatedSize = estimatedLiveSize(Heap::heapStats().markedObjectSizeAtLastCompleteSweep(), Heap::heapStats().markedObjectSizeAtLastCompleteSweep()); |
541 | 541 |
542 // If the estimatedSize is 0, we set a high growing rate to trigger a GC. | 542 // If the estimatedSize is 0, we set a high growing rate to trigger a GC. |
543 double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100; | 543 double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100; |
544 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapEstimatedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_MAX))); | 544 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapEstimatedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_MAX))); |
545 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapGrowingRate", static_cast<int>(100 * growingRate)); | 545 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapGrowingRate", static_cast<int>(100 * growingRate)); |
546 return growingRate; | 546 return growingRate; |
547 } | 547 } |
548 | 548 |
549 double ThreadState::partitionAllocGrowingRate() | 549 double ThreadState::partitionAllocGrowingRate() |
550 { | 550 { |
551 size_t currentSize = WTF::Partitions::totalSizeOfCommittedPages(); | 551 size_t currentSize = WTF::Partitions::totalSizeOfCommittedPages(); |
552 size_t estimatedSize = estimatedLiveSize(currentSize, ThreadHeap::heapStats().partitionAllocSizeAtLastGC()); | 552 size_t estimatedSize = estimatedLiveSize(currentSize, Heap::heapStats().partitionAllocSizeAtLastGC()); |
553 | 553 |
554 // If the estimatedSize is 0, we set a high growing rate to trigger a GC. | 554 // If the estimatedSize is 0, we set a high growing rate to trigger a GC. |
555 double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100; | 555 double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100; |
556 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitionAllocEstimatedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_MAX))); | 556 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitionAllocEstimatedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_MAX))); |
557 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitionAllocGrowingRate", static_cast<int>(100 * growingRate)); | 557 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitionAllocGrowingRate", static_cast<int>(100 * growingRate)); |
558 return growingRate; | 558 return growingRate; |
559 } | 559 } |
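
To make the two growing rates concrete, with assumed numbers: a current size of 120 MB against an estimated live size of 80 MB gives a rate of 120 / 80 = 1.5, which is exactly the threshold shouldForceMemoryPressureGC() passes to judgeGCThreshold() below; an estimated size of 0 short-circuits to a rate of 100 and therefore satisfies any threshold.
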
560 | 560 |
561 // TODO(haraken): We should improve the GC heuristics. The heuristics affect | 561 // TODO(haraken): We should improve the GC heuristics. The heuristics affect |
562 // performance significantly. | 562 // performance significantly. |
563 bool ThreadState::judgeGCThreshold(size_t totalMemorySizeThreshold, double heapGrowingRateThreshold) | 563 bool ThreadState::judgeGCThreshold(size_t totalMemorySizeThreshold, double heapGrowingRateThreshold) |
564 { | 564 { |
565 // If the allocated object size or the total memory size is small, don't trigger a GC. | 565 // If the allocated object size or the total memory size is small, don't trigger a GC. |
566 if (ThreadHeap::heapStats().allocatedObjectSize() < 100 * 1024 || totalMemorySize() < totalMemorySizeThreshold) | 566 if (Heap::heapStats().allocatedObjectSize() < 100 * 1024 || totalMemorySize() < totalMemorySizeThreshold) |
567 return false; | 567 return false; |
568 // If the growing rate of Oilpan's heap or PartitionAlloc is high enough, | 568 // If the growing rate of Oilpan's heap or PartitionAlloc is high enough, |
569 // trigger a GC. | 569 // trigger a GC. |
570 #if PRINT_HEAP_STATS | 570 #if PRINT_HEAP_STATS |
571 dataLogF("heapGrowingRate=%.1lf, partitionAllocGrowingRate=%.1lf\n", heapGrowingRate(), partitionAllocGrowingRate()); | 571 dataLogF("heapGrowingRate=%.1lf, partitionAllocGrowingRate=%.1lf\n", heapGrowingRate(), partitionAllocGrowingRate()); |
572 #endif | 572 #endif |
573 return heapGrowingRate() >= heapGrowingRateThreshold || partitionAllocGrowingRate() >= heapGrowingRateThreshold; | 573 return heapGrowingRate() >= heapGrowingRateThreshold || partitionAllocGrowingRate() >= heapGrowingRateThreshold; |
574 } | 574 } |
575 | 575 |
576 bool ThreadState::shouldScheduleIdleGC() | 576 bool ThreadState::shouldScheduleIdleGC() |
(...skipping 27 matching lines...)
604 bool ThreadState::shouldForceMemoryPressureGC() | 604 bool ThreadState::shouldForceMemoryPressureGC() |
605 { | 605 { |
606 if (totalMemorySize() < 300 * 1024 * 1024) | 606 if (totalMemorySize() < 300 * 1024 * 1024) |
607 return false; | 607 return false; |
608 return judgeGCThreshold(0, 1.5); | 608 return judgeGCThreshold(0, 1.5); |
609 } | 609 } |
610 | 610 |
611 void ThreadState::scheduleV8FollowupGCIfNeeded(BlinkGC::V8GCType gcType) | 611 void ThreadState::scheduleV8FollowupGCIfNeeded(BlinkGC::V8GCType gcType) |
612 { | 612 { |
613 ASSERT(checkThread()); | 613 ASSERT(checkThread()); |
614 ThreadHeap::reportMemoryUsageForTracing(); | 614 Heap::reportMemoryUsageForTracing(); |
615 | 615 |
616 #if PRINT_HEAP_STATS | 616 #if PRINT_HEAP_STATS |
617 dataLogF("ThreadState::scheduleV8FollowupGCIfNeeded (gcType=%s)\n", gcType == BlinkGC::V8MajorGC ? "MajorGC" : "MinorGC"); | 617 dataLogF("ThreadState::scheduleV8FollowupGCIfNeeded (gcType=%s)\n", gcType == BlinkGC::V8MajorGC ? "MajorGC" : "MinorGC"); |
618 #endif | 618 #endif |
619 | 619 |
620 if (isGCForbidden()) | 620 if (isGCForbidden()) |
621 return; | 621 return; |
622 | 622 |
623 // This completeSweep() will do nothing in common cases since we've | 623 // This completeSweep() will do nothing in common cases since we've |
624 // called completeSweep() before V8 starts minor/major GCs. | 624 // called completeSweep() before V8 starts minor/major GCs. |
(...skipping 27 matching lines...)
652 // completeSweep() here, because gcPrologue for a major GC is called | 652 // completeSweep() here, because gcPrologue for a major GC is called |
653 // not at the point where the major GC started but at the point where | 653 // not at the point where the major GC started but at the point where |
654 // the major GC requests object grouping. | 654 // the major GC requests object grouping. |
655 if (gcType == BlinkGC::V8MajorGC) | 655 if (gcType == BlinkGC::V8MajorGC) |
656 completeSweep(); | 656 completeSweep(); |
657 } | 657 } |
658 | 658 |
659 void ThreadState::schedulePageNavigationGCIfNeeded(float estimatedRemovalRatio) | 659 void ThreadState::schedulePageNavigationGCIfNeeded(float estimatedRemovalRatio) |
660 { | 660 { |
661 ASSERT(checkThread()); | 661 ASSERT(checkThread()); |
662 ThreadHeap::reportMemoryUsageForTracing(); | 662 Heap::reportMemoryUsageForTracing(); |
663 | 663 |
664 #if PRINT_HEAP_STATS | 664 #if PRINT_HEAP_STATS |
665 dataLogF("ThreadState::schedulePageNavigationGCIfNeeded (estimatedRemovalRatio=%.2lf)\n", estimatedRemovalRatio); | 665 dataLogF("ThreadState::schedulePageNavigationGCIfNeeded (estimatedRemovalRatio=%.2lf)\n", estimatedRemovalRatio); |
666 #endif | 666 #endif |
667 | 667 |
668 if (isGCForbidden()) | 668 if (isGCForbidden()) |
669 return; | 669 return; |
670 | 670 |
671 // Finish on-going lazy sweeping. | 671 // Finish on-going lazy sweeping. |
672 // TODO(haraken): It might not make sense to force completeSweep() for all | 672 // TODO(haraken): It might not make sense to force completeSweep() for all |
673 // page navigations. | 673 // page navigations. |
674 completeSweep(); | 674 completeSweep(); |
675 ASSERT(!isSweepingInProgress()); | 675 ASSERT(!isSweepingInProgress()); |
676 ASSERT(!sweepForbidden()); | 676 ASSERT(!sweepForbidden()); |
677 | 677 |
678 if (shouldForceMemoryPressureGC()) { | 678 if (shouldForceMemoryPressureGC()) { |
679 #if PRINT_HEAP_STATS | 679 #if PRINT_HEAP_STATS |
680 dataLogF("Scheduled MemoryPressureGC\n"); | 680 dataLogF("Scheduled MemoryPressureGC\n"); |
681 #endif | 681 #endif |
682 ThreadHeap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::MemoryPressureGC); | 682 Heap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::MemoryPressureGC); |
683 return; | 683 return; |
684 } | 684 } |
685 if (shouldSchedulePageNavigationGC(estimatedRemovalRatio)) { | 685 if (shouldSchedulePageNavigationGC(estimatedRemovalRatio)) { |
686 #if PRINT_HEAP_STATS | 686 #if PRINT_HEAP_STATS |
687 dataLogF("Scheduled PageNavigationGC\n"); | 687 dataLogF("Scheduled PageNavigationGC\n"); |
688 #endif | 688 #endif |
689 schedulePageNavigationGC(); | 689 schedulePageNavigationGC(); |
690 } | 690 } |
691 } | 691 } |
692 | 692 |
693 void ThreadState::schedulePageNavigationGC() | 693 void ThreadState::schedulePageNavigationGC() |
694 { | 694 { |
695 ASSERT(checkThread()); | 695 ASSERT(checkThread()); |
696 ASSERT(!isSweepingInProgress()); | 696 ASSERT(!isSweepingInProgress()); |
697 setGCState(PageNavigationGCScheduled); | 697 setGCState(PageNavigationGCScheduled); |
698 } | 698 } |
699 | 699 |
700 void ThreadState::scheduleGCIfNeeded() | 700 void ThreadState::scheduleGCIfNeeded() |
701 { | 701 { |
702 ASSERT(checkThread()); | 702 ASSERT(checkThread()); |
703 ThreadHeap::reportMemoryUsageForTracing(); | 703 Heap::reportMemoryUsageForTracing(); |
704 | 704 |
705 #if PRINT_HEAP_STATS | 705 #if PRINT_HEAP_STATS |
706 dataLogF("ThreadState::scheduleGCIfNeeded\n"); | 706 dataLogF("ThreadState::scheduleGCIfNeeded\n"); |
707 #endif | 707 #endif |
708 | 708 |
709 // Allocation is allowed during sweeping, but those allocations should not | 709 // Allocation is allowed during sweeping, but those allocations should not |
710 // trigger nested GCs. | 710 // trigger nested GCs. |
711 if (isGCForbidden()) | 711 if (isGCForbidden()) |
712 return; | 712 return; |
713 | 713 |
714 if (isSweepingInProgress()) | 714 if (isSweepingInProgress()) |
715 return; | 715 return; |
716 ASSERT(!sweepForbidden()); | 716 ASSERT(!sweepForbidden()); |
717 | 717 |
718 reportMemoryToV8(); | 718 reportMemoryToV8(); |
719 | 719 |
720 if (shouldForceMemoryPressureGC()) { | 720 if (shouldForceMemoryPressureGC()) { |
721 completeSweep(); | 721 completeSweep(); |
722 if (shouldForceMemoryPressureGC()) { | 722 if (shouldForceMemoryPressureGC()) { |
723 #if PRINT_HEAP_STATS | 723 #if PRINT_HEAP_STATS |
724 dataLogF("Scheduled MemoryPressureGC\n"); | 724 dataLogF("Scheduled MemoryPressureGC\n"); |
725 #endif | 725 #endif |
726 ThreadHeap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::MemoryPressureGC); | 726 Heap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::MemoryPressureGC); |
727 return; | 727 return; |
728 } | 728 } |
729 } | 729 } |
730 | 730 |
731 if (shouldForceConservativeGC()) { | 731 if (shouldForceConservativeGC()) { |
732 completeSweep(); | 732 completeSweep(); |
733 if (shouldForceConservativeGC()) { | 733 if (shouldForceConservativeGC()) { |
734 #if PRINT_HEAP_STATS | 734 #if PRINT_HEAP_STATS |
735 dataLogF("Scheduled ConservativeGC\n"); | 735 dataLogF("Scheduled ConservativeGC\n"); |
736 #endif | 736 #endif |
737 ThreadHeap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::ConservativeGC); | 737 Heap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::ConservativeGC); |
738 return; | 738 return; |
739 } | 739 } |
740 } | 740 } |
741 if (shouldScheduleIdleGC()) { | 741 if (shouldScheduleIdleGC()) { |
742 #if PRINT_HEAP_STATS | 742 #if PRINT_HEAP_STATS |
743 dataLogF("Scheduled IdleGC\n"); | 743 dataLogF("Scheduled IdleGC\n"); |
744 #endif | 744 #endif |
745 scheduleIdleGC(); | 745 scheduleIdleGC(); |
746 return; | 746 return; |
747 } | 747 } |
748 } | 748 } |
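
Note the check-sweep-recheck pattern above: completeSweep() can itself free enough memory that the triggering condition no longer holds, so each predicate is re-evaluated after sweeping before a GC is actually started.
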
749 | 749 |
750 void ThreadState::performIdleGC(double deadlineSeconds) | 750 void ThreadState::performIdleGC(double deadlineSeconds) |
751 { | 751 { |
752 ASSERT(checkThread()); | 752 ASSERT(checkThread()); |
753 ASSERT(isMainThread()); | 753 ASSERT(isMainThread()); |
754 ASSERT(Platform::current()->currentThread()->scheduler()); | 754 ASSERT(Platform::current()->currentThread()->scheduler()); |
755 | 755 |
756 if (gcState() != IdleGCScheduled) | 756 if (gcState() != IdleGCScheduled) |
757 return; | 757 return; |
758 | 758 |
759 double idleDeltaInSeconds = deadlineSeconds - monotonicallyIncreasingTime(); | 759 double idleDeltaInSeconds = deadlineSeconds - monotonicallyIncreasingTime(); |
760 TRACE_EVENT2("blink_gc", "ThreadState::performIdleGC", "idleDeltaInSeconds", idleDeltaInSeconds, "estimatedMarkingTime", ThreadHeap::heapStats().estimatedMarkingTime()); | 760 TRACE_EVENT2("blink_gc", "ThreadState::performIdleGC", "idleDeltaInSeconds", idleDeltaInSeconds, "estimatedMarkingTime", Heap::heapStats().estimatedMarkingTime()); |
761 if (idleDeltaInSeconds <= ThreadHeap::heapStats().estimatedMarkingTime() && !Platform::current()->currentThread()->scheduler()->canExceedIdleDeadlineIfRequired()) { | 761 if (idleDeltaInSeconds <= Heap::heapStats().estimatedMarkingTime() && !Platform::current()->currentThread()->scheduler()->canExceedIdleDeadlineIfRequired()) { |
762 // If marking is estimated to take longer than the deadline and we can't | 762 // If marking is estimated to take longer than the deadline and we can't |
763 // exceed the deadline, then reschedule for the next idle period. | 763 // exceed the deadline, then reschedule for the next idle period. |
764 scheduleIdleGC(); | 764 scheduleIdleGC(); |
765 return; | 765 return; |
766 } | 766 } |
767 | 767 |
768 ThreadHeap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::IdleGC); | 768 Heap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::IdleGC); |
769 } | 769 } |
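
Reading the deadline check above with assumed numbers: if roughly 12 ms remain in the idle period but the estimated marking time is 15 ms, the GC is pushed to the next idle period rather than overrunning the deadline, unless the scheduler reports that the deadline may be exceeded.
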
770 | 770 |
771 void ThreadState::performIdleLazySweep(double deadlineSeconds) | 771 void ThreadState::performIdleLazySweep(double deadlineSeconds) |
772 { | 772 { |
773 ASSERT(checkThread()); | 773 ASSERT(checkThread()); |
774 ASSERT(isMainThread()); | 774 ASSERT(isMainThread()); |
775 | 775 |
776 // If we are not in a sweeping phase, there is nothing to do here. | 776 // If we are not in a sweeping phase, there is nothing to do here. |
777 if (!isSweepingInProgress()) | 777 if (!isSweepingInProgress()) |
778 return; | 778 return; |
(...skipping 155 matching lines...)
934 // If a safe point is entered while initiating a GC, we clearly do | 934 // If a safe point is entered while initiating a GC, we clearly do |
935 // not want to do another as part of that -- the safe point is only | 935 // not want to do another as part of that -- the safe point is only |
936 // entered after checking if a scheduled GC ought to run first. | 936 // entered after checking if a scheduled GC ought to run first. |
937 // Prevent that from happening by marking GCs as forbidden while | 937 // Prevent that from happening by marking GCs as forbidden while |
938 // one is initiated and later running. | 938 // one is initiated and later running. |
939 if (isGCForbidden()) | 939 if (isGCForbidden()) |
940 return; | 940 return; |
941 | 941 |
942 switch (gcState()) { | 942 switch (gcState()) { |
943 case FullGCScheduled: | 943 case FullGCScheduled: |
944 ThreadHeap::collectAllGarbage(); | 944 Heap::collectAllGarbage(); |
945 break; | 945 break; |
946 case PreciseGCScheduled: | 946 case PreciseGCScheduled: |
947 ThreadHeap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::PreciseGC); | 947 Heap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::PreciseGC); |
948 break; | 948 break; |
949 case PageNavigationGCScheduled: | 949 case PageNavigationGCScheduled: |
950 ThreadHeap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::PageNavigationGC); | 950 Heap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::PageNavigationGC); |
951 break; | 951 break; |
952 case IdleGCScheduled: | 952 case IdleGCScheduled: |
953 // Idle time GC will be scheduled by Blink Scheduler. | 953 // Idle time GC will be scheduled by Blink Scheduler. |
954 break; | 954 break; |
955 default: | 955 default: |
956 break; | 956 break; |
957 } | 957 } |
958 } | 958 } |
959 | 959 |
960 void ThreadState::flushHeapDoesNotContainCacheIfNeeded() | 960 void ThreadState::flushHeapDoesNotContainCacheIfNeeded() |
961 { | 961 { |
962 if (m_shouldFlushHeapDoesNotContainCache) { | 962 if (m_shouldFlushHeapDoesNotContainCache) { |
963 ThreadHeap::flushHeapDoesNotContainCache(); | 963 Heap::flushHeapDoesNotContainCache(); |
964 m_shouldFlushHeapDoesNotContainCache = false; | 964 m_shouldFlushHeapDoesNotContainCache = false; |
965 } | 965 } |
966 } | 966 } |
967 | 967 |
968 void ThreadState::makeConsistentForGC() | 968 void ThreadState::makeConsistentForGC() |
969 { | 969 { |
970 ASSERT(isInGC()); | 970 ASSERT(isInGC()); |
971 TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForGC"); | 971 TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForGC"); |
972 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) | 972 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) |
973 m_arenas[i]->makeConsistentForGC(); | 973 m_arenas[i]->makeConsistentForGC(); |
(...skipping 142 matching lines...)
1116 completeSweepHistogram.count(timeForCompleteSweep); | 1116 completeSweepHistogram.count(timeForCompleteSweep); |
1117 } | 1117 } |
1118 } | 1118 } |
1119 | 1119 |
1120 postSweep(); | 1120 postSweep(); |
1121 } | 1121 } |
1122 | 1122 |
1123 void ThreadState::postSweep() | 1123 void ThreadState::postSweep() |
1124 { | 1124 { |
1125 ASSERT(checkThread()); | 1125 ASSERT(checkThread()); |
1126 ThreadHeap::reportMemoryUsageForTracing(); | 1126 Heap::reportMemoryUsageForTracing(); |
1127 | 1127 |
1128 if (isMainThread()) { | 1128 if (isMainThread()) { |
1129 double collectionRate = 0; | 1129 double collectionRate = 0; |
1130 if (ThreadHeap::heapStats().objectSizeAtLastGC() > 0) | 1130 if (Heap::heapStats().objectSizeAtLastGC() > 0) |
1131 collectionRate = 1 - 1.0 * ThreadHeap::heapStats().markedObjectSize() / ThreadHeap::heapStats().objectSizeAtLastGC(); | 1131 collectionRate = 1 - 1.0 * Heap::heapStats().markedObjectSize() / Heap::heapStats().objectSizeAtLastGC(); |
1132 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::collectionRate", static_cast<int>(100 * collectionRate)); | 1132 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::collectionRate", static_cast<int>(100 * collectionRate)); |
1133 | 1133 |
1134 #if PRINT_HEAP_STATS | 1134 #if PRINT_HEAP_STATS |
1135 dataLogF("ThreadState::postSweep (collectionRate=%d%%)\n", static_cast<int>(100 * collectionRate)); | 1135 dataLogF("ThreadState::postSweep (collectionRate=%d%%)\n", static_cast<int>(100 * collectionRate)); |
1136 #endif | 1136 #endif |
1137 | 1137 |
1138 // ThreadHeap::markedObjectSize() may be underestimated here if any other | 1138 // Heap::markedObjectSize() may be underestimated here if any other |
1139 // thread has not yet finished lazy sweeping. | 1139 // thread has not yet finished lazy sweeping. |
1140 ThreadHeap::heapStats().setMarkedObjectSizeAtLastCompleteSweep(ThreadHeap::heapStats().markedObjectSize()); | 1140 Heap::heapStats().setMarkedObjectSizeAtLastCompleteSweep(Heap::heapStats().markedObjectSize()); |
1141 | 1141 |
1142 DEFINE_STATIC_LOCAL(CustomCountHistogram, objectSizeBeforeGCHistogram, ("BlinkGC.ObjectSizeBeforeGC", 1, 4 * 1024 * 1024, 50)); | 1142 DEFINE_STATIC_LOCAL(CustomCountHistogram, objectSizeBeforeGCHistogram, ("BlinkGC.ObjectSizeBeforeGC", 1, 4 * 1024 * 1024, 50)); |
1143 objectSizeBeforeGCHistogram.count(ThreadHeap::heapStats().objectSizeAtLastGC() / 1024); | 1143 objectSizeBeforeGCHistogram.count(Heap::heapStats().objectSizeAtLastGC() / 1024); |
1144 DEFINE_STATIC_LOCAL(CustomCountHistogram, objectSizeAfterGCHistogram, ("BlinkGC.ObjectSizeAfterGC", 1, 4 * 1024 * 1024, 50)); | 1144 DEFINE_STATIC_LOCAL(CustomCountHistogram, objectSizeAfterGCHistogram, ("BlinkGC.ObjectSizeAfterGC", 1, 4 * 1024 * 1024, 50)); |
1145 objectSizeAfterGCHistogram.count(ThreadHeap::heapStats().markedObjectSize() / 1024); | 1145 objectSizeAfterGCHistogram.count(Heap::heapStats().markedObjectSize() / 1024); |
1146 DEFINE_STATIC_LOCAL(CustomCountHistogram, collectionRateHistogram, ("BlinkGC.CollectionRate", 1, 100, 20)); | 1146 DEFINE_STATIC_LOCAL(CustomCountHistogram, collectionRateHistogram, ("BlinkGC.CollectionRate", 1, 100, 20)); |
1147 collectionRateHistogram.count(static_cast<int>(100 * collectionRate)); | 1147 collectionRateHistogram.count(static_cast<int>(100 * collectionRate)); |
1148 DEFINE_STATIC_LOCAL(CustomCountHistogram, timeForSweepHistogram, ("BlinkGC.TimeForSweepingAllObjects", 1, 10 * 1000, 50)); | 1148 DEFINE_STATIC_LOCAL(CustomCountHistogram, timeForSweepHistogram, ("BlinkGC.TimeForSweepingAllObjects", 1, 10 * 1000, 50)); |
1149 timeForSweepHistogram.count(m_accumulatedSweepingTime); | 1149 timeForSweepHistogram.count(m_accumulatedSweepingTime); |
1150 | 1150 |
1151 | 1151 |
1152 #define COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(GCReason) \ | 1152 #define COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(GCReason) \ |
1153 case BlinkGC::GCReason: { \ | 1153 case BlinkGC::GCReason: { \ |
1154 DEFINE_STATIC_LOCAL(CustomCountHistogram, histogram, \ | 1154 DEFINE_STATIC_LOCAL(CustomCountHistogram, histogram, \ |
1155 ("BlinkGC.CollectionRate_" #GCReason, 1, 100, 20)); \ | 1155 ("BlinkGC.CollectionRate_" #GCReason, 1, 100, 20)); \ |
1156 histogram.count(static_cast<int>(100 * collectionRate)); \ | 1156 histogram.count(static_cast<int>(100 * collectionRate)); \ |
1157 break; \ | 1157 break; \ |
1158 } | 1158 } |
1159 | 1159 |
1160 switch (ThreadHeap::lastGCReason()) { | 1160 switch (Heap::lastGCReason()) { |
1161 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(IdleGC) | 1161 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(IdleGC) |
1162 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(PreciseGC) | 1162 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(PreciseGC) |
1163 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(ConservativeGC) | 1163 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(ConservativeGC) |
1164 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(ForcedGC) | 1164 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(ForcedGC) |
1165 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(MemoryPressureGC) | 1165 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(MemoryPressureGC) |
1166 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(PageNavigationGC) | 1166 COUNT_COLLECTION_RATE_HISTOGRAM_BY_GC_REASON(PageNavigationGC) |
1167 default: | 1167 default: |
1168 break; | 1168 break; |
1169 } | 1169 } |
1170 } | 1170 } |
(...skipping 46 matching lines...)
1217 } | 1217 } |
1218 | 1218 |
1219 void ThreadState::resumeThreads() | 1219 void ThreadState::resumeThreads() |
1220 { | 1220 { |
1221 s_safePointBarrier->resumeOthers(); | 1221 s_safePointBarrier->resumeOthers(); |
1222 } | 1222 } |
1223 | 1223 |
1224 void ThreadState::safePoint(BlinkGC::StackState stackState) | 1224 void ThreadState::safePoint(BlinkGC::StackState stackState) |
1225 { | 1225 { |
1226 ASSERT(checkThread()); | 1226 ASSERT(checkThread()); |
1227 ThreadHeap::reportMemoryUsageForTracing(); | 1227 Heap::reportMemoryUsageForTracing(); |
1228 | 1228 |
1229 runScheduledGC(stackState); | 1229 runScheduledGC(stackState); |
1230 ASSERT(!m_atSafePoint); | 1230 ASSERT(!m_atSafePoint); |
1231 m_stackState = stackState; | 1231 m_stackState = stackState; |
1232 m_atSafePoint = true; | 1232 m_atSafePoint = true; |
1233 s_safePointBarrier->checkAndPark(this); | 1233 s_safePointBarrier->checkAndPark(this); |
1234 m_atSafePoint = false; | 1234 m_atSafePoint = false; |
1235 m_stackState = BlinkGC::HeapPointersOnStack; | 1235 m_stackState = BlinkGC::HeapPointersOnStack; |
1236 preSweep(); | 1236 preSweep(); |
1237 } | 1237 } |
(...skipping 63 matching lines...)
1301 | 1301 |
1302 void ThreadState::resetHeapCounters() | 1302 void ThreadState::resetHeapCounters() |
1303 { | 1303 { |
1304 m_allocatedObjectSize = 0; | 1304 m_allocatedObjectSize = 0; |
1305 m_markedObjectSize = 0; | 1305 m_markedObjectSize = 0; |
1306 } | 1306 } |
1307 | 1307 |
1308 void ThreadState::increaseAllocatedObjectSize(size_t delta) | 1308 void ThreadState::increaseAllocatedObjectSize(size_t delta) |
1309 { | 1309 { |
1310 m_allocatedObjectSize += delta; | 1310 m_allocatedObjectSize += delta; |
1311 ThreadHeap::heapStats().increaseAllocatedObjectSize(delta); | 1311 Heap::heapStats().increaseAllocatedObjectSize(delta); |
1312 } | 1312 } |
1313 | 1313 |
1314 void ThreadState::decreaseAllocatedObjectSize(size_t delta) | 1314 void ThreadState::decreaseAllocatedObjectSize(size_t delta) |
1315 { | 1315 { |
1316 m_allocatedObjectSize -= delta; | 1316 m_allocatedObjectSize -= delta; |
1317 ThreadHeap::heapStats().decreaseAllocatedObjectSize(delta); | 1317 Heap::heapStats().decreaseAllocatedObjectSize(delta); |
1318 } | 1318 } |
1319 | 1319 |
1320 void ThreadState::increaseMarkedObjectSize(size_t delta) | 1320 void ThreadState::increaseMarkedObjectSize(size_t delta) |
1321 { | 1321 { |
1322 m_markedObjectSize += delta; | 1322 m_markedObjectSize += delta; |
1323 ThreadHeap::heapStats().increaseMarkedObjectSize(delta); | 1323 Heap::heapStats().increaseMarkedObjectSize(delta); |
1324 } | 1324 } |
1325 | 1325 |
1326 void ThreadState::copyStackUntilSafePointScope() | 1326 void ThreadState::copyStackUntilSafePointScope() |
1327 { | 1327 { |
1328 if (!m_safePointScopeMarker || m_stackState == BlinkGC::NoHeapPointersOnStack) | 1328 if (!m_safePointScopeMarker || m_stackState == BlinkGC::NoHeapPointersOnStack) |
1329 return; | 1329 return; |
1330 | 1330 |
1331 Address* to = reinterpret_cast<Address*>(m_safePointScopeMarker); | 1331 Address* to = reinterpret_cast<Address*>(m_safePointScopeMarker); |
1332 Address* from = reinterpret_cast<Address*>(m_endOfStack); | 1332 Address* from = reinterpret_cast<Address*>(m_endOfStack); |
1333 RELEASE_ASSERT(from < to); | 1333 RELEASE_ASSERT(from < to); |
(...skipping 217 matching lines...)
1551 threadDump->addScalar("dead_count", "objects", totalDeadCount); | 1551 threadDump->addScalar("dead_count", "objects", totalDeadCount); |
1552 threadDump->addScalar("live_size", "bytes", totalLiveSize); | 1552 threadDump->addScalar("live_size", "bytes", totalLiveSize); |
1553 threadDump->addScalar("dead_size", "bytes", totalDeadSize); | 1553 threadDump->addScalar("dead_size", "bytes", totalDeadSize); |
1554 | 1554 |
1555 WebMemoryAllocatorDump* heapsDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(heapsDumpName); | 1555 WebMemoryAllocatorDump* heapsDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(heapsDumpName); |
1556 WebMemoryAllocatorDump* classesDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(classesDumpName); | 1556 WebMemoryAllocatorDump* classesDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(classesDumpName); |
1557 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOwnershipEdge(classesDump->guid(), heapsDump->guid()); | 1557 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOwnershipEdge(classesDump->guid(), heapsDump->guid()); |
1558 } | 1558 } |
1559 | 1559 |
1560 } // namespace blink | 1560 } // namespace blink |