| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 79 matching lines...) |
| 90 : m_thread(currentThread()) | 90 : m_thread(currentThread()) |
| 91 , m_persistentRegion(adoptPtr(new PersistentRegion())) | 91 , m_persistentRegion(adoptPtr(new PersistentRegion())) |
| 92 , m_startOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) | 92 , m_startOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) |
| 93 , m_endOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) | 93 , m_endOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) |
| 94 , m_safePointScopeMarker(nullptr) | 94 , m_safePointScopeMarker(nullptr) |
| 95 , m_atSafePoint(false) | 95 , m_atSafePoint(false) |
| 96 , m_interruptors() | 96 , m_interruptors() |
| 97 , m_sweepForbidden(false) | 97 , m_sweepForbidden(false) |
| 98 , m_noAllocationCount(0) | 98 , m_noAllocationCount(0) |
| 99 , m_gcForbiddenCount(0) | 99 , m_gcForbiddenCount(0) |
| 100 , m_persistentAllocated(0) | 100 , m_wrapperAllocated(0) |
| 101 , m_persistentFreed(0) | 101 , m_wrapperFreed(0) |
| 102 , m_vectorBackingHeapIndex(Vector1HeapIndex) | 102 , m_vectorBackingHeapIndex(Vector1HeapIndex) |
| 103 , m_currentHeapAges(0) | 103 , m_currentHeapAges(0) |
| 104 , m_isTerminating(false) | 104 , m_isTerminating(false) |
| 105 , m_gcMixinMarker(nullptr) | 105 , m_gcMixinMarker(nullptr) |
| 106 , m_shouldFlushHeapDoesNotContainCache(false) | 106 , m_shouldFlushHeapDoesNotContainCache(false) |
| 107 , m_gcState(NoGCScheduled) | 107 , m_gcState(NoGCScheduled) |
| 108 , m_traceDOMWrappers(nullptr) | 108 , m_traceDOMWrappers(nullptr) |
| 109 #if defined(ADDRESS_SANITIZER) | 109 #if defined(ADDRESS_SANITIZER) |
| 110 , m_asanFakeStack(__asan_get_current_fake_stack()) | 110 , m_asanFakeStack(__asan_get_current_fake_stack()) |
| 111 #endif | 111 #endif |
| (...skipping 439 matching lines...) |
| 551 if (isMainThread()) | 551 if (isMainThread()) |
| 552 ScriptForbiddenScope::exit(); | 552 ScriptForbiddenScope::exit(); |
| 553 } | 553 } |
| 554 | 554 |
| 555 CrossThreadPersistentRegion& ThreadState::crossThreadPersistentRegion() | 555 CrossThreadPersistentRegion& ThreadState::crossThreadPersistentRegion() |
| 556 { | 556 { |
| 557 AtomicallyInitializedStaticReference(CrossThreadPersistentRegion, persistentRegion, new CrossThreadPersistentRegion()); | 557 AtomicallyInitializedStaticReference(CrossThreadPersistentRegion, persistentRegion, new CrossThreadPersistentRegion()); |
| 558 return persistentRegion; | 558 return persistentRegion; |
| 559 } | 559 } |
| 560 | 560 |
| 561 void ThreadState::updatePersistentCounters() | 561 void ThreadState::updateWrapperCounters() |
| 562 { | 562 { |
| 563 if (m_persistentAllocated >= m_persistentFreed) | 563 if (m_wrapperAllocated >= m_wrapperFreed) |
| 564 Heap::increasePersistentCount(m_persistentAllocated - m_persistentFreed); | 564 Heap::increaseWrapperCount(m_wrapperAllocated - m_wrapperFreed); |
| 565 else | 565 else |
| 566 Heap::decreasePersistentCount(m_persistentFreed - m_persistentAllocated); | 566 Heap::decreaseWrapperCount(m_wrapperFreed - m_wrapperAllocated); |
| 567 Heap::increaseCollectedPersistentCount(m_persistentFreed); | 567 Heap::increaseCollectedWrapperCount(m_wrapperFreed); |
| 568 m_persistentAllocated = 0; | 568 m_wrapperAllocated = 0; |
| 569 m_persistentFreed = 0; | 569 m_wrapperFreed = 0; |
| 570 } | 570 } |
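Note: m_wrapperAllocated and m_wrapperFreed are size_t, so the net change is applied with a branch rather than a single signed addition; subtracting the larger value first would underflow. A minimal standalone sketch of the pattern (the global counter and function name here are hypothetical, not Blink's API):

    #include <cstddef>

    static size_t globalWrapperCount = 0; // hypothetical process-wide counter

    // Fold a per-thread (allocated, freed) pair into the global count
    // without ever forming a negative size_t.
    void applyWrapperDelta(size_t allocated, size_t freed)
    {
        if (allocated >= freed)
            globalWrapperCount += allocated - freed; // net growth
        else
            globalWrapperCount -= freed - allocated; // net shrinkage
    }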
| 571 | 571 |
| 572 size_t ThreadState::estimatedLiveObjectSize() | 572 double ThreadState::partitionAllocGrowingRate() |
| 573 { | 573 { |
| 574 // We estimate the live object size with the following equations. | 574 size_t sizeAtLastGC = Heap::partitionAllocSizeAtLastGC(); |
| 575 // | 575 size_t wrapperCountAtLastGC = Heap::wrapperCountAtLastGC(); |
| 576 // heapSizePerPersistent = (marked(t0, t1) + partitionAlloc(t0)) / persistentCount(t0) | 576 size_t sizeRetainedByCollectedPersistents = wrapperCountAtLastGC ? Heap::collectedWrapperCount() * sizeAtLastGC / wrapperCountAtLastGC : 0; |
| 577 // estimatedLiveObjectSize = marked(t0, t) + allocated(t0, t) + partitionAlloc(t) - heapSizePerPersistent * collectedPersistentCount(t0, t) | 577 size_t estimatedLiveSize = sizeAtLastGC > sizeRetainedByCollectedPersistents ? sizeAtLastGC - sizeRetainedByCollectedPersistents : 0; |
| 578 // | 578 double growingRate = estimatedLiveSize ? 1.0 * WTF::Partitions::totalSizeOfCommittedPages() / estimatedLiveSize : 0; |
| 579 // t0: The time when the last collectGarbage runs. | 579 TRACE_COUNTER1("blink_gc", "ThreadState::estimatedLivePartitionAllocSizeKB", std::min(estimatedLiveSize / 1024, static_cast<size_t>(INT_MAX))); |
| 580 // t1: The time when the last completeSweep runs. | 580 TRACE_COUNTER1("blink_gc", "ThreadState::partitionAllocGrowingRate", static_cast<int>(100 * growingRate)); |
| 581 // t: The current time. | 581 return growingRate; |
| 582 // marked(t0, t): The size of marked objects between t0 and t. | |
| 583 // allocated(t0, t): The size of newly allocated objects between t0 and t. | |
| 584 // persistentCount(t): The number of existing persistent handles at t. | |
| 585 // collectedPersistentCount(t0, t): | |
| 586 // The number of persistent handles collected between | |
| 587 // t0 and t. | |
| 588 // partitionAlloc(t): The size of allocated memory in PartitionAlloc at t. | |
| 589 size_t currentHeapSize = currentObjectSize(); | |
| 590 size_t heapSizeRetainedByCollectedPersistents = Heap::heapSizePerPersistent() * Heap::collectedPersistentCount(); | |
| 591 size_t estimatedSize = 0; | |
| 592 if (currentHeapSize > heapSizeRetainedByCollectedPersistents) | |
| 593 estimatedSize = currentHeapSize - heapSizeRetainedByCollectedPersistents; | |
| 594 TRACE_COUNTER1("blink_gc", "ThreadState::currentHeapSizeKB", std::min(currentHeapSize / 1024, static_cast<size_t>(INT_MAX))); | |
| 595 TRACE_COUNTER1("blink_gc", "ThreadState::estimatedLiveObjectSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_MAX))); | |
| 596 TRACE_COUNTER1("blink_gc", "ThreadState::heapGrowingRate", static_cast<int>(100.0 * currentHeapSize / estimatedSize)); | |
| 597 return estimatedSize; | |
| 598 } | 582 } |
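To make the estimate concrete: the new code assumes each wrapper alive at the last GC retained, on average, sizeAtLastGC / wrapperCountAtLastGC bytes of PartitionAlloc memory, and subtracts that share for every wrapper collected since. A worked sketch with made-up numbers (this version divides before multiplying, which sidesteps the size_t overflow the multiply-first form in the patch could hit on 32-bit builds):

    #include <cstddef>
    #include <cstdio>

    int main()
    {
        size_t sizeAtLastGC = 80u * 1024 * 1024; // 80 MB of PartitionAlloc at the last GC
        size_t wrappersAtLastGC = 4000;          // wrappers alive at the last GC
        size_t collectedWrappers = 1000;         // wrappers collected since then
        size_t committedNow = 90u * 1024 * 1024; // committed PartitionAlloc pages now

        // Average retention per wrapper, then the share freed by collected wrappers.
        size_t retained = collectedWrappers * (sizeAtLastGC / wrappersAtLastGC); // ~20 MB
        size_t estimatedLiveSize = sizeAtLastGC - retained;                      // ~60 MB
        double growingRate = 1.0 * committedNow / estimatedLiveSize;             // ~1.5
        printf("partitionAllocGrowingRate ~= %.2f\n", growingRate);
        return 0;
    }

A growing rate of 1.5 therefore means committed PartitionAlloc memory sits 50% above the estimated live size.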
| 599 | 583 |
| 600 size_t ThreadState::currentObjectSize() | 584 double ThreadState::heapGrowingRate() |
| 601 { | 585 { |
| 602 return Heap::allocatedObjectSize() + Heap::markedObjectSize() + WTF::Partitions::totalSizeOfCommittedPages(); | 586 size_t markedObjectSize = Heap::markedObjectSizeAtLastCompleteSweep(); |
| 587 size_t currentObjectSize = Heap::allocatedObjectSize() + markedObjectSize; |
| 588 double growingRate = markedObjectSize ? 1.0 * currentObjectSize / markedObjectSize : 0; |
| 589 TRACE_COUNTER1("blink_gc", "ThreadState::currentObjectSizeKB", std::min(currentObjectSize / 1024, static_cast<size_t>(INT_MAX))); |
| 590 TRACE_COUNTER1("blink_gc", "ThreadState::heapGrowingRate", static_cast<int>(100 * growingRate)); |
| 591 return growingRate; |
| 592 } |
| 593 |
| 594 // TODO(haraken): We should improve the GC heuristics. These heuristics |
| 595 // significantly affect performance. |
| 596 bool ThreadState::judgeGCThreshold(size_t allocatedObjectSizeThreshold, double heapGrowingRateThreshold, double partitionAllocGrowingRateThreshold) |
| 597 { |
| 598 if (Heap::allocatedObjectSize() < allocatedObjectSizeThreshold) |
| 599 return false; |
| 600 if (heapGrowingRate() >= heapGrowingRateThreshold) |
| 601 return true; |
| 602 return partitionAllocGrowingRate() >= partitionAllocGrowingRateThreshold; |
| 603 } | 603 } |
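heapGrowingRate() is the Oilpan-side analogue, with the marked size at the last complete sweep standing in for the live size: with 10 MB marked at the last complete sweep and 40 MB allocated since, currentObjectSize is 50 MB and the rate is 50 / 10 = 5.0, enough to trip even the conservative-GC threshold below. judgeGCThreshold() then gates a GC on three signals: a floor on new allocation, then either growing rate. A standalone sketch of the gating order, with the rates passed in as plain values (thresholds taken from the precise-GC call site below):

    #include <cstddef>

    // Mirrors the shape of judgeGCThreshold(). In the real function the rate
    // helpers run only after the allocation floor passes, thanks to
    // short-circuit evaluation.
    bool judgeThresholdSketch(size_t allocatedSinceLastGC, double heapRate, double partitionRate)
    {
        const size_t allocatedFloor = 1024 * 1024; // 1 MB: the precise/idle GC tuning
        if (allocatedSinceLastGC < allocatedFloor)
            return false;            // not enough churn since the last GC
        if (heapRate >= 1.5)
            return true;             // Oilpan heap is >= 50% above its live estimate
        return partitionRate >= 1.5; // or PartitionAlloc is
    }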
| 604 | 604 |
| 605 bool ThreadState::shouldForceMemoryPressureGC() | 605 bool ThreadState::shouldForceMemoryPressureGC() |
| 606 { | 606 { |
| 607 // Avoid potential overflow by truncating to Kb. | 607 size_t totalObjectSize = Heap::allocatedObjectSize() + Heap::markedObjectSizeAtLastCompleteSweep() + WTF::Partitions::totalSizeOfCommittedPages(); |
| 608 size_t currentObjectSizeKb = currentObjectSize() >> 10; | 608 if (totalObjectSize < 300 * 1024 * 1024) |
| 609 if (currentObjectSizeKb < 300 * 1024) | |
| 610 return false; | 609 return false; |
| 611 | 610 return judgeGCThreshold(0, 1.5, 1.5); |
| 612 size_t estimatedLiveObjectSizeKb = estimatedLiveObjectSize() >> 10; | |
| 613 // If we're consuming too much memory, trigger a conservative GC | |
| 614 // aggressively. This is a safe guard to avoid OOM. | |
| 615 return currentObjectSizeKb > (estimatedLiveObjectSizeKb * 3) / 2; | |
| 616 } | 611 } |
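Because the allocated-size threshold passed here is 0, the 300 MB floor above is the only absolute gate: past it, a growing rate of at least 1.5 on either heap, i.e. committed memory 50% above the live estimate, is treated as memory pressure and forces a GC.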
| 617 | 612 |
| 618 // TODO(haraken): We should improve the GC heuristics. | |
| 619 // These heuristics affect performance significantly. | |
| 620 bool ThreadState::shouldScheduleIdleGC() | 613 bool ThreadState::shouldScheduleIdleGC() |
| 621 { | 614 { |
| 622 if (gcState() != NoGCScheduled) | 615 if (gcState() != NoGCScheduled) |
| 623 return false; | 616 return false; |
| 624 #if ENABLE(IDLE_GC) | 617 #if ENABLE(IDLE_GC) |
| 625 // Avoid potential overflow by truncating to Kb. | 618 return judgeGCThreshold(1024 * 1024, 1.5, 1.5); |
| 626 size_t allocatedObjectSizeKb = Heap::allocatedObjectSize() >> 10; | |
| 627 // The estimated size is updated when the main thread finishes lazy | |
| 628 // sweeping. If this thread reaches here before the main thread finishes | |
| 629 // lazy sweeping, the thread will use the estimated size of the last GC. | |
| 630 size_t estimatedLiveObjectSizeKb = estimatedLiveObjectSize() >> 10; | |
| 631 // Heap::markedObjectSize() may be underestimated if any thread has not | |
| 632 // finished completeSweep(). | |
| 633 size_t currentObjectSizeKb = currentObjectSize() >> 10; | |
| 634 // Schedule an idle GC if Oilpan has allocated more than 1 MB since | |
| 635 // the last GC and the current memory usage is >50% larger than | |
| 636 // the estimated live memory usage. | |
| 637 return allocatedObjectSizeKb >= 1024 && currentObjectSizeKb > (estimatedLiveObjectSizeKb * 3) / 2; | |
| 638 #else | 619 #else |
| 639 return false; | 620 return false; |
| 640 #endif | 621 #endif |
| 641 } | 622 } |
| 642 | 623 |
| 643 // TODO(haraken): We should improve the GC heuristics. | |
| 644 // These heuristics affect performance significantly. | |
| 645 bool ThreadState::shouldSchedulePreciseGC() | 624 bool ThreadState::shouldSchedulePreciseGC() |
| 646 { | 625 { |
| 647 if (gcState() != NoGCScheduled) | 626 if (isGCForbidden()) |
| 648 return false; | 627 return false; |
| 628 |
| 629 if (shouldForceMemoryPressureGC()) |
| 630 return true; |
| 631 |
| 649 #if ENABLE(IDLE_GC) | 632 #if ENABLE(IDLE_GC) |
| 650 return false; | 633 return judgeGCThreshold(32 * 1024 * 1024, 4.0, 4.0); |
| 634 |
| 651 #else | 635 #else |
| 652 // Avoid potential overflow by truncating to Kb. | 636 return judgeGCThreshold(1024 * 1024, 1.5, 1.5); |
| 653 size_t allocatedObjectSizeKb = Heap::allocatedObjectSize() >> 10; | |
| 654 // The estimated size is updated when the main thread finishes lazy | |
| 655 // sweeping. If this thread reaches here before the main thread finishes | |
| 656 // lazy sweeping, the thread will use the estimated size of the last GC. | |
| 657 size_t estimatedLiveObjectSizeKb = estimatedLiveObjectSize() >> 10; | |
| 658 // Heap::markedObjectSize() may be underestimated if any thread has not | |
| 659 // finished completeSweep(). | |
| 660 size_t currentObjectSizeKb = currentObjectSize() >> 10; | |
| 661 // Schedule a precise GC if Oilpan has allocated more than 1 MB since | |
| 662 // the last GC and the current memory usage is >50% larger than | |
| 663 // the estimated live memory usage. | |
| 664 return allocatedObjectSizeKb >= 1024 && currentObjectSizeKb > (estimatedLiveObjectSizeKb * 3) / 2; | |
| 665 #endif | 637 #endif |
| 666 } | 638 } |
| 667 | 639 |
| 668 bool ThreadState::shouldSchedulePageNavigationGC(float estimatedRemovalRatio) | 640 bool ThreadState::shouldSchedulePageNavigationGC(float estimatedRemovalRatio) |
| 669 { | 641 { |
| 670 if (UNLIKELY(isGCForbidden())) | 642 if (UNLIKELY(isGCForbidden())) |
| 671 return false; | 643 return false; |
| 672 | 644 |
| 673 if (shouldForceMemoryPressureGC()) | 645 if (shouldForceMemoryPressureGC()) |
| 674 return true; | 646 return true; |
| (...skipping 32 matching lines...) |
| 707 { | 679 { |
| 708 ASSERT(checkThread()); | 680 ASSERT(checkThread()); |
| 709 ASSERT(!isSweepingInProgress()); | 681 ASSERT(!isSweepingInProgress()); |
| 710 setGCState(PageNavigationGCScheduled); | 682 setGCState(PageNavigationGCScheduled); |
| 711 } | 683 } |
| 712 | 684 |
| 713 // TODO(haraken): We should improve the GC heuristics. | 685 // TODO(haraken): We should improve the GC heuristics. |
| 714 // These heuristics affect performance significantly. | 686 // These heuristics affect performance significantly. |
| 715 bool ThreadState::shouldForceConservativeGC() | 687 bool ThreadState::shouldForceConservativeGC() |
| 716 { | 688 { |
| 717 if (UNLIKELY(isGCForbidden())) | 689 if (isGCForbidden()) |
| 718 return false; | 690 return false; |
| 719 | 691 |
| 720 if (shouldForceMemoryPressureGC()) | 692 if (shouldForceMemoryPressureGC()) |
| 721 return true; | 693 return true; |
| 722 | 694 |
| 723 // Avoid potential overflow by truncating to Kb. | 695 return judgeGCThreshold(32 * 1024 * 1024, 5.0, 5.0); |
| 724 size_t allocatedObjectSizeKb = Heap::allocatedObjectSize() >> 10; | |
| 725 // The estimated size is updated when the main thread finishes lazy | |
| 726 // sweeping. If this thread reaches here before the main thread finishes | |
| 727 // lazy sweeping, the thread will use the estimated size of the last GC. | |
| 728 size_t estimatedLiveObjectSizeKb = estimatedLiveObjectSize() >> 10; | |
| 729 // Heap::markedObjectSize() may be underestimated if any thread has not | |
| 730 // finished completeSweep(). | |
| 731 size_t currentObjectSizeKb = currentObjectSize() >> 10; | |
| 732 // Schedule a conservative GC if Oilpan has allocated more than 32 MB since | |
| 733 // the last GC and the current memory usage is >400% larger than | |
| 734 // the estimated live memory usage. | |
| 735 // TODO(haraken): 400% is too large. Lower the heap growing factor. | |
| 736 return allocatedObjectSizeKb >= 32 * 1024 && currentObjectSizeKb > 5 * estimatedLiveObjectSizeKb; | |
| 737 } | 696 } |
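For reference, the thresholds this patch wires into judgeGCThreshold(), as (allocated-since-last-GC floor, heap growing rate, PartitionAlloc growing rate):

    memory-pressure GC: (0, 1.5, 1.5), consulted only past 300 MB of total committed memory
    idle GC: (1 MB, 1.5, 1.5)
    precise GC: (1 MB, 1.5, 1.5); (32 MB, 4.0, 4.0) in ENABLE(IDLE_GC) builds
    conservative GC: (32 MB, 5.0, 5.0)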
| 738 | 697 |
| 739 void ThreadState::scheduleGCIfNeeded() | 698 void ThreadState::scheduleGCIfNeeded() |
| 740 { | 699 { |
| 741 ASSERT(checkThread()); | 700 ASSERT(checkThread()); |
| 742 // Allocation is allowed during sweeping, but those allocations should not | 701 // Allocation is allowed during sweeping, but those allocations should not |
| 743 // trigger nested GCs. | 702 // trigger nested GCs. |
| 744 if (isSweepingInProgress()) | 703 if (isSweepingInProgress()) |
| 745 return; | 704 return; |
| 746 ASSERT(!sweepForbidden()); | 705 ASSERT(!sweepForbidden()); |
| (...skipping 184 matching lines...) |
| 931 #endif | 890 #endif |
| 932 } | 891 } |
| 933 | 892 |
| 934 #undef VERIFY_STATE_TRANSITION | 893 #undef VERIFY_STATE_TRANSITION |
| 935 | 894 |
| 936 ThreadState::GCState ThreadState::gcState() const | 895 ThreadState::GCState ThreadState::gcState() const |
| 937 { | 896 { |
| 938 return m_gcState; | 897 return m_gcState; |
| 939 } | 898 } |
| 940 | 899 |
| 941 void ThreadState::didV8MajorGC() | |
| 942 { | |
| 943 ASSERT(checkThread()); | |
| 944 if (isMainThread()) { | |
| 945 if (shouldForceMemoryPressureGC()) { | |
| 946 // Under memory pressure, force a conservative GC. | |
| 947 Heap::collectGarbage(HeapPointersOnStack, GCWithoutSweep, Heap::ConservativeGC); | |
| 948 return; | |
| 949 } | |
| 950 } | |
| 951 } | |
| 952 | |
| 953 void ThreadState::runScheduledGC(StackState stackState) | 900 void ThreadState::runScheduledGC(StackState stackState) |
| 954 { | 901 { |
| 955 ASSERT(checkThread()); | 902 ASSERT(checkThread()); |
| 956 if (stackState != NoHeapPointersOnStack) | 903 if (stackState != NoHeapPointersOnStack) |
| 957 return; | 904 return; |
| 958 | 905 |
| 959 // If a safe point is entered while initiating a GC, we clearly do | 906 // If a safe point is entered while initiating a GC, we clearly do |
| 960 // not want to do another as part of that -- the safe point is only | 907 // not want to do another as part of that -- the safe point is only |
| 961 // entered after checking if a scheduled GC ought to run first. | 908 // entered after checking if a scheduled GC ought to run first. |
| 962 // Prevent that from happening by marking GCs as forbidden while | 909 // Prevent that from happening by marking GCs as forbidden while |
| (...skipping 42 matching lines...) |
| 1005 m_heaps[i]->makeConsistentForMutator(); | 952 m_heaps[i]->makeConsistentForMutator(); |
| 1006 } | 953 } |
| 1007 | 954 |
| 1008 void ThreadState::preGC() | 955 void ThreadState::preGC() |
| 1009 { | 956 { |
| 1010 ASSERT(!isInGC()); | 957 ASSERT(!isInGC()); |
| 1011 setGCState(GCRunning); | 958 setGCState(GCRunning); |
| 1012 makeConsistentForGC(); | 959 makeConsistentForGC(); |
| 1013 flushHeapDoesNotContainCacheIfNeeded(); | 960 flushHeapDoesNotContainCacheIfNeeded(); |
| 1014 clearHeapAges(); | 961 clearHeapAges(); |
| 1015 updatePersistentCounters(); | 962 updateWrapperCounters(); |
| 1016 } | 963 } |
| 1017 | 964 |
| 1018 void ThreadState::postGC(GCType gcType) | 965 void ThreadState::postGC(GCType gcType) |
| 1019 { | 966 { |
| 1020 ASSERT(isInGC()); | 967 ASSERT(isInGC()); |
| 1021 | 968 |
| 1022 #if ENABLE(GC_PROFILING) | 969 #if ENABLE(GC_PROFILING) |
| 1023 // We snapshot the heap prior to sweeping to get numbers for both resources | 970 // We snapshot the heap prior to sweeping to get numbers for both resources |
| 1024 // that have been allocated since the last GC and for resources that are | 971 // that have been allocated since the last GC and for resources that are |
| 1025 // going to be freed. | 972 // going to be freed. |
| (...skipping 160 matching lines...) |
| 1186 | 1133 |
| 1187 postSweep(); | 1134 postSweep(); |
| 1188 } | 1135 } |
| 1189 | 1136 |
| 1190 void ThreadState::postSweep() | 1137 void ThreadState::postSweep() |
| 1191 { | 1138 { |
| 1192 ASSERT(checkThread()); | 1139 ASSERT(checkThread()); |
| 1193 Heap::reportMemoryUsageForTracing(); | 1140 Heap::reportMemoryUsageForTracing(); |
| 1194 | 1141 |
| 1195 if (isMainThread()) { | 1142 if (isMainThread()) { |
| 1196 // See the comment in estimatedLiveObjectSize() for what we're | |
| 1197 // calculating here. | |
| 1198 // | |
| 1199 // Heap::markedObjectSize() may be underestimated here if any other | 1143 // Heap::markedObjectSize() may be underestimated here if any other |
| 1200 // thread has not yet finished lazy sweeping. | 1144 // thread has not yet finished lazy sweeping. |
| 1201 if (Heap::persistentCountAtLastGC() > 0) { | 1145 if (Heap::objectSizeAtLastGC()) { |
| 1202 TRACE_EVENT1("blink_gc", "ThreadState::postSweep", "collection rate", 1.0 * Heap::markedObjectSize() / Heap::objectSizeAtLastGC()); | 1146 TRACE_EVENT1("blink_gc", "ThreadState::postSweep", "collection rate", 1 - 1.0 * Heap::markedObjectSize() / Heap::objectSizeAtLastGC()); |
| 1203 Heap::setHeapSizePerPersistent((Heap::markedObjectSize() + Heap::partitionAllocSizeAtLastGC()) / Heap::persistentCountAtLastGC()); | 1147 Heap::setMarkedObjectSizeAtLastCompleteSweep(Heap::markedObjectSize()); |
| 1204 } | 1148 } |
| 1205 } | 1149 } |
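The old trace value was a survival rate rather than a collection rate: with 100 MB of objects at the last GC and 30 MB marked as still live, the old expression logged 30 / 100 = 0.3, while the corrected one logs 1 - 0.3 = 0.7, i.e. 70% of the heap was reclaimed.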
| 1206 | 1150 |
| 1207 switch (gcState()) { | 1151 switch (gcState()) { |
| 1208 case Sweeping: | 1152 case Sweeping: |
| 1209 setGCState(NoGCScheduled); | 1153 setGCState(NoGCScheduled); |
| 1210 break; | 1154 break; |
| 1211 case SweepingAndPreciseGCScheduled: | 1155 case SweepingAndPreciseGCScheduled: |
| 1212 setGCState(PreciseGCScheduled); | 1156 setGCState(PreciseGCScheduled); |
| 1213 break; | 1157 break; |
| (...skipping 418 matching lines...) |
| 1632 json->beginArray(it->key.ascii().data()); | 1576 json->beginArray(it->key.ascii().data()); |
| 1633 for (size_t age = 0; age <= maxHeapObjectAge; ++age) | 1577 for (size_t age = 0; age <= maxHeapObjectAge; ++age) |
| 1634 json->pushInteger(it->value.ages[age]); | 1578 json->pushInteger(it->value.ages[age]); |
| 1635 json->endArray(); | 1579 json->endArray(); |
| 1636 } | 1580 } |
| 1637 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release()); | 1581 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release()); |
| 1638 } | 1582 } |
| 1639 #endif | 1583 #endif |
| 1640 | 1584 |
| 1641 } // namespace blink | 1585 } // namespace blink |