| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 514 matching lines...) |
| 525 return mutex; | 525 return mutex; |
| 526 } | 526 } |
| 527 | 527 |
| 528 // TODO(haraken): We should improve the GC heuristics. | 528 // TODO(haraken): We should improve the GC heuristics. |
| 529 // These heuristics affect performance significantly. | 529 // These heuristics affect performance significantly. |
| 530 bool ThreadState::shouldScheduleIdleGC() | 530 bool ThreadState::shouldScheduleIdleGC() |
| 531 { | 531 { |
| 532 if (gcState() != NoGCScheduled) | 532 if (gcState() != NoGCScheduled) |
| 533 return false; | 533 return false; |
| 534 #if ENABLE(IDLE_GC) | 534 #if ENABLE(IDLE_GC) |
| 535 // Avoid potential overflow by truncating to Kb. |
| 536 size_t allocatedObjectSizeKb = Heap::allocatedObjectSize() >> 10; |
| 535 // The estimated size is updated when the main thread finishes lazy | 537 // The estimated size is updated when the main thread finishes lazy |
| 536 // sweeping. If this thread reaches here before the main thread finishes | 538 // sweeping. If this thread reaches here before the main thread finishes |
| 537 // lazy sweeping, the thread will use the estimated size of the last GC. | 539 // lazy sweeping, the thread will use the estimated size of the last GC. |
| 538 size_t estimatedLiveObjectSize = Heap::estimatedLiveObjectSize(); | 540 size_t estimatedLiveObjectSizeKb = Heap::estimatedLiveObjectSize() >> 10; |
| 539 size_t allocatedObjectSize = Heap::allocatedObjectSize(); | |
| 540 // Heap::markedObjectSize() may be underestimated if any thread has not | 541 // Heap::markedObjectSize() may be underestimated if any thread has not |
| 541 // finished completeSweep(). | 542 // finished completeSweep(). |
| 542 size_t currentObjectSize = allocatedObjectSize + Heap::markedObjectSize() + WTF::Partitions::totalSizeOfCommittedPages(); | 543 size_t currentObjectSizeKb = allocatedObjectSizeKb + ((Heap::markedObjectSize() + WTF::Partitions::totalSizeOfCommittedPages()) >> 10); |
| 543 // Schedule an idle GC if Oilpan has allocated more than 1 MB since | 544 // Schedule an idle GC if Oilpan has allocated more than 1 MB since |
| 544 // the last GC and the current memory usage is >50% larger than | 545 // the last GC and the current memory usage is >50% larger than |
| 545 // the estimated live memory usage. | 546 // the estimated live memory usage. |
| 546 return allocatedObjectSize >= 1024 * 1024 && currentObjectSize > estimatedLiveObjectSize * 3 / 2; | 547 return allocatedObjectSizeKb >= 1024 && currentObjectSizeKb > estimatedLiveObjectSizeKb * 3 / 2; |
| 547 #else | 548 #else |
| 548 return false; | 549 return false; |
| 549 #endif | 550 #endif |
| 550 } | 551 } |
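The substance of this hunk is the overflow guard: on 32-bit builds `size_t` is 32 bits wide, so summing several byte counters that can each reach hundreds of MB risks wrapping, whereas values truncated to KB stay far below the limit. A minimal standalone sketch of the patched heuristic, with the `Heap::` and `WTF::Partitions::` queries stubbed out and their return values invented for illustration:

```cpp
#include <cstddef>
#include <cstdio>

// Stand-ins for Heap::allocatedObjectSize(), Heap::markedObjectSize(),
// Heap::estimatedLiveObjectSize() and WTF::Partitions::totalSizeOfCommittedPages();
// the returned values are invented for illustration only.
static size_t allocatedObjectSize() { return 1536 * 1024; }            // 1.5 MB since last GC
static size_t markedObjectSize() { return 200 * 1024 * 1024; }         // 200 MB
static size_t totalSizeOfCommittedPages() { return 80 * 1024 * 1024; } // 80 MB
static size_t estimatedLiveObjectSize() { return 150 * 1024 * 1024; }  // 150 MB

// Mirrors the patched heuristic: the allocated size is truncated to KB up
// front, and the remaining byte counts are shifted to KB before being added,
// so the sum stays far below SIZE_MAX even where size_t is 32 bits.
static bool shouldScheduleIdleGC()
{
    size_t allocatedObjectSizeKb = allocatedObjectSize() >> 10;
    size_t estimatedLiveObjectSizeKb = estimatedLiveObjectSize() >> 10;
    size_t currentObjectSizeKb = allocatedObjectSizeKb
        + ((markedObjectSize() + totalSizeOfCommittedPages()) >> 10);
    // >= 1 MB allocated since the last GC and >50% above the estimated live size.
    return allocatedObjectSizeKb >= 1024
        && currentObjectSizeKb > estimatedLiveObjectSizeKb * 3 / 2;
}

int main()
{
    std::printf("schedule idle GC: %s\n", shouldScheduleIdleGC() ? "yes" : "no");
    return 0;
}
```

Keeping both sides of the comparison in KB units, as the patch does, also keeps the `* 3 / 2` scaling safe from intermediate overflow.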
| 551 | 552 |
| 552 // TODO(haraken): We should improve the GC heuristics. | 553 // TODO(haraken): We should improve the GC heuristics. |
| 553 // These heuristics affect performance significantly. | 554 // These heuristics affect performance significantly. |
| 554 bool ThreadState::shouldSchedulePreciseGC() | 555 bool ThreadState::shouldSchedulePreciseGC() |
| 555 { | 556 { |
| 556 if (gcState() != NoGCScheduled) | 557 if (gcState() != NoGCScheduled) |
| 557 return false; | 558 return false; |
| 558 #if ENABLE(IDLE_GC) | 559 #if ENABLE(IDLE_GC) |
| 559 return false; | 560 return false; |
| 560 #else | 561 #else |
| 562 // Avoid potential overflow by truncating to Kb. |
| 563 size_t allocatedObjectSizeKb = Heap::allocatedObjectSize() >> 10; |
| 561 // The estimated size is updated when the main thread finishes lazy | 564 // The estimated size is updated when the main thread finishes lazy |
| 562 // sweeping. If this thread reaches here before the main thread finishes | 565 // sweeping. If this thread reaches here before the main thread finishes |
| 563 // lazy sweeping, the thread will use the estimated size of the last GC. | 566 // lazy sweeping, the thread will use the estimated size of the last GC. |
| 564 size_t estimatedLiveObjectSize = Heap::estimatedLiveObjectSize(); | 567 size_t estimatedLiveObjectSizeKb = Heap::estimatedLiveObjectSize() >> 10; |
| 565 size_t allocatedObjectSize = Heap::allocatedObjectSize(); | |
| 566 // Heap::markedObjectSize() may be underestimated if any thread has not | 568 // Heap::markedObjectSize() may be underestimated if any thread has not |
| 567 // finished completeSweep(). | 569 // finished completeSweep(). |
| 568 size_t currentObjectSize = allocatedObjectSize + Heap::markedObjectSize() + WTF::Partitions::totalSizeOfCommittedPages(); | 570 size_t currentObjectSizeKb = allocatedObjectSizeKb + ((Heap::markedObjectSize() + WTF::Partitions::totalSizeOfCommittedPages()) >> 10); |
| 569 // Schedule a precise GC if Oilpan has allocated more than 1 MB since | 571 // Schedule a precise GC if Oilpan has allocated more than 1 MB since |
| 570 // the last GC and the current memory usage is >50% larger than | 572 // the last GC and the current memory usage is >50% larger than |
| 571 // the estimated live memory usage. | 573 // the estimated live memory usage. |
| 572 return allocatedObjectSize >= 1024 * 1024 && currentObjectSize > estimatedLiveObjectSize * 3 / 2; | 574 return allocatedObjectSizeKb >= 1024 && currentObjectSizeKb > estimatedLiveObjectSizeKb * 3 / 2; |
| 573 #endif | 575 #endif |
| 574 } | 576 } |
| 575 | 577 |
| 578 bool ThreadState::shouldForceMemoryPressureGC() |
| 579 { |
| 580 // Avoid potential overflow by truncating to Kb. |
| 581 size_t currentObjectSizeKb = (Heap::allocatedObjectSize() + Heap::markedObjectSize() + WTF::Partitions::totalSizeOfCommittedPages()) >> 10; |
| 582 size_t estimatedLiveObjectSizeKb = (Heap::estimatedLiveObjectSize()) >> 10; |
| 583 if (currentObjectSizeKb < 300 * 1024) |
| 584 return false; |
| 585 |
| 586 // If we're consuming too much memory, trigger a conservative GC |
| 587 // aggressively. This is a safe guard to avoid OOM. |
| 588 return currentObjectSizeKb > (estimatedLiveObjectSizeKb * 3) / 2; |
| 589 } |
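The new `shouldForceMemoryPressureGC()` helper only fires once the heap reaches 300 MB (`300 * 1024` KB after truncation) and exceeds the estimated live size by more than 50%. A hedged sketch that lifts the same decision out as a pure function over KB values, with invented inputs, to make the thresholds concrete:

```cpp
#include <cassert>
#include <cstddef>

// Same decision as the new shouldForceMemoryPressureGC(), lifted out as a pure
// function over KB values so the thresholds are easy to exercise in isolation.
// The call sites below use invented numbers.
static bool shouldForceMemoryPressureGC(size_t currentObjectSizeKb, size_t estimatedLiveObjectSizeKb)
{
    // Below 300 MB the memory-pressure path never fires.
    if (currentObjectSizeKb < 300 * 1024)
        return false;
    // More than 50% above the estimated live size: force a conservative GC.
    return currentObjectSizeKb > (estimatedLiveObjectSizeKb * 3) / 2;
}

int main()
{
    // 200 MB heap: under the 300 MB floor, so no memory-pressure GC.
    assert(!shouldForceMemoryPressureGC(200 * 1024, 100 * 1024));
    // 320 MB heap, 240 MB estimated live: 320 MB <= 360 MB (1.5x), still no GC.
    assert(!shouldForceMemoryPressureGC(320 * 1024, 240 * 1024));
    // 320 MB heap, 180 MB estimated live: 320 MB > 270 MB (1.5x), GC is forced.
    assert(shouldForceMemoryPressureGC(320 * 1024, 180 * 1024));
    return 0;
}
```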
| 590 |
| 576 // TODO(haraken): We should improve the GC heuristics. | 591 // TODO(haraken): We should improve the GC heuristics. |
| 577 // These heuristics affect performance significantly. | 592 // These heuristics affect performance significantly. |
| 578 bool ThreadState::shouldForceConservativeGC() | 593 bool ThreadState::shouldForceConservativeGC() |
| 579 { | 594 { |
| 580 if (UNLIKELY(isGCForbidden())) | 595 if (UNLIKELY(isGCForbidden())) |
| 581 return false; | 596 return false; |
| 582 | 597 |
| 598 if (shouldForceMemoryPressureGC()) |
| 599 return true; |
| 600 |
| 601 // Avoid potential overflow by truncating to Kb. |
| 602 size_t allocatedObjectSizeKb = Heap::allocatedObjectSize() >> 10; |
| 583 // The estimated size is updated when the main thread finishes lazy | 603 // The estimated size is updated when the main thread finishes lazy |
| 584 // sweeping. If this thread reaches here before the main thread finishes | 604 // sweeping. If this thread reaches here before the main thread finishes |
| 585 // lazy sweeping, the thread will use the estimated size of the last GC. | 605 // lazy sweeping, the thread will use the estimated size of the last GC. |
| 586 size_t estimatedLiveObjectSize = Heap::estimatedLiveObjectSize(); | 606 size_t estimatedLiveObjectSizeKb = Heap::estimatedLiveObjectSize() >> 10; |
| 587 size_t allocatedObjectSize = Heap::allocatedObjectSize(); | |
| 588 // Heap::markedObjectSize() may be underestimated if any thread has not | 607 // Heap::markedObjectSize() may be underestimated if any thread has not |
| 589 // finished completeSweep(). | 608 // finished completeSweep(). |
| 590 size_t currentObjectSize = allocatedObjectSize + Heap::markedObjectSize() + WTF::Partitions::totalSizeOfCommittedPages(); | 609 size_t currentObjectSizeKb = allocatedObjectSizeKb + ((Heap::markedObjectSize() + WTF::Partitions::totalSizeOfCommittedPages()) >> 10); |
| 591 if (currentObjectSize >= 300 * 1024 * 1024) { | 610 |
| 592 // If we're consuming too much memory, trigger a conservative GC | |
| 593 // aggressively. This is a safe guard to avoid OOM. | |
| 594 return currentObjectSize > estimatedLiveObjectSize * 3 / 2; | |
| 595 } | |
| 596 // Schedule a conservative GC if Oilpan has allocated more than 32 MB since | 611 // Schedule a conservative GC if Oilpan has allocated more than 32 MB since |
| 597 // the last GC and the current memory usage is >400% larger than | 612 // the last GC and the current memory usage is >400% larger than |
| 598 // the estimated live memory usage. | 613 // the estimated live memory usage. |
| 599 // TODO(haraken): 400% is too large. Lower the heap growing factor. | 614 // TODO(haraken): 400% is too large. Lower the heap growing factor. |
| 600 return allocatedObjectSize >= 32 * 1024 * 1024 && currentObjectSize > 5 * estimatedLiveObjectSize; | 615 return allocatedObjectSizeKb >= 32 * 1024 && currentObjectSizeKb > 5 * estimatedLiveObjectSizeKb; |
| 601 } | 616 } |
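After this change `shouldForceConservativeGC()` decides in two tiers: the memory-pressure check runs first, and only if it does not fire does the 32 MB / 5x growth heuristic apply. A small sketch of that ordering, written over KB parameters (the parameter names are invented, the GC-forbidden early return is omitted, and the real code reads the values from `Heap::` and `WTF::Partitions::`):

```cpp
#include <cstddef>
#include <cstdio>

// Tier 1 of the patched shouldForceConservativeGC(): the memory-pressure check.
static bool forceMemoryPressureGC(size_t currentKb, size_t estimatedLiveKb)
{
    return currentKb >= 300 * 1024 && currentKb > (estimatedLiveKb * 3) / 2;
}

// Two-tier decision mirroring the patched shouldForceConservativeGC(), written
// over KB parameters so the ordering is visible at a glance.
static bool forceConservativeGC(size_t allocatedKb, size_t currentKb, size_t estimatedLiveKb)
{
    // Tier 1: urgent GC under memory pressure (safeguard against OOM).
    if (forceMemoryPressureGC(currentKb, estimatedLiveKb))
        return true;
    // Tier 2: >= 32 MB allocated since the last GC and the heap has grown to
    // more than 5x the estimated live size.
    return allocatedKb >= 32 * 1024 && currentKb > 5 * estimatedLiveKb;
}

int main()
{
    // Example: 40 MB allocated since the last GC, 120 MB current, 20 MB estimated live.
    std::printf("force conservative GC: %s\n",
        forceConservativeGC(40 * 1024, 120 * 1024, 20 * 1024) ? "yes" : "no");
    return 0;
}
```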
| 602 | 617 |
| 603 void ThreadState::scheduleGCIfNeeded() | 618 void ThreadState::scheduleGCIfNeeded() |
| 604 { | 619 { |
| 605 checkThread(); | 620 checkThread(); |
| 606 // Allocation is allowed during sweeping, but those allocations should not | 621 // Allocation is allowed during sweeping, but those allocations should not |
| 607 // trigger nested GCs. | 622 // trigger nested GCs. |
| 608 if (isSweepingInProgress()) | 623 if (isSweepingInProgress()) |
| 609 return; | 624 return; |
| 610 ASSERT(!sweepForbidden()); | 625 ASSERT(!sweepForbidden()); |
| (...skipping 171 matching lines...) |
| 782 m_gcState = gcState; | 797 m_gcState = gcState; |
| 783 } | 798 } |
| 784 | 799 |
| 785 #undef VERIFY_STATE_TRANSITION | 800 #undef VERIFY_STATE_TRANSITION |
| 786 | 801 |
| 787 ThreadState::GCState ThreadState::gcState() const | 802 ThreadState::GCState ThreadState::gcState() const |
| 788 { | 803 { |
| 789 return m_gcState; | 804 return m_gcState; |
| 790 } | 805 } |
| 791 | 806 |
| 792 void ThreadState::didV8GC() | 807 void ThreadState::didV8MajorGC(bool forceGC) |
| 793 { | 808 { |
| 794 checkThread(); | 809 checkThread(); |
| 795 if (isMainThread()) { | 810 if (isMainThread()) { |
| 796 // Lower the estimated live object size because the V8 major GC is | 811 // Lower the estimated live object size because the V8 major GC is |
| 797 // expected to have collected a lot of DOM wrappers and dropped | 812 // expected to have collected a lot of DOM wrappers and dropped |
| 798 // references to their DOM objects. | 813 // references to their DOM objects. |
| 799 Heap::setEstimatedLiveObjectSize(Heap::estimatedLiveObjectSize() / 2); | 814 Heap::setEstimatedLiveObjectSize(Heap::estimatedLiveObjectSize() / 2); |
| 815 |
| 816 if (forceGC) { |
| 817 // This single GC is not enough for two reasons: |
| 818 // (1) The GC is not precise because the GC scans on-stack pointers conservatively. |
| 819 // (2) One GC is not enough to break a chain of persistent handles. It's possible that |
| 820 // some heap allocated objects own objects that contain persistent handles |
| 821 // pointing to other heap allocated objects. To break the chain, we need multiple GCs. |
| 822 // |
| 823 // Regarding (1), we force a precise GC at the end of the current event loop. So if you want |
| 824 // to collect all garbage, you need to wait until the next event loop. |
| 825 // Regarding (2), it would be OK in practice to trigger only one GC per gcEpilogue, because |
| 826 // GCController.collectAll() forces 7 V8's GC. |
| 827 Heap::collectGarbage(ThreadState::HeapPointersOnStack, ThreadState::GCWithSweep, Heap::ForcedGC); |
| 828 |
| 829 // Forces a precise GC at the end of the current event loop. |
| 830 ThreadState::current()->setGCState(ThreadState::FullGCScheduled); |
| 831 return; |
| 832 } |
| 833 |
| 834 // If under memory pressure, complete sweeping before initiating |
| 835 // the urgent conservative GC. |
| 836 if (shouldForceMemoryPressureGC()) |
| 837 completeSweep(); |
| 838 |
| 839 // Schedule an Oilpan GC to avoid the following scenario: |
| 840 // (1) A DOM object X holds a v8::Persistent to a V8 object. |
| 841 // Assume that X is small but the V8 object is huge. |
| 842 // The v8::Persistent is released when X is destructed. |
| 843 // (2) X's DOM wrapper is created. |
| 844 // (3) The DOM wrapper becomes unreachable. |
| 845 // (4) V8 triggers a GC. The V8's GC collects the DOM wrapper. |
| 846 // However, X is not collected until a next Oilpan's GC is |
| 847 // triggered. |
| 848 // (5) If a lot of such DOM objects are created, we end up with |
| 849 // a situation where V8's GC collects the DOM wrappers but |
| 850 // the DOM objects are not collected forever. (Note that |
| 851 // Oilpan's GC is not triggered unless Oilpan's heap gets full.) |
| 852 // (6) V8 hits OOM. |
| 853 scheduleGCIfNeeded(); |
| 800 } | 854 } |
| 801 } | 855 } |
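In the `forceGC` branch, `didV8MajorGC()` runs one conservative GC immediately and then schedules a precise full GC for the end of the current event loop via `setGCState(FullGCScheduled)`, since a single conservative pass cannot break chains of persistent handles. Below is a hedged sketch of how a caller might drive this from a V8 major-GC epilogue; the hook name, the include path, and the use of `v8::kGCCallbackFlagForced` to derive `forceGC` are assumptions for illustration, not taken from this patch:

```cpp
// Hypothetical call site, assuming the Blink source layout; the hook name and
// the way forceGC is derived from the callback flags are invented here.
#include <v8.h>
#include "platform/heap/ThreadState.h"

namespace blink {

static void majorGCEpilogueSketch(v8::GCType type, v8::GCCallbackFlags flags)
{
    // Only major (mark-sweep-compact) V8 GCs are relevant here.
    if (type != v8::kGCTypeMarkSweepCompact)
        return;
    // Treat an explicitly forced V8 GC (e.g. one driven by GCController.collectAll())
    // as a request to also force the Oilpan side.
    bool forceGC = (flags & v8::kGCCallbackFlagForced) != 0;
    ThreadState::current()->didV8MajorGC(forceGC);
}

} // namespace blink
```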
| 802 | 856 |
| 803 void ThreadState::runScheduledGC(StackState stackState) | 857 void ThreadState::runScheduledGC(StackState stackState) |
| 804 { | 858 { |
| 805 checkThread(); | 859 checkThread(); |
| 806 if (stackState != NoHeapPointersOnStack) | 860 if (stackState != NoHeapPointersOnStack) |
| 807 return; | 861 return; |
| 808 | 862 |
| 809 // If a safe point is entered while initiating a GC, we clearly do | 863 // If a safe point is entered while initiating a GC, we clearly do |
| (...skipping 597 matching lines...) |
| 1407 json->beginArray(it->key.ascii().data()); | 1461 json->beginArray(it->key.ascii().data()); |
| 1408 for (size_t age = 0; age <= maxHeapObjectAge; ++age) | 1462 for (size_t age = 0; age <= maxHeapObjectAge; ++age) |
| 1409 json->pushInteger(it->value.ages[age]); | 1463 json->pushInteger(it->value.ages[age]); |
| 1410 json->endArray(); | 1464 json->endArray(); |
| 1411 } | 1465 } |
| 1412 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release()); | 1466 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release()); |
| 1413 } | 1467 } |
| 1414 #endif | 1468 #endif |
| 1415 | 1469 |
| 1416 } // namespace blink | 1470 } // namespace blink |