OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 513 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
524 return mutex; | 524 return mutex; |
525 } | 525 } |
526 | 526 |
527 // TODO(haraken): We should improve the GC heuristics. | 527 // TODO(haraken): We should improve the GC heuristics. |
528 // These heuristics affect performance significantly. | 528 // These heuristics affect performance significantly. |
529 bool ThreadState::shouldScheduleIdleGC() | 529 bool ThreadState::shouldScheduleIdleGC() |
530 { | 530 { |
531 if (gcState() != NoGCScheduled) | 531 if (gcState() != NoGCScheduled) |
532 return false; | 532 return false; |
533 #if ENABLE(OILPAN) | 533 #if ENABLE(OILPAN) |
534 // Avoid potential overflow by truncating to Kb. | |
535 size_t allocatedObjectSizeKb = Heap::allocatedObjectSize() >> 10; | |
534 // The estimated size is updated when the main thread finishes lazy | 536 // The estimated size is updated when the main thread finishes lazy |
535 // sweeping. If this thread reaches here before the main thread finishes | 537 // sweeping. If this thread reaches here before the main thread finishes |
536 // lazy sweeping, the thread will use the estimated size of the last GC. | 538 // lazy sweeping, the thread will use the estimated size of the last GC. |
537 size_t estimatedLiveObjectSize = Heap::estimatedLiveObjectSize(); | 539 size_t estimatedLiveObjectSizeKb = Heap::estimatedLiveObjectSize() >> 10; |
538 size_t allocatedObjectSize = Heap::allocatedObjectSize(); | |
539 // Heap::markedObjectSize() may be underestimated if any thread has not | 540 // Heap::markedObjectSize() may be underestimated if any thread has not |
540 // finished completeSweep(). | 541 // finished completeSweep(). |
541 size_t currentObjectSize = allocatedObjectSize + Heap::markedObjectSize() + WTF::Partitions::totalSizeOfCommittedPages(); | 542 size_t currentObjectSizeKb = allocatedObjectSizeKb + ((Heap::markedObjectSiz e() + WTF::Partitions::totalSizeOfCommittedPages()) >> 10); |
542 // Schedule an idle GC if Oilpan has allocated more than 1 MB since | 543 // Schedule an idle GC if Oilpan has allocated more than 1 MB since |
543 // the last GC and the current memory usage is >50% larger than | 544 // the last GC and the current memory usage is >50% larger than |
544 // the estimated live memory usage. | 545 // the estimated live memory usage. |
545 return allocatedObjectSize >= 1024 * 1024 && currentObjectSize > estimatedLi veObjectSize * 3 / 2; | 546 return allocatedObjectSizeKb >= 1024 && currentObjectSizeKb > estimatedLiveO bjectSizeKb * 3 / 2; |
546 #else | 547 #else |
547 return false; | 548 return false; |
548 #endif | 549 #endif |
549 } | 550 } |
550 | 551 |
551 // TODO(haraken): We should improve the GC heuristics. | 552 // TODO(haraken): We should improve the GC heuristics. |
552 // These heuristics affect performance significantly. | 553 // These heuristics affect performance significantly. |
553 bool ThreadState::shouldSchedulePreciseGC() | 554 bool ThreadState::shouldSchedulePreciseGC() |
554 { | 555 { |
555 if (gcState() != NoGCScheduled) | 556 if (gcState() != NoGCScheduled) |
556 return false; | 557 return false; |
557 #if ENABLE(OILPAN) | 558 #if ENABLE(OILPAN) |
558 return false; | 559 return false; |
559 #else | 560 #else |
561 // Avoid potential overflow by truncating to Kb. | |
562 size_t allocatedObjectSizeKb = Heap::allocatedObjectSize() >> 10; | |
560 // The estimated size is updated when the main thread finishes lazy | 563 // The estimated size is updated when the main thread finishes lazy |
561 // sweeping. If this thread reaches here before the main thread finishes | 564 // sweeping. If this thread reaches here before the main thread finishes |
562 // lazy sweeping, the thread will use the estimated size of the last GC. | 565 // lazy sweeping, the thread will use the estimated size of the last GC. |
563 size_t estimatedLiveObjectSize = Heap::estimatedLiveObjectSize(); | 566 size_t estimatedLiveObjectSizeKb = Heap::estimatedLiveObjectSize() >> 10; |
564 size_t allocatedObjectSize = Heap::allocatedObjectSize(); | |
565 // Heap::markedObjectSize() may be underestimated if any thread has not | 567 // Heap::markedObjectSize() may be underestimated if any thread has not |
566 // finished completeSweep(). | 568 // finished completeSweep(). |
567 size_t currentObjectSize = allocatedObjectSize + Heap::markedObjectSize() + WTF::Partitions::totalSizeOfCommittedPages(); | 569 size_t currentObjectSizeKb = allocatedObjectSizeKb + ((Heap::markedObjectSize( ) + WTF::Partitions::totalSizeOfCommittedPages()) >> 10); |
568 // Schedule a precise GC if Oilpan has allocated more than 1 MB since | 570 // Schedule a precise GC if Oilpan has allocated more than 1 MB since |
569 // the last GC and the current memory usage is >50% larger than | 571 // the last GC and the current memory usage is >50% larger than |
570 // the estimated live memory usage. | 572 // the estimated live memory usage. |
571 return allocatedObjectSize >= 1024 * 1024 && currentObjectSize > estimatedLi veObjectSize * 3 / 2; | 573 return allocatedObjectSizeKb >= 1024 && currentObjectSizeKb > estimatedLiveO bjectSizeKb * 3 / 2; |
572 #endif | 574 #endif |
573 } | 575 } |
574 | 576 |
577 bool ThreadState::shouldForceMemoryPressureGC() | |
578 { | |
579 // Avoid potential overflow by truncating to Kb. | |
580 size_t currentObjectSizeKb = (Heap::allocatedObjectSize() + Heap::markedObje ctSize() + WTF::Partitions::totalSizeOfCommittedPages()) >> 10; | |
581 size_t estimatedLiveObjectSizeKb = (Heap::estimatedLiveObjectSize()) >> 10; | |
582 if (currentObjectSizeKb < 300 * 1024) | |
583 return false; | |
584 | |
585 // If we're consuming too much memory, trigger a conservative GC | |
586 // aggressively. This is a safe guard to avoid OOM. | |
587 return currentObjectSizeKb > (estimatedLiveObjectSizeKb * 3) / 2; | |
588 } | |
589 | |
575 // TODO(haraken): We should improve the GC heuristics. | 590 // TODO(haraken): We should improve the GC heuristics. |
576 // These heuristics affect performance significantly. | 591 // These heuristics affect performance significantly. |
577 bool ThreadState::shouldForceConservativeGC() | 592 bool ThreadState::shouldForceConservativeGC() |
578 { | 593 { |
579 if (UNLIKELY(isGCForbidden())) | 594 if (UNLIKELY(isGCForbidden())) |
580 return false; | 595 return false; |
581 | 596 |
597 if (shouldForceMemoryPressureGC()) | |
598 return true; | |
599 | |
600 // Avoid potential overflow by truncating to Kb. | |
601 size_t allocatedObjectSizeKb = Heap::allocatedObjectSize() >> 10; | |
582 // The estimated size is updated when the main thread finishes lazy | 602 // The estimated size is updated when the main thread finishes lazy |
583 // sweeping. If this thread reaches here before the main thread finishes | 603 // sweeping. If this thread reaches here before the main thread finishes |
584 // lazy sweeping, the thread will use the estimated size of the last GC. | 604 // lazy sweeping, the thread will use the estimated size of the last GC. |
585 size_t estimatedLiveObjectSize = Heap::estimatedLiveObjectSize(); | 605 size_t estimatedLiveObjectSizeKb = Heap::estimatedLiveObjectSize() >> 10; |
586 size_t allocatedObjectSize = Heap::allocatedObjectSize(); | |
587 // Heap::markedObjectSize() may be underestimated if any thread has not | 606 // Heap::markedObjectSize() may be underestimated if any thread has not |
588 // finished completeSweep(). | 607 // finished completeSweep(). |
589 size_t currentObjectSize = allocatedObjectSize + Heap::markedObjectSize() + WTF::Partitions::totalSizeOfCommittedPages(); | 608 size_t currentObjectSizeKb = allocatedObjectSizeKb + ((Heap::markedObjectSiz e() + WTF::Partitions::totalSizeOfCommittedPages()) >> 10); |
590 if (currentObjectSize >= 300 * 1024 * 1024) { | 609 |
591 // If we're consuming too much memory, trigger a conservative GC | |
592 // aggressively. This is a safe guard to avoid OOM. | |
593 return currentObjectSize > estimatedLiveObjectSize * 3 / 2; | |
594 } | |
595 // Schedule a conservative GC if Oilpan has allocated more than 32 MB since | 610 // Schedule a conservative GC if Oilpan has allocated more than 32 MB since |
596 // the last GC and the current memory usage is >400% larger than | 611 // the last GC and the current memory usage is >400% larger than |
597 // the estimated live memory usage. | 612 // the estimated live memory usage. |
598 // TODO(haraken): 400% is too large. Lower the heap growing factor. | 613 // TODO(haraken): 400% is too large. Lower the heap growing factor. |
599 return allocatedObjectSize >= 32 * 1024 * 1024 && currentObjectSize > 5 * es timatedLiveObjectSize; | 614 return allocatedObjectSizeKb >= 32 * 1024 && currentObjectSizeKb > 5 * estim atedLiveObjectSizeKb; |
600 } | 615 } |
601 | 616 |
602 void ThreadState::scheduleGCIfNeeded() | 617 void ThreadState::scheduleGCIfNeeded() |
603 { | 618 { |
604 checkThread(); | 619 checkThread(); |
605 // Allocation is allowed during sweeping, but those allocations should not | 620 // Allocation is allowed during sweeping, but those allocations should not |
606 // trigger nested GCs. | 621 // trigger nested GCs. |
607 if (isSweepingInProgress()) | 622 if (isSweepingInProgress()) |
608 return; | 623 return; |
609 ASSERT(!sweepForbidden()); | 624 ASSERT(!sweepForbidden()); |
(...skipping 171 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
781 m_gcState = gcState; | 796 m_gcState = gcState; |
782 } | 797 } |
783 | 798 |
784 #undef VERIFY_STATE_TRANSITION | 799 #undef VERIFY_STATE_TRANSITION |
785 | 800 |
786 ThreadState::GCState ThreadState::gcState() const | 801 ThreadState::GCState ThreadState::gcState() const |
787 { | 802 { |
788 return m_gcState; | 803 return m_gcState; |
789 } | 804 } |
790 | 805 |
791 void ThreadState::didV8GC() | 806 void ThreadState::didV8MajorGC(bool forceGC) |
792 { | 807 { |
793 checkThread(); | 808 checkThread(); |
794 if (isMainThread()) { | 809 if (isMainThread()) { |
810 size_t currentObjectSize = Heap::allocatedObjectSize() + Heap::markedObj ectSize() + WTF::Partitions::totalSizeOfCommittedPages(); | |
haraken
2015/06/11 08:33:47
Here can we just call:
Heap::setEstimatedLiveOb
sof
2015/06/11 08:45:02
Sure, bad merge including old code while moving th
| |
811 size_t estimatedLiveObjectSize = Heap::estimatedLiveObjectSize(); | |
812 if (shouldForceMemoryPressureGC(currentObjectSize, estimatedLiveObjectSi ze)) { | |
813 // Under memory pressure, force a conservative GC. | |
814 Heap::collectGarbage(HeapPointersOnStack, GCWithoutSweep, Heap::Cons ervativeGC); | |
815 return; | |
816 } | |
795 // Lower the estimated live object size because the V8 major GC is | 817 // Lower the estimated live object size because the V8 major GC is |
796 // expected to have collected a lot of DOM wrappers and dropped | 818 // expected to have collected a lot of DOM wrappers and dropped |
797 // references to their DOM objects. | 819 // references to their DOM objects. |
798 Heap::setEstimatedLiveObjectSize(Heap::estimatedLiveObjectSize() / 2); | 820 Heap::setEstimatedLiveObjectSize(Heap::estimatedLiveObjectSize() / 2); |
821 | |
822 if (forceGC) { | |
823 // This single GC is not enough for two reasons: | |
824 // (1) The GC is not precise because the GC scans on-stack pointer s conservatively. | |
825 // (2) One GC is not enough to break a chain of persistent handles . It's possible that | |
826 // some heap allocated objects own objects that contain persis tent handles | |
827 // pointing to other heap allocated objects. To break the chai n, we need multiple GCs. | |
828 // | |
829 // Regarding (1), we force a precise GC at the end of the current ev ent loop. So if you want | |
830 // to collect all garbage, you need to wait until the next event loo p. | |
831 // Regarding (2), it would be OK in practice to trigger only one GC per gcEpilogue, because | |
832 // GCController.collectAll() forces 7 V8's GC. | |
833 Heap::collectGarbage(ThreadState::HeapPointersOnStack, ThreadState:: GCWithSweep, Heap::ForcedGC); | |
834 | |
835 // Forces a precise GC at the end of the current event loop. | |
836 ThreadState::current()->setGCState(ThreadState::FullGCScheduled); | |
837 return; | |
838 } | |
839 | |
840 // Schedule an Oilpan GC to avoid the following scenario: | |
841 // (1) A DOM object X holds a v8::Persistent to a V8 object. | |
842 // Assume that X is small but the V8 object is huge. | |
843 // The v8::Persistent is released when X is destructed. | |
844 // (2) X's DOM wrapper is created. | |
845 // (3) The DOM wrapper becomes unreachable. | |
846 // (4) V8 triggers a GC. The V8's GC collects the DOM wrapper. | |
847 // However, X is not collected until a next Oilpan's GC is | |
848 // triggered. | |
849 // (5) If a lot of such DOM objects are created, we end up with | |
850 // a situation where V8's GC collects the DOM wrappers but | |
851 // the DOM objects are not collected forever. (Note that | |
852 // Oilpan's GC is not triggered unless Oilpan's heap gets full.) | |
853 // (6) V8 hits OOM. | |
854 | |
855 if (shouldForceMemoryPressureGC()) | |
856 completeSweep(); | |
857 scheduleGCIfNeeded(); | |
799 } | 858 } |
800 } | 859 } |
801 | 860 |
802 void ThreadState::runScheduledGC(StackState stackState) | 861 void ThreadState::runScheduledGC(StackState stackState) |
803 { | 862 { |
804 checkThread(); | 863 checkThread(); |
805 if (stackState != NoHeapPointersOnStack) | 864 if (stackState != NoHeapPointersOnStack) |
806 return; | 865 return; |
807 | 866 |
808 // If a safe point is entered while initiating a GC, we clearly do | 867 // If a safe point is entered while initiating a GC, we clearly do |
(...skipping 568 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1377 json->beginArray(it->key.ascii().data()); | 1436 json->beginArray(it->key.ascii().data()); |
1378 for (size_t age = 0; age <= maxHeapObjectAge; ++age) | 1437 for (size_t age = 0; age <= maxHeapObjectAge; ++age) |
1379 json->pushInteger(it->value.ages[age]); | 1438 json->pushInteger(it->value.ages[age]); |
1380 json->endArray(); | 1439 json->endArray(); |
1381 } | 1440 } |
1382 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), s tatsName, this, json.release()); | 1441 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), s tatsName, this, json.release()); |
1383 } | 1442 } |
1384 #endif | 1443 #endif |
1385 | 1444 |
1386 } // namespace blink | 1445 } // namespace blink |
OLD | NEW |