Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 651 matching lines...) | |
| 662 // but not for less than 512 KB. | 662 // but not for less than 512 KB. |
| 663 size_t newSize = Heap::allocatedObjectSize(); | 663 size_t newSize = Heap::allocatedObjectSize(); |
| 664 return newSize >= 512 * 1024 && newSize > Heap::markedObjectSize() / 2; | 664 return newSize >= 512 * 1024 && newSize > Heap::markedObjectSize() / 2; |
| 665 #endif | 665 #endif |
| 666 } | 666 } |
| 667 | 667 |
| 668 // FIXME: We should improve the GC heuristics. | 668 // FIXME: We should improve the GC heuristics. |
| 669 // These heuristics affect performance significantly. | 669 // These heuristics affect performance significantly. |
| 670 bool ThreadState::shouldForceConservativeGC() | 670 bool ThreadState::shouldForceConservativeGC() |
| 671 { | 671 { |
| 672 if (Heap::isUrgentGCRequested()) | |
| 673 return true; | |
| 674 | |
| 672 size_t newSize = Heap::allocatedObjectSize(); | 675 size_t newSize = Heap::allocatedObjectSize(); |
| 673 if (newSize >= 300 * 1024 * 1024) { | 676 if (newSize >= 300 * 1024 * 1024) { |
| 674 // If we consume too much memory, trigger a conservative GC | 677 // If we consume too much memory, trigger a conservative GC |
| 675 // on a 50% increase in size since the last GC. This is a safe guard | 678 // on a 50% increase in size since the last GC. This is a safe guard |
| 676 // to avoid OOM. | 679 // to avoid OOM. |
| 677 return newSize > Heap::markedObjectSize() / 2; | 680 return newSize > Heap::markedObjectSize() / 2; |
| 678 } | 681 } |
| 679 if (m_didV8GCAfterLastGC && m_collectionRate > 0.5) { | 682 if (m_didV8GCAfterLastGC && m_collectionRate > 0.5) { |
| 680 // If we had a V8 GC after the last Oilpan GC and the last collection | 683 // If we had a V8 GC after the last Oilpan GC and the last collection |
| 681 // rate was higher than 50%, trigger a conservative GC on a 200% | 684 // rate was higher than 50%, trigger a conservative GC on a 200% |
| 682 // increase in size since the last GC, but not for less than 4 MB. | 685 // increase in size since the last GC, but not for less than 4 MB. |
| 683 return newSize >= 4 * 1024 * 1024 && newSize > 2 * Heap::markedObjectSize(); | 686 return newSize >= 4 * 1024 * 1024 && newSize > 2 * Heap::markedObjectSize(); |
| 684 } | 687 } |
| 685 // Otherwise, trigger a conservative GC on a 400% increase in size since | 688 // Otherwise, trigger a conservative GC on a 400% increase in size since |
| 686 // the last GC, but not for less than 32 MB. We set the higher limit in | 689 // the last GC, but not for less than 32 MB. We set the higher limit in |
| 687 // this case because Oilpan GC is unlikely to collect a lot of objects | 690 // this case because Oilpan GC is unlikely to collect a lot of objects |
| 688 // without having a V8 GC. | 691 // without having a V8 GC. |
| 689 return newSize >= 32 * 1024 * 1024 && newSize > 4 * Heap::markedObjectSize(); | 692 return newSize >= 32 * 1024 * 1024 && newSize > 4 * Heap::markedObjectSize(); |
| 690 } | 693 } |
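For reference, the thresholds in shouldSchedulePreciseGC() and shouldForceConservativeGC() can be modeled as a minimal standalone sketch. The names below (shouldSchedulePreciseGCSketch, allocatedSize, markedSize, and the explicit parameters) are illustrative stand-ins for the Heap::allocatedObjectSize()/Heap::markedObjectSize() queries and the ThreadState members, not the real API.

```cpp
#include <cstddef>

// Precise GC: roughly a 50% growth since the last GC, but not below 512 KB.
bool shouldSchedulePreciseGCSketch(size_t allocatedSize, size_t markedSize)
{
    return allocatedSize >= 512 * 1024 && allocatedSize > markedSize / 2;
}

// Conservative GC: an urgent request always qualifies; above 300 MB a 50%
// growth suffices (OOM safeguard); after a V8 GC with a collection rate
// above 50%, a 200% growth and at least 4 MB; otherwise a 400% growth and
// at least 32 MB, since Oilpan rarely frees much without a preceding V8 GC.
bool shouldForceConservativeGCSketch(size_t allocatedSize, size_t markedSize,
    bool urgentGCRequested, bool didV8GCAfterLastGC, double collectionRate)
{
    if (urgentGCRequested)
        return true;
    if (allocatedSize >= 300 * 1024 * 1024)
        return allocatedSize > markedSize / 2;
    if (didV8GCAfterLastGC && collectionRate > 0.5)
        return allocatedSize >= 4 * 1024 * 1024 && allocatedSize > 2 * markedSize;
    return allocatedSize >= 32 * 1024 * 1024 && allocatedSize > 4 * markedSize;
}
```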
| 691 | 694 |
| 692 void ThreadState::scheduleGCIfNeeded() | 695 void ThreadState::scheduleGCIfNeeded() |
| 693 { | 696 { |
| 694 checkThread(); | 697 checkThread(); |
| 695 // Allocation is allowed during sweeping, but those allocations should not | 698 // Allocation is allowed during sweeping, but those allocations should not |
| 696 // trigger nested GCs | 699 // trigger nested GCs |
| 697 if (isSweepingInProgress()) | 700 if (isSweepingInProgress()) { |
| 698 return; | 701 if (!Heap::isUrgentGCRequested() || !isSweepingScheduled()) |
haraken 2015/02/23 08:42:27:
Do we want to have the 'isSweepingScheduled()' che…

sof 2015/02/23 12:26:13:
I think we do as otherwise we would allow nested G…

haraken 2015/02/23 14:59:07:
How can it lead to nested GCs? Given that we clear…

sof 2015/02/23 15:03:40:
If you're in the Sweeping state and you don't perf…

haraken 2015/02/23 15:06:22:
Thanks, makes sense.
It seems more straightforwar…

sof 2015/02/23 15:22:40:
Having that "is-allowed" check in an already condi…
| 702 return; | |
| 703 // Urgent GC requested with only a GC scheduled; fall through | |
| 704 // and have it be serviced by a conservative GC. | |
| 705 } | |
| 699 ASSERT(!sweepForbidden()); | 706 ASSERT(!sweepForbidden()); |
| 700 | 707 |
| 701 if (shouldForceConservativeGC()) | 708 if (shouldForceConservativeGC()) { |
| 702 Heap::collectGarbage(ThreadState::HeapPointersOnStack, ThreadState::GCWithoutSweep); | 709 // If GC is deemed urgent, eagerly sweep and finalize any external allocations right away. |
| 703 else if (shouldSchedulePreciseGC()) | 710 GCType gcType = Heap::isUrgentGCRequested() ? GCWithSweep : GCWithoutSweep; |
| 711 Heap::clearUrgentGC(); | |
haraken 2015/02/23 08:42:27:
Would it be better to move clearUrgentGC() into He…

sof 2015/02/23 12:26:13:
That makes good sense to do for the cases where ur…
| 712 Heap::collectGarbage(HeapPointersOnStack, gcType); | |
| 713 return; | |
| 714 } | |
| 715 if (shouldSchedulePreciseGC()) | |
| 704 schedulePreciseGC(); | 716 schedulePreciseGC(); |
| 705 else if (shouldScheduleIdleGC()) | 717 else if (shouldScheduleIdleGC()) |
| 706 scheduleIdleGC(); | 718 scheduleIdleGC(); |
| 707 } | 719 } |
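As a minimal sketch of the resulting control flow in scheduleGCIfNeeded(), the decision can be modeled as a pure function. decideGC() and the GCDecision enum are hypothetical names used only for illustration; the real code calls Heap::collectGarbage(), schedulePreciseGC(), and scheduleIdleGC() directly and also clears the urgent-GC flag before collecting.

```cpp
enum class GCDecision {
    None,                     // sweeping in progress: do not nest a GC
    ConservativeWithSweep,    // urgent request: collect and sweep eagerly
    ConservativeWithoutSweep, // heuristic threshold hit: collect, sweep lazily
    SchedulePrecise,
    ScheduleIdle
};

GCDecision decideGC(bool sweepingInProgress, bool sweepingScheduled,
    bool urgentGCRequested, bool forceConservative,
    bool shouldPrecise, bool shouldIdle)
{
    // During sweeping, only an urgent request with nothing but a sweep
    // scheduled is allowed to fall through to a conservative GC.
    if (sweepingInProgress && (!urgentGCRequested || !sweepingScheduled))
        return GCDecision::None;
    if (forceConservative)
        return urgentGCRequested ? GCDecision::ConservativeWithSweep
                                 : GCDecision::ConservativeWithoutSweep;
    if (shouldPrecise)
        return GCDecision::SchedulePrecise;
    if (shouldIdle)
        return GCDecision::ScheduleIdle;
    return GCDecision::None;
}
```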
| 708 | 720 |
| 709 void ThreadState::performIdleGC(double deadlineSeconds) | 721 void ThreadState::performIdleGC(double deadlineSeconds) |
| 710 { | 722 { |
| 711 ASSERT(isMainThread()); | 723 ASSERT(isMainThread()); |
| 712 | 724 |
| 713 m_hasPendingIdleTask = false; | 725 m_hasPendingIdleTask = false; |
| (...skipping 611 matching lines...) | |
| 1325 json->beginArray(it->key.ascii().data()); | 1337 json->beginArray(it->key.ascii().data()); |
| 1326 for (size_t age = 0; age <= maxHeapObjectAge; ++age) | 1338 for (size_t age = 0; age <= maxHeapObjectAge; ++age) |
| 1327 json->pushInteger(it->value.ages[age]); | 1339 json->pushInteger(it->value.ages[age]); |
| 1328 json->endArray(); | 1340 json->endArray(); |
| 1329 } | 1341 } |
| 1330 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release()); | 1342 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release()); |
| 1331 } | 1343 } |
| 1332 #endif | 1344 #endif |
| 1333 | 1345 |
| 1334 } // namespace blink | 1346 } // namespace blink |