Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 700 matching lines...) | |
| 711 // but not for less than 512 KB. | 711 // but not for less than 512 KB. |
| 712 size_t newSize = Heap::allocatedObjectSize(); | 712 size_t newSize = Heap::allocatedObjectSize(); |
| 713 return newSize >= 512 * 1024 && newSize > Heap::markedObjectSize() / 2; | 713 return newSize >= 512 * 1024 && newSize > Heap::markedObjectSize() / 2; |
| 714 #endif | 714 #endif |
| 715 } | 715 } |
| 716 | 716 |
| 717 // FIXME: We should improve the GC heuristics. | 717 // FIXME: We should improve the GC heuristics. |
| 718 // These heuristics affect performance significantly. | 718 // These heuristics affect performance significantly. |
| 719 bool ThreadState::shouldForceConservativeGC() | 719 bool ThreadState::shouldForceConservativeGC() |
| 720 { | 720 { |
| | 721 if (Heap::isGCUrgentlyRequested()) { |
| | 722 Heap::clearUrgentGC(); |

haraken
2015/02/10 01:23:17
We should call Heap::clearUrgentGC() in Heap::coll

| | 723 return true; |
| | 724 } |
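haraken's comment above suggests resetting the urgent-GC flag inside the collector itself (presumably Heap::collectGarbage()) rather than at this call site. A minimal standalone sketch of that shape, with hypothetical stand-ins for the real Heap statics:

```cpp
// Sketch only: models "clear the urgent-GC request as part of the
// collection" so call sites like shouldForceConservativeGC() do not have
// to call clearUrgentGC() themselves. Not the actual Blink Heap API.
#include <atomic>
#include <iostream>

class Heap {
public:
    static void requestUrgentGC() { s_urgentGCRequested.store(true); }
    static bool isGCUrgentlyRequested() { return s_urgentGCRequested.load(); }

    static void collectGarbage()
    {
        // The flag reset lives here, in one place, instead of at every
        // caller that decides to force a collection.
        s_urgentGCRequested.store(false);
        std::cout << "collecting garbage\n";
    }

private:
    static std::atomic<bool> s_urgentGCRequested;
};

std::atomic<bool> Heap::s_urgentGCRequested{false};

int main()
{
    Heap::requestUrgentGC();
    if (Heap::isGCUrgentlyRequested())
        Heap::collectGarbage();
    // The request is gone once the collection has run.
    std::cout << std::boolalpha << Heap::isGCUrgentlyRequested() << "\n"; // false
}
```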
| 721 size_t newSize = Heap::allocatedObjectSize(); | 725 size_t newSize = Heap::allocatedObjectSize(); |
| 722 if (newSize >= 300 * 1024 * 1024) { | 726 if (newSize >= 300 * 1024 * 1024) { |
| 723 // If we consume too much memory, trigger a conservative GC | 727 // If we consume too much memory, trigger a conservative GC |
| 724 // on a 50% increase in size since the last GC. This is a safe guard | 728 // on a 50% increase in size since the last GC. This is a safe guard |
| 725 // to avoid OOM. | 729 // to avoid OOM. |
| 726 return newSize > Heap::markedObjectSize() / 2; | 730 return newSize > Heap::markedObjectSize() / 2; |
| 727 } | 731 } |
| 728 if (m_didV8GCAfterLastGC && m_collectionRate > 0.5) { | 732 if (m_didV8GCAfterLastGC && m_collectionRate > 0.5) { |
| 729 // If we had a V8 GC after the last Oilpan GC and the last collection | 733 // If we had a V8 GC after the last Oilpan GC and the last collection |
| 730 // rate was higher than 50%, trigger a conservative GC on a 200% | 734 // rate was higher than 50%, trigger a conservative GC on a 200% |
| 731 // increase in size since the last GC, but not for less than 4 MB. | 735 // increase in size since the last GC, but not for less than 4 MB. |
| 732 return newSize >= 4 * 1024 * 1024 && newSize > 2 * Heap::markedObjectSize(); | 736 return newSize >= 4 * 1024 * 1024 && newSize > 2 * Heap::markedObjectSize(); |
| 733 } | 737 } |
| 734 // Otherwise, trigger a conservative GC on a 400% increase in size since | 738 // Otherwise, trigger a conservative GC on a 400% increase in size since |
| 735 // the last GC, but not for less than 32 MB. We set the higher limit in | 739 // the last GC, but not for less than 32 MB. We set the higher limit in |
| 736 // this case because Oilpan GC is unlikely to collect a lot of objects | 740 // this case because Oilpan GC is unlikely to collect a lot of objects |
| 737 // without having a V8 GC. | 741 // without having a V8 GC. |
| 738 return newSize >= 32 * 1024 * 1024 && newSize > 4 * Heap::markedObjectSize(); | 742 return newSize >= 32 * 1024 * 1024 && newSize > 4 * Heap::markedObjectSize(); |
| 739 } | 743 } |
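To make the three thresholds above easier to read in isolation, here is a restatement of shouldForceConservativeGC() as a free function over plain numbers; the constants come from the diff, but the function, its parameters, and the example values are illustrative only, not Blink code:

```cpp
// Standalone restatement of the conservative-GC heuristic in the diff.
#include <cstddef>
#include <cstdio>

bool forceConservativeGC(std::size_t allocatedSize, std::size_t markedSize,
                         bool didV8GCAfterLastGC, double collectionRate)
{
    if (allocatedSize >= 300 * 1024 * 1024) {
        // Above 300 MB of allocations: trigger as soon as the allocated size
        // exceeds half the marked size (the OOM safeguard case).
        return allocatedSize > markedSize / 2;
    }
    if (didV8GCAfterLastGC && collectionRate > 0.5) {
        // A V8 GC ran since the last Oilpan GC and the collection rate was
        // above 50%: trigger when allocations exceed twice the marked size,
        // but never below 4 MB.
        return allocatedSize >= 4 * 1024 * 1024 && allocatedSize > 2 * markedSize;
    }
    // Default: trigger when allocations exceed four times the marked size,
    // but never below 32 MB.
    return allocatedSize >= 32 * 1024 * 1024 && allocatedSize > 4 * markedSize;
}

int main()
{
    // Example: 10 MB survived the last GC and no V8 GC has run since.
    // A conservative GC is forced once allocations pass both 32 MB and 40 MB.
    std::size_t marked = 10 * 1024 * 1024;
    std::printf("%d\n", forceConservativeGC(48 * 1024 * 1024, marked, false, 0.0)); // 1
    std::printf("%d\n", forceConservativeGC(24 * 1024 * 1024, marked, false, 0.0)); // 0
}
```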
| 740 | 744 |
| 741 void ThreadState::scheduleGCIfNeeded() | 745 void ThreadState::scheduleGCIfNeeded() |
| 742 { | 746 { |
| 743 checkThread(); | 747 checkThread(); |
| 744 // Allocation is allowed during sweeping, but those allocations should not | 748 // Allocation is allowed during sweeping, but those allocations should not |
| 745 // trigger nested GCs | 749 // trigger nested GCs |
| 746 if (isSweepingInProgress()) | 750 if (isSweepingInProgress()) { |
| 747 return; | 751 if (!Heap::isGCUrgentlyRequested() || !isSweepingScheduled()) |

haraken
2015/02/10 01:23:17
If an urgent GC is scheduled, I think we can just
sof
2015/02/11 15:58:25
That would happen on entering "StoppingOtherThread

| | 752 return; |
| | 753 // Urgent GC requested with only a GC scheduled; fall through |
| | 754 // and have it be serviced by a conservative GC. |
| | 755 } |
| 748 ASSERT(!sweepForbidden()); | 756 ASSERT(!sweepForbidden()); |
| 749 | 757 |
| 750 if (shouldForceConservativeGC()) | 758 if (shouldForceConservativeGC()) |
| 751 Heap::collectGarbage(ThreadState::HeapPointersOnStack, ThreadState::GCWithoutSweep); | 759 Heap::collectGarbage(ThreadState::HeapPointersOnStack, ThreadState::GCWithoutSweep); |
| 752 else if (shouldSchedulePreciseGC()) | 760 else if (shouldSchedulePreciseGC()) |
| 753 schedulePreciseGC(); | 761 schedulePreciseGC(); |
| 754 else if (shouldScheduleIdleGC()) | 762 else if (shouldScheduleIdleGC()) |
| 755 scheduleIdleGC(); | 763 scheduleIdleGC(); |
| 756 } | 764 } |
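Read together with the comment thread above, the new early-exit says: while sweeping is in progress, bail out unless an urgent GC has been requested and a sweep is already scheduled, in which case fall through so the conservative-GC check can service the request. A reduced control-flow sketch, with plain bools standing in for the real ThreadState/Heap queries (illustrative only):

```cpp
// Reduced model of the early-exit added to scheduleGCIfNeeded(); it only
// distinguishes "no GC" from "conservative GC" and ignores the precise/idle
// scheduling branches. Not Blink code.
#include <cstdio>

enum class Action { None, ConservativeGC };

Action scheduleGCIfNeeded(bool sweepingInProgress, bool urgentGCRequested,
                          bool sweepingScheduled, bool heuristicsSayCollect)
{
    if (sweepingInProgress) {
        if (!urgentGCRequested || !sweepingScheduled)
            return Action::None; // allocations during sweeping stay GC-free
        // Urgent GC requested with a sweep already scheduled: fall through
        // and let the conservative-GC path handle it.
    }
    return heuristicsSayCollect ? Action::ConservativeGC : Action::None;
}

int main()
{
    // Urgent request during sweeping falls through to a conservative GC.
    std::printf("%d\n", scheduleGCIfNeeded(true, true, true, true) == Action::ConservativeGC);  // 1
    // An ordinary allocation during sweeping does not trigger a nested GC.
    std::printf("%d\n", scheduleGCIfNeeded(true, false, true, true) == Action::None);           // 1
}
```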
| 757 | 765 |
| (...skipping 557 matching lines...) | |
| 1315 json->beginArray(it->key.ascii().data()); | 1323 json->beginArray(it->key.ascii().data()); |
| 1316 for (size_t age = 0; age <= maxHeapObjectAge; ++age) | 1324 for (size_t age = 0; age <= maxHeapObjectAge; ++age) |
| 1317 json->pushInteger(it->value.ages[age]); | 1325 json->pushInteger(it->value.ages[age]); |
| 1318 json->endArray(); | 1326 json->endArray(); |
| 1319 } | 1327 } |
| 1320 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release()); | 1328 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release()); |
| 1321 } | 1329 } |
| 1322 #endif | 1330 #endif |
| 1323 | 1331 |
| 1324 } // namespace blink | 1332 } // namespace blink |