Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 24 matching lines...) Expand all Loading... | |
| 35 #include "platform/TraceEvent.h" | 35 #include "platform/TraceEvent.h" |
| 36 #include "platform/heap/AddressSanitizer.h" | 36 #include "platform/heap/AddressSanitizer.h" |
| 37 #include "platform/heap/CallbackStack.h" | 37 #include "platform/heap/CallbackStack.h" |
| 38 #include "platform/heap/Handle.h" | 38 #include "platform/heap/Handle.h" |
| 39 #include "platform/heap/Heap.h" | 39 #include "platform/heap/Heap.h" |
| 40 #include "platform/heap/SafePoint.h" | 40 #include "platform/heap/SafePoint.h" |
| 41 #include "platform/scheduler/Scheduler.h" | 41 #include "platform/scheduler/Scheduler.h" |
| 42 #include "public/platform/Platform.h" | 42 #include "public/platform/Platform.h" |
| 43 #include "public/platform/WebThread.h" | 43 #include "public/platform/WebThread.h" |
| 44 #include "public/platform/WebTraceLocation.h" | 44 #include "public/platform/WebTraceLocation.h" |
| 45 #include "wtf/Partitions.h" | |
| 45 #include "wtf/ThreadingPrimitives.h" | 46 #include "wtf/ThreadingPrimitives.h" |
| 46 #if ENABLE(GC_PROFILING) | 47 #if ENABLE(GC_PROFILING) |
| 47 #include "platform/TracedValue.h" | 48 #include "platform/TracedValue.h" |
| 48 #include "wtf/text/StringHash.h" | 49 #include "wtf/text/StringHash.h" |
| 49 #endif | 50 #endif |
| 50 | 51 |
| 51 #if OS(WIN) | 52 #if OS(WIN) |
| 52 #include <stddef.h> | 53 #include <stddef.h> |
| 53 #include <windows.h> | 54 #include <windows.h> |
| 54 #include <winnt.h> | 55 #include <winnt.h> |
| (...skipping 437 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 492 AtomicallyInitializedStaticReference(PersistentNode, anchor, new PersistentAnchor); | 493 AtomicallyInitializedStaticReference(PersistentNode, anchor, new PersistentAnchor); |
| 493 return anchor; | 494 return anchor; |
| 494 } | 495 } |
| 495 | 496 |
| 496 Mutex& ThreadState::globalRootsMutex() | 497 Mutex& ThreadState::globalRootsMutex() |
| 497 { | 498 { |
| 498 AtomicallyInitializedStaticReference(Mutex, mutex, new Mutex); | 499 AtomicallyInitializedStaticReference(Mutex, mutex, new Mutex); |
| 499 return mutex; | 500 return mutex; |
| 500 } | 501 } |
| 501 | 502 |
| 502 // FIXME: We should improve the GC heuristics. | 503 // TODO(haraken): We should improve the GC heuristics. |
| 503 // These heuristics affect performance significantly. | 504 // These heuristics affect performance significantly. |
| 504 bool ThreadState::shouldScheduleIdleGC() | 505 bool ThreadState::shouldScheduleIdleGC() |
| 505 { | 506 { |
| 506 if (gcState() != NoGCScheduled) | 507 if (gcState() != NoGCScheduled) |
| 507 return false; | 508 return false; |
| 508 #if ENABLE(OILPAN) | 509 #if ENABLE(OILPAN) |
| 509 // The estimated size is updated when the main thread finishes lazy | 510 // The estimated size is updated when the main thread finishes lazy |
| 510 // sweeping. If this thread reaches here before the main thread finishes | 511 // sweeping. If this thread reaches here before the main thread finishes |
| 511 // lazy sweeping, the thread will use the estimated size of the last GC. | 512 // lazy sweeping, the thread will use the estimated size of the last GC. |
| 512 size_t estimatedLiveObjectSize = Heap::estimatedLiveObjectSize(); | 513 size_t estimatedLiveObjectSize = Heap::estimatedLiveObjectSize(); |
| 513 // Schedule an idle GC if more than 512 KB has been allocated since | |
| 514 // the last GC and the current memory usage (=allocated + estimated) | |
| 515 // is >50% larger than the estimated live memory usage. | |
| 516 size_t allocatedObjectSize = Heap::allocatedObjectSize(); | 514 size_t allocatedObjectSize = Heap::allocatedObjectSize(); |
| 517 return allocatedObjectSize >= 512 * 1024 && allocatedObjectSize > estimatedLiveObjectSize / 2; | 515 // Heap::markedObjectSize() may be underestimated if any thread has not |
| 516 // finished completeSweep(). | |
| 517 size_t currentObjectSize = allocatedObjectSize + Heap::markedObjectSize() + WTF::Partitions::totalSizeOfCommittedPages(); | |
| 518 // Schedule an idle GC if the current memory usage is >1MB | |
| 519 // and is >50% larger than the estimated live memory usage. | |
| 520 bool shouldGC = currentObjectSize >= 1024 * 1024 && currentObjectSize > estimatedLiveObjectSize * 3 / 2; | |
| 521 /* | |
| 522 if (shouldGC) { | |
| 523 fprintf(stderr, "idle GC: allocatedObjectSize=%ld estimatedLiveObjectSize=%ld markedObjectSize=%ld totalSizeOfCommittedPages=%ld currentObjectSize=%ld\n", Heap::allocatedObjectSize() / 1024 / 1024, Heap::estimatedLiveObjectSize() / 1024 / 1024, Heap::markedObjectSize() / 1024 / 1024, WTF::Partitions::totalSizeOfCommittedPages() / 1024 / 1024, currentObjectSize / 1024 / 1024); | |
| 524 } | |
| 525 */ | |
| 526 return shouldGC; | |
| 518 #else | 527 #else |
| 519 return false; | 528 return false; |
| 520 #endif | 529 #endif |
| 521 } | 530 } |
| 522 | 531 |
| 523 // FIXME: We should improve the GC heuristics. | 532 // TODO(haraken): We should improve the GC heuristics. |
| 524 // These heuristics affect performance significantly. | 533 // These heuristics affect performance significantly. |
| 525 bool ThreadState::shouldSchedulePreciseGC() | 534 bool ThreadState::shouldSchedulePreciseGC() |
| 526 { | 535 { |
| 527 if (gcState() != NoGCScheduled) | 536 if (gcState() != NoGCScheduled) |
| 528 return false; | 537 return false; |
| 529 #if ENABLE(OILPAN) | 538 #if ENABLE(OILPAN) |
| 530 return false; | 539 return false; |
| 531 #else | 540 #else |
| 532 // The estimated size is updated when the main thread finishes lazy | 541 // The estimated size is updated when the main thread finishes lazy |
| 533 // sweeping. If this thread reaches here before the main thread finishes | 542 // sweeping. If this thread reaches here before the main thread finishes |
| 534 // lazy sweeping, the thread will use the estimated size of the last GC. | 543 // lazy sweeping, the thread will use the estimated size of the last GC. |
| 535 size_t estimatedLiveObjectSize = Heap::estimatedLiveObjectSize(); | 544 size_t estimatedLiveObjectSize = Heap::estimatedLiveObjectSize(); |
| 536 // Schedule a precise GC if more than 512 KB has been allocated since | |
| 537 // the last GC and the current memory usage (=allocated + estimated) | |
| 538 // is >50% larger than the estimated live memory usage. | |
| 539 size_t allocatedObjectSize = Heap::allocatedObjectSize(); | 545 size_t allocatedObjectSize = Heap::allocatedObjectSize(); |
| 540 return allocatedObjectSize >= 512 * 1024 && allocatedObjectSize > estimatedLiveObjectSize / 2; | 546 // Heap::markedObjectSize() may be underestimated if any thread has not |
| 547 // finished completeSweep(). | |
| 548 size_t currentObjectSize = allocatedObjectSize + Heap::markedObjectSize() + WTF::Partitions::totalSizeOfCommittedPages(); | |
| 549 // Schedule a precise GC if the current memory usage is >1MB | |
| 550 // and is >50% larger than the estimated live memory usage. | |
| 551 return currentObjectSize >= 1024 * 1024 && currentObjectSize > estimatedLiveObjectSize * 3 / 2; | |
| 541 #endif | 552 #endif |
| 542 } | 553 } |
| 543 | 554 |
| 544 // FIXME: We should improve the GC heuristics. | 555 // TODO(haraken): We should improve the GC heuristics. |
| 545 // These heuristics affect performance significantly. | 556 // These heuristics affect performance significantly. |
| 546 bool ThreadState::shouldForceConservativeGC() | 557 bool ThreadState::shouldForceConservativeGC() |
| 547 { | 558 { |
| 548 if (UNLIKELY(m_gcForbiddenCount)) | 559 if (UNLIKELY(m_gcForbiddenCount)) |
| 549 return false; | 560 return false; |
| 550 | 561 |
| 551 if (Heap::isUrgentGCRequested()) | |
| 552 return true; | |
| 553 | |
| 554 // The estimated size is updated when the main thread finishes lazy | 562 // The estimated size is updated when the main thread finishes lazy |
| 555 // sweeping. If this thread reaches here before the main thread finishes | 563 // sweeping. If this thread reaches here before the main thread finishes |
| 556 // lazy sweeping, the thread will use the estimated size of the last GC. | 564 // lazy sweeping, the thread will use the estimated size of the last GC. |
| 557 size_t estimatedLiveObjectSize = Heap::estimatedLiveObjectSize(); | 565 size_t estimatedLiveObjectSize = Heap::estimatedLiveObjectSize(); |
| 558 size_t allocatedObjectSize = Heap::allocatedObjectSize(); | 566 size_t allocatedObjectSize = Heap::allocatedObjectSize(); |
| 567 // Heap::markedObjectSize() may be underestimated if any thread has not | |
| 568 // finished completeSweep(). | |
| 569 size_t currentObjectSize = allocatedObjectSize + Heap::markedObjectSize() + WTF::Partitions::totalSizeOfCommittedPages(); | |
| 559 if (Heap::markedObjectSize() + allocatedObjectSize >= 300 * 1024 * 1024) { | 570 if (Heap::markedObjectSize() + allocatedObjectSize >= 300 * 1024 * 1024) { |
|
sof
2015/04/14 20:13:19
If you change this to
size_t currentObjectSize
| |
| 560 // If we're consuming too much memory, trigger a conservative GC | 571 // If we're consuming too much memory, trigger a conservative GC |
| 561 // aggressively. This is a safe guard to avoid OOM. | 572 // aggressively. This is a safe guard to avoid OOM. |
| 562 return allocatedObjectSize > estimatedLiveObjectSize / 2; | 573 return currentObjectSize > estimatedLiveObjectSize * 3 / 2; |
| 563 } | 574 } |
| 564 // Schedule a conservative GC if more than 32 MB has been allocated since | 575 // Schedule a conservative GC if the current memory usage is >32MB |
| 565 // the last GC and the current memory usage (=allocated + estimated) | 576 // and is >50% larger than the estimated live memory usage. |
| 566 // is >500% larger than the estimated live memory usage. | 577 bool shouldGC = currentObjectSize >= 32 * 1024 * 1024 && currentObjectSize > 5 * estimatedLiveObjectSize; |
| 567 return allocatedObjectSize >= 32 * 1024 * 1024 && allocatedObjectSize > 4 * estimatedLiveObjectSize; | 578 /* |
| 579 if (shouldGC) { | |
| 580 fprintf(stderr, "conservative GC: allocatedObjectSize=%ld estimatedLiveObjectSize=%ld markedObjectSize=%ld totalSizeOfCommittedPages=%ld currentObjectSize=%ld\n", Heap::allocatedObjectSize() / 1024 / 1024, Heap::estimatedLiveObjectSize() / 1024 / 1024, Heap::markedObjectSize() / 1024 / 1024, WTF::Partitions::totalSizeOfCommittedPages() / 1024 / 1024, currentObjectSize / 1024 / 1024); | |
|
Daniel Bratell
2015/04/16 08:58:45
Need the fprintf to not be there when this lands.
| |
| 581 } | |
| 582 */ | |
| 583 return shouldGC; | |
| 568 } | 584 } |
| 569 | 585 |
| 570 void ThreadState::scheduleGCIfNeeded() | 586 void ThreadState::scheduleGCIfNeeded() |
| 571 { | 587 { |
| 572 checkThread(); | 588 checkThread(); |
| 573 // Allocation is allowed during sweeping, but those allocations should not | 589 // Allocation is allowed during sweeping, but those allocations should not |
| 574 // trigger nested GCs. Does not apply if an urgent GC has been requested. | 590 // trigger nested GCs. |
| 575 if (isSweepingInProgress() && UNLIKELY(!Heap::isUrgentGCRequested())) | 591 if (isSweepingInProgress()) |
| 576 return; | 592 return; |
| 577 ASSERT(!sweepForbidden()); | 593 ASSERT(!sweepForbidden()); |
| 578 | 594 |
| 579 if (shouldForceConservativeGC()) { | 595 if (shouldForceConservativeGC()) { |
| 580 if (Heap::isUrgentGCRequested()) { | 596 Heap::collectGarbage(HeapPointersOnStack, GCWithoutSweep, Heap::ConservativeGC); |
| 581 // If GC is deemed urgent, eagerly sweep and finalize any external a llocations right away. | |
| 582 Heap::collectGarbage(HeapPointersOnStack, GCWithSweep, Heap::ConservativeGC); | |
| 583 } else { | |
| 584 // Otherwise, schedule a lazy sweeping in an idle task. | |
| 585 Heap::collectGarbage(HeapPointersOnStack, GCWithoutSweep, Heap::ConservativeGC); | |
| 586 } | |
| 587 return; | 597 return; |
| 588 } | 598 } |
| 589 if (shouldSchedulePreciseGC()) | 599 if (shouldSchedulePreciseGC()) |
| 590 schedulePreciseGC(); | 600 schedulePreciseGC(); |
| 591 else if (shouldScheduleIdleGC()) | 601 else if (shouldScheduleIdleGC()) |
| 592 scheduleIdleGC(); | 602 scheduleIdleGC(); |
| 593 } | 603 } |
| 594 | 604 |
| 595 void ThreadState::performIdleGC(double deadlineSeconds) | 605 void ThreadState::performIdleGC(double deadlineSeconds) |
| 596 { | 606 { |
| (...skipping 241 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 838 | 848 |
| 839 if (isMainThread()) | 849 if (isMainThread()) |
| 840 ScriptForbiddenScope::exit(); | 850 ScriptForbiddenScope::exit(); |
| 841 } | 851 } |
| 842 | 852 |
| 843 postSweep(); | 853 postSweep(); |
| 844 } | 854 } |
| 845 | 855 |
| 846 void ThreadState::postSweep() | 856 void ThreadState::postSweep() |
| 847 { | 857 { |
| 848 if (isMainThread()) | 858 if (isMainThread()) { |
| 849 Heap::setEstimatedLiveObjectSize(Heap::markedObjectSize()); | 859 // At the point where the main thread finishes lazy sweeping, |
| 860 // we estimate the live object size. Heap::markedObjectSize() | |
| 861 // may be underestimated if any other thread has not finished | |
| 862 // lazy sweeping. | |
| 863 Heap::setEstimatedLiveObjectSize(Heap::markedObjectSize() + Heap::externalObjectSizeAtLastGC()); | |
|
sof
2015/04/10 07:21:09
Hmm, this feels like arbitrary measures to combine
| |
| 864 } | |
| 850 | 865 |
| 851 switch (gcState()) { | 866 switch (gcState()) { |
| 852 case Sweeping: | 867 case Sweeping: |
| 853 setGCState(NoGCScheduled); | 868 setGCState(NoGCScheduled); |
| 854 break; | 869 break; |
| 855 case SweepingAndPreciseGCScheduled: | 870 case SweepingAndPreciseGCScheduled: |
| 856 setGCState(PreciseGCScheduled); | 871 setGCState(PreciseGCScheduled); |
| 857 break; | 872 break; |
| 858 case SweepingAndIdleGCScheduled: | 873 case SweepingAndIdleGCScheduled: |
| 859 setGCState(NoGCScheduled); | 874 setGCState(NoGCScheduled); |
| (...skipping 431 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1291 json->beginArray(it->key.ascii().data()); | 1306 json->beginArray(it->key.ascii().data()); |
| 1292 for (size_t age = 0; age <= maxHeapObjectAge; ++age) | 1307 for (size_t age = 0; age <= maxHeapObjectAge; ++age) |
| 1293 json->pushInteger(it->value.ages[age]); | 1308 json->pushInteger(it->value.ages[age]); |
| 1294 json->endArray(); | 1309 json->endArray(); |
| 1295 } | 1310 } |
| 1296 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release()); | 1311 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release()); |
| 1297 } | 1312 } |
| 1298 #endif | 1313 #endif |
| 1299 | 1314 |
| 1300 } // namespace blink | 1315 } // namespace blink |
| OLD | NEW |