| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 439 matching lines...) |
| 450 | 450 |
| 451 if (threadState()->isMainThread()) | 451 if (threadState()->isMainThread()) |
| 452 ScriptForbiddenScope::enter(); | 452 ScriptForbiddenScope::enter(); |
| 453 | 453 |
| 454 Address result = lazySweepPages(allocationSize, gcInfoIndex); | 454 Address result = lazySweepPages(allocationSize, gcInfoIndex); |
| 455 | 455 |
| 456 if (threadState()->isMainThread()) | 456 if (threadState()->isMainThread()) |
| 457 ScriptForbiddenScope::exit(); | 457 ScriptForbiddenScope::exit(); |
| 458 | 458 |
| 459 Heap::reportMemoryUsageForTracing(); | 459 Heap::reportMemoryUsageForTracing(); |
| 460 | |
| 461 return result; | 460 return result; |
| 462 } | 461 } |
| 463 | 462 |
| 464 void BaseHeap::sweepUnsweptPage() | 463 void BaseHeap::sweepUnsweptPage() |
| 465 { | 464 { |
| 466 BasePage* page = m_firstUnsweptPage; | 465 BasePage* page = m_firstUnsweptPage; |
| 467 if (page->isEmpty()) { | 466 if (page->isEmpty()) { |
| 468 page->unlink(&m_firstUnsweptPage); | 467 page->unlink(&m_firstUnsweptPage); |
| 469 page->removeFromHeap(); | 468 page->removeFromHeap(); |
| 470 } else { | 469 } else { |
| (...skipping 410 matching lines...) |
| 881 ASSERT(allocationSize > remainingAllocationSize()); | 880 ASSERT(allocationSize > remainingAllocationSize()); |
| 882 ASSERT(allocationSize >= allocationGranularity); | 881 ASSERT(allocationSize >= allocationGranularity); |
| 883 | 882 |
| 884 #if ENABLE(GC_PROFILING) | 883 #if ENABLE(GC_PROFILING) |
| 885 threadState()->snapshotFreeListIfNecessary(); | 884 threadState()->snapshotFreeListIfNecessary(); |
| 886 #endif | 885 #endif |
| 887 | 886 |
| 888 // Ideally we want to update the persistent count every time a persistent | 887 // Ideally we want to update the persistent count every time a persistent |
| 889 // handle is created or destructed, but that is heavy. So we do the update | 888 // handle is created or destructed, but that is heavy. So we do the update |
| 890 // only in outOfLineAllocate(). | 889 // only in outOfLineAllocate(). |
| 891 threadState()->updatePersistentCounters(); | 890 threadState()->updateWrapperCounters(); |
| 892 | 891 |
| 893 // 1. If this allocation is big enough, allocate a large object. | 892 // 1. If this allocation is big enough, allocate a large object. |
| 894 if (allocationSize >= largeObjectSizeThreshold) { | 893 if (allocationSize >= largeObjectSizeThreshold) { |
| 895 // TODO(sof): support eagerly finalized large objects, if ever needed. | 894 // TODO(sof): support eagerly finalized large objects, if ever needed. |
| 896 RELEASE_ASSERT(heapIndex() != ThreadState::EagerSweepHeapIndex); | 895 RELEASE_ASSERT(heapIndex() != ThreadState::EagerSweepHeapIndex); |
| 897 LargeObjectHeap* largeObjectHeap = static_cast<LargeObjectHeap*>(threadState()->heap(ThreadState::LargeObjectHeapIndex)); | 896 LargeObjectHeap* largeObjectHeap = static_cast<LargeObjectHeap*>(threadState()->heap(ThreadState::LargeObjectHeapIndex)); |
| 898 Address largeObject = largeObjectHeap->allocateLargeObjectPage(allocationSize, gcInfoIndex); | 897 Address largeObject = largeObjectHeap->allocateLargeObjectPage(allocationSize, gcInfoIndex); |
| 899 ASAN_MARK_LARGE_VECTOR_CONTAINER(this, largeObject); | 898 ASAN_MARK_LARGE_VECTOR_CONTAINER(this, largeObject); |
| 900 return largeObject; | 899 return largeObject; |
| 901 } | 900 } |
| (...skipping 1025 matching lines...) |
| 1927 s_postMarkingCallbackStack = new CallbackStack(); | 1926 s_postMarkingCallbackStack = new CallbackStack(); |
| 1928 s_globalWeakCallbackStack = new CallbackStack(); | 1927 s_globalWeakCallbackStack = new CallbackStack(); |
| 1929 s_ephemeronStack = new CallbackStack(); | 1928 s_ephemeronStack = new CallbackStack(); |
| 1930 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); | 1929 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); |
| 1931 s_freePagePool = new FreePagePool(); | 1930 s_freePagePool = new FreePagePool(); |
| 1932 s_orphanedPagePool = new OrphanedPagePool(); | 1931 s_orphanedPagePool = new OrphanedPagePool(); |
| 1933 s_allocatedSpace = 0; | 1932 s_allocatedSpace = 0; |
| 1934 s_allocatedObjectSize = 0; | 1933 s_allocatedObjectSize = 0; |
| 1935 s_objectSizeAtLastGC = 0; | 1934 s_objectSizeAtLastGC = 0; |
| 1936 s_markedObjectSize = 0; | 1935 s_markedObjectSize = 0; |
| 1937 s_persistentCount = 0; | 1936 s_markedObjectSizeAtLastCompleteSweep = 1024 * 1024; |
| 1938 s_persistentCountAtLastGC = 0; | 1937 s_wrapperCount = 0; |
| 1939 s_collectedPersistentCount = 0; | 1938 s_wrapperCountAtLastGC = 0; |
| | 1939 s_collectedWrapperCount = 0; |
| 1940 s_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages(); | 1940 s_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages(); |
| 1941 // Initially, the total heap size is very small. Thus we'll hit the GC | |
| 1942 // condition (i.e., 50% increase on the heap size etc) even if we don't | |
| 1943 // take into account the memory usage explained by the collected persistent | |
| 1944 // handles. So it is OK to set a large initial value. | |
| 1945 s_heapSizePerPersistent = 1024 * 1024; | |
| 1946 s_estimatedMarkingTimePerByte = 0.0; | 1941 s_estimatedMarkingTimePerByte = 0.0; |
| 1947 | 1942 |
| 1948 GCInfoTable::init(); | 1943 GCInfoTable::init(); |
| 1949 | 1944 |
| 1950 if (Platform::current() && Platform::current()->currentThread()) | 1945 if (Platform::current() && Platform::current()->currentThread()) |
| 1951 Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance()); | 1946 Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance()); |
| 1952 } | 1947 } |
| 1953 | 1948 |
| 1954 void Heap::shutdown() | 1949 void Heap::shutdown() |
| 1955 { | 1950 { |
| (...skipping 441 matching lines...) |
| 2397 observedMaxSizeInMB = sizeInMB; | 2392 observedMaxSizeInMB = sizeInMB; |
| 2398 } | 2393 } |
| 2399 } | 2394 } |
| 2400 | 2395 |
| 2401 void Heap::reportMemoryUsageForTracing() | 2396 void Heap::reportMemoryUsageForTracing() |
| 2402 { | 2397 { |
| 2403 // These values are divided by 1024 to avoid overflow in practical cases (TRACE_COUNTER values are 32-bit ints). | 2398 // These values are divided by 1024 to avoid overflow in practical cases (TRACE_COUNTER values are 32-bit ints). |
| 2404 // They are capped to INT_MAX just in case. | 2399 // They are capped to INT_MAX just in case. |
| 2405 TRACE_COUNTER1("blink_gc", "Heap::allocatedObjectSizeKB", std::min(Heap::allocatedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); | 2400 TRACE_COUNTER1("blink_gc", "Heap::allocatedObjectSizeKB", std::min(Heap::allocatedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); |
| 2406 TRACE_COUNTER1("blink_gc", "Heap::markedObjectSizeKB", std::min(Heap::markedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); | 2401 TRACE_COUNTER1("blink_gc", "Heap::markedObjectSizeKB", std::min(Heap::markedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); |
| | 2402 TRACE_COUNTER1("blink_gc", "Heap::markedObjectSizeAtLastCompleteSweepKB", std::min(Heap::markedObjectSizeAtLastCompleteSweep() / 1024, static_cast<size_t>(INT_MAX))); |
| 2407 TRACE_COUNTER1("blink_gc", "Heap::allocatedSpaceKB", std::min(Heap::allocatedSpace() / 1024, static_cast<size_t>(INT_MAX))); | 2403 TRACE_COUNTER1("blink_gc", "Heap::allocatedSpaceKB", std::min(Heap::allocatedSpace() / 1024, static_cast<size_t>(INT_MAX))); |
| 2408 TRACE_COUNTER1("blink_gc", "Heap::objectSizeAtLastGCKB", std::min(Heap::objectSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX))); | 2404 TRACE_COUNTER1("blink_gc", "Heap::objectSizeAtLastGCKB", std::min(Heap::objectSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX))); |
| 2409 TRACE_COUNTER1("blink_gc", "Heap::persistentCount", std::min(Heap::persistentCount(), static_cast<size_t>(INT_MAX))); | 2405 TRACE_COUNTER1("blink_gc", "Heap::wrapperCount", std::min(Heap::wrapperCount(), static_cast<size_t>(INT_MAX))); |
| 2410 TRACE_COUNTER1("blink_gc", "Heap::persistentCountAtLastGC", std::min(Heap::persistentCountAtLastGC(), static_cast<size_t>(INT_MAX))); | 2406 TRACE_COUNTER1("blink_gc", "Heap::wrapperCountAtLastGC", std::min(Heap::wrapperCountAtLastGC(), static_cast<size_t>(INT_MAX))); |
| 2411 TRACE_COUNTER1("blink_gc", "Heap::collectedPersistentCount", std::min(Heap::collectedPersistentCount(), static_cast<size_t>(INT_MAX))); | 2407 TRACE_COUNTER1("blink_gc", "Heap::collectedWrapperCount", std::min(Heap::collectedWrapperCount(), static_cast<size_t>(INT_MAX))); |
| 2412 TRACE_COUNTER1("blink_gc", "Heap::heapSizePerPersistent", std::min(Heap::heapSizePerPersistent(), static_cast<size_t>(INT_MAX))); | |
| 2413 TRACE_COUNTER1("blink_gc", "Heap::partitionAllocSizeAtLastGCKB", std::min(Heap::partitionAllocSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX))); | 2408 TRACE_COUNTER1("blink_gc", "Heap::partitionAllocSizeAtLastGCKB", std::min(Heap::partitionAllocSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX))); |
| 2414 TRACE_COUNTER1("blink_gc", "Partitions::totalSizeOfCommittedPagesKB", std::min(WTF::Partitions::totalSizeOfCommittedPages() / 1024, static_cast<size_t>(INT_MAX))); | 2409 TRACE_COUNTER1("blink_gc", "Partitions::totalSizeOfCommittedPagesKB", std::min(WTF::Partitions::totalSizeOfCommittedPages() / 1024, static_cast<size_t>(INT_MAX))); |
| 2415 } | 2410 } |
| 2416 | 2411 |
| 2417 size_t Heap::objectPayloadSizeForTesting() | 2412 size_t Heap::objectPayloadSizeForTesting() |
| 2418 { | 2413 { |
| 2419 size_t objectPayloadSize = 0; | 2414 size_t objectPayloadSize = 0; |
| 2420 for (ThreadState* state : ThreadState::attachedThreads()) { | 2415 for (ThreadState* state : ThreadState::attachedThreads()) { |
| 2421 state->setGCState(ThreadState::GCRunning); | 2416 state->setGCState(ThreadState::GCRunning); |
| 2422 state->makeConsistentForGC(); | 2417 state->makeConsistentForGC(); |
| (...skipping 97 matching lines...) |
| 2520 delete current; | 2515 delete current; |
| 2521 } | 2516 } |
| 2522 | 2517 |
| 2523 void Heap::resetHeapCounters() | 2518 void Heap::resetHeapCounters() |
| 2524 { | 2519 { |
| 2525 ASSERT(ThreadState::current()->isInGC()); | 2520 ASSERT(ThreadState::current()->isInGC()); |
| 2526 | 2521 |
| 2527 Heap::reportMemoryUsageForTracing(); | 2522 Heap::reportMemoryUsageForTracing(); |
| 2528 | 2523 |
| 2529 s_objectSizeAtLastGC = s_allocatedObjectSize + s_markedObjectSize; | 2524 s_objectSizeAtLastGC = s_allocatedObjectSize + s_markedObjectSize; |
| | 2525 s_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages(); |
| | 2526 s_wrapperCountAtLastGC = s_wrapperCount; |
| 2530 s_allocatedObjectSize = 0; | 2527 s_allocatedObjectSize = 0; |
| 2531 s_markedObjectSize = 0; | 2528 s_markedObjectSize = 0; |
| 2532 s_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages(); | 2529 s_collectedWrapperCount = 0; |
| 2533 s_persistentCountAtLastGC = s_persistentCount; | |
| 2534 s_collectedPersistentCount = 0; | |
| 2535 } | 2530 } |
| 2536 | 2531 |
| 2537 CallbackStack* Heap::s_markingStack; | 2532 CallbackStack* Heap::s_markingStack; |
| 2538 CallbackStack* Heap::s_postMarkingCallbackStack; | 2533 CallbackStack* Heap::s_postMarkingCallbackStack; |
| 2539 CallbackStack* Heap::s_globalWeakCallbackStack; | 2534 CallbackStack* Heap::s_globalWeakCallbackStack; |
| 2540 CallbackStack* Heap::s_ephemeronStack; | 2535 CallbackStack* Heap::s_ephemeronStack; |
| 2541 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; | 2536 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; |
| 2542 bool Heap::s_shutdownCalled = false; | 2537 bool Heap::s_shutdownCalled = false; |
| 2543 FreePagePool* Heap::s_freePagePool; | 2538 FreePagePool* Heap::s_freePagePool; |
| 2544 OrphanedPagePool* Heap::s_orphanedPagePool; | 2539 OrphanedPagePool* Heap::s_orphanedPagePool; |
| 2545 Heap::RegionTree* Heap::s_regionTree = nullptr; | 2540 Heap::RegionTree* Heap::s_regionTree = nullptr; |
| 2546 size_t Heap::s_allocatedSpace = 0; | 2541 size_t Heap::s_allocatedSpace = 0; |
| 2547 size_t Heap::s_allocatedObjectSize = 0; | 2542 size_t Heap::s_allocatedObjectSize = 0; |
| 2548 size_t Heap::s_objectSizeAtLastGC = 0; | 2543 size_t Heap::s_objectSizeAtLastGC = 0; |
| 2549 size_t Heap::s_markedObjectSize = 0; | 2544 size_t Heap::s_markedObjectSize = 0; |
| 2550 size_t Heap::s_persistentCount = 0; | 2545 size_t Heap::s_markedObjectSizeAtLastCompleteSweep = 0; |
| 2551 size_t Heap::s_persistentCountAtLastGC = 0; | 2546 size_t Heap::s_wrapperCount = 0; |
| 2552 size_t Heap::s_collectedPersistentCount = 0; | 2547 size_t Heap::s_wrapperCountAtLastGC = 0; |
| | 2548 size_t Heap::s_collectedWrapperCount = 0; |
| 2553 size_t Heap::s_partitionAllocSizeAtLastGC = 0; | 2549 size_t Heap::s_partitionAllocSizeAtLastGC = 0; |
| 2554 size_t Heap::s_heapSizePerPersistent = 0; | |
| 2555 double Heap::s_estimatedMarkingTimePerByte = 0.0; | 2550 double Heap::s_estimatedMarkingTimePerByte = 0.0; |
| 2556 | 2551 |
| 2557 } // namespace blink | 2552 } // namespace blink |
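The comment kept at (new) lines 887-889 explains why the counter refresh happens only in outOfLineAllocate(): updating on every persistent/wrapper handle creation or destruction would be too heavy, so the bookkeeping is deferred to the slow allocation path. Below is a minimal standalone sketch of that deferred-update pattern; it is not Blink code, and SlowPathCounter, onHandleCreated(), and refreshOnSlowPath() are hypothetical names standing in for the real handle machinery and updateWrapperCounters().

```cpp
// Standalone illustration only (not Blink code): the deferred-update pattern
// described by the outOfLineAllocate() comment. Per-handle operations do the
// cheapest possible bookkeeping; the derived statistic is refreshed only when
// the slow allocation path runs. All names here are hypothetical.
#include <cstddef>
#include <cstdio>

class SlowPathCounter {
public:
    // Cheap per-operation bookkeeping: a single increment or decrement.
    void onHandleCreated() { ++m_liveHandles; }
    void onHandleDestroyed() { --m_liveHandles; }

    // Heavier bookkeeping happens only on the slow path, mirroring
    // updateWrapperCounters() being called from outOfLineAllocate() rather
    // than from every handle constructor/destructor.
    void refreshOnSlowPath() { m_handlesAtLastRefresh = m_liveHandles; }

    std::size_t liveHandles() const { return m_liveHandles; }
    std::size_t handlesAtLastRefresh() const { return m_handlesAtLastRefresh; }

private:
    std::size_t m_liveHandles = 0;
    std::size_t m_handlesAtLastRefresh = 0;
};

int main()
{
    SlowPathCounter counter;
    counter.onHandleCreated();
    counter.onHandleCreated();
    counter.onHandleDestroyed();
    counter.refreshOnSlowPath(); // would run inside the slow allocation path
    std::printf("live=%zu atLastRefresh=%zu\n",
                counter.liveHandles(), counter.handlesAtLastRefresh());
    return 0;
}
```

The per-operation cost stays at one increment or decrement, while the derived statistic is only as stale as the most recent slow-path allocation.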
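Heap::reportMemoryUsageForTracing() above divides byte counts by 1024 and clamps them to INT_MAX because TRACE_COUNTER values are 32-bit ints. A minimal standalone sketch of that clamping convention follows, assuming a hypothetical reportKB() helper in place of the real TRACE_COUNTER1 macro.

```cpp
// Standalone illustration only (not Blink code): the KB/INT_MAX clamping
// convention used by Heap::reportMemoryUsageForTracing(). reportKB() is a
// hypothetical stand-in for the real TRACE_COUNTER1 macro.
#include <algorithm>
#include <climits>
#include <cstddef>
#include <cstdio>

static void reportKB(const char* name, std::size_t sizeInBytes)
{
    // Report in KB so typical heap sizes fit in a 32-bit trace counter,
    // and clamp to INT_MAX just in case.
    std::size_t kb = std::min(sizeInBytes / 1024, static_cast<std::size_t>(INT_MAX));
    std::printf("%s = %zu KB\n", name, kb);
}

int main()
{
    reportKB("Heap::allocatedObjectSizeKB", 48u * 1024 * 1024);       // 49152 KB
    reportKB("Heap::allocatedSpaceKB", static_cast<std::size_t>(-1)); // clamps to INT_MAX
    return 0;
}
```

With this convention a counter only saturates once the reported quantity exceeds roughly 2 TB (INT_MAX kilobytes), which the comment in the patch treats as a just-in-case cap.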