| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 409 matching lines...) |
| 420 TRACE_EVENT0("blink_gc", "Heap::GCScope"); | 420 TRACE_EVENT0("blink_gc", "Heap::GCScope"); |
| 421 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); | 421 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); |
| 422 if (m_state->isMainThread()) | 422 if (m_state->isMainThread()) |
| 423 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting"); | 423 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting"); |
| 424 | 424 |
| 425 m_state->checkThread(); | 425 m_state->checkThread(); |
| 426 | 426 |
| 427 // FIXME: in the unlikely event that two threads decide | 427 // FIXME: in the unlikely event that two threads decide |
| 428 // to collect garbage at the same time, avoid doing two GCs in | 428 // to collect garbage at the same time, avoid doing two GCs in |
| 429 // a row. | 429 // a row. |
| 430 RELEASE_ASSERT(!m_state->isInGC()); | |
| 431 RELEASE_ASSERT(!m_state->isSweepInProgress()); | |
| 432 if (LIKELY(ThreadState::stopThreads())) { | 430 if (LIKELY(ThreadState::stopThreads())) { |
| 433 m_parkedAllThreads = true; | 431 m_parkedAllThreads = true; |
| 434 m_state->enterGC(); | |
| 435 } | 432 } |
| 436 if (m_state->isMainThread()) | 433 if (m_state->isMainThread()) |
| 437 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState); | 434 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState); |
| 438 } | 435 } |
| 439 | 436 |
| 440 bool allThreadsParked() { return m_parkedAllThreads; } | 437 bool allThreadsParked() { return m_parkedAllThreads; } |
| 441 | 438 |
| 442 ~GCScope() | 439 ~GCScope() |
| 443 { | 440 { |
| 444 // Only clean up if we parked all threads, in which case the GC happened | 441 // Only clean up if we parked all threads, in which case the GC happened |
| 445 // and we need to resume the other threads. | 442 // and we need to resume the other threads. |
| 446 if (LIKELY(m_parkedAllThreads)) { | 443 if (LIKELY(m_parkedAllThreads)) { |
| 447 m_state->leaveGC(); | |
| 448 ASSERT(!m_state->isInGC()); | |
| 449 ThreadState::resumeThreads(); | 444 ThreadState::resumeThreads(); |
| 450 } | 445 } |
| 451 } | 446 } |
| 452 | 447 |
| 453 private: | 448 private: |
| 454 ThreadState* m_state; | 449 ThreadState* m_state; |
| 455 ThreadState::SafePointScope m_safePointScope; | 450 ThreadState::SafePointScope m_safePointScope; |
| 456 bool m_parkedAllThreads; // False if we fail to park all threads | 451 bool m_parkedAllThreads; // False if we fail to park all threads |
| 457 }; | 452 }; |
| 458 | 453 |
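The GCScope changes above are the heart of this patch: the per-thread enterGC()/leaveGC() bookkeeping is removed, and "a GC is in progress" becomes a single process-global fact owned by Heap (see the new s_inGC static registered at the bottom of this file). The accessors themselves are declared in Heap.h and are not part of this hunk; a minimal sketch of what they presumably reduce to, given that s_inGC is only flipped while every mutator thread is parked:

    // Sketch only; the real declarations live in Heap.h, outside this diff.
    class Heap {
    public:
        static bool isInGC() { return s_inGC; }
        static void enterGC() { ASSERT(!s_inGC); s_inGC = true; }
        static void leaveGC() { ASSERT(s_inGC); s_inGC = false; }
    private:
        static bool s_inGC;
    };

Because the flag is only written between stopThreads() and resumeThreads(), a plain bool with no atomics is presumably sufficient.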
| (...skipping 227 matching lines...) |
| 686 { | 681 { |
| 687 ASSERT(allocationSize > remainingAllocationSize()); | 682 ASSERT(allocationSize > remainingAllocationSize()); |
| 688 if (allocationSize > blinkPageSize / 2) | 683 if (allocationSize > blinkPageSize / 2) |
| 689 return allocateLargeObject(allocationSize, gcInfo); | 684 return allocateLargeObject(allocationSize, gcInfo); |
| 690 | 685 |
| 691 updateRemainingAllocationSize(); | 686 updateRemainingAllocationSize(); |
| 692 if (threadState()->shouldGC()) { | 687 if (threadState()->shouldGC()) { |
| 693 if (threadState()->shouldForceConservativeGC()) | 688 if (threadState()->shouldForceConservativeGC()) |
| 694 Heap::collectGarbage(ThreadState::HeapPointersOnStack); | 689 Heap::collectGarbage(ThreadState::HeapPointersOnStack); |
| 695 else | 690 else |
| 696 threadState()->setGCRequested(); | 691 threadState()->setGCState(ThreadState::GCScheduled); |
| 697 } | 692 } |
| 698 if (remainingAllocationSize() > 0) { | 693 if (remainingAllocationSize() > 0) { |
| 699 m_freeList.addToFreeList(currentAllocationPoint(), remainingAllocationSize()); | 694 m_freeList.addToFreeList(currentAllocationPoint(), remainingAllocationSize()); |
| 700 setAllocationPoint(0, 0); | 695 setAllocationPoint(0, 0); |
| 701 } | 696 } |
| 702 ensureCurrentAllocation(allocationSize, gcInfo); | 697 ensureCurrentAllocation(allocationSize, gcInfo); |
| 703 return allocate(payloadSize, gcInfo); | 698 return allocate(payloadSize, gcInfo); |
| 704 } | 699 } |
| 705 | 700 |
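outOfLineAllocate also shows the second theme of the patch: the old boolean setGCRequested()/clearGCRequested() pair is replaced by an explicit per-thread state machine, with setGCState(ThreadState::GCScheduled) recording that a GC is wanted without forcing one immediately. Only the states named in this hunk are confirmed; a hypothetical sketch of the shape the enum and setter might take:

    // Hypothetical sketch; only GCScheduled and StoppingOtherThreads are
    // confirmed by this diff, the other state name is illustrative.
    class ThreadState {
    public:
        enum GCState {
            NoGCScheduled,        // assumed idle state
            GCScheduled,          // a GC is requested but not yet started
            StoppingOtherThreads, // this thread is parking the others
        };
        void setGCState(GCState gcState) { checkThread(); m_gcState = gcState; }
        GCState gcState() const { return m_gcState; }
    private:
        GCState m_gcState;
    };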
| 706 template<typename Header> | 701 template<typename Header> |
| (...skipping 314 matching lines...) |
| 1021 allocationSize += headerPadding<Header>(); | 1016 allocationSize += headerPadding<Header>(); |
| 1022 | 1017 |
| 1023 // If ASan is supported we add allocationGranularity bytes to the allocated space and | 1018 // If ASan is supported we add allocationGranularity bytes to the allocated space and |
| 1024 // poison that to detect overflows | 1019 // poison that to detect overflows |
| 1025 #if defined(ADDRESS_SANITIZER) | 1020 #if defined(ADDRESS_SANITIZER) |
| 1026 allocationSize += allocationGranularity; | 1021 allocationSize += allocationGranularity; |
| 1027 #endif | 1022 #endif |
| 1028 | 1023 |
| 1029 updateRemainingAllocationSize(); | 1024 updateRemainingAllocationSize(); |
| 1030 if (m_threadState->shouldGC()) | 1025 if (m_threadState->shouldGC()) |
| 1031 m_threadState->setGCRequested(); | 1026 m_threadState->setGCState(ThreadState::GCScheduled); |
| 1032 m_threadState->shouldFlushHeapDoesNotContainCache(); | 1027 m_threadState->shouldFlushHeapDoesNotContainCache(); |
| 1033 PageMemory* pageMemory = PageMemory::allocate(allocationSize); | 1028 PageMemory* pageMemory = PageMemory::allocate(allocationSize); |
| 1034 m_threadState->allocatedRegionsSinceLastGC().append(pageMemory->region()); | 1029 m_threadState->allocatedRegionsSinceLastGC().append(pageMemory->region()); |
| 1035 Address largeObjectAddress = pageMemory->writableStart(); | 1030 Address largeObjectAddress = pageMemory->writableStart(); |
| 1036 Address headerAddress = largeObjectAddress + sizeof(LargeObject<Header>) + headerPadding<Header>(); | 1031 Address headerAddress = largeObjectAddress + sizeof(LargeObject<Header>) + headerPadding<Header>(); |
| 1037 memset(headerAddress, 0, size); | 1032 memset(headerAddress, 0, size); |
| 1038 Header* header = new (NotNull, headerAddress) Header(size, gcInfo); | 1033 Header* header = new (NotNull, headerAddress) Header(size, gcInfo); |
| 1039 Address result = headerAddress + sizeof(*header); | 1034 Address result = headerAddress + sizeof(*header); |
| 1040 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 1035 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
| 1041 LargeObject<Header>* largeObject = new (largeObjectAddress) LargeObject<Header>(pageMemory, gcInfo, threadState()); | 1036 LargeObject<Header>* largeObject = new (largeObjectAddress) LargeObject<Header>(pageMemory, gcInfo, threadState()); |
| (...skipping 843 matching lines...) |
| 1885 if (json) { | 1880 if (json) { |
| 1886 json->setInteger("class", tag); | 1881 json->setInteger("class", tag); |
| 1887 json->setInteger("size", header->size()); | 1882 json->setInteger("size", header->size()); |
| 1888 json->setInteger("isMarked", isMarked()); | 1883 json->setInteger("isMarked", isMarked()); |
| 1889 } | 1884 } |
| 1890 } | 1885 } |
| 1891 #endif | 1886 #endif |
| 1892 | 1887 |
| 1893 void HeapDoesNotContainCache::flush() | 1888 void HeapDoesNotContainCache::flush() |
| 1894 { | 1889 { |
| 1895 ASSERT(ThreadState::isAnyThreadInGC()); | 1890 ASSERT(Heap::isInGC()); |
| 1896 | 1891 |
| 1897 if (m_hasEntries) { | 1892 if (m_hasEntries) { |
| 1898 for (int i = 0; i < numberOfEntries; i++) | 1893 for (int i = 0; i < numberOfEntries; i++) |
| 1899 m_entries[i] = 0; | 1894 m_entries[i] = 0; |
| 1900 m_hasEntries = false; | 1895 m_hasEntries = false; |
| 1901 } | 1896 } |
| 1902 } | 1897 } |
| 1903 | 1898 |
| 1904 size_t HeapDoesNotContainCache::hash(Address address) | 1899 size_t HeapDoesNotContainCache::hash(Address address) |
| 1905 { | 1900 { |
| 1906 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2); | 1901 size_t value = (reinterpret_cast<size_t>(address) >> blinkPageSizeLog2); |
| 1907 value ^= value >> numberOfEntriesLog2; | 1902 value ^= value >> numberOfEntriesLog2; |
| 1908 value ^= value >> (numberOfEntriesLog2 * 2); | 1903 value ^= value >> (numberOfEntriesLog2 * 2); |
| 1909 value &= numberOfEntries - 1; | 1904 value &= numberOfEntries - 1; |
| 1910 return value & ~1; // Returns only even number. | 1905 return value & ~1; // Returns only even number. |
| 1911 } | 1906 } |
| 1912 | 1907 |
| 1913 bool HeapDoesNotContainCache::lookup(Address address) | 1908 bool HeapDoesNotContainCache::lookup(Address address) |
| 1914 { | 1909 { |
| 1915 ASSERT(ThreadState::isAnyThreadInGC()); | 1910 ASSERT(Heap::isInGC()); |
| 1916 | 1911 |
| 1917 size_t index = hash(address); | 1912 size_t index = hash(address); |
| 1918 ASSERT(!(index & 1)); | 1913 ASSERT(!(index & 1)); |
| 1919 Address cachePage = roundToBlinkPageStart(address); | 1914 Address cachePage = roundToBlinkPageStart(address); |
| 1920 if (m_entries[index] == cachePage) | 1915 if (m_entries[index] == cachePage) |
| 1921 return m_entries[index]; | 1916 return m_entries[index]; |
| 1922 if (m_entries[index + 1] == cachePage) | 1917 if (m_entries[index + 1] == cachePage) |
| 1923 return m_entries[index + 1]; | 1918 return m_entries[index + 1]; |
| 1924 return 0; | 1919 return 0; |
| 1925 } | 1920 } |
| 1926 | 1921 |
| 1927 void HeapDoesNotContainCache::addEntry(Address address) | 1922 void HeapDoesNotContainCache::addEntry(Address address) |
| 1928 { | 1923 { |
| 1929 ASSERT(ThreadState::isAnyThreadInGC()); | 1924 ASSERT(Heap::isInGC()); |
| 1930 | 1925 |
| 1931 m_hasEntries = true; | 1926 m_hasEntries = true; |
| 1932 size_t index = hash(address); | 1927 size_t index = hash(address); |
| 1933 ASSERT(!(index & 1)); | 1928 ASSERT(!(index & 1)); |
| 1934 Address cachePage = roundToBlinkPageStart(address); | 1929 Address cachePage = roundToBlinkPageStart(address); |
| 1935 m_entries[index + 1] = m_entries[index]; | 1930 m_entries[index + 1] = m_entries[index]; |
| 1936 m_entries[index] = cachePage; | 1931 m_entries[index] = cachePage; |
| 1937 } | 1932 } |
| 1938 | 1933 |
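hash() clears the low bit of the index, so the table is really a set of two-slot buckets: m_entries[index] holds the most recently seen non-heap page and m_entries[index + 1] the one it displaced, which addEntry maintains by shifting the old entry over. checkAndMarkPointer below consults the cache exactly this way; a condensed usage sketch (mightBeHeapPage is a hypothetical wrapper, not a function in this file):

    // Usage sketch of the negative cache during conservative scanning.
    bool mightBeHeapPage(Address address)
    {
        if (s_heapDoesNotContainCache->lookup(address))
            return false; // page is cached as "not in the Blink heap"
        if (Heap::lookup(address))
            return true; // expensive region-tree lookup found a page
        s_heapDoesNotContainCache->addEntry(address); // remember the miss
        return false;
    }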
| 1939 void Heap::flushHeapDoesNotContainCache() | 1934 void Heap::flushHeapDoesNotContainCache() |
| (...skipping 21 matching lines...) |
| 1961 inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback) | 1956 inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback) |
| 1962 { | 1957 { |
| 1963 ASSERT(header); | 1958 ASSERT(header); |
| 1964 #if ENABLE(ASSERT) | 1959 #if ENABLE(ASSERT) |
| 1965 { | 1960 { |
| 1966 // Check that we are not marking objects that are outside | 1961 // Check that we are not marking objects that are outside |
| 1967 // the heap by calling Heap::contains. However we cannot | 1962 // the heap by calling Heap::contains. However we cannot |
| 1968 // call Heap::contains when outside a GC and we call mark | 1963 // call Heap::contains when outside a GC and we call mark |
| 1969 // when doing weakness for ephemerons. Hence we only check | 1964 // when doing weakness for ephemerons. Hence we only check |
| 1970 // when called within a GC. | 1965 // when called within a GC. |
| 1971 ASSERT(!ThreadState::isAnyThreadInGC() || Heap::containedInHeapOrOrphanedPage(header)); | 1966 ASSERT(!Heap::isInGC() || Heap::containedInHeapOrOrphanedPage(header)); |
| 1972 } | 1967 } |
| 1973 #endif | 1968 #endif |
| 1974 ASSERT(objectPointer); | 1969 ASSERT(objectPointer); |
| 1975 if (header->isMarked()) | 1970 if (header->isMarked()) |
| 1976 return; | 1971 return; |
| 1977 header->mark(); | 1972 header->mark(); |
| 1978 #if ENABLE(GC_PROFILE_MARKING) | 1973 #if ENABLE(GC_PROFILE_MARKING) |
| 1979 MutexLocker locker(objectGraphMutex()); | 1974 MutexLocker locker(objectGraphMutex()); |
| 1980 String className(classOf(objectPointer)); | 1975 String className(classOf(objectPointer)); |
| 1981 { | 1976 { |
| (...skipping 202 matching lines...) |
| 2184 s_shutdownCalled = true; | 2179 s_shutdownCalled = true; |
| 2185 ThreadState::shutdownHeapIfNecessary(); | 2180 ThreadState::shutdownHeapIfNecessary(); |
| 2186 } | 2181 } |
| 2187 | 2182 |
| 2188 void Heap::doShutdown() | 2183 void Heap::doShutdown() |
| 2189 { | 2184 { |
| 2190 // We don't want to call doShutdown() twice. | 2185 // We don't want to call doShutdown() twice. |
| 2191 if (!s_markingVisitor) | 2186 if (!s_markingVisitor) |
| 2192 return; | 2187 return; |
| 2193 | 2188 |
| 2194 ASSERT(!ThreadState::isAnyThreadInGC()); | 2189 ASSERT(!Heap::isInGC()); |
| 2195 ASSERT(!ThreadState::attachedThreads().size()); | 2190 ASSERT(!ThreadState::attachedThreads().size()); |
| 2196 delete s_markingVisitor; | 2191 delete s_markingVisitor; |
| 2197 s_markingVisitor = 0; | 2192 s_markingVisitor = 0; |
| 2198 delete s_heapDoesNotContainCache; | 2193 delete s_heapDoesNotContainCache; |
| 2199 s_heapDoesNotContainCache = 0; | 2194 s_heapDoesNotContainCache = 0; |
| 2200 delete s_freePagePool; | 2195 delete s_freePagePool; |
| 2201 s_freePagePool = 0; | 2196 s_freePagePool = 0; |
| 2202 delete s_orphanedPagePool; | 2197 delete s_orphanedPagePool; |
| 2203 s_orphanedPagePool = 0; | 2198 s_orphanedPagePool = 0; |
| 2204 delete s_weakCallbackStack; | 2199 delete s_weakCallbackStack; |
| 2205 s_weakCallbackStack = 0; | 2200 s_weakCallbackStack = 0; |
| 2206 delete s_postMarkingCallbackStack; | 2201 delete s_postMarkingCallbackStack; |
| 2207 s_postMarkingCallbackStack = 0; | 2202 s_postMarkingCallbackStack = 0; |
| 2208 delete s_markingStack; | 2203 delete s_markingStack; |
| 2209 s_markingStack = 0; | 2204 s_markingStack = 0; |
| 2210 delete s_ephemeronStack; | 2205 delete s_ephemeronStack; |
| 2211 s_ephemeronStack = 0; | 2206 s_ephemeronStack = 0; |
| 2212 delete s_regionTree; | 2207 delete s_regionTree; |
| 2213 s_regionTree = 0; | 2208 s_regionTree = 0; |
| 2214 ThreadState::shutdown(); | 2209 ThreadState::shutdown(); |
| 2215 ASSERT(Heap::allocatedSpace() == 0); | 2210 ASSERT(Heap::allocatedSpace() == 0); |
| 2216 } | 2211 } |
| 2217 | 2212 |
| 2218 BaseHeapPage* Heap::contains(Address address) | 2213 BaseHeapPage* Heap::contains(Address address) |
| 2219 { | 2214 { |
| 2220 ASSERT(ThreadState::isAnyThreadInGC()); | 2215 ASSERT(Heap::isInGC()); |
| 2221 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 2216 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
| 2222 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 2217 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
| 2223 BaseHeapPage* page = (*it)->contains(address); | 2218 BaseHeapPage* page = (*it)->contains(address); |
| 2224 if (page) | 2219 if (page) |
| 2225 return page; | 2220 return page; |
| 2226 } | 2221 } |
| 2227 return 0; | 2222 return 0; |
| 2228 } | 2223 } |
| 2229 | 2224 |
| 2230 #if ENABLE(ASSERT) | 2225 #if ENABLE(ASSERT) |
| 2231 bool Heap::containedInHeapOrOrphanedPage(void* object) | 2226 bool Heap::containedInHeapOrOrphanedPage(void* object) |
| 2232 { | 2227 { |
| 2233 return contains(object) || orphanedPagePool()->contains(object); | 2228 return contains(object) || orphanedPagePool()->contains(object); |
| 2234 } | 2229 } |
| 2235 #endif | 2230 #endif |
| 2236 | 2231 |
| 2237 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) | 2232 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) |
| 2238 { | 2233 { |
| 2239 ASSERT(ThreadState::isAnyThreadInGC()); | 2234 ASSERT(Heap::isInGC()); |
| 2240 | 2235 |
| 2241 #if !ENABLE(ASSERT) | 2236 #if !ENABLE(ASSERT) |
| 2242 if (s_heapDoesNotContainCache->lookup(address)) | 2237 if (s_heapDoesNotContainCache->lookup(address)) |
| 2243 return 0; | 2238 return 0; |
| 2244 #endif | 2239 #endif |
| 2245 | 2240 |
| 2246 if (BaseHeapPage* page = lookup(address)) { | 2241 if (BaseHeapPage* page = lookup(address)) { |
| 2247 ASSERT(page->contains(address)); | 2242 ASSERT(page->contains(address)); |
| 2248 ASSERT(!page->orphaned()); | 2243 ASSERT(!page->orphaned()); |
| 2249 ASSERT(!s_heapDoesNotContainCache->lookup(address)); | 2244 ASSERT(!s_heapDoesNotContainCache->lookup(address)); |
| (...skipping 170 matching lines...) |
| 2420 } | 2415 } |
| 2421 | 2416 |
| 2422 #if ENABLE(ASSERT) | 2417 #if ENABLE(ASSERT) |
| 2423 bool Heap::weakTableRegistered(const void* table) | 2418 bool Heap::weakTableRegistered(const void* table) |
| 2424 { | 2419 { |
| 2425 ASSERT(s_ephemeronStack); | 2420 ASSERT(s_ephemeronStack); |
| 2426 return s_ephemeronStack->hasCallbackForObject(table); | 2421 return s_ephemeronStack->hasCallbackForObject(table); |
| 2427 } | 2422 } |
| 2428 #endif | 2423 #endif |
| 2429 | 2424 |
| 2430 void Heap::prepareForGC() | 2425 void Heap::preGC() |
| 2431 { | 2426 { |
| 2432 ASSERT(ThreadState::isAnyThreadInGC()); | |
| 2433 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 2427 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
| 2434 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) | 2428 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) |
| 2435 (*it)->prepareForGC(); | 2429 (*it)->preGC(); |
| 2430 } |
| 2431 |
| 2432 void Heap::postGC() |
| 2433 { |
| 2434 ASSERT(Heap::isInGC()); |
| 2435 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
| 2436 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) |
| 2437 (*it)->postGC(); |
| 2436 } | 2438 } |
| 2437 | 2439 |
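Heap::preGC() and Heap::postGC() simply fan the notification out to every attached thread; the per-thread bodies live in ThreadState.cpp and are outside this diff. Since preGC() replaces the old prepareForGC() and the per-thread enterGC()/leaveGC() pair was deleted from GCScope, a speculative sketch of the division of labour:

    // Speculative sketch; real bodies are in ThreadState.cpp, outside this hunk.
    void ThreadState::preGC()
    {
        // What prepareForGC() used to do: flush allocation points and make
        // the thread's heaps consistent before marking starts.
        prepareHeapsForGC(); // hypothetical helper name
    }

    void ThreadState::postGC()
    {
        // Advance the per-thread state machine so the thread knows a sweep
        // is pending when it resumes from the safepoint.
        setGCState(SweepScheduled); // hypothetical state name
    }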
| 2438 void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::CauseOfGC cause) | 2440 void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::CauseOfGC cause) |
| 2439 { | 2441 { |
| 2440 ThreadState* state = ThreadState::current(); | 2442 ThreadState* state = ThreadState::current(); |
| 2441 state->clearGCRequested(); | 2443 state->setGCState(ThreadState::StoppingOtherThreads); |
| 2442 | 2444 |
| 2443 GCScope gcScope(stackState); | 2445 GCScope gcScope(stackState); |
| 2444 // Check if we successfully parked the other threads. If not, we bail out of the GC. | 2446 // Check if we successfully parked the other threads. If not, we bail out of the GC. |
| 2445 if (!gcScope.allThreadsParked()) { | 2447 if (!gcScope.allThreadsParked()) { |
| 2446 ThreadState::current()->setGCRequested(); | 2448 state->setGCState(ThreadState::GCScheduled); |
| 2447 return; | 2449 return; |
| 2448 } | 2450 } |
| 2449 | 2451 |
| 2450 if (state->isMainThread()) | 2452 if (state->isMainThread()) |
| 2451 ScriptForbiddenScope::enter(); | 2453 ScriptForbiddenScope::enter(); |
| 2452 | 2454 |
| 2453 s_lastGCWasConservative = false; | 2455 s_lastGCWasConservative = false; |
| 2454 | 2456 |
| 2455 Heap::resetMarkedObjectSize(); | |
| 2456 Heap::resetAllocatedObjectSize(); | |
| 2457 | |
| 2458 TRACE_EVENT2("blink_gc", "Heap::collectGarbage", | 2457 TRACE_EVENT2("blink_gc", "Heap::collectGarbage", |
| 2459 "precise", stackState == ThreadState::NoHeapPointersOnStack, | 2458 "precise", stackState == ThreadState::NoHeapPointersOnStack, |
| 2460 "forced", cause == ThreadState::ForcedGC); | 2459 "forced", cause == ThreadState::ForcedGC); |
| 2461 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); | 2460 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); |
| 2462 double timeStamp = WTF::currentTimeMS(); | 2461 double timeStamp = WTF::currentTimeMS(); |
| 2463 #if ENABLE(GC_PROFILE_MARKING) | 2462 #if ENABLE(GC_PROFILE_MARKING) |
| 2464 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear(); | 2463 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear(); |
| 2465 #endif | 2464 #endif |
| 2466 | 2465 |
| 2467 // Disallow allocation during garbage collection (but not | 2466 // Disallow allocation during garbage collection (but not |
| 2468 // during the finalization that happens when the gcScope is | 2467 // during the finalization that happens when the gcScope is |
| 2469 // torn down). | 2468 // torn down). |
| 2470 NoAllocationScope<AnyThread> noAllocationScope; | 2469 NoAllocationScope<AnyThread> noAllocationScope; |
| 2471 | 2470 |
| 2472 prepareForGC(); | 2471 enterGC(); |
| 2472 preGC(); |
| 2473 |
| 2474 Heap::resetMarkedObjectSize(); |
| 2475 Heap::resetAllocatedObjectSize(); |
| 2473 | 2476 |
| 2474 // 1. trace persistent roots. | 2477 // 1. trace persistent roots. |
| 2475 ThreadState::visitPersistentRoots(s_markingVisitor); | 2478 ThreadState::visitPersistentRoots(s_markingVisitor); |
| 2476 | 2479 |
| 2477 // 2. trace objects reachable from the persistent roots including ephemerons. | 2480 // 2. trace objects reachable from the persistent roots including ephemerons. |
| 2478 processMarkingStack<GlobalMarking>(); | 2481 processMarkingStack<GlobalMarking>(); |
| 2479 | 2482 |
| 2480 // 3. trace objects reachable from the stack. We do this independent of the | 2483 // 3. trace objects reachable from the stack. We do this independent of the |
| 2481 // given stackState since other threads might have a different stack state. | 2484 // given stackState since other threads might have a different stack state. |
| 2482 ThreadState::visitStackRoots(s_markingVisitor); | 2485 ThreadState::visitStackRoots(s_markingVisitor); |
| 2483 | 2486 |
| 2484 // 4. trace objects reachable from the stack "roots" including ephemerons. | 2487 // 4. trace objects reachable from the stack "roots" including ephemerons. |
| 2485 // Only do the processing if we found a pointer to an object on one of the | 2488 // Only do the processing if we found a pointer to an object on one of the |
| 2486 // thread stacks. | 2489 // thread stacks. |
| 2487 if (lastGCWasConservative()) { | 2490 if (lastGCWasConservative()) { |
| 2488 processMarkingStack<GlobalMarking>(); | 2491 processMarkingStack<GlobalMarking>(); |
| 2489 } | 2492 } |
| 2490 | 2493 |
| 2491 postMarkingProcessing(); | 2494 postMarkingProcessing(); |
| 2492 globalWeakProcessing(); | 2495 globalWeakProcessing(); |
| 2493 | 2496 |
| 2494 // Now we can delete all orphaned pages because there are no dangling | 2497 // Now we can delete all orphaned pages because there are no dangling |
| 2495 // pointers to the orphaned pages. (If we have such dangling pointers, | 2498 // pointers to the orphaned pages. (If we have such dangling pointers, |
| 2496 // we should have crashed during marking before getting here.) | 2499 // we should have crashed during marking before getting here.) |
| 2497 orphanedPagePool()->decommitOrphanedPages(); | 2500 orphanedPagePool()->decommitOrphanedPages(); |
| 2498 | 2501 |
| 2502 postGC(); |
| 2503 leaveGC(); |
| 2504 |
| 2499 #if ENABLE(GC_PROFILE_MARKING) | 2505 #if ENABLE(GC_PROFILE_MARKING) |
| 2500 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats(); | 2506 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats(); |
| 2501 #endif | 2507 #endif |
| 2502 | 2508 |
| 2503 if (Platform::current()) { | 2509 if (Platform::current()) { |
| 2504 Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); | 2510 Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); |
| 2505 Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", Heap::allocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50); | 2511 Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", Heap::allocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50); |
| 2506 Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", Heap::allocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50); | 2512 Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", Heap::allocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50); |
| 2507 } | 2513 } |
| 2508 | 2514 |
| 2509 if (state->isMainThread()) | 2515 if (state->isMainThread()) |
| 2510 ScriptForbiddenScope::exit(); | 2516 ScriptForbiddenScope::exit(); |
| 2511 } | 2517 } |
| 2512 | 2518 |
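Note the new bracketing in collectGarbage: enterGC()/preGC() run only after all threads are confirmed parked, the marked/allocated counters are reset inside the GC window rather than before it, and postGC()/leaveGC() close the window before the histograms are reported, leaving sweeping to each thread afterwards. A typical call site, using the enums that appear earlier in this file (the cause argument presumably has a default, since outOfLineAllocate above calls this with one argument):

    // Force a precise GC from a thread that is attached to the Blink heap.
    Heap::collectGarbage(ThreadState::NoHeapPointersOnStack, ThreadState::ForcedGC);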
| 2513 void Heap::collectGarbageForTerminatingThread(ThreadState* state) | 2519 void Heap::collectGarbageForTerminatingThread(ThreadState* state) |
| 2514 { | 2520 { |
| 2515 // We explicitly do not enter a safepoint while doing thread specific | 2521 // We explicitly do not enter a safepoint while doing thread specific |
| 2516 // garbage collection since we don't want to allow a global GC at the | 2522 // garbage collection since we don't want to allow a global GC at the |
| 2517 // same time as a thread local GC. | 2523 // same time as a thread local GC. |
| 2518 | 2524 |
| 2519 { | 2525 { |
| 2520 NoAllocationScope<AnyThread> noAllocationScope; | 2526 NoAllocationScope<AnyThread> noAllocationScope; |
| 2521 | 2527 |
| 2522 state->enterGC(); | 2528 enterGC(); |
| 2523 state->prepareForGC(); | 2529 state->preGC(); |
| 2524 | 2530 |
| 2525 // 1. trace the thread local persistent roots. For thread local GCs we | 2531 // 1. trace the thread local persistent roots. For thread local GCs we |
| 2526 // don't trace the stack (ie. no conservative scanning) since this is | 2532 // don't trace the stack (ie. no conservative scanning) since this is |
| 2527 // only called during thread shutdown where there should be no objects | 2533 // only called during thread shutdown where there should be no objects |
| 2528 // on the stack. | 2534 // on the stack. |
| 2529 // We also assume that orphaned pages have no objects reachable from | 2535 // We also assume that orphaned pages have no objects reachable from |
| 2530 // persistent handles on other threads or CrossThreadPersistents. The | 2536 // persistent handles on other threads or CrossThreadPersistents. The |
| 2531 // only cases where this could happen are if a subsequent conservative | 2537 // only cases where this could happen are if a subsequent conservative |
| 2532 // global GC finds a "pointer" on the stack or due to a programming | 2538 // global GC finds a "pointer" on the stack or due to a programming |
| 2533 // error where an object has a dangling cross-thread pointer to an | 2539 // error where an object has a dangling cross-thread pointer to an |
| 2534 // object on this heap. | 2540 // object on this heap. |
| 2535 state->visitPersistents(s_markingVisitor); | 2541 state->visitPersistents(s_markingVisitor); |
| 2536 | 2542 |
| 2537 // 2. trace objects reachable from the thread's persistent roots | 2543 // 2. trace objects reachable from the thread's persistent roots |
| 2538 // including ephemerons. | 2544 // including ephemerons. |
| 2539 processMarkingStack<ThreadLocalMarking>(); | 2545 processMarkingStack<ThreadLocalMarking>(); |
| 2540 | 2546 |
| 2541 postMarkingProcessing(); | 2547 postMarkingProcessing(); |
| 2542 globalWeakProcessing(); | 2548 globalWeakProcessing(); |
| 2543 | 2549 |
| 2544 state->leaveGC(); | 2550 state->postGC(); |
| 2551 leaveGC(); |
| 2545 } | 2552 } |
| 2546 state->performPendingSweep(); | 2553 state->performPendingSweep(); |
| 2547 } | 2554 } |
| 2548 | 2555 |
| 2549 template<CallbackInvocationMode Mode> | 2556 template<CallbackInvocationMode Mode> |
| 2550 void Heap::processMarkingStack() | 2557 void Heap::processMarkingStack() |
| 2551 { | 2558 { |
| 2552 // Ephemeron fixed point loop. | 2559 // Ephemeron fixed point loop. |
| 2553 do { | 2560 do { |
| 2554 { | 2561 { |
| (...skipping 68 matching lines...) |
| 2623 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) { | 2630 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) { |
| 2624 page->setTerminating(); | 2631 page->setTerminating(); |
| 2625 } | 2632 } |
| 2626 for (LargeObject<Header>* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) { | 2633 for (LargeObject<Header>* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) { |
| 2627 largeObject->setTerminating(); | 2634 largeObject->setTerminating(); |
| 2628 } | 2635 } |
| 2629 } | 2636 } |
| 2630 size_t Heap::objectPayloadSizeForTesting() | 2637 size_t Heap::objectPayloadSizeForTesting() |
| 2631 { | 2638 { |
| 2632 size_t objectPayloadSize = 0; | 2639 size_t objectPayloadSize = 0; |
| 2633 ASSERT(ThreadState::isAnyThreadInGC()); | 2640 ASSERT(Heap::isInGC()); |
| 2634 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 2641 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
| 2635 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator; | 2642 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator; |
| 2636 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 2643 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
| 2637 (*it)->makeConsistentForSweeping(); | 2644 (*it)->makeConsistentForSweeping(); |
| 2638 objectPayloadSize += (*it)->objectPayloadSizeForTesting(); | 2645 objectPayloadSize += (*it)->objectPayloadSizeForTesting(); |
| 2639 } | 2646 } |
| 2640 return objectPayloadSize; | 2647 return objectPayloadSize; |
| 2641 } | 2648 } |
| 2642 | 2649 |
| 2643 template<typename HeapTraits, typename HeapType, typename HeaderType> | 2650 template<typename HeapTraits, typename HeapType, typename HeaderType> |
| 2644 void HeapAllocator::backingFree(void* address) | 2651 void HeapAllocator::backingFree(void* address) |
| 2645 { | 2652 { |
| 2646 if (!address || ThreadState::isAnyThreadInGC()) | 2653 if (!address || Heap::isInGC()) |
| 2647 return; | 2654 return; |
| 2648 | 2655 |
| 2649 ThreadState* state = ThreadState::current(); | 2656 ThreadState* state = ThreadState::current(); |
| 2650 if (state->isSweepInProgress()) | 2657 if (state->isSweepInProgress()) |
| 2651 return; | 2658 return; |
| 2652 | 2659 |
| 2653 // Don't promptly free large objects because their page is never reused | 2660 // Don't promptly free large objects because their page is never reused |
| 2654 // and don't free backings allocated on other threads. | 2661 // and don't free backings allocated on other threads. |
| 2655 BaseHeapPage* page = pageFromObject(address); | 2662 BaseHeapPage* page = pageFromObject(address); |
| 2656 if (page->isLargeObject() || page->threadState() != state) | 2663 if (page->isLargeObject() || page->threadState() != state) |
| (...skipping 20 matching lines...) |
| 2677 { | 2684 { |
| 2678 typedef HeapIndexTrait<HashTableBackingHeap> HeapTraits; | 2685 typedef HeapIndexTrait<HashTableBackingHeap> HeapTraits; |
| 2679 typedef HeapTraits::HeapType HeapType; | 2686 typedef HeapTraits::HeapType HeapType; |
| 2680 typedef HeapTraits::HeaderType HeaderType; | 2687 typedef HeapTraits::HeaderType HeaderType; |
| 2681 backingFree<HeapTraits, HeapType, HeaderType>(address); | 2688 backingFree<HeapTraits, HeapType, HeaderType>(address); |
| 2682 } | 2689 } |
| 2683 | 2690 |
| 2684 template<typename HeapTraits, typename HeapType, typename HeaderType> | 2691 template<typename HeapTraits, typename HeapType, typename HeaderType> |
| 2685 bool HeapAllocator::backingExpand(void* address, size_t newSize) | 2692 bool HeapAllocator::backingExpand(void* address, size_t newSize) |
| 2686 { | 2693 { |
| 2687 if (!address || ThreadState::isAnyThreadInGC()) | 2694 if (!address || Heap::isInGC()) |
| 2688 return false; | 2695 return false; |
| 2689 | 2696 |
| 2690 ThreadState* state = ThreadState::current(); | 2697 ThreadState* state = ThreadState::current(); |
| 2691 if (state->isSweepInProgress()) | 2698 if (state->isSweepInProgress()) |
| 2692 return false; | 2699 return false; |
| 2693 ASSERT(state->isAllocationAllowed()); | 2700 ASSERT(state->isAllocationAllowed()); |
| 2694 | 2701 |
| 2695 BaseHeapPage* page = pageFromObject(address); | 2702 BaseHeapPage* page = pageFromObject(address); |
| 2696 if (page->isLargeObject() || page->threadState() != state) | 2703 if (page->isLargeObject() || page->threadState() != state) |
| 2697 return false; | 2704 return false; |
| (...skipping 10 matching lines...) |
| 2708 bool HeapAllocator::vectorBackingExpand(void* address, size_t newSize) | 2715 bool HeapAllocator::vectorBackingExpand(void* address, size_t newSize) |
| 2709 { | 2716 { |
| 2710 typedef HeapIndexTrait<VectorBackingHeap> HeapTraits; | 2717 typedef HeapIndexTrait<VectorBackingHeap> HeapTraits; |
| 2711 typedef HeapTraits::HeapType HeapType; | 2718 typedef HeapTraits::HeapType HeapType; |
| 2712 typedef HeapTraits::HeaderType HeaderType; | 2719 typedef HeapTraits::HeaderType HeaderType; |
| 2713 return backingExpand<HeapTraits, HeapType, HeaderType>(address, newSize); | 2720 return backingExpand<HeapTraits, HeapType, HeaderType>(address, newSize); |
| 2714 } | 2721 } |
| 2715 | 2722 |
| 2716 BaseHeapPage* Heap::lookup(Address address) | 2723 BaseHeapPage* Heap::lookup(Address address) |
| 2717 { | 2724 { |
| 2718 ASSERT(ThreadState::isAnyThreadInGC()); | 2725 ASSERT(Heap::isInGC()); |
| 2719 if (!s_regionTree) | 2726 if (!s_regionTree) |
| 2720 return 0; | 2727 return 0; |
| 2721 if (PageMemoryRegion* region = s_regionTree->lookup(address)) { | 2728 if (PageMemoryRegion* region = s_regionTree->lookup(address)) { |
| 2722 BaseHeapPage* page = region->pageFromAddress(address); | 2729 BaseHeapPage* page = region->pageFromAddress(address); |
| 2723 return page && !page->orphaned() ? page : 0; | 2730 return page && !page->orphaned() ? page : 0; |
| 2724 } | 2731 } |
| 2725 return 0; | 2732 return 0; |
| 2726 } | 2733 } |
| 2727 | 2734 |
| 2728 static Mutex& regionTreeMutex() | 2735 static Mutex& regionTreeMutex() |
| 2729 { | 2736 { |
| 2730 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); | 2737 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); |
| 2731 return mutex; | 2738 return mutex; |
| 2732 } | 2739 } |
| 2733 | 2740 |
| 2734 void Heap::removePageMemoryRegion(PageMemoryRegion* region) | 2741 void Heap::removePageMemoryRegion(PageMemoryRegion* region) |
| 2735 { | 2742 { |
| 2736 // Deletion of large objects (and thus their regions) can happen concurrently | 2743 // Deletion of large objects (and thus their regions) can happen concurrently |
| 2737 // on sweeper threads. Removal can also happen during thread shutdown, but | 2744 // on sweeper threads. Removal can also happen during thread shutdown, but |
| 2738 // that case is safe. Regardless, we make all removals mutually exclusive. | 2745 // that case is safe. Regardless, we make all removals mutually exclusive. |
| 2739 MutexLocker locker(regionTreeMutex()); | 2746 MutexLocker locker(regionTreeMutex()); |
| 2740 RegionTree::remove(region, &s_regionTree); | 2747 RegionTree::remove(region, &s_regionTree); |
| 2741 } | 2748 } |
| 2742 | 2749 |
| 2743 void Heap::addPageMemoryRegion(PageMemoryRegion* region) | 2750 void Heap::addPageMemoryRegion(PageMemoryRegion* region) |
| 2744 { | 2751 { |
| 2745 ASSERT(ThreadState::isAnyThreadInGC()); | 2752 ASSERT(Heap::isInGC()); |
| 2746 RegionTree::add(new RegionTree(region), &s_regionTree); | 2753 RegionTree::add(new RegionTree(region), &s_regionTree); |
| 2747 } | 2754 } |
| 2748 | 2755 |
| 2749 PageMemoryRegion* Heap::RegionTree::lookup(Address address) | 2756 PageMemoryRegion* Heap::RegionTree::lookup(Address address) |
| 2750 { | 2757 { |
| 2751 RegionTree* current = s_regionTree; | 2758 RegionTree* current = s_regionTree; |
| 2752 while (current) { | 2759 while (current) { |
| 2753 Address base = current->m_region->base(); | 2760 Address base = current->m_region->base(); |
| 2754 if (address < base) { | 2761 if (address < base) { |
| 2755 current = current->m_left; | 2762 current = current->m_left; |
| (...skipping 55 matching lines...) |
| 2811 template class ThreadHeap<HeapObjectHeader>; | 2818 template class ThreadHeap<HeapObjectHeader>; |
| 2812 | 2819 |
| 2813 Visitor* Heap::s_markingVisitor; | 2820 Visitor* Heap::s_markingVisitor; |
| 2814 CallbackStack* Heap::s_markingStack; | 2821 CallbackStack* Heap::s_markingStack; |
| 2815 CallbackStack* Heap::s_postMarkingCallbackStack; | 2822 CallbackStack* Heap::s_postMarkingCallbackStack; |
| 2816 CallbackStack* Heap::s_weakCallbackStack; | 2823 CallbackStack* Heap::s_weakCallbackStack; |
| 2817 CallbackStack* Heap::s_ephemeronStack; | 2824 CallbackStack* Heap::s_ephemeronStack; |
| 2818 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; | 2825 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; |
| 2819 bool Heap::s_shutdownCalled = false; | 2826 bool Heap::s_shutdownCalled = false; |
| 2820 bool Heap::s_lastGCWasConservative = false; | 2827 bool Heap::s_lastGCWasConservative = false; |
| 2828 bool Heap::s_inGC = false; |
| 2821 FreePagePool* Heap::s_freePagePool; | 2829 FreePagePool* Heap::s_freePagePool; |
| 2822 OrphanedPagePool* Heap::s_orphanedPagePool; | 2830 OrphanedPagePool* Heap::s_orphanedPagePool; |
| 2823 Heap::RegionTree* Heap::s_regionTree = 0; | 2831 Heap::RegionTree* Heap::s_regionTree = 0; |
| 2824 size_t Heap::s_allocatedObjectSize = 0; | 2832 size_t Heap::s_allocatedObjectSize = 0; |
| 2825 size_t Heap::s_allocatedSpace = 0; | 2833 size_t Heap::s_allocatedSpace = 0; |
| 2826 size_t Heap::s_markedObjectSize = 0; | 2834 size_t Heap::s_markedObjectSize = 0; |
| 2827 | 2835 |
| 2828 } // namespace blink | 2836 } // namespace blink |