Chromium Code Reviews| Index: Source/platform/heap/Heap.h |
| diff --git a/Source/platform/heap/Heap.h b/Source/platform/heap/Heap.h |
| index 81bf4ad0cf68eb27e6c72f982c2e110a0707790b..5b00f7cb307081248d962e2a1ab9a2e97a3cbc93 100644 |
| --- a/Source/platform/heap/Heap.h |
| +++ b/Source/platform/heap/Heap.h |
| @@ -69,7 +69,13 @@ const size_t maxHeapObjectSize = 1 << 27; |
| const size_t markBitMask = 1; |
| const size_t freeListMask = 2; |
| -const size_t debugBitMask = 4; |
| +// The dead bit is used for objects that have gone through a GC marking, but did |
| +// not get swept before a new GC started. In that case we set the dead bit on |
| +// objects that were not marked in the previous GC to ensure we are not reviving |
|
Mads Ager (chromium)
2014/07/08 08:24:56
I think we should avoid using the word 'reviving'
wibling-chromium
2014/07/08 13:39:47
Done.
|
| +// them via a conservatively found pointer. This guarantees that we don't end up |
| +// tracing a revived object pointing into another thread heap's object where the |
| +// other thread did a sweep and cleared out the object. |
| +const size_t deadBitMask = 4; |
| const size_t sizeMask = ~7; |
| const uint8_t freelistZapValue = 42; |
| const uint8_t finalizedZapValue = 24; |
| @@ -127,60 +133,16 @@ inline bool isPageHeaderAddress(Address address) |
| } |
| #endif |
| -// Mask an address down to the enclosing oilpan heap page base address. |
| +// Mask an address down to the enclosing oilpan heap base page. |
| // All oilpan heap pages are aligned at blinkPageBase plus an OS page size. |
| // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our typed heaps. |
| // This is only exported to enable tests in HeapTest.cpp. |
| -PLATFORM_EXPORT inline Address pageHeaderAddress(Address address) |
| +PLATFORM_EXPORT inline BaseHeapPage* pageHeaderFromObject(const void* object) |
| { |
| - return blinkPageAddress(address) + osPageSize(); |
| + Address address = reinterpret_cast<Address>(const_cast<void*>(object)); |
| + return reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + osPageSize()); |
| } |
| -// Common header for heap pages. |
| -class BaseHeapPage { |
| -public: |
| - BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) |
| - : m_storage(storage) |
| - , m_gcInfo(gcInfo) |
| - , m_threadState(state) |
| - , m_padding(0) |
| - { |
| - ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); |
| - } |
| - |
| - // Check if the given address points to an object in this |
| - // heap page. If so, find the start of that object and mark it |
| - // using the given Visitor. Otherwise do nothing. The pointer must |
| - // be within the same aligned blinkPageSize as the this-pointer. |
| - // |
| - // This is used during conservative stack scanning to |
| - // conservatively mark all objects that could be referenced from |
| - // the stack. |
| - virtual void checkAndMarkPointer(Visitor*, Address) = 0; |
| - |
| -#if ENABLE(GC_TRACING) |
| - virtual const GCInfo* findGCInfo(Address) = 0; |
| -#endif |
| - |
| - Address address() { return reinterpret_cast<Address>(this); } |
| - PageMemory* storage() const { return m_storage; } |
| - ThreadState* threadState() const { return m_threadState; } |
| - const GCInfo* gcInfo() { return m_gcInfo; } |
| - virtual bool isLargeObject() { return false; } |
| - |
| -private: |
| - // Accessor to silence unused warnings for the m_padding field. |
| - intptr_t padding() const { return m_padding; } |
| - |
| - PageMemory* m_storage; |
| - const GCInfo* m_gcInfo; |
| - ThreadState* m_threadState; |
| - // Pointer sized integer to ensure proper alignment of the |
| - // HeapPage header. This can be used as a bit field if we need |
| - // to associate more information with pages. |
| - intptr_t m_padding; |
| -}; |
| - |
| // Large allocations are allocated as separate objects and linked in a |
| // list. |
| // |
| @@ -232,7 +194,7 @@ public: |
| // Returns true for any address that is on one of the pages that this |
| // large object uses. That ensures that we can use a negative result to |
| // populate the negative page cache. |
| - bool contains(Address object) |
| + virtual bool contains(Address object) OVERRIDE |
| { |
| return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size()); |
| } |
| @@ -261,6 +223,14 @@ public: |
| void getStats(HeapStats&); |
| void mark(Visitor*); |
| void finalize(); |
| + void setDeadMark(); |
| + virtual void markOrphaned() |
| + { |
| + // We clear out the payload to detect incorrect usage. |
|
haraken
2014/07/08 05:44:51
Don't we want to zap the orphaned page in Debug builds?
wibling-chromium
2014/07/08 13:39:46
Good idea. I added a new zap value to detect orphaned pages.
|
| + memset(payload(), 0, payloadSize()); |
| + clearReuseMemory(); // Don't reuse memory for large objects, ie. don't move to the memory pool. |
|
Mads Ager (chromium)
2014/07/08 08:24:56
It looks like this is only used to indicate that t
wibling-chromium
2014/07/08 13:39:47
Good idea. Done.
|
| + BaseHeapPage::markOrphaned(); |
| + } |
| private: |
| friend class ThreadHeap<Header>; |
| @@ -329,9 +299,9 @@ public: |
| inline size_t payloadSize(); |
| inline Address payloadEnd(); |
| - inline void setDebugMark(); |
| - inline void clearDebugMark(); |
| - inline bool hasDebugMark() const; |
| + inline void setDeadMark(); |
| + inline void clearDeadMark(); |
| + inline bool hasDeadMark() const; |
| // Zap magic number with a new magic number that means there was once an |
| // object allocated here, but it was freed because nobody marked it during |
| @@ -468,7 +438,7 @@ public: |
| // Returns true for the whole blinkPageSize page that the page is on, even |
| // for the header, and the unmapped guard page at the start. That ensures |
| // the result can be used to populate the negative page cache. |
| - bool contains(Address addr) |
| + virtual bool contains(Address addr) OVERRIDE |
| { |
| Address blinkPageStart = roundToBlinkPageStart(address()); |
| ASSERT(blinkPageStart == address() - osPageSize()); // Page is at aligned address plus guard page size. |
| @@ -490,7 +460,7 @@ public: |
| Address end() { return payload() + payloadSize(); } |
| void getStats(HeapStats&); |
| - void clearMarks(); |
| + void clearLiveAndMarkDead(); |
| void sweep(); |
| void clearObjectStartBitMap(); |
| void finalize(Header*); |
| @@ -502,6 +472,13 @@ public: |
| #if defined(ADDRESS_SANITIZER) |
| void poisonUnmarkedObjects(); |
| #endif |
| + virtual void markOrphaned() |
| + { |
| + // We clear out the payload to detect incorrect usage. |
|
Mads Ager (chromium)
2014/07/08 08:24:56
Maybe update the comment to state that incorrect usage will be detected.
wibling-chromium
2014/07/08 13:39:47
Done.
|
| + memset(payload(), 0, payloadSize()); |
| + setReuseMemory(); // Reuse memory for normal blink pages. |
|
Mads Ager (chromium)
2014/07/08 08:24:56
Can we get rid of this reuse memory bit and just u
wibling-chromium
2014/07/08 13:39:47
Done.
|
| + BaseHeapPage::markOrphaned(); |
| + } |
| protected: |
| Header* findHeaderFromAddress(Address); |
| @@ -677,6 +654,52 @@ private: |
| mutable Mutex m_mutex; |
| }; |
| +template<typename DataType> |
| +class HeapPool { |
| +protected: |
| + HeapPool(); |
| + |
| + class PoolEntry { |
| + public: |
| + PoolEntry(DataType* data, PoolEntry* next) |
| + : data(data) |
| + , next(next) |
| + { } |
| + |
| + DataType* data; |
| + PoolEntry* next; |
| + }; |
| + |
| + PoolEntry* m_pool[NumberOfHeaps]; |
| +}; |
| + |
| +// Once pages have been used for one type of thread heap they will never be |
| +// reused for another type of thread heap. Instead of unmapping, we add the |
| +// pages to a pool of pages to be reused later by a thread heap of the same |
| +// type. This is done as a security feature to avoid type confusion. The |
| +// heaps are type segregated by having separate thread heaps for different |
| +// types of objects. Holding on to pages ensures that the same virtual address |
| +// space cannot be used for objects of another type than the type contained |
| +// in this page to begin with. |
| +class HeapMemoryPool : public HeapPool<PageMemory> { |
|
haraken
2014/07/08 05:44:51
It's a bit confusing to mix "HeapPool", "HeapMemor
wibling-chromium
2014/07/08 13:39:46
Good idea. I changed it to
HeapPool => PagePool
|
| +public: |
| + ~HeapMemoryPool(); |
| + void addMemory(int index, PageMemory*); |
| + PageMemory* takeMemory(int index); |
| + |
| +private: |
| + Mutex m_mutex[NumberOfHeaps]; |
| +}; |
| + |
| +class HeapOrphanedPagePool : public HeapPool<BaseHeapPage> { |
| +public: |
| + ~HeapOrphanedPagePool(); |
| + void addOrphanedPage(int, BaseHeapPage*); |
| + void addOrphanedPages(int, Vector<BaseHeapPage*>&); |
| + void decommitOrphanedPages(); |
| + bool contains(void*); |
| +}; |
| + |
| // The CallbackStack contains all the visitor callbacks used to trace and mark |
| // objects. A specific CallbackStack instance contains at most bufferSize elements. |
| // If more space is needed a new CallbackStack instance is created and chained |
| @@ -727,8 +750,8 @@ public: |
| init(first); |
| } |
| } |
| - bool popAndInvokeCallback(CallbackStack** first, Visitor*); |
| - static void invokeCallbacks(CallbackStack** first, Visitor*); |
| + template<bool ThreadLocal> bool popAndInvokeCallback(CallbackStack** first, Visitor*); |
| + template<bool ThreadLocal> static void invokeCallbacks(CallbackStack** first, Visitor*); |
| Item* allocateEntry(CallbackStack** first) |
| { |
| @@ -742,7 +765,7 @@ public: |
| #endif |
| private: |
| - void invokeOldestCallbacks(Visitor*); |
| + template<bool ThreadLocal> void invokeOldestCallbacks(Visitor*); |
| static const size_t bufferSize = 8000; |
| Item m_buffer[bufferSize]; |
| @@ -776,7 +799,7 @@ public: |
| virtual void assertEmpty() = 0; |
| virtual void clearFreeLists() = 0; |
| - virtual void clearMarks() = 0; |
| + virtual void clearLiveAndMarkDead() = 0; |
| #ifndef NDEBUG |
| virtual void getScannedStats(HeapStats&) = 0; |
| #endif |
| @@ -784,6 +807,8 @@ public: |
| virtual void makeConsistentForGC() = 0; |
| virtual bool isConsistentForGC() = 0; |
| + virtual void setShutdown() = 0; |
| + |
| // Returns a bucket number for inserting a FreeListEntry of a |
| // given size. All FreeListEntries in the given bucket, n, have |
| // size >= 2^n. |
| @@ -803,7 +828,7 @@ public: |
| template<typename Header> |
| class ThreadHeap : public BaseHeap { |
| public: |
| - ThreadHeap(ThreadState*); |
| + ThreadHeap(ThreadState*, int); |
| virtual ~ThreadHeap(); |
| virtual BaseHeapPage* heapPageFromAddress(Address); |
| @@ -813,7 +838,7 @@ public: |
| virtual void sweep(); |
| virtual void assertEmpty(); |
| virtual void clearFreeLists(); |
| - virtual void clearMarks(); |
| + virtual void clearLiveAndMarkDead(); |
| #ifndef NDEBUG |
| virtual void getScannedStats(HeapStats&); |
| #endif |
| @@ -830,41 +855,18 @@ public: |
| inline Address allocate(size_t, const GCInfo*); |
| void addToFreeList(Address, size_t); |
| - void addPageMemoryToPool(PageMemory*); |
| - void addPageToPool(HeapPage<Header>*); |
| inline static size_t roundedAllocationSize(size_t size) |
| { |
| return allocationSizeFromSize(size) - sizeof(Header); |
| } |
| -private: |
| - // Once pages have been used for one thread heap they will never |
| - // be reused for another thread heap. Instead of unmapping, we add |
| - // the pages to a pool of pages to be reused later by this thread |
| - // heap. This is done as a security feature to avoid type |
| - // confusion. The heap is type segregated by having separate |
| - // thread heaps for various types of objects. Holding on to pages |
| - // ensures that the same virtual address space cannot be used for |
| - // objects of another type than the type contained in this thread |
| - // heap. |
| - class PagePoolEntry { |
| - public: |
| - PagePoolEntry(PageMemory* storage, PagePoolEntry* next) |
| - : m_storage(storage) |
| - , m_next(next) |
| - { } |
| - |
| - PageMemory* storage() { return m_storage; } |
| - PagePoolEntry* next() { return m_next; } |
| - |
| - private: |
| - PageMemory* m_storage; |
| - PagePoolEntry* m_next; |
| - }; |
| + void setShutdown(); |
| + void removePageFromHeap(HeapPage<Header>*); |
| +private: |
| + void addPageToHeap(const GCInfo*); |
| PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*); |
| static size_t allocationSizeFromSize(size_t); |
| - void addPageToHeap(const GCInfo*); |
| PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*); |
| Address currentAllocationPoint() const { return m_currentAllocationPoint; } |
| size_t remainingAllocationSize() const { return m_remainingAllocationSize; } |
| @@ -880,11 +882,7 @@ private: |
| bool allocateFromFreeList(size_t); |
| void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**); |
| - |
| void allocatePage(const GCInfo*); |
| - PageMemory* takePageFromPool(); |
| - void clearPagePool(); |
| - void deletePages(); |
| Address m_currentAllocationPoint; |
| size_t m_remainingAllocationSize; |
| @@ -898,9 +896,9 @@ private: |
| // All FreeListEntries in the nth list have size >= 2^n. |
| FreeListEntry* m_freeLists[blinkPageSizeLog2]; |
| - // List of pages that have been previously allocated, but are now |
| - // unused. |
| - PagePoolEntry* m_pagePool; |
| + // Index into the memory pools. This is used so heaps of the same type (ie. index) |
| + // put their pages into the correct pool for avoiding type confusion. |
|
haraken
2014/07/08 05:44:51
// This is used to ensure that the pages of the sa
wibling-chromium
2014/07/08 13:39:46
Done.
|
| + int m_index; |
| }; |
| class PLATFORM_EXPORT Heap { |
| @@ -912,6 +910,9 @@ public: |
| static BaseHeapPage* contains(Address); |
| static BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); } |
| static BaseHeapPage* contains(const void* pointer) { return contains(const_cast<void*>(pointer)); } |
| +#ifndef NDEBUG |
| + static bool containedInHeapOrOrphanedPage(void*); |
| +#endif |
| // Push a trace callback on the marking stack. |
| static void pushTraceCallback(void* containerObject, TraceCallback); |
| @@ -933,7 +934,7 @@ public: |
| // Pop the top of the marking stack and call the callback with the visitor |
| // and the object. Returns false when there is nothing more to do. |
| - static bool popAndInvokeTraceCallback(Visitor*); |
| + template<bool ThreadLocal> static bool popAndInvokeTraceCallback(Visitor*); |
|
Mads Ager (chromium)
2014/07/08 08:24:56
Could you introduce an enum for this to make the c
wibling-chromium
2014/07/08 13:39:46
Done.
|
| // Remove an item from the weak callback work list and call the callback |
| // with the visitor and the closure pointer. Returns false when there is |
| @@ -950,7 +951,9 @@ public: |
| template<typename T> static Address reallocate(void* previous, size_t); |
| static void collectGarbage(ThreadState::StackState); |
| + static void collectGarbageForThread(ThreadState*, bool); |
|
Mads Ager (chromium)
2014/07/08 08:24:56
Maybe call this collectGarbageForTerminatingThread
wibling-chromium
2014/07/08 13:39:47
Done.
|
| static void collectAllGarbage(); |
| + template<bool ThreadLocal> static void tracingAndGlobalWeakProcessing(); |
|
Mads Ager (chromium)
2014/07/08 08:24:56
traceAndPerformGlobalWeakProcessing?
wibling-chromium
2014/07/08 13:39:47
Done. Changed it to traceRootsAndPerformGlobalWeakProcessing.
|
| static void setForcePreciseGCForTesting(); |
| static void prepareForGC(); |
| @@ -988,6 +991,9 @@ public: |
| // during conservative scanning. |
| static bool lastGCWasConservative() { return s_lastGCWasConservative; } |
| + static HeapMemoryPool* memoryPool() { return s_memoryPool; } |
| + static HeapOrphanedPagePool* orphanedPagePool() { return s_orphanedPagePool; } |
| + |
| private: |
| static Visitor* s_markingVisitor; |
| @@ -997,6 +1003,8 @@ private: |
| static HeapDoesNotContainCache* s_heapDoesNotContainCache; |
| static bool s_shutdownCalled; |
| static bool s_lastGCWasConservative; |
| + static HeapMemoryPool* s_memoryPool; |
|
Mads Ager (chromium)
2014/07/08 08:24:56
Do we want the page pool to be global or do we want it to be per thread?
wibling-chromium
2014/07/08 13:39:47
There are trade-offs for both. With the global ver
|
| + static HeapOrphanedPagePool* s_orphanedPagePool; |
| friend class ThreadState; |
| }; |
| @@ -1303,7 +1311,10 @@ T* adoptRefCountedGarbageCollected(T* ptr) |
| NO_SANITIZE_ADDRESS |
| void HeapObjectHeader::checkHeader() const |
| { |
| - ASSERT(m_magic == magic); |
| +#ifndef NDEBUG |
| + BaseHeapPage* page = pageHeaderFromObject(this); |
| + ASSERT(page->orphaned() || m_magic == magic); |
| +#endif |
| } |
| Address HeapObjectHeader::payload() |
| @@ -2322,6 +2333,47 @@ void HeapHashTableBacking<Table>::finalize(void* pointer) |
| } |
| } |
| +template<bool ThreadLocal> |
| +bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor) |
|
Mads Ager (chromium)
2014/07/08 08:24:56
We might want to keep this implementation in the c
|
| +{ |
| + if (m_current == &(m_buffer[0])) { |
| + if (!m_next) { |
| +#ifndef NDEBUG |
| + clearUnused(); |
| +#endif |
| + return false; |
| + } |
| + CallbackStack* nextStack = m_next; |
| + *first = nextStack; |
| + delete this; |
| + return nextStack->popAndInvokeCallback<ThreadLocal>(first, visitor); |
| + } |
| + Item* item = --m_current; |
| + |
| + // If the object being traced is located on a page which is dead don't |
| + // trace it. This can happen when a conservative GC kept a dead object |
|
haraken
2014/07/08 05:44:51
// If the object being traced is located on a dead
|
| + // alive which pointed to a (now gone) object on the cleaned up page. |
| + // Also if doing a thread local GC don't trace objects that are located |
|
haraken
2014/07/08 05:44:51
// Also if we're now performing a thread local GC
|
| + // on other thread's heaps, ie. pages where the shuttingDown flag is not |
| + // set. |
| + BaseHeapPage* heapPage = pageHeaderFromObject(item->object()); |
| + if (ThreadLocal ? (heapPage->orphaned() || !heapPage->shuttingDown()) : heapPage->orphaned()) { |
|
haraken
2014/07/08 05:44:51
I'd rename "shuttingDown" to "isTargetOfThreadLocalGC".
haraken
2014/07/08 05:44:51
Would it be possible to add RELEASE_ASSERT(!heapPa
|
| + // If tracing this from a global GC set the traced bit. |
| + if (!ThreadLocal) |
| + heapPage->setTraced(); |
|
haraken
2014/07/08 05:44:51
Slightly more readable:
if (ThreadLocal) {
if (
|
| + return true; |
| + } |
| + |
| + VisitorCallback callback = item->callback(); |
| +#if ENABLE(GC_TRACING) |
| + if (ThreadState::isAnyThreadInGC()) // weak-processing will also use popAndInvokeCallback |
| + visitor->setHostInfo(item->object(), classOf(item->object())); |
| +#endif |
| + callback(visitor, item->object()); |
| + |
| + return true; |
| +} |
| + |
| template<typename T, typename U, typename V, typename W, typename X> |
| struct GCInfoTrait<HeapHashMap<T, U, V, W, X> > : public GCInfoTrait<HashMap<T, U, V, W, X, HeapAllocator> > { }; |
| template<typename T, typename U, typename V> |