| Index: Source/platform/heap/Heap.h
|
| diff --git a/Source/platform/heap/Heap.h b/Source/platform/heap/Heap.h
|
| index d4d15bf4f54e7d54a7521d853c57ed60041d17db..6e3c70b8a6155ac7a2ce0a496bb68cc9e4fa2326 100644
|
| --- a/Source/platform/heap/Heap.h
|
| +++ b/Source/platform/heap/Heap.h
|
| @@ -69,10 +69,25 @@ const size_t maxHeapObjectSize = 1 << 27;
|
|
|
| const size_t markBitMask = 1;
|
| const size_t freeListMask = 2;
|
| -const size_t debugBitMask = 4;
|
| +// The dead bit is used for objects that have gone through a GC marking phase
|
| +// but did not get swept before a new GC started. In that case we set the dead
|
| +// bit on objects that were not marked in the previous GC, to ensure we do not
|
| +// trace them via a conservatively found pointer. Tracing a dead object could
|
| +// lead to tracing an already finalized object in another thread's heap, which
|
| +// would be a use-after-free.
|
| +const size_t deadBitMask = 4;
|
| const size_t sizeMask = ~7;
|
| const uint8_t freelistZapValue = 42;
|
| const uint8_t finalizedZapValue = 24;
|
| +// The orphaned zap value must be zero in its lowest bits to allow for using
|
| +// the mark bit when tracing: 240 is 0xF0, so the mark, free-list, and dead
|
| +// bits of a zapped header all read as zero.
|
| +const uint8_t orphanedZapValue = 240;
|
| +
|
| +enum CallbackInvocationMode {
|
| + GlobalMarking,
|
| + ThreadLocalMarking,
|
| + WeaknessProcessing,
|
| +};
|
|
|
| class HeapStats;
|
| class PageMemory;
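
For orientation: a minimal sketch of how accessors built on these masks
typically look, assuming the header packs the object size and the low flag
bits into one size_t word (which is what sizeMask = ~7 implies). The field
name m_size is illustrative, not taken from the patch.

    // Sketch only: a header word storing size | flags, per the masks above.
    struct HeaderWordSketch {
        size_t m_size; // hypothetical: size in bits 3 and up, flags in bits 0-2

        void setDeadMark() { m_size |= deadBitMask; }
        void clearDeadMark() { m_size &= ~deadBitMask; }
        bool hasDeadMark() const { return !!(m_size & deadBitMask); }
        size_t size() const { return m_size & sizeMask; }
    };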
|
| @@ -127,60 +142,16 @@ inline bool isPageHeaderAddress(Address address)
|
| }
|
| #endif
|
|
|
| -// Mask an address down to the enclosing oilpan heap page base address.
|
| +// Mask an address down to the enclosing oilpan heap base page.
|
| // All oilpan heap pages are aligned at blinkPageBase plus an OS page size.
|
| // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our typed heaps.
|
| // This is only exported to enable tests in HeapTest.cpp.
|
| -PLATFORM_EXPORT inline Address pageHeaderAddress(Address address)
|
| +PLATFORM_EXPORT inline BaseHeapPage* pageHeaderFromObject(const void* object)
|
| {
|
| - return blinkPageAddress(address) + osPageSize();
|
| + Address address = reinterpret_cast<Address>(const_cast<void*>(object));
|
| + return reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + osPageSize());
|
| }
|
|
|
| -// Common header for heap pages.
|
| -class BaseHeapPage {
|
| -public:
|
| - BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state)
|
| - : m_storage(storage)
|
| - , m_gcInfo(gcInfo)
|
| - , m_threadState(state)
|
| - , m_padding(0)
|
| - {
|
| - ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
|
| - }
|
| -
|
| - // Check if the given address points to an object in this
|
| - // heap page. If so, find the start of that object and mark it
|
| - // using the given Visitor. Otherwise do nothing. The pointer must
|
| - // be within the same aligned blinkPageSize as the this-pointer.
|
| - //
|
| - // This is used during conservative stack scanning to
|
| - // conservatively mark all objects that could be referenced from
|
| - // the stack.
|
| - virtual void checkAndMarkPointer(Visitor*, Address) = 0;
|
| -
|
| -#if ENABLE(GC_TRACING)
|
| - virtual const GCInfo* findGCInfo(Address) = 0;
|
| -#endif
|
| -
|
| - Address address() { return reinterpret_cast<Address>(this); }
|
| - PageMemory* storage() const { return m_storage; }
|
| - ThreadState* threadState() const { return m_threadState; }
|
| - const GCInfo* gcInfo() { return m_gcInfo; }
|
| - virtual bool isLargeObject() { return false; }
|
| -
|
| -private:
|
| - // Accessor to silence unused warnings for the m_padding field.
|
| - intptr_t padding() const { return m_padding; }
|
| -
|
| - PageMemory* m_storage;
|
| - const GCInfo* m_gcInfo;
|
| - ThreadState* m_threadState;
|
| - // Pointer sized integer to ensure proper alignment of the
|
| - // HeapPage header. This can be used as a bit field if we need
|
| - // to associate more information with pages.
|
| - intptr_t m_padding;
|
| -};
|
| -
|
| // Large allocations are allocated as separate objects and linked in a
|
| // list.
|
| //
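
The new pageHeaderFromObject helper above is pure address arithmetic: mask
down to the blink-page boundary, then step over the guard page. A worked
sketch, assuming a 128 KiB blink page and a 4 KiB OS page (the real constants
are defined elsewhere in this header; the values here are assumptions):

    #include <stdint.h>

    const uintptr_t kBlinkPageSize = 0x20000; // assumed 128 KiB
    const uintptr_t kOsPageSize = 0x1000;     // assumed 4 KiB guard page

    // Mirrors blinkPageAddress(address) + osPageSize() from the patch.
    // E.g. an object at 0x7f00012345 -> page base 0x7f00000000
    //                                -> header    0x7f00001000.
    inline char* sketchPageHeader(const void* object)
    {
        uintptr_t a = reinterpret_cast<uintptr_t>(object);
        return reinterpret_cast<char*>((a & ~(kBlinkPageSize - 1)) + kOsPageSize);
    }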
|
| @@ -232,7 +203,7 @@ public:
|
| // Returns true for any address that is on one of the pages that this
|
| // large object uses. That ensures that we can use a negative result to
|
| // populate the negative page cache.
|
| - bool contains(Address object)
|
| + virtual bool contains(Address object) OVERRIDE
|
| {
|
| return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size());
|
| }
|
| @@ -261,6 +232,14 @@ public:
|
| void getStats(HeapStats&);
|
| void mark(Visitor*);
|
| void finalize();
|
| + void setDeadMark();
|
| + virtual void markOrphaned()
|
| + {
|
| + // Zap the payload with a recognizable value to detect any incorrect
|
| +        // cross-thread pointer usage.
|
| + memset(payload(), orphanedZapValue, payloadSize());
|
| + BaseHeapPage::markOrphaned();
|
| + }
|
|
|
| private:
|
| friend class ThreadHeap<Header>;
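
Both markOrphaned overrides finish by calling BaseHeapPage::markOrphaned().
That base implementation is not part of this hunk; presumably it records the
orphaned state that checkHeader queries via page->orphaned() in the final
hunk. A hedged sketch of the bookkeeping this patch relies on:

    // Illustrative only; the real BaseHeapPage definition lives elsewhere.
    class BaseHeapPageSketch {
    public:
        void markOrphaned()
        {
            m_threadState = 0; // page no longer belongs to a live thread
            m_orphaned = true; // later queried by checkHeader()
        }
        bool orphaned() const { return m_orphaned; }

    private:
        ThreadState* m_threadState;
        bool m_orphaned;
    };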
|
| @@ -329,9 +308,9 @@ public:
|
| inline size_t payloadSize();
|
| inline Address payloadEnd();
|
|
|
| - inline void setDebugMark();
|
| - inline void clearDebugMark();
|
| - inline bool hasDebugMark() const;
|
| + inline void setDeadMark();
|
| + inline void clearDeadMark();
|
| + inline bool hasDeadMark() const;
|
|
|
| // Zap magic number with a new magic number that means there was once an
|
| // object allocated here, but it was freed because nobody marked it during
|
| @@ -468,7 +447,7 @@ public:
|
| // Returns true for the whole blinkPageSize page that the page is on, even
|
| // for the header, and the unmapped guard page at the start. That ensures
|
| // the result can be used to populate the negative page cache.
|
| - bool contains(Address addr)
|
| + virtual bool contains(Address addr) OVERRIDE
|
| {
|
| Address blinkPageStart = roundToBlinkPageStart(address());
|
| ASSERT(blinkPageStart == address() - osPageSize()); // Page is at aligned address plus guard page size.
|
| @@ -490,7 +469,7 @@ public:
|
| Address end() { return payload() + payloadSize(); }
|
|
|
| void getStats(HeapStats&);
|
| - void clearMarks();
|
| + void clearLiveAndMarkDead();
|
| void sweep();
|
| void clearObjectStartBitMap();
|
| void finalize(Header*);
|
| @@ -502,6 +481,22 @@ public:
|
| #if defined(ADDRESS_SANITIZER)
|
| void poisonUnmarkedObjects();
|
| #endif
|
| + NO_SANITIZE_ADDRESS
|
| + virtual void markOrphaned()
|
| + {
|
| + // Zap the payload with a recognizable value to detect any incorrect
|
| +        // cross-thread pointer usage.
|
| +#if defined(ADDRESS_SANITIZER)
|
| +        // Don't use memset when running with ASan, since this needs to zap
|
| +        // poisoned memory as well and the NO_SANITIZE_ADDRESS annotation
|
| +        // only covers code in this method, not calls into memset.
|
| + for (Address current = payload(); current < payload() + payloadSize(); ++current)
|
| + *current = orphanedZapValue;
|
| +#else
|
| + memset(payload(), orphanedZapValue, payloadSize());
|
| +#endif
|
| + BaseHeapPage::markOrphaned();
|
| + }
|
|
|
| protected:
|
| Header* findHeaderFromAddress(Address);
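
NO_SANITIZE_ADDRESS is what lets the byte-wise loop above write into
ASan-poisoned red zones: the attribute covers only the annotated function
body, which is exactly why the memset path cannot be used under ASan. The
macro is defined roughly along these lines (sketch; see wtf/AddressSanitizer.h
for the authoritative definition):

    #if defined(ADDRESS_SANITIZER)
    // Tells the compiler not to instrument loads/stores in this function.
    #define NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
    #else
    #define NO_SANITIZE_ADDRESS
    #endif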
|
| @@ -677,6 +672,55 @@ private:
|
| mutable Mutex m_mutex;
|
| };
|
|
|
| +template<typename DataType>
|
| +class PagePool {
|
| +protected:
|
| + PagePool();
|
| +
|
| + class PoolEntry {
|
| + public:
|
| + PoolEntry(DataType* data, PoolEntry* next)
|
| + : data(data)
|
| + , next(next)
|
| + { }
|
| +
|
| + DataType* data;
|
| + PoolEntry* next;
|
| + };
|
| +
|
| + PoolEntry* m_pool[NumberOfHeaps];
|
| +};
|
| +
|
| +// Once pages have been used for one type of thread heap, they will never be
|
| +// reused for a thread heap of another type. Instead of unmapping, we add the
|
| +// pages to a pool of pages to be reused later by a thread heap of the same
|
| +// type. This is done as a security feature to avoid type confusion. The
|
| +// heaps are type segregated by having separate thread heaps for different
|
| +// types of objects. Holding on to pages ensures that the same virtual address
|
| +// space cannot be used for objects of another type than the type contained
|
| +// in this page to begin with.
|
| +class FreePagePool : public PagePool<PageMemory> {
|
| +public:
|
| + ~FreePagePool();
|
| + void addFreePage(int, PageMemory*);
|
| + PageMemory* takeFreePage(int);
|
| +
|
| +private:
|
| + Mutex m_mutex[NumberOfHeaps];
|
| +};
|
| +
|
| +class OrphanedPagePool : public PagePool<BaseHeapPage> {
|
| +public:
|
| + ~OrphanedPagePool();
|
| + void addOrphanedPage(int, BaseHeapPage*);
|
| + void decommitOrphanedPages();
|
| +#ifndef NDEBUG
|
| + bool contains(void*);
|
| +#endif
|
| +private:
|
| + void clearMemory(PageMemory*);
|
| +};
|
| +
|
| // The CallbackStack contains all the visitor callbacks used to trace and mark
|
| // objects. A specific CallbackStack instance contains at most bufferSize elements.
|
| // If more space is needed a new CallbackStack instance is created and chained
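
A minimal sketch of how the two pool classes above plausibly thread entries
through PagePool<DataType>::m_pool, with the per-heap-index mutex guarding
each list. The bodies are illustrative; the patch only shows the header:

    void FreePagePool::addFreePage(int index, PageMemory* memory)
    {
        MutexLocker locker(m_mutex[index]);
        m_pool[index] = new PoolEntry(memory, m_pool[index]); // push front
    }

    PageMemory* FreePagePool::takeFreePage(int index)
    {
        MutexLocker locker(m_mutex[index]);
        PoolEntry* entry = m_pool[index];
        if (!entry)
            return 0; // pool empty; caller maps fresh memory instead
        m_pool[index] = entry->next;
        PageMemory* memory = entry->data;
        delete entry;
        return memory;
    }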
|
| @@ -727,7 +771,7 @@ public:
|
| init(first);
|
| }
|
| }
|
| - bool popAndInvokeCallback(CallbackStack** first, Visitor*);
|
| + template<CallbackInvocationMode Mode> bool popAndInvokeCallback(CallbackStack** first, Visitor*);
|
| static void invokeCallbacks(CallbackStack** first, Visitor*);
|
|
|
| Item* allocateEntry(CallbackStack** first)
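
Making popAndInvokeCallback a template over CallbackInvocationMode lets the
per-item filtering be resolved at compile time. Given the dead-bit comment at
the top of the file, the intent is presumably that ThreadLocalMarking skips
objects the current thread must not trace, while GlobalMarking traces
everything. A hedged sketch of the shape (pop and objectIsLocal are
illustrative helpers, not names from the patch):

    template<CallbackInvocationMode Mode>
    bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor)
    {
        Item* item = pop(first);
        if (!item)
            return false;
        // Under thread-local marking, skip anything not on one of this
        // thread's own (non-orphaned) pages.
        if (Mode == ThreadLocalMarking && !objectIsLocal(item->object()))
            return true; // entry consumed; keep draining the stack
        item->callback()(visitor, item->object());
        return true;
    }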
|
| @@ -755,6 +799,7 @@ private:
|
| class BaseHeap {
|
| public:
|
| virtual ~BaseHeap() { }
|
| + virtual void cleanupPages() = 0;
|
|
|
| // Find the page in this thread heap containing the given
|
| // address. Returns 0 if the address is not contained in any
|
| @@ -769,14 +814,8 @@ public:
|
| // and builds freelists for all the unused memory.
|
| virtual void sweep() = 0;
|
|
|
| - // Forcefully finalize all objects in this part of the Blink heap
|
| - // (potentially with the exception of one object). This is used
|
| - // during thread termination to make sure that all objects for the
|
| - // dying thread are finalized.
|
| - virtual void assertEmpty() = 0;
|
| -
|
| virtual void clearFreeLists() = 0;
|
| - virtual void clearMarks() = 0;
|
| + virtual void clearLiveAndMarkDead() = 0;
|
| #ifndef NDEBUG
|
| virtual void getScannedStats(HeapStats&) = 0;
|
| #endif
|
| @@ -784,6 +823,8 @@ public:
|
| virtual void makeConsistentForGC() = 0;
|
| virtual bool isConsistentForGC() = 0;
|
|
|
| + virtual void prepareHeapForTermination() = 0;
|
| +
|
| // Returns a bucket number for inserting a FreeListEntry of a
|
| // given size. All FreeListEntries in the given bucket, n, have
|
| // size >= 2^n.
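
The bucket invariant (every entry in bucket n has size >= 2^n) means the
bucket for a chunk is just the index of its highest set bit. A sketch,
assuming a nonzero size:

    // floor(log2(size)): e.g. size 48 (0b110000) -> bucket 5, since 48 >= 2^5.
    inline int bucketIndexForSizeSketch(size_t size)
    {
        int index = -1;
        while (size) {
            size >>= 1;
            ++index;
        }
        return index;
    }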
|
| @@ -803,17 +844,17 @@ public:
|
| template<typename Header>
|
| class ThreadHeap : public BaseHeap {
|
| public:
|
| - ThreadHeap(ThreadState*);
|
| + ThreadHeap(ThreadState*, int);
|
| virtual ~ThreadHeap();
|
| + virtual void cleanupPages();
|
|
|
| virtual BaseHeapPage* heapPageFromAddress(Address);
|
| #if ENABLE(GC_TRACING)
|
| virtual const GCInfo* findGCInfoOfLargeHeapObject(Address);
|
| #endif
|
| virtual void sweep();
|
| - virtual void assertEmpty();
|
| virtual void clearFreeLists();
|
| - virtual void clearMarks();
|
| + virtual void clearLiveAndMarkDead();
|
| #ifndef NDEBUG
|
| virtual void getScannedStats(HeapStats&);
|
| #endif
|
| @@ -830,41 +871,18 @@ public:
|
|
|
| inline Address allocate(size_t, const GCInfo*);
|
| void addToFreeList(Address, size_t);
|
| - void addPageMemoryToPool(PageMemory*);
|
| - void addPageToPool(HeapPage<Header>*);
|
| inline static size_t roundedAllocationSize(size_t size)
|
| {
|
| return allocationSizeFromSize(size) - sizeof(Header);
|
| }
|
|
|
| -private:
|
| - // Once pages have been used for one thread heap they will never
|
| - // be reused for another thread heap. Instead of unmapping, we add
|
| - // the pages to a pool of pages to be reused later by this thread
|
| - // heap. This is done as a security feature to avoid type
|
| - // confusion. The heap is type segregated by having separate
|
| - // thread heaps for various types of objects. Holding on to pages
|
| - // ensures that the same virtual address space cannot be used for
|
| - // objects of another type than the type contained in this thread
|
| - // heap.
|
| - class PagePoolEntry {
|
| - public:
|
| - PagePoolEntry(PageMemory* storage, PagePoolEntry* next)
|
| - : m_storage(storage)
|
| - , m_next(next)
|
| - { }
|
| -
|
| - PageMemory* storage() { return m_storage; }
|
| - PagePoolEntry* next() { return m_next; }
|
| -
|
| - private:
|
| - PageMemory* m_storage;
|
| - PagePoolEntry* m_next;
|
| - };
|
| + void prepareHeapForTermination();
|
| + void removePageFromHeap(HeapPage<Header>*);
|
|
|
| +private:
|
| + void addPageToHeap(const GCInfo*);
|
| PLATFORM_EXPORT Address outOfLineAllocate(size_t, const GCInfo*);
|
| static size_t allocationSizeFromSize(size_t);
|
| - void addPageToHeap(const GCInfo*);
|
| PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*);
|
| Address currentAllocationPoint() const { return m_currentAllocationPoint; }
|
| size_t remainingAllocationSize() const { return m_remainingAllocationSize; }
|
| @@ -880,11 +898,7 @@ private:
|
| bool allocateFromFreeList(size_t);
|
|
|
| void freeLargeObject(LargeHeapObject<Header>*, LargeHeapObject<Header>**);
|
| -
|
| void allocatePage(const GCInfo*);
|
| - PageMemory* takePageFromPool();
|
| - void clearPagePool();
|
| - void deletePages();
|
|
|
| Address m_currentAllocationPoint;
|
| size_t m_remainingAllocationSize;
|
| @@ -898,9 +912,9 @@ private:
|
| // All FreeListEntries in the nth list have size >= 2^n.
|
| FreeListEntry* m_freeLists[blinkPageSizeLog2];
|
|
|
| - // List of pages that have been previously allocated, but are now
|
| - // unused.
|
| - PagePoolEntry* m_pagePool;
|
| + // Index into the page pools. This is used to ensure that the pages of the
|
| + // same type go into the correct page pool and thus avoid type confusion.
|
| + int m_index;
|
| };
|
|
|
| class PLATFORM_EXPORT Heap {
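
With the per-ThreadHeap page pool gone, m_index is the only state needed to
route a page back to the shared, type-segregated pools. A sketch of the core
of removePageFromHeap under that assumption (the real body likely also
unlinks the page and may decommit it):

    template<typename Header>
    void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page)
    {
        // Hand the backing memory to the pool for heaps of this type, so the
        // address range can never be recycled for objects of another type.
        Heap::freePagePool()->addFreePage(m_index, page->storage());
    }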
|
| @@ -912,6 +926,9 @@ public:
|
| static BaseHeapPage* contains(Address);
|
| static BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); }
|
| static BaseHeapPage* contains(const void* pointer) { return contains(const_cast<void*>(pointer)); }
|
| +#ifndef NDEBUG
|
| + static bool containedInHeapOrOrphanedPage(void*);
|
| +#endif
|
|
|
| // Push a trace callback on the marking stack.
|
| static void pushTraceCallback(void* containerObject, TraceCallback);
|
| @@ -933,7 +950,7 @@ public:
|
|
|
| // Pop the top of the marking stack and call the callback with the visitor
|
| // and the object. Returns false when there is nothing more to do.
|
| - static bool popAndInvokeTraceCallback(Visitor*);
|
| + template<CallbackInvocationMode Mode> static bool popAndInvokeTraceCallback(Visitor*);
|
|
|
| // Remove an item from the weak callback work list and call the callback
|
| // with the visitor and the closure pointer. Returns false when there is
|
| @@ -950,7 +967,9 @@ public:
|
| template<typename T> static Address reallocate(void* previous, size_t);
|
|
|
| static void collectGarbage(ThreadState::StackState);
|
| + static void collectGarbageForTerminatingThread(ThreadState*);
|
| static void collectAllGarbage();
|
| + template<CallbackInvocationMode Mode> static void traceRootsAndPerformGlobalWeakProcessing();
|
| static void setForcePreciseGCForTesting();
|
|
|
| static void prepareForGC();
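
collectGarbageForTerminatingThread pairs naturally with the ThreadLocalMarking
mode above: the dying thread marks from its own roots only, sweeps so that all
of its objects get finalized, and any page it cannot free (because another
thread may still conservatively find pointers into it) becomes orphaned. A
heavily hedged sketch of that flow:

    void Heap::collectGarbageForTerminatingThread(ThreadState* state)
    {
        // Assumed sequencing; the real implementation lives in Heap.cpp.
        traceRootsAndPerformGlobalWeakProcessing<ThreadLocalMarking>();
        // ...then sweep 'state', finalizing every object of the dying
        // thread, and move surviving pages to Heap::orphanedPagePool().
    }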
|
| @@ -988,6 +1007,9 @@ public:
|
| // during conservative scanning.
|
| static bool lastGCWasConservative() { return s_lastGCWasConservative; }
|
|
|
| + static FreePagePool* freePagePool() { return s_freePagePool; }
|
| + static OrphanedPagePool* orphanedPagePool() { return s_orphanedPagePool; }
|
| +
|
| private:
|
| static Visitor* s_markingVisitor;
|
|
|
| @@ -997,6 +1019,8 @@ private:
|
| static HeapDoesNotContainCache* s_heapDoesNotContainCache;
|
| static bool s_shutdownCalled;
|
| static bool s_lastGCWasConservative;
|
| + static FreePagePool* s_freePagePool;
|
| + static OrphanedPagePool* s_orphanedPagePool;
|
| friend class ThreadState;
|
| };
|
|
|
| @@ -1303,7 +1327,10 @@ T* adoptRefCountedGarbageCollected(T* ptr)
|
| NO_SANITIZE_ADDRESS
|
| void HeapObjectHeader::checkHeader() const
|
| {
|
| - ASSERT(m_magic == magic);
|
| +#ifndef NDEBUG
|
| + BaseHeapPage* page = pageHeaderFromObject(this);
|
| + ASSERT(page->orphaned() || m_magic == magic);
|
| +#endif
|
| }
|
|
|
| Address HeapObjectHeader::payload()
|
|
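
The relaxed checkHeader above exists because a conservative scan can land on
an orphaned page whose payload has been zapped, so m_magic no longer matches.
containedInHeapOrOrphanedPage is the matching debug-only helper; given the
accessors this patch adds, a plausible implementation is a one-liner:

    // Sketch: combine the existing heap lookup with the new orphaned pool.
    bool Heap::containedInHeapOrOrphanedPage(void* object)
    {
        return contains(object) || orphanedPagePool()->contains(object);
    }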
|