| Index: Source/platform/heap/Heap.cpp
|
| diff --git a/Source/platform/heap/Heap.cpp b/Source/platform/heap/Heap.cpp
|
| index dd63429b6f4fbb800921076ed703c8bdb8396e34..fc8a7c67de3d10c6c09ce45e67f6ab9052c1b775 100644
|
| --- a/Source/platform/heap/Heap.cpp
|
| +++ b/Source/platform/heap/Heap.cpp
|
| @@ -34,6 +34,7 @@
|
| #include "platform/TraceEvent.h"
|
| #include "platform/heap/ThreadState.h"
|
| #include "public/platform/Platform.h"
|
| +#include "wtf/AddressSpaceRandomization.h"
|
| #include "wtf/Assertions.h"
|
| #include "wtf/LeakAnnotations.h"
|
| #include "wtf/PassOwnPtr.h"
|
| @@ -172,125 +173,138 @@ private:
|
| size_t m_size;
|
| };
|
|
|
| -// Representation of the memory used for a Blink heap page.
|
| -//
|
| -// The representation keeps track of two memory regions:
|
| -//
|
| -// 1. The virtual memory reserved from the sytem in order to be able
|
| -// to free all the virtual memory reserved on destruction.
|
| -//
|
| -// 2. The writable memory (a sub-region of the reserved virtual
|
| -// memory region) that is used for the actual heap page payload.
|
| -//
|
| -// Guard pages are created before and after the writable memory.
|
| -class PageMemory {
|
| +// A PageMemoryRegion represents a chunk of reserved virtual address
|
| +// space containing a number of blink heap pages. On Windows, reserved
|
| +// virtual address space can only be given back to the system as a
|
| +// whole. The PageMemoryRegion allows us to do that by keeping track
|
| +// of the number of pages using it in order to be able to release all
|
| +// of the virtual address space when there are no more pages using it.
|
| +class PageMemoryRegion : public MemoryRegion {
|
| public:
|
| - ~PageMemory()
|
| + ~PageMemoryRegion()
|
| {
|
| - __lsan_unregister_root_region(m_writable.base(), m_writable.size());
|
| - m_reserved.release();
|
| + release();
|
| }
|
|
|
| - bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); }
|
| - void decommit() { m_writable.decommit(); }
|
| -
|
| - Address writableStart() { return m_writable.base(); }
|
| -
|
| - // Allocate a virtual address space for the blink page with the
|
| - // following layout:
|
| - //
|
| - // [ guard os page | ... payload ... | guard os page ]
|
| - // ^---{ aligned to blink page size }
|
| - //
|
| - static PageMemory* allocate(size_t payloadSize)
|
| + void pageRemoved()
|
| {
|
| - ASSERT(payloadSize > 0);
|
| + if (!--m_numPages)
|
| + delete this;
|
| + }
|
|
|
| - // Virtual memory allocation routines operate in OS page sizes.
|
| - // Round up the requested size to nearest os page size.
|
| - payloadSize = roundToOsPageSize(payloadSize);
|
| + static PageMemoryRegion* allocate(size_t size, unsigned numPages)
|
| + {
|
| + ASSERT(Heap::heapDoesNotContainCacheIsEmpty());
|
|
|
| - // Overallocate by blinkPageSize and 2 times OS page size to
|
| - // ensure a chunk of memory which is blinkPageSize aligned and
|
| - // has a system page before and after to use for guarding. We
|
| - // unmap the excess memory before returning.
|
| - size_t allocationSize = payloadSize + 2 * osPageSize() + blinkPageSize;
|
| + // Compute a random blink page aligned address for the page memory
|
| + // region and attempt to get the memory there.
|
| + Address randomAddress = reinterpret_cast<Address>(WTF::getRandomPageBase());
|
| + Address alignedRandomAddress = roundToBlinkPageBoundary(randomAddress);
|
|
|
| - ASSERT(Heap::heapDoesNotContainCacheIsEmpty());
|
| #if OS(POSIX)
|
| - Address base = static_cast<Address>(mmap(0, allocationSize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0));
|
| + Address base = static_cast<Address>(mmap(alignedRandomAddress, size, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0));
|
| + RELEASE_ASSERT(base != MAP_FAILED);
|
| + if (base == roundToBlinkPageBoundary(base))
|
| + return new PageMemoryRegion(base, size, numPages);
|
| +
|
| + // We failed to get a blink page aligned chunk of
|
| + // memory. Unmap the chunk that we got and fall back to
|
| + // overallocating and selecting an aligned sub-region of what
|
| + // we allocate.
|
| + int error = munmap(base, size);
|
| + RELEASE_ASSERT(!error);
|
| + size_t allocationSize = size + blinkPageSize;
|
| + base = static_cast<Address>(mmap(alignedRandomAddress, allocationSize, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0));
|
| RELEASE_ASSERT(base != MAP_FAILED);
|
|
|
| Address end = base + allocationSize;
|
| Address alignedBase = roundToBlinkPageBoundary(base);
|
| - Address payloadBase = alignedBase + osPageSize();
|
| - Address payloadEnd = payloadBase + payloadSize;
|
| - Address blinkPageEnd = payloadEnd + osPageSize();
|
| + Address regionEnd = alignedBase + size;
|
|
|
| // If the allocate memory was not blink page aligned release
|
| // the memory before the aligned address.
|
| if (alignedBase != base)
|
| MemoryRegion(base, alignedBase - base).release();
|
|
|
| - // Create guard pages by decommiting an OS page before and
|
| - // after the payload.
|
| - MemoryRegion(alignedBase, osPageSize()).decommit();
|
| - MemoryRegion(payloadEnd, osPageSize()).decommit();
|
| -
|
| // Free the additional memory at the end of the page if any.
|
| - if (blinkPageEnd < end)
|
| - MemoryRegion(blinkPageEnd, end - blinkPageEnd).release();
|
| + if (regionEnd < end)
|
| + MemoryRegion(regionEnd, end - regionEnd).release();
|
|
|
| - return new PageMemory(MemoryRegion(alignedBase, blinkPageEnd - alignedBase), MemoryRegion(payloadBase, payloadSize));
|
| + return new PageMemoryRegion(alignedBase, size, numPages);
|
| #else
|
| - Address base = 0;
|
| - Address alignedBase = 0;
|
| -
|
| - // On Windows it is impossible to partially release a region
|
| - // of memory allocated by VirtualAlloc. To avoid wasting
|
| - // virtual address space we attempt to release a large region
|
| - // of memory returned as a whole and then allocate an aligned
|
| - // region inside this larger region.
|
| + Address base = static_cast<Address>(VirtualAlloc(alignedRandomAddress, size, MEM_RESERVE, PAGE_NOACCESS));
|
| + if (base) {
|
| + ASSERT(base == alignedRandomAddress);
|
| + return new PageMemoryRegion(base, size, numPages);
|
| + }
|
| +
|
| + // We failed to get the random aligned address that we asked
|
| + // for. Fall back to overallocating. On Windows it is
|
| + // impossible to partially release a region of memory
|
| + // allocated by VirtualAlloc. To avoid wasting virtual address
|
| + // space we reserve a large region, release it as a whole, and
|
| + // then attempt to allocate an aligned region inside the address
|
| + // range that was just released.
|
| + size_t allocationSize = size + blinkPageSize;
|
| for (int attempt = 0; attempt < 3; attempt++) {
|
| base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
|
| RELEASE_ASSERT(base);
|
| VirtualFree(base, 0, MEM_RELEASE);
|
|
|
| - alignedBase = roundToBlinkPageBoundary(base);
|
| - base = static_cast<Address>(VirtualAlloc(alignedBase, payloadSize + 2 * osPageSize(), MEM_RESERVE, PAGE_NOACCESS));
|
| + Address alignedBase = roundToBlinkPageBoundary(base);
|
| + base = static_cast<Address>(VirtualAlloc(alignedBase, size, MEM_RESERVE, PAGE_NOACCESS));
|
| if (base) {
|
| - RELEASE_ASSERT(base == alignedBase);
|
| - allocationSize = payloadSize + 2 * osPageSize();
|
| - break;
|
| + ASSERT(base == alignedBase);
|
| + return new PageMemoryRegion(alignedBase, size, numPages);
|
| }
|
| }
|
|
|
| - if (!base) {
|
| - // We failed to avoid wasting virtual address space after
|
| - // several attempts.
|
| - base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
|
| - RELEASE_ASSERT(base);
|
| + // We failed to avoid wasting virtual address space after
|
| + // several attempts.
|
| + base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
|
| + RELEASE_ASSERT(base);
|
|
|
| - // FIXME: If base is by accident blink page size aligned
|
| - // here then we can create two pages out of reserved
|
| - // space. Do this.
|
| - alignedBase = roundToBlinkPageBoundary(base);
|
| - }
|
| + // FIXME: If base is by accident blink page size aligned
|
| + // here then we can create two pages out of reserved
|
| + // space. Do this.
|
| + Address alignedBase = roundToBlinkPageBoundary(base);
|
|
|
| - Address payloadBase = alignedBase + osPageSize();
|
| - PageMemory* storage = new PageMemory(MemoryRegion(base, allocationSize), MemoryRegion(payloadBase, payloadSize));
|
| - bool res = storage->commit();
|
| - RELEASE_ASSERT(res);
|
| - return storage;
|
| + return new PageMemoryRegion(alignedBase, size, numPages);
|
| #endif
|
| }
|
|
|
| private:
|
| - PageMemory(const MemoryRegion& reserved, const MemoryRegion& writable)
|
| + PageMemoryRegion(Address base, size_t size, unsigned numPages)
|
| + : MemoryRegion(base, size)
|
| + , m_numPages(numPages)
|
| + {
|
| + }
|
| +
|
| + unsigned m_numPages;
|
| +};
|
| +
|
| +// Representation of the memory used for a Blink heap page.
|
| +//
|
| +// The representation keeps track of two memory regions:
|
| +//
|
| +// 1. The virtual memory reserved from the system in order to be able
|
| +// to free all the virtual memory reserved. Multiple PageMemory
|
| +// instances can share the same reserved memory region and
|
| +// therefore notify the reserved memory region on destruction so
|
| +// that the system memory can be given back when all PageMemory
|
| +// instances for that memory are gone.
|
| +//
|
| +// 2. The writable memory (a sub-region of the reserved virtual
|
| +// memory region) that is used for the actual heap page payload.
|
| +//
|
| +// Guard pages are created before and after the writable memory.
|
| +class PageMemory {
|
| +public:
|
| + PageMemory(PageMemoryRegion* reserved, const MemoryRegion& writable)
|
| : m_reserved(reserved)
|
| , m_writable(writable)
|
| {
|
| - ASSERT(reserved.contains(writable));
|
| + ASSERT(reserved->contains(writable));
|
|
|
| // Register the writable area of the memory as part of the LSan root set.
|
| // Only the writable area is mapped and can contain C++ objects. Those
|
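| Both the POSIX and the Windows fallback paths in this hunk over-reserve by one
| extra blink page when the randomized, aligned request cannot be satisfied
| directly, and then carve an aligned sub-region out of the larger reservation.
| The sketch below is illustrative only and not part of the patch; kBlinkPageSize
| and roundUpToBlinkPage are stand-ins for the real definitions in Heap.h. It
| shows why the over-reservation always leaves room for an aligned region of the
| requested size.
|
| #include <cassert>
| #include <cstddef>
| #include <cstdint>
|
| const size_t kBlinkPageSize = 1 << 17; // placeholder; the real constant lives in Heap.h
|
| uintptr_t roundUpToBlinkPage(uintptr_t address)
| {
|     // Round up to the next multiple of the (power-of-two) blink page size.
|     return (address + kBlinkPageSize - 1) & ~static_cast<uintptr_t>(kBlinkPageSize - 1);
| }
|
| void checkAlignedRegionFits(uintptr_t base, size_t size)
| {
|     size_t allocationSize = size + kBlinkPageSize; // what the fallback path reserves
|     uintptr_t alignedBase = roundUpToBlinkPage(base);
|     // Rounding up advances the base by less than one blink page, so an aligned
|     // region of the requested size always fits inside the reservation.
|     assert(alignedBase - base < kBlinkPageSize);
|     assert(alignedBase + size <= base + allocationSize);
| }
|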
| @@ -299,7 +313,43 @@ private:
|
| __lsan_register_root_region(m_writable.base(), m_writable.size());
|
| }
|
|
|
| - MemoryRegion m_reserved;
|
| + ~PageMemory()
|
| + {
|
| + __lsan_unregister_root_region(m_writable.base(), m_writable.size());
|
| + m_reserved->pageRemoved();
|
| + }
|
| +
|
| + bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); }
|
| + void decommit() { m_writable.decommit(); }
|
| +
|
| + Address writableStart() { return m_writable.base(); }
|
| +
|
| + // Allocate a virtual address space for one blink page with the
|
| + // following layout:
|
| + //
|
| + // [ guard os page | ... payload ... | guard os page ]
|
| + // ^---{ aligned to blink page size }
|
| + //
|
| + static PageMemory* allocate(size_t payloadSize)
|
| + {
|
| + ASSERT(payloadSize > 0);
|
| +
|
| + // Virtual memory allocation routines operate in OS page sizes.
|
| + // Round up the requested size to the nearest OS page size.
|
| + payloadSize = roundToOsPageSize(payloadSize);
|
| +
|
| + // Overallocate by 2 times the OS page size to have space for a
|
| + // guard page at the beginning and end of the blink heap page.
|
| + size_t allocationSize = payloadSize + 2 * osPageSize();
|
| + PageMemoryRegion* pageMemoryRegion = PageMemoryRegion::allocate(allocationSize, 1);
|
| + Address payload = pageMemoryRegion->base() + osPageSize();
|
| + PageMemory* storage = new PageMemory(pageMemoryRegion, MemoryRegion(payload, payloadSize));
|
| + RELEASE_ASSERT(storage->commit());
|
| + return storage;
|
| + }
|
| +
|
| +private:
|
| + PageMemoryRegion* m_reserved;
|
| MemoryRegion m_writable;
|
| };
|
|
|
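| The rewritten PageMemory::allocate above reserves a one-page region through
| PageMemoryRegion::allocate and places the payload one OS page into it, leaving
| an inaccessible OS page on each side as a guard; only the payload is ever
| committed. A minimal sketch of that layout arithmetic, illustrative only, with
| kOsPageSize and roundToOsPage standing in for osPageSize() and
| roundToOsPageSize():
|
| #include <cassert>
| #include <cstddef>
|
| const size_t kOsPageSize = 4096; // placeholder; the real value comes from the OS
|
| size_t roundToOsPage(size_t size)
| {
|     // Round up to whole OS pages (power-of-two page size assumed).
|     return (size + kOsPageSize - 1) & ~(kOsPageSize - 1);
| }
|
| void checkSinglePageLayout(size_t payloadSize)
| {
|     size_t roundedPayload = roundToOsPage(payloadSize);
|     size_t allocationSize = roundedPayload + 2 * kOsPageSize;
|
|     // The payload starts right after the leading guard page, and the trailing
|     // guard page fills the rest of the reservation exactly.
|     size_t payloadOffset = kOsPageSize;
|     assert(payloadOffset + roundedPayload + kOsPageSize == allocationSize);
| }
|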
| @@ -720,13 +770,19 @@ PageMemory* ThreadHeap<Header>::takePageFromPool()
|
| }
|
|
|
| template<typename Header>
|
| -void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* unused)
|
| +void ThreadHeap<Header>::addPageMemoryToPool(PageMemory* storage)
|
| {
|
| flushHeapContainsCache();
|
| - PageMemory* storage = unused->storage();
|
| PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool);
|
| m_pagePool = entry;
|
| +}
|
| +
|
| +template<typename Header>
|
| +void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* page)
|
| +{
|
| + PageMemory* storage = page->storage();
|
| storage->decommit();
|
| + addPageMemoryToPool(storage);
|
| }
|
|
|
| template<typename Header>
|
| @@ -735,7 +791,22 @@ void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo)
|
| Heap::flushHeapDoesNotContainCache();
|
| PageMemory* pageMemory = takePageFromPool();
|
| if (!pageMemory) {
|
| - pageMemory = PageMemory::allocate(blinkPagePayloadSize());
|
| + // Allocate a memory region for blinkPagesPerRegion pages that
|
| + // will each have the following layout:
|
| + //
|
| + // [ guard os page | ... payload ... | guard os page ]
|
| + // ^---{ aligned to blink page size }
|
| + PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion);
|
| + // Set up a PageMemory object for each of the pages in the
|
| + // region. The first payload address is one OS page into the
|
| + // region; that OS page acts as a guard page.
|
| + ASSERT(blinkPageSize == blinkPagePayloadSize() + 2 * osPageSize());
|
| + Address payload = region->base() + osPageSize();
|
| + for (size_t i = 0; i < blinkPagesPerRegion; i++) {
|
| + addPageMemoryToPool(new PageMemory(region, MemoryRegion(payload, blinkPagePayloadSize())));
|
| + payload += blinkPageSize;
|
| + }
|
| + pageMemory = takePageFromPool();
|
| RELEASE_ASSERT(pageMemory);
|
| }
|
| HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo);
|
|
|
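| For reference, a small self-contained sketch of the address arithmetic behind
| the new loop in allocatePage(). It is illustrative only; the constants are
| placeholders for blinkPageSize, blinkPagesPerRegion, and osPageSize() and need
| not match the real values.
|
| #include <cassert>
| #include <cstddef>
| #include <cstdint>
|
| const size_t kOsPageSize = 4096;        // placeholder guard page size
| const size_t kBlinkPageSize = 1 << 17;  // placeholder blink page size
| const size_t kBlinkPagesPerRegion = 10; // placeholder page count per region
|
| void carveRegionIntoPages(uintptr_t regionBase)
| {
|     // Each blink page keeps its payload between two OS guard pages, so the
|     // payload size is the blink page size minus two OS pages.
|     size_t payloadSize = kBlinkPageSize - 2 * kOsPageSize;
|     uintptr_t payload = regionBase + kOsPageSize; // skip the leading guard page
|     for (size_t i = 0; i < kBlinkPagesPerRegion; ++i) {
|         // Page i spans [regionBase + i * kBlinkPageSize, regionBase + (i + 1) * kBlinkPageSize);
|         // its payload ends exactly one guard page before the next page starts.
|         assert(payload + payloadSize + kOsPageSize == regionBase + (i + 1) * kBlinkPageSize);
|         payload += kBlinkPageSize;
|     }
| }
|
| Reserving blinkPagesPerRegion pages per region keeps the number of reservations
| (and random base addresses) small, while PageMemoryRegion's page count ensures
| the whole reservation is returned to the system only after every page carved
| from it has been freed.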