| Index: Source/platform/heap/Heap.cpp |
| diff --git a/Source/platform/heap/Heap.cpp b/Source/platform/heap/Heap.cpp |
| index dd63429b6f4fbb800921076ed703c8bdb8396e34..c4fe8e779f62c356220582480a89da169303809e 100644 |
| --- a/Source/platform/heap/Heap.cpp |
| +++ b/Source/platform/heap/Heap.cpp |
| @@ -34,6 +34,7 @@ |
| #include "platform/TraceEvent.h" |
| #include "platform/heap/ThreadState.h" |
| #include "public/platform/Platform.h" |
| +#include "wtf/AddressSpaceRandomization.h" |
| #include "wtf/Assertions.h" |
| #include "wtf/LeakAnnotations.h" |
| #include "wtf/PassOwnPtr.h" |
| @@ -172,12 +173,126 @@ private: |
| size_t m_size; |
| }; |
| +// A PageMemoryRegion represents a chunk of reserved virtual address |
| +// space containing a number of blink heap pages. On Windows, reserved |
| +// virtual address space can only be given back to the system as a |
| + // whole. The PageMemoryRegion allows us to do that by keeping track |
| + // of the number of pages using it, so that the whole reservation can |
| + // be released once no pages remain. |
| +class PageMemoryRegion : public MemoryRegion { |
| +public: |
| + ~PageMemoryRegion() |
| + { |
| + release(); |
| + } |
| + |
| + void pageRemoved() |
| + { |
| + if (!--m_numPages) |
| + delete this; |
| + } |
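The pageRemoved() bookkeeping above is the heart of the shared-region lifetime: every PageMemory using the region decrements the count on destruction, and the region frees itself (and, in the real code, the whole reserved address range) when the last page goes away. A minimal standalone sketch of that scheme, with illustrative names rather than the actual Blink classes:

```cpp
// Illustrative sketch of the region/page lifetime described above; not
// the Blink code. Only the bookkeeping is modeled, not the actual
// mmap/VirtualAlloc reservation.
#include <cassert>

class RegionSketch {
public:
    static RegionSketch* reserve(unsigned pageCount)
    {
        return new RegionSketch(pageCount);
    }

    // Called by each page when it is destroyed. Once the last page is
    // gone, the region deletes itself (in the real code this is where
    // the reserved virtual address space would be returned to the OS).
    void pageRemoved()
    {
        assert(m_livePages > 0);
        if (!--m_livePages)
            delete this;
    }

private:
    explicit RegionSketch(unsigned pageCount) : m_livePages(pageCount) { }
    unsigned m_livePages;
};

int main()
{
    RegionSketch* region = RegionSketch::reserve(2);
    region->pageRemoved(); // first page destroyed: region stays alive
    region->pageRemoved(); // last page destroyed: region deletes itself
    return 0;
}
```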
| + |
| + static PageMemoryRegion* allocate(size_t size, unsigned numPages) |
| + { |
| + ASSERT(Heap::heapDoesNotContainCacheIsEmpty()); |
| + |
| + // Compute a random blink page aligned address for the page memory |
| + // region and attempt to get the memory there. |
| + Address randomAddress = reinterpret_cast<Address>(WTF::getRandomPageBase()); |
| + Address alignedRandomAddress = roundToBlinkPageBoundary(randomAddress); |
| + |
| +#if OS(POSIX) |
| + Address base = static_cast<Address>(mmap(alignedRandomAddress, size, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0)); |
| + RELEASE_ASSERT(base != MAP_FAILED); |
| + if (base == roundToBlinkPageBoundary(base)) |
| + return new PageMemoryRegion(base, size, numPages); |
| + |
| + // We failed to get a blink page aligned chunk of |
| + // memory. Unmap the chunk that we got and fall back to |
| + // overallocating and selecting an aligned sub-region of what |
| + // we allocate. |
| + int error = munmap(base, size); |
| + RELEASE_ASSERT(!error); |
| + size_t allocationSize = size + blinkPageSize; |
| + base = static_cast<Address>(mmap(alignedRandomAddress, allocationSize, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0)); |
| + RELEASE_ASSERT(base != MAP_FAILED); |
| + |
| + Address end = base + allocationSize; |
| + Address alignedBase = roundToBlinkPageBoundary(base); |
| + Address regionEnd = alignedBase + size; |
| + |
| + // If the allocated memory was not blink page aligned, release |
| + // the memory before the aligned address. |
| + if (alignedBase != base) |
| + MemoryRegion(base, alignedBase - base).release(); |
| + |
| + // Free the additional memory at the end of the page if any. |
| + if (regionEnd < end) |
| + MemoryRegion(regionEnd, end - regionEnd).release(); |
| + |
| + return new PageMemoryRegion(alignedBase, size, numPages); |
| +#else |
| + Address base = static_cast<Address>(VirtualAlloc(alignedRandomAddress, size, MEM_RESERVE, PAGE_NOACCESS)); |
| + if (base) { |
| + ASSERT(base == alignedRandomAddress); |
| + return new PageMemoryRegion(base, size, numPages); |
| + } |
| + |
| + // We failed to get the random aligned address that we asked |
| + // for. Fall back to overallocating. On Windows it is |
| + // impossible to partially release a region of memory |
| + // allocated by VirtualAlloc. To avoid wasting virtual address |
| + // space we reserve a larger region, release it as a whole, |
| + // and then attempt to reserve an aligned region inside the |
| + // address range it covered. |
| + size_t allocationSize = size + blinkPageSize; |
| + for (int attempt = 0; attempt < 3; attempt++) { |
| + base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS)); |
| + RELEASE_ASSERT(base); |
| + VirtualFree(base, 0, MEM_RELEASE); |
| + |
| + Address alignedBase = roundToBlinkPageBoundary(base); |
| + base = static_cast<Address>(VirtualAlloc(alignedBase, size, MEM_RESERVE, PAGE_NOACCESS)); |
| + if (base) { |
| + ASSERT(base == alignedBase); |
| + return new PageMemoryRegion(alignedBase, size, numPages); |
| + } |
| + } |
| + |
| + // We failed to avoid wasting virtual address space after |
| + // several attempts. |
| + base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS)); |
| + RELEASE_ASSERT(base); |
| + |
| + // FIXME: If base is by accident blink page size aligned |
| + // here then we can create two pages out of reserved |
| + // space. Do this. |
| + Address alignedBase = roundToBlinkPageBoundary(base); |
| + |
| + return new PageMemoryRegion(alignedBase, size, numPages); |
| +#endif |
| + } |
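The POSIX branch above relies on a common trick for obtaining a reservation aligned to a large boundary: over-reserve by one alignment unit, then unmap the unaligned head and the leftover tail. On Windows that trimming is not possible, which is why the code instead releases a large reservation and retries at the aligned address inside it. Below is a standalone POSIX-only sketch of the trimming approach; the 1 MiB alignment is an arbitrary stand-in for blinkPageSize, and the helper name is hypothetical.

```cpp
// Sketch of the overallocate-and-trim alignment trick used in the POSIX
// branch above. Assumptions: POSIX mmap/munmap; kAlignment is an
// arbitrary stand-in for blinkPageSize.
#include <sys/mman.h>
#include <cstddef>
#include <cstdint>
#include <cstdio>

static const size_t kAlignment = 1 << 20; // 1 MiB, illustrative only

static char* reserveAligned(size_t size)
{
    // Over-reserve by one alignment unit so that an aligned sub-range of
    // |size| bytes is guaranteed to exist somewhere inside the mapping.
    size_t allocationSize = size + kAlignment;
    void* raw = mmap(nullptr, allocationSize, PROT_NONE,
                     MAP_ANON | MAP_PRIVATE, -1, 0);
    if (raw == MAP_FAILED)
        return nullptr;

    uintptr_t base = reinterpret_cast<uintptr_t>(raw);
    uintptr_t mask = kAlignment - 1;
    uintptr_t alignedBase = (base + mask) & ~mask;
    uintptr_t end = base + allocationSize;
    uintptr_t alignedEnd = alignedBase + size;

    // Unmap the unaligned head, if any.
    if (alignedBase != base)
        munmap(reinterpret_cast<void*>(base), alignedBase - base);
    // Unmap the leftover tail, if any.
    if (alignedEnd < end)
        munmap(reinterpret_cast<void*>(alignedEnd), end - alignedEnd);

    return reinterpret_cast<char*>(alignedBase);
}

int main()
{
    char* p = reserveAligned(4 * kAlignment);
    if (!p)
        return 1;
    std::printf("aligned reservation at %p\n", static_cast<void*>(p));
    munmap(p, 4 * kAlignment);
    return 0;
}
```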
| + |
| +private: |
| + PageMemoryRegion(Address base, size_t size, unsigned numPages) |
| + : MemoryRegion(base, size) |
| + , m_numPages(numPages) |
| + { |
| + } |
| + |
| + unsigned m_numPages; |
| +}; |
| + |
| // Representation of the memory used for a Blink heap page. |
| // |
| // The representation keeps track of two memory regions: |
| // |
| -// 1. The virtual memory reserved from the sytem in order to be able |
| -// to free all the virtual memory reserved on destruction. |
| +// 1. The virtual memory reserved from the system in order to be able |
| +// to free all the virtual memory reserved. Multiple PageMemory |
| +// instances can share the same reserved memory region and |
| +// therefore notify the reserved memory region on destruction so |
| +// that the system memory can be given back when all PageMemory |
| +// instances for that memory are gone. |
| // |
| // 2. The writable memory (a sub-region of the reserved virtual |
| // memory region) that is used for the actual heap page payload. |
| @@ -188,7 +303,7 @@ public: |
| ~PageMemory() |
| { |
| __lsan_unregister_root_region(m_writable.base(), m_writable.size()); |
| - m_reserved.release(); |
| + m_reserved->pageRemoved(); |
| } |
| bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); } |
| @@ -196,7 +311,15 @@ public: |
| Address writableStart() { return m_writable.base(); } |
| - // Allocate a virtual address space for the blink page with the |
| + static PageMemory* setupPageMemoryInRegion(PageMemoryRegion* region, size_t pageOffset, size_t payloadSize) |
| + { |
| + // Set up the payload one OS page into the page memory. The |
| + // first OS page is the guard page. |
| + Address payloadAddress = region->base() + pageOffset + osPageSize(); |
| + return new PageMemory(region, MemoryRegion(payloadAddress, payloadSize)); |
| + } |
| + |
| + // Allocate a virtual address space for one blink page with the |
| // following layout: |
| // |
| // [ guard os page | ... payload ... | guard os page ] |
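The layout above can be demonstrated with plain POSIX calls: reserve the whole blink page inaccessible, then make only the payload between the two guard OS pages usable. This is an illustration of the guard-page idea, not the actual MemoryRegion commit/decommit implementation; the payload size is arbitrary.

```cpp
// Sketch of the guard-page layout shown above: an inaccessible OS page
// before and after a read-write payload. Illustrative only.
#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>
#include <cstdio>

int main()
{
    size_t osPage = static_cast<size_t>(sysconf(_SC_PAGESIZE));
    size_t payloadSize = 16 * osPage;            // arbitrary payload size
    size_t totalSize = payloadSize + 2 * osPage; // plus front/back guards

    // Reserve the whole range with no access rights.
    char* base = static_cast<char*>(mmap(nullptr, totalSize, PROT_NONE,
                                         MAP_ANON | MAP_PRIVATE, -1, 0));
    if (base == MAP_FAILED)
        return 1;

    // Make only the payload usable; the first and last OS pages stay
    // PROT_NONE and act as guard pages.
    char* payload = base + osPage;
    if (mprotect(payload, payloadSize, PROT_READ | PROT_WRITE))
        return 1;

    payload[0] = 1;               // fine: inside the payload
    // base[0] = 1;               // would fault: front guard page
    // payload[payloadSize] = 1;  // would fault: back guard page

    std::printf("payload at %p, %zu bytes\n",
                static_cast<void*>(payload), payloadSize);
    munmap(base, totalSize);
    return 0;
}
```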
| @@ -210,87 +333,21 @@ public: |
| // Round up the requested size to nearest os page size. |
| payloadSize = roundToOsPageSize(payloadSize); |
| - // Overallocate by blinkPageSize and 2 times OS page size to |
| - // ensure a chunk of memory which is blinkPageSize aligned and |
| - // has a system page before and after to use for guarding. We |
| - // unmap the excess memory before returning. |
| - size_t allocationSize = payloadSize + 2 * osPageSize() + blinkPageSize; |
| - |
| - ASSERT(Heap::heapDoesNotContainCacheIsEmpty()); |
| -#if OS(POSIX) |
| - Address base = static_cast<Address>(mmap(0, allocationSize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0)); |
| - RELEASE_ASSERT(base != MAP_FAILED); |
| - |
| - Address end = base + allocationSize; |
| - Address alignedBase = roundToBlinkPageBoundary(base); |
| - Address payloadBase = alignedBase + osPageSize(); |
| - Address payloadEnd = payloadBase + payloadSize; |
| - Address blinkPageEnd = payloadEnd + osPageSize(); |
| - |
| - // If the allocate memory was not blink page aligned release |
| - // the memory before the aligned address. |
| - if (alignedBase != base) |
| - MemoryRegion(base, alignedBase - base).release(); |
| - |
| - // Create guard pages by decommiting an OS page before and |
| - // after the payload. |
| - MemoryRegion(alignedBase, osPageSize()).decommit(); |
| - MemoryRegion(payloadEnd, osPageSize()).decommit(); |
| - |
| - // Free the additional memory at the end of the page if any. |
| - if (blinkPageEnd < end) |
| - MemoryRegion(blinkPageEnd, end - blinkPageEnd).release(); |
| - |
| - return new PageMemory(MemoryRegion(alignedBase, blinkPageEnd - alignedBase), MemoryRegion(payloadBase, payloadSize)); |
| -#else |
| - Address base = 0; |
| - Address alignedBase = 0; |
| - |
| - // On Windows it is impossible to partially release a region |
| - // of memory allocated by VirtualAlloc. To avoid wasting |
| - // virtual address space we attempt to release a large region |
| - // of memory returned as a whole and then allocate an aligned |
| - // region inside this larger region. |
| - for (int attempt = 0; attempt < 3; attempt++) { |
| - base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS)); |
| - RELEASE_ASSERT(base); |
| - VirtualFree(base, 0, MEM_RELEASE); |
| - |
| - alignedBase = roundToBlinkPageBoundary(base); |
| - base = static_cast<Address>(VirtualAlloc(alignedBase, payloadSize + 2 * osPageSize(), MEM_RESERVE, PAGE_NOACCESS)); |
| - if (base) { |
| - RELEASE_ASSERT(base == alignedBase); |
| - allocationSize = payloadSize + 2 * osPageSize(); |
| - break; |
| - } |
| - } |
| - |
| - if (!base) { |
| - // We failed to avoid wasting virtual address space after |
| - // several attempts. |
| - base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS)); |
| - RELEASE_ASSERT(base); |
| - |
| - // FIXME: If base is by accident blink page size aligned |
| - // here then we can create two pages out of reserved |
| - // space. Do this. |
| - alignedBase = roundToBlinkPageBoundary(base); |
| - } |
| - |
| - Address payloadBase = alignedBase + osPageSize(); |
| - PageMemory* storage = new PageMemory(MemoryRegion(base, allocationSize), MemoryRegion(payloadBase, payloadSize)); |
| - bool res = storage->commit(); |
| - RELEASE_ASSERT(res); |
| + // Overallocate by 2 times OS page size to have space for a |
| + // guard page at the beginning and end of the blink heap page. |
| + size_t allocationSize = payloadSize + 2 * osPageSize(); |
| + PageMemoryRegion* pageMemoryRegion = PageMemoryRegion::allocate(allocationSize, 1); |
| + PageMemory* storage = setupPageMemoryInRegion(pageMemoryRegion, 0, payloadSize); |
| + RELEASE_ASSERT(storage->commit()); |
| return storage; |
| -#endif |
| } |
| private: |
| - PageMemory(const MemoryRegion& reserved, const MemoryRegion& writable) |
| + PageMemory(PageMemoryRegion* reserved, const MemoryRegion& writable) |
| : m_reserved(reserved) |
| , m_writable(writable) |
| { |
| - ASSERT(reserved.contains(writable)); |
| + ASSERT(reserved->contains(writable)); |
| // Register the writable area of the memory as part of the LSan root set. |
| // Only the writable area is mapped and can contain C++ objects. Those |
| @@ -299,7 +356,8 @@ private: |
| __lsan_register_root_region(m_writable.base(), m_writable.size()); |
| } |
| - MemoryRegion m_reserved; |
| + |
| + PageMemoryRegion* m_reserved; |
| MemoryRegion m_writable; |
| }; |
| @@ -720,13 +778,19 @@ PageMemory* ThreadHeap<Header>::takePageFromPool() |
| } |
| template<typename Header> |
| -void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* unused) |
| +void ThreadHeap<Header>::addPageMemoryToPool(PageMemory* storage) |
| { |
| flushHeapContainsCache(); |
| - PageMemory* storage = unused->storage(); |
| PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool); |
| m_pagePool = entry; |
| +} |
| + |
| +template <typename Header> |
| +void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* page) |
| +{ |
| + PageMemory* storage = page->storage(); |
| storage->decommit(); |
| + addPageMemoryToPool(storage); |
| } |
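addPageMemoryToPool above pushes decommitted page memory onto m_pagePool, a singly linked list of PagePoolEntry nodes, and takePageFromPool pops entries off the same list. A tiny standalone sketch of that push/pop pattern, with placeholder types rather than the real PageMemory:

```cpp
// Minimal sketch of the page-pool free list behind addPageMemoryToPool /
// takePageFromPool. PageMemorySketch is a placeholder, not Blink's
// PageMemory.
#include <cstdio>

struct PageMemorySketch { int id; };

struct PoolEntry {
    PoolEntry(PageMemorySketch* storage, PoolEntry* next)
        : storage(storage), next(next) { }
    PageMemorySketch* storage;
    PoolEntry* next;
};

struct PagePool {
    PoolEntry* head = nullptr;

    // Push decommitted page memory onto the pool for later reuse.
    void add(PageMemorySketch* storage) { head = new PoolEntry(storage, head); }

    // Pop page memory from the pool, or return null if it is empty.
    PageMemorySketch* take()
    {
        if (!head)
            return nullptr;
        PoolEntry* entry = head;
        head = entry->next;
        PageMemorySketch* storage = entry->storage;
        delete entry;
        return storage;
    }
};

int main()
{
    PagePool pool;
    PageMemorySketch a = { 1 };
    PageMemorySketch b = { 2 };
    pool.add(&a);
    pool.add(&b);
    PageMemorySketch* first = pool.take();
    PageMemorySketch* second = pool.take();
    std::printf("%d %d\n", first->id, second->id); // prints "2 1": LIFO order
    return 0;
}
```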
| template<typename Header> |
| @@ -735,7 +799,20 @@ void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) |
| Heap::flushHeapDoesNotContainCache(); |
| PageMemory* pageMemory = takePageFromPool(); |
| if (!pageMemory) { |
| - pageMemory = PageMemory::allocate(blinkPagePayloadSize()); |
| + // Allocate a memory region for blinkPagesPerRegion pages that |
| + // will each have the following layout. |

haraken  2014/06/27 06:50:17
I guess "each" is confusing. Also "... payload ...

Mads Ager (chromium)  2014/06/27 07:01:04
The layout that we get in a region is:
[guard |

haraken  2014/06/27 07:16:43
Now I understood!
| + // |
| + // [ guard os page | ... payload ... | guard os page ] |
| + // ^---{ aligned to blink page size } |
| + PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion); |
| + // Set up the PageMemory object for each of the pages in the |
| + // region. |
| + size_t offset = 0; |
| + for (size_t i = 0; i < blinkPagesPerRegion; i++) { |
| + addPageMemoryToPool(PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize())); |
| + offset += blinkPageSize; |
| + } |
| + pageMemory = takePageFromPool(); |
| RELEASE_ASSERT(pageMemory); |
| } |
| HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); |
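Because the inline discussion above shows the region layout was easy to misread, here is a small worked sketch of the address arithmetic the new allocatePage performs: one reserved region holds blinkPagesPerRegion blink pages back to back, and within each blink page the payload sits between a front and a back guard OS page, as the layout diagram above indicates. The constants are illustrative stand-ins, not the real blinkPageSize or blinkPagesPerRegion values.

```cpp
// Worked sketch of how a PageMemoryRegion is carved into blink pages,
// each with a leading and trailing guard OS page around its payload.
// The constants are illustrative stand-ins for the real Blink values.
#include <cstddef>
#include <cstdio>

int main()
{
    const size_t kOsPageSize = 4096;       // stand-in for osPageSize()
    const size_t kBlinkPageSize = 1 << 17; // stand-in for blinkPageSize
    const size_t kPagesPerRegion = 10;     // stand-in for blinkPagesPerRegion

    // Payload of one blink page: everything except the two guard pages.
    const size_t payloadSize = kBlinkPageSize - 2 * kOsPageSize;
    const size_t regionSize = kBlinkPageSize * kPagesPerRegion;

    std::printf("region size: %zu bytes\n", regionSize);
    for (size_t i = 0; i < kPagesPerRegion; ++i) {
        size_t pageOffset = i * kBlinkPageSize;           // start of blink page i
        size_t payloadOffset = pageOffset + kOsPageSize;  // skip the front guard
        size_t payloadEnd = payloadOffset + payloadSize;  // back guard starts here
        std::printf("page %2zu: front guard at %8zu, payload [%8zu, %8zu), "
                    "back guard at %8zu\n",
                    i, pageOffset, payloadOffset, payloadEnd, payloadEnd);
    }
    return 0;
}
```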