Chromium Code Reviews

| Index: third_party/WebKit/Source/wtf/PageAllocator.cpp |
| diff --git a/third_party/WebKit/Source/wtf/PageAllocator.cpp b/third_party/WebKit/Source/wtf/PageAllocator.cpp |
| index 5f5eb440e4bfa5c87cc5b0ea2ee1093bc78a1d31..60585ad20f9ea2690fc2649be6d91f95a76c4ebc 100644 |
| --- a/third_party/WebKit/Source/wtf/PageAllocator.cpp |
| +++ b/third_party/WebKit/Source/wtf/PageAllocator.cpp |
| @@ -58,8 +58,8 @@ |
| namespace WTF { |
| -// This simple internal function wraps the OS-specific page allocation call so |
| -// that it behaves consistently: the address is a hint and if it cannot be used, |
| +// This internal function wraps the OS-specific page allocation call so that |
| +// it behaves consistently: the address is a hint and if it cannot be used, |
| // the allocation will be placed elsewhere. |
| static void* systemAllocPages(void* addr, size_t len, PageAccessibilityConfiguration pageAccessibility) |
| { |
| @@ -67,9 +67,9 @@ static void* systemAllocPages(void* addr, size_t len, PageAccessibilityConfigura |
| ASSERT(!(reinterpret_cast<uintptr_t>(addr) & kPageAllocationGranularityOffsetMask)); |
| void* ret; |
| #if OS(WIN) |
| - int accessFlag = pageAccessibility == PageAccessible ? PAGE_READWRITE : PAGE_NOACCESS; |
| + DWORD accessFlag = pageAccessibility == PageAccessible ? PAGE_READWRITE : PAGE_NOACCESS; |
| ret = VirtualAlloc(addr, len, MEM_RESERVE | MEM_COMMIT, accessFlag); |
| - if (!ret) |
| + if (!ret && addr) |
| ret = VirtualAlloc(0, len, MEM_RESERVE | MEM_COMMIT, accessFlag); |
|
JF
2015/10/05 20:32:38
It's not clear what the intent of the fallback is.
|
| #else |
| int accessFlag = pageAccessibility == PageAccessible ? (PROT_READ | PROT_WRITE) : PROT_NONE; |
| @@ -80,29 +80,6 @@ static void* systemAllocPages(void* addr, size_t len, PageAccessibilityConfigura |
| return ret; |
| } |
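For readers following the comment above about the fallback: on POSIX a non-MAP_FIXED address is already only advisory, which is why only the Windows branch needs an explicit second VirtualAlloc call when the hinted range is unavailable. A minimal standalone POSIX-only sketch of that "address is a hint" contract (hypothetical names, not part of the patch):

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdio>

    int main()
    {
        const size_t len = 1 << 20;
        // Arbitrary, page-aligned hint; without MAP_FIXED the kernel treats it
        // as advisory and places the mapping elsewhere if the range is busy.
        void* hint = reinterpret_cast<void*>(0x30000000u);
        void* p = mmap(hint, len, PROT_READ | PROT_WRITE,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        if (p == MAP_FAILED)
            return 1;
        std::printf("requested %p, got %p\n", hint, p);
        munmap(p, len);
        return 0;
    }

The patched Windows branch additionally guards the retry with addr, which appears intended to skip a pointless second call when no hint was supplied in the first place.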
| -static bool trimMapping(void* baseAddr, size_t baseLen, void* trimAddr, size_t trimLen) |
| -{ |
| -#if OS(WIN) |
| - return false; |
| -#else |
| - char* basePtr = static_cast<char*>(baseAddr); |
| - char* trimPtr = static_cast<char*>(trimAddr); |
| - ASSERT(trimPtr >= basePtr); |
| - ASSERT(trimPtr + trimLen <= basePtr + baseLen); |
| - size_t preLen = trimPtr - basePtr; |
| - if (preLen) { |
| - int ret = munmap(basePtr, preLen); |
| - RELEASE_ASSERT(!ret); |
| - } |
| - size_t postLen = (basePtr + baseLen) - (trimPtr + trimLen); |
| - if (postLen) { |
| - int ret = munmap(trimPtr + trimLen, postLen); |
| - RELEASE_ASSERT(!ret); |
| - } |
| - return true; |
| -#endif |
| -} |
| - |
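For context on what the deleted trimMapping() provided (and what the new loop below re-implements inline for POSIX): take an oversized mapping, then unmap the unaligned slack on either side of the wanted range. A standalone sketch of that technique, with hypothetical names and the simplifying assumptions that align is a power of two and align and len are multiples of the system page size:

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>

    // Reserve len bytes aligned to 'align' by mapping an oversized region and
    // unmapping the slack before and after the aligned sub-range (POSIX only).
    static void* alignedAlloc(size_t len, size_t align)
    {
        size_t tryLen = len + align; // big enough to contain an aligned run of len bytes
        void* raw = mmap(nullptr, tryLen, PROT_READ | PROT_WRITE,
                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        if (raw == MAP_FAILED)
            return nullptr;
        char* base = static_cast<char*>(raw);
        uintptr_t addr = reinterpret_cast<uintptr_t>(base);
        size_t preSlack = (align - (addr & (align - 1))) & (align - 1);
        size_t postSlack = tryLen - preSlack - len;
        if (preSlack)
            munmap(base, preSlack); // trim the unaligned prefix
        if (postSlack)
            munmap(base + preSlack + len, postSlack); // trim the tail
        return base + preSlack;
    }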
| void* allocPages(void* addr, size_t len, size_t align, PageAccessibilityConfiguration pageAccessibility) |
| { |
| ASSERT(len >= kPageAllocationGranularity); |
| @@ -119,49 +96,67 @@ void* allocPages(void* addr, size_t len, size_t align, PageAccessibilityConfigur |
| addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & alignBaseMask); |
| } |
| - // The common case, which is also the least work we can do, is that the |
| - // address and length are suitable. Just try it. |
| - void* ret = systemAllocPages(addr, len, pageAccessibility); |
| - // If the alignment is to our liking, we're done. |
| - if (!ret || !(reinterpret_cast<uintptr_t>(ret) & alignOffsetMask)) |
| - return ret; |
| - |
| - // Annoying. Unmap and map a larger range to be sure to succeed on the |
| - // second, slower attempt. |
| - freePages(ret, len); |
| + // First try to force an exact-size, aligned allocation from our random base. |
| + for (int count = 0; count < 3; ++count) { |
| + void* ret = systemAllocPages(addr, len, pageAccessibility); |
| + // If the alignment is to our liking, we're done. |
| + if (!(reinterpret_cast<uintptr_t>(ret) & alignOffsetMask)) |
| + return ret; |
| + // We failed, so we retry another range depending on the size of our address space. |
| + freePages(ret, len); |
| +#if CPU(X86_64) || CPU(ARM64) |
| + // Keep trying random addresses on 64-bit systems, because they have the space. |
| + addr = getRandomPageBase(); |
| + addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr)& alignBaseMask); |
|
JF
2015/10/05 20:32:38
Nit clang-format.
|
| +#else |
| + // Use a linear probe on 32-bit systems, where the address space tends to be cramped. |
| + // This may wrap, but we'll just fall back to the guaranteed method in that case. |
| + addr = reinterpret_cast<void*>((reinterpret_cast<uintptr_t>(ret) + align) & alignBaseMask); |
|
JF
2015/10/05 20:32:38
& alignBaseMask isn't required here, I'd assert on
|
| +#endif |
|
JF
2015/10/05 20:32:38
Same as below, I'd avoid the preprocessor and just
|
| + } |
| + // Map a larger allocation so we can force alignment, but continuing randomizing only on |
| + // 64-bit POSIX. |
| size_t tryLen = len + (align - kPageAllocationGranularity); |
| RELEASE_ASSERT(tryLen > len); |
| - |
| - // We loop to cater for the unlikely case where another thread maps on top |
| - // of the aligned location we choose. |
| - int count = 0; |
| - while (count++ < 100) { |
| - ret = systemAllocPages(addr, tryLen, pageAccessibility); |
| + while (true) { |
| + addr = nullptr; |
|
JF
2015/10/05 20:32:38
This drops the explicit address that's passed in t
|
| +#if OS(POSIX) && (CPU(X86_64) && CPU(ARM64)) |
|
JF
2015/10/05 20:18:09
This check is wrong.
jschuh
2015/10/05 20:24:17
Good catch. Would have lost that extra randomness
JF
2015/10/05 20:32:38
You should have a static_assert here that sizeof(v
|
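A sketch of the compile-time guard the last comment appears to ask for (the comment is truncated, so the exact wording is an assumption); it documents that this randomized-retry branch only makes sense with a 64-bit address space:

    // Hypothetical guard; placement and message are illustrative only.
    static_assert(sizeof(void*) == 8, "randomized retry assumes a 64-bit address space");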
| + addr = getRandomPageBase(); |
| +#endif |
| + void* ret = systemAllocPages(addr, tryLen, pageAccessibility); |
| if (!ret) |
|
JF
2015/10/05 20:32:38
This drops the alignment requirement.
|
| - return 0; |
| - // We can now try and trim out a subset of the mapping. |
| - addr = reinterpret_cast<void*>((reinterpret_cast<uintptr_t>(ret) + alignOffsetMask) & alignBaseMask); |
| - |
| - // On POSIX systems, we can trim the oversized mapping to fit exactly. |
| - // This will always work on POSIX systems. |
| - if (trimMapping(ret, tryLen, addr, len)) |
| - return addr; |
| - |
| - // On Windows, you can't trim an existing mapping so we unmap and remap |
| - // a subset. We used to do for all platforms, but OSX 10.8 has a |
| - // broken mmap() that ignores address hints for valid, unused addresses. |
| - freePages(ret, tryLen); |
| - ret = systemAllocPages(addr, len, pageAccessibility); |
| - if (ret == addr || !ret) |
| return ret; |
| - |
| - // Unlikely race / collision. Do the simple thing and just start again. |
| + size_t preSlack = reinterpret_cast<uintptr_t>(ret) & (align - 1); |
|
JF
2015/10/05 20:32:38
align - 1 is alignOffsetMask.
|
| + preSlack = preSlack ? align - preSlack : 0; |
| + size_t postSlack = tryLen - preSlack - len; |
| + ASSERT(preSlack || postSlack); |
| + ASSERT(preSlack < tryLen); |
| + ASSERT(postSlack < tryLen); |
| +#if OS(POSIX) // On POSIX we can resize the allocation run. |
| + if (preSlack) { |
| + int res = munmap(ret, preSlack); |
| + RELEASE_ASSERT(!res); |
| + ret = addr = reinterpret_cast<char*>(ret) + preSlack; |
| + } |
| + if (postSlack) { |
| + int res = munmap(reinterpret_cast<char*>(ret) + len, postSlack); |
| + RELEASE_ASSERT(!res); |
| + } |
| +#else // On Windows we can't resize the allocation run. |
| + if (preSlack || postSlack) { |
| + addr = reinterpret_cast<char*>(ret) + preSlack; |
| + freePages(ret, len); |
| + ret = systemAllocPages(addr, len, pageAccessibility); |
| + if (!ret) |
| + return ret; |
| + } |
| +#endif |
| + if (ret == addr) |
| + return ret; |
| freePages(ret, len); |
| - addr = getRandomPageBase(); |
| - addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & alignBaseMask); |
| } |
| - IMMEDIATE_CRASH(); |
| + |
| return 0; |
| } |
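For reference, the preSlack/postSlack arithmetic introduced in the loop can be checked in isolation. A standalone sketch with made-up constants (not taken from the patch), showing that the carved-out sub-range is aligned and exactly fills the oversized mapping:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main()
    {
        // Hypothetical values: 64KB allocation granularity, 2MB requested alignment.
        const size_t kGranularity = 64 * 1024;
        const size_t align = 2 * 1024 * 1024;
        const size_t len = 4 * kGranularity;
        const size_t tryLen = len + (align - kGranularity);

        // Pretend the oversized mapping landed at a misaligned (page-aligned) address.
        const uintptr_t ret = 0x20010000;
        size_t preSlack = ret & (align - 1); // align - 1 is alignOffsetMask
        preSlack = preSlack ? align - preSlack : 0;
        size_t postSlack = tryLen - preSlack - len;

        // The sub-range [ret + preSlack, ret + preSlack + len) fits inside the
        // oversized mapping and its base is align-aligned.
        assert(preSlack + len + postSlack == tryLen);
        assert(((ret + preSlack) & (align - 1)) == 0);
        return 0;
    }

With these example numbers the tail slack happens to be zero; the patch's ASSERT(preSlack || postSlack) only requires that at least one side has slack.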