| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 41 matching lines...) |
| 52 | 52 |
| 53 #elif OS(WIN) | 53 #elif OS(WIN) |
| 54 | 54 |
| 55 #include <windows.h> | 55 #include <windows.h> |
| 56 | 56 |
| 57 // VirtualAlloc will fail if allocation at the hint address is blocked. | 57 // VirtualAlloc will fail if allocation at the hint address is blocked. |
| 58 static const bool kHintIsAdvisory = false; | 58 static const bool kHintIsAdvisory = false; |
| 59 | 59 |
| 60 #else | 60 #else |
| 61 #error Unknown OS | 61 #error Unknown OS |
| 62 #endif // OS(POSIX) | 62 #endif // OS(POSIX) |
| 63 | 63 |
| 64 namespace WTF { | 64 namespace WTF { |
| 65 | 65 |
| 66 // This internal function wraps the OS-specific page allocation call. The | 66 // This internal function wraps the OS-specific page allocation call. The |
| 67 // behavior of the hint address is determined by the kHintIsAdvisory constant. | 67 // behavior of the hint address is determined by the kHintIsAdvisory constant. |
| 68 // If true, a non-zero hint is advisory and the returned address may differ from | 68 // If true, a non-zero hint is advisory and the returned address may differ from |
| 69 // the hint. If false, the hint is mandatory and a successful allocation will | 69 // the hint. If false, the hint is mandatory and a successful allocation will |
| 70 // not differ from the hint. | 70 // not differ from the hint. |
| 71 static void* systemAllocPages(void* hint, size_t len, PageAccessibilityConfiguration pageAccessibility) | 71 static void* systemAllocPages( |
| 72 { | 72 void* hint, |
| 73 ASSERT(!(len & kPageAllocationGranularityOffsetMask)); | 73 size_t len, |
| 74 ASSERT(!(reinterpret_cast<uintptr_t>(hint) & kPageAllocationGranularityOffsetMask)); | 74 PageAccessibilityConfiguration pageAccessibility) { |
| 75 void* ret; | 75 ASSERT(!(len & kPageAllocationGranularityOffsetMask)); |
| 76 ASSERT(!(reinterpret_cast<uintptr_t>(hint) & |
| 77 kPageAllocationGranularityOffsetMask)); |
| 78 void* ret; |
| 76 #if OS(WIN) | 79 #if OS(WIN) |
| 77 DWORD accessFlag = pageAccessibility == PageAccessible ? PAGE_READWRITE : PAGE_NOACCESS; | 80 DWORD accessFlag = |
| 78 ret = VirtualAlloc(hint, len, MEM_RESERVE | MEM_COMMIT, accessFlag); | 81 pageAccessibility == PageAccessible ? PAGE_READWRITE : PAGE_NOACCESS; |
| 79 #else | 82 ret = VirtualAlloc(hint, len, MEM_RESERVE | MEM_COMMIT, accessFlag); |
| 80 int accessFlag = pageAccessibility == PageAccessible ? (PROT_READ | PROT_WRITE) : PROT_NONE; | 83 #else |
| 81 ret = mmap(hint, len, accessFlag, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); | 84 int accessFlag = pageAccessibility == PageAccessible |
| 82 if (ret == MAP_FAILED) | 85 ? (PROT_READ | PROT_WRITE) |
| 83 ret = 0; | 86 : PROT_NONE; |
| 84 #endif | 87 ret = mmap(hint, len, accessFlag, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); |
| 85 return ret; | 88 if (ret == MAP_FAILED) |
| 89 ret = 0; |
| 90 #endif |
| 91 return ret; |
| 86 } | 92 } |
| 87 | 93 |
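The kHintIsAdvisory constant documented above is the crux of systemAllocPages(). A minimal standalone sketch (POSIX-only, not WTF code) of the advisory behavior: mmap() may honor a non-fixed hint or silently relocate the mapping, while VirtualAlloc() on Windows either allocates at the requested base or fails outright.

    // Sketch: observe that an mmap() hint is only advisory.
    #include <sys/mman.h>
    #include <cstdio>

    int main() {
      void* hint = reinterpret_cast<void*>(0x600000000000ULL);
      void* p = mmap(hint, 4096, PROT_READ | PROT_WRITE,
                     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      if (p != MAP_FAILED) {
        std::printf("hint=%p got=%p\n", hint, p);  // may differ
        munmap(p, 4096);
      }
      return 0;
    }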
| 88 // Trims base to given length and alignment. Windows returns null on failure and frees base. | 94 // Trims base to given length and alignment. Windows returns null on failure and frees base. |
| 89 static void* trimMapping(void *base, size_t baseLen, size_t trimLen, uintptr_t align, PageAccessibilityConfiguration pageAccessibility) | 95 static void* trimMapping(void* base, |
| 90 { | 96 size_t baseLen, |
| 91 size_t preSlack = reinterpret_cast<uintptr_t>(base) & (align - 1); | 97 size_t trimLen, |
| 92 if (preSlack) | 98 uintptr_t align, |
| 93 preSlack = align - preSlack; | 99 PageAccessibilityConfiguration pageAccessibility) { |
| 94 size_t postSlack = baseLen - preSlack - trimLen; | 100 size_t preSlack = reinterpret_cast<uintptr_t>(base) & (align - 1); |
| 95 ASSERT(baseLen >= trimLen || preSlack || postSlack); | 101 if (preSlack) |
| 96 ASSERT(preSlack < baseLen); | 102 preSlack = align - preSlack; |
| 97 ASSERT(postSlack < baseLen); | 103 size_t postSlack = baseLen - preSlack - trimLen; |
| 98 void* ret = base; | 104 ASSERT(baseLen >= trimLen || preSlack || postSlack); |
| 99 | 105 ASSERT(preSlack < baseLen); |
| 100 #if OS(POSIX) // On POSIX we can resize the allocation run. | 106 ASSERT(postSlack < baseLen); |
| 101 (void) pageAccessibility; | 107 void* ret = base; |
| 102 if (preSlack) { | 108 |
| 103 int res = munmap(base, preSlack); | 109 #if OS(POSIX) // On POSIX we can resize the allocation run. |
| 104 RELEASE_ASSERT(!res); | 110 (void)pageAccessibility; |
| 105 ret = reinterpret_cast<char*>(base) + preSlack; | 111 if (preSlack) { |
| 112 int res = munmap(base, preSlack); |
| 113 RELEASE_ASSERT(!res); |
| 114 ret = reinterpret_cast<char*>(base) + preSlack; |
| 115 } |
| 116 if (postSlack) { |
| 117 int res = munmap(reinterpret_cast<char*>(ret) + trimLen, postSlack); |
| 118 RELEASE_ASSERT(!res); |
| 119 } |
| 120 #else // On Windows we can't resize the allocation run. |
| 121 if (preSlack || postSlack) { |
| 122 ret = reinterpret_cast<char*>(base) + preSlack; |
| 123 freePages(base, baseLen); |
| 124 ret = systemAllocPages(ret, trimLen, pageAccessibility); |
| 125 } |
| 126 #endif |
| 127 |
| 128 return ret; |
| 129 } |
| 130 |
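The slack arithmetic in trimMapping() is easiest to follow with concrete numbers. A self-checking sketch using hypothetical values (a 0x5000-byte mapping at base 0x7000, trimmed to 0x2000 bytes at 0x4000 alignment); this is an illustration, not WTF code:

    #include <cassert>
    #include <cstdint>

    int main() {
      uintptr_t base = 0x7000, baseLen = 0x5000;
      uintptr_t trimLen = 0x2000, align = 0x4000;
      uintptr_t preSlack = base & (align - 1);   // 0x3000 past alignment
      if (preSlack)
        preSlack = align - preSlack;             // 0x1000 to reach 0x8000
      uintptr_t postSlack = baseLen - preSlack - trimLen;   // 0x2000 tail
      assert(!((base + preSlack) & (align - 1)));         // start aligned
      assert(preSlack + trimLen + postSlack == baseLen);  // no bytes lost
      return 0;
    }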
| 131 void* allocPages(void* addr, |
| 132 size_t len, |
| 133 size_t align, |
| 134 PageAccessibilityConfiguration pageAccessibility) { |
| 135 ASSERT(len >= kPageAllocationGranularity); |
| 136 ASSERT(!(len & kPageAllocationGranularityOffsetMask)); |
| 137 ASSERT(align >= kPageAllocationGranularity); |
| 138 ASSERT(!(align & kPageAllocationGranularityOffsetMask)); |
| 139 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & |
| 140 kPageAllocationGranularityOffsetMask)); |
| 141 uintptr_t alignOffsetMask = align - 1; |
| 142 uintptr_t alignBaseMask = ~alignOffsetMask; |
| 143 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & alignOffsetMask)); |
| 144 |
| 145 // If the client passed null as the address, choose a good one. |
| 146 if (!addr) { |
| 147 addr = getRandomPageBase(); |
| 148 addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & |
| 149 alignBaseMask); |
| 150 } |
| 151 |
| 152 // First try to force an exact-size, aligned allocation from our random base. |
| 153 for (int count = 0; count < 3; ++count) { |
| 154 void* ret = systemAllocPages(addr, len, pageAccessibility); |
| 155 if (kHintIsAdvisory || ret) { |
| 156 // If the alignment is to our liking, we're done. |
| 157 if (!(reinterpret_cast<uintptr_t>(ret) & alignOffsetMask)) |
| 158 return ret; |
| 159 freePages(ret, len); |
| 160 #if CPU(32BIT) |
| 161 addr = reinterpret_cast<void*>( |
| 162 (reinterpret_cast<uintptr_t>(ret) + align) & alignBaseMask); |
| 163 #endif |
| 164 } else if (!addr) { // We know we're OOM when an unhinted allocation fails. |
| 165 return nullptr; |
| 166 |
| 167 } else { |
| 168 #if CPU(32BIT) |
| 169 addr = reinterpret_cast<char*>(addr) + align; |
| 170 #endif |
| 106 } | 171 } |
| 107 if (postSlack) { | 172 |
| 108 int res = munmap(reinterpret_cast<char*>(ret) + trimLen, postSlack); | 173 #if !CPU( \ |
| 109 RELEASE_ASSERT(!res); | 174 32BIT) // Keep trying random addresses on systems that have a large address space. |
| 110 } | 175 addr = getRandomPageBase(); |
| 111 #else // On Windows we can't resize the allocation run. | 176 addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & |
| 112 if (preSlack || postSlack) { | 177 alignBaseMask); |
| 113 ret = reinterpret_cast<char*>(base) + preSlack; | 178 #endif |
| 114 freePages(base, baseLen); | 179 } |
| 115 ret = systemAllocPages(ret, trimLen, pageAccessibility); | 180 |
| 116 } | 181 // Map a larger allocation so we can force alignment, but continue randomizing only on 64-bit POSIX. |
| 117 #endif | 182 size_t tryLen = len + (align - kPageAllocationGranularity); |
| 118 | 183 RELEASE_ASSERT(tryLen >= len); |
| 119 return ret; | 184 void* ret; |
| 120 } | 185 |
| 121 | 186 do { |
| 122 void* allocPages(void* addr, size_t len, size_t align, PageAccessibilityConfiguration pageAccessibility) | 187 // Don't continue to burn cycles on mandatory hints (Windows). |
| 123 { | 188 addr = kHintIsAdvisory ? getRandomPageBase() : nullptr; |
| 124 ASSERT(len >= kPageAllocationGranularity); | 189 ret = systemAllocPages(addr, tryLen, pageAccessibility); |
| 125 ASSERT(!(len & kPageAllocationGranularityOffsetMask)); | |
| 126 ASSERT(align >= kPageAllocationGranularity); | |
| 127 ASSERT(!(align & kPageAllocationGranularityOffsetMask)); | |
| 128 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & kPageAllocationGranularityOffsetMask)); | |
| 129 uintptr_t alignOffsetMask = align - 1; | |
| 130 uintptr_t alignBaseMask = ~alignOffsetMask; | |
| 131 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & alignOffsetMask)); | |
| 132 | |
| 133 // If the client passed null as the address, choose a good one. | |
| 134 if (!addr) { | |
| 135 addr = getRandomPageBase(); | |
| 136 addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & alignBaseMask); | |
| 137 } | |
| 138 | |
| 139 // First try to force an exact-size, aligned allocation from our random base. | |
| 140 for (int count = 0; count < 3; ++count) { | |
| 141 void* ret = systemAllocPages(addr, len, pageAccessibility); | |
| 142 if (kHintIsAdvisory || ret) { | |
| 143 // If the alignment is to our liking, we're done. | |
| 144 if (!(reinterpret_cast<uintptr_t>(ret)& alignOffsetMask)) | |
| 145 return ret; | |
| 146 freePages(ret, len); | |
| 147 #if CPU(32BIT) | |
| 148 addr = reinterpret_cast<void*>((reinterpret_cast<uintptr_t>(ret)+align) & alignBaseMask); | |
| 149 #endif | |
| 150 } else if (!addr) { // We know we're OOM when an unhinted allocation fails. | |
| 151 return nullptr; | |
| 152 | |
| 153 } else { | |
| 154 #if CPU(32BIT) | |
| 155 addr = reinterpret_cast<char*>(addr) + align; | |
| 156 #endif | |
| 157 } | |
| 158 | |
| 159 #if !CPU(32BIT) // Keep trying random addresses on systems that have a large address space. | |
| 160 addr = getRandomPageBase(); | |
| 161 addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & alignBaseMask); | |
| 162 #endif | |
| 163 } | |
| 164 | |
| 165 // Map a larger allocation so we can force alignment, but continue randomizing only on 64-bit POSIX. | |
| 166 size_t tryLen = len + (align - kPageAllocationGranularity); | |
| 167 RELEASE_ASSERT(tryLen >= len); | |
| 168 void* ret; | |
| 169 | |
| 170 do { | |
| 171 // Don't continue to burn cycles on mandatory hints (Windows). | |
| 172 addr = kHintIsAdvisory ? getRandomPageBase() : nullptr; | |
| 173 ret = systemAllocPages(addr, tryLen, pageAccessibility); | |
| 174 // The retries are for Windows, where a race can steal our mapping on resize. | 190 // The retries are for Windows, where a race can steal our mapping on resize. |
| 175 } while (ret && !(ret = trimMapping(ret, tryLen, len, align, pageAccessibility))); | 191 } while (ret && |
| 176 | 192 !(ret = trimMapping(ret, tryLen, len, align, pageAccessibility))); |
| 177 return ret; | 193 |
| 178 } | 194 return ret; |
| 179 | 195 } |
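The tryLen over-allocation used in the fallback above deserves a one-line justification: reserving len + (align - granularity) bytes guarantees an align-aligned window of len bytes wherever the OS places the granularity-aligned base. A self-checking sketch with hypothetical constants (not the WTF ones):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t granularity = 0x1000, align = 0x10000, len = 0x4000;
      const uintptr_t tryLen = len + (align - granularity);
      // Exhaust every granularity-aligned base modulo the alignment.
      for (uintptr_t base = 0; base < align; base += granularity) {
        uintptr_t alignedStart = (base + align - 1) & ~(align - 1);
        assert(alignedStart + len <= base + tryLen);  // window always fits
      }
      return 0;
    }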
| 180 void freePages(void* addr, size_t len) | 196 |
| 181 { | 197 void freePages(void* addr, size_t len) { |
| 182 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & kPageAllocationGranularityOffsetMask)); | 198 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & |
| 183 ASSERT(!(len & kPageAllocationGranularityOffsetMask)); | 199 kPageAllocationGranularityOffsetMask)); |
| 184 #if OS(POSIX) | 200 ASSERT(!(len & kPageAllocationGranularityOffsetMask)); |
| 185 int ret = munmap(addr, len); | 201 #if OS(POSIX) |
| 186 RELEASE_ASSERT(!ret); | 202 int ret = munmap(addr, len); |
| 187 #else | 203 RELEASE_ASSERT(!ret); |
| 188 BOOL ret = VirtualFree(addr, 0, MEM_RELEASE); | 204 #else |
| 205 BOOL ret = VirtualFree(addr, 0, MEM_RELEASE); |
| 206 RELEASE_ASSERT(ret); |
| 207 #endif |
| 208 } |
| 209 |
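A detail worth flagging in freePages(): with MEM_RELEASE, VirtualFree() requires a size of zero and the exact base returned by VirtualAlloc(), which is why the length argument above is hard-coded to 0. A Windows-only sketch of the pairing (illustration, not WTF code):

    #include <windows.h>
    #include <cassert>

    int main() {
      void* p = VirtualAlloc(nullptr, 1 << 16, MEM_RESERVE | MEM_COMMIT,
                             PAGE_READWRITE);
      assert(p);
      assert(VirtualFree(p, 0, MEM_RELEASE));  // size must be 0 here
      return 0;
    }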
| 210 void setSystemPagesInaccessible(void* addr, size_t len) { |
| 211 ASSERT(!(len & kSystemPageOffsetMask)); |
| 212 #if OS(POSIX) |
| 213 int ret = mprotect(addr, len, PROT_NONE); |
| 214 RELEASE_ASSERT(!ret); |
| 215 #else |
| 216 BOOL ret = VirtualFree(addr, len, MEM_DECOMMIT); |
| 217 RELEASE_ASSERT(ret); |
| 218 #endif |
| 219 } |
| 220 |
| 221 bool setSystemPagesAccessible(void* addr, size_t len) { |
| 222 ASSERT(!(len & kSystemPageOffsetMask)); |
| 223 #if OS(POSIX) |
| 224 return !mprotect(addr, len, PROT_READ | PROT_WRITE); |
| 225 #else |
| 226 return !!VirtualAlloc(addr, len, MEM_COMMIT, PAGE_READWRITE); |
| 227 #endif |
| 228 } |
| 229 |
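The two helpers above wrap the mprotect() (POSIX) and VirtualFree()/VirtualAlloc() (Windows) protection flips. A POSIX-only sketch of the round trip, as a guard-page setup would use it (illustration, not WTF code):

    #include <sys/mman.h>
    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t kPage = 4096;
      void* mem = mmap(nullptr, kPage, PROT_READ | PROT_WRITE,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      assert(mem != MAP_FAILED);
      char* p = static_cast<char*>(mem);
      p[0] = 1;                                  // writable
      assert(!mprotect(mem, kPage, PROT_NONE));  // any touch now faults
      assert(!mprotect(mem, kPage, PROT_READ | PROT_WRITE));
      p[0] = 2;                                  // writable again
      munmap(mem, kPage);
      return 0;
    }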
| 230 void decommitSystemPages(void* addr, size_t len) { |
| 231 ASSERT(!(len & kSystemPageOffsetMask)); |
| 232 #if OS(POSIX) |
| 233 int ret = madvise(addr, len, MADV_FREE); |
| 234 RELEASE_ASSERT(!ret); |
| 235 #else |
| 236 setSystemPagesInaccessible(addr, len); |
| 237 #endif |
| 238 } |
| 239 |
| 240 void recommitSystemPages(void* addr, size_t len) { |
| 241 ASSERT(!(len & kSystemPageOffsetMask)); |
| 242 #if OS(POSIX) |
| 243 (void)addr; |
| 244 #else |
| 245 RELEASE_ASSERT(setSystemPagesAccessible(addr, len)); |
| 246 #endif |
| 247 } |
| 248 |
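On the decommit/recommit pair above: the POSIX recommit path is deliberately a no-op because MADV_FREE leaves the mapping intact and the kernel recommits on the next touch. A POSIX-only sketch, assuming a platform that defines MADV_FREE (recent Linux or macOS); illustration, not WTF code:

    #include <sys/mman.h>
    #include <cassert>
    #include <cstring>

    int main() {
      const size_t kLen = 1 << 20;
      void* mem = mmap(nullptr, kLen, PROT_READ | PROT_WRITE,
                       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      assert(mem != MAP_FAILED);
      std::memset(mem, 0xab, kLen);            // dirty the pages
      assert(!madvise(mem, kLen, MADV_FREE));  // decommit: reclaimable
      // No explicit recommit: a later write simply repopulates the page
      // (zeroed if the kernel reclaimed it in the meantime).
      static_cast<char*>(mem)[0] = 1;
      munmap(mem, kLen);
      return 0;
    }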
| 249 void discardSystemPages(void* addr, size_t len) { |
| 250 ASSERT(!(len & kSystemPageOffsetMask)); |
| 251 #if OS(POSIX) |
| 252 // On POSIX, the implementation detail is that discard and decommit are the |
| 253 // same, and lead to pages that are returned to the system immediately and |
| 254 // get replaced with zeroed pages when touched. So we just call |
| 255 // decommitSystemPages() here to avoid code duplication. |
| 256 decommitSystemPages(addr, len); |
| 257 #else |
| 258 // On Windows discarded pages are not returned to the system immediately and |
| 259 // not guaranteed to be zeroed when returned to the application. |
| 260 using DiscardVirtualMemoryFunction = |
| 261 DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size); |
| 262 static DiscardVirtualMemoryFunction discardVirtualMemory = |
| 263 reinterpret_cast<DiscardVirtualMemoryFunction>(-1); |
| 264 if (discardVirtualMemory == |
| 265 reinterpret_cast<DiscardVirtualMemoryFunction>(-1)) |
| 266 discardVirtualMemory = |
| 267 reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress( |
| 268 GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory")); |
| 269 // Use DiscardVirtualMemory when available because it releases faster than MEM_RESET. |
| 270 DWORD ret = 1; |
| 271 if (discardVirtualMemory) |
| 272 ret = discardVirtualMemory(addr, len); |
| 273 // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on failure. |
| 274 if (ret) { |
| 275 void* ret = VirtualAlloc(addr, len, MEM_RESET, PAGE_READWRITE); |
| 189 RELEASE_ASSERT(ret); | 276 RELEASE_ASSERT(ret); |
| 190 #endif | 277 } |
| 191 } | 278 #endif |
| 192 | 279 } |
| 193 void setSystemPagesInaccessible(void* addr, size_t len) | 280 |
| 194 { | 281 } // namespace WTF |
| 195 ASSERT(!(len & kSystemPageOffsetMask)); | |
| 196 #if OS(POSIX) | |
| 197 int ret = mprotect(addr, len, PROT_NONE); | |
| 198 RELEASE_ASSERT(!ret); | |
| 199 #else | |
| 200 BOOL ret = VirtualFree(addr, len, MEM_DECOMMIT); | |
| 201 RELEASE_ASSERT(ret); | |
| 202 #endif | |
| 203 } | |
| 204 | |
| 205 bool setSystemPagesAccessible(void* addr, size_t len) | |
| 206 { | |
| 207 ASSERT(!(len & kSystemPageOffsetMask)); | |
| 208 #if OS(POSIX) | |
| 209 return !mprotect(addr, len, PROT_READ | PROT_WRITE); | |
| 210 #else | |
| 211 return !!VirtualAlloc(addr, len, MEM_COMMIT, PAGE_READWRITE); | |
| 212 #endif | |
| 213 } | |
| 214 | |
| 215 void decommitSystemPages(void* addr, size_t len) | |
| 216 { | |
| 217 ASSERT(!(len & kSystemPageOffsetMask)); | |
| 218 #if OS(POSIX) | |
| 219 int ret = madvise(addr, len, MADV_FREE); | |
| 220 RELEASE_ASSERT(!ret); | |
| 221 #else | |
| 222 setSystemPagesInaccessible(addr, len); | |
| 223 #endif | |
| 224 } | |
| 225 | |
| 226 void recommitSystemPages(void* addr, size_t len) | |
| 227 { | |
| 228 ASSERT(!(len & kSystemPageOffsetMask)); | |
| 229 #if OS(POSIX) | |
| 230 (void) addr; | |
| 231 #else | |
| 232 RELEASE_ASSERT(setSystemPagesAccessible(addr, len)); | |
| 233 #endif | |
| 234 } | |
| 235 | |
| 236 void discardSystemPages(void* addr, size_t len) | |
| 237 { | |
| 238 ASSERT(!(len & kSystemPageOffsetMask)); | |
| 239 #if OS(POSIX) | |
| 240 // On POSIX, the implementation detail is that discard and decommit are the | |
| 241 // same, and lead to pages that are returned to the system immediately and | |
| 242 // get replaced with zeroed pages when touched. So we just call | |
| 243 // decommitSystemPages() here to avoid code duplication. | |
| 244 decommitSystemPages(addr, len); | |
| 245 #else | |
| 246 // On Windows discarded pages are not returned to the system immediately and | |
| 247 // not guaranteed to be zeroed when returned to the application. | |
| 248 using DiscardVirtualMemoryFunction = DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size); | |
| 249 static DiscardVirtualMemoryFunction discardVirtualMemory = reinterpret_cast<DiscardVirtualMemoryFunction>(-1); | |
| 250 if (discardVirtualMemory == reinterpret_cast<DiscardVirtualMemoryFunction>(-1)) | |
| 251 discardVirtualMemory = reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress(GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory")); | |
| 252 // Use DiscardVirtualMemory when available because it releases faster than MEM_RESET. | |
| 253 DWORD ret = 1; | |
| 254 if (discardVirtualMemory) | |
| 255 ret = discardVirtualMemory(addr, len); | |
| 256 // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on failure. | |
| 257 if (ret) { | |
| 258 void* ret = VirtualAlloc(addr, len, MEM_RESET, PAGE_READWRITE); | |
| 259 RELEASE_ASSERT(ret); | |
| 260 } | |
| 261 #endif | |
| 262 } | |
| 263 | |
| 264 } // namespace WTF | |
| 265 | |
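Closing note on the discardSystemPages() Windows path: the GetProcAddress() lookup is the usual pattern for an API that may be absent at runtime (DiscardVirtualMemory ships with Windows 8.1 and later). A condensed sketch of the same lookup-then-fallback shape; the wrapper name is hypothetical and this is not the WTF function:

    #include <windows.h>

    using DiscardVirtualMemoryFn = DWORD(WINAPI*)(PVOID, SIZE_T);

    void discardPagesSketch(void* addr, size_t len) {
      static DiscardVirtualMemoryFn fn =
          reinterpret_cast<DiscardVirtualMemoryFn>(GetProcAddress(
              GetModuleHandleW(L"Kernel32.dll"), "DiscardVirtualMemory"));
      DWORD err = 1;  // nonzero: not discarded yet
      if (fn)
        err = fn(addr, len);  // preferred: releases physical pages now
      if (err)  // missing or failed: MEM_RESET lazily drops the contents
        VirtualAlloc(addr, len, MEM_RESET, PAGE_READWRITE);
    }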