Chromium Code Reviews

Unified Diff: base/allocator/partition_allocator/page_allocator.cc

Issue 2518253002: Move Partition Allocator into Chromium base. (Closed)
Patch Set: Respond to more of Primiano's comments; move PartitionAllocator.* back to wtf; some build fixes. Created 4 years ago.
-/*
- * Copyright (C) 2013 Google Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
-#include "wtf/allocator/PageAllocator.h"
-
-#include "wtf/Assertions.h"
-#include "wtf/Atomics.h"
-#include "wtf/allocator/AddressSpaceRandomization.h"
+#include "base/allocator/partition_allocator/page_allocator.h"
 
 #include <limits.h>
 
-#if OS(POSIX)
+#include "base/allocator/partition_allocator/address_space_randomization.h"
+#include "base/atomicops.h"
+#include "base/logging.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX)
 
 #include <errno.h>
 #include <sys/mman.h>
 
 #ifndef MADV_FREE
 #define MADV_FREE MADV_DONTNEED
 #endif
 
 #ifndef MAP_ANONYMOUS
 #define MAP_ANONYMOUS MAP_ANON
 #endif
 
-// On POSIX memmap uses a nearby address if the hint address is blocked.
+// On POSIX |mmap| uses a nearby address if the hint address is blocked.
 static const bool kHintIsAdvisory = true;
-static uint32_t s_allocPageErrorCode = 0;
+static volatile base::subtle::Atomic32 s_allocPageErrorCode = 0;
 
-#elif OS(WIN)
+#elif defined(OS_WIN)
 
 #include <windows.h>
 
-// VirtualAlloc will fail if allocation at the hint address is blocked.
+// |VirtualAlloc| will fail if allocation at the hint address is blocked.
 static const bool kHintIsAdvisory = false;
-static uint32_t s_allocPageErrorCode = ERROR_SUCCESS;
+static base::subtle::Atomic32 s_allocPageErrorCode = ERROR_SUCCESS;
 
 #else
 #error Unknown OS
-#endif  // OS(POSIX)
+#endif  // defined(OS_POSIX)
 
-namespace WTF {
+namespace base {
 
-// This internal function wraps the OS-specific page allocation call. The
-// behavior of the hint address is determined by the kHintIsAdvisory constant.
-// If true, a non-zero hint is advisory and the returned address may differ from
-// the hint. If false, the hint is mandatory and a successful allocation will
-// not differ from the hint.
+// This internal function wraps the OS-specific page allocation call:
+// |VirtualAlloc| on Windows, and |mmap| on POSIX.
 static void* systemAllocPages(
     void* hint,
     size_t len,
     PageAccessibilityConfiguration pageAccessibility) {
-  ASSERT(!(len & kPageAllocationGranularityOffsetMask));
-  ASSERT(!(reinterpret_cast<uintptr_t>(hint) &
+  DCHECK(!(len & kPageAllocationGranularityOffsetMask));
+  DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
            kPageAllocationGranularityOffsetMask));
   void* ret;
-#if OS(WIN)
+#if defined(OS_WIN)
   DWORD accessFlag =
       pageAccessibility == PageAccessible ? PAGE_READWRITE : PAGE_NOACCESS;
   ret = VirtualAlloc(hint, len, MEM_RESERVE | MEM_COMMIT, accessFlag);
   if (!ret)
-    releaseStore(&s_allocPageErrorCode, GetLastError());
+    base::subtle::Release_Store(&s_allocPageErrorCode, GetLastError());
 #else
   int accessFlag = pageAccessibility == PageAccessible
                        ? (PROT_READ | PROT_WRITE)
                        : PROT_NONE;
   ret = mmap(hint, len, accessFlag, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
   if (ret == MAP_FAILED) {
-    releaseStore(&s_allocPageErrorCode, errno);
+    base::subtle::Release_Store(&s_allocPageErrorCode, errno);
     ret = 0;
   }
 #endif
   return ret;
 }
 
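For context, the kHintIsAdvisory split reflects a real platform difference: POSIX mmap() treats a non-null first argument as a suggestion, while VirtualAlloc() fails outright when the hinted region is unavailable. A minimal standalone sketch of the advisory behavior (64-bit POSIX only; the hint value is arbitrary and not part of this CL):

#include <stdio.h>
#include <sys/mman.h>

int main() {
  void* hint = reinterpret_cast<void*>(0x400000000000);
  void* ret = mmap(hint, 4096, PROT_READ | PROT_WRITE,
                   MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  // The kernel may ignore the hint: |ret| can differ from |hint|, which is
  // why allocPages() checks the alignment of whatever address came back.
  printf("hint=%p got=%p\n", hint, ret);
  if (ret != MAP_FAILED)
    munmap(ret, 4096);
  return 0;
}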
 // Trims base to given length and alignment. Windows returns null on failure and
 // frees base.
 static void* trimMapping(void* base,
                          size_t baseLen,
                          size_t trimLen,
                          uintptr_t align,
                          PageAccessibilityConfiguration pageAccessibility) {
   size_t preSlack = reinterpret_cast<uintptr_t>(base) & (align - 1);
   if (preSlack)
     preSlack = align - preSlack;
   size_t postSlack = baseLen - preSlack - trimLen;
-  ASSERT(baseLen >= trimLen || preSlack || postSlack);
-  ASSERT(preSlack < baseLen);
-  ASSERT(postSlack < baseLen);
+  DCHECK(baseLen >= trimLen || preSlack || postSlack);
+  DCHECK(preSlack < baseLen);
+  DCHECK(postSlack < baseLen);
   void* ret = base;
 
-#if OS(POSIX)  // On POSIX we can resize the allocation run.
+#if defined(OS_POSIX)  // On POSIX we can resize the allocation run.
   (void)pageAccessibility;
   if (preSlack) {
     int res = munmap(base, preSlack);
-    RELEASE_ASSERT(!res);
+    CHECK(!res);
     ret = reinterpret_cast<char*>(base) + preSlack;
   }
   if (postSlack) {
     int res = munmap(reinterpret_cast<char*>(ret) + trimLen, postSlack);
-    RELEASE_ASSERT(!res);
+    CHECK(!res);
   }
 #else  // On Windows we can't resize the allocation run.
   if (preSlack || postSlack) {
     ret = reinterpret_cast<char*>(base) + preSlack;
     freePages(base, baseLen);
     ret = systemAllocPages(ret, trimLen, pageAccessibility);
   }
 #endif
 
   return ret;
 }
 
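The preSlack/postSlack arithmetic is easiest to verify with concrete numbers. A self-contained sketch, assuming a hypothetical 4 KiB allocation granularity and an arbitrary unaligned base address:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int main() {
  uintptr_t base = 0x5000;   // unaligned mapping start (illustrative)
  size_t trimLen = 0x4000;   // size the caller actually wants
  uintptr_t align = 0x4000;  // required alignment
  size_t baseLen = trimLen + (align - 0x1000);  // 0x7000, as in allocPages()

  size_t preSlack = base & (align - 1);  // 0x1000 past the previous boundary
  if (preSlack)
    preSlack = align - preSlack;  // 0x3000 up to the next boundary (0x8000)
  size_t postSlack = baseLen - preSlack - trimLen;  // 0x0 in this example

  assert(((base + preSlack) & (align - 1)) == 0);   // trimmed start is aligned
  assert(preSlack + trimLen + postSlack == baseLen);  // slack accounts for all
  return 0;
}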
 void* allocPages(void* addr,
                  size_t len,
                  size_t align,
                  PageAccessibilityConfiguration pageAccessibility) {
-  ASSERT(len >= kPageAllocationGranularity);
-  ASSERT(!(len & kPageAllocationGranularityOffsetMask));
-  ASSERT(align >= kPageAllocationGranularity);
-  ASSERT(!(align & kPageAllocationGranularityOffsetMask));
-  ASSERT(!(reinterpret_cast<uintptr_t>(addr) &
+  DCHECK(len >= kPageAllocationGranularity);
+  DCHECK(!(len & kPageAllocationGranularityOffsetMask));
+  DCHECK(align >= kPageAllocationGranularity);
+  DCHECK(!(align & kPageAllocationGranularityOffsetMask));
+  DCHECK(!(reinterpret_cast<uintptr_t>(addr) &
            kPageAllocationGranularityOffsetMask));
   uintptr_t alignOffsetMask = align - 1;
   uintptr_t alignBaseMask = ~alignOffsetMask;
-  ASSERT(!(reinterpret_cast<uintptr_t>(addr) & alignOffsetMask));
+  DCHECK(!(reinterpret_cast<uintptr_t>(addr) & alignOffsetMask));
 
   // If the client passed null as the address, choose a good one.
   if (!addr) {
     addr = getRandomPageBase();
     addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) &
                                    alignBaseMask);
   }
 
   // First try to force an exact-size, aligned allocation from our random base.
   for (int count = 0; count < 3; ++count) {
     void* ret = systemAllocPages(addr, len, pageAccessibility);
     if (kHintIsAdvisory || ret) {
       // If the alignment is to our liking, we're done.
       if (!(reinterpret_cast<uintptr_t>(ret) & alignOffsetMask))
         return ret;
       freePages(ret, len);
-#if CPU(32BIT)
+#if defined(ARCH_CPU_32_BITS)
       addr = reinterpret_cast<void*>(
           (reinterpret_cast<uintptr_t>(ret) + align) & alignBaseMask);
 #endif
     } else if (!addr) {  // We know we're OOM when an unhinted allocation fails.
       return nullptr;
 
     } else {
-#if CPU(32BIT)
+#if defined(ARCH_CPU_32_BITS)
       addr = reinterpret_cast<char*>(addr) + align;
 #endif
     }
 
-#if !CPU(32BIT)
+#if !defined(ARCH_CPU_32_BITS)
     // Keep trying random addresses on systems that have a large address space.
     addr = getRandomPageBase();
     addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) &
                                    alignBaseMask);
 #endif
   }
 
   // Map a larger allocation so we can force alignment, but continue randomizing
   // only on 64-bit POSIX.
   size_t tryLen = len + (align - kPageAllocationGranularity);
-  RELEASE_ASSERT(tryLen >= len);
+  CHECK(tryLen >= len);
   void* ret;
 
   do {
     // Don't continue to burn cycles on mandatory hints (Windows).
     addr = kHintIsAdvisory ? getRandomPageBase() : nullptr;
     ret = systemAllocPages(addr, tryLen, pageAccessibility);
     // The retries are for Windows, where a race can steal our mapping on
     // resize.
   } while (ret &&
            !(ret = trimMapping(ret, tryLen, len, align, pageAccessibility)));
 
   return ret;
 }
 
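For context, a hypothetical caller of the public API in this file. The 64 KiB figures are illustrative, and this assumes the PageAccessible enum value is visible in namespace base alongside these functions, as the .cc above suggests:

#include <stddef.h>
#include <stdint.h>

#include "base/allocator/partition_allocator/page_allocator.h"

void exampleCaller() {
  // 64 KiB satisfies the granularity and alignment DCHECKs on both platforms.
  size_t len = 64 * 1024;
  // A null address lets allocPages() pick a randomized, aligned base.
  void* p = base::allocPages(nullptr, len, len, base::PageAccessible);
  if (!p) {
    // errno (POSIX) or GetLastError() (Windows) from the failed attempt.
    uint32_t err = base::getAllocPageErrorCode();
    (void)err;  // e.g. report the OS error before bailing out
    return;
  }
  base::freePages(p, len);
}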
 void freePages(void* addr, size_t len) {
-  ASSERT(!(reinterpret_cast<uintptr_t>(addr) &
+  DCHECK(!(reinterpret_cast<uintptr_t>(addr) &
            kPageAllocationGranularityOffsetMask));
-  ASSERT(!(len & kPageAllocationGranularityOffsetMask));
+  DCHECK(!(len & kPageAllocationGranularityOffsetMask));
-#if OS(POSIX)
+#if defined(OS_POSIX)
   int ret = munmap(addr, len);
-  RELEASE_ASSERT(!ret);
+  CHECK(!ret);
 #else
   BOOL ret = VirtualFree(addr, 0, MEM_RELEASE);
-  RELEASE_ASSERT(ret);
+  CHECK(ret);
 #endif
 }
 
 void setSystemPagesInaccessible(void* addr, size_t len) {
-  ASSERT(!(len & kSystemPageOffsetMask));
+  DCHECK(!(len & kSystemPageOffsetMask));
-#if OS(POSIX)
+#if defined(OS_POSIX)
   int ret = mprotect(addr, len, PROT_NONE);
-  RELEASE_ASSERT(!ret);
+  CHECK(!ret);
 #else
   BOOL ret = VirtualFree(addr, len, MEM_DECOMMIT);
-  RELEASE_ASSERT(ret);
+  CHECK(ret);
 #endif
 }
 
 bool setSystemPagesAccessible(void* addr, size_t len) {
-  ASSERT(!(len & kSystemPageOffsetMask));
+  DCHECK(!(len & kSystemPageOffsetMask));
-#if OS(POSIX)
+#if defined(OS_POSIX)
   return !mprotect(addr, len, PROT_READ | PROT_WRITE);
 #else
   return !!VirtualAlloc(addr, len, MEM_COMMIT, PAGE_READWRITE);
 #endif
 }
 
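A sketch of how these two accessibility toggles pair up, e.g. for a guard page. The helper name and call shape are invented for illustration, not part of this CL:

#include <stddef.h>

#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/logging.h"

void toggleGuardPage(void* page, size_t systemPageSize, bool protect) {
  if (protect) {
    // Any subsequent read or write of |page| now faults.
    base::setSystemPagesInaccessible(page, systemPageSize);
  } else {
    // Restore read/write access; failure is reported via the return value.
    bool ok = base::setSystemPagesAccessible(page, systemPageSize);
    CHECK(ok);
  }
}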
 void decommitSystemPages(void* addr, size_t len) {
-  ASSERT(!(len & kSystemPageOffsetMask));
+  DCHECK(!(len & kSystemPageOffsetMask));
-#if OS(POSIX)
+#if defined(OS_POSIX)
   int ret = madvise(addr, len, MADV_FREE);
-  RELEASE_ASSERT(!ret);
+  CHECK(!ret);
 #else
   setSystemPagesInaccessible(addr, len);
 #endif
 }
 
 void recommitSystemPages(void* addr, size_t len) {
-  ASSERT(!(len & kSystemPageOffsetMask));
+  DCHECK(!(len & kSystemPageOffsetMask));
-#if OS(POSIX)
+#if defined(OS_POSIX)
   (void)addr;
 #else
-  RELEASE_ASSERT(setSystemPagesAccessible(addr, len));
+  CHECK(setSystemPagesAccessible(addr, len));
 #endif
 }
 
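The intended decommit/recommit lifecycle, as a hypothetical call site; |addr| and |len| must satisfy the system-page-alignment DCHECKs above:

#include <stddef.h>

#include "base/allocator/partition_allocator/page_allocator.h"

void releaseThenReuse(void* addr, size_t len) {
  // Hand the physical pages back: on POSIX they refill with zeroes when next
  // touched; on Windows they are decommitted outright.
  base::decommitSystemPages(addr, len);
  // ... later, before the memory is touched again ...
  base::recommitSystemPages(addr, len);  // no-op on POSIX, recommit on Windows
}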
 void discardSystemPages(void* addr, size_t len) {
-  ASSERT(!(len & kSystemPageOffsetMask));
+  DCHECK(!(len & kSystemPageOffsetMask));
-#if OS(POSIX)
+#if defined(OS_POSIX)
   // On POSIX, the implementation detail is that discard and decommit are the
   // same, and lead to pages that are returned to the system immediately and
   // get replaced with zeroed pages when touched. So we just call
   // decommitSystemPages() here to avoid code duplication.
   decommitSystemPages(addr, len);
 #else
   // On Windows discarded pages are not returned to the system immediately and
   // not guaranteed to be zeroed when returned to the application.
   using DiscardVirtualMemoryFunction =
       DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size);
   static DiscardVirtualMemoryFunction discardVirtualMemory =
       reinterpret_cast<DiscardVirtualMemoryFunction>(-1);
   if (discardVirtualMemory ==
       reinterpret_cast<DiscardVirtualMemoryFunction>(-1))
     discardVirtualMemory =
         reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress(
             GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory"));
   // Use DiscardVirtualMemory when available because it releases faster than
   // MEM_RESET.
   DWORD ret = 1;
   if (discardVirtualMemory)
     ret = discardVirtualMemory(addr, len);
   // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
   // failure.
   if (ret) {
     void* ret = VirtualAlloc(addr, len, MEM_RESET, PAGE_READWRITE);
-    RELEASE_ASSERT(ret);
+    CHECK(ret);
   }
 #endif
 }
 
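The Windows branch binds DiscardVirtualMemory lazily so the GetProcAddress lookup runs only once. The same pattern in isolation, as a Windows-only sketch with -1 as the "not yet resolved" sentinel (function name invented for illustration):

#include <windows.h>

using DiscardFn = DWORD(WINAPI*)(PVOID, SIZE_T);

DWORD discardIfAvailable(PVOID addr, SIZE_T len) {
  static DiscardFn fn = reinterpret_cast<DiscardFn>(-1);
  if (fn == reinterpret_cast<DiscardFn>(-1))
    fn = reinterpret_cast<DiscardFn>(GetProcAddress(
        GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory"));
  // A null |fn| means the API is unavailable on this Windows version; a
  // non-zero return tells the caller to fall back to VirtualAlloc(MEM_RESET).
  return fn ? fn(addr, len) : 1;
}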
 uint32_t getAllocPageErrorCode() {
-  return acquireLoad(&s_allocPageErrorCode);
+  return base::subtle::Acquire_Load(&s_allocPageErrorCode);
 }
 
-}  // namespace WTF
+}  // namespace base
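This CL replaces WTF's releaseStore/acquireLoad with their base::subtle equivalents. The acquire/release pairing in isolation, with illustrative names:

#include "base/atomicops.h"

static volatile base::subtle::Atomic32 g_lastError = 0;

void recordFailure(base::subtle::Atomic32 osError) {
  // Release_Store publishes the value so that a thread which later performs
  // an Acquire_Load observes it, along with everything written before the
  // store (here, nothing else; the error code itself is the payload).
  base::subtle::Release_Store(&g_lastError, osError);
}

base::subtle::Atomic32 readFailure() {
  return base::subtle::Acquire_Load(&g_lastError);
}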
