OLD | NEW |
1 /* | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 // Use of this source code is governed by a BSD-style license that can be |
3 * | 3 // found in the LICENSE file. |
4 * Redistribution and use in source and binary forms, with or without | |
5 * modification, are permitted provided that the following conditions are | |
6 * met: | |
7 * | |
8 * * Redistributions of source code must retain the above copyright | |
9 * notice, this list of conditions and the following disclaimer. | |
10 * * Redistributions in binary form must reproduce the above | |
11 * copyright notice, this list of conditions and the following disclaimer | |
12 * in the documentation and/or other materials provided with the | |
13 * distribution. | |
14 * * Neither the name of Google Inc. nor the names of its | |
15 * contributors may be used to endorse or promote products derived from | |
16 * this software without specific prior written permission. | |
17 * | |
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
29 */ | |
30 | 4 |
31 #include "wtf/allocator/PageAllocator.h" | 5 #include "base/allocator/partition_allocator/page_allocator.h" |
32 | |
33 #include "wtf/Assertions.h" | |
34 #include "wtf/Atomics.h" | |
35 #include "wtf/allocator/AddressSpaceRandomization.h" | |
36 | 6 |
37 #include <limits.h> | 7 #include <limits.h> |
38 | 8 |
39 #if OS(POSIX) | 9 #include "base/allocator/partition_allocator/address_space_randomization.h" |
| 10 #include "base/logging.h" |
| 11 #include "build/build_config.h" |
| 12 |
| 13 #if defined(OS_POSIX) |
40 | 14 |
41 #include <errno.h> | 15 #include <errno.h> |
42 #include <sys/mman.h> | 16 #include <sys/mman.h> |
43 | 17 |
44 #ifndef MADV_FREE | 18 #ifndef MADV_FREE |
45 #define MADV_FREE MADV_DONTNEED | 19 #define MADV_FREE MADV_DONTNEED |
46 #endif | 20 #endif |
47 | 21 |
48 #ifndef MAP_ANONYMOUS | 22 #ifndef MAP_ANONYMOUS |
49 #define MAP_ANONYMOUS MAP_ANON | 23 #define MAP_ANONYMOUS MAP_ANON |
50 #endif | 24 #endif |
51 | 25 |
52 // On POSIX memmap uses a nearby address if the hint address is blocked. | 26 // On POSIX memmap uses a nearby address if the hint address is blocked. |
53 static const bool kHintIsAdvisory = true; | 27 static const bool kHintIsAdvisory = true; |
54 static uint32_t s_allocPageErrorCode = 0; | 28 static uint32_t s_allocPageErrorCode = 0; |
55 | 29 |
56 #elif OS(WIN) | 30 #elif defined(OS_WIN) |
57 | 31 |
58 #include <windows.h> | 32 #include <windows.h> |
59 | 33 |
60 // VirtualAlloc will fail if allocation at the hint address is blocked. | 34 // VirtualAlloc will fail if allocation at the hint address is blocked. |
61 static const bool kHintIsAdvisory = false; | 35 static const bool kHintIsAdvisory = false; |
62 static uint32_t s_allocPageErrorCode = ERROR_SUCCESS; | 36 static uint32_t s_allocPageErrorCode = ERROR_SUCCESS; |
63 | 37 |
64 #else | 38 #else |
65 #error Unknown OS | 39 #error Unknown OS |
66 #endif // OS(POSIX) | 40 #endif // defined(OS_POSIX) |
67 | 41 |
68 namespace WTF { | 42 namespace base { |
69 | 43 |
// This internal function wraps the OS-specific page allocation call. The
// behavior of the hint address is determined by the kHintIsAdvisory constant.
// If true, a non-zero hint is advisory and the returned address may differ from
// the hint. If false, the hint is mandatory and a successful allocation will
// not differ from the hint.
//
// Preconditions (DCHECKed): |len| and |hint| are aligned to the page
// allocation granularity. On failure, returns null and records the OS error
// code (GetLastError() on Windows, errno on POSIX) so that
// getAllocPageErrorCode() can report it.
static void* systemAllocPages(
    void* hint,
    size_t len,
    PageAccessibilityConfiguration pageAccessibility) {
  DCHECK(!(len & kPageAllocationGranularityOffsetMask));
  DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
           kPageAllocationGranularityOffsetMask));
  void* ret;
#if defined(OS_WIN)
  // Reserve and commit in one call; PAGE_NOACCESS gives an address range that
  // faults on any touch until setSystemPagesAccessible() is called.
  DWORD accessFlag =
      pageAccessibility == PageAccessible ? PAGE_READWRITE : PAGE_NOACCESS;
  ret = VirtualAlloc(hint, len, MEM_RESERVE | MEM_COMMIT, accessFlag);
  if (!ret)
    // NOTE(review): releaseStore appears to come from the old WTF atomics
    // header, which is no longer in the include list — confirm it still
    // resolves (or should become a base/atomicops.h primitive).
    releaseStore(&s_allocPageErrorCode, GetLastError());
#else
  int accessFlag = pageAccessibility == PageAccessible
                       ? (PROT_READ | PROT_WRITE)
                       : PROT_NONE;
  ret = mmap(hint, len, accessFlag, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (ret == MAP_FAILED) {
    // Capture errno before any other call can clobber it, then normalize the
    // failure value to null for callers.
    releaseStore(&s_allocPageErrorCode, errno);
    ret = 0;
  }
#endif
  return ret;
}
101 | 75 |
// Trims base to given length and alignment. Windows returns null on failure and
// frees base.
//
// |base|/|baseLen| describe an over-sized mapping obtained from
// systemAllocPages(); this carves out a |trimLen|-byte region aligned to
// |align| from inside it. preSlack is the gap before the aligned start,
// postSlack the leftover tail after the trimmed region.
static void* trimMapping(void* base,
                         size_t baseLen,
                         size_t trimLen,
                         uintptr_t align,
                         PageAccessibilityConfiguration pageAccessibility) {
  size_t preSlack = reinterpret_cast<uintptr_t>(base) & (align - 1);
  if (preSlack)
    preSlack = align - preSlack;
  size_t postSlack = baseLen - preSlack - trimLen;
  DCHECK(baseLen >= trimLen || preSlack || postSlack);
  DCHECK(preSlack < baseLen);
  DCHECK(postSlack < baseLen);
  void* ret = base;

#if defined(OS_POSIX)  // On POSIX we can resize the allocation run.
  // The accessibility flag is unused here: munmap() shrinks the mapping in
  // place and the kept middle region retains its original protection.
  (void)pageAccessibility;
  if (preSlack) {
    int res = munmap(base, preSlack);
    CHECK(!res);
    ret = reinterpret_cast<char*>(base) + preSlack;
  }
  if (postSlack) {
    int res = munmap(reinterpret_cast<char*>(ret) + trimLen, postSlack);
    CHECK(!res);
  }
#else  // On Windows we can't resize the allocation run.
  // Free the whole mapping and immediately re-allocate just the aligned
  // region at a mandatory hint. Another thread can steal the address in
  // between, in which case systemAllocPages() returns null and the caller
  // (allocPages) retries.
  if (preSlack || postSlack) {
    ret = reinterpret_cast<char*>(base) + preSlack;
    freePages(base, baseLen);
    ret = systemAllocPages(ret, trimLen, pageAccessibility);
  }
#endif

  return ret;
}
139 | 113 |
// Allocates |len| bytes of page-granular memory aligned to |align|, starting
// from the (optional) hint |addr|. Strategy: up to three exact-size attempts
// at candidate addresses, then fall back to over-allocating and trimming via
// trimMapping(). Returns null on out-of-memory.
void* allocPages(void* addr,
                 size_t len,
                 size_t align,
                 PageAccessibilityConfiguration pageAccessibility) {
  DCHECK(len >= kPageAllocationGranularity);
  DCHECK(!(len & kPageAllocationGranularityOffsetMask));
  DCHECK(align >= kPageAllocationGranularity);
  DCHECK(!(align & kPageAllocationGranularityOffsetMask));
  DCHECK(!(reinterpret_cast<uintptr_t>(addr) &
           kPageAllocationGranularityOffsetMask));
  uintptr_t alignOffsetMask = align - 1;
  uintptr_t alignBaseMask = ~alignOffsetMask;
  DCHECK(!(reinterpret_cast<uintptr_t>(addr) & alignOffsetMask));

  // If the client passed null as the address, choose a good one.
  if (!addr) {
    addr = getRandomPageBase();
    addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) &
                                   alignBaseMask);
  }

  // First try to force an exact-size, aligned allocation from our random base.
  for (int count = 0; count < 3; ++count) {
    void* ret = systemAllocPages(addr, len, pageAccessibility);
    if (kHintIsAdvisory || ret) {
      // If the alignment is to our liking, we're done.
      if (!(reinterpret_cast<uintptr_t>(ret) & alignOffsetMask))
        return ret;
      freePages(ret, len);
#if defined(ARCH_CPU_32_BITS)
      // Address space is scarce on 32-bit: derive the next hint from where the
      // OS actually placed us, rounded up to the next aligned boundary.
      addr = reinterpret_cast<void*>(
          (reinterpret_cast<uintptr_t>(ret) + align) & alignBaseMask);
#endif
    } else if (!addr) {  // We know we're OOM when an unhinted allocation fails.
      return nullptr;

    } else {
#if defined(ARCH_CPU_32_BITS)
      // Mandatory hint failed (Windows): slide the hint forward by one
      // alignment unit and try again.
      addr = reinterpret_cast<char*>(addr) + align;
#endif
    }

#if !defined(ARCH_CPU_32_BITS)
    // Keep trying random addresses on systems that have a large address space.
    addr = getRandomPageBase();
    addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) &
                                   alignBaseMask);
#endif
  }

  // Map a larger allocation so we can force alignment, but continue randomizing
  // only on 64-bit POSIX.
  size_t tryLen = len + (align - kPageAllocationGranularity);
  // Guard against integer overflow in the over-allocation size.
  CHECK(tryLen >= len);
  void* ret;

  do {
    // Don't continue to burn cycles on mandatory hints (Windows).
    addr = kHintIsAdvisory ? getRandomPageBase() : nullptr;
    ret = systemAllocPages(addr, tryLen, pageAccessibility);
    // The retries are for Windows, where a race can steal our mapping on
    // resize.
  } while (ret &&
           !(ret = trimMapping(ret, tryLen, len, align, pageAccessibility)));

  return ret;
}
207 | 181 |
208 void freePages(void* addr, size_t len) { | 182 void freePages(void* addr, size_t len) { |
209 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & | 183 DCHECK(!(reinterpret_cast<uintptr_t>(addr) & |
210 kPageAllocationGranularityOffsetMask)); | 184 kPageAllocationGranularityOffsetMask)); |
211 ASSERT(!(len & kPageAllocationGranularityOffsetMask)); | 185 DCHECK(!(len & kPageAllocationGranularityOffsetMask)); |
212 #if OS(POSIX) | 186 #if defined(OS_POSIX) |
213 int ret = munmap(addr, len); | 187 int ret = munmap(addr, len); |
214 RELEASE_ASSERT(!ret); | 188 CHECK(!ret); |
215 #else | 189 #else |
216 BOOL ret = VirtualFree(addr, 0, MEM_RELEASE); | 190 BOOL ret = VirtualFree(addr, 0, MEM_RELEASE); |
217 RELEASE_ASSERT(ret); | 191 CHECK(ret); |
218 #endif | 192 #endif |
219 } | 193 } |
220 | 194 |
221 void setSystemPagesInaccessible(void* addr, size_t len) { | 195 void setSystemPagesInaccessible(void* addr, size_t len) { |
222 ASSERT(!(len & kSystemPageOffsetMask)); | 196 DCHECK(!(len & kSystemPageOffsetMask)); |
223 #if OS(POSIX) | 197 #if defined(OS_POSIX) |
224 int ret = mprotect(addr, len, PROT_NONE); | 198 int ret = mprotect(addr, len, PROT_NONE); |
225 RELEASE_ASSERT(!ret); | 199 CHECK(!ret); |
226 #else | 200 #else |
227 BOOL ret = VirtualFree(addr, len, MEM_DECOMMIT); | 201 BOOL ret = VirtualFree(addr, len, MEM_DECOMMIT); |
228 RELEASE_ASSERT(ret); | 202 CHECK(ret); |
229 #endif | 203 #endif |
230 } | 204 } |
231 | 205 |
232 bool setSystemPagesAccessible(void* addr, size_t len) { | 206 bool setSystemPagesAccessible(void* addr, size_t len) { |
233 ASSERT(!(len & kSystemPageOffsetMask)); | 207 DCHECK(!(len & kSystemPageOffsetMask)); |
234 #if OS(POSIX) | 208 #if defined(OS_POSIX) |
235 return !mprotect(addr, len, PROT_READ | PROT_WRITE); | 209 return !mprotect(addr, len, PROT_READ | PROT_WRITE); |
236 #else | 210 #else |
237 return !!VirtualAlloc(addr, len, MEM_COMMIT, PAGE_READWRITE); | 211 return !!VirtualAlloc(addr, len, MEM_COMMIT, PAGE_READWRITE); |
238 #endif | 212 #endif |
239 } | 213 } |
240 | 214 |
241 void decommitSystemPages(void* addr, size_t len) { | 215 void decommitSystemPages(void* addr, size_t len) { |
242 ASSERT(!(len & kSystemPageOffsetMask)); | 216 DCHECK(!(len & kSystemPageOffsetMask)); |
243 #if OS(POSIX) | 217 #if defined(OS_POSIX) |
244 int ret = madvise(addr, len, MADV_FREE); | 218 int ret = madvise(addr, len, MADV_FREE); |
245 RELEASE_ASSERT(!ret); | 219 CHECK(!ret); |
246 #else | 220 #else |
247 setSystemPagesInaccessible(addr, len); | 221 setSystemPagesInaccessible(addr, len); |
248 #endif | 222 #endif |
249 } | 223 } |
250 | 224 |
251 void recommitSystemPages(void* addr, size_t len) { | 225 void recommitSystemPages(void* addr, size_t len) { |
252 ASSERT(!(len & kSystemPageOffsetMask)); | 226 DCHECK(!(len & kSystemPageOffsetMask)); |
253 #if OS(POSIX) | 227 #if defined(OS_POSIX) |
254 (void)addr; | 228 (void)addr; |
255 #else | 229 #else |
256 RELEASE_ASSERT(setSystemPagesAccessible(addr, len)); | 230 CHECK(setSystemPagesAccessible(addr, len)); |
257 #endif | 231 #endif |
258 } | 232 } |
259 | 233 |
260 void discardSystemPages(void* addr, size_t len) { | 234 void discardSystemPages(void* addr, size_t len) { |
261 ASSERT(!(len & kSystemPageOffsetMask)); | 235 DCHECK(!(len & kSystemPageOffsetMask)); |
262 #if OS(POSIX) | 236 #if defined(OS_POSIX) |
263 // On POSIX, the implementation detail is that discard and decommit are the | 237 // On POSIX, the implementation detail is that discard and decommit are the |
264 // same, and lead to pages that are returned to the system immediately and | 238 // same, and lead to pages that are returned to the system immediately and |
265 // get replaced with zeroed pages when touched. So we just call | 239 // get replaced with zeroed pages when touched. So we just call |
266 // decommitSystemPages() here to avoid code duplication. | 240 // decommitSystemPages() here to avoid code duplication. |
267 decommitSystemPages(addr, len); | 241 decommitSystemPages(addr, len); |
268 #else | 242 #else |
269 // On Windows discarded pages are not returned to the system immediately and | 243 // On Windows discarded pages are not returned to the system immediately and |
270 // not guaranteed to be zeroed when returned to the application. | 244 // not guaranteed to be zeroed when returned to the application. |
271 using DiscardVirtualMemoryFunction = | 245 using DiscardVirtualMemoryFunction = |
272 DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size); | 246 DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size); |
273 static DiscardVirtualMemoryFunction discardVirtualMemory = | 247 static DiscardVirtualMemoryFunction discardVirtualMemory = |
274 reinterpret_cast<DiscardVirtualMemoryFunction>(-1); | 248 reinterpret_cast<DiscardVirtualMemoryFunction>(-1); |
275 if (discardVirtualMemory == | 249 if (discardVirtualMemory == |
276 reinterpret_cast<DiscardVirtualMemoryFunction>(-1)) | 250 reinterpret_cast<DiscardVirtualMemoryFunction>(-1)) |
277 discardVirtualMemory = | 251 discardVirtualMemory = |
278 reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress( | 252 reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress( |
279 GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory")); | 253 GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory")); |
280 // Use DiscardVirtualMemory when available because it releases faster than | 254 // Use DiscardVirtualMemory when available because it releases faster than |
281 // MEM_RESET. | 255 // MEM_RESET. |
282 DWORD ret = 1; | 256 DWORD ret = 1; |
283 if (discardVirtualMemory) | 257 if (discardVirtualMemory) |
284 ret = discardVirtualMemory(addr, len); | 258 ret = discardVirtualMemory(addr, len); |
285 // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on | 259 // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on |
286 // failure. | 260 // failure. |
287 if (ret) { | 261 if (ret) { |
288 void* ret = VirtualAlloc(addr, len, MEM_RESET, PAGE_READWRITE); | 262 void* ret = VirtualAlloc(addr, len, MEM_RESET, PAGE_READWRITE); |
289 RELEASE_ASSERT(ret); | 263 CHECK(ret); |
290 } | 264 } |
291 #endif | 265 #endif |
292 } | 266 } |
293 | 267 |
294 uint32_t getAllocPageErrorCode() { | 268 uint32_t getAllocPageErrorCode() { |
295 return acquireLoad(&s_allocPageErrorCode); | 269 return acquireLoad(&s_allocPageErrorCode); |
296 } | 270 } |
297 | 271 |
298 } // namespace WTF | 272 } // namespace base |
OLD | NEW |