OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 40 matching lines...) |
51 #elif OS(WIN) | 51 #elif OS(WIN) |
52 | 52 |
53 #include <windows.h> | 53 #include <windows.h> |
54 | 54 |
55 #else | 55 #else |
56 #error Unknown OS | 56 #error Unknown OS |
57 #endif // OS(POSIX) | 57 #endif // OS(POSIX) |
58 | 58 |
59 namespace WTF { | 59 namespace WTF { |
60 | 60 |
61 // This internal function wraps the OS-specific page allocation call so that | 61 // This simple internal function wraps the OS-specific page allocation call so |
62 // it behaves consistently: the address is a hint and if it cannot be used, | 62 // that it behaves consistently: the address is a hint and if it cannot be used, |
63 // the allocation will be placed elsewhere. | 63 // the allocation will be placed elsewhere. |
64 static void* systemAllocPages(void* addr, size_t len, PageAccessibilityConfiguration pageAccessibility) | 64 static void* systemAllocPages(void* addr, size_t len, PageAccessibilityConfiguration pageAccessibility) |
65 { | 65 { |
66 ASSERT(!(len & kPageAllocationGranularityOffsetMask)); | 66 ASSERT(!(len & kPageAllocationGranularityOffsetMask)); |
67 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & kPageAllocationGranularityOffsetMask)); | 67 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & kPageAllocationGranularityOffsetMask)); |
68 void* ret; | 68 void* ret; |
69 #if OS(WIN) | 69 #if OS(WIN) |
70 DWORD accessFlag = pageAccessibility == PageAccessible ? PAGE_READWRITE : PAGE_NOACCESS; | 70 int accessFlag = pageAccessibility == PageAccessible ? PAGE_READWRITE : PAGE_NOACCESS; |
71 ret = VirtualAlloc(addr, len, MEM_RESERVE | MEM_COMMIT, accessFlag); | 71 ret = VirtualAlloc(addr, len, MEM_RESERVE | MEM_COMMIT, accessFlag); |
72 if (!ret) | 72 if (!ret) |
73 ret = VirtualAlloc(0, len, MEM_RESERVE | MEM_COMMIT, accessFlag); | 73 ret = VirtualAlloc(0, len, MEM_RESERVE | MEM_COMMIT, accessFlag); |
74 #else | 74 #else |
75 int accessFlag = pageAccessibility == PageAccessible ? (PROT_READ | PROT_WRITE) : PROT_NONE; | 75 int accessFlag = pageAccessibility == PageAccessible ? (PROT_READ | PROT_WRITE) : PROT_NONE; |
76 ret = mmap(addr, len, accessFlag, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); | 76 ret = mmap(addr, len, accessFlag, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); |
77 if (ret == MAP_FAILED) | 77 if (ret == MAP_FAILED) |
78 ret = 0; | 78 ret = 0; |
79 #endif | 79 #endif |
80 return ret; | 80 return ret; |
81 } | 81 } |
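
The hint contract above is easiest to see on the POSIX side: without MAP_FIXED, mmap() may honor the requested address or place the mapping anywhere else. A minimal standalone sketch of that behavior, assuming a 64-bit POSIX system (the hint value is arbitrary and purely illustrative):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>

    int main()
    {
        const size_t len = 1 << 16; // 64KB, a multiple of the page size.
        void* hint = reinterpret_cast<void*>(0x600000000000ULL); // Arbitrary hint (assumes 64-bit).
        // No MAP_FIXED: the kernel may honor the hint or pick another address.
        void* ret = mmap(hint, len, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        if (ret == MAP_FAILED)
            return 1;
        printf("hinted %p, got %p\n", hint, ret);
        munmap(ret, len);
        return 0;
    }

VirtualAlloc() is stricter: a hinted reservation fails outright if the range is unavailable, which is why the Windows path above falls back to a second call with a null address.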
82 | 82 |
| 83 static bool trimMapping(void* baseAddr, size_t baseLen, void* trimAddr, size_t trimLen) |
| 84 { |
| 85 #if OS(WIN) |
| 86 return false; |
| 87 #else |
| 88 char* basePtr = static_cast<char*>(baseAddr); |
| 89 char* trimPtr = static_cast<char*>(trimAddr); |
| 90 ASSERT(trimPtr >= basePtr); |
| 91 ASSERT(trimPtr + trimLen <= basePtr + baseLen); |
| 92 size_t preLen = trimPtr - basePtr; |
| 93 if (preLen) { |
| 94 int ret = munmap(basePtr, preLen); |
| 95 RELEASE_ASSERT(!ret); |
| 96 } |
| 97 size_t postLen = (basePtr + baseLen) - (trimPtr + trimLen); |
| 98 if (postLen) { |
| 99 int ret = munmap(trimPtr + trimLen, postLen); |
| 100 RELEASE_ASSERT(!ret); |
| 101 } |
| 102 return true; |
| 103 #endif |
| 104 } |
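
trimMapping() is the POSIX half of an oversize-then-trim scheme: map more than needed, then munmap() the slack on either side of an aligned window. A self-contained sketch of the same arithmetic, assuming POSIX, a 4KB allocation granularity, and a 2MB alignment target (all values illustrative):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    int main()
    {
        const size_t align = 2 * 1024 * 1024; // Desired alignment.
        const size_t len = align;             // Desired final length.
        const size_t granularity = 4096;      // Assumed page granularity.
        // Over-allocate so an aligned window of len bytes must exist inside.
        size_t tryLen = len + (align - granularity);
        char* base = static_cast<char*>(mmap(0, tryLen, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0));
        if (base == MAP_FAILED)
            return 1;
        // Round up to the first aligned address inside the mapping.
        char* trim = reinterpret_cast<char*>((reinterpret_cast<uintptr_t>(base) + (align - 1)) & ~uintptr_t(align - 1));
        size_t preLen = trim - base;            // Slack before the window.
        size_t postLen = tryLen - preLen - len; // Slack after the window.
        if (preLen)
            munmap(base, preLen);               // Release the head...
        if (postLen)
            munmap(trim + len, postLen);        // ...and the tail.
        assert(!(reinterpret_cast<uintptr_t>(trim) & (align - 1)));
        munmap(trim, len); // [trim, trim + len) was the aligned result.
        return 0;
    }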
| 105 |
83 void* allocPages(void* addr, size_t len, size_t align, PageAccessibilityConfiguration pageAccessibility) | 106 void* allocPages(void* addr, size_t len, size_t align, PageAccessibilityConfiguration pageAccessibility) |
84 { | 107 { |
85 ASSERT(len >= kPageAllocationGranularity); | 108 ASSERT(len >= kPageAllocationGranularity); |
86 ASSERT(!(len & kPageAllocationGranularityOffsetMask)); | 109 ASSERT(!(len & kPageAllocationGranularityOffsetMask)); |
87 ASSERT(align >= kPageAllocationGranularity); | 110 ASSERT(align >= kPageAllocationGranularity); |
88 ASSERT(!(align & kPageAllocationGranularityOffsetMask)); | 111 ASSERT(!(align & kPageAllocationGranularityOffsetMask)); |
89 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & kPageAllocationGranularityOffsetMask)); | 112 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & kPageAllocationGranularityOffsetMask)); |
90 uintptr_t alignOffsetMask = align - 1; | 113 size_t alignOffsetMask = align - 1; |
91 uintptr_t alignBaseMask = ~alignOffsetMask; | 114 size_t alignBaseMask = ~alignOffsetMask; |
92 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & alignOffsetMask)); | 115 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & alignOffsetMask)); |
93 // If the client passed null as the address, choose a good one. | 116 // If the client passed null as the address, choose a good one. |
94 if (!addr) { | 117 if (!addr) { |
95 addr = getRandomPageBase(); | 118 addr = getRandomPageBase(); |
96 addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & alignBaseMask); | 119 addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & alignBaseMask); |
97 } | 120 } |
98 | 121 |
99 // First try to force an exact-size, aligned allocation from our random base. | 122 // The common case, which is also the least work we can do, is that the |
100 for (int count = 0; count < 3; ++count) { | 123 // address and length are suitable. Just try it. |
101 void* ret = systemAllocPages(addr, len, pageAccessibility); | 124 void* ret = systemAllocPages(addr, len, pageAccessibility); |
102 // If the alignment is to our liking, we're done. | 125 // If the alignment is to our liking, we're done. |
103 if (!(reinterpret_cast<uintptr_t>(ret) & alignOffsetMask)) | 126 if (!ret || !(reinterpret_cast<uintptr_t>(ret) & alignOffsetMask)) |
| 127 return ret; |
| 128 |
| 129 // Annoying. Unmap and map a larger range to be sure to succeed on the |
| 130 // second, slower attempt. |
| 131 freePages(ret, len); |
| 132 |
| 133 size_t tryLen = len + (align - kPageAllocationGranularity); |
| 134 RELEASE_ASSERT(tryLen > len); |
| 135 |
| 136 // We loop to cater for the unlikely case where another thread maps on top |
| 137 // of the aligned location we choose. |
| 138 int count = 0; |
| 139 while (count++ < 100) { |
| 140 ret = systemAllocPages(addr, tryLen, pageAccessibility); |
| 141 if (!ret) |
| 142 return 0; |
| 143 // We can now try and trim out a subset of the mapping. |
| 144 addr = reinterpret_cast<void*>((reinterpret_cast<uintptr_t>(ret) + alignOffsetMask) & alignBaseMask); |
| 145 |
| 146 // On POSIX systems, we can trim the oversized mapping to fit exactly; | 
| 147 // trimMapping() always succeeds there. |
| 148 if (trimMapping(ret, tryLen, addr, len)) |
| 149 return addr; |
| 150 |
| 151 // On Windows, you can't trim an existing mapping so we unmap and remap |
| 152 // a subset. We used to do this for all platforms, but OSX 10.8 has a |
| 153 // broken mmap() that ignores address hints for valid, unused addresses. |
| 154 freePages(ret, tryLen); |
| 155 ret = systemAllocPages(addr, len, pageAccessibility); |
| 156 if (ret == addr || !ret) |
104 return ret; | 157 return ret; |
105 // We failed, so we retry another range depending on the size of our address space. | 158 |
| 159 // Unlikely race / collision. Do the simple thing and just start again. |
106 freePages(ret, len); | 160 freePages(ret, len); |
107 #if CPU(32BIT) | |
108 // Use a linear probe on 32-bit systems, where the address space tends to be cramped. | |
109 // This may wrap, but we'll just fall back to the guaranteed method in that case. | |
110 addr = reinterpret_cast<void*>((reinterpret_cast<uintptr_t>(ret) + align) & alignBaseMask); | |
111 #else | |
112 // Keep trying random addresses on systems that have a large address space. | |
113 addr = getRandomPageBase(); | 161 addr = getRandomPageBase(); |
114 addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & alignBaseMask); | 162 addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & alignBaseMask); |
115 #endif | |
116 } | 163 } |
117 | 164 IMMEDIATE_CRASH(); |
118 // Map a larger allocation so we can force alignment, but continuing randomizing only on | 165 return 0; |
119 // 64-bit POSIX. | |
120 size_t tryLen = len + (align - kPageAllocationGranularity); | |
121 RELEASE_ASSERT(tryLen > len); | |
122 while (true) { | |
123 addr = nullptr; | |
124 #if OS(POSIX) && CPU(32BIT) | |
125 addr = getRandomPageBase(); | |
126 #endif | |
127 void* ret = systemAllocPages(addr, tryLen, pageAccessibility); | |
128 if (!ret) | |
129 return nullptr; | |
130 size_t preSlack = reinterpret_cast<uintptr_t>(ret) & alignOffsetMask; | |
131 preSlack = preSlack ? align - preSlack : 0; | |
132 size_t postSlack = tryLen - preSlack - len; | |
133 ASSERT(preSlack || postSlack); | |
134 ASSERT(preSlack < tryLen); | |
135 ASSERT(postSlack < tryLen); | |
136 #if OS(POSIX) // On POSIX we can resize the allocation run. | |
137 if (preSlack) { | |
138 int res = munmap(ret, preSlack); | |
139 RELEASE_ASSERT(!res); | |
140 ret = addr = reinterpret_cast<char*>(ret) + preSlack; | |
141 } | |
142 if (postSlack) { | |
143 int res = munmap(reinterpret_cast<char*>(ret) + len, postSlack); | |
144 RELEASE_ASSERT(!res); | |
145 } | |
146 #else // On Windows we can't resize the allocation run. | |
147 if (preSlack || postSlack) { | |
148 addr = reinterpret_cast<char*>(ret) + preSlack; | |
149 freePages(ret, len); | |
150 ret = systemAllocPages(addr, len, pageAccessibility); | |
151 if (!ret) | |
152 return nullptr; | |
153 } | |
154 #endif | |
155 if (ret == addr) { | |
156 ASSERT(!(reinterpret_cast<uintptr_t>(ret) & alignOffsetMask)); | |
157 return ret; | |
158 } | |
159 freePages(ret, len); | |
160 } | |
161 | |
162 return nullptr; | |
163 } | 166 } |
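
For reference, here is the shape of a hypothetical call site for the new allocPages(), sketched under the assumption of a 2MB "super page" request; kSuperPageSize and exampleCaller are illustrative names, not identifiers from this patch:

    static const size_t kSuperPageSize = 2 * 1024 * 1024; // Illustrative constant.

    void exampleCaller()
    {
        // Null hint: allocPages() picks a randomized, alignment-masked base.
        void* superPage = allocPages(0, kSuperPageSize, kSuperPageSize, PageAccessible);
        if (!superPage)
            return; // Address space exhausted: allocPages() reports this as 0.
        ASSERT(!(reinterpret_cast<uintptr_t>(superPage) & (kSuperPageSize - 1)));
        // ... use the region ...
        freePages(superPage, kSuperPageSize);
    }

Note the contract change visible in the diff: the old code could return nullptr from its retry loops, while the new code returns 0 only when the system itself is out of address space and treats 100 consecutive mapping collisions as fatal via IMMEDIATE_CRASH().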
164 | 167 |
165 void freePages(void* addr, size_t len) | 168 void freePages(void* addr, size_t len) |
166 { | 169 { |
167 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & kPageAllocationGranularityOffsetMask)); | 170 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & kPageAllocationGranularityOffsetMask)); |
168 ASSERT(!(len & kPageAllocationGranularityOffsetMask)); | 171 ASSERT(!(len & kPageAllocationGranularityOffsetMask)); |
169 #if OS(POSIX) | 172 #if OS(POSIX) |
170 int ret = munmap(addr, len); | 173 int ret = munmap(addr, len); |
171 RELEASE_ASSERT(!ret); | 174 RELEASE_ASSERT(!ret); |
172 #else | 175 #else |
(...skipping 57 matching lines...) |
230 #else | 233 #else |
231 (void) addr; | 234 (void) addr; |
232 (void) len; | 235 (void) len; |
233 // TODO(cevans): implement this using MEM_RESET for Windows, once we've | 236 // TODO(cevans): implement this using MEM_RESET for Windows, once we've |
234 // decided that the semantics are a match. | 237 // decided that the semantics are a match. |
235 #endif | 238 #endif |
236 } | 239 } |
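
For reference, the Windows implementation the TODO contemplates would look roughly as follows. Whether MEM_RESET's semantics (pages stay reserved and committed, contents may be discarded lazily, and reads may still return old data) actually match the POSIX discard path is exactly the open question, so this is a hedged sketch rather than the decided implementation:

    // Sketch only: MEM_RESET marks the range's contents as no longer needed
    // without decommitting it. The protection argument must be a valid value
    // but is otherwise ignored when MEM_RESET is specified.
    void* ret = VirtualAlloc(addr, len, MEM_RESET, PAGE_READWRITE);
    RELEASE_ASSERT(ret);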
237 | 240 |
238 } // namespace WTF | 241 } // namespace WTF |
239 | 242 |