OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 40 matching lines...)
51 #elif OS(WIN) | 51 #elif OS(WIN) |
52 | 52 |
53 #include <windows.h> | 53 #include <windows.h> |
54 | 54 |
55 #else | 55 #else |
56 #error Unknown OS | 56 #error Unknown OS |
57 #endif // OS(POSIX) | 57 #endif // OS(POSIX) |
58 | 58 |
59 namespace WTF { | 59 namespace WTF { |
60 | 60 |
61 // This simple internal function wraps the OS-specific page allocation call so | 61 // This internal function wraps the OS-specific page allocation call so that |
62 // that it behaves consistently: the address is a hint and if it cannot be used, | 62 // it behaves consistently: the address is a hint and if it cannot be used, |
63 // the allocation will be placed elsewhere. | 63 // the allocation will be placed elsewhere. |
64 static void* systemAllocPages(void* addr, size_t len, PageAccessibilityConfiguration pageAccessibility) | 64 static void* systemAllocPages(void* addr, size_t len, bool commit, PageAccessibilityConfiguration pageAccessibility)
65 { | 65 { |
66 ASSERT(!(len & kPageAllocationGranularityOffsetMask)); | 66 ASSERT(!(len & kPageAllocationGranularityOffsetMask)); |
67 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & kPageAllocationGranularityOffsetMask)); | 67 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & kPageAllocationGranularityOffsetMask));
68 void* ret; | 68 void* ret; |
69 #if OS(WIN) | 69 #if OS(WIN) |
70 int accessFlag = pageAccessibility == PageAccessible ? PAGE_READWRITE : PAGE_NOACCESS; | 70 DWORD allocType = MEM_RESERVE | (commit ? MEM_COMMIT : 0); |
71 ret = VirtualAlloc(addr, len, MEM_RESERVE | MEM_COMMIT, accessFlag); | 71 DWORD accessFlag = pageAccessibility == PageAccessible ? PAGE_READWRITE : PAGE_NOACCESS; |
72 if (!ret) | 72 ret = VirtualAlloc(addr, len, allocType, accessFlag); |
73 ret = VirtualAlloc(0, len, MEM_RESERVE | MEM_COMMIT, accessFlag); | 73 if (!ret && addr) |
74 ret = VirtualAlloc(0, len, allocType, accessFlag); | |
74 #else | 75 #else |
75 int accessFlag = pageAccessibility == PageAccessible ? (PROT_READ | PROT_WRITE) : PROT_NONE; | 76 int accessFlag = pageAccessibility == PageAccessible ? (PROT_READ | PROT_WRITE) : PROT_NONE; |
76 ret = mmap(addr, len, accessFlag, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); | 77 ret = mmap(addr, len, accessFlag, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); |
77 if (ret == MAP_FAILED) | 78 if (ret == MAP_FAILED) |
78 ret = 0; | 79 ret = 0; |
79 #endif | 80 #endif |
80 return ret; | 81 return ret; |
81 } | 82 } |
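
For context, the "address is a hint" contract that systemAllocPages() normalizes is visible directly in the POSIX call it wraps: without MAP_FIXED, mmap() may place the mapping anywhere if the hinted address is unavailable. A minimal standalone sketch of that behavior (illustrative only; the 64-bit hint value is an arbitrary example, not from this CL):

    #include <sys/mman.h>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // An arbitrary, granularity-aligned hint (64-bit example); the kernel
        // is free to ignore it and place the mapping elsewhere.
        void* hint = reinterpret_cast<void*>(static_cast<uintptr_t>(0x600000000000ULL));
        void* ret = mmap(hint, 4096, PROT_READ | PROT_WRITE,
                         MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        if (ret == MAP_FAILED)
            return 1;
        // ret == hint only if the hint was honored; either way the mapping is
        // valid, which is exactly the consistent behavior the wrapper exposes.
        printf("hint %p -> got %p\n", hint, ret);
        munmap(ret, 4096);
        return 0;
    }
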
82 | 83 |
83 static bool trimMapping(void* baseAddr, size_t baseLen, void* trimAddr, size_t trimLen) | |
84 { | |
85 #if OS(WIN) | |
86 return false; | |
87 #else | |
88 char* basePtr = static_cast<char*>(baseAddr); | |
89 char* trimPtr = static_cast<char*>(trimAddr); | |
90 ASSERT(trimPtr >= basePtr); | |
91 ASSERT(trimPtr + trimLen <= basePtr + baseLen); | |
92 size_t preLen = trimPtr - basePtr; | |
93 if (preLen) { | |
94 int ret = munmap(basePtr, preLen); | |
95 RELEASE_ASSERT(!ret); | |
96 } | |
97 size_t postLen = (basePtr + baseLen) - (trimPtr + trimLen); | |
98 if (postLen) { | |
99 int ret = munmap(trimPtr + trimLen, postLen); | |
100 RELEASE_ASSERT(!ret); | |
101 } | |
102 return true; | |
103 #endif | |
104 } | |
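
The trim logic above (which this CL deletes and inlines into allocPages() as the preSlack/postSlack math) is easiest to check with concrete numbers. A self-contained sketch of the arithmetic, using illustrative values rather than anything from the CL:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        // Illustrative values: 4 KB granularity, 2 MB request at 2 MB alignment.
        const size_t len = 2 * 1024 * 1024;
        const size_t align = 2 * 1024 * 1024;
        const size_t baseLen = len + (align - 4096); // the oversized mapping
        const uintptr_t base = 0x40001000; // example mmap() result, misaligned
        // Round up to the next align boundary inside the oversized mapping.
        const uintptr_t trimAddr = (base + (align - 1)) & ~(static_cast<uintptr_t>(align) - 1);
        const size_t preLen = trimAddr - base;                  // munmap'd from the front
        const size_t postLen = (base + baseLen) - (trimAddr + len); // munmap'd from the back
        assert(preLen + len + postLen == baseLen); // nothing lost, nothing leaked
        assert(!(trimAddr & (align - 1)));         // the surviving range is aligned
        return 0;
    }
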
105 | |
106 void* allocPages(void* addr, size_t len, size_t align, PageAccessibilityConfiguration pageAccessibility) | 84 void* allocPages(void* addr, size_t len, size_t align, PageAccessibilityConfiguration pageAccessibility)
107 { | 85 { |
108 ASSERT(len >= kPageAllocationGranularity); | 86 ASSERT(len >= kPageAllocationGranularity); |
109 ASSERT(!(len & kPageAllocationGranularityOffsetMask)); | 87 ASSERT(!(len & kPageAllocationGranularityOffsetMask)); |
110 ASSERT(align >= kPageAllocationGranularity); | 88 ASSERT(align >= kPageAllocationGranularity); |
111 ASSERT(!(align & kPageAllocationGranularityOffsetMask)); | 89 ASSERT(!(align & kPageAllocationGranularityOffsetMask)); |
112 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & kPageAllocationGranularityOffsetMask)); | 90 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & kPageAllocationGranularityOffsetMask));
113 size_t alignOffsetMask = align - 1; | 91 size_t alignOffsetMask = align - 1; |
114 size_t alignBaseMask = ~alignOffsetMask; | 92 size_t alignBaseMask = ~alignOffsetMask; |
115 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & alignOffsetMask)); | 93 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & alignOffsetMask)); |
116 // If the client passed null as the address, choose a good one. | 94 // If the client passed null as the address, choose a good one. |
117 if (!addr) { | 95 if (!addr) { |
118 addr = getRandomPageBase(); | 96 addr = getRandomPageBase(); |
119 addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & alignBaseMask); | 97 addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & alignBaseMask); |
120 } | 98 } |
121 | 99 |
122 // The common case, which is also the least work we can do, is that the | 100 // First try to force an exact-size, aligned allocation from our random base. |
123 // address and length are suitable. Just try it. | 101 for (int count = 0; count < 3; ++count) { |
124 void* ret = systemAllocPages(addr, len, pageAccessibility); | 102 void* ret = systemAllocPages(addr, len, false, pageAccessibility); |
125 // If the alignment is to our liking, we're done. | 103 // If the alignment is to our liking, we're done. |
126 if (!ret || !(reinterpret_cast<uintptr_t>(ret) & alignOffsetMask)) | 104 if (!(reinterpret_cast<uintptr_t>(ret) & alignOffsetMask)) { |
Tom Sepez
2015/10/05 19:13:31
nit: maybe reads cleaner if you test !ret and earl
jschuh
2015/10/05 20:02:29
Removed the nested block entirely.
127 return ret; | 105 if (ret) |
106 recommitSystemPages(ret, len); | |
107 return ret; | |
108 } | |
109 // We failed, so we retry another range depending on the size of our address space. | |
110 freePages(ret, len); | |
111 #if CPU(X86_64) || CPU(ARM64) | |
112 // Keep trying random addresses on 64-bit systems, because they have the space. | |
113 addr = getRandomPageBase(); | |
114 addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & alignBaseMask); | |
115 #else | |
116 // Use a linear probe on 32-bit systems, where the address space tends to be cramped. | |
Tom Sepez
2015/10/05 19:07:30
Thought you were going to try 3 random allocations
jschuh
2015/10/05 20:02:29
Explained in previous message.
117 // This may wrap, but we'll just fall back to the guaranteed method in that case. | |
118 addr = reinterpret_cast<void*>((reinterpret_cast<uintptr_t>(ret) + align) & alignBaseMask); | |
119 #endif | |
120 } | |
128 | 121 |
129 // Annoying. Unmap and map a larger range to be sure to succeed on the | 122 // Map a larger allocation so we can force alignment, but continue randomizing only on
130 // second, slower attempt. | 123 // 64-bit POSIX. |
131 freePages(ret, len); | 124 const size_t tryLen = len + (align - kPageAllocationGranularity); |
Tom Sepez
2015/10/05 19:13:31
nit: the const doesn't buy you a lot here and isn'
jschuh
2015/10/05 20:02:29
Done.
125 RELEASE_ASSERT(tryLen > len); | |
126 while (true) { | |
127 addr = nullptr; | |
128 #if OS(POSIX) && (CPU(X86_64) || CPU(ARM64)) | |
129 addr = getRandomPageBase(); | |
130 #endif | |
131 void* ret = systemAllocPages(addr, tryLen, false, pageAccessibility); | |
132 if (!ret) | |
133 return ret; | |
134 size_t preSlack = reinterpret_cast<uintptr_t>(ret) & (align - 1); | |
135 preSlack = preSlack ? align - preSlack : 0; | |
136 size_t postSlack = tryLen - preSlack - len; | |
137 ASSERT(preSlack || postSlack); | |
138 ASSERT(preSlack < tryLen); | |
139 ASSERT(postSlack < tryLen); | |
140 #if OS(POSIX) // On POSIX we can resize the allocation run. | |
141 if (preSlack) { | |
142 int res = munmap(ret, preSlack); | |
143 RELEASE_ASSERT(!res); | |
144 ret = addr = reinterpret_cast<char*>(ret) + preSlack; | |
145 } | |
146 if (postSlack) { | |
147 int res = munmap(reinterpret_cast<char*>(ret) + len, postSlack); | |
148 RELEASE_ASSERT(!res); | |
149 } | |
150 #else // On Windows we can't resize the allocation run. | |
151 if (preSlack || postSlack) { | |
152 addr = reinterpret_cast<char*>(ret) + preSlack; | |
153 freePages(ret, tryLen); | |
154 ret = systemAllocPages(addr, len, true, pageAccessibility); | |
155 if (!ret) | |
156 return ret; | |
157 } | |
158 #endif | |
159 if (ret == addr) | |
160 return ret; | |
161 freePages(ret, len); | |
162 } | |
132 | 163 |
133 size_t tryLen = len + (align - kPageAllocationGranularity); | |
134 RELEASE_ASSERT(tryLen > len); | |
135 | |
136 // We loop to cater for the unlikely case where another thread maps on top | |
137 // of the aligned location we choose. | |
138 int count = 0; | |
139 while (count++ < 100) { | |
140 ret = systemAllocPages(addr, tryLen, pageAccessibility); | |
141 if (!ret) | |
142 return 0; | |
143 // We can now try and trim out a subset of the mapping. | |
144 addr = reinterpret_cast<void*>((reinterpret_cast<uintptr_t>(ret) + alignOffsetMask) & alignBaseMask); | |
145 | |
146 // On POSIX systems, we can trim the oversized mapping to fit exactly. | |
147 // This will always work on POSIX systems. | |
148 if (trimMapping(ret, tryLen, addr, len)) | |
149 return addr; | |
150 | |
151 // On Windows, you can't trim an existing mapping so we unmap and remap | |
152 // a subset. We used to do for all platforms, but OSX 10.8 has a | |
153 // broken mmap() that ignores address hints for valid, unused addresses. | |
154 freePages(ret, tryLen); | |
155 ret = systemAllocPages(addr, len, pageAccessibility); | |
156 if (ret == addr || !ret) | |
157 return ret; | |
158 | |
159 // Unlikely race / collision. Do the simple thing and just start again. | |
160 freePages(ret, len); | |
161 addr = getRandomPageBase(); | |
162 addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & alignBaseMask); | |
163 } | |
164 IMMEDIATE_CRASH(); | |
165 return 0; | 164 return 0; |
166 } | 165 } |
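
For reference, a hypothetical caller of the new allocPages() contract. The include path is an assumption; the only API identifiers used (WTF::allocPages, PageAccessible) are the ones visible in this diff:

    #include "wtf/PageAllocator.h" // assumed header path, for illustration

    void* reserveAlignedRegion()
    {
        // Passing null for addr lets allocPages() pick (and randomize) the base.
        // Per the contract after this CL, the result is either null or exactly
        // kSize-aligned, committed read/write memory.
        static const size_t kSize = 2 * 1024 * 1024;
        return WTF::allocPages(0, kSize, kSize, WTF::PageAccessible);
    }
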
167 | 166 |
168 void freePages(void* addr, size_t len) | 167 void freePages(void* addr, size_t len) |
169 { | 168 { |
170 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & kPageAllocationGranularityOffsetMask)); | 169 ASSERT(!(reinterpret_cast<uintptr_t>(addr) & kPageAllocationGranularityOffsetMask)); |
171 ASSERT(!(len & kPageAllocationGranularityOffsetMask)); | 170 ASSERT(!(len & kPageAllocationGranularityOffsetMask)); |
172 #if OS(POSIX) | 171 #if OS(POSIX) |
173 int ret = munmap(addr, len); | 172 int ret = munmap(addr, len); |
174 RELEASE_ASSERT(!ret); | 173 RELEASE_ASSERT(!ret); |
(...skipping 58 matching lines...)
233 #else | 232 #else |
234 (void) addr; | 233 (void) addr; |
235 (void) len; | 234 (void) len; |
236 // TODO(cevans): implement this using MEM_RESET for Windows, once we've | 235 // TODO(cevans): implement this using MEM_RESET for Windows, once we've |
237 // decided that the semantics are a match. | 236 // decided that the semantics are a match. |
238 #endif | 237 #endif |
239 } | 238 } |
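
If MEM_RESET's semantics were eventually judged a match, the TODO above might be resolved with something like the following sketch (an assumption, not the CL's code; per the VirtualAlloc documentation, the protection argument is ignored for MEM_RESET but must still be a valid value):

    // Sketch only: would live in the OS(WIN) branch of the function above.
    static void discardSystemPagesSketch(void* addr, size_t len)
    {
        // MEM_RESET marks the pages as reclaim candidates without releasing the
        // reservation; the OS may discard their contents under memory pressure.
        void* ret = VirtualAlloc(addr, len, MEM_RESET, PAGE_READWRITE);
        RELEASE_ASSERT(ret);
    }
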
240 | 239 |
241 } // namespace WTF | 240 } // namespace WTF |
242 | 241 |