| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| | 5 // The allocator shim is only enabled in Release Static builds. |
| | 6 // This #if is needed as gyp can't have different compile |
| | 7 // targets between Debug and Release. |
| | 8 // TODO(wfh): Remove this once gyp is dead. |
| | 9 #if defined(ALLOCATOR_SHIM) |
| | 10 |
| 5 #include <limits.h> | 11 #include <limits.h> |
| 6 #include <malloc.h> | 12 #include <malloc.h> |
| 7 #include <new.h> | 13 #include <new.h> |
| 8 #include <windows.h> | 14 #include <windows.h> |
| 9 #include <stddef.h> | 15 #include <stddef.h> |
| 10 | 16 |
| | 17 #include "allocator_shim_win.h" |
| | 18 |
| 11 // This shim makes it possible to perform additional checks on allocations | 19 // This shim makes it possible to perform additional checks on allocations |
| 12 // before passing them to the Heap functions. | 20 // before passing them to the Heap functions. |
| 13 | 21 |
| 14 // Heap functions are stripped from libcmt.lib using the prep_libc.py | 22 // Override heap functions to perform additional checks: |
| 15 // for each object file stripped, we re-implement them here to allow us to | |
| 16 // perform additional checks: | |
| 17 // 1. Enforcing the maximum size that can be allocated to 2 GB. | 23 // 1. Enforcing the maximum size that can be allocated to 2 GB. |
| 18 // 2. Calling new_handler if malloc fails. | 24 // 2. Calling new_handler if malloc fails. |
| 19 | 25 |
| 20 extern "C" { | 26 // See definitions of original functions in ucrt\ucrt_malloc.h |
| 21 // We set this to 1 because part of the CRT uses a check of _crtheap != 0 | |
| 22 // to test whether the CRT has been initialized. Once we've ripped out | |
| 23 // the allocators from libcmt, we need to provide this definition so that | |
| 24 // the rest of the CRT is still usable. | |
| 25 // heapinit.c | |
| 26 void* _crtheap = reinterpret_cast<void*>(1); | |
| 27 } | |
| 28 | 27 |
| 29 namespace base { | 28 namespace base { |
| 30 namespace allocator { | 29 namespace allocator { |
| 31 bool g_is_win_shim_layer_initialized = false; | 30 bool g_is_win_shim_layer_initialized = false; |
| 32 } // namespace allocator | 31 } // namespace allocator |
| 33 } // namespace base | 32 } // namespace base |
| 34 | 33 |
| 35 namespace { | 34 namespace { |
| 36 | 35 |
| 37 const size_t kWindowsPageSize = 4096; | 36 const size_t kWindowsPageSize = 4096; |
| 38 const size_t kMaxWindowsAllocation = INT_MAX - kWindowsPageSize; | 37 const size_t kMaxWindowsAllocation = INT_MAX - kWindowsPageSize; |
| 39 int new_mode = 0; | 38 int new_mode = 0; |
| 40 | 39 |
| 41 // VS2013 crt uses the process heap as its heap, so we do the same here. | 40 inline HANDLE get_heap_handle() { |
| 42 // See heapinit.c in VS CRT sources. | 41 return reinterpret_cast<HANDLE>(_get_heap_handle()); |
| 43 bool win_heap_init() { | |
| 44 // Set the _crtheap global here. This allows us to offload most of the | |
| 45 // memory management to the CRT, except the functions we need to shim. | |
| 46 _crtheap = GetProcessHeap(); | |
| 47 if (_crtheap == NULL) | |
| 48 return false; | |
| 49 | |
| 50 ULONG enable_lfh = 2; | |
| 51 // NOTE: Setting LFH may fail. Vista already has it enabled. | |
| 52 // And under the debugger, it won't use LFH. So we | |
| 53 // ignore any errors. | |
| 54 HeapSetInformation(_crtheap, HeapCompatibilityInformation, &enable_lfh, | |
| 55 sizeof(enable_lfh)); | |
| 56 | |
| 57 return true; | |
| 58 } | 42 } |
| 59 | 43 |
| 60 void* win_heap_malloc(size_t size) { | 44 void* win_heap_malloc(size_t size) { |
| 61 if (size < kMaxWindowsAllocation) | 45 if (size < kMaxWindowsAllocation) |
| 62 return HeapAlloc(_crtheap, 0, size); | 46 return HeapAlloc(get_heap_handle(), 0, size); |
| 63 return NULL; | 47 return nullptr; |
| 64 } | 48 } |
| 65 | 49 |
| 66 void win_heap_free(void* size) { | 50 void win_heap_free(void* size) { |
| 67 HeapFree(_crtheap, 0, size); | 51 HeapFree(get_heap_handle(), 0, size); |
| 68 } | 52 } |
| 69 | 53 |
| 70 void* win_heap_realloc(void* ptr, size_t size) { | 54 void* win_heap_realloc(void* ptr, size_t size) { |
| 71 if (!ptr) | 55 if (!ptr) |
| 72 return win_heap_malloc(size); | 56 return win_heap_malloc(size); |
| 73 if (!size) { | 57 if (!size) { |
| 74 win_heap_free(ptr); | 58 win_heap_free(ptr); |
| 75 return NULL; | 59 return nullptr; |
| 76 } | 60 } |
| 77 if (size < kMaxWindowsAllocation) | 61 if (size < kMaxWindowsAllocation) |
| 78 return HeapReAlloc(_crtheap, 0, ptr, size); | 62 return HeapReAlloc(get_heap_handle(), 0, ptr, size); |
| 79 return NULL; | 63 return nullptr; |
| 80 } | |
| 81 | |
| 82 void win_heap_term() { | |
| 83 _crtheap = NULL; | |
| 84 } | 64 } |
| 85 | 65 |
| 86 // Call the new handler, if one has been set. | 66 // Call the new handler, if one has been set. |
| 87 // Returns true on successfully calling the handler, false otherwise. | 67 // Returns true on successfully calling the handler, false otherwise. |
| 88 inline bool call_new_handler(bool nothrow, size_t size) { | 68 inline bool call_new_handler(bool nothrow, size_t size) { |
| 89 // Get the current new handler. | 69 // Get the current new handler. |
| 90 _PNH nh = _query_new_handler(); | 70 _PNH nh = _query_new_handler(); |
| 91 #if defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS | 71 #if defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS |
| 92 if (!nh) | 72 if (!nh) |
| 93 return false; | 73 return false; |
| 94 // Since exceptions are disabled, we don't really know if new_handler | 74 // Since exceptions are disabled, we don't really know if new_handler |
| 95 // failed. Assume it will abort if it fails. | 75 // failed. Assume it will abort if it fails. |
| 96 return nh(size); | 76 return nh(size) ? true : false; |
| 97 #else | 77 #else |
| 98 #error "Exceptions in allocator shim are not supported!" | 78 #error "Exceptions in allocator shim are not supported!" |
| 99 #endif // defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS | 79 #endif // defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS |
| 100 return false; | |
| 101 } | 80 } |
| 102 | 81 |
| 103 // Implement a C++ style allocation, which always calls the new_handler | 82 // Implement a C++ style allocation, which always calls the new_handler |
| 104 // on failure. | 83 // on failure. |
| 105 inline void* generic_cpp_alloc(size_t size, bool nothrow) { | 84 inline void* generic_cpp_alloc(size_t size, bool nothrow) { |
| 106 void* ptr; | 85 void* ptr; |
| 107 for (;;) { | 86 for (;;) { |
| 108 ptr = malloc(size); | 87 ptr = malloc(size); |
| 109 if (ptr) | 88 if (ptr) |
| 110 return ptr; | 89 return ptr; |
| 111 if (!call_new_handler(nothrow, size)) | 90 if (!call_new_handler(nothrow, size)) |
| 112 break; | 91 break; |
| 113 } | 92 } |
| 114 return ptr; | 93 return ptr; |
| 115 } | 94 } |
| 116 | 95 |
| 117 } // namespace | 96 } // namespace |
| 118 | 97 |
| 119 // new.cpp | 98 extern "C" { |
| 120 void* operator new(size_t size) { | |
| 121 return generic_cpp_alloc(size, false); | |
| 122 } | |
| 123 | |
| 124 // delete.cpp | |
| 125 void operator delete(void* p) throw() { | |
| 126 free(p); | |
| 127 } | |
| 128 | |
| 129 // new2.cpp | |
| 130 void* operator new[](size_t size) { | |
| 131 return generic_cpp_alloc(size, false); | |
| 132 } | |
| 133 | |
| 134 // delete2.cpp | |
| 135 void operator delete[](void* p) throw() { | |
| 136 free(p); | |
| 137 } | |
| 138 | |
| 139 // newopnt.cpp | |
| 140 void* operator new(size_t size, const std::nothrow_t& nt) { | |
| 141 return generic_cpp_alloc(size, true); | |
| 142 } | |
| 143 | |
| 144 // newaopnt.cpp | |
| 145 void* operator new[](size_t size, const std::nothrow_t& nt) { | |
| 146 return generic_cpp_alloc(size, true); | |
| 147 } | |
| 148 | 99 |
| 149 // This function behaves similarly to MSVC's _set_new_mode. | 100 // This function behaves similarly to MSVC's _set_new_mode. |
| 150 // If flag is 0 (default), calls to malloc will behave normally. | 101 // If flag is 0 (default), calls to malloc will behave normally. |
| 151 // If flag is 1, calls to malloc will behave like calls to new, | 102 // If flag is 1, calls to malloc will behave like calls to new, |
| 152 // and the std_new_handler will be invoked on failure. | 103 // and the std_new_handler will be invoked on failure. |
| 153 // Returns the previous mode. | 104 // Returns the previous mode. |
| 154 // new_mode.cpp | 105 // new_mode.cpp |
| 155 int _set_new_mode(int flag) throw() { | 106 int _set_new_mode(int flag) { |
| | 107 // The MS CRT calls this function early on in startup, so this serves as a low |
| | 108 // overhead proof that the allocator shim is in place for this process. |
| | 109 base::allocator::g_is_win_shim_layer_initialized = true; |
| 156 int old_mode = new_mode; | 110 int old_mode = new_mode; |
| 157 new_mode = flag; | 111 new_mode = flag; |
| 158 return old_mode; | 112 return old_mode; |
| 159 } | 113 } |
| 160 | 114 |
| 161 // new_mode.cpp | 115 // new_mode.cpp |
| 162 int _query_new_mode() { | 116 int _query_new_mode() { |
| 163 return new_mode; | 117 return new_mode; |
| 164 } | 118 } |
| 165 | 119 |
| 166 extern "C" { | |
| 167 // malloc.c | 120 // malloc.c |
| 168 void* malloc(size_t size) { | 121 __declspec(restrict) __declspec(allocator) void* malloc(size_t size) { |
| 169 void* ptr; | 122 void* ptr; |
| 170 for (;;) { | 123 for (;;) { |
| 171 ptr = win_heap_malloc(size); | 124 ptr = win_heap_malloc(size); |
| 172 if (ptr) | 125 if (ptr) |
| 173 return ptr; | 126 return ptr; |
| 174 | 127 |
| 175 if (!new_mode || !call_new_handler(true, size)) | 128 if (!new_mode || !call_new_handler(true, size)) |
| 176 break; | 129 break; |
| 177 } | 130 } |
| 178 return ptr; | 131 return ptr; |
| 179 } | 132 } |
| 180 | 133 |
| 181 // Symbol to allow weak linkage to win_heap_malloc from memory_win.cc. | 134 // Symbol to allow weak linkage to win_heap_malloc from memory_win.cc. |
| 182 void* (*malloc_unchecked)(size_t) = &win_heap_malloc; | 135 void* (*malloc_unchecked)(size_t) = &win_heap_malloc; |
| 183 | 136 |
| 184 // free.c | 137 // free.c |
| 185 void free(void* p) { | 138 void free(void* p) { |
| 186 win_heap_free(p); | 139 win_heap_free(p); |
| 187 return; | 140 return; |
| 188 } | 141 } |
| 189 | 142 |
| 190 // realloc.c | 143 // realloc.c |
| 191 void* realloc(void* ptr, size_t size) { | 144 __declspec(restrict) __declspec(allocator) void* realloc(void* ptr, |
| | 145 size_t size) { |
| 192 // Webkit is brittle for allocators that return NULL for malloc(0). The | 146 // Webkit is brittle for allocators that return NULL for malloc(0). The |
| 193 // realloc(0, 0) code path does not guarantee a non-NULL return, so be sure | 147 // realloc(0, 0) code path does not guarantee a non-NULL return, so be sure |
| 194 // to call malloc for this case. | 148 // to call malloc for this case. |
| 195 if (!ptr) | 149 if (!ptr) |
| 196 return malloc(size); | 150 return malloc(size); |
| 197 | 151 |
| 198 void* new_ptr; | 152 void* new_ptr; |
| 199 for (;;) { | 153 for (;;) { |
| 200 new_ptr = win_heap_realloc(ptr, size); | 154 new_ptr = win_heap_realloc(ptr, size); |
| 201 | 155 |
| 202 // Subtle warning: NULL return does not always indicate out-of-memory. If | 156 // Subtle warning: NULL return does not always indicate out-of-memory. If |
| 203 // the requested new size is zero, realloc should free the ptr and return | 157 // the requested new size is zero, realloc should free the ptr and return |
| 204 // NULL. | 158 // NULL. |
| 205 if (new_ptr || !size) | 159 if (new_ptr || !size) |
| 206 return new_ptr; | 160 return new_ptr; |
| 207 if (!new_mode || !call_new_handler(true, size)) | 161 if (!new_mode || !call_new_handler(true, size)) |
| 208 break; | 162 break; |
| 209 } | 163 } |
| 210 return new_ptr; | 164 return new_ptr; |
| 211 } | 165 } |
| 212 | 166 |
| 213 // heapinit.c | |
| 214 intptr_t _get_heap_handle() { | |
| 215 return reinterpret_cast<intptr_t>(_crtheap); | |
| 216 } | |
| 217 | |
| 218 // heapinit.c | |
| 219 int _heap_init() { | |
| 220 base::allocator::g_is_win_shim_layer_initialized = true; | |
| 221 return win_heap_init() ? 1 : 0; | |
| 222 } | |
| 223 | |
| 224 // heapinit.c | |
| 225 void _heap_term() { | |
| 226 win_heap_term(); | |
| 227 } | |
| 228 | |
| 229 // calloc.c | 167 // calloc.c |
| 230 void* calloc(size_t n, size_t elem_size) { | 168 __declspec(restrict) __declspec(allocator) void* calloc(size_t n, |
| | 169 size_t elem_size) { |
| 231 // Overflow check. | 170 // Overflow check. |
| 232 const size_t size = n * elem_size; | 171 const size_t size = n * elem_size; |
| 233 if (elem_size != 0 && size / elem_size != n) | 172 if (elem_size != 0 && size / elem_size != n) |
| 234 return NULL; | 173 return nullptr; |
| 235 | 174 |
| 236 void* result = malloc(size); | 175 void* result = malloc(size); |
| 237 if (result != NULL) { | 176 if (result) { |
| 238 memset(result, 0, size); | 177 memset(result, 0, size); |
| 239 } | 178 } |
| 240 return result; | 179 return result; |
| 241 } | 180 } |
| 242 | 181 |
| 243 // recalloc.c | 182 } // extern C |
| 244 void* _recalloc(void* p, size_t n, size_t elem_size) { | |
| 245 if (!p) | |
| 246 return calloc(n, elem_size); | |
| 247 | 183 |
| 248 // This API is a bit odd. | 184 #endif // defined(ALLOCATOR_SHIM) |
| 249 // Note: recalloc only guarantees zeroed memory when p is NULL. | |
| 250 // Generally, calls to malloc() have padding. So a request | |
| 251 // to malloc N bytes actually malloc's N+x bytes. Later, if | |
| 252 // that buffer is passed to recalloc, we don't know what N | |
| 253 // was anymore. We only know what N+x is. As such, there is | |
| 254 // no way to know what to zero out. | |
| 255 const size_t size = n * elem_size; | |
| 256 if (elem_size != 0 && size / elem_size != n) | |
| 257 return NULL; | |
| 258 return realloc(p, size); | |
| 259 } | |
| 260 | |
| 261 // calloc_impl.c | |
| 262 void* _calloc_impl(size_t n, size_t size) { | |
| 263 return calloc(n, size); | |
| 264 } | |
| 265 | |
| 266 #ifndef NDEBUG | |
| 267 #undef malloc | |
| 268 #undef free | |
| 269 #undef calloc | |
| 270 | |
| 271 static int error_handler(int reportType) { | |
| 272 switch (reportType) { | |
| 273 case 0: // _CRT_WARN | |
| 274 __debugbreak(); | |
| 275 return 0; | |
| 276 | |
| 277 case 1: // _CRT_ERROR | |
| 278 __debugbreak(); | |
| 279 return 0; | |
| 280 | |
| 281 case 2: // _CRT_ASSERT | |
| 282 __debugbreak(); | |
| 283 return 0; | |
| 284 } | |
| 285 char* p = NULL; | |
| 286 *p = '\0'; | |
| 287 return 0; | |
| 288 } | |
| 289 | |
| 290 int _CrtDbgReport(int reportType, | |
| 291 const char*, | |
| 292 int, | |
| 293 const char*, | |
| 294 const char*, | |
| 295 ...) { | |
| 296 return error_handler(reportType); | |
| 297 } | |
| 298 | |
| 299 int _CrtDbgReportW(int reportType, | |
| 300 const wchar_t*, | |
| 301 int, | |
| 302 const wchar_t*, | |
| 303 const wchar_t*, | |
| 304 ...) { | |
| 305 return error_handler(reportType); | |
| 306 } | |
| 307 | |
| 308 int _CrtSetReportMode(int, int) { | |
| 309 return 0; | |
| 310 } | |
| 311 | |
| 312 void* _malloc_dbg(size_t size, int, const char*, int) { | |
| 313 return malloc(size); | |
| 314 } | |
| 315 | |
| 316 void* _realloc_dbg(void* ptr, size_t size, int, const char*, int) { | |
| 317 return realloc(ptr, size); | |
| 318 } | |
| 319 | |
| 320 void _free_dbg(void* ptr, int) { | |
| 321 free(ptr); | |
| 322 } | |
| 323 | |
| 324 void* _calloc_dbg(size_t n, size_t size, int, const char*, int) { | |
| 325 return calloc(n, size); | |
| 326 } | |
| 327 #endif // NDEBUG | |
| 328 | |
| 329 } // extern C | |
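For context, here is a minimal usage sketch of the behavior the shimmed CRT functions above provide. This is hypothetical test code, not part of the change: `my_new_handler`, `g_handler_calls`, and the specific request sizes are illustrative, and it assumes the shim is linked into the binary in place of the default CRT allocator.

```cpp
// Hypothetical demonstration of the shim's behavior (not part of this CL).
#include <limits.h>
#include <new.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int g_handler_calls = 0;

// New handler invoked by the shim when an allocation fails and new_mode is 1.
// Returning 0 tells the allocation loop to stop retrying.
static int my_new_handler(size_t /* size */) {
  ++g_handler_calls;
  return 0;
}

int main() {
  // 1. Requests at or above kMaxWindowsAllocation (~2 GB) are refused.
  void* huge = malloc(static_cast<size_t>(INT_MAX));
  printf("oversized malloc -> %p\n", huge);  // Expected: NULL.

  // 2. calloc rejects element counts whose product overflows size_t.
  void* wrapped = calloc(SIZE_MAX / 2, 4);
  printf("overflowing calloc -> %p\n", wrapped);  // Expected: NULL.

  // 3. After _set_new_mode(1), a failing malloc calls the installed handler.
  _set_new_handler(my_new_handler);
  _set_new_mode(1);
  void* p = malloc(static_cast<size_t>(INT_MAX));
  printf("handler calls: %d, ptr: %p\n", g_handler_calls, p);  // Expected: 1, NULL.

  free(huge);  // free(NULL) is a no-op, as with the regular CRT heap.
  return 0;
}
```

Note that with _HAS_EXCEPTIONS disabled the handler's return value is the only signal call_new_handler gets, so a handler that cannot actually release memory should return 0 (as above) rather than keep the retry loop spinning.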