| OLD | NEW |
| 1 // Copyright (c) 2005, 2007, Google Inc. | 1 // Copyright (c) 2005, 2007, Google Inc. |
| 2 // All rights reserved. | 2 // All rights reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
| 9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
| 10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
| (...skipping 56 matching lines...) |
| 67 | 67 |
| 68 // Page size is initialized on demand | 68 // Page size is initialized on demand |
| 69 static size_t pagesize = 0; | 69 static size_t pagesize = 0; |
| 70 | 70 |
| 71 // Configuration parameters. | 71 // Configuration parameters. |
| 72 | 72 |
| 73 #if HAVE(MMAP) | 73 #if HAVE(MMAP) |
| 74 static bool use_mmap = true; | 74 static bool use_mmap = true; |
| 75 #endif | 75 #endif |
| 76 | 76 |
| 77 #if HAVE(VIRTUALALLOC) | |
| 78 static bool use_VirtualAlloc = true; | |
| 79 #endif | |
| 80 | |
| 81 // Flags to keep us from retrying allocators that failed. | 77 // Flags to keep us from retrying allocators that failed. |
| 82 static bool devmem_failure = false; | 78 static bool devmem_failure = false; |
| 83 static bool sbrk_failure = false; | 79 static bool sbrk_failure = false; |
| 84 static bool mmap_failure = false; | 80 static bool mmap_failure = false; |
| 85 static bool VirtualAlloc_failure = false; | |
| 86 | 81 |
| 87 static const int32_t FLAGS_malloc_devmem_start = 0; | 82 static const int32_t FLAGS_malloc_devmem_start = 0; |
| 88 static const int32_t FLAGS_malloc_devmem_limit = 0; | 83 static const int32_t FLAGS_malloc_devmem_limit = 0; |
| 89 | 84 |
| 90 #if HAVE(MMAP) | 85 #if HAVE(MMAP) |
| 91 | 86 |
| 92 static void* TryMmap(size_t size, size_t *actual_size, size_t alignment) { | 87 static void* TryMmap(size_t size, size_t *actual_size, size_t alignment) { |
| 93 // Enforce page alignment | 88 // Enforce page alignment |
| 94 if (pagesize == 0) pagesize = getpagesize(); | 89 if (pagesize == 0) pagesize = getpagesize(); |
| 95 if (alignment < pagesize) alignment = pagesize; | 90 if (alignment < pagesize) alignment = pagesize; |
| (...skipping 35 matching lines...) |
| 131 if (adjust < extra) { | 126 if (adjust < extra) { |
| 132 munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust); | 127 munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust); |
| 133 } | 128 } |
| 134 | 129 |
| 135 ptr += adjust; | 130 ptr += adjust; |
| 136 return reinterpret_cast<void*>(ptr); | 131 return reinterpret_cast<void*>(ptr); |
| 137 } | 132 } |
| 138 | 133 |
| 139 #endif /* HAVE(MMAP) */ | 134 #endif /* HAVE(MMAP) */ |
| 140 | 135 |
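The diff viewer elides the middle of TryMmap above, but its shape matches the tail that survives: round size up, over-allocate by (alignment - pagesize), then munmap the misaligned head and the unused tail. A self-contained sketch of that idiom, assuming POSIX mmap/munmap (the function name and structure here are illustrative, not this file's exact code):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    // Hypothetical standalone version of the over-allocate-and-trim idiom.
    static void* aligned_mmap_sketch(size_t size, size_t alignment) {
        size_t pagesize = getpagesize();
        if (alignment < pagesize) alignment = pagesize;
        size = ((size + alignment - 1) / alignment) * alignment;

        // Over-allocating by (alignment - pagesize) guarantees an aligned
        // block of `size` bytes exists somewhere inside the mapping.
        size_t extra = (alignment > pagesize) ? alignment - pagesize : 0;
        void* result = mmap(0, size + extra, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (result == MAP_FAILED) return 0;

        uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
        size_t adjust = (ptr & (alignment - 1)) ? alignment - (ptr & (alignment - 1)) : 0;

        // munmap, unlike VirtualFree, can carve pages off either end of a mapping.
        if (adjust > 0)
            munmap(result, adjust);                                           // misaligned head
        if (adjust < extra)
            munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust); // unused tail
        return reinterpret_cast<void*>(ptr + adjust);
    }

The same arithmetic appears in the VirtualAlloc path deleted below; only the trimming primitive differs.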
| 141 #if HAVE(VIRTUALALLOC) | |
| 142 | |
| 143 static void* TryVirtualAlloc(size_t size, size_t *actual_size, size_t alignment) { | |
| 144 // Enforce page alignment | |
| 145 if (pagesize == 0) { | |
| 146 SYSTEM_INFO system_info; | |
| 147 GetSystemInfo(&system_info); | |
| 148 pagesize = system_info.dwPageSize; | |
| 149 } | |
| 150 | |
| 151 if (alignment < pagesize) alignment = pagesize; | |
| 152 size = ((size + alignment - 1) / alignment) * alignment; | |
| 153 | |
| 154 // could theoretically return the "extra" bytes here, but this | |
| 155 // is simple and correct. | |
| 156 if (actual_size) | |
| 157 *actual_size = size; | |
| 158 | |
| 159 // Ask for extra memory if alignment > pagesize | |
| 160 size_t extra = 0; | |
| 161 if (alignment > pagesize) { | |
| 162 extra = alignment - pagesize; | |
| 163 } | |
| 164 void* result = VirtualAlloc(NULL, size + extra, | |
| 165 MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN, | |
| 166 PAGE_READWRITE); | |
| 167 | |
| 168 if (result == NULL) { | |
| 169 VirtualAlloc_failure = true; | |
| 170 return NULL; | |
| 171 } | |
| 172 | |
| 173 // Adjust the return memory so it is aligned | |
| 174 uintptr_t ptr = reinterpret_cast<uintptr_t>(result); | |
| 175 size_t adjust = 0; | |
| 176 if ((ptr & (alignment - 1)) != 0) { | |
| 177 adjust = alignment - (ptr & (alignment - 1)); | |
| 178 } | |
| 179 | |
| 180 // Return the unused memory to the system - we'd like to release but the best we can do | |
| 181 // is decommit, since Windows only lets you free the whole allocation. | |
| 182 if (adjust > 0) { | |
| 183 VirtualFree(reinterpret_cast<void*>(ptr), adjust, MEM_DECOMMIT); | |
| 184 } | |
| 185 if (adjust < extra) { | |
| 186 VirtualFree(reinterpret_cast<void*>(ptr + adjust + size), extra-adjust, MEM_DECOMMIT); | |
| 187 } | |
| 188 | |
| 189 ptr += adjust; | |
| 190 return reinterpret_cast<void*>(ptr); | |
| 191 } | |
| 192 | |
| 193 #endif /* HAVE(VIRTUALALLOC) */ | |
| 194 | |
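For context on the deleted "we'd like to release but the best we can do is decommit" comment: VirtualFree can release address space only for an entire reservation, from its base address, with a size of zero, so the trimmed head and tail pages can merely be decommitted. A minimal sketch of the distinction, using hypothetical offsets (Win32 as documented, not code from this file):

    #include <windows.h>

    // `base` is assumed to be the pointer returned by a single VirtualAlloc
    // reservation at least three pages long (hypothetical example).
    static void trim_sketch(char* base) {
        // Decommit: legal on any page-aligned sub-range of a reservation.
        // The pages lose their physical backing but stay reserved.
        VirtualFree(base + 4096, 4096, MEM_DECOMMIT);

        // Release: frees address space, but only the whole reservation.
        // dwSize must be 0 and the pointer must be exactly `base`; there is
        // no way to release just a trimmed head or tail.
        VirtualFree(base, 0, MEM_RELEASE);
    }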
| 195 void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, size_t alignment) { | 136 void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, size_t alignment) { |
| 196 // Discard requests that overflow | 137 // Discard requests that overflow |
| 197 if (size + alignment < size) return NULL; | 138 if (size + alignment < size) return NULL; |
| 198 | 139 |
| 199 SpinLockHolder lock_holder(&spinlock); | 140 SpinLockHolder lock_holder(&spinlock); |
| 200 | 141 |
| 201 // Enforce minimum alignment | 142 // Enforce minimum alignment |
| 202 if (alignment < sizeof(MemoryAligner)) alignment = sizeof(MemoryAligner); | 143 if (alignment < sizeof(MemoryAligner)) alignment = sizeof(MemoryAligner); |
| 203 | 144 |
| 204 // Try twice, once avoiding allocators that failed before, and once | 145 // Try twice, once avoiding allocators that failed before, and once |
| 205 // more trying all allocators even if they failed before. | 146 // more trying all allocators even if they failed before. |
| 206 for (int i = 0; i < 2; i++) { | 147 for (int i = 0; i < 2; i++) { |
| 207 | 148 |
| 208 #if HAVE(MMAP) | 149 #if HAVE(MMAP) |
| 209 if (use_mmap && !mmap_failure) { | 150 if (use_mmap && !mmap_failure) { |
| 210 void* result = TryMmap(size, actual_size, alignment); | 151 void* result = TryMmap(size, actual_size, alignment); |
| 211 if (result != NULL) return result; | 152 if (result != NULL) return result; |
| 212 } | 153 } |
| 213 #endif | 154 #endif |
| 214 | 155 |
| 215 #if HAVE(VIRTUALALLOC) | |
| 216 if (use_VirtualAlloc && !VirtualAlloc_failure) { | |
| 217 void* result = TryVirtualAlloc(size, actual_size, alignment); | |
| 218 if (result != NULL) return result; | |
| 219 } | |
| 220 #endif | |
| 221 | |
| 222 // nothing worked - reset failure flags and try again | 156 // nothing worked - reset failure flags and try again |
| 223 devmem_failure = false; | 157 devmem_failure = false; |
| 224 sbrk_failure = false; | 158 sbrk_failure = false; |
| 225 mmap_failure = false; | 159 mmap_failure = false; |
| 226 VirtualAlloc_failure = false; | |
| 227 } | 160 } |
| 228 return NULL; | 161 return NULL; |
| 229 } | 162 } |
| 230 | 163 |
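The overflow guard at the top of TCMalloc_SystemAlloc leans on defined unsigned wraparound: if `size + alignment` overflows, the sum wraps modulo 2^N and comes out smaller than `size`. A tiny self-contained check (example values are mine, not the file's):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    int main() {
        size_t alignment = 4096;

        size_t huge = SIZE_MAX - 100;        // hypothetical absurd request
        assert(huge + alignment < huge);     // wrapped: the guard rejects it

        size_t ok = 1 << 20;                 // ordinary request
        assert(!(ok + alignment < ok));      // no wrap: the guard passes it
        return 0;
    }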
| 231 #if HAVE(MADV_FREE_REUSE) | 164 #if HAVE(MADV_FREE_REUSE) |
| 232 | 165 |
| 233 void TCMalloc_SystemRelease(void* start, size_t length) | 166 void TCMalloc_SystemRelease(void* start, size_t length) |
| 234 { | 167 { |
| 235 int madviseResult; | 168 int madviseResult; |
| 236 | 169 |
| (...skipping 49 matching lines...) |
| 286 | 219 |
| 287 #elif HAVE(MMAP) | 220 #elif HAVE(MMAP) |
| 288 | 221 |
| 289 void TCMalloc_SystemRelease(void* start, size_t length) | 222 void TCMalloc_SystemRelease(void* start, size_t length) |
| 290 { | 223 { |
| 291 void* newAddress = mmap(start, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0); | 224 void* newAddress = mmap(start, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0); |
| 292 // If the mmap failed then that's ok, we just won't return the memory to the system. | 225 // If the mmap failed then that's ok, we just won't return the memory to the system. |
| 293 ASSERT_UNUSED(newAddress, newAddress == start || newAddress == reinterpret_cast<void*>(MAP_FAILED)); | 226 ASSERT_UNUSED(newAddress, newAddress == start || newAddress == reinterpret_cast<void*>(MAP_FAILED)); |
| 294 } | 227 } |
| 295 | 228 |
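The mmap-based TCMalloc_SystemRelease just above never unmaps anything: remapping the same range with MAP_FIXED | MAP_ANONYMOUS atomically replaces the old pages with fresh zero-fill-on-demand ones, so the physical memory is returned while the address range stays reserved for a later recommit. A standalone sketch of the idiom under POSIX semantics:

    #include <assert.h>
    #include <stddef.h>
    #include <string.h>
    #include <sys/mman.h>

    int main() {
        const size_t length = 1 << 20;
        void* mem = mmap(0, length, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        assert(mem != MAP_FAILED);
        char* p = static_cast<char*>(mem);
        memset(p, 0xab, length);             // dirty the pages

        // Replace the mapping in place: the old pages (and their contents)
        // are discarded, yet the virtual range remains mapped and usable.
        void* q = mmap(p, length, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
        assert(q == p);
        assert(p[0] == 0);                   // zero-filled on next touch
        return 0;
    }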
| 296 #elif HAVE(VIRTUALALLOC) | |
| 297 | |
| 298 void TCMalloc_SystemRelease(void* start, size_t length) | |
| 299 { | |
| 300 if (VirtualFree(start, length, MEM_DECOMMIT)) | |
| 301 return; | |
| 302 | |
| 303 // The decommit may fail if the memory region consists of allocations | |
| 304 // from more than one call to VirtualAlloc. In this case, fall back to | |
| 305 // using VirtualQuery to retrieve the allocation boundaries and decommit | |
| 306 // them each individually. | |
| 307 | |
| 308 char* ptr = static_cast<char*>(start); | |
| 309 char* end = ptr + length; | |
| 310 MEMORY_BASIC_INFORMATION info; | |
| 311 while (ptr < end) { | |
| 312 size_t resultSize = VirtualQuery(ptr, &info, sizeof(info)); | |
| 313 ASSERT_UNUSED(resultSize, resultSize == sizeof(info)); | |
| 314 | |
| 315 size_t decommitSize = min<size_t>(info.RegionSize, end - ptr); | |
| 316 BOOL success = VirtualFree(ptr, decommitSize, MEM_DECOMMIT); | |
| 317 ASSERT_UNUSED(success, success); | |
| 318 ptr += decommitSize; | |
| 319 } | |
| 320 } | |
| 321 | |
| 322 #else | 229 #else |
| 323 | 230 |
| 324 // Platforms that don't support returning memory use an empty inline version of TCMalloc_SystemRelease | 231 // Platforms that don't support returning memory use an empty inline version of TCMalloc_SystemRelease |
| 325 // declared in TCSystemAlloc.h | 232 // declared in TCSystemAlloc.h |
| 326 | 233 |
| 327 #endif | 234 #endif |
| 328 | 235 |
| 329 #if HAVE(MADV_FREE_REUSE) | 236 #if HAVE(MADV_FREE_REUSE) |
| 330 | 237 |
| 331 void TCMalloc_SystemCommit(void* start, size_t length) | 238 void TCMalloc_SystemCommit(void* start, size_t length) |
| 332 { | 239 { |
| 333 while (madvise(start, length, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { } | 240 while (madvise(start, length, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { } |
| 334 } | 241 } |
| 335 | 242 |
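MADV_FREE_REUSE is Darwin-specific and comes paired with MADV_FREE_REUSABLE: release advises the kernel that the pages are reusable (reclaimable, and no longer counted against the process footprint), and commit must "reuse" the range before touching it again, retrying on transient EAGAIN just as TCMalloc_SystemCommit does above. A sketch of the paired protocol, assuming Darwin's madvise extensions rather than the exact elided SystemRelease body:

    #include <errno.h>
    #include <stddef.h>
    #include <sys/mman.h>

    // Darwin sketch: hand pages back without unmapping them.
    static void release_pages(void* start, size_t length) {
        while (madvise(start, length, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
    }

    // Take the pages back before reusing the range; EAGAIN is transient.
    static void commit_pages(void* start, size_t length) {
        while (madvise(start, length, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
    }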
| 336 #elif HAVE(VIRTUALALLOC) | |
| 337 | |
| 338 void TCMalloc_SystemCommit(void* start, size_t length) | |
| 339 { | |
| 340 if (VirtualAlloc(start, length, MEM_COMMIT, PAGE_READWRITE) == start) | |
| 341 return; | |
| 342 | |
| 343 // The commit may fail if the memory region consists of allocations | |
| 344 // from more than one call to VirtualAlloc. In this case, fall back to | |
| 345 // using VirtualQuery to retrieve the allocation boundaries and commit them | |
| 346 // each individually. | |
| 347 | |
| 348 char* ptr = static_cast<char*>(start); | |
| 349 char* end = ptr + length; | |
| 350 MEMORY_BASIC_INFORMATION info; | |
| 351 while (ptr < end) { | |
| 352 size_t resultSize = VirtualQuery(ptr, &info, sizeof(info)); | |
| 353 ASSERT_UNUSED(resultSize, resultSize == sizeof(info)); | |
| 354 | |
| 355 size_t commitSize = min<size_t>(info.RegionSize, end - ptr); | |
| 356 void* newAddress = VirtualAlloc(ptr, commitSize, MEM_COMMIT, PAGE_READWRITE); | |
| 357 ASSERT_UNUSED(newAddress, newAddress == ptr); | |
| 358 ptr += commitSize; | |
| 359 } | |
| 360 } | |
| 361 | |
| 362 #else | 243 #else |
| 363 | 244 |
| 364 // Platforms that don't need to explicitly commit memory use an empty inline version of TCMalloc_SystemCommit | 245 // Platforms that don't need to explicitly commit memory use an empty inline version of TCMalloc_SystemCommit |
| 365 // declared in TCSystemAlloc.h | 246 // declared in TCSystemAlloc.h |
| 366 | 247 |
| 367 #endif | 248 #endif |
| 368 | 249 |
| 369 #endif // #if !USE(SYSTEM_MALLOC) | 250 #endif // #if !USE(SYSTEM_MALLOC) |
| 370 | 251 |