// Copyright (c) 2005, 2007, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat

#include "config.h"
#if !USE(SYSTEM_MALLOC)
#include "TCSystemAlloc.h"

#include "Assertions.h"
#include "CheckedArithmetic.h"
#include "TCSpinLock.h"
#include "UnusedParam.h"
#include "VMTags.h"
#include <algorithm>
#include <stdint.h>

#if OS(WINDOWS)
#include "windows.h"
#else
#include <errno.h>
#include <unistd.h>
#include <sys/mman.h>
#endif

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

using namespace std;

// Structure for discovering alignment
union MemoryAligner {
  void*  p;
  double d;
  size_t s;
};

static SpinLock spinlock = SPINLOCK_INITIALIZER;

// Page size is initialized on demand
static size_t pagesize = 0;

// Configuration parameters.
//
// if use_devmem is true, either use_sbrk or use_mmap must also be true.
// For 2.2 kernels, it looks like the sbrk address space (500MBish) and
// the mmap address space (1300MBish) are disjoint, so we need both allocators
// to get as much virtual memory as possible.
#ifndef WTF_CHANGES
static bool use_devmem = false;
static bool use_sbrk = false;
#endif

#if HAVE(MMAP)
static bool use_mmap = true;
#endif

#if HAVE(VIRTUALALLOC)
static bool use_VirtualAlloc = true;
#endif

// Flags to keep us from retrying allocators that failed.
static bool devmem_failure = false;
static bool sbrk_failure = false;
static bool mmap_failure = false;
static bool VirtualAlloc_failure = false;

#ifndef WTF_CHANGES
DEFINE_int32(malloc_devmem_start, 0,
             "Physical memory starting location in MB for /dev/mem allocation."
             " Setting this to 0 disables /dev/mem allocation");
DEFINE_int32(malloc_devmem_limit, 0,
             "Physical memory limit location in MB for /dev/mem allocation."
             " Setting this to 0 means no limit.");
#else
static const int32_t FLAGS_malloc_devmem_start = 0;
static const int32_t FLAGS_malloc_devmem_limit = 0;
#endif

#ifndef WTF_CHANGES

static void* TrySbrk(size_t size, size_t *actual_size, size_t alignment) {
  size = ((size + alignment - 1) / alignment) * alignment;
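  // (Rounds size up to a multiple of alignment; e.g. size = 5000 with
  // alignment = 4096 gives (5000 + 4095) / 4096 * 4096 = 8192.)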

  // could theoretically return the "extra" bytes here, but this
  // is simple and correct.
  if (actual_size)
    *actual_size = size;

  void* result = sbrk(size);
  if (result == reinterpret_cast<void*>(-1)) {
    sbrk_failure = true;
    return NULL;
  }

  // Is it aligned?
  uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
  if ((ptr & (alignment-1)) == 0) return result;

  // Try to get more memory for alignment
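  // If the extra bytes land immediately after our block, the combined run
  // [ptr, ptr + size + extra) is contiguous, so ptr + extra is aligned and
  // still has size bytes before the new break.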
  size_t extra = alignment - (ptr & (alignment-1));
  void* r2 = sbrk(extra);
  if (reinterpret_cast<uintptr_t>(r2) == (ptr + size)) {
    // Contiguous with previous result
    return reinterpret_cast<void*>(ptr + extra);
  }

  // Give up and ask for "size + alignment - 1" bytes so
  // that we can find an aligned region within it.
  result = sbrk(size + alignment - 1);
  if (result == reinterpret_cast<void*>(-1)) {
    sbrk_failure = true;
    return NULL;
  }
  ptr = reinterpret_cast<uintptr_t>(result);
  if ((ptr & (alignment-1)) != 0) {
    ptr += alignment - (ptr & (alignment-1));
  }
  return reinterpret_cast<void*>(ptr);
}

#endif /* ifndef(WTF_CHANGES) */

#if HAVE(MMAP)

static void* TryMmap(size_t size, size_t *actual_size, size_t alignment) {
  // Enforce page alignment
  if (pagesize == 0) pagesize = getpagesize();
  if (alignment < pagesize) alignment = pagesize;
  size = ((size + alignment - 1) / alignment) * alignment;

  // could theoretically return the "extra" bytes here, but this
  // is simple and correct.
  if (actual_size)
    *actual_size = size;

  // Ask for extra memory if alignment > pagesize
  size_t extra = 0;
  if (alignment > pagesize) {
    extra = alignment - pagesize;
  }
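  // The mapping is padded with one extra page at each end; the two
  // MAP_FIXED/PROT_NONE remappings below turn those pages into inaccessible
  // guard pages, so stray accesses just past either end of the returned
  // region fault instead of silently corrupting neighboring memory.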
  Checked<size_t> mapSize = Checked<size_t>(size) + extra + 2 * pagesize;
  void* result = mmap(NULL, mapSize.unsafeGet(),
                      PROT_READ | PROT_WRITE,
                      MAP_PRIVATE|MAP_ANONYMOUS,
                      VM_TAG_FOR_TCMALLOC_MEMORY, 0);
  if (result == reinterpret_cast<void*>(MAP_FAILED)) {
    mmap_failure = true;
    return NULL;
  }
  mmap(result, pagesize, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_TCMALLOC_MEMORY, 0);
  mmap(static_cast<char*>(result) + (mapSize - pagesize).unsafeGet(), pagesize, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_TCMALLOC_MEMORY, 0);
  result = static_cast<char*>(result) + pagesize;
  // Adjust the return memory so it is aligned
  uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
  size_t adjust = 0;
  if ((ptr & (alignment - 1)) != 0) {
    adjust = alignment - (ptr & (alignment - 1));
  }

  // Return the unused memory to the system
  if (adjust > 0) {
    munmap(reinterpret_cast<void*>(ptr), adjust);
  }
  if (adjust < extra) {
    munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust);
  }

  ptr += adjust;
  return reinterpret_cast<void*>(ptr);
}

#endif /* HAVE(MMAP) */

#if HAVE(VIRTUALALLOC)

static void* TryVirtualAlloc(size_t size, size_t *actual_size, size_t alignment) {
  // Enforce page alignment
  if (pagesize == 0) {
    SYSTEM_INFO system_info;
    GetSystemInfo(&system_info);
    pagesize = system_info.dwPageSize;
  }

  if (alignment < pagesize) alignment = pagesize;
  size = ((size + alignment - 1) / alignment) * alignment;

  // could theoretically return the "extra" bytes here, but this
  // is simple and correct.
  if (actual_size)
    *actual_size = size;

  // Ask for extra memory if alignment > pagesize
  size_t extra = 0;
  if (alignment > pagesize) {
    extra = alignment - pagesize;
  }
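  // MEM_RESERVE | MEM_COMMIT reserves address space and backs it in a single
  // call; MEM_TOP_DOWN requests the highest available address (presumably to
  // keep these large regions away from the normal bottom-up allocations).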
  void* result = VirtualAlloc(NULL, size + extra,
                              MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
                              PAGE_READWRITE);

  if (result == NULL) {
    VirtualAlloc_failure = true;
    return NULL;
  }

  // Adjust the return memory so it is aligned
  uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
  size_t adjust = 0;
  if ((ptr & (alignment - 1)) != 0) {
    adjust = alignment - (ptr & (alignment - 1));
  }

  // Return the unused memory to the system - we'd like to release but the
  // best we can do is decommit, since Windows only lets you free the whole
  // allocation.
  if (adjust > 0) {
    VirtualFree(reinterpret_cast<void*>(ptr), adjust, MEM_DECOMMIT);
  }
  if (adjust < extra) {
    VirtualFree(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust, MEM_DECOMMIT);
  }

  ptr += adjust;
  return reinterpret_cast<void*>(ptr);
}
#endif /* HAVE(VIRTUALALLOC) */

#ifndef WTF_CHANGES
static void* TryDevMem(size_t size, size_t *actual_size, size_t alignment) {
  static bool initialized = false;
  static off_t physmem_base;   // next physical memory address to allocate
  static off_t physmem_limit;  // maximum physical address allowed
  static int physmem_fd;       // file descriptor for /dev/mem

  // Check if we should use /dev/mem allocation. Note that it may take
  // a while to get this flag initialized, so meanwhile we fall back to
  // the next allocator. (It looks like 7MB gets allocated before
  // this flag gets initialized -khr.)
  if (FLAGS_malloc_devmem_start == 0) {
    // NOTE: not a devmem_failure - we'd like TCMalloc_SystemAlloc to
    // try us again next time.
    return NULL;
  }

  if (!initialized) {
    physmem_fd = open("/dev/mem", O_RDWR);
    if (physmem_fd < 0) {
      devmem_failure = true;
      return NULL;
    }
    physmem_base = FLAGS_malloc_devmem_start*1024LL*1024LL;
    physmem_limit = FLAGS_malloc_devmem_limit*1024LL*1024LL;
    initialized = true;
  }

  // Enforce page alignment
  if (pagesize == 0) pagesize = getpagesize();
  if (alignment < pagesize) alignment = pagesize;
  size = ((size + alignment - 1) / alignment) * alignment;

  // could theoretically return the "extra" bytes here, but this
  // is simple and correct.
  if (actual_size)
    *actual_size = size;

  // Ask for extra memory if alignment > pagesize
  size_t extra = 0;
  if (alignment > pagesize) {
    extra = alignment - pagesize;
  }

  // check to see if we have any memory left
  if (physmem_limit != 0 && physmem_base + size + extra > physmem_limit) {
    devmem_failure = true;
    return NULL;
  }
  void *result = mmap(0, size + extra, PROT_READ | PROT_WRITE,
                      MAP_SHARED, physmem_fd, physmem_base);
  if (result == reinterpret_cast<void*>(MAP_FAILED)) {
    devmem_failure = true;
    return NULL;
  }
  uintptr_t ptr = reinterpret_cast<uintptr_t>(result);

  // Adjust the return memory so it is aligned
  size_t adjust = 0;
  if ((ptr & (alignment - 1)) != 0) {
    adjust = alignment - (ptr & (alignment - 1));
  }

  // Return the unused virtual memory to the system
  if (adjust > 0) {
    munmap(reinterpret_cast<void*>(ptr), adjust);
  }
  if (adjust < extra) {
    munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust);
  }

  ptr += adjust;
  physmem_base += adjust + size;

  return reinterpret_cast<void*>(ptr);
}
#endif

void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, size_t alignment) {
  // Discard requests that overflow
  if (size + alignment < size) return NULL;

  SpinLockHolder lock_holder(&spinlock);

  // Enforce minimum alignment
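  // sizeof(MemoryAligner) covers the largest of {void*, double, size_t},
  // so using it as the floor keeps every returned block suitably aligned
  // for the fundamental types callers may store there.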
  if (alignment < sizeof(MemoryAligner)) alignment = sizeof(MemoryAligner);

  // Try twice, once avoiding allocators that failed before, and once
  // more trying all allocators even if they failed before.
  for (int i = 0; i < 2; i++) {

#ifndef WTF_CHANGES
    if (use_devmem && !devmem_failure) {
      void* result = TryDevMem(size, actual_size, alignment);
      if (result != NULL) return result;
    }

    if (use_sbrk && !sbrk_failure) {
      void* result = TrySbrk(size, actual_size, alignment);
      if (result != NULL) return result;
    }
#endif

#if HAVE(MMAP)
    if (use_mmap && !mmap_failure) {
      void* result = TryMmap(size, actual_size, alignment);
      if (result != NULL) return result;
    }
#endif

#if HAVE(VIRTUALALLOC)
    if (use_VirtualAlloc && !VirtualAlloc_failure) {
      void* result = TryVirtualAlloc(size, actual_size, alignment);
      if (result != NULL) return result;
    }
#endif

    // nothing worked - reset failure flags and try again
    devmem_failure = false;
    sbrk_failure = false;
    mmap_failure = false;
    VirtualAlloc_failure = false;
  }
  return NULL;
}
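
// Illustrative use (a sketch, not code from this file): a page-level
// allocator might obtain page-aligned chunks like so, assuming a 4K page:
//
//   size_t actual = 0;
//   void* pages = TCMalloc_SystemAlloc(64 * 1024, &actual, 4096);
//   if (pages) {
//       // ... carve `actual` bytes into spans ...
//       TCMalloc_SystemRelease(pages, actual);  // give the pages back to the OS
//       TCMalloc_SystemCommit(pages, actual);   // reclaim them before reuse
//   }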

#if HAVE(MADV_FREE_REUSE)

void TCMalloc_SystemRelease(void* start, size_t length)
{
    int madviseResult;

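    // Retry as long as madvise() reports EAGAIN (a transient failure);
    // any other failure is final.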
    while ((madviseResult = madvise(start, length, MADV_FREE_REUSABLE)) == -1 && errno == EAGAIN) { }

    // Although madvise() is only advisory, if it fails we want to know about it.
    ASSERT_UNUSED(madviseResult, madviseResult != -1);
}

#elif HAVE(MADV_FREE) || HAVE(MADV_DONTNEED)

void TCMalloc_SystemRelease(void* start, size_t length)
{
    // MADV_FREE clears the modified bit on pages, which allows
    // them to be discarded immediately.
#if HAVE(MADV_FREE)
    const int advice = MADV_FREE;
#else
    const int advice = MADV_DONTNEED;
#endif
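    // (MADV_DONTNEED is the stronger hint: on Linux the kernel may drop the
    // pages outright, and later reads of an anonymous mapping see
    // zero-filled pages.)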
    if (FLAGS_malloc_devmem_start) {
        // It's not safe to use MADV_DONTNEED if we've been mapping
        // /dev/mem for heap memory
        return;
    }
    if (pagesize == 0) pagesize = getpagesize();
    const size_t pagemask = pagesize - 1;

    size_t new_start = reinterpret_cast<size_t>(start);
    size_t end = new_start + length;
    size_t new_end = end;

    // Round up the starting address and round down the ending address
    // to be page aligned:
    new_start = (new_start + pagesize - 1) & ~pagemask;
    new_end = new_end & ~pagemask;
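    // (e.g. with a 0x1000-byte page, a start of 0x12345 rounds up to 0x13000
    // and an end of 0x15321 rounds down to 0x15000, so only whole pages
    // inside the range are advised.)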

    ASSERT((new_start & pagemask) == 0);
    ASSERT((new_end & pagemask) == 0);
    ASSERT(new_start >= reinterpret_cast<size_t>(start));
    ASSERT(new_end <= end);

    if (new_end > new_start) {
        // Note -- ignoring most return codes, because if this fails it
        // doesn't matter...
        while (madvise(reinterpret_cast<char*>(new_start), new_end - new_start,
                       advice) == -1 &&
               errno == EAGAIN) {
            // NOP
        }
    }
}

#elif HAVE(MMAP)

void TCMalloc_SystemRelease(void* start, size_t length)
{
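    // Remapping the range MAP_FIXED over itself replaces the old pages with
    // fresh demand-zero pages, which lets the kernel reclaim the physical
    // memory while keeping the address range reserved.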
    void* newAddress = mmap(start, length, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    // If the mmap failed then that's ok, we just won't return the memory to the system.
    ASSERT_UNUSED(newAddress, newAddress == start || newAddress == reinterpret_cast<void*>(MAP_FAILED));
}

#elif HAVE(VIRTUALALLOC)

void TCMalloc_SystemRelease(void* start, size_t length)
{
    if (VirtualFree(start, length, MEM_DECOMMIT))
        return;

    // The decommit may fail if the memory region consists of allocations
    // from more than one call to VirtualAlloc. In this case, fall back to
    // using VirtualQuery to retrieve the allocation boundaries and decommit
    // them each individually.

    char* ptr = static_cast<char*>(start);
    char* end = ptr + length;
    MEMORY_BASIC_INFORMATION info;
    while (ptr < end) {
        size_t resultSize = VirtualQuery(ptr, &info, sizeof(info));
        ASSERT_UNUSED(resultSize, resultSize == sizeof(info));

        size_t decommitSize = min<size_t>(info.RegionSize, end - ptr);
        BOOL success = VirtualFree(ptr, decommitSize, MEM_DECOMMIT);
        ASSERT_UNUSED(success, success);
        ptr += decommitSize;
    }
}

#else

// Platforms that don't support returning memory use an empty inline version
// of TCMalloc_SystemRelease declared in TCSystemAlloc.h

#endif

#if HAVE(MADV_FREE_REUSE)

void TCMalloc_SystemCommit(void* start, size_t length)
{
    while (madvise(start, length, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
}

#elif HAVE(VIRTUALALLOC)

void TCMalloc_SystemCommit(void* start, size_t length)
{
    if (VirtualAlloc(start, length, MEM_COMMIT, PAGE_READWRITE) == start)
        return;

    // The commit may fail if the memory region consists of allocations
    // from more than one call to VirtualAlloc. In this case, fall back to
    // using VirtualQuery to retrieve the allocation boundaries and commit them
    // each individually.

    char* ptr = static_cast<char*>(start);
    char* end = ptr + length;
    MEMORY_BASIC_INFORMATION info;
    while (ptr < end) {
        size_t resultSize = VirtualQuery(ptr, &info, sizeof(info));
        ASSERT_UNUSED(resultSize, resultSize == sizeof(info));

        size_t commitSize = min<size_t>(info.RegionSize, end - ptr);
        void* newAddress = VirtualAlloc(ptr, commitSize, MEM_COMMIT, PAGE_READWRITE);
        ASSERT_UNUSED(newAddress, newAddress == ptr);
        ptr += commitSize;
    }
}

#else

// Platforms that don't need to explicitly commit memory use an empty inline
// version of TCMalloc_SystemCommit declared in TCSystemAlloc.h

#endif

#endif // #if !USE(SYSTEM_MALLOC)