| OLD | NEW |
| (Empty) |
| 1 // Copyright (c) 2005, 2007, Google Inc. | |
| 2 // All rights reserved. | |
| 3 // Copyright (C) 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved. | |
| 4 // | |
| 5 // Redistribution and use in source and binary forms, with or without | |
| 6 // modification, are permitted provided that the following conditions are | |
| 7 // met: | |
| 8 // | |
| 9 // * Redistributions of source code must retain the above copyright | |
| 10 // notice, this list of conditions and the following disclaimer. | |
| 11 // * Redistributions in binary form must reproduce the above | |
| 12 // copyright notice, this list of conditions and the following disclaimer | |
| 13 // in the documentation and/or other materials provided with the | |
| 14 // distribution. | |
| 15 // * Neither the name of Google Inc. nor the names of its | |
| 16 // contributors may be used to endorse or promote products derived from | |
| 17 // this software without specific prior written permission. | |
| 18 // | |
| 19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
| 20 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
| 21 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
| 22 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
| 23 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
| 24 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
| 25 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
| 26 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
| 27 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
| 28 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
| 29 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
| 30 | |
| 31 // --- | |
| 32 // Author: Sanjay Ghemawat <opensource@google.com> | |
| 33 // | |
| 34 // A malloc that uses a per-thread cache to satisfy small malloc requests. | |
| 35 // (The time for malloc/free of a small object drops from 300 ns to 50 ns.) | |
| 36 // | |
| 37 // See doc/tcmalloc.html for a high-level | |
| 38 // description of how this malloc works. | |
| 39 // | |
| 40 // SYNCHRONIZATION | |
| 41 // 1. The thread-specific lists are accessed without acquiring any locks. | |
| 42 // This is safe because each such list is only accessed by one thread. | |
| 43 // 2. We have a lock per central free-list, and hold it while manipulating | |
| 44 // the central free list for a particular size. | |
| 45 // 3. The central page allocator is protected by "pageheap_lock". | |
| 46 // 4. The pagemap (which maps from page-number to descriptor), | |
| 47 // can be read without holding any locks, and written while holding | |
| 48 // the "pageheap_lock". | |
| 49 // 5. To improve performance, a subset of the information one can get | |
| 50 // from the pagemap is cached in a data structure, pagemap_cache_, | |
| 51 // that atomically reads and writes its entries. This cache can be | |
| 52 // read and written without locking. | |
| 53 // | |
| 54 // This multi-threaded access to the pagemap is safe for fairly | |
| 55 // subtle reasons. We basically assume that when an object X is | |
| 56 // allocated by thread A and deallocated by thread B, there must | |
| 57 // have been appropriate synchronization in the handoff of object | |
| 58 // X from thread A to thread B. The same logic applies to pagemap_cache_. | |
| 59 // | |
| 60 // THE PAGEID-TO-SIZECLASS CACHE | |
| 61 // Hot PageID-to-sizeclass mappings are held by pagemap_cache_. If this cache | |
| 62 // returns 0 for a particular PageID then that means "no information," not that | |
| 63 // the sizeclass is 0. The cache may have stale information for pages that do | |
| 64 // not hold the beginning of any free()'able object. Staleness is eliminated | |
| 65 // in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and | |
| 66 // do_memalign() for all other relevant pages. | |
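
As a reader's aid, here is a minimal standalone sketch of the "0 means no information" convention described above. The cache name and lookup helper are illustrative stand-ins, not the real pagemap_cache_ API.

```cpp
// Standalone sketch: a zero result means "not cached", never "size class 0".
#include <cassert>
#include <cstdint>
#include <map>

typedef std::uintptr_t PageID;

static std::map<PageID, unsigned> sizeClassCache; // stand-in for pagemap_cache_

static unsigned cachedSizeClass(PageID p)
{
    std::map<PageID, unsigned>::const_iterator it = sizeClassCache.find(p);
    return it == sizeClassCache.end() ? 0 : it->second; // 0 == "no information"
}

int main()
{
    sizeClassCache[42] = 7;
    assert(cachedSizeClass(42) == 7); // hot mapping: trust the cache
    assert(cachedSizeClass(43) == 0); // 0: fall back to the full pagemap
}
```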
| 67 // | |
| 68 // TODO: Bias reclamation to larger addresses | |
| 69 // TODO: implement mallinfo/mallopt | |
| 70 // TODO: Better testing | |
| 71 // | |
| 72 // 9/28/2003 (new page-level allocator replaces ptmalloc2): | |
| 73 // * malloc/free of small objects goes from ~300 ns to ~50 ns. | |
| 74 // * allocation of a reasonably complicated struct | |
| 75 // goes from about 1100 ns to about 300 ns. | |
| 76 | |
| 77 #include "config.h" | |
| 78 #include "FastMalloc.h" | |
| 79 | |
| 80 #include "Assertions.h" | |
| 81 | |
| 82 #include <limits> | |
| 83 #if OS(WINDOWS) | |
| 84 #include <windows.h> | |
| 85 #else | |
| 86 #include <pthread.h> | |
| 87 #endif | |
| 88 #include <string.h> | |
| 89 #include <wtf/StdLibExtras.h> | |
| 90 #include <wtf/UnusedParam.h> | |
| 91 | |
| 92 #ifndef NO_TCMALLOC_SAMPLES | |
| 93 #ifdef WTF_CHANGES | |
| 94 #define NO_TCMALLOC_SAMPLES | |
| 95 #endif | |
| 96 #endif | |
| 97 | |
| 98 #if !USE(SYSTEM_MALLOC) && defined(NDEBUG) | |
| 99 #define FORCE_SYSTEM_MALLOC 0 | |
| 100 #else | |
| 101 #define FORCE_SYSTEM_MALLOC 1 | |
| 102 #endif | |
| 103 | |
| 104 // Harden the pointers stored in the TCMalloc linked lists | |
| 105 #if COMPILER(GCC) | |
| 106 #define ENABLE_TCMALLOC_HARDENING 1 | |
| 107 #endif | |
| 108 | |
| 109 // Use a background thread to periodically scavenge memory to release back to the system | |
| 110 #define USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1 | |
| 111 | |
| 112 #ifndef NDEBUG | |
| 113 namespace WTF { | |
| 114 | |
| 115 #if OS(WINDOWS) | |
| 116 | |
| 117 // TLS_OUT_OF_INDEXES is not defined on WinCE. | |
| 118 #ifndef TLS_OUT_OF_INDEXES | |
| 119 #define TLS_OUT_OF_INDEXES 0xffffffff | |
| 120 #endif | |
| 121 | |
| 122 static DWORD isForbiddenTlsIndex = TLS_OUT_OF_INDEXES; | |
| 123 static const LPVOID kTlsAllowValue = reinterpret_cast<LPVOID>(0); // Must be zero. | |
| 124 static const LPVOID kTlsForbiddenValue = reinterpret_cast<LPVOID>(1); | |
| 125 | |
| 126 #if !ASSERT_DISABLED | |
| 127 static bool isForbidden() | |
| 128 { | |
| 129 // By default, fastMalloc is allowed so we don't allocate the | |
| 130 // TLS index unless we're asked to make it forbidden. If TlsSetValue | |
| 131 // has not been called on a thread, the value returned by TlsGetValue is 0. | |
| 132 return (isForbiddenTlsIndex != TLS_OUT_OF_INDEXES) && (TlsGetValue(isForbiddenTlsIndex) == kTlsForbiddenValue); | |
| 133 } | |
| 134 #endif | |
| 135 | |
| 136 void fastMallocForbid() | |
| 137 { | |
| 138 if (isForbiddenTlsIndex == TLS_OUT_OF_INDEXES) | |
| 139 isForbiddenTlsIndex = TlsAlloc(); // a little racy, but close enough for debug only | |
| 140 TlsSetValue(isForbiddenTlsIndex, kTlsForbiddenValue); | |
| 141 } | |
| 142 | |
| 143 void fastMallocAllow() | |
| 144 { | |
| 145 if (isForbiddenTlsIndex == TLS_OUT_OF_INDEXES) | |
| 146 return; | |
| 147 TlsSetValue(isForbiddenTlsIndex, kTlsAllowValue); | |
| 148 } | |
| 149 | |
| 150 #else // !OS(WINDOWS) | |
| 151 | |
| 152 static pthread_key_t isForbiddenKey; | |
| 153 static pthread_once_t isForbiddenKeyOnce = PTHREAD_ONCE_INIT; | |
| 154 static void initializeIsForbiddenKey() | |
| 155 { | |
| 156 pthread_key_create(&isForbiddenKey, 0); | |
| 157 } | |
| 158 | |
| 159 #if !ASSERT_DISABLED | |
| 160 static bool isForbidden() | |
| 161 { | |
| 162 pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey); | |
| 163 return !!pthread_getspecific(isForbiddenKey); | |
| 164 } | |
| 165 #endif | |
| 166 | |
| 167 void fastMallocForbid() | |
| 168 { | |
| 169 pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey); | |
| 170 pthread_setspecific(isForbiddenKey, &isForbiddenKey); | |
| 171 } | |
| 172 | |
| 173 void fastMallocAllow() | |
| 174 { | |
| 175 pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey); | |
| 176 pthread_setspecific(isForbiddenKey, 0); | |
| 177 } | |
| 178 #endif // OS(WINDOWS) | |
| 179 | |
| 180 } // namespace WTF | |
| 181 #endif // NDEBUG | |
| 182 | |
| 183 namespace WTF { | |
| 184 | |
| 185 | |
| 186 namespace Internal { | |
| 187 #if !ENABLE(WTF_MALLOC_VALIDATION) | |
| 188 WTF_EXPORT_PRIVATE void fastMallocMatchFailed(void*); | |
| 189 #else | |
| 190 COMPILE_ASSERT(((sizeof(ValidationHeader) % sizeof(AllocAlignmentInteger)) == 0), ValidationHeader_must_produce_correct_alignment); | |
| 191 #endif | |
| 192 | |
| 193 NO_RETURN_DUE_TO_CRASH void fastMallocMatchFailed(void*) | |
| 194 { | |
| 195 CRASH(); | |
| 196 } | |
| 197 | |
| 198 } // namespace Internal | |
| 199 | |
| 200 | |
| 201 void* fastZeroedMalloc(size_t n) | |
| 202 { | |
| 203 void* result = fastMalloc(n); | |
| 204 memset(result, 0, n); | |
| 205 return result; | |
| 206 } | |
| 207 | |
| 208 char* fastStrDup(const char* src) | |
| 209 { | |
| 210 size_t len = strlen(src) + 1; | |
| 211 char* dup = static_cast<char*>(fastMalloc(len)); | |
| 212 memcpy(dup, src, len); | |
| 213 return dup; | |
| 214 } | |
| 215 | |
| 216 TryMallocReturnValue tryFastZeroedMalloc(size_t n) | |
| 217 { | |
| 218 void* result; | |
| 219 if (!tryFastMalloc(n).getValue(result)) | |
| 220 return 0; | |
| 221 memset(result, 0, n); | |
| 222 return result; | |
| 223 } | |
| 224 | |
| 225 } // namespace WTF | |
| 226 | |
| 227 #if FORCE_SYSTEM_MALLOC | |
| 228 | |
| 229 #if OS(DARWIN) | |
| 230 #include <malloc/malloc.h> | |
| 231 #elif OS(WINDOWS) | |
| 232 #include <malloc.h> | |
| 233 #endif | |
| 234 | |
| 235 namespace WTF { | |
| 236 | |
| 237 size_t fastMallocGoodSize(size_t bytes) | |
| 238 { | |
| 239 #if OS(DARWIN) | |
| 240 return malloc_good_size(bytes); | |
| 241 #else | |
| 242 return bytes; | |
| 243 #endif | |
| 244 } | |
| 245 | |
| 246 TryMallocReturnValue tryFastMalloc(size_t n) | |
| 247 { | |
| 248 ASSERT(!isForbidden()); | |
| 249 | |
| 250 #if ENABLE(WTF_MALLOC_VALIDATION) | |
| 251 if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= n) // If overflow would occur... | |
| 252 return 0; | |
| 253 | |
| 254 void* result = malloc(n + Internal::ValidationBufferSize); | |
| 255 if (!result) | |
| 256 return 0; | |
| 257 Internal::ValidationHeader* header = static_cast<Internal::ValidationHeader*>(result); | |
| 258 header->m_size = n; | |
| 259 header->m_type = Internal::AllocTypeMalloc; | |
| 260 header->m_prefix = static_cast<unsigned>(Internal::ValidationPrefix); | |
| 261 result = header + 1; | |
| 262 *Internal::fastMallocValidationSuffix(result) = Internal::ValidationSuffix; | |
| 263 fastMallocValidate(result); | |
| 264 return result; | |
| 265 #else | |
| 266 return malloc(n); | |
| 267 #endif | |
| 268 } | |
| 269 | |
| 270 void* fastMalloc(size_t n) | |
| 271 { | |
| 272 ASSERT(!isForbidden()); | |
| 273 | |
| 274 #if ENABLE(WTF_MALLOC_VALIDATION) | |
| 275 TryMallocReturnValue returnValue = tryFastMalloc(n); | |
| 276 void* result; | |
| 277 if (!returnValue.getValue(result)) | |
| 278 CRASH(); | |
| 279 #else | |
| 280 void* result = malloc(n); | |
| 281 #endif | |
| 282 | |
| 283 ASSERT(result); // We expect tcmalloc underneath, which would crash instead of getting here. | |
| 284 | |
| 285 return result; | |
| 286 } | |
| 287 | |
| 288 TryMallocReturnValue tryFastCalloc(size_t n_elements, size_t element_size) | |
| 289 { | |
| 290 ASSERT(!isForbidden()); | |
| 291 | |
| 292 #if ENABLE(WTF_MALLOC_VALIDATION) | |
| 293 size_t totalBytes = n_elements * element_size; | |
| 294 if (n_elements > 1 && element_size && (totalBytes / element_size) != n_elements) | |
| 295 return 0; | |
| 296 | |
| 297 TryMallocReturnValue returnValue = tryFastMalloc(totalBytes); | |
| 298 void* result; | |
| 299 if (!returnValue.getValue(result)) | |
| 300 return 0; | |
| 301 memset(result, 0, totalBytes); | |
| 302 fastMallocValidate(result); | |
| 303 return result; | |
| 304 #else | |
| 305 return calloc(n_elements, element_size); | |
| 306 #endif | |
| 307 } | |
| 308 | |
| 309 void* fastCalloc(size_t n_elements, size_t element_size) | |
| 310 { | |
| 311 ASSERT(!isForbidden()); | |
| 312 | |
| 313 #if ENABLE(WTF_MALLOC_VALIDATION) | |
| 314 TryMallocReturnValue returnValue = tryFastCalloc(n_elements, element_size); | |
| 315 void* result; | |
| 316 if (!returnValue.getValue(result)) | |
| 317 CRASH(); | |
| 318 #else | |
| 319 void* result = calloc(n_elements, element_size); | |
| 320 #endif | |
| 321 | |
| 322 ASSERT(result); // We expect tcmalloc underneath, which would crash instead of getting here. | |
| 323 | |
| 324 return result; | |
| 325 } | |
| 326 | |
| 327 void fastFree(void* p) | |
| 328 { | |
| 329 ASSERT(!isForbidden()); | |
| 330 | |
| 331 #if ENABLE(WTF_MALLOC_VALIDATION) | |
| 332 if (!p) | |
| 333 return; | |
| 334 | |
| 335 fastMallocMatchValidateFree(p, Internal::AllocTypeMalloc); | |
| 336 Internal::ValidationHeader* header = Internal::fastMallocValidationHeader(p); | |
| 337 memset(p, 0xCC, header->m_size); | |
| 338 free(header); | |
| 339 #else | |
| 340 free(p); | |
| 341 #endif | |
| 342 } | |
| 343 | |
| 344 TryMallocReturnValue tryFastRealloc(void* p, size_t n) | |
| 345 { | |
| 346 ASSERT(!isForbidden()); | |
| 347 | |
| 348 #if ENABLE(WTF_MALLOC_VALIDATION) | |
| 349 if (p) { | |
| 350 if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= n) // If overflow would occur... | |
| 351 return 0; | |
| 352 fastMallocValidate(p); | |
| 353 Internal::ValidationHeader* result = static_cast<Internal::ValidationHeader*>(realloc(Internal::fastMallocValidationHeader(p), n + Internal::ValidationBufferSize)); | |
| 354 if (!result) | |
| 355 return 0; | |
| 356 result->m_size = n; | |
| 357 result = result + 1; | |
| 358 *fastMallocValidationSuffix(result) = Internal::ValidationSuffix; | |
| 359 fastMallocValidate(result); | |
| 360 return result; | |
| 361 } else { | |
| 362 return fastMalloc(n); | |
| 363 } | |
| 364 #else | |
| 365 return realloc(p, n); | |
| 366 #endif | |
| 367 } | |
| 368 | |
| 369 void* fastRealloc(void* p, size_t n) | |
| 370 { | |
| 371 ASSERT(!isForbidden()); | |
| 372 | |
| 373 #if ENABLE(WTF_MALLOC_VALIDATION) | |
| 374 TryMallocReturnValue returnValue = tryFastRealloc(p, n); | |
| 375 void* result; | |
| 376 if (!returnValue.getValue(result)) | |
| 377 CRASH(); | |
| 378 #else | |
| 379 void* result = realloc(p, n); | |
| 380 #endif | |
| 381 | |
| 382 ASSERT(result); // We expect tcmalloc underneath, which would crash instead of getting here. | |
| 383 | |
| 384 return result; | |
| 385 } | |
| 386 | |
| 387 void releaseFastMallocFreeMemory() { } | |
| 388 | |
| 389 FastMallocStatistics fastMallocStatistics() | |
| 390 { | |
| 391 FastMallocStatistics statistics = { 0, 0, 0 }; | |
| 392 return statistics; | |
| 393 } | |
| 394 | |
| 395 size_t fastMallocSize(const void* p) | |
| 396 { | |
| 397 #if ENABLE(WTF_MALLOC_VALIDATION) | |
| 398 return Internal::fastMallocValidationHeader(const_cast<void*>(p))->m_size; | |
| 399 #elif OS(DARWIN) | |
| 400 return malloc_size(p); | |
| 401 #elif OS(WINDOWS) | |
| 402 return _msize(const_cast<void*>(p)); | |
| 403 #else | |
| 404 UNUSED_PARAM(p); | |
| 405 return 1; | |
| 406 #endif | |
| 407 } | |
| 408 | |
| 409 } // namespace WTF | |
| 410 | |
| 411 #if OS(DARWIN) | |
| 412 // This symbol is present in the JavaScriptCore exports file even when FastMalloc is disabled. | |
| 413 // It will never be used in this case, so its type and value are less interesting than its presence. | |
| 414 extern "C" WTF_EXPORT_PRIVATE const int jscore_fastmalloc_introspection = 0; | |
| 415 #endif | |
| 416 | |
| 417 #else // FORCE_SYSTEM_MALLOC | |
| 418 | |
| 419 #include "Compiler.h" | |
| 420 #include "TCPackedCache.h" | |
| 421 #include "TCPageMap.h" | |
| 422 #include "TCSpinLock.h" | |
| 423 #include "TCSystemAlloc.h" | |
| 424 #include <algorithm> | |
| 425 #include <pthread.h> | |
| 426 #include <stdarg.h> | |
| 427 #include <stddef.h> | |
| 428 #include <stdint.h> | |
| 429 #include <stdio.h> | |
| 430 #if HAVE(ERRNO_H) | |
| 431 #include <errno.h> | |
| 432 #endif | |
| 433 #if OS(UNIX) | |
| 434 #include <unistd.h> | |
| 435 #endif | |
| 436 #if OS(WINDOWS) | |
| 437 #ifndef WIN32_LEAN_AND_MEAN | |
| 438 #define WIN32_LEAN_AND_MEAN | |
| 439 #endif | |
| 440 #include <windows.h> | |
| 441 #endif | |
| 442 | |
| 443 #ifdef WTF_CHANGES | |
| 444 | |
| 445 #if OS(DARWIN) | |
| 446 #include "MallocZoneSupport.h" | |
| 447 #include <wtf/HashSet.h> | |
| 448 #include <wtf/Vector.h> | |
| 449 #endif | |
| 450 | |
| 451 #if HAVE(DISPATCH_H) | |
| 452 #include <dispatch/dispatch.h> | |
| 453 #endif | |
| 454 | |
| 455 #ifdef __has_include | |
| 456 #if __has_include(<System/pthread_machdep.h>) | |
| 457 | |
| 458 #include <System/pthread_machdep.h> | |
| 459 | |
| 460 #if defined(__PTK_FRAMEWORK_JAVASCRIPTCORE_KEY0) | |
| 461 #define WTF_USE_PTHREAD_GETSPECIFIC_DIRECT 1 | |
| 462 #endif | |
| 463 | |
| 464 #endif | |
| 465 #endif | |
| 466 | |
| 467 #ifndef PRIuS | |
| 468 #define PRIuS "zu" | |
| 469 #endif | |
| 470 | |
| 471 // Calling pthread_getspecific through a global function pointer is faster than a normal | |
| 472 // call to the function on Mac OS X, and it's used in performance-critical code. So we | |
| 473 // use a function pointer. But that's not necessarily faster on other platforms, and we had | |
| 474 // problems with this technique on Windows, so we'll do this only on Mac OS X. | |
| 475 #if OS(DARWIN) | |
| 476 #if !USE(PTHREAD_GETSPECIFIC_DIRECT) | |
| 477 static void* (*pthread_getspecific_function_pointer)(pthread_key_t) = pthread_getspecific; | |
| 478 #define pthread_getspecific(key) pthread_getspecific_function_pointer(key) | |
| 479 #else | |
| 480 #define pthread_getspecific(key) _pthread_getspecific_direct(key) | |
| 481 #define pthread_setspecific(key, val) _pthread_setspecific_direct(key, (val)) | |
| 482 #endif | |
| 483 #endif | |
| 484 | |
| 485 #define DEFINE_VARIABLE(type, name, value, meaning) \ | |
| 486 namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \ | |
| 487 type FLAGS_##name(value); \ | |
| 488 char FLAGS_no##name; \ | |
| 489 } \ | |
| 490 using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name | |
| 491 | |
| 492 #define DEFINE_int64(name, value, meaning) \ | |
| 493 DEFINE_VARIABLE(int64_t, name, value, meaning) | |
| 494 | |
| 495 #define DEFINE_double(name, value, meaning) \ | |
| 496 DEFINE_VARIABLE(double, name, value, meaning) | |
| 497 | |
| 498 namespace WTF { | |
| 499 | |
| 500 #define malloc fastMalloc | |
| 501 #define calloc fastCalloc | |
| 502 #define free fastFree | |
| 503 #define realloc fastRealloc | |
| 504 | |
| 505 #define MESSAGE LOG_ERROR | |
| 506 #define CHECK_CONDITION ASSERT | |
| 507 | |
| 508 static const char kLLHardeningMask = 0; | |
| 509 template <unsigned> struct EntropySource; | |
| 510 template <> struct EntropySource<4> { | |
| 511 static uint32_t value() | |
| 512 { | |
| 513 #if OS(DARWIN) | |
| 514 return arc4random(); | |
| 515 #else | |
| 516 return static_cast<uint32_t>(static_cast<uintptr_t>(currentTime() * 10000) ^ reinterpret_cast<uintptr_t>(&kLLHardeningMask)); | |
| 517 #endif | |
| 518 } | |
| 519 }; | |
| 520 | |
| 521 template <> struct EntropySource<8> { | |
| 522 static uint64_t value() | |
| 523 { | |
| 524 return EntropySource<4>::value() | (static_cast<uint64_t>(EntropySource<4>::value()) << 32); | |
| 525 } | |
| 526 }; | |
| 527 | |
| 528 #if ENABLE(TCMALLOC_HARDENING) | |
| 529 /* | |
| 530 * To make use-after-free style exploits harder, we mask the | |
| 531 * addresses we put into our linked lists with the address of | |
| 532 * kLLHardeningMask. Due to ASLR, the address of kLLHardeningMask | |
| 533 * should be sufficiently randomized to make direct freelist | |
| 534 * manipulation much more difficult. | |
| 535 */ | |
| 536 enum { | |
| 537 MaskKeyShift = 13 | |
| 538 }; | |
| 539 | |
| 540 static ALWAYS_INLINE uintptr_t internalEntropyValue() | |
| 541 { | |
| 542 static uintptr_t value = EntropySource<sizeof(uintptr_t)>::value() | 1; | |
| 543 ASSERT(value); | |
| 544 return value; | |
| 545 } | |
| 546 | |
| 547 #define HARDENING_ENTROPY internalEntropyValue() | |
| 548 #define ROTATE_VALUE(value, amount) (((value) >> (amount)) | ((value) << (sizeof(value) * 8 - (amount)))) | |
| 549 #define XOR_MASK_PTR_WITH_KEY(ptr, key, entropy) (reinterpret_cast<typeof(ptr)>(reinterpret_cast<uintptr_t>(ptr)^(ROTATE_VALUE(reinterpret_cast<uintptr_t>(key), MaskKeyShift)^entropy))) | |
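
A minimal standalone sketch (not part of FastMalloc.cpp) of the idea behind the two macros above: because the mask is an XOR, applying it twice with the same key and entropy recovers the original pointer. The names rotate and maskPointer and the entropy constant are illustrative stand-ins for ROTATE_VALUE, XOR_MASK_PTR_WITH_KEY, and HARDENING_ENTROPY.

```cpp
#include <cassert>
#include <stdint.h>

static uintptr_t rotate(uintptr_t value, unsigned amount)
{
    // Same rotation as ROTATE_VALUE above.
    return (value >> amount) | (value << (sizeof(value) * 8 - amount));
}

static void* maskPointer(void* ptr, void* key, uintptr_t entropy)
{
    return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr)
        ^ (rotate(reinterpret_cast<uintptr_t>(key), 13) ^ entropy)); // 13 == MaskKeyShift
}

int main()
{
    int object;
    void* key = &object;              // in FastMalloc the key is the node holding the link
    uintptr_t entropy = 0x5bd1e995;   // arbitrary stand-in for the per-process entropy
    void* masked = maskPointer(&object, key, entropy);
    assert(maskPointer(masked, key, entropy) == &object); // XOR masking round-trips
}
```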
| 550 | |
| 551 | |
| 552 static ALWAYS_INLINE uint32_t freedObjectStartPoison() | |
| 553 { | |
| 554 static uint32_t value = EntropySource<sizeof(uint32_t)>::value() | 1; | |
| 555 ASSERT(value); | |
| 556 return value; | |
| 557 } | |
| 558 | |
| 559 static ALWAYS_INLINE uint32_t freedObjectEndPoison() | |
| 560 { | |
| 561 static uint32_t value = EntropySource<sizeof(uint32_t)>::value() | 1; | |
| 562 ASSERT(value); | |
| 563 return value; | |
| 564 } | |
| 565 | |
| 566 #define PTR_TO_UINT32(ptr) static_cast<uint32_t>(reinterpret_cast<uintptr_t>(ptr)) | |
| 567 #define END_POISON_INDEX(allocationSize) (((allocationSize) - sizeof(uint32_t)) / sizeof(uint32_t)) | |
| 568 #define POISON_ALLOCATION(allocation, allocationSize) do { \ | |
| 569 ASSERT((allocationSize) >= 2 * sizeof(uint32_t)); \ | |
| 570 reinterpret_cast<uint32_t*>(allocation)[0] = 0xbadbeef1; \ | |
| 571 reinterpret_cast<uint32_t*>(allocation)[1] = 0xbadbeef3; \ | |
| 572 if ((allocationSize) < 4 * sizeof(uint32_t)) \ | |
| 573 break; \ | |
| 574 reinterpret_cast<uint32_t*>(allocation)[2] = 0xbadbeef5; \ | |
| 575 reinterpret_cast<uint32_t*>(allocation)[END_POISON_INDEX(allocationSize)] = 0xbadbeef7; \ | |
| 576 } while (false); | |
| 577 | |
| 578 #define POISON_DEALLOCATION_EXPLICIT(allocation, allocationSize, startPoison, endPoison) do { \ | |
| 579 ASSERT((allocationSize) >= 2 * sizeof(uint32_t)); \ | |
| 580 reinterpret_cast<uint32_t*>(allocation)[0] = 0xbadbeef9; \ | |
| 581 reinterpret_cast<uint32_t*>(allocation)[1] = 0xbadbeefb; \ | |
| 582 if ((allocationSize) < 4 * sizeof(uint32_t)) \ | |
| 583 break; \ | |
| 584 reinterpret_cast<uint32_t*>(allocation)[2] = (startPoison) ^ PTR_TO_UINT32(allocation); \ | |
| 585 reinterpret_cast<uint32_t*>(allocation)[END_POISON_INDEX(allocationSize)] = (endPoison) ^ PTR_TO_UINT32(allocation); \ | |
| 586 } while (false) | |
| 587 | |
| 588 #define POISON_DEALLOCATION(allocation, allocationSize) \ | |
| 589 POISON_DEALLOCATION_EXPLICIT(allocation, (allocationSize), freedObjectStartPoison(), freedObjectEndPoison()) | |
| 590 | |
| 591 #define MAY_BE_POISONED(allocation, allocationSize) (((allocationSize) >= 4 * sizeof(uint32_t)) && ( \ | |
| 592 (reinterpret_cast<uint32_t*>(allocation)[2] == (freedObjectStartPoison() ^ PTR_TO_UINT32(allocation))) || \ | |
| 593 (reinterpret_cast<uint32_t*>(allocation)[END_POISON_INDEX(allocationSize)] == (freedObjectEndPoison() ^ PTR_TO_UINT32(allocation))) \ | |
| 594 )) | |
| 595 | |
| 596 #define IS_DEFINITELY_POISONED(allocation, allocationSize) (((allocationSize) < 4 * sizeof(uint32_t)) || ( \ | |
| 597 (reinterpret_cast<uint32_t*>(allocation)[2] == (freedObjectStartPoison() ^ PTR_TO_UINT32(allocation))) && \ | |
| 598 (reinterpret_cast<uint32_t*>(allocation)[END_POISON_INDEX(allocationSize)] == (freedObjectEndPoison() ^ PTR_TO_UINT32(allocation))) \ | |
| 599 )) | |
| 600 | |
| 601 #else | |
| 602 | |
| 603 #define POISON_ALLOCATION(allocation, allocationSize) | |
| 604 #define POISON_DEALLOCATION(allocation, allocationSize) | |
| 605 #define POISON_DEALLOCATION_EXPLICIT(allocation, allocationSize, startPoison, endPoison) | |
| 606 #define MAY_BE_POISONED(allocation, allocationSize) (false) | |
| 607 #define IS_DEFINITELY_POISONED(allocation, allocationSize) (true) | |
| 608 #define XOR_MASK_PTR_WITH_KEY(ptr, key, entropy) (((void)entropy), ((void)key), ptr) | |
| 609 | |
| 610 #define HARDENING_ENTROPY 0 | |
| 611 | |
| 612 #endif | |
| 613 | |
| 614 //------------------------------------------------------------------- | |
| 615 // Configuration | |
| 616 //------------------------------------------------------------------- | |
| 617 | |
| 618 // Not all possible combinations of the following parameters make | |
| 619 // sense. In particular, if kMaxSize increases, you may have to | |
| 620 // increase kNumClasses as well. | |
| 621 static const size_t kPageShift = 12; | |
| 622 static const size_t kPageSize = 1 << kPageShift; | |
| 623 static const size_t kMaxSize = 8u * kPageSize; | |
| 624 static const size_t kAlignShift = 3; | |
| 625 static const size_t kAlignment = 1 << kAlignShift; | |
| 626 static const size_t kNumClasses = 68; | |
| 627 | |
| 628 // Allocates a big block of memory for the pagemap once we reach more than | |
| 629 // 128MB | |
| 630 static const size_t kPageMapBigAllocationThreshold = 128 << 20; | |
| 631 | |
| 632 // Minimum number of pages to fetch from system at a time. Must be | |
| 633 // significantly bigger than kPageSize to amortize system-call | |
| 634 // overhead, and also to reduce external fragmentation. Also, we | |
| 635 // should keep this value big because various incarnations of Linux | |
| 636 // have small limits on the number of mmap() regions per | |
| 637 // address-space. | |
| 638 static const size_t kMinSystemAlloc = 1 << (20 - kPageShift); | |
| 639 | |
| 640 // Number of objects to move between a per-thread list and a central | |
| 641 // list in one shot. We want this to be not too small so we can | |
| 642 // amortize the lock overhead for accessing the central list. Making | |
| 643 // it too big may temporarily cause unnecessary memory wastage in the | |
| 644 // per-thread free list until the scavenger cleans up the list. | |
| 645 static int num_objects_to_move[kNumClasses]; | |
| 646 | |
| 647 // Maximum length we allow a per-thread free-list to have before we | |
| 648 // move objects from it into the corresponding central free-list. We | |
| 649 // want this big to avoid locking the central free-list too often. It | |
| 650 // should not hurt to make this list somewhat big because the | |
| 651 // scavenging code will shrink it down when its contents are not in use. | |
| 652 static const int kMaxFreeListLength = 256; | |
| 653 | |
| 654 // Lower and upper bounds on the per-thread cache sizes | |
| 655 static const size_t kMinThreadCacheSize = kMaxSize * 2; | |
| 656 static const size_t kMaxThreadCacheSize = 2 << 20; | |
| 657 | |
| 658 // Default bound on the total amount of thread caches | |
| 659 static const size_t kDefaultOverallThreadCacheSize = 16 << 20; | |
| 660 | |
| 661 // For all span-lengths < kMaxPages we keep an exact-size list. | |
| 662 // REQUIRED: kMaxPages >= kMinSystemAlloc; | |
| 663 static const size_t kMaxPages = kMinSystemAlloc; | |
| 664 | |
| 665 /* The smallest prime > 2^n */ | |
| 666 static int primes_list[] = { | |
| 667 // Small values might cause high rates of sampling | |
| 668 // and hence are commented out. | |
| 669 // 2, 5, 11, 17, 37, 67, 131, 257, | |
| 670 // 521, 1031, 2053, 4099, 8209, 16411, | |
| 671 32771, 65537, 131101, 262147, 524309, 1048583, | |
| 672 2097169, 4194319, 8388617, 16777259, 33554467 }; | |
| 673 | |
| 674 // Twice the approximate gap between sampling actions. | |
| 675 // I.e., we take one sample approximately once every | |
| 676 // tcmalloc_sample_parameter/2 | |
| 677 // bytes of allocation, i.e., ~ once every 128KB. | |
| 678 // Must be a prime number. | |
| 679 #ifdef NO_TCMALLOC_SAMPLES | |
| 680 DEFINE_int64(tcmalloc_sample_parameter, 0, | |
| 681 "Unused: code is compiled with NO_TCMALLOC_SAMPLES"); | |
| 682 static size_t sample_period = 0; | |
| 683 #else | |
| 684 DEFINE_int64(tcmalloc_sample_parameter, 262147, | |
| 685 "Twice the approximate gap between sampling actions." | |
| 686 " Must be a prime number. Otherwise will be rounded up to a " | |
| 687 " larger prime number"); | |
| 688 static size_t sample_period = 262147; | |
| 689 #endif | |
| 690 | |
| 691 // Protects sample_period above | |
| 692 static SpinLock sample_period_lock = SPINLOCK_INITIALIZER; | |
| 693 | |
| 694 // Parameters for controlling how fast memory is returned to the OS. | |
| 695 | |
| 696 DEFINE_double(tcmalloc_release_rate, 1, | |
| 697 "Rate at which we release unused memory to the system. " | |
| 698 "Zero means we never release memory back to the system. " | |
| 699 "Increase this flag to return memory faster; decrease it " | |
| 700 "to return memory slower. Reasonable rates are in the " | |
| 701 "range [0,10]"); | |
| 702 | |
| 703 //------------------------------------------------------------------- | |
| 704 // Mapping from size to size_class and vice versa | |
| 705 //------------------------------------------------------------------- | |
| 706 | |
| 707 // Sizes <= 1024 have an alignment >= 8. So for such sizes we have an | |
| 708 // array indexed by ceil(size/8). Sizes > 1024 have an alignment >= 128. | |
| 709 // So for these larger sizes we have an array indexed by ceil(size/128). | |
| 710 // | |
| 711 // We flatten both logical arrays into one physical array and use | |
| 712 // arithmetic to compute an appropriate index. The constants used by | |
| 713 // ClassIndex() were selected to make the flattening work. | |
| 714 // | |
| 715 // Examples: | |
| 716 // Size Expression Index | |
| 717 // ------------------------------------------------------- | |
| 718 // 0 (0 + 7) / 8 0 | |
| 719 // 1 (1 + 7) / 8 1 | |
| 720 // ... | |
| 721 // 1024 (1024 + 7) / 8 128 | |
| 722 // 1025 (1025 + 127 + (120<<7)) / 128 129 | |
| 723 // ... | |
| 724 // 32768 (32768 + 127 + (120<<7)) / 128 376 | |
| 725 static const size_t kMaxSmallSize = 1024; | |
| 726 static const int shift_amount[2] = { 3, 7 }; // For divides by 8 or 128 | |
| 727 static const int add_amount[2] = { 7, 127 + (120 << 7) }; | |
| 728 static unsigned char class_array[377]; | |
| 729 | |
| 730 // Compute index of the class_array[] entry for a given size | |
| 731 static inline int ClassIndex(size_t s) { | |
| 732 const int i = (s > kMaxSmallSize); | |
| 733 return static_cast<int>((s + add_amount[i]) >> shift_amount[i]); | |
| 734 } | |
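
For readers checking the index table in the comment above, here is a standalone sketch (not part of FastMalloc.cpp) that reproduces the ClassIndex() arithmetic with the constants inlined and verifies it against the documented examples; classIndex, shiftAmount, and addAmount mirror ClassIndex, shift_amount, and add_amount.

```cpp
#include <cassert>
#include <cstddef>

static int classIndex(size_t s)
{
    static const int shiftAmount[2] = { 3, 7 };            // divide by 8 or by 128
    static const int addAmount[2] = { 7, 127 + (120 << 7) }; // offsets that flatten the two arrays
    const int i = (s > 1024);                               // 1024 == kMaxSmallSize
    return static_cast<int>((s + addAmount[i]) >> shiftAmount[i]);
}

int main()
{
    assert(classIndex(0) == 0);
    assert(classIndex(1) == 1);
    assert(classIndex(1024) == 128);
    assert(classIndex(1025) == 129);
    assert(classIndex(32768) == 376); // last entry of class_array[377]
}
```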
| 735 | |
| 736 // Mapping from size class to max size storable in that class | |
| 737 static size_t class_to_size[kNumClasses]; | |
| 738 | |
| 739 // Mapping from size class to number of pages to allocate at a time | |
| 740 static size_t class_to_pages[kNumClasses]; | |
| 741 | |
| 742 // Hardened singly linked list. We make this a class to allow the compiler to | |
| 743 // statically prevent mixing hardened and non-hardened lists. | |
| 744 class HardenedSLL { | |
| 745 public: | |
| 746 static ALWAYS_INLINE HardenedSLL create(void* value) | |
| 747 { | |
| 748 HardenedSLL result; | |
| 749 result.m_value = value; | |
| 750 return result; | |
| 751 } | |
| 752 | |
| 753 static ALWAYS_INLINE HardenedSLL null() | |
| 754 { | |
| 755 HardenedSLL result; | |
| 756 result.m_value = 0; | |
| 757 return result; | |
| 758 } | |
| 759 | |
| 760 ALWAYS_INLINE void setValue(void* value) { m_value = value; } | |
| 761 ALWAYS_INLINE void* value() const { return m_value; } | |
| 762 ALWAYS_INLINE bool operator!() const { return !m_value; } | |
| 763 typedef void* (HardenedSLL::*UnspecifiedBoolType); | |
| 764 ALWAYS_INLINE operator UnspecifiedBoolType() const { return m_value ? &HardenedSLL::m_value : 0; } | |
| 765 | |
| 766 bool operator!=(const HardenedSLL& other) const { return m_value != other.m_value; } | |
| 767 bool operator==(const HardenedSLL& other) const { return m_value == other.m_value; } | |
| 768 | |
| 769 private: | |
| 770 void* m_value; | |
| 771 }; | |
| 772 | |
| 773 // TransferCache is used to cache transfers of num_objects_to_move[size_class] | |
| 774 // back and forth between thread caches and the central cache for a given size | |
| 775 // class. | |
| 776 struct TCEntry { | |
| 777 HardenedSLL head; // Head of chain of objects. | |
| 778 HardenedSLL tail; // Tail of chain of objects. | |
| 779 }; | |
| 780 // A central cache freelist can have anywhere from 0 to kNumTransferEntries | |
| 781 // slots to put linked-list chains into. To keep memory usage bounded, the total | |
| 782 // number of TCEntries across size classes is fixed. Currently each size | |
| 783 // class is initially given one TCEntry which also means that the maximum any | |
| 784 // one class can have is kNumClasses. | |
| 785 static const int kNumTransferEntries = kNumClasses; | |
| 786 | |
| 787 // Note: the following only works for "n"s that fit in 32-bits, but | |
| 788 // that is fine since we only use it for small sizes. | |
| 789 static inline int LgFloor(size_t n) { | |
| 790 int log = 0; | |
| 791 for (int i = 4; i >= 0; --i) { | |
| 792 int shift = (1 << i); | |
| 793 size_t x = n >> shift; | |
| 794 if (x != 0) { | |
| 795 n = x; | |
| 796 log += shift; | |
| 797 } | |
| 798 } | |
| 799 ASSERT(n == 1); | |
| 800 return log; | |
| 801 } | |
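
The shift-halving loop above is easy to mis-read, so here is a standalone copy with a few spot checks (assuming, as the comment says, values that fit in 32 bits); lgFloor is an illustrative duplicate of LgFloor.

```cpp
#include <cassert>
#include <cstddef>

static int lgFloor(size_t n)
{
    int log = 0;
    for (int i = 4; i >= 0; --i) {
        int shift = 1 << i; // 16, 8, 4, 2, 1
        if (size_t x = n >> shift) { // keep the high half whenever it is non-zero
            n = x;
            log += shift;
        }
    }
    assert(n == 1);
    return log;
}

int main()
{
    assert(lgFloor(1) == 0);
    assert(lgFloor(2) == 1);
    assert(lgFloor(3) == 1);     // floor, not ceiling
    assert(lgFloor(1024) == 10);
    assert(lgFloor(1025) == 10);
}
```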
| 802 | |
| 803 // Functions for using our simple hardened singly linked list | |
| 804 static ALWAYS_INLINE HardenedSLL SLL_Next(HardenedSLL t, uintptr_t entropy) { | |
| 805 return HardenedSLL::create(XOR_MASK_PTR_WITH_KEY(*(reinterpret_cast<void**>(t.value())), t.value(), entropy)); | |
| 806 } | |
| 807 | |
| 808 static ALWAYS_INLINE void SLL_SetNext(HardenedSLL t, HardenedSLL n, uintptr_t entropy) { | |
| 809 *(reinterpret_cast<void**>(t.value())) = XOR_MASK_PTR_WITH_KEY(n.value(), t.value(), entropy); | |
| 810 } | |
| 811 | |
| 812 static ALWAYS_INLINE void SLL_Push(HardenedSLL* list, HardenedSLL element, uintptr_t entropy) { | |
| 813 SLL_SetNext(element, *list, entropy); | |
| 814 *list = element; | |
| 815 } | |
| 816 | |
| 817 static ALWAYS_INLINE HardenedSLL SLL_Pop(HardenedSLL *list, uintptr_t entropy) { | |
| 818 HardenedSLL result = *list; | |
| 819 *list = SLL_Next(*list, entropy); | |
| 820 return result; | |
| 821 } | |
| 822 | |
| 823 // Remove N elements from a linked list to which head points. head will be | |
| 824 // modified to point to the new head. start and end will point to the first | |
| 825 // and last nodes of the range. Note that end will point to NULL after this | |
| 826 // function is called. | |
| 827 | |
| 828 static ALWAYS_INLINE void SLL_PopRange(HardenedSLL* head, int N, HardenedSLL *start, HardenedSLL *end, uintptr_t entropy) { | |
| 829 if (N == 0) { | |
| 830 *start = HardenedSLL::null(); | |
| 831 *end = HardenedSLL::null(); | |
| 832 return; | |
| 833 } | |
| 834 | |
| 835 HardenedSLL tmp = *head; | |
| 836 for (int i = 1; i < N; ++i) { | |
| 837 tmp = SLL_Next(tmp, entropy); | |
| 838 } | |
| 839 | |
| 840 *start = *head; | |
| 841 *end = tmp; | |
| 842 *head = SLL_Next(tmp, entropy); | |
| 843 // Unlink range from list. | |
| 844 SLL_SetNext(tmp, HardenedSLL::null(), entropy); | |
| 845 } | |
| 846 | |
| 847 static ALWAYS_INLINE void SLL_PushRange(HardenedSLL *head, HardenedSLL start, HardenedSLL end, uintptr_t entropy) { | |
| 848 if (!start) return; | |
| 849 SLL_SetNext(end, *head, entropy); | |
| 850 *head = start; | |
| 851 } | |
| 852 | |
| 853 static ALWAYS_INLINE size_t SLL_Size(HardenedSLL head, uintptr_t entropy) { | |
| 854 int count = 0; | |
| 855 while (head) { | |
| 856 count++; | |
| 857 head = SLL_Next(head, entropy); | |
| 858 } | |
| 859 return count; | |
| 860 } | |
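
A minimal standalone sketch of the free-list idea behind the SLL_* helpers above, with the hardening removed: a freed object stores the next pointer in its own first word, so the list costs no extra memory. SLL_Push/SLL_Pop do the same thing, but XOR-mask the stored pointer. sllPush/sllPop here are illustrative, not part of the file.

```cpp
#include <cassert>

static void sllPush(void** list, void* element)
{
    *reinterpret_cast<void**>(element) = *list; // next pointer lives inside the freed object
    *list = element;
}

static void* sllPop(void** list)
{
    void* result = *list;
    *list = *reinterpret_cast<void**>(result); // follow the embedded next pointer
    return result;
}

int main()
{
    void* storage[2][4] = {}; // two fake "freed objects", each big enough to hold a pointer
    void* freeList = 0;
    sllPush(&freeList, storage[0]);
    sllPush(&freeList, storage[1]);
    assert(sllPop(&freeList) == storage[1]); // LIFO order, like a stack
    assert(sllPop(&freeList) == storage[0]);
    assert(!freeList);
}
```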
| 861 | |
| 862 // Setup helper functions. | |
| 863 | |
| 864 static ALWAYS_INLINE size_t SizeClass(size_t size) { | |
| 865 return class_array[ClassIndex(size)]; | |
| 866 } | |
| 867 | |
| 868 // Get the byte-size for a specified class | |
| 869 static ALWAYS_INLINE size_t ByteSizeForClass(size_t cl) { | |
| 870 return class_to_size[cl]; | |
| 871 } | |
| 872 static int NumMoveSize(size_t size) { | |
| 873 if (size == 0) return 0; | |
| 874 // Use approx 64k transfers between thread and central caches. | |
| 875 int num = static_cast<int>(64.0 * 1024.0 / size); | |
| 876 if (num < 2) num = 2; | |
| 877 // Clamp well below kMaxFreeListLength to avoid ping pong between central | |
| 878 // and thread caches. | |
| 879 if (num > static_cast<int>(0.8 * kMaxFreeListLength)) | |
| 880 num = static_cast<int>(0.8 * kMaxFreeListLength); | |
| 881 | |
| 882 // Also, avoid bringing in too many objects into small object free | |
| 883 // lists. There are lots of such lists, and if we allow each one to | |
| 884 // fetch too many at a time, we end up having to scavenge too often | |
| 885 // (especially when there are lots of threads and each thread gets a | |
| 886 // small allowance for its thread cache). | |
| 887 // | |
| 888 // TODO: Make thread cache free list sizes dynamic so that we do not | |
| 889 // have to equally divide a fixed resource amongst lots of threads. | |
| 890 if (num > 32) num = 32; | |
| 891 | |
| 892 return num; | |
| 893 } | |
| 894 | |
| 895 // Initialize the mapping arrays | |
| 896 static void InitSizeClasses() { | |
| 897 // Do some sanity checking on add_amount[]/shift_amount[]/class_array[] | |
| 898 if (ClassIndex(0) < 0) { | |
| 899 MESSAGE("Invalid class index %d for size 0\n", ClassIndex(0)); | |
| 900 CRASH(); | |
| 901 } | |
| 902 if (static_cast<size_t>(ClassIndex(kMaxSize)) >= sizeof(class_array)) { | |
| 903 MESSAGE("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize)); | |
| 904 CRASH(); | |
| 905 } | |
| 906 | |
| 907 // Compute the size classes we want to use | |
| 908 size_t sc = 1; // Next size class to assign | |
| 909 unsigned char alignshift = kAlignShift; | |
| 910 int last_lg = -1; | |
| 911 for (size_t size = kAlignment; size <= kMaxSize; size += (1 << alignshift)) { | |
| 912 int lg = LgFloor(size); | |
| 913 if (lg > last_lg) { | |
| 914 // Increase alignment every so often. | |
| 915 // | |
| 916 // Since we double the alignment every time size doubles and | |
| 917 // size >= 128, this means that space wasted due to alignment is | |
| 918 // at most 16/128 i.e., 12.5%. Plus we cap the alignment at 256 | |
| 919 // bytes, so the space wasted as a percentage starts falling for | |
| 920 // sizes > 2K. | |
| 921 if ((lg >= 7) && (alignshift < 8)) { | |
| 922 alignshift++; | |
| 923 } | |
| 924 last_lg = lg; | |
| 925 } | |
| 926 | |
| 927 // Allocate enough pages so leftover is less than 1/8 of total. | |
| 928 // This bounds wasted space to at most 12.5%. | |
| 929 size_t psize = kPageSize; | |
| 930 while ((psize % size) > (psize >> 3)) { | |
| 931 psize += kPageSize; | |
| 932 } | |
| 933 const size_t my_pages = psize >> kPageShift; | |
| 934 | |
| 935 if (sc > 1 && my_pages == class_to_pages[sc-1]) { | |
| 936 // See if we can merge this into the previous class without | |
| 937 // increasing the fragmentation of the previous class. | |
| 938 const size_t my_objects = (my_pages << kPageShift) / size; | |
| 939 const size_t prev_objects = (class_to_pages[sc-1] << kPageShift) | |
| 940 / class_to_size[sc-1]; | |
| 941 if (my_objects == prev_objects) { | |
| 942 // Adjust last class to include this size | |
| 943 class_to_size[sc-1] = size; | |
| 944 continue; | |
| 945 } | |
| 946 } | |
| 947 | |
| 948 // Add new class | |
| 949 class_to_pages[sc] = my_pages; | |
| 950 class_to_size[sc] = size; | |
| 951 sc++; | |
| 952 } | |
| 953 if (sc != kNumClasses) { | |
| 954 MESSAGE("wrong number of size classes: found %" PRIuS " instead of %d\n", | |
| 955 sc, int(kNumClasses)); | |
| 956 CRASH(); | |
| 957 } | |
| 958 | |
| 959 // Initialize the mapping arrays | |
| 960 int next_size = 0; | |
| 961 for (unsigned char c = 1; c < kNumClasses; c++) { | |
| 962 const size_t max_size_in_class = class_to_size[c]; | |
| 963 for (size_t s = next_size; s <= max_size_in_class; s += kAlignment) { | |
| 964 class_array[ClassIndex(s)] = c; | |
| 965 } | |
| 966 next_size = static_cast<int>(max_size_in_class + kAlignment); | |
| 967 } | |
| 968 | |
| 969 // Double-check sizes just to be safe | |
| 970 for (size_t size = 0; size <= kMaxSize; size++) { | |
| 971 const size_t sc = SizeClass(size); | |
| 972 if (sc == 0) { | |
| 973 MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size); | |
| 974 CRASH(); | |
| 975 } | |
| 976 if (sc > 1 && size <= class_to_size[sc-1]) { | |
| 977 MESSAGE("Allocating unnecessarily large class %" PRIuS " for %" PRIuS | |
| 978 "\n", sc, size); | |
| 979 CRASH(); | |
| 980 } | |
| 981 if (sc >= kNumClasses) { | |
| 982 MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size); | |
| 983 CRASH(); | |
| 984 } | |
| 985 const size_t s = class_to_size[sc]; | |
| 986 if (size > s) { | |
| 987 MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size,
sc); | |
| 988 CRASH(); | |
| 989 } | |
| 990 if (s == 0) { | |
| 991 MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size,
sc); | |
| 992 CRASH(); | |
| 993 } | |
| 994 } | |
| 995 | |
| 996 // Initialize the num_objects_to_move array. | |
| 997 for (size_t cl = 1; cl < kNumClasses; ++cl) { | |
| 998 num_objects_to_move[cl] = NumMoveSize(ByteSizeForClass(cl)); | |
| 999 } | |
| 1000 | |
| 1001 #ifndef WTF_CHANGES | |
| 1002 if (false) { | |
| 1003 // Dump class sizes and maximum external wastage per size class | |
| 1004 for (size_t cl = 1; cl < kNumClasses; ++cl) { | |
| 1005 const int alloc_size = class_to_pages[cl] << kPageShift; | |
| 1006 const int alloc_objs = alloc_size / class_to_size[cl]; | |
| 1007 const int min_used = (class_to_size[cl-1] + 1) * alloc_objs; | |
| 1008 const int max_waste = alloc_size - min_used; | |
| 1009 MESSAGE("SC %3d [ %8d .. %8d ] from %8d ; %2.0f%% maxwaste\n", | |
| 1010 int(cl), | |
| 1011 int(class_to_size[cl-1] + 1), | |
| 1012 int(class_to_size[cl]), | |
| 1013 int(class_to_pages[cl] << kPageShift), | |
| 1014 max_waste * 100.0 / alloc_size | |
| 1015 ); | |
| 1016 } | |
| 1017 } | |
| 1018 #endif | |
| 1019 } | |
| 1020 | |
| 1021 // ------------------------------------------------------------------------- | |
| 1022 // Simple allocator for objects of a specified type. External locking | |
| 1023 // is required before accessing one of these objects. | |
| 1024 // ------------------------------------------------------------------------- | |
| 1025 | |
| 1026 // Metadata allocator -- keeps stats about how many bytes allocated | |
| 1027 static uint64_t metadata_system_bytes = 0; | |
| 1028 static void* MetaDataAlloc(size_t bytes) { | |
| 1029 void* result = TCMalloc_SystemAlloc(bytes, 0); | |
| 1030 if (result != NULL) { | |
| 1031 metadata_system_bytes += bytes; | |
| 1032 } | |
| 1033 return result; | |
| 1034 } | |
| 1035 | |
| 1036 template <class T> | |
| 1037 class PageHeapAllocator { | |
| 1038 private: | |
| 1039 // How much to allocate from system at a time | |
| 1040 static const size_t kAllocIncrement = 32 << 10; | |
| 1041 | |
| 1042 // Aligned size of T | |
| 1043 static const size_t kAlignedSize | |
| 1044 = (((sizeof(T) + kAlignment - 1) / kAlignment) * kAlignment); | |
| 1045 | |
| 1046 // Free area from which to carve new objects | |
| 1047 char* free_area_; | |
| 1048 size_t free_avail_; | |
| 1049 | |
| 1050 // Linked list of all regions allocated by this allocator | |
| 1051 HardenedSLL allocated_regions_; | |
| 1052 | |
| 1053 // Free list of already carved objects | |
| 1054 HardenedSLL free_list_; | |
| 1055 | |
| 1056 // Number of allocated but unfreed objects | |
| 1057 int inuse_; | |
| 1058 uintptr_t entropy_; | |
| 1059 | |
| 1060 public: | |
| 1061 void Init(uintptr_t entropy) { | |
| 1062 ASSERT(kAlignedSize <= kAllocIncrement); | |
| 1063 inuse_ = 0; | |
| 1064 allocated_regions_ = HardenedSLL::null(); | |
| 1065 free_area_ = NULL; | |
| 1066 free_avail_ = 0; | |
| 1067 free_list_.setValue(NULL); | |
| 1068 entropy_ = entropy; | |
| 1069 } | |
| 1070 | |
| 1071 T* New() { | |
| 1072 // Consult free list | |
| 1073 void* result; | |
| 1074 if (free_list_) { | |
| 1075 result = free_list_.value(); | |
| 1076 free_list_ = SLL_Next(free_list_, entropy_); | |
| 1077 } else { | |
| 1078 if (free_avail_ < kAlignedSize) { | |
| 1079 // Need more room | |
| 1080 char* new_allocation = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement)); | |
| 1081 if (!new_allocation) | |
| 1082 CRASH(); | |
| 1083 | |
| 1084 HardenedSLL new_head = HardenedSLL::create(new_allocation); | |
| 1085 SLL_SetNext(new_head, allocated_regions_, entropy_); | |
| 1086 allocated_regions_ = new_head; | |
| 1087 free_area_ = new_allocation + kAlignedSize; | |
| 1088 free_avail_ = kAllocIncrement - kAlignedSize; | |
| 1089 } | |
| 1090 result = free_area_; | |
| 1091 free_area_ += kAlignedSize; | |
| 1092 free_avail_ -= kAlignedSize; | |
| 1093 } | |
| 1094 inuse_++; | |
| 1095 return reinterpret_cast<T*>(result); | |
| 1096 } | |
| 1097 | |
| 1098 void Delete(T* p) { | |
| 1099 HardenedSLL new_head = HardenedSLL::create(p); | |
| 1100 SLL_SetNext(new_head, free_list_, entropy_); | |
| 1101 free_list_ = new_head; | |
| 1102 inuse_--; | |
| 1103 } | |
| 1104 | |
| 1105 int inuse() const { return inuse_; } | |
| 1106 | |
| 1107 #if defined(WTF_CHANGES) && OS(DARWIN) | |
| 1108 template <class Recorder> | |
| 1109 void recordAdministrativeRegions(Recorder& recorder, const RemoteMemoryReader& reader) | |
| 1110 { | |
| 1111 for (HardenedSLL adminAllocation = allocated_regions_; adminAllocation; adminAllocation.setValue(reader.nextEntryInHardenedLinkedList(reinterpret_cast<void**>(adminAllocation.value()), entropy_))) | |
| 1112 recorder.recordRegion(reinterpret_cast<vm_address_t>(adminAllocation.value()), kAllocIncrement); | |
| 1113 } | |
| 1114 #endif | |
| 1115 }; | |
| 1116 | |
| 1117 // ------------------------------------------------------------------------- | |
| 1118 // Span - a contiguous run of pages | |
| 1119 // ------------------------------------------------------------------------- | |
| 1120 | |
| 1121 // Type that can hold a page number | |
| 1122 typedef uintptr_t PageID; | |
| 1123 | |
| 1124 // Type that can hold the length of a run of pages | |
| 1125 typedef uintptr_t Length; | |
| 1126 | |
| 1127 static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift; | |
| 1128 | |
| 1129 // Convert byte size into pages. This won't overflow, but may return | |
| 1130 // an unreasonably large value if bytes is huge enough. | |
| 1131 static inline Length pages(size_t bytes) { | |
| 1132 return (bytes >> kPageShift) + | |
| 1133 ((bytes & (kPageSize - 1)) > 0 ? 1 : 0); | |
| 1134 } | |
| 1135 | |
| 1136 // Convert a user size into the number of bytes that will actually be | |
| 1137 // allocated | |
| 1138 static size_t AllocationSize(size_t bytes) { | |
| 1139 if (bytes > kMaxSize) { | |
| 1140 // Large object: we allocate an integral number of pages | |
| 1141 ASSERT(bytes <= (kMaxValidPages << kPageShift)); | |
| 1142 return pages(bytes) << kPageShift; | |
| 1143 } else { | |
| 1144 // Small object: find the size class to which it belongs | |
| 1145 return ByteSizeForClass(SizeClass(bytes)); | |
| 1146 } | |
| 1147 } | |
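
A standalone sketch (not part of FastMalloc.cpp) of the rounding done by pages() above, with kPageShift == 12 inlined; pagesFor is an illustrative duplicate.

```cpp
#include <cassert>
#include <cstddef>
#include <stdint.h>

static uintptr_t pagesFor(size_t bytes)
{
    const size_t pageShift = 12;                 // kPageShift: 4KB pages
    const size_t pageSize = size_t(1) << pageShift;
    // Whole pages, plus one more if there is any remainder.
    return (bytes >> pageShift) + ((bytes & (pageSize - 1)) > 0 ? 1 : 0);
}

int main()
{
    assert(pagesFor(0) == 0);
    assert(pagesFor(1) == 1);
    assert(pagesFor(4096) == 1);
    assert(pagesFor(4097) == 2); // rounds up to the next whole page
}
```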
| 1148 | |
| 1149 enum { | |
| 1150 kSpanCookieBits = 10, | |
| 1151 kSpanCookieMask = (1 << 10) - 1, | |
| 1152 kSpanThisShift = 7 | |
| 1153 }; | |
| 1154 | |
| 1155 static uint32_t spanValidationCookie; | |
| 1156 static uint32_t spanInitializerCookie() | |
| 1157 { | |
| 1158 static uint32_t value = EntropySource<sizeof(uint32_t)>::value() & kSpanCookieMask; | |
| 1159 spanValidationCookie = value; | |
| 1160 return value; | |
| 1161 } | |
| 1162 | |
| 1163 // Information kept for a span (a contiguous run of pages). | |
| 1164 struct Span { | |
| 1165 PageID start; // Starting page number | |
| 1166 Length length; // Number of pages in span | |
| 1167 Span* next(uintptr_t entropy) const { return XOR_MASK_PTR_WITH_KEY(m_next, this, entropy); } | |
| 1168 Span* remoteNext(const Span* remoteSpanPointer, uintptr_t entropy) const { return XOR_MASK_PTR_WITH_KEY(m_next, remoteSpanPointer, entropy); } | |
| 1169 Span* prev(uintptr_t entropy) const { return XOR_MASK_PTR_WITH_KEY(m_prev, this, entropy); } | |
| 1170 void setNext(Span* next, uintptr_t entropy) { m_next = XOR_MASK_PTR_WITH_KEY(next, this, entropy); } | |
| 1171 void setPrev(Span* prev, uintptr_t entropy) { m_prev = XOR_MASK_PTR_WITH_KEY(prev, this, entropy); } | |
| 1172 | |
| 1173 private: | |
| 1174 Span* m_next; // Used when in linked list | |
| 1175 Span* m_prev; // Used when in linked list | |
| 1176 public: | |
| 1177 HardenedSLL objects; // Linked list of free objects | |
| 1178 unsigned int free : 1; // Is the span free | |
| 1179 #ifndef NO_TCMALLOC_SAMPLES | |
| 1180 unsigned int sample : 1; // Sampled object? | |
| 1181 #endif | |
| 1182 unsigned int sizeclass : 8; // Size-class for small objects (or 0) | |
| 1183 unsigned int refcount : 11; // Number of non-free objects | |
| 1184 bool decommitted : 1; | |
| 1185 void initCookie() | |
| 1186 { | |
| 1187 m_cookie = ((reinterpret_cast<uintptr_t>(this) >> kSpanThisShift) & kSpanCookieMask) ^ spanInitializerCookie(); | |
| 1188 } | |
| 1189 void clearCookie() { m_cookie = 0; } | |
| 1190 bool isValid() const | |
| 1191 { | |
| 1192 return (((reinterpret_cast<uintptr_t>(this) >> kSpanThisShift) & kSpanCookieMask) ^ m_cookie) == spanValidationCookie; | |
| 1193 } | |
| 1194 private: | |
| 1195 uint32_t m_cookie : kSpanCookieBits; | |
| 1196 | |
| 1197 #undef SPAN_HISTORY | |
| 1198 #ifdef SPAN_HISTORY | |
| 1199 // For debugging, we can keep a log of events per span | |
| 1200 int nexthistory; | |
| 1201 char history[64]; | |
| 1202 int value[64]; | |
| 1203 #endif | |
| 1204 }; | |
| 1205 | |
| 1206 #define ASSERT_SPAN_COMMITTED(span) ASSERT(!span->decommitted) | |
| 1207 | |
| 1208 #ifdef SPAN_HISTORY | |
| 1209 void Event(Span* span, char op, int v = 0) { | |
| 1210 span->history[span->nexthistory] = op; | |
| 1211 span->value[span->nexthistory] = v; | |
| 1212 span->nexthistory++; | |
| 1213 if (span->nexthistory == sizeof(span->history)) span->nexthistory = 0; | |
| 1214 } | |
| 1215 #else | |
| 1216 #define Event(s,o,v) ((void) 0) | |
| 1217 #endif | |
| 1218 | |
| 1219 // Allocator/deallocator for spans | |
| 1220 static PageHeapAllocator<Span> span_allocator; | |
| 1221 static Span* NewSpan(PageID p, Length len) { | |
| 1222 Span* result = span_allocator.New(); | |
| 1223 memset(result, 0, sizeof(*result)); | |
| 1224 result->start = p; | |
| 1225 result->length = len; | |
| 1226 result->initCookie(); | |
| 1227 #ifdef SPAN_HISTORY | |
| 1228 result->nexthistory = 0; | |
| 1229 #endif | |
| 1230 return result; | |
| 1231 } | |
| 1232 | |
| 1233 static inline void DeleteSpan(Span* span) { | |
| 1234 RELEASE_ASSERT(span->isValid()); | |
| 1235 #ifndef NDEBUG | |
| 1236 // In debug mode, trash the contents of deleted Spans | |
| 1237 memset(span, 0x3f, sizeof(*span)); | |
| 1238 #endif | |
| 1239 span->clearCookie(); | |
| 1240 span_allocator.Delete(span); | |
| 1241 } | |
| 1242 | |
| 1243 // ------------------------------------------------------------------------- | |
| 1244 // Doubly linked list of spans. | |
| 1245 // ------------------------------------------------------------------------- | |
| 1246 | |
| 1247 static inline void DLL_Init(Span* list, uintptr_t entropy) { | |
| 1248 list->setNext(list, entropy); | |
| 1249 list->setPrev(list, entropy); | |
| 1250 } | |
| 1251 | |
| 1252 static inline void DLL_Remove(Span* span, uintptr_t entropy) { | |
| 1253 span->prev(entropy)->setNext(span->next(entropy), entropy); | |
| 1254 span->next(entropy)->setPrev(span->prev(entropy), entropy); | |
| 1255 span->setPrev(NULL, entropy); | |
| 1256 span->setNext(NULL, entropy); | |
| 1257 } | |
| 1258 | |
| 1259 static ALWAYS_INLINE bool DLL_IsEmpty(const Span* list, uintptr_t entropy) { | |
| 1260 return list->next(entropy) == list; | |
| 1261 } | |
| 1262 | |
| 1263 static int DLL_Length(const Span* list, uintptr_t entropy) { | |
| 1264 int result = 0; | |
| 1265 for (Span* s = list->next(entropy); s != list; s = s->next(entropy)) { | |
| 1266 result++; | |
| 1267 } | |
| 1268 return result; | |
| 1269 } | |
| 1270 | |
| 1271 #if 0 /* Not needed at the moment -- causes compiler warnings if not used */ | |
| 1272 static void DLL_Print(const char* label, const Span* list) { | |
| 1273 MESSAGE("%-10s %p:", label, list); | |
| 1274 for (const Span* s = list->next; s != list; s = s->next) { | |
| 1275 MESSAGE(" <%p,%u,%u>", s, s->start, s->length); | |
| 1276 } | |
| 1277 MESSAGE("\n"); | |
| 1278 } | |
| 1279 #endif | |
| 1280 | |
| 1281 static inline void DLL_Prepend(Span* list, Span* span, uintptr_t entropy) { | |
| 1282 span->setNext(list->next(entropy), entropy); | |
| 1283 span->setPrev(list, entropy); | |
| 1284 list->next(entropy)->setPrev(span, entropy); | |
| 1285 list->setNext(span, entropy); | |
| 1286 } | |
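
A standalone sketch of the span-list discipline implemented by DLL_Init/DLL_Prepend/DLL_Remove above: a circular doubly linked list with a sentinel header, minus the pointer masking. Node and the dll* helpers are illustrative stand-ins.

```cpp
#include <cassert>

struct Node { Node* next; Node* prev; };

static void dllInit(Node* list) { list->next = list; list->prev = list; } // empty: sentinel points at itself

static void dllPrepend(Node* list, Node* node)
{
    node->next = list->next;
    node->prev = list;
    list->next->prev = node;
    list->next = node;
}

static void dllRemove(Node* node)
{
    node->prev->next = node->next;
    node->next->prev = node->prev;
    node->next = 0;
    node->prev = 0;
}

int main()
{
    Node list, a, b;
    dllInit(&list);
    assert(list.next == &list);
    dllPrepend(&list, &a);
    dllPrepend(&list, &b);
    assert(list.next == &b && b.next == &a && a.next == &list); // most recent first
    dllRemove(&b);
    assert(list.next == &a && a.prev == &list);
}
```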
| 1287 | |
| 1288 //------------------------------------------------------------------- | |
| 1289 // Data kept per size-class in central cache | |
| 1290 //------------------------------------------------------------------- | |
| 1291 | |
| 1292 class TCMalloc_Central_FreeList { | |
| 1293 public: | |
| 1294 void Init(size_t cl, uintptr_t entropy); | |
| 1295 | |
| 1296 // These methods all do internal locking. | |
| 1297 | |
| 1298 // Insert the specified range into the central freelist. N is the number of | |
| 1299 // elements in the range. | |
| 1300 void InsertRange(HardenedSLL start, HardenedSLL end, int N); | |
| 1301 | |
| 1302 // Returns the actual number of fetched elements into N. | |
| 1303 void RemoveRange(HardenedSLL* start, HardenedSLL* end, int *N); | |
| 1304 | |
| 1305 // Returns the number of free objects in cache. | |
| 1306 size_t length() { | |
| 1307 SpinLockHolder h(&lock_); | |
| 1308 return counter_; | |
| 1309 } | |
| 1310 | |
| 1311 // Returns the number of free objects in the transfer cache. | |
| 1312 int tc_length() { | |
| 1313 SpinLockHolder h(&lock_); | |
| 1314 return used_slots_ * num_objects_to_move[size_class_]; | |
| 1315 } | |
| 1316 | |
| 1317 #ifdef WTF_CHANGES | |
| 1318 template <class Finder, class Reader> | |
| 1319 void enumerateFreeObjects(Finder& finder, const Reader& reader, TCMalloc_Central_FreeList* remoteCentralFreeList) | |
| 1320 { | |
| 1321 { | |
| 1322 static const ptrdiff_t emptyOffset = reinterpret_cast<const char*>(&empty_) - reinterpret_cast<const char*>(this); | |
| 1323 Span* remoteEmpty = reinterpret_cast<Span*>(reinterpret_cast<char*>(remoteCentralFreeList) + emptyOffset); | |
| 1324 Span* remoteSpan = nonempty_.remoteNext(remoteEmpty, entropy_); | |
| 1325 for (Span* span = reader(remoteEmpty); span && span != &empty_; remoteSpan = span->remoteNext(remoteSpan, entropy_), span = (remoteSpan ? reader(remoteSpan) : 0)) | |
| 1326 ASSERT(!span->objects); | |
| 1327 } | |
| 1328 | |
| 1329 ASSERT(!nonempty_.objects); | |
| 1330 static const ptrdiff_t nonemptyOffset = reinterpret_cast<const char*>(&nonempty_) - reinterpret_cast<const char*>(this); | |
| 1331 | |
| 1332 Span* remoteNonempty = reinterpret_cast<Span*>(reinterpret_cast<char*>(remoteCentralFreeList) + nonemptyOffset); | |
| 1333 Span* remoteSpan = nonempty_.remoteNext(remoteNonempty, entropy_); | |
| 1334 | |
| 1335 for (Span* span = reader(remoteSpan); span && remoteSpan != remoteNonempty; remoteSpan = span->remoteNext(remoteSpan, entropy_), span = (remoteSpan ? reader(remoteSpan) : 0)) { | |
| 1336 for (HardenedSLL nextObject = span->objects; nextObject; nextObject.setValue(reader.nextEntryInHardenedLinkedList(reinterpret_cast<void**>(nextObject.value()), entropy_))) { | |
| 1337 finder.visit(nextObject.value()); | |
| 1338 } | |
| 1339 } | |
| 1340 } | |
| 1341 #endif | |
| 1342 | |
| 1343 uintptr_t entropy() const { return entropy_; } | |
| 1344 private: | |
| 1345 // REQUIRES: lock_ is held | |
| 1346 // Remove object from cache and return. | |
| 1347 // Return NULL if no free entries in cache. | |
| 1348 HardenedSLL FetchFromSpans(); | |
| 1349 | |
| 1350 // REQUIRES: lock_ is held | |
| 1351 // Remove object from cache and return. Fetches | |
| 1352 // from pageheap if cache is empty. Only returns | |
| 1353 // NULL on allocation failure. | |
| 1354 HardenedSLL FetchFromSpansSafe(); | |
| 1355 | |
| 1356 // REQUIRES: lock_ is held | |
| 1357 // Release a linked list of objects to spans. | |
| 1358 // May temporarily release lock_. | |
| 1359 void ReleaseListToSpans(HardenedSLL start); | |
| 1360 | |
| 1361 // REQUIRES: lock_ is held | |
| 1362 // Release an object to spans. | |
| 1363 // May temporarily release lock_. | |
| 1364 ALWAYS_INLINE void ReleaseToSpans(HardenedSLL object); | |
| 1365 | |
| 1366 // REQUIRES: lock_ is held | |
| 1367 // Populate cache by fetching from the page heap. | |
| 1368 // May temporarily release lock_. | |
| 1369 ALWAYS_INLINE void Populate(); | |
| 1370 | |
| 1371 // REQUIRES: lock is held. | |
| 1372 // Tries to make room for a TCEntry. If the cache is full it will try to | |
| 1373 // expand it at the cost of some other cache size. Return false if there is | |
| 1374 // no space. | |
| 1375 bool MakeCacheSpace(); | |
| 1376 | |
| 1377 // REQUIRES: lock_ for locked_size_class is held. | |
| 1378   // Picks a "random" size class to steal a TCEntry slot from.  In reality it | |
| 1379 // just iterates over the sizeclasses but does so without taking a lock. | |
| 1380 // Returns true on success. | |
| 1381 // May temporarily lock a "random" size class. | |
| 1382 static ALWAYS_INLINE bool EvictRandomSizeClass(size_t locked_size_class, bool
force); | |
| 1383 | |
| 1384 // REQUIRES: lock_ is *not* held. | |
| 1385   // Tries to shrink the Cache.  If force is true it will release objects to | |
| 1386   // spans if that allows it to shrink the cache.  Returns false if it failed to | |
| 1387   // shrink the cache.  Decrements cache_size_ on success. | |
| 1388   // May temporarily take lock_.  If it takes lock_, the locked_size_class | |
| 1389   // lock is released to keep the thread from holding two size class locks | |
| 1390   // concurrently, which could lead to a deadlock. | |
| 1391 bool ShrinkCache(int locked_size_class, bool force); | |
| 1392 | |
| 1393   // This lock protects all the data members.  used_slots_ and cache_size_ | |
| 1394   // may be looked at without holding the lock. | |
| 1395 SpinLock lock_; | |
| 1396 | |
| 1397 // We keep linked lists of empty and non-empty spans. | |
| 1398 size_t size_class_; // My size class | |
| 1399 Span empty_; // Dummy header for list of empty spans | |
| 1400 Span nonempty_; // Dummy header for list of non-empty spans | |
| 1401 size_t counter_; // Number of free objects in cache entry | |
| 1402 | |
| 1403   // Here we reserve space for TCEntry cache slots.  Since one size class can | |
| 1404   // end up getting all the TCEntry quota in the system, we just preallocate a | |
| 1405   // sufficient number of entries here. | |
| 1406 TCEntry tc_slots_[kNumTransferEntries]; | |
| 1407 | |
| 1408 // Number of currently used cached entries in tc_slots_. This variable is | |
| 1409 // updated under a lock but can be read without one. | |
| 1410 int32_t used_slots_; | |
| 1411 // The current number of slots for this size class. This is an | |
| 1412 // adaptive value that is increased if there is lots of traffic | |
| 1413 // on a given size class. | |
| 1414 int32_t cache_size_; | |
| 1415 uintptr_t entropy_; | |
| 1416 }; | |
| 1417 | |
| 1418 #if COMPILER(CLANG) && defined(__has_warning) | |
| 1419 #pragma clang diagnostic push | |
| 1420 #if __has_warning("-Wunused-private-field") | |
| 1421 #pragma clang diagnostic ignored "-Wunused-private-field" | |
| 1422 #endif | |
| 1423 #endif | |
| 1424 | |
| 1425 // Pad each CentralCache object to a multiple of 64 bytes | |
| 1426 template <size_t SizeToPad> | |
| 1427 class TCMalloc_Central_FreeListPadded_Template : public TCMalloc_Central_FreeLis
t { | |
| 1428 private: | |
| 1429 char pad[64 - SizeToPad]; | |
| 1430 }; | |
| 1431 | |
| 1432 // Zero-size specialization to avoid compiler error when TCMalloc_Central_FreeLi
st happens | |
| 1433 // to be exactly 64 bytes. | |
| 1434 template <> class TCMalloc_Central_FreeListPadded_Template<0> : public TCMalloc_
Central_FreeList { | |
| 1435 }; | |
| 1436 | |
| 1437 typedef TCMalloc_Central_FreeListPadded_Template<sizeof(TCMalloc_Central_FreeLis
t) % 64> TCMalloc_Central_FreeListPadded; | |
| 1438 | |
| 1439 #if COMPILER(CLANG) && defined(__has_warning) | |
| 1440 #pragma clang diagnostic pop | |
| 1441 #endif | |
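| // Illustrative sketch (not part of the original source): how the "% 64" template | |
| // argument above rounds objects up to a whole number of 64-byte cache lines. With a | |
| // made-up size of 200 bytes, SizeToPad would be 200 % 64 == 8, the pad array would be | |
| // 64 - 8 == 56 bytes, and the padded object would occupy 256 bytes, so adjacent | |
| // central_cache[] entries never share a cache line. | |
| #if 0 // hedged demonstration with a hypothetical Payload type, not real FastMalloc code | |
| #include <cstddef> | |
| struct Payload { char data[200]; };                     // stand-in for an unpadded object | |
| template <std::size_t SizeToPad> | |
| struct Padded : Payload { char pad[64 - SizeToPad]; };  // round up to the next multiple of 64 | |
| template <> struct Padded<0> : Payload { };             // already a multiple of 64: add nothing | |
| typedef Padded<sizeof(Payload) % 64> PaddedPayload; | |
| static_assert(sizeof(PaddedPayload) % 64 == 0, "rounded up to a cache-line multiple"); | |
| #endif | |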
| 1442 | |
| 1443 #if OS(DARWIN) | |
| 1444 struct Span; | |
| 1445 class TCMalloc_PageHeap; | |
| 1446 class TCMalloc_ThreadCache; | |
| 1447 template <typename T> class PageHeapAllocator; | |
| 1448 | |
| 1449 class FastMallocZone { | |
| 1450 public: | |
| 1451 static void init(); | |
| 1452 | |
| 1453     static kern_return_t enumerate(task_t, void*, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t, vm_range_recorder_t); | |
| 1454 static size_t goodSize(malloc_zone_t*, size_t size) { return size; } | |
| 1455 static boolean_t check(malloc_zone_t*) { return true; } | |
| 1456 static void print(malloc_zone_t*, boolean_t) { } | |
| 1457 static void log(malloc_zone_t*, void*) { } | |
| 1458 static void forceLock(malloc_zone_t*) { } | |
| 1459 static void forceUnlock(malloc_zone_t*) { } | |
| 1460 static void statistics(malloc_zone_t*, malloc_statistics_t* stats) { memset(
stats, 0, sizeof(malloc_statistics_t)); } | |
| 1461 | |
| 1462 private: | |
| 1463 FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_
FreeListPadded*, PageHeapAllocator<Span>*, PageHeapAllocator<TCMalloc_ThreadCach
e>*); | |
| 1464 static size_t size(malloc_zone_t*, const void*); | |
| 1465 static void* zoneMalloc(malloc_zone_t*, size_t); | |
| 1466 static void* zoneCalloc(malloc_zone_t*, size_t numItems, size_t size); | |
| 1467 static void zoneFree(malloc_zone_t*, void*); | |
| 1468 static void* zoneRealloc(malloc_zone_t*, void*, size_t); | |
| 1469 static void* zoneValloc(malloc_zone_t*, size_t) { LOG_ERROR("valloc is not s
upported"); return 0; } | |
| 1470 static void zoneDestroy(malloc_zone_t*) { } | |
| 1471 | |
| 1472 malloc_zone_t m_zone; | |
| 1473 TCMalloc_PageHeap* m_pageHeap; | |
| 1474 TCMalloc_ThreadCache** m_threadHeaps; | |
| 1475 TCMalloc_Central_FreeListPadded* m_centralCaches; | |
| 1476 PageHeapAllocator<Span>* m_spanAllocator; | |
| 1477 PageHeapAllocator<TCMalloc_ThreadCache>* m_pageHeapAllocator; | |
| 1478 }; | |
| 1479 | |
| 1480 #endif | |
| 1481 | |
| 1482 #endif | |
| 1483 | |
| 1484 #ifndef WTF_CHANGES | |
| 1485 // This #ifdef should almost never be set. Set NO_TCMALLOC_SAMPLES if | |
| 1486 // you're porting to a system where you really can't get a stacktrace. | |
| 1487 #ifdef NO_TCMALLOC_SAMPLES | |
| 1488 // We use #define so code compiles even if you #include stacktrace.h somehow. | |
| 1489 # define GetStackTrace(stack, depth, skip) (0) | |
| 1490 #else | |
| 1491 # include <google/stacktrace.h> | |
| 1492 #endif | |
| 1493 #endif | |
| 1494 | |
| 1495 // Even if we have support for thread-local storage in the compiler | |
| 1496 // and linker, the OS may not support it. We need to check that at | |
| 1497 // runtime. Right now, we have to keep a manual set of "bad" OSes. | |
| 1498 #if defined(HAVE_TLS) | |
| 1499 static bool kernel_supports_tls = false; // be conservative | |
| 1500 static inline bool KernelSupportsTLS() { | |
| 1501 return kernel_supports_tls; | |
| 1502 } | |
| 1503 # if !HAVE_DECL_UNAME // if too old for uname, probably too old for TLS | |
| 1504 static void CheckIfKernelSupportsTLS() { | |
| 1505 kernel_supports_tls = false; | |
| 1506 } | |
| 1507 # else | |
| 1508 # include <sys/utsname.h> // DECL_UNAME checked for <sys/utsname.h> too | |
| 1509 static void CheckIfKernelSupportsTLS() { | |
| 1510 struct utsname buf; | |
| 1511 if (uname(&buf) != 0) { // should be impossible | |
| 1512 MESSAGE("uname failed assuming no TLS support (errno=%d)\n", errno); | |
| 1513 kernel_supports_tls = false; | |
| 1514 } else if (strcasecmp(buf.sysname, "linux") == 0) { | |
| 1515 // The linux case: the first kernel to support TLS was 2.6.0 | |
| 1516 if (buf.release[0] < '2' && buf.release[1] == '.') // 0.x or 1.x | |
| 1517 kernel_supports_tls = false; | |
| 1518 else if (buf.release[0] == '2' && buf.release[1] == '.' && | |
| 1519 buf.release[2] >= '0' && buf.release[2] < '6' && | |
| 1520 buf.release[3] == '.') // 2.0 - 2.5 | |
| 1521 kernel_supports_tls = false; | |
| 1522 else | |
| 1523 kernel_supports_tls = true; | |
| 1524   } else {        // some other kernel, we'll be optimistic | |
| 1525 kernel_supports_tls = true; | |
| 1526 } | |
| 1527 // TODO(csilvers): VLOG(1) the tls status once we support RAW_VLOG | |
| 1528 } | |
| 1529 # endif // HAVE_DECL_UNAME | |
| 1530 #endif // HAVE_TLS | |
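| // Illustrative sketch (not from the original source): the uname()-based check above | |
| // treats Linux 2.6.0 and later as TLS-capable. The same release-string test as a | |
| // standalone helper (linuxReleaseSupportsTLS is a hypothetical name), with made-up inputs: | |
| #if 0 | |
| static bool linuxReleaseSupportsTLS(const char* release) { | |
|     if (release[0] < '2' && release[1] == '.')        // "0.x" or "1.x": no TLS | |
|         return false; | |
|     if (release[0] == '2' && release[1] == '.' && | |
|         release[2] >= '0' && release[2] < '6' && | |
|         release[3] == '.')                            // "2.0" - "2.5": no TLS | |
|         return false; | |
|     return true;                                      // "2.6"+ and newer major versions | |
| } | |
| // linuxReleaseSupportsTLS("2.4.20") == false, linuxReleaseSupportsTLS("2.6.32") == true | |
| #endif | |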
| 1531 | |
| 1532 // __THROW is defined in glibc systems. It means, counter-intuitively, | |
| 1533 // "This function will never throw an exception." It's an optional | |
| 1534 // optimization tool, but we may need to use it to match glibc prototypes. | |
| 1535 #ifndef __THROW // I guess we're not on a glibc system | |
| 1536 # define __THROW // __THROW is just an optimization, so ok to make it "" | |
| 1537 #endif | |
| 1538 | |
| 1539 // ------------------------------------------------------------------------- | |
| 1540 // Stack traces kept for sampled allocations | |
| 1541 // The following state is protected by pageheap_lock_. | |
| 1542 // ------------------------------------------------------------------------- | |
| 1543 | |
| 1544 // size/depth are made the same size as a pointer so that some generic | |
| 1545 // code below can conveniently cast them back and forth to void*. | |
| 1546 static const int kMaxStackDepth = 31; | |
| 1547 struct StackTrace { | |
| 1548 uintptr_t size; // Size of object | |
| 1549 uintptr_t depth; // Number of PC values stored in array below | |
| 1550 void* stack[kMaxStackDepth]; | |
| 1551 }; | |
| 1552 static PageHeapAllocator<StackTrace> stacktrace_allocator; | |
| 1553 static Span sampled_objects; | |
| 1554 | |
| 1555 // ------------------------------------------------------------------------- | |
| 1556 // Map from page-id to per-page data | |
| 1557 // ------------------------------------------------------------------------- | |
| 1558 | |
| 1559 // We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines. | |
| 1560 // We also use a simple one-level cache for hot PageID-to-sizeclass mappings, | |
| 1561 // because sometimes the sizeclass is all the information we need. | |
| 1562 | |
| 1563 // Selector class -- general selector uses 3-level map | |
| 1564 template <int BITS> class MapSelector { | |
| 1565 public: | |
| 1566 typedef TCMalloc_PageMap3<BITS-kPageShift> Type; | |
| 1567 typedef PackedCache<BITS, uint64_t> CacheType; | |
| 1568 }; | |
| 1569 | |
| 1570 #if defined(WTF_CHANGES) | |
| 1571 #if CPU(X86_64) | |
| 1572 // On all known X86-64 platforms, the upper 16 bits are always unused and theref
ore | |
| 1573 // can be excluded from the PageMap key. | |
| 1574 // See http://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details | |
| 1575 | |
| 1576 static const size_t kBitsUnusedOn64Bit = 16; | |
| 1577 #else | |
| 1578 static const size_t kBitsUnusedOn64Bit = 0; | |
| 1579 #endif | |
| 1580 | |
| 1581 // A three-level map for 64-bit machines | |
| 1582 template <> class MapSelector<64> { | |
| 1583 public: | |
| 1584 typedef TCMalloc_PageMap3<64 - kPageShift - kBitsUnusedOn64Bit> Type; | |
| 1585 typedef PackedCache<64, uint64_t> CacheType; | |
| 1586 }; | |
| 1587 #endif | |
| 1588 | |
| 1589 // A two-level map for 32-bit machines | |
| 1590 template <> class MapSelector<32> { | |
| 1591 public: | |
| 1592 typedef TCMalloc_PageMap2<32 - kPageShift> Type; | |
| 1593 typedef PackedCache<32 - kPageShift, uint16_t> CacheType; | |
| 1594 }; | |
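| // Illustrative arithmetic (not from the original source; kPageShift == 12, i.e. 4 KiB | |
| // pages, is an assumption for this example): on x86-64 the selector above keys the | |
| // three-level map with 64 - 12 - 16 == 36 bits, which covers the 48-bit virtual address | |
| // space, while a 32-bit build keys the two-level map with 32 - 12 == 20 bits, i.e. about | |
| // one million pages. | |
| #if 0 | |
| constexpr int kExamplePageShift = 12;                  // hypothetical 4 KiB pages | |
| constexpr int kKeyBits64 = 64 - kExamplePageShift - 16; | |
| constexpr int kKeyBits32 = 32 - kExamplePageShift; | |
| static_assert(kKeyBits64 == 36 && kKeyBits32 == 20, "page-number key widths"); | |
| #endif | |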
| 1595 | |
| 1596 // ------------------------------------------------------------------------- | |
| 1597 // Page-level allocator | |
| 1598 // * Eager coalescing | |
| 1599 // | |
| 1600 // Heap for page-level allocation. We allow allocating and freeing a | |
| 1601 // contiguous run of pages (called a "span"). | |
| 1602 // ------------------------------------------------------------------------- | |
| 1603 | |
| 1604 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 1605 // The page heap maintains a free list for spans that are no longer in use by | |
| 1606 // the central cache or any thread caches. We use a background thread to | |
| 1607 // periodically scan the free list and release a percentage of it back to the OS
. | |
| 1608 | |
| 1609 // If free_committed_pages_ exceeds kMinimumFreeCommittedPageCount, the | |
| 1610 // background thread: | |
| 1611 // - wakes up | |
| 1612 // - pauses for kScavengeDelayInSeconds | |
| 1613 // - returns to the OS a percentage of the memory that remained unused durin
g | |
| 1614 // that pause (kScavengePercentage * min_free_committed_pages_since_last_s
cavenge_) | |
| 1615 // The goal of this strategy is to reduce memory pressure in a timely fashion | |
| 1616 // while avoiding thrashing the OS allocator. | |
| 1617 | |
| 1618 // Time delay before the page heap scavenger will consider returning pages to | |
| 1619 // the OS. | |
| 1620 static const int kScavengeDelayInSeconds = 2; | |
| 1621 | |
| 1622 // Approximate percentage of free committed pages to return to the OS in one | |
| 1623 // scavenge. | |
| 1624 static const float kScavengePercentage = .5f; | |
| 1625 | |
| 1626 // Number of span lists to keep spans in when memory is returned. | |
| 1627 static const int kMinSpanListsWithSpans = 32; | |
| 1628 | |
| 1629 // Number of free committed pages that we want to keep around. The minimum numb
er of pages used when there | |
| 1630 // is 1 span in each of the first kMinSpanListsWithSpans spanlists. Currently 5
28 pages. | |
| 1631 static const size_t kMinimumFreeCommittedPageCount = kMinSpanListsWithSpans * ((
1.0f+kMinSpanListsWithSpans) / 2.0f); | |
| 1632 | |
| 1633 #endif | |
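| // Worked example (not from the original source): with one span on each of the first | |
| // kMinSpanListsWithSpans == 32 span lists, those lists hold 1 + 2 + ... + 32 pages, so | |
| // kMinimumFreeCommittedPageCount == 32 * (1 + 32) / 2 == 528 pages (about 2 MB with | |
| // hypothetical 4 KiB pages) that the scavenger always leaves committed. | |
| #if 0 | |
| constexpr int kExampleLists = 32;                      // kMinSpanListsWithSpans | |
| constexpr int kKeptPages = kExampleLists * (1 + kExampleLists) / 2; | |
| static_assert(kKeptPages == 528, "matches the 'Currently 528 pages' comment above"); | |
| #endif | |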
| 1634 | |
| 1635 static SpinLock pageheap_lock = SPINLOCK_INITIALIZER; | |
| 1636 | |
| 1637 class TCMalloc_PageHeap { | |
| 1638 public: | |
| 1639 void init(); | |
| 1640 | |
| 1641   // Allocate a run of "n" pages.  Returns NULL if out of memory. | |
| 1642 Span* New(Length n); | |
| 1643 | |
| 1644 // Delete the span "[p, p+n-1]". | |
| 1645 // REQUIRES: span was returned by earlier call to New() and | |
| 1646 // has not yet been deleted. | |
| 1647 void Delete(Span* span); | |
| 1648 | |
| 1649 // Mark an allocated span as being used for small objects of the | |
| 1650 // specified size-class. | |
| 1651 // REQUIRES: span was returned by an earlier call to New() | |
| 1652 // and has not yet been deleted. | |
| 1653 void RegisterSizeClass(Span* span, size_t sc); | |
| 1654 | |
| 1655 // Split an allocated span into two spans: one of length "n" pages | |
| 1656 // followed by another span of length "span->length - n" pages. | |
| 1657 // Modifies "*span" to point to the first span of length "n" pages. | |
| 1658 // Returns a pointer to the second span. | |
| 1659 // | |
| 1660 // REQUIRES: "0 < n < span->length" | |
| 1661 // REQUIRES: !span->free | |
| 1662 // REQUIRES: span->sizeclass == 0 | |
| 1663 Span* Split(Span* span, Length n); | |
| 1664 | |
| 1665 // Return the descriptor for the specified page. | |
| 1666 inline Span* GetDescriptor(PageID p) const { | |
| 1667 return reinterpret_cast<Span*>(pagemap_.get(p)); | |
| 1668 } | |
| 1669 | |
| 1670 #ifdef WTF_CHANGES | |
| 1671 inline Span* GetDescriptorEnsureSafe(PageID p) | |
| 1672 { | |
| 1673 pagemap_.Ensure(p, 1); | |
| 1674 return GetDescriptor(p); | |
| 1675 } | |
| 1676 | |
| 1677 size_t ReturnedBytes() const; | |
| 1678 #endif | |
| 1679 | |
| 1680 // Dump state to stderr | |
| 1681 #ifndef WTF_CHANGES | |
| 1682 void Dump(TCMalloc_Printer* out); | |
| 1683 #endif | |
| 1684 | |
| 1685 // Return number of bytes allocated from system | |
| 1686 inline uint64_t SystemBytes() const { return system_bytes_; } | |
| 1687 | |
| 1688 // Return number of free bytes in heap | |
| 1689 uint64_t FreeBytes() const { | |
| 1690 return (static_cast<uint64_t>(free_pages_) << kPageShift); | |
| 1691 } | |
| 1692 | |
| 1693 bool Check(); | |
| 1694 size_t CheckList(Span* list, Length min_pages, Length max_pages, bool decommit
ted); | |
| 1695 | |
| 1696 // Release all pages on the free list for reuse by the OS: | |
| 1697 void ReleaseFreePages(); | |
| 1698 void ReleaseFreeList(Span*, Span*); | |
| 1699 | |
| 1700 // Return 0 if we have no information, or else the correct sizeclass for p. | |
| 1701 // Reads and writes to pagemap_cache_ do not require locking. | |
| 1702 // The entries are 64 bits on 64-bit hardware and 16 bits on | |
| 1703 // 32-bit hardware, and we don't mind raciness as long as each read of | |
| 1704 // an entry yields a valid entry, not a partially updated entry. | |
| 1705 size_t GetSizeClassIfCached(PageID p) const { | |
| 1706 return pagemap_cache_.GetOrDefault(p, 0); | |
| 1707 } | |
| 1708 void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); } | |
| 1709 | |
| 1710 private: | |
| 1711 // Pick the appropriate map and cache types based on pointer size | |
| 1712 typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap; | |
| 1713 typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache; | |
| 1714 PageMap pagemap_; | |
| 1715 mutable PageMapCache pagemap_cache_; | |
| 1716 | |
| 1717 // We segregate spans of a given size into two circular linked | |
| 1718 // lists: one for normal spans, and one for spans whose memory | |
| 1719 // has been returned to the system. | |
| 1720 struct SpanList { | |
| 1721 Span normal; | |
| 1722 Span returned; | |
| 1723 }; | |
| 1724 | |
| 1725 // List of free spans of length >= kMaxPages | |
| 1726 SpanList large_; | |
| 1727 | |
| 1728 // Array mapping from span length to a doubly linked list of free spans | |
| 1729 SpanList free_[kMaxPages]; | |
| 1730 | |
| 1731 // Number of pages kept in free lists | |
| 1732 uintptr_t free_pages_; | |
| 1733 | |
| 1734 // Used for hardening | |
| 1735 uintptr_t entropy_; | |
| 1736 | |
| 1737 // Bytes allocated from system | |
| 1738 uint64_t system_bytes_; | |
| 1739 | |
| 1740 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 1741 // Number of pages kept in free lists that are still committed. | |
| 1742 Length free_committed_pages_; | |
| 1743 | |
| 1744 // Minimum number of free committed pages since last scavenge. (Can be 0 if | |
| 1745 // we've committed new pages since the last scavenge.) | |
| 1746 Length min_free_committed_pages_since_last_scavenge_; | |
| 1747 #endif | |
| 1748 | |
| 1749 bool GrowHeap(Length n); | |
| 1750 | |
| 1751   // REQUIRES: span->length >= n | |
| 1752 // Remove span from its free list, and move any leftover part of | |
| 1753 // span into appropriate free lists. Also update "span" to have | |
| 1754 // length exactly "n" and mark it as non-free so it can be returned | |
| 1755 // to the client. | |
| 1756 // | |
| 1757 // "released" is true iff "span" was found on a "returned" list. | |
| 1758 void Carve(Span* span, Length n, bool released); | |
| 1759 | |
| 1760 void RecordSpan(Span* span) { | |
| 1761 pagemap_.set(span->start, span); | |
| 1762 if (span->length > 1) { | |
| 1763 pagemap_.set(span->start + span->length - 1, span); | |
| 1764 } | |
| 1765 } | |
| 1766 | |
| 1767 // Allocate a large span of length == n. If successful, returns a | |
| 1768 // span of exactly the specified length. Else, returns NULL. | |
| 1769 Span* AllocLarge(Length n); | |
| 1770 | |
| 1771 #if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 1772 // Incrementally release some memory to the system. | |
| 1773 // IncrementalScavenge(n) is called whenever n pages are freed. | |
| 1774 void IncrementalScavenge(Length n); | |
| 1775 #endif | |
| 1776 | |
| 1777 // Number of pages to deallocate before doing more scavenging | |
| 1778 int64_t scavenge_counter_; | |
| 1779 | |
| 1780 // Index of last free list we scavenged | |
| 1781 size_t scavenge_index_; | |
| 1782 | |
| 1783 #if defined(WTF_CHANGES) && OS(DARWIN) | |
| 1784 friend class FastMallocZone; | |
| 1785 #endif | |
| 1786 | |
| 1787 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 1788 void initializeScavenger(); | |
| 1789 ALWAYS_INLINE void signalScavenger(); | |
| 1790 void scavenge(); | |
| 1791 ALWAYS_INLINE bool shouldScavenge() const; | |
| 1792 | |
| 1793 #if HAVE(DISPATCH_H) || OS(WINDOWS) | |
| 1794 void periodicScavenge(); | |
| 1795 ALWAYS_INLINE bool isScavengerSuspended(); | |
| 1796 ALWAYS_INLINE void scheduleScavenger(); | |
| 1797 ALWAYS_INLINE void rescheduleScavenger(); | |
| 1798 ALWAYS_INLINE void suspendScavenger(); | |
| 1799 #endif | |
| 1800 | |
| 1801 #if HAVE(DISPATCH_H) | |
| 1802 dispatch_queue_t m_scavengeQueue; | |
| 1803 dispatch_source_t m_scavengeTimer; | |
| 1804 bool m_scavengingSuspended; | |
| 1805 #elif OS(WINDOWS) | |
| 1806 static void CALLBACK scavengerTimerFired(void*, BOOLEAN); | |
| 1807 HANDLE m_scavengeQueueTimer; | |
| 1808 #else | |
| 1809 static NO_RETURN_WITH_VALUE void* runScavengerThread(void*); | |
| 1810 NO_RETURN void scavengerThread(); | |
| 1811 | |
| 1812   // Keeps track of whether the background thread is actively scavenging memory every kScavengeDelayInSeconds, or | |
| 1813   // whether it is blocked waiting for more pages to be deleted. | |
| 1814 bool m_scavengeThreadActive; | |
| 1815 | |
| 1816 pthread_mutex_t m_scavengeMutex; | |
| 1817 pthread_cond_t m_scavengeCondition; | |
| 1818 #endif | |
| 1819 | |
| 1820 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 1821 }; | |
| 1822 | |
| 1823 void TCMalloc_PageHeap::init() | |
| 1824 { | |
| 1825 pagemap_.init(MetaDataAlloc); | |
| 1826 pagemap_cache_ = PageMapCache(0); | |
| 1827 free_pages_ = 0; | |
| 1828 system_bytes_ = 0; | |
| 1829 entropy_ = HARDENING_ENTROPY; | |
| 1830 | |
| 1831 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 1832 free_committed_pages_ = 0; | |
| 1833 min_free_committed_pages_since_last_scavenge_ = 0; | |
| 1834 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 1835 | |
| 1836 scavenge_counter_ = 0; | |
| 1837 // Start scavenging at kMaxPages list | |
| 1838 scavenge_index_ = kMaxPages-1; | |
| 1839 COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits); | |
| 1840 DLL_Init(&large_.normal, entropy_); | |
| 1841 DLL_Init(&large_.returned, entropy_); | |
| 1842 for (size_t i = 0; i < kMaxPages; i++) { | |
| 1843 DLL_Init(&free_[i].normal, entropy_); | |
| 1844 DLL_Init(&free_[i].returned, entropy_); | |
| 1845 } | |
| 1846 | |
| 1847 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 1848 initializeScavenger(); | |
| 1849 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 1850 } | |
| 1851 | |
| 1852 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 1853 | |
| 1854 #if HAVE(DISPATCH_H) | |
| 1855 | |
| 1856 void TCMalloc_PageHeap::initializeScavenger() | |
| 1857 { | |
| 1858 m_scavengeQueue = dispatch_queue_create("com.apple.JavaScriptCore.FastMalloc
Savenger", NULL); | |
| 1859 m_scavengeTimer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, m
_scavengeQueue); | |
| 1860 uint64_t scavengeDelayInNanoseconds = kScavengeDelayInSeconds * NSEC_PER_SEC
; | |
| 1861 dispatch_time_t startTime = dispatch_time(DISPATCH_TIME_NOW, scavengeDelayIn
Nanoseconds); | |
| 1862 dispatch_source_set_timer(m_scavengeTimer, startTime, scavengeDelayInNanosec
onds, scavengeDelayInNanoseconds / 10); | |
| 1863 dispatch_source_set_event_handler(m_scavengeTimer, ^{ periodicScavenge(); })
; | |
| 1864 m_scavengingSuspended = true; | |
| 1865 } | |
| 1866 | |
| 1867 ALWAYS_INLINE bool TCMalloc_PageHeap::isScavengerSuspended() | |
| 1868 { | |
| 1869 ASSERT(pageheap_lock.IsHeld()); | |
| 1870 return m_scavengingSuspended; | |
| 1871 } | |
| 1872 | |
| 1873 ALWAYS_INLINE void TCMalloc_PageHeap::scheduleScavenger() | |
| 1874 { | |
| 1875 ASSERT(pageheap_lock.IsHeld()); | |
| 1876 m_scavengingSuspended = false; | |
| 1877 dispatch_resume(m_scavengeTimer); | |
| 1878 } | |
| 1879 | |
| 1880 ALWAYS_INLINE void TCMalloc_PageHeap::rescheduleScavenger() | |
| 1881 { | |
| 1882 // Nothing to do here for libdispatch. | |
| 1883 } | |
| 1884 | |
| 1885 ALWAYS_INLINE void TCMalloc_PageHeap::suspendScavenger() | |
| 1886 { | |
| 1887 ASSERT(pageheap_lock.IsHeld()); | |
| 1888 m_scavengingSuspended = true; | |
| 1889 dispatch_suspend(m_scavengeTimer); | |
| 1890 } | |
| 1891 | |
| 1892 #elif OS(WINDOWS) | |
| 1893 | |
| 1894 void TCMalloc_PageHeap::scavengerTimerFired(void* context, BOOLEAN) | |
| 1895 { | |
| 1896 static_cast<TCMalloc_PageHeap*>(context)->periodicScavenge(); | |
| 1897 } | |
| 1898 | |
| 1899 void TCMalloc_PageHeap::initializeScavenger() | |
| 1900 { | |
| 1901 m_scavengeQueueTimer = 0; | |
| 1902 } | |
| 1903 | |
| 1904 ALWAYS_INLINE bool TCMalloc_PageHeap::isScavengerSuspended() | |
| 1905 { | |
| 1906 ASSERT(pageheap_lock.IsHeld()); | |
| 1907 return !m_scavengeQueueTimer; | |
| 1908 } | |
| 1909 | |
| 1910 ALWAYS_INLINE void TCMalloc_PageHeap::scheduleScavenger() | |
| 1911 { | |
| 1912 // We need to use WT_EXECUTEONLYONCE here and reschedule the timer, because | |
| 1913 // Windows will fire the timer event even when the function is already runni
ng. | |
| 1914 ASSERT(pageheap_lock.IsHeld()); | |
| 1915 CreateTimerQueueTimer(&m_scavengeQueueTimer, 0, scavengerTimerFired, this, k
ScavengeDelayInSeconds * 1000, 0, WT_EXECUTEONLYONCE); | |
| 1916 } | |
| 1917 | |
| 1918 ALWAYS_INLINE void TCMalloc_PageHeap::rescheduleScavenger() | |
| 1919 { | |
| 1920 // We must delete the timer and create it again, because it is not possible
to retrigger a timer on Windows. | |
| 1921 suspendScavenger(); | |
| 1922 scheduleScavenger(); | |
| 1923 } | |
| 1924 | |
| 1925 ALWAYS_INLINE void TCMalloc_PageHeap::suspendScavenger() | |
| 1926 { | |
| 1927 ASSERT(pageheap_lock.IsHeld()); | |
| 1928 HANDLE scavengeQueueTimer = m_scavengeQueueTimer; | |
| 1929 m_scavengeQueueTimer = 0; | |
| 1930 DeleteTimerQueueTimer(0, scavengeQueueTimer, 0); | |
| 1931 } | |
| 1932 | |
| 1933 #else | |
| 1934 | |
| 1935 void TCMalloc_PageHeap::initializeScavenger() | |
| 1936 { | |
| 1937 // Create a non-recursive mutex. | |
| 1938 #if !defined(PTHREAD_MUTEX_NORMAL) || PTHREAD_MUTEX_NORMAL == PTHREAD_MUTEX_DEFA
ULT | |
| 1939 pthread_mutex_init(&m_scavengeMutex, 0); | |
| 1940 #else | |
| 1941 pthread_mutexattr_t attr; | |
| 1942 pthread_mutexattr_init(&attr); | |
| 1943 pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL); | |
| 1944 | |
| 1945 pthread_mutex_init(&m_scavengeMutex, &attr); | |
| 1946 | |
| 1947 pthread_mutexattr_destroy(&attr); | |
| 1948 #endif | |
| 1949 | |
| 1950 pthread_cond_init(&m_scavengeCondition, 0); | |
| 1951 m_scavengeThreadActive = true; | |
| 1952 pthread_t thread; | |
| 1953 pthread_create(&thread, 0, runScavengerThread, this); | |
| 1954 } | |
| 1955 | |
| 1956 void* TCMalloc_PageHeap::runScavengerThread(void* context) | |
| 1957 { | |
| 1958 static_cast<TCMalloc_PageHeap*>(context)->scavengerThread(); | |
| 1959 #if (COMPILER(MSVC) || COMPILER(SUNCC)) | |
| 1960 // Without this, Visual Studio and Sun Studio will complain that this method
does not return a value. | |
| 1961 return 0; | |
| 1962 #endif | |
| 1963 } | |
| 1964 | |
| 1965 ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger() | |
| 1966 { | |
| 1967     // shouldScavenge() should be called only while the pageheap_lock spinlock is held; additionally, | |
| 1968     // m_scavengeThreadActive is only set to false while pageheap_lock is held.  The caller must ensure the lock | |
| 1969     // is taken prior to calling this method.  If the scavenger thread is sleeping and shouldScavenge() indicates | |
| 1970     // there is memory to free, the scavenger thread is signalled to start. | |
| 1971 ASSERT(pageheap_lock.IsHeld()); | |
| 1972 if (!m_scavengeThreadActive && shouldScavenge()) | |
| 1973 pthread_cond_signal(&m_scavengeCondition); | |
| 1974 } | |
| 1975 | |
| 1976 #endif | |
| 1977 | |
| 1978 void TCMalloc_PageHeap::scavenge() | |
| 1979 { | |
| 1980 size_t pagesToRelease = min_free_committed_pages_since_last_scavenge_ * kSca
vengePercentage; | |
| 1981 size_t targetPageCount = std::max<size_t>(kMinimumFreeCommittedPageCount, fr
ee_committed_pages_ - pagesToRelease); | |
| 1982 | |
| 1983 Length lastFreeCommittedPages = free_committed_pages_; | |
| 1984 while (free_committed_pages_ > targetPageCount) { | |
| 1985 ASSERT(Check()); | |
| 1986 for (int i = kMaxPages; i > 0 && free_committed_pages_ >= targetPageCoun
t; i--) { | |
| 1987 SpanList* slist = (static_cast<size_t>(i) == kMaxPages) ? &large_ :
&free_[i]; | |
| 1988             // If the span size is bigger than kMinSpanListsWithSpans pages, return all the spans in the list; | |
| 1989             // otherwise return only half of the list at a time so that spans of size 1 are not the only ones left. | |
| 1990 size_t length = DLL_Length(&slist->normal, entropy_); | |
| 1991 size_t numSpansToReturn = (i > kMinSpanListsWithSpans) ? length : le
ngth / 2; | |
| 1992 for (int j = 0; static_cast<size_t>(j) < numSpansToReturn && !DLL_Is
Empty(&slist->normal, entropy_) && free_committed_pages_ > targetPageCount; j++)
{ | |
| 1993 Span* s = slist->normal.prev(entropy_); | |
| 1994 DLL_Remove(s, entropy_); | |
| 1995 ASSERT(!s->decommitted); | |
| 1996 if (!s->decommitted) { | |
| 1997 TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << k
PageShift), | |
| 1998 static_cast<size_t>(s->length << kPag
eShift)); | |
| 1999 ASSERT(free_committed_pages_ >= s->length); | |
| 2000 free_committed_pages_ -= s->length; | |
| 2001 s->decommitted = true; | |
| 2002 } | |
| 2003 DLL_Prepend(&slist->returned, s, entropy_); | |
| 2004 } | |
| 2005 } | |
| 2006 | |
| 2007 if (lastFreeCommittedPages == free_committed_pages_) | |
| 2008 break; | |
| 2009 lastFreeCommittedPages = free_committed_pages_; | |
| 2010 } | |
| 2011 | |
| 2012 min_free_committed_pages_since_last_scavenge_ = free_committed_pages_; | |
| 2013 } | |
| 2014 | |
| 2015 ALWAYS_INLINE bool TCMalloc_PageHeap::shouldScavenge() const | |
| 2016 { | |
| 2017 return free_committed_pages_ > kMinimumFreeCommittedPageCount; | |
| 2018 } | |
| 2019 | |
| 2020 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
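| // Worked example (hypothetical numbers, not from the original source): if | |
| // free_committed_pages_ == 1000 and min_free_committed_pages_since_last_scavenge_ == 800, | |
| // then pagesToRelease == 800 * kScavengePercentage == 400 and targetPageCount == | |
| // max(kMinimumFreeCommittedPageCount, 1000 - 400) == max(528, 600) == 600, so scavenge() | |
| // keeps decommitting spans until at most 600 free pages remain committed (or a pass makes | |
| // no further progress). | |
| #if 0 // standalone sketch of the same target computation; scavengeTarget is a hypothetical name | |
| #include <algorithm> | |
| #include <cstddef> | |
| static std::size_t scavengeTarget(std::size_t freeCommitted, std::size_t minSinceLastScavenge) { | |
|     const std::size_t pagesToRelease = static_cast<std::size_t>(minSinceLastScavenge * 0.5f); // kScavengePercentage | |
|     return std::max<std::size_t>(528 /* kMinimumFreeCommittedPageCount */, freeCommitted - pagesToRelease); | |
| } | |
| // scavengeTarget(1000, 800) == 600 | |
| #endif | |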
| 2021 | |
| 2022 inline Span* TCMalloc_PageHeap::New(Length n) { | |
| 2023 ASSERT(Check()); | |
| 2024 ASSERT(n > 0); | |
| 2025 | |
| 2026 // Find first size >= n that has a non-empty list | |
| 2027 for (Length s = n; s < kMaxPages; s++) { | |
| 2028 Span* ll = NULL; | |
| 2029 bool released = false; | |
| 2030 if (!DLL_IsEmpty(&free_[s].normal, entropy_)) { | |
| 2031 // Found normal span | |
| 2032 ll = &free_[s].normal; | |
| 2033 } else if (!DLL_IsEmpty(&free_[s].returned, entropy_)) { | |
| 2034 // Found returned span; reallocate it | |
| 2035 ll = &free_[s].returned; | |
| 2036 released = true; | |
| 2037 } else { | |
| 2038 // Keep looking in larger classes | |
| 2039 continue; | |
| 2040 } | |
| 2041 | |
| 2042 Span* result = ll->next(entropy_); | |
| 2043 Carve(result, n, released); | |
| 2044 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 2045 // The newly allocated memory is from a span that's in the normal span list
(already committed). Update the | |
| 2046 // free committed pages count. | |
| 2047 ASSERT(free_committed_pages_ >= n); | |
| 2048 free_committed_pages_ -= n; | |
| 2049 if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_) | |
| 2050 min_free_committed_pages_since_last_scavenge_ = free_committed_pages_; | |
| 2051 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 2052 ASSERT(Check()); | |
| 2053 free_pages_ -= n; | |
| 2054 return result; | |
| 2055 } | |
| 2056 | |
| 2057 Span* result = AllocLarge(n); | |
| 2058 if (result != NULL) { | |
| 2059 ASSERT_SPAN_COMMITTED(result); | |
| 2060 return result; | |
| 2061 } | |
| 2062 | |
| 2063 // Grow the heap and try again | |
| 2064 if (!GrowHeap(n)) { | |
| 2065 ASSERT(Check()); | |
| 2066 return NULL; | |
| 2067 } | |
| 2068 | |
| 2069 return New(n); | |
| 2070 } | |
| 2071 | |
| 2072 Span* TCMalloc_PageHeap::AllocLarge(Length n) { | |
| 2073   // Find the best span (closest to n in size). | |
| 2074   // The following loops implement address-ordered best-fit. | |
| 2075 bool from_released = false; | |
| 2076 Span *best = NULL; | |
| 2077 | |
| 2078 // Search through normal list | |
| 2079 for (Span* span = large_.normal.next(entropy_); | |
| 2080 span != &large_.normal; | |
| 2081 span = span->next(entropy_)) { | |
| 2082 if (span->length >= n) { | |
| 2083 if ((best == NULL) | |
| 2084 || (span->length < best->length) | |
| 2085 || ((span->length == best->length) && (span->start < best->start))) { | |
| 2086 best = span; | |
| 2087 from_released = false; | |
| 2088 } | |
| 2089 } | |
| 2090 } | |
| 2091 | |
| 2092 // Search through released list in case it has a better fit | |
| 2093 for (Span* span = large_.returned.next(entropy_); | |
| 2094 span != &large_.returned; | |
| 2095 span = span->next(entropy_)) { | |
| 2096 if (span->length >= n) { | |
| 2097 if ((best == NULL) | |
| 2098 || (span->length < best->length) | |
| 2099 || ((span->length == best->length) && (span->start < best->start))) { | |
| 2100 best = span; | |
| 2101 from_released = true; | |
| 2102 } | |
| 2103 } | |
| 2104 } | |
| 2105 | |
| 2106 if (best != NULL) { | |
| 2107 Carve(best, n, from_released); | |
| 2108 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 2109 // The newly allocated memory is from a span that's in the normal span list
(already committed). Update the | |
| 2110 // free committed pages count. | |
| 2111 ASSERT(free_committed_pages_ >= n); | |
| 2112 free_committed_pages_ -= n; | |
| 2113 if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_) | |
| 2114 min_free_committed_pages_since_last_scavenge_ = free_committed_pages_; | |
| 2115 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 2116 ASSERT(Check()); | |
| 2117 free_pages_ -= n; | |
| 2118 return best; | |
| 2119 } | |
| 2120 return NULL; | |
| 2121 } | |
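| // Illustrative sketch (not from the original source) of the selection rule the two loops | |
| // above implement: among all spans with length >= n, prefer the shortest, and break ties | |
| // by the lowest start address ("address-ordered best fit"). ExampleSpan and isBetterFit | |
| // are hypothetical names used only for this sketch. | |
| #if 0 | |
| #include <cstddef> | |
| struct ExampleSpan { std::size_t start; std::size_t length; }; | |
| static bool isBetterFit(const ExampleSpan* candidate, const ExampleSpan* best, std::size_t n) { | |
|     if (candidate->length < n) | |
|         return false;                             // cannot satisfy the request at all | |
|     if (!best) | |
|         return true;                              // first acceptable span seen | |
|     if (candidate->length != best->length) | |
|         return candidate->length < best->length;  // tighter fit wins | |
|     return candidate->start < best->start;        // tie: lower address wins | |
| } | |
| #endif | |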
| 2122 | |
| 2123 Span* TCMalloc_PageHeap::Split(Span* span, Length n) { | |
| 2124 ASSERT(0 < n); | |
| 2125 ASSERT(n < span->length); | |
| 2126 ASSERT(!span->free); | |
| 2127 ASSERT(span->sizeclass == 0); | |
| 2128 Event(span, 'T', n); | |
| 2129 | |
| 2130 const Length extra = span->length - n; | |
| 2131 Span* leftover = NewSpan(span->start + n, extra); | |
| 2132 Event(leftover, 'U', extra); | |
| 2133 RecordSpan(leftover); | |
| 2134 pagemap_.set(span->start + n - 1, span); // Update map from pageid to span | |
| 2135 span->length = n; | |
| 2136 | |
| 2137 return leftover; | |
| 2138 } | |
| 2139 | |
| 2140 inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) { | |
| 2141 ASSERT(n > 0); | |
| 2142 DLL_Remove(span, entropy_); | |
| 2143 span->free = 0; | |
| 2144 Event(span, 'A', n); | |
| 2145 | |
| 2146 if (released) { | |
| 2147     // If the span chosen to carve from is decommitted, commit the entire span at once to avoid committing spans 1 page at a time. | |
| 2148 ASSERT(span->decommitted); | |
| 2149 TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift), st
atic_cast<size_t>(span->length << kPageShift)); | |
| 2150 span->decommitted = false; | |
| 2151 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 2152 free_committed_pages_ += span->length; | |
| 2153 #endif | |
| 2154 } | |
| 2155 | |
| 2156 const int extra = static_cast<int>(span->length - n); | |
| 2157 ASSERT(extra >= 0); | |
| 2158 if (extra > 0) { | |
| 2159 Span* leftover = NewSpan(span->start + n, extra); | |
| 2160 leftover->free = 1; | |
| 2161 leftover->decommitted = false; | |
| 2162 Event(leftover, 'S', extra); | |
| 2163 RecordSpan(leftover); | |
| 2164 | |
| 2165 // Place leftover span on appropriate free list | |
| 2166 SpanList* listpair = (static_cast<size_t>(extra) < kMaxPages) ? &free_[extra
] : &large_; | |
| 2167 Span* dst = &listpair->normal; | |
| 2168 DLL_Prepend(dst, leftover, entropy_); | |
| 2169 | |
| 2170 span->length = n; | |
| 2171 pagemap_.set(span->start + n - 1, span); | |
| 2172 } | |
| 2173 } | |
| 2174 | |
| 2175 static ALWAYS_INLINE void mergeDecommittedStates(Span* destination, Span* other) | |
| 2176 { | |
| 2177 if (destination->decommitted && !other->decommitted) { | |
| 2178 TCMalloc_SystemRelease(reinterpret_cast<void*>(other->start << kPageShif
t), | |
| 2179 static_cast<size_t>(other->length << kPageShift))
; | |
| 2180 } else if (other->decommitted && !destination->decommitted) { | |
| 2181 TCMalloc_SystemRelease(reinterpret_cast<void*>(destination->start << kPa
geShift), | |
| 2182 static_cast<size_t>(destination->length << kPageS
hift)); | |
| 2183 destination->decommitted = true; | |
| 2184 } | |
| 2185 } | |
| 2186 | |
| 2187 inline void TCMalloc_PageHeap::Delete(Span* span) { | |
| 2188 ASSERT(Check()); | |
| 2189 ASSERT(!span->free); | |
| 2190 ASSERT(span->length > 0); | |
| 2191 ASSERT(GetDescriptor(span->start) == span); | |
| 2192 ASSERT(GetDescriptor(span->start + span->length - 1) == span); | |
| 2193 span->sizeclass = 0; | |
| 2194 #ifndef NO_TCMALLOC_SAMPLES | |
| 2195 span->sample = 0; | |
| 2196 #endif | |
| 2197 | |
| 2198 // Coalesce -- we guarantee that "p" != 0, so no bounds checking | |
| 2199 // necessary. We do not bother resetting the stale pagemap | |
| 2200 // entries for the pieces we are merging together because we only | |
| 2201 // care about the pagemap entries for the boundaries. | |
| 2202 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 2203 // Track the total size of the neighboring free spans that are committed. | |
| 2204 Length neighboringCommittedSpansLength = 0; | |
| 2205 #endif | |
| 2206 const PageID p = span->start; | |
| 2207 const Length n = span->length; | |
| 2208 Span* prev = GetDescriptor(p-1); | |
| 2209 if (prev != NULL && prev->free) { | |
| 2210 // Merge preceding span into this span | |
| 2211 ASSERT(prev->start + prev->length == p); | |
| 2212 const Length len = prev->length; | |
| 2213 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 2214 if (!prev->decommitted) | |
| 2215 neighboringCommittedSpansLength += len; | |
| 2216 #endif | |
| 2217 mergeDecommittedStates(span, prev); | |
| 2218 DLL_Remove(prev, entropy_); | |
| 2219 DeleteSpan(prev); | |
| 2220 span->start -= len; | |
| 2221 span->length += len; | |
| 2222 pagemap_.set(span->start, span); | |
| 2223 Event(span, 'L', len); | |
| 2224 } | |
| 2225 Span* next = GetDescriptor(p+n); | |
| 2226 if (next != NULL && next->free) { | |
| 2227 // Merge next span into this span | |
| 2228 ASSERT(next->start == p+n); | |
| 2229 const Length len = next->length; | |
| 2230 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 2231 if (!next->decommitted) | |
| 2232 neighboringCommittedSpansLength += len; | |
| 2233 #endif | |
| 2234 mergeDecommittedStates(span, next); | |
| 2235 DLL_Remove(next, entropy_); | |
| 2236 DeleteSpan(next); | |
| 2237 span->length += len; | |
| 2238 pagemap_.set(span->start + span->length - 1, span); | |
| 2239 Event(span, 'R', len); | |
| 2240 } | |
| 2241 | |
| 2242 Event(span, 'D', span->length); | |
| 2243 span->free = 1; | |
| 2244 if (span->decommitted) { | |
| 2245 if (span->length < kMaxPages) | |
| 2246 DLL_Prepend(&free_[span->length].returned, span, entropy_); | |
| 2247 else | |
| 2248 DLL_Prepend(&large_.returned, span, entropy_); | |
| 2249 } else { | |
| 2250 if (span->length < kMaxPages) | |
| 2251 DLL_Prepend(&free_[span->length].normal, span, entropy_); | |
| 2252 else | |
| 2253 DLL_Prepend(&large_.normal, span, entropy_); | |
| 2254 } | |
| 2255 free_pages_ += n; | |
| 2256 | |
| 2257 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 2258 if (span->decommitted) { | |
| 2259 // If the merged span is decommitted, that means we decommitted any neighb
oring spans that were | |
| 2260 // committed. Update the free committed pages count. | |
| 2261 free_committed_pages_ -= neighboringCommittedSpansLength; | |
| 2262 if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_) | |
| 2263 min_free_committed_pages_since_last_scavenge_ = free_committed_pages
_; | |
| 2264 } else { | |
| 2265 // If the merged span remains committed, add the deleted span's size to th
e free committed pages count. | |
| 2266 free_committed_pages_ += n; | |
| 2267 } | |
| 2268 | |
| 2269 // Make sure the scavenge thread becomes active if we have enough freed pages
to release some back to the system. | |
| 2270 signalScavenger(); | |
| 2271 #else | |
| 2272 IncrementalScavenge(n); | |
| 2273 #endif | |
| 2274 | |
| 2275 ASSERT(Check()); | |
| 2276 } | |
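| // Worked example (hypothetical page numbers, not from the original source): if the freed | |
| // span covers pages [100, 104], GetDescriptor(99) returns a free span covering [90, 99], | |
| // and GetDescriptor(105) returns a free span covering [105, 120], Delete() merges all | |
| // three into a single free span [90, 120]. Only the boundary pagemap entries (pages 90 | |
| // and 120) are rewritten, because RecordSpan() always stores a span at its first and last | |
| // page and the coalescing code above only ever consults those boundary entries. | |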
| 2277 | |
| 2278 #if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 2279 void TCMalloc_PageHeap::IncrementalScavenge(Length n) { | |
| 2280 // Fast path; not yet time to release memory | |
| 2281 scavenge_counter_ -= n; | |
| 2282 if (scavenge_counter_ >= 0) return; // Not yet time to scavenge | |
| 2283 | |
| 2284 // If there is nothing to release, wait for so many pages before | |
| 2285   // scavenging again.  With 4K pages, this comes to 1MB of memory. | |
| 2286 static const size_t kDefaultReleaseDelay = 1 << 8; | |
| 2287 | |
| 2288 // Find index of free list to scavenge | |
| 2289 size_t index = scavenge_index_ + 1; | |
| 2290 uintptr_t entropy = entropy_; | |
| 2291 for (size_t i = 0; i < kMaxPages+1; i++) { | |
| 2292 if (index > kMaxPages) index = 0; | |
| 2293 SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index]; | |
| 2294 if (!DLL_IsEmpty(&slist->normal, entropy)) { | |
| 2295 // Release the last span on the normal portion of this list | |
| 2296 Span* s = slist->normal.prev(entropy); | |
| 2297 DLL_Remove(s, entropy_); | |
| 2298 TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift), | |
| 2299 static_cast<size_t>(s->length << kPageShift)); | |
| 2300 s->decommitted = true; | |
| 2301 DLL_Prepend(&slist->returned, s, entropy); | |
| 2302 | |
| 2303 scavenge_counter_ = std::max<size_t>(64UL, std::min<size_t>(kDefaultReleas
eDelay, kDefaultReleaseDelay - (free_pages_ / kDefaultReleaseDelay))); | |
| 2304 | |
| 2305 if (index == kMaxPages && !DLL_IsEmpty(&slist->normal, entropy)) | |
| 2306 scavenge_index_ = index - 1; | |
| 2307 else | |
| 2308 scavenge_index_ = index; | |
| 2309 return; | |
| 2310 } | |
| 2311 index++; | |
| 2312 } | |
| 2313 | |
| 2314 // Nothing to scavenge, delay for a while | |
| 2315 scavenge_counter_ = kDefaultReleaseDelay; | |
| 2316 } | |
| 2317 #endif | |
| 2318 | |
| 2319 void TCMalloc_PageHeap::RegisterSizeClass(Span* span, size_t sc) { | |
| 2320 // Associate span object with all interior pages as well | |
| 2321 ASSERT(!span->free); | |
| 2322 ASSERT(GetDescriptor(span->start) == span); | |
| 2323 ASSERT(GetDescriptor(span->start+span->length-1) == span); | |
| 2324 Event(span, 'C', sc); | |
| 2325 span->sizeclass = static_cast<unsigned int>(sc); | |
| 2326 for (Length i = 1; i < span->length-1; i++) { | |
| 2327 pagemap_.set(span->start+i, span); | |
| 2328 } | |
| 2329 } | |
| 2330 | |
| 2331 #ifdef WTF_CHANGES | |
| 2332 size_t TCMalloc_PageHeap::ReturnedBytes() const { | |
| 2333 size_t result = 0; | |
| 2334 for (unsigned s = 0; s < kMaxPages; s++) { | |
| 2335 const int r_length = DLL_Length(&free_[s].returned, entropy_); | |
| 2336 unsigned r_pages = s * r_length; | |
| 2337 result += r_pages << kPageShift; | |
| 2338 } | |
| 2339 | |
| 2340 for (Span* s = large_.returned.next(entropy_); s != &large_.returned; s = s-
>next(entropy_)) | |
| 2341 result += s->length << kPageShift; | |
| 2342 return result; | |
| 2343 } | |
| 2344 #endif | |
| 2345 | |
| 2346 #ifndef WTF_CHANGES | |
| 2347 static double PagesToMB(uint64_t pages) { | |
| 2348 return (pages << kPageShift) / 1048576.0; | |
| 2349 } | |
| 2350 | |
| 2351 void TCMalloc_PageHeap::Dump(TCMalloc_Printer* out) { | |
| 2352 int nonempty_sizes = 0; | |
| 2353 for (int s = 0; s < kMaxPages; s++) { | |
| 2354 if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) { | |
| 2355 nonempty_sizes++; | |
| 2356 } | |
| 2357 } | |
| 2358 out->printf("------------------------------------------------\n"); | |
| 2359 out->printf("PageHeap: %d sizes; %6.1f MB free\n", | |
| 2360 nonempty_sizes, PagesToMB(free_pages_)); | |
| 2361 out->printf("------------------------------------------------\n"); | |
| 2362 uint64_t total_normal = 0; | |
| 2363 uint64_t total_returned = 0; | |
| 2364 for (int s = 0; s < kMaxPages; s++) { | |
| 2365 const int n_length = DLL_Length(&free_[s].normal); | |
| 2366 const int r_length = DLL_Length(&free_[s].returned); | |
| 2367 if (n_length + r_length > 0) { | |
| 2368 uint64_t n_pages = s * n_length; | |
| 2369 uint64_t r_pages = s * r_length; | |
| 2370 total_normal += n_pages; | |
| 2371 total_returned += r_pages; | |
| 2372 out->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum" | |
| 2373 "; unmapped: %6.1f MB; %6.1f MB cum\n", | |
| 2374 s, | |
| 2375 (n_length + r_length), | |
| 2376 PagesToMB(n_pages + r_pages), | |
| 2377 PagesToMB(total_normal + total_returned), | |
| 2378 PagesToMB(r_pages), | |
| 2379 PagesToMB(total_returned)); | |
| 2380 } | |
| 2381 } | |
| 2382 | |
| 2383 uint64_t n_pages = 0; | |
| 2384 uint64_t r_pages = 0; | |
| 2385 int n_spans = 0; | |
| 2386 int r_spans = 0; | |
| 2387 out->printf("Normal large spans:\n"); | |
| 2388 for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) { | |
| 2389 out->printf(" [ %6" PRIuS " pages ] %6.1f MB\n", | |
| 2390 s->length, PagesToMB(s->length)); | |
| 2391 n_pages += s->length; | |
| 2392 n_spans++; | |
| 2393 } | |
| 2394 out->printf("Unmapped large spans:\n"); | |
| 2395 for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) { | |
| 2396 out->printf(" [ %6" PRIuS " pages ] %6.1f MB\n", | |
| 2397 s->length, PagesToMB(s->length)); | |
| 2398 r_pages += s->length; | |
| 2399 r_spans++; | |
| 2400 } | |
| 2401 total_normal += n_pages; | |
| 2402 total_returned += r_pages; | |
| 2403 out->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum" | |
| 2404 "; unmapped: %6.1f MB; %6.1f MB cum\n", | |
| 2405 (n_spans + r_spans), | |
| 2406 PagesToMB(n_pages + r_pages), | |
| 2407 PagesToMB(total_normal + total_returned), | |
| 2408 PagesToMB(r_pages), | |
| 2409 PagesToMB(total_returned)); | |
| 2410 } | |
| 2411 #endif | |
| 2412 | |
| 2413 bool TCMalloc_PageHeap::GrowHeap(Length n) { | |
| 2414 ASSERT(kMaxPages >= kMinSystemAlloc); | |
| 2415 if (n > kMaxValidPages) return false; | |
| 2416 Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc); | |
| 2417 size_t actual_size; | |
| 2418 void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); | |
| 2419 if (ptr == NULL) { | |
| 2420 if (n < ask) { | |
| 2421 // Try growing just "n" pages | |
| 2422 ask = n; | |
| 2423 ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); | |
| 2424 } | |
| 2425 if (ptr == NULL) return false; | |
| 2426 } | |
| 2427 ask = actual_size >> kPageShift; | |
| 2428 | |
| 2429 uint64_t old_system_bytes = system_bytes_; | |
| 2430 system_bytes_ += (ask << kPageShift); | |
| 2431 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; | |
| 2432 ASSERT(p > 0); | |
| 2433 | |
| 2434   // If we already have a lot of pages allocated, just preallocate a bunch of | |
| 2435 // memory for the page map. This prevents fragmentation by pagemap metadata | |
| 2436 // when a program keeps allocating and freeing large blocks. | |
| 2437 | |
| 2438 if (old_system_bytes < kPageMapBigAllocationThreshold | |
| 2439 && system_bytes_ >= kPageMapBigAllocationThreshold) { | |
| 2440 pagemap_.PreallocateMoreMemory(); | |
| 2441 } | |
| 2442 | |
| 2443 // Make sure pagemap_ has entries for all of the new pages. | |
| 2444 // Plus ensure one before and one after so coalescing code | |
| 2445 // does not need bounds-checking. | |
| 2446 if (pagemap_.Ensure(p-1, ask+2)) { | |
| 2447 // Pretend the new area is allocated and then Delete() it to | |
| 2448 // cause any necessary coalescing to occur. | |
| 2449 // | |
| 2450 // We do not adjust free_pages_ here since Delete() will do it for us. | |
| 2451 Span* span = NewSpan(p, ask); | |
| 2452 RecordSpan(span); | |
| 2453 Delete(span); | |
| 2454 ASSERT(Check()); | |
| 2455 return true; | |
| 2456 } else { | |
| 2457 // We could not allocate memory within "pagemap_" | |
| 2458 // TODO: Once we can return memory to the system, return the new span | |
| 2459 return false; | |
| 2460 } | |
| 2461 } | |
| 2462 | |
| 2463 bool TCMalloc_PageHeap::Check() { | |
| 2464 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 2465 size_t totalFreeCommitted = 0; | |
| 2466 #endif | |
| 2467 ASSERT(free_[0].normal.next(entropy_) == &free_[0].normal); | |
| 2468 ASSERT(free_[0].returned.next(entropy_) == &free_[0].returned); | |
| 2469 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 2470 totalFreeCommitted = CheckList(&large_.normal, kMaxPages, 1000000000, false); | |
| 2471 #else | |
| 2472 CheckList(&large_.normal, kMaxPages, 1000000000, false); | |
| 2473 #endif | |
| 2474 CheckList(&large_.returned, kMaxPages, 1000000000, true); | |
| 2475 for (Length s = 1; s < kMaxPages; s++) { | |
| 2476 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 2477 totalFreeCommitted += CheckList(&free_[s].normal, s, s, false); | |
| 2478 #else | |
| 2479 CheckList(&free_[s].normal, s, s, false); | |
| 2480 #endif | |
| 2481 CheckList(&free_[s].returned, s, s, true); | |
| 2482 } | |
| 2483 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 2484 ASSERT(totalFreeCommitted == free_committed_pages_); | |
| 2485 #endif | |
| 2486 return true; | |
| 2487 } | |
| 2488 | |
| 2489 #if ASSERT_DISABLED | |
| 2490 size_t TCMalloc_PageHeap::CheckList(Span*, Length, Length, bool) { | |
| 2491 return 0; | |
| 2492 } | |
| 2493 #else | |
| 2494 size_t TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pag
es, bool decommitted) { | |
| 2495 size_t freeCount = 0; | |
| 2496 for (Span* s = list->next(entropy_); s != list; s = s->next(entropy_)) { | |
| 2497 CHECK_CONDITION(s->free); | |
| 2498 CHECK_CONDITION(s->length >= min_pages); | |
| 2499 CHECK_CONDITION(s->length <= max_pages); | |
| 2500 CHECK_CONDITION(GetDescriptor(s->start) == s); | |
| 2501 CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s); | |
| 2502 CHECK_CONDITION(s->decommitted == decommitted); | |
| 2503 freeCount += s->length; | |
| 2504 } | |
| 2505 return freeCount; | |
| 2506 } | |
| 2507 #endif | |
| 2508 | |
| 2509 void TCMalloc_PageHeap::ReleaseFreeList(Span* list, Span* returned) { | |
| 2510 // Walk backwards through list so that when we push these | |
| 2511 // spans on the "returned" list, we preserve the order. | |
| 2512 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 2513 size_t freePageReduction = 0; | |
| 2514 #endif | |
| 2515 | |
| 2516 while (!DLL_IsEmpty(list, entropy_)) { | |
| 2517 Span* s = list->prev(entropy_); | |
| 2518 | |
| 2519 DLL_Remove(s, entropy_); | |
| 2520 s->decommitted = true; | |
| 2521 DLL_Prepend(returned, s, entropy_); | |
| 2522 TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift), | |
| 2523 static_cast<size_t>(s->length << kPageShift)); | |
| 2524 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 2525 freePageReduction += s->length; | |
| 2526 #endif | |
| 2527 } | |
| 2528 | |
| 2529 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 2530 free_committed_pages_ -= freePageReduction; | |
| 2531 if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_) | |
| 2532 min_free_committed_pages_since_last_scavenge_ = free_committed_pages_; | |
| 2533 #endif | |
| 2534 } | |
| 2535 | |
| 2536 void TCMalloc_PageHeap::ReleaseFreePages() { | |
| 2537 for (Length s = 0; s < kMaxPages; s++) { | |
| 2538 ReleaseFreeList(&free_[s].normal, &free_[s].returned); | |
| 2539 } | |
| 2540 ReleaseFreeList(&large_.normal, &large_.returned); | |
| 2541 ASSERT(Check()); | |
| 2542 } | |
| 2543 | |
| 2544 //------------------------------------------------------------------- | |
| 2545 // Free list | |
| 2546 //------------------------------------------------------------------- | |
| 2547 | |
| 2548 class TCMalloc_ThreadCache_FreeList { | |
| 2549 private: | |
| 2550 HardenedSLL list_; // Linked list of nodes | |
| 2551 uint16_t length_; // Current length | |
| 2552 uint16_t lowater_; // Low water mark for list length | |
| 2553 uintptr_t entropy_; // Entropy source for hardening | |
| 2554 | |
| 2555 public: | |
| 2556 void Init(uintptr_t entropy) { | |
| 2557 list_.setValue(NULL); | |
| 2558 length_ = 0; | |
| 2559 lowater_ = 0; | |
| 2560 entropy_ = entropy; | |
| 2561 #if ENABLE(TCMALLOC_HARDENING) | |
| 2562 ASSERT(entropy_); | |
| 2563 #endif | |
| 2564 } | |
| 2565 | |
| 2566 // Return current length of list | |
| 2567 int length() const { | |
| 2568 return length_; | |
| 2569 } | |
| 2570 | |
| 2571 // Is list empty? | |
| 2572 bool empty() const { | |
| 2573 return !list_; | |
| 2574 } | |
| 2575 | |
| 2576 // Low-water mark management | |
| 2577 int lowwatermark() const { return lowater_; } | |
| 2578 void clear_lowwatermark() { lowater_ = length_; } | |
| 2579 | |
| 2580 ALWAYS_INLINE void Push(HardenedSLL ptr) { | |
| 2581 SLL_Push(&list_, ptr, entropy_); | |
| 2582 length_++; | |
| 2583 } | |
| 2584 | |
| 2585 void PushRange(int N, HardenedSLL start, HardenedSLL end) { | |
| 2586 SLL_PushRange(&list_, start, end, entropy_); | |
| 2587 length_ = length_ + static_cast<uint16_t>(N); | |
| 2588 } | |
| 2589 | |
| 2590 void PopRange(int N, HardenedSLL* start, HardenedSLL* end) { | |
| 2591 SLL_PopRange(&list_, N, start, end, entropy_); | |
| 2592 ASSERT(length_ >= N); | |
| 2593 length_ = length_ - static_cast<uint16_t>(N); | |
| 2594 if (length_ < lowater_) lowater_ = length_; | |
| 2595 } | |
| 2596 | |
| 2597 ALWAYS_INLINE void* Pop() { | |
| 2598 ASSERT(list_); | |
| 2599 length_--; | |
| 2600 if (length_ < lowater_) lowater_ = length_; | |
| 2601 return SLL_Pop(&list_, entropy_).value(); | |
| 2602 } | |
| 2603 | |
| 2604     // Walks the linked list to ensure that it can be | |
| 2605     // traversed safely, and checks that 'missing' | |
| 2606     // is not present. | |
| 2607 NEVER_INLINE void Validate(HardenedSLL missing, size_t size) { | |
| 2608 HardenedSLL node = list_; | |
| 2609 UNUSED_PARAM(size); | |
| 2610 while (node) { | |
| 2611 RELEASE_ASSERT(node != missing); | |
| 2612 RELEASE_ASSERT(IS_DEFINITELY_POISONED(node.value(), size)); | |
| 2613 node = SLL_Next(node, entropy_); | |
| 2614 } | |
| 2615 } | |
| 2616 | |
| 2617 #ifdef WTF_CHANGES | |
| 2618 template <class Finder, class Reader> | |
| 2619 void enumerateFreeObjects(Finder& finder, const Reader& reader) | |
| 2620 { | |
| 2621 for (HardenedSLL nextObject = list_; nextObject; nextObject.setValue(reade
r.nextEntryInHardenedLinkedList(reinterpret_cast<void**>(nextObject.value()), en
tropy_))) | |
| 2622 finder.visit(nextObject.value()); | |
| 2623 } | |
| 2624 #endif | |
| 2625 }; | |
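| // Illustrative sketch (not from the original source): how a low-water mark like lowater_ | |
| // above is typically used. Between two scavenge passes, lowater_ is the smallest length | |
| // the list ever reached, so roughly that many objects were never touched and are | |
| // candidates for returning to the central cache. The helper below is hypothetical and is | |
| // not the actual TCMalloc_ThreadCache::Scavenge(). | |
| #if 0 | |
| static int objectsSafeToReturn(int lowWaterMark) { | |
|     // Returning about half of the untouched objects per pass shrinks an idle list | |
|     // gradually instead of emptying it all at once. | |
|     return lowWaterMark / 2; | |
| } | |
| #endif | |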
| 2626 | |
| 2627 //------------------------------------------------------------------- | |
| 2628 // Data kept per thread | |
| 2629 //------------------------------------------------------------------- | |
| 2630 | |
| 2631 class TCMalloc_ThreadCache { | |
| 2632 private: | |
| 2633 typedef TCMalloc_ThreadCache_FreeList FreeList; | |
| 2634 #if OS(WINDOWS) | |
| 2635 typedef DWORD ThreadIdentifier; | |
| 2636 #else | |
| 2637 typedef pthread_t ThreadIdentifier; | |
| 2638 #endif | |
| 2639 | |
| 2640 size_t size_; // Combined size of data | |
| 2641 ThreadIdentifier tid_; // Which thread owns it | |
| 2642 bool in_setspecific_; // Called pthread_setspecific? | |
| 2643 FreeList list_[kNumClasses]; // Array indexed by size-class | |
| 2644 | |
| 2645 // We sample allocations, biased by the size of the allocation | |
| 2646 uint32_t rnd_; // Cheap random number generator | |
| 2647 size_t bytes_until_sample_; // Bytes until we sample next | |
| 2648 | |
| 2649 uintptr_t entropy_; // Entropy value used for hardening | |
| 2650 | |
| 2651 // Allocate a new heap. REQUIRES: pageheap_lock is held. | |
| 2652 static inline TCMalloc_ThreadCache* NewHeap(ThreadIdentifier tid, uintptr_t en
tropy); | |
| 2653 | |
| 2654 // Use only as pthread thread-specific destructor function. | |
| 2655 static void DestroyThreadCache(void* ptr); | |
| 2656 public: | |
| 2657 // All ThreadCache objects are kept in a linked list (for stats collection) | |
| 2658 TCMalloc_ThreadCache* next_; | |
| 2659 TCMalloc_ThreadCache* prev_; | |
| 2660 | |
| 2661 void Init(ThreadIdentifier tid, uintptr_t entropy); | |
| 2662 void Cleanup(); | |
| 2663 | |
| 2664 // Accessors (mostly just for printing stats) | |
| 2665 int freelist_length(size_t cl) const { return list_[cl].length(); } | |
| 2666 | |
| 2667 // Total byte size in cache | |
| 2668 size_t Size() const { return size_; } | |
| 2669 | |
| 2670 ALWAYS_INLINE void* Allocate(size_t size); | |
| 2671 void Deallocate(HardenedSLL ptr, size_t size_class); | |
| 2672 | |
| 2673 ALWAYS_INLINE void FetchFromCentralCache(size_t cl, size_t allocationSize); | |
| 2674 void ReleaseToCentralCache(size_t cl, int N); | |
| 2675 void Scavenge(); | |
| 2676 void Print() const; | |
| 2677 | |
| 2678 // Record allocation of "k" bytes. Return true iff allocation | |
| 2679 // should be sampled | |
| 2680 bool SampleAllocation(size_t k); | |
| 2681 | |
| 2682 // Pick next sampling point | |
| 2683 void PickNextSample(size_t k); | |
| 2684 | |
| 2685 static void InitModule(); | |
| 2686 static void InitTSD(); | |
| 2687 static TCMalloc_ThreadCache* GetThreadHeap(); | |
| 2688 static TCMalloc_ThreadCache* GetCache(); | |
| 2689 static TCMalloc_ThreadCache* GetCacheIfPresent(); | |
| 2690 static TCMalloc_ThreadCache* CreateCacheIfNecessary(); | |
| 2691 static void DeleteCache(TCMalloc_ThreadCache* heap); | |
| 2692 static void BecomeIdle(); | |
| 2693 static void RecomputeThreadCacheSize(); | |
| 2694 | |
| 2695 #ifdef WTF_CHANGES | |
| 2696 template <class Finder, class Reader> | |
| 2697 void enumerateFreeObjects(Finder& finder, const Reader& reader) | |
| 2698 { | |
| 2699 for (unsigned sizeClass = 0; sizeClass < kNumClasses; sizeClass++) | |
| 2700 list_[sizeClass].enumerateFreeObjects(finder, reader); | |
| 2701 } | |
| 2702 #endif | |
| 2703 }; | |
| 2704 | |
| 2705 //------------------------------------------------------------------- | |
| 2706 // Global variables | |
| 2707 //------------------------------------------------------------------- | |
| 2708 | |
| 2709 // Central cache -- a collection of free-lists, one per size-class. | |
| 2710 // We have a separate lock per free-list to reduce contention. | |
| 2711 static TCMalloc_Central_FreeListPadded central_cache[kNumClasses]; | |
| 2712 | |
| 2713 // Page-level allocator | |
| 2714 static AllocAlignmentInteger pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(AllocAlignmentInteger) - 1) / sizeof(AllocAlignmentInteger)]; | |
| 2715 static bool phinited = false; | |
| 2716 | |
| 2717 // Avoid extra level of indirection by making "pageheap" be just an alias | |
| 2718 // of pageheap_memory. | |
| 2719 typedef union { | |
| 2720 void* m_memory; | |
| 2721 TCMalloc_PageHeap* m_pageHeap; | |
| 2722 } PageHeapUnion; | |
| 2723 | |
| 2724 static inline TCMalloc_PageHeap* getPageHeap() | |
| 2725 { | |
| 2726 PageHeapUnion u = { &pageheap_memory[0] }; | |
| 2727 return u.m_pageHeap; | |
| 2728 } | |
| 2729 | |
| 2730 #define pageheap getPageHeap() | |
| 2731 | |
| 2732 size_t fastMallocGoodSize(size_t bytes) | |
| 2733 { | |
| 2734 if (!phinited) | |
| 2735 TCMalloc_ThreadCache::InitModule(); | |
| 2736 return AllocationSize(bytes); | |
| 2737 } | |
| 2738 | |
| 2739 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY | |
| 2740 | |
| 2741 #if HAVE(DISPATCH_H) || OS(WINDOWS) | |
| 2742 | |
| 2743 void TCMalloc_PageHeap::periodicScavenge() | |
| 2744 { | |
| 2745 SpinLockHolder h(&pageheap_lock); | |
| 2746 pageheap->scavenge(); | |
| 2747 | |
| 2748 if (shouldScavenge()) { | |
| 2749 rescheduleScavenger(); | |
| 2750 return; | |
| 2751 } | |
| 2752 | |
| 2753 suspendScavenger(); | |
| 2754 } | |
| 2755 | |
| 2756 ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger() | |
| 2757 { | |
| 2758 ASSERT(pageheap_lock.IsHeld()); | |
| 2759 if (isScavengerSuspended() && shouldScavenge()) | |
| 2760 scheduleScavenger(); | |
| 2761 } | |
| 2762 | |
| 2763 #else | |
| 2764 | |
| 2765 void TCMalloc_PageHeap::scavengerThread() | |
| 2766 { | |
| 2767 #if HAVE(PTHREAD_SETNAME_NP) | |
| 2768 pthread_setname_np("JavaScriptCore: FastMalloc scavenger"); | |
| 2769 #endif | |
| 2770 | |
| 2771 while (1) { | |
| 2772 pageheap_lock.Lock(); | |
| 2773 if (!shouldScavenge()) { | |
| 2774             // Set to false so that signalScavenger() will check whether we need to be signalled. | |
| 2775 m_scavengeThreadActive = false; | |
| 2776 | |
| 2777             // We need to unlock now, as this thread will block on the condvar until scavenging is required. | |
| 2778 pageheap_lock.Unlock(); | |
| 2779 | |
| 2780             // Block until there are enough free committed pages to release back to the system. | |
| 2781 pthread_mutex_lock(&m_scavengeMutex); | |
| 2782 pthread_cond_wait(&m_scavengeCondition, &m_scavengeMutex); | |
| 2783             // After exiting the pthread_cond_wait, we hold the lock on m_scavengeMutex. Unlock it to prevent | |
| 2784 // deadlock next time round the loop. | |
| 2785 pthread_mutex_unlock(&m_scavengeMutex); | |
| 2786 | |
| 2787 // Set to true to prevent unnecessary signalling of the condvar. | |
| 2788 m_scavengeThreadActive = true; | |
| 2789 } else | |
| 2790 pageheap_lock.Unlock(); | |
| 2791 | |
| 2792         // Wait for a while to calculate how much memory remains unused during this pause. | |
| 2793 sleep(kScavengeDelayInSeconds); | |
| 2794 | |
| 2795 { | |
| 2796 SpinLockHolder h(&pageheap_lock); | |
| 2797 pageheap->scavenge(); | |
| 2798 } | |
| 2799 } | |
| 2800 } | |
| 2801 | |
| 2802 #endif | |
| 2803 | |
| 2804 #endif | |
| 2805 | |
| 2806 // If TLS is available, we also store a copy | |
| 2807 // of the per-thread object in a __thread variable | |
| 2808 // since __thread variables are faster to read | |
| 2809 // than pthread_getspecific(). We still need | |
| 2810 // pthread_setspecific() because __thread | |
| 2811 // variables provide no way to run cleanup | |
| 2812 // code when a thread is destroyed. | |
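| // A minimal sketch of the pattern described above (names and control flow here are | |
| // illustrative only, not the exact code paths used below): | |
| //   static __thread TCMalloc_ThreadCache* fast_copy;   // cheap to read on every malloc/free | |
| //   fast_copy = heap;                                   // mirror the heap for fast lookups | |
| //   pthread_setspecific(heap_key, heap);                // so the key's destructor still runs cleanup | |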
| 2813 #ifdef HAVE_TLS | |
| 2814 static __thread TCMalloc_ThreadCache *threadlocal_heap; | |
| 2815 #endif | |
| 2816 // Thread-specific key. Initialization here is somewhat tricky | |
| 2817 // because some Linux startup code invokes malloc() before it | |
| 2818 // is in a good enough state to handle pthread_key_create(). | |
| 2819 // Therefore, we use TSD keys only after tsd_inited is set to true. | |
| 2820 // Until then, we use a slow path to get the heap object. | |
| 2821 static bool tsd_inited = false; | |
| 2822 #if USE(PTHREAD_GETSPECIFIC_DIRECT) | |
| 2823 static const pthread_key_t heap_key = __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY0; | |
| 2824 #else | |
| 2825 static pthread_key_t heap_key; | |
| 2826 #endif | |
| 2827 #if OS(WINDOWS) | |
| 2828 DWORD tlsIndex = TLS_OUT_OF_INDEXES; | |
| 2829 #endif | |
| 2830 | |
| 2831 static ALWAYS_INLINE void setThreadHeap(TCMalloc_ThreadCache* heap) | |
| 2832 { | |
| 2833 #if USE(PTHREAD_GETSPECIFIC_DIRECT) | |
| 2834 // Can't have two libraries both doing this in the same process, | |
| 2835 // so check and make this crash right away. | |
| 2836 if (pthread_getspecific(heap_key)) | |
| 2837 CRASH(); | |
| 2838 #endif | |
| 2839 | |
| 2840 // Still do pthread_setspecific even if there's an alternate form | |
| 2841 // of thread-local storage in use, to benefit from the delete callback. | |
| 2842 pthread_setspecific(heap_key, heap); | |
| 2843 | |
| 2844 #if OS(WINDOWS) | |
| 2845 TlsSetValue(tlsIndex, heap); | |
| 2846 #endif | |
| 2847 } | |
| 2848 | |
| 2849 // Allocator for thread heaps | |
| 2850 static PageHeapAllocator<TCMalloc_ThreadCache> threadheap_allocator; | |
| 2851 | |
| 2852 // Linked list of heap objects. Protected by pageheap_lock. | |
| 2853 static TCMalloc_ThreadCache* thread_heaps = NULL; | |
| 2854 static int thread_heap_count = 0; | |
| 2855 | |
| 2856 // Overall thread cache size. Protected by pageheap_lock. | |
| 2857 static size_t overall_thread_cache_size = kDefaultOverallThreadCacheSize; | |
| 2858 | |
| 2859 // Global per-thread cache size. Writes are protected by | |
| 2860 // pageheap_lock. Reads are done without any locking, which should be | |
| 2861 // fine as long as size_t can be written atomically and we don't place | |
| 2862 // invariants between this variable and other pieces of state. | |
| 2863 static volatile size_t per_thread_cache_size = kMaxThreadCacheSize; | |
| 2864 | |
| 2865 //------------------------------------------------------------------- | |
| 2866 // Central cache implementation | |
| 2867 //------------------------------------------------------------------- | |
| 2868 | |
| 2869 void TCMalloc_Central_FreeList::Init(size_t cl, uintptr_t entropy) { | |
| 2870 lock_.Init(); | |
| 2871 size_class_ = cl; | |
| 2872 entropy_ = entropy; | |
| 2873 #if ENABLE(TCMALLOC_HARDENING) | |
| 2874 ASSERT(entropy_); | |
| 2875 #endif | |
| 2876 DLL_Init(&empty_, entropy_); | |
| 2877 DLL_Init(&nonempty_, entropy_); | |
| 2878 counter_ = 0; | |
| 2879 | |
| 2880 cache_size_ = 1; | |
| 2881 used_slots_ = 0; | |
| 2882 ASSERT(cache_size_ <= kNumTransferEntries); | |
| 2883 } | |
| 2884 | |
| 2885 void TCMalloc_Central_FreeList::ReleaseListToSpans(HardenedSLL start) { | |
| 2886 while (start) { | |
| 2887 HardenedSLL next = SLL_Next(start, entropy_); | |
| 2888 ReleaseToSpans(start); | |
| 2889 start = next; | |
| 2890 } | |
| 2891 } | |
| 2892 | |
| 2893 ALWAYS_INLINE void TCMalloc_Central_FreeList::ReleaseToSpans(HardenedSLL object) { | |
| 2894 const PageID p = reinterpret_cast<uintptr_t>(object.value()) >> kPageShift; | |
| 2895 Span* span = pageheap->GetDescriptor(p); | |
| 2896 ASSERT(span != NULL); | |
| 2897 ASSERT(span->refcount > 0); | |
| 2898 | |
| 2899   // If the span's free object list was empty, it is about to gain an object; move the span to the non-empty list | |
| 2900 if (!span->objects) { | |
| 2901 DLL_Remove(span, entropy_); | |
| 2902 DLL_Prepend(&nonempty_, span, entropy_); | |
| 2903 Event(span, 'N', 0); | |
| 2904 } | |
| 2905 | |
| 2906 // The following check is expensive, so it is disabled by default | |
| 2907 if (false) { | |
| 2908 // Check that object does not occur in list | |
| 2909 unsigned got = 0; | |
| 2910     for (HardenedSLL p = span->objects; p; p = SLL_Next(p, entropy_)) { | |
| 2911 ASSERT(p.value() != object.value()); | |
| 2912 got++; | |
| 2913 } | |
| 2914 ASSERT(got + span->refcount == | |
| 2915 (span->length<<kPageShift)/ByteSizeForClass(span->sizeclass)); | |
| 2916 } | |
| 2917 | |
| 2918 counter_++; | |
| 2919 span->refcount--; | |
| 2920 if (span->refcount == 0) { | |
| 2921 Event(span, '#', 0); | |
| 2922 counter_ -= (span->length<<kPageShift) / ByteSizeForClass(span->sizeclass); | |
| 2923 DLL_Remove(span, entropy_); | |
| 2924 | |
| 2925 // Release central list lock while operating on pageheap | |
| 2926 lock_.Unlock(); | |
| 2927 { | |
| 2928 SpinLockHolder h(&pageheap_lock); | |
| 2929 pageheap->Delete(span); | |
| 2930 } | |
| 2931 lock_.Lock(); | |
| 2932 } else { | |
| 2933 SLL_SetNext(object, span->objects, entropy_); | |
| 2934 span->objects.setValue(object.value()); | |
| 2935 } | |
| 2936 } | |
| 2937 | |
| 2938 ALWAYS_INLINE bool TCMalloc_Central_FreeList::EvictRandomSizeClass( | |
| 2939 size_t locked_size_class, bool force) { | |
| 2940 static int race_counter = 0; | |
| 2941 int t = race_counter++; // Updated without a lock, but who cares. | |
| 2942 if (t >= static_cast<int>(kNumClasses)) { | |
| 2943 while (t >= static_cast<int>(kNumClasses)) { | |
| 2944 t -= kNumClasses; | |
| 2945 } | |
| 2946 race_counter = t; | |
| 2947 } | |
| 2948 ASSERT(t >= 0); | |
| 2949 ASSERT(t < static_cast<int>(kNumClasses)); | |
| 2950 if (t == static_cast<int>(locked_size_class)) return false; | |
| 2951   return central_cache[t].ShrinkCache(static_cast<int>(locked_size_class), force); | |
| 2952 } | |
| 2953 | |
| 2954 bool TCMalloc_Central_FreeList::MakeCacheSpace() { | |
| 2955 // Is there room in the cache? | |
| 2956 if (used_slots_ < cache_size_) return true; | |
| 2957 // Check if we can expand this cache? | |
| 2958 if (cache_size_ == kNumTransferEntries) return false; | |
| 2959 // Ok, we'll try to grab an entry from some other size class. | |
| 2960 if (EvictRandomSizeClass(size_class_, false) || | |
| 2961 EvictRandomSizeClass(size_class_, true)) { | |
| 2962 // Succeeded in evicting, we're going to make our cache larger. | |
| 2963 cache_size_++; | |
| 2964 return true; | |
| 2965 } | |
| 2966 return false; | |
| 2967 } | |
| 2968 | |
| 2969 | |
| 2970 namespace { | |
| 2971 class LockInverter { | |
| 2972 private: | |
| 2973 SpinLock *held_, *temp_; | |
| 2974 public: | |
| 2975 inline explicit LockInverter(SpinLock* held, SpinLock *temp) | |
| 2976 : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); } | |
| 2977 inline ~LockInverter() { temp_->Unlock(); held_->Lock(); } | |
| 2978 }; | |
| 2979 } | |
| 2980 | |
| 2981 bool TCMalloc_Central_FreeList::ShrinkCache(int locked_size_class, bool force) { | |
| 2982 // Start with a quick check without taking a lock. | |
| 2983 if (cache_size_ == 0) return false; | |
| 2984 // We don't evict from a full cache unless we are 'forcing'. | |
| 2985 if (force == false && used_slots_ == cache_size_) return false; | |
| 2986 | |
| 2987 // Grab lock, but first release the other lock held by this thread. We use | |
| 2988 // the lock inverter to ensure that we never hold two size class locks | |
| 2989 // concurrently. That can create a deadlock because there is no well | |
| 2990 // defined nesting order. | |
| 2991 LockInverter li(¢ral_cache[locked_size_class].lock_, &lock_); | |
| 2992 ASSERT(used_slots_ <= cache_size_); | |
| 2993 ASSERT(0 <= cache_size_); | |
| 2994 if (cache_size_ == 0) return false; | |
| 2995 if (used_slots_ == cache_size_) { | |
| 2996 if (force == false) return false; | |
| 2997 // ReleaseListToSpans releases the lock, so we have to make all the | |
| 2998 // updates to the central list before calling it. | |
| 2999 cache_size_--; | |
| 3000 used_slots_--; | |
| 3001 ReleaseListToSpans(tc_slots_[used_slots_].head); | |
| 3002 return true; | |
| 3003 } | |
| 3004 cache_size_--; | |
| 3005 return true; | |
| 3006 } | |
| 3007 | |
| 3008 void TCMalloc_Central_FreeList::InsertRange(HardenedSLL start, HardenedSLL end, int N) { | |
| 3009 SpinLockHolder h(&lock_); | |
| 3010 if (N == num_objects_to_move[size_class_] && | |
| 3011 MakeCacheSpace()) { | |
| 3012 int slot = used_slots_++; | |
| 3013 ASSERT(slot >=0); | |
| 3014 ASSERT(slot < kNumTransferEntries); | |
| 3015 TCEntry *entry = &tc_slots_[slot]; | |
| 3016 entry->head = start; | |
| 3017 entry->tail = end; | |
| 3018 return; | |
| 3019 } | |
| 3020 ReleaseListToSpans(start); | |
| 3021 } | |
| 3022 | |
| 3023 void TCMalloc_Central_FreeList::RemoveRange(HardenedSLL* start, HardenedSLL* end, int *N) { | |
| 3024 int num = *N; | |
| 3025 ASSERT(num > 0); | |
| 3026 | |
| 3027 SpinLockHolder h(&lock_); | |
| 3028 if (num == num_objects_to_move[size_class_] && used_slots_ > 0) { | |
| 3029 int slot = --used_slots_; | |
| 3030 ASSERT(slot >= 0); | |
| 3031 TCEntry *entry = &tc_slots_[slot]; | |
| 3032 *start = entry->head; | |
| 3033 *end = entry->tail; | |
| 3034 return; | |
| 3035 } | |
| 3036 | |
| 3037 // TODO: Prefetch multiple TCEntries? | |
| 3038 HardenedSLL tail = FetchFromSpansSafe(); | |
| 3039 if (!tail) { | |
| 3040 // We are completely out of memory. | |
| 3041 *start = *end = HardenedSLL::null(); | |
| 3042 *N = 0; | |
| 3043 return; | |
| 3044 } | |
| 3045 | |
| 3046 SLL_SetNext(tail, HardenedSLL::null(), entropy_); | |
| 3047 HardenedSLL head = tail; | |
| 3048 int count = 1; | |
| 3049 while (count < num) { | |
| 3050 HardenedSLL t = FetchFromSpans(); | |
| 3051 if (!t) break; | |
| 3052 SLL_Push(&head, t, entropy_); | |
| 3053 count++; | |
| 3054 } | |
| 3055 *start = head; | |
| 3056 *end = tail; | |
| 3057 *N = count; | |
| 3058 } | |
| 3059 | |
| 3060 | |
| 3061 HardenedSLL TCMalloc_Central_FreeList::FetchFromSpansSafe() { | |
| 3062 HardenedSLL t = FetchFromSpans(); | |
| 3063 if (!t) { | |
| 3064 Populate(); | |
| 3065 t = FetchFromSpans(); | |
| 3066 } | |
| 3067 return t; | |
| 3068 } | |
| 3069 | |
| 3070 HardenedSLL TCMalloc_Central_FreeList::FetchFromSpans() { | |
| 3071 if (DLL_IsEmpty(&nonempty_, entropy_)) return HardenedSLL::null(); | |
| 3072 Span* span = nonempty_.next(entropy_); | |
| 3073 | |
| 3074 ASSERT(span->objects); | |
| 3075 ASSERT_SPAN_COMMITTED(span); | |
| 3076 span->refcount++; | |
| 3077 HardenedSLL result = span->objects; | |
| 3078 span->objects = SLL_Next(result, entropy_); | |
| 3079 if (!span->objects) { | |
| 3080 // Move to empty list | |
| 3081 DLL_Remove(span, entropy_); | |
| 3082 DLL_Prepend(&empty_, span, entropy_); | |
| 3083 Event(span, 'E', 0); | |
| 3084 } | |
| 3085 counter_--; | |
| 3086 return result; | |
| 3087 } | |
| 3088 | |
| 3089 // Fetch memory from the system and add to the central cache freelist. | |
| 3090 ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() { | |
| 3091 // Release central list lock while operating on pageheap | |
| 3092 lock_.Unlock(); | |
| 3093 const size_t npages = class_to_pages[size_class_]; | |
| 3094 | |
| 3095 Span* span; | |
| 3096 { | |
| 3097 SpinLockHolder h(&pageheap_lock); | |
| 3098 span = pageheap->New(npages); | |
| 3099 if (span) pageheap->RegisterSizeClass(span, size_class_); | |
| 3100 } | |
| 3101 if (span == NULL) { | |
| 3102 #if HAVE(ERRNO_H) | |
| 3103 MESSAGE("allocation failed: %d\n", errno); | |
| 3104 #elif OS(WINDOWS) | |
| 3105 MESSAGE("allocation failed: %d\n", ::GetLastError()); | |
| 3106 #else | |
| 3107 MESSAGE("allocation failed\n"); | |
| 3108 #endif | |
| 3109 lock_.Lock(); | |
| 3110 return; | |
| 3111 } | |
| 3112 ASSERT_SPAN_COMMITTED(span); | |
| 3113 ASSERT(span->length == npages); | |
| 3114 // Cache sizeclass info eagerly. Locking is not necessary. | |
| 3115 // (Instead of being eager, we could just replace any stale info | |
| 3116 // about this span, but that seems to be no better in practice.) | |
| 3117 for (size_t i = 0; i < npages; i++) { | |
| 3118 pageheap->CacheSizeClass(span->start + i, size_class_); | |
| 3119 } | |
| 3120 | |
| 3121 // Split the block into pieces and add to the free-list | |
| 3122 // TODO: coloring of objects to avoid cache conflicts? | |
| 3123 HardenedSLL head = HardenedSLL::null(); | |
| 3124 char* start = reinterpret_cast<char*>(span->start << kPageShift); | |
| 3125 const size_t size = ByteSizeForClass(size_class_); | |
| 3126 char* ptr = start + (npages << kPageShift) - ((npages << kPageShift) % size); | |
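|   // Illustrative arithmetic (hypothetical size class): for a single 4096-byte page and | |
|   // size == 96, (npages << kPageShift) % size == 64, so ptr starts 64 bytes short of the | |
|   // end and the loop below carves out 42 objects; the 64-byte remainder is never handed out. | |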
| 3127 int num = 0; | |
| 3128 #if ENABLE(TCMALLOC_HARDENING) | |
| 3129 uint32_t startPoison = freedObjectStartPoison(); | |
| 3130 uint32_t endPoison = freedObjectEndPoison(); | |
| 3131 #endif | |
| 3132 | |
| 3133 while (ptr > start) { | |
| 3134 ptr -= size; | |
| 3135 HardenedSLL node = HardenedSLL::create(ptr); | |
| 3136 POISON_DEALLOCATION_EXPLICIT(ptr, size, startPoison, endPoison); | |
| 3137 SLL_SetNext(node, head, entropy_); | |
| 3138 head = node; | |
| 3139 num++; | |
| 3140 } | |
| 3141 ASSERT(ptr == start); | |
| 3142 ASSERT(ptr == head.value()); | |
| 3143 #ifndef NDEBUG | |
| 3144 { | |
| 3145 HardenedSLL node = head; | |
| 3146 while (node) { | |
| 3147 ASSERT(IS_DEFINITELY_POISONED(node.value(), size)); | |
| 3148 node = SLL_Next(node, entropy_); | |
| 3149 } | |
| 3150 } | |
| 3151 #endif | |
| 3152 span->objects = head; | |
| 3153 ASSERT(span->objects.value() == head.value()); | |
| 3154 span->refcount = 0; // No sub-object in use yet | |
| 3155 | |
| 3156 // Add span to list of non-empty spans | |
| 3157 lock_.Lock(); | |
| 3158 DLL_Prepend(&nonempty_, span, entropy_); | |
| 3159 counter_ += num; | |
| 3160 } | |
| 3161 | |
| 3162 //------------------------------------------------------------------- | |
| 3163 // TCMalloc_ThreadCache implementation | |
| 3164 //------------------------------------------------------------------- | |
| 3165 | |
| 3166 inline bool TCMalloc_ThreadCache::SampleAllocation(size_t k) { | |
| 3167 if (bytes_until_sample_ < k) { | |
| 3168 PickNextSample(k); | |
| 3169 return true; | |
| 3170 } else { | |
| 3171 bytes_until_sample_ -= k; | |
| 3172 return false; | |
| 3173 } | |
| 3174 } | |
| 3175 | |
| 3176 void TCMalloc_ThreadCache::Init(ThreadIdentifier tid, uintptr_t entropy) { | |
| 3177 size_ = 0; | |
| 3178 next_ = NULL; | |
| 3179 prev_ = NULL; | |
| 3180 tid_ = tid; | |
| 3181 in_setspecific_ = false; | |
| 3182 entropy_ = entropy; | |
| 3183 #if ENABLE(TCMALLOC_HARDENING) | |
| 3184 ASSERT(entropy_); | |
| 3185 #endif | |
| 3186 for (size_t cl = 0; cl < kNumClasses; ++cl) { | |
| 3187 list_[cl].Init(entropy_); | |
| 3188 } | |
| 3189 | |
| 3190 // Initialize RNG -- run it for a bit to get to good values | |
| 3191 bytes_until_sample_ = 0; | |
| 3192 rnd_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this)); | |
| 3193 for (int i = 0; i < 100; i++) { | |
| 3194 PickNextSample(static_cast<size_t>(FLAGS_tcmalloc_sample_parameter * 2)); | |
| 3195 } | |
| 3196 } | |
| 3197 | |
| 3198 void TCMalloc_ThreadCache::Cleanup() { | |
| 3199 // Put unused memory back into central cache | |
| 3200 for (size_t cl = 0; cl < kNumClasses; ++cl) { | |
| 3201 if (list_[cl].length() > 0) { | |
| 3202 ReleaseToCentralCache(cl, list_[cl].length()); | |
| 3203 } | |
| 3204 } | |
| 3205 } | |
| 3206 | |
| 3207 ALWAYS_INLINE void* TCMalloc_ThreadCache::Allocate(size_t size) { | |
| 3208 ASSERT(size <= kMaxSize); | |
| 3209 const size_t cl = SizeClass(size); | |
| 3210 FreeList* list = &list_[cl]; | |
| 3211 size_t allocationSize = ByteSizeForClass(cl); | |
| 3212 if (list->empty()) { | |
| 3213 FetchFromCentralCache(cl, allocationSize); | |
| 3214 if (list->empty()) return NULL; | |
| 3215 } | |
| 3216 size_ -= allocationSize; | |
| 3217 void* result = list->Pop(); | |
| 3218 if (!result) | |
| 3219 return 0; | |
| 3220 RELEASE_ASSERT(IS_DEFINITELY_POISONED(result, allocationSize)); | |
| 3221 POISON_ALLOCATION(result, allocationSize); | |
| 3222 return result; | |
| 3223 } | |
| 3224 | |
| 3225 inline void TCMalloc_ThreadCache::Deallocate(HardenedSLL ptr, size_t cl) { | |
| 3226 size_t allocationSize = ByteSizeForClass(cl); | |
| 3227 size_ += allocationSize; | |
| 3228 FreeList* list = &list_[cl]; | |
| 3229 if (MAY_BE_POISONED(ptr.value(), allocationSize)) | |
| 3230 list->Validate(ptr, allocationSize); | |
| 3231 | |
| 3232 POISON_DEALLOCATION(ptr.value(), allocationSize); | |
| 3233 list->Push(ptr); | |
| 3234 // If enough data is free, put back into central cache | |
| 3235 if (list->length() > kMaxFreeListLength) { | |
| 3236 ReleaseToCentralCache(cl, num_objects_to_move[cl]); | |
| 3237 } | |
| 3238 if (size_ >= per_thread_cache_size) Scavenge(); | |
| 3239 } | |
| 3240 | |
| 3241 // Remove some objects of class "cl" from central cache and add to thread heap | |
| 3242 ALWAYS_INLINE void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl, size_t allocationSize) { | |
| 3243 int fetch_count = num_objects_to_move[cl]; | |
| 3244 HardenedSLL start, end; | |
| 3245 central_cache[cl].RemoveRange(&start, &end, &fetch_count); | |
| 3246 list_[cl].PushRange(fetch_count, start, end); | |
| 3247 size_ += allocationSize * fetch_count; | |
| 3248 } | |
| 3249 | |
| 3250 // Remove some objects of class "cl" from thread heap and add to central cache | |
| 3251 inline void TCMalloc_ThreadCache::ReleaseToCentralCache(size_t cl, int N) { | |
| 3252 ASSERT(N > 0); | |
| 3253 FreeList* src = &list_[cl]; | |
| 3254 if (N > src->length()) N = src->length(); | |
| 3255 size_ -= N*ByteSizeForClass(cl); | |
| 3256 | |
| 3257 // We return prepackaged chains of the correct size to the central cache. | |
| 3258 // TODO: Use the same format internally in the thread caches? | |
| 3259 int batch_size = num_objects_to_move[cl]; | |
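|   // Illustrative example: with N == 100 and a batch size of 32, the loop below returns | |
|   // three prepackaged chains of 32 objects, and the final PopRange/InsertRange moves the last 4. | |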
| 3260 while (N > batch_size) { | |
| 3261 HardenedSLL tail, head; | |
| 3262 src->PopRange(batch_size, &head, &tail); | |
| 3263 central_cache[cl].InsertRange(head, tail, batch_size); | |
| 3264 N -= batch_size; | |
| 3265 } | |
| 3266 HardenedSLL tail, head; | |
| 3267 src->PopRange(N, &head, &tail); | |
| 3268 central_cache[cl].InsertRange(head, tail, N); | |
| 3269 } | |
| 3270 | |
| 3271 // Release idle memory to the central cache | |
| 3272 inline void TCMalloc_ThreadCache::Scavenge() { | |
| 3273 // If the low-water mark for the free list is L, it means we would | |
| 3274 // not have had to allocate anything from the central cache even if | |
| 3275 // we had reduced the free list size by L. We aim to get closer to | |
| 3276 // that situation by dropping L/2 nodes from the free list. This | |
| 3277 // may not release much memory, but if so we will call scavenge again | |
| 3278 // pretty soon and the low-water marks will be high on that call. | |
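|   // Worked example (illustrative): if a list's low-water mark since the last scavenge | |
|   // is 10, we return 10/2 == 5 objects to the central cache; a mark of 1 still drops 1. | |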
| 3279 //int64 start = CycleClock::Now(); | |
| 3280 | |
| 3281 for (size_t cl = 0; cl < kNumClasses; cl++) { | |
| 3282 FreeList* list = &list_[cl]; | |
| 3283 const int lowmark = list->lowwatermark(); | |
| 3284 if (lowmark > 0) { | |
| 3285 const int drop = (lowmark > 1) ? lowmark/2 : 1; | |
| 3286 ReleaseToCentralCache(cl, drop); | |
| 3287 } | |
| 3288 list->clear_lowwatermark(); | |
| 3289 } | |
| 3290 | |
| 3291 //int64 finish = CycleClock::Now(); | |
| 3292 //CycleTimer ct; | |
| 3293 //MESSAGE("GC: %.0f ns\n", ct.CyclesToUsec(finish-start)*1000.0); | |
| 3294 } | |
| 3295 | |
| 3296 void TCMalloc_ThreadCache::PickNextSample(size_t k) { | |
| 3297 // Make next "random" number | |
| 3298 // x^32+x^22+x^2+x^1+1 is a primitive polynomial for random numbers | |
| 3299 static const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0); | |
| 3300 uint32_t r = rnd_; | |
| 3301 rnd_ = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly); | |
| 3302 | |
| 3303 // Next point is "rnd_ % (sample_period)". I.e., average | |
| 3304 // increment is "sample_period/2". | |
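|   // Illustrative example: if sample_period ends up near 512K below, consecutive sampling | |
|   // points are on average roughly 256KB of allocated bytes apart. | |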
| 3305 const int flag_value = static_cast<int>(FLAGS_tcmalloc_sample_parameter); | |
| 3306 static int last_flag_value = -1; | |
| 3307 | |
| 3308 if (flag_value != last_flag_value) { | |
| 3309 SpinLockHolder h(&sample_period_lock); | |
| 3310 int i; | |
| 3311     for (i = 0; i < (static_cast<int>(sizeof(primes_list)/sizeof(primes_list[0])) - 1); i++) { | |
| 3312 if (primes_list[i] >= flag_value) { | |
| 3313 break; | |
| 3314 } | |
| 3315 } | |
| 3316 sample_period = primes_list[i]; | |
| 3317 last_flag_value = flag_value; | |
| 3318 } | |
| 3319 | |
| 3320 bytes_until_sample_ += rnd_ % sample_period; | |
| 3321 | |
| 3322 if (k > (static_cast<size_t>(-1) >> 2)) { | |
| 3323 // If the user has asked for a huge allocation then it is possible | |
| 3324 // for the code below to loop infinitely. Just return (note that | |
| 3325 // this throws off the sampling accuracy somewhat, but a user who | |
| 3326 // is allocating more than 1G of memory at a time can live with a | |
| 3327 // minor inaccuracy in profiling of small allocations, and also | |
| 3328 // would rather not wait for the loop below to terminate). | |
| 3329 return; | |
| 3330 } | |
| 3331 | |
| 3332 while (bytes_until_sample_ < k) { | |
| 3333 // Increase bytes_until_sample_ by enough average sampling periods | |
| 3334 // (sample_period >> 1) to allow us to sample past the current | |
| 3335 // allocation. | |
| 3336 bytes_until_sample_ += (sample_period >> 1); | |
| 3337 } | |
| 3338 | |
| 3339 bytes_until_sample_ -= k; | |
| 3340 } | |
| 3341 | |
| 3342 void TCMalloc_ThreadCache::InitModule() { | |
| 3343   // There is a slight potential race here because of the double-checked | |
| 3344 // locking idiom. However, as long as the program does a small | |
| 3345 // allocation before switching to multi-threaded mode, we will be | |
| 3346 // fine. We increase the chances of doing such a small allocation | |
| 3347 // by doing one in the constructor of the module_enter_exit_hook | |
| 3348 // object declared below. | |
| 3349 SpinLockHolder h(&pageheap_lock); | |
| 3350 if (!phinited) { | |
| 3351 uintptr_t entropy = HARDENING_ENTROPY; | |
| 3352 #ifdef WTF_CHANGES | |
| 3353 InitTSD(); | |
| 3354 #endif | |
| 3355 InitSizeClasses(); | |
| 3356 threadheap_allocator.Init(entropy); | |
| 3357 span_allocator.Init(entropy); | |
| 3358 span_allocator.New(); // Reduce cache conflicts | |
| 3359 span_allocator.New(); // Reduce cache conflicts | |
| 3360 stacktrace_allocator.Init(entropy); | |
| 3361 DLL_Init(&sampled_objects, entropy); | |
| 3362 for (size_t i = 0; i < kNumClasses; ++i) { | |
| 3363 central_cache[i].Init(i, entropy); | |
| 3364 } | |
| 3365 pageheap->init(); | |
| 3366 phinited = 1; | |
| 3367 #if defined(WTF_CHANGES) && OS(DARWIN) | |
| 3368 FastMallocZone::init(); | |
| 3369 #endif | |
| 3370 } | |
| 3371 } | |
| 3372 | |
| 3373 inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::NewHeap(ThreadIdentifier tid, uintptr_t entropy) { | |
| 3374 // Create the heap and add it to the linked list | |
| 3375 TCMalloc_ThreadCache *heap = threadheap_allocator.New(); | |
| 3376 heap->Init(tid, entropy); | |
| 3377 heap->next_ = thread_heaps; | |
| 3378 heap->prev_ = NULL; | |
| 3379 if (thread_heaps != NULL) thread_heaps->prev_ = heap; | |
| 3380 thread_heaps = heap; | |
| 3381 thread_heap_count++; | |
| 3382 RecomputeThreadCacheSize(); | |
| 3383 return heap; | |
| 3384 } | |
| 3385 | |
| 3386 inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetThreadHeap() { | |
| 3387 #ifdef HAVE_TLS | |
| 3388 // __thread is faster, but only when the kernel supports it | |
| 3389 if (KernelSupportsTLS()) | |
| 3390 return threadlocal_heap; | |
| 3391 #elif OS(WINDOWS) | |
| 3392 return static_cast<TCMalloc_ThreadCache*>(TlsGetValue(tlsIndex)); | |
| 3393 #else | |
| 3394 return static_cast<TCMalloc_ThreadCache*>(pthread_getspecific(heap_key)); | |
| 3395 #endif | |
| 3396 } | |
| 3397 | |
| 3398 inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() { | |
| 3399 TCMalloc_ThreadCache* ptr = NULL; | |
| 3400 if (!tsd_inited) { | |
| 3401 InitModule(); | |
| 3402 } else { | |
| 3403 ptr = GetThreadHeap(); | |
| 3404 } | |
| 3405 if (ptr == NULL) ptr = CreateCacheIfNecessary(); | |
| 3406 return ptr; | |
| 3407 } | |
| 3408 | |
| 3409 // In deletion paths, we do not try to create a thread-cache. This is | |
| 3410 // because we may be in the thread destruction code and may have | |
| 3411 // already cleaned up the cache for this thread. | |
| 3412 inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() { | |
| 3413 if (!tsd_inited) return NULL; | |
| 3414 void* const p = GetThreadHeap(); | |
| 3415 return reinterpret_cast<TCMalloc_ThreadCache*>(p); | |
| 3416 } | |
| 3417 | |
| 3418 void TCMalloc_ThreadCache::InitTSD() { | |
| 3419 ASSERT(!tsd_inited); | |
| 3420 #if USE(PTHREAD_GETSPECIFIC_DIRECT) | |
| 3421 pthread_key_init_np(heap_key, DestroyThreadCache); | |
| 3422 #else | |
| 3423 pthread_key_create(&heap_key, DestroyThreadCache); | |
| 3424 #endif | |
| 3425 #if OS(WINDOWS) | |
| 3426 tlsIndex = TlsAlloc(); | |
| 3427 #endif | |
| 3428 tsd_inited = true; | |
| 3429 | |
| 3430 #if !OS(WINDOWS) | |
| 3431 // We may have used a fake pthread_t for the main thread. Fix it. | |
| 3432 pthread_t zero; | |
| 3433 memset(&zero, 0, sizeof(zero)); | |
| 3434 #endif | |
| 3435 #ifndef WTF_CHANGES | |
| 3436 SpinLockHolder h(&pageheap_lock); | |
| 3437 #else | |
| 3438 ASSERT(pageheap_lock.IsHeld()); | |
| 3439 #endif | |
| 3440 for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) { | |
| 3441 #if OS(WINDOWS) | |
| 3442 if (h->tid_ == 0) { | |
| 3443 h->tid_ = GetCurrentThreadId(); | |
| 3444 } | |
| 3445 #else | |
| 3446 if (pthread_equal(h->tid_, zero)) { | |
| 3447 h->tid_ = pthread_self(); | |
| 3448 } | |
| 3449 #endif | |
| 3450 } | |
| 3451 } | |
| 3452 | |
| 3453 TCMalloc_ThreadCache* TCMalloc_ThreadCache::CreateCacheIfNecessary() { | |
| 3454 // Initialize per-thread data if necessary | |
| 3455 TCMalloc_ThreadCache* heap = NULL; | |
| 3456 { | |
| 3457 SpinLockHolder h(&pageheap_lock); | |
| 3458 | |
| 3459 #if OS(WINDOWS) | |
| 3460 DWORD me; | |
| 3461 if (!tsd_inited) { | |
| 3462 me = 0; | |
| 3463 } else { | |
| 3464 me = GetCurrentThreadId(); | |
| 3465 } | |
| 3466 #else | |
| 3467 // Early on in glibc's life, we cannot even call pthread_self() | |
| 3468 pthread_t me; | |
| 3469 if (!tsd_inited) { | |
| 3470 memset(&me, 0, sizeof(me)); | |
| 3471 } else { | |
| 3472 me = pthread_self(); | |
| 3473 } | |
| 3474 #endif | |
| 3475 | |
| 3476 // This may be a recursive malloc call from pthread_setspecific() | |
| 3477 // In that case, the heap for this thread has already been created | |
| 3478 // and added to the linked list. So we search for that first. | |
| 3479 for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) { | |
| 3480 #if OS(WINDOWS) | |
| 3481 if (h->tid_ == me) { | |
| 3482 #else | |
| 3483 if (pthread_equal(h->tid_, me)) { | |
| 3484 #endif | |
| 3485 heap = h; | |
| 3486 break; | |
| 3487 } | |
| 3488 } | |
| 3489 | |
| 3490 if (heap == NULL) heap = NewHeap(me, HARDENING_ENTROPY); | |
| 3491 } | |
| 3492 | |
| 3493 // We call pthread_setspecific() outside the lock because it may | |
| 3494 // call malloc() recursively. The recursive call will never get | |
| 3495 // here again because it will find the already allocated heap in the | |
| 3496 // linked list of heaps. | |
| 3497 if (!heap->in_setspecific_ && tsd_inited) { | |
| 3498 heap->in_setspecific_ = true; | |
| 3499 setThreadHeap(heap); | |
| 3500 } | |
| 3501 return heap; | |
| 3502 } | |
| 3503 | |
| 3504 void TCMalloc_ThreadCache::BecomeIdle() { | |
| 3505 if (!tsd_inited) return; // No caches yet | |
| 3506 TCMalloc_ThreadCache* heap = GetThreadHeap(); | |
| 3507 if (heap == NULL) return; // No thread cache to remove | |
| 3508 if (heap->in_setspecific_) return; // Do not disturb the active caller | |
| 3509 | |
| 3510 heap->in_setspecific_ = true; | |
| 3511 setThreadHeap(NULL); | |
| 3512 #ifdef HAVE_TLS | |
| 3513 // Also update the copy in __thread | |
| 3514 threadlocal_heap = NULL; | |
| 3515 #endif | |
| 3516 heap->in_setspecific_ = false; | |
| 3517 if (GetThreadHeap() == heap) { | |
| 3518 // Somehow heap got reinstated by a recursive call to malloc | |
| 3519 // from pthread_setspecific. We give up in this case. | |
| 3520 return; | |
| 3521 } | |
| 3522 | |
| 3523 // We can now get rid of the heap | |
| 3524 DeleteCache(heap); | |
| 3525 } | |
| 3526 | |
| 3527 void TCMalloc_ThreadCache::DestroyThreadCache(void* ptr) { | |
| 3528 // Note that "ptr" cannot be NULL since pthread promises not | |
| 3529 // to invoke the destructor on NULL values, but for safety, | |
| 3530 // we check anyway. | |
| 3531 if (ptr == NULL) return; | |
| 3532 #ifdef HAVE_TLS | |
| 3533 // Prevent fast path of GetThreadHeap() from returning heap. | |
| 3534 threadlocal_heap = NULL; | |
| 3535 #endif | |
| 3536 DeleteCache(reinterpret_cast<TCMalloc_ThreadCache*>(ptr)); | |
| 3537 } | |
| 3538 | |
| 3539 void TCMalloc_ThreadCache::DeleteCache(TCMalloc_ThreadCache* heap) { | |
| 3540 // Remove all memory from heap | |
| 3541 heap->Cleanup(); | |
| 3542 | |
| 3543 // Remove from linked list | |
| 3544 SpinLockHolder h(&pageheap_lock); | |
| 3545 if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_; | |
| 3546 if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_; | |
| 3547 if (thread_heaps == heap) thread_heaps = heap->next_; | |
| 3548 thread_heap_count--; | |
| 3549 RecomputeThreadCacheSize(); | |
| 3550 | |
| 3551 threadheap_allocator.Delete(heap); | |
| 3552 } | |
| 3553 | |
| 3554 void TCMalloc_ThreadCache::RecomputeThreadCacheSize() { | |
| 3555 // Divide available space across threads | |
| 3556 int n = thread_heap_count > 0 ? thread_heap_count : 1; | |
| 3557 size_t space = overall_thread_cache_size / n; | |
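|   // Illustrative example: with a 16MB overall budget and 8 live threads, each thread | |
|   // cache is allowed 2MB, subject to the [kMinThreadCacheSize, kMaxThreadCacheSize] clamp below. | |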
| 3558 | |
| 3559 // Limit to allowed range | |
| 3560 if (space < kMinThreadCacheSize) space = kMinThreadCacheSize; | |
| 3561 if (space > kMaxThreadCacheSize) space = kMaxThreadCacheSize; | |
| 3562 | |
| 3563 per_thread_cache_size = space; | |
| 3564 } | |
| 3565 | |
| 3566 void TCMalloc_ThreadCache::Print() const { | |
| 3567 for (size_t cl = 0; cl < kNumClasses; ++cl) { | |
| 3568 MESSAGE(" %5" PRIuS " : %4d len; %4d lo\n", | |
| 3569 ByteSizeForClass(cl), | |
| 3570 list_[cl].length(), | |
| 3571 list_[cl].lowwatermark()); | |
| 3572 } | |
| 3573 } | |
| 3574 | |
| 3575 // Extract interesting stats | |
| 3576 struct TCMallocStats { | |
| 3577 uint64_t system_bytes; // Bytes alloced from system | |
| 3578 uint64_t thread_bytes; // Bytes in thread caches | |
| 3579 uint64_t central_bytes; // Bytes in central cache | |
| 3580 uint64_t transfer_bytes; // Bytes in central transfer cache | |
| 3581 uint64_t pageheap_bytes; // Bytes in page heap | |
| 3582 uint64_t metadata_bytes; // Bytes alloced for metadata | |
| 3583 }; | |
| 3584 | |
| 3585 #ifndef WTF_CHANGES | |
| 3586 // Get stats into "r". Also get per-size-class counts if class_count != NULL | |
| 3587 static void ExtractStats(TCMallocStats* r, uint64_t* class_count) { | |
| 3588 r->central_bytes = 0; | |
| 3589 r->transfer_bytes = 0; | |
| 3590 for (int cl = 0; cl < kNumClasses; ++cl) { | |
| 3591 const int length = central_cache[cl].length(); | |
| 3592 const int tc_length = central_cache[cl].tc_length(); | |
| 3593 r->central_bytes += static_cast<uint64_t>(ByteSizeForClass(cl)) * length; | |
| 3594 r->transfer_bytes += | |
| 3595 static_cast<uint64_t>(ByteSizeForClass(cl)) * tc_length; | |
| 3596 if (class_count) class_count[cl] = length + tc_length; | |
| 3597 } | |
| 3598 | |
| 3599 // Add stats from per-thread heaps | |
| 3600 r->thread_bytes = 0; | |
| 3601 { // scope | |
| 3602 SpinLockHolder h(&pageheap_lock); | |
| 3603 for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) { | |
| 3604 r->thread_bytes += h->Size(); | |
| 3605 if (class_count) { | |
| 3606 for (size_t cl = 0; cl < kNumClasses; ++cl) { | |
| 3607 class_count[cl] += h->freelist_length(cl); | |
| 3608 } | |
| 3609 } | |
| 3610 } | |
| 3611 } | |
| 3612 | |
| 3613 { //scope | |
| 3614 SpinLockHolder h(&pageheap_lock); | |
| 3615 r->system_bytes = pageheap->SystemBytes(); | |
| 3616 r->metadata_bytes = metadata_system_bytes; | |
| 3617 r->pageheap_bytes = pageheap->FreeBytes(); | |
| 3618 } | |
| 3619 } | |
| 3620 #endif | |
| 3621 | |
| 3622 #ifndef WTF_CHANGES | |
| 3623 // WRITE stats to "out" | |
| 3624 static void DumpStats(TCMalloc_Printer* out, int level) { | |
| 3625 TCMallocStats stats; | |
| 3626 uint64_t class_count[kNumClasses]; | |
| 3627 ExtractStats(&stats, (level >= 2 ? class_count : NULL)); | |
| 3628 | |
| 3629 if (level >= 2) { | |
| 3630 out->printf("------------------------------------------------\n"); | |
| 3631 uint64_t cumulative = 0; | |
| 3632 for (int cl = 0; cl < kNumClasses; ++cl) { | |
| 3633 if (class_count[cl] > 0) { | |
| 3634 uint64_t class_bytes = class_count[cl] * ByteSizeForClass(cl); | |
| 3635 cumulative += class_bytes; | |
| 3636 out->printf("class %3d [ %8" PRIuS " bytes ] : " | |
| 3637 "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n", | |
| 3638 cl, ByteSizeForClass(cl), | |
| 3639 class_count[cl], | |
| 3640 class_bytes / 1048576.0, | |
| 3641 cumulative / 1048576.0); | |
| 3642 } | |
| 3643 } | |
| 3644 | |
| 3645 SpinLockHolder h(&pageheap_lock); | |
| 3646 pageheap->Dump(out); | |
| 3647 } | |
| 3648 | |
| 3649 const uint64_t bytes_in_use = stats.system_bytes | |
| 3650 - stats.pageheap_bytes | |
| 3651 - stats.central_bytes | |
| 3652 - stats.transfer_bytes | |
| 3653 - stats.thread_bytes; | |
| 3654 | |
| 3655 out->printf("------------------------------------------------\n" | |
| 3656 "MALLOC: %12" PRIu64 " Heap size\n" | |
| 3657 "MALLOC: %12" PRIu64 " Bytes in use by application\n" | |
| 3658 "MALLOC: %12" PRIu64 " Bytes free in page heap\n" | |
| 3659 "MALLOC: %12" PRIu64 " Bytes free in central cache\n" | |
| 3660 "MALLOC: %12" PRIu64 " Bytes free in transfer cache\n" | |
| 3661 "MALLOC: %12" PRIu64 " Bytes free in thread caches\n" | |
| 3662 "MALLOC: %12" PRIu64 " Spans in use\n" | |
| 3663 "MALLOC: %12" PRIu64 " Thread heaps in use\n" | |
| 3664 "MALLOC: %12" PRIu64 " Metadata allocated\n" | |
| 3665 "------------------------------------------------\n", | |
| 3666 stats.system_bytes, | |
| 3667 bytes_in_use, | |
| 3668 stats.pageheap_bytes, | |
| 3669 stats.central_bytes, | |
| 3670 stats.transfer_bytes, | |
| 3671 stats.thread_bytes, | |
| 3672 uint64_t(span_allocator.inuse()), | |
| 3673 uint64_t(threadheap_allocator.inuse()), | |
| 3674 stats.metadata_bytes); | |
| 3675 } | |
| 3676 | |
| 3677 static void PrintStats(int level) { | |
| 3678 const int kBufferSize = 16 << 10; | |
| 3679 char* buffer = new char[kBufferSize]; | |
| 3680 TCMalloc_Printer printer(buffer, kBufferSize); | |
| 3681 DumpStats(&printer, level); | |
| 3682 write(STDERR_FILENO, buffer, strlen(buffer)); | |
| 3683 delete[] buffer; | |
| 3684 } | |
| 3685 | |
| 3686 static void** DumpStackTraces() { | |
| 3687 // Count how much space we need | |
| 3688 int needed_slots = 0; | |
| 3689 { | |
| 3690 SpinLockHolder h(&pageheap_lock); | |
| 3691 for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) { | |
| 3692 StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects); | |
| 3693 needed_slots += 3 + stack->depth; | |
| 3694 } | |
| 3695 needed_slots += 100; // Slop in case sample grows | |
| 3696 needed_slots += needed_slots/8; // An extra 12.5% slop | |
| 3697 } | |
| 3698 | |
| 3699 void** result = new void*[needed_slots]; | |
| 3700 if (result == NULL) { | |
| 3701 MESSAGE("tcmalloc: could not allocate %d slots for stack traces\n", | |
| 3702 needed_slots); | |
| 3703 return NULL; | |
| 3704 } | |
| 3705 | |
| 3706 SpinLockHolder h(&pageheap_lock); | |
| 3707 int used_slots = 0; | |
| 3708 for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) { | |
| 3709 ASSERT(used_slots < needed_slots); // Need to leave room for terminator | |
| 3710 StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects); | |
| 3711 if (used_slots + 3 + stack->depth >= needed_slots) { | |
| 3712 // No more room | |
| 3713 break; | |
| 3714 } | |
| 3715 | |
| 3716 result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1)); | |
| 3717 result[used_slots+1] = reinterpret_cast<void*>(stack->size); | |
| 3718 result[used_slots+2] = reinterpret_cast<void*>(stack->depth); | |
| 3719 for (int d = 0; d < stack->depth; d++) { | |
| 3720 result[used_slots+3+d] = stack->stack[d]; | |
| 3721 } | |
| 3722 used_slots += 3 + stack->depth; | |
| 3723 } | |
| 3724 result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0)); | |
| 3725 return result; | |
| 3726 } | |
| 3727 #endif | |
| 3728 | |
| 3729 #ifndef WTF_CHANGES | |
| 3730 | |
| 3731 // TCMalloc's support for extra malloc interfaces | |
| 3732 class TCMallocImplementation : public MallocExtension { | |
| 3733 public: | |
| 3734 virtual void GetStats(char* buffer, int buffer_length) { | |
| 3735 ASSERT(buffer_length > 0); | |
| 3736 TCMalloc_Printer printer(buffer, buffer_length); | |
| 3737 | |
| 3738 // Print level one stats unless lots of space is available | |
| 3739 if (buffer_length < 10000) { | |
| 3740 DumpStats(&printer, 1); | |
| 3741 } else { | |
| 3742 DumpStats(&printer, 2); | |
| 3743 } | |
| 3744 } | |
| 3745 | |
| 3746 virtual void** ReadStackTraces() { | |
| 3747 return DumpStackTraces(); | |
| 3748 } | |
| 3749 | |
| 3750 virtual bool GetNumericProperty(const char* name, size_t* value) { | |
| 3751 ASSERT(name != NULL); | |
| 3752 | |
| 3753 if (strcmp(name, "generic.current_allocated_bytes") == 0) { | |
| 3754 TCMallocStats stats; | |
| 3755 ExtractStats(&stats, NULL); | |
| 3756 *value = stats.system_bytes | |
| 3757 - stats.thread_bytes | |
| 3758 - stats.central_bytes | |
| 3759 - stats.pageheap_bytes; | |
| 3760 return true; | |
| 3761 } | |
| 3762 | |
| 3763 if (strcmp(name, "generic.heap_size") == 0) { | |
| 3764 TCMallocStats stats; | |
| 3765 ExtractStats(&stats, NULL); | |
| 3766 *value = stats.system_bytes; | |
| 3767 return true; | |
| 3768 } | |
| 3769 | |
| 3770 if (strcmp(name, "tcmalloc.slack_bytes") == 0) { | |
| 3771 // We assume that bytes in the page heap are not fragmented too | |
| 3772 // badly, and are therefore available for allocation. | |
| 3773 SpinLockHolder l(&pageheap_lock); | |
| 3774 *value = pageheap->FreeBytes(); | |
| 3775 return true; | |
| 3776 } | |
| 3777 | |
| 3778 if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) { | |
| 3779 SpinLockHolder l(&pageheap_lock); | |
| 3780 *value = overall_thread_cache_size; | |
| 3781 return true; | |
| 3782 } | |
| 3783 | |
| 3784 if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) { | |
| 3785 TCMallocStats stats; | |
| 3786 ExtractStats(&stats, NULL); | |
| 3787 *value = stats.thread_bytes; | |
| 3788 return true; | |
| 3789 } | |
| 3790 | |
| 3791 return false; | |
| 3792 } | |
| 3793 | |
| 3794 virtual bool SetNumericProperty(const char* name, size_t value) { | |
| 3795 ASSERT(name != NULL); | |
| 3796 | |
| 3797 if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) { | |
| 3798 // Clip the value to a reasonable range | |
| 3799 if (value < kMinThreadCacheSize) value = kMinThreadCacheSize; | |
| 3800 if (value > (1<<30)) value = (1<<30); // Limit to 1GB | |
| 3801 | |
| 3802 SpinLockHolder l(&pageheap_lock); | |
| 3803 overall_thread_cache_size = static_cast<size_t>(value); | |
| 3804 TCMalloc_ThreadCache::RecomputeThreadCacheSize(); | |
| 3805 return true; | |
| 3806 } | |
| 3807 | |
| 3808 return false; | |
| 3809 } | |
| 3810 | |
| 3811 virtual void MarkThreadIdle() { | |
| 3812 TCMalloc_ThreadCache::BecomeIdle(); | |
| 3813 } | |
| 3814 | |
| 3815 virtual void ReleaseFreeMemory() { | |
| 3816 SpinLockHolder h(&pageheap_lock); | |
| 3817 pageheap->ReleaseFreePages(); | |
| 3818 } | |
| 3819 }; | |
| 3820 #endif | |
| 3821 | |
| 3822 // The constructor allocates an object to ensure that initialization | |
| 3823 // runs before main(), and therefore we do not have a chance to become | |
| 3824 // multi-threaded before initialization. We also create the TSD key | |
| 3825 // here. Presumably by the time this constructor runs, glibc is in | |
| 3826 // good enough shape to handle pthread_key_create(). | |
| 3827 // | |
| 3828 // The constructor also takes the opportunity to tell STL to use | |
| 3829 // tcmalloc. We want to do this early, before construct time, so | |
| 3830 // all user STL allocations go through tcmalloc (which works really | |
| 3831 // well for STL). | |
| 3832 // | |
| 3833 // The destructor prints stats when the program exits. | |
| 3834 class TCMallocGuard { | |
| 3835 public: | |
| 3836 | |
| 3837 TCMallocGuard() { | |
| 3838 #ifdef HAVE_TLS // this is true if the cc/ld/libc combo support TLS | |
| 3839 // Check whether the kernel also supports TLS (needs to happen at runtime) | |
| 3840 CheckIfKernelSupportsTLS(); | |
| 3841 #endif | |
| 3842 #ifndef WTF_CHANGES | |
| 3843 #ifdef WIN32 // patch the windows VirtualAlloc, etc. | |
| 3844 PatchWindowsFunctions(); // defined in windows/patch_functions.cc | |
| 3845 #endif | |
| 3846 #endif | |
| 3847 free(malloc(1)); | |
| 3848 TCMalloc_ThreadCache::InitTSD(); | |
| 3849 free(malloc(1)); | |
| 3850 #ifndef WTF_CHANGES | |
| 3851 MallocExtension::Register(new TCMallocImplementation); | |
| 3852 #endif | |
| 3853 } | |
| 3854 | |
| 3855 #ifndef WTF_CHANGES | |
| 3856 ~TCMallocGuard() { | |
| 3857 const char* env = getenv("MALLOCSTATS"); | |
| 3858 if (env != NULL) { | |
| 3859 int level = atoi(env); | |
| 3860 if (level < 1) level = 1; | |
| 3861 PrintStats(level); | |
| 3862 } | |
| 3863 #ifdef WIN32 | |
| 3864 UnpatchWindowsFunctions(); | |
| 3865 #endif | |
| 3866 } | |
| 3867 #endif | |
| 3868 }; | |
| 3869 | |
| 3870 #ifndef WTF_CHANGES | |
| 3871 static TCMallocGuard module_enter_exit_hook; | |
| 3872 #endif | |
| 3873 | |
| 3874 | |
| 3875 //------------------------------------------------------------------- | |
| 3876 // Helpers for the exported routines below | |
| 3877 //------------------------------------------------------------------- | |
| 3878 | |
| 3879 #ifndef WTF_CHANGES | |
| 3880 | |
| 3881 static Span* DoSampledAllocation(size_t size) { | |
| 3882 | |
| 3883 // Grab the stack trace outside the heap lock | |
| 3884 StackTrace tmp; | |
| 3885 tmp.depth = GetStackTrace(tmp.stack, kMaxStackDepth, 1); | |
| 3886 tmp.size = size; | |
| 3887 | |
| 3888 SpinLockHolder h(&pageheap_lock); | |
| 3889 // Allocate span | |
| 3890 Span *span = pageheap->New(pages(size == 0 ? 1 : size)); | |
| 3891 if (span == NULL) { | |
| 3892 return NULL; | |
| 3893 } | |
| 3894 | |
| 3895 // Allocate stack trace | |
| 3896 StackTrace *stack = stacktrace_allocator.New(); | |
| 3897 if (stack == NULL) { | |
| 3898 // Sampling failed because of lack of memory | |
| 3899 return span; | |
| 3900 } | |
| 3901 | |
| 3902 *stack = tmp; | |
| 3903 span->sample = 1; | |
| 3904 span->objects = stack; | |
| 3905 DLL_Prepend(&sampled_objects, span); | |
| 3906 | |
| 3907 return span; | |
| 3908 } | |
| 3909 #endif | |
| 3910 | |
| 3911 static inline bool CheckCachedSizeClass(void *ptr) { | |
| 3912 PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; | |
| 3913 size_t cached_value = pageheap->GetSizeClassIfCached(p); | |
| 3914 return cached_value == 0 || | |
| 3915 cached_value == pageheap->GetDescriptor(p)->sizeclass; | |
| 3916 } | |
| 3917 | |
| 3918 static inline void* CheckedMallocResult(void *result) | |
| 3919 { | |
| 3920 ASSERT(result == 0 || CheckCachedSizeClass(result)); | |
| 3921 return result; | |
| 3922 } | |
| 3923 | |
| 3924 static inline void* SpanToMallocResult(Span *span) { | |
| 3925 ASSERT_SPAN_COMMITTED(span); | |
| 3926 pageheap->CacheSizeClass(span->start, 0); | |
| 3927 void* result = reinterpret_cast<void*>(span->start << kPageShift); | |
| 3928 POISON_ALLOCATION(result, span->length << kPageShift); | |
| 3929 return CheckedMallocResult(result); | |
| 3930 } | |
| 3931 | |
| 3932 #ifdef WTF_CHANGES | |
| 3933 template <bool crashOnFailure> | |
| 3934 #endif | |
| 3935 static ALWAYS_INLINE void* do_malloc(size_t size) { | |
| 3936 void* ret = NULL; | |
| 3937 | |
| 3938 #ifdef WTF_CHANGES | |
| 3939 ASSERT(!isForbidden()); | |
| 3940 #endif | |
| 3941 | |
| 3942 // The following call forces module initialization | |
| 3943 TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache(); | |
| 3944 #ifndef WTF_CHANGES | |
| 3945 if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) { | |
| 3946 Span* span = DoSampledAllocation(size); | |
| 3947 if (span != NULL) { | |
| 3948 ret = SpanToMallocResult(span); | |
| 3949 } | |
| 3950 } else | |
| 3951 #endif | |
| 3952 if (size > kMaxSize) { | |
| 3953 // Use page-level allocator | |
| 3954 SpinLockHolder h(&pageheap_lock); | |
| 3955 Span* span = pageheap->New(pages(size)); | |
| 3956 if (span != NULL) { | |
| 3957 ret = SpanToMallocResult(span); | |
| 3958 } | |
| 3959 } else { | |
| 3960 // The common case, and also the simplest. This just pops the | |
| 3961   // size-appropriate freelist, after replenishing it if it's empty. | |
| 3962 ret = CheckedMallocResult(heap->Allocate(size)); | |
| 3963 } | |
| 3964 if (!ret) { | |
| 3965 #ifdef WTF_CHANGES | |
| 3966 if (crashOnFailure) // This branch should be optimized out by the compiler. | |
| 3967 CRASH(); | |
| 3968 #else | |
| 3969 errno = ENOMEM; | |
| 3970 #endif | |
| 3971 } | |
| 3972 return ret; | |
| 3973 } | |
| 3974 | |
| 3975 static ALWAYS_INLINE void do_free(void* ptr) { | |
| 3976 if (ptr == NULL) return; | |
| 3977 ASSERT(pageheap != NULL); // Should not call free() before malloc() | |
| 3978 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; | |
| 3979 Span* span = NULL; | |
| 3980 size_t cl = pageheap->GetSizeClassIfCached(p); | |
| 3981 | |
| 3982 if (cl == 0) { | |
| 3983 span = pageheap->GetDescriptor(p); | |
| 3984 RELEASE_ASSERT(span->isValid()); | |
| 3985 cl = span->sizeclass; | |
| 3986 pageheap->CacheSizeClass(p, cl); | |
| 3987 } | |
| 3988 if (cl != 0) { | |
| 3989 #ifndef NO_TCMALLOC_SAMPLES | |
| 3990 ASSERT(!pageheap->GetDescriptor(p)->sample); | |
| 3991 #endif | |
| 3992 TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent(); | |
| 3993 if (heap != NULL) { | |
| 3994 heap->Deallocate(HardenedSLL::create(ptr), cl); | |
| 3995 } else { | |
| 3996 // Delete directly into central cache | |
| 3997 POISON_DEALLOCATION(ptr, ByteSizeForClass(cl)); | |
| 3998       SLL_SetNext(HardenedSLL::create(ptr), HardenedSLL::null(), central_cache[cl].entropy()); | |
| 3999       central_cache[cl].InsertRange(HardenedSLL::create(ptr), HardenedSLL::create(ptr), 1); | |
| 4000 } | |
| 4001 } else { | |
| 4002 SpinLockHolder h(&pageheap_lock); | |
| 4003 ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0); | |
| 4004 ASSERT(span != NULL && span->start == p); | |
| 4005 #ifndef NO_TCMALLOC_SAMPLES | |
| 4006 if (span->sample) { | |
| 4007 DLL_Remove(span); | |
| 4008 stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects)); | |
| 4009 span->objects = NULL; | |
| 4010 } | |
| 4011 #endif | |
| 4012 | |
| 4013 POISON_DEALLOCATION(ptr, span->length << kPageShift); | |
| 4014 pageheap->Delete(span); | |
| 4015 } | |
| 4016 } | |
| 4017 | |
| 4018 #ifndef WTF_CHANGES | |
| 4019 // For use by exported routines below that want specific alignments | |
| 4020 // | |
| 4021 // Note: this code can be slow, and can significantly fragment memory. | |
| 4022 // The expectation is that memalign/posix_memalign/valloc/pvalloc will | |
| 4023 // not be invoked very often. This requirement simplifies our | |
| 4024 // implementation and allows us to tune for expected allocation | |
| 4025 // patterns. | |
| 4026 static void* do_memalign(size_t align, size_t size) { | |
| 4027 ASSERT((align & (align - 1)) == 0); | |
| 4028 ASSERT(align > 0); | |
| 4029 if (pageheap == NULL) TCMalloc_ThreadCache::InitModule(); | |
| 4030 | |
| 4031 // Allocate at least one byte to avoid boundary conditions below | |
| 4032 if (size == 0) size = 1; | |
| 4033 | |
| 4034 if (size <= kMaxSize && align < kPageSize) { | |
| 4035 // Search through acceptable size classes looking for one with | |
| 4036 // enough alignment. This depends on the fact that | |
| 4037 // InitSizeClasses() currently produces several size classes that | |
| 4038 // are aligned at powers of two. We will waste time and space if | |
| 4039 // we miss in the size class array, but that is deemed acceptable | |
| 4040 // since memalign() should be used rarely. | |
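|     // Illustrative example (hypothetical class sizes): for memalign(64, 100), SizeClass(100) | |
|     // might map to a 112-byte class; 112 & 63 != 0, so the loop advances until it reaches a | |
|     // class whose size is a multiple of 64 (e.g. 128), which is then allocated as usual. | |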
| 4041 size_t cl = SizeClass(size); | |
| 4042 while (cl < kNumClasses && ((class_to_size[cl] & (align - 1)) != 0)) { | |
| 4043 cl++; | |
| 4044 } | |
| 4045 if (cl < kNumClasses) { | |
| 4046 TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache(); | |
| 4047 return CheckedMallocResult(heap->Allocate(class_to_size[cl])); | |
| 4048 } | |
| 4049 } | |
| 4050 | |
| 4051 // We will allocate directly from the page heap | |
| 4052 SpinLockHolder h(&pageheap_lock); | |
| 4053 | |
| 4054 if (align <= kPageSize) { | |
| 4055 // Any page-level allocation will be fine | |
| 4056 // TODO: We could put the rest of this page in the appropriate | |
| 4057 // TODO: cache but it does not seem worth it. | |
| 4058 Span* span = pageheap->New(pages(size)); | |
| 4059 return span == NULL ? NULL : SpanToMallocResult(span); | |
| 4060 } | |
| 4061 | |
| 4062 // Allocate extra pages and carve off an aligned portion | |
| 4063 const Length alloc = pages(size + align); | |
| 4064 Span* span = pageheap->New(alloc); | |
| 4065 if (span == NULL) return NULL; | |
| 4066 | |
| 4067 // Skip starting portion so that we end up aligned | |
| 4068 Length skip = 0; | |
| 4069 while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) { | |
| 4070 skip++; | |
| 4071 } | |
| 4072 ASSERT(skip < alloc); | |
| 4073 if (skip > 0) { | |
| 4074 Span* rest = pageheap->Split(span, skip); | |
| 4075 pageheap->Delete(span); | |
| 4076 span = rest; | |
| 4077 } | |
| 4078 | |
| 4079 // Skip trailing portion that we do not need to return | |
| 4080 const Length needed = pages(size); | |
| 4081 ASSERT(span->length >= needed); | |
| 4082 if (span->length > needed) { | |
| 4083 Span* trailer = pageheap->Split(span, needed); | |
| 4084 pageheap->Delete(trailer); | |
| 4085 } | |
| 4086 return SpanToMallocResult(span); | |
| 4087 } | |
| 4088 #endif | |
| 4089 | |
| 4090 // Helpers for use by exported routines below: | |
| 4091 | |
| 4092 #ifndef WTF_CHANGES | |
| 4093 static inline void do_malloc_stats() { | |
| 4094 PrintStats(1); | |
| 4095 } | |
| 4096 #endif | |
| 4097 | |
| 4098 static inline int do_mallopt(int, int) { | |
| 4099 return 1; // Indicates error | |
| 4100 } | |
| 4101 | |
| 4102 #ifdef HAVE_STRUCT_MALLINFO // mallinfo isn't defined on freebsd, for instance | |
| 4103 static inline struct mallinfo do_mallinfo() { | |
| 4104 TCMallocStats stats; | |
| 4105 ExtractStats(&stats, NULL); | |
| 4106 | |
| 4107 // Just some of the fields are filled in. | |
| 4108 struct mallinfo info; | |
| 4109 memset(&info, 0, sizeof(info)); | |
| 4110 | |
| 4111 // Unfortunately, the struct contains "int" field, so some of the | |
| 4112 // size values will be truncated. | |
| 4113 info.arena = static_cast<int>(stats.system_bytes); | |
| 4114 info.fsmblks = static_cast<int>(stats.thread_bytes | |
| 4115 + stats.central_bytes | |
| 4116 + stats.transfer_bytes); | |
| 4117 info.fordblks = static_cast<int>(stats.pageheap_bytes); | |
| 4118 info.uordblks = static_cast<int>(stats.system_bytes | |
| 4119 - stats.thread_bytes | |
| 4120 - stats.central_bytes | |
| 4121 - stats.transfer_bytes | |
| 4122 - stats.pageheap_bytes); | |
| 4123 | |
| 4124 return info; | |
| 4125 } | |
| 4126 #endif | |
| 4127 | |
| 4128 //------------------------------------------------------------------- | |
| 4129 // Exported routines | |
| 4130 //------------------------------------------------------------------- | |
| 4131 | |
| 4132 // CAVEAT: The code structure below ensures that MallocHook methods are always | |
| 4133 // called from the stack frame of the invoked allocation function. | |
| 4134 // heap-checker.cc depends on this to start a stack trace from | |
| 4135 // the call to the (de)allocation function. | |
| 4136 | |
| 4137 #ifndef WTF_CHANGES | |
| 4138 extern "C" | |
| 4139 #else | |
| 4140 #define do_malloc do_malloc<crashOnFailure> | |
| 4141 | |
| 4142 template <bool crashOnFailure> | |
| 4143 ALWAYS_INLINE void* malloc(size_t); | |
| 4144 | |
| 4145 void* fastMalloc(size_t size) | |
| 4146 { | |
| 4147 return malloc<true>(size); | |
| 4148 } | |
| 4149 | |
| 4150 TryMallocReturnValue tryFastMalloc(size_t size) | |
| 4151 { | |
| 4152 return malloc<false>(size); | |
| 4153 } | |
| 4154 | |
| 4155 template <bool crashOnFailure> | |
| 4156 ALWAYS_INLINE | |
| 4157 #endif | |
| 4158 void* malloc(size_t size) { | |
| 4159 #if ENABLE(WTF_MALLOC_VALIDATION) | |
| 4160     if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= size) // If overflow would occur... | |
| 4161 return 0; | |
| 4162 void* result = do_malloc(size + Internal::ValidationBufferSize); | |
| 4163 if (!result) | |
| 4164 return 0; | |
| 4165 | |
| 4166     Internal::ValidationHeader* header = static_cast<Internal::ValidationHeader*>(result); | |
| 4167 header->m_size = size; | |
| 4168 header->m_type = Internal::AllocTypeMalloc; | |
| 4169 header->m_prefix = static_cast<unsigned>(Internal::ValidationPrefix); | |
| 4170 result = header + 1; | |
| 4171 *Internal::fastMallocValidationSuffix(result) = Internal::ValidationSuffix; | |
| 4172 fastMallocValidate(result); | |
| 4173 #else | |
| 4174 void* result = do_malloc(size); | |
| 4175 #endif | |
| 4176 | |
| 4177 #ifndef WTF_CHANGES | |
| 4178 MallocHook::InvokeNewHook(result, size); | |
| 4179 #endif | |
| 4180 return result; | |
| 4181 } | |
| 4182 | |
| 4183 #ifndef WTF_CHANGES | |
| 4184 extern "C" | |
| 4185 #endif | |
| 4186 void free(void* ptr) { | |
| 4187 #ifndef WTF_CHANGES | |
| 4188 MallocHook::InvokeDeleteHook(ptr); | |
| 4189 #endif | |
| 4190 | |
| 4191 #if ENABLE(WTF_MALLOC_VALIDATION) | |
| 4192 if (!ptr) | |
| 4193 return; | |
| 4194 | |
| 4195 fastMallocValidate(ptr); | |
| 4196     Internal::ValidationHeader* header = Internal::fastMallocValidationHeader(ptr); | |
| 4197 memset(ptr, 0xCC, header->m_size); | |
| 4198 do_free(header); | |
| 4199 #else | |
| 4200 do_free(ptr); | |
| 4201 #endif | |
| 4202 } | |
| 4203 | |
| 4204 #ifndef WTF_CHANGES | |
| 4205 extern "C" | |
| 4206 #else | |
| 4207 template <bool crashOnFailure> | |
| 4208 ALWAYS_INLINE void* calloc(size_t, size_t); | |
| 4209 | |
| 4210 void* fastCalloc(size_t n, size_t elem_size) | |
| 4211 { | |
| 4212 void* result = calloc<true>(n, elem_size); | |
| 4213 #if ENABLE(WTF_MALLOC_VALIDATION) | |
| 4214 fastMallocValidate(result); | |
| 4215 #endif | |
| 4216 return result; | |
| 4217 } | |
| 4218 | |
| 4219 TryMallocReturnValue tryFastCalloc(size_t n, size_t elem_size) | |
| 4220 { | |
| 4221 void* result = calloc<false>(n, elem_size); | |
| 4222 #if ENABLE(WTF_MALLOC_VALIDATION) | |
| 4223 fastMallocValidate(result); | |
| 4224 #endif | |
| 4225 return result; | |
| 4226 } | |
| 4227 | |
| 4228 template <bool crashOnFailure> | |
| 4229 ALWAYS_INLINE | |
| 4230 #endif | |
| 4231 void* calloc(size_t n, size_t elem_size) { | |
| 4232 size_t totalBytes = n * elem_size; | |
| 4233 | |
| 4234 // Protect against overflow | |
| 4235 if (n > 1 && elem_size && (totalBytes / elem_size) != n) | |
| 4236 return 0; | |
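|          // (For instance, on a hypothetical 32-bit build, n == 0x10000 and elem_size == 0x10001 | |
|          // wrap totalBytes around to 0x10000; the division check then sees | |
|          // 0x10000 / 0x10001 == 0, which differs from n, and the request is rejected.) | |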
| 4237 | |
| 4238 #if ENABLE(WTF_MALLOC_VALIDATION) | |
| 4239 void* result = malloc<crashOnFailure>(totalBytes); | |
| 4240 if (!result) | |
| 4241 return 0; | |
| 4242 | |
| 4243 memset(result, 0, totalBytes); | |
| 4244 fastMallocValidate(result); | |
| 4245 #else | |
| 4246 void* result = do_malloc(totalBytes); | |
| 4247 if (result != NULL) { | |
| 4248 memset(result, 0, totalBytes); | |
| 4249 } | |
| 4250 #endif | |
| 4251 | |
| 4252 #ifndef WTF_CHANGES | |
| 4253 MallocHook::InvokeNewHook(result, totalBytes); | |
| 4254 #endif | |
| 4255 return result; | |
| 4256 } | |
| 4257 | |
| 4258 // Since cfree isn't used anywhere, we don't compile it in. | |
| 4259 #ifndef WTF_CHANGES | |
| 4260 #ifndef WTF_CHANGES | |
| 4261 extern "C" | |
| 4262 #endif | |
| 4263 void cfree(void* ptr) { | |
| 4264 #ifndef WTF_CHANGES | |
| 4265 MallocHook::InvokeDeleteHook(ptr); | |
| 4266 #endif | |
| 4267 do_free(ptr); | |
| 4268 } | |
| 4269 #endif | |
| 4270 | |
| 4271 #ifndef WTF_CHANGES | |
| 4272 extern "C" | |
| 4273 #else | |
| 4274 template <bool crashOnFailure> | |
| 4275 ALWAYS_INLINE void* realloc(void*, size_t); | |
| 4276 | |
| 4277 void* fastRealloc(void* old_ptr, size_t new_size) | |
| 4278 { | |
| 4279 #if ENABLE(WTF_MALLOC_VALIDATION) | |
| 4280 fastMallocValidate(old_ptr); | |
| 4281 #endif | |
| 4282 void* result = realloc<true>(old_ptr, new_size); | |
| 4283 #if ENABLE(WTF_MALLOC_VALIDATION) | |
| 4284 fastMallocValidate(result); | |
| 4285 #endif | |
| 4286 return result; | |
| 4287 } | |
| 4288 | |
| 4289 TryMallocReturnValue tryFastRealloc(void* old_ptr, size_t new_size) | |
| 4290 { | |
| 4291 #if ENABLE(WTF_MALLOC_VALIDATION) | |
| 4292 fastMallocValidate(old_ptr); | |
| 4293 #endif | |
| 4294 void* result = realloc<false>(old_ptr, new_size); | |
| 4295 #if ENABLE(WTF_MALLOC_VALIDATION) | |
| 4296 fastMallocValidate(result); | |
| 4297 #endif | |
| 4298 return result; | |
| 4299 } | |
| 4300 | |
| 4301 template <bool crashOnFailure> | |
| 4302 ALWAYS_INLINE | |
| 4303 #endif | |
| 4304 void* realloc(void* old_ptr, size_t new_size) { | |
| 4305 if (old_ptr == NULL) { | |
| 4306 #if ENABLE(WTF_MALLOC_VALIDATION) | |
| 4307 void* result = malloc<crashOnFailure>(new_size); | |
| 4308 #else | |
| 4309 void* result = do_malloc(new_size); | |
| 4310 #ifndef WTF_CHANGES | |
| 4311 MallocHook::InvokeNewHook(result, new_size); | |
| 4312 #endif | |
| 4313 #endif | |
| 4314 return result; | |
| 4315 } | |
| 4316 if (new_size == 0) { | |
| 4317 #ifndef WTF_CHANGES | |
| 4318 MallocHook::InvokeDeleteHook(old_ptr); | |
| 4319 #endif | |
| 4320 free(old_ptr); | |
| 4321 return NULL; | |
| 4322 } | |
| 4323 | |
| 4324 #if ENABLE(WTF_MALLOC_VALIDATION) | |
| 4325     if (std::numeric_limits<size_t>::max() - Internal::ValidationBufferSize <= new_size) // If overflow would occur... | |
| 4326 return 0; | |
| 4327     Internal::ValidationHeader* header = Internal::fastMallocValidationHeader(old_ptr); | |
| 4328 fastMallocValidate(old_ptr); | |
| 4329 old_ptr = header; | |
| 4330 header->m_size = new_size; | |
| 4331 new_size += Internal::ValidationBufferSize; | |
| 4332 #endif | |
| 4333 | |
| 4334 // Get the size of the old entry | |
| 4335 const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift; | |
| 4336 size_t cl = pageheap->GetSizeClassIfCached(p); | |
| 4337 Span *span = NULL; | |
| 4338 size_t old_size; | |
| 4339 if (cl == 0) { | |
| 4340 span = pageheap->GetDescriptor(p); | |
| 4341 cl = span->sizeclass; | |
| 4342 pageheap->CacheSizeClass(p, cl); | |
| 4343 } | |
| 4344 if (cl != 0) { | |
| 4345 old_size = ByteSizeForClass(cl); | |
| 4346 } else { | |
| 4347 ASSERT(span != NULL); | |
| 4348 old_size = span->length << kPageShift; | |
| 4349 } | |
| 4350 | |
| 4351 // Reallocate if the new size is larger than the old size, | |
| 4352 // or if the new size is significantly smaller than the old size. | |
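|        // ("Significantly smaller" means AllocationSize(new_size), the number of bytes | |
|        // tcmalloc would actually hand out for new_size, is below old_size, so copying | |
|        // into a fresh, smaller allocation is worth the memcpy.) | |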
| 4353 if ((new_size > old_size) || (AllocationSize(new_size) < old_size)) { | |
| 4354 // Need to reallocate | |
| 4355 void* new_ptr = do_malloc(new_size); | |
| 4356 if (new_ptr == NULL) { | |
| 4357 return NULL; | |
| 4358 } | |
| 4359 #ifndef WTF_CHANGES | |
| 4360 MallocHook::InvokeNewHook(new_ptr, new_size); | |
| 4361 #endif | |
| 4362 memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size)); | |
| 4363 #ifndef WTF_CHANGES | |
| 4364 MallocHook::InvokeDeleteHook(old_ptr); | |
| 4365 #endif | |
| 4366 // We could use a variant of do_free() that leverages the fact | |
| 4367 // that we already know the sizeclass of old_ptr. The benefit | |
| 4368 // would be small, so don't bother. | |
| 4369 do_free(old_ptr); | |
| 4370 #if ENABLE(WTF_MALLOC_VALIDATION) | |
| 4371 new_ptr = static_cast<Internal::ValidationHeader*>(new_ptr) + 1; | |
| 4372 *Internal::fastMallocValidationSuffix(new_ptr) = Internal::ValidationSuffix; | |
| 4373 #endif | |
| 4374 return new_ptr; | |
| 4375 } else { | |
| 4376 #if ENABLE(WTF_MALLOC_VALIDATION) | |
| 4377     old_ptr = static_cast<Internal::ValidationHeader*>(old_ptr) + 1; // Set old_ptr back to the user pointer. | |
| 4378 *Internal::fastMallocValidationSuffix(old_ptr) = Internal::ValidationSuffix; | |
| 4379 #endif | |
| 4380 return old_ptr; | |
| 4381 } | |
| 4382 } | |
| 4383 | |
| 4384 #ifdef WTF_CHANGES | |
| 4385 #undef do_malloc | |
| 4386 #else | |
| 4387 | |
| 4388 static SpinLock set_new_handler_lock = SPINLOCK_INITIALIZER; | |
| 4389 | |
| 4390 static inline void* cpp_alloc(size_t size, bool nothrow) { | |
| 4391 for (;;) { | |
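|          // Retry loop: each pass attempts the allocation and, on failure, gives the | |
|          // installed new_handler a chance to release memory before trying again. | |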
| 4392 void* p = do_malloc(size); | |
| 4393 #ifdef PREANSINEW | |
| 4394 return p; | |
| 4395 #else | |
| 4396 if (p == NULL) { // allocation failed | |
| 4397 // Get the current new handler. NB: this function is not | |
| 4398 // thread-safe. We make a feeble stab at making it so here, but | |
| 4399 // this lock only protects against tcmalloc interfering with | |
| 4400 // itself, not with other libraries calling set_new_handler. | |
| 4401 std::new_handler nh; | |
| 4402 { | |
| 4403 SpinLockHolder h(&set_new_handler_lock); | |
| 4404 nh = std::set_new_handler(0); | |
| 4405 (void) std::set_new_handler(nh); | |
| 4406 } | |
| 4407 // If no new_handler is established, the allocation failed. | |
| 4408 if (!nh) { | |
| 4409 if (nothrow) return 0; | |
| 4410 throw std::bad_alloc(); | |
| 4411 } | |
| 4412 // Otherwise, try the new_handler. If it returns, retry the | |
| 4413 // allocation. If it throws std::bad_alloc, fail the allocation. | |
| 4414       // If it throws something else, don't interfere. | |
| 4415 try { | |
| 4416 (*nh)(); | |
| 4417 } catch (const std::bad_alloc&) { | |
| 4418 if (!nothrow) throw; | |
| 4419 return p; | |
| 4420 } | |
| 4421 } else { // allocation success | |
| 4422 return p; | |
| 4423 } | |
| 4424 #endif | |
| 4425 } | |
| 4426 } | |
| 4427 | |
| 4428 extern "C" void* memalign(size_t align, size_t size) __THROW { | |
| 4429 void* result = do_memalign(align, size); | |
| 4430 MallocHook::InvokeNewHook(result, size); | |
| 4431 return result; | |
| 4432 } | |
| 4433 | |
| 4434 extern "C" int posix_memalign(void** result_ptr, size_t align, size_t size) | |
| 4435 __THROW { | |
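|        // POSIX requires EINVAL when the alignment is not a power of two or is not a | |
|        // multiple of sizeof(void*); "(align & (align - 1)) != 0" is the usual | |
|        // power-of-two test. | |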
| 4436 if (((align % sizeof(void*)) != 0) || | |
| 4437 ((align & (align - 1)) != 0) || | |
| 4438 (align == 0)) { | |
| 4439 return EINVAL; | |
| 4440 } | |
| 4441 | |
| 4442 void* result = do_memalign(align, size); | |
| 4443 MallocHook::InvokeNewHook(result, size); | |
| 4444 if (result == NULL) { | |
| 4445 return ENOMEM; | |
| 4446 } else { | |
| 4447 *result_ptr = result; | |
| 4448 return 0; | |
| 4449 } | |
| 4450 } | |
| 4451 | |
| 4452 static size_t pagesize = 0; | |
| 4453 | |
| 4454 extern "C" void* valloc(size_t size) __THROW { | |
| 4455 // Allocate page-aligned object of length >= size bytes | |
| 4456 if (pagesize == 0) pagesize = getpagesize(); | |
| 4457 void* result = do_memalign(pagesize, size); | |
| 4458 MallocHook::InvokeNewHook(result, size); | |
| 4459 return result; | |
| 4460 } | |
| 4461 | |
| 4462 extern "C" void* pvalloc(size_t size) __THROW { | |
| 4463 // Round up size to a multiple of pagesize | |
| 4464 if (pagesize == 0) pagesize = getpagesize(); | |
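|        // (The mask trick below rounds up correctly because pagesize is a power of two.) | |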
| 4465 size = (size + pagesize - 1) & ~(pagesize - 1); | |
| 4466 void* result = do_memalign(pagesize, size); | |
| 4467 MallocHook::InvokeNewHook(result, size); | |
| 4468 return result; | |
| 4469 } | |
| 4470 | |
| 4471 extern "C" void malloc_stats(void) { | |
| 4472 do_malloc_stats(); | |
| 4473 } | |
| 4474 | |
| 4475 extern "C" int mallopt(int cmd, int value) { | |
| 4476 return do_mallopt(cmd, value); | |
| 4477 } | |
| 4478 | |
| 4479 #ifdef HAVE_STRUCT_MALLINFO | |
| 4480 extern "C" struct mallinfo mallinfo(void) { | |
| 4481 return do_mallinfo(); | |
| 4482 } | |
| 4483 #endif | |
| 4484 | |
| 4485 //------------------------------------------------------------------- | |
| 4486 // Some library routines on RedHat 9 allocate memory using malloc() | |
| 4487 // and free it using __libc_free() (or vice-versa). Since we provide | |
| 4488 // our own implementations of malloc/free, we need to make sure that | |
| 4489 // the __libc_XXX variants (defined as part of glibc) also point to | |
| 4490 // the same implementations. | |
| 4491 //------------------------------------------------------------------- | |
| 4492 | |
| 4493 #if defined(__GLIBC__) | |
| 4494 extern "C" { | |
| 4495 #if COMPILER(GCC) && !defined(__MACH__) && defined(HAVE___ATTRIBUTE__) | |
| 4496 // Potentially faster variants that use the gcc alias extension. | |
| 4497 // Mach-O (Darwin) does not support weak aliases, hence the __MACH__ check. | |
| 4498 # define ALIAS(x) __attribute__ ((weak, alias (x))) | |
| 4499 void* __libc_malloc(size_t size) ALIAS("malloc"); | |
| 4500 void __libc_free(void* ptr) ALIAS("free"); | |
| 4501 void* __libc_realloc(void* ptr, size_t size) ALIAS("realloc"); | |
| 4502 void* __libc_calloc(size_t n, size_t size) ALIAS("calloc"); | |
| 4503 void __libc_cfree(void* ptr) ALIAS("cfree"); | |
| 4504 void* __libc_memalign(size_t align, size_t s) ALIAS("memalign"); | |
| 4505 void* __libc_valloc(size_t size) ALIAS("valloc"); | |
| 4506 void* __libc_pvalloc(size_t size) ALIAS("pvalloc"); | |
| 4507 int __posix_memalign(void** r, size_t a, size_t s) ALIAS("posix_memalign"); | |
| 4508 # undef ALIAS | |
| 4509 # else /* not __GNUC__ */ | |
| 4510 // Portable wrappers | |
| 4511 void* __libc_malloc(size_t size) { return malloc(size); } | |
| 4512 void __libc_free(void* ptr) { free(ptr); } | |
| 4513 void* __libc_realloc(void* ptr, size_t size) { return realloc(ptr, size); } | |
| 4514 void* __libc_calloc(size_t n, size_t size) { return calloc(n, size); } | |
| 4515 void __libc_cfree(void* ptr) { cfree(ptr); } | |
| 4516 void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); } | |
| 4517 void* __libc_valloc(size_t size) { return valloc(size); } | |
| 4518 void* __libc_pvalloc(size_t size) { return pvalloc(size); } | |
| 4519 int __posix_memalign(void** r, size_t a, size_t s) { | |
| 4520 return posix_memalign(r, a, s); | |
| 4521 } | |
| 4522 # endif /* __GNUC__ */ | |
| 4523 } | |
| 4524 #endif /* __GLIBC__ */ | |
| 4525 | |
| 4526 // Override __libc_memalign in libc on Linux boxes specially. | |
| 4527 // Those systems have a bug in libc that causes them to (very rarely) allocate | |
| 4528 // with __libc_memalign() yet deallocate with free(), and the | |
| 4529 // definitions above don't catch it. | |
| 4530 // This function is an exception to the rule of calling MallocHook method | |
| 4531 // from the stack frame of the allocation function; | |
| 4532 // heap-checker handles this special case explicitly. | |
| 4533 static void *MemalignOverride(size_t align, size_t size, const void *caller) | |
| 4534 __THROW { | |
| 4535 void* result = do_memalign(align, size); | |
| 4536 MallocHook::InvokeNewHook(result, size); | |
| 4537 return result; | |
| 4538 } | |
| 4539 void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride; | |
| 4540 | |
| 4541 #endif | |
| 4542 | |
| 4543 #ifdef WTF_CHANGES | |
| 4544 void releaseFastMallocFreeMemory() | |
| 4545 { | |
| 4546 // Flush free pages in the current thread cache back to the page heap. | |
| 4547     if (TCMalloc_ThreadCache* threadCache = TCMalloc_ThreadCache::GetCacheIfPresent()) | |
| 4548 threadCache->Cleanup(); | |
| 4549 | |
| 4550 SpinLockHolder h(&pageheap_lock); | |
| 4551 pageheap->ReleaseFreePages(); | |
| 4552 } | |
| 4553 | |
| 4554 FastMallocStatistics fastMallocStatistics() | |
| 4555 { | |
| 4556 FastMallocStatistics statistics; | |
| 4557 | |
| 4558 SpinLockHolder lockHolder(&pageheap_lock); | |
| 4559 statistics.reservedVMBytes = static_cast<size_t>(pageheap->SystemBytes()); | |
| 4560     statistics.committedVMBytes = statistics.reservedVMBytes - pageheap->ReturnedBytes(); | |
| 4561 | |
| 4562 statistics.freeListBytes = 0; | |
| 4563 for (unsigned cl = 0; cl < kNumClasses; ++cl) { | |
| 4564 const int length = central_cache[cl].length(); | |
| 4565 const int tc_length = central_cache[cl].tc_length(); | |
| 4566 | |
| 4567 statistics.freeListBytes += ByteSizeForClass(cl) * (length + tc_length); | |
| 4568 } | |
| 4569     for (TCMalloc_ThreadCache* threadCache = thread_heaps; threadCache ; threadCache = threadCache->next_) | |
| 4570 statistics.freeListBytes += threadCache->Size(); | |
| 4571 | |
| 4572 return statistics; | |
| 4573 } | |
| 4574 | |
| 4575 size_t fastMallocSize(const void* ptr) | |
| 4576 { | |
| 4577 #if ENABLE(WTF_MALLOC_VALIDATION) | |
| 4578 return Internal::fastMallocValidationHeader(const_cast<void*>(ptr))->m_size; | |
| 4579 #else | |
| 4580 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; | |
| 4581 Span* span = pageheap->GetDescriptorEnsureSafe(p); | |
| 4582 | |
| 4583 if (!span || span->free) | |
| 4584 return 0; | |
| 4585 | |
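|          // A pointer that is still sitting on the span's free list is not a live | |
|          // allocation, so report a size of zero for it. | |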
| 4586     for (HardenedSLL free = span->objects; free; free = SLL_Next(free, HARDENING_ENTROPY)) { | |
| 4587 if (ptr == free.value()) | |
| 4588 return 0; | |
| 4589 } | |
| 4590 | |
| 4591 if (size_t cl = span->sizeclass) | |
| 4592 return ByteSizeForClass(cl); | |
| 4593 | |
| 4594 return span->length << kPageShift; | |
| 4595 #endif | |
| 4596 } | |
| 4597 | |
| 4598 #if OS(DARWIN) | |
| 4599 | |
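|      // (Context: with TCMALLOC_HARDENING, free-list "next" pointers are stored XOR-masked; | |
|      // this helper reads such a masked pointer out of the target task and unmasks it with | |
|      // XOR_MASK_PTR_WITH_KEY, keyed on the slot's remote address and the heap's entropy.) | |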
| 4600 template <typename T> | |
| 4601 T* RemoteMemoryReader::nextEntryInHardenedLinkedList(T** remoteAddress, uintptr_t entropy) const | |
| 4602 { | |
| 4603 T** localAddress = (*this)(remoteAddress); | |
| 4604 if (!localAddress) | |
| 4605 return 0; | |
| 4606 T* hardenedNext = *localAddress; | |
| 4607 if (!hardenedNext || hardenedNext == (void*)entropy) | |
| 4608 return 0; | |
| 4609 return XOR_MASK_PTR_WITH_KEY(hardenedNext, remoteAddress, entropy); | |
| 4610 } | |
| 4611 | |
| 4612 class FreeObjectFinder { | |
| 4613 const RemoteMemoryReader& m_reader; | |
| 4614 HashSet<void*> m_freeObjects; | |
| 4615 | |
| 4616 public: | |
| 4617 FreeObjectFinder(const RemoteMemoryReader& reader) : m_reader(reader) { } | |
| 4618 | |
| 4619 void visit(void* ptr) { m_freeObjects.add(ptr); } | |
| 4620 bool isFreeObject(void* ptr) const { return m_freeObjects.contains(ptr); } | |
| 4621     bool isFreeObject(vm_address_t ptr) const { return isFreeObject(reinterpret_cast<void*>(ptr)); } | |
| 4622 size_t freeObjectCount() const { return m_freeObjects.size(); } | |
| 4623 | |
| 4624 void findFreeObjects(TCMalloc_ThreadCache* threadCache) | |
| 4625 { | |
| 4626         for (; threadCache; threadCache = (threadCache->next_ ? m_reader(threadCache->next_) : 0)) | |
| 4627 threadCache->enumerateFreeObjects(*this, m_reader); | |
| 4628 } | |
| 4629 | |
| 4630     void findFreeObjects(TCMalloc_Central_FreeListPadded* centralFreeList, size_t numSizes, TCMalloc_Central_FreeListPadded* remoteCentralFreeList) | |
| 4631 { | |
| 4632 for (unsigned i = 0; i < numSizes; i++) | |
| 4633             centralFreeList[i].enumerateFreeObjects(*this, m_reader, remoteCentralFreeList + i); | |
| 4634 } | |
| 4635 }; | |
| 4636 | |
| 4637 class PageMapFreeObjectFinder { | |
| 4638 const RemoteMemoryReader& m_reader; | |
| 4639 FreeObjectFinder& m_freeObjectFinder; | |
| 4640 uintptr_t m_entropy; | |
| 4641 | |
| 4642 public: | |
| 4643     PageMapFreeObjectFinder(const RemoteMemoryReader& reader, FreeObjectFinder& freeObjectFinder, uintptr_t entropy) | |
| 4644 : m_reader(reader) | |
| 4645 , m_freeObjectFinder(freeObjectFinder) | |
| 4646 , m_entropy(entropy) | |
| 4647 { | |
| 4648 #if ENABLE(TCMALLOC_HARDENING) | |
| 4649 ASSERT(m_entropy); | |
| 4650 #endif | |
| 4651 } | |
| 4652 | |
| 4653 int visit(void* ptr) const | |
| 4654 { | |
| 4655 if (!ptr) | |
| 4656 return 1; | |
| 4657 | |
| 4658 Span* span = m_reader(reinterpret_cast<Span*>(ptr)); | |
| 4659 if (!span) | |
| 4660 return 1; | |
| 4661 | |
| 4662 if (span->free) { | |
| 4663 void* ptr = reinterpret_cast<void*>(span->start << kPageShift); | |
| 4664 m_freeObjectFinder.visit(ptr); | |
| 4665 } else if (span->sizeclass) { | |
| 4666             // Walk the free list of the small-object span, keeping track of each object seen | |
| 4667             for (HardenedSLL nextObject = span->objects; nextObject; nextObject.setValue(m_reader.nextEntryInHardenedLinkedList(reinterpret_cast<void**>(nextObject.value()), m_entropy))) | |
| 4668 m_freeObjectFinder.visit(nextObject.value()); | |
| 4669 } | |
| 4670 return span->length; | |
| 4671 } | |
| 4672 }; | |
| 4673 | |
| 4674 class PageMapMemoryUsageRecorder { | |
| 4675 task_t m_task; | |
| 4676 void* m_context; | |
| 4677 unsigned m_typeMask; | |
| 4678 vm_range_recorder_t* m_recorder; | |
| 4679 const RemoteMemoryReader& m_reader; | |
| 4680 const FreeObjectFinder& m_freeObjectFinder; | |
| 4681 | |
| 4682 HashSet<void*> m_seenPointers; | |
| 4683 Vector<Span*> m_coalescedSpans; | |
| 4684 | |
| 4685 public: | |
| 4686     PageMapMemoryUsageRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader, const FreeObjectFinder& freeObjectFinder) | |
| 4687 : m_task(task) | |
| 4688 , m_context(context) | |
| 4689 , m_typeMask(typeMask) | |
| 4690 , m_recorder(recorder) | |
| 4691 , m_reader(reader) | |
| 4692 , m_freeObjectFinder(freeObjectFinder) | |
| 4693 { } | |
| 4694 | |
| 4695 ~PageMapMemoryUsageRecorder() | |
| 4696 { | |
| 4697 ASSERT(!m_coalescedSpans.size()); | |
| 4698 } | |
| 4699 | |
| 4700 void recordPendingRegions() | |
| 4701 { | |
| 4702         if (!(m_typeMask & (MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE))) { | |
| 4703 m_coalescedSpans.clear(); | |
| 4704 return; | |
| 4705 } | |
| 4706 | |
| 4707 Vector<vm_range_t, 1024> allocatedPointers; | |
| 4708 for (size_t i = 0; i < m_coalescedSpans.size(); ++i) { | |
| 4709 Span *theSpan = m_coalescedSpans[i]; | |
| 4710 if (theSpan->free) | |
| 4711 continue; | |
| 4712 | |
| 4713 vm_address_t spanStartAddress = theSpan->start << kPageShift; | |
| 4714 vm_size_t spanSizeInBytes = theSpan->length * kPageSize; | |
| 4715 | |
| 4716 if (!theSpan->sizeclass) { | |
| 4717 // If it's an allocated large object span, mark it as in use | |
| 4718 if (!m_freeObjectFinder.isFreeObject(spanStartAddress)) | |
| 4719                     allocatedPointers.append((vm_range_t){spanStartAddress, spanSizeInBytes}); | |
| 4720 } else { | |
| 4721 const size_t objectSize = ByteSizeForClass(theSpan->sizeclass); | |
| 4722 | |
| 4723 // Mark each allocated small object within the span as in use | |
| 4724                 const vm_address_t endOfSpan = spanStartAddress + spanSizeInBytes; | |
| 4725                 for (vm_address_t object = spanStartAddress; object + objectSize <= endOfSpan; object += objectSize) { | |
| 4726 if (!m_freeObjectFinder.isFreeObject(object)) | |
| 4727                         allocatedPointers.append((vm_range_t){object, objectSize}); | |
| 4728 } | |
| 4729 } | |
| 4730 } | |
| 4731 | |
| 4732         (*m_recorder)(m_task, m_context, m_typeMask & (MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE), allocatedPointers.data(), allocatedPointers.size()); | |
| 4733 | |
| 4734 m_coalescedSpans.clear(); | |
| 4735 } | |
| 4736 | |
| 4737 int visit(void* ptr) | |
| 4738 { | |
| 4739 if (!ptr) | |
| 4740 return 1; | |
| 4741 | |
| 4742 Span* span = m_reader(reinterpret_cast<Span*>(ptr)); | |
| 4743 if (!span || !span->start) | |
| 4744 return 1; | |
| 4745 | |
| 4746 if (m_seenPointers.contains(ptr)) | |
| 4747 return span->length; | |
| 4748 m_seenPointers.add(ptr); | |
| 4749 | |
| 4750 if (!m_coalescedSpans.size()) { | |
| 4751 m_coalescedSpans.append(span); | |
| 4752 return span->length; | |
| 4753 } | |
| 4754 | |
| 4755 Span* previousSpan = m_coalescedSpans[m_coalescedSpans.size() - 1]; | |
| 4756         vm_address_t previousSpanStartAddress = previousSpan->start << kPageShift; | |
| 4757 vm_size_t previousSpanSizeInBytes = previousSpan->length * kPageSize; | |
| 4758 | |
| 4759 // If the new span is adjacent to the previous span, do nothing for now. | |
| 4760 vm_address_t spanStartAddress = span->start << kPageShift; | |
| 4761         if (spanStartAddress == previousSpanStartAddress + previousSpanSizeInBytes) { | |
| 4762 m_coalescedSpans.append(span); | |
| 4763 return span->length; | |
| 4764 } | |
| 4765 | |
| 4766         // New span is not adjacent to previous span, so record the spans coalesced so far. | |
| 4767 recordPendingRegions(); | |
| 4768 m_coalescedSpans.append(span); | |
| 4769 | |
| 4770 return span->length; | |
| 4771 } | |
| 4772 }; | |
| 4773 | |
| 4774 class AdminRegionRecorder { | |
| 4775 task_t m_task; | |
| 4776 void* m_context; | |
| 4777 unsigned m_typeMask; | |
| 4778 vm_range_recorder_t* m_recorder; | |
| 4779 | |
| 4780 Vector<vm_range_t, 1024> m_pendingRegions; | |
| 4781 | |
| 4782 public: | |
| 4783     AdminRegionRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder) | |
| 4784 : m_task(task) | |
| 4785 , m_context(context) | |
| 4786 , m_typeMask(typeMask) | |
| 4787 , m_recorder(recorder) | |
| 4788 { } | |
| 4789 | |
| 4790 void recordRegion(vm_address_t ptr, size_t size) | |
| 4791 { | |
| 4792 if (m_typeMask & MALLOC_ADMIN_REGION_RANGE_TYPE) | |
| 4793 m_pendingRegions.append((vm_range_t){ ptr, size }); | |
| 4794 } | |
| 4795 | |
| 4796 void visit(void *ptr, size_t size) | |
| 4797 { | |
| 4798 recordRegion(reinterpret_cast<vm_address_t>(ptr), size); | |
| 4799 } | |
| 4800 | |
| 4801 void recordPendingRegions() | |
| 4802 { | |
| 4803 if (m_pendingRegions.size()) { | |
| 4804             (*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, m_pendingRegions.data(), m_pendingRegions.size()); | |
| 4805 m_pendingRegions.clear(); | |
| 4806 } | |
| 4807 } | |
| 4808 | |
| 4809 ~AdminRegionRecorder() | |
| 4810 { | |
| 4811 ASSERT(!m_pendingRegions.size()); | |
| 4812 } | |
| 4813 }; | |
| 4814 | |
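|      // (Overview: enumerate() inspects the remote process's heap in three passes: it first | |
|      // collects every free object, then reports the in-use ranges implied by the page map, | |
|      // and finally reports the allocator's own administrative regions.) | |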
| 4815 kern_return_t FastMallocZone::enumerate(task_t task, void* context, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t reader, vm_range_recorder_t recorder) | |
| 4816 { | |
| 4817 RemoteMemoryReader memoryReader(task, reader); | |
| 4818 | |
| 4819 InitSizeClasses(); | |
| 4820 | |
| 4821     FastMallocZone* mzone = memoryReader(reinterpret_cast<FastMallocZone*>(zoneAddress)); | |
| 4822 TCMalloc_PageHeap* pageHeap = memoryReader(mzone->m_pageHeap); | |
| 4823     TCMalloc_ThreadCache** threadHeapsPointer = memoryReader(mzone->m_threadHeaps); | |
| 4824 TCMalloc_ThreadCache* threadHeaps = memoryReader(*threadHeapsPointer); | |
| 4825 | |
| 4826     TCMalloc_Central_FreeListPadded* centralCaches = memoryReader(mzone->m_centralCaches, sizeof(TCMalloc_Central_FreeListPadded) * kNumClasses); | |
| 4827 | |
| 4828 FreeObjectFinder finder(memoryReader); | |
| 4829 finder.findFreeObjects(threadHeaps); | |
| 4830 finder.findFreeObjects(centralCaches, kNumClasses, mzone->m_centralCaches); | |
| 4831 | |
| 4832 TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_; | |
| 4833     PageMapFreeObjectFinder pageMapFinder(memoryReader, finder, pageHeap->entropy_); | |
| 4834 pageMap->visitValues(pageMapFinder, memoryReader); | |
| 4835 | |
| 4836     PageMapMemoryUsageRecorder usageRecorder(task, context, typeMask, recorder, memoryReader, finder); | |
| 4837 pageMap->visitValues(usageRecorder, memoryReader); | |
| 4838 usageRecorder.recordPendingRegions(); | |
| 4839 | |
| 4840 AdminRegionRecorder adminRegionRecorder(task, context, typeMask, recorder); | |
| 4841 pageMap->visitAllocations(adminRegionRecorder, memoryReader); | |
| 4842 | |
| 4843     PageHeapAllocator<Span>* spanAllocator = memoryReader(mzone->m_spanAllocator); | |
| 4844     PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator = memoryReader(mzone->m_pageHeapAllocator); | |
| 4845 | |
| 4846     spanAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader); | |
| 4847     pageHeapAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader); | |
| 4848 | |
| 4849 adminRegionRecorder.recordPendingRegions(); | |
| 4850 | |
| 4851 return 0; | |
| 4852 } | |
| 4853 | |
| 4854 size_t FastMallocZone::size(malloc_zone_t*, const void*) | |
| 4855 { | |
| 4856 return 0; | |
| 4857 } | |
| 4858 | |
| 4859 void* FastMallocZone::zoneMalloc(malloc_zone_t*, size_t) | |
| 4860 { | |
| 4861 return 0; | |
| 4862 } | |
| 4863 | |
| 4864 void* FastMallocZone::zoneCalloc(malloc_zone_t*, size_t, size_t) | |
| 4865 { | |
| 4866 return 0; | |
| 4867 } | |
| 4868 | |
| 4869 void FastMallocZone::zoneFree(malloc_zone_t*, void* ptr) | |
| 4870 { | |
| 4871     // Due to <rdar://problem/5671357> zoneFree may be called by the system free even if the pointer | |
| 4872     // is not in this zone.  When this happens, the pointer being freed was not allocated by any | |
| 4873 // zone so we need to print a useful error for the application developer. | |
| 4874     malloc_printf("*** error for object %p: pointer being freed was not allocated\n", ptr); | |
| 4875 } | |
| 4876 | |
| 4877 void* FastMallocZone::zoneRealloc(malloc_zone_t*, void*, size_t) | |
| 4878 { | |
| 4879 return 0; | |
| 4880 } | |
| 4881 | |
| 4882 | |
| 4883 #undef malloc | |
| 4884 #undef free | |
| 4885 #undef realloc | |
| 4886 #undef calloc | |
| 4887 | |
| 4888 extern "C" { | |
| 4889 malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print, | |
| 4890     &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics | |
| 4891 | |
| 4892 #if OS(IOS) || __MAC_OS_X_VERSION_MAX_ALLOWED >= 1060 | |
| 4893     , 0 // zone_locked will not be called on the zone unless it advertises itself as version five or higher. | |
| 4894 #endif | |
| 4895 #if OS(IOS) || __MAC_OS_X_VERSION_MAX_ALLOWED >= 1070 | |
| 4896     , 0, 0, 0, 0 // These members will not be used unless the zone advertises itself as version seven or higher. | |
| 4897 #endif | |
| 4898 | |
| 4899 }; | |
| 4900 } | |
| 4901 | |
| 4902 FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches, PageHeapAllocator<Span>* spanAllocator, PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator) | |
| 4903 : m_pageHeap(pageHeap) | |
| 4904 , m_threadHeaps(threadHeaps) | |
| 4905 , m_centralCaches(centralCaches) | |
| 4906 , m_spanAllocator(spanAllocator) | |
| 4907 , m_pageHeapAllocator(pageHeapAllocator) | |
| 4908 { | |
| 4909 memset(&m_zone, 0, sizeof(m_zone)); | |
| 4910 m_zone.version = 4; | |
| 4911 m_zone.zone_name = "JavaScriptCore FastMalloc"; | |
| 4912 m_zone.size = &FastMallocZone::size; | |
| 4913 m_zone.malloc = &FastMallocZone::zoneMalloc; | |
| 4914 m_zone.calloc = &FastMallocZone::zoneCalloc; | |
| 4915 m_zone.realloc = &FastMallocZone::zoneRealloc; | |
| 4916 m_zone.free = &FastMallocZone::zoneFree; | |
| 4917 m_zone.valloc = &FastMallocZone::zoneValloc; | |
| 4918 m_zone.destroy = &FastMallocZone::zoneDestroy; | |
| 4919 m_zone.introspect = &jscore_fastmalloc_introspection; | |
| 4920 malloc_zone_register(&m_zone); | |
| 4921 } | |
| 4922 | |
| 4923 | |
| 4924 void FastMallocZone::init() | |
| 4925 { | |
| 4926     static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache), &span_allocator, &threadheap_allocator); | |
| 4927 } | |
| 4928 | |
| 4929 #endif // OS(DARWIN) | |
| 4930 | |
| 4931 } // namespace WTF | |
| 4932 #endif // WTF_CHANGES | |
| 4933 | |
| 4934 #endif // FORCE_SYSTEM_MALLOC | |