| 1 // Copyright (c) 2005, Google Inc. |
| 2 // All rights reserved. |
| 3 // |
| 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are |
| 6 // met: |
| 7 // |
| 8 // * Redistributions of source code must retain the above copyright |
| 9 // notice, this list of conditions and the following disclaimer. |
| 10 // * Redistributions in binary form must reproduce the above |
| 11 // copyright notice, this list of conditions and the following disclaimer |
| 12 // in the documentation and/or other materials provided with the |
| 13 // distribution. |
| 14 // * Neither the name of Google Inc. nor the names of its |
| 15 // contributors may be used to endorse or promote products derived from |
| 16 // this software without specific prior written permission. |
| 17 // |
| 18 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 19 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 20 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 21 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 22 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 23 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 24 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 25 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 29 |
| 30 // --- |
| 31 // Author: Sanjay Ghemawat <opensource@google.com> |
| 32 // |
| 33 // A malloc that uses a per-thread cache to satisfy small malloc requests. |
| 34 // (The time for malloc/free of a small object drops from 300 ns to 50 ns.) |
| 35 // |
| 36 // See doc/tcmalloc.html for a high-level |
| 37 // description of how this malloc works. |
| 38 // |
| 39 // SYNCHRONIZATION |
| 40 // 1. The thread-specific lists are accessed without acquiring any locks. |
| 41 // This is safe because each such list is only accessed by one thread. |
| 42 // 2. We have a lock per central free-list, and hold it while manipulating |
| 43 // the central free list for a particular size. |
| 44 // 3. The central page allocator is protected by "pageheap_lock". |
| 45 // 4. The pagemap (which maps from page-number to descriptor), |
| 46 // can be read without holding any locks, and written while holding |
| 47 // the "pageheap_lock". |
| 48 // 5. To improve performance, a subset of the information one can get |
| 49 // from the pagemap is cached in a data structure, pagemap_cache_, |
| 50 // that atomically reads and writes its entries. This cache can be |
| 51 // read and written without locking. |
| 52 // |
| 53 // This multi-threaded access to the pagemap is safe for fairly |
| 54 // subtle reasons. We basically assume that when an object X is |
| 55 // allocated by thread A and deallocated by thread B, there must |
| 56 // have been appropriate synchronization in the handoff of object |
| 57 // X from thread A to thread B. The same logic applies to pagemap_cache_. |
| 58 // |
| 59 // THE PAGEID-TO-SIZECLASS CACHE |
| 60 // Hot PageID-to-sizeclass mappings are held by pagemap_cache_. If this cache |
| 61 // returns 0 for a particular PageID then that means "no information," not that |
| 62 // the sizeclass is 0. The cache may have stale information for pages that do |
| 63 // not hold the beginning of any free()'able object. Staleness is eliminated |
| 64 // in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and |
| 65 // do_memalign() for all other relevant pages. |
| 66 // |
| 67 // PAGEMAP |
| 68 // ------- |
| 69 // Page map contains a mapping from page id to Span. |
| 70 // |
| 71 // If Span s occupies pages [p..q], |
| 72 // pagemap[p] == s |
| 73 // pagemap[q] == s |
| 74 // pagemap[p+1..q-1] are undefined |
| 75 // pagemap[p-1] and pagemap[q+1] are defined: |
| 76 // NULL if the corresponding page is not yet in the address space. |
| 77 // Otherwise it points to a Span. This span may be free |
| 78 // or allocated. If free, it is in one of pageheap's freelists. |
| 79 // |
| 80 // TODO: Bias reclamation to larger addresses |
| 81 // TODO: implement mallinfo/mallopt |
| 82 // TODO: Better testing |
| 83 // |
| 84 // 9/28/2003 (new page-level allocator replaces ptmalloc2): |
| 85 // * malloc/free of small objects goes from ~300 ns to ~50 ns. |
| 86 // * allocation of a reasonably complicated struct |
| 87 // goes from about 1100 ns to about 300 ns. |
| 88 |
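A toy sketch of the pagemap invariants described above may help; this is illustrative code only (the real pagemap is, roughly, a radix tree defined in pagemap.h, and std::map here is just a stand-in):

    // Toy model of the pagemap invariants described above; not tcmalloc code.
    #include <cassert>
    #include <cstdint>
    #include <map>

    struct Span { uintptr_t start, length; };    // occupies pages [start .. start+length-1]
    static std::map<uintptr_t, Span*> pagemap;   // page id -> Span* (stand-in for the radix tree)

    static void RecordSpan(Span* s) {
      pagemap[s->start] = s;                     // pagemap[p] == s
      pagemap[s->start + s->length - 1] = s;     // pagemap[q] == s
      // Interior pages [p+1 .. q-1] are deliberately left "undefined".
    }

    int main() {
      Span s = {10, 4};                          // span occupying pages 10..13
      RecordSpan(&s);
      assert(pagemap[10] == &s && pagemap[13] == &s);
      assert(pagemap.count(11) == 0 && pagemap.count(12) == 0);
      return 0;
    }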
| 89 #include <config.h> |
| 90 #include <new> |
| 91 #include <stdio.h> |
| 92 #include <stddef.h> |
| 93 #if defined HAVE_STDINT_H |
| 94 #include <stdint.h> |
| 95 #elif defined HAVE_INTTYPES_H |
| 96 #include <inttypes.h> |
| 97 #else |
| 98 #include <sys/types.h> |
| 99 #endif |
| 100 #if defined(HAVE_MALLOC_H) && defined(HAVE_STRUCT_MALLINFO) |
| 101 #include <malloc.h> // for struct mallinfo |
| 102 #endif |
| 103 #include <string.h> |
| 104 #ifdef HAVE_PTHREAD |
| 105 #include <pthread.h> |
| 106 #endif |
| 107 #ifdef HAVE_UNISTD_H |
| 108 #include <unistd.h> |
| 109 #endif |
| 110 #include <errno.h> |
| 111 #include <stdarg.h> |
| 112 #include <algorithm> |
| 113 #include <google/tcmalloc.h> |
| 114 #include "base/commandlineflags.h" |
| 115 #include "base/basictypes.h" // gets us PRIu64 |
| 116 #include "base/sysinfo.h" |
| 117 #include "base/spinlock.h" |
| 118 #include "common.h" |
| 119 #include "malloc_hook-inl.h" |
| 120 #include <google/malloc_hook.h> |
| 121 #include <google/malloc_extension.h> |
| 122 #include "central_freelist.h" |
| 123 #include "internal_logging.h" |
| 124 #include "linked_list.h" |
| 125 #include "maybe_threads.h" |
| 126 #include "page_heap.h" |
| 127 #include "page_heap_allocator.h" |
| 128 #include "pagemap.h" |
| 129 #include "span.h" |
| 130 #include "static_vars.h" |
| 131 #include "system-alloc.h" |
| 132 #include "tcmalloc_guard.h" |
| 133 #include "thread_cache.h" |
| 134 |
| 135 #if (defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)) && !defined(WIN32_OVERRIDE_ALLOCATORS) |
| 136 # define WIN32_DO_PATCHING 1 |
| 137 #endif |
| 138 |
| 139 using tcmalloc::PageHeap; |
| 140 using tcmalloc::PageHeapAllocator; |
| 141 using tcmalloc::SizeMap; |
| 142 using tcmalloc::Span; |
| 143 using tcmalloc::StackTrace; |
| 144 using tcmalloc::Static; |
| 145 using tcmalloc::ThreadCache; |
| 146 |
| 147 // __THROW is defined on glibc systems. It means, counter-intuitively, |
| 148 // "This function will never throw an exception." It's an optional |
| 149 // optimization tool, but we may need to use it to match glibc prototypes. |
| 150 #ifndef __THROW // I guess we're not on a glibc system |
| 151 # define __THROW // __THROW is just an optimization, so ok to make it "" |
| 152 #endif |
| 153 |
| 154 DECLARE_int64(tcmalloc_sample_parameter); |
| 155 DECLARE_double(tcmalloc_release_rate); |
| 156 |
| 157 // On Windows, the printf we use to report large allocs is |
| 158 // potentially dangerous: it could cause a malloc that would cause an |
| 159 // infinite loop. So by default we set the threshold to a huge number |
| 160 // on Windows, so this bad situation will never trigger. You can |
| 161 // always set TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD manually if you |
| 162 // want this functionality. |
| 163 #ifdef _WIN32 |
| 164 const int64 kDefaultLargeAllocReportThreshold = static_cast<int64>(1) << 62; |
| 165 #else |
| 166 const int64 kDefaultLargeAllocReportThreshold = static_cast<int64>(1) << 30; |
| 167 #endif |
| 168 DEFINE_int64(tcmalloc_large_alloc_report_threshold, |
| 169 EnvToInt64("TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD", |
| 170 kDefaultLargeAllocReportThreshold), |
| 171 "Allocations larger than this value cause a stack " |
| 172 "trace to be dumped to stderr. The threshold for " |
| 173 "dumping stack traces is increased by a factor of 1.125 " |
| 174 "every time we print a message so that the threshold " |
| 175 "automatically goes up by a factor of ~1000 every 60 " |
| 176 "messages. This bounds the amount of extra logging " |
| 177 "generated by this flag. Default value of this flag " |
| 178 "is very large and therefore you should see no extra " |
| 179 "logging unless the flag is overridden. Set to 0 to " |
| 180 "disable reporting entirely."); |
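As a quick arithmetic check on the flag description: growing the threshold by 1.125x per report compounds to 1.125^60 ≈ 1173 after 60 messages, which is the "~1000 every 60 messages" factor claimed above.

    // Sanity check of the "~1000x every 60 messages" growth factor.
    #include <cmath>
    #include <cstdio>

    int main() {
      std::printf("1.125^60 = %.0f\n", std::pow(1.125, 60));  // prints ~1173
      return 0;
    }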
| 181 |
| 182 |
| 183 // We already declared these functions in tcmalloc.h, but we have to |
| 184 // declare them again to give them an ATTRIBUTE_SECTION: we want to |
| 185 // put all callers of MallocHook::Invoke* in this module into the |
| 186 // ATTRIBUTE_SECTION(google_malloc) section, so that |
| 187 // MallocHook::GetCallerStackTrace can function accurately. |
| 188 extern "C" { |
| 189 void* tc_malloc(size_t size) __THROW |
| 190 ATTRIBUTE_SECTION(google_malloc); |
| 191 void tc_free(void* ptr) __THROW |
| 192 ATTRIBUTE_SECTION(google_malloc); |
| 193 void* tc_realloc(void* ptr, size_t size) __THROW |
| 194 ATTRIBUTE_SECTION(google_malloc); |
| 195 void* tc_calloc(size_t nmemb, size_t size) __THROW |
| 196 ATTRIBUTE_SECTION(google_malloc); |
| 197 void tc_cfree(void* ptr) __THROW |
| 198 ATTRIBUTE_SECTION(google_malloc); |
| 199 |
| 200 void* tc_memalign(size_t __alignment, size_t __size) __THROW |
| 201 ATTRIBUTE_SECTION(google_malloc); |
| 202 int tc_posix_memalign(void** ptr, size_t align, size_t size) __THROW |
| 203 ATTRIBUTE_SECTION(google_malloc); |
| 204 void* tc_valloc(size_t __size) __THROW |
| 205 ATTRIBUTE_SECTION(google_malloc); |
| 206 void* tc_pvalloc(size_t __size) __THROW |
| 207 ATTRIBUTE_SECTION(google_malloc); |
| 208 |
| 209 void tc_malloc_stats(void) __THROW |
| 210 ATTRIBUTE_SECTION(google_malloc); |
| 211 int tc_mallopt(int cmd, int value) __THROW |
| 212 ATTRIBUTE_SECTION(google_malloc); |
| 213 #ifdef HAVE_STRUCT_MALLINFO // struct mallinfo isn't defined on freebsd |
| 214 struct mallinfo tc_mallinfo(void) __THROW |
| 215 ATTRIBUTE_SECTION(google_malloc); |
| 216 #endif |
| 217 |
| 218 void* tc_new(size_t size) |
| 219 ATTRIBUTE_SECTION(google_malloc); |
| 220 void tc_delete(void* p) __THROW |
| 221 ATTRIBUTE_SECTION(google_malloc); |
| 222 void* tc_newarray(size_t size) |
| 223 ATTRIBUTE_SECTION(google_malloc); |
| 224 void tc_deletearray(void* p) __THROW |
| 225 ATTRIBUTE_SECTION(google_malloc); |
| 226 |
| 227 // And the nothrow variants of these: |
| 228 void* tc_new_nothrow(size_t size, const std::nothrow_t&) __THROW |
| 229 ATTRIBUTE_SECTION(google_malloc); |
| 230 void* tc_newarray_nothrow(size_t size, const std::nothrow_t&) __THROW |
| 231 ATTRIBUTE_SECTION(google_malloc); |
| 232 } |
| 233 |
| 234 // Override the libc functions to prefer our own instead. This comes |
| 235 // first so code in tcmalloc.cc can use the overridden versions. One |
| 236 // exception: on Windows, by default, we patch our code into these |
| 237 // functions (via src/windows/patch_function.cc) rather than override |
| 238 // them. In that case, we don't want to do this overriding here. |
| 239 #ifndef WIN32_DO_PATCHING |
| 240 |
| 241 // TODO(mbelshe): Turn off TCMalloc's symbols for libc. We do that |
| 242 // elsewhere. |
| 243 #if 0 |
| 244 |
| 245 #if defined(__GNUC__) && !defined(__MACH__) |
| 246 // Potentially faster variants that use the gcc alias extension. |
| 247 // Mach-O (Darwin) does not support weak aliases, hence the __MACH__ check. |
| 248 // FreeBSD does support aliases, but apparently not correctly. :-( |
| 249 # define ALIAS(x) __attribute__ ((alias (x))) |
| 250 void* operator new(size_t size) ALIAS("tc_new"); |
| 251 void operator delete(void* p) __THROW ALIAS("tc_delete"); |
| 252 void* operator new[](size_t size) ALIAS("tc_newarray"); |
| 253 void operator delete[](void* p) __THROW ALIAS("tc_deletearray"); |
| 254 void* operator new(size_t size, const std::nothrow_t&) __THROW |
| 255 ALIAS("tc_new_nothrow"); |
| 256 void* operator new[](size_t size, const std::nothrow_t&) __THROW |
| 257 ALIAS("tc_newarray_nothrow"); |
| 258 extern "C" { |
| 259 void* malloc(size_t size) __THROW ALIAS("tc_malloc"); |
| 260 void free(void* ptr) __THROW ALIAS("tc_free"); |
| 261 void* realloc(void* ptr, size_t size) __THROW ALIAS("tc_realloc"); |
| 262 void* calloc(size_t n, size_t size) __THROW ALIAS("tc_calloc"); |
| 263 void cfree(void* ptr) __THROW ALIAS("tc_cfree"); |
| 264 void* memalign(size_t align, size_t s) __THROW ALIAS("tc_memalign"); |
| 265 void* valloc(size_t size) __THROW ALIAS("tc_valloc"); |
| 266 void* pvalloc(size_t size) __THROW ALIAS("tc_pvalloc"); |
| 267 int posix_memalign(void** r, size_t a, size_t s) __THROW |
| 268 ALIAS("tc_posix_memalign"); |
| 269 void malloc_stats(void) __THROW ALIAS("tc_malloc_stats"); |
| 270 int mallopt(int cmd, int value) __THROW ALIAS("tc_mallopt"); |
| 271 #ifdef HAVE_STRUCT_MALLINFO |
| 272 struct mallinfo mallinfo(void) __THROW ALIAS("tc_mallinfo"); |
| 273 #endif |
| 274 // Some library routines on RedHat 9 allocate memory using malloc() |
| 275 // and free it using __libc_free() (or vice-versa). Since we provide |
| 276 // our own implementations of malloc/free, we need to make sure that |
| 277 // the __libc_XXX variants (defined as part of glibc) also point to |
| 278 // the same implementations. |
| 279 # if defined(__GLIBC__) |
| 280 void* __libc_malloc(size_t size) ALIAS("tc_malloc"); |
| 281 void __libc_free(void* ptr) ALIAS("tc_free"); |
| 282 void* __libc_realloc(void* ptr, size_t size) ALIAS("tc_realloc"); |
| 283 void* __libc_calloc(size_t n, size_t size) ALIAS("tc_calloc"); |
| 284 void __libc_cfree(void* ptr) ALIAS("tc_cfree"); |
| 285 void* __libc_memalign(size_t align, size_t s) ALIAS("tc_memalign"); |
| 286 void* __libc_valloc(size_t size) ALIAS("tc_valloc"); |
| 287 void* __libc_pvalloc(size_t size) ALIAS("tc_pvalloc"); |
| 288 int __posix_memalign(void** r, size_t a, size_t s) ALIAS("tc_posix_memalign"); |
| 289 # define HAVE_ALIASED___LIBC 1 |
| 290 # endif // #if defined(__GLIBC__) |
| 291 } // extern "C" |
| 292 # undef ALIAS |
| 293 #else |
| 294 // Portable wrappers |
| 295 void* operator new(size_t size) { return tc_new(size); } |
| 296 void operator delete(void* p) __THROW { tc_delete(p); } |
| 297 void* operator new[](size_t size) { return tc_newarray(size); } |
| 298 void operator delete[](void* p) __THROW { tc_deletearray(p); } |
| 299 void* operator new(size_t size, const std::nothrow_t& nt) __THROW { |
| 300 return tc_new_nothrow(size, nt); |
| 301 } |
| 302 void* operator new[](size_t size, const std::nothrow_t& nt) __THROW { |
| 303 return tc_newarray_nothrow(size, nt); |
| 304 } |
| 305 extern "C" { |
| 306 void* malloc(size_t s) __THROW { return tc_malloc(s); } |
| 307 void free(void* p) __THROW { tc_free(p); } |
| 308 void* realloc(void* p, size_t s) __THROW { return tc_realloc(p, s); } |
| 309 void* calloc(size_t n, size_t s) __THROW { return tc_calloc(n, s); } |
| 310 void cfree(void* p) __THROW { tc_cfree(p); } |
| 311 void* memalign(size_t a, size_t s) __THROW { return tc_memalign(a, s); } |
| 312 void* valloc(size_t s) __THROW { return tc_valloc(s); } |
| 313 void* pvalloc(size_t s) __THROW { return tc_pvalloc(s); } |
| 314 int posix_memalign(void** r, size_t a, size_t s) __THROW { |
| 315 return tc_posix_memalign(r, a, s); |
| 316 } |
| 317 void malloc_stats(void) __THROW { tc_malloc_stats(); } |
| 318 int mallopt(int cmd, int v) __THROW { return tc_mallopt(cmd, v); } |
| 319 #ifdef HAVE_STRUCT_MALLINFO |
| 320 struct mallinfo mallinfo(void) __THROW { return tc_mallinfo(); } |
| 321 #endif |
| 322 } // extern "C" |
| 323 #endif // #if defined(__GNUC__) |
| 324 |
| 325 #ifndef HAVE_ALIASED___LIBC |
| 326 extern "C" { |
| 327 void* __libc_malloc(size_t size) { return malloc(size); } |
| 328 void __libc_free(void* ptr) { free(ptr); } |
| 329 void* __libc_realloc(void* ptr, size_t size) { return realloc(ptr, size); } |
| 330 void* __libc_calloc(size_t n, size_t size) { return calloc(n, size); } |
| 331 void __libc_cfree(void* ptr) { cfree(ptr); } |
| 332 void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); } |
| 333 void* __libc_valloc(size_t size) { return valloc(size); } |
| 334 void* __libc_pvalloc(size_t size) { return pvalloc(size); } |
| 335 int __posix_memalign(void** r, size_t a, size_t s) { |
| 336 return posix_memalign(r, a, s); |
| 337 } |
| 338 } // extern "C" |
| 339 #endif // #ifndef HAVE_ALIASED___LIBC |
| 340 |
| 341 #endif // #if 0 |
| 342 |
| 343 #endif // #ifndef WIN32_DO_PATCHING |
| 344 |
| 345 |
| 346 // ----------------------- IMPLEMENTATION ------------------------------- |
| 347 |
| 348 // These routines are called by free(), realloc(), etc. if the pointer is |
| 349 // invalid. This is a cheap (source-editing required) kind of exception |
| 350 // handling for these routines. |
| 351 namespace { |
| 352 void InvalidFree(void* ptr) { |
| 353 CRASH("Attempt to free invalid pointer: %p\n", ptr); |
| 354 } |
| 355 |
| 356 size_t InvalidGetSizeForRealloc(void* old_ptr) { |
| 357 CRASH("Attempt to realloc invalid pointer: %p\n", old_ptr); |
| 358 return 0; |
| 359 } |
| 360 |
| 361 size_t InvalidGetAllocatedSize(void* ptr) { |
| 362 CRASH("Attempt to get the size of an invalid pointer: %p\n", ptr); |
| 363 return 0; |
| 364 } |
| 365 } // unnamed namespace |
| 366 |
| 367 // Extract interesting stats |
| 368 struct TCMallocStats { |
| 369 uint64_t system_bytes; // Bytes alloced from system |
| 370 uint64_t thread_bytes; // Bytes in thread caches |
| 371 uint64_t central_bytes; // Bytes in central cache |
| 372 uint64_t transfer_bytes; // Bytes in central transfer cache |
| 373 uint64_t pageheap_bytes; // Bytes in page heap |
| 374 uint64_t metadata_bytes; // Bytes alloced for metadata |
| 375 }; |
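The accounting identity used throughout the rest of this file falls out of these fields: whatever the system gave us that is not sitting in some free list is in use by the application. A minimal sketch (BytesInUse is a hypothetical helper; the file itself computes this inline):

    // Accounting identity used by DumpStats() and GetNumericProperty() below:
    // application bytes = system bytes minus every flavor of cached/free bytes.
    static uint64_t BytesInUse(const TCMallocStats& s) {
      return s.system_bytes - s.pageheap_bytes - s.central_bytes
           - s.transfer_bytes - s.thread_bytes;
    }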
| 376 |
| 377 // Get stats into "r". Also get per-size-class counts if class_count != NULL |
| 378 static void ExtractStats(TCMallocStats* r, uint64_t* class_count) { |
| 379 r->central_bytes = 0; |
| 380 r->transfer_bytes = 0; |
| 381 for (int cl = 0; cl < kNumClasses; ++cl) { |
| 382 const int length = Static::central_cache()[cl].length(); |
| 383 const int tc_length = Static::central_cache()[cl].tc_length(); |
| 384 const size_t size = static_cast<size_t>( |
| 385 Static::sizemap()->ByteSizeForClass(cl)); |
| 386 r->central_bytes += (size * length); |
| 387 r->transfer_bytes += (size * tc_length); |
| 388 if (class_count) class_count[cl] = length + tc_length; |
| 389 } |
| 390 |
| 391 // Add stats from per-thread heaps |
| 392 r->thread_bytes = 0; |
| 393 { // scope |
| 394 SpinLockHolder h(Static::pageheap_lock()); |
| 395 ThreadCache::GetThreadStats(&r->thread_bytes, class_count); |
| 396 } |
| 397 |
| 398 { // scope |
| 399 SpinLockHolder h(Static::pageheap_lock()); |
| 400 r->system_bytes = Static::pageheap()->SystemBytes(); |
| 401 r->metadata_bytes = tcmalloc::metadata_system_bytes(); |
| 402 r->pageheap_bytes = Static::pageheap()->FreeBytes(); |
| 403 } |
| 404 } |
| 405 |
| 406 // WRITE stats to "out" |
| 407 static void DumpStats(TCMalloc_Printer* out, int level) { |
| 408 TCMallocStats stats; |
| 409 uint64_t class_count[kNumClasses]; |
| 410 ExtractStats(&stats, (level >= 2 ? class_count : NULL)); |
| 411 |
| 412 static const double MB = 1048576.0; |
| 413 |
| 414 if (level >= 2) { |
| 415 out->printf("------------------------------------------------\n"); |
| 416 uint64_t cumulative = 0; |
| 417 for (int cl = 0; cl < kNumClasses; ++cl) { |
| 418 if (class_count[cl] > 0) { |
| 419 uint64_t class_bytes = |
| 420 class_count[cl] * Static::sizemap()->ByteSizeForClass(cl); |
| 421 cumulative += class_bytes; |
| 422 out->printf("class %3d [ %8" PRIuS " bytes ] : " |
| 423 "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n", |
| 424 cl, Static::sizemap()->ByteSizeForClass(cl), |
| 425 class_count[cl], |
| 426 class_bytes / MB, |
| 427 cumulative / MB); |
| 428 } |
| 429 } |
| 430 |
| 431 SpinLockHolder h(Static::pageheap_lock()); |
| 432 Static::pageheap()->Dump(out); |
| 433 |
| 434 out->printf("------------------------------------------------\n"); |
| 435 DumpSystemAllocatorStats(out); |
| 436 } |
| 437 |
| 438 const uint64_t bytes_in_use = stats.system_bytes |
| 439 - stats.pageheap_bytes |
| 440 - stats.central_bytes |
| 441 - stats.transfer_bytes |
| 442 - stats.thread_bytes; |
| 443 |
| 444 out->printf("------------------------------------------------\n" |
| 445 "MALLOC: %12" PRIu64 " (%7.1f MB) Heap size\n" |
| 446 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes in use by application\n" |
| 447 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in page heap\n" |
| 448 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in central cache\n" |
| 449 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in transfer cache\n" |
| 450 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in thread caches\n" |
| 451 "MALLOC: %12" PRIu64 " Spans in use\n" |
| 452 "MALLOC: %12" PRIu64 " Thread heaps in use\n" |
| 453 "MALLOC: %12" PRIu64 " (%7.1f MB) Metadata allocated\n" |
| 454 "------------------------------------------------\n", |
| 455 stats.system_bytes, stats.system_bytes / MB, |
| 456 bytes_in_use, bytes_in_use / MB, |
| 457 stats.pageheap_bytes, stats.pageheap_bytes / MB, |
| 458 stats.central_bytes, stats.central_bytes / MB, |
| 459 stats.transfer_bytes, stats.transfer_bytes / MB, |
| 460 stats.thread_bytes, stats.thread_bytes / MB, |
| 461 uint64_t(Static::span_allocator()->inuse()), |
| 462 uint64_t(ThreadCache::HeapsInUse()), |
| 463 stats.metadata_bytes, stats.metadata_bytes / MB); |
| 464 } |
| 465 |
| 466 static void PrintStats(int level) { |
| 467 const int kBufferSize = 16 << 10; |
| 468 char* buffer = new char[kBufferSize]; |
| 469 TCMalloc_Printer printer(buffer, kBufferSize); |
| 470 DumpStats(&printer, level); |
| 471 write(STDERR_FILENO, buffer, strlen(buffer)); |
| 472 delete[] buffer; |
| 473 } |
| 474 |
| 475 static void** DumpHeapGrowthStackTraces() { |
| 476 // Count how much space we need |
| 477 int needed_slots = 0; |
| 478 { |
| 479 SpinLockHolder h(Static::pageheap_lock()); |
| 480 for (StackTrace* t = Static::growth_stacks(); |
| 481 t != NULL; |
| 482 t = reinterpret_cast<StackTrace*>( |
| 483 t->stack[tcmalloc::kMaxStackDepth-1])) { |
| 484 needed_slots += 3 + t->depth; |
| 485 } |
| 486 needed_slots += 100; // Slop in case list grows |
| 487 needed_slots += needed_slots/8; // An extra 12.5% slop |
| 488 } |
| 489 |
| 490 void** result = new (std::nothrow) void*[needed_slots]; |
| 491 if (result == NULL) { |
| 492 MESSAGE("tcmalloc: allocation failed for stack trace slots", |
| 493 needed_slots * sizeof(*result)); |
| 494 return NULL; |
| 495 } |
| 496 |
| 497 SpinLockHolder h(Static::pageheap_lock()); |
| 498 int used_slots = 0; |
| 499 for (StackTrace* t = Static::growth_stacks(); |
| 500 t != NULL; |
| 501 t = reinterpret_cast<StackTrace*>( |
| 502 t->stack[tcmalloc::kMaxStackDepth-1])) { |
| 503 ASSERT(used_slots < needed_slots); // Need to leave room for terminator |
| 504 if (used_slots + 3 + t->depth >= needed_slots) { |
| 505 // No more room |
| 506 break; |
| 507 } |
| 508 |
| 509 result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1)); |
| 510 result[used_slots+1] = reinterpret_cast<void*>(t->size); |
| 511 result[used_slots+2] = reinterpret_cast<void*>(t->depth); |
| 512 for (int d = 0; d < t->depth; d++) { |
| 513 result[used_slots+3+d] = t->stack[d]; |
| 514 } |
| 515 used_slots += 3 + t->depth; |
| 516 } |
| 517 result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0)); |
| 518 return result; |
| 519 } |
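The array built above encodes one record per growth site — a leading 1 (count), the allocation size, the stack depth, then that many frames — and is terminated by a 0 in the count slot. A hypothetical consumer would walk it like this:

    // Hypothetical walker for the slot layout produced by
    // DumpHeapGrowthStackTraces(): [1, size, depth, frame0..frameN-1]*, 0.
    #include <cstdint>

    static void WalkGrowthTraces(void** slots) {
      int i = 0;
      while (reinterpret_cast<uintptr_t>(slots[i]) != 0) {       // 0 terminator
        const uintptr_t size  = reinterpret_cast<uintptr_t>(slots[i + 1]);
        const uintptr_t depth = reinterpret_cast<uintptr_t>(slots[i + 2]);
        // slots[i+3] .. slots[i+2+depth] hold the PCs of the stack frames.
        (void)size;
        i += 3 + static_cast<int>(depth);
      }
    }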
| 520 |
| 521 // TCMalloc's support for extra malloc interfaces |
| 522 class TCMallocImplementation : public MallocExtension { |
| 523 public: |
| 524 virtual void GetStats(char* buffer, int buffer_length) { |
| 525 ASSERT(buffer_length > 0); |
| 526 TCMalloc_Printer printer(buffer, buffer_length); |
| 527 |
| 528 // Print level one stats unless lots of space is available |
| 529 if (buffer_length < 10000) { |
| 530 DumpStats(&printer, 1); |
| 531 } else { |
| 532 DumpStats(&printer, 2); |
| 533 } |
| 534 } |
| 535 |
| 536 virtual void** ReadStackTraces(int* sample_period) { |
| 537 tcmalloc::StackTraceTable table; |
| 538 { |
| 539 SpinLockHolder h(Static::pageheap_lock()); |
| 540 Span* sampled = Static::sampled_objects(); |
| 541 for (Span* s = sampled->next; s != sampled; s = s->next) { |
| 542 table.AddTrace(*reinterpret_cast<StackTrace*>(s->objects)); |
| 543 } |
| 544 } |
| 545 *sample_period = ThreadCache::GetCache()->GetSamplePeriod(); |
| 546 return table.ReadStackTracesAndClear(); // grabs and releases pageheap_lock |
| 547 } |
| 548 |
| 549 virtual void** ReadHeapGrowthStackTraces() { |
| 550 return DumpHeapGrowthStackTraces(); |
| 551 } |
| 552 |
| 553 virtual bool GetNumericProperty(const char* name, size_t* value) { |
| 554 ASSERT(name != NULL); |
| 555 |
| 556 if (strcmp(name, "generic.current_allocated_bytes") == 0) { |
| 557 TCMallocStats stats; |
| 558 ExtractStats(&stats, NULL); |
| 559 *value = stats.system_bytes |
| 560 - stats.thread_bytes |
| 561 - stats.central_bytes |
| 562 - stats.transfer_bytes |
| 563 - stats.pageheap_bytes; |
| 564 return true; |
| 565 } |
| 566 |
| 567 if (strcmp(name, "generic.heap_size") == 0) { |
| 568 TCMallocStats stats; |
| 569 ExtractStats(&stats, NULL); |
| 570 *value = stats.system_bytes; |
| 571 return true; |
| 572 } |
| 573 |
| 574 if (strcmp(name, "tcmalloc.slack_bytes") == 0) { |
| 575 // We assume that bytes in the page heap are not fragmented too |
| 576 // badly, and are therefore available for allocation. |
| 577 SpinLockHolder l(Static::pageheap_lock()); |
| 578 *value = Static::pageheap()->FreeBytes(); |
| 579 return true; |
| 580 } |
| 581 |
| 582 if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) { |
| 583 SpinLockHolder l(Static::pageheap_lock()); |
| 584 *value = ThreadCache::overall_thread_cache_size(); |
| 585 return true; |
| 586 } |
| 587 |
| 588 if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) { |
| 589 TCMallocStats stats; |
| 590 ExtractStats(&stats, NULL); |
| 591 *value = stats.thread_bytes; |
| 592 return true; |
| 593 } |
| 594 |
| 595 return false; |
| 596 } |
| 597 |
| 598 virtual bool SetNumericProperty(const char* name, size_t value) { |
| 599 ASSERT(name != NULL); |
| 600 |
| 601 if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) { |
| 602 SpinLockHolder l(Static::pageheap_lock()); |
| 603 ThreadCache::set_overall_thread_cache_size(value); |
| 604 return true; |
| 605 } |
| 606 |
| 607 return false; |
| 608 } |
| 609 |
| 610 virtual void MarkThreadIdle() { |
| 611 ThreadCache::BecomeIdle(); |
| 612 } |
| 613 |
| 614 virtual void ReleaseFreeMemory() { |
| 615 SpinLockHolder h(Static::pageheap_lock()); |
| 616 Static::pageheap()->ReleaseFreePages(); |
| 617 } |
| 618 |
| 619 virtual void SetMemoryReleaseRate(double rate) { |
| 620 FLAGS_tcmalloc_release_rate = rate; |
| 621 } |
| 622 |
| 623 virtual double GetMemoryReleaseRate() { |
| 624 return FLAGS_tcmalloc_release_rate; |
| 625 } |
| 626 virtual size_t GetEstimatedAllocatedSize(size_t size) { |
| 627 if (size <= kMaxSize) { |
| 628 const size_t cl = Static::sizemap()->SizeClass(size); |
| 629 const size_t alloc_size = Static::sizemap()->ByteSizeForClass(cl); |
| 630 return alloc_size; |
| 631 } else { |
| 632 return tcmalloc::pages(size) << kPageShift; |
| 633 } |
| 634 } |
| 635 |
| 636 // This just calls GetSizeWithCallback, but because that's in an |
| 637 // unnamed namespace, we need to move the definition below it in the |
| 638 // file. |
| 639 virtual size_t GetAllocatedSize(void* ptr); |
| 640 }; |
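For context, applications reach the methods above through the MallocExtension singleton; a query against one of the properties handled in GetNumericProperty() looks roughly like this sketch:

    // Sketch of a client-side query against the properties implemented above.
    #include <google/malloc_extension.h>
    #include <cstddef>
    #include <cstdio>

    void PrintHeapSize() {
      size_t heap_size = 0;
      if (MallocExtension::instance()->GetNumericProperty(
              "generic.heap_size", &heap_size)) {
        std::printf("heap size: %lu bytes\n",
                    static_cast<unsigned long>(heap_size));
      }
    }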
| 641 |
| 642 // The constructor allocates an object to ensure that initialization |
| 643 // runs before main(), and therefore we do not have a chance to become |
| 644 // multi-threaded before initialization. We also create the TSD key |
| 645 // here. Presumably by the time this constructor runs, glibc is in |
| 646 // good enough shape to handle pthread_key_create(). |
| 647 // |
| 648 // The constructor also takes the opportunity to tell STL to use |
| 649 // tcmalloc. We want to do this early, before construct time, so |
| 650 // all user STL allocations go through tcmalloc (which works really |
| 651 // well for STL). |
| 652 // |
| 653 // The destructor prints stats when the program exits. |
| 654 static int tcmallocguard_refcount = 0; // no lock needed: runs before main() |
| 655 TCMallocGuard::TCMallocGuard() { |
| 656 if (tcmallocguard_refcount++ == 0) { |
| 657 #ifdef HAVE_TLS // this is true if the cc/ld/libc combo support TLS |
| 658 // Check whether the kernel also supports TLS (needs to happen at runtime) |
| 659 tcmalloc::CheckIfKernelSupportsTLS(); |
| 660 #endif |
| 661 #ifdef WIN32_DO_PATCHING |
| 662 // patch the Windows VirtualAlloc, etc. |
| 663 PatchWindowsFunctions(); // defined in windows/patch_functions.cc |
| 664 #endif |
| 665 free(malloc(1)); |
| 666 ThreadCache::InitTSD(); |
| 667 free(malloc(1)); |
| 668 MallocExtension::Register(new TCMallocImplementation); |
| 669 } |
| 670 } |
| 671 |
| 672 TCMallocGuard::~TCMallocGuard() { |
| 673 if (--tcmallocguard_refcount == 0) { |
| 674 const char* env = getenv("MALLOCSTATS"); |
| 675 if (env != NULL) { |
| 676 int level = atoi(env); |
| 677 if (level < 1) level = 1; |
| 678 PrintStats(level); |
| 679 } |
| 680 } |
| 681 } |
| 682 #ifndef WIN32_OVERRIDE_ALLOCATORS |
| 683 static TCMallocGuard module_enter_exit_hook; |
| 684 #endif |
| 685 |
| 686 //------------------------------------------------------------------- |
| 687 // Helpers for the exported routines below |
| 688 //------------------------------------------------------------------- |
| 689 |
| 690 static Span* DoSampledAllocation(size_t size) { |
| 691 // Grab the stack trace outside the heap lock |
| 692 StackTrace tmp; |
| 693 tmp.depth = GetStackTrace(tmp.stack, tcmalloc::kMaxStackDepth, 1); |
| 694 tmp.size = size; |
| 695 |
| 696 SpinLockHolder h(Static::pageheap_lock()); |
| 697 // Allocate span |
| 698 Span *span = Static::pageheap()->New(tcmalloc::pages(size == 0 ? 1 : size)); |
| 699 if (span == NULL) { |
| 700 return NULL; |
| 701 } |
| 702 |
| 703 // Allocate stack trace |
| 704 StackTrace *stack = Static::stacktrace_allocator()->New(); |
| 705 if (stack == NULL) { |
| 706 // Sampling failed because of lack of memory |
| 707 return span; |
| 708 } |
| 709 |
| 710 *stack = tmp; |
| 711 span->sample = 1; |
| 712 span->objects = stack; |
| 713 tcmalloc::DLL_Prepend(Static::sampled_objects(), span); |
| 714 |
| 715 return span; |
| 716 } |
| 717 |
| 718 static inline bool CheckCachedSizeClass(void *ptr) { |
| 719 PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; |
| 720 size_t cached_value = Static::pageheap()->GetSizeClassIfCached(p); |
| 721 return cached_value == 0 || |
| 722 cached_value == Static::pageheap()->GetDescriptor(p)->sizeclass; |
| 723 } |
| 724 |
| 725 static inline void* CheckedMallocResult(void *result) |
| 726 { |
| 727 ASSERT(result == 0 || CheckCachedSizeClass(result)); |
| 728 return result; |
| 729 } |
| 730 |
| 731 static inline void* SpanToMallocResult(Span *span) { |
| 732 Static::pageheap()->CacheSizeClass(span->start, 0); |
| 733 return |
| 734 CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift)); |
| 735 } |
| 736 |
| 737 // Copy of FLAGS_tcmalloc_large_alloc_report_threshold with |
| 738 // automatic increases factored in. |
| 739 static int64_t large_alloc_threshold = |
| 740 (kPageSize > FLAGS_tcmalloc_large_alloc_report_threshold |
| 741 ? kPageSize : FLAGS_tcmalloc_large_alloc_report_threshold); |
| 742 |
| 743 static void ReportLargeAlloc(Length num_pages, void* result) { |
| 744 StackTrace stack; |
| 745 stack.depth = GetStackTrace(stack.stack, tcmalloc::kMaxStackDepth, 1); |
| 746 |
| 747 static const int N = 1000; |
| 748 char buffer[N]; |
| 749 TCMalloc_Printer printer(buffer, N); |
| 750 printer.printf("tcmalloc: large alloc %llu bytes == %p @ ", |
| 751 static_cast<unsigned long long>(num_pages) << kPageShift, |
| 752 result); |
| 753 for (int i = 0; i < stack.depth; i++) { |
| 754 printer.printf(" %p", stack.stack[i]); |
| 755 } |
| 756 printer.printf("\n"); |
| 757 write(STDERR_FILENO, buffer, strlen(buffer)); |
| 758 } |
| 759 |
| 760 namespace { |
| 761 |
| 762 // Helper for do_malloc(). |
| 763 inline void* do_malloc_pages(Length num_pages) { |
| 764 Span *span; |
| 765 bool report_large = false; |
| 766 { |
| 767 SpinLockHolder h(Static::pageheap_lock()); |
| 768 span = Static::pageheap()->New(num_pages); |
| 769 const int64 threshold = large_alloc_threshold; |
| 770 if (threshold > 0 && num_pages >= (threshold >> kPageShift)) { |
| 771 // Increase the threshold by 1/8 every time we generate a report. |
| 772 // We cap the threshold at 8GB to avoid overflow problems. |
| 773 large_alloc_threshold = (threshold + threshold/8 < 8ll<<30 |
| 774 ? threshold + threshold/8 : 8ll<<30); |
| 775 report_large = true; |
| 776 } |
| 777 } |
| 778 |
| 779 void* result = (span == NULL ? NULL : SpanToMallocResult(span)); |
| 780 if (report_large) { |
| 781 ReportLargeAlloc(num_pages, result); |
| 782 } |
| 783 return result; |
| 784 } |
| 785 |
| 786 inline void* do_malloc(size_t size) { |
| 787 void* ret = NULL; |
| 788 |
| 789 // The following call forces module initialization |
| 790 ThreadCache* heap = ThreadCache::GetCache(); |
| 791 if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) { |
| 792 Span* span = DoSampledAllocation(size); |
| 793 if (span != NULL) { |
| 794 ret = SpanToMallocResult(span); |
| 795 } |
| 796 } else if (size <= kMaxSize) { |
| 797 // The common case, and also the simplest. This just pops the |
| 798 // size-appropriate freelist, after replenishing it if it's empty. |
| 799 ret = CheckedMallocResult(heap->Allocate(size)); |
| 800 } else { |
| 801 ret = do_malloc_pages(tcmalloc::pages(size)); |
| 802 } |
| 803 if (ret == NULL) errno = ENOMEM; |
| 804 return ret; |
| 805 } |
| 806 |
| 807 inline void* do_calloc(size_t n, size_t elem_size) { |
| 808 // Overflow check |
| 809 const size_t size = n * elem_size; |
| 810 if (elem_size != 0 && size / elem_size != n) return NULL; |
| 811 |
| 812 void* result = do_malloc(size); |
| 813 if (result != NULL) { |
| 814 memset(result, 0, size); |
| 815 } |
| 816 return result; |
| 817 } |
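The guard in do_calloc() above rejects products that wrap: with a 32-bit size_t, for example, n = 0x40000000 with elem_size = 8 would wrap to a small size, and the division check catches it. A standalone form of the check:

    // Standalone form of the multiplication-overflow guard in do_calloc().
    #include <cstddef>

    static bool CallocOverflows(size_t n, size_t elem_size) {
      const size_t size = n * elem_size;
      return elem_size != 0 && size / elem_size != n;  // true iff n*elem_size wrapped
    }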
| 818 |
| 819 static inline ThreadCache* GetCacheIfPresent() { |
| 820 void* const p = ThreadCache::GetCacheIfPresent(); |
| 821 return reinterpret_cast<ThreadCache*>(p); |
| 822 } |
| 823 |
| 824 // This lets you call back to a given function pointer if ptr is invalid. |
| 825 // It is used primarily by windows code which wants a specialized callback. |
| 826 inline void do_free_with_callback(void* ptr, void (*invalid_free_fn)(void*)) { |
| 827 if (ptr == NULL) return; |
| 828 ASSERT(Static::pageheap() != NULL); // Should not call free() before malloc() |
| 829 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; |
| 830 Span* span = NULL; |
| 831 size_t cl = Static::pageheap()->GetSizeClassIfCached(p); |
| 832 |
| 833 if (cl == 0) { |
| 834 span = Static::pageheap()->GetDescriptor(p); |
| 835 if (!span) { |
| 836 // span can be NULL because the pointer passed in is invalid |
| 837 // (not something returned by malloc or friends), or because the |
| 838 // pointer was allocated with some other allocator besides |
| 839 // tcmalloc. The latter can happen if tcmalloc is linked in via |
| 840 // a dynamic library, but is not listed last on the link line. |
| 841 // In that case, libraries after it on the link line will |
| 842 // allocate with libc malloc, but free with tcmalloc's free. |
| 843 (*invalid_free_fn)(ptr); // Decide how to handle the bad free request |
| 844 return; |
| 845 } |
| 846 cl = span->sizeclass; |
| 847 Static::pageheap()->CacheSizeClass(p, cl); |
| 848 } |
| 849 if (cl != 0) { |
| 850 ASSERT(!Static::pageheap()->GetDescriptor(p)->sample); |
| 851 ThreadCache* heap = GetCacheIfPresent(); |
| 852 if (heap != NULL) { |
| 853 heap->Deallocate(ptr, cl); |
| 854 } else { |
| 855 // Delete directly into central cache |
| 856 tcmalloc::SLL_SetNext(ptr, NULL); |
| 857 Static::central_cache()[cl].InsertRange(ptr, ptr, 1); |
| 858 } |
| 859 } else { |
| 860 SpinLockHolder h(Static::pageheap_lock()); |
| 861 ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0); |
| 862 ASSERT(span != NULL && span->start == p); |
| 863 if (span->sample) { |
| 864 tcmalloc::DLL_Remove(span); |
| 865 Static::stacktrace_allocator()->Delete( |
| 866 reinterpret_cast<StackTrace*>(span->objects)); |
| 867 span->objects = NULL; |
| 868 } |
| 869 Static::pageheap()->Delete(span); |
| 870 } |
| 871 } |
| 872 |
| 873 // The default "do_free" that uses the default callback. |
| 874 inline void do_free(void* ptr) { |
| 875 return do_free_with_callback(ptr, &InvalidFree); |
| 876 } |
| 877 |
| 878 inline size_t GetSizeWithCallback(void* ptr, |
| 879 size_t (*invalid_getsize_fn)(void*)) { |
| 880 if (ptr == NULL) |
| 881 return 0; |
| 882 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; |
| 883 size_t cl = Static::pageheap()->GetSizeClassIfCached(p); |
| 884 if (cl != 0) { |
| 885 return Static::sizemap()->ByteSizeForClass(cl); |
| 886 } else { |
| 887 Span *span = Static::pageheap()->GetDescriptor(p); |
| 888 if (span == NULL) { // means we do not own this memory |
| 889 return (*invalid_getsize_fn)(ptr); |
| 890 } else if (span->sizeclass != 0) { |
| 891 Static::pageheap()->CacheSizeClass(p, span->sizeclass); |
| 892 return Static::sizemap()->ByteSizeForClass(span->sizeclass); |
| 893 } else { |
| 894 return span->length << kPageShift; |
| 895 } |
| 896 } |
| 897 } |
| 898 |
| 899 // This lets you call back to a given function pointer if ptr is invalid. |
| 900 // It is used primarily by windows code which wants a specialized callback. |
| 901 inline void* do_realloc_with_callback( |
| 902 void* old_ptr, size_t new_size, |
| 903 void (*invalid_free_fn)(void*), |
| 904 size_t (*invalid_get_size_fn)(void*)) { |
| 905 // Get the size of the old entry |
| 906 const size_t old_size = GetSizeWithCallback(old_ptr, invalid_get_size_fn); |
| 907 |
| 908 // Reallocate if the new size is larger than the old size, |
| 909 // or if the new size is significantly smaller than the old size. |
| 910 // We do hysteresis to avoid resizing ping-pongs: |
| 911 // . If we need to grow, grow to max(new_size, old_size * 1.X) |
| 912 // . Don't shrink unless new_size < old_size * 0.Y |
| 913 // X and Y trade-off time for wasted space. For now we do 1.25 and 0.5. |
| 914 const size_t lower_bound_to_grow = old_size + old_size / 4; |
| 915 const size_t upper_bound_to_shrink = old_size / 2; |
| 916 if ((new_size > old_size) || (new_size < upper_bound_to_shrink)) { |
| 917 // Need to reallocate. |
| 918 void* new_ptr = NULL; |
| 919 |
| 920 if (new_size > old_size && new_size < lower_bound_to_grow) { |
| 921 new_ptr = do_malloc(lower_bound_to_grow); |
| 922 } |
| 923 if (new_ptr == NULL) { |
| 924 // Either new_size is not a tiny increment, or last do_malloc failed. |
| 925 new_ptr = do_malloc(new_size); |
| 926 } |
| 927 if (new_ptr == NULL) { |
| 928 return NULL; |
| 929 } |
| 930 MallocHook::InvokeNewHook(new_ptr, new_size); |
| 931 memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size)); |
| 932 MallocHook::InvokeDeleteHook(old_ptr); |
| 933 // We could use a variant of do_free() that leverages the fact |
| 934 // that we already know the sizeclass of old_ptr. The benefit |
| 935 // would be small, so don't bother. |
| 936 do_free_with_callback(old_ptr, invalid_free_fn); |
| 937 return new_ptr; |
| 938 } else { |
| 939 // We still need to call hooks to report the updated size: |
| 940 MallocHook::InvokeDeleteHook(old_ptr); |
| 941 MallocHook::InvokeNewHook(old_ptr, new_size); |
| 942 return old_ptr; |
| 943 } |
| 944 } |
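To make the hysteresis above concrete: with old_size == 100, lower_bound_to_grow is 125 and upper_bound_to_shrink is 50, so new_size in [50, 100] keeps the old block, and a small growth request of 101..124 bytes actually allocates 125 so that repeated +1 reallocs do not copy every time. A tiny check of those bounds:

    // Numeric check of the realloc hysteresis bounds described above.
    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t old_size = 100;
      assert(old_size + old_size / 4 == 125);  // lower_bound_to_grow
      assert(old_size / 2 == 50);              // upper_bound_to_shrink
      return 0;
    }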
| 945 |
| 946 inline void* do_realloc(void* old_ptr, size_t new_size) { |
| 947 return do_realloc_with_callback(old_ptr, new_size, |
| 948 &InvalidFree, &InvalidGetSizeForRealloc); |
| 949 } |
| 950 |
| 951 // For use by exported routines below that want specific alignments |
| 952 // |
| 953 // Note: this code can be slow, and can significantly fragment memory. |
| 954 // The expectation is that memalign/posix_memalign/valloc/pvalloc will |
| 955 // not be invoked very often. This requirement simplifies our |
| 956 // implementation and allows us to tune for expected allocation |
| 957 // patterns. |
| 958 void* do_memalign(size_t align, size_t size) { |
| 959 ASSERT((align & (align - 1)) == 0); |
| 960 ASSERT(align > 0); |
| 961 if (size + align < size) return NULL; // Overflow |
| 962 |
| 963 if (Static::pageheap() == NULL) ThreadCache::InitModule(); |
| 964 |
| 965 // Allocate at least one byte to avoid boundary conditions below |
| 966 if (size == 0) size = 1; |
| 967 |
| 968 if (size <= kMaxSize && align < kPageSize) { |
| 969 // Search through acceptable size classes looking for one with |
| 970 // enough alignment. This depends on the fact that |
| 971 // InitSizeClasses() currently produces several size classes that |
| 972 // are aligned at powers of two. We will waste time and space if |
| 973 // we miss in the size class array, but that is deemed acceptable |
| 974 // since memalign() should be used rarely. |
| 975 int cl = Static::sizemap()->SizeClass(size); |
| 976 while (cl < kNumClasses && |
| 977 ((Static::sizemap()->class_to_size(cl) & (align - 1)) != 0)) { |
| 978 cl++; |
| 979 } |
| 980 if (cl < kNumClasses) { |
| 981 ThreadCache* heap = ThreadCache::GetCache(); |
| 982 return CheckedMallocResult(heap->Allocate( |
| 983 Static::sizemap()->class_to_size(cl))); |
| 984 } |
| 985 } |
| 986 |
| 987 // We will allocate directly from the page heap |
| 988 SpinLockHolder h(Static::pageheap_lock()); |
| 989 |
| 990 if (align <= kPageSize) { |
| 991 // Any page-level allocation will be fine |
| 992 // TODO: We could put the rest of this page in the appropriate |
| 993 // TODO: cache but it does not seem worth it. |
| 994 Span* span = Static::pageheap()->New(tcmalloc::pages(size)); |
| 995 return span == NULL ? NULL : SpanToMallocResult(span); |
| 996 } |
| 997 |
| 998 // Allocate extra pages and carve off an aligned portion |
| 999 const Length alloc = tcmalloc::pages(size + align); |
| 1000 Span* span = Static::pageheap()->New(alloc); |
| 1001 if (span == NULL) return NULL; |
| 1002 |
| 1003 // Skip starting portion so that we end up aligned |
| 1004 Length skip = 0; |
| 1005 while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) { |
| 1006 skip++; |
| 1007 } |
| 1008 ASSERT(skip < alloc); |
| 1009 if (skip > 0) { |
| 1010 Span* rest = Static::pageheap()->Split(span, skip); |
| 1011 Static::pageheap()->Delete(span); |
| 1012 span = rest; |
| 1013 } |
| 1014 |
| 1015 // Skip trailing portion that we do not need to return |
| 1016 const Length needed = tcmalloc::pages(size); |
| 1017 ASSERT(span->length >= needed); |
| 1018 if (span->length > needed) { |
| 1019 Span* trailer = Static::pageheap()->Split(span, needed); |
| 1020 Static::pageheap()->Delete(trailer); |
| 1021 } |
| 1022 return SpanToMallocResult(span); |
| 1023 } |
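The carve-off above relies on a span of pages(size + align) pages necessarily containing an align-aligned page boundary; the skip loop just counts pages until it reaches one. A toy check of that loop, assuming an 8 KiB page (kPageShift == 13) purely for illustration:

    // Toy check of the alignment-skip loop in do_memalign().
    #include <cassert>
    #include <cstdint>

    int main() {
      const unsigned kPageShift = 13;            // assumed 8 KiB pages
      const uintptr_t align = 64 * 1024;         // 64 KiB alignment request
      const uintptr_t start = 5;                 // span starts at page 5
      uintptr_t skip = 0;
      while ((((start + skip) << kPageShift) & (align - 1)) != 0) skip++;
      assert(skip == 3);                         // page 8 is byte 65536: aligned
      return 0;
    }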
| 1024 |
| 1025 // Helpers for use by exported routines below: |
| 1026 |
| 1027 inline void do_malloc_stats() { |
| 1028 PrintStats(1); |
| 1029 } |
| 1030 |
| 1031 inline int do_mallopt(int cmd, int value) { |
| 1032 return 1; // Indicates error |
| 1033 } |
| 1034 |
| 1035 #ifdef HAVE_STRUCT_MALLINFO // mallinfo isn't defined on freebsd, for instance |
| 1036 inline struct mallinfo do_mallinfo() { |
| 1037 TCMallocStats stats; |
| 1038 ExtractStats(&stats, NULL); |
| 1039 |
| 1040 // Just some of the fields are filled in. |
| 1041 struct mallinfo info; |
| 1042 memset(&info, 0, sizeof(info)); |
| 1043 |
| 1044 // Unfortunately, the struct contains "int" fields, so some of the |
| 1045 // size values will be truncated. |
| 1046 info.arena = static_cast<int>(stats.system_bytes); |
| 1047 info.fsmblks = static_cast<int>(stats.thread_bytes |
| 1048 + stats.central_bytes |
| 1049 + stats.transfer_bytes); |
| 1050 info.fordblks = static_cast<int>(stats.pageheap_bytes); |
| 1051 info.uordblks = static_cast<int>(stats.system_bytes |
| 1052 - stats.thread_bytes |
| 1053 - stats.central_bytes |
| 1054 - stats.transfer_bytes |
| 1055 - stats.pageheap_bytes); |
| 1056 |
| 1057 return info; |
| 1058 } |
| 1059 #endif // #ifdef HAVE_STRUCT_MALLINFO |
| 1060 |
| 1061 static SpinLock set_new_handler_lock(SpinLock::LINKER_INITIALIZED); |
| 1062 |
| 1063 inline void* cpp_alloc(size_t size, bool nothrow) { |
| 1064 for (;;) { |
| 1065 void* p = do_malloc(size); |
| 1066 #ifdef PREANSINEW |
| 1067 return p; |
| 1068 #else |
| 1069 if (p == NULL) { // allocation failed |
| 1070 // Get the current new handler. NB: this function is not |
| 1071 // thread-safe. We make a feeble stab at making it so here, but |
| 1072 // this lock only protects against tcmalloc interfering with |
| 1073 // itself, not with other libraries calling set_new_handler. |
| 1074 std::new_handler nh; |
| 1075 { |
| 1076 SpinLockHolder h(&set_new_handler_lock); |
| 1077 nh = std::set_new_handler(0); |
| 1078 (void) std::set_new_handler(nh); |
| 1079 } |
| 1080 #if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS) |
| 1081 if (nh) { |
| 1082 // Since exceptions are disabled, we don't really know if new_handler |
| 1083 // failed. Assume it will abort if it fails. |
| 1084 (*nh)(); |
| 1085 continue; |
| 1086 } |
| 1087 return 0; |
| 1088 #else |
| 1089 // If no new_handler is established, the allocation failed. |
| 1090 if (!nh) { |
| 1091 if (nothrow) return 0; |
| 1092 throw std::bad_alloc(); |
| 1093 } |
| 1094 // Otherwise, try the new_handler. If it returns, retry the |
| 1095 // allocation. If it throws std::bad_alloc, fail the allocation. |
| 1096 // if it throws something else, don't interfere. |
| 1097 try { |
| 1098 (*nh)(); |
| 1099 } catch (const std::bad_alloc&) { |
| 1100 if (!nothrow) throw; |
| 1101 return p; |
| 1102 } |
| 1103 #endif // (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS) |
| 1104 } else { // allocation success |
| 1105 return p; |
| 1106 } |
| 1107 #endif // PREANSINEW |
| 1108 } |
| 1109 } |
| 1110 |
| 1111 } // end unnamed namespace |
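cpp_alloc() above implements the standard operator-new retry protocol: on failure, fetch the installed new_handler; if none, fail (throw or return NULL); if one exists, call it and retry. A sketch of the application side of that protocol (the emergency pool and ReleaseEmergencyPool are hypothetical):

    // Application-side new_handler that cooperates with cpp_alloc()'s
    // retry loop; the emergency pool is a hypothetical example.
    #include <new>
    #include <cstdlib>

    static void* emergency_pool = NULL;

    static void ReleaseEmergencyPool() {
      std::free(emergency_pool);        // give memory back so the retry succeeds
      emergency_pool = NULL;
      std::set_new_handler(NULL);       // next failure throws std::bad_alloc
    }

    int main() {
      emergency_pool = std::malloc(1 << 20);    // 1 MB reserve
      std::set_new_handler(ReleaseEmergencyPool);
      char* p = new char[64];                   // retried via the handler on failure
      delete[] p;
      return 0;
    }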
| 1112 |
| 1113 // As promised, the definition of this function, declared above. |
| 1114 size_t TCMallocImplementation::GetAllocatedSize(void* ptr) { |
| 1115 return GetSizeWithCallback(ptr, &InvalidGetAllocatedSize); |
| 1116 } |
| 1117 |
| 1118 //------------------------------------------------------------------- |
| 1119 // Exported routines |
| 1120 //------------------------------------------------------------------- |
| 1121 |
| 1122 // CAVEAT: The code structure below ensures that MallocHook methods are always |
| 1123 // called from the stack frame of the invoked allocation function. |
| 1124 // heap-checker.cc depends on this to start a stack trace from |
| 1125 // the call to the (de)allocation function. |
| 1126 |
| 1127 static int tc_new_mode = 0; // See tc_set_new_mode(). |
| 1128 extern "C" void* tc_malloc(size_t size) __THROW { |
| 1129 void* result = (tc_new_mode ? cpp_alloc(size, false) : do_malloc(size)); |
| 1130 MallocHook::InvokeNewHook(result, size); |
| 1131 return result; |
| 1132 } |
| 1133 |
| 1134 extern "C" void tc_free(void* ptr) __THROW { |
| 1135 MallocHook::InvokeDeleteHook(ptr); |
| 1136 do_free(ptr); |
| 1137 } |
| 1138 |
| 1139 extern "C" void* tc_calloc(size_t n, size_t elem_size) __THROW { |
| 1140 void* result = do_calloc(n, elem_size); |
| 1141 MallocHook::InvokeNewHook(result, n * elem_size); |
| 1142 return result; |
| 1143 } |
| 1144 |
| 1145 extern "C" void tc_cfree(void* ptr) __THROW { |
| 1146 MallocHook::InvokeDeleteHook(ptr); |
| 1147 do_free(ptr); |
| 1148 } |
| 1149 |
| 1150 extern "C" void* tc_realloc(void* old_ptr, size_t new_size) __THROW { |
| 1151 if (old_ptr == NULL) { |
| 1152 void* result = do_malloc(new_size); |
| 1153 MallocHook::InvokeNewHook(result, new_size); |
| 1154 return result; |
| 1155 } |
| 1156 if (new_size == 0) { |
| 1157 MallocHook::InvokeDeleteHook(old_ptr); |
| 1158 do_free(old_ptr); |
| 1159 return NULL; |
| 1160 } |
| 1161 return do_realloc(old_ptr, new_size); |
| 1162 } |
| 1163 |
| 1164 extern "C" void* tc_new(size_t size) { |
| 1165 void* p = cpp_alloc(size, false); |
| 1166 // We keep this next instruction out of cpp_alloc for a reason: when |
| 1167 // it's in, and new just calls cpp_alloc, the optimizer may fold the |
| 1168 // new call into cpp_alloc, which messes up our whole section-based |
| 1169 // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc |
| 1170 // isn't the last thing this fn calls, and prevents the folding. |
| 1171 MallocHook::InvokeNewHook(p, size); |
| 1172 return p; |
| 1173 } |
| 1174 |
| 1175 extern "C" void* tc_new_nothrow(size_t size, const std::nothrow_t&) __THROW { |
| 1176 void* p = cpp_alloc(size, true); |
| 1177 MallocHook::InvokeNewHook(p, size); |
| 1178 return p; |
| 1179 } |
| 1180 |
| 1181 extern "C" void tc_delete(void* p) __THROW { |
| 1182 MallocHook::InvokeDeleteHook(p); |
| 1183 do_free(p); |
| 1184 } |
| 1185 |
| 1186 extern "C" void* tc_newarray(size_t size) { |
| 1187 void* p = cpp_alloc(size, false); |
| 1188 // We keep this next instruction out of cpp_alloc for a reason: when |
| 1189 // it's in, and new just calls cpp_alloc, the optimizer may fold the |
| 1190 // new call into cpp_alloc, which messes up our whole section-based |
| 1191 // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc |
| 1192 // isn't the last thing this fn calls, and prevents the folding. |
| 1193 MallocHook::InvokeNewHook(p, size); |
| 1194 return p; |
| 1195 } |
| 1196 |
| 1197 extern "C" void* tc_newarray_nothrow(size_t size, const std::nothrow_t&) __THROW { |
| 1198 void* p = cpp_alloc(size, true); |
| 1199 MallocHook::InvokeNewHook(p, size); |
| 1200 return p; |
| 1201 } |
| 1202 |
| 1203 extern "C" void tc_deletearray(void* p) __THROW { |
| 1204 MallocHook::InvokeDeleteHook(p); |
| 1205 do_free(p); |
| 1206 } |
| 1207 |
| 1208 extern "C" void* tc_memalign(size_t align, size_t size) __THROW { |
| 1209 void* result = do_memalign(align, size); |
| 1210 MallocHook::InvokeNewHook(result, size); |
| 1211 return result; |
| 1212 } |
| 1213 |
| 1214 extern "C" int tc_posix_memalign(void** result_ptr, size_t align, size_t size) |
| 1215 __THROW { |
| 1216 if (((align % sizeof(void*)) != 0) || |
| 1217 ((align & (align - 1)) != 0) || |
| 1218 (align == 0)) { |
| 1219 return EINVAL; |
| 1220 } |
| 1221 |
| 1222 void* result = do_memalign(align, size); |
| 1223 MallocHook::InvokeNewHook(result, size); |
| 1224 if (result == NULL) { |
| 1225 return ENOMEM; |
| 1226 } else { |
| 1227 *result_ptr = result; |
| 1228 return 0; |
| 1229 } |
| 1230 } |
| 1231 |
| 1232 static size_t pagesize = 0; |
| 1233 |
| 1234 extern "C" void* tc_valloc(size_t size) __THROW { |
| 1235 // Allocate page-aligned object of length >= size bytes |
| 1236 if (pagesize == 0) pagesize = getpagesize(); |
| 1237 void* result = do_memalign(pagesize, size); |
| 1238 MallocHook::InvokeNewHook(result, size); |
| 1239 return result; |
| 1240 } |
| 1241 |
| 1242 extern "C" void* tc_pvalloc(size_t size) __THROW { |
| 1243 // Round up size to a multiple of pagesize |
| 1244 if (pagesize == 0) pagesize = getpagesize(); |
| 1245 size = (size + pagesize - 1) & ~(pagesize - 1); |
| 1246 void* result = do_memalign(pagesize, size); |
| 1247 MallocHook::InvokeNewHook(result, size); |
| 1248 return result; |
| 1249 } |
| 1250 |
| 1251 extern "C" void tc_malloc_stats(void) __THROW { |
| 1252 do_malloc_stats(); |
| 1253 } |
| 1254 |
| 1255 extern "C" int tc_mallopt(int cmd, int value) __THROW { |
| 1256 return do_mallopt(cmd, value); |
| 1257 } |
| 1258 |
| 1259 #ifdef HAVE_STRUCT_MALLINFO |
| 1260 extern "C" struct mallinfo tc_mallinfo(void) __THROW { |
| 1261 return do_mallinfo(); |
| 1262 } |
| 1263 #endif |
| 1264 |
| 1265 // This function behaves similarly to MSVC's _set_new_mode. |
| 1266 // If flag is 0 (default), calls to malloc will behave normally. |
| 1267 // If flag is 1, calls to malloc will behave like calls to new, |
| 1268 // and the std::new_handler will be invoked on failure. |
| 1269 // Returns the previous mode. |
| 1270 extern "C" int tc_set_new_mode(int flag) __THROW { |
| 1271 int old_mode = tc_new_mode; |
| 1272 tc_new_mode = flag; |
| 1273 return old_mode; |
| 1274 } |
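A hedged usage sketch for the mode switch above: after tc_set_new_mode(1), a failing malloc routes through cpp_alloc() and therefore through any installed new handler:

    // Usage sketch for tc_set_new_mode(); value 1 makes malloc consult the
    // new_handler on failure, mirroring MSVC's _set_new_mode.
    extern "C" int tc_set_new_mode(int flag);

    void EnableNewModeMalloc() {
      const int old_mode = tc_set_new_mode(1);
      (void)old_mode;   // restore later with tc_set_new_mode(old_mode)
    }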
| 1275 |
| 1276 |
| 1277 // Override __libc_memalign in libc on linux boxes specially. |
| 1278 // They have a bug in libc that causes them to (very rarely) allocate |
| 1279 // with __libc_memalign() yet deallocate with free() and the |
| 1280 // definitions above don't catch it. |
| 1281 // This function is an exception to the rule of calling MallocHook method |
| 1282 // from the stack frame of the allocation function; |
| 1283 // heap-checker handles this special case explicitly. |
| 1284 static void *MemalignOverride(size_t align, size_t size, const void *caller) |
| 1285 __THROW ATTRIBUTE_SECTION(google_malloc); |
| 1286 |
| 1287 static void *MemalignOverride(size_t align, size_t size, const void *caller) |
| 1288 __THROW { |
| 1289 void* result = do_memalign(align, size); |
| 1290 MallocHook::InvokeNewHook(result, size); |
| 1291 return result; |
| 1292 } |
| 1293 void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride; |