OLD | NEW |
1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
79 // | 79 // |
80 // TODO: Bias reclamation to larger addresses | 80 // TODO: Bias reclamation to larger addresses |
81 // TODO: implement mallinfo/mallopt | 81 // TODO: implement mallinfo/mallopt |
82 // TODO: Better testing | 82 // TODO: Better testing |
83 // | 83 // |
84 // 9/28/2003 (new page-level allocator replaces ptmalloc2): | 84 // 9/28/2003 (new page-level allocator replaces ptmalloc2): |
85 // * malloc/free of small objects goes from ~300 ns to ~50 ns. | 85 // * malloc/free of small objects goes from ~300 ns to ~50 ns. |
86 // * allocation of a reasonably complicated struct | 86 // * allocation of a reasonably complicated struct |
87 // goes from about 1100 ns to about 300 ns. | 87 // goes from about 1100 ns to about 300 ns. |
88 | 88 |
89 #include <config.h> | 89 #include "config.h" |
90 #include <new> | 90 #include <google/tcmalloc.h> |
91 #include <stdio.h> | 91 |
92 #include <stddef.h> | 92 #include <errno.h> // for ENOMEM, EINVAL, errno |
| 93 #ifdef HAVE_SYS_CDEFS_H |
| 94 #include <sys/cdefs.h> // for __THROW |
| 95 #endif |
| 96 #ifdef HAVE_FEATURES_H |
| 97 #include <features.h> // for __GLIBC__ |
| 98 #endif |
93 #if defined HAVE_STDINT_H | 99 #if defined HAVE_STDINT_H |
94 #include <stdint.h> | 100 #include <stdint.h> |
95 #elif defined HAVE_INTTYPES_H | 101 #elif defined HAVE_INTTYPES_H |
96 #include <inttypes.h> | 102 #include <inttypes.h> |
97 #else | 103 #else |
98 #include <sys/types.h> | 104 #include <sys/types.h> |
99 #endif | 105 #endif |
100 #if defined(HAVE_MALLOC_H) && defined(HAVE_STRUCT_MALLINFO) | 106 #include <stddef.h> // for size_t, NULL |
101 #include <malloc.h> // for struct mallinfo | 107 #include <stdlib.h> // for getenv |
| 108 #include <string.h> // for strcmp, memset, strlen, etc |
| 109 #ifdef HAVE_UNISTD_H |
| 110 #include <unistd.h> // for getpagesize, write, etc |
102 #endif | 111 #endif |
103 #include <string.h> | 112 #include <algorithm> // for max, min |
104 #ifdef HAVE_PTHREAD | 113 #include <limits> // for numeric_limits |
105 #include <pthread.h> | 114 #include <new> // for nothrow_t (ptr only), etc |
| 115 #include <vector> // for vector |
| 116 |
| 117 #include <google/malloc_extension.h> |
| 118 #include <google/malloc_hook.h> // for MallocHook |
| 119 #include "base/basictypes.h" // for int64 |
| 120 #include "base/commandlineflags.h" // for RegisterFlagValidator, etc |
| 121 #include "base/dynamic_annotations.h" // for RunningOnValgrind |
| 122 #include "base/spinlock.h" // for SpinLockHolder |
| 123 #include "central_freelist.h" // for CentralFreeListPadded |
| 124 #include "common.h" // for StackTrace, kPageShift, etc |
| 125 #include "internal_logging.h" // for ASSERT, TCMalloc_Printer, etc |
| 126 #include "linked_list.h" // for SLL_SetNext |
| 127 #include "malloc_hook-inl.h" // for MallocHook::InvokeNewHook, etc |
| 128 #include "page_heap.h" // for PageHeap, PageHeap::Stats |
| 129 #include "page_heap_allocator.h" // for PageHeapAllocator |
| 130 #include "span.h" // for Span, DLL_Prepend, etc |
| 131 #include "stack_trace_table.h" // for StackTraceTable |
| 132 #include "static_vars.h" // for Static |
| 133 #include "system-alloc.h" // for DumpSystemAllocatorStats, etc |
| 134 #include "tcmalloc_guard.h" // for TCMallocGuard |
| 135 #include "thread_cache.h" // for ThreadCache |
| 136 |
| 137 // We only need malloc.h for struct mallinfo. |
| 138 #ifdef HAVE_STRUCT_MALLINFO |
| 139 // Malloc can be in several places on older versions of OS X. |
| 140 # if defined(HAVE_MALLOC_H) |
| 141 # include <malloc.h> |
| 142 # elif defined(HAVE_SYS_MALLOC_H) |
| 143 # include <sys/malloc.h> |
| 144 # elif defined(HAVE_MALLOC_MALLOC_H) |
| 145 # include <malloc/malloc.h> |
| 146 # endif |
106 #endif | 147 #endif |
107 #ifdef HAVE_UNISTD_H | |
108 #include <unistd.h> | |
109 #endif | |
110 #include <errno.h> | |
111 #include <stdarg.h> | |
112 #include <algorithm> | |
113 #include <google/tcmalloc.h> | |
114 #include "base/commandlineflags.h" | |
115 #include "base/basictypes.h" // gets us PRIu64 | |
116 #include "base/sysinfo.h" | |
117 #include "base/spinlock.h" | |
118 #include "common.h" | |
119 #include "malloc_hook-inl.h" | |
120 #include <google/malloc_hook.h> | |
121 #include <google/malloc_extension.h> | |
122 #include "central_freelist.h" | |
123 #include "internal_logging.h" | |
124 #include "linked_list.h" | |
125 #include "maybe_threads.h" | |
126 #include "page_heap.h" | |
127 #include "pagemap.h" | |
128 #include "span.h" | |
129 #include "static_vars.h" | |
130 #include "system-alloc.h" | |
131 #include "tcmalloc_guard.h" | |
132 #include "thread_cache.h" | |
133 | 148 |
134 #if (defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)) && !defi
ned(WIN32_OVERRIDE_ALLOCATORS) | 149 #if (defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)) && !defi
ned(WIN32_OVERRIDE_ALLOCATORS) |
135 # define WIN32_DO_PATCHING 1 | 150 # define WIN32_DO_PATCHING 1 |
136 #endif | 151 #endif |
137 | 152 |
138 using std::max; | 153 using STL_NAMESPACE::max; |
| 154 using STL_NAMESPACE::numeric_limits; |
| 155 using STL_NAMESPACE::vector; |
| 156 using tcmalloc::AlignmentForSize; |
139 using tcmalloc::PageHeap; | 157 using tcmalloc::PageHeap; |
| 158 using tcmalloc::PageHeapAllocator; |
140 using tcmalloc::SizeMap; | 159 using tcmalloc::SizeMap; |
141 using tcmalloc::Span; | 160 using tcmalloc::Span; |
142 using tcmalloc::StackTrace; | 161 using tcmalloc::StackTrace; |
143 using tcmalloc::Static; | 162 using tcmalloc::Static; |
144 using tcmalloc::ThreadCache; | 163 using tcmalloc::ThreadCache; |
145 | 164 |
146 // __THROW is defined in glibc systems. It means, counter-intuitively, | 165 // __THROW is defined in glibc systems. It means, counter-intuitively, |
147 // "This function will never throw an exception." It's an optional | 166 // "This function will never throw an exception." It's an optional |
148 // optimization tool, but we may need to use it to match glibc prototypes. | 167 // optimization tool, but we may need to use it to match glibc prototypes. |
149 #ifndef __THROW // I guess we're not on a glibc system | 168 #ifndef __THROW // I guess we're not on a glibc system |
(...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
210 ATTRIBUTE_SECTION(google_malloc); | 229 ATTRIBUTE_SECTION(google_malloc); |
211 void* tc_valloc(size_t __size) __THROW | 230 void* tc_valloc(size_t __size) __THROW |
212 ATTRIBUTE_SECTION(google_malloc); | 231 ATTRIBUTE_SECTION(google_malloc); |
213 void* tc_pvalloc(size_t __size) __THROW | 232 void* tc_pvalloc(size_t __size) __THROW |
214 ATTRIBUTE_SECTION(google_malloc); | 233 ATTRIBUTE_SECTION(google_malloc); |
215 | 234 |
216 void tc_malloc_stats(void) __THROW | 235 void tc_malloc_stats(void) __THROW |
217 ATTRIBUTE_SECTION(google_malloc); | 236 ATTRIBUTE_SECTION(google_malloc); |
218 int tc_mallopt(int cmd, int value) __THROW | 237 int tc_mallopt(int cmd, int value) __THROW |
219 ATTRIBUTE_SECTION(google_malloc); | 238 ATTRIBUTE_SECTION(google_malloc); |
220 #ifdef HAVE_STRUCT_MALLINFO // struct mallinfo isn't defined on freebsd | 239 #ifdef HAVE_STRUCT_MALLINFO |
221 struct mallinfo tc_mallinfo(void) __THROW | 240 struct mallinfo tc_mallinfo(void) __THROW |
222 ATTRIBUTE_SECTION(google_malloc); | 241 ATTRIBUTE_SECTION(google_malloc); |
223 #endif | 242 #endif |
224 | 243 |
225 void* tc_new(size_t size) | 244 void* tc_new(size_t size) |
226 ATTRIBUTE_SECTION(google_malloc); | 245 ATTRIBUTE_SECTION(google_malloc); |
227 void tc_delete(void* p) __THROW | 246 void tc_delete(void* p) __THROW |
228 ATTRIBUTE_SECTION(google_malloc); | 247 ATTRIBUTE_SECTION(google_malloc); |
229 void* tc_newarray(size_t size) | 248 void* tc_newarray(size_t size) |
230 ATTRIBUTE_SECTION(google_malloc); | 249 ATTRIBUTE_SECTION(google_malloc); |
231 void tc_deletearray(void* p) __THROW | 250 void tc_deletearray(void* p) __THROW |
232 ATTRIBUTE_SECTION(google_malloc); | 251 ATTRIBUTE_SECTION(google_malloc); |
233 | 252 |
234 // And the nothrow variants of these: | 253 // And the nothrow variants of these: |
235 void* tc_new_nothrow(size_t size, const std::nothrow_t&) __THROW | 254 void* tc_new_nothrow(size_t size, const std::nothrow_t&) __THROW |
236 ATTRIBUTE_SECTION(google_malloc); | 255 ATTRIBUTE_SECTION(google_malloc); |
237 void* tc_newarray_nothrow(size_t size, const std::nothrow_t&) __THROW | 256 void* tc_newarray_nothrow(size_t size, const std::nothrow_t&) __THROW |
238 ATTRIBUTE_SECTION(google_malloc); | 257 ATTRIBUTE_SECTION(google_malloc); |
239 // Surprisingly, standard C++ library implementations use a | 258 // Surprisingly, standard C++ library implementations use a |
240 // nothrow-delete internally. See, eg: | 259 // nothrow-delete internally. See, eg: |
241 // http://www.dinkumware.com/manuals/?manual=compleat&page=new.html | 260 // http://www.dinkumware.com/manuals/?manual=compleat&page=new.html |
242 void tc_delete_nothrow(void* ptr, const std::nothrow_t&) __THROW | 261 void tc_delete_nothrow(void* ptr, const std::nothrow_t&) __THROW |
243 ATTRIBUTE_SECTION(google_malloc); | 262 ATTRIBUTE_SECTION(google_malloc); |
244 void tc_deletearray_nothrow(void* ptr, const std::nothrow_t&) __THROW | 263 void tc_deletearray_nothrow(void* ptr, const std::nothrow_t&) __THROW |
245 ATTRIBUTE_SECTION(google_malloc); | 264 ATTRIBUTE_SECTION(google_malloc); |
| 265 |
| 266 // Some non-standard extensions that we support. |
| 267 |
| 268 // This is equivalent to |
| 269 // OS X: malloc_size() |
| 270 // glibc: malloc_usable_size() |
| 271 // Windows: _msize() |
| 272 size_t tc_malloc_size(void* p) __THROW |
| 273 ATTRIBUTE_SECTION(google_malloc); |
246 } // extern "C" | 274 } // extern "C" |
247 | 275 |
248 // Override the libc functions to prefer our own instead. This comes | 276 // Override the libc functions to prefer our own instead. This comes |
249 // first so code in tcmalloc.cc can use the overridden versions. One | 277 // first so code in tcmalloc.cc can use the overridden versions. One |
250 // exception: in windows, by default, we patch our code into these | 278 // exception: in windows, by default, we patch our code into these |
251 // functions (via src/windows/patch_function.cc) rather than override | 279 // functions (via src/windows/patch_function.cc) rather than override |
252 // them. In that case, we don't want to do this overriding here. | 280 // them. In that case, we don't want to do this overriding here. |
253 #if !defined(WIN32_DO_PATCHING) && !defined(TCMALLOC_FOR_DEBUGALLOCATION) | 281 #if !defined(WIN32_DO_PATCHING) |
254 | 282 |
255 // TODO(mbelshe): Turn off TCMalloc's symbols for libc. We do that | 283 // TODO(mbelshe): Turn off TCMalloc's symbols for libc. We do that |
256 // elsewhere. | 284 // elsewhere. |
257 #ifndef _WIN32 | 285 #ifndef _WIN32 |
258 | 286 |
259 #if defined(__GNUC__) && !defined(__MACH__) | 287 #if defined(__GNUC__) && !defined(__MACH__) |
260 // Potentially faster variants that use the gcc alias extension. | 288 // Potentially faster variants that use the gcc alias extension. |
261 // FreeBSD does support aliases, but apparently not correctly. :-( | 289 // FreeBSD does support aliases, but apparently not correctly. :-( |
262 // NOTE: we make many of these symbols weak, but do so in the makefile | 290 // NOTE: we make many of these symbols weak, but do so in the makefile |
263 // (via objcopy -W) and not here. That ends up being more portable. | 291 // (via objcopy -W) and not here. That ends up being more portable. |
(...skipping 19 matching lines...) Expand all Loading... |
283 void* memalign(size_t align, size_t s) __THROW ALIAS("tc_memalign"); | 311 void* memalign(size_t align, size_t s) __THROW ALIAS("tc_memalign"); |
284 void* valloc(size_t size) __THROW ALIAS("tc_valloc"); | 312 void* valloc(size_t size) __THROW ALIAS("tc_valloc"); |
285 void* pvalloc(size_t size) __THROW ALIAS("tc_pvalloc"); | 313 void* pvalloc(size_t size) __THROW ALIAS("tc_pvalloc"); |
286 int posix_memalign(void** r, size_t a, size_t s) __THROW | 314 int posix_memalign(void** r, size_t a, size_t s) __THROW |
287 ALIAS("tc_posix_memalign"); | 315 ALIAS("tc_posix_memalign"); |
288 void malloc_stats(void) __THROW ALIAS("tc_malloc_stats"); | 316 void malloc_stats(void) __THROW ALIAS("tc_malloc_stats"); |
289 int mallopt(int cmd, int value) __THROW ALIAS("tc_mallopt"); | 317 int mallopt(int cmd, int value) __THROW ALIAS("tc_mallopt"); |
290 #ifdef HAVE_STRUCT_MALLINFO | 318 #ifdef HAVE_STRUCT_MALLINFO |
291 struct mallinfo mallinfo(void) __THROW ALIAS("tc_mallinfo"); | 319 struct mallinfo mallinfo(void) __THROW ALIAS("tc_mallinfo"); |
292 #endif | 320 #endif |
293 size_t malloc_usable_size(void* ptr) __THROW ALIAS("tc_malloc_usable_size"); | 321 size_t malloc_size(void* p) __THROW ALIAS("tc_malloc_size"); |
| 322 size_t malloc_usable_size(void* p) __THROW ALIAS("tc_malloc_size"); |
294 } // extern "C" | 323 } // extern "C" |
295 #else // #if defined(__GNUC__) && !defined(__MACH__) | 324 #else // #if defined(__GNUC__) && !defined(__MACH__) |
296 // Portable wrappers | 325 // Portable wrappers |
297 void* operator new(size_t size) { return tc_new(size); } | 326 void* operator new(size_t size) { return tc_new(size); } |
298 void operator delete(void* p) __THROW { tc_delete(p); } | 327 void operator delete(void* p) __THROW { tc_delete(p); } |
299 void* operator new[](size_t size) { return tc_newarray(size); } | 328 void* operator new[](size_t size) { return tc_newarray(size); } |
300 void operator delete[](void* p) __THROW { tc_deletearray(p); } | 329 void operator delete[](void* p) __THROW { tc_deletearray(p); } |
301 void* operator new(size_t size, const std::nothrow_t& nt) __THROW { | 330 void* operator new(size_t size, const std::nothrow_t& nt) __THROW { |
302 return tc_new_nothrow(size, nt); | 331 return tc_new_nothrow(size, nt); |
303 } | 332 } |
(...skipping 16 matching lines...) Expand all Loading... |
320 void* valloc(size_t s) __THROW { return tc_valloc(s); } | 349 void* valloc(size_t s) __THROW { return tc_valloc(s); } |
321 void* pvalloc(size_t s) __THROW { return tc_pvalloc(s); } | 350 void* pvalloc(size_t s) __THROW { return tc_pvalloc(s); } |
322 int posix_memalign(void** r, size_t a, size_t s) __THROW { | 351 int posix_memalign(void** r, size_t a, size_t s) __THROW { |
323 return tc_posix_memalign(r, a, s); | 352 return tc_posix_memalign(r, a, s); |
324 } | 353 } |
325 void malloc_stats(void) __THROW { tc_malloc_stats(); } | 354 void malloc_stats(void) __THROW { tc_malloc_stats(); } |
326 int mallopt(int cmd, int v) __THROW { return tc_mallopt(cmd, v); } | 355 int mallopt(int cmd, int v) __THROW { return tc_mallopt(cmd, v); } |
327 #ifdef HAVE_STRUCT_MALLINFO | 356 #ifdef HAVE_STRUCT_MALLINFO |
328 struct mallinfo mallinfo(void) __THROW { return tc_mallinfo(); } | 357 struct mallinfo mallinfo(void) __THROW { return tc_mallinfo(); } |
329 #endif | 358 #endif |
330 size_t malloc_usable_size(void* p) __THROW { | 359 size_t malloc_size(void* p) __THROW { return tc_malloc_size(p); } |
331 return tc_malloc_usable_size(p); | 360 size_t malloc_usable_size(void* p) __THROW { return tc_malloc_size(p); } |
332 } | |
333 } // extern "C" | 361 } // extern "C" |
334 #endif // #if defined(__GNUC__) | 362 #endif // #if defined(__GNUC__) |
335 | 363 |
336 // Some library routines on RedHat 9 allocate memory using malloc() | 364 // Some library routines on RedHat 9 allocate memory using malloc() |
337 // and free it using __libc_free() (or vice-versa). Since we provide | 365 // and free it using __libc_free() (or vice-versa). Since we provide |
338 // our own implementations of malloc/free, we need to make sure that | 366 // our own implementations of malloc/free, we need to make sure that |
339 // the __libc_XXX variants (defined as part of glibc) also point to | 367 // the __libc_XXX variants (defined as part of glibc) also point to |
340 // the same implementations. | 368 // the same implementations. |
341 #ifdef __GLIBC__ // only glibc defines __libc_* | 369 #ifdef __GLIBC__ // only glibc defines __libc_* |
342 extern "C" { | 370 extern "C" { |
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
390 tc_free(ptr); | 418 tc_free(ptr); |
391 } | 419 } |
392 | 420 |
393 void (*__free_hook)(void* ptr, const void* caller) = tc_ptmalloc_free_hook; | 421 void (*__free_hook)(void* ptr, const void* caller) = tc_ptmalloc_free_hook; |
394 | 422 |
395 #endif | 423 #endif |
396 | 424 |
397 #endif // #ifndef _WIN32 | 425 #endif // #ifndef _WIN32 |
398 #undef ALIAS | 426 #undef ALIAS |
399 | 427 |
400 #endif // #ifndef(WIN32_DO_PATCHING) && ndef(TCMALLOC_FOR_DEBUGALLOCATION) | 428 #endif // #ifndef(WIN32_DO_PATCHING) |
401 | 429 |
402 | 430 |
403 // ----------------------- IMPLEMENTATION ------------------------------- | 431 // ----------------------- IMPLEMENTATION ------------------------------- |
404 | 432 |
405 static int tc_new_mode = 0; // See tc_set_new_mode(). | 433 static int tc_new_mode = 0; // See tc_set_new_mode(). |
406 | 434 |
407 // Routines such as free() and realloc() catch some erroneous pointers | 435 // Routines such as free() and realloc() catch some erroneous pointers |
408 // passed to them, and invoke the below when they do. (An erroneous pointer | 436 // passed to them, and invoke the below when they do. (An erroneous pointer |
409 // won't be caught if it's within a valid span or a stale span for which | 437 // won't be caught if it's within a valid span or a stale span for which |
410 // the pagemap cache has a non-zero sizeclass.) This is a cheap (source-editing | 438 // the pagemap cache has a non-zero sizeclass.) This is a cheap (source-editing |
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
457 r->pageheap = Static::pageheap()->stats(); | 485 r->pageheap = Static::pageheap()->stats(); |
458 } | 486 } |
459 } | 487 } |
460 | 488 |
461 // WRITE stats to "out" | 489 // WRITE stats to "out" |
462 static void DumpStats(TCMalloc_Printer* out, int level) { | 490 static void DumpStats(TCMalloc_Printer* out, int level) { |
463 TCMallocStats stats; | 491 TCMallocStats stats; |
464 uint64_t class_count[kNumClasses]; | 492 uint64_t class_count[kNumClasses]; |
465 ExtractStats(&stats, (level >= 2 ? class_count : NULL)); | 493 ExtractStats(&stats, (level >= 2 ? class_count : NULL)); |
466 | 494 |
467 static const double MB = 1048576.0; | 495 static const double MiB = 1048576.0; |
468 | 496 |
469 const uint64_t bytes_in_use = stats.pageheap.system_bytes | 497 const uint64_t virtual_memory_used = (stats.pageheap.system_bytes |
470 - stats.pageheap.free_bytes | 498 + stats.metadata_bytes); |
471 - stats.pageheap.unmapped_bytes | 499 const uint64_t physical_memory_used = (virtual_memory_used |
472 - stats.central_bytes | 500 - stats.pageheap.unmapped_bytes); |
473 - stats.transfer_bytes | 501 const uint64_t bytes_in_use_by_app = (physical_memory_used |
474 - stats.thread_bytes; | 502 - stats.metadata_bytes |
| 503 - stats.pageheap.free_bytes |
| 504 - stats.central_bytes |
| 505 - stats.transfer_bytes |
| 506 - stats.thread_bytes); |
475 | 507 |
476 out->printf("WASTE: %7.1f MB committed but not used\n" | 508 out->printf( |
477 "WASTE: %7.1f MB bytes committed, %7.1f MB bytes in use\n" | 509 "WASTE: %7.1f MiB committed but not used\n" |
478 "WASTE: committed/used ratio of %f\n", | 510 "WASTE: %7.1f MiB bytes committed, %7.1f MiB bytes in use\n" |
479 (stats.pageheap.committed_bytes - bytes_in_use) / MB, | 511 "WASTE: committed/used ratio of %f\n", |
480 stats.pageheap.committed_bytes / MB, | 512 (stats.pageheap.committed_bytes - bytes_in_use_by_app) / MiB, |
481 bytes_in_use / MB, | 513 stats.pageheap.committed_bytes / MiB, |
482 stats.pageheap.committed_bytes / static_cast<double>(bytes_in_use)
); | 514 bytes_in_use_by_app / MiB, |
| 515 stats.pageheap.committed_bytes / static_cast<double>(bytes_in_use_by_app) |
| 516 ); |
| 517 #ifdef TCMALLOC_SMALL_BUT_SLOW |
| 518 out->printf( |
| 519 "NOTE: SMALL MEMORY MODEL IS IN USE, PERFORMANCE MAY SUFFER.\n"); |
| 520 #endif |
| 521 out->printf( |
| 522 "------------------------------------------------\n" |
| 523 "MALLOC: %12" PRIu64 " (%7.1f MiB) Bytes in use by application\n" |
| 524 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes committed\n" |
| 525 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in page heap freelist\n" |
| 526 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in central cache freelist\n" |
| 527 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in transfer cache freelist\n" |
| 528 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in thread cache freelists\n" |
| 529 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in malloc metadata\n" |
| 530 "MALLOC: ------------\n" |
| 531 "MALLOC: = %12" PRIu64 " (%7.1f MiB) Actual memory used (physical + swap)\
n" |
| 532 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes released to OS (aka unmapped)\n
" |
| 533 "MALLOC: ------------\n" |
| 534 "MALLOC: = %12" PRIu64 " (%7.1f MiB) Virtual address space used\n" |
| 535 "MALLOC:\n" |
| 536 "MALLOC: %12" PRIu64 " Spans in use\n" |
| 537 "MALLOC: %12" PRIu64 " Thread heaps in use\n" |
| 538 "MALLOC: %12" PRIu64 " Tcmalloc page size\n" |
| 539 "------------------------------------------------\n" |
| 540 "Call ReleaseFreeMemory() to release freelist memory to the OS" |
| 541 " (via madvise()).\n" |
| 542 "Bytes released to the OS take up virtual address space" |
| 543 " but no physical memory.\n", |
| 544 bytes_in_use_by_app, bytes_in_use_by_app / MiB, |
| 545 stats.pageheap.committed_bytes, stats.pageheap.committed_bytes / MiB, |
| 546 stats.pageheap.free_bytes, stats.pageheap.free_bytes / MiB, |
| 547 stats.central_bytes, stats.central_bytes / MiB, |
| 548 stats.transfer_bytes, stats.transfer_bytes / MiB, |
| 549 stats.thread_bytes, stats.thread_bytes / MiB, |
| 550 stats.metadata_bytes, stats.metadata_bytes / MiB, |
| 551 physical_memory_used, physical_memory_used / MiB, |
| 552 stats.pageheap.unmapped_bytes, stats.pageheap.unmapped_bytes / MiB, |
| 553 virtual_memory_used, virtual_memory_used / MiB, |
| 554 uint64_t(Static::span_allocator()->inuse()), |
| 555 uint64_t(ThreadCache::HeapsInUse()), |
| 556 uint64_t(kPageSize)); |
483 | 557 |
484 if (level >= 2) { | 558 if (level >= 2) { |
485 out->printf("------------------------------------------------\n"); | 559 out->printf("------------------------------------------------\n"); |
486 out->printf("Size class breakdown\n"); | 560 out->printf("Size class breakdown\n"); |
487 out->printf("------------------------------------------------\n"); | 561 out->printf("------------------------------------------------\n"); |
488 uint64_t cumulative = 0; | 562 uint64_t cumulative = 0; |
489 for (int cl = 0; cl < kNumClasses; ++cl) { | 563 for (int cl = 0; cl < kNumClasses; ++cl) { |
490 if (class_count[cl] > 0) { | 564 if (class_count[cl] > 0) { |
491 uint64_t class_bytes = | 565 uint64_t class_bytes = |
492 class_count[cl] * Static::sizemap()->ByteSizeForClass(cl); | 566 class_count[cl] * Static::sizemap()->ByteSizeForClass(cl); |
493 cumulative += class_bytes; | 567 cumulative += class_bytes; |
494 out->printf("class %3d [ %8" PRIuS " bytes ] : " | 568 out->printf("class %3d [ %8" PRIuS " bytes ] : " |
495 "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n", | 569 "%8" PRIu64 " objs; %5.1f MiB; %5.1f cum MiB\n", |
496 cl, Static::sizemap()->ByteSizeForClass(cl), | 570 cl, Static::sizemap()->ByteSizeForClass(cl), |
497 class_count[cl], | 571 class_count[cl], |
498 class_bytes / MB, | 572 class_bytes / MiB, |
499 cumulative / MB); | 573 cumulative / MiB); |
500 } | 574 } |
501 } | 575 } |
502 | 576 |
503 SpinLockHolder h(Static::pageheap_lock()); | 577 SpinLockHolder h(Static::pageheap_lock()); |
504 Static::pageheap()->Dump(out); | 578 Static::pageheap()->Dump(out); |
505 | |
506 out->printf("------------------------------------------------\n"); | |
507 DumpSystemAllocatorStats(out); | |
508 } | 579 } |
509 | |
510 out->printf("------------------------------------------------\n" | |
511 "MALLOC: %12" PRIu64 " (%7.1f MB) Heap size\n" | |
512 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes committed\n" | |
513 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes in use by application\n" | |
514 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in page heap\n" | |
515 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes unmapped in page heap\n" | |
516 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in central cache\n" | |
517 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in transfer cache\n" | |
518 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in thread caches\n" | |
519 "MALLOC: %12" PRIu64 " Spans in use\n" | |
520 "MALLOC: %12" PRIu64 " Thread heaps in use\n" | |
521 "MALLOC: %12" PRIu64 " (%7.1f MB) Metadata allocated\n" | |
522 "------------------------------------------------\n", | |
523 stats.pageheap.system_bytes, stats.pageheap.system_bytes / MB, | |
524 stats.pageheap.committed_bytes, stats.pageheap.committed_bytes / M
B, | |
525 bytes_in_use, bytes_in_use / MB, | |
526 stats.pageheap.free_bytes, stats.pageheap.free_bytes / MB, | |
527 stats.pageheap.unmapped_bytes, stats.pageheap.unmapped_bytes / MB, | |
528 stats.central_bytes, stats.central_bytes / MB, | |
529 stats.transfer_bytes, stats.transfer_bytes / MB, | |
530 stats.thread_bytes, stats.thread_bytes / MB, | |
531 uint64_t(Static::span_allocator()->inuse()), | |
532 uint64_t(ThreadCache::HeapsInUse()), | |
533 stats.metadata_bytes, stats.metadata_bytes / MB); | |
534 } | 580 } |
535 | 581 |
536 static void PrintStats(int level) { | 582 static void PrintStats(int level) { |
537 const int kBufferSize = 16 << 10; | 583 const int kBufferSize = 16 << 10; |
538 char* buffer = new char[kBufferSize]; | 584 char* buffer = new char[kBufferSize]; |
539 TCMalloc_Printer printer(buffer, kBufferSize); | 585 TCMalloc_Printer printer(buffer, kBufferSize); |
540 DumpStats(&printer, level); | 586 DumpStats(&printer, level); |
541 write(STDERR_FILENO, buffer, strlen(buffer)); | 587 write(STDERR_FILENO, buffer, strlen(buffer)); |
542 delete[] buffer; | 588 delete[] buffer; |
543 } | 589 } |
(...skipping 93 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
637 TCMalloc_Printer printer(buffer, buffer_length); | 683 TCMalloc_Printer printer(buffer, buffer_length); |
638 | 684 |
639 // Print level one stats unless lots of space is available | 685 // Print level one stats unless lots of space is available |
640 if (buffer_length < 10000) { | 686 if (buffer_length < 10000) { |
641 DumpStats(&printer, 1); | 687 DumpStats(&printer, 1); |
642 } else { | 688 } else { |
643 DumpStats(&printer, 2); | 689 DumpStats(&printer, 2); |
644 } | 690 } |
645 } | 691 } |
646 | 692 |
| 693 // We may print an extra, tcmalloc-specific warning message here. |
| 694 virtual void GetHeapSample(MallocExtensionWriter* writer) { |
| 695 if (FLAGS_tcmalloc_sample_parameter == 0) { |
| 696 const char* const kWarningMsg = |
| 697 "%warn\n" |
| 698 "%warn This heap profile does not have any data in it, because\n" |
| 699 "%warn the application was run with heap sampling turned off.\n" |
| 700 "%warn To get useful data from GetHeapSample(), you must\n" |
| 701 "%warn set the environment variable TCMALLOC_SAMPLE_PARAMETER to\n" |
| 702 "%warn a positive sampling period, such as 524288.\n" |
| 703 "%warn\n"; |
| 704 writer->append(kWarningMsg, strlen(kWarningMsg)); |
| 705 } |
| 706 MallocExtension::GetHeapSample(writer); |
| 707 } |
| 708 |
647 virtual void** ReadStackTraces(int* sample_period) { | 709 virtual void** ReadStackTraces(int* sample_period) { |
648 tcmalloc::StackTraceTable table; | 710 tcmalloc::StackTraceTable table; |
649 { | 711 { |
650 SpinLockHolder h(Static::pageheap_lock()); | 712 SpinLockHolder h(Static::pageheap_lock()); |
651 Span* sampled = Static::sampled_objects(); | 713 Span* sampled = Static::sampled_objects(); |
652 for (Span* s = sampled->next; s != sampled; s = s->next) { | 714 for (Span* s = sampled->next; s != sampled; s = s->next) { |
653 table.AddTrace(*reinterpret_cast<StackTrace*>(s->objects)); | 715 table.AddTrace(*reinterpret_cast<StackTrace*>(s->objects)); |
654 } | 716 } |
655 } | 717 } |
656 *sample_period = ThreadCache::GetCache()->GetSamplePeriod(); | 718 *sample_period = ThreadCache::GetCache()->GetSamplePeriod(); |
(...skipping 24 matching lines...) Expand all Loading... |
681 } | 743 } |
682 | 744 |
683 if (strcmp(name, "generic.heap_size") == 0) { | 745 if (strcmp(name, "generic.heap_size") == 0) { |
684 TCMallocStats stats; | 746 TCMallocStats stats; |
685 ExtractStats(&stats, NULL); | 747 ExtractStats(&stats, NULL); |
686 *value = stats.pageheap.system_bytes; | 748 *value = stats.pageheap.system_bytes; |
687 return true; | 749 return true; |
688 } | 750 } |
689 | 751 |
690 if (strcmp(name, "tcmalloc.slack_bytes") == 0) { | 752 if (strcmp(name, "tcmalloc.slack_bytes") == 0) { |
691 // We assume that bytes in the page heap are not fragmented too | 753 // Kept for backwards compatibility. Now defined externally as: |
692 // badly, and are therefore available for allocation without | 754 // pageheap_free_bytes + pageheap_unmapped_bytes. |
693 // growing the pageheap system byte count. | |
694 SpinLockHolder l(Static::pageheap_lock()); | 755 SpinLockHolder l(Static::pageheap_lock()); |
695 PageHeap::Stats stats = Static::pageheap()->stats(); | 756 PageHeap::Stats stats = Static::pageheap()->stats(); |
696 *value = stats.free_bytes + stats.unmapped_bytes; | 757 *value = stats.free_bytes + stats.unmapped_bytes; |
697 return true; | 758 return true; |
698 } | 759 } |
699 | 760 |
700 if (strcmp(name, "tcmalloc.pageheap_free_bytes") == 0) { | 761 if (strcmp(name, "tcmalloc.pageheap_free_bytes") == 0) { |
701 SpinLockHolder l(Static::pageheap_lock()); | 762 SpinLockHolder l(Static::pageheap_lock()); |
702 *value = Static::pageheap()->stats().free_bytes; | 763 *value = Static::pageheap()->stats().free_bytes; |
703 return true; | 764 return true; |
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
736 | 797 |
737 return false; | 798 return false; |
738 } | 799 } |
739 | 800 |
740 virtual void MarkThreadIdle() { | 801 virtual void MarkThreadIdle() { |
741 ThreadCache::BecomeIdle(); | 802 ThreadCache::BecomeIdle(); |
742 } | 803 } |
743 | 804 |
744 virtual void MarkThreadBusy(); // Implemented below | 805 virtual void MarkThreadBusy(); // Implemented below |
745 | 806 |
| 807 virtual SysAllocator* GetSystemAllocator() { |
| 808 SpinLockHolder h(Static::pageheap_lock()); |
| 809 return sys_alloc; |
| 810 } |
| 811 |
| 812 virtual void SetSystemAllocator(SysAllocator* alloc) { |
| 813 SpinLockHolder h(Static::pageheap_lock()); |
| 814 sys_alloc = alloc; |
| 815 } |
| 816 |
746 virtual void ReleaseToSystem(size_t num_bytes) { | 817 virtual void ReleaseToSystem(size_t num_bytes) { |
747 SpinLockHolder h(Static::pageheap_lock()); | 818 SpinLockHolder h(Static::pageheap_lock()); |
748 if (num_bytes <= extra_bytes_released_) { | 819 if (num_bytes <= extra_bytes_released_) { |
749 // We released too much on a prior call, so don't release any | 820 // We released too much on a prior call, so don't release any |
750 // more this time. | 821 // more this time. |
751 extra_bytes_released_ = extra_bytes_released_ - num_bytes; | 822 extra_bytes_released_ = extra_bytes_released_ - num_bytes; |
752 return; | 823 return; |
753 } | 824 } |
754 num_bytes = num_bytes - extra_bytes_released_; | 825 num_bytes = num_bytes - extra_bytes_released_; |
755 // num_bytes might be less than one page. If we pass zero to | 826 // num_bytes might be less than one page. If we pass zero to |
(...skipping 26 matching lines...) Expand all Loading... |
782 return alloc_size; | 853 return alloc_size; |
783 } else { | 854 } else { |
784 return tcmalloc::pages(size) << kPageShift; | 855 return tcmalloc::pages(size) << kPageShift; |
785 } | 856 } |
786 } | 857 } |
787 | 858 |
788 // This just calls GetSizeWithCallback, but because that's in an | 859 // This just calls GetSizeWithCallback, but because that's in an |
789 // unnamed namespace, we need to move the definition below it in the | 860 // unnamed namespace, we need to move the definition below it in the |
790 // file. | 861 // file. |
791 virtual size_t GetAllocatedSize(void* ptr); | 862 virtual size_t GetAllocatedSize(void* ptr); |
| 863 |
| 864 virtual void GetFreeListSizes(vector<MallocExtension::FreeListInfo>* v) { |
| 865 static const char* kCentralCacheType = "tcmalloc.central"; |
| 866 static const char* kTransferCacheType = "tcmalloc.transfer"; |
| 867 static const char* kThreadCacheType = "tcmalloc.thread"; |
| 868 static const char* kPageHeapType = "tcmalloc.page"; |
| 869 static const char* kPageHeapUnmappedType = "tcmalloc.page_unmapped"; |
| 870 static const char* kLargeSpanType = "tcmalloc.large"; |
| 871 static const char* kLargeUnmappedSpanType = "tcmalloc.large_unmapped"; |
| 872 |
| 873 v->clear(); |
| 874 |
| 875 // central class information |
| 876 int64 prev_class_size = 0; |
| 877 for (int cl = 1; cl < kNumClasses; ++cl) { |
| 878 size_t class_size = Static::sizemap()->ByteSizeForClass(cl); |
| 879 MallocExtension::FreeListInfo i; |
| 880 i.min_object_size = prev_class_size + 1; |
| 881 i.max_object_size = class_size; |
| 882 i.total_bytes_free = |
| 883 Static::central_cache()[cl].length() * class_size; |
| 884 i.type = kCentralCacheType; |
| 885 v->push_back(i); |
| 886 |
| 887 // transfer cache |
| 888 i.total_bytes_free = |
| 889 Static::central_cache()[cl].tc_length() * class_size; |
| 890 i.type = kTransferCacheType; |
| 891 v->push_back(i); |
| 892 |
| 893 prev_class_size = Static::sizemap()->ByteSizeForClass(cl); |
| 894 } |
| 895 |
| 896 // Add stats from per-thread heaps |
| 897 uint64_t class_count[kNumClasses]; |
| 898 memset(class_count, 0, sizeof(class_count)); |
| 899 { |
| 900 SpinLockHolder h(Static::pageheap_lock()); |
| 901 uint64_t thread_bytes = 0; |
| 902 ThreadCache::GetThreadStats(&thread_bytes, class_count); |
| 903 } |
| 904 |
| 905 prev_class_size = 0; |
| 906 for (int cl = 1; cl < kNumClasses; ++cl) { |
| 907 MallocExtension::FreeListInfo i; |
| 908 i.min_object_size = prev_class_size + 1; |
| 909 i.max_object_size = Static::sizemap()->ByteSizeForClass(cl); |
| 910 i.total_bytes_free = |
| 911 class_count[cl] * Static::sizemap()->ByteSizeForClass(cl); |
| 912 i.type = kThreadCacheType; |
| 913 v->push_back(i); |
| 914 } |
| 915 |
| 916 // append page heap info |
| 917 int64 page_count_normal[kMaxPages]; |
| 918 int64 page_count_returned[kMaxPages]; |
| 919 int64 span_count_normal; |
| 920 int64 span_count_returned; |
| 921 { |
| 922 SpinLockHolder h(Static::pageheap_lock()); |
| 923 Static::pageheap()->GetClassSizes(page_count_normal, |
| 924 page_count_returned, |
| 925 &span_count_normal, |
| 926 &span_count_returned); |
| 927 } |
| 928 |
| 929 // spans: mapped |
| 930 MallocExtension::FreeListInfo span_info; |
| 931 span_info.type = kLargeSpanType; |
| 932 span_info.max_object_size = (numeric_limits<size_t>::max)(); |
| 933 span_info.min_object_size = kMaxPages << kPageShift; |
| 934 span_info.total_bytes_free = span_count_normal << kPageShift; |
| 935 v->push_back(span_info); |
| 936 |
| 937 // spans: unmapped |
| 938 span_info.type = kLargeUnmappedSpanType; |
| 939 span_info.total_bytes_free = span_count_returned << kPageShift; |
| 940 v->push_back(span_info); |
| 941 |
| 942 for (int s = 1; s < kMaxPages; s++) { |
| 943 MallocExtension::FreeListInfo i; |
| 944 i.max_object_size = (s << kPageShift); |
| 945 i.min_object_size = ((s - 1) << kPageShift); |
| 946 |
| 947 i.type = kPageHeapType; |
| 948 i.total_bytes_free = (s << kPageShift) * page_count_normal[s]; |
| 949 v->push_back(i); |
| 950 |
| 951 i.type = kPageHeapUnmappedType; |
| 952 i.total_bytes_free = (s << kPageShift) * page_count_returned[s]; |
| 953 v->push_back(i); |
| 954 } |
| 955 } |
792 }; | 956 }; |
793 | 957 |
794 // The constructor allocates an object to ensure that initialization | 958 // The constructor allocates an object to ensure that initialization |
795 // runs before main(), and therefore we do not have a chance to become | 959 // runs before main(), and therefore we do not have a chance to become |
796 // multi-threaded before initialization. We also create the TSD key | 960 // multi-threaded before initialization. We also create the TSD key |
797 // here. Presumably by the time this constructor runs, glibc is in | 961 // here. Presumably by the time this constructor runs, glibc is in |
798 // good enough shape to handle pthread_key_create(). | 962 // good enough shape to handle pthread_key_create(). |
799 // | 963 // |
800 // The constructor also takes the opportunity to tell STL to use | 964 // The constructor also takes the opportunity to tell STL to use |
801 // tcmalloc. We want to do this early, before construct time, so | 965 // tcmalloc. We want to do this early, before construct time, so |
802 // all user STL allocations go through tcmalloc (which works really | 966 // all user STL allocations go through tcmalloc (which works really |
803 // well for STL). | 967 // well for STL). |
804 // | 968 // |
805 // The destructor prints stats when the program exits. | 969 // The destructor prints stats when the program exits. |
806 static int tcmallocguard_refcount = 0; // no lock needed: runs before main() | 970 static int tcmallocguard_refcount = 0; // no lock needed: runs before main() |
TCMallocGuard::TCMallocGuard() {
  // Only the first guard performs initialization; later constructions
  // just bump the refcount (no lock needed: runs before main()).
  if (tcmallocguard_refcount++ == 0) {
#ifdef HAVE_TLS   // this is true if the cc/ld/libc combo support TLS
    // Check whether the kernel also supports TLS (needs to happen at runtime)
    tcmalloc::CheckIfKernelSupportsTLS();
#endif
#ifdef WIN32_DO_PATCHING
    // patch the windows VirtualAlloc, etc.
    PatchWindowsFunctions();    // defined in windows/patch_functions.cc
#endif
    // Run a malloc/free cycle before and after TSD setup — presumably to
    // force tcmalloc's lazy initialization to complete early; confirm
    // against ThreadCache::InitTSD's requirements.
    tc_free(tc_malloc(1));
    ThreadCache::InitTSD();
    tc_free(tc_malloc(1));
    // Either we, or debugallocation.cc, or valgrind will control memory
    // management.  We register our extension if we're the winner.
#ifdef TCMALLOC_USING_DEBUGALLOCATION
    // Let debugallocation register its extension.
#else
    if (RunningOnValgrind()) {
      // Let Valgrind use its own malloc (so don't register our extension).
    } else {
      MallocExtension::Register(new TCMallocImplementation);
    }
#endif
  }
}
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
880 if (span == NULL) { | 1044 if (span == NULL) { |
881 return NULL; | 1045 return NULL; |
882 } | 1046 } |
883 | 1047 |
884 // Allocate stack trace | 1048 // Allocate stack trace |
885 StackTrace *stack = Static::stacktrace_allocator()->New(); | 1049 StackTrace *stack = Static::stacktrace_allocator()->New(); |
886 if (stack == NULL) { | 1050 if (stack == NULL) { |
887 // Sampling failed because of lack of memory | 1051 // Sampling failed because of lack of memory |
888 return span; | 1052 return span; |
889 } | 1053 } |
890 | |
891 *stack = tmp; | 1054 *stack = tmp; |
892 span->sample = 1; | 1055 span->sample = 1; |
893 span->objects = stack; | 1056 span->objects = stack; |
894 tcmalloc::DLL_Prepend(Static::sampled_objects(), span); | 1057 tcmalloc::DLL_Prepend(Static::sampled_objects(), span); |
895 | 1058 |
896 return SpanToMallocResult(span); | 1059 return SpanToMallocResult(span); |
897 } | 1060 } |
898 | 1061 |
| 1062 namespace { |
| 1063 |
899 // Copy of FLAGS_tcmalloc_large_alloc_report_threshold with | 1064 // Copy of FLAGS_tcmalloc_large_alloc_report_threshold with |
900 // automatic increases factored in. | 1065 // automatic increases factored in. |
901 static int64_t large_alloc_threshold = | 1066 static int64_t large_alloc_threshold = |
902 (kPageSize > FLAGS_tcmalloc_large_alloc_report_threshold | 1067 (kPageSize > FLAGS_tcmalloc_large_alloc_report_threshold |
903 ? kPageSize : FLAGS_tcmalloc_large_alloc_report_threshold); | 1068 ? kPageSize : FLAGS_tcmalloc_large_alloc_report_threshold); |
904 | 1069 |
905 static void ReportLargeAlloc(Length num_pages, void* result) { | 1070 static void ReportLargeAlloc(Length num_pages, void* result) { |
906 StackTrace stack; | 1071 StackTrace stack; |
907 stack.depth = GetStackTrace(stack.stack, tcmalloc::kMaxStackDepth, 1); | 1072 stack.depth = GetStackTrace(stack.stack, tcmalloc::kMaxStackDepth, 1); |
908 | 1073 |
909 static const int N = 1000; | 1074 static const int N = 1000; |
910 char buffer[N]; | 1075 char buffer[N]; |
911 TCMalloc_Printer printer(buffer, N); | 1076 TCMalloc_Printer printer(buffer, N); |
912 printer.printf("tcmalloc: large alloc %llu bytes == %p @ ", | 1077 printer.printf("tcmalloc: large alloc %llu bytes == %p @ ", |
913 static_cast<unsigned long long>(num_pages) << kPageShift, | 1078 static_cast<unsigned long long>(num_pages) << kPageShift, |
914 result); | 1079 result); |
915 for (int i = 0; i < stack.depth; i++) { | 1080 for (int i = 0; i < stack.depth; i++) { |
916 printer.printf(" %p", stack.stack[i]); | 1081 printer.printf(" %p", stack.stack[i]); |
917 } | 1082 } |
918 printer.printf("\n"); | 1083 printer.printf("\n"); |
919 write(STDERR_FILENO, buffer, strlen(buffer)); | 1084 write(STDERR_FILENO, buffer, strlen(buffer)); |
920 } | 1085 } |
921 | 1086 |
922 namespace { | |
923 | |
924 inline void* cpp_alloc(size_t size, bool nothrow); | 1087 inline void* cpp_alloc(size_t size, bool nothrow); |
925 inline void* do_malloc(size_t size); | 1088 inline void* do_malloc(size_t size); |
926 | 1089 |
927 // TODO(willchan): Investigate whether or not inlining this much is harmful to | 1090 // TODO(willchan): Investigate whether or not inlining this much is harmful to |
928 // performance. | 1091 // performance. |
929 // This is equivalent to do_malloc() except when tc_new_mode is set to true. | 1092 // This is equivalent to do_malloc() except when tc_new_mode is set to true. |
930 // Otherwise, it will run the std::new_handler if set. | 1093 // Otherwise, it will run the std::new_handler if set. |
931 inline void* do_malloc_or_cpp_alloc(size_t size) { | 1094 inline void* do_malloc_or_cpp_alloc(size_t size) { |
932 return tc_new_mode ? cpp_alloc(size, true) : do_malloc(size); | 1095 return tc_new_mode ? cpp_alloc(size, true) : do_malloc(size); |
933 } | 1096 } |
934 | 1097 |
935 void* cpp_memalign(size_t align, size_t size); | 1098 void* cpp_memalign(size_t align, size_t size); |
936 void* do_memalign(size_t align, size_t size); | 1099 void* do_memalign(size_t align, size_t size); |
937 | 1100 |
938 inline void* do_memalign_or_cpp_memalign(size_t align, size_t size) { | 1101 inline void* do_memalign_or_cpp_memalign(size_t align, size_t size) { |
939 return tc_new_mode ? cpp_memalign(align, size) : do_memalign(align, size); | 1102 return tc_new_mode ? cpp_memalign(align, size) : do_memalign(align, size); |
940 } | 1103 } |
941 | 1104 |
942 // Must be called with the page lock held. | 1105 // Must be called with the page lock held. |
943 inline bool should_report_large(Length num_pages) { | 1106 inline bool should_report_large(Length num_pages) { |
944 const int64 threshold = large_alloc_threshold; | 1107 const int64 threshold = large_alloc_threshold; |
945 if (threshold > 0 && num_pages >= (threshold >> kPageShift)) { | 1108 if (threshold > 0 && num_pages >= (threshold >> kPageShift)) { |
946 // Increase the threshold by 1/8 every time we generate a report. | 1109 // Increase the threshold by 1/8 every time we generate a report. |
947 // We cap the threshold at 8GB to avoid overflow problems. | 1110 // We cap the threshold at 8GiB to avoid overflow problems. |
948 large_alloc_threshold = (threshold + threshold/8 < 8ll<<30 | 1111 large_alloc_threshold = (threshold + threshold/8 < 8ll<<30 |
949 ? threshold + threshold/8 : 8ll<<30); | 1112 ? threshold + threshold/8 : 8ll<<30); |
950 return true; | 1113 return true; |
951 } | 1114 } |
952 return false; | 1115 return false; |
953 } | 1116 } |
954 | 1117 |
955 // Helper for do_malloc(). | 1118 // Helper for do_malloc(). |
956 inline void* do_malloc_pages(ThreadCache* heap, size_t size) { | 1119 inline void* do_malloc_pages(ThreadCache* heap, size_t size) { |
957 void* result; | 1120 void* result; |
958 bool report_large; | 1121 bool report_large; |
959 | 1122 |
960 Length num_pages = tcmalloc::pages(size); | 1123 Length num_pages = tcmalloc::pages(size); |
961 size = num_pages << kPageShift; | 1124 size = num_pages << kPageShift; |
| 1125 |
962 if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) { | 1126 if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) { |
963 result = DoSampledAllocation(size); | 1127 result = DoSampledAllocation(size); |
964 | 1128 |
965 SpinLockHolder h(Static::pageheap_lock()); | 1129 SpinLockHolder h(Static::pageheap_lock()); |
966 report_large = should_report_large(num_pages); | 1130 report_large = should_report_large(num_pages); |
967 } else { | 1131 } else { |
968 SpinLockHolder h(Static::pageheap_lock()); | 1132 SpinLockHolder h(Static::pageheap_lock()); |
969 Span* span = Static::pageheap()->New(num_pages); | 1133 Span* span = Static::pageheap()->New(num_pages); |
970 result = (span == NULL ? NULL : SpanToMallocResult(span)); | 1134 result = (span == NULL ? NULL : SpanToMallocResult(span)); |
971 report_large = should_report_large(num_pages); | 1135 report_large = should_report_large(num_pages); |
(...skipping 85 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1057 } else { | 1221 } else { |
1058 // Delete directly into central cache | 1222 // Delete directly into central cache |
1059 tcmalloc::SLL_SetNext(ptr, NULL); | 1223 tcmalloc::SLL_SetNext(ptr, NULL); |
1060 Static::central_cache()[cl].InsertRange(ptr, ptr, 1); | 1224 Static::central_cache()[cl].InsertRange(ptr, ptr, 1); |
1061 } | 1225 } |
1062 } else { | 1226 } else { |
1063 SpinLockHolder h(Static::pageheap_lock()); | 1227 SpinLockHolder h(Static::pageheap_lock()); |
1064 ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0); | 1228 ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0); |
1065 ASSERT(span != NULL && span->start == p); | 1229 ASSERT(span != NULL && span->start == p); |
1066 if (span->sample) { | 1230 if (span->sample) { |
| 1231 StackTrace* st = reinterpret_cast<StackTrace*>(span->objects); |
1067 tcmalloc::DLL_Remove(span); | 1232 tcmalloc::DLL_Remove(span); |
1068 Static::stacktrace_allocator()->Delete( | 1233 Static::stacktrace_allocator()->Delete(st); |
1069 reinterpret_cast<StackTrace*>(span->objects)); | |
1070 span->objects = NULL; | 1234 span->objects = NULL; |
1071 } | 1235 } |
1072 Static::pageheap()->Delete(span); | 1236 Static::pageheap()->Delete(span); |
1073 } | 1237 } |
1074 } | 1238 } |
1075 | 1239 |
// The default "do_free" that uses the default callback: frees ptr,
// routing pointers tcmalloc does not own through InvalidFree.
inline void do_free(void* ptr) {
  return do_free_with_callback(ptr, &InvalidFree);
}
(...skipping 69 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1149 } | 1313 } |
1150 } | 1314 } |
1151 | 1315 |
// Default realloc: delegates to do_realloc_with_callback with the
// default (error-reporting) callbacks for invalid pointers and for
// size lookups on pointers tcmalloc does not own.
inline void* do_realloc(void* old_ptr, size_t new_size) {
  return do_realloc_with_callback(old_ptr, new_size,
                                  &InvalidFree, &InvalidGetSizeForRealloc);
}
1156 | 1320 |
1157 // For use by exported routines below that want specific alignments | 1321 // For use by exported routines below that want specific alignments |
1158 // | 1322 // |
1159 // Note: this code can be slow, and can significantly fragment memory. | 1323 // Note: this code can be slow for alignments > 16, and can |
1160 // The expectation is that memalign/posix_memalign/valloc/pvalloc will | 1324 // significantly fragment memory. The expectation is that |
1161 // not be invoked very often. This requirement simplifies our | 1325 // memalign/posix_memalign/valloc/pvalloc will not be invoked very |
1162 // implementation and allows us to tune for expected allocation | 1326 // often. This requirement simplifies our implementation and allows |
1163 // patterns. | 1327 // us to tune for expected allocation patterns. |
1164 void* do_memalign(size_t align, size_t size) { | 1328 void* do_memalign(size_t align, size_t size) { |
1165 ASSERT((align & (align - 1)) == 0); | 1329 ASSERT((align & (align - 1)) == 0); |
1166 ASSERT(align > 0); | 1330 ASSERT(align > 0); |
1167 // Marked in CheckMallocResult(), which is also inside SpanToMallocResult(). | 1331 // Marked in CheckMallocResult(), which is also inside SpanToMallocResult(). |
1168 AddRoomForMark(&size); | 1332 AddRoomForMark(&size); |
1169 if (size + align < size) return NULL; // Overflow | 1333 if (size + align < size) return NULL; // Overflow |
1170 | 1334 |
| 1335 // Fall back to malloc if we would already align this memory access properly. |
| 1336 if (align <= AlignmentForSize(size)) { |
| 1337 void* p = do_malloc(size); |
| 1338 ASSERT((reinterpret_cast<uintptr_t>(p) % align) == 0); |
| 1339 return p; |
| 1340 } |
| 1341 |
1171 if (Static::pageheap() == NULL) ThreadCache::InitModule(); | 1342 if (Static::pageheap() == NULL) ThreadCache::InitModule(); |
1172 | 1343 |
1173 // Allocate at least one byte to avoid boundary conditions below | 1344 // Allocate at least one byte to avoid boundary conditions below |
1174 if (size == 0) size = 1; | 1345 if (size == 0) size = 1; |
1175 | 1346 |
1176 if (size <= kMaxSize && align < kPageSize) { | 1347 if (size <= kMaxSize && align < kPageSize) { |
1177 // Search through acceptable size classes looking for one with | 1348 // Search through acceptable size classes looking for one with |
1178 // enough alignment. This depends on the fact that | 1349 // enough alignment. This depends on the fact that |
1179 // InitSizeClasses() currently produces several size classes that | 1350 // InitSizeClasses() currently produces several size classes that |
1180 // are aligned at powers of two. We will waste time and space if | 1351 // are aligned at powers of two. We will waste time and space if |
(...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1233 // Helpers for use by exported routines below: | 1404 // Helpers for use by exported routines below: |
1234 | 1405 |
// Prints allocator statistics at verbosity level 1; backs
// tc_malloc_stats().
inline void do_malloc_stats() {
  PrintStats(1);
}
1238 | 1409 |
// tcmalloc supports no mallopt tuning parameters: every request is
// rejected regardless of cmd/value.
inline int do_mallopt(int cmd, int value) {
  // Unconditionally indicate error (mallopt's failure code is 0 in
  // glibc, but this matches the value callers of do_mallopt expect).
  return 1;  // Indicates error
}
1242 | 1413 |
#ifdef HAVE_STRUCT_MALLINFO
// Builds a glibc-style mallinfo snapshot from tcmalloc's own stats.
inline struct mallinfo do_mallinfo() {
  TCMallocStats stats;
  ExtractStats(&stats, NULL);

  // Just some of the fields are filled in.
  struct mallinfo info;
  memset(&info, 0, sizeof(info));

  // Unfortunately, the struct contains "int" field, so some of the
  // size values will be truncated.
  info.arena = static_cast<int>(stats.pageheap.system_bytes);
  // fsmblks: bytes sitting in thread, central, and transfer caches.
  info.fsmblks = static_cast<int>(stats.thread_bytes
                                  + stats.central_bytes
                                  + stats.transfer_bytes);
  // fordblks: free/unmapped bytes held by the page heap.
  info.fordblks = static_cast<int>(stats.pageheap.free_bytes +
                                   stats.pageheap.unmapped_bytes);
  // uordblks: everything obtained from the system minus all free bytes,
  // i.e. bytes currently handed out to the application.
  info.uordblks = static_cast<int>(stats.pageheap.system_bytes
                                   - stats.thread_bytes
                                   - stats.central_bytes
                                   - stats.transfer_bytes
                                   - stats.pageheap.free_bytes
                                   - stats.pageheap.unmapped_bytes);

  return info;
}
#endif  // HAVE_STRUCT_MALLINFO
1270 | 1441 |
1271 static SpinLock set_new_handler_lock(SpinLock::LINKER_INITIALIZED); | 1442 static SpinLock set_new_handler_lock(SpinLock::LINKER_INITIALIZED); |
1272 | 1443 |
1273 inline void* cpp_alloc(size_t size, bool nothrow) { | 1444 inline void* cpp_alloc(size_t size, bool nothrow) { |
1274 for (;;) { | 1445 for (;;) { |
1275 void* p = do_malloc(size); | 1446 void* p = do_malloc(size); |
1276 #ifdef PREANSINEW | 1447 #ifdef PREANSINEW |
1277 return p; | 1448 return p; |
1278 #else | 1449 #else |
1279 if (p == NULL) { // allocation failed | 1450 if (p == NULL) { // allocation failed |
(...skipping 103 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1383 //------------------------------------------------------------------- | 1554 //------------------------------------------------------------------- |
1384 | 1555 |
// Reports the tcmalloc version.  Each out-parameter may be NULL if the
// caller is not interested in that component.  Returns the full
// version string.
extern "C" PERFTOOLS_DLL_DECL const char* tc_version(
    int* major, int* minor, const char** patch) __THROW {
  if (major) *major = TC_VERSION_MAJOR;
  if (minor) *minor = TC_VERSION_MINOR;
  if (patch) *patch = TC_VERSION_PATCH;
  return TC_VERSION_STRING;
}
1392 | 1563 |
| 1564 // This function behaves similarly to MSVC's _set_new_mode. |
| 1565 // If flag is 0 (default), calls to malloc will behave normally. |
| 1566 // If flag is 1, calls to malloc will behave like calls to new, |
| 1567 // and the std_new_handler will be invoked on failure. |
| 1568 // Returns the previous mode. |
| 1569 extern "C" PERFTOOLS_DLL_DECL int tc_set_new_mode(int flag) __THROW { |
| 1570 int old_mode = tc_new_mode; |
| 1571 tc_new_mode = flag; |
| 1572 return old_mode; |
| 1573 } |
| 1574 |
| 1575 #ifndef TCMALLOC_USING_DEBUGALLOCATION // debugallocation.cc defines its own |
| 1576 |
1393 // CAVEAT: The code structure below ensures that MallocHook methods are always | 1577 // CAVEAT: The code structure below ensures that MallocHook methods are always |
1394 // called from the stack frame of the invoked allocation function. | 1578 // called from the stack frame of the invoked allocation function. |
1395 // heap-checker.cc depends on this to start a stack trace from | 1579 // heap-checker.cc depends on this to start a stack trace from |
1396 // the call to the (de)allocation function. | 1580 // the call to the (de)allocation function. |
1397 | 1581 |
// C malloc entry point: allocates size bytes (honoring tc_new_mode via
// do_malloc_or_cpp_alloc) and notifies any registered new-hooks.  The
// hook call stays in this frame so stack traces start here.
extern "C" PERFTOOLS_DLL_DECL void* tc_malloc(size_t size) __THROW {
  void* result = do_malloc_or_cpp_alloc(size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
(...skipping 64 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1467 void* p = cpp_alloc(size, false); | 1651 void* p = cpp_alloc(size, false); |
1468 // We keep this next instruction out of cpp_alloc for a reason: when | 1652 // We keep this next instruction out of cpp_alloc for a reason: when |
1469 // it's in, and new just calls cpp_alloc, the optimizer may fold the | 1653 // it's in, and new just calls cpp_alloc, the optimizer may fold the |
1470 // new call into cpp_alloc, which messes up our whole section-based | 1654 // new call into cpp_alloc, which messes up our whole section-based |
1471 // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc | 1655 // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc |
1472 // isn't the last thing this fn calls, and prevents the folding. | 1656 // isn't the last thing this fn calls, and prevents the folding. |
1473 MallocHook::InvokeNewHook(p, size); | 1657 MallocHook::InvokeNewHook(p, size); |
1474 return p; | 1658 return p; |
1475 } | 1659 } |
1476 | 1660 |
1477 extern "C" PERFTOOLS_DLL_DECL void* tc_newarray_nothrow(size_t size, const std::
nothrow_t&) __THROW { | 1661 extern "C" PERFTOOLS_DLL_DECL void* tc_newarray_nothrow(size_t size, const std::
nothrow_t&) |
| 1662 __THROW { |
1478 void* p = cpp_alloc(size, true); | 1663 void* p = cpp_alloc(size, true); |
1479 MallocHook::InvokeNewHook(p, size); | 1664 MallocHook::InvokeNewHook(p, size); |
1480 return p; | 1665 return p; |
1481 } | 1666 } |
1482 | 1667 |
// operator delete[] entry point: runs registered delete-hooks, then
// frees through the default free path.
extern "C" PERFTOOLS_DLL_DECL void tc_deletearray(void* p) __THROW {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}
1487 | 1672 |
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
// C mallopt entry point; delegates to do_mallopt, which always
// reports failure (tcmalloc supports no mallopt parameters).
extern "C" PERFTOOLS_DLL_DECL int tc_mallopt(int cmd, int value) __THROW {
  return do_mallopt(cmd, value);
}
1547 | 1732 |
#ifdef HAVE_STRUCT_MALLINFO
// C mallinfo entry point; only compiled where struct mallinfo exists.
extern "C" PERFTOOLS_DLL_DECL struct mallinfo tc_mallinfo(void) __THROW {
  return do_mallinfo();
}
#endif
1553 | 1738 |
// Returns the allocated size of the block at ptr, using the default
// error callback for pointers tcmalloc does not own.
extern "C" PERFTOOLS_DLL_DECL size_t tc_malloc_size(void* ptr) __THROW {
  return GetSizeWithCallback(ptr, &InvalidGetAllocatedSize);
}
1557 | 1742 |
1558 // This function behaves similarly to MSVC's _set_new_mode. | |
1559 // If flag is 0 (default), calls to malloc will behave normally. | |
1560 // If flag is 1, calls to malloc will behave like calls to new, | |
1561 // and the std_new_handler will be invoked on failure. | |
1562 // Returns the previous mode. | |
1563 extern "C" PERFTOOLS_DLL_DECL int tc_set_new_mode(int flag) __THROW { | |
1564 int old_mode = tc_new_mode; | |
1565 tc_new_mode = flag; | |
1566 return old_mode; | |
1567 } | |
1568 | |
1569 | 1743 |
1570 // Override __libc_memalign in libc on linux boxes specially. | 1744 // Override __libc_memalign in libc on linux boxes specially. |
1571 // They have a bug in libc that causes them to (very rarely) allocate | 1745 // They have a bug in libc that causes them to (very rarely) allocate |
1572 // with __libc_memalign() yet deallocate with free() and the | 1746 // with __libc_memalign() yet deallocate with free() and the |
1573 // definitions above don't catch it. | 1747 // definitions above don't catch it. |
1574 // This function is an exception to the rule of calling MallocHook method | 1748 // This function is an exception to the rule of calling MallocHook method |
1575 // from the stack frame of the allocation function; | 1749 // from the stack frame of the allocation function; |
1576 // heap-checker handles this special case explicitly. | 1750 // heap-checker handles this special case explicitly. |
1577 #ifndef TCMALLOC_FOR_DEBUGALLOCATION | |
// Forward declaration carries ATTRIBUTE_SECTION(google_malloc) so this
// function is recognized as an allocation routine by the section-based
// stack-trace machinery.
static void *MemalignOverride(size_t align, size_t size, const void *caller)
  __THROW ATTRIBUTE_SECTION(google_malloc);

// Replacement for glibc's __libc_memalign; caller is supplied by glibc
// and intentionally unused here.
static void *MemalignOverride(size_t align, size_t size, const void *caller)
  __THROW {
  void* result = do_memalign_or_cpp_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
// Installing into glibc's hook pointer makes __libc_memalign calls land
// in MemalignOverride.
void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride;
1588 #endif // #ifndef TCMALLOC_FOR_DEBUGALLOCATION | 1761 #endif // TCMALLOC_USING_DEBUGALLOCATION |
1589 | 1762 |
1590 // ---Double free() debugging implementation ----------------------------------- | 1763 // ---Double free() debugging implementation ----------------------------------- |
1591 // We will put a mark at the extreme end of each allocation block. We make | 1764 // We will put a mark at the extreme end of each allocation block. We make |
1592 // sure that we always allocate enough "extra memory" that we can fit in the | 1765 // sure that we always allocate enough "extra memory" that we can fit in the |
1593 // mark, and still provide the requested usable region. If ever that mark is | 1766 // mark, and still provide the requested usable region. If ever that mark is |
1594 // not as expected, then we know that the user is corrupting memory beyond their | 1767 // not as expected, then we know that the user is corrupting memory beyond their |
1595 // request size, or that they have called free a second time without having | 1768 // request size, or that they have called free a second time without having |
1596 // the memory allocated (again). This allows us to spot most double free()s, | 1769 // the memory allocated (again). This allows us to spot most double free()s, |
1597 // but some can "slip by" or confuse our logic if the caller reallocates memory | 1770 // but some can "slip by" or confuse our logic if the caller reallocates memory |
1598 // (for a second use) before performing an evil double-free of a first | 1771 // (for a second use) before performing an evil double-free of a first |
(...skipping 147 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1746 *mark = ~allocated_mark; // Distinctively not allocated. | 1919 *mark = ~allocated_mark; // Distinctively not allocated. |
1747 } | 1920 } |
1748 | 1921 |
// Stamps the validation mark for ptr's block (the mark lives at the
// extreme end of the allocation, per the design comment above) so a
// later free can detect buffer overruns and most double frees.
// A NULL ptr is a no-op.
static void MarkAllocatedRegion(void* ptr) {
  if (ptr == NULL) return;
  MarkType* mark = GetMarkLocation(ptr);
  *mark = GetMarkValue(ptr, mark);
}
1754 | 1927 |
1755 #endif // TCMALLOC_VALIDATION | 1928 #endif // TCMALLOC_VALIDATION |
OLD | NEW |