| OLD | NEW |
| 1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
| 2 // All rights reserved. | 2 // All rights reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
| 9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
| 10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
| (...skipping 106 matching lines...) |
| 117 #include "base/spinlock.h" | 117 #include "base/spinlock.h" |
| 118 #include "common.h" | 118 #include "common.h" |
| 119 #include "malloc_hook-inl.h" | 119 #include "malloc_hook-inl.h" |
| 120 #include <google/malloc_hook.h> | 120 #include <google/malloc_hook.h> |
| 121 #include <google/malloc_extension.h> | 121 #include <google/malloc_extension.h> |
| 122 #include "central_freelist.h" | 122 #include "central_freelist.h" |
| 123 #include "internal_logging.h" | 123 #include "internal_logging.h" |
| 124 #include "linked_list.h" | 124 #include "linked_list.h" |
| 125 #include "maybe_threads.h" | 125 #include "maybe_threads.h" |
| 126 #include "page_heap.h" | 126 #include "page_heap.h" |
| 127 #include "page_heap_allocator.h" |
| 127 #include "pagemap.h" | 128 #include "pagemap.h" |
| 128 #include "span.h" | 129 #include "span.h" |
| 129 #include "static_vars.h" | 130 #include "static_vars.h" |
| 130 #include "system-alloc.h" | 131 #include "system-alloc.h" |
| 131 #include "tcmalloc_guard.h" | 132 #include "tcmalloc_guard.h" |
| 132 #include "thread_cache.h" | 133 #include "thread_cache.h" |
| 133 | 134 |
| 134 #if (defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)) && !defined(WIN32_OVERRIDE_ALLOCATORS) | 135 #if (defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)) && !defined(WIN32_OVERRIDE_ALLOCATORS) |
| 135 # define WIN32_DO_PATCHING 1 | 136 # define WIN32_DO_PATCHING 1 |
| 136 #endif | 137 #endif |
| 137 | 138 |
| 138 using std::max; | |
| 139 using tcmalloc::PageHeap; | 139 using tcmalloc::PageHeap; |
| 140 using tcmalloc::PageHeapAllocator; |
| 140 using tcmalloc::SizeMap; | 141 using tcmalloc::SizeMap; |
| 141 using tcmalloc::Span; | 142 using tcmalloc::Span; |
| 142 using tcmalloc::StackTrace; | 143 using tcmalloc::StackTrace; |
| 143 using tcmalloc::Static; | 144 using tcmalloc::Static; |
| 144 using tcmalloc::ThreadCache; | 145 using tcmalloc::ThreadCache; |
| 145 | 146 |
| 146 // __THROW is defined in glibc systems. It means, counter-intuitively, | 147 // __THROW is defined in glibc systems. It means, counter-intuitively, |
| 147 // "This function will never throw an exception." It's an optional | 148 // "This function will never throw an exception." It's an optional |
| 148 // optimization tool, but we may need to use it to match glibc prototypes. | 149 // optimization tool, but we may need to use it to match glibc prototypes. |
| 149 #ifndef __THROW // I guess we're not on a glibc system | 150 #ifndef __THROW // I guess we're not on a glibc system |
| (...skipping 71 matching lines...) |
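Reviewer aside: a hedged sketch of the usual `__THROW` fallback idiom (an assumption about the common pattern, not a reconstruction of the elided lines). On glibc, `<sys/cdefs.h>` supplies `__THROW`; elsewhere it is simply defined away so the prototypes below still compile.

```cpp
// Hedged sketch: make __THROW a no-op on non-glibc toolchains so the
// glibc-matching prototypes in this file remain valid C++.
#ifndef __THROW
# define __THROW   // expands to nothing when glibc did not define it
#endif
```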
| 221 void* tc_newarray(size_t size) | 222 void* tc_newarray(size_t size) |
| 222 ATTRIBUTE_SECTION(google_malloc); | 223 ATTRIBUTE_SECTION(google_malloc); |
| 223 void tc_deletearray(void* p) __THROW | 224 void tc_deletearray(void* p) __THROW |
| 224 ATTRIBUTE_SECTION(google_malloc); | 225 ATTRIBUTE_SECTION(google_malloc); |
| 225 | 226 |
| 226 // And the nothrow variants of these: | 227 // And the nothrow variants of these: |
| 227 void* tc_new_nothrow(size_t size, const std::nothrow_t&) __THROW | 228 void* tc_new_nothrow(size_t size, const std::nothrow_t&) __THROW |
| 228 ATTRIBUTE_SECTION(google_malloc); | 229 ATTRIBUTE_SECTION(google_malloc); |
| 229 void* tc_newarray_nothrow(size_t size, const std::nothrow_t&) __THROW | 230 void* tc_newarray_nothrow(size_t size, const std::nothrow_t&) __THROW |
| 230 ATTRIBUTE_SECTION(google_malloc); | 231 ATTRIBUTE_SECTION(google_malloc); |
| 231 // Surprisingly, compilers use a nothrow-delete internally. See, eg: | 232 } |
| 232 // http://www.dinkumware.com/manuals/?manual=compleat&page=new.html | |
| 233 void tc_delete_nothrow(void* ptr, const std::nothrow_t&) __THROW | |
| 234 ATTRIBUTE_SECTION(google_malloc); | |
| 235 void tc_deletearray_nothrow(void* ptr, const std::nothrow_t&) __THROW | |
| 236 ATTRIBUTE_SECTION(google_malloc); | |
| 237 } // extern "C" | |
| 238 | 233 |
| 239 // Override the libc functions to prefer our own instead. This comes | 234 // Override the libc functions to prefer our own instead. This comes |
| 240 // first so code in tcmalloc.cc can use the overridden versions. One | 235 // first so code in tcmalloc.cc can use the overridden versions. One |
| 241 // exception: in windows, by default, we patch our code into these | 236 // exception: in windows, by default, we patch our code into these |
| 242 // functions (via src/windows/patch_function.cc) rather than override | 237 // functions (via src/windows/patch_function.cc) rather than override |
| 243 // them. In that case, we don't want to do this overriding here. | 238 // them. In that case, we don't want to do this overriding here. |
| 244 #if !defined(WIN32_DO_PATCHING) && !defined(TCMALLOC_FOR_DEBUGALLOCATION) | 239 #ifndef WIN32_DO_PATCHING |
| 245 | 240 |
| 246 // TODO(mbelshe): Turn off TCMalloc's symbols for libc. We do that | 241 // TODO(mbelshe): Turn off TCMalloc's symbols for libc. We do that |
| 247 // elsewhere. | 242 // elsewhere. |
| 248 #ifndef _WIN32 | 243 #if 0 |
| 249 | 244 |
| 250 #if defined(__GNUC__) && !defined(__MACH__) | 245 #if defined(__GNUC__) && !defined(__MACH__) |
| 251 // Potentially faster variants that use the gcc alias extension. | 246 // Potentially faster variants that use the gcc alias extension. |
| 247 // Mach-O (Darwin) does not support weak aliases, hence the __MACH__ check. |
| 252 // FreeBSD does support aliases, but apparently not correctly. :-( | 248 // FreeBSD does support aliases, but apparently not correctly. :-( |
| 253 // NOTE: we make many of these symbols weak, but do so in the makefile | |
| 254 // (via objcopy -W) and not here. That ends up being more portable. | |
| 255 # define ALIAS(x) __attribute__ ((alias (x))) | 249 # define ALIAS(x) __attribute__ ((alias (x))) |
| 256 void* operator new(size_t size) ALIAS("tc_new"); | 250 void* operator new(size_t size) ALIAS("tc_new"); |
| 257 void operator delete(void* p) __THROW ALIAS("tc_delete"); | 251 void operator delete(void* p) __THROW ALIAS("tc_delete"); |
| 258 void* operator new[](size_t size) ALIAS("tc_newarray"); | 252 void* operator new[](size_t size) ALIAS("tc_newarray"); |
| 259 void operator delete[](void* p) __THROW ALIAS("tc_deletearray"); | 253 void operator delete[](void* p) __THROW ALIAS("tc_deletearray"); |
| 260 void* operator new(size_t size, const std::nothrow_t&) __THROW | 254 void* operator new(size_t size, const std::nothrow_t&) __THROW |
| 261 ALIAS("tc_new_nothrow"); | 255 ALIAS("tc_new_nothrow"); |
| 262 void* operator new[](size_t size, const std::nothrow_t&) __THROW | 256 void* operator new[](size_t size, const std::nothrow_t&) __THROW |
| 263 ALIAS("tc_newarray_nothrow"); | 257 ALIAS("tc_newarray_nothrow"); |
| 264 void operator delete(void* size, const std::nothrow_t&) __THROW | |
| 265 ALIAS("tc_delete_nothrow"); | |
| 266 void operator delete[](void* size, const std::nothrow_t&) __THROW | |
| 267 ALIAS("tc_deletearray_nothrow"); | |
| 268 extern "C" { | 258 extern "C" { |
| 269 void* malloc(size_t size) __THROW ALIAS("tc_malloc"); | 259 void* malloc(size_t size) __THROW ALIAS("tc_malloc"); |
| 270 void free(void* ptr) __THROW ALIAS("tc_free"); | 260 void free(void* ptr) __THROW ALIAS("tc_free"); |
| 271 void* realloc(void* ptr, size_t size) __THROW ALIAS("tc_realloc"); | 261 void* realloc(void* ptr, size_t size) __THROW ALIAS("tc_realloc"); |
| 272 void* calloc(size_t n, size_t size) __THROW ALIAS("tc_calloc"); | 262 void* calloc(size_t n, size_t size) __THROW ALIAS("tc_calloc"); |
| 273 void cfree(void* ptr) __THROW ALIAS("tc_cfree"); | 263 void cfree(void* ptr) __THROW ALIAS("tc_cfree"); |
| 274 void* memalign(size_t align, size_t s) __THROW ALIAS("tc_memalign"); | 264 void* memalign(size_t align, size_t s) __THROW ALIAS("tc_memalign"); |
| 275 void* valloc(size_t size) __THROW ALIAS("tc_valloc"); | 265 void* valloc(size_t size) __THROW ALIAS("tc_valloc"); |
| 276 void* pvalloc(size_t size) __THROW ALIAS("tc_pvalloc"); | 266 void* pvalloc(size_t size) __THROW ALIAS("tc_pvalloc"); |
| 277 int posix_memalign(void** r, size_t a, size_t s) __THROW | 267 int posix_memalign(void** r, size_t a, size_t s) __THROW |
| 278 ALIAS("tc_posix_memalign"); | 268 ALIAS("tc_posix_memalign"); |
| 279 void malloc_stats(void) __THROW ALIAS("tc_malloc_stats"); | 269 void malloc_stats(void) __THROW ALIAS("tc_malloc_stats"); |
| 280 int mallopt(int cmd, int value) __THROW ALIAS("tc_mallopt"); | 270 int mallopt(int cmd, int value) __THROW ALIAS("tc_mallopt"); |
| 281 #ifdef HAVE_STRUCT_MALLINFO | 271 #ifdef HAVE_STRUCT_MALLINFO |
| 282 struct mallinfo mallinfo(void) __THROW ALIAS("tc_mallinfo"); | 272 struct mallinfo mallinfo(void) __THROW ALIAS("tc_mallinfo"); |
| 283 #endif | 273 #endif |
| 274 // Some library routines on RedHat 9 allocate memory using malloc() |
| 275 // and free it using __libc_free() (or vice-versa). Since we provide |
| 276 // our own implementations of malloc/free, we need to make sure that |
| 277 // the __libc_XXX variants (defined as part of glibc) also point to |
| 278 // the same implementations. |
| 279 # if defined(__GLIBC__) |
| 280 void* __libc_malloc(size_t size) ALIAS("tc_malloc"); |
| 281 void __libc_free(void* ptr) ALIAS("tc_free"); |
| 282 void* __libc_realloc(void* ptr, size_t size) ALIAS("tc_realloc"); |
| 283 void* __libc_calloc(size_t n, size_t size) ALIAS("tc_calloc"); |
| 284 void __libc_cfree(void* ptr) ALIAS("tc_cfree"); |
| 285 void* __libc_memalign(size_t align, size_t s) ALIAS("tc_memalign"); |
| 286 void* __libc_valloc(size_t size) ALIAS("tc_valloc"); |
| 287 void* __libc_pvalloc(size_t size) ALIAS("tc_pvalloc"); |
| 288 int __posix_memalign(void** r, size_t a, size_t s) ALIAS("tc_posix_memalign"); |
| 289 # define HAVE_ALIASED___LIBC 1 |
| 290 # endif // #if defined(__GLIBC__) |
| 284 } // extern "C" | 291 } // extern "C" |
| 285 #else // #if defined(__GNUC__) && !defined(__MACH__) | 292 # undef ALIAS |
| 293 #else |
| 286 // Portable wrappers | 294 // Portable wrappers |
| 287 void* operator new(size_t size) { return tc_new(size); } | 295 void* operator new(size_t size) { return tc_new(size); } |
| 288 void operator delete(void* p) __THROW { tc_delete(p); } | 296 void operator delete(void* p) __THROW { tc_delete(p); } |
| 289 void* operator new[](size_t size) { return tc_newarray(size); } | 297 void* operator new[](size_t size) { return tc_newarray(size); } |
| 290 void operator delete[](void* p) __THROW { tc_deletearray(p); } | 298 void operator delete[](void* p) __THROW { tc_deletearray(p); } |
| 291 void* operator new(size_t size, const std::nothrow_t& nt) __THROW { | 299 void* operator new(size_t size, const std::nothrow_t& nt) __THROW { |
| 292 return tc_new_nothrow(size, nt); | 300 return tc_new_nothrow(size, nt); |
| 293 } | 301 } |
| 294 void* operator new[](size_t size, const std::nothrow_t& nt) __THROW { | 302 void* operator new[](size_t size, const std::nothrow_t& nt) __THROW { |
| 295 return tc_newarray_nothrow(size, nt); | 303 return tc_newarray_nothrow(size, nt); |
| 296 } | 304 } |
| 297 void operator delete(void* ptr, const std::nothrow_t& nt) __THROW { | |
| 298 return tc_delete_nothrow(ptr, nt); | |
| 299 } | |
| 300 void operator delete[](void* ptr, const std::nothrow_t& nt) __THROW { | |
| 301 return tc_deletearray_nothrow(ptr, nt); | |
| 302 } | |
| 303 extern "C" { | 305 extern "C" { |
| 304 void* malloc(size_t s) __THROW { return tc_malloc(s); } | 306 void* malloc(size_t s) __THROW { return tc_malloc(s); } |
| 305 void free(void* p) __THROW { tc_free(p); } | 307 void free(void* p) __THROW { tc_free(p); } |
| 306 void* realloc(void* p, size_t s) __THROW { return tc_realloc(p, s); } | 308 void* realloc(void* p, size_t s) __THROW { return tc_realloc(p, s); } |
| 307 void* calloc(size_t n, size_t s) __THROW { return tc_calloc(n, s); } | 309 void* calloc(size_t n, size_t s) __THROW { return tc_calloc(n, s); } |
| 308 void cfree(void* p) __THROW { tc_cfree(p); } | 310 void cfree(void* p) __THROW { tc_cfree(p); } |
| 309 void* memalign(size_t a, size_t s) __THROW { return tc_memalign(a, s); } | 311 void* memalign(size_t a, size_t s) __THROW { return tc_memalign(a, s); } |
| 310 void* valloc(size_t s) __THROW { return tc_valloc(s); } | 312 void* valloc(size_t s) __THROW { return tc_valloc(s); } |
| 311 void* pvalloc(size_t s) __THROW { return tc_pvalloc(s); } | 313 void* pvalloc(size_t s) __THROW { return tc_pvalloc(s); } |
| 312 int posix_memalign(void** r, size_t a, size_t s) __THROW { | 314 int posix_memalign(void** r, size_t a, size_t s) __THROW { |
| 313 return tc_posix_memalign(r, a, s); | 315 return tc_posix_memalign(r, a, s); |
| 314 } | 316 } |
| 315 void malloc_stats(void) __THROW { tc_malloc_stats(); } | 317 void malloc_stats(void) __THROW { tc_malloc_stats(); } |
| 316 int mallopt(int cmd, int v) __THROW { return tc_mallopt(cmd, v); } | 318 int mallopt(int cmd, int v) __THROW { return tc_mallopt(cmd, v); } |
| 317 #ifdef HAVE_STRUCT_MALLINFO | 319 #ifdef HAVE_STRUCT_MALLINFO |
| 318 struct mallinfo mallinfo(void) __THROW { return tc_mallinfo(); } | 320 struct mallinfo mallinfo(void) __THROW { return tc_mallinfo(); } |
| 319 #endif | 321 #endif |
| 320 } // extern "C" | 322 } // extern C |
| 321 #endif // #if defined(__GNUC__) | 323 #endif // #if defined(__GNUC__) |
| 322 | 324 |
| 323 // Some library routines on RedHat 9 allocate memory using malloc() | 325 #ifndef HAVE_ALIASED___LIBC |
| 324 // and free it using __libc_free() (or vice-versa). Since we provide | |
| 325 // our own implementations of malloc/free, we need to make sure that | |
| 326 // the __libc_XXX variants (defined as part of glibc) also point to | |
| 327 // the same implementations. | |
| 328 #ifdef __GLIBC__ // only glibc defines __libc_* | |
| 329 extern "C" { | 326 extern "C" { |
| 330 #ifdef ALIAS | |
| 331 void* __libc_malloc(size_t size) ALIAS("tc_malloc"); | |
| 332 void __libc_free(void* ptr) ALIAS("tc_free"); | |
| 333 void* __libc_realloc(void* ptr, size_t size) ALIAS("tc_realloc"); | |
| 334 void* __libc_calloc(size_t n, size_t size) ALIAS("tc_calloc"); | |
| 335 void __libc_cfree(void* ptr) ALIAS("tc_cfree"); | |
| 336 void* __libc_memalign(size_t align, size_t s) ALIAS("tc_memalign"); | |
| 337 void* __libc_valloc(size_t size) ALIAS("tc_valloc"); | |
| 338 void* __libc_pvalloc(size_t size) ALIAS("tc_pvalloc"); | |
| 339 int __posix_memalign(void** r, size_t a, size_t s) ALIAS("tc_posix_memalign"); | |
| 340 #else // #ifdef ALIAS | |
| 341 void* __libc_malloc(size_t size) { return malloc(size); } | 327 void* __libc_malloc(size_t size) { return malloc(size); } |
| 342 void __libc_free(void* ptr) { free(ptr); } | 328 void __libc_free(void* ptr) { free(ptr); } |
| 343 void* __libc_realloc(void* ptr, size_t size) { return realloc(ptr, size); } | 329 void* __libc_realloc(void* ptr, size_t size) { return realloc(ptr, size); } |
| 344 void* __libc_calloc(size_t n, size_t size) { return calloc(n, size); } | 330 void* __libc_calloc(size_t n, size_t size) { return calloc(n, size); } |
| 345 void __libc_cfree(void* ptr) { cfree(ptr); } | 331 void __libc_cfree(void* ptr) { cfree(ptr); } |
| 346 void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); } | 332 void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); } |
| 347 void* __libc_valloc(size_t size) { return valloc(size); } | 333 void* __libc_valloc(size_t size) { return valloc(size); } |
| 348 void* __libc_pvalloc(size_t size) { return pvalloc(size); } | 334 void* __libc_pvalloc(size_t size) { return pvalloc(size); } |
| 349 int __posix_memalign(void** r, size_t a, size_t s) { | 335 int __posix_memalign(void** r, size_t a, size_t s) { |
| 350 return posix_memalign(r, a, s); | 336 return posix_memalign(r, a, s); |
| 351 } | 337 } |
| 352 #endif // #ifdef ALIAS | |
| 353 } // extern "C" | 338 } // extern "C" |
| 354 #endif // ifdef __GLIBC__ | 339 #endif // #ifndef HAVE_ALIASED___LIBC |
| 355 | 340 |
| 356 #endif // #ifndef _WIN32 | 341 #endif // #ifdef 0 |
| 357 #undef ALIAS | |
| 358 | 342 |
| 359 #endif // #ifndef(WIN32_DO_PATCHING) && ndef(TCMALLOC_FOR_DEBUGALLOCATION) | 343 #endif // #ifndef WIN32_DO_PATCHING |
| 360 | 344 |
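Reviewer aside: the `ALIAS` overriding above relies on GCC's alias attribute, which makes two symbols resolve to one definition at link time. A minimal, self-contained illustration (names here are illustrative and not part of the patch):

```cpp
// Minimal illustration of the GCC alias extension: both symbols refer to
// the same definition, so callers of either name run impl_add(). The block
// above uses the same mechanism to point malloc/free and operator
// new/delete at the tc_* entry points.
extern "C" int impl_add(int a, int b) { return a + b; }
extern "C" int add_alias(int a, int b) __attribute__((alias("impl_add")));
```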
| 361 | 345 |
| 362 // ----------------------- IMPLEMENTATION ------------------------------- | 346 // ----------------------- IMPLEMENTATION ------------------------------- |
| 363 | 347 |
| 364 static int tc_new_mode = 0; // See tc_set_new_mode(). | 348 // These routines are called by free(), realloc(), etc. if the pointer is |
| 365 | 349 // invalid. This is a cheap (source-editing required) kind of exception |
| 366 // Routines such as free() and realloc() catch some erroneous pointers | 350 // handling for these routines. |
| 367 // passed to them, and invoke the below when they do. (An erroneous pointer | |
| 368 // won't be caught if it's within a valid span or a stale span for which | |
| 369 // the pagemap cache has a non-zero sizeclass.) This is a cheap (source-editing | |
| 370 // required) kind of exception handling for these routines. | |
| 371 namespace { | 351 namespace { |
| 372 void InvalidFree(void* ptr) { | 352 void InvalidFree(void* ptr) { |
| 373 CRASH("Attempt to free invalid pointer: %p\n", ptr); | 353 CRASH("Attempt to free invalid pointer: %p\n", ptr); |
| 374 } | 354 } |
| 375 | 355 |
| 376 size_t InvalidGetSizeForRealloc(void* old_ptr) { | 356 size_t InvalidGetSizeForRealloc(void* old_ptr) { |
| 377 CRASH("Attempt to realloc invalid pointer: %p\n", old_ptr); | 357 CRASH("Attempt to realloc invalid pointer: %p\n", old_ptr); |
| 378 return 0; | 358 return 0; |
| 379 } | 359 } |
| 380 | 360 |
| 381 size_t InvalidGetAllocatedSize(void* ptr) { | 361 size_t InvalidGetAllocatedSize(void* ptr) { |
| 382 CRASH("Attempt to get the size of an invalid pointer: %p\n", ptr); | 362 CRASH("Attempt to get the size of an invalid pointer: %p\n", ptr); |
| 383 return 0; | 363 return 0; |
| 384 } | 364 } |
| 385 } // unnamed namespace | 365 } // unnamed namespace |
| 386 | 366 |
| 387 // Extract interesting stats | 367 // Extract interesting stats |
| 388 struct TCMallocStats { | 368 struct TCMallocStats { |
| 389 uint64_t thread_bytes; // Bytes in thread caches | 369 uint64_t system_bytes; // Bytes alloced from system |
| 390 uint64_t central_bytes; // Bytes in central cache | 370 uint64_t committed_bytes; // Bytes alloced and committed from system |
| 391 uint64_t transfer_bytes; // Bytes in central transfer cache | 371 uint64_t thread_bytes; // Bytes in thread caches |
| 392 uint64_t metadata_bytes; // Bytes alloced for metadata | 372 uint64_t central_bytes; // Bytes in central cache |
| 393 PageHeap::Stats pageheap; // Stats from page heap | 373 uint64_t transfer_bytes; // Bytes in central transfer cache |
| 374 uint64_t pageheap_bytes; // Bytes in page heap |
| 375 uint64_t metadata_bytes; // Bytes alloced for metadata |
| 394 }; | 376 }; |
| 395 | 377 |
| 396 // Get stats into "r". Also get per-size-class counts if class_count != NULL | 378 // Get stats into "r". Also get per-size-class counts if class_count != NULL |
| 397 static void ExtractStats(TCMallocStats* r, uint64_t* class_count) { | 379 static void ExtractStats(TCMallocStats* r, uint64_t* class_count) { |
| 398 r->central_bytes = 0; | 380 r->central_bytes = 0; |
| 399 r->transfer_bytes = 0; | 381 r->transfer_bytes = 0; |
| 400 for (int cl = 0; cl < kNumClasses; ++cl) { | 382 for (int cl = 0; cl < kNumClasses; ++cl) { |
| 401 const int length = Static::central_cache()[cl].length(); | 383 const int length = Static::central_cache()[cl].length(); |
| 402 const int tc_length = Static::central_cache()[cl].tc_length(); | 384 const int tc_length = Static::central_cache()[cl].tc_length(); |
| 403 const size_t size = static_cast<uint64_t>( | 385 const size_t size = static_cast<uint64_t>( |
| 404 Static::sizemap()->ByteSizeForClass(cl)); | 386 Static::sizemap()->ByteSizeForClass(cl)); |
| 405 r->central_bytes += (size * length); | 387 r->central_bytes += (size * length); |
| 406 r->transfer_bytes += (size * tc_length); | 388 r->transfer_bytes += (size * tc_length); |
| 407 if (class_count) class_count[cl] = length + tc_length; | 389 if (class_count) class_count[cl] = length + tc_length; |
| 408 } | 390 } |
| 409 | 391 |
| 410 // Add stats from per-thread heaps | 392 // Add stats from per-thread heaps |
| 411 r->thread_bytes = 0; | 393 r->thread_bytes = 0; |
| 412 { // scope | 394 { // scope |
| 413 SpinLockHolder h(Static::pageheap_lock()); | 395 SpinLockHolder h(Static::pageheap_lock()); |
| 414 ThreadCache::GetThreadStats(&r->thread_bytes, class_count); | 396 ThreadCache::GetThreadStats(&r->thread_bytes, class_count); |
| 397 } |
| 398 |
| 399 { //scope |
| 400 SpinLockHolder h(Static::pageheap_lock()); |
| 401 r->system_bytes = Static::pageheap()->SystemBytes(); |
| 402 r->committed_bytes = Static::pageheap()->CommittedBytes(); |
| 415 r->metadata_bytes = tcmalloc::metadata_system_bytes(); | 403 r->metadata_bytes = tcmalloc::metadata_system_bytes(); |
| 416 r->pageheap = Static::pageheap()->stats(); | 404 r->pageheap_bytes = Static::pageheap()->FreeBytes(); |
| 417 } | 405 } |
| 418 } | 406 } |
| 419 | 407 |
| 420 // WRITE stats to "out" | 408 // WRITE stats to "out" |
| 421 static void DumpStats(TCMalloc_Printer* out, int level) { | 409 static void DumpStats(TCMalloc_Printer* out, int level) { |
| 422 TCMallocStats stats; | 410 TCMallocStats stats; |
| 423 uint64_t class_count[kNumClasses]; | 411 uint64_t class_count[kNumClasses]; |
| 424 ExtractStats(&stats, (level >= 2 ? class_count : NULL)); | 412 ExtractStats(&stats, (level >= 2 ? class_count : NULL)); |
| 425 | 413 |
| 426 static const double MB = 1048576.0; | 414 static const double MB = 1048576.0; |
| 427 | 415 |
| 428 const uint64_t bytes_in_use = stats.pageheap.system_bytes | 416 const uint64_t bytes_in_use = stats.system_bytes |
| 429 - stats.pageheap.free_bytes | 417 - stats.pageheap_bytes |
| 430 - stats.pageheap.unmapped_bytes | |
| 431 - stats.central_bytes | 418 - stats.central_bytes |
| 432 - stats.transfer_bytes | 419 - stats.transfer_bytes |
| 433 - stats.thread_bytes; | 420 - stats.thread_bytes; |
| 434 | 421 |
| 435 out->printf("WASTE: %7.1f MB committed but not used\n" | 422 out->printf("WASTE: %7.1f MB committed but not used\n" |
| 436 "WASTE: %7.1f MB bytes committed, %7.1f MB bytes in use\n" | 423 "WASTE: %7.1f MB bytes committed, %7.1f MB bytes in use\n" |
| 437 "WASTE: committed/used ratio of %f\n", | 424 "WASTE: committed/used ratio of %f\n", |
| 438 (stats.pageheap.committed_bytes - bytes_in_use) / MB, | 425 (stats.committed_bytes - bytes_in_use) / MB, |
| 439 stats.pageheap.committed_bytes / MB, | 426 stats.committed_bytes / MB, |
| 440 bytes_in_use / MB, | 427 bytes_in_use / MB, |
| 441 stats.pageheap.committed_bytes / static_cast<double>(bytes_in_use)); | 428 stats.committed_bytes / static_cast<double>(bytes_in_use)); |
| 442 | 429 |
| 443 if (level >= 2) { | 430 if (level >= 2) { |
| 444 out->printf("------------------------------------------------\n"); | 431 out->printf("------------------------------------------------\n"); |
| 445 out->printf("Size class breakdown\n"); | |
| 446 out->printf("------------------------------------------------\n"); | |
| 447 uint64_t cumulative = 0; | 432 uint64_t cumulative = 0; |
| 448 for (int cl = 0; cl < kNumClasses; ++cl) { | 433 for (int cl = 0; cl < kNumClasses; ++cl) { |
| 449 if (class_count[cl] > 0) { | 434 if (class_count[cl] > 0) { |
| 450 uint64_t class_bytes = | 435 uint64_t class_bytes = |
| 451 class_count[cl] * Static::sizemap()->ByteSizeForClass(cl); | 436 class_count[cl] * Static::sizemap()->ByteSizeForClass(cl); |
| 452 cumulative += class_bytes; | 437 cumulative += class_bytes; |
| 453 out->printf("class %3d [ %8" PRIuS " bytes ] : " | 438 out->printf("class %3d [ %8" PRIuS " bytes ] : " |
| 454 "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n", | 439 "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n", |
| 455 cl, Static::sizemap()->ByteSizeForClass(cl), | 440 cl, Static::sizemap()->ByteSizeForClass(cl), |
| 456 class_count[cl], | 441 class_count[cl], |
| 457 class_bytes / MB, | 442 class_bytes / MB, |
| 458 cumulative / MB); | 443 cumulative / MB); |
| 459 } | 444 } |
| 460 } | 445 } |
| 461 | 446 |
| 462 SpinLockHolder h(Static::pageheap_lock()); | 447 SpinLockHolder h(Static::pageheap_lock()); |
| 463 Static::pageheap()->Dump(out); | 448 Static::pageheap()->Dump(out); |
| 464 | 449 |
| 465 out->printf("------------------------------------------------\n"); | 450 out->printf("------------------------------------------------\n"); |
| 466 DumpSystemAllocatorStats(out); | 451 DumpSystemAllocatorStats(out); |
| 467 } | 452 } |
| 468 | 453 |
| 469 out->printf("------------------------------------------------\n" | 454 out->printf("------------------------------------------------\n" |
| 470 "MALLOC: %12" PRIu64 " (%7.1f MB) Heap size\n" | 455 "MALLOC: %12" PRIu64 " (%7.1f MB) Heap size\n" |
| 471 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes committed\n" | 456 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes committed\n" |
| 472 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes in use by application\n" | 457 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes in use by application\n" |
| 473 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in page heap\n" | 458 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in page heap\n" |
| 474 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes unmapped in page heap\n" | |
| 475 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in central cache\n" | 459 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in central cache\n" |
| 476 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in transfer cache\n" | 460 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in transfer cache\n" |
| 477 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in thread caches\n" | 461 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in thread caches\n" |
| 478 "MALLOC: %12" PRIu64 " Spans in use\n" | 462 "MALLOC: %12" PRIu64 " Spans in use\n" |
| 479 "MALLOC: %12" PRIu64 " Thread heaps in use\n" | 463 "MALLOC: %12" PRIu64 " Thread heaps in use\n" |
| 480 "MALLOC: %12" PRIu64 " (%7.1f MB) Metadata allocated\n" | 464 "MALLOC: %12" PRIu64 " (%7.1f MB) Metadata allocated\n" |
| 481 "------------------------------------------------\n", | 465 "------------------------------------------------\n", |
| 482 stats.pageheap.system_bytes, stats.pageheap.system_bytes / MB, | 466 stats.system_bytes, stats.system_bytes / MB, |
| 483 stats.pageheap.committed_bytes, stats.pageheap.committed_bytes / MB, | 467 stats.committed_bytes, stats.committed_bytes / MB, |
| 484 bytes_in_use, bytes_in_use / MB, | 468 bytes_in_use, bytes_in_use / MB, |
| 485 stats.pageheap.free_bytes, stats.pageheap.free_bytes / MB, | 469 stats.pageheap_bytes, stats.pageheap_bytes / MB, |
| 486 stats.pageheap.unmapped_bytes, stats.pageheap.unmapped_bytes / MB, | |
| 487 stats.central_bytes, stats.central_bytes / MB, | 470 stats.central_bytes, stats.central_bytes / MB, |
| 488 stats.transfer_bytes, stats.transfer_bytes / MB, | 471 stats.transfer_bytes, stats.transfer_bytes / MB, |
| 489 stats.thread_bytes, stats.thread_bytes / MB, | 472 stats.thread_bytes, stats.thread_bytes / MB, |
| 490 uint64_t(Static::span_allocator()->inuse()), | 473 uint64_t(Static::span_allocator()->inuse()), |
| 491 uint64_t(ThreadCache::HeapsInUse()), | 474 uint64_t(ThreadCache::HeapsInUse()), |
| 492 stats.metadata_bytes, stats.metadata_bytes / MB); | 475 stats.metadata_bytes, stats.metadata_bytes / MB); |
| 493 } | 476 } |
| 494 | 477 |
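Reviewer aside: the dump produced by DumpStats() is normally reached through the public MallocExtension interface rather than called directly. A hedged usage sketch, assuming the gperftools public headers are on the include path:

```cpp
// Hedged usage sketch: retrieve the human-readable stats that DumpStats()
// formats, via the public extension interface.
#include <cstdio>
#include <google/malloc_extension.h>

void PrintTcmallocStats() {
  char buffer[16 << 10];  // a large buffer selects the level-2 breakdown
  MallocExtension::instance()->GetStats(buffer, sizeof(buffer));
  std::fputs(buffer, stderr);
}
```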
| 495 static void PrintStats(int level) { | 478 static void PrintStats(int level) { |
| 496 const int kBufferSize = 16 << 10; | 479 const int kBufferSize = 16 << 10; |
| (...skipping 43 matching lines...) |
| 540 result[used_slots+2] = reinterpret_cast<void*>(t->depth); | 523 result[used_slots+2] = reinterpret_cast<void*>(t->depth); |
| 541 for (int d = 0; d < t->depth; d++) { | 524 for (int d = 0; d < t->depth; d++) { |
| 542 result[used_slots+3+d] = t->stack[d]; | 525 result[used_slots+3+d] = t->stack[d]; |
| 543 } | 526 } |
| 544 used_slots += 3 + t->depth; | 527 used_slots += 3 + t->depth; |
| 545 } | 528 } |
| 546 result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0)); | 529 result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0)); |
| 547 return result; | 530 return result; |
| 548 } | 531 } |
| 549 | 532 |
| 550 static void IterateOverRanges(void* arg, MallocExtension::RangeFunction func) { | |
| 551 PageID page = 1; // Some code may assume that page==0 is never used | |
| 552 bool done = false; | |
| 553 while (!done) { | |
| 554 // Accumulate a small number of ranges in a local buffer | |
| 555 static const int kNumRanges = 16; | |
| 556 static base::MallocRange ranges[kNumRanges]; | |
| 557 int n = 0; | |
| 558 { | |
| 559 SpinLockHolder h(Static::pageheap_lock()); | |
| 560 while (n < kNumRanges) { | |
| 561 if (!Static::pageheap()->GetNextRange(page, &ranges[n])) { | |
| 562 done = true; | |
| 563 break; | |
| 564 } else { | |
| 565 uintptr_t limit = ranges[n].address + ranges[n].length; | |
| 566 page = (limit + kPageSize - 1) >> kPageShift; | |
| 567 n++; | |
| 568 } | |
| 569 } | |
| 570 } | |
| 571 | |
| 572 for (int i = 0; i < n; i++) { | |
| 573 (*func)(arg, &ranges[i]); | |
| 574 } | |
| 575 } | |
| 576 } | |
| 577 | |
| 578 // TCMalloc's support for extra malloc interfaces | 533 // TCMalloc's support for extra malloc interfaces |
| 579 class TCMallocImplementation : public MallocExtension { | 534 class TCMallocImplementation : public MallocExtension { |
| 580 private: | |
| 581 // ReleaseToSystem() might release more than the requested bytes because | |
| 582 // the page heap releases at the span granularity, and spans are of wildly | |
| 583 // different sizes. This member keeps track of the extra bytes bytes | |
| 584 // released so that the app can periodically call ReleaseToSystem() to | |
| 585 // release memory at a constant rate. | |
| 586 // NOTE: Protected by Static::pageheap_lock(). | |
| 587 size_t extra_bytes_released_; | |
| 588 | |
| 589 public: | 535 public: |
| 590 TCMallocImplementation() | |
| 591 : extra_bytes_released_(0) { | |
| 592 } | |
| 593 | |
| 594 virtual void GetStats(char* buffer, int buffer_length) { | 536 virtual void GetStats(char* buffer, int buffer_length) { |
| 595 ASSERT(buffer_length > 0); | 537 ASSERT(buffer_length > 0); |
| 596 TCMalloc_Printer printer(buffer, buffer_length); | 538 TCMalloc_Printer printer(buffer, buffer_length); |
| 597 | 539 |
| 598 // Print level one stats unless lots of space is available | 540 // Print level one stats unless lots of space is available |
| 599 if (buffer_length < 10000) { | 541 if (buffer_length < 10000) { |
| 600 DumpStats(&printer, 1); | 542 DumpStats(&printer, 1); |
| 601 } else { | 543 } else { |
| 602 DumpStats(&printer, 2); | 544 DumpStats(&printer, 2); |
| 603 } | 545 } |
| 604 } | 546 } |
| 605 | 547 |
| 606 virtual void** ReadStackTraces(int* sample_period) { | 548 virtual void** ReadStackTraces(int* sample_period) { |
| 607 tcmalloc::StackTraceTable table; | 549 tcmalloc::StackTraceTable table; |
| 608 { | 550 { |
| 609 SpinLockHolder h(Static::pageheap_lock()); | 551 SpinLockHolder h(Static::pageheap_lock()); |
| 610 Span* sampled = Static::sampled_objects(); | 552 Span* sampled = Static::sampled_objects(); |
| 611 for (Span* s = sampled->next; s != sampled; s = s->next) { | 553 for (Span* s = sampled->next; s != sampled; s = s->next) { |
| 612 table.AddTrace(*reinterpret_cast<StackTrace*>(s->objects)); | 554 table.AddTrace(*reinterpret_cast<StackTrace*>(s->objects)); |
| 613 } | 555 } |
| 614 } | 556 } |
| 615 *sample_period = ThreadCache::GetCache()->GetSamplePeriod(); | 557 *sample_period = ThreadCache::GetCache()->GetSamplePeriod(); |
| 616 return table.ReadStackTracesAndClear(); // grabs and releases pageheap_lock | 558 return table.ReadStackTracesAndClear(); // grabs and releases pageheap_lock |
| 617 } | 559 } |
| 618 | 560 |
| 619 virtual void** ReadHeapGrowthStackTraces() { | 561 virtual void** ReadHeapGrowthStackTraces() { |
| 620 return DumpHeapGrowthStackTraces(); | 562 return DumpHeapGrowthStackTraces(); |
| 621 } | 563 } |
| 622 | 564 |
| 623 virtual void Ranges(void* arg, RangeFunction func) { | |
| 624 IterateOverRanges(arg, func); | |
| 625 } | |
| 626 | |
| 627 virtual bool GetNumericProperty(const char* name, size_t* value) { | 565 virtual bool GetNumericProperty(const char* name, size_t* value) { |
| 628 ASSERT(name != NULL); | 566 ASSERT(name != NULL); |
| 629 | 567 |
| 630 if (strcmp(name, "generic.current_allocated_bytes") == 0) { | 568 if (strcmp(name, "generic.current_allocated_bytes") == 0) { |
| 631 TCMallocStats stats; | 569 TCMallocStats stats; |
| 632 ExtractStats(&stats, NULL); | 570 ExtractStats(&stats, NULL); |
| 633 *value = stats.pageheap.system_bytes | 571 *value = stats.system_bytes |
| 634 - stats.thread_bytes | 572 - stats.thread_bytes |
| 635 - stats.central_bytes | 573 - stats.central_bytes |
| 636 - stats.transfer_bytes | 574 - stats.transfer_bytes |
| 637 - stats.pageheap.free_bytes | 575 - stats.pageheap_bytes; |
| 638 - stats.pageheap.unmapped_bytes; | |
| 639 return true; | 576 return true; |
| 640 } | 577 } |
| 641 | 578 |
| 642 if (strcmp(name, "generic.heap_size") == 0) { | 579 if (strcmp(name, "generic.heap_size") == 0) { |
| 643 TCMallocStats stats; | 580 TCMallocStats stats; |
| 644 ExtractStats(&stats, NULL); | 581 ExtractStats(&stats, NULL); |
| 645 *value = stats.pageheap.system_bytes; | 582 *value = stats.system_bytes; |
| 583 return true; |
| 584 } |
| 585 |
| 586 if (strcmp(name, "generic.committed_bytes") == 0) { |
| 587 TCMallocStats stats; |
| 588 ExtractStats(&stats, NULL); |
| 589 *value = stats.committed_bytes + stats.metadata_bytes; |
| 646 return true; | 590 return true; |
| 647 } | 591 } |
| 648 | 592 |
| 649 if (strcmp(name, "tcmalloc.slack_bytes") == 0) { | 593 if (strcmp(name, "tcmalloc.slack_bytes") == 0) { |
| 650 // We assume that bytes in the page heap are not fragmented too | 594 // We assume that bytes in the page heap are not fragmented too |
| 651 // badly, and are therefore available for allocation without | 595 // badly, and are therefore available for allocation. |
| 652 // growing the pageheap system byte count. | |
| 653 SpinLockHolder l(Static::pageheap_lock()); | 596 SpinLockHolder l(Static::pageheap_lock()); |
| 654 PageHeap::Stats stats = Static::pageheap()->stats(); | 597 *value = Static::pageheap()->FreeBytes(); |
| 655 *value = stats.free_bytes + stats.unmapped_bytes; | |
| 656 return true; | |
| 657 } | |
| 658 | |
| 659 if (strcmp(name, "tcmalloc.pageheap_free_bytes") == 0) { | |
| 660 SpinLockHolder l(Static::pageheap_lock()); | |
| 661 *value = Static::pageheap()->stats().free_bytes; | |
| 662 return true; | |
| 663 } | |
| 664 | |
| 665 if (strcmp(name, "tcmalloc.pageheap_unmapped_bytes") == 0) { | |
| 666 SpinLockHolder l(Static::pageheap_lock()); | |
| 667 *value = Static::pageheap()->stats().unmapped_bytes; | |
| 668 return true; | 598 return true; |
| 669 } | 599 } |
| 670 | 600 |
| 671 if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) { | 601 if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) { |
| 672 SpinLockHolder l(Static::pageheap_lock()); | 602 SpinLockHolder l(Static::pageheap_lock()); |
| 673 *value = ThreadCache::overall_thread_cache_size(); | 603 *value = ThreadCache::overall_thread_cache_size(); |
| 674 return true; | 604 return true; |
| 675 } | 605 } |
| 676 | 606 |
| 677 if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) { | 607 if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) { |
| (...skipping 15 matching lines...) |
| 693 return true; | 623 return true; |
| 694 } | 624 } |
| 695 | 625 |
| 696 return false; | 626 return false; |
| 697 } | 627 } |
| 698 | 628 |
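Reviewer aside: a hedged sketch of how a caller queries the numeric properties handled above (property names are taken from the strings in GetNumericProperty()):

```cpp
// Hedged usage sketch: read the "generic.*" properties exposed above.
#include <cstdio>
#include <google/malloc_extension.h>

void ReportHeapUsage() {
  size_t allocated = 0, heap_size = 0;
  MallocExtension* ext = MallocExtension::instance();
  if (ext->GetNumericProperty("generic.current_allocated_bytes", &allocated) &&
      ext->GetNumericProperty("generic.heap_size", &heap_size)) {
    std::printf("allocated=%zu heap=%zu\n", allocated, heap_size);
  }
}
```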
| 699 virtual void MarkThreadIdle() { | 629 virtual void MarkThreadIdle() { |
| 700 ThreadCache::BecomeIdle(); | 630 ThreadCache::BecomeIdle(); |
| 701 } | 631 } |
| 702 | 632 |
| 703 virtual void MarkThreadBusy(); // Implemented below | 633 virtual void ReleaseFreeMemory() { |
| 704 | |
| 705 virtual void ReleaseToSystem(size_t num_bytes) { | |
| 706 SpinLockHolder h(Static::pageheap_lock()); | 634 SpinLockHolder h(Static::pageheap_lock()); |
| 707 if (num_bytes <= extra_bytes_released_) { | 635 Static::pageheap()->ReleaseFreePages(); |
| 708 // We released too much on a prior call, so don't release any | |
| 709 // more this time. | |
| 710 extra_bytes_released_ = extra_bytes_released_ - num_bytes; | |
| 711 return; | |
| 712 } | |
| 713 num_bytes = num_bytes - extra_bytes_released_; | |
| 714 // num_bytes might be less than one page. If we pass zero to | |
| 715 // ReleaseAtLeastNPages, it won't do anything, so we release a whole | |
| 716 // page now and let extra_bytes_released_ smooth it out over time. | |
| 717 Length num_pages = max<Length>(num_bytes >> kPageShift, 1); | |
| 718 size_t bytes_released = Static::pageheap()->ReleaseAtLeastNPages( | |
| 719 num_pages) << kPageShift; | |
| 720 if (bytes_released > num_bytes) { | |
| 721 extra_bytes_released_ = bytes_released - num_bytes; | |
| 722 } else { | |
| 723 // The PageHeap wasn't able to release num_bytes. Don't try to | |
| 724 // compensate with a big release next time. Specifically, | |
| 725 // ReleaseFreeMemory() calls ReleaseToSystem(LONG_MAX). | |
| 726 extra_bytes_released_ = 0; | |
| 727 } | |
| 728 } | 636 } |
| 729 | 637 |
| 730 virtual void SetMemoryReleaseRate(double rate) { | 638 virtual void SetMemoryReleaseRate(double rate) { |
| 731 FLAGS_tcmalloc_release_rate = rate; | 639 FLAGS_tcmalloc_release_rate = rate; |
| 732 } | 640 } |
| 733 | 641 |
| 734 virtual double GetMemoryReleaseRate() { | 642 virtual double GetMemoryReleaseRate() { |
| 735 return FLAGS_tcmalloc_release_rate; | 643 return FLAGS_tcmalloc_release_rate; |
| 736 } | 644 } |
| 737 virtual size_t GetEstimatedAllocatedSize(size_t size) { | 645 virtual size_t GetEstimatedAllocatedSize(size_t size) { |
| (...skipping 28 matching lines...) |
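Reviewer aside: a hedged usage sketch for the release interfaces shown above (ReleaseFreeMemory() replaces the old ReleaseToSystem() bookkeeping in this patch):

```cpp
// Hedged usage sketch: hand free pages back to the OS and tune how
// aggressively tcmalloc does so on its own.
#include <google/malloc_extension.h>

void TrimHeap() {
  MallocExtension::instance()->ReleaseFreeMemory();        // one-shot release
  MallocExtension::instance()->SetMemoryReleaseRate(5.0);  // ongoing rate
}
```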
| 766 TCMallocGuard::TCMallocGuard() { | 674 TCMallocGuard::TCMallocGuard() { |
| 767 if (tcmallocguard_refcount++ == 0) { | 675 if (tcmallocguard_refcount++ == 0) { |
| 768 #ifdef HAVE_TLS // this is true if the cc/ld/libc combo support TLS | 676 #ifdef HAVE_TLS // this is true if the cc/ld/libc combo support TLS |
| 769 // Check whether the kernel also supports TLS (needs to happen at runtime) | 677 // Check whether the kernel also supports TLS (needs to happen at runtime) |
| 770 tcmalloc::CheckIfKernelSupportsTLS(); | 678 tcmalloc::CheckIfKernelSupportsTLS(); |
| 771 #endif | 679 #endif |
| 772 #ifdef WIN32_DO_PATCHING | 680 #ifdef WIN32_DO_PATCHING |
| 773 // patch the windows VirtualAlloc, etc. | 681 // patch the windows VirtualAlloc, etc. |
| 774 PatchWindowsFunctions(); // defined in windows/patch_functions.cc | 682 PatchWindowsFunctions(); // defined in windows/patch_functions.cc |
| 775 #endif | 683 #endif |
| 776 tc_free(tc_malloc(1)); | 684 free(malloc(1)); |
| 777 ThreadCache::InitTSD(); | 685 ThreadCache::InitTSD(); |
| 778 tc_free(tc_malloc(1)); | 686 free(malloc(1)); |
| 779 MallocExtension::Register(new TCMallocImplementation); | 687 MallocExtension::Register(new TCMallocImplementation); |
| 780 } | 688 } |
| 781 } | 689 } |
| 782 | 690 |
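Reviewer aside: TCMallocGuard follows the reference-counted initialization-guard idiom. A hedged sketch of that pattern (names are illustrative, not from the patch):

```cpp
// Hedged sketch of the guard idiom: a counter makes the one-time setup run
// exactly once even when several translation units instantiate the guard
// during static initialization, and teardown runs after the last user.
static int guard_refcount = 0;

struct InitGuard {
  InitGuard()  { if (guard_refcount++ == 0) { /* one-time setup */ } }
  ~InitGuard() { if (--guard_refcount == 0) { /* one-time teardown */ } }
};

static InitGuard init_guard_instance;  // typically placed in the guard's header
```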
| 783 TCMallocGuard::~TCMallocGuard() { | 691 TCMallocGuard::~TCMallocGuard() { |
| 784 if (--tcmallocguard_refcount == 0) { | 692 if (--tcmallocguard_refcount == 0) { |
| 785 const char* env = getenv("MALLOCSTATS"); | 693 const char* env = getenv("MALLOCSTATS"); |
| 786 if (env != NULL) { | 694 if (env != NULL) { |
| 787 int level = atoi(env); | 695 int level = atoi(env); |
| 788 if (level < 1) level = 1; | 696 if (level < 1) level = 1; |
| (...skipping 74 matching lines...) |
| 863 result); | 771 result); |
| 864 for (int i = 0; i < stack.depth; i++) { | 772 for (int i = 0; i < stack.depth; i++) { |
| 865 printer.printf(" %p", stack.stack[i]); | 773 printer.printf(" %p", stack.stack[i]); |
| 866 } | 774 } |
| 867 printer.printf("\n"); | 775 printer.printf("\n"); |
| 868 write(STDERR_FILENO, buffer, strlen(buffer)); | 776 write(STDERR_FILENO, buffer, strlen(buffer)); |
| 869 } | 777 } |
| 870 | 778 |
| 871 namespace { | 779 namespace { |
| 872 | 780 |
| 873 inline void* cpp_alloc(size_t size, bool nothrow); | |
| 874 inline void* do_malloc(size_t size); | |
| 875 | |
| 876 // TODO(willchan): Investigate whether or not inlining this much is harmful to | |
| 877 // performance. | |
| 878 // This is equivalent to do_malloc() except when tc_new_mode is set to true. | |
| 879 // Otherwise, it will run the std::new_handler if set. | |
| 880 inline void* do_malloc_or_cpp_alloc(size_t size) { | |
| 881 return tc_new_mode ? cpp_alloc(size, true) : do_malloc(size); | |
| 882 } | |
| 883 | |
| 884 void* cpp_memalign(size_t align, size_t size); | |
| 885 void* do_memalign(size_t align, size_t size); | |
| 886 | |
| 887 inline void* do_memalign_or_cpp_memalign(size_t align, size_t size) { | |
| 888 return tc_new_mode ? cpp_memalign(align, size) : do_memalign(align, size); | |
| 889 } | |
| 890 | |
| 891 // Helper for do_malloc(). | 781 // Helper for do_malloc(). |
| 892 inline void* do_malloc_pages(Length num_pages) { | 782 inline void* do_malloc_pages(Length num_pages) { |
| 893 Span *span; | 783 Span *span; |
| 894 bool report_large = false; | 784 bool report_large = false; |
| 895 { | 785 { |
| 896 SpinLockHolder h(Static::pageheap_lock()); | 786 SpinLockHolder h(Static::pageheap_lock()); |
| 897 span = Static::pageheap()->New(num_pages); | 787 span = Static::pageheap()->New(num_pages); |
| 898 const int64 threshold = large_alloc_threshold; | 788 const int64 threshold = large_alloc_threshold; |
| 899 if (threshold > 0 && num_pages >= (threshold >> kPageShift)) { | 789 if (threshold > 0 && num_pages >= (threshold >> kPageShift)) { |
| 900 // Increase the threshold by 1/8 every time we generate a report. | 790 // Increase the threshold by 1/8 every time we generate a report. |
| (...skipping 30 matching lines...) |
| 931 } | 821 } |
| 932 if (ret == NULL) errno = ENOMEM; | 822 if (ret == NULL) errno = ENOMEM; |
| 933 return ret; | 823 return ret; |
| 934 } | 824 } |
| 935 | 825 |
| 936 inline void* do_calloc(size_t n, size_t elem_size) { | 826 inline void* do_calloc(size_t n, size_t elem_size) { |
| 937 // Overflow check | 827 // Overflow check |
| 938 const size_t size = n * elem_size; | 828 const size_t size = n * elem_size; |
| 939 if (elem_size != 0 && size / elem_size != n) return NULL; | 829 if (elem_size != 0 && size / elem_size != n) return NULL; |
| 940 | 830 |
| 941 void* result = do_malloc_or_cpp_alloc(size); | 831 void* result = do_malloc(size); |
| 942 if (result != NULL) { | 832 if (result != NULL) { |
| 943 memset(result, 0, size); | 833 memset(result, 0, size); |
| 944 } | 834 } |
| 945 return result; | 835 return result; |
| 946 } | 836 } |
| 947 | 837 |
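Reviewer aside: the overflow check in do_calloc() is worth calling out. A hedged sketch of the same test in isolation:

```cpp
// Hedged sketch of do_calloc()'s overflow check: if n * elem_size wraps
// around size_t, dividing the product back does not recover n, so the
// request is rejected instead of silently under-allocating.
#include <cstddef>

bool MultiplyWouldOverflow(std::size_t n, std::size_t elem_size) {
  const std::size_t size = n * elem_size;  // may wrap modulo 2^N
  return elem_size != 0 && size / elem_size != n;
}
```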
| 948 static inline ThreadCache* GetCacheIfPresent() { | 838 static inline ThreadCache* GetCacheIfPresent() { |
| 949 void* const p = ThreadCache::GetCacheIfPresent(); | 839 void* const p = ThreadCache::GetCacheIfPresent(); |
| 950 return reinterpret_cast<ThreadCache*>(p); | 840 return reinterpret_cast<ThreadCache*>(p); |
| 951 } | 841 } |
| (...skipping 88 matching lines...) |
| 1040 // . If we need to grow, grow to max(new_size, old_size * 1.X) | 930 // . If we need to grow, grow to max(new_size, old_size * 1.X) |
| 1041 // . Don't shrink unless new_size < old_size * 0.Y | 931 // . Don't shrink unless new_size < old_size * 0.Y |
| 1042 // X and Y trade-off time for wasted space. For now we do 1.25 and 0.5. | 932 // X and Y trade-off time for wasted space. For now we do 1.25 and 0.5. |
| 1043 const int lower_bound_to_grow = old_size + old_size / 4; | 933 const int lower_bound_to_grow = old_size + old_size / 4; |
| 1044 const int upper_bound_to_shrink = old_size / 2; | 934 const int upper_bound_to_shrink = old_size / 2; |
| 1045 if ((new_size > old_size) || (new_size < upper_bound_to_shrink)) { | 935 if ((new_size > old_size) || (new_size < upper_bound_to_shrink)) { |
| 1046 // Need to reallocate. | 936 // Need to reallocate. |
| 1047 void* new_ptr = NULL; | 937 void* new_ptr = NULL; |
| 1048 | 938 |
| 1049 if (new_size > old_size && new_size < lower_bound_to_grow) { | 939 if (new_size > old_size && new_size < lower_bound_to_grow) { |
| 1050 new_ptr = do_malloc_or_cpp_alloc(lower_bound_to_grow); | 940 new_ptr = do_malloc(lower_bound_to_grow); |
| 1051 } | 941 } |
| 1052 if (new_ptr == NULL) { | 942 if (new_ptr == NULL) { |
| 1053 // Either new_size is not a tiny increment, or last do_malloc failed. | 943 // Either new_size is not a tiny increment, or last do_malloc failed. |
| 1054 new_ptr = do_malloc_or_cpp_alloc(new_size); | 944 new_ptr = do_malloc(new_size); |
| 1055 } | 945 } |
| 1056 if (new_ptr == NULL) { | 946 if (new_ptr == NULL) { |
| 1057 return NULL; | 947 return NULL; |
| 1058 } | 948 } |
| 1059 MallocHook::InvokeNewHook(new_ptr, new_size); | 949 MallocHook::InvokeNewHook(new_ptr, new_size); |
| 1060 memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size)); | 950 memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size)); |
| 1061 MallocHook::InvokeDeleteHook(old_ptr); | 951 MallocHook::InvokeDeleteHook(old_ptr); |
| 1062 // We could use a variant of do_free() that leverages the fact | 952 // We could use a variant of do_free() that leverages the fact |
| 1063 // that we already know the sizeclass of old_ptr. The benefit | 953 // that we already know the sizeclass of old_ptr. The benefit |
| 1064 // would be small, so don't bother. | 954 // would be small, so don't bother. |
| (...skipping 100 matching lines...) |
| 1165 inline struct mallinfo do_mallinfo() { | 1055 inline struct mallinfo do_mallinfo() { |
| 1166 TCMallocStats stats; | 1056 TCMallocStats stats; |
| 1167 ExtractStats(&stats, NULL); | 1057 ExtractStats(&stats, NULL); |
| 1168 | 1058 |
| 1169 // Just some of the fields are filled in. | 1059 // Just some of the fields are filled in. |
| 1170 struct mallinfo info; | 1060 struct mallinfo info; |
| 1171 memset(&info, 0, sizeof(info)); | 1061 memset(&info, 0, sizeof(info)); |
| 1172 | 1062 |
| 1173 // Unfortunately, the struct contains "int" field, so some of the | 1063 // Unfortunately, the struct contains "int" field, so some of the |
| 1174 // size values will be truncated. | 1064 // size values will be truncated. |
| 1175 info.arena = static_cast<int>(stats.pageheap.system_bytes); | 1065 info.arena = static_cast<int>(stats.system_bytes); |
| 1176 info.fsmblks = static_cast<int>(stats.thread_bytes | 1066 info.fsmblks = static_cast<int>(stats.thread_bytes |
| 1177 + stats.central_bytes | 1067 + stats.central_bytes |
| 1178 + stats.transfer_bytes); | 1068 + stats.transfer_bytes); |
| 1179 info.fordblks = static_cast<int>(stats.pageheap.free_bytes + | 1069 info.fordblks = static_cast<int>(stats.pageheap_bytes); |
| 1180 stats.pageheap.unmapped_bytes); | 1070 info.uordblks = static_cast<int>(stats.system_bytes |
| 1181 info.uordblks = static_cast<int>(stats.pageheap.system_bytes | |
| 1182 - stats.thread_bytes | 1071 - stats.thread_bytes |
| 1183 - stats.central_bytes | 1072 - stats.central_bytes |
| 1184 - stats.transfer_bytes | 1073 - stats.transfer_bytes |
| 1185 - stats.pageheap.free_bytes | 1074 - stats.pageheap_bytes); |
| 1186 - stats.pageheap.unmapped_bytes); | |
| 1187 | 1075 |
| 1188 return info; | 1076 return info; |
| 1189 } | 1077 } |
| 1190 #endif // #ifndef HAVE_STRUCT_MALLINFO | 1078 #endif // #ifndef HAVE_STRUCT_MALLINFO |
| 1191 | 1079 |
| 1192 static SpinLock set_new_handler_lock(SpinLock::LINKER_INITIALIZED); | 1080 static SpinLock set_new_handler_lock(SpinLock::LINKER_INITIALIZED); |
| 1193 | 1081 |
| 1194 inline void* cpp_alloc(size_t size, bool nothrow) { | 1082 inline void* cpp_alloc(size_t size, bool nothrow) { |
| 1195 for (;;) { | 1083 for (;;) { |
| 1196 void* p = do_malloc(size); | 1084 void* p = do_malloc(size); |
| (...skipping 35 matching lines...) |
| 1232 return p; | 1120 return p; |
| 1233 } | 1121 } |
| 1234 #endif // (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS) | 1122 #endif // (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS) |
| 1235 } else { // allocation success | 1123 } else { // allocation success |
| 1236 return p; | 1124 return p; |
| 1237 } | 1125 } |
| 1238 #endif // PREANSINEW | 1126 #endif // PREANSINEW |
| 1239 } | 1127 } |
| 1240 } | 1128 } |
| 1241 | 1129 |
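Reviewer aside: cpp_alloc()'s retry loop depends on the standard std::new_handler contract. A hedged, self-contained sketch of that contract from the caller's side:

```cpp
// Hedged sketch: a new_handler either frees memory so the next attempt can
// succeed, throws std::bad_alloc, or never returns. cpp_alloc() keeps
// retrying as long as a handler is installed and returns.
#include <cstdlib>
#include <new>

static void OutOfMemoryHandler() {
  std::abort();  // illustrative policy: terminate instead of retrying
}

int main() {
  std::set_new_handler(OutOfMemoryHandler);
  char* p = new char[64];  // on failure the handler above is invoked
  delete[] p;
  return 0;
}
```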
| 1242 void* cpp_memalign(size_t align, size_t size) { | |
| 1243 for (;;) { | |
| 1244 void* p = do_memalign(align, size); | |
| 1245 #ifdef PREANSINEW | |
| 1246 return p; | |
| 1247 #else | |
| 1248 if (p == NULL) { // allocation failed | |
| 1249 // Get the current new handler. NB: this function is not | |
| 1250 // thread-safe. We make a feeble stab at making it so here, but | |
| 1251 // this lock only protects against tcmalloc interfering with | |
| 1252 // itself, not with other libraries calling set_new_handler. | |
| 1253 std::new_handler nh; | |
| 1254 { | |
| 1255 SpinLockHolder h(&set_new_handler_lock); | |
| 1256 nh = std::set_new_handler(0); | |
| 1257 (void) std::set_new_handler(nh); | |
| 1258 } | |
| 1259 #if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS) | |
| 1260 if (nh) { | |
| 1261 // Since exceptions are disabled, we don't really know if new_handler | |
| 1262 // failed. Assume it will abort if it fails. | |
| 1263 (*nh)(); | |
| 1264 continue; | |
| 1265 } | |
| 1266 return 0; | |
| 1267 #else | |
| 1268 // If no new_handler is established, the allocation failed. | |
| 1269 if (!nh) | |
| 1270 return 0; | |
| 1271 | |
| 1272 // Otherwise, try the new_handler. If it returns, retry the | |
| 1273 // allocation. If it throws std::bad_alloc, fail the allocation. | |
| 1274 // if it throws something else, don't interfere. | |
| 1275 try { | |
| 1276 (*nh)(); | |
| 1277 } catch (const std::bad_alloc&) { | |
| 1278 return p; | |
| 1279 } | |
| 1280 #endif // (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) && !_HAS_EXCEPTIONS) | |
| 1281 } else { // allocation success | |
| 1282 return p; | |
| 1283 } | |
| 1284 #endif // PREANSINEW | |
| 1285 } | |
| 1286 } | |
| 1287 | |
| 1288 } // end unnamed namespace | 1130 } // end unnamed namespace |
| 1289 | 1131 |
| 1290 // As promised, the definition of this function, declared above. | 1132 // As promised, the definition of this function, declared above. |
| 1291 size_t TCMallocImplementation::GetAllocatedSize(void* ptr) { | 1133 size_t TCMallocImplementation::GetAllocatedSize(void* ptr) { |
| 1292 return GetSizeWithCallback(ptr, &InvalidGetAllocatedSize); | 1134 return GetSizeWithCallback(ptr, &InvalidGetAllocatedSize); |
| 1293 } | 1135 } |
| 1294 | 1136 |
| 1295 void TCMallocImplementation::MarkThreadBusy() { | |
| 1296 // Allocate to force the creation of a thread cache, but avoid | |
| 1297 // invoking any hooks. | |
| 1298 do_free(do_malloc(0)); | |
| 1299 } | |
| 1300 | |
| 1301 //------------------------------------------------------------------- | 1137 //------------------------------------------------------------------- |
| 1302 // Exported routines | 1138 // Exported routines |
| 1303 //------------------------------------------------------------------- | 1139 //------------------------------------------------------------------- |
| 1304 | 1140 |
| 1305 extern "C" PERFTOOLS_DLL_DECL const char* tc_version( | |
| 1306 int* major, int* minor, const char** patch) __THROW { | |
| 1307 if (major) *major = TC_VERSION_MAJOR; | |
| 1308 if (minor) *minor = TC_VERSION_MINOR; | |
| 1309 if (patch) *patch = TC_VERSION_PATCH; | |
| 1310 return TC_VERSION_STRING; | |
| 1311 } | |
| 1312 | |
| 1313 // CAVEAT: The code structure below ensures that MallocHook methods are always | 1141 // CAVEAT: The code structure below ensures that MallocHook methods are always |
| 1314 // called from the stack frame of the invoked allocation function. | 1142 // called from the stack frame of the invoked allocation function. |
| 1315 // heap-checker.cc depends on this to start a stack trace from | 1143 // heap-checker.cc depends on this to start a stack trace from |
| 1316 // the call to the (de)allocation function. | 1144 // the call to the (de)allocation function. |
| 1317 | 1145 |
| 1318 extern "C" PERFTOOLS_DLL_DECL void* tc_malloc(size_t size) __THROW { | 1146 static int tc_new_mode = 0; // See tc_set_new_mode(). |
| 1319 void* result = do_malloc_or_cpp_alloc(size); | 1147 extern "C" void* tc_malloc(size_t size) __THROW { |
| 1148 void* result = (tc_new_mode ? cpp_alloc(size, false) : do_malloc(size)); |
| 1320 MallocHook::InvokeNewHook(result, size); | 1149 MallocHook::InvokeNewHook(result, size); |
| 1321 return result; | 1150 return result; |
| 1322 } | 1151 } |
| 1323 | 1152 |
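Reviewer aside: tc_new_mode is toggled through tc_set_new_mode(), the exported setter the comment refers to; the declaration below is an assumption based on the gperftools public header, not part of this patch.

```cpp
// Hedged sketch: with the mode enabled, malloc() failures take the C++
// new_handler path (cpp_alloc) instead of returning NULL immediately.
extern "C" int tc_set_new_mode(int flag);  // assumed to match <google/tcmalloc.h>

void MakeMallocBehaveLikeNew() {
  tc_set_new_mode(1);  // subsequent tc_malloc() calls go through cpp_alloc()
}
```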
| 1324 extern "C" PERFTOOLS_DLL_DECL void tc_free(void* ptr) __THROW { | 1153 extern "C" void tc_free(void* ptr) __THROW { |
| 1325 MallocHook::InvokeDeleteHook(ptr); | 1154 MallocHook::InvokeDeleteHook(ptr); |
| 1326 do_free(ptr); | 1155 do_free(ptr); |
| 1327 } | 1156 } |
| 1328 | 1157 |
| 1329 extern "C" PERFTOOLS_DLL_DECL void* tc_calloc(size_t n, | 1158 extern "C" void* tc_calloc(size_t n, size_t elem_size) __THROW { |
| 1330 size_t elem_size) __THROW { | |
| 1331 void* result = do_calloc(n, elem_size); | 1159 void* result = do_calloc(n, elem_size); |
| 1332 MallocHook::InvokeNewHook(result, n * elem_size); | 1160 MallocHook::InvokeNewHook(result, n * elem_size); |
| 1333 return result; | 1161 return result; |
| 1334 } | 1162 } |
| 1335 | 1163 |
| 1336 extern "C" PERFTOOLS_DLL_DECL void tc_cfree(void* ptr) __THROW { | 1164 extern "C" void tc_cfree(void* ptr) __THROW { |
| 1337 MallocHook::InvokeDeleteHook(ptr); | 1165 MallocHook::InvokeDeleteHook(ptr); |
| 1338 do_free(ptr); | 1166 do_free(ptr); |
| 1339 } | 1167 } |
| 1340 | 1168 |
| 1341 extern "C" PERFTOOLS_DLL_DECL void* tc_realloc(void* old_ptr, | 1169 extern "C" void* tc_realloc(void* old_ptr, size_t new_size) __THROW { |
| 1342 size_t new_size) __THROW { | |
| 1343 if (old_ptr == NULL) { | 1170 if (old_ptr == NULL) { |
| 1344 void* result = do_malloc_or_cpp_alloc(new_size); | 1171 void* result = do_malloc(new_size); |
| 1345 MallocHook::InvokeNewHook(result, new_size); | 1172 MallocHook::InvokeNewHook(result, new_size); |
| 1346 return result; | 1173 return result; |
| 1347 } | 1174 } |
| 1348 if (new_size == 0) { | 1175 if (new_size == 0) { |
| 1349 MallocHook::InvokeDeleteHook(old_ptr); | 1176 MallocHook::InvokeDeleteHook(old_ptr); |
| 1350 do_free(old_ptr); | 1177 do_free(old_ptr); |
| 1351 return NULL; | 1178 return NULL; |
| 1352 } | 1179 } |
| 1353 return do_realloc(old_ptr, new_size); | 1180 return do_realloc(old_ptr, new_size); |
| 1354 } | 1181 } |
| 1355 | 1182 |
| 1356 extern "C" PERFTOOLS_DLL_DECL void* tc_new(size_t size) { | 1183 extern "C" void* tc_new(size_t size) { |
| 1357 void* p = cpp_alloc(size, false); | 1184 void* p = cpp_alloc(size, false); |
| 1358 // We keep this next instruction out of cpp_alloc for a reason: when | 1185 // We keep this next instruction out of cpp_alloc for a reason: when |
| 1359 // it's in, and new just calls cpp_alloc, the optimizer may fold the | 1186 // it's in, and new just calls cpp_alloc, the optimizer may fold the |
| 1360 // new call into cpp_alloc, which messes up our whole section-based | 1187 // new call into cpp_alloc, which messes up our whole section-based |
| 1361 // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc | 1188 // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc |
| 1362 // isn't the last thing this fn calls, and prevents the folding. | 1189 // isn't the last thing this fn calls, and prevents the folding. |
| 1363 MallocHook::InvokeNewHook(p, size); | 1190 MallocHook::InvokeNewHook(p, size); |
| 1364 return p; | 1191 return p; |
| 1365 } | 1192 } |
| 1366 | 1193 |
| 1367 extern "C" PERFTOOLS_DLL_DECL void* tc_new_nothrow( | 1194 extern "C" void* tc_new_nothrow(size_t size, const std::nothrow_t&) __THROW { |
| 1368 size_t size, const std::nothrow_t&) __THROW { | |
| 1369 void* p = cpp_alloc(size, true); | 1195 void* p = cpp_alloc(size, true); |
| 1370 MallocHook::InvokeNewHook(p, size); | 1196 MallocHook::InvokeNewHook(p, size); |
| 1371 return p; | 1197 return p; |
| 1372 } | 1198 } |
| 1373 | 1199 |
| 1374 extern "C" PERFTOOLS_DLL_DECL void tc_delete(void* p) __THROW { | 1200 extern "C" void tc_delete(void* p) __THROW { |
| 1375 MallocHook::InvokeDeleteHook(p); | 1201 MallocHook::InvokeDeleteHook(p); |
| 1376 do_free(p); | 1202 do_free(p); |
| 1377 } | 1203 } |
| 1378 | 1204 |
| 1379 // Compilers define and use this (via ::operator delete(ptr, nothrow)). | 1205 extern "C" void* tc_newarray(size_t size) { |
| 1380 // But it's really the same as normal delete, so we just do the same thing. | |
| 1381 extern "C" PERFTOOLS_DLL_DECL void tc_delete_nothrow( | |
| 1382 void* p, const std::nothrow_t&) __THROW { | |
| 1383 MallocHook::InvokeDeleteHook(p); | |
| 1384 do_free(p); | |
| 1385 } | |
| 1386 | |
| 1387 extern "C" PERFTOOLS_DLL_DECL void* tc_newarray(size_t size) { | |
| 1388 void* p = cpp_alloc(size, false); | 1206 void* p = cpp_alloc(size, false); |
| 1389 // We keep this next instruction out of cpp_alloc for a reason: when | 1207 // We keep this next instruction out of cpp_alloc for a reason: when |
| 1390 // it's in, and new just calls cpp_alloc, the optimizer may fold the | 1208 // it's in, and new just calls cpp_alloc, the optimizer may fold the |
| 1391 // new call into cpp_alloc, which messes up our whole section-based | 1209 // new call into cpp_alloc, which messes up our whole section-based |
| 1392 // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc | 1210 // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc |
| 1393 // isn't the last thing this fn calls, and prevents the folding. | 1211 // isn't the last thing this fn calls, and prevents the folding. |
| 1394 MallocHook::InvokeNewHook(p, size); | 1212 MallocHook::InvokeNewHook(p, size); |
| 1395 return p; | 1213 return p; |
| 1396 } | 1214 } |
| 1397 | 1215 |
| 1398 extern "C" PERFTOOLS_DLL_DECL void* tc_newarray_nothrow( | 1216 extern "C" void* tc_newarray_nothrow(size_t size, const std::nothrow_t&) __THROW {
| 1399 size_t size, const std::nothrow_t&) __THROW { | |
| 1400 void* p = cpp_alloc(size, true); | 1217 void* p = cpp_alloc(size, true); |
| 1401 MallocHook::InvokeNewHook(p, size); | 1218 MallocHook::InvokeNewHook(p, size); |
| 1402 return p; | 1219 return p; |
| 1403 } | 1220 } |
| 1404 | 1221 |
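For context, the nothrow entry points above back the placement form new (std::nothrow), which returns NULL on failure instead of throwing std::bad_alloc. A minimal usage sketch (any conforming C++ runtime; under tcmalloc these calls route to tc_new_nothrow / tc_newarray_nothrow):

    #include <cassert>
    #include <cstddef>
    #include <new>

    int main() {
      int* one  = new (std::nothrow) int;        // tc_new_nothrow under tcmalloc
      int* many = new (std::nothrow) int[16];    // tc_newarray_nothrow under tcmalloc
      assert(one != NULL && many != NULL);
      delete one;        // scalar delete pairs with scalar new
      delete[] many;     // array delete pairs with array new
      return 0;
    }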
| 1405 extern "C" PERFTOOLS_DLL_DECL void tc_deletearray(void* p) __THROW { | 1222 extern "C" void tc_deletearray(void* p) __THROW { |
| 1406 MallocHook::InvokeDeleteHook(p); | 1223 MallocHook::InvokeDeleteHook(p); |
| 1407 do_free(p); | 1224 do_free(p); |
| 1408 } | 1225 } |
| 1409 | 1226 |
| 1410 extern "C" PERFTOOLS_DLL_DECL void tc_deletearray_nothrow( | 1227 extern "C" void* tc_memalign(size_t align, size_t size) __THROW { |
| 1411 void* p, const std::nothrow_t&) __THROW { | 1228 void* result = do_memalign(align, size); |
| 1412 MallocHook::InvokeDeleteHook(p); | |
| 1413 do_free(p); | |
| 1414 } | |
| 1415 | |
| 1416 extern "C" PERFTOOLS_DLL_DECL void* tc_memalign(size_t align, | |
| 1417 size_t size) __THROW { | |
| 1418 void* result = do_memalign_or_cpp_memalign(align, size); | |
| 1419 MallocHook::InvokeNewHook(result, size); | 1229 MallocHook::InvokeNewHook(result, size); |
| 1420 return result; | 1230 return result; |
| 1421 } | 1231 } |
| 1422 | 1232 |
| 1423 extern "C" PERFTOOLS_DLL_DECL int tc_posix_memalign( | 1233 extern "C" int tc_posix_memalign(void** result_ptr, size_t align, size_t size) |
| 1424 void** result_ptr, size_t align, size_t size) __THROW { | 1234 __THROW { |
| 1425 if (((align % sizeof(void*)) != 0) || | 1235 if (((align % sizeof(void*)) != 0) || |
| 1426 ((align & (align - 1)) != 0) || | 1236 ((align & (align - 1)) != 0) || |
| 1427 (align == 0)) { | 1237 (align == 0)) { |
| 1428 return EINVAL; | 1238 return EINVAL; |
| 1429 } | 1239 } |
| 1430 | 1240 |
| 1431 void* result = do_memalign_or_cpp_memalign(align, size); | 1241 void* result = do_memalign(align, size); |
| 1432 MallocHook::InvokeNewHook(result, size); | 1242 MallocHook::InvokeNewHook(result, size); |
| 1433 if (result == NULL) { | 1243 if (result == NULL) { |
| 1434 return ENOMEM; | 1244 return ENOMEM; |
| 1435 } else { | 1245 } else { |
| 1436 *result_ptr = result; | 1246 *result_ptr = result; |
| 1437 return 0; | 1247 return 0; |
| 1438 } | 1248 } |
| 1439 } | 1249 } |
| 1440 | 1250 |
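The EINVAL test in tc_posix_memalign encodes POSIX's requirements on the alignment: it must be a nonzero power of two and a multiple of sizeof(void*). Below is a sketch of the same predicate, with a hypothetical helper name, showing what each clause rejects.

    #include <cassert>
    #include <cstddef>

    // Hypothetical helper mirroring tc_posix_memalign's validity test.
    static bool valid_posix_memalign_alignment(size_t align) {
      return align != 0 &&
             (align & (align - 1)) == 0 &&      // power of two
             (align % sizeof(void*)) == 0;      // multiple of the pointer size
    }

    int main() {
      assert(valid_posix_memalign_alignment(sizeof(void*)));
      assert(valid_posix_memalign_alignment(4096));
      assert(!valid_posix_memalign_alignment(0));     // zero is rejected
      assert(!valid_posix_memalign_alignment(24));    // not a power of two
      assert(!valid_posix_memalign_alignment(2));     // below sizeof(void*) on 32/64-bit targets
      return 0;
    }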
| 1441 static size_t pagesize = 0; | 1251 static size_t pagesize = 0; |
| 1442 | 1252 |
| 1443 extern "C" PERFTOOLS_DLL_DECL void* tc_valloc(size_t size) __THROW { | 1253 extern "C" void* tc_valloc(size_t size) __THROW { |
| 1444 // Allocate page-aligned object of length >= size bytes | 1254 // Allocate page-aligned object of length >= size bytes |
| 1445 if (pagesize == 0) pagesize = getpagesize(); | 1255 if (pagesize == 0) pagesize = getpagesize(); |
| 1446 void* result = do_memalign_or_cpp_memalign(pagesize, size); | 1256 void* result = do_memalign(pagesize, size); |
| 1447 MallocHook::InvokeNewHook(result, size); | 1257 MallocHook::InvokeNewHook(result, size); |
| 1448 return result; | 1258 return result; |
| 1449 } | 1259 } |
| 1450 | 1260 |
| 1451 extern "C" PERFTOOLS_DLL_DECL void* tc_pvalloc(size_t size) __THROW { | 1261 extern "C" void* tc_pvalloc(size_t size) __THROW { |
| 1452 // Round up size to a multiple of pagesize | 1262 // Round up size to a multiple of pagesize |
| 1453 if (pagesize == 0) pagesize = getpagesize(); | 1263 if (pagesize == 0) pagesize = getpagesize(); |
| 1454 if (size == 0) { // pvalloc(0) should allocate one page, according to | |
| 1455 size = pagesize; // http://man.free4web.biz/man3/libmpatrol.3.html | |
| 1456 } | |
| 1457 size = (size + pagesize - 1) & ~(pagesize - 1); | 1264 size = (size + pagesize - 1) & ~(pagesize - 1); |
| 1458 void* result = do_memalign_or_cpp_memalign(pagesize, size); | 1265 void* result = do_memalign(pagesize, size); |
| 1459 MallocHook::InvokeNewHook(result, size); | 1266 MallocHook::InvokeNewHook(result, size); |
| 1460 return result; | 1267 return result; |
| 1461 } | 1268 } |
| 1462 | 1269 |
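The rounding expression in tc_pvalloc, (size + pagesize - 1) & ~(pagesize - 1), rounds size up to the next multiple of the page size and relies on the page size being a power of two. Note that the OLD side also forces size == 0 up to a full page; without that special case the rounding leaves 0 unchanged, as this sketch (with an assumed 4 KiB page) shows.

    #include <cassert>
    #include <cstddef>

    static size_t round_up_to_page(size_t size, size_t pagesize) {
      return (size + pagesize - 1) & ~(pagesize - 1);   // pagesize must be a power of two
    }

    int main() {
      const size_t kPage = 4096;                        // assumed page size for the example
      assert(round_up_to_page(1, kPage)    == 4096);
      assert(round_up_to_page(4096, kPage) == 4096);    // already a multiple: unchanged
      assert(round_up_to_page(4097, kPage) == 8192);
      assert(round_up_to_page(0, kPage)    == 0);       // 0 stays 0 without the old special case
      return 0;
    }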
| 1463 extern "C" PERFTOOLS_DLL_DECL void tc_malloc_stats(void) __THROW { | 1270 extern "C" void tc_malloc_stats(void) __THROW { |
| 1464 do_malloc_stats(); | 1271 do_malloc_stats(); |
| 1465 } | 1272 } |
| 1466 | 1273 |
| 1467 extern "C" PERFTOOLS_DLL_DECL int tc_mallopt(int cmd, int value) __THROW { | 1274 extern "C" int tc_mallopt(int cmd, int value) __THROW { |
| 1468 return do_mallopt(cmd, value); | 1275 return do_mallopt(cmd, value); |
| 1469 } | 1276 } |
| 1470 | 1277 |
| 1471 #ifdef HAVE_STRUCT_MALLINFO | 1278 #ifdef HAVE_STRUCT_MALLINFO |
| 1472 extern "C" PERFTOOLS_DLL_DECL struct mallinfo tc_mallinfo(void) __THROW { | 1279 extern "C" struct mallinfo tc_mallinfo(void) __THROW { |
| 1473 return do_mallinfo(); | 1280 return do_mallinfo(); |
| 1474 } | 1281 } |
| 1475 #endif | 1282 #endif |
| 1476 | 1283 |
| 1477 // This function behaves similarly to MSVC's _set_new_mode. | 1284 // This function behaves similarly to MSVC's _set_new_mode. |
| 1478 // If flag is 0 (default), calls to malloc will behave normally. | 1285 // If flag is 0 (default), calls to malloc will behave normally. |
| 1479 // If flag is 1, calls to malloc will behave like calls to new, | 1286 // If flag is 1, calls to malloc will behave like calls to new, |
| 1480 // and the std_new_handler will be invoked on failure. | 1287 // and the std_new_handler will be invoked on failure. |
| 1481 // Returns the previous mode. | 1288 // Returns the previous mode. |
| 1482 extern "C" PERFTOOLS_DLL_DECL int tc_set_new_mode(int flag) __THROW { | 1289 extern "C" int tc_set_new_mode(int flag) __THROW { |
| 1483 int old_mode = tc_new_mode; | 1290 int old_mode = tc_new_mode; |
| 1484 tc_new_mode = flag; | 1291 tc_new_mode = flag; |
| 1485 return old_mode; | 1292 return old_mode; |
| 1486 } | 1293 } |
| 1487 | 1294 |
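A usage sketch of the flag (it requires linking against tcmalloc, and the simplified extern declaration below stands in for the real header): with tc_set_new_mode(1), a failing malloc takes the same new-handler path as a failing operator new instead of simply returning NULL.

    #include <cstdio>
    #include <cstdlib>
    #include <new>

    extern "C" int tc_set_new_mode(int flag);   // simplified declaration for the sketch

    static void OnOutOfMemory() {
      std::fprintf(stderr, "allocation failed\n");
      std::abort();
    }

    int main() {
      std::set_new_handler(OnOutOfMemory);
      int old_mode = tc_set_new_mode(1);   // malloc failures now invoke the new-handler
      void* p = std::malloc(64);
      std::free(p);
      tc_set_new_mode(old_mode);           // restore the previous mode
      return 0;
    }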
| 1488 | 1295 |
| 1489 // Override __libc_memalign in libc on linux boxes specially. | 1296 // Override __libc_memalign in libc on linux boxes specially. |
| 1490 // They have a bug in libc that causes them to (very rarely) allocate | 1297 // They have a bug in libc that causes them to (very rarely) allocate |
| 1491 // with __libc_memalign() yet deallocate with free() and the | 1298 // with __libc_memalign() yet deallocate with free() and the |
| 1492 // definitions above don't catch it. | 1299 // definitions above don't catch it. |
| 1493 // This function is an exception to the rule of calling MallocHook method | 1300 // This function is an exception to the rule of calling MallocHook method |
| 1494 // from the stack frame of the allocation function; | 1301 // from the stack frame of the allocation function; |
| 1495 // heap-checker handles this special case explicitly. | 1302 // heap-checker handles this special case explicitly. |
| 1496 #ifndef TCMALLOC_FOR_DEBUGALLOCATION | |
| 1497 static void *MemalignOverride(size_t align, size_t size, const void *caller) | 1303 static void *MemalignOverride(size_t align, size_t size, const void *caller) |
| 1498 __THROW ATTRIBUTE_SECTION(google_malloc); | 1304 __THROW ATTRIBUTE_SECTION(google_malloc); |
| 1499 | 1305 |
| 1500 static void *MemalignOverride(size_t align, size_t size, const void *caller) | 1306 static void *MemalignOverride(size_t align, size_t size, const void *caller) |
| 1501 __THROW { | 1307 __THROW { |
| 1502 void* result = do_memalign_or_cpp_memalign(align, size); | 1308 void* result = do_memalign(align, size); |
| 1503 MallocHook::InvokeNewHook(result, size); | 1309 MallocHook::InvokeNewHook(result, size); |
| 1504 return result; | 1310 return result; |
| 1505 } | 1311 } |
| 1506 void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride; | 1312 void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride; |
| 1507 #endif // #ifndef TCMALLOC_FOR_DEBUGALLOCATION | |