Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(274)

Side by Side Diff: third_party/tcmalloc/chromium/src/tcmalloc.cc

Issue 576001: Merged third_party/tcmalloc/vendor/src(google-perftools r87) into... (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: Removed the unnecessary printf and ASSERT(0) Created 10 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright (c) 2005, Google Inc. 1 // Copyright (c) 2005, Google Inc.
2 // All rights reserved. 2 // All rights reserved.
3 // 3 //
4 // Redistribution and use in source and binary forms, with or without 4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are 5 // modification, are permitted provided that the following conditions are
6 // met: 6 // met:
7 // 7 //
8 // * Redistributions of source code must retain the above copyright 8 // * Redistributions of source code must retain the above copyright
9 // notice, this list of conditions and the following disclaimer. 9 // notice, this list of conditions and the following disclaimer.
10 // * Redistributions in binary form must reproduce the above 10 // * Redistributions in binary form must reproduce the above
(...skipping 106 matching lines...) Expand 10 before | Expand all | Expand 10 after
117 #include "base/spinlock.h" 117 #include "base/spinlock.h"
118 #include "common.h" 118 #include "common.h"
119 #include "malloc_hook-inl.h" 119 #include "malloc_hook-inl.h"
120 #include <google/malloc_hook.h> 120 #include <google/malloc_hook.h>
121 #include <google/malloc_extension.h> 121 #include <google/malloc_extension.h>
122 #include "central_freelist.h" 122 #include "central_freelist.h"
123 #include "internal_logging.h" 123 #include "internal_logging.h"
124 #include "linked_list.h" 124 #include "linked_list.h"
125 #include "maybe_threads.h" 125 #include "maybe_threads.h"
126 #include "page_heap.h" 126 #include "page_heap.h"
127 #include "page_heap_allocator.h"
128 #include "pagemap.h" 127 #include "pagemap.h"
129 #include "span.h" 128 #include "span.h"
130 #include "static_vars.h" 129 #include "static_vars.h"
131 #include "system-alloc.h" 130 #include "system-alloc.h"
132 #include "tcmalloc_guard.h" 131 #include "tcmalloc_guard.h"
133 #include "thread_cache.h" 132 #include "thread_cache.h"
134 133
135 #if (defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)) && !defined(WIN32_OVERRIDE_ALLOCATORS) 134 #if (defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)) && !defined(WIN32_OVERRIDE_ALLOCATORS)
136 # define WIN32_DO_PATCHING 1 135 # define WIN32_DO_PATCHING 1
137 #endif 136 #endif
138 137
138 using std::max;
139 using tcmalloc::PageHeap; 139 using tcmalloc::PageHeap;
140 using tcmalloc::PageHeapAllocator;
141 using tcmalloc::SizeMap; 140 using tcmalloc::SizeMap;
142 using tcmalloc::Span; 141 using tcmalloc::Span;
143 using tcmalloc::StackTrace; 142 using tcmalloc::StackTrace;
144 using tcmalloc::Static; 143 using tcmalloc::Static;
145 using tcmalloc::ThreadCache; 144 using tcmalloc::ThreadCache;
146 145
147 // __THROW is defined in glibc systems. It means, counter-intuitively, 146 // __THROW is defined in glibc systems. It means, counter-intuitively,
148 // "This function will never throw an exception." It's an optional 147 // "This function will never throw an exception." It's an optional
149 // optimization tool, but we may need to use it to match glibc prototypes. 148 // optimization tool, but we may need to use it to match glibc prototypes.
150 #ifndef __THROW // I guess we're not on a glibc system 149 #ifndef __THROW // I guess we're not on a glibc system
(...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after
222 void* tc_newarray(size_t size) 221 void* tc_newarray(size_t size)
223 ATTRIBUTE_SECTION(google_malloc); 222 ATTRIBUTE_SECTION(google_malloc);
224 void tc_deletearray(void* p) __THROW 223 void tc_deletearray(void* p) __THROW
225 ATTRIBUTE_SECTION(google_malloc); 224 ATTRIBUTE_SECTION(google_malloc);
226 225
227 // And the nothrow variants of these: 226 // And the nothrow variants of these:
228 void* tc_new_nothrow(size_t size, const std::nothrow_t&) __THROW 227 void* tc_new_nothrow(size_t size, const std::nothrow_t&) __THROW
229 ATTRIBUTE_SECTION(google_malloc); 228 ATTRIBUTE_SECTION(google_malloc);
230 void* tc_newarray_nothrow(size_t size, const std::nothrow_t&) __THROW 229 void* tc_newarray_nothrow(size_t size, const std::nothrow_t&) __THROW
231 ATTRIBUTE_SECTION(google_malloc); 230 ATTRIBUTE_SECTION(google_malloc);
232 } 231 // Surprisingly, compilers use a nothrow-delete internally. See, eg:
232 // http://www.dinkumware.com/manuals/?manual=compleat&page=new.html
233 void tc_delete_nothrow(void* ptr, const std::nothrow_t&) __THROW
234 ATTRIBUTE_SECTION(google_malloc);
235 void tc_deletearray_nothrow(void* ptr, const std::nothrow_t&) __THROW
236 ATTRIBUTE_SECTION(google_malloc);
237 } // extern "C"
233 238
234 // Override the libc functions to prefer our own instead. This comes 239 // Override the libc functions to prefer our own instead. This comes
235 // first so code in tcmalloc.cc can use the overridden versions. One 240 // first so code in tcmalloc.cc can use the overridden versions. One
236 // exception: in windows, by default, we patch our code into these 241 // exception: in windows, by default, we patch our code into these
237 // functions (via src/windows/patch_function.cc) rather than override 242 // functions (via src/windows/patch_function.cc) rather than override
238 // them. In that case, we don't want to do this overriding here. 243 // them. In that case, we don't want to do this overriding here.
239 #ifndef WIN32_DO_PATCHING 244 #if !defined(WIN32_DO_PATCHING) && !defined(TCMALLOC_FOR_DEBUGALLOCATION)
240 245
241 // TODO(mbelshe): Turn off TCMalloc's symbols for libc. We do that 246 // TODO(mbelshe): Turn off TCMalloc's symbols for libc. We do that
242 // elsewhere. 247 // elsewhere.
243 #if 0 248 #ifndef _WIN32
244 249
245 #if defined(__GNUC__) && !defined(__MACH__) 250 #if defined(__GNUC__) && !defined(__MACH__)
246 // Potentially faster variants that use the gcc alias extension. 251 // Potentially faster variants that use the gcc alias extension.
247 // Mach-O (Darwin) does not support weak aliases, hence the __MACH__ check.
248 // FreeBSD does support aliases, but apparently not correctly. :-( 252 // FreeBSD does support aliases, but apparently not correctly. :-(
253 // NOTE: we make many of these symbols weak, but do so in the makefile
254 // (via objcopy -W) and not here. That ends up being more portable.
249 # define ALIAS(x) __attribute__ ((alias (x))) 255 # define ALIAS(x) __attribute__ ((alias (x)))
250 void* operator new(size_t size) ALIAS("tc_new"); 256 void* operator new(size_t size) ALIAS("tc_new");
251 void operator delete(void* p) __THROW ALIAS("tc_delete"); 257 void operator delete(void* p) __THROW ALIAS("tc_delete");
252 void* operator new[](size_t size) ALIAS("tc_newarray"); 258 void* operator new[](size_t size) ALIAS("tc_newarray");
253 void operator delete[](void* p) __THROW ALIAS("tc_deletearray"); 259 void operator delete[](void* p) __THROW ALIAS("tc_deletearray");
254 void* operator new(size_t size, const std::nothrow_t&) __THROW 260 void* operator new(size_t size, const std::nothrow_t&) __THROW
255 ALIAS("tc_new_nothrow"); 261 ALIAS("tc_new_nothrow");
256 void* operator new[](size_t size, const std::nothrow_t&) __THROW 262 void* operator new[](size_t size, const std::nothrow_t&) __THROW
257 ALIAS("tc_newarray_nothrow"); 263 ALIAS("tc_newarray_nothrow");
264 void operator delete(void* size, const std::nothrow_t&) __THROW
265 ALIAS("tc_delete_nothrow");
266 void operator delete[](void* size, const std::nothrow_t&) __THROW
267 ALIAS("tc_deletearray_nothrow") ;
258 extern "C" { 268 extern "C" {
259 void* malloc(size_t size) __THROW ALIAS("tc_malloc"); 269 void* malloc(size_t size) __THROW ALIAS("tc_malloc");
260 void free(void* ptr) __THROW ALIAS("tc_free"); 270 void free(void* ptr) __THROW ALIAS("tc_free");
261 void* realloc(void* ptr, size_t size) __THROW ALIAS("tc_realloc"); 271 void* realloc(void* ptr, size_t size) __THROW ALIAS("tc_realloc");
262 void* calloc(size_t n, size_t size) __THROW ALIAS("tc_calloc"); 272 void* calloc(size_t n, size_t size) __THROW ALIAS("tc_calloc");
263 void cfree(void* ptr) __THROW ALIAS("tc_cfree"); 273 void cfree(void* ptr) __THROW ALIAS("tc_cfree");
264 void* memalign(size_t align, size_t s) __THROW ALIAS("tc_memalign"); 274 void* memalign(size_t align, size_t s) __THROW ALIAS("tc_memalign");
265 void* valloc(size_t size) __THROW ALIAS("tc_valloc"); 275 void* valloc(size_t size) __THROW ALIAS("tc_valloc");
266 void* pvalloc(size_t size) __THROW ALIAS("tc_pvalloc"); 276 void* pvalloc(size_t size) __THROW ALIAS("tc_pvalloc");
267 int posix_memalign(void** r, size_t a, size_t s) __THROW 277 int posix_memalign(void** r, size_t a, size_t s) __THROW
268 ALIAS("tc_posix_memalign"); 278 ALIAS("tc_posix_memalign");
269 void malloc_stats(void) __THROW ALIAS("tc_malloc_stats"); 279 void malloc_stats(void) __THROW ALIAS("tc_malloc_stats");
270 int mallopt(int cmd, int value) __THROW ALIAS("tc_mallopt"); 280 int mallopt(int cmd, int value) __THROW ALIAS("tc_mallopt");
271 #ifdef HAVE_STRUCT_MALLINFO 281 #ifdef HAVE_STRUCT_MALLINFO
272 struct mallinfo mallinfo(void) __THROW ALIAS("tc_mallinfo"); 282 struct mallinfo mallinfo(void) __THROW ALIAS("tc_mallinfo");
273 #endif 283 #endif
274 // Some library routines on RedHat 9 allocate memory using malloc()
275 // and free it using __libc_free() (or vice-versa). Since we provide
276 // our own implementations of malloc/free, we need to make sure that
277 // the __libc_XXX variants (defined as part of glibc) also point to
278 // the same implementations.
279 # if defined(__GLIBC__)
280 void* __libc_malloc(size_t size) ALIAS("tc_malloc");
281 void __libc_free(void* ptr) ALIAS("tc_free");
282 void* __libc_realloc(void* ptr, size_t size) ALIAS("tc_realloc");
283 void* __libc_calloc(size_t n, size_t size) ALIAS("tc_calloc");
284 void __libc_cfree(void* ptr) ALIAS("tc_cfree");
285 void* __libc_memalign(size_t align, size_t s) ALIAS("tc_memalign");
286 void* __libc_valloc(size_t size) ALIAS("tc_valloc");
287 void* __libc_pvalloc(size_t size) ALIAS("tc_pvalloc");
288 int __posix_memalign(void** r, size_t a, size_t s) ALIAS("tc_posix_memalign");
289 # define HAVE_ALIASED___LIBC 1
290 # endif // #if defined(__GLIBC__)
291 } // extern "C" 284 } // extern "C"
292 # undef ALIAS 285 #else // #if defined(__GNUC__) && !defined(__MACH__)
293 #else
294 // Portable wrappers 286 // Portable wrappers
295 void* operator new(size_t size) { return tc_new(size); } 287 void* operator new(size_t size) { return tc_new(size); }
296 void operator delete(void* p) __THROW { tc_delete(p); } 288 void operator delete(void* p) __THROW { tc_delete(p); }
297 void* operator new[](size_t size) { return tc_newarray(size); } 289 void* operator new[](size_t size) { return tc_newarray(size); }
298 void operator delete[](void* p) __THROW { tc_deletearray(p); } 290 void operator delete[](void* p) __THROW { tc_deletearray(p); }
299 void* operator new(size_t size, const std::nothrow_t& nt) __THROW { 291 void* operator new(size_t size, const std::nothrow_t& nt) __THROW {
300 return tc_new_nothrow(size, nt); 292 return tc_new_nothrow(size, nt);
301 } 293 }
302 void* operator new[](size_t size, const std::nothrow_t& nt) __THROW { 294 void* operator new[](size_t size, const std::nothrow_t& nt) __THROW {
303 return tc_newarray_nothrow(size, nt); 295 return tc_newarray_nothrow(size, nt);
304 } 296 }
297 void operator delete(void* ptr, const std::nothrow_t& nt) __THROW {
298 return tc_delete_nothrow(ptr, nt);
299 }
300 void operator delete[](void* ptr, const std::nothrow_t& nt) __THROW {
301 return tc_deletearray_nothrow(ptr, nt);
302 }
305 extern "C" { 303 extern "C" {
306 void* malloc(size_t s) __THROW { return tc_malloc(s); } 304 void* malloc(size_t s) __THROW { return tc_malloc(s); }
307 void free(void* p) __THROW { tc_free(p); } 305 void free(void* p) __THROW { tc_free(p); }
308 void* realloc(void* p, size_t s) __THROW { return tc_realloc(p, s); } 306 void* realloc(void* p, size_t s) __THROW { return tc_realloc(p, s); }
309 void* calloc(size_t n, size_t s) __THROW { return tc_calloc(n, s); } 307 void* calloc(size_t n, size_t s) __THROW { return tc_calloc(n, s); }
310 void cfree(void* p) __THROW { tc_cfree(p); } 308 void cfree(void* p) __THROW { tc_cfree(p); }
311 void* memalign(size_t a, size_t s) __THROW { return tc_memalign(a, s); } 309 void* memalign(size_t a, size_t s) __THROW { return tc_memalign(a, s); }
312 void* valloc(size_t s) __THROW { return tc_valloc(s); } 310 void* valloc(size_t s) __THROW { return tc_valloc(s); }
313 void* pvalloc(size_t s) __THROW { return tc_pvalloc(s); } 311 void* pvalloc(size_t s) __THROW { return tc_pvalloc(s); }
314 int posix_memalign(void** r, size_t a, size_t s) __THROW { 312 int posix_memalign(void** r, size_t a, size_t s) __THROW {
315 return tc_posix_memalign(r, a, s); 313 return tc_posix_memalign(r, a, s);
316 } 314 }
317 void malloc_stats(void) __THROW { tc_malloc_stats(); } 315 void malloc_stats(void) __THROW { tc_malloc_stats(); }
318 int mallopt(int cmd, int v) __THROW { return tc_mallopt(cmd, v); } 316 int mallopt(int cmd, int v) __THROW { return tc_mallopt(cmd, v); }
319 #ifdef HAVE_STRUCT_MALLINFO 317 #ifdef HAVE_STRUCT_MALLINFO
320 struct mallinfo mallinfo(void) __THROW { return tc_mallinfo(); } 318 struct mallinfo mallinfo(void) __THROW { return tc_mallinfo(); }
321 #endif 319 #endif
322 } // extern C 320 } // extern "C"
323 #endif // #if defined(__GNUC__) 321 #endif // #if defined(__GNUC__)
324 322
325 #ifndef HAVE_ALIASED___LIBC 323 // Some library routines on RedHat 9 allocate memory using malloc()
324 // and free it using __libc_free() (or vice-versa). Since we provide
325 // our own implementations of malloc/free, we need to make sure that
326 // the __libc_XXX variants (defined as part of glibc) also point to
327 // the same implementations.
328 #ifdef __GLIBC__ // only glibc defines __libc_*
326 extern "C" { 329 extern "C" {
330 #ifdef ALIAS
331 void* __libc_malloc(size_t size) ALIAS("tc_malloc");
332 void __libc_free(void* ptr) ALIAS("tc_free");
333 void* __libc_realloc(void* ptr, size_t size) ALIAS("tc_realloc");
334 void* __libc_calloc(size_t n, size_t size) ALIAS("tc_calloc");
335 void __libc_cfree(void* ptr) ALIAS("tc_cfree");
336 void* __libc_memalign(size_t align, size_t s) ALIAS("tc_memalign");
337 void* __libc_valloc(size_t size) ALIAS("tc_valloc");
338 void* __libc_pvalloc(size_t size) ALIAS("tc_pvalloc");
339 int __posix_memalign(void** r, size_t a, size_t s) ALIAS("tc_posix_memalign");
340 #else // #ifdef ALIAS
327 void* __libc_malloc(size_t size) { return malloc(size); } 341 void* __libc_malloc(size_t size) { return malloc(size); }
328 void __libc_free(void* ptr) { free(ptr); } 342 void __libc_free(void* ptr) { free(ptr); }
329 void* __libc_realloc(void* ptr, size_t size) { return realloc(ptr, size); } 343 void* __libc_realloc(void* ptr, size_t size) { return realloc(ptr, size); }
330 void* __libc_calloc(size_t n, size_t size) { return calloc(n, size); } 344 void* __libc_calloc(size_t n, size_t size) { return calloc(n, size); }
331 void __libc_cfree(void* ptr) { cfree(ptr); } 345 void __libc_cfree(void* ptr) { cfree(ptr); }
332 void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); } 346 void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); }
333 void* __libc_valloc(size_t size) { return valloc(size); } 347 void* __libc_valloc(size_t size) { return valloc(size); }
334 void* __libc_pvalloc(size_t size) { return pvalloc(size); } 348 void* __libc_pvalloc(size_t size) { return pvalloc(size); }
335 int __posix_memalign(void** r, size_t a, size_t s) { 349 int __posix_memalign(void** r, size_t a, size_t s) {
336 return posix_memalign(r, a, s); 350 return posix_memalign(r, a, s);
337 } 351 }
352 #endif // #ifdef ALIAS
338 } // extern "C" 353 } // extern "C"
339 #endif // #ifndef HAVE_ALIASED___LIBC 354 #endif // ifdef __GLIBC__
340 355
341 #endif // #ifdef 0 356 #endif // #ifndef _WIN32
357 #undef ALIAS
342 358
343 #endif // #ifndef WIN32_DO_PATCHING 359 #endif // #ifndef(WIN32_DO_PATCHING) && ndef(TCMALLOC_FOR_DEBUGALLOCATION)
344 360
345 361
346 // ----------------------- IMPLEMENTATION ------------------------------- 362 // ----------------------- IMPLEMENTATION -------------------------------
347 363
348 // These routines are called by free(), realloc(), etc. if the pointer is 364 static int tc_new_mode = 0; // See tc_set_new_mode().
349 // invalid. This is a cheap (source-editing required) kind of exception 365
350 // handling for these routines. 366 // Routines such as free() and realloc() catch some erroneous pointers
367 // passed to them, and invoke the below when they do. (An erroneous pointer
368 // won't be caught if it's within a valid span or a stale span for which
369 // the pagemap cache has a non-zero sizeclass.) This is a cheap (source-editing
370 // required) kind of exception handling for these routines.
351 namespace { 371 namespace {
352 void InvalidFree(void* ptr) { 372 void InvalidFree(void* ptr) {
353 CRASH("Attempt to free invalid pointer: %p\n", ptr); 373 CRASH("Attempt to free invalid pointer: %p\n", ptr);
354 } 374 }
355 375
356 size_t InvalidGetSizeForRealloc(void* old_ptr) { 376 size_t InvalidGetSizeForRealloc(void* old_ptr) {
357 CRASH("Attempt to realloc invalid pointer: %p\n", old_ptr); 377 CRASH("Attempt to realloc invalid pointer: %p\n", old_ptr);
358 return 0; 378 return 0;
359 } 379 }
360 380
361 size_t InvalidGetAllocatedSize(void* ptr) { 381 size_t InvalidGetAllocatedSize(void* ptr) {
362 CRASH("Attempt to get the size of an invalid pointer: %p\n", ptr); 382 CRASH("Attempt to get the size of an invalid pointer: %p\n", ptr);
363 return 0; 383 return 0;
364 } 384 }
365 } // unnamed namespace 385 } // unnamed namespace
366 386
367 // Extract interesting stats 387 // Extract interesting stats
368 struct TCMallocStats { 388 struct TCMallocStats {
369 uint64_t system_bytes; // Bytes alloced from system 389 uint64_t thread_bytes; // Bytes in thread caches
370 uint64_t committed_bytes; // Bytes alloced and committed from system 390 uint64_t central_bytes; // Bytes in central cache
371 uint64_t thread_bytes; // Bytes in thread caches 391 uint64_t transfer_bytes; // Bytes in central transfer cache
372 uint64_t central_bytes; // Bytes in central cache 392 uint64_t metadata_bytes; // Bytes alloced for metadata
373 uint64_t transfer_bytes; // Bytes in central transfer cache 393 PageHeap::Stats pageheap; // Stats from page heap
374 uint64_t pageheap_bytes; // Bytes in page heap
375 uint64_t metadata_bytes; // Bytes alloced for metadata
376 }; 394 };
377 395
378 // Get stats into "r". Also get per-size-class counts if class_count != NULL 396 // Get stats into "r". Also get per-size-class counts if class_count != NULL
379 static void ExtractStats(TCMallocStats* r, uint64_t* class_count) { 397 static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
380 r->central_bytes = 0; 398 r->central_bytes = 0;
381 r->transfer_bytes = 0; 399 r->transfer_bytes = 0;
382 for (int cl = 0; cl < kNumClasses; ++cl) { 400 for (int cl = 0; cl < kNumClasses; ++cl) {
383 const int length = Static::central_cache()[cl].length(); 401 const int length = Static::central_cache()[cl].length();
384 const int tc_length = Static::central_cache()[cl].tc_length(); 402 const int tc_length = Static::central_cache()[cl].tc_length();
385 const size_t size = static_cast<uint64_t>( 403 const size_t size = static_cast<uint64_t>(
386 Static::sizemap()->ByteSizeForClass(cl)); 404 Static::sizemap()->ByteSizeForClass(cl));
387 r->central_bytes += (size * length); 405 r->central_bytes += (size * length);
388 r->transfer_bytes += (size * tc_length); 406 r->transfer_bytes += (size * tc_length);
389 if (class_count) class_count[cl] = length + tc_length; 407 if (class_count) class_count[cl] = length + tc_length;
390 } 408 }
391 409
392 // Add stats from per-thread heaps 410 // Add stats from per-thread heaps
393 r->thread_bytes = 0; 411 r->thread_bytes = 0;
394 { // scope 412 { // scope
395 SpinLockHolder h(Static::pageheap_lock()); 413 SpinLockHolder h(Static::pageheap_lock());
396 ThreadCache::GetThreadStats(&r->thread_bytes, class_count); 414 ThreadCache::GetThreadStats(&r->thread_bytes, class_count);
397 }
398
399 { //scope
400 SpinLockHolder h(Static::pageheap_lock());
401 r->system_bytes = Static::pageheap()->SystemBytes();
402 r->committed_bytes = Static::pageheap()->CommittedBytes();
403 r->metadata_bytes = tcmalloc::metadata_system_bytes(); 415 r->metadata_bytes = tcmalloc::metadata_system_bytes();
404 r->pageheap_bytes = Static::pageheap()->FreeBytes(); 416 r->pageheap = Static::pageheap()->stats();
405 } 417 }
406 } 418 }
407 419
408 // WRITE stats to "out" 420 // WRITE stats to "out"
409 static void DumpStats(TCMalloc_Printer* out, int level) { 421 static void DumpStats(TCMalloc_Printer* out, int level) {
410 TCMallocStats stats; 422 TCMallocStats stats;
411 uint64_t class_count[kNumClasses]; 423 uint64_t class_count[kNumClasses];
412 ExtractStats(&stats, (level >= 2 ? class_count : NULL)); 424 ExtractStats(&stats, (level >= 2 ? class_count : NULL));
413 425
414 static const double MB = 1048576.0; 426 static const double MB = 1048576.0;
415 427
416 const uint64_t bytes_in_use = stats.system_bytes 428 const uint64_t bytes_in_use = stats.pageheap.system_bytes
417 - stats.pageheap_bytes 429 - stats.pageheap.free_bytes
430 - stats.pageheap.unmapped_bytes
418 - stats.central_bytes 431 - stats.central_bytes
419 - stats.transfer_bytes 432 - stats.transfer_bytes
420 - stats.thread_bytes; 433 - stats.thread_bytes;
421 434
422 out->printf("WASTE: %7.1f MB committed but not used\n" 435 out->printf("WASTE: %7.1f MB committed but not used\n"
423 "WASTE: %7.1f MB bytes committed, %7.1f MB bytes in use\n" 436 "WASTE: %7.1f MB bytes committed, %7.1f MB bytes in use\n"
424 "WASTE: committed/used ratio of %f\n", 437 "WASTE: committed/used ratio of %f\n",
425 (stats.committed_bytes - bytes_in_use) / MB, 438 (stats.pageheap.committed_bytes - bytes_in_use) / MB,
426 stats.committed_bytes / MB, 439 stats.pageheap.committed_bytes / MB,
427 bytes_in_use / MB, 440 bytes_in_use / MB,
428 stats.committed_bytes / static_cast<double>(bytes_in_use)); 441 stats.pageheap.committed_bytes / static_cast<double>(bytes_in_use));
429 442
430 if (level >= 2) { 443 if (level >= 2) {
431 out->printf("------------------------------------------------\n"); 444 out->printf("------------------------------------------------\n");
445 out->printf("Size class breakdown\n");
446 out->printf("------------------------------------------------\n");
432 uint64_t cumulative = 0; 447 uint64_t cumulative = 0;
433 for (int cl = 0; cl < kNumClasses; ++cl) { 448 for (int cl = 0; cl < kNumClasses; ++cl) {
434 if (class_count[cl] > 0) { 449 if (class_count[cl] > 0) {
435 uint64_t class_bytes = 450 uint64_t class_bytes =
436 class_count[cl] * Static::sizemap()->ByteSizeForClass(cl); 451 class_count[cl] * Static::sizemap()->ByteSizeForClass(cl);
437 cumulative += class_bytes; 452 cumulative += class_bytes;
438 out->printf("class %3d [ %8" PRIuS " bytes ] : " 453 out->printf("class %3d [ %8" PRIuS " bytes ] : "
439 "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n", 454 "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n",
440 cl, Static::sizemap()->ByteSizeForClass(cl), 455 cl, Static::sizemap()->ByteSizeForClass(cl),
441 class_count[cl], 456 class_count[cl],
442 class_bytes / MB, 457 class_bytes / MB,
443 cumulative / MB); 458 cumulative / MB);
444 } 459 }
445 } 460 }
446 461
447 SpinLockHolder h(Static::pageheap_lock()); 462 SpinLockHolder h(Static::pageheap_lock());
448 Static::pageheap()->Dump(out); 463 Static::pageheap()->Dump(out);
449 464
450 out->printf("------------------------------------------------\n"); 465 out->printf("------------------------------------------------\n");
451 DumpSystemAllocatorStats(out); 466 DumpSystemAllocatorStats(out);
452 } 467 }
453 468
454 out->printf("------------------------------------------------\n" 469 out->printf("------------------------------------------------\n"
455 "MALLOC: %12" PRIu64 " (%7.1f MB) Heap size\n" 470 "MALLOC: %12" PRIu64 " (%7.1f MB) Heap size\n"
456 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes committed\n" 471 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes committed\n"
457 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes in use by application\n" 472 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes in use by application\n"
458 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in page heap\n" 473 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in page heap\n"
474 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes unmapped in page heap\n"
459 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in central cache\n" 475 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in central cache\n"
460 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in transfer cache\n" 476 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in transfer cache\n"
461 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in thread caches\n" 477 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in thread caches\n"
462 "MALLOC: %12" PRIu64 " Spans in use\n" 478 "MALLOC: %12" PRIu64 " Spans in use\n"
463 "MALLOC: %12" PRIu64 " Thread heaps in use\n" 479 "MALLOC: %12" PRIu64 " Thread heaps in use\n"
464 "MALLOC: %12" PRIu64 " (%7.1f MB) Metadata allocated\n" 480 "MALLOC: %12" PRIu64 " (%7.1f MB) Metadata allocated\n"
465 "------------------------------------------------\n", 481 "------------------------------------------------\n",
466 stats.system_bytes, stats.system_bytes / MB, 482 stats.pageheap.system_bytes, stats.pageheap.system_bytes / MB,
467 stats.committed_bytes, stats.committed_bytes / MB, 483 stats.pageheap.committed_bytes, stats.pageheap.committed_bytes / MB,
468 bytes_in_use, bytes_in_use / MB, 484 bytes_in_use, bytes_in_use / MB,
469 stats.pageheap_bytes, stats.pageheap_bytes / MB, 485 stats.pageheap.free_bytes, stats.pageheap.free_bytes / MB,
486 stats.pageheap.unmapped_bytes, stats.pageheap.unmapped_bytes / MB,
470 stats.central_bytes, stats.central_bytes / MB, 487 stats.central_bytes, stats.central_bytes / MB,
471 stats.transfer_bytes, stats.transfer_bytes / MB, 488 stats.transfer_bytes, stats.transfer_bytes / MB,
472 stats.thread_bytes, stats.thread_bytes / MB, 489 stats.thread_bytes, stats.thread_bytes / MB,
473 uint64_t(Static::span_allocator()->inuse()), 490 uint64_t(Static::span_allocator()->inuse()),
474 uint64_t(ThreadCache::HeapsInUse()), 491 uint64_t(ThreadCache::HeapsInUse()),
475 stats.metadata_bytes, stats.metadata_bytes / MB); 492 stats.metadata_bytes, stats.metadata_bytes / MB);
476 } 493 }
477 494
478 static void PrintStats(int level) { 495 static void PrintStats(int level) {
479 const int kBufferSize = 16 << 10; 496 const int kBufferSize = 16 << 10;
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after
523 result[used_slots+2] = reinterpret_cast<void*>(t->depth); 540 result[used_slots+2] = reinterpret_cast<void*>(t->depth);
524 for (int d = 0; d < t->depth; d++) { 541 for (int d = 0; d < t->depth; d++) {
525 result[used_slots+3+d] = t->stack[d]; 542 result[used_slots+3+d] = t->stack[d];
526 } 543 }
527 used_slots += 3 + t->depth; 544 used_slots += 3 + t->depth;
528 } 545 }
529 result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0)); 546 result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0));
530 return result; 547 return result;
531 } 548 }
532 549
550 static void IterateOverRanges(void* arg, MallocExtension::RangeFunction func) {
551 PageID page = 1; // Some code may assume that page==0 is never used
552 bool done = false;
553 while (!done) {
554 // Accumulate a small number of ranges in a local buffer
555 static const int kNumRanges = 16;
556 static base::MallocRange ranges[kNumRanges];
557 int n = 0;
558 {
559 SpinLockHolder h(Static::pageheap_lock());
560 while (n < kNumRanges) {
561 if (!Static::pageheap()->GetNextRange(page, &ranges[n])) {
562 done = true;
563 break;
564 } else {
565 uintptr_t limit = ranges[n].address + ranges[n].length;
566 page = (limit + kPageSize - 1) >> kPageShift;
567 n++;
568 }
569 }
570 }
571
572 for (int i = 0; i < n; i++) {
573 (*func)(arg, &ranges[i]);
574 }
575 }
576 }
577
533 // TCMalloc's support for extra malloc interfaces 578 // TCMalloc's support for extra malloc interfaces
534 class TCMallocImplementation : public MallocExtension { 579 class TCMallocImplementation : public MallocExtension {
580 private:
581 // ReleaseToSystem() might release more than the requested bytes because
582 // the page heap releases at the span granularity, and spans are of wildly
583 // different sizes. This member keeps track of the extra bytes bytes
584 // released so that the app can periodically call ReleaseToSystem() to
585 // release memory at a constant rate.
586 // NOTE: Protected by Static::pageheap_lock().
587 size_t extra_bytes_released_;
588
535 public: 589 public:
590 TCMallocImplementation()
591 : extra_bytes_released_(0) {
592 }
593
536 virtual void GetStats(char* buffer, int buffer_length) { 594 virtual void GetStats(char* buffer, int buffer_length) {
537 ASSERT(buffer_length > 0); 595 ASSERT(buffer_length > 0);
538 TCMalloc_Printer printer(buffer, buffer_length); 596 TCMalloc_Printer printer(buffer, buffer_length);
539 597
540 // Print level one stats unless lots of space is available 598 // Print level one stats unless lots of space is available
541 if (buffer_length < 10000) { 599 if (buffer_length < 10000) {
542 DumpStats(&printer, 1); 600 DumpStats(&printer, 1);
543 } else { 601 } else {
544 DumpStats(&printer, 2); 602 DumpStats(&printer, 2);
545 } 603 }
546 } 604 }
547 605
548 virtual void** ReadStackTraces(int* sample_period) { 606 virtual void** ReadStackTraces(int* sample_period) {
549 tcmalloc::StackTraceTable table; 607 tcmalloc::StackTraceTable table;
550 { 608 {
551 SpinLockHolder h(Static::pageheap_lock()); 609 SpinLockHolder h(Static::pageheap_lock());
552 Span* sampled = Static::sampled_objects(); 610 Span* sampled = Static::sampled_objects();
553 for (Span* s = sampled->next; s != sampled; s = s->next) { 611 for (Span* s = sampled->next; s != sampled; s = s->next) {
554 table.AddTrace(*reinterpret_cast<StackTrace*>(s->objects)); 612 table.AddTrace(*reinterpret_cast<StackTrace*>(s->objects));
555 } 613 }
556 } 614 }
557 *sample_period = ThreadCache::GetCache()->GetSamplePeriod(); 615 *sample_period = ThreadCache::GetCache()->GetSamplePeriod();
558 return table.ReadStackTracesAndClear(); // grabs and releases pageheap_lock 616 return table.ReadStackTracesAndClear(); // grabs and releases pageheap_lock
559 } 617 }
560 618
561 virtual void** ReadHeapGrowthStackTraces() { 619 virtual void** ReadHeapGrowthStackTraces() {
562 return DumpHeapGrowthStackTraces(); 620 return DumpHeapGrowthStackTraces();
563 } 621 }
564 622
623 virtual void Ranges(void* arg, RangeFunction func) {
624 IterateOverRanges(arg, func);
625 }
626
565 virtual bool GetNumericProperty(const char* name, size_t* value) { 627 virtual bool GetNumericProperty(const char* name, size_t* value) {
566 ASSERT(name != NULL); 628 ASSERT(name != NULL);
567 629
568 if (strcmp(name, "generic.current_allocated_bytes") == 0) { 630 if (strcmp(name, "generic.current_allocated_bytes") == 0) {
569 TCMallocStats stats; 631 TCMallocStats stats;
570 ExtractStats(&stats, NULL); 632 ExtractStats(&stats, NULL);
571 *value = stats.system_bytes 633 *value = stats.pageheap.system_bytes
572 - stats.thread_bytes 634 - stats.thread_bytes
573 - stats.central_bytes 635 - stats.central_bytes
574 - stats.transfer_bytes 636 - stats.transfer_bytes
575 - stats.pageheap_bytes; 637 - stats.pageheap.free_bytes
638 - stats.pageheap.unmapped_bytes;
576 return true; 639 return true;
577 } 640 }
578 641
579 if (strcmp(name, "generic.heap_size") == 0) { 642 if (strcmp(name, "generic.heap_size") == 0) {
580 TCMallocStats stats; 643 TCMallocStats stats;
581 ExtractStats(&stats, NULL); 644 ExtractStats(&stats, NULL);
582 *value = stats.system_bytes; 645 *value = stats.pageheap.system_bytes;
583 return true;
584 }
585
586 if (strcmp(name, "generic.committed_bytes") == 0) {
587 TCMallocStats stats;
588 ExtractStats(&stats, NULL);
589 *value = stats.committed_bytes + stats.metadata_bytes;
590 return true; 646 return true;
591 } 647 }
592 648
593 if (strcmp(name, "tcmalloc.slack_bytes") == 0) { 649 if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
594 // We assume that bytes in the page heap are not fragmented too 650 // We assume that bytes in the page heap are not fragmented too
595 // badly, and are therefore available for allocation. 651 // badly, and are therefore available for allocation without
652 // growing the pageheap system byte count.
596 SpinLockHolder l(Static::pageheap_lock()); 653 SpinLockHolder l(Static::pageheap_lock());
597 *value = Static::pageheap()->FreeBytes(); 654 PageHeap::Stats stats = Static::pageheap()->stats();
655 *value = stats.free_bytes + stats.unmapped_bytes;
656 return true;
657 }
658
659 if (strcmp(name, "tcmalloc.pageheap_free_bytes") == 0) {
660 SpinLockHolder l(Static::pageheap_lock());
661 *value = Static::pageheap()->stats().free_bytes;
662 return true;
663 }
664
665 if (strcmp(name, "tcmalloc.pageheap_unmapped_bytes") == 0) {
666 SpinLockHolder l(Static::pageheap_lock());
667 *value = Static::pageheap()->stats().unmapped_bytes;
598 return true; 668 return true;
599 } 669 }
600 670
601 if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) { 671 if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
602 SpinLockHolder l(Static::pageheap_lock()); 672 SpinLockHolder l(Static::pageheap_lock());
603 *value = ThreadCache::overall_thread_cache_size(); 673 *value = ThreadCache::overall_thread_cache_size();
604 return true; 674 return true;
605 } 675 }
606 676
607 if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) { 677 if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
(...skipping 15 matching lines...) Expand all
623 return true; 693 return true;
624 } 694 }
625 695
626 return false; 696 return false;
627 } 697 }
628 698
629 virtual void MarkThreadIdle() { 699 virtual void MarkThreadIdle() {
630 ThreadCache::BecomeIdle(); 700 ThreadCache::BecomeIdle();
631 } 701 }
632 702
633 virtual void ReleaseFreeMemory() { 703 virtual void MarkThreadBusy(); // Implemented below
704
705 virtual void ReleaseToSystem(size_t num_bytes) {
634 SpinLockHolder h(Static::pageheap_lock()); 706 SpinLockHolder h(Static::pageheap_lock());
635 Static::pageheap()->ReleaseFreePages(); 707 if (num_bytes <= extra_bytes_released_) {
708 // We released too much on a prior call, so don't release any
709 // more this time.
710 extra_bytes_released_ = extra_bytes_released_ - num_bytes;
711 return;
712 }
713 num_bytes = num_bytes - extra_bytes_released_;
714 // num_bytes might be less than one page. If we pass zero to
715 // ReleaseAtLeastNPages, it won't do anything, so we release a whole
716 // page now and let extra_bytes_released_ smooth it out over time.
717 Length num_pages = max<Length>(num_bytes >> kPageShift, 1);
718 size_t bytes_released = Static::pageheap()->ReleaseAtLeastNPages(
719 num_pages) << kPageShift;
720 if (bytes_released > num_bytes) {
721 extra_bytes_released_ = bytes_released - num_bytes;
722 } else {
723 // The PageHeap wasn't able to release num_bytes. Don't try to
724 // compensate with a big release next time. Specifically,
725 // ReleaseFreeMemory() calls ReleaseToSystem(LONG_MAX).
726 extra_bytes_released_ = 0;
727 }
636 } 728 }
637 729
638 virtual void SetMemoryReleaseRate(double rate) { 730 virtual void SetMemoryReleaseRate(double rate) {
639 FLAGS_tcmalloc_release_rate = rate; 731 FLAGS_tcmalloc_release_rate = rate;
640 } 732 }
641 733
642 virtual double GetMemoryReleaseRate() { 734 virtual double GetMemoryReleaseRate() {
643 return FLAGS_tcmalloc_release_rate; 735 return FLAGS_tcmalloc_release_rate;
644 } 736 }
645 virtual size_t GetEstimatedAllocatedSize(size_t size) { 737 virtual size_t GetEstimatedAllocatedSize(size_t size) {
(...skipping 28 matching lines...) Expand all
674 TCMallocGuard::TCMallocGuard() { 766 TCMallocGuard::TCMallocGuard() {
675 if (tcmallocguard_refcount++ == 0) { 767 if (tcmallocguard_refcount++ == 0) {
676 #ifdef HAVE_TLS // this is true if the cc/ld/libc combo support TLS 768 #ifdef HAVE_TLS // this is true if the cc/ld/libc combo support TLS
677 // Check whether the kernel also supports TLS (needs to happen at runtime) 769 // Check whether the kernel also supports TLS (needs to happen at runtime)
678 tcmalloc::CheckIfKernelSupportsTLS(); 770 tcmalloc::CheckIfKernelSupportsTLS();
679 #endif 771 #endif
680 #ifdef WIN32_DO_PATCHING 772 #ifdef WIN32_DO_PATCHING
681 // patch the windows VirtualAlloc, etc. 773 // patch the windows VirtualAlloc, etc.
682 PatchWindowsFunctions(); // defined in windows/patch_functions.cc 774 PatchWindowsFunctions(); // defined in windows/patch_functions.cc
683 #endif 775 #endif
684 free(malloc(1)); 776 tc_free(tc_malloc(1));
685 ThreadCache::InitTSD(); 777 ThreadCache::InitTSD();
686 free(malloc(1)); 778 tc_free(tc_malloc(1));
687 MallocExtension::Register(new TCMallocImplementation); 779 MallocExtension::Register(new TCMallocImplementation);
688 } 780 }
689 } 781 }
690 782
691 TCMallocGuard::~TCMallocGuard() { 783 TCMallocGuard::~TCMallocGuard() {
692 if (--tcmallocguard_refcount == 0) { 784 if (--tcmallocguard_refcount == 0) {
693 const char* env = getenv("MALLOCSTATS"); 785 const char* env = getenv("MALLOCSTATS");
694 if (env != NULL) { 786 if (env != NULL) {
695 int level = atoi(env); 787 int level = atoi(env);
696 if (level < 1) level = 1; 788 if (level < 1) level = 1;
(...skipping 74 matching lines...) Expand 10 before | Expand all | Expand 10 after
771 result); 863 result);
772 for (int i = 0; i < stack.depth; i++) { 864 for (int i = 0; i < stack.depth; i++) {
773 printer.printf(" %p", stack.stack[i]); 865 printer.printf(" %p", stack.stack[i]);
774 } 866 }
775 printer.printf("\n"); 867 printer.printf("\n");
776 write(STDERR_FILENO, buffer, strlen(buffer)); 868 write(STDERR_FILENO, buffer, strlen(buffer));
777 } 869 }
778 870
779 namespace { 871 namespace {
780 872
873 inline void* cpp_alloc(size_t size, bool nothrow);
874 inline void* do_malloc(size_t size);
875
876 // TODO(willchan): Investigate whether or not inlining this much is harmful to
877 // performance.
878 // This is equivalent to do_malloc() except when tc_new_mode is set to true.
879 // Otherwise, it will run the std::new_handler if set.
880 inline void* do_malloc_or_cpp_alloc(size_t size) {
881 return tc_new_mode ? cpp_alloc(size, true) : do_malloc(size);
882 }
883
884 void* cpp_memalign(size_t align, size_t size);
885 void* do_memalign(size_t align, size_t size);
886
887 inline void* do_memalign_or_cpp_memalign(size_t align, size_t size) {
888 return tc_new_mode ? cpp_memalign(align, size) : do_memalign(align, size);
889 }
890
781 // Helper for do_malloc(). 891 // Helper for do_malloc().
782 inline void* do_malloc_pages(Length num_pages) { 892 inline void* do_malloc_pages(Length num_pages) {
783 Span *span; 893 Span *span;
784 bool report_large = false; 894 bool report_large = false;
785 { 895 {
786 SpinLockHolder h(Static::pageheap_lock()); 896 SpinLockHolder h(Static::pageheap_lock());
787 span = Static::pageheap()->New(num_pages); 897 span = Static::pageheap()->New(num_pages);
788 const int64 threshold = large_alloc_threshold; 898 const int64 threshold = large_alloc_threshold;
789 if (threshold > 0 && num_pages >= (threshold >> kPageShift)) { 899 if (threshold > 0 && num_pages >= (threshold >> kPageShift)) {
790 // Increase the threshold by 1/8 every time we generate a report. 900 // Increase the threshold by 1/8 every time we generate a report.
(...skipping 30 matching lines...) Expand all
821 } 931 }
822 if (ret == NULL) errno = ENOMEM; 932 if (ret == NULL) errno = ENOMEM;
823 return ret; 933 return ret;
824 } 934 }
825 935
826 inline void* do_calloc(size_t n, size_t elem_size) { 936 inline void* do_calloc(size_t n, size_t elem_size) {
827 // Overflow check 937 // Overflow check
828 const size_t size = n * elem_size; 938 const size_t size = n * elem_size;
829 if (elem_size != 0 && size / elem_size != n) return NULL; 939 if (elem_size != 0 && size / elem_size != n) return NULL;
830 940
831 void* result = do_malloc(size); 941 void* result = do_malloc_or_cpp_alloc(size);
832 if (result != NULL) { 942 if (result != NULL) {
833 memset(result, 0, size); 943 memset(result, 0, size);
834 } 944 }
835 return result; 945 return result;
836 } 946 }
837 947
838 static inline ThreadCache* GetCacheIfPresent() { 948 static inline ThreadCache* GetCacheIfPresent() {
839 void* const p = ThreadCache::GetCacheIfPresent(); 949 void* const p = ThreadCache::GetCacheIfPresent();
840 return reinterpret_cast<ThreadCache*>(p); 950 return reinterpret_cast<ThreadCache*>(p);
841 } 951 }
(...skipping 88 matching lines...) Expand 10 before | Expand all | Expand 10 after
930 // . If we need to grow, grow to max(new_size, old_size * 1.X) 1040 // . If we need to grow, grow to max(new_size, old_size * 1.X)
931 // . Don't shrink unless new_size < old_size * 0.Y 1041 // . Don't shrink unless new_size < old_size * 0.Y
932 // X and Y trade-off time for wasted space. For now we do 1.25 and 0.5. 1042 // X and Y trade-off time for wasted space. For now we do 1.25 and 0.5.
933 const int lower_bound_to_grow = old_size + old_size / 4; 1043 const int lower_bound_to_grow = old_size + old_size / 4;
934 const int upper_bound_to_shrink = old_size / 2; 1044 const int upper_bound_to_shrink = old_size / 2;
935 if ((new_size > old_size) || (new_size < upper_bound_to_shrink)) { 1045 if ((new_size > old_size) || (new_size < upper_bound_to_shrink)) {
936 // Need to reallocate. 1046 // Need to reallocate.
937 void* new_ptr = NULL; 1047 void* new_ptr = NULL;
938 1048
939 if (new_size > old_size && new_size < lower_bound_to_grow) { 1049 if (new_size > old_size && new_size < lower_bound_to_grow) {
940 new_ptr = do_malloc(lower_bound_to_grow); 1050 new_ptr = do_malloc_or_cpp_alloc(lower_bound_to_grow);
941 } 1051 }
942 if (new_ptr == NULL) { 1052 if (new_ptr == NULL) {
943 // Either new_size is not a tiny increment, or last do_malloc failed. 1053 // Either new_size is not a tiny increment, or last do_malloc failed.
944 new_ptr = do_malloc(new_size); 1054 new_ptr = do_malloc_or_cpp_alloc(new_size);
945 } 1055 }
946 if (new_ptr == NULL) { 1056 if (new_ptr == NULL) {
947 return NULL; 1057 return NULL;
948 } 1058 }
949 MallocHook::InvokeNewHook(new_ptr, new_size); 1059 MallocHook::InvokeNewHook(new_ptr, new_size);
950 memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size)); 1060 memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
951 MallocHook::InvokeDeleteHook(old_ptr); 1061 MallocHook::InvokeDeleteHook(old_ptr);
952 // We could use a variant of do_free() that leverages the fact 1062 // We could use a variant of do_free() that leverages the fact
953 // that we already know the sizeclass of old_ptr. The benefit 1063 // that we already know the sizeclass of old_ptr. The benefit
954 // would be small, so don't bother. 1064 // would be small, so don't bother.
(...skipping 100 matching lines...) Expand 10 before | Expand all | Expand 10 after
1055 inline struct mallinfo do_mallinfo() { 1165 inline struct mallinfo do_mallinfo() {
1056 TCMallocStats stats; 1166 TCMallocStats stats;
1057 ExtractStats(&stats, NULL); 1167 ExtractStats(&stats, NULL);
1058 1168
1059 // Just some of the fields are filled in. 1169 // Just some of the fields are filled in.
1060 struct mallinfo info; 1170 struct mallinfo info;
1061 memset(&info, 0, sizeof(info)); 1171 memset(&info, 0, sizeof(info));
1062 1172
1063 // Unfortunately, the struct contains "int" field, so some of the 1173 // Unfortunately, the struct contains "int" field, so some of the
1064 // size values will be truncated. 1174 // size values will be truncated.
1065 info.arena = static_cast<int>(stats.system_bytes); 1175 info.arena = static_cast<int>(stats.pageheap.system_bytes);
1066 info.fsmblks = static_cast<int>(stats.thread_bytes 1176 info.fsmblks = static_cast<int>(stats.thread_bytes
1067 + stats.central_bytes 1177 + stats.central_bytes
1068 + stats.transfer_bytes); 1178 + stats.transfer_bytes);
1069 info.fordblks = static_cast<int>(stats.pageheap_bytes); 1179 info.fordblks = static_cast<int>(stats.pageheap.free_bytes +
1070 info.uordblks = static_cast<int>(stats.system_bytes 1180 stats.pageheap.unmapped_bytes);
1181 info.uordblks = static_cast<int>(stats.pageheap.system_bytes
1071 - stats.thread_bytes 1182 - stats.thread_bytes
1072 - stats.central_bytes 1183 - stats.central_bytes
1073 - stats.transfer_bytes 1184 - stats.transfer_bytes
1074 - stats.pageheap_bytes); 1185 - stats.pageheap.free_bytes
1186 - stats.pageheap.unmapped_bytes);
1075 1187
1076 return info; 1188 return info;
1077 } 1189 }
1078 #endif // #ifndef HAVE_STRUCT_MALLINFO 1190 #endif // #ifndef HAVE_STRUCT_MALLINFO
1079 1191
1080 static SpinLock set_new_handler_lock(SpinLock::LINKER_INITIALIZED); 1192 static SpinLock set_new_handler_lock(SpinLock::LINKER_INITIALIZED);
1081 1193
1082 inline void* cpp_alloc(size_t size, bool nothrow) { 1194 inline void* cpp_alloc(size_t size, bool nothrow) {
1083 for (;;) { 1195 for (;;) {
1084 void* p = do_malloc(size); 1196 void* p = do_malloc(size);
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after
1120 return p; 1232 return p;
1121 } 1233 }
1122 #endif // (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPT IONS) && !_HAS_EXCEPTIONS) 1234 #endif // (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPT IONS) && !_HAS_EXCEPTIONS)
1123 } else { // allocation success 1235 } else { // allocation success
1124 return p; 1236 return p;
1125 } 1237 }
1126 #endif // PREANSINEW 1238 #endif // PREANSINEW
1127 } 1239 }
1128 } 1240 }
1129 1241
1242 void* cpp_memalign(size_t align, size_t size) {
1243 for (;;) {
1244 void* p = do_memalign(align, size);
1245 #ifdef PREANSINEW
1246 return p;
1247 #else
1248 if (p == NULL) { // allocation failed
1249 // Get the current new handler. NB: this function is not
1250 // thread-safe. We make a feeble stab at making it so here, but
1251 // this lock only protects against tcmalloc interfering with
1252 // itself, not with other libraries calling set_new_handler.
1253 std::new_handler nh;
1254 {
1255 SpinLockHolder h(&set_new_handler_lock);
1256 nh = std::set_new_handler(0);
1257 (void) std::set_new_handler(nh);
1258 }
1259 #if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPTIONS) & & !_HAS_EXCEPTIONS)
1260 if (nh) {
1261 // Since exceptions are disabled, we don't really know if new_handler
1262 // failed. Assume it will abort if it fails.
1263 (*nh)();
1264 continue;
1265 }
1266 return 0;
1267 #else
1268 // If no new_handler is established, the allocation failed.
1269 if (!nh)
1270 return 0;
1271
1272 // Otherwise, try the new_handler. If it returns, retry the
1273 // allocation. If it throws std::bad_alloc, fail the allocation.
1274 // if it throws something else, don't interfere.
1275 try {
1276 (*nh)();
1277 } catch (const std::bad_alloc&) {
1278 return p;
1279 }
1280 #endif // (defined(__GNUC__) && !defined(__EXCEPTIONS)) || (defined(_HAS_EXCEPT IONS) && !_HAS_EXCEPTIONS)
1281 } else { // allocation success
1282 return p;
1283 }
1284 #endif // PREANSINEW
1285 }
1286 }
1287
1130 } // end unnamed namespace 1288 } // end unnamed namespace
1131 1289
1132 // As promised, the definition of this function, declared above. 1290 // As promised, the definition of this function, declared above.
1133 size_t TCMallocImplementation::GetAllocatedSize(void* ptr) { 1291 size_t TCMallocImplementation::GetAllocatedSize(void* ptr) {
1134 return GetSizeWithCallback(ptr, &InvalidGetAllocatedSize); 1292 return GetSizeWithCallback(ptr, &InvalidGetAllocatedSize);
1135 } 1293 }
1136 1294
1295 void TCMallocImplementation::MarkThreadBusy() {
1296 // Allocate to force the creation of a thread cache, but avoid
1297 // invoking any hooks.
1298 do_free(do_malloc(0));
1299 }
1300
1137 //------------------------------------------------------------------- 1301 //-------------------------------------------------------------------
1138 // Exported routines 1302 // Exported routines
1139 //------------------------------------------------------------------- 1303 //-------------------------------------------------------------------
1140 1304
1305 extern "C" PERFTOOLS_DLL_DECL const char* tc_version(
1306 int* major, int* minor, const char** patch) __THROW {
1307 if (major) *major = TC_VERSION_MAJOR;
1308 if (minor) *minor = TC_VERSION_MINOR;
1309 if (patch) *patch = TC_VERSION_PATCH;
1310 return TC_VERSION_STRING;
1311 }
1312
1141 // CAVEAT: The code structure below ensures that MallocHook methods are always 1313 // CAVEAT: The code structure below ensures that MallocHook methods are always
1142 // called from the stack frame of the invoked allocation function. 1314 // called from the stack frame of the invoked allocation function.
1143 // heap-checker.cc depends on this to start a stack trace from 1315 // heap-checker.cc depends on this to start a stack trace from
1144 // the call to the (de)allocation function. 1316 // the call to the (de)allocation function.
1145 1317
1146 static int tc_new_mode = 0; // See tc_set_new_mode(). 1318 extern "C" PERFTOOLS_DLL_DECL void* tc_malloc(size_t size) __THROW {
1147 extern "C" void* tc_malloc(size_t size) __THROW { 1319 void* result = do_malloc_or_cpp_alloc(size);
1148 void* result = (tc_new_mode ? cpp_alloc(size, false) : do_malloc(size));
1149 MallocHook::InvokeNewHook(result, size); 1320 MallocHook::InvokeNewHook(result, size);
1150 return result; 1321 return result;
1151 } 1322 }
1152 1323
1153 extern "C" void tc_free(void* ptr) __THROW { 1324 extern "C" PERFTOOLS_DLL_DECL void tc_free(void* ptr) __THROW {
1154 MallocHook::InvokeDeleteHook(ptr); 1325 MallocHook::InvokeDeleteHook(ptr);
1155 do_free(ptr); 1326 do_free(ptr);
1156 } 1327 }
1157 1328
1158 extern "C" void* tc_calloc(size_t n, size_t elem_size) __THROW { 1329 extern "C" PERFTOOLS_DLL_DECL void* tc_calloc(size_t n,
1330 size_t elem_size) __THROW {
1159 void* result = do_calloc(n, elem_size); 1331 void* result = do_calloc(n, elem_size);
1160 MallocHook::InvokeNewHook(result, n * elem_size); 1332 MallocHook::InvokeNewHook(result, n * elem_size);
1161 return result; 1333 return result;
1162 } 1334 }
1163 1335
1164 extern "C" void tc_cfree(void* ptr) __THROW { 1336 extern "C" PERFTOOLS_DLL_DECL void tc_cfree(void* ptr) __THROW {
1165 MallocHook::InvokeDeleteHook(ptr); 1337 MallocHook::InvokeDeleteHook(ptr);
1166 do_free(ptr); 1338 do_free(ptr);
1167 } 1339 }
1168 1340
1169 extern "C" void* tc_realloc(void* old_ptr, size_t new_size) __THROW { 1341 extern "C" PERFTOOLS_DLL_DECL void* tc_realloc(void* old_ptr,
1342 size_t new_size) __THROW {
1170 if (old_ptr == NULL) { 1343 if (old_ptr == NULL) {
1171 void* result = do_malloc(new_size); 1344 void* result = do_malloc_or_cpp_alloc(new_size);
1172 MallocHook::InvokeNewHook(result, new_size); 1345 MallocHook::InvokeNewHook(result, new_size);
1173 return result; 1346 return result;
1174 } 1347 }
1175 if (new_size == 0) { 1348 if (new_size == 0) {
1176 MallocHook::InvokeDeleteHook(old_ptr); 1349 MallocHook::InvokeDeleteHook(old_ptr);
1177 do_free(old_ptr); 1350 do_free(old_ptr);
1178 return NULL; 1351 return NULL;
1179 } 1352 }
1180 return do_realloc(old_ptr, new_size); 1353 return do_realloc(old_ptr, new_size);
1181 } 1354 }
1182 1355
1183 extern "C" void* tc_new(size_t size) { 1356 extern "C" PERFTOOLS_DLL_DECL void* tc_new(size_t size) {
1184 void* p = cpp_alloc(size, false); 1357 void* p = cpp_alloc(size, false);
1185 // We keep this next instruction out of cpp_alloc for a reason: when 1358 // We keep this next instruction out of cpp_alloc for a reason: when
1186 // it's in, and new just calls cpp_alloc, the optimizer may fold the 1359 // it's in, and new just calls cpp_alloc, the optimizer may fold the
1187 // new call into cpp_alloc, which messes up our whole section-based 1360 // new call into cpp_alloc, which messes up our whole section-based
1188 // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc 1361 // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
1189 // isn't the last thing this fn calls, and prevents the folding. 1362 // isn't the last thing this fn calls, and prevents the folding.
1190 MallocHook::InvokeNewHook(p, size); 1363 MallocHook::InvokeNewHook(p, size);
1191 return p; 1364 return p;
1192 } 1365 }
1193 1366
1194 extern "C" void* tc_new_nothrow(size_t size, const std::nothrow_t&) __THROW { 1367 extern "C" PERFTOOLS_DLL_DECL void* tc_new_nothrow(
1368 size_t size, const std::nothrow_t&) __THROW {
1195 void* p = cpp_alloc(size, true); 1369 void* p = cpp_alloc(size, true);
1196 MallocHook::InvokeNewHook(p, size); 1370 MallocHook::InvokeNewHook(p, size);
1197 return p; 1371 return p;
1198 } 1372 }
1199 1373
1200 extern "C" void tc_delete(void* p) __THROW { 1374 extern "C" PERFTOOLS_DLL_DECL void tc_delete(void* p) __THROW {
1201 MallocHook::InvokeDeleteHook(p); 1375 MallocHook::InvokeDeleteHook(p);
1202 do_free(p); 1376 do_free(p);
1203 } 1377 }
1204 1378
1205 extern "C" void* tc_newarray(size_t size) { 1379 // Compilers define and use this (via ::operator delete(ptr, nothrow)).
1380 // But it's really the same as normal delete, so we just do the same thing.
1381 extern "C" PERFTOOLS_DLL_DECL void tc_delete_nothrow(
1382 void* p, const std::nothrow_t&) __THROW {
1383 MallocHook::InvokeDeleteHook(p);
1384 do_free(p);
1385 }
1386
1387 extern "C" PERFTOOLS_DLL_DECL void* tc_newarray(size_t size) {
1206 void* p = cpp_alloc(size, false); 1388 void* p = cpp_alloc(size, false);
1207 // We keep this next instruction out of cpp_alloc for a reason: when 1389 // We keep this next instruction out of cpp_alloc for a reason: when
1208 // it's in, and new just calls cpp_alloc, the optimizer may fold the 1390 // it's in, and new just calls cpp_alloc, the optimizer may fold the
1209 // new call into cpp_alloc, which messes up our whole section-based 1391 // new call into cpp_alloc, which messes up our whole section-based
1210 // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc 1392 // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
1211 // isn't the last thing this fn calls, and prevents the folding. 1393 // isn't the last thing this fn calls, and prevents the folding.
1212 MallocHook::InvokeNewHook(p, size); 1394 MallocHook::InvokeNewHook(p, size);
1213 return p; 1395 return p;
1214 } 1396 }
1215 1397
1216 extern "C" void* tc_newarray_nothrow(size_t size, const std::nothrow_t&) __THROW { 1398 extern "C" PERFTOOLS_DLL_DECL void* tc_newarray_nothrow(
1399 size_t size, const std::nothrow_t&) __THROW {
1217 void* p = cpp_alloc(size, true); 1400 void* p = cpp_alloc(size, true);
1218 MallocHook::InvokeNewHook(p, size); 1401 MallocHook::InvokeNewHook(p, size);
1219 return p; 1402 return p;
1220 } 1403 }
1221 1404
1222 extern "C" void tc_deletearray(void* p) __THROW { 1405 extern "C" PERFTOOLS_DLL_DECL void tc_deletearray(void* p) __THROW {
1223 MallocHook::InvokeDeleteHook(p); 1406 MallocHook::InvokeDeleteHook(p);
1224 do_free(p); 1407 do_free(p);
1225 } 1408 }
1226 1409
1227 extern "C" void* tc_memalign(size_t align, size_t size) __THROW { 1410 extern "C" PERFTOOLS_DLL_DECL void tc_deletearray_nothrow(
1228 void* result = do_memalign(align, size); 1411 void* p, const std::nothrow_t&) __THROW {
1412 MallocHook::InvokeDeleteHook(p);
1413 do_free(p);
1414 }
1415
1416 extern "C" PERFTOOLS_DLL_DECL void* tc_memalign(size_t align,
1417 size_t size) __THROW {
1418 void* result = do_memalign_or_cpp_memalign(align, size);
1229 MallocHook::InvokeNewHook(result, size); 1419 MallocHook::InvokeNewHook(result, size);
1230 return result; 1420 return result;
1231 } 1421 }
1232 1422
1233 extern "C" int tc_posix_memalign(void** result_ptr, size_t align, size_t size) 1423 extern "C" PERFTOOLS_DLL_DECL int tc_posix_memalign(
1234 __THROW { 1424 void** result_ptr, size_t align, size_t size) __THROW {
1235 if (((align % sizeof(void*)) != 0) || 1425 if (((align % sizeof(void*)) != 0) ||
1236 ((align & (align - 1)) != 0) || 1426 ((align & (align - 1)) != 0) ||
1237 (align == 0)) { 1427 (align == 0)) {
1238 return EINVAL; 1428 return EINVAL;
1239 } 1429 }
1240 1430
1241 void* result = do_memalign(align, size); 1431 void* result = do_memalign_or_cpp_memalign(align, size);
1242 MallocHook::InvokeNewHook(result, size); 1432 MallocHook::InvokeNewHook(result, size);
1243 if (result == NULL) { 1433 if (result == NULL) {
1244 return ENOMEM; 1434 return ENOMEM;
1245 } else { 1435 } else {
1246 *result_ptr = result; 1436 *result_ptr = result;
1247 return 0; 1437 return 0;
1248 } 1438 }
1249 } 1439 }
1250 1440
1251 static size_t pagesize = 0; 1441 static size_t pagesize = 0;
1252 1442
1253 extern "C" void* tc_valloc(size_t size) __THROW { 1443 extern "C" PERFTOOLS_DLL_DECL void* tc_valloc(size_t size) __THROW {
1254 // Allocate page-aligned object of length >= size bytes 1444 // Allocate page-aligned object of length >= size bytes
1255 if (pagesize == 0) pagesize = getpagesize(); 1445 if (pagesize == 0) pagesize = getpagesize();
1256 void* result = do_memalign(pagesize, size); 1446 void* result = do_memalign_or_cpp_memalign(pagesize, size);
1257 MallocHook::InvokeNewHook(result, size); 1447 MallocHook::InvokeNewHook(result, size);
1258 return result; 1448 return result;
1259 } 1449 }
1260 1450
1261 extern "C" void* tc_pvalloc(size_t size) __THROW { 1451 extern "C" PERFTOOLS_DLL_DECL void* tc_pvalloc(size_t size) __THROW {
1262 // Round up size to a multiple of pagesize 1452 // Round up size to a multiple of pagesize
1263 if (pagesize == 0) pagesize = getpagesize(); 1453 if (pagesize == 0) pagesize = getpagesize();
1454 if (size == 0) { // pvalloc(0) should allocate one page, according to
1455 size = pagesize; // http://man.free4web.biz/man3/libmpatrol.3.html
1456 }
1264 size = (size + pagesize - 1) & ~(pagesize - 1); 1457 size = (size + pagesize - 1) & ~(pagesize - 1);
1265 void* result = do_memalign(pagesize, size); 1458 void* result = do_memalign_or_cpp_memalign(pagesize, size);
1266 MallocHook::InvokeNewHook(result, size); 1459 MallocHook::InvokeNewHook(result, size);
1267 return result; 1460 return result;
1268 } 1461 }
1269 1462
1270 extern "C" void tc_malloc_stats(void) __THROW { 1463 extern "C" PERFTOOLS_DLL_DECL void tc_malloc_stats(void) __THROW {
1271 do_malloc_stats(); 1464 do_malloc_stats();
1272 } 1465 }
1273 1466
1274 extern "C" int tc_mallopt(int cmd, int value) __THROW { 1467 extern "C" PERFTOOLS_DLL_DECL int tc_mallopt(int cmd, int value) __THROW {
1275 return do_mallopt(cmd, value); 1468 return do_mallopt(cmd, value);
1276 } 1469 }
1277 1470
1278 #ifdef HAVE_STRUCT_MALLINFO 1471 #ifdef HAVE_STRUCT_MALLINFO
1279 extern "C" struct mallinfo tc_mallinfo(void) __THROW { 1472 extern "C" PERFTOOLS_DLL_DECL struct mallinfo tc_mallinfo(void) __THROW {
1280 return do_mallinfo(); 1473 return do_mallinfo();
1281 } 1474 }
1282 #endif 1475 #endif
1283 1476
1284 // This function behaves similarly to MSVC's _set_new_mode. 1477 // This function behaves similarly to MSVC's _set_new_mode.
1285 // If flag is 0 (default), calls to malloc will behave normally. 1478 // If flag is 0 (default), calls to malloc will behave normally.
1286 // If flag is 1, calls to malloc will behave like calls to new, 1479 // If flag is 1, calls to malloc will behave like calls to new,
1287 // and the std_new_handler will be invoked on failure. 1480 // and the std_new_handler will be invoked on failure.
1288 // Returns the previous mode. 1481 // Returns the previous mode.
1289 extern "C" int tc_set_new_mode(int flag) __THROW { 1482 extern "C" PERFTOOLS_DLL_DECL int tc_set_new_mode(int flag) __THROW {
1290 int old_mode = tc_new_mode; 1483 int old_mode = tc_new_mode;
1291 tc_new_mode = flag; 1484 tc_new_mode = flag;
1292 return old_mode; 1485 return old_mode;
1293 } 1486 }
1294 1487
1295 1488
1296 // Override __libc_memalign in libc on linux boxes specially. 1489 // Override __libc_memalign in libc on linux boxes specially.
1297 // They have a bug in libc that causes them to (very rarely) allocate 1490 // They have a bug in libc that causes them to (very rarely) allocate
1298 // with __libc_memalign() yet deallocate with free() and the 1491 // with __libc_memalign() yet deallocate with free() and the
1299 // definitions above don't catch it. 1492 // definitions above don't catch it.
1300 // This function is an exception to the rule of calling MallocHook method 1493 // This function is an exception to the rule of calling MallocHook method
1301 // from the stack frame of the allocation function; 1494 // from the stack frame of the allocation function;
1302 // heap-checker handles this special case explicitly. 1495 // heap-checker handles this special case explicitly.
1496 #ifndef TCMALLOC_FOR_DEBUGALLOCATION
1303 static void *MemalignOverride(size_t align, size_t size, const void *caller) 1497 static void *MemalignOverride(size_t align, size_t size, const void *caller)
1304 __THROW ATTRIBUTE_SECTION(google_malloc); 1498 __THROW ATTRIBUTE_SECTION(google_malloc);
1305 1499
1306 static void *MemalignOverride(size_t align, size_t size, const void *caller) 1500 static void *MemalignOverride(size_t align, size_t size, const void *caller)
1307 __THROW { 1501 __THROW {
1308 void* result = do_memalign(align, size); 1502 void* result = do_memalign_or_cpp_memalign(align, size);
1309 MallocHook::InvokeNewHook(result, size); 1503 MallocHook::InvokeNewHook(result, size);
1310 return result; 1504 return result;
1311 } 1505 }
1312 void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride; 1506 void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride;
1507 #endif // #ifndef TCMALLOC_FOR_DEBUGALLOCATION
OLDNEW
« no previous file with comments | « third_party/tcmalloc/chromium/src/tcmalloc.h ('k') | third_party/tcmalloc/chromium/src/tests/atomicops_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698