OLD | NEW |
1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 75 matching lines...) |
86 // * allocation of a reasonably complicated struct | 86 // * allocation of a reasonably complicated struct |
87 // goes from about 1100 ns to about 300 ns. | 87 // goes from about 1100 ns to about 300 ns. |
88 | 88 |
89 #include "config.h" | 89 #include "config.h" |
90 #include <google/tcmalloc.h> | 90 #include <google/tcmalloc.h> |
91 | 91 |
92 #include <errno.h> // for ENOMEM, EINVAL, errno | 92 #include <errno.h> // for ENOMEM, EINVAL, errno |
93 #ifdef HAVE_SYS_CDEFS_H | 93 #ifdef HAVE_SYS_CDEFS_H |
94 #include <sys/cdefs.h> // for __THROW | 94 #include <sys/cdefs.h> // for __THROW |
95 #endif | 95 #endif |
96 #ifdef HAVE_FEATURES_H | |
97 #include <features.h> // for __GLIBC__ | |
98 #endif | |
99 #if defined HAVE_STDINT_H | 96 #if defined HAVE_STDINT_H |
100 #include <stdint.h> | 97 #include <stdint.h> |
101 #elif defined HAVE_INTTYPES_H | 98 #elif defined HAVE_INTTYPES_H |
102 #include <inttypes.h> | 99 #include <inttypes.h> |
103 #else | 100 #else |
104 #include <sys/types.h> | 101 #include <sys/types.h> |
105 #endif | 102 #endif |
106 #include <stddef.h> // for size_t, NULL | 103 #include <stddef.h> // for size_t, NULL |
107 #include <stdlib.h> // for getenv | 104 #include <stdlib.h> // for getenv |
108 #include <string.h> // for strcmp, memset, strlen, etc | 105 #include <string.h> // for strcmp, memset, strlen, etc |
109 #ifdef HAVE_UNISTD_H | 106 #ifdef HAVE_UNISTD_H |
110 #include <unistd.h> // for getpagesize, write, etc | 107 #include <unistd.h> // for getpagesize, write, etc |
111 #endif | 108 #endif |
112 #include <algorithm> // for max, min | 109 #include <algorithm> // for max, min |
113 #include <limits> // for numeric_limits | 110 #include <limits> // for numeric_limits |
114 #include <new> // for nothrow_t (ptr only), etc | 111 #include <new> // for nothrow_t (ptr only), etc |
115 #include <vector> // for vector | 112 #include <vector> // for vector |
116 | 113 |
117 #include <google/malloc_extension.h> | 114 #include <google/malloc_extension.h> |
118 #include <google/malloc_hook.h> // for MallocHook | 115 #include <google/malloc_hook.h> // for MallocHook |
119 #include "base/basictypes.h" // for int64 | 116 #include "base/basictypes.h" // for int64 |
120 #include "base/commandlineflags.h" // for RegisterFlagValidator, etc | 117 #include "base/commandlineflags.h" // for RegisterFlagValidator, etc |
121 #include "base/dynamic_annotations.h" // for RunningOnValgrind | 118 #include "base/dynamic_annotations.h" // for RunningOnValgrind |
122 #include "base/spinlock.h" // for SpinLockHolder | 119 #include "base/spinlock.h" // for SpinLockHolder |
123 #include "central_freelist.h" // for CentralFreeListPadded | 120 #include "central_freelist.h" // for CentralFreeListPadded |
124 #include "common.h" // for StackTrace, kPageShift, etc | 121 #include "common.h" // for StackTrace, kPageShift, etc |
125 #include "free_list.h" // for FL_Init | |
126 #include "internal_logging.h" // for ASSERT, TCMalloc_Printer, etc | 122 #include "internal_logging.h" // for ASSERT, TCMalloc_Printer, etc |
| 123 #include "linked_list.h" // for SLL_SetNext |
127 #include "malloc_hook-inl.h" // for MallocHook::InvokeNewHook, etc | 124 #include "malloc_hook-inl.h" // for MallocHook::InvokeNewHook, etc |
128 #include "page_heap.h" // for PageHeap, PageHeap::Stats | 125 #include "page_heap.h" // for PageHeap, PageHeap::Stats |
129 #include "page_heap_allocator.h" // for PageHeapAllocator | 126 #include "page_heap_allocator.h" // for PageHeapAllocator |
130 #include "span.h" // for Span, DLL_Prepend, etc | 127 #include "span.h" // for Span, DLL_Prepend, etc |
131 #include "stack_trace_table.h" // for StackTraceTable | 128 #include "stack_trace_table.h" // for StackTraceTable |
132 #include "static_vars.h" // for Static | 129 #include "static_vars.h" // for Static |
133 #include "system-alloc.h" // for DumpSystemAllocatorStats, etc | 130 #include "system-alloc.h" // for DumpSystemAllocatorStats, etc |
134 #include "tcmalloc_guard.h" // for TCMallocGuard | 131 #include "tcmalloc_guard.h" // for TCMallocGuard |
135 #include "thread_cache.h" // for ThreadCache | 132 #include "thread_cache.h" // for ThreadCache |
136 | 133 |
137 // We only need malloc.h for struct mallinfo. | 134 // We only need malloc.h for struct mallinfo. |
138 #ifdef HAVE_STRUCT_MALLINFO | 135 #ifdef HAVE_STRUCT_MALLINFO |
139 // Malloc can be in several places on older versions of OS X. | 136 // Malloc can be in several places on older versions of OS X. |
140 # if defined(HAVE_MALLOC_H) | 137 # if defined(HAVE_MALLOC_H) |
141 # include <malloc.h> | 138 # include <malloc.h> |
142 # elif defined(HAVE_SYS_MALLOC_H) | 139 # elif defined(HAVE_SYS_MALLOC_H) |
143 # include <sys/malloc.h> | 140 # include <sys/malloc.h> |
144 # elif defined(HAVE_MALLOC_MALLOC_H) | 141 # elif defined(HAVE_MALLOC_MALLOC_H) |
145 # include <malloc/malloc.h> | 142 # include <malloc/malloc.h> |
146 # endif | 143 # endif |
147 #endif | 144 #endif |
148 | 145 |
149 #if (defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)) && !defined(WIN32_OVERRIDE_ALLOCATORS) | 146 #if (defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)) && !defined(WIN32_OVERRIDE_ALLOCATORS) |
150 # define WIN32_DO_PATCHING 1 | 147 # define WIN32_DO_PATCHING 1 |
151 #endif | 148 #endif |
152 | 149 |
153 // GLibc 2.14+ requires the hook functions be declared volatile, based on the | 150 // Some windows file somewhere (at least on cygwin) #define's small (!) |
154 // value of the define __MALLOC_HOOK_VOLATILE. For compatibility with | 151 #undef small |
155 // older/non-GLibc implementations, provide an empty definition. | |
156 #if !defined(__MALLOC_HOOK_VOLATILE) | |
157 #define __MALLOC_HOOK_VOLATILE | |
158 #endif | |
159 | 152 |
160 using STL_NAMESPACE::max; | 153 using STL_NAMESPACE::max; |
161 using STL_NAMESPACE::numeric_limits; | 154 using STL_NAMESPACE::numeric_limits; |
162 using STL_NAMESPACE::vector; | 155 using STL_NAMESPACE::vector; |
| 156 |
| 157 #include "libc_override.h" |
| 158 |
| 159 // __THROW is defined in glibc (via <sys/cdefs.h>). It means, |
| 160 // counter-intuitively, "This function will never throw an exception." |
| 161 // It's an optional optimization tool, but we may need to use it to |
| 162 // match glibc prototypes. |
| 163 #ifndef __THROW // I guess we're not on a glibc system |
| 164 # define __THROW // __THROW is just an optimization, so ok to make it "" |
| 165 #endif |
| 166 |
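A minimal sketch of the __THROW fallback above, assuming the goal is code that matches glibc's annotated prototypes on glibc yet still compiles elsewhere (shim_malloc is an illustrative name, not part of tcmalloc):

    #include <stddef.h>
    #ifndef __THROW
    # define __THROW   // non-glibc: the annotation expands to nothing
    #endif
    // On glibc this carries the "throw ()" annotation from <sys/cdefs.h>;
    // on other platforms the macro vanishes and the prototype is unchanged.
    extern "C" void* shim_malloc(size_t size) __THROW;
    extern "C" void* shim_malloc(size_t size) __THROW { return NULL; }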
163 using tcmalloc::AlignmentForSize; | 167 using tcmalloc::AlignmentForSize; |
| 168 using tcmalloc::kLog; |
| 169 using tcmalloc::kCrash; |
| 170 using tcmalloc::kCrashWithStats; |
| 171 using tcmalloc::Log; |
164 using tcmalloc::PageHeap; | 172 using tcmalloc::PageHeap; |
165 using tcmalloc::PageHeapAllocator; | 173 using tcmalloc::PageHeapAllocator; |
166 using tcmalloc::SizeMap; | 174 using tcmalloc::SizeMap; |
167 using tcmalloc::Span; | 175 using tcmalloc::Span; |
168 using tcmalloc::StackTrace; | 176 using tcmalloc::StackTrace; |
169 using tcmalloc::Static; | 177 using tcmalloc::Static; |
170 using tcmalloc::ThreadCache; | 178 using tcmalloc::ThreadCache; |
171 | 179 |
172 // __THROW is defined in glibc systems. It means, counter-intuitively, | |
173 // "This function will never throw an exception." It's an optional | |
174 // optimization tool, but we may need to use it to match glibc prototypes. | |
175 #ifndef __THROW // I guess we're not on a glibc system | |
176 # define __THROW // __THROW is just an optimization, so ok to make it "" | |
177 #endif | |
178 | |
179 // ---- Double free debug declarations | |
180 static size_t ExcludeSpaceForMark(size_t size); | |
181 static void AddRoomForMark(size_t* size); | |
182 static void ExcludeMarkFromSize(size_t* new_size); | |
183 static void MarkAllocatedRegion(void* ptr); | |
184 static void ValidateAllocatedRegion(void* ptr, size_t cl); | |
185 // ---- End Double free debug declarations | |
186 | |
187 DECLARE_int64(tcmalloc_sample_parameter); | 180 DECLARE_int64(tcmalloc_sample_parameter); |
188 DECLARE_double(tcmalloc_release_rate); | 181 DECLARE_double(tcmalloc_release_rate); |
189 | 182 |
190 // For windows, the printf we use to report large allocs is | 183 // For windows, the printf we use to report large allocs is |
191 // potentially dangerous: it could cause a malloc that would cause an | 184 // potentially dangerous: it could cause a malloc that would cause an |
192 // infinite loop. So by default we set the threshold to a huge number | 185 // infinite loop. So by default we set the threshold to a huge number |
193 // on windows, so this bad situation will never trigger. You can | 186 // on windows, so this bad situation will never trigger. You can |
194 // always set TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD manually if you | 187 // always set TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD manually if you |
195 // want this functionality. | 188 // want this functionality. |
196 #ifdef _WIN32 | 189 #ifdef _WIN32 |
(...skipping 14 matching lines...) |
211 "is very large and therefore you should see no extra " | 204 "is very large and therefore you should see no extra " |
212 "logging unless the flag is overridden. Set to 0 to " | 205 "logging unless the flag is overridden. Set to 0 to " |
213 "disable reporting entirely."); | 206 "disable reporting entirely."); |
214 | 207 |
215 | 208 |
216 // We already declared these functions in tcmalloc.h, but we have to | 209 // We already declared these functions in tcmalloc.h, but we have to |
217 // declare them again to give them an ATTRIBUTE_SECTION: we want to | 210 // declare them again to give them an ATTRIBUTE_SECTION: we want to |
218 // put all callers of MallocHook::Invoke* in this module into | 211 // put all callers of MallocHook::Invoke* in this module into |
219 // ATTRIBUTE_SECTION(google_malloc) section, so that | 212 // ATTRIBUTE_SECTION(google_malloc) section, so that |
220 // MallocHook::GetCallerStackTrace can function accurately. | 213 // MallocHook::GetCallerStackTrace can function accurately. |
| 214 #ifndef _WIN32 // windows doesn't have attribute_section, so don't bother |
221 extern "C" { | 215 extern "C" { |
222 void* tc_malloc(size_t size) __THROW | 216 void* tc_malloc(size_t size) __THROW |
223 ATTRIBUTE_SECTION(google_malloc); | 217 ATTRIBUTE_SECTION(google_malloc); |
224 void tc_free(void* ptr) __THROW | 218 void tc_free(void* ptr) __THROW |
225 ATTRIBUTE_SECTION(google_malloc); | 219 ATTRIBUTE_SECTION(google_malloc); |
226 void* tc_realloc(void* ptr, size_t size) __THROW | 220 void* tc_realloc(void* ptr, size_t size) __THROW |
227 ATTRIBUTE_SECTION(google_malloc); | 221 ATTRIBUTE_SECTION(google_malloc); |
228 void* tc_calloc(size_t nmemb, size_t size) __THROW | 222 void* tc_calloc(size_t nmemb, size_t size) __THROW |
229 ATTRIBUTE_SECTION(google_malloc); | 223 ATTRIBUTE_SECTION(google_malloc); |
230 void tc_cfree(void* ptr) __THROW | 224 void tc_cfree(void* ptr) __THROW |
(...skipping 41 matching lines...) |
272 | 266 |
273 // Some non-standard extensions that we support. | 267 // Some non-standard extensions that we support. |
274 | 268 |
275 // This is equivalent to | 269 // This is equivalent to |
276 // OS X: malloc_size() | 270 // OS X: malloc_size() |
277 // glibc: malloc_usable_size() | 271 // glibc: malloc_usable_size() |
278 // Windows: _msize() | 272 // Windows: _msize() |
279 size_t tc_malloc_size(void* p) __THROW | 273 size_t tc_malloc_size(void* p) __THROW |
280 ATTRIBUTE_SECTION(google_malloc); | 274 ATTRIBUTE_SECTION(google_malloc); |
281 } // extern "C" | 275 } // extern "C" |
282 | |
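A quick usage sketch of the tc_malloc_size() extension declared above; note the returned usable size may exceed the request, since tcmalloc rounds allocations up to a size class:

    #include <google/tcmalloc.h>
    #include <assert.h>

    int main() {
      void* p = tc_malloc(100);
      assert(p != NULL);
      assert(tc_malloc_size(p) >= 100);  // usable size >= requested size
      tc_free(p);
      return 0;
    }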
283 // Override the libc functions to prefer our own instead. This comes | |
284 // first so code in tcmalloc.cc can use the overridden versions. One | |
285 // exception: in windows, by default, we patch our code into these | |
286 // functions (via src/windows/patch_function.cc) rather than override | |
287 // them. In that case, we don't want to do this overriding here. | |
288 #if !defined(WIN32_DO_PATCHING) | |
289 | |
290 // TODO(mbelshe): Turn off TCMalloc's symbols for libc. We do that | |
291 // elsewhere. | |
292 #ifndef _WIN32 | |
293 | |
294 #if defined(__GNUC__) && !defined(__MACH__) | |
295 // Potentially faster variants that use the gcc alias extension. | |
296 // FreeBSD does support aliases, but apparently not correctly. :-( | |
297 // NOTE: we make many of these symbols weak, but do so in the makefile | |
298 // (via objcopy -W) and not here. That ends up being more portable. | |
299 # define ALIAS(x) __attribute__ ((alias (x))) | |
300 void* operator new(size_t size) throw (std::bad_alloc) ALIAS("tc_new"); | |
301 void operator delete(void* p) __THROW ALIAS("tc_delete"); | |
302 void* operator new[](size_t size) throw (std::bad_alloc) ALIAS("tc_newarray"); | |
303 void operator delete[](void* p) __THROW ALIAS("tc_deletearray"); | |
304 void* operator new(size_t size, const std::nothrow_t&) __THROW | |
305 ALIAS("tc_new_nothrow"); | |
306 void* operator new[](size_t size, const std::nothrow_t&) __THROW | |
307 ALIAS("tc_newarray_nothrow"); | |
308 void operator delete(void* size, const std::nothrow_t&) __THROW | |
309 ALIAS("tc_delete_nothrow"); | |
310 void operator delete[](void* size, const std::nothrow_t&) __THROW | |
311 ALIAS("tc_deletearray_nothrow"); | |
312 extern "C" { | |
313 void* malloc(size_t size) __THROW ALIAS("tc_malloc"); | |
314 void free(void* ptr) __THROW ALIAS("tc_free"); | |
315 void* realloc(void* ptr, size_t size) __THROW ALIAS("tc_realloc"); | |
316 void* calloc(size_t n, size_t size) __THROW ALIAS("tc_calloc"); | |
317 void cfree(void* ptr) __THROW ALIAS("tc_cfree"); | |
318 void* memalign(size_t align, size_t s) __THROW ALIAS("tc_memalign"); | |
319 void* valloc(size_t size) __THROW ALIAS("tc_valloc"); | |
320 void* pvalloc(size_t size) __THROW ALIAS("tc_pvalloc"); | |
321 int posix_memalign(void** r, size_t a, size_t s) __THROW | |
322 ALIAS("tc_posix_memalign"); | |
323 void malloc_stats(void) __THROW ALIAS("tc_malloc_stats"); | |
324 int mallopt(int cmd, int value) __THROW ALIAS("tc_mallopt"); | |
325 #ifdef HAVE_STRUCT_MALLINFO | |
326 struct mallinfo mallinfo(void) __THROW ALIAS("tc_mallinfo"); | |
327 #endif | |
328 size_t malloc_size(void* p) __THROW ALIAS("tc_malloc_size"); | |
329 size_t malloc_usable_size(void* p) __THROW ALIAS("tc_malloc_size"); | |
330 } // extern "C" | |
331 #else // #if defined(__GNUC__) && !defined(__MACH__) | |
332 // Portable wrappers | |
333 void* operator new(size_t size) { return tc_new(size); } | |
334 void operator delete(void* p) __THROW { tc_delete(p); } | |
335 void* operator new[](size_t size) { return tc_newarray(size); } | |
336 void operator delete[](void* p) __THROW { tc_deletearray(p); } | |
337 void* operator new(size_t size, const std::nothrow_t& nt) __THROW { | |
338 return tc_new_nothrow(size, nt); | |
339 } | |
340 void* operator new[](size_t size, const std::nothrow_t& nt) __THROW { | |
341 return tc_newarray_nothrow(size, nt); | |
342 } | |
343 void operator delete(void* ptr, const std::nothrow_t& nt) __THROW { | |
344 return tc_delete_nothrow(ptr, nt); | |
345 } | |
346 void operator delete[](void* ptr, const std::nothrow_t& nt) __THROW { | |
347 return tc_deletearray_nothrow(ptr, nt); | |
348 } | |
349 extern "C" { | |
350 void* malloc(size_t s) __THROW { return tc_malloc(s); } | |
351 void free(void* p) __THROW { tc_free(p); } | |
352 void* realloc(void* p, size_t s) __THROW { return tc_realloc(p, s); } | |
353 void* calloc(size_t n, size_t s) __THROW { return tc_calloc(n, s); } | |
354 void cfree(void* p) __THROW { tc_cfree(p); } | |
355 void* memalign(size_t a, size_t s) __THROW { return tc_memalign(a, s); } | |
356 void* valloc(size_t s) __THROW { return tc_valloc(s); } | |
357 void* pvalloc(size_t s) __THROW { return tc_pvalloc(s); } | |
358 int posix_memalign(void** r, size_t a, size_t s) __THROW { | |
359 return tc_posix_memalign(r, a, s); | |
360 } | |
361 void malloc_stats(void) __THROW { tc_malloc_stats(); } | |
362 int mallopt(int cmd, int v) __THROW { return tc_mallopt(cmd, v); } | |
363 #ifdef HAVE_STRUCT_MALLINFO | |
364 struct mallinfo mallinfo(void) __THROW { return tc_mallinfo(); } | |
365 #endif | |
366 size_t malloc_size(void* p) __THROW { return tc_malloc_size(p); } | |
367 size_t malloc_usable_size(void* p) __THROW { return tc_malloc_size(p); } | |
368 } // extern "C" | |
369 #endif // #if defined(__GNUC__) | |
370 | |
371 // Some library routines on RedHat 9 allocate memory using malloc() | |
372 // and free it using __libc_free() (or vice-versa). Since we provide | |
373 // our own implementations of malloc/free, we need to make sure that | |
374 // the __libc_XXX variants (defined as part of glibc) also point to | |
375 // the same implementations. | |
376 #ifdef __GLIBC__ // only glibc defines __libc_* | |
377 extern "C" { | |
378 #ifdef ALIAS | |
379 void* __libc_malloc(size_t size) ALIAS("tc_malloc"); | |
380 void __libc_free(void* ptr) ALIAS("tc_free"); | |
381 void* __libc_realloc(void* ptr, size_t size) ALIAS("tc_realloc"); | |
382 void* __libc_calloc(size_t n, size_t size) ALIAS("tc_calloc"); | |
383 void __libc_cfree(void* ptr) ALIAS("tc_cfree"); | |
384 void* __libc_memalign(size_t align, size_t s) ALIAS("tc_memalign"); | |
385 void* __libc_valloc(size_t size) ALIAS("tc_valloc"); | |
386 void* __libc_pvalloc(size_t size) ALIAS("tc_pvalloc"); | |
387 int __posix_memalign(void** r, size_t a, size_t s) ALIAS("tc_posix_memalign"); | |
388 #else // #ifdef ALIAS | |
389 void* __libc_malloc(size_t size) { return malloc(size); } | |
390 void __libc_free(void* ptr) { free(ptr); } | |
391 void* __libc_realloc(void* ptr, size_t size) { return realloc(ptr, size); } | |
392 void* __libc_calloc(size_t n, size_t size) { return calloc(n, size); } | |
393 void __libc_cfree(void* ptr) { cfree(ptr); } | |
394 void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); } | |
395 void* __libc_valloc(size_t size) { return valloc(size); } | |
396 void* __libc_pvalloc(size_t size) { return pvalloc(size); } | |
397 int __posix_memalign(void** r, size_t a, size_t s) { | |
398 return posix_memalign(r, a, s); | |
399 } | |
400 #endif // #ifdef ALIAS | |
401 } // extern "C" | |
402 #endif // ifdef __GLIBC__ | |
403 | |
404 #if defined(__GLIBC__) && defined(HAVE_MALLOC_H) | |
405 // If we're using glibc, then override glibc malloc hooks to make sure that even | |
406 // if calls fall through to ptmalloc (due to dlopen() with RTLD_DEEPBIND or what | |
407 // not), ptmalloc will use TCMalloc. | |
408 | |
409 static void* tc_ptmalloc_malloc_hook(size_t size, const void* caller) { | |
410 return tc_malloc(size); | |
411 } | |
412 | |
413 void* (*__MALLOC_HOOK_VOLATILE __malloc_hook)( | |
414 size_t size, const void* caller) = tc_ptmalloc_malloc_hook; | |
415 | |
416 static void* tc_ptmalloc_realloc_hook( | |
417 void* ptr, size_t size, const void* caller) { | |
418 return tc_realloc(ptr, size); | |
419 } | |
420 | |
421 void* (*__MALLOC_HOOK_VOLATILE __realloc_hook)( | |
422 void* ptr, size_t size, const void* caller) = tc_ptmalloc_realloc_hook; | |
423 | |
424 static void tc_ptmalloc_free_hook(void* ptr, const void* caller) { | |
425 tc_free(ptr); | |
426 } | |
427 | |
428 void (*__MALLOC_HOOK_VOLATILE __free_hook)(void* ptr, const void* caller) = tc_ptmalloc_free_hook; | |
429 | |
430 #endif | |
431 | |
432 #endif // #ifndef _WIN32 | 276 #endif // #ifndef _WIN32 |
433 #undef ALIAS | |
434 | |
435 #endif // #ifndef(WIN32_DO_PATCHING) | |
436 | |
437 | 277 |
438 // ----------------------- IMPLEMENTATION ------------------------------- | 278 // ----------------------- IMPLEMENTATION ------------------------------- |
439 | 279 |
440 static int tc_new_mode = 0; // See tc_set_new_mode(). | 280 static int tc_new_mode = 0; // See tc_set_new_mode(). |
441 | 281 |
442 // Routines such as free() and realloc() catch some erroneous pointers | 282 // Routines such as free() and realloc() catch some erroneous pointers |
443 // passed to them, and invoke the below when they do. (An erroneous pointer | 283 // passed to them, and invoke the below when they do. (An erroneous pointer |
444 // won't be caught if it's within a valid span or a stale span for which | 284 // won't be caught if it's within a valid span or a stale span for which |
445 // the pagemap cache has a non-zero sizeclass.) This is a cheap (source-editing | 285 // the pagemap cache has a non-zero sizeclass.) This is a cheap (source-editing |
446 // required) kind of exception handling for these routines. | 286 // required) kind of exception handling for these routines. |
447 namespace { | 287 namespace { |
448 void InvalidFree(void* ptr) { | 288 void InvalidFree(void* ptr) { |
449 CRASH("Attempt to free invalid pointer: %p\n", ptr); | 289 Log(kCrash, __FILE__, __LINE__, "Attempt to free invalid pointer", ptr); |
450 } | 290 } |
451 | 291 |
452 size_t InvalidGetSizeForRealloc(void* old_ptr) { | 292 size_t InvalidGetSizeForRealloc(const void* old_ptr) { |
453 CRASH("Attempt to realloc invalid pointer: %p\n", old_ptr); | 293 Log(kCrash, __FILE__, __LINE__, |
| 294 "Attempt to realloc invalid pointer", old_ptr); |
454 return 0; | 295 return 0; |
455 } | 296 } |
456 | 297 |
457 size_t InvalidGetAllocatedSize(void* ptr) { | 298 size_t InvalidGetAllocatedSize(const void* ptr) { |
458 CRASH("Attempt to get the size of an invalid pointer: %p\n", ptr); | 299 Log(kCrash, __FILE__, __LINE__, |
| 300 "Attempt to get the size of an invalid pointer", ptr); |
459 return 0; | 301 return 0; |
460 } | 302 } |
461 } // unnamed namespace | 303 } // unnamed namespace |
462 | 304 |
463 // Extract interesting stats | 305 // Extract interesting stats |
464 struct TCMallocStats { | 306 struct TCMallocStats { |
465 uint64_t thread_bytes; // Bytes in thread caches | 307 uint64_t thread_bytes; // Bytes in thread caches |
466 uint64_t central_bytes; // Bytes in central cache | 308 uint64_t central_bytes; // Bytes in central cache |
467 uint64_t transfer_bytes; // Bytes in central transfer cache | 309 uint64_t transfer_bytes; // Bytes in central transfer cache |
468 uint64_t metadata_bytes; // Bytes alloced for metadata | 310 uint64_t metadata_bytes; // Bytes alloced for metadata |
469 PageHeap::Stats pageheap; // Stats from page heap | 311 PageHeap::Stats pageheap; // Stats from page heap |
470 }; | 312 }; |
471 | 313 |
472 // Get stats into "r". Also get per-size-class counts if class_count != NULL | 314 // Get stats into "r". Also get per-size-class counts if class_count != NULL |
473 static void ExtractStats(TCMallocStats* r, uint64_t* class_count) { | 315 static void ExtractStats(TCMallocStats* r, uint64_t* class_count, |
| 316 PageHeap::SmallSpanStats* small_spans, |
| 317 PageHeap::LargeSpanStats* large_spans) { |
474 r->central_bytes = 0; | 318 r->central_bytes = 0; |
475 r->transfer_bytes = 0; | 319 r->transfer_bytes = 0; |
476 for (int cl = 0; cl < kNumClasses; ++cl) { | 320 for (int cl = 0; cl < kNumClasses; ++cl) { |
477 const int length = Static::central_cache()[cl].length(); | 321 const int length = Static::central_cache()[cl].length(); |
478 const int tc_length = Static::central_cache()[cl].tc_length(); | 322 const int tc_length = Static::central_cache()[cl].tc_length(); |
| 323 const size_t cache_overhead = Static::central_cache()[cl].OverheadBytes(); |
479 const size_t size = static_cast<uint64_t>( | 324 const size_t size = static_cast<uint64_t>( |
480 Static::sizemap()->ByteSizeForClass(cl)); | 325 Static::sizemap()->ByteSizeForClass(cl)); |
481 r->central_bytes += (size * length); | 326 r->central_bytes += (size * length) + cache_overhead; |
482 r->transfer_bytes += (size * tc_length); | 327 r->transfer_bytes += (size * tc_length); |
483 if (class_count) class_count[cl] = length + tc_length; | 328 if (class_count) class_count[cl] = length + tc_length; |
484 } | 329 } |
485 | 330 |
486 // Add stats from per-thread heaps | 331 // Add stats from per-thread heaps |
487 r->thread_bytes = 0; | 332 r->thread_bytes = 0; |
488 { // scope | 333 { // scope |
489 SpinLockHolder h(Static::pageheap_lock()); | 334 SpinLockHolder h(Static::pageheap_lock()); |
490 ThreadCache::GetThreadStats(&r->thread_bytes, class_count); | 335 ThreadCache::GetThreadStats(&r->thread_bytes, class_count); |
491 r->metadata_bytes = tcmalloc::metadata_system_bytes(); | 336 r->metadata_bytes = tcmalloc::metadata_system_bytes(); |
492 r->pageheap = Static::pageheap()->stats(); | 337 r->pageheap = Static::pageheap()->stats(); |
| 338 if (small_spans != NULL) { |
| 339 Static::pageheap()->GetSmallSpanStats(small_spans); |
| 340 } |
| 341 if (large_spans != NULL) { |
| 342 Static::pageheap()->GetLargeSpanStats(large_spans); |
| 343 } |
493 } | 344 } |
494 } | 345 } |
495 | 346 |
| 347 static double PagesToMiB(uint64_t pages) { |
| 348 return (pages << kPageShift) / 1048576.0; |
| 349 } |
| 350 |
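A sanity check of PagesToMiB() above, assuming tcmalloc's common 8 KiB page (kPageShift == 13; the constant below is illustrative, not read from common.h):

    #include <stdio.h>

    int main() {
      const int kPageShift = 13;                 // assumed: 8 KiB pages
      unsigned long long pages = 128;
      // 128 << 13 == 1048576 bytes, so this prints exactly "1.0 MiB".
      printf("%.1f MiB\n", (double)(pages << kPageShift) / 1048576.0);
      return 0;
    }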
496 // WRITE stats to "out" | 351 // WRITE stats to "out" |
497 static void DumpStats(TCMalloc_Printer* out, int level) { | 352 static void DumpStats(TCMalloc_Printer* out, int level) { |
498 TCMallocStats stats; | 353 TCMallocStats stats; |
499 uint64_t class_count[kNumClasses]; | 354 uint64_t class_count[kNumClasses]; |
500 ExtractStats(&stats, (level >= 2 ? class_count : NULL)); | 355 PageHeap::SmallSpanStats small; |
| 356 PageHeap::LargeSpanStats large; |
| 357 if (level >= 2) { |
| 358 ExtractStats(&stats, class_count, &small, &large); |
| 359 } else { |
| 360 ExtractStats(&stats, NULL, NULL, NULL); |
| 361 } |
501 | 362 |
502 static const double MiB = 1048576.0; | 363 static const double MiB = 1048576.0; |
503 | 364 |
504 const uint64_t virtual_memory_used = (stats.pageheap.system_bytes | 365 const uint64_t virtual_memory_used = (stats.pageheap.system_bytes |
505 + stats.metadata_bytes); | 366 + stats.metadata_bytes); |
506 const uint64_t physical_memory_used = (virtual_memory_used | 367 const uint64_t physical_memory_used = (virtual_memory_used |
507 - stats.pageheap.unmapped_bytes); | 368 - stats.pageheap.unmapped_bytes); |
508 const uint64_t bytes_in_use_by_app = (physical_memory_used | 369 const uint64_t bytes_in_use_by_app = (physical_memory_used |
509 - stats.metadata_bytes | 370 - stats.metadata_bytes |
510 - stats.pageheap.free_bytes | 371 - stats.pageheap.free_bytes |
511 - stats.central_bytes | 372 - stats.central_bytes |
512 - stats.transfer_bytes | 373 - stats.transfer_bytes |
513 - stats.thread_bytes); | 374 - stats.thread_bytes); |
514 | 375 |
515 out->printf( | |
516 "WASTE: %7.1f MiB committed but not used\n" | |
517 "WASTE: %7.1f MiB bytes committed, %7.1f MiB bytes in use\n" | |
518 "WASTE: committed/used ratio of %f\n", | |
519 (stats.pageheap.committed_bytes - bytes_in_use_by_app) / MiB, | |
520 stats.pageheap.committed_bytes / MiB, | |
521 bytes_in_use_by_app / MiB, | |
522 stats.pageheap.committed_bytes / static_cast<double>(bytes_in_use_by_app) | |
523 ); | |
524 #ifdef TCMALLOC_SMALL_BUT_SLOW | 376 #ifdef TCMALLOC_SMALL_BUT_SLOW |
525 out->printf( | 377 out->printf( |
526 "NOTE: SMALL MEMORY MODEL IS IN USE, PERFORMANCE MAY SUFFER.\n"); | 378 "NOTE: SMALL MEMORY MODEL IS IN USE, PERFORMANCE MAY SUFFER.\n"); |
527 #endif | 379 #endif |
528 out->printf( | 380 out->printf( |
529 "------------------------------------------------\n" | 381 "------------------------------------------------\n" |
530 "MALLOC: %12" PRIu64 " (%7.1f MiB) Bytes in use by application\n" | 382 "MALLOC: %12" PRIu64 " (%7.1f MiB) Bytes in use by application\n" |
531 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes committed\n" | |
532 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in page heap freelist\n" | 383 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in page heap freelist\n" |
533 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in central cache freelist\n" | 384 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in central cache freelist\n" |
534 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in transfer cache freelist\n" | 385 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in transfer cache freelist\n" |
535 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in thread cache freelists\n" | 386 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in thread cache freelists\n" |
536 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in malloc metadata\n" | 387 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in malloc metadata\n" |
537 "MALLOC: ------------\n" | 388 "MALLOC: ------------\n" |
538 "MALLOC: = %12" PRIu64 " (%7.1f MiB) Actual memory used (physical + swap)\
n" | 389 "MALLOC: = %12" PRIu64 " (%7.1f MiB) Actual memory used (physical + swap)\
n" |
539 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes released to OS (aka unmapped)\n
" | 390 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes released to OS (aka unmapped)\n
" |
540 "MALLOC: ------------\n" | 391 "MALLOC: ------------\n" |
541 "MALLOC: = %12" PRIu64 " (%7.1f MiB) Virtual address space used\n" | 392 "MALLOC: = %12" PRIu64 " (%7.1f MiB) Virtual address space used\n" |
542 "MALLOC:\n" | 393 "MALLOC:\n" |
543 "MALLOC: %12" PRIu64 " Spans in use\n" | 394 "MALLOC: %12" PRIu64 " Spans in use\n" |
544 "MALLOC: %12" PRIu64 " Thread heaps in use\n" | 395 "MALLOC: %12" PRIu64 " Thread heaps in use\n" |
545 "MALLOC: %12" PRIu64 " Tcmalloc page size\n" | 396 "MALLOC: %12" PRIu64 " Tcmalloc page size\n" |
546 "------------------------------------------------\n" | 397 "------------------------------------------------\n" |
547 "Call ReleaseFreeMemory() to release freelist memory to the OS" | 398 "Call ReleaseFreeMemory() to release freelist memory to the OS" |
548 " (via madvise()).\n" | 399 " (via madvise()).\n" |
549 "Bytes released to the OS take up virtual address space" | 400 "Bytes released to the OS take up virtual address space" |
550 " but no physical memory.\n", | 401 " but no physical memory.\n", |
551 bytes_in_use_by_app, bytes_in_use_by_app / MiB, | 402 bytes_in_use_by_app, bytes_in_use_by_app / MiB, |
552 stats.pageheap.committed_bytes, stats.pageheap.committed_bytes / MiB, | |
553 stats.pageheap.free_bytes, stats.pageheap.free_bytes / MiB, | 403 stats.pageheap.free_bytes, stats.pageheap.free_bytes / MiB, |
554 stats.central_bytes, stats.central_bytes / MiB, | 404 stats.central_bytes, stats.central_bytes / MiB, |
555 stats.transfer_bytes, stats.transfer_bytes / MiB, | 405 stats.transfer_bytes, stats.transfer_bytes / MiB, |
556 stats.thread_bytes, stats.thread_bytes / MiB, | 406 stats.thread_bytes, stats.thread_bytes / MiB, |
557 stats.metadata_bytes, stats.metadata_bytes / MiB, | 407 stats.metadata_bytes, stats.metadata_bytes / MiB, |
558 physical_memory_used, physical_memory_used / MiB, | 408 physical_memory_used, physical_memory_used / MiB, |
559 stats.pageheap.unmapped_bytes, stats.pageheap.unmapped_bytes / MiB, | 409 stats.pageheap.unmapped_bytes, stats.pageheap.unmapped_bytes / MiB, |
560 virtual_memory_used, virtual_memory_used / MiB, | 410 virtual_memory_used, virtual_memory_used / MiB, |
561 uint64_t(Static::span_allocator()->inuse()), | 411 uint64_t(Static::span_allocator()->inuse()), |
562 uint64_t(ThreadCache::HeapsInUse()), | 412 uint64_t(ThreadCache::HeapsInUse()), |
(...skipping 11 matching lines...) |
574 cumulative += class_bytes; | 424 cumulative += class_bytes; |
575 out->printf("class %3d [ %8" PRIuS " bytes ] : " | 425 out->printf("class %3d [ %8" PRIuS " bytes ] : " |
576 "%8" PRIu64 " objs; %5.1f MiB; %5.1f cum MiB\n", | 426 "%8" PRIu64 " objs; %5.1f MiB; %5.1f cum MiB\n", |
577 cl, Static::sizemap()->ByteSizeForClass(cl), | 427 cl, Static::sizemap()->ByteSizeForClass(cl), |
578 class_count[cl], | 428 class_count[cl], |
579 class_bytes / MiB, | 429 class_bytes / MiB, |
580 cumulative / MiB); | 430 cumulative / MiB); |
581 } | 431 } |
582 } | 432 } |
583 | 433 |
584 SpinLockHolder h(Static::pageheap_lock()); | 434 // append page heap info |
585 Static::pageheap()->Dump(out); | 435 int nonempty_sizes = 0; |
| 436 for (int s = 0; s < kMaxPages; s++) { |
| 437 if (small.normal_length[s] + small.returned_length[s] > 0) { |
| 438 nonempty_sizes++; |
| 439 } |
| 440 } |
| 441 out->printf("------------------------------------------------\n"); |
| 442 out->printf("PageHeap: %d sizes; %6.1f MiB free; %6.1f MiB unmapped\n", |
| 443 nonempty_sizes, stats.pageheap.free_bytes / MiB, |
| 444 stats.pageheap.unmapped_bytes / MiB); |
| 445 out->printf("------------------------------------------------\n"); |
| 446 uint64_t total_normal = 0; |
| 447 uint64_t total_returned = 0; |
| 448 for (int s = 0; s < kMaxPages; s++) { |
| 449 const int n_length = small.normal_length[s]; |
| 450 const int r_length = small.returned_length[s]; |
| 451 if (n_length + r_length > 0) { |
| 452 uint64_t n_pages = s * n_length; |
| 453 uint64_t r_pages = s * r_length; |
| 454 total_normal += n_pages; |
| 455 total_returned += r_pages; |
| 456 out->printf("%6u pages * %6u spans ~ %6.1f MiB; %6.1f MiB cum" |
| 457 "; unmapped: %6.1f MiB; %6.1f MiB cum\n", |
| 458 s, |
| 459 (n_length + r_length), |
| 460 PagesToMiB(n_pages + r_pages), |
| 461 PagesToMiB(total_normal + total_returned), |
| 462 PagesToMiB(r_pages), |
| 463 PagesToMiB(total_returned)); |
| 464 } |
| 465 } |
| 466 |
| 467 total_normal += large.normal_pages; |
| 468 total_returned += large.returned_pages; |
| 469 out->printf(">255 large * %6u spans ~ %6.1f MiB; %6.1f MiB cum" |
| 470 "; unmapped: %6.1f MiB; %6.1f MiB cum\n", |
| 471 static_cast<unsigned int>(large.spans), |
| 472 PagesToMiB(large.normal_pages + large.returned_pages), |
| 473 PagesToMiB(total_normal + total_returned), |
| 474 PagesToMiB(large.returned_pages), |
| 475 PagesToMiB(total_returned)); |
586 } | 476 } |
587 } | 477 } |
588 | 478 |
589 static void PrintStats(int level) { | 479 static void PrintStats(int level) { |
590 const int kBufferSize = 16 << 10; | 480 const int kBufferSize = 16 << 10; |
591 char* buffer = new char[kBufferSize]; | 481 char* buffer = new char[kBufferSize]; |
592 TCMalloc_Printer printer(buffer, kBufferSize); | 482 TCMalloc_Printer printer(buffer, kBufferSize); |
593 DumpStats(&printer, level); | 483 DumpStats(&printer, level); |
594 write(STDERR_FILENO, buffer, strlen(buffer)); | 484 write(STDERR_FILENO, buffer, strlen(buffer)); |
595 delete[] buffer; | 485 delete[] buffer; |
596 } | 486 } |
597 | 487 |
598 static void** DumpHeapGrowthStackTraces() { | 488 static void** DumpHeapGrowthStackTraces() { |
599 // Count how much space we need | 489 // Count how much space we need |
600 int needed_slots = 0; | 490 int needed_slots = 0; |
601 { | 491 { |
602 SpinLockHolder h(Static::pageheap_lock()); | 492 SpinLockHolder h(Static::pageheap_lock()); |
603 for (StackTrace* t = Static::growth_stacks(); | 493 for (StackTrace* t = Static::growth_stacks(); |
604 t != NULL; | 494 t != NULL; |
605 t = reinterpret_cast<StackTrace*>( | 495 t = reinterpret_cast<StackTrace*>( |
606 t->stack[tcmalloc::kMaxStackDepth-1])) { | 496 t->stack[tcmalloc::kMaxStackDepth-1])) { |
607 needed_slots += 3 + t->depth; | 497 needed_slots += 3 + t->depth; |
608 } | 498 } |
609 needed_slots += 100; // Slop in case list grows | 499 needed_slots += 100; // Slop in case list grows |
610 needed_slots += needed_slots/8; // An extra 12.5% slop | 500 needed_slots += needed_slots/8; // An extra 12.5% slop |
611 } | 501 } |
612 | 502 |
613 void** result = new void*[needed_slots]; | 503 void** result = new void*[needed_slots]; |
614 if (result == NULL) { | 504 if (result == NULL) { |
615 MESSAGE("tcmalloc: allocation failed for stack trace slots", | 505 Log(kLog, __FILE__, __LINE__, |
616 needed_slots * sizeof(*result)); | 506 "tcmalloc: allocation failed for stack trace slots", |
| 507 needed_slots * sizeof(*result)); |
617 return NULL; | 508 return NULL; |
618 } | 509 } |
619 | 510 |
620 SpinLockHolder h(Static::pageheap_lock()); | 511 SpinLockHolder h(Static::pageheap_lock()); |
621 int used_slots = 0; | 512 int used_slots = 0; |
622 for (StackTrace* t = Static::growth_stacks(); | 513 for (StackTrace* t = Static::growth_stacks(); |
623 t != NULL; | 514 t != NULL; |
624 t = reinterpret_cast<StackTrace*>( | 515 t = reinterpret_cast<StackTrace*>( |
625 t->stack[tcmalloc::kMaxStackDepth-1])) { | 516 t->stack[tcmalloc::kMaxStackDepth-1])) { |
626 ASSERT(used_slots < needed_slots); // Need to leave room for terminator | 517 ASSERT(used_slots < needed_slots); // Need to leave room for terminator |
(...skipping 105 matching lines...) |
732 | 623 |
733 virtual void Ranges(void* arg, RangeFunction func) { | 624 virtual void Ranges(void* arg, RangeFunction func) { |
734 IterateOverRanges(arg, func); | 625 IterateOverRanges(arg, func); |
735 } | 626 } |
736 | 627 |
737 virtual bool GetNumericProperty(const char* name, size_t* value) { | 628 virtual bool GetNumericProperty(const char* name, size_t* value) { |
738 ASSERT(name != NULL); | 629 ASSERT(name != NULL); |
739 | 630 |
740 if (strcmp(name, "generic.current_allocated_bytes") == 0) { | 631 if (strcmp(name, "generic.current_allocated_bytes") == 0) { |
741 TCMallocStats stats; | 632 TCMallocStats stats; |
742 ExtractStats(&stats, NULL); | 633 ExtractStats(&stats, NULL, NULL, NULL); |
743 *value = stats.pageheap.system_bytes | 634 *value = stats.pageheap.system_bytes |
744 - stats.thread_bytes | 635 - stats.thread_bytes |
745 - stats.central_bytes | 636 - stats.central_bytes |
746 - stats.transfer_bytes | 637 - stats.transfer_bytes |
747 - stats.pageheap.free_bytes | 638 - stats.pageheap.free_bytes |
748 - stats.pageheap.unmapped_bytes; | 639 - stats.pageheap.unmapped_bytes; |
749 return true; | 640 return true; |
750 } | 641 } |
751 | 642 |
752 if (strcmp(name, "generic.heap_size") == 0) { | 643 if (strcmp(name, "generic.heap_size") == 0) { |
753 TCMallocStats stats; | 644 TCMallocStats stats; |
754 ExtractStats(&stats, NULL); | 645 ExtractStats(&stats, NULL, NULL, NULL); |
755 *value = stats.pageheap.system_bytes; | 646 *value = stats.pageheap.system_bytes; |
756 return true; | 647 return true; |
757 } | 648 } |
758 | 649 |
759 if (strcmp(name, "tcmalloc.slack_bytes") == 0) { | 650 if (strcmp(name, "tcmalloc.slack_bytes") == 0) { |
760 // Kept for backwards compatibility. Now defined externally as: | 651 // Kept for backwards compatibility. Now defined externally as: |
761 // pageheap_free_bytes + pageheap_unmapped_bytes. | 652 // pageheap_free_bytes + pageheap_unmapped_bytes. |
762 SpinLockHolder l(Static::pageheap_lock()); | 653 SpinLockHolder l(Static::pageheap_lock()); |
763 PageHeap::Stats stats = Static::pageheap()->stats(); | 654 PageHeap::Stats stats = Static::pageheap()->stats(); |
764 *value = stats.free_bytes + stats.unmapped_bytes; | 655 *value = stats.free_bytes + stats.unmapped_bytes; |
(...skipping 13 matching lines...) |
778 } | 669 } |
779 | 670 |
780 if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) { | 671 if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) { |
781 SpinLockHolder l(Static::pageheap_lock()); | 672 SpinLockHolder l(Static::pageheap_lock()); |
782 *value = ThreadCache::overall_thread_cache_size(); | 673 *value = ThreadCache::overall_thread_cache_size(); |
783 return true; | 674 return true; |
784 } | 675 } |
785 | 676 |
786 if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) { | 677 if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) { |
787 TCMallocStats stats; | 678 TCMallocStats stats; |
788 ExtractStats(&stats, NULL); | 679 ExtractStats(&stats, NULL, NULL, NULL); |
789 *value = stats.thread_bytes; | 680 *value = stats.thread_bytes; |
790 return true; | 681 return true; |
791 } | 682 } |
792 | 683 |
793 return false; | 684 return false; |
794 } | 685 } |
795 | 686 |
796 virtual bool SetNumericProperty(const char* name, size_t value) { | 687 virtual bool SetNumericProperty(const char* name, size_t value) { |
797 ASSERT(name != NULL); | 688 ASSERT(name != NULL); |
798 | 689 |
(...skipping 60 matching lines...) |
859 const size_t alloc_size = Static::sizemap()->ByteSizeForClass(cl); | 750 const size_t alloc_size = Static::sizemap()->ByteSizeForClass(cl); |
860 return alloc_size; | 751 return alloc_size; |
861 } else { | 752 } else { |
862 return tcmalloc::pages(size) << kPageShift; | 753 return tcmalloc::pages(size) << kPageShift; |
863 } | 754 } |
864 } | 755 } |
865 | 756 |
866 // This just calls GetSizeWithCallback, but because that's in an | 757 // This just calls GetSizeWithCallback, but because that's in an |
867 // unnamed namespace, we need to move the definition below it in the | 758 // unnamed namespace, we need to move the definition below it in the |
868 // file. | 759 // file. |
869 virtual size_t GetAllocatedSize(void* ptr); | 760 virtual size_t GetAllocatedSize(const void* ptr); |
| 761 |
| 762 // This duplicates some of the logic in GetSizeWithCallback, but is |
| 763 // faster. This is important on OS X, where this function is called |
| 764 // on every allocation operation. |
| 765 virtual Ownership GetOwnership(const void* ptr) { |
| 766 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; |
| 767 // The rest of tcmalloc assumes that all allocated pointers use at |
| 768 // most kAddressBits bits. If ptr doesn't, then it definitely |
| 769 // wasn't allocated by tcmalloc. |
| 770 if ((p >> (kAddressBits - kPageShift)) > 0) { |
| 771 return kNotOwned; |
| 772 } |
| 773 size_t cl = Static::pageheap()->GetSizeClassIfCached(p); |
| 774 if (cl != 0) { |
| 775 return kOwned; |
| 776 } |
| 777 const Span *span = Static::pageheap()->GetDescriptor(p); |
| 778 return span ? kOwned : kNotOwned; |
| 779 } |
870 | 780 |
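To illustrate the kAddressBits filter in GetOwnership() above: a page ID has bits above (kAddressBits - kPageShift) only when the pointer lies outside the address range tcmalloc ever allocates from. The constants below are illustrative stand-ins for the real configuration:

    #include <stdint.h>
    #include <stdio.h>

    int main() {
      const int kAddressBits = 48, kPageShift = 13;  // assumed values
      uintptr_t ptr = (uintptr_t)1 << 50;            // beyond a 48-bit space
      uintptr_t p = ptr >> kPageShift;               // page ID of ptr
      // Prints 1: high bits are set, so tcmalloc cannot own this pointer.
      printf("%d\n", (int)((p >> (kAddressBits - kPageShift)) > 0));
      return 0;
    }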
871 virtual void GetFreeListSizes(vector<MallocExtension::FreeListInfo>* v) { | 781 virtual void GetFreeListSizes(vector<MallocExtension::FreeListInfo>* v) { |
872 static const char* kCentralCacheType = "tcmalloc.central"; | 782 static const char* kCentralCacheType = "tcmalloc.central"; |
873 static const char* kTransferCacheType = "tcmalloc.transfer"; | 783 static const char* kTransferCacheType = "tcmalloc.transfer"; |
874 static const char* kThreadCacheType = "tcmalloc.thread"; | 784 static const char* kThreadCacheType = "tcmalloc.thread"; |
875 static const char* kPageHeapType = "tcmalloc.page"; | 785 static const char* kPageHeapType = "tcmalloc.page"; |
876 static const char* kPageHeapUnmappedType = "tcmalloc.page_unmapped"; | 786 static const char* kPageHeapUnmappedType = "tcmalloc.page_unmapped"; |
877 static const char* kLargeSpanType = "tcmalloc.large"; | 787 static const char* kLargeSpanType = "tcmalloc.large"; |
878 static const char* kLargeUnmappedSpanType = "tcmalloc.large_unmapped"; | 788 static const char* kLargeUnmappedSpanType = "tcmalloc.large_unmapped"; |
879 | 789 |
(...skipping 34 matching lines...) |
914 MallocExtension::FreeListInfo i; | 824 MallocExtension::FreeListInfo i; |
915 i.min_object_size = prev_class_size + 1; | 825 i.min_object_size = prev_class_size + 1; |
916 i.max_object_size = Static::sizemap()->ByteSizeForClass(cl); | 826 i.max_object_size = Static::sizemap()->ByteSizeForClass(cl); |
917 i.total_bytes_free = | 827 i.total_bytes_free = |
918 class_count[cl] * Static::sizemap()->ByteSizeForClass(cl); | 828 class_count[cl] * Static::sizemap()->ByteSizeForClass(cl); |
919 i.type = kThreadCacheType; | 829 i.type = kThreadCacheType; |
920 v->push_back(i); | 830 v->push_back(i); |
921 } | 831 } |
922 | 832 |
923 // append page heap info | 833 // append page heap info |
924 int64 page_count_normal[kMaxPages]; | 834 PageHeap::SmallSpanStats small; |
925 int64 page_count_returned[kMaxPages]; | 835 PageHeap::LargeSpanStats large; |
926 int64 span_count_normal; | |
927 int64 span_count_returned; | |
928 { | 836 { |
929 SpinLockHolder h(Static::pageheap_lock()); | 837 SpinLockHolder h(Static::pageheap_lock()); |
930 Static::pageheap()->GetClassSizes(page_count_normal, | 838 Static::pageheap()->GetSmallSpanStats(&small); |
931 page_count_returned, | 839 Static::pageheap()->GetLargeSpanStats(&large); |
932 &span_count_normal, | |
933 &span_count_returned); | |
934 } | 840 } |
935 | 841 |
936 // spans: mapped | 842 // large spans: mapped |
937 MallocExtension::FreeListInfo span_info; | 843 MallocExtension::FreeListInfo span_info; |
938 span_info.type = kLargeSpanType; | 844 span_info.type = kLargeSpanType; |
939 span_info.max_object_size = (numeric_limits<size_t>::max)(); | 845 span_info.max_object_size = (numeric_limits<size_t>::max)(); |
940 span_info.min_object_size = kMaxPages << kPageShift; | 846 span_info.min_object_size = kMaxPages << kPageShift; |
941 span_info.total_bytes_free = span_count_normal << kPageShift; | 847 span_info.total_bytes_free = large.normal_pages << kPageShift; |
942 v->push_back(span_info); | 848 v->push_back(span_info); |
943 | 849 |
944 // spans: unmapped | 850 // large spans: unmapped |
945 span_info.type = kLargeUnmappedSpanType; | 851 span_info.type = kLargeUnmappedSpanType; |
946 span_info.total_bytes_free = span_count_returned << kPageShift; | 852 span_info.total_bytes_free = large.returned_pages << kPageShift; |
947 v->push_back(span_info); | 853 v->push_back(span_info); |
948 | 854 |
| 855 // small spans |
949 for (int s = 1; s < kMaxPages; s++) { | 856 for (int s = 1; s < kMaxPages; s++) { |
950 MallocExtension::FreeListInfo i; | 857 MallocExtension::FreeListInfo i; |
951 i.max_object_size = (s << kPageShift); | 858 i.max_object_size = (s << kPageShift); |
952 i.min_object_size = ((s - 1) << kPageShift); | 859 i.min_object_size = ((s - 1) << kPageShift); |
953 | 860 |
954 i.type = kPageHeapType; | 861 i.type = kPageHeapType; |
955 i.total_bytes_free = (s << kPageShift) * page_count_normal[s]; | 862 i.total_bytes_free = (s << kPageShift) * small.normal_length[s]; |
956 v->push_back(i); | 863 v->push_back(i); |
957 | 864 |
958 i.type = kPageHeapUnmappedType; | 865 i.type = kPageHeapUnmappedType; |
959 i.total_bytes_free = (s << kPageShift) * page_count_returned[s]; | 866 i.total_bytes_free = (s << kPageShift) * small.returned_length[s]; |
960 v->push_back(i); | 867 v->push_back(i); |
961 } | 868 } |
962 } | 869 } |
963 }; | 870 }; |
964 | 871 |
965 // The constructor allocates an object to ensure that initialization | 872 // The constructor allocates an object to ensure that initialization |
966 // runs before main(), and therefore we do not have a chance to become | 873 // runs before main(), and therefore we do not have a chance to become |
967 // multi-threaded before initialization. We also create the TSD key | 874 // multi-threaded before initialization. We also create the TSD key |
968 // here. Presumably by the time this constructor runs, glibc is in | 875 // here. Presumably by the time this constructor runs, glibc is in |
969 // good enough shape to handle pthread_key_create(). | 876 // good enough shape to handle pthread_key_create(). |
970 // | 877 // |
971 // The constructor also takes the opportunity to tell STL to use | 878 // The constructor also takes the opportunity to tell STL to use |
972 // tcmalloc. We want to do this early, before construct time, so | 879 // tcmalloc. We want to do this early, before construct time, so |
973 // all user STL allocations go through tcmalloc (which works really | 880 // all user STL allocations go through tcmalloc (which works really |
974 // well for STL). | 881 // well for STL). |
975 // | 882 // |
976 // The destructor prints stats when the program exits. | 883 // The destructor prints stats when the program exits. |
977 static int tcmallocguard_refcount = 0; // no lock needed: runs before main() | 884 static int tcmallocguard_refcount = 0; // no lock needed: runs before main() |
978 TCMallocGuard::TCMallocGuard() { | 885 TCMallocGuard::TCMallocGuard() { |
979 if (tcmallocguard_refcount++ == 0) { | 886 if (tcmallocguard_refcount++ == 0) { |
980 #ifdef HAVE_TLS // this is true if the cc/ld/libc combo support TLS | 887 #ifdef HAVE_TLS // this is true if the cc/ld/libc combo support TLS |
981 // Check whether the kernel also supports TLS (needs to happen at runtime) | 888 // Check whether the kernel also supports TLS (needs to happen at runtime) |
982 tcmalloc::CheckIfKernelSupportsTLS(); | 889 tcmalloc::CheckIfKernelSupportsTLS(); |
983 #endif | 890 #endif |
984 #ifdef WIN32_DO_PATCHING | 891 ReplaceSystemAlloc(); // defined in libc_override_*.h |
985 // patch the windows VirtualAlloc, etc. | |
986 PatchWindowsFunctions(); // defined in windows/patch_functions.cc | |
987 #endif | |
988 tc_free(tc_malloc(1)); | 892 tc_free(tc_malloc(1)); |
989 ThreadCache::InitTSD(); | 893 ThreadCache::InitTSD(); |
990 tc_free(tc_malloc(1)); | 894 tc_free(tc_malloc(1)); |
991 // Either we, or debugallocation.cc, or valgrind will control memory | 895 // Either we, or debugallocation.cc, or valgrind will control memory |
992 // management. We register our extension if we're the winner. | 896 // management. We register our extension if we're the winner. |
993 #ifdef TCMALLOC_USING_DEBUGALLOCATION | 897 #ifdef TCMALLOC_USING_DEBUGALLOCATION |
994 // Let debugallocation register its extension. | 898 // Let debugallocation register its extension. |
995 #else | 899 #else |
996 if (RunningOnValgrind()) { | 900 if (RunningOnValgrind()) { |
997 // Let Valgrind use its own malloc (so don't register our extension). | 901 // Let Valgrind use its own malloc (so don't register our extension). |
(...skipping 24 matching lines...) |
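The TCMallocGuard comment above relies on the classic static-initialization guard idiom (the same trick std::ios_base::Init uses). A minimal sketch with illustrative names:

    #include <stdio.h>

    static int guard_refcount = 0;  // no lock needed: runs before main()

    struct Guard {
      Guard()  { if (guard_refcount++ == 0) puts("init before main()"); }
      ~Guard() { if (--guard_refcount == 0) puts("teardown at exit"); }
    };

    // One static instance per translation unit that includes the header;
    // the refcount makes the real initialization happen exactly once.
    static Guard guard;

    int main() { return 0; }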
1022 | 926 |
1023 static inline bool CheckCachedSizeClass(void *ptr) { | 927 static inline bool CheckCachedSizeClass(void *ptr) { |
1024 PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; | 928 PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; |
1025 size_t cached_value = Static::pageheap()->GetSizeClassIfCached(p); | 929 size_t cached_value = Static::pageheap()->GetSizeClassIfCached(p); |
1026 return cached_value == 0 || | 930 return cached_value == 0 || |
1027 cached_value == Static::pageheap()->GetDescriptor(p)->sizeclass; | 931 cached_value == Static::pageheap()->GetDescriptor(p)->sizeclass; |
1028 } | 932 } |
1029 | 933 |
1030 static inline void* CheckedMallocResult(void *result) { | 934 static inline void* CheckedMallocResult(void *result) { |
1031 ASSERT(result == NULL || CheckCachedSizeClass(result)); | 935 ASSERT(result == NULL || CheckCachedSizeClass(result)); |
1032 MarkAllocatedRegion(result); | |
1033 return result; | 936 return result; |
1034 } | 937 } |
1035 | 938 |
1036 static inline void* SpanToMallocResult(Span *span) { | 939 static inline void* SpanToMallocResult(Span *span) { |
1037 Static::pageheap()->CacheSizeClass(span->start, 0); | 940 Static::pageheap()->CacheSizeClass(span->start, 0); |
1038 return | 941 return |
1039 CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift)); | 942 CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift)); |
1040 } | 943 } |
1041 | 944 |
1042 static void* DoSampledAllocation(size_t size) { | 945 static void* DoSampledAllocation(size_t size) { |
(...skipping 31 matching lines...) |
1074 (kPageSize > FLAGS_tcmalloc_large_alloc_report_threshold | 977 (kPageSize > FLAGS_tcmalloc_large_alloc_report_threshold |
1075 ? kPageSize : FLAGS_tcmalloc_large_alloc_report_threshold); | 978 ? kPageSize : FLAGS_tcmalloc_large_alloc_report_threshold); |
1076 | 979 |
1077 static void ReportLargeAlloc(Length num_pages, void* result) { | 980 static void ReportLargeAlloc(Length num_pages, void* result) { |
1078 StackTrace stack; | 981 StackTrace stack; |
1079 stack.depth = GetStackTrace(stack.stack, tcmalloc::kMaxStackDepth, 1); | 982 stack.depth = GetStackTrace(stack.stack, tcmalloc::kMaxStackDepth, 1); |
1080 | 983 |
1081 static const int N = 1000; | 984 static const int N = 1000; |
1082 char buffer[N]; | 985 char buffer[N]; |
1083 TCMalloc_Printer printer(buffer, N); | 986 TCMalloc_Printer printer(buffer, N); |
1084 printer.printf("tcmalloc: large alloc %llu bytes == %p @ ", | 987 printer.printf("tcmalloc: large alloc %"PRIu64" bytes == %p @ ", |
1085 static_cast<unsigned long long>(num_pages) << kPageShift, | 988 static_cast<uint64>(num_pages) << kPageShift, |
1086 result); | 989 result); |
1087 for (int i = 0; i < stack.depth; i++) { | 990 for (int i = 0; i < stack.depth; i++) { |
1088 printer.printf(" %p", stack.stack[i]); | 991 printer.printf(" %p", stack.stack[i]); |
1089 } | 992 } |
1090 printer.printf("\n"); | 993 printer.printf("\n"); |
1091 write(STDERR_FILENO, buffer, strlen(buffer)); | 994 write(STDERR_FILENO, buffer, strlen(buffer)); |
1092 } | 995 } |
1093 | 996 |
1094 inline void* cpp_alloc(size_t size, bool nothrow); | 997 inline void* cpp_alloc(size_t size, bool nothrow); |
1095 inline void* do_malloc(size_t size); | 998 inline void* do_malloc(size_t size); |
1096 | 999 |
1097 // TODO(willchan): Investigate whether or not inlining this much is harmful to | 1000 // TODO(willchan): Investigate whether or not inlining this much is harmful to |
1098 // performance. | 1001 // performance. |
1099 // This is equivalent to do_malloc() except when tc_new_mode is set to true. | 1002 // This is equivalent to do_malloc() except when tc_new_mode is set to true. |
1100 // Otherwise, it will run the std::new_handler if set. | 1003 // Otherwise, it will run the std::new_handler if set. |
1101 inline void* do_malloc_or_cpp_alloc(size_t size) { | 1004 inline void* do_malloc_or_cpp_alloc(size_t size) { |
1102 return tc_new_mode ? cpp_alloc(size, true) : do_malloc(size); | 1005 return tc_new_mode ? cpp_alloc(size, true) : do_malloc(size); |
1103 } | 1006 } |
1104 | 1007 |
1105 void* cpp_memalign(size_t align, size_t size); | 1008 void* cpp_memalign(size_t align, size_t size); |
1106 void* do_memalign(size_t align, size_t size); | 1009 void* do_memalign(size_t align, size_t size); |
1107 | 1010 |
(...skipping 34 matching lines...) |
1142 report_large = should_report_large(num_pages); | 1045 report_large = should_report_large(num_pages); |
1143 } | 1046 } |
1144 | 1047 |
1145 if (report_large) { | 1048 if (report_large) { |
1146 ReportLargeAlloc(num_pages, result); | 1049 ReportLargeAlloc(num_pages, result); |
1147 } | 1050 } |
1148 return result; | 1051 return result; |
1149 } | 1052 } |
1150 | 1053 |
1151 inline void* do_malloc(size_t size) { | 1054 inline void* do_malloc(size_t size) { |
1152 AddRoomForMark(&size); | |
1153 | |
1154 void* ret = NULL; | 1055 void* ret = NULL; |
1155 | 1056 |
1156 // The following call forces module initialization | 1057 // The following call forces module initialization |
1157 ThreadCache* heap = ThreadCache::GetCache(); | 1058 ThreadCache* heap = ThreadCache::GetCache(); |
1158 if (size <= kMaxSize) { | 1059 if (size <= kMaxSize) { |
1159 size_t cl = Static::sizemap()->SizeClass(size); | 1060 size_t cl = Static::sizemap()->SizeClass(size); |
1160 size = Static::sizemap()->class_to_size(cl); | 1061 size = Static::sizemap()->class_to_size(cl); |
1161 | 1062 |
1162 if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) { | 1063 if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) { |
1163 ret = DoSampledAllocation(size); | 1064 ret = DoSampledAllocation(size); |
1164 MarkAllocatedRegion(ret); | |
1165 } else { | 1065 } else { |
1166 // The common case, and also the simplest. This just pops the | 1066 // The common case, and also the simplest. This just pops the |
1167 // size-appropriate freelist, after replenishing it if it's empty. | 1067 // size-appropriate freelist, after replenishing it if it's empty. |
1168 ret = CheckedMallocResult(heap->Allocate(size, cl)); | 1068 ret = CheckedMallocResult(heap->Allocate(size, cl)); |
1169 } | 1069 } |
1170 } else { | 1070 } else { |
1171 ret = do_malloc_pages(heap, size); | 1071 ret = do_malloc_pages(heap, size); |
1172 MarkAllocatedRegion(ret); | |
1173 } | 1072 } |
1174 if (ret == NULL) errno = ENOMEM; | 1073 if (ret == NULL) errno = ENOMEM; |
1175 return ret; | 1074 return ret; |
1176 } | 1075 } |
1177 | 1076 |
1178 inline void* do_calloc(size_t n, size_t elem_size) { | 1077 inline void* do_calloc(size_t n, size_t elem_size) { |
1179 // Overflow check | 1078 // Overflow check |
1180 const size_t size = n * elem_size; | 1079 const size_t size = n * elem_size; |
1181 if (elem_size != 0 && size / elem_size != n) return NULL; | 1080 if (elem_size != 0 && size / elem_size != n) return NULL; |
1182 | 1081 |
1183 void* result = do_malloc_or_cpp_alloc(size); | 1082 void* result = do_malloc_or_cpp_alloc(size); |
1184 if (result != NULL) { | 1083 if (result != NULL) { |
1185 memset(result, 0, size); | 1084 memset(result, 0, size); |
1186 } | 1085 } |
1187 return result; | 1086 return result; |
1188 } | 1087 } |
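
The overflow guard above can be exercised on its own. A small sketch (CallocWouldOverflow is an invented helper): if n * elem_size wraps modulo 2^N, dividing the wrapped product back by elem_size no longer reproduces n, for any width of size_t:

#include <stddef.h>   // for size_t
#include <stdint.h>   // for SIZE_MAX

static bool CallocWouldOverflow(size_t n, size_t elem_size) {
  const size_t size = n * elem_size;               // may wrap
  return elem_size != 0 && size / elem_size != n;  // wrap detected
}

// Example: n = SIZE_MAX/2 + 1 with elem_size = 2 wraps the product to 0,
// and 0 / 2 == 0 != n, so do_calloc would return NULL for this request.
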
1189 | 1088 |
1190 static inline ThreadCache* GetCacheIfPresent() { | 1089 static inline ThreadCache* GetCacheIfPresent() { |
1191 void* const p = ThreadCache::GetCacheIfPresent(); | 1090 void* const p = ThreadCache::GetCacheIfPresent(); |
1192 return reinterpret_cast<ThreadCache*>(p); | 1091 return reinterpret_cast<ThreadCache*>(p); |
1193 } | 1092 } |
1194 | 1093 |
1195 // This lets you call back to a given function pointer if ptr is invalid. | 1094 // This lets you call back to a given function pointer if ptr is invalid. |
1196 // It is used primarily by Windows code which wants a specialized callback. | 1095 // It is used primarily by Windows code which wants a specialized callback. |
1197 inline void do_free_with_callback(void* ptr, void (*invalid_free_fn)(void*)) { | 1096 inline void do_free_with_callback(void* ptr, void (*invalid_free_fn)(void*)) { |
1198 if (ptr == NULL) return; | 1097 if (ptr == NULL) return; |
1199 ASSERT(Static::pageheap() != NULL); // Should not call free() before malloc() | 1098 if (Static::pageheap() == NULL) { |
| 1099 // We called free() before malloc(). This can occur if the |
| 1100 // (system) malloc() is called before tcmalloc is loaded, and then |
| 1101 // free() is called after tcmalloc is loaded (and tc_free has |
| 1102 // replaced free), but before the global constructor has run that |
| 1103 // sets up the tcmalloc data structures. |
| 1104 (*invalid_free_fn)(ptr); // Decide how to handle the bad free request |
| 1105 return; |
| 1106 } |
1200 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; | 1107 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; |
1201 Span* span = NULL; | 1108 Span* span = NULL; |
1202 size_t cl = Static::pageheap()->GetSizeClassIfCached(p); | 1109 size_t cl = Static::pageheap()->GetSizeClassIfCached(p); |
1203 | 1110 |
1204 if (cl == 0) { | 1111 if (cl == 0) { |
1205 span = Static::pageheap()->GetDescriptor(p); | 1112 span = Static::pageheap()->GetDescriptor(p); |
1206 if (!span) { | 1113 if (!span) { |
1207 // span can be NULL because the pointer passed in is invalid | 1114 // span can be NULL because the pointer passed in is invalid |
1208 // (not something returned by malloc or friends), or because the | 1115 // (not something returned by malloc or friends), or because the |
1209 // pointer was allocated with some other allocator besides | 1116 // pointer was allocated with some other allocator besides |
1210 // tcmalloc. The latter can happen if tcmalloc is linked in via | 1117 // tcmalloc. The latter can happen if tcmalloc is linked in via |
1211 // a dynamic library, but is not listed last on the link line. | 1118 // a dynamic library, but is not listed last on the link line. |
1212 // In that case, libraries after it on the link line will | 1119 // In that case, libraries after it on the link line will |
1213 // allocate with libc malloc, but free with tcmalloc's free. | 1120 // allocate with libc malloc, but free with tcmalloc's free. |
1214 (*invalid_free_fn)(ptr); // Decide how to handle the bad free request | 1121 (*invalid_free_fn)(ptr); // Decide how to handle the bad free request |
1215 return; | 1122 return; |
1216 } | 1123 } |
1217 cl = span->sizeclass; | 1124 cl = span->sizeclass; |
1218 Static::pageheap()->CacheSizeClass(p, cl); | 1125 Static::pageheap()->CacheSizeClass(p, cl); |
1219 } | 1126 } |
1220 | |
1221 ValidateAllocatedRegion(ptr, cl); | |
1222 | |
1223 if (cl != 0) { | 1127 if (cl != 0) { |
1224 ASSERT(!Static::pageheap()->GetDescriptor(p)->sample); | 1128 ASSERT(!Static::pageheap()->GetDescriptor(p)->sample); |
1225 ThreadCache* heap = GetCacheIfPresent(); | 1129 ThreadCache* heap = GetCacheIfPresent(); |
1226 if (heap != NULL) { | 1130 if (heap != NULL) { |
1227 heap->Deallocate(ptr, cl); | 1131 heap->Deallocate(ptr, cl); |
1228 } else { | 1132 } else { |
1229 // Delete directly into central cache | 1133 // Delete directly into central cache |
1230 tcmalloc::FL_Init(ptr); | 1134 tcmalloc::SLL_SetNext(ptr, NULL); |
1231 Static::central_cache()[cl].InsertRange(ptr, ptr, 1); | 1135 Static::central_cache()[cl].InsertRange(ptr, ptr, 1); |
1232 } | 1136 } |
1233 } else { | 1137 } else { |
1234 SpinLockHolder h(Static::pageheap_lock()); | 1138 SpinLockHolder h(Static::pageheap_lock()); |
1235 ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0); | 1139 ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0); |
1236 ASSERT(span != NULL && span->start == p); | 1140 ASSERT(span != NULL && span->start == p); |
1237 if (span->sample) { | 1141 if (span->sample) { |
1238 StackTrace* st = reinterpret_cast<StackTrace*>(span->objects); | 1142 StackTrace* st = reinterpret_cast<StackTrace*>(span->objects); |
1239 tcmalloc::DLL_Remove(span); | 1143 tcmalloc::DLL_Remove(span); |
1240 Static::stacktrace_allocator()->Delete(st); | 1144 Static::stacktrace_allocator()->Delete(st); |
1241 span->objects = NULL; | 1145 span->objects = NULL; |
1242 } | 1146 } |
1243 Static::pageheap()->Delete(span); | 1147 Static::pageheap()->Delete(span); |
1244 } | 1148 } |
1245 } | 1149 } |
1246 | 1150 |
1247 // The default "do_free" that uses the default callback. | 1151 // The default "do_free" that uses the default callback. |
1248 inline void do_free(void* ptr) { | 1152 inline void do_free(void* ptr) { |
1249 return do_free_with_callback(ptr, &InvalidFree); | 1153 return do_free_with_callback(ptr, &InvalidFree); |
1250 } | 1154 } |
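
To sketch the callback mechanism, here is a hypothetical policy that reports an unrecognized pointer instead of crashing (LogInvalidFree is an invented name; Windows builds install their own specialized callback, per the comment above):

#include <stdio.h>   // for fprintf, stderr

static void LogInvalidFree(void* ptr) {
  fprintf(stderr, "invalid free of %p (not a tcmalloc pointer)\n", ptr);
}

// A caller would then route frees through the callback-taking variant:
//   do_free_with_callback(ptr, &LogInvalidFree);
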
1251 | 1155 |
1252 inline size_t GetSizeWithCallback(void* ptr, | 1156 // NOTE: some logic here is duplicated in GetOwnership (above), for |
1253 size_t (*invalid_getsize_fn)(void*)) { | 1157 // speed. If you change this function, look at that one too. |
| 1158 inline size_t GetSizeWithCallback(const void* ptr, |
| 1159 size_t (*invalid_getsize_fn)(const void*)) { |
1254 if (ptr == NULL) | 1160 if (ptr == NULL) |
1255 return 0; | 1161 return 0; |
1256 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; | 1162 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; |
1257 size_t cl = Static::pageheap()->GetSizeClassIfCached(p); | 1163 size_t cl = Static::pageheap()->GetSizeClassIfCached(p); |
1258 if (cl != 0) { | 1164 if (cl != 0) { |
1259 return Static::sizemap()->ByteSizeForClass(cl); | 1165 return Static::sizemap()->ByteSizeForClass(cl); |
1260 } else { | 1166 } else { |
1261 Span *span = Static::pageheap()->GetDescriptor(p); | 1167 const Span *span = Static::pageheap()->GetDescriptor(p); |
1262 if (span == NULL) { // means we do not own this memory | 1168 if (span == NULL) { // means we do not own this memory |
1263 return (*invalid_getsize_fn)(ptr); | 1169 return (*invalid_getsize_fn)(ptr); |
1264 } else if (span->sizeclass != 0) { | 1170 } else if (span->sizeclass != 0) { |
1265 Static::pageheap()->CacheSizeClass(p, span->sizeclass); | 1171 Static::pageheap()->CacheSizeClass(p, span->sizeclass); |
1266 return Static::sizemap()->ByteSizeForClass(span->sizeclass); | 1172 return Static::sizemap()->ByteSizeForClass(span->sizeclass); |
1267 } else { | 1173 } else { |
1268 return span->length << kPageShift; | 1174 return span->length << kPageShift; |
1269 } | 1175 } |
1270 } | 1176 } |
1271 } | 1177 } |
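
Every lookup above keys off the pointer's page number. A sketch of that mapping, assuming a page shift of 13 (8 KiB pages) purely for illustration, since kPageShift is a compile-time constant of this file:

#include <stddef.h>   // for size_t
#include <stdint.h>   // for uintptr_t

typedef uintptr_t ModelPageID;
static const size_t kModelPageShift = 13;  // assumed value; 2^13-byte pages

// All bytes in a page share one PageID, so a single cached size-class
// entry answers GetSizeWithCallback() for every object in that page;
// only a cache miss falls back to the span descriptor.
static ModelPageID PageFor(const void* ptr) {
  return reinterpret_cast<uintptr_t>(ptr) >> kModelPageShift;
}
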
1272 | 1178 |
1273 // This lets you call back to a given function pointer if ptr is invalid. | 1179 // This lets you call back to a given function pointer if ptr is invalid. |
1274 // It is used primarily by Windows code which wants a specialized callback. | 1180 // It is used primarily by Windows code which wants a specialized callback. |
1275 inline void* do_realloc_with_callback( | 1181 inline void* do_realloc_with_callback( |
1276 void* old_ptr, size_t new_size, | 1182 void* old_ptr, size_t new_size, |
1277 void (*invalid_free_fn)(void*), | 1183 void (*invalid_free_fn)(void*), |
1278 size_t (*invalid_get_size_fn)(void*)) { | 1184 size_t (*invalid_get_size_fn)(const void*)) { |
1279 AddRoomForMark(&new_size); | |
1280 // Get the size of the old entry | 1185 // Get the size of the old entry |
1281 const size_t old_size = GetSizeWithCallback(old_ptr, invalid_get_size_fn); | 1186 const size_t old_size = GetSizeWithCallback(old_ptr, invalid_get_size_fn); |
1282 | 1187 |
1283 // Reallocate if the new size is larger than the old size, | 1188 // Reallocate if the new size is larger than the old size, |
1284 // or if the new size is significantly smaller than the old size. | 1189 // or if the new size is significantly smaller than the old size. |
1285 // We do hysteresis to avoid resizing ping-pongs: | 1190 // We do hysteresis to avoid resizing ping-pongs: |
1286 // . If we need to grow, grow to max(new_size, old_size * 1.X) | 1191 // . If we need to grow, grow to max(new_size, old_size * 1.X) |
1287 // . Don't shrink unless new_size < old_size * 0.Y | 1192 // . Don't shrink unless new_size < old_size * 0.Y |
1288 // X and Y trade-off time for wasted space. For now we do 1.25 and 0.5. | 1193 // X and Y trade-off time for wasted space. For now we do 1.25 and 0.5. |
1289 const int lower_bound_to_grow = old_size + old_size / 4; | 1194 const int lower_bound_to_grow = old_size + old_size / 4; |
1290 const int upper_bound_to_shrink = old_size / 2; | 1195 const int upper_bound_to_shrink = old_size / 2; |
1291 if ((new_size > old_size) || (new_size < upper_bound_to_shrink)) { | 1196 if ((new_size > old_size) || (new_size < upper_bound_to_shrink)) { |
1292 // Need to reallocate. | 1197 // Need to reallocate. |
1293 void* new_ptr = NULL; | 1198 void* new_ptr = NULL; |
1294 | 1199 |
1295 if (new_size > old_size && new_size < lower_bound_to_grow) { | 1200 if (new_size > old_size && new_size < lower_bound_to_grow) { |
1296 new_ptr = do_malloc_or_cpp_alloc(lower_bound_to_grow); | 1201 new_ptr = do_malloc_or_cpp_alloc(lower_bound_to_grow); |
1297 } | 1202 } |
1298 ExcludeMarkFromSize(&new_size); // do_malloc will add space if needed. | |
1299 if (new_ptr == NULL) { | 1203 if (new_ptr == NULL) { |
1300 // Either new_size is not a tiny increment, or last do_malloc failed. | 1204 // Either new_size is not a tiny increment, or last do_malloc failed. |
1301 new_ptr = do_malloc_or_cpp_alloc(new_size); | 1205 new_ptr = do_malloc_or_cpp_alloc(new_size); |
1302 } | 1206 } |
1303 if (new_ptr == NULL) { | 1207 if (new_ptr == NULL) { |
1304 return NULL; | 1208 return NULL; |
1305 } | 1209 } |
1306 MallocHook::InvokeNewHook(new_ptr, new_size); | 1210 MallocHook::InvokeNewHook(new_ptr, new_size); |
1307 memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size)); | 1211 memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size)); |
1308 MallocHook::InvokeDeleteHook(old_ptr); | 1212 MallocHook::InvokeDeleteHook(old_ptr); |
1309 // We could use a variant of do_free() that leverages the fact | 1213 // We could use a variant of do_free() that leverages the fact |
1310 // that we already know the sizeclass of old_ptr. The benefit | 1214 // that we already know the sizeclass of old_ptr. The benefit |
1311 // would be small, so don't bother. | 1215 // would be small, so don't bother. |
1312 do_free_with_callback(old_ptr, invalid_free_fn); | 1216 do_free_with_callback(old_ptr, invalid_free_fn); |
1313 return new_ptr; | 1217 return new_ptr; |
1314 } else { | 1218 } else { |
1315 // We still need to call hooks to report the updated size: | 1219 // We still need to call hooks to report the updated size: |
1316 MallocHook::InvokeDeleteHook(old_ptr); | 1220 MallocHook::InvokeDeleteHook(old_ptr); |
1317 ExcludeMarkFromSize(&new_size); | |
1318 MallocHook::InvokeNewHook(old_ptr, new_size); | 1221 MallocHook::InvokeNewHook(old_ptr, new_size); |
1319 return old_ptr; | 1222 return old_ptr; |
1320 } | 1223 } |
1321 } | 1224 } |
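
Concretely, with old_size = 100 the thresholds above are lower_bound_to_grow = 125 and upper_bound_to_shrink = 50 (note that both are held in int, so sizes beyond INT_MAX would truncate):

// realloc to 110 -> reallocates, but requests 125, so the next small
//                   upward bump is absorbed without another copy
// realloc to 130 -> reallocates to exactly 130 (beyond the 1.25x band)
// realloc to  60 -> keeps the old block (60 is not below 50)
// realloc to  40 -> reallocates down to 40 (40 < 50)
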
1322 | 1225 |
1323 inline void* do_realloc(void* old_ptr, size_t new_size) { | 1226 inline void* do_realloc(void* old_ptr, size_t new_size) { |
1324 return do_realloc_with_callback(old_ptr, new_size, | 1227 return do_realloc_with_callback(old_ptr, new_size, |
1325 &InvalidFree, &InvalidGetSizeForRealloc); | 1228 &InvalidFree, &InvalidGetSizeForRealloc); |
1326 } | 1229 } |
1327 | 1230 |
1328 // For use by exported routines below that want specific alignments | 1231 // For use by exported routines below that want specific alignments |
1329 // | 1232 // |
1330 // Note: this code can be slow for alignments > 16, and can | 1233 // Note: this code can be slow for alignments > 16, and can |
1331 // significantly fragment memory. The expectation is that | 1234 // significantly fragment memory. The expectation is that |
1332 // memalign/posix_memalign/valloc/pvalloc will not be invoked very | 1235 // memalign/posix_memalign/valloc/pvalloc will not be invoked very |
1333 // often. This requirement simplifies our implementation and allows | 1236 // often. This requirement simplifies our implementation and allows |
1334 // us to tune for expected allocation patterns. | 1237 // us to tune for expected allocation patterns. |
1335 void* do_memalign(size_t align, size_t size) { | 1238 void* do_memalign(size_t align, size_t size) { |
1336 ASSERT((align & (align - 1)) == 0); | 1239 ASSERT((align & (align - 1)) == 0); |
1337 ASSERT(align > 0); | 1240 ASSERT(align > 0); |
1338 // Marked in CheckMallocResult(), which is also inside SpanToMallocResult(). | |
1339 AddRoomForMark(&size); | |
1340 if (size + align < size) return NULL; // Overflow | 1241 if (size + align < size) return NULL; // Overflow |
1341 | 1242 |
1342 // Fall back to malloc if we would already align this memory access properly. | 1243 // Fall back to malloc if we would already align this memory access properly. |
1343 if (align <= AlignmentForSize(size)) { | 1244 if (align <= AlignmentForSize(size)) { |
1344 void* p = do_malloc(size); | 1245 void* p = do_malloc(size); |
1345 ASSERT((reinterpret_cast<uintptr_t>(p) % align) == 0); | 1246 ASSERT((reinterpret_cast<uintptr_t>(p) % align) == 0); |
1346 return p; | 1247 return p; |
1347 } | 1248 } |
1348 | 1249 |
1349 if (Static::pageheap() == NULL) ThreadCache::InitModule(); | 1250 if (Static::pageheap() == NULL) ThreadCache::InitModule(); |
(...skipping 64 matching lines...)
1414 PrintStats(1); | 1315 PrintStats(1); |
1415 } | 1316 } |
1416 | 1317 |
1417 inline int do_mallopt(int cmd, int value) { | 1318 inline int do_mallopt(int cmd, int value) { |
1418 return 1; // Indicates error | 1319 return 1; // Indicates error |
1419 } | 1320 } |
1420 | 1321 |
1421 #ifdef HAVE_STRUCT_MALLINFO | 1322 #ifdef HAVE_STRUCT_MALLINFO |
1422 inline struct mallinfo do_mallinfo() { | 1323 inline struct mallinfo do_mallinfo() { |
1423 TCMallocStats stats; | 1324 TCMallocStats stats; |
1424 ExtractStats(&stats, NULL); | 1325 ExtractStats(&stats, NULL, NULL, NULL); |
1425 | 1326 |
1426 // Just some of the fields are filled in. | 1327 // Just some of the fields are filled in. |
1427 struct mallinfo info; | 1328 struct mallinfo info; |
1428 memset(&info, 0, sizeof(info)); | 1329 memset(&info, 0, sizeof(info)); |
1429 | 1330 |
1430 // Unfortunately, the struct contains "int" fields, so some of the | 1331 // Unfortunately, the struct contains "int" fields, so some of the |
1431 // size values will be truncated. | 1332 // size values will be truncated. |
1432 info.arena = static_cast<int>(stats.pageheap.system_bytes); | 1333 info.arena = static_cast<int>(stats.pageheap.system_bytes); |
1433 info.fsmblks = static_cast<int>(stats.thread_bytes | 1334 info.fsmblks = static_cast<int>(stats.thread_bytes |
1434 + stats.central_bytes | 1335 + stats.central_bytes |
(...skipping 103 matching lines...)
1538 } else { // allocation success | 1439 } else { // allocation success |
1539 return p; | 1440 return p; |
1540 } | 1441 } |
1541 #endif // PREANSINEW | 1442 #endif // PREANSINEW |
1542 } | 1443 } |
1543 } | 1444 } |
1544 | 1445 |
1545 } // end unnamed namespace | 1446 } // end unnamed namespace |
1546 | 1447 |
1547 // As promised, the definition of this function, declared above. | 1448 // As promised, the definition of this function, declared above. |
1548 size_t TCMallocImplementation::GetAllocatedSize(void* ptr) { | 1449 size_t TCMallocImplementation::GetAllocatedSize(const void* ptr) { |
1549 return ExcludeSpaceForMark( | 1450 ASSERT(TCMallocImplementation::GetOwnership(ptr) |
1550 GetSizeWithCallback(ptr, &InvalidGetAllocatedSize)); | 1451 != TCMallocImplementation::kNotOwned); |
| 1452 return GetSizeWithCallback(ptr, &InvalidGetAllocatedSize); |
1551 } | 1453 } |
1552 | 1454 |
1553 void TCMallocImplementation::MarkThreadBusy() { | 1455 void TCMallocImplementation::MarkThreadBusy() { |
1554 // Allocate to force the creation of a thread cache, but avoid | 1456 // Allocate to force the creation of a thread cache, but avoid |
1555 // invoking any hooks. | 1457 // invoking any hooks. |
1556 do_free(do_malloc(0)); | 1458 do_free(do_malloc(0)); |
1557 } | 1459 } |
1558 | 1460 |
1559 //------------------------------------------------------------------- | 1461 //------------------------------------------------------------------- |
1560 // Exported routines | 1462 // Exported routines |
(...skipping 176 matching lines...)
1737 return do_mallopt(cmd, value); | 1639 return do_mallopt(cmd, value); |
1738 } | 1640 } |
1739 | 1641 |
1740 #ifdef HAVE_STRUCT_MALLINFO | 1642 #ifdef HAVE_STRUCT_MALLINFO |
1741 extern "C" PERFTOOLS_DLL_DECL struct mallinfo tc_mallinfo(void) __THROW { | 1643 extern "C" PERFTOOLS_DLL_DECL struct mallinfo tc_mallinfo(void) __THROW { |
1742 return do_mallinfo(); | 1644 return do_mallinfo(); |
1743 } | 1645 } |
1744 #endif | 1646 #endif |
1745 | 1647 |
1746 extern "C" PERFTOOLS_DLL_DECL size_t tc_malloc_size(void* ptr) __THROW { | 1648 extern "C" PERFTOOLS_DLL_DECL size_t tc_malloc_size(void* ptr) __THROW { |
1747 return GetSizeWithCallback(ptr, &InvalidGetAllocatedSize); | 1649 return MallocExtension::instance()->GetAllocatedSize(ptr); |
1748 } | 1650 } |
1749 | 1651 |
1750 | |
1751 // Override __libc_memalign in libc on linux boxes specially. | |
1752 // They have a bug in libc that causes them to (very rarely) allocate | |
1753 // with __libc_memalign() yet deallocate with free() and the | |
1754 // definitions above don't catch it. | |
1755 // This function is an exception to the rule of calling MallocHook method | |
1756 // from the stack frame of the allocation function; | |
1757 // heap-checker handles this special case explicitly. | |
1758 static void *MemalignOverride(size_t align, size_t size, const void *caller) | |
1759 __THROW ATTRIBUTE_SECTION(google_malloc); | |
1760 | |
1761 static void *MemalignOverride(size_t align, size_t size, const void *caller) | |
1762 __THROW { | |
1763 void* result = do_memalign_or_cpp_memalign(align, size); | |
1764 MallocHook::InvokeNewHook(result, size); | |
1765 return result; | |
1766 } | |
1767 void *(*__MALLOC_HOOK_VOLATILE __memalign_hook)(size_t, size_t, const void *) = MemalignOverride; | |
1768 #endif // TCMALLOC_USING_DEBUGALLOCATION | 1652 #endif // TCMALLOC_USING_DEBUGALLOCATION |
1769 | |
1770 // ---Double free() debugging implementation ----------------------------------- | |
1771 // We will put a mark at the extreme end of each allocation block. We make | |
1772 // sure that we always allocate enough "extra memory" that we can fit in the | |
1773 // mark, and still provide the requested usable region. If ever that mark is | |
1774 // not as expected, then we know that the user is corrupting memory beyond their | |
1775 // request size, or that they have called free() a second time without the | |
1776 // memory having been allocated again. This allows us to spot most double | |
1777 // free()s, but some can "slip by" or confuse our logic if the caller | |
1778 // reallocates the memory (for a second use) before performing an evil | |
1779 // double-free of the first allocation. | |
1780 | |
1781 // This code can be optimized, but for now, it is written to be most easily | |
1782 // understood, and flexible (since it is evolving a bit). Potential | |
1783 // optimizations include using other calculated data, such as class size, or | |
1784 // allocation size, which is known in the code above, but then is recalculated | |
1785 // below. Another potential optimization would be careful manual inlining of | |
1786 // code, but I *think* that the compiler will probably do this for me, and I've | |
1787 // been careful to avoid aliasing issues that might make a compiler back off. | |
1788 | |
1789 // Evolution includes experimenting with different marks, to minimize the chance | |
1790 // that a mark would be misunderstood (missed corruption). The marks are meant | |
1791 // to be hashed encodings of their locations, so that they can't be copied over | |
1792 // a different region (by accident) without being detected (most of the time). | |
1793 | |
1794 // Enable the following define to turn on all the TCMalloc checking. | |
1795 // It will cost about 2% in performance, but it will catch double frees (most of | |
1796 // the time), and will often catch allocated-buffer overrun errors. This | |
1797 // validation is only active when TCMalloc is used as the allocator. | |
1798 #ifndef NDEBUG | |
1799 #define TCMALLOC_VALIDATION | |
1800 #endif | |
1801 | |
1802 #if !defined(TCMALLOC_VALIDATION) | |
1803 | |
1804 static size_t ExcludeSpaceForMark(size_t size) { return size; } | |
1805 static void AddRoomForMark(size_t* size) {} | |
1806 static void ExcludeMarkFromSize(size_t* new_size) {} | |
1807 static void MarkAllocatedRegion(void* ptr) {} | |
1808 static void ValidateAllocatedRegion(void* ptr, size_t cl) {} | |
1809 | |
1810 #else // TCMALLOC_VALIDATION | |
1811 | |
1812 static void DieFromDoubleFree() { | |
1813 char* p = NULL; | |
1814 p++; | |
1815 *p += 1; // Segv. | |
1816 } | |
1817 | |
1818 static size_t DieFromBadFreePointer(void* unused) { | |
1819 char* p = NULL; | |
1820 p += 2; | |
1821 *p += 2; // Segv. | |
1822 return 0; | |
1823 } | |
1824 | |
1825 static void DieFromMemoryCorruption() { | |
1826 char* p = NULL; | |
1827 p += 3; | |
1828 *p += 3; // Segv. | |
1829 } | |
1830 | |
1831 // We can either do byte marking, or whole word marking based on the following | |
1832 // define. char is as small as we can get, and word marking probably provides | |
1833 // more than enough bits that we won't miss a corruption. Any sized integral | |
1834 // type can be used, but we just define two examples. | |
1835 | |
1836 // #define TCMALLOC_SMALL_VALIDATION | |
1837 #if defined (TCMALLOC_SMALL_VALIDATION) | |
1838 | |
1839 typedef char MarkType; // char saves memory... int is more complete. | |
1840 static const MarkType kAllocationMarkMask = static_cast<MarkType>(0x36); | |
1841 | |
1842 #else | |
1843 | |
1844 typedef int MarkType; // char saves memory... int is more complete. | |
1845 static const MarkType kAllocationMarkMask = static_cast<MarkType>(0xE1AB9536); | |
1846 | |
1847 #endif | |
1848 | |
1849 // TODO(jar): See if use of reference rather than pointer gets better inlining, | |
1850 // or if a macro is needed. My fear is that taking the address may preclude | |
1851 // register allocation :-(. | |
1852 inline static void AddRoomForMark(size_t* size) { | |
1853 *size += sizeof(kAllocationMarkMask); | |
1854 } | |
1855 | |
1856 inline static void ExcludeMarkFromSize(size_t* new_size) { | |
1857 *new_size -= sizeof(kAllocationMarkMask); | |
1858 } | |
1859 | |
1860 inline static size_t ExcludeSpaceForMark(size_t size) { | |
1861 return size - sizeof(kAllocationMarkMask); // Lie about size when asked. | |
1862 } | |
1863 | |
1864 inline static MarkType* GetMarkLocation(void* ptr) { | |
1865 size_t class_size = GetSizeWithCallback(ptr, DieFromBadFreePointer); | |
1866 ASSERT(class_size % sizeof(kAllocationMarkMask) == 0); | |
1867 size_t last_index = (class_size / sizeof(kAllocationMarkMask)) - 1; | |
1868 return static_cast<MarkType*>(ptr) + last_index; | |
1869 } | |
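
A worked example of the mark placement with the 4-byte int MarkType configured below (the 32-byte size class is assumed for illustration):

// A request for 25 bytes has sizeof(kAllocationMarkMask) == 4 added by
// AddRoomForMark, giving 29 bytes, which lands in a 32-byte size class.
// GetMarkLocation then computes last_index = 32/4 - 1 = 7, so the mark
// occupies bytes 28..31, and ExcludeSpaceForMark reports 32 - 4 = 28
// usable bytes -- enough for the original 25-byte request.
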
1870 | |
1871 // We hash in the mark location plus the pointer so that we effectively mix in | |
1872 // the size of the block. This means that if a span is used for different sizes, | |
1873 // the mark will be different. It would be good to hash in the size (which | |
1874 // we effectively get by using both mark location and pointer), but even better | |
1875 // would be to also include the class, as it concisely contains the entropy | |
1876 // found in the size (when we don't have a large allocation), and there is less | |
1877 // risk of losing those bits to truncation. It would probably be good to combine | |
1878 // the high bits of size (capturing info about large blocks) with the class | |
1879 // (which is a 6 bit number). | |
1880 inline static MarkType GetMarkValue(void* ptr, MarkType* mark) { | |
1881 void* ptr2 = static_cast<void*>(mark); | |
1882 size_t offset1 = static_cast<char*>(ptr) - static_cast<char*>(NULL); | |
1883 size_t offset2 = static_cast<char*>(ptr2) - static_cast<char*>(NULL); | |
1884 static const int kInvariantBits = 2; | |
1885 ASSERT((offset1 >> kInvariantBits) << kInvariantBits == offset1); | |
1886 // Note: low bits of both offsets are invariants due to alignment. High bits | |
1887 // of both offsets are the same (unless we have a large allocation). Avoid | |
1888 // XORing high bits together, as they will cancel for most small allocations. | |
1889 | |
1890 MarkType ret = kAllocationMarkMask; | |
1891 // Using a little shift, we can safely XOR together both offsets. | |
1892 ret ^= static_cast<MarkType>(offset1 >> kInvariantBits) ^ | |
1893 static_cast<MarkType>(offset2); | |
1894 if (sizeof(ret) == 1) { | |
1895 // Try to bring some high level bits into the mix. | |
1896 ret += static_cast<MarkType>(offset1 >> 8) ^ | |
1897 static_cast<MarkType>(offset1 >> 16) ^ | |
1898 static_cast<MarkType>(offset1 >> 24) ; | |
1899 } | |
1900 // Hash in high bits on a 64 bit architecture. | |
1901 if (sizeof(size_t) == 8 && sizeof(ret) == 4) | |
1902 ret += offset1 >> 16; | |
1903 if (ret == 0) | |
1904 ret = kAllocationMarkMask; // Avoid common pattern of all zeros. | |
1905 return ret; | |
1906 } | |
1907 | |
1908 // TODO(jar): Use the passed-in TCmalloc Class Index to calculate mark location | |
1909 // faster. The current implementation calls general functions, which have to | |
1910 // recalculate this in order to get the Class Size. This is a slow and wasteful | |
1911 // recomputation... but it is much more readable this way (for now). | |
1912 static void ValidateAllocatedRegion(void* ptr, size_t cl) { | |
1913 if (ptr == NULL) return; | |
1914 MarkType* mark = GetMarkLocation(ptr); | |
1915 MarkType allocated_mark = GetMarkValue(ptr, mark); | |
1916 MarkType current_mark = *mark; | |
1917 | |
1918 if (current_mark == ~allocated_mark) | |
1919 DieFromDoubleFree(); | |
1920 if (current_mark != allocated_mark) | |
1921 DieFromMemoryCorruption(); | |
1922 #ifndef NDEBUG | |
1923 // In debug mode, copy the mark into all of the freed region. | |
1924 size_t class_size = static_cast<size_t>(reinterpret_cast<char*>(mark) - | |
1925 reinterpret_cast<char*>(ptr)); | |
1926 memset(ptr, static_cast<char>(0x36), class_size); | |
1927 #endif | |
1928 *mark = ~allocated_mark; // Distinctively not allocated. | |
1929 } | |
1930 | |
1931 static void MarkAllocatedRegion(void* ptr) { | |
1932 if (ptr == NULL) return; | |
1933 MarkType* mark = GetMarkLocation(ptr); | |
1934 *mark = GetMarkValue(ptr, mark); | |
1935 } | |
1936 | |
1937 #endif // TCMALLOC_VALIDATION | |
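
The mark lifecycle above can be modeled standalone. This sketch assumes a fixed-size block and a constant mark (ModelMalloc/ModelFree are invented names); the real code derives the mark from the block and mark addresses, and freed blocks sit in tcmalloc's freelists, which is what keeps the inverted mark intact until the slot is reused:

#include <assert.h>   // for assert
#include <stdlib.h>   // for malloc, size_t, NULL

typedef int ModelMark;
static const ModelMark kModelMark = 0x5A5A5A5A;
static const size_t kUsableBytes = 64;  // usable region before the mark

// Analog of MarkAllocatedRegion: stamp the mark just past the usable
// region at allocation time.
static void* ModelMalloc() {
  char* p = static_cast<char*>(malloc(kUsableBytes + sizeof(ModelMark)));
  if (p != NULL)
    *reinterpret_cast<ModelMark*>(p + kUsableBytes) = kModelMark;
  return p;
}

// Analog of ValidateAllocatedRegion: an inverted mark means the block
// was already freed (double free); any other value means an overrun
// corrupted it.  The block is kept (as a freelist would keep it) so
// the inverted mark survives until the slot is handed out again.
static void ModelFree(void* ptr) {
  ModelMark* mark =
      reinterpret_cast<ModelMark*>(static_cast<char*>(ptr) + kUsableBytes);
  assert(*mark != ~kModelMark);  // double free
  assert(*mark == kModelMark);   // memory corruption
  *mark = ~kModelMark;           // distinctively "not allocated"
}
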