OLD | NEW |
1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 101 matching lines...)
112 #include <vector> // for vector | 112 #include <vector> // for vector |
113 | 113 |
114 #include <google/malloc_extension.h> | 114 #include <google/malloc_extension.h> |
115 #include <google/malloc_hook.h> // for MallocHook | 115 #include <google/malloc_hook.h> // for MallocHook |
116 #include "base/basictypes.h" // for int64 | 116 #include "base/basictypes.h" // for int64 |
117 #include "base/commandlineflags.h" // for RegisterFlagValidator, etc | 117 #include "base/commandlineflags.h" // for RegisterFlagValidator, etc |
118 #include "base/dynamic_annotations.h" // for RunningOnValgrind | 118 #include "base/dynamic_annotations.h" // for RunningOnValgrind |
119 #include "base/spinlock.h" // for SpinLockHolder | 119 #include "base/spinlock.h" // for SpinLockHolder |
120 #include "central_freelist.h" // for CentralFreeListPadded | 120 #include "central_freelist.h" // for CentralFreeListPadded |
121 #include "common.h" // for StackTrace, kPageShift, etc | 121 #include "common.h" // for StackTrace, kPageShift, etc |
| 122 #include "free_list.h" // for FL_Init |
122 #include "internal_logging.h" // for ASSERT, TCMalloc_Printer, etc | 123 #include "internal_logging.h" // for ASSERT, TCMalloc_Printer, etc |
123 #include "linked_list.h" // for SLL_SetNext | |
124 #include "malloc_hook-inl.h" // for MallocHook::InvokeNewHook, etc | 124 #include "malloc_hook-inl.h" // for MallocHook::InvokeNewHook, etc |
125 #include "page_heap.h" // for PageHeap, PageHeap::Stats | 125 #include "page_heap.h" // for PageHeap, PageHeap::Stats |
126 #include "page_heap_allocator.h" // for PageHeapAllocator | 126 #include "page_heap_allocator.h" // for PageHeapAllocator |
127 #include "span.h" // for Span, DLL_Prepend, etc | 127 #include "span.h" // for Span, DLL_Prepend, etc |
128 #include "stack_trace_table.h" // for StackTraceTable | 128 #include "stack_trace_table.h" // for StackTraceTable |
129 #include "static_vars.h" // for Static | 129 #include "static_vars.h" // for Static |
130 #include "system-alloc.h" // for DumpSystemAllocatorStats, etc | 130 #include "system-alloc.h" // for DumpSystemAllocatorStats, etc |
131 #include "tcmalloc_guard.h" // for TCMallocGuard | 131 #include "tcmalloc_guard.h" // for TCMallocGuard |
132 #include "thread_cache.h" // for ThreadCache | 132 #include "thread_cache.h" // for ThreadCache |
133 | 133 |
134 // We only need malloc.h for struct mallinfo. | 134 // We only need malloc.h for struct mallinfo. |
135 #ifdef HAVE_STRUCT_MALLINFO | 135 #ifdef HAVE_STRUCT_MALLINFO |
136 // Malloc can be in several places on older versions of OS X. | 136 // Malloc can be in several places on older versions of OS X. |
137 # if defined(HAVE_MALLOC_H) | 137 # if defined(HAVE_MALLOC_H) |
138 # include <malloc.h> | 138 # include <malloc.h> |
139 # elif defined(HAVE_SYS_MALLOC_H) | 139 # elif defined(HAVE_SYS_MALLOC_H) |
140 # include <sys/malloc.h> | 140 # include <sys/malloc.h> |
141 # elif defined(HAVE_MALLOC_MALLOC_H) | 141 # elif defined(HAVE_MALLOC_MALLOC_H) |
142 # include <malloc/malloc.h> | 142 # include <malloc/malloc.h> |
143 # endif | 143 # endif |
144 #endif | 144 #endif |
145 | 145 |
146 #if (defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)) && !defined(WIN32_OVERRIDE_ALLOCATORS) | 146 #if (defined(_WIN32) && !defined(__CYGWIN__) && !defined(__CYGWIN32__)) && !defined(WIN32_OVERRIDE_ALLOCATORS) |
147 # define WIN32_DO_PATCHING 1 | 147 # define WIN32_DO_PATCHING 1 |
148 #endif | 148 #endif |
149 | 149 |
150 // Some windows file somewhere (at least on cygwin) #define's small (!) | 150 // Some windows file somewhere (at least on cygwin) #define's small (!) |
151 #undef small | 151 #undef small |
152 | 152 |
| 153 // GLibc 2.14+ requires the hook functions be declared volatile, based on the |
| 154 // value of the define __MALLOC_HOOK_VOLATILE. For compatibility with |
| 155 // older/non-GLibc implementations, provide an empty definition. |
| 156 #if !defined(__MALLOC_HOOK_VOLATILE) |
| 157 #define __MALLOC_HOOK_VOLATILE |
| 158 #endif |
| 159 |
153 using STL_NAMESPACE::max; | 160 using STL_NAMESPACE::max; |
154 using STL_NAMESPACE::numeric_limits; | 161 using STL_NAMESPACE::numeric_limits; |
155 using STL_NAMESPACE::vector; | 162 using STL_NAMESPACE::vector; |
156 | 163 |
157 #include "libc_override.h" | 164 #include "libc_override.h" |
158 | 165 |
159 // __THROW is defined in glibc (via <sys/cdefs.h>). It means, | 166 // __THROW is defined in glibc (via <sys/cdefs.h>). It means, |
160 // counter-intuitively, "This function will never throw an exception." | 167 // counter-intuitively, "This function will never throw an exception." |
161 // It's an optional optimization tool, but we may need to use it to | 168 // It's an optional optimization tool, but we may need to use it to |
162 // match glibc prototypes. | 169 // match glibc prototypes. |
163 #ifndef __THROW // I guess we're not on a glibc system | 170 #ifndef __THROW // I guess we're not on a glibc system |
164 # define __THROW // __THROW is just an optimization, so ok to make it "" | 171 # define __THROW // __THROW is just an optimization, so ok to make it "" |
165 #endif | 172 #endif |
166 | 173 |
167 using tcmalloc::AlignmentForSize; | 174 using tcmalloc::AlignmentForSize; |
168 using tcmalloc::kLog; | 175 using tcmalloc::kLog; |
169 using tcmalloc::kCrash; | 176 using tcmalloc::kCrash; |
170 using tcmalloc::kCrashWithStats; | 177 using tcmalloc::kCrashWithStats; |
171 using tcmalloc::Log; | 178 using tcmalloc::Log; |
172 using tcmalloc::PageHeap; | 179 using tcmalloc::PageHeap; |
173 using tcmalloc::PageHeapAllocator; | 180 using tcmalloc::PageHeapAllocator; |
174 using tcmalloc::SizeMap; | 181 using tcmalloc::SizeMap; |
175 using tcmalloc::Span; | 182 using tcmalloc::Span; |
176 using tcmalloc::StackTrace; | 183 using tcmalloc::StackTrace; |
177 using tcmalloc::Static; | 184 using tcmalloc::Static; |
178 using tcmalloc::ThreadCache; | 185 using tcmalloc::ThreadCache; |
179 | 186 |
| 187 // ---- Double free debug declarations |
| 188 static size_t ExcludeSpaceForMark(size_t size); |
| 189 static void AddRoomForMark(size_t* size); |
| 190 static void ExcludeMarkFromSize(size_t* new_size); |
| 191 static void MarkAllocatedRegion(void* ptr); |
| 192 static void ValidateAllocatedRegion(void* ptr, size_t cl); |
| 193 // ---- End Double free debug declarations |
| 194 |
180 DECLARE_int64(tcmalloc_sample_parameter); | 195 DECLARE_int64(tcmalloc_sample_parameter); |
181 DECLARE_double(tcmalloc_release_rate); | 196 DECLARE_double(tcmalloc_release_rate); |
182 | 197 |
183 // For windows, the printf we use to report large allocs is | 198 // For windows, the printf we use to report large allocs is |
184 // potentially dangerous: it could cause a malloc that would cause an | 199 // potentially dangerous: it could cause a malloc that would cause an |
185 // infinite loop. So by default we set the threshold to a huge number | 200 // infinite loop. So by default we set the threshold to a huge number |
186 // on windows, so this bad situation will never trigger. You can | 201 // on windows, so this bad situation will never trigger. You can |
187 // always set TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD manually if you | 202 // always set TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD manually if you |
188 // want this functionality. | 203 // want this functionality. |
189 #ifdef _WIN32 | 204 #ifdef _WIN32 |
(...skipping 14 matching lines...)
204 "is very large and therefore you should see no extra " | 219 "is very large and therefore you should see no extra " |
205 "logging unless the flag is overridden. Set to 0 to " | 220 "logging unless the flag is overridden. Set to 0 to " |
206 "disable reporting entirely."); | 221 "disable reporting entirely."); |
207 | 222 |
208 | 223 |
209 // We already declared these functions in tcmalloc.h, but we have to | 224 // We already declared these functions in tcmalloc.h, but we have to |
210 // declare them again to give them an ATTRIBUTE_SECTION: we want to | 225 // declare them again to give them an ATTRIBUTE_SECTION: we want to |
211 // put all callers of MallocHook::Invoke* in this module into | 226 // put all callers of MallocHook::Invoke* in this module into |
212 // ATTRIBUTE_SECTION(google_malloc) section, so that | 227 // ATTRIBUTE_SECTION(google_malloc) section, so that |
213 // MallocHook::GetCallerStackTrace can function accurately. | 228 // MallocHook::GetCallerStackTrace can function accurately. |
214 #ifndef _WIN32 // windows doesn't have attribute_section, so don't bother | |
215 extern "C" { | 229 extern "C" { |
216 void* tc_malloc(size_t size) __THROW | 230 void* tc_malloc(size_t size) __THROW |
217 ATTRIBUTE_SECTION(google_malloc); | 231 ATTRIBUTE_SECTION(google_malloc); |
218 void tc_free(void* ptr) __THROW | 232 void tc_free(void* ptr) __THROW |
219 ATTRIBUTE_SECTION(google_malloc); | 233 ATTRIBUTE_SECTION(google_malloc); |
220 void* tc_realloc(void* ptr, size_t size) __THROW | 234 void* tc_realloc(void* ptr, size_t size) __THROW |
221 ATTRIBUTE_SECTION(google_malloc); | 235 ATTRIBUTE_SECTION(google_malloc); |
222 void* tc_calloc(size_t nmemb, size_t size) __THROW | 236 void* tc_calloc(size_t nmemb, size_t size) __THROW |
223 ATTRIBUTE_SECTION(google_malloc); | 237 ATTRIBUTE_SECTION(google_malloc); |
224 void tc_cfree(void* ptr) __THROW | 238 void tc_cfree(void* ptr) __THROW |
(...skipping 41 matching lines...)
266 | 280 |
267 // Some non-standard extensions that we support. | 281 // Some non-standard extensions that we support. |
268 | 282 |
269 // This is equivalent to | 283 // This is equivalent to |
270 // OS X: malloc_size() | 284 // OS X: malloc_size() |
271 // glibc: malloc_usable_size() | 285 // glibc: malloc_usable_size() |
272 // Windows: _msize() | 286 // Windows: _msize() |
273 size_t tc_malloc_size(void* p) __THROW | 287 size_t tc_malloc_size(void* p) __THROW |
274 ATTRIBUTE_SECTION(google_malloc); | 288 ATTRIBUTE_SECTION(google_malloc); |
275 } // extern "C" | 289 } // extern "C" |
276 #endif // #ifndef _WIN32 | |
277 | 290 |
278 // ----------------------- IMPLEMENTATION ------------------------------- | 291 // ----------------------- IMPLEMENTATION ------------------------------- |
279 | 292 |
280 static int tc_new_mode = 0; // See tc_set_new_mode(). | 293 static int tc_new_mode = 0; // See tc_set_new_mode(). |
281 | 294 |
282 // Routines such as free() and realloc() catch some erroneous pointers | 295 // Routines such as free() and realloc() catch some erroneous pointers |
283 // passed to them, and invoke the below when they do. (An erroneous pointer | 296 // passed to them, and invoke the below when they do. (An erroneous pointer |
284 // won't be caught if it's within a valid span or a stale span for which | 297 // won't be caught if it's within a valid span or a stale span for which |
285 // the pagemap cache has a non-zero sizeclass.) This is a cheap (source-editing | 298 // the pagemap cache has a non-zero sizeclass.) This is a cheap (source-editing |
286 // required) kind of exception handling for these routines. | 299 // required) kind of exception handling for these routines. |
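
The invalid-free hook described above is a plain function pointer of type void (*)(void*). As a hedged sketch (the handler name below is hypothetical, not part of this patch), a caller-supplied callback could report through the same Log() facility already imported in this file:

// Hypothetical invalid-free handler; any void(*)(void*) fits here.
static void LogAndCrashOnInvalidFree(void* ptr) {
  Log(kCrash, __FILE__, __LINE__,
      "Attempt to free invalid/unowned pointer", ptr);
}
// It would be supplied where a free-path routine takes an invalid_free_fn,
// e.g. do_free_with_callback(ptr, &LogAndCrashOnInvalidFree).
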
(...skipping 79 matching lines...)
366 + stats.metadata_bytes); | 379 + stats.metadata_bytes); |
367 const uint64_t physical_memory_used = (virtual_memory_used | 380 const uint64_t physical_memory_used = (virtual_memory_used |
368 - stats.pageheap.unmapped_bytes); | 381 - stats.pageheap.unmapped_bytes); |
369 const uint64_t bytes_in_use_by_app = (physical_memory_used | 382 const uint64_t bytes_in_use_by_app = (physical_memory_used |
370 - stats.metadata_bytes | 383 - stats.metadata_bytes |
371 - stats.pageheap.free_bytes | 384 - stats.pageheap.free_bytes |
372 - stats.central_bytes | 385 - stats.central_bytes |
373 - stats.transfer_bytes | 386 - stats.transfer_bytes |
374 - stats.thread_bytes); | 387 - stats.thread_bytes); |
375 | 388 |
| 389 out->printf( |
| 390 "WASTE: %7.1f MiB committed but not used\n" |
| 391 "WASTE: %7.1f MiB bytes committed, %7.1f MiB bytes in use\n" |
| 392 "WASTE: committed/used ratio of %f\n", |
| 393 (stats.pageheap.committed_bytes - bytes_in_use_by_app) / MiB, |
| 394 stats.pageheap.committed_bytes / MiB, |
| 395 bytes_in_use_by_app / MiB, |
| 396 stats.pageheap.committed_bytes / static_cast<double>(bytes_in_use_by_app) |
| 397 ); |
376 #ifdef TCMALLOC_SMALL_BUT_SLOW | 398 #ifdef TCMALLOC_SMALL_BUT_SLOW |
377 out->printf( | 399 out->printf( |
378 "NOTE: SMALL MEMORY MODEL IS IN USE, PERFORMANCE MAY SUFFER.\n"); | 400 "NOTE: SMALL MEMORY MODEL IS IN USE, PERFORMANCE MAY SUFFER.\n"); |
379 #endif | 401 #endif |
380 out->printf( | 402 out->printf( |
381 "------------------------------------------------\n" | 403 "------------------------------------------------\n" |
382 "MALLOC: %12" PRIu64 " (%7.1f MiB) Bytes in use by application\n" | 404 "MALLOC: %12" PRIu64 " (%7.1f MiB) Bytes in use by application\n" |
| 405 "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes committed\n" |
383 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in page heap freelist\n" | 406 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in page heap freelist\n" |
384 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in central cache freelist\n" | 407 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in central cache freelist\n" |
385 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in transfer cache freelist\n" | 408 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in transfer cache freelist\n" |
386 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in thread cache freelists\n" | 409 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in thread cache freelists\n" |
387 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in malloc metadata\n" | 410 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes in malloc metadata\n" |
388 "MALLOC: ------------\n" | 411 "MALLOC: ------------\n" |
389 "MALLOC: = %12" PRIu64 " (%7.1f MiB) Actual memory used (physical + swap)\
n" | 412 "MALLOC: = %12" PRIu64 " (%7.1f MiB) Actual memory used (physical + swap)\
n" |
390 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes released to OS (aka unmapped)\n
" | 413 "MALLOC: + %12" PRIu64 " (%7.1f MiB) Bytes released to OS (aka unmapped)\n
" |
391 "MALLOC: ------------\n" | 414 "MALLOC: ------------\n" |
392 "MALLOC: = %12" PRIu64 " (%7.1f MiB) Virtual address space used\n" | 415 "MALLOC: = %12" PRIu64 " (%7.1f MiB) Virtual address space used\n" |
393 "MALLOC:\n" | 416 "MALLOC:\n" |
394 "MALLOC: %12" PRIu64 " Spans in use\n" | 417 "MALLOC: %12" PRIu64 " Spans in use\n" |
395 "MALLOC: %12" PRIu64 " Thread heaps in use\n" | 418 "MALLOC: %12" PRIu64 " Thread heaps in use\n" |
396 "MALLOC: %12" PRIu64 " Tcmalloc page size\n" | 419 "MALLOC: %12" PRIu64 " Tcmalloc page size\n" |
397 "------------------------------------------------\n" | 420 "------------------------------------------------\n" |
398 "Call ReleaseFreeMemory() to release freelist memory to the OS" | 421 "Call ReleaseFreeMemory() to release freelist memory to the OS" |
399 " (via madvise()).\n" | 422 " (via madvise()).\n" |
400 "Bytes released to the OS take up virtual address space" | 423 "Bytes released to the OS take up virtual address space" |
401 " but no physical memory.\n", | 424 " but no physical memory.\n", |
402 bytes_in_use_by_app, bytes_in_use_by_app / MiB, | 425 bytes_in_use_by_app, bytes_in_use_by_app / MiB, |
| 426 stats.pageheap.committed_bytes, stats.pageheap.committed_bytes / MiB, |
403 stats.pageheap.free_bytes, stats.pageheap.free_bytes / MiB, | 427 stats.pageheap.free_bytes, stats.pageheap.free_bytes / MiB, |
404 stats.central_bytes, stats.central_bytes / MiB, | 428 stats.central_bytes, stats.central_bytes / MiB, |
405 stats.transfer_bytes, stats.transfer_bytes / MiB, | 429 stats.transfer_bytes, stats.transfer_bytes / MiB, |
406 stats.thread_bytes, stats.thread_bytes / MiB, | 430 stats.thread_bytes, stats.thread_bytes / MiB, |
407 stats.metadata_bytes, stats.metadata_bytes / MiB, | 431 stats.metadata_bytes, stats.metadata_bytes / MiB, |
408 physical_memory_used, physical_memory_used / MiB, | 432 physical_memory_used, physical_memory_used / MiB, |
409 stats.pageheap.unmapped_bytes, stats.pageheap.unmapped_bytes / MiB, | 433 stats.pageheap.unmapped_bytes, stats.pageheap.unmapped_bytes / MiB, |
410 virtual_memory_used, virtual_memory_used / MiB, | 434 virtual_memory_used, virtual_memory_used / MiB, |
411 uint64_t(Static::span_allocator()->inuse()), | 435 uint64_t(Static::span_allocator()->inuse()), |
412 uint64_t(ThreadCache::HeapsInUse()), | 436 uint64_t(ThreadCache::HeapsInUse()), |
(...skipping 513 matching lines...)
926 | 950 |
927 static inline bool CheckCachedSizeClass(void *ptr) { | 951 static inline bool CheckCachedSizeClass(void *ptr) { |
928 PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; | 952 PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift; |
929 size_t cached_value = Static::pageheap()->GetSizeClassIfCached(p); | 953 size_t cached_value = Static::pageheap()->GetSizeClassIfCached(p); |
930 return cached_value == 0 || | 954 return cached_value == 0 || |
931 cached_value == Static::pageheap()->GetDescriptor(p)->sizeclass; | 955 cached_value == Static::pageheap()->GetDescriptor(p)->sizeclass; |
932 } | 956 } |
933 | 957 |
934 static inline void* CheckedMallocResult(void *result) { | 958 static inline void* CheckedMallocResult(void *result) { |
935 ASSERT(result == NULL || CheckCachedSizeClass(result)); | 959 ASSERT(result == NULL || CheckCachedSizeClass(result)); |
| 960 MarkAllocatedRegion(result); |
936 return result; | 961 return result; |
937 } | 962 } |
938 | 963 |
939 static inline void* SpanToMallocResult(Span *span) { | 964 static inline void* SpanToMallocResult(Span *span) { |
940 Static::pageheap()->CacheSizeClass(span->start, 0); | 965 Static::pageheap()->CacheSizeClass(span->start, 0); |
941 return | 966 return |
942 CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift)); | 967 CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift)); |
943 } | 968 } |
944 | 969 |
945 static void* DoSampledAllocation(size_t size) { | 970 static void* DoSampledAllocation(size_t size) { |
(...skipping 44 matching lines...)
990 for (int i = 0; i < stack.depth; i++) { | 1015 for (int i = 0; i < stack.depth; i++) { |
991 printer.printf(" %p", stack.stack[i]); | 1016 printer.printf(" %p", stack.stack[i]); |
992 } | 1017 } |
993 printer.printf("\n"); | 1018 printer.printf("\n"); |
994 write(STDERR_FILENO, buffer, strlen(buffer)); | 1019 write(STDERR_FILENO, buffer, strlen(buffer)); |
995 } | 1020 } |
996 | 1021 |
997 inline void* cpp_alloc(size_t size, bool nothrow); | 1022 inline void* cpp_alloc(size_t size, bool nothrow); |
998 inline void* do_malloc(size_t size); | 1023 inline void* do_malloc(size_t size); |
999 | 1024 |
1000 // TODO(willchan): Investigate whether or not lining this much is harmful to | 1025 // TODO(willchan): Investigate whether or not inlining this much is harmful to |
1001 // performance. | 1026 // performance. |
1002 // This is equivalent to do_malloc() except when tc_new_mode is set to true. | 1027 // This is equivalent to do_malloc() except when tc_new_mode is set to true. |
1003 // Otherwise, it will run the std::new_handler if set. | 1028 // Otherwise, it will run the std::new_handler if set. |
1004 inline void* do_malloc_or_cpp_alloc(size_t size) { | 1029 inline void* do_malloc_or_cpp_alloc(size_t size) { |
1005 return tc_new_mode ? cpp_alloc(size, true) : do_malloc(size); | 1030 return tc_new_mode ? cpp_alloc(size, true) : do_malloc(size); |
1006 } | 1031 } |
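
For context, tc_new_mode is flipped by the exported tc_set_new_mode() routine referenced at its declaration above; when it is nonzero, allocation goes through cpp_alloc(size, true), which consults any installed std::new_handler on failure instead of returning NULL outright. A minimal sketch of that interaction (the handler below is illustrative, not part of this file):

#include <new>       // std::set_new_handler
#include <stdlib.h>  // abort

// Illustrative handler: a real one might release caches; this one aborts.
static void ExampleOutOfMemoryHandler() { abort(); }

// std::set_new_handler(ExampleOutOfMemoryHandler);
// tc_set_new_mode(1);  // malloc()/calloc()/realloc() now behave like
//                      // operator new on failure and run the handler.
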
1007 | 1032 |
1008 void* cpp_memalign(size_t align, size_t size); | 1033 void* cpp_memalign(size_t align, size_t size); |
1009 void* do_memalign(size_t align, size_t size); | 1034 void* do_memalign(size_t align, size_t size); |
1010 | 1035 |
(...skipping 34 matching lines...)
1045 report_large = should_report_large(num_pages); | 1070 report_large = should_report_large(num_pages); |
1046 } | 1071 } |
1047 | 1072 |
1048 if (report_large) { | 1073 if (report_large) { |
1049 ReportLargeAlloc(num_pages, result); | 1074 ReportLargeAlloc(num_pages, result); |
1050 } | 1075 } |
1051 return result; | 1076 return result; |
1052 } | 1077 } |
1053 | 1078 |
1054 inline void* do_malloc(size_t size) { | 1079 inline void* do_malloc(size_t size) { |
| 1080 AddRoomForMark(&size); |
| 1081 |
1055 void* ret = NULL; | 1082 void* ret = NULL; |
1056 | 1083 |
1057 // The following call forces module initialization | 1084 // The following call forces module initialization |
1058 ThreadCache* heap = ThreadCache::GetCache(); | 1085 ThreadCache* heap = ThreadCache::GetCache(); |
1059 if (size <= kMaxSize) { | 1086 if (size <= kMaxSize) { |
1060 size_t cl = Static::sizemap()->SizeClass(size); | 1087 size_t cl = Static::sizemap()->SizeClass(size); |
1061 size = Static::sizemap()->class_to_size(cl); | 1088 size = Static::sizemap()->class_to_size(cl); |
1062 | 1089 |
1063 if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) { | 1090 if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) { |
1064 ret = DoSampledAllocation(size); | 1091 ret = DoSampledAllocation(size); |
| 1092 MarkAllocatedRegion(ret); |
1065 } else { | 1093 } else { |
1066 // The common case, and also the simplest. This just pops the | 1094 // The common case, and also the simplest. This just pops the |
1067 // size-appropriate freelist, after replenishing it if it's empty. | 1095 // size-appropriate freelist, after replenishing it if it's empty. |
1068 ret = CheckedMallocResult(heap->Allocate(size, cl)); | 1096 ret = CheckedMallocResult(heap->Allocate(size, cl)); |
1069 } | 1097 } |
1070 } else { | 1098 } else { |
1071 ret = do_malloc_pages(heap, size); | 1099 ret = do_malloc_pages(heap, size); |
| 1100 MarkAllocatedRegion(ret); |
1072 } | 1101 } |
1073 if (ret == NULL) errno = ENOMEM; | 1102 if (ret == NULL) errno = ENOMEM; |
1074 return ret; | 1103 return ret; |
1075 } | 1104 } |
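
To make the size-class step above concrete: SizeClass() maps a request to a class index, and class_to_size() gives that class's rounded-up allocation size. A hedged illustration; the concrete numbers depend on the build's size map and are examples only:

// Illustrative only -- actual class sizes vary with configuration:
//   size_t cl = Static::sizemap()->SizeClass(25);     // e.g. the 32-byte class
//   size_t sz = Static::sizemap()->class_to_size(cl); // then sz == 32
//   ASSERT(sz >= 25);  // the rounded size always covers the request
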
1076 | 1105 |
1077 inline void* do_calloc(size_t n, size_t elem_size) { | 1106 inline void* do_calloc(size_t n, size_t elem_size) { |
1078 // Overflow check | 1107 // Overflow check |
1079 const size_t size = n * elem_size; | 1108 const size_t size = n * elem_size; |
1080 if (elem_size != 0 && size / elem_size != n) return NULL; | 1109 if (elem_size != 0 && size / elem_size != n) return NULL; |
1081 | 1110 |
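
The guard above is the standard portable multiplication-overflow test: if n * elem_size wrapped modulo SIZE_MAX + 1, dividing the product by a nonzero elem_size cannot give back n. Restated in isolation (a sketch, not code from this patch):

// Standalone restatement of the calloc overflow guard:
static inline bool MultiplicationOverflows(size_t n, size_t elem_size) {
  const size_t size = n * elem_size;  // may wrap
  return elem_size != 0 && size / elem_size != n;
}
// e.g. on 64-bit, n = 0x8000000000000001, elem_size = 2: the product
// wraps to 2, and 2 / 2 == 1 != n, so the overflow is detected.
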
(...skipping 35 matching lines...)
1117 // tcmalloc. The latter can happen if tcmalloc is linked in via | 1146 // tcmalloc. The latter can happen if tcmalloc is linked in via |
1118 // a dynamic library, but is not listed last on the link line. | 1147 // a dynamic library, but is not listed last on the link line. |
1119 // In that case, libraries after it on the link line will | 1148 // In that case, libraries after it on the link line will |
1120 // allocate with libc malloc, but free with tcmalloc's free. | 1149 // allocate with libc malloc, but free with tcmalloc's free. |
1121 (*invalid_free_fn)(ptr); // Decide how to handle the bad free request | 1150 (*invalid_free_fn)(ptr); // Decide how to handle the bad free request |
1122 return; | 1151 return; |
1123 } | 1152 } |
1124 cl = span->sizeclass; | 1153 cl = span->sizeclass; |
1125 Static::pageheap()->CacheSizeClass(p, cl); | 1154 Static::pageheap()->CacheSizeClass(p, cl); |
1126 } | 1155 } |
| 1156 |
| 1157 ValidateAllocatedRegion(ptr, cl); |
| 1158 |
1127 if (cl != 0) { | 1159 if (cl != 0) { |
1128 ASSERT(!Static::pageheap()->GetDescriptor(p)->sample); | 1160 ASSERT(!Static::pageheap()->GetDescriptor(p)->sample); |
1129 ThreadCache* heap = GetCacheIfPresent(); | 1161 ThreadCache* heap = GetCacheIfPresent(); |
1130 if (heap != NULL) { | 1162 if (heap != NULL) { |
1131 heap->Deallocate(ptr, cl); | 1163 heap->Deallocate(ptr, cl); |
1132 } else { | 1164 } else { |
1133 // Delete directly into central cache | 1165 // Delete directly into central cache |
1134 tcmalloc::SLL_SetNext(ptr, NULL); | 1166 tcmalloc::FL_Init(ptr); |
1135 Static::central_cache()[cl].InsertRange(ptr, ptr, 1); | 1167 Static::central_cache()[cl].InsertRange(ptr, ptr, 1); |
1136 } | 1168 } |
1137 } else { | 1169 } else { |
1138 SpinLockHolder h(Static::pageheap_lock()); | 1170 SpinLockHolder h(Static::pageheap_lock()); |
1139 ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0); | 1171 ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0); |
1140 ASSERT(span != NULL && span->start == p); | 1172 ASSERT(span != NULL && span->start == p); |
1141 if (span->sample) { | 1173 if (span->sample) { |
1142 StackTrace* st = reinterpret_cast<StackTrace*>(span->objects); | 1174 StackTrace* st = reinterpret_cast<StackTrace*>(span->objects); |
1143 tcmalloc::DLL_Remove(span); | 1175 tcmalloc::DLL_Remove(span); |
1144 Static::stacktrace_allocator()->Delete(st); | 1176 Static::stacktrace_allocator()->Delete(st); |
(...skipping 30 matching lines...)
1175 } | 1207 } |
1176 } | 1208 } |
1177 } | 1209 } |
1178 | 1210 |
1179 // This lets you call back to a given function pointer if ptr is invalid. | 1211 // This lets you call back to a given function pointer if ptr is invalid. |
1180 // It is used primarily by windows code which wants a specialized callback. | 1212 // It is used primarily by windows code which wants a specialized callback. |
1181 inline void* do_realloc_with_callback( | 1213 inline void* do_realloc_with_callback( |
1182 void* old_ptr, size_t new_size, | 1214 void* old_ptr, size_t new_size, |
1183 void (*invalid_free_fn)(void*), | 1215 void (*invalid_free_fn)(void*), |
1184 size_t (*invalid_get_size_fn)(const void*)) { | 1216 size_t (*invalid_get_size_fn)(const void*)) { |
| 1217 AddRoomForMark(&new_size); |
1185 // Get the size of the old entry | 1218 // Get the size of the old entry |
1186 const size_t old_size = GetSizeWithCallback(old_ptr, invalid_get_size_fn); | 1219 const size_t old_size = GetSizeWithCallback(old_ptr, invalid_get_size_fn); |
1187 | 1220 |
1188 // Reallocate if the new size is larger than the old size, | 1221 // Reallocate if the new size is larger than the old size, |
1189 // or if the new size is significantly smaller than the old size. | 1222 // or if the new size is significantly smaller than the old size. |
1190 // We do hysteresis to avoid resizing ping-pongs: | 1223 // We do hysteresis to avoid resizing ping-pongs: |
1191 // . If we need to grow, grow to max(new_size, old_size * 1.X) | 1224 // . If we need to grow, grow to max(new_size, old_size * 1.X) |
1192 // . Don't shrink unless new_size < old_size * 0.Y | 1225 // . Don't shrink unless new_size < old_size * 0.Y |
1193 // X and Y trade-off time for wasted space. For now we do 1.25 and 0.5. | 1226 // X and Y trade-off time for wasted space. For now we do 1.25 and 0.5. |
1194 const int lower_bound_to_grow = old_size + old_size / 4; | 1227 const int lower_bound_to_grow = old_size + old_size / 4; |
1195 const int upper_bound_to_shrink = old_size / 2; | 1228 const int upper_bound_to_shrink = old_size / 2; |
1196 if ((new_size > old_size) || (new_size < upper_bound_to_shrink)) { | 1229 if ((new_size > old_size) || (new_size < upper_bound_to_shrink)) { |
1197 // Need to reallocate. | 1230 // Need to reallocate. |
1198 void* new_ptr = NULL; | 1231 void* new_ptr = NULL; |
1199 | 1232 |
1200 if (new_size > old_size && new_size < lower_bound_to_grow) { | 1233 if (new_size > old_size && new_size < lower_bound_to_grow) { |
1201 new_ptr = do_malloc_or_cpp_alloc(lower_bound_to_grow); | 1234 new_ptr = do_malloc_or_cpp_alloc(lower_bound_to_grow); |
1202 } | 1235 } |
| 1236 ExcludeMarkFromSize(&new_size); // do_malloc will add space if needed. |
1203 if (new_ptr == NULL) { | 1237 if (new_ptr == NULL) { |
1204 // Either new_size is not a tiny increment, or last do_malloc failed. | 1238 // Either new_size is not a tiny increment, or last do_malloc failed. |
1205 new_ptr = do_malloc_or_cpp_alloc(new_size); | 1239 new_ptr = do_malloc_or_cpp_alloc(new_size); |
1206 } | 1240 } |
1207 if (new_ptr == NULL) { | 1241 if (new_ptr == NULL) { |
1208 return NULL; | 1242 return NULL; |
1209 } | 1243 } |
1210 MallocHook::InvokeNewHook(new_ptr, new_size); | 1244 MallocHook::InvokeNewHook(new_ptr, new_size); |
1211 memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size)); | 1245 memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size)); |
1212 MallocHook::InvokeDeleteHook(old_ptr); | 1246 MallocHook::InvokeDeleteHook(old_ptr); |
1213 // We could use a variant of do_free() that leverages the fact | 1247 // We could use a variant of do_free() that leverages the fact |
1214 // that we already know the sizeclass of old_ptr. The benefit | 1248 // that we already know the sizeclass of old_ptr. The benefit |
1215 // would be small, so don't bother. | 1249 // would be small, so don't bother. |
1216 do_free_with_callback(old_ptr, invalid_free_fn); | 1250 do_free_with_callback(old_ptr, invalid_free_fn); |
1217 return new_ptr; | 1251 return new_ptr; |
1218 } else { | 1252 } else { |
1219 // We still need to call hooks to report the updated size: | 1253 // We still need to call hooks to report the updated size: |
1220 MallocHook::InvokeDeleteHook(old_ptr); | 1254 MallocHook::InvokeDeleteHook(old_ptr); |
| 1255 ExcludeMarkFromSize(&new_size); |
1221 MallocHook::InvokeNewHook(old_ptr, new_size); | 1256 MallocHook::InvokeNewHook(old_ptr, new_size); |
1222 return old_ptr; | 1257 return old_ptr; |
1223 } | 1258 } |
1224 } | 1259 } |
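
A worked restatement of the hysteresis policy above, isolated from the hook and copy logic (a sketch; the 1.25x grow and 0.5x shrink factors are the constants already chosen above):

// Which size the policy above actually allocates (sketch):
static size_t ChooseReallocSize(size_t old_size, size_t new_size) {
  const size_t lower_bound_to_grow = old_size + old_size / 4;  // 1.25x
  const size_t upper_bound_to_shrink = old_size / 2;           // 0.5x
  if (new_size > old_size && new_size < lower_bound_to_grow)
    return lower_bound_to_grow;  // tiny growth: over-allocate headroom
  if (new_size > old_size || new_size < upper_bound_to_shrink)
    return new_size;             // big growth or big shrink: exact size
  return old_size;               // otherwise keep the existing block
}
// e.g. old_size = 1000: requests in [500, 1000] keep the block,
// a grow to 1100 allocates 1250, and a shrink to 400 allocates 400.
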
1225 | 1260 |
1226 inline void* do_realloc(void* old_ptr, size_t new_size) { | 1261 inline void* do_realloc(void* old_ptr, size_t new_size) { |
1227 return do_realloc_with_callback(old_ptr, new_size, | 1262 return do_realloc_with_callback(old_ptr, new_size, |
1228 &InvalidFree, &InvalidGetSizeForRealloc); | 1263 &InvalidFree, &InvalidGetSizeForRealloc); |
1229 } | 1264 } |
1230 | 1265 |
1231 // For use by exported routines below that want specific alignments | 1266 // For use by exported routines below that want specific alignments |
1232 // | 1267 // |
1233 // Note: this code can be slow for alignments > 16, and can | 1268 // Note: this code can be slow for alignments > 16, and can |
1234 // significantly fragment memory. The expectation is that | 1269 // significantly fragment memory. The expectation is that |
1235 // memalign/posix_memalign/valloc/pvalloc will not be invoked very | 1270 // memalign/posix_memalign/valloc/pvalloc will not be invoked very |
1236 // often. This requirement simplifies our implementation and allows | 1271 // often. This requirement simplifies our implementation and allows |
1237 // us to tune for expected allocation patterns. | 1272 // us to tune for expected allocation patterns. |
1238 void* do_memalign(size_t align, size_t size) { | 1273 void* do_memalign(size_t align, size_t size) { |
1239 ASSERT((align & (align - 1)) == 0); | 1274 ASSERT((align & (align - 1)) == 0); |
1240 ASSERT(align > 0); | 1275 ASSERT(align > 0); |
| 1276 // Marked in CheckedMallocResult(), which is also inside SpanToMallocResult(). |
| 1277 AddRoomForMark(&size); |
1241 if (size + align < size) return NULL; // Overflow | 1278 if (size + align < size) return NULL; // Overflow |
1242 | 1279 |
1243 // Fall back to malloc if we would already align this memory access properly. | 1280 // Fall back to malloc if we would already align this memory access properly. |
1244 if (align <= AlignmentForSize(size)) { | 1281 if (align <= AlignmentForSize(size)) { |
1245 void* p = do_malloc(size); | 1282 void* p = do_malloc(size); |
1246 ASSERT((reinterpret_cast<uintptr_t>(p) % align) == 0); | 1283 ASSERT((reinterpret_cast<uintptr_t>(p) % align) == 0); |
1247 return p; | 1284 return p; |
1248 } | 1285 } |
1249 | 1286 |
1250 if (Static::pageheap() == NULL) ThreadCache::InitModule(); | 1287 if (Static::pageheap() == NULL) ThreadCache::InitModule(); |
(...skipping 191 matching lines...)
1442 #endif // PREANSINEW | 1479 #endif // PREANSINEW |
1443 } | 1480 } |
1444 } | 1481 } |
1445 | 1482 |
1446 } // end unnamed namespace | 1483 } // end unnamed namespace |
1447 | 1484 |
1448 // As promised, the definition of this function, declared above. | 1485 // As promised, the definition of this function, declared above. |
1449 size_t TCMallocImplementation::GetAllocatedSize(const void* ptr) { | 1486 size_t TCMallocImplementation::GetAllocatedSize(const void* ptr) { |
1450 ASSERT(TCMallocImplementation::GetOwnership(ptr) | 1487 ASSERT(TCMallocImplementation::GetOwnership(ptr) |
1451 != TCMallocImplementation::kNotOwned); | 1488 != TCMallocImplementation::kNotOwned); |
1452 return GetSizeWithCallback(ptr, &InvalidGetAllocatedSize); | 1489 return ExcludeSpaceForMark( |
| 1490 GetSizeWithCallback(ptr, &InvalidGetAllocatedSize)); |
1453 } | 1491 } |
1454 | 1492 |
1455 void TCMallocImplementation::MarkThreadBusy() { | 1493 void TCMallocImplementation::MarkThreadBusy() { |
1456 // Allocate to force the creation of a thread cache, but avoid | 1494 // Allocate to force the creation of a thread cache, but avoid |
1457 // invoking any hooks. | 1495 // invoking any hooks. |
1458 do_free(do_malloc(0)); | 1496 do_free(do_malloc(0)); |
1459 } | 1497 } |
1460 | 1498 |
1461 //------------------------------------------------------------------- | 1499 //------------------------------------------------------------------- |
1462 // Exported routines | 1500 // Exported routines |
(...skipping 180 matching lines...)
1643 extern "C" PERFTOOLS_DLL_DECL struct mallinfo tc_mallinfo(void) __THROW { | 1681 extern "C" PERFTOOLS_DLL_DECL struct mallinfo tc_mallinfo(void) __THROW { |
1644 return do_mallinfo(); | 1682 return do_mallinfo(); |
1645 } | 1683 } |
1646 #endif | 1684 #endif |
1647 | 1685 |
1648 extern "C" PERFTOOLS_DLL_DECL size_t tc_malloc_size(void* ptr) __THROW { | 1686 extern "C" PERFTOOLS_DLL_DECL size_t tc_malloc_size(void* ptr) __THROW { |
1649 return MallocExtension::instance()->GetAllocatedSize(ptr); | 1687 return MallocExtension::instance()->GetAllocatedSize(ptr); |
1650 } | 1688 } |
1651 | 1689 |
1652 #endif // TCMALLOC_USING_DEBUGALLOCATION | 1690 #endif // TCMALLOC_USING_DEBUGALLOCATION |
| 1691 |
| 1692 // ---Double free() debugging implementation ----------------------------------- |
| 1693 // We put a mark at the extreme end of each allocation block. We make |
| 1694 // sure that we always allocate enough "extra memory" that we can fit in the |
| 1695 // mark, and still provide the requested usable region. If ever that mark is |
| 1696 // not as expected, then we know that the user is corrupting memory beyond the |
| 1697 // requested size, or that they have called free() a second time without the |
| 1698 // memory having been reallocated in between. This allows us to spot most |
| 1699 // double free()s, but some can "slip by" or confuse our logic if the caller |
| 1700 // reallocates the memory (for a second use) before performing an evil |
| 1701 // double-free of the first allocation. |
| 1702 |
| 1703 // This code can be optimized, but for now, it is written to be most easily |
| 1704 // understood, and flexible (since it is evolving a bit). Potential |
| 1705 // optimizations include using other calculated data, such as class size, or |
| 1706 // allocation size, which is known in the code above, but then is recalculated |
| 1707 // below. Another potential optimization would be careful manual inlining of |
| 1708 // code, but I *think* that the compiler will probably do this for me, and I've |
| 1709 // been careful to avoid aliasing issues that might make a compiler back-off. |
| 1710 |
| 1711 // Evolution includes experimenting with different marks, to minimize the chance |
| 1712 // that a mark would be misunderstood (missed corruption). The marks are meant |
| 1713 // to be a hashed encoding of the location, so that they can't be copied over a |
| 1714 // different region (by accident) without being detected (most of the time). |
| 1715 |
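
Concretely, every allocation is padded by one trailing MarkType slot (MarkType is defined just below), so the layout of a block of class size S is:

//   |<----------------------- class size S ------------------------>|
//   |<----------- usable region ----------->|<-- sizeof(MarkType) -->|
//   ptr                                     GetMarkLocation(ptr)
//
// AddRoomForMark()/ExcludeMarkFromSize() grow and shrink request sizes by
// that one slot, and ExcludeSpaceForMark() hides it from size queries.
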
| 1716 // Enable the following define to turn on all the TCMalloc checking. |
| 1717 // It will cost about 2% in performance, but it will catch double frees (most of |
| 1718 // the time), and will often catch allocated-buffer overrun errors. This |
| 1719 // validation is only active when TCMalloc is used as the allocator. |
| 1720 #ifndef NDEBUG |
| 1721 #define TCMALLOC_VALIDATION |
| 1722 #endif |
| 1723 |
| 1724 #if !defined(TCMALLOC_VALIDATION) |
| 1725 |
| 1726 static size_t ExcludeSpaceForMark(size_t size) { return size; } |
| 1727 static void AddRoomForMark(size_t* size) {} |
| 1728 static void ExcludeMarkFromSize(size_t* new_size) {} |
| 1729 static void MarkAllocatedRegion(void* ptr) {} |
| 1730 static void ValidateAllocatedRegion(void* ptr, size_t cl) {} |
| 1731 |
| 1732 #else // TCMALLOC_VALIDATION |
| 1733 |
| 1734 static void DieFromDoubleFree() { |
| 1735 char* p = NULL; |
| 1736 p++; |
| 1737 *p += 1; // Segv. |
| 1738 } |
| 1739 |
| 1740 static size_t DieFromBadFreePointer(const void* unused) { |
| 1741 char* p = NULL; |
| 1742 p += 2; |
| 1743 *p += 2; // Segv. |
| 1744 return 0; |
| 1745 } |
| 1746 |
| 1747 static void DieFromMemoryCorruption() { |
| 1748 char* p = NULL; |
| 1749 p += 3; |
| 1750 *p += 3; // Segv. |
| 1751 } |
| 1752 |
| 1753 // We can do either byte marking or whole-word marking, based on the following |
| 1754 // define. char is as small as we can get, and word marking probably provides |
| 1755 // more than enough bits that we won't miss a corruption. Any sized integral |
| 1756 // type can be used, but we just define two examples. |
| 1757 |
| 1758 // #define TCMALLOC_SMALL_VALIDATION |
| 1759 #if defined (TCMALLOC_SMALL_VALIDATION) |
| 1760 |
| 1761 typedef char MarkType; // char saves memory... int is more complete. |
| 1762 static const MarkType kAllocationMarkMask = static_cast<MarkType>(0x36); |
| 1763 |
| 1764 #else |
| 1765 |
| 1766 typedef int MarkType; // char saves memory... int is more complete. |
| 1767 static const MarkType kAllocationMarkMask = static_cast<MarkType>(0xE1AB9536); |
| 1768 |
| 1769 #endif |
| 1770 |
| 1771 // TODO(jar): See if use of reference rather than pointer gets better inlining, |
| 1772 // or if a macro is needed. My fear is that taking the address may preclude |
| 1773 // register allocation :-(. |
| 1774 inline static void AddRoomForMark(size_t* size) { |
| 1775 *size += sizeof(kAllocationMarkMask); |
| 1776 } |
| 1777 |
| 1778 inline static void ExcludeMarkFromSize(size_t* new_size) { |
| 1779 *new_size -= sizeof(kAllocationMarkMask); |
| 1780 } |
| 1781 |
| 1782 inline static size_t ExcludeSpaceForMark(size_t size) { |
| 1783 return size - sizeof(kAllocationMarkMask); // Lie about size when asked. |
| 1784 } |
| 1785 |
| 1786 inline static MarkType* GetMarkLocation(void* ptr) { |
| 1787 size_t class_size = GetSizeWithCallback(ptr, DieFromBadFreePointer); |
| 1788 ASSERT(class_size % sizeof(kAllocationMarkMask) == 0); |
| 1789 size_t last_index = (class_size / sizeof(kAllocationMarkMask)) - 1; |
| 1790 return static_cast<MarkType*>(ptr) + last_index; |
| 1791 } |
| 1792 |
| 1793 // We hash in the mark location plus the pointer so that we effectively mix in |
| 1794 // the size of the block. This means that if a span is used for different sizes, |
| 1795 // the mark will be different. It would be good to hash in the size (which |
| 1796 // we effectively get by using both mark location and pointer), but even better |
| 1797 // would be to also include the class, as it concisely contains the entropy |
| 1798 // found in the size (when we don't have large allocations), and there is less |
| 1799 // risk of losing those bits to truncation. It would probably be good to combine |
| 1800 // the high bits of size (capturing info about large blocks) with the class |
| 1801 // (which is a 6 bit number). |
| 1802 inline static MarkType GetMarkValue(void* ptr, MarkType* mark) { |
| 1803 void* ptr2 = static_cast<void*>(mark); |
| 1804 size_t offset1 = static_cast<char*>(ptr) - static_cast<char*>(NULL); |
| 1805 size_t offset2 = static_cast<char*>(ptr2) - static_cast<char*>(NULL); |
| 1806 static const int kInvariantBits = 2; |
| 1807 ASSERT((offset1 >> kInvariantBits) << kInvariantBits == offset1); |
| 1808 // Note: low bits of both offsets are invariants due to alignment. High bits |
| 1809 // of both offsets are the same (unless we have a large allocation). Avoid |
| 1810 // XORing high bits together, as they will cancel for most small allocations. |
| 1811 |
| 1812 MarkType ret = kAllocationMarkMask; |
| 1813 // Using a little shift, we can safely XOR together both offsets. |
| 1814 ret ^= static_cast<MarkType>(offset1 >> kInvariantBits) ^ |
| 1815 static_cast<MarkType>(offset2); |
| 1816 if (sizeof(ret) == 1) { |
| 1817 // Try to bring some high level bits into the mix. |
| 1818 ret += static_cast<MarkType>(offset1 >> 8) ^ |
| 1819 static_cast<MarkType>(offset1 >> 16) ^ |
| 1820 static_cast<MarkType>(offset1 >> 24) ; |
| 1821 } |
| 1822 // Hash in high bits on a 64 bit architecture. |
| 1823 if (sizeof(size_t) == 8 && sizeof(ret) == 4) |
| 1824 ret += offset1 >> 16; |
| 1825 if (ret == 0) |
| 1826 ret = kAllocationMarkMask; // Avoid common pattern of all zeros. |
| 1827 return ret; |
| 1828 } |
| 1829 |
| 1830 // TODO(jar): Use the passed-in TCMalloc class index to calculate the mark |
| 1831 // location faster. The current implementation calls general functions, which |
| 1832 // have to recalculate this in order to get the class size. This is a slow and |
| 1833 // wasteful recomputation... but it is much more readable this way (for now). |
| 1834 static void ValidateAllocatedRegion(void* ptr, size_t cl) { |
| 1835 if (ptr == NULL) return; |
| 1836 MarkType* mark = GetMarkLocation(ptr); |
| 1837 MarkType allocated_mark = GetMarkValue(ptr, mark); |
| 1838 MarkType current_mark = *mark; |
| 1839 |
| 1840 if (current_mark == ~allocated_mark) |
| 1841 DieFromDoubleFree(); |
| 1842 if (current_mark != allocated_mark) |
| 1843 DieFromMemoryCorruption(); |
| 1844 #ifndef NDEBUG |
| 1845 // In debug mode, fill the freed region with a known pattern (0x36). |
| 1846 size_t class_size = static_cast<size_t>(reinterpret_cast<char*>(mark) - |
| 1847 reinterpret_cast<char*>(ptr)); |
| 1848 memset(ptr, static_cast<char>(0x36), class_size); |
| 1849 #endif |
| 1850 *mark = ~allocated_mark; // Distinctively not allocated. |
| 1851 } |
| 1852 |
| 1853 static void MarkAllocatedRegion(void* ptr) { |
| 1854 if (ptr == NULL) return; |
| 1855 MarkType* mark = GetMarkLocation(ptr); |
| 1856 *mark = GetMarkValue(ptr, mark); |
| 1857 } |
| 1858 |
| 1859 #endif // TCMALLOC_VALIDATION |
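
Taken together, the helpers implement this allocate/free protocol (a condensed sketch of the calls already made from do_malloc() and do_free_with_callback() above):

// size_t size = requested_size;
// AddRoomForMark(&size);             // reserve the trailing mark slot
// void* ptr = <underlying allocation of `size` bytes>;
// MarkAllocatedRegion(ptr);          // stamp the hashed mark at block end
// ...
// ValidateAllocatedRegion(ptr, cl);  // on free: ~mark => double free,
//                                    // other mismatch => corruption;
//                                    // then store ~mark ("freed")
// GetAllocatedSize() reports ExcludeSpaceForMark(actual size), so the
// mark slot stays invisible to callers.
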