OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/malloc_dump_provider.h" | 5 #include "base/trace_event/malloc_dump_provider.h" |
6 | 6 |
7 #include <stddef.h> | 7 #include <stddef.h> |
8 | 8 |
9 #include "base/allocator/allocator_extension.h" | 9 #include "base/allocator/allocator_extension.h" |
| 10 #include "base/allocator/allocator_shim.h" |
| 11 #include "base/allocator/features.h" |
| 12 #include "base/trace_event/heap_profiler_allocation_context.h" |
| 13 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" |
| 14 #include "base/trace_event/heap_profiler_allocation_register.h" |
| 15 #include "base/trace_event/heap_profiler_heap_dump_writer.h" |
10 #include "base/trace_event/process_memory_dump.h" | 16 #include "base/trace_event/process_memory_dump.h" |
| 17 #include "base/trace_event/trace_event_argument.h" |
11 #include "build/build_config.h" | 18 #include "build/build_config.h" |
12 | 19 |
13 #if defined(OS_MACOSX) | 20 #if defined(OS_MACOSX) |
14 #include <malloc/malloc.h> | 21 #include <malloc/malloc.h> |
15 #else | 22 #else |
16 #include <malloc.h> | 23 #include <malloc.h> |
17 #endif | 24 #endif |
18 | 25 |
19 namespace base { | 26 namespace base { |
20 namespace trace_event { | 27 namespace trace_event { |
21 | 28 |
| 29 #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) |
| 30 namespace { |
| 31 |
| 32 using allocator::AllocatorDispatch; |
| 33 |
| 34 void* HookAlloc(const AllocatorDispatch* self, size_t size) { |
| 35 const AllocatorDispatch* const next = self->next; |
| 36 void* ptr = next->alloc_function(next, size); |
| 37 if (ptr) |
| 38 MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size); |
| 39 return ptr; |
| 40 } |
| 41 |
| 42 void* HookZeroInitAlloc(const AllocatorDispatch* self, size_t n, size_t size) { |
| 43 const AllocatorDispatch* const next = self->next; |
| 44 void* ptr = next->alloc_zero_initialized_function(next, n, size); |
| 45 if (ptr) |
| 46 MallocDumpProvider::GetInstance()->InsertAllocation(ptr, n * size); |
| 47 return ptr; |
| 48 } |
| 49 |
| 50 void* HookllocAligned(const AllocatorDispatch* self, |
| 51 size_t alignment, |
| 52 size_t size) { |
| 53 const AllocatorDispatch* const next = self->next; |
| 54 void* ptr = next->alloc_aligned_function(next, alignment, size); |
| 55 if (ptr) |
| 56 MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size); |
| 57 return ptr; |
| 58 } |
| 59 |
| 60 void* HookRealloc(const AllocatorDispatch* self, void* address, size_t size) { |
| 61 const AllocatorDispatch* const next = self->next; |
| 62 void* ptr = next->realloc_function(next, address, size); |
| 63 MallocDumpProvider::GetInstance()->RemoveAllocation(address); |
| 64 if (size > 0) // realloc(size == 0) means free(). |
| 65 MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size); |
| 66 return ptr; |
| 67 } |
| 68 |
| 69 void HookFree(const AllocatorDispatch* self, void* address) { |
| 70 if (address) |
| 71 MallocDumpProvider::GetInstance()->RemoveAllocation(address); |
| 72 const AllocatorDispatch* const next = self->next; |
| 73 next->free_function(next, address); |
| 74 } |
| 75 |
| 76 AllocatorDispatch g_allocator_hooks = { |
| 77 &HookAlloc, /* alloc_function */ |
| 78 &HookZeroInitAlloc, /* alloc_zero_initialized_function */ |
| 79 &HookllocAligned, /* alloc_aligned_function */ |
| 80 &HookRealloc, /* realloc_function */ |
| 81 &HookFree, /* free_function */ |
| 82 nullptr, /* next */ |
| 83 }; |
| 84 |
| 85 } // namespace |
| 86 #endif // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) |
| 87 |
22 // static | 88 // static |
23 const char MallocDumpProvider::kAllocatedObjects[] = "malloc/allocated_objects"; | 89 const char MallocDumpProvider::kAllocatedObjects[] = "malloc/allocated_objects"; |
24 | 90 |
25 // static | 91 // static |
26 MallocDumpProvider* MallocDumpProvider::GetInstance() { | 92 MallocDumpProvider* MallocDumpProvider::GetInstance() { |
27 return Singleton<MallocDumpProvider, | 93 return Singleton<MallocDumpProvider, |
28 LeakySingletonTraits<MallocDumpProvider>>::get(); | 94 LeakySingletonTraits<MallocDumpProvider>>::get(); |
29 } | 95 } |
30 | 96 |
31 MallocDumpProvider::MallocDumpProvider() {} | 97 MallocDumpProvider::MallocDumpProvider() |
| 98 : heap_profiler_enabled_(false), tid_dumping_heap_(kInvalidThreadId) {} |
32 | 99 |
33 MallocDumpProvider::~MallocDumpProvider() {} | 100 MallocDumpProvider::~MallocDumpProvider() {} |
34 | 101 |
35 // Called at trace dump point time. Creates a snapshot of the memory | 102 // Called at trace dump point time. Creates a snapshot of the memory |
36 // counters for the current process. | 103 // counters for the current process. |
37 bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args, | 104 bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args, |
38 ProcessMemoryDump* pmd) { | 105 ProcessMemoryDump* pmd) { |
39 size_t total_virtual_size = 0; | 106 size_t total_virtual_size = 0; |
40 size_t resident_size = 0; | 107 size_t resident_size = 0; |
41 size_t allocated_objects_size = 0; | 108 size_t allocated_objects_size = 0; |
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
89 // Explicitly specify why is extra memory resident. In tcmalloc it accounts | 156 // Explicitly specify why is extra memory resident. In tcmalloc it accounts |
90 // for free lists and caches. In mac and ios it accounts for the | 157 // for free lists and caches. In mac and ios it accounts for the |
91 // fragmentation and metadata. | 158 // fragmentation and metadata. |
92 MemoryAllocatorDump* other_dump = | 159 MemoryAllocatorDump* other_dump = |
93 pmd->CreateAllocatorDump("malloc/metadata_fragmentation_caches"); | 160 pmd->CreateAllocatorDump("malloc/metadata_fragmentation_caches"); |
94 other_dump->AddScalar(MemoryAllocatorDump::kNameSize, | 161 other_dump->AddScalar(MemoryAllocatorDump::kNameSize, |
95 MemoryAllocatorDump::kUnitsBytes, | 162 MemoryAllocatorDump::kUnitsBytes, |
96 resident_size - allocated_objects_size); | 163 resident_size - allocated_objects_size); |
97 } | 164 } |
98 | 165 |
| 166 // Heap profiler dumps. |
| 167 if (!heap_profiler_enabled_) |
| 168 return true; |
| 169 |
| 170 // The dumps of the heap profiler should be created only when heap profiling |
| 171 // was enabled (--enable-heap-profiling) AND a DETAILED dump is requested. |
| 172 // However, when enabled, the overhead of the heap profiler should be always |
| 173 // reported to avoid oscillations of the malloc total in LIGHT dumps. |
| 174 |
| 175 tid_dumping_heap_ = PlatformThread::CurrentId(); |
| 176 // At this point the Insert/RemoveAllocation hooks will ignore this thread. |
| 177 // Enclosing all the temporary data structures in a scope, so that the heap |
| 178 // profiler does not see unbalanced malloc/free calls from these containers. |
| 179 { |
| 180 TraceEventMemoryOverhead overhead; |
| 181 hash_map<AllocationContext, size_t> bytes_by_context; |
| 182 { |
| 183 AutoLock lock(allocation_register_lock_); |
| 184 if (allocation_register_) { |
| 185 if (args.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) { |
| 186 for (const auto& alloc_size : *allocation_register_) |
| 187 bytes_by_context[alloc_size.context] += alloc_size.size; |
| 188 } |
| 189 allocation_register_->EstimateTraceMemoryOverhead(&overhead); |
| 190 } |
| 191 } // lock(allocation_register_lock_) |
| 192 |
| 193 if (!bytes_by_context.empty()) { |
| 194 scoped_ptr<TracedValue> heap_dump = ExportHeapDump( |
| 195 bytes_by_context, pmd->session_state()->stack_frame_deduplicator(), |
| 196 pmd->session_state()->type_name_deduplicator()); |
| 197 pmd->AddHeapDump("malloc", std::move(heap_dump)); |
| 198 } |
| 199 overhead.DumpInto("tracing/heap_profiler_malloc", pmd); |
| 200 } |
| 201 tid_dumping_heap_ = kInvalidThreadId; |
| 202 |
99 return true; | 203 return true; |
100 } | 204 } |
101 | 205 |
| 206 void MallocDumpProvider::OnHeapProfilingEnabled(bool enabled) { |
| 207 #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) |
| 208 if (enabled) { |
| 209 { |
| 210 AutoLock lock(allocation_register_lock_); |
| 211 allocation_register_.reset(new AllocationRegister()); |
| 212 } |
| 213 allocator::InsertAllocatorDispatch(&g_allocator_hooks); |
| 214 } else { |
| 215 AutoLock lock(allocation_register_lock_); |
| 216 allocation_register_.reset(); |
| 217 // Insert/RemoveAllocation below will no-op if the register is torn down. |
| 218 // Once disabled, heap profiling will not be re-enabled again for the |
| 219 // lifetime of the process. |
| 220 } |
| 221 #endif |
| 222 heap_profiler_enabled_ = enabled; |
| 223 } |
| 224 |
| 225 void MallocDumpProvider::InsertAllocation(void* address, size_t size) { |
| 226 // CurrentId() can be a slow operation (crbug.com/497226). This apparently |
| 227 // redundant condition short circuits the CurrentId() calls when unnecessary. |
| 228 if (tid_dumping_heap_ != kInvalidThreadId && |
| 229 tid_dumping_heap_ == PlatformThread::CurrentId()) |
| 230 return; |
| 231 |
| 232 // AllocationContextTracker will return nullptr when called re-entrantly. |
| 233 // This is the case of GetInstanceForCurrentThread() being called for the |
| 234 // first time, which causes a new() inside the tracker which re-enters the |
| 235 // heap profiler, in which case we just want to early out. |
| 236 auto tracker = AllocationContextTracker::GetInstanceForCurrentThread(); |
| 237 if (!tracker) |
| 238 return; |
| 239 AllocationContext context = tracker->GetContextSnapshot(); |
| 240 |
| 241 AutoLock lock(allocation_register_lock_); |
| 242 if (!allocation_register_) |
| 243 return; |
| 244 |
| 245 allocation_register_->Insert(address, size, context); |
| 246 } |
| 247 |
| 248 void MallocDumpProvider::RemoveAllocation(void* address) { |
| 249 // No re-entrancy is expected here as none of the calls below should |
| 250 // cause a free() (|allocation_register_| does its own heap management). |
| 251 if (tid_dumping_heap_ != kInvalidThreadId && |
| 252 tid_dumping_heap_ == PlatformThread::CurrentId()) |
| 253 return; |
| 254 AutoLock lock(allocation_register_lock_); |
| 255 if (!allocation_register_) |
| 256 return; |
| 257 allocation_register_->Remove(address); |
| 258 } |
| 259 |
102 } // namespace trace_event | 260 } // namespace trace_event |
103 } // namespace base | 261 } // namespace base |
OLD | NEW |