OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/malloc_dump_provider.h" | 5 #include "base/trace_event/malloc_dump_provider.h" |
6 | 6 |
7 #include <stddef.h> | 7 #include <stddef.h> |
8 | 8 |
9 #include "base/allocator/allocator_extension.h" | 9 #include "base/allocator/allocator_extension.h" |
10 #include "base/allocator/allocator_shim.h" | |
11 #include "base/allocator/features.h" | |
12 #include "base/trace_event/heap_profiler_allocation_context.h" | |
13 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" | |
14 #include "base/trace_event/heap_profiler_allocation_register.h" | |
15 #include "base/trace_event/heap_profiler_heap_dump_writer.h" | |
10 #include "base/trace_event/process_memory_dump.h" | 16 #include "base/trace_event/process_memory_dump.h" |
17 #include "base/trace_event/trace_event_argument.h" | |
11 #include "build/build_config.h" | 18 #include "build/build_config.h" |
12 | 19 |
13 #if defined(OS_MACOSX) | 20 #if defined(OS_MACOSX) |
14 #include <malloc/malloc.h> | 21 #include <malloc/malloc.h> |
15 #else | 22 #else |
16 #include <malloc.h> | 23 #include <malloc.h> |
17 #endif | 24 #endif |
18 | 25 |
19 namespace base { | 26 namespace base { |
20 namespace trace_event { | 27 namespace trace_event { |
21 | 28 |
29 #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) | |
30 namespace { | |
31 | |
32 using allocator::AllocatorDispatch; | |
33 | |
34 void* HookAlloc(const AllocatorDispatch* self, size_t size) { | |
35 const AllocatorDispatch* const next = self->next; | |
36 void* ptr = next->alloc_function(next, size); | |
37 if (ptr) | |
38 MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size); | |
39 return ptr; | |
40 } | |
41 | |
42 void* HookZeroInitAlloc(const AllocatorDispatch* self, size_t n, size_t size) { | |
43 const AllocatorDispatch* const next = self->next; | |
44 void* ptr = next->alloc_zero_initialized_function(next, n, size); | |
45 if (ptr) | |
46 MallocDumpProvider::GetInstance()->InsertAllocation(ptr, n * size); | |
47 return ptr; | |
48 } | |
49 | |
50 void* HookllocAligned(const AllocatorDispatch* self, | |
51 size_t alignment, | |
52 size_t size) { | |
53 const AllocatorDispatch* const next = self->next; | |
54 void* ptr = next->alloc_aligned_function(next, alignment, size); | |
55 if (ptr) | |
56 MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size); | |
57 return ptr; | |
58 } | |
59 | |
60 void* HookRealloc(const AllocatorDispatch* self, void* address, size_t size) { | |
61 const AllocatorDispatch* const next = self->next; | |
62 void* ptr = next->realloc_function(next, address, size); | |
63 MallocDumpProvider::GetInstance()->RemoveAllocation(address); | |
64 if (size > 0) // realloc(size == 0) means free(). | |
65 MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size); | |
66 return ptr; | |
67 } | |
68 | |
69 void HookFree(const AllocatorDispatch* self, void* address) { | |
70 if (address) | |
71 MallocDumpProvider::GetInstance()->RemoveAllocation(address); | |
72 const AllocatorDispatch* const next = self->next; | |
73 next->free_function(next, address); | |
74 } | |
75 | |
76 AllocatorDispatch g_allocator_hooks = { | |
77 &HookAlloc, /* alloc_function */ | |
78 &HookZeroInitAlloc, /* alloc_zero_initialized_function */ | |
79 &HookllocAligned, /* alloc_aligned_function */ | |
80 &HookRealloc, /* realloc_function */ | |
81 &HookFree, /* free_function */ | |
82 nullptr, /* next */ | |
83 }; | |
84 | |
85 } // namespace | |
86 #endif // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) | |
87 | |
22 // static | 88 // static |
23 const char MallocDumpProvider::kAllocatedObjects[] = "malloc/allocated_objects"; | 89 const char MallocDumpProvider::kAllocatedObjects[] = "malloc/allocated_objects"; |
24 | 90 |
25 // static | 91 // static |
26 MallocDumpProvider* MallocDumpProvider::GetInstance() { | 92 MallocDumpProvider* MallocDumpProvider::GetInstance() { |
27 return Singleton<MallocDumpProvider, | 93 return Singleton<MallocDumpProvider, |
28 LeakySingletonTraits<MallocDumpProvider>>::get(); | 94 LeakySingletonTraits<MallocDumpProvider>>::get(); |
29 } | 95 } |
30 | 96 |
31 MallocDumpProvider::MallocDumpProvider() {} | 97 MallocDumpProvider::MallocDumpProvider() |
98 : heap_profiler_enabled_(false), tid_dumping_heap_(kInvalidThreadId) {} | |
32 | 99 |
33 MallocDumpProvider::~MallocDumpProvider() {} | 100 MallocDumpProvider::~MallocDumpProvider() {} |
34 | 101 |
35 // Called at trace dump point time. Creates a snapshot of the memory counters | 102 // Called at trace dump point time. Creates a snapshot of the memory counters |
36 // the current process. | 103 // the current process. |
37 bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args, | 104 bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args, |
38 ProcessMemoryDump* pmd) { | 105 ProcessMemoryDump* pmd) { |
39 size_t total_virtual_size = 0; | 106 size_t total_virtual_size = 0; |
40 size_t resident_size = 0; | 107 size_t resident_size = 0; |
41 size_t allocated_objects_size = 0; | 108 size_t allocated_objects_size = 0; |
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
89 // Explicitly specify why extra memory is resident. In tcmalloc it accounts | 156 // Explicitly specify why extra memory is resident. In tcmalloc it accounts |
90 // for free lists and caches. In mac and ios it accounts for the | 157 // for free lists and caches. In mac and ios it accounts for the |
91 // fragmentation and metadata. | 158 // fragmentation and metadata. |
92 MemoryAllocatorDump* other_dump = | 159 MemoryAllocatorDump* other_dump = |
93 pmd->CreateAllocatorDump("malloc/metadata_fragmentation_caches"); | 160 pmd->CreateAllocatorDump("malloc/metadata_fragmentation_caches"); |
94 other_dump->AddScalar(MemoryAllocatorDump::kNameSize, | 161 other_dump->AddScalar(MemoryAllocatorDump::kNameSize, |
95 MemoryAllocatorDump::kUnitsBytes, | 162 MemoryAllocatorDump::kUnitsBytes, |
96 resident_size - allocated_objects_size); | 163 resident_size - allocated_objects_size); |
97 } | 164 } |
98 | 165 |
166 // Heap profiler dumps. | |
167 if (!heap_profiler_enabled_) | |
168 return true; | |
169 | |
170 // The dumps of the heap profiler should be created only when heap profiling | |
171 // was enabled (--enable-heap-profiling) AND a DETAILED dump is requested. | |
172 // However, when enabled, the overhead of the heap profiler should be always | |
173 // reported (w.r.t LIGHT vs DETAILED), to avoid oscillations in the malloc() | |
petrcermak
2016/03/11 11:09:23
nit: s/w.r.t/w.r.t./ and s/vs/vs./
petrcermak
2016/03/11 11:09:24
supernit: I don't think there should be a comma be
petrcermak
2016/03/11 11:09:24
supernit: I think you should s/malloc()/malloc/ or
petrcermak
2016/03/11 11:09:24
What do you mean by "w.r.t LIGHT vs DETAILED"? Don
Primiano Tucci (use gerrit)
2016/03/11 13:57:57
Done.
Primiano Tucci (use gerrit)
2016/03/11 13:57:57
Ok reworded this.
Primiano Tucci (use gerrit)
2016/03/11 13:57:57
Done.
Primiano Tucci (use gerrit)
2016/03/11 13:57:57
Done.
| |
174 // total. | |
175 | |
176 tid_dumping_heap_ = PlatformThread::CurrentId(); | |
177 // At this point the Insert/RemoveAllocation hooks will ignore this thread. | |
178 // Enclosing all the temporary data structures in a scope, so that the heap | |
179 // profiler does not see unbalanced malloc/free calls from these containers. | |
180 { | |
181 TraceEventMemoryOverhead overhead; | |
182 hash_map<AllocationContext, size_t> bytes_by_context; | |
183 { | |
184 AutoLock lock(allocation_register_lock_); | |
185 if (allocation_register_) { | |
186 if (args.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) { | |
187 for (const auto& alloc_size : *allocation_register_) | |
188 bytes_by_context[alloc_size.context] += alloc_size.size; | |
189 } | |
190 allocation_register_->EstimateTraceMemoryOverhead(&overhead); | |
191 } | |
192 } // lock(allocation_register_lock_) | |
193 | |
194 if (!bytes_by_context.empty()) { | |
195 scoped_ptr<TracedValue> heap_dump = ExportHeapDump( | |
196 bytes_by_context, pmd->session_state()->stack_frame_deduplicator(), | |
197 pmd->session_state()->type_name_deduplicator()); | |
198 pmd->AddHeapDump("malloc", std::move(heap_dump)); | |
199 } | |
200 overhead.DumpInto("tracing/heap_profiler_malloc", pmd); | |
petrcermak
2016/03/11 11:09:24
thought: Wouldn't "tracing/malloc_heap_profiler" b
Primiano Tucci (use gerrit)
2016/03/11 13:57:57
In the beginning it was like that, but I made it c
petrcermak
2016/03/11 14:26:31
Acknowledged.
| |
201 } | |
202 tid_dumping_heap_ = kInvalidThreadId; | |
203 | |
99 return true; | 204 return true; |
100 } | 205 } |
101 | 206 |
207 void MallocDumpProvider::OnHeapProfilingEnabled(bool enabled) { | |
208 #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) | |
209 if (enabled) { | |
210 { | |
211 AutoLock lock(allocation_register_lock_); | |
212 allocation_register_.reset(new AllocationRegister()); | |
213 } | |
214 allocator::InsertAllocatorDispatch(&g_allocator_hooks); | |
215 } else { | |
216 AutoLock lock(allocation_register_lock_); | |
217 allocation_register_.reset(); | |
218 // Insert/RemoveAllocation below will no-op if the register is torn down. | |
petrcermak
2016/03/11 11:09:24
s/Inser/Insert/
Primiano Tucci (use gerrit)
2016/03/11 13:57:57
Done.
| |
219 } | |
220 #endif | |
221 heap_profiler_enabled_ = enabled; | |
222 } | |
223 | |
224 void MallocDumpProvider::InsertAllocation(void* address, size_t size) { | |
225 if (tid_dumping_heap_ != kInvalidThreadId && | |
petrcermak
2016/03/11 11:09:24
Can it ever happen that PlatformThread::CurrentId(
Primiano Tucci (use gerrit)
2016/03/11 13:57:57
Heh I knew that you would have commented this. You
petrcermak
2016/03/11 14:26:31
Acknowledged.
| |
226 tid_dumping_heap_ == PlatformThread::CurrentId()) | |
227 return; | |
228 // AllocationContextTracker initialization is the only thing here that can | |
229 // cause re-entrancy. | |
230 switch (AllocationContextTracker::GetStateForCurrentThread()) { | |
231 case AllocationContextTracker::kInitialized: | |
232 break; | |
233 case AllocationContextTracker::kNotInitialized: | |
234 AllocationContextTracker::InitializeForCurrentThread(); | |
235 return; | |
236 case AllocationContextTracker::kInitializing: | |
237 return; | |
238 } | |
239 | |
240 AllocationContext context = AllocationContextTracker::GetContextSnapshot(); | |
241 | |
242 AutoLock lock(allocation_register_lock_); | |
petrcermak
2016/03/11 11:09:24
Can't the compiler move this definition to the top
Primiano Tucci (use gerrit)
2016/03/11 13:57:57
nope. The compiler is not allowed to reorder instr
| |
243 if (!allocation_register_) | |
244 return; | |
245 | |
246 allocation_register_->Insert(address, size, context); | |
247 } | |
248 | |
249 void MallocDumpProvider::RemoveAllocation(void* address) { | |
250 if (tid_dumping_heap_ != kInvalidThreadId && | |
251 tid_dumping_heap_ == PlatformThread::CurrentId()) | |
252 return; | |
253 AutoLock lock(allocation_register_lock_); | |
petrcermak
2016/03/11 11:09:24
Is it definitely the case that AllocationContextTr
Primiano Tucci (use gerrit)
2016/03/11 13:57:57
Good spot. It's definitely NOT the case (I mean, i
petrcermak
2016/03/11 14:26:31
Thanks for the detailed explanation. It might be w
Primiano Tucci (use gerrit)
2016/03/11 16:57:21
Done
| |
254 if (!allocation_register_) | |
255 return; | |
256 allocation_register_->Remove(address); | |
257 } | |
258 | |
102 } // namespace trace_event | 259 } // namespace trace_event |
103 } // namespace base | 260 } // namespace base |
OLD | NEW |