Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(108)

Side by Side Diff: base/trace_event/malloc_dump_provider.cc

Issue 2890363003: Enable sharding of AllocationRegister on desktop. (Closed)
Patch Set: comment from primiano. Created 3 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/trace_event/malloc_dump_provider.h" 5 #include "base/trace_event/malloc_dump_provider.h"
6 6
7 #include <stddef.h> 7 #include <stddef.h>
8 8
9 #include <unordered_map> 9 #include <unordered_map>
10 10
11 #include "base/allocator/allocator_extension.h" 11 #include "base/allocator/allocator_extension.h"
12 #include "base/allocator/allocator_shim.h" 12 #include "base/allocator/allocator_shim.h"
13 #include "base/allocator/features.h" 13 #include "base/allocator/features.h"
14 #include "base/debug/profiler.h" 14 #include "base/debug/profiler.h"
15 #include "base/trace_event/heap_profiler_allocation_context.h" 15 #include "base/trace_event/heap_profiler_allocation_context.h"
16 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" 16 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
17 #include "base/trace_event/heap_profiler_allocation_register.h"
18 #include "base/trace_event/heap_profiler_heap_dump_writer.h" 17 #include "base/trace_event/heap_profiler_heap_dump_writer.h"
19 #include "base/trace_event/process_memory_dump.h" 18 #include "base/trace_event/process_memory_dump.h"
19 #include "base/trace_event/sharded_allocation_register.h"
20 #include "base/trace_event/trace_event_argument.h" 20 #include "base/trace_event/trace_event_argument.h"
21 #include "build/build_config.h" 21 #include "build/build_config.h"
22 22
23 #if defined(OS_MACOSX) 23 #if defined(OS_MACOSX)
24 #include <malloc/malloc.h> 24 #include <malloc/malloc.h>
25 #else 25 #else
26 #include <malloc.h> 26 #include <malloc.h>
27 #endif 27 #endif
28 #if defined(OS_WIN) 28 #if defined(OS_WIN)
29 #include <windows.h> 29 #include <windows.h>
(...skipping 153 matching lines...) Expand 10 before | Expand all | Expand 10 after
183 // static 183 // static
184 const char MallocDumpProvider::kAllocatedObjects[] = "malloc/allocated_objects"; 184 const char MallocDumpProvider::kAllocatedObjects[] = "malloc/allocated_objects";
185 185
186 // static 186 // static
187 MallocDumpProvider* MallocDumpProvider::GetInstance() { 187 MallocDumpProvider* MallocDumpProvider::GetInstance() {
188 return Singleton<MallocDumpProvider, 188 return Singleton<MallocDumpProvider,
189 LeakySingletonTraits<MallocDumpProvider>>::get(); 189 LeakySingletonTraits<MallocDumpProvider>>::get();
190 } 190 }
191 191
192 MallocDumpProvider::MallocDumpProvider() 192 MallocDumpProvider::MallocDumpProvider()
193 : heap_profiler_enabled_(false), tid_dumping_heap_(kInvalidThreadId) {} 193 : allocation_register_(new ShardedAllocationRegister),
194 tid_dumping_heap_(kInvalidThreadId) {}
194 195
195 MallocDumpProvider::~MallocDumpProvider() {} 196 MallocDumpProvider::~MallocDumpProvider() {}
196 197
197 // Called at trace dump point time. Creates a snapshot the memory counters for 198 // Called at trace dump point time. Creates a snapshot the memory counters for
198 // the current process. 199 // the current process.
199 bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args, 200 bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
200 ProcessMemoryDump* pmd) { 201 ProcessMemoryDump* pmd) {
201 size_t total_virtual_size = 0; 202 size_t total_virtual_size = 0;
202 size_t resident_size = 0; 203 size_t resident_size = 0;
203 size_t allocated_objects_size = 0; 204 size_t allocated_objects_size = 0;
(...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after
279 // for free lists and caches. In mac and ios it accounts for the 280 // for free lists and caches. In mac and ios it accounts for the
280 // fragmentation and metadata. 281 // fragmentation and metadata.
281 MemoryAllocatorDump* other_dump = 282 MemoryAllocatorDump* other_dump =
282 pmd->CreateAllocatorDump("malloc/metadata_fragmentation_caches"); 283 pmd->CreateAllocatorDump("malloc/metadata_fragmentation_caches");
283 other_dump->AddScalar(MemoryAllocatorDump::kNameSize, 284 other_dump->AddScalar(MemoryAllocatorDump::kNameSize,
284 MemoryAllocatorDump::kUnitsBytes, 285 MemoryAllocatorDump::kUnitsBytes,
285 resident_size - allocated_objects_size); 286 resident_size - allocated_objects_size);
286 } 287 }
287 288
288 // Heap profiler dumps. 289 // Heap profiler dumps.
289 if (!heap_profiler_enabled_) 290 if (!allocation_register_->IsEnabled())
290 return true; 291 return true;
291 292
292 // The dumps of the heap profiler should be created only when heap profiling 293 // The dumps of the heap profiler should be created only when heap profiling
293 // was enabled (--enable-heap-profiling) AND a DETAILED dump is requested. 294 // was enabled (--enable-heap-profiling) AND a DETAILED dump is requested.
294 // However, when enabled, the overhead of the heap profiler should be always 295 // However, when enabled, the overhead of the heap profiler should be always
295 // reported to avoid oscillations of the malloc total in LIGHT dumps. 296 // reported to avoid oscillations of the malloc total in LIGHT dumps.
296 297
297 tid_dumping_heap_ = PlatformThread::CurrentId(); 298 tid_dumping_heap_ = PlatformThread::CurrentId();
298 // At this point the Insert/RemoveAllocation hooks will ignore this thread. 299 // At this point the Insert/RemoveAllocation hooks will ignore this thread.
299 // Enclosing all the temporary data structures in a scope, so that the heap 300 // Enclosing all the temporary data structures in a scope, so that the heap
300 // profiler does not see unbalanced malloc/free calls from these containers. 301 // profiler does not see unbalanced malloc/free calls from these containers.
301 { 302 {
302 size_t shim_allocated_objects_size = 0; 303 size_t shim_allocated_objects_size = 0;
303 size_t shim_allocated_objects_count = 0; 304 size_t shim_allocated_objects_count = 0;
304 TraceEventMemoryOverhead overhead; 305 TraceEventMemoryOverhead overhead;
305 std::unordered_map<AllocationContext, AllocationMetrics> metrics_by_context; 306 std::unordered_map<AllocationContext, AllocationMetrics> metrics_by_context;
306 { 307 {
Primiano Tucci (use gerrit) 2017/05/22 16:37:50 at this point this {} scope delimiter is not needed
erikchen 2017/05/22 17:10:42 Done.
307 AutoLock lock(allocation_register_lock_);
308 if (allocation_register_) { 308 if (allocation_register_) {
Primiano Tucci (use gerrit) 2017/05/22 16:37:50 this condition is always going to evaluate to true
erikchen 2017/05/22 17:10:42 The condition is already checked on line 290. Removed.
309 if (args.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) { 309 if (args.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
310 for (const auto& alloc_size : *allocation_register_) { 310 ShardedAllocationRegister::OutputMetrics metrics =
311 AllocationMetrics& metrics = metrics_by_context[alloc_size.context]; 311 allocation_register_->UpdateAndReturnsMetrics(metrics_by_context);
312 metrics.size += alloc_size.size;
313 metrics.count++;
314 312
315 // Aggregate data for objects allocated through the shim. 313 // Aggregate data for objects allocated through the shim.
316 shim_allocated_objects_size += alloc_size.size; 314 shim_allocated_objects_size += metrics.size;
317 shim_allocated_objects_count++; 315 shim_allocated_objects_count += metrics.count;
318 }
319 } 316 }
320 allocation_register_->EstimateTraceMemoryOverhead(&overhead); 317 allocation_register_->EstimateTraceMemoryOverhead(&overhead);
321 } 318 }
322 319
323 inner_dump->AddScalar("shim_allocated_objects_size", 320 inner_dump->AddScalar("shim_allocated_objects_size",
324 MemoryAllocatorDump::kUnitsBytes, 321 MemoryAllocatorDump::kUnitsBytes,
325 shim_allocated_objects_size); 322 shim_allocated_objects_size);
326 inner_dump->AddScalar("shim_allocator_object_count", 323 inner_dump->AddScalar("shim_allocator_object_count",
327 MemoryAllocatorDump::kUnitsObjects, 324 MemoryAllocatorDump::kUnitsObjects,
328 shim_allocated_objects_count); 325 shim_allocated_objects_count);
329 } // lock(allocation_register_lock_) 326 }
330 pmd->DumpHeapUsage(metrics_by_context, overhead, "malloc"); 327 pmd->DumpHeapUsage(metrics_by_context, overhead, "malloc");
331 } 328 }
332 tid_dumping_heap_ = kInvalidThreadId; 329 tid_dumping_heap_ = kInvalidThreadId;
333 330
334 return true; 331 return true;
335 } 332 }
336 333
337 void MallocDumpProvider::OnHeapProfilingEnabled(bool enabled) { 334 void MallocDumpProvider::OnHeapProfilingEnabled(bool enabled) {
338 #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) 335 #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
339 if (enabled) { 336 if (enabled) {
340 { 337 if (!allocation_register_->IsInitialized())
Primiano Tucci (use gerrit) 2017/05/22 16:37:50 just call Initialize and early out in its implementation
erikchen 2017/05/22 17:10:42 Done.
341 AutoLock lock(allocation_register_lock_); 338 allocation_register_->Initialize();
342 allocation_register_.reset(new AllocationRegister());
343 }
344 allocator::InsertAllocatorDispatch(&g_allocator_hooks); 339 allocator::InsertAllocatorDispatch(&g_allocator_hooks);
345 } else { 340 } else {
346 AutoLock lock(allocation_register_lock_); 341 // Once we enable heap profiling, we never remove the structures from
347 allocation_register_.reset(); 342 // memory.
348 // Insert/RemoveAllocation below will no-op if the register is torn down.
349 // Once disabled, heap profiling will not re-enabled anymore for the
350 // lifetime of the process.
351 } 343 }
352 #endif 344 #endif
353 heap_profiler_enabled_ = enabled; 345 allocation_register_->SetEnabled(enabled);
354 } 346 }
355 347
356 void MallocDumpProvider::InsertAllocation(void* address, size_t size) { 348 void MallocDumpProvider::InsertAllocation(void* address, size_t size) {
357 // CurrentId() can be a slow operation (crbug.com/497226). This apparently 349 // CurrentId() can be a slow operation (crbug.com/497226). This apparently
358 // redundant condition short circuits the CurrentID() calls when unnecessary. 350 // redundant condition short circuits the CurrentID() calls when unnecessary.
359 if (tid_dumping_heap_ != kInvalidThreadId && 351 if (tid_dumping_heap_ != kInvalidThreadId &&
360 tid_dumping_heap_ == PlatformThread::CurrentId()) 352 tid_dumping_heap_ == PlatformThread::CurrentId())
361 return; 353 return;
362 354
363 // AllocationContextTracker will return nullptr when called re-reentrantly. 355 // AllocationContextTracker will return nullptr when called re-reentrantly.
364 // This is the case of GetInstanceForCurrentThread() being called for the 356 // This is the case of GetInstanceForCurrentThread() being called for the
365 // first time, which causes a new() inside the tracker which re-enters the 357 // first time, which causes a new() inside the tracker which re-enters the
366 // heap profiler, in which case we just want to early out. 358 // heap profiler, in which case we just want to early out.
367 auto* tracker = AllocationContextTracker::GetInstanceForCurrentThread(); 359 auto* tracker = AllocationContextTracker::GetInstanceForCurrentThread();
368 if (!tracker) 360 if (!tracker)
369 return; 361 return;
370 362
371 AllocationContext context; 363 AllocationContext context;
372 if (!tracker->GetContextSnapshot(&context)) 364 if (!tracker->GetContextSnapshot(&context))
373 return; 365 return;
374 366
375 AutoLock lock(allocation_register_lock_); 367 if (!allocation_register_->IsEnabled())
376 if (!allocation_register_)
377 return; 368 return;
378 369
379 allocation_register_->Insert(address, size, context); 370 allocation_register_->Insert(address, size, context);
380 } 371 }
381 372
382 void MallocDumpProvider::RemoveAllocation(void* address) { 373 void MallocDumpProvider::RemoveAllocation(void* address) {
383 // No re-entrancy is expected here as none of the calls below should 374 // No re-entrancy is expected here as none of the calls below should
384 // cause a free()-s (|allocation_register_| does its own heap management). 375 // cause a free()-s (|allocation_register_| does its own heap management).
385 if (tid_dumping_heap_ != kInvalidThreadId && 376 if (tid_dumping_heap_ != kInvalidThreadId &&
386 tid_dumping_heap_ == PlatformThread::CurrentId()) 377 tid_dumping_heap_ == PlatformThread::CurrentId())
387 return; 378 return;
388 AutoLock lock(allocation_register_lock_); 379 if (!allocation_register_->IsEnabled())
389 if (!allocation_register_)
390 return; 380 return;
391 allocation_register_->Remove(address); 381 allocation_register_->Remove(address);
392 } 382 }
393 383
394 } // namespace trace_event 384 } // namespace trace_event
395 } // namespace base 385 } // namespace base
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698