| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/malloc_dump_provider.h" | 5 #include "base/trace_event/malloc_dump_provider.h" |
| 6 | 6 |
| 7 #include <stddef.h> | 7 #include <stddef.h> |
| 8 | 8 |
| 9 #include <memory> |
| 10 |
| 9 #include "base/allocator/allocator_extension.h" | 11 #include "base/allocator/allocator_extension.h" |
| 10 #include "base/allocator/allocator_shim.h" | 12 #include "base/allocator/allocator_shim.h" |
| 11 #include "base/allocator/features.h" | 13 #include "base/allocator/features.h" |
| 12 #include "base/debug/profiler.h" | 14 #include "base/debug/profiler.h" |
| 13 #include "base/trace_event/heap_profiler_allocation_context.h" | 15 #include "base/trace_event/heap_profiler_allocation_context.h" |
| 14 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" | 16 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" |
| 15 #include "base/trace_event/heap_profiler_allocation_register.h" | 17 #include "base/trace_event/heap_profiler_allocation_register.h" |
| 16 #include "base/trace_event/heap_profiler_heap_dump_writer.h" | 18 #include "base/trace_event/heap_profiler_heap_dump_writer.h" |
| 17 #include "base/trace_event/process_memory_dump.h" | 19 #include "base/trace_event/process_memory_dump.h" |
| 18 #include "base/trace_event/trace_event_argument.h" | 20 #include "base/trace_event/trace_event_argument.h" |
| (...skipping 100 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 119 void HookFreeDefiniteSize(const AllocatorDispatch* self, | 121 void HookFreeDefiniteSize(const AllocatorDispatch* self, |
| 120 void* ptr, | 122 void* ptr, |
| 121 size_t size, | 123 size_t size, |
| 122 void* context) { | 124 void* context) { |
| 123 if (ptr) | 125 if (ptr) |
| 124 MallocDumpProvider::GetInstance()->RemoveAllocation(ptr); | 126 MallocDumpProvider::GetInstance()->RemoveAllocation(ptr); |
| 125 const AllocatorDispatch* const next = self->next; | 127 const AllocatorDispatch* const next = self->next; |
| 126 next->free_definite_size_function(next, ptr, size, context); | 128 next->free_definite_size_function(next, ptr, size, context); |
| 127 } | 129 } |
| 128 | 130 |
| 131 void* HookTaggedAlloc(const AllocatorDispatch* self, size_t size, const char* ta
g) { |
| 132 const AllocatorDispatch* const next = self->next; |
| 133 void* ptr = next->tagged_alloc_function ? |
| 134 next->tagged_alloc_function(next, size, tag) : |
| 135 next->alloc_function(next, size, nullptr); |
| 136 if (ptr) |
| 137 MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size, tag); |
| 138 return ptr; |
| 139 } |
| 140 |
| 129 AllocatorDispatch g_allocator_hooks = { | 141 AllocatorDispatch g_allocator_hooks = { |
| 130 &HookAlloc, /* alloc_function */ | 142 &HookAlloc, /* alloc_function */ |
| 131 &HookZeroInitAlloc, /* alloc_zero_initialized_function */ | 143 &HookZeroInitAlloc, /* alloc_zero_initialized_function */ |
| 132 &HookllocAligned, /* alloc_aligned_function */ | 144 &HookllocAligned, /* alloc_aligned_function */ |
| 133 &HookRealloc, /* realloc_function */ | 145 &HookRealloc, /* realloc_function */ |
| 134 &HookFree, /* free_function */ | 146 &HookFree, /* free_function */ |
| 135 &HookGetSizeEstimate, /* get_size_estimate_function */ | 147 &HookGetSizeEstimate, /* get_size_estimate_function */ |
| 136 &HookBatchMalloc, /* batch_malloc_function */ | 148 &HookBatchMalloc, /* batch_malloc_function */ |
| 137 &HookBatchFree, /* batch_free_function */ | 149 &HookBatchFree, /* batch_free_function */ |
| 138 &HookFreeDefiniteSize, /* free_definite_size_function */ | 150 &HookFreeDefiniteSize, /* free_definite_size_function */ |
| 151 &HookTaggedAlloc, /* tagged_alloc_function */ |
| 139 nullptr, /* next */ | 152 nullptr, /* next */ |
| 140 }; | 153 }; |
| 141 #endif // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) | 154 #endif // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) |
| 142 | 155 |
| 143 #if defined(OS_WIN) | 156 #if defined(OS_WIN) |
| 144 // A structure containing some information about a given heap. | 157 // A structure containing some information about a given heap. |
| 145 struct WinHeapInfo { | 158 struct WinHeapInfo { |
| 146 size_t committed_size; | 159 size_t committed_size; |
| 147 size_t uncommitted_size; | 160 size_t uncommitted_size; |
| 148 size_t allocated_size; | 161 size_t allocated_size; |
| (...skipping 125 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 274 // Explicitly specify why extra memory is resident. In tcmalloc it accounts | 287 // Explicitly specify why extra memory is resident. In tcmalloc it accounts |
| 275 // for free lists and caches. In mac and ios it accounts for the | 288 // for free lists and caches. In mac and ios it accounts for the |
| 276 // fragmentation and metadata. | 289 // fragmentation and metadata. |
| 277 MemoryAllocatorDump* other_dump = | 290 MemoryAllocatorDump* other_dump = |
| 278 pmd->CreateAllocatorDump("malloc/metadata_fragmentation_caches"); | 291 pmd->CreateAllocatorDump("malloc/metadata_fragmentation_caches"); |
| 279 other_dump->AddScalar(MemoryAllocatorDump::kNameSize, | 292 other_dump->AddScalar(MemoryAllocatorDump::kNameSize, |
| 280 MemoryAllocatorDump::kUnitsBytes, | 293 MemoryAllocatorDump::kUnitsBytes, |
| 281 resident_size - allocated_objects_size); | 294 resident_size - allocated_objects_size); |
| 282 } | 295 } |
| 283 | 296 |
| 297 #if defined(_LIBCPP_COUNTING_ALLOCATOR) |
| 298 { |
| 299 std::string dump_name = "stl"; |
| 300 auto* dump = pmd->CreateAllocatorDump(dump_name.c_str()); |
| 301 |
| 302 size_t total_size = 0; |
| 303 for (size_t i = 0; i != std::allocation_group_count; ++i) { |
| 304 auto group = static_cast<std::allocation_group>(i); |
| 305 auto stats = std::allocation_counter::get(group); |
| 306 |
| 307 #define DUMP_ALLOCATION_COUNTERS(name) { \ |
| 308 std::string group_dump_name = \ |
| 309 dump_name + "/" #name "/" + \ |
| 310 std::get_allocation_group_name(group); \ |
| 311 auto* group_dump = pmd->CreateAllocatorDump(group_dump_name.c_str()); \ |
| 312 group_dump->AddScalar(MemoryAllocatorDump::kNameSize, \ |
| 313 MemoryAllocatorDump::kUnitsBytes, \ |
| 314 stats.name.size); \ |
| 315 group_dump->AddScalar("usable_size", \ |
| 316 MemoryAllocatorDump::kUnitsBytes, \ |
| 317 stats.name.usable_size); \ |
| 318 group_dump->AddScalar("constructed_size", \ |
| 319 MemoryAllocatorDump::kUnitsBytes, \ |
| 320 stats.name.constructed_size); \ |
| 321 group_dump->AddScalar("payload_size", \ |
| 322 MemoryAllocatorDump::kUnitsBytes, \ |
| 323 stats.name.payload_size); \ |
| 324 group_dump->AddScalar("count", \ |
| 325 MemoryAllocatorDump::kUnitsObjects, \ |
| 326 stats.name.count); \ |
| 327 group_dump->AddScalar("wasted_size", \ |
| 328 MemoryAllocatorDump::kUnitsBytes, \ |
| 329 stats.name.wasted_size()); \ |
| 330 group_dump->AddScalar("utilization", \ |
| 331 MemoryAllocatorDump::kUnitsObjects, \ |
| 332 stats.name.utilization_pct()); \ |
| 333 group_dump->AddScalar("overhead", \ |
| 334 MemoryAllocatorDump::kUnitsObjects, \ |
| 335 stats.name.overhead_pct()); \ |
| 336 } |
| 337 |
| 338 DUMP_ALLOCATION_COUNTERS(live) |
| 339 DUMP_ALLOCATION_COUNTERS(max_size) |
| 340 |
| 341 #undef DUMP_ALLOCATION_COUNTERS |
| 342 |
| 343 total_size += stats.live.size; |
| 344 } |
| 345 |
| 346 dump->AddScalar(MemoryAllocatorDump::kNameSize, |
| 347 MemoryAllocatorDump::kUnitsBytes, |
| 348 total_size); |
| 349 } |
| 350 #endif // _LIBCPP_COUNTING_ALLOCATOR |
| 351 |
| 284 // Heap profiler dumps. | 352 // Heap profiler dumps. |
| 285 if (!heap_profiler_enabled_) | 353 if (!heap_profiler_enabled_) |
| 286 return true; | 354 return true; |
| 287 | 355 |
| 288 // The dumps of the heap profiler should be created only when heap profiling | 356 // The dumps of the heap profiler should be created only when heap profiling |
| 289 // was enabled (--enable-heap-profiling) AND a DETAILED dump is requested. | 357 // was enabled (--enable-heap-profiling) AND a DETAILED dump is requested. |
| 290 // However, when enabled, the overhead of the heap profiler should be always | 358 // However, when enabled, the overhead of the heap profiler should be always |
| 291 // reported to avoid oscillations of the malloc total in LIGHT dumps. | 359 // reported to avoid oscillations of the malloc total in LIGHT dumps. |
| 292 | 360 |
| 293 tid_dumping_heap_ = PlatformThread::CurrentId(); | 361 tid_dumping_heap_ = PlatformThread::CurrentId(); |
| (...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 329 AutoLock lock(allocation_register_lock_); | 397 AutoLock lock(allocation_register_lock_); |
| 330 allocation_register_.reset(); | 398 allocation_register_.reset(); |
| 331 // Insert/RemoveAllocation below will no-op if the register is torn down. | 399 // Insert/RemoveAllocation below will no-op if the register is torn down. |
| 332 // Once disabled, heap profiling will not be re-enabled anymore for the | 400 // Once disabled, heap profiling will not be re-enabled anymore for the |
| 333 // lifetime of the process. | 401 // lifetime of the process. |
| 334 } | 402 } |
| 335 #endif | 403 #endif |
| 336 heap_profiler_enabled_ = enabled; | 404 heap_profiler_enabled_ = enabled; |
| 337 } | 405 } |
| 338 | 406 |
| 339 void MallocDumpProvider::InsertAllocation(void* address, size_t size) { | 407 void MallocDumpProvider::InsertAllocation(void* address, size_t size, const char
* tag) { |
| 408 if (!tag) return; |
| 409 |
| 340 // CurrentId() can be a slow operation (crbug.com/497226). This apparently | 410 // CurrentId() can be a slow operation (crbug.com/497226). This apparently |
| 341 // redundant condition short circuits the CurrentID() calls when unnecessary. | 411 // redundant condition short circuits the CurrentID() calls when unnecessary. |
| 342 if (tid_dumping_heap_ != kInvalidThreadId && | 412 if (tid_dumping_heap_ != kInvalidThreadId && |
| 343 tid_dumping_heap_ == PlatformThread::CurrentId()) | 413 tid_dumping_heap_ == PlatformThread::CurrentId()) |
| 344 return; | 414 return; |
| 345 | 415 |
| 346 // AllocationContextTracker will return nullptr when called re-entrantly. | 416 // AllocationContextTracker will return nullptr when called re-entrantly. |
| 347 // This is the case of GetInstanceForCurrentThread() being called for the | 417 // This is the case of GetInstanceForCurrentThread() being called for the |
| 348 // first time, which causes a new() inside the tracker which re-enters the | 418 // first time, which causes a new() inside the tracker which re-enters the |
| 349 // heap profiler, in which case we just want to early out. | 419 // heap profiler, in which case we just want to early out. |
| 350 auto* tracker = AllocationContextTracker::GetInstanceForCurrentThread(); | 420 auto* tracker = AllocationContextTracker::GetInstanceForCurrentThread(); |
| 351 if (!tracker) | 421 if (!tracker) |
| 352 return; | 422 return; |
| 353 | 423 |
| 354 AllocationContext context; | 424 AllocationContext context; |
| 355 if (!tracker->GetContextSnapshot(&context)) | 425 if (!tracker->GetContextSnapshot(&context)) |
| 356 return; | 426 return; |
| 427 context.type_name = tag; |
| 357 | 428 |
| 358 AutoLock lock(allocation_register_lock_); | 429 AutoLock lock(allocation_register_lock_); |
| 359 if (!allocation_register_) | 430 if (!allocation_register_) |
| 360 return; | 431 return; |
| 361 | 432 |
| 362 allocation_register_->Insert(address, size, context); | 433 allocation_register_->Insert(address, size, context); |
| 363 } | 434 } |
| 364 | 435 |
| 365 void MallocDumpProvider::RemoveAllocation(void* address) { | 436 void MallocDumpProvider::RemoveAllocation(void* address) { |
| 366 // No re-entrancy is expected here as none of the calls below should | 437 // No re-entrancy is expected here as none of the calls below should |
| 367 // cause a free()-s (|allocation_register_| does its own heap management). | 438 // cause a free()-s (|allocation_register_| does its own heap management). |
| 368 if (tid_dumping_heap_ != kInvalidThreadId && | 439 if (tid_dumping_heap_ != kInvalidThreadId && |
| 369 tid_dumping_heap_ == PlatformThread::CurrentId()) | 440 tid_dumping_heap_ == PlatformThread::CurrentId()) |
| 370 return; | 441 return; |
| 371 AutoLock lock(allocation_register_lock_); | 442 AutoLock lock(allocation_register_lock_); |
| 372 if (!allocation_register_) | 443 if (!allocation_register_) |
| 373 return; | 444 return; |
| 374 allocation_register_->Remove(address); | 445 allocation_register_->Remove(address); |
| 375 } | 446 } |
| 376 | 447 |
| 377 } // namespace trace_event | 448 } // namespace trace_event |
| 378 } // namespace base | 449 } // namespace base |
| OLD | NEW |