| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <utility> | 8 #include <utility> |
| 9 | 9 |
| 10 #include "base/atomic_sequence_num.h" | 10 #include "base/atomic_sequence_num.h" |
| (...skipping 80 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 91 GetterFunctPtr const getter_function; | 91 GetterFunctPtr const getter_function; |
| 92 }; | 92 }; |
| 93 | 93 |
| 94 } // namespace | 94 } // namespace |
| 95 | 95 |
| 96 // static | 96 // static |
| 97 const char* const MemoryDumpManager::kTraceCategory = | 97 const char* const MemoryDumpManager::kTraceCategory = |
| 98 TRACE_DISABLED_BY_DEFAULT("memory-infra"); | 98 TRACE_DISABLED_BY_DEFAULT("memory-infra"); |
| 99 | 99 |
| 100 // static | 100 // static |
| 101 const char* const MemoryDumpManager::kMemoryInfraDumpLogName = |
| 102 "memory-infra dump"; |
| 103 |
| 104 // static |
| 101 const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3; | 105 const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3; |
| 102 | 106 |
| 103 // static | 107 // static |
| 104 const uint64_t MemoryDumpManager::kInvalidTracingProcessId = 0; | 108 const uint64_t MemoryDumpManager::kInvalidTracingProcessId = 0; |
| 105 | 109 |
| 106 // static | 110 // static |
| 107 const char* const MemoryDumpManager::kSystemAllocatorPoolName = | 111 const char* const MemoryDumpManager::kSystemAllocatorPoolName = |
| 108 #if defined(MALLOC_MEMORY_TRACING_SUPPORTED) | 112 #if defined(MALLOC_MEMORY_TRACING_SUPPORTED) |
| 109 MallocDumpProvider::kAllocatedObjects; | 113 MallocDumpProvider::kAllocatedObjects; |
| 110 #elif defined(OS_WIN) | 114 #elif defined(OS_WIN) |
| (...skipping 219 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 330 (*mdp_iter)->disabled = true; | 334 (*mdp_iter)->disabled = true; |
| 331 dump_providers_.erase(mdp_iter); | 335 dump_providers_.erase(mdp_iter); |
| 332 } | 336 } |
| 333 | 337 |
| 334 void MemoryDumpManager::RequestGlobalDump( | 338 void MemoryDumpManager::RequestGlobalDump( |
| 335 MemoryDumpType dump_type, | 339 MemoryDumpType dump_type, |
| 336 MemoryDumpLevelOfDetail level_of_detail, | 340 MemoryDumpLevelOfDetail level_of_detail, |
| 337 const MemoryDumpCallback& callback) { | 341 const MemoryDumpCallback& callback) { |
| 338 // Bail out immediately if tracing is not enabled at all. | 342 // Bail out immediately if tracing is not enabled at all. |
| 339 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) { | 343 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) { |
| 344 VLOG(1) << kMemoryInfraDumpLogName << " failed because " << kTraceCategory |
| 345 << " tracing category is not enabled"; |
| 340 if (!callback.is_null()) | 346 if (!callback.is_null()) |
| 341 callback.Run(0u /* guid */, false /* success */); | 347 callback.Run(0u /* guid */, false /* success */); |
| 342 return; | 348 return; |
| 343 } | 349 } |
| 344 | 350 |
| 345 const uint64_t guid = | 351 const uint64_t guid = |
| 346 TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext()); | 352 TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext()); |
| 347 | 353 |
| 348 // Creates an async event to keep track of the global dump evolution. | 354 // Creates an async event to keep track of the global dump evolution. |
| 349 // The |wrapped_callback| will generate the ASYNC_END event and then invoke | 355 // The |wrapped_callback| will generate the ASYNC_END event and then invoke |
| (...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 415 // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs | 421 // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs |
| 416 // in the PostTask below don't end up registering their own dump providers | 422 // in the PostTask below don't end up registering their own dump providers |
| 417 // (for discounting trace memory overhead) while holding the |lock_|. | 423 // (for discounting trace memory overhead) while holding the |lock_|. |
| 418 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); | 424 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); |
| 419 | 425 |
| 420 // |dump_thread_| might be destroyed before getting this point. | 426 // |dump_thread_| might be destroyed before getting this point. |
| 421 // It means that tracing was disabled right before starting this dump. | 427 // It means that tracing was disabled right before starting this dump. |
| 422 // Anyway either tracing is stopped or this was the last hop, create a trace | 428 // Anyway either tracing is stopped or this was the last hop, create a trace |
| 423 // event, add it to the trace and finalize process dump invoking the callback. | 429 // event, add it to the trace and finalize process dump invoking the callback. |
| 424 if (!pmd_async_state->dump_thread_task_runner.get()) { | 430 if (!pmd_async_state->dump_thread_task_runner.get()) { |
| 431 if (pmd_async_state->pending_dump_providers.empty()) { |
| 432 VLOG(1) << kMemoryInfraDumpLogName << " failed because dump thread was" |
| 433 << " destroyed before finalizing the dump"; |
| 434 } else { |
| 435 VLOG(1) << kMemoryInfraDumpLogName << " failed because dump thread was" |
| 436 << " destroyed before dumping " |
| 437 << pmd_async_state->pending_dump_providers.back().get()->name; |
| 438 } |
| 425 pmd_async_state->dump_successful = false; | 439 pmd_async_state->dump_successful = false; |
| 426 pmd_async_state->pending_dump_providers.clear(); | 440 pmd_async_state->pending_dump_providers.clear(); |
| 427 } | 441 } |
| 428 if (pmd_async_state->pending_dump_providers.empty()) | 442 if (pmd_async_state->pending_dump_providers.empty()) |
| 429 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state)); | 443 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state)); |
| 430 | 444 |
| 431 // Read MemoryDumpProviderInfo thread safety considerations in | 445 // Read MemoryDumpProviderInfo thread safety considerations in |
| 432 // memory_dump_manager.h when accessing |mdpinfo| fields. | 446 // memory_dump_manager.h when accessing |mdpinfo| fields. |
| 433 MemoryDumpProviderInfo* mdpinfo = | 447 MemoryDumpProviderInfo* mdpinfo = |
| 434 pmd_async_state->pending_dump_providers.back().get(); | 448 pmd_async_state->pending_dump_providers.back().get(); |
| (...skipping 154 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 589 TRACE_EVENT_PHASE_MEMORY_DUMP, | 603 TRACE_EVENT_PHASE_MEMORY_DUMP, |
| 590 TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name, | 604 TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name, |
| 591 trace_event_internal::kGlobalScope, dump_guid, pid, | 605 trace_event_internal::kGlobalScope, dump_guid, pid, |
| 592 kTraceEventNumArgs, kTraceEventArgNames, | 606 kTraceEventNumArgs, kTraceEventArgNames, |
| 593 kTraceEventArgTypes, nullptr /* arg_values */, &event_value, | 607 kTraceEventArgTypes, nullptr /* arg_values */, &event_value, |
| 594 TRACE_EVENT_FLAG_HAS_ID); | 608 TRACE_EVENT_FLAG_HAS_ID); |
| 595 } | 609 } |
| 596 | 610 |
| 597 bool tracing_still_enabled; | 611 bool tracing_still_enabled; |
| 598 TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &tracing_still_enabled); | 612 TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &tracing_still_enabled); |
| 599 if (!tracing_still_enabled) | 613 if (!tracing_still_enabled) { |
| 600 pmd_async_state->dump_successful = false; | 614 pmd_async_state->dump_successful = false; |
| 615 VLOG(1) << kMemoryInfraDumpLogName << " failed because tracing was" |
| 616 << " disabled before the dump was completed"; |
| 617 } |
| 601 | 618 |
| 602 if (!pmd_async_state->callback.is_null()) { | 619 if (!pmd_async_state->callback.is_null()) { |
| 603 pmd_async_state->callback.Run(dump_guid, pmd_async_state->dump_successful); | 620 pmd_async_state->callback.Run(dump_guid, pmd_async_state->dump_successful); |
| 604 pmd_async_state->callback.Reset(); | 621 pmd_async_state->callback.Reset(); |
| 605 } | 622 } |
| 606 | 623 |
| 607 TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump", | 624 TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump", |
| 608 TRACE_ID_MANGLE(dump_guid)); | 625 TRACE_ID_MANGLE(dump_guid)); |
| 609 } | 626 } |
| 610 | 627 |
| (...skipping 209 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 820 if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0) | 837 if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0) |
| 821 level_of_detail = MemoryDumpLevelOfDetail::DETAILED; | 838 level_of_detail = MemoryDumpLevelOfDetail::DETAILED; |
| 822 ++periodic_dumps_count_; | 839 ++periodic_dumps_count_; |
| 823 | 840 |
| 824 MemoryDumpManager::GetInstance()->RequestGlobalDump( | 841 MemoryDumpManager::GetInstance()->RequestGlobalDump( |
| 825 MemoryDumpType::PERIODIC_INTERVAL, level_of_detail); | 842 MemoryDumpType::PERIODIC_INTERVAL, level_of_detail); |
| 826 } | 843 } |
| 827 | 844 |
| 828 } // namespace trace_event | 845 } // namespace trace_event |
| 829 } // namespace base | 846 } // namespace base |
| OLD | NEW |