| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <utility> | 8 #include <utility> |
| 9 | 9 |
| 10 #include "base/atomic_sequence_num.h" | 10 #include "base/atomic_sequence_num.h" |
| (...skipping 320 matching lines...) |
| 331 // to just skip it, without actually invoking the |mdp|, which might be | 331 // to just skip it, without actually invoking the |mdp|, which might be |
| 332 // destroyed by the caller soon after this method returns. | 332 // destroyed by the caller soon after this method returns. |
| 333 (*mdp_iter)->disabled = true; | 333 (*mdp_iter)->disabled = true; |
| 334 dump_providers_.erase(mdp_iter); | 334 dump_providers_.erase(mdp_iter); |
| 335 } | 335 } |
| 336 | 336 |
| 337 void MemoryDumpManager::RequestGlobalDump( | 337 void MemoryDumpManager::RequestGlobalDump( |
| 338 MemoryDumpType dump_type, | 338 MemoryDumpType dump_type, |
| 339 MemoryDumpLevelOfDetail level_of_detail, | 339 MemoryDumpLevelOfDetail level_of_detail, |
| 340 const MemoryDumpCallback& callback) { | 340 const MemoryDumpCallback& callback) { |
| 341 // Bail out immediately if tracing is not enabled at all. | 341 // Bail out immediately if tracing is not enabled at all or if the dump mode |
| 342 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) { | 342 // is not allowed. |
| 343 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) || |
| 344 !IsDumpModeAllowed(level_of_detail)) { |
| 343 VLOG(1) << kLogPrefix << " failed because " << kTraceCategory | 345 VLOG(1) << kLogPrefix << " failed because " << kTraceCategory |
| 344 << " tracing category is not enabled"; | 346 << " tracing category is not enabled or the requested dump mode is " |
| 347 "not allowed by the trace config."; |
| 345 if (!callback.is_null()) | 348 if (!callback.is_null()) |
| 346 callback.Run(0u /* guid */, false /* success */); | 349 callback.Run(0u /* guid */, false /* success */); |
| 347 return; | 350 return; |
| 348 } | 351 } |
| 349 | 352 |
| 350 const uint64_t guid = | 353 const uint64_t guid = |
| 351 TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext()); | 354 TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext()); |
| 352 | 355 |
| 353 // Creates an async event to keep track of the global dump evolution. | 356 // Creates an async event to keep track of the global dump evolution. |
| 354 // The |wrapped_callback| will generate the ASYNC_END event and then invoke | 357 // The |wrapped_callback| will generate the ASYNC_END event and then invoke |
| (...skipping 26 matching lines...) |
| 381 } | 384 } |
| 382 | 385 |
| 383 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, | 386 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, |
| 384 const MemoryDumpCallback& callback) { | 387 const MemoryDumpCallback& callback) { |
| 385 TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump", | 388 TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump", |
| 386 TRACE_ID_MANGLE(args.dump_guid)); | 389 TRACE_ID_MANGLE(args.dump_guid)); |
| 387 | 390 |
| 388 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; | 391 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; |
| 389 { | 392 { |
| 390 AutoLock lock(lock_); | 393 AutoLock lock(lock_); |
| 394 |
| 391 // |dump_thread_| can be nullptr if tracing was disabled before reaching | 395 // |dump_thread_| can be nullptr if tracing was disabled before reaching |
| 392 // here. SetupNextMemoryDump() is robust enough to tolerate it and will | 396 // here. SetupNextMemoryDump() is robust enough to tolerate it and will |
| 393 // NACK the dump. | 397 // NACK the dump. |
| 394 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( | 398 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( |
| 395 args, dump_providers_, session_state_, callback, | 399 args, dump_providers_, session_state_, callback, |
| 396 dump_thread_ ? dump_thread_->task_runner() : nullptr)); | 400 dump_thread_ ? dump_thread_->task_runner() : nullptr)); |
| 401 |
| 402 // Safety check to prevent reaching here with a disallowed dump mode, i.e. |
| 403 // without going through RequestGlobalDump(). If |session_state_| is null |
| 404 // then tracing is disabled. |
| 405 CHECK(!session_state_ || |
| 406 session_state_->memory_dump_config().allowed_dump_modes.count( |
| 407 args.level_of_detail)); |
| 397 } | 408 } |
| 398 | 409 |
| 399 TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump", | 410 TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump", |
| 400 TRACE_ID_MANGLE(args.dump_guid), | 411 TRACE_ID_MANGLE(args.dump_guid), |
| 401 TRACE_EVENT_FLAG_FLOW_OUT); | 412 TRACE_EVENT_FLAG_FLOW_OUT); |
| 402 | 413 |
| 403 // Start the process dump. This involves task runner hops as specified by the | 414 // Start the process dump. This involves task runner hops as specified by the |
| 404 // MemoryDumpProvider(s) in RegisterDumpProvider()). | 415 // MemoryDumpProvider(s) in RegisterDumpProvider()). |
| 405 SetupNextMemoryDump(std::move(pmd_async_state)); | 416 SetupNextMemoryDump(std::move(pmd_async_state)); |
| 406 } | 417 } |
| (...skipping 300 matching lines...) |
| 707 session_state_ = nullptr; | 718 session_state_ = nullptr; |
| 708 } | 719 } |
| 709 | 720 |
| 710 // Thread stops are blocking and must be performed outside of the |lock_| | 721 // Thread stops are blocking and must be performed outside of the |lock_| |
| 711 // or they will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). | 722 // or they will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). |
| 712 periodic_dump_timer_.Stop(); | 723 periodic_dump_timer_.Stop(); |
| 713 if (dump_thread) | 724 if (dump_thread) |
| 714 dump_thread->Stop(); | 725 dump_thread->Stop(); |
| 715 } | 726 } |
| 716 | 727 |
| 728 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) { |
| 729 AutoLock lock(lock_); |
| 730 if (!session_state_) |
| 731 return false; |
| 732 return session_state_->memory_dump_config().allowed_dump_modes.count( |
| 733 dump_mode) != 0; |
| 734 } |
| 735 |
| 717 uint64_t MemoryDumpManager::GetTracingProcessId() const { | 736 uint64_t MemoryDumpManager::GetTracingProcessId() const { |
| 718 return delegate_->GetTracingProcessId(); | 737 return delegate_->GetTracingProcessId(); |
| 719 } | 738 } |
| 720 | 739 |
| 721 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( | 740 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( |
| 722 MemoryDumpProvider* dump_provider, | 741 MemoryDumpProvider* dump_provider, |
| 723 const char* name, | 742 const char* name, |
| 724 scoped_refptr<SequencedTaskRunner> task_runner, | 743 scoped_refptr<SequencedTaskRunner> task_runner, |
| 725 const MemoryDumpProvider::Options& options, | 744 const MemoryDumpProvider::Options& options, |
| 726 bool whitelisted_for_background_mode) | 745 bool whitelisted_for_background_mode) |
| (...skipping 62 matching lines...) |
| 789 return; | 808 return; |
| 790 | 809 |
| 791 // At the moment the periodic support is limited to at most one periodic | 810 // At the moment the periodic support is limited to at most one periodic |
| 792 // trigger per dump mode. All intervals should be an integer multiple of the | 811 // trigger per dump mode. All intervals should be an integer multiple of the |
| 793 // smallest interval specified. | 812 // smallest interval specified. |
| 794 periodic_dumps_count_ = 0; | 813 periodic_dumps_count_ = 0; |
| 795 uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max(); | 814 uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max(); |
| 796 uint32_t light_dump_period_ms = 0; | 815 uint32_t light_dump_period_ms = 0; |
| 797 uint32_t heavy_dump_period_ms = 0; | 816 uint32_t heavy_dump_period_ms = 0; |
| 798 DCHECK_LE(triggers_list.size(), 3u); | 817 DCHECK_LE(triggers_list.size(), 3u); |
| 818 auto* mdm = MemoryDumpManager::GetInstance(); |
| 799 for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) { | 819 for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) { |
| 800 DCHECK_NE(0u, config.periodic_interval_ms); | 820 DCHECK_NE(0u, config.periodic_interval_ms); |
| 801 if (config.level_of_detail == MemoryDumpLevelOfDetail::LIGHT) { | 821 switch (config.level_of_detail) { |
| 802 DCHECK_EQ(0u, light_dump_period_ms); | 822 case MemoryDumpLevelOfDetail::BACKGROUND: |
| 803 light_dump_period_ms = config.periodic_interval_ms; | 823 DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::BACKGROUND)); |
| 804 } else if (config.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) { | 824 break; |
| 805 DCHECK_EQ(0u, heavy_dump_period_ms); | 825 case MemoryDumpLevelOfDetail::LIGHT: |
| 806 heavy_dump_period_ms = config.periodic_interval_ms; | 826 DCHECK_EQ(0u, light_dump_period_ms); |
| 827 DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::LIGHT)); |
| 828 light_dump_period_ms = config.periodic_interval_ms; |
| 829 break; |
| 830 case MemoryDumpLevelOfDetail::DETAILED: |
| 831 DCHECK_EQ(0u, heavy_dump_period_ms); |
| 832 DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::DETAILED)); |
| 833 heavy_dump_period_ms = config.periodic_interval_ms; |
| 834 break; |
| 807 } | 835 } |
| 808 min_timer_period_ms = | 836 min_timer_period_ms = |
| 809 std::min(min_timer_period_ms, config.periodic_interval_ms); | 837 std::min(min_timer_period_ms, config.periodic_interval_ms); |
| 810 } | 838 } |
| 811 | 839 |
| 812 DCHECK_EQ(0u, light_dump_period_ms % min_timer_period_ms); | 840 DCHECK_EQ(0u, light_dump_period_ms % min_timer_period_ms); |
| 813 light_dump_rate_ = light_dump_period_ms / min_timer_period_ms; | 841 light_dump_rate_ = light_dump_period_ms / min_timer_period_ms; |
| 814 DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms); | 842 DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms); |
| 815 heavy_dump_rate_ = heavy_dump_period_ms / min_timer_period_ms; | 843 heavy_dump_rate_ = heavy_dump_period_ms / min_timer_period_ms; |
| 816 | 844 |
| (...skipping 19 matching lines...) |
| 836 if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0) | 864 if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0) |
| 837 level_of_detail = MemoryDumpLevelOfDetail::DETAILED; | 865 level_of_detail = MemoryDumpLevelOfDetail::DETAILED; |
| 838 ++periodic_dumps_count_; | 866 ++periodic_dumps_count_; |
| 839 | 867 |
| 840 MemoryDumpManager::GetInstance()->RequestGlobalDump( | 868 MemoryDumpManager::GetInstance()->RequestGlobalDump( |
| 841 MemoryDumpType::PERIODIC_INTERVAL, level_of_detail); | 869 MemoryDumpType::PERIODIC_INTERVAL, level_of_detail); |
| 842 } | 870 } |
| 843 | 871 |
| 844 } // namespace trace_event | 872 } // namespace trace_event |
| 845 } // namespace base | 873 } // namespace base |
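A minimal usage sketch of the dump-mode gating introduced in this patch, assuming IsDumpModeAllowed() is publicly accessible and that a caller wants to skip requesting a DETAILED dump when the trace config does not whitelist that mode. The helper name RequestDetailedDumpIfAllowed and the memory_dump_request_args.h include are illustrative; RequestGlobalDump(), IsDumpModeAllowed(), and the enum values are the ones shown above.

// Sketch only, not part of the patch.
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_request_args.h"  // assumed header for the enums

namespace {

void RequestDetailedDumpIfAllowed() {
  using base::trace_event::MemoryDumpLevelOfDetail;
  using base::trace_event::MemoryDumpManager;
  using base::trace_event::MemoryDumpType;

  MemoryDumpManager* mdm = MemoryDumpManager::GetInstance();
  // IsDumpModeAllowed() returns false when tracing is disabled
  // (|session_state_| is null) or when the trace config's
  // allowed_dump_modes does not contain DETAILED.
  if (!mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::DETAILED))
    return;
  // RequestGlobalDump() performs the same check and NACKs the callback for a
  // disallowed mode, so this pre-check merely avoids the extra bookkeeping.
  mdm->RequestGlobalDump(MemoryDumpType::PERIODIC_INTERVAL,
                         MemoryDumpLevelOfDetail::DETAILED);
}

}  // namespace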