Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(715)

Side by Side Diff: base/trace_event/memory_dump_manager.cc

Issue 2582453002: [tracing] Implement polling in MemoryDumpManager (Closed)
Patch Set: doc link and fix test. Created 3 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/trace_event/memory_dump_manager.h" 5 #include "base/trace_event/memory_dump_manager.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <utility> 8 #include <utility>
9 9
10 #include "base/allocator/features.h" 10 #include "base/allocator/features.h"
11 #include "base/atomic_sequence_num.h" 11 #include "base/atomic_sequence_num.h"
12 #include "base/base_switches.h" 12 #include "base/base_switches.h"
13 #include "base/command_line.h" 13 #include "base/command_line.h"
14 #include "base/compiler_specific.h" 14 #include "base/compiler_specific.h"
15 #include "base/debug/debugging_flags.h" 15 #include "base/debug/debugging_flags.h"
16 #include "base/debug/stack_trace.h" 16 #include "base/debug/stack_trace.h"
17 #include "base/debug/thread_heap_usage_tracker.h" 17 #include "base/debug/thread_heap_usage_tracker.h"
18 #include "base/memory/ptr_util.h" 18 #include "base/memory/ptr_util.h"
19 #include "base/threading/thread.h" 19 #include "base/threading/thread.h"
20 #include "base/threading/thread_task_runner_handle.h" 20 #include "base/threading/thread_task_runner_handle.h"
21 #include "base/trace_event/heap_profiler.h" 21 #include "base/trace_event/heap_profiler.h"
22 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" 22 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
23 #include "base/trace_event/heap_profiler_event_filter.h" 23 #include "base/trace_event/heap_profiler_event_filter.h"
24 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h" 24 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
25 #include "base/trace_event/heap_profiler_type_name_deduplicator.h" 25 #include "base/trace_event/heap_profiler_type_name_deduplicator.h"
26 #include "base/trace_event/malloc_dump_provider.h" 26 #include "base/trace_event/malloc_dump_provider.h"
27 #include "base/trace_event/memory_dump_provider.h" 27 #include "base/trace_event/memory_dump_provider.h"
28 #include "base/trace_event/memory_dump_scheduler.h"
28 #include "base/trace_event/memory_dump_session_state.h" 29 #include "base/trace_event/memory_dump_session_state.h"
29 #include "base/trace_event/memory_infra_background_whitelist.h" 30 #include "base/trace_event/memory_infra_background_whitelist.h"
30 #include "base/trace_event/process_memory_dump.h" 31 #include "base/trace_event/process_memory_dump.h"
31 #include "base/trace_event/trace_event.h" 32 #include "base/trace_event/trace_event.h"
32 #include "base/trace_event/trace_event_argument.h" 33 #include "base/trace_event/trace_event_argument.h"
33 #include "build/build_config.h" 34 #include "build/build_config.h"
34 35
35 #if defined(OS_ANDROID) 36 #if defined(OS_ANDROID)
36 #include "base/trace_event/java_heap_dump_provider_android.h" 37 #include "base/trace_event/java_heap_dump_provider_android.h"
37 #endif 38 #endif
(...skipping 369 matching lines...) Expand 10 before | Expand all | Expand 10 after
407 // to just skip it, without actually invoking the |mdp|, which might be 408 // to just skip it, without actually invoking the |mdp|, which might be
408 // destroyed by the caller soon after this method returns. 409 // destroyed by the caller soon after this method returns.
409 (*mdp_iter)->disabled = true; 410 (*mdp_iter)->disabled = true;
410 dump_providers_.erase(mdp_iter); 411 dump_providers_.erase(mdp_iter);
411 } 412 }
412 413
413 void MemoryDumpManager::RegisterPollingMDPOnDumpThread( 414 void MemoryDumpManager::RegisterPollingMDPOnDumpThread(
414 scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) { 415 scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
415 AutoLock lock(lock_); 416 AutoLock lock(lock_);
416 dump_providers_for_polling_.insert(mdpinfo); 417 dump_providers_for_polling_.insert(mdpinfo);
418
419 // Notify ready for polling when first polling supported provider is
420 // registered. This handles the case where OnTraceLogEnabled() did not notify
421 // ready since no polling supported mdp has yet been registered.
422 if (dump_providers_for_polling_.size() == 1)
423 dump_scheduler_->NotifyPollingSupported();
417 } 424 }
418 425
419 void MemoryDumpManager::UnregisterPollingMDPOnDumpThread( 426 void MemoryDumpManager::UnregisterPollingMDPOnDumpThread(
420 scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) { 427 scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
421 mdpinfo->dump_provider->SuspendFastMemoryPolling(); 428 mdpinfo->dump_provider->SuspendFastMemoryPolling();
422 429
423 AutoLock lock(lock_); 430 AutoLock lock(lock_);
424 dump_providers_for_polling_.erase(mdpinfo); 431 dump_providers_for_polling_.erase(mdpinfo);
432 DCHECK(!dump_providers_for_polling_.empty())
433 << "All polling MDPs cannot be unregistered.";
425 } 434 }
426 435
427 void MemoryDumpManager::RequestGlobalDump( 436 void MemoryDumpManager::RequestGlobalDump(
428 MemoryDumpType dump_type, 437 MemoryDumpType dump_type,
429 MemoryDumpLevelOfDetail level_of_detail, 438 MemoryDumpLevelOfDetail level_of_detail,
430 const MemoryDumpCallback& callback) { 439 const MemoryDumpCallback& callback) {
431 // Bail out immediately if tracing is not enabled at all or if the dump mode 440 // Bail out immediately if tracing is not enabled at all or if the dump mode
432 // is not allowed. 441 // is not allowed.
433 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) || 442 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) ||
434 !IsDumpModeAllowed(level_of_detail)) { 443 !IsDumpModeAllowed(level_of_detail)) {
(...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after
496 // here. SetupNextMemoryDump() is robust enough to tolerate it and will 505 // here. SetupNextMemoryDump() is robust enough to tolerate it and will
497 // NACK the dump. 506 // NACK the dump.
498 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( 507 pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
499 args, dump_providers_, session_state_, callback, 508 args, dump_providers_, session_state_, callback,
500 dump_thread_ ? dump_thread_->task_runner() : nullptr)); 509 dump_thread_ ? dump_thread_->task_runner() : nullptr));
501 510
502 // Safety check to prevent reaching here without calling RequestGlobalDump, 511 // Safety check to prevent reaching here without calling RequestGlobalDump,
503 // with disallowed modes. If |session_state_| is null then tracing is 512 // with disallowed modes. If |session_state_| is null then tracing is
504 // disabled. 513 // disabled.
505 CHECK(!session_state_ || 514 CHECK(!session_state_ ||
506 session_state_->memory_dump_config().allowed_dump_modes.count( 515 session_state_->IsDumpModeAllowed(args.level_of_detail));
507 args.level_of_detail));
508 } 516 }
509 517
510 TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump", 518 TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump",
511 TRACE_ID_MANGLE(args.dump_guid), 519 TRACE_ID_MANGLE(args.dump_guid),
512 TRACE_EVENT_FLAG_FLOW_OUT); 520 TRACE_EVENT_FLAG_FLOW_OUT);
513 521
514 // Start the process dump. This involves task runner hops as specified by the 522 // Start the process dump. This involves task runner hops as specified by the
515 // MemoryDumpProvider(s) in RegisterDumpProvider()). 523 // MemoryDumpProvider(s) in RegisterDumpProvider()).
516 SetupNextMemoryDump(std::move(pmd_async_state)); 524 SetupNextMemoryDump(std::move(pmd_async_state));
517 } 525 }
(...skipping 152 matching lines...) Expand 10 before | Expand all | Expand 10 after
670 args); 678 args);
671 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd); 679 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd);
672 mdpinfo->consecutive_failures = 680 mdpinfo->consecutive_failures =
673 dump_successful ? 0 : mdpinfo->consecutive_failures + 1; 681 dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
674 } 682 }
675 683
676 pmd_async_state->pending_dump_providers.pop_back(); 684 pmd_async_state->pending_dump_providers.pop_back();
677 SetupNextMemoryDump(std::move(pmd_async_state)); 685 SetupNextMemoryDump(std::move(pmd_async_state));
678 } 686 }
679 687
680 void MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) { 688 bool MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) {
689 #if DCHECK_IS_ON()
690 {
691 AutoLock lock(lock_);
692 if (dump_thread_)
693 DCHECK(dump_thread_->task_runner()->BelongsToCurrentThread());
694 }
695 #endif
696 if (dump_providers_for_polling_.empty())
697 return false;
698
681 *memory_total = 0; 699 *memory_total = 0;
682 // Note that we call PollFastMemoryTotal() even if the dump provider is 700 // Note that we call PollFastMemoryTotal() even if the dump provider is
683 // disabled (unregistered). This is to avoid taking lock while polling. 701 // disabled (unregistered). This is to avoid taking lock while polling.
684 for (const auto& mdpinfo : dump_providers_for_polling_) { 702 for (const auto& mdpinfo : dump_providers_for_polling_) {
685 uint64_t value = 0; 703 uint64_t value = 0;
686 mdpinfo->dump_provider->PollFastMemoryTotal(&value); 704 mdpinfo->dump_provider->PollFastMemoryTotal(&value);
687 *memory_total += value; 705 *memory_total += value;
688 } 706 }
689 return; 707 return true;
690 } 708 }
691 709
692 // static 710 // static
693 void MemoryDumpManager::FinalizeDumpAndAddToTrace( 711 void MemoryDumpManager::FinalizeDumpAndAddToTrace(
694 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { 712 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
695 HEAP_PROFILER_SCOPED_IGNORE; 713 HEAP_PROFILER_SCOPED_IGNORE;
696 DCHECK(pmd_async_state->pending_dump_providers.empty()); 714 DCHECK(pmd_async_state->pending_dump_providers.empty());
697 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; 715 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid;
698 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { 716 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) {
699 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = 717 scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after
758 // while the |lock_| is taken; 776 // while the |lock_| is taken;
759 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); 777 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
760 778
761 // Spin-up the thread used to invoke unbound dump providers. 779 // Spin-up the thread used to invoke unbound dump providers.
762 std::unique_ptr<Thread> dump_thread(new Thread("MemoryInfra")); 780 std::unique_ptr<Thread> dump_thread(new Thread("MemoryInfra"));
763 if (!dump_thread->Start()) { 781 if (!dump_thread->Start()) {
764 LOG(ERROR) << "Failed to start the memory-infra thread for tracing"; 782 LOG(ERROR) << "Failed to start the memory-infra thread for tracing";
765 return; 783 return;
766 } 784 }
767 785
768 const TraceConfig trace_config = 786 const TraceConfig& trace_config =
769 TraceLog::GetInstance()->GetCurrentTraceConfig(); 787 TraceLog::GetInstance()->GetCurrentTraceConfig();
788 const TraceConfig::MemoryDumpConfig& memory_dump_config =
789 trace_config.memory_dump_config();
770 scoped_refptr<MemoryDumpSessionState> session_state = 790 scoped_refptr<MemoryDumpSessionState> session_state =
771 new MemoryDumpSessionState; 791 new MemoryDumpSessionState;
772 session_state->SetMemoryDumpConfig(trace_config.memory_dump_config()); 792 session_state->SetAllowedDumpModes(memory_dump_config.allowed_dump_modes);
793 session_state->set_heap_profiler_breakdown_threshold_bytes(
794 memory_dump_config.heap_profiler_options.breakdown_threshold_bytes);
773 if (heap_profiling_enabled_) { 795 if (heap_profiling_enabled_) {
774 // If heap profiling is enabled, the stack frame deduplicator and type name 796 // If heap profiling is enabled, the stack frame deduplicator and type name
 775 // deduplicator will be in use. Add metadata events to write the frames 797 // deduplicator will be in use. Add metadata events to write the frames
776 // and type IDs. 798 // and type IDs.
777 session_state->SetStackFrameDeduplicator( 799 session_state->SetStackFrameDeduplicator(
778 WrapUnique(new StackFrameDeduplicator)); 800 WrapUnique(new StackFrameDeduplicator));
779 801
780 session_state->SetTypeNameDeduplicator( 802 session_state->SetTypeNameDeduplicator(
781 WrapUnique(new TypeNameDeduplicator)); 803 WrapUnique(new TypeNameDeduplicator));
782 804
783 TRACE_EVENT_API_ADD_METADATA_EVENT( 805 TRACE_EVENT_API_ADD_METADATA_EVENT(
784 TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames", 806 TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames",
785 "stackFrames", 807 "stackFrames",
786 MakeUnique<SessionStateConvertableProxy<StackFrameDeduplicator>>( 808 MakeUnique<SessionStateConvertableProxy<StackFrameDeduplicator>>(
787 session_state, &MemoryDumpSessionState::stack_frame_deduplicator)); 809 session_state, &MemoryDumpSessionState::stack_frame_deduplicator));
788 810
789 TRACE_EVENT_API_ADD_METADATA_EVENT( 811 TRACE_EVENT_API_ADD_METADATA_EVENT(
790 TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames", 812 TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames",
791 "typeNames", 813 "typeNames",
792 MakeUnique<SessionStateConvertableProxy<TypeNameDeduplicator>>( 814 MakeUnique<SessionStateConvertableProxy<TypeNameDeduplicator>>(
793 session_state, &MemoryDumpSessionState::type_name_deduplicator)); 815 session_state, &MemoryDumpSessionState::type_name_deduplicator));
794 } 816 }
795 817
818 std::unique_ptr<MemoryDumpScheduler> dump_scheduler(
819 new MemoryDumpScheduler(this, dump_thread->task_runner()));
820 DCHECK_LE(memory_dump_config.triggers.size(), 3u);
821 for (const auto& trigger : memory_dump_config.triggers) {
822 if (!session_state->IsDumpModeAllowed(trigger.level_of_detail)) {
823 NOTREACHED();
824 continue;
825 }
826 dump_scheduler->AddTrigger(trigger.trigger_type, trigger.level_of_detail,
827 trigger.min_time_between_dumps_ms);
828 }
829
796 { 830 {
797 AutoLock lock(lock_); 831 AutoLock lock(lock_);
798 832
799 DCHECK(delegate_); // At this point we must have a delegate. 833 DCHECK(delegate_); // At this point we must have a delegate.
800 session_state_ = session_state; 834 session_state_ = session_state;
801 835
802 DCHECK(!dump_thread_); 836 DCHECK(!dump_thread_);
803 dump_thread_ = std::move(dump_thread); 837 dump_thread_ = std::move(dump_thread);
838 dump_scheduler_ = std::move(dump_scheduler);
839
840 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
804 841
805 dump_providers_for_polling_.clear(); 842 dump_providers_for_polling_.clear();
806 for (const auto& mdpinfo : dump_providers_) { 843 for (const auto& mdpinfo : dump_providers_) {
807 if (mdpinfo->options.is_fast_polling_supported) 844 if (mdpinfo->options.is_fast_polling_supported)
808 dump_providers_for_polling_.insert(mdpinfo); 845 dump_providers_for_polling_.insert(mdpinfo);
809 } 846 }
847 // Notify polling supported only if some polling supported provider was
848 // registered, else RegisterPollingMDPOnDumpThread() will notify when first
849 // polling MDP registers.
850 if (!dump_providers_for_polling_.empty())
851 dump_scheduler_->NotifyPollingSupported();
810 852
811 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); 853 // Only coordinator process triggers periodic global memory dumps.
812 854 if (is_coordinator_)
813 if (!is_coordinator_) 855 dump_scheduler_->NotifyPeriodicTriggerSupported();
814 return;
815 } 856 }
816 857
817 // Enable periodic dumps if necessary.
818 periodic_dump_timer_.Start(trace_config.memory_dump_config().triggers);
819 } 858 }
820 859
821 void MemoryDumpManager::OnTraceLogDisabled() { 860 void MemoryDumpManager::OnTraceLogDisabled() {
822 // There might be a memory dump in progress while this happens. Therefore, 861 // There might be a memory dump in progress while this happens. Therefore,
823 // ensure that the MDM state which depends on the tracing enabled / disabled 862 // ensure that the MDM state which depends on the tracing enabled / disabled
824 // state is always accessed by the dumping methods holding the |lock_|. 863 // state is always accessed by the dumping methods holding the |lock_|.
825 if (!subtle::NoBarrier_Load(&memory_tracing_enabled_)) 864 if (!subtle::NoBarrier_Load(&memory_tracing_enabled_))
826 return; 865 return;
827 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); 866 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
828 std::unique_ptr<Thread> dump_thread; 867 std::unique_ptr<Thread> dump_thread;
868 std::unique_ptr<MemoryDumpScheduler> scheduler;
829 { 869 {
830 AutoLock lock(lock_); 870 AutoLock lock(lock_);
831 dump_thread = std::move(dump_thread_); 871 dump_thread = std::move(dump_thread_);
832 session_state_ = nullptr; 872 session_state_ = nullptr;
873 scheduler = std::move(dump_scheduler_);
833 } 874 }
875 scheduler->DisableAllTriggers();
834 876
835 // Thread stops are blocking and must be performed outside of the |lock_| 877 // Thread stops are blocking and must be performed outside of the |lock_|
836 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). 878 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it).
837 periodic_dump_timer_.Stop();
838 if (dump_thread) 879 if (dump_thread)
839 dump_thread->Stop(); 880 dump_thread->Stop();
840 881
841 // |dump_providers_for_polling_| must be cleared only after the dump thread is 882 // |dump_providers_for_polling_| must be cleared only after the dump thread is
842 // stopped (polling tasks are done). 883 // stopped (polling tasks are done).
843 { 884 {
844 AutoLock lock(lock_); 885 AutoLock lock(lock_);
845 for (const auto& mdpinfo : dump_providers_for_polling_) 886 for (const auto& mdpinfo : dump_providers_for_polling_)
846 mdpinfo->dump_provider->SuspendFastMemoryPolling(); 887 mdpinfo->dump_provider->SuspendFastMemoryPolling();
847 dump_providers_for_polling_.clear(); 888 dump_providers_for_polling_.clear();
848 } 889 }
849 } 890 }
850 891
851 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) { 892 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
852 AutoLock lock(lock_); 893 AutoLock lock(lock_);
853 if (!session_state_) 894 if (!session_state_)
854 return false; 895 return false;
855 return session_state_->memory_dump_config().allowed_dump_modes.count( 896 return session_state_->IsDumpModeAllowed(dump_mode);
856 dump_mode) != 0;
857 } 897 }
858 898
859 uint64_t MemoryDumpManager::GetTracingProcessId() const { 899 uint64_t MemoryDumpManager::GetTracingProcessId() const {
860 return delegate_->GetTracingProcessId(); 900 return delegate_->GetTracingProcessId();
861 } 901 }
862 902
863 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( 903 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
864 MemoryDumpProvider* dump_provider, 904 MemoryDumpProvider* dump_provider,
865 const char* name, 905 const char* name,
866 scoped_refptr<SequencedTaskRunner> task_runner, 906 scoped_refptr<SequencedTaskRunner> task_runner,
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after
912 const MemoryDumpArgs& dump_args) { 952 const MemoryDumpArgs& dump_args) {
913 auto iter = process_dumps.find(pid); 953 auto iter = process_dumps.find(pid);
914 if (iter == process_dumps.end()) { 954 if (iter == process_dumps.end()) {
915 std::unique_ptr<ProcessMemoryDump> new_pmd( 955 std::unique_ptr<ProcessMemoryDump> new_pmd(
916 new ProcessMemoryDump(session_state, dump_args)); 956 new ProcessMemoryDump(session_state, dump_args));
917 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; 957 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first;
918 } 958 }
919 return iter->second.get(); 959 return iter->second.get();
920 } 960 }
921 961
922 MemoryDumpManager::PeriodicGlobalDumpTimer::PeriodicGlobalDumpTimer() {}
923
924 MemoryDumpManager::PeriodicGlobalDumpTimer::~PeriodicGlobalDumpTimer() {
925 Stop();
926 }
927
928 void MemoryDumpManager::PeriodicGlobalDumpTimer::Start(
929 const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list) {
930 if (triggers_list.empty())
931 return;
932
933 // At the moment the periodic support is limited to at most one periodic
934 // trigger per dump mode. All intervals should be an integer multiple of the
935 // smallest interval specified.
936 periodic_dumps_count_ = 0;
937 uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
938 uint32_t light_dump_period_ms = 0;
939 uint32_t heavy_dump_period_ms = 0;
940 DCHECK_LE(triggers_list.size(), 3u);
941 auto* mdm = MemoryDumpManager::GetInstance();
942 for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) {
943 DCHECK_NE(0u, config.min_time_between_dumps_ms);
944 DCHECK_EQ(MemoryDumpType::PERIODIC_INTERVAL, config.trigger_type)
 945 << "Only periodic_interval triggers are supported";
946 switch (config.level_of_detail) {
947 case MemoryDumpLevelOfDetail::BACKGROUND:
948 DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::BACKGROUND));
949 break;
950 case MemoryDumpLevelOfDetail::LIGHT:
951 DCHECK_EQ(0u, light_dump_period_ms);
952 DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::LIGHT));
953 light_dump_period_ms = config.min_time_between_dumps_ms;
954 break;
955 case MemoryDumpLevelOfDetail::DETAILED:
956 DCHECK_EQ(0u, heavy_dump_period_ms);
957 DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::DETAILED));
958 heavy_dump_period_ms = config.min_time_between_dumps_ms;
959 break;
960 }
961 min_timer_period_ms =
962 std::min(min_timer_period_ms, config.min_time_between_dumps_ms);
963 }
964
965 DCHECK_EQ(0u, light_dump_period_ms % min_timer_period_ms);
966 light_dump_rate_ = light_dump_period_ms / min_timer_period_ms;
967 DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
968 heavy_dump_rate_ = heavy_dump_period_ms / min_timer_period_ms;
969
970 timer_.Start(FROM_HERE, TimeDelta::FromMilliseconds(min_timer_period_ms),
971 base::Bind(&PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump,
972 base::Unretained(this)));
973 }
974
975 void MemoryDumpManager::PeriodicGlobalDumpTimer::Stop() {
976 if (IsRunning()) {
977 timer_.Stop();
978 }
979 }
980
981 bool MemoryDumpManager::PeriodicGlobalDumpTimer::IsRunning() {
982 return timer_.IsRunning();
983 }
984
985 void MemoryDumpManager::PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump() {
986 MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
987 if (light_dump_rate_ > 0 && periodic_dumps_count_ % light_dump_rate_ == 0)
988 level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
989 if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0)
990 level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
991 ++periodic_dumps_count_;
992
993 MemoryDumpManager::GetInstance()->RequestGlobalDump(
994 MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
995 }
996
997 } // namespace trace_event 962 } // namespace trace_event
998 } // namespace base 963 } // namespace base
OLDNEW
« no previous file with comments | « base/trace_event/memory_dump_manager.h ('k') | base/trace_event/memory_dump_manager_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698