Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(300)

Side by Side Diff: base/trace_event/memory_dump_manager.cc

Issue 2582453002: [tracing] Implement polling in MemoryDumpManager (Closed)
Patch Set: Address comments. Created 3 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/trace_event/memory_dump_manager.h" 5 #include "base/trace_event/memory_dump_manager.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <utility> 8 #include <utility>
9 9
10 #include "base/allocator/features.h" 10 #include "base/allocator/features.h"
11 #include "base/atomic_sequence_num.h" 11 #include "base/atomic_sequence_num.h"
12 #include "base/base_switches.h" 12 #include "base/base_switches.h"
13 #include "base/command_line.h" 13 #include "base/command_line.h"
14 #include "base/compiler_specific.h" 14 #include "base/compiler_specific.h"
15 #include "base/debug/debugging_flags.h" 15 #include "base/debug/debugging_flags.h"
16 #include "base/debug/stack_trace.h" 16 #include "base/debug/stack_trace.h"
17 #include "base/memory/ptr_util.h" 17 #include "base/memory/ptr_util.h"
18 #include "base/threading/thread.h" 18 #include "base/threading/thread.h"
19 #include "base/threading/thread_task_runner_handle.h" 19 #include "base/threading/thread_task_runner_handle.h"
20 #include "base/trace_event/heap_profiler.h" 20 #include "base/trace_event/heap_profiler.h"
21 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" 21 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
22 #include "base/trace_event/heap_profiler_event_filter.h" 22 #include "base/trace_event/heap_profiler_event_filter.h"
23 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h" 23 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
24 #include "base/trace_event/heap_profiler_type_name_deduplicator.h" 24 #include "base/trace_event/heap_profiler_type_name_deduplicator.h"
25 #include "base/trace_event/malloc_dump_provider.h" 25 #include "base/trace_event/malloc_dump_provider.h"
26 #include "base/trace_event/memory_dump_provider.h" 26 #include "base/trace_event/memory_dump_provider.h"
27 #include "base/trace_event/memory_dump_scheduler.h"
27 #include "base/trace_event/memory_dump_session_state.h" 28 #include "base/trace_event/memory_dump_session_state.h"
28 #include "base/trace_event/memory_infra_background_whitelist.h" 29 #include "base/trace_event/memory_infra_background_whitelist.h"
29 #include "base/trace_event/process_memory_dump.h" 30 #include "base/trace_event/process_memory_dump.h"
30 #include "base/trace_event/trace_event.h" 31 #include "base/trace_event/trace_event.h"
31 #include "base/trace_event/trace_event_argument.h" 32 #include "base/trace_event/trace_event_argument.h"
32 #include "build/build_config.h" 33 #include "build/build_config.h"
33 34
34 #if defined(OS_ANDROID) 35 #if defined(OS_ANDROID)
35 #include "base/trace_event/java_heap_dump_provider_android.h" 36 #include "base/trace_event/java_heap_dump_provider_android.h"
36 #endif 37 #endif
(...skipping 367 matching lines...) Expand 10 before | Expand all | Expand 10 after
404 // to just skip it, without actually invoking the |mdp|, which might be 405 // to just skip it, without actually invoking the |mdp|, which might be
405 // destroyed by the caller soon after this method returns. 406 // destroyed by the caller soon after this method returns.
406 (*mdp_iter)->disabled = true; 407 (*mdp_iter)->disabled = true;
407 dump_providers_.erase(mdp_iter); 408 dump_providers_.erase(mdp_iter);
408 } 409 }
409 410
// Adds |mdpinfo| to the set of providers whose memory totals are polled on
// the dump thread. Runs on the dump thread; |lock_| guards the set.
void MemoryDumpManager::RegisterPollingMDPOnDumpThread(
    scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
  AutoLock lock(lock_);
  dump_providers_for_polling_.insert(mdpinfo);

  // Notify ready for polling when first polling supported provider is
  // registered. This handles the case where OnTraceLogEnabled() did not notify
  // ready since no polling supported mdp has yet been registered.
  // NOTE(review): |dump_scheduler_| is dereferenced unconditionally here;
  // presumably non-null because this only runs while tracing is enabled —
  // confirm against OnTraceLogEnabled()/OnTraceLogDisabled().
  if (dump_providers_for_polling_.size() == 1)
    dump_scheduler_->NotifyPollingSupported();
}
415 422
// Removes |mdpinfo| from the set of polled providers. Runs on the dump
// thread.
void MemoryDumpManager::UnregisterPollingMDPOnDumpThread(
    scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
  // Let the provider release its polling resources first. Deliberately called
  // before taking |lock_| so provider code never runs under the lock.
  mdpinfo->dump_provider->SuspendFastMemoryPolling();

  AutoLock lock(lock_);
  dump_providers_for_polling_.erase(mdpinfo);
  // At least one polling provider must remain registered while polling is
  // active (the scheduler was told polling is supported).
  DCHECK(!dump_providers_for_polling_.empty())
      << "All polling MDPs cannot be unregistered.";
}
423 432
424 void MemoryDumpManager::RequestGlobalDump( 433 void MemoryDumpManager::RequestGlobalDump(
425 MemoryDumpType dump_type, 434 MemoryDumpType dump_type,
426 MemoryDumpLevelOfDetail level_of_detail, 435 MemoryDumpLevelOfDetail level_of_detail,
427 const MemoryDumpCallback& callback) { 436 const MemoryDumpCallback& callback) {
428 // Bail out immediately if tracing is not enabled at all or if the dump mode 437 // Bail out immediately if tracing is not enabled at all or if the dump mode
429 // is not allowed. 438 // is not allowed.
430 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) || 439 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) ||
431 !IsDumpModeAllowed(level_of_detail)) { 440 !IsDumpModeAllowed(level_of_detail)) {
(...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after
493 // here. SetupNextMemoryDump() is robust enough to tolerate it and will 502 // here. SetupNextMemoryDump() is robust enough to tolerate it and will
494 // NACK the dump. 503 // NACK the dump.
495 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( 504 pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
496 args, dump_providers_, session_state_, callback, 505 args, dump_providers_, session_state_, callback,
497 dump_thread_ ? dump_thread_->task_runner() : nullptr)); 506 dump_thread_ ? dump_thread_->task_runner() : nullptr));
498 507
499 // Safety check to prevent reaching here without calling RequestGlobalDump, 508 // Safety check to prevent reaching here without calling RequestGlobalDump,
500 // with disallowed modes. If |session_state_| is null then tracing is 509 // with disallowed modes. If |session_state_| is null then tracing is
501 // disabled. 510 // disabled.
502 CHECK(!session_state_ || 511 CHECK(!session_state_ ||
503 session_state_->memory_dump_config().allowed_dump_modes.count( 512 session_state_->IsDumpModeAllowed(args.level_of_detail));
504 args.level_of_detail));
505 } 513 }
506 514
507 TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump", 515 TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump",
508 TRACE_ID_MANGLE(args.dump_guid), 516 TRACE_ID_MANGLE(args.dump_guid),
509 TRACE_EVENT_FLAG_FLOW_OUT); 517 TRACE_EVENT_FLAG_FLOW_OUT);
510 518
511 // Start the process dump. This involves task runner hops as specified by the 519 // Start the process dump. This involves task runner hops as specified by the
512 // MemoryDumpProvider(s) in RegisterDumpProvider()). 520 // MemoryDumpProvider(s) in RegisterDumpProvider()).
513 SetupNextMemoryDump(std::move(pmd_async_state)); 521 SetupNextMemoryDump(std::move(pmd_async_state));
514 } 522 }
(...skipping 152 matching lines...) Expand 10 before | Expand all | Expand 10 after
667 args); 675 args);
668 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd); 676 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd);
669 mdpinfo->consecutive_failures = 677 mdpinfo->consecutive_failures =
670 dump_successful ? 0 : mdpinfo->consecutive_failures + 1; 678 dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
671 } 679 }
672 680
673 pmd_async_state->pending_dump_providers.pop_back(); 681 pmd_async_state->pending_dump_providers.pop_back();
674 SetupNextMemoryDump(std::move(pmd_async_state)); 682 SetupNextMemoryDump(std::move(pmd_async_state));
675 } 683 }
676 684
// Sums the fast-poll memory totals of all registered polling providers into
// |*memory_total|. Returns false, leaving |*memory_total| unmodified, when no
// polling providers are registered.
bool MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) {
#if DCHECK_IS_ON()
  {
    // This must run on the dump thread (when one exists).
    AutoLock lock(lock_);
    if (dump_thread_)
      DCHECK(dump_thread_->task_runner()->BelongsToCurrentThread());
  }
#endif
  // NOTE(review): |dump_providers_for_polling_| is read below without holding
  // |lock_|; presumably safe because mutations happen on this same dump
  // thread (Register/UnregisterPollingMDPOnDumpThread) — confirm.
  if (dump_providers_for_polling_.empty())
    return false;

  *memory_total = 0;
  // Note that we call PollFastMemoryTotal() even if the dump provider is
  // disabled (unregistered). This is to avoid taking lock while polling.
  for (const auto& mdpinfo : dump_providers_for_polling_) {
    uint64_t value = 0;
    mdpinfo->dump_provider->PollFastMemoryTotal(&value);
    *memory_total += value;
  }
  return true;
}
688 706
689 // static 707 // static
690 void MemoryDumpManager::FinalizeDumpAndAddToTrace( 708 void MemoryDumpManager::FinalizeDumpAndAddToTrace(
691 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { 709 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
692 HEAP_PROFILER_SCOPED_IGNORE; 710 HEAP_PROFILER_SCOPED_IGNORE;
693 DCHECK(pmd_async_state->pending_dump_providers.empty()); 711 DCHECK(pmd_async_state->pending_dump_providers.empty());
694 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; 712 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid;
695 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { 713 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) {
696 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = 714 scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after
755 // while the |lock_| is taken; 773 // while the |lock_| is taken;
756 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); 774 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
757 775
758 // Spin-up the thread used to invoke unbound dump providers. 776 // Spin-up the thread used to invoke unbound dump providers.
759 std::unique_ptr<Thread> dump_thread(new Thread("MemoryInfra")); 777 std::unique_ptr<Thread> dump_thread(new Thread("MemoryInfra"));
760 if (!dump_thread->Start()) { 778 if (!dump_thread->Start()) {
761 LOG(ERROR) << "Failed to start the memory-infra thread for tracing"; 779 LOG(ERROR) << "Failed to start the memory-infra thread for tracing";
762 return; 780 return;
763 } 781 }
764 782
765 const TraceConfig trace_config = 783 const TraceConfig& trace_config =
766 TraceLog::GetInstance()->GetCurrentTraceConfig(); 784 TraceLog::GetInstance()->GetCurrentTraceConfig();
785 const TraceConfig::MemoryDumpConfig& memory_dump_config =
786 trace_config.memory_dump_config();
767 scoped_refptr<MemoryDumpSessionState> session_state = 787 scoped_refptr<MemoryDumpSessionState> session_state =
768 new MemoryDumpSessionState; 788 new MemoryDumpSessionState;
769 session_state->SetMemoryDumpConfig(trace_config.memory_dump_config()); 789 session_state->SetAllowedDumpModes(memory_dump_config.allowed_dump_modes);
790 session_state->set_heap_profiler_breakdown_threshold_bytes(
791 memory_dump_config.heap_profiler_options.breakdown_threshold_bytes);
770 if (heap_profiling_enabled_) { 792 if (heap_profiling_enabled_) {
771 // If heap profiling is enabled, the stack frame deduplicator and type name 793 // If heap profiling is enabled, the stack frame deduplicator and type name
772 // deduplicator will be in use. Add a metadata events to write the frames 794 // deduplicator will be in use. Add a metadata events to write the frames
773 // and type IDs. 795 // and type IDs.
774 session_state->SetStackFrameDeduplicator( 796 session_state->SetStackFrameDeduplicator(
775 WrapUnique(new StackFrameDeduplicator)); 797 WrapUnique(new StackFrameDeduplicator));
776 798
777 session_state->SetTypeNameDeduplicator( 799 session_state->SetTypeNameDeduplicator(
778 WrapUnique(new TypeNameDeduplicator)); 800 WrapUnique(new TypeNameDeduplicator));
779 801
780 TRACE_EVENT_API_ADD_METADATA_EVENT( 802 TRACE_EVENT_API_ADD_METADATA_EVENT(
781 TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames", 803 TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames",
782 "stackFrames", 804 "stackFrames",
783 MakeUnique<SessionStateConvertableProxy<StackFrameDeduplicator>>( 805 MakeUnique<SessionStateConvertableProxy<StackFrameDeduplicator>>(
784 session_state, &MemoryDumpSessionState::stack_frame_deduplicator)); 806 session_state, &MemoryDumpSessionState::stack_frame_deduplicator));
785 807
786 TRACE_EVENT_API_ADD_METADATA_EVENT( 808 TRACE_EVENT_API_ADD_METADATA_EVENT(
787 TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames", 809 TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames",
788 "typeNames", 810 "typeNames",
789 MakeUnique<SessionStateConvertableProxy<TypeNameDeduplicator>>( 811 MakeUnique<SessionStateConvertableProxy<TypeNameDeduplicator>>(
790 session_state, &MemoryDumpSessionState::type_name_deduplicator)); 812 session_state, &MemoryDumpSessionState::type_name_deduplicator));
791 } 813 }
792 814
815 std::unique_ptr<MemoryDumpScheduler> dump_scheduler(
816 new MemoryDumpScheduler(this, dump_thread->task_runner()));
817 DCHECK_LE(memory_dump_config.triggers.size(), 3u);
818 for (const auto& trigger : memory_dump_config.triggers) {
819 if (!session_state->IsDumpModeAllowed(trigger.level_of_detail)) {
820 NOTREACHED();
821 continue;
822 }
823 dump_scheduler->AddTrigger(trigger.trigger_type, trigger.level_of_detail,
824 trigger.min_time_between_dumps_ms);
825 }
826
793 { 827 {
794 AutoLock lock(lock_); 828 AutoLock lock(lock_);
795 829
796 DCHECK(delegate_); // At this point we must have a delegate. 830 DCHECK(delegate_); // At this point we must have a delegate.
797 session_state_ = session_state; 831 session_state_ = session_state;
798 832
799 DCHECK(!dump_thread_); 833 DCHECK(!dump_thread_);
800 dump_thread_ = std::move(dump_thread); 834 dump_thread_ = std::move(dump_thread);
835 dump_scheduler_ = std::move(dump_scheduler);
836
837 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
801 838
802 dump_providers_for_polling_.clear(); 839 dump_providers_for_polling_.clear();
803 for (const auto& mdpinfo : dump_providers_) { 840 for (const auto& mdpinfo : dump_providers_) {
804 if (mdpinfo->options.is_fast_polling_supported) 841 if (mdpinfo->options.is_fast_polling_supported)
805 dump_providers_for_polling_.insert(mdpinfo); 842 dump_providers_for_polling_.insert(mdpinfo);
806 } 843 }
844 // Notify polling supported only if some polling supported provider was
845 // registered, else RegisterPollingMDPOnDumpThread() will notify when first
846 // polling MDP registers.
847 if (!dump_providers_for_polling_.empty())
848 dump_scheduler_->NotifyPollingSupported();
807 849
808 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); 850 // Only coordinator process triggers periodic global memory dumps.
809 851 if (is_coordinator_)
810 if (!is_coordinator_) 852 dump_scheduler_->NotifyPeriodicTriggerSupported();
811 return;
812 } 853 }
813 854
814 // Enable periodic dumps if necessary.
815 periodic_dump_timer_.Start(trace_config.memory_dump_config().triggers);
816 } 855 }
817 856
818 void MemoryDumpManager::OnTraceLogDisabled() { 857 void MemoryDumpManager::OnTraceLogDisabled() {
819 // There might be a memory dump in progress while this happens. Therefore, 858 // There might be a memory dump in progress while this happens. Therefore,
820 // ensure that the MDM state which depends on the tracing enabled / disabled 859 // ensure that the MDM state which depends on the tracing enabled / disabled
821 // state is always accessed by the dumping methods holding the |lock_|. 860 // state is always accessed by the dumping methods holding the |lock_|.
822 if (!subtle::NoBarrier_Load(&memory_tracing_enabled_)) 861 if (!subtle::NoBarrier_Load(&memory_tracing_enabled_))
823 return; 862 return;
824 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); 863 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
825 std::unique_ptr<Thread> dump_thread; 864 std::unique_ptr<Thread> dump_thread;
865 std::unique_ptr<MemoryDumpScheduler> scheduler;
826 { 866 {
827 AutoLock lock(lock_); 867 AutoLock lock(lock_);
828 dump_thread = std::move(dump_thread_); 868 dump_thread = std::move(dump_thread_);
829 session_state_ = nullptr; 869 session_state_ = nullptr;
870 scheduler = std::move(dump_scheduler_);
830 } 871 }
872 scheduler->DisableAllTriggers();
831 873
832 // Thread stops are blocking and must be performed outside of the |lock_| 874 // Thread stops are blocking and must be performed outside of the |lock_|
833 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). 875 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it).
834 periodic_dump_timer_.Stop();
835 if (dump_thread) 876 if (dump_thread)
836 dump_thread->Stop(); 877 dump_thread->Stop();
837 878
838 // |dump_providers_for_polling_| must be cleared only after the dump thread is 879 // |dump_providers_for_polling_| must be cleared only after the dump thread is
839 // stopped (polling tasks are done). 880 // stopped (polling tasks are done).
840 { 881 {
841 AutoLock lock(lock_); 882 AutoLock lock(lock_);
842 for (const auto& mdpinfo : dump_providers_for_polling_) 883 for (const auto& mdpinfo : dump_providers_for_polling_)
843 mdpinfo->dump_provider->SuspendFastMemoryPolling(); 884 mdpinfo->dump_provider->SuspendFastMemoryPolling();
844 dump_providers_for_polling_.clear(); 885 dump_providers_for_polling_.clear();
845 } 886 }
846 } 887 }
847 888
848 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) { 889 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
849 AutoLock lock(lock_); 890 AutoLock lock(lock_);
850 if (!session_state_) 891 if (!session_state_)
851 return false; 892 return false;
852 return session_state_->memory_dump_config().allowed_dump_modes.count( 893 return session_state_->IsDumpModeAllowed(dump_mode);
853 dump_mode) != 0;
854 } 894 }
855 895
// Returns the tracing process id as reported by the embedder's delegate.
// NOTE(review): |delegate_| is dereferenced unconditionally; presumably set
// before any caller can reach this — confirm initialization order.
uint64_t MemoryDumpManager::GetTracingProcessId() const {
  return delegate_->GetTracingProcessId();
}
859 899
860 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( 900 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
861 MemoryDumpProvider* dump_provider, 901 MemoryDumpProvider* dump_provider,
862 const char* name, 902 const char* name,
863 scoped_refptr<SequencedTaskRunner> task_runner, 903 scoped_refptr<SequencedTaskRunner> task_runner,
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after
909 const MemoryDumpArgs& dump_args) { 949 const MemoryDumpArgs& dump_args) {
910 auto iter = process_dumps.find(pid); 950 auto iter = process_dumps.find(pid);
911 if (iter == process_dumps.end()) { 951 if (iter == process_dumps.end()) {
912 std::unique_ptr<ProcessMemoryDump> new_pmd( 952 std::unique_ptr<ProcessMemoryDump> new_pmd(
913 new ProcessMemoryDump(session_state, dump_args)); 953 new ProcessMemoryDump(session_state, dump_args));
914 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; 954 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first;
915 } 955 }
916 return iter->second.get(); 956 return iter->second.get();
917 } 957 }
918 958
// Default-constructs the timer in the stopped state; Start() arms it.
MemoryDumpManager::PeriodicGlobalDumpTimer::PeriodicGlobalDumpTimer() {}
920
// Stops the underlying timer so no periodic dump can fire after destruction.
MemoryDumpManager::PeriodicGlobalDumpTimer::~PeriodicGlobalDumpTimer() {
  Stop();
}
924
925 void MemoryDumpManager::PeriodicGlobalDumpTimer::Start(
926 const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list) {
927 if (triggers_list.empty())
928 return;
929
930 // At the moment the periodic support is limited to at most one periodic
931 // trigger per dump mode. All intervals should be an integer multiple of the
932 // smallest interval specified.
933 periodic_dumps_count_ = 0;
934 uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
935 uint32_t light_dump_period_ms = 0;
936 uint32_t heavy_dump_period_ms = 0;
937 DCHECK_LE(triggers_list.size(), 3u);
938 auto* mdm = MemoryDumpManager::GetInstance();
939 for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) {
940 DCHECK_NE(0u, config.min_time_between_dumps_ms);
941 DCHECK_EQ(MemoryDumpType::PERIODIC_INTERVAL, config.trigger_type)
942 << "Only periodic_interval triggers are suppported";
943 switch (config.level_of_detail) {
944 case MemoryDumpLevelOfDetail::BACKGROUND:
945 DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::BACKGROUND));
946 break;
947 case MemoryDumpLevelOfDetail::LIGHT:
948 DCHECK_EQ(0u, light_dump_period_ms);
949 DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::LIGHT));
950 light_dump_period_ms = config.min_time_between_dumps_ms;
951 break;
952 case MemoryDumpLevelOfDetail::DETAILED:
953 DCHECK_EQ(0u, heavy_dump_period_ms);
954 DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::DETAILED));
955 heavy_dump_period_ms = config.min_time_between_dumps_ms;
956 break;
957 }
958 min_timer_period_ms =
959 std::min(min_timer_period_ms, config.min_time_between_dumps_ms);
960 }
961
962 DCHECK_EQ(0u, light_dump_period_ms % min_timer_period_ms);
963 light_dump_rate_ = light_dump_period_ms / min_timer_period_ms;
964 DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
965 heavy_dump_rate_ = heavy_dump_period_ms / min_timer_period_ms;
966
967 timer_.Start(FROM_HERE, TimeDelta::FromMilliseconds(min_timer_period_ms),
968 base::Bind(&PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump,
969 base::Unretained(this)));
970 }
971
972 void MemoryDumpManager::PeriodicGlobalDumpTimer::Stop() {
973 if (IsRunning()) {
974 timer_.Stop();
975 }
976 }
977
// True while the periodic dump timer is armed (between Start() and Stop()).
bool MemoryDumpManager::PeriodicGlobalDumpTimer::IsRunning() {
  return timer_.IsRunning();
}
981
982 void MemoryDumpManager::PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump() {
983 MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
984 if (light_dump_rate_ > 0 && periodic_dumps_count_ % light_dump_rate_ == 0)
985 level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
986 if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0)
987 level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
988 ++periodic_dumps_count_;
989
990 MemoryDumpManager::GetInstance()->RequestGlobalDump(
991 MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
992 }
993
994 } // namespace trace_event 959 } // namespace trace_event
995 } // namespace base 960 } // namespace base
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698