Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(127)

Side by Side Diff: base/trace_event/memory_dump_manager.cc

Issue 2582453002: [tracing] Implement polling in MemoryDumpManager (Closed)
Patch Set: Created 4 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/trace_event/memory_dump_manager.h" 5 #include "base/trace_event/memory_dump_manager.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <utility> 8 #include <utility>
9 9
10 #include "base/allocator/features.h" 10 #include "base/allocator/features.h"
11 #include "base/atomic_sequence_num.h" 11 #include "base/atomic_sequence_num.h"
12 #include "base/base_switches.h" 12 #include "base/base_switches.h"
13 #include "base/command_line.h" 13 #include "base/command_line.h"
14 #include "base/compiler_specific.h" 14 #include "base/compiler_specific.h"
15 #include "base/debug/debugging_flags.h" 15 #include "base/debug/debugging_flags.h"
16 #include "base/debug/stack_trace.h" 16 #include "base/debug/stack_trace.h"
17 #include "base/memory/ptr_util.h" 17 #include "base/memory/ptr_util.h"
18 #include "base/threading/thread.h" 18 #include "base/threading/thread.h"
19 #include "base/threading/thread_task_runner_handle.h" 19 #include "base/threading/thread_task_runner_handle.h"
20 #include "base/trace_event/heap_profiler.h" 20 #include "base/trace_event/heap_profiler.h"
21 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" 21 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
22 #include "base/trace_event/heap_profiler_event_filter.h" 22 #include "base/trace_event/heap_profiler_event_filter.h"
23 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h" 23 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
24 #include "base/trace_event/heap_profiler_type_name_deduplicator.h" 24 #include "base/trace_event/heap_profiler_type_name_deduplicator.h"
25 #include "base/trace_event/malloc_dump_provider.h" 25 #include "base/trace_event/malloc_dump_provider.h"
26 #include "base/trace_event/memory_dump_provider.h" 26 #include "base/trace_event/memory_dump_provider.h"
27 #include "base/trace_event/memory_dump_session_state.h" 27 #include "base/trace_event/memory_dump_session_state.h"
28 #include "base/trace_event/memory_dump_trigger.h"
28 #include "base/trace_event/memory_infra_background_whitelist.h" 29 #include "base/trace_event/memory_infra_background_whitelist.h"
29 #include "base/trace_event/process_memory_dump.h" 30 #include "base/trace_event/process_memory_dump.h"
30 #include "base/trace_event/trace_event.h" 31 #include "base/trace_event/trace_event.h"
31 #include "base/trace_event/trace_event_argument.h" 32 #include "base/trace_event/trace_event_argument.h"
32 #include "build/build_config.h" 33 #include "build/build_config.h"
33 34
34 #if defined(OS_ANDROID) 35 #if defined(OS_ANDROID)
35 #include "base/trace_event/java_heap_dump_provider_android.h" 36 #include "base/trace_event/java_heap_dump_provider_android.h"
36 #endif 37 #endif
37 38
(...skipping 83 matching lines...) Expand 10 before | Expand all | Expand 10 after
121 LeakySingletonTraits<MemoryDumpManager>>::get(); 122 LeakySingletonTraits<MemoryDumpManager>>::get();
122 } 123 }
123 124
124 // static 125 // static
125 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) { 126 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) {
126 g_instance_for_testing = instance; 127 g_instance_for_testing = instance;
127 } 128 }
128 129
129 MemoryDumpManager::MemoryDumpManager() 130 MemoryDumpManager::MemoryDumpManager()
130 : delegate_(nullptr), 131 : delegate_(nullptr),
131 is_coordinator_(false),
132 memory_tracing_enabled_(0), 132 memory_tracing_enabled_(0),
133 tracing_process_id_(kInvalidTracingProcessId), 133 tracing_process_id_(kInvalidTracingProcessId),
134 dumper_registrations_ignored_for_testing_(false), 134 dumper_registrations_ignored_for_testing_(false),
135 heap_profiling_enabled_(false) { 135 heap_profiling_enabled_(false) {
136 g_next_guid.GetNext(); // Make sure that first guid is not zero. 136 g_next_guid.GetNext(); // Make sure that first guid is not zero.
137 137
138 // At this point the command line may not be initialized but we try to 138 // At this point the command line may not be initialized but we try to
139 // enable the heap profiler to capture allocations as soon as possible. 139 // enable the heap profiler to capture allocations as soon as possible.
140 EnableHeapProfilingIfNeeded(); 140 EnableHeapProfilingIfNeeded();
141 } 141 }
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after
181 heap_profiling_enabled_ = true; 181 heap_profiling_enabled_ = true;
182 } 182 }
183 183
184 void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate, 184 void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate,
185 bool is_coordinator) { 185 bool is_coordinator) {
186 { 186 {
187 AutoLock lock(lock_); 187 AutoLock lock(lock_);
188 DCHECK(delegate); 188 DCHECK(delegate);
189 DCHECK(!delegate_); 189 DCHECK(!delegate_);
190 delegate_ = delegate; 190 delegate_ = delegate;
191 is_coordinator_ = is_coordinator;
192 EnableHeapProfilingIfNeeded(); 191 EnableHeapProfilingIfNeeded();
192 dump_trigger_.reset(new MemoryDumpTrigger(this, is_coordinator));
193 } 193 }
194 194
195 // Enable the core dump providers. 195 // Enable the core dump providers.
196 #if defined(MALLOC_MEMORY_TRACING_SUPPORTED) 196 #if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
197 RegisterDumpProvider(MallocDumpProvider::GetInstance(), "Malloc", nullptr); 197 RegisterDumpProvider(MallocDumpProvider::GetInstance(), "Malloc", nullptr);
198 #endif 198 #endif
199 199
200 #if defined(OS_ANDROID) 200 #if defined(OS_ANDROID)
201 RegisterDumpProvider(JavaHeapDumpProvider::GetInstance(), "JavaHeap", 201 RegisterDumpProvider(JavaHeapDumpProvider::GetInstance(), "JavaHeap",
202 nullptr); 202 nullptr);
(...skipping 164 matching lines...) Expand 10 before | Expand all | Expand 10 after
367 // destroyed by the caller soon after this method returns. 367 // destroyed by the caller soon after this method returns.
368 (*mdp_iter)->disabled = true; 368 (*mdp_iter)->disabled = true;
369 dump_providers_.erase(mdp_iter); 369 dump_providers_.erase(mdp_iter);
370 } 370 }
371 371
372 void MemoryDumpManager::RegisterPollingMDPOnDumpThread( 372 void MemoryDumpManager::RegisterPollingMDPOnDumpThread(
373 scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) { 373 scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
374 DCHECK(!mdpinfo->task_runner); 374 DCHECK(!mdpinfo->task_runner);
375 AutoLock lock(lock_); 375 AutoLock lock(lock_);
376 dump_providers_for_polling_.insert(mdpinfo); 376 dump_providers_for_polling_.insert(mdpinfo);
377
378 // Setup polling when first polling supported provider is registered since
379 // OnTraceLogEnabled() would not have setup polling without any provider that
380 // supports polling.
Primiano Tucci (use gerrit) 2017/01/18 16:16:06 can you expand a bit here about why? To me feels a
ssid 2017/01/20 23:07:27 If I did that then we would be unnecessarily setti
381 if (dump_providers_for_polling_.size() == 1) {
382 dump_trigger_->SetupPeakTriggers(
383 session_state_->memory_dump_config().triggers,
384 ThreadTaskRunnerHandle::Get());
Primiano Tucci (use gerrit) 2017/01/18 16:16:06 this one confuses me a bit: how do you know at thi
ssid 2017/01/20 23:07:27 So, now it does not matter. The scheduler knows if
385 }
377 } 386 }
378 387
379 void MemoryDumpManager::UnregisterPollingMDPOnDumpThread( 388 void MemoryDumpManager::UnregisterPollingMDPOnDumpThread(
380 scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) { 389 scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
381 mdpinfo->dump_provider->SuspendFastMemoryPolling(); 390 mdpinfo->dump_provider->SuspendFastMemoryPolling();
382 391
383 AutoLock lock(lock_); 392 AutoLock lock(lock_);
384 dump_providers_for_polling_.erase(mdpinfo); 393 dump_providers_for_polling_.erase(mdpinfo);
394 DCHECK(!dump_providers_for_polling_.empty())
395 << "All polling MDPs cannot be unregistered, since it will cause polling "
396 "without any MDPs";
385 } 397 }
386 398
387 void MemoryDumpManager::RequestGlobalDump( 399 void MemoryDumpManager::RequestGlobalDump(
388 MemoryDumpType dump_type, 400 MemoryDumpType dump_type,
389 MemoryDumpLevelOfDetail level_of_detail, 401 MemoryDumpLevelOfDetail level_of_detail,
390 const MemoryDumpCallback& callback) { 402 const MemoryDumpCallback& callback) {
391 // Bail out immediately if tracing is not enabled at all or if the dump mode 403 // Bail out immediately if tracing is not enabled at all or if the dump mode
392 // is not allowed. 404 // is not allowed.
393 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) || 405 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) ||
394 !IsDumpModeAllowed(level_of_detail)) { 406 !IsDumpModeAllowed(level_of_detail)) {
(...skipping 235 matching lines...) Expand 10 before | Expand all | Expand 10 after
630 args); 642 args);
631 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd); 643 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd);
632 mdpinfo->consecutive_failures = 644 mdpinfo->consecutive_failures =
633 dump_successful ? 0 : mdpinfo->consecutive_failures + 1; 645 dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
634 } 646 }
635 647
636 pmd_async_state->pending_dump_providers.pop_back(); 648 pmd_async_state->pending_dump_providers.pop_back();
637 SetupNextMemoryDump(std::move(pmd_async_state)); 649 SetupNextMemoryDump(std::move(pmd_async_state));
638 } 650 }
639 651
640 void MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) { 652 bool MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) {
653 #if DCHECK_IS_ON()
654 {
655 AutoLock lock(lock_);
656 if (dump_thread_)
657 DCHECK(dump_thread_->task_runner()->BelongsToCurrentThread());
658 }
659 #endif
660 if (dump_providers_for_polling_.empty())
661 return false;
662
641 *memory_total = 0; 663 *memory_total = 0;
642 // Note that we call PollFastMemoryTotal() even if the dump provider is 664 // Note that we call PollFastMemoryTotal() even if the dump provider is
643 // disabled (unregistered). This is to avoid taking lock while polling. 665 // disabled (unregistered). This is to avoid taking lock while polling.
644 for (const auto& mdpinfo : dump_providers_for_polling_) { 666 for (const auto& mdpinfo : dump_providers_for_polling_) {
645 uint64_t value = 0; 667 uint64_t value = 0;
646 mdpinfo->dump_provider->PollFastMemoryTotal(&value); 668 mdpinfo->dump_provider->PollFastMemoryTotal(&value);
647 *memory_total += value; 669 *memory_total += value;
648 } 670 }
649 return; 671 return true;
650 } 672 }
651 673
652 // static 674 // static
653 void MemoryDumpManager::FinalizeDumpAndAddToTrace( 675 void MemoryDumpManager::FinalizeDumpAndAddToTrace(
654 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { 676 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
655 HEAP_PROFILER_SCOPED_IGNORE; 677 HEAP_PROFILER_SCOPED_IGNORE;
656 DCHECK(pmd_async_state->pending_dump_providers.empty()); 678 DCHECK(pmd_async_state->pending_dump_providers.empty());
657 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; 679 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid;
658 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { 680 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) {
659 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = 681 scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
(...skipping 86 matching lines...) Expand 10 before | Expand all | Expand 10 after
746 MakeUnique<SessionStateConvertableProxy<StackFrameDeduplicator>>( 768 MakeUnique<SessionStateConvertableProxy<StackFrameDeduplicator>>(
747 session_state, &MemoryDumpSessionState::stack_frame_deduplicator)); 769 session_state, &MemoryDumpSessionState::stack_frame_deduplicator));
748 770
749 TRACE_EVENT_API_ADD_METADATA_EVENT( 771 TRACE_EVENT_API_ADD_METADATA_EVENT(
750 TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames", 772 TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames",
751 "typeNames", 773 "typeNames",
752 MakeUnique<SessionStateConvertableProxy<TypeNameDeduplicator>>( 774 MakeUnique<SessionStateConvertableProxy<TypeNameDeduplicator>>(
753 session_state, &MemoryDumpSessionState::type_name_deduplicator)); 775 session_state, &MemoryDumpSessionState::type_name_deduplicator));
754 } 776 }
755 777
778 scoped_refptr<SingleThreadTaskRunner> polling_task_runner;
779 MemoryDumpTrigger* trigger = nullptr;
756 { 780 {
757 AutoLock lock(lock_); 781 AutoLock lock(lock_);
758 782
759 DCHECK(delegate_); // At this point we must have a delegate. 783 DCHECK(delegate_); // At this point we must have a delegate.
760 session_state_ = session_state; 784 session_state_ = session_state;
761 785
762 DCHECK(!dump_thread_); 786 DCHECK(!dump_thread_);
763 dump_thread_ = std::move(dump_thread); 787 dump_thread_ = std::move(dump_thread);
764 788
765 dump_providers_for_polling_.clear(); 789 dump_providers_for_polling_.clear();
766 for (const auto& mdpinfo : dump_providers_) { 790 for (const auto& mdpinfo : dump_providers_) {
767 if (mdpinfo->options.is_fast_polling_supported) 791 if (mdpinfo->options.is_fast_polling_supported)
768 dump_providers_for_polling_.insert(mdpinfo); 792 dump_providers_for_polling_.insert(mdpinfo);
769 } 793 }
794 if (!dump_providers_for_polling_.empty())
795 polling_task_runner = dump_thread_->task_runner();
770 796
771 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); 797 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
772 798
773 // TODO(primiano): This is a temporary hack to disable periodic memory dumps 799 trigger = dump_trigger_.get();
774 // when running memory benchmarks until telemetry uses TraceConfig to
775 // enable/disable periodic dumps. See crbug.com/529184 .
776 if (!is_coordinator_ ||
777 CommandLine::ForCurrentProcess()->HasSwitch(
778 "enable-memory-benchmarking")) {
779 return;
780 }
781 } 800 }
782 801
802 // Setup peak triggers only if some polling supported provider was registered,
803 // else polling will be setup by RegisterPollingMDPOnDumpThread when first
804 // polling MDP registers.
805 if (polling_task_runner) {
806 trigger->SetupPeakTriggers(trace_config.memory_dump_config().triggers,
807 polling_task_runner);
808 }
783 // Enable periodic dumps if necessary. 809 // Enable periodic dumps if necessary.
784 periodic_dump_timer_.Start(trace_config.memory_dump_config().triggers); 810 trigger->SetupPeriodicTriggers(trace_config.memory_dump_config().triggers);
785 } 811 }
786 812
787 void MemoryDumpManager::OnTraceLogDisabled() { 813 void MemoryDumpManager::OnTraceLogDisabled() {
788 // There might be a memory dump in progress while this happens. Therefore, 814 // There might be a memory dump in progress while this happens. Therefore,
789 // ensure that the MDM state which depends on the tracing enabled / disabled 815 // ensure that the MDM state which depends on the tracing enabled / disabled
790 // state is always accessed by the dumping methods holding the |lock_|. 816 // state is always accessed by the dumping methods holding the |lock_|.
791 if (!subtle::NoBarrier_Load(&memory_tracing_enabled_)) 817 if (!subtle::NoBarrier_Load(&memory_tracing_enabled_))
792 return; 818 return;
793 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); 819 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
794 std::unique_ptr<Thread> dump_thread; 820 std::unique_ptr<Thread> dump_thread;
795 { 821 {
796 AutoLock lock(lock_); 822 AutoLock lock(lock_);
797 dump_thread = std::move(dump_thread_); 823 dump_thread = std::move(dump_thread_);
798 session_state_ = nullptr; 824 session_state_ = nullptr;
799 } 825 }
800 826
801 // Thread stops are blocking and must be performed outside of the |lock_| 827 // Thread stops are blocking and must be performed outside of the |lock_|
802 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). 828 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it).
803 periodic_dump_timer_.Stop();
804 if (dump_thread) 829 if (dump_thread)
805 dump_thread->Stop(); 830 dump_thread->Stop();
806 831
807 // |dump_providers_for_polling_| must be cleared only after the dump thread is 832 // |dump_providers_for_polling_| must be cleared and triggers must be
808 // stopped (polling tasks are done). 833 // disabled only after the dump thread is stopped (polling tasks are done).
834 MemoryDumpTrigger* trigger = nullptr;
809 { 835 {
810 AutoLock lock(lock_); 836 AutoLock lock(lock_);
811 for (const auto& mdpinfo : dump_providers_for_polling_) 837 for (const auto& mdpinfo : dump_providers_for_polling_)
812 mdpinfo->dump_provider->SuspendFastMemoryPolling(); 838 mdpinfo->dump_provider->SuspendFastMemoryPolling();
813 dump_providers_for_polling_.clear(); 839 dump_providers_for_polling_.clear();
840 trigger = dump_trigger_.get();
814 } 841 }
842 trigger->Disable();
815 } 843 }
816 844
817 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) { 845 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
818 AutoLock lock(lock_); 846 AutoLock lock(lock_);
819 if (!session_state_) 847 if (!session_state_)
820 return false; 848 return false;
821 return session_state_->memory_dump_config().allowed_dump_modes.count( 849 return session_state_->memory_dump_config().allowed_dump_modes.count(
822 dump_mode) != 0; 850 dump_mode) != 0;
823 } 851 }
824 852
(...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after
878 const MemoryDumpArgs& dump_args) { 906 const MemoryDumpArgs& dump_args) {
879 auto iter = process_dumps.find(pid); 907 auto iter = process_dumps.find(pid);
880 if (iter == process_dumps.end()) { 908 if (iter == process_dumps.end()) {
881 std::unique_ptr<ProcessMemoryDump> new_pmd( 909 std::unique_ptr<ProcessMemoryDump> new_pmd(
882 new ProcessMemoryDump(session_state, dump_args)); 910 new ProcessMemoryDump(session_state, dump_args));
883 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; 911 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first;
884 } 912 }
885 return iter->second.get(); 913 return iter->second.get();
886 } 914 }
887 915
888 MemoryDumpManager::PeriodicGlobalDumpTimer::PeriodicGlobalDumpTimer() {}
889
890 MemoryDumpManager::PeriodicGlobalDumpTimer::~PeriodicGlobalDumpTimer() {
891 Stop();
892 }
893
894 void MemoryDumpManager::PeriodicGlobalDumpTimer::Start(
895 const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list) {
896 if (triggers_list.empty())
897 return;
898
899 // At the moment the periodic support is limited to at most one periodic
900 // trigger per dump mode. All intervals should be an integer multiple of the
901 // smallest interval specified.
902 periodic_dumps_count_ = 0;
903 uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
904 uint32_t light_dump_period_ms = 0;
905 uint32_t heavy_dump_period_ms = 0;
906 DCHECK_LE(triggers_list.size(), 3u);
907 auto* mdm = MemoryDumpManager::GetInstance();
908 for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) {
909 DCHECK_NE(0u, config.min_time_between_dumps_ms);
910 DCHECK_EQ(MemoryDumpType::PERIODIC_INTERVAL, config.trigger_type)
911 << "Only periodic_interval triggers are supported";
912 switch (config.level_of_detail) {
913 case MemoryDumpLevelOfDetail::BACKGROUND:
914 DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::BACKGROUND));
915 break;
916 case MemoryDumpLevelOfDetail::LIGHT:
917 DCHECK_EQ(0u, light_dump_period_ms);
918 DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::LIGHT));
919 light_dump_period_ms = config.min_time_between_dumps_ms;
920 break;
921 case MemoryDumpLevelOfDetail::DETAILED:
922 DCHECK_EQ(0u, heavy_dump_period_ms);
923 DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::DETAILED));
924 heavy_dump_period_ms = config.min_time_between_dumps_ms;
925 break;
926 }
927 min_timer_period_ms =
928 std::min(min_timer_period_ms, config.min_time_between_dumps_ms);
929 }
930
931 DCHECK_EQ(0u, light_dump_period_ms % min_timer_period_ms);
932 light_dump_rate_ = light_dump_period_ms / min_timer_period_ms;
933 DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
934 heavy_dump_rate_ = heavy_dump_period_ms / min_timer_period_ms;
935
936 timer_.Start(FROM_HERE, TimeDelta::FromMilliseconds(min_timer_period_ms),
937 base::Bind(&PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump,
938 base::Unretained(this)));
939 }
940
941 void MemoryDumpManager::PeriodicGlobalDumpTimer::Stop() {
942 if (IsRunning()) {
943 timer_.Stop();
944 }
945 }
946
947 bool MemoryDumpManager::PeriodicGlobalDumpTimer::IsRunning() {
948 return timer_.IsRunning();
949 }
950
951 void MemoryDumpManager::PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump() {
952 MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
953 if (light_dump_rate_ > 0 && periodic_dumps_count_ % light_dump_rate_ == 0)
954 level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
955 if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0)
956 level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
957 ++periodic_dumps_count_;
958
959 MemoryDumpManager::GetInstance()->RequestGlobalDump(
960 MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
961 }
962
963 } // namespace trace_event 916 } // namespace trace_event
964 } // namespace base 917 } // namespace base
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698