Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(186)

Side by Side Diff: base/trace_event/memory_dump_manager.cc

Issue 2582453002: [tracing] Implement polling in MemoryDumpManager (Closed)
Patch Set: nit. Created 3 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/trace_event/memory_dump_manager.h" 5 #include "base/trace_event/memory_dump_manager.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <utility> 8 #include <utility>
9 9
10 #include "base/allocator/features.h" 10 #include "base/allocator/features.h"
11 #include "base/atomic_sequence_num.h" 11 #include "base/atomic_sequence_num.h"
12 #include "base/base_switches.h" 12 #include "base/base_switches.h"
13 #include "base/command_line.h" 13 #include "base/command_line.h"
14 #include "base/compiler_specific.h" 14 #include "base/compiler_specific.h"
15 #include "base/debug/debugging_flags.h" 15 #include "base/debug/debugging_flags.h"
16 #include "base/debug/stack_trace.h" 16 #include "base/debug/stack_trace.h"
17 #include "base/memory/ptr_util.h" 17 #include "base/memory/ptr_util.h"
18 #include "base/threading/thread.h" 18 #include "base/threading/thread.h"
19 #include "base/threading/thread_task_runner_handle.h" 19 #include "base/threading/thread_task_runner_handle.h"
20 #include "base/trace_event/heap_profiler.h" 20 #include "base/trace_event/heap_profiler.h"
21 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" 21 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
22 #include "base/trace_event/heap_profiler_event_filter.h" 22 #include "base/trace_event/heap_profiler_event_filter.h"
23 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h" 23 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
24 #include "base/trace_event/heap_profiler_type_name_deduplicator.h" 24 #include "base/trace_event/heap_profiler_type_name_deduplicator.h"
25 #include "base/trace_event/malloc_dump_provider.h" 25 #include "base/trace_event/malloc_dump_provider.h"
26 #include "base/trace_event/memory_dump_provider.h" 26 #include "base/trace_event/memory_dump_provider.h"
27 #include "base/trace_event/memory_dump_scheduler.h"
27 #include "base/trace_event/memory_dump_session_state.h" 28 #include "base/trace_event/memory_dump_session_state.h"
28 #include "base/trace_event/memory_infra_background_whitelist.h" 29 #include "base/trace_event/memory_infra_background_whitelist.h"
29 #include "base/trace_event/process_memory_dump.h" 30 #include "base/trace_event/process_memory_dump.h"
30 #include "base/trace_event/trace_event.h" 31 #include "base/trace_event/trace_event.h"
31 #include "base/trace_event/trace_event_argument.h" 32 #include "base/trace_event/trace_event_argument.h"
32 #include "build/build_config.h" 33 #include "build/build_config.h"
33 34
34 #if defined(OS_ANDROID) 35 #if defined(OS_ANDROID)
35 #include "base/trace_event/java_heap_dump_provider_android.h" 36 #include "base/trace_event/java_heap_dump_provider_android.h"
36 #endif 37 #endif
(...skipping 116 matching lines...) Expand 10 before | Expand all | Expand 10 after
153 LeakySingletonTraits<MemoryDumpManager>>::get(); 154 LeakySingletonTraits<MemoryDumpManager>>::get();
154 } 155 }
155 156
156 // static 157 // static
157 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) { 158 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) {
158 g_instance_for_testing = instance; 159 g_instance_for_testing = instance;
159 } 160 }
160 161
161 MemoryDumpManager::MemoryDumpManager() 162 MemoryDumpManager::MemoryDumpManager()
162 : delegate_(nullptr), 163 : delegate_(nullptr),
163 is_coordinator_(false),
164 memory_tracing_enabled_(0), 164 memory_tracing_enabled_(0),
165 tracing_process_id_(kInvalidTracingProcessId), 165 tracing_process_id_(kInvalidTracingProcessId),
166 dumper_registrations_ignored_for_testing_(false), 166 dumper_registrations_ignored_for_testing_(false),
167 heap_profiling_enabled_(false) { 167 heap_profiling_enabled_(false) {
168 g_next_guid.GetNext(); // Make sure that first guid is not zero. 168 g_next_guid.GetNext(); // Make sure that first guid is not zero.
169 169
170 // At this point the command line may not be initialized but we try to 170 // At this point the command line may not be initialized but we try to
171 // enable the heap profiler to capture allocations as soon as possible. 171 // enable the heap profiler to capture allocations as soon as possible.
172 EnableHeapProfilingIfNeeded(); 172 EnableHeapProfilingIfNeeded();
173 173
(...skipping 237 matching lines...) Expand 10 before | Expand all | Expand 10 after
411 // to just skip it, without actually invoking the |mdp|, which might be 411 // to just skip it, without actually invoking the |mdp|, which might be
412 // destroyed by the caller soon after this method returns. 412 // destroyed by the caller soon after this method returns.
413 (*mdp_iter)->disabled = true; 413 (*mdp_iter)->disabled = true;
414 dump_providers_.erase(mdp_iter); 414 dump_providers_.erase(mdp_iter);
415 } 415 }
416 416
417 void MemoryDumpManager::RegisterPollingMDPOnDumpThread( 417 void MemoryDumpManager::RegisterPollingMDPOnDumpThread(
418 scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) { 418 scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
419 AutoLock lock(lock_); 419 AutoLock lock(lock_);
420 dump_providers_for_polling_.insert(mdpinfo); 420 dump_providers_for_polling_.insert(mdpinfo);
421
422 // Notify ready for polling when first polling supported provider is
423 // registered. This handles the case where OnTraceLogEnabled() did not notify
424 // ready since no polling supported mdp has yet been registered.
425 if (dump_providers_for_polling_.size() == 1)
426 dump_scheduler_->NotifyPollingSupported();
421 } 427 }
422 428
423 void MemoryDumpManager::UnregisterPollingMDPOnDumpThread( 429 void MemoryDumpManager::UnregisterPollingMDPOnDumpThread(
424 scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) { 430 scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
425 mdpinfo->dump_provider->SuspendFastMemoryPolling(); 431 mdpinfo->dump_provider->SuspendFastMemoryPolling();
426 432
427 AutoLock lock(lock_); 433 AutoLock lock(lock_);
428 dump_providers_for_polling_.erase(mdpinfo); 434 dump_providers_for_polling_.erase(mdpinfo);
435 DCHECK(!dump_providers_for_polling_.empty())
436 << "All polling MDPs cannot be unregistered, since it will cause polling "
Primiano Tucci (use gerrit) 2017/01/26 02:52:22 i'd remove the "since... " part here.
ssid 2017/01/26 21:48:57 Done.
437 "without any MDPs";
429 } 438 }
430 439
431 void MemoryDumpManager::RequestGlobalDump( 440 void MemoryDumpManager::RequestGlobalDump(
432 MemoryDumpType dump_type, 441 MemoryDumpType dump_type,
433 MemoryDumpLevelOfDetail level_of_detail, 442 MemoryDumpLevelOfDetail level_of_detail,
434 const MemoryDumpCallback& callback) { 443 const MemoryDumpCallback& callback) {
435 // Bail out immediately if tracing is not enabled at all or if the dump mode 444 // Bail out immediately if tracing is not enabled at all or if the dump mode
436 // is not allowed. 445 // is not allowed.
437 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) || 446 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) ||
438 !IsDumpModeAllowed(level_of_detail)) { 447 !IsDumpModeAllowed(level_of_detail)) {
(...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after
500 // here. SetupNextMemoryDump() is robust enough to tolerate it and will 509 // here. SetupNextMemoryDump() is robust enough to tolerate it and will
501 // NACK the dump. 510 // NACK the dump.
502 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( 511 pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
503 args, dump_providers_, session_state_, callback, 512 args, dump_providers_, session_state_, callback,
504 dump_thread_ ? dump_thread_->task_runner() : nullptr)); 513 dump_thread_ ? dump_thread_->task_runner() : nullptr));
505 514
506 // Safety check to prevent reaching here without calling RequestGlobalDump, 515 // Safety check to prevent reaching here without calling RequestGlobalDump,
507 // with disallowed modes. If |session_state_| is null then tracing is 516 // with disallowed modes. If |session_state_| is null then tracing is
508 // disabled. 517 // disabled.
509 CHECK(!session_state_ || 518 CHECK(!session_state_ ||
510 session_state_->memory_dump_config().allowed_dump_modes.count( 519 session_state_->IsDumpModeAllowed(args.level_of_detail));
511 args.level_of_detail));
512 } 520 }
513 521
514 TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump", 522 TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump",
515 TRACE_ID_MANGLE(args.dump_guid), 523 TRACE_ID_MANGLE(args.dump_guid),
516 TRACE_EVENT_FLAG_FLOW_OUT); 524 TRACE_EVENT_FLAG_FLOW_OUT);
517 525
518 // Start the process dump. This involves task runner hops as specified by the 526 // Start the process dump. This involves task runner hops as specified by the
519 // MemoryDumpProvider(s) in RegisterDumpProvider()). 527 // MemoryDumpProvider(s) in RegisterDumpProvider()).
520 SetupNextMemoryDump(std::move(pmd_async_state)); 528 SetupNextMemoryDump(std::move(pmd_async_state));
521 } 529 }
(...skipping 152 matching lines...) Expand 10 before | Expand all | Expand 10 after
674 args); 682 args);
675 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd); 683 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd);
676 mdpinfo->consecutive_failures = 684 mdpinfo->consecutive_failures =
677 dump_successful ? 0 : mdpinfo->consecutive_failures + 1; 685 dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
678 } 686 }
679 687
680 pmd_async_state->pending_dump_providers.pop_back(); 688 pmd_async_state->pending_dump_providers.pop_back();
681 SetupNextMemoryDump(std::move(pmd_async_state)); 689 SetupNextMemoryDump(std::move(pmd_async_state));
682 } 690 }
683 691
684 void MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) { 692 bool MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) {
693 #if DCHECK_IS_ON()
694 {
695 AutoLock lock(lock_);
Primiano Tucci (use gerrit) 2017/01/26 02:52:22 I think I wrote this in another cl but forgot the
ssid 2017/01/26 21:48:57 Yes that is true. That is why I am holding a lock
696 if (dump_thread_)
697 DCHECK(dump_thread_->task_runner()->BelongsToCurrentThread());
698 }
699 #endif
700 if (dump_providers_for_polling_.empty())
701 return false;
702
685 *memory_total = 0; 703 *memory_total = 0;
686 // Note that we call PollFastMemoryTotal() even if the dump provider is 704 // Note that we call PollFastMemoryTotal() even if the dump provider is
687 // disabled (unregistered). This is to avoid taking lock while polling. 705 // disabled (unregistered). This is to avoid taking lock while polling.
688 for (const auto& mdpinfo : dump_providers_for_polling_) { 706 for (const auto& mdpinfo : dump_providers_for_polling_) {
689 uint64_t value = 0; 707 uint64_t value = 0;
690 mdpinfo->dump_provider->PollFastMemoryTotal(&value); 708 mdpinfo->dump_provider->PollFastMemoryTotal(&value);
691 *memory_total += value; 709 *memory_total += value;
692 } 710 }
693 return; 711 return true;
694 } 712 }
695 713
696 // static 714 // static
697 void MemoryDumpManager::FinalizeDumpAndAddToTrace( 715 void MemoryDumpManager::FinalizeDumpAndAddToTrace(
698 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { 716 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
699 HEAP_PROFILER_SCOPED_IGNORE; 717 HEAP_PROFILER_SCOPED_IGNORE;
700 DCHECK(pmd_async_state->pending_dump_providers.empty()); 718 DCHECK(pmd_async_state->pending_dump_providers.empty());
701 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; 719 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid;
702 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { 720 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) {
703 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = 721 scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after
762 // while the |lock_| is taken; 780 // while the |lock_| is taken;
763 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); 781 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
764 782
765 // Spin-up the thread used to invoke unbound dump providers. 783 // Spin-up the thread used to invoke unbound dump providers.
766 std::unique_ptr<Thread> dump_thread(new Thread("MemoryInfra")); 784 std::unique_ptr<Thread> dump_thread(new Thread("MemoryInfra"));
767 if (!dump_thread->Start()) { 785 if (!dump_thread->Start()) {
768 LOG(ERROR) << "Failed to start the memory-infra thread for tracing"; 786 LOG(ERROR) << "Failed to start the memory-infra thread for tracing";
769 return; 787 return;
770 } 788 }
771 789
772 const TraceConfig trace_config = 790 const TraceConfig::MemoryDumpConfig memory_dump_config =
Primiano Tucci (use gerrit) 2017/01/26 02:52:22 since you don't need to copy this anymore, you can
ssid 2017/01/26 21:48:57 Done.
ssid 2017/01/27 00:43:24 Actually this would need keep alive a copy of trac
773 TraceLog::GetInstance()->GetCurrentTraceConfig(); 791 TraceLog::GetInstance()->GetCurrentTraceConfig().memory_dump_config();
774 scoped_refptr<MemoryDumpSessionState> session_state = 792 scoped_refptr<MemoryDumpSessionState> session_state =
775 new MemoryDumpSessionState; 793 new MemoryDumpSessionState;
776 session_state->SetMemoryDumpConfig(trace_config.memory_dump_config()); 794 session_state->SetAllowedDumpModes(memory_dump_config.allowed_dump_modes);
795 session_state->set_heap_profiler_breakdown_threshold_bytes(
796 memory_dump_config.heap_profiler_options.breakdown_threshold_bytes);
777 if (heap_profiling_enabled_) { 797 if (heap_profiling_enabled_) {
778 // If heap profiling is enabled, the stack frame deduplicator and type name 798 // deduplicator will be in use. Add metadata events to write the frames
779 // deduplicator will be in use. Add metadata events to write the frames 799 // and type IDs.
780 // and type IDs. 800 // and type IDs.
781 session_state->SetStackFrameDeduplicator( 801 session_state->SetStackFrameDeduplicator(
782 WrapUnique(new StackFrameDeduplicator)); 802 WrapUnique(new StackFrameDeduplicator));
783 803
784 session_state->SetTypeNameDeduplicator( 804 session_state->SetTypeNameDeduplicator(
785 WrapUnique(new TypeNameDeduplicator)); 805 WrapUnique(new TypeNameDeduplicator));
786 806
787 TRACE_EVENT_API_ADD_METADATA_EVENT( 807 TRACE_EVENT_API_ADD_METADATA_EVENT(
788 TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames", 808 TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames",
789 "stackFrames", 809 "stackFrames",
790 MakeUnique<SessionStateConvertableProxy<StackFrameDeduplicator>>( 810 MakeUnique<SessionStateConvertableProxy<StackFrameDeduplicator>>(
791 session_state, &MemoryDumpSessionState::stack_frame_deduplicator)); 811 session_state, &MemoryDumpSessionState::stack_frame_deduplicator));
792 812
793 TRACE_EVENT_API_ADD_METADATA_EVENT( 813 TRACE_EVENT_API_ADD_METADATA_EVENT(
794 TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames", 814 TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames",
795 "typeNames", 815 "typeNames",
796 MakeUnique<SessionStateConvertableProxy<TypeNameDeduplicator>>( 816 MakeUnique<SessionStateConvertableProxy<TypeNameDeduplicator>>(
797 session_state, &MemoryDumpSessionState::type_name_deduplicator)); 817 session_state, &MemoryDumpSessionState::type_name_deduplicator));
798 } 818 }
799 819
820 std::unique_ptr<MemoryDumpScheduler> dump_scheduler(
821 WrapUnique(new MemoryDumpScheduler(this, dump_thread->task_runner())));
Primiano Tucci (use gerrit) 2017/01/26 02:52:23 no need to WrapUnique if you do this explicitly. E
ssid 2017/01/26 21:48:57 Done.
822 DCHECK_LE(memory_dump_config.triggers.size(), 3u);
Primiano Tucci (use gerrit) 2017/01/26 02:52:23 shouldn't this dcheck be in the scheduler?
ssid 2017/01/26 21:48:56 We cannot easily have this in scheduler since we n
823 for (const auto& trigger : memory_dump_config.triggers) {
824 DCHECK(session_state->IsDumpModeAllowed(trigger.level_of_detail));
Primiano Tucci (use gerrit) 2017/01/26 02:52:23 shoudln't this be: if (!allowed) { NOTREACHED();
ssid 2017/01/26 21:48:56 Done.
825 dump_scheduler->AddTrigger(trigger.trigger_type, trigger.level_of_detail,
826 trigger.min_time_between_dumps_ms);
827 }
828
800 { 829 {
801 AutoLock lock(lock_); 830 AutoLock lock(lock_);
802 831
803 DCHECK(delegate_); // At this point we must have a delegate. 832 DCHECK(delegate_); // At this point we must have a delegate.
804 session_state_ = session_state; 833 session_state_ = session_state;
805 834
806 DCHECK(!dump_thread_); 835 DCHECK(!dump_thread_);
807 dump_thread_ = std::move(dump_thread); 836 dump_thread_ = std::move(dump_thread);
837 dump_scheduler_ = std::move(dump_scheduler);
838
839 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
808 840
809 dump_providers_for_polling_.clear(); 841 dump_providers_for_polling_.clear();
810 for (const auto& mdpinfo : dump_providers_) { 842 for (const auto& mdpinfo : dump_providers_) {
811 if (mdpinfo->options.is_fast_polling_supported) 843 if (mdpinfo->options.is_fast_polling_supported)
812 dump_providers_for_polling_.insert(mdpinfo); 844 dump_providers_for_polling_.insert(mdpinfo);
813 } 845 }
846 // Notify polling supported only if some polling supported provider was
847 // registered, else RegisterPollingMDPOnDumpThread() will notify when first
848 // polling MDP registers.
849 if (!dump_providers_for_polling_.empty())
850 dump_scheduler_->NotifyPollingSupported();
814 851
815 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); 852 // Only coordinator process triggers periodic global memory dumps.
816 853 if (is_coordinator_)
817 if (!is_coordinator_) 854 dump_scheduler_->NotifyPeriodicTriggerSupported();
818 return;
819 } 855 }
820 856
821 // Enable periodic dumps if necessary.
822 periodic_dump_timer_.Start(trace_config.memory_dump_config().triggers);
823 } 857 }
824 858
825 void MemoryDumpManager::OnTraceLogDisabled() { 859 void MemoryDumpManager::OnTraceLogDisabled() {
826 // There might be a memory dump in progress while this happens. Therefore, 860 // There might be a memory dump in progress while this happens. Therefore,
827 // ensure that the MDM state which depends on the tracing enabled / disabled 861 // ensure that the MDM state which depends on the tracing enabled / disabled
828 // state is always accessed by the dumping methods holding the |lock_|. 862 // state is always accessed by the dumping methods holding the |lock_|.
829 if (!subtle::NoBarrier_Load(&memory_tracing_enabled_)) 863 if (!subtle::NoBarrier_Load(&memory_tracing_enabled_))
830 return; 864 return;
831 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); 865 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
832 std::unique_ptr<Thread> dump_thread; 866 std::unique_ptr<Thread> dump_thread;
867 std::unique_ptr<MemoryDumpScheduler> scheduler = nullptr;
Primiano Tucci (use gerrit) 2017/01/26 02:52:23 no need for the = nullptr part, it should be impli
ssid 2017/01/26 21:48:57 Done.
833 { 868 {
834 AutoLock lock(lock_); 869 AutoLock lock(lock_);
835 dump_thread = std::move(dump_thread_); 870 dump_thread = std::move(dump_thread_);
836 session_state_ = nullptr; 871 session_state_ = nullptr;
872 scheduler = std::move(dump_scheduler_);
837 } 873 }
874 scheduler->DisableAllTriggers();
838 875
839 // Thread stops are blocking and must be performed outside of the |lock_| 876 // Thread stops are blocking and must be performed outside of the |lock_|
840 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). 877 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it).
841 periodic_dump_timer_.Stop();
842 if (dump_thread) 878 if (dump_thread)
843 dump_thread->Stop(); 879 dump_thread->Stop();
844 880
845 // |dump_providers_for_polling_| must be cleared only after the dump thread is 881 // |dump_providers_for_polling_| must be cleared only after the dump thread is
846 // stopped (polling tasks are done). 882 // stopped (polling tasks are done).
847 { 883 {
848 AutoLock lock(lock_); 884 AutoLock lock(lock_);
849 for (const auto& mdpinfo : dump_providers_for_polling_) 885 for (const auto& mdpinfo : dump_providers_for_polling_)
850 mdpinfo->dump_provider->SuspendFastMemoryPolling(); 886 mdpinfo->dump_provider->SuspendFastMemoryPolling();
851 dump_providers_for_polling_.clear(); 887 dump_providers_for_polling_.clear();
852 } 888 }
853 } 889 }
854 890
855 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) { 891 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
856 AutoLock lock(lock_); 892 AutoLock lock(lock_);
857 if (!session_state_) 893 if (!session_state_)
858 return false; 894 return false;
859 return session_state_->memory_dump_config().allowed_dump_modes.count( 895 return session_state_->IsDumpModeAllowed(dump_mode);
860 dump_mode) != 0;
861 } 896 }
862 897
863 uint64_t MemoryDumpManager::GetTracingProcessId() const { 898 uint64_t MemoryDumpManager::GetTracingProcessId() const {
864 return delegate_->GetTracingProcessId(); 899 return delegate_->GetTracingProcessId();
865 } 900 }
866 901
867 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( 902 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
868 MemoryDumpProvider* dump_provider, 903 MemoryDumpProvider* dump_provider,
869 const char* name, 904 const char* name,
870 scoped_refptr<SequencedTaskRunner> task_runner, 905 scoped_refptr<SequencedTaskRunner> task_runner,
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after
916 const MemoryDumpArgs& dump_args) { 951 const MemoryDumpArgs& dump_args) {
917 auto iter = process_dumps.find(pid); 952 auto iter = process_dumps.find(pid);
918 if (iter == process_dumps.end()) { 953 if (iter == process_dumps.end()) {
919 std::unique_ptr<ProcessMemoryDump> new_pmd( 954 std::unique_ptr<ProcessMemoryDump> new_pmd(
920 new ProcessMemoryDump(session_state, dump_args)); 955 new ProcessMemoryDump(session_state, dump_args));
921 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; 956 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first;
922 } 957 }
923 return iter->second.get(); 958 return iter->second.get();
924 } 959 }
925 960
926 MemoryDumpManager::PeriodicGlobalDumpTimer::PeriodicGlobalDumpTimer() {}
927
928 MemoryDumpManager::PeriodicGlobalDumpTimer::~PeriodicGlobalDumpTimer() {
929 Stop();
930 }
931
932 void MemoryDumpManager::PeriodicGlobalDumpTimer::Start(
933 const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list) {
934 if (triggers_list.empty())
935 return;
936
937 // At the moment the periodic support is limited to at most one periodic
938 // trigger per dump mode. All intervals should be an integer multiple of the
939 // smallest interval specified.
940 periodic_dumps_count_ = 0;
941 uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
942 uint32_t light_dump_period_ms = 0;
943 uint32_t heavy_dump_period_ms = 0;
944 DCHECK_LE(triggers_list.size(), 3u);
945 auto* mdm = MemoryDumpManager::GetInstance();
946 for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) {
947 DCHECK_NE(0u, config.min_time_between_dumps_ms);
948 DCHECK_EQ(MemoryDumpType::PERIODIC_INTERVAL, config.trigger_type)
949 << "Only periodic_interval triggers are supported";
950 switch (config.level_of_detail) {
951 case MemoryDumpLevelOfDetail::BACKGROUND:
952 DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::BACKGROUND));
953 break;
954 case MemoryDumpLevelOfDetail::LIGHT:
955 DCHECK_EQ(0u, light_dump_period_ms);
956 DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::LIGHT));
957 light_dump_period_ms = config.min_time_between_dumps_ms;
958 break;
959 case MemoryDumpLevelOfDetail::DETAILED:
960 DCHECK_EQ(0u, heavy_dump_period_ms);
961 DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::DETAILED));
962 heavy_dump_period_ms = config.min_time_between_dumps_ms;
963 break;
964 }
965 min_timer_period_ms =
966 std::min(min_timer_period_ms, config.min_time_between_dumps_ms);
967 }
968
969 DCHECK_EQ(0u, light_dump_period_ms % min_timer_period_ms);
970 light_dump_rate_ = light_dump_period_ms / min_timer_period_ms;
971 DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
972 heavy_dump_rate_ = heavy_dump_period_ms / min_timer_period_ms;
973
974 timer_.Start(FROM_HERE, TimeDelta::FromMilliseconds(min_timer_period_ms),
975 base::Bind(&PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump,
976 base::Unretained(this)));
977 }
978
979 void MemoryDumpManager::PeriodicGlobalDumpTimer::Stop() {
980 if (IsRunning()) {
981 timer_.Stop();
982 }
983 }
984
985 bool MemoryDumpManager::PeriodicGlobalDumpTimer::IsRunning() {
986 return timer_.IsRunning();
987 }
988
989 void MemoryDumpManager::PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump() {
990 MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
991 if (light_dump_rate_ > 0 && periodic_dumps_count_ % light_dump_rate_ == 0)
992 level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
993 if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0)
994 level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
995 ++periodic_dumps_count_;
996
997 MemoryDumpManager::GetInstance()->RequestGlobalDump(
998 MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
999 }
1000
1001 } // namespace trace_event 961 } // namespace trace_event
1002 } // namespace base 962 } // namespace base
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698