OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <utility> | 8 #include <utility> |
9 | 9 |
10 #include "base/atomic_sequence_num.h" | 10 #include "base/atomic_sequence_num.h" |
11 #include "base/base_switches.h" | 11 #include "base/base_switches.h" |
12 #include "base/command_line.h" | 12 #include "base/command_line.h" |
13 #include "base/compiler_specific.h" | 13 #include "base/compiler_specific.h" |
14 #include "base/debug/debugging_flags.h" | 14 #include "base/debug/debugging_flags.h" |
15 #include "base/debug/stack_trace.h" | 15 #include "base/debug/stack_trace.h" |
16 #include "base/memory/ptr_util.h" | 16 #include "base/memory/ptr_util.h" |
17 #include "base/threading/thread.h" | 17 #include "base/threading/thread.h" |
18 #include "base/threading/thread_task_runner_handle.h" | 18 #include "base/threading/thread_task_runner_handle.h" |
| 19 #include "base/timer/timer.h" |
19 #include "base/trace_event/heap_profiler.h" | 20 #include "base/trace_event/heap_profiler.h" |
20 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" | 21 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" |
21 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h" | 22 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h" |
22 #include "base/trace_event/heap_profiler_type_name_deduplicator.h" | 23 #include "base/trace_event/heap_profiler_type_name_deduplicator.h" |
23 #include "base/trace_event/malloc_dump_provider.h" | 24 #include "base/trace_event/malloc_dump_provider.h" |
24 #include "base/trace_event/memory_dump_provider.h" | 25 #include "base/trace_event/memory_dump_provider.h" |
25 #include "base/trace_event/memory_dump_session_state.h" | 26 #include "base/trace_event/memory_dump_session_state.h" |
26 #include "base/trace_event/process_memory_dump.h" | 27 #include "base/trace_event/process_memory_dump.h" |
27 #include "base/trace_event/trace_event.h" | 28 #include "base/trace_event/trace_event.h" |
28 #include "base/trace_event/trace_event_argument.h" | 29 #include "base/trace_event/trace_event_argument.h" |
(...skipping 10 matching lines...) |
39 namespace base { | 40 namespace base { |
40 namespace trace_event { | 41 namespace trace_event { |
41 | 42 |
42 namespace { | 43 namespace { |
43 | 44 |
44 const int kTraceEventNumArgs = 1; | 45 const int kTraceEventNumArgs = 1; |
45 const char* kTraceEventArgNames[] = {"dumps"}; | 46 const char* kTraceEventArgNames[] = {"dumps"}; |
46 const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE}; | 47 const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE}; |
47 | 48 |
48 StaticAtomicSequenceNumber g_next_guid; | 49 StaticAtomicSequenceNumber g_next_guid; |
49 uint32_t g_periodic_dumps_count = 0; | |
50 uint32_t g_heavy_dumps_rate = 0; | |
51 MemoryDumpManager* g_instance_for_testing = nullptr; | 50 MemoryDumpManager* g_instance_for_testing = nullptr; |
52 | 51 |
53 void RequestPeriodicGlobalDump() { | 52 // The names of dump providers whitelisted for background tracing. Dump |
54 MemoryDumpLevelOfDetail level_of_detail; | 53 // providers can be added here only if the background mode dump has very |
55 if (g_heavy_dumps_rate == 0) { | 54 // low performance and memory overhead. |
56 level_of_detail = MemoryDumpLevelOfDetail::LIGHT; | 55 const char* const kDumpProviderWhitelist[] = { |
57 } else { | 56 // TODO(ssid): Fill this list with dump provider names which support |
58 level_of_detail = g_periodic_dumps_count == 0 | 57 // background mode, crbug.com/613198. |
59 ? MemoryDumpLevelOfDetail::DETAILED | 58 nullptr, // End of list marker. |
60 : MemoryDumpLevelOfDetail::LIGHT; | 59 }; |
61 | |
62 if (++g_periodic_dumps_count == g_heavy_dumps_rate) | |
63 g_periodic_dumps_count = 0; | |
64 } | |
65 | |
66 MemoryDumpManager::GetInstance()->RequestGlobalDump( | |
67 MemoryDumpType::PERIODIC_INTERVAL, level_of_detail); | |
68 } | |
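 | // Illustrative sketch (provider name hypothetical): once a provider |
 | // registered as "FooDumpProvider" is added above, e.g. |
 | //   const char* const kDumpProviderWhitelist[] = { |
 | //       "FooDumpProvider", |
 | //       nullptr,  // End of list marker. |
 | //   }; |
 | // it will also be invoked for BACKGROUND-mode dumps; IsNameInList() |
 | // below walks the entries up to the nullptr terminator. |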
69 | 60 |
70 // Callback wrapper to hook upon the completion of RequestGlobalDump() and | 61 // Callback wrapper to hook upon the completion of RequestGlobalDump() and |
71 // inject trace markers. | 62 // inject trace markers. |
72 void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback, | 63 void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback, |
73 uint64_t dump_guid, | 64 uint64_t dump_guid, |
74 bool success) { | 65 bool success) { |
75 TRACE_EVENT_NESTABLE_ASYNC_END1( | 66 TRACE_EVENT_NESTABLE_ASYNC_END1( |
76 MemoryDumpManager::kTraceCategory, "GlobalMemoryDump", | 67 MemoryDumpManager::kTraceCategory, "GlobalMemoryDump", |
77 TRACE_ID_MANGLE(dump_guid), "success", success); | 68 TRACE_ID_MANGLE(dump_guid), "success", success); |
78 | 69 |
(...skipping 23 matching lines...) |
102 void EstimateTraceMemoryOverhead( | 93 void EstimateTraceMemoryOverhead( |
103 TraceEventMemoryOverhead* overhead) override { | 94 TraceEventMemoryOverhead* overhead) override { |
104 return (session_state.get()->*getter_function)() | 95 return (session_state.get()->*getter_function)() |
105 ->EstimateTraceMemoryOverhead(overhead); | 96 ->EstimateTraceMemoryOverhead(overhead); |
106 } | 97 } |
107 | 98 |
108 scoped_refptr<MemoryDumpSessionState> session_state; | 99 scoped_refptr<MemoryDumpSessionState> session_state; |
109 GetterFunctPtr const getter_function; | 100 GetterFunctPtr const getter_function; |
110 }; | 101 }; |
111 | 102 |
 | 103 // Checks if the name is in the given |list|. The last element of the list |
 | 104 // must be nullptr. |
| 105 bool IsNameInList(const char* name, const char* const* list) { |
| 106 for (size_t i = 0; list[i] != nullptr; ++i) { |
| 107 if (strcmp(name, list[i]) == 0) |
| 108 return true; |
| 109 } |
| 110 return false; |
| 111 } |
| 112 |
112 } // namespace | 113 } // namespace |
113 | 114 |
114 // static | 115 // static |
115 const char* const MemoryDumpManager::kTraceCategory = | 116 const char* const MemoryDumpManager::kTraceCategory = |
116 TRACE_DISABLED_BY_DEFAULT("memory-infra"); | 117 TRACE_DISABLED_BY_DEFAULT("memory-infra"); |
117 | 118 |
118 // static | 119 // static |
119 const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3; | 120 const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3; |
120 | 121 |
121 // static | 122 // static |
(...skipping 20 matching lines...) |
142 | 143 |
143 // static | 144 // static |
144 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) { | 145 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) { |
145 g_instance_for_testing = instance; | 146 g_instance_for_testing = instance; |
146 } | 147 } |
147 | 148 |
148 MemoryDumpManager::MemoryDumpManager() | 149 MemoryDumpManager::MemoryDumpManager() |
149 : delegate_(nullptr), | 150 : delegate_(nullptr), |
150 is_coordinator_(false), | 151 is_coordinator_(false), |
151 memory_tracing_enabled_(0), | 152 memory_tracing_enabled_(0), |
| 153 dump_provider_whitelist_(kDumpProviderWhitelist), |
152 tracing_process_id_(kInvalidTracingProcessId), | 154 tracing_process_id_(kInvalidTracingProcessId), |
153 dumper_registrations_ignored_for_testing_(false), | 155 dumper_registrations_ignored_for_testing_(false), |
154 heap_profiling_enabled_(false) { | 156 heap_profiling_enabled_(false) { |
155 g_next_guid.GetNext(); // Make sure that first guid is not zero. | 157 g_next_guid.GetNext(); // Make sure that first guid is not zero. |
156 | 158 |
157 // At this point the command line may not be initialized but we try to | 159 // At this point the command line may not be initialized but we try to |
158 // enable the heap profiler to capture allocations as soon as possible. | 160 // enable the heap profiler to capture allocations as soon as possible. |
159 EnableHeapProfilingIfNeeded(); | 161 EnableHeapProfilingIfNeeded(); |
160 } | 162 } |
161 | 163 |
(...skipping 103 matching lines...) |
265 } | 267 } |
266 | 268 |
267 void MemoryDumpManager::RegisterDumpProviderInternal( | 269 void MemoryDumpManager::RegisterDumpProviderInternal( |
268 MemoryDumpProvider* mdp, | 270 MemoryDumpProvider* mdp, |
269 const char* name, | 271 const char* name, |
270 scoped_refptr<SequencedTaskRunner> task_runner, | 272 scoped_refptr<SequencedTaskRunner> task_runner, |
271 const MemoryDumpProvider::Options& options) { | 273 const MemoryDumpProvider::Options& options) { |
272 if (dumper_registrations_ignored_for_testing_) | 274 if (dumper_registrations_ignored_for_testing_) |
273 return; | 275 return; |
274 | 276 |
| 277 bool whitelisted_for_background_mode = |
| 278 IsNameInList(name, dump_provider_whitelist_); |
275 scoped_refptr<MemoryDumpProviderInfo> mdpinfo = | 279 scoped_refptr<MemoryDumpProviderInfo> mdpinfo = |
276 new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options); | 280 new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options, |
| 281 whitelisted_for_background_mode); |
277 | 282 |
278 { | 283 { |
279 AutoLock lock(lock_); | 284 AutoLock lock(lock_); |
280 bool already_registered = !dump_providers_.insert(mdpinfo).second; | 285 bool already_registered = !dump_providers_.insert(mdpinfo).second; |
281 // This actually happens in some tests which don't have a clean tear-down | 286 // This actually happens in some tests which don't have a clean tear-down |
282 // path for RenderThreadImpl::Init(). | 287 // path for RenderThreadImpl::Init(). |
283 if (already_registered) | 288 if (already_registered) |
284 return; | 289 return; |
285 } | 290 } |
286 | 291 |
(...skipping 155 matching lines...) |
442 pmd_async_state->pending_dump_providers.clear(); | 447 pmd_async_state->pending_dump_providers.clear(); |
443 } | 448 } |
444 if (pmd_async_state->pending_dump_providers.empty()) | 449 if (pmd_async_state->pending_dump_providers.empty()) |
445 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state)); | 450 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state)); |
446 | 451 |
447 // Read MemoryDumpProviderInfo thread safety considerations in | 452 // Read MemoryDumpProviderInfo thread safety considerations in |
448 // memory_dump_manager.h when accessing |mdpinfo| fields. | 453 // memory_dump_manager.h when accessing |mdpinfo| fields. |
449 MemoryDumpProviderInfo* mdpinfo = | 454 MemoryDumpProviderInfo* mdpinfo = |
450 pmd_async_state->pending_dump_providers.back().get(); | 455 pmd_async_state->pending_dump_providers.back().get(); |
451 | 456 |
| 457 // If we are in background tracing, we should invoke only the whitelisted |
| 458 // providers. Ignore other providers and continue. |
| 459 if (pmd_async_state->req_args.level_of_detail == |
| 460 MemoryDumpLevelOfDetail::BACKGROUND && |
| 461 !mdpinfo->whitelisted_for_background_mode) { |
| 462 pmd_async_state->pending_dump_providers.pop_back(); |
| 463 return SetupNextMemoryDump(std::move(pmd_async_state)); |
| 464 } |
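 | // Note: this recurses once per skipped provider; the pending list |
 | // shrinks on every call, so the depth is bounded by the number of |
 | // registered dump providers. |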
| 465 |
452 // If the dump provider did not specify a task runner affinity, dump on | 466 // If the dump provider did not specify a task runner affinity, dump on |
453 // |dump_thread_| which is already checked above for presence. | 467 // |dump_thread_| which is already checked above for presence. |
454 SequencedTaskRunner* task_runner = mdpinfo->task_runner.get(); | 468 SequencedTaskRunner* task_runner = mdpinfo->task_runner.get(); |
455 if (!task_runner) { | 469 if (!task_runner) { |
456 DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner); | 470 DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner); |
457 task_runner = pmd_async_state->dump_thread_task_runner.get(); | 471 task_runner = pmd_async_state->dump_thread_task_runner.get(); |
458 DCHECK(task_runner); | 472 DCHECK(task_runner); |
459 } | 473 } |
460 | 474 |
461 if (mdpinfo->options.dumps_on_single_thread_task_runner && | 475 if (mdpinfo->options.dumps_on_single_thread_task_runner && |
(...skipping 163 matching lines...) |
625 // while the |lock_| is taken; | 639 // while the |lock_| is taken; |
626 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); | 640 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); |
627 | 641 |
628 // Spin up the thread used to invoke unbound dump providers. | 642 // Spin up the thread used to invoke unbound dump providers. |
629 std::unique_ptr<Thread> dump_thread(new Thread("MemoryInfra")); | 643 std::unique_ptr<Thread> dump_thread(new Thread("MemoryInfra")); |
630 if (!dump_thread->Start()) { | 644 if (!dump_thread->Start()) { |
631 LOG(ERROR) << "Failed to start the memory-infra thread for tracing"; | 645 LOG(ERROR) << "Failed to start the memory-infra thread for tracing"; |
632 return; | 646 return; |
633 } | 647 } |
634 | 648 |
635 AutoLock lock(lock_); | 649 const TraceConfig trace_config = |
636 | 650 TraceLog::GetInstance()->GetCurrentTraceConfig(); |
637 DCHECK(delegate_); // At this point we must have a delegate. | 651 scoped_refptr<MemoryDumpSessionState> session_state = |
638 session_state_ = new MemoryDumpSessionState; | 652 new MemoryDumpSessionState; |
639 | 653 session_state->SetMemoryDumpConfig(trace_config.memory_dump_config()); |
640 if (heap_profiling_enabled_) { | 654 if (heap_profiling_enabled_) { |
641 // If heap profiling is enabled, the stack frame deduplicator and type name | 655 // If heap profiling is enabled, the stack frame deduplicator and type name |
642 // deduplicator will be in use. Add metadata events to write the frames | 656 // deduplicator will be in use. Add metadata events to write the frames |
643 // and type IDs. | 657 // and type IDs. |
644 session_state_->SetStackFrameDeduplicator( | 658 session_state->SetStackFrameDeduplicator( |
645 WrapUnique(new StackFrameDeduplicator)); | 659 WrapUnique(new StackFrameDeduplicator)); |
646 | 660 |
647 session_state_->SetTypeNameDeduplicator( | 661 session_state->SetTypeNameDeduplicator( |
648 WrapUnique(new TypeNameDeduplicator)); | 662 WrapUnique(new TypeNameDeduplicator)); |
649 | 663 |
650 TRACE_EVENT_API_ADD_METADATA_EVENT( | 664 TRACE_EVENT_API_ADD_METADATA_EVENT( |
651 TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames", | 665 TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames", |
652 "stackFrames", | 666 "stackFrames", |
653 WrapUnique( | 667 WrapUnique(new SessionStateConvertableProxy<StackFrameDeduplicator>( |
654 new SessionStateConvertableProxy<StackFrameDeduplicator>( | 668 session_state, &MemoryDumpSessionState::stack_frame_deduplicator))); |
655 session_state_, | |
656 &MemoryDumpSessionState::stack_frame_deduplicator))); | |
657 | 669 |
658 TRACE_EVENT_API_ADD_METADATA_EVENT( | 670 TRACE_EVENT_API_ADD_METADATA_EVENT( |
659 TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames", | 671 TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames", |
660 "typeNames", | 672 "typeNames", |
661 WrapUnique(new SessionStateConvertableProxy<TypeNameDeduplicator>( | 673 WrapUnique(new SessionStateConvertableProxy<TypeNameDeduplicator>( |
662 session_state_, &MemoryDumpSessionState::type_name_deduplicator))); | 674 session_state, &MemoryDumpSessionState::type_name_deduplicator))); |
663 } | 675 } |
664 | 676 |
665 DCHECK(!dump_thread_); | 677 { |
666 dump_thread_ = std::move(dump_thread); | 678 AutoLock lock(lock_); |
667 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); | |
668 | 679 |
669 // TODO(primiano): This is a temporary hack to disable periodic memory dumps | 680 DCHECK(delegate_); // At this point we must have a delegate. |
670 // when running memory benchmarks until telemetry uses TraceConfig to | 681 session_state_ = session_state; |
671 // enable/disable periodic dumps. See crbug.com/529184 . | 682 |
672 if (!is_coordinator_ || | 683 DCHECK(!dump_thread_); |
673 CommandLine::ForCurrentProcess()->HasSwitch( | 684 dump_thread_ = std::move(dump_thread); |
674 "enable-memory-benchmarking")) { | 685 |
675 return; | 686 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); |
| 687 |
| 688 // TODO(primiano): This is a temporary hack to disable periodic memory dumps |
| 689 // when running memory benchmarks until telemetry uses TraceConfig to |
| 690 // enable/disable periodic dumps. See crbug.com/529184 . |
| 691 if (!is_coordinator_ || |
| 692 CommandLine::ForCurrentProcess()->HasSwitch( |
| 693 "enable-memory-benchmarking")) { |
| 694 return; |
| 695 } |
676 } | 696 } |
677 | 697 |
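 | // For reference (sketch): the triggers handed to the timer below usually |
 | // originate from a trace config whose memory_dump_config has roughly |
 | // this shape: |
 | //   "memory_dump_config": { |
 | //     "triggers": [ |
 | //       {"mode": "light", "periodic_interval_ms": 250}, |
 | //       {"mode": "detailed", "periodic_interval_ms": 1000} |
 | //     ] |
 | //   } |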
678 // Enable periodic dumps. At the moment the periodic support is limited to at | 698 // Enable periodic dumps if necessary. |
679 // most one low-detail periodic dump and at most one high-detail periodic | 699 periodic_dump_timer_.Start(trace_config.memory_dump_config().triggers); |
680 // dump. If both are specified the high-detail period must be an integer | |
681 // multiple of the low-level one. | |
682 g_periodic_dumps_count = 0; | |
683 const TraceConfig trace_config = | |
684 TraceLog::GetInstance()->GetCurrentTraceConfig(); | |
685 session_state_->SetMemoryDumpConfig(trace_config.memory_dump_config()); | |
686 const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list = | |
687 trace_config.memory_dump_config().triggers; | |
688 if (triggers_list.empty()) | |
689 return; | |
690 | |
691 uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max(); | |
692 uint32_t heavy_dump_period_ms = 0; | |
693 DCHECK_LE(triggers_list.size(), 2u); | |
694 for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) { | |
695 DCHECK(config.periodic_interval_ms); | |
696 if (config.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) | |
697 heavy_dump_period_ms = config.periodic_interval_ms; | |
698 min_timer_period_ms = | |
699 std::min(min_timer_period_ms, config.periodic_interval_ms); | |
700 } | |
701 DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms); | |
702 g_heavy_dumps_rate = heavy_dump_period_ms / min_timer_period_ms; | |
703 | |
704 periodic_dump_timer_.Start(FROM_HERE, | |
705 TimeDelta::FromMilliseconds(min_timer_period_ms), | |
706 base::Bind(&RequestPeriodicGlobalDump)); | |
707 } | 700 } |
708 | 701 |
709 void MemoryDumpManager::OnTraceLogDisabled() { | 702 void MemoryDumpManager::OnTraceLogDisabled() { |
710 // There might be a memory dump in progress while this happens. Therefore, | 703 // There might be a memory dump in progress while this happens. Therefore, |
711 // ensure that the MDM state which depends on the tracing enabled / disabled | 704 // ensure that the MDM state which depends on the tracing enabled / disabled |
712 // state is always accessed by the dumping methods holding the |lock_|. | 705 // state is always accessed by the dumping methods holding the |lock_|. |
713 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); | 706 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); |
714 std::unique_ptr<Thread> dump_thread; | 707 std::unique_ptr<Thread> dump_thread; |
715 { | 708 { |
716 AutoLock lock(lock_); | 709 AutoLock lock(lock_); |
717 dump_thread = std::move(dump_thread_); | 710 dump_thread = std::move(dump_thread_); |
718 session_state_ = nullptr; | 711 session_state_ = nullptr; |
719 } | 712 } |
720 | 713 |
721 // Thread stops are blocking and must be performed outside of the |lock_| | 714 // Thread stops are blocking and must be performed outside of the |lock_| |
722 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). | 715 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). |
723 periodic_dump_timer_.Stop(); | 716 periodic_dump_timer_.Stop(); |
724 if (dump_thread) | 717 if (dump_thread) |
725 dump_thread->Stop(); | 718 dump_thread->Stop(); |
726 } | 719 } |
727 | 720 |
728 uint64_t MemoryDumpManager::GetTracingProcessId() const { | 721 uint64_t MemoryDumpManager::GetTracingProcessId() const { |
729 return delegate_->GetTracingProcessId(); | 722 return delegate_->GetTracingProcessId(); |
730 } | 723 } |
731 | 724 |
732 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( | 725 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( |
733 MemoryDumpProvider* dump_provider, | 726 MemoryDumpProvider* dump_provider, |
734 const char* name, | 727 const char* name, |
735 scoped_refptr<SequencedTaskRunner> task_runner, | 728 scoped_refptr<SequencedTaskRunner> task_runner, |
736 const MemoryDumpProvider::Options& options) | 729 const MemoryDumpProvider::Options& options, |
| 730 bool whitelisted_for_background_mode) |
737 : dump_provider(dump_provider), | 731 : dump_provider(dump_provider), |
738 name(name), | 732 name(name), |
739 task_runner(std::move(task_runner)), | 733 task_runner(std::move(task_runner)), |
740 options(options), | 734 options(options), |
741 consecutive_failures(0), | 735 consecutive_failures(0), |
742 disabled(false) {} | 736 disabled(false), |
| 737 whitelisted_for_background_mode(whitelisted_for_background_mode) {} |
743 | 738 |
744 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {} | 739 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {} |
745 | 740 |
746 bool MemoryDumpManager::MemoryDumpProviderInfo::Comparator::operator()( | 741 bool MemoryDumpManager::MemoryDumpProviderInfo::Comparator::operator()( |
747 const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& a, | 742 const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& a, |
748 const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& b) const { | 743 const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& b) const { |
749 if (!a || !b) | 744 if (!a || !b) |
750 return a.get() < b.get(); | 745 return a.get() < b.get(); |
751 // Ensure that unbound providers (task_runner == nullptr) always run last. | 746 // Ensure that unbound providers (task_runner == nullptr) always run last. |
752 // Rationale: some unbound dump providers are known to be slow; keep them last | 747 // Rationale: some unbound dump providers are known to be slow; keep them last |
(...skipping 25 matching lines...) |
778 GetOrCreateMemoryDumpContainerForProcess(ProcessId pid) { | 773 GetOrCreateMemoryDumpContainerForProcess(ProcessId pid) { |
779 auto iter = process_dumps.find(pid); | 774 auto iter = process_dumps.find(pid); |
780 if (iter == process_dumps.end()) { | 775 if (iter == process_dumps.end()) { |
781 std::unique_ptr<ProcessMemoryDump> new_pmd( | 776 std::unique_ptr<ProcessMemoryDump> new_pmd( |
782 new ProcessMemoryDump(session_state)); | 777 new ProcessMemoryDump(session_state)); |
783 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; | 778 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; |
784 } | 779 } |
785 return iter->second.get(); | 780 return iter->second.get(); |
786 } | 781 } |
787 | 782 |
| 783 MemoryDumpManager::PeriodicGlobalDumpTimer::PeriodicGlobalDumpTimer() {} |
| 784 |
| 785 MemoryDumpManager::PeriodicGlobalDumpTimer::~PeriodicGlobalDumpTimer() { |
| 786 Stop(); |
| 787 } |
| 788 |
| 789 void MemoryDumpManager::PeriodicGlobalDumpTimer::Start( |
| 790 const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list) { |
| 791 if (triggers_list.empty()) |
| 792 return; |
| 793 |
| 794 // At the moment the periodic support is limited to at most one periodic |
 | 795 // trigger per dump mode. All intervals must be an integer multiple of the |
| 796 // smallest interval specified. |
| 797 periodic_dumps_count_ = 0; |
| 798 uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max(); |
| 799 uint32_t light_dump_period_ms = 0; |
| 800 uint32_t heavy_dump_period_ms = 0; |
| 801 DCHECK_LE(triggers_list.size(), 3u); |
| 802 for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) { |
| 803 DCHECK_NE(0u, config.periodic_interval_ms); |
| 804 if (config.level_of_detail == MemoryDumpLevelOfDetail::LIGHT) { |
| 805 DCHECK_EQ(0u, light_dump_period_ms); |
| 806 light_dump_period_ms = config.periodic_interval_ms; |
| 807 } else if (config.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) { |
| 808 DCHECK_EQ(0u, heavy_dump_period_ms); |
| 809 heavy_dump_period_ms = config.periodic_interval_ms; |
| 810 } |
| 811 min_timer_period_ms = |
| 812 std::min(min_timer_period_ms, config.periodic_interval_ms); |
| 813 } |
| 814 |
| 815 DCHECK_EQ(0u, light_dump_period_ms % min_timer_period_ms); |
| 816 light_dump_rate_ = light_dump_period_ms / min_timer_period_ms; |
| 817 DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms); |
| 818 heavy_dump_rate_ = heavy_dump_period_ms / min_timer_period_ms; |
| 819 |
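 | // Worked example (illustrative): triggers {LIGHT every 250 ms, DETAILED |
 | // every 1000 ms} give min_timer_period_ms = 250, light_dump_rate_ = 1 |
 | // and heavy_dump_rate_ = 4, so the timer below ticks every 250 ms. |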
| 820 timer_.Start(FROM_HERE, TimeDelta::FromMilliseconds(min_timer_period_ms), |
| 821 base::Bind(&PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump, |
| 822 base::Unretained(this))); |
| 823 } |
| 824 |
| 825 void MemoryDumpManager::PeriodicGlobalDumpTimer::Stop() { |
| 826 if (IsRunning()) { |
| 827 timer_.Stop(); |
| 828 } |
| 829 } |
| 830 |
| 831 bool MemoryDumpManager::PeriodicGlobalDumpTimer::IsRunning() { |
| 832 return timer_.IsRunning(); |
| 833 } |
| 834 |
| 835 void MemoryDumpManager::PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump() { |
| 836 MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND; |
| 837 if (light_dump_rate_ > 0 && periodic_dumps_count_ % light_dump_rate_ == 0) |
| 838 level_of_detail = MemoryDumpLevelOfDetail::LIGHT; |
| 839 if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0) |
| 840 level_of_detail = MemoryDumpLevelOfDetail::DETAILED; |
| 841 ++periodic_dumps_count_; |
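 | // E.g. (illustrative) triggers {BACKGROUND @ 250 ms, LIGHT @ 500 ms, |
 | // DETAILED @ 1000 ms} give light_dump_rate_ = 2 and heavy_dump_rate_ = 4, |
 | // producing the tick sequence DETAILED, BACKGROUND, LIGHT, BACKGROUND, |
 | // DETAILED, ...; a tick matching both rates is upgraded to DETAILED. |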
| 842 |
| 843 MemoryDumpManager::GetInstance()->RequestGlobalDump( |
| 844 MemoryDumpType::PERIODIC_INTERVAL, level_of_detail); |
| 845 } |
| 846 |
788 } // namespace trace_event | 847 } // namespace trace_event |
789 } // namespace base | 848 } // namespace base |