Chromium Code Reviews
Side by Side Diff: base/trace_event/memory_dump_manager.cc

Issue 1995573003: [tracing] Introduce BACKGROUND mode in MemoryInfra (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: with periodic again. Created 4 years, 7 months ago
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/trace_event/memory_dump_manager.h" 5 #include "base/trace_event/memory_dump_manager.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <utility> 8 #include <utility>
9 9
10 #include "base/atomic_sequence_num.h" 10 #include "base/atomic_sequence_num.h"
11 #include "base/base_switches.h" 11 #include "base/base_switches.h"
12 #include "base/command_line.h" 12 #include "base/command_line.h"
13 #include "base/compiler_specific.h" 13 #include "base/compiler_specific.h"
14 #include "base/debug/debugging_flags.h" 14 #include "base/debug/debugging_flags.h"
15 #include "base/debug/stack_trace.h" 15 #include "base/debug/stack_trace.h"
16 #include "base/memory/ptr_util.h" 16 #include "base/memory/ptr_util.h"
17 #include "base/threading/thread.h" 17 #include "base/threading/thread.h"
18 #include "base/threading/thread_task_runner_handle.h" 18 #include "base/threading/thread_task_runner_handle.h"
19 #include "base/timer/timer.h"
19 #include "base/trace_event/heap_profiler.h" 20 #include "base/trace_event/heap_profiler.h"
20 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" 21 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
21 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h" 22 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
22 #include "base/trace_event/heap_profiler_type_name_deduplicator.h" 23 #include "base/trace_event/heap_profiler_type_name_deduplicator.h"
23 #include "base/trace_event/malloc_dump_provider.h" 24 #include "base/trace_event/malloc_dump_provider.h"
24 #include "base/trace_event/memory_dump_provider.h" 25 #include "base/trace_event/memory_dump_provider.h"
25 #include "base/trace_event/memory_dump_session_state.h" 26 #include "base/trace_event/memory_dump_session_state.h"
26 #include "base/trace_event/process_memory_dump.h" 27 #include "base/trace_event/process_memory_dump.h"
27 #include "base/trace_event/trace_event.h" 28 #include "base/trace_event/trace_event.h"
28 #include "base/trace_event/trace_event_argument.h" 29 #include "base/trace_event/trace_event_argument.h"
(...skipping 10 matching lines...)
39 namespace base { 40 namespace base {
40 namespace trace_event { 41 namespace trace_event {
41 42
42 namespace { 43 namespace {
43 44
44 const int kTraceEventNumArgs = 1; 45 const int kTraceEventNumArgs = 1;
45 const char* kTraceEventArgNames[] = {"dumps"}; 46 const char* kTraceEventArgNames[] = {"dumps"};
46 const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE}; 47 const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};
47 48
48 StaticAtomicSequenceNumber g_next_guid; 49 StaticAtomicSequenceNumber g_next_guid;
49 uint32_t g_periodic_dumps_count = 0;
50 uint32_t g_heavy_dumps_rate = 0;
51 MemoryDumpManager* g_instance_for_testing = nullptr; 50 MemoryDumpManager* g_instance_for_testing = nullptr;
52 51
53 void RequestPeriodicGlobalDump() { 52 // The names of dump providers whitelisted for background tracing. Dump
54 MemoryDumpLevelOfDetail level_of_detail; 53 // providers can be added here only if the background mode dump has very
55 if (g_heavy_dumps_rate == 0) { 54 // little performance and memory overhead.
56 level_of_detail = MemoryDumpLevelOfDetail::LIGHT; 55 const char* const kDumpProviderWhitelist[] = {
57 } else { 56 "WhitelistedTestDumpProvider", // For testing.
Primiano Tucci (use gerrit) 2016/05/26 17:20:53 I think a better approach is to: - Make this an em
ssid 2016/05/26 22:12:54 Yeah the problem was that I cannot get the array s
58 level_of_detail = g_periodic_dumps_count == 0 57 };
59 ? MemoryDumpLevelOfDetail::DETAILED
60 : MemoryDumpLevelOfDetail::LIGHT;
61
62 if (++g_periodic_dumps_count == g_heavy_dumps_rate)
63 g_periodic_dumps_count = 0;
64 }
65
66 MemoryDumpManager::GetInstance()->RequestGlobalDump(
67 MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
68 }
69 58
70 // Callback wrapper to hook upon the completion of RequestGlobalDump() and 59 // Callback wrapper to hook upon the completion of RequestGlobalDump() and
71 // inject trace markers. 60 // inject trace markers.
72 void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback, 61 void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback,
73 uint64_t dump_guid, 62 uint64_t dump_guid,
74 bool success) { 63 bool success) {
75 TRACE_EVENT_NESTABLE_ASYNC_END1( 64 TRACE_EVENT_NESTABLE_ASYNC_END1(
76 MemoryDumpManager::kTraceCategory, "GlobalMemoryDump", 65 MemoryDumpManager::kTraceCategory, "GlobalMemoryDump",
77 TRACE_ID_MANGLE(dump_guid), "success", success); 66 TRACE_ID_MANGLE(dump_guid), "success", success);
78 67
(...skipping 23 matching lines...)
102 void EstimateTraceMemoryOverhead( 91 void EstimateTraceMemoryOverhead(
103 TraceEventMemoryOverhead* overhead) override { 92 TraceEventMemoryOverhead* overhead) override {
104 return (session_state.get()->*getter_function)() 93 return (session_state.get()->*getter_function)()
105 ->EstimateTraceMemoryOverhead(overhead); 94 ->EstimateTraceMemoryOverhead(overhead);
106 } 95 }
107 96
108 scoped_refptr<MemoryDumpSessionState> session_state; 97 scoped_refptr<MemoryDumpSessionState> session_state;
109 GetterFunctPtr const getter_function; 98 GetterFunctPtr const getter_function;
110 }; 99 };
111 100
101 // Checks if the dump provider is in the whitelist.
102 bool IsDumpProviderInWhitelist(const char* name) {
103 for (size_t i = 0; i < arraysize(kDumpProviderWhitelist); ++i) {
104 if (strcmp(name, kDumpProviderWhitelist[i]) == 0)
105 return true;
106 }
107 return false;
108 }
109
112 } // namespace 110 } // namespace
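
For context, a minimal standalone sketch of the whitelist lookup introduced above. It keeps the same exact-match semantics as IsDumpProviderInWhitelist(); the sizeof-based array-size computation stands in for the arraysize() macro used in the patch, and the second provider name is hypothetical, only there to illustrate a miss.

  #include <cstddef>
  #include <cstdio>
  #include <cstring>

  namespace {

  const char* const kDumpProviderWhitelist[] = {
      "WhitelistedTestDumpProvider",  // For testing.
  };

  // Linear scan; the whitelist is expected to stay very small.
  bool IsDumpProviderInWhitelist(const char* name) {
    for (std::size_t i = 0;
         i < sizeof(kDumpProviderWhitelist) / sizeof(kDumpProviderWhitelist[0]);
         ++i) {
      if (strcmp(name, kDumpProviderWhitelist[i]) == 0)
        return true;
    }
    return false;
  }

  }  // namespace

  int main() {
    printf("%d\n", IsDumpProviderInWhitelist("WhitelistedTestDumpProvider"));  // 1
    printf("%d\n", IsDumpProviderInWhitelist("SomeOtherDumpProvider"));        // 0 (hypothetical name)
    return 0;
  }
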
113 111
112 // Sets up periodic memory dump timers to start global dump requests based on
113 // the dump triggers from trace config.
114 class MemoryDumpManager::PeriodicGlobalDumpInvokeHelper {
Primiano Tucci (use gerrit) 2016/05/26 17:20:53 nit: I think this should go after the constants be
ssid 2016/05/26 22:12:54 Added to the last.
115 public:
116 PeriodicGlobalDumpInvokeHelper(
117 const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list);
118 ~PeriodicGlobalDumpInvokeHelper();
119
120 // Periodically called by the timer.
121 void RequestPeriodicGlobalDump();
122
123 private:
124 RepeatingTimer periodic_dump_timer_;
125 uint32_t periodic_dumps_count_;
126 uint32_t light_dump_rate_;
127 uint32_t heavy_dump_rate_;
128 };
129
130 MemoryDumpManager::PeriodicGlobalDumpInvokeHelper::
131 PeriodicGlobalDumpInvokeHelper(
Primiano Tucci (use gerrit) 2016/05/26 17:20:53 In general the constructor should not do actions,
ssid 2016/05/26 22:12:54 Makes sense.
132 const std::vector<TraceConfig::MemoryDumpConfig::Trigger>&
133 triggers_list)
134 : periodic_dumps_count_(0) {
135 uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
136 uint32_t light_dump_period_ms = 0;
137 uint32_t heavy_dump_period_ms = 0;
138 DCHECK(triggers_list.size());
139 DCHECK_LE(triggers_list.size(), 3u);
140 for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) {
141 if (config.level_of_detail == MemoryDumpLevelOfDetail::LIGHT) {
142 DCHECK_EQ(0u, light_dump_period_ms);
143 light_dump_period_ms = config.periodic_interval_ms;
144 } else if (config.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
145 DCHECK_EQ(0u, heavy_dump_period_ms);
146 heavy_dump_period_ms = config.periodic_interval_ms;
147 }
148 min_timer_period_ms =
149 std::min(min_timer_period_ms, config.periodic_interval_ms);
150 }
151
152 // Do not start timer if period is 0 for any trigger.
153 if (!min_timer_period_ms)
Primiano Tucci (use gerrit) 2016/05/26 17:20:53 Hmm this is a bit counter-intuitive. This means tha
ssid 2016/05/26 22:12:54 Yeah, I was thinking it is useful to specify mode
154 return;
155
156 DCHECK_EQ(0u, light_dump_period_ms % min_timer_period_ms);
157 light_dump_rate_ = light_dump_period_ms / min_timer_period_ms;
158 DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
159 heavy_dump_rate_ = heavy_dump_period_ms / min_timer_period_ms;
160
161 periodic_dump_timer_.Start(
162 FROM_HERE, TimeDelta::FromMilliseconds(min_timer_period_ms),
163 base::Bind(&PeriodicGlobalDumpInvokeHelper::RequestPeriodicGlobalDump,
164 base::Unretained(this)));
165 }
166
167 MemoryDumpManager::PeriodicGlobalDumpInvokeHelper::
168 ~PeriodicGlobalDumpInvokeHelper() {
169 periodic_dump_timer_.Stop();
170 }
171
172 void MemoryDumpManager::PeriodicGlobalDumpInvokeHelper::
173 RequestPeriodicGlobalDump() {
174 MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
175 if (light_dump_rate_ > 0 && periodic_dumps_count_ % light_dump_rate_ == 0)
176 level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
177 if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0)
178 level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
179 ++periodic_dumps_count_;
180
181 MemoryDumpManager::GetInstance()->RequestGlobalDump(
182 MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
183 }
184
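
To make the rate arithmetic above concrete, here is a small standalone simulation of RequestPeriodicGlobalDump()'s scheduling decision. The 250 ms light and 2000 ms detailed intervals are illustrative assumptions, not values from this patch.

  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint32_t light_period_ms = 250;   // assumed light trigger interval
    const uint32_t heavy_period_ms = 2000;  // assumed detailed trigger interval
    const uint32_t min_period_ms = 250;     // min over all trigger intervals
    const uint32_t light_rate = light_period_ms / min_period_ms;  // 1
    const uint32_t heavy_rate = heavy_period_ms / min_period_ms;  // 8

    // Mirrors the logic above: every tick defaults to BACKGROUND, is promoted
    // to LIGHT when the light rate divides the count, and to DETAILED when
    // the heavy rate divides the count.
    for (uint32_t count = 0; count < 16; ++count) {
      const char* level = "BACKGROUND";
      if (light_rate > 0 && count % light_rate == 0)
        level = "LIGHT";
      if (heavy_rate > 0 && count % heavy_rate == 0)
        level = "DETAILED";
      printf("tick %2u (t=%5u ms) -> %s\n", count, count * min_period_ms, level);
    }
    return 0;
  }

With these values every eighth tick is DETAILED and the rest are LIGHT; with only a background trigger configured both rates stay 0 and every tick remains BACKGROUND.
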
114 // static 185 // static
115 const char* const MemoryDumpManager::kTraceCategory = 186 const char* const MemoryDumpManager::kTraceCategory =
116 TRACE_DISABLED_BY_DEFAULT("memory-infra"); 187 TRACE_DISABLED_BY_DEFAULT("memory-infra");
117 188
118 // static 189 // static
119 const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3; 190 const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3;
120 191
121 // static 192 // static
122 const uint64_t MemoryDumpManager::kInvalidTracingProcessId = 0; 193 const uint64_t MemoryDumpManager::kInvalidTracingProcessId = 0;
123 194
(...skipping 318 matching lines...)
442 pmd_async_state->pending_dump_providers.clear(); 513 pmd_async_state->pending_dump_providers.clear();
443 } 514 }
444 if (pmd_async_state->pending_dump_providers.empty()) 515 if (pmd_async_state->pending_dump_providers.empty())
445 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state)); 516 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state));
446 517
447 // Read MemoryDumpProviderInfo thread safety considerations in 518 // Read MemoryDumpProviderInfo thread safety considerations in
448 // memory_dump_manager.h when accessing |mdpinfo| fields. 519 // memory_dump_manager.h when accessing |mdpinfo| fields.
449 MemoryDumpProviderInfo* mdpinfo = 520 MemoryDumpProviderInfo* mdpinfo =
450 pmd_async_state->pending_dump_providers.back().get(); 521 pmd_async_state->pending_dump_providers.back().get();
451 522
523 // If we are in background tracing, we should invoke only the whitelisted
524 // providers. Ignore other providers and continue.
525 if (pmd_async_state->req_args.level_of_detail ==
526 MemoryDumpLevelOfDetail::BACKGROUND &&
527 mdpinfo->disabled_for_background_mode) {
528 pmd_async_state->pending_dump_providers.pop_back();
529 return SetupNextMemoryDump(std::move(pmd_async_state));
530 }
531
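
Restated as a tiny predicate (illustration only; the patch inlines this check directly in SetupNextMemoryDump(), and the header path below is my assumption about where MemoryDumpLevelOfDetail lives):

  #include "base/trace_event/memory_dump_request_args.h"

  // A provider is skipped for this dump exactly when the request is a
  // BACKGROUND-level dump and the provider was not whitelisted at
  // registration time.
  bool ShouldSkipProviderForThisDump(
      base::trace_event::MemoryDumpLevelOfDetail level_of_detail,
      bool disabled_for_background_mode) {
    return level_of_detail ==
               base::trace_event::MemoryDumpLevelOfDetail::BACKGROUND &&
           disabled_for_background_mode;
  }
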
452 // If the dump provider did not specify a task runner affinity, dump on 532 // If the dump provider did not specify a task runner affinity, dump on
453 // |dump_thread_| which is already checked above for presence. 533 // |dump_thread_| which is already checked above for presence.
454 SequencedTaskRunner* task_runner = mdpinfo->task_runner.get(); 534 SequencedTaskRunner* task_runner = mdpinfo->task_runner.get();
455 if (!task_runner) { 535 if (!task_runner) {
456 DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner); 536 DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner);
457 task_runner = pmd_async_state->dump_thread_task_runner.get(); 537 task_runner = pmd_async_state->dump_thread_task_runner.get();
458 DCHECK(task_runner); 538 DCHECK(task_runner);
459 } 539 }
460 540
461 if (mdpinfo->options.dumps_on_single_thread_task_runner && 541 if (mdpinfo->options.dumps_on_single_thread_task_runner &&
(...skipping 163 matching lines...)
625 // while the |lock_| is taken; 705 // while the |lock_| is taken;
626 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); 706 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
627 707
628 // Spin-up the thread used to invoke unbound dump providers. 708 // Spin-up the thread used to invoke unbound dump providers.
629 std::unique_ptr<Thread> dump_thread(new Thread("MemoryInfra")); 709 std::unique_ptr<Thread> dump_thread(new Thread("MemoryInfra"));
630 if (!dump_thread->Start()) { 710 if (!dump_thread->Start()) {
631 LOG(ERROR) << "Failed to start the memory-infra thread for tracing"; 711 LOG(ERROR) << "Failed to start the memory-infra thread for tracing";
632 return; 712 return;
633 } 713 }
634 714
635 AutoLock lock(lock_); 715 const TraceConfig trace_config =
ssid 2016/05/23 18:17:03 This unnecessarily locks the whole function. I can
Primiano Tucci (use gerrit) 2016/05/26 17:20:53 Where is the call to RequestGlobalDump? In the tim
ssid 2016/05/26 22:12:54 We currently do the first dump after 250ms of star
636 716 TraceLog::GetInstance()->GetCurrentTraceConfig();
637 DCHECK(delegate_); // At this point we must have a delegate. 717 scoped_refptr<MemoryDumpSessionState> session_state =
638 session_state_ = new MemoryDumpSessionState; 718 new MemoryDumpSessionState;
639
640 if (heap_profiling_enabled_) { 719 if (heap_profiling_enabled_) {
641 // If heap profiling is enabled, the stack frame deduplicator and type name 720 // If heap profiling is enabled, the stack frame deduplicator and type name
642 // deduplicator will be in use. Add a metadata events to write the frames 721 // deduplicator will be in use. Add a metadata events to write the frames
643 // and type IDs. 722 // and type IDs.
644 session_state_->SetStackFrameDeduplicator( 723 session_state->SetStackFrameDeduplicator(
645 WrapUnique(new StackFrameDeduplicator)); 724 WrapUnique(new StackFrameDeduplicator));
646 725
647 session_state_->SetTypeNameDeduplicator( 726 session_state->SetTypeNameDeduplicator(
648 WrapUnique(new TypeNameDeduplicator)); 727 WrapUnique(new TypeNameDeduplicator));
649 728
650 TRACE_EVENT_API_ADD_METADATA_EVENT( 729 TRACE_EVENT_API_ADD_METADATA_EVENT(
651 TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames", 730 TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames",
652 "stackFrames", 731 "stackFrames",
653 WrapUnique( 732 WrapUnique(new SessionStateConvertableProxy<StackFrameDeduplicator>(
654 new SessionStateConvertableProxy<StackFrameDeduplicator>( 733 session_state, &MemoryDumpSessionState::stack_frame_deduplicator)));
655 session_state_,
656 &MemoryDumpSessionState::stack_frame_deduplicator)));
657 734
658 TRACE_EVENT_API_ADD_METADATA_EVENT( 735 TRACE_EVENT_API_ADD_METADATA_EVENT(
659 TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames", 736 TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames",
660 "typeNames", 737 "typeNames",
661 WrapUnique(new SessionStateConvertableProxy<TypeNameDeduplicator>( 738 WrapUnique(new SessionStateConvertableProxy<TypeNameDeduplicator>(
662 session_state_, &MemoryDumpSessionState::type_name_deduplicator))); 739 session_state, &MemoryDumpSessionState::type_name_deduplicator)));
663 } 740 }
741 session_state->SetMemoryDumpConfig(trace_config.memory_dump_config());
Primiano Tucci (use gerrit) 2016/05/26 17:20:53 nit: seems a bit more logical to have this after s
ssid 2016/05/26 22:12:54 Done.
664 742
665 DCHECK(!dump_thread_); 743 {
666 dump_thread_ = std::move(dump_thread); 744 AutoLock lock(lock_);
667 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
668 745
669 // TODO(primiano): This is a temporary hack to disable periodic memory dumps 746 DCHECK(delegate_); // At this point we must have a delegate.
670 // when running memory benchmarks until telemetry uses TraceConfig to 747 session_state_ = session_state;
671 // enable/disable periodic dumps. See crbug.com/529184 . 748
672 if (!is_coordinator_ || 749 DCHECK(!dump_thread_);
673 CommandLine::ForCurrentProcess()->HasSwitch( 750 dump_thread_ = std::move(dump_thread);
674 "enable-memory-benchmarking")) { 751
675 return; 752 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
753
754 // TODO(primiano): This is a temporary hack to disable periodic memory dumps
755 // when running memory benchmarks until telemetry uses TraceConfig to
756 // enable/disable periodic dumps. See crbug.com/529184 .
757 if (!is_coordinator_ ||
758 CommandLine::ForCurrentProcess()->HasSwitch(
759 "enable-memory-benchmarking")) {
760 return;
761 }
676 } 762 }
677 763
678 // Enable periodic dumps. At the moment the periodic support is limited to at 764 // Enable periodic dumps. At the moment the periodic support is limited to at
679 // most one low-detail periodic dump and at most one high-detail periodic 765 // most one periodic trigger per dump mode. All intervals should be an integer
680 // dump. If both are specified the high-detail period must be an integer 766 // multiple of the smallest interval specified.
681 // multiple of the low-level one.
682 g_periodic_dumps_count = 0;
683 const TraceConfig trace_config =
684 TraceLog::GetInstance()->GetCurrentTraceConfig();
685 session_state_->SetMemoryDumpConfig(trace_config.memory_dump_config());
ssid 2016/05/23 18:17:03 I think this needs to be set before the check for
686 const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list = 767 const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list =
687 trace_config.memory_dump_config().triggers; 768 trace_config.memory_dump_config().triggers;
688 if (triggers_list.empty()) 769 if (!triggers_list.empty()) {
Primiano Tucci (use gerrit) 2016/05/26 17:20:53 maybe just always pass the triggers and to this ch
ssid 2016/05/26 22:12:54 Done.
689 return; 770 periodic_dump_helper_.reset(
690 771 new PeriodicGlobalDumpInvokeHelper(triggers_list));
691 uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max();
692 uint32_t heavy_dump_period_ms = 0;
693 DCHECK_LE(triggers_list.size(), 2u);
694 for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) {
695 DCHECK(config.periodic_interval_ms);
696 if (config.level_of_detail == MemoryDumpLevelOfDetail::DETAILED)
697 heavy_dump_period_ms = config.periodic_interval_ms;
698 min_timer_period_ms =
699 std::min(min_timer_period_ms, config.periodic_interval_ms);
700 } 772 }
701 DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
702 g_heavy_dumps_rate = heavy_dump_period_ms / min_timer_period_ms;
703
704 periodic_dump_timer_.Start(FROM_HERE,
705 TimeDelta::FromMilliseconds(min_timer_period_ms),
706 base::Bind(&RequestPeriodicGlobalDump));
707 } 773 }
708 774
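
For reference, a hedged sketch of enabling tracing with a config whose memory_dump_config triggers would populate triggers_list above. The TraceConfig(string) constructor, the TraceLog::SetEnabled() call, the JSON keys, and the interval values are my best recollection of the API at this revision rather than something this patch shows.

  #include "base/trace_event/trace_config.h"
  #include "base/trace_event/trace_log.h"

  void StartMemoryInfraTracingExample() {
    // Two periodic triggers: a light dump every 250 ms and a detailed dump
    // every 2000 ms. OnTraceLogEnabled() reads them back through
    // TraceLog::GetCurrentTraceConfig().memory_dump_config().triggers and
    // hands them to PeriodicGlobalDumpInvokeHelper.
    const char kTraceConfig[] =
        "{\"included_categories\":[\"disabled-by-default-memory-infra\"],"
        "\"memory_dump_config\":{\"triggers\":["
        "{\"mode\":\"light\",\"periodic_interval_ms\":250},"
        "{\"mode\":\"detailed\",\"periodic_interval_ms\":2000}]}}";
    base::trace_event::TraceLog::GetInstance()->SetEnabled(
        base::trace_event::TraceConfig(kTraceConfig),
        base::trace_event::TraceLog::RECORDING_MODE);
  }
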
709 void MemoryDumpManager::OnTraceLogDisabled() { 775 void MemoryDumpManager::OnTraceLogDisabled() {
710 // There might be a memory dump in progress while this happens. Therefore, 776 // There might be a memory dump in progress while this happens. Therefore,
711 // ensure that the MDM state which depends on the tracing enabled / disabled 777 // ensure that the MDM state which depends on the tracing enabled / disabled
712 // state is always accessed by the dumping methods holding the |lock_|. 778 // state is always accessed by the dumping methods holding the |lock_|.
713 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); 779 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
714 std::unique_ptr<Thread> dump_thread; 780 std::unique_ptr<Thread> dump_thread;
715 { 781 {
716 AutoLock lock(lock_); 782 AutoLock lock(lock_);
717 dump_thread = std::move(dump_thread_); 783 dump_thread = std::move(dump_thread_);
718 session_state_ = nullptr; 784 session_state_ = nullptr;
719 } 785 }
720 786
721 // Thread stops are blocking and must be performed outside of the |lock_| 787 // Thread stops are blocking and must be performed outside of the |lock_|
722 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). 788 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it).
723 periodic_dump_timer_.Stop(); 789 periodic_dump_helper_.reset();
724 if (dump_thread) 790 if (dump_thread)
725 dump_thread->Stop(); 791 dump_thread->Stop();
726 } 792 }
727 793
728 uint64_t MemoryDumpManager::GetTracingProcessId() const { 794 uint64_t MemoryDumpManager::GetTracingProcessId() const {
729 return delegate_->GetTracingProcessId(); 795 return delegate_->GetTracingProcessId();
730 } 796 }
731 797
732 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( 798 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
733 MemoryDumpProvider* dump_provider, 799 MemoryDumpProvider* dump_provider,
734 const char* name, 800 const char* name,
735 scoped_refptr<SequencedTaskRunner> task_runner, 801 scoped_refptr<SequencedTaskRunner> task_runner,
736 const MemoryDumpProvider::Options& options) 802 const MemoryDumpProvider::Options& options)
737 : dump_provider(dump_provider), 803 : dump_provider(dump_provider),
738 name(name), 804 name(name),
739 task_runner(std::move(task_runner)), 805 task_runner(std::move(task_runner)),
740 options(options), 806 options(options),
741 consecutive_failures(0), 807 consecutive_failures(0),
742 disabled(false) {} 808 disabled(false),
809 disabled_for_background_mode(!IsDumpProviderInWhitelist(name)) {}
743 810
744 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {} 811 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {}
745 812
746 bool MemoryDumpManager::MemoryDumpProviderInfo::Comparator::operator()( 813 bool MemoryDumpManager::MemoryDumpProviderInfo::Comparator::operator()(
747 const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& a, 814 const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& a,
748 const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& b) const { 815 const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& b) const {
749 if (!a || !b) 816 if (!a || !b)
750 return a.get() < b.get(); 817 return a.get() < b.get();
751 // Ensure that unbound providers (task_runner == nullptr) always run last. 818 // Ensure that unbound providers (task_runner == nullptr) always run last.
752 // Rationale: some unbound dump providers are known to be slow, keep them last 819 // Rationale: some unbound dump providers are known to be slow, keep them last
(...skipping 27 matching lines...)
780 if (iter == process_dumps.end()) { 847 if (iter == process_dumps.end()) {
781 std::unique_ptr<ProcessMemoryDump> new_pmd( 848 std::unique_ptr<ProcessMemoryDump> new_pmd(
782 new ProcessMemoryDump(session_state)); 849 new ProcessMemoryDump(session_state));
783 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; 850 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first;
784 } 851 }
785 return iter->second.get(); 852 return iter->second.get();
786 } 853 }
787 854
788 } // namespace trace_event 855 } // namespace trace_event
789 } // namespace base 856 } // namespace base