OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 | 8 |
9 #include "base/atomic_sequence_num.h" | 9 #include "base/atomic_sequence_num.h" |
10 #include "base/compiler_specific.h" | 10 #include "base/compiler_specific.h" |
(...skipping 36 matching lines...)
47 const int kDumpIntervalMs = 250; | 47 const int kDumpIntervalMs = 250; |
48 const int kTraceEventNumArgs = 1; | 48 const int kTraceEventNumArgs = 1; |
49 const char* kTraceEventArgNames[] = {"dumps"}; | 49 const char* kTraceEventArgNames[] = {"dumps"}; |
50 const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE}; | 50 const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE}; |
51 | 51 |
52 StaticAtomicSequenceNumber g_next_guid; | 52 StaticAtomicSequenceNumber g_next_guid; |
53 uint32 g_periodic_dumps_count = 0; | 53 uint32 g_periodic_dumps_count = 0; |
54 MemoryDumpManager* g_instance_for_testing = nullptr; | 54 MemoryDumpManager* g_instance_for_testing = nullptr; |
55 MemoryDumpProvider* g_mmaps_dump_provider = nullptr; | 55 MemoryDumpProvider* g_mmaps_dump_provider = nullptr; |
56 | 56 |
57 // Internal class used to hold details about ProcessMemoryDump requests for the | |
58 // current process. | |
59 class ProcessMemoryDumpHolder | |
60 : public RefCountedThreadSafe<ProcessMemoryDumpHolder> { | |
61 public: | |
62 ProcessMemoryDumpHolder( | |
63 MemoryDumpRequestArgs req_args, | |
64 const scoped_refptr<MemoryDumpSessionState>& session_state, | |
65 MemoryDumpCallback callback) | |
66 : process_memory_dump(session_state), | |
67 req_args(req_args), | |
68 callback(callback), | |
69 task_runner(MessageLoop::current()->task_runner()), | |
70 num_pending_async_requests(0) {} | |
71 | |
72 ProcessMemoryDump process_memory_dump; | |
73 const MemoryDumpRequestArgs req_args; | |
74 | |
75 // Callback passed to the initial call to CreateProcessDump(). | |
76 MemoryDumpCallback callback; | |
77 | |
78 // Thread on which FinalizeDumpAndAddToTrace() should be called, which is the | |
79 // same that invoked the initial CreateProcessDump(). | |
80 const scoped_refptr<SingleThreadTaskRunner> task_runner; | |
81 | |
82 // Number of pending ContinueAsyncProcessDump() calls. | |
83 int num_pending_async_requests; | |
84 | |
85 private: | |
86 friend class RefCountedThreadSafe<ProcessMemoryDumpHolder>; | |
87 virtual ~ProcessMemoryDumpHolder() {} | |
88 DISALLOW_COPY_AND_ASSIGN(ProcessMemoryDumpHolder); | |
89 }; | |
90 | |
91 void FinalizeDumpAndAddToTrace( | |
92 const scoped_refptr<ProcessMemoryDumpHolder>& pmd_holder) { | |
93 DCHECK_EQ(0, pmd_holder->num_pending_async_requests); | |
94 | |
95 if (!pmd_holder->task_runner->BelongsToCurrentThread()) { | |
96 pmd_holder->task_runner->PostTask( | |
97 FROM_HERE, Bind(&FinalizeDumpAndAddToTrace, pmd_holder)); | |
98 return; | |
99 } | |
100 | |
101 scoped_refptr<ConvertableToTraceFormat> event_value(new TracedValue()); | |
102 pmd_holder->process_memory_dump.AsValueInto( | |
103 static_cast<TracedValue*>(event_value.get())); | |
104 const char* const event_name = | |
105 MemoryDumpTypeToString(pmd_holder->req_args.dump_type); | |
106 | |
107 TRACE_EVENT_API_ADD_TRACE_EVENT( | |
108 TRACE_EVENT_PHASE_MEMORY_DUMP, | |
109 TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name, | |
110 pmd_holder->req_args.dump_guid, kTraceEventNumArgs, kTraceEventArgNames, | |
111 kTraceEventArgTypes, nullptr /* arg_values */, &event_value, | |
112 TRACE_EVENT_FLAG_HAS_ID); | |
113 | |
114 if (!pmd_holder->callback.is_null()) { | |
115 pmd_holder->callback.Run(pmd_holder->req_args.dump_guid, true); | |
116 pmd_holder->callback.Reset(); | |
117 } | |
118 } | |
119 | |
120 void RequestPeriodicGlobalDump() { | 57 void RequestPeriodicGlobalDump() { |
121 MemoryDumpType dump_type = g_periodic_dumps_count == 0 | 58 MemoryDumpType dump_type = g_periodic_dumps_count == 0 |
122 ? MemoryDumpType::PERIODIC_INTERVAL_WITH_MMAPS | 59 ? MemoryDumpType::PERIODIC_INTERVAL_WITH_MMAPS |
123 : MemoryDumpType::PERIODIC_INTERVAL; | 60 : MemoryDumpType::PERIODIC_INTERVAL; |
124 if (++g_periodic_dumps_count == kHeavyMmapsDumpsRate) | 61 if (++g_periodic_dumps_count == kHeavyMmapsDumpsRate) |
125 g_periodic_dumps_count = 0; | 62 g_periodic_dumps_count = 0; |
126 | 63 |
127 MemoryDumpManager::GetInstance()->RequestGlobalDump(dump_type); | 64 MemoryDumpManager::GetInstance()->RequestGlobalDump(dump_type); |
128 } | 65 } |
129 | 66 |
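The cadence above makes the very first periodic dump after tracing starts a heavy PERIODIC_INTERVAL_WITH_MMAPS dump, followed by light PERIODIC_INTERVAL dumps until the counter wraps. A minimal standalone sketch of that counter, assuming a hypothetical kHeavyMmapsDumpsRate of 8 (the real constant is defined in the lines elided above):

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kHeavyMmapsDumpsRate = 8;  // Assumed for illustration.
  uint32_t count = 0;                       // Mirrors g_periodic_dumps_count.
  for (int tick = 0; tick < 10; ++tick) {
    const bool heavy = (count == 0);
    if (++count == kHeavyMmapsDumpsRate)
      count = 0;
    std::printf("tick %d -> %s\n", tick,
                heavy ? "PERIODIC_INTERVAL_WITH_MMAPS" : "PERIODIC_INTERVAL");
  }
  return 0;  // Ticks 0 and 8 are heavy; all others are light.
}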
130 void InitializeThreadLocalEventBufferIfSupported() { | |
131 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); | |
132 } | |
133 | |
134 } // namespace | 67 } // namespace |
135 | 68 |
136 // static | 69 // static |
137 const char* const MemoryDumpManager::kTraceCategoryForTesting = kTraceCategory; | 70 const char* const MemoryDumpManager::kTraceCategoryForTesting = kTraceCategory; |
138 | 71 |
139 // static | 72 // static |
140 const uint64 MemoryDumpManager::kInvalidTracingProcessId = 0; | 73 const uint64 MemoryDumpManager::kInvalidTracingProcessId = 0; |
141 | 74 |
142 // static | 75 // static |
143 const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3; | 76 const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3; |
144 | 77 |
145 // static | 78 // static |
146 MemoryDumpManager* MemoryDumpManager::GetInstance() { | 79 MemoryDumpManager* MemoryDumpManager::GetInstance() { |
147 if (g_instance_for_testing) | 80 if (g_instance_for_testing) |
148 return g_instance_for_testing; | 81 return g_instance_for_testing; |
149 | 82 |
150 return Singleton<MemoryDumpManager, | 83 return Singleton<MemoryDumpManager, |
151 LeakySingletonTraits<MemoryDumpManager>>::get(); | 84 LeakySingletonTraits<MemoryDumpManager>>::get(); |
152 } | 85 } |
153 | 86 |
154 // static | 87 // static |
155 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) { | 88 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) { |
156 if (instance) | 89 if (instance) |
157 instance->skip_core_dumpers_auto_registration_for_testing_ = true; | 90 instance->skip_core_dumpers_auto_registration_for_testing_ = true; |
158 g_instance_for_testing = instance; | 91 g_instance_for_testing = instance; |
159 } | 92 } |
160 | 93 |
161 MemoryDumpManager::MemoryDumpManager() | 94 MemoryDumpManager::MemoryDumpManager() |
162 : delegate_(nullptr), | 95 : did_unregister_dump_provider_(false), |
| 96 delegate_(nullptr), |
163 memory_tracing_enabled_(0), | 97 memory_tracing_enabled_(0), |
164 tracing_process_id_(kInvalidTracingProcessId), | 98 tracing_process_id_(kInvalidTracingProcessId), |
165 skip_core_dumpers_auto_registration_for_testing_(false) { | 99 skip_core_dumpers_auto_registration_for_testing_(false) { |
166 g_next_guid.GetNext(); // Make sure the first guid is not zero. | 100 g_next_guid.GetNext(); // Make sure the first guid is not zero. |
167 } | 101 } |
168 | 102 |
169 MemoryDumpManager::~MemoryDumpManager() { | 103 MemoryDumpManager::~MemoryDumpManager() { |
170 base::trace_event::TraceLog::GetInstance()->RemoveEnabledStateObserver(this); | 104 base::trace_event::TraceLog::GetInstance()->RemoveEnabledStateObserver(this); |
171 } | 105 } |
172 | 106 |
(...skipping 26 matching lines...)
199 | 133 |
200 void MemoryDumpManager::SetDelegate(MemoryDumpManagerDelegate* delegate) { | 134 void MemoryDumpManager::SetDelegate(MemoryDumpManagerDelegate* delegate) { |
201 AutoLock lock(lock_); | 135 AutoLock lock(lock_); |
202 DCHECK_EQ(static_cast<MemoryDumpManagerDelegate*>(nullptr), delegate_); | 136 DCHECK_EQ(static_cast<MemoryDumpManagerDelegate*>(nullptr), delegate_); |
203 delegate_ = delegate; | 137 delegate_ = delegate; |
204 } | 138 } |
205 | 139 |
206 void MemoryDumpManager::RegisterDumpProvider( | 140 void MemoryDumpManager::RegisterDumpProvider( |
207 MemoryDumpProvider* mdp, | 141 MemoryDumpProvider* mdp, |
208 const scoped_refptr<SingleThreadTaskRunner>& task_runner) { | 142 const scoped_refptr<SingleThreadTaskRunner>& task_runner) { |
209 MemoryDumpProviderInfo mdp_info(task_runner); | 143 MemoryDumpProviderInfo mdp_info(mdp, task_runner); |
210 AutoLock lock(lock_); | 144 AutoLock lock(lock_); |
211 dump_providers_.insert(std::make_pair(mdp, mdp_info)); | 145 dump_providers_.insert(mdp_info); |
212 } | 146 } |
213 | 147 |
214 void MemoryDumpManager::RegisterDumpProvider(MemoryDumpProvider* mdp) { | 148 void MemoryDumpManager::RegisterDumpProvider(MemoryDumpProvider* mdp) { |
215 RegisterDumpProvider(mdp, nullptr); | 149 RegisterDumpProvider(mdp, nullptr); |
216 } | 150 } |
217 | 151 |
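A hedged usage sketch for the two registration overloads above. FooDumpProvider is hypothetical, and the OnMemoryDump() signature is inferred from the call sites later in this file:

#include "base/memory/ref_counted.h"
#include "base/single_thread_task_runner.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"

// Hypothetical provider, used only to illustrate the API.
class FooDumpProvider : public base::trace_event::MemoryDumpProvider {
 public:
  bool OnMemoryDump(base::trace_event::ProcessMemoryDump* pmd) override {
    // Fill |pmd| with this component's allocator dumps. Returning false
    // counts toward the manager's consecutive-failure limit.
    return true;
  }
};

void RegisterFoo(FooDumpProvider* provider,
                 scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
  // Thread-affine registration: every OnMemoryDump() call is posted to
  // |task_runner|, and unregistration is race-free only from that thread.
  base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      provider, task_runner);
  // The single-argument overload (task runner == nullptr) would instead
  // let OnMemoryDump() run on whichever thread drives the dump.
}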
218 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { | 152 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { |
219 AutoLock lock(lock_); | 153 AutoLock lock(lock_); |
220 | 154 |
221 auto it = dump_providers_.find(mdp); | 155 auto mdp_iter = dump_providers_.begin(); |
222 if (it == dump_providers_.end()) | 156 for (; mdp_iter != dump_providers_.end(); ++mdp_iter) { |
| 157 if (mdp_iter->dump_provider == mdp) |
| 158 break; |
| 159 } |
| 160 |
| 161 if (mdp_iter == dump_providers_.end()) |
223 return; | 162 return; |
224 | 163 |
225 const MemoryDumpProviderInfo& mdp_info = it->second; | |
226 // Unregistration of a MemoryDumpProvider while tracing is ongoing is safe | 164 // Unregistration of a MemoryDumpProvider while tracing is ongoing is safe |
227 // only if the MDP has specified a thread affinity (via task_runner()) AND | 165 // only if the MDP has specified a thread affinity (via task_runner()) AND |
228 // the unregistration happens on the same thread (so the MDP cannot unregister | 166 // the unregistration happens on the same thread (so the MDP cannot unregister |
229 // and OnMemoryDump() at the same time). | 167 // and OnMemoryDump() at the same time). |
230 // Otherwise, it is not possible to guarantee that its unregistration is | 168 // Otherwise, it is not possible to guarantee that its unregistration is |
231 // race-free. If you hit this DCHECK, your MDP has a bug. | 169 // race-free. If you hit this DCHECK, your MDP has a bug. |
232 DCHECK_IMPLIES( | 170 DCHECK_IMPLIES( |
233 subtle::NoBarrier_Load(&memory_tracing_enabled_), | 171 subtle::NoBarrier_Load(&memory_tracing_enabled_), |
234 mdp_info.task_runner && mdp_info.task_runner->BelongsToCurrentThread()) | 172 mdp_iter->task_runner && mdp_iter->task_runner->BelongsToCurrentThread()) |
235 << "The MemoryDumpProvider attempted to unregister itself in a racy way. " | 173 << "The MemoryDumpProvider attempted to unregister itself in a racy way. " |
236 << " Please file a crbug."; | 174 << "Please file a crbug."; |
237 | 175 |
238 // Remove from the enabled providers list. This is to deal with the case that | 176 dump_providers_.erase(mdp_iter); |
239 // UnregisterDumpProvider is called while the trace is enabled. | 177 did_unregister_dump_provider_ = true; |
240 dump_providers_.erase(it); | |
241 } | 178 } |
242 | 179 |
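Given the DCHECK above, a thread-affine provider that may be torn down while tracing is enabled should unregister from its own task runner. A hedged sketch under the same assumptions as the previous example:

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/location.h"
#include "base/memory/ref_counted.h"
#include "base/single_thread_task_runner.h"
#include "base/trace_event/memory_dump_manager.h"

// Hop to the runner the provider was registered with, so unregistration
// cannot race an in-flight OnMemoryDump() and trip the DCHECK above.
void UnregisterFooSafely(
    base::trace_event::MemoryDumpProvider* provider,
    scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
  task_runner->PostTask(
      FROM_HERE,
      base::Bind(
          &base::trace_event::MemoryDumpManager::UnregisterDumpProvider,
          base::Unretained(
              base::trace_event::MemoryDumpManager::GetInstance()),
          base::Unretained(provider)));
}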
243 void MemoryDumpManager::RequestGlobalDump( | 180 void MemoryDumpManager::RequestGlobalDump( |
244 MemoryDumpType dump_type, | 181 MemoryDumpType dump_type, |
245 const MemoryDumpCallback& callback) { | 182 const MemoryDumpCallback& callback) { |
246 // Bail out immediately if tracing is not enabled at all. | 183 // Bail out immediately if tracing is not enabled at all. |
247 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) | 184 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) |
248 return; | 185 return; |
249 | 186 |
250 const uint64 guid = | 187 const uint64 guid = |
(...skipping 14 matching lines...)
265 delegate->RequestGlobalMemoryDump(args, callback); | 202 delegate->RequestGlobalMemoryDump(args, callback); |
266 } else if (!callback.is_null()) { | 203 } else if (!callback.is_null()) { |
267 callback.Run(guid, false /* success */); | 204 callback.Run(guid, false /* success */); |
268 } | 205 } |
269 } | 206 } |
270 | 207 |
271 void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type) { | 208 void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type) { |
272 RequestGlobalDump(dump_type, MemoryDumpCallback()); | 209 RequestGlobalDump(dump_type, MemoryDumpCallback()); |
273 } | 210 } |
274 | 211 |
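A hedged usage sketch for the asynchronous variant above. OnGlobalDumpDone is hypothetical; the (guid, success) callback signature is inferred from the callback.Run() calls in this file:

#include "base/basictypes.h"
#include "base/bind.h"
#include "base/logging.h"
#include "base/trace_event/memory_dump_manager.h"

// Receives the guid of the request and a success flag. Note that if
// memory tracing is disabled the request is dropped without invoking the
// callback at all; success == false is reported when no delegate is set.
void OnGlobalDumpDone(uint64 dump_guid, bool success) {
  VLOG(1) << "Memory dump " << dump_guid
          << (success ? " completed" : " failed");
}

void RequestDumpWithAck() {
  base::trace_event::MemoryDumpManager::GetInstance()->RequestGlobalDump(
      base::trace_event::MemoryDumpType::PERIODIC_INTERVAL,
      base::Bind(&OnGlobalDumpDone));
}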
275 // Creates a memory dump for the current process and appends it to the trace. | |
276 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, | 212 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, |
277 const MemoryDumpCallback& callback) { | 213 const MemoryDumpCallback& callback) { |
278 scoped_refptr<ProcessMemoryDumpHolder> pmd_holder( | 214 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; |
279 new ProcessMemoryDumpHolder(args, session_state_, callback)); | |
280 ProcessMemoryDump* pmd = &pmd_holder->process_memory_dump; | |
281 bool did_any_provider_dump = false; | |
282 bool did_post_any_async_task = false; | |
283 | |
284 // Initalizes the ThreadLocalEventBuffer for the syncrhonous dump providers | |
285 // that will be invoked in this thread without other posts. The initialization | |
286 // for the asynchronous providers, instead, is handled in OnTraceLogEnabled(). | |
287 InitializeThreadLocalEventBufferIfSupported(); | |
288 | |
289 // Iterate over the active dump providers and invoke OnMemoryDump(pmd). | |
290 // The MDM guarantees linearity (at most one MDP is active within one | |
291 // process) and thread-safety (MDM enforces the right locking when entering / | |
292 // leaving the MDP.OnMemoryDump() call). This is to simplify the clients' | |
293 // design | |
294 // and not let the MDPs worry about locking. | |
295 // As regards thread affinity, depending on the MDP configuration (see | |
296 // memory_dump_provider.h), the OnMemoryDump() invocation can happen: | |
297 // - Synchronousy on the MDM thread, when MDP.task_runner() is not set. | |
298 // - Posted on MDP.task_runner(), when MDP.task_runner() is set. | |
299 { | 215 { |
300 AutoLock lock(lock_); | 216 AutoLock lock(lock_); |
301 for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) { | 217 did_unregister_dump_provider_ = false; |
302 MemoryDumpProvider* mdp = it->first; | 218 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( |
303 MemoryDumpProviderInfo* mdp_info = &it->second; | 219 args, dump_providers_.begin(), session_state_, callback)); |
| 220 } |
| 221 |
| 222 // Start the thread hop. |dump_providers_| is kept sorted by thread, so |
| 223 // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread |
| 224 // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()). |
| 225 ContinueAsyncProcessDump(pmd_async_state.Pass()); |
| 226 } |
| 227 |
| 228 // At most one ContinueAsyncProcessDump() can be active at any time for a given |
| 229 // PMD, regardless of the status of the |lock_|. |lock_| is used here purely to |
| 230 // ensure consistency w.r.t. (un)registrations of |dump_providers_|. |
| 231 // The linearization of dump providers' OnMemoryDump invocations is achieved by |
| 232 // means of subsequent PostTask(s). |
| 233 // |
| 234 // 1) Prologue: |
| 235 // - Check if the dump provider is disabled; if so, skip the dump. |
| 236 // - Check if we are on the right thread. If not, hop and continue there. |
| 237 // 2) Invoke the dump provider's OnMemoryDump() (unless skipped). |
| 238 // 3) Epilogue: |
| 239 // - Unregister the dump provider if it failed too many times consecutively. |
| 240 // - Advance the |next_dump_provider| iterator to the next dump provider. |
| 241 // - If this was the last hop, create a trace event, add it to the trace |
| 242 // and finalize (invoke callback). |
| 243 |
| 244 void MemoryDumpManager::ContinueAsyncProcessDump( |
| 245 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { |
| 246 // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs |
| 247 // in the PostTask below don't end up registering their own dump providers |
| 248 // (for discounting trace memory overhead) while holding the |lock_|. |
| 249 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); |
| 250 |
| 251 // DO NOT put any LOG() statement in the locked sections, as in some contexts |
| 252 // (GPU process) LOG() ends up performing PostTask/IPCs. |
| 253 MemoryDumpProvider* mdp; |
| 254 bool skip_dump = false; |
| 255 { |
| 256 AutoLock lock(lock_); |
| 257 // In the unlikely event that a dump provider was unregistered while |
| 258 // dumping, abort the dump, as that would make |next_dump_provider| invalid. |
| 259 // Registration, on the other hand, is safe as per std::set<> contract. |
| 260 if (did_unregister_dump_provider_) { |
| 261 return AbortDumpLocked(pmd_async_state->callback, |
| 262 pmd_async_state->task_runner, |
| 263 pmd_async_state->req_args.dump_guid); |
| 264 } |
| 265 |
| 266 auto* mdp_info = &*pmd_async_state->next_dump_provider; |
| 267 mdp = mdp_info->dump_provider; |
| 268 if (mdp_info->disabled) { |
| 269 skip_dump = true; |
| 270 } else if (mdp == g_mmaps_dump_provider && |
| 271 pmd_async_state->req_args.dump_type != |
| 272 MemoryDumpType::PERIODIC_INTERVAL_WITH_MMAPS) { |
304 // Mmaps dumping is very heavyweight and cannot be performed at the same | 273 // Mmaps dumping is very heavyweight and cannot be performed at the same |
305 // rate of other dumps. TODO(primiano): this is a hack and should be | 274 // rate of other dumps. TODO(primiano): this is a hack and should be |
306 // cleaned up as part of crbug.com/499731. | 275 // cleaned up as part of crbug.com/499731. |
307 if (mdp == g_mmaps_dump_provider && | 276 skip_dump = true; |
308 args.dump_type != MemoryDumpType::PERIODIC_INTERVAL_WITH_MMAPS) { | 277 } else if (mdp_info->task_runner && |
309 continue; | 278 !mdp_info->task_runner->BelongsToCurrentThread()) { |
310 } | 279 // It's time to hop onto another thread. |
311 if (mdp_info->disabled) | 280 |
312 continue; | 281 // Copy the callback + arguments just for the unlikely case in which |
313 if (mdp_info->task_runner) { | 282 // PostTask fails. In that case the Bind helper will destroy the |
314 // The OnMemoryDump() call must be posted. | 283 // pmd_async_state, and we must keep a copy of the fields needed to |
315 bool did_post_async_task = mdp_info->task_runner->PostTask( | 284 // signal the abort. |
316 FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump, | 285 MemoryDumpCallback callback = pmd_async_state->callback; |
317 Unretained(this), Unretained(mdp), pmd_holder)); | 286 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = |
318 // The thread underlying the TaskRunner might have gone away. | 287 pmd_async_state->task_runner; |
319 if (did_post_async_task) { | 288 const uint64 dump_guid = pmd_async_state->req_args.dump_guid; |
320 ++pmd_holder->num_pending_async_requests; | 289 |
321 did_post_any_async_task = true; | 290 const bool did_post_task = mdp_info->task_runner->PostTask( |
322 } | 291 FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump, |
323 } else { | 292 Unretained(this), Passed(pmd_async_state.Pass()))); |
324 // Invoke the dump provider synchronously. | 293 if (did_post_task) |
325 did_any_provider_dump |= InvokeDumpProviderLocked(mdp, pmd); | 294 return; |
| 295 |
| 296 // The thread is gone. At this point the best thing we can do is to |
| 297 // disable the dump provider and abort this dump. |
| 298 mdp_info->disabled = true; |
| 299 return AbortDumpLocked(callback, callback_task_runner, dump_guid); |
| 300 } |
| 301 } // AutoLock(lock_) |
| 302 |
| 303 // Invoke the dump provider without holding the |lock_|. |
| 304 bool finalize = false; |
| 305 bool dump_successful = false; |
| 306 if (!skip_dump) |
| 307 dump_successful = mdp->OnMemoryDump(&pmd_async_state->process_memory_dump); |
| 308 |
| 309 { |
| 310 AutoLock lock(lock_); |
| 311 if (did_unregister_dump_provider_) { |
| 312 return AbortDumpLocked(pmd_async_state->callback, |
| 313 pmd_async_state->task_runner, |
| 314 pmd_async_state->req_args.dump_guid); |
| 315 } |
| 316 auto* mdp_info = &*pmd_async_state->next_dump_provider; |
| 317 if (dump_successful) { |
| 318 mdp_info->consecutive_failures = 0; |
| 319 } else if (!skip_dump) { |
| 320 ++mdp_info->consecutive_failures; |
| 321 if (mdp_info->consecutive_failures >= kMaxConsecutiveFailuresCount) { |
| 322 mdp_info->disabled = true; |
326 } | 323 } |
327 } | 324 } |
328 } // AutoLock | 325 ++pmd_async_state->next_dump_provider; |
| 326 finalize = pmd_async_state->next_dump_provider == dump_providers_.end(); |
| 327 } |
329 | 328 |
330 // If at least one synchronous provider did dump and there are no pending | 329 if (!skip_dump && !dump_successful) { |
331 // asynchronous requests, add the dump to the trace and invoke the callback | 330 LOG(ERROR) << "A memory dumper failed, possibly due to sandboxing " |
332 // straight away (FinalizeDumpAndAddToTrace() takes care of the callback). | 331 "(crbug.com/461788). Disabling dumper for current process. " |
333 if (did_any_provider_dump && !did_post_any_async_task) | 332 "Try restarting chrome with the --no-sandbox switch."; |
334 FinalizeDumpAndAddToTrace(pmd_holder); | 333 } |
| 334 |
| 335 if (finalize) |
| 336 return FinalizeDumpAndAddToTrace(pmd_async_state.Pass()); |
| 337 |
| 338 ContinueAsyncProcessDump(pmd_async_state.Pass()); |
335 } | 339 } |
336 | 340 |
337 // Invokes the MemoryDumpProvider.OnMemoryDump(), taking care of the fail-safe | 341 // static |
338 // logic which disables the dumper when failing (crbug.com/461788). | 342 void MemoryDumpManager::FinalizeDumpAndAddToTrace( |
339 bool MemoryDumpManager::InvokeDumpProviderLocked(MemoryDumpProvider* mdp, | 343 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { |
340 ProcessMemoryDump* pmd) { | 344 if (!pmd_async_state->task_runner->BelongsToCurrentThread()) { |
341 lock_.AssertAcquired(); | 345 scoped_refptr<SingleThreadTaskRunner> task_runner = |
342 bool dump_successful = mdp->OnMemoryDump(pmd); | 346 pmd_async_state->task_runner; |
343 MemoryDumpProviderInfo* mdp_info = &dump_providers_.find(mdp)->second; | 347 task_runner->PostTask(FROM_HERE, |
344 if (dump_successful) { | 348 Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace, |
345 mdp_info->consecutive_failures = 0; | 349 Passed(pmd_async_state.Pass()))); |
346 } else { | 350 return; |
347 // Disable the MDP if it fails kMaxConsecutiveFailuresCount times | |
348 // consecutively. | |
349 mdp_info->consecutive_failures++; | |
350 if (mdp_info->consecutive_failures >= kMaxConsecutiveFailuresCount) { | |
351 mdp_info->disabled = true; | |
352 LOG(ERROR) << "The memory dumper failed, possibly due to sandboxing " | |
353 "(crbug.com/461788), disabling it for current process. Try " | |
354 "restarting chrome with the --no-sandbox switch."; | |
355 } | |
356 } | 351 } |
357 return dump_successful; | 352 |
| 353 scoped_refptr<ConvertableToTraceFormat> event_value(new TracedValue()); |
| 354 pmd_async_state->process_memory_dump.AsValueInto( |
| 355 static_cast<TracedValue*>(event_value.get())); |
| 356 const char* const event_name = |
| 357 MemoryDumpTypeToString(pmd_async_state->req_args.dump_type); |
| 358 |
| 359 TRACE_EVENT_API_ADD_TRACE_EVENT( |
| 360 TRACE_EVENT_PHASE_MEMORY_DUMP, |
| 361 TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name, |
| 362 pmd_async_state->req_args.dump_guid, kTraceEventNumArgs, |
| 363 kTraceEventArgNames, kTraceEventArgTypes, nullptr /* arg_values */, |
| 364 &event_value, TRACE_EVENT_FLAG_HAS_ID); |
| 365 |
| 366 if (!pmd_async_state->callback.is_null()) { |
| 367 pmd_async_state->callback.Run(pmd_async_state->req_args.dump_guid, |
| 368 true /* success */); |
| 369 pmd_async_state->callback.Reset(); |
| 370 } |
358 } | 371 } |
359 | 372 |
360 // This is posted to arbitrary threads as a continuation of CreateProcessDump(), | 373 // static |
361 // when one or more MemoryDumpProvider(s) require the OnMemoryDump() call to | 374 void MemoryDumpManager::AbortDumpLocked( |
362 // happen on a different thread. | 375 MemoryDumpCallback callback, |
363 void MemoryDumpManager::ContinueAsyncProcessDump( | 376 scoped_refptr<SingleThreadTaskRunner> task_runner, |
364 MemoryDumpProvider* mdp, | 377 uint64 dump_guid) { |
365 scoped_refptr<ProcessMemoryDumpHolder> pmd_holder) { | 378 if (callback.is_null()) |
366 bool should_finalize_dump = false; | 379 return; // There is nothing to NACK. |
367 { | |
368 // The lock here is to guarantee that different asynchronous dumps on | |
369 // different threads are still serialized, so that the MemoryDumpProvider | |
370 // has a consistent view of the |pmd| argument passed. | |
371 AutoLock lock(lock_); | |
372 ProcessMemoryDump* pmd = &pmd_holder->process_memory_dump; | |
373 | 380 |
374 // Check if the MemoryDumpProvider is still there. It might have been | 381 // Post the callback even if we are already on the right thread to avoid |
375 // destroyed and unregistered while hopping threads. | 382 // invoking the callback while holding the lock_. |
376 if (dump_providers_.count(mdp)) | 383 task_runner->PostTask(FROM_HERE, |
377 InvokeDumpProviderLocked(mdp, pmd); | 384 Bind(callback, dump_guid, false /* success */)); |
378 | |
379 // Finalize the dump appending it to the trace if this was the last | |
380 // asynchronous request pending. | |
381 --pmd_holder->num_pending_async_requests; | |
382 if (pmd_holder->num_pending_async_requests == 0) | |
383 should_finalize_dump = true; | |
384 } // AutoLock(lock_) | |
385 | |
386 if (should_finalize_dump) | |
387 FinalizeDumpAndAddToTrace(pmd_holder); | |
388 } | 385 } |
389 | 386 |
390 void MemoryDumpManager::OnTraceLogEnabled() { | 387 void MemoryDumpManager::OnTraceLogEnabled() { |
391 // TODO(primiano): at this point we query TraceLog::GetCurrentCategoryFilter | 388 // TODO(primiano): at this point we query TraceLog::GetCurrentCategoryFilter |
392 // to figure out (and cache) which dumpers should be enabled or not. | 389 // to figure out (and cache) which dumpers should be enabled or not. |
393 // For the moment piggy back everything on the generic "memory" category. | 390 // For the moment piggy back everything on the generic "memory" category. |
394 bool enabled; | 391 bool enabled; |
395 TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled); | 392 TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled); |
396 | 393 |
397 // Initialize the TraceLog for the current thread. This prevents the | 394 // Initialize the TraceLog for the current thread. This prevents the |
398 // TraceLog memory dump provider from being registered lazily in the | 395 // TraceLog memory dump provider from being registered lazily in the |
399 // PostTask() below while the |lock_| is taken. | 396 // PostTask() below while the |lock_| is taken. |
400 InitializeThreadLocalEventBufferIfSupported(); | 397 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); |
401 | 398 |
402 AutoLock lock(lock_); | 399 AutoLock lock(lock_); |
403 | 400 |
404 // There is no point starting the tracing without a delegate. | 401 // There is no point starting the tracing without a delegate. |
405 if (!enabled || !delegate_) { | 402 if (!enabled || !delegate_) { |
406 // Disable all the providers. | 403 // Disable all the providers. |
407 for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) | 404 for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) |
408 it->second.disabled = true; | 405 it->disabled = true; |
409 return; | 406 return; |
410 } | 407 } |
411 | 408 |
412 session_state_ = new MemoryDumpSessionState(); | 409 session_state_ = new MemoryDumpSessionState(); |
413 for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) { | 410 for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) { |
414 MemoryDumpProviderInfo& mdp_info = it->second; | 411 it->disabled = false; |
415 mdp_info.disabled = false; | 412 it->consecutive_failures = 0; |
416 mdp_info.consecutive_failures = 0; | |
417 if (mdp_info.task_runner) { | |
418 // The thread local event buffer must be initialized at this point as it | |
419 // registers its own dump provider (for tracing overhead acounting). | |
420 // The registration cannot happen lazily during the first TRACE_EVENT* | |
421 // as it might end up registering the ThreadLocalEventBuffer while | |
422 // in onMemoryDump(), which will deadlock. | |
423 mdp_info.task_runner->PostTask( | |
424 FROM_HERE, Bind(&InitializeThreadLocalEventBufferIfSupported)); | |
425 } | |
426 } | 413 } |
427 | 414 |
428 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); | 415 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); |
429 | 416 |
430 if (delegate_->IsCoordinatorProcess()) { | 417 if (delegate_->IsCoordinatorProcess()) { |
431 g_periodic_dumps_count = 0; | 418 g_periodic_dumps_count = 0; |
432 periodic_dump_timer_.Start(FROM_HERE, | 419 periodic_dump_timer_.Start(FROM_HERE, |
433 TimeDelta::FromMilliseconds(kDumpIntervalMs), | 420 TimeDelta::FromMilliseconds(kDumpIntervalMs), |
434 base::Bind(&RequestPeriodicGlobalDump)); | 421 base::Bind(&RequestPeriodicGlobalDump)); |
435 } | 422 } |
436 } | 423 } |
437 | 424 |
438 void MemoryDumpManager::OnTraceLogDisabled() { | 425 void MemoryDumpManager::OnTraceLogDisabled() { |
439 AutoLock lock(lock_); | 426 AutoLock lock(lock_); |
440 periodic_dump_timer_.Stop(); | 427 periodic_dump_timer_.Stop(); |
441 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); | 428 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); |
442 session_state_ = nullptr; | 429 session_state_ = nullptr; |
443 } | 430 } |
444 | 431 |
445 // static | 432 // static |
446 uint64 MemoryDumpManager::ChildProcessIdToTracingProcessId( | 433 uint64 MemoryDumpManager::ChildProcessIdToTracingProcessId( |
447 int child_process_id) { | 434 int child_process_id) { |
448 return static_cast<uint64>( | 435 return static_cast<uint64>( |
449 Hash(reinterpret_cast<const char*>(&child_process_id), | 436 Hash(reinterpret_cast<const char*>(&child_process_id), |
450 sizeof(child_process_id))) + | 437 sizeof(child_process_id))) + |
451 1; | 438 1; |
452 } | 439 } |
453 | 440 |
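The trailing + 1 above guarantees the result can never collide with kInvalidTracingProcessId (0): Hash() yields a 32-bit value, so after widening to uint64 even the worst-case hash plus one cannot wrap to zero. A self-contained sketch of the same arithmetic:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t worst_case_hash = 0xFFFFFFFFu;
  const uint64_t tracing_process_id =
      static_cast<uint64_t>(worst_case_hash) + 1;
  assert(tracing_process_id == 0x100000000ull);  // No wraparound to 0.
  return 0;
}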
454 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( | 441 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( |
| 442 MemoryDumpProvider* dump_provider, |
455 const scoped_refptr<SingleThreadTaskRunner>& task_runner) | 443 const scoped_refptr<SingleThreadTaskRunner>& task_runner) |
456 : task_runner(task_runner), consecutive_failures(0), disabled(false) { | 444 : dump_provider(dump_provider), |
| 445 task_runner(task_runner), |
| 446 consecutive_failures(0), |
| 447 disabled(false) { |
457 } | 448 } |
| 449 |
458 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() { | 450 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() { |
459 } | 451 } |
460 | 452 |
| 453 bool MemoryDumpManager::MemoryDumpProviderInfo::operator<( |
| 454 const MemoryDumpProviderInfo& other) const { |
| 455 if (task_runner == other.task_runner) |
| 456 return dump_provider < other.dump_provider; |
| 457 return task_runner < other.task_runner; |
| 458 } |
| 459 |
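This comparator is what makes the "kept sorted by thread" claim in CreateProcessDump() hold: keying on |task_runner| first keeps all providers sharing a runner adjacent in the std::set, so the iterator walk in ContinueAsyncProcessDump() crosses each runner boundary exactly once. A minimal sketch with integer stand-ins for the two pointers:

#include <cstdio>
#include <set>

struct Info {
  int runner;    // Stand-in for the task runner pointer.
  int provider;  // Stand-in for the dump provider pointer.
  bool operator<(const Info& other) const {
    if (runner == other.runner)
      return provider < other.provider;
    return runner < other.runner;
  }
};

int main() {
  // Insertion order is scrambled on purpose; the set re-sorts by runner
  // first, so each runner's providers come out adjacent.
  std::set<Info> providers = {{2, 1}, {1, 2}, {1, 1}, {2, 2}};
  for (const Info& info : providers)
    std::printf("runner %d, provider %d\n", info.runner, info.provider);
  return 0;  // Prints runner 1's entries, then runner 2's: one hop each.
}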
| 460 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState( |
| 461 MemoryDumpRequestArgs req_args, |
| 462 MemoryDumpProviderInfoSet::iterator next_dump_provider, |
| 463 const scoped_refptr<MemoryDumpSessionState>& session_state, |
| 464 MemoryDumpCallback callback) |
| 465 : process_memory_dump(session_state), |
| 466 req_args(req_args), |
| 467 next_dump_provider(next_dump_provider), |
| 468 callback(callback), |
| 469 task_runner(MessageLoop::current()->task_runner()) { |
| 470 } |
| 471 |
| 472 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() { |
| 473 } |
| 474 |
461 } // namespace trace_event | 475 } // namespace trace_event |
462 } // namespace base | 476 } // namespace base |