OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <utility> | 8 #include <utility> |
9 | 9 |
10 #include "base/atomic_sequence_num.h" | 10 #include "base/atomic_sequence_num.h" |
(...skipping 181 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
192 const MemoryDumpProvider::Options& options) { | 192 const MemoryDumpProvider::Options& options) { |
193 if (dumper_registrations_ignored_for_testing_) | 193 if (dumper_registrations_ignored_for_testing_) |
194 return; | 194 return; |
195 | 195 |
196 scoped_refptr<MemoryDumpProviderInfo> mdpinfo = | 196 scoped_refptr<MemoryDumpProviderInfo> mdpinfo = |
197 new MemoryDumpProviderInfo(mdp, name, task_runner, options); | 197 new MemoryDumpProviderInfo(mdp, name, task_runner, options); |
198 | 198 |
199 { | 199 { |
200 AutoLock lock(lock_); | 200 AutoLock lock(lock_); |
201 bool already_registered = !dump_providers_.insert(mdpinfo).second; | 201 bool already_registered = !dump_providers_.insert(mdpinfo).second; |
202 // This actually happen in some tests which don't have a clean tear-down | 202 // This actually happens in some tests which don't have a clean tear-down |
203 // path for RenderThreadImpl::Init(). | 203 // path for RenderThreadImpl::Init(). |
204 if (already_registered) | 204 if (already_registered) |
205 return; | 205 return; |
206 } | 206 } |
207 | 207 |
208 if (heap_profiling_enabled_) | 208 if (heap_profiling_enabled_) |
209 mdp->OnHeapProfilingEnabled(true); | 209 mdp->OnHeapProfilingEnabled(true); |
210 } | 210 } |
211 | 211 |
212 void MemoryDumpManager::RegisterDumpProvider( | 212 void MemoryDumpManager::RegisterDumpProvider( |
213 MemoryDumpProvider* mdp, | 213 MemoryDumpProvider* mdp, |
214 const char* name, | 214 const char* name, |
215 const scoped_refptr<SingleThreadTaskRunner>& task_runner) { | 215 const scoped_refptr<SingleThreadTaskRunner>& task_runner) { |
216 RegisterDumpProvider(mdp, name, task_runner, MemoryDumpProvider::Options()); | 216 RegisterDumpProvider(mdp, name, task_runner, MemoryDumpProvider::Options()); |
217 } | 217 } |
218 | 218 |
219 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { | 219 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { |
| 220 UnregisterDumpProviderInternal(mdp, false /* delete_async */); |
| 221 } |
| 222 |
| 223 void MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon( |
| 224 scoped_ptr<MemoryDumpProvider> mdp) { |
| 225 UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */); |
| 226 } |
| 227 |
| 228 void MemoryDumpManager::UnregisterDumpProviderInternal( |
| 229 MemoryDumpProvider* mdp, |
| 230 bool take_mdp_ownership_and_delete_async) { |
| 231 scoped_ptr<MemoryDumpProvider> owned_mdp; |
| 232 if (take_mdp_ownership_and_delete_async) |
| 233 owned_mdp.reset(mdp); |
| 234 |
220 AutoLock lock(lock_); | 235 AutoLock lock(lock_); |
221 | 236 |
222 auto mdp_iter = dump_providers_.begin(); | 237 auto mdp_iter = dump_providers_.begin(); |
223 for (; mdp_iter != dump_providers_.end(); ++mdp_iter) { | 238 for (; mdp_iter != dump_providers_.end(); ++mdp_iter) { |
224 if ((*mdp_iter)->dump_provider == mdp) | 239 if ((*mdp_iter)->dump_provider == mdp) |
225 break; | 240 break; |
226 } | 241 } |
227 | 242 |
228 if (mdp_iter == dump_providers_.end()) | 243 if (mdp_iter == dump_providers_.end()) |
229 return; | 244 return; // Not registered / already unregistered. |
230 | 245 |
231 // Unregistration of a MemoryDumpProvider while tracing is ongoing is safe | 246 if (take_mdp_ownership_and_delete_async) { |
232 // only if the MDP has specified a thread affinity (via task_runner()) AND | 247 // The MDP will be deleted whenever the MDPInfo struct will, that is either: |
233 // the unregistration happens on the same thread (so the MDP cannot unregister | 248 // - At the end of this function, if no dump is in progress. |
234 // and OnMemoryDump() at the same time). | 249 // - In the prologue of the ContinueAsyncProcessDump(). |
235 // Otherwise, it is not possible to guarantee that its unregistration is | 250 DCHECK(!(*mdp_iter)->owned_dump_provider); |
236 // race-free. If you hit this DCHECK, your MDP has a bug. | 251 (*mdp_iter)->owned_dump_provider = std::move(owned_mdp); |
237 DCHECK(!subtle::NoBarrier_Load(&memory_tracing_enabled_) || | 252 } else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) { |
238 ((*mdp_iter)->task_runner && | 253 // If you hit this DCHECK, your dump provider has a bug. |
239 (*mdp_iter)->task_runner->BelongsToCurrentThread())) | 254 // Unregistration of a MemoryDumpProvider is safe only if: |
240 << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to " | 255 // - The MDP has specified a thread affinity (via task_runner()) AND |
241 << "unregister itself in a racy way. Please file a crbug."; | 256 // the unregistration happens on the same thread (so the MDP cannot |
| 257 // unregister and be in the middle of an OnMemoryDump() at the same time). |
| 258 // - The MDP has NOT specified a thread affinity and its ownership is |
| 259 // transferred via UnregisterAndDeleteDumpProviderSoon(). |
| 260 // In all the other cases, it is not possible to guarantee that the |
| 261 // unregistration will not race with OnMemoryDump() calls. |
| 262 DCHECK((*mdp_iter)->task_runner && |
| 263 (*mdp_iter)->task_runner->BelongsToCurrentThread()) |
| 264 << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to " |
| 265 << "unregister itself in a racy way. Please file a crbug."; |
| 266 } |
242 | 267 |
243 // The MDPInfo instance can still be referenced by the | 268 // The MDPInfo instance can still be referenced by the |
244 // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason | 269 // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason |
245 // the MDPInfo is flagged as disabled. It will cause ContinueAsyncProcessDump | 270 // the MDPInfo is flagged as disabled. It will cause ContinueAsyncProcessDump |
246 // to just skip it, without actually invoking the |mdp|, which might be | 271 // to just skip it, without actually invoking the |mdp|, which might be |
247 // destroyed by the caller soon after this method returns. | 272 // destroyed by the caller soon after this method returns. |
248 (*mdp_iter)->disabled = true; | 273 (*mdp_iter)->disabled = true; |
249 dump_providers_.erase(mdp_iter); | 274 dump_providers_.erase(mdp_iter); |
250 } | 275 } |
251 | 276 |
(...skipping 106 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
358 pmd_async_state->pending_dump_providers.back().get(); | 383 pmd_async_state->pending_dump_providers.back().get(); |
359 | 384 |
360 // If the dump provider did not specify a thread affinity, dump on | 385 // If the dump provider did not specify a thread affinity, dump on |
361 // |dump_thread_|. Note that |dump_thread_| might have been Stop()-ed at this | 386 // |dump_thread_|. Note that |dump_thread_| might have been Stop()-ed at this |
362 // point (if tracing was disabled in the meanwhile). In such case the | 387 // point (if tracing was disabled in the meanwhile). In such case the |
363 // PostTask() below will fail, but |task_runner| should always be non-null. | 388 // PostTask() below will fail, but |task_runner| should always be non-null. |
364 SingleThreadTaskRunner* task_runner = mdpinfo->task_runner.get(); | 389 SingleThreadTaskRunner* task_runner = mdpinfo->task_runner.get(); |
365 if (!task_runner) | 390 if (!task_runner) |
366 task_runner = pmd_async_state->dump_thread_task_runner.get(); | 391 task_runner = pmd_async_state->dump_thread_task_runner.get(); |
367 | 392 |
| 393 bool post_task_failed = false; |
368 if (!task_runner->BelongsToCurrentThread()) { | 394 if (!task_runner->BelongsToCurrentThread()) { |
369 // It's time to hop onto another thread. | 395 // It's time to hop onto another thread. |
370 const bool did_post_task = task_runner->PostTask( | 396 post_task_failed = !task_runner->PostTask( |
371 FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump, | 397 FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump, |
372 Unretained(this), Unretained(pmd_async_state.get()))); | 398 Unretained(this), Unretained(pmd_async_state.get()))); |
373 if (did_post_task) { | 399 if (!post_task_failed) { |
376 // Ownership is tranferred to the next ContinueAsyncProcessDump(). | 400 // Ownership is transferred to the next ContinueAsyncProcessDump(). |
375 ignore_result(pmd_async_state.release()); | 401 ignore_result(pmd_async_state.release()); |
376 return; | 402 return; |
377 } | 403 } |
378 // The thread is gone. Skip the dump provider and keep going. | |
379 mdpinfo->disabled = true; | |
380 } | 404 } |
381 | 405 |
382 // At this point wither we are on the right thread (|mdpinfo.task_runner|) | 406 // At this point either: |
383 // to access mdp fields, or the right thread is gone (and |disabled| == true). | 407 // - The MDP has a task runner affinity and we are on the right thread. |
| 408 // - The MDP has a task runner affinity but the underlying thread is gone, |
| 409 // hence the above |post_task_failed| == true. |
| 410 // - The MDP does NOT have a task runner affinity. A locked access is required |
| 411 // to R/W |disabled| (for the UnregisterAndDeleteDumpProviderSoon() case). |
| 412 bool should_dump; |
| 413 const char* disabled_reason = nullptr; |
| 414 { |
| 415 AutoLock lock(lock_); |
| 416 if (!mdpinfo->disabled) { |
| 417 if (mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) { |
| 418 mdpinfo->disabled = true; |
| 419 disabled_reason = |
| 420 "Dump failure, possibly related with sandboxing (crbug.com/461788)." |
| 421 " Try --no-sandbox."; |
| 422 } else if (post_task_failed) { |
| 423 disabled_reason = "The thread it was meant to dump onto is gone."; |
| 424 mdpinfo->disabled = true; |
| 425 } |
| 426 } |
| 427 should_dump = !mdpinfo->disabled; |
| 428 } |
384 | 429 |
385 if (!mdpinfo->disabled) { | 430 if (disabled_reason) { |
| 431 LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name << "\". " |
| 432 << disabled_reason; |
| 433 } |
| 434 |
| 435 if (should_dump) { |
386 // Invoke the dump provider. | 436 // Invoke the dump provider. |
387 TRACE_EVENT_WITH_FLOW1(kTraceCategory, | 437 TRACE_EVENT_WITH_FLOW1(kTraceCategory, |
388 "MemoryDumpManager::ContinueAsyncProcessDump", | 438 "MemoryDumpManager::ContinueAsyncProcessDump", |
389 TRACE_ID_MANGLE(pmd_async_state->req_args.dump_guid), | 439 TRACE_ID_MANGLE(pmd_async_state->req_args.dump_guid), |
390 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT, | 440 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT, |
391 "dump_provider.name", mdpinfo->name); | 441 "dump_provider.name", mdpinfo->name); |
392 | 442 |
393 // Pid of the target process being dumped. Often kNullProcessId (= current | 443 // Pid of the target process being dumped. Often kNullProcessId (= current |
394 // process), non-zero when the coordinator process creates dumps on behalf | 444 // process), non-zero when the coordinator process creates dumps on behalf |
395 // of child processes (see crbug.com/461788). | 445 // of child processes (see crbug.com/461788). |
396 ProcessId target_pid = mdpinfo->options.target_pid; | 446 ProcessId target_pid = mdpinfo->options.target_pid; |
397 ProcessMemoryDump* pmd = | 447 ProcessMemoryDump* pmd = |
398 pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(target_pid); | 448 pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(target_pid); |
399 MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail}; | 449 MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail}; |
400 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd); | 450 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd); |
401 | 451 mdpinfo->consecutive_failures = |
402 if (dump_successful) { | 452 dump_successful ? 0 : mdpinfo->consecutive_failures + 1; |
403 mdpinfo->consecutive_failures = 0; | |
404 } else { | |
405 ++mdpinfo->consecutive_failures; | |
406 if (mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) { | |
407 mdpinfo->disabled = true; | |
408 LOG(ERROR) << "MemoryDumpProvider \"" << mdpinfo->name << "\" failed, " | |
409 << "possibly due to sandboxing (crbug.com/461788)." | |
410 << "Disabling dumper for current process. Try --no-sandbox."; | |
411 } | |
412 } | |
413 } // if (!mdpinfo->disabled) | 453 } // if (!mdpinfo->disabled) |
414 | 454 |
415 pmd_async_state->pending_dump_providers.pop_back(); | 455 pmd_async_state->pending_dump_providers.pop_back(); |
416 ContinueAsyncProcessDump(pmd_async_state.release()); | 456 ContinueAsyncProcessDump(pmd_async_state.release()); |
417 } | 457 } |
418 | 458 |
419 // static | 459 // static |
420 void MemoryDumpManager::FinalizeDumpAndAddToTrace( | 460 void MemoryDumpManager::FinalizeDumpAndAddToTrace( |
421 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { | 461 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { |
422 DCHECK(pmd_async_state->pending_dump_providers.empty()); | 462 DCHECK(pmd_async_state->pending_dump_providers.empty()); |
(...skipping 194 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
617 auto iter = process_dumps.find(pid); | 657 auto iter = process_dumps.find(pid); |
618 if (iter == process_dumps.end()) { | 658 if (iter == process_dumps.end()) { |
619 scoped_ptr<ProcessMemoryDump> new_pmd(new ProcessMemoryDump(session_state)); | 659 scoped_ptr<ProcessMemoryDump> new_pmd(new ProcessMemoryDump(session_state)); |
620 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; | 660 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; |
621 } | 661 } |
622 return iter->second.get(); | 662 return iter->second.get(); |
623 } | 663 } |
624 | 664 |
625 } // namespace trace_event | 665 } // namespace trace_event |
626 } // namespace base | 666 } // namespace base |
OLD | NEW |