Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 | 8 |
| 9 #include "base/atomic_sequence_num.h" | 9 #include "base/atomic_sequence_num.h" |
| 10 #include "base/base_switches.h" | 10 #include "base/base_switches.h" |
| 11 #include "base/command_line.h" | 11 #include "base/command_line.h" |
| 12 #include "base/compiler_specific.h" | 12 #include "base/compiler_specific.h" |
| 13 #include "base/thread_task_runner_handle.h" | 13 #include "base/thread_task_runner_handle.h" |
| 14 #include "base/threading/thread.h" | |
| 14 #include "base/trace_event/memory_dump_provider.h" | 15 #include "base/trace_event/memory_dump_provider.h" |
| 15 #include "base/trace_event/memory_dump_session_state.h" | 16 #include "base/trace_event/memory_dump_session_state.h" |
| 16 #include "base/trace_event/memory_profiler_allocation_context.h" | 17 #include "base/trace_event/memory_profiler_allocation_context.h" |
| 17 #include "base/trace_event/process_memory_dump.h" | 18 #include "base/trace_event/process_memory_dump.h" |
| 18 #include "base/trace_event/trace_event_argument.h" | 19 #include "base/trace_event/trace_event_argument.h" |
| 19 #include "build/build_config.h" | 20 #include "build/build_config.h" |
| 20 | 21 |
| 21 #if !defined(OS_NACL) | 22 #if !defined(OS_NACL) |
| 22 #include "base/trace_event/process_memory_totals_dump_provider.h" | 23 #include "base/trace_event/process_memory_totals_dump_provider.h" |
| 23 #endif | 24 #endif |
| (...skipping 251 matching lines...) | |
| 275 | 276 |
| 276 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, | 277 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, |
| 277 const MemoryDumpCallback& callback) { | 278 const MemoryDumpCallback& callback) { |
| 278 TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump", | 279 TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump", |
| 279 TRACE_ID_MANGLE(args.dump_guid)); | 280 TRACE_ID_MANGLE(args.dump_guid)); |
| 280 | 281 |
| 281 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; | 282 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; |
| 282 { | 283 { |
| 283 AutoLock lock(lock_); | 284 AutoLock lock(lock_); |
| 284 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( | 285 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( |
| 285 args, dump_providers_.begin(), session_state_, callback)); | 286 args, dump_providers_.begin(), session_state_, callback, |
| | 287 dump_thread_->task_runner())); |
| 286 } | 288 } |
| 287 | 289 |
| 288 TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump", | 290 TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump", |
| 289 TRACE_ID_MANGLE(args.dump_guid), | 291 TRACE_ID_MANGLE(args.dump_guid), |
| 290 TRACE_EVENT_FLAG_FLOW_OUT); | 292 TRACE_EVENT_FLAG_FLOW_OUT); |
| 291 | 293 |
| 292 // Start the thread hop. |dump_providers_| are kept sorted by thread, so | 294 // Start the thread hop. |dump_providers_| are kept sorted by thread, so |
| 293 // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread | 295 // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread |
| 294 // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()). | 296 // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()). |
| 295 ContinueAsyncProcessDump(pmd_async_state.Pass()); | 297 ContinueAsyncProcessDump(pmd_async_state.Pass()); |
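
The comment above relies on one invariant: because |dump_providers_| is ordered by task runner, providers bound to the same thread are adjacent in the set, so the continuation only needs to hop when the ordering key changes. A minimal, self-contained sketch of that idea (plain std:: types stand in for the base:: ones; the provider names and integer thread ids are made up for illustration):

```cpp
#include <cstdio>
#include <set>
#include <string>

// Stand-in for MemoryDumpProviderInfo: |thread_id| plays the role of the
// provider's task runner. The ordering groups providers that share a thread
// next to each other, which is what the real set ordering achieves.
struct ProviderInfo {
  std::string name;
  int thread_id;
  bool operator<(const ProviderInfo& other) const {
    if (thread_id == other.thread_id)
      return name < other.name;
    return thread_id < other.thread_id;
  }
};

int main() {
  std::set<ProviderInfo> providers = {
      {"malloc", 1}, {"skia", 2}, {"cc", 2}, {"v8", 1}};

  // Walk the sorted set the way ContinueAsyncProcessDump walks
  // |dump_providers_|: a thread hop is needed only when the key changes.
  int hops = 0;
  int current_thread = -1;
  for (const ProviderInfo& p : providers) {
    if (p.thread_id != current_thread) {
      ++hops;  // the real code would PostTask() to that thread here
      current_thread = p.thread_id;
    }
    std::printf("dump %s on thread %d\n", p.name.c_str(), p.thread_id);
  }
  std::printf("hops: %d (at most one per distinct thread)\n", hops);
  return 0;
}
```

With four providers spread over two threads, the walk above hops only twice, which is the "at most once per thread" guarantee the comment describes.
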
| (...skipping 28 matching lines...) | |
| 324 // DO NOT put any LOG() statement in the locked sections, as in some contexts | 326 // DO NOT put any LOG() statement in the locked sections, as in some contexts |
| 325 // (GPU process) LOG() ends up performing PostTask/IPCs. | 327 // (GPU process) LOG() ends up performing PostTask/IPCs. |
| 326 MemoryDumpProvider* mdp; | 328 MemoryDumpProvider* mdp; |
| 327 bool skip_dump = false; | 329 bool skip_dump = false; |
| 328 { | 330 { |
| 329 AutoLock lock(lock_); | 331 AutoLock lock(lock_); |
| 330 | 332 |
| 331 auto mdp_info = pmd_async_state->next_dump_provider; | 333 auto mdp_info = pmd_async_state->next_dump_provider; |
| 332 mdp = mdp_info->dump_provider; | 334 mdp = mdp_info->dump_provider; |
| 333 dump_provider_name = mdp_info->name; | 335 dump_provider_name = mdp_info->name; |
| | 336 |
| | 337 // If the dump provider did not specify a thread affinity, dump on the |
|
petrcermak
2015/10/30 17:43:04
nit: "dump_thread_" is a name so I'd drop the arti
Primiano Tucci (use gerrit)
2015/11/02 10:22:22
Done.
| |
| | 338 // |dump_thread_|. |
| | 339 SingleThreadTaskRunner* task_runner = mdp_info->task_runner.get(); |
| | 340 if (!task_runner) |
| | 341 task_runner = pmd_async_state->dump_thread_task_runner.get(); |
| | 342 |
| | 343 // The |dump_thread_| might have been Stop()-ed at this point (if tracing |
|
petrcermak
2015/10/30 17:43:04
ditto
Primiano Tucci (use gerrit)
2015/11/02 10:22:22
Done.
| |
| | 344 // was disabled in the meanwhile). In such case the PostTask() below will |
| | 345 // fail. The task_runner handle, however, should always be non-null. |
| | 346 DCHECK(task_runner); |
| | 347 |
| 334 if (mdp_info->disabled || mdp_info->unregistered) { | 348 if (mdp_info->disabled || mdp_info->unregistered) { |
| 335 skip_dump = true; | 349 skip_dump = true; |
| 336 } else if (mdp_info->task_runner && | 350 } else if (!task_runner->BelongsToCurrentThread()) { |
| 337 !mdp_info->task_runner->BelongsToCurrentThread()) { | |
| 338 // It's time to hop onto another thread. | 351 // It's time to hop onto another thread. |
| 339 | 352 |
| 340 // Copy the callback + arguments just for the unlikely case in which | 353 // Copy the callback + arguments just for the unlikely case in which |
| 341 // PostTask fails. In such case the Bind helper will destroy the | 354 // PostTask fails. In such case the Bind helper will destroy the |
| 342 // pmd_async_state and we must keep a copy of the fields to notify the | 355 // pmd_async_state and we must keep a copy of the fields to notify the |
| 343 // abort. | 356 // abort. |
| 344 MemoryDumpCallback callback = pmd_async_state->callback; | 357 MemoryDumpCallback callback = pmd_async_state->callback; |
| 345 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = | 358 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = |
| 346 pmd_async_state->task_runner; | 359 pmd_async_state->callback_task_runner; |
| 347 | 360 |
| 348 const bool did_post_task = mdp_info->task_runner->PostTask( | 361 const bool did_post_task = task_runner->PostTask( |
| 349 FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump, | 362 FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump, |
| 350 Unretained(this), Passed(pmd_async_state.Pass()))); | 363 Unretained(this), Passed(pmd_async_state.Pass()))); |
| 351 if (did_post_task) | 364 if (did_post_task) |
| 352 return; | 365 return; |
| 353 | 366 |
| 354 // The thread is gone. At this point the best thing we can do is to | 367 // The thread is gone. At this point the best thing we can do is to |
| 355 // disable the dump provider and abort this dump. | 368 // disable the dump provider and abort this dump. |
| 356 mdp_info->disabled = true; | 369 mdp_info->disabled = true; |
| 357 return AbortDumpLocked(callback, callback_task_runner, dump_guid); | 370 return AbortDumpLocked(callback, callback_task_runner, dump_guid); |
| 358 } | 371 } |
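
The new block above does three things: fall back to |dump_thread_|'s task runner when a provider has no thread affinity, copy the callback and its task runner before handing |pmd_async_state| to PostTask(), and abort the dump if the post fails because the target thread is gone. The copy-before-post step is the subtle one, so here is a rough standalone sketch of just that pattern, using toy stand-ins for the task runner and the per-dump state (none of this is the Chromium API):

```cpp
#include <cstdint>
#include <cstdio>
#include <functional>
#include <memory>
#include <utility>

// Toy "task runner" whose PostTask() can fail, e.g. because the target
// thread has already been stopped. On failure the task, and everything it
// captured, is destroyed without running.
struct FakeTaskRunner {
  bool alive = false;
  template <typename Task>
  bool PostTask(Task task) {
    if (!alive)
      return false;  // |task| dies here, taking its captured state with it
    task();
    return true;
  }
};

// Per-dump state, owned by whichever thread is currently driving the dump.
struct DumpState {
  uint64_t guid = 42;
  std::function<void(uint64_t, bool)> callback;
};

void ContinueDump(std::unique_ptr<DumpState> state, FakeTaskRunner* runner) {
  // Copy the fields needed to report failure *before* moving ownership of
  // |state| into the posted closure: if PostTask() fails, the closure (and
  // the state it owns) is already gone, but these copies survive.
  std::function<void(uint64_t, bool)> callback = state->callback;
  const uint64_t guid = state->guid;

  const bool posted = runner->PostTask([state = std::move(state)] {
    std::printf("continuing dump %llu on the target thread\n",
                static_cast<unsigned long long>(state->guid));
  });
  if (!posted)
    callback(guid, /*success=*/false);  // abort the dump using the copies
}

int main() {
  FakeTaskRunner stopped_thread;  // simulates a Stop()-ed dump thread
  std::unique_ptr<DumpState> state(new DumpState);
  state->callback = [](uint64_t guid, bool success) {
    std::printf("dump %llu finished, success=%d\n",
                static_cast<unsigned long long>(guid), success);
  };
  ContinueDump(std::move(state), &stopped_thread);
  return 0;
}
```
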
| (...skipping 41 matching lines...) | |
| 400 if (finalize) | 413 if (finalize) |
| 401 return FinalizeDumpAndAddToTrace(pmd_async_state.Pass()); | 414 return FinalizeDumpAndAddToTrace(pmd_async_state.Pass()); |
| 402 | 415 |
| 403 ContinueAsyncProcessDump(pmd_async_state.Pass()); | 416 ContinueAsyncProcessDump(pmd_async_state.Pass()); |
| 404 } | 417 } |
| 405 | 418 |
| 406 // static | 419 // static |
| 407 void MemoryDumpManager::FinalizeDumpAndAddToTrace( | 420 void MemoryDumpManager::FinalizeDumpAndAddToTrace( |
| 408 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { | 421 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { |
| 409 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; | 422 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; |
| 410 if (!pmd_async_state->task_runner->BelongsToCurrentThread()) { | 423 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { |
| 411 scoped_refptr<SingleThreadTaskRunner> task_runner = | 424 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = |
| 412 pmd_async_state->task_runner; | 425 pmd_async_state->callback_task_runner; |
| 413 task_runner->PostTask(FROM_HERE, | 426 callback_task_runner->PostTask( |
| 414 Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace, | 427 FROM_HERE, Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace, |
| 415 Passed(pmd_async_state.Pass()))); | 428 Passed(pmd_async_state.Pass()))); |
| 416 return; | 429 return; |
| 417 } | 430 } |
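
The re-post above is the usual "bounce to the right thread" idiom: when FinalizeDumpAndAddToTrace() is reached on some other thread, it posts itself to the callback's task runner and returns. A small self-contained sketch of the idiom, assuming a toy single-thread task runner (names invented, not the Chromium classes):

```cpp
#include <cstdio>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

// Minimal single-thread "task runner": tasks can be queued from any thread
// but only the owning thread drains them. Purely illustrative.
class TinyTaskRunner {
 public:
  void BindToCurrentThread() { owner_ = std::this_thread::get_id(); }

  bool BelongsToCurrentThread() const {
    return std::this_thread::get_id() == owner_;
  }

  void PostTask(std::function<void()> task) {
    std::lock_guard<std::mutex> lock(mutex_);
    tasks_.push(std::move(task));
  }

  void RunPendingTasks() {
    for (;;) {
      std::function<void()> task;
      {
        std::lock_guard<std::mutex> lock(mutex_);
        if (tasks_.empty())
          return;
        task = std::move(tasks_.front());
        tasks_.pop();
      }
      task();
    }
  }

 private:
  std::thread::id owner_;
  std::mutex mutex_;
  std::queue<std::function<void()>> tasks_;
};

// The idiom: if we are not on the thread that must do the work, re-post this
// same function there and return immediately.
void Finalize(TinyTaskRunner* callback_runner, int dump_guid) {
  if (!callback_runner->BelongsToCurrentThread()) {
    callback_runner->PostTask(
        [callback_runner, dump_guid] { Finalize(callback_runner, dump_guid); });
    return;
  }
  std::printf("finalizing dump %d on the callback thread\n", dump_guid);
}

int main() {
  TinyTaskRunner runner;
  runner.BindToCurrentThread();  // the main thread owns the runner

  // Call Finalize() from another thread: it bounces itself back here.
  std::thread worker([&runner] { Finalize(&runner, 7); });
  worker.join();

  runner.RunPendingTasks();  // runs the re-posted Finalize() on this thread
  return 0;
}
```
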
| 418 | 431 |
| 419 TRACE_EVENT_WITH_FLOW0(kTraceCategory, | 432 TRACE_EVENT_WITH_FLOW0(kTraceCategory, |
| 420 "MemoryDumpManager::FinalizeDumpAndAddToTrace", | 433 "MemoryDumpManager::FinalizeDumpAndAddToTrace", |
| 421 TRACE_ID_MANGLE(dump_guid), TRACE_EVENT_FLAG_FLOW_IN); | 434 TRACE_ID_MANGLE(dump_guid), TRACE_EVENT_FLAG_FLOW_IN); |
| 422 | 435 |
| 423 TracedValue* traced_value = new TracedValue(); | 436 TracedValue* traced_value = new TracedValue(); |
| 424 scoped_refptr<ConvertableToTraceFormat> event_value(traced_value); | 437 scoped_refptr<ConvertableToTraceFormat> event_value(traced_value); |
| 425 pmd_async_state->process_memory_dump.AsValueInto(traced_value); | 438 pmd_async_state->process_memory_dump.AsValueInto(traced_value); |
| (...skipping 50 matching lines...) | |
| 476 scoped_refptr<StackFrameDeduplicator> stack_frame_deduplicator = nullptr; | 489 scoped_refptr<StackFrameDeduplicator> stack_frame_deduplicator = nullptr; |
| 477 | 490 |
| 478 if (heap_profiling_enabled_) { | 491 if (heap_profiling_enabled_) { |
| 479 // If heap profiling is enabled, the stack frame deduplicator will be in | 492 // If heap profiling is enabled, the stack frame deduplicator will be in |
| 480 // use. Add a metadata event to write its frames. | 493 // use. Add a metadata event to write its frames. |
| 481 stack_frame_deduplicator = new StackFrameDeduplicator; | 494 stack_frame_deduplicator = new StackFrameDeduplicator; |
| 482 TRACE_EVENT_API_ADD_METADATA_EVENT("stackFrames", "stackFrames", | 495 TRACE_EVENT_API_ADD_METADATA_EVENT("stackFrames", "stackFrames", |
| 483 stack_frame_deduplicator); | 496 stack_frame_deduplicator); |
| 484 } | 497 } |
| 485 | 498 |
| 499 dump_thread_.reset(new Thread("MemoryInfra")); | |
| 500 dump_thread_->Start(); | |
|
Ruud van Asseldonk
2015/10/30 16:52:41
Is |new Thread| or |dump_thread_->Start()| an expensive operation?
petrcermak
2015/10/30 17:43:04
It seems like that could create a race with OnTraceLogDisabled().
Primiano Tucci (use gerrit)
2015/11/02 10:22:23
Makes sense, moved to a temporary ref_ptr before acquiring the lock.
| |
| 501 | |
| 486 session_state_ = new MemoryDumpSessionState(stack_frame_deduplicator); | 502 session_state_ = new MemoryDumpSessionState(stack_frame_deduplicator); |
| 487 | 503 |
| 488 for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) { | 504 for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) { |
| 489 it->disabled = false; | 505 it->disabled = false; |
| 490 it->consecutive_failures = 0; | 506 it->consecutive_failures = 0; |
| 491 } | 507 } |
| 492 | 508 |
| 493 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); | 509 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); |
| 494 | 510 |
| 495 // TODO(primiano): This is a temporary hack to disable periodic memory dumps | 511 // TODO(primiano): This is a temporary hack to disable periodic memory dumps |
| (...skipping 29 matching lines...) | |
| 525 } | 541 } |
| 526 DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms); | 542 DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms); |
| 527 g_heavy_dumps_rate = heavy_dump_period_ms / min_timer_period_ms; | 543 g_heavy_dumps_rate = heavy_dump_period_ms / min_timer_period_ms; |
| 528 | 544 |
| 529 periodic_dump_timer_.Start(FROM_HERE, | 545 periodic_dump_timer_.Start(FROM_HERE, |
| 530 TimeDelta::FromMilliseconds(min_timer_period_ms), | 546 TimeDelta::FromMilliseconds(min_timer_period_ms), |
| 531 base::Bind(&RequestPeriodicGlobalDump)); | 547 base::Bind(&RequestPeriodicGlobalDump)); |
| 532 } | 548 } |
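
A quick worked example for the rate computed above (the actual periods are in the skipped lines, so these numbers are only hypothetical): with a light-dump period of 250 ms and a heavy-dump period of 2000 ms, the DCHECK holds because 2000 is an exact multiple of 250, and g_heavy_dumps_rate becomes 2000 / 250 = 8, i.e. every 8th periodic dump is a heavy one.
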
| 533 | 549 |
| 534 void MemoryDumpManager::OnTraceLogDisabled() { | 550 void MemoryDumpManager::OnTraceLogDisabled() { |
| | 551 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); |
| | 552 |
| | 553 // Thread stops are blocking and must be performed outside of the |lock_| |
| | 554 // or will deadlock (e.g., if ContinueAsyncProcessDump() tries to acquire it). |
| | 555 periodic_dump_timer_.Stop(); |
| | 556 if (dump_thread_) |
|
Ruud van Asseldonk
2015/10/30 16:52:41
So |dump_thread_| is set inside the lock everywhere
petrcermak
2015/10/30 17:43:04
I agree that would be better. It's strange (and po
Primiano Tucci (use gerrit)
2015/11/02 10:22:23
So, technically it is fine as it is accessed only in O
| |
| 557 dump_thread_->Stop(); | |
| 558 | |
| 535 AutoLock lock(lock_); | 559 AutoLock lock(lock_); |
| 536 periodic_dump_timer_.Stop(); | 560 dump_thread_.reset(); |
| 537 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); | |
| 538 session_state_ = nullptr; | 561 session_state_ = nullptr; |
| 539 } | 562 } |
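
The two review threads above (about creating/starting the dump thread and about stopping it) converge on the same locking discipline: do the expensive or blocking thread operation outside |lock_|, and only move the owning pointer while the lock is held. A rough sketch of that shape, under the stated assumption that std::thread and std::mutex stand in for base::Thread and the manager's lock (class and method names invented, not the final Chromium code):

```cpp
#include <memory>
#include <mutex>
#include <thread>

class DumpThreadOwner {
 public:
  void OnTracingEnabled() {
    // Thread creation and startup happen without |lock_| held, so other
    // threads are not blocked behind them.
    std::unique_ptr<std::thread> thread(
        new std::thread([] { /* the dump thread's loop would live here */ }));

    std::lock_guard<std::mutex> lock(lock_);
    dump_thread_ = std::move(thread);  // cheap pointer move under the lock
  }

  void OnTracingDisabled() {
    // Steal ownership under the lock, then do the blocking join() outside
    // of it, so nothing that needs |lock_| can deadlock against the stop.
    std::unique_ptr<std::thread> thread;
    {
      std::lock_guard<std::mutex> lock(lock_);
      thread = std::move(dump_thread_);
    }
    if (thread)
      thread->join();
  }

 private:
  std::mutex lock_;
  std::unique_ptr<std::thread> dump_thread_;
};

int main() {
  DumpThreadOwner owner;
  owner.OnTracingEnabled();
  owner.OnTracingDisabled();
  return 0;
}
```
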
| 540 | 563 |
| 541 uint64_t MemoryDumpManager::GetTracingProcessId() const { | 564 uint64_t MemoryDumpManager::GetTracingProcessId() const { |
| 542 return delegate_->GetTracingProcessId(); | 565 return delegate_->GetTracingProcessId(); |
| 543 } | 566 } |
| 544 | 567 |
| 545 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( | 568 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( |
| 546 MemoryDumpProvider* dump_provider, | 569 MemoryDumpProvider* dump_provider, |
| 547 const char* name, | 570 const char* name, |
| 548 const scoped_refptr<SingleThreadTaskRunner>& task_runner) | 571 const scoped_refptr<SingleThreadTaskRunner>& task_runner) |
| 549 : dump_provider(dump_provider), | 572 : dump_provider(dump_provider), |
| 550 name(name), | 573 name(name), |
| 551 task_runner(task_runner), | 574 task_runner(task_runner), |
| 552 consecutive_failures(0), | 575 consecutive_failures(0), |
| 553 disabled(false), | 576 disabled(false), |
| 554 unregistered(false) {} | 577 unregistered(false) {} |
| 555 | 578 |
| 556 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {} | 579 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {} |
| 557 | 580 |
| 558 bool MemoryDumpManager::MemoryDumpProviderInfo::operator<( | 581 bool MemoryDumpManager::MemoryDumpProviderInfo::operator<( |
| 559 const MemoryDumpProviderInfo& other) const { | 582 const MemoryDumpProviderInfo& other) const { |
| 560 if (task_runner == other.task_runner) | 583 if (task_runner == other.task_runner) |
| 561 return dump_provider < other.dump_provider; | 584 return dump_provider < other.dump_provider; |
| 562 return task_runner < other.task_runner; | 585 // Ensure that unbound providers (task_runner == nullptr) run always last. |
|
petrcermak
2015/10/30 17:43:04
supernit: "always run" sounds better than "run alw
Primiano Tucci (use gerrit)
2015/11/02 10:22:23
Done.
| |
| | 586 return !(task_runner < other.task_runner); |
|
Ruud van Asseldonk
2015/10/30 16:52:41
You replaced < with >= here but we already know th
Primiano Tucci (use gerrit)
2015/11/02 10:22:23
The reason why I did that is that operator< is alr
Ruud van Asseldonk
2015/11/02 11:00:37
I see. You could call |.get()| to work around.
| |
| 563 } | 587 } |
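
On the operator< exchange above: the double negation can be avoided by comparing raw pointers, as the ".get()" suggestion says, because the ordering only needs two properties: ties fall through to the provider comparison, and unbound providers (null task runner) sort last, which descending pointer order gives on every practical platform. A hedged sketch with std::shared_ptr standing in for scoped_refptr (all names invented):

```cpp
#include <algorithm>
#include <cstdio>
#include <functional>
#include <memory>
#include <string>
#include <vector>

// std::shared_ptr<int> stands in for scoped_refptr<SingleThreadTaskRunner>;
// a null pointer means "no thread affinity".
struct ProviderInfo {
  std::string name;
  std::shared_ptr<int> task_runner;
};

// Same ordering intent as the CL, written on the raw pointers: descending
// pointer order pushes null task runners last (the null pointer compares
// lowest on real platforms, which mirrors the CL's assumption). std::less
// gives a total order over pointers, so this is a valid strict weak ordering.
bool OrderedBefore(const ProviderInfo& a, const ProviderInfo& b) {
  if (a.task_runner == b.task_runner)
    return a.name < b.name;
  return std::less<const int*>()(b.task_runner.get(), a.task_runner.get());
}

int main() {
  std::shared_ptr<int> runner1(new int(1));
  std::shared_ptr<int> runner2(new int(2));
  std::vector<ProviderInfo> providers = {{"unbound_a", nullptr},
                                         {"skia", runner2},
                                         {"malloc", runner1},
                                         {"unbound_b", nullptr}};

  std::sort(providers.begin(), providers.end(), OrderedBefore);
  for (const ProviderInfo& p : providers)
    std::printf("%s bound=%d\n", p.name.c_str(), p.task_runner != nullptr);
  // The two unbound providers print last.
  return 0;
}
```
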
| 564 | 588 |
| 565 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState( | 589 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState( |
| 566 MemoryDumpRequestArgs req_args, | 590 MemoryDumpRequestArgs req_args, |
| 567 MemoryDumpProviderInfoSet::iterator next_dump_provider, | 591 MemoryDumpProviderInfoSet::iterator next_dump_provider, |
| 568 const scoped_refptr<MemoryDumpSessionState>& session_state, | 592 const scoped_refptr<MemoryDumpSessionState>& session_state, |
| 569 MemoryDumpCallback callback) | 593 MemoryDumpCallback callback, |
| | 594 const scoped_refptr<SingleThreadTaskRunner>& dump_thread_task_runner) |
| 570 : process_memory_dump(session_state), | 595 : process_memory_dump(session_state), |
| 571 req_args(req_args), | 596 req_args(req_args), |
| 572 next_dump_provider(next_dump_provider), | 597 next_dump_provider(next_dump_provider), |
| 573 callback(callback), | 598 callback(callback), |
| 574 task_runner(MessageLoop::current()->task_runner()) {} | 599 callback_task_runner(MessageLoop::current()->task_runner()), |
| | 600 dump_thread_task_runner(dump_thread_task_runner) {} |
| 575 | 601 |
| 576 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() { | 602 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() { |
| 577 } | 603 } |
| 578 | 604 |
| 579 } // namespace trace_event | 605 } // namespace trace_event |
| 580 } // namespace base | 606 } // namespace base |