Chromium Code Reviews

Diff: base/trace_event/memory_dump_manager.cc

Issue 1427963002: [tracing] Move memory-infra dumps to dedicated thread (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@memory-infra-names
Patch Set: Address review + fix browsertest (created 5 years, 1 month ago)
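The gist of this patch set: dump providers registered without a thread affinity ("unbound" providers) are no longer invoked on whichever thread the request happens to reach. OnTraceLogEnabled() now spins up a dedicated MemoryInfra thread, and ContinueAsyncProcessDump() falls back to that thread's task runner whenever a provider has no runner of its own. A minimal sketch of that fallback rule in plain C++ follows; TaskRunner, ProviderInfo and RunnerFor are hypothetical stand-ins, not the real //base types.

// Minimal sketch of the dispatch rule this CL introduces, with plain C++
// stand-ins (not the real base::SingleThreadTaskRunner / MemoryDumpProvider
// types): providers registered with a task runner keep it; providers with no
// thread affinity fall back to the dedicated dump thread's runner.
#include <cstdio>
#include <memory>

struct TaskRunner { const char* label; };  // stand-in for SingleThreadTaskRunner

struct ProviderInfo {
  const char* name;
  std::shared_ptr<TaskRunner> task_runner;  // null => "unbound" provider
};

// Mirrors the task_runner selection added to ContinueAsyncProcessDump().
TaskRunner* RunnerFor(const ProviderInfo& info, TaskRunner* dump_thread_runner) {
  return info.task_runner ? info.task_runner.get() : dump_thread_runner;
}

int main() {
  TaskRunner dump_thread_runner{"MemoryInfra"};
  auto io_runner = std::make_shared<TaskRunner>(TaskRunner{"IO"});
  ProviderInfo bound{"malloc", io_runner};
  ProviderInfo unbound{"skia", nullptr};
  std::printf("%s -> %s\n", bound.name, RunnerFor(bound, &dump_thread_runner)->label);
  std::printf("%s -> %s\n", unbound.name, RunnerFor(unbound, &dump_thread_runner)->label);
  return 0;
}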
  1    1    // Copyright 2015 The Chromium Authors. All rights reserved.
  2    2    // Use of this source code is governed by a BSD-style license that can be
  3    3    // found in the LICENSE file.
  4    4
  5    5    #include "base/trace_event/memory_dump_manager.h"
  6    6
  7    7    #include <algorithm>
  8    8
  9    9    #include "base/atomic_sequence_num.h"
 10   10    #include "base/base_switches.h"
 11   11    #include "base/command_line.h"
 12   12    #include "base/compiler_specific.h"
 13   13    #include "base/thread_task_runner_handle.h"
      14  + #include "base/threading/thread.h"
 14   15    #include "base/trace_event/memory_dump_provider.h"
 15   16    #include "base/trace_event/memory_dump_session_state.h"
 16   17    #include "base/trace_event/memory_profiler_allocation_context.h"
 17   18    #include "base/trace_event/process_memory_dump.h"
 18   19    #include "base/trace_event/trace_event_argument.h"
 19   20    #include "build/build_config.h"
 20   21
 21   22    #if !defined(OS_NACL)
 22   23    #include "base/trace_event/process_memory_totals_dump_provider.h"
 23   24    #endif
(...skipping 251 matching lines...)
275  276
276  277    void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
277  278        const MemoryDumpCallback& callback) {
278  279      TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump",
279  280          TRACE_ID_MANGLE(args.dump_guid));
280  281
281  282      scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
282  283      {
283  284        AutoLock lock(lock_);
284  285        pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
285       -        args, dump_providers_.begin(), session_state_, callback));
     286  +        args, dump_providers_.begin(), session_state_, callback,
     287  +        dump_thread_->task_runner()));
286  288      }
287  289
288  290      TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump",
289  291          TRACE_ID_MANGLE(args.dump_guid),
290  292          TRACE_EVENT_FLAG_FLOW_OUT);
291  293
292  294      // Start the thread hop. |dump_providers_| are kept sorted by thread, so
293  295      // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread
294  296      // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()).
295  297      ContinueAsyncProcessDump(pmd_async_state.Pass());
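A short illustration of the comment above: because the provider set is sorted by task runner, consecutive providers that share a runner are handled in one go, so the number of thread hops is bounded by the number of distinct runners, not by the number of providers. This is a hypothetical toy model in plain C++, not the real scheduling code.

// Count the hops a sorted walk over providers would make. Runner IDs are
// made up; equal IDs mean "same thread", and 0 stands for unbound providers,
// which this CL sorts last and dumps on the dedicated MemoryInfra thread.
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> provider_runner_ids = {3, 3, 7, 0, 0};  // already sorted
  int current = -1;  // the thread that requested the dump
  int hops = 0;
  for (int id : provider_runner_ids) {
    if (id != current) {  // this is where ContinueAsyncProcessDump would PostTask()
      ++hops;
      current = id;
    }
  }
  // 5 providers but only 3 hops: one per distinct thread, not one per provider.
  std::printf("providers: %zu, hops: %d\n", provider_runner_ids.size(), hops);
  return 0;
}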
(...skipping 28 matching lines...)
324  326      // DO NOT put any LOG() statement in the locked sections, as in some contexts
325  327      // (GPU process) LOG() ends up performing PostTask/IPCs.
326  328      MemoryDumpProvider* mdp;
327  329      bool skip_dump = false;
328  330      {
329  331        AutoLock lock(lock_);
330  332
331  333        auto mdp_info = pmd_async_state->next_dump_provider;
332  334        mdp = mdp_info->dump_provider;
333  335        dump_provider_name = mdp_info->name;
     336  +
     337  +     // If the dump provider did not specify a thread affinity, dump on
     338  +     // |dump_thread_|.
     339  +     SingleThreadTaskRunner* task_runner = mdp_info->task_runner.get();
     340  +     if (!task_runner)
     341  +       task_runner = pmd_async_state->dump_thread_task_runner.get();
     342  +
     343  +     // |dump_thread_| might have been Stop()-ed at this point (if tracing was
     344  +     // disabled in the meanwhile). In such case the PostTask() below will fail.
     345  +     // The task_runner handle, however, should always be non-null.
petrcermak 2015/11/02 10:35:20 nit: s/The task_runner handle/|task_runner|/
Primiano Tucci (use gerrit) 2015/11/02 15:07:07 Done.
     346  +     DCHECK(task_runner);
     347  +
334  348        if (mdp_info->disabled || mdp_info->unregistered) {
335  349          skip_dump = true;
336       -     } else if (mdp_info->task_runner &&
337       -                !mdp_info->task_runner->BelongsToCurrentThread()) {
     350  +     } else if (!task_runner->BelongsToCurrentThread()) {
338  351          // It's time to hop onto another thread.
339  352
340  353          // Copy the callback + arguments just for the unlikley case in which
341  354          // PostTask fails. In such case the Bind helper will destroy the
342  355          // pmd_async_state and we must keep a copy of the fields to notify the
343  356          // abort.
344  357          MemoryDumpCallback callback = pmd_async_state->callback;
345  358          scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
346       -          pmd_async_state->task_runner;
     359  +          pmd_async_state->callback_task_runner;
347  360
348       -       const bool did_post_task = mdp_info->task_runner->PostTask(
     361  +       const bool did_post_task = task_runner->PostTask(
349  362            FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump,
350  363                            Unretained(this), Passed(pmd_async_state.Pass())));
351  364        if (did_post_task)
352  365          return;
353  366
354  367        // The thread is gone. At this point the best thing we can do is to
355  368        // disable the dump provider and abort this dump.
356  369        mdp_info->disabled = true;
357  370        return AbortDumpLocked(callback, callback_task_runner, dump_guid);
358  371      }
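A sketch of the pattern described by the "Copy the callback + arguments" comment above: once |pmd_async_state| has been handed to the closure via Passed(), a failed PostTask() destroys the closure and the state with it, so the callback and its task runner must be copied out beforehand to report the abort. The code below uses plain C++ stand-ins (a function template instead of the real PostTask); it is purely illustrative.

// Why the fields are copied before Passed(): if the post fails, the bound
// closure (and the state it owns) is destroyed, so only the copies survive.
#include <cstdio>
#include <memory>
#include <string>
#include <utility>

struct AsyncState {
  std::string callback_tag;  // stand-in for callback + callback_task_runner
};

// Pretend the target thread is gone: the task (and anything it owns) is
// destroyed here and false is returned, like a failed PostTask().
template <typename Task>
bool PostTask(Task) { return false; }

int main() {
  auto state = std::make_unique<AsyncState>();
  state->callback_tag = "dump-42";
  // Copy what we need *before* moving |state| into the closure.
  std::string tag_copy = state->callback_tag;
  bool posted = PostTask([owned = std::move(state)] { /* would continue the dump */ });
  if (!posted) {
    // |state| is gone (it died with the closure); report the abort via the copy.
    std::printf("aborting dump %s\n", tag_copy.c_str());
  }
  return 0;
}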
(...skipping 41 matching lines...)
400  413      if (finalize)
401  414        return FinalizeDumpAndAddToTrace(pmd_async_state.Pass());
402  415
403  416      ContinueAsyncProcessDump(pmd_async_state.Pass());
404  417    }
405  418
406  419    // static
407  420    void MemoryDumpManager::FinalizeDumpAndAddToTrace(
408  421        scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
409  422      const uint64_t dump_guid = pmd_async_state->req_args.dump_guid;
410       -   if (!pmd_async_state->task_runner->BelongsToCurrentThread()) {
411       -     scoped_refptr<SingleThreadTaskRunner> task_runner =
412       -         pmd_async_state->task_runner;
413       -     task_runner->PostTask(FROM_HERE,
414       -                           Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace,
415       -                                Passed(pmd_async_state.Pass())));
     423  +   if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) {
     424  +     scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
     425  +         pmd_async_state->callback_task_runner;
     426  +     callback_task_runner->PostTask(
     427  +         FROM_HERE, Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace,
     428  +                         Passed(pmd_async_state.Pass())));
416  429        return;
417  430      }
418  431
419  432      TRACE_EVENT_WITH_FLOW0(kTraceCategory,
420  433          "MemoryDumpManager::FinalizeDumpAndAddToTrace",
421  434          TRACE_ID_MANGLE(dump_guid), TRACE_EVENT_FLAG_FLOW_IN);
422  435
423  436      TracedValue* traced_value = new TracedValue();
424  437      scoped_refptr<ConvertableToTraceFormat> event_value(traced_value);
425  438      pmd_async_state->process_memory_dump.AsValueInto(traced_value);
(...skipping 36 matching lines...)
462  475      bool enabled;
463  476      TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled);
464  477      if (!enabled)
465  478        return;
466  479
467  480      // Initialize the TraceLog for the current thread. This is to avoid that the
468  481      // TraceLog memory dump provider is registered lazily in the PostTask() below
469  482      // while the |lock_| is taken;
470  483      TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
471  484
     485  +   // Spin-up the thread used to invoke unbound dump providers.
     486  +   scoped_ptr<Thread> dump_thread(new Thread("MemoryInfra"));
     487  +   if (!dump_thread->Start()) {
     488  +     LOG(ERROR) << "Failed to start the memory-infra thread for tracing";
     489  +     return;
     490  +   }
     491  +
472  492      AutoLock lock(lock_);
473  493
474  494      DCHECK(delegate_);  // At this point we must have a delegate.
475  495
476  496      scoped_refptr<StackFrameDeduplicator> stack_frame_deduplicator = nullptr;
477  497
478  498      if (heap_profiling_enabled_) {
479  499        // If heap profiling is enabled, the stack frame deduplicator will be in
480  500        // use. Add a metadata event to write its frames.
481  501        stack_frame_deduplicator = new StackFrameDeduplicator;
482  502        TRACE_EVENT_API_ADD_METADATA_EVENT("stackFrames", "stackFrames",
483  503            stack_frame_deduplicator);
484  504      }
485  505
     506  +   dump_thread_ = dump_thread.Pass();
petrcermak 2015/11/02 10:35:20 Shouldn't you DCHECK that dump_thread_ was null before this assignment?
Primiano Tucci (use gerrit) 2015/11/02 15:07:07 Done.
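For reference, the DCHECK suggested here was applied in a later patch set and is not visible in this diff; presumably it looks roughly like the sketch below, which uses plain C++ and assert() as hypothetical stand-ins for the real classes and DCHECK.

// Hypothetical shape of the requested fix: assert the thread slot is empty
// before taking ownership of the newly started thread.
#include <cassert>
#include <memory>

struct Thread {};  // stand-in for base::Thread

struct Manager {
  std::unique_ptr<Thread> dump_thread_;
  void OnTraceLogEnabled(std::unique_ptr<Thread> dump_thread) {
    assert(!dump_thread_);  // mirrors DCHECK(!dump_thread_)
    dump_thread_ = std::move(dump_thread);
  }
};

int main() {
  Manager m;
  m.OnTraceLogEnabled(std::make_unique<Thread>());
  return 0;
}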
486  507      session_state_ = new MemoryDumpSessionState(stack_frame_deduplicator);
487  508
488  509      for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) {
489  510        it->disabled = false;
490  511        it->consecutive_failures = 0;
491  512      }
492  513
493  514      subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
494  515
495  516      // TODO(primiano): This is a temporary hack to disable periodic memory dumps
(...skipping 29 matching lines...)
525  546      }
526  547      DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms);
527  548      g_heavy_dumps_rate = heavy_dump_period_ms / min_timer_period_ms;
528  549
529  550      periodic_dump_timer_.Start(FROM_HERE,
530  551          TimeDelta::FromMilliseconds(min_timer_period_ms),
531  552          base::Bind(&RequestPeriodicGlobalDump));
532  553    }
533  554
534  555    void MemoryDumpManager::OnTraceLogDisabled() {
535       -   AutoLock lock(lock_);
     556  +   subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
     557  +   scoped_ptr<Thread> dump_thread;
     558  +   {
     559  +     AutoLock lock(lock_);
     560  +     dump_thread = dump_thread_.Pass();
     561  +     session_state_ = nullptr;
     562  +   }
     563  +
     564  +   // Thread stops are blocking and must be performed outside of the |lock_|
petrcermak 2015/11/02 10:35:20 nit: Does the comment apply to line 566 (periodic_dump_timer_.Stop()) as well?
Primiano Tucci (use gerrit) 2015/11/02 15:07:07 yes, it does.
     565  +   // or will deadlock (e.g., if ContinueAsyncProcessDump() tries to acquire it).
536  566      periodic_dump_timer_.Stop();
537       -   subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
538       -   session_state_ = nullptr;
     567  +   if (dump_thread)
     568  +     dump_thread->Stop();
539  569    }
540  570
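The shutdown ordering above is the delicate part of OnTraceLogDisabled(): stopping the dump thread joins it, and a task running on that thread (ContinueAsyncProcessDump()) may itself be waiting on |lock_|, so the join must happen only after the lock is released. Below is a minimal sketch of the same pattern using std::thread and std::mutex; it is not the actual //base code.

// Take ownership of the thread inside the critical section, but join it only
// after the lock is released, since the worker may itself be blocked trying
// to acquire the same lock.
#include <memory>
#include <mutex>
#include <thread>

std::mutex lock_;
std::unique_ptr<std::thread> dump_thread_;

void Worker() {
  std::lock_guard<std::mutex> lock(lock_);  // like ContinueAsyncProcessDump()
  // ... inspect shared state ...
}

void OnTraceLogDisabled() {
  std::unique_ptr<std::thread> dump_thread;
  {
    std::lock_guard<std::mutex> lock(lock_);
    dump_thread = std::move(dump_thread_);  // clear shared state under the lock
  }
  // Join outside the lock: if we joined while holding it and the worker were
  // waiting on lock_, neither side could make progress.
  if (dump_thread && dump_thread->joinable())
    dump_thread->join();
}

int main() {
  dump_thread_ = std::make_unique<std::thread>(Worker);
  OnTraceLogDisabled();
  return 0;
}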
541  571    uint64_t MemoryDumpManager::GetTracingProcessId() const {
542  572      return delegate_->GetTracingProcessId();
543  573    }
544  574
545  575    MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
546  576        MemoryDumpProvider* dump_provider,
547  577        const char* name,
548  578        const scoped_refptr<SingleThreadTaskRunner>& task_runner)
549  579        : dump_provider(dump_provider),
550  580          name(name),
551  581          task_runner(task_runner),
552  582          consecutive_failures(0),
553  583          disabled(false),
554  584          unregistered(false) {}
555  585
556  586    MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {}
557  587
558  588    bool MemoryDumpManager::MemoryDumpProviderInfo::operator<(
559  589        const MemoryDumpProviderInfo& other) const {
560  590      if (task_runner == other.task_runner)
561  591        return dump_provider < other.dump_provider;
562       -   return task_runner < other.task_runner;
     592  +   // Ensure that unbound providers (task_runner == nullptr) always run last.
     593  +   return !(task_runner < other.task_runner);
563  594    }
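The comparator change above keys the provider set by task runner in descending pointer order, which in particular pushes providers with a null runner (the unbound ones) to the end of the iteration, after every bound provider. A small self-contained illustration follows, with runners modelled as plain integers (0 meaning "unbound") rather than scoped_refptr<SingleThreadTaskRunner>; the types and names are made up.

// Inverting the comparison for unequal runners sorts runner 0 (no thread
// affinity) after every non-zero runner, so unbound providers are dumped
// last, on the dedicated dump thread.
#include <cstdio>
#include <cstring>
#include <set>

struct ProviderInfo {
  const char* name;
  unsigned runner;  // 0 => no thread affinity ("unbound")
  bool operator<(const ProviderInfo& other) const {
    if (runner == other.runner)
      return std::strcmp(name, other.name) < 0;  // stable tie-breaker
    return !(runner < other.runner);             // descending => 0 sorts last
  }
};

int main() {
  std::set<ProviderInfo> providers = {
      {"malloc", 7}, {"gpu", 3}, {"skia", 0}, {"v8", 0}};
  for (const ProviderInfo& p : providers)
    std::printf("%s (runner %u)\n", p.name, p.runner);
  // Prints the bound providers first (runner 7, then 3), then the two
  // unbound providers last.
  return 0;
}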
564  595
565  596    MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
566  597        MemoryDumpRequestArgs req_args,
567  598        MemoryDumpProviderInfoSet::iterator next_dump_provider,
568  599        const scoped_refptr<MemoryDumpSessionState>& session_state,
569       -     MemoryDumpCallback callback)
     600  +     MemoryDumpCallback callback,
     601  +     const scoped_refptr<SingleThreadTaskRunner>& dump_thread_task_runner)
570  602        : process_memory_dump(session_state),
571  603          req_args(req_args),
572  604          next_dump_provider(next_dump_provider),
573  605          callback(callback),
574       -       task_runner(MessageLoop::current()->task_runner()) {}
     606  +       callback_task_runner(MessageLoop::current()->task_runner()),
     607  +       dump_thread_task_runner(dump_thread_task_runner) {}
575  608
576  609    MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() {
577  610    }
578  611
579  612    }  // namespace trace_event
580  613    }  // namespace base