Chromium Code Reviews

Side by Side Diff: base/trace_event/memory_dump_manager.cc

Issue 2836933002: memory-infra: Never kill memory-infra background thread (Closed)
Patch Set: sacrifice to the dark gods of TSAN Created 3 years, 7 months ago
OLD | NEW
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/trace_event/memory_dump_manager.h" 5 #include "base/trace_event/memory_dump_manager.h"
6 6
7 #include <inttypes.h> 7 #include <inttypes.h>
8 #include <stdio.h> 8 #include <stdio.h>
9 9
10 #include <algorithm> 10 #include <algorithm>
(...skipping 164 matching lines...)
175 LeakySingletonTraits<MemoryDumpManager>>::get(); 175 LeakySingletonTraits<MemoryDumpManager>>::get();
176 } 176 }
177 177
178 // static 178 // static
179 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) { 179 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) {
180 g_instance_for_testing = instance; 180 g_instance_for_testing = instance;
181 } 181 }
182 182
183 MemoryDumpManager::MemoryDumpManager() 183 MemoryDumpManager::MemoryDumpManager()
184 : is_coordinator_(false), 184 : is_coordinator_(false),
185 memory_tracing_enabled_(0), 185 is_enabled_(0),
186 tracing_process_id_(kInvalidTracingProcessId), 186 tracing_process_id_(kInvalidTracingProcessId),
187 dumper_registrations_ignored_for_testing_(false), 187 dumper_registrations_ignored_for_testing_(false),
188 heap_profiling_enabled_(false) { 188 heap_profiling_enabled_(false) {
189 g_next_guid.GetNext(); // Make sure that first guid is not zero. 189 g_next_guid.GetNext(); // Make sure that first guid is not zero.
190 190
191 // At this point the command line may not be initialized but we try to 191 // At this point the command line may not be initialized but we try to
192 // enable the heap profiler to capture allocations as soon as possible. 192 // enable the heap profiler to capture allocations as soon as possible.
193 EnableHeapProfilingIfNeeded(); 193 EnableHeapProfilingIfNeeded();
194 194
195 strict_thread_check_blacklist_.insert(std::begin(kStrictThreadCheckBlacklist), 195 strict_thread_check_blacklist_.insert(std::begin(kStrictThreadCheckBlacklist),
196 std::end(kStrictThreadCheckBlacklist)); 196 std::end(kStrictThreadCheckBlacklist));
197 } 197 }
198 198
199 MemoryDumpManager::~MemoryDumpManager() { 199 MemoryDumpManager::~MemoryDumpManager() {
200 AutoLock lock(lock_);
201 if (dump_thread_) {
202 dump_thread_->Stop();
203 dump_thread_.reset();
204 }
200 } 205 }
201 206
202 void MemoryDumpManager::EnableHeapProfilingIfNeeded() { 207 void MemoryDumpManager::EnableHeapProfilingIfNeeded() {
203 if (heap_profiling_enabled_) 208 if (heap_profiling_enabled_)
204 return; 209 return;
205 210
206 if (!CommandLine::InitializedForCurrentProcess() || 211 if (!CommandLine::InitializedForCurrentProcess() ||
207 !CommandLine::ForCurrentProcess()->HasSwitch( 212 !CommandLine::ForCurrentProcess()->HasSwitch(
208 switches::kEnableHeapProfiling)) 213 switches::kEnableHeapProfiling))
209 return; 214 return;
(...skipping 177 matching lines...)
387 392
388 if (take_mdp_ownership_and_delete_async) { 393 if (take_mdp_ownership_and_delete_async) {
389 // The MDP will be deleted whenever the MDPInfo struct will, that is either: 394 // The MDP will be deleted whenever the MDPInfo struct will, that is either:
390 // - At the end of this function, if no dump is in progress. 395 // - At the end of this function, if no dump is in progress.
391 // - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is 396 // - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is
392 // removed from |pending_dump_providers|. 397 // removed from |pending_dump_providers|.
393 // - When the provider is removed from other clients (MemoryPeakDetector). 398 // - When the provider is removed from other clients (MemoryPeakDetector).
394 DCHECK(!(*mdp_iter)->owned_dump_provider); 399 DCHECK(!(*mdp_iter)->owned_dump_provider);
395 (*mdp_iter)->owned_dump_provider = std::move(owned_mdp); 400 (*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
396 } else if (strict_thread_check_blacklist_.count((*mdp_iter)->name) == 0 || 401 } else if (strict_thread_check_blacklist_.count((*mdp_iter)->name) == 0 ||
397 subtle::NoBarrier_Load(&memory_tracing_enabled_)) { 402 subtle::NoBarrier_Load(&is_enabled_)) {
398 // If dump provider's name is on |strict_thread_check_blacklist_|, then the 403 // If dump provider's name is on |strict_thread_check_blacklist_|, then the
399 // DCHECK is fired only when tracing is enabled. Otherwise the DCHECK is 404 // DCHECK is fired only when tracing is enabled. Otherwise the DCHECK is
400 // fired even when tracing is not enabled (stricter). 405 // fired even when tracing is not enabled (stricter).
401 // TODO(ssid): Remove this condition after removing all the dump providers 406 // TODO(ssid): Remove this condition after removing all the dump providers
402 // in the blacklist and the buildbots are no longer flakily hitting the 407 // in the blacklist and the buildbots are no longer flakily hitting the
403 // DCHECK, crbug.com/643438. 408 // DCHECK, crbug.com/643438.
404 409
405 // If you hit this DCHECK, your dump provider has a bug. 410 // If you hit this DCHECK, your dump provider has a bug.
406 // Unregistration of a MemoryDumpProvider is safe only if: 411 // Unregistration of a MemoryDumpProvider is safe only if:
407 // - The MDP has specified a sequenced task runner affinity AND the 412 // - The MDP has specified a sequenced task runner affinity AND the
(...skipping 22 matching lines...)
430 (*mdp_iter)->disabled = true; 435 (*mdp_iter)->disabled = true;
431 dump_providers_.erase(mdp_iter); 436 dump_providers_.erase(mdp_iter);
432 } 437 }
433 438
434 void MemoryDumpManager::RequestGlobalDump( 439 void MemoryDumpManager::RequestGlobalDump(
435 MemoryDumpType dump_type, 440 MemoryDumpType dump_type,
436 MemoryDumpLevelOfDetail level_of_detail, 441 MemoryDumpLevelOfDetail level_of_detail,
437 const GlobalMemoryDumpCallback& callback) { 442 const GlobalMemoryDumpCallback& callback) {
438 // Bail out immediately if tracing is not enabled at all or if the dump mode 443 // Bail out immediately if tracing is not enabled at all or if the dump mode
439 // is not allowed. 444 // is not allowed.
440 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) || 445 if (!UNLIKELY(subtle::NoBarrier_Load(&is_enabled_)) ||
441 !IsDumpModeAllowed(level_of_detail)) { 446 !IsDumpModeAllowed(level_of_detail)) {
442 VLOG(1) << kLogPrefix << " failed because " << kTraceCategory 447 VLOG(1) << kLogPrefix << " failed because " << kTraceCategory
443 << " tracing category is not enabled or the requested dump mode is " 448 << " tracing category is not enabled or the requested dump mode is "
444 "not allowed by trace config."; 449 "not allowed by trace config.";
445 if (!callback.is_null()) 450 if (!callback.is_null())
446 callback.Run(0u /* guid */, false /* success */); 451 callback.Run(0u /* guid */, false /* success */);
447 return; 452 return;
448 } 453 }
449 454
450 const uint64_t guid = 455 const uint64_t guid =
(...skipping 34 matching lines...)
485 MemoryDumpProvider* provider) { 490 MemoryDumpProvider* provider) {
486 AutoLock lock(lock_); 491 AutoLock lock(lock_);
487 492
488 for (const auto& info : dump_providers_) { 493 for (const auto& info : dump_providers_) {
489 if (info->dump_provider == provider) 494 if (info->dump_provider == provider)
490 return true; 495 return true;
491 } 496 }
492 return false; 497 return false;
493 } 498 }
494 499
500 scoped_refptr<base::SequencedTaskRunner>
501 MemoryDumpManager::GetOrCreateBgTaskRunnerLocked() {
502 lock_.AssertAcquired();
503
504 if (dump_thread_)
505 return dump_thread_->task_runner();
506
507 dump_thread_ = MakeUnique<Thread>("MemoryInfra");
508 bool started = dump_thread_->Start();
509 CHECK(started);
510
511 return dump_thread_->task_runner();
512 }
513
495 void MemoryDumpManager::CreateProcessDump( 514 void MemoryDumpManager::CreateProcessDump(
496 const MemoryDumpRequestArgs& args, 515 const MemoryDumpRequestArgs& args,
497 const ProcessMemoryDumpCallback& callback) { 516 const ProcessMemoryDumpCallback& callback) {
498 char guid_str[20]; 517 char guid_str[20];
499 sprintf(guid_str, "0x%" PRIx64, args.dump_guid); 518 sprintf(guid_str, "0x%" PRIx64, args.dump_guid);
500 TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(kTraceCategory, "ProcessMemoryDump", 519 TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(kTraceCategory, "ProcessMemoryDump",
501 TRACE_ID_LOCAL(args.dump_guid), "dump_guid", 520 TRACE_ID_LOCAL(args.dump_guid), "dump_guid",
502 TRACE_STR_COPY(guid_str)); 521 TRACE_STR_COPY(guid_str));
503 522
504 // If argument filter is enabled then only background mode dumps should be 523 // If argument filter is enabled then only background mode dumps should be
505 // allowed. In case the trace config passed for background tracing session 524 // allowed. In case the trace config passed for background tracing session
506 // missed the allowed modes argument, it crashes here instead of creating 525 // missed the allowed modes argument, it crashes here instead of creating
507 // unexpected dumps. 526 // unexpected dumps.
508 if (TraceLog::GetInstance() 527 if (TraceLog::GetInstance()
509 ->GetCurrentTraceConfig() 528 ->GetCurrentTraceConfig()
510 .IsArgumentFilterEnabled()) { 529 .IsArgumentFilterEnabled()) {
511 CHECK_EQ(MemoryDumpLevelOfDetail::BACKGROUND, args.level_of_detail); 530 CHECK_EQ(MemoryDumpLevelOfDetail::BACKGROUND, args.level_of_detail);
512 } 531 }
513 532
514 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; 533 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
515 { 534 {
516 AutoLock lock(lock_); 535 AutoLock lock(lock_);
517 536
518 // |dump_thread_| can be nullptr is tracing was disabled before reaching
519 // here. SetupNextMemoryDump() is robust enough to tolerate it and will
520 // NACK the dump.
521 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( 537 pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
522 args, dump_providers_, session_state_, callback, 538 args, dump_providers_, session_state_, callback,
523 dump_thread_ ? dump_thread_->task_runner() : nullptr)); 539 GetOrCreateBgTaskRunnerLocked()));
524 540
525 // Safety check to prevent reaching here without calling RequestGlobalDump, 541 // Safety check to prevent reaching here without calling RequestGlobalDump,
526 // with disallowed modes. If |session_state_| is null then tracing is 542 // with disallowed modes. If |session_state_| is null then tracing is
527 // disabled. 543 // disabled.
528 CHECK(!session_state_ || 544 CHECK(!session_state_ ||
529 session_state_->IsDumpModeAllowed(args.level_of_detail)); 545 session_state_->IsDumpModeAllowed(args.level_of_detail));
530 546
531 // If enabled, holds back the peak detector resetting its estimation window. 547 // If enabled, holds back the peak detector resetting its estimation window.
532 MemoryPeakDetector::GetInstance()->Throttle(); 548 MemoryPeakDetector::GetInstance()->Throttle();
533 } 549 }
(...skipping 13 matching lines...)
547 // |lock_| is used in these functions purely to ensure consistency w.r.t. 563 // |lock_| is used in these functions purely to ensure consistency w.r.t.
548 // (un)registrations of |dump_providers_|. 564 // (un)registrations of |dump_providers_|.
549 void MemoryDumpManager::SetupNextMemoryDump( 565 void MemoryDumpManager::SetupNextMemoryDump(
550 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { 566 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
551 HEAP_PROFILER_SCOPED_IGNORE; 567 HEAP_PROFILER_SCOPED_IGNORE;
552 // Initalizes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs 568 // Initalizes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
553 // in the PostTask below don't end up registering their own dump providers 569 // in the PostTask below don't end up registering their own dump providers
554 // (for discounting trace memory overhead) while holding the |lock_|. 570 // (for discounting trace memory overhead) while holding the |lock_|.
555 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); 571 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
556 572
557 // |dump_thread_| might be destroyed before getting this point. 573 // MDM might have been disabled before getting to this point.
558 // It means that tracing was disabled right before starting this dump. 574 // Anyway either MDM is disabled or this was the last hop, create a trace
559 // Anyway either tracing is stopped or this was the last hop, create a trace
560 // event, add it to the trace and finalize process dump invoking the callback. 575 // event, add it to the trace and finalize process dump invoking the callback.
561 if (!pmd_async_state->dump_thread_task_runner.get()) { 576 if (!subtle::NoBarrier_Load(&is_enabled_)) {
562 if (pmd_async_state->pending_dump_providers.empty()) { 577 if (pmd_async_state->pending_dump_providers.empty()) {
563 VLOG(1) << kLogPrefix << " failed because dump thread was destroyed" 578 VLOG(1) << kLogPrefix << " failed because MemoryDumpManager was disabled"
564 << " before finalizing the dump"; 579 << " before finalizing the dump";
565 } else { 580 } else {
566 VLOG(1) << kLogPrefix << " failed because dump thread was destroyed" 581 VLOG(1) << kLogPrefix << " failed because MemoryDumpManager was disabled"
567 << " before dumping " 582 << " before dumping "
568 << pmd_async_state->pending_dump_providers.back().get()->name; 583 << pmd_async_state->pending_dump_providers.back().get()->name;
569 } 584 }
570 pmd_async_state->dump_successful = false; 585 pmd_async_state->dump_successful = false;
571 pmd_async_state->pending_dump_providers.clear(); 586 pmd_async_state->pending_dump_providers.clear();
572 } 587 }
588
573 if (pmd_async_state->pending_dump_providers.empty()) 589 if (pmd_async_state->pending_dump_providers.empty())
574 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state)); 590 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state));
575 591
576 // Read MemoryDumpProviderInfo thread safety considerations in 592 // Read MemoryDumpProviderInfo thread safety considerations in
577 // memory_dump_manager.h when accessing |mdpinfo| fields. 593 // memory_dump_manager.h when accessing |mdpinfo| fields.
578 MemoryDumpProviderInfo* mdpinfo = 594 MemoryDumpProviderInfo* mdpinfo =
579 pmd_async_state->pending_dump_providers.back().get(); 595 pmd_async_state->pending_dump_providers.back().get();
580 596
581 // If we are in background tracing, we should invoke only the whitelisted 597 // If we are in background tracing, we should invoke only the whitelisted
582 // providers. Ignore other providers and continue. 598 // providers. Ignore other providers and continue.
583 if (pmd_async_state->req_args.level_of_detail == 599 if (pmd_async_state->req_args.level_of_detail ==
584 MemoryDumpLevelOfDetail::BACKGROUND && 600 MemoryDumpLevelOfDetail::BACKGROUND &&
585 !mdpinfo->whitelisted_for_background_mode) { 601 !mdpinfo->whitelisted_for_background_mode) {
586 pmd_async_state->pending_dump_providers.pop_back(); 602 pmd_async_state->pending_dump_providers.pop_back();
587 return SetupNextMemoryDump(std::move(pmd_async_state)); 603 return SetupNextMemoryDump(std::move(pmd_async_state));
588 } 604 }
589 605
590 // If the dump provider did not specify a task runner affinity, dump on 606 // If the dump provider did not specify a task runner affinity, dump on
591 // |dump_thread_| which is already checked above for presence. 607 // |dump_thread_|.
592 SequencedTaskRunner* task_runner = mdpinfo->task_runner.get(); 608 SequencedTaskRunner* task_runner = mdpinfo->task_runner.get();
593 if (!task_runner) { 609 if (!task_runner) {
594 DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner); 610 DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner);
595 task_runner = pmd_async_state->dump_thread_task_runner.get(); 611 task_runner = pmd_async_state->dump_thread_task_runner.get();
596 DCHECK(task_runner); 612 DCHECK(task_runner);
597 } 613 }
598 614
599 if (mdpinfo->options.dumps_on_single_thread_task_runner && 615 if (mdpinfo->options.dumps_on_single_thread_task_runner &&
600 task_runner->RunsTasksOnCurrentThread()) { 616 task_runner->RunsTasksOnCurrentThread()) {
601 // If |dumps_on_single_thread_task_runner| is true then no PostTask is 617 // If |dumps_on_single_thread_task_runner| is true then no PostTask is
(...skipping 180 matching lines...)
782 pmd_async_state->callback.Run(dump_guid, dump_successful, result); 798 pmd_async_state->callback.Run(dump_guid, dump_successful, result);
783 pmd_async_state->callback.Reset(); 799 pmd_async_state->callback.Reset();
784 } 800 }
785 801
786 TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump", 802 TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump",
787 TRACE_ID_LOCAL(dump_guid)); 803 TRACE_ID_LOCAL(dump_guid));
788 } 804 }
789 805
790 void MemoryDumpManager::Enable( 806 void MemoryDumpManager::Enable(
791 const TraceConfig::MemoryDumpConfig& memory_dump_config) { 807 const TraceConfig::MemoryDumpConfig& memory_dump_config) {
792 // Spin-up the thread used to invoke unbound dump providers.
793 std::unique_ptr<Thread> dump_thread(new Thread("MemoryInfra"));
794 if (!dump_thread->Start()) {
795 LOG(ERROR) << "Failed to start the memory-infra thread for tracing";
796 return;
797 }
798 808
799 scoped_refptr<MemoryDumpSessionState> session_state = 809 scoped_refptr<MemoryDumpSessionState> session_state =
800 new MemoryDumpSessionState; 810 new MemoryDumpSessionState;
801 session_state->SetAllowedDumpModes(memory_dump_config.allowed_dump_modes); 811 session_state->SetAllowedDumpModes(memory_dump_config.allowed_dump_modes);
802 session_state->set_heap_profiler_breakdown_threshold_bytes( 812 session_state->set_heap_profiler_breakdown_threshold_bytes(
803 memory_dump_config.heap_profiler_options.breakdown_threshold_bytes); 813 memory_dump_config.heap_profiler_options.breakdown_threshold_bytes);
804 if (heap_profiling_enabled_) { 814 if (heap_profiling_enabled_) {
805 // If heap profiling is enabled, the stack frame deduplicator and type name 815 // If heap profiling is enabled, the stack frame deduplicator and type name
806 // deduplicator will be in use. Add a metadata events to write the frames 816 // deduplicator will be in use. Add a metadata events to write the frames
807 // and type IDs. 817 // and type IDs.
(...skipping 15 matching lines...) Expand all
823 MakeUnique<SessionStateConvertableProxy<TypeNameDeduplicator>>( 833 MakeUnique<SessionStateConvertableProxy<TypeNameDeduplicator>>(
824 session_state, &MemoryDumpSessionState::type_name_deduplicator)); 834 session_state, &MemoryDumpSessionState::type_name_deduplicator));
825 } 835 }
826 836
827 AutoLock lock(lock_); 837 AutoLock lock(lock_);
828 838
829 // At this point we must have the ability to request global dumps. 839 // At this point we must have the ability to request global dumps.
830 DCHECK(!request_dump_function_.is_null()); 840 DCHECK(!request_dump_function_.is_null());
831 session_state_ = session_state; 841 session_state_ = session_state;
832 842
833 DCHECK(!dump_thread_); 843 subtle::NoBarrier_Store(&is_enabled_, 1);
834 dump_thread_ = std::move(dump_thread);
835
836 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
837 844
838 MemoryDumpScheduler::Config periodic_config; 845 MemoryDumpScheduler::Config periodic_config;
839 bool peak_detector_configured = false; 846 bool peak_detector_configured = false;
840 for (const auto& trigger : memory_dump_config.triggers) { 847 for (const auto& trigger : memory_dump_config.triggers) {
841 if (!session_state_->IsDumpModeAllowed(trigger.level_of_detail)) { 848 if (!session_state_->IsDumpModeAllowed(trigger.level_of_detail)) {
842 NOTREACHED(); 849 NOTREACHED();
843 continue; 850 continue;
844 } 851 }
845 if (trigger.trigger_type == MemoryDumpType::PERIODIC_INTERVAL) { 852 if (trigger.trigger_type == MemoryDumpType::PERIODIC_INTERVAL) {
846 if (periodic_config.triggers.empty()) { 853 if (periodic_config.triggers.empty()) {
847 periodic_config.callback = BindRepeating(&OnPeriodicSchedulerTick); 854 periodic_config.callback = BindRepeating(&OnPeriodicSchedulerTick);
848 } 855 }
849 periodic_config.triggers.push_back( 856 periodic_config.triggers.push_back(
850 {trigger.level_of_detail, trigger.min_time_between_dumps_ms}); 857 {trigger.level_of_detail, trigger.min_time_between_dumps_ms});
851 } else if (trigger.trigger_type == MemoryDumpType::PEAK_MEMORY_USAGE) { 858 } else if (trigger.trigger_type == MemoryDumpType::PEAK_MEMORY_USAGE) {
852 // At most one peak trigger is allowed. 859 // At most one peak trigger is allowed.
853 CHECK(!peak_detector_configured); 860 CHECK(!peak_detector_configured);
854 peak_detector_configured = true; 861 peak_detector_configured = true;
855 MemoryPeakDetector::GetInstance()->Setup( 862 MemoryPeakDetector::GetInstance()->Setup(
856 BindRepeating(&MemoryDumpManager::GetDumpProvidersForPolling, 863 BindRepeating(&MemoryDumpManager::GetDumpProvidersForPolling,
857 Unretained(this)), 864 Unretained(this)),
858 dump_thread_->task_runner(), 865 GetOrCreateBgTaskRunnerLocked(),
859 BindRepeating(&OnPeakDetected, trigger.level_of_detail)); 866 BindRepeating(&OnPeakDetected, trigger.level_of_detail));
860 867
861 MemoryPeakDetector::Config peak_config; 868 MemoryPeakDetector::Config peak_config;
862 peak_config.polling_interval_ms = 10; 869 peak_config.polling_interval_ms = 10;
863 peak_config.min_time_between_peaks_ms = trigger.min_time_between_dumps_ms; 870 peak_config.min_time_between_peaks_ms = trigger.min_time_between_dumps_ms;
864 peak_config.enable_verbose_poll_tracing = 871 peak_config.enable_verbose_poll_tracing =
865 trigger.level_of_detail == MemoryDumpLevelOfDetail::DETAILED; 872 trigger.level_of_detail == MemoryDumpLevelOfDetail::DETAILED;
866 MemoryPeakDetector::GetInstance()->Start(peak_config); 873 MemoryPeakDetector::GetInstance()->Start(peak_config);
867 874
868 // When peak detection is enabled, trigger a dump straight away as it 875 // When peak detection is enabled, trigger a dump straight away as it
869 // gives a good reference point for analyzing the trace. 876 // gives a good reference point for analyzing the trace.
870 if (is_coordinator_) { 877 if (is_coordinator_) {
871 dump_thread_->task_runner()->PostTask( 878 GetOrCreateBgTaskRunnerLocked()->PostTask(
872 FROM_HERE, BindRepeating(&OnPeakDetected, trigger.level_of_detail)); 879 FROM_HERE, BindRepeating(&OnPeakDetected, trigger.level_of_detail));
873 } 880 }
874 } 881 }
875 } 882 }
876 883
877 // Only coordinator process triggers periodic global memory dumps. 884 // Only coordinator process triggers periodic global memory dumps.
878 if (is_coordinator_ && !periodic_config.triggers.empty()) { 885 if (is_coordinator_ && !periodic_config.triggers.empty()) {
879 MemoryDumpScheduler::GetInstance()->Start(periodic_config, 886 MemoryDumpScheduler::GetInstance()->Start(periodic_config,
880 dump_thread_->task_runner()); 887 GetOrCreateBgTaskRunnerLocked());
881 } 888 }
882 } 889 }
883 890
884 void MemoryDumpManager::Disable() { 891 void MemoryDumpManager::Disable() {
885 // There might be a memory dump in progress while this happens. Therefore, 892 // There might be a memory dump in progress while this happens. Therefore,
886 // ensure that the MDM state which depends on the tracing enabled / disabled 893 // ensure that the MDM state which depends on the tracing enabled / disabled
887 // state is always accessed by the dumping methods holding the |lock_|. 894 // state is always accessed by the dumping methods holding the |lock_|.
888 if (!subtle::NoBarrier_Load(&memory_tracing_enabled_)) 895 if (!subtle::NoBarrier_Load(&is_enabled_))
889 return; 896 return;
890 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); 897 subtle::NoBarrier_Store(&is_enabled_, 0);
891 std::unique_ptr<Thread> dump_thread;
892 { 898 {
893 AutoLock lock(lock_); 899 AutoLock lock(lock_);
894 MemoryDumpScheduler::GetInstance()->Stop(); 900 MemoryDumpScheduler::GetInstance()->Stop();
895 MemoryPeakDetector::GetInstance()->TearDown(); 901 MemoryPeakDetector::GetInstance()->TearDown();
896 dump_thread = std::move(dump_thread_);
897 session_state_ = nullptr; 902 session_state_ = nullptr;
898 } 903 }
899
900 // Thread stops are blocking and must be performed outside of the |lock_|
901 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it).
902 if (dump_thread)
903 dump_thread->Stop();
904 } 904 }
905 905
906 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) { 906 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
907 AutoLock lock(lock_); 907 AutoLock lock(lock_);
908 if (!session_state_) 908 if (!session_state_)
909 return false; 909 return false;
910 return session_state_->IsDumpModeAllowed(dump_mode); 910 return session_state_->IsDumpModeAllowed(dump_mode);
911 } 911 }
912 912
913 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState( 913 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
914 MemoryDumpRequestArgs req_args, 914 MemoryDumpRequestArgs req_args,
915 const MemoryDumpProviderInfo::OrderedSet& dump_providers, 915 const MemoryDumpProviderInfo::OrderedSet& dump_providers,
916 scoped_refptr<MemoryDumpSessionState> session_state, 916 scoped_refptr<MemoryDumpSessionState> session_state,
917 ProcessMemoryDumpCallback callback, 917 ProcessMemoryDumpCallback callback,
918 scoped_refptr<SingleThreadTaskRunner> dump_thread_task_runner) 918 scoped_refptr<SequencedTaskRunner> dump_thread_task_runner)
919 : req_args(req_args), 919 : req_args(req_args),
920 session_state(std::move(session_state)), 920 session_state(std::move(session_state)),
921 callback(callback), 921 callback(callback),
922 dump_successful(true), 922 dump_successful(true),
923 callback_task_runner(ThreadTaskRunnerHandle::Get()), 923 callback_task_runner(ThreadTaskRunnerHandle::Get()),
924 dump_thread_task_runner(std::move(dump_thread_task_runner)) { 924 dump_thread_task_runner(std::move(dump_thread_task_runner)) {
925 pending_dump_providers.reserve(dump_providers.size()); 925 pending_dump_providers.reserve(dump_providers.size());
926 pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend()); 926 pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend());
927 } 927 }
928 928
929 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() { 929 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() {
930 } 930 }
931 931
932 ProcessMemoryDump* MemoryDumpManager::ProcessMemoryDumpAsyncState:: 932 ProcessMemoryDump* MemoryDumpManager::ProcessMemoryDumpAsyncState::
933 GetOrCreateMemoryDumpContainerForProcess(ProcessId pid, 933 GetOrCreateMemoryDumpContainerForProcess(ProcessId pid,
934 const MemoryDumpArgs& dump_args) { 934 const MemoryDumpArgs& dump_args) {
935 auto iter = process_dumps.find(pid); 935 auto iter = process_dumps.find(pid);
936 if (iter == process_dumps.end()) { 936 if (iter == process_dumps.end()) {
937 std::unique_ptr<ProcessMemoryDump> new_pmd( 937 std::unique_ptr<ProcessMemoryDump> new_pmd(
938 new ProcessMemoryDump(session_state, dump_args)); 938 new ProcessMemoryDump(session_state, dump_args));
939 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; 939 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first;
940 } 940 }
941 return iter->second.get(); 941 return iter->second.get();
942 } 942 }
943 943
944 } // namespace trace_event 944 } // namespace trace_event
945 } // namespace base 945 } // namespace base
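
For readers skimming the diff, the substance of the patch is that the "MemoryInfra" dump thread is no longer started in Enable() and stopped in Disable(); it is created lazily under |lock_| by the new GetOrCreateBgTaskRunnerLocked() and torn down only in ~MemoryDumpManager(). Below is a minimal, standalone sketch of that get-or-create-under-lock pattern. It deliberately uses std::thread and a hypothetical BackgroundWorker/DumpManager pair instead of Chromium's base::Thread and SequencedTaskRunner, so all names and details in it are illustrative assumptions, not the actual Chromium API.

// Illustrative sketch only: approximates the lazy "create once, never kill"
// pattern from this patch using the standard library instead of base::Thread.
#include <condition_variable>
#include <functional>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>
#include <utility>

// Hypothetical stand-in for base::Thread + its SequencedTaskRunner.
class BackgroundWorker {
 public:
  BackgroundWorker() : thread_([this] { Loop(); }) {}
  ~BackgroundWorker() {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      stop_ = true;
    }
    cv_.notify_one();
    thread_.join();  // Blocking stop, done only at destruction time.
  }
  void PostTask(std::function<void()> task) {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      tasks_.push(std::move(task));
    }
    cv_.notify_one();
  }

 private:
  void Loop() {
    for (;;) {
      std::function<void()> task;
      {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return stop_ || !tasks_.empty(); });
        if (stop_ && tasks_.empty())
          return;
        task = std::move(tasks_.front());
        tasks_.pop();
      }
      task();  // Run outside the lock, like dump providers do.
    }
  }
  std::mutex mutex_;
  std::condition_variable cv_;
  std::queue<std::function<void()>> tasks_;
  bool stop_ = false;
  std::thread thread_;  // Declared last so other members exist before Loop() starts.
};

// Hypothetical stand-in for MemoryDumpManager.
class DumpManager {
 public:
  // Mirrors GetOrCreateBgTaskRunnerLocked(): caller must already hold |lock_|.
  BackgroundWorker* GetOrCreateWorkerLocked() {
    // In Chromium this would be lock_.AssertAcquired(); here it is a comment.
    if (!worker_)
      worker_ = std::make_unique<BackgroundWorker>();  // Created lazily, once.
    return worker_.get();
  }
  void RequestDump(std::function<void()> dump_closure) {
    std::lock_guard<std::mutex> lock(lock_);
    GetOrCreateWorkerLocked()->PostTask(std::move(dump_closure));
  }
  // No Disable()-time teardown: the worker is stopped only when the manager
  // itself is destroyed, as in ~MemoryDumpManager() in this patch.
 private:
  std::mutex lock_;
  std::unique_ptr<BackgroundWorker> worker_;
};

The design point this illustrates: because the worker is only ever stopped at destruction time, Disable() no longer has to move the thread out from under |lock_| and perform a blocking Stop() outside it (the deadlock concern described by the comment removed from the old Disable()), and in-flight dump tasks no longer race with thread teardown.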