Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(183)

Side by Side Diff: base/trace_event/memory_dump_manager.cc

Issue 2799023002: memory-infra: Switch to MemoryPeakDetector and simplify MemoryDumpScheduler (Closed)
Patch Set: rebase bind -> bindonce Created 3 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/trace_event/memory_dump_manager.h" 5 #include "base/trace_event/memory_dump_manager.h"
6 6
7 #include <inttypes.h> 7 #include <inttypes.h>
8 #include <stdio.h> 8 #include <stdio.h>
9 9
10 #include <algorithm> 10 #include <algorithm>
(...skipping 16 matching lines...) Expand all
27 #include "base/trace_event/heap_profiler.h" 27 #include "base/trace_event/heap_profiler.h"
28 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" 28 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
29 #include "base/trace_event/heap_profiler_event_filter.h" 29 #include "base/trace_event/heap_profiler_event_filter.h"
30 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h" 30 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
31 #include "base/trace_event/heap_profiler_type_name_deduplicator.h" 31 #include "base/trace_event/heap_profiler_type_name_deduplicator.h"
32 #include "base/trace_event/malloc_dump_provider.h" 32 #include "base/trace_event/malloc_dump_provider.h"
33 #include "base/trace_event/memory_dump_provider.h" 33 #include "base/trace_event/memory_dump_provider.h"
34 #include "base/trace_event/memory_dump_scheduler.h" 34 #include "base/trace_event/memory_dump_scheduler.h"
35 #include "base/trace_event/memory_dump_session_state.h" 35 #include "base/trace_event/memory_dump_session_state.h"
36 #include "base/trace_event/memory_infra_background_whitelist.h" 36 #include "base/trace_event/memory_infra_background_whitelist.h"
37 #include "base/trace_event/memory_peak_detector.h"
37 #include "base/trace_event/process_memory_dump.h" 38 #include "base/trace_event/process_memory_dump.h"
38 #include "base/trace_event/trace_event.h" 39 #include "base/trace_event/trace_event.h"
39 #include "base/trace_event/trace_event_argument.h" 40 #include "base/trace_event/trace_event_argument.h"
40 #include "build/build_config.h" 41 #include "build/build_config.h"
41 42
42 #if defined(OS_ANDROID) 43 #if defined(OS_ANDROID)
43 #include "base/trace_event/java_heap_dump_provider_android.h" 44 #include "base/trace_event/java_heap_dump_provider_android.h"
44 #endif 45 #endif
45 46
46 namespace base { 47 namespace base {
(...skipping 80 matching lines...) Expand 10 before | Expand all | Expand 10 after
127 void EstimateTraceMemoryOverhead( 128 void EstimateTraceMemoryOverhead(
128 TraceEventMemoryOverhead* overhead) override { 129 TraceEventMemoryOverhead* overhead) override {
129 return (session_state.get()->*getter_function)() 130 return (session_state.get()->*getter_function)()
130 ->EstimateTraceMemoryOverhead(overhead); 131 ->EstimateTraceMemoryOverhead(overhead);
131 } 132 }
132 133
133 scoped_refptr<MemoryDumpSessionState> session_state; 134 scoped_refptr<MemoryDumpSessionState> session_state;
134 GetterFunctPtr const getter_function; 135 GetterFunctPtr const getter_function;
135 }; 136 };
136 137
138 void OnPeakDetected(MemoryDumpLevelOfDetail level_of_detail) {
139 MemoryDumpManager::GetInstance()->RequestGlobalDump(
140 MemoryDumpType::PEAK_MEMORY_USAGE, level_of_detail);
141 }
142
143 void OnPeriodicSchedulerTick(MemoryDumpLevelOfDetail level_of_detail) {
144 MemoryDumpManager::GetInstance()->RequestGlobalDump(
145 MemoryDumpType::PERIODIC_INTERVAL, level_of_detail);
146 }
147
137 } // namespace 148 } // namespace
138 149
139 // static 150 // static
140 const char* const MemoryDumpManager::kTraceCategory = 151 const char* const MemoryDumpManager::kTraceCategory =
141 TRACE_DISABLED_BY_DEFAULT("memory-infra"); 152 TRACE_DISABLED_BY_DEFAULT("memory-infra");
142 153
143 // static 154 // static
144 const char* const MemoryDumpManager::kLogPrefix = "Memory-infra dump"; 155 const char* const MemoryDumpManager::kLogPrefix = "Memory-infra dump";
145 156
146 // static 157 // static
(...skipping 190 matching lines...) Expand 10 before | Expand all | Expand 10 after
337 } 348 }
338 349
339 { 350 {
340 AutoLock lock(lock_); 351 AutoLock lock(lock_);
341 bool already_registered = !dump_providers_.insert(mdpinfo).second; 352 bool already_registered = !dump_providers_.insert(mdpinfo).second;
342 // This actually happens in some tests which don't have a clean tear-down 353 // This actually happens in some tests which don't have a clean tear-down
343 // path for RenderThreadImpl::Init(). 354 // path for RenderThreadImpl::Init().
344 if (already_registered) 355 if (already_registered)
345 return; 356 return;
346 357
347 // The list of polling MDPs is populated OnTraceLogEnabled(). This code 358 if (options.is_fast_polling_supported)
348 // deals with the case of a MDP capable of fast polling that is registered 359 MemoryPeakDetector::GetInstance()->NotifyMemoryDumpProvidersChanged();
349 // after OnTraceLogEnabled() has already run.
350 if (options.is_fast_polling_supported && dump_thread_) {
351 dump_thread_->task_runner()->PostTask(
352 FROM_HERE,
353 BindOnce(&MemoryDumpManager::RegisterPollingMDPOnDumpThread,
354 Unretained(this), mdpinfo));
355 }
356 } 360 }
357 361
358 if (heap_profiling_enabled_) 362 if (heap_profiling_enabled_)
359 mdp->OnHeapProfilingEnabled(true); 363 mdp->OnHeapProfilingEnabled(true);
360 } 364 }
361 365
362 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { 366 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
363 UnregisterDumpProviderInternal(mdp, false /* delete_async */); 367 UnregisterDumpProviderInternal(mdp, false /* delete_async */);
364 } 368 }
365 369
(...skipping 18 matching lines...) Expand all
384 } 388 }
385 389
386 if (mdp_iter == dump_providers_.end()) 390 if (mdp_iter == dump_providers_.end())
387 return; // Not registered / already unregistered. 391 return; // Not registered / already unregistered.
388 392
389 if (take_mdp_ownership_and_delete_async) { 393 if (take_mdp_ownership_and_delete_async) {
390 // The MDP will be deleted whenever the MDPInfo struct will, that is either: 394 // The MDP will be deleted whenever the MDPInfo struct will, that is either:
391 // - At the end of this function, if no dump is in progress. 395 // - At the end of this function, if no dump is in progress.
392 // - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is 396 // - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is
393 // removed from |pending_dump_providers|. 397 // removed from |pending_dump_providers|.
394 // - When the provider is removed from |dump_providers_for_polling_|. 398 // - When the provider is removed from other clients (MemoryPeakDetector).
395 DCHECK(!(*mdp_iter)->owned_dump_provider); 399 DCHECK(!(*mdp_iter)->owned_dump_provider);
396 (*mdp_iter)->owned_dump_provider = std::move(owned_mdp); 400 (*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
397 } else if (strict_thread_check_blacklist_.count((*mdp_iter)->name) == 0 || 401 } else if (strict_thread_check_blacklist_.count((*mdp_iter)->name) == 0 ||
398 subtle::NoBarrier_Load(&memory_tracing_enabled_)) { 402 subtle::NoBarrier_Load(&memory_tracing_enabled_)) {
399 // If dump provider's name is on |strict_thread_check_blacklist_|, then the 403 // If dump provider's name is on |strict_thread_check_blacklist_|, then the
400 // DCHECK is fired only when tracing is enabled. Otherwise the DCHECK is 404 // DCHECK is fired only when tracing is enabled. Otherwise the DCHECK is
401 // fired even when tracing is not enabled (stricter). 405 // fired even when tracing is not enabled (stricter).
402 // TODO(ssid): Remove this condition after removing all the dump providers 406 // TODO(ssid): Remove this condition after removing all the dump providers
403 // in the blacklist and the buildbots are no longer flakily hitting the 407 // in the blacklist and the buildbots are no longer flakily hitting the
404 // DCHECK, crbug.com/643438. 408 // DCHECK, crbug.com/643438.
405 409
406 // If you hit this DCHECK, your dump provider has a bug. 410 // If you hit this DCHECK, your dump provider has a bug.
407 // Unregistration of a MemoryDumpProvider is safe only if: 411 // Unregistration of a MemoryDumpProvider is safe only if:
408 // - The MDP has specified a sequenced task runner affinity AND the 412 // - The MDP has specified a sequenced task runner affinity AND the
409 // unregistration happens on the same task runner. So that the MDP cannot 413 // unregistration happens on the same task runner. So that the MDP cannot
410 // unregister and be in the middle of a OnMemoryDump() at the same time. 414 // unregister and be in the middle of a OnMemoryDump() at the same time.
411 // - The MDP has NOT specified a task runner affinity and its ownership is 415 // - The MDP has NOT specified a task runner affinity and its ownership is
412 // transferred via UnregisterAndDeleteDumpProviderSoon(). 416 // transferred via UnregisterAndDeleteDumpProviderSoon().
413 // In all the other cases, it is not possible to guarantee that the 417 // In all the other cases, it is not possible to guarantee that the
414 // unregistration will not race with OnMemoryDump() calls. 418 // unregistration will not race with OnMemoryDump() calls.
415 DCHECK((*mdp_iter)->task_runner && 419 DCHECK((*mdp_iter)->task_runner &&
416 (*mdp_iter)->task_runner->RunsTasksOnCurrentThread()) 420 (*mdp_iter)->task_runner->RunsTasksOnCurrentThread())
417 << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to " 421 << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to "
418 << "unregister itself in a racy way. Please file a crbug."; 422 << "unregister itself in a racy way. Please file a crbug.";
419 } 423 }
420 424
421 if ((*mdp_iter)->options.is_fast_polling_supported && dump_thread_) { 425 if ((*mdp_iter)->options.is_fast_polling_supported) {
422 DCHECK(take_mdp_ownership_and_delete_async); 426 DCHECK(take_mdp_ownership_and_delete_async);
423 dump_thread_->task_runner()->PostTask( 427 MemoryPeakDetector::GetInstance()->NotifyMemoryDumpProvidersChanged();
424 FROM_HERE,
425 BindOnce(&MemoryDumpManager::UnregisterPollingMDPOnDumpThread,
426 Unretained(this), *mdp_iter));
427 } 428 }
428 429
429 // The MDPInfo instance can still be referenced by the 430 // The MDPInfo instance can still be referenced by the
430 // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason 431 // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
431 // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump() 432 // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump()
432 // to just skip it, without actually invoking the |mdp|, which might be 433 // to just skip it, without actually invoking the |mdp|, which might be
433 // destroyed by the caller soon after this method returns. 434 // destroyed by the caller soon after this method returns.
434 (*mdp_iter)->disabled = true; 435 (*mdp_iter)->disabled = true;
435 dump_providers_.erase(mdp_iter); 436 dump_providers_.erase(mdp_iter);
436 } 437 }
437 438
438 void MemoryDumpManager::RegisterPollingMDPOnDumpThread(
439 scoped_refptr<MemoryDumpProviderInfo> mdpinfo) {
440 AutoLock lock(lock_);
441 dump_providers_for_polling_.insert(mdpinfo);
442
443 // Notify ready for polling when first polling supported provider is
444 // registered. This handles the case where OnTraceLogEnabled() did not notify
445 // ready since no polling supported mdp has yet been registered.
446 if (dump_providers_for_polling_.size() == 1)
447 MemoryDumpScheduler::GetInstance()->EnablePollingIfNeeded();
448 }
449
450 void MemoryDumpManager::UnregisterPollingMDPOnDumpThread(
451 scoped_refptr<MemoryDumpProviderInfo> mdpinfo) {
452 mdpinfo->dump_provider->SuspendFastMemoryPolling();
453
454 AutoLock lock(lock_);
455 dump_providers_for_polling_.erase(mdpinfo);
456 DCHECK(!dump_providers_for_polling_.empty())
457 << "Not all polling MDPs can be unregistered.";
458 }
459
460 void MemoryDumpManager::RequestGlobalDump( 439 void MemoryDumpManager::RequestGlobalDump(
461 MemoryDumpType dump_type, 440 MemoryDumpType dump_type,
462 MemoryDumpLevelOfDetail level_of_detail, 441 MemoryDumpLevelOfDetail level_of_detail,
463 const MemoryDumpCallback& callback) { 442 const MemoryDumpCallback& callback) {
464 // Bail out immediately if tracing is not enabled at all or if the dump mode 443 // Bail out immediately if tracing is not enabled at all or if the dump mode
465 // is not allowed. 444 // is not allowed.
466 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) || 445 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) ||
467 !IsDumpModeAllowed(level_of_detail)) { 446 !IsDumpModeAllowed(level_of_detail)) {
468 VLOG(1) << kLogPrefix << " failed because " << kTraceCategory 447 VLOG(1) << kLogPrefix << " failed because " << kTraceCategory
469 << " tracing category is not enabled or the requested dump mode is " 448 << " tracing category is not enabled or the requested dump mode is "
(...skipping 14 matching lines...) Expand all
484 MemoryDumpTypeToString(dump_type), "level_of_detail", 463 MemoryDumpTypeToString(dump_type), "level_of_detail",
485 MemoryDumpLevelOfDetailToString(level_of_detail)); 464 MemoryDumpLevelOfDetailToString(level_of_detail));
486 MemoryDumpCallback wrapped_callback = Bind(&OnGlobalDumpDone, callback); 465 MemoryDumpCallback wrapped_callback = Bind(&OnGlobalDumpDone, callback);
487 466
488 // The delegate will coordinate the IPC broadcast and at some point invoke 467 // The delegate will coordinate the IPC broadcast and at some point invoke
489 // CreateProcessDump() to get a dump for the current process. 468 // CreateProcessDump() to get a dump for the current process.
490 MemoryDumpRequestArgs args = {guid, dump_type, level_of_detail}; 469 MemoryDumpRequestArgs args = {guid, dump_type, level_of_detail};
491 delegate_->RequestGlobalMemoryDump(args, wrapped_callback); 470 delegate_->RequestGlobalMemoryDump(args, wrapped_callback);
492 } 471 }
493 472
473 void MemoryDumpManager::GetDumpProvidersForPolling(
474 std::vector<scoped_refptr<MemoryDumpProviderInfo>>* providers) {
475 DCHECK(providers->empty());
476 AutoLock lock(lock_);
477 for (const scoped_refptr<MemoryDumpProviderInfo>& mdp : dump_providers_) {
478 if (mdp->options.is_fast_polling_supported)
479 providers->push_back(mdp);
480 }
481 }
482
494 void MemoryDumpManager::RequestGlobalDump( 483 void MemoryDumpManager::RequestGlobalDump(
495 MemoryDumpType dump_type, 484 MemoryDumpType dump_type,
496 MemoryDumpLevelOfDetail level_of_detail) { 485 MemoryDumpLevelOfDetail level_of_detail) {
497 RequestGlobalDump(dump_type, level_of_detail, MemoryDumpCallback()); 486 RequestGlobalDump(dump_type, level_of_detail, MemoryDumpCallback());
498 } 487 }
499 488
500 bool MemoryDumpManager::IsDumpProviderRegisteredForTesting( 489 bool MemoryDumpManager::IsDumpProviderRegisteredForTesting(
501 MemoryDumpProvider* provider) { 490 MemoryDumpProvider* provider) {
502 AutoLock lock(lock_); 491 AutoLock lock(lock_);
503 492
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after
536 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( 525 pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
537 args, dump_providers_, session_state_, callback, 526 args, dump_providers_, session_state_, callback,
538 dump_thread_ ? dump_thread_->task_runner() : nullptr)); 527 dump_thread_ ? dump_thread_->task_runner() : nullptr));
539 528
540 // Safety check to prevent reaching here without calling RequestGlobalDump, 529 // Safety check to prevent reaching here without calling RequestGlobalDump,
541 // with disallowed modes. If |session_state_| is null then tracing is 530 // with disallowed modes. If |session_state_| is null then tracing is
542 // disabled. 531 // disabled.
543 CHECK(!session_state_ || 532 CHECK(!session_state_ ||
544 session_state_->IsDumpModeAllowed(args.level_of_detail)); 533 session_state_->IsDumpModeAllowed(args.level_of_detail));
545 534
546 MemoryDumpScheduler::GetInstance()->NotifyDumpTriggered(); 535 // If enabled, holds back the peak detector resetting its estimation window.
536 MemoryPeakDetector::GetInstance()->Throttle();
547 } 537 }
548 538
549 // Start the process dump. This involves task runner hops as specified by the 539 // Start the process dump. This involves task runner hops as specified by the
550 // MemoryDumpProvider(s) in RegisterDumpProvider()). 540 // MemoryDumpProvider(s) in RegisterDumpProvider()).
551 SetupNextMemoryDump(std::move(pmd_async_state)); 541 SetupNextMemoryDump(std::move(pmd_async_state));
552 } 542 }
553 543
554 // PostTask InvokeOnMemoryDump() to the dump provider's sequenced task runner. A 544 // PostTask InvokeOnMemoryDump() to the dump provider's sequenced task runner. A
555 // PostTask is always required for a generic SequencedTaskRunner to ensure that 545 // PostTask is always required for a generic SequencedTaskRunner to ensure that
556 // no other task is running on it concurrently. SetupNextMemoryDump() and 546 // no other task is running on it concurrently. SetupNextMemoryDump() and
(...skipping 155 matching lines...) Expand 10 before | Expand all | Expand 10 after
712 args); 702 args);
713 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd); 703 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd);
714 mdpinfo->consecutive_failures = 704 mdpinfo->consecutive_failures =
715 dump_successful ? 0 : mdpinfo->consecutive_failures + 1; 705 dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
716 } 706 }
717 707
718 pmd_async_state->pending_dump_providers.pop_back(); 708 pmd_async_state->pending_dump_providers.pop_back();
719 SetupNextMemoryDump(std::move(pmd_async_state)); 709 SetupNextMemoryDump(std::move(pmd_async_state));
720 } 710 }
721 711
722 bool MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) {
723 #if DCHECK_IS_ON()
724 {
725 AutoLock lock(lock_);
726 if (dump_thread_)
727 DCHECK(dump_thread_->task_runner()->BelongsToCurrentThread());
728 }
729 #endif
730 if (dump_providers_for_polling_.empty())
731 return false;
732
733 *memory_total = 0;
734 // Note that we call PollFastMemoryTotal() even if the dump provider is
735 // disabled (unregistered). This is to avoid taking lock while polling.
736 for (const auto& mdpinfo : dump_providers_for_polling_) {
737 uint64_t value = 0;
738 mdpinfo->dump_provider->PollFastMemoryTotal(&value);
739 *memory_total += value;
740 }
741 return true;
742 }
743
744 // static 712 // static
745 uint32_t MemoryDumpManager::GetDumpsSumKb(const std::string& pattern, 713 uint32_t MemoryDumpManager::GetDumpsSumKb(const std::string& pattern,
746 const ProcessMemoryDump* pmd) { 714 const ProcessMemoryDump* pmd) {
747 uint64_t sum = 0; 715 uint64_t sum = 0;
748 for (const auto& kv : pmd->allocator_dumps()) { 716 for (const auto& kv : pmd->allocator_dumps()) {
749 auto name = StringPiece(kv.first); 717 auto name = StringPiece(kv.first);
750 if (MatchPattern(name, pattern)) 718 if (MatchPattern(name, pattern))
751 sum += kv.second->GetSize(); 719 sum += kv.second->GetSize();
752 } 720 }
753 return sum / 1024; 721 return sum / 1024;
(...skipping 130 matching lines...) Expand 10 before | Expand all | Expand 10 after
884 MakeUnique<SessionStateConvertableProxy<StackFrameDeduplicator>>( 852 MakeUnique<SessionStateConvertableProxy<StackFrameDeduplicator>>(
885 session_state, &MemoryDumpSessionState::stack_frame_deduplicator)); 853 session_state, &MemoryDumpSessionState::stack_frame_deduplicator));
886 854
887 TRACE_EVENT_API_ADD_METADATA_EVENT( 855 TRACE_EVENT_API_ADD_METADATA_EVENT(
888 TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames", 856 TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames",
889 "typeNames", 857 "typeNames",
890 MakeUnique<SessionStateConvertableProxy<TypeNameDeduplicator>>( 858 MakeUnique<SessionStateConvertableProxy<TypeNameDeduplicator>>(
891 session_state, &MemoryDumpSessionState::type_name_deduplicator)); 859 session_state, &MemoryDumpSessionState::type_name_deduplicator));
892 } 860 }
893 861
894 { 862 AutoLock lock(lock_);
895 AutoLock lock(lock_);
896 863
897 DCHECK(delegate_); // At this point we must have a delegate. 864 DCHECK(delegate_); // At this point we must have a delegate.
898 session_state_ = session_state; 865 session_state_ = session_state;
899 866
900 DCHECK(!dump_thread_); 867 DCHECK(!dump_thread_);
901 dump_thread_ = std::move(dump_thread); 868 dump_thread_ = std::move(dump_thread);
902 869
903 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); 870 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
904 871
905 dump_providers_for_polling_.clear(); 872 MemoryDumpScheduler::Config periodic_config;
906 for (const auto& mdpinfo : dump_providers_) { 873 bool peak_detector_configured = false;
907 if (mdpinfo->options.is_fast_polling_supported) 874 for (const auto& trigger : memory_dump_config.triggers) {
908 dump_providers_for_polling_.insert(mdpinfo); 875 if (!session_state_->IsDumpModeAllowed(trigger.level_of_detail)) {
876 NOTREACHED();
877 continue;
909 } 878 }
879 if (trigger.trigger_type == MemoryDumpType::PERIODIC_INTERVAL) {
880 if (periodic_config.triggers.empty()) {
881 periodic_config.callback = BindRepeating(&OnPeriodicSchedulerTick);
882 }
883 periodic_config.triggers.push_back(
884 {trigger.level_of_detail, trigger.min_time_between_dumps_ms});
885 } else if (trigger.trigger_type == MemoryDumpType::PEAK_MEMORY_USAGE) {
886 // At most one peak trigger is allowed.
887 CHECK(!peak_detector_configured);
888 peak_detector_configured = true;
889 MemoryPeakDetector::GetInstance()->Setup(
890 BindRepeating(&MemoryDumpManager::GetDumpProvidersForPolling,
891 Unretained(this)),
892 dump_thread_->task_runner(),
893 BindRepeating(&OnPeakDetected, trigger.level_of_detail));
910 894
911 MemoryDumpScheduler* dump_scheduler = MemoryDumpScheduler::GetInstance(); 895 MemoryPeakDetector::Config peak_config;
912 dump_scheduler->Setup(this, dump_thread_->task_runner()); 896 peak_config.polling_interval_ms = 10;
913 DCHECK_LE(memory_dump_config.triggers.size(), 3u); 897 peak_config.min_time_between_peaks_ms = trigger.min_time_between_dumps_ms;
914 for (const auto& trigger : memory_dump_config.triggers) { 898 peak_config.enable_verbose_poll_tracing =
915 if (!session_state_->IsDumpModeAllowed(trigger.level_of_detail)) { 899 trigger.level_of_detail == MemoryDumpLevelOfDetail::DETAILED;
916 NOTREACHED(); 900 MemoryPeakDetector::GetInstance()->Start(peak_config);
917 continue; 901
902 // When peak detection is enabled, trigger a dump straight away as it
903 // gives a good reference point for analyzing the trace.
904 if (delegate_->IsCoordinator()) {
905 dump_thread_->task_runner()->PostTask(
906 FROM_HERE, BindRepeating(&OnPeakDetected, trigger.level_of_detail));
918 } 907 }
919 dump_scheduler->AddTrigger(trigger.trigger_type, trigger.level_of_detail,
920 trigger.min_time_between_dumps_ms);
921 } 908 }
922
923 // Notify polling supported only if some polling supported provider was
924 // registered, else RegisterPollingMDPOnDumpThread() will notify when first
925 // polling MDP registers.
926 if (!dump_providers_for_polling_.empty())
927 dump_scheduler->EnablePollingIfNeeded();
928
929 // Only coordinator process triggers periodic global memory dumps.
930 if (delegate_->IsCoordinator())
931 dump_scheduler->EnablePeriodicTriggerIfNeeded();
932 } 909 }
933 910
911 // Only coordinator process triggers periodic global memory dumps.
912 if (delegate_->IsCoordinator() && !periodic_config.triggers.empty()) {
913 MemoryDumpScheduler::GetInstance()->Start(periodic_config,
914 dump_thread_->task_runner());
915 }
934 } 916 }
935 917
936 void MemoryDumpManager::OnTraceLogDisabled() { 918 void MemoryDumpManager::OnTraceLogDisabled() {
937 // There might be a memory dump in progress while this happens. Therefore, 919 // There might be a memory dump in progress while this happens. Therefore,
938 // ensure that the MDM state which depends on the tracing enabled / disabled 920 // ensure that the MDM state which depends on the tracing enabled / disabled
939 // state is always accessed by the dumping methods holding the |lock_|. 921 // state is always accessed by the dumping methods holding the |lock_|.
940 if (!subtle::NoBarrier_Load(&memory_tracing_enabled_)) 922 if (!subtle::NoBarrier_Load(&memory_tracing_enabled_))
941 return; 923 return;
942 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); 924 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
943 std::unique_ptr<Thread> dump_thread; 925 std::unique_ptr<Thread> dump_thread;
944 { 926 {
945 AutoLock lock(lock_); 927 AutoLock lock(lock_);
928 MemoryDumpScheduler::GetInstance()->Stop();
929 MemoryPeakDetector::GetInstance()->TearDown();
946 dump_thread = std::move(dump_thread_); 930 dump_thread = std::move(dump_thread_);
947 session_state_ = nullptr; 931 session_state_ = nullptr;
948 MemoryDumpScheduler::GetInstance()->DisableAllTriggers();
949 } 932 }
950 933
951 // Thread stops are blocking and must be performed outside of the |lock_| 934 // Thread stops are blocking and must be performed outside of the |lock_|
952 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). 935 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it).
953 if (dump_thread) 936 if (dump_thread)
954 dump_thread->Stop(); 937 dump_thread->Stop();
955
956 // |dump_providers_for_polling_| must be cleared only after the dump thread is
957 // stopped (polling tasks are done).
958 {
959 AutoLock lock(lock_);
960 for (const auto& mdpinfo : dump_providers_for_polling_)
961 mdpinfo->dump_provider->SuspendFastMemoryPolling();
962 dump_providers_for_polling_.clear();
963 }
964 } 938 }
965 939
966 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) { 940 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
967 AutoLock lock(lock_); 941 AutoLock lock(lock_);
968 if (!session_state_) 942 if (!session_state_)
969 return false; 943 return false;
970 return session_state_->IsDumpModeAllowed(dump_mode); 944 return session_state_->IsDumpModeAllowed(dump_mode);
971 } 945 }
972 946
973 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState( 947 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
(...skipping 22 matching lines...) Expand all
996 if (iter == process_dumps.end()) { 970 if (iter == process_dumps.end()) {
997 std::unique_ptr<ProcessMemoryDump> new_pmd( 971 std::unique_ptr<ProcessMemoryDump> new_pmd(
998 new ProcessMemoryDump(session_state, dump_args)); 972 new ProcessMemoryDump(session_state, dump_args));
999 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; 973 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first;
1000 } 974 }
1001 return iter->second.get(); 975 return iter->second.get();
1002 } 976 }
1003 977
1004 } // namespace trace_event 978 } // namespace trace_event
1005 } // namespace base 979 } // namespace base
OLDNEW
« no previous file with comments | « base/trace_event/memory_dump_manager.h ('k') | base/trace_event/memory_dump_manager_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698