Chromium Code Reviews| Index: base/trace_event/memory_dump_manager.cc |
| diff --git a/base/trace_event/memory_dump_manager.cc b/base/trace_event/memory_dump_manager.cc |
| index 18b255ab8b1db1303f3ee4567f309f819622e11b..538534c185eccc3e647b0b5d8aae3be8d6cd4eb2 100644 |
| --- a/base/trace_event/memory_dump_manager.cc |
| +++ b/base/trace_event/memory_dump_manager.cc |
| @@ -24,6 +24,7 @@ |
| #include "base/trace_event/heap_profiler_type_name_deduplicator.h" |
| #include "base/trace_event/malloc_dump_provider.h" |
| #include "base/trace_event/memory_dump_provider.h" |
| +#include "base/trace_event/memory_dump_scheduler.h" |
| #include "base/trace_event/memory_dump_session_state.h" |
| #include "base/trace_event/memory_infra_background_whitelist.h" |
| #include "base/trace_event/process_memory_dump.h" |
| @@ -160,7 +161,6 @@ void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) { |
| MemoryDumpManager::MemoryDumpManager() |
| : delegate_(nullptr), |
| - is_coordinator_(false), |
| memory_tracing_enabled_(0), |
| tracing_process_id_(kInvalidTracingProcessId), |
| dumper_registrations_ignored_for_testing_(false), |
| @@ -418,6 +418,12 @@ void MemoryDumpManager::RegisterPollingMDPOnDumpThread( |
| scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) { |
| AutoLock lock(lock_); |
| dump_providers_for_polling_.insert(mdpinfo); |
| + |
| + // Notify ready for polling when first polling supported provider is |
| + // registered. This handles the case where OnTraceLogEnabled() did not notify |
| + // ready since no polling supported mdp has yet been registered. |
| + if (dump_providers_for_polling_.size() == 1) |
| + dump_scheduler_->NotifyPollingSupported(); |
| } |
| void MemoryDumpManager::UnregisterPollingMDPOnDumpThread( |
| @@ -426,6 +432,9 @@ void MemoryDumpManager::UnregisterPollingMDPOnDumpThread( |
| AutoLock lock(lock_); |
| dump_providers_for_polling_.erase(mdpinfo); |
| + DCHECK(!dump_providers_for_polling_.empty()) |
| + << "All polling MDPs cannot be unregistered, since it will cause polling " |
|
Primiano Tucci (use gerrit)
2017/01/26 02:52:22
i'd remove the "since... " part here.
ssid
2017/01/26 21:48:57
Done.
|
| + "without any MDPs"; |
| } |
| void MemoryDumpManager::RequestGlobalDump( |
| @@ -507,8 +516,7 @@ void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, |
| // with disallowed modes. If |session_state_| is null then tracing is |
| // disabled. |
| CHECK(!session_state_ || |
| - session_state_->memory_dump_config().allowed_dump_modes.count( |
| - args.level_of_detail)); |
| + session_state_->IsDumpModeAllowed(args.level_of_detail)); |
| } |
| TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump", |
| @@ -681,7 +689,17 @@ void MemoryDumpManager::InvokeOnMemoryDump( |
| SetupNextMemoryDump(std::move(pmd_async_state)); |
| } |
| -void MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) { |
| +bool MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) { |
| +#if DCHECK_IS_ON() |
| + { |
| + AutoLock lock(lock_); |
|
Primiano Tucci (use gerrit)
2017/01/26 02:52:22
I think I wrote this in another cl but forgot the
ssid
2017/01/26 21:48:57
Yes that is true. That is why I am holding a lock
|
| + if (dump_thread_) |
| + DCHECK(dump_thread_->task_runner()->BelongsToCurrentThread()); |
| + } |
| +#endif |
| + if (dump_providers_for_polling_.empty()) |
| + return false; |
| + |
| *memory_total = 0; |
| // Note that we call PollFastMemoryTotal() even if the dump provider is |
| // disabled (unregistered). This is to avoid taking lock while polling. |
| @@ -690,7 +708,7 @@ void MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) { |
| mdpinfo->dump_provider->PollFastMemoryTotal(&value); |
| *memory_total += value; |
| } |
| - return; |
| + return true; |
| } |
| // static |
| @@ -769,11 +787,13 @@ void MemoryDumpManager::OnTraceLogEnabled() { |
| return; |
| } |
| - const TraceConfig trace_config = |
| - TraceLog::GetInstance()->GetCurrentTraceConfig(); |
| + const TraceConfig::MemoryDumpConfig memory_dump_config = |
|
Primiano Tucci (use gerrit)
2017/01/26 02:52:22
since you don't need to copy this anymore, you can
ssid
2017/01/26 21:48:57
Done.
ssid
2017/01/27 00:43:24
Actually this would need keep alive a copy of trac
|
| + TraceLog::GetInstance()->GetCurrentTraceConfig().memory_dump_config(); |
| scoped_refptr<MemoryDumpSessionState> session_state = |
| new MemoryDumpSessionState; |
| - session_state->SetMemoryDumpConfig(trace_config.memory_dump_config()); |
| + session_state->SetAllowedDumpModes(memory_dump_config.allowed_dump_modes); |
| + session_state->set_heap_profiler_breakdown_threshold_bytes( |
| + memory_dump_config.heap_profiler_options.breakdown_threshold_bytes); |
| if (heap_profiling_enabled_) { |
| // If heap profiling is enabled, the stack frame deduplicator and type name |
| // deduplicator will be in use. Add a metadata events to write the frames |
| @@ -797,6 +817,15 @@ void MemoryDumpManager::OnTraceLogEnabled() { |
| session_state, &MemoryDumpSessionState::type_name_deduplicator)); |
| } |
| + std::unique_ptr<MemoryDumpScheduler> dump_scheduler( |
| + WrapUnique(new MemoryDumpScheduler(this, dump_thread->task_runner()))); |
|
Primiano Tucci (use gerrit)
2017/01/26 02:52:23
no need to WrapUnique if you do this explicitly.
E
ssid
2017/01/26 21:48:57
Done.
|
| + DCHECK_LE(memory_dump_config.triggers.size(), 3u); |
|
Primiano Tucci (use gerrit)
2017/01/26 02:52:23
shouldn't this dcheck be in the scheduler?
ssid
2017/01/26 21:48:56
We cannot easily have this in scheduler since we n
|
| + for (const auto& trigger : memory_dump_config.triggers) { |
| + DCHECK(session_state->IsDumpModeAllowed(trigger.level_of_detail)); |
|
Primiano Tucci (use gerrit)
2017/01/26 02:52:23
shoudln't this be:
if (!allowed) {
NOTREACHED();
ssid
2017/01/26 21:48:56
Done.
|
| + dump_scheduler->AddTrigger(trigger.trigger_type, trigger.level_of_detail, |
| + trigger.min_time_between_dumps_ms); |
| + } |
| + |
| { |
| AutoLock lock(lock_); |
| @@ -805,21 +834,26 @@ void MemoryDumpManager::OnTraceLogEnabled() { |
| DCHECK(!dump_thread_); |
| dump_thread_ = std::move(dump_thread); |
| + dump_scheduler_ = std::move(dump_scheduler); |
| + |
| + subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); |
| dump_providers_for_polling_.clear(); |
| for (const auto& mdpinfo : dump_providers_) { |
| if (mdpinfo->options.is_fast_polling_supported) |
| dump_providers_for_polling_.insert(mdpinfo); |
| } |
| + // Notify polling supported only if some polling supported provider was |
| + // registered, else RegisterPollingMDPOnDumpThread() will notify when first |
| + // polling MDP registers. |
| + if (!dump_providers_for_polling_.empty()) |
| + dump_scheduler_->NotifyPollingSupported(); |
| - subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); |
| - |
| - if (!is_coordinator_) |
| - return; |
| + // Only coordinator process triggers periodic global memory dumps. |
| + if (is_coordinator_) |
| + dump_scheduler_->NotifyPeriodicTriggerSupported(); |
| } |
| - // Enable periodic dumps if necessary. |
| - periodic_dump_timer_.Start(trace_config.memory_dump_config().triggers); |
| } |
| void MemoryDumpManager::OnTraceLogDisabled() { |
| @@ -830,15 +864,17 @@ void MemoryDumpManager::OnTraceLogDisabled() { |
| return; |
| subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); |
| std::unique_ptr<Thread> dump_thread; |
| + std::unique_ptr<MemoryDumpScheduler> scheduler = nullptr; |
|
Primiano Tucci (use gerrit)
2017/01/26 02:52:23
no need for the = nullptr part, it should be impli
ssid
2017/01/26 21:48:57
Done.
|
| { |
| AutoLock lock(lock_); |
| dump_thread = std::move(dump_thread_); |
| session_state_ = nullptr; |
| + scheduler = std::move(dump_scheduler_); |
| } |
| + scheduler->DisableAllTriggers(); |
| // Thread stops are blocking and must be performed outside of the |lock_| |
| // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). |
| - periodic_dump_timer_.Stop(); |
| if (dump_thread) |
| dump_thread->Stop(); |
| @@ -856,8 +892,7 @@ bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) { |
| AutoLock lock(lock_); |
| if (!session_state_) |
| return false; |
| - return session_state_->memory_dump_config().allowed_dump_modes.count( |
| - dump_mode) != 0; |
| + return session_state_->IsDumpModeAllowed(dump_mode); |
| } |
| uint64_t MemoryDumpManager::GetTracingProcessId() const { |
| @@ -923,80 +958,5 @@ ProcessMemoryDump* MemoryDumpManager::ProcessMemoryDumpAsyncState:: |
| return iter->second.get(); |
| } |
| -MemoryDumpManager::PeriodicGlobalDumpTimer::PeriodicGlobalDumpTimer() {} |
| - |
| -MemoryDumpManager::PeriodicGlobalDumpTimer::~PeriodicGlobalDumpTimer() { |
| - Stop(); |
| -} |
| - |
| -void MemoryDumpManager::PeriodicGlobalDumpTimer::Start( |
| - const std::vector<TraceConfig::MemoryDumpConfig::Trigger>& triggers_list) { |
| - if (triggers_list.empty()) |
| - return; |
| - |
| - // At the moment the periodic support is limited to at most one periodic |
| - // trigger per dump mode. All intervals should be an integer multiple of the |
| - // smallest interval specified. |
| - periodic_dumps_count_ = 0; |
| - uint32_t min_timer_period_ms = std::numeric_limits<uint32_t>::max(); |
| - uint32_t light_dump_period_ms = 0; |
| - uint32_t heavy_dump_period_ms = 0; |
| - DCHECK_LE(triggers_list.size(), 3u); |
| - auto* mdm = MemoryDumpManager::GetInstance(); |
| - for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) { |
| - DCHECK_NE(0u, config.min_time_between_dumps_ms); |
| - DCHECK_EQ(MemoryDumpType::PERIODIC_INTERVAL, config.trigger_type) |
| - << "Only periodic_interval triggers are suppported"; |
| - switch (config.level_of_detail) { |
| - case MemoryDumpLevelOfDetail::BACKGROUND: |
| - DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::BACKGROUND)); |
| - break; |
| - case MemoryDumpLevelOfDetail::LIGHT: |
| - DCHECK_EQ(0u, light_dump_period_ms); |
| - DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::LIGHT)); |
| - light_dump_period_ms = config.min_time_between_dumps_ms; |
| - break; |
| - case MemoryDumpLevelOfDetail::DETAILED: |
| - DCHECK_EQ(0u, heavy_dump_period_ms); |
| - DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::DETAILED)); |
| - heavy_dump_period_ms = config.min_time_between_dumps_ms; |
| - break; |
| - } |
| - min_timer_period_ms = |
| - std::min(min_timer_period_ms, config.min_time_between_dumps_ms); |
| - } |
| - |
| - DCHECK_EQ(0u, light_dump_period_ms % min_timer_period_ms); |
| - light_dump_rate_ = light_dump_period_ms / min_timer_period_ms; |
| - DCHECK_EQ(0u, heavy_dump_period_ms % min_timer_period_ms); |
| - heavy_dump_rate_ = heavy_dump_period_ms / min_timer_period_ms; |
| - |
| - timer_.Start(FROM_HERE, TimeDelta::FromMilliseconds(min_timer_period_ms), |
| - base::Bind(&PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump, |
| - base::Unretained(this))); |
| -} |
| - |
| -void MemoryDumpManager::PeriodicGlobalDumpTimer::Stop() { |
| - if (IsRunning()) { |
| - timer_.Stop(); |
| - } |
| -} |
| - |
| -bool MemoryDumpManager::PeriodicGlobalDumpTimer::IsRunning() { |
| - return timer_.IsRunning(); |
| -} |
| - |
| -void MemoryDumpManager::PeriodicGlobalDumpTimer::RequestPeriodicGlobalDump() { |
| - MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND; |
| - if (light_dump_rate_ > 0 && periodic_dumps_count_ % light_dump_rate_ == 0) |
| - level_of_detail = MemoryDumpLevelOfDetail::LIGHT; |
| - if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0) |
| - level_of_detail = MemoryDumpLevelOfDetail::DETAILED; |
| - ++periodic_dumps_count_; |
| - |
| - MemoryDumpManager::GetInstance()->RequestGlobalDump( |
| - MemoryDumpType::PERIODIC_INTERVAL, level_of_detail); |
| -} |
| - |
| } // namespace trace_event |
| } // namespace base |