| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 | 8 |
| 9 #include "base/atomic_sequence_num.h" | 9 #include "base/atomic_sequence_num.h" |
| 10 #include "base/command_line.h" | 10 #include "base/command_line.h" |
| (...skipping 88 matching lines...) |
| 99 | 99 |
| 100 // static | 100 // static |
| 101 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) { | 101 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) { |
| 102 if (instance) | 102 if (instance) |
| 103 instance->skip_core_dumpers_auto_registration_for_testing_ = true; | 103 instance->skip_core_dumpers_auto_registration_for_testing_ = true; |
| 104 g_instance_for_testing = instance; | 104 g_instance_for_testing = instance; |
| 105 } | 105 } |
| 106 | 106 |
| 107 MemoryDumpManager::MemoryDumpManager() | 107 MemoryDumpManager::MemoryDumpManager() |
| 108 : delegate_(nullptr), | 108 : delegate_(nullptr), |
| 109 is_coordinator_(false), |
| 109 memory_tracing_enabled_(0), | 110 memory_tracing_enabled_(0), |
| 110 tracing_process_id_(kInvalidTracingProcessId), | 111 tracing_process_id_(kInvalidTracingProcessId), |
| 111 skip_core_dumpers_auto_registration_for_testing_(false), | 112 skip_core_dumpers_auto_registration_for_testing_(false), |
| 112 disable_periodic_dumps_for_testing_(false) { | 113 disable_periodic_dumps_for_testing_(false) { |
| 113 g_next_guid.GetNext(); // Make sure that first guid is not zero. | 114 g_next_guid.GetNext(); // Make sure that first guid is not zero. |
| 114 } | 115 } |
| 115 | 116 |
| 116 MemoryDumpManager::~MemoryDumpManager() { | 117 MemoryDumpManager::~MemoryDumpManager() { |
| 117 TraceLog::GetInstance()->RemoveEnabledStateObserver(this); | 118 TraceLog::GetInstance()->RemoveEnabledStateObserver(this); |
| 118 } | 119 } |
| 119 | 120 |
| 120 void MemoryDumpManager::Initialize() { | 121 void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate, |
| 121 TRACE_EVENT0(kTraceCategory, "init"); // Add to trace-viewer category list. | 122 bool is_coordinator) { |
| 122 | 123 { |
| 123 TraceLog::GetInstance()->AddEnabledStateObserver(this); | 124 AutoLock lock(lock_); |
| 125 DCHECK(delegate); |
| 126 DCHECK(!delegate_); |
| 127 delegate_ = delegate; |
| 128 is_coordinator_ = is_coordinator; |
| 129 } |
| 124 | 130 |
| 125 // Enable the core dump providers. | 131 // Enable the core dump providers. |
| 126 if (!skip_core_dumpers_auto_registration_for_testing_) { | 132 if (!skip_core_dumpers_auto_registration_for_testing_) { |
| 127 #if !defined(OS_NACL) | 133 #if !defined(OS_NACL) |
| 128 RegisterDumpProvider(ProcessMemoryTotalsDumpProvider::GetInstance()); | 134 RegisterDumpProvider(ProcessMemoryTotalsDumpProvider::GetInstance()); |
| 129 #endif | 135 #endif |
| 130 | 136 |
| 131 #if defined(OS_LINUX) || defined(OS_ANDROID) | 137 #if defined(OS_LINUX) || defined(OS_ANDROID) |
| 132 RegisterDumpProvider(ProcessMemoryMapsDumpProvider::GetInstance()); | 138 RegisterDumpProvider(ProcessMemoryMapsDumpProvider::GetInstance()); |
| 133 RegisterDumpProvider(MallocDumpProvider::GetInstance()); | 139 RegisterDumpProvider(MallocDumpProvider::GetInstance()); |
| 134 #endif | 140 #endif |
| 135 | 141 |
| 136 #if defined(OS_ANDROID) | 142 #if defined(OS_ANDROID) |
| 137 RegisterDumpProvider(JavaHeapDumpProvider::GetInstance()); | 143 RegisterDumpProvider(JavaHeapDumpProvider::GetInstance()); |
| 138 #endif | 144 #endif |
| 139 | 145 |
| 140 #if defined(OS_WIN) | 146 #if defined(OS_WIN) |
| 141 RegisterDumpProvider(WinHeapDumpProvider::GetInstance()); | 147 RegisterDumpProvider(WinHeapDumpProvider::GetInstance()); |
| 142 #endif | 148 #endif |
| 143 } // !skip_core_dumpers_auto_registration_for_testing_ | 149 } // !skip_core_dumpers_auto_registration_for_testing_ |
| 144 } | |
| 145 | 150 |
| 146 void MemoryDumpManager::SetDelegate(MemoryDumpManagerDelegate* delegate) { | 151 // If tracing was enabled before initializing MemoryDumpManager, we missed the |
| 147 AutoLock lock(lock_); | 152 // OnTraceLogEnabled() event. Synthetize it so we can late-join the party. |
| 148 DCHECK_EQ(static_cast<MemoryDumpManagerDelegate*>(nullptr), delegate_); | 153 bool is_tracing_already_enabled = TraceLog::GetInstance()->IsEnabled(); |
| 149 delegate_ = delegate; | 154 TRACE_EVENT0(kTraceCategory, "init"); // Add to trace-viewer category list. |
| 155 TraceLog::GetInstance()->AddEnabledStateObserver(this); |
| 156 if (is_tracing_already_enabled) |
| 157 OnTraceLogEnabled(); |
| 150 } | 158 } |
| 151 | 159 |
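Note for reviewers: with this change embedders hand the delegate and the coordinator bit to a single Initialize() call (SetDelegate() below goes away), and if TraceLog was already enabled the missed OnTraceLogEnabled() is synthesized. A minimal caller sketch, assuming only what this diff shows; the helper name and the requirement comments are ours, not part of the CL:

#include "base/trace_event/memory_dump_manager.h"

// Sketch: wire up memory-infra for the current process. |delegate| must
// outlive the MemoryDumpManager (Initialize() stores the raw pointer and
// DCHECKs it is set exactly once); |is_coordinator| replaces the old
// delegate->IsCoordinatorProcess() query used in OnTraceLogEnabled().
void InitMemoryInfra(base::trace_event::MemoryDumpManagerDelegate* delegate,
                     bool is_coordinator) {
  base::trace_event::MemoryDumpManager::GetInstance()->Initialize(
      delegate, is_coordinator);
}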
| 152 void MemoryDumpManager::RegisterDumpProvider( | 160 void MemoryDumpManager::RegisterDumpProvider( |
| 153 MemoryDumpProvider* mdp, | 161 MemoryDumpProvider* mdp, |
| 154 const scoped_refptr<SingleThreadTaskRunner>& task_runner) { | 162 const scoped_refptr<SingleThreadTaskRunner>& task_runner) { |
| 155 MemoryDumpProviderInfo mdp_info(mdp, task_runner); | 163 MemoryDumpProviderInfo mdp_info(mdp, task_runner); |
| 156 AutoLock lock(lock_); | 164 AutoLock lock(lock_); |
| 157 auto iter_new = dump_providers_.insert(mdp_info); | 165 auto iter_new = dump_providers_.insert(mdp_info); |
| 158 | 166 |
| 159 // If there was a previous entry, replace it with the new one. This is to deal | 167 // If there was a previous entry, replace it with the new one. This is to deal |
| (...skipping 43 matching lines...) |
| 203 // Bail out immediately if tracing is not enabled at all. | 211 // Bail out immediately if tracing is not enabled at all. |
| 204 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) { | 212 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) { |
| 205 if (!callback.is_null()) | 213 if (!callback.is_null()) |
| 206 callback.Run(0u /* guid */, false /* success */); | 214 callback.Run(0u /* guid */, false /* success */); |
| 207 return; | 215 return; |
| 208 } | 216 } |
| 209 | 217 |
| 210 const uint64 guid = | 218 const uint64 guid = |
| 211 TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext()); | 219 TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext()); |
| 212 | 220 |
| 213 // The delegate_ is supposed to be thread safe, immutable and long lived. | 221 // Technically there is no need to grab the |lock_| here as the delegate is |
| 214 // No need to keep the lock after we ensured that a delegate has been set. | 222 // long-lived and can only be set by Initialize(), which is locked and |
| 223 // necessarily happens before memory_tracing_enabled_ == true. |
| 224 // Not taking the |lock_|, though, is likely to make TSan barf and, at this point |
| 225 // (memory-infra is enabled) we're not in the fast-path anymore. |
| 215 MemoryDumpManagerDelegate* delegate; | 226 MemoryDumpManagerDelegate* delegate; |
| 216 { | 227 { |
| 217 AutoLock lock(lock_); | 228 AutoLock lock(lock_); |
| 218 delegate = delegate_; | 229 delegate = delegate_; |
| 219 } | 230 } |
| 220 | 231 |
| 221 if (delegate) { | 232 // The delegate will coordinate the IPC broadcast and at some point invoke |
| 222 // The delegate is in charge to coordinate the request among all the | 233 // CreateProcessDump() to get a dump for the current process. |
| 223 // processes and call the CreateLocalDumpPoint on the local process. | 234 MemoryDumpRequestArgs args = {guid, dump_type, dump_args}; |
| 224 MemoryDumpRequestArgs args = {guid, dump_type, dump_args}; | 235 delegate->RequestGlobalMemoryDump(args, callback); |
| 225 delegate->RequestGlobalMemoryDump(args, callback); | |
| 226 } else if (!callback.is_null()) { | |
| 227 callback.Run(guid, false /* success */); | |
| 228 } | |
| 229 } | 236 } |
| 230 | 237 |
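Note for reviewers: per the comment above, the delegate owns the cross-process fan-out and eventually calls CreateProcessDump() for the local process. A single-process sketch of that contract; the class name is illustrative, any other pure-virtual members of the delegate interface are omitted, and in the real tree the call back into the manager may go through a protected helper on the delegate base class rather than the direct call shown here:

#include "base/trace_event/memory_dump_manager.h"

// Illustrative-only delegate: no IPC, a "global" dump is just a dump of the
// current process, acknowledged through the same |callback| and guid.
class InProcessDumpDelegate
    : public base::trace_event::MemoryDumpManagerDelegate {
 public:
  void RequestGlobalMemoryDump(
      const base::trace_event::MemoryDumpRequestArgs& args,
      const base::trace_event::MemoryDumpCallback& callback) override {
    // A multi-process embedder would broadcast |args| over IPC here and run
    // |callback| once every process has produced its dump.
    base::trace_event::MemoryDumpManager::GetInstance()->CreateProcessDump(
        args, callback);
  }
};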
| 231 void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type, | 238 void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type, |
| 232 const MemoryDumpArgs& dump_args) { | 239 const MemoryDumpArgs& dump_args) { |
| 233 RequestGlobalDump(dump_type, dump_args, MemoryDumpCallback()); | 240 RequestGlobalDump(dump_type, dump_args, MemoryDumpCallback()); |
| 234 } | 241 } |
| 235 | 242 |
| 236 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, | 243 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, |
| 237 const MemoryDumpCallback& callback) { | 244 const MemoryDumpCallback& callback) { |
| 238 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; | 245 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; |
| (...skipping 148 matching lines...) |
| 387 if (callback.is_null()) | 394 if (callback.is_null()) |
| 388 return; // There is nothing to NACK. | 395 return; // There is nothing to NACK. |
| 389 | 396 |
| 390 // Post the callback even if we are already on the right thread to avoid | 397 // Post the callback even if we are already on the right thread to avoid |
| 391 // invoking the callback while holding the lock_. | 398 // invoking the callback while holding the lock_. |
| 392 task_runner->PostTask(FROM_HERE, | 399 task_runner->PostTask(FROM_HERE, |
| 393 Bind(callback, dump_guid, false /* success */)); | 400 Bind(callback, dump_guid, false /* success */)); |
| 394 } | 401 } |
| 395 | 402 |
| 396 void MemoryDumpManager::OnTraceLogEnabled() { | 403 void MemoryDumpManager::OnTraceLogEnabled() { |
| 397 // TODO(primiano): at this point we query TraceLog::GetCurrentCategoryFilter | |
| 398 // to figure out (and cache) which dumpers should be enabled or not. | |
| 399 // For the moment piggy back everything on the generic "memory" category. | |
| 400 bool enabled; | 404 bool enabled; |
| 401 TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled); | 405 TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled); |
| 406 if (!enabled) |
| 407 return; |
| 402 | 408 |
| 403 // Initialize the TraceLog for the current thread. This is to avoid that the | 409 // Initialize the TraceLog for the current thread. This is to avoid that the |
| 404 // TraceLog memory dump provider is registered lazily in the PostTask() below | 410 // TraceLog memory dump provider is registered lazily in the PostTask() below |
| 405 // while the |lock_| is taken; | 411 // while the |lock_| is taken; |
| 406 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); | 412 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); |
| 407 | 413 |
| 408 AutoLock lock(lock_); | 414 AutoLock lock(lock_); |
| 409 | 415 |
| 410 // There is no point starting the tracing without a delegate. | 416 DCHECK(delegate_); // At this point we must have a delegate. |
| 411 if (!enabled || !delegate_) { | |
| 412 // Disable all the providers. | |
| 413 for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) | |
| 414 it->disabled = true; | |
| 415 return; | |
| 416 } | |
| 417 | 417 |
| 418 session_state_ = new MemoryDumpSessionState(); | 418 session_state_ = new MemoryDumpSessionState(); |
| 419 for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) { | 419 for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) { |
| 420 it->disabled = false; | 420 it->disabled = false; |
| 421 it->consecutive_failures = 0; | 421 it->consecutive_failures = 0; |
| 422 } | 422 } |
| 423 | 423 |
| 424 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); | 424 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); |
| 425 | 425 |
| 426 // TODO(primiano): This is a temporary hack to disable periodic memory dumps | 426 // TODO(primiano): This is a temporary hack to disable periodic memory dumps |
| 427 // when running memory benchmarks until telemetry uses TraceConfig to | 427 // when running memory benchmarks until telemetry uses TraceConfig to |
| 428 // enable/disable periodic dumps. | 428 // enable/disable periodic dumps. See crbug.com/529184. |
| 429 // The same mechanism should be used to disable periodic dumps in tests. | 429 // The same mechanism should be used to disable periodic dumps in tests. |
| 430 if (!delegate_->IsCoordinatorProcess() || | 430 if (!is_coordinator_ || |
| 431 CommandLine::ForCurrentProcess()->HasSwitch( | 431 CommandLine::ForCurrentProcess()->HasSwitch( |
| 432 "enable-memory-benchmarking") || | 432 "enable-memory-benchmarking") || |
| 433 disable_periodic_dumps_for_testing_) { | 433 disable_periodic_dumps_for_testing_) { |
| 434 return; | 434 return; |
| 435 } | 435 } |
| 436 | 436 |
| 437 // Enable periodic dumps. At the moment the periodic support is limited to at | 437 // Enable periodic dumps. At the moment the periodic support is limited to at |
| 438 // most one low-detail periodic dump and at most one high-detail periodic | 438 // most one low-detail periodic dump and at most one high-detail periodic |
| 439 // dump. If both are specified the high-detail period must be an integer | 439 // dump. If both are specified the high-detail period must be an integer |
| 440 // multiple of the low-level one. | 440 // multiple of the low-level one. |
| (...skipping 63 matching lines...) |
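Note for reviewers: the elided lines implement the constraint stated above (at most one low-detail and one high-detail periodic dump, with the high-detail period an integer multiple of the low-detail one). A schematic of the tick logic only; the periods, the tick function, and RequestPeriodicDump() are hypothetical stand-ins, not code from this CL:

#include <stdint.h>

#include "base/logging.h"

// Hypothetical helper that would wrap RequestGlobalDump() with a periodic
// dump type and the chosen level of detail; declared here only so the sketch
// is self-contained.
void RequestPeriodicDump(bool high_detail);

void OnPeriodicTimerTick() {
  static uint32_t tick_count = 0;
  const uint32_t light_period_ms = 250;   // low-detail dumps (example value)
  const uint32_t heavy_period_ms = 2000;  // high-detail dumps (example value)
  // The high-detail period must be an integer multiple of the low-detail one,
  // so a single timer running at the low-detail period can serve both.
  DCHECK_EQ(0u, heavy_period_ms % light_period_ms);
  const uint32_t heavy_every_n_ticks = heavy_period_ms / light_period_ms;
  // Every Nth tick is promoted to a high-detail dump; all other ticks stay
  // low-detail.
  const bool high_detail = (tick_count++ % heavy_every_n_ticks) == 0;
  RequestPeriodicDump(high_detail);
}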
| 504 next_dump_provider(next_dump_provider), | 504 next_dump_provider(next_dump_provider), |
| 505 callback(callback), | 505 callback(callback), |
| 506 task_runner(MessageLoop::current()->task_runner()) { | 506 task_runner(MessageLoop::current()->task_runner()) { |
| 507 } | 507 } |
| 508 | 508 |
| 509 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() { | 509 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() { |
| 510 } | 510 } |
| 511 | 511 |
| 512 } // namespace trace_event | 512 } // namespace trace_event |
| 513 } // namespace base | 513 } // namespace base |