| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 | 8 |
| 9 #include "base/atomic_sequence_num.h" | 9 #include "base/atomic_sequence_num.h" |
| 10 #include "base/compiler_specific.h" | 10 #include "base/compiler_specific.h" |
| 11 #include "base/hash.h" |
| 12 #include "base/thread_task_runner_handle.h" |
| 11 #include "base/trace_event/memory_dump_provider.h" | 13 #include "base/trace_event/memory_dump_provider.h" |
| 12 #include "base/trace_event/memory_dump_session_state.h" | 14 #include "base/trace_event/memory_dump_session_state.h" |
| 13 #include "base/trace_event/process_memory_dump.h" | 15 #include "base/trace_event/process_memory_dump.h" |
| 14 #include "base/trace_event/trace_event_argument.h" | 16 #include "base/trace_event/trace_event_argument.h" |
| 15 #include "build/build_config.h" | 17 #include "build/build_config.h" |
| 16 | 18 |
| 17 #if !defined(OS_NACL) | 19 #if !defined(OS_NACL) |
| 18 #include "base/trace_event/process_memory_totals_dump_provider.h" | 20 #include "base/trace_event/process_memory_totals_dump_provider.h" |
| 19 #endif | 21 #endif |
| 20 | 22 |
| (...skipping 12 matching lines...) |
| 33 | 35 |
| 34 namespace base { | 36 namespace base { |
| 35 namespace trace_event { | 37 namespace trace_event { |
| 36 | 38 |
| 37 namespace { | 39 namespace { |
| 38 | 40 |
| 39 // TODO(primiano): this should be smarter and should do something similar to | 41 // TODO(primiano): this should be smarter and should do something similar to |
| 40 // trace event synthetic delays. | 42 // trace event synthetic delays. |
| 41 const char kTraceCategory[] = TRACE_DISABLED_BY_DEFAULT("memory-infra"); | 43 const char kTraceCategory[] = TRACE_DISABLED_BY_DEFAULT("memory-infra"); |
| 42 | 44 |
| 43 MemoryDumpManager* g_instance_for_testing = nullptr; | 45 // Throttle mmaps dumps to once every kHeavyMmapsDumpsRate standard dumps. |
| 44 const int kDumpIntervalSeconds = 2; | 46 const int kHeavyMmapsDumpsRate = 8; // 250 ms * 8 = 2000 ms. |
| 47 const int kDumpIntervalMs = 250; |
| 45 const int kTraceEventNumArgs = 1; | 48 const int kTraceEventNumArgs = 1; |
| 46 const char* kTraceEventArgNames[] = {"dumps"}; | 49 const char* kTraceEventArgNames[] = {"dumps"}; |
| 47 const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE}; | 50 const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE}; |
| 51 |
| 48 StaticAtomicSequenceNumber g_next_guid; | 52 StaticAtomicSequenceNumber g_next_guid; |
| 49 | 53 uint32 g_periodic_dumps_count = 0; |
| 50 const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type) { | 54 MemoryDumpManager* g_instance_for_testing = nullptr; |
| 51 switch (dump_type) { | 55 MemoryDumpProvider* g_mmaps_dump_provider = nullptr; |
| 52 case MemoryDumpType::TASK_BEGIN: | |
| 53 return "TASK_BEGIN"; | |
| 54 case MemoryDumpType::TASK_END: | |
| 55 return "TASK_END"; | |
| 56 case MemoryDumpType::PERIODIC_INTERVAL: | |
| 57 return "PERIODIC_INTERVAL"; | |
| 58 case MemoryDumpType::EXPLICITLY_TRIGGERED: | |
| 59 return "EXPLICITLY_TRIGGERED"; | |
| 60 } | |
| 61 NOTREACHED(); | |
| 62 return "UNKNOWN"; | |
| 63 } | |
| 64 | |
| 65 // Internal class used to hold details about ProcessMemoryDump requests for the | |
| 66 // current process. | |
| 67 // TODO(primiano): In the upcoming CLs, ProcessMemoryDump will become async. | |
| 68 // and this class will be used to convey more details across PostTask()s. | |
| 69 class ProcessMemoryDumpHolder | |
| 70 : public RefCountedThreadSafe<ProcessMemoryDumpHolder> { | |
| 71 public: | |
| 72 ProcessMemoryDumpHolder( | |
| 73 MemoryDumpRequestArgs req_args, | |
| 74 const scoped_refptr<MemoryDumpSessionState>& session_state, | |
| 75 MemoryDumpCallback callback) | |
| 76 : process_memory_dump(session_state), | |
| 77 req_args(req_args), | |
| 78 callback(callback), | |
| 79 task_runner(MessageLoop::current()->task_runner()), | |
| 80 num_pending_async_requests(0) {} | |
| 81 | |
| 82 ProcessMemoryDump process_memory_dump; | |
| 83 const MemoryDumpRequestArgs req_args; | |
| 84 | |
| 85 // Callback passed to the initial call to CreateProcessDump(). | |
| 86 MemoryDumpCallback callback; | |
| 87 | |
| 88 // Thread on which FinalizeDumpAndAddToTrace() should be called, which is the | |
| 89 // same that invoked the initial CreateProcessDump(). | |
| 90 const scoped_refptr<SingleThreadTaskRunner> task_runner; | |
| 91 | |
| 92 // Number of pending ContinueAsyncProcessDump() calls. | |
| 93 int num_pending_async_requests; | |
| 94 | |
| 95 private: | |
| 96 friend class RefCountedThreadSafe<ProcessMemoryDumpHolder>; | |
| 97 virtual ~ProcessMemoryDumpHolder() {} | |
| 98 DISALLOW_COPY_AND_ASSIGN(ProcessMemoryDumpHolder); | |
| 99 }; | |
| 100 | |
| 101 void FinalizeDumpAndAddToTrace( | |
| 102 const scoped_refptr<ProcessMemoryDumpHolder>& pmd_holder) { | |
| 103 DCHECK_EQ(0, pmd_holder->num_pending_async_requests); | |
| 104 | |
| 105 if (!pmd_holder->task_runner->BelongsToCurrentThread()) { | |
| 106 pmd_holder->task_runner->PostTask( | |
| 107 FROM_HERE, Bind(&FinalizeDumpAndAddToTrace, pmd_holder)); | |
| 108 return; | |
| 109 } | |
| 110 | |
| 111 scoped_refptr<ConvertableToTraceFormat> event_value(new TracedValue()); | |
| 112 pmd_holder->process_memory_dump.AsValueInto( | |
| 113 static_cast<TracedValue*>(event_value.get())); | |
| 114 const char* const event_name = | |
| 115 MemoryDumpTypeToString(pmd_holder->req_args.dump_type); | |
| 116 | |
| 117 TRACE_EVENT_API_ADD_TRACE_EVENT( | |
| 118 TRACE_EVENT_PHASE_MEMORY_DUMP, | |
| 119 TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name, | |
| 120 pmd_holder->req_args.dump_guid, kTraceEventNumArgs, kTraceEventArgNames, | |
| 121 kTraceEventArgTypes, nullptr /* arg_values */, &event_value, | |
| 122 TRACE_EVENT_FLAG_HAS_ID); | |
| 123 | |
| 124 if (!pmd_holder->callback.is_null()) { | |
| 125 pmd_holder->callback.Run(pmd_holder->req_args.dump_guid, true); | |
| 126 pmd_holder->callback.Reset(); | |
| 127 } | |
| 128 } | |
| 129 | 56 |
| 130 void RequestPeriodicGlobalDump() { | 57 void RequestPeriodicGlobalDump() { |
| 131 MemoryDumpManager::GetInstance()->RequestGlobalDump( | 58 MemoryDumpType dump_type = g_periodic_dumps_count == 0 |
| 132 MemoryDumpType::PERIODIC_INTERVAL); | 59 ? MemoryDumpType::PERIODIC_INTERVAL_WITH_MMAPS |
| 60 : MemoryDumpType::PERIODIC_INTERVAL; |
| 61 if (++g_periodic_dumps_count == kHeavyMmapsDumpsRate) |
| 62 g_periodic_dumps_count = 0; |
| 63 |
| 64 MemoryDumpManager::GetInstance()->RequestGlobalDump(dump_type); |
| 133 } | 65 } |
| 134 | 66 |
| 135 } // namespace | 67 } // namespace |
| 136 | 68 |
| 137 // static | 69 // static |
| 138 const char* const MemoryDumpManager::kTraceCategoryForTesting = kTraceCategory; | 70 const char* const MemoryDumpManager::kTraceCategoryForTesting = kTraceCategory; |
| 139 | 71 |
| 140 // static | 72 // static |
| 73 const uint64 MemoryDumpManager::kInvalidTracingProcessId = 0; |
| 74 |
| 75 // static |
| 76 const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3; |
| 77 |
| 78 // static |
| 141 MemoryDumpManager* MemoryDumpManager::GetInstance() { | 79 MemoryDumpManager* MemoryDumpManager::GetInstance() { |
| 142 if (g_instance_for_testing) | 80 if (g_instance_for_testing) |
| 143 return g_instance_for_testing; | 81 return g_instance_for_testing; |
| 144 | 82 |
| 145 return Singleton<MemoryDumpManager, | 83 return Singleton<MemoryDumpManager, |
| 146 LeakySingletonTraits<MemoryDumpManager>>::get(); | 84 LeakySingletonTraits<MemoryDumpManager>>::get(); |
| 147 } | 85 } |
| 148 | 86 |
| 149 // static | 87 // static |
| 150 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) { | 88 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) { |
| 151 if (instance) | 89 if (instance) |
| 152 instance->skip_core_dumpers_auto_registration_for_testing_ = true; | 90 instance->skip_core_dumpers_auto_registration_for_testing_ = true; |
| 153 g_instance_for_testing = instance; | 91 g_instance_for_testing = instance; |
| 154 } | 92 } |
| 155 | 93 |
| 156 MemoryDumpManager::MemoryDumpManager() | 94 MemoryDumpManager::MemoryDumpManager() |
| 157 : delegate_(nullptr), | 95 : did_unregister_dump_provider_(false), |
| 96 delegate_(nullptr), |
| 158 memory_tracing_enabled_(0), | 97 memory_tracing_enabled_(0), |
| 98 tracing_process_id_(kInvalidTracingProcessId), |
| 159 skip_core_dumpers_auto_registration_for_testing_(false) { | 99 skip_core_dumpers_auto_registration_for_testing_(false) { |
| 160 g_next_guid.GetNext(); // Make sure that first guid is not zero. | 100 g_next_guid.GetNext(); // Make sure that first guid is not zero. |
| 161 } | 101 } |
| 162 | 102 |
| 163 MemoryDumpManager::~MemoryDumpManager() { | 103 MemoryDumpManager::~MemoryDumpManager() { |
| 164 base::trace_event::TraceLog::GetInstance()->RemoveEnabledStateObserver(this); | 104 base::trace_event::TraceLog::GetInstance()->RemoveEnabledStateObserver(this); |
| 165 } | 105 } |
| 166 | 106 |
| 167 void MemoryDumpManager::Initialize() { | 107 void MemoryDumpManager::Initialize() { |
| 168 TRACE_EVENT0(kTraceCategory, "init"); // Add to trace-viewer category list. | 108 TRACE_EVENT0(kTraceCategory, "init"); // Add to trace-viewer category list. |
| 169 trace_event::TraceLog::GetInstance()->AddEnabledStateObserver(this); | 109 trace_event::TraceLog::GetInstance()->AddEnabledStateObserver(this); |
| 170 | 110 |
| 171 if (skip_core_dumpers_auto_registration_for_testing_) | 111 if (skip_core_dumpers_auto_registration_for_testing_) |
| 172 return; | 112 return; |
| 173 | 113 |
| 174 // Enable the core dump providers. | 114 // Enable the core dump providers. |
| 175 #if !defined(OS_NACL) | 115 #if !defined(OS_NACL) |
| 176 RegisterDumpProvider(ProcessMemoryTotalsDumpProvider::GetInstance()); | 116 RegisterDumpProvider(ProcessMemoryTotalsDumpProvider::GetInstance()); |
| 177 #endif | 117 #endif |
| 178 | 118 |
| 179 #if (defined(OS_LINUX) && !defined(FNL_MUSL)) || defined(OS_ANDROID) | 119 #if (defined(OS_LINUX) && !defined(FNL_MUSL)) || defined(OS_ANDROID) |
| 180 RegisterDumpProvider(ProcessMemoryMapsDumpProvider::GetInstance()); | 120 g_mmaps_dump_provider = ProcessMemoryMapsDumpProvider::GetInstance(); |
| 121 RegisterDumpProvider(g_mmaps_dump_provider); |
| 181 RegisterDumpProvider(MallocDumpProvider::GetInstance()); | 122 RegisterDumpProvider(MallocDumpProvider::GetInstance()); |
| 182 #endif | 123 #endif |
| 183 | 124 |
| 184 #if defined(OS_ANDROID) | 125 #if defined(OS_ANDROID) |
| 185 RegisterDumpProvider(JavaHeapDumpProvider::GetInstance()); | 126 RegisterDumpProvider(JavaHeapDumpProvider::GetInstance()); |
| 186 #endif | 127 #endif |
| 187 | 128 |
| 188 #if defined(OS_WIN) | 129 #if defined(OS_WIN) |
| 189 RegisterDumpProvider(WinHeapDumpProvider::GetInstance()); | 130 RegisterDumpProvider(WinHeapDumpProvider::GetInstance()); |
| 190 #endif | 131 #endif |
| 191 } | 132 } |
| 192 | 133 |
| 193 void MemoryDumpManager::SetDelegate(MemoryDumpManagerDelegate* delegate) { | 134 void MemoryDumpManager::SetDelegate(MemoryDumpManagerDelegate* delegate) { |
| 194 AutoLock lock(lock_); | 135 AutoLock lock(lock_); |
| 195 DCHECK_EQ(static_cast<MemoryDumpManagerDelegate*>(nullptr), delegate_); | 136 DCHECK_EQ(static_cast<MemoryDumpManagerDelegate*>(nullptr), delegate_); |
| 196 delegate_ = delegate; | 137 delegate_ = delegate; |
| 197 } | 138 } |
| 198 | 139 |
| 199 void MemoryDumpManager::RegisterDumpProvider( | 140 void MemoryDumpManager::RegisterDumpProvider( |
| 200 MemoryDumpProvider* mdp, | 141 MemoryDumpProvider* mdp, |
| 201 const scoped_refptr<SingleThreadTaskRunner>& task_runner) { | 142 const scoped_refptr<SingleThreadTaskRunner>& task_runner) { |
| 202 MemoryDumpProviderInfo mdp_info(task_runner); | 143 MemoryDumpProviderInfo mdp_info(mdp, task_runner); |
| 203 AutoLock lock(lock_); | 144 AutoLock lock(lock_); |
| 204 dump_providers_.insert(std::make_pair(mdp, mdp_info)); | 145 dump_providers_.insert(mdp_info); |
| 205 } | 146 } |
| 206 | 147 |
| 207 void MemoryDumpManager::RegisterDumpProvider(MemoryDumpProvider* mdp) { | 148 void MemoryDumpManager::RegisterDumpProvider(MemoryDumpProvider* mdp) { |
| 208 RegisterDumpProvider(mdp, nullptr); | 149 RegisterDumpProvider(mdp, nullptr); |
| 209 } | 150 } |
| 210 | 151 |
| 211 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { | 152 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { |
| 212 AutoLock lock(lock_); | 153 AutoLock lock(lock_); |
| 213 | 154 |
| 214 auto it = dump_providers_.find(mdp); | 155 auto mdp_iter = dump_providers_.begin(); |
| 215 if (it == dump_providers_.end()) | 156 for (; mdp_iter != dump_providers_.end(); ++mdp_iter) { |
| 157 if (mdp_iter->dump_provider == mdp) |
| 158 break; |
| 159 } |
| 160 |
| 161 if (mdp_iter == dump_providers_.end()) |
| 216 return; | 162 return; |
| 217 | 163 |
| 218 const MemoryDumpProviderInfo& mdp_info = it->second; | |
| 219 // Unregistration of a MemoryDumpProvider while tracing is ongoing is safe | 164 // Unregistration of a MemoryDumpProvider while tracing is ongoing is safe |
| 220 // only if the MDP has specified a thread affinity (via task_runner()) AND | 165 // only if the MDP has specified a thread affinity (via task_runner()) AND |
| 221 // the unregistration happens on the same thread (so the MDP cannot unregister | 166 // the unregistration happens on the same thread (so the MDP cannot unregister |
| 222 // and OnMemoryDump() at the same time). | 167 // and OnMemoryDump() at the same time). |
| 223 // Otherwise, it is not possible to guarantee that its unregistration is | 168 // Otherwise, it is not possible to guarantee that its unregistration is |
| 224 // race-free. If you hit this DCHECK, your MDP has a bug. | 169 // race-free. If you hit this DCHECK, your MDP has a bug. |
| 225 DCHECK_IMPLIES( | 170 DCHECK_IMPLIES( |
| 226 subtle::NoBarrier_Load(&memory_tracing_enabled_), | 171 subtle::NoBarrier_Load(&memory_tracing_enabled_), |
| 227 mdp_info.task_runner && mdp_info.task_runner->BelongsToCurrentThread()) | 172 mdp_iter->task_runner && mdp_iter->task_runner->BelongsToCurrentThread()) |
| 228 << "The MemoryDumpProvider attempted to unregister itself in a racy way. " | 173 << "The MemoryDumpProvider attempted to unregister itself in a racy way. " |
| 229 << " Please file a crbug."; | 174 << "Please file a crbug."; |
| 230 | 175 |
| 231 // Remove from the enabled providers list. This is to deal with the case that | 176 dump_providers_.erase(mdp_iter); |
| 232 // UnregisterDumpProvider is called while the trace is enabled. | 177 did_unregister_dump_provider_ = true; |
| 233 dump_providers_.erase(it); | |
| 234 } | 178 } |
| 235 | 179 |
| 236 void MemoryDumpManager::RequestGlobalDump( | 180 void MemoryDumpManager::RequestGlobalDump( |
| 237 MemoryDumpType dump_type, | 181 MemoryDumpType dump_type, |
| 238 const MemoryDumpCallback& callback) { | 182 const MemoryDumpCallback& callback) { |
| 239 // Bail out immediately if tracing is not enabled at all. | 183 // Bail out immediately if tracing is not enabled at all. |
| 240 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) | 184 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) |
| 241 return; | 185 return; |
| 242 | 186 |
| 243 const uint64 guid = | 187 const uint64 guid = |
| (...skipping 14 matching lines...) |
| 258 delegate->RequestGlobalMemoryDump(args, callback); | 202 delegate->RequestGlobalMemoryDump(args, callback); |
| 259 } else if (!callback.is_null()) { | 203 } else if (!callback.is_null()) { |
| 260 callback.Run(guid, false /* success */); | 204 callback.Run(guid, false /* success */); |
| 261 } | 205 } |
| 262 } | 206 } |
| 263 | 207 |
| 264 void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type) { | 208 void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type) { |
| 265 RequestGlobalDump(dump_type, MemoryDumpCallback()); | 209 RequestGlobalDump(dump_type, MemoryDumpCallback()); |
| 266 } | 210 } |
| 267 | 211 |
| 268 // Creates a memory dump for the current process and appends it to the trace. | |
| 269 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, | 212 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, |
| 270 const MemoryDumpCallback& callback) { | 213 const MemoryDumpCallback& callback) { |
| 271 scoped_refptr<ProcessMemoryDumpHolder> pmd_holder( | 214 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; |
| 272 new ProcessMemoryDumpHolder(args, session_state_, callback)); | |
| 273 ProcessMemoryDump* pmd = &pmd_holder->process_memory_dump; | |
| 274 bool did_any_provider_dump = false; | |
| 275 | |
| 276 // Iterate over the active dump providers and invoke OnMemoryDump(pmd). | |
| 277 // The MDM guarantees linearity (at most one MDP is active within one | |
| 278 // process) and thread-safety (MDM enforces the right locking when entering / | |
| 279 // leaving the MDP.OnMemoryDump() call). This is to simplify the clients' | |
| 280 // design | |
| 281 // and not let the MDPs worry about locking. | |
| 282 // As regards thread affinity, depending on the MDP configuration (see | |
| 283 // memory_dump_provider.h), the OnMemoryDump() invocation can happen: | |
| 284 // - Synchronousy on the MDM thread, when MDP.task_runner() is not set. | |
| 285 // - Posted on MDP.task_runner(), when MDP.task_runner() is set. | |
| 286 { | 215 { |
| 287 AutoLock lock(lock_); | 216 AutoLock lock(lock_); |
| 288 for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) { | 217 did_unregister_dump_provider_ = false; |
| 289 MemoryDumpProvider* mdp = it->first; | 218 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( |
| 290 MemoryDumpProviderInfo* mdp_info = &it->second; | 219 args, dump_providers_.begin(), session_state_, callback)); |
| 291 if (mdp_info->disabled) | 220 } |
| 292 continue; | 221 |
| 293 if (mdp_info->task_runner) { | 222 // Start the thread hop. |dump_providers_| are kept sorted by thread, so |
| 294 // The OnMemoryDump() call must be posted. | 223 // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread |
| 295 bool did_post_async_task = mdp_info->task_runner->PostTask( | 224 // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()). |
| 296 FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump, | 225 ContinueAsyncProcessDump(pmd_async_state.Pass()); |
| 297 Unretained(this), Unretained(mdp), pmd_holder)); | 226 } |
| 298 // The thread underlying the TaskRunner might have gone away. | 227 |
| 299 if (did_post_async_task) | 228 // At most one ContinueAsyncProcessDump() can be active at any time for a given |
| 300 ++pmd_holder->num_pending_async_requests; | 229 // PMD, regardless of the status of the |lock_|. |lock_| is used here purely to |
| 301 } else { | 230 // ensure consistency w.r.t. (un)registrations of |dump_providers_|. |
| 302 // Invoke the dump provider synchronously. | 231 // The linearization of dump providers' OnMemoryDump invocations is achieved by |
| 303 did_any_provider_dump |= InvokeDumpProviderLocked(mdp, pmd); | 232 // means of subsequent PostTask(s). |
| 233 // |
| 234 // 1) Prologue: |
| 235 // - Check if the dump provider is disabled, if so skip the dump. |
| 236 // - Check if we are on the right thread. If not hop and continue there. |
| 237 // 2) Invoke the dump provider's OnMemoryDump() (unless skipped). |
| 238 // 3) Epilogue: |
| 239 // - Unregister the dump provider if it failed too many times consecutively. |
| 240 // - Advance the |next_dump_provider| iterator to the next dump provider. |
| 241 // - If this was the last hop, create a trace event, add it to the trace |
| 242 // and finalize (invoke callback). |
| 243 |
| 244 void MemoryDumpManager::ContinueAsyncProcessDump( |
| 245 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { |
| 246 // Initalizes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs |
| 247 // in the PostTask below don't end up registering their own dump providers |
| 248 // (for discounting trace memory overhead) while holding the |lock_|. |
| 249 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); |
| 250 |
| 251 // DO NOT put any LOG() statement in the locked sections, as in some contexts |
| 252 // (GPU process) LOG() ends up performing PostTask/IPCs. |
| 253 MemoryDumpProvider* mdp; |
| 254 bool skip_dump = false; |
| 255 { |
| 256 AutoLock lock(lock_); |
| 257 // In the unlikely event that a dump provider was unregistered while |
| 258 // dumping, abort the dump, as that would make |next_dump_provider| invalid. |
| 259 // Registration, on the other hand, is safe as per std::set<> contract. |
| 260 if (did_unregister_dump_provider_) { |
| 261 return AbortDumpLocked(pmd_async_state->callback, |
| 262 pmd_async_state->task_runner, |
| 263 pmd_async_state->req_args.dump_guid); |
| 264 } |
| 265 |
| 266 auto* mdp_info = &*pmd_async_state->next_dump_provider; |
| 267 mdp = mdp_info->dump_provider; |
| 268 if (mdp_info->disabled) { |
| 269 skip_dump = true; |
| 270 } else if (mdp == g_mmaps_dump_provider && |
| 271 pmd_async_state->req_args.dump_type != |
| 272 MemoryDumpType::PERIODIC_INTERVAL_WITH_MMAPS) { |
| 273 // Mmaps dumping is very heavyweight and cannot be performed at the same |
| | 274 // rate as other dumps. TODO(primiano): this is a hack and should be |
| 275 // cleaned up as part of crbug.com/499731. |
| 276 skip_dump = true; |
| 277 } else if (mdp_info->task_runner && |
| 278 !mdp_info->task_runner->BelongsToCurrentThread()) { |
| 279 // It's time to hop onto another thread. |
| 280 |
| | 281 // Copy the callback + arguments just for the unlikely case in which |
| | 282 // PostTask fails. In that case the Bind helper will destroy the |
| 283 // pmd_async_state and we must keep a copy of the fields to notify the |
| 284 // abort. |
| 285 MemoryDumpCallback callback = pmd_async_state->callback; |
| 286 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = |
| 287 pmd_async_state->task_runner; |
| 288 const uint64 dump_guid = pmd_async_state->req_args.dump_guid; |
| 289 |
| 290 const bool did_post_task = mdp_info->task_runner->PostTask( |
| 291 FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump, |
| 292 Unretained(this), Passed(pmd_async_state.Pass()))); |
| 293 if (did_post_task) |
| 294 return; |
| 295 |
| 296 // The thread is gone. At this point the best thing we can do is to |
| 297 // disable the dump provider and abort this dump. |
| 298 mdp_info->disabled = true; |
| 299 return AbortDumpLocked(callback, callback_task_runner, dump_guid); |
| 300 } |
| 301 } // AutoLock(lock_) |
| 302 |
| 303 // Invoke the dump provider without holding the |lock_|. |
| 304 bool finalize = false; |
| 305 bool dump_successful = false; |
| 306 if (!skip_dump) |
| 307 dump_successful = mdp->OnMemoryDump(&pmd_async_state->process_memory_dump); |
| 308 |
| 309 { |
| 310 AutoLock lock(lock_); |
| 311 if (did_unregister_dump_provider_) { |
| 312 return AbortDumpLocked(pmd_async_state->callback, |
| 313 pmd_async_state->task_runner, |
| 314 pmd_async_state->req_args.dump_guid); |
| 315 } |
| 316 auto* mdp_info = &*pmd_async_state->next_dump_provider; |
| 317 if (dump_successful) { |
| 318 mdp_info->consecutive_failures = 0; |
| 319 } else if (!skip_dump) { |
| 320 ++mdp_info->consecutive_failures; |
| 321 if (mdp_info->consecutive_failures >= kMaxConsecutiveFailuresCount) { |
| 322 mdp_info->disabled = true; |
| 304 } | 323 } |
| 305 } | 324 } |
| 306 } // AutoLock | 325 ++pmd_async_state->next_dump_provider; |
| 326 finalize = pmd_async_state->next_dump_provider == dump_providers_.end(); |
| 327 } |
| 307 | 328 |
| 308 // If at least one synchronous provider did dump and there are no pending | 329 if (!skip_dump && !dump_successful) { |
| 309 // asynchronous requests, add the dump to the trace and invoke the callback | 330 LOG(ERROR) << "A memory dumper failed, possibly due to sandboxing " |
| 310 // straight away (FinalizeDumpAndAddToTrace() takes care of the callback). | 331 "(crbug.com/461788). Disabling dumper for current process. " |
| 311 if (did_any_provider_dump && pmd_holder->num_pending_async_requests == 0) | 332 "Try restarting chrome with the --no-sandbox switch."; |
| 312 FinalizeDumpAndAddToTrace(pmd_holder); | 333 } |
| 334 |
| 335 if (finalize) |
| 336 return FinalizeDumpAndAddToTrace(pmd_async_state.Pass()); |
| 337 |
| 338 ContinueAsyncProcessDump(pmd_async_state.Pass()); |
| 313 } | 339 } |
| 314 | 340 |
| 315 // Invokes the MemoryDumpProvider.OnMemoryDump(), taking care of the fail-safe | 341 // static |
| 316 // logic which disables the dumper when failing (crbug.com/461788). | 342 void MemoryDumpManager::FinalizeDumpAndAddToTrace( |
| 317 bool MemoryDumpManager::InvokeDumpProviderLocked(MemoryDumpProvider* mdp, | 343 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { |
| 318 ProcessMemoryDump* pmd) { | 344 if (!pmd_async_state->task_runner->BelongsToCurrentThread()) { |
| 319 lock_.AssertAcquired(); | 345 scoped_refptr<SingleThreadTaskRunner> task_runner = |
| 320 bool dump_successful = mdp->OnMemoryDump(pmd); | 346 pmd_async_state->task_runner; |
| 321 if (!dump_successful) { | 347 task_runner->PostTask(FROM_HERE, |
| 322 LOG(ERROR) << "The memory dumper failed, possibly due to sandboxing " | 348 Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace, |
| 323 "(crbug.com/461788), disabling it for current process. Try " | 349 Passed(pmd_async_state.Pass()))); |
| 324 "restarting chrome with the --no-sandbox switch."; | 350 return; |
| 325 dump_providers_.find(mdp)->second.disabled = true; | |
| 326 } | 351 } |
| 327 return dump_successful; | 352 |
| 353 scoped_refptr<ConvertableToTraceFormat> event_value(new TracedValue()); |
| 354 pmd_async_state->process_memory_dump.AsValueInto( |
| 355 static_cast<TracedValue*>(event_value.get())); |
| 356 const char* const event_name = |
| 357 MemoryDumpTypeToString(pmd_async_state->req_args.dump_type); |
| 358 |
| 359 TRACE_EVENT_API_ADD_TRACE_EVENT( |
| 360 TRACE_EVENT_PHASE_MEMORY_DUMP, |
| 361 TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name, |
| 362 pmd_async_state->req_args.dump_guid, kTraceEventNumArgs, |
| 363 kTraceEventArgNames, kTraceEventArgTypes, nullptr /* arg_values */, |
| 364 &event_value, TRACE_EVENT_FLAG_HAS_ID); |
| 365 |
| 366 if (!pmd_async_state->callback.is_null()) { |
| 367 pmd_async_state->callback.Run(pmd_async_state->req_args.dump_guid, |
| 368 true /* success */); |
| 369 pmd_async_state->callback.Reset(); |
| 370 } |
| 328 } | 371 } |
| 329 | 372 |
| 330 // This is posted to arbitrary threads as a continuation of CreateProcessDump(), | 373 // static |
| 331 // when one or more MemoryDumpProvider(s) require the OnMemoryDump() call to | 374 void MemoryDumpManager::AbortDumpLocked( |
| 332 // happen on a different thread. | 375 MemoryDumpCallback callback, |
| 333 void MemoryDumpManager::ContinueAsyncProcessDump( | 376 scoped_refptr<SingleThreadTaskRunner> task_runner, |
| 334 MemoryDumpProvider* mdp, | 377 uint64 dump_guid) { |
| 335 scoped_refptr<ProcessMemoryDumpHolder> pmd_holder) { | 378 if (callback.is_null()) |
| 336 bool should_finalize_dump = false; | 379 return; // There is nothing to NACK. |
| 337 { | |
| 338 // The lock here is to guarantee that different asynchronous dumps on | |
| 339 // different threads are still serialized, so that the MemoryDumpProvider | |
| 340 // has a consistent view of the |pmd| argument passed. | |
| 341 AutoLock lock(lock_); | |
| 342 ProcessMemoryDump* pmd = &pmd_holder->process_memory_dump; | |
| 343 | 380 |
| 344 // Check if the MemoryDumpProvider is still there. It might have been | 381 // Post the callback even if we are already on the right thread to avoid |
| 345 // destroyed and unregistered while hopping threads. | 382 // invoking the callback while holding the |lock_|. |
| 346 if (dump_providers_.count(mdp)) | 383 task_runner->PostTask(FROM_HERE, |
| 347 InvokeDumpProviderLocked(mdp, pmd); | 384 Bind(callback, dump_guid, false /* success */)); |
| 348 | |
| 349 // Finalize the dump appending it to the trace if this was the last | |
| 350 // asynchronous request pending. | |
| 351 --pmd_holder->num_pending_async_requests; | |
| 352 if (pmd_holder->num_pending_async_requests == 0) | |
| 353 should_finalize_dump = true; | |
| 354 } // AutoLock(lock_) | |
| 355 | |
| 356 if (should_finalize_dump) | |
| 357 FinalizeDumpAndAddToTrace(pmd_holder); | |
| 358 } | 385 } |
| 359 | 386 |
| 360 void MemoryDumpManager::OnTraceLogEnabled() { | 387 void MemoryDumpManager::OnTraceLogEnabled() { |
| 361 // TODO(primiano): at this point we query TraceLog::GetCurrentCategoryFilter | 388 // TODO(primiano): at this point we query TraceLog::GetCurrentCategoryFilter |
| 362 // to figure out (and cache) which dumpers should be enabled or not. | 389 // to figure out (and cache) which dumpers should be enabled or not. |
| 364 // For the moment piggyback everything on the generic "memory" category. | 390 // For the moment piggyback everything on the generic "memory" category. |
| 364 bool enabled; | 391 bool enabled; |
| 365 TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled); | 392 TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled); |
| 366 | 393 |
| | 394 // Initialize the TraceLog for the current thread. This prevents the TraceLog |
| | 395 // memory dump provider from being registered lazily inside the PostTask() |
| | 396 // below while the |lock_| is taken. |
| 397 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); |
| 398 |
| 367 AutoLock lock(lock_); | 399 AutoLock lock(lock_); |
| 368 | 400 |
| 369 // There is no point starting the tracing without a delegate. | 401 // There is no point starting the tracing without a delegate. |
| 370 if (!enabled || !delegate_) { | 402 if (!enabled || !delegate_) { |
| 371 // Disable all the providers. | 403 // Disable all the providers. |
| 372 for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) | 404 for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) |
| 373 it->second.disabled = true; | 405 it->disabled = true; |
| 374 return; | 406 return; |
| 375 } | 407 } |
| 376 | 408 |
| 377 session_state_ = new MemoryDumpSessionState(); | 409 session_state_ = new MemoryDumpSessionState(); |
| 378 for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) | 410 for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) { |
| 379 it->second.disabled = false; | 411 it->disabled = false; |
| 412 it->consecutive_failures = 0; |
| 413 } |
| 380 | 414 |
| 381 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); | 415 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); |
| 382 | 416 |
| 383 if (delegate_->IsCoordinatorProcess()) { | 417 if (delegate_->IsCoordinatorProcess()) { |
| 418 g_periodic_dumps_count = 0; |
| 384 periodic_dump_timer_.Start(FROM_HERE, | 419 periodic_dump_timer_.Start(FROM_HERE, |
| 385 TimeDelta::FromSeconds(kDumpIntervalSeconds), | 420 TimeDelta::FromMilliseconds(kDumpIntervalMs), |
| 386 base::Bind(&RequestPeriodicGlobalDump)); | 421 base::Bind(&RequestPeriodicGlobalDump)); |
| 387 } | 422 } |
| 388 } | 423 } |
| 389 | 424 |
| 390 void MemoryDumpManager::OnTraceLogDisabled() { | 425 void MemoryDumpManager::OnTraceLogDisabled() { |
| 391 AutoLock lock(lock_); | 426 AutoLock lock(lock_); |
| 392 periodic_dump_timer_.Stop(); | 427 periodic_dump_timer_.Stop(); |
| 393 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); | 428 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); |
| 394 session_state_ = nullptr; | 429 session_state_ = nullptr; |
| 395 } | 430 } |
| 396 | 431 |
| 432 // static |
| 433 uint64 MemoryDumpManager::ChildProcessIdToTracingProcessId( |
| 434 int child_process_id) { |
| 435 return static_cast<uint64>( |
| 436 Hash(reinterpret_cast<const char*>(&child_process_id), |
| 437 sizeof(child_process_id))) + |
| 438 1; |
| 439 } |
| 440 |
| 397 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( | 441 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( |
| 442 MemoryDumpProvider* dump_provider, |
| 398 const scoped_refptr<SingleThreadTaskRunner>& task_runner) | 443 const scoped_refptr<SingleThreadTaskRunner>& task_runner) |
| 399 : task_runner(task_runner), disabled(false) { | 444 : dump_provider(dump_provider), |
| 445 task_runner(task_runner), |
| 446 consecutive_failures(0), |
| 447 disabled(false) {} |
| 448 |
| 449 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() { |
| 400 } | 450 } |
| 401 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() { | 451 |
| 452 bool MemoryDumpManager::MemoryDumpProviderInfo::operator<( |
| 453 const MemoryDumpProviderInfo& other) const { |
| 454 if (task_runner == other.task_runner) |
| 455 return dump_provider < other.dump_provider; |
| 456 return task_runner < other.task_runner; |
| 457 } |
| 458 |
| 459 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState( |
| 460 MemoryDumpRequestArgs req_args, |
| 461 MemoryDumpProviderInfoSet::iterator next_dump_provider, |
| 462 const scoped_refptr<MemoryDumpSessionState>& session_state, |
| 463 MemoryDumpCallback callback) |
| 464 : process_memory_dump(session_state), |
| 465 req_args(req_args), |
| 466 next_dump_provider(next_dump_provider), |
| 467 callback(callback), |
| 468 task_runner(MessageLoop::current()->task_runner()) {} |
| 469 |
| 470 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() { |
| 402 } | 471 } |
| 403 | 472 |
| 404 } // namespace trace_event | 473 } // namespace trace_event |
| 405 } // namespace base | 474 } // namespace base |
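
A few standalone sketches may help review the new logic. These are simplified C++ stand-ins, not the Chromium code itself; names such as `NextPeriodicDumpKind` and `DumpKind` are invented for illustration. First, the periodic-dump throttling: with the 250 ms timer, every kHeavyMmapsDumpsRate-th tick (including the very first) is promoted to a heavyweight mmaps dump, i.e. one every 2 s.

```cpp
#include <cstdint>
#include <iostream>

// Stand-ins for the two dump types picked by the timer callback.
enum class DumpKind { kLight, kHeavyWithMmaps };

constexpr uint32_t kHeavyMmapsDumpsRate = 8;  // 250 ms * 8 = 2000 ms.

// Mirrors RequestPeriodicGlobalDump(): the first tick and every
// kHeavyMmapsDumpsRate-th tick after it request the heavy mmaps dump.
DumpKind NextPeriodicDumpKind() {
  static uint32_t periodic_dumps_count = 0;
  const DumpKind kind = (periodic_dumps_count == 0)
                            ? DumpKind::kHeavyWithMmaps
                            : DumpKind::kLight;
  if (++periodic_dumps_count == kHeavyMmapsDumpsRate)
    periodic_dumps_count = 0;
  return kind;
}

int main() {
  // Ticks 0, 8, 16, ... are heavy: one mmaps dump every 2 s,
  // lightweight dumps in between.
  for (int tick = 0; tick < 17; ++tick) {
    std::cout << "tick " << tick << ": "
              << (NextPeriodicDumpKind() == DumpKind::kHeavyWithMmaps
                      ? "PERIODIC_INTERVAL_WITH_MMAPS"
                      : "PERIODIC_INTERVAL")
              << "\n";
  }
  return 0;
}
```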
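Second, the new MemoryDumpProviderInfo::operator< keeps |dump_providers_| sorted primarily by task runner, which is what lets ContinueAsyncProcessDump hop threads at most once per distinct thread. A rough standalone model, with small integer ids standing in for SingleThreadTaskRunner pointers and strings for provider pointers:

```cpp
#include <iostream>
#include <set>
#include <string>

// Simplified stand-in: the real comparator orders by the task runner
// pointer first, then by the MemoryDumpProvider pointer.
struct ProviderInfo {
  std::string name;    // stand-in for MemoryDumpProvider*
  int task_runner_id;  // stand-in for SingleThreadTaskRunner*

  bool operator<(const ProviderInfo& other) const {
    if (task_runner_id == other.task_runner_id)
      return name < other.name;
    return task_runner_id < other.task_runner_id;
  }
};

int main() {
  // Providers registered in arbitrary order...
  std::set<ProviderInfo> dump_providers = {{"malloc", 2},
                                           {"process_totals", 1},
                                           {"mmaps", 2},
                                           {"java_heap", 1}};
  // ...but iteration visits them grouped by thread, so the dump loop
  // needs at most one PostTask (thread hop) per task runner rather than
  // one per provider.
  for (const auto& p : dump_providers)
    std::cout << "task runner " << p.task_runner_id << " -> " << p.name
              << "\n";
  return 0;
}
```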
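Third, AbortDumpLocked posts the failure callback instead of invoking it inline, even when already on the right thread, so the callback never runs while |lock_| is held. A toy model of that pattern; the global queue here is an invented stand-in for a task runner:

```cpp
#include <cstdint>
#include <functional>
#include <iostream>
#include <mutex>
#include <queue>

// Minimal single-thread "task runner" stand-in.
std::queue<std::function<void()>> task_queue;

void PostTask(std::function<void()> task) {
  task_queue.push(std::move(task));
}

std::mutex lock;  // plays the role of |lock_|

void AbortDump(std::function<void(uint64_t, bool)> callback, uint64_t guid) {
  std::lock_guard<std::mutex> guard(lock);
  // ...bookkeeping that genuinely needs the lock would go here...
  // NACK the request later, outside the critical section: if the callback
  // re-entered the manager (e.g. requested another dump) while the lock
  // was held, it would deadlock.
  PostTask([callback, guid] { callback(guid, /*success=*/false); });
}

int main() {
  AbortDump(
      [](uint64_t guid, bool ok) {
        std::cout << "dump " << guid << (ok ? " ok" : " aborted") << "\n";
      },
      1234);
  // Drain the queue after the lock has been released.
  while (!task_queue.empty()) {
    task_queue.front()();
    task_queue.pop();
  }
  return 0;
}
```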
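Finally, ChildProcessIdToTracingProcessId hashes the child process id to a 32-bit value and adds 1, so the result can never collide with kInvalidTracingProcessId (0), even when the hash itself is 0 or 0xffffffff. A sketch using std::hash as a stand-in for base::Hash:

```cpp
#include <cstdint>
#include <functional>
#include <iostream>

constexpr uint64_t kInvalidTracingProcessId = 0;

// Truncate the hash to 32 bits (mirroring base::Hash's 32-bit result),
// widen to 64 bits, then add 1: the sum is at most 0x100000000 and
// therefore never equal to kInvalidTracingProcessId.
uint64_t ChildProcessIdToTracingProcessId(int child_process_id) {
  const uint32_t hash =
      static_cast<uint32_t>(std::hash<int>()(child_process_id));
  return static_cast<uint64_t>(hash) + 1;
}

int main() {
  const int ids[] = {0, 1, 42};
  for (int id : ids) {
    std::cout << "child " << id << " -> tracing id "
              << ChildProcessIdToTracingProcessId(id) << "\n";
  }
  return 0;
}
```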