Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 | 8 |
| 9 #include "base/atomic_sequence_num.h" | 9 #include "base/atomic_sequence_num.h" |
| 10 #include "base/command_line.h" | 10 #include "base/command_line.h" |
| (...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 86 } | 86 } |
| 87 | 87 |
| 88 // static | 88 // static |
| 89 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) { | 89 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) { |
| 90 if (instance) | 90 if (instance) |
| 91 instance->skip_core_dumpers_auto_registration_for_testing_ = true; | 91 instance->skip_core_dumpers_auto_registration_for_testing_ = true; |
| 92 g_instance_for_testing = instance; | 92 g_instance_for_testing = instance; |
| 93 } | 93 } |
| 94 | 94 |
| 95 MemoryDumpManager::MemoryDumpManager() | 95 MemoryDumpManager::MemoryDumpManager() |
| 96 : did_unregister_dump_provider_(false), | 96 : delegate_(nullptr), |
| 97 delegate_(nullptr), | |
| 98 memory_tracing_enabled_(0), | 97 memory_tracing_enabled_(0), |
| 99 tracing_process_id_(kInvalidTracingProcessId), | 98 tracing_process_id_(kInvalidTracingProcessId), |
| 100 system_allocator_pool_name_(nullptr), | 99 system_allocator_pool_name_(nullptr), |
| 101 skip_core_dumpers_auto_registration_for_testing_(false) { | 100 skip_core_dumpers_auto_registration_for_testing_(false) { |
| 102 g_next_guid.GetNext(); // Make sure that first guid is not zero. | 101 g_next_guid.GetNext(); // Make sure that first guid is not zero. |
| 103 } | 102 } |
| 104 | 103 |
| 105 MemoryDumpManager::~MemoryDumpManager() { | 104 MemoryDumpManager::~MemoryDumpManager() { |
| 106 base::trace_event::TraceLog::GetInstance()->RemoveEnabledStateObserver(this); | 105 base::trace_event::TraceLog::GetInstance()->RemoveEnabledStateObserver(this); |
| 107 } | 106 } |
| (...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 145 AutoLock lock(lock_); | 144 AutoLock lock(lock_); |
| 146 DCHECK_EQ(static_cast<MemoryDumpManagerDelegate*>(nullptr), delegate_); | 145 DCHECK_EQ(static_cast<MemoryDumpManagerDelegate*>(nullptr), delegate_); |
| 147 delegate_ = delegate; | 146 delegate_ = delegate; |
| 148 } | 147 } |
| 149 | 148 |
| 150 void MemoryDumpManager::RegisterDumpProvider( | 149 void MemoryDumpManager::RegisterDumpProvider( |
| 151 MemoryDumpProvider* mdp, | 150 MemoryDumpProvider* mdp, |
| 152 const scoped_refptr<SingleThreadTaskRunner>& task_runner) { | 151 const scoped_refptr<SingleThreadTaskRunner>& task_runner) { |
| 153 MemoryDumpProviderInfo mdp_info(mdp, task_runner); | 152 MemoryDumpProviderInfo mdp_info(mdp, task_runner); |
| 154 AutoLock lock(lock_); | 153 AutoLock lock(lock_); |
| 155 dump_providers_.insert(mdp_info); | 154 auto iter_new = dump_providers_.insert(mdp_info); |
| 155 | |
| 156 // If there was a previous entry, replace it with the new one. | |
|
Primiano Tucci (use gerrit)
2015/08/21 09:00:31
Is this really necessary? To me the fact that some
Ruud van Asseldonk
2015/08/21 10:51:40
If the provider registers itself twice, that is a
| |
| 157 if (!iter_new.second) { | |
| 158 dump_providers_.erase(iter_new.first); | |
| 159 dump_providers_.insert(mdp_info); | |
| 160 } | |
| 156 } | 161 } |
| 157 | 162 |
| 158 void MemoryDumpManager::RegisterDumpProvider(MemoryDumpProvider* mdp) { | 163 void MemoryDumpManager::RegisterDumpProvider(MemoryDumpProvider* mdp) { |
| 159 RegisterDumpProvider(mdp, nullptr); | 164 RegisterDumpProvider(mdp, nullptr); |
| 160 } | 165 } |
| 161 | 166 |
| 162 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { | 167 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { |
| 163 AutoLock lock(lock_); | 168 AutoLock lock(lock_); |
| 164 | 169 |
| 165 auto mdp_iter = dump_providers_.begin(); | 170 auto mdp_iter = dump_providers_.begin(); |
| (...skipping 10 matching lines...) Expand all Loading... | |
| 176 // the unregistration happens on the same thread (so the MDP cannot unregister | 181 // the unregistration happens on the same thread (so the MDP cannot unregister |
| 177 // and OnMemoryDump() at the same time). | 182 // and OnMemoryDump() at the same time). |
| 178 // Otherwise, it is not possible to guarantee that its unregistration is | 183 // Otherwise, it is not possible to guarantee that its unregistration is |
| 179 // race-free. If you hit this DCHECK, your MDP has a bug. | 184 // race-free. If you hit this DCHECK, your MDP has a bug. |
| 180 DCHECK_IMPLIES( | 185 DCHECK_IMPLIES( |
| 181 subtle::NoBarrier_Load(&memory_tracing_enabled_), | 186 subtle::NoBarrier_Load(&memory_tracing_enabled_), |
| 182 mdp_iter->task_runner && mdp_iter->task_runner->BelongsToCurrentThread()) | 187 mdp_iter->task_runner && mdp_iter->task_runner->BelongsToCurrentThread()) |
| 183 << "The MemoryDumpProvider attempted to unregister itself in a racy way. " | 188 << "The MemoryDumpProvider attempted to unregister itself in a racy way. " |
| 184 << "Please file a crbug."; | 189 << "Please file a crbug."; |
| 185 | 190 |
| 186 dump_providers_.erase(mdp_iter); | 191 mdp_iter->unregistered = true; |
| 187 did_unregister_dump_provider_ = true; | |
| 188 } | 192 } |
| 189 | 193 |
| 190 void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type, | 194 void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type, |
| 191 const MemoryDumpArgs& dump_args, | 195 const MemoryDumpArgs& dump_args, |
| 192 const MemoryDumpCallback& callback) { | 196 const MemoryDumpCallback& callback) { |
| 193 // Bail out immediately if tracing is not enabled at all. | 197 // Bail out immediately if tracing is not enabled at all. |
| 194 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) { | 198 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) { |
| 195 if (!callback.is_null()) | 199 if (!callback.is_null()) |
| 196 callback.Run(0u /* guid */, false /* success */); | 200 callback.Run(0u /* guid */, false /* success */); |
| 197 return; | 201 return; |
| (...skipping 23 matching lines...) Expand all Loading... | |
| 221 void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type, | 225 void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type, |
| 222 const MemoryDumpArgs& dump_args) { | 226 const MemoryDumpArgs& dump_args) { |
| 223 RequestGlobalDump(dump_type, dump_args, MemoryDumpCallback()); | 227 RequestGlobalDump(dump_type, dump_args, MemoryDumpCallback()); |
| 224 } | 228 } |
| 225 | 229 |
| 226 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, | 230 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, |
| 227 const MemoryDumpCallback& callback) { | 231 const MemoryDumpCallback& callback) { |
| 228 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; | 232 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; |
| 229 { | 233 { |
| 230 AutoLock lock(lock_); | 234 AutoLock lock(lock_); |
| 231 did_unregister_dump_provider_ = false; | |
| 232 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( | 235 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( |
| 233 args, dump_providers_.begin(), session_state_, callback)); | 236 args, dump_providers_.begin(), session_state_, callback)); |
| 234 } | 237 } |
| 235 | 238 |
| 236 // Start the thread hop. |dump_providers_| are kept sorted by thread, so | 239 // Start the thread hop. |dump_providers_| are kept sorted by thread, so |
| 237 // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread | 240 // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread |
| 238 // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()). | 241 // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()). |
| 239 ContinueAsyncProcessDump(pmd_async_state.Pass()); | 242 ContinueAsyncProcessDump(pmd_async_state.Pass()); |
| 240 } | 243 } |
| 241 | 244 |
| (...skipping 19 matching lines...) Expand all Loading... | |
| 261 // in the PostTask below don't end up registering their own dump providers | 264 // in the PostTask below don't end up registering their own dump providers |
| 262 // (for discounting trace memory overhead) while holding the |lock_|. | 265 // (for discounting trace memory overhead) while holding the |lock_|. |
| 263 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); | 266 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); |
| 264 | 267 |
| 265 // DO NOT put any LOG() statement in the locked sections, as in some contexts | 268 // DO NOT put any LOG() statement in the locked sections, as in some contexts |
| 266 // (GPU process) LOG() ends up performing PostTask/IPCs. | 269 // (GPU process) LOG() ends up performing PostTask/IPCs. |
| 267 MemoryDumpProvider* mdp; | 270 MemoryDumpProvider* mdp; |
| 268 bool skip_dump = false; | 271 bool skip_dump = false; |
| 269 { | 272 { |
| 270 AutoLock lock(lock_); | 273 AutoLock lock(lock_); |
| 271 // In the unlikely event that a dump provider was unregistered while | |
| 272 // dumping, abort the dump, as that would make |next_dump_provider| invalid. | |
| 273 // Registration, on the other hand, is safe as per std::set<> contract. | |
| 274 if (did_unregister_dump_provider_) { | |
| 275 return AbortDumpLocked(pmd_async_state->callback, | |
| 276 pmd_async_state->task_runner, | |
| 277 pmd_async_state->req_args.dump_guid); | |
| 278 } | |
| 279 | 274 |
| 280 auto* mdp_info = &*pmd_async_state->next_dump_provider; | 275 auto* mdp_info = &*pmd_async_state->next_dump_provider; |
| 281 mdp = mdp_info->dump_provider; | 276 mdp = mdp_info->dump_provider; |
| 282 if (mdp_info->disabled) { | 277 if (mdp_info->disabled || mdp_info->unregistered) { |
| 283 skip_dump = true; | 278 skip_dump = true; |
| 284 } else if (mdp_info->task_runner && | 279 } else if (mdp_info->task_runner && |
| 285 !mdp_info->task_runner->BelongsToCurrentThread()) { | 280 !mdp_info->task_runner->BelongsToCurrentThread()) { |
| 286 // It's time to hop onto another thread. | 281 // It's time to hop onto another thread. |
| 287 | 282 |
| 288 // Copy the callback + arguments just for the unlikely case in which | 283 // Copy the callback + arguments just for the unlikely case in which |
| 289 // PostTask fails. In such case the Bind helper will destroy the | 284 // PostTask fails. In such case the Bind helper will destroy the |
| 290 // pmd_async_state and we must keep a copy of the fields to notify the | 285 // pmd_async_state and we must keep a copy of the fields to notify the |
| 291 // abort. | 286 // abort. |
| 292 MemoryDumpCallback callback = pmd_async_state->callback; | 287 MemoryDumpCallback callback = pmd_async_state->callback; |
| 293 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = | 288 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = |
| 294 pmd_async_state->task_runner; | 289 pmd_async_state->task_runner; |
| 295 const uint64 dump_guid = pmd_async_state->req_args.dump_guid; | 290 const uint64 dump_guid = pmd_async_state->req_args.dump_guid; |
| 296 | 291 |
| 297 const bool did_post_task = mdp_info->task_runner->PostTask( | 292 const bool did_post_task = mdp_info->task_runner->PostTask( |
| 298 FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump, | 293 FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump, |
| 299 Unretained(this), Passed(pmd_async_state.Pass()))); | 294 Unretained(this), Passed(pmd_async_state.Pass()))); |
| 300 if (did_post_task) | 295 if (did_post_task) |
| 301 return; | 296 return; |
| 302 | 297 |
| 303 // The thread is gone. At this point the best thing we can do is to | 298 // The thread is gone. At this point the best thing we can do is to |
| 304 // disable the dump provider and abort this dump. | 299 // disable the dump provider and abort this dump. |
| 305 mdp_info->disabled = true; | 300 mdp_info->disabled = true; |
|
Primiano Tucci (use gerrit)
2015/08/21 09:00:31
I wonder at this point if we should also gracefull
Ruud van Asseldonk
2015/08/21 10:51:40
Does it happen in practice?
Primiano Tucci (use gerrit)
2015/08/24 09:31:19
I don't know, that's what I was wondering.
It's just
| |
| 306 return AbortDumpLocked(callback, callback_task_runner, dump_guid); | 301 return AbortDumpLocked(callback, callback_task_runner, dump_guid); |
| 307 } | 302 } |
| 308 } // AutoLock(lock_) | 303 } // AutoLock(lock_) |
| 309 | 304 |
| 310 // Invoke the dump provider without holding the |lock_|. | 305 // Invoke the dump provider without holding the |lock_|. |
| 311 bool finalize = false; | 306 bool finalize = false; |
| 312 bool dump_successful = false; | 307 bool dump_successful = false; |
| 313 | 308 |
| 314 if (!skip_dump) { | 309 if (!skip_dump) { |
| 315 dump_successful = mdp->OnMemoryDump(pmd_async_state->req_args.dump_args, | 310 dump_successful = mdp->OnMemoryDump(pmd_async_state->req_args.dump_args, |
| 316 &pmd_async_state->process_memory_dump); | 311 &pmd_async_state->process_memory_dump); |
| 317 } | 312 } |
| 318 | 313 |
| 319 { | 314 { |
| 320 AutoLock lock(lock_); | 315 AutoLock lock(lock_); |
| 321 if (did_unregister_dump_provider_) { | |
| 322 return AbortDumpLocked(pmd_async_state->callback, | |
| 323 pmd_async_state->task_runner, | |
| 324 pmd_async_state->req_args.dump_guid); | |
| 325 } | |
| 326 auto* mdp_info = &*pmd_async_state->next_dump_provider; | 316 auto* mdp_info = &*pmd_async_state->next_dump_provider; |
| 327 if (dump_successful) { | 317 if (dump_successful) { |
| 328 mdp_info->consecutive_failures = 0; | 318 mdp_info->consecutive_failures = 0; |
| 329 } else if (!skip_dump) { | 319 } else if (!skip_dump) { |
| 330 ++mdp_info->consecutive_failures; | 320 ++mdp_info->consecutive_failures; |
| 331 if (mdp_info->consecutive_failures >= kMaxConsecutiveFailuresCount) { | 321 if (mdp_info->consecutive_failures >= kMaxConsecutiveFailuresCount) { |
| 332 mdp_info->disabled = true; | 322 mdp_info->disabled = true; |
| 333 } | 323 } |
| 334 } | 324 } |
| 335 ++pmd_async_state->next_dump_provider; | 325 ++pmd_async_state->next_dump_provider; |
| 336 finalize = pmd_async_state->next_dump_provider == dump_providers_.end(); | 326 finalize = pmd_async_state->next_dump_provider == dump_providers_.end(); |
| 327 | |
| 328 // Remove providers that unregistered since the last dump. | |
|
Primiano Tucci (use gerrit)
2015/08/21 09:00:30
Hmm I don't like too much the idea of iterating ov
Ruud van Asseldonk
2015/08/21 10:51:40
Done.
| |
| 329 if (finalize) { | |
| 330 auto i = dump_providers_.begin(); | |
| 331 while (i != dump_providers_.end()) { | |
| 332 auto current = i++; | |
| 333 if (current->unregistered) | |
| 334 dump_providers_.erase(current); | |
| 335 } | |
| 336 } | |
| 337 } | 337 } |
| 338 | 338 |
| 339 if (!skip_dump && !dump_successful) { | 339 if (!skip_dump && !dump_successful) { |
| 340 LOG(ERROR) << "A memory dumper failed, possibly due to sandboxing " | 340 LOG(ERROR) << "A memory dumper failed, possibly due to sandboxing " |
| 341 "(crbug.com/461788). Disabling dumper for current process. " | 341 "(crbug.com/461788). Disabling dumper for current process. " |
| 342 "Try restarting chrome with the --no-sandbox switch."; | 342 "Try restarting chrome with the --no-sandbox switch."; |
| 343 } | 343 } |
| 344 | 344 |
| 345 if (finalize) | 345 if (finalize) |
| 346 return FinalizeDumpAndAddToTrace(pmd_async_state.Pass()); | 346 return FinalizeDumpAndAddToTrace(pmd_async_state.Pass()); |
| (...skipping 100 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 447 uint64 MemoryDumpManager::GetTracingProcessId() const { | 447 uint64 MemoryDumpManager::GetTracingProcessId() const { |
| 448 return delegate_->GetTracingProcessId(); | 448 return delegate_->GetTracingProcessId(); |
| 449 } | 449 } |
| 450 | 450 |
| 451 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( | 451 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( |
| 452 MemoryDumpProvider* dump_provider, | 452 MemoryDumpProvider* dump_provider, |
| 453 const scoped_refptr<SingleThreadTaskRunner>& task_runner) | 453 const scoped_refptr<SingleThreadTaskRunner>& task_runner) |
| 454 : dump_provider(dump_provider), | 454 : dump_provider(dump_provider), |
| 455 task_runner(task_runner), | 455 task_runner(task_runner), |
| 456 consecutive_failures(0), | 456 consecutive_failures(0), |
| 457 disabled(false) { | 457 disabled(false), |
| 458 } | 458 unregistered(false) {} |
| 459 | 459 |
| 460 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() { | 460 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() { |
| 461 } | 461 } |
| 462 | 462 |
| 463 bool MemoryDumpManager::MemoryDumpProviderInfo::operator<( | 463 bool MemoryDumpManager::MemoryDumpProviderInfo::operator<( |
| 464 const MemoryDumpProviderInfo& other) const { | 464 const MemoryDumpProviderInfo& other) const { |
| 465 if (task_runner == other.task_runner) | 465 if (task_runner == other.task_runner) |
| 466 return dump_provider < other.dump_provider; | 466 return dump_provider < other.dump_provider; |
| 467 return task_runner < other.task_runner; | 467 return task_runner < other.task_runner; |
| 468 } | 468 } |
| 469 | 469 |
| 470 bool MemoryDumpManager::MemoryDumpProviderInfo::operator==( | |
|
Primiano Tucci (use gerrit)
2015/08/21 09:00:30
Why is this needed? AFAIK std::set relies only on
Ruud van Asseldonk
2015/08/21 10:51:40
It isn’t needed.
| |
| 471 const MemoryDumpProviderInfo& other) const { | |
| 472 return dump_provider == other.dump_provider && | |
| 473 task_runner == other.task_runner; | |
| 474 } | |
| 475 | |
| 470 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState( | 476 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState( |
| 471 MemoryDumpRequestArgs req_args, | 477 MemoryDumpRequestArgs req_args, |
| 472 MemoryDumpProviderInfoSet::iterator next_dump_provider, | 478 MemoryDumpProviderInfoSet::iterator next_dump_provider, |
| 473 const scoped_refptr<MemoryDumpSessionState>& session_state, | 479 const scoped_refptr<MemoryDumpSessionState>& session_state, |
| 474 MemoryDumpCallback callback) | 480 MemoryDumpCallback callback) |
| 475 : process_memory_dump(session_state), | 481 : process_memory_dump(session_state), |
| 476 req_args(req_args), | 482 req_args(req_args), |
| 477 next_dump_provider(next_dump_provider), | 483 next_dump_provider(next_dump_provider), |
| 478 callback(callback), | 484 callback(callback), |
| 479 task_runner(MessageLoop::current()->task_runner()) { | 485 task_runner(MessageLoop::current()->task_runner()) { |
| 480 } | 486 } |
| 481 | 487 |
| 482 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() { | 488 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() { |
| 483 } | 489 } |
| 484 | 490 |
| 485 } // namespace trace_event | 491 } // namespace trace_event |
| 486 } // namespace base | 492 } // namespace base |
| OLD | NEW |