Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <utility> | 8 #include <utility> |
| 9 | 9 |
| 10 #include "base/atomic_sequence_num.h" | 10 #include "base/atomic_sequence_num.h" |
| (...skipping 166 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 177 TRACE_EVENT0(kTraceCategory, "init"); // Add to trace-viewer category list. | 177 TRACE_EVENT0(kTraceCategory, "init"); // Add to trace-viewer category list. |
| 178 TraceLog::GetInstance()->AddEnabledStateObserver(this); | 178 TraceLog::GetInstance()->AddEnabledStateObserver(this); |
| 179 if (is_tracing_already_enabled) | 179 if (is_tracing_already_enabled) |
| 180 OnTraceLogEnabled(); | 180 OnTraceLogEnabled(); |
| 181 } | 181 } |
| 182 | 182 |
| 183 void MemoryDumpManager::RegisterDumpProvider( | 183 void MemoryDumpManager::RegisterDumpProvider( |
| 184 MemoryDumpProvider* mdp, | 184 MemoryDumpProvider* mdp, |
| 185 const char* name, | 185 const char* name, |
| 186 const scoped_refptr<SingleThreadTaskRunner>& task_runner, | 186 const scoped_refptr<SingleThreadTaskRunner>& task_runner, |
| 187 MemoryDumpProvider::Options options) { | |
| 188 options.dumps_on_single_thread_task_runner = true; | |
| 189 RegisterDumpProviderInternal(mdp, name, task_runner, options); | |
| 190 } | |
| 191 | |
| 192 void MemoryDumpManager::RegisterDumpProvider( | |
| 193 MemoryDumpProvider* mdp, | |
| 194 const char* name, | |
| 195 const scoped_refptr<SingleThreadTaskRunner>& task_runner) { | |
| 196 // Set |dumps_on_single_thread_task_runner| to true because all providers | |
| 197 // without a task runner are run on the dump thread. | |
| 198 MemoryDumpProvider::Options options; | |
| 199 options.dumps_on_single_thread_task_runner = true; | |
| 200 RegisterDumpProviderInternal(mdp, name, task_runner, options); | |
| 201 } | |
| 202 | |
| 203 void MemoryDumpManager::RegisterDumpProviderWithSequencedTaskRunner( | |
| 204 MemoryDumpProvider* mdp, | |
| 205 const char* name, | |
| 206 const scoped_refptr<SequencedTaskRunner>& task_runner, | |
| 207 MemoryDumpProvider::Options options) { | |
| 208 DCHECK(task_runner); | |
| 209 options.dumps_on_single_thread_task_runner = false; | |
| 210 RegisterDumpProviderInternal(mdp, name, task_runner, options); | |
| 211 } | |
| 212 | |
| 213 void MemoryDumpManager::RegisterDumpProviderInternal( | |
| 214 MemoryDumpProvider* mdp, | |
| 215 const char* name, | |
| 216 const scoped_refptr<SequencedTaskRunner>& task_runner, | |
| 187 const MemoryDumpProvider::Options& options) { | 217 const MemoryDumpProvider::Options& options) { |
| 188 if (dumper_registrations_ignored_for_testing_) | 218 if (dumper_registrations_ignored_for_testing_) |
| 189 return; | 219 return; |
| 190 | 220 |
| 191 scoped_refptr<MemoryDumpProviderInfo> mdpinfo = | 221 scoped_refptr<MemoryDumpProviderInfo> mdpinfo = |
| 192 new MemoryDumpProviderInfo(mdp, name, task_runner, options); | 222 new MemoryDumpProviderInfo(mdp, name, task_runner, options); |
| 193 | 223 |
| 194 { | 224 { |
| 195 AutoLock lock(lock_); | 225 AutoLock lock(lock_); |
| 196 bool already_registered = !dump_providers_.insert(mdpinfo).second; | 226 bool already_registered = !dump_providers_.insert(mdpinfo).second; |
| 197 // This actually happens in some tests which don't have a clean tear-down | 227 // This actually happens in some tests which don't have a clean tear-down |
| 198 // path for RenderThreadImpl::Init(). | 228 // path for RenderThreadImpl::Init(). |
| 199 if (already_registered) | 229 if (already_registered) |
| 200 return; | 230 return; |
| 201 } | 231 } |
| 202 | 232 |
| 203 if (heap_profiling_enabled_) | 233 if (heap_profiling_enabled_) |
| 204 mdp->OnHeapProfilingEnabled(true); | 234 mdp->OnHeapProfilingEnabled(true); |
| 205 } | 235 } |
| 206 | 236 |
| 207 void MemoryDumpManager::RegisterDumpProvider( | |
| 208 MemoryDumpProvider* mdp, | |
| 209 const char* name, | |
| 210 const scoped_refptr<SingleThreadTaskRunner>& task_runner) { | |
| 211 RegisterDumpProvider(mdp, name, task_runner, MemoryDumpProvider::Options()); | |
| 212 } | |
| 213 | |
| 214 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { | 237 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { |
| 215 UnregisterDumpProviderInternal(mdp, false /* delete_async */); | 238 UnregisterDumpProviderInternal(mdp, false /* delete_async */); |
| 216 } | 239 } |
| 217 | 240 |
| 218 void MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon( | 241 void MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon( |
| 219 scoped_ptr<MemoryDumpProvider> mdp) { | 242 scoped_ptr<MemoryDumpProvider> mdp) { |
| 220 UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */); | 243 UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */); |
| 221 } | 244 } |
| 222 | 245 |
| 223 void MemoryDumpManager::UnregisterDumpProviderInternal( | 246 void MemoryDumpManager::UnregisterDumpProviderInternal( |
| (...skipping 10 matching lines...) Expand all Loading... | |
| 234 if ((*mdp_iter)->dump_provider == mdp) | 257 if ((*mdp_iter)->dump_provider == mdp) |
| 235 break; | 258 break; |
| 236 } | 259 } |
| 237 | 260 |
| 238 if (mdp_iter == dump_providers_.end()) | 261 if (mdp_iter == dump_providers_.end()) |
| 239 return; // Not registered / already unregistered. | 262 return; // Not registered / already unregistered. |
| 240 | 263 |
| 241 if (take_mdp_ownership_and_delete_async) { | 264 if (take_mdp_ownership_and_delete_async) { |
| 242 // The MDP will be deleted whenever the MDPInfo struct will, that is either: | 265 // The MDP will be deleted whenever the MDPInfo struct will, that is either: |
| 243 // - At the end of this function, if no dump is in progress. | 266 // - At the end of this function, if no dump is in progress. |
| 244 // - In the prologue of the ContinueAsyncProcessDump(). | 267 // - In FinalizeCurrentDump() after the current dump finishes. |
| 245 DCHECK(!(*mdp_iter)->owned_dump_provider); | 268 DCHECK(!(*mdp_iter)->owned_dump_provider); |
| 246 (*mdp_iter)->owned_dump_provider = std::move(owned_mdp); | 269 (*mdp_iter)->owned_dump_provider = std::move(owned_mdp); |
| 247 } else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) { | 270 } else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) { |
| 248 // If you hit this DCHECK, your dump provider has a bug. | 271 // If you hit this DCHECK, your dump provider has a bug. |
| 249 // Unregistration of a MemoryDumpProvider is safe only if: | 272 // Unregistration of a MemoryDumpProvider is safe only if: |
| 250 // - The MDP has specified a thread affinity (via task_runner()) AND | 273 // - The MDP has specified a sequenced task runner affinity AND the |
| 251 // the unregistration happens on the same thread (so the MDP cannot | 274 // unregistration happens on the same task runner. So that the MDP cannot |
| 252 // unregister and be in the middle of a OnMemoryDump() at the same time. | 275 // unregister and be in the middle of a OnMemoryDump() at the same time. |
| 253 // - The MDP has NOT specified a thread affinity and its ownership is | 276 // - The MDP has NOT specified a task runner affinity and its ownership is |
| 254 // transferred via UnregisterAndDeleteDumpProviderSoon(). | 277 // transferred via UnregisterAndDeleteDumpProviderSoon(). |
| 255 // In all the other cases, it is not possible to guarantee that the | 278 // In all the other cases, it is not possible to guarantee that the |
| 256 // unregistration will not race with OnMemoryDump() calls. | 279 // unregistration will not race with OnMemoryDump() calls. |
| 257 DCHECK((*mdp_iter)->task_runner && | 280 DCHECK((*mdp_iter)->task_runner && |
| 258 (*mdp_iter)->task_runner->BelongsToCurrentThread()) | 281 (*mdp_iter)->task_runner->RunsTasksOnCurrentThread()) |
| 259 << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to " | 282 << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to " |
| 260 << "unregister itself in a racy way. Please file a crbug."; | 283 << "unregister itself in a racy way. Please file a crbug."; |
| 261 } | 284 } |
| 262 | 285 |
| 263 // The MDPInfo instance can still be referenced by the | 286 // The MDPInfo instance can still be referenced by the |
| 264 // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason | 287 // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason |
| 265 // the MDPInfo is flagged as disabled. It will cause ContinueAsyncProcessDump | 288 // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump() |
| 266 // to just skip it, without actually invoking the |mdp|, which might be | 289 // to just skip it, without actually invoking the |mdp|, which might be |
| 267 // destroyed by the caller soon after this method returns. | 290 // destroyed by the caller soon after this method returns. |
| 268 (*mdp_iter)->disabled = true; | 291 (*mdp_iter)->disabled = true; |
| 269 dump_providers_.erase(mdp_iter); | 292 dump_providers_.erase(mdp_iter); |
| 270 } | 293 } |
| 271 | 294 |
| 272 void MemoryDumpManager::RequestGlobalDump( | 295 void MemoryDumpManager::RequestGlobalDump( |
| 273 MemoryDumpType dump_type, | 296 MemoryDumpType dump_type, |
| 274 MemoryDumpLevelOfDetail level_of_detail, | 297 MemoryDumpLevelOfDetail level_of_detail, |
| 275 const MemoryDumpCallback& callback) { | 298 const MemoryDumpCallback& callback) { |
| (...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 315 | 338 |
| 316 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, | 339 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, |
| 317 const MemoryDumpCallback& callback) { | 340 const MemoryDumpCallback& callback) { |
| 318 TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump", | 341 TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump", |
| 319 TRACE_ID_MANGLE(args.dump_guid)); | 342 TRACE_ID_MANGLE(args.dump_guid)); |
| 320 | 343 |
| 321 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; | 344 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; |
| 322 { | 345 { |
| 323 AutoLock lock(lock_); | 346 AutoLock lock(lock_); |
| 324 // |dump_thread_| can be nullptr if tracing was disabled before reaching | 347 // |dump_thread_| can be nullptr if tracing was disabled before reaching |
| 325 // here. ContinueAsyncProcessDump is robust enough to tolerate it and will | 348 // here. SetupNextMemoryDump is robust enough to tolerate it and will |
| 326 // NACK the dump. | 349 // NACK the dump. |
| 327 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( | 350 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( |
| 328 args, dump_providers_, session_state_, callback, | 351 args, dump_providers_, session_state_, callback, |
| 329 dump_thread_ ? dump_thread_->task_runner() : nullptr)); | 352 dump_thread_ ? dump_thread_->task_runner() : nullptr)); |
| 330 } | 353 } |
| 331 | 354 |
| 332 TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump", | 355 TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump", |
| 333 TRACE_ID_MANGLE(args.dump_guid), | 356 TRACE_ID_MANGLE(args.dump_guid), |
| 334 TRACE_EVENT_FLAG_FLOW_OUT); | 357 TRACE_EVENT_FLAG_FLOW_OUT); |
| 335 | 358 |
| 336 // Start the thread hop. |dump_providers_| are kept sorted by thread, so | 359 // Start the process dump. This involves task runner hops as specified by the |
| 337 // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread | 360 // MemoryDumpProvider(s) in RegisterDumpProvider(). |
| 338 // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()). | 361 SetupNextMemoryDump(std::move(pmd_async_state)); |
| 339 ContinueAsyncProcessDump(pmd_async_state.release()); | |
| 340 } | 362 } |
| 341 | 363 |
| 342 // At most one ContinueAsyncProcessDump() can be active at any time for a given | 364 // PostTask InvokeOnMemoryDump() to the dump provider's sequenced task runner. A |
| 343 // PMD, regardless of status of the |lock_|. |lock_| is used here purely to | 365 // PostTask is always required for SequencedTaskRunner with no additional |
|
Primiano Tucci (use gerrit)
2016/02/08 12:03:15
s/SequencedTaskRunner with no additional guarantee
ssid
2016/02/11 01:45:03
Done.
| |
| 344 // ensure consistency w.r.t. (un)registrations of |dump_providers_|. | 366 // guarantees to ensure that no other task is running on it concurrently. |
| 345 // The linearization of dump providers' OnMemoryDump invocations is achieved by | 367 // SetupNextMemoryDump, InvokeOnMemoryDump and FinalizeCurrentDump are called in |
|
Primiano Tucci (use gerrit)
2016/02/08 12:03:15
add () after function names, e.g., SetupNextMemory
ssid
2016/02/11 01:45:03
Done.
| |
| 346 // means of subsequent PostTask(s). | 368 // series which linearizes the dump provider's OnMemoryDump invocations. |
| 347 // | 369 // At most one of either SetupNextMemoryDump() or InvokeOnMemoryDump() can be |
| 348 // 1) Prologue: | 370 // active at any time for a given PMD, regardless of status of the |lock_|. |
| 349 // - If this was the last hop, create a trace event, add it to the trace | 371 // |lock_| is used in these functions purely to ensure consistency w.r.t. |
| 350 // and finalize (invoke callback). | 372 // (un)registrations of |dump_providers_|. |
| 351 // - Check if we are on the right thread. If not hop and continue there. | 373 void MemoryDumpManager::SetupNextMemoryDump( |
| 352 // - Check if the dump provider is disabled, if so skip the dump. | 374 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { |
| 353 // 2) Invoke the dump provider's OnMemoryDump() (unless skipped). | 375 if (pmd_async_state->pending_dump_providers.empty()) |
| 354 // 3) Epilogue: | 376 return FinalizeCurrentDump(std::move(pmd_async_state)); |
| 355 // - Unregister the dump provider if it failed too many times consecutively. | 377 |
| 356 // - Pop() the MDP from the |pending_dump_providers| list, eventually | |
| 357 // destroying the MDPInfo if that was unregistered in the meantime. | |
| 358 void MemoryDumpManager::ContinueAsyncProcessDump( | |
| 359 ProcessMemoryDumpAsyncState* owned_pmd_async_state) { | |
| 360 // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs | 378 // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs |
|
Primiano Tucci (use gerrit)
2016/02/08 12:03:15
I think you still need to keep this chunk first, t
ssid
2016/02/11 01:45:03
Done.
| |
| 361 // in the PostTask below don't end up registering their own dump providers | 379 // in the PostTask below don't end up registering their own dump providers |
| 362 // (for discounting trace memory overhead) while holding the |lock_|. | 380 // (for discounting trace memory overhead) while holding the |lock_|. |
| 363 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); | 381 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); |
| 364 | 382 |
| 383 // Read MemoryDumpProviderInfo thread safety considerations in | |
| 384 // memory_dump_manager.h when accessing |mdpinfo| fields. | |
| 385 MemoryDumpProviderInfo* mdpinfo = | |
| 386 pmd_async_state->pending_dump_providers.back().get(); | |
| 387 | |
| 388 // If the dump provider did not specify a task runner affinity, dump on | |
| 389 // |dump_thread_|. Note that |dump_thread_| might have been destroyed | |
| 390 // meanwhile. | |
| 391 SequencedTaskRunner* task_runner = mdpinfo->task_runner.get(); | |
| 392 if (!task_runner) { | |
| 393 DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner); | |
| 394 task_runner = pmd_async_state->dump_thread_task_runner.get(); | |
| 395 if (!task_runner) { | |
| 396 // If tracing was disabled before reaching CreateProcessDump() the | |
| 397 // dump_thread_ would have been already torn down. Nack current dump and | |
| 398 // finalize. | |
| 399 pmd_async_state->dump_successful = false; | |
| 400 FinalizeCurrentDump(std::move(pmd_async_state)); | |
|
Primiano Tucci (use gerrit)
2016/02/08 12:03:15
micro-nit. for consistency with above I'd do the c
ssid
2016/02/11 01:45:03
Done.
| |
| 401 return; | |
| 402 } | |
| 403 } | |
| 404 | |
| 405 if (mdpinfo->options.dumps_on_single_thread_task_runner && | |
| 406 task_runner->RunsTasksOnCurrentThread()) { | |
| 407 // If |dumps_on_single_thread_task_runner| is true then no PostTask is | |
| 408 // required if we are on the right thread. So invoke dump on the current | |
|
Primiano Tucci (use gerrit)
2016/02/08 12:03:15
I'd shorten this saying just:
// If |dumps_on_sin
ssid
2016/02/11 01:45:03
Done.
| |
| 409 // thread. | |
| 410 InvokeOnMemoryDump(pmd_async_state.release()); | |
| 411 return; | |
|
Primiano Tucci (use gerrit)
2016/02/08 12:03:15
ditto return Invoke...
ssid
2016/02/11 01:45:03
Done.
| |
| 412 } | |
| 413 | |
| 414 bool did_post_task = task_runner->PostTask( | |
| 415 FROM_HERE, Bind(&MemoryDumpManager::InvokeOnMemoryDump, Unretained(this), | |
| 416 Unretained(pmd_async_state.get()))); | |
| 417 | |
| 418 if (did_post_task) { | |
| 419 // Ownership is transferred to InvokeOnMemoryDump(). | |
| 420 ignore_result(pmd_async_state.release()); | |
| 421 return; | |
| 422 } | |
| 423 | |
| 424 // PostTask usually fails only if the process or thread is shut down. So, the | |
| 425 // dump provider is disabled here. But, don't disable unbound dump providers. | |
| 426 // The utility thread is normally shut down when disabling the trace and | |
| 427 // getting here in this case is expected. | |
| 428 if (mdpinfo->task_runner) { | |
| 429 LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name | |
| 430 << "\". Failed to post task on the task runner provided."; | |
| 431 | |
| 432 // A locked access is required to R/W |disabled| (for the | |
| 433 // UnregisterAndDeleteDumpProviderSoon() case). | |
| 434 AutoLock lock(lock_); | |
| 435 mdpinfo->disabled = true; | |
| 436 } | |
| 437 | |
| 438 // PostTask failed. Ignore the MDP and call FinalizeCurrentDump. | |
| 439 FinalizeCurrentDump(std::move(pmd_async_state)); | |
| 440 } | |
| 441 | |
| 442 // This function is called on the right task runner for current MDP. It is | |
| 443 // either the task runner specified by MDP or |dump_thread_task_runner| if the | |
| 444 // MDP did not specify a task runner. Invokes the dump provider's OnMemoryDump() | |
| 445 // (unless disabled). | |
| 446 void MemoryDumpManager::InvokeOnMemoryDump( | |
| 447 ProcessMemoryDumpAsyncState* owned_pmd_async_state) { | |
| 365 // In theory |owned_pmd_async_state| should be a scoped_ptr. The only reason | 448 // In theory |owned_pmd_async_state| should be a scoped_ptr. The only reason |
| 366 // why it isn't is because of the corner case logic of |did_post_task| below, | 449 // why it isn't is because of the corner case logic of |did_post_task| |
| 367 // which needs to take back the ownership of the |pmd_async_state| when a | 450 // above, which needs to take back the ownership of the |pmd_async_state| when |
| 368 // thread goes away and consequently the PostTask() fails. | 451 // the PostTask() fails. |
| 369 // Unfortunately, PostTask() destroys the scoped_ptr arguments upon failure | 452 // Unfortunately, PostTask() destroys the scoped_ptr arguments upon failure |
| 370 // to prevent accidental leaks. Using a scoped_ptr would not let us | 453 // to prevent accidental leaks. Using a scoped_ptr would not let us |
| 371 // skip the hop and move on. Hence the manual naked -> scoped ptr juggling. | 454 // skip the hop and move on. Hence the manual naked -> scoped ptr juggling. |
| 372 auto pmd_async_state = make_scoped_ptr(owned_pmd_async_state); | 455 auto pmd_async_state = make_scoped_ptr(owned_pmd_async_state); |
| 373 owned_pmd_async_state = nullptr; | 456 owned_pmd_async_state = nullptr; |
| 374 | 457 |
| 375 if (pmd_async_state->pending_dump_providers.empty()) | |
| 376 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state)); | |
| 377 | |
| 378 // Read MemoryDumpProviderInfo thread safety considerations in | 458 // Read MemoryDumpProviderInfo thread safety considerations in |
| 379 // memory_dump_manager.h when accessing |mdpinfo| fields. | 459 // memory_dump_manager.h when accessing |mdpinfo| fields. |
| 380 MemoryDumpProviderInfo* mdpinfo = | 460 MemoryDumpProviderInfo* mdpinfo = |
| 381 pmd_async_state->pending_dump_providers.back().get(); | 461 pmd_async_state->pending_dump_providers.back().get(); |
| 382 | 462 |
| 383 // If the dump provider did not specify a thread affinity, dump on | 463 DCHECK(!mdpinfo->task_runner || |
| 384 // |dump_thread_|. Note that |dump_thread_| might have been Stop()-ed at this | 464 mdpinfo->task_runner->RunsTasksOnCurrentThread()); |
| 385 // point (if tracing was disabled in the meanwhile). In such case the | |
| 386 // PostTask() below will fail, but |task_runner| should always be non-null. | |
| 387 SingleThreadTaskRunner* task_runner = mdpinfo->task_runner.get(); | |
| 388 if (!task_runner) | |
| 389 task_runner = pmd_async_state->dump_thread_task_runner.get(); | |
| 390 | 465 |
| 391 bool post_task_failed = false; | 466 bool should_dump; |
| 392 if (!task_runner) { | 467 { |
| 393 // If tracing was disabled before reaching CreateProcessDump() |task_runner| | 468 // A locked access is required to R/W |disabled| (for the |
| 394 // will be null, as the dump_thread_ would have been already torn down. | 469 // UnregisterAndDeleteDumpProviderSoon() case). |
| 395 post_task_failed = true; | 470 AutoLock lock(lock_); |
| 396 pmd_async_state->dump_successful = false; | 471 |
| 397 } else if (!task_runner->BelongsToCurrentThread()) { | 472 // Unregister the dump provider if it failed too many times consecutively. |
| 398 // It's time to hop onto another thread. | 473 if (!mdpinfo->disabled && |
| 399 post_task_failed = !task_runner->PostTask( | 474 mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) { |
| 400 FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump, | 475 mdpinfo->disabled = true; |
| 401 Unretained(this), Unretained(pmd_async_state.get()))); | 476 LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name |
| 402 if (!post_task_failed) { | 477 << "\". Dump failed multiple times consecutively."; |
| 403 // Ownership is transferred to the next ContinueAsyncProcessDump(). | 477 << "\". Dump failed multiple times consecutively."; |
| 404 ignore_result(pmd_async_state.release()); | |
| 405 return; | |
| 406 } | 478 } |
| 407 } | 479 should_dump = !mdpinfo->disabled; |
| 408 | |
| 409 // At this point either: | |
| 410 // - The MDP has a task runner affinity and we are on the right thread. | |
| 411 // - The MDP has a task runner affinity but the underlying thread is gone, | |
| 412 // hence the above |post_task_failed| == true. | |
| 413 // - The MDP does NOT have a task runner affinity. A locked access is required | |
| 414 // to R/W |disabled| (for the UnregisterAndDeleteDumpProviderSoon() case). | |
| 415 bool should_dump; | |
| 416 const char* disabled_reason = nullptr; | |
| 417 { | |
| 418 AutoLock lock(lock_); | |
| 419 if (!mdpinfo->disabled) { | |
| 420 if (mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) { | |
| 421 mdpinfo->disabled = true; | |
| 422 disabled_reason = | |
| 423 "Dump failure, possibly related with sandboxing (crbug.com/461788)." | |
| 424 " Try --no-sandbox."; | |
| 425 } else if (post_task_failed && mdpinfo->task_runner) { | |
| 426 // Don't disable unbound dump providers. The utility thread is normally | |
| 427 // shutdown when disabling the trace and getting here in this case is | |
| 428 // expected. | |
| 429 mdpinfo->disabled = true; | |
| 430 disabled_reason = "The thread it was meant to dump onto is gone."; | |
| 431 } | |
| 432 } | |
| 433 should_dump = !mdpinfo->disabled && !post_task_failed; | |
| 434 } // AutoLock lock(lock_); | 480 } // AutoLock lock(lock_); |
| 435 | 481 |
| 436 if (disabled_reason) { | |
| 437 LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name << "\". " | |
| 438 << disabled_reason; | |
| 439 } | |
| 440 | |
| 441 if (should_dump) { | 482 if (should_dump) { |
| 442 // Invoke the dump provider. | 483 // Invoke the dump provider. |
| 443 TRACE_EVENT_WITH_FLOW1(kTraceCategory, | 484 TRACE_EVENT_WITH_FLOW1(kTraceCategory, |
| 444 "MemoryDumpManager::ContinueAsyncProcessDump", | 485 "MemoryDumpManager::SetupNextMemoryDump", |
|
Primiano Tucci (use gerrit)
2016/02/08 12:03:15
Shouldn't this be InvokeOnMemoryDump?
ssid
2016/02/11 01:45:03
Done.
| |
| 445 TRACE_ID_MANGLE(pmd_async_state->req_args.dump_guid), | 486 TRACE_ID_MANGLE(pmd_async_state->req_args.dump_guid), |
| 446 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT, | 487 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT, |
| 447 "dump_provider.name", mdpinfo->name); | 488 "dump_provider.name", mdpinfo->name); |
| 448 | 489 |
| 449 // Pid of the target process being dumped. Often kNullProcessId (= current | 490 // Pid of the target process being dumped. Often kNullProcessId (= current |
| 450 // process), non-zero when the coordinator process creates dumps on behalf | 491 // process), non-zero when the coordinator process creates dumps on behalf |
| 451 // of child processes (see crbug.com/461788). | 492 // of child processes (see crbug.com/461788). |
| 452 ProcessId target_pid = mdpinfo->options.target_pid; | 493 ProcessId target_pid = mdpinfo->options.target_pid; |
| 453 ProcessMemoryDump* pmd = | 494 ProcessMemoryDump* pmd = |
| 454 pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(target_pid); | 495 pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(target_pid); |
| 455 MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail}; | 496 MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail}; |
| 456 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd); | 497 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd); |
| 457 mdpinfo->consecutive_failures = | 498 mdpinfo->consecutive_failures = |
| 458 dump_successful ? 0 : mdpinfo->consecutive_failures + 1; | 499 dump_successful ? 0 : mdpinfo->consecutive_failures + 1; |
| 459 } // if (!mdpinfo->disabled) | 500 } |
| 460 | 501 |
| 461 pmd_async_state->pending_dump_providers.pop_back(); | 502 FinalizeCurrentDump(std::move(pmd_async_state)); |
| 462 ContinueAsyncProcessDump(pmd_async_state.release()); | 503 } |
| 504 | |
|
Primiano Tucci (use gerrit)
2016/02/08 12:03:15
Hmm this function:
- Has a misleading name, especi
ssid
2016/02/11 01:45:03
I did not want multiple functions destroying mdpin
| |
| 505 void MemoryDumpManager::FinalizeCurrentDump( | |
| 506 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { | |
| 507 // Pop() the MDP from the |pending_dump_providers| list, eventually destroying | |
| 508 // the MDPInfo if that was unregistered in the meantime. | |
| 509 if (!pmd_async_state->pending_dump_providers.empty()) | |
| 510 pmd_async_state->pending_dump_providers.pop_back(); | |
| 511 | |
| 512 // Call SetupNextMemoryDump to continue with the next dump provider in the | |
| 513 // process dump. | |
| 514 if (!pmd_async_state->pending_dump_providers.empty()) | |
| 515 return SetupNextMemoryDump(std::move(pmd_async_state)); | |
| 516 | |
| 517 // If this was the last hop, create a trace event, add it to the trace and | |
| 518 // finalize process dump (invoke callback). | |
| 519 FinalizeDumpAndAddToTrace(std::move(pmd_async_state)); | |
| 463 } | 520 } |
| 464 | 521 |
| 465 // static | 522 // static |
| 466 void MemoryDumpManager::FinalizeDumpAndAddToTrace( | 523 void MemoryDumpManager::FinalizeDumpAndAddToTrace( |
| 467 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { | 524 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { |
| 468 DCHECK(pmd_async_state->pending_dump_providers.empty()); | 525 DCHECK(pmd_async_state->pending_dump_providers.empty()); |
| 469 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; | 526 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; |
| 470 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { | 527 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { |
| 471 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = | 528 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = |
| 472 pmd_async_state->callback_task_runner; | 529 pmd_async_state->callback_task_runner; |
| (...skipping 132 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 605 // state is always accessed by the dumping methods holding the |lock_|. | 662 // state is always accessed by the dumping methods holding the |lock_|. |
| 606 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); | 663 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); |
| 607 scoped_ptr<Thread> dump_thread; | 664 scoped_ptr<Thread> dump_thread; |
| 608 { | 665 { |
| 609 AutoLock lock(lock_); | 666 AutoLock lock(lock_); |
| 610 dump_thread = std::move(dump_thread_); | 667 dump_thread = std::move(dump_thread_); |
| 611 session_state_ = nullptr; | 668 session_state_ = nullptr; |
| 612 } | 669 } |
| 613 | 670 |
| 614 // Thread stops are blocking and must be performed outside of the |lock_| | 671 // Thread stops are blocking and must be performed outside of the |lock_| |
| 615 // or will deadlock (e.g., if ContinueAsyncProcessDump() tries to acquire it). | 672 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). |
| 616 periodic_dump_timer_.Stop(); | 673 periodic_dump_timer_.Stop(); |
| 617 if (dump_thread) | 674 if (dump_thread) |
| 618 dump_thread->Stop(); | 675 dump_thread->Stop(); |
| 619 } | 676 } |
| 620 | 677 |
| 621 uint64_t MemoryDumpManager::GetTracingProcessId() const { | 678 uint64_t MemoryDumpManager::GetTracingProcessId() const { |
| 622 return delegate_->GetTracingProcessId(); | 679 return delegate_->GetTracingProcessId(); |
| 623 } | 680 } |
| 624 | 681 |
| 625 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( | 682 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( |
| 626 MemoryDumpProvider* dump_provider, | 683 MemoryDumpProvider* dump_provider, |
| 627 const char* name, | 684 const char* name, |
| 628 const scoped_refptr<SingleThreadTaskRunner>& task_runner, | 685 const scoped_refptr<SequencedTaskRunner>& task_runner, |
| 629 const MemoryDumpProvider::Options& options) | 686 const MemoryDumpProvider::Options& options) |
| 630 : dump_provider(dump_provider), | 687 : dump_provider(dump_provider), |
| 631 name(name), | 688 name(name), |
| 632 task_runner(task_runner), | 689 task_runner(task_runner), |
| 633 options(options), | 690 options(options), |
| 634 consecutive_failures(0), | 691 consecutive_failures(0), |
| 635 disabled(false) {} | 692 disabled(false) {} |
| 636 | 693 |
| 637 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {} | 694 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {} |
| 638 | 695 |
| (...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 672 auto iter = process_dumps.find(pid); | 729 auto iter = process_dumps.find(pid); |
| 673 if (iter == process_dumps.end()) { | 730 if (iter == process_dumps.end()) { |
| 674 scoped_ptr<ProcessMemoryDump> new_pmd(new ProcessMemoryDump(session_state)); | 731 scoped_ptr<ProcessMemoryDump> new_pmd(new ProcessMemoryDump(session_state)); |
| 675 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; | 732 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; |
| 676 } | 733 } |
| 677 return iter->second.get(); | 734 return iter->second.get(); |
| 678 } | 735 } |
| 679 | 736 |
| 680 } // namespace trace_event | 737 } // namespace trace_event |
| 681 } // namespace base | 738 } // namespace base |
| OLD | NEW |