| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <utility> | 8 #include <utility> |
| 9 | 9 |
| 10 #include "base/atomic_sequence_num.h" | 10 #include "base/atomic_sequence_num.h" |
| (...skipping 167 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 178 TRACE_EVENT0(kTraceCategory, "init"); // Add to trace-viewer category list. | 178 TRACE_EVENT0(kTraceCategory, "init"); // Add to trace-viewer category list. |
| 179 TraceLog::GetInstance()->AddEnabledStateObserver(this); | 179 TraceLog::GetInstance()->AddEnabledStateObserver(this); |
| 180 if (is_tracing_already_enabled) | 180 if (is_tracing_already_enabled) |
| 181 OnTraceLogEnabled(); | 181 OnTraceLogEnabled(); |
| 182 } | 182 } |
| 183 | 183 |
| 184 void MemoryDumpManager::RegisterDumpProvider( | 184 void MemoryDumpManager::RegisterDumpProvider( |
| 185 MemoryDumpProvider* mdp, | 185 MemoryDumpProvider* mdp, |
| 186 const char* name, | 186 const char* name, |
| 187 const scoped_refptr<SingleThreadTaskRunner>& task_runner, | 187 const scoped_refptr<SingleThreadTaskRunner>& task_runner, |
| 188 MemoryDumpProvider::Options options) { |
| 189 options.dumps_on_single_thread_task_runner = true; |
| 190 RegisterDumpProviderInternal(mdp, name, task_runner, options); |
| 191 } |
| 192 |
| 193 void MemoryDumpManager::RegisterDumpProvider( |
| 194 MemoryDumpProvider* mdp, |
| 195 const char* name, |
| 196 const scoped_refptr<SingleThreadTaskRunner>& task_runner) { |
| 197 // Set |dumps_on_single_thread_task_runner| to true because all providers |
| 198 // without a task runner are run on the dump thread. |
| 199 MemoryDumpProvider::Options options; |
| 200 options.dumps_on_single_thread_task_runner = true; |
| 201 RegisterDumpProviderInternal(mdp, name, task_runner, options); |
| 202 } |
| 203 |
| 204 void MemoryDumpManager::RegisterDumpProviderWithSequencedTaskRunner( |
| 205 MemoryDumpProvider* mdp, |
| 206 const char* name, |
| 207 const scoped_refptr<SequencedTaskRunner>& task_runner, |
| 208 MemoryDumpProvider::Options options) { |
| 209 DCHECK(task_runner); |
| 210 options.dumps_on_single_thread_task_runner = false; |
| 211 RegisterDumpProviderInternal(mdp, name, task_runner, options); |
| 212 } |
| 213 |
| 214 void MemoryDumpManager::RegisterDumpProviderInternal( |
| 215 MemoryDumpProvider* mdp, |
| 216 const char* name, |
| 217 const scoped_refptr<SequencedTaskRunner>& task_runner, |
| 188 const MemoryDumpProvider::Options& options) { | 218 const MemoryDumpProvider::Options& options) { |
| 189 if (dumper_registrations_ignored_for_testing_) | 219 if (dumper_registrations_ignored_for_testing_) |
| 190 return; | 220 return; |
| 191 | 221 |
| 192 scoped_refptr<MemoryDumpProviderInfo> mdpinfo = | 222 scoped_refptr<MemoryDumpProviderInfo> mdpinfo = |
| 193 new MemoryDumpProviderInfo(mdp, name, task_runner, options); | 223 new MemoryDumpProviderInfo(mdp, name, task_runner, options); |
| 194 | 224 |
| 195 { | 225 { |
| 196 AutoLock lock(lock_); | 226 AutoLock lock(lock_); |
| 197 bool already_registered = !dump_providers_.insert(mdpinfo).second; | 227 bool already_registered = !dump_providers_.insert(mdpinfo).second; |
| 198 // This actually happens in some tests which don't have a clean tear-down | 228 // This actually happens in some tests which don't have a clean tear-down |
| 199 // path for RenderThreadImpl::Init(). | 229 // path for RenderThreadImpl::Init(). |
| 200 if (already_registered) | 230 if (already_registered) |
| 201 return; | 231 return; |
| 202 } | 232 } |
| 203 | 233 |
| 204 if (heap_profiling_enabled_) | 234 if (heap_profiling_enabled_) |
| 205 mdp->OnHeapProfilingEnabled(true); | 235 mdp->OnHeapProfilingEnabled(true); |
| 206 } | 236 } |
| 207 | 237 |
| 208 void MemoryDumpManager::RegisterDumpProvider( | |
| 209 MemoryDumpProvider* mdp, | |
| 210 const char* name, | |
| 211 const scoped_refptr<SingleThreadTaskRunner>& task_runner) { | |
| 212 RegisterDumpProvider(mdp, name, task_runner, MemoryDumpProvider::Options()); | |
| 213 } | |
| 214 | |
| 215 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { | 238 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { |
| 216 UnregisterDumpProviderInternal(mdp, false /* delete_async */); | 239 UnregisterDumpProviderInternal(mdp, false /* delete_async */); |
| 217 } | 240 } |
| 218 | 241 |
| 219 void MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon( | 242 void MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon( |
| 220 scoped_ptr<MemoryDumpProvider> mdp) { | 243 scoped_ptr<MemoryDumpProvider> mdp) { |
| 221 UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */); | 244 UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */); |
| 222 } | 245 } |
| 223 | 246 |
| 224 void MemoryDumpManager::UnregisterDumpProviderInternal( | 247 void MemoryDumpManager::UnregisterDumpProviderInternal( |
| (...skipping 10 matching lines...) Expand all Loading... |
| 235 if ((*mdp_iter)->dump_provider == mdp) | 258 if ((*mdp_iter)->dump_provider == mdp) |
| 236 break; | 259 break; |
| 237 } | 260 } |
| 238 | 261 |
| 239 if (mdp_iter == dump_providers_.end()) | 262 if (mdp_iter == dump_providers_.end()) |
| 240 return; // Not registered / already unregistered. | 263 return; // Not registered / already unregistered. |
| 241 | 264 |
| 242 if (take_mdp_ownership_and_delete_async) { | 265 if (take_mdp_ownership_and_delete_async) { |
| 243 // The MDP will be deleted whenever the MDPInfo struct will, that is either: | 266 // The MDP will be deleted whenever the MDPInfo struct will, that is either: |
| 244 // - At the end of this function, if no dump is in progress. | 267 // - At the end of this function, if no dump is in progress. |
| 245 // - In the prologue of the ContinueAsyncProcessDump(). | 268 // - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is |
| 269 // removed from |pending_dump_providers|. |
| 246 DCHECK(!(*mdp_iter)->owned_dump_provider); | 270 DCHECK(!(*mdp_iter)->owned_dump_provider); |
| 247 (*mdp_iter)->owned_dump_provider = std::move(owned_mdp); | 271 (*mdp_iter)->owned_dump_provider = std::move(owned_mdp); |
| 248 } else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) { | 272 } else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) { |
| 249 // If you hit this DCHECK, your dump provider has a bug. | 273 // If you hit this DCHECK, your dump provider has a bug. |
| 250 // Unregistration of a MemoryDumpProvider is safe only if: | 274 // Unregistration of a MemoryDumpProvider is safe only if: |
| 251 // - The MDP has specified a thread affinity (via task_runner()) AND | 275 // - The MDP has specified a sequenced task runner affinity AND the |
| 252 // the unregistration happens on the same thread (so the MDP cannot | 276 // unregistration happens on the same task runner, so that the MDP cannot |
| 253 // unregister and be in the middle of an OnMemoryDump() at the same time. | 277 // unregister and be in the middle of an OnMemoryDump() at the same time. |
| 254 // - The MDP has NOT specified a thread affinity and its ownership is | 278 // - The MDP has NOT specified a task runner affinity and its ownership is |
| 255 // transferred via UnregisterAndDeleteDumpProviderSoon(). | 279 // transferred via UnregisterAndDeleteDumpProviderSoon(). |
| 256 // In all the other cases, it is not possible to guarantee that the | 280 // In all the other cases, it is not possible to guarantee that the |
| 257 // unregistration will not race with OnMemoryDump() calls. | 281 // unregistration will not race with OnMemoryDump() calls. |
| 258 DCHECK((*mdp_iter)->task_runner && | 282 DCHECK((*mdp_iter)->task_runner && |
| 259 (*mdp_iter)->task_runner->BelongsToCurrentThread()) | 283 (*mdp_iter)->task_runner->RunsTasksOnCurrentThread()) |
| 260 << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to " | 284 << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to " |
| 261 << "unregister itself in a racy way. Please file a crbug."; | 285 << "unregister itself in a racy way. Please file a crbug."; |
| 262 } | 286 } |
| 263 | 287 |
| 264 // The MDPInfo instance can still be referenced by the | 288 // The MDPInfo instance can still be referenced by the |
| 265 // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason | 289 // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason |
| 266 // the MDPInfo is flagged as disabled. It will cause ContinueAsyncProcessDump | 290 // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump() |
| 267 // to just skip it, without actually invoking the |mdp|, which might be | 291 // to just skip it, without actually invoking the |mdp|, which might be |
| 268 // destroyed by the caller soon after this method returns. | 292 // destroyed by the caller soon after this method returns. |
| 269 (*mdp_iter)->disabled = true; | 293 (*mdp_iter)->disabled = true; |
| 270 dump_providers_.erase(mdp_iter); | 294 dump_providers_.erase(mdp_iter); |
| 271 } | 295 } |
| 272 | 296 |
| 273 void MemoryDumpManager::RequestGlobalDump( | 297 void MemoryDumpManager::RequestGlobalDump( |
| 274 MemoryDumpType dump_type, | 298 MemoryDumpType dump_type, |
| 275 MemoryDumpLevelOfDetail level_of_detail, | 299 MemoryDumpLevelOfDetail level_of_detail, |
| 276 const MemoryDumpCallback& callback) { | 300 const MemoryDumpCallback& callback) { |
| (...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 316 | 340 |
| 317 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, | 341 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, |
| 318 const MemoryDumpCallback& callback) { | 342 const MemoryDumpCallback& callback) { |
| 319 TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump", | 343 TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump", |
| 320 TRACE_ID_MANGLE(args.dump_guid)); | 344 TRACE_ID_MANGLE(args.dump_guid)); |
| 321 | 345 |
| 322 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; | 346 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; |
| 323 { | 347 { |
| 324 AutoLock lock(lock_); | 348 AutoLock lock(lock_); |
| 325 // |dump_thread_| can be nullptr if tracing was disabled before reaching | 349 // |dump_thread_| can be nullptr if tracing was disabled before reaching |
| 326 // here. ContinueAsyncProcessDump is robust enough to tolerate it and will | 350 // here. SetupNextMemoryDump() is robust enough to tolerate it and will |
| 327 // NACK the dump. | 351 // NACK the dump. |
| 328 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( | 352 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( |
| 329 args, dump_providers_, session_state_, callback, | 353 args, dump_providers_, session_state_, callback, |
| 330 dump_thread_ ? dump_thread_->task_runner() : nullptr)); | 354 dump_thread_ ? dump_thread_->task_runner() : nullptr)); |
| 331 } | 355 } |
| 332 | 356 |
| 333 TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump", | 357 TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump", |
| 334 TRACE_ID_MANGLE(args.dump_guid), | 358 TRACE_ID_MANGLE(args.dump_guid), |
| 335 TRACE_EVENT_FLAG_FLOW_OUT); | 359 TRACE_EVENT_FLAG_FLOW_OUT); |
| 336 | 360 |
| 337 // Start the thread hop. |dump_providers_| are kept sorted by thread, so | 361 // Start the process dump. This involves task runner hops as specified by the |
| 338 // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread | 362 // MemoryDumpProvider(s) in RegisterDumpProvider(). |
| 339 // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()). | 363 SetupNextMemoryDump(std::move(pmd_async_state)); |
| 340 ContinueAsyncProcessDump(pmd_async_state.release()); | |
| 341 } | 364 } |
| 342 | 365 |
| 343 // At most one ContinueAsyncProcessDump() can be active at any time for a given | 366 // PostTask InvokeOnMemoryDump() to the dump provider's sequenced task runner. A |
| 344 // PMD, regardless of status of the |lock_|. |lock_| is used here purely to | 367 // PostTask is always required for a generic SequencedTaskRunner to ensure that |
| 345 // ensure consistency w.r.t. (un)registrations of |dump_providers_|. | 368 // no other task is running on it concurrently. SetupNextMemoryDump() and |
| 346 // The linearization of dump providers' OnMemoryDump invocations is achieved by | 369 // InvokeOnMemoryDump() are called alternatively which linearizes the dump |
| 347 // means of subsequent PostTask(s). | 370 // provider's OnMemoryDump invocations. |
| 348 // | 371 // At most one of either SetupNextMemoryDump() or InvokeOnMemoryDump() can be |
| 349 // 1) Prologue: | 372 // active at any time for a given PMD, regardless of status of the |lock_|. |
| 350 // - If this was the last hop, create a trace event, add it to the trace | 373 // |lock_| is used in these functions purely to ensure consistency w.r.t. |
| 351 // and finalize (invoke callback). | 374 // (un)registrations of |dump_providers_|. |
| 352 // - Check if we are on the right thread. If not hop and continue there. | 375 void MemoryDumpManager::SetupNextMemoryDump( |
| 353 // - Check if the dump provider is disabled, if so skip the dump. | 376 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { |
| 354 // 2) Invoke the dump provider's OnMemoryDump() (unless skipped). | |
| 355 // 3) Epilogue: | |
| 356 // - Unregister the dump provider if it failed too many times consecutively. | |
| 357 // - Pop() the MDP from the |pending_dump_providers| list, eventually | |
| 358 // destroying the MDPInfo if that was unregistered in the meantime. | |
| 359 void MemoryDumpManager::ContinueAsyncProcessDump( | |
| 360 ProcessMemoryDumpAsyncState* owned_pmd_async_state) { | |
| 361 // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs | 378 // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs |
| 362 // in the PostTask below don't end up registering their own dump providers | 378 // in the PostTask below don't end up registering their own dump providers |
| 363 // (for discounting trace memory overhead) while holding the |lock_|. | 379 // (for discounting trace memory overhead) while holding the |lock_|. |
| 364 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); | 380 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); |
| 365 | 381 |
| 366 // In theory |owned_pmd_async_state| should be a scoped_ptr. The only reason | 382 // If this was the last hop, create a trace event, add it to the trace and |
| 367 // why it isn't is because of the corner case logic of |did_post_task| below, | 383 // finalize process dump (invoke callback). |
| 368 // which needs to take back the ownership of the |pmd_async_state| when a | |
| 369 // thread goes away and consequently the PostTask() fails. | |
| 370 // Unfortunately, PostTask() destroys the scoped_ptr arguments upon failure | |
| 371 // to prevent accidental leaks. Using a scoped_ptr would prevent us to to | |
| 372 // skip the hop and move on. Hence the manual naked -> scoped ptr juggling. | |
| 373 auto pmd_async_state = make_scoped_ptr(owned_pmd_async_state); | |
| 374 owned_pmd_async_state = nullptr; | |
| 375 | |
| 376 if (pmd_async_state->pending_dump_providers.empty()) | 384 if (pmd_async_state->pending_dump_providers.empty()) |
| 377 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state)); | 385 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state)); |
| 378 | 386 |
| 379 // Read MemoryDumpProviderInfo thread safety considerations in | 387 // Read MemoryDumpProviderInfo thread safety considerations in |
| 380 // memory_dump_manager.h when accessing |mdpinfo| fields. | 388 // memory_dump_manager.h when accessing |mdpinfo| fields. |
| 381 MemoryDumpProviderInfo* mdpinfo = | 389 MemoryDumpProviderInfo* mdpinfo = |
| 382 pmd_async_state->pending_dump_providers.back().get(); | 390 pmd_async_state->pending_dump_providers.back().get(); |
| 383 | 391 |
| 384 // If the dump provider did not specify a thread affinity, dump on | 392 // If the dump provider did not specify a task runner affinity, dump on |
| 385 // |dump_thread_|. Note that |dump_thread_| might have been Stop()-ed at this | 393 // |dump_thread_|. Note that |dump_thread_| might have been destroyed |
| 386 // point (if tracing was disabled in the meanwhile). In such case the | 394 // meanwhile. |
| 387 // PostTask() below will fail, but |task_runner| should always be non-null. | 395 SequencedTaskRunner* task_runner = mdpinfo->task_runner.get(); |
| 388 SingleThreadTaskRunner* task_runner = mdpinfo->task_runner.get(); | 396 if (!task_runner) { |
| 389 if (!task_runner) | 397 DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner); |
| 390 task_runner = pmd_async_state->dump_thread_task_runner.get(); | 398 task_runner = pmd_async_state->dump_thread_task_runner.get(); |
| 391 | 399 if (!task_runner) { |
| 392 bool post_task_failed = false; | 400 // If tracing was disabled before reaching CreateProcessDump() the |
| 393 if (!task_runner) { | 401 // dump_thread_ would have been already torn down. Nack current dump and |
| 394 // If tracing was disabled before reaching CreateProcessDump() |task_runner| | 402 // continue. |
| 395 // will be null, as the dump_thread_ would have been already torn down. | 403 pmd_async_state->dump_successful = false; |
| 396 post_task_failed = true; | 404 pmd_async_state->pending_dump_providers.pop_back(); |
| 397 pmd_async_state->dump_successful = false; | 405 return SetupNextMemoryDump(std::move(pmd_async_state)); |
| 398 } else if (!task_runner->BelongsToCurrentThread()) { | |
| 399 // It's time to hop onto another thread. | |
| 400 post_task_failed = !task_runner->PostTask( | |
| 401 FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump, | |
| 402 Unretained(this), Unretained(pmd_async_state.get()))); | |
| 403 if (!post_task_failed) { | |
| 404 // Ownership is tranferred to the next ContinueAsyncProcessDump(). | |
| 405 ignore_result(pmd_async_state.release()); | |
| 406 return; | |
| 407 } | 406 } |
| 408 } | 407 } |
| 409 | 408 |
| 410 // At this point either: | 409 if (mdpinfo->options.dumps_on_single_thread_task_runner && |
| 411 // - The MDP has a task runner affinity and we are on the right thread. | 410 task_runner->RunsTasksOnCurrentThread()) { |
| 412 // - The MDP has a task runner affinity but the underlying thread is gone, | 411 // If |dumps_on_single_thread_task_runner| is true then no PostTask is |
| 413 // hence the above |post_task_failed| == true. | 412 // required if we are on the right thread. |
| 414 // - The MDP does NOT have a task runner affinity. A locked access is required | 413 return InvokeOnMemoryDump(pmd_async_state.release()); |
| 415 // to R/W |disabled| (for the UnregisterAndDeleteDumpProviderSoon() case). | 414 } |
| 415 |
| 416 bool did_post_task = task_runner->PostTask( |
| 417 FROM_HERE, Bind(&MemoryDumpManager::InvokeOnMemoryDump, Unretained(this), |
| 418 Unretained(pmd_async_state.get()))); |
| 419 |
| 420 if (did_post_task) { |
| 421 // Ownership is transferred to InvokeOnMemoryDump(). |
| 422 ignore_result(pmd_async_state.release()); |
| 423 return; |
| 424 } |
| 425 |
| 426 // PostTask usually fails only if the process or thread is shut down. So, the |
| 427 // dump provider is disabled here. But, don't disable unbound dump providers. |
| 428 // The utility thread is normally shutdown when disabling the trace and |
| 429 // getting here in this case is expected. |
| 430 if (mdpinfo->task_runner) { |
| 431 LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name |
| 432 << "\". Failed to post task on the task runner provided."; |
| 433 |
| 434 // A locked access is required to R/W |disabled| (for the |
| 435 // UnregisterAndDeleteDumpProviderSoon() case). |
| 436 AutoLock lock(lock_); |
| 437 mdpinfo->disabled = true; |
| 438 } |
| 439 |
| 440 // PostTask failed. Ignore the dump provider and continue. |
| 441 pmd_async_state->pending_dump_providers.pop_back(); |
| 442 SetupNextMemoryDump(std::move(pmd_async_state)); |
| 443 } |
| 444 |
| 445 // This function is called on the right task runner for current MDP. It is |
| 446 // either the task runner specified by MDP or |dump_thread_task_runner| if the |
| 447 // MDP did not specify task runner. Invokes the dump provider's OnMemoryDump() |
| 448 // (unless disabled). |
| 449 void MemoryDumpManager::InvokeOnMemoryDump( |
| 450 ProcessMemoryDumpAsyncState* owned_pmd_async_state) { |
| 451 // In theory |owned_pmd_async_state| should be a scoped_ptr. The only reason |
| 452 // why it isn't is because of the corner case logic of |did_post_task| |
| 453 // above, which needs to take back the ownership of the |pmd_async_state| when |
| 454 // the PostTask() fails. |
| 455 // Unfortunately, PostTask() destroys the scoped_ptr arguments upon failure |
| 456 // to prevent accidental leaks. Using a scoped_ptr would prevent us from |
| 457 // skipping the hop and moving on. Hence the manual naked -> scoped ptr juggling. |
| 458 auto pmd_async_state = make_scoped_ptr(owned_pmd_async_state); |
| 459 owned_pmd_async_state = nullptr; |
| 460 |
| 461 // Read MemoryDumpProviderInfo thread safety considerations in |
| 462 // memory_dump_manager.h when accessing |mdpinfo| fields. |
| 463 MemoryDumpProviderInfo* mdpinfo = |
| 464 pmd_async_state->pending_dump_providers.back().get(); |
| 465 |
| 466 DCHECK(!mdpinfo->task_runner || |
| 467 mdpinfo->task_runner->RunsTasksOnCurrentThread()); |
| 468 |
| 416 bool should_dump; | 469 bool should_dump; |
| 417 const char* disabled_reason = nullptr; | |
| 418 { | 470 { |
| 471 // A locked access is required to R/W |disabled| (for the |
| 472 // UnregisterAndDeleteDumpProviderSoon() case). |
| 419 AutoLock lock(lock_); | 473 AutoLock lock(lock_); |
| 420 if (!mdpinfo->disabled) { | 474 |
| 421 if (mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) { | 475 // Unregister the dump provider if it failed too many times consecutively. |
| 422 mdpinfo->disabled = true; | 476 if (!mdpinfo->disabled && |
| 423 disabled_reason = "Dump failed multiple times consecutively."; | 477 mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) { |
| 424 } else if (post_task_failed && mdpinfo->task_runner) { | 478 mdpinfo->disabled = true; |
| 425 // Don't disable unbound dump providers. The utility thread is normally | 479 LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name |
| 426 // shutdown when disabling the trace and getting here in this case is | 480 << "\". Dump failed multiple times consecutively."; |
| 427 // expected. | |
| 428 mdpinfo->disabled = true; | |
| 429 disabled_reason = "The thread it was meant to dump onto is gone."; | |
| 430 } | |
| 431 } | 481 } |
| 432 should_dump = !mdpinfo->disabled && !post_task_failed; | 482 should_dump = !mdpinfo->disabled; |
| 433 } // AutoLock lock(lock_); | 483 } // AutoLock lock(lock_); |
| 434 | 484 |
| 435 if (disabled_reason) { | |
| 436 LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name << "\". " | |
| 437 << disabled_reason; | |
| 438 } | |
| 439 | |
| 440 if (should_dump) { | 485 if (should_dump) { |
| 441 // Invoke the dump provider. | 486 // Invoke the dump provider. |
| 442 TRACE_EVENT_WITH_FLOW1(kTraceCategory, | 487 TRACE_EVENT_WITH_FLOW1(kTraceCategory, |
| 443 "MemoryDumpManager::ContinueAsyncProcessDump", | 488 "MemoryDumpManager::InvokeOnMemoryDump", |
| 444 TRACE_ID_MANGLE(pmd_async_state->req_args.dump_guid), | 489 TRACE_ID_MANGLE(pmd_async_state->req_args.dump_guid), |
| 445 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT, | 490 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT, |
| 446 "dump_provider.name", mdpinfo->name); | 491 "dump_provider.name", mdpinfo->name); |
| 447 | 492 |
| 448 // Pid of the target process being dumped. Often kNullProcessId (= current | 493 // Pid of the target process being dumped. Often kNullProcessId (= current |
| 449 // process), non-zero when the coordinator process creates dumps on behalf | 494 // process), non-zero when the coordinator process creates dumps on behalf |
| 450 // of child processes (see crbug.com/461788). | 495 // of child processes (see crbug.com/461788). |
| 451 ProcessId target_pid = mdpinfo->options.target_pid; | 496 ProcessId target_pid = mdpinfo->options.target_pid; |
| 452 ProcessMemoryDump* pmd = | 497 ProcessMemoryDump* pmd = |
| 453 pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(target_pid); | 498 pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(target_pid); |
| 454 MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail}; | 499 MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail}; |
| 455 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd); | 500 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd); |
| 456 mdpinfo->consecutive_failures = | 501 mdpinfo->consecutive_failures = |
| 457 dump_successful ? 0 : mdpinfo->consecutive_failures + 1; | 502 dump_successful ? 0 : mdpinfo->consecutive_failures + 1; |
| 458 } // if (!mdpinfo->disabled) | 503 } |
| 459 | 504 |
| 460 pmd_async_state->pending_dump_providers.pop_back(); | 505 pmd_async_state->pending_dump_providers.pop_back(); |
| 461 ContinueAsyncProcessDump(pmd_async_state.release()); | 506 SetupNextMemoryDump(std::move(pmd_async_state)); |
| 462 } | 507 } |
| 463 | 508 |
| 464 // static | 509 // static |
| 465 void MemoryDumpManager::FinalizeDumpAndAddToTrace( | 510 void MemoryDumpManager::FinalizeDumpAndAddToTrace( |
| 466 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { | 511 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { |
| 467 DCHECK(pmd_async_state->pending_dump_providers.empty()); | 512 DCHECK(pmd_async_state->pending_dump_providers.empty()); |
| 468 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; | 513 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; |
| 469 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { | 514 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { |
| 470 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = | 515 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = |
| 471 pmd_async_state->callback_task_runner; | 516 pmd_async_state->callback_task_runner; |
| (...skipping 133 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 605 // state is always accessed by the dumping methods holding the |lock_|. | 650 // state is always accessed by the dumping methods holding the |lock_|. |
| 606 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); | 651 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); |
| 607 scoped_ptr<Thread> dump_thread; | 652 scoped_ptr<Thread> dump_thread; |
| 608 { | 653 { |
| 609 AutoLock lock(lock_); | 654 AutoLock lock(lock_); |
| 610 dump_thread = std::move(dump_thread_); | 655 dump_thread = std::move(dump_thread_); |
| 611 session_state_ = nullptr; | 656 session_state_ = nullptr; |
| 612 } | 657 } |
| 613 | 658 |
| 614 // Thread stops are blocking and must be performed outside of the |lock_| | 659 // Thread stops are blocking and must be performed outside of the |lock_| |
| 615 // or will deadlock (e.g., if ContinueAsyncProcessDump() tries to acquire it). | 660 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). |
| 616 periodic_dump_timer_.Stop(); | 661 periodic_dump_timer_.Stop(); |
| 617 if (dump_thread) | 662 if (dump_thread) |
| 618 dump_thread->Stop(); | 663 dump_thread->Stop(); |
| 619 } | 664 } |
| 620 | 665 |
| 621 uint64_t MemoryDumpManager::GetTracingProcessId() const { | 666 uint64_t MemoryDumpManager::GetTracingProcessId() const { |
| 622 return delegate_->GetTracingProcessId(); | 667 return delegate_->GetTracingProcessId(); |
| 623 } | 668 } |
| 624 | 669 |
| 625 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( | 670 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( |
| 626 MemoryDumpProvider* dump_provider, | 671 MemoryDumpProvider* dump_provider, |
| 627 const char* name, | 672 const char* name, |
| 628 const scoped_refptr<SingleThreadTaskRunner>& task_runner, | 673 const scoped_refptr<SequencedTaskRunner>& task_runner, |
| 629 const MemoryDumpProvider::Options& options) | 674 const MemoryDumpProvider::Options& options) |
| 630 : dump_provider(dump_provider), | 675 : dump_provider(dump_provider), |
| 631 name(name), | 676 name(name), |
| 632 task_runner(task_runner), | 677 task_runner(task_runner), |
| 633 options(options), | 678 options(options), |
| 634 consecutive_failures(0), | 679 consecutive_failures(0), |
| 635 disabled(false) {} | 680 disabled(false) {} |
| 636 | 681 |
| 637 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {} | 682 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {} |
| 638 | 683 |
| (...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 672 auto iter = process_dumps.find(pid); | 717 auto iter = process_dumps.find(pid); |
| 673 if (iter == process_dumps.end()) { | 718 if (iter == process_dumps.end()) { |
| 674 scoped_ptr<ProcessMemoryDump> new_pmd(new ProcessMemoryDump(session_state)); | 719 scoped_ptr<ProcessMemoryDump> new_pmd(new ProcessMemoryDump(session_state)); |
| 675 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; | 720 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; |
| 676 } | 721 } |
| 677 return iter->second.get(); | 722 return iter->second.get(); |
| 678 } | 723 } |
| 679 | 724 |
| 680 } // namespace trace_event | 725 } // namespace trace_event |
| 681 } // namespace base | 726 } // namespace base |
| OLD | NEW |