OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <utility> | 8 #include <utility> |
9 | 9 |
10 #include "base/atomic_sequence_num.h" | 10 #include "base/atomic_sequence_num.h" |
(...skipping 67 matching lines...)
78 TRACE_EVENT_NESTABLE_ASYNC_END1( | 78 TRACE_EVENT_NESTABLE_ASYNC_END1( |
79 MemoryDumpManager::kTraceCategory, "GlobalMemoryDump", | 79 MemoryDumpManager::kTraceCategory, "GlobalMemoryDump", |
80 TRACE_ID_MANGLE(dump_guid), "success", success); | 80 TRACE_ID_MANGLE(dump_guid), "success", success); |
81 | 81 |
82 if (!wrapped_callback.is_null()) { | 82 if (!wrapped_callback.is_null()) { |
83 wrapped_callback.Run(dump_guid, success); | 83 wrapped_callback.Run(dump_guid, success); |
84 wrapped_callback.Reset(); | 84 wrapped_callback.Reset(); |
85 } | 85 } |
86 } | 86 } |
87 | 87 |
88 // Empty callback to PostTask the deletion of an owned MemoryDumpProvider. | |
89 void DeleteMemoryDumpProvider(scoped_ptr<MemoryDumpProvider> mdp) { | |
90 // We just need an empty method that takes ownership of the scoped_ptr | |
91 // and de-scopes it, causing its deletion. | |
92 } | |
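The helper works because Bind() plus Passed() moves the scoped_ptr into the task's bound state: the provider is deleted when the posted task runs on the dump thread, or when the task itself is destroyed unrun. Below is a minimal self-contained sketch of that idiom in standard C++, with std::unique_ptr and a move-only lambda standing in for scoped_ptr and base::Passed(); every name in it is illustrative, not a Chromium API.

    #include <future>
    #include <memory>
    #include <queue>
    #include <utility>

    struct Provider {};  // Stand-in for MemoryDumpProvider.

    // Stand-in for a task runner's queue; std::packaged_task (unlike
    // std::function) accepts the move-only closure pushed below.
    std::queue<std::packaged_task<void()>> task_queue;

    void PostDeletion(std::unique_ptr<Provider> provider) {
      // The closure owns the provider, like Bind(..., Passed(&mdp)): the
      // provider dies when the task runs, or when the task is dropped unrun.
      task_queue.push(std::packaged_task<void()>(
          [owned = std::move(provider)]() mutable { owned.reset(); }));
    }

    int main() {
      PostDeletion(std::unique_ptr<Provider>(new Provider));
      while (!task_queue.empty()) {  // The "dump thread" draining its queue.
        task_queue.front()();
        task_queue.pop();
      }
      return 0;
    }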
93 | |
88 } // namespace | 94 } // namespace |
89 | 95 |
90 // static | 96 // static |
91 const char* const MemoryDumpManager::kTraceCategory = | 97 const char* const MemoryDumpManager::kTraceCategory = |
92 TRACE_DISABLED_BY_DEFAULT("memory-infra"); | 98 TRACE_DISABLED_BY_DEFAULT("memory-infra"); |
93 | 99 |
94 // static | 100 // static |
95 const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3; | 101 const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3; |
96 | 102 |
97 // static | 103 // static |
(...skipping 113 matching lines...)
211 } | 217 } |
212 | 218 |
213 void MemoryDumpManager::RegisterDumpProvider( | 219 void MemoryDumpManager::RegisterDumpProvider( |
214 MemoryDumpProvider* mdp, | 220 MemoryDumpProvider* mdp, |
215 const char* name, | 221 const char* name, |
216 const scoped_refptr<SingleThreadTaskRunner>& task_runner) { | 222 const scoped_refptr<SingleThreadTaskRunner>& task_runner) { |
217 RegisterDumpProvider(mdp, name, task_runner, MemoryDumpProvider::Options()); | 223 RegisterDumpProvider(mdp, name, task_runner, MemoryDumpProvider::Options()); |
218 } | 224 } |
219 | 225 |
220 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { | 226 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { |
227 UnregisterDumpProviderInternal(mdp, false /* delete_async */); | |
228 } | |
229 | |
230 void MemoryDumpManager::UnregisterAndDeleteDumpProviderAsync( | |
231 scoped_ptr<MemoryDumpProvider> mdp) { | |
232 UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */); | |
233 } | |
234 | |
235 void MemoryDumpManager::UnregisterDumpProviderInternal(MemoryDumpProvider* mdp, | |
236 bool delete_async) { | |
221 AutoLock lock(lock_); | 237 AutoLock lock(lock_); |
238 scoped_ptr<MemoryDumpProvider> owned_mdp; | |
239 if (delete_async) | |
240 owned_mdp.reset(mdp); // Auto delete in case of early outs. | |
222 | 241 |
223 auto mdp_iter = dump_providers_.begin(); | 242 auto mdp_iter = dump_providers_.begin(); |
224 for (; mdp_iter != dump_providers_.end(); ++mdp_iter) { | 243 for (; mdp_iter != dump_providers_.end(); ++mdp_iter) { |
225 if (mdp_iter->dump_provider == mdp) | 244 if (mdp_iter->dump_provider == mdp) |
226 break; | 245 break; |
227 } | 246 } |
228 | 247 |
229 if (mdp_iter == dump_providers_.end()) | 248 if (mdp_iter == dump_providers_.end()) |
230 return; | 249 return; // Already unregistered. |
Ruud van Asseldonk
2015/12/14 16:25:48
So the async deletion might actually be synchronou
231 | 250 |
232 // Unregistration of a MemoryDumpProvider while tracing is ongoing is safe | 251 // Unregistration of a MemoryDumpProvider while tracing is ongoing is safe |
233 // only if the MDP has specified a thread affinity (via task_runner()) AND | 252 // only if the MDP has specified a thread affinity (via task_runner()) AND |
234 // the unregistration happens on the same thread (so the MDP cannot unregister | 253 // the unregistration happens on the same thread (so the MDP cannot unregister |
235 // and OnMemoryDump() at the same time). | 254 // and OnMemoryDump() at the same time). |
236 // Otherwise, it is not possible to guarantee that its unregistration is | 255 // Otherwise, it is not possible to guarantee that its unregistration is |
237 // race-free. If you hit this DCHECK, your MDP has a bug. | 256 // race-free. If you hit this DCHECK, your MDP has a bug. |
238 DCHECK(!subtle::NoBarrier_Load(&memory_tracing_enabled_) || | 257 if (!delete_async && subtle::NoBarrier_Load(&memory_tracing_enabled_)) { |
239 (mdp_iter->task_runner && | 258 DCHECK((mdp_iter->task_runner && |
240 mdp_iter->task_runner->BelongsToCurrentThread())) | 259 mdp_iter->task_runner->BelongsToCurrentThread())) |
241 << "MemoryDumpProvider \"" << mdp_iter->name << "\" attempted to " | 260 << "MemoryDumpProvider \"" << mdp_iter->name << "\" attempted to " |
242 << "unregister itself in a racy way. Please file a crbug."; | 261 << "unregister itself in a racy way. Please file a crbug."; |
262 } | |
243 | 263 |
244 mdp_iter->unregistered = true; | 264 mdp_iter->unregistered = true; |
265 | |
266 // Delete the descriptor immediately if there are no dumps in progress. | |
267 // Otherwise, the prologue of ContinueAsyncProcessDump will take care of it. | |
268 if (outstanding_dumps_.empty()) { | |
269 dump_providers_.erase(mdp_iter); | |
270 // If |delete_async| == true, the |mdp| will be destroyed synchronously here | |
271 // as there is no reason to defer the deletion (and no thread to defer to). | |
272 } else if (delete_async) { | |
273 outstanding_dumps_.front()->dump_thread_task_runner->PostTask( | |
274 FROM_HERE, Bind(&DeleteMemoryDumpProvider, Passed(&owned_mdp))); | |
Ruud van Asseldonk
2015/12/14 16:25:48
Having an in-flight message own the dump provider
275 } | |
245 } | 276 } |
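For callers, the split above gives two teardown paths: plain UnregisterDumpProvider() when the provider outlives any dump, and the new async variant when its lifetime ends while tracing may be active. A hypothetical call site is sketched below; MyDumpProvider and the registration arguments are illustrative, and only the manager methods come from this file.

    #include "base/memory/scoped_ptr.h"
    #include "base/trace_event/memory_dump_manager.h"
    #include "base/trace_event/memory_dump_provider.h"

    class MyDumpProvider : public base::trace_event::MemoryDumpProvider {
     public:
      bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                        base::trace_event::ProcessMemoryDump* pmd) override {
        return true;  // Fill |pmd| with allocator stats here.
      }
    };

    void RegisterThenTearDown() {
      scoped_ptr<MyDumpProvider> mdp(new MyDumpProvider);
      base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
          mdp.get(), "MyDumpProvider", nullptr /* task_runner */);

      // Deleting |mdp| directly could race with an in-flight OnMemoryDump().
      // Hand ownership over and let the manager pick a safe deletion point:
      base::trace_event::MemoryDumpManager::GetInstance()
          ->UnregisterAndDeleteDumpProviderAsync(std::move(mdp));
    }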
246 | 277 |
247 void MemoryDumpManager::RequestGlobalDump( | 278 void MemoryDumpManager::RequestGlobalDump( |
248 MemoryDumpType dump_type, | 279 MemoryDumpType dump_type, |
249 MemoryDumpLevelOfDetail level_of_detail, | 280 MemoryDumpLevelOfDetail level_of_detail, |
250 const MemoryDumpCallback& callback) { | 281 const MemoryDumpCallback& callback) { |
251 // Bail out immediately if tracing is not enabled at all. | 282 // Bail out immediately if tracing is not enabled at all. |
252 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) { | 283 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) { |
253 if (!callback.is_null()) | 284 if (!callback.is_null()) |
254 callback.Run(0u /* guid */, false /* success */); | 285 callback.Run(0u /* guid */, false /* success */); |
(...skipping 31 matching lines...)
286 MemoryDumpType dump_type, | 317 MemoryDumpType dump_type, |
287 MemoryDumpLevelOfDetail level_of_detail) { | 318 MemoryDumpLevelOfDetail level_of_detail) { |
288 RequestGlobalDump(dump_type, level_of_detail, MemoryDumpCallback()); | 319 RequestGlobalDump(dump_type, level_of_detail, MemoryDumpCallback()); |
289 } | 320 } |
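A hypothetical trigger site for the two overloads above. The enum values and base::Bind() usage are assumptions about this revision's headers (the names match the era's EXPLICITLY_TRIGGERED and DETAILED constants); the callback signature is the MemoryDumpCallback used throughout this file, i.e. void(uint64_t, bool).

    #include "base/bind.h"
    #include "base/trace_event/memory_dump_manager.h"
    #include "base/trace_event/memory_dump_request_args.h"

    void OnGlobalDumpDone(uint64_t dump_guid, bool success) {
      // Invoked once the global dump completes, or straight away with
      // success == false when the memory-infra category is disabled.
    }

    void TriggerDump() {
      base::trace_event::MemoryDumpManager::GetInstance()->RequestGlobalDump(
          base::trace_event::MemoryDumpType::EXPLICITLY_TRIGGERED,
          base::trace_event::MemoryDumpLevelOfDetail::DETAILED,
          base::Bind(&OnGlobalDumpDone));
    }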
290 | 321 |
291 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, | 322 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, |
292 const MemoryDumpCallback& callback) { | 323 const MemoryDumpCallback& callback) { |
293 TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump", | 324 TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump", |
294 TRACE_ID_MANGLE(args.dump_guid)); | 325 TRACE_ID_MANGLE(args.dump_guid)); |
295 | 326 |
327 bool no_dump_providers_registered; | |
296 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; | 328 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; |
297 { | 329 { |
298 AutoLock lock(lock_); | 330 AutoLock lock(lock_); |
331 no_dump_providers_registered = dump_providers_.empty(); | |
299 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( | 332 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( |
300 args, dump_providers_.begin(), session_state_, callback, | 333 args, dump_providers_.begin(), session_state_, callback, |
301 dump_thread_->task_runner())); | 334 dump_thread_->task_runner())); |
335 outstanding_dumps_.push_back(pmd_async_state.get()); | |
Ruud van Asseldonk
2015/12/14 16:25:48
By doing this you lose all of the advantages of a
302 } | 336 } |
303 | 337 |
304 TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump", | 338 TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump", |
305 TRACE_ID_MANGLE(args.dump_guid), | 339 TRACE_ID_MANGLE(args.dump_guid), |
306 TRACE_EVENT_FLAG_FLOW_OUT); | 340 TRACE_EVENT_FLAG_FLOW_OUT); |
307 | 341 |
342 if (no_dump_providers_registered) | |
343 return FinalizeDumpAndAddToTrace(pmd_async_state.Pass()); | |
344 | |
308 // Start the thread hop. |dump_providers_| are kept sorted by thread, so | 345 // Start the thread hop. |dump_providers_| are kept sorted by thread, so |
309 // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread | 346 // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread |
310 // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()). | 347 // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()). |
311 ContinueAsyncProcessDump(std::move(pmd_async_state)); | 348 ContinueAsyncProcessDump(pmd_async_state.release()); |
312 } | 349 } |
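CreateProcessDump registers each in-flight dump in |outstanding_dumps_| under the lock, which is what lets UnregisterDumpProviderInternal above choose between erasing a descriptor immediately and deferring deletion to a dump thread. The sketch below models that bookkeeping with standard types; std::mutex and std::vector stand in for |lock_| and the member containers, and all names are illustrative.

    #include <algorithm>
    #include <memory>
    #include <mutex>
    #include <vector>

    struct AsyncDumpState {};  // Stand-in for ProcessMemoryDumpAsyncState.

    std::mutex g_lock;
    std::vector<AsyncDumpState*> g_outstanding;  // Raw observers, not owners.

    std::unique_ptr<AsyncDumpState> StartDump() {
      std::unique_ptr<AsyncDumpState> state(new AsyncDumpState);
      {
        std::lock_guard<std::mutex> guard(g_lock);
        g_outstanding.push_back(state.get());  // Visible to unregistration.
      }
      return state;  // Ownership travels with the dump continuation.
    }

    void FinishDump(std::unique_ptr<AsyncDumpState> state) {
      std::lock_guard<std::mutex> guard(g_lock);
      // Erase-remove idiom: std::remove alone only shuffles elements around.
      g_outstanding.erase(
          std::remove(g_outstanding.begin(), g_outstanding.end(), state.get()),
          g_outstanding.end());
    }  // |state| is destroyed here, after it is no longer discoverable.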
313 | 350 |
314 // At most one ContinueAsyncProcessDump() can be active at any time for a given | 351 // At most one ContinueAsyncProcessDump() can be active at any time for a given |
315 // PMD, regardless of status of the |lock_|. |lock_| is used here purely to | 352 // PMD, regardless of status of the |lock_|. |lock_| is used here purely to |
316 // ensure consistency w.r.t. (un)registrations of |dump_providers_|. | 353 // ensure consistency w.r.t. (un)registrations of |dump_providers_|. |
317 // The linearization of dump providers' OnMemoryDump invocations is achieved by | 354 // The linearization of dump providers' OnMemoryDump invocations is achieved by |
318 // means of subsequent PostTask(s). | 355 // means of subsequent PostTask(s). |
319 // | 356 // |
320 // 1) Prologue: | 357 // 1) Prologue: |
321 // - Check if the dump provider is disabled, if so skip the dump. | 358 // - Check if the dump provider is disabled, if so skip the dump. |
322 // - Check if we are on the right thread. If not hop and continue there. | 359 // - Check if we are on the right thread. If not hop and continue there. |
323 // 2) Invoke the dump provider's OnMemoryDump() (unless skipped). | 360 // 2) Invoke the dump provider's OnMemoryDump() (unless skipped). |
324 // 3) Epilogue: | 361 // 3) Epilogue: |
325 // - Unregister the dump provider if it failed too many times consecutively. | 362 // - Unregister the dump provider if it failed too many times consecutively. |
326 // - Advance the |next_dump_provider| iterator to the next dump provider. | 363 // - Advance the |next_dump_provider| iterator to the next dump provider. |
327 // - If this was the last hop, create a trace event, add it to the trace | 364 // - If this was the last hop, create a trace event, add it to the trace |
328 // and finalize (invoke callback). | 365 // and finalize (invoke callback). |
329 | |
330 void MemoryDumpManager::ContinueAsyncProcessDump( | 366 void MemoryDumpManager::ContinueAsyncProcessDump( |
331 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { | 367 ProcessMemoryDumpAsyncState* owned_pmd_async_state) { |
332 // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs | 368 // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs |
333 // in the PostTask below don't end up registering their own dump providers | 369 // in the PostTask below don't end up registering their own dump providers |
334 // (for discounting trace memory overhead) while holding the |lock_|. | 370 // (for discounting trace memory overhead) while holding the |lock_|. |
335 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); | 371 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); |
336 | 372 |
373 // In theory |owned_pmd_async_state| should be a scoped_ptr. The only reason | |
374 // why it isn't is because of the corner case logic of |did_post_task| below, | |
375 // which needs to take back the ownsership of the |pmd_async_state| when a | |
Ruud van Asseldonk
2015/12/14 16:25:48
/s/ownsership/ownership/
376 // thread goes away and consequently PostTask() fails. PostTask(), in fact, | |
377 // destroys the scoped_ptr arguments upon failure. This would prevent us | |
378 // from just skipping the hop and moving on. Hence the naked -> scoped ptr conversion. | |
379 auto pmd_async_state = make_scoped_ptr(owned_pmd_async_state); | |
Ruud van Asseldonk
2015/12/14 16:25:48
I wonder if it would be simpler to just make |pmd_
380 owned_pmd_async_state = nullptr; | |
381 | |
337 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; | 382 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; |
338 const char* dump_provider_name = nullptr; | 383 const char* dump_provider_name = nullptr; |
339 | 384 |
340 // Pid of the target process being dumped. Often kNullProcessId (= current | 385 // Pid of the target process being dumped. Often kNullProcessId (= current |
341 // process), non-zero when the coordinator process creates dumps on behalf | 386 // process), non-zero when the coordinator process creates dumps on behalf |
342 // of child processes (see crbug.com/461788). | 387 // of child processes (see crbug.com/461788). |
343 ProcessId pid; | 388 ProcessId pid; |
344 | 389 |
345 // DO NOT put any LOG() statement in the locked sections, as in some contexts | 390 // DO NOT put any LOG() statement in the locked sections, as in some contexts |
346 // (GPU process) LOG() ends up performing PostTask/IPCs. | 391 // (GPU process) LOG() ends up performing PostTask/IPCs. |
347 MemoryDumpProvider* mdp; | 392 MemoryDumpProvider* mdp; |
348 bool skip_dump = false; | 393 bool skip_dump = false; |
349 { | 394 { |
350 AutoLock lock(lock_); | 395 AutoLock lock(lock_); |
351 | 396 |
352 auto mdp_info = pmd_async_state->next_dump_provider; | 397 auto mdp_info = pmd_async_state->next_dump_provider; |
398 DCHECK(mdp_info != dump_providers_.end()); | |
353 mdp = mdp_info->dump_provider; | 399 mdp = mdp_info->dump_provider; |
354 dump_provider_name = mdp_info->name; | 400 dump_provider_name = mdp_info->name; |
355 pid = mdp_info->options.target_pid; | 401 pid = mdp_info->options.target_pid; |
356 | 402 |
357 // If the dump provider did not specify a thread affinity, dump on | 403 // If the dump provider did not specify a thread affinity, dump on |
358 // |dump_thread_|. | 404 // |dump_thread_|. |
359 SingleThreadTaskRunner* task_runner = mdp_info->task_runner.get(); | 405 SingleThreadTaskRunner* task_runner = mdp_info->task_runner.get(); |
360 if (!task_runner) | 406 if (!task_runner) |
361 task_runner = pmd_async_state->dump_thread_task_runner.get(); | 407 task_runner = pmd_async_state->dump_thread_task_runner.get(); |
362 | 408 |
363 // |dump_thread_| might have been Stop()-ed at this point (if tracing was | 409 // |dump_thread_| might have been Stop()-ed at this point (if tracing was |
364 // disabled in the meanwhile). In such case the PostTask() below will fail. | 410 // disabled in the meanwhile). In such case the PostTask() below will fail. |
365 // |task_runner|, however, should always be non-null. | 411 // |task_runner|, however, should always be non-null. |
366 DCHECK(task_runner); | 412 DCHECK(task_runner); |
367 | 413 |
368 if (mdp_info->disabled || mdp_info->unregistered) { | 414 if (mdp_info->disabled || mdp_info->unregistered) { |
369 skip_dump = true; | 415 skip_dump = true; |
370 } else if (!task_runner->BelongsToCurrentThread()) { | 416 } else if (!task_runner->BelongsToCurrentThread()) { |
371 // It's time to hop onto another thread. | 417 // It's time to hop onto another thread. |
372 | 418 |
373 // Copy the callback + arguments just for the unlikley case in which | 419 // Copy the callback + arguments just for the unlikley case in which |
Ruud van Asseldonk
2015/12/14 16:25:48
/s/unlikley/unlikely/
374 // PostTask fails. In such case the Bind helper will destroy the | 420 // PostTask fails. In such case the Bind helper will destroy the |
375 // pmd_async_state and we must keep a copy of the fields to notify the | 421 // pmd_async_state and we must keep a copy of the fields to notify the |
376 // abort. | 422 // abort. |
377 MemoryDumpCallback callback = pmd_async_state->callback; | 423 MemoryDumpCallback callback = pmd_async_state->callback; |
Ruud van Asseldonk
2015/12/14 16:25:48
This variable is now unused. (Didn't Clang warn ab
378 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = | 424 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = |
Ruud van Asseldonk
2015/12/14 16:25:48
Same here.
379 pmd_async_state->callback_task_runner; | 425 pmd_async_state->callback_task_runner; |
380 | 426 |
381 const bool did_post_task = task_runner->PostTask( | 427 const bool did_post_task = task_runner->PostTask( |
Ruud van Asseldonk
2015/12/14 16:25:48
From the |PostTask| documentation, this is no guar
382 FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump, | 428 FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump, |
383 Unretained(this), Passed(&pmd_async_state))); | 429 Unretained(this), Unretained(pmd_async_state.get()))); |
384 if (did_post_task) | 430 |
431 if (did_post_task) { | |
432 // Ownership is transferred to the next ContinueAsyncProcessDump(). | |
433 (void)pmd_async_state.release(); | |
Ruud van Asseldonk
2015/12/14 16:25:48
You can do |pmd_async_state.reset()| instead.
385 return; | 434 return; |
435 } | |
386 | 436 |
387 // The thread is gone. At this point the best thing we can do is to | 437 // The thread is gone. Disable the dump provider, skip its dump and move |
388 // disable the dump provider and abort this dump. | 438 // on with the other dump providers. |
389 mdp_info->disabled = true; | 439 mdp_info->disabled = true; |
Ruud van Asseldonk
2015/12/14 16:25:48
Shouldn't all dump providers that share that threa
390 return AbortDumpLocked(callback, callback_task_runner, dump_guid); | 440 skip_dump = true; |
391 } | 441 } |
392 } // AutoLock(lock_) | 442 } // AutoLock(lock_) |
393 | 443 |
394 // Invoke the dump provider without holding the |lock_|. | 444 // Invoke the dump provider without holding the |lock_|. |
395 bool finalize = false; | 445 bool finalize = false; |
396 bool dump_successful = false; | 446 bool dump_successful = false; |
397 | 447 |
398 if (!skip_dump) { | 448 if (!skip_dump) { |
399 TRACE_EVENT_WITH_FLOW1(kTraceCategory, | 449 TRACE_EVENT_WITH_FLOW1(kTraceCategory, |
400 "MemoryDumpManager::ContinueAsyncProcessDump", | 450 "MemoryDumpManager::ContinueAsyncProcessDump", |
401 TRACE_ID_MANGLE(dump_guid), | 451 TRACE_ID_MANGLE(dump_guid), |
402 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT, | 452 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT, |
403 "dump_provider.name", dump_provider_name); | 453 "dump_provider.name", dump_provider_name); |
404 MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail}; | 454 MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail}; |
405 ProcessMemoryDump* process_memory_dump = | 455 ProcessMemoryDump* process_memory_dump = |
406 pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(pid); | 456 pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(pid); |
407 dump_successful = mdp->OnMemoryDump(args, process_memory_dump); | 457 dump_successful = mdp->OnMemoryDump(args, process_memory_dump); |
408 } | 458 } |
409 | 459 |
410 { | 460 { |
411 AutoLock lock(lock_); | 461 AutoLock lock(lock_); |
412 auto mdp_info = pmd_async_state->next_dump_provider; | 462 auto mdp_info = pmd_async_state->next_dump_provider; |
463 DCHECK(mdp_info != dump_providers_.end()); | |
413 if (dump_successful) { | 464 if (dump_successful) { |
414 mdp_info->consecutive_failures = 0; | 465 mdp_info->consecutive_failures = 0; |
415 } else if (!skip_dump) { | 466 } else if (!skip_dump) { |
416 ++mdp_info->consecutive_failures; | 467 ++mdp_info->consecutive_failures; |
417 if (mdp_info->consecutive_failures >= kMaxConsecutiveFailuresCount) { | 468 if (mdp_info->consecutive_failures >= kMaxConsecutiveFailuresCount) { |
418 mdp_info->disabled = true; | 469 mdp_info->disabled = true; |
419 } | 470 } |
420 } | 471 } |
421 ++pmd_async_state->next_dump_provider; | 472 ++pmd_async_state->next_dump_provider; |
422 finalize = pmd_async_state->next_dump_provider == dump_providers_.end(); | 473 if (pmd_async_state->next_dump_provider == dump_providers_.end()) { |
423 | 474 finalize = true; |
475 outstanding_dumps_.erase(std::remove(outstanding_dumps_.begin(), | |
476 outstanding_dumps_.end(), pmd_async_state.get()), outstanding_dumps_.end()); | |
477 } | |
424 if (mdp_info->unregistered) | 478 if (mdp_info->unregistered) |
425 dump_providers_.erase(mdp_info); | 479 dump_providers_.erase(mdp_info); |
426 } | 480 } // AutoLock(lock_) |
427 | 481 |
428 if (!skip_dump && !dump_successful) { | 482 if (!skip_dump && !dump_successful) { |
429 LOG(ERROR) << "MemoryDumpProvider \"" << dump_provider_name << "\" failed, " | 483 LOG(ERROR) << "MemoryDumpProvider \"" << dump_provider_name << "\" failed, " |
430 << "possibly due to sandboxing (crbug.com/461788)." | 484 << "possibly due to sandboxing (crbug.com/461788)." |
431 << "Disabling dumper for current process. Try --no-sandbox."; | 485 << "Disabling dumper for current process. Try --no-sandbox."; |
432 } | 486 } |
433 | 487 |
434 if (finalize) | 488 if (finalize) |
435 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state)); | 489 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state)); |
436 | 490 |
437 ContinueAsyncProcessDump(std::move(pmd_async_state)); | 491 ContinueAsyncProcessDump(std::move(pmd_async_state.release())); |
Ruud van Asseldonk
2015/12/14 16:25:48
You don't need the move here.
438 } | 492 } |
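The prologue/epilogue comment boils this function down to a reusable pattern: run the current provider if already on its thread, otherwise post the continuation there, and treat a failed post as "thread gone, skip". The toy model below captures that loop, including the raw-pointer release dance described in the comments; every name is illustrative, and PostToThread is stubbed to fail so the sketch stays single-threaded and compilable.

    #include <functional>
    #include <memory>
    #include <thread>
    #include <vector>

    struct ProviderInfo {
      std::thread::id affinity;       // Thread this provider must dump on.
      std::function<bool()> on_dump;  // Stand-in for OnMemoryDump().
    };

    struct DumpState {
      std::vector<ProviderInfo>* providers;
      size_t next = 0;
    };

    // Stand-in for SingleThreadTaskRunner::PostTask(); a real one would
    // enqueue |task| on the target thread. Stubbed to report failure.
    bool PostToThread(std::thread::id tid, std::function<void()> task) {
      return false;
    }

    void ContinueDump(DumpState* owned_state) {
      std::unique_ptr<DumpState> state(owned_state);  // Take ownership back.
      while (state->next < state->providers->size()) {
        ProviderInfo& info = (*state->providers)[state->next];
        if (info.affinity != std::this_thread::get_id()) {
          // Pass a raw pointer: if the post fails, the bound state must not
          // be destroyed, because we still need it to skip and carry on.
          DumpState* raw = state.get();
          if (PostToThread(info.affinity, [raw] { ContinueDump(raw); })) {
            state.release();  // The posted task owns the state now.
            return;
          }
          ++state->next;  // Thread gone: skip this provider, keep dumping.
          continue;
        }
        info.on_dump();  // Consecutive-failure accounting omitted.
        ++state->next;
      }
      // Last provider done: finalize here; |state| dies on scope exit.
    }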
439 | 493 |
440 // static | 494 // static |
441 void MemoryDumpManager::FinalizeDumpAndAddToTrace( | 495 void MemoryDumpManager::FinalizeDumpAndAddToTrace( |
442 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { | 496 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { |
443 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; | 497 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; |
444 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { | 498 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { |
445 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = | 499 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = |
446 pmd_async_state->callback_task_runner; | 500 pmd_async_state->callback_task_runner; |
447 callback_task_runner->PostTask( | 501 callback_task_runner->PostTask( |
(...skipping 28 matching lines...)
476 | 530 |
477 if (!pmd_async_state->callback.is_null()) { | 531 if (!pmd_async_state->callback.is_null()) { |
478 pmd_async_state->callback.Run(dump_guid, true /* success */); | 532 pmd_async_state->callback.Run(dump_guid, true /* success */); |
479 pmd_async_state->callback.Reset(); | 533 pmd_async_state->callback.Reset(); |
480 } | 534 } |
481 | 535 |
482 TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump", | 536 TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump", |
483 TRACE_ID_MANGLE(dump_guid)); | 537 TRACE_ID_MANGLE(dump_guid)); |
484 } | 538 } |
485 | 539 |
486 // static | |
487 void MemoryDumpManager::AbortDumpLocked( | |
488 MemoryDumpCallback callback, | |
489 scoped_refptr<SingleThreadTaskRunner> task_runner, | |
490 uint64_t dump_guid) { | |
491 if (callback.is_null()) | |
492 return; // There is nothing to NACK. | |
493 | |
494 // Post the callback even if we are already on the right thread to avoid | |
495 // invoking the callback while holding the lock_. | |
496 task_runner->PostTask(FROM_HERE, | |
497 Bind(callback, dump_guid, false /* success */)); | |
498 } | |
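AbortDumpLocked, deleted by this patch, encoded a rule worth keeping in mind: never invoke a caller-supplied callback while holding your own lock, even if already on the right thread, because the callback can re-enter and deadlock. A minimal sketch of the rule with standard types (names are illustrative):

    #include <cstdint>
    #include <functional>
    #include <mutex>
    #include <queue>

    std::mutex g_lock;
    std::queue<std::function<void()>> g_current_thread_tasks;  // Stand-in runner.

    void AbortDump(std::function<void(uint64_t, bool)> callback,
                   uint64_t dump_guid) {
      std::lock_guard<std::mutex> guard(g_lock);
      if (!callback)
        return;  // There is nothing to NACK.
      // Defer instead of calling: |callback| could try to take |g_lock| again
      // or run arbitrary code that must not execute in this critical section.
      g_current_thread_tasks.push(
          [callback, dump_guid] { callback(dump_guid, false /* success */); });
    }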
499 | |
500 void MemoryDumpManager::OnTraceLogEnabled() { | 540 void MemoryDumpManager::OnTraceLogEnabled() { |
501 bool enabled; | 541 bool enabled; |
502 TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled); | 542 TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled); |
503 if (!enabled) | 543 if (!enabled) |
504 return; | 544 return; |
505 | 545 |
506 // Initialize the TraceLog for the current thread. This is to avoid that the | 546 // Initialize the TraceLog for the current thread. This is to avoid that the |
507 // TraceLog memory dump provider is registered lazily in the PostTask() below | 547 // TraceLog memory dump provider is registered lazily in the PostTask() below |
508 // while the |lock_| is taken; | 548 // while the |lock_| is taken; |
509 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); | 549 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); |
(...skipping 141 matching lines...)
651 auto iter = process_dumps.find(pid); | 691 auto iter = process_dumps.find(pid); |
652 if (iter == process_dumps.end()) { | 692 if (iter == process_dumps.end()) { |
653 scoped_ptr<ProcessMemoryDump> new_pmd(new ProcessMemoryDump(session_state)); | 693 scoped_ptr<ProcessMemoryDump> new_pmd(new ProcessMemoryDump(session_state)); |
654 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; | 694 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; |
655 } | 695 } |
656 return iter->second.get(); | 696 return iter->second.get(); |
657 } | 697 } |
658 | 698 |
659 } // namespace trace_event | 699 } // namespace trace_event |
660 } // namespace base | 700 } // namespace base |