| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
| 6 | 6 |
| 7 #include <inttypes.h> | 7 #include <inttypes.h> |
| 8 #include <stdio.h> | 8 #include <stdio.h> |
| 9 | 9 |
| 10 #include <algorithm> | 10 #include <algorithm> |
| (...skipping 331 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 342 // This actually happens in some tests which don't have a clean tear-down | 342 // This actually happens in some tests which don't have a clean tear-down |
| 343 // path for RenderThreadImpl::Init(). | 343 // path for RenderThreadImpl::Init(). |
| 344 if (already_registered) | 344 if (already_registered) |
| 345 return; | 345 return; |
| 346 | 346 |
| 347 // The list of polling MDPs is populated in OnTraceLogEnabled(). This code | 347 // The list of polling MDPs is populated in OnTraceLogEnabled(). This code |
| 348 // deals with the case of a MDP capable of fast polling that is registered | 348 // deals with the case of a MDP capable of fast polling that is registered |
| 349 // after the OnTraceLogEnabled(). | 349 // after the OnTraceLogEnabled(). |
| 350 if (options.is_fast_polling_supported && dump_thread_) { | 350 if (options.is_fast_polling_supported && dump_thread_) { |
| 351 dump_thread_->task_runner()->PostTask( | 351 dump_thread_->task_runner()->PostTask( |
| 352 FROM_HERE, Bind(&MemoryDumpManager::RegisterPollingMDPOnDumpThread, | 352 FROM_HERE, |
| 353 Unretained(this), mdpinfo)); | 353 BindOnce(&MemoryDumpManager::RegisterPollingMDPOnDumpThread, |
| 354 Unretained(this), mdpinfo)); |
| 354 } | 355 } |
| 355 } | 356 } |
| 356 | 357 |
| 357 if (heap_profiling_enabled_) | 358 if (heap_profiling_enabled_) |
| 358 mdp->OnHeapProfilingEnabled(true); | 359 mdp->OnHeapProfilingEnabled(true); |
| 359 } | 360 } |
| 360 | 361 |
| 361 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { | 362 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { |
| 362 UnregisterDumpProviderInternal(mdp, false /* delete_async */); | 363 UnregisterDumpProviderInternal(mdp, false /* delete_async */); |
| 363 } | 364 } |
| (...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 413 // unregistration will not race with OnMemoryDump() calls. | 414 // unregistration will not race with OnMemoryDump() calls. |
| 414 DCHECK((*mdp_iter)->task_runner && | 415 DCHECK((*mdp_iter)->task_runner && |
| 415 (*mdp_iter)->task_runner->RunsTasksOnCurrentThread()) | 416 (*mdp_iter)->task_runner->RunsTasksOnCurrentThread()) |
| 416 << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to " | 417 << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to " |
| 417 << "unregister itself in a racy way. Please file a crbug."; | 418 << "unregister itself in a racy way. Please file a crbug."; |
| 418 } | 419 } |
| 419 | 420 |
| 420 if ((*mdp_iter)->options.is_fast_polling_supported && dump_thread_) { | 421 if ((*mdp_iter)->options.is_fast_polling_supported && dump_thread_) { |
| 421 DCHECK(take_mdp_ownership_and_delete_async); | 422 DCHECK(take_mdp_ownership_and_delete_async); |
| 422 dump_thread_->task_runner()->PostTask( | 423 dump_thread_->task_runner()->PostTask( |
| 423 FROM_HERE, Bind(&MemoryDumpManager::UnregisterPollingMDPOnDumpThread, | 424 FROM_HERE, |
| 424 Unretained(this), *mdp_iter)); | 425 BindOnce(&MemoryDumpManager::UnregisterPollingMDPOnDumpThread, |
| 426 Unretained(this), *mdp_iter)); |
| 425 } | 427 } |
| 426 | 428 |
| 427 // The MDPInfo instance can still be referenced by the | 429 // The MDPInfo instance can still be referenced by the |
| 428 // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason | 430 // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason |
| 429 // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump() | 431 // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump() |
| 430 // to just skip it, without actually invoking the |mdp|, which might be | 432 // to just skip it, without actually invoking the |mdp|, which might be |
| 431 // destroyed by the caller soon after this method returns. | 433 // destroyed by the caller soon after this method returns. |
| 432 (*mdp_iter)->disabled = true; | 434 (*mdp_iter)->disabled = true; |
| 433 dump_providers_.erase(mdp_iter); | 435 dump_providers_.erase(mdp_iter); |
| 434 } | 436 } |
| (...skipping 174 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 609 } | 611 } |
| 610 | 612 |
| 611 if (mdpinfo->options.dumps_on_single_thread_task_runner && | 613 if (mdpinfo->options.dumps_on_single_thread_task_runner && |
| 612 task_runner->RunsTasksOnCurrentThread()) { | 614 task_runner->RunsTasksOnCurrentThread()) { |
| 613 // If |dumps_on_single_thread_task_runner| is true then no PostTask is | 615 // If |dumps_on_single_thread_task_runner| is true then no PostTask is |
| 614 // required if we are on the right thread. | 616 // required if we are on the right thread. |
| 615 return InvokeOnMemoryDump(pmd_async_state.release()); | 617 return InvokeOnMemoryDump(pmd_async_state.release()); |
| 616 } | 618 } |
| 617 | 619 |
| 618 bool did_post_task = task_runner->PostTask( | 620 bool did_post_task = task_runner->PostTask( |
| 619 FROM_HERE, Bind(&MemoryDumpManager::InvokeOnMemoryDump, Unretained(this), | 621 FROM_HERE, BindOnce(&MemoryDumpManager::InvokeOnMemoryDump, |
| 620 Unretained(pmd_async_state.get()))); | 622 Unretained(this), Unretained(pmd_async_state.get()))); |
| 621 | 623 |
| 622 if (did_post_task) { | 624 if (did_post_task) { |
| 623 // Ownership is transferred to InvokeOnMemoryDump(). | 625 // Ownership is transferred to InvokeOnMemoryDump(). |
| 624 ignore_result(pmd_async_state.release()); | 626 ignore_result(pmd_async_state.release()); |
| 625 return; | 627 return; |
| 626 } | 628 } |
| 627 | 629 |
| 628 // PostTask usually fails only if the process or thread is shut down. So, the | 630 // PostTask usually fails only if the process or thread is shut down. So, the |
| 629 // dump provider is disabled here. But, don't disable unbound dump providers. | 631 // dump provider is disabled here. But, don't disable unbound dump providers. |
| 630 // The utility thread is normally shutdown when disabling the trace and | 632 // The utility thread is normally shutdown when disabling the trace and |
| (...skipping 123 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 754 // static | 756 // static |
| 755 void MemoryDumpManager::FinalizeDumpAndAddToTrace( | 757 void MemoryDumpManager::FinalizeDumpAndAddToTrace( |
| 756 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { | 758 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { |
| 757 HEAP_PROFILER_SCOPED_IGNORE; | 759 HEAP_PROFILER_SCOPED_IGNORE; |
| 758 DCHECK(pmd_async_state->pending_dump_providers.empty()); | 760 DCHECK(pmd_async_state->pending_dump_providers.empty()); |
| 759 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; | 761 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; |
| 760 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { | 762 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { |
| 761 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = | 763 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = |
| 762 pmd_async_state->callback_task_runner; | 764 pmd_async_state->callback_task_runner; |
| 763 callback_task_runner->PostTask( | 765 callback_task_runner->PostTask( |
| 764 FROM_HERE, Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace, | 766 FROM_HERE, BindOnce(&MemoryDumpManager::FinalizeDumpAndAddToTrace, |
| 765 Passed(&pmd_async_state))); | 767 Passed(&pmd_async_state))); |
| 766 return; | 768 return; |
| 767 } | 769 } |
| 768 | 770 |
| 769 TRACE_EVENT0(kTraceCategory, "MemoryDumpManager::FinalizeDumpAndAddToTrace"); | 771 TRACE_EVENT0(kTraceCategory, "MemoryDumpManager::FinalizeDumpAndAddToTrace"); |
| 770 | 772 |
| 771 // The results struct to fill. | 773 // The results struct to fill. |
| 772 // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203 | 774 // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203 |
| 773 MemoryDumpCallbackResult result; | 775 MemoryDumpCallbackResult result; |
| 774 | 776 |
| 775 for (const auto& kv : pmd_async_state->process_dumps) { | 777 for (const auto& kv : pmd_async_state->process_dumps) { |
| (...skipping 218 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 994 if (iter == process_dumps.end()) { | 996 if (iter == process_dumps.end()) { |
| 995 std::unique_ptr<ProcessMemoryDump> new_pmd( | 997 std::unique_ptr<ProcessMemoryDump> new_pmd( |
| 996 new ProcessMemoryDump(session_state, dump_args)); | 998 new ProcessMemoryDump(session_state, dump_args)); |
| 997 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; | 999 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; |
| 998 } | 1000 } |
| 999 return iter->second.get(); | 1001 return iter->second.get(); |
| 1000 } | 1002 } |
| 1001 | 1003 |
| 1002 } // namespace trace_event | 1004 } // namespace trace_event |
| 1003 } // namespace base | 1005 } // namespace base |
| OLD | NEW |