| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
| 6 | 6 |
| 7 #include <inttypes.h> | 7 #include <inttypes.h> |
| 8 #include <stdio.h> | 8 #include <stdio.h> |
| 9 | 9 |
| 10 #include <algorithm> | 10 #include <algorithm> |
| (...skipping 323 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 334 // This actually happens in some tests which don't have a clean tear-down | 334 // This actually happens in some tests which don't have a clean tear-down |
| 335 // path for RenderThreadImpl::Init(). | 335 // path for RenderThreadImpl::Init(). |
| 336 if (already_registered) | 336 if (already_registered) |
| 337 return; | 337 return; |
| 338 | 338 |
| 339 // The list of polling MDPs is populated OnTraceLogEnabled(). This code | 339 // The list of polling MDPs is populated OnTraceLogEnabled(). This code |
| 340 // deals with the case of a MDP capable of fast polling that is registered | 340 // deals with the case of a MDP capable of fast polling that is registered |
| 341 // after the OnTraceLogEnabled() | 341 // after the OnTraceLogEnabled() |
| 342 if (options.is_fast_polling_supported && dump_thread_) { | 342 if (options.is_fast_polling_supported && dump_thread_) { |
| 343 dump_thread_->task_runner()->PostTask( | 343 dump_thread_->task_runner()->PostTask( |
| 344 FROM_HERE, Bind(&MemoryDumpManager::RegisterPollingMDPOnDumpThread, | 344 FROM_HERE, |
| 345 Unretained(this), mdpinfo)); | 345 BindOnce(&MemoryDumpManager::RegisterPollingMDPOnDumpThread, |
| 346 Unretained(this), mdpinfo)); |
| 346 } | 347 } |
| 347 } | 348 } |
| 348 | 349 |
| 349 if (heap_profiling_enabled_) | 350 if (heap_profiling_enabled_) |
| 350 mdp->OnHeapProfilingEnabled(true); | 351 mdp->OnHeapProfilingEnabled(true); |
| 351 } | 352 } |
| 352 | 353 |
| 353 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { | 354 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { |
| 354 UnregisterDumpProviderInternal(mdp, false /* delete_async */); | 355 UnregisterDumpProviderInternal(mdp, false /* delete_async */); |
| 355 } | 356 } |
| (...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 405 // unregistration will not race with OnMemoryDump() calls. | 406 // unregistration will not race with OnMemoryDump() calls. |
| 406 DCHECK((*mdp_iter)->task_runner && | 407 DCHECK((*mdp_iter)->task_runner && |
| 407 (*mdp_iter)->task_runner->RunsTasksOnCurrentThread()) | 408 (*mdp_iter)->task_runner->RunsTasksOnCurrentThread()) |
| 408 << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to " | 409 << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to " |
| 409 << "unregister itself in a racy way. Please file a crbug."; | 410 << "unregister itself in a racy way. Please file a crbug."; |
| 410 } | 411 } |
| 411 | 412 |
| 412 if ((*mdp_iter)->options.is_fast_polling_supported && dump_thread_) { | 413 if ((*mdp_iter)->options.is_fast_polling_supported && dump_thread_) { |
| 413 DCHECK(take_mdp_ownership_and_delete_async); | 414 DCHECK(take_mdp_ownership_and_delete_async); |
| 414 dump_thread_->task_runner()->PostTask( | 415 dump_thread_->task_runner()->PostTask( |
| 415 FROM_HERE, Bind(&MemoryDumpManager::UnregisterPollingMDPOnDumpThread, | 416 FROM_HERE, |
| 416 Unretained(this), *mdp_iter)); | 417 BindOnce(&MemoryDumpManager::UnregisterPollingMDPOnDumpThread, |
| 418 Unretained(this), *mdp_iter)); |
| 417 } | 419 } |
| 418 | 420 |
| 419 // The MDPInfo instance can still be referenced by the | 421 // The MDPInfo instance can still be referenced by the |
| 420 // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason | 422 // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason |
| 421 // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump() | 423 // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump() |
| 422 // to just skip it, without actually invoking the |mdp|, which might be | 424 // to just skip it, without actually invoking the |mdp|, which might be |
| 423 // destroyed by the caller soon after this method returns. | 425 // destroyed by the caller soon after this method returns. |
| 424 (*mdp_iter)->disabled = true; | 426 (*mdp_iter)->disabled = true; |
| 425 dump_providers_.erase(mdp_iter); | 427 dump_providers_.erase(mdp_iter); |
| 426 } | 428 } |
| (...skipping 174 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 601 } | 603 } |
| 602 | 604 |
| 603 if (mdpinfo->options.dumps_on_single_thread_task_runner && | 605 if (mdpinfo->options.dumps_on_single_thread_task_runner && |
| 604 task_runner->RunsTasksOnCurrentThread()) { | 606 task_runner->RunsTasksOnCurrentThread()) { |
| 605 // If |dumps_on_single_thread_task_runner| is true then no PostTask is | 607 // If |dumps_on_single_thread_task_runner| is true then no PostTask is |
| 606 // required if we are on the right thread. | 608 // required if we are on the right thread. |
| 607 return InvokeOnMemoryDump(pmd_async_state.release()); | 609 return InvokeOnMemoryDump(pmd_async_state.release()); |
| 608 } | 610 } |
| 609 | 611 |
| 610 bool did_post_task = task_runner->PostTask( | 612 bool did_post_task = task_runner->PostTask( |
| 611 FROM_HERE, Bind(&MemoryDumpManager::InvokeOnMemoryDump, Unretained(this), | 613 FROM_HERE, BindOnce(&MemoryDumpManager::InvokeOnMemoryDump, |
| 612 Unretained(pmd_async_state.get()))); | 614 Unretained(this), Unretained(pmd_async_state.get()))); |
| 613 | 615 |
| 614 if (did_post_task) { | 616 if (did_post_task) { |
| 615 // Ownership is transferred to InvokeOnMemoryDump(). | 617 // Ownership is transferred to InvokeOnMemoryDump(). |
| 616 ignore_result(pmd_async_state.release()); | 618 ignore_result(pmd_async_state.release()); |
| 617 return; | 619 return; |
| 618 } | 620 } |
| 619 | 621 |
| 620 // PostTask usually fails only if the process or thread is shut down. So, the | 622 // PostTask usually fails only if the process or thread is shut down. So, the |
| 621 // dump provider is disabled here. But, don't disable unbound dump providers. | 623 // dump provider is disabled here. But, don't disable unbound dump providers. |
| 622 // The utility thread is normally shutdown when disabling the trace and | 624 // The utility thread is normally shutdown when disabling the trace and |
| (...skipping 123 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 746 // static | 748 // static |
| 747 void MemoryDumpManager::FinalizeDumpAndAddToTrace( | 749 void MemoryDumpManager::FinalizeDumpAndAddToTrace( |
| 748 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { | 750 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { |
| 749 HEAP_PROFILER_SCOPED_IGNORE; | 751 HEAP_PROFILER_SCOPED_IGNORE; |
| 750 DCHECK(pmd_async_state->pending_dump_providers.empty()); | 752 DCHECK(pmd_async_state->pending_dump_providers.empty()); |
| 751 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; | 753 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; |
| 752 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { | 754 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { |
| 753 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = | 755 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = |
| 754 pmd_async_state->callback_task_runner; | 756 pmd_async_state->callback_task_runner; |
| 755 callback_task_runner->PostTask( | 757 callback_task_runner->PostTask( |
| 756 FROM_HERE, Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace, | 758 FROM_HERE, BindOnce(&MemoryDumpManager::FinalizeDumpAndAddToTrace, |
| 757 Passed(&pmd_async_state))); | 759 Passed(&pmd_async_state))); |
| 758 return; | 760 return; |
| 759 } | 761 } |
| 760 | 762 |
| 761 TRACE_EVENT0(kTraceCategory, "MemoryDumpManager::FinalizeDumpAndAddToTrace"); | 763 TRACE_EVENT0(kTraceCategory, "MemoryDumpManager::FinalizeDumpAndAddToTrace"); |
| 762 | 764 |
| 763 // The results struct to fill. | 765 // The results struct to fill. |
| 764 // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203 | 766 // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203 |
| 765 MemoryDumpCallbackResult result; | 767 MemoryDumpCallbackResult result; |
| 766 | 768 |
| 767 for (const auto& kv : pmd_async_state->process_dumps) { | 769 for (const auto& kv : pmd_async_state->process_dumps) { |
| (...skipping 214 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 982 if (iter == process_dumps.end()) { | 984 if (iter == process_dumps.end()) { |
| 983 std::unique_ptr<ProcessMemoryDump> new_pmd( | 985 std::unique_ptr<ProcessMemoryDump> new_pmd( |
| 984 new ProcessMemoryDump(session_state, dump_args)); | 986 new ProcessMemoryDump(session_state, dump_args)); |
| 985 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; | 987 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; |
| 986 } | 988 } |
| 987 return iter->second.get(); | 989 return iter->second.get(); |
| 988 } | 990 } |
| 989 | 991 |
| 990 } // namespace trace_event | 992 } // namespace trace_event |
| 991 } // namespace base | 993 } // namespace base |
| OLD | NEW |