OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <utility> | 8 #include <utility> |
9 | 9 |
10 #include "base/allocator/features.h" | 10 #include "base/allocator/features.h" |
(...skipping 266 matching lines...) | |
277 new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options, | 277 new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options, |
278 whitelisted_for_background_mode); | 278 whitelisted_for_background_mode); |
279 | 279 |
280 { | 280 { |
281 AutoLock lock(lock_); | 281 AutoLock lock(lock_); |
282 bool already_registered = !dump_providers_.insert(mdpinfo).second; | 282 bool already_registered = !dump_providers_.insert(mdpinfo).second; |
283 // This actually happens in some tests which don't have a clean tear-down | 283 // This actually happens in some tests which don't have a clean tear-down |
284 // path for RenderThreadImpl::Init(). | 284 // path for RenderThreadImpl::Init(). |
285 if (already_registered) | 285 if (already_registered) |
286 return; | 286 return; |
287 | |
288 // The list of polling MDPs is populated in OnTraceLogEnabled(). This code | |
289 // deals with the case of an MDP capable of fast polling that is registered | |
290 // after OnTraceLogEnabled(). | |
291 if (options.is_fast_polling_supported && dump_thread_ && session_state_ && | |
292 session_state_->is_polling_enabled()) { | |
293 dump_thread_->task_runner()->PostTask( | |
294 FROM_HERE, Bind(&MemoryDumpManager::RegisterPollingMDPOnDumpThread, | |
295 Unretained(this), mdpinfo)); | |
296 } | |
287 } | 297 } |
288 | 298 |
289 if (heap_profiling_enabled_) | 299 if (heap_profiling_enabled_) |
290 mdp->OnHeapProfilingEnabled(true); | 300 mdp->OnHeapProfilingEnabled(true); |
291 } | 301 } |
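For reference, a rough sketch (not part of this CL) of how a provider might opt into fast polling at registration time. It assumes the RegisterDumpProvider() overload that takes MemoryDumpProvider::Options, passing a null task runner for an unbound provider; the function and provider names are placeholders.

#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_provider.h"

// Registers a hypothetical thread-unbound provider that supports fast polling.
void RegisterMyPollingProvider(base::trace_event::MemoryDumpProvider* mdp) {
  base::trace_event::MemoryDumpProvider::Options options;
  options.is_fast_polling_supported = true;
  // No task runner: fast-polling providers must be thread-unbound so they can
  // later be torn down via UnregisterAndDeleteDumpProviderSoon().
  base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      mdp, "MyPollingProvider", nullptr, options);
}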
292 | 302 |
293 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { | 303 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { |
294 UnregisterDumpProviderInternal(mdp, false /* delete_async */); | 304 UnregisterDumpProviderInternal(mdp, false /* delete_async */); |
295 } | 305 } |
296 | 306 |
(...skipping 18 matching lines...) | |
315 } | 325 } |
316 | 326 |
317 if (mdp_iter == dump_providers_.end()) | 327 if (mdp_iter == dump_providers_.end()) |
318 return; // Not registered / already unregistered. | 328 return; // Not registered / already unregistered. |
319 | 329 |
320 if (take_mdp_ownership_and_delete_async) { | 330 if (take_mdp_ownership_and_delete_async) { |
321 // The MDP will be deleted when the MDPInfo struct is, that is either: | 331 // The MDP will be deleted when the MDPInfo struct is, that is either: |
322 // - At the end of this function, if no dump is in progress. | 332 // - At the end of this function, if no dump is in progress. |
323 // - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is | 333 // - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is |
324 // removed from |pending_dump_providers|. | 334 // removed from |pending_dump_providers|. |
335 // - When the provider is removed from |dumps_providers_for_polling_|. | |
325 DCHECK(!(*mdp_iter)->owned_dump_provider); | 336 DCHECK(!(*mdp_iter)->owned_dump_provider); |
326 (*mdp_iter)->owned_dump_provider = std::move(owned_mdp); | 337 (*mdp_iter)->owned_dump_provider = std::move(owned_mdp); |
327 } else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) { | 338 } else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) { |
328 // If you hit this DCHECK, your dump provider has a bug. | 339 // If you hit this DCHECK, your dump provider has a bug. |
329 // Unregistration of a MemoryDumpProvider is safe only if: | 340 // Unregistration of a MemoryDumpProvider is safe only if: |
330 // - The MDP has specified a sequenced task runner affinity AND the | 341 // - The MDP has specified a sequenced task runner affinity AND the |
331 // unregistration happens on the same task runner. So that the MDP cannot | 342 // unregistration happens on the same task runner. So that the MDP cannot |
332 // unregister and be in the middle of a OnMemoryDump() at the same time. | 343 // unregister and be in the middle of a OnMemoryDump() at the same time. |
333 // - The MDP has NOT specified a task runner affinity and its ownership is | 344 // - The MDP has NOT specified a task runner affinity and its ownership is |
334 // transferred via UnregisterAndDeleteDumpProviderSoon(). | 345 // transferred via UnregisterAndDeleteDumpProviderSoon(). |
335 // In all the other cases, it is not possible to guarantee that the | 346 // In all the other cases, it is not possible to guarantee that the |
336 // unregistration will not race with OnMemoryDump() calls. | 347 // unregistration will not race with OnMemoryDump() calls. |
337 DCHECK((*mdp_iter)->task_runner && | 348 DCHECK((*mdp_iter)->task_runner && |
338 (*mdp_iter)->task_runner->RunsTasksOnCurrentThread()) | 349 (*mdp_iter)->task_runner->RunsTasksOnCurrentThread()) |
339 << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to " | 350 << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to " |
340 << "unregister itself in a racy way. Please file a crbug."; | 351 << "unregister itself in a racy way. Please file a crbug."; |
341 } | 352 } |
342 | 353 |
354 if ((*mdp_iter)->options.is_fast_polling_supported && dump_thread_ && | |
355 session_state_ && session_state_->is_polling_enabled()) { | |
356 DCHECK(take_mdp_ownership_and_delete_async) | |
357 << "MemoryDumpProviders capable of fast polling must NOT be thread " | |
358 "bound and hence destroyed using " | |
Primiano Tucci (use gerrit)
2016/12/16 12:34:40
+"must be" before destroyed. Otherwise is not clea
ssid
2016/12/16 18:58:22
Done.
| |
359 "UnregisterAndDeleteDumpProviderSoon()"; | |
360 dump_thread_->task_runner()->PostTask( | |
361 FROM_HERE, Bind(&MemoryDumpManager::UnregisterPollingMDPOnDumpThread, | |
362 Unretained(this), *mdp_iter)); | |
363 } | |
364 | |
343 // The MDPInfo instance can still be referenced by the | 365 // The MDPInfo instance can still be referenced by the |
344 // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason | 366 // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason |
345 // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump() | 367 // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump() |
346 // to just skip it, without actually invoking the |mdp|, which might be | 368 // to just skip it, without actually invoking the |mdp|, which might be |
347 // destroyed by the caller soon after this method returns. | 369 // destroyed by the caller soon after this method returns. |
348 (*mdp_iter)->disabled = true; | 370 (*mdp_iter)->disabled = true; |
349 dump_providers_.erase(mdp_iter); | 371 dump_providers_.erase(mdp_iter); |
350 } | 372 } |
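For reference, a minimal sketch (not from this CL) of the two unregistration patterns that the DCHECK above allows; the helper functions and the owning task runner are hypothetical.

#include <memory>
#include <utility>

#include "base/logging.h"
#include "base/single_thread_task_runner.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_provider.h"

// (a) Provider bound to a task runner: unregister on that same runner, so the
//     call cannot race with an in-flight OnMemoryDump().
void UnregisterBoundProvider(base::trace_event::MemoryDumpProvider* mdp,
                             base::SingleThreadTaskRunner* owning_runner) {
  DCHECK(owning_runner->RunsTasksOnCurrentThread());
  base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
      mdp);
}

// (b) Thread-unbound provider: transfer ownership, so the manager deletes it
//     only once no pending dump or polling task can still reference it.
void UnregisterUnboundProvider(
    std::unique_ptr<base::trace_event::MemoryDumpProvider> mdp) {
  base::trace_event::MemoryDumpManager::GetInstance()
      ->UnregisterAndDeleteDumpProviderSoon(std::move(mdp));
}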
351 | 373 |
374 void MemoryDumpManager::RegisterPollingMDPOnDumpThread( | |
375 scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) { | |
376 DCHECK(!mdpinfo->task_runner); | |
377 mdpinfo->dump_provider->SetFastMemoryPollingEnabled(true); | |
378 | |
379 AutoLock lock(lock_); | |
380 dumps_providers_for_polling_.insert(mdpinfo); | |
381 } | |
382 | |
383 void MemoryDumpManager::UnregisterPollingMDPOnDumpThread( | |
384 scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) { | |
385 mdpinfo->dump_provider->SetFastMemoryPollingEnabled(false); | |
386 | |
387 AutoLock lock(lock_); | |
388 dumps_providers_for_polling_.erase(mdpinfo); | |
389 } | |
390 | |
352 void MemoryDumpManager::RequestGlobalDump( | 391 void MemoryDumpManager::RequestGlobalDump( |
353 MemoryDumpType dump_type, | 392 MemoryDumpType dump_type, |
354 MemoryDumpLevelOfDetail level_of_detail, | 393 MemoryDumpLevelOfDetail level_of_detail, |
355 const MemoryDumpCallback& callback) { | 394 const MemoryDumpCallback& callback) { |
356 // Bail out immediately if tracing is not enabled at all or if the dump mode | 395 // Bail out immediately if tracing is not enabled at all or if the dump mode |
357 // is not allowed. | 396 // is not allowed. |
358 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) || | 397 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) || |
359 !IsDumpModeAllowed(level_of_detail)) { | 398 !IsDumpModeAllowed(level_of_detail)) { |
360 VLOG(1) << kLogPrefix << " failed because " << kTraceCategory | 399 VLOG(1) << kLogPrefix << " failed because " << kTraceCategory |
361 << " tracing category is not enabled or the requested dump mode is " | 400 << " tracing category is not enabled or the requested dump mode is " |
(...skipping 233 matching lines...) | |
595 args); | 634 args); |
596 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd); | 635 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd); |
597 mdpinfo->consecutive_failures = | 636 mdpinfo->consecutive_failures = |
598 dump_successful ? 0 : mdpinfo->consecutive_failures + 1; | 637 dump_successful ? 0 : mdpinfo->consecutive_failures + 1; |
599 } | 638 } |
600 | 639 |
601 pmd_async_state->pending_dump_providers.pop_back(); | 640 pmd_async_state->pending_dump_providers.pop_back(); |
602 SetupNextMemoryDump(std::move(pmd_async_state)); | 641 SetupNextMemoryDump(std::move(pmd_async_state)); |
603 } | 642 } |
604 | 643 |
644 void MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) { | |
645 *memory_total = 0; | |
646 // Note that we call PollFastMemoryTotal() even if the dump provider is | |
647 // disabled (unregistered), to avoid taking the lock and slowing this method down. | |
648 for (const auto& mdpinfo : dumps_providers_for_polling_) { | |
649 uint64_t value = 0; | |
650 mdpinfo->dump_provider->PollFastMemoryTotal(&value); | |
651 *memory_total += value; | |
652 } | |
653 return; | |
654 } | |
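To illustrate what the loop above expects from a provider, a hedged sketch (not part of this CL) of a fast-polling implementation, assuming the virtual hooks introduced by this patch; the atomic counter and the allocator hooks are invented for the example.

#include <atomic>
#include <cstddef>
#include <cstdint>

#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"

class MyFastPollingProvider : public base::trace_event::MemoryDumpProvider {
 public:
  bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                    base::trace_event::ProcessMemoryDump* pmd) override {
    // Regular (detailed) dump path; allocator details omitted in this sketch.
    return true;
  }

  // Runs on the dump thread, possibly concurrently with allocations: it must
  // be fast and lock-free, hence the relaxed atomic load.
  void PollFastMemoryTotal(uint64_t* memory_total) override {
    *memory_total = allocated_bytes_.load(std::memory_order_relaxed);
  }

  // Called by MemoryDumpManager when polling starts/stops for a session.
  void SetFastMemoryPollingEnabled(bool enabled) override {
    polling_enabled_.store(enabled, std::memory_order_relaxed);
  }

  // Hypothetical allocator hooks that keep the counter up to date.
  void OnAlloc(size_t size) {
    allocated_bytes_.fetch_add(size, std::memory_order_relaxed);
  }
  void OnFree(size_t size) {
    allocated_bytes_.fetch_sub(size, std::memory_order_relaxed);
  }

 private:
  std::atomic<uint64_t> allocated_bytes_{0};
  std::atomic<bool> polling_enabled_{false};
};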
655 | |
605 // static | 656 // static |
606 void MemoryDumpManager::FinalizeDumpAndAddToTrace( | 657 void MemoryDumpManager::FinalizeDumpAndAddToTrace( |
607 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { | 658 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { |
608 HEAP_PROFILER_SCOPED_IGNORE; | 659 HEAP_PROFILER_SCOPED_IGNORE; |
609 DCHECK(pmd_async_state->pending_dump_providers.empty()); | 660 DCHECK(pmd_async_state->pending_dump_providers.empty()); |
610 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; | 661 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; |
611 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { | 662 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { |
612 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = | 663 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = |
613 pmd_async_state->callback_task_runner; | 664 pmd_async_state->callback_task_runner; |
614 callback_task_runner->PostTask( | 665 callback_task_runner->PostTask( |
(...skipping 93 matching lines...) | |
708 | 759 |
709 { | 760 { |
710 AutoLock lock(lock_); | 761 AutoLock lock(lock_); |
711 | 762 |
712 DCHECK(delegate_); // At this point we must have a delegate. | 763 DCHECK(delegate_); // At this point we must have a delegate. |
713 session_state_ = session_state; | 764 session_state_ = session_state; |
714 | 765 |
715 DCHECK(!dump_thread_); | 766 DCHECK(!dump_thread_); |
716 dump_thread_ = std::move(dump_thread); | 767 dump_thread_ = std::move(dump_thread); |
717 | 768 |
769 dumps_providers_for_polling_.clear(); | |
770 if (session_state_->is_polling_enabled()) { | |
771 for (const auto& mdpinfo : dump_providers_) { | |
772 if (mdpinfo->options.is_fast_polling_supported) { | |
773 dumps_providers_for_polling_.insert(mdpinfo); | |
774 mdpinfo->dump_provider->SetFastMemoryPollingEnabled(true); | |
775 } | |
776 } | |
777 } | |
778 | |
718 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); | 779 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); |
719 | 780 |
720 // TODO(primiano): This is a temporary hack to disable periodic memory dumps | 781 // TODO(primiano): This is a temporary hack to disable periodic memory dumps |
721 // when running memory benchmarks until telemetry uses TraceConfig to | 782 // when running memory benchmarks until telemetry uses TraceConfig to |
722 // enable/disable periodic dumps. See crbug.com/529184 . | 783 // enable/disable periodic dumps. See crbug.com/529184 . |
723 if (!is_coordinator_ || | 784 if (!is_coordinator_ || |
724 CommandLine::ForCurrentProcess()->HasSwitch( | 785 CommandLine::ForCurrentProcess()->HasSwitch( |
725 "enable-memory-benchmarking")) { | 786 "enable-memory-benchmarking")) { |
726 return; | 787 return; |
727 } | 788 } |
728 } | 789 } |
729 | 790 |
730 // Enable periodic dumps if necessary. | 791 // Enable periodic dumps if necessary. |
731 periodic_dump_timer_.Start(trace_config.memory_dump_config().triggers); | 792 periodic_dump_timer_.Start(trace_config.memory_dump_config().triggers); |
732 } | 793 } |
733 | 794 |
734 void MemoryDumpManager::OnTraceLogDisabled() { | 795 void MemoryDumpManager::OnTraceLogDisabled() { |
735 // There might be a memory dump in progress while this happens. Therefore, | 796 // There might be a memory dump in progress while this happens. Therefore, |
736 // ensure that the MDM state which depends on the tracing enabled / disabled | 797 // ensure that the MDM state which depends on the tracing enabled / disabled |
737 // state is always accessed by the dumping methods holding the |lock_|. | 798 // state is always accessed by the dumping methods holding the |lock_|. |
799 if (!subtle::NoBarrier_Load(&memory_tracing_enabled_)) | |
Primiano Tucci (use gerrit)
2016/12/16 12:34:40
why is this required?
ssid
2016/12/16 18:58:22
Not really required, just an optimization to return early.
Primiano Tucci (use gerrit)
2016/12/16 20:29:10
Ahh I see the point now, right. Good point.
| |
800 return; | |
738 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); | 801 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); |
802 | |
739 std::unique_ptr<Thread> dump_thread; | 803 std::unique_ptr<Thread> dump_thread; |
740 { | 804 { |
741 AutoLock lock(lock_); | 805 AutoLock lock(lock_); |
742 dump_thread = std::move(dump_thread_); | 806 dump_thread = std::move(dump_thread_); |
743 session_state_ = nullptr; | |
744 } | 807 } |
745 | 808 |
746 // Thread stops are blocking and must be performed outside of the |lock_| | 809 // Thread stops are blocking and must be performed outside of the |lock_| |
747 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). | 810 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). |
748 periodic_dump_timer_.Stop(); | 811 periodic_dump_timer_.Stop(); |
749 if (dump_thread) | 812 if (dump_thread) |
750 dump_thread->Stop(); | 813 dump_thread->Stop(); |
814 | |
815 // |dumps_providers_for_polling_| must be cleared only after the dump thread | |
816 // is stopped (polling tasks are done). | |
817 { | |
818 AutoLock lock(lock_); | |
819 if (session_state_ && session_state_->is_polling_enabled()) { | |
820 for (const auto& mdpinfo : dumps_providers_for_polling_) | |
821 mdpinfo->dump_provider->SetFastMemoryPollingEnabled(false); | |
822 dumps_providers_for_polling_.clear(); | |
823 } | |
824 session_state_ = nullptr; | |
Primiano Tucci (use gerrit)
2016/12/16 12:34:40
let's put this back in place (see my comment on la
ssid
2016/12/16 18:58:22
Done.
| |
825 } | |
751 } | 826 } |
752 | 827 |
753 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) { | 828 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) { |
754 AutoLock lock(lock_); | 829 AutoLock lock(lock_); |
755 if (!session_state_) | 830 if (!session_state_) |
756 return false; | 831 return false; |
757 return session_state_->memory_dump_config().allowed_dump_modes.count( | 832 return session_state_->memory_dump_config().allowed_dump_modes.count( |
758 dump_mode) != 0; | 833 dump_mode) != 0; |
759 } | 834 } |
760 | 835 |
(...skipping 130 matching lines...) | |
891 if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0) | 966 if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0) |
892 level_of_detail = MemoryDumpLevelOfDetail::DETAILED; | 967 level_of_detail = MemoryDumpLevelOfDetail::DETAILED; |
893 ++periodic_dumps_count_; | 968 ++periodic_dumps_count_; |
894 | 969 |
895 MemoryDumpManager::GetInstance()->RequestGlobalDump( | 970 MemoryDumpManager::GetInstance()->RequestGlobalDump( |
896 MemoryDumpType::PERIODIC_INTERVAL, level_of_detail); | 971 MemoryDumpType::PERIODIC_INTERVAL, level_of_detail); |
897 } | 972 } |
898 | 973 |
899 } // namespace trace_event | 974 } // namespace trace_event |
900 } // namespace base | 975 } // namespace base |