OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <utility> | 8 #include <utility> |
9 | 9 |
10 #include "base/allocator/features.h" | 10 #include "base/allocator/features.h" |
(...skipping 265 matching lines...)
276 new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options, | 276 new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options, |
277 whitelisted_for_background_mode); | 277 whitelisted_for_background_mode); |
278 | 278 |
279 { | 279 { |
280 AutoLock lock(lock_); | 280 AutoLock lock(lock_); |
281 bool already_registered = !dump_providers_.insert(mdpinfo).second; | 281 bool already_registered = !dump_providers_.insert(mdpinfo).second; |
282 // This actually happens in some tests which don't have a clean tear-down | 282 // This actually happens in some tests which don't have a clean tear-down |
283 // path for RenderThreadImpl::Init(). | 283 // path for RenderThreadImpl::Init(). |
284 if (already_registered) | 284 if (already_registered) |
285 return; | 285 return; |
| 286 |
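| // If the new provider supports fast polling and the dump thread is running |
| // (i.e. tracing is enabled), turn polling on for it on that thread. |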
| 287 if (options.is_fast_polling_supported && dump_thread_) { |
| 288 dump_thread_->task_runner()->PostTask( |
| 289 FROM_HERE, Bind(&MemoryDumpManager::RegisterPollingMDPOnDumpThread, |
| 290 Unretained(this), mdpinfo)); |
| 291 } |
286 } | 292 } |
287 | 293 |
288 if (heap_profiling_enabled_) | 294 if (heap_profiling_enabled_) |
289 mdp->OnHeapProfilingEnabled(true); | 295 mdp->OnHeapProfilingEnabled(true); |
290 } | 296 } |
291 | 297 |
292 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { | 298 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { |
293 UnregisterDumpProviderInternal(mdp, false /* delete_async */); | 299 UnregisterDumpProviderInternal(mdp, false /* delete_async */); |
294 } | 300 } |
295 | 301 |
(...skipping 18 matching lines...)
314 } | 320 } |
315 | 321 |
316 if (mdp_iter == dump_providers_.end()) | 322 if (mdp_iter == dump_providers_.end()) |
317 return; // Not registered / already unregistered. | 323 return; // Not registered / already unregistered. |
318 | 324 |
319 if (take_mdp_ownership_and_delete_async) { | 325 if (take_mdp_ownership_and_delete_async) { |
320 // The MDP will be deleted when its MDPInfo struct is, which happens: | 326 // The MDP will be deleted when its MDPInfo struct is, which happens: |
321 // - At the end of this function, if no dump is in progress. | 327 // - At the end of this function, if no dump is in progress. |
322 // - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is | 328 // - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is |
323 // removed from |pending_dump_providers|. | 329 // removed from |pending_dump_providers|. |
| 330 // - When the provider is removed from |mdps_for_fast_polling_|. |
324 DCHECK(!(*mdp_iter)->owned_dump_provider); | 331 DCHECK(!(*mdp_iter)->owned_dump_provider); |
325 (*mdp_iter)->owned_dump_provider = std::move(owned_mdp); | 332 (*mdp_iter)->owned_dump_provider = std::move(owned_mdp); |
326 } else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) { | 333 } else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) { |
327 // If you hit this DCHECK, your dump provider has a bug. | 334 // If you hit this DCHECK, your dump provider has a bug. |
328 // Unregistration of a MemoryDumpProvider is safe only if: | 335 // Unregistration of a MemoryDumpProvider is safe only if: |
329 // - The MDP has specified a sequenced task runner affinity AND the | 336 // - The MDP has specified a sequenced task runner affinity AND the |
330 // unregistration happens on the same task runner. So that the MDP cannot | 337 // unregistration happens on the same task runner. So that the MDP cannot |
331 // unregister and be in the middle of a OnMemoryDump() at the same time. | 338 // unregister and be in the middle of a OnMemoryDump() at the same time. |
332 // - The MDP has NOT specified a task runner affinity and its ownership is | 339 // - The MDP has NOT specified a task runner affinity and its ownership is |
333 // transferred via UnregisterAndDeleteDumpProviderSoon(). | 340 // transferred via UnregisterAndDeleteDumpProviderSoon(). |
334 // In all the other cases, it is not possible to guarantee that the | 341 // In all the other cases, it is not possible to guarantee that the |
335 // unregistration will not race with OnMemoryDump() calls. | 342 // unregistration will not race with OnMemoryDump() calls. |
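| // For illustration only (FooDumpProvider and its members are hypothetical, |
| // not part of this change), a safe pattern is to unregister on the same |
| // sequenced task runner that was supplied at registration time: |
| // |
| //   void FooDumpProvider::ShutdownOnTaskRunner() { |
| //     DCHECK(task_runner_->RunsTasksOnCurrentThread()); |
| //     MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this); |
| //   } |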
336 DCHECK((*mdp_iter)->task_runner && | 343 DCHECK((*mdp_iter)->task_runner && |
337 (*mdp_iter)->task_runner->RunsTasksOnCurrentThread()) | 344 (*mdp_iter)->task_runner->RunsTasksOnCurrentThread()) |
338 << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to " | 345 << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to " |
339 << "unregister itself in a racy way. Please file a crbug."; | 346 << "unregister itself in a racy way. Please file a crbug."; |
340 } | 347 } |
341 | 348 |
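| // If the provider had fast polling enabled, tear it down on the dump thread; |
| // this also removes it from |mdps_for_fast_polling_|. |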
| 349 if ((*mdp_iter)->options.is_fast_polling_supported && dump_thread_) { |
| 350 dump_thread_->task_runner()->PostTask( |
| 351 FROM_HERE, Bind(&MemoryDumpManager::UnregisterPollingMDPOnDumpThread, |
| 352 Unretained(this), *mdp_iter)); |
| 353 } |
| 354 |
342 // The MDPInfo instance can still be referenced by the | 355 // The MDPInfo instance can still be referenced by the |
343 // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason | 356 // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason |
344 // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump() | 357 // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump() |
345 // to just skip it, without actually invoking the |mdp|, which might be | 358 // to just skip it, without actually invoking the |mdp|, which might be |
346 // destroyed by the caller soon after this method returns. | 359 // destroyed by the caller soon after this method returns. |
347 (*mdp_iter)->disabled = true; | 360 (*mdp_iter)->disabled = true; |
348 dump_providers_.erase(mdp_iter); | 361 dump_providers_.erase(mdp_iter); |
349 } | 362 } |
350 | 363 |
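| // Runs on |dump_thread_|: notifies the provider that fast polling is on and |
| // adds it to the set consulted by PollFastMemoryTotal(). |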
| 364 void MemoryDumpManager::RegisterPollingMDPOnDumpThread( |
| 365 scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) { |
| 366 DCHECK(!mdpinfo->task_runner); |
| 367 mdpinfo->dump_provider->SetFastMemoryPollingEnabled(true); |
| 368 |
| 369 AutoLock lock(lock_); |
| 370 mdps_for_fast_polling_.insert(mdpinfo); |
| 371 } |
| 372 |
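| // Runs on |dump_thread_|: notifies the provider that fast polling is off and |
| // removes it from |mdps_for_fast_polling_|. |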
| 373 void MemoryDumpManager::UnregisterPollingMDPOnDumpThread( |
| 374 scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) { |
| 375 mdpinfo->dump_provider->SetFastMemoryPollingEnabled(false); |
| 376 |
| 377 AutoLock lock(lock_); |
| 378 mdps_for_fast_polling_.erase(mdpinfo); |
| 379 } |
| 380 |
351 void MemoryDumpManager::RequestGlobalDump( | 381 void MemoryDumpManager::RequestGlobalDump( |
352 MemoryDumpType dump_type, | 382 MemoryDumpType dump_type, |
353 MemoryDumpLevelOfDetail level_of_detail, | 383 MemoryDumpLevelOfDetail level_of_detail, |
354 const MemoryDumpCallback& callback) { | 384 const MemoryDumpCallback& callback) { |
355 // Bail out immediately if tracing is not enabled at all or if the dump mode | 385 // Bail out immediately if tracing is not enabled at all or if the dump mode |
356 // is not allowed. | 386 // is not allowed. |
357 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) || | 387 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) || |
358 !IsDumpModeAllowed(level_of_detail)) { | 388 !IsDumpModeAllowed(level_of_detail)) { |
359 VLOG(1) << kLogPrefix << " failed because " << kTraceCategory | 389 VLOG(1) << kLogPrefix << " failed because " << kTraceCategory |
360 << " tracing category is not enabled or the requested dump mode is " | 390 << " tracing category is not enabled or the requested dump mode is " |
(...skipping 233 matching lines...)
594 args); | 624 args); |
595 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd); | 625 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd); |
596 mdpinfo->consecutive_failures = | 626 mdpinfo->consecutive_failures = |
597 dump_successful ? 0 : mdpinfo->consecutive_failures + 1; | 627 dump_successful ? 0 : mdpinfo->consecutive_failures + 1; |
598 } | 628 } |
599 | 629 |
600 pmd_async_state->pending_dump_providers.pop_back(); | 630 pmd_async_state->pending_dump_providers.pop_back(); |
601 SetupNextMemoryDump(std::move(pmd_async_state)); | 631 SetupNextMemoryDump(std::move(pmd_async_state)); |
602 } | 632 } |
603 | 633 |
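| // Runs on |dump_thread_| (see the DCHECK below): sums the totals reported by |
| // every provider registered for fast polling. |
| // |
| // A provider-side implementation could look roughly like the sketch below |
| // (FooDumpProvider and |allocated_bytes_| are hypothetical); it should be |
| // cheap and thread-safe, since it runs on the dump thread at poll frequency: |
| // |
| //   void FooDumpProvider::PollFastMemoryTotal(uint64_t* memory_total) { |
| //     *memory_total = allocated_bytes_.load(std::memory_order_relaxed); |
| //   } |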
| 634 void MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) { |
| 635 #if DCHECK_IS_ON() |
| 636 { |
| 637 AutoLock lock(lock_); |
| 638 DCHECK(dump_thread_->task_runner()->BelongsToCurrentThread()); |
| 639 } |
| 640 #endif |
| 641 *memory_total = 0; |
| 642 // Note that PollFastMemoryTotal() is called even on providers that have been |
| 643 // disabled (unregistered), to avoid taking |lock_| on this hot path. |
| 644 for (const auto& mdpinfo : mdps_for_fast_polling_) { |
| 645 uint64_t value = 0; |
| 646 mdpinfo->dump_provider->PollFastMemoryTotal(&value); |
| 647 *memory_total += value; |
| 648 } |
| 649 return; |
| 650 } |
| 651 |
604 // static | 652 // static |
605 void MemoryDumpManager::FinalizeDumpAndAddToTrace( | 653 void MemoryDumpManager::FinalizeDumpAndAddToTrace( |
606 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { | 654 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { |
607 HEAP_PROFILER_SCOPED_IGNORE; | 655 HEAP_PROFILER_SCOPED_IGNORE; |
608 DCHECK(pmd_async_state->pending_dump_providers.empty()); | 656 DCHECK(pmd_async_state->pending_dump_providers.empty()); |
609 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; | 657 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; |
610 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { | 658 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { |
611 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = | 659 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = |
612 pmd_async_state->callback_task_runner; | 660 pmd_async_state->callback_task_runner; |
613 callback_task_runner->PostTask( | 661 callback_task_runner->PostTask( |
(...skipping 93 matching lines...)
707 | 755 |
708 { | 756 { |
709 AutoLock lock(lock_); | 757 AutoLock lock(lock_); |
710 | 758 |
711 DCHECK(delegate_); // At this point we must have a delegate. | 759 DCHECK(delegate_); // At this point we must have a delegate. |
712 session_state_ = session_state; | 760 session_state_ = session_state; |
713 | 761 |
714 DCHECK(!dump_thread_); | 762 DCHECK(!dump_thread_); |
715 dump_thread_ = std::move(dump_thread); | 763 dump_thread_ = std::move(dump_thread); |
716 | 764 |
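| // Start fast polling for the already-registered providers that support it. |
| // Providers that register later are handled at registration time. |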
| 765 mdps_for_fast_polling_.clear(); |
| 766 for (const auto& mdpinfo : dump_providers_) { |
| 767 if (!mdpinfo->options.is_fast_polling_supported) continue; |
| 768 mdps_for_fast_polling_.insert(mdpinfo); |
| 769 mdpinfo->dump_provider->SetFastMemoryPollingEnabled(true); |
| 770 } |
| 771 |
717 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); | 772 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); |
718 | 773 |
719 // TODO(primiano): This is a temporary hack to disable periodic memory dumps | 774 // TODO(primiano): This is a temporary hack to disable periodic memory dumps |
720 // when running memory benchmarks until telemetry uses TraceConfig to | 775 // when running memory benchmarks until telemetry uses TraceConfig to |
721 // enable/disable periodic dumps. See crbug.com/529184 . | 776 // enable/disable periodic dumps. See crbug.com/529184 . |
722 if (!is_coordinator_ || | 777 if (!is_coordinator_ || |
723 CommandLine::ForCurrentProcess()->HasSwitch( | 778 CommandLine::ForCurrentProcess()->HasSwitch( |
724 "enable-memory-benchmarking")) { | 779 "enable-memory-benchmarking")) { |
725 return; | 780 return; |
726 } | 781 } |
(...skipping 13 matching lines...)
740 AutoLock lock(lock_); | 795 AutoLock lock(lock_); |
741 dump_thread = std::move(dump_thread_); | 796 dump_thread = std::move(dump_thread_); |
742 session_state_ = nullptr; | 797 session_state_ = nullptr; |
743 } | 798 } |
744 | 799 |
745 // Thread stops are blocking and must be performed outside of the |lock_| | 800 // Thread stops are blocking and must be performed outside of the |lock_| |
746 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). | 801 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). |
747 periodic_dump_timer_.Stop(); | 802 periodic_dump_timer_.Stop(); |
748 if (dump_thread) | 803 if (dump_thread) |
749 dump_thread->Stop(); | 804 dump_thread->Stop(); |
| 805 |
| 806 // |mdps_for_fast_polling_| must be cleared only after the dump thread is |
| 807 // stopped (polling tasks are done). |
| 808 { |
| 809 AutoLock lock(lock_); |
| 810 for (const auto& mdpinfo : mdps_for_fast_polling_) |
| 811 mdpinfo->dump_provider->SetFastMemoryPollingEnabled(false); |
| 812 mdps_for_fast_polling_.clear(); |
| 813 } |
750 } | 814 } |
751 | 815 |
752 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) { | 816 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) { |
753 AutoLock lock(lock_); | 817 AutoLock lock(lock_); |
754 if (!session_state_) | 818 if (!session_state_) |
755 return false; | 819 return false; |
756 return session_state_->memory_dump_config().allowed_dump_modes.count( | 820 return session_state_->memory_dump_config().allowed_dump_modes.count( |
757 dump_mode) != 0; | 821 dump_mode) != 0; |
758 } | 822 } |
759 | 823 |
(...skipping 130 matching lines...)
890 if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0) | 954 if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0) |
891 level_of_detail = MemoryDumpLevelOfDetail::DETAILED; | 955 level_of_detail = MemoryDumpLevelOfDetail::DETAILED; |
892 ++periodic_dumps_count_; | 956 ++periodic_dumps_count_; |
893 | 957 |
894 MemoryDumpManager::GetInstance()->RequestGlobalDump( | 958 MemoryDumpManager::GetInstance()->RequestGlobalDump( |
895 MemoryDumpType::PERIODIC_INTERVAL, level_of_detail); | 959 MemoryDumpType::PERIODIC_INTERVAL, level_of_detail); |
896 } | 960 } |
897 | 961 |
898 } // namespace trace_event | 962 } // namespace trace_event |
899 } // namespace base | 963 } // namespace base |