OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <utility> | 8 #include <utility> |
9 | 9 |
10 #include "base/allocator/features.h" | 10 #include "base/allocator/features.h" |
(...skipping 266 matching lines...) | |
277 new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options, | 277 new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options, |
278 whitelisted_for_background_mode); | 278 whitelisted_for_background_mode); |
279 | 279 |
280 { | 280 { |
281 AutoLock lock(lock_); | 281 AutoLock lock(lock_); |
282 bool already_registered = !dump_providers_.insert(mdpinfo).second; | 282 bool already_registered = !dump_providers_.insert(mdpinfo).second; |
283 // This actually happens in some tests which don't have a clean tear-down | 283 // This actually happens in some tests which don't have a clean tear-down |
284 // path for RenderThreadImpl::Init(). | 284 // path for RenderThreadImpl::Init(). |
285 if (already_registered) | 285 if (already_registered) |
286 return; | 286 return; |
287 | |
288 if (options.is_fast_polling_supported && dump_thread_) { | |
Primiano Tucci (use gerrit) 2016/12/15 16:45:01
Can you add a comment here saying:
// the list of
ssid 2016/12/16 02:31:54
Done.
| |
289 dump_thread_->task_runner()->PostTask( | |
290 FROM_HERE, Bind(&MemoryDumpManager::RegisterPollingMDPOnDumpThread, | |
291 Unretained(this), mdpinfo)); | |
292 } | |
287 } | 293 } |
288 | 294 |
289 if (heap_profiling_enabled_) | 295 if (heap_profiling_enabled_) |
290 mdp->OnHeapProfilingEnabled(true); | 296 mdp->OnHeapProfilingEnabled(true); |
291 } | 297 } |
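
For context, a minimal sketch of how a client would register a provider that opts into fast polling. MyPollableProvider and my_pollable_provider are hypothetical names, the null task runner and the is_fast_polling_supported field mirror what this CL checks above, and the exact RegisterDumpProvider overload is an assumption.

    #include "base/trace_event/memory_dump_manager.h"
    #include "base/trace_event/memory_dump_provider.h"

    // Sketch: |my_pollable_provider| is a hypothetical MemoryDumpProvider*.
    base::trace_event::MemoryDumpProvider::Options options;
    options.is_fast_polling_supported = true;  // The flag checked above.
    // Polling providers register with no task runner affinity; see the
    // DCHECK(!mdpinfo->task_runner) in RegisterPollingMDPOnDumpThread below.
    base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
        my_pollable_provider, "MyPollableProvider", nullptr, options);
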
292 | 298 |
293 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { | 299 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { |
294 UnregisterDumpProviderInternal(mdp, false /* delete_async */); | 300 UnregisterDumpProviderInternal(mdp, false /* delete_async */); |
295 } | 301 } |
296 | 302 |
(...skipping 18 matching lines...) | |
315 } | 321 } |
316 | 322 |
317 if (mdp_iter == dump_providers_.end()) | 323 if (mdp_iter == dump_providers_.end()) |
318 return; // Not registered / already unregistered. | 324 return; // Not registered / already unregistered. |
319 | 325 |
320 if (take_mdp_ownership_and_delete_async) { | 326 if (take_mdp_ownership_and_delete_async) { |
321 // The MDP will be deleted whenever the MDPInfo struct will, that is either: | 327 // The MDP will be deleted whenever the MDPInfo struct will, that is either: |
322 // - At the end of this function, if no dump is in progress. | 328 // - At the end of this function, if no dump is in progress. |
323 // - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is | 329 // - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is |
324 // removed from |pending_dump_providers|. | 330 // removed from |pending_dump_providers|. |
331 // - When the provider is removed from |mdps_for_fast_polling_|. | |
325 DCHECK(!(*mdp_iter)->owned_dump_provider); | 332 DCHECK(!(*mdp_iter)->owned_dump_provider); |
326 (*mdp_iter)->owned_dump_provider = std::move(owned_mdp); | 333 (*mdp_iter)->owned_dump_provider = std::move(owned_mdp); |
327 } else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) { | 334 } else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) { |
328 // If you hit this DCHECK, your dump provider has a bug. | 335 // If you hit this DCHECK, your dump provider has a bug. |
329 // Unregistration of a MemoryDumpProvider is safe only if: | 336 // Unregistration of a MemoryDumpProvider is safe only if: |
330 // - The MDP has specified a sequenced task runner affinity AND the | 337 // - The MDP has specified a sequenced task runner affinity AND the |
331 // unregistration happens on the same task runner. So that the MDP cannot | 338 // unregistration happens on the same task runner. So that the MDP cannot |
332 // unregister and be in the middle of a OnMemoryDump() at the same time. | 339 // unregister and be in the middle of a OnMemoryDump() at the same time. |
333 // - The MDP has NOT specified a task runner affinity and its ownership is | 340 // - The MDP has NOT specified a task runner affinity and its ownership is |
334 // transferred via UnregisterAndDeleteDumpProviderSoon(). | 341 // transferred via UnregisterAndDeleteDumpProviderSoon(). |
335 // In all the other cases, it is not possible to guarantee that the | 342 // In all the other cases, it is not possible to guarantee that the |
336 // unregistration will not race with OnMemoryDump() calls. | 343 // unregistration will not race with OnMemoryDump() calls. |
337 DCHECK((*mdp_iter)->task_runner && | 344 DCHECK((*mdp_iter)->task_runner && |
338 (*mdp_iter)->task_runner->RunsTasksOnCurrentThread()) | 345 (*mdp_iter)->task_runner->RunsTasksOnCurrentThread()) |
339 << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to " | 346 << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to " |
340 << "unregister itself in a racy way. Please file a crbug."; | 347 << "unregister itself in a racy way. Please file a crbug."; |
341 } | 348 } |
342 | 349 |
350 if ((*mdp_iter)->options.is_fast_polling_supported && dump_thread_) { | |
351 dump_thread_->task_runner()->PostTask( | |
Primiano Tucci (use gerrit) 2016/12/15 16:45:01
Add a DCHECK(take_mdp_ownership_and_delete_async)
ssid 2016/12/16 02:31:54
Done.
| |
352 FROM_HERE, Bind(&MemoryDumpManager::UnregisterPollingMDPOnDumpThread, | |
353 Unretained(this), *mdp_iter)); | |
354 } | |
355 | |
343 // The MDPInfo instance can still be referenced by the | 356 // The MDPInfo instance can still be referenced by the |
344 // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason | 357 // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason |
345 // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump() | 358 // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump() |
346 // to just skip it, without actually invoking the |mdp|, which might be | 359 // to just skip it, without actually invoking the |mdp|, which might be |
347 // destroyed by the caller soon after this method returns. | 360 // destroyed by the caller soon after this method returns. |
348 (*mdp_iter)->disabled = true; | 361 (*mdp_iter)->disabled = true; |
349 dump_providers_.erase(mdp_iter); | 362 dump_providers_.erase(mdp_iter); |
350 } | 363 } |
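
To make the two safe unregistration patterns from the DCHECK comment above concrete, a hedged sketch; |provider|, |owned_provider|, MyProvider and |provider_task_runner| are hypothetical names, while UnregisterDumpProvider() and UnregisterAndDeleteDumpProviderSoon() are the entry points referenced above.

    #include "base/bind.h"
    #include "base/trace_event/memory_dump_manager.h"

    // Pattern 1: the MDP was registered with a task runner affinity, so it
    // unregisters on that same sequence and cannot race with OnMemoryDump().
    void UnregisterOnProviderSequence(MyProvider* provider) {
      base::trace_event::MemoryDumpManager::GetInstance()
          ->UnregisterDumpProvider(provider);
    }
    // ... posted from anywhere:
    provider_task_runner->PostTask(
        FROM_HERE, base::Bind(&UnregisterOnProviderSequence, provider));

    // Pattern 2: the MDP has no task runner affinity; ownership is handed to
    // the manager, which deletes it once no in-flight dump references it.
    base::trace_event::MemoryDumpManager::GetInstance()
        ->UnregisterAndDeleteDumpProviderSoon(std::move(owned_provider));
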
351 | 364 |
365 void MemoryDumpManager::RegisterPollingMDPOnDumpThread( | |
366 scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) { | |
367 DCHECK(!mdpinfo->task_runner); | |
368 mdpinfo->dump_provider->SetFastMemoryPollingEnabled(true); | |
369 | |
370 AutoLock lock(lock_); | |
371 mdps_for_fast_polling_.insert(mdpinfo); | |
372 } | |
373 | |
374 void MemoryDumpManager::UnregisterPollingMDPOnDumpThread( | |
375 scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) { | |
376 mdpinfo->dump_provider->SetFastMemoryPollingEnabled(false); | |
377 | |
378 AutoLock lock(lock_); | |
379 mdps_for_fast_polling_.erase(mdpinfo); | |
380 } | |
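
As a rough illustration of what the two calls above expect from a provider, a minimal sketch. CacheDumpProvider and cache_size_bytes_ are hypothetical; SetFastMemoryPollingEnabled() and PollFastMemoryTotal() follow the names this CL invokes on the provider and are assumed to be virtual hooks on MemoryDumpProvider.

    #include "base/trace_event/memory_dump_provider.h"
    #include "base/trace_event/process_memory_dump.h"

    class CacheDumpProvider : public base::trace_event::MemoryDumpProvider {
     public:
      bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                        base::trace_event::ProcessMemoryDump* pmd) override {
        // Full dump path; only reached through the regular OnMemoryDump flow.
        return true;
      }

      void SetFastMemoryPollingEnabled(bool enabled) override {
        // Invoked on the dump thread when polling starts/stops; set up or
        // tear down any cheaply readable counters here.
      }

      void PollFastMemoryTotal(uint64_t* memory_total) override {
        // Must stay cheap and lock-free: report a cached counter.
        *memory_total = cache_size_bytes_;
      }

     private:
      uint64_t cache_size_bytes_ = 0;
    };
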
381 | |
352 void MemoryDumpManager::RequestGlobalDump( | 382 void MemoryDumpManager::RequestGlobalDump( |
353 MemoryDumpType dump_type, | 383 MemoryDumpType dump_type, |
354 MemoryDumpLevelOfDetail level_of_detail, | 384 MemoryDumpLevelOfDetail level_of_detail, |
355 const MemoryDumpCallback& callback) { | 385 const MemoryDumpCallback& callback) { |
356 // Bail out immediately if tracing is not enabled at all or if the dump mode | 386 // Bail out immediately if tracing is not enabled at all or if the dump mode |
357 // is not allowed. | 387 // is not allowed. |
358 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) || | 388 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_)) || |
359 !IsDumpModeAllowed(level_of_detail)) { | 389 !IsDumpModeAllowed(level_of_detail)) { |
360 VLOG(1) << kLogPrefix << " failed because " << kTraceCategory | 390 VLOG(1) << kLogPrefix << " failed because " << kTraceCategory |
361 << " tracing category is not enabled or the requested dump mode is " | 391 << " tracing category is not enabled or the requested dump mode is " |
(...skipping 233 matching lines...) | |
595 args); | 625 args); |
596 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd); | 626 bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd); |
597 mdpinfo->consecutive_failures = | 627 mdpinfo->consecutive_failures = |
598 dump_successful ? 0 : mdpinfo->consecutive_failures + 1; | 628 dump_successful ? 0 : mdpinfo->consecutive_failures + 1; |
599 } | 629 } |
600 | 630 |
601 pmd_async_state->pending_dump_providers.pop_back(); | 631 pmd_async_state->pending_dump_providers.pop_back(); |
602 SetupNextMemoryDump(std::move(pmd_async_state)); | 632 SetupNextMemoryDump(std::move(pmd_async_state)); |
603 } | 633 } |
604 | 634 |
635 void MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) { | |
636 #if DCHECK_IS_ON() | |
Primiano Tucci (use gerrit) 2016/12/15 16:45:01
plz dcheck section and just add a comment.
There i
ssid 2016/12/16 02:31:54
I agree this is the case. But this is the part I s
Primiano Tucci (use gerrit) 2016/12/16 12:34:40
Let's move this discussion to the next CL.
There i
| |
637 { | |
638 AutoLock lock(lock_); | |
639 DCHECK(dump_thread_->task_runner()->BelongsToCurrentThread()); | |
640 } | |
641 #endif | |
642 *memory_total = 0; | |
643 // Note: we call PollFastMemoryTotal even if the dump provider is disabled | |
644 // (unregistered), to avoid taking the lock and slowing down this method. | |
645 for (const auto& mdpinfo : mdps_for_fast_polling_) { | |
646 uint64_t value = 0; | |
647 mdpinfo->dump_provider->PollFastMemoryTotal(&value); | |
648 *memory_total += value; | |
649 } | |
650 return; | |
651 } | |
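
For a sense of how this accessor might be consumed, a speculative sketch of a periodic task on the dump thread that samples the total. PollingTick() and the peak-detection idea are assumptions, not part of this CL; only PollFastMemoryTotal() comes from the code above.

    // Hypothetical consumer, assumed to run on |dump_thread_|.
    void PollingTick() {
      uint64_t total_bytes = 0;
      base::trace_event::MemoryDumpManager::GetInstance()->PollFastMemoryTotal(
          &total_bytes);
      // e.g. compare |total_bytes| against a threshold and request a detailed
      // dump when the process-wide total spikes.
    }
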
652 | |
605 // static | 653 // static |
606 void MemoryDumpManager::FinalizeDumpAndAddToTrace( | 654 void MemoryDumpManager::FinalizeDumpAndAddToTrace( |
607 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { | 655 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { |
608 HEAP_PROFILER_SCOPED_IGNORE; | 656 HEAP_PROFILER_SCOPED_IGNORE; |
609 DCHECK(pmd_async_state->pending_dump_providers.empty()); | 657 DCHECK(pmd_async_state->pending_dump_providers.empty()); |
610 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; | 658 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; |
611 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { | 659 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { |
612 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = | 660 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = |
613 pmd_async_state->callback_task_runner; | 661 pmd_async_state->callback_task_runner; |
614 callback_task_runner->PostTask( | 662 callback_task_runner->PostTask( |
(...skipping 93 matching lines...) | |
708 | 756 |
709 { | 757 { |
710 AutoLock lock(lock_); | 758 AutoLock lock(lock_); |
711 | 759 |
712 DCHECK(delegate_); // At this point we must have a delegate. | 760 DCHECK(delegate_); // At this point we must have a delegate. |
713 session_state_ = session_state; | 761 session_state_ = session_state; |
714 | 762 |
715 DCHECK(!dump_thread_); | 763 DCHECK(!dump_thread_); |
716 dump_thread_ = std::move(dump_thread); | 764 dump_thread_ = std::move(dump_thread); |
717 | 765 |
766 mdps_for_fast_polling_.clear(); | |
767 for (const auto& mdpinfo : dump_providers_) { | |
768 if (mdpinfo->options.is_fast_polling_supported) { | |
769 mdps_for_fast_polling_.insert(mdpinfo); | |
770 mdpinfo->dump_provider->SetFastMemoryPollingEnabled(true); | |
Primiano Tucci (use gerrit) 2016/12/15 16:45:01
I think we should do this call only when needed.
O
ssid 2016/12/16 02:31:54
I just don't like the idea of session_state_ scann
Primiano Tucci (use gerrit) 2016/12/16 12:34:40
Ok the more I think to this the more I feel that t
| |
771 } | |
772 } | |
773 | |
718 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); | 774 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); |
719 | 775 |
720 // TODO(primiano): This is a temporary hack to disable periodic memory dumps | 776 // TODO(primiano): This is a temporary hack to disable periodic memory dumps |
721 // when running memory benchmarks until telemetry uses TraceConfig to | 777 // when running memory benchmarks until telemetry uses TraceConfig to |
722 // enable/disable periodic dumps. See crbug.com/529184 . | 778 // enable/disable periodic dumps. See crbug.com/529184 . |
723 if (!is_coordinator_ || | 779 if (!is_coordinator_ || |
724 CommandLine::ForCurrentProcess()->HasSwitch( | 780 CommandLine::ForCurrentProcess()->HasSwitch( |
725 "enable-memory-benchmarking")) { | 781 "enable-memory-benchmarking")) { |
726 return; | 782 return; |
727 } | 783 } |
(...skipping 13 matching lines...) | |
741 AutoLock lock(lock_); | 797 AutoLock lock(lock_); |
742 dump_thread = std::move(dump_thread_); | 798 dump_thread = std::move(dump_thread_); |
743 session_state_ = nullptr; | 799 session_state_ = nullptr; |
744 } | 800 } |
745 | 801 |
746 // Thread stops are blocking and must be performed outside of the |lock_| | 802 // Thread stops are blocking and must be performed outside of the |lock_| |
747 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). | 803 // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). |
748 periodic_dump_timer_.Stop(); | 804 periodic_dump_timer_.Stop(); |
749 if (dump_thread) | 805 if (dump_thread) |
750 dump_thread->Stop(); | 806 dump_thread->Stop(); |
807 | |
808 // |mdps_for_fast_polling_| must be cleared only after the dump thread is | |
809 // stopped (polling tasks are done). | |
810 { | |
811 AutoLock lock(lock_); | |
812 for (const auto& mdpinfo : mdps_for_fast_polling_) | |
813 mdpinfo->dump_provider->SetFastMemoryPollingEnabled(false); | |
814 mdps_for_fast_polling_.clear(); | |
815 } | |
751 } | 816 } |
752 | 817 |
753 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) { | 818 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) { |
754 AutoLock lock(lock_); | 819 AutoLock lock(lock_); |
755 if (!session_state_) | 820 if (!session_state_) |
756 return false; | 821 return false; |
757 return session_state_->memory_dump_config().allowed_dump_modes.count( | 822 return session_state_->memory_dump_config().allowed_dump_modes.count( |
758 dump_mode) != 0; | 823 dump_mode) != 0; |
759 } | 824 } |
760 | 825 |
(...skipping 130 matching lines...) | |
891 if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0) | 956 if (heavy_dump_rate_ > 0 && periodic_dumps_count_ % heavy_dump_rate_ == 0) |
892 level_of_detail = MemoryDumpLevelOfDetail::DETAILED; | 957 level_of_detail = MemoryDumpLevelOfDetail::DETAILED; |
893 ++periodic_dumps_count_; | 958 ++periodic_dumps_count_; |
894 | 959 |
895 MemoryDumpManager::GetInstance()->RequestGlobalDump( | 960 MemoryDumpManager::GetInstance()->RequestGlobalDump( |
896 MemoryDumpType::PERIODIC_INTERVAL, level_of_detail); | 961 MemoryDumpType::PERIODIC_INTERVAL, level_of_detail); |
897 } | 962 } |
898 | 963 |
899 } // namespace trace_event | 964 } // namespace trace_event |
900 } // namespace base | 965 } // namespace base |