OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
6 | 6 |
7 #include <inttypes.h> | 7 #include <inttypes.h> |
8 #include <stdio.h> | 8 #include <stdio.h> |
9 | 9 |
10 #include <algorithm> | 10 #include <algorithm> |
(...skipping 164 matching lines...) | |
175 LeakySingletonTraits<MemoryDumpManager>>::get(); | 175 LeakySingletonTraits<MemoryDumpManager>>::get(); |
176 } | 176 } |
177 | 177 |
178 // static | 178 // static |
179 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) { | 179 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) { |
180 g_instance_for_testing = instance; | 180 g_instance_for_testing = instance; |
181 } | 181 } |
182 | 182 |
183 MemoryDumpManager::MemoryDumpManager() | 183 MemoryDumpManager::MemoryDumpManager() |
184 : is_coordinator_(false), | 184 : is_coordinator_(false), |
185 is_enabled_(0), | |
186 tracing_process_id_(kInvalidTracingProcessId), | 185 tracing_process_id_(kInvalidTracingProcessId), |
187 dumper_registrations_ignored_for_testing_(false), | 186 dumper_registrations_ignored_for_testing_(false), |
188 heap_profiling_enabled_(false) { | 187 heap_profiling_enabled_(false) { |
189 g_next_guid.GetNext(); // Make sure that first guid is not zero. | 188 g_next_guid.GetNext(); // Make sure that first guid is not zero. |
190 | 189 |
191 // At this point the command line may not be initialized but we try to | 190 // At this point the command line may not be initialized but we try to |
192 // enable the heap profiler to capture allocations as soon as possible. | 191 // enable the heap profiler to capture allocations as soon as possible. |
193 EnableHeapProfilingIfNeeded(); | 192 EnableHeapProfilingIfNeeded(); |
194 | 193 |
195 strict_thread_check_blacklist_.insert(std::begin(kStrictThreadCheckBlacklist), | 194 strict_thread_check_blacklist_.insert(std::begin(kStrictThreadCheckBlacklist), |
(...skipping 191 matching lines...) | |
387 | 386 |
388 if (take_mdp_ownership_and_delete_async) { | 387 if (take_mdp_ownership_and_delete_async) { |
389 // The MDP will be deleted whenever the MDPInfo struct will, that is either: | 388 // The MDP will be deleted whenever the MDPInfo struct will, that is either: |
390 // - At the end of this function, if no dump is in progress. | 389 // - At the end of this function, if no dump is in progress. |
391 // - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is | 390 // - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is |
392 // removed from |pending_dump_providers|. | 391 // removed from |pending_dump_providers|. |
393 // - When the provider is removed from other clients (MemoryPeakDetector). | 392 // - When the provider is removed from other clients (MemoryPeakDetector). |
394 DCHECK(!(*mdp_iter)->owned_dump_provider); | 393 DCHECK(!(*mdp_iter)->owned_dump_provider); |
395 (*mdp_iter)->owned_dump_provider = std::move(owned_mdp); | 394 (*mdp_iter)->owned_dump_provider = std::move(owned_mdp); |
396 } else if (strict_thread_check_blacklist_.count((*mdp_iter)->name) == 0 || | 395 } else if (strict_thread_check_blacklist_.count((*mdp_iter)->name) == 0 || |
397 subtle::NoBarrier_Load(&is_enabled_)) { | 396 tracing_observer_->IsMemoryInfraTracingEnabled()) { |
Primiano Tucci (use gerrit) 2017/04/28 15:58:17:
ssid is ripping out this code for you in https://c
ssid 2017/04/28 18:35:04:
Sorry i did not review this cl because of the ment
| |
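To make the thread above easier to follow, here is a minimal, self-contained sketch of the gating logic this hunk changes: the strict unregistration DCHECK is skipped only for blacklisted providers, and only while memory-infra tracing is off. Everything below (the class, the `tracing_enabled` flag) is an illustrative stand-in, not the real `MemoryDumpManager` or `tracing_observer_` API.

```cpp
#include <cassert>
#include <set>
#include <string>

// Illustrative stand-in for the relevant MemoryDumpManager state; the real
// class asks its tracing observer instead of holding a plain bool.
struct UnregistrationChecker {
  std::set<std::string> strict_thread_check_blacklist;
  bool tracing_enabled = false;  // Stand-in for IsMemoryInfraTracingEnabled().

  // Returns true if the strict "unregistered on the wrong sequence" DCHECK
  // should fire for |provider_name|. Blacklisted providers are only checked
  // while tracing is enabled; everything else is checked unconditionally.
  bool ShouldEnforceStrictThreadCheck(const std::string& provider_name) const {
    return strict_thread_check_blacklist.count(provider_name) == 0 ||
           tracing_enabled;
  }
};

int main() {
  UnregistrationChecker checker;
  checker.strict_thread_check_blacklist.insert("LegacyProvider");

  assert(checker.ShouldEnforceStrictThreadCheck("WellBehavedProvider"));
  assert(!checker.ShouldEnforceStrictThreadCheck("LegacyProvider"));

  checker.tracing_enabled = true;  // Once tracing is on, everyone is checked.
  assert(checker.ShouldEnforceStrictThreadCheck("LegacyProvider"));
  return 0;
}
```

The behavioral difference in this hunk is only where the "tracing enabled" answer comes from: the lock-free `is_enabled_` load on the left, the tracing observer on the right.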
398 // If dump provider's name is on |strict_thread_check_blacklist_|, then the | 397 // If dump provider's name is on |strict_thread_check_blacklist_|, then the |
399 // DCHECK is fired only when tracing is enabled. Otherwise the DCHECK is | 398 // DCHECK is fired only when tracing is enabled. Otherwise the DCHECK is |
400 // fired even when tracing is not enabled (stricter). | 399 // fired even when tracing is not enabled (stricter). |
401 // TODO(ssid): Remove this condition after removing all the dump providers | 400 // TODO(ssid): Remove this condition after removing all the dump providers |
402 // in the blacklist and the buildbots are no longer flakily hitting the | 401 // in the blacklist and the buildbots are no longer flakily hitting the |
403 // DCHECK, crbug.com/643438. | 402 // DCHECK, crbug.com/643438. |
404 | 403 |
405 // If you hit this DCHECK, your dump provider has a bug. | 404 // If you hit this DCHECK, your dump provider has a bug. |
406 // Unregistration of a MemoryDumpProvider is safe only if: | 405 // Unregistration of a MemoryDumpProvider is safe only if: |
407 // - The MDP has specified a sequenced task runner affinity AND the | 406 // - The MDP has specified a sequenced task runner affinity AND the |
(...skipping 20 matching lines...) | |
428 // to just skip it, without actually invoking the |mdp|, which might be | 427 // to just skip it, without actually invoking the |mdp|, which might be |
429 // destroyed by the caller soon after this method returns. | 428 // destroyed by the caller soon after this method returns. |
430 (*mdp_iter)->disabled = true; | 429 (*mdp_iter)->disabled = true; |
431 dump_providers_.erase(mdp_iter); | 430 dump_providers_.erase(mdp_iter); |
432 } | 431 } |
433 | 432 |
434 void MemoryDumpManager::RequestGlobalDump( | 433 void MemoryDumpManager::RequestGlobalDump( |
435 MemoryDumpType dump_type, | 434 MemoryDumpType dump_type, |
436 MemoryDumpLevelOfDetail level_of_detail, | 435 MemoryDumpLevelOfDetail level_of_detail, |
437 const GlobalMemoryDumpCallback& callback) { | 436 const GlobalMemoryDumpCallback& callback) { |
438 // Bail out immediately if tracing is not enabled at all or if the dump mode | 437 if (!IsDumpModeAllowed(level_of_detail)) { |
Primiano Tucci (use gerrit) 2017/04/28 15:58:17:
I think we should have this check only in the ::Fi
ssid 2017/04/28 18:35:04:
No this used to exist here and not even trigger wr
hjd 2017/05/04 11:17:18:
Done.
| |
439 // is not allowed. | 438 VLOG(1) << kLogPrefix |
440 if (!UNLIKELY(subtle::NoBarrier_Load(&is_enabled_)) || | 439 << " failed because the requested dump mode is " |
441 !IsDumpModeAllowed(level_of_detail)) { | |
442 VLOG(1) << kLogPrefix << " failed because " << kTraceCategory | |
443 << " tracing category is not enabled or the requested dump mode is " | |
444 "not allowed by trace config."; | 440 "not allowed by trace config."; |
445 if (!callback.is_null()) | 441 if (!callback.is_null()) |
446 callback.Run(0u /* guid */, false /* success */); | 442 callback.Run(0u /* guid */, false /* success */); |
447 return; | 443 return; |
448 } | 444 } |
449 | 445 |
450 const uint64_t guid = | 446 const uint64_t guid = |
451 TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext()); | 447 TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext()); |
452 | 448 |
453 // Creates an async event to keep track of the global dump evolution. | 449 // Creates an async event to keep track of the global dump evolution. |
(...skipping 67 matching lines...) | |
521 // unexpected dumps. | 517 // unexpected dumps. |
522 if (TraceLog::GetInstance() | 518 if (TraceLog::GetInstance() |
523 ->GetCurrentTraceConfig() | 519 ->GetCurrentTraceConfig() |
524 .IsArgumentFilterEnabled()) { | 520 .IsArgumentFilterEnabled()) { |
525 CHECK_EQ(MemoryDumpLevelOfDetail::BACKGROUND, args.level_of_detail); | 521 CHECK_EQ(MemoryDumpLevelOfDetail::BACKGROUND, args.level_of_detail); |
526 } | 522 } |
527 | 523 |
528 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; | 524 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; |
529 { | 525 { |
530 AutoLock lock(lock_); | 526 AutoLock lock(lock_); |
531 | 527 |
ssid 2017/04/28 18:35:04:
We should have a condition here that says:
if (le
hjd 2017/05/04 11:17:18:
Done.
| |
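The suggestion in the thread above amounts to deciding, while `lock_` is held, whether the dump can proceed at all before any per-dump state is built. A rough sketch of that shape, assuming simplified stand-in types rather than the real `ProcessMemoryDumpAsyncState`, callback, or session-state classes:

```cpp
#include <cassert>
#include <functional>
#include <memory>
#include <mutex>

// Simplified stand-ins for the real MDM members (lock_, session_state_);
// none of these are the actual Chromium types.
enum class LevelOfDetail { kBackground, kLight, kDetailed };

struct SessionState {
  LevelOfDetail max_allowed;
  bool IsDumpModeAllowed(LevelOfDetail mode) const {
    return mode <= max_allowed;
  }
};

class DumpManagerSketch {
 public:
  using DumpCallback = std::function<void(bool success)>;

  void CreateProcessDump(LevelOfDetail mode, const DumpCallback& callback) {
    bool can_dump;
    {
      std::lock_guard<std::mutex> lock(lock_);
      // Decide under the lock: a null |session_state_| means tracing was
      // disabled in the meantime; a disallowed mode means the request does
      // not match the trace config. In both cases no per-dump state is built.
      can_dump = session_state_ && session_state_->IsDumpModeAllowed(mode);
      // ... when |can_dump|, the real code builds its async dump state here,
      //     still while holding the lock ...
    }
    if (!can_dump) {
      if (callback) callback(false);
      return;
    }
    // ... otherwise the first dump provider hop would be posted here and
    //     |callback| would run once the dump is finalized ...
  }

  void SetSessionState(std::unique_ptr<SessionState> state) {
    std::lock_guard<std::mutex> lock(lock_);
    session_state_ = std::move(state);
  }

 private:
  std::mutex lock_;
  std::unique_ptr<SessionState> session_state_;
};

int main() {
  DumpManagerSketch mdm;
  bool rejected = false;

  // No session state: tracing is off, so the request fails fast.
  mdm.CreateProcessDump(LevelOfDetail::kDetailed,
                        [&](bool success) { rejected = !success; });
  assert(rejected);

  // Background-only session: a detailed dump is rejected as well.
  mdm.SetSessionState(std::make_unique<SessionState>(
      SessionState{LevelOfDetail::kBackground}));
  mdm.CreateProcessDump(LevelOfDetail::kDetailed,
                        [&](bool success) { rejected = !success; });
  assert(rejected);
  return 0;
}
```

Keeping the decision under the lock matters because Disable() resets `session_state_` under the same lock, so a dump request racing with disabling either sees the state and proceeds or sees nullptr and fails cleanly.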
532 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( | 528 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( |
533 args, dump_providers_, session_state_, callback, | 529 args, dump_providers_, session_state_, callback, |
534 GetOrCreateBgTaskRunnerLocked())); | 530 GetOrCreateBgTaskRunnerLocked())); |
535 | 531 |
536 // Safety check to prevent reaching here without calling RequestGlobalDump, | 532 // Safety check to prevent reaching here without calling RequestGlobalDump, |
537 // with disallowed modes. If |session_state_| is null then tracing is | 533 // with disallowed modes. If |session_state_| is null then tracing is |
538 // disabled. | 534 // disabled. |
539 CHECK(!session_state_ || | 535 CHECK(!session_state_ || |
540 session_state_->IsDumpModeAllowed(args.level_of_detail)); | 536 session_state_->IsDumpModeAllowed(args.level_of_detail)); |
ssid 2017/04/28 18:35:04:
If we are going with the first option of first cre
hjd 2017/05/04 11:17:18:
Acknowledged.
| |
541 | 537 |
542 // If enabled, holds back the peak detector resetting its estimation window. | 538 // If enabled, holds back the peak detector resetting its estimation window. |
543 MemoryPeakDetector::GetInstance()->Throttle(); | 539 MemoryPeakDetector::GetInstance()->Throttle(); |
544 } | 540 } |
545 | 541 |
546 // Start the process dump. This involves task runner hops as specified by the | 542 // Start the process dump. This involves task runner hops as specified by the |
547 // MemoryDumpProvider(s) in RegisterDumpProvider()). | 543 // MemoryDumpProvider(s) in RegisterDumpProvider()). |
548 SetupNextMemoryDump(std::move(pmd_async_state)); | 544 SetupNextMemoryDump(std::move(pmd_async_state)); |
549 } | 545 } |
550 | 546 |
551 // PostTask InvokeOnMemoryDump() to the dump provider's sequenced task runner. A | 547 // PostTask InvokeOnMemoryDump() to the dump provider's sequenced task runner. A |
552 // PostTask is always required for a generic SequencedTaskRunner to ensure that | 548 // PostTask is always required for a generic SequencedTaskRunner to ensure that |
553 // no other task is running on it concurrently. SetupNextMemoryDump() and | 549 // no other task is running on it concurrently. SetupNextMemoryDump() and |
554 // InvokeOnMemoryDump() are called alternatively which linearizes the dump | 550 // InvokeOnMemoryDump() are called alternatively which linearizes the dump |
555 // provider's OnMemoryDump invocations. | 551 // provider's OnMemoryDump invocations. |
556 // At most one of either SetupNextMemoryDump() or InvokeOnMemoryDump() can be | 552 // At most one of either SetupNextMemoryDump() or InvokeOnMemoryDump() can be |
557 // active at any time for a given PMD, regardless of status of the |lock_|. | 553 // active at any time for a given PMD, regardless of status of the |lock_|. |
558 // |lock_| is used in these functions purely to ensure consistency w.r.t. | 554 // |lock_| is used in these functions purely to ensure consistency w.r.t. |
559 // (un)registrations of |dump_providers_|. | 555 // (un)registrations of |dump_providers_|. |
560 void MemoryDumpManager::SetupNextMemoryDump( | 556 void MemoryDumpManager::SetupNextMemoryDump( |
561 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { | 557 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { |
562 HEAP_PROFILER_SCOPED_IGNORE; | 558 HEAP_PROFILER_SCOPED_IGNORE; |
563 // Initalizes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs | 559 // Initalizes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs |
564 // in the PostTask below don't end up registering their own dump providers | 560 // in the PostTask below don't end up registering their own dump providers |
565 // (for discounting trace memory overhead) while holding the |lock_|. | 561 // (for discounting trace memory overhead) while holding the |lock_|. |
566 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); | 562 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); |
567 | 563 |
568 // MDM might have been disabled before getting to this point. | |
569 // Anyway either MDM is disabled or this was the last hop, create a trace | |
570 // event, add it to the trace and finalize process dump invoking the callback. | |
571 if (!subtle::NoBarrier_Load(&is_enabled_)) { | |
572 if (pmd_async_state->pending_dump_providers.empty()) { | |
573 VLOG(1) << kLogPrefix << " failed because MemoryDumpManager was disabled" | |
574 << " before finalizing the dump"; | |
575 } else { | |
576 VLOG(1) << kLogPrefix << " failed because MemoryDumpManager was disabled" | |
577 << " before dumping " | |
578 << pmd_async_state->pending_dump_providers.back().get()->name; | |
579 } | |
580 pmd_async_state->dump_successful = false; | |
581 pmd_async_state->pending_dump_providers.clear(); | |
582 } | |
583 | |
584 if (pmd_async_state->pending_dump_providers.empty()) | 564 if (pmd_async_state->pending_dump_providers.empty()) |
585 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state)); | 565 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state)); |
586 | 566 |
587 // Read MemoryDumpProviderInfo thread safety considerations in | 567 // Read MemoryDumpProviderInfo thread safety considerations in |
588 // memory_dump_manager.h when accessing |mdpinfo| fields. | 568 // memory_dump_manager.h when accessing |mdpinfo| fields. |
589 MemoryDumpProviderInfo* mdpinfo = | 569 MemoryDumpProviderInfo* mdpinfo = |
590 pmd_async_state->pending_dump_providers.back().get(); | 570 pmd_async_state->pending_dump_providers.back().get(); |
591 | 571 |
592 // If we are in background tracing, we should invoke only the whitelisted | 572 // If we are in background tracing, we should invoke only the whitelisted |
593 // providers. Ignore other providers and continue. | 573 // providers. Ignore other providers and continue. |
(...skipping 234 matching lines...) | |
828 MakeUnique<SessionStateConvertableProxy<TypeNameDeduplicator>>( | 808 MakeUnique<SessionStateConvertableProxy<TypeNameDeduplicator>>( |
829 session_state, &MemoryDumpSessionState::type_name_deduplicator)); | 809 session_state, &MemoryDumpSessionState::type_name_deduplicator)); |
830 } | 810 } |
831 | 811 |
832 AutoLock lock(lock_); | 812 AutoLock lock(lock_); |
833 | 813 |
834 // At this point we must have the ability to request global dumps. | 814 // At this point we must have the ability to request global dumps. |
835 DCHECK(!request_dump_function_.is_null()); | 815 DCHECK(!request_dump_function_.is_null()); |
836 session_state_ = session_state; | 816 session_state_ = session_state; |
837 | 817 |
838 subtle::NoBarrier_Store(&is_enabled_, 1); | |
839 | |
840 MemoryDumpScheduler::Config periodic_config; | 818 MemoryDumpScheduler::Config periodic_config; |
841 bool peak_detector_configured = false; | 819 bool peak_detector_configured = false; |
842 for (const auto& trigger : memory_dump_config.triggers) { | 820 for (const auto& trigger : memory_dump_config.triggers) { |
843 if (!session_state_->IsDumpModeAllowed(trigger.level_of_detail)) { | 821 if (!session_state_->IsDumpModeAllowed(trigger.level_of_detail)) { |
844 NOTREACHED(); | 822 NOTREACHED(); |
845 continue; | 823 continue; |
846 } | 824 } |
847 if (trigger.trigger_type == MemoryDumpType::PERIODIC_INTERVAL) { | 825 if (trigger.trigger_type == MemoryDumpType::PERIODIC_INTERVAL) { |
848 if (periodic_config.triggers.empty()) { | 826 if (periodic_config.triggers.empty()) { |
849 periodic_config.callback = BindRepeating(&OnPeriodicSchedulerTick); | 827 periodic_config.callback = BindRepeating(&OnPeriodicSchedulerTick); |
(...skipping 30 matching lines...) | |
880 if (is_coordinator_ && !periodic_config.triggers.empty()) { | 858 if (is_coordinator_ && !periodic_config.triggers.empty()) { |
881 MemoryDumpScheduler::GetInstance()->Start(periodic_config, | 859 MemoryDumpScheduler::GetInstance()->Start(periodic_config, |
882 GetOrCreateBgTaskRunnerLocked()); | 860 GetOrCreateBgTaskRunnerLocked()); |
883 } | 861 } |
884 } | 862 } |
885 | 863 |
886 void MemoryDumpManager::Disable() { | 864 void MemoryDumpManager::Disable() { |
887 // There might be a memory dump in progress while this happens. Therefore, | 865 // There might be a memory dump in progress while this happens. Therefore, |
888 // ensure that the MDM state which depends on the tracing enabled / disabled | 866 // ensure that the MDM state which depends on the tracing enabled / disabled |
889 // state is always accessed by the dumping methods holding the |lock_|. | 867 // state is always accessed by the dumping methods holding the |lock_|. |
890 if (!subtle::NoBarrier_Load(&is_enabled_)) | 868 AutoLock lock(lock_); |
891 return; | 869 |
892 subtle::NoBarrier_Store(&is_enabled_, 0); | 870 MemoryDumpScheduler::GetInstance()->Stop(); |
ssid 2017/04/28 18:35:04:
Yay we don't need this anymore :D
hjd 2017/05/04 11:17:18:
:D
| |
893 { | 871 MemoryPeakDetector::GetInstance()->TearDown(); |
894 AutoLock lock(lock_); | 872 session_state_ = nullptr; |
895 MemoryDumpScheduler::GetInstance()->Stop(); | |
896 MemoryPeakDetector::GetInstance()->TearDown(); | |
897 session_state_ = nullptr; | |
898 } | |
899 } | 873 } |
900 | 874 |
901 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) { | 875 bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) { |
ssid 2017/04/28 18:35:04:
Maybe rename to DoesSessionAllowDumpMode?
Since wi
hjd 2017/05/04 11:17:18:
Done.
| |
902 AutoLock lock(lock_); | 876 AutoLock lock(lock_); |
903 if (!session_state_) | 877 if (!session_state_) |
904 return false; | 878 return false; |
905 return session_state_->IsDumpModeAllowed(dump_mode); | 879 return session_state_->IsDumpModeAllowed(dump_mode); |
906 } | 880 } |
907 | 881 |
908 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState( | 882 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState( |
909 MemoryDumpRequestArgs req_args, | 883 MemoryDumpRequestArgs req_args, |
910 const MemoryDumpProviderInfo::OrderedSet& dump_providers, | 884 const MemoryDumpProviderInfo::OrderedSet& dump_providers, |
911 scoped_refptr<MemoryDumpSessionState> session_state, | 885 scoped_refptr<MemoryDumpSessionState> session_state, |
(...skipping 19 matching lines...) | |
931 if (iter == process_dumps.end()) { | 905 if (iter == process_dumps.end()) { |
932 std::unique_ptr<ProcessMemoryDump> new_pmd( | 906 std::unique_ptr<ProcessMemoryDump> new_pmd( |
933 new ProcessMemoryDump(session_state, dump_args)); | 907 new ProcessMemoryDump(session_state, dump_args)); |
934 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; | 908 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; |
935 } | 909 } |
936 return iter->second.get(); | 910 return iter->second.get(); |
937 } | 911 } |
938 | 912 |
939 } // namespace trace_event | 913 } // namespace trace_event |
940 } // namespace base | 914 } // namespace base |
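Stepping back from the individual hunks, the core of this CL is that the lock-free `is_enabled_` flag is removed and "is memory-infra enabled" is now answered by whether the lock-protected `session_state_` is set. A before/after sketch of that pattern, assuming stand-in types (not the Chromium ones):

```cpp
#include <atomic>
#include <memory>
#include <mutex>

struct SessionState { /* trace-config-derived state */ };

// Before: a separate lock-free flag, checked with relaxed loads, had to be
// kept in sync with the session state by hand.
class BeforeSketch {
 public:
  void Enable(std::unique_ptr<SessionState> state) {
    {
      std::lock_guard<std::mutex> lock(lock_);
      session_state_ = std::move(state);
    }
    is_enabled_.store(true, std::memory_order_relaxed);
  }
  void Disable() {
    if (!is_enabled_.exchange(false, std::memory_order_relaxed))
      return;
    std::lock_guard<std::mutex> lock(lock_);
    session_state_.reset();
  }
  bool enabled() const {
    return is_enabled_.load(std::memory_order_relaxed);
  }

 private:
  std::atomic<bool> is_enabled_{false};
  std::mutex lock_;
  std::unique_ptr<SessionState> session_state_;
};

// After: the presence of |session_state_|, read under |lock_|, is the single
// source of truth; Disable() is just a lock-protected teardown.
class AfterSketch {
 public:
  void Enable(std::unique_ptr<SessionState> state) {
    std::lock_guard<std::mutex> lock(lock_);
    session_state_ = std::move(state);
  }
  void Disable() {
    std::lock_guard<std::mutex> lock(lock_);
    session_state_.reset();
  }
  bool enabled() {
    std::lock_guard<std::mutex> lock(lock_);
    return session_state_ != nullptr;
  }

 private:
  std::mutex lock_;
  std::unique_ptr<SessionState> session_state_;
};

int main() {
  AfterSketch mdm;
  mdm.Enable(std::make_unique<SessionState>());
  bool on = mdm.enabled();   // true while a session exists
  mdm.Disable();
  bool off = mdm.enabled();  // false after teardown
  return on && !off ? 0 : 1;
}
```

The trade-off is that every "is enabled?" query now takes the lock, which fits this code since the dumping paths already serialize on `lock_` for consistency with dump provider (un)registrations.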