OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
6 | 6 |
7 #include <inttypes.h> | 7 #include <inttypes.h> |
8 #include <stdio.h> | 8 #include <stdio.h> |
9 | 9 |
10 #include <algorithm> | 10 #include <algorithm> |
(...skipping 144 matching lines...)
155 LeakySingletonTraits<MemoryDumpManager>>::get(); | 155 LeakySingletonTraits<MemoryDumpManager>>::get(); |
156 } | 156 } |
157 | 157 |
158 // static | 158 // static |
159 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) { | 159 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) { |
160 g_instance_for_testing = instance; | 160 g_instance_for_testing = instance; |
161 } | 161 } |
162 | 162 |
163 MemoryDumpManager::MemoryDumpManager() | 163 MemoryDumpManager::MemoryDumpManager() |
164 : is_coordinator_(false), | 164 : is_coordinator_(false), |
165 is_enabled_(0), | |
166 tracing_process_id_(kInvalidTracingProcessId), | 165 tracing_process_id_(kInvalidTracingProcessId), |
167 dumper_registrations_ignored_for_testing_(false), | 166 dumper_registrations_ignored_for_testing_(false), |
168 heap_profiling_enabled_(false) { | 167 heap_profiling_enabled_(false) { |
169 g_next_guid.GetNext(); // Make sure that first guid is not zero. | 168 g_next_guid.GetNext(); // Make sure that first guid is not zero. |
170 | 169 |
171 // At this point the command line may not be initialized but we try to | 170 // At this point the command line may not be initialized but we try to |
172 // enable the heap profiler to capture allocations as soon as possible. | 171 // enable the heap profiler to capture allocations as soon as possible. |
173 EnableHeapProfilingIfNeeded(); | 172 EnableHeapProfilingIfNeeded(); |
174 } | 173 } |
175 | 174 |
(...skipping 226 matching lines...)
402 // to just skip it, without actually invoking the |mdp|, which might be | 401 // to just skip it, without actually invoking the |mdp|, which might be |
403 // destroyed by the caller soon after this method returns. | 402 // destroyed by the caller soon after this method returns. |
404 (*mdp_iter)->disabled = true; | 403 (*mdp_iter)->disabled = true; |
405 dump_providers_.erase(mdp_iter); | 404 dump_providers_.erase(mdp_iter); |
406 } | 405 } |
407 | 406 |
408 void MemoryDumpManager::RequestGlobalDump( | 407 void MemoryDumpManager::RequestGlobalDump( |
409 MemoryDumpType dump_type, | 408 MemoryDumpType dump_type, |
410 MemoryDumpLevelOfDetail level_of_detail, | 409 MemoryDumpLevelOfDetail level_of_detail, |
411 const GlobalMemoryDumpCallback& callback) { | 410 const GlobalMemoryDumpCallback& callback) { |
412 // Bail out immediately if tracing is not enabled at all or if the dump mode | 411 // If |request_dump_function_| is null, MDM hasn't been initialized yet. |
413 // is not allowed. | 412 if (request_dump_function_.is_null()) { |
414 if (!UNLIKELY(subtle::NoBarrier_Load(&is_enabled_))) { | 413 VLOG(1) << kLogPrefix << " failed because" |
415 VLOG(1) << kLogPrefix << " failed because " << kTraceCategory | 414 << " memory dump manager is not enabled."; |
416 << " tracing category is not enabled."; | |
417 if (!callback.is_null()) | 415 if (!callback.is_null()) |
418 callback.Run(0u /* guid */, false /* success */); | 416 callback.Run(0u /* guid */, false /* success */); |
419 return; | 417 return; |
420 } | 418 } |
421 | 419 |
422 const uint64_t guid = | 420 const uint64_t guid = |
423 TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext()); | 421 TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext()); |
424 | 422 |
425 // Creates an async event to keep track of the global dump evolution. | 423 // Creates an async event to keep track of the global dump evolution. |
426 // The |wrapped_callback| will generate the ASYNC_END event and then invoke | 424 // The |wrapped_callback| will generate the ASYNC_END event and then invoke |
(...skipping 67 matching lines...)
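[Editor's note, not part of this CL] A minimal caller-side sketch of the early-failure path added in RequestGlobalDump() above: while |request_dump_function_| is still null, the callback is run with guid 0 and success == false. The callback name is hypothetical, and the EXPLICITLY_TRIGGERED / DETAILED enum values and base::Bind plumbing are assumptions drawn from surrounding Chromium code, not from this patch.

    // Hypothetical caller (needs base/bind.h, base/logging.h and
    // base/trace_event/memory_dump_manager.h). The (guid, success) signature
    // mirrors the GlobalMemoryDumpCallback::Run() calls in this file.
    void OnGlobalDumpDone(uint64_t dump_guid, bool success) {
      if (!success)
        VLOG(1) << "Global memory dump " << dump_guid << " failed";
    }

    void RequestDetailedDump() {
      base::trace_event::MemoryDumpManager::GetInstance()->RequestGlobalDump(
          base::trace_event::MemoryDumpType::EXPLICITLY_TRIGGERED,
          base::trace_event::MemoryDumpLevelOfDetail::DETAILED,
          base::Bind(&OnGlobalDumpDone));
    }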
494 if (TraceLog::GetInstance() | 492 if (TraceLog::GetInstance() |
495 ->GetCurrentTraceConfig() | 493 ->GetCurrentTraceConfig() |
496 .IsArgumentFilterEnabled()) { | 494 .IsArgumentFilterEnabled()) { |
497 CHECK_EQ(MemoryDumpLevelOfDetail::BACKGROUND, args.level_of_detail); | 495 CHECK_EQ(MemoryDumpLevelOfDetail::BACKGROUND, args.level_of_detail); |
498 } | 496 } |
499 | 497 |
500 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; | 498 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; |
501 { | 499 { |
502 AutoLock lock(lock_); | 500 AutoLock lock(lock_); |
503 | 501 |
| 502 // MDM could have been disabled by this point, destroying |
| 503 // |heap_profiler_serialization_state_|. Heap-profiled dumps need that |
| 504 // serialization state, so if heap profiling is enabled and the state is |
| 505 // absent we fail the dump immediately. |
| 506 if (args.dump_type != MemoryDumpType::SUMMARY_ONLY && |
| 507 heap_profiling_enabled_ && !heap_profiler_serialization_state_) { |
| 508 callback.Run(args.dump_guid, false /* success */, base::nullopt); |
| 509 return; |
| 510 } |
| 511 |
504 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( | 512 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( |
505 args, dump_providers_, heap_profiler_serialization_state_, callback, | 513 args, dump_providers_, heap_profiler_serialization_state_, callback, |
506 GetOrCreateBgTaskRunnerLocked())); | 514 GetOrCreateBgTaskRunnerLocked())); |
507 | 515 |
508 // If enabled, holds back the peak detector resetting its estimation window. | 516 // If enabled, holds back the peak detector resetting its estimation window. |
509 MemoryPeakDetector::GetInstance()->Throttle(); | 517 MemoryPeakDetector::GetInstance()->Throttle(); |
510 } | 518 } |
511 | 519 |
512 // Start the process dump. This involves task runner hops as specified by the | 520 // Start the process dump. This involves task runner hops as specified by the |
513 // MemoryDumpProvider(s) in RegisterDumpProvider(). | 521 // MemoryDumpProvider(s) in RegisterDumpProvider(). |
(...skipping 10 matching lines...)
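[Editor's note, not part of this CL] The guard added under |lock_| in CreateProcessDump() above can be read as a single predicate. The helper below is hypothetical; it merely restates the new condition using the names already present in this file.

    // Restates the new guard: a non-SUMMARY_ONLY dump must fail fast when heap
    // profiling is enabled but the heap profiler serialization state has
    // already been reset (e.g. by Disable()). Needs
    // base/trace_event/memory_dump_request_args.h.
    bool MustFailHeapProfiledDump(const MemoryDumpRequestArgs& args,
                                  bool heap_profiling_enabled,
                                  bool has_serialization_state) {
      return args.dump_type != MemoryDumpType::SUMMARY_ONLY &&
             heap_profiling_enabled && !has_serialization_state;
    }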
524 // |lock_| is used in these functions purely to ensure consistency w.r.t. | 532 // |lock_| is used in these functions purely to ensure consistency w.r.t. |
525 // (un)registrations of |dump_providers_|. | 533 // (un)registrations of |dump_providers_|. |
526 void MemoryDumpManager::SetupNextMemoryDump( | 534 void MemoryDumpManager::SetupNextMemoryDump( |
527 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { | 535 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { |
528 HEAP_PROFILER_SCOPED_IGNORE; | 536 HEAP_PROFILER_SCOPED_IGNORE; |
529 // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs | 537 // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs |
530 // in the PostTask below don't end up registering their own dump providers | 538 // in the PostTask below don't end up registering their own dump providers |
531 // (for discounting trace memory overhead) while holding the |lock_|. | 539 // (for discounting trace memory overhead) while holding the |lock_|. |
532 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); | 540 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); |
533 | 541 |
534 // MDM might have been disabled before getting to this point. | |
535 // Anyway either MDM is disabled or this was the last hop, create a trace | |
536 // event, add it to the trace and finalize process dump invoking the callback. | |
537 if (!subtle::NoBarrier_Load(&is_enabled_)) { | |
538 if (pmd_async_state->pending_dump_providers.empty()) { | |
539 VLOG(1) << kLogPrefix << " failed because MemoryDumpManager was disabled" | |
540 << " before finalizing the dump"; | |
541 } else { | |
542 VLOG(1) << kLogPrefix << " failed because MemoryDumpManager was disabled" | |
543 << " before dumping " | |
544 << pmd_async_state->pending_dump_providers.back().get()->name; | |
545 } | |
546 pmd_async_state->dump_successful = false; | |
547 pmd_async_state->pending_dump_providers.clear(); | |
548 } | |
549 | |
550 if (pmd_async_state->pending_dump_providers.empty()) | 542 if (pmd_async_state->pending_dump_providers.empty()) |
551 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state)); | 543 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state)); |
552 | 544 |
553 // Read MemoryDumpProviderInfo thread safety considerations in | 545 // Read MemoryDumpProviderInfo thread safety considerations in |
554 // memory_dump_manager.h when accessing |mdpinfo| fields. | 546 // memory_dump_manager.h when accessing |mdpinfo| fields. |
555 MemoryDumpProviderInfo* mdpinfo = | 547 MemoryDumpProviderInfo* mdpinfo = |
556 pmd_async_state->pending_dump_providers.back().get(); | 548 pmd_async_state->pending_dump_providers.back().get(); |
557 | 549 |
558 // If we are in background tracing, we should invoke only the whitelisted | 550 // If we are in background tracing, we should invoke only the whitelisted |
559 // providers. Ignore other providers and continue. | 551 // providers. Ignore other providers and continue. |
(...skipping 240 matching lines...)
800 heap_profiler_serialization_state, | 792 heap_profiler_serialization_state, |
801 &HeapProfilerSerializationState::type_name_deduplicator)); | 793 &HeapProfilerSerializationState::type_name_deduplicator)); |
802 } | 794 } |
803 | 795 |
804 AutoLock lock(lock_); | 796 AutoLock lock(lock_); |
805 | 797 |
806 // At this point we must have the ability to request global dumps. | 798 // At this point we must have the ability to request global dumps. |
807 DCHECK(!request_dump_function_.is_null()); | 799 DCHECK(!request_dump_function_.is_null()); |
808 heap_profiler_serialization_state_ = heap_profiler_serialization_state; | 800 heap_profiler_serialization_state_ = heap_profiler_serialization_state; |
809 | 801 |
810 subtle::NoBarrier_Store(&is_enabled_, 1); | |
811 | |
812 MemoryDumpScheduler::Config periodic_config; | 802 MemoryDumpScheduler::Config periodic_config; |
813 bool peak_detector_configured = false; | 803 bool peak_detector_configured = false; |
814 for (const auto& trigger : memory_dump_config.triggers) { | 804 for (const auto& trigger : memory_dump_config.triggers) { |
815 if (trigger.trigger_type == MemoryDumpType::PERIODIC_INTERVAL) { | 805 if (trigger.trigger_type == MemoryDumpType::PERIODIC_INTERVAL) { |
816 if (periodic_config.triggers.empty()) { | 806 if (periodic_config.triggers.empty()) { |
817 periodic_config.callback = BindRepeating(&OnPeriodicSchedulerTick); | 807 periodic_config.callback = BindRepeating(&OnPeriodicSchedulerTick); |
818 } | 808 } |
819 periodic_config.triggers.push_back( | 809 periodic_config.triggers.push_back( |
820 {trigger.level_of_detail, trigger.min_time_between_dumps_ms}); | 810 {trigger.level_of_detail, trigger.min_time_between_dumps_ms}); |
821 } else if (trigger.trigger_type == MemoryDumpType::PEAK_MEMORY_USAGE) { | 811 } else if (trigger.trigger_type == MemoryDumpType::PEAK_MEMORY_USAGE) { |
(...skipping 26 matching lines...)
848 if (is_coordinator_ && !periodic_config.triggers.empty()) { | 838 if (is_coordinator_ && !periodic_config.triggers.empty()) { |
849 MemoryDumpScheduler::GetInstance()->Start(periodic_config, | 839 MemoryDumpScheduler::GetInstance()->Start(periodic_config, |
850 GetOrCreateBgTaskRunnerLocked()); | 840 GetOrCreateBgTaskRunnerLocked()); |
851 } | 841 } |
852 } | 842 } |
853 | 843 |
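[Editor's note, not part of this CL] For context on the PERIODIC_INTERVAL branch handled by the loop above: the triggers it consumes normally come from a memory-infra trace config. The sketch below shows such a config passed as a JSON string to base::trace_event::TraceConfig; the trigger key names ("mode", "periodic_interval_ms") follow the memory-infra documentation of this era and may differ across milestones, so treat them as an assumption rather than part of this patch.

    // Hypothetical setup: two periodic triggers (light + detailed) that end up
    // as MemoryDumpType::PERIODIC_INTERVAL triggers in the loop in Enable().
    const char kMemoryInfraTraceConfig[] = R"({
        "included_categories": ["disabled-by-default-memory-infra"],
        "memory_dump_config": {
          "triggers": [
            {"mode": "light", "periodic_interval_ms": 250},
            {"mode": "detailed", "periodic_interval_ms": 2000}
          ]
        }
      })";
    base::trace_event::TraceConfig trace_config(kMemoryInfraTraceConfig);
    // Enabling tracing with this config is what ultimately drives the
    // MemoryDumpScheduler started at the end of Enable().
    base::trace_event::TraceLog::GetInstance()->SetEnabled(
        trace_config, base::trace_event::TraceLog::RECORDING_MODE);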
854 void MemoryDumpManager::Disable() { | 844 void MemoryDumpManager::Disable() { |
855 // There might be a memory dump in progress while this happens. Therefore, | 845 // There might be a memory dump in progress while this happens. Therefore, |
856 // ensure that the MDM state which depends on the tracing enabled / disabled | 846 // ensure that the MDM state which depends on the tracing enabled / disabled |
857 // state is always accessed by the dumping methods holding the |lock_|. | 847 // state is always accessed by the dumping methods holding the |lock_|. |
858 if (!subtle::NoBarrier_Load(&is_enabled_)) | 848 AutoLock lock(lock_); |
859 return; | 849 |
860 subtle::NoBarrier_Store(&is_enabled_, 0); | 850 MemoryDumpScheduler::GetInstance()->Stop(); |
861 { | 851 MemoryPeakDetector::GetInstance()->TearDown(); |
862 AutoLock lock(lock_); | 852 heap_profiler_serialization_state_ = nullptr; |
863 MemoryDumpScheduler::GetInstance()->Stop(); | |
864 MemoryPeakDetector::GetInstance()->TearDown(); | |
865 heap_profiler_serialization_state_ = nullptr; | |
866 } | |
867 } | 853 } |
868 | 854 |
869 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState( | 855 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState( |
870 MemoryDumpRequestArgs req_args, | 856 MemoryDumpRequestArgs req_args, |
871 const MemoryDumpProviderInfo::OrderedSet& dump_providers, | 857 const MemoryDumpProviderInfo::OrderedSet& dump_providers, |
872 scoped_refptr<HeapProfilerSerializationState> | 858 scoped_refptr<HeapProfilerSerializationState> |
873 heap_profiler_serialization_state, | 859 heap_profiler_serialization_state, |
874 ProcessMemoryDumpCallback callback, | 860 ProcessMemoryDumpCallback callback, |
875 scoped_refptr<SequencedTaskRunner> dump_thread_task_runner) | 861 scoped_refptr<SequencedTaskRunner> dump_thread_task_runner) |
876 : req_args(req_args), | 862 : req_args(req_args), |
(...skipping 17 matching lines...)
894 if (iter == process_dumps.end()) { | 880 if (iter == process_dumps.end()) { |
895 std::unique_ptr<ProcessMemoryDump> new_pmd( | 881 std::unique_ptr<ProcessMemoryDump> new_pmd( |
896 new ProcessMemoryDump(heap_profiler_serialization_state, dump_args)); | 882 new ProcessMemoryDump(heap_profiler_serialization_state, dump_args)); |
897 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; | 883 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; |
898 } | 884 } |
899 return iter->second.get(); | 885 return iter->second.get(); |
900 } | 886 } |
901 | 887 |
902 } // namespace trace_event | 888 } // namespace trace_event |
903 } // namespace base | 889 } // namespace base |