Chromium Code Reviews

Unified diff: base/trace_event/memory_dump_manager.cc

Issue 2820433005: memory-infra: Start disentangling tracing from memory-infra (Closed)
Patch Set: pass config in (created 3 years, 8 months ago)
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/trace_event/memory_dump_manager.h"

 #include <inttypes.h>
 #include <stdio.h>

 #include <algorithm>
(...skipping 17 matching lines...)
 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
 #include "base/trace_event/heap_profiler_event_filter.h"
 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
 #include "base/trace_event/heap_profiler_type_name_deduplicator.h"
 #include "base/trace_event/malloc_dump_provider.h"
 #include "base/trace_event/memory_dump_provider.h"
 #include "base/trace_event/memory_dump_scheduler.h"
 #include "base/trace_event/memory_dump_session_state.h"
 #include "base/trace_event/memory_infra_background_whitelist.h"
 #include "base/trace_event/memory_peak_detector.h"
+#include "base/trace_event/memory_tracing_observer.h"
 #include "base/trace_event/process_memory_dump.h"
 #include "base/trace_event/trace_event.h"
 #include "base/trace_event/trace_event_argument.h"
 #include "build/build_config.h"

 #if defined(OS_ANDROID)
 #include "base/trace_event/java_heap_dump_provider_android.h"
 #endif

 namespace base {
 namespace trace_event {

 namespace {

-const int kTraceEventNumArgs = 1;
-const char* kTraceEventArgNames[] = {"dumps"};
-const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};
-
 StaticAtomicSequenceNumber g_next_guid;
 MemoryDumpManager* g_instance_for_testing = nullptr;

 // The list of names of dump providers that are blacklisted from strict thread
 // affinity check on unregistration. These providers could potentially cause
 // crashes on build bots if they do not unregister on right thread.
 // TODO(ssid): Fix all the dump providers to unregister if needed and clear the
 // blacklist, crbug.com/643438.
 const char* const kStrictThreadCheckBlacklist[] = {
     "ClientDiscardableSharedMemoryManager",
(...skipping 125 matching lines...)

   // At this point the command line may not be initialized but we try to
   // enable the heap profiler to capture allocations as soon as possible.
   EnableHeapProfilingIfNeeded();

   strict_thread_check_blacklist_.insert(std::begin(kStrictThreadCheckBlacklist),
                                         std::end(kStrictThreadCheckBlacklist));
 }

 MemoryDumpManager::~MemoryDumpManager() {
-  TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
 }

 void MemoryDumpManager::EnableHeapProfilingIfNeeded() {
   if (heap_profiling_enabled_)
     return;

   if (!CommandLine::InitializedForCurrentProcess() ||
       !CommandLine::ForCurrentProcess()->HasSwitch(
           switches::kEnableHeapProfiling))
     return;
(...skipping 70 matching lines...)

   TraceConfig::EventFilters filters;
   filters.push_back(heap_profiler_filter_config);
   TraceConfig filtering_trace_config;
   filtering_trace_config.SetEventFilters(filters);

   TraceLog::GetInstance()->SetEnabled(filtering_trace_config,
                                       TraceLog::FILTERING_MODE);
   }

-  // If tracing was enabled before initializing MemoryDumpManager, we missed the
-  // OnTraceLogEnabled() event. Synthetize it so we can late-join the party.
-  // IsEnabled is called before adding observer to avoid calling
-  // OnTraceLogEnabled twice.
-  bool is_tracing_already_enabled = TraceLog::GetInstance()->IsEnabled();
-  TraceLog::GetInstance()->AddEnabledStateObserver(this);
-  if (is_tracing_already_enabled)
-    OnTraceLogEnabled();
+  trace_log_observer_ =
+      MakeUnique<MemoryTracingObserver>(TraceLog::GetInstance(), this);
 }
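
The newly included base/trace_event/memory_tracing_observer.h is not part of this file's diff, so only its call sites are visible here: the observer is constructed with a TraceLog and the owning MemoryDumpManager, and a static MaybeAddDumpToTrace() is called further down. A rough sketch of the declaration that usage implies is below; the member names and exact signatures are assumptions inferred from this diff, not taken from the CL.

    // Hypothetical shape of base/trace_event/memory_tracing_observer.h,
    // inferred only from how it is used in this diff.
    #include "base/base_export.h"
    #include "base/macros.h"
    #include "base/process/process_handle.h"
    #include "base/trace_event/memory_dump_request_args.h"
    #include "base/trace_event/trace_log.h"

    namespace base {
    namespace trace_event {

    class MemoryDumpManager;
    class ProcessMemoryDump;

    class BASE_EXPORT MemoryTracingObserver
        : public TraceLog::EnabledStateObserver {
     public:
      MemoryTracingObserver(TraceLog* trace_log, MemoryDumpManager* mdm);
      ~MemoryTracingObserver() override;

      // TraceLog::EnabledStateObserver implementation; forwards enable/disable
      // notifications to MemoryDumpManager::Enable()/Disable().
      void OnTraceLogEnabled() override;
      void OnTraceLogDisabled() override;

      // Writes |process_memory_dump| into the trace, but only if the
      // memory-infra category is still enabled (see the call in
      // FinalizeDumpAndAddToTrace() below).
      static void MaybeAddDumpToTrace(const MemoryDumpRequestArgs* req_args,
                                      ProcessId pid,
                                      ProcessMemoryDump* process_memory_dump);

     private:
      MemoryDumpManager* const memory_dump_manager_;
      TraceLog* const trace_log_;

      DISALLOW_COPY_AND_ASSIGN(MemoryTracingObserver);
    };

    }  // namespace trace_event
    }  // namespace base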

 void MemoryDumpManager::RegisterDumpProvider(
     MemoryDumpProvider* mdp,
     const char* name,
     scoped_refptr<SingleThreadTaskRunner> task_runner,
     MemoryDumpProvider::Options options) {
   options.dumps_on_single_thread_task_runner = true;
   RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
 }
(...skipping 428 matching lines...)

   TRACE_EVENT0(kTraceCategory, "MemoryDumpManager::FinalizeDumpAndAddToTrace");

   // The results struct to fill.
   // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
   MemoryDumpCallbackResult result;

   for (const auto& kv : pmd_async_state->process_dumps) {
     ProcessId pid = kv.first;  // kNullProcessId for the current process.
     ProcessMemoryDump* process_memory_dump = kv.second.get();
-    std::unique_ptr<TracedValue> traced_value(new TracedValue);
-    process_memory_dump->AsValueInto(traced_value.get());
-    traced_value->SetString("level_of_detail",
-                            MemoryDumpLevelOfDetailToString(
-                                pmd_async_state->req_args.level_of_detail));
-    const char* const event_name =
-        MemoryDumpTypeToString(pmd_async_state->req_args.dump_type);

-    std::unique_ptr<ConvertableToTraceFormat> event_value(
-        std::move(traced_value));
-    TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_PROCESS_ID(
-        TRACE_EVENT_PHASE_MEMORY_DUMP,
-        TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name,
-        trace_event_internal::kGlobalScope, dump_guid, pid,
-        kTraceEventNumArgs, kTraceEventArgNames,
-        kTraceEventArgTypes, nullptr /* arg_values */, &event_value,
-        TRACE_EVENT_FLAG_HAS_ID);
+    MemoryTracingObserver::MaybeAddDumpToTrace(&pmd_async_state->req_args, pid,
+                                               process_memory_dump);

Primiano Tucci (use gerrit) 2017/04/21 10:10:26 I'd call this AddDumpToTraceIf(Tracing)Enabled to
hjd 2017/04/21 11:51:57 Done.

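The inline trace-event emission removed above is now hidden behind MemoryTracingObserver::MaybeAddDumpToTrace(), and the kTraceEvent* constants deleted from the anonymous namespace at the top of this file presumably move with it. The helper's body is not in this diff; the sketch below reassembles it from the removed lines, with the signature inferred from the call site and with the assumption that the category is reachable as MemoryDumpManager::kTraceCategory from the observer's file.

    // Sketch only; the real implementation lives in
    // memory_tracing_observer.cc, which is not part of this file's diff.
    namespace {
    const int kTraceEventNumArgs = 1;
    const char* kTraceEventArgNames[] = {"dumps"};
    const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};
    }  // namespace

    // static
    void MemoryTracingObserver::MaybeAddDumpToTrace(
        const MemoryDumpRequestArgs* req_args,
        ProcessId pid,
        ProcessMemoryDump* process_memory_dump) {
      // Do nothing if the memory-infra category was turned off while the dump
      // was in flight; this is the "Maybe" (the review above suggests a name
      // along the lines of AddDumpToTraceIfTracingEnabled for that reason).
      bool tracing_enabled;
      TRACE_EVENT_CATEGORY_GROUP_ENABLED(MemoryDumpManager::kTraceCategory,
                                         &tracing_enabled);
      if (!tracing_enabled)
        return;

      std::unique_ptr<TracedValue> traced_value(new TracedValue);
      process_memory_dump->AsValueInto(traced_value.get());
      traced_value->SetString("level_of_detail",
                              MemoryDumpLevelOfDetailToString(
                                  req_args->level_of_detail));
      const char* const event_name =
          MemoryDumpTypeToString(req_args->dump_type);

      std::unique_ptr<ConvertableToTraceFormat> event_value(
          std::move(traced_value));
      TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_PROCESS_ID(
          TRACE_EVENT_PHASE_MEMORY_DUMP,
          TraceLog::GetCategoryGroupEnabled(MemoryDumpManager::kTraceCategory),
          event_name, trace_event_internal::kGlobalScope, req_args->dump_guid,
          pid, kTraceEventNumArgs, kTraceEventArgNames, kTraceEventArgTypes,
          nullptr /* arg_values */, &event_value, TRACE_EVENT_FLAG_HAS_ID);
    }
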
     // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
     // Don't try to fill the struct in detailed mode since it is hard to avoid
     // double counting.
     if (pmd_async_state->req_args.level_of_detail ==
         MemoryDumpLevelOfDetail::DETAILED)
       continue;

     // TODO(hjd): Transitional until we send the full PMD. See crbug.com/704203
     if (pid == kNullProcessId) {
(...skipping 10 matching lines...)
           GetDumpsSumKb("partition_alloc/partitions/*", process_memory_dump);
       result.chrome_dump.blink_gc_total_kb =
           GetDumpsSumKb("blink_gc", process_memory_dump);
       FillOsDumpFromProcessMemoryDump(process_memory_dump, &result.os_dump);
     } else {
       auto& os_dump = result.extra_processes_dump[pid];
       FillOsDumpFromProcessMemoryDump(process_memory_dump, &os_dump);
     }
   }

-  bool tracing_still_enabled;
-  TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &tracing_still_enabled);
-  if (!tracing_still_enabled) {
-    pmd_async_state->dump_successful = false;
-    VLOG(1) << kLogPrefix << " failed because tracing was disabled before"
-            << " the dump was completed";
-  }
-
   if (!pmd_async_state->callback.is_null()) {
     pmd_async_state->callback.Run(dump_guid, pmd_async_state->dump_successful);
     pmd_async_state->callback.Reset();
   }

   TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump",
                                   TRACE_ID_LOCAL(dump_guid));
 }

-void MemoryDumpManager::OnTraceLogEnabled() {
-  bool enabled;
-  TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled);
-  if (!enabled)
-    return;
-
+void MemoryDumpManager::Enable(
+    const TraceConfig::MemoryDumpConfig& memory_dump_config) {

Primiano Tucci (use gerrit) 2017/04/21 10:10:26 okay for the moment, but I think that in the next
hjd 2017/04/21 11:51:57 Acknowledged.
   // Initialize the TraceLog for the current thread. This is to avoid that the
   // TraceLog memory dump provider is registered lazily in the PostTask() below
   // while the |lock_| is taken;
   TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
Primiano Tucci (use gerrit) 2017/04/21 10:10:26 I think this should also be moved to the MTO
hjd 2017/04/21 11:51:57 Done.
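
With Enable() now taking the MemoryDumpConfig as a parameter, the observer is presumably the piece that reads the config off the current trace config and forwards it, and, per the comment thread above, the thread-local event buffer setup is a candidate to move there as well. A rough sketch under those assumptions, reusing the member names assumed in the header sketch earlier:

    // Sketch only; not part of this diff.
    void MemoryTracingObserver::OnTraceLogEnabled() {
      bool enabled;
      TRACE_EVENT_CATEGORY_GROUP_ENABLED(MemoryDumpManager::kTraceCategory,
                                         &enabled);
      if (!enabled)
        return;

      // Candidate new home for the thread-local buffer setup discussed above.
      trace_log_->InitializeThreadLocalEventBufferIfSupported();

      // The config is read here and passed in, instead of MemoryDumpManager
      // pulling it from TraceLog itself (the lines removed below).
      const TraceConfig& trace_config = trace_log_->GetCurrentTraceConfig();
      memory_dump_manager_->Enable(trace_config.memory_dump_config());
    }

    void MemoryTracingObserver::OnTraceLogDisabled() {
      memory_dump_manager_->Disable();
    }
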

   // Spin-up the thread used to invoke unbound dump providers.
   std::unique_ptr<Thread> dump_thread(new Thread("MemoryInfra"));
   if (!dump_thread->Start()) {
     LOG(ERROR) << "Failed to start the memory-infra thread for tracing";
     return;
   }

-  const TraceConfig& trace_config =
-      TraceLog::GetInstance()->GetCurrentTraceConfig();
-  const TraceConfig::MemoryDumpConfig& memory_dump_config =
-      trace_config.memory_dump_config();
   scoped_refptr<MemoryDumpSessionState> session_state =
       new MemoryDumpSessionState;
   session_state->SetAllowedDumpModes(memory_dump_config.allowed_dump_modes);
   session_state->set_heap_profiler_breakdown_threshold_bytes(
       memory_dump_config.heap_profiler_options.breakdown_threshold_bytes);
   if (heap_profiling_enabled_) {
     // If heap profiling is enabled, the stack frame deduplicator and type name
     // deduplicator will be in use. Add a metadata events to write the frames
     // and type IDs.
     session_state->SetStackFrameDeduplicator(
(...skipping 64 matching lines...)
     }
   }

   // Only coordinator process triggers periodic global memory dumps.
   if (delegate_->IsCoordinator() && !periodic_config.triggers.empty()) {
     MemoryDumpScheduler::GetInstance()->Start(periodic_config,
                                               dump_thread_->task_runner());
   }
 }

-void MemoryDumpManager::OnTraceLogDisabled() {
+void MemoryDumpManager::Disable() {
   // There might be a memory dump in progress while this happens. Therefore,
   // ensure that the MDM state which depends on the tracing enabled / disabled
   // state is always accessed by the dumping methods holding the |lock_|.
   if (!subtle::NoBarrier_Load(&memory_tracing_enabled_))
     return;
   subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
   std::unique_ptr<Thread> dump_thread;
   {
     AutoLock lock(lock_);
     MemoryDumpScheduler::GetInstance()->Stop();
(...skipping 41 matching lines...)
   if (iter == process_dumps.end()) {
     std::unique_ptr<ProcessMemoryDump> new_pmd(
         new ProcessMemoryDump(session_state, dump_args));
     iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first;
   }
   return iter->second.get();
 }

 }  // namespace trace_event
 }  // namespace base
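
Finally, the enabled-state bookkeeping removed earlier in this file, the AddEnabledStateObserver() registration with its late-join path and the RemoveEnabledStateObserver() call in the destructor, presumably moves into the observer's constructor and destructor. A hedged sketch, again using the assumed member names:

    // Sketch only; not part of this diff.
    MemoryTracingObserver::MemoryTracingObserver(TraceLog* trace_log,
                                                 MemoryDumpManager* mdm)
        : memory_dump_manager_(mdm), trace_log_(trace_log) {
      // If tracing was enabled before this observer existed, the
      // OnTraceLogEnabled() notification was missed; synthesize it so
      // memory-infra can late-join, mirroring the registration logic removed
      // from this file.
      const bool is_tracing_already_enabled = trace_log_->IsEnabled();
      trace_log_->AddEnabledStateObserver(this);
      if (is_tracing_already_enabled)
        OnTraceLogEnabled();
    }

    MemoryTracingObserver::~MemoryTracingObserver() {
      trace_log_->RemoveEnabledStateObserver(this);
    }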