| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <utility> | 8 #include <utility> |
| 9 | 9 |
| 10 #include "base/allocator/features.h" | 10 #include "base/allocator/features.h" |
| (...skipping 142 matching lines...) |
| 153 | 153 |
| 154 // static | 154 // static |
| 155 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) { | 155 void MemoryDumpManager::SetInstanceForTesting(MemoryDumpManager* instance) { |
| 156 g_instance_for_testing = instance; | 156 g_instance_for_testing = instance; |
| 157 } | 157 } |
| 158 | 158 |
| 159 MemoryDumpManager::MemoryDumpManager() | 159 MemoryDumpManager::MemoryDumpManager() |
| 160 : memory_tracing_enabled_(0), | 160 : memory_tracing_enabled_(0), |
| 161 tracing_process_id_(kInvalidTracingProcessId), | 161 tracing_process_id_(kInvalidTracingProcessId), |
| 162 dumper_registrations_ignored_for_testing_(false), | 162 dumper_registrations_ignored_for_testing_(false), |
| 163 heap_profiling_enabled_(false) { | 163 heap_profiling_state_(HeapProfilingState::DISABLED) { |
| 164 g_next_guid.GetNext(); // Make sure that the first guid is not zero. | 164 g_next_guid.GetNext(); // Make sure that the first guid is not zero. |
| 165 | 165 |
| 166 // At this point the command line may not be initialized but we try to | 166 // At this point the command line may not be initialized but we try to |
| 167 // enable the heap profiler to capture allocations as soon as possible. | 167 // enable the heap profiler to capture allocations as soon as possible. |
| 168 EnableHeapProfilingIfNeeded(); | 168 EnableHeapProfilingIfNeeded(); |
| 169 | 169 |
| 170 strict_thread_check_blacklist_.insert(std::begin(kStrictThreadCheckBlacklist), | 170 strict_thread_check_blacklist_.insert(std::begin(kStrictThreadCheckBlacklist), |
| 171 std::end(kStrictThreadCheckBlacklist)); | 171 std::end(kStrictThreadCheckBlacklist)); |
| 172 } | 172 } |
| 173 | 173 |
| 174 MemoryDumpManager::~MemoryDumpManager() { | 174 MemoryDumpManager::~MemoryDumpManager() { |
| 175 TraceLog::GetInstance()->RemoveEnabledStateObserver(this); | 175 TraceLog::GetInstance()->RemoveEnabledStateObserver(this); |
| 176 } | 176 } |
| 177 | 177 |
| 178 void MemoryDumpManager::EnableHeapProfilingIfNeeded() { | 178 void MemoryDumpManager::EnableHeapProfilingIfNeeded() { |
| 179 if (heap_profiling_enabled_) | 179 AutoLock lock(lock_); |
| 180 if (heap_profiling_state_ != HeapProfilingState::DISABLED) |
| 180 return; | 181 return; |
| 181 | 182 |
| 182 if (!CommandLine::InitializedForCurrentProcess() || | 183 if (!CommandLine::InitializedForCurrentProcess() || |
| 183 !CommandLine::ForCurrentProcess()->HasSwitch( | 184 !CommandLine::ForCurrentProcess()->HasSwitch( |
| 184 switches::kEnableHeapProfiling)) | 185 switches::kEnableHeapProfiling)) |
| 185 return; | 186 return; |
| 186 | 187 |
| 187 std::string profiling_mode = CommandLine::ForCurrentProcess() | 188 std::string profiling_mode = CommandLine::ForCurrentProcess() |
| 188 ->GetSwitchValueASCII(switches::kEnableHeapProfiling); | 189 ->GetSwitchValueASCII(switches::kEnableHeapProfiling); |
| 189 if (profiling_mode == "") { | 190 if (profiling_mode == "") { |
| 190 AllocationContextTracker::SetCaptureMode( | 191 EnableHeapProfilingLocked( |
| 191 AllocationContextTracker::CaptureMode::PSEUDO_STACK); | 192 AllocationContextTracker::CaptureMode::PSEUDO_STACK); |
| 192 #if HAVE_TRACE_STACK_FRAME_POINTERS && \ | 193 #if HAVE_TRACE_STACK_FRAME_POINTERS && \ |
| 193 (BUILDFLAG(ENABLE_PROFILING) || !defined(NDEBUG)) | 194 (BUILDFLAG(ENABLE_PROFILING) || !defined(NDEBUG)) |
| 194 } else if (profiling_mode == switches::kEnableHeapProfilingModeNative) { | 195 } else if (profiling_mode == switches::kEnableHeapProfilingModeNative) { |
| 195 // We need frame pointers for native tracing to work, and they are | 196 // We need frame pointers for native tracing to work, and they are |
| 196 // enabled in profiling and debug builds. | 197 // enabled in profiling and debug builds. |
| 197 AllocationContextTracker::SetCaptureMode( | 198 EnableHeapProfilingLocked( |
| 198 AllocationContextTracker::CaptureMode::NATIVE_STACK); | 199 AllocationContextTracker::CaptureMode::NATIVE_STACK); |
| 199 #endif | 200 #endif |
| 200 #if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER) | 201 #if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER) |
| 201 } else if (profiling_mode == switches::kEnableHeapProfilingTaskProfiler) { | 202 } else if (profiling_mode == switches::kEnableHeapProfilingTaskProfiler) { |
| 202 // Enable heap tracking, which in turn enables capture of heap usage | 203 // Enable heap tracking, which in turn enables capture of heap usage |
| 203 // in tracked_objects.cc. | 204 // in tracked_objects.cc. |
| 204 if (!base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled()) | 205 if (!base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled()) |
| 205 base::debug::ThreadHeapUsageTracker::EnableHeapTracking(); | 206 base::debug::ThreadHeapUsageTracker::EnableHeapTracking(); |
| 206 #endif | 207 #endif |
| 207 } else { | 208 } else { |
| 208 CHECK(false) << "Invalid mode '" << profiling_mode << "' for " | 209 CHECK(false) << "Invalid mode '" << profiling_mode << "' for " |
| 209 << switches::kEnableHeapProfiling << " flag."; | 210 << switches::kEnableHeapProfiling << " flag."; |
| 210 } | 211 } |
| 212 } |
| 211 | 213 |
| 212 for (auto mdp : dump_providers_) | 214 void MemoryDumpManager::EnableHeapProfiling( |
| 213 mdp->dump_provider->OnHeapProfilingEnabled(true); | 215 AllocationContextTracker::CaptureMode capture_mode) { |
| 214 heap_profiling_enabled_ = true; | 216 CHECK_NE(AllocationContextTracker::CaptureMode::NATIVE_STACK, capture_mode) |
| 217 << "Native mode can be enabled only by command line flag"; |
| 218 AutoLock lock(lock_); |
| 219 EnableHeapProfilingLocked(capture_mode); |
| 220 } |
| 221 |
| 222 void MemoryDumpManager::EnableHeapProfilingLocked( |
| 223 AllocationContextTracker::CaptureMode capture_mode) { |
| 224 // Do not enable heap profiling if tracing is enabled, since session_state_ |
| 225 // will not be initialized. |
| 226 if (capture_mode != AllocationContextTracker::CaptureMode::DISABLED && |
| 227 subtle::NoBarrier_Load(&memory_tracing_enabled_)) { |
| 228 return; |
| 229 } |
| 230 |
| 231 switch (capture_mode) { |
| 232 case AllocationContextTracker::CaptureMode::PSEUDO_STACK: |
| 233 case AllocationContextTracker::CaptureMode::NATIVE_STACK: |
| 234 case AllocationContextTracker::CaptureMode::BACKGROUND: |
| 235 if (heap_profiling_state_ != HeapProfilingState::DISABLED) |
| 236 return; |
| 237 heap_profiling_state_ = HeapProfilingState::ENABLED; |
| 238 break; |
| 239 case AllocationContextTracker::CaptureMode::DISABLED: |
| 240 heap_profiling_state_ = HeapProfilingState::DISABLED_PERMANENTLY; |
| 241 break; |
| 242 } |
| 243 AllocationContextTracker::SetCaptureMode(capture_mode); |
| 244 for (auto mdp : dump_providers_) { |
| 245 mdp->dump_provider->OnHeapProfilingEnabled(heap_profiling_state_ == |
| 246 HeapProfilingState::ENABLED); |
| 247 } |
| 215 } | 248 } |
| 216 | 249 |
| 217 void MemoryDumpManager::Initialize( | 250 void MemoryDumpManager::Initialize( |
| 218 std::unique_ptr<MemoryDumpManagerDelegate> delegate) { | 251 std::unique_ptr<MemoryDumpManagerDelegate> delegate) { |
| 219 { | 252 { |
| 220 AutoLock lock(lock_); | 253 AutoLock lock(lock_); |
| 221 DCHECK(delegate); | 254 DCHECK(delegate); |
| 222 DCHECK(!delegate_); | 255 DCHECK(!delegate_); |
| 223 delegate_ = std::move(delegate); | 256 delegate_ = std::move(delegate); |
| 224 EnableHeapProfilingIfNeeded(); | |
| 225 } | 257 } |
| 258 EnableHeapProfilingIfNeeded(); |
| 226 | 259 |
| 227 // Enable the core dump providers. | 260 // Enable the core dump providers. |
| 228 #if defined(MALLOC_MEMORY_TRACING_SUPPORTED) | 261 #if defined(MALLOC_MEMORY_TRACING_SUPPORTED) |
| 229 RegisterDumpProvider(MallocDumpProvider::GetInstance(), "Malloc", nullptr); | 262 RegisterDumpProvider(MallocDumpProvider::GetInstance(), "Malloc", nullptr); |
| 230 #endif | 263 #endif |
| 231 | 264 |
| 232 #if defined(OS_ANDROID) | 265 #if defined(OS_ANDROID) |
| 233 RegisterDumpProvider(JavaHeapDumpProvider::GetInstance(), "JavaHeap", | 266 RegisterDumpProvider(JavaHeapDumpProvider::GetInstance(), "JavaHeap", |
| 234 nullptr); | 267 nullptr); |
| 235 #endif | 268 #endif |
| (...skipping 97 matching lines...) |
| 333 // The list of polling MDPs is populated in OnTraceLogEnabled(). This code | 366 // The list of polling MDPs is populated in OnTraceLogEnabled(). This code |
| 334 // deals with the case of an MDP capable of fast polling that is registered | 367 // deals with the case of an MDP capable of fast polling that is registered |
| 335 // after OnTraceLogEnabled(). | 368 // after OnTraceLogEnabled(). |
| 336 if (options.is_fast_polling_supported && dump_thread_) { | 369 if (options.is_fast_polling_supported && dump_thread_) { |
| 337 dump_thread_->task_runner()->PostTask( | 370 dump_thread_->task_runner()->PostTask( |
| 338 FROM_HERE, Bind(&MemoryDumpManager::RegisterPollingMDPOnDumpThread, | 371 FROM_HERE, Bind(&MemoryDumpManager::RegisterPollingMDPOnDumpThread, |
| 339 Unretained(this), mdpinfo)); | 372 Unretained(this), mdpinfo)); |
| 340 } | 373 } |
| 341 } | 374 } |
| 342 | 375 |
| 343 if (heap_profiling_enabled_) | 376 if (heap_profiling_state_ == HeapProfilingState::ENABLED) |
| 344 mdp->OnHeapProfilingEnabled(true); | 377 mdp->OnHeapProfilingEnabled(true); |
| 345 } | 378 } |
| 346 | 379 |
| 347 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { | 380 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { |
| 348 UnregisterDumpProviderInternal(mdp, false /* delete_async */); | 381 UnregisterDumpProviderInternal(mdp, false /* delete_async */); |
| 349 } | 382 } |
| 350 | 383 |
| 351 void MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon( | 384 void MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon( |
| 352 std::unique_ptr<MemoryDumpProvider> mdp) { | 385 std::unique_ptr<MemoryDumpProvider> mdp) { |
| 353 UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */); | 386 UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */); |
| (...skipping 491 matching lines...) |
| 845 | 878 |
| 846 const TraceConfig& trace_config = | 879 const TraceConfig& trace_config = |
| 847 TraceLog::GetInstance()->GetCurrentTraceConfig(); | 880 TraceLog::GetInstance()->GetCurrentTraceConfig(); |
| 848 const TraceConfig::MemoryDumpConfig& memory_dump_config = | 881 const TraceConfig::MemoryDumpConfig& memory_dump_config = |
| 849 trace_config.memory_dump_config(); | 882 trace_config.memory_dump_config(); |
| 850 scoped_refptr<MemoryDumpSessionState> session_state = | 883 scoped_refptr<MemoryDumpSessionState> session_state = |
| 851 new MemoryDumpSessionState; | 884 new MemoryDumpSessionState; |
| 852 session_state->SetAllowedDumpModes(memory_dump_config.allowed_dump_modes); | 885 session_state->SetAllowedDumpModes(memory_dump_config.allowed_dump_modes); |
| 853 session_state->set_heap_profiler_breakdown_threshold_bytes( | 886 session_state->set_heap_profiler_breakdown_threshold_bytes( |
| 854 memory_dump_config.heap_profiler_options.breakdown_threshold_bytes); | 887 memory_dump_config.heap_profiler_options.breakdown_threshold_bytes); |
| 855 if (heap_profiling_enabled_) { | 888 if (heap_profiling_state_ == HeapProfilingState::ENABLED) { |
| 856 // If heap profiling is enabled, the stack frame deduplicator and type name | 889 // If heap profiling is enabled, the stack frame deduplicator and type name |
| 857 // deduplicator will be in use. Add metadata events to write the frames | 890 // deduplicator will be in use. Add metadata events to write the frames |
| 858 // and type IDs. | 891 // and type IDs. |
| 859 session_state->SetStackFrameDeduplicator( | 892 session_state->SetStackFrameDeduplicator( |
| 860 WrapUnique(new StackFrameDeduplicator)); | 893 WrapUnique(new StackFrameDeduplicator)); |
| 861 | 894 |
| 862 session_state->SetTypeNameDeduplicator( | 895 session_state->SetTypeNameDeduplicator( |
| 863 WrapUnique(new TypeNameDeduplicator)); | 896 WrapUnique(new TypeNameDeduplicator)); |
| 864 | 897 |
| 865 TRACE_EVENT_API_ADD_METADATA_EVENT( | 898 TRACE_EVENT_API_ADD_METADATA_EVENT( |
| (...skipping 142 matching lines...) |
| 1008 if (iter == process_dumps.end()) { | 1041 if (iter == process_dumps.end()) { |
| 1009 std::unique_ptr<ProcessMemoryDump> new_pmd( | 1042 std::unique_ptr<ProcessMemoryDump> new_pmd( |
| 1010 new ProcessMemoryDump(session_state, dump_args)); | 1043 new ProcessMemoryDump(session_state, dump_args)); |
| 1011 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; | 1044 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; |
| 1012 } | 1045 } |
| 1013 return iter->second.get(); | 1046 return iter->second.get(); |
| 1014 } | 1047 } |
| 1015 | 1048 |
| 1016 } // namespace trace_event | 1049 } // namespace trace_event |
| 1017 } // namespace base | 1050 } // namespace base |
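Below is a minimal, standalone C++ sketch of the heap-profiling state machine this patch introduces, for readers following the review. The HeapProfilingState values and AllocationContextTracker capture modes mirror the diff above; Enable(), the global g_state, and main() are hypothetical scaffolding for illustration only and are not Chromium API. The sketch deliberately omits the lock, the memory_tracing_enabled_ guard, and the dump-provider notifications.

#include <iostream>

// Mirror of the patch's enums (names taken from the diff above).
enum class CaptureMode { DISABLED, PSEUDO_STACK, NATIVE_STACK, BACKGROUND };
enum class HeapProfilingState { DISABLED, ENABLED, DISABLED_PERMANENTLY };

HeapProfilingState g_state = HeapProfilingState::DISABLED;

// Mirrors the switch in EnableHeapProfilingLocked(): any stack-capturing
// mode moves DISABLED -> ENABLED exactly once, while CaptureMode::DISABLED
// parks the state in DISABLED_PERMANENTLY so profiling can never be turned
// back on for the lifetime of the process.
bool Enable(CaptureMode mode) {
  switch (mode) {
    case CaptureMode::PSEUDO_STACK:
    case CaptureMode::NATIVE_STACK:
    case CaptureMode::BACKGROUND:
      if (g_state != HeapProfilingState::DISABLED)
        return false;  // Already enabled, or permanently disabled.
      g_state = HeapProfilingState::ENABLED;
      return true;
    case CaptureMode::DISABLED:
      g_state = HeapProfilingState::DISABLED_PERMANENTLY;
      return true;
  }
  return false;
}

int main() {
  std::cout << Enable(CaptureMode::PSEUDO_STACK) << "\n";  // 1: now enabled.
  std::cout << Enable(CaptureMode::NATIVE_STACK) << "\n";  // 0: already on.
  Enable(CaptureMode::DISABLED);                           // Permanent off.
  std::cout << Enable(CaptureMode::PSEUDO_STACK) << "\n";  // 0: stays off.
}

Two points the sketch makes explicit. First, CaptureMode::DISABLED is not a return to the initial state: it moves to DISABLED_PERMANENTLY, and EnableHeapProfilingIfNeeded() bails out whenever the state is anything other than DISABLED, so an explicit disable can never be undone by a later command-line check. Second, note why Initialize() now calls EnableHeapProfilingIfNeeded() outside the AutoLock scope: in this patch the function acquires lock_ itself, and base::Lock is not reentrant, so calling it while already holding the lock would deadlock.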