| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 | 8 |
| 9 #include "base/atomic_sequence_num.h" | 9 #include "base/atomic_sequence_num.h" |
| 10 #include "base/command_line.h" | 10 #include "base/command_line.h" |
| (...skipping 25 matching lines...) |
| 36 namespace base { | 36 namespace base { |
| 37 namespace trace_event { | 37 namespace trace_event { |
| 38 | 38 |
| 39 namespace { | 39 namespace { |
| 40 | 40 |
| 41 // TODO(primiano): this should be smarter and should do something similar to | 41 // TODO(primiano): this should be smarter and should do something similar to |
| 42 // trace event synthetic delays. | 42 // trace event synthetic delays. |
| 43 const char kTraceCategory[] = TRACE_DISABLED_BY_DEFAULT("memory-infra"); | 43 const char kTraceCategory[] = TRACE_DISABLED_BY_DEFAULT("memory-infra"); |
| 44 | 44 |
| 45 // Throttle mmaps at a rate of once every kHeavyMmapsDumpsRate standard dumps. | 45 // Throttle mmaps at a rate of once every kHeavyMmapsDumpsRate standard dumps. |
| 46 const int kHeavyDumpsRate = 8; // 250 ms * 8 = 2000 ms. | 46 const int kHeavyMmapsDumpsRate = 8; // 250 ms * 8 = 2000 ms. |
| 47 const int kDumpIntervalMs = 250; | 47 const int kDumpIntervalMs = 250; |
| 48 const int kTraceEventNumArgs = 1; | 48 const int kTraceEventNumArgs = 1; |
| 49 const char* kTraceEventArgNames[] = {"dumps"}; | 49 const char* kTraceEventArgNames[] = {"dumps"}; |
| 50 const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE}; | 50 const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE}; |
| 51 | 51 |
| 52 StaticAtomicSequenceNumber g_next_guid; | 52 StaticAtomicSequenceNumber g_next_guid; |
| 53 uint32 g_periodic_dumps_count = 0; | 53 uint32 g_periodic_dumps_count = 0; |
| 54 MemoryDumpManager* g_instance_for_testing = nullptr; | 54 MemoryDumpManager* g_instance_for_testing = nullptr; |
| 55 MemoryDumpProvider* g_mmaps_dump_provider = nullptr; |
| 55 | 56 |
| 56 void RequestPeriodicGlobalDump() { | 57 void RequestPeriodicGlobalDump() { |
| 57 MemoryDumpArgs::LevelOfDetail dump_level_of_detail = | 58 MemoryDumpType dump_type = g_periodic_dumps_count == 0 |
| 58 g_periodic_dumps_count == 0 ? MemoryDumpArgs::LEVEL_OF_DETAIL_HIGH | 59 ? MemoryDumpType::PERIODIC_INTERVAL_WITH_MMAPS |
| 59 : MemoryDumpArgs::LEVEL_OF_DETAIL_LOW; | 60 : MemoryDumpType::PERIODIC_INTERVAL; |
| 60 if (++g_periodic_dumps_count == kHeavyDumpsRate) | 61 if (++g_periodic_dumps_count == kHeavyMmapsDumpsRate) |
| 61 g_periodic_dumps_count = 0; | 62 g_periodic_dumps_count = 0; |
| 62 | 63 |
| 63 MemoryDumpArgs dump_args = {dump_level_of_detail}; | 64 MemoryDumpManager::GetInstance()->RequestGlobalDump(dump_type); |
| 64 MemoryDumpManager::GetInstance()->RequestGlobalDump( | |
| 65 MemoryDumpType::PERIODIC_INTERVAL, dump_args); | |
| 66 } | 65 } |
| 67 | 66 |
| 68 } // namespace | 67 } // namespace |
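The RequestPeriodicGlobalDump() helper above promotes one out of every kHeavyMmapsDumpsRate periodic requests to a dump that includes mmaps; the TODO at the top of the namespace notes that this fixed-rate scheme is meant to become smarter, along the lines of trace event synthetic delays. Below is a minimal standalone sketch of that counter pattern; the names DumpKind and NextPeriodicDumpKind are illustrative and not part of the Chromium tree.

    // Standalone illustration of the throttling counter used by
    // RequestPeriodicGlobalDump(): the first request of each cycle of eight is
    // "heavy" (includes mmaps), the remaining seven are lightweight.
    #include <cstdint>
    #include <iostream>

    enum class DumpKind { kLight, kHeavy };

    DumpKind NextPeriodicDumpKind() {
      static uint32_t periodic_dumps_count = 0;  // mirrors g_periodic_dumps_count
      const uint32_t kHeavyMmapsDumpsRate = 8;   // 250 ms * 8 = 2000 ms
      DumpKind kind =
          periodic_dumps_count == 0 ? DumpKind::kHeavy : DumpKind::kLight;
      if (++periodic_dumps_count == kHeavyMmapsDumpsRate)
        periodic_dumps_count = 0;
      return kind;
    }

    int main() {
      // Prints "heavy" once, then "light" seven times, and repeats.
      for (int i = 0; i < 16; ++i) {
        std::cout << (NextPeriodicDumpKind() == DumpKind::kHeavy ? "heavy"
                                                                 : "light")
                  << "\n";
      }
      return 0;
    }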
| 69 | 68 |
| 70 // static | 69 // static |
| 71 const char* const MemoryDumpManager::kTraceCategoryForTesting = kTraceCategory; | 70 const char* const MemoryDumpManager::kTraceCategoryForTesting = kTraceCategory; |
| 72 | 71 |
| 73 // static | 72 // static |
| 74 const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3; | 73 const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3; |
| 75 | 74 |
| (...skipping 36 matching lines...) |
| 112 | 111 |
| 113 if (skip_core_dumpers_auto_registration_for_testing_) | 112 if (skip_core_dumpers_auto_registration_for_testing_) |
| 114 return; | 113 return; |
| 115 | 114 |
| 116 // Enable the core dump providers. | 115 // Enable the core dump providers. |
| 117 #if !defined(OS_NACL) | 116 #if !defined(OS_NACL) |
| 118 RegisterDumpProvider(ProcessMemoryTotalsDumpProvider::GetInstance()); | 117 RegisterDumpProvider(ProcessMemoryTotalsDumpProvider::GetInstance()); |
| 119 #endif | 118 #endif |
| 120 | 119 |
| 121 #if defined(OS_LINUX) || defined(OS_ANDROID) | 120 #if defined(OS_LINUX) || defined(OS_ANDROID) |
| 122 RegisterDumpProvider(ProcessMemoryMapsDumpProvider::GetInstance()); | 121 g_mmaps_dump_provider = ProcessMemoryMapsDumpProvider::GetInstance(); |
| 122 RegisterDumpProvider(g_mmaps_dump_provider); |
| 123 RegisterDumpProvider(MallocDumpProvider::GetInstance()); | 123 RegisterDumpProvider(MallocDumpProvider::GetInstance()); |
| 124 system_allocator_pool_name_ = MallocDumpProvider::kAllocatedObjects; | 124 system_allocator_pool_name_ = MallocDumpProvider::kAllocatedObjects; |
| 125 #endif | 125 #endif |
| 126 | 126 |
| 127 #if defined(OS_ANDROID) | 127 #if defined(OS_ANDROID) |
| 128 RegisterDumpProvider(JavaHeapDumpProvider::GetInstance()); | 128 RegisterDumpProvider(JavaHeapDumpProvider::GetInstance()); |
| 129 #endif | 129 #endif |
| 130 | 130 |
| 131 #if defined(OS_WIN) | 131 #if defined(OS_WIN) |
| 132 RegisterDumpProvider(WinHeapDumpProvider::GetInstance()); | 132 RegisterDumpProvider(WinHeapDumpProvider::GetInstance()); |
| (...skipping 40 matching lines...) |
| 173 DCHECK_IMPLIES( | 173 DCHECK_IMPLIES( |
| 174 subtle::NoBarrier_Load(&memory_tracing_enabled_), | 174 subtle::NoBarrier_Load(&memory_tracing_enabled_), |
| 175 mdp_iter->task_runner && mdp_iter->task_runner->BelongsToCurrentThread()) | 175 mdp_iter->task_runner && mdp_iter->task_runner->BelongsToCurrentThread()) |
| 176 << "The MemoryDumpProvider attempted to unregister itself in a racy way. " | 176 << "The MemoryDumpProvider attempted to unregister itself in a racy way. " |
| 177 << "Please file a crbug."; | 177 << "Please file a crbug."; |
| 178 | 178 |
| 179 dump_providers_.erase(mdp_iter); | 179 dump_providers_.erase(mdp_iter); |
| 180 did_unregister_dump_provider_ = true; | 180 did_unregister_dump_provider_ = true; |
| 181 } | 181 } |
| 182 | 182 |
| 183 void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type, | 183 void MemoryDumpManager::RequestGlobalDump( |
| 184 const MemoryDumpArgs& dump_args, | 184 MemoryDumpType dump_type, |
| 185 const MemoryDumpCallback& callback) { | 185 const MemoryDumpCallback& callback) { |
| 186 // Bail out immediately if tracing is not enabled at all. | 186 // Bail out immediately if tracing is not enabled at all. |
| 187 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) { | 187 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) { |
| 188 if (!callback.is_null()) | 188 if (!callback.is_null()) |
| 189 callback.Run(0u /* guid */, false /* success */); | 189 callback.Run(0u /* guid */, false /* success */); |
| 190 return; | 190 return; |
| 191 } | 191 } |
| 192 | 192 |
| 193 const uint64 guid = | 193 const uint64 guid = |
| 194 TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext()); | 194 TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext()); |
| 195 | 195 |
| 196 // The delegate_ is supposed to be thread safe, immutable and long lived. | 196 // The delegate_ is supposed to be thread safe, immutable and long lived. |
| 197 // No need to keep the lock after we ensure that a delegate has been set. | 197 // No need to keep the lock after we ensure that a delegate has been set. |
| 198 MemoryDumpManagerDelegate* delegate; | 198 MemoryDumpManagerDelegate* delegate; |
| 199 { | 199 { |
| 200 AutoLock lock(lock_); | 200 AutoLock lock(lock_); |
| 201 delegate = delegate_; | 201 delegate = delegate_; |
| 202 } | 202 } |
| 203 | 203 |
| 204 if (delegate) { | 204 if (delegate) { |
| 205 // The delegate is in charge of coordinating the request among all the | 205 // The delegate is in charge of coordinating the request among all the |
| 206 // processes and calling the CreateLocalDumpPoint on the local process. | 206 // processes and calling the CreateLocalDumpPoint on the local process. |
| 207 MemoryDumpRequestArgs args = {guid, dump_type, dump_args}; | 207 MemoryDumpRequestArgs args = {guid, dump_type}; |
| 208 delegate->RequestGlobalMemoryDump(args, callback); | 208 delegate->RequestGlobalMemoryDump(args, callback); |
| 209 } else if (!callback.is_null()) { | 209 } else if (!callback.is_null()) { |
| 210 callback.Run(guid, false /* success */); | 210 callback.Run(guid, false /* success */); |
| 211 } | 211 } |
| 212 } | 212 } |
| 213 | 213 |
| 214 void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type, | 214 void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type) { |
| 215 const MemoryDumpArgs& dump_args) { | 215 RequestGlobalDump(dump_type, MemoryDumpCallback()); |
| 216 RequestGlobalDump(dump_type, dump_args, MemoryDumpCallback()); | |
| 217 } | 216 } |
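With the NEW signatures, callers pass only a MemoryDumpType; the level of detail is no longer threaded through as a MemoryDumpArgs parameter. A hedged caller-side sketch follows: the callback signature is inferred from the (guid, success) invocation visible above, and OnDumpDone / TriggerExplicitDump are illustrative names, not functions from the tree.

    #include "base/bind.h"
    #include "base/logging.h"
    #include "base/trace_event/memory_dump_manager.h"
    #include "base/trace_event/memory_dump_request_args.h"

    namespace {

    // Completion callback: receives the mangled dump guid and a success flag
    // (assumed to match the MemoryDumpCallback used in this file).
    void OnDumpDone(uint64 dump_guid, bool success) {
      VLOG(1) << "Memory dump " << dump_guid
              << (success ? " completed" : " failed");
    }

    }  // namespace

    void TriggerExplicitDump() {
      using base::trace_event::MemoryDumpManager;
      using base::trace_event::MemoryDumpType;

      // Fire-and-forget variant (no callback).
      MemoryDumpManager::GetInstance()->RequestGlobalDump(
          MemoryDumpType::EXPLICITLY_TRIGGERED);

      // Variant that reports completion.
      MemoryDumpManager::GetInstance()->RequestGlobalDump(
          MemoryDumpType::EXPLICITLY_TRIGGERED, base::Bind(&OnDumpDone));
    }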
| 218 | 217 |
| 219 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, | 218 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, |
| 220 const MemoryDumpCallback& callback) { | 219 const MemoryDumpCallback& callback) { |
| 221 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; | 220 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; |
| 222 { | 221 { |
| 223 AutoLock lock(lock_); | 222 AutoLock lock(lock_); |
| 224 did_unregister_dump_provider_ = false; | 223 did_unregister_dump_provider_ = false; |
| 225 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( | 224 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( |
| 226 args, dump_providers_.begin(), session_state_, callback)); | 225 args, dump_providers_.begin(), session_state_, callback)); |
| (...skipping 40 matching lines...) |
| 267 if (did_unregister_dump_provider_) { | 266 if (did_unregister_dump_provider_) { |
| 268 return AbortDumpLocked(pmd_async_state->callback, | 267 return AbortDumpLocked(pmd_async_state->callback, |
| 269 pmd_async_state->task_runner, | 268 pmd_async_state->task_runner, |
| 270 pmd_async_state->req_args.dump_guid); | 269 pmd_async_state->req_args.dump_guid); |
| 271 } | 270 } |
| 272 | 271 |
| 273 auto* mdp_info = &*pmd_async_state->next_dump_provider; | 272 auto* mdp_info = &*pmd_async_state->next_dump_provider; |
| 274 mdp = mdp_info->dump_provider; | 273 mdp = mdp_info->dump_provider; |
| 275 if (mdp_info->disabled) { | 274 if (mdp_info->disabled) { |
| 276 skip_dump = true; | 275 skip_dump = true; |
| 276 } else if (mdp == g_mmaps_dump_provider && |
| 277 pmd_async_state->req_args.dump_type != |
| 278 MemoryDumpType::PERIODIC_INTERVAL_WITH_MMAPS && |
| 279 pmd_async_state->req_args.dump_type != |
| 280 MemoryDumpType::EXPLICITLY_TRIGGERED) { |
| 281 // Mmaps dumping is very heavyweight and cannot be performed at the same |
| 282 // rate as other dumps. TODO(primiano): this is a hack and should be |
| 283 // cleaned up as part of crbug.com/499731. |
| 284 skip_dump = true; |
| 277 } else if (mdp_info->task_runner && | 285 } else if (mdp_info->task_runner && |
| 278 !mdp_info->task_runner->BelongsToCurrentThread()) { | 286 !mdp_info->task_runner->BelongsToCurrentThread()) { |
| 279 // It's time to hop onto another thread. | 287 // It's time to hop onto another thread. |
| 280 | 288 |
| 281 // Copy the callback + arguments just for the unlikely case in which | 289 // Copy the callback + arguments just for the unlikely case in which |
| 282 // PostTask fails. In such case the Bind helper will destroy the | 290 // PostTask fails. In such case the Bind helper will destroy the |
| 283 // pmd_async_state and we must keep a copy of the fields to notify the | 291 // pmd_async_state and we must keep a copy of the fields to notify the |
| 284 // abort. | 292 // abort. |
| 285 MemoryDumpCallback callback = pmd_async_state->callback; | 293 MemoryDumpCallback callback = pmd_async_state->callback; |
| 286 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = | 294 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = |
| (...skipping 10 matching lines...) |
| 297 // disable the dump provider and abort this dump. | 305 // disable the dump provider and abort this dump. |
| 298 mdp_info->disabled = true; | 306 mdp_info->disabled = true; |
| 299 return AbortDumpLocked(callback, callback_task_runner, dump_guid); | 307 return AbortDumpLocked(callback, callback_task_runner, dump_guid); |
| 300 } | 308 } |
| 301 } // AutoLock(lock_) | 309 } // AutoLock(lock_) |
| 302 | 310 |
| 303 // Invoke the dump provider without holding the |lock_|. | 311 // Invoke the dump provider without holding the |lock_|. |
| 304 bool finalize = false; | 312 bool finalize = false; |
| 305 bool dump_successful = false; | 313 bool dump_successful = false; |
| 306 | 314 |
| 315 // TODO(ssid): Change RequestGlobalDump to use MemoryDumpArgs along with |
| 316 // MemoryDumpType to get request for light / heavy dump, and remove this |
| 317 // constant. |
| 307 if (!skip_dump) { | 318 if (!skip_dump) { |
| 308 dump_successful = mdp->OnMemoryDump(pmd_async_state->req_args.dump_args, | 319 MemoryDumpArgs dump_args = {MemoryDumpArgs::LEVEL_OF_DETAIL_HIGH}; |
| 309 &pmd_async_state->process_memory_dump); | 320 dump_successful = |
| 321 mdp->OnMemoryDump(dump_args, &pmd_async_state->process_memory_dump); |
| 310 } | 322 } |
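The branch added above skips the heavyweight mmaps provider for any dump that neither explicitly asks for mmaps nor was explicitly triggered, and the NEW code hard-codes LEVEL_OF_DETAIL_HIGH when invoking the provider (the adjacent TODO tracks removing that constant via crbug.com/499731). The skip condition, restated as a standalone predicate for illustration only; the enum is redefined locally so the snippet compiles on its own:

    // Illustrative restatement of the skip condition in
    // ContinueAsyncProcessDump(): the mmaps provider only runs for the two
    // dump types below; everything else (e.g. plain PERIODIC_INTERVAL) skips it.
    enum class MemoryDumpType {
      PERIODIC_INTERVAL,
      PERIODIC_INTERVAL_WITH_MMAPS,
      EXPLICITLY_TRIGGERED,
    };

    bool ShouldRunMmapsProvider(MemoryDumpType type) {
      return type == MemoryDumpType::PERIODIC_INTERVAL_WITH_MMAPS ||
             type == MemoryDumpType::EXPLICITLY_TRIGGERED;
    }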
| 311 | 323 |
| 312 { | 324 { |
| 313 AutoLock lock(lock_); | 325 AutoLock lock(lock_); |
| 314 if (did_unregister_dump_provider_) { | 326 if (did_unregister_dump_provider_) { |
| 315 return AbortDumpLocked(pmd_async_state->callback, | 327 return AbortDumpLocked(pmd_async_state->callback, |
| 316 pmd_async_state->task_runner, | 328 pmd_async_state->task_runner, |
| 317 pmd_async_state->req_args.dump_guid); | 329 pmd_async_state->req_args.dump_guid); |
| 318 } | 330 } |
| 319 auto* mdp_info = &*pmd_async_state->next_dump_provider; | 331 auto* mdp_info = &*pmd_async_state->next_dump_provider; |
| (...skipping 150 matching lines...) |
| 470 next_dump_provider(next_dump_provider), | 482 next_dump_provider(next_dump_provider), |
| 471 callback(callback), | 483 callback(callback), |
| 472 task_runner(MessageLoop::current()->task_runner()) { | 484 task_runner(MessageLoop::current()->task_runner()) { |
| 473 } | 485 } |
| 474 | 486 |
| 475 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() { | 487 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() { |
| 476 } | 488 } |
| 477 | 489 |
| 478 } // namespace trace_event | 490 } // namespace trace_event |
| 479 } // namespace base | 491 } // namespace base |