OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 | 8 |
9 #include "base/atomic_sequence_num.h" | 9 #include "base/atomic_sequence_num.h" |
10 #include "base/command_line.h" | 10 #include "base/command_line.h" |
(...skipping 25 matching lines...) | |
36 namespace base { | 36 namespace base { |
37 namespace trace_event { | 37 namespace trace_event { |
38 | 38 |
39 namespace { | 39 namespace { |
40 | 40 |
41 // TODO(primiano): this should be smarter and should do something similar to | 41 // TODO(primiano): this should be smarter and should do something similar to |
42 // trace event synthetic delays. | 42 // trace event synthetic delays. |
43 const char kTraceCategory[] = TRACE_DISABLED_BY_DEFAULT("memory-infra"); | 43 const char kTraceCategory[] = TRACE_DISABLED_BY_DEFAULT("memory-infra"); |
44 | 44 |
45 // Throttle mmaps at a rate of once every kHeavyMmapsDumpsRate standard dumps. | 45 // Throttle heavy dumps at a rate of once every kHeavyDumpsRate standard dumps. |
46 const int kHeavyMmapsDumpsRate = 8; // 250 ms * 8 = 2000 ms. | 46 const int kHeavyDumpsRate = 8; // 250 ms * 8 = 2000 ms. |
47 const int kDumpIntervalMs = 250; | 47 const int kDumpIntervalMs = 250; |
48 const int kTraceEventNumArgs = 1; | 48 const int kTraceEventNumArgs = 1; |
49 const char* kTraceEventArgNames[] = {"dumps"}; | 49 const char* kTraceEventArgNames[] = {"dumps"}; |
50 const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE}; | 50 const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE}; |
51 | 51 |
52 StaticAtomicSequenceNumber g_next_guid; | 52 StaticAtomicSequenceNumber g_next_guid; |
53 uint32 g_periodic_dumps_count = 0; | 53 uint32 g_periodic_dumps_count = 0; |
54 MemoryDumpManager* g_instance_for_testing = nullptr; | 54 MemoryDumpManager* g_instance_for_testing = nullptr; |
55 MemoryDumpProvider* g_mmaps_dump_provider = nullptr; | 55 MemoryDumpProvider* g_mmaps_dump_provider = nullptr; |
56 | 56 |
57 void RequestPeriodicGlobalDump() { | 57 void RequestPeriodicGlobalDump() { |
58 MemoryDumpType dump_type = g_periodic_dumps_count == 0 | 58 MemoryDumpArgs::LevelOfDetail dump_level_of_detail = |
59 ? MemoryDumpType::PERIODIC_INTERVAL_WITH_MMAPS | 59 g_periodic_dumps_count == 0 ? MemoryDumpArgs::LEVEL_OF_DETAIL_HIGH |
60 : MemoryDumpType::PERIODIC_INTERVAL; | 60 : MemoryDumpArgs::LEVEL_OF_DETAIL_LOW; |
61 if (++g_periodic_dumps_count == kHeavyMmapsDumpsRate) | 61 if (++g_periodic_dumps_count == kHeavyDumpsRate) |
62 g_periodic_dumps_count = 0; | 62 g_periodic_dumps_count = 0; |
63 | 63 |
64 MemoryDumpManager::GetInstance()->RequestGlobalDump(dump_type); | 64 MemoryDumpArgs dump_args = {dump_level_of_detail}; |
65 MemoryDumpType dump_type = MemoryDumpType::PERIODIC_INTERVAL; | |
petrcermak 2015/08/03 16:15:03
nit: No need to declare this variable. You could j[…]
ssid 2015/08/03 16:24:09
Done.
| |
66 | |
67 MemoryDumpManager::GetInstance()->RequestGlobalDump(dump_type, dump_args); | |
65 } | 68 } |
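For context, a minimal caller-side sketch of the RequestGlobalDump signature introduced by this patch: the level of detail now travels in MemoryDumpArgs rather than being encoded in the dump type. The include and the EXPLICITLY_TRIGGERED value are taken from elsewhere in this file; everything else is assumed from the lines above, not a definitive usage.

#include "base/trace_event/memory_dump_manager.h"

void RequestDetailedDumpSketch() {
  // Ask all processes for a high-detail (heavy) dump.
  base::trace_event::MemoryDumpArgs dump_args = {
      base::trace_event::MemoryDumpArgs::LEVEL_OF_DETAIL_HIGH};
  base::trace_event::MemoryDumpManager::GetInstance()->RequestGlobalDump(
      base::trace_event::MemoryDumpType::EXPLICITLY_TRIGGERED, dump_args);
}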
66 | 69 |
67 } // namespace | 70 } // namespace |
68 | 71 |
69 // static | 72 // static |
70 const char* const MemoryDumpManager::kTraceCategoryForTesting = kTraceCategory; | 73 const char* const MemoryDumpManager::kTraceCategoryForTesting = kTraceCategory; |
71 | 74 |
72 // static | 75 // static |
73 const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3; | 76 const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3; |
74 | 77 |
(...skipping 98 matching lines...) | |
173 DCHECK_IMPLIES( | 176 DCHECK_IMPLIES( |
174 subtle::NoBarrier_Load(&memory_tracing_enabled_), | 177 subtle::NoBarrier_Load(&memory_tracing_enabled_), |
175 mdp_iter->task_runner && mdp_iter->task_runner->BelongsToCurrentThread()) | 178 mdp_iter->task_runner && mdp_iter->task_runner->BelongsToCurrentThread()) |
176 << "The MemoryDumpProvider attempted to unregister itself in a racy way. " | 179 << "The MemoryDumpProvider attempted to unregister itself in a racy way. " |
177 << "Please file a crbug."; | 180 << "Please file a crbug."; |
178 | 181 |
179 dump_providers_.erase(mdp_iter); | 182 dump_providers_.erase(mdp_iter); |
180 did_unregister_dump_provider_ = true; | 183 did_unregister_dump_provider_ = true; |
181 } | 184 } |
182 | 185 |
183 void MemoryDumpManager::RequestGlobalDump( | 186 void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type, |
184 MemoryDumpType dump_type, | 187 const MemoryDumpArgs& dump_args, |
185 const MemoryDumpCallback& callback) { | 188 const MemoryDumpCallback& callback) { |
186 // Bail out immediately if tracing is not enabled at all. | 189 // Bail out immediately if tracing is not enabled at all. |
187 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) { | 190 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) { |
188 if (!callback.is_null()) | 191 if (!callback.is_null()) |
189 callback.Run(0u /* guid */, false /* success */); | 192 callback.Run(0u /* guid */, false /* success */); |
190 return; | 193 return; |
191 } | 194 } |
192 | 195 |
193 const uint64 guid = | 196 const uint64 guid = |
194 TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext()); | 197 TraceLog::GetInstance()->MangleEventId(g_next_guid.GetNext()); |
195 | 198 |
196 // The delegate_ is supposed to be thread safe, immutable and long lived. | 199 // The delegate_ is supposed to be thread safe, immutable and long lived. |
197 // No need to keep the lock after we ensure that a delegate has been set. | 200 // No need to keep the lock after we ensure that a delegate has been set. |
198 MemoryDumpManagerDelegate* delegate; | 201 MemoryDumpManagerDelegate* delegate; |
199 { | 202 { |
200 AutoLock lock(lock_); | 203 AutoLock lock(lock_); |
201 delegate = delegate_; | 204 delegate = delegate_; |
202 } | 205 } |
203 | 206 |
204 if (delegate) { | 207 if (delegate) { |
205 // The delegate is in charge of coordinating the request among all the | 208 // The delegate is in charge of coordinating the request among all the |
206 // processes and calling CreateLocalDumpPoint on the local process. | 209 // processes and calling CreateLocalDumpPoint on the local process. |
207 MemoryDumpRequestArgs args = {guid, dump_type}; | 210 MemoryDumpRequestArgs args = {guid, dump_type, dump_args}; |
208 delegate->RequestGlobalMemoryDump(args, callback); | 211 delegate->RequestGlobalMemoryDump(args, callback); |
209 } else if (!callback.is_null()) { | 212 } else if (!callback.is_null()) { |
210 callback.Run(guid, false /* success */); | 213 callback.Run(guid, false /* success */); |
211 } | 214 } |
212 } | 215 } |
213 | 216 |
214 void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type) { | 217 void MemoryDumpManager::RequestGlobalDump(MemoryDumpType dump_type, |
215 RequestGlobalDump(dump_type, MemoryDumpCallback()); | 218 const MemoryDumpArgs& dump_args) { |
219 RequestGlobalDump(dump_type, dump_args, MemoryDumpCallback()); | |
216 } | 220 } |
217 | 221 |
218 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, | 222 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, |
219 const MemoryDumpCallback& callback) { | 223 const MemoryDumpCallback& callback) { |
220 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; | 224 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; |
221 { | 225 { |
222 AutoLock lock(lock_); | 226 AutoLock lock(lock_); |
223 did_unregister_dump_provider_ = false; | 227 did_unregister_dump_provider_ = false; |
224 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( | 228 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( |
225 args, dump_providers_.begin(), session_state_, callback)); | 229 args, dump_providers_.begin(), session_state_, callback)); |
(...skipping 40 matching lines...) | |
266 if (did_unregister_dump_provider_) { | 270 if (did_unregister_dump_provider_) { |
267 return AbortDumpLocked(pmd_async_state->callback, | 271 return AbortDumpLocked(pmd_async_state->callback, |
268 pmd_async_state->task_runner, | 272 pmd_async_state->task_runner, |
269 pmd_async_state->req_args.dump_guid); | 273 pmd_async_state->req_args.dump_guid); |
270 } | 274 } |
271 | 275 |
272 auto* mdp_info = &*pmd_async_state->next_dump_provider; | 276 auto* mdp_info = &*pmd_async_state->next_dump_provider; |
273 mdp = mdp_info->dump_provider; | 277 mdp = mdp_info->dump_provider; |
274 if (mdp_info->disabled) { | 278 if (mdp_info->disabled) { |
275 skip_dump = true; | 279 skip_dump = true; |
276 } else if (mdp == g_mmaps_dump_provider && | |
277 pmd_async_state->req_args.dump_type != | |
278 MemoryDumpType::PERIODIC_INTERVAL_WITH_MMAPS && | |
279 pmd_async_state->req_args.dump_type != | |
280 MemoryDumpType::EXPLICITLY_TRIGGERED) { | |
281 // Mmaps dumping is very heavyweight and cannot be performed at the same | |
282 // rate of other dumps. TODO(primiano): this is a hack and should be | |
283 // cleaned up as part of crbug.com/499731. | |
284 skip_dump = true; | |
285 } else if (mdp_info->task_runner && | 280 } else if (mdp_info->task_runner && |
286 !mdp_info->task_runner->BelongsToCurrentThread()) { | 281 !mdp_info->task_runner->BelongsToCurrentThread()) { |
287 // It's time to hop onto another thread. | 282 // It's time to hop onto another thread. |
288 | 283 |
289 // Copy the callback + arguments just for the unlikely case in which | 284 // Copy the callback + arguments just for the unlikely case in which |
290 // PostTask fails. In such a case the Bind helper will destroy the | 285 // PostTask fails. In such a case the Bind helper will destroy the |
291 // pmd_async_state and we must keep a copy of the fields to notify the | 286 // pmd_async_state and we must keep a copy of the fields to notify the |
292 // abort. | 287 // abort. |
293 MemoryDumpCallback callback = pmd_async_state->callback; | 288 MemoryDumpCallback callback = pmd_async_state->callback; |
294 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = | 289 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = |
(...skipping 10 matching lines...) | |
305 // disable the dump provider and abort this dump. | 300 // disable the dump provider and abort this dump. |
306 mdp_info->disabled = true; | 301 mdp_info->disabled = true; |
307 return AbortDumpLocked(callback, callback_task_runner, dump_guid); | 302 return AbortDumpLocked(callback, callback_task_runner, dump_guid); |
308 } | 303 } |
309 } // AutoLock(lock_) | 304 } // AutoLock(lock_) |
310 | 305 |
311 // Invoke the dump provider without holding the |lock_|. | 306 // Invoke the dump provider without holding the |lock_|. |
312 bool finalize = false; | 307 bool finalize = false; |
313 bool dump_successful = false; | 308 bool dump_successful = false; |
314 | 309 |
315 // TODO(ssid): Change RequestGlobalDump to use MemoryDumpArgs along with | |
316 // MemoryDumpType to get request for light / heavy dump, and remove this | |
317 // constant. | |
318 if (!skip_dump) { | 310 if (!skip_dump) { |
319 MemoryDumpArgs dump_args = {MemoryDumpArgs::LEVEL_OF_DETAIL_HIGH}; | 311 dump_successful = mdp->OnMemoryDump(pmd_async_state->req_args.dump_args, |
320 dump_successful = | 312 &pmd_async_state->process_memory_dump); |
321 mdp->OnMemoryDump(dump_args, &pmd_async_state->process_memory_dump); | |
322 } | 313 } |
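With req_args.dump_args now forwarded to OnMemoryDump above, a provider can throttle its own heavy work per dump. A hypothetical provider sketch follows; MyDumpProvider and the two Dump* helpers are made-up names, and the |level_of_detail| member is assumed from the brace-initialization earlier in this file.

// Hypothetical provider (illustrative names, not part of this CL).
bool MyDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
                                  ProcessMemoryDump* pmd) {
  DumpCheapCounters(pmd);  // Always emitted, regardless of detail level.
  if (args.level_of_detail == MemoryDumpArgs::LEVEL_OF_DETAIL_HIGH)
    DumpExpensiveDetails(pmd);  // e.g. mmaps-style heavy data.
  return true;
}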
323 | 314 |
324 { | 315 { |
325 AutoLock lock(lock_); | 316 AutoLock lock(lock_); |
326 if (did_unregister_dump_provider_) { | 317 if (did_unregister_dump_provider_) { |
327 return AbortDumpLocked(pmd_async_state->callback, | 318 return AbortDumpLocked(pmd_async_state->callback, |
328 pmd_async_state->task_runner, | 319 pmd_async_state->task_runner, |
329 pmd_async_state->req_args.dump_guid); | 320 pmd_async_state->req_args.dump_guid); |
330 } | 321 } |
331 auto* mdp_info = &*pmd_async_state->next_dump_provider; | 322 auto* mdp_info = &*pmd_async_state->next_dump_provider; |
(...skipping 150 matching lines...) | |
482 next_dump_provider(next_dump_provider), | 473 next_dump_provider(next_dump_provider), |
483 callback(callback), | 474 callback(callback), |
484 task_runner(MessageLoop::current()->task_runner()) { | 475 task_runner(MessageLoop::current()->task_runner()) { |
485 } | 476 } |
486 | 477 |
487 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() { | 478 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() { |
488 } | 479 } |
489 | 480 |
490 } // namespace trace_event | 481 } // namespace trace_event |
491 } // namespace base | 482 } // namespace base |