OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <utility> | 8 #include <utility> |
9 | 9 |
10 #include "base/atomic_sequence_num.h" | 10 #include "base/atomic_sequence_num.h" |
(...skipping 175 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
186 } | 186 } |
187 | 187 |
188 void MemoryDumpManager::RegisterDumpProvider( | 188 void MemoryDumpManager::RegisterDumpProvider( |
189 MemoryDumpProvider* mdp, | 189 MemoryDumpProvider* mdp, |
190 const char* name, | 190 const char* name, |
191 const scoped_refptr<SingleThreadTaskRunner>& task_runner, | 191 const scoped_refptr<SingleThreadTaskRunner>& task_runner, |
192 const MemoryDumpProvider::Options& options) { | 192 const MemoryDumpProvider::Options& options) { |
193 if (dumper_registrations_ignored_for_testing_) | 193 if (dumper_registrations_ignored_for_testing_) |
194 return; | 194 return; |
195 | 195 |
196 MemoryDumpProviderInfo mdp_info(mdp, name, task_runner, options); | 196 scoped_refptr<MemoryDumpProviderInfo> mdpinfo = |
197 new MemoryDumpProviderInfo(mdp, name, task_runner, options); | |
197 AutoLock lock(lock_); | 198 AutoLock lock(lock_); |
198 auto iter_new = dump_providers_.insert(mdp_info); | 199 dump_providers_.insert(mdpinfo); |
199 | |
200 // If there was a previous entry, replace it with the new one. This is to deal | |
201 // with the case where a dump provider unregisters itself and then re- | |
202 // registers before a memory dump happens, so its entry was still in the | |
203 // collection but flagged |unregistered|. | |
204 if (!iter_new.second) { | |
205 dump_providers_.erase(iter_new.first); | |
206 dump_providers_.insert(mdp_info); | |
207 } | |
208 | 200 |
209 if (heap_profiling_enabled_) | 201 if (heap_profiling_enabled_) |
210 mdp->OnHeapProfilingEnabled(true); | 202 mdp->OnHeapProfilingEnabled(true); |
211 } | 203 } |
212 | 204 |
213 void MemoryDumpManager::RegisterDumpProvider( | 205 void MemoryDumpManager::RegisterDumpProvider( |
214 MemoryDumpProvider* mdp, | 206 MemoryDumpProvider* mdp, |
215 const char* name, | 207 const char* name, |
216 const scoped_refptr<SingleThreadTaskRunner>& task_runner) { | 208 const scoped_refptr<SingleThreadTaskRunner>& task_runner) { |
217 RegisterDumpProvider(mdp, name, task_runner, MemoryDumpProvider::Options()); | 209 RegisterDumpProvider(mdp, name, task_runner, MemoryDumpProvider::Options()); |
218 } | 210 } |
219 | 211 |
220 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { | 212 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { |
221 AutoLock lock(lock_); | 213 AutoLock lock(lock_); |
222 | 214 |
223 auto mdp_iter = dump_providers_.begin(); | 215 auto mdp_iter = dump_providers_.begin(); |
224 for (; mdp_iter != dump_providers_.end(); ++mdp_iter) { | 216 for (; mdp_iter != dump_providers_.end(); ++mdp_iter) { |
225 if (mdp_iter->dump_provider == mdp) | 217 if ((*mdp_iter)->dump_provider == mdp) |
226 break; | 218 break; |
227 } | 219 } |
228 | 220 |
229 if (mdp_iter == dump_providers_.end()) | 221 if (mdp_iter == dump_providers_.end()) |
230 return; | 222 return; |
231 | 223 |
232 // Unregistration of a MemoryDumpProvider while tracing is ongoing is safe | 224 // Unregistration of a MemoryDumpProvider while tracing is ongoing is safe |
233 // only if the MDP has specified a thread affinity (via task_runner()) AND | 225 // only if the MDP has specified a thread affinity (via task_runner()) AND |
234 // the unregistration happens on the same thread (so the MDP cannot unregister | 226 // the unregistration happens on the same thread (so the MDP cannot unregister |
235 // and OnMemoryDump() at the same time). | 227 // and OnMemoryDump() at the same time). |
236 // Otherwise, it is not possible to guarantee that its unregistration is | 228 // Otherwise, it is not possible to guarantee that its unregistration is |
237 // race-free. If you hit this DCHECK, your MDP has a bug. | 229 // race-free. If you hit this DCHECK, your MDP has a bug. |
238 DCHECK(!subtle::NoBarrier_Load(&memory_tracing_enabled_) || | 230 DCHECK(!subtle::NoBarrier_Load(&memory_tracing_enabled_) || |
239 (mdp_iter->task_runner && | 231 ((*mdp_iter)->task_runner && |
240 mdp_iter->task_runner->BelongsToCurrentThread())) | 232 (*mdp_iter)->task_runner->BelongsToCurrentThread())) |
241 << "MemoryDumpProvider \"" << mdp_iter->name << "\" attempted to " | 233 << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to " |
242 << "unregister itself in a racy way. Please file a crbug."; | 234 << "unregister itself in a racy way. Please file a crbug."; |
243 | 235 |
244 mdp_iter->unregistered = true; | 236 // The MDPInfo instance can still be refcounted by the |
237 // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason | |
238 // the MDPInfo is flagged as disabled. It will cause ContinueAsyncProcessDump | |
239 // to just skip it, without actually invoking the |mdp|, which might be | |
240 // destroyed by the caller soon after this method returns. | |
241 (*mdp_iter)->disabled = true; | |
242 dump_providers_.erase(mdp_iter); | |
245 } | 243 } |
246 | 244 |
247 void MemoryDumpManager::RequestGlobalDump( | 245 void MemoryDumpManager::RequestGlobalDump( |
248 MemoryDumpType dump_type, | 246 MemoryDumpType dump_type, |
249 MemoryDumpLevelOfDetail level_of_detail, | 247 MemoryDumpLevelOfDetail level_of_detail, |
250 const MemoryDumpCallback& callback) { | 248 const MemoryDumpCallback& callback) { |
251 // Bail out immediately if tracing is not enabled at all. | 249 // Bail out immediately if tracing is not enabled at all. |
252 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) { | 250 if (!UNLIKELY(subtle::NoBarrier_Load(&memory_tracing_enabled_))) { |
253 if (!callback.is_null()) | 251 if (!callback.is_null()) |
254 callback.Run(0u /* guid */, false /* success */); | 252 callback.Run(0u /* guid */, false /* success */); |
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
289 } | 287 } |
290 | 288 |
291 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, | 289 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, |
292 const MemoryDumpCallback& callback) { | 290 const MemoryDumpCallback& callback) { |
293 TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump", | 291 TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump", |
294 TRACE_ID_MANGLE(args.dump_guid)); | 292 TRACE_ID_MANGLE(args.dump_guid)); |
295 | 293 |
296 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; | 294 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; |
297 { | 295 { |
298 AutoLock lock(lock_); | 296 AutoLock lock(lock_); |
299 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( | 297 pmd_async_state.reset( |
300 args, dump_providers_.begin(), session_state_, callback, | 298 new ProcessMemoryDumpAsyncState(args, dump_providers_, session_state_, |
301 dump_thread_->task_runner())); | 299 callback, dump_thread_->task_runner())); |
302 } | 300 } |
303 | 301 |
304 TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump", | 302 TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump", |
305 TRACE_ID_MANGLE(args.dump_guid), | 303 TRACE_ID_MANGLE(args.dump_guid), |
306 TRACE_EVENT_FLAG_FLOW_OUT); | 304 TRACE_EVENT_FLAG_FLOW_OUT); |
307 | 305 |
308 // Start the thread hop. |dump_providers_| are kept sorted by thread, so | 306 // Start the thread hop. |dump_providers_| are kept sorted by thread, so |
309 // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread | 307 // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread |
310 // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()). | 308 // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()). |
311 ContinueAsyncProcessDump(std::move(pmd_async_state)); | 309 ContinueAsyncProcessDump(pmd_async_state.release()); |
312 } | 310 } |
313 | 311 |
314 // At most one ContinueAsyncProcessDump() can be active at any time for a given | 312 // At most one ContinueAsyncProcessDump() can be active at any time for a given |
315 // PMD, regardless of status of the |lock_|. |lock_| is used here purely to | 313 // PMD, regardless of status of the |lock_|. |lock_| is used here purely to |
316 // ensure consistency w.r.t. (un)registrations of |dump_providers_|. | 314 // ensure consistency w.r.t. (un)registrations of |dump_providers_|. |
317 // The linearization of dump providers' OnMemoryDump invocations is achieved by | 315 // The linearization of dump providers' OnMemoryDump invocations is achieved by |
318 // means of subsequent PostTask(s). | 316 // means of subsequent PostTask(s). |
319 // | 317 // |
320 // 1) Prologue: | 318 // 1) Prologue: |
321 // - Check if the dump provider is disabled, if so skip the dump. | 319 // - Check if the dump provider is disabled, if so skip the dump. |
322 // - Check if we are on the right thread. If not hop and continue there. | 320 // - Check if we are on the right thread. If not hop and continue there. |
323 // 2) Invoke the dump provider's OnMemoryDump() (unless skipped). | 321 // 2) Invoke the dump provider's OnMemoryDump() (unless skipped). |
324 // 3) Epilogue: | 322 // 3) Epilogue: |
325 // - Unregister the dump provider if it failed too many times consecutively. | 323 // - Unregister the dump provider if it failed too many times consecutively. |
326 // - Advance the |next_dump_provider| iterator to the next dump provider. | 324 // - Pop() the MDP from the |pending_dump_providers| list. |
327 // - If this was the last hop, create a trace event, add it to the trace | 325 // - If this was the last hop, create a trace event, add it to the trace |
328 // and finalize (invoke callback). | 326 // and finalize (invoke callback). |
329 | 327 |
330 void MemoryDumpManager::ContinueAsyncProcessDump( | 328 void MemoryDumpManager::ContinueAsyncProcessDump( |
331 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { | 329 ProcessMemoryDumpAsyncState* owned_pmd_async_state) { |
332 // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs | 330 // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs |
333 // in the PostTask below don't end up registering their own dump providers | 331 // in the PostTask below don't end up registering their own dump providers |
334 // (for discounting trace memory overhead) while holding the |lock_|. | 332 // (for discounting trace memory overhead) while holding the |lock_|. |
335 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); | 333 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); |
336 | 334 |
337 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; | 335 // In theory |owned_pmd_async_state| should be a scoped_ptr. The only reason |
338 const char* dump_provider_name = nullptr; | 336 // why it isn't is because of the corner case logic of |did_post_task| below, |
337 // which needs to take back the ownership of the |pmd_async_state| when a | |
338 // thread goes away and consequently the PostTask() fails. | |
339 // Unfortunatelly, in fact, PostTask() destroyes the scoped_ptr arguments upon | |
ssid
2015/12/17 22:09:36
s/Unfortunatelly/Unfortunately
s/destroyes/destroy
Primiano Tucci (use gerrit)
2015/12/18 12:00:31
Done.
| |
340 // failure (to prevent accidental leaks). This would prevent us from just | |
341 // skipping the hop and moving on. Hence the manual naked -> scoped ptr juggling. | |
342 auto pmd_async_state = make_scoped_ptr(owned_pmd_async_state); | |
343 owned_pmd_async_state = nullptr; | |
339 | 344 |
340 // Pid of the target process being dumped. Often kNullProcessId (= current | 345 if (pmd_async_state->pending_dump_providers.empty()) |
341 // process), non-zero when the coordinator process creates dumps on behalf | 346 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state)); |
342 // of child processes (see crbug.com/461788). | 347 |
343 ProcessId pid; | 348 // Read MemoryDumpProviderInfo thread safety considerations in |
349 // memory_dump_manager.h when accessing |mdpinfo| fields. | |
350 MemoryDumpProviderInfo* mdpinfo = | |
351 pmd_async_state->pending_dump_providers.front().get(); | |
352 | |
353 // If the dump provider did not specify a thread affinity, dump on | |
354 // |dump_thread_|. | |
355 SingleThreadTaskRunner* task_runner = mdpinfo->task_runner.get(); | |
356 if (!task_runner) | |
357 task_runner = pmd_async_state->dump_thread_task_runner.get(); | |
358 | |
359 // |dump_thread_| might have been Stop()-ed at this point (if tracing was | |
360 // disabled in the meanwhile). In such case the PostTask() below will fail. | |
361 // |task_runner|, however, should always be non-null. | |
362 DCHECK(task_runner); | |
344 | 363 |
345 // DO NOT put any LOG() statement in the locked sections, as in some contexts | 364 // DO NOT put any LOG() statement in the locked sections, as in some contexts |
346 // (GPU process) LOG() ends up performing PostTask/IPCs. | 365 // (GPU process) LOG() ends up performing PostTask/IPCs. |
347 MemoryDumpProvider* mdp; | |
348 bool skip_dump = false; | 366 bool skip_dump = false; |
349 { | 367 { |
350 AutoLock lock(lock_); | 368 AutoLock lock(lock_); |
351 | 369 // The MDP can be disabled if: |
352 auto mdp_info = pmd_async_state->next_dump_provider; | 370 // - it failed too many times (see failsafe logic below). |
353 mdp = mdp_info->dump_provider; | 371 // - UnregisterDumpProvider() was called in the meanwhile. |
354 dump_provider_name = mdp_info->name; | 372 if (mdpinfo->disabled) { |
355 pid = mdp_info->options.target_pid; | |
356 | |
357 // If the dump provider did not specify a thread affinity, dump on | |
358 // |dump_thread_|. | |
359 SingleThreadTaskRunner* task_runner = mdp_info->task_runner.get(); | |
360 if (!task_runner) | |
361 task_runner = pmd_async_state->dump_thread_task_runner.get(); | |
362 | |
363 // |dump_thread_| might have been Stop()-ed at this point (if tracing was | |
364 // disabled in the meanwhile). In such case the PostTask() below will fail. | |
365 // |task_runner|, however, should always be non-null. | |
366 DCHECK(task_runner); | |
367 | |
368 if (mdp_info->disabled || mdp_info->unregistered) { | |
369 skip_dump = true; | 373 skip_dump = true; |
370 } else if (!task_runner->BelongsToCurrentThread()) { | 374 } else if (!task_runner->BelongsToCurrentThread()) { |
371 // It's time to hop onto another thread. | 375 // It's time to hop onto another thread. |
372 | |
373 // Copy the callback + arguments just for the unlikely case in which | |
374 // PostTask fails. In such case the Bind helper will destroy the | |
375 // pmd_async_state and we must keep a copy of the fields to notify the | |
376 // abort. | |
377 MemoryDumpCallback callback = pmd_async_state->callback; | |
378 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = | |
379 pmd_async_state->callback_task_runner; | |
380 | |
381 const bool did_post_task = task_runner->PostTask( | 376 const bool did_post_task = task_runner->PostTask( |
382 FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump, | 377 FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump, |
383 Unretained(this), Passed(&pmd_async_state))); | 378 Unretained(this), Unretained(pmd_async_state.get()))); |
384 if (did_post_task) | 379 if (did_post_task) { |
380 // Ownership is transferred to the next ContinueAsyncProcessDump(). | |
381 ignore_result(pmd_async_state.release()); | |
385 return; | 382 return; |
386 | 383 } |
387 // The thread is gone. At this point the best thing we can do is to | 384 // The thread is gone. At this point the best thing we can do is to |
388 // disable the dump provider and abort this dump. | 385 // disable the dump provider and abort this dump. |
389 mdp_info->disabled = true; | 386 mdpinfo->disabled = true; |
390 return AbortDumpLocked(callback, callback_task_runner, dump_guid); | 387 skip_dump = true; |
391 } | 388 } |
392 } // AutoLock(lock_) | 389 } // AutoLock(lock_) |
393 | 390 |
394 // Invoke the dump provider without holding the |lock_|. | 391 // Invoke the dump provider without holding the |lock_|. |
395 bool finalize = false; | |
396 bool dump_successful = false; | 392 bool dump_successful = false; |
397 | |
398 if (!skip_dump) { | 393 if (!skip_dump) { |
399 TRACE_EVENT_WITH_FLOW1(kTraceCategory, | 394 TRACE_EVENT_WITH_FLOW1(kTraceCategory, |
400 "MemoryDumpManager::ContinueAsyncProcessDump", | 395 "MemoryDumpManager::ContinueAsyncProcessDump", |
401 TRACE_ID_MANGLE(dump_guid), | 396 TRACE_ID_MANGLE(pmd_async_state->req_args.dump_guid), |
402 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT, | 397 TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT, |
403 "dump_provider.name", dump_provider_name); | 398 "dump_provider.name", mdpinfo->name); |
399 | |
400 // Pid of the target process being dumped. Often kNullProcessId (= current | |
401 // process), non-zero when the coordinator process creates dumps on behalf | |
402 // of child processes (see crbug.com/461788). | |
403 ProcessId target_pid = mdpinfo->options.target_pid; | |
404 ProcessMemoryDump* pmd = | |
405 pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(target_pid); | |
404 MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail}; | 406 MemoryDumpArgs args = {pmd_async_state->req_args.level_of_detail}; |
405 ProcessMemoryDump* process_memory_dump = | 407 dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd); |
406 pmd_async_state->GetOrCreateMemoryDumpContainerForProcess(pid); | 408 } // if (!skip_dump) |
407 dump_successful = mdp->OnMemoryDump(args, process_memory_dump); | |
408 } | |
409 | 409 |
410 { | 410 { |
411 AutoLock lock(lock_); | 411 AutoLock lock(lock_); |
412 auto mdp_info = pmd_async_state->next_dump_provider; | |
413 if (dump_successful) { | 412 if (dump_successful) { |
414 mdp_info->consecutive_failures = 0; | 413 mdpinfo->consecutive_failures = 0; |
415 } else if (!skip_dump) { | 414 } else if (!skip_dump) { |
416 ++mdp_info->consecutive_failures; | 415 ++mdpinfo->consecutive_failures; |
417 if (mdp_info->consecutive_failures >= kMaxConsecutiveFailuresCount) { | 416 if (mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) { |
418 mdp_info->disabled = true; | 417 mdpinfo->disabled = true; |
419 } | 418 } |
420 } | 419 } |
421 ++pmd_async_state->next_dump_provider; | 420 pmd_async_state->pending_dump_providers.pop_front(); |
422 finalize = pmd_async_state->next_dump_provider == dump_providers_.end(); | 421 } // AutoLock(lock_) |
423 | |
424 if (mdp_info->unregistered) | |
425 dump_providers_.erase(mdp_info); | |
426 } | |
427 | 422 |
428 if (!skip_dump && !dump_successful) { | 423 if (!skip_dump && !dump_successful) { |
429 LOG(ERROR) << "MemoryDumpProvider \"" << dump_provider_name << "\" failed, " | 424 LOG(ERROR) << "MemoryDumpProvider \"" << mdpinfo->name << "\" failed, " |
430 << "possibly due to sandboxing (crbug.com/461788)." | 425 << "possibly due to sandboxing (crbug.com/461788)." |
431 << "Disabling dumper for current process. Try --no-sandbox."; | 426 << "Disabling dumper for current process. Try --no-sandbox."; |
432 } | 427 } |
433 | 428 |
434 if (finalize) | 429 if (pmd_async_state->pending_dump_providers.empty()) |
435 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state)); | 430 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state)); |
436 | 431 |
437 ContinueAsyncProcessDump(std::move(pmd_async_state)); | 432 ContinueAsyncProcessDump(pmd_async_state.release()); |
438 } | 433 } |
439 | 434 |
440 // static | 435 // static |
441 void MemoryDumpManager::FinalizeDumpAndAddToTrace( | 436 void MemoryDumpManager::FinalizeDumpAndAddToTrace( |
442 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { | 437 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { |
438 DCHECK(pmd_async_state->pending_dump_providers.empty()); | |
443 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; | 439 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; |
444 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { | 440 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { |
445 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = | 441 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = |
446 pmd_async_state->callback_task_runner; | 442 pmd_async_state->callback_task_runner; |
447 callback_task_runner->PostTask( | 443 callback_task_runner->PostTask( |
448 FROM_HERE, Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace, | 444 FROM_HERE, Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace, |
449 Passed(&pmd_async_state))); | 445 Passed(&pmd_async_state))); |
450 return; | 446 return; |
451 } | 447 } |
452 | 448 |
(...skipping 23 matching lines...) Expand all Loading... | |
476 | 472 |
477 if (!pmd_async_state->callback.is_null()) { | 473 if (!pmd_async_state->callback.is_null()) { |
478 pmd_async_state->callback.Run(dump_guid, true /* success */); | 474 pmd_async_state->callback.Run(dump_guid, true /* success */); |
479 pmd_async_state->callback.Reset(); | 475 pmd_async_state->callback.Reset(); |
480 } | 476 } |
481 | 477 |
482 TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump", | 478 TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump", |
483 TRACE_ID_MANGLE(dump_guid)); | 479 TRACE_ID_MANGLE(dump_guid)); |
484 } | 480 } |
485 | 481 |
486 // static | |
487 void MemoryDumpManager::AbortDumpLocked( | |
488 MemoryDumpCallback callback, | |
489 scoped_refptr<SingleThreadTaskRunner> task_runner, | |
490 uint64_t dump_guid) { | |
491 if (callback.is_null()) | |
492 return; // There is nothing to NACK. | |
493 | |
494 // Post the callback even if we are already on the right thread to avoid | |
495 // invoking the callback while holding the lock_. | |
496 task_runner->PostTask(FROM_HERE, | |
497 Bind(callback, dump_guid, false /* success */)); | |
498 } | |
499 | |
500 void MemoryDumpManager::OnTraceLogEnabled() { | 482 void MemoryDumpManager::OnTraceLogEnabled() { |
501 bool enabled; | 483 bool enabled; |
502 TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled); | 484 TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled); |
503 if (!enabled) | 485 if (!enabled) |
504 return; | 486 return; |
505 | 487 |
506 // Initialize the TraceLog for the current thread. This is to avoid that the | 488 // Initialize the TraceLog for the current thread. This is to avoid that the |
507 // TraceLog memory dump provider is registered lazily in the PostTask() below | 489 // TraceLog memory dump provider is registered lazily in the PostTask() below |
508 // while the |lock_| is taken; | 490 // while the |lock_| is taken; |
509 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); | 491 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); |
(...skipping 24 matching lines...) Expand all Loading... | |
534 TRACE_EVENT_API_ADD_METADATA_EVENT( | 516 TRACE_EVENT_API_ADD_METADATA_EVENT( |
535 "typeNames", "typeNames", | 517 "typeNames", "typeNames", |
536 scoped_refptr<ConvertableToTraceFormat>(type_name_deduplicator)); | 518 scoped_refptr<ConvertableToTraceFormat>(type_name_deduplicator)); |
537 } | 519 } |
538 | 520 |
539 DCHECK(!dump_thread_); | 521 DCHECK(!dump_thread_); |
540 dump_thread_ = std::move(dump_thread); | 522 dump_thread_ = std::move(dump_thread); |
541 session_state_ = new MemoryDumpSessionState(stack_frame_deduplicator, | 523 session_state_ = new MemoryDumpSessionState(stack_frame_deduplicator, |
542 type_name_deduplicator); | 524 type_name_deduplicator); |
543 | 525 |
544 for (auto it = dump_providers_.begin(); it != dump_providers_.end(); ++it) { | 526 for (const scoped_refptr<MemoryDumpProviderInfo>& mdpinfo : dump_providers_) { |
545 it->disabled = false; | 527 mdpinfo->disabled = false; |
546 it->consecutive_failures = 0; | 528 mdpinfo->consecutive_failures = 0; |
547 } | 529 } |
548 | 530 |
549 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); | 531 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); |
550 | 532 |
551 // TODO(primiano): This is a temporary hack to disable periodic memory dumps | 533 // TODO(primiano): This is a temporary hack to disable periodic memory dumps |
552 // when running memory benchmarks until telemetry uses TraceConfig to | 534 // when running memory benchmarks until telemetry uses TraceConfig to |
553 // enable/disable periodic dumps. See crbug.com/529184 . | 535 // enable/disable periodic dumps. See crbug.com/529184 . |
554 if (!is_coordinator_ || | 536 if (!is_coordinator_ || |
555 CommandLine::ForCurrentProcess()->HasSwitch( | 537 CommandLine::ForCurrentProcess()->HasSwitch( |
556 "enable-memory-benchmarking")) { | 538 "enable-memory-benchmarking")) { |
(...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
610 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( | 592 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo( |
611 MemoryDumpProvider* dump_provider, | 593 MemoryDumpProvider* dump_provider, |
612 const char* name, | 594 const char* name, |
613 const scoped_refptr<SingleThreadTaskRunner>& task_runner, | 595 const scoped_refptr<SingleThreadTaskRunner>& task_runner, |
614 const MemoryDumpProvider::Options& options) | 596 const MemoryDumpProvider::Options& options) |
615 : dump_provider(dump_provider), | 597 : dump_provider(dump_provider), |
616 name(name), | 598 name(name), |
617 task_runner(task_runner), | 599 task_runner(task_runner), |
618 options(options), | 600 options(options), |
619 consecutive_failures(0), | 601 consecutive_failures(0), |
620 disabled(false), | 602 disabled(false) {} |
621 unregistered(false) {} | |
622 | 603 |
623 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {} | 604 MemoryDumpManager::MemoryDumpProviderInfo::~MemoryDumpProviderInfo() {} |
624 | 605 |
625 bool MemoryDumpManager::MemoryDumpProviderInfo::operator<( | 606 bool MemoryDumpManager::MemoryDumpProviderInfo::Comparator::operator()( |
626 const MemoryDumpProviderInfo& other) const { | 607 const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& a, |
627 if (task_runner == other.task_runner) | 608 const scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo>& b) const { |
628 return dump_provider < other.dump_provider; | 609 if (!a || !b) |
610 return a.get() < b.get(); | |
611 if (a->task_runner == b->task_runner) | |
612 return a->dump_provider < b->dump_provider; | |
629 // Ensure that unbound providers (task_runner == nullptr) always run last. | 613 // Ensure that unbound providers (task_runner == nullptr) always run last. |
630 return !(task_runner < other.task_runner); | 614 return !(a->task_runner < b->task_runner); |
631 } | 615 } |
632 | 616 |
633 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState( | 617 MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState( |
634 MemoryDumpRequestArgs req_args, | 618 MemoryDumpRequestArgs req_args, |
635 MemoryDumpProviderInfoSet::iterator next_dump_provider, | 619 const MemoryDumpProviderInfo::OrderedSet& dump_providers, |
636 const scoped_refptr<MemoryDumpSessionState>& session_state, | 620 const scoped_refptr<MemoryDumpSessionState>& session_state, |
637 MemoryDumpCallback callback, | 621 MemoryDumpCallback callback, |
638 const scoped_refptr<SingleThreadTaskRunner>& dump_thread_task_runner) | 622 const scoped_refptr<SingleThreadTaskRunner>& dump_thread_task_runner) |
639 : req_args(req_args), | 623 : req_args(req_args), |
640 next_dump_provider(next_dump_provider), | 624 pending_dump_providers(dump_providers.begin(), dump_providers.end()), |
Ruud van Asseldonk
2015/12/17 20:20:39
If you make this a vector, this will cause inserti
Primiano Tucci (use gerrit)
2015/12/18 12:00:31
ok switches to a vector (and using that as a stack
| |
641 session_state(session_state), | 625 session_state(session_state), |
642 callback(callback), | 626 callback(callback), |
643 callback_task_runner(MessageLoop::current()->task_runner()), | 627 callback_task_runner(MessageLoop::current()->task_runner()), |
644 dump_thread_task_runner(dump_thread_task_runner) {} | 628 dump_thread_task_runner(dump_thread_task_runner) {} |
645 | 629 |
646 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() { | 630 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() { |
647 } | 631 } |
648 | 632 |
649 ProcessMemoryDump* MemoryDumpManager::ProcessMemoryDumpAsyncState:: | 633 ProcessMemoryDump* MemoryDumpManager::ProcessMemoryDumpAsyncState:: |
650 GetOrCreateMemoryDumpContainerForProcess(ProcessId pid) { | 634 GetOrCreateMemoryDumpContainerForProcess(ProcessId pid) { |
651 auto iter = process_dumps.find(pid); | 635 auto iter = process_dumps.find(pid); |
652 if (iter == process_dumps.end()) { | 636 if (iter == process_dumps.end()) { |
653 scoped_ptr<ProcessMemoryDump> new_pmd(new ProcessMemoryDump(session_state)); | 637 scoped_ptr<ProcessMemoryDump> new_pmd(new ProcessMemoryDump(session_state)); |
654 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; | 638 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; |
655 } | 639 } |
656 return iter->second.get(); | 640 return iter->second.get(); |
657 } | 641 } |
658 | 642 |
659 } // namespace trace_event | 643 } // namespace trace_event |
660 } // namespace base | 644 } // namespace base |
OLD | NEW |