OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <utility> | 8 #include <utility> |
9 | 9 |
10 #include "base/atomic_sequence_num.h" | 10 #include "base/atomic_sequence_num.h" |
11 #include "base/base_switches.h" | 11 #include "base/base_switches.h" |
12 #include "base/command_line.h" | 12 #include "base/command_line.h" |
13 #include "base/compiler_specific.h" | 13 #include "base/compiler_specific.h" |
| 14 #include "base/memory/ptr_util.h" |
14 #include "base/thread_task_runner_handle.h" | 15 #include "base/thread_task_runner_handle.h" |
15 #include "base/threading/thread.h" | 16 #include "base/threading/thread.h" |
16 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" | 17 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" |
17 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h" | 18 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h" |
18 #include "base/trace_event/heap_profiler_type_name_deduplicator.h" | 19 #include "base/trace_event/heap_profiler_type_name_deduplicator.h" |
19 #include "base/trace_event/malloc_dump_provider.h" | 20 #include "base/trace_event/malloc_dump_provider.h" |
20 #include "base/trace_event/memory_dump_provider.h" | 21 #include "base/trace_event/memory_dump_provider.h" |
21 #include "base/trace_event/memory_dump_session_state.h" | 22 #include "base/trace_event/memory_dump_session_state.h" |
22 #include "base/trace_event/process_memory_dump.h" | 23 #include "base/trace_event/process_memory_dump.h" |
23 #include "base/trace_event/trace_event.h" | 24 #include "base/trace_event/trace_event.h" |
(...skipping 50 matching lines...)
74 | 75 |
75 if (!wrapped_callback.is_null()) { | 76 if (!wrapped_callback.is_null()) { |
76 wrapped_callback.Run(dump_guid, success); | 77 wrapped_callback.Run(dump_guid, success); |
77 wrapped_callback.Reset(); | 78 wrapped_callback.Reset(); |
78 } | 79 } |
79 } | 80 } |
80 | 81 |
81 // Wraps a ConvertableToTraceFormat owned by the | 82 // Wraps a ConvertableToTraceFormat owned by the |
82 // |session_state| into a proxy object that can be added to the trace event log. | 83 // |session_state| into a proxy object that can be added to the trace event log. |
83 // This solves the problem that the MemoryDumpSessionState is refcounted | 84 // This solves the problem that the MemoryDumpSessionState is refcounted |
84 // but the tracing subsystem wants a scoped_ptr<ConvertableToTraceFormat>. | 85 // but the tracing subsystem wants a std::unique_ptr<ConvertableToTraceFormat>. |
85 template <typename T> | 86 template <typename T> |
86 struct SessionStateConvertableProxy : public ConvertableToTraceFormat { | 87 struct SessionStateConvertableProxy : public ConvertableToTraceFormat { |
87 using GetterFunctPtr = T* (MemoryDumpSessionState::*)() const; | 88 using GetterFunctPtr = T* (MemoryDumpSessionState::*)() const; |
88 | 89 |
89 SessionStateConvertableProxy( | 90 SessionStateConvertableProxy( |
90 scoped_refptr<MemoryDumpSessionState> session_state, | 91 scoped_refptr<MemoryDumpSessionState> session_state, |
91 GetterFunctPtr getter_function) | 92 GetterFunctPtr getter_function) |
92 : session_state(session_state), getter_function(getter_function) {} | 93 : session_state(session_state), getter_function(getter_function) {} |
93 | 94 |
94 void AppendAsTraceFormat(std::string* out) const override { | 95 void AppendAsTraceFormat(std::string* out) const override { |
(...skipping 165 matching lines...)
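Note: the proxy above exists because the session state is kept alive by refcounting while the trace log insists on uniquely owning its convertables. A minimal standalone sketch of the same idea, with std::shared_ptr standing in for scoped_refptr (all names below are hypothetical, not part of this CL):

#include <memory>
#include <string>

// Stand-in for ConvertableToTraceFormat.
struct Convertable {
  virtual ~Convertable() {}
  virtual void AppendAsTraceFormat(std::string* out) const = 0;
};

// Stand-in for MemoryDumpSessionState; shared_ptr plays scoped_refptr.
struct SessionState {
  Convertable* deduplicator() const { return deduplicator_.get(); }
  std::unique_ptr<Convertable> deduplicator_;
};

// Uniquely owned by the trace log, but keeps the refcounted state alive and
// forwards serialization to the object the state actually owns.
template <typename T>
struct Proxy : Convertable {
  using Getter = T* (SessionState::*)() const;
  Proxy(std::shared_ptr<SessionState> state, Getter getter)
      : state(std::move(state)), getter(getter) {}
  void AppendAsTraceFormat(std::string* out) const override {
    ((*state).*getter)()->AppendAsTraceFormat(out);
  }
  std::shared_ptr<SessionState> state;
  Getter getter;
};

The trace log can then hold std::unique_ptr<Convertable>(new Proxy<Convertable>(state, &SessionState::deduplicator)) without taking over the session state's lifetime.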
260 | 261 |
261 if (heap_profiling_enabled_) | 262 if (heap_profiling_enabled_) |
262 mdp->OnHeapProfilingEnabled(true); | 263 mdp->OnHeapProfilingEnabled(true); |
263 } | 264 } |
264 | 265 |
265 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { | 266 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) { |
266 UnregisterDumpProviderInternal(mdp, false /* delete_async */); | 267 UnregisterDumpProviderInternal(mdp, false /* delete_async */); |
267 } | 268 } |
268 | 269 |
269 void MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon( | 270 void MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon( |
270 scoped_ptr<MemoryDumpProvider> mdp) { | 271 std::unique_ptr<MemoryDumpProvider> mdp) { |
271 UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */); | 272 UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */); |
272 } | 273 } |
273 | 274 |
274 void MemoryDumpManager::UnregisterDumpProviderInternal( | 275 void MemoryDumpManager::UnregisterDumpProviderInternal( |
275 MemoryDumpProvider* mdp, | 276 MemoryDumpProvider* mdp, |
276 bool take_mdp_ownership_and_delete_async) { | 277 bool take_mdp_ownership_and_delete_async) { |
277 scoped_ptr<MemoryDumpProvider> owned_mdp; | 278 std::unique_ptr<MemoryDumpProvider> owned_mdp; |
278 if (take_mdp_ownership_and_delete_async) | 279 if (take_mdp_ownership_and_delete_async) |
279 owned_mdp.reset(mdp); | 280 owned_mdp.reset(mdp); |
280 | 281 |
281 AutoLock lock(lock_); | 282 AutoLock lock(lock_); |
282 | 283 |
283 auto mdp_iter = dump_providers_.begin(); | 284 auto mdp_iter = dump_providers_.begin(); |
284 for (; mdp_iter != dump_providers_.end(); ++mdp_iter) { | 285 for (; mdp_iter != dump_providers_.end(); ++mdp_iter) { |
285 if ((*mdp_iter)->dump_provider == mdp) | 286 if ((*mdp_iter)->dump_provider == mdp) |
286 break; | 287 break; |
287 } | 288 } |
(...skipping 75 matching lines...)
363 MemoryDumpType dump_type, | 364 MemoryDumpType dump_type, |
364 MemoryDumpLevelOfDetail level_of_detail) { | 365 MemoryDumpLevelOfDetail level_of_detail) { |
365 RequestGlobalDump(dump_type, level_of_detail, MemoryDumpCallback()); | 366 RequestGlobalDump(dump_type, level_of_detail, MemoryDumpCallback()); |
366 } | 367 } |
367 | 368 |
368 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, | 369 void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args, |
369 const MemoryDumpCallback& callback) { | 370 const MemoryDumpCallback& callback) { |
370 TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump", | 371 TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(kTraceCategory, "ProcessMemoryDump", |
371 TRACE_ID_MANGLE(args.dump_guid)); | 372 TRACE_ID_MANGLE(args.dump_guid)); |
372 | 373 |
373 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; | 374 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state; |
374 { | 375 { |
375 AutoLock lock(lock_); | 376 AutoLock lock(lock_); |
376 // |dump_thread_| can be nullptr if tracing was disabled before reaching | 377 // |dump_thread_| can be nullptr if tracing was disabled before reaching |
377 // here. SetupNextMemoryDump() is robust enough to tolerate it and will | 378 // here. SetupNextMemoryDump() is robust enough to tolerate it and will |
378 // NACK the dump. | 379 // NACK the dump. |
379 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( | 380 pmd_async_state.reset(new ProcessMemoryDumpAsyncState( |
380 args, dump_providers_, session_state_, callback, | 381 args, dump_providers_, session_state_, callback, |
381 dump_thread_ ? dump_thread_->task_runner() : nullptr)); | 382 dump_thread_ ? dump_thread_->task_runner() : nullptr)); |
382 } | 383 } |
383 | 384 |
384 TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump", | 385 TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump", |
385 TRACE_ID_MANGLE(args.dump_guid), | 386 TRACE_ID_MANGLE(args.dump_guid), |
386 TRACE_EVENT_FLAG_FLOW_OUT); | 387 TRACE_EVENT_FLAG_FLOW_OUT); |
387 | 388 |
388 // Start the process dump. This involves task runner hops as specified by the | 389 // Start the process dump. This involves task runner hops as specified by the |
389 // MemoryDumpProvider(s) in RegisterDumpProvider(). | 390 // MemoryDumpProvider(s) in RegisterDumpProvider(). |
390 SetupNextMemoryDump(std::move(pmd_async_state)); | 391 SetupNextMemoryDump(std::move(pmd_async_state)); |
391 } | 392 } |
392 | 393 |
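Note: the FLOW_OUT flag on the event above pairs with the FLOW_IN event emitted later by FinalizeDumpAndAddToTrace(); both mangle the same dump_guid, so the trace viewer can draw an arrow from the start of a dump to its finalization even when the two happen on different threads:

// Start of the dump (CreateProcessDump):
TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump",
                       TRACE_ID_MANGLE(args.dump_guid),
                       TRACE_EVENT_FLAG_FLOW_OUT);
// End of the dump (FinalizeDumpAndAddToTrace, same mangled guid):
TRACE_EVENT_WITH_FLOW0(kTraceCategory,
                       "MemoryDumpManager::FinalizeDumpAndAddToTrace",
                       TRACE_ID_MANGLE(dump_guid), TRACE_EVENT_FLAG_FLOW_IN);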
393 // PostTask InvokeOnMemoryDump() to the dump provider's sequenced task runner. A | 394 // PostTask InvokeOnMemoryDump() to the dump provider's sequenced task runner. A |
394 // PostTask is always required for a generic SequencedTaskRunner to ensure that | 395 // PostTask is always required for a generic SequencedTaskRunner to ensure that |
395 // no other task is running on it concurrently. SetupNextMemoryDump() and | 396 // no other task is running on it concurrently. SetupNextMemoryDump() and |
396 // InvokeOnMemoryDump() are called alternately, which linearizes the dump | 397 // InvokeOnMemoryDump() are called alternately, which linearizes the dump |
397 // provider's OnMemoryDump invocations. | 398 // provider's OnMemoryDump invocations. |
398 // At most one of either SetupNextMemoryDump() or InvokeOnMemoryDump() can be | 399 // At most one of either SetupNextMemoryDump() or InvokeOnMemoryDump() can be |
399 // active at any time for a given PMD, regardless of status of the |lock_|. | 400 // active at any time for a given PMD, regardless of status of the |lock_|. |
400 // |lock_| is used in these functions purely to ensure consistency w.r.t. | 401 // |lock_| is used in these functions purely to ensure consistency w.r.t. |
401 // (un)registrations of |dump_providers_|. | 402 // (un)registrations of |dump_providers_|. |
402 void MemoryDumpManager::SetupNextMemoryDump( | 403 void MemoryDumpManager::SetupNextMemoryDump( |
403 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { | 404 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { |
404 // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs | 405 // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs |
405 // in the PostTask below don't end up registering their own dump providers | 406 // in the PostTask below don't end up registering their own dump providers |
406 // (for discounting trace memory overhead) while holding the |lock_|. | 407 // (for discounting trace memory overhead) while holding the |lock_|. |
407 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); | 408 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); |
408 | 409 |
409 // If this was the last hop, create a trace event, add it to the trace and | 410 // If this was the last hop, create a trace event, add it to the trace and |
410 // finalize the process dump (invoke the callback). | 411 // finalize the process dump (invoke the callback). |
411 if (pmd_async_state->pending_dump_providers.empty()) | 412 if (pmd_async_state->pending_dump_providers.empty()) |
412 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state)); | 413 return FinalizeDumpAndAddToTrace(std::move(pmd_async_state)); |
413 | 414 |
(...skipping 61 matching lines...)
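Note: the skipped region contains the |did_post_task| corner case that the comment below refers to. A simplified reconstruction of that pattern, not the elided body itself: the state crosses PostTask() as a raw pointer precisely so ownership can be reclaimed when the post fails.

// Sketch only (hypothetical reconstruction of the elided logic):
ProcessMemoryDumpAsyncState* raw = pmd_async_state.release();
bool did_post_task = task_runner->PostTask(
    FROM_HERE,
    Bind(&MemoryDumpManager::InvokeOnMemoryDump, Unretained(this), raw));
if (!did_post_task) {
  // The runner rejected the task (e.g. it is shutting down). PostTask() does
  // not delete raw-pointer arguments, so the dump can proceed inline.
  InvokeOnMemoryDump(raw);
}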
475 // (unless disabled). | 476 // (unless disabled). |
476 void MemoryDumpManager::InvokeOnMemoryDump( | 477 void MemoryDumpManager::InvokeOnMemoryDump( |
477 ProcessMemoryDumpAsyncState* owned_pmd_async_state) { | 478 ProcessMemoryDumpAsyncState* owned_pmd_async_state) { |
478 // In theory |owned_pmd_async_state| should be a scoped_ptr. The only reason | 479 // In theory |owned_pmd_async_state| should be a std::unique_ptr. The only reason |
479 // why it isn't is because of the corner case logic of |did_post_task| | 480 // why it isn't is because of the corner case logic of |did_post_task| |
480 // above, which needs to take back the ownership of the |pmd_async_state| when | 481 // above, which needs to take back the ownership of the |pmd_async_state| when |
481 // the PostTask() fails. | 482 // the PostTask() fails. |
482 // Unfortunately, PostTask() destroys the scoped_ptr arguments upon failure | 483 // Unfortunately, PostTask() destroys the std::unique_ptr arguments upon failure |
483 // to prevent accidental leaks. Using a scoped_ptr would prevent us from | 484 // to prevent accidental leaks. Using a std::unique_ptr would prevent us from |
484 // skipping the hop and moving on. Hence the manual naked -> scoped ptr juggling. | 485 // skipping the hop and moving on. Hence the manual naked -> unique_ptr juggling. |
485 auto pmd_async_state = make_scoped_ptr(owned_pmd_async_state); | 486 auto pmd_async_state = WrapUnique(owned_pmd_async_state); |
486 owned_pmd_async_state = nullptr; | 487 owned_pmd_async_state = nullptr; |
487 | 488 |
488 // Read MemoryDumpProviderInfo thread safety considerations in | 489 // Read MemoryDumpProviderInfo thread safety considerations in |
489 // memory_dump_manager.h when accessing |mdpinfo| fields. | 490 // memory_dump_manager.h when accessing |mdpinfo| fields. |
490 MemoryDumpProviderInfo* mdpinfo = | 491 MemoryDumpProviderInfo* mdpinfo = |
491 pmd_async_state->pending_dump_providers.back().get(); | 492 pmd_async_state->pending_dump_providers.back().get(); |
492 | 493 |
493 DCHECK(!mdpinfo->task_runner || | 494 DCHECK(!mdpinfo->task_runner || |
494 mdpinfo->task_runner->RunsTasksOnCurrentThread()); | 495 mdpinfo->task_runner->RunsTasksOnCurrentThread()); |
495 | 496 |
(...skipping 32 matching lines...)
528 mdpinfo->consecutive_failures = | 529 mdpinfo->consecutive_failures = |
529 dump_successful ? 0 : mdpinfo->consecutive_failures + 1; | 530 dump_successful ? 0 : mdpinfo->consecutive_failures + 1; |
530 } | 531 } |
531 | 532 |
532 pmd_async_state->pending_dump_providers.pop_back(); | 533 pmd_async_state->pending_dump_providers.pop_back(); |
533 SetupNextMemoryDump(std::move(pmd_async_state)); | 534 SetupNextMemoryDump(std::move(pmd_async_state)); |
534 } | 535 } |
535 | 536 |
536 // static | 537 // static |
537 void MemoryDumpManager::FinalizeDumpAndAddToTrace( | 538 void MemoryDumpManager::FinalizeDumpAndAddToTrace( |
538 scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { | 539 std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) { |
539 DCHECK(pmd_async_state->pending_dump_providers.empty()); | 540 DCHECK(pmd_async_state->pending_dump_providers.empty()); |
540 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; | 541 const uint64_t dump_guid = pmd_async_state->req_args.dump_guid; |
541 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { | 542 if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) { |
542 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = | 543 scoped_refptr<SingleThreadTaskRunner> callback_task_runner = |
543 pmd_async_state->callback_task_runner; | 544 pmd_async_state->callback_task_runner; |
544 callback_task_runner->PostTask( | 545 callback_task_runner->PostTask( |
545 FROM_HERE, Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace, | 546 FROM_HERE, Bind(&MemoryDumpManager::FinalizeDumpAndAddToTrace, |
546 Passed(&pmd_async_state))); | 547 Passed(&pmd_async_state))); |
547 return; | 548 return; |
548 } | 549 } |
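Note: Passed() moves the std::unique_ptr into the bound callback at Bind() time, which is why |callback_task_runner| is copied into a local scoped_refptr first: once Passed() has claimed |pmd_async_state|, the member can no longer be reached through it safely. The idiom in isolation (Finish and State are illustration-only names):

void Finish(std::unique_ptr<State> state) {
  if (!state->callback_task_runner->BelongsToCurrentThread()) {
    // Copy the runner out before Bind() empties |state|.
    scoped_refptr<SingleThreadTaskRunner> runner = state->callback_task_runner;
    runner->PostTask(FROM_HERE, Bind(&Finish, Passed(&state)));
    return;  // Finish() re-runs on |runner| with the moved-in state.
  }
  // Now on the thread that requested the dump: safe to run its callback.
}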
549 | 550 |
550 TRACE_EVENT_WITH_FLOW0(kTraceCategory, | 551 TRACE_EVENT_WITH_FLOW0(kTraceCategory, |
551 "MemoryDumpManager::FinalizeDumpAndAddToTrace", | 552 "MemoryDumpManager::FinalizeDumpAndAddToTrace", |
552 TRACE_ID_MANGLE(dump_guid), TRACE_EVENT_FLAG_FLOW_IN); | 553 TRACE_ID_MANGLE(dump_guid), TRACE_EVENT_FLAG_FLOW_IN); |
553 | 554 |
554 for (const auto& kv : pmd_async_state->process_dumps) { | 555 for (const auto& kv : pmd_async_state->process_dumps) { |
555 ProcessId pid = kv.first; // kNullProcessId for the current process. | 556 ProcessId pid = kv.first; // kNullProcessId for the current process. |
556 ProcessMemoryDump* process_memory_dump = kv.second.get(); | 557 ProcessMemoryDump* process_memory_dump = kv.second.get(); |
557 scoped_ptr<TracedValue> traced_value(new TracedValue); | 558 std::unique_ptr<TracedValue> traced_value(new TracedValue); |
558 process_memory_dump->AsValueInto(traced_value.get()); | 559 process_memory_dump->AsValueInto(traced_value.get()); |
559 traced_value->SetString("level_of_detail", | 560 traced_value->SetString("level_of_detail", |
560 MemoryDumpLevelOfDetailToString( | 561 MemoryDumpLevelOfDetailToString( |
561 pmd_async_state->req_args.level_of_detail)); | 562 pmd_async_state->req_args.level_of_detail)); |
562 const char* const event_name = | 563 const char* const event_name = |
563 MemoryDumpTypeToString(pmd_async_state->req_args.dump_type); | 564 MemoryDumpTypeToString(pmd_async_state->req_args.dump_type); |
564 | 565 |
565 scoped_ptr<ConvertableToTraceFormat> event_value(std::move(traced_value)); | 566 std::unique_ptr<ConvertableToTraceFormat> event_value( |
| 567 std::move(traced_value)); |
566 TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_PROCESS_ID( | 568 TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_PROCESS_ID( |
567 TRACE_EVENT_PHASE_MEMORY_DUMP, | 569 TRACE_EVENT_PHASE_MEMORY_DUMP, |
568 TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name, | 570 TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name, |
569 trace_event_internal::kGlobalScope, dump_guid, pid, | 571 trace_event_internal::kGlobalScope, dump_guid, pid, |
570 kTraceEventNumArgs, kTraceEventArgNames, | 572 kTraceEventNumArgs, kTraceEventArgNames, |
571 kTraceEventArgTypes, nullptr /* arg_values */, &event_value, | 573 kTraceEventArgTypes, nullptr /* arg_values */, &event_value, |
572 TRACE_EVENT_FLAG_HAS_ID); | 574 TRACE_EVENT_FLAG_HAS_ID); |
573 } | 575 } |
574 | 576 |
575 bool tracing_still_enabled; | 577 bool tracing_still_enabled; |
(...skipping 15 matching lines...)
591 TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled); | 593 TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled); |
592 if (!enabled) | 594 if (!enabled) |
593 return; | 595 return; |
594 | 596 |
595 // Initialize the TraceLog for the current thread. This prevents the TraceLog | 597 // Initialize the TraceLog for the current thread. This prevents the TraceLog |
596 // memory dump provider from being registered lazily in the PostTask() below | 598 // memory dump provider from being registered lazily in the PostTask() below |
597 // while the |lock_| is taken. | 599 // while the |lock_| is taken. |
598 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); | 600 TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported(); |
599 | 601 |
600 // Spin up the thread used to invoke unbound dump providers. | 602 // Spin up the thread used to invoke unbound dump providers. |
601 scoped_ptr<Thread> dump_thread(new Thread("MemoryInfra")); | 603 std::unique_ptr<Thread> dump_thread(new Thread("MemoryInfra")); |
602 if (!dump_thread->Start()) { | 604 if (!dump_thread->Start()) { |
603 LOG(ERROR) << "Failed to start the memory-infra thread for tracing"; | 605 LOG(ERROR) << "Failed to start the memory-infra thread for tracing"; |
604 return; | 606 return; |
605 } | 607 } |
606 | 608 |
607 AutoLock lock(lock_); | 609 AutoLock lock(lock_); |
608 | 610 |
609 DCHECK(delegate_); // At this point we must have a delegate. | 611 DCHECK(delegate_); // At this point we must have a delegate. |
610 session_state_ = new MemoryDumpSessionState; | 612 session_state_ = new MemoryDumpSessionState; |
611 | 613 |
612 if (heap_profiling_enabled_) { | 614 if (heap_profiling_enabled_) { |
613 // If heap profiling is enabled, the stack frame deduplicator and type name | 615 // If heap profiling is enabled, the stack frame deduplicator and type name |
614 // deduplicator will be in use. Add metadata events to write the frames | 616 // deduplicator will be in use. Add metadata events to write the frames |
615 // and type IDs. | 617 // and type IDs. |
616 session_state_->SetStackFrameDeduplicator( | 618 session_state_->SetStackFrameDeduplicator( |
617 make_scoped_ptr(new StackFrameDeduplicator)); | 619 WrapUnique(new StackFrameDeduplicator)); |
618 | 620 |
619 session_state_->SetTypeNameDeduplicator( | 621 session_state_->SetTypeNameDeduplicator( |
620 make_scoped_ptr(new TypeNameDeduplicator)); | 622 WrapUnique(new TypeNameDeduplicator)); |
621 | 623 |
622 TRACE_EVENT_API_ADD_METADATA_EVENT( | 624 TRACE_EVENT_API_ADD_METADATA_EVENT( |
623 TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames", | 625 TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames", |
624 "stackFrames", | 626 "stackFrames", |
625 make_scoped_ptr( | 627 WrapUnique( |
626 new SessionStateConvertableProxy<StackFrameDeduplicator>( | 628 new SessionStateConvertableProxy<StackFrameDeduplicator>( |
627 session_state_, | 629 session_state_, |
628 &MemoryDumpSessionState::stack_frame_deduplicator))); | 630 &MemoryDumpSessionState::stack_frame_deduplicator))); |
629 | 631 |
630 TRACE_EVENT_API_ADD_METADATA_EVENT( | 632 TRACE_EVENT_API_ADD_METADATA_EVENT( |
631 TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames", | 633 TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames", |
632 "typeNames", | 634 "typeNames", |
633 make_scoped_ptr(new SessionStateConvertableProxy<TypeNameDeduplicator>( | 635 WrapUnique(new SessionStateConvertableProxy<TypeNameDeduplicator>( |
634 session_state_, &MemoryDumpSessionState::type_name_deduplicator))); | 636 session_state_, &MemoryDumpSessionState::type_name_deduplicator))); |
635 } | 637 } |
636 | 638 |
637 DCHECK(!dump_thread_); | 639 DCHECK(!dump_thread_); |
638 dump_thread_ = std::move(dump_thread); | 640 dump_thread_ = std::move(dump_thread); |
639 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); | 641 subtle::NoBarrier_Store(&memory_tracing_enabled_, 1); |
640 | 642 |
641 // TODO(primiano): This is a temporary hack to disable periodic memory dumps | 643 // TODO(primiano): This is a temporary hack to disable periodic memory dumps |
642 // when running memory benchmarks until telemetry uses TraceConfig to | 644 // when running memory benchmarks until telemetry uses TraceConfig to |
643 // enable/disable periodic dumps. See crbug.com/529184 . | 645 // enable/disable periodic dumps. See crbug.com/529184 . |
(...skipping 31 matching lines...)
675 periodic_dump_timer_.Start(FROM_HERE, | 677 periodic_dump_timer_.Start(FROM_HERE, |
676 TimeDelta::FromMilliseconds(min_timer_period_ms), | 678 TimeDelta::FromMilliseconds(min_timer_period_ms), |
677 base::Bind(&RequestPeriodicGlobalDump)); | 679 base::Bind(&RequestPeriodicGlobalDump)); |
678 } | 680 } |
679 | 681 |
680 void MemoryDumpManager::OnTraceLogDisabled() { | 682 void MemoryDumpManager::OnTraceLogDisabled() { |
681 // There might be a memory dump in progress while this happens. Therefore, | 683 // There might be a memory dump in progress while this happens. Therefore, |
682 // ensure that the MDM state which depends on the tracing enabled / disabled | 684 // ensure that the MDM state which depends on the tracing enabled / disabled |
683 // state is always accessed by the dumping methods holding the |lock_|. | 685 // state is always accessed by the dumping methods holding the |lock_|. |
684 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); | 686 subtle::NoBarrier_Store(&memory_tracing_enabled_, 0); |
685 scoped_ptr<Thread> dump_thread; | 687 std::unique_ptr<Thread> dump_thread; |
686 { | 688 { |
687 AutoLock lock(lock_); | 689 AutoLock lock(lock_); |
688 dump_thread = std::move(dump_thread_); | 690 dump_thread = std::move(dump_thread_); |
689 session_state_ = nullptr; | 691 session_state_ = nullptr; |
690 } | 692 } |
691 | 693 |
692 // Thread stops are blocking and must be performed outside of the |lock_| | 694 // Thread stops are blocking and must be performed outside of the |lock_| |
693 // or they will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). | 695 // or they will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it). |
694 periodic_dump_timer_.Stop(); | 696 periodic_dump_timer_.Stop(); |
695 if (dump_thread) | 697 if (dump_thread) |
(...skipping 46 matching lines...)
742 pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend()); | 744 pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend()); |
743 } | 745 } |
744 | 746 |
745 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() { | 747 MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() { |
746 } | 748 } |
747 | 749 |
748 ProcessMemoryDump* MemoryDumpManager::ProcessMemoryDumpAsyncState:: | 750 ProcessMemoryDump* MemoryDumpManager::ProcessMemoryDumpAsyncState:: |
749 GetOrCreateMemoryDumpContainerForProcess(ProcessId pid) { | 751 GetOrCreateMemoryDumpContainerForProcess(ProcessId pid) { |
750 auto iter = process_dumps.find(pid); | 752 auto iter = process_dumps.find(pid); |
751 if (iter == process_dumps.end()) { | 753 if (iter == process_dumps.end()) { |
752 scoped_ptr<ProcessMemoryDump> new_pmd(new ProcessMemoryDump(session_state)); | 754 std::unique_ptr<ProcessMemoryDump> new_pmd( |
| 755 new ProcessMemoryDump(session_state)); |
753 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; | 756 iter = process_dumps.insert(std::make_pair(pid, std::move(new_pmd))).first; |
754 } | 757 } |
755 return iter->second.get(); | 758 return iter->second.get(); |
756 } | 759 } |
757 | 760 |
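Note: the find-or-insert above guarantees one ProcessMemoryDump container per target process within a dump cycle. A hypothetical caller (|state| is a ProcessMemoryDumpAsyncState*, |child_pid| an arbitrary pid; both names are for illustration only):

// Container for the current process (kNullProcessId, as noted above):
ProcessMemoryDump* own_pmd =
    state->GetOrCreateMemoryDumpContainerForProcess(kNullProcessId);
// Repeated lookups with the same pid return the same container:
ProcessMemoryDump* child_pmd =
    state->GetOrCreateMemoryDumpContainerForProcess(child_pid);
DCHECK_EQ(child_pmd,
          state->GetOrCreateMemoryDumpContainerForProcess(child_pid));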
758 } // namespace trace_event | 761 } // namespace trace_event |
759 } // namespace base | 762 } // namespace base |