| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
| 6 | 6 |
| 7 #include <stdint.h> | 7 #include <stdint.h> |
| 8 | 8 |
| | 9 #include <memory> |
| 9 #include <vector> | 10 #include <vector> |
| 10 | 11 |
| 11 #include "base/bind_helpers.h" | 12 #include "base/bind_helpers.h" |
| 13 #include "base/memory/ptr_util.h" |
| 12 #include "base/memory/ref_counted_memory.h" | 14 #include "base/memory/ref_counted_memory.h" |
| 13 #include "base/memory/scoped_ptr.h" | |
| 14 #include "base/message_loop/message_loop.h" | 15 #include "base/message_loop/message_loop.h" |
| 15 #include "base/run_loop.h" | 16 #include "base/run_loop.h" |
| 16 #include "base/strings/stringprintf.h" | 17 #include "base/strings/stringprintf.h" |
| 17 #include "base/synchronization/waitable_event.h" | 18 #include "base/synchronization/waitable_event.h" |
| 18 #include "base/test/test_io_thread.h" | 19 #include "base/test/test_io_thread.h" |
| 19 #include "base/test/trace_event_analyzer.h" | 20 #include "base/test/trace_event_analyzer.h" |
| 20 #include "base/thread_task_runner_handle.h" | 21 #include "base/thread_task_runner_handle.h" |
| 21 #include "base/threading/platform_thread.h" | 22 #include "base/threading/platform_thread.h" |
| 22 #include "base/threading/sequenced_worker_pool.h" | 23 #include "base/threading/sequenced_worker_pool.h" |
| 23 #include "base/threading/thread.h" | 24 #include "base/threading/thread.h" |
| (...skipping 202 matching lines...) |
| 226 | 227 |
| 227 bool IsPeriodicDumpingEnabled() const { | 228 bool IsPeriodicDumpingEnabled() const { |
| 228 return mdm_->periodic_dump_timer_.IsRunning(); | 229 return mdm_->periodic_dump_timer_.IsRunning(); |
| 229 } | 230 } |
| 230 | 231 |
| 231 int GetMaxConsecutiveFailuresCount() const { | 232 int GetMaxConsecutiveFailuresCount() const { |
| 232 return MemoryDumpManager::kMaxConsecutiveFailuresCount; | 233 return MemoryDumpManager::kMaxConsecutiveFailuresCount; |
| 233 } | 234 } |
| 234 | 235 |
| 235 const MemoryDumpProvider::Options kDefaultOptions; | 236 const MemoryDumpProvider::Options kDefaultOptions; |
| 236 scoped_ptr<MemoryDumpManager> mdm_; | 237 std::unique_ptr<MemoryDumpManager> mdm_; |
| 237 scoped_ptr<MemoryDumpManagerDelegateForTesting> delegate_; | 238 std::unique_ptr<MemoryDumpManagerDelegateForTesting> delegate_; |
| 238 bool last_callback_success_; | 239 bool last_callback_success_; |
| 239 | 240 |
| 240 private: | 241 private: |
| 241 scoped_ptr<MessageLoop> message_loop_; | 242 std::unique_ptr<MessageLoop> message_loop_; |
| 242 | 243 |
| 243 // We want our singleton torn down after each test. | 244 // We want our singleton torn down after each test. |
| 244 ShadowingAtExitManager at_exit_manager_; | 245 ShadowingAtExitManager at_exit_manager_; |
| 245 }; | 246 }; |
| 246 | 247 |
| 247 // Basic sanity checks. Registers a memory dump provider and checks that it is | 248 // Basic sanity checks. Registers a memory dump provider and checks that it is |
| 248 // called, but only when memory-infra is enabled. | 249 // called, but only when memory-infra is enabled. |
| 249 TEST_F(MemoryDumpManagerTest, SingleDumper) { | 250 TEST_F(MemoryDumpManagerTest, SingleDumper) { |
| 250 InitializeMemoryDumpManager(false /* is_coordinator */); | 251 InitializeMemoryDumpManager(false /* is_coordinator */); |
| 251 MockMemoryDumpProvider mdp; | 252 MockMemoryDumpProvider mdp; |
| (...skipping 186 matching lines...) |
| 438 } | 439 } |
| 439 | 440 |
| 440 // Checks that the MemoryDumpManager respects the thread affinity when a | 441 // Checks that the MemoryDumpManager respects the thread affinity when a |
| 441 // MemoryDumpProvider specifies a task_runner(). The test starts by creating 8 | 442 // MemoryDumpProvider specifies a task_runner(). The test starts by creating 8 |
| 442 // threads and registering a MemoryDumpProvider on each of them. At each | 443 // threads and registering a MemoryDumpProvider on each of them. At each |
| 443 // iteration, one thread is removed, to check the live unregistration logic. | 444 // iteration, one thread is removed, to check the live unregistration logic. |
| 444 TEST_F(MemoryDumpManagerTest, RespectTaskRunnerAffinity) { | 445 TEST_F(MemoryDumpManagerTest, RespectTaskRunnerAffinity) { |
| 445 InitializeMemoryDumpManager(false /* is_coordinator */); | 446 InitializeMemoryDumpManager(false /* is_coordinator */); |
| 446 const uint32_t kNumInitialThreads = 8; | 447 const uint32_t kNumInitialThreads = 8; |
| 447 | 448 |
| 448 std::vector<scoped_ptr<Thread>> threads; | 449 std::vector<std::unique_ptr<Thread>> threads; |
| 449 std::vector<scoped_ptr<MockMemoryDumpProvider>> mdps; | 450 std::vector<std::unique_ptr<MockMemoryDumpProvider>> mdps; |
| 450 | 451 |
| 451 // Create the threads and set up the expectations. Given that at each iteration | 452 // Create the threads and set up the expectations. Given that at each iteration |
| 452 // we will pop out one thread/MemoryDumpProvider, each MDP is supposed to be | 453 // we will pop out one thread/MemoryDumpProvider, each MDP is supposed to be |
| 453 // invoked a number of times equal to its index. | 454 // invoked a number of times equal to its index. |
| 454 for (uint32_t i = kNumInitialThreads; i > 0; --i) { | 455 for (uint32_t i = kNumInitialThreads; i > 0; --i) { |
| 455 threads.push_back(make_scoped_ptr(new Thread("test thread"))); | 456 threads.push_back(WrapUnique(new Thread("test thread"))); |
| 456 auto thread = threads.back().get(); | 457 auto thread = threads.back().get(); |
| 457 thread->Start(); | 458 thread->Start(); |
| 458 scoped_refptr<SingleThreadTaskRunner> task_runner = thread->task_runner(); | 459 scoped_refptr<SingleThreadTaskRunner> task_runner = thread->task_runner(); |
| 459 mdps.push_back(make_scoped_ptr(new MockMemoryDumpProvider())); | 460 mdps.push_back(WrapUnique(new MockMemoryDumpProvider())); |
| 460 auto mdp = mdps.back().get(); | 461 auto mdp = mdps.back().get(); |
| 461 RegisterDumpProvider(mdp, task_runner, kDefaultOptions); | 462 RegisterDumpProvider(mdp, task_runner, kDefaultOptions); |
| 462 EXPECT_CALL(*mdp, OnMemoryDump(_, _)) | 463 EXPECT_CALL(*mdp, OnMemoryDump(_, _)) |
| 463 .Times(i) | 464 .Times(i) |
| 464 .WillRepeatedly(Invoke( | 465 .WillRepeatedly(Invoke( |
| 465 [task_runner](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool { | 466 [task_runner](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool { |
| 466 EXPECT_TRUE(task_runner->RunsTasksOnCurrentThread()); | 467 EXPECT_TRUE(task_runner->RunsTasksOnCurrentThread()); |
| 467 return true; | 468 return true; |
| 468 })); | 469 })); |
| 469 } | 470 } |
| (...skipping 173 matching lines...) |
| 643 MemoryDumpLevelOfDetail::DETAILED); | 644 MemoryDumpLevelOfDetail::DETAILED); |
| 644 } | 645 } |
| 645 | 646 |
| 646 DisableTracing(); | 647 DisableTracing(); |
| 647 } | 648 } |
| 648 | 649 |
| 649 // Checks that the dump does not abort when unregistering a provider while | 650 // Checks that the dump does not abort when unregistering a provider while |
| 650 // dumping from a different thread than the dumping thread. | 651 // dumping from a different thread than the dumping thread. |
| 651 TEST_F(MemoryDumpManagerTest, UnregisterDumperFromThreadWhileDumping) { | 652 TEST_F(MemoryDumpManagerTest, UnregisterDumperFromThreadWhileDumping) { |
| 652 InitializeMemoryDumpManager(false /* is_coordinator */); | 653 InitializeMemoryDumpManager(false /* is_coordinator */); |
| 653 std::vector<scoped_ptr<TestIOThread>> threads; | 654 std::vector<std::unique_ptr<TestIOThread>> threads; |
| 654 std::vector<scoped_ptr<MockMemoryDumpProvider>> mdps; | 655 std::vector<std::unique_ptr<MockMemoryDumpProvider>> mdps; |
| 655 | 656 |
| 656 for (int i = 0; i < 2; i++) { | 657 for (int i = 0; i < 2; i++) { |
| 657 threads.push_back( | 658 threads.push_back( |
| 658 make_scoped_ptr(new TestIOThread(TestIOThread::kAutoStart))); | 659 WrapUnique(new TestIOThread(TestIOThread::kAutoStart))); |
| 659 mdps.push_back(make_scoped_ptr(new MockMemoryDumpProvider())); | 660 mdps.push_back(WrapUnique(new MockMemoryDumpProvider())); |
| 660 RegisterDumpProvider(mdps.back().get(), threads.back()->task_runner(), | 661 RegisterDumpProvider(mdps.back().get(), threads.back()->task_runner(), |
| 661 kDefaultOptions); | 662 kDefaultOptions); |
| 662 } | 663 } |
| 663 | 664 |
| 664 int on_memory_dump_call_count = 0; | 665 int on_memory_dump_call_count = 0; |
| 665 | 666 |
| 666 // When OnMemoryDump is called on either of the dump providers, it will | 667 // When OnMemoryDump is called on either of the dump providers, it will |
| 667 // unregister the other one. | 668 // unregister the other one. |
| 668 for (const scoped_ptr<MockMemoryDumpProvider>& mdp : mdps) { | 669 for (const std::unique_ptr<MockMemoryDumpProvider>& mdp : mdps) { |
| 669 int other_idx = (mdps.front() == mdp); | 670 int other_idx = (mdps.front() == mdp); |
| 670 TestIOThread* other_thread = threads[other_idx].get(); | 671 TestIOThread* other_thread = threads[other_idx].get(); |
| 671 MockMemoryDumpProvider* other_mdp = mdps[other_idx].get(); | 672 MockMemoryDumpProvider* other_mdp = mdps[other_idx].get(); |
| 672 auto on_dump = [this, other_thread, other_mdp, &on_memory_dump_call_count]( | 673 auto on_dump = [this, other_thread, other_mdp, &on_memory_dump_call_count]( |
| 673 const MemoryDumpArgs& args, ProcessMemoryDump* pmd) { | 674 const MemoryDumpArgs& args, ProcessMemoryDump* pmd) { |
| 674 other_thread->PostTaskAndWait( | 675 other_thread->PostTaskAndWait( |
| 675 FROM_HERE, base::Bind(&MemoryDumpManager::UnregisterDumpProvider, | 676 FROM_HERE, base::Bind(&MemoryDumpManager::UnregisterDumpProvider, |
| 676 base::Unretained(&*mdm_), other_mdp)); | 677 base::Unretained(&*mdm_), other_mdp)); |
| 677 on_memory_dump_call_count++; | 678 on_memory_dump_call_count++; |
| 678 return true; | 679 return true; |
| (...skipping 14 matching lines...) |
| 693 ASSERT_EQ(1, on_memory_dump_call_count); | 694 ASSERT_EQ(1, on_memory_dump_call_count); |
| 694 ASSERT_TRUE(last_callback_success_); | 695 ASSERT_TRUE(last_callback_success_); |
| 695 | 696 |
| 696 DisableTracing(); | 697 DisableTracing(); |
| 697 } | 698 } |
| 698 | 699 |
| 699 // If a thread (with a dump provider living on it) is torn down during a dump | 700 // If a thread (with a dump provider living on it) is torn down during a dump |
| 700 // its dump provider should be skipped but the dump itself should succeed. | 701 // its dump provider should be skipped but the dump itself should succeed. |
| 701 TEST_F(MemoryDumpManagerTest, TearDownThreadWhileDumping) { | 702 TEST_F(MemoryDumpManagerTest, TearDownThreadWhileDumping) { |
| 702 InitializeMemoryDumpManager(false /* is_coordinator */); | 703 InitializeMemoryDumpManager(false /* is_coordinator */); |
| 703 std::vector<scoped_ptr<TestIOThread>> threads; | 704 std::vector<std::unique_ptr<TestIOThread>> threads; |
| 704 std::vector<scoped_ptr<MockMemoryDumpProvider>> mdps; | 705 std::vector<std::unique_ptr<MockMemoryDumpProvider>> mdps; |
| 705 | 706 |
| 706 for (int i = 0; i < 2; i++) { | 707 for (int i = 0; i < 2; i++) { |
| 707 threads.push_back( | 708 threads.push_back( |
| 708 make_scoped_ptr(new TestIOThread(TestIOThread::kAutoStart))); | 709 WrapUnique(new TestIOThread(TestIOThread::kAutoStart))); |
| 709 mdps.push_back(make_scoped_ptr(new MockMemoryDumpProvider())); | 710 mdps.push_back(WrapUnique(new MockMemoryDumpProvider())); |
| 710 RegisterDumpProvider(mdps.back().get(), threads.back()->task_runner(), | 711 RegisterDumpProvider(mdps.back().get(), threads.back()->task_runner(), |
| 711 kDefaultOptions); | 712 kDefaultOptions); |
| 712 } | 713 } |
| 713 | 714 |
| 714 int on_memory_dump_call_count = 0; | 715 int on_memory_dump_call_count = 0; |
| 715 | 716 |
| 716 // When OnMemoryDump is called on either of the dump providers, it will | 717 // When OnMemoryDump is called on either of the dump providers, it will |
| 717 // tear down the thread of the other one. | 718 // tear down the thread of the other one. |
| 718 for (const scoped_ptr<MockMemoryDumpProvider>& mdp : mdps) { | 719 for (const std::unique_ptr<MockMemoryDumpProvider>& mdp : mdps) { |
| 719 int other_idx = (mdps.front() == mdp); | 720 int other_idx = (mdps.front() == mdp); |
| 720 TestIOThread* other_thread = threads[other_idx].get(); | 721 TestIOThread* other_thread = threads[other_idx].get(); |
| 721 auto on_dump = [other_thread, &on_memory_dump_call_count]( | 722 auto on_dump = [other_thread, &on_memory_dump_call_count]( |
| 722 const MemoryDumpArgs& args, ProcessMemoryDump* pmd) { | 723 const MemoryDumpArgs& args, ProcessMemoryDump* pmd) { |
| 723 other_thread->Stop(); | 724 other_thread->Stop(); |
| 724 on_memory_dump_call_count++; | 725 on_memory_dump_call_count++; |
| 725 return true; | 726 return true; |
| 726 }; | 727 }; |
| 727 | 728 |
| 728 // OnMemoryDump is called once for the provider that dumps first, and zero | 729 // OnMemoryDump is called once for the provider that dumps first, and zero |
| (...skipping 152 matching lines...) |
| 881 DisableTracing(); | 882 DisableTracing(); |
| 882 } | 883 } |
| 883 | 884 |
| 884 // Tests against race conditions that might arise when disabling tracing in the | 885 // Tests against race conditions that might arise when disabling tracing in the |
| 885 // middle of a global memory dump. | 886 // middle of a global memory dump. |
| 886 TEST_F(MemoryDumpManagerTest, DisableTracingWhileDumping) { | 887 TEST_F(MemoryDumpManagerTest, DisableTracingWhileDumping) { |
| 887 base::WaitableEvent tracing_disabled_event(false, false); | 888 base::WaitableEvent tracing_disabled_event(false, false); |
| 888 InitializeMemoryDumpManager(false /* is_coordinator */); | 889 InitializeMemoryDumpManager(false /* is_coordinator */); |
| 889 | 890 |
| 890 // Register a bound dump provider. | 891 // Register a bound dump provider. |
| 891 scoped_ptr<Thread> mdp_thread(new Thread("test thread")); | 892 std::unique_ptr<Thread> mdp_thread(new Thread("test thread")); |
| 892 mdp_thread->Start(); | 893 mdp_thread->Start(); |
| 893 MockMemoryDumpProvider mdp_with_affinity; | 894 MockMemoryDumpProvider mdp_with_affinity; |
| 894 RegisterDumpProvider(&mdp_with_affinity, mdp_thread->task_runner(), | 895 RegisterDumpProvider(&mdp_with_affinity, mdp_thread->task_runner(), |
| 895 kDefaultOptions); | 896 kDefaultOptions); |
| 896 | 897 |
| 897 // Register also an unbound dump provider. Unbound dump providers are always | 898 // Register also an unbound dump provider. Unbound dump providers are always |
| 898 // invoked after bound ones. | 899 // invoked after bound ones. |
| 899 MockMemoryDumpProvider unbound_mdp; | 900 MockMemoryDumpProvider unbound_mdp; |
| 900 RegisterDumpProvider(&unbound_mdp, nullptr, kDefaultOptions); | 901 RegisterDumpProvider(&unbound_mdp, nullptr, kDefaultOptions); |
| 901 | 902 |
| (...skipping 86 matching lines...) |
| 988 TraceResultBuffer::SimpleOutput trace_output; | 989 TraceResultBuffer::SimpleOutput trace_output; |
| 989 buffer.SetOutputCallback(trace_output.GetCallback()); | 990 buffer.SetOutputCallback(trace_output.GetCallback()); |
| 990 RunLoop run_loop; | 991 RunLoop run_loop; |
| 991 buffer.Start(); | 992 buffer.Start(); |
| 992 trace_event::TraceLog::GetInstance()->Flush( | 993 trace_event::TraceLog::GetInstance()->Flush( |
| 993 Bind(&OnTraceDataCollected, run_loop.QuitClosure(), Unretained(&buffer))); | 994 Bind(&OnTraceDataCollected, run_loop.QuitClosure(), Unretained(&buffer))); |
| 994 run_loop.Run(); | 995 run_loop.Run(); |
| 995 buffer.Finish(); | 996 buffer.Finish(); |
| 996 | 997 |
| 997 // Analyze the JSON. | 998 // Analyze the JSON. |
| 998 scoped_ptr<trace_analyzer::TraceAnalyzer> analyzer = make_scoped_ptr( | 999 std::unique_ptr<trace_analyzer::TraceAnalyzer> analyzer = WrapUnique( |
| 999 trace_analyzer::TraceAnalyzer::Create(trace_output.json_output)); | 1000 trace_analyzer::TraceAnalyzer::Create(trace_output.json_output)); |
| 1000 trace_analyzer::TraceEventVector events; | 1001 trace_analyzer::TraceEventVector events; |
| 1001 analyzer->FindEvents(Query::EventPhaseIs(TRACE_EVENT_PHASE_MEMORY_DUMP), | 1002 analyzer->FindEvents(Query::EventPhaseIs(TRACE_EVENT_PHASE_MEMORY_DUMP), |
| 1002 &events); | 1003 &events); |
| 1003 | 1004 |
| 1004 ASSERT_EQ(3u, events.size()); | 1005 ASSERT_EQ(3u, events.size()); |
| 1005 ASSERT_EQ(1u, trace_analyzer::CountMatches(events, Query::EventPidIs(123))); | 1006 ASSERT_EQ(1u, trace_analyzer::CountMatches(events, Query::EventPidIs(123))); |
| 1006 ASSERT_EQ(1u, trace_analyzer::CountMatches(events, Query::EventPidIs(456))); | 1007 ASSERT_EQ(1u, trace_analyzer::CountMatches(events, Query::EventPidIs(456))); |
| 1007 ASSERT_EQ(1u, trace_analyzer::CountMatches( | 1008 ASSERT_EQ(1u, trace_analyzer::CountMatches( |
| 1008 events, Query::EventPidIs(GetCurrentProcId()))); | 1009 events, Query::EventPidIs(GetCurrentProcId()))); |
| 1009 ASSERT_EQ(events[0]->id, events[1]->id); | 1010 ASSERT_EQ(events[0]->id, events[1]->id); |
| 1010 ASSERT_EQ(events[0]->id, events[2]->id); | 1011 ASSERT_EQ(events[0]->id, events[2]->id); |
| 1011 } | 1012 } |
| 1012 | 1013 |
| 1013 // Tests the basics of UnregisterAndDeleteDumpProviderSoon(): the | 1014 // Tests the basics of UnregisterAndDeleteDumpProviderSoon(): the |
| 1014 // unregistration should actually delete the providers and not leak them. | 1015 // unregistration should actually delete the providers and not leak them. |
| 1015 TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoon) { | 1016 TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoon) { |
| 1016 InitializeMemoryDumpManager(false /* is_coordinator */); | 1017 InitializeMemoryDumpManager(false /* is_coordinator */); |
| 1017 static const int kNumProviders = 3; | 1018 static const int kNumProviders = 3; |
| 1018 int dtor_count = 0; | 1019 int dtor_count = 0; |
| 1019 std::vector<scoped_ptr<MemoryDumpProvider>> mdps; | 1020 std::vector<std::unique_ptr<MemoryDumpProvider>> mdps; |
| 1020 for (int i = 0; i < kNumProviders; ++i) { | 1021 for (int i = 0; i < kNumProviders; ++i) { |
| 1021 scoped_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider); | 1022 std::unique_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider); |
| 1022 mdp->enable_mock_destructor = true; | 1023 mdp->enable_mock_destructor = true; |
| 1023 EXPECT_CALL(*mdp, Destructor()) | 1024 EXPECT_CALL(*mdp, Destructor()) |
| 1024 .WillOnce(Invoke([&dtor_count]() { dtor_count++; })); | 1025 .WillOnce(Invoke([&dtor_count]() { dtor_count++; })); |
| 1025 RegisterDumpProvider(mdp.get(), nullptr, kDefaultOptions); | 1026 RegisterDumpProvider(mdp.get(), nullptr, kDefaultOptions); |
| 1026 mdps.push_back(std::move(mdp)); | 1027 mdps.push_back(std::move(mdp)); |
| 1027 } | 1028 } |
| 1028 | 1029 |
| 1029 while (!mdps.empty()) { | 1030 while (!mdps.empty()) { |
| 1030 mdm_->UnregisterAndDeleteDumpProviderSoon(std::move(mdps.back())); | 1031 mdm_->UnregisterAndDeleteDumpProviderSoon(std::move(mdps.back())); |
| 1031 mdps.pop_back(); | 1032 mdps.pop_back(); |
| 1032 } | 1033 } |
| 1033 | 1034 |
| 1034 ASSERT_EQ(kNumProviders, dtor_count); | 1035 ASSERT_EQ(kNumProviders, dtor_count); |
| 1035 } | 1036 } |
| 1036 | 1037 |
| 1037 // This test checks against races when unregistering an unbound dump provider | 1038 // This test checks against races when unregistering an unbound dump provider |
| 1038 // from another thread while dumping. It registers one MDP and, when | 1039 // from another thread while dumping. It registers one MDP and, when |
| 1039 // OnMemoryDump() is called, it invokes UnregisterAndDeleteDumpProviderSoon() | 1040 // OnMemoryDump() is called, it invokes UnregisterAndDeleteDumpProviderSoon() |
| 1040 // from another thread. The OnMemoryDump() and the dtor call are expected to | 1041 // from another thread. The OnMemoryDump() and the dtor call are expected to |
| 1041 // happen on the same thread (the MemoryDumpManager utility thread). | 1042 // happen on the same thread (the MemoryDumpManager utility thread). |
| 1042 TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoonDuringDump) { | 1043 TEST_F(MemoryDumpManagerTest, UnregisterAndDeleteDumpProviderSoonDuringDump) { |
| 1043 InitializeMemoryDumpManager(false /* is_coordinator */); | 1044 InitializeMemoryDumpManager(false /* is_coordinator */); |
| 1044 scoped_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider); | 1045 std::unique_ptr<MockMemoryDumpProvider> mdp(new MockMemoryDumpProvider); |
| 1045 mdp->enable_mock_destructor = true; | 1046 mdp->enable_mock_destructor = true; |
| 1046 RegisterDumpProvider(mdp.get(), nullptr, kDefaultOptions); | 1047 RegisterDumpProvider(mdp.get(), nullptr, kDefaultOptions); |
| 1047 | 1048 |
| 1048 base::PlatformThreadRef thread_ref; | 1049 base::PlatformThreadRef thread_ref; |
| 1049 auto self_unregister_from_another_thread = [&mdp, &thread_ref]( | 1050 auto self_unregister_from_another_thread = [&mdp, &thread_ref]( |
| 1050 const MemoryDumpArgs&, ProcessMemoryDump*) -> bool { | 1051 const MemoryDumpArgs&, ProcessMemoryDump*) -> bool { |
| 1051 thread_ref = PlatformThread::CurrentRef(); | 1052 thread_ref = PlatformThread::CurrentRef(); |
| 1052 TestIOThread thread_for_unregistration(TestIOThread::kAutoStart); | 1053 TestIOThread thread_for_unregistration(TestIOThread::kAutoStart); |
| 1053 thread_for_unregistration.PostTaskAndWait( | 1054 thread_for_unregistration.PostTaskAndWait( |
| 1054 FROM_HERE, | 1055 FROM_HERE, |
| 1055 base::Bind( | 1056 base::Bind( |
| 1056 &MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon, | 1057 &MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon, |
| 1057 base::Unretained(MemoryDumpManager::GetInstance()), | 1058 base::Unretained(MemoryDumpManager::GetInstance()), |
| 1058 base::Passed(scoped_ptr<MemoryDumpProvider>(std::move(mdp))))); | 1059 base::Passed(std::unique_ptr<MemoryDumpProvider>(std::move(mdp))))); |
| 1059 thread_for_unregistration.Stop(); | 1060 thread_for_unregistration.Stop(); |
| 1060 return true; | 1061 return true; |
| 1061 }; | 1062 }; |
| 1062 EXPECT_CALL(*mdp, OnMemoryDump(_, _)) | 1063 EXPECT_CALL(*mdp, OnMemoryDump(_, _)) |
| 1063 .Times(1) | 1064 .Times(1) |
| 1064 .WillOnce(Invoke(self_unregister_from_another_thread)); | 1065 .WillOnce(Invoke(self_unregister_from_another_thread)); |
| 1065 EXPECT_CALL(*mdp, Destructor()) | 1066 EXPECT_CALL(*mdp, Destructor()) |
| 1066 .Times(1) | 1067 .Times(1) |
| 1067 .WillOnce(Invoke([&thread_ref]() { | 1068 .WillOnce(Invoke([&thread_ref]() { |
| 1068 EXPECT_EQ(thread_ref, PlatformThread::CurrentRef()); | 1069 EXPECT_EQ(thread_ref, PlatformThread::CurrentRef()); |
| 1069 })); | 1070 })); |
| 1070 | 1071 |
| 1071 EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory); | 1072 EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory); |
| 1072 EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(2); | 1073 EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(2); |
| 1073 for (int i = 0; i < 2; ++i) { | 1074 for (int i = 0; i < 2; ++i) { |
| 1074 RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED, | 1075 RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED, |
| 1075 MemoryDumpLevelOfDetail::DETAILED); | 1076 MemoryDumpLevelOfDetail::DETAILED); |
| 1076 } | 1077 } |
| 1077 DisableTracing(); | 1078 DisableTracing(); |
| 1078 } | 1079 } |
| 1079 | 1080 |
| 1080 } // namespace trace_event | 1081 } // namespace trace_event |
| 1081 } // namespace base | 1082 } // namespace base |
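
The substantive change in this diff is the base/ migration from scoped_ptr to std::unique_ptr: the "base/memory/scoped_ptr.h" include is replaced by <memory> plus "base/memory/ptr_util.h", and make_scoped_ptr(new T(...)) calls become WrapUnique(new T(...)). The following is a minimal sketch of that conversion pattern, not code from this CL; the Widget type is a hypothetical placeholder, and only the WrapUnique helper and the headers are taken from the diff above.

// Sketch of the scoped_ptr -> std::unique_ptr conversion applied throughout
// this file. "Widget" is a placeholder type used only for illustration.
#include <memory>
#include <vector>

#include "base/memory/ptr_util.h"  // for base::WrapUnique

class Widget {};

void ConversionSketch() {
  // OLD:  scoped_ptr<Widget> w = make_scoped_ptr(new Widget());
  std::unique_ptr<Widget> w = base::WrapUnique(new Widget());

  // OLD:  std::vector<scoped_ptr<Widget>> widgets;
  //       widgets.push_back(make_scoped_ptr(new Widget()));
  std::vector<std::unique_ptr<Widget>> widgets;
  widgets.push_back(base::WrapUnique(new Widget()));
}

The same substitution applies to move-only arguments bound through base::Bind, as in the UnregisterAndDeleteDumpProviderSoonDuringDump test above, where base::Passed(scoped_ptr<MemoryDumpProvider>(std::move(mdp))) becomes base::Passed(std::unique_ptr<MemoryDumpProvider>(std::move(mdp))).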