OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/memory_dump_manager.h" | 5 #include "base/trace_event/memory_dump_manager.h" |
6 | 6 |
7 #include <stdint.h> | 7 #include <stdint.h> |
8 | 8 |
9 #include <memory> | 9 #include <memory> |
10 #include <vector> | 10 #include <vector> |
11 | 11 |
12 #include "base/bind_helpers.h" | 12 #include "base/bind_helpers.h" |
13 #include "base/memory/ptr_util.h" | 13 #include "base/memory/ptr_util.h" |
14 #include "base/memory/ref_counted_memory.h" | 14 #include "base/memory/ref_counted_memory.h" |
15 #include "base/message_loop/message_loop.h" | 15 #include "base/message_loop/message_loop.h" |
16 #include "base/run_loop.h" | 16 #include "base/run_loop.h" |
17 #include "base/strings/stringprintf.h" | 17 #include "base/strings/stringprintf.h" |
18 #include "base/synchronization/waitable_event.h" | 18 #include "base/synchronization/waitable_event.h" |
19 #include "base/test/sequenced_worker_pool_owner.h" | 19 #include "base/test/sequenced_worker_pool_owner.h" |
20 #include "base/test/test_io_thread.h" | 20 #include "base/test/test_io_thread.h" |
21 #include "base/test/trace_event_analyzer.h" | 21 #include "base/test/trace_event_analyzer.h" |
22 #include "base/threading/platform_thread.h" | 22 #include "base/threading/platform_thread.h" |
23 #include "base/threading/sequenced_task_runner_handle.h" | 23 #include "base/threading/sequenced_task_runner_handle.h" |
24 #include "base/threading/sequenced_worker_pool.h" | 24 #include "base/threading/sequenced_worker_pool.h" |
25 #include "base/threading/thread.h" | 25 #include "base/threading/thread.h" |
26 #include "base/threading/thread_task_runner_handle.h" | 26 #include "base/threading/thread_task_runner_handle.h" |
27 #include "base/trace_event/memory_dump_provider.h" | 27 #include "base/trace_event/memory_dump_provider.h" |
| 28 #include "base/trace_event/memory_dump_scheduler.h" |
28 #include "base/trace_event/memory_infra_background_whitelist.h" | 29 #include "base/trace_event/memory_infra_background_whitelist.h" |
29 #include "base/trace_event/process_memory_dump.h" | 30 #include "base/trace_event/process_memory_dump.h" |
30 #include "base/trace_event/trace_buffer.h" | 31 #include "base/trace_event/trace_buffer.h" |
31 #include "base/trace_event/trace_config_memory_test_util.h" | 32 #include "base/trace_event/trace_config_memory_test_util.h" |
32 #include "testing/gmock/include/gmock/gmock.h" | 33 #include "testing/gmock/include/gmock/gmock.h" |
33 #include "testing/gtest/include/gtest/gtest.h" | 34 #include "testing/gtest/include/gtest/gtest.h" |
34 | 35 |
35 using testing::_; | 36 using testing::_; |
36 using testing::AnyNumber; | 37 using testing::AnyNumber; |
37 using testing::AtMost; | 38 using testing::AtMost; |
(...skipping 228 matching lines...)
266 } | 267 } |
267 | 268 |
268 void EnableTracingWithTraceConfig(const std::string& trace_config) { | 269 void EnableTracingWithTraceConfig(const std::string& trace_config) { |
269 TraceLog::GetInstance()->SetEnabled(TraceConfig(trace_config), | 270 TraceLog::GetInstance()->SetEnabled(TraceConfig(trace_config), |
270 TraceLog::RECORDING_MODE); | 271 TraceLog::RECORDING_MODE); |
271 } | 272 } |
272 | 273 |
273 void DisableTracing() { TraceLog::GetInstance()->SetDisabled(); } | 274 void DisableTracing() { TraceLog::GetInstance()->SetDisabled(); } |
274 | 275 |
275 bool IsPeriodicDumpingEnabled() const { | 276 bool IsPeriodicDumpingEnabled() const { |
276 return mdm_->periodic_dump_timer_.IsRunning(); | 277 return mdm_->dump_scheduler_->IsPeriodicTimerRunningForTesting(); |
277 } | 278 } |
278 | 279 |
279 int GetMaxConsecutiveFailuresCount() const { | 280 int GetMaxConsecutiveFailuresCount() const { |
280 return MemoryDumpManager::kMaxConsecutiveFailuresCount; | 281 return MemoryDumpManager::kMaxConsecutiveFailuresCount; |
281 } | 282 } |
282 | 283 |
283 scoped_refptr<SequencedTaskRunner> GetPollingTaskRunnerUnsafe() { | |
284 return mdm_->dump_thread_->task_runner(); | |
285 } | |
286 | |
287 const MemoryDumpProvider::Options kDefaultOptions; | 284 const MemoryDumpProvider::Options kDefaultOptions; |
288 std::unique_ptr<MemoryDumpManager> mdm_; | 285 std::unique_ptr<MemoryDumpManager> mdm_; |
289 std::unique_ptr<MemoryDumpManagerDelegateForTesting> delegate_; | 286 std::unique_ptr<MemoryDumpManagerDelegateForTesting> delegate_; |
290 bool last_callback_success_; | 287 bool last_callback_success_; |
291 | 288 |
292 private: | 289 private: |
293 std::unique_ptr<MessageLoop> message_loop_; | 290 std::unique_ptr<MessageLoop> message_loop_; |
294 | 291 |
295 // We want our singleton torn down after each test. | 292 // We want our singleton torn down after each test. |
296 ShadowingAtExitManager at_exit_manager_; | 293 ShadowingAtExitManager at_exit_manager_; |
(...skipping 460 matching lines...)
757 std::unique_ptr<MockMemoryDumpProvider> mdp1(new MockMemoryDumpProvider()); | 754 std::unique_ptr<MockMemoryDumpProvider> mdp1(new MockMemoryDumpProvider()); |
758 std::unique_ptr<MockMemoryDumpProvider> mdp2(new MockMemoryDumpProvider()); | 755 std::unique_ptr<MockMemoryDumpProvider> mdp2(new MockMemoryDumpProvider()); |
759 mdp1->enable_mock_destructor = true; | 756 mdp1->enable_mock_destructor = true; |
760 mdp2->enable_mock_destructor = true; | 757 mdp2->enable_mock_destructor = true; |
761 | 758 |
762 EXPECT_CALL(*mdp1, SuspendFastMemoryPolling()).Times(1); | 759 EXPECT_CALL(*mdp1, SuspendFastMemoryPolling()).Times(1); |
763 EXPECT_CALL(*mdp2, SuspendFastMemoryPolling()).Times(1); | 760 EXPECT_CALL(*mdp2, SuspendFastMemoryPolling()).Times(1); |
764 EXPECT_CALL(*mdp1, Destructor()); | 761 EXPECT_CALL(*mdp1, Destructor()); |
765 EXPECT_CALL(*mdp2, Destructor()); | 762 EXPECT_CALL(*mdp2, Destructor()); |
766 | 763 |
| 764 MemoryDumpProvider::Options options; |
| 765 options.is_fast_polling_supported = true; |
| 766 RegisterDumpProvider(mdp1.get(), nullptr, options); |
| 767 |
767 RunLoop run_loop; | 768 RunLoop run_loop; |
768 scoped_refptr<SingleThreadTaskRunner> test_task_runner = | 769 scoped_refptr<SingleThreadTaskRunner> test_task_runner = |
769 ThreadTaskRunnerHandle::Get(); | 770 ThreadTaskRunnerHandle::Get(); |
770 auto quit_closure = run_loop.QuitClosure(); | 771 auto quit_closure = run_loop.QuitClosure(); |
771 | 772 |
| 773 const int kPollsToQuit = 10; |
772 int call_count = 0; | 774 int call_count = 0; |
| 775 MemoryDumpManager* mdm = mdm_.get(); |
| 776 const auto poll_function1 = [&call_count, &test_task_runner, quit_closure, |
| 777 &mdp2, mdm, &options, kPollsToQuit, |
| 778 this](uint64_t* total) -> void { |
| 779 ++call_count; |
| 780 if (call_count == 1) |
| 781 RegisterDumpProvider(mdp2.get(), nullptr, options, kMDPName); |
| 782 else if (call_count == 4) |
| 783 mdm->UnregisterAndDeleteDumpProviderSoon(std::move(mdp2)); |
| 784 else if (call_count == kPollsToQuit) |
| 785 test_task_runner->PostTask(FROM_HERE, quit_closure); |
| 786 |
| 787 // Record increase of 1 GiB of memory at each call. |
| 788 *total = static_cast<uint64_t>(call_count) * 1024 * 1024 * 1024; |
| 789 }; |
773 EXPECT_CALL(*mdp1, PollFastMemoryTotal(_)) | 790 EXPECT_CALL(*mdp1, PollFastMemoryTotal(_)) |
774 .Times(4) | 791 .Times(testing::AtLeast(kPollsToQuit)) |
775 .WillRepeatedly(Invoke([&call_count, &test_task_runner, | 792 .WillRepeatedly(Invoke(poll_function1)); |
776 quit_closure](uint64_t* total) -> void { | |
777 ++call_count; | |
778 if (call_count == 4) | |
779 test_task_runner->PostTask(FROM_HERE, quit_closure); | |
780 })); | |
781 | 793 |
782 // Depending on the order of PostTask calls the mdp2 might be registered after | 794 // Depending on the order of PostTask calls the mdp2 might be registered after |
783 // all polls or in between polls. | 795 // all polls or in between polls. |
784 EXPECT_CALL(*mdp2, PollFastMemoryTotal(_)) | 796 EXPECT_CALL(*mdp2, PollFastMemoryTotal(_)) |
785 .Times(Between(0, 4)) | 797 .Times(Between(0, kPollsToQuit - 1)) |
786 .WillRepeatedly(Return()); | 798 .WillRepeatedly(Return()); |
787 | 799 |
788 MemoryDumpProvider::Options options; | 800 MemoryDumpScheduler::SetPollingIntervalForTesting(1); |
789 options.is_fast_polling_supported = true; | |
790 RegisterDumpProvider(mdp1.get(), nullptr, options); | |
791 EnableTracingWithTraceConfig( | 801 EnableTracingWithTraceConfig( |
792 TraceConfigMemoryTestUtil::GetTraceConfig_PeakDetectionTrigger(1)); | 802 TraceConfigMemoryTestUtil::GetTraceConfig_PeakDetectionTrigger(3)); |
793 scoped_refptr<SequencedTaskRunner> polling_task_runner = | |
794 GetPollingTaskRunnerUnsafe().get(); | |
795 ASSERT_TRUE(polling_task_runner); | |
796 | 803 |
797 uint64_t value = 0; | 804 int last_poll_to_request_dump = -2; |
798 for (int i = 0; i < 4; i++) { | 805 EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)) |
799 if (i == 0) | 806 .Times(testing::AtLeast(2)) |
800 RegisterDumpProvider(mdp2.get(), nullptr, options); | 807 .WillRepeatedly(Invoke([&last_poll_to_request_dump, &call_count]( |
801 if (i == 2) | 808 const MemoryDumpRequestArgs& args, |
802 mdm_->UnregisterAndDeleteDumpProviderSoon(std::move(mdp2)); | 809 const MemoryDumpCallback& callback) -> void { |
803 polling_task_runner->PostTask( | 810 // Minimum number of polls between dumps must be 3 (polling interval is |
804 FROM_HERE, Bind(&MemoryDumpManagerTest::PollFastMemoryTotal, | 811 // 1ms). |
805 Unretained(this), &value)); | 812 EXPECT_GE(call_count - last_poll_to_request_dump, 3); |
806 } | 813 last_poll_to_request_dump = call_count; |
| 814 })); |
807 | 815 |
808 run_loop.Run(); | 816 run_loop.Run(); |
809 DisableTracing(); | 817 DisableTracing(); |
810 mdm_->UnregisterAndDeleteDumpProviderSoon(std::move(mdp1)); | 818 mdm_->UnregisterAndDeleteDumpProviderSoon(std::move(mdp1)); |
811 } | 819 } |
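
The hunk above restructures the fast-polling test around the new MemoryDumpScheduler: the polling interval is forced to 1 ms, the peak-detection trigger is configured with a value of 3, and the RequestGlobalMemoryDump expectation asserts that at least three polls elapse between consecutive dump requests. The standalone sketch below illustrates that throttling invariant only; it is not Chromium's MemoryDumpScheduler, and all names in it (PeakDetectorSketch, OnPoll, min_polls_between_dumps) are hypothetical. It assumes, for illustration, that any increase over the previous sample counts as a "peak".

// Minimal sketch of the throttling behaviour exercised by the test above:
// a dump may be requested at most once every |min_polls_between_dumps| polls.
#include <cstdint>
#include <functional>
#include <utility>

class PeakDetectorSketch {
 public:
  PeakDetectorSketch(int min_polls_between_dumps,
                     std::function<void()> request_dump)
      : min_polls_between_dumps_(min_polls_between_dumps),
        request_dump_(std::move(request_dump)) {}

  // Called once per polling tick with the freshly polled total.
  void OnPoll(uint64_t total_bytes) {
    ++polls_since_last_dump_;
    const bool enough_polls =
        polls_since_last_dump_ >= min_polls_between_dumps_;
    // For this sketch, any increase over the previous sample is a "peak".
    const bool is_peak = total_bytes > last_total_bytes_;
    last_total_bytes_ = total_bytes;
    if (enough_polls && is_peak) {
      request_dump_();
      polls_since_last_dump_ = 0;
    }
  }

 private:
  const int min_polls_between_dumps_;
  std::function<void()> request_dump_;
  int polls_since_last_dump_ = 0;
  uint64_t last_total_bytes_ = 0;
};

// Usage: feed monotonically growing totals (as the test's poll function does,
// +1 GiB per poll) and observe that dumps are requested at most every third
// poll, mirroring the EXPECT_GE(call_count - last_poll_to_request_dump, 3)
// check in the test.
int main() {
  int dumps = 0;
  PeakDetectorSketch detector(3, [&dumps] { ++dumps; });
  for (uint64_t i = 1; i <= 10; ++i)
    detector.OnPoll(i * 1024ull * 1024 * 1024);
  return dumps >= 2 ? 0 : 1;  // The test similarly expects AtLeast(2) dumps.
}

Requiring a minimum number of polls between dumps keeps a steadily growing total from triggering a dump request on every polling tick, which is the property the test pins down.
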
812 | 820 |
813 // If a thread (with a dump provider living on it) is torn down during a dump | 821 // If a thread (with a dump provider living on it) is torn down during a dump |
814 // its dump provider should be skipped but the dump itself should succeed. | 822 // its dump provider should be skipped but the dump itself should succeed. |
815 TEST_F(MemoryDumpManagerTest, TearDownThreadWhileDumping) { | 823 TEST_F(MemoryDumpManagerTest, TearDownThreadWhileDumping) { |
816 InitializeMemoryDumpManager(false /* is_coordinator */); | 824 InitializeMemoryDumpManager(false /* is_coordinator */); |
(...skipping 461 matching lines...)
1278 thread.Start(); | 1286 thread.Start(); |
1279 RegisterDumpProvider(&mdp1, thread.task_runner(), kDefaultOptions, | 1287 RegisterDumpProvider(&mdp1, thread.task_runner(), kDefaultOptions, |
1280 "BlacklistTestDumpProvider"); | 1288 "BlacklistTestDumpProvider"); |
1281 // Unregistering on wrong thread should not crash. | 1289 // Unregistering on wrong thread should not crash. |
1282 mdm_->UnregisterDumpProvider(&mdp1); | 1290 mdm_->UnregisterDumpProvider(&mdp1); |
1283 thread.Stop(); | 1291 thread.Stop(); |
1284 } | 1292 } |
1285 | 1293 |
1286 } // namespace trace_event | 1294 } // namespace trace_event |
1287 } // namespace base | 1295 } // namespace base |