Chromium Code Reviews

Index: base/trace_event/memory_dump_manager_unittest.cc
diff --git a/base/trace_event/memory_dump_manager_unittest.cc b/base/trace_event/memory_dump_manager_unittest.cc
index 34c98b0343f0a1d41f3e025c7ce0544cf7edf72c..3b776be568bf15edd8f0b77c2ba2cf968d98d61e 100644
--- a/base/trace_event/memory_dump_manager_unittest.cc
+++ b/base/trace_event/memory_dump_manager_unittest.cc
@@ -202,10 +202,13 @@ TEST_F(MemoryDumpManagerTest, SingleDumper) {
   // Finally check the unregister logic: the delegate will be invoked but not
   // the dump provider, as it has been unregistered.
   EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
-  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(3);
   EXPECT_CALL(mdp, OnMemoryDump(_, _)).Times(0);
-  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
-                           MemoryDumpLevelOfDetail::DETAILED);
+
+  for (int i = 0; i < 3; ++i) {
+    RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                             MemoryDumpLevelOfDetail::DETAILED);
+  }
   DisableTracing();
 }
@@ -576,6 +579,52 @@ TEST_F(MemoryDumpManagerTest, UnregisterDumperFromThreadWhileDumping) {
   DisableTracing();
 }
+// If a thread (with a dump provider living on it) is torn down during a dump,
+// its dump provider should be skipped, but the dump itself should succeed.
+TEST_F(MemoryDumpManagerTest, TearDownThreadWhileDumping) {
+  InitializeMemoryDumpManager(false /* is_coordinator */);
+  ScopedVector<TestIOThread> threads;
|
Ruud van Asseldonk
2015/12/14 16:25:49
Scoped vectors are being removed in favour of |std
|
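The comment above refers to the Chromium-wide migration away from ScopedVector toward a std::vector of owning smart pointers. A minimal sketch of how this CL's setup could look under that pattern, assuming std::vector<std::unique_ptr<T>> with C++14's std::make_unique (the exact replacement type is cut off in the comment above), and reusing the fixture's TestIOThread, MockMemoryDumpProvider, RegisterDumpProvider and kDefaultOptions unchanged; this is illustrative only, not part of the patch:

  // Hypothetical rewrite of the ScopedVector setup below; requires <memory>
  // and <vector>. Not part of this CL.
  std::vector<std::unique_ptr<TestIOThread>> threads;
  std::vector<std::unique_ptr<MockMemoryDumpProvider>> mdps;

  for (int i = 0; i < 2; i++) {
    threads.push_back(std::make_unique<TestIOThread>(TestIOThread::kAutoStart));
    mdps.push_back(std::make_unique<MockMemoryDumpProvider>());
    // RegisterDumpProvider() takes a raw pointer, as in the patch below,
    // so unwrap the unique_ptr with .get().
    RegisterDumpProvider(mdps.back().get(), threads.back()->task_runner(),
                         kDefaultOptions);
  }

The behaviour would be identical; only the ownership bookkeeping moves from ScopedVector to std::unique_ptr.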
+  ScopedVector<MockMemoryDumpProvider> mdps;
+
+  for (int i = 0; i < 2; i++) {
+    threads.push_back(new TestIOThread(TestIOThread::kAutoStart));
+    mdps.push_back(new MockMemoryDumpProvider());
+    RegisterDumpProvider(mdps.back(), threads.back()->task_runner(),
+                         kDefaultOptions);
+  }
+
+  int on_memory_dump_call_count = 0;
+
+  // When OnMemoryDump is called on either of the dump providers, it will
+  // tear down the thread of the other one.
+  for (MockMemoryDumpProvider* mdp : mdps) {
+    int other_idx = (mdps.front() == mdp);
+    TestIOThread* other_thread = threads[other_idx];
+    auto on_dump = [other_thread, &on_memory_dump_call_count](
+        const MemoryDumpArgs& args, ProcessMemoryDump* pmd) {
+      other_thread->Stop();
+      on_memory_dump_call_count++;
+      return true;
+    };
+
+    // OnMemoryDump is called once for the provider that dumps first, and zero
+    // times for the other provider.
+    EXPECT_CALL(*mdp, OnMemoryDump(_, _))
+        .Times(AtMost(1))
+        .WillOnce(Invoke(on_dump));
+  }
+
+  last_callback_success_ = false;
+  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
+  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                           MemoryDumpLevelOfDetail::DETAILED);
+  ASSERT_EQ(1, on_memory_dump_call_count);
+  ASSERT_EQ(true, last_callback_success_);
+
+  DisableTracing();
+}
+
 // Checks that a NACK callback is invoked if RequestGlobalDump() is called when
 // tracing is not enabled.
 TEST_F(MemoryDumpManagerTest, CallbackCalledOnFailure) {
@@ -761,9 +810,9 @@ TEST_F(MemoryDumpManagerTest, DisableTracingWhileDumping) {
   tracing_disabled_event.Signal();
   run_loop.Run();
-  // RequestGlobalMemoryDump() should be NACK-ed because one of the threads
-  // threads died before we had a chance to PostTask onto them.
-  EXPECT_FALSE(last_callback_success_);
+  // RequestGlobalMemoryDump() should still succeed even if some threads were
+  // torn down during the dump.
+  EXPECT_TRUE(last_callback_success_);
 }
 TEST_F(MemoryDumpManagerTest, DumpOnBehalfOfOtherProcess) {