// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/tracing/core/trace_buffer_writer.h"

#include <string.h>

#include <memory>

#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "components/tracing/core/trace_ring_buffer.h"
#include "components/tracing/test/golden_protos/events_chunk.pb.h"
#include "testing/gtest/include/gtest/gtest.h"

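// Unit tests for TraceBufferWriter: single events, chunk wrap-over with many
// small events, events fragmenting across chunks, and several writers
// contending for the ring buffer, including its bankruptcy behavior.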
namespace tracing {
namespace v2 {
namespace {

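// Test stub for proto::Event. Add() appends a single zero-filled bytes field
// (field number 1), sized so that the whole serialized event, preamble
// included, takes exactly |event_size| bytes in the chunk.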
class MockEvent : public proto::Event {
 public:
  static TraceEventHandle Add(TraceBufferWriter* writer, size_t event_size) {
    TraceEventHandle handle = writer->AddEvent();
    MockEvent* mock_event = static_cast<MockEvent*>(&*handle);

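    // |event_size| includes the 2-4 bytes of proto preamble for field 1: one
    // tag byte plus a varint-encoded length, which grows by one byte at each
    // 7-bit boundary of the payload size. Work out the payload size that
    // makes the serialized event exactly |event_size| bytes long.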
    size_t buffer_size = 0;
    DCHECK_GT(event_size, 2u);
    if (event_size < (1 << 7) + 2)
      buffer_size = event_size - 2;
    else if (event_size < (1 << 14) + 3)
      buffer_size = event_size - 3;
    else if (event_size < (1 << 21) + 4)
      buffer_size = event_size - 4;
    else
      NOTREACHED();

    DCHECK(buffer_size);
    std::unique_ptr<uint8_t[]> buf(new uint8_t[buffer_size]);
    memset(buf.get(), 0, buffer_size);
    mock_event->AppendBytes(1, buf.get(), buffer_size);

    return handle;
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(MockEvent);
};

constexpr uint32_t kNumChunks = 10;
constexpr size_t kBufferSize = kNumChunks * kChunkSize;

class TraceBufferWriterTest : public ::testing::Test {
 public:
  void SetUp() override {
    ring_buf_mem_.reset(new uint8_t[kBufferSize]);
    memset(ring_buf_mem_.get(), 0, kBufferSize);
    ring_buf_.reset(new TraceRingBuffer(ring_buf_mem_.get(), kBufferSize));

    // Estimate the event size required to exactly fill one chunk: write a
    // small event with a throwaway writer, add back the bytes still available
    // in its chunk, then recreate the ring buffer from scratch.
    std::unique_ptr<TraceBufferWriter> writer = CreateWriter(1);
    MockEvent::Add(writer.get(), 4);
    event_size_to_fill_chunk_ = writer->stream_writer().bytes_available() + 4;
    writer.reset();
    ring_buf_.reset(new TraceRingBuffer(ring_buf_mem_.get(), kBufferSize));
  }

  void TearDown() override {
    ring_buf_.reset();
    ring_buf_mem_.reset();
  }

  std::unique_ptr<TraceBufferWriter> CreateWriter(uint32_t writer_id) {
    return base::WrapUnique(new TraceBufferWriter(ring_buf_.get(), writer_id));
  }

  const TraceRingBuffer::Chunk* GetChunk(uint32_t i) {
    DCHECK_LT(i, kNumChunks);
    return &ring_buf_->chunks_for_testing()[i];
  }

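  // Parses chunk |chunk_id| back with the golden proto, checks its header
  // fields against the expectations and returns the parsed chunk so that
  // callers can assert on the individual events.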
  proto::EventsChunk ReadBackAndTestChunk(uint32_t chunk_id,
                                          uint32_t expected_writer_id,
                                          uint32_t expected_seq_id,
                                          int expected_num_events,
                                          bool expected_first_event_continues,
                                          bool expected_last_event_continues) {
    const TraceRingBuffer::Chunk* chunk = GetChunk(chunk_id);
    proto::EventsChunk parsed_chunk;
    EXPECT_TRUE(
        parsed_chunk.ParseFromArray(chunk->payload(), chunk->used_size()));

    EXPECT_TRUE(parsed_chunk.has_writer_id());
    EXPECT_EQ(expected_writer_id, parsed_chunk.writer_id());

    EXPECT_TRUE(parsed_chunk.has_seq_id());
    EXPECT_EQ(expected_seq_id, parsed_chunk.seq_id());

    EXPECT_EQ(expected_first_event_continues,
              parsed_chunk.first_event_continues_from_prev_chunk());
    EXPECT_EQ(expected_last_event_continues,
              parsed_chunk.last_event_continues_on_next_chunk());
    EXPECT_EQ(expected_num_events, parsed_chunk.events_size());
    return parsed_chunk;
  }

  const TraceRingBuffer& ring_buffer() const { return *ring_buf_; }
  size_t event_size_to_fill_chunk() const { return event_size_to_fill_chunk_; }

 private:
  std::unique_ptr<uint8_t[]> ring_buf_mem_;
  std::unique_ptr<TraceRingBuffer> ring_buf_;
  size_t event_size_to_fill_chunk_;
};

TEST_F(TraceBufferWriterTest, SingleEvent) {
  const uint32_t kWriterId = 0x42;
  std::unique_ptr<TraceBufferWriter> writer = CreateWriter(kWriterId);
  MockEvent::Add(writer.get(), 7);
  writer->Flush();

  auto parsed_chunk = ReadBackAndTestChunk(0, kWriterId, 0, 1, false, false);
  EXPECT_EQ(7u, parsed_chunk.events(0).size());
}

TEST_F(TraceBufferWriterTest, ManySmallEvents) {
  const uint32_t kWriterId = 0x42;
  std::unique_ptr<TraceBufferWriter> writer = CreateWriter(kWriterId);

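  // Starts at 1 (not 0) so that the writer's very first acquisition of
  // chunk 0 below is also counted as a switch.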
  uint32_t last_owned_chunk_id = 1;
  uint32_t num_times_did_switch_to_chunk[kNumChunks] = {};

  // kBufferSize here is just an upper bound to prevent the test from getting
  // stuck indefinitely in the loop if it fails.
  for (size_t i = 0; i < kBufferSize; i++) {
    MockEvent::Add(writer.get(), 5);

    // Small events should never cause more than one chunk to be owned at a
    // time. Check that TraceBufferWriter doesn't accidentally retain chunks.
    uint32_t num_chunks_owned = 0;
    for (uint32_t chunk_id = 0; chunk_id < kNumChunks; chunk_id++) {
      const bool is_owned = GetChunk(chunk_id)->is_owned();
      num_chunks_owned += is_owned ? 1 : 0;
      if (is_owned && chunk_id != last_owned_chunk_id) {
        last_owned_chunk_id = chunk_id;
        num_times_did_switch_to_chunk[chunk_id]++;
      }
    }
    ASSERT_EQ(1u, num_chunks_owned);

    // Stop once the last chunk has been filled twice.
    if (num_times_did_switch_to_chunk[kNumChunks - 1] == 2)
      break;
  }

  // Test the wrap-over logic: all chunks should have been filled twice.
  for (uint32_t chunk_id = 0; chunk_id < kNumChunks; chunk_id++)
    EXPECT_EQ(2u, num_times_did_switch_to_chunk[chunk_id]);

  // Test that Flush() releases all chunks.
  writer->Flush();
  for (uint32_t chunk_id = 0; chunk_id < kNumChunks; chunk_id++)
    EXPECT_FALSE(GetChunk(chunk_id)->is_owned());
}

TEST_F(TraceBufferWriterTest, OneWriterWithFragmentingEvents) {
  const uint32_t kWriterId = 0x42;
  std::unique_ptr<TraceBufferWriter> writer = CreateWriter(kWriterId);

  MockEvent::Add(writer.get(), event_size_to_fill_chunk());

  EXPECT_TRUE(GetChunk(0)->is_owned());
  EXPECT_FALSE(GetChunk(1)->is_owned());

  MockEvent::Add(writer.get(), event_size_to_fill_chunk());
  EXPECT_TRUE(GetChunk(1)->is_owned());
  EXPECT_FALSE(GetChunk(2)->is_owned());

  // Create one event which starts at the beginning of chunk 2 and overflows
  // into chunk 3.
  MockEvent::Add(writer.get(), event_size_to_fill_chunk() + 1);
  EXPECT_TRUE(GetChunk(2)->is_owned());
  EXPECT_TRUE(GetChunk(3)->is_owned());

  // Adding a new event should cause chunk 2 to be released, while chunk 3
  // is still retained.
  MockEvent::Add(writer.get(), 4);
  EXPECT_FALSE(GetChunk(2)->is_owned());
  EXPECT_TRUE(GetChunk(3)->is_owned());

  // Now add a very large event which spans three chunks (chunks 3, 4 and 5).
  MockEvent::Add(writer.get(), event_size_to_fill_chunk() * 2 + 1);
  EXPECT_TRUE(GetChunk(3)->is_owned());
  EXPECT_TRUE(GetChunk(4)->is_owned());
  EXPECT_TRUE(GetChunk(5)->is_owned());

  // Add a final small event and check that chunks 3 and 4 are released.
  MockEvent::Add(writer.get(), 4);
  EXPECT_FALSE(GetChunk(3)->is_owned());
  EXPECT_FALSE(GetChunk(4)->is_owned());
  EXPECT_TRUE(GetChunk(5)->is_owned());

  // Flush and read back the chunks using the golden protos.
  writer->Flush();

  // The first two chunks should have one event each, neither of them wrapping.
  auto chunk = ReadBackAndTestChunk(0, kWriterId, 0, 1, false, false);
  EXPECT_EQ(event_size_to_fill_chunk(), chunk.events(0).size());

  chunk = ReadBackAndTestChunk(1, kWriterId, 1, 1, false, false);
  EXPECT_EQ(event_size_to_fill_chunk(), chunk.events(0).size());

  // Chunk 2 should have one partial event, which overflows into chunk 3.
  chunk = ReadBackAndTestChunk(2, kWriterId, 2, 1, false, true);
  EXPECT_EQ(event_size_to_fill_chunk(), chunk.events(0).size());

  // Chunk 3 should have the overflowing event from above, a small event, and
  // the beginning of the very large event.
  chunk = ReadBackAndTestChunk(3, kWriterId, 3, 3, true, true);
  EXPECT_EQ(4u, chunk.events(1).size());

  // Chunk 4 should contain the partial continuation of the large event.
  chunk = ReadBackAndTestChunk(4, kWriterId, 4, 1, true, true);
  EXPECT_EQ(event_size_to_fill_chunk() - 2, chunk.events(0).size());

  // Chunk 5 should contain the end of the large event and the final small one.
  chunk = ReadBackAndTestChunk(5, kWriterId, 5, 2, true, false);
  EXPECT_EQ(4u, chunk.events(1).size());
}

TEST_F(TraceBufferWriterTest, ManyWriters) {
  const uint32_t kNumWriters = kNumChunks / 2;
  std::unique_ptr<TraceBufferWriter> writer[kNumWriters];

  for (uint32_t i = 0; i < kNumWriters; ++i) {
    writer[i] = CreateWriter(i + 1);
    MockEvent::Add(writer[i].get(), 4);
    EXPECT_EQ(writer[i]->writer_id(), GetChunk(i)->owner());
  }

  // Write one large and one small event on each writer.
  for (uint32_t i = 0; i < kNumWriters; ++i) {
    MockEvent::Add(writer[i].get(), event_size_to_fill_chunk());
    MockEvent::Add(writer[i].get(), 5 + i);
  }

  // At this point the first 5 chunks should have been returned and the last 5
  // should be owned by the respective 5 writers.
  for (uint32_t i = 0; i < kNumWriters; ++i)
    EXPECT_FALSE(GetChunk(i)->is_owned());
  for (uint32_t i = kNumWriters; i < kNumWriters * 2; ++i)
    EXPECT_EQ(writer[i - kNumWriters]->writer_id(), GetChunk(i)->owner());

  // Write one large event to writer 0 (currently owning chunk 5). That will
  // make it return chunk 5 and take ownership of chunks [0, 4].
  MockEvent::Add(writer[0].get(), event_size_to_fill_chunk());
  auto retain_event =
      MockEvent::Add(writer[0].get(), event_size_to_fill_chunk() * 4 + 1);
  for (uint32_t i = 0; i < 5; ++i)
    EXPECT_EQ(writer[0]->writer_id(), GetChunk(i)->owner());

  // At this point the only free chunk is chunk 5. Attempting a write from
  // another writer should fill it.
  EXPECT_FALSE(GetChunk(5)->is_owned());
  auto retain_event_2 =
      MockEvent::Add(writer[3].get(), event_size_to_fill_chunk());

  // Now all the chunks are taken.
  for (uint32_t i = 0; i < kNumChunks; ++i)
    EXPECT_TRUE(GetChunk(i)->is_owned());

  // An attempt to write a large event from yet another writer should cause
  // the ring buffer to fall into bankruptcy mode.
  auto retain_event_3 =
      MockEvent::Add(writer[4].get(), event_size_to_fill_chunk());
  EXPECT_EQ(ring_buffer().num_chunks(), ring_buffer().GetNumChunksTaken());

  // A small write from writer 0 should cause it to return all its chunks but
  // the last one and leave the bankruptcy chunk.
  retain_event = MockEvent::Add(writer[0].get(), 7);
  EXPECT_LT(ring_buffer().GetNumChunksTaken(), ring_buffer().num_chunks());
  EXPECT_EQ(writer[0]->writer_id(), GetChunk(4)->owner());
  for (uint32_t i = 0; i < 3; ++i)
    EXPECT_FALSE(GetChunk(i)->is_owned());

  // Flush all the writers and test that all chunks are returned.
  for (uint32_t i = 0; i < kNumWriters; ++i)
    writer[i]->Flush();
  for (uint32_t i = 0; i < kNumChunks; ++i)
    EXPECT_FALSE(GetChunk(i)->is_owned());

  // Read back and test the content of the chunks.

  // Chunk 4 holds the tail of writer[0]'s large event plus its final 7-byte
  // write.
  auto chunk =
      ReadBackAndTestChunk(4, writer[0]->writer_id(), 6, 2, true, false);
  EXPECT_EQ(7u, chunk.events(1).size());

  // writer[1] and writer[2] just have a continuation and a small event each.
  chunk = ReadBackAndTestChunk(6, writer[1]->writer_id(), 1, 2, true, false);
  EXPECT_EQ(5u + 1, chunk.events(1).size());

  chunk = ReadBackAndTestChunk(7, writer[2]->writer_id(), 1, 2, true, false);
  EXPECT_EQ(5u + 2, chunk.events(1).size());

  // writer[3] did the last write before the bankruptcy and has one extra
  // event.
  ReadBackAndTestChunk(8, writer[3]->writer_id(), 1, 3, true, true);

  // writer[4] overflowed into the bankruptcy chunk and has 3 events as well.
  ReadBackAndTestChunk(9, writer[4]->writer_id(), 1, 3, true, true);
}

}  // namespace
}  // namespace v2
}  // namespace tracing