Index: components/tracing/core/trace_ring_buffer_unittest.cc
diff --git a/components/tracing/core/trace_ring_buffer_unittest.cc b/components/tracing/core/trace_ring_buffer_unittest.cc
index 0738f82516dbdbb12e464f255e26fd1aa3465825..34104bd15459a8e9a5da79c704ce027ea27f2a82 100644
--- a/components/tracing/core/trace_ring_buffer_unittest.cc
+++ b/components/tracing/core/trace_ring_buffer_unittest.cc
@@ -13,27 +13,29 @@ namespace {
 
 const size_t kChunkSize = TraceRingBuffer::Chunk::kSize;
 
-bool overlap(uint8_t* start1, uint8_t* end1, uint8_t* start2, uint8_t* end2) {
-  return start1 < end2 && start2 < end1;
-}
-
 TEST(TraceRingBufferTest, BasicChunkWrapping) {
   const uint32_t kNumChunks = 5;
   const size_t kBufferSize = kChunkSize * kNumChunks;
   std::unique_ptr<uint8_t[]> storage(new uint8_t[kBufferSize]);
   TraceRingBuffer ring_buffer(storage.get(), kBufferSize);
 
+  EXPECT_EQ(0u, ring_buffer.GetNumChunksTaken());
   uint8_t* last_chunk_end = nullptr;
+
   // Fill the buffer twice to test wrapping logic.
   for (uint32_t i = 0; i < kNumChunks * 2; ++i) {
-    TraceRingBuffer::Chunk* chunk = ring_buffer.TakeChunk();
+    TraceRingBuffer::Chunk* chunk = ring_buffer.TakeChunk(42 /* owner */);
     ASSERT_NE(nullptr, chunk);
+    EXPECT_EQ(1u, ring_buffer.GetNumChunksTaken());
+    EXPECT_EQ(42u, chunk->owner());
     const uint32_t chunk_idx = i % kNumChunks;
     EXPECT_EQ(chunk_idx == 0 ? storage.get() : last_chunk_end, chunk->begin());
     const uint32_t kPayloadSize = (chunk_idx + 1) * 8;
     memset(chunk->payload(), static_cast<int>(chunk_idx + 1), kPayloadSize);
     last_chunk_end = chunk->end();
-    ring_buffer.ReturnChunk(chunk, /* used_size = */ kPayloadSize);
+    chunk->set_used_size(kPayloadSize);
+    ring_buffer.ReturnChunk(chunk);
+    EXPECT_EQ(0u, ring_buffer.GetNumChunksTaken());
   }
 
   // Now scrape the |storage| buffer and check its contents.
@@ -52,28 +54,31 @@ TEST(TraceRingBufferTest, ChunkBankrupcyDoesNotCrash) {
   std::unique_ptr<uint8_t[]> storage(new uint8_t[kBufferSize]);
   TraceRingBuffer ring_buffer(storage.get(), kBufferSize);
 
-  TraceRingBuffer::Chunk* chunk1 = ring_buffer.TakeChunk();
+  TraceRingBuffer::Chunk* chunk1 = ring_buffer.TakeChunk(1);
   ASSERT_NE(nullptr, chunk1);
 
-  TraceRingBuffer::Chunk* chunk2 = ring_buffer.TakeChunk();
+  TraceRingBuffer::Chunk* chunk2 = ring_buffer.TakeChunk(1);
   ASSERT_NE(nullptr, chunk2);
 
+  EXPECT_EQ(2u, ring_buffer.GetNumChunksTaken());
+
   for (int i = 0; i < 3; ++i) {
-    TraceRingBuffer::Chunk* bankrupcy_chunk = ring_buffer.TakeChunk();
+    TraceRingBuffer::Chunk* bankrupcy_chunk = ring_buffer.TakeChunk(1);
     ASSERT_NE(nullptr, bankrupcy_chunk);
-    ASSERT_FALSE(overlap(bankrupcy_chunk->begin(), bankrupcy_chunk->end(),
-                         storage.get(), storage.get() + kBufferSize));
+    ASSERT_TRUE(ring_buffer.IsBankrupcyChunkForTesting(bankrupcy_chunk));
 
     // Make sure that the memory of the bankrupty chunk can be dereferenced.
     memset(bankrupcy_chunk->begin(), 0, kChunkSize);
   }
+  EXPECT_EQ(2u, ring_buffer.GetNumChunksTaken());
 
   // Return a chunk and check that the ring buffer is not bankrupt anymore.
-  ring_buffer.ReturnChunk(chunk2, 42);
-  TraceRingBuffer::Chunk* chunk = ring_buffer.TakeChunk();
+  chunk2->set_used_size(42);
+  ring_buffer.ReturnChunk(chunk2);
+  EXPECT_EQ(1u, ring_buffer.GetNumChunksTaken());
+  TraceRingBuffer::Chunk* chunk = ring_buffer.TakeChunk(1);
   ASSERT_NE(nullptr, chunk);
-  ASSERT_TRUE(overlap(chunk->begin(), chunk->end(), storage.get(),
-                      storage.get() + kBufferSize));
+  ASSERT_FALSE(ring_buffer.IsBankrupcyChunkForTesting(chunk));
 }
 
 }  // namespace
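
Note: the interface exercised by the updated test can be read off the calls above. The sketch below is a minimal reconstruction from this unittest alone, not the actual Chromium header; the exact signatures, return types, and the value of Chunk::kSize are assumptions.

// Minimal sketch of the TraceRingBuffer API as exercised by the test above.
// Reconstructed from the test only; signatures and types are assumptions.
#include <stddef.h>
#include <stdint.h>

class TraceRingBuffer {
 public:
  class Chunk {
   public:
    static const size_t kSize;        // Fixed chunk size; value not shown in the test.

    uint8_t* begin();                 // First byte of the chunk within the backing storage.
    uint8_t* end();                   // One past the last byte of the chunk.
    uint8_t* payload();               // Writable payload area used by the test's memset().
    uint32_t owner() const;           // Owner id passed to TakeChunk().
    void set_used_size(size_t size);  // Set before handing the chunk back via ReturnChunk().
  };

  // |storage| must be at least |size| bytes and must outlive the ring buffer.
  TraceRingBuffer(uint8_t* storage, size_t size);

  // Hands out the next free chunk, tagged with |owner|. When every chunk is
  // already taken it returns a special out-of-band "bankrupcy" chunk rather
  // than nullptr, so writers never have to handle allocation failure.
  Chunk* TakeChunk(uint32_t owner);

  // Returns a chunk previously obtained with TakeChunk().
  void ReturnChunk(Chunk* chunk);

  // Number of chunks currently taken and not yet returned.
  size_t GetNumChunksTaken() const;

  // True iff |chunk| is the bankrupcy chunk (exposed for tests only).
  bool IsBankrupcyChunkForTesting(const Chunk* chunk) const;
};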