OLD | NEW |
(Empty) | |
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
#include "components/tracing/core/trace_ring_buffer.h"

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <memory>

#include "testing/gtest/include/gtest/gtest.h"
| 8 |
| 9 namespace tracing { |
| 10 namespace v2 { |
| 11 |
| 12 namespace { |
| 13 |
// Shorthand for the fixed per-chunk size defined by the ring buffer's Chunk
// type; used below both to size the backing storage and to walk it.
const size_t kChunkSize = TraceRingBuffer::Chunk::kSize;
| 15 |
// Returns true iff the half-open byte ranges [start1, end1) and
// [start2, end2) share at least one byte.
bool overlap(uint8_t* start1, uint8_t* end1, uint8_t* start2, uint8_t* end2) {
  const bool disjoint = end1 <= start2 || end2 <= start1;
  return !disjoint;
}
| 19 |
| 20 TEST(TraceRingBufferTest, BasicChunkWrapping) { |
| 21 const uint32_t kNumChunks = 5; |
| 22 const size_t kBufferSize = kChunkSize * kNumChunks; |
| 23 std::unique_ptr<uint8_t[]> storage(new uint8_t[kBufferSize]); |
| 24 TraceRingBuffer ring_buffer(storage.get(), kBufferSize); |
| 25 |
| 26 uint8_t* last_chunk_end = nullptr; |
| 27 // Fill the buffer twice to test wrapping logic. |
| 28 for (uint32_t i = 0; i < kNumChunks * 2; ++i) { |
| 29 TraceRingBuffer::Chunk* chunk = ring_buffer.TakeChunk(); |
| 30 ASSERT_NE(nullptr, chunk); |
| 31 const uint32_t chunk_idx = i % kNumChunks; |
| 32 EXPECT_EQ(chunk_idx == 0 ? storage.get() : last_chunk_end, chunk->begin()); |
| 33 const uint32_t kPayloadSize = (chunk_idx + 1) * 8; |
| 34 memset(chunk->payload(), static_cast<int>(chunk_idx + 1), kPayloadSize); |
| 35 last_chunk_end = chunk->end(); |
| 36 ring_buffer.ReturnChunk(chunk, /* used_size = */ kPayloadSize); |
| 37 } |
| 38 |
| 39 // Now scrape the |storage| buffer and check its contents. |
| 40 for (uint32_t chunk_idx = 0; chunk_idx < kNumChunks; ++chunk_idx) { |
| 41 uint8_t* chunk_start = storage.get() + (chunk_idx * kChunkSize); |
| 42 const uint32_t kPayloadSize = (chunk_idx + 1) * 8; |
| 43 EXPECT_EQ(kPayloadSize, *reinterpret_cast<uint32_t*>(chunk_start)); |
| 44 for (uint32_t i = 0; i < kPayloadSize; ++i) |
| 45 EXPECT_EQ(chunk_idx + 1, *(chunk_start + sizeof(uint32_t) + i)); |
| 46 } |
| 47 } |
| 48 |
| 49 TEST(TraceRingBufferTest, ChunkBankrupcyDoesNotCrash) { |
| 50 const size_t kNumChunks = 2; |
| 51 const size_t kBufferSize = TraceRingBuffer::Chunk::kSize * kNumChunks; |
| 52 std::unique_ptr<uint8_t[]> storage(new uint8_t[kBufferSize]); |
| 53 TraceRingBuffer ring_buffer(storage.get(), kBufferSize); |
| 54 |
| 55 TraceRingBuffer::Chunk* chunk1 = ring_buffer.TakeChunk(); |
| 56 ASSERT_NE(nullptr, chunk1); |
| 57 |
| 58 TraceRingBuffer::Chunk* chunk2 = ring_buffer.TakeChunk(); |
| 59 ASSERT_NE(nullptr, chunk2); |
| 60 |
| 61 for (int i = 0; i < 3; ++i) { |
| 62 TraceRingBuffer::Chunk* bankrupcy_chunk = ring_buffer.TakeChunk(); |
| 63 ASSERT_NE(nullptr, bankrupcy_chunk); |
| 64 ASSERT_FALSE(overlap(bankrupcy_chunk->begin(), bankrupcy_chunk->end(), |
| 65 storage.get(), storage.get() + kBufferSize)); |
| 66 |
| 67 // Make sure that the memory of the bankrupty chunk can be dereferenced. |
| 68 memset(bankrupcy_chunk->begin(), 0, kChunkSize); |
| 69 } |
| 70 |
| 71 // Return a chunk and check that the ring buffer is not bankrupt anymore. |
| 72 ring_buffer.ReturnChunk(chunk2, 42); |
| 73 TraceRingBuffer::Chunk* chunk = ring_buffer.TakeChunk(); |
| 74 ASSERT_NE(nullptr, chunk); |
| 75 ASSERT_TRUE(overlap(chunk->begin(), chunk->end(), storage.get(), |
| 76 storage.get() + kBufferSize)); |
| 77 } |
| 78 |
| 79 } // namespace |
| 80 } // namespace v2 |
| 81 } // namespace tracing |
OLD | NEW |