| OLD | NEW |
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "components/tracing/core/trace_ring_buffer.h" | 5 #include "components/tracing/core/trace_ring_buffer.h" |
| 6 | 6 |
| 7 #include "testing/gtest/include/gtest/gtest.h" | 7 #include "testing/gtest/include/gtest/gtest.h" |
| 8 | 8 |
| 9 namespace tracing { | 9 namespace tracing { |
| 10 namespace v2 { | 10 namespace v2 { |
| 11 | 11 |
| 12 namespace { | 12 namespace { |
| 13 | 13 |
| 14 const size_t kChunkSize = TraceRingBuffer::Chunk::kSize; | 14 const size_t kChunkSize = TraceRingBuffer::Chunk::kSize; |
| 15 | 15 |
| 16 bool overlap(uint8_t* start1, uint8_t* end1, uint8_t* start2, uint8_t* end2) { | |
| 17 return start1 < end2 && start2 < end1; | |
| 18 } | |
| 19 | |
| 20 TEST(TraceRingBufferTest, BasicChunkWrapping) { | 16 TEST(TraceRingBufferTest, BasicChunkWrapping) { |
| 21 const uint32_t kNumChunks = 5; | 17 const uint32_t kNumChunks = 5; |
| 22 const size_t kBufferSize = kChunkSize * kNumChunks; | 18 const size_t kBufferSize = kChunkSize * kNumChunks; |
| 23 std::unique_ptr<uint8_t[]> storage(new uint8_t[kBufferSize]); | 19 std::unique_ptr<uint8_t[]> storage(new uint8_t[kBufferSize]); |
| 24 TraceRingBuffer ring_buffer(storage.get(), kBufferSize); | 20 TraceRingBuffer ring_buffer(storage.get(), kBufferSize); |
| 25 | 21 |
| 22 EXPECT_EQ(0u, ring_buffer.GetNumChunksTaken()); |
| 26 uint8_t* last_chunk_end = nullptr; | 23 uint8_t* last_chunk_end = nullptr; |
| 24 |
| 27 // Fill the buffer twice to test wrapping logic. | 25 // Fill the buffer twice to test wrapping logic. |
| 28 for (uint32_t i = 0; i < kNumChunks * 2; ++i) { | 26 for (uint32_t i = 0; i < kNumChunks * 2; ++i) { |
| 29 TraceRingBuffer::Chunk* chunk = ring_buffer.TakeChunk(); | 27 TraceRingBuffer::Chunk* chunk = ring_buffer.TakeChunk(42 /* owner */); |
| 30 ASSERT_NE(nullptr, chunk); | 28 ASSERT_NE(nullptr, chunk); |
| 29 EXPECT_EQ(1u, ring_buffer.GetNumChunksTaken()); |
| 30 EXPECT_EQ(42u, chunk->owner()); |
| 31 const uint32_t chunk_idx = i % kNumChunks; | 31 const uint32_t chunk_idx = i % kNumChunks; |
| 32 EXPECT_EQ(chunk_idx == 0 ? storage.get() : last_chunk_end, chunk->begin()); | 32 EXPECT_EQ(chunk_idx == 0 ? storage.get() : last_chunk_end, chunk->begin()); |
| 33 const uint32_t kPayloadSize = (chunk_idx + 1) * 8; | 33 const uint32_t kPayloadSize = (chunk_idx + 1) * 8; |
| 34 memset(chunk->payload(), static_cast<int>(chunk_idx + 1), kPayloadSize); | 34 memset(chunk->payload(), static_cast<int>(chunk_idx + 1), kPayloadSize); |
| 35 last_chunk_end = chunk->end(); | 35 last_chunk_end = chunk->end(); |
| 36 ring_buffer.ReturnChunk(chunk, /* used_size = */ kPayloadSize); | 36 chunk->set_used_size(kPayloadSize); |
| 37 ring_buffer.ReturnChunk(chunk); |
| 38 EXPECT_EQ(0u, ring_buffer.GetNumChunksTaken()); |
| 37 } | 39 } |
| 38 | 40 |
| 39 // Now scrape the |storage| buffer and check its contents. | 41 // Now scrape the |storage| buffer and check its contents. |
| 40 for (uint32_t chunk_idx = 0; chunk_idx < kNumChunks; ++chunk_idx) { | 42 for (uint32_t chunk_idx = 0; chunk_idx < kNumChunks; ++chunk_idx) { |
| 41 uint8_t* chunk_start = storage.get() + (chunk_idx * kChunkSize); | 43 uint8_t* chunk_start = storage.get() + (chunk_idx * kChunkSize); |
| 42 const uint32_t kPayloadSize = (chunk_idx + 1) * 8; | 44 const uint32_t kPayloadSize = (chunk_idx + 1) * 8; |
| 43 EXPECT_EQ(kPayloadSize, *reinterpret_cast<uint32_t*>(chunk_start)); | 45 EXPECT_EQ(kPayloadSize, *reinterpret_cast<uint32_t*>(chunk_start)); |
| 44 for (uint32_t i = 0; i < kPayloadSize; ++i) | 46 for (uint32_t i = 0; i < kPayloadSize; ++i) |
| 45 EXPECT_EQ(chunk_idx + 1, *(chunk_start + sizeof(uint32_t) + i)); | 47 EXPECT_EQ(chunk_idx + 1, *(chunk_start + sizeof(uint32_t) + i)); |
| 46 } | 48 } |
| 47 } | 49 } |
| 48 | 50 |
| 49 TEST(TraceRingBufferTest, ChunkBankrupcyDoesNotCrash) { | 51 TEST(TraceRingBufferTest, ChunkBankrupcyDoesNotCrash) { |
| 50 const size_t kNumChunks = 2; | 52 const size_t kNumChunks = 2; |
| 51 const size_t kBufferSize = TraceRingBuffer::Chunk::kSize * kNumChunks; | 53 const size_t kBufferSize = TraceRingBuffer::Chunk::kSize * kNumChunks; |
| 52 std::unique_ptr<uint8_t[]> storage(new uint8_t[kBufferSize]); | 54 std::unique_ptr<uint8_t[]> storage(new uint8_t[kBufferSize]); |
| 53 TraceRingBuffer ring_buffer(storage.get(), kBufferSize); | 55 TraceRingBuffer ring_buffer(storage.get(), kBufferSize); |
| 54 | 56 |
| 55 TraceRingBuffer::Chunk* chunk1 = ring_buffer.TakeChunk(); | 57 TraceRingBuffer::Chunk* chunk1 = ring_buffer.TakeChunk(1); |
| 56 ASSERT_NE(nullptr, chunk1); | 58 ASSERT_NE(nullptr, chunk1); |
| 57 | 59 |
| 58 TraceRingBuffer::Chunk* chunk2 = ring_buffer.TakeChunk(); | 60 TraceRingBuffer::Chunk* chunk2 = ring_buffer.TakeChunk(1); |
| 59 ASSERT_NE(nullptr, chunk2); | 61 ASSERT_NE(nullptr, chunk2); |
| 60 | 62 |
| 63 EXPECT_EQ(2u, ring_buffer.GetNumChunksTaken()); |
| 64 |
| 61 for (int i = 0; i < 3; ++i) { | 65 for (int i = 0; i < 3; ++i) { |
| 62 TraceRingBuffer::Chunk* bankrupcy_chunk = ring_buffer.TakeChunk(); | 66 TraceRingBuffer::Chunk* bankrupcy_chunk = ring_buffer.TakeChunk(1); |
| 63 ASSERT_NE(nullptr, bankrupcy_chunk); | 67 ASSERT_NE(nullptr, bankrupcy_chunk); |
| 64 ASSERT_FALSE(overlap(bankrupcy_chunk->begin(), bankrupcy_chunk->end(), | 68 ASSERT_TRUE(ring_buffer.IsBankrupcyChunkForTesting(bankrupcy_chunk)); |
| 65 storage.get(), storage.get() + kBufferSize)); | |
| 66 | 69 |
| 67 // Make sure that the memory of the bankruptcy chunk can be dereferenced. | 70 // Make sure that the memory of the bankruptcy chunk can be dereferenced. |
| 68 memset(bankrupcy_chunk->begin(), 0, kChunkSize); | 71 memset(bankrupcy_chunk->begin(), 0, kChunkSize); |
| 69 } | 72 } |
| 73 EXPECT_EQ(2u, ring_buffer.GetNumChunksTaken()); |
| 70 | 74 |
| 71 // Return a chunk and check that the ring buffer is not bankrupt anymore. | 75 // Return a chunk and check that the ring buffer is not bankrupt anymore. |
| 72 ring_buffer.ReturnChunk(chunk2, 42); | 76 chunk2->set_used_size(42); |
| 73 TraceRingBuffer::Chunk* chunk = ring_buffer.TakeChunk(); | 77 ring_buffer.ReturnChunk(chunk2); |
| 78 EXPECT_EQ(1u, ring_buffer.GetNumChunksTaken()); |
| 79 TraceRingBuffer::Chunk* chunk = ring_buffer.TakeChunk(1); |
| 74 ASSERT_NE(nullptr, chunk); | 80 ASSERT_NE(nullptr, chunk); |
| 75 ASSERT_TRUE(overlap(chunk->begin(), chunk->end(), storage.get(), | 81 ASSERT_FALSE(ring_buffer.IsBankrupcyChunkForTesting(chunk)); |
| 76 storage.get() + kBufferSize)); | |
| 77 } | 82 } |
| 78 | 83 |
| 79 } // namespace | 84 } // namespace |
| 80 } // namespace v2 | 85 } // namespace v2 |
| 81 } // namespace tracing | 86 } // namespace tracing |
| OLD | NEW |