| OLD | NEW |
| (Empty) | |
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #ifndef BASE_TRACE_EVENT_V2_RING_BUFFER_H_ |
| 6 #define BASE_TRACE_EVENT_V2_RING_BUFFER_H_ |
| 7 |
#include <inttypes.h>
#include <stddef.h>

#include <memory>
#include <vector>

#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/macros.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread.h"
#include "base/trace_event/v2/scattered_buffer.h"
#include "base/trace_event/v2/trace_event.h"
| 19 |
| 20 namespace tracing { |
| 21 class EventsChunk; |
| 22 } |
| 23 |
| 24 namespace base { |
| 25 namespace trace_event { |
| 26 namespace v2 { |
| 27 |
| 28 // This class deals only with raw storage, doesn't know anything about categorie
s and such. |
| 29 |
| 30 class BASE_EXPORT RingBuffer { |
| 31 public: |
| 32 |
| 33 class Chunk { |
| 34 public: |
| 35 using Header = subtle::Atomic32; |
| 36 Chunk(); |
| 37 ~Chunk(); |
| 38 |
| 39 uint8_t* begin() const { return begin_; } |
| 40 uint8_t* proto_begin() const { return begin_ + sizeof(Header); } |
| 41 uint8_t* end() const { return end_; } |
| 42 void set_proto_used_size(uint32_t size) { subtle::NoBarrier_Store(header(),
size); } |
| 43 uint32_t proto_used_size() const { return subtle::NoBarrier_Load(header());
} |
| 44 |
| 45 private: |
| 46 friend class RingBuffer; |
| 47 |
| 48 Header* header() const { return reinterpret_cast<Header*>(begin_); } |
| 49 bool is_returned() const { return owner_ == kInvalidThreadId; } |
| 50 void set_returned() { owner_ = kInvalidThreadId; } |
| 51 void set_owner(PlatformThreadId tid) { owner_ = tid; } |
| 52 |
| 53 uint8_t* begin_; |
| 54 uint8_t* end_; |
| 55 |
| 56 // Accesses to owner_ must happen under the buffer |lock_|. |
| 57 PlatformThreadId owner_; // kInvalidThreadId means free/returned; |
| 58 |
| 59 DISALLOW_COPY_AND_ASSIGN(Chunk); |
| 60 }; |
| 61 |
| 62 RingBuffer(uint8_t* begin, size_t size); |
| 63 ~RingBuffer(); |
| 64 |
| 65 Chunk* TakeChunk(); |
| 66 void ReturnChunk(Chunk* chunk, uint32_t used_size); |
| 67 |
| 68 private: |
| 69 uint8_t* const begin_; |
| 70 uint8_t* const end_; |
| 71 |
| 72 Lock lock_; |
| 73 std::unique_ptr<Chunk[]> chunks_; |
| 74 const size_t num_chunks_; |
| 75 size_t current_chunk_idx_; |
| 76 |
| 77 DISALLOW_COPY_AND_ASSIGN(RingBuffer); |
| 78 }; |
| 79 |
| 80 // Typically there will be one instance of this per thread. This is going to |
| 81 // be owned by some thread-local tracing thing. |
| 82 class BASE_EXPORT ZeroCopyTraceBufferWriter : public ScatteredBuffer::Delegate { |
| 83 public: |
| 84 ZeroCopyTraceBufferWriter(RingBuffer* trace_buffer, uint32_t stream_id); |
| 85 ~ZeroCopyTraceBufferWriter(); |
| 86 |
| 87 TraceEventHandle AddEvent(); |
| 88 |
| 89 // ScatteredBuffer::Delegate implementation. |
| 90 ScatteredBuffer::ContiguousMemoryRange GetNewContiguousMemoryBuffer() override
; |
| 91 |
| 92 private: |
| 93 ScatteredBuffer::ContiguousMemoryRange AcquireNewChunk(bool event_continues_fr
om_prev_chunk); |
| 94 |
| 95 RingBuffer* ring_buffer_; |
| 96 RingBuffer::Chunk* chunk_; |
| 97 uint32_t stream_id_; |
| 98 uint32_t chunk_seq_id_; |
| 99 uint8_t* continue_on_next_chunk_ptr_; |
| 100 uint8_t* event_start_addr_; |
| 101 TraceEvent* event_; |
| 102 uint8_t event_storage_[sizeof(TraceEvent)]; |
| 103 ScatteredBuffer scattered_buffer_; |
| 104 |
| 105 DISALLOW_COPY_AND_ASSIGN(ZeroCopyTraceBufferWriter); |
| 106 }; |
| 107 |
| 108 } // namespace v2 |
| 109 } // namespace trace_event |
| 110 } // namespace base |
| 111 |
| 112 #endif // BASE_TRACE_EVENT_V2_RING_BUFFER_H_ |
| OLD | NEW |