Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(807)

Side by Side Diff: base/trace_event/v2/ring_buffer.cc

Issue 1947373002: Tracing V2 prototype [NOT FOR REVIEW] Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: WORKS Created 4 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « base/trace_event/v2/ring_buffer.h ('k') | base/trace_event/v2/scattered_buffer.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
(Empty)
1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/trace_event/v2/ring_buffer.h"
6
7 #include "base/threading/platform_thread.h"
8 #include "base/trace_event/common/proto/events_chunk.tracing-pb.h"
9 #include "base/trace_event/v2/proto_utils.h"
10 #include "base/trace_event/v2/append_only_proto_message.h"
11
12 namespace base {
13 namespace trace_event {
14 namespace v2 {
15
16 using ChunkProto = tracing::EventsChunk;
17
18 namespace {
19
20 const size_t kChunkSize = 32*1024;
21 const size_t kEventPreambleSize = AppendOnlyProtoMessage::kMaxMessageLengthField Size + 1;
22
23 uint8_t* WriteProtoVarInt(uint8_t* dst, uint32_t field_id, uint32_t value) {
24 dst += ProtoUtils::EncodeVarIntUnsigned(
25 ProtoUtils::GetVarIntFieldHeader(field_id), dst);
26 dst += ProtoUtils::EncodeVarIntUnsigned(value, dst);
27 return dst;
28 }
29
30 uint8_t* WriteEventPreamble(TraceEvent* event, uint8_t* dst) {
31 static_assert(
32 ChunkProto::kEventsFieldNumber <= 31,
33 "Chunk.event must have a field id <= 31 to stay within 1 byte preamble");
34 uint8_t* buf = dst;
35 *buf = static_cast<uint8_t>(ProtoUtils::GetLengthLimitedFieldHeader(ChunkProto ::kEventsFieldNumber));
36 ++buf;
37 ScatteredBuffer::ContiguousMemoryRange reserved_range;
38 reserved_range.begin = buf;
39 buf += AppendOnlyProtoMessage::kMaxMessageLengthFieldSize;
40 reserved_range.end = buf;
41 event->set_write_size_field_on_finalization(reserved_range);
42 DCHECK_EQ(kEventPreambleSize, static_cast<size_t>(buf - dst));
43 return buf;
44 }
45
46 } // namespace
47
48 RingBuffer::RingBuffer(uint8_t* begin, size_t size)
49 : begin_(begin),
50 end_(begin + size),
51 num_chunks_(size / kChunkSize),
52 current_chunk_idx_(0) {
53 DCHECK_GE(size, kChunkSize);
54 DCHECK_EQ(0ul, size % kChunkSize);
55 chunks_.reset(new Chunk[num_chunks_]);
56 uint8_t* chunk_begin = begin;
57 for (size_t i = 0; i < num_chunks_; ++i) {
58 Chunk& chunk = chunks_[i];
59 chunk.begin_ = chunk_begin;
60 chunk.end_ = chunk_begin + kChunkSize;
61 chunk_begin = chunk.end_;
62 }
63 }
64
65 RingBuffer::~RingBuffer() {}
66
// Hands out the next available chunk to the calling thread, scanning
// round-robin from |current_chunk_idx_|. A chunk is available only if its
// previous owner has returned it via ReturnChunk(). Returns nullptr when
// every chunk is still owned by some writer.
RingBuffer::Chunk* RingBuffer::TakeChunk() {
  AutoLock lock(lock_);
  DCHECK_GT(num_chunks_, 0ul);
  DCHECK_LT(current_chunk_idx_, num_chunks_);
  Chunk* chunk = nullptr;
  // Scan at most |num_chunks_| slots, advancing the shared cursor as we go.
  for (size_t i = 0; i < num_chunks_; ++i) {
    chunk = &chunks_[current_chunk_idx_];
    ++current_chunk_idx_;
    if (current_chunk_idx_ >= num_chunks_)
      current_chunk_idx_ = 0;  // Wrap around: this is a ring.
    if (chunk->is_returned()) {
      chunk->set_owner(PlatformThread::CurrentId());
      chunk->set_proto_used_size(0);
      return chunk;
    }
  }

  // Bankruptcy: there are more threads than chunks. All chunks were in flight.
  return nullptr;  // TODO return a fallback chunk where everybody locks?
}
87
// Gives |chunk| back to the ring buffer, recording how many bytes of proto
// payload were written into it. After this call the chunk may be handed to
// another writer by TakeChunk().
void RingBuffer::ReturnChunk(RingBuffer::Chunk* chunk, uint32_t used_size) {
  AutoLock lock(lock_);
  chunk->set_proto_used_size(used_size);
  chunk->set_returned();
}
93
// A default-constructed Chunk has no backing memory and no owning thread;
// |begin_|/|end_| are assigned by RingBuffer's constructor when the backing
// storage is partitioned.
RingBuffer::Chunk::Chunk()
    : begin_(nullptr), end_(nullptr), owner_(kInvalidThreadId) {}
RingBuffer::Chunk::~Chunk() {}
97
// Serializes trace events into chunks taken from |ring_buffer|, tagging each
// chunk with |stream_id|. No chunk is acquired until the first AddEvent()
// call (|chunk_| starts null).
ZeroCopyTraceBufferWriter::ZeroCopyTraceBufferWriter(RingBuffer* ring_buffer,
                                                     uint32_t stream_id)
    : ring_buffer_(ring_buffer),
      chunk_(nullptr),
      stream_id_(stream_id),
      chunk_seq_id_(0),
      continue_on_next_chunk_ptr_(nullptr),
      event_start_addr_(nullptr),
      event_(nullptr),
      scattered_buffer_(this) {
  // |event_storage_| holds the placement-new'd TraceEvent (see AddEvent()).
  memset(event_storage_, 0, sizeof(event_storage_));
}
110
111 ZeroCopyTraceBufferWriter::~ZeroCopyTraceBufferWriter() {}
112
113 TraceEventHandle ZeroCopyTraceBufferWriter::AddEvent() {
114 if (event_)
115 event_->~TraceEvent();
116 event_ = new (event_storage_) TraceEvent();
117 // event_.reset(new TraceEvent());
118
119 // In order to start a new event at least kMaxMessageLengthFieldSize + 1 bytes
120 // are required in the chunk, to write at least the size of the partial event.
121 // We stay a bit more conservative here purely for performance reasons. It
122 // doesn't make a lot of sense starting a partial event that will fragment
123 // immediately.
124 // TODO bump up 8 to a reasonable value.
125 if (scattered_buffer_.contiguous_bytes_available() < 8) {
126 scattered_buffer_.Reset(
127 AcquireNewChunk(false /* event_continues_from_prev_chunk */));
128 }
129
130 auto range = scattered_buffer_.ReserveBytes(kEventPreambleSize);
131 event_start_addr_= WriteEventPreamble(event_, range.begin);
132 DCHECK_EQ(scattered_buffer_.write_ptr(), event_start_addr_);
133 event_->set_buffer_writer(&scattered_buffer_);
134
135 // TODO keep alive chunks for reservation until event is finalized. Hmmmmm.
136 return TraceEventHandle(event_);
137 }
138
// Presumably invoked by |scattered_buffer_| (constructed with |this|) when it
// runs out of contiguous space mid-event — TODO confirm against
// scattered_buffer.h. Delegates to AcquireNewChunk, marking the in-flight
// event as continuing from the previous chunk.
ScatteredBuffer::ContiguousMemoryRange
ZeroCopyTraceBufferWriter::GetNewContiguousMemoryBuffer() {
  return AcquireNewChunk(true /* event_continues_from_prev_chunk */);
}
143
144 ScatteredBuffer::ContiguousMemoryRange
145 ZeroCopyTraceBufferWriter::AcquireNewChunk(bool event_continues_from_prev_chunk) {
146 if (event_continues_from_prev_chunk) {
147 // The |event_| spawn across multiple chunks.
148 *continue_on_next_chunk_ptr_ = 1;
149 const size_t event_partial_size =
150 static_cast<size_t>(scattered_buffer_.write_ptr() - event_start_addr_);
151
152 ProtoUtils::EncodeRedundantVarIntUnsigned(
153 event_partial_size,
154 AppendOnlyProtoMessage::kMaxMessageLengthFieldSize,
155 event_->size_field().begin);
156 event_->inc_size_already_written(event_partial_size);
157 }
158
159 // Return the current chunk and acquire a new one.
160 if (chunk_) {
161 DCHECK_GE(scattered_buffer_.write_ptr(), chunk_->proto_begin());
162 DCHECK_LE(scattered_buffer_.write_ptr(), chunk_->end());
163 const uint32_t used_size = static_cast<uint32_t>(scattered_buffer_.write_ptr () - chunk_->proto_begin());
164 ring_buffer_->ReturnChunk(chunk_, used_size);
165 }
166 chunk_ = ring_buffer_->TakeChunk();
167
168 // Write the protobuf for the chunk header. The proto for events_chunk is
169 // filled manually instead of using the generated C++ stub. Rationale: the
170 // stub require this class to perform the buffer write operations. Using this
171 // to write our own proto would make this code extremely hard to reason about.
172 uint8_t* header = reinterpret_cast<uint8_t*>(chunk_->proto_begin());
173
174 header =
175 WriteProtoVarInt(header, ChunkProto::kStreamIdFieldNumber, stream_id_);
176 header = WriteProtoVarInt(header, ChunkProto::kSeqIdInStreamFieldNumber,
177 chunk_seq_id_);
178
179 header = WriteProtoVarInt(
180 header, ChunkProto::kFirstEventContinuesFromPrevChunkFieldNumber,
181 event_continues_from_prev_chunk ? 1 : 0);
182
183 // At this point we don't know yet whether the last event in the chunk is
184 // going to continue on the next chunk. For the moment we put a zero as a
185 // placeholder and remember its position in the chunk. The actual value will
186 // be written the next time we take a new chunk (above in this function).
187 header = WriteProtoVarInt(
188 header, ChunkProto::kLastEventContinuesOnNextChunkFieldNumber, 0);
189 continue_on_next_chunk_ptr_ = header - 1;
190 ++chunk_seq_id_;
191
192 // If the new chunk happened while writing an event (the event is spread over
193 // multiple chunks) write a new proto preamble for the new partial byte array.
194 if (event_continues_from_prev_chunk) {
195 header = WriteEventPreamble(event_, header);
196 }
197
198 return {header, chunk_->end()};
199 }
200
201 // TODO: Things to test:
202 // events beginning and ending precisely on a chunk boundary.
203 // events longer than 2 chunks.
204 // presence of last event in a non-full chunk.
// test a long message with nested submessages where the size of the nested
// submessage is in a previous chunk. (chunk retaining logic).
207 // reserved size field num bytes != for main event in chunk vs nested messages.
208
209 // TODO possible optimization: in the codegen for add_args_simple instead of
210 // ::tracing::EventArgsSimple* inst = new ::tracing::EventArgsSimple();
211 // use a placement new.
212
213 } // namespace v2
214 } // namespace trace_event
215 } // namespace base
OLDNEW
« no previous file with comments | « base/trace_event/v2/ring_buffer.h ('k') | base/trace_event/v2/scattered_buffer.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698