// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/tracing/core/trace_buffer_writer.h"

#include "base/compiler_specific.h"
#include "base/logging.h"
#include "components/tracing/core/proto_utils.h"

namespace tracing {
namespace v2 {

namespace {

// TODO(primiano) remove this in next CLs. This should just be taken from the
// C++ class autogenerated from events_chunk.proto (crbug.com/608721).
struct ChunkProto {
  enum : uint32_t {
    kWriterIdFieldNumber = 1,
    kSeqIdInStreamFieldNumber = 2,
    kEventsFieldNumber = 3,
    kFirstEventContinuesFromPrevChunkFieldNumber = 4,
    kLastEventContinuesOnNextChunkFieldNumber = 5
  };
};

const size_t kEventPreambleSize = 1 + proto::kMessageLengthFieldSize;

// TODO(primiano): replace 16 with a more reasonable size, that is, the size
// of a simple trace event with no args.
const size_t kMinEventSize = 16;

}  // namespace

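// A TraceBufferWriter owns at most one active chunk of the ring buffer at a
// time (plus any chunks retained while an event straddles a chunk boundary,
// see FinalizeCurrentEvent()). No chunk is acquired until the first
// AddEvent() call. |stream_writer_| is constructed with |this| so that
// GetNewBuffer() is invoked when a write runs past the end of the current
// chunk.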
TraceBufferWriter::TraceBufferWriter(TraceRingBuffer* trace_ring_buffer,
                                     uint32_t writer_id)
    : trace_ring_buffer_(trace_ring_buffer),
      writer_id_(writer_id),
      chunk_seq_id_(0),
      chunk_(nullptr),
      event_data_start_in_current_chunk_(nullptr),
      stream_writer_(this) {
  event_.Reset(&stream_writer_);
}

TraceBufferWriter::~TraceBufferWriter() {}

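// Seals the event currently being written (if any), committing all its fields
// to the ring buffer, and returns any chunks that were retained while that
// event fragmented across chunk boundaries (all but the currently active one).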
void TraceBufferWriter::FinalizeCurrentEvent() {
  if (UNLIKELY(!chunk_))
    return;

  // Finalize the last event added. This ensures that it and all its nested
  // fields are committed to the ring buffer and sealed. No further changes to
  // the chunk's memory can be made through |event_| after this point.
  event_.Finalize();

  // In the unlikely case that the last event wrapped over one or more chunks,
  // it is now time to return those chunks (all but the active one) back to
  // the ring buffer.
  TraceRingBuffer::Chunk* retained_chunk = chunk_->next_in_owner_list();
  if (UNLIKELY(retained_chunk)) {
    while (retained_chunk) {
      TraceRingBuffer::Chunk* next = retained_chunk->next_in_owner_list();
      retained_chunk->set_next_in_owner_list(nullptr);
      trace_ring_buffer_->ReturnChunk(retained_chunk);
      retained_chunk = next;
    }
    chunk_->set_next_in_owner_list(nullptr);
  }
}

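// Finalizes the previous event (if any) and starts a new one. If the current
// chunk cannot hold at least |kMinEventSize| bytes, a fresh chunk is acquired
// before the event preamble is written.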
TraceEventHandle TraceBufferWriter::AddEvent() {
  FinalizeCurrentEvent();

  // In order to start a new event, at least kMessageLengthFieldSize + 1 bytes
  // are required in the chunk to write the preamble and the size of the event
  // itself. We require a bit more room than that: it doesn't make much sense
  // to start a partial event that will fragment immediately afterwards.
  static_assert(kMinEventSize >= proto::kMessageLengthFieldSize + 1,
                "kMinEventSize too small");
  if (stream_writer_.bytes_available() < kMinEventSize)
    stream_writer_.Reset(AcquireNewChunk(false /* is_fragmenting_event */));

  event_.Reset(&stream_writer_);
  WriteEventPreambleForNewChunk(
      stream_writer_.ReserveBytesUnsafe(kEventPreambleSize));
  DCHECK_EQ(stream_writer_.write_ptr(), event_data_start_in_current_chunk_);
  return TraceEventHandle(static_cast<::tracing::proto::Event*>(&event_));
}

// This is invoked by the ProtoZeroMessage write methods when reaching the
// end of the current chunk during a write.
ContiguousMemoryRange TraceBufferWriter::GetNewBuffer() {
  return AcquireNewChunk(true /* is_fragmenting_event */);
}

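// Seals the current chunk: if the last event continues into the next chunk,
// writes the |last_event_continues_on_next_chunk| field into the two bytes
// reserved at the end of the chunk, then records how many payload bytes were
// actually used.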
void TraceBufferWriter::FinalizeCurrentChunk(bool is_fragmenting_event) {
  DCHECK(!is_fragmenting_event || chunk_);
  if (!chunk_)
    return;
  uint8_t* write_ptr = stream_writer_.write_ptr();
  DCHECK_GE(write_ptr, chunk_->payload());
  DCHECK_LE(write_ptr, chunk_->end() - 2);

  if (is_fragmenting_event) {
    proto::StaticAssertSingleBytePreamble<
        ChunkProto::kLastEventContinuesOnNextChunkFieldNumber>();
    *write_ptr++ = static_cast<uint8_t>(proto::MakeTagVarInt(
        ChunkProto::kLastEventContinuesOnNextChunkFieldNumber));
    *write_ptr++ = 1;  // = true.
  }

  DCHECK_LT(static_cast<uintptr_t>(write_ptr - chunk_->payload()), kChunkSize);
  chunk_->set_used_size(static_cast<uint32_t>(write_ptr - chunk_->payload()));
}

// There are two paths that lead to AcquireNewChunk():
// When |is_fragmenting_event| = false:
//   AddEvent() is called and there isn't enough room in the current chunk to
//   start a new event (or we don't have a chunk yet).
// When |is_fragmenting_event| = true:
//   The client is writing an event, a ProtoZeroMessage::Append* method hits
//   the boundary of the chunk and requests a new one via GetNewBuffer().
ContiguousMemoryRange TraceBufferWriter::AcquireNewChunk(
    bool is_fragmenting_event) {
  FinalizeCurrentChunk(is_fragmenting_event);
  TraceRingBuffer::Chunk* new_chunk = trace_ring_buffer_->TakeChunk(writer_id_);
  if (is_fragmenting_event) {
    // Backfill the size field of the event with the partial size accumulated
    // so far in the old chunk. WriteEventPreambleForNewChunk() will take care
    // of resetting the |size_field| of the event to the new chunk.
    DCHECK_GE(event_data_start_in_current_chunk_, chunk_->payload());
    DCHECK_LE(event_data_start_in_current_chunk_,
              chunk_->end() - proto::kMessageLengthFieldSize);
    const uint32_t event_partial_size = static_cast<uint32_t>(
        stream_writer_.write_ptr() - event_data_start_in_current_chunk_);
    proto::WriteRedundantVarIntU32<proto::kMessageLengthFieldSize>(
        event_partial_size, event_.size_field().begin);
    event_.inc_size_already_written(event_partial_size);

    // If this is a continuation of an event, this writer needs to retain the
    // old chunk. The client might still need to write to it: this deals with
    // the case of a nested message which is started in one chunk and ends in
    // another one. The finalization needs to write-back the size field in the
    // old chunk.
    new_chunk->set_next_in_owner_list(chunk_);
  } else if (chunk_) {
    // Otherwise, if this is a new event, the previous chunk can be returned.
    trace_ring_buffer_->ReturnChunk(chunk_);
  }
  chunk_ = new_chunk;

  // Write the protobuf for the chunk header. The generated C++ stub for
  // events_chunk.proto cannot be used here because that would re-enter this
  // class and make this code extremely hard to reason about.
  uint8_t* chunk_proto = new_chunk->payload();

  proto::StaticAssertSingleBytePreamble<ChunkProto::kWriterIdFieldNumber>();
  *chunk_proto++ = static_cast<uint8_t>(
      proto::MakeTagVarInt(ChunkProto::kWriterIdFieldNumber));
  chunk_proto = proto::WriteVarIntU32(writer_id_, chunk_proto);

  proto::StaticAssertSingleBytePreamble<
      ChunkProto::kSeqIdInStreamFieldNumber>();
  *chunk_proto++ = static_cast<uint8_t>(
      proto::MakeTagVarInt(ChunkProto::kSeqIdInStreamFieldNumber));
  chunk_proto = proto::WriteVarIntU32(chunk_seq_id_, chunk_proto);

  if (is_fragmenting_event) {
    proto::StaticAssertSingleBytePreamble<
        ChunkProto::kFirstEventContinuesFromPrevChunkFieldNumber>();
    *chunk_proto++ = static_cast<uint8_t>(proto::MakeTagVarInt(
        ChunkProto::kFirstEventContinuesFromPrevChunkFieldNumber));
    *chunk_proto++ = 1;  // = true.
  }

  ++chunk_seq_id_;

  // If the new chunk was requested while writing an event (the event spans
  // across chunks), write a new preamble for the partial event in the new
  // chunk.
  if (is_fragmenting_event)
    chunk_proto = WriteEventPreambleForNewChunk(chunk_proto);

  // We reserve 2 bytes from the end, so that FinalizeCurrentChunk() can use
  // them to write the |last_event_continues_on_next_chunk| field.
  return {chunk_proto, new_chunk->end() - 2};
}

// Writes the one-byte preamble for the start of either a new or a partial
// event and reserves kMessageLengthFieldSize bytes for its length. Also keeps
// the size-field bookkeeping up to date. Returns the pointer in the chunk
// past the event preamble, where the event proto should be written.
uint8_t* TraceBufferWriter::WriteEventPreambleForNewChunk(uint8_t* begin) {
  // The caller must have ensured that there is enough room in the chunk: the
  // event preamble itself cannot be fragmented.
  uint8_t* const end = begin + kEventPreambleSize;
  proto::StaticAssertSingleBytePreamble<ChunkProto::kEventsFieldNumber>();
  *begin++ = static_cast<uint8_t>(
      proto::MakeTagLengthDelimited(ChunkProto::kEventsFieldNumber));
  ContiguousMemoryRange range = {begin, end};
  event_.set_size_field(range);
  event_data_start_in_current_chunk_ = end;
  return end;
}

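// Finalizes the current event and chunk and returns the active chunk to the
// ring buffer. After this call the writer owns no chunks until the next
// AddEvent().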
void TraceBufferWriter::Flush() {
  FinalizeCurrentEvent();
  FinalizeCurrentChunk(false /* is_fragmenting_event */);
  // |chunk_| can be null if no event was ever added (or after a previous
  // Flush()); there is nothing to return in that case.
  if (chunk_)
    trace_ring_buffer_->ReturnChunk(chunk_);
  chunk_ = nullptr;
}

}  // namespace v2
}  // namespace tracing