Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(748)

Unified Diff: base/trace_event/v2/ring_buffer.cc

Issue 1947373002: Tracing V2 prototype [NOT FOR REVIEW] Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: WORKS Created 4 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « base/trace_event/v2/ring_buffer.h ('k') | base/trace_event/v2/scattered_buffer.h » ('j') | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
Index: base/trace_event/v2/ring_buffer.cc
diff --git a/base/trace_event/v2/ring_buffer.cc b/base/trace_event/v2/ring_buffer.cc
new file mode 100644
index 0000000000000000000000000000000000000000..5b02b32fcf04bd94a5df33b78c757dfb1d8abae6
--- /dev/null
+++ b/base/trace_event/v2/ring_buffer.cc
@@ -0,0 +1,215 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/v2/ring_buffer.h"
+
+#include "base/threading/platform_thread.h"
+#include "base/trace_event/common/proto/events_chunk.tracing-pb.h"
+#include "base/trace_event/v2/proto_utils.h"
+#include "base/trace_event/v2/append_only_proto_message.h"
+
+namespace base {
+namespace trace_event {
+namespace v2 {
+
+using ChunkProto = tracing::EventsChunk;
+
+namespace {
+
+const size_t kChunkSize = 32*1024;
+const size_t kEventPreambleSize = AppendOnlyProtoMessage::kMaxMessageLengthFieldSize + 1;
+
+uint8_t* WriteProtoVarInt(uint8_t* dst, uint32_t field_id, uint32_t value) {
+ dst += ProtoUtils::EncodeVarIntUnsigned(
+ ProtoUtils::GetVarIntFieldHeader(field_id), dst);
+ dst += ProtoUtils::EncodeVarIntUnsigned(value, dst);
+ return dst;
+}
+
+uint8_t* WriteEventPreamble(TraceEvent* event, uint8_t* dst) {
+ static_assert(
+ ChunkProto::kEventsFieldNumber <= 31,
+ "Chunk.event must have a field id <= 31 to stay within 1 byte preamble");
+ uint8_t* buf = dst;
+ *buf = static_cast<uint8_t>(ProtoUtils::GetLengthLimitedFieldHeader(ChunkProto::kEventsFieldNumber));
+ ++buf;
+ ScatteredBuffer::ContiguousMemoryRange reserved_range;
+ reserved_range.begin = buf;
+ buf += AppendOnlyProtoMessage::kMaxMessageLengthFieldSize;
+ reserved_range.end = buf;
+ event->set_write_size_field_on_finalization(reserved_range);
+ DCHECK_EQ(kEventPreambleSize, static_cast<size_t>(buf - dst));
+ return buf;
+}
+
+} // namespace
+
+RingBuffer::RingBuffer(uint8_t* begin, size_t size)
+ : begin_(begin),
+ end_(begin + size),
+ num_chunks_(size / kChunkSize),
+ current_chunk_idx_(0) {
+ DCHECK_GE(size, kChunkSize);
+ DCHECK_EQ(0ul, size % kChunkSize);
+ chunks_.reset(new Chunk[num_chunks_]);
+ uint8_t* chunk_begin = begin;
+ for (size_t i = 0; i < num_chunks_; ++i) {
+ Chunk& chunk = chunks_[i];
+ chunk.begin_ = chunk_begin;
+ chunk.end_ = chunk_begin + kChunkSize;
+ chunk_begin = chunk.end_;
+ }
+}
+
// Out-of-line destructor; |chunks_| releases the Chunk array. The backing
// memory region itself is owned by the caller, not by RingBuffer.
RingBuffer::~RingBuffer() {}
+
+RingBuffer::Chunk* RingBuffer::TakeChunk() {
+ AutoLock lock(lock_);
+ DCHECK_GT(num_chunks_, 0ul);
+ DCHECK_LT(current_chunk_idx_, num_chunks_);
+ Chunk* chunk = nullptr;
+ for (size_t i = 0; i < num_chunks_; ++i) {
+ chunk = &chunks_[current_chunk_idx_];
+ ++current_chunk_idx_;
+ if (current_chunk_idx_ >= num_chunks_)
+ current_chunk_idx_ = 0;
+ if (chunk->is_returned()) {
+ chunk->set_owner(PlatformThread::CurrentId());
+ chunk->set_proto_used_size(0);
+ return chunk;
+ }
+ }
+
+ // Bankrupcy: there are more threads than chunks. All chunks were on flight.
+ return nullptr; // TODO return a fallback chunk where everybody locks?
+}
+
// Gives |chunk| back to the ring buffer after a writer is done with it.
// |used_size| is the number of payload bytes actually written. The size is
// recorded before the returned flag is set, so that TakeChunk() (which
// checks is_returned() under the same lock) never sees a stale size.
void RingBuffer::ReturnChunk(RingBuffer::Chunk* chunk, uint32_t used_size) {
  AutoLock lock(lock_);
  chunk->set_proto_used_size(used_size);
  chunk->set_returned();
}
+
// Chunks start unowned with null bounds; |begin_|/|end_| are assigned by the
// RingBuffer constructor. NOTE(review): the returned/used-size state checked
// by TakeChunk() is presumably initialized in the header — confirm a fresh
// Chunk reports is_returned() == true so it can be handed out.
RingBuffer::Chunk::Chunk()
    : begin_(nullptr), end_(nullptr), owner_(kInvalidThreadId) {}
RingBuffer::Chunk::~Chunk() {}
+
// Binds this writer to |ring_buffer| and tags every chunk it produces with
// |stream_id|. No chunk is acquired here: the first AddEvent() call triggers
// AcquireNewChunk(). |event_storage_| is zeroed so the placement-new slot
// starts in a known state before the first TraceEvent is constructed there.
ZeroCopyTraceBufferWriter::ZeroCopyTraceBufferWriter(RingBuffer* ring_buffer,
                                                     uint32_t stream_id)
    : ring_buffer_(ring_buffer),
      chunk_(nullptr),
      stream_id_(stream_id),
      chunk_seq_id_(0),
      continue_on_next_chunk_ptr_(nullptr),
      event_start_addr_(nullptr),
      event_(nullptr),
      scattered_buffer_(this) {
  memset(event_storage_, 0, sizeof(event_storage_));
}
+
+ZeroCopyTraceBufferWriter::~ZeroCopyTraceBufferWriter() {}
+
+TraceEventHandle ZeroCopyTraceBufferWriter::AddEvent() {
+ if (event_)
+ event_->~TraceEvent();
+ event_ = new (event_storage_) TraceEvent();
+ // event_.reset(new TraceEvent());
+
+ // In order to start a new event at least kMaxMessageLengthFieldSize + 1 bytes
+ // are required in the chunk, to write at least the size of the partial event.
+ // We stay a bit more conservative here purely for performance reasons. It
+ // doesn't make a lot of sense starting a partial event that will fragment
+ // immediately.
+ // TODO bump up 8 to a reasonable value.
+ if (scattered_buffer_.contiguous_bytes_available() < 8) {
+ scattered_buffer_.Reset(
+ AcquireNewChunk(false /* event_continues_from_prev_chunk */));
+ }
+
+ auto range = scattered_buffer_.ReserveBytes(kEventPreambleSize);
+ event_start_addr_= WriteEventPreamble(event_, range.begin);
+ DCHECK_EQ(scattered_buffer_.write_ptr(), event_start_addr_);
+ event_->set_buffer_writer(&scattered_buffer_);
+
+ // TODO keep alive chunks for reservation until event is finalized. Hmmmmm.
+ return TraceEventHandle(event_);
+}
+
// ScatteredBuffer callback: invoked when the current chunk runs out of space
// in the middle of an event. Fetches a fresh chunk, flagging that the
// in-progress event continues across the chunk boundary.
ScatteredBuffer::ContiguousMemoryRange
ZeroCopyTraceBufferWriter::GetNewContiguousMemoryBuffer() {
  return AcquireNewChunk(true /* event_continues_from_prev_chunk */);
}
+
// Returns the current chunk (if any) to the ring buffer and takes a fresh
// one, writing the new chunk's header proto. If
// |event_continues_from_prev_chunk| is set, the in-progress |event_| spans
// the chunk boundary: the previous chunk's placeholder flag and reserved
// size field are back-patched, and a new event preamble is written into the
// fresh chunk for the continuation fragment. Returns the writable range of
// the new chunk (past its header).
ScatteredBuffer::ContiguousMemoryRange
ZeroCopyTraceBufferWriter::AcquireNewChunk(bool event_continues_from_prev_chunk) {
  if (event_continues_from_prev_chunk) {
    // The |event_| spans across multiple chunks. Flip the placeholder flag
    // written into the previous chunk's header (see below) so readers know
    // that chunk's last event continues here.
    *continue_on_next_chunk_ptr_ = 1;
    const size_t event_partial_size =
        static_cast<size_t>(scattered_buffer_.write_ptr() - event_start_addr_);

    // Back-patch the reserved (redundant, fixed-width) varint size field of
    // the previous fragment with the bytes written into the old chunk.
    ProtoUtils::EncodeRedundantVarIntUnsigned(
        event_partial_size,
        AppendOnlyProtoMessage::kMaxMessageLengthFieldSize,
        event_->size_field().begin);
    event_->inc_size_already_written(event_partial_size);
  }

  // Return the current chunk and acquire a new one.
  if (chunk_) {
    DCHECK_GE(scattered_buffer_.write_ptr(), chunk_->proto_begin());
    DCHECK_LE(scattered_buffer_.write_ptr(), chunk_->end());
    const uint32_t used_size = static_cast<uint32_t>(scattered_buffer_.write_ptr() - chunk_->proto_begin());
    ring_buffer_->ReturnChunk(chunk_, used_size);
  }
  // NOTE(review): TakeChunk() returns nullptr when all chunks are in flight
  // (the "bankruptcy" case); the dereferences below would then crash —
  // confirm whether a fallback is intended before this ships.
  chunk_ = ring_buffer_->TakeChunk();

  // Write the protobuf for the chunk header. The proto for events_chunk is
  // filled manually instead of using the generated C++ stub. Rationale: the
  // stub require this class to perform the buffer write operations. Using this
  // to write our own proto would make this code extremely hard to reason about.
  uint8_t* header = reinterpret_cast<uint8_t*>(chunk_->proto_begin());

  header =
      WriteProtoVarInt(header, ChunkProto::kStreamIdFieldNumber, stream_id_);
  header = WriteProtoVarInt(header, ChunkProto::kSeqIdInStreamFieldNumber,
                            chunk_seq_id_);

  header = WriteProtoVarInt(
      header, ChunkProto::kFirstEventContinuesFromPrevChunkFieldNumber,
      event_continues_from_prev_chunk ? 1 : 0);

  // At this point we don't know yet whether the last event in the chunk is
  // going to continue on the next chunk. For the moment we put a zero as a
  // placeholder and remember its position in the chunk. The actual value will
  // be written the next time we take a new chunk (above in this function).
  header = WriteProtoVarInt(
      header, ChunkProto::kLastEventContinuesOnNextChunkFieldNumber, 0);
  continue_on_next_chunk_ptr_ = header - 1;
  ++chunk_seq_id_;

  // If the new chunk happened while writing an event (the event is spread over
  // multiple chunks) write a new proto preamble for the new partial byte array.
  if (event_continues_from_prev_chunk) {
    header = WriteEventPreamble(event_, header);
  }

  return {header, chunk_->end()};
}
+
+// TODO: Things to test:
+// events beginning and ending precisely on a chunk boundary.
+// events longer than 2 chunks.
+// presence of last event in a non-full chunk.
+// test a long message with nested submessages where the size of the nested
+// submessage is in a previous chunk (chunk-retaining logic).
+// reserved size field num bytes != for main event in chunk vs nested messages.
+
+// TODO possible optimization: in the codegen for add_args_simple instead of
+// ::tracing::EventArgsSimple* inst = new ::tracing::EventArgsSimple();
+// use a placement new.
+
+} // namespace v2
+} // namespace trace_event
+} // namespace base
« no previous file with comments | « base/trace_event/v2/ring_buffer.h ('k') | base/trace_event/v2/scattered_buffer.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698