| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/trace_buffer.h" | 5 #include "base/trace_event/trace_buffer.h" |
| 6 | 6 |
| 7 #include <memory> | 7 #include <memory> |
| 8 #include <utility> | 8 #include <utility> |
| 9 #include <vector> | 9 #include <vector> |
| 10 | 10 |
| 11 #include "base/macros.h" | 11 #include "base/macros.h" |
| 12 #include "base/trace_event/heap_profiler_scoped_ignore.h" |
| 12 #include "base/trace_event/trace_event_impl.h" | 13 #include "base/trace_event/trace_event_impl.h" |
| 13 | 14 |
| 14 namespace base { | 15 namespace base { |
| 15 namespace trace_event { | 16 namespace trace_event { |
| 16 | 17 |
| 17 namespace { | 18 namespace { |
| 18 | 19 |
| 19 class TraceBufferRingBuffer : public TraceBuffer { | 20 class TraceBufferRingBuffer : public TraceBuffer { |
| 20 public: | 21 public: |
| 21 TraceBufferRingBuffer(size_t max_chunks) | 22 TraceBufferRingBuffer(size_t max_chunks) |
| 22 : max_chunks_(max_chunks), | 23 : max_chunks_(max_chunks), |
| 23 recyclable_chunks_queue_(new size_t[queue_capacity()]), | 24 recyclable_chunks_queue_(new size_t[queue_capacity()]), |
| 24 queue_head_(0), | 25 queue_head_(0), |
| 25 queue_tail_(max_chunks), | 26 queue_tail_(max_chunks), |
| 26 current_iteration_index_(0), | 27 current_iteration_index_(0), |
| 27 current_chunk_seq_(1) { | 28 current_chunk_seq_(1) { |
| 28 chunks_.reserve(max_chunks); | 29 chunks_.reserve(max_chunks); |
| 29 for (size_t i = 0; i < max_chunks; ++i) | 30 for (size_t i = 0; i < max_chunks; ++i) |
| 30 recyclable_chunks_queue_[i] = i; | 31 recyclable_chunks_queue_[i] = i; |
| 31 } | 32 } |
| 32 | 33 |
| 33 std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override { | 34 std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override { |
| 35 HEAP_PROFILER_SCOPED_IGNORE; |
| 36 |
| 34 // Because the number of threads is much less than the number of chunks, | 37 // Because the number of threads is much less than the number of chunks, |
| 35 // the queue should never be empty. | 38 // the queue should never be empty. |
| 36 DCHECK(!QueueIsEmpty()); | 39 DCHECK(!QueueIsEmpty()); |
| 37 | 40 |
| 38 *index = recyclable_chunks_queue_[queue_head_]; | 41 *index = recyclable_chunks_queue_[queue_head_]; |
| 39 queue_head_ = NextQueueIndex(queue_head_); | 42 queue_head_ = NextQueueIndex(queue_head_); |
| 40 current_iteration_index_ = queue_head_; | 43 current_iteration_index_ = queue_head_; |
| 41 | 44 |
| 42 if (*index >= chunks_.size()) | 45 if (*index >= chunks_.size()) |
| 43 chunks_.resize(*index + 1); | 46 chunks_.resize(*index + 1); |
| (...skipping 107 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 151 class TraceBufferVector : public TraceBuffer { | 154 class TraceBufferVector : public TraceBuffer { |
| 152 public: | 155 public: |
| 153 TraceBufferVector(size_t max_chunks) | 156 TraceBufferVector(size_t max_chunks) |
| 154 : in_flight_chunk_count_(0), | 157 : in_flight_chunk_count_(0), |
| 155 current_iteration_index_(0), | 158 current_iteration_index_(0), |
| 156 max_chunks_(max_chunks) { | 159 max_chunks_(max_chunks) { |
| 157 chunks_.reserve(max_chunks_); | 160 chunks_.reserve(max_chunks_); |
| 158 } | 161 } |
| 159 | 162 |
| 160 std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override { | 163 std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override { |
| 164 HEAP_PROFILER_SCOPED_IGNORE; |
| 165 |
| 161 // This function may be called when adding normal events or indirectly from | 166 // This function may be called when adding normal events or indirectly from |
| 162 // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we | 167 // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we |
| 163 // have to add the metadata events and flush thread-local buffers even if | 168 // have to add the metadata events and flush thread-local buffers even if |
| 164 // the buffer is full. | 169 // the buffer is full. |
| 165 *index = chunks_.size(); | 170 *index = chunks_.size(); |
| 166 chunks_.push_back(NULL); // Put NULL in the slot of an in-flight chunk. | 171 chunks_.push_back(NULL); // Put NULL in the slot of an in-flight chunk. |
| 167 ++in_flight_chunk_count_; | 172 ++in_flight_chunk_count_; |
| 168 // + 1 because zero chunk_seq is not allowed. | 173 // + 1 because zero chunk_seq is not allowed. |
| 169 return std::unique_ptr<TraceBufferChunk>( | 174 return std::unique_ptr<TraceBufferChunk>( |
| 170 new TraceBufferChunk(static_cast<uint32_t>(*index) + 1)); | 175 new TraceBufferChunk(static_cast<uint32_t>(*index) + 1)); |
| (...skipping 159 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 330 TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) { | 335 TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) { |
| 331 return new TraceBufferRingBuffer(max_chunks); | 336 return new TraceBufferRingBuffer(max_chunks); |
| 332 } | 337 } |
| 333 | 338 |
| 334 TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) { | 339 TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) { |
| 335 return new TraceBufferVector(max_chunks); | 340 return new TraceBufferVector(max_chunks); |
| 336 } | 341 } |
| 337 | 342 |
| 338 } // namespace trace_event | 343 } // namespace trace_event |
| 339 } // namespace base | 344 } // namespace base |
| OLD | NEW |