OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/trace_buffer.h" | 5 #include "base/trace_event/trace_buffer.h" |
6 | 6 |
7 #include <utility> | 7 #include <utility> |
8 #include <vector> | 8 #include <vector> |
9 | 9 |
10 #include "base/macros.h" | 10 #include "base/macros.h" |
(...skipping 81 matching lines...) |
92 size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_]; | 92 size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_]; |
93 current_iteration_index_ = NextQueueIndex(current_iteration_index_); | 93 current_iteration_index_ = NextQueueIndex(current_iteration_index_); |
94 if (chunk_index >= chunks_.size()) // Skip uninitialized chunks. | 94 if (chunk_index >= chunks_.size()) // Skip uninitialized chunks. |
95 continue; | 95 continue; |
96 DCHECK(chunks_[chunk_index]); | 96 DCHECK(chunks_[chunk_index]); |
97 return chunks_[chunk_index].get(); | 97 return chunks_[chunk_index].get(); |
98 } | 98 } |
99 return NULL; | 99 return NULL; |
100 } | 100 } |
101 | 101 |
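Note: NextChunk() resumes from current_iteration_index_ on each call, so repeated calls make exactly one pass over the ring. The chunk_index >= chunks_.size() test skips queue slots whose chunks were never allocated: the recyclable queue is pre-populated with indices while chunks_ grows lazily. A minimal standalone model of that skip-uninitialized scan (hypothetical names, not the Chromium API):

    #include <cstddef>
    #include <vector>

    // Returns the next live element index at or after *cursor, or -1 once the
    // scan reaches |end|. Slots holding an out-of-range index are skipped, the
    // same way NextChunk() skips slots for chunks that were never built.
    int NextLive(const std::vector<size_t>& queue, size_t live_count,
                 size_t* cursor, size_t end) {
      while (*cursor != end) {
        size_t index = queue[*cursor];
        *cursor = (*cursor + 1) % queue.size();  // stand-in for NextQueueIndex
        if (index >= live_count)  // skip uninitialized slots
          continue;
        return static_cast<int>(index);
      }
      return -1;
    }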
102 scoped_ptr<TraceBuffer> CloneForIteration() const override { | |
103 scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer()); | |
104 for (size_t queue_index = queue_head_; queue_index != queue_tail_; | |
105 queue_index = NextQueueIndex(queue_index)) { | |
106 size_t chunk_index = recyclable_chunks_queue_[queue_index]; | |
107 if (chunk_index >= chunks_.size()) // Skip uninitialized chunks. | |
108 continue; | |
109 TraceBufferChunk* chunk = chunks_[chunk_index].get(); | |
110 cloned_buffer->chunks_.push_back(chunk ? chunk->Clone() : NULL); | |
111 } | |
112 return std::move(cloned_buffer); | |
113 } | |
114 | |
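Note: this change deletes the whole clone-for-iteration path: the CloneForIteration() override above, the ClonedTraceBuffer helper below, and TraceBufferChunk::Clone() near the end of the file. One detail in the deleted code worth flagging: returning scoped_ptr<ClonedTraceBuffer> through a scoped_ptr<TraceBuffer> return type required the explicit std::move on old line 112, since pre-C++14 rules did not apply the implicit move on return across a derived-to-base conversion. A minimal sketch of the same pattern, using std::unique_ptr as a stand-in for scoped_ptr:

    #include <memory>
    #include <utility>

    struct Base { virtual ~Base() = default; };
    struct Derived : Base {};

    // The named local is an lvalue, so the converting move constructor of
    // unique_ptr<Base> needs an explicit std::move to bind.
    std::unique_ptr<Base> Make() {
      std::unique_ptr<Derived> p(new Derived);
      return std::move(p);  // Derived* converts to Base* during the move
    }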
115 void EstimateTraceMemoryOverhead( | 102 void EstimateTraceMemoryOverhead( |
116 TraceEventMemoryOverhead* overhead) override { | 103 TraceEventMemoryOverhead* overhead) override { |
117 overhead->Add("TraceBufferRingBuffer", sizeof(*this)); | 104 overhead->Add("TraceBufferRingBuffer", sizeof(*this)); |
118 for (size_t queue_index = queue_head_; queue_index != queue_tail_; | 105 for (size_t queue_index = queue_head_; queue_index != queue_tail_; |
119 queue_index = NextQueueIndex(queue_index)) { | 106 queue_index = NextQueueIndex(queue_index)) { |
120 size_t chunk_index = recyclable_chunks_queue_[queue_index]; | 107 size_t chunk_index = recyclable_chunks_queue_[queue_index]; |
121 if (chunk_index >= chunks_.size()) // Skip uninitialized chunks. | 108 if (chunk_index >= chunks_.size()) // Skip uninitialized chunks. |
122 continue; | 109 continue; |
123 chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead); | 110 chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead); |
124 } | 111 } |
125 } | 112 } |
126 | 113 |
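Note: the two-argument overhead->Add() charges the buffer's own footprint; each live chunk then contributes its own estimate via the loop. TraceBufferVector's version further down additionally splits "allocated" (the full max_chunks_ budget) from "resident" (only the slots in use). A runnable toy of that arithmetic, using illustrative stand-ins rather than the Chromium types:

    #include <cstddef>
    #include <cstdio>
    #include <memory>
    #include <vector>

    int main() {
      const size_t max_chunks = 1000;            // budgeted up front
      std::vector<std::unique_ptr<int>> chunks;  // grows lazily
      chunks.emplace_back(new int(0));           // one live chunk

      const size_t kPtrSize = sizeof(decltype(chunks)::value_type);
      const size_t allocated = sizeof(chunks) + max_chunks * kPtrSize;
      const size_t resident = sizeof(chunks) + chunks.size() * kPtrSize;
      std::printf("allocated=%zu resident=%zu\n", allocated, resident);
      return 0;
    }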
127 private: | 114 private: |
128 class ClonedTraceBuffer : public TraceBuffer { | |
129 public: | |
130 ClonedTraceBuffer() : current_iteration_index_(0) {} | |
131 | |
132 // The only implemented method. | |
133 const TraceBufferChunk* NextChunk() override { | |
134 return current_iteration_index_ < chunks_.size() | |
135 ? chunks_[current_iteration_index_++].get() | |
136 : NULL; | |
137 } | |
138 | |
139 scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override { | |
140 NOTIMPLEMENTED(); | |
141 return scoped_ptr<TraceBufferChunk>(); | |
142 } | |
143 void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk>) override { | |
144 NOTIMPLEMENTED(); | |
145 } | |
146 bool IsFull() const override { return false; } | |
147 size_t Size() const override { return 0; } | |
148 size_t Capacity() const override { return 0; } | |
149 TraceEvent* GetEventByHandle(TraceEventHandle handle) override { | |
150 return NULL; | |
151 } | |
152 scoped_ptr<TraceBuffer> CloneForIteration() const override { | |
153 NOTIMPLEMENTED(); | |
154 return scoped_ptr<TraceBuffer>(); | |
155 } | |
156 void EstimateTraceMemoryOverhead( | |
157 TraceEventMemoryOverhead* overhead) override { | |
158 NOTIMPLEMENTED(); | |
159 } | |
160 | |
161 size_t current_iteration_index_; | |
162 std::vector<scoped_ptr<TraceBufferChunk>> chunks_; | |
163 }; | |
164 | |
165 bool QueueIsEmpty() const { return queue_head_ == queue_tail_; } | 115 bool QueueIsEmpty() const { return queue_head_ == queue_tail_; } |
166 | 116 |
167 size_t QueueSize() const { | 117 size_t QueueSize() const { |
168 return queue_tail_ > queue_head_ | 118 return queue_tail_ > queue_head_ |
169 ? queue_tail_ - queue_head_ | 119 ? queue_tail_ - queue_head_ |
170 : queue_tail_ + queue_capacity() - queue_head_; | 120 : queue_tail_ + queue_capacity() - queue_head_; |
171 } | 121 } |
172 | 122 |
173 bool QueueIsFull() const { return QueueSize() == queue_capacity() - 1; } | 123 bool QueueIsFull() const { return QueueSize() == queue_capacity() - 1; } |
174 | 124 |
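Note: the ring queue tells full from empty by leaving one slot unused: head == tail means empty, so at most queue_capacity() - 1 chunks can be queued, which is exactly the condition QueueIsFull() tests. (QueueSize() as written assumes a non-empty queue; head == tail falls into its second branch and would yield queue_capacity(), so callers must check QueueIsEmpty() first.) A self-contained model of the convention, assuming a capacity of 4 for illustration:

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t capacity = 4;
      size_t head = 0, tail = 0;  // head == tail: empty
      auto size = [&] {
        return tail >= head ? tail - head : tail + capacity - head;
      };
      assert(size() == 0);
      for (int i = 0; i < 3; ++i)
        tail = (tail + 1) % capacity;  // enqueue three chunk indices
      assert(size() == capacity - 1);  // full: one slot always stays unused
      return 0;
    }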
(...skipping 75 matching lines...) |
250 const TraceBufferChunk* NextChunk() override { | 200 const TraceBufferChunk* NextChunk() override { |
251 while (current_iteration_index_ < chunks_.size()) { | 201 while (current_iteration_index_ < chunks_.size()) { |
252 // Skip in-flight chunks. | 202 // Skip in-flight chunks. |
253 const TraceBufferChunk* chunk = chunks_[current_iteration_index_++]; | 203 const TraceBufferChunk* chunk = chunks_[current_iteration_index_++]; |
254 if (chunk) | 204 if (chunk) |
255 return chunk; | 205 return chunk; |
256 } | 206 } |
257 return NULL; | 207 return NULL; |
258 } | 208 } |
259 | 209 |
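Note: in TraceBufferVector a null entry marks an in-flight chunk (handed out via GetChunk() and not yet returned), so iteration simply passes over it. A hedged sketch of a full drain over any TraceBuffer implementation; DrainForExport is a hypothetical name, and it assumes tracing is stopped so no chunks are checked out mid-scan:

    #include "base/trace_event/trace_buffer.h"

    namespace base {
    namespace trace_event {

    // Walk every readable chunk; NextChunk() returns NULL when exhausted.
    void DrainForExport(TraceBuffer* buffer) {
      while (const TraceBufferChunk* chunk = buffer->NextChunk()) {
        // ... serialize the events in |chunk| ...
      }
    }

    }  // namespace trace_event
    }  // namespace base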
260 scoped_ptr<TraceBuffer> CloneForIteration() const override { | |
261 NOTIMPLEMENTED(); | |
262 return scoped_ptr<TraceBuffer>(); | |
263 } | |
264 | |
265 void EstimateTraceMemoryOverhead( | 210 void EstimateTraceMemoryOverhead( |
266 TraceEventMemoryOverhead* overhead) override { | 211 TraceEventMemoryOverhead* overhead) override { |
267 const size_t chunks_ptr_vector_allocated_size = | 212 const size_t chunks_ptr_vector_allocated_size = |
268 sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type); | 213 sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type); |
269 const size_t chunks_ptr_vector_resident_size = | 214 const size_t chunks_ptr_vector_resident_size = |
270 sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type); | 215 sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type); |
271 overhead->Add("TraceBufferVector", chunks_ptr_vector_allocated_size, | 216 overhead->Add("TraceBufferVector", chunks_ptr_vector_allocated_size, |
272 chunks_ptr_vector_resident_size); | 217 chunks_ptr_vector_resident_size); |
273 for (size_t i = 0; i < chunks_.size(); ++i) { | 218 for (size_t i = 0; i < chunks_.size(); ++i) { |
274 TraceBufferChunk* chunk = chunks_[i]; | 219 TraceBufferChunk* chunk = chunks_[i]; |
(...skipping 26 matching lines...) |
301 seq_ = new_seq; | 246 seq_ = new_seq; |
302 cached_overhead_estimate_.reset(); | 247 cached_overhead_estimate_.reset(); |
303 } | 248 } |
304 | 249 |
305 TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) { | 250 TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) { |
306 DCHECK(!IsFull()); | 251 DCHECK(!IsFull()); |
307 *event_index = next_free_++; | 252 *event_index = next_free_++; |
308 return &chunk_[*event_index]; | 253 return &chunk_[*event_index]; |
309 } | 254 } |
310 | 255 |
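Note: AddTraceEvent() hands out slot next_free_ and post-increments it; the DCHECK(!IsFull()) means a slot must only be requested after checking for room. A hypothetical caller sketch (AddEventIfRoom is not a real helper in this file):

    #include "base/trace_event/trace_buffer.h"

    namespace base {
    namespace trace_event {

    // Returns a fresh event slot, or NULL when the caller should fetch a new
    // chunk from the TraceBuffer instead.
    TraceEvent* AddEventIfRoom(TraceBufferChunk* chunk, size_t* event_index) {
      if (chunk->IsFull())
        return NULL;
      return chunk->AddTraceEvent(event_index);
    }

    }  // namespace trace_event
    }  // namespace base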
311 scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const { | |
312 scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_)); | |
313 cloned_chunk->next_free_ = next_free_; | |
314 for (size_t i = 0; i < next_free_; ++i) | |
315 cloned_chunk->chunk_[i].CopyFrom(chunk_[i]); | |
316 return cloned_chunk; | |
317 } | |
318 | |
319 void TraceBufferChunk::EstimateTraceMemoryOverhead( | 256 void TraceBufferChunk::EstimateTraceMemoryOverhead( |
320 TraceEventMemoryOverhead* overhead) { | 257 TraceEventMemoryOverhead* overhead) { |
321 if (!cached_overhead_estimate_) { | 258 if (!cached_overhead_estimate_) { |
322 cached_overhead_estimate_.reset(new TraceEventMemoryOverhead); | 259 cached_overhead_estimate_.reset(new TraceEventMemoryOverhead); |
323 | 260 |
324 // When estimating the size of TraceBufferChunk, exclude the array of trace | 261 // When estimating the size of TraceBufferChunk, exclude the array of trace |
325 // events, as they are computed individually below. | 262 // events, as they are computed individually below. |
326 cached_overhead_estimate_->Add("TraceBufferChunk", | 263 cached_overhead_estimate_->Add("TraceBufferChunk", |
327 sizeof(*this) - sizeof(chunk_)); | 264 sizeof(*this) - sizeof(chunk_)); |
328 } | 265 } |
(...skipping 62 matching lines...) |
391 TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) { | 328 TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) { |
392 return new TraceBufferRingBuffer(max_chunks); | 329 return new TraceBufferRingBuffer(max_chunks); |
393 } | 330 } |
394 | 331 |
395 TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) { | 332 TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) { |
396 return new TraceBufferVector(max_chunks); | 333 return new TraceBufferVector(max_chunks); |
397 } | 334 } |
398 | 335 |
399 } // namespace trace_event | 336 } // namespace trace_event |
400 } // namespace base | 337 } // namespace base |
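Note: both factories return a raw pointer whose ownership passes to the caller. A hedged sketch of choosing between them when tracing starts; CreateBuffer and |record_continuously| are illustrative names, not part of this file, and the mode split is an inference from the class names rather than something this file states:

    #include "base/memory/scoped_ptr.h"
    #include "base/trace_event/trace_buffer.h"

    namespace base {
    namespace trace_event {

    // Ring buffer recycles its oldest chunks when full (record-continuously
    // style); the vector buffer reports IsFull() once max_chunks is reached.
    scoped_ptr<TraceBuffer> CreateBuffer(bool record_continuously,
                                         size_t max_chunks) {
      return scoped_ptr<TraceBuffer>(
          record_continuously
              ? TraceBuffer::CreateTraceBufferRingBuffer(max_chunks)
              : TraceBuffer::CreateTraceBufferVectorOfSize(max_chunks));
    }

    }  // namespace trace_event
    }  // namespace base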