Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(60)

Side by Side Diff: base/trace_event/trace_buffer.cc

Issue 2857543002: tracing: Simplify TraceEventMemoryOverhead, use an enum instead of a map (Closed)
Patch Set: Fix compiler issues + omit empty values Created 3 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/trace_event/trace_buffer.h" 5 #include "base/trace_event/trace_buffer.h"
6 6
7 #include <memory> 7 #include <memory>
8 #include <utility> 8 #include <utility>
9 #include <vector> 9 #include <vector>
10 10
(...skipping 87 matching lines...) Expand 10 before | Expand all | Expand 10 after
98 if (chunk_index >= chunks_.size()) // Skip uninitialized chunks. 98 if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
99 continue; 99 continue;
100 DCHECK(chunks_[chunk_index]); 100 DCHECK(chunks_[chunk_index]);
101 return chunks_[chunk_index].get(); 101 return chunks_[chunk_index].get();
102 } 102 }
103 return NULL; 103 return NULL;
104 } 104 }
105 105
106 void EstimateTraceMemoryOverhead( 106 void EstimateTraceMemoryOverhead(
107 TraceEventMemoryOverhead* overhead) override { 107 TraceEventMemoryOverhead* overhead) override {
108 overhead->Add("TraceBufferRingBuffer", sizeof(*this)); 108 overhead->Add(TraceEventMemoryOverhead::kTraceBuffer, sizeof(*this));
109 for (size_t queue_index = queue_head_; queue_index != queue_tail_; 109 for (size_t queue_index = queue_head_; queue_index != queue_tail_;
110 queue_index = NextQueueIndex(queue_index)) { 110 queue_index = NextQueueIndex(queue_index)) {
111 size_t chunk_index = recyclable_chunks_queue_[queue_index]; 111 size_t chunk_index = recyclable_chunks_queue_[queue_index];
112 if (chunk_index >= chunks_.size()) // Skip uninitialized chunks. 112 if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
113 continue; 113 continue;
114 chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead); 114 chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead);
115 } 115 }
116 } 116 }
117 117
118 private: 118 private:
(...skipping 95 matching lines...) Expand 10 before | Expand all | Expand 10 after
214 } 214 }
215 return NULL; 215 return NULL;
216 } 216 }
217 217
218 void EstimateTraceMemoryOverhead( 218 void EstimateTraceMemoryOverhead(
219 TraceEventMemoryOverhead* overhead) override { 219 TraceEventMemoryOverhead* overhead) override {
220 const size_t chunks_ptr_vector_allocated_size = 220 const size_t chunks_ptr_vector_allocated_size =
221 sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type); 221 sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type);
222 const size_t chunks_ptr_vector_resident_size = 222 const size_t chunks_ptr_vector_resident_size =
223 sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type); 223 sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type);
224 overhead->Add("TraceBufferVector", chunks_ptr_vector_allocated_size, 224 overhead->Add(TraceEventMemoryOverhead::kTraceBuffer,
225 chunks_ptr_vector_allocated_size,
225 chunks_ptr_vector_resident_size); 226 chunks_ptr_vector_resident_size);
226 for (size_t i = 0; i < chunks_.size(); ++i) { 227 for (size_t i = 0; i < chunks_.size(); ++i) {
227 TraceBufferChunk* chunk = chunks_[i].get(); 228 TraceBufferChunk* chunk = chunks_[i].get();
228 // Skip the in-flight (nullptr) chunks. They will be accounted by the 229 // Skip the in-flight (nullptr) chunks. They will be accounted by the
229 // per-thread-local dumpers, see ThreadLocalEventBuffer::OnMemoryDump. 230 // per-thread-local dumpers, see ThreadLocalEventBuffer::OnMemoryDump.
230 if (chunk) 231 if (chunk)
231 chunk->EstimateTraceMemoryOverhead(overhead); 232 chunk->EstimateTraceMemoryOverhead(overhead);
232 } 233 }
233 } 234 }
234 235
(...skipping 26 matching lines...) Expand all
261 return &chunk_[*event_index]; 262 return &chunk_[*event_index];
262 } 263 }
263 264
264 void TraceBufferChunk::EstimateTraceMemoryOverhead( 265 void TraceBufferChunk::EstimateTraceMemoryOverhead(
265 TraceEventMemoryOverhead* overhead) { 266 TraceEventMemoryOverhead* overhead) {
266 if (!cached_overhead_estimate_) { 267 if (!cached_overhead_estimate_) {
267 cached_overhead_estimate_.reset(new TraceEventMemoryOverhead); 268 cached_overhead_estimate_.reset(new TraceEventMemoryOverhead);
268 269
269 // When estimating the size of TraceBufferChunk, exclude the array of trace 270 // When estimating the size of TraceBufferChunk, exclude the array of trace
270 // events, as they are computed individually below. 271 // events, as they are computed individually below.
271 cached_overhead_estimate_->Add("TraceBufferChunk", 272 cached_overhead_estimate_->Add(TraceEventMemoryOverhead::kTraceBufferChunk,
272 sizeof(*this) - sizeof(chunk_)); 273 sizeof(*this) - sizeof(chunk_));
273 } 274 }
274 275
275 const size_t num_cached_estimated_events = 276 const size_t num_cached_estimated_events =
276 cached_overhead_estimate_->GetCount("TraceEvent"); 277 cached_overhead_estimate_->GetCount(
278 TraceEventMemoryOverhead::kTraceEvent);
277 DCHECK_LE(num_cached_estimated_events, size()); 279 DCHECK_LE(num_cached_estimated_events, size());
278 280
279 if (IsFull() && num_cached_estimated_events == size()) { 281 if (IsFull() && num_cached_estimated_events == size()) {
280 overhead->Update(*cached_overhead_estimate_); 282 overhead->Update(*cached_overhead_estimate_);
281 return; 283 return;
282 } 284 }
283 285
284 for (size_t i = num_cached_estimated_events; i < size(); ++i) 286 for (size_t i = num_cached_estimated_events; i < size(); ++i)
285 chunk_[i].EstimateTraceMemoryOverhead(cached_overhead_estimate_.get()); 287 chunk_[i].EstimateTraceMemoryOverhead(cached_overhead_estimate_.get());
286 288
287 if (IsFull()) { 289 if (IsFull()) {
288 cached_overhead_estimate_->AddSelf(); 290 cached_overhead_estimate_->AddSelf();
289 } else { 291 } else {
290 // The unused TraceEvents in |chunks_| are not cached. They will keep 292 // The unused TraceEvents in |chunks_| are not cached. They will keep
291 // changing as new TraceEvents are added to this chunk, so they are 293 // changing as new TraceEvents are added to this chunk, so they are
292 // computed on the fly. 294 // computed on the fly.
293 const size_t num_unused_trace_events = capacity() - size(); 295 const size_t num_unused_trace_events = capacity() - size();
294 overhead->Add("TraceEvent (unused)", 296 overhead->Add(TraceEventMemoryOverhead::kUnusedTraceEvent,
295 num_unused_trace_events * sizeof(TraceEvent)); 297 num_unused_trace_events * sizeof(TraceEvent));
296 } 298 }
297 299
298 overhead->Update(*cached_overhead_estimate_); 300 overhead->Update(*cached_overhead_estimate_);
299 } 301 }
300 302
301 TraceResultBuffer::OutputCallback 303 TraceResultBuffer::OutputCallback
302 TraceResultBuffer::SimpleOutput::GetCallback() { 304 TraceResultBuffer::SimpleOutput::GetCallback() {
303 return Bind(&SimpleOutput::Append, Unretained(this)); 305 return Bind(&SimpleOutput::Append, Unretained(this));
304 } 306 }
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after
336 TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) { 338 TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) {
337 return new TraceBufferRingBuffer(max_chunks); 339 return new TraceBufferRingBuffer(max_chunks);
338 } 340 }
339 341
340 TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) { 342 TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) {
341 return new TraceBufferVector(max_chunks); 343 return new TraceBufferVector(max_chunks);
342 } 344 }
343 345
344 } // namespace trace_event 346 } // namespace trace_event
345 } // namespace base 347 } // namespace base
OLDNEW
« no previous file with comments | « base/trace_event/heap_profiler_type_name_deduplicator.cc ('k') | base/trace_event/trace_event_argument.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698