Chromium Code Reviews

Unified Diff: base/debug/trace_event_impl.cc

Issue 614103004: replace 'virtual ... OVERRIDE' with '... override' (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: process base/ Created 6 years, 2 months ago
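
The change applied throughout this patch is mechanical: Chromium's legacy OVERRIDE compatibility macro becomes the C++11 'override' keyword, and the now-redundant 'virtual' is dropped from overriding declarations, per the style rule that an overriding function is marked with exactly one of 'virtual', 'override', or 'final'. A minimal before/after sketch of the pattern (Widget and the Button classes are illustrative, not from this file):

// OVERRIDE came from base/compiler_specific.h; on C++11 toolchains it
// simply expanded to the keyword.
#define OVERRIDE override

struct Widget {
  virtual ~Widget() {}
  virtual void Draw() = 0;
};

// Before: pre-C++11 Chromium style.
class OldButton : public Widget {
 public:
  virtual void Draw() OVERRIDE;
};

// After: C++11 style. 'override' alone documents the intent, and the
// compiler rejects the declaration if it no longer overrides anything
// (e.g. after a signature change in Widget).
class NewButton : public Widget {
 public:
  void Draw() override;
};

The payoff is compile-time checking: with plain 'virtual', a stale signature silently introduces a new virtual function instead of overriding; with 'override', that mistake is a hard error.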
--- a/base/debug/trace_event_impl.cc
+++ b/base/debug/trace_event_impl.cc
@@ -1,10 +1,10 @@
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/debug/trace_event_impl.h"
 
 #include <algorithm>
 
 #include "base/base_switches.h"
 #include "base/bind.h"
(...skipping 122 matching lines...)
@@ -133,131 +133,127 @@
         recyclable_chunks_queue_(new size_t[queue_capacity()]),
         queue_head_(0),
         queue_tail_(max_chunks),
         current_iteration_index_(0),
         current_chunk_seq_(1) {
     chunks_.reserve(max_chunks);
     for (size_t i = 0; i < max_chunks; ++i)
       recyclable_chunks_queue_[i] = i;
   }
 
-  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
+  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
     // Because the number of threads is much less than the number of chunks,
     // the queue should never be empty.
     DCHECK(!QueueIsEmpty());
 
     *index = recyclable_chunks_queue_[queue_head_];
     queue_head_ = NextQueueIndex(queue_head_);
     current_iteration_index_ = queue_head_;
 
     if (*index >= chunks_.size())
       chunks_.resize(*index + 1);
 
     TraceBufferChunk* chunk = chunks_[*index];
     chunks_[*index] = NULL;  // Put NULL in the slot of an in-flight chunk.
     if (chunk)
       chunk->Reset(current_chunk_seq_++);
     else
       chunk = new TraceBufferChunk(current_chunk_seq_++);
 
     return scoped_ptr<TraceBufferChunk>(chunk);
   }
 
-  virtual void ReturnChunk(size_t index,
-                           scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
+  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
     // When this method is called, the queue should not be full because it
     // can contain all chunks including the one to be returned.
     DCHECK(!QueueIsFull());
     DCHECK(chunk);
     DCHECK_LT(index, chunks_.size());
     DCHECK(!chunks_[index]);
     chunks_[index] = chunk.release();
     recyclable_chunks_queue_[queue_tail_] = index;
     queue_tail_ = NextQueueIndex(queue_tail_);
   }
 
-  virtual bool IsFull() const OVERRIDE {
-    return false;
-  }
+  bool IsFull() const override { return false; }
 
-  virtual size_t Size() const OVERRIDE {
+  size_t Size() const override {
     // This is approximate because not all of the chunks are full.
     return chunks_.size() * kTraceBufferChunkSize;
   }
 
-  virtual size_t Capacity() const OVERRIDE {
+  size_t Capacity() const override {
     return max_chunks_ * kTraceBufferChunkSize;
   }
 
-  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
+  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
     if (handle.chunk_index >= chunks_.size())
       return NULL;
     TraceBufferChunk* chunk = chunks_[handle.chunk_index];
     if (!chunk || chunk->seq() != handle.chunk_seq)
       return NULL;
     return chunk->GetEventAt(handle.event_index);
   }
 
-  virtual const TraceBufferChunk* NextChunk() OVERRIDE {
+  const TraceBufferChunk* NextChunk() override {
     if (chunks_.empty())
       return NULL;
 
     while (current_iteration_index_ != queue_tail_) {
       size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
       current_iteration_index_ = NextQueueIndex(current_iteration_index_);
       if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
         continue;
       DCHECK(chunks_[chunk_index]);
       return chunks_[chunk_index];
     }
     return NULL;
   }
 
-  virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
+  scoped_ptr<TraceBuffer> CloneForIteration() const override {
     scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
     for (size_t queue_index = queue_head_; queue_index != queue_tail_;
          queue_index = NextQueueIndex(queue_index)) {
       size_t chunk_index = recyclable_chunks_queue_[queue_index];
       if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
         continue;
       TraceBufferChunk* chunk = chunks_[chunk_index];
       cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
     }
     return cloned_buffer.PassAs<TraceBuffer>();
   }
 
  private:
   class ClonedTraceBuffer : public TraceBuffer {
    public:
     ClonedTraceBuffer() : current_iteration_index_(0) {}
 
     // The only implemented method.
-    virtual const TraceBufferChunk* NextChunk() OVERRIDE {
+    const TraceBufferChunk* NextChunk() override {
       return current_iteration_index_ < chunks_.size() ?
           chunks_[current_iteration_index_++] : NULL;
     }
 
-    virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
+    scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
       NOTIMPLEMENTED();
       return scoped_ptr<TraceBufferChunk>();
     }
-    virtual void ReturnChunk(size_t index,
-                             scoped_ptr<TraceBufferChunk>) OVERRIDE {
+    void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk>) override {
       NOTIMPLEMENTED();
     }
-    virtual bool IsFull() const OVERRIDE { return false; }
-    virtual size_t Size() const OVERRIDE { return 0; }
-    virtual size_t Capacity() const OVERRIDE { return 0; }
-    virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
+    bool IsFull() const override { return false; }
+    size_t Size() const override { return 0; }
+    size_t Capacity() const override { return 0; }
+    TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
       return NULL;
     }
-    virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
+    scoped_ptr<TraceBuffer> CloneForIteration() const override {
       NOTIMPLEMENTED();
       return scoped_ptr<TraceBuffer>();
     }
 
     size_t current_iteration_index_;
     ScopedVector<TraceBufferChunk> chunks_;
   };
 
   bool QueueIsEmpty() const {
     return queue_head_ == queue_tail_;
(...skipping 35 matching lines...)
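
The queue helpers used above (QueueIsEmpty(), QueueIsFull(), NextQueueIndex(), queue_capacity()) fall inside the skipped region. For orientation only, a self-contained sketch of the usual wraparound arithmetic for a fixed-capacity index queue like this one; it is an assumption about the elided code, not a copy of it:

#include <cstddef>

// Hypothetical stand-in for the elided helpers, not Chromium code. One
// slot is kept unused so that head == tail unambiguously means "empty"
// while Next(tail) == head means "full"; this matches queue_tail_ being
// initialized to max_chunks in the constructor above.
struct RingIndexQueue {
  size_t max_chunks;
  size_t head;
  size_t tail;

  explicit RingIndexQueue(size_t n) : max_chunks(n), head(0), tail(n) {}

  size_t capacity() const { return max_chunks + 1; }
  size_t Next(size_t i) const { return (i + 1) % capacity(); }
  bool Empty() const { return head == tail; }
  bool Full() const { return Next(tail) == head; }
};

With max_chunks entries plus one spare slot, the queue can hold an index for every chunk at once, which is why ReturnChunk() above can safely DCHECK(!QueueIsFull()).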
@@ -299,75 +295,72 @@
 
 class TraceBufferVector : public TraceBuffer {
  public:
   TraceBufferVector(size_t max_chunks)
       : in_flight_chunk_count_(0),
         current_iteration_index_(0),
         max_chunks_(max_chunks) {
     chunks_.reserve(max_chunks_);
   }
 
-  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
+  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
     // This function may be called when adding normal events or indirectly from
     // AddMetadataEventsWhileLocked(). We can not DCHECK(!IsFull()) because we
     // have to add the metadata events and flush thread-local buffers even if
     // the buffer is full.
     *index = chunks_.size();
     chunks_.push_back(NULL);  // Put NULL in the slot of an in-flight chunk.
     ++in_flight_chunk_count_;
     // + 1 because zero chunk_seq is not allowed.
     return scoped_ptr<TraceBufferChunk>(
         new TraceBufferChunk(static_cast<uint32>(*index) + 1));
   }
 
-  virtual void ReturnChunk(size_t index,
-                           scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
+  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
     DCHECK_GT(in_flight_chunk_count_, 0u);
     DCHECK_LT(index, chunks_.size());
     DCHECK(!chunks_[index]);
     --in_flight_chunk_count_;
     chunks_[index] = chunk.release();
   }
 
-  virtual bool IsFull() const OVERRIDE {
-    return chunks_.size() >= max_chunks_;
-  }
+  bool IsFull() const override { return chunks_.size() >= max_chunks_; }
 
-  virtual size_t Size() const OVERRIDE {
+  size_t Size() const override {
     // This is approximate because not all of the chunks are full.
     return chunks_.size() * kTraceBufferChunkSize;
   }
 
-  virtual size_t Capacity() const OVERRIDE {
+  size_t Capacity() const override {
     return max_chunks_ * kTraceBufferChunkSize;
   }
 
-  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
+  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
     if (handle.chunk_index >= chunks_.size())
       return NULL;
     TraceBufferChunk* chunk = chunks_[handle.chunk_index];
     if (!chunk || chunk->seq() != handle.chunk_seq)
       return NULL;
     return chunk->GetEventAt(handle.event_index);
   }
 
-  virtual const TraceBufferChunk* NextChunk() OVERRIDE {
+  const TraceBufferChunk* NextChunk() override {
     while (current_iteration_index_ < chunks_.size()) {
       // Skip in-flight chunks.
       const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
       if (chunk)
         return chunk;
     }
     return NULL;
   }
 
-  virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
+  scoped_ptr<TraceBuffer> CloneForIteration() const override {
     NOTIMPLEMENTED();
     return scoped_ptr<TraceBuffer>();
   }
 
  private:
   size_t in_flight_chunk_count_;
   size_t current_iteration_index_;
   size_t max_chunks_;
   ScopedVector<TraceBufferChunk> chunks_;
 
(...skipping 488 matching lines...)
@@ -862,21 +855,21 @@
   TraceSampleCallback callback;
 };
 
 // This object must be created on the IO thread.
 class TraceSamplingThread : public PlatformThread::Delegate {
  public:
   TraceSamplingThread();
   virtual ~TraceSamplingThread();
 
   // Implementation of PlatformThread::Delegate:
-  virtual void ThreadMain() OVERRIDE;
+  void ThreadMain() override;
 
   static void DefaultSamplingCallback(TraceBucketData* bucket_data);
 
   void Stop();
   void WaitSamplingEventForTesting();
 
  private:
   friend class TraceLog;
 
   void GetSamples();
(...skipping 176 matching lines...)
@@ -1059,21 +1052,21 @@
         handle.chunk_index != chunk_index_)
       return NULL;
 
     return chunk_->GetEventAt(handle.event_index);
   }
 
   int generation() const { return generation_; }
 
  private:
   // MessageLoop::DestructionObserver
-  virtual void WillDestroyCurrentMessageLoop() OVERRIDE;
+  void WillDestroyCurrentMessageLoop() override;
 
   void FlushWhileLocked();
 
   void CheckThisIsCurrentBuffer() const {
     DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
   }
 
   // Since TraceLog is a leaky singleton, trace_log_ will always be valid
   // as long as the thread exists.
   TraceLog* trace_log_;
(...skipping 1491 matching lines...)
@@ -2571,10 +2564,10 @@
 }
 
 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
   if (*category_group_enabled_) {
     TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
                                                 name_, event_handle_);
   }
 }
 
 }  // namespace trace_event_internal
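
ScopedTraceBinaryEfficient is an RAII wrapper: its constructor (outside this hunk) emits the trace event and saves a handle, and the destructor above stamps the final duration when the object goes out of scope, provided the category is still enabled. A stripped-down sketch of the same scoped-duration idea, with a plain clock standing in for the trace machinery (ScopedTimer is illustrative, not a Chromium API):

#include <chrono>
#include <cstdio>

// Hypothetical stand-in for the RAII duration pattern above.
class ScopedTimer {
 public:
  explicit ScopedTimer(const char* name)
      : name_(name), start_(std::chrono::steady_clock::now()) {}

  // As in ScopedTraceBinaryEfficient, the interesting work happens on
  // destruction: the elapsed time is recorded when the scope ends.
  ~ScopedTimer() {
    std::chrono::steady_clock::time_point end =
        std::chrono::steady_clock::now();
    long long us = std::chrono::duration_cast<std::chrono::microseconds>(
                       end - start_).count();
    std::printf("%s took %lld us\n", name_, us);
  }

 private:
  const char* name_;
  std::chrono::steady_clock::time_point start_;
};

void DoWork() {
  ScopedTimer timer("DoWork");  // Duration is logged when DoWork returns.
  // ... work being measured ...
}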
