Chromium Code Reviews

Diff: base/debug/trace_event_impl.cc

Issue 611153004: replace OVERRIDE and FINAL with override and final in base/ (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: CC_ -> BASE_ (created 6 years, 2 months ago)
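
For context: before this change, Chromium's base/compiler_specific.h provided an OVERRIDE macro that expanded to the C++11 override keyword on toolchains that supported it, and to nothing on those that did not. With all supported toolchains now speaking C++11, the macro can be replaced by the keyword itself. A minimal sketch of what override checks, using the TraceBuffer interface from this file (the derived class here is hypothetical):

    class TraceBuffer {
     public:
      virtual ~TraceBuffer() {}
      virtual bool IsFull() const = 0;
    };

    class MyTraceBuffer : public TraceBuffer {
     public:
      // OK: the signature matches the base class, so override compiles.
      virtual bool IsFull() const override { return false; }

      // Dropping 'const' would silently declare a brand-new virtual under
      // the empty-macro configuration; with the keyword it is a
      // compile-time error:
      //   virtual bool IsFull() override { return false; }
    };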
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/debug/trace_event_impl.h"

 #include <algorithm>

 #include "base/base_switches.h"
 #include "base/bind.h"
(...skipping 122 matching lines...)
       recyclable_chunks_queue_(new size_t[queue_capacity()]),
       queue_head_(0),
       queue_tail_(max_chunks),
       current_iteration_index_(0),
       current_chunk_seq_(1) {
     chunks_.reserve(max_chunks);
     for (size_t i = 0; i < max_chunks; ++i)
       recyclable_chunks_queue_[i] = i;
   }

-  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
+  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
     // Because the number of threads is much less than the number of chunks,
     // the queue should never be empty.
     DCHECK(!QueueIsEmpty());

     *index = recyclable_chunks_queue_[queue_head_];
     queue_head_ = NextQueueIndex(queue_head_);
     current_iteration_index_ = queue_head_;

     if (*index >= chunks_.size())
       chunks_.resize(*index + 1);

     TraceBufferChunk* chunk = chunks_[*index];
     chunks_[*index] = NULL;  // Put NULL in the slot of an in-flight chunk.
     if (chunk)
       chunk->Reset(current_chunk_seq_++);
     else
       chunk = new TraceBufferChunk(current_chunk_seq_++);

     return scoped_ptr<TraceBufferChunk>(chunk);
   }

   virtual void ReturnChunk(size_t index,
-                           scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
+                           scoped_ptr<TraceBufferChunk> chunk) override {
     // When this method is called, the queue should not be full because it
     // can contain all chunks including the one to be returned.
     DCHECK(!QueueIsFull());
     DCHECK(chunk);
     DCHECK_LT(index, chunks_.size());
     DCHECK(!chunks_[index]);
     chunks_[index] = chunk.release();
     recyclable_chunks_queue_[queue_tail_] = index;
     queue_tail_ = NextQueueIndex(queue_tail_);
   }

-  virtual bool IsFull() const OVERRIDE {
+  virtual bool IsFull() const override {
     return false;
   }

-  virtual size_t Size() const OVERRIDE {
+  virtual size_t Size() const override {
     // This is approximate because not all of the chunks are full.
     return chunks_.size() * kTraceBufferChunkSize;
   }

-  virtual size_t Capacity() const OVERRIDE {
+  virtual size_t Capacity() const override {
     return max_chunks_ * kTraceBufferChunkSize;
   }

-  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
+  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
     if (handle.chunk_index >= chunks_.size())
       return NULL;
     TraceBufferChunk* chunk = chunks_[handle.chunk_index];
     if (!chunk || chunk->seq() != handle.chunk_seq)
       return NULL;
     return chunk->GetEventAt(handle.event_index);
   }

-  virtual const TraceBufferChunk* NextChunk() OVERRIDE {
+  virtual const TraceBufferChunk* NextChunk() override {
     if (chunks_.empty())
       return NULL;

     while (current_iteration_index_ != queue_tail_) {
       size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
       current_iteration_index_ = NextQueueIndex(current_iteration_index_);
       if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
         continue;
       DCHECK(chunks_[chunk_index]);
       return chunks_[chunk_index];
     }
     return NULL;
   }

-  virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
+  virtual scoped_ptr<TraceBuffer> CloneForIteration() const override {
     scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
     for (size_t queue_index = queue_head_; queue_index != queue_tail_;
          queue_index = NextQueueIndex(queue_index)) {
       size_t chunk_index = recyclable_chunks_queue_[queue_index];
       if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
         continue;
       TraceBufferChunk* chunk = chunks_[chunk_index];
       cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL);
     }
     return cloned_buffer.PassAs<TraceBuffer>();
   }

  private:
   class ClonedTraceBuffer : public TraceBuffer {
    public:
     ClonedTraceBuffer() : current_iteration_index_(0) {}

     // The only implemented method.
-    virtual const TraceBufferChunk* NextChunk() OVERRIDE {
+    virtual const TraceBufferChunk* NextChunk() override {
       return current_iteration_index_ < chunks_.size() ?
           chunks_[current_iteration_index_++] : NULL;
     }

-    virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
+    virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
       NOTIMPLEMENTED();
       return scoped_ptr<TraceBufferChunk>();
     }
     virtual void ReturnChunk(size_t index,
-                             scoped_ptr<TraceBufferChunk>) OVERRIDE {
+                             scoped_ptr<TraceBufferChunk>) override {
       NOTIMPLEMENTED();
     }
-    virtual bool IsFull() const OVERRIDE { return false; }
-    virtual size_t Size() const OVERRIDE { return 0; }
-    virtual size_t Capacity() const OVERRIDE { return 0; }
-    virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
+    virtual bool IsFull() const override { return false; }
+    virtual size_t Size() const override { return 0; }
+    virtual size_t Capacity() const override { return 0; }
+    virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
       return NULL;
     }
-    virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
+    virtual scoped_ptr<TraceBuffer> CloneForIteration() const override {
       NOTIMPLEMENTED();
       return scoped_ptr<TraceBuffer>();
     }

     size_t current_iteration_index_;
     ScopedVector<TraceBufferChunk> chunks_;
   };

   bool QueueIsEmpty() const {
     return queue_head_ == queue_tail_;
(...skipping 35 matching lines...)
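
The ring-buffer implementation above recycles chunk slots through a fixed-capacity circular queue of indices: GetChunk() pops the oldest recyclable index off the head, and ReturnChunk() pushes the returned index onto the tail, so the oldest chunk is reused first. Below is a plausible reconstruction of the queue helpers the excerpt references but does not show (queue_capacity(), NextQueueIndex(), QueueIsFull()); treat the +1 sizing as an assumption, chosen so that head == tail can unambiguously mean empty:

    // Sketch of the circular-queue helpers referenced above.
    size_t queue_capacity() const {
      // One extra slot so that queue_head_ == queue_tail_ always means
      // "empty"; a full queue still leaves head != tail.
      return max_chunks_ + 1;
    }

    size_t NextQueueIndex(size_t index) const {
      ++index;
      if (index >= queue_capacity())
        index = 0;
      return index;
    }

    bool QueueIsEmpty() const { return queue_head_ == queue_tail_; }

    bool QueueIsFull() const {
      return NextQueueIndex(queue_tail_) == queue_head_;
    }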

 class TraceBufferVector : public TraceBuffer {
  public:
   TraceBufferVector(size_t max_chunks)
       : in_flight_chunk_count_(0),
         current_iteration_index_(0),
         max_chunks_(max_chunks) {
     chunks_.reserve(max_chunks_);
   }

-  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
+  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
     // This function may be called when adding normal events or indirectly from
     // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we
     // have to add the metadata events and flush thread-local buffers even if
     // the buffer is full.
     *index = chunks_.size();
     chunks_.push_back(NULL);  // Put NULL in the slot of an in-flight chunk.
     ++in_flight_chunk_count_;
     // + 1 because zero chunk_seq is not allowed.
     return scoped_ptr<TraceBufferChunk>(
         new TraceBufferChunk(static_cast<uint32>(*index) + 1));
   }

   virtual void ReturnChunk(size_t index,
-                           scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
+                           scoped_ptr<TraceBufferChunk> chunk) override {
     DCHECK_GT(in_flight_chunk_count_, 0u);
     DCHECK_LT(index, chunks_.size());
     DCHECK(!chunks_[index]);
     --in_flight_chunk_count_;
     chunks_[index] = chunk.release();
   }

-  virtual bool IsFull() const OVERRIDE {
+  virtual bool IsFull() const override {
     return chunks_.size() >= max_chunks_;
   }

-  virtual size_t Size() const OVERRIDE {
+  virtual size_t Size() const override {
     // This is approximate because not all of the chunks are full.
     return chunks_.size() * kTraceBufferChunkSize;
   }

-  virtual size_t Capacity() const OVERRIDE {
+  virtual size_t Capacity() const override {
     return max_chunks_ * kTraceBufferChunkSize;
   }

-  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
+  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
     if (handle.chunk_index >= chunks_.size())
       return NULL;
     TraceBufferChunk* chunk = chunks_[handle.chunk_index];
     if (!chunk || chunk->seq() != handle.chunk_seq)
       return NULL;
     return chunk->GetEventAt(handle.event_index);
   }

-  virtual const TraceBufferChunk* NextChunk() OVERRIDE {
+  virtual const TraceBufferChunk* NextChunk() override {
     while (current_iteration_index_ < chunks_.size()) {
       // Skip in-flight chunks.
       const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
       if (chunk)
         return chunk;
     }
     return NULL;
   }

-  virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
+  virtual scoped_ptr<TraceBuffer> CloneForIteration() const override {
     NOTIMPLEMENTED();
     return scoped_ptr<TraceBuffer>();
   }

  private:
   size_t in_flight_chunk_count_;
   size_t current_iteration_index_;
   size_t max_chunks_;
   ScopedVector<TraceBufferChunk> chunks_;

(...skipping 488 matching lines...)
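
Both buffer variants follow the same checkout protocol: GetChunk() stores NULL in the slot of a chunk that is in flight on some writer thread, and ReturnChunk() later puts the real pointer back, which is what the DCHECK(!chunks_[index]) assertions enforce. A hedged usage sketch against the TraceBuffer interface (the event-filling step is elided; whether GetChunk() can return empty depends on the implementation):

    // Writer-side sketch: borrow a chunk, append an event, hand it back.
    void AppendOneEvent(TraceBuffer* buffer) {
      size_t index;
      scoped_ptr<TraceBufferChunk> chunk = buffer->GetChunk(&index);
      if (!chunk)
        return;  // Only the not-implemented clone buffer returns empty.
      // ... write one TraceEvent into |chunk| here ...
      buffer->ReturnChunk(index, chunk.Pass());
    }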
   TraceSampleCallback callback;
 };

 // This object must be created on the IO thread.
 class TraceSamplingThread : public PlatformThread::Delegate {
  public:
   TraceSamplingThread();
   virtual ~TraceSamplingThread();

   // Implementation of PlatformThread::Delegate:
-  virtual void ThreadMain() OVERRIDE;
+  virtual void ThreadMain() override;

   static void DefaultSamplingCallback(TraceBucketData* bucket_data);

   void Stop();
   void WaitSamplingEventForTesting();

  private:
   friend class TraceLog;

   void GetSamples();
(...skipping 176 matching lines...)
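
TraceSamplingThread plugs into base's threading API by overriding the single hook in PlatformThread::Delegate. A minimal sketch of how such a delegate is started and joined; the exact PlatformThread::Create() signature here is an assumption based on the API of this era:

    class SamplerDelegate : public PlatformThread::Delegate {
     public:
      virtual void ThreadMain() override {
        // The sampling loop would run here until Stop() is signaled.
      }
    };

    // Hypothetical startup/shutdown code:
    SamplerDelegate delegate;
    PlatformThreadHandle handle;
    PlatformThread::Create(0 /* default stack size */, &delegate, &handle);
    // ... later ...
    PlatformThread::Join(handle);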
       handle.chunk_index != chunk_index_)
     return NULL;

   return chunk_->GetEventAt(handle.event_index);
 }

 int generation() const { return generation_; }

  private:
   // MessageLoop::DestructionObserver
-  virtual void WillDestroyCurrentMessageLoop() OVERRIDE;
+  virtual void WillDestroyCurrentMessageLoop() override;

   void FlushWhileLocked();

   void CheckThisIsCurrentBuffer() const {
     DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
   }

   // Since TraceLog is a leaky singleton, trace_log_ will always be valid
   // as long as the thread exists.
   TraceLog* trace_log_;
(...skipping 1491 matching lines...)
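
The per-thread buffer above implements MessageLoop::DestructionObserver so it can flush pending events before its thread's message loop goes away. A hedged sketch of the registration half of that contract (the class name ThreadLocalEventBuffer is inferred from the thread_local_event_buffer_ member, and the bodies are illustrative; the real class does more bookkeeping):

    // Illustrative only: subscribe at construction, flush at teardown.
    ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
        : trace_log_(trace_log) {
      MessageLoop::current()->AddDestructionObserver(this);
    }

    void ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
      delete this;  // The destructor flushes this thread's buffered events.
    }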
 }

 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
   if (*category_group_enabled_) {
     TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
                                                 name_, event_handle_);
   }
 }

 }  // namespace trace_event_internal
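
ScopedTraceBinaryEfficient is the RAII half of a begin/complete trace pair: the constructor (elided above) records the begin event and keeps its handle, and the destructor shown here stamps the duration, but only if the category group is still enabled at scope exit. A hedged usage sketch (the constructor arguments are inferred from the member names):

    void DoExpensiveWork() {
      // The duration of this scope is recorded when |trace| is destroyed.
      trace_event_internal::ScopedTraceBinaryEfficient trace(
          "my_category", "DoExpensiveWork");
      // ... work being measured ...
    }  // ~ScopedTraceBinaryEfficient() updates the event's duration here.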
