Index: base/debug/trace_event_impl.cc
diff --git a/base/debug/trace_event_impl.cc b/base/debug/trace_event_impl.cc
index db321e2bfa42c893e34acce70c37d802459c0e6f..28c0c0d962fdbdd2d5de5f4650bca61880ff7ee2 100644
--- a/base/debug/trace_event_impl.cc
+++ b/base/debug/trace_event_impl.cc
@@ -140,7 +140,7 @@ class TraceBufferRingBuffer : public TraceBuffer {
       recyclable_chunks_queue_[i] = i;
   }

-  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
+  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
     // Because the number of threads is much less than the number of chunks,
     // the queue should never be empty.
     DCHECK(!QueueIsEmpty());
@@ -162,8 +162,7 @@ class TraceBufferRingBuffer : public TraceBuffer {
     return scoped_ptr<TraceBufferChunk>(chunk);
   }

-  virtual void ReturnChunk(size_t index,
-                           scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
+  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
     // When this method is called, the queue should not be full because it
     // can contain all chunks including the one to be returned.
     DCHECK(!QueueIsFull());
@@ -175,20 +174,18 @@ class TraceBufferRingBuffer : public TraceBuffer {
     queue_tail_ = NextQueueIndex(queue_tail_);
   }

-  virtual bool IsFull() const OVERRIDE {
-    return false;
-  }
+  bool IsFull() const override { return false; }

-  virtual size_t Size() const OVERRIDE {
+  size_t Size() const override {
     // This is approximate because not all of the chunks are full.
     return chunks_.size() * kTraceBufferChunkSize;
   }

-  virtual size_t Capacity() const OVERRIDE {
+  size_t Capacity() const override {
     return max_chunks_ * kTraceBufferChunkSize;
   }

-  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
+  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
     if (handle.chunk_index >= chunks_.size())
       return NULL;
     TraceBufferChunk* chunk = chunks_[handle.chunk_index];
@@ -197,7 +194,7 @@ class TraceBufferRingBuffer : public TraceBuffer {
     return chunk->GetEventAt(handle.event_index);
   }

-  virtual const TraceBufferChunk* NextChunk() OVERRIDE {
+  const TraceBufferChunk* NextChunk() override {
     if (chunks_.empty())
       return NULL;

@@ -212,7 +209,7 @@ class TraceBufferRingBuffer : public TraceBuffer {
     return NULL;
   }

-  virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
+  scoped_ptr<TraceBuffer> CloneForIteration() const override {
     scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
     for (size_t queue_index = queue_head_; queue_index != queue_tail_;
         queue_index = NextQueueIndex(queue_index)) {
@@ -231,26 +228,25 @@ class TraceBufferRingBuffer : public TraceBuffer {
     ClonedTraceBuffer() : current_iteration_index_(0) {}

     // The only implemented method.
-    virtual const TraceBufferChunk* NextChunk() OVERRIDE {
+    const TraceBufferChunk* NextChunk() override {
       return current_iteration_index_ < chunks_.size() ?
           chunks_[current_iteration_index_++] : NULL;
     }

-    virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
+    scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
       NOTIMPLEMENTED();
       return scoped_ptr<TraceBufferChunk>();
     }
-    virtual void ReturnChunk(size_t index,
-                             scoped_ptr<TraceBufferChunk>) OVERRIDE {
+    void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk>) override {
       NOTIMPLEMENTED();
     }
-    virtual bool IsFull() const OVERRIDE { return false; }
-    virtual size_t Size() const OVERRIDE { return 0; }
-    virtual size_t Capacity() const OVERRIDE { return 0; }
-    virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
+    bool IsFull() const override { return false; }
+    size_t Size() const override { return 0; }
+    size_t Capacity() const override { return 0; }
+    TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
       return NULL;
     }
-    virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
+    scoped_ptr<TraceBuffer> CloneForIteration() const override {
       NOTIMPLEMENTED();
       return scoped_ptr<TraceBuffer>();
     }
@@ -306,7 +302,7 @@ class TraceBufferVector : public TraceBuffer {
     chunks_.reserve(max_chunks_);
   }

-  virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) OVERRIDE {
+  scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
     // This function may be called when adding normal events or indirectly from
     // AddMetadataEventsWhileLocked(). We can not DECHECK(!IsFull()) because we
     // have to add the metadata events and flush thread-local buffers even if
@@ -319,8 +315,7 @@ class TraceBufferVector : public TraceBuffer {
         new TraceBufferChunk(static_cast<uint32>(*index) + 1));
   }

-  virtual void ReturnChunk(size_t index,
-                           scoped_ptr<TraceBufferChunk> chunk) OVERRIDE {
+  void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override {
     DCHECK_GT(in_flight_chunk_count_, 0u);
     DCHECK_LT(index, chunks_.size());
     DCHECK(!chunks_[index]);
@@ -328,20 +323,18 @@ class TraceBufferVector : public TraceBuffer {
     chunks_[index] = chunk.release();
   }

-  virtual bool IsFull() const OVERRIDE {
-    return chunks_.size() >= max_chunks_;
-  }
+  bool IsFull() const override { return chunks_.size() >= max_chunks_; }

-  virtual size_t Size() const OVERRIDE {
+  size_t Size() const override {
     // This is approximate because not all of the chunks are full.
     return chunks_.size() * kTraceBufferChunkSize;
   }

-  virtual size_t Capacity() const OVERRIDE {
+  size_t Capacity() const override {
     return max_chunks_ * kTraceBufferChunkSize;
   }

-  virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) OVERRIDE {
+  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
     if (handle.chunk_index >= chunks_.size())
       return NULL;
     TraceBufferChunk* chunk = chunks_[handle.chunk_index];
@@ -350,7 +343,7 @@ class TraceBufferVector : public TraceBuffer {
     return chunk->GetEventAt(handle.event_index);
   }

-  virtual const TraceBufferChunk* NextChunk() OVERRIDE {
+  const TraceBufferChunk* NextChunk() override {
     while (current_iteration_index_ < chunks_.size()) {
       // Skip in-flight chunks.
       const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
@@ -360,7 +353,7 @@ class TraceBufferVector : public TraceBuffer {
     return NULL;
   }

-  virtual scoped_ptr<TraceBuffer> CloneForIteration() const OVERRIDE {
+  scoped_ptr<TraceBuffer> CloneForIteration() const override {
     NOTIMPLEMENTED();
     return scoped_ptr<TraceBuffer>();
   }
@@ -869,7 +862,7 @@ class TraceSamplingThread : public PlatformThread::Delegate {
   virtual ~TraceSamplingThread();

   // Implementation of PlatformThread::Delegate:
-  virtual void ThreadMain() OVERRIDE;
+  void ThreadMain() override;

   static void DefaultSamplingCallback(TraceBucketData* bucekt_data);

@@ -1066,7 +1059,7 @@ class TraceLog::ThreadLocalEventBuffer

  private:
   // MessageLoop::DestructionObserver
-  virtual void WillDestroyCurrentMessageLoop() OVERRIDE;
+  void WillDestroyCurrentMessageLoop() override;

   void FlushWhileLocked();
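Note: the patch above applies one mechanical transformation to every overriding
member function it touches: drop the redundant leading `virtual` and replace
Chromium's OVERRIDE macro (a portability shim from base/compiler_specific.h
that expanded to `override` only on compilers supporting it) with the bare
C++11 `override` keyword. A minimal compilable sketch of the before/after
pattern follows; the Buffer hierarchy is hypothetical (standing in for the
real TraceBuffer classes) and the OVERRIDE definition is simplified:

  #include <cstddef>

  // Simplified stand-in for Chromium's macro; the real definition in
  // base/compiler_specific.h expanded to nothing on pre-C++11 compilers.
  #define OVERRIDE override

  // Hypothetical base class, standing in for TraceBuffer.
  struct Buffer {
    virtual ~Buffer() {}
    virtual size_t Size() const = 0;
  };

  // Old style (removed by this patch): redundant `virtual` plus the macro.
  struct OldStyleBuffer : Buffer {
    virtual size_t Size() const OVERRIDE { return 0; }
  };

  // New style (added by this patch): bare `override`; `virtual` is implied,
  // and the compiler rejects the declaration if it does not actually
  // override Buffer::Size (e.g. on a const or parameter-type mismatch).
  struct NewStyleBuffer : Buffer {
    size_t Size() const override { return 0; }
  };

Once OVERRIDE expands to `override`, both spellings are enforced identically,
so the change is behavior-preserving; it removes the macro indirection and
matches the style rule of writing exactly one of `virtual` or `override` per
declaration.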
|