Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/trace_buffer.h" | 5 #include "base/trace_event/trace_buffer.h" |
| 6 | 6 |
| 7 #include "base/memory/scoped_vector.h" | 7 #include "base/memory/scoped_vector.h" |
|
Primiano Tucci (use gerrit)
2015/11/26 15:47:14
remove this and add <vector> and scoped_ptr plz :)
| |
| 8 #include "base/trace_event/trace_event_impl.h" | 8 #include "base/trace_event/trace_event_impl.h" |
| 9 | 9 |
| 10 namespace base { | 10 namespace base { |
| 11 namespace trace_event { | 11 namespace trace_event { |
| 12 | 12 |
| 13 namespace { | 13 namespace { |
| 14 | 14 |
| 15 class TraceBufferRingBuffer : public TraceBuffer { | 15 class TraceBufferRingBuffer : public TraceBuffer { |
| 16 public: | 16 public: |
| 17 TraceBufferRingBuffer(size_t max_chunks) | 17 TraceBufferRingBuffer(size_t max_chunks) |
| (...skipping 13 matching lines...) Expand all Loading... | |
| 31 // the queue should never be empty. | 31 // the queue should never be empty. |
| 32 DCHECK(!QueueIsEmpty()); | 32 DCHECK(!QueueIsEmpty()); |
| 33 | 33 |
| 34 *index = recyclable_chunks_queue_[queue_head_]; | 34 *index = recyclable_chunks_queue_[queue_head_]; |
| 35 queue_head_ = NextQueueIndex(queue_head_); | 35 queue_head_ = NextQueueIndex(queue_head_); |
| 36 current_iteration_index_ = queue_head_; | 36 current_iteration_index_ = queue_head_; |
| 37 | 37 |
| 38 if (*index >= chunks_.size()) | 38 if (*index >= chunks_.size()) |
| 39 chunks_.resize(*index + 1); | 39 chunks_.resize(*index + 1); |
| 40 | 40 |
| 41 TraceBufferChunk* chunk = chunks_[*index]; | 41 TraceBufferChunk* chunk = chunks_[*index].release(); |
| 42 chunks_[*index] = NULL; // Put NULL in the slot of an in-flight chunk. | 42 chunks_[*index] = NULL; // Put NULL in the slot of an in-flight chunk. |
| 43 if (chunk) | 43 if (chunk) |
| 44 chunk->Reset(current_chunk_seq_++); | 44 chunk->Reset(current_chunk_seq_++); |
| 45 else | 45 else |
| 46 chunk = new TraceBufferChunk(current_chunk_seq_++); | 46 chunk = new TraceBufferChunk(current_chunk_seq_++); |
| 47 | 47 |
| 48 return scoped_ptr<TraceBufferChunk>(chunk); | 48 return scoped_ptr<TraceBufferChunk>(chunk); |
| 49 } | 49 } |
| 50 | 50 |
| 51 void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override { | 51 void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk> chunk) override { |
| 52 // When this method is called, the queue should not be full because it | 52 // When this method is called, the queue should not be full because it |
| 53 // can contain all chunks including the one to be returned. | 53 // can contain all chunks including the one to be returned. |
| 54 DCHECK(!QueueIsFull()); | 54 DCHECK(!QueueIsFull()); |
| 55 DCHECK(chunk); | 55 DCHECK(chunk); |
| 56 DCHECK_LT(index, chunks_.size()); | 56 DCHECK_LT(index, chunks_.size()); |
| 57 DCHECK(!chunks_[index]); | 57 DCHECK(!chunks_[index]); |
| 58 chunks_[index] = chunk.release(); | 58 chunks_[index] = std::move(chunk); |
| 59 recyclable_chunks_queue_[queue_tail_] = index; | 59 recyclable_chunks_queue_[queue_tail_] = index; |
| 60 queue_tail_ = NextQueueIndex(queue_tail_); | 60 queue_tail_ = NextQueueIndex(queue_tail_); |
| 61 } | 61 } |
| 62 | 62 |
| 63 bool IsFull() const override { return false; } | 63 bool IsFull() const override { return false; } |
| 64 | 64 |
| 65 size_t Size() const override { | 65 size_t Size() const override { |
| 66 // This is approximate because not all of the chunks are full. | 66 // This is approximate because not all of the chunks are full. |
| 67 return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize; | 67 return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize; |
| 68 } | 68 } |
| 69 | 69 |
| 70 size_t Capacity() const override { | 70 size_t Capacity() const override { |
| 71 return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize; | 71 return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize; |
| 72 } | 72 } |
| 73 | 73 |
| 74 TraceEvent* GetEventByHandle(TraceEventHandle handle) override { | 74 TraceEvent* GetEventByHandle(TraceEventHandle handle) override { |
| 75 if (handle.chunk_index >= chunks_.size()) | 75 if (handle.chunk_index >= chunks_.size()) |
| 76 return NULL; | 76 return NULL; |
| 77 TraceBufferChunk* chunk = chunks_[handle.chunk_index]; | 77 TraceBufferChunk* chunk = chunks_[handle.chunk_index].get(); |
| 78 if (!chunk || chunk->seq() != handle.chunk_seq) | 78 if (!chunk || chunk->seq() != handle.chunk_seq) |
| 79 return NULL; | 79 return NULL; |
| 80 return chunk->GetEventAt(handle.event_index); | 80 return chunk->GetEventAt(handle.event_index); |
| 81 } | 81 } |
| 82 | 82 |
| 83 const TraceBufferChunk* NextChunk() override { | 83 const TraceBufferChunk* NextChunk() override { |
| 84 if (chunks_.empty()) | 84 if (chunks_.empty()) |
| 85 return NULL; | 85 return NULL; |
| 86 | 86 |
| 87 while (current_iteration_index_ != queue_tail_) { | 87 while (current_iteration_index_ != queue_tail_) { |
| 88 size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_]; | 88 size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_]; |
| 89 current_iteration_index_ = NextQueueIndex(current_iteration_index_); | 89 current_iteration_index_ = NextQueueIndex(current_iteration_index_); |
| 90 if (chunk_index >= chunks_.size()) // Skip uninitialized chunks. | 90 if (chunk_index >= chunks_.size()) // Skip uninitialized chunks. |
| 91 continue; | 91 continue; |
| 92 DCHECK(chunks_[chunk_index]); | 92 DCHECK(chunks_[chunk_index]); |
| 93 return chunks_[chunk_index]; | 93 return chunks_[chunk_index].get(); |
| 94 } | 94 } |
| 95 return NULL; | 95 return NULL; |
| 96 } | 96 } |
| 97 | 97 |
| 98 scoped_ptr<TraceBuffer> CloneForIteration() const override { | 98 scoped_ptr<TraceBuffer> CloneForIteration() const override { |
| 99 scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer()); | 99 scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer()); |
| 100 for (size_t queue_index = queue_head_; queue_index != queue_tail_; | 100 for (size_t queue_index = queue_head_; queue_index != queue_tail_; |
| 101 queue_index = NextQueueIndex(queue_index)) { | 101 queue_index = NextQueueIndex(queue_index)) { |
| 102 size_t chunk_index = recyclable_chunks_queue_[queue_index]; | 102 size_t chunk_index = recyclable_chunks_queue_[queue_index]; |
| 103 if (chunk_index >= chunks_.size()) // Skip uninitialized chunks. | 103 if (chunk_index >= chunks_.size()) // Skip uninitialized chunks. |
| 104 continue; | 104 continue; |
| 105 TraceBufferChunk* chunk = chunks_[chunk_index]; | 105 TraceBufferChunk* chunk = chunks_[chunk_index].get(); |
| 106 cloned_buffer->chunks_.push_back(chunk ? chunk->Clone().release() : NULL); | 106 cloned_buffer->chunks_.push_back(chunk ? chunk->Clone() |
|
Primiano Tucci (use gerrit)
2015/11/26 15:47:14
doesn't this fit on one line?
| |
| 107 : NULL); | |
| 107 } | 108 } |
| 108 return cloned_buffer.Pass(); | 109 return cloned_buffer.Pass(); |
| 109 } | 110 } |
| 110 | 111 |
| 111 void EstimateTraceMemoryOverhead( | 112 void EstimateTraceMemoryOverhead( |
| 112 TraceEventMemoryOverhead* overhead) override { | 113 TraceEventMemoryOverhead* overhead) override { |
| 113 overhead->Add("TraceBufferRingBuffer", sizeof(*this)); | 114 overhead->Add("TraceBufferRingBuffer", sizeof(*this)); |
| 114 for (size_t queue_index = queue_head_; queue_index != queue_tail_; | 115 for (size_t queue_index = queue_head_; queue_index != queue_tail_; |
| 115 queue_index = NextQueueIndex(queue_index)) { | 116 queue_index = NextQueueIndex(queue_index)) { |
| 116 size_t chunk_index = recyclable_chunks_queue_[queue_index]; | 117 size_t chunk_index = recyclable_chunks_queue_[queue_index]; |
| 117 if (chunk_index >= chunks_.size()) // Skip uninitialized chunks. | 118 if (chunk_index >= chunks_.size()) // Skip uninitialized chunks. |
| 118 continue; | 119 continue; |
| 119 chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead); | 120 chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead); |
| 120 } | 121 } |
| 121 } | 122 } |
| 122 | 123 |
| 123 private: | 124 private: |
| 124 class ClonedTraceBuffer : public TraceBuffer { | 125 class ClonedTraceBuffer : public TraceBuffer { |
| 125 public: | 126 public: |
| 126 ClonedTraceBuffer() : current_iteration_index_(0) {} | 127 ClonedTraceBuffer() : current_iteration_index_(0) {} |
| 127 | 128 |
| 128 // The only implemented method. | 129 // The only implemented method. |
| 129 const TraceBufferChunk* NextChunk() override { | 130 const TraceBufferChunk* NextChunk() override { |
| 130 return current_iteration_index_ < chunks_.size() | 131 return current_iteration_index_ < chunks_.size() |
| 131 ? chunks_[current_iteration_index_++] | 132 ? chunks_[current_iteration_index_++].get() |
| 132 : NULL; | 133 : NULL; |
| 133 } | 134 } |
| 134 | 135 |
| 135 scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override { | 136 scoped_ptr<TraceBufferChunk> GetChunk(size_t* index) override { |
| 136 NOTIMPLEMENTED(); | 137 NOTIMPLEMENTED(); |
| 137 return scoped_ptr<TraceBufferChunk>(); | 138 return scoped_ptr<TraceBufferChunk>(); |
| 138 } | 139 } |
| 139 void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk>) override { | 140 void ReturnChunk(size_t index, scoped_ptr<TraceBufferChunk>) override { |
| 140 NOTIMPLEMENTED(); | 141 NOTIMPLEMENTED(); |
| 141 } | 142 } |
| 142 bool IsFull() const override { return false; } | 143 bool IsFull() const override { return false; } |
| 143 size_t Size() const override { return 0; } | 144 size_t Size() const override { return 0; } |
| 144 size_t Capacity() const override { return 0; } | 145 size_t Capacity() const override { return 0; } |
| 145 TraceEvent* GetEventByHandle(TraceEventHandle handle) override { | 146 TraceEvent* GetEventByHandle(TraceEventHandle handle) override { |
| 146 return NULL; | 147 return NULL; |
| 147 } | 148 } |
| 148 scoped_ptr<TraceBuffer> CloneForIteration() const override { | 149 scoped_ptr<TraceBuffer> CloneForIteration() const override { |
| 149 NOTIMPLEMENTED(); | 150 NOTIMPLEMENTED(); |
| 150 return scoped_ptr<TraceBuffer>(); | 151 return scoped_ptr<TraceBuffer>(); |
| 151 } | 152 } |
| 152 void EstimateTraceMemoryOverhead( | 153 void EstimateTraceMemoryOverhead( |
| 153 TraceEventMemoryOverhead* overhead) override { | 154 TraceEventMemoryOverhead* overhead) override { |
| 154 NOTIMPLEMENTED(); | 155 NOTIMPLEMENTED(); |
| 155 } | 156 } |
| 156 | 157 |
| 157 size_t current_iteration_index_; | 158 size_t current_iteration_index_; |
| 158 ScopedVector<TraceBufferChunk> chunks_; | 159 std::vector<scoped_ptr<TraceBufferChunk>> chunks_; |
| 159 }; | 160 }; |
| 160 | 161 |
| 161 bool QueueIsEmpty() const { return queue_head_ == queue_tail_; } | 162 bool QueueIsEmpty() const { return queue_head_ == queue_tail_; } |
| 162 | 163 |
| 163 size_t QueueSize() const { | 164 size_t QueueSize() const { |
| 164 return queue_tail_ > queue_head_ | 165 return queue_tail_ > queue_head_ |
| 165 ? queue_tail_ - queue_head_ | 166 ? queue_tail_ - queue_head_ |
| 166 : queue_tail_ + queue_capacity() - queue_head_; | 167 : queue_tail_ + queue_capacity() - queue_head_; |
| 167 } | 168 } |
| 168 | 169 |
| 169 bool QueueIsFull() const { return QueueSize() == queue_capacity() - 1; } | 170 bool QueueIsFull() const { return QueueSize() == queue_capacity() - 1; } |
| 170 | 171 |
| 171 size_t queue_capacity() const { | 172 size_t queue_capacity() const { |
| 172 // One extra space to help distinguish full state and empty state. | 173 // One extra space to help distinguish full state and empty state. |
| 173 return max_chunks_ + 1; | 174 return max_chunks_ + 1; |
| 174 } | 175 } |
| 175 | 176 |
| 176 size_t NextQueueIndex(size_t index) const { | 177 size_t NextQueueIndex(size_t index) const { |
| 177 index++; | 178 index++; |
| 178 if (index >= queue_capacity()) | 179 if (index >= queue_capacity()) |
| 179 index = 0; | 180 index = 0; |
| 180 return index; | 181 return index; |
| 181 } | 182 } |
| 182 | 183 |
| 183 size_t max_chunks_; | 184 size_t max_chunks_; |
| 184 ScopedVector<TraceBufferChunk> chunks_; | 185 std::vector<scoped_ptr<TraceBufferChunk>> chunks_; |
| 185 | 186 |
| 186 scoped_ptr<size_t[]> recyclable_chunks_queue_; | 187 scoped_ptr<size_t[]> recyclable_chunks_queue_; |
| 187 size_t queue_head_; | 188 size_t queue_head_; |
| 188 size_t queue_tail_; | 189 size_t queue_tail_; |
| 189 | 190 |
| 190 size_t current_iteration_index_; | 191 size_t current_iteration_index_; |
| 191 uint32 current_chunk_seq_; | 192 uint32 current_chunk_seq_; |
| 192 | 193 |
| 193 DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer); | 194 DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer); |
| 194 }; | 195 }; |
| (...skipping 192 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 387 TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) { | 388 TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) { |
| 388 return new TraceBufferRingBuffer(max_chunks); | 389 return new TraceBufferRingBuffer(max_chunks); |
| 389 } | 390 } |
| 390 | 391 |
| 391 TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) { | 392 TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) { |
| 392 return new TraceBufferVector(max_chunks); | 393 return new TraceBufferVector(max_chunks); |
| 393 } | 394 } |
| 394 | 395 |
| 395 } // namespace trace_event | 396 } // namespace trace_event |
| 396 } // namespace base | 397 } // namespace base |
| OLD | NEW |