Chromium Code Reviews

| Index: src/circular-queue.cc |
| diff --git a/src/circular-queue.cc b/src/circular-queue.cc |
| index 0aea3435927491511ef72b56147949389250f79d..89e4f50145a133109dd9315fa2c30c3988b1cdc9 100644 |
| --- a/src/circular-queue.cc |
| +++ b/src/circular-queue.cc |
| @@ -34,20 +34,18 @@ namespace internal { |
| SamplingCircularQueue::SamplingCircularQueue(size_t record_size_in_bytes, |
| - size_t desired_chunk_size_in_bytes, |
| - unsigned buffer_size_in_chunks) |
| - : record_size_(record_size_in_bytes / sizeof(Cell)), |
| - chunk_size_in_bytes_(desired_chunk_size_in_bytes / record_size_in_bytes * |
| - record_size_in_bytes + sizeof(Cell)), |
| - chunk_size_(chunk_size_in_bytes_ / sizeof(Cell)), |
| - buffer_size_(chunk_size_ * buffer_size_in_chunks), |
| - buffer_(NewArray<Cell>(buffer_size_)) { |
| - ASSERT(record_size_ * sizeof(Cell) == record_size_in_bytes); |
| - ASSERT(chunk_size_ * sizeof(Cell) == chunk_size_in_bytes_); |
| - ASSERT(buffer_size_in_chunks > 2); |
| - // Mark all chunks as clear. |
| - for (size_t i = 0; i < buffer_size_; i += chunk_size_) { |
| - buffer_[i] = kClear; |
| + unsigned buffer_size_in_records) |
| + : entry_size_(RoundUp(record_size_in_bytes + sizeof(Cell), |
| + kProcessorCacheLineSize) / sizeof(Cell)), |
| + buffer_size_(entry_size_ * buffer_size_in_records) { |
| + const size_t cache_line_size = kProcessorCacheLineSize / sizeof(Cell); |
| + not_aligned_buffer_ = NewArray<Cell>(buffer_size_ + cache_line_size); |
| + // Align on cache line boundaries. |
| + buffer_ = reinterpret_cast<Cell*>(RoundUp(not_aligned_buffer_, |
| + cache_line_size)); |
| + // Mark all entries as empty. |
| + for (size_t i = 0; i < buffer_size_; i += entry_size_) { |
| + buffer_[i] = kEmpty; |
| } |
| // Layout producer and consumer position pointers each on their own |
| @@ -63,63 +61,36 @@ SamplingCircularQueue::SamplingCircularQueue(size_t record_size_in_bytes, |
| producer_pos_ = reinterpret_cast<ProducerPosition*>( |
| RoundUp(positions_, kProcessorCacheLineSize)); |
| - producer_pos_->next_chunk_pos = buffer_; |
| producer_pos_->enqueue_pos = buffer_; |
| consumer_pos_ = reinterpret_cast<ConsumerPosition*>( |
| reinterpret_cast<byte*>(producer_pos_) + kProcessorCacheLineSize); |
| ASSERT(reinterpret_cast<byte*>(consumer_pos_ + 1) <= |
| positions_ + positions_size); |
| - consumer_pos_->dequeue_chunk_pos = buffer_; |
| - // The distance ensures that producer and consumer never step on |
| - // each other's chunks and helps eviction of produced data from |
| - // the CPU cache (having that chunk size is bigger than the cache.) |
| - const size_t producer_consumer_distance = (2 * chunk_size_); |
| - consumer_pos_->dequeue_chunk_poll_pos = buffer_ + producer_consumer_distance; |
| - consumer_pos_->dequeue_pos = NULL; |
| + consumer_pos_->dequeue_pos = buffer_; |
| } |
| SamplingCircularQueue::~SamplingCircularQueue() { |
| DeleteArray(positions_); |
| - DeleteArray(buffer_); |
| + DeleteArray(not_aligned_buffer_); |
| } |
| void* SamplingCircularQueue::StartDequeue() { |
| - if (consumer_pos_->dequeue_pos != NULL) { |
| - return consumer_pos_->dequeue_pos; |
| - } else { |
| - if (Acquire_Load(consumer_pos_->dequeue_chunk_poll_pos) != kClear) { |
| - // Skip marker. |
| - consumer_pos_->dequeue_pos = consumer_pos_->dequeue_chunk_pos + 1; |
| - consumer_pos_->dequeue_end_pos = |
| - consumer_pos_->dequeue_chunk_pos + chunk_size_; |
| - return consumer_pos_->dequeue_pos; |
| - } else { |
| - return NULL; |
| - } |
| + MemoryBarrier(); |

Benedikt Meurer, 2013/08/13 09:31:32:
Why do we need this memory barrier here?

yurys, 2013/08/13 10:05:20:
Because we'd like to see up-to-date value at consu

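On the visibility question above: in the new scheme the dequeue side's Acquire_Load of the marker cell (paired with the Release_Store issued when an entry is produced or freed) is what guarantees that a reader who observes a published marker also sees the record written before it. Below is a minimal standalone sketch of that pairing, with std::atomic standing in for V8's Acquire_Load/Release_Store; Entry, Enqueue and the int payload are invented for the example and are not part of the CL.

// Minimal sketch of the release/acquire pairing discussed above.
#include <atomic>

struct Entry {
  std::atomic<int> marker{0};  // 0 == empty, non-zero == record published
  int record = 0;              // payload, written before the marker is set
};

// Producer side: write the record first, then publish it with a release store.
void Enqueue(Entry* e, int value) {
  e->record = value;
  e->marker.store(1, std::memory_order_release);
}

// Consumer side: if the acquire load observes the marker as published, the
// preceding write to the record is guaranteed to be visible as well.
const int* StartDequeue(const Entry* e) {
  if (e->marker.load(std::memory_order_acquire) != 0) return &e->record;
  return nullptr;
}

int main() {
  Entry e;
  Enqueue(&e, 42);
  const int* r = StartDequeue(&e);
  return (r != nullptr && *r == 42) ? 0 : 1;
}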
| + if (Acquire_Load(consumer_pos_->dequeue_pos) != kEmpty) { |
| + // Skip marker. |
| + return consumer_pos_->dequeue_pos + 1; |
| } |
| + return NULL; |
| } |
| void SamplingCircularQueue::FinishDequeue() { |
| - consumer_pos_->dequeue_pos += record_size_; |
| - if (consumer_pos_->dequeue_pos < consumer_pos_->dequeue_end_pos) return; |
| - // Move to next chunk. |
| - consumer_pos_->dequeue_pos = NULL; |
| - *consumer_pos_->dequeue_chunk_pos = kClear; |
| - consumer_pos_->dequeue_chunk_pos += chunk_size_; |
| - WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_pos); |
| - consumer_pos_->dequeue_chunk_poll_pos += chunk_size_; |
| - WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_poll_pos); |
| + Release_Store(consumer_pos_->dequeue_pos, kEmpty); |
| + consumer_pos_->dequeue_pos += entry_size_; |
| + WrapPositionIfNeeded(&consumer_pos_->dequeue_pos); |
| } |

Benedikt Meurer, 2013/08/13 09:31:32:
T* StartDequeue() {
if (Acquire_Load(&buffer_[de

yurys, 2013/08/13 14:10:29:
Changed that code.

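Reading the new StartDequeue/FinishDequeue hunks together, the consumer half of the single-marker scheme can be sketched as a standalone program like the one below. This is illustrative only, not the CL's code: std::atomic replaces V8's Acquire_Load/Release_Store, every cell is modeled as atomic for simplicity, and ConsumerSketch plus the sizes in main are invented.

#include <atomic>
#include <cstddef>
#include <cstdint>

// Sketch only: kEmpty, the entry size and dequeue_pos mirror the CL's names,
// but the class itself and the constants used in main are made up here.
typedef std::atomic<intptr_t> Cell;
const intptr_t kEmpty = 0;

class ConsumerSketch {
 public:
  ConsumerSketch(Cell* buffer, size_t buffer_size, size_t entry_size)
      : buffer_(buffer), buffer_size_(buffer_size),
        entry_size_(entry_size), dequeue_pos_(buffer) {}

  // If the current entry's marker cell is not kEmpty, return the record that
  // immediately follows the marker; otherwise nothing has been produced yet.
  Cell* StartDequeue() {
    if (dequeue_pos_->load(std::memory_order_acquire) != kEmpty)
      return dequeue_pos_ + 1;  // skip the marker cell
    return nullptr;
  }

  // Hand the entry back to the producer and advance to the next entry,
  // wrapping around at the end of the buffer.
  void FinishDequeue() {
    dequeue_pos_->store(kEmpty, std::memory_order_release);
    dequeue_pos_ += entry_size_;
    if (dequeue_pos_ >= buffer_ + buffer_size_) dequeue_pos_ = buffer_;
  }

 private:
  Cell* buffer_;
  size_t buffer_size_;  // in cells
  size_t entry_size_;   // in cells: one marker cell plus the record
  Cell* dequeue_pos_;   // marker cell of the next entry to read
};

int main() {
  const size_t kEntrySize = 4;                // marker cell + three payload cells
  const size_t kEntries = 3;
  static Cell buffer[kEntrySize * kEntries];  // static storage: markers start at 0
  ConsumerSketch consumer(buffer, kEntrySize * kEntries, kEntrySize);

  // Emulate a producer: fill the payload, then publish it with a release store.
  buffer[1] = 42;
  buffer[0].store(1, std::memory_order_release);

  Cell* record = consumer.StartDequeue();     // points at buffer[1]
  bool ok = (record != nullptr && *record == 42);
  if (ok) consumer.FinishDequeue();           // the entry is marked kEmpty again
  return ok ? 0 : 1;
}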
| - |
| -void SamplingCircularQueue::FlushResidualRecords() { |
| - // Eliminate producer / consumer distance. |
| - consumer_pos_->dequeue_chunk_poll_pos = consumer_pos_->dequeue_chunk_pos; |
| -} |
| - |
| - |
| } } // namespace v8::internal |
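For reference, the constructor changes above replace the chunked layout with fixed-size, cache-line padded entries, each starting with a marker cell. The arithmetic can be reproduced in a small standalone program; this is a sketch, where kProcessorCacheLineSize = 64, the record size, and the local RoundUp helper are assumptions made for the example rather than V8's definitions.

#include <cstddef>
#include <cstdint>
#include <cstdio>

typedef intptr_t Cell;
const size_t kProcessorCacheLineSize = 64;  // assumed; V8 defines this per platform

// Local stand-in for V8's RoundUp helper (alignment must be a power of two).
uintptr_t RoundUp(uintptr_t value, uintptr_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t record_size_in_bytes = 5 * sizeof(Cell);  // example record
  const unsigned buffer_size_in_records = 8;

  // A marker cell is prepended to every record and the whole entry is padded
  // to a cache line, so neighbouring entries never share a line.
  const size_t entry_size =  // in cells
      RoundUp(record_size_in_bytes + sizeof(Cell), kProcessorCacheLineSize) /
      sizeof(Cell);
  const size_t buffer_size = entry_size * buffer_size_in_records;  // in cells

  // Over-allocate by one cache line so the usable buffer itself can be
  // rounded up to a cache line boundary.
  const size_t cache_line_cells = kProcessorCacheLineSize / sizeof(Cell);
  Cell* not_aligned_buffer = new Cell[buffer_size + cache_line_cells];
  Cell* buffer = reinterpret_cast<Cell*>(
      RoundUp(reinterpret_cast<uintptr_t>(not_aligned_buffer),
              kProcessorCacheLineSize));

  // Mark every entry as empty, as the new constructor does.
  const Cell kEmpty = 0;
  for (size_t i = 0; i < buffer_size; i += entry_size) buffer[i] = kEmpty;

  std::printf("entry = %zu cells, buffer = %zu cells, aligned at %p\n",
              entry_size, buffer_size, static_cast<void*>(buffer));
  delete[] not_aligned_buffer;
  return 0;
}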