| OLD | NEW |
| (Empty) |
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | |
| 2 // Redistribution and use in source and binary forms, with or without | |
| 3 // modification, are permitted provided that the following conditions are | |
| 4 // met: | |
| 5 // | |
| 6 // * Redistributions of source code must retain the above copyright | |
| 7 // notice, this list of conditions and the following disclaimer. | |
| 8 // * Redistributions in binary form must reproduce the above | |
| 9 // copyright notice, this list of conditions and the following | |
| 10 // disclaimer in the documentation and/or other materials provided | |
| 11 // with the distribution. | |
| 12 // * Neither the name of Google Inc. nor the names of its | |
| 13 // contributors may be used to endorse or promote products derived | |
| 14 // from this software without specific prior written permission. | |
| 15 // | |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
| 27 | |
| 28 #include "v8.h" | |
| 29 | |
| 30 #include "circular-queue-inl.h" | |
| 31 | |
| 32 namespace v8 { | |
| 33 namespace internal { | |
| 34 | |
| 35 | |
// Constructs a sampling circular queue of |buffer_size_in_chunks| chunks.
// Each chunk begins with one marker Cell (kClear while the chunk is free)
// used for producer/consumer hand-off, followed by a whole number of
// records: the effective chunk payload is the largest multiple of
// |record_size_in_bytes| not exceeding |desired_chunk_size_in_bytes|.
SamplingCircularQueue::SamplingCircularQueue(size_t record_size_in_bytes,
                                             size_t desired_chunk_size_in_bytes,
                                             unsigned buffer_size_in_chunks)
    : record_size_(record_size_in_bytes / sizeof(Cell)),
      // Round the chunk size down to a whole number of records, then add
      // one Cell for the chunk's marker slot.
      chunk_size_in_bytes_(desired_chunk_size_in_bytes / record_size_in_bytes *
                           record_size_in_bytes + sizeof(Cell)),
      chunk_size_(chunk_size_in_bytes_ / sizeof(Cell)),
      buffer_size_(chunk_size_ * buffer_size_in_chunks),
      buffer_(NewArray<Cell>(buffer_size_)) {
  // Record and chunk sizes must be whole multiples of the Cell size,
  // since all bookkeeping below is done in Cell units.
  ASSERT(record_size_ * sizeof(Cell) == record_size_in_bytes);
  ASSERT(chunk_size_ * sizeof(Cell) == chunk_size_in_bytes_);
  // The consumer polls 2 chunks behind the producer (see the
  // producer_consumer_distance below), so at least 3 chunks are needed.
  ASSERT(buffer_size_in_chunks > 2);
  // Mark all chunks as clear.
  for (size_t i = 0; i < buffer_size_; i += chunk_size_) {
    buffer_[i] = kClear;
  }

  // Layout producer and consumer position pointers each on their own
  // cache lines to avoid cache lines thrashing due to simultaneous
  // updates of positions by different processor cores.
  // The leading RoundUp(1, ...) reserves one cache line of slack so that
  // positions_ itself can be rounded up to a cache line boundary below.
  const int positions_size =
      RoundUp(1, kProcessorCacheLineSize) +
      RoundUp(static_cast<int>(sizeof(ProducerPosition)),
              kProcessorCacheLineSize) +
      RoundUp(static_cast<int>(sizeof(ConsumerPosition)),
              kProcessorCacheLineSize);
  positions_ = NewArray<byte>(positions_size);

  // Producer position: aligned to the first cache line boundary inside
  // the positions_ allocation.
  producer_pos_ = reinterpret_cast<ProducerPosition*>(
      RoundUp(positions_, kProcessorCacheLineSize));
  producer_pos_->next_chunk_pos = buffer_;
  producer_pos_->enqueue_pos = buffer_;

  // Consumer position: exactly one cache line after the producer's, so
  // the two never share a line.
  consumer_pos_ = reinterpret_cast<ConsumerPosition*>(
      reinterpret_cast<byte*>(producer_pos_) + kProcessorCacheLineSize);
  // Verify the consumer position still fits within the allocation.
  ASSERT(reinterpret_cast<byte*>(consumer_pos_ + 1) <=
         positions_ + positions_size);
  consumer_pos_->dequeue_chunk_pos = buffer_;
  // The distance ensures that producer and consumer never step on
  // each other's chunks and helps eviction of produced data from
  // the CPU cache (having that chunk size is bigger than the cache.)
  const size_t producer_consumer_distance = (2 * chunk_size_);
  consumer_pos_->dequeue_chunk_poll_pos = buffer_ + producer_consumer_distance;
  // NULL means no dequeue is currently in progress (see StartDequeue).
  consumer_pos_->dequeue_pos = NULL;
}
| 81 | |
| 82 | |
| 83 SamplingCircularQueue::~SamplingCircularQueue() { | |
| 84 DeleteArray(positions_); | |
| 85 DeleteArray(buffer_); | |
| 86 } | |
| 87 | |
| 88 | |
| 89 void* SamplingCircularQueue::StartDequeue() { | |
| 90 if (consumer_pos_->dequeue_pos != NULL) { | |
| 91 return consumer_pos_->dequeue_pos; | |
| 92 } else { | |
| 93 if (Acquire_Load(consumer_pos_->dequeue_chunk_poll_pos) != kClear) { | |
| 94 // Skip marker. | |
| 95 consumer_pos_->dequeue_pos = consumer_pos_->dequeue_chunk_pos + 1; | |
| 96 consumer_pos_->dequeue_end_pos = | |
| 97 consumer_pos_->dequeue_chunk_pos + chunk_size_; | |
| 98 return consumer_pos_->dequeue_pos; | |
| 99 } else { | |
| 100 return NULL; | |
| 101 } | |
| 102 } | |
| 103 } | |
| 104 | |
| 105 | |
| 106 void SamplingCircularQueue::FinishDequeue() { | |
| 107 consumer_pos_->dequeue_pos += record_size_; | |
| 108 if (consumer_pos_->dequeue_pos < consumer_pos_->dequeue_end_pos) return; | |
| 109 // Move to next chunk. | |
| 110 consumer_pos_->dequeue_pos = NULL; | |
| 111 *consumer_pos_->dequeue_chunk_pos = kClear; | |
| 112 consumer_pos_->dequeue_chunk_pos += chunk_size_; | |
| 113 WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_pos); | |
| 114 consumer_pos_->dequeue_chunk_poll_pos += chunk_size_; | |
| 115 WrapPositionIfNeeded(&consumer_pos_->dequeue_chunk_poll_pos); | |
| 116 } | |
| 117 | |
| 118 | |
| 119 void SamplingCircularQueue::FlushResidualRecords() { | |
| 120 // Eliminate producer / consumer distance. | |
| 121 consumer_pos_->dequeue_chunk_poll_pos = consumer_pos_->dequeue_chunk_pos; | |
| 122 } | |
| 123 | |
| 124 | |
| 125 } } // namespace v8::internal | |
| OLD | NEW |