Chromium Code Reviews| Index: base/debug/activity_tracker.h |
| diff --git a/base/debug/activity_tracker.h b/base/debug/activity_tracker.h |
| index d6ff724cf2daee6106c3fbce75b9622f1d7a9eda..d1f49a5b86846596f7de2b02bad963eb36d27172 100644 |
| --- a/base/debug/activity_tracker.h |
| +++ b/base/debug/activity_tracker.h |
| @@ -41,6 +41,200 @@ namespace debug { |
| class ThreadActivityTracker; |
| + |
| +//============================================================================= |
| +// This class provides a lock-free FIFO of any atomic type with the |
| +// limitation that there must be at least one "invalid" value. This is |
| +// built as a completely generic type and can (hopefully) be moved to a |
| +// more generally useful place in the future. |
| +template <typename T> |
| +class LockFreeSimpleFifo { |
| + public: |
| + // Construct a simple lock-free FIFO with the specified |size| but |
| + // not allowed to hold the |invalid_value|. |
| + LockFreeSimpleFifo(size_t size, T invalid_value) |
| + : size_(size + 1), invalid_value_(invalid_value), head_(0), tail_(0) { |
| + DCHECK_LE(1, size); |
| + |
| + // Allocate memory for the FIFO value. Its size is the requested size +1 |
| + // because it can never be full (head == tail is reserved to mean "empty"). |
| + stack_.reset(new std::atomic<T>[size_]); |
| + |
| + // Ensure that the underlying atomics are also lock-free. This should |
| + // evaluate to a constant at compile time and so produce no code, but |
| + // a static_assert will not compile. |
| + CHECK(head_.is_lock_free()); |
| + CHECK(stack_[0].is_lock_free()); |
| + |
| + // All elements must be "invalid" to start in order for the push/pop |
| + // operations to work. |
| + for (size_t i = 0; i < size_; ++i) |
| + stack_[i].store(invalid_value_, std::memory_order_relaxed); |
| + } |
| + |
| + T invalid_value() { return invalid_value_; } |
| + size_t size() { return size_ - 1; } |
| + size_t used() { |
| + return (head_.load(std::memory_order_relaxed) + size_ - |
| + tail_.load(std::memory_order_relaxed)) % |
| + size_; |
| + } |
| + bool empty() { |
| + return empty(head_.load(std::memory_order_relaxed), |
| + tail_.load(std::memory_order_relaxed)); |
| + } |
| + bool full() { |
| + return full(head_.load(std::memory_order_relaxed), |
| + tail_.load(std::memory_order_relaxed)); |
| + } |
| + |
| + // Adds a new |value| to the end of the FIFO and returns true on success |
| + // or false if the stack was full. |
| + bool push(T value); |
| + |
| + // Retrieves the first value off the FIFO and returns it or the "invalid" |
| + // value if the stack is empty. |
| + T pop(); |
| + |
| + private: |
| + // Reports if the stack is empty/full based on explicit head/tail values. |
| + // Note that the % operation in C/C++ is a "remainder" operator and thus will |
| + // not provide correct "modular arithmetic" if the left value is negative; |
| + // adding |size_| keeps it positive. |
| + bool empty(size_t head, size_t tail) { return head == tail; } |
| + bool full(size_t head, size_t tail) { |
| + return (tail + size_ - 1) % size_ == head; |
| + } |
| + |
| + const size_t size_; // Size of the internal |stack_|: requested + 1 |
| + const T invalid_value_; // A value not allowed to be stored. |
| + |
| + std::atomic<size_t> head_; // One past the newest value; where to push. |
| + std::atomic<size_t> tail_; // The oldest value; first to pop. |
| + |
| + // Array holding pushed values. |
| + std::unique_ptr<std::atomic<T>[]> stack_; |
| + |
| + DISALLOW_COPY_AND_ASSIGN(LockFreeSimpleFifo); |
| +}; |
| + |
| +template <typename T> |
| +bool LockFreeSimpleFifo<T>::push(T value) { |
| + // Pushing the "invalid" value is not allowed; it would cause an infinite |
|
manzagop (departed)
2016/08/19 20:44:50
nit: "it would be cause"
bcwhite
2016/08/23 14:52:37
Done.
|
| + // loop in pop. |
| + CHECK_NE(invalid_value_, value); |
| + |
| + // Get the head of the stack and acquire its contents. |
| + size_t head = head_.load(std::memory_order_acquire); |
| + |
| + while (true) { |
| + DCHECK_LE(0U, head); |
| + DCHECK_GT(size_, head); |
| + |
| + // If the stack is full, fail. |
| + if (full(head, tail_.load(std::memory_order_relaxed))) |
| + return false; |
| + |
| + // Write the value being pushed to the top of the fifo, exchanging it |
| + // with the "invalid" value that should be there. If the atomic operation |
| + // fails then something else has snuck in and pushed something else to |
| + // that slot. A "strong" exchange is used to avoid mistakenly yielding |
| + // the CPU. |
| + T value_expected = invalid_value_; |
| + if (!stack_[head].compare_exchange_strong(value_expected, value, |
| + std::memory_order_release, |
| + std::memory_order_relaxed)) { |
| + // Get the new head since the one acquired above is no longer valid. |
| + size_t new_head = head_.load(std::memory_order_acquire); |
| + |
| + // If the new head matches the old one then another thread is currently |
|
manzagop (departed)
2016/08/19 20:44:50
typo: maches
bcwhite
2016/08/23 14:52:37
Acknowledged.
|
| + // between the write of the value and the increment of the head. Give it |
| + // a chance to run. |
| + if (new_head == head) { |
| + PlatformThread::YieldCurrentThread(); |
| + new_head = head_.load(std::memory_order_acquire); |
| + } |
| + |
| + // Try again. |
| + head = new_head; |
| + continue; |
| + } |
| + |
| + // Increment the head, releasing the newly stored value. There is no reason |
| + // for this to fail since only one thread at a time can be between the |
|
manzagop (departed)
2016/08/19 20:44:50
Ugh... I think we still have the issue where after
bcwhite
2016/08/22 12:59:41
I don't think that can happen any more. Only the
manzagop (departed)
2016/08/22 14:49:29
Here's what I have in mind: T1 stores a copy of he
bcwhite
2016/08/23 14:52:37
Right. I'm going about this all wrong. A slot sh
|
| + // exchange above and the increment below. A "weak" exchange is sufficient |
| + // because a simple retry can handle false failures. |
| + size_t expected_head = head; |
| + while (!head_.compare_exchange_weak(expected_head, (head + 1) % size_, |
| + std::memory_order_release, |
| + std::memory_order_relaxed)) { |
| + // |expected_head| is updated with the actual value stored there. |
| + // Ensure it matches our expectation that only false-failures can |
| + // occur. |
| + DCHECK_EQ(head, expected_head); |
| + } |
| + |
| + // Success! |
| + return true; |
| + } |
| +} |
| + |
| +template <typename T> |
| +T LockFreeSimpleFifo<T>::pop() { |
| + // Get the current number of elements on the stack. |
| + size_t tail = tail_.load(std::memory_order_acquire); |
| + |
| + while (true) { |
| + DCHECK_LE(0U, tail); |
| + DCHECK_GT(size_, tail); |
| + |
| + // If the stack is empty, fail. |
| + if (empty(head_.load(std::memory_order_relaxed), tail)) |
| + return invalid_value_; |
| + |
| + // Read a value from the bottom of the fifo, writing the "invalid" value |
| + // in its place. If the retrieved value is invalid then it has already |
| + // been taken. |
| + T value = stack_[tail].exchange(invalid_value_, std::memory_order_relaxed); |
| + if (value == invalid_value_) { |
| + // Get the new tail since the one acquired above is no longer valid. |
| + size_t new_tail = tail_.load(std::memory_order_acquire); |
| + |
| + // If the new tail matches the old one then another thread is currently |
| + // between the read of the value and the increment of the tail. Give it |
| + // a chance to run. |
| + if (new_tail == tail) { |
| + PlatformThread::YieldCurrentThread(); |
| + new_tail = tail_.load(std::memory_order_acquire); |
| + } |
| + |
| + // Try again. |
| + tail = new_tail; |
| + continue; |
| + } |
| + |
| + // Increment the tail, releasing the newly stored value. There is no reason |
| + // for this to fail since only one thread at a time can be between the |
| + // exchange above and the increment below. A "weak" exchange is sufficient |
| + // because a simple retry can handle false failures. |
| + size_t expected_tail = tail; |
| + while (!tail_.compare_exchange_weak(expected_tail, (tail + 1) % size_, |
| + std::memory_order_release, |
| + std::memory_order_relaxed)) { |
| + // |expected_tail| is updated with the actual value stored there. |
| + // Ensure it matches our expectation that only false-failures can |
| + // occur. |
| + DCHECK_EQ(tail, expected_tail); |
| + } |
| + |
| + // Success! |
| + DCHECK_NE(invalid_value_, value); |
| + return value; |
| + } |
| +} |
| +//============================================================================= |
| + |
| + |
| enum : int { |
| // The maximum number of call-stack addresses stored per activity. This |
| // cannot be changed without also changing the version number of the |
| @@ -508,9 +702,7 @@ class BASE_EXPORT GlobalActivityTracker { |
| // These have to be lock-free because lock activity is tracked and causes |
| // re-entry problems. |
| std::atomic<int> thread_tracker_count_; |
| - std::atomic<int> available_memories_count_; |
| - std::atomic<PersistentMemoryAllocator::Reference> |
| - available_memories_[kMaxThreadCount]; |
| + LockFreeSimpleFifo<PersistentMemoryAllocator::Reference> available_memories_; |
| // The active global activity tracker. |
| static GlobalActivityTracker* g_tracker_; |