Chromium Code Reviews

| Index: base/debug/activity_tracker.h |
| diff --git a/base/debug/activity_tracker.h b/base/debug/activity_tracker.h |
| index d6ff724cf2daee6106c3fbce75b9622f1d7a9eda..ba351bfb7534c21d658a9d2750f36c3dd60ba3f5 100644 |
| --- a/base/debug/activity_tracker.h |
| +++ b/base/debug/activity_tracker.h |
| @@ -23,6 +23,7 @@ |
| #include "base/base_export.h" |
| #include "base/location.h" |
| #include "base/metrics/persistent_memory_allocator.h" |
| +#include "base/threading/platform_thread.h" |
| #include "base/threading/thread_checker.h" |
| #include "base/threading/thread_local_storage.h" |
| @@ -41,6 +42,159 @@ namespace debug { |
| class ThreadActivityTracker; |
| + |
| +//============================================================================= |
| +// This class provides a lock-free stack of any atomic type with the |
| +// limitation that there must be at least one "invalid" value. This is |
| +// built as a completely generic type and can (hopefully) be moved to a |
| +// more generally useful place in the future. |
| +template <typename T> |
| +class LockFreeSimpleStack { |
| + public: |
| + // Construct a simple lock-free stack with the specified |size| but |
| + // not allowed to hold the |invalid_value|. |
| + LockFreeSimpleStack(size_t size, T invalid_value) |
| + : size_(size), invalid_value_(invalid_value), head_(0) { |
| + DCHECK_LE(1U, size); |
| + |
| + // Allocate memory for the stack values. |
| + values_.reset(new std::atomic<T>[size_]); |
| + |
| + // Ensure that the underlying atomics are also lock-free. This should |
| + // evaluate to a constant at compile time and so produce no code, but |
| + // a static_assert will not compile because is_lock_free() is not constexpr. |
| + CHECK(head_.is_lock_free()); |
| + CHECK(values_[0].is_lock_free()); |
| + |
| + // All elements must be "invalid" to start in order for the push/pop |
| + // operations to work. |
| + for (size_t i = 0; i < size_; ++i) |
| + values_[i].store(invalid_value_, std::memory_order_relaxed); |
| + } |
| + |
| + T invalid_value() { return invalid_value_; } |
| + size_t size() { return size_; } |
| + size_t used() { return head_.load(std::memory_order_relaxed); } |
| + bool empty() { return used() == 0; } |
| + bool full() { return used() == size_; } |
| + |
| + // Adds a new |value| to the top of the stack and returns true on success |
| + // or false if the stack was full. |
| + bool push(T value); |
| + |
| + // Retrieves the top value off the stack and returns it or the "invalid" |
| + // value if the stack is empty. |
| + T pop(); |
| + |
| + private: |
| + const size_t size_; // Size of the internal |values_| array. |
| + const T invalid_value_; // A value not allowed to be stored. |
| + |
| + std::atomic<size_t> head_; // One past the newest value; where to push. |

manzagop (departed) 2016/08/25 15:26:55
    nit: is "index of the first empty slot" clearer?
bcwhite 2016/08/25 15:53:19
    Done.
| + |
| + // Array holding pushed values. |
| + std::unique_ptr<std::atomic<T>[]> values_; |
| + |
| + DISALLOW_COPY_AND_ASSIGN(LockFreeSimpleStack); |
| +}; |
| + |
| +template <typename T> |
| +bool LockFreeSimpleStack<T>::push(T value) { |
| + // Pushing the "invalid" value is not allowed; it would cause an infinite |
| + // loop in pop. |
| + CHECK_NE(invalid_value_, value); |
| + |
| + // Get the head of the stack. |
| + size_t head = head_.load(std::memory_order_relaxed); |
| + |
| + // In short: allocate a slot at the head of the stack, write the value to |
| + // it, try again if anything gets in the way. |
| + while (true) { |
| + DCHECK_LE(0U, head); |
| + DCHECK_GE(size_, head); |
| + |
| + // If the stack is full, fail. |
| + if (head == size_) |
| + return false; |
| + |
| + // The "head" is the critical resource so allocate a slot from the |
| + // |values_| buffer at its current location, acquiring the value there. |
| + // A "weak" operation is used because it's relatively trivial to try |
| + // this operation again. |
| + size_t slot = head; |
| + if (!head_.compare_exchange_weak(slot, head + 1, |
| + std::memory_order_acquire, |
| + std::memory_order_relaxed)) { |
| + // The exchange will have loaded the latest |head_| into |slot|. |
| + head = slot; |
| + continue; |
| + } |
| + |
| + // Save the value being pushed to the reserved slot, overwriting the |
| + // "invalid" value that should be there. If it's not, it's because the |
| + // slot was released by a pop() but that method hasn't yet extracted |
| + // the value. Wait for it to do so. Use a "strong" exchange to avoid |
| + // mistakenly releasing the CPU. |
| + T expected_value = invalid_value_; |
| + while (!values_[slot].compare_exchange_strong(expected_value, value, |
| + std::memory_order_relaxed, |
| + std::memory_order_relaxed)) { |
| + PlatformThread::YieldCurrentThread(); |
| + expected_value = invalid_value_; |
| + } |
| + |
| + // Success! |
| + return true; |
| + } |
| +} |
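
As a quick illustration of the reserve-then-publish sequence above, the sketch below (not part of the patch) pushes from several threads at once: each successful compare-exchange on |head_| hands out a distinct slot, so every value lands exactly once. The element type, sizes, use of std::thread and of the CHECK macro are assumptions made only for this example.

    #include <thread>
    #include <vector>

    void PushContentionSketch() {
      // 4 threads * 25 values fill the stack exactly, so push() never sees "full".
      base::debug::LockFreeSimpleStack<int> stack(100, /* invalid_value */ -1);

      std::vector<std::thread> pushers;
      for (int t = 0; t < 4; ++t) {
        pushers.emplace_back([&stack, t] {
          for (int i = 0; i < 25; ++i)
            CHECK(stack.push(t * 25 + i));
        });
      }
      for (std::thread& pusher : pushers)
        pusher.join();

      CHECK(stack.full());  // All 100 slots were claimed, none lost to races.
    }
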
| + |
| +template <typename T> |
| +T LockFreeSimpleStack<T>::pop() { |
| + // Get the head of the stack. |
| + size_t head = head_.load(std::memory_order_relaxed); |
| + |
| + // In short: deallocate a slot at the head of the stack, read the value from |
| + // it, try again if anything gets in the way. |
| + while (true) { |
| + DCHECK_LE(0U, head); |
| + DCHECK_GE(size_, head); |
| + |
| + // If the stack is full, fail. |

manzagop (departed) 2016/08/25 15:26:55
    nit: full -> empty
bcwhite 2016/08/25 15:53:19
    Done.
| + if (head == 0) |
| + return invalid_value_; |
| + |
| + // The "head" is the critical resource so deallocate a slot from the |
| + // |values_| buffer at its current location, acquiring the value there. |
| + // A "weak" operation is used because it's relatively trivial to try |
| + // this operation again. |
| + size_t slot = head; |
| + if (!head_.compare_exchange_weak(slot, head - 1, |
| + std::memory_order_acquire, |
| + std::memory_order_relaxed)) { |
| + // The exchange will have loaded the latest |head_| into |slot|. |
| + head = slot; |
| + continue; |
| + } |
| + --slot; // "head" is past-the-top |
| + |
| + // Read a value from the top of the stack, writing the "invalid" value |
| + // in its place. If the retrieved value is invalid then the slot was |
| + // acquired by push() but that method hasn't yet written the value. Wait |
| + // for it to do so. |
| + T value; |
| + while ((value = values_[slot].exchange( |
| + invalid_value_, std::memory_order_relaxed)) == invalid_value_) { |
| + PlatformThread::YieldCurrentThread(); |
| + } |
| + |
| + // Success! |
| + DCHECK_NE(invalid_value_, value); |
| + return value; |
| + } |
| +} |
| +//============================================================================= |
| + |
| + |
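
For reference, a minimal single-threaded sketch of the intended semantics (the element type, the values, and the use of CHECK macros are illustrative assumptions, not part of the patch): pop() returns values in LIFO order, returns the "invalid" value when the stack is empty, and push() reports failure when it is full.

    #include <stdint.h>

    void BasicUsageSketch() {
      // A two-element stack that reserves 0 as the "not a value" sentinel.
      base::debug::LockFreeSimpleStack<uint32_t> stack(2, 0u);

      CHECK(stack.empty());
      CHECK(stack.push(11u));
      CHECK(stack.push(22u));
      CHECK(stack.full());
      CHECK(!stack.push(33u));                        // Full: push fails.

      CHECK_EQ(22u, stack.pop());                     // LIFO order.
      CHECK_EQ(11u, stack.pop());
      CHECK_EQ(stack.invalid_value(), stack.pop());   // Empty: sentinel returned.
    }
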
| enum : int { |
| // The maximum number of call-stack addresses stored per activity. This |
| // cannot be changed without also changing the version number of the |
| @@ -508,9 +662,7 @@ class BASE_EXPORT GlobalActivityTracker { |
| // These have to be lock-free because lock activity is tracked and causes |
| // re-entry problems. |
| std::atomic<int> thread_tracker_count_; |
| - std::atomic<int> available_memories_count_; |
| - std::atomic<PersistentMemoryAllocator::Reference> |
| - available_memories_[kMaxThreadCount]; |
| + LockFreeSimpleStack<PersistentMemoryAllocator::Reference> available_memories_; |
| // The active global activity tracker. |
| static GlobalActivityTracker* g_tracker_; |
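
The rest of the patch is not shown here, but a plausible way the new member gets used is recycling thread-tracker memory: when a thread exits, the reference to its block is pushed onto |available_memories_|, and a newly created tracker pops one to reuse before carving fresh space out of the allocator. The sketch below is hypothetical; the function names and the choice of 0 as the "invalid" reference are assumptions for illustration only.

    using ReferenceStack =
        base::debug::LockFreeSimpleStack<base::PersistentMemoryAllocator::Reference>;

    // Hypothetical: fetch a previously released block, if any.
    base::PersistentMemoryAllocator::Reference TakeFreeMemory(ReferenceStack* stack) {
      // Returns the "invalid" reference (assumed 0 here) when nothing is
      // available, in which case the caller would allocate a fresh block.
      return stack->pop();
    }

    // Hypothetical: return a block released by an exiting thread.
    void ReturnFreedMemory(ReferenceStack* stack,
                           base::PersistentMemoryAllocator::Reference ref) {
      // push() is lock-free, so this is safe on thread-destruction paths where
      // taking a lock could re-enter the activity-tracking code.
      bool stored = stack->push(ref);
      DCHECK(stored);  // Sized for kMaxThreadCount, so it should never be full.
    }
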