Index: base/debug/activity_tracker.h
diff --git a/base/debug/activity_tracker.h b/base/debug/activity_tracker.h
index d6ff724cf2daee6106c3fbce75b9622f1d7a9eda..79ed061d037a0bc8484d362c6bc6bea771c61d96 100644
--- a/base/debug/activity_tracker.h
+++ b/base/debug/activity_tracker.h
@@ -23,6 +23,7 @@
 #include "base/base_export.h"
 #include "base/location.h"
 #include "base/metrics/persistent_memory_allocator.h"
+#include "base/threading/platform_thread.h"
 #include "base/threading/thread_checker.h"
 #include "base/threading/thread_local_storage.h"
@@ -41,6 +42,189 @@ namespace debug {
 class ThreadActivityTracker;
+
+//=============================================================================
+// This class provides a lock-free FIFO queue of any type that can be held in
+// a std::atomic, with the limitation that there must be at least one
+// "invalid" value that is never stored. This is built as a completely generic
+// type and can (hopefully) be moved to a more generally useful place in the
+// future.
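+//
+// A sketch of intended use (illustrative only; the element type and the
+// choice of -1 as the "invalid" value below are examples, not requirements):
+//
+//   LockFreeSimpleQueue<int> queue(10, -1);  // Holds up to 10 ints; -1 is
+//                                            // the reserved invalid value.
+//   bool ok = queue.push(42);                // False when the queue is full.
+//   int value = queue.pop();                 // 42 here; -1 if it were empty.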
+template <typename T>
+class LockFreeSimpleQueue {
+ public:
+  // Constructs a simple lock-free queue that can hold up to |size| entries.
+  // The |invalid_value| is reserved and is never allowed to be stored.
+  LockFreeSimpleQueue(size_t size, T invalid_value)
+      : size_(size + 1), invalid_value_(invalid_value), head_(0), tail_(0) {
+    DCHECK_LE(1U, size);
+
+    // Allocate memory for the queue values. The allocation is the requested
+    // size + 1 because the queue can never be completely full (head == tail
+    // is reserved to mean "empty").
+    values_.reset(new std::atomic<T>[size_]);
+
+    // Ensure that the underlying atomics are also lock-free. This should
+    // evaluate to a constant at compile time and so produce no code, but
+    // is_lock_free() is not constexpr so a static_assert will not compile.
+    CHECK(head_.is_lock_free());
+    CHECK(values_[0].is_lock_free());
+
+    // All elements must be "invalid" to start in order for the push/pop
+    // operations to work.
+    for (size_t i = 0; i < size_; ++i)
+      values_[i].store(invalid_value_, std::memory_order_relaxed);
+  }
+
+  T invalid_value() { return invalid_value_; }
+  size_t size() { return size_ - 1; }
+  size_t used() {
+    return (head_.load(std::memory_order_relaxed) + size_ -
+            tail_.load(std::memory_order_relaxed)) %
+           size_;
+  }
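+  // used() example: with a requested size of 3 (|size_| == 4), head == 1 and
+  // tail == 3, slots 3 and 0 hold values and (1 + 4 - 3) % 4 == 2 are used.
+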
+  bool empty() {
+    return empty(head_.load(std::memory_order_relaxed),
+                 tail_.load(std::memory_order_relaxed));
+  }
+  bool full() {
+    return full(head_.load(std::memory_order_relaxed),
+                tail_.load(std::memory_order_relaxed));
+  }
+
+  // Adds a new |value| to the end of the queue and returns true on success
+  // or false if the queue was full.
+  bool push(T value);
+
+  // Retrieves the oldest value from the queue and returns it, or the
+  // "invalid" value if the queue is empty.
+  T pop();
+
+ private:
+  // Reports if the queue is empty/full based on explicit head/tail values.
+  // Note that the operands are unsigned, so a subtraction could wrap below
+  // zero; adding |size_| first keeps the intermediate value in range for
+  // the % operator.
+  bool empty(size_t head, size_t tail) { return head == tail; }
+  bool full(size_t head, size_t tail) {
+    return (tail + size_ - 1) % size_ == head;
+  }
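+  // full() example: with a requested size of 3 (|size_| == 4), tail == 0 and
+  // head == 3, (0 + 4 - 1) % 4 == 3 == head: three slots hold values and the
+  // fourth is the reserved "empty" marker, so the queue is full.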
+
+  const size_t size_;      // Size of the internal |values_|: requested + 1.
+  const T invalid_value_;  // A value not allowed to be stored.
+
+  std::atomic<size_t> head_;  // One past the newest value; where to push.
+  std::atomic<size_t> tail_;  // The oldest value; first to pop.
+
+  // Array holding pushed values.
+  std::unique_ptr<std::atomic<T>[]> values_;
+
+  DISALLOW_COPY_AND_ASSIGN(LockFreeSimpleQueue);
+};
+
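+// A summary of the concurrency design below: push() first reserves a slot by
+// advancing |head_| with a compare-and-swap and then stores the value into
+// the reserved slot; the slot's content (invalid vs. valid) thus acts as a
+// per-slot "ready" flag that pop() polls. pop() does the reverse: it takes
+// the value out of the slot first and only then advances |tail_|.
+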
+template <typename T>
+bool LockFreeSimpleQueue<T>::push(T value) {
+  // Pushing the "invalid" value is not allowed; it would cause an infinite
+  // loop in pop().
+  CHECK_NE(invalid_value_, value);
+
+  // Get the head of the queue and acquire its contents.
+  size_t head = head_.load(std::memory_order_acquire);
+
+  // In short: allocate a slot at the head of the queue, write the value to
+  // it, try again if anything gets in the way.
+  while (true) {
+    DCHECK_LE(0U, head);
+    DCHECK_GT(size_, head);
+
+    // If the queue is full, fail.
+    if (full(head, tail_.load(std::memory_order_relaxed)))
+      return false;
+
+    // The "head" is the critical resource so allocate a slot from the
+    // |values_| buffer at its current location, acquiring the value there.
+    // A "weak" operation is used because it's relatively trivial to try
+    // this operation again.
+    size_t slot = head;
+    if (!head_.compare_exchange_weak(slot, (head + 1) % size_,
+                                     std::memory_order_acquire,
+                                     std::memory_order_relaxed)) {
+      // The exchange will have loaded the latest "head" into |slot|.
+      head = slot;
+      continue;
+    }
+
+    // Save the value being pushed to the reserved slot, overwriting the
+    // "invalid" value that must be there.
+    DCHECK_EQ(invalid_value_, values_[slot].load(std::memory_order_relaxed));
+    values_[slot].store(value, std::memory_order_relaxed);
+
+    // Success!
+    return true;
+  }
+}
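+
+// For illustration: with a requested size of 3 (|size_| == 4) and an empty
+// queue (head == tail == 0), push(v) CASes |head_| from 0 to 1 to reserve
+// slot 0 and then stores v into values_[0]. A pop() racing with that store
+// sees the "invalid" value still in slot 0 and spins (yielding) until v
+// appears.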
+
+template <typename T>
+T LockFreeSimpleQueue<T>::pop() {
+  // Get the tail of the queue and acquire its contents.
+  size_t tail = tail_.load(std::memory_order_acquire);
+
+  // In short: ensure the tail value is valid, take it, try again if anything
+  // goes wrong.
+  while (true) {
+    DCHECK_LE(0U, tail);
+    DCHECK_GT(size_, tail);
+
+    // If the queue is empty, fail.
+    if (empty(head_.load(std::memory_order_relaxed), tail))
+      return invalid_value_;
+
+    // Read a value from the front of the queue, writing the "invalid" value
+    // in its place. If the retrieved value is invalid then something else is
+    // happening.
+    T value = values_[tail].exchange(invalid_value_, std::memory_order_relaxed);
+    if (value == invalid_value_) {
+      // Retrieve an updated "tail" value to see what happened.
+      size_t new_tail = tail_.load(std::memory_order_acquire);
+      if (new_tail == tail) {
+        // Either a push() has reserved this slot but not yet written a value
+        // to it, or another pop() has taken the value but not yet incremented
+        // the tail.
+        // It would be possible to simply act as if the queue were empty but
+        // this could violate an expectation where there is known to be a
+        // single "popper" thread that has already verified that the queue is
+        // not empty and thus doesn't expect the pop() operation to fail.
+        // Instead, yield the CPU and retry.
+        PlatformThread::YieldCurrentThread();
+      } else {
+        // Another thread must have taken the value and incremented the tail.
+        // Nothing to do but try again.
+      }
+
+      // Try again.
+      tail = new_tail;
+      continue;
+    }
+
+    // Increment the tail, releasing the slot (which now holds the "invalid"
+    // value) for reuse. There is no reason for this to fail since only one
+    // thread at a time can be between the exchange above and the increment
+    // below. A "weak" exchange is sufficient because a simple retry can
+    // handle false failures.
+    size_t expected_tail = tail;
+    while (!tail_.compare_exchange_weak(expected_tail, (tail + 1) % size_,
+                                        std::memory_order_release,
+                                        std::memory_order_relaxed)) {
+      // |expected_tail| is updated with the actual value stored there.
+      // Ensure it matches our expectation that only false failures can
+      // occur.
+      DCHECK_EQ(tail, expected_tail);
+    }
+
+    // Success!
+    DCHECK_NE(invalid_value_, value);
+    return value;
+  }
+}
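+
+// For example, a dedicated consumer thread that has already observed
+// !empty() can rely on pop() eventually returning a real value rather than
+// the sentinel (a sketch, assuming a single popping thread; |queue| is a
+// LockFreeSimpleQueue<int> and |Process| a hypothetical consumer function):
+//
+//   while (!queue.empty()) {
+//     int value = queue.pop();  // Never invalid_value() with a sole popper.
+//     Process(value);
+//   }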
+//=============================================================================
+
+
 enum : int {
   // The maximum number of call-stack addresses stored per activity. This
   // cannot be changed without also changing the version number of the
@@ -508,9 +692,7 @@ class BASE_EXPORT GlobalActivityTracker {
   // These have to be lock-free because lock activity is tracked and causes
   // re-entry problems.
   std::atomic<int> thread_tracker_count_;
-  std::atomic<int> available_memories_count_;
-  std::atomic<PersistentMemoryAllocator::Reference>
-      available_memories_[kMaxThreadCount];
+  LockFreeSimpleQueue<PersistentMemoryAllocator::Reference> available_memories_;
 
   // The active global activity tracker.
   static GlobalActivityTracker* g_tracker_;