Index: base/debug/activity_tracker_unittest.cc |
diff --git a/base/debug/activity_tracker_unittest.cc b/base/debug/activity_tracker_unittest.cc |
index c36d8fac352cb169759d9a9e07db5c9648860010..7a57caff6a676bcf26f2efdab025d195ff540304 100644 |
--- a/base/debug/activity_tracker_unittest.cc |
+++ b/base/debug/activity_tracker_unittest.cc |
@@ -13,9 +13,11 @@ |
#include "base/files/scoped_temp_dir.h" |
#include "base/memory/ptr_util.h" |
#include "base/pending_task.h" |
+#include "base/rand_util.h" |
#include "base/synchronization/condition_variable.h" |
#include "base/synchronization/lock.h" |
#include "base/synchronization/spin_wait.h" |
+#include "base/threading/platform_thread.h" |
#include "base/threading/simple_thread.h" |
#include "base/time/time.h" |
#include "testing/gtest/include/gtest/gtest.h" |
@@ -37,9 +39,238 @@ class TestActivityTracker : public ThreadActivityTracker {
std::unique_ptr<char[]> mem_segment_; |
}; |
+ |
+// The interval at which the stack-test threads wait for the stack to become
+// full or empty. It's prime so that it won't coincide with any other
+// periodic interval (except itself).
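+// The pusher waits for "empty" near the end of each interval while the
+// popper waits for "full" at the midpoint; those rendezvous points are half
+// an interval apart, far more than the stack can hold, so the two threads
+// can never both be blocked waiting at the same time.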
+const int kStackTestOperationInterval = 997; |
+ |
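+// Pushes the values [0, count) onto the stack, alternating between the two
+// supported push idioms. Assumes it is the only thread pushing.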
+class StackPushThread : public SimpleThread { |
+ public: |
+ StackPushThread(LockFreeSimpleStack<int>* stack, int count) |
+ : SimpleThread("StackPush", Options()), stack_(stack), count_(count) {} |
+ ~StackPushThread() override {} |
+ |
+ void Run() override { |
+ int yield_after = RandInt(1, stack_->size() * 2); |
+ for (int i = 0; i < count_; ++i) { |
+ // Two ways of pushing: check for full first or check for failed push. |
+ if (i % 2 == 0) { |
+ // Will fail if full; keep trying. |
+ while (!stack_->push(i)) |
+ ; |
+ } else { |
+        // Checking full() first is valid only because there is exactly one
+        // thread pushing.
+        while (stack_->full())
+          ;
+        // Don't call push() inside DCHECK(): the expression would not be
+        // evaluated in builds where DCHECKs are disabled.
+        bool pushed = stack_->push(i);
+        DCHECK(pushed);
+ } |
+ |
+ // Take a break once in a while. |
+ if (--yield_after <= 0) { |
+ PlatformThread::YieldCurrentThread(); |
+ yield_after = RandInt(1, stack_->size() * 2); |
+ } |
+ |
+ // Every so often, wait for the stack to empty. |
+ if (i < count_ - kStackTestOperationInterval && |
+ i % kStackTestOperationInterval == kStackTestOperationInterval - 1) { |
+ while (!stack_->empty()) |
+ ; |
+ } |
+ } |
+ } |
+ |
+ private: |
+ LockFreeSimpleStack<int>* const stack_; |
+ const int count_; |
+ |
+ DISALLOW_COPY_AND_ASSIGN(StackPushThread); |
+}; |
+ |
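+// Pops |count| values from the stack, alternating between the two supported
+// pop idioms. Assumes it is the only thread popping.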
+class StackPopThread : public SimpleThread { |
+ public: |
+ StackPopThread(LockFreeSimpleStack<int>* stack, int count) |
+ : SimpleThread("StackPop", Options()), stack_(stack), count_(count) {} |
+ ~StackPopThread() override {} |
+ |
+ void Run() override { |
+ int yield_after = RandInt(1, stack_->size() * 2); |
+ for (int i = 0; i < count_; ++i) { |
+ int popped; |
+ // Two ways of popping: check for empty first or check for invalid return. |
+ if (i % 2 == 0) { |
+ // Will return "invalid" if empty; keep trying. |
+ while ((popped = stack_->pop()) < 0) |
+ ; |
+ } else { |
+        // Checking empty() first is valid only because there is exactly one
+        // thread popping.
+ while (stack_->empty()) |
+ ; |
+ popped = stack_->pop(); |
+ } |
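+      // At this point i values have already been popped and at most size()
+      // more can have been pushed beyond them, so any popped value must be
+      // less than i + size().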
+ DCHECK_LE(0, popped); |
+ DCHECK_GT(i + static_cast<int>(stack_->size()), popped); |
+ |
+ // Take a break once in a while. |
+ if (--yield_after <= 0) { |
+ PlatformThread::YieldCurrentThread(); |
+ yield_after = RandInt(1, stack_->size() * 2); |
+ } |
+ |
+ // Every so often, wait for the stack to fill. |
+ if (i < count_ - kStackTestOperationInterval && |
+ i % kStackTestOperationInterval == kStackTestOperationInterval / 2) { |
+ while (!stack_->full()) |
+ ; |
+ } |
+ } |
+ } |
+ |
+ private: |
+ LockFreeSimpleStack<int>* const stack_; |
+ const int count_; |
+ |
+ DISALLOW_COPY_AND_ASSIGN(StackPopThread); |
+}; |
+ |
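+// Pushes or pops |count| values while many sibling threads do the same. Each
+// push of value i increments pending[i] and each pop decrements it, so every
+// counter balances back to zero only if no value is lost or duplicated by
+// the stack.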
+class StackParallelThread : public SimpleThread { |
+ public: |
+ StackParallelThread(LockFreeSimpleStack<int>* stack, |
+ int thread_number, |
+ bool push_not_pop, |
+ std::atomic<char>* pending, |
+ int count) |
+ : SimpleThread(std::string("Stack") + (push_not_pop ? "Push" : "Pop") + |
+ static_cast<char>('A' + thread_number), |
+ Options()), |
+ stack_(stack), |
+ pending_(pending), |
+ push_not_pop_(push_not_pop), |
+ count_(count) {} |
+ ~StackParallelThread() override {} |
+ |
+ void Run() override { |
+ int yield_after = RandInt(1, stack_->size() * 2); |
+ |
+ for (int i = 0; i < count_; ++i) { |
+ if (push_not_pop_) { |
+ while (!stack_->push(i)) |
+ ; |
+ pending_[i].fetch_add(1); |
+ } else { |
+ int popped; |
+ while ((popped = stack_->pop()) < 0) |
+ ; |
+ pending_[popped].fetch_sub(1); |
+ } |
+ |
+ // Take a break once in a while. |
+ if (--yield_after <= 0) { |
+ PlatformThread::YieldCurrentThread(); |
+ yield_after = RandInt(1, stack_->size() * 2); |
+ } |
+ } |
+ } |
+ |
+ private: |
+ LockFreeSimpleStack<int>* const stack_; |
+ std::atomic<char>* const pending_; |
+ const bool push_not_pop_; |
+ const int count_; |
+ |
+ DISALLOW_COPY_AND_ASSIGN(StackParallelThread); |
+}; |
+ |
} // namespace |
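+
+// Exercises LockFreeSimpleStack: single-threaded sanity checks first, then
+// one dedicated pusher racing one dedicated popper, then many pushers and
+// poppers in parallel.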
+TEST(LockFreeSimpleStack, PushPopTest) { |
+ LockFreeSimpleStack<int> stack(50U, -1); |
+ ASSERT_EQ(50U, stack.size()); |
+ ASSERT_EQ(0U, stack.used()); |
+ |
+  EXPECT_TRUE(stack.push(1001));
+  EXPECT_EQ(1U, stack.used());
+
+  EXPECT_TRUE(stack.push(2002));
+  EXPECT_EQ(2U, stack.used());
+ |
+ int value = stack.pop(); |
+ EXPECT_EQ(2002, value); |
+ EXPECT_EQ(1U, stack.used()); |
+ |
+ value = stack.pop(); |
+ EXPECT_EQ(1001, value); |
+ EXPECT_EQ(0U, stack.used()); |
+ |
+ value = stack.pop(); |
+ ASSERT_EQ(-1, value); |
+ ASSERT_TRUE(stack.empty()); |
+ |
+ // Test push/pop many times and in parallel. |
+ const int kStackOperations = 1000000; |
+ StackPushThread pusher(&stack, kStackOperations); |
+ StackPopThread popper(&stack, kStackOperations); |
+ pusher.Start(); |
+ popper.Start(); |
+ pusher.Join(); |
+ popper.Join(); |
+ |
+ // Test many push/pop threads. |
+ const int kParallelThreads = 10; |
+ const int kParallelOperations = kStackOperations / kParallelThreads; |
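+  // pending[i] counts pushes minus pops of value i across all threads; a
+  // char is wide enough because at most kParallelThreads pushes of any one
+  // value can be outstanding.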
+ std::unique_ptr<std::atomic<char>[]> pending( |
+ new std::atomic<char>[kParallelOperations]); |
+ for (int i = 0; i < kParallelOperations; ++i) |
+ pending[i].store(0, std::memory_order_relaxed); |
+ std::unique_ptr<StackParallelThread> pushers[kParallelThreads]; |
+ std::unique_ptr<StackParallelThread> poppers[kParallelThreads]; |
+ for (int t = 0; t < kParallelThreads; ++t) { |
+ pushers[t].reset(new StackParallelThread(&stack, t, true, pending.get(), |
+ kParallelOperations)); |
+ poppers[t].reset(new StackParallelThread(&stack, t, false, pending.get(), |
+ kParallelOperations)); |
+ } |
+ for (int t = 0; t < kParallelThreads; ++t) { |
+ pushers[t]->Start(); |
+ poppers[t]->Start(); |
+ } |
+ for (int t = 0; t < kParallelThreads; ++t) { |
+ pushers[t]->Join(); |
+ poppers[t]->Join(); |
+ } |
+ for (int i = 0; i < kParallelOperations; ++i) |
+ EXPECT_EQ(0, static_cast<int>(pending[i].load(std::memory_order_relaxed))); |
+} |
+
class ActivityTrackerTest : public testing::Test { |
public: |
const int kMemorySize = 1 << 20; // 1MiB
@@ -72,8 +303,7 @@ class ActivityTrackerTest : public testing::Test {
GlobalActivityTracker* global_tracker = GlobalActivityTracker::Get(); |
if (!global_tracker) |
return 0; |
- return global_tracker->available_memories_count_.load( |
- std::memory_order_relaxed); |
+ return global_tracker->available_memories_.used(); |
} |
static void DoNothing() {} |