OLD | NEW |
1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/debug/activity_tracker.h" | 5 #include "base/debug/activity_tracker.h" |
6 | 6 |
7 #include <memory> | 7 #include <memory> |
8 | 8 |
9 #include "base/bind.h" | 9 #include "base/bind.h" |
10 #include "base/files/file.h" | 10 #include "base/files/file.h" |
11 #include "base/files/file_util.h" | 11 #include "base/files/file_util.h" |
12 #include "base/files/memory_mapped_file.h" | 12 #include "base/files/memory_mapped_file.h" |
13 #include "base/files/scoped_temp_dir.h" | 13 #include "base/files/scoped_temp_dir.h" |
14 #include "base/memory/ptr_util.h" | 14 #include "base/memory/ptr_util.h" |
15 #include "base/pending_task.h" | 15 #include "base/pending_task.h" |
| 16 #include "base/rand_util.h" |
16 #include "base/synchronization/condition_variable.h" | 17 #include "base/synchronization/condition_variable.h" |
17 #include "base/synchronization/lock.h" | 18 #include "base/synchronization/lock.h" |
18 #include "base/synchronization/spin_wait.h" | 19 #include "base/synchronization/spin_wait.h" |
| 20 #include "base/threading/platform_thread.h" |
19 #include "base/threading/simple_thread.h" | 21 #include "base/threading/simple_thread.h" |
20 #include "base/time/time.h" | 22 #include "base/time/time.h" |
21 #include "testing/gtest/include/gtest/gtest.h" | 23 #include "testing/gtest/include/gtest/gtest.h" |
22 | 24 |
23 namespace base { | 25 namespace base { |
24 namespace debug { | 26 namespace debug { |
25 | 27 |
26 namespace { | 28 namespace { |
27 | 29 |
// ThreadActivityTracker backed by test-owned heap memory, so tests can create
// trackers without going through GlobalActivityTracker's shared allocator.
class TestActivityTracker : public ThreadActivityTracker {
 public:
  TestActivityTracker(std::unique_ptr<char[]> memory, size_t mem_size)
      // memset() zeroes the buffer and returns the same pointer, which is
      // passed straight to the base class as the tracker's storage.
      : ThreadActivityTracker(memset(memory.get(), 0, mem_size), mem_size),
        mem_segment_(std::move(memory)) {}

  ~TestActivityTracker() override {}

 private:
  // Owns the storage handed to the base class; must outlive the tracker.
  std::unique_ptr<char[]> mem_segment_;
};
39 | 41 |
| 42 |
| 43 // The interval between which the Stack threads will wait for the stack to |
| 44 // become full or empty. It's prime so that it won't correspond to any other |
| 45 // interval (except itself). |
| 46 const int kStackTestOperationInterval = 997; |
| 47 |
| 48 class StackPushThread : public SimpleThread { |
| 49 public: |
| 50 StackPushThread(LockFreeSimpleStack<int>* stack, int count) |
| 51 : SimpleThread("StackPush", Options()), stack_(stack), count_(count) {} |
| 52 ~StackPushThread() override {} |
| 53 |
| 54 void Run() override { |
| 55 int yield_after = RandInt(1, stack_->size() * 2); |
| 56 for (int i = 0; i < count_; ++i) { |
| 57 // Two ways of pushing: check for full first or check for failed push. |
| 58 if (i % 2 == 0) { |
| 59 // Will fail if full; keep trying. |
| 60 while (!stack_->push(i)) |
| 61 ; |
| 62 } else { |
| 63 // This is valid because there is exactly one thread pushing. |
| 64 while (stack_->full()) |
| 65 ; |
| 66 DCHECK(stack_->push(i)); |
| 67 } |
| 68 |
| 69 // Take a break once in a while. |
| 70 if (--yield_after <= 0) { |
| 71 PlatformThread::YieldCurrentThread(); |
| 72 yield_after = RandInt(1, stack_->size() * 2); |
| 73 } |
| 74 |
| 75 // Every so often, wait for the stack to empty. |
| 76 if (i < count_ - kStackTestOperationInterval && |
| 77 i % kStackTestOperationInterval == kStackTestOperationInterval - 1) { |
| 78 while (!stack_->empty()) |
| 79 ; |
| 80 } |
| 81 } |
| 82 } |
| 83 |
| 84 private: |
| 85 LockFreeSimpleStack<int>* const stack_; |
| 86 const int count_; |
| 87 |
| 88 DISALLOW_COPY_AND_ASSIGN(StackPushThread); |
| 89 }; |
| 90 |
| 91 class StackPopThread : public SimpleThread { |
| 92 public: |
| 93 StackPopThread(LockFreeSimpleStack<int>* stack, int count) |
| 94 : SimpleThread("StackPop", Options()), stack_(stack), count_(count) {} |
| 95 ~StackPopThread() override {} |
| 96 |
| 97 void Run() override { |
| 98 int yield_after = RandInt(1, stack_->size() * 2); |
| 99 for (int i = 0; i < count_; ++i) { |
| 100 int popped; |
| 101 // Two ways of popping: check for empty first or check for invalid return. |
| 102 if (i % 2 == 0) { |
| 103 // Will return "invalid" if empty; keep trying. |
| 104 while ((popped = stack_->pop()) < 0) |
| 105 ; |
| 106 } else { |
| 107 // This is valid only because there is exactly one thread popping. |
| 108 while (stack_->empty()) |
| 109 ; |
| 110 popped = stack_->pop(); |
| 111 } |
| 112 DCHECK_LE(0, popped); |
| 113 DCHECK_GT(i + static_cast<int>(stack_->size()), popped); |
| 114 |
| 115 // Take a break once in a while. |
| 116 if (--yield_after <= 0) { |
| 117 PlatformThread::YieldCurrentThread(); |
| 118 yield_after = RandInt(1, stack_->size() * 2); |
| 119 } |
| 120 |
| 121 // Every so often, wait for the stack to fill. |
| 122 if (i < count_ - kStackTestOperationInterval && |
| 123 i % kStackTestOperationInterval == kStackTestOperationInterval / 2) { |
| 124 while (!stack_->full()) |
| 125 ; |
| 126 } |
| 127 } |
| 128 } |
| 129 |
| 130 private: |
| 131 LockFreeSimpleStack<int>* const stack_; |
| 132 const int count_; |
| 133 |
| 134 DISALLOW_COPY_AND_ASSIGN(StackPopThread); |
| 135 }; |
| 136 |
| 137 class StackParallelThread : public SimpleThread { |
| 138 public: |
| 139 StackParallelThread(LockFreeSimpleStack<int>* stack, |
| 140 int thread_number, |
| 141 bool push_not_pop, |
| 142 std::atomic<char>* pending, |
| 143 int count) |
| 144 : SimpleThread(std::string("Stack") + (push_not_pop ? "Push" : "Pop") + |
| 145 static_cast<char>('A' + thread_number), |
| 146 Options()), |
| 147 stack_(stack), |
| 148 pending_(pending), |
| 149 push_not_pop_(push_not_pop), |
| 150 count_(count) {} |
| 151 ~StackParallelThread() override {} |
| 152 |
| 153 void Run() override { |
| 154 int yield_after = RandInt(1, stack_->size() * 2); |
| 155 |
| 156 for (int i = 0; i < count_; ++i) { |
| 157 if (push_not_pop_) { |
| 158 while (!stack_->push(i)) |
| 159 ; |
| 160 pending_[i].fetch_add(1); |
| 161 } else { |
| 162 int popped; |
| 163 while ((popped = stack_->pop()) < 0) |
| 164 ; |
| 165 pending_[popped].fetch_sub(1); |
| 166 } |
| 167 |
| 168 // Take a break once in a while. |
| 169 if (--yield_after <= 0) { |
| 170 PlatformThread::YieldCurrentThread(); |
| 171 yield_after = RandInt(1, stack_->size() * 2); |
| 172 } |
| 173 } |
| 174 } |
| 175 |
| 176 private: |
| 177 LockFreeSimpleStack<int>* const stack_; |
| 178 std::atomic<char>* const pending_; |
| 179 const bool push_not_pop_; |
| 180 const int count_; |
| 181 |
| 182 DISALLOW_COPY_AND_ASSIGN(StackParallelThread); |
| 183 }; |
| 184 |
40 } // namespace | 185 } // namespace |
41 | 186 |
42 | 187 |
TEST(LockFreeSimpleStack, PushPopTest) {
  // Stack of capacity 50 whose pop() returns -1 when empty.
  LockFreeSimpleStack<int> stack(50U, -1);
  ASSERT_EQ(50U, stack.size());
  ASSERT_EQ(0U, stack.used());

  // Single-threaded sanity: LIFO order and used() bookkeeping.
  stack.push(1001);
  EXPECT_EQ(1U, stack.used());

  stack.push(2002);
  EXPECT_EQ(2U, stack.used());

  int value = stack.pop();
  EXPECT_EQ(2002, value);
  EXPECT_EQ(1U, stack.used());

  value = stack.pop();
  EXPECT_EQ(1001, value);
  EXPECT_EQ(0U, stack.used());

  // Popping when empty returns the "invalid" value given at construction.
  value = stack.pop();
  ASSERT_EQ(-1, value);
  ASSERT_TRUE(stack.empty());

  // Test push/pop many times and in parallel.
  const int kStackOperations = 1000000;
  StackPushThread pusher(&stack, kStackOperations);
  StackPopThread popper(&stack, kStackOperations);
  pusher.Start();
  popper.Start();
  pusher.Join();
  popper.Join();

  // Test many push/pop threads. Each pusher pushes 0..kParallelOperations-1
  // and bumps pending[i]; each popper decrements pending[popped]. With 10
  // pushers per value, the per-slot count stays within a char's range.
  const int kParallelThreads = 10;
  const int kParallelOperations = kStackOperations / kParallelThreads;
  std::unique_ptr<std::atomic<char>[]> pending(
      new std::atomic<char>[kParallelOperations]);
  for (int i = 0; i < kParallelOperations; ++i)
    pending[i].store(0, std::memory_order_relaxed);
  std::unique_ptr<StackParallelThread> pushers[kParallelThreads];
  std::unique_ptr<StackParallelThread> poppers[kParallelThreads];
  for (int t = 0; t < kParallelThreads; ++t) {
    pushers[t].reset(new StackParallelThread(&stack, t, true, pending.get(),
                                             kParallelOperations));
    poppers[t].reset(new StackParallelThread(&stack, t, false, pending.get(),
                                             kParallelOperations));
  }
  for (int t = 0; t < kParallelThreads; ++t) {
    pushers[t]->Start();
    poppers[t]->Start();
  }
  for (int t = 0; t < kParallelThreads; ++t) {
    pushers[t]->Join();
    poppers[t]->Join();
  }
  // All threads joined: every pushed value must have been popped exactly once.
  for (int i = 0; i < kParallelOperations; ++i)
    EXPECT_EQ(0, static_cast<int>(pending[i].load(std::memory_order_relaxed)));
}
| 246 |
| 247 |
43 class ActivityTrackerTest : public testing::Test { | 248 class ActivityTrackerTest : public testing::Test { |
44 public: | 249 public: |
45 const int kMemorySize = 1 << 10; // 1KiB | 250 const int kMemorySize = 1 << 10; // 1KiB (comment said 1MiB; use 1 << 20 if MiB was intended) |
46 const int kStackSize = 1 << 10; // 1KiB | 251 const int kStackSize = 1 << 10; // 1KiB |
47 | 252 |
48 ActivityTrackerTest() {} | 253 ActivityTrackerTest() {} |
49 | 254 |
50 ~ActivityTrackerTest() override { | 255 ~ActivityTrackerTest() override { |
51 GlobalActivityTracker* global_tracker = GlobalActivityTracker::Get(); | 256 GlobalActivityTracker* global_tracker = GlobalActivityTracker::Get(); |
52 if (global_tracker) { | 257 if (global_tracker) { |
(...skipping 12 matching lines...) Expand all Loading... |
65 if (!global_tracker) | 270 if (!global_tracker) |
66 return 0; | 271 return 0; |
67 return global_tracker->thread_tracker_count_.load( | 272 return global_tracker->thread_tracker_count_.load( |
68 std::memory_order_relaxed); | 273 std::memory_order_relaxed); |
69 } | 274 } |
70 | 275 |
71 size_t GetGlobalInactiveTrackerCount() { | 276 size_t GetGlobalInactiveTrackerCount() { |
72 GlobalActivityTracker* global_tracker = GlobalActivityTracker::Get(); | 277 GlobalActivityTracker* global_tracker = GlobalActivityTracker::Get(); |
73 if (!global_tracker) | 278 if (!global_tracker) |
74 return 0; | 279 return 0; |
75 return global_tracker->available_memories_count_.load( | 280 return global_tracker->available_memories_.used(); |
76 std::memory_order_relaxed); | |
77 } | 281 } |
78 | 282 |
79 static void DoNothing() {} | 283 static void DoNothing() {} |
80 }; | 284 }; |
81 | 285 |
82 TEST_F(ActivityTrackerTest, PushPopTest) { | 286 TEST_F(ActivityTrackerTest, PushPopTest) { |
83 std::unique_ptr<ThreadActivityTracker> tracker = CreateActivityTracker(); | 287 std::unique_ptr<ThreadActivityTracker> tracker = CreateActivityTracker(); |
84 ActivitySnapshot snapshot; | 288 ActivitySnapshot snapshot; |
85 | 289 |
86 ASSERT_TRUE(tracker->Snapshot(&snapshot)); | 290 ASSERT_TRUE(tracker->Snapshot(&snapshot)); |
(...skipping 193 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
280 EXPECT_EQ(starting_inactive, GetGlobalInactiveTrackerCount()); | 484 EXPECT_EQ(starting_inactive, GetGlobalInactiveTrackerCount()); |
281 | 485 |
282 t2.Exit(); | 486 t2.Exit(); |
283 t2.Join(); | 487 t2.Join(); |
284 EXPECT_EQ(starting_active, GetGlobalActiveTrackerCount()); | 488 EXPECT_EQ(starting_active, GetGlobalActiveTrackerCount()); |
285 EXPECT_EQ(starting_inactive + 1, GetGlobalInactiveTrackerCount()); | 489 EXPECT_EQ(starting_inactive + 1, GetGlobalInactiveTrackerCount()); |
286 } | 490 } |
287 | 491 |
288 } // namespace debug | 492 } // namespace debug |
289 } // namespace base | 493 } // namespace base |
OLD | NEW |