OLD | NEW |
1 // Copyright (c) 2010 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2010 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/threading/worker_pool_posix.h" | 5 #include "base/threading/worker_pool_posix.h" |
6 | 6 |
7 #include <set> | 7 #include <set> |
8 | 8 |
9 #include "base/synchronization/condition_variable.h" | 9 #include "base/synchronization/condition_variable.h" |
10 #include "base/synchronization/lock.h" | 10 #include "base/synchronization/lock.h" |
11 #include "base/task.h" | 11 #include "base/task.h" |
12 #include "base/threading/platform_thread.h" | 12 #include "base/threading/platform_thread.h" |
13 #include "base/synchronization/waitable_event.h" | 13 #include "base/synchronization/waitable_event.h" |
14 #include "testing/gtest/include/gtest/gtest.h" | 14 #include "testing/gtest/include/gtest/gtest.h" |
15 | 15 |
16 namespace base { | 16 namespace base { |
17 | 17 |
18 // Peer class to provide passthrough access to PosixDynamicThreadPool internals. | 18 // Peer class to provide passthrough access to PosixDynamicThreadPool internals. |
19 class PosixDynamicThreadPool::PosixDynamicThreadPoolPeer { | 19 class PosixDynamicThreadPool::PosixDynamicThreadPoolPeer { |
20 public: | 20 public: |
21 explicit PosixDynamicThreadPoolPeer(PosixDynamicThreadPool* pool) | 21 explicit PosixDynamicThreadPoolPeer(PosixDynamicThreadPool* pool) |
22 : pool_(pool) {} | 22 : pool_(pool) {} |
23 | 23 |
24 Lock* lock() { return &pool_->lock_; } | 24 Lock* lock() { return &pool_->lock_; } |
25 ConditionVariable* tasks_available_cv() { | 25 ConditionVariable* pending_tasks_available_cv() { |
26 return &pool_->tasks_available_cv_; | 26 return &pool_->pending_tasks_available_cv_; |
27 } | 27 } |
28 const std::queue<Task*>& tasks() const { return pool_->tasks_; } | 28 const std::queue<PendingTask>& pending_tasks() const { |
| 29 return pool_->pending_tasks_; |
| 30 } |
29 int num_idle_threads() const { return pool_->num_idle_threads_; } | 31 int num_idle_threads() const { return pool_->num_idle_threads_; } |
30 ConditionVariable* num_idle_threads_cv() { | 32 ConditionVariable* num_idle_threads_cv() { |
31 return pool_->num_idle_threads_cv_.get(); | 33 return pool_->num_idle_threads_cv_.get(); |
32 } | 34 } |
33 void set_num_idle_threads_cv(ConditionVariable* cv) { | 35 void set_num_idle_threads_cv(ConditionVariable* cv) { |
34 pool_->num_idle_threads_cv_.reset(cv); | 36 pool_->num_idle_threads_cv_.reset(cv); |
35 } | 37 } |
36 | 38 |
37 private: | 39 private: |
38 PosixDynamicThreadPool* pool_; | 40 PosixDynamicThreadPool* pool_; |
(...skipping 134 matching lines...)
173 int num_waiting_to_start_; | 175 int num_waiting_to_start_; |
174 ConditionVariable num_waiting_to_start_cv_; | 176 ConditionVariable num_waiting_to_start_cv_; |
175 base::WaitableEvent start_; | 177 base::WaitableEvent start_; |
176 }; | 178 }; |
177 | 179 |
178 } // namespace | 180 } // namespace |
179 | 181 |
180 TEST_F(PosixDynamicThreadPoolTest, Basic) { | 182 TEST_F(PosixDynamicThreadPoolTest, Basic) { |
181 EXPECT_EQ(0, peer_.num_idle_threads()); | 183 EXPECT_EQ(0, peer_.num_idle_threads()); |
182 EXPECT_EQ(0U, unique_threads_.size()); | 184 EXPECT_EQ(0U, unique_threads_.size()); |
183 EXPECT_EQ(0U, peer_.tasks().size()); | 185 EXPECT_EQ(0U, peer_.pending_tasks().size()); |
184 | 186 |
185 // Add one task and wait for it to be completed. | 187 // Add one task and wait for it to be completed. |
186 pool_->PostTask(CreateNewIncrementingTask()); | 188 pool_->PostTask(FROM_HERE, CreateNewIncrementingTask()); |
187 | 189 |
188 WaitForIdleThreads(1); | 190 WaitForIdleThreads(1); |
189 | 191 |
190 EXPECT_EQ(1U, unique_threads_.size()) << | 192 EXPECT_EQ(1U, unique_threads_.size()) << |
191 "There should be only one thread allocated for one task."; | 193 "There should be only one thread allocated for one task."; |
192 EXPECT_EQ(1, peer_.num_idle_threads()); | 194 EXPECT_EQ(1, peer_.num_idle_threads()); |
193 EXPECT_EQ(1, counter_); | 195 EXPECT_EQ(1, counter_); |
194 } | 196 } |
195 | 197 |
196 TEST_F(PosixDynamicThreadPoolTest, ReuseIdle) { | 198 TEST_F(PosixDynamicThreadPoolTest, ReuseIdle) { |
197 // Add one task and wait for it to be completed. | 199 // Add one task and wait for it to be completed. |
198 pool_->PostTask(CreateNewIncrementingTask()); | 200 pool_->PostTask(FROM_HERE, CreateNewIncrementingTask()); |
199 | 201 |
200 WaitForIdleThreads(1); | 202 WaitForIdleThreads(1); |
201 | 203 |
202 // Add another 2 tasks. One should reuse the existing worker thread. | 204 // Add another 2 tasks. One should reuse the existing worker thread. |
203 pool_->PostTask(CreateNewBlockingIncrementingTask()); | 205 pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTask()); |
204 pool_->PostTask(CreateNewBlockingIncrementingTask()); | 206 pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTask()); |
205 | 207 |
206 WaitForTasksToStart(2); | 208 WaitForTasksToStart(2); |
207 start_.Signal(); | 209 start_.Signal(); |
208 WaitForIdleThreads(2); | 210 WaitForIdleThreads(2); |
209 | 211 |
210 EXPECT_EQ(2U, unique_threads_.size()); | 212 EXPECT_EQ(2U, unique_threads_.size()); |
211 EXPECT_EQ(2, peer_.num_idle_threads()); | 213 EXPECT_EQ(2, peer_.num_idle_threads()); |
212 EXPECT_EQ(3, counter_); | 214 EXPECT_EQ(3, counter_); |
213 } | 215 } |
214 | 216 |
215 TEST_F(PosixDynamicThreadPoolTest, TwoActiveTasks) { | 217 TEST_F(PosixDynamicThreadPoolTest, TwoActiveTasks) { |
216 // Add two blocking tasks. | 218 // Add two blocking tasks. |
217 pool_->PostTask(CreateNewBlockingIncrementingTask()); | 219 pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTask()); |
218 pool_->PostTask(CreateNewBlockingIncrementingTask()); | 220 pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTask()); |
219 | 221 |
220 EXPECT_EQ(0, counter_) << "Blocking tasks should not have started yet."; | 222 EXPECT_EQ(0, counter_) << "Blocking tasks should not have started yet."; |
221 | 223 |
222 WaitForTasksToStart(2); | 224 WaitForTasksToStart(2); |
223 start_.Signal(); | 225 start_.Signal(); |
224 WaitForIdleThreads(2); | 226 WaitForIdleThreads(2); |
225 | 227 |
226 EXPECT_EQ(2U, unique_threads_.size()); | 228 EXPECT_EQ(2U, unique_threads_.size()); |
227 EXPECT_EQ(2, peer_.num_idle_threads()) << "Existing threads are now idle."; | 229 EXPECT_EQ(2, peer_.num_idle_threads()) << "Existing threads are now idle."; |
228 EXPECT_EQ(2, counter_); | 230 EXPECT_EQ(2, counter_); |
229 } | 231 } |
230 | 232 |
231 TEST_F(PosixDynamicThreadPoolTest, Complex) { | 233 TEST_F(PosixDynamicThreadPoolTest, Complex) { |
232 // Add one non blocking task and wait for it to finish. | 234 // Add one non blocking task and wait for it to finish. |
233 pool_->PostTask(CreateNewIncrementingTask()); | 235 pool_->PostTask(FROM_HERE, CreateNewIncrementingTask()); |
234 | 236 |
235 WaitForIdleThreads(1); | 237 WaitForIdleThreads(1); |
236 | 238 |
237 // Add two blocking tasks, start them simultaneously, and wait for them to | 239 // Add two blocking tasks, start them simultaneously, and wait for them to |
238 // finish. | 240 // finish. |
239 pool_->PostTask(CreateNewBlockingIncrementingTask()); | 241 pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTask()); |
240 pool_->PostTask(CreateNewBlockingIncrementingTask()); | 242 pool_->PostTask(FROM_HERE, CreateNewBlockingIncrementingTask()); |
241 | 243 |
242 WaitForTasksToStart(2); | 244 WaitForTasksToStart(2); |
243 start_.Signal(); | 245 start_.Signal(); |
244 WaitForIdleThreads(2); | 246 WaitForIdleThreads(2); |
245 | 247 |
246 EXPECT_EQ(3, counter_); | 248 EXPECT_EQ(3, counter_); |
247 EXPECT_EQ(2, peer_.num_idle_threads()); | 249 EXPECT_EQ(2, peer_.num_idle_threads()); |
248 EXPECT_EQ(2U, unique_threads_.size()); | 250 EXPECT_EQ(2U, unique_threads_.size()); |
249 | 251 |
250 // Wake up all idle threads so they can exit. | 252 // Wake up all idle threads so they can exit. |
251 { | 253 { |
252 base::AutoLock locked(*peer_.lock()); | 254 base::AutoLock locked(*peer_.lock()); |
253 while (peer_.num_idle_threads() > 0) { | 255 while (peer_.num_idle_threads() > 0) { |
254 peer_.tasks_available_cv()->Signal(); | 256 peer_.pending_tasks_available_cv()->Signal(); |
255 peer_.num_idle_threads_cv()->Wait(); | 257 peer_.num_idle_threads_cv()->Wait(); |
256 } | 258 } |
257 } | 259 } |
258 | 260 |
259 // Add another non blocking task. There are no threads to reuse. | 261 // Add another non blocking task. There are no threads to reuse. |
260 pool_->PostTask(CreateNewIncrementingTask()); | 262 pool_->PostTask(FROM_HERE, CreateNewIncrementingTask()); |
261 WaitForIdleThreads(1); | 263 WaitForIdleThreads(1); |
262 | 264 |
263 EXPECT_EQ(3U, unique_threads_.size()); | 265 EXPECT_EQ(3U, unique_threads_.size()); |
264 EXPECT_EQ(1, peer_.num_idle_threads()); | 266 EXPECT_EQ(1, peer_.num_idle_threads()); |
265 EXPECT_EQ(4, counter_); | 267 EXPECT_EQ(4, counter_); |
266 } | 268 } |
267 | 269 |
268 } // namespace base | 270 } // namespace base |
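
For reference, a minimal before/after sketch of the two interface changes this unit test diff exercises (assuming the test fixture's pool_ member and CreateNewIncrementingTask() helper from the file above):

  // Old: tasks were posted without a location, and the peer exposed
  // tasks() / tasks_available_cv() over a std::queue<Task*>.
  pool_->PostTask(CreateNewIncrementingTask());

  // New: the posting location is passed via FROM_HERE, and the peer exposes
  // pending_tasks() / pending_tasks_available_cv() over a
  // std::queue<PendingTask>.
  pool_->PostTask(FROM_HERE, CreateNewIncrementingTask());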