1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "base/threading/sequenced_worker_pool.h" | |
6 | |
7 #include <deque> | |
8 #include <set> | |
9 | |
10 #include "base/atomicops.h" | |
11 #include "base/bind.h" | |
12 #include "base/memory/scoped_ptr.h" | |
13 #include "base/metrics/histogram.h" | |
14 #include "base/stringprintf.h" | |
15 #include "base/synchronization/condition_variable.h" | |
16 #include "base/threading/simple_thread.h" | |
17 #include "base/threading/thread.h" | |
18 | |
19 namespace base { | |
20 | |
21 namespace { | |
22 | |
23 struct SequencedTask { | |
24 int sequence_token_id; | |
25 SequencedWorkerPool::WorkerShutdown shutdown_behavior; | |
26 tracked_objects::Location location; | |
27 base::Closure task; | |
28 }; | |
29 | |
30 } // namespace | |
31 | |
32 // Worker --------------------------------------------------------------------- | |
33 | |
34 class SequencedWorkerPool::Worker : public base::SimpleThread { | |
35 public: | |
36 Worker(SequencedWorkerPool::Inner* inner, | |
37 int thread_number, | |
38 const std::string& thread_name_prefix); | |
39 ~Worker(); | |
40 | |
41 // SimpleThread implementation. This actually runs the background thread. | |
42 virtual void Run(); | |
43 | |
44 private: | |
45 SequencedWorkerPool::Inner* inner_; | |
46 SequencedWorkerPool::WorkerShutdown current_shutdown_mode_; | |
47 | |
48 DISALLOW_COPY_AND_ASSIGN(Worker); | |
49 }; | |
50 | |
51 | |
52 // Inner ---------------------------------------------------------------------- | |
53 | |
54 class SequencedWorkerPool::Inner | |
55 : public base::RefCountedThreadSafe<SequencedWorkerPool::Inner> { | |
56 public: | |
57 Inner(size_t max_threads, const std::string& thread_name_prefix); | |
58 virtual ~Inner(); | |
59 | |
60 // Backends for SequencedWorkerPool. | |
61 SequenceToken GetSequenceToken(); | |
62 SequenceToken GetNamedSequenceToken(const std::string& name); | |
63 bool PostTask(int sequence_token_id, | |
64 SequencedWorkerPool::WorkerShutdown shutdown_behavior, | |
65 const tracked_objects::Location& from_here, | |
66 const base::Closure& task); | |
67 void Shutdown(); | |
68 void SetTestingObserver(SequencedWorkerPool::TestingObserver* observer); | |
69 | |
70 // Runs the worker loop on the background thread. | |
71 void ThreadLoop(Worker* this_worker); | |
72 | |
73 private: | |
74 // The calling code should clear the given delete_these_outside_lock | |
75 // vector the next time the lock is released. See the implementation for | |
76 // a more detailed description. | |
77 bool GetWork(SequencedTask* task, | |
78 std::vector<base::Closure>* delete_these_outside_lock); | |
79 | |
80 // Performs init and cleanup around running the given task. WillRun... | |
81 // returns the value from PrepareToStartAdditionalThreadIfHelpful. | |
82 // The calling code should call FinishStartingAdditionalThread once the | |
83 // lock is released if the return value is nonzero. | |
84 int WillRunWorkerTask(const SequencedTask& task); | |
85 void DidRunWorkerTask(const SequencedTask& task); | |
86 | |
87 // Returns true if there are no threads currently running the given | |
88 // sequence token. | |
89 bool IsSequenceTokenRunnable(int sequence_token_id) const; | |
90 | |
91 // Checks if all threads are busy and the addition of one more could run an | |
92 // additional task waiting in the queue. This must be called from within | |
93 // the lock. | |
94 // | |
95 // If another thread would be helpful, this marks the thread as being in | |
96 // the process of starting and returns the number of the new thread, which | |
97 // will be nonzero. The caller should then call | |
98 // FinishStartingAdditionalThread to complete initialization once the lock | |
99 // is released. | |
100 // | |
101 // If another thread is not necessary, returns 0. | |
102 // See the implementation for more. | |
103 int PrepareToStartAdditionalThreadIfHelpful(); | |
104 | |
105 // The second part of thread creation, run after | |
106 // PrepareToStartAdditionalThreadIfHelpful with the thread number it | |
107 // generated. This actually creates the thread and should be called outside | |
108 // the lock so that starting a thread does not block important work. | |
109 void FinishStartingAdditionalThread(int thread_number); | |
110 | |
111 // Checks whether there is work left that's blocking shutdown. Must be | |
112 // called inside the lock. | |
113 bool CanShutdown() const; | |
114 | |
115 // The last sequence number used. Managed by GetSequenceToken; since it | |
116 // only does thread-safe increment operations, you do not need to hold the | |
117 // lock. | |
118 volatile base::subtle::Atomic32 last_sequence_number_; | |
119 | |
120 // This lock protects |everything in this class|. Do not read or modify | |
121 // anything without holding this lock. Do not block while holding this | |
122 // lock. | |
123 base::Lock lock_; | |
124 | |
125 // Condition variable used to wake up worker threads when a task is runnable. | |
126 base::ConditionVariable cond_var_; | |
127 | |
128 // The maximum number of worker threads we'll create. | |
129 size_t max_threads_; | |
130 | |
131 std::string thread_name_prefix_; | |
132 | |
133 // Associates all known sequence token names with their IDs. | |
134 std::map<std::string, int> named_sequence_tokens_; | |
135 | |
136 // Owning pointers to all threads we've created so far. Since we lazily | |
137 // create threads, this may be less than max_threads_ and will be initially | |
138 // empty. | |
139 std::vector<linked_ptr<Worker> > threads_; | |
140 | |
141 // Set to true when we're in the process of creating another thread. | |
142 // See PrepareToStartAdditionalThreadIfHelpful for more. | |
143 bool thread_being_created_; | |
144 | |
145 // Number of threads currently waiting for work. | |
146 size_t waiting_thread_count_; | |
147 | |
148 // Number of threads currently running tasks that have the BLOCK_SHUTDOWN | |
149 // flag set. | |
150 size_t blocking_shutdown_thread_count_; | |
151 | |
152 // In-order list of all pending tasks. These are tasks waiting for a thread | |
153 // to run on or that are blocked on a previous task in their sequence. | |
154 // | |
155 // We maintain the pending_task_count_ separately for metrics because | |
156 // list.size() can be linear time. | |
157 std::list<SequencedTask> pending_tasks_; | |
158 size_t pending_task_count_; | |
159 | |
160 // Number of tasks in the pending_tasks_ list that are marked as blocking | |
161 // shutdown. | |
162 size_t blocking_shutdown_pending_task_count_; | |
163 | |
164 // Lists all sequence tokens currently executing. | |
165 std::set<int> current_sequences_; | |
166 | |
167 // Set when the app is terminating and no further tasks should be allowed, | |
168 // though we may still be running existing tasks. | |
169 bool terminating_; | |
170 | |
171 // Set when Shutdown is called to do some assertions. | |
172 bool shutdown_called_; | |
173 | |
174 SequencedWorkerPool::TestingObserver* testing_observer_; | |
175 }; | |
176 | |
177 SequencedWorkerPool::Worker::Worker(SequencedWorkerPool::Inner* inner, | |
178 int thread_number, | |
179 const std::string& prefix) | |
180 : base::SimpleThread( | |
181 prefix + StringPrintf("Worker%d", thread_number).c_str()), | |
182 inner_(inner), | |
183 current_shutdown_mode_(SequencedWorkerPool::CONTINUE_ON_SHUTDOWN) { | |
184 Start(); | |
185 } | |
186 | |
187 SequencedWorkerPool::Worker::~Worker() { | |
188 } | |
189 | |
190 void SequencedWorkerPool::Worker::Run() { | |
191 // Just jump back to the Inner object to run the thread, since it has all the | |
192 // tracking information and queues. It might be more natural to implement | |
193 // using DelegateSimpleThread and have Inner implement the Delegate to avoid | |
194 // having these worker objects at all, but that method lacks the ability to | |
195 // send thread-specific information easily to the thread loop. | |
196 inner_->ThreadLoop(this); | |
197 } | |
198 | |
199 SequencedWorkerPool::Inner::Inner(size_t max_threads, | |
200 const std::string& thread_name_prefix) | |
201 : last_sequence_number_(0), | |
202 lock_(), | |
203 cond_var_(&lock_), | |
204 max_threads_(max_threads), | |
205 thread_name_prefix_(thread_name_prefix), | |
206 thread_being_created_(false), | |
207 waiting_thread_count_(0), | |
208 blocking_shutdown_thread_count_(0), | |
209 pending_task_count_(0), | |
210 blocking_shutdown_pending_task_count_(0), | |
211 terminating_(false), | |
212 shutdown_called_(false), testing_observer_(NULL) { | |
213 } | |
214 | |
215 SequencedWorkerPool::Inner::~Inner() { | |
216 // You must call Shutdown() before destroying the pool. | |
217 DCHECK(shutdown_called_); | |
218 | |
219 // Need to explicitly join with the threads before they're destroyed or else | |
220 // they will be running when our object is half torn down. | |
221 for (size_t i = 0; i < threads_.size(); i++) | |
222 threads_[i]->Join(); | |
223 threads_.clear(); | |
224 } | |
225 | |
226 SequencedWorkerPool::SequenceToken | |
227 SequencedWorkerPool::Inner::GetSequenceToken() { | |
228 base::subtle::Atomic32 result = | |
229 base::subtle::NoBarrier_AtomicIncrement(&last_sequence_number_, 1); | |
230 return SequenceToken(static_cast<int>(result)); | |
231 } | |
232 | |
233 SequencedWorkerPool::SequenceToken | |
234 SequencedWorkerPool::Inner::GetNamedSequenceToken( | |
235 const std::string& name) { | |
236 base::AutoLock lock(lock_); | |
237 std::map<std::string, int>::const_iterator found = | |
238 named_sequence_tokens_.find(name); | |
239 if (found != named_sequence_tokens_.end()) | |
240 return SequenceToken(found->second); // Got an existing one. | |
241 | |
242 // Create a new one for this name. | |
243 SequenceToken result = GetSequenceToken(); | |
244 named_sequence_tokens_.insert(std::make_pair(name, result.id_)); | |
245 return result; | |
246 } | |
247 | |
248 bool SequencedWorkerPool::Inner::PostTask( | |
249 int sequence_token_id, | |
250 SequencedWorkerPool::WorkerShutdown shutdown_behavior, | |
251 const tracked_objects::Location& from_here, | |
252 const base::Closure& task) { | |
253 SequencedTask sequenced; | |
254 sequenced.sequence_token_id = sequence_token_id; | |
255 sequenced.shutdown_behavior = shutdown_behavior; | |
256 sequenced.location = from_here; | |
257 sequenced.task = task; | |
258 | |
259 int create_thread_id = 0; | |
260 { | |
261 base::AutoLock lock(lock_); | |
262 if (terminating_) | |
263 return false; | |
264 | |
265 pending_tasks_.push_back(sequenced); | |
266 pending_task_count_++; | |
267 if (shutdown_behavior == BLOCK_SHUTDOWN) | |
268 blocking_shutdown_pending_task_count_++; | |
269 | |
270 create_thread_id = PrepareToStartAdditionalThreadIfHelpful(); | |
271 } | |
272 | |
273 // Actually start the additional thread or signal an existing one now that | |
274 // we're outside the lock. | |
275 if (create_thread_id) | |
276 FinishStartingAdditionalThread(create_thread_id); | |
277 else | |
278 cond_var_.Signal(); | |
279 | |
280 return true; | |
281 } | |
282 | |
283 void SequencedWorkerPool::Inner::Shutdown() { | |
284 if (shutdown_called_) | |
285 return; | |
286 shutdown_called_ = true; | |
287 | |
288 // Mark us as terminating and go through and drop all tasks that aren't | |
289 // required to run on shutdown. Since no new tasks will get posted once the | |
290 // terminating_ flag is set, this ensures that all remaining tasks are | |
291 // required for shutdown whenever terminating_ is set. | |
292 { | |
293 base::AutoLock lock(lock_); | |
294 DCHECK(!terminating_); | |
295 terminating_ = true; | |
296 | |
297 // Tickle the threads. This will wake up a waiting one so it will know that | |
298 // it can exit, which in turn will wake up any other waiting ones. | |
299 cond_var_.Signal(); | |
300 | |
301 // If there are no pending or running tasks blocking shutdown, we're done. | |
302 if (CanShutdown()) | |
303 return; | |
304 } | |
305 | |
306 // If we get here, we know we're either waiting on a blocking task that's | |
307 // currently running, waiting on a blocking task that hasn't been scheduled | |
308 // yet, or both. Block on the "queue empty" event to know when all tasks are | |
309 // complete. This must be done outside the lock. | |
310 if (testing_observer_) | |
311 testing_observer_->WillWaitForShutdown(); | |
312 | |
313 base::TimeTicks shutdown_wait_begin = base::TimeTicks::Now(); | |
314 | |
315 // Wait for no more tasks. | |
316 { | |
317 base::AutoLock lock(lock_); | |
318 while (!CanShutdown()) | |
319 cond_var_.Wait(); | |
320 } | |
321 UMA_HISTOGRAM_TIMES("SequencedWorkerPool.ShutdownDelayTime", | |
322 base::TimeTicks::Now() - shutdown_wait_begin); | |
323 } | |
324 | |
325 void SequencedWorkerPool::Inner::SetTestingObserver( | |
326 SequencedWorkerPool::TestingObserver* observer) { | |
327 base::AutoLock lock(lock_); | |
328 testing_observer_ = observer; | |
329 } | |
330 | |
331 void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) { | |
332 { | |
333 base::AutoLock lock(lock_); | |
334 DCHECK(thread_being_created_); | |
335 thread_being_created_ = false; | |
336 while (true) { | |
337 // See GetWork for what delete_these_outside_lock is doing. | |
338 SequencedTask task; | |
339 std::vector<base::Closure> delete_these_outside_lock; | |
340 if (GetWork(&task, &delete_these_outside_lock)) { | |
341 int new_thread_id = WillRunWorkerTask(task); | |
342 { | |
343 base::AutoUnlock unlock(lock_); | |
344 cond_var_.Signal(); | |
345 delete_these_outside_lock.clear(); | |
346 | |
347 // Complete thread creation outside the lock if necessary. | |
348 if (new_thread_id) | |
349 FinishStartingAdditionalThread(new_thread_id); | |
350 | |
351 task.task.Run(); | |
352 | |
353 // Make sure our task is erased outside the lock for the same reason we | |
354 // do this with delete_these_outside_lock (see the sketch after ThreadLoop). | |
355 task.task = base::Closure(); | |
356 } | |
357 DidRunWorkerTask(task); // Must be done inside the lock. | |
358 } else { | |
359 // When we're terminating and there's no more work, we can shut down. | |
360 // You can't get more tasks posted once terminating_ is set. There may | |
361 // be some tasks stuck behind running ones with the same sequence | |
362 // token, but additional threads won't help this case. | |
363 if (terminating_) | |
364 break; | |
365 waiting_thread_count_++; | |
366 cond_var_.Wait(); | |
367 waiting_thread_count_--; | |
jar (doing other things)
2011/12/31 19:06:10
This is much nicer, as it is easy to read in one p
| |
368 } | |
369 } | |
370 } | |
371 | |
372 // We noticed we should exit. Wake up the next worker so it knows it should | |
373 // exit as well (because the Shutdown() code only signals once). | |
374 cond_var_.Signal(); | |
375 } | |
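To illustrate why ThreadLoop only destroys closures (delete_these_outside_lock, and the task.task reset) after releasing the lock, here is a minimal sketch of the failure that idiom avoids. Everything below is hypothetical except the SequencedWorkerPool / base::Bind / RefCountedThreadSafe machinery it leans on: an object referenced only by a posted Closure posts more work from its destructor, so destroying that Closure while lock_ is held would re-enter PostTask and try to re-acquire the same non-reentrant lock.

    // Sketch only, not part of this change. All names are made up.
    class LoggingCleanup : public base::RefCountedThreadSafe<LoggingCleanup> {
     public:
      explicit LoggingCleanup(SequencedWorkerPool* pool) : pool_(pool) {}

      void DoWork() {}  // the method actually bound into a posted task

      ~LoggingCleanup() {
        // Harmless on its own, but fatal if this destructor runs while
        // Inner::lock_ is held: PostTask() takes the same lock again.
        pool_->PostWorkerTask(FROM_HERE, base::Bind(&LoggingCleanup::Noop));
      }

     private:
      static void Noop() {}
      SequencedWorkerPool* pool_;
    };

    // Hypothetical usage: the posted Closure holds the only reference, so
    // whoever destroys the Closure also runs ~LoggingCleanup.
    //   scoped_refptr<LoggingCleanup> obj(new LoggingCleanup(&pool));
    //   pool.PostWorkerTask(FROM_HERE,
    //                       base::Bind(&LoggingCleanup::DoWork, obj));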
376 | |
377 bool SequencedWorkerPool::Inner::GetWork( | |
378 SequencedTask* task, | |
379 std::vector<base::Closure>* delete_these_outside_lock) { | |
380 lock_.AssertAcquired(); | |
381 | |
382 DCHECK_EQ(pending_tasks_.size(), pending_task_count_); | |
383 UMA_HISTOGRAM_COUNTS_100("SequencedWorkerPool.TaskCount", | |
384 static_cast<int>(pending_task_count_)); | |
385 | |
386 // Find the next task with a sequence token that's not currently in use. | |
387 // If the token is in use, that means another thread is running something | |
388 // in that sequence, and we can't run it without going out-of-order. | |
389 // | |
390 // This algorithm is simple and fair, but inefficient in some cases. For | |
391 // example, say somebody schedules 1000 slow tasks with the same sequence | |
392 // number. We'll have to go through all those tasks each time we feel like | |
393 // there might be work to schedule. If this proves to be a problem, we | |
394 // should make this more efficient. | |
395 // | |
396 // One possible enhancement would be to keep a map from sequence ID to a | |
397 // list of pending but currently blocked SequencedTasks for that ID. | |
398 // When a worker finishes a task of one sequence token, it can pick up the | |
399 // next one from that token right away. | |
400 // | |
401 // This may lead to starvation if there are sufficient numbers of sequences | |
402 // in use. To alleviate this, we could add an incrementing priority counter | |
403 // to each SequencedTask. Then maintain a priority_queue of all runnable | |
404 // tasks, sorted by priority counter. When a sequenced task is completed | |
405 // we would pop the head element off of that token's pending list and add it | |
406 // to the priority queue. Then we would run the first item in the priority | |
407 // queue. (A rough sketch of this bookkeeping appears after this function.) | |
408 bool found_task = false; | |
409 int unrunnable_tasks = 0; | |
410 std::list<SequencedTask>::iterator i = pending_tasks_.begin(); | |
411 while (i != pending_tasks_.end()) { | |
412 if (!IsSequenceTokenRunnable(i->sequence_token_id)) { | |
413 unrunnable_tasks++; | |
414 ++i; | |
415 continue; | |
416 } | |
417 | |
418 if (terminating_ && i->shutdown_behavior != BLOCK_SHUTDOWN) { | |
419 // We're shutting down and the task we just found isn't blocking | |
420 // shutdown. Delete it and get more work. | |
421 // | |
422 // Note that we do not want to delete unrunnable tasks. Deleting a task | |
423 // can have side effects (like freeing some objects) and deleting a | |
424 // task that's supposed to run after one that's currently running could | |
425 // cause an obscure crash. | |
426 // | |
427 // We really want to delete these tasks outside the lock in case the | |
428 // closures are holding refs to objects that want to post work from | |
429 // their destructors (which would deadlock). The closures are | |
430 // internally refcounted, so we just need to keep a copy of them alive | |
431 // until the lock is exited. The calling code can just clear() the | |
432 // vector it passed to us once the lock is exited to make this | |
433 // happen. | |
434 delete_these_outside_lock->push_back(i->task); | |
435 i = pending_tasks_.erase(i); | |
436 pending_task_count_--; | |
437 } else { | |
438 // Found a runnable task. | |
439 *task = *i; | |
440 i = pending_tasks_.erase(i); | |
441 pending_task_count_--; | |
442 if (task->shutdown_behavior == BLOCK_SHUTDOWN) | |
443 blocking_shutdown_pending_task_count_--; | |
444 | |
445 found_task = true; | |
446 break; | |
447 } | |
448 } | |
449 | |
450 // Track the number of tasks we had to skip over to see if we should be | |
451 // making this more efficient. If this number ever becomes large or is | |
452 // frequently "some", we should consider the optimization above. | |
453 UMA_HISTOGRAM_COUNTS_100("SequencedWorkerPool.UnrunnableTaskCount", | |
454 unrunnable_tasks); | |
455 return found_task; | |
456 } | |
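The enhancement described in the comment above would need roughly the following bookkeeping. This is only an illustration; the member names and types below are hypothetical and nothing like them exists in this change.

    // Hypothetical members for the per-token queue + priority-queue idea.
    // Tasks that cannot run yet because another task with the same sequence
    // token is currently running, keyed by sequence token ID.
    std::map<int, std::deque<SequencedTask> > blocked_tasks_;

    // Counter stamped onto each posted task so that runnable tasks can be
    // ordered fairly across sequences.
    int64 next_priority_;

    // Runnable tasks keyed by their priority stamp; a worker always takes
    // begin(), i.e. the oldest runnable task, so no linear scan is needed.
    std::map<int64, SequencedTask> runnable_tasks_;

    // When DidRunWorkerTask() finishes a task for token T, it would move the
    // front of blocked_tasks_[T] (if any) into runnable_tasks_, preserving
    // per-token ordering while keeping overall scheduling fair.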
457 | |
458 int SequencedWorkerPool::Inner::WillRunWorkerTask(const SequencedTask& task) { | |
459 lock_.AssertAcquired(); | |
460 | |
461 // Mark the task's sequence number as in use. | |
462 if (task.sequence_token_id) | |
463 current_sequences_.insert(task.sequence_token_id); | |
464 | |
465 if (task.shutdown_behavior == SequencedWorkerPool::BLOCK_SHUTDOWN) | |
466 blocking_shutdown_thread_count_++; | |
467 | |
468 // We just picked up a task. Since PrepareToStartAdditionalThreadIfHelpful | |
469 // only creates a new thread if there is no free one, there is a race when | |
470 // posting tasks: many tasks could have been posted before a thread started | |
471 // running them, yet only one thread would have been created. So we also | |
472 // check whether we should create more threads after removing our task from | |
473 // the queue, which also has the nice side effect of creating the workers | |
474 // from background threads rather than the main thread of the app. | |
475 // | |
476 // If another thread wasn't created, we want to wake up an existing thread | |
477 // if there is one waiting to pick up the next task. | |
478 // | |
479 // Note that we really need to do this *before* running the task, not | |
480 // after. Otherwise, if more than one task is posted, the creation of the | |
481 // second thread (since we only create one at a time) will be blocked by | |
482 // the execution of the first task, which could be arbitrarily long. | |
483 return PrepareToStartAdditionalThreadIfHelpful(); | |
484 } | |
485 | |
486 void SequencedWorkerPool::Inner::DidRunWorkerTask(const SequencedTask& task) { | |
487 lock_.AssertAcquired(); | |
488 | |
489 if (task.shutdown_behavior == SequencedWorkerPool::BLOCK_SHUTDOWN) { | |
490 DCHECK_GT(blocking_shutdown_thread_count_, 0u); | |
491 blocking_shutdown_thread_count_--; | |
492 } | |
493 | |
494 if (task.sequence_token_id) | |
495 current_sequences_.erase(task.sequence_token_id); | |
496 } | |
497 | |
498 bool SequencedWorkerPool::Inner::IsSequenceTokenRunnable( | |
499 int sequence_token_id) const { | |
500 lock_.AssertAcquired(); | |
501 return !sequence_token_id || | |
502 current_sequences_.find(sequence_token_id) == | |
503 current_sequences_.end(); | |
504 } | |
505 | |
506 int SequencedWorkerPool::Inner::PrepareToStartAdditionalThreadIfHelpful() { | |
507 // How thread creation works: | |
508 // | |
509 // We'd like to avoid creating threads with the lock held. However, we | |
510 // need to be sure that we have an accurate accounting of the threads for | |
511 // proper joining and deletion on shutdown. | |
512 // | |
513 // We need to figure out if we need another thread with the lock held, which | |
514 // is what this function does. It then marks us as in the process of creating | |
515 // a thread. When we do shutdown, we wait until the thread_being_created_ | |
516 // flag is cleared, which ensures that the new thread is properly added to | |
517 // all the data structures and we can't leak it. Once shutdown starts, we'll | |
518 // refuse to create more threads or they would be leaked. | |
519 // | |
520 // Note that this creates a mostly benign race condition on shutdown that | |
521 // will cause fewer workers to be created than one would expect. It isn't | |
522 // much of an issue in real life, but affects some tests. Since we only spawn | |
523 // one worker at a time, the following sequence of events can happen: | |
524 // | |
525 // 1. Main thread posts a bunch of unrelated tasks that would normally be | |
526 // run on separate threads. | |
527 // 2. The first task post causes us to start a worker. Other tasks do not | |
528 // cause a worker to start since one is pending. | |
529 // 3. Main thread initiates shutdown. | |
530 // 4. No more threads are created since the terminating_ flag is set. | |
531 // | |
532 // The result is that one might expect max_threads_ workers to be created | |
533 // given the workload, but in reality fewer may be created because the | |
534 // sequence of thread creation on the background threads is racing with the | |
535 // shutdown call. | |
536 if (!terminating_ && | |
537 !thread_being_created_ && | |
538 threads_.size() < max_threads_ && | |
jar (doing other things)
2011/12/31 19:06:10
Note that you still depend on threads_.size() here
| |
539 waiting_thread_count_ == 0) { | |
540 // We could use an additional thread if there's work to be done. | |
541 for (std::list<SequencedTask>::iterator i = pending_tasks_.begin(); | |
542 i != pending_tasks_.end(); ++i) { | |
543 if (IsSequenceTokenRunnable(i->sequence_token_id)) { | |
544 // Found a runnable task, mark the thread as being started. | |
545 thread_being_created_ = true; | |
546 return static_cast<int>(threads_.size() + 1); | |
547 } | |
548 } | |
549 } | |
550 return 0; | |
551 } | |
552 | |
553 void SequencedWorkerPool::Inner::FinishStartingAdditionalThread( | |
554 int thread_number) { | |
555 // Called outside of the lock. | |
556 DCHECK_GT(thread_number, 0); | |
557 Worker* new_thread = new Worker(this, thread_number, thread_name_prefix_); | |
558 | |
559 { | |
560 base::AutoLock lock(lock_); | |
561 // Note: we can't clear the thread_being_created_ flag here, since that | |
562 // will race with the worker thread actually starting. We'll clear this | |
563 // flag when the worker actually starts. | |
564 threads_.push_back(linked_ptr<Worker>(new_thread)); | |
565 } | |
566 | |
567 // Clearing the thread_being_created_ flag could be the last thing the | |
568 // shutdown code was waiting for, so we need to wake it up. | |
569 cond_var_.Signal(); | |
570 } | |
571 | |
572 bool SequencedWorkerPool::Inner::CanShutdown() const { | |
573 lock_.AssertAcquired(); | |
574 // See PrepareToStartAdditionalThreadIfHelpful for how thread creation works. | |
575 return !thread_being_created_ && | |
576 blocking_shutdown_thread_count_ == 0 && | |
577 blocking_shutdown_pending_task_count_ == 0; | |
578 } | |
579 | |
580 // SequencedWorkerPool -------------------------------------------------------- | |
581 | |
582 SequencedWorkerPool::SequencedWorkerPool(size_t max_threads, | |
583 const std::string& thread_name_prefix) | |
584 : inner_(new Inner(max_threads, thread_name_prefix)) { | |
585 } | |
586 | |
587 SequencedWorkerPool::~SequencedWorkerPool() { | |
588 } | |
589 | |
590 SequencedWorkerPool::SequenceToken SequencedWorkerPool::GetSequenceToken() { | |
591 return inner_->GetSequenceToken(); | |
592 } | |
593 | |
594 SequencedWorkerPool::SequenceToken SequencedWorkerPool::GetNamedSequenceToken( | |
595 const std::string& name) { | |
596 return inner_->GetNamedSequenceToken(name); | |
597 } | |
598 | |
599 bool SequencedWorkerPool::PostWorkerTask( | |
600 const tracked_objects::Location& from_here, | |
601 const base::Closure& task) { | |
602 return inner_->PostTask(0, BLOCK_SHUTDOWN, from_here, task); | |
603 } | |
604 | |
605 bool SequencedWorkerPool::PostWorkerTaskWithShutdownBehavior( | |
606 const tracked_objects::Location& from_here, | |
607 const base::Closure& task, | |
608 WorkerShutdown shutdown_behavior) { | |
609 return inner_->PostTask(0, shutdown_behavior, from_here, task); | |
610 } | |
611 | |
612 bool SequencedWorkerPool::PostSequencedWorkerTask( | |
613 SequenceToken sequence_token, | |
614 const tracked_objects::Location& from_here, | |
615 const base::Closure& task) { | |
616 return inner_->PostTask(sequence_token.id_, BLOCK_SHUTDOWN, | |
617 from_here, task); | |
618 } | |
619 | |
620 bool SequencedWorkerPool::PostSequencedWorkerTaskWithShutdownBehavior( | |
621 SequenceToken sequence_token, | |
622 const tracked_objects::Location& from_here, | |
623 const base::Closure& task, | |
624 WorkerShutdown shutdown_behavior) { | |
625 return inner_->PostTask(sequence_token.id_, shutdown_behavior, | |
626 from_here, task); | |
627 } | |
628 | |
629 void SequencedWorkerPool::Shutdown() { | |
630 inner_->Shutdown(); | |
631 } | |
632 | |
633 void SequencedWorkerPool::SetTestingObserver(TestingObserver* observer) { | |
634 inner_->SetTestingObserver(observer); | |
635 } | |
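For reviewers, a short usage sketch of the public API defined above. The pool size, thread-name prefix, token name, and the three task functions are invented for illustration; only the SequencedWorkerPool calls are real.

    // Sketch only, not part of this change.
    void DoUnrelatedWork() {}
    void WriteEntry() {}
    void ReadEntryBack() {}

    void ExampleUsage() {
      SequencedWorkerPool pool(4, "Example");  // up to 4 lazily created threads

      // Unsequenced: may run in parallel with any other task.
      pool.PostWorkerTask(FROM_HERE, base::Bind(&DoUnrelatedWork));

      // Sequenced: these two run one after the other, in post order, though
      // not necessarily on the same worker thread.
      SequencedWorkerPool::SequenceToken token = pool.GetSequenceToken();
      pool.PostSequencedWorkerTask(token, FROM_HERE, base::Bind(&WriteEntry));
      pool.PostSequencedWorkerTask(token, FROM_HERE,
                                   base::Bind(&ReadEntryBack));

      // Call sites that cannot share a token object can agree on a name;
      // every caller passing "db" gets the same sequence.
      SequencedWorkerPool::SequenceToken named =
          pool.GetNamedSequenceToken("db");
      pool.PostSequencedWorkerTask(named, FROM_HERE, base::Bind(&WriteEntry));

      // Required before the pool is destroyed (see the DCHECK in ~Inner()).
      pool.Shutdown();
    }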
636 | |
637 } // namespace base | |