OLD | NEW |
---|---|
(Empty) | |
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "content/common/sequenced_worker_pool.h" | |
6 | |
7 #include <deque> | |
8 #include <set> | |
9 | |
10 #include "base/atomicops.h" | |
11 #include "base/bind.h" | |
12 #include "base/memory/scoped_ptr.h" | |
13 #include "base/metrics/histogram.h" | |
14 #include "base/stringprintf.h" | |
15 #include "base/synchronization/condition_variable.h" | |
16 #include "base/synchronization/waitable_event.h" | |
michaeln
2011/12/14 20:37:47
is this include needed?
| |
17 #include "base/threading/simple_thread.h" | |
18 #include "base/threading/thread.h" | |
19 | |
20 namespace { | |
21 | |
22 struct SequencedTask { | |
23 int sequence_token_id; | |
24 SequencedWorkerPool::WorkerShutdown shutdown_behavior; | |
25 tracked_objects::Location location; | |
26 base::Closure task; | |
27 }; | |
28 | |
29 } // namespace | |
30 | |
31 // Worker --------------------------------------------------------------------- | |
32 | |
33 class SequencedWorkerPool::Worker : public base::SimpleThread { | |
34 public: | |
35 Worker(SequencedWorkerPool::Inner* inner, | |
36 int thread_number, | |
37 const std::string& thread_name_prefix); | |
38 ~Worker(); | |
39 | |
40 // SimpleThread implementation. This actually runs the background thread. | |
41 virtual void Run(); | |
42 | |
43 private: | |
44 SequencedWorkerPool::Inner* inner_; | |
45 SequencedWorkerPool::WorkerShutdown current_shutdown_mode_; | |
46 | |
47 DISALLOW_COPY_AND_ASSIGN(Worker); | |
48 }; | |
49 | |
50 | |
51 // Inner ---------------------------------------------------------------------- | |
52 | |
53 class SequencedWorkerPool::Inner | |
54 : public base::RefCountedThreadSafe<SequencedWorkerPool::Inner> { | |
55 public: | |
56 Inner(size_t max_threads, const std::string& thread_name_prefix); | |
57 virtual ~Inner(); | |
58 | |
59 // Backends for SequencedWorkerPool. | |
60 SequenceToken GetSequenceToken(); | |
61 SequenceToken GetNamedSequenceToken(const std::string& name); | |
62 bool PostTask(int sequence_token_id, | |
63 SequencedWorkerPool::WorkerShutdown shutdown_behavior, | |
64 const tracked_objects::Location& from_here, | |
65 const base::Closure& task); | |
66 void Shutdown(); | |
67 void SetTestingObserver(SequencedWorkerPool::TestingObserver* observer); | |
68 | |
69 // Runs the worker loop on the background thread. | |
70 void ThreadLoop(Worker* this_worker); | |
71 | |
72 private: | |
73 // The calling code should clear the given delete_these_outside_lock | |
74 // vector the next time the lock is released. See the implementation for | |
75 // a more detailed description. | |
76 bool GetWork(SequencedTask* task, | |
77 std::vector<base::Closure>* delete_these_outside_lock); | |
78 | |
79 // Performs init and cleanup around running the given task. WillRun... | |
80 // returns the value from PrepareToStartAdditionalThreadIfHelpful. | |
81 // The calling code should call FinishStartingAdditionalThread once the | |
82 // lock is released if the return value is nonzero. | |
83 int WillRunWorkerTask(Worker* worker, const SequencedTask& task); | |
84 void DidRunWorkerTask(Worker* worker, const SequencedTask& task); | |
85 | |
86 // Returns true if there are no threads currently running the given | |
87 // sequence token. | |
88 bool IsSequenceTokenRunnable(int sequence_token_id) const; | |
89 | |
90 // Checks if all threads are busy and the addition of one more could run an | |
91 // additional task waiting in the queue. This must be called from within | |
92 // the lock. | |
93 // | |
94 // If another thread is helpful, this will mark the thread as being in the | |
95 // process of starting and returns the number of the new thread, which will | |
96 // be nonzero. The caller should then call FinishStartingAdditionalThread to | |
97 // complete initialization once the lock is released. | |
98 // | |
99 // If another thread is not necessary, returns 0. | |
100 // | |
101 // See the implementation for more. | |
102 int PrepareToStartAdditionalThreadIfHelpful(); | |
103 | |
104 // The second part of thread creation after | |
105 // PrepareToStartAdditionalThreadIfHelpful with the thread number it | |
106 // generated. This actually creates the thread and should be called outside | |
107 // the lock to avoid blocking important work starting a thread in the lock. | |
108 void FinishStartingAdditionalThread(int thread_number); | |
109 | |
110 // Checks whether there is work left that's blocking shutdown. Must be | |
111 // called inside the lock. | |
112 bool CanShutdown() const; | |
113 | |
114 // The last sequence number used. Managed by GetSequenceToken; since this | |
115 // only does thread-safe increment operations, you do not need to hold the | |
116 // lock. | |
117 volatile base::subtle::Atomic32 last_sequence_number_; | |
118 | |
119 // This lock protects |everything in this class|. Do not read or modify | |
120 // anything without holding this lock. Do not block while holding this | |
121 // lock. | |
122 base::Lock lock_; | |
123 | |
124 // Condition variable used to wake up worker threads when a task is runnable. | |
125 base::ConditionVariable cond_var_; | |
126 | |
127 // The maximum number of worker threads we'll create. | |
128 size_t max_threads_; | |
129 | |
130 std::string thread_name_prefix_; | |
131 | |
132 // Associates all known sequence token names with their IDs. | |
133 std::map<std::string, int> named_sequence_tokens_; | |
134 | |
135 // Owning pointers to all threads we've created so far. Since we lazily | |
136 // create threads, this may be less than max_threads_ and will be initially | |
137 // empty. | |
138 std::vector<linked_ptr<Worker> > threads_; | |
139 | |
140 // Set to true when we're in the process of creating another thread. | |
141 // See PrepareToStartAdditionalThreadIfHelpful for more. | |
142 bool thread_being_created_; | |
143 | |
144 // Number of threads currently running tasks. | |
145 size_t running_thread_count_; | |
146 | |
147 // Number of threads currently running tasks that have the BLOCK_SHUTDOWN | |
148 // flag set. | |
149 size_t blocking_shutdown_thread_count_; | |
150 | |
151 // In-order list of all pending tasks. These are tasks waiting for a thread | |
152 // to run on or that are blocked on a previous task in their sequence. | |
153 // | |
154 // We maintain the pending_task_count_ separately for metrics because | |
155 // list.size() can be linear time. | |
156 std::list<SequencedTask> pending_tasks_; | |
157 size_t pending_task_count_; | |
158 | |
159 // Number of tasks in the pending_tasks_ list that are marked as blocking | |
160 // shutdown. | |
161 size_t blocking_shutdown_pending_task_count_; | |
162 | |
163 // Lists all sequence tokens currently executing. | |
164 std::set<int> current_sequences_; | |
165 | |
166 // Set when the app is terminating and no further tasks should be allowed, | |
167 // though we may still be running existing tasks. | |
168 bool terminating_; | |
169 | |
170 // Set when Shutdown is called to do some assertions. | |
171 bool shutdown_called_; | |
172 | |
173 SequencedWorkerPool::TestingObserver* testing_observer_; | |
174 }; | |
175 | |
176 SequencedWorkerPool::Worker::Worker(SequencedWorkerPool::Inner* inner, | |
177 int thread_number, | |
178 const std::string& prefix) | |
179 : base::SimpleThread( | |
180 prefix + StringPrintf("Worker%d", thread_number).c_str()), | |
181 inner_(inner), | |
182 current_shutdown_mode_(SequencedWorkerPool::CONTINUE_ON_SHUTDOWN) { | |
183 Start(); | |
184 } | |
185 | |
186 SequencedWorkerPool::Worker::~Worker() { | |
187 } | |
188 | |
189 void SequencedWorkerPool::Worker::Run() { | |
190 // Just jump back to the Inner object to run the thread, since it has all the | |
191 // tracking information and queues. It might be more natural to implement | |
192 // using DelegateSimpleThread and have Inner implement the Delegate to avoid | |
193 // having these worker objects at all, but that method lacks the ability to | |
194 // send thread-specific information easily to the thread loop. | |
195 inner_->ThreadLoop(this); | |
196 } | |
197 | |
198 SequencedWorkerPool::Inner::Inner(size_t max_threads, | |
199 const std::string& thread_name_prefix) | |
200 : last_sequence_number_(0), | |
201 lock_(), | |
202 cond_var_(&lock_), | |
203 max_threads_(max_threads), | |
204 thread_name_prefix_(thread_name_prefix), | |
205 thread_being_created_(false), | |
206 running_thread_count_(0), | |
207 blocking_shutdown_thread_count_(0), | |
208 pending_task_count_(0), | |
209 blocking_shutdown_pending_task_count_(0), | |
210 terminating_(false), | |
211 shutdown_called_(false), testing_observer_(NULL) { | |
212 } | |
213 | |
214 SequencedWorkerPool::Inner::~Inner() { | |
215 // You must call Shutdown() before destroying the pool. | |
216 DCHECK(shutdown_called_); | |
217 | |
218 // Need to explicitly join with the threads before they're destroyed or else | |
219 // they will be running when our object is half torn down. | |
220 for (size_t i = 0; i < threads_.size(); i++) | |
221 threads_[i]->Join(); | |
222 threads_.clear(); | |
223 } | |
224 | |
225 SequencedWorkerPool::SequenceToken | |
226 SequencedWorkerPool::Inner::GetSequenceToken() { | |
227 base::subtle::Atomic32 result = | |
228 base::subtle::NoBarrier_AtomicIncrement(&last_sequence_number_, 1); | |
229 return SequenceToken(static_cast<int>(result)); | |
230 } | |
231 | |
232 SequencedWorkerPool::SequenceToken | |
233 SequencedWorkerPool::Inner::GetNamedSequenceToken( | |
234 const std::string& name) { | |
235 base::AutoLock lock(lock_); | |
236 std::map<std::string, int>::const_iterator found = | |
237 named_sequence_tokens_.find(name); | |
238 if (found != named_sequence_tokens_.end()) | |
239 return SequenceToken(found->second); // Got an existing one. | |
240 | |
241 // Create a new one for this name. | |
242 SequenceToken result = GetSequenceToken(); | |
243 named_sequence_tokens_.insert(std::make_pair(name, result.id_)); | |
244 return result; | |
245 } | |
246 | |
247 bool SequencedWorkerPool::Inner::PostTask( | |
248 int sequence_token_id, | |
249 SequencedWorkerPool::WorkerShutdown shutdown_behavior, | |
250 const tracked_objects::Location& from_here, | |
251 const base::Closure& task) { | |
252 SequencedTask sequenced; | |
253 sequenced.sequence_token_id = sequence_token_id; | |
254 sequenced.shutdown_behavior = shutdown_behavior; | |
255 sequenced.location = from_here; | |
256 sequenced.task = task; | |
257 | |
258 int create_thread_id = 0; | |
259 { | |
260 base::AutoLock lock(lock_); | |
261 if (terminating_) | |
262 return false; | |
263 | |
264 pending_tasks_.push_back(sequenced); | |
265 pending_task_count_++; | |
266 if (shutdown_behavior == BLOCK_SHUTDOWN) | |
267 blocking_shutdown_pending_task_count_++; | |
268 | |
269 create_thread_id = PrepareToStartAdditionalThreadIfHelpful(); | |
270 } | |
271 | |
272 // Actually start the additional thread or signal an existing one now that | |
273 // we're outside the lock. | |
274 if (create_thread_id) | |
275 FinishStartingAdditionalThread(create_thread_id); | |
276 else | |
277 cond_var_.Signal(); | |
278 | |
279 return true; | |
280 } | |
281 | |
282 void SequencedWorkerPool::Inner::Shutdown() { | |
283 if (shutdown_called_) | |
284 return; | |
285 shutdown_called_ = true; | |
286 | |
287 // Mark us as terminating and go through and drop all tasks that aren't | |
288 // required to run on shutdown. Since no new tasks will get posted once the | |
289 // terminating_ flag is set, this ensures that all remaining tasks are | |
290 // required for shutdown whenever terminating_ is set. | |
291 { | |
292 base::AutoLock lock(lock_); | |
293 DCHECK(!terminating_); | |
294 terminating_ = true; | |
295 | |
296 // Tickle the threads. This will wake up a waiting one so it will know that | |
297 // it can exit, which in turn will wake up any other waiting ones. | |
298 cond_var_.Signal(); | |
299 | |
300 // If there are no pending or running tasks blocking shutdown, we're done. | |
301 if (CanShutdown()) | |
302 return; | |
303 } | |
304 | |
305 // If we get here, we know we're either waiting on a blocking task that's | |
306 // currently running, waiting on a blocking task that hasn't been scheduled | |
307 // yet, or both. Wait on the condition variable until CanShutdown() reports | |
308 // that no work blocking shutdown remains. | |
309 if (testing_observer_) | |
310 testing_observer_->WillWaitForShutdown(); | |
311 | |
312 base::TimeTicks shutdown_wait_begin = base::TimeTicks::Now(); | |
313 | |
314 // Wait for no more tasks. | |
315 { | |
316 base::AutoLock lock(lock_); | |
317 while (!CanShutdown()) | |
318 cond_var_.Wait(); | |
319 } | |
320 UMA_HISTOGRAM_TIMES("SequencedWorkerPool.ShutdownDelayTime", | |
321 base::TimeTicks::Now() - shutdown_wait_begin); | |
322 } | |
323 | |
324 void SequencedWorkerPool::Inner::SetTestingObserver( | |
325 SequencedWorkerPool::TestingObserver* observer) { | |
326 base::AutoLock lock(lock_); | |
327 testing_observer_ = observer; | |
328 } | |
329 | |
330 void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) { | |
331 { | |
332 base::AutoLock lock(lock_); | |
333 while (true) { | |
334 // See GetWork for what delete_these_outside_lock is doing. | |
335 SequencedTask task; | |
336 std::vector<base::Closure> delete_these_outside_lock; | |
337 if (GetWork(&task, &delete_these_outside_lock)) { | |
338 int new_thread_id = WillRunWorkerTask(this_worker, task); | |
339 { | |
340 base::AutoUnlock unlock(lock_); | |
341 cond_var_.Signal(); | |
342 delete_these_outside_lock.clear(); | |
343 | |
344 // Complete thread creation outside the lock if necessary. | |
345 if (new_thread_id) | |
346 FinishStartingAdditionalThread(new_thread_id); | |
347 | |
348 task.task.Run(); | |
349 | |
350 // Make sure our task is erased outside the lock for the same reason | |
351 // we do this with delete_these_outside_lock. | |
352 task.task = base::Closure(); | |
353 } | |
354 DidRunWorkerTask(this_worker, task); // Must be done inside the lock. | |
355 } else { | |
356 // When we're terminating and there's no more work, we can shut down. | |
357 // You can't get more tasks posted once terminating_ is set. There may | |
358 // be some tasks stuck behind running ones with the same sequence | |
359 // token, but additional threads won't help this case. | |
360 if (terminating_) | |
361 break; | |
362 cond_var_.Wait(); | |
363 } | |
364 } | |
365 } | |
366 | |
367 // We noticed we should exit. Wake up the next worker so it knows it should | |
368 // exit as well (because the Shutdown() code only signals once). | |
369 cond_var_.Signal(); | |
370 } | |
371 | |
372 bool SequencedWorkerPool::Inner::GetWork( | |
373 SequencedTask* task, | |
374 std::vector<base::Closure>* delete_these_outside_lock) { | |
375 lock_.AssertAcquired(); | |
376 | |
377 DCHECK_EQ(pending_tasks_.size(), pending_task_count_); | |
378 UMA_HISTOGRAM_COUNTS_100("SequencedWorkerPool.TaskCount", | |
379 pending_task_count_); | |
380 | |
381 // Find the next task with a sequence token that's not currently in use. | |
382 // If the token is in use, that means another thread is running something | |
383 // in that sequence, and we can't run it without going out-of-order. | |
384 // | |
385 // This algorithm is simple and fair, but inefficient in some cases. For | |
386 // example, say somebody schedules 1000 slow tasks with the same sequence | |
387 // number. We'll have to go through all those tasks each time we feel like | |
388 // there might be work to schedule. If this proves to be a problem, we | |
389 // should make this more efficient. | |
390 // | |
391 // One possible enhancement would be to keep a map from sequence ID to a | |
392 // list of pending but currently blocked SequencedTasks for that ID. | |
393 // When a worker finishes a task of one sequence token, it can pick up the | |
394 // next one from that token right away. | |
395 // | |
396 // This may lead to starvation if there are sufficient numbers of sequences | |
397 // in use. To alleviate this, we could add an incrementing priority counter | |
398 // to each SequencedTask. Then maintain a priority_queue of all runnable | |
399 // tasks, sorted by priority counter. When a sequenced task is completed | |
400 // we would pop the head element off of that token's pending list and add it | |
401 // to the priority queue. Then we would run the first item in the priority | |
402 // queue. | |
403 bool found_task = false; | |
404 int unrunnable_tasks = 0; | |
405 std::list<SequencedTask>::iterator i = pending_tasks_.begin(); | |
406 while (i != pending_tasks_.end()) { | |
407 if (!IsSequenceTokenRunnable(i->sequence_token_id)) { | |
408 unrunnable_tasks++; | |
409 ++i; | |
410 continue; | |
411 } | |
412 | |
413 if (terminating_ && i->shutdown_behavior != BLOCK_SHUTDOWN) { | |
414 // We're shutting down and the task we just found isn't blocking | |
415 // shutdown. Delete it and get more work. | |
416 // | |
417 // Note that we do not want to delete unrunnable tasks. Deleting a task | |
418 // can have side effects (like freeing some objects) and deleting a | |
419 // task that's supposed to run after one that's currently running could | |
420 // cause an obscure crash. | |
421 // | |
422 // We really want to delete these tasks outside the lock in case the | |
423 // closures are holding refs to objects that want to post work from | |
424 // their destructors (which would deadlock). The closures are | |
425 // internally refcounted, so we just need to keep a copy of them alive | |
426 // until the lock is exited. The calling code can just clear() the | |
427 // vector they passed to us once the lock is exited to make this | |
428 // happen. | |
429 delete_these_outside_lock->push_back(i->task); | |
430 i = pending_tasks_.erase(i); | |
431 pending_task_count_--; | |
432 } else { | |
433 // Found a runnable task. | |
434 *task = *i; | |
435 i = pending_tasks_.erase(i); | |
436 pending_task_count_--; | |
437 if (task->shutdown_behavior == BLOCK_SHUTDOWN) | |
438 blocking_shutdown_pending_task_count_--; | |
439 | |
440 found_task = true; | |
michaeln
2011/12/14 20:37:47
Would it make sense to put the logic under Prepare
| |
441 break; | |
442 } | |
443 } | |
444 | |
445 // Track the number of tasks we had to skip over to see if we should be | |
446 // making this more efficient. If this number ever becomes large or is | |
447 // frequently "some", we should consider the optimization above. | |
448 UMA_HISTOGRAM_COUNTS_100("SequencedWorkerPool.UnrunnableTaskCount", | |
449 unrunnable_tasks); | |
450 return found_task; | |
451 } | |
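
To make the enhancement described at the top of GetWork() concrete, one possible shape for the bookkeeping is sketched below. This is an illustration for discussion only, not part of this CL: the type and member names are hypothetical, and it assumes the SequencedTask struct defined above, <map> and <deque>, and the uint64 typedef from base/basictypes.h.

// Hypothetical bookkeeping for per-token pending lists plus a
// priority-ordered runnable queue (illustration only).
struct SequencedTaskQueues {
  // Monotonically increasing counter stamped on each posted task so the
  // runnable queue preserves overall posting order and avoids starvation.
  uint64 next_priority;

  // Tasks blocked behind a running task with the same sequence token,
  // keyed by sequence token ID.
  std::map<int, std::deque<SequencedTask> > blocked_by_token;

  // Runnable tasks keyed by their priority counter; begin() is the next
  // task a free worker should run.
  std::map<uint64, SequencedTask> runnable;
};

When a worker finishes a task for some token, it would move the head of that token's deque into |runnable|; a worker looking for work would then take runnable.begin() instead of scanning |pending_tasks_| each time.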
452 | |
453 int SequencedWorkerPool::Inner::WillRunWorkerTask(Worker* worker, | |
michaeln
2011/12/14 20:37:47
The 'worker' arg isn't used here or in the DidRun
| |
454 const SequencedTask& task) { | |
455 lock_.AssertAcquired(); | |
456 | |
457 // Mark the task's sequence number as in use. | |
458 if (task.sequence_token_id) | |
459 current_sequences_.insert(task.sequence_token_id); | |
460 | |
461 running_thread_count_++; | |
462 | |
463 if (task.shutdown_behavior == SequencedWorkerPool::BLOCK_SHUTDOWN) | |
464 blocking_shutdown_thread_count_++; | |
465 | |
466 // We just picked up a task. Since PrepareToStartAdditionalThreadIfHelpful | |
467 // only creates a new thread if there is no free one, there is a race when | |
468 // posting tasks: many tasks could have been posted before a thread started | |
469 // running them, so only one thread would have been created. So we also check | |
470 // for more threads after removing our task from the queue, which also has | |
471 // the nice side effect of creating the workers from background threads | |
472 // rather than the main thread of the app. | |
473 // | |
474 // If another thread wasn't created, we want to wake up an existing thread | |
475 // if there is one waiting to pick up the next task. | |
476 int create_thread_id = PrepareToStartAdditionalThreadIfHelpful(); | |
477 return create_thread_id; | |
478 } | |
479 | |
480 void SequencedWorkerPool::Inner::DidRunWorkerTask(Worker* worker, | |
481 const SequencedTask& task) { | |
482 lock_.AssertAcquired(); | |
483 | |
484 if (task.shutdown_behavior == SequencedWorkerPool::BLOCK_SHUTDOWN) { | |
485 DCHECK_GT(blocking_shutdown_thread_count_, 0u); | |
486 blocking_shutdown_thread_count_--; | |
487 } | |
488 | |
489 if (task.sequence_token_id) | |
490 current_sequences_.erase(task.sequence_token_id); | |
491 | |
492 running_thread_count_--; | |
493 } | |
494 | |
495 bool SequencedWorkerPool::Inner::IsSequenceTokenRunnable( | |
496 int sequence_token_id) const { | |
497 lock_.AssertAcquired(); | |
498 return !sequence_token_id || | |
499 current_sequences_.find(sequence_token_id) == | |
500 current_sequences_.end(); | |
501 } | |
502 | |
503 int SequencedWorkerPool::Inner::PrepareToStartAdditionalThreadIfHelpful() { | |
504 // How thread creation works: | |
505 // | |
506 // We'd like to avoid creating threads with the lock held. However, we | |
507 // need to be sure that we have an accurate accounting of the threads for | |
508 // proper joining and deletion on shutdown. | |
509 // | |
510 // We need to figure out if we need another thread with the lock held, which | |
511 // is what this function does. It then marks us as in the process of creating | |
512 // a thread. When we do shutdown, we wait until the thread_being_created_ | |
513 // flag is cleared, which ensures that the new thread is properly added to | |
514 // all the data structures and we can't leak it. Once shutdown starts, we'll | |
515 // refuse to create more threads or they would be leaked. | |
516 if (!terminating_ && | |
517 !thread_being_created_ && | |
518 threads_.size() < max_threads_ && | |
519 running_thread_count_ == threads_.size()) { | |
520 // We could use an additional thread if there's work to be done. | |
521 for (std::list<SequencedTask>::iterator i = pending_tasks_.begin(); | |
522 i != pending_tasks_.end(); ++i) { | |
523 if (terminating_ && i->shutdown_behavior != BLOCK_SHUTDOWN) | |
michaeln
2011/12/14 20:37:47
the outer condition sayz if !terminating_, is this
jar (doing other things)
2011/12/14 23:47:15
+1 It seems like these two lines should go.
| |
524 continue; // Skip non-critical tasks on shutdown. | |
525 if (IsSequenceTokenRunnable(i->sequence_token_id)) { | |
526 // Found a runnable task, mark the thread as being started. | |
527 thread_being_created_ = true; | |
528 return threads_.size() + 1; | |
529 } | |
530 } | |
531 } | |
532 return 0; | |
533 } | |
534 | |
535 void SequencedWorkerPool::Inner::FinishStartingAdditionalThread( | |
536 int thread_number) { | |
537 // Called outside of the lock. | |
538 DCHECK(thread_number > 0); | |
539 Worker* new_thread = new Worker(this, thread_number, thread_name_prefix_); | |
540 | |
541 { | |
542 base::AutoLock lock(lock_); | |
543 | |
544 DCHECK(thread_being_created_); | |
545 thread_being_created_ = false; | |
546 | |
547 threads_.push_back(linked_ptr<Worker>(new_thread)); | |
548 } | |
549 | |
550 // Clearing the thread_being_created_ flag could be the last thing the | |
551 // shutdown code was waiting for, so we need to wake it up. | |
552 cond_var_.Signal(); | |
553 } | |
554 | |
555 bool SequencedWorkerPool::Inner::CanShutdown() const { | |
556 lock_.AssertAcquired(); | |
557 // See PrepareToStartAdditionalThreadIfHelpful for how thread creation works. | |
558 return !thread_being_created_ && | |
559 blocking_shutdown_thread_count_ == 0 && | |
560 blocking_shutdown_pending_task_count_ == 0; | |
561 } | |
562 | |
563 // SequencedWorkerPool -------------------------------------------------------- | |
564 | |
565 SequencedWorkerPool::SequencedWorkerPool(size_t max_threads, | |
566 const std::string& thread_name_prefix) | |
567 : inner_(new Inner(max_threads, thread_name_prefix)) { | |
568 } | |
569 | |
570 SequencedWorkerPool::~SequencedWorkerPool() { | |
571 } | |
572 | |
573 SequencedWorkerPool::SequenceToken SequencedWorkerPool::GetSequenceToken() { | |
574 return inner_->GetSequenceToken(); | |
575 } | |
576 | |
577 SequencedWorkerPool::SequenceToken SequencedWorkerPool::GetNamedSequenceToken( | |
578 const std::string& name) { | |
579 return inner_->GetNamedSequenceToken(name); | |
580 } | |
581 | |
582 bool SequencedWorkerPool::PostWorkerTask( | |
583 const tracked_objects::Location& from_here, | |
584 const base::Closure& task) { | |
585 return inner_->PostTask(0, BLOCK_SHUTDOWN, from_here, task); | |
586 } | |
587 | |
588 bool SequencedWorkerPool::PostWorkerTaskWithShutdownBehavior( | |
589 const tracked_objects::Location& from_here, | |
590 const base::Closure& task, | |
591 WorkerShutdown shutdown_behavior) { | |
592 return inner_->PostTask(0, shutdown_behavior, from_here, task); | |
593 } | |
594 | |
595 bool SequencedWorkerPool::PostSequencedWorkerTask( | |
596 SequenceToken sequence_token, | |
597 const tracked_objects::Location& from_here, | |
598 const base::Closure& task) { | |
599 return inner_->PostTask(sequence_token.id_, BLOCK_SHUTDOWN, | |
600 from_here, task); | |
601 } | |
602 | |
603 bool SequencedWorkerPool::PostSequencedWorkerTaskWithShutdownBehavior( | |
604 SequenceToken sequence_token, | |
605 const tracked_objects::Location& from_here, | |
606 const base::Closure& task, | |
607 WorkerShutdown shutdown_behavior) { | |
608 return inner_->PostTask(sequence_token.id_, shutdown_behavior, | |
609 from_here, task); | |
610 } | |
611 | |
612 void SequencedWorkerPool::Shutdown() { | |
613 inner_->Shutdown(); | |
614 } | |
615 | |
616 void SequencedWorkerPool::SetTestingObserver(TestingObserver* observer) { | |
617 inner_->SetTestingObserver(observer); | |
618 } | |
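
For context when reviewing the public surface above, here is a minimal, hypothetical caller (not part of this CL; DoUnorderedWork, OpenDatabase, and WriteRecord are made-up functions) showing the intended usage: unsequenced tasks may run in parallel, tasks sharing a token run one at a time in posting order, and Shutdown() must be called before the pool is destroyed.

void DoUnorderedWork() {}
void OpenDatabase() {}
void WriteRecord() {}

void ExampleUsage() {
  SequencedWorkerPool pool(4, "Example");

  // May run on any free worker thread, possibly in parallel with others.
  pool.PostWorkerTask(FROM_HERE, base::Bind(&DoUnorderedWork));

  // Tasks posted with the same token run serially, in posting order.
  SequencedWorkerPool::SequenceToken token =
      pool.GetNamedSequenceToken("db");
  pool.PostSequencedWorkerTask(token, FROM_HERE, base::Bind(&OpenDatabase));
  pool.PostSequencedWorkerTask(token, FROM_HERE, base::Bind(&WriteRecord));

  // Blocks until all BLOCK_SHUTDOWN work has completed; required before the
  // pool is destroyed (see the DCHECK in ~Inner()).
  pool.Shutdown();
}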