Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(253)

Side by Side Diff: content/common/sequenced_worker_pool.cc

Issue 8416019: Add a sequenced worker pool (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: '' Created 9 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
(Empty)
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
#include "content/common/sequenced_worker_pool.h"

#include <deque>
#include <list>
#include <map>
#include <set>
#include <string>
#include <vector>

#include "base/atomicops.h"
#include "base/bind.h"
#include "base/memory/scoped_ptr.h"
#include "base/metrics/histogram.h"
#include "base/stringprintf.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/simple_thread.h"
#include "base/threading/thread.h"
19
namespace {

// A task as it sits in the pool's queue: the client's closure plus the
// metadata needed to schedule it correctly.
struct SequencedTask {
  int sequence_token_id;  // 0 means the task is unsequenced (always runnable).
  SequencedWorkerPool::WorkerShutdown shutdown_behavior;
  tracked_objects::Location location;  // Where the task was posted from.
  base::Closure task;
};

}  // namespace
30
31 // Worker ---------------------------------------------------------------------
32
// One pooled background thread. The underlying OS thread is started in the
// constructor and joined in the destructor; all scheduling decisions live in
// SequencedWorkerPool::Inner.
class SequencedWorkerPool::Worker : public base::SimpleThread {
 public:
  Worker(SequencedWorkerPool::Inner* inner, int thread_number);
  ~Worker();

  // SimpleThread implementation. This actually runs the background thread.
  virtual void Run();

  // When running a task, the task's shutdown mode is stored on the worker.
  // When idle (no running task) the mode is reset to the default,
  // CONTINUE_ON_SHUTDOWN (see reset_current_shutdown_mode() and the
  // constructor).
  //
  // This must only be called when holding the Inner class' lock.
  SequencedWorkerPool::WorkerShutdown current_shutdown_mode() const {
    return current_shutdown_mode_;
  }
  void set_current_shutdown_mode(SequencedWorkerPool::WorkerShutdown mode) {
    current_shutdown_mode_ = mode;
  }
  void reset_current_shutdown_mode() {
    // Resets back to the default.
    current_shutdown_mode_ = SequencedWorkerPool::CONTINUE_ON_SHUTDOWN;
  }

 private:
  SequencedWorkerPool::Inner* inner_;  // Non-owning back-pointer to the pool.
  SequencedWorkerPool::WorkerShutdown current_shutdown_mode_;

  DISALLOW_COPY_AND_ASSIGN(Worker);
};
62
63
64 // Inner ----------------------------------------------------------------------
65
// Holds all state and scheduling logic for the pool. Refcounted so the worker
// threads, which call back into it from their run loops, can keep it alive
// independently of the owning SequencedWorkerPool object.
class SequencedWorkerPool::Inner
    : public base::RefCountedThreadSafe<SequencedWorkerPool::Inner> {
 public:
  Inner(size_t max_threads);
  virtual ~Inner();

  // Backends for SequenceWorkerPool.
  SequenceToken GetSequenceToken();
  SequenceToken GetNamedSequenceToken(const std::string& name);
  bool PostTask(int sequence_token_id,
                SequencedWorkerPool::WorkerShutdown shutdown_behavior,
                const tracked_objects::Location& from_here,
                const base::Closure& task);
  void Shutdown();
  void SetTestingObserver(SequencedWorkerPool::TestingObserver* observer);

  // Runs the worker loop on the background thread.
  void ThreadLoop(Worker* this_worker);

 private:
  // Moves the next runnable pending task into |task| and marks its sequence
  // as in use. Returns false when nothing is currently runnable. Must be
  // called with the lock held.
  bool GetWork(SequencedTask* task);

  // Bookkeeping for a worker picking up / finishing a task. Both must be
  // called with the lock held.
  void MarkWorkerBusy(Worker* worker, const SequencedTask& task);
  void MarkWorkerFree(Worker* worker);

  // Returns true if there are no threads currently running the given
  // sequence token.
  bool IsSequenceTokenRunnable(int sequence_token_id) const;

  // Returns true if there are more currently runnable tasks than there are
  // available threads to run them. This function does a brute-force check of
  // all tasks so avoid calling when possible.
  //
  // Must be called from within the lock.
  bool RunnableTasksAreOverSubscribed() const;

  // Returns true if any worker is running a shutdown-blocking task. This
  // includes BLOCK_SHUTDOWN and also SKIP_ON_SHUTDOWN tasks that are already
  // running (since they must be run to completion). It must be called with
  // the lock held.
  bool IsRunningBlockingTask() const;

  // The last sequence number used. Managed by GetSequenceToken, since this
  // only does threadsafe increment operations, you do not need to hold the
  // lock.
  volatile base::subtle::Atomic32 last_sequence_number_;

  // This lock protects |everything in this class|. Do not read or modify
  // anything without holding this lock. Do not block while holding this
  // lock.
  base::Lock lock_;

  // Condition variable used to wake up worker threads when a task is runnable.
  base::ConditionVariable cond_var_;

  // The maximum number of worker threads we'll create.
  size_t max_threads_;

  // Associates all known sequence token names with their IDs.
  std::map<std::string, int> named_sequence_tokens_;

  // Owning pointers to all threads we've created so far. Since we lazily
  // create threads, this may be less than max_threads_ and will be initially
  // empty.
  std::vector<linked_ptr<Worker> > threads_;

  // Lists all workers currently running tasks. Contains non-owning pointers.
  std::set<Worker*> running_threads_;

  // In-order list of all pending tasks. These are tasks waiting for a thread
  // to run on or that are blocked on a previous task in their sequence.
  //
  // We maintain the pending_task_count_ separately for metrics because
  // list.size() can be linear time.
  std::list<SequencedTask> pending_tasks_;
  size_t pending_task_count_;

  // Lists all sequence tokens currently executing.
  std::set<int> current_sequences_;

  // Set when the app is terminating and no further tasks should be allowed,
  // though we may still be running existing tasks.
  bool terminating_;

  // Set when the worker pool is being destroyed, and all worker threads should
  // exit.
  bool exit_workers_;

  // Observer notified before Shutdown() blocks; may be NULL. Read outside the
  // lock in Shutdown().
  SequencedWorkerPool::TestingObserver* testing_observer_;

  // Created lazily when terminating_ is set and there are pending tasks, this
  // is signaled by MarkWorkerFree when all blocking tasks have completed.
  scoped_ptr<base::WaitableEvent> shutdown_complete_;
};
160
SequencedWorkerPool::Worker::Worker(SequencedWorkerPool::Inner* inner,
                                    int thread_number)
    : base::SimpleThread(
          StringPrintf("BrowserWorker%d", thread_number).c_str()),
      inner_(inner),
      current_shutdown_mode_(SequencedWorkerPool::CONTINUE_ON_SHUTDOWN) {
  // The thread starts immediately; Run() calls back into |inner_|'s
  // ThreadLoop().
  Start();
}
169
SequencedWorkerPool::Worker::~Worker() {
  // Blocks until the thread exits its run loop, which happens once Inner
  // sets its exit flag and wakes the workers.
  Join();
}
173
void SequencedWorkerPool::Worker::Run() {
  // Just jump back to the Inner object to run the thread, since it has all the
  // tracking information and queues. It might be more natural to implement
  // using DelegateSimpleThread and have Inner implement the Delegate to avoid
  // having these worker objects at all, but that method lacks the ability to
  // send thread-specific information easily to the thread loop.
  inner_->ThreadLoop(this);
}
182
183 SequencedWorkerPool::Inner::Inner(size_t max_threads)
184 : last_sequence_number_(0),
185 lock_(),
186 cond_var_(&lock_),
187 max_threads_(max_threads),
188 pending_task_count_(0),
189 terminating_(false),
190 exit_workers_(false) {
191 }
192
193 SequencedWorkerPool::Inner::~Inner() {
194 // Tell all workers to exit. The Worker destructor will actually join with
195 // each corresponding thread when the object is torn down.
196 exit_workers_ = true;
197 cond_var_.Broadcast();
198 }
199
200 SequencedWorkerPool::SequenceToken
201 SequencedWorkerPool::Inner::GetSequenceToken() {
202 base::subtle::Atomic32 result =
203 base::subtle::NoBarrier_AtomicIncrement(&last_sequence_number_, 1);
204 return SequenceToken(static_cast<int>(result));
205 }
206
207 SequencedWorkerPool::SequenceToken
208 SequencedWorkerPool::Inner::GetNamedSequenceToken(
209 const std::string& name) {
210 base::AutoLock lock(lock_);
211 std::map<std::string, int>::const_iterator found =
212 named_sequence_tokens_.find(name);
213 if (found != named_sequence_tokens_.end())
214 return SequenceToken(found->second); // Got an existing one.
215
216 // Create a new one for this name.
217 SequenceToken result = GetSequenceToken();
218 named_sequence_tokens_.insert(std::make_pair(name, result.id_));
219 return result;
220 }
221
// Enqueues |task| under |sequence_token_id| (0 = unsequenced), possibly
// starting a new worker thread, and wakes a worker. Returns false if the
// pool is already shutting down (the task is refused).
bool SequencedWorkerPool::Inner::PostTask(
    int sequence_token_id,
    SequencedWorkerPool::WorkerShutdown shutdown_behavior,
    const tracked_objects::Location& from_here,
    const base::Closure& task) {
  base::AutoLock lock(lock_);

  // Once shutdown has begun, all new work is refused.
  if (terminating_)
    return false;

  SequencedTask sequenced;
  sequenced.sequence_token_id = sequence_token_id;
  sequenced.shutdown_behavior = shutdown_behavior;
  sequenced.location = from_here;
  sequenced.task = task;
  pending_tasks_.push_back(sequenced);
  pending_task_count_++;

  // Start a new thread if it would be useful for this task.
  //
  // There is a race condition between posting tasks and having the workers
  // pick them up. The main thread could post two tasks with the same sequence
  // token before a worker picks the first one up. When the second task is
  // posted, the sequence token will not be marked as running and a quick check
  // of task and free thread counts would indicate that a new thread should be
  // started. However, this may not be the case since there could be runnable
  // tasks and free threads just haven't picked them up yet.
  //
  // Therefore, we do a more thorough brute-force check of runnable tasks to
  // avoid creating extra threads. This should generally be fast, and most
  // cases will be caught by the first two checks of the if statement, meaning
  // the slow RunnableTasksAreOverSubscribed() is only run occasionally.
  if (threads_.size() < max_threads_ &&
      IsSequenceTokenRunnable(sequence_token_id) &&
      RunnableTasksAreOverSubscribed()) {
    // Start a new thread. This is done from within the lock which sucks (since
    // it's not necessarily fast and it could block other work), but avoiding
    // this would be a lot more code.
    // NOTE(review): threads_.size() (size_t) narrows to Worker's int
    // |thread_number| here — harmless for realistic pool sizes, but worth a
    // cast if max_threads_ ever becomes huge.
    threads_.push_back(linked_ptr<Worker>(new Worker(this, threads_.size())));
  }

  // Wake up a worker thread to run this.
  cond_var_.Signal();

  return true;
}
268
// Begins shutdown: refuses new tasks, drops queued tasks that are not
// required to run at shutdown, and blocks until all shutdown-blocking work
// has finished. Must only be called once.
void SequencedWorkerPool::Inner::Shutdown() {
  // Lists all tasks that we're dropping rather than running. The closure's
  // destructor may do complicated things like release refcounted objects,
  // which we don't want to do in the lock. Since the closures are internally
  // refcounted, all we have to do is keep a ref to the closure alive longer
  // than the lock is held. That's what this vector does, so the cleanup of
  // the tasks will magically happen in the vector's destructor.
  std::vector<SequencedTask> dropped_tasks;

  // Mark us as terminated and go through and drop all tasks that aren't
  // required to run on shutdown. Since no new tasks will get posted once the
  // terminated flag is set, this ensures that all remaining tasks are required
  // for shutdown whenever the terminating_ flag is set.
  {
    base::AutoLock lock(lock_);
    DCHECK(!terminating_);  // Shutdown() must only be called once.
    terminating_ = true;

    std::list<SequencedTask>::iterator i = pending_tasks_.begin();
    while (i != pending_tasks_.end()) {
      if (i->shutdown_behavior == BLOCK_SHUTDOWN) {
        i++;
      } else {
        // Keep a ref so the closure is destroyed after the lock is released.
        dropped_tasks.push_back(*i);
        i = pending_tasks_.erase(i);
        pending_task_count_--;
      }
    }
    DCHECK_EQ(pending_tasks_.size(), pending_task_count_);

    if (pending_tasks_.empty() && !IsRunningBlockingTask()) {
      // There are no pending or running tasks blocking shutdown, we're done.
      return;
    }

    // Need to wait for some tasks, create the event. MarkWorkerFree() signals
    // it once the last piece of shutdown-blocking work completes.
    DCHECK(!shutdown_complete_.get());
    shutdown_complete_.reset(new base::WaitableEvent(false, false));
  }

  // If we get here, we know we're either waiting on a blocking task that's
  // currently running, waiting on a blocking task that hasn't been scheduled
  // yet, or both. Block on the "queue empty" event to know when all tasks are
  // complete. This must be done outside the lock.
  if (testing_observer_)
    testing_observer_->WillWaitForShutdown();

  base::TimeTicks shutdown_wait_begin = base::TimeTicks::Now();
  shutdown_complete_->Wait();
  UMA_HISTOGRAM_TIMES("SequencedWorkerPool.ShutdownDelayTime",
                      base::TimeTicks::Now() - shutdown_wait_begin);
}
321
// Installs (or clears, with NULL) the observer notified before Shutdown()
// blocks. Test-only hook.
void SequencedWorkerPool::Inner::SetTestingObserver(
    SequencedWorkerPool::TestingObserver* observer) {
  base::AutoLock lock(lock_);
  testing_observer_ = observer;
}
327
328 void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
329 base::AutoLock lock(lock_);
330 while (!exit_workers_) {
331 SequencedTask task;
332 if (GetWork(&task)) {
333 MarkWorkerBusy(this_worker, task); // Must be done inside the lock.
334 {
335 base::AutoUnlock unlock(lock_);
336 task.task.Run();
jar (doing other things) 2011/11/26 01:25:59 One other thing... it is common to signal after yo
brettw 2011/11/26 01:54:40 You're right, that's what was causing my problem.
jar (doing other things) 2011/11/26 06:45:36 The trick is to just move incrementally in the rig
337 }
338 MarkWorkerFree(this_worker); // Must be done inside the lock.
339 }
340
341 cond_var_.Wait();
342 }
343 }
344
// Scans the pending queue for the first task whose sequence is not already
// running, moves it into |task|, and marks its sequence as in use. Returns
// false if nothing is currently runnable. Must be called with the lock held.
bool SequencedWorkerPool::Inner::GetWork(SequencedTask* task) {
  lock_.AssertAcquired();

  DCHECK_EQ(pending_tasks_.size(), pending_task_count_);
  UMA_HISTOGRAM_COUNTS_100("SequencedWorkerPool.TaskCount",
                           pending_task_count_);

  // Find the next task with a sequence token that's not currently in use.
  // If the token is in use, that means another thread is running something
  // in that sequence, and we can't run it without going out-of-order.
  //
  // This algorithm is simple and fair, but inefficient in some cases. For
  // example, say somebody schedules 1000 slow tasks with the same sequence
  // number. We'll have to go through all those tasks each time we feel like
  // there might be work to schedule. If this proves to be a problem, we
  // should make this more efficient.
  //
  // One possible enhancement would be to keep a map from sequence ID to a
  // list of pending but currently blocked SequencedTasks for that ID.
  // When a worker finishes a task of one sequence token, it can pick up the
  // next one from that token right away.
  //
  // This may lead to starvation if there are sufficient numbers of sequences
  // in use. To alleviate this, we could add an incrementing priority counter
  // to each SequencedTask. Then maintain a priority_queue of all runnable
  // tasks, sorted by priority counter. When a sequenced task is completed
  // we would pop the head element off of that tasks pending list and add it
  // to the priority queue. Then we would run the first item in the priority
  // queue.
  bool found_task = false;
  int unrunnable_tasks = 0;
  for (std::list<SequencedTask>::iterator i = pending_tasks_.begin();
       i != pending_tasks_.end(); ++i) {
    if (IsSequenceTokenRunnable(i->sequence_token_id)) {
      // Found a runnable task. Copy it out before erasing the queue entry.
      *task = *i;
      pending_tasks_.erase(i);
      pending_task_count_--;

      // Mark the task's sequence number as in use (token 0 = unsequenced,
      // so it is never tracked).
      if (task->sequence_token_id)
        current_sequences_.insert(task->sequence_token_id);

      found_task = true;
      break;
    }
    unrunnable_tasks++;
  }

  // Track the number of tasks we had to skip over to see if we should be
  // making this more efficient. If this number ever becomes large or is
  // frequently "some", we should consider the optimization above.
  UMA_HISTOGRAM_COUNTS_100("SequencedWorkerPool.UnrunnableTaskCount",
                           unrunnable_tasks);
  return found_task;
}
401
402 void SequencedWorkerPool::Inner::MarkWorkerBusy(Worker* worker,
403 const SequencedTask& task) {
404 lock_.AssertAcquired();
405 DCHECK(running_threads_.find(worker) == running_threads_.end());
406
407 worker->set_current_shutdown_mode(task.shutdown_behavior);
408 running_threads_.insert(worker);
409 }
410
411 void SequencedWorkerPool::Inner::MarkWorkerFree(Worker* worker) {
412 lock_.AssertAcquired();
413 DCHECK(running_threads_.find(worker) != running_threads_.end());
414
415 worker->reset_current_shutdown_mode();
416 running_threads_.erase(worker);
417
418 if (terminating_ && shutdown_complete_.get()) {
419 // When the app is terminating, check for "no more blocking work" and
420 // signal if shutdown tasks are complete.
421 if (pending_tasks_.empty() && !IsRunningBlockingTask())
422 shutdown_complete_->Signal();
423 }
424 }
425
426 bool SequencedWorkerPool::Inner::IsSequenceTokenRunnable(
427 int sequence_token_id) const {
428 lock_.AssertAcquired();
429 return !sequence_token_id ||
430 current_sequences_.find(sequence_token_id) ==
431 current_sequences_.end();
432 }
433
434 bool SequencedWorkerPool::Inner::RunnableTasksAreOverSubscribed() const {
435 lock_.AssertAcquired();
436
437 size_t available_threads = threads_.size() - running_threads_.size();
438
439 // Fast path the (relatively common) case where there are more threads
440 // available than there are tasks to run.
441 if (pending_task_count_ <= available_threads)
442 return false;
443
444 size_t runnable_task_count = 0;
445 for (std::list<SequencedTask>::const_iterator i = pending_tasks_.begin();
446 i != pending_tasks_.end(); ++i) {
447 if (IsSequenceTokenRunnable(i->sequence_token_id)) {
448 runnable_task_count++;
449
450 // We can return as soon as we've found more runnable tasks than
451 // available threads.
452 if (runnable_task_count > available_threads)
453 return true;
454 }
455 }
456
457 // Did not fund more runnable tasks than threads.
458 return false;
459 }
460
461 bool SequencedWorkerPool::Inner::IsRunningBlockingTask() const {
462 lock_.AssertAcquired();
463
464 for (std::set<Worker*>::const_iterator i = running_threads_.begin();
465 i != running_threads_.end(); ++i) {
466 if ((*i)->current_shutdown_mode() == SequencedWorkerPool::BLOCK_SHUTDOWN ||
467 (*i)->current_shutdown_mode() == SequencedWorkerPool::SKIP_ON_SHUTDOWN)
468 return true;
469 }
470 return false;
471 }
472
473 // SequencedWorkerPool --------------------------------------------------------
474
// Creates a pool that will lazily grow up to |max_threads| worker threads.
SequencedWorkerPool::SequencedWorkerPool(size_t max_threads)
    : inner_(new Inner(max_threads)) {
}
478
SequencedWorkerPool::~SequencedWorkerPool() {
  // Nothing to do here: Inner is refcounted (RefCountedThreadSafe) and —
  // presumably held via a scoped_refptr declared in the header — is torn
  // down when the last reference drops. TODO(review): confirm the header
  // declares |inner_| as scoped_refptr.
}
481
// Forwards to Inner; returns a fresh, unique sequence token.
SequencedWorkerPool::SequenceToken SequencedWorkerPool::GetSequenceToken() {
  return inner_->GetSequenceToken();
}
485
// Forwards to Inner; returns the shared token associated with |name|,
// creating it on first use.
SequencedWorkerPool::SequenceToken SequencedWorkerPool::GetNamedSequenceToken(
    const std::string& name) {
  return inner_->GetNamedSequenceToken(name);
}
490
// Posts an unsequenced task (sequence token 0 is reserved to mean
// "no sequence"). Returns false if the pool is shutting down.
bool SequencedWorkerPool::PostWorkerTask(
    WorkerShutdown shutdown_behavior,
    const tracked_objects::Location& from_here,
    const base::Closure& task) {
  return inner_->PostTask(0, shutdown_behavior, from_here, task);
}
497
// Posts a task ordered after all previously-posted tasks that share
// |sequence_token|. Returns false if the pool is shutting down.
bool SequencedWorkerPool::PostSequencedWorkerTask(
    SequenceToken sequence_token,
    WorkerShutdown shutdown_behavior,
    const tracked_objects::Location& from_here,
    const base::Closure& task) {
  return inner_->PostTask(sequence_token.id_, shutdown_behavior,
                          from_here, task);
}
506
// Forwards to Inner::Shutdown(); blocks until all shutdown-blocking tasks
// complete. Must only be called once.
void SequencedWorkerPool::Shutdown() {
  inner_->Shutdown();
}
510
// Test-only hook: installs an observer notified before Shutdown() blocks.
void SequencedWorkerPool::SetTestingObserver(TestingObserver* observer) {
  inner_->SetTestingObserver(observer);
}
OLDNEW
« no previous file with comments | « content/common/sequenced_worker_pool.h ('k') | content/common/sequenced_worker_pool_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698