Chromium Code Reviews

Side by Side Diff: content/common/sequenced_worker_pool.cc

Issue 8416019: Add a sequenced worker pool (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: '' Created 9 years, 1 month ago
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "content/common/sequenced_worker_pool.h"
6
7 #include <deque>
8 #include <set>
9
10 #include "base/atomicops.h"
11 #include "base/bind.h"
12 #include "base/memory/scoped_ptr.h"
13 #include "base/metrics/histogram.h"
14 #include "base/threading/thread.h"
15 #include "base/stringprintf.h"
16 #include "base/synchronization/waitable_event.h"
17
18 namespace {
19
20 struct SequencedTask {
21 int sequence_token_id;
22 SequencedWorkerPool::WorkerShutdown shutdown_behavior;
23 tracked_objects::Location location;
24 base::Closure task;
25 };
26
27 class Worker {
28 public:
29 explicit Worker(int thread_number)
30 : thread_(StringPrintf("Browser worker %d", thread_number).c_str()),
31 current_shutdown_mode_(SequencedWorkerPool::CONTINUE_ON_SHUTDOWN) {
32 thread_.Start();
33 }
34 ~Worker() {
35 }
36
37 // Posts a task to the worker's message loop for running. The actual task
38 // to run should be Inner's RunTask. The SequencedTask is passed in only
39 // so statistics can be extracted from it.
40 //
41 // This should only be called from within Inner's lock.
jar (doing other things) 2011/11/23 20:08:16 It is surprising to require a lock (when posting a
42 void PostTask(const SequencedTask& sequenced_info,
43 const base::Closure& task) {
44 // Use the original task birthplace as the source for this call so we can
45 // trace back who made us do this work.
46 thread_.message_loop()->PostTask(sequenced_info.location, task);
jar (doing other things) 2011/11/23 20:08:16 It is more surprising to see a local lock held as
47 current_shutdown_mode_ = sequenced_info.shutdown_behavior;
48 }
49
50 // Cleans up after a task is complete. This should be called from within
51 // Inner's lock as soon as the task is complete.
52 void WorkComplete() {
53 current_shutdown_mode_ = SequencedWorkerPool::CONTINUE_ON_SHUTDOWN;
54 }
55
56 // When running a task, the shutdown mode is stored on the worker. It will
57 // be CONTINUE_ON_SHUTDOWN if there is no running task.
58 //
59 // This must only be called when holding the Inner class' lock.
60 SequencedWorkerPool::WorkerShutdown current_shutdown_mode() const {
61 return current_shutdown_mode_;
62 }
63
64 private:
65 base::Thread thread_;
66
67 SequencedWorkerPool::WorkerShutdown current_shutdown_mode_;
68
69 DISALLOW_COPY_AND_ASSIGN(Worker);
70 };
71
72 } // namespace
73
74 // Inner ----------------------------------------------------------------------
75
76 class SequencedWorkerPool::Inner
jar (doing other things) 2011/11/23 20:08:16 Suggest you add a comment to clarify that this is
77 : public base::RefCountedThreadSafe<SequencedWorkerPool::Inner> {
78 public:
79 Inner(size_t max_threads);
80
81 // Returns a unique token that can be used to sequence tasks posted to
82 // PostSequencedWorkerTask(). Valid tokens are always nonzero.
83 SequenceToken GetSequenceToken();
84
85 // Posts a task. See PostSequencedWorkerTask. The token ID will be 0 for
86 // unsequenced tasks.
87 bool PostTask(int sequence_token_id,
88 SequencedWorkerPool::WorkerShutdown shutdown_behavior,
89 const tracked_objects::Location& from_here,
90 const base::Closure& task);
91
92 void Shutdown();
93
94 void SetTestingObserver(SequencedWorkerPool::TestingObserver* observer);
95
96 private:
97 // Attempts to schedule work on an available thread. Call this when a
98 // task is added to the pending_tasks_ list or a thread is added to the
99 // idle_threads_ list. The lock must already be held when calling this.
100 void ScheduleWork();
101
102 // Worker task that actually runs on a worker thread to execute the given
103 // task. The worker it's running on is passed as the first argument.
104 void RunTask(Worker* this_worker, const SequencedTask& task);
105
106 // Returns true if any worker is running a shutdown-blocking task. This
107 // includes BLOCK_SHUTDOWN and also SKIP_ON_SHUTDOWN tasks that are already
108 // running (since they must be run to completion). It must be called with
109 // the lock held.
110 bool IsRunningBlockingTask() const;
111
112 // The last sequence number used. Managed by GetSequenceToken, which
113 // only does thread-safe increment operations, so you do not need to hold
114 // the lock when calling it.
115 volatile base::subtle::Atomic32 last_sequence_number_;
116
117 // This lock protects |everything in this class|. Do not read or modify
118 // anything without holding this lock. Do not block while holding this
119 // lock.
120 base::Lock lock_;
121
122 // The maximum number of worker threads we'll create.
123 size_t max_threads_;
124
125 // Owning pointers to all threads we've created so far. Since we lazily
126 // create threads, this may be less than max_threads_ and will be initially
127 // empty. Each of these pointers will also appear either in idle_threads_
128 // or running_threads_.
129 std::vector<linked_ptr<Worker> > threads_;
130
131 // Lists all currently idle worker threads. These pointers are non-owning,
132 // the threads_ array manages their lifetimes.
133 std::deque<Worker*> idle_threads_;
jar (doing other things) 2011/11/23 20:08:16 You should use a stack, as I don't think there is
134
135 // The opposite of idle_threads_, this contains non-owning pointers to all
136 // currently running or scheduled threads.
137 std::set<Worker*> running_threads_;
138
139 // In-order list of all pending tasks. These are tasks waiting for a thread
140 // to run on or that are blocked on a previous task in their sequence.
141 //
142 // We maintain the pending_task_count_ separately for metrics because
143 // list.size() can be linear time.
144 std::list<SequencedTask> pending_tasks_;
145 size_t pending_task_count_;
146
147 // Lists all sequence tokens currently executing.
148 std::set<int> current_sequences_;
149
150 // Set when the app is terminating and no further tasks should be allowed.
151 bool terminated_;
152
153 SequencedWorkerPool::TestingObserver* testing_observer_;
154
155 // Created lazily when terminated_ is set and there are pending tasks, this
156 // is signaled by ScheduleWork when all blocking tasks have completed.
157 scoped_ptr<base::WaitableEvent> shutdown_complete_;
158 };
159
160 SequencedWorkerPool::Inner::Inner(size_t max_threads)
161 : last_sequence_number_(0),
162 max_threads_(max_threads),
163 pending_task_count_(0),
164 terminated_(false), testing_observer_(NULL) {
165 }
166
167 SequencedWorkerPool::SequenceToken
168 SequencedWorkerPool::Inner::GetSequenceToken() {
jar (doing other things) 2011/11/23 20:08:16 Cool as this is... I'm suspicious that we need an
brettw 2011/11/23 21:01:13 I think an enum destroys the whole point of this p
169 base::subtle::Atomic32 result =
170 base::subtle::NoBarrier_AtomicIncrement(&last_sequence_number_, 1);
171 return SequenceToken(static_cast<int>(result));
172 }
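A minimal caller-side sketch of how these tokens are meant to be used, illustrating why they are generated dynamically rather than drawn from a fixed enum (HypotheticalDatabase and its write closure are illustrative only, not part of this patch):

  class HypotheticalDatabase {
   public:
    explicit HypotheticalDatabase(SequencedWorkerPool* pool)
        : pool_(pool),
          token_(pool->GetSequenceToken()) {}  // One sequence per instance.

    void PostWrite(const base::Closure& write) {
      // Writes for this instance run in order; instances created later get
      // their own tokens and are not serialized against this one, so the
      // number of sequences never has to be known at compile time.
      pool_->PostSequencedWorkerTask(token_,
                                     SequencedWorkerPool::BLOCK_SHUTDOWN,
                                     FROM_HERE, write);
    }

   private:
    SequencedWorkerPool* pool_;
    SequencedWorkerPool::SequenceToken token_;
  };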
173
174 bool SequencedWorkerPool::Inner::PostTask(
175 int sequence_token_id,
176 SequencedWorkerPool::WorkerShutdown shutdown_behavior,
177 const tracked_objects::Location& from_here,
178 const base::Closure& task) {
179 base::AutoLock lock(lock_);
180
181 if (terminated_)
182 return false;
183
184 SequencedTask sequenced;
185 sequenced.sequence_token_id = sequence_token_id;
186 sequenced.shutdown_behavior = shutdown_behavior;
187 sequenced.location = from_here;
188 sequenced.task = task;
189 pending_tasks_.push_back(sequenced);
190 pending_task_count_++;
191
192 ScheduleWork();
193 return true;
194 }
195
196 void SequencedWorkerPool::Inner::Shutdown() {
jar (doing other things) 2011/11/23 20:08:16 This function is written as a blocking call. I mu
brettw 2011/11/23 21:01:13 This has to be the main thread. It has to block si
197 // Mark us as terminated and go through and drop all tasks that aren't
198 // required to run on shutdown. Since no new tasks will get posted once the
199 // terminated flag is set, this ensures that all remaining tasks are required
200 // for shutdown whenever the terminated_ flag is set.
201 {
202 base::AutoLock lock(lock_);
203 DCHECK(!terminated_);
204 terminated_ = true;
205
206 std::list<SequencedTask>::iterator i = pending_tasks_.begin();
207 while (i != pending_tasks_.end()) {
208 if (i->shutdown_behavior == BLOCK_SHUTDOWN) {
209 i++;
210 } else {
211 i = pending_tasks_.erase(i);
jar (doing other things) 2011/11/23 20:08:16 Does this potentially induce a task destructor? D
212 pending_task_count_--;
213 }
214 }
215 DCHECK_EQ(pending_tasks_.size(), pending_task_count_);
216
217 if (pending_tasks_.empty() && !IsRunningBlockingTask()) {
218 // There are no pending or running tasks blocking shutdown, we're done.
219 return;
220 }
221
222 // Need to wait for some tasks, create the event.
223 DCHECK(!shutdown_complete_.get());
224 shutdown_complete_.reset(new base::WaitableEvent(false, false));
225 }
226
227 // If we get here, we know we're either waiting on a blocking task that's
228 // currently running, waiting on a blocking task that hasn't been scheduled
229 // yet, or both. Block on the "queue empty" event to know when all tasks are
230 // complete. This must be done outside the lock.
231 if (testing_observer_)
232 testing_observer_->WillWaitForShutdown();
233
234 base::TimeTicks shutdown_wait_begin = base::TimeTicks::Now();
235 shutdown_complete_->Wait();
236 UMA_HISTOGRAM_TIMES("SequencedWorkerPool.ShutdownDelayTime",
237 base::TimeTicks::Now() - shutdown_wait_begin);
238 }
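A short sketch of the shutdown semantics this function implements, seen from the caller's side (WriteCriticalState and PrefetchIcons are hypothetical closures used only for illustration):

  // Must run before the process exits; Shutdown() waits for it.
  pool->PostWorkerTask(SequencedWorkerPool::BLOCK_SHUTDOWN,
                       FROM_HERE, base::Bind(&WriteCriticalState));

  // Best-effort work; dropped at shutdown if it has not started yet.
  pool->PostWorkerTask(SequencedWorkerPool::SKIP_ON_SHUTDOWN,
                       FROM_HERE, base::Bind(&PrefetchIcons));

  // Blocks the calling (main) thread until all BLOCK_SHUTDOWN tasks and any
  // already-running SKIP_ON_SHUTDOWN tasks have finished; queued tasks that
  // are not BLOCK_SHUTDOWN are discarded.
  pool->Shutdown();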
239
240 void SequencedWorkerPool::Inner::SetTestingObserver(
241 SequencedWorkerPool::TestingObserver* observer) {
242 base::AutoLock lock(lock_);
243 testing_observer_ = observer;
244 }
245
246 void SequencedWorkerPool::Inner::ScheduleWork() {
jar (doing other things) 2011/11/23 20:08:16 You might consider adding a comment that this func
247 lock_.AssertAcquired();
248
249 DCHECK_EQ(pending_tasks_.size(), pending_task_count_);
250 UMA_HISTOGRAM_COUNTS_100("SequencedWorkerPool.TaskCount",
251 pending_task_count_);
252
253 if (terminated_ && shutdown_complete_.get()) {
254 // When the app is terminating, check for "no more blocking work" and
255 // signal if shutdown tasks are complete.
256 if (pending_tasks_.empty() && !IsRunningBlockingTask()) {
257 shutdown_complete_->Signal();
258 return;
259 }
260 }
261
262 if (pending_tasks_.empty() ||
263 (idle_threads_.empty() && threads_.size() == max_threads_))
264 return; // No work to schedule or no threads to schedule them on.
265
266 // Find the next task with a sequence token that's not currently in use.
267 // If the token is in use, that means another thread is running something
268 // in that sequence, and we can't run it without going out-of-order.
269 //
270 // This algorithm is simple and fair, but inefficient in some cases. For
271 // example, say somebody schedules 1000 slow tasks with the same sequence
272 // number. We'll have to go through all those tasks each time we feel like
273 // there might be work to schedule. If this proves to be a problem, we
274 // should make this more efficient.
275 //
276 // One possible enhancement would be to keep a map from sequence ID to a
277 // list of pending but currently blocked SequencedTasks for that ID.
278 // When a worker finishes a task of one sequence token, it can pick up the
279 // next one from that token right away.
jar (doing other things) 2011/11/23 20:08:16 That is a good optimization for efficiency... but
280 //
281 // This may lead to starvation if there are sufficient numbers of sequences
282 // in use. To alleviate this, we could add an incrementing priority counter
283 // to each SequencedTask. Then maintain a priority_queue of all runnable
284 // tasks, sorted by priority counter. When a sequenced task is completed
285 // we would pop the head element off of that token's pending list and add it
286 // to the priority queue. Then we would run the first item in the priority
287 // queue.
288 int unrunnable_tasks = 0;
289 for (std::list<SequencedTask>::iterator i = pending_tasks_.begin();
290 i != pending_tasks_.end(); ++i) {
291 if (!i->sequence_token_id ||
292 current_sequences_.find(i->sequence_token_id) ==
293 current_sequences_.end()) {
294 // This token is free, run this task on the first available worker,
295 // creating one if necessary.
296 Worker* worker;
jar (doing other things) 2011/11/23 20:08:16 Should this be a linked_ptr?
297 if (idle_threads_.empty()) {
298 // We should have early exited above if we're out of threads to
299 // schedule, so there should always be a free slot.
300 DCHECK(threads_.size() < max_threads_);
301 worker = new Worker(threads_.size());
302 threads_.push_back(linked_ptr<Worker>(worker));
303 } else {
304 worker = idle_threads_.front();
305 idle_threads_.pop_front();
306 }
307 running_threads_.insert(worker);
308
309 // Mark the task's sequence number as in use.
310 if (i->sequence_token_id)
311 current_sequences_.insert(i->sequence_token_id);
312
313 worker->PostTask(*i, base::Bind(&Inner::RunTask, this, worker, *i));
jar (doing other things) 2011/11/23 20:08:16 Now that we have the worker thread out of the idle
314 pending_tasks_.erase(i);
315 pending_task_count_--;
316 break;
317 }
318 unrunnable_tasks++;
319 }
320
321 // Track the number of tasks we had to skip over to see if we should be
322 // making this more efficient. If this number ever becomes large or is
323 // frequently "some", we should consider the optimization above.
324 UMA_HISTOGRAM_COUNTS_100("SequencedWorkerPool.UnrunnableTaskCount",
325 unrunnable_tasks);
326 }
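A rough sketch of the data structures the comment in ScheduleWork() proposes as a possible future optimization. None of this is in the patch: the priority_counter field would be a new member on SequencedTask, and the sketch would also need <map> and <queue> includes.

  // Orders runnable tasks so the one posted earliest (smallest counter) is
  // popped first; std::priority_queue keeps the "largest" element on top,
  // hence the inverted comparison.
  struct OldestTaskFirst {
    bool operator()(const SequencedTask& a, const SequencedTask& b) const {
      return a.priority_counter > b.priority_counter;
    }
  };

  // Tasks whose sequence token is not currently running on any worker.
  std::priority_queue<SequencedTask,
                      std::vector<SequencedTask>,
                      OldestTaskFirst> runnable_tasks_;

  // Tasks blocked behind an earlier task with the same token, keyed by
  // sequence token ID. When a worker finishes a task for token T, the front
  // of blocked_tasks_[T] moves into runnable_tasks_.
  std::map<int, std::queue<SequencedTask> > blocked_tasks_;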
327
328 void SequencedWorkerPool::Inner::RunTask(Worker* this_worker,
329 const SequencedTask& task) {
330 task.task.Run();
331
332 // Now that this thread is free, mark ourselves and try to schedule more.
333 {
334 base::AutoLock lock(lock_);
335 this_worker->WorkComplete();
336
337 if (task.sequence_token_id)
338 current_sequences_.erase(task.sequence_token_id);
339 running_threads_.erase(this_worker);
340 idle_threads_.push_front(this_worker);
341
342 ScheduleWork();
343 }
344 }
345
346 bool SequencedWorkerPool::Inner::IsRunningBlockingTask() const {
347 lock_.AssertAcquired();
348
349 for (std::set<Worker*>::const_iterator i = running_threads_.begin();
350 i != running_threads_.end(); ++i) {
351 if ((*i)->current_shutdown_mode() == SequencedWorkerPool::BLOCK_SHUTDOWN ||
352 (*i)->current_shutdown_mode() == SequencedWorkerPool::SKIP_ON_SHUTDOWN)
353 return true;
354 }
355 return false;
356 }
357
358 // SequencedWorkerPool --------------------------------------------------------
359
360 SequencedWorkerPool::SequencedWorkerPool(size_t max_threads)
361 : inner_(new Inner(max_threads)) {
362 }
363
364 SequencedWorkerPool::~SequencedWorkerPool() {
365 }
366
367 SequencedWorkerPool::SequenceToken SequencedWorkerPool::GetSequenceToken() {
368 return inner_->GetSequenceToken();
369 }
370
371 bool SequencedWorkerPool::PostWorkerTask(
372 WorkerShutdown shutdown_behavior,
373 const tracked_objects::Location& from_here,
374 const base::Closure& task) {
375 return inner_->PostTask(0, shutdown_behavior, from_here, task);
376 }
377
378 bool SequencedWorkerPool::PostSequencedWorkerTask(
379 SequenceToken sequence_token,
380 WorkerShutdown shutdown_behavior,
381 const tracked_objects::Location& from_here,
382 const base::Closure& task) {
383 return inner_->PostTask(sequence_token.id_, shutdown_behavior,
384 from_here, task);
385 }
386
387 void SequencedWorkerPool::Shutdown() {
388 inner_->Shutdown();
389 }
390
391 void SequencedWorkerPool::SetTestingObserver(TestingObserver* observer) {
392 inner_->SetTestingObserver(observer);
393 }
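A minimal end-to-end usage sketch of the public API defined in this file. DoUnorderedWork, DoIndexWrite, and DoIndexCompact are hypothetical closures, and the ownership model shown (a plain stack object) is assumed rather than taken from the header, which is not part of this diff:

  SequencedWorkerPool pool(4);  // At most four lazily created worker threads.

  // Unsequenced work: may run on any worker, in any order relative to other
  // tasks.
  pool.PostWorkerTask(SequencedWorkerPool::SKIP_ON_SHUTDOWN,
                      FROM_HERE, base::Bind(&DoUnorderedWork));

  // Sequenced work: the second task will not start until the first finishes,
  // though either may run on any worker thread.
  SequencedWorkerPool::SequenceToken token = pool.GetSequenceToken();
  pool.PostSequencedWorkerTask(token, SequencedWorkerPool::BLOCK_SHUTDOWN,
                               FROM_HERE, base::Bind(&DoIndexWrite));
  pool.PostSequencedWorkerTask(token, SequencedWorkerPool::BLOCK_SHUTDOWN,
                               FROM_HERE, base::Bind(&DoIndexCompact));

  // Blocks until both BLOCK_SHUTDOWN tasks have completed.
  pool.Shutdown();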