OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "base/threading/sequenced_worker_pool.h" |
| 6 |
| 7 #include <deque> |
| 8 #include <set> |
| 9 |
| 10 #include "base/atomicops.h" |
| 11 #include "base/bind.h" |
| 12 #include "base/memory/scoped_ptr.h" |
| 13 #include "base/metrics/histogram.h" |
| 14 #include "base/stringprintf.h" |
| 15 #include "base/synchronization/condition_variable.h" |
| 16 #include "base/threading/simple_thread.h" |
| 17 #include "base/threading/thread.h" |
| 18 |
| 19 namespace base { |
| 20 |
| 21 namespace { |
| 22 |
// One queued unit of work, together with the metadata the pool needs to
// schedule it and to decide its fate at shutdown.
struct SequencedTask {
  int sequence_token_id;  // 0 appears to mean "unsequenced" (see IsSequenceTokenRunnable).
  SequencedWorkerPool::WorkerShutdown shutdown_behavior;
  tracked_objects::Location location;  // Where the task was posted from.
  base::Closure task;
};
| 29 |
| 30 } // namespace |
| 31 |
| 32 // Worker --------------------------------------------------------------------- |
| 33 |
// A single pooled thread. The constructor starts the thread immediately;
// all scheduling state lives in Inner, which owns the Worker objects.
class SequencedWorkerPool::Worker : public base::SimpleThread {
 public:
  Worker(SequencedWorkerPool::Inner* inner,
         int thread_number,
         const std::string& thread_name_prefix);
  ~Worker();

  // SimpleThread implementation. This actually runs the background thread.
  virtual void Run();

 private:
  SequencedWorkerPool::Inner* inner_;  // Non-owning: Inner owns and joins us.
  SequencedWorkerPool::WorkerShutdown current_shutdown_mode_;

  DISALLOW_COPY_AND_ASSIGN(Worker);
};
| 50 |
| 51 |
| 52 // Inner ---------------------------------------------------------------------- |
| 53 |
// The ref-counted implementation shared between the pool object and its
// worker threads. It owns the pending-task queue, the worker threads, and
// all of the shutdown bookkeeping.
class SequencedWorkerPool::Inner
    : public base::RefCountedThreadSafe<SequencedWorkerPool::Inner> {
 public:
  Inner(size_t max_threads, const std::string& thread_name_prefix);
  virtual ~Inner();

  // Backends for SequenceWorkerPool.
  SequenceToken GetSequenceToken();
  SequenceToken GetNamedSequenceToken(const std::string& name);
  bool PostTask(int sequence_token_id,
                SequencedWorkerPool::WorkerShutdown shutdown_behavior,
                const tracked_objects::Location& from_here,
                const base::Closure& task);
  void Shutdown();
  void SetTestingObserver(SequencedWorkerPool::TestingObserver* observer);

  // Runs the worker loop on the background thread.
  void ThreadLoop(Worker* this_worker);

 private:
  // The calling code should clear the given delete_these_outside_lock
  // vector the next time the lock is released. See the implementation for
  // a more detailed description.
  bool GetWork(SequencedTask* task,
               std::vector<base::Closure>* delete_these_outside_lock);

  // Performs init and cleanup around running the given task. WillRun...
  // returns the value from PrepareToStartAdditionalThreadIfHelpful.
  // The calling code should call FinishStartingAdditionalThread once the
  // lock is released if the return value is nonzero.
  int WillRunWorkerTask(const SequencedTask& task);
  void DidRunWorkerTask(const SequencedTask& task);

  // Returns true if there are no threads currently running the given
  // sequence token.
  bool IsSequenceTokenRunnable(int sequence_token_id) const;

  // Checks if all threads are busy and the addition of one more could run an
  // additional task waiting in the queue. This must be called from within
  // the lock.
  //
  // If another thread is helpful, this will mark the thread as being in the
  // process of starting and returns the number of the new thread, which will
  // be 1 or more. The caller should then call FinishStartingAdditionalThread
  // to complete initialization once the lock is released.
  //
  // If another thread is not necessary, returns 0.
  //
  // See the implementation for more.
  int PrepareToStartAdditionalThreadIfHelpful();

  // The second part of thread creation after
  // PrepareToStartAdditionalThreadIfHelpful with the thread number it
  // generated. This actually creates the thread and should be called outside
  // the lock to avoid blocking important work starting a thread in the lock.
  void FinishStartingAdditionalThread(int thread_number);

  // Checks whether there is work left that's blocking shutdown. Must be
  // called inside the lock.
  bool CanShutdown() const;

  // The last sequence number used. Managed by GetSequenceToken, since this
  // only does threadsafe increment operations, you do not need to hold the
  // lock.
  volatile base::subtle::Atomic32 last_sequence_number_;

  // This lock protects |everything in this class|. Do not read or modify
  // anything without holding this lock. Do not block while holding this
  // lock.
  base::Lock lock_;

  // Condition variable used to wake up worker threads when a task is runnable.
  base::ConditionVariable cond_var_;

  // The maximum number of worker threads we'll create.
  size_t max_threads_;

  std::string thread_name_prefix_;

  // Associates all known sequence token names with their IDs.
  std::map<std::string, int> named_sequence_tokens_;

  // Owning pointers to all threads we've created so far. Since we lazily
  // create threads, this may be less than max_threads_ and will be initially
  // empty.
  std::vector<linked_ptr<Worker> > threads_;

  // Set to true when we're in the process of creating another thread.
  // See PrepareToStartAdditionalThreadIfHelpful for more.
  bool thread_being_created_;

  // Number of threads currently running tasks.
  size_t running_thread_count_;

  // Number of threads currently running tasks that have the BLOCK_SHUTDOWN
  // flag set.
  size_t blocking_shutdown_thread_count_;

  // In-order list of all pending tasks. These are tasks waiting for a thread
  // to run on or that are blocked on a previous task in their sequence.
  //
  // We maintain the pending_task_count_ separately for metrics because
  // list.size() can be linear time.
  std::list<SequencedTask> pending_tasks_;
  size_t pending_task_count_;

  // Number of tasks in the pending_tasks_ list that are marked as blocking
  // shutdown.
  size_t blocking_shutdown_pending_task_count_;

  // Lists all sequence tokens currently executing.
  std::set<int> current_sequences_;

  // Set when the app is terminating and no further tasks should be allowed,
  // though we may still be running existing tasks.
  bool terminating_;

  // Set when Shutdown is called to do some assertions.
  bool shutdown_called_;

  // Non-owning observer used by tests; may be NULL.
  SequencedWorkerPool::TestingObserver* testing_observer_;
};
| 176 |
| 177 SequencedWorkerPool::Worker::Worker(SequencedWorkerPool::Inner* inner, |
| 178 int thread_number, |
| 179 const std::string& prefix) |
| 180 : base::SimpleThread( |
| 181 prefix + StringPrintf("Worker%d", thread_number).c_str()), |
| 182 inner_(inner), |
| 183 current_shutdown_mode_(SequencedWorkerPool::CONTINUE_ON_SHUTDOWN) { |
| 184 Start(); |
| 185 } |
| 186 |
SequencedWorkerPool::Worker::~Worker() {
  // Nothing to do here: Inner::~Inner() joins every worker before the owning
  // threads_ vector is destroyed.
}
| 189 |
// Thread entry point: delegates the whole loop to Inner.
void SequencedWorkerPool::Worker::Run() {
  // Just jump back to the Inner object to run the thread, since it has all the
  // tracking information and queues. It might be more natural to implement
  // using DelegateSimpleThread and have Inner implement the Delegate to avoid
  // having these worker objects at all, but that method lacks the ability to
  // send thread-specific information easily to the thread loop.
  inner_->ThreadLoop(this);
}
| 198 |
| 199 SequencedWorkerPool::Inner::Inner(size_t max_threads, |
| 200 const std::string& thread_name_prefix) |
| 201 : last_sequence_number_(0), |
| 202 lock_(), |
| 203 cond_var_(&lock_), |
| 204 max_threads_(max_threads), |
| 205 thread_name_prefix_(thread_name_prefix), |
| 206 thread_being_created_(false), |
| 207 running_thread_count_(0), |
| 208 blocking_shutdown_thread_count_(0), |
| 209 pending_task_count_(0), |
| 210 blocking_shutdown_pending_task_count_(0), |
| 211 terminating_(false), |
| 212 shutdown_called_(false) { |
| 213 } |
| 214 |
SequencedWorkerPool::Inner::~Inner() {
  // You must call Shutdown() before destroying the pool.
  DCHECK(shutdown_called_);

  // Need to explicitly join with the threads before they're destroyed or else
  // they will be running when our object is half torn down.
  for (size_t i = 0; i < threads_.size(); i++)
    threads_[i]->Join();
  threads_.clear();
}
| 225 |
| 226 SequencedWorkerPool::SequenceToken |
| 227 SequencedWorkerPool::Inner::GetSequenceToken() { |
| 228 base::subtle::Atomic32 result = |
| 229 base::subtle::NoBarrier_AtomicIncrement(&last_sequence_number_, 1); |
| 230 return SequenceToken(static_cast<int>(result)); |
| 231 } |
| 232 |
| 233 SequencedWorkerPool::SequenceToken |
| 234 SequencedWorkerPool::Inner::GetNamedSequenceToken( |
| 235 const std::string& name) { |
| 236 base::AutoLock lock(lock_); |
| 237 std::map<std::string, int>::const_iterator found = |
| 238 named_sequence_tokens_.find(name); |
| 239 if (found != named_sequence_tokens_.end()) |
| 240 return SequenceToken(found->second); // Got an existing one. |
| 241 |
| 242 // Create a new one for this name. |
| 243 SequenceToken result = GetSequenceToken(); |
| 244 named_sequence_tokens_.insert(std::make_pair(name, result.id_)); |
| 245 return result; |
| 246 } |
| 247 |
// Enqueues |task|, possibly spinning up or waking a worker to run it.
// Returns false (and drops the task) if shutdown has already started.
bool SequencedWorkerPool::Inner::PostTask(
    int sequence_token_id,
    SequencedWorkerPool::WorkerShutdown shutdown_behavior,
    const tracked_objects::Location& from_here,
    const base::Closure& task) {
  // Build the queue entry before taking the lock; only the queue
  // manipulation below needs protection.
  SequencedTask sequenced;
  sequenced.sequence_token_id = sequence_token_id;
  sequenced.shutdown_behavior = shutdown_behavior;
  sequenced.location = from_here;
  sequenced.task = task;

  int create_thread_id = 0;
  {
    base::AutoLock lock(lock_);
    if (terminating_)
      return false;

    pending_tasks_.push_back(sequenced);
    pending_task_count_++;
    if (shutdown_behavior == BLOCK_SHUTDOWN)
      blocking_shutdown_pending_task_count_++;

    create_thread_id = PrepareToStartAdditionalThreadIfHelpful();
  }

  // Actually start the additional thread or signal an existing one now that
  // we're outside the lock.
  if (create_thread_id)
    FinishStartingAdditionalThread(create_thread_id);
  else
    cond_var_.Signal();

  return true;
}
| 282 |
// Begins shutdown: rejects new tasks and blocks until all shutdown-blocking
// work (and any in-flight thread creation) has completed.
void SequencedWorkerPool::Inner::Shutdown() {
  // NOTE(review): shutdown_called_ is read and written without holding
  // lock_, so this early-out is only safe if Shutdown() is never invoked
  // from two threads concurrently -- confirm with callers.
  if (shutdown_called_)
    return;
  shutdown_called_ = true;

  // Mark us as terminated and go through and drop all tasks that aren't
  // required to run on shutdown. Since no new tasks will get posted once the
  // terminated flag is set, this ensures that all remaining tasks are required
  // for shutdown whenever the terminating_ flag is set.
  {
    base::AutoLock lock(lock_);
    DCHECK(!terminating_);
    terminating_ = true;

    // Tickle the threads. This will wake up a waiting one so it will know that
    // it can exit, which in turn will wake up any other waiting ones.
    cond_var_.Signal();

    // There are no pending or running tasks blocking shutdown, we're done.
    if (CanShutdown())
      return;
  }

  // If we get here, we know we're either waiting on a blocking task that's
  // currently running, waiting on a blocking task that hasn't been scheduled
  // yet, or both. Block on the "queue empty" event to know when all tasks are
  // complete. This must be done outside the lock.
  if (testing_observer_)
    testing_observer_->WillWaitForShutdown();

  base::TimeTicks shutdown_wait_begin = base::TimeTicks::Now();

  // Wait for no more tasks.
  {
    base::AutoLock lock(lock_);
    while (!CanShutdown())
      cond_var_.Wait();
  }
  UMA_HISTOGRAM_TIMES("SequencedWorkerPool.ShutdownDelayTime",
                      base::TimeTicks::Now() - shutdown_wait_begin);
}
| 324 |
// Installs a test-only observer notified before Shutdown() blocks.
// |observer| is not owned; pass NULL to clear.
void SequencedWorkerPool::Inner::SetTestingObserver(
    SequencedWorkerPool::TestingObserver* observer) {
  base::AutoLock lock(lock_);
  testing_observer_ = observer;
}
| 330 |
// Main loop for every worker thread. Holds the lock except while actually
// running a task (AutoUnlock below); exits once terminating_ is set and no
// runnable work remains.
void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
  {
    base::AutoLock lock(lock_);
    // This thread was announced via PrepareToStartAdditionalThreadIfHelpful;
    // clearing the flag here is what allows the next thread to be created.
    DCHECK(thread_being_created_);
    thread_being_created_ = false;
    while (true) {
      // See GetWork for what delete_these_outside_lock is doing.
      SequencedTask task;
      std::vector<base::Closure> delete_these_outside_lock;
      if (GetWork(&task, &delete_these_outside_lock)) {
        int new_thread_id = WillRunWorkerTask(task);
        {
          base::AutoUnlock unlock(lock_);
          cond_var_.Signal();
          delete_these_outside_lock.clear();

          // Complete thread creation outside the lock if necessary.
          if (new_thread_id)
            FinishStartingAdditionalThread(new_thread_id);

          task.task.Run();

          // Make sure our task is erased outside the lock for the same reason
          // we do this with delete_these_outside_lock.
          task.task = base::Closure();
        }
        DidRunWorkerTask(task);  // Must be done inside the lock.
      } else {
        // When we're terminating and there's no more work, we can shut down.
        // You can't get more tasks posted once terminating_ is set. There may
        // be some tasks stuck behind running ones with the same sequence
        // token, but additional threads won't help this case.
        if (terminating_)
          break;
        cond_var_.Wait();
      }
    }
  }

  // We noticed we should exit. Wake up the next worker so it knows it should
  // exit as well (because the Shutdown() code only signals once).
  cond_var_.Signal();
}
| 374 |
// Pops the next runnable task into |*task|, returning true on success.
// During termination, non-BLOCK_SHUTDOWN tasks encountered along the way are
// removed from the queue and handed to |delete_these_outside_lock| so their
// closures are destroyed after the lock is released. Must be called with
// lock_ held.
bool SequencedWorkerPool::Inner::GetWork(
    SequencedTask* task,
    std::vector<base::Closure>* delete_these_outside_lock) {
  lock_.AssertAcquired();

  DCHECK_EQ(pending_tasks_.size(), pending_task_count_);
  UMA_HISTOGRAM_COUNTS_100("SequencedWorkerPool.TaskCount",
                           static_cast<int>(pending_task_count_));

  // Find the next task with a sequence token that's not currently in use.
  // If the token is in use, that means another thread is running something
  // in that sequence, and we can't run it without going out-of-order.
  //
  // This algorithm is simple and fair, but inefficient in some cases. For
  // example, say somebody schedules 1000 slow tasks with the same sequence
  // number. We'll have to go through all those tasks each time we feel like
  // there might be work to schedule. If this proves to be a problem, we
  // should make this more efficient.
  //
  // One possible enhancement would be to keep a map from sequence ID to a
  // list of pending but currently blocked SequencedTasks for that ID.
  // When a worker finishes a task of one sequence token, it can pick up the
  // next one from that token right away.
  //
  // This may lead to starvation if there are sufficient numbers of sequences
  // in use. To alleviate this, we could add an incrementing priority counter
  // to each SequencedTask. Then maintain a priority_queue of all runnable
  // tasks, sorted by priority counter. When a sequenced task is completed
  // we would pop the head element off of that tasks pending list and add it
  // to the priority queue. Then we would run the first item in the priority
  // queue.
  bool found_task = false;
  int unrunnable_tasks = 0;
  std::list<SequencedTask>::iterator i = pending_tasks_.begin();
  while (i != pending_tasks_.end()) {
    if (!IsSequenceTokenRunnable(i->sequence_token_id)) {
      unrunnable_tasks++;
      ++i;
      continue;
    }

    if (terminating_ && i->shutdown_behavior != BLOCK_SHUTDOWN) {
      // We're shutting down and the task we just found isn't blocking
      // shutdown. Delete it and get more work.
      //
      // Note that we do not want to delete unrunnable tasks. Deleting a task
      // can have side effects (like freeing some objects) and deleting a
      // task that's supposed to run after one that's currently running could
      // cause an obscure crash.
      //
      // We really want to delete these tasks outside the lock in case the
      // closures are holding refs to objects that want to post work from
      // their destructors (which would deadlock). The closures are
      // internally refcounted, so we just need to keep a copy of them alive
      // until the lock is exited. The calling code can just clear() the
      // vector they passed to us once the lock is exited to make this
      // happen.
      delete_these_outside_lock->push_back(i->task);
      i = pending_tasks_.erase(i);
      pending_task_count_--;
    } else {
      // Found a runnable task.
      *task = *i;
      i = pending_tasks_.erase(i);
      pending_task_count_--;
      if (task->shutdown_behavior == BLOCK_SHUTDOWN)
        blocking_shutdown_pending_task_count_--;

      found_task = true;
      break;
    }
  }

  // Track the number of tasks we had to skip over to see if we should be
  // making this more efficient. If this number ever becomes large or is
  // frequently "some", we should consider the optimization above.
  UMA_HISTOGRAM_COUNTS_100("SequencedWorkerPool.UnrunnableTaskCount",
                           unrunnable_tasks);
  return found_task;
}
| 455 |
// Bookkeeping performed just before a worker runs |task|: marks the sequence
// busy and bumps thread counters. Returns the thread number from
// PrepareToStartAdditionalThreadIfHelpful (0 if no new thread is needed);
// the caller must finish that creation outside the lock. Must be called
// with lock_ held.
int SequencedWorkerPool::Inner::WillRunWorkerTask(const SequencedTask& task) {
  lock_.AssertAcquired();

  // Mark the task's sequence number as in use.
  if (task.sequence_token_id)
    current_sequences_.insert(task.sequence_token_id);

  running_thread_count_++;

  if (task.shutdown_behavior == SequencedWorkerPool::BLOCK_SHUTDOWN)
    blocking_shutdown_thread_count_++;

  // We just picked up a task. Since StartAdditionalThreadIfHelpful only
  // creates a new thread if there is no free one, there is a race when posting
  // tasks that many tasks could have been posted before a thread started
  // running them, so only one thread would have been created. So we also check
  // whether we should create more threads after removing our task from the
  // queue, which also has the nice side effect of creating the workers from
  // background threads rather than the main thread of the app.
  //
  // If another thread wasn't created, we want to wake up an existing thread
  // if there is one waiting to pick up the next task.
  //
  // Note that we really need to do this *before* running the task, not
  // after. Otherwise, if more than one task is posted, the creation of the
  // second thread (since we only create one at a time) will be blocked by
  // the execution of the first task, which could be arbitrarily long.
  return PrepareToStartAdditionalThreadIfHelpful();
}
| 485 |
| 486 void SequencedWorkerPool::Inner::DidRunWorkerTask(const SequencedTask& task) { |
| 487 lock_.AssertAcquired(); |
| 488 |
| 489 if (task.shutdown_behavior == SequencedWorkerPool::BLOCK_SHUTDOWN) { |
| 490 DCHECK_GT(blocking_shutdown_thread_count_, 0u); |
| 491 blocking_shutdown_thread_count_--; |
| 492 } |
| 493 |
| 494 if (task.sequence_token_id) |
| 495 current_sequences_.erase(task.sequence_token_id); |
| 496 |
| 497 running_thread_count_--; |
| 498 } |
| 499 |
| 500 bool SequencedWorkerPool::Inner::IsSequenceTokenRunnable( |
| 501 int sequence_token_id) const { |
| 502 lock_.AssertAcquired(); |
| 503 return !sequence_token_id || |
| 504 current_sequences_.find(sequence_token_id) == |
| 505 current_sequences_.end(); |
| 506 } |
| 507 |
// Decides, under the lock, whether spawning one more worker would let queued
// work run now. Returns the new thread's number (> 0) after setting
// thread_being_created_, or 0 if no thread should be started.
int SequencedWorkerPool::Inner::PrepareToStartAdditionalThreadIfHelpful() {
  // How thread creation works:
  //
  // We'd like to avoid creating threads with the lock held. However, we
  // need to be sure that we have an accurate accounting of the threads for
  // proper Joining and deletion on shutdown.
  //
  // We need to figure out if we need another thread with the lock held, which
  // is what this function does. It then marks us as in the process of creating
  // a thread. When we do shutdown, we wait until the thread_being_created_
  // flag is cleared, which ensures that the new thread is properly added to
  // all the data structures and we can't leak it. Once shutdown starts, we'll
  // refuse to create more threads or they would be leaked.
  //
  // Note that this creates a mostly benign race condition on shutdown that
  // will cause fewer workers to be created than one would expect. It isn't
  // much of an issue in real life, but affects some tests. Since we only spawn
  // one worker at a time, the following sequence of events can happen:
  //
  // 1. Main thread posts a bunch of unrelated tasks that would normally be
  //    run on separate threads.
  // 2. The first task post causes us to start a worker. Other tasks do not
  //    cause a worker to start since one is pending.
  // 3. Main thread initiates shutdown.
  // 4. No more threads are created since the terminating_ flag is set.
  //
  // The result is that one may expect that max_threads_ workers to be created
  // given the workload, but in reality fewer may be created because the
  // sequence of thread creation on the background threads is racing with the
  // shutdown call.
  if (!terminating_ &&
      !thread_being_created_ &&
      threads_.size() < max_threads_ &&
      running_thread_count_ == threads_.size()) {
    // We could use an additional thread if there's work to be done.
    for (std::list<SequencedTask>::iterator i = pending_tasks_.begin();
         i != pending_tasks_.end(); ++i) {
      if (IsSequenceTokenRunnable(i->sequence_token_id)) {
        // Found a runnable task, mark the thread as being started.
        thread_being_created_ = true;
        return static_cast<int>(threads_.size() + 1);
      }
    }
  }
  return 0;
}
| 554 |
| 555 void SequencedWorkerPool::Inner::FinishStartingAdditionalThread( |
| 556 int thread_number) { |
| 557 // Called outside of the lock. |
| 558 DCHECK(thread_number > 0); |
| 559 Worker* new_thread = new Worker(this, thread_number, thread_name_prefix_); |
| 560 |
| 561 { |
| 562 base::AutoLock lock(lock_); |
| 563 // Note: we can't clear the thread_being_created_ flag here, since that |
| 564 // will race with the worker thread actually starting. We'll clear this |
| 565 // flag when the worker actually starts. |
| 566 threads_.push_back(linked_ptr<Worker>(new_thread)); |
| 567 } |
| 568 |
| 569 // Clearing the thread_being_created_ flag could be the last thing the |
| 570 // shutdown code was waiting for, so we need to wake it up. |
| 571 cond_var_.Signal(); |
| 572 } |
| 573 |
| 574 bool SequencedWorkerPool::Inner::CanShutdown() const { |
| 575 lock_.AssertAcquired(); |
| 576 // See PrepareToStartAdditionalThreadIfHelpful for how thread creation works. |
| 577 return !thread_being_created_ && |
| 578 blocking_shutdown_thread_count_ == 0 && |
| 579 blocking_shutdown_pending_task_count_ == 0; |
| 580 } |
| 581 |
| 582 // SequencedWorkerPool -------------------------------------------------------- |
| 583 |
// Creates the pool; no threads are started until the first task is posted
// (Inner creates workers lazily).
SequencedWorkerPool::SequencedWorkerPool(size_t max_threads,
                                         const std::string& thread_name_prefix)
    : inner_(new Inner(max_threads, thread_name_prefix)) {
}
| 588 |
SequencedWorkerPool::~SequencedWorkerPool() {
  // Does not implicitly shut down: Inner's destructor DCHECKs that
  // Shutdown() was called first.
}
| 591 |
// Returns a brand-new sequence token. Thread-safe (atomic increment inside
// Inner; no lock taken).
SequencedWorkerPool::SequenceToken SequencedWorkerPool::GetSequenceToken() {
  return inner_->GetSequenceToken();
}
| 595 |
// Returns the token for |name|, creating it on first use; repeated calls
// with the same name return equal tokens.
SequencedWorkerPool::SequenceToken SequencedWorkerPool::GetNamedSequenceToken(
    const std::string& name) {
  return inner_->GetNamedSequenceToken(name);
}
| 600 |
// Posts an unsequenced task (token 0) that blocks shutdown until it has run.
// Returns false if shutdown has already started.
bool SequencedWorkerPool::PostWorkerTask(
    const tracked_objects::Location& from_here,
    const base::Closure& task) {
  return inner_->PostTask(0, BLOCK_SHUTDOWN, from_here, task);
}
| 606 |
// As PostWorkerTask, but with the caller's choice of shutdown behavior.
bool SequencedWorkerPool::PostWorkerTaskWithShutdownBehavior(
    const tracked_objects::Location& from_here,
    const base::Closure& task,
    WorkerShutdown shutdown_behavior) {
  return inner_->PostTask(0, shutdown_behavior, from_here, task);
}
| 613 |
// Posts a task that runs in order with all other tasks sharing
// |sequence_token|, blocking shutdown until it has run.
bool SequencedWorkerPool::PostSequencedWorkerTask(
    SequenceToken sequence_token,
    const tracked_objects::Location& from_here,
    const base::Closure& task) {
  return inner_->PostTask(sequence_token.id_, BLOCK_SHUTDOWN,
                          from_here, task);
}
| 621 |
// As PostSequencedWorkerTask, but with the caller's choice of shutdown
// behavior.
bool SequencedWorkerPool::PostSequencedWorkerTaskWithShutdownBehavior(
    SequenceToken sequence_token,
    const tracked_objects::Location& from_here,
    const base::Closure& task,
    WorkerShutdown shutdown_behavior) {
  return inner_->PostTask(sequence_token.id_, shutdown_behavior,
                          from_here, task);
}
| 630 |
// Blocks until all shutdown-blocking tasks have completed. Must be called
// before the pool is destroyed (see ~Inner's DCHECK).
void SequencedWorkerPool::Shutdown() {
  inner_->Shutdown();
}
| 634 |
// Test-only hook; |observer| is notified before Shutdown() blocks. Not owned.
void SequencedWorkerPool::SetTestingObserver(TestingObserver* observer) {
  inner_->SetTestingObserver(observer);
}
| 638 |
| 639 } // namespace base |
OLD | NEW |