Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "components/scheduler/base/task_queue_manager.h" | 5 #include "components/scheduler/base/task_queue_manager.h" |
| 6 | 6 |
| 7 #include <queue> | 7 #include <queue> |
| 8 #include <set> | 8 #include <set> |
| 9 | 9 |
| 10 #include "base/bind.h" | 10 #include "base/bind.h" |
| 11 #include "base/time/default_tick_clock.h" | |
| 12 #include "components/scheduler/base/lazy_now.h" | 11 #include "components/scheduler/base/lazy_now.h" |
| 13 #include "components/scheduler/base/nestable_single_thread_task_runner.h" | |
| 14 #include "components/scheduler/base/task_queue_impl.h" | 12 #include "components/scheduler/base/task_queue_impl.h" |
| 13 #include "components/scheduler/base/task_queue_manager_delegate.h" | |
| 15 #include "components/scheduler/base/task_queue_selector.h" | 14 #include "components/scheduler/base/task_queue_selector.h" |
| 16 #include "components/scheduler/base/task_queue_sets.h" | 15 #include "components/scheduler/base/task_queue_sets.h" |
| 17 | 16 |
| 18 namespace { | 17 namespace { |
| 19 const int64_t kMaxTimeTicks = std::numeric_limits<int64>::max(); | 18 const int64_t kMaxTimeTicks = std::numeric_limits<int64>::max(); |
| 20 } | 19 } |
| 21 | 20 |
| 22 namespace scheduler { | 21 namespace scheduler { |
| 23 | 22 |
| 24 TaskQueueManager::TaskQueueManager( | 23 TaskQueueManager::TaskQueueManager( |
| 25 scoped_refptr<NestableSingleThreadTaskRunner> main_task_runner, | 24 scoped_refptr<TaskQueueManagerDelegate> delegate, |
| 26 const char* tracing_category, | 25 const char* tracing_category, |
| 27 const char* disabled_by_default_tracing_category, | 26 const char* disabled_by_default_tracing_category, |
| 28 const char* disabled_by_default_verbose_tracing_category) | 27 const char* disabled_by_default_verbose_tracing_category) |
| 29 : main_task_runner_(main_task_runner), | 28 : delegate_(delegate), |
| 30 task_was_run_on_quiescence_monitored_queue_(false), | 29 task_was_run_on_quiescence_monitored_queue_(false), |
| 31 pending_dowork_count_(0), | 30 pending_dowork_count_(0), |
| 32 work_batch_size_(1), | 31 work_batch_size_(1), |
| 33 time_source_(new base::DefaultTickClock), | |
| 34 tracing_category_(tracing_category), | 32 tracing_category_(tracing_category), |
| 35 disabled_by_default_tracing_category_( | 33 disabled_by_default_tracing_category_( |
| 36 disabled_by_default_tracing_category), | 34 disabled_by_default_tracing_category), |
| 37 disabled_by_default_verbose_tracing_category_( | 35 disabled_by_default_verbose_tracing_category_( |
| 38 disabled_by_default_verbose_tracing_category), | 36 disabled_by_default_verbose_tracing_category), |
| 39 observer_(nullptr), | 37 observer_(nullptr), |
| 40 deletion_sentinel_(new DeletionSentinel()), | 38 deletion_sentinel_(new DeletionSentinel()), |
| 41 weak_factory_(this) { | 39 weak_factory_(this) { |
| 42 DCHECK(main_task_runner->RunsTasksOnCurrentThread()); | 40 DCHECK(delegate->RunsTasksOnCurrentThread()); |
| 43 TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category, | 41 TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category, |
| 44 "TaskQueueManager", this); | 42 "TaskQueueManager", this); |
| 45 selector_.SetTaskQueueSelectorObserver(this); | 43 selector_.SetTaskQueueSelectorObserver(this); |
| 46 | 44 |
| 47 do_work_from_main_thread_closure_ = | 45 do_work_from_main_thread_closure_ = |
| 48 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), true); | 46 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), true); |
| 49 do_work_from_other_thread_closure_ = | 47 do_work_from_other_thread_closure_ = |
| 50 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), false); | 48 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), false); |
| 51 delayed_queue_wakeup_closure_ = | 49 delayed_queue_wakeup_closure_ = |
| 52 base::Bind(&TaskQueueManager::DelayedDoWork, weak_factory_.GetWeakPtr()); | 50 base::Bind(&TaskQueueManager::DelayedDoWork, weak_factory_.GetWeakPtr()); |
| (...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 168 newly_updatable_.pop_back(); | 166 newly_updatable_.pop_back(); |
| 169 } | 167 } |
| 170 } | 168 } |
| 171 | 169 |
| 172 void TaskQueueManager::UpdateWorkQueues( | 170 void TaskQueueManager::UpdateWorkQueues( |
| 173 bool should_trigger_wakeup, | 171 bool should_trigger_wakeup, |
| 174 const internal::TaskQueueImpl::Task* previous_task) { | 172 const internal::TaskQueueImpl::Task* previous_task) { |
| 175 DCHECK(main_thread_checker_.CalledOnValidThread()); | 173 DCHECK(main_thread_checker_.CalledOnValidThread()); |
| 176 TRACE_EVENT0(disabled_by_default_tracing_category_, | 174 TRACE_EVENT0(disabled_by_default_tracing_category_, |
| 177 "TaskQueueManager::UpdateWorkQueues"); | 175 "TaskQueueManager::UpdateWorkQueues"); |
| 178 internal::LazyNow lazy_now(this); | 176 internal::LazyNow lazy_now(tick_clock()); |
| 179 | 177 |
| 180 // Move any ready delayed tasks into the incoming queues. | 178 // Move any ready delayed tasks into the incoming queues. |
| 181 WakeupReadyDelayedQueues(&lazy_now); | 179 WakeupReadyDelayedQueues(&lazy_now); |
| 182 | 180 |
| 183 MoveNewlyUpdatableQueuesIntoUpdatableQueueSet(); | 181 MoveNewlyUpdatableQueuesIntoUpdatableQueueSet(); |
| 184 | 182 |
| 185 auto iter = updatable_queue_set_.begin(); | 183 auto iter = updatable_queue_set_.begin(); |
| 186 while (iter != updatable_queue_set_.end()) { | 184 while (iter != updatable_queue_set_.end()) { |
| 187 internal::TaskQueueImpl* queue = *iter++; | 185 internal::TaskQueueImpl* queue = *iter++; |
| 188 // NOTE Update work queue may erase itself from |updatable_queue_set_|. | 186 // NOTE Update work queue may erase itself from |updatable_queue_set_|. |
| 189 // This is fine, erasing an element won't invalidate any iterator, as long | 187 // This is fine, erasing an element won't invalidate any iterator, as long |
| 190 // as the iterator isn't the element being deleted. | 188 // as the iterator isn't the element being deleted. |
| 191 if (queue->work_queue().empty()) | 189 if (queue->work_queue().empty()) |
| 192 queue->UpdateWorkQueue(&lazy_now, should_trigger_wakeup, previous_task); | 190 queue->UpdateWorkQueue(&lazy_now, should_trigger_wakeup, previous_task); |
| 193 } | 191 } |
| 194 } | 192 } |
| 195 | 193 |
| 196 void TaskQueueManager::ScheduleDelayedWorkTask( | 194 void TaskQueueManager::ScheduleDelayedWorkTask( |
| 197 scoped_refptr<internal::TaskQueueImpl> queue, | 195 scoped_refptr<internal::TaskQueueImpl> queue, |
| 198 base::TimeTicks delayed_run_time) { | 196 base::TimeTicks delayed_run_time) { |
| 199 internal::LazyNow lazy_now(this); | 197 internal::LazyNow lazy_now(tick_clock()); |
| 200 ScheduleDelayedWork(queue.get(), delayed_run_time, &lazy_now); | 198 ScheduleDelayedWork(queue.get(), delayed_run_time, &lazy_now); |
| 201 } | 199 } |
| 202 | 200 |
| 203 void TaskQueueManager::ScheduleDelayedWork(internal::TaskQueueImpl* queue, | 201 void TaskQueueManager::ScheduleDelayedWork(internal::TaskQueueImpl* queue, |
| 204 base::TimeTicks delayed_run_time, | 202 base::TimeTicks delayed_run_time, |
| 205 internal::LazyNow* lazy_now) { | 203 internal::LazyNow* lazy_now) { |
| 206 if (!main_task_runner_->BelongsToCurrentThread()) { | 204 if (!delegate_->BelongsToCurrentThread()) { |
| 207 // NOTE posting a delayed task from a different thread is not expected to be | 205 // NOTE posting a delayed task from a different thread is not expected to be |
| 208 // common. This pathway is less optimal than perhaps it could be because | 206 // common. This pathway is less optimal than perhaps it could be because |
| 209 // it causes two main thread tasks to be run. Should this assumption prove | 207 // it causes two main thread tasks to be run. Should this assumption prove |
| 210 // to be false in future, we may need to revisit this. | 208 // to be false in future, we may need to revisit this. |
| 211 main_task_runner_->PostTask( | 209 delegate_->PostTask( |
| 212 FROM_HERE, base::Bind(&TaskQueueManager::ScheduleDelayedWorkTask, | 210 FROM_HERE, base::Bind(&TaskQueueManager::ScheduleDelayedWorkTask, |
| 213 weak_factory_.GetWeakPtr(), | 211 weak_factory_.GetWeakPtr(), |
| 214 scoped_refptr<internal::TaskQueueImpl>(queue), | 212 scoped_refptr<internal::TaskQueueImpl>(queue), |
| 215 delayed_run_time)); | 213 delayed_run_time)); |
| 216 return; | 214 return; |
| 217 } | 215 } |
| 218 | 216 |
| 219 // Make sure there's one (and only one) task posted to |main_task_runner_| | 217 // Make sure there's one (and only one) task posted to |delegate_| |
| 220 // to call |DelayedDoWork| at |delayed_run_time|. | 218 // to call |DelayedDoWork| at |delayed_run_time|. |
| 221 if (delayed_wakeup_multimap_.find(delayed_run_time) == | 219 if (delayed_wakeup_multimap_.find(delayed_run_time) == |
| 222 delayed_wakeup_multimap_.end()) { | 220 delayed_wakeup_multimap_.end()) { |
| 223 base::TimeDelta delay = | 221 base::TimeDelta delay = |
| 224 std::max(base::TimeDelta(), delayed_run_time - lazy_now->Now()); | 222 std::max(base::TimeDelta(), delayed_run_time - lazy_now->Now()); |
| 225 main_task_runner_->PostDelayedTask(FROM_HERE, delayed_queue_wakeup_closure_, | 223 delegate_->PostDelayedTask(FROM_HERE, delayed_queue_wakeup_closure_, delay); |
| 226 delay); | |
| 227 } | 224 } |
| 228 delayed_wakeup_multimap_.insert(std::make_pair(delayed_run_time, queue)); | 225 delayed_wakeup_multimap_.insert(std::make_pair(delayed_run_time, queue)); |
| 229 } | 226 } |
| 230 | 227 |
| 231 void TaskQueueManager::DelayedDoWork() { | 228 void TaskQueueManager::DelayedDoWork() { |
| 232 DCHECK(main_thread_checker_.CalledOnValidThread()); | 229 DCHECK(main_thread_checker_.CalledOnValidThread()); |
| 233 | 230 |
| 234 { | 231 { |
| 235 internal::LazyNow lazy_now(this); | 232 internal::LazyNow lazy_now(tick_clock()); |
| 236 WakeupReadyDelayedQueues(&lazy_now); | 233 WakeupReadyDelayedQueues(&lazy_now); |
| 237 } | 234 } |
| 238 | 235 |
| 239 DoWork(false); | 236 DoWork(false); |
| 240 } | 237 } |
| 241 | 238 |
| 242 void TaskQueueManager::WakeupReadyDelayedQueues(internal::LazyNow* lazy_now) { | 239 void TaskQueueManager::WakeupReadyDelayedQueues(internal::LazyNow* lazy_now) { |
| 243 // Wake up any queues with pending delayed work. Note std::multimap stores | 240 // Wake up any queues with pending delayed work. Note std::multimap stores |
| 244 // the elements sorted by key, so the begin() iterator points to the earliest | 241 // the elements sorted by key, so the begin() iterator points to the earliest |
| 245 // queue to wakeup. | 242 // queue to wakeup. |
| 246 std::set<internal::TaskQueueImpl*> dedup_set; | 243 std::set<internal::TaskQueueImpl*> dedup_set; |
| 247 while (!delayed_wakeup_multimap_.empty()) { | 244 while (!delayed_wakeup_multimap_.empty()) { |
| 248 DelayedWakeupMultimap::iterator next_wakeup = | 245 DelayedWakeupMultimap::iterator next_wakeup = |
| 249 delayed_wakeup_multimap_.begin(); | 246 delayed_wakeup_multimap_.begin(); |
| 250 if (next_wakeup->first > lazy_now->Now()) | 247 if (next_wakeup->first > lazy_now->Now()) |
| 251 break; | 248 break; |
| 252 // A queue could have any number of delayed tasks pending so it's worthwhile | 249 // A queue could have any number of delayed tasks pending so it's worthwhile |
| 253 // deduping calls to MoveReadyDelayedTasksToIncomingQueue since it takes a | 250 // deduping calls to MoveReadyDelayedTasksToIncomingQueue since it takes a |
| 254 // lock. NOTE the order in which these are called matters since the order | 251 // lock. NOTE the order in which these are called matters since the order |
| 255 // in which EnqueueTaskLocks is called is respected when choosing which | 252 // in which EnqueueTaskLocks is called is respected when choosing which |
| 256 // queue to execute a task from. | 253 // queue to execute a task from. |
| 257 if (dedup_set.insert(next_wakeup->second).second) | 254 if (dedup_set.insert(next_wakeup->second).second) |
| 258 next_wakeup->second->MoveReadyDelayedTasksToIncomingQueue(lazy_now); | 255 next_wakeup->second->MoveReadyDelayedTasksToIncomingQueue(lazy_now); |
| 259 delayed_wakeup_multimap_.erase(next_wakeup); | 256 delayed_wakeup_multimap_.erase(next_wakeup); |
| 260 } | 257 } |
| 261 } | 258 } |
| 262 | 259 |
| 263 void TaskQueueManager::MaybePostDoWorkOnMainRunner() { | 260 void TaskQueueManager::MaybePostDoWorkOnMainRunner() { |
| 264 bool on_main_thread = main_task_runner_->BelongsToCurrentThread(); | 261 bool on_main_thread = delegate_->BelongsToCurrentThread(); |
| 265 if (on_main_thread) { | 262 if (on_main_thread) { |
| 266 // We only want one pending DoWork posted from the main thread, or we risk | 263 // We only want one pending DoWork posted from the main thread, or we risk |
| 267 // an explosion of pending DoWorks which could starve out everything else. | 264 // an explosion of pending DoWorks which could starve out everything else. |
| 268 if (pending_dowork_count_ > 0) { | 265 if (pending_dowork_count_ > 0) { |
| 269 return; | 266 return; |
| 270 } | 267 } |
| 271 pending_dowork_count_++; | 268 pending_dowork_count_++; |
| 272 main_task_runner_->PostTask(FROM_HERE, do_work_from_main_thread_closure_); | 269 delegate_->PostTask(FROM_HERE, do_work_from_main_thread_closure_); |
| 273 } else { | 270 } else { |
| 274 main_task_runner_->PostTask(FROM_HERE, do_work_from_other_thread_closure_); | 271 delegate_->PostTask(FROM_HERE, do_work_from_other_thread_closure_); |
| 275 } | 272 } |
| 276 } | 273 } |
| 277 | 274 |
| 278 void TaskQueueManager::DoWork(bool decrement_pending_dowork_count) { | 275 void TaskQueueManager::DoWork(bool decrement_pending_dowork_count) { |
| 279 if (decrement_pending_dowork_count) { | 276 if (decrement_pending_dowork_count) { |
| 280 pending_dowork_count_--; | 277 pending_dowork_count_--; |
| 281 DCHECK_GE(pending_dowork_count_, 0); | 278 DCHECK_GE(pending_dowork_count_, 0); |
| 282 } | 279 } |
| 283 DCHECK(main_thread_checker_.CalledOnValidThread()); | 280 DCHECK(main_thread_checker_.CalledOnValidThread()); |
| 284 | 281 |
| 285 if (!main_task_runner_->IsNested()) | 282 if (!delegate_->IsNested()) |
| 286 queues_to_delete_.clear(); | 283 queues_to_delete_.clear(); |
| 287 | 284 |
| 288 // Pass false and nullptr to UpdateWorkQueues here to prevent waking up a | 285 // Pass false and nullptr to UpdateWorkQueues here to prevent waking up a |
| 289 // pump-after-wakeup queue. | 286 // pump-after-wakeup queue. |
| 290 UpdateWorkQueues(false, nullptr); | 287 UpdateWorkQueues(false, nullptr); |
| 291 | 288 |
| 292 internal::TaskQueueImpl::Task previous_task; | 289 internal::TaskQueueImpl::Task previous_task; |
| 293 for (int i = 0; i < work_batch_size_; i++) { | 290 for (int i = 0; i < work_batch_size_; i++) { |
| 294 internal::TaskQueueImpl* queue; | 291 internal::TaskQueueImpl* queue; |
| 295 if (!SelectQueueToService(&queue)) | 292 if (!SelectQueueToService(&queue)) |
| 296 break; | 293 break; |
| 297 | 294 |
| 298 switch (ProcessTaskFromWorkQueue(queue, &previous_task)) { | 295 switch (ProcessTaskFromWorkQueue(queue, &previous_task)) { |
| 299 case ProcessTaskResult::DEFERRED: | 296 case ProcessTaskResult::DEFERRED: |
| 300 // If a task was deferred, try again with another task. Note that this | 297 // If a task was deferred, try again with another task. Note that this |
| 301 // means deferred tasks (i.e. non-nestable tasks) will never trigger | 298 // means deferred tasks (i.e. non-nestable tasks) will never trigger |
| 302 // queue wake-ups. | 299 // queue wake-ups. |
| 303 continue; | 300 continue; |
| 304 case ProcessTaskResult::EXECUTED: | 301 case ProcessTaskResult::EXECUTED: |
| 305 break; | 302 break; |
| 306 case ProcessTaskResult::TASK_QUEUE_MANAGER_DELETED: | 303 case ProcessTaskResult::TASK_QUEUE_MANAGER_DELETED: |
| 307 return; // The TaskQueueManager got deleted, we must bail out. | 304 return; // The TaskQueueManager got deleted, we must bail out. |
| 308 } | 305 } |
| 309 bool should_trigger_wakeup = queue->wakeup_policy() == | 306 bool should_trigger_wakeup = queue->wakeup_policy() == |
| 310 TaskQueue::WakeupPolicy::CAN_WAKE_OTHER_QUEUES; | 307 TaskQueue::WakeupPolicy::CAN_WAKE_OTHER_QUEUES; |
| 311 UpdateWorkQueues(should_trigger_wakeup, &previous_task); | 308 UpdateWorkQueues(should_trigger_wakeup, &previous_task); |
| 312 | 309 |
| 313 // Only run a single task per batch in nested run loops so that we can | 310 // Only run a single task per batch in nested run loops so that we can |
| 314 // properly exit the nested loop when someone calls RunLoop::Quit(). | 311 // properly exit the nested loop when someone calls RunLoop::Quit(). |
| 315 if (main_task_runner_->IsNested()) | 312 if (delegate_->IsNested()) |
| 316 break; | 313 break; |
| 317 } | 314 } |
| 318 | 315 |
| 319 // TODO(alexclarke): Consider refactoring the above loop to terminate only | 316 // TODO(alexclarke): Consider refactoring the above loop to terminate only |
| 320 // when there's no more work left to be done, rather than posting a | 317 // when there's no more work left to be done, rather than posting a |
| 321 // continuation task. | 318 // continuation task. |
| 322 if (!selector_.EnabledWorkQueuesEmpty()) | 319 if (!selector_.EnabledWorkQueuesEmpty()) { |
| 323 MaybePostDoWorkOnMainRunner(); | 320 MaybePostDoWorkOnMainRunner(); |
| 321 } else { | |
| 322 // Tell the task runner we have no more work. | |
| 323 delegate_->OnNoMoreWork(); | |
|
Sami
2015/10/30 11:11:20
Were we going to rename this to OnNoMoreImmediateW
alex clarke (OOO till 29th)
2015/10/30 12:12:24
Done.
| |
| 324 } | |
| 324 } | 325 } |
| 325 | 326 |
| 326 bool TaskQueueManager::SelectQueueToService( | 327 bool TaskQueueManager::SelectQueueToService( |
| 327 internal::TaskQueueImpl** out_queue) { | 328 internal::TaskQueueImpl** out_queue) { |
| 328 bool should_run = selector_.SelectQueueToService(out_queue); | 329 bool should_run = selector_.SelectQueueToService(out_queue); |
| 329 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID( | 330 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID( |
| 330 disabled_by_default_tracing_category_, "TaskQueueManager", this, | 331 disabled_by_default_tracing_category_, "TaskQueueManager", this, |
| 331 AsValueWithSelectorResult(should_run, *out_queue)); | 332 AsValueWithSelectorResult(should_run, *out_queue)); |
| 332 return should_run; | 333 return should_run; |
| 333 } | 334 } |
| 334 | 335 |
| 335 void TaskQueueManager::DidQueueTask( | 336 void TaskQueueManager::DidQueueTask( |
| 336 const internal::TaskQueueImpl::Task& pending_task) { | 337 const internal::TaskQueueImpl::Task& pending_task) { |
| 337 task_annotator_.DidQueueTask("TaskQueueManager::PostTask", pending_task); | 338 task_annotator_.DidQueueTask("TaskQueueManager::PostTask", pending_task); |
| 338 } | 339 } |
| 339 | 340 |
| 340 TaskQueueManager::ProcessTaskResult TaskQueueManager::ProcessTaskFromWorkQueue( | 341 TaskQueueManager::ProcessTaskResult TaskQueueManager::ProcessTaskFromWorkQueue( |
| 341 internal::TaskQueueImpl* queue, | 342 internal::TaskQueueImpl* queue, |
| 342 internal::TaskQueueImpl::Task* out_previous_task) { | 343 internal::TaskQueueImpl::Task* out_previous_task) { |
| 343 DCHECK(main_thread_checker_.CalledOnValidThread()); | 344 DCHECK(main_thread_checker_.CalledOnValidThread()); |
| 344 scoped_refptr<DeletionSentinel> protect(deletion_sentinel_); | 345 scoped_refptr<DeletionSentinel> protect(deletion_sentinel_); |
| 345 // TODO(alexclarke): consider std::move() when allowed. | 346 // TODO(alexclarke): consider std::move() when allowed. |
| 346 internal::TaskQueueImpl::Task pending_task = queue->TakeTaskFromWorkQueue(); | 347 internal::TaskQueueImpl::Task pending_task = queue->TakeTaskFromWorkQueue(); |
| 347 | 348 |
| 348 if (queue->GetQuiescenceMonitored()) | 349 if (queue->GetQuiescenceMonitored()) |
| 349 task_was_run_on_quiescence_monitored_queue_ = true; | 350 task_was_run_on_quiescence_monitored_queue_ = true; |
| 350 | 351 |
| 351 if (!pending_task.nestable && main_task_runner_->IsNested()) { | 352 if (!pending_task.nestable && delegate_->IsNested()) { |
| 352 // Defer non-nestable work to the main task runner. NOTE these tasks can be | 353 // Defer non-nestable work to the main task runner. NOTE these tasks can be |
| 353 // arbitrarily delayed so the additional delay should not be a problem. | 354 // arbitrarily delayed so the additional delay should not be a problem. |
| 354 // TODO(skyostil): Figure out a way to not forget which task queue the | 355 // TODO(skyostil): Figure out a way to not forget which task queue the |
| 355 // task is associated with. See http://crbug.com/522843. | 356 // task is associated with. See http://crbug.com/522843. |
| 356 main_task_runner_->PostNonNestableTask(pending_task.posted_from, | 357 delegate_->PostNonNestableTask(pending_task.posted_from, pending_task.task); |
| 357 pending_task.task); | |
| 358 return ProcessTaskResult::DEFERRED; | 358 return ProcessTaskResult::DEFERRED; |
| 359 } | 359 } |
| 360 | 360 |
| 361 TRACE_TASK_EXECUTION("TaskQueueManager::ProcessTaskFromWorkQueue", | 361 TRACE_TASK_EXECUTION("TaskQueueManager::ProcessTaskFromWorkQueue", |
| 362 pending_task); | 362 pending_task); |
| 363 if (queue->GetShouldNotifyObservers()) { | 363 if (queue->GetShouldNotifyObservers()) { |
| 364 FOR_EACH_OBSERVER(base::MessageLoop::TaskObserver, task_observers_, | 364 FOR_EACH_OBSERVER(base::MessageLoop::TaskObserver, task_observers_, |
| 365 WillProcessTask(pending_task)); | 365 WillProcessTask(pending_task)); |
| 366 queue->NotifyWillProcessTask(pending_task); | 366 queue->NotifyWillProcessTask(pending_task); |
| 367 } | 367 } |
| (...skipping 11 matching lines...) Expand all Loading... | |
| 379 DidProcessTask(pending_task)); | 379 DidProcessTask(pending_task)); |
| 380 queue->NotifyDidProcessTask(pending_task); | 380 queue->NotifyDidProcessTask(pending_task); |
| 381 } | 381 } |
| 382 | 382 |
| 383 pending_task.task.Reset(); | 383 pending_task.task.Reset(); |
| 384 *out_previous_task = pending_task; | 384 *out_previous_task = pending_task; |
| 385 return ProcessTaskResult::EXECUTED; | 385 return ProcessTaskResult::EXECUTED; |
| 386 } | 386 } |
| 387 | 387 |
| 388 bool TaskQueueManager::RunsTasksOnCurrentThread() const { | 388 bool TaskQueueManager::RunsTasksOnCurrentThread() const { |
| 389 return main_task_runner_->RunsTasksOnCurrentThread(); | 389 return delegate_->RunsTasksOnCurrentThread(); |
| 390 } | |
| 391 | |
| 392 bool TaskQueueManager::PostDelayedTask( | |
| 393 const tracked_objects::Location& from_here, | |
| 394 const base::Closure& task, | |
| 395 base::TimeDelta delay) { | |
| 396 DCHECK_GE(delay, base::TimeDelta()); | |
| 397 return main_task_runner_->PostDelayedTask(from_here, task, delay); | |
| 398 } | 390 } |
| 399 | 391 |
| 400 void TaskQueueManager::SetWorkBatchSize(int work_batch_size) { | 392 void TaskQueueManager::SetWorkBatchSize(int work_batch_size) { |
| 401 DCHECK(main_thread_checker_.CalledOnValidThread()); | 393 DCHECK(main_thread_checker_.CalledOnValidThread()); |
| 402 DCHECK_GE(work_batch_size, 1); | 394 DCHECK_GE(work_batch_size, 1); |
| 403 work_batch_size_ = work_batch_size; | 395 work_batch_size_ = work_batch_size; |
| 404 } | 396 } |
| 405 | 397 |
| 406 void TaskQueueManager::AddTaskObserver( | 398 void TaskQueueManager::AddTaskObserver( |
| 407 base::MessageLoop::TaskObserver* task_observer) { | 399 base::MessageLoop::TaskObserver* task_observer) { |
| 408 DCHECK(main_thread_checker_.CalledOnValidThread()); | 400 DCHECK(main_thread_checker_.CalledOnValidThread()); |
| 409 task_observers_.AddObserver(task_observer); | 401 task_observers_.AddObserver(task_observer); |
| 410 } | 402 } |
| 411 | 403 |
| 412 void TaskQueueManager::RemoveTaskObserver( | 404 void TaskQueueManager::RemoveTaskObserver( |
| 413 base::MessageLoop::TaskObserver* task_observer) { | 405 base::MessageLoop::TaskObserver* task_observer) { |
| 414 DCHECK(main_thread_checker_.CalledOnValidThread()); | 406 DCHECK(main_thread_checker_.CalledOnValidThread()); |
| 415 task_observers_.RemoveObserver(task_observer); | 407 task_observers_.RemoveObserver(task_observer); |
| 416 } | 408 } |
| 417 | 409 |
| 418 void TaskQueueManager::SetTimeSourceForTesting( | |
| 419 scoped_ptr<base::TickClock> time_source) { | |
| 420 DCHECK(main_thread_checker_.CalledOnValidThread()); | |
| 421 time_source_ = time_source.Pass(); | |
| 422 } | |
| 423 | |
| 424 bool TaskQueueManager::GetAndClearSystemIsQuiescentBit() { | 410 bool TaskQueueManager::GetAndClearSystemIsQuiescentBit() { |
| 425 bool task_was_run = task_was_run_on_quiescence_monitored_queue_; | 411 bool task_was_run = task_was_run_on_quiescence_monitored_queue_; |
| 426 task_was_run_on_quiescence_monitored_queue_ = false; | 412 task_was_run_on_quiescence_monitored_queue_ = false; |
| 427 return !task_was_run; | 413 return !task_was_run; |
| 428 } | 414 } |
| 429 | 415 |
| 430 base::TimeTicks TaskQueueManager::Now() const { | 416 base::TickClock* TaskQueueManager::tick_clock() const { |
| 431 return time_source_->NowTicks(); | 417 return delegate_.get(); |
| 432 } | 418 } |
| 433 | 419 |
| 434 int TaskQueueManager::GetNextSequenceNumber() { | 420 int TaskQueueManager::GetNextSequenceNumber() { |
| 435 return task_sequence_num_.GetNext(); | 421 return task_sequence_num_.GetNext(); |
| 436 } | 422 } |
| 437 | 423 |
| 438 scoped_refptr<base::trace_event::ConvertableToTraceFormat> | 424 scoped_refptr<base::trace_event::ConvertableToTraceFormat> |
| 439 TaskQueueManager::AsValueWithSelectorResult( | 425 TaskQueueManager::AsValueWithSelectorResult( |
| 440 bool should_run, | 426 bool should_run, |
| 441 internal::TaskQueueImpl* selected_queue) const { | 427 internal::TaskQueueImpl* selected_queue) const { |
| (...skipping 18 matching lines...) Expand all Loading... | |
| 460 } | 446 } |
| 461 | 447 |
| 462 void TaskQueueManager::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) { | 448 void TaskQueueManager::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) { |
| 463 DCHECK(main_thread_checker_.CalledOnValidThread()); | 449 DCHECK(main_thread_checker_.CalledOnValidThread()); |
| 464 // Only schedule DoWork if there's something to do. | 450 // Only schedule DoWork if there's something to do. |
| 465 if (!queue->work_queue().empty()) | 451 if (!queue->work_queue().empty()) |
| 466 MaybePostDoWorkOnMainRunner(); | 452 MaybePostDoWorkOnMainRunner(); |
| 467 } | 453 } |
| 468 | 454 |
| 469 } // namespace scheduler | 455 } // namespace scheduler |
| OLD | NEW |