| OLD | NEW |
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "platform/scheduler/base/task_queue_manager.h" | 5 #include "platform/scheduler/base/task_queue_manager.h" |
| 6 | 6 |
| 7 #include <queue> | 7 #include <queue> |
| 8 #include <set> | 8 #include <set> |
| 9 | 9 |
| 10 #include "base/bind.h" | 10 #include "base/bind.h" |
| (...skipping 148 matching lines...) |
| 159 const IncomingImmediateWorkMap& queues_to_reload) const { | 159 const IncomingImmediateWorkMap& queues_to_reload) const { |
| 160 // There are two cases where a queue needs reloading. First, it might be | 160 // There are two cases where a queue needs reloading. First, it might be |
| 161 // completely empty and we've just posted a task (this method handles that | 161 // completely empty and we've just posted a task (this method handles that |
| 162 // case). Secondly, if the work queue becomes empty when calling | 162 // case). Secondly, if the work queue becomes empty when calling |
| 163 // WorkQueue::TakeTaskFromWorkQueue (handled there). | 163 // WorkQueue::TakeTaskFromWorkQueue (handled there). |
| 164 for (const auto& pair : queues_to_reload) { | 164 for (const auto& pair : queues_to_reload) { |
| 165 pair.first->ReloadImmediateWorkQueueIfEmpty(); | 165 pair.first->ReloadImmediateWorkQueueIfEmpty(); |
| 166 } | 166 } |
| 167 } | 167 } |
| 168 | 168 |
| 169 void TaskQueueManager::WakeupReadyDelayedQueues(LazyNow* lazy_now) { | 169 void TaskQueueManager::WakeUpReadyDelayedQueues(LazyNow* lazy_now) { |
| 170 TRACE_EVENT0(disabled_by_default_tracing_category_, | 170 TRACE_EVENT0(disabled_by_default_tracing_category_, |
| 171 "TaskQueueManager::WakeupReadyDelayedQueues"); | 171 "TaskQueueManager::WakeUpReadyDelayedQueues"); |
| 172 | 172 |
| 173 for (TimeDomain* time_domain : time_domains_) { | 173 for (TimeDomain* time_domain : time_domains_) { |
| 174 if (time_domain == real_time_domain_.get()) { | 174 if (time_domain == real_time_domain_.get()) { |
| 175 time_domain->WakeupReadyDelayedQueues(lazy_now); | 175 time_domain->WakeUpReadyDelayedQueues(lazy_now); |
| 176 } else { | 176 } else { |
| 177 LazyNow time_domain_lazy_now = time_domain->CreateLazyNow(); | 177 LazyNow time_domain_lazy_now = time_domain->CreateLazyNow(); |
| 178 time_domain->WakeupReadyDelayedQueues(&time_domain_lazy_now); | 178 time_domain->WakeUpReadyDelayedQueues(&time_domain_lazy_now); |
| 179 } | 179 } |
| 180 } | 180 } |
| 181 } | 181 } |
| 182 | 182 |
| 183 void TaskQueueManager::OnBeginNestedMessageLoop() { | 183 void TaskQueueManager::OnBeginNestedMessageLoop() { |
| 184 // We just entered a nested message loop; make sure there's a DoWork posted or | 184 // We just entered a nested message loop; make sure there's a DoWork posted or |
| 185 // the system will grind to a halt. | 185 // the system will grind to a halt. |
| 186 { | 186 { |
| 187 base::AutoLock lock(any_thread_lock_); | 187 base::AutoLock lock(any_thread_lock_); |
| 188 any_thread().immediate_do_work_posted_count++; | 188 any_thread().immediate_do_work_posted_count++; |
| (...skipping 44 matching lines...) |
| 233 "TaskQueueManager::MaybeScheduleImmediateWorkLocked::PostTask"); | 233 "TaskQueueManager::MaybeScheduleImmediateWorkLocked::PostTask"); |
| 234 delegate_->PostTask(from_here, immediate_do_work_closure_); | 234 delegate_->PostTask(from_here, immediate_do_work_closure_); |
| 235 } | 235 } |
| 236 | 236 |
| 237 void TaskQueueManager::MaybeScheduleDelayedWork( | 237 void TaskQueueManager::MaybeScheduleDelayedWork( |
| 238 const tracked_objects::Location& from_here, | 238 const tracked_objects::Location& from_here, |
| 239 TimeDomain* requesting_time_domain, | 239 TimeDomain* requesting_time_domain, |
| 240 base::TimeTicks now, | 240 base::TimeTicks now, |
| 241 base::TimeTicks run_time) { | 241 base::TimeTicks run_time) { |
| 242 DCHECK(main_thread_checker_.CalledOnValidThread()); | 242 DCHECK(main_thread_checker_.CalledOnValidThread()); |
| 243 // Make sure we don't cancel another TimeDomain's wakeup. | 243 // Make sure we don't cancel another TimeDomain's wake_up. |
| 244 DCHECK(!next_delayed_do_work_ || | 244 DCHECK(!next_delayed_do_work_ || |
| 245 next_delayed_do_work_.time_domain() == requesting_time_domain); | 245 next_delayed_do_work_.time_domain() == requesting_time_domain); |
| 246 { | 246 { |
| 247 base::AutoLock lock(any_thread_lock_); | 247 base::AutoLock lock(any_thread_lock_); |
| 248 | 248 |
| 249 // Unless we're nested, don't post a delayed DoWork if there's an immediate | 249 // Unless we're nested, don't post a delayed DoWork if there's an immediate |
| 250 // DoWork in flight or we're inside a DoWork. We can rely on DoWork posting | 250 // DoWork in flight or we're inside a DoWork. We can rely on DoWork posting |
| 251 // a delayed continuation as needed. | 251 // a delayed continuation as needed. |
| 252 if (!any_thread().is_nested && | 252 if (!any_thread().is_nested && |
| 253 (any_thread().immediate_do_work_posted_count > 0 || | 253 (any_thread().immediate_do_work_posted_count > 0 || |
| (...skipping 65 matching lines...) |
| 319 any_thread().is_nested = is_nested; | 319 any_thread().is_nested = is_nested; |
| 320 } | 320 } |
| 321 DCHECK_EQ(any_thread().is_nested, delegate_->IsNested()); | 321 DCHECK_EQ(any_thread().is_nested, delegate_->IsNested()); |
| 322 std::swap(queues_to_reload, any_thread().has_incoming_immediate_work); | 322 std::swap(queues_to_reload, any_thread().has_incoming_immediate_work); |
| 323 } | 323 } |
| 324 | 324 |
| 325 // It's important we call ReloadEmptyWorkQueues outside of the lock to | 325 // It's important we call ReloadEmptyWorkQueues outside of the lock to |
| 326 // avoid a lock order inversion. | 326 // avoid a lock order inversion. |
| 327 ReloadEmptyWorkQueues(queues_to_reload); | 327 ReloadEmptyWorkQueues(queues_to_reload); |
| 328 | 328 |
| 329 WakeupReadyDelayedQueues(&lazy_now); | 329 WakeUpReadyDelayedQueues(&lazy_now); |
| 330 | 330 |
| 331 internal::WorkQueue* work_queue = nullptr; | 331 internal::WorkQueue* work_queue = nullptr; |
| 332 if (!SelectWorkQueueToService(&work_queue)) | 332 if (!SelectWorkQueueToService(&work_queue)) |
| 333 break; | 333 break; |
| 334 | 334 |
| 335 // NB this may unregister |work_queue|. | 335 // NB this may unregister |work_queue|. |
| 336 base::TimeTicks time_after_task; | 336 base::TimeTicks time_after_task; |
| 337 switch (ProcessTaskFromWorkQueue(work_queue, is_nested, lazy_now, | 337 switch (ProcessTaskFromWorkQueue(work_queue, is_nested, lazy_now, |
| 338 &time_after_task)) { | 338 &time_after_task)) { |
| 339 case ProcessTaskResult::DEFERRED: | 339 case ProcessTaskResult::DEFERRED: |
| (...skipping 97 matching lines...) |
| 437 if (pair.first->CouldTaskRun(pair.second)) | 437 if (pair.first->CouldTaskRun(pair.second)) |
| 438 return NextTaskDelay(); | 438 return NextTaskDelay(); |
| 439 } | 439 } |
| 440 | 440 |
| 441 // If the selector has non-empty queues, we trivially know there is immediate | 441 // If the selector has non-empty queues, we trivially know there is immediate |
| 442 // work to be done. | 442 // work to be done. |
| 443 if (!selector_.EnabledWorkQueuesEmpty()) | 443 if (!selector_.EnabledWorkQueuesEmpty()) |
| 444 return NextTaskDelay(); | 444 return NextTaskDelay(); |
| 445 | 445 |
| 446 // Otherwise we need to find the shortest delay, if any. NB we don't need to | 446 // Otherwise we need to find the shortest delay, if any. NB we don't need to |
| 447 // call WakeupReadyDelayedQueues because it's assumed DelayTillNextTask will | 447 // call WakeUpReadyDelayedQueues because it's assumed DelayTillNextTask will |
| 448 // return base::TimeDelta() if the delayed task is due to run now. | 448 // return base::TimeDelta() if the delayed task is due to run now. |
| 449 base::Optional<NextTaskDelay> delay_till_next_task; | 449 base::Optional<NextTaskDelay> delay_till_next_task; |
| 450 for (TimeDomain* time_domain : time_domains_) { | 450 for (TimeDomain* time_domain : time_domains_) { |
| 451 base::Optional<base::TimeDelta> delay = | 451 base::Optional<base::TimeDelta> delay = |
| 452 time_domain->DelayTillNextTask(lazy_now); | 452 time_domain->DelayTillNextTask(lazy_now); |
| 453 if (!delay) | 453 if (!delay) |
| 454 continue; | 454 continue; |
| 455 | 455 |
| 456 NextTaskDelay task_delay = (delay.value() == base::TimeDelta()) | 456 NextTaskDelay task_delay = (delay.value() == base::TimeDelta()) |
| 457 ? NextTaskDelay() | 457 ? NextTaskDelay() |
| (...skipping 253 matching lines...) |
| 711 for (const scoped_refptr<internal::TaskQueueImpl>& queue : queues_) { | 711 for (const scoped_refptr<internal::TaskQueueImpl>& queue : queues_) { |
| 712 TimeDomain* time_domain = queue->GetTimeDomain(); | 712 TimeDomain* time_domain = queue->GetTimeDomain(); |
| 713 if (time_domain_now.find(time_domain) == time_domain_now.end()) | 713 if (time_domain_now.find(time_domain) == time_domain_now.end()) |
| 714 time_domain_now.insert(std::make_pair(time_domain, time_domain->Now())); | 714 time_domain_now.insert(std::make_pair(time_domain, time_domain->Now())); |
| 715 queue->SweepCanceledDelayedTasks(time_domain_now[time_domain]); | 715 queue->SweepCanceledDelayedTasks(time_domain_now[time_domain]); |
| 716 } | 716 } |
| 717 } | 717 } |
| 718 | 718 |
| 719 } // namespace scheduler | 719 } // namespace scheduler |
| 720 } // namespace blink | 720 } // namespace blink |