OLD | NEW |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "platform/scheduler/base/task_queue_manager.h" | 5 #include "platform/scheduler/base/task_queue_manager.h" |
6 | 6 |
7 #include <queue> | 7 #include <queue> |
8 #include <set> | 8 #include <set> |
9 | 9 |
10 #include "base/bind.h" | 10 #include "base/bind.h" |
(...skipping 57 matching lines...)
68 disabled_by_default_verbose_tracing_category), | 68 disabled_by_default_verbose_tracing_category), |
69 currently_executing_task_queue_(nullptr), | 69 currently_executing_task_queue_(nullptr), |
70 observer_(nullptr), | 70 observer_(nullptr), |
71 deletion_sentinel_(new DeletionSentinel()), | 71 deletion_sentinel_(new DeletionSentinel()), |
72 weak_factory_(this) { | 72 weak_factory_(this) { |
73 DCHECK(delegate->RunsTasksOnCurrentThread()); | 73 DCHECK(delegate->RunsTasksOnCurrentThread()); |
74 TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category, | 74 TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category, |
75 "TaskQueueManager", this); | 75 "TaskQueueManager", this); |
76 selector_.SetTaskQueueSelectorObserver(this); | 76 selector_.SetTaskQueueSelectorObserver(this); |
77 | 77 |
78 from_main_thread_immediate_do_work_closure_ = | 78 delayed_do_work_closure_ = |
79 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), | 79 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), true); |
80 base::TimeTicks(), true); | 80 immediate_do_work_closure_ = |
81 from_other_thread_immediate_do_work_closure_ = | 81 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), false); |
82 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), | |
83 base::TimeTicks(), false); | |
84 | 82 |
85 // TODO(alexclarke): Change this to be a parameter that's passed in. | 83 // TODO(alexclarke): Change this to be a parameter that's passed in. |
86 RegisterTimeDomain(real_time_domain_.get()); | 84 RegisterTimeDomain(real_time_domain_.get()); |
87 | 85 |
88 delegate_->AddNestingObserver(this); | 86 delegate_->AddNestingObserver(this); |
89 } | 87 } |
90 | 88 |
91 TaskQueueManager::~TaskQueueManager() { | 89 TaskQueueManager::~TaskQueueManager() { |
92 TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_, | 90 TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_, |
93 "TaskQueueManager", this); | 91 "TaskQueueManager", this); |
94 | 92 |
95 while (!queues_.empty()) | 93 while (!queues_.empty()) |
96 (*queues_.begin())->UnregisterTaskQueue(); | 94 (*queues_.begin())->UnregisterTaskQueue(); |
97 | 95 |
98 selector_.SetTaskQueueSelectorObserver(nullptr); | 96 selector_.SetTaskQueueSelectorObserver(nullptr); |
99 | 97 |
100 delegate_->RemoveNestingObserver(this); | 98 delegate_->RemoveNestingObserver(this); |
101 } | 99 } |
102 | 100 |
103 TaskQueueManager::AnyThread::AnyThread() : other_thread_pending_wakeup(false) {} | 101 TaskQueueManager::AnyThread::AnyThread() |
| 102 : do_work_running_count(0), |
| 103 immediate_do_work_posted_count(0), |
| 104 is_nested(false) {} |
104 | 105 |
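For reviewers reading this file in isolation: the new AnyThread fields and the IncomingImmediateWorkMap type are declared in task_queue_manager.h, which is not part of this diff. A minimal sketch of the state they appear to represent, inferred from their use below (names follow this diff; the actual declarations may differ):

// Hypothetical sketch of the header-side state guarded by |any_thread_lock_|.
// Needs <unordered_map>; the exact declarations live in task_queue_manager.h.
using IncomingImmediateWorkMap =
    std::unordered_map<internal::TaskQueueImpl*, internal::EnqueueOrder>;

struct AnyThread {
  AnyThread();

  // Number of DoWork() invocations currently on the stack (can exceed 1 when
  // a task spins a nested message loop that runs another DoWork).
  int do_work_running_count;

  // Number of posted, not-yet-started immediate (non-delayed) DoWorks.
  int immediate_do_work_posted_count;

  // Mirrors delegate_->IsNested(); while nested, DoWork de-duplication is
  // bypassed so the nested loop never starves.
  bool is_nested;

  // Queues that reported incoming immediate work, keyed to the enqueue order
  // of the task that triggered the report.
  IncomingImmediateWorkMap has_incoming_immediate_work;
};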
105 void TaskQueueManager::RegisterTimeDomain(TimeDomain* time_domain) { | 106 void TaskQueueManager::RegisterTimeDomain(TimeDomain* time_domain) { |
106 time_domains_.insert(time_domain); | 107 time_domains_.insert(time_domain); |
107 time_domain->OnRegisterWithTaskQueueManager(this); | 108 time_domain->OnRegisterWithTaskQueueManager(this); |
108 } | 109 } |
109 | 110 |
110 void TaskQueueManager::UnregisterTimeDomain(TimeDomain* time_domain) { | 111 void TaskQueueManager::UnregisterTimeDomain(TimeDomain* time_domain) { |
111 time_domains_.erase(time_domain); | 112 time_domains_.erase(time_domain); |
112 } | 113 } |
113 | 114 |
(...skipping 34 matching lines...)
148 | 149 |
149 selector_.RemoveQueue(task_queue.get()); | 150 selector_.RemoveQueue(task_queue.get()); |
150 | 151 |
151 { | 152 { |
152 base::AutoLock lock(any_thread_lock_); | 153 base::AutoLock lock(any_thread_lock_); |
153 any_thread().has_incoming_immediate_work.erase(task_queue.get()); | 154 any_thread().has_incoming_immediate_work.erase(task_queue.get()); |
154 } | 155 } |
155 } | 156 } |
156 | 157 |
157 void TaskQueueManager::ReloadEmptyWorkQueues( | 158 void TaskQueueManager::ReloadEmptyWorkQueues( |
158 const std::unordered_set<internal::TaskQueueImpl*>& queues_to_reload) | 159 const IncomingImmediateWorkMap& queues_to_reload) const { |
159 const { | |
160 // There are two cases where a queue needs reloading. First, it might be | 160 // There are two cases where a queue needs reloading. First, it might be |
161 // completely empty and we've just posted a task (this method handles that | 161 // completely empty and we've just posted a task (this method handles that |
162 // case). Secondly if the work queue becomes empty in when calling | 162 // case). Secondly if the work queue becomes empty in when calling |
163 // WorkQueue::TakeTaskFromWorkQueue (handled there). | 163 // WorkQueue::TakeTaskFromWorkQueue (handled there). |
164 for (internal::TaskQueueImpl* queue : queues_to_reload) { | 164 for (const auto& pair : queues_to_reload) { |
165 queue->ReloadImmediateWorkQueueIfEmpty(); | 165 pair.first->ReloadImmediateWorkQueueIfEmpty(); |
166 } | 166 } |
167 } | 167 } |
168 | 168 |
169 void TaskQueueManager::WakeupReadyDelayedQueues(LazyNow* lazy_now) { | 169 void TaskQueueManager::WakeupReadyDelayedQueues(LazyNow* lazy_now) { |
170 TRACE_EVENT0(disabled_by_default_tracing_category_, | 170 TRACE_EVENT0(disabled_by_default_tracing_category_, |
171 "TaskQueueManager::WakeupReadyDelayedQueues"); | 171 "TaskQueueManager::WakeupReadyDelayedQueues"); |
172 | 172 |
173 for (TimeDomain* time_domain : time_domains_) { | 173 for (TimeDomain* time_domain : time_domains_) { |
174 if (time_domain == real_time_domain_.get()) { | 174 if (time_domain == real_time_domain_.get()) { |
175 time_domain->WakeupReadyDelayedQueues(lazy_now); | 175 time_domain->WakeupReadyDelayedQueues(lazy_now); |
176 } else { | 176 } else { |
177 LazyNow time_domain_lazy_now = time_domain->CreateLazyNow(); | 177 LazyNow time_domain_lazy_now = time_domain->CreateLazyNow(); |
178 time_domain->WakeupReadyDelayedQueues(&time_domain_lazy_now); | 178 time_domain->WakeupReadyDelayedQueues(&time_domain_lazy_now); |
179 } | 179 } |
180 } | 180 } |
181 } | 181 } |
182 | 182 |
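LazyNow is defined elsewhere in the scheduler; for context, a rough sketch of the idea, assuming the base::TickClock interface (the real class may differ):

// Hypothetical sketch only. Needs base/time/tick_clock.h and base/time/time.h.
// LazyNow defers the clock read until Now() is first called, then caches it,
// so code paths that never need the time avoid sampling the clock.
class LazyNow {
 public:
  explicit LazyNow(base::TimeTicks now) : tick_clock_(nullptr), now_(now) {}
  explicit LazyNow(base::TickClock* tick_clock) : tick_clock_(tick_clock) {}

  base::TimeTicks Now() {
    if (now_.is_null())
      now_ = tick_clock_->NowTicks();
    return now_;
  }

 private:
  base::TickClock* tick_clock_;  // Unused when constructed from a TimeTicks.
  base::TimeTicks now_;
};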
183 void TaskQueueManager::OnBeginNestedMessageLoop() { | 183 void TaskQueueManager::OnBeginNestedMessageLoop() { |
184 // We just entered a nested message loop; make sure there's a DoWork posted or | 184 // We just entered a nested message loop; make sure there's a DoWork posted or |
185 // the system will grind to a halt. | 185 // the system will grind to a halt. |
186 delegate_->PostTask(FROM_HERE, from_main_thread_immediate_do_work_closure_); | 186 { |
| 187 base::AutoLock lock(any_thread_lock_); |
| 188 any_thread().immediate_do_work_posted_count++; |
| 189 any_thread().is_nested = true; |
| 190 } |
| 191 delegate_->PostTask(FROM_HERE, immediate_do_work_closure_); |
187 } | 192 } |
188 | 193 |
189 void TaskQueueManager::OnQueueHasIncomingImmediateWork( | 194 void TaskQueueManager::OnQueueHasIncomingImmediateWork( |
190 internal::TaskQueueImpl* queue, | 195 internal::TaskQueueImpl* queue, |
| 196 internal::EnqueueOrder enqueue_order, |
191 bool queue_is_blocked) { | 197 bool queue_is_blocked) { |
192 bool on_main_thread = delegate_->BelongsToCurrentThread(); | 198 MoveableAutoLock lock(any_thread_lock_); |
193 | 199 any_thread().has_incoming_immediate_work.insert( |
194 { | 200 std::make_pair(queue, enqueue_order)); |
195 base::AutoLock lock(any_thread_lock_); | 201 if (!queue_is_blocked) |
196 any_thread().has_incoming_immediate_work.insert(queue); | 202 MaybeScheduleImmediateWorkLocked(FROM_HERE, std::move(lock)); |
197 | |
198 if (queue_is_blocked) | |
199 return; | |
200 | |
201 // De-duplicate DoWork posts. | |
202 if (on_main_thread) { | |
203 if (!main_thread_pending_wakeups_.insert(base::TimeTicks()).second) | |
204 return; | |
205 } else { | |
206 if (any_thread().other_thread_pending_wakeup) | |
207 return; | |
208 any_thread().other_thread_pending_wakeup = true; | |
209 } | |
210 } | |
211 | |
212 if (on_main_thread) { | |
213 delegate_->PostTask(FROM_HERE, from_main_thread_immediate_do_work_closure_); | |
214 } else { | |
215 delegate_->PostTask(FROM_HERE, | |
216 from_other_thread_immediate_do_work_closure_); | |
217 } | |
218 } | 203 } |
219 | 204 |
220 void TaskQueueManager::MaybeScheduleImmediateWork( | 205 void TaskQueueManager::MaybeScheduleImmediateWork( |
221 const tracked_objects::Location& from_here) { | 206 const tracked_objects::Location& from_here) { |
222 bool on_main_thread = delegate_->BelongsToCurrentThread(); | 207 MoveableAutoLock lock(any_thread_lock_); |
223 // De-duplicate DoWork posts. | 208 MaybeScheduleImmediateWorkLocked(from_here, std::move(lock)); |
224 if (on_main_thread) { | 209 } |
225 if (!main_thread_pending_wakeups_.insert(base::TimeTicks()).second) { | 210 |
| 211 void TaskQueueManager::MaybeScheduleImmediateWorkLocked( |
| 212 const tracked_objects::Location& from_here, |
| 213 MoveableAutoLock&& lock) { |
| 214 { |
| 215 MoveableAutoLock auto_lock(std::move(lock)); |
| 216 // Unless we're nested, try to avoid posting redundant DoWorks. |
| 217 if (!any_thread().is_nested && |
| 218 (any_thread().do_work_running_count == 1 || |
| 219 any_thread().immediate_do_work_posted_count > 0)) { |
226 return; | 220 return; |
227 } | 221 } |
228 delegate_->PostTask(from_here, from_main_thread_immediate_do_work_closure_); | 222 |
229 } else { | 223 any_thread().immediate_do_work_posted_count++; |
230 { | |
231 base::AutoLock lock(any_thread_lock_); | |
232 if (any_thread().other_thread_pending_wakeup) | |
233 return; | |
234 any_thread().other_thread_pending_wakeup = true; | |
235 } | |
236 delegate_->PostTask(from_here, | |
237 from_other_thread_immediate_do_work_closure_); | |
238 } | 224 } |
| 225 |
| 226 TRACE_EVENT0(disabled_by_default_tracing_category_, |
| 227 "TaskQueueManager::MaybeScheduleImmediateWorkLocked::PostTask"); |
| 228 delegate_->PostTask(from_here, immediate_do_work_closure_); |
239 } | 229 } |
240 | 230 |
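MoveableAutoLock is a scheduler helper whose definition is not shown in this file. The pattern above (take the lock in the caller, move it into the Locked variant, drop it before PostTask) depends on move semantics; a minimal sketch of such a holder, assuming base::Lock's Acquire()/Release():

// Hypothetical sketch only; the real helper lives elsewhere in the scheduler.
// Needs base/synchronization/lock.h and base/macros.h.
class MoveableAutoLock {
 public:
  explicit MoveableAutoLock(base::Lock& lock) : lock_(lock), moved_(false) {
    lock_.Acquire();
  }

  MoveableAutoLock(MoveableAutoLock&& other)
      : lock_(other.lock_), moved_(other.moved_) {
    // Ownership transfers; the moved-from object must not release the lock.
    other.moved_ = true;
  }

  ~MoveableAutoLock() {
    if (!moved_)
      lock_.Release();
  }

 private:
  base::Lock& lock_;
  bool moved_;

  DISALLOW_COPY_AND_ASSIGN(MoveableAutoLock);
};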
241 void TaskQueueManager::MaybeScheduleDelayedWork( | 231 void TaskQueueManager::MaybeScheduleDelayedWork( |
242 const tracked_objects::Location& from_here, | 232 const tracked_objects::Location& from_here, |
243 base::TimeTicks now, | 233 base::TimeTicks now, |
244 base::TimeDelta delay) { | 234 base::TimeDelta delay) { |
245 DCHECK(main_thread_checker_.CalledOnValidThread()); | 235 DCHECK(main_thread_checker_.CalledOnValidThread()); |
246 DCHECK_GE(delay, base::TimeDelta()); | 236 DCHECK_GE(delay, base::TimeDelta()); |
| 237 { |
| 238 base::AutoLock lock(any_thread_lock_); |
247 | 239 |
248 // If there's a pending immediate DoWork then we rely on | 240 // Unless we're nested, don't post a delayed DoWork if there's an immediate |
249 // TryAdvanceTimeDomains getting the TimeDomain to call | 241 // DoWork in flight or we're inside a DoWork. We can rely on DoWork posting |
250 // MaybeScheduleDelayedWork again when the immediate DoWork is complete. | 242 // a delayed continuation as needed. |
251 if (main_thread_pending_wakeups_.find(base::TimeTicks()) != | 243 if (!any_thread().is_nested && |
252 main_thread_pending_wakeups_.end()) { | 244 (any_thread().immediate_do_work_posted_count > 0 || |
253 return; | 245 any_thread().do_work_running_count == 1)) { |
| 246 return; |
| 247 } |
254 } | 248 } |
| 249 |
255 // De-duplicate DoWork posts. | 250 // De-duplicate DoWork posts. |
256 base::TimeTicks run_time = now + delay; | 251 base::TimeTicks run_time = now + delay; |
257 if (!main_thread_pending_wakeups_.empty() && | 252 if (next_scheduled_delayed_do_work_time_ <= run_time && |
258 *main_thread_pending_wakeups_.begin() <= run_time) { | 253 !next_scheduled_delayed_do_work_time_.is_null()) |
259 return; | 254 return; |
260 } | 255 |
261 main_thread_pending_wakeups_.insert(run_time); | 256 TRACE_EVENT1(disabled_by_default_tracing_category_, |
| 257 "TaskQueueManager::MaybeScheduleDelayedWork::PostDelayedTask", |
| 258 "delay_ms", delay.InMillisecondsF()); |
| 259 |
| 260 cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_); |
| 261 next_scheduled_delayed_do_work_time_ = run_time; |
262 delegate_->PostDelayedTask( | 262 delegate_->PostDelayedTask( |
263 from_here, base::Bind(&TaskQueueManager::DoWork, | 263 from_here, cancelable_delayed_do_work_closure_.callback(), delay); |
264 weak_factory_.GetWeakPtr(), run_time, true), | |
265 delay); | |
266 } | 264 } |
267 | 265 |
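The delayed-DoWork de-duplication above relies on base::CancelableClosure (base/cancelable_callback.h): Reset() invalidates any callback previously handed out, and Cancel() turns a still-pending post into a no-op. A small sketch of the reposting pattern, with the function and parameter names being stand-ins rather than anything from this change:

// Illustrative sketch only. Needs base/cancelable_callback.h and
// base/single_thread_task_runner.h; |cancelable| stands for a long-lived
// member such as cancelable_delayed_do_work_closure_.
void RepostDelayedDoWork(base::SingleThreadTaskRunner* task_runner,
                         base::CancelableClosure* cancelable,
                         const base::Closure& do_work,
                         base::TimeDelta delay) {
  // Reset() cancels the callback handed out by any earlier Reset(), so a
  // previously posted delayed task becomes a no-op instead of a duplicate.
  cancelable->Reset(do_work);
  task_runner->PostDelayedTask(FROM_HERE, cancelable->callback(), delay);
}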
268 void TaskQueueManager::DoWork(base::TimeTicks run_time, bool from_main_thread) { | 266 void TaskQueueManager::DoWork(bool delayed) { |
269 DCHECK(main_thread_checker_.CalledOnValidThread()); | 267 DCHECK(main_thread_checker_.CalledOnValidThread()); |
270 TRACE_EVENT1(tracing_category_, "TaskQueueManager::DoWork", | 268 TRACE_EVENT1(tracing_category_, "TaskQueueManager::DoWork", "delayed", |
271 "from_main_thread", from_main_thread); | 269 delayed); |
272 | 270 |
273 if (from_main_thread) { | 271 LazyNow lazy_now(real_time_domain()->CreateLazyNow()); |
274 main_thread_pending_wakeups_.erase(run_time); | |
275 } else { | |
276 base::AutoLock lock(any_thread_lock_); | |
277 any_thread().other_thread_pending_wakeup = false; | |
278 } | |
279 | |
280 // Posting a DoWork while a DoWork is running leads to spurious DoWorks. | |
281 main_thread_pending_wakeups_.insert(base::TimeTicks()); | |
282 | |
283 bool is_nested = delegate_->IsNested(); | 272 bool is_nested = delegate_->IsNested(); |
284 if (!is_nested) | 273 if (!is_nested) |
285 queues_to_delete_.clear(); | 274 queues_to_delete_.clear(); |
286 | 275 |
287 LazyNow lazy_now(real_time_domain()->CreateLazyNow()); | 276 // This must be done before running any tasks because they could invoke a |
288 WakeupReadyDelayedQueues(&lazy_now); | 277 // nested message loop and we risk having a stale |
| 278 // |next_scheduled_delayed_do_work_time_|. |
| 279 if (delayed) |
| 280 next_scheduled_delayed_do_work_time_ = base::TimeTicks(); |
289 | 281 |
290 for (int i = 0; i < work_batch_size_; i++) { | 282 for (int i = 0; i < work_batch_size_; i++) { |
291 std::unordered_set<internal::TaskQueueImpl*> queues_to_reload; | 283 IncomingImmediateWorkMap queues_to_reload; |
292 | 284 |
293 { | 285 { |
294 base::AutoLock lock(any_thread_lock_); | 286 base::AutoLock lock(any_thread_lock_); |
| 287 if (i == 0) { |
| 288 any_thread().do_work_running_count++; |
| 289 |
| 290 if (!delayed) { |
| 291 any_thread().immediate_do_work_posted_count--; |
| 292 DCHECK_GE(any_thread().immediate_do_work_posted_count, 0); |
| 293 } |
| 294 } else { |
| 295 // Ideally we'd have an OnNestedMessageLoopExit observer, but in its |
| 296 // absence we may need to clear this flag after running a task (which |
| 297 // ran a nested message loop). |
| 298 any_thread().is_nested = is_nested; |
| 299 } |
| 300 DCHECK_EQ(any_thread().is_nested, delegate_->IsNested()); |
295 std::swap(queues_to_reload, any_thread().has_incoming_immediate_work); | 301 std::swap(queues_to_reload, any_thread().has_incoming_immediate_work); |
296 } | 302 } |
297 | 303 |
298 // It's important we call ReloadEmptyWorkQueues outside of the lock to | 304 // It's important we call ReloadEmptyWorkQueues outside of the lock to |
299 // avoid a lock order inversion. | 305 // avoid a lock order inversion. |
300 ReloadEmptyWorkQueues(queues_to_reload); | 306 ReloadEmptyWorkQueues(queues_to_reload); |
301 | 307 |
302 internal::WorkQueue* work_queue; | 308 WakeupReadyDelayedQueues(&lazy_now); |
| 309 |
| 310 internal::WorkQueue* work_queue = nullptr; |
303 if (!SelectWorkQueueToService(&work_queue)) | 311 if (!SelectWorkQueueToService(&work_queue)) |
304 break; | 312 break; |
305 | 313 |
| 314 // NB this may unregister |work_queue|. |
306 base::TimeTicks time_after_task; | 315 base::TimeTicks time_after_task; |
307 switch (ProcessTaskFromWorkQueue(work_queue, is_nested, lazy_now, | 316 switch (ProcessTaskFromWorkQueue(work_queue, is_nested, lazy_now, |
308 &time_after_task)) { | 317 &time_after_task)) { |
309 case ProcessTaskResult::DEFERRED: | 318 case ProcessTaskResult::DEFERRED: |
310 // If a task was deferred, try again with another task. | 319 // If a task was deferred, try again with another task. |
311 continue; | 320 continue; |
312 case ProcessTaskResult::EXECUTED: | 321 case ProcessTaskResult::EXECUTED: |
313 break; | 322 break; |
314 case ProcessTaskResult::TASK_QUEUE_MANAGER_DELETED: | 323 case ProcessTaskResult::TASK_QUEUE_MANAGER_DELETED: |
315 return; // The TaskQueueManager got deleted, we must bail out. | 324 return; // The TaskQueueManager got deleted, we must bail out. |
316 } | 325 } |
317 | 326 |
318 work_queue = nullptr; // The queue may have been unregistered. | |
319 | |
320 lazy_now = time_after_task.is_null() ? real_time_domain()->CreateLazyNow() | 327 lazy_now = time_after_task.is_null() ? real_time_domain()->CreateLazyNow() |
321 : LazyNow(time_after_task); | 328 : LazyNow(time_after_task); |
322 WakeupReadyDelayedQueues(&lazy_now); | |
323 | 329 |
324 // Only run a single task per batch in nested run loops so that we can | 330 // Only run a single task per batch in nested run loops so that we can |
325 // properly exit the nested loop when someone calls RunLoop::Quit(). | 331 // properly exit the nested loop when someone calls RunLoop::Quit(). |
326 if (is_nested) | 332 if (is_nested) |
327 break; | 333 break; |
328 } | 334 } |
329 | 335 |
330 main_thread_pending_wakeups_.erase(base::TimeTicks()); | |
331 | |
332 // TODO(alexclarke): Consider refactoring the above loop to terminate only | 336 // TODO(alexclarke): Consider refactoring the above loop to terminate only |
333 // when there's no more work left to be done, rather than posting a | 337 // when there's no more work left to be done, rather than posting a |
334 // continuation task. | 338 // continuation task. |
335 base::Optional<base::TimeDelta> next_delay = | |
336 ComputeDelayTillNextTask(&lazy_now); | |
337 | 339 |
338 if (!next_delay) | 340 { |
339 return; | 341 MoveableAutoLock lock(any_thread_lock_); |
| 342 base::Optional<base::TimeDelta> next_delay = |
| 343 ComputeDelayTillNextTaskLocked(&lazy_now); |
340 | 344 |
341 base::TimeDelta delay = next_delay.value(); | 345 any_thread().do_work_running_count--; |
342 if (delay.is_zero()) { | 346 DCHECK_GE(any_thread().do_work_running_count, 0); |
343 MaybeScheduleImmediateWork(FROM_HERE); | 347 |
344 } else { | 348 any_thread().is_nested = is_nested; |
345 MaybeScheduleDelayedWork(FROM_HERE, lazy_now.Now(), delay); | 349 DCHECK_EQ(any_thread().is_nested, delegate_->IsNested()); |
| 350 |
| 351 PostDoWorkContinuationLocked(next_delay, &lazy_now, std::move(lock)); |
346 } | 352 } |
347 } | 353 } |
348 | 354 |
349 base::Optional<base::TimeDelta> TaskQueueManager::ComputeDelayTillNextTask( | 355 void TaskQueueManager::PostDoWorkContinuationLocked( |
350 LazyNow* lazy_now) { | 356 base::Optional<base::TimeDelta> next_delay, |
| 357 LazyNow* lazy_now, |
| 358 MoveableAutoLock&& lock) { |
| 359 DCHECK(main_thread_checker_.CalledOnValidThread()); |
| 360 base::TimeDelta delay; |
| 361 |
| 362 { |
| 363 MoveableAutoLock auto_lock(std::move(lock)); |
| 364 |
| 365 // If there are no tasks left then we don't need to post a continuation. |
| 366 if (!next_delay) { |
| 367 // If there's a pending delayed DoWork, cancel it because it's not needed. |
| 368 if (!next_scheduled_delayed_do_work_time_.is_null()) { |
| 369 next_scheduled_delayed_do_work_time_ = base::TimeTicks(); |
| 370 cancelable_delayed_do_work_closure_.Cancel(); |
| 371 } |
| 372 return; |
| 373 } |
| 374 |
| 375 // If an immediate DoWork is posted, we don't need to post a continuation. |
| 376 if (any_thread().immediate_do_work_posted_count > 0) |
| 377 return; |
| 378 |
| 379 delay = next_delay.value(); |
| 380 |
| 381 // This isn't supposed to happen, but in case it does, convert to |
| 382 // non-delayed. |
| 383 if (delay < base::TimeDelta()) |
| 384 delay = base::TimeDelta(); |
| 385 |
| 386 if (delay.is_zero()) { |
| 387 // If a delayed DoWork is pending then we don't need to post a |
| 388 // continuation because it should run immediately. |
| 389 if (!next_scheduled_delayed_do_work_time_.is_null() && |
| 390 next_scheduled_delayed_do_work_time_ <= lazy_now->Now()) { |
| 391 return; |
| 392 } |
| 393 |
| 394 any_thread().immediate_do_work_posted_count++; |
| 395 } else { |
| 396 base::TimeTicks run_time = lazy_now->Now() + delay; |
| 397 if (next_scheduled_delayed_do_work_time_ == run_time) |
| 398 return; |
| 399 |
| 400 next_scheduled_delayed_do_work_time_ = run_time; |
| 401 } |
| 402 } |
| 403 |
| 404 // We avoid holding |any_thread_lock_| while posting the task. |
| 405 if (delay.is_zero()) { |
| 406 delegate_->PostTask(FROM_HERE, immediate_do_work_closure_); |
| 407 } else { |
| 408 cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_); |
| 409 delegate_->PostDelayedTask( |
| 410 FROM_HERE, cancelable_delayed_do_work_closure_.callback(), delay); |
| 411 } |
| 412 } |
| 413 |
| 414 base::Optional<base::TimeDelta> |
| 415 TaskQueueManager::ComputeDelayTillNextTaskLocked(LazyNow* lazy_now) { |
351 DCHECK(main_thread_checker_.CalledOnValidThread()); | 416 DCHECK(main_thread_checker_.CalledOnValidThread()); |
352 | 417 |
353 std::unordered_set<internal::TaskQueueImpl*> queues_to_reload; | 418 // Unfortunately because |any_thread_lock_| is held it's not safe to call |
354 { | 419 // ReloadEmptyWorkQueues here (possible lock order inversion); however, this |
355 base::AutoLock lock(any_thread_lock_); | 420 // check is equivalent to calling ReloadEmptyWorkQueues first. |
356 std::swap(queues_to_reload, any_thread().has_incoming_immediate_work); | 421 for (const auto& pair : any_thread().has_incoming_immediate_work) { |
| 422 if (pair.first->CouldTaskRun(pair.second)) |
| 423 return base::TimeDelta(); |
357 } | 424 } |
358 | 425 |
359 // It's important we call ReloadEmptyWorkQueues outside of the lock to | |
360 // avoid a lock order inversion. | |
361 ReloadEmptyWorkQueues(queues_to_reload); | |
362 | |
363 // If the selector has non-empty queues we trivially know there is immediate | 426 // If the selector has non-empty queues we trivially know there is immediate |
364 // work to be done. | 427 // work to be done. |
365 if (!selector_.EnabledWorkQueuesEmpty()) | 428 if (!selector_.EnabledWorkQueuesEmpty()) |
366 return base::TimeDelta(); | 429 return base::TimeDelta(); |
367 | 430 |
368 // Otherwise we need to find the shortest delay, if any. | 431 // Otherwise we need to find the shortest delay, if any. NB we don't need to |
| 432 // call WakeupReadyDelayedQueues because it's assumed DelayTillNextTask will |
| 433 // return base::TimeDelta() if the delayed task is due to run now. |
369 base::Optional<base::TimeDelta> next_continuation; | 434 base::Optional<base::TimeDelta> next_continuation; |
370 for (TimeDomain* time_domain : time_domains_) { | 435 for (TimeDomain* time_domain : time_domains_) { |
371 base::Optional<base::TimeDelta> continuation = | 436 base::Optional<base::TimeDelta> continuation = |
372 time_domain->DelayTillNextTask(lazy_now); | 437 time_domain->DelayTillNextTask(lazy_now); |
373 if (!continuation) | 438 if (!continuation) |
374 continue; | 439 continue; |
375 if (!next_continuation || next_continuation.value() > continuation.value()) | 440 if (!next_continuation || next_continuation.value() > continuation.value()) |
376 next_continuation = continuation; | 441 next_continuation = continuation; |
377 } | 442 } |
378 return next_continuation; | 443 return next_continuation; |
(...skipping 192 matching lines...)
571 selected_work_queue->task_queue()->GetName()); | 636 selected_work_queue->task_queue()->GetName()); |
572 state->SetString("work_queue_name", selected_work_queue->name()); | 637 state->SetString("work_queue_name", selected_work_queue->name()); |
573 } | 638 } |
574 | 639 |
575 state->BeginArray("time_domains"); | 640 state->BeginArray("time_domains"); |
576 for (auto* time_domain : time_domains_) | 641 for (auto* time_domain : time_domains_) |
577 time_domain->AsValueInto(state.get()); | 642 time_domain->AsValueInto(state.get()); |
578 state->EndArray(); | 643 state->EndArray(); |
579 { | 644 { |
580 base::AutoLock lock(any_thread_lock_); | 645 base::AutoLock lock(any_thread_lock_); |
| 646 state->SetBoolean("is_nested", any_thread().is_nested); |
| 647 state->SetInteger("do_work_running_count", |
| 648 any_thread().do_work_running_count); |
| 649 state->SetInteger("immediate_do_work_posted_count", |
| 650 any_thread().immediate_do_work_posted_count); |
| 651 |
581 state->BeginArray("has_incoming_immediate_work"); | 652 state->BeginArray("has_incoming_immediate_work"); |
582 for (internal::TaskQueueImpl* task_queue : | 653 for (const auto& pair : any_thread().has_incoming_immediate_work) { |
583 any_thread().has_incoming_immediate_work) { | 654 state->AppendString(pair.first->GetName()); |
584 state->AppendString(task_queue->GetName()); | |
585 } | 655 } |
586 state->EndArray(); | 656 state->EndArray(); |
587 } | 657 } |
588 return std::move(state); | 658 return std::move(state); |
589 } | 659 } |
590 | 660 |
591 void TaskQueueManager::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) { | 661 void TaskQueueManager::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) { |
592 DCHECK(main_thread_checker_.CalledOnValidThread()); | 662 DCHECK(main_thread_checker_.CalledOnValidThread()); |
593 DCHECK(queue->IsQueueEnabled()); | 663 DCHECK(queue->IsQueueEnabled()); |
594 // Only schedule DoWork if there's something to do. | 664 // Only schedule DoWork if there's something to do. |
(...skipping 26 matching lines...)
621 for (const scoped_refptr<internal::TaskQueueImpl>& queue : queues_) { | 691 for (const scoped_refptr<internal::TaskQueueImpl>& queue : queues_) { |
622 TimeDomain* time_domain = queue->GetTimeDomain(); | 692 TimeDomain* time_domain = queue->GetTimeDomain(); |
623 if (time_domain_now.find(time_domain) == time_domain_now.end()) | 693 if (time_domain_now.find(time_domain) == time_domain_now.end()) |
624 time_domain_now.insert(std::make_pair(time_domain, time_domain->Now())); | 694 time_domain_now.insert(std::make_pair(time_domain, time_domain->Now())); |
625 queue->SweepCanceledDelayedTasks(time_domain_now[time_domain]); | 695 queue->SweepCanceledDelayedTasks(time_domain_now[time_domain]); |
626 } | 696 } |
627 } | 697 } |
628 | 698 |
629 } // namespace scheduler | 699 } // namespace scheduler |
630 } // namespace blink | 700 } // namespace blink |