OLD | NEW |
---|---|
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "platform/scheduler/base/task_queue_manager.h" | 5 #include "platform/scheduler/base/task_queue_manager.h" |
6 | 6 |
7 #include <queue> | 7 #include <queue> |
8 #include <set> | 8 #include <set> |
9 | 9 |
10 #include "base/bind.h" | 10 #include "base/bind.h" |
(...skipping 40 matching lines...) | |
51 } | 51 } |
52 | 52 |
53 TaskQueueManager::TaskQueueManager( | 53 TaskQueueManager::TaskQueueManager( |
54 scoped_refptr<TaskQueueManagerDelegate> delegate, | 54 scoped_refptr<TaskQueueManagerDelegate> delegate, |
55 const char* tracing_category, | 55 const char* tracing_category, |
56 const char* disabled_by_default_tracing_category, | 56 const char* disabled_by_default_tracing_category, |
57 const char* disabled_by_default_verbose_tracing_category) | 57 const char* disabled_by_default_verbose_tracing_category) |
58 : real_time_domain_(new RealTimeDomain(tracing_category)), | 58 : real_time_domain_(new RealTimeDomain(tracing_category)), |
59 delegate_(delegate), | 59 delegate_(delegate), |
60 task_was_run_on_quiescence_monitored_queue_(false), | 60 task_was_run_on_quiescence_monitored_queue_(false), |
61 other_thread_pending_wakeup_(false), | 61 do_work_pending_lock_(), // NOTE this calls the constructor! |
62 do_work_running_count_(0), | |
63 immediate_do_work_posted_count_(0), | |
64 is_nested_(false), | |
65 record_task_delay_histograms_(true), | |
62 work_batch_size_(1), | 66 work_batch_size_(1), |
63 task_count_(0), | 67 task_count_(0), |
64 tracing_category_(tracing_category), | 68 tracing_category_(tracing_category), |
65 disabled_by_default_tracing_category_( | 69 disabled_by_default_tracing_category_( |
66 disabled_by_default_tracing_category), | 70 disabled_by_default_tracing_category), |
67 disabled_by_default_verbose_tracing_category_( | 71 disabled_by_default_verbose_tracing_category_( |
68 disabled_by_default_verbose_tracing_category), | 72 disabled_by_default_verbose_tracing_category), |
69 currently_executing_task_queue_(nullptr), | 73 currently_executing_task_queue_(nullptr), |
70 observer_(nullptr), | 74 observer_(nullptr), |
71 deletion_sentinel_(new DeletionSentinel()), | 75 deletion_sentinel_(new DeletionSentinel()), |
72 weak_factory_(this) { | 76 weak_factory_(this) { |
73 DCHECK(delegate->RunsTasksOnCurrentThread()); | 77 DCHECK(delegate->RunsTasksOnCurrentThread()); |
74 TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category, | 78 TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category, |
75 "TaskQueueManager", this); | 79 "TaskQueueManager", this); |
76 selector_.SetTaskQueueSelectorObserver(this); | 80 selector_.SetTaskQueueSelectorObserver(this); |
77 | 81 |
78 from_main_thread_immediate_do_work_closure_ = | 82 delayed_do_work_closure_ = |
79 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), | 83 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), true); |
80 base::TimeTicks(), true); | 84 immediate_do_work_closure_ = |
81 from_other_thread_immediate_do_work_closure_ = | 85 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), false); |
82 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), | |
83 base::TimeTicks(), false); | |
84 | 86 |
85 // TODO(alexclarke): Change this to be a parameter that's passed in. | 87 // TODO(alexclarke): Change this to be a parameter that's passed in. |
86 RegisterTimeDomain(real_time_domain_.get()); | 88 RegisterTimeDomain(real_time_domain_.get()); |
87 | 89 |
88 delegate_->AddNestingObserver(this); | 90 delegate_->AddNestingObserver(this); |
89 } | 91 } |
90 | 92 |
91 TaskQueueManager::~TaskQueueManager() { | 93 TaskQueueManager::~TaskQueueManager() { |
92 TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_, | 94 TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_, |
93 "TaskQueueManager", this); | 95 "TaskQueueManager", this); |
(...skipping 44 matching lines...) | |
138 DCHECK(main_thread_checker_.CalledOnValidThread()); | 140 DCHECK(main_thread_checker_.CalledOnValidThread()); |
139 if (observer_) | 141 if (observer_) |
140 observer_->OnUnregisterTaskQueue(task_queue); | 142 observer_->OnUnregisterTaskQueue(task_queue); |
141 | 143 |
142 // Add |task_queue| to |queues_to_delete_| so we can prevent it from being | 144 // Add |task_queue| to |queues_to_delete_| so we can prevent it from being |
143 // freed while any of our structures hold a raw pointer to it. | 145 // freed while any of our structures hold a raw pointer to it. |
144 queues_to_delete_.insert(task_queue); | 146 queues_to_delete_.insert(task_queue); |
145 queues_.erase(task_queue); | 147 queues_.erase(task_queue); |
146 | 148 |
147 selector_.RemoveQueue(task_queue.get()); | 149 selector_.RemoveQueue(task_queue.get()); |
150 | |
151 { | |
152 SpinLock::Guard guard(do_work_pending_lock_); | |
153 has_incomming_immediate_work_.erase(task_queue.get()); | |
154 } | |
148 } | 155 } |
149 | 156 |
150 void TaskQueueManager::UpdateWorkQueues(LazyNow lazy_now) { | 157 void TaskQueueManager::UpdateWorkQueues(LazyNow* lazy_now) { |
151 TRACE_EVENT0(disabled_by_default_tracing_category_, | |
152 "TaskQueueManager::UpdateWorkQueues"); | |
153 | |
154 for (TimeDomain* time_domain : time_domains_) { | 158 for (TimeDomain* time_domain : time_domains_) { |
155 LazyNow lazy_now_in_domain = time_domain == real_time_domain_.get() | 159 if (time_domain == real_time_domain_.get()) { |
156 ? lazy_now | 160 time_domain->WakeupReadyDelayedQueues(lazy_now); |
157 : time_domain->CreateLazyNow(); | 161 continue; |
158 time_domain->UpdateWorkQueues(lazy_now_in_domain); | 162 } |
163 LazyNow time_domain_lazy_now = time_domain->CreateLazyNow(); | |
164 time_domain->WakeupReadyDelayedQueues(&time_domain_lazy_now); | |
159 } | 165 } |
160 } | 166 } |
161 | 167 |
162 void TaskQueueManager::OnBeginNestedMessageLoop() { | 168 void TaskQueueManager::OnBeginNestedMessageLoop() { |
163 // We just entered a nested message loop, make sure there's a DoWork posted or | 169 // We just entered a nested message loop, make sure there's a DoWork posted or |
164 // the system will grind to a halt. | 170 // the system will grind to a halt. |
165 delegate_->PostTask(FROM_HERE, from_main_thread_immediate_do_work_closure_); | 171 SpinLock::Guard guard(do_work_pending_lock_); |
172 delegate_->PostTask(FROM_HERE, immediate_do_work_closure_); | |
173 immediate_do_work_posted_count_++; | |
174 is_nested_ = true; | |
175 } | |
176 | |
177 void TaskQueueManager::OnQueueHasImmediateWork(internal::TaskQueueImpl* queue) { | |
178 SpinLock::Guard guard(do_work_pending_lock_); | |
179 has_incomming_immediate_work_.insert(queue); | |
180 | |
181 // There's no point posting a DoWork for a blocked or disabled queue, although | |
182 // we can only determine that on the main thread. | |
183 if (queue->RunsTasksOnCurrentThread() && | |
184 (!queue->IsQueueEnabled() || queue->BlockedByFenceLocked())) { | |
Sami
2016/12/05 17:52:54
Hmm, we're not holding the any thread lock here, a
alex clarke (OOO till 29th)
2016/12/06 17:37:54
Actually we are but that's perhaps a little fragil
Sami
2016/12/07 16:13:42
Yeah, something like that would be better. The cou
| |
185 return; | |
186 } | |
187 | |
188 MaybeScheduleImmediateWorkLocked(FROM_HERE); | |
189 } | |
190 | |
191 void TaskQueueManager::NotifyQueuesOfIncomingImmediateWorkOnMainThreadLocked() { | |
192 for (internal::TaskQueueImpl* queue : has_incomming_immediate_work_) { | |
193 queue->OnIncomingImmediateTaskAvailable(); | |
194 } | |
195 has_incomming_immediate_work_.clear(); | |
166 } | 196 } |
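For orientation, here is a minimal sketch of the handoff that OnQueueHasImmediateWork() and NotifyQueuesOfIncomingImmediateWorkOnMainThreadLocked() implement: any thread records which queues received an immediate task, and the main thread later drains that set under the same lock and tells each queue to reload its work queue. std::mutex and std::set stand in for the SpinLock and Chromium containers; QueueImpl and the method names are placeholders rather than the real types.

```cpp
#include <mutex>
#include <set>

struct QueueImpl {
  // In the real code this moves incoming immediate tasks onto the work queue.
  void OnIncomingImmediateTaskAvailable() {}
};

class WorkNotifier {
 public:
  // May be called from any thread when a queue receives an immediate task.
  void OnQueueHasImmediateWork(QueueImpl* queue) {
    std::lock_guard<std::mutex> guard(lock_);
    queues_with_incoming_work_.insert(queue);
    // A real implementation would also decide here whether a DoWork needs
    // to be posted (see MaybeScheduleImmediateWorkLocked in the diff).
  }

  // Called on the main thread at the start and end of DoWork. The caller must
  // already hold |lock_|, mirroring the *Locked() naming convention.
  void NotifyQueuesLocked() {
    for (QueueImpl* queue : queues_with_incoming_work_)
      queue->OnIncomingImmediateTaskAvailable();
    queues_with_incoming_work_.clear();
  }

 private:
  std::mutex lock_;
  std::set<QueueImpl*> queues_with_incoming_work_;
};
```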
167 | 197 |
168 void TaskQueueManager::MaybeScheduleImmediateWork( | 198 void TaskQueueManager::MaybeScheduleImmediateWork( |
169 const tracked_objects::Location& from_here) { | 199 const tracked_objects::Location& from_here) { |
170 bool on_main_thread = delegate_->BelongsToCurrentThread(); | 200 SpinLock::Guard guard(do_work_pending_lock_); |
171 // De-duplicate DoWork posts. | 201 MaybeScheduleImmediateWorkLocked(from_here); |
172 if (on_main_thread) { | 202 } |
173 if (!main_thread_pending_wakeups_.insert(base::TimeTicks()).second) { | 203 |
174 return; | 204 void TaskQueueManager::MaybeScheduleImmediateWorkLocked( |
175 } | 205 const tracked_objects::Location& from_here) { |
Sami
2016/12/05 17:52:54
Can we check that a spinlock is held? Not sure...
alex clarke (OOO till 29th)
2016/12/06 17:37:54
Currently no it's not possible to do so in a good
Sami
2016/12/07 16:13:42
I was thinking something like a checked spinlock t
| |
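A rough sketch of the "checked spinlock" idea floated in the thread above: a lock that remembers its owning thread so *Locked() helpers can assert the lock is really held. This uses standard C++ atomics rather than Chromium's base types, the class and method names are illustrative only, and a real implementation would likely compile the owner bookkeeping out of release builds.

```cpp
#include <atomic>
#include <cassert>
#include <thread>

class CheckedSpinLock {
 public:
  void Acquire() {
    // Spin until the previous holder releases the lock.
    while (locked_.exchange(true, std::memory_order_acquire)) {
    }
    owner_.store(std::this_thread::get_id(), std::memory_order_relaxed);
  }

  void Release() {
    owner_.store(std::thread::id(), std::memory_order_relaxed);
    locked_.store(false, std::memory_order_release);
  }

  // Callable from FooLocked()-style methods to catch callers that forgot to
  // take the lock. assert() already compiles out in release builds.
  void AssertAcquired() const {
    assert(owner_.load(std::memory_order_relaxed) == std::this_thread::get_id());
  }

 private:
  std::atomic<bool> locked_{false};
  std::atomic<std::thread::id> owner_{std::thread::id()};
};
```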
176 delegate_->PostTask(from_here, from_main_thread_immediate_do_work_closure_); | 206 // Unless we're nested, try to avoid posting redundant DoWorks. |
177 } else { | 207 if (!is_nested_ && |
178 { | 208 (do_work_running_count_ == 1 || immediate_do_work_posted_count_ > 0)) { |
179 base::AutoLock lock(other_thread_lock_); | 209 return; |
180 if (other_thread_pending_wakeup_) | |
181 return; | |
182 other_thread_pending_wakeup_ = true; | |
183 } | |
184 delegate_->PostTask(from_here, | |
185 from_other_thread_immediate_do_work_closure_); | |
186 } | 210 } |
211 | |
212 delegate_->PostTask(from_here, immediate_do_work_closure_); | |
213 immediate_do_work_posted_count_++; | |
187 } | 214 } |
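The condition in MaybeScheduleImmediateWorkLocked() is easier to read spelled out as a predicate. The sketch below mirrors the rule with stand-in names (illustrative, not the production code): when nested, always post, because the nested loop cannot rely on the outer DoWork's continuation; otherwise skip the post if a non-nested DoWork is already running (it posts its own continuation) or an immediate DoWork is already in flight.

```cpp
// Names mirror the members in the diff but this is only a sketch.
struct DoWorkBookkeeping {
  int do_work_running_count = 0;           // DoWork frames currently on the stack.
  int immediate_do_work_posted_count = 0;  // Immediate DoWorks posted but not yet run.
  bool is_nested = false;                  // Inside a nested message loop?
};

bool ShouldPostImmediateDoWork(const DoWorkBookkeeping& state) {
  if (state.is_nested)
    return true;   // Nested loops can't rely on the outer DoWork's continuation.
  if (state.do_work_running_count == 1)
    return false;  // The running DoWork will post a continuation if needed.
  if (state.immediate_do_work_posted_count > 0)
    return false;  // One is already queued; posting another is redundant.
  return true;
}
```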
188 | 215 |
189 void TaskQueueManager::MaybeScheduleDelayedWork( | 216 void TaskQueueManager::MaybeScheduleDelayedWork( |
190 const tracked_objects::Location& from_here, | 217 const tracked_objects::Location& from_here, |
191 base::TimeTicks now, | 218 base::TimeTicks now, |
192 base::TimeDelta delay) { | 219 base::TimeDelta delay) { |
193 DCHECK(main_thread_checker_.CalledOnValidThread()); | 220 DCHECK(main_thread_checker_.CalledOnValidThread()); |
221 { | |
222 SpinLock::Guard guard(do_work_pending_lock_); | |
223 | |
224 // If there's a pending immediate DoWork then we rely on the logic in DoWork | |
225 // to post a continuation as needed. | |
226 if (immediate_do_work_posted_count_ > 0) | |
227 return; | |
228 | |
229 // If a non-nested DoWork is running we can also rely on the logic in DoWork | |
230 // to post a continuation as needed. | |
231 if (do_work_running_count_ == 1 && !is_nested_) | |
232 return; | |
233 } | |
234 MaybeScheduleDelayedWorkInternal(from_here, now, delay); | |
235 } | |
236 | |
237 void TaskQueueManager::MaybeScheduleDelayedWorkInternal( | |
238 const tracked_objects::Location& from_here, | |
239 base::TimeTicks now, | |
240 base::TimeDelta delay) { | |
194 DCHECK_GE(delay, base::TimeDelta()); | 241 DCHECK_GE(delay, base::TimeDelta()); |
195 | |
196 // If there's a pending immediate DoWork then we rely on | |
197 // TryAdvanceTimeDomains getting the TimeDomain to call | |
198 // MaybeScheduleDelayedWork again when the immediate DoWork is complete. | |
199 if (main_thread_pending_wakeups_.find(base::TimeTicks()) != | |
200 main_thread_pending_wakeups_.end()) { | |
201 return; | |
202 } | |
203 // De-duplicate DoWork posts. | 242 // De-duplicate DoWork posts. |
204 base::TimeTicks run_time = now + delay; | 243 base::TimeTicks run_time = now + delay; |
205 if (!main_thread_pending_wakeups_.empty() && | 244 if (next_delayed_do_work_ <= run_time && !next_delayed_do_work_.is_null()) |
206 *main_thread_pending_wakeups_.begin() <= run_time) { | |
207 return; | 245 return; |
208 } | 246 |
209 main_thread_pending_wakeups_.insert(run_time); | 247 cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_); |
248 next_delayed_do_work_ = run_time; | |
210 delegate_->PostDelayedTask( | 249 delegate_->PostDelayedTask( |
211 from_here, base::Bind(&TaskQueueManager::DoWork, | 250 from_here, cancelable_delayed_do_work_closure_.callback(), delay); |
212 weak_factory_.GetWeakPtr(), run_time, true), | |
213 delay); | |
214 } | 251 } |
215 | 252 |
216 void TaskQueueManager::DoWork(base::TimeTicks run_time, bool from_main_thread) { | 253 void TaskQueueManager::DoWork(bool delayed) { |
Sami
2016/12/05 17:52:54
meta-comment: This function is getting long -- may
alex clarke (OOO till 29th)
2016/12/06 17:37:54
Done.
| |
217 DCHECK(main_thread_checker_.CalledOnValidThread()); | 254 DCHECK(main_thread_checker_.CalledOnValidThread()); |
218 TRACE_EVENT1(tracing_category_, "TaskQueueManager::DoWork", | 255 TRACE_EVENT1(tracing_category_, "TaskQueueManager::DoWork", "delayed", |
219 "from_main_thread", from_main_thread); | 256 delayed ? "true" : "false"); |
Sami
2016/12/05 17:52:54
nit: you can just pass |delayed| here and tracing
alex clarke (OOO till 29th)
2016/12/06 17:37:54
Done.
| |
220 | |
221 if (from_main_thread) { | |
222 main_thread_pending_wakeups_.erase(run_time); | |
223 } else { | |
224 base::AutoLock lock(other_thread_lock_); | |
225 other_thread_pending_wakeup_ = false; | |
226 } | |
227 | |
228 // Posting a DoWork while a DoWork is running leads to spurious DoWorks. | |
229 main_thread_pending_wakeups_.insert(base::TimeTicks()); | |
230 | |
231 if (!delegate_->IsNested()) | |
232 queues_to_delete_.clear(); | |
233 | |
234 LazyNow lazy_now(real_time_domain()->CreateLazyNow()); | 257 LazyNow lazy_now(real_time_domain()->CreateLazyNow()); |
235 base::TimeTicks task_start_time; | 258 base::TimeTicks task_start_time; |
236 | 259 |
237 if (!delegate_->IsNested() && task_time_observers_.might_have_observers()) | 260 bool is_nested = delegate_->IsNested(); |
238 task_start_time = lazy_now.Now(); | 261 if (!delegate_->IsNested()) { |
262 if (task_time_observers_.might_have_observers()) | |
263 task_start_time = lazy_now.Now(); | |
239 | 264 |
240 UpdateWorkQueues(lazy_now); | 265 queues_to_delete_.clear(); |
266 } | |
241 | 267 |
242 for (int i = 0; i < work_batch_size_; i++) { | 268 for (int i = 0; i < work_batch_size_; i++) { |
243 internal::WorkQueue* work_queue; | 269 { |
270 SpinLock::Guard guard(do_work_pending_lock_); | |
271 | |
272 is_nested_ = is_nested; | |
273 DCHECK_EQ(is_nested_, delegate_->IsNested()); | |
274 | |
275 if (i == 0) { | |
276 do_work_running_count_++; | |
277 | |
278 if (!delayed) { | |
279 immediate_do_work_posted_count_--; | |
280 DCHECK_GE(immediate_do_work_posted_count_, 0); | |
281 } | |
282 } | |
283 | |
284 NotifyQueuesOfIncomingImmediateWorkOnMainThreadLocked(); | |
285 } | |
286 | |
287 UpdateWorkQueues(&lazy_now); | |
288 | |
289 internal::WorkQueue* work_queue = nullptr; | |
244 if (!SelectWorkQueueToService(&work_queue)) | 290 if (!SelectWorkQueueToService(&work_queue)) |
245 break; | 291 break; |
246 | 292 |
247 // TaskQueueManager guarantees that task queue will not be deleted | 293 // TaskQueueManager guarantees that task queue will not be deleted |
248 // when we are in DoWork (but WorkQueue may be deleted). | 294 // when we are in DoWork (but WorkQueue may be deleted). |
249 internal::TaskQueueImpl* task_queue = work_queue->task_queue(); | 295 internal::TaskQueueImpl* task_queue = work_queue->task_queue(); |
250 | 296 |
251 switch (ProcessTaskFromWorkQueue(work_queue)) { | 297 switch (ProcessTaskFromWorkQueue(work_queue)) { |
252 case ProcessTaskResult::DEFERRED: | 298 case ProcessTaskResult::DEFERRED: |
253 // If a task was deferred, try again with another task. | 299 // If a task was deferred, try again with another task. |
254 continue; | 300 continue; |
255 case ProcessTaskResult::EXECUTED: | 301 case ProcessTaskResult::EXECUTED: |
256 break; | 302 break; |
257 case ProcessTaskResult::TASK_QUEUE_MANAGER_DELETED: | 303 case ProcessTaskResult::TASK_QUEUE_MANAGER_DELETED: |
258 return; // The TaskQueueManager got deleted, we must bail out. | 304 return; // The TaskQueueManager got deleted, we must bail out. |
259 } | 305 } |
260 | 306 |
261 lazy_now = real_time_domain()->CreateLazyNow(); | 307 lazy_now = real_time_domain()->CreateLazyNow(); |
262 if (!delegate_->IsNested() && task_start_time != base::TimeTicks()) { | 308 if (!is_nested && task_start_time != base::TimeTicks()) { |
263 // Only report top level task durations. | 309 // Only report top level task durations. |
264 base::TimeTicks task_end_time = lazy_now.Now(); | 310 base::TimeTicks task_end_time = lazy_now.Now(); |
265 for (auto& observer : task_time_observers_) { | 311 for (auto& observer : task_time_observers_) { |
266 observer.ReportTaskTime(task_queue, | 312 observer.ReportTaskTime(task_queue, |
267 MonotonicTimeInSeconds(task_start_time), | 313 MonotonicTimeInSeconds(task_start_time), |
268 MonotonicTimeInSeconds(task_end_time)); | 314 MonotonicTimeInSeconds(task_end_time)); |
269 } | 315 } |
270 task_start_time = task_end_time; | 316 task_start_time = task_end_time; |
271 } | 317 } |
272 | 318 |
273 work_queue = nullptr; // The queue may have been unregistered. | 319 work_queue = nullptr; // The queue may have been unregistered. |
274 | 320 |
275 UpdateWorkQueues(lazy_now); | |
276 | |
277 // Only run a single task per batch in nested run loops so that we can | 321 // Only run a single task per batch in nested run loops so that we can |
278 // properly exit the nested loop when someone calls RunLoop::Quit(). | 322 // properly exit the nested loop when someone calls RunLoop::Quit(). |
279 if (delegate_->IsNested()) | 323 if (is_nested) |
280 break; | 324 break; |
281 } | 325 } |
282 | 326 |
283 main_thread_pending_wakeups_.erase(base::TimeTicks()); | |
284 | |
285 // TODO(alexclarke): Consider refactoring the above loop to terminate only | 327 // TODO(alexclarke): Consider refactoring the above loop to terminate only |
286 // when there's no more work left to be done, rather than posting a | 328 // when there's no more work left to be done, rather than posting a |
287 // continuation task. | 329 // continuation task. |
288 if (!selector_.EnabledWorkQueuesEmpty() || TryAdvanceTimeDomains()) | 330 if (delayed) |
289 MaybeScheduleImmediateWork(FROM_HERE); | 331 next_delayed_do_work_ = base::TimeTicks(); |
332 | |
333 LazyNow continuation_lazy_now(real_time_domain()->CreateLazyNow()); | |
334 UpdateWorkQueues(&continuation_lazy_now); | |
335 | |
336 SpinLock::Guard guard(do_work_pending_lock_); | |
337 NotifyQueuesOfIncomingImmediateWorkOnMainThreadLocked(); | |
338 base::Optional<base::TimeDelta> next_delay = | |
339 DelayTillNextTask(&continuation_lazy_now); | |
340 | |
341 do_work_running_count_--; | |
342 DCHECK_GE(do_work_running_count_, 0); | |
343 | |
344 is_nested_ = is_nested; | |
345 DCHECK_EQ(is_nested_, delegate_->IsNested()); | |
346 | |
347 // If there are no tasks left then we don't need to post a continuation. | |
348 if (!next_delay) | |
349 return; | |
350 | |
351 // If either an immediate DoWork or a delayed DoWork is pending then we don't | |
352 // need to post a continuation. | |
353 if (immediate_do_work_posted_count_ > 0 || | |
354 (!next_delayed_do_work_.is_null() && | |
355 next_delayed_do_work_ < continuation_lazy_now.Now())) { | |
356 return; | |
357 } | |
358 | |
359 // Post a continuation task based on the delay till the next task. | |
360 if (next_delay.value().is_zero()) { | |
361 delegate_->PostTask(FROM_HERE, immediate_do_work_closure_); | |
362 immediate_do_work_posted_count_++; | |
363 } else { | |
364 MaybeScheduleDelayedWorkInternal(FROM_HERE, lazy_now.Now(), | |
365 next_delay.value()); | |
366 } | |
290 } | 367 } |
291 | 368 |
292 bool TaskQueueManager::TryAdvanceTimeDomains() { | 369 base::Optional<base::TimeDelta> TaskQueueManager::DelayTillNextTask( |
293 bool can_advance = false; | 370 LazyNow* lazy_now) { |
371 // If the selector has non-empty queues we trivially know there is immediate | |
372 // work to be done. |
373 if (!selector_.EnabledWorkQueuesEmpty()) | |
374 return base::TimeDelta(); | |
375 | |
376 // Otherwise we need to find the shortest delay, if any. | |
377 base::Optional<base::TimeDelta> next_continuation; | |
294 for (TimeDomain* time_domain : time_domains_) { | 378 for (TimeDomain* time_domain : time_domains_) { |
295 can_advance |= time_domain->MaybeAdvanceTime(); | 379 base::Optional<base::TimeDelta> continuation = |
380 time_domain->DelayTillNextTask(lazy_now); | |
381 if (!continuation) | |
382 continue; | |
383 if (!next_continuation || continuation.value() < next_continuation.value()) |
384 next_continuation = continuation; | |
296 } | 385 } |
297 return can_advance; | 386 return next_continuation; |
298 } | 387 } |
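DelayTillNextTask() boils down to "immediate work wins, otherwise take the shortest delay across time domains, otherwise report nothing to do". Below is a standalone sketch of that reduction, using std::optional and std::chrono in place of base::Optional and base::TimeDelta; the function and parameter names are hypothetical.

```cpp
#include <chrono>
#include <optional>
#include <vector>

using Delay = std::chrono::milliseconds;

std::optional<Delay> DelayTillNextTask(
    const std::vector<std::optional<Delay>>& per_domain_delays,
    bool has_immediate_work) {
  // Immediate work trumps everything: a zero delay tells the caller to post
  // an immediate continuation rather than a delayed one.
  if (has_immediate_work)
    return Delay(0);

  // Otherwise pick the shortest delay, skipping domains with no delayed work.
  std::optional<Delay> shortest;
  for (const auto& delay : per_domain_delays) {
    if (!delay)
      continue;
    if (!shortest || *delay < *shortest)
      shortest = delay;
  }
  return shortest;  // std::nullopt => no continuation needs to be posted.
}
```

In DoWork() above, a zero result leads to posting the immediate closure, a positive delay goes through MaybeScheduleDelayedWorkInternal(), and an empty result means no continuation is posted at all.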
299 | 388 |
300 bool TaskQueueManager::SelectWorkQueueToService( | 389 bool TaskQueueManager::SelectWorkQueueToService( |
301 internal::WorkQueue** out_work_queue) { | 390 internal::WorkQueue** out_work_queue) { |
302 bool should_run = selector_.SelectWorkQueueToService(out_work_queue); | 391 bool should_run = selector_.SelectWorkQueueToService(out_work_queue); |
303 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID( | 392 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID( |
304 disabled_by_default_tracing_category_, "TaskQueueManager", this, | 393 disabled_by_default_tracing_category_, "TaskQueueManager", this, |
305 AsValueWithSelectorResult(should_run, *out_work_queue)); | 394 AsValueWithSelectorResult(should_run, *out_work_queue)); |
306 return should_run; | 395 return should_run; |
307 } | 396 } |
(...skipping 11 matching lines...) |
319 work_queue->TakeTaskFromWorkQueue(); | 408 work_queue->TakeTaskFromWorkQueue(); |
320 | 409 |
321 // It's possible the task was canceled, if so bail out. | 410 // It's possible the task was canceled, if so bail out. |
322 if (pending_task.task.IsCancelled()) | 411 if (pending_task.task.IsCancelled()) |
323 return ProcessTaskResult::EXECUTED; | 412 return ProcessTaskResult::EXECUTED; |
324 | 413 |
325 internal::TaskQueueImpl* queue = work_queue->task_queue(); | 414 internal::TaskQueueImpl* queue = work_queue->task_queue(); |
326 if (queue->GetQuiescenceMonitored()) | 415 if (queue->GetQuiescenceMonitored()) |
327 task_was_run_on_quiescence_monitored_queue_ = true; | 416 task_was_run_on_quiescence_monitored_queue_ = true; |
328 | 417 |
329 if (!pending_task.nestable && delegate_->IsNested()) { | 418 DCHECK_EQ(is_nested_, delegate_->IsNested()); |
419 if (!pending_task.nestable && is_nested_) { | |
330 // Defer non-nestable work to the main task runner. NOTE these tasks can be | 420 // Defer non-nestable work to the main task runner. NOTE these tasks can be |
331 // arbitrarily delayed so the additional delay should not be a problem. | 421 // arbitrarily delayed so the additional delay should not be a problem. |
332 // TODO(skyostil): Figure out a way to not forget which task queue the | 422 // TODO(skyostil): Figure out a way to not forget which task queue the |
333 // task is associated with. See http://crbug.com/522843. | 423 // task is associated with. See http://crbug.com/522843. |
334 // TODO(tzik): Remove base::UnsafeConvertOnceClosureToRepeating once | 424 // TODO(tzik): Remove base::UnsafeConvertOnceClosureToRepeating once |
335 // TaskRunners have migrated to OnceClosure. | 425 // TaskRunners have migrated to OnceClosure. |
336 delegate_->PostNonNestableTask( | 426 delegate_->PostNonNestableTask( |
337 pending_task.posted_from, | 427 pending_task.posted_from, |
338 UnsafeConvertOnceClosureToRepeating(std::move(pending_task.task))); | 428 UnsafeConvertOnceClosureToRepeating(std::move(pending_task.task))); |
339 return ProcessTaskResult::DEFERRED; | 429 return ProcessTaskResult::DEFERRED; |
340 } | 430 } |
341 | 431 |
342 MaybeRecordTaskDelayHistograms(pending_task, queue); | 432 if (record_task_delay_histograms_) |
433 MaybeRecordTaskDelayHistograms(pending_task, queue); | |
343 | 434 |
344 TRACE_TASK_EXECUTION("TaskQueueManager::ProcessTaskFromWorkQueue", | 435 TRACE_TASK_EXECUTION("TaskQueueManager::ProcessTaskFromWorkQueue", |
345 pending_task); | 436 pending_task); |
346 if (queue->GetShouldNotifyObservers()) { | 437 if (queue->GetShouldNotifyObservers()) { |
347 for (auto& observer : task_observers_) | 438 for (auto& observer : task_observers_) |
348 observer.WillProcessTask(pending_task); | 439 observer.WillProcessTask(pending_task); |
349 queue->NotifyWillProcessTask(pending_task); | 440 queue->NotifyWillProcessTask(pending_task); |
350 } | 441 } |
351 TRACE_EVENT1(tracing_category_, "TaskQueueManager::RunTask", "queue", | 442 TRACE_EVENT1(tracing_category_, "TaskQueueManager::RunTask", "queue", |
352 queue->GetName()); | 443 queue->GetName()); |
(...skipping 112 matching lines...) |
465 if (should_run) { | 556 if (should_run) { |
466 state->SetString("selected_queue", | 557 state->SetString("selected_queue", |
467 selected_work_queue->task_queue()->GetName()); | 558 selected_work_queue->task_queue()->GetName()); |
468 state->SetString("work_queue_name", selected_work_queue->name()); | 559 state->SetString("work_queue_name", selected_work_queue->name()); |
469 } | 560 } |
470 | 561 |
471 state->BeginArray("time_domains"); | 562 state->BeginArray("time_domains"); |
472 for (auto* time_domain : time_domains_) | 563 for (auto* time_domain : time_domains_) |
473 time_domain->AsValueInto(state.get()); | 564 time_domain->AsValueInto(state.get()); |
474 state->EndArray(); | 565 state->EndArray(); |
566 | |
567 state->SetBoolean("is_nested", is_nested_); | |
568 | |
569 SpinLock::Guard guard(do_work_pending_lock_); | |
570 state->SetInteger("do_work_count", do_work_running_count_); | |
571 state->SetInteger("immediate_do_work_posted", | |
572 immediate_do_work_posted_count_); | |
475 return std::move(state); | 573 return std::move(state); |
476 } | 574 } |
477 | 575 |
478 void TaskQueueManager::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) { | 576 void TaskQueueManager::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) { |
479 DCHECK(main_thread_checker_.CalledOnValidThread()); | 577 DCHECK(main_thread_checker_.CalledOnValidThread()); |
480 // Only schedule DoWork if there's something to do. | 578 // Only schedule DoWork if there's something to do. |
481 if (queue->HasPendingImmediateWork()) | 579 if (queue->HasPendingImmediateWork()) |
482 MaybeScheduleImmediateWork(FROM_HERE); | 580 MaybeScheduleImmediateWork(FROM_HERE); |
483 } | 581 } |
484 | 582 |
485 void TaskQueueManager::OnTriedToSelectBlockedWorkQueue( | 583 void TaskQueueManager::OnTriedToSelectBlockedWorkQueue( |
486 internal::WorkQueue* work_queue) { | 584 internal::WorkQueue* work_queue) { |
487 DCHECK(main_thread_checker_.CalledOnValidThread()); | 585 DCHECK(main_thread_checker_.CalledOnValidThread()); |
488 DCHECK(!work_queue->Empty()); | 586 DCHECK(!work_queue->Empty()); |
489 if (observer_) { | 587 if (observer_) { |
490 observer_->OnTriedToExecuteBlockedTask(*work_queue->task_queue(), | 588 observer_->OnTriedToExecuteBlockedTask(*work_queue->task_queue(), |
491 *work_queue->GetFrontTask()); | 589 *work_queue->GetFrontTask()); |
492 } | 590 } |
493 } | 591 } |
494 | 592 |
495 bool TaskQueueManager::HasImmediateWorkForTesting() const { | 593 bool TaskQueueManager::HasImmediateWorkForTesting() const { |
496 return !selector_.EnabledWorkQueuesEmpty(); | 594 return !selector_.EnabledWorkQueuesEmpty(); |
497 } | 595 } |
498 | 596 |
597 void TaskQueueManager::SetRecordTaskDelayHistograms( | |
598 bool record_task_delay_histograms) { | |
599 DCHECK(main_thread_checker_.CalledOnValidThread()); | |
600 record_task_delay_histograms_ = record_task_delay_histograms; | |
601 } | |
602 | |
499 } // namespace scheduler | 603 } // namespace scheduler |
500 } // namespace blink | 604 } // namespace blink |