Chromium Code Reviews

Unified Diff: third_party/WebKit/Source/platform/scheduler/base/task_queue_manager.cc

Issue 2546423002: [Try # 3] Scheduler refactoring to virtually eliminate redundant DoWorks (Closed)
Patch Set: Add an extra dcheck (created 3 years, 11 months ago)
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "platform/scheduler/base/task_queue_manager.h"

 #include <queue>
 #include <set>

 #include "base/bind.h"
(...skipping 40 matching lines...)
 }

 TaskQueueManager::TaskQueueManager(
     scoped_refptr<TaskQueueManagerDelegate> delegate,
     const char* tracing_category,
     const char* disabled_by_default_tracing_category,
     const char* disabled_by_default_verbose_tracing_category)
     : real_time_domain_(new RealTimeDomain(tracing_category)),
       delegate_(delegate),
       task_was_run_on_quiescence_monitored_queue_(false),
-      other_thread_pending_wakeup_(false),
+      record_task_delay_histograms_(true),
       work_batch_size_(1),
       task_count_(0),
       tracing_category_(tracing_category),
       disabled_by_default_tracing_category_(
           disabled_by_default_tracing_category),
       disabled_by_default_verbose_tracing_category_(
           disabled_by_default_verbose_tracing_category),
       currently_executing_task_queue_(nullptr),
       observer_(nullptr),
       deletion_sentinel_(new DeletionSentinel()),
       weak_factory_(this) {
   DCHECK(delegate->RunsTasksOnCurrentThread());
   TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category,
                                      "TaskQueueManager", this);
   selector_.SetTaskQueueSelectorObserver(this);

-  from_main_thread_immediate_do_work_closure_ =
-      base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(),
-                 base::TimeTicks(), true);
-  from_other_thread_immediate_do_work_closure_ =
-      base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(),
-                 base::TimeTicks(), false);
+  delayed_do_work_closure_ =
+      base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), true);
+  immediate_do_work_closure_ =
+      base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), false);

   // TODO(alexclarke): Change this to be a parameter that's passed in.
   RegisterTimeDomain(real_time_domain_.get());

   delegate_->AddNestingObserver(this);
 }

 TaskQueueManager::~TaskQueueManager() {
   TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_,
                                      "TaskQueueManager", this);

   while (!queues_.empty())
     (*queues_.begin())->UnregisterTaskQueue();

   selector_.SetTaskQueueSelectorObserver(nullptr);

   delegate_->RemoveNestingObserver(this);
 }

+TaskQueueManager::AnyThread::AnyThread()
+    : do_work_running_count(0),
+      immediate_do_work_posted_count(0),
+      is_nested(false) {}
+
 void TaskQueueManager::RegisterTimeDomain(TimeDomain* time_domain) {
   time_domains_.insert(time_domain);
   time_domain->OnRegisterWithTaskQueueManager(this);
 }

 void TaskQueueManager::UnregisterTimeDomain(TimeDomain* time_domain) {
   time_domains_.erase(time_domain);
 }

 scoped_refptr<internal::TaskQueueImpl> TaskQueueManager::NewTaskQueue(
(...skipping 25 matching lines...)
   DCHECK(main_thread_checker_.CalledOnValidThread());
   if (observer_)
     observer_->OnUnregisterTaskQueue(task_queue);

   // Add |task_queue| to |queues_to_delete_| so we can prevent it from being
   // freed while any of our structures hold a raw pointer to it.
   queues_to_delete_.insert(task_queue);
   queues_.erase(task_queue);

   selector_.RemoveQueue(task_queue.get());
+
+  {
+    base::AutoLock lock(any_thread_lock_);
+    any_thread().has_incoming_immediate_work.erase(task_queue.get());
+  }
 }

-void TaskQueueManager::UpdateWorkQueues(LazyNow lazy_now) {
-  TRACE_EVENT0(disabled_by_default_tracing_category_,
-               "TaskQueueManager::UpdateWorkQueues");
+void TaskQueueManager::UpdateWorkQueues(
+    const std::set<internal::TaskQueueImpl*>* queues_to_reload,
+    LazyNow* lazy_now) {
+  if (queues_to_reload) {
+    for (internal::TaskQueueImpl* queue : *queues_to_reload) {
+      queue->ReloadImmediateWorkQueueIfEmpty();
+    }
+  }

   for (TimeDomain* time_domain : time_domains_) {
-    LazyNow lazy_now_in_domain = time_domain == real_time_domain_.get()
-                                     ? lazy_now
-                                     : time_domain->CreateLazyNow();
-    time_domain->UpdateWorkQueues(lazy_now_in_domain);
+    if (time_domain == real_time_domain_.get()) {
+      time_domain->WakeupReadyDelayedQueues(lazy_now);
+      continue;
+    }
+    LazyNow time_domain_lazy_now = time_domain->CreateLazyNow();
+    time_domain->WakeupReadyDelayedQueues(&time_domain_lazy_now);
   }
 }

 void TaskQueueManager::OnBeginNestedMessageLoop() {
   // We just entered a nested message loop, make sure there's a DoWork posted or
   // the system will grind to a halt.
-  delegate_->PostTask(FROM_HERE, from_main_thread_immediate_do_work_closure_);
+  {
+    base::AutoLock lock(any_thread_lock_);
+    any_thread().immediate_do_work_posted_count++;
+    any_thread().is_nested = true;
+  }
+  delegate_->PostTask(FROM_HERE, immediate_do_work_closure_);
+}
+
+void TaskQueueManager::OnQueueHasIncomingImmediateWork(
+    internal::TaskQueueImpl* queue,
+    bool ensure_do_work_posted) {
+  MoveableAutoLock lock(any_thread_lock_);
+  any_thread().has_incoming_immediate_work.insert(queue);
+  if (ensure_do_work_posted)
+    MaybeScheduleImmediateWorkLocked(FROM_HERE, std::move(lock));
 }

 void TaskQueueManager::MaybeScheduleImmediateWork(
     const tracked_objects::Location& from_here) {
-  bool on_main_thread = delegate_->BelongsToCurrentThread();
-  // De-duplicate DoWork posts.
-  if (on_main_thread) {
-    if (!main_thread_pending_wakeups_.insert(base::TimeTicks()).second) {
-      return;
-    }
-    delegate_->PostTask(from_here, from_main_thread_immediate_do_work_closure_);
-  } else {
-    {
-      base::AutoLock lock(other_thread_lock_);
-      if (other_thread_pending_wakeup_)
-        return;
-      other_thread_pending_wakeup_ = true;
-    }
-    delegate_->PostTask(from_here,
-                        from_other_thread_immediate_do_work_closure_);
-  }
+  MoveableAutoLock lock(any_thread_lock_);
+  MaybeScheduleImmediateWorkLocked(from_here, std::move(lock));
+}
+
+void TaskQueueManager::MaybeScheduleImmediateWorkLocked(
+    const tracked_objects::Location& from_here,
+    MoveableAutoLock&& lock) {
+  {
+    MoveableAutoLock auto_lock(std::move(lock));
+    // Unless we're nested, try to avoid posting redundant DoWorks.
+    if (!any_thread().is_nested &&
+        (any_thread().do_work_running_count == 1 ||
+         any_thread().immediate_do_work_posted_count > 0)) {
+      return;
+    }
+
+    any_thread().immediate_do_work_posted_count++;
+  }
+  delegate_->PostTask(from_here, immediate_do_work_closure_);
 }

 void TaskQueueManager::MaybeScheduleDelayedWork(
     const tracked_objects::Location& from_here,
     base::TimeTicks now,
     base::TimeDelta delay) {
   DCHECK(main_thread_checker_.CalledOnValidThread());
   DCHECK_GE(delay, base::TimeDelta());
+  {
+    base::AutoLock lock(any_thread_lock_);

-  // If there's a pending immediate DoWork then we rely on
-  // TryAdvanceTimeDomains getting the TimeDomain to call
-  // MaybeScheduleDelayedWork again when the immediate DoWork is complete.
-  if (main_thread_pending_wakeups_.find(base::TimeTicks()) !=
-      main_thread_pending_wakeups_.end()) {
-    return;
-  }
+    // Unless we're nested, don't post a delayed DoWork if there's an immediate
+    // DoWork in flight or we're inside a DoWork. We can rely on DoWork posting
+    // a delayed continuation as needed.
+    if (!any_thread().is_nested &&
+        (any_thread().immediate_do_work_posted_count > 0 ||
+         any_thread().do_work_running_count == 1)) {
+      return;
+    }
+  }

   // De-duplicate DoWork posts.
   base::TimeTicks run_time = now + delay;
-  if (!main_thread_pending_wakeups_.empty() &&
-      *main_thread_pending_wakeups_.begin() <= run_time) {
+  if (next_delayed_do_work_ <= run_time && !next_delayed_do_work_.is_null())
     return;
-  }
-  main_thread_pending_wakeups_.insert(run_time);
+
+  TRACE_EVENT1(tracing_category_, "MaybeScheduleDelayedWorkInternal",
+               "delay_ms", delay.InMillisecondsF());
+
+  cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_);
+  next_delayed_do_work_ = run_time;
   delegate_->PostDelayedTask(
-      from_here, base::Bind(&TaskQueueManager::DoWork,
-                            weak_factory_.GetWeakPtr(), run_time, true),
-      delay);
+      from_here, cancelable_delayed_do_work_closure_.callback(), delay);
 }

-void TaskQueueManager::DoWork(base::TimeTicks run_time, bool from_main_thread) {
+void TaskQueueManager::DoWork(bool delayed) {
   DCHECK(main_thread_checker_.CalledOnValidThread());
-  TRACE_EVENT1(tracing_category_, "TaskQueueManager::DoWork",
-               "from_main_thread", from_main_thread);
+  TRACE_EVENT1(tracing_category_, "TaskQueueManager::DoWork", "delayed",
+               delayed);
+  LazyNow lazy_now(real_time_domain()->CreateLazyNow());

-  if (from_main_thread) {
-    main_thread_pending_wakeups_.erase(run_time);
-  } else {
-    base::AutoLock lock(other_thread_lock_);
-    other_thread_pending_wakeup_ = false;
-  }
-
-  // Posting a DoWork while a DoWork is running leads to spurious DoWorks.
-  main_thread_pending_wakeups_.insert(base::TimeTicks());
-
-  if (!delegate_->IsNested())
+  bool is_nested = delegate_->IsNested();
+  if (!is_nested)
     queues_to_delete_.clear();

-  LazyNow lazy_now(real_time_domain()->CreateLazyNow());
-  UpdateWorkQueues(lazy_now);
+  // This must be done before running any tasks because they could invoke a
+  // nested message loop and we risk having a stale |next_delayed_do_work_|.
+  if (delayed)
+    next_delayed_do_work_ = base::TimeTicks();

   for (int i = 0; i < work_batch_size_; i++) {
-    internal::WorkQueue* work_queue;
+    std::set<internal::TaskQueueImpl*> queues_to_reload;
+
+    {
+      base::AutoLock lock(any_thread_lock_);
+      any_thread().is_nested = is_nested;
+      DCHECK_EQ(any_thread().is_nested, delegate_->IsNested());
+
+      if (i == 0) {
+        any_thread().do_work_running_count++;
+
+        if (!delayed) {
+          any_thread().immediate_do_work_posted_count--;
+          DCHECK_GE(any_thread().immediate_do_work_posted_count, 0);
+        }
+      }
+      std::swap(queues_to_reload, any_thread().has_incoming_immediate_work);
+    }
+
+    // It's important we call UpdateWorkQueues outside of the lock to avoid a
+    // lock order inversion.
+    UpdateWorkQueues(&queues_to_reload, &lazy_now);
+
+    internal::WorkQueue* work_queue = nullptr;
     if (!SelectWorkQueueToService(&work_queue))
       break;

-    switch (ProcessTaskFromWorkQueue(work_queue, &lazy_now)) {
+    // NB this may unregister the queue.
+    switch (ProcessTaskFromWorkQueue(work_queue, is_nested, &lazy_now)) {
       case ProcessTaskResult::DEFERRED:
         // If a task was deferred, try again with another task.
         continue;
       case ProcessTaskResult::EXECUTED:
         break;
       case ProcessTaskResult::TASK_QUEUE_MANAGER_DELETED:
         return;  // The TaskQueueManager got deleted, we must bail out.
     }

-    work_queue = nullptr;  // The queue may have been unregistered.
-
-    UpdateWorkQueues(lazy_now);
-
     // Only run a single task per batch in nested run loops so that we can
     // properly exit the nested loop when someone calls RunLoop::Quit().
-    if (delegate_->IsNested())
+    if (is_nested)
       break;
   }

-  main_thread_pending_wakeups_.erase(base::TimeTicks());
-
   // TODO(alexclarke): Consider refactoring the above loop to terminate only
   // when there's no more work left to be done, rather than posting a
   // continuation task.
-  if (!selector_.EnabledWorkQueuesEmpty() || TryAdvanceTimeDomains())
-    MaybeScheduleImmediateWork(FROM_HERE);
+
+  {
+    MoveableAutoLock lock(any_thread_lock_);
+    base::Optional<base::TimeDelta> next_delay =
+        ComputeDelayTillNextTaskLocked(&lazy_now);
+
+    any_thread().do_work_running_count--;
+    DCHECK_GE(any_thread().do_work_running_count, 0);
+
+    any_thread().is_nested = is_nested;
+    DCHECK_EQ(any_thread().is_nested, delegate_->IsNested());
+
+    PostDoWorkContinuationLocked(next_delay, &lazy_now, std::move(lock));
+  }
 }

-bool TaskQueueManager::TryAdvanceTimeDomains() {
-  bool can_advance = false;
-  for (TimeDomain* time_domain : time_domains_) {
-    can_advance |= time_domain->MaybeAdvanceTime();
-  }
-  return can_advance;
-}
+void TaskQueueManager::PostDoWorkContinuationLocked(

altimin 2017/01/17 17:25:54: Can you please add a DCHECK that we're on the main thread?
alex clarke (OOO till 29th) 2017/01/25 11:22:24: Done.

+    base::Optional<base::TimeDelta> next_delay,
+    LazyNow* lazy_now,
+    MoveableAutoLock&& lock) {
+  base::TimeDelta delay;
+
+  {
+    MoveableAutoLock auto_lock(std::move(lock));
+
+    // If there are no tasks left then we don't need to post a continuation.
+    if (!next_delay) {
+      // If there's a pending delayed DoWork, cancel it because it's not needed.
+      if (!next_delayed_do_work_.is_null()) {
+        next_delayed_do_work_ = base::TimeTicks();
+        cancelable_delayed_do_work_closure_.Cancel();
+      }
+      return;
+    }
+
+    // If an immediate DoWork is posted, we don't need to post a continuation.
+    if (any_thread().immediate_do_work_posted_count > 0)
+      return;
+
+    delay = next_delay.value();
+
+    // This isn't supposed to happen, but in case it does, convert to
+    // non-delayed.
+    if (delay < base::TimeDelta())
+      delay = base::TimeDelta();
+
+    if (delay.is_zero()) {
+      // If a delayed DoWork is pending then we don't need to post a
+      // continuation because it should run immediately.
+      if (!next_delayed_do_work_.is_null() &&
+          next_delayed_do_work_ <= lazy_now->Now()) {
+        return;
+      }
+
+      any_thread().immediate_do_work_posted_count++;
+    } else {
+      base::TimeTicks run_time = lazy_now->Now() + delay;
+      if (next_delayed_do_work_ == run_time)
+        return;
+
+      next_delayed_do_work_ = run_time;
+    }
+  }
+
+  // We avoid holding |any_thread_lock_| while posting the task.
+  if (delay.is_zero()) {
+    delegate_->PostTask(FROM_HERE, immediate_do_work_closure_);
+  } else {
+    cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_);
+    delegate_->PostDelayedTask(
+        FROM_HERE, cancelable_delayed_do_work_closure_.callback(), delay);
+  }
+}
+
+base::Optional<base::TimeDelta>
+TaskQueueManager::ComputeDelayTillNextTaskLocked(LazyNow* lazy_now) {
+  // If the selector has non-empty queues we trivially know there is immediate
+  // work to be done.
+  if (!selector_.EnabledWorkQueuesEmpty())
+    return base::TimeDelta();
+
+  // If we have incoming immediate work for any queue able to run, we know there
+  // is immediate work to be done.
+  for (internal::TaskQueueImpl* queue :
+       any_thread().has_incoming_immediate_work) {
+    // Note when posted ImmediateTaskCouldRun would have returned true but
+    // conditions may have changed since then.
+    if (queue->ImmediateTaskCouldRun())
+      return base::TimeDelta();
+  }
+
+  // We don't process |any_thread().has_incoming_immediate_work| here because
+  // we'd need to go into and out of the lock again (to avoid lock order
+  // inversion) which seems overkill.
+  UpdateWorkQueues(nullptr, lazy_now);
+
+  // Otherwise we need to find the shortest delay, if any.
+  base::Optional<base::TimeDelta> next_continuation;
+  for (TimeDomain* time_domain : time_domains_) {
+    base::Optional<base::TimeDelta> continuation =
+        time_domain->DelayTillNextTask(lazy_now);
+    if (!continuation)
+      continue;
+    if (!next_continuation || next_continuation.value() > continuation.value())
+      next_continuation = continuation;
+  }
+  return next_continuation;
+}

 bool TaskQueueManager::SelectWorkQueueToService(
     internal::WorkQueue** out_work_queue) {
   bool should_run = selector_.SelectWorkQueueToService(out_work_queue);
   TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
       disabled_by_default_tracing_category_, "TaskQueueManager", this,
       AsValueWithSelectorResult(should_run, *out_work_queue));
   return should_run;
 }

 void TaskQueueManager::DidQueueTask(
     const internal::TaskQueueImpl::Task& pending_task) {
   task_annotator_.DidQueueTask("TaskQueueManager::PostTask", pending_task);
 }

 TaskQueueManager::ProcessTaskResult TaskQueueManager::ProcessTaskFromWorkQueue(
     internal::WorkQueue* work_queue,
+    bool is_nested,
     LazyNow* lazy_now) {
   DCHECK(main_thread_checker_.CalledOnValidThread());
   scoped_refptr<DeletionSentinel> protect(deletion_sentinel_);
   internal::TaskQueueImpl::Task pending_task =
       work_queue->TakeTaskFromWorkQueue();

   // It's possible the task was canceled, if so bail out.
   if (pending_task.task.IsCancelled())
     return ProcessTaskResult::EXECUTED;

   internal::TaskQueueImpl* queue = work_queue->task_queue();
   if (queue->GetQuiescenceMonitored())
     task_was_run_on_quiescence_monitored_queue_ = true;

-  if (!pending_task.nestable && delegate_->IsNested()) {
+  if (!pending_task.nestable && is_nested) {
     // Defer non-nestable work to the main task runner. NOTE these tasks can be
     // arbitrarily delayed so the additional delay should not be a problem.
     // TODO(skyostil): Figure out a way to not forget which task queue the
     // task is associated with. See http://crbug.com/522843.
     // TODO(tzik): Remove base::UnsafeConvertOnceClosureToRepeating once
     // TaskRunners have migrated to OnceClosure.
     delegate_->PostNonNestableTask(
         pending_task.posted_from,
         UnsafeConvertOnceClosureToRepeating(std::move(pending_task.task)));
     return ProcessTaskResult::DEFERRED;
   }

-  MaybeRecordTaskDelayHistograms(pending_task, queue);
+  if (record_task_delay_histograms_)
+    MaybeRecordTaskDelayHistograms(pending_task, queue);

   double task_start_time = 0;
   TRACE_TASK_EXECUTION("TaskQueueManager::ProcessTaskFromWorkQueue",
                        pending_task);
   if (queue->GetShouldNotifyObservers()) {
     for (auto& observer : task_observers_)
       observer.WillProcessTask(pending_task);
     queue->NotifyWillProcessTask(pending_task);

     bool notify_time_observers =
(...skipping 130 matching lines...)
   if (should_run) {
     state->SetString("selected_queue",
                      selected_work_queue->task_queue()->GetName());
     state->SetString("work_queue_name", selected_work_queue->name());
   }

   state->BeginArray("time_domains");
   for (auto* time_domain : time_domains_)
     time_domain->AsValueInto(state.get());
   state->EndArray();
+
+  {
+    base::AutoLock lock(any_thread_lock_);
+    state->SetBoolean("is_nested", any_thread().is_nested);
+    state->SetInteger("do_work_running_count",
+                      any_thread().do_work_running_count);
+    state->SetInteger("immediate_do_work_posted_count",
+                      any_thread().immediate_do_work_posted_count);
+
+    state->BeginArray("has_incoming_immediate_work");
+    for (internal::TaskQueueImpl* task_queue :
+         any_thread().has_incoming_immediate_work) {
+      state->AppendString(task_queue->GetName());
+    }
+    state->EndArray();
+  }
   return std::move(state);
 }

 void TaskQueueManager::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) {
   DCHECK(main_thread_checker_.CalledOnValidThread());
+  DCHECK(queue->IsQueueEnabled());
   // Only schedule DoWork if there's something to do.
-  if (queue->HasPendingImmediateWork())
+  if (queue->HasPendingImmediateWork() && !queue->BlockedByFence())
     MaybeScheduleImmediateWork(FROM_HERE);
 }

 void TaskQueueManager::OnTriedToSelectBlockedWorkQueue(
     internal::WorkQueue* work_queue) {
   DCHECK(main_thread_checker_.CalledOnValidThread());
   DCHECK(!work_queue->Empty());
   if (observer_) {
     observer_->OnTriedToExecuteBlockedTask(*work_queue->task_queue(),
                                            *work_queue->GetFrontTask());
   }
 }

 bool TaskQueueManager::HasImmediateWorkForTesting() const {
   return !selector_.EnabledWorkQueuesEmpty();
 }

+void TaskQueueManager::SetRecordTaskDelayHistograms(
+    bool record_task_delay_histograms) {
+  DCHECK(main_thread_checker_.CalledOnValidThread());
+  record_task_delay_histograms_ = record_task_delay_histograms;
+}
+
 void TaskQueueManager::SweepCanceledDelayedTasks() {
   std::map<TimeDomain*, base::TimeTicks> time_domain_now;
   for (const scoped_refptr<internal::TaskQueueImpl>& queue : queues_) {
     TimeDomain* time_domain = queue->GetTimeDomain();
     if (time_domain_now.find(time_domain) == time_domain_now.end())
       time_domain_now.insert(std::make_pair(time_domain, time_domain->Now()));
     queue->SweepCanceledDelayedTasks(time_domain_now[time_domain]);
   }
 }

 }  // namespace scheduler
 }  // namespace blink
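
At a glance, the patch replaces the old per-thread wakeup bookkeeping (main_thread_pending_wakeups_, other_thread_pending_wakeup_) with a single AnyThread struct guarded by any_thread_lock_: immediate_do_work_posted_count tracks DoWorks already queued, do_work_running_count tracks DoWorks on the stack, and a post is skipped when either makes it redundant. Below is a minimal, self-contained sketch of that counting scheme, not the Chromium code itself; DoWorkDeduplicator and its method names are illustrative stand-ins for the logic spread across MaybeScheduleImmediateWorkLocked and DoWork:

// Standalone sketch of the DoWork de-duplication counters (illustrative
// names, not Chromium APIs). Compiles with any C++11 compiler.
#include <cassert>
#include <mutex>

class DoWorkDeduplicator {
 public:
  // Returns true if the caller should really post an immediate DoWork.
  // Outside nested loops a post is redundant when one is already queued,
  // or when a DoWork is running (it will post its own continuation).
  bool ShouldPostImmediateDoWork(bool is_nested) {
    std::lock_guard<std::mutex> guard(lock_);
    if (!is_nested &&
        (do_work_running_count_ == 1 || immediate_do_work_posted_count_ > 0))
      return false;
    ++immediate_do_work_posted_count_;
    return true;
  }

  // Called at the start of a DoWork run; an immediate run consumes one
  // pending post.
  void OnDoWorkStarted(bool delayed) {
    std::lock_guard<std::mutex> guard(lock_);
    ++do_work_running_count_;
    if (!delayed) {
      --immediate_do_work_posted_count_;
      assert(immediate_do_work_posted_count_ >= 0);
    }
  }

  // Called when a DoWork run finishes.
  void OnDoWorkEnded() {
    std::lock_guard<std::mutex> guard(lock_);
    --do_work_running_count_;
    assert(do_work_running_count_ >= 0);
  }

 private:
  std::mutex lock_;
  int do_work_running_count_ = 0;
  int immediate_do_work_posted_count_ = 0;
};

int main() {
  DoWorkDeduplicator dedup;
  assert(dedup.ShouldPostImmediateDoWork(false));   // First post is allowed.
  assert(!dedup.ShouldPostImmediateDoWork(false));  // Duplicate suppressed.
  dedup.OnDoWorkStarted(/*delayed=*/false);         // The posted DoWork runs.
  assert(!dedup.ShouldPostImmediateDoWork(false));  // Running DoWork will post
                                                    // its own continuation.
  dedup.OnDoWorkEnded();
  assert(dedup.ShouldPostImmediateDoWork(false));   // A new post is needed.
  return 0;
}

Counts rather than booleans keep the bookkeeping balanced when nested message loops can have several DoWorks queued or on the stack at once, which also appears to be why the de-duplication is bypassed while is_nested is true.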
