Chromium Code Reviews

Side by Side Diff: third_party/WebKit/Source/platform/scheduler/base/task_queue_manager.cc

Issue 2546423002: [Try # 3] Scheduler refactoring to virtually eliminate redundant DoWorks (Closed)
Patch Set: Fix lock order inversion Created 4 years ago
1 // Copyright 2014 The Chromium Authors. All rights reserved. 1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "platform/scheduler/base/task_queue_manager.h" 5 #include "platform/scheduler/base/task_queue_manager.h"
6 6
7 #include <queue> 7 #include <queue>
8 #include <set> 8 #include <set>
9 9
10 #include "base/bind.h" 10 #include "base/bind.h"
(...skipping 40 matching lines...)
51 } 51 }
52 52
53 TaskQueueManager::TaskQueueManager( 53 TaskQueueManager::TaskQueueManager(
54 scoped_refptr<TaskQueueManagerDelegate> delegate, 54 scoped_refptr<TaskQueueManagerDelegate> delegate,
55 const char* tracing_category, 55 const char* tracing_category,
56 const char* disabled_by_default_tracing_category, 56 const char* disabled_by_default_tracing_category,
57 const char* disabled_by_default_verbose_tracing_category) 57 const char* disabled_by_default_verbose_tracing_category)
58 : real_time_domain_(new RealTimeDomain(tracing_category)), 58 : real_time_domain_(new RealTimeDomain(tracing_category)),
59 delegate_(delegate), 59 delegate_(delegate),
60 task_was_run_on_quiescence_monitored_queue_(false), 60 task_was_run_on_quiescence_monitored_queue_(false),
61 other_thread_pending_wakeup_(false), 61 record_task_delay_histograms_(true),
62 work_batch_size_(1), 62 work_batch_size_(1),
63 task_count_(0), 63 task_count_(0),
64 tracing_category_(tracing_category), 64 tracing_category_(tracing_category),
65 disabled_by_default_tracing_category_( 65 disabled_by_default_tracing_category_(
66 disabled_by_default_tracing_category), 66 disabled_by_default_tracing_category),
67 disabled_by_default_verbose_tracing_category_( 67 disabled_by_default_verbose_tracing_category_(
68 disabled_by_default_verbose_tracing_category), 68 disabled_by_default_verbose_tracing_category),
69 currently_executing_task_queue_(nullptr), 69 currently_executing_task_queue_(nullptr),
70 observer_(nullptr), 70 observer_(nullptr),
71 deletion_sentinel_(new DeletionSentinel()), 71 deletion_sentinel_(new DeletionSentinel()),
72 weak_factory_(this) { 72 weak_factory_(this) {
73 DCHECK(delegate->RunsTasksOnCurrentThread()); 73 DCHECK(delegate->RunsTasksOnCurrentThread());
74 TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category, 74 TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category,
75 "TaskQueueManager", this); 75 "TaskQueueManager", this);
76 selector_.SetTaskQueueSelectorObserver(this); 76 selector_.SetTaskQueueSelectorObserver(this);
77 77
78 from_main_thread_immediate_do_work_closure_ = 78 delayed_do_work_closure_ =
79 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), 79 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), true);
80 base::TimeTicks(), true); 80 immediate_do_work_closure_ =
81 from_other_thread_immediate_do_work_closure_ = 81 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), false);
82 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(),
83 base::TimeTicks(), false);
84 82
85 // TODO(alexclarke): Change this to be a parameter that's passed in. 83 // TODO(alexclarke): Change this to be a parameter that's passed in.
86 RegisterTimeDomain(real_time_domain_.get()); 84 RegisterTimeDomain(real_time_domain_.get());
87 85
88 delegate_->AddNestingObserver(this); 86 delegate_->AddNestingObserver(this);
89 } 87 }
90 88
91 TaskQueueManager::~TaskQueueManager() { 89 TaskQueueManager::~TaskQueueManager() {
92 TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_, 90 TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_,
93 "TaskQueueManager", this); 91 "TaskQueueManager", this);
94 92
95 while (!queues_.empty()) 93 while (!queues_.empty())
96 (*queues_.begin())->UnregisterTaskQueue(); 94 (*queues_.begin())->UnregisterTaskQueue();
97 95
98 selector_.SetTaskQueueSelectorObserver(nullptr); 96 selector_.SetTaskQueueSelectorObserver(nullptr);
99 97
100 delegate_->RemoveNestingObserver(this); 98 delegate_->RemoveNestingObserver(this);
101 } 99 }
102 100
101 TaskQueueManager::AnyThread::AnyThread()
102 : do_work_running_count(0),
103 immediate_do_work_posted_count(0),
104 is_nested(false) {}
105
103 void TaskQueueManager::RegisterTimeDomain(TimeDomain* time_domain) { 106 void TaskQueueManager::RegisterTimeDomain(TimeDomain* time_domain) {
104 time_domains_.insert(time_domain); 107 time_domains_.insert(time_domain);
105 time_domain->OnRegisterWithTaskQueueManager(this); 108 time_domain->OnRegisterWithTaskQueueManager(this);
106 } 109 }
107 110
108 void TaskQueueManager::UnregisterTimeDomain(TimeDomain* time_domain) { 111 void TaskQueueManager::UnregisterTimeDomain(TimeDomain* time_domain) {
109 time_domains_.erase(time_domain); 112 time_domains_.erase(time_domain);
110 } 113 }
111 114
112 scoped_refptr<internal::TaskQueueImpl> TaskQueueManager::NewTaskQueue( 115 scoped_refptr<internal::TaskQueueImpl> TaskQueueManager::NewTaskQueue(
(...skipping 25 matching lines...)
138 DCHECK(main_thread_checker_.CalledOnValidThread()); 141 DCHECK(main_thread_checker_.CalledOnValidThread());
139 if (observer_) 142 if (observer_)
140 observer_->OnUnregisterTaskQueue(task_queue); 143 observer_->OnUnregisterTaskQueue(task_queue);
141 144
142 // Add |task_queue| to |queues_to_delete_| so we can prevent it from being 145 // Add |task_queue| to |queues_to_delete_| so we can prevent it from being
143 // freed while any of our structures hold a raw pointer to it. 146 // freed while any of our structures hold a raw pointer to it.
144 queues_to_delete_.insert(task_queue); 147 queues_to_delete_.insert(task_queue);
145 queues_.erase(task_queue); 148 queues_.erase(task_queue);
146 149
147 selector_.RemoveQueue(task_queue.get()); 150 selector_.RemoveQueue(task_queue.get());
151
152 {
153 base::AutoLock lock(any_thread_lock_);
154 any_thread().has_incoming_immediate_work.erase(task_queue.get());
155 }
148 } 156 }
149 157
150 void TaskQueueManager::UpdateWorkQueues(LazyNow lazy_now) { 158 void TaskQueueManager::UpdateWorkQueues(LazyNow* lazy_now) {
151 TRACE_EVENT0(disabled_by_default_tracing_category_,
152 "TaskQueueManager::UpdateWorkQueues");
153
154 for (TimeDomain* time_domain : time_domains_) { 159 for (TimeDomain* time_domain : time_domains_) {
155 LazyNow lazy_now_in_domain = time_domain == real_time_domain_.get() 160 if (time_domain == real_time_domain_.get()) {
156 ? lazy_now 161 time_domain->WakeupReadyDelayedQueues(lazy_now);
157 : time_domain->CreateLazyNow(); 162 continue;
158 time_domain->UpdateWorkQueues(lazy_now_in_domain); 163 }
164 LazyNow time_domain_lazy_now = time_domain->CreateLazyNow();
165 time_domain->WakeupReadyDelayedQueues(&time_domain_lazy_now);
159 } 166 }
160 } 167 }
161 168
162 void TaskQueueManager::OnBeginNestedMessageLoop() { 169 void TaskQueueManager::OnBeginNestedMessageLoop() {
163 // We just entered a nested message loop; make sure there's a DoWork posted or 170 // We just entered a nested message loop; make sure there's a DoWork posted or
164 // the system will grind to a halt. 171 // the system will grind to a halt.
165 delegate_->PostTask(FROM_HERE, from_main_thread_immediate_do_work_closure_); 172 {
173 base::AutoLock lock(any_thread_lock_);
174 any_thread().immediate_do_work_posted_count++;
175 any_thread().is_nested = true;
176 }
177 delegate_->PostTask(FROM_HERE, immediate_do_work_closure_);
178 }
179
180 void TaskQueueManager::OnQueueHasImmediateWork(internal::TaskQueueImpl* queue,
181 bool ensure_do_work_posted) {
182 MoveableAutoLock lock(any_thread_lock_);
183 any_thread().has_incoming_immediate_work.insert(queue);
184 if (ensure_do_work_posted)
185 MaybeScheduleImmediateWorkLocked(FROM_HERE, std::move(lock));
166 } 186 }
167 187
168 void TaskQueueManager::MaybeScheduleImmediateWork( 188 void TaskQueueManager::MaybeScheduleImmediateWork(
169 const tracked_objects::Location& from_here) { 189 const tracked_objects::Location& from_here) {
170 bool on_main_thread = delegate_->BelongsToCurrentThread(); 190 MoveableAutoLock lock(any_thread_lock_);
171 // De-duplicate DoWork posts. 191 MaybeScheduleImmediateWorkLocked(from_here, std::move(lock));
172 if (on_main_thread) { 192 }
173 if (!main_thread_pending_wakeups_.insert(base::TimeTicks()).second) { 193
194 void TaskQueueManager::MaybeScheduleImmediateWorkLocked(
195 const tracked_objects::Location& from_here,
196 MoveableAutoLock&& lock) {
197 {
198 MoveableAutoLock auto_lock(std::move(lock));
199 // Unless we're nested, try to avoid posting redundant DoWorks.
200 if (!any_thread().is_nested &&
201 (any_thread().do_work_running_count == 1 ||
202 any_thread().immediate_do_work_posted_count > 0)) {
174 return; 203 return;
175 } 204 }
176 delegate_->PostTask(from_here, from_main_thread_immediate_do_work_closure_); 205
177 } else { 206 any_thread().immediate_do_work_posted_count++;
178 {
179 base::AutoLock lock(other_thread_lock_);
180 if (other_thread_pending_wakeup_)
181 return;
182 other_thread_pending_wakeup_ = true;
183 }
184 delegate_->PostTask(from_here,
185 from_other_thread_immediate_do_work_closure_);
186 } 207 }
208 delegate_->PostTask(from_here, immediate_do_work_closure_);
187 } 209 }
188 210
189 void TaskQueueManager::MaybeScheduleDelayedWork( 211 void TaskQueueManager::MaybeScheduleDelayedWork(
190 const tracked_objects::Location& from_here, 212 const tracked_objects::Location& from_here,
191 base::TimeTicks now, 213 base::TimeTicks now,
192 base::TimeDelta delay) { 214 base::TimeDelta delay) {
193 DCHECK(main_thread_checker_.CalledOnValidThread()); 215 DCHECK(main_thread_checker_.CalledOnValidThread());
194 DCHECK_GE(delay, base::TimeDelta()); 216 DCHECK_GE(delay, base::TimeDelta());
217 {
218 base::AutoLock lock(any_thread_lock_);
195 219
196 // If there's a pending immediate DoWork then we rely on 220 // Unless we're nested, don't post a delayed DoWork if there's an immediate
197 // TryAdvanceTimeDomains getting the TimeDomain to call 221 // DoWork in flight or we're inside a DoWork. We can rely on DoWork posting
198 // MaybeScheduleDelayedWork again when the immediate DoWork is complete. 222 // a delayed continuation as needed.
199 if (main_thread_pending_wakeups_.find(base::TimeTicks()) != 223 if (!any_thread().is_nested &&
200 main_thread_pending_wakeups_.end()) { 224 (any_thread().immediate_do_work_posted_count > 0 ||
201 return; 225 any_thread().do_work_running_count == 1)) {
226 return;
227 }
202 } 228 }
229
203 // De-duplicate DoWork posts. 230 // De-duplicate DoWork posts.
204 base::TimeTicks run_time = now + delay; 231 base::TimeTicks run_time = now + delay;
205 if (!main_thread_pending_wakeups_.empty() && 232 if (next_delayed_do_work_ <= run_time && !next_delayed_do_work_.is_null())
206 *main_thread_pending_wakeups_.begin() <= run_time) {
207 return; 233 return;
208 } 234
209 main_thread_pending_wakeups_.insert(run_time); 235 TRACE_EVENT1(tracing_category_, "MaybeScheduleDelayedWorkInternal",
236 "delay_ms", delay.InMillisecondsF());
237
238 cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_);
239 next_delayed_do_work_ = run_time;
210 delegate_->PostDelayedTask( 240 delegate_->PostDelayedTask(
211 from_here, base::Bind(&TaskQueueManager::DoWork, 241 from_here, cancelable_delayed_do_work_closure_.callback(), delay);
212 weak_factory_.GetWeakPtr(), run_time, true),
213 delay);
214 } 242 }
215 243
216 void TaskQueueManager::DoWork(base::TimeTicks run_time, bool from_main_thread) { 244 void TaskQueueManager::DoWork(bool delayed) {
217 DCHECK(main_thread_checker_.CalledOnValidThread()); 245 DCHECK(main_thread_checker_.CalledOnValidThread());
218 TRACE_EVENT1(tracing_category_, "TaskQueueManager::DoWork", 246 TRACE_EVENT1(tracing_category_, "TaskQueueManager::DoWork", "delayed",
219 "from_main_thread", from_main_thread); 247 delayed);
248 LazyNow lazy_now(real_time_domain()->CreateLazyNow());
220 249
221 if (from_main_thread) { 250 bool is_nested = delegate_->IsNested();
222 main_thread_pending_wakeups_.erase(run_time); 251 if (!is_nested)
223 } else {
224 base::AutoLock lock(other_thread_lock_);
225 other_thread_pending_wakeup_ = false;
226 }
227
228 // Posting a DoWork while a DoWork is running leads to spurious DoWorks.
229 main_thread_pending_wakeups_.insert(base::TimeTicks());
230
231 if (!delegate_->IsNested())
232 queues_to_delete_.clear(); 252 queues_to_delete_.clear();
233 253
234 LazyNow lazy_now(real_time_domain()->CreateLazyNow()); 254 // This must be done before running any tasks because they could invoke a
235 UpdateWorkQueues(lazy_now); 255 // nested message loop and we risk having a stale |next_delayed_do_work_|.
256 if (delayed)
257 next_delayed_do_work_ = base::TimeTicks();
236 258
237 for (int i = 0; i < work_batch_size_; i++) { 259 for (int i = 0; i < work_batch_size_; i++) {
238 internal::WorkQueue* work_queue; 260 std::set<internal::TaskQueueImpl*> has_incoming_immediate_work;
261
262 {
263 base::AutoLock lock(any_thread_lock_);
264 any_thread().is_nested = is_nested;
265 DCHECK_EQ(any_thread().is_nested, delegate_->IsNested());
266
267 if (i == 0) {
268 any_thread().do_work_running_count++;
269
270 if (!delayed) {
271 any_thread().immediate_do_work_posted_count--;
272 DCHECK_GE(any_thread().immediate_do_work_posted_count, 0);
273 }
274 }
275 std::swap(has_incoming_immediate_work,
276 any_thread().has_incoming_immediate_work);
277 }
278
279 for (internal::TaskQueueImpl* queue : has_incoming_immediate_work) {
Sami 2016/12/15 11:45:45 Would this fit better into UpdateWorkQueues?
alex clarke (OOO till 29th) 2016/12/15 12:11:11 Done.
280 queue->ReloadImmediateWorkQueueIfEmpty();
281 }
282
283 UpdateWorkQueues(&lazy_now);
284
285 internal::WorkQueue* work_queue = nullptr;
239 if (!SelectWorkQueueToService(&work_queue)) 286 if (!SelectWorkQueueToService(&work_queue))
240 break; 287 break;
241 288
242 switch (ProcessTaskFromWorkQueue(work_queue, &lazy_now)) { 289 // NB this may unregister the queue.
290 switch (ProcessTaskFromWorkQueue(work_queue, is_nested, &lazy_now)) {
243 case ProcessTaskResult::DEFERRED: 291 case ProcessTaskResult::DEFERRED:
244 // If a task was deferred, try again with another task. 292 // If a task was deferred, try again with another task.
245 continue; 293 continue;
246 case ProcessTaskResult::EXECUTED: 294 case ProcessTaskResult::EXECUTED:
247 break; 295 break;
248 case ProcessTaskResult::TASK_QUEUE_MANAGER_DELETED: 296 case ProcessTaskResult::TASK_QUEUE_MANAGER_DELETED:
249 return; // The TaskQueueManager got deleted, we must bail out. 297 return; // The TaskQueueManager got deleted, we must bail out.
250 } 298 }
251 299
252 work_queue = nullptr; // The queue may have been unregistered.
253
254 UpdateWorkQueues(lazy_now);
255
256 // Only run a single task per batch in nested run loops so that we can 300 // Only run a single task per batch in nested run loops so that we can
257 // properly exit the nested loop when someone calls RunLoop::Quit(). 301 // properly exit the nested loop when someone calls RunLoop::Quit().
258 if (delegate_->IsNested()) 302 if (is_nested)
259 break; 303 break;
260 } 304 }
261 305
262 main_thread_pending_wakeups_.erase(base::TimeTicks());
263
264 // TODO(alexclarke): Consider refactoring the above loop to terminate only 306 // TODO(alexclarke): Consider refactoring the above loop to terminate only
265 // when there's no more work left to be done, rather than posting a 307 // when there's no more work left to be done, rather than posting a
266 // continuation task. 308 // continuation task.
267 if (!selector_.EnabledWorkQueuesEmpty() || TryAdvanceTimeDomains()) 309
268 MaybeScheduleImmediateWork(FROM_HERE); 310 {
311 MoveableAutoLock lock(any_thread_lock_);
312 base::Optional<base::TimeDelta> next_delay =
313 ComputeDelayTillNextTaskLocked(&lazy_now);
314
315 any_thread().do_work_running_count--;
316 DCHECK_GE(any_thread().do_work_running_count, 0);
317
318 any_thread().is_nested = is_nested;
319 DCHECK_EQ(any_thread().is_nested, delegate_->IsNested());
320
321 PostDoWorkContinuationLocked(next_delay, &lazy_now, std::move(lock));
322 }
269 } 323 }
270 324
271 bool TaskQueueManager::TryAdvanceTimeDomains() { 325 void TaskQueueManager::PostDoWorkContinuationLocked(
272 bool can_advance = false; 326 base::Optional<base::TimeDelta> next_delay,
327 LazyNow* lazy_now,
328 MoveableAutoLock&& lock) {
329 base::TimeDelta delay;
330
331 {
332 MoveableAutoLock auto_lock(std::move(lock));
333
334 // If there are no tasks left then we don't need to post a continuation.
335 if (!next_delay) {
336 // If there's a pending delayed DoWork, cancel it because it's not needed.
337 if (!next_delayed_do_work_.is_null()) {
338 next_delayed_do_work_ = base::TimeTicks();
339 cancelable_delayed_do_work_closure_.Cancel();
340 }
341 return;
342 }
343
344 // If an immediate DoWork is posted, we don't need to post a continuation.
345 if (any_thread().immediate_do_work_posted_count > 0)
346 return;
347
348 delay = next_delay.value();
349
350 // This isn't supposed to happen, but if it does, convert to
351 // non-delayed.
352 if (delay < base::TimeDelta())
353 delay = base::TimeDelta();
354
355 if (delay.is_zero()) {
356 // If a delayed DoWork is pending then we don't need to post a
357 // continuation because it should run immediately.
358 if (!next_delayed_do_work_.is_null() &&
359 next_delayed_do_work_ <= lazy_now->Now()) {
360 return;
361 }
362
363 any_thread().immediate_do_work_posted_count++;
364 } else {
365 base::TimeTicks run_time = lazy_now->Now() + delay;
366 if (next_delayed_do_work_ == run_time)
367 return;
368
369 next_delayed_do_work_ = run_time;
370 }
371 }
372
373 // We avoid holding |any_thread_lock_| while posting the task.
374 if (delay.is_zero()) {
375 delegate_->PostTask(FROM_HERE, immediate_do_work_closure_);
376 } else {
377 cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_);
378 delegate_->PostDelayedTask(
379 FROM_HERE, cancelable_delayed_do_work_closure_.callback(), delay);
380 }
381 }
382
383 base::Optional<base::TimeDelta>
384 TaskQueueManager::ComputeDelayTillNextTaskLocked(LazyNow* lazy_now) {
385 // If we have incoming immediate work for any enabled queue, we know there is
386 // immediate work to be done.
387 for (internal::TaskQueueImpl* queue :
388 any_thread().has_incoming_immediate_work) {
389 if (queue->IsQueueEnabled())
390 return base::TimeDelta();
391 }
392
393 // If the selector has non-empty queues we trivially know there is immediate
394 // work to be done.
395 if (!selector_.EnabledWorkQueuesEmpty())
396 return base::TimeDelta();
397
398 UpdateWorkQueues(lazy_now);
399
400 // Otherwise we need to find the shortest delay, if any.
401 base::Optional<base::TimeDelta> next_continuation;
273 for (TimeDomain* time_domain : time_domains_) { 402 for (TimeDomain* time_domain : time_domains_) {
274 can_advance |= time_domain->MaybeAdvanceTime(); 403 base::Optional<base::TimeDelta> continuation =
404 time_domain->DelayTillNextTask(lazy_now);
405 if (!continuation)
406 continue;
407 if (!next_continuation || next_continuation.value() > continuation.value())
408 next_continuation = continuation;
275 } 409 }
276 return can_advance; 410 return next_continuation;
277 } 411 }
278 412
279 bool TaskQueueManager::SelectWorkQueueToService( 413 bool TaskQueueManager::SelectWorkQueueToService(
280 internal::WorkQueue** out_work_queue) { 414 internal::WorkQueue** out_work_queue) {
281 bool should_run = selector_.SelectWorkQueueToService(out_work_queue); 415 bool should_run = selector_.SelectWorkQueueToService(out_work_queue);
282 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID( 416 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
283 disabled_by_default_tracing_category_, "TaskQueueManager", this, 417 disabled_by_default_tracing_category_, "TaskQueueManager", this,
284 AsValueWithSelectorResult(should_run, *out_work_queue)); 418 AsValueWithSelectorResult(should_run, *out_work_queue));
285 return should_run; 419 return should_run;
286 } 420 }
287 421
288 void TaskQueueManager::DidQueueTask( 422 void TaskQueueManager::DidQueueTask(
289 const internal::TaskQueueImpl::Task& pending_task) { 423 const internal::TaskQueueImpl::Task& pending_task) {
290 task_annotator_.DidQueueTask("TaskQueueManager::PostTask", pending_task); 424 task_annotator_.DidQueueTask("TaskQueueManager::PostTask", pending_task);
291 } 425 }
292 426
293 TaskQueueManager::ProcessTaskResult TaskQueueManager::ProcessTaskFromWorkQueue( 427 TaskQueueManager::ProcessTaskResult TaskQueueManager::ProcessTaskFromWorkQueue(
294 internal::WorkQueue* work_queue, 428 internal::WorkQueue* work_queue,
429 bool is_nested,
295 LazyNow* lazy_now) { 430 LazyNow* lazy_now) {
296 DCHECK(main_thread_checker_.CalledOnValidThread()); 431 DCHECK(main_thread_checker_.CalledOnValidThread());
297 scoped_refptr<DeletionSentinel> protect(deletion_sentinel_); 432 scoped_refptr<DeletionSentinel> protect(deletion_sentinel_);
298 internal::TaskQueueImpl::Task pending_task = 433 internal::TaskQueueImpl::Task pending_task =
299 work_queue->TakeTaskFromWorkQueue(); 434 work_queue->TakeTaskFromWorkQueue();
300 435
301 // It's possible the task was canceled, if so bail out. 436 // It's possible the task was canceled, if so bail out.
302 if (pending_task.task.IsCancelled()) 437 if (pending_task.task.IsCancelled())
303 return ProcessTaskResult::EXECUTED; 438 return ProcessTaskResult::EXECUTED;
304 439
305 internal::TaskQueueImpl* queue = work_queue->task_queue(); 440 internal::TaskQueueImpl* queue = work_queue->task_queue();
306 if (queue->GetQuiescenceMonitored()) 441 if (queue->GetQuiescenceMonitored())
307 task_was_run_on_quiescence_monitored_queue_ = true; 442 task_was_run_on_quiescence_monitored_queue_ = true;
308 443
309 if (!pending_task.nestable && delegate_->IsNested()) { 444 if (!pending_task.nestable && is_nested) {
310 // Defer non-nestable work to the main task runner. NOTE these tasks can be 445 // Defer non-nestable work to the main task runner. NOTE these tasks can be
311 // arbitrarily delayed so the additional delay should not be a problem. 446 // arbitrarily delayed so the additional delay should not be a problem.
312 // TODO(skyostil): Figure out a way to not forget which task queue the 447 // TODO(skyostil): Figure out a way to not forget which task queue the
313 // task is associated with. See http://crbug.com/522843. 448 // task is associated with. See http://crbug.com/522843.
314 // TODO(tzik): Remove base::UnsafeConvertOnceClosureToRepeating once 449 // TODO(tzik): Remove base::UnsafeConvertOnceClosureToRepeating once
315 // TaskRunners have migrated to OnceClosure. 450 // TaskRunners have migrated to OnceClosure.
316 delegate_->PostNonNestableTask( 451 delegate_->PostNonNestableTask(
317 pending_task.posted_from, 452 pending_task.posted_from,
318 UnsafeConvertOnceClosureToRepeating(std::move(pending_task.task))); 453 UnsafeConvertOnceClosureToRepeating(std::move(pending_task.task)));
319 return ProcessTaskResult::DEFERRED; 454 return ProcessTaskResult::DEFERRED;
320 } 455 }
321 456
322 MaybeRecordTaskDelayHistograms(pending_task, queue); 457 if (record_task_delay_histograms_)
458 MaybeRecordTaskDelayHistograms(pending_task, queue);
323 459
324 double task_start_time = 0; 460 double task_start_time = 0;
325 TRACE_TASK_EXECUTION("TaskQueueManager::ProcessTaskFromWorkQueue", 461 TRACE_TASK_EXECUTION("TaskQueueManager::ProcessTaskFromWorkQueue",
326 pending_task); 462 pending_task);
327 if (queue->GetShouldNotifyObservers()) { 463 if (queue->GetShouldNotifyObservers()) {
328 for (auto& observer : task_observers_) 464 for (auto& observer : task_observers_)
329 observer.WillProcessTask(pending_task); 465 observer.WillProcessTask(pending_task);
330 queue->NotifyWillProcessTask(pending_task); 466 queue->NotifyWillProcessTask(pending_task);
331 467
332 bool notify_time_observers = 468 bool notify_time_observers =
(...skipping 130 matching lines...)
463 if (should_run) { 599 if (should_run) {
464 state->SetString("selected_queue", 600 state->SetString("selected_queue",
465 selected_work_queue->task_queue()->GetName()); 601 selected_work_queue->task_queue()->GetName());
466 state->SetString("work_queue_name", selected_work_queue->name()); 602 state->SetString("work_queue_name", selected_work_queue->name());
467 } 603 }
468 604
469 state->BeginArray("time_domains"); 605 state->BeginArray("time_domains");
470 for (auto* time_domain : time_domains_) 606 for (auto* time_domain : time_domains_)
471 time_domain->AsValueInto(state.get()); 607 time_domain->AsValueInto(state.get());
472 state->EndArray(); 608 state->EndArray();
609
610 {
611 base::AutoLock lock(any_thread_lock_);
612 state->SetBoolean("is_nested", any_thread().is_nested);
613 state->SetInteger("do_work_running_count",
614 any_thread().do_work_running_count);
615 state->SetInteger("immediate_do_work_posted_count",
616 any_thread().immediate_do_work_posted_count);
617 }
473 return std::move(state); 618 return std::move(state);
474 } 619 }
475 620
476 void TaskQueueManager::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) { 621 void TaskQueueManager::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) {
477 DCHECK(main_thread_checker_.CalledOnValidThread()); 622 DCHECK(main_thread_checker_.CalledOnValidThread());
478 // Only schedule DoWork if there's something to do. 623 // Only schedule DoWork if there's something to do.
479 if (queue->HasPendingImmediateWork()) 624 if (queue->HasPendingImmediateWork() && !queue->BlockedByFence())
480 MaybeScheduleImmediateWork(FROM_HERE); 625 MaybeScheduleImmediateWork(FROM_HERE);
481 } 626 }
482 627
483 void TaskQueueManager::OnTriedToSelectBlockedWorkQueue( 628 void TaskQueueManager::OnTriedToSelectBlockedWorkQueue(
484 internal::WorkQueue* work_queue) { 629 internal::WorkQueue* work_queue) {
485 DCHECK(main_thread_checker_.CalledOnValidThread()); 630 DCHECK(main_thread_checker_.CalledOnValidThread());
486 DCHECK(!work_queue->Empty()); 631 DCHECK(!work_queue->Empty());
487 if (observer_) { 632 if (observer_) {
488 observer_->OnTriedToExecuteBlockedTask(*work_queue->task_queue(), 633 observer_->OnTriedToExecuteBlockedTask(*work_queue->task_queue(),
489 *work_queue->GetFrontTask()); 634 *work_queue->GetFrontTask());
490 } 635 }
491 } 636 }
492 637
493 bool TaskQueueManager::HasImmediateWorkForTesting() const { 638 bool TaskQueueManager::HasImmediateWorkForTesting() const {
494 return !selector_.EnabledWorkQueuesEmpty(); 639 return !selector_.EnabledWorkQueuesEmpty();
495 } 640 }
496 641
642 void TaskQueueManager::SetRecordTaskDelayHistograms(
643 bool record_task_delay_histograms) {
644 DCHECK(main_thread_checker_.CalledOnValidThread());
645 record_task_delay_histograms_ = record_task_delay_histograms;
646 }
647
497 } // namespace scheduler 648 } // namespace scheduler
498 } // namespace blink 649 } // namespace blink
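
Editor's note: the core of this patch replaces the old main_thread_pending_wakeups_ / other_thread_pending_wakeup_ bookkeeping with a single AnyThread struct (do_work_running_count, immediate_do_work_posted_count, is_nested) guarded by any_thread_lock_, so an immediate DoWork is only posted when none is already running or in flight, unless a nested loop forces one. Below is a minimal standalone sketch of that de-duplication rule. It is not the Blink code: AnyThreadState, g_task_runner and the deque-based "task runner" are invented for illustration only.

// Standalone sketch of the redundant-DoWork elimination rule in this patch.
// All names below are illustrative, not Blink/Chromium APIs.
#include <cassert>
#include <deque>
#include <functional>
#include <iostream>
#include <mutex>

struct AnyThreadState {
  int do_work_running_count = 0;           // DoWorks currently on the stack.
  int immediate_do_work_posted_count = 0;  // Immediate DoWorks posted but not yet run.
  bool is_nested = false;                  // Inside a nested message loop.
};

std::mutex g_lock;
AnyThreadState g_any_thread;
std::deque<std::function<void()>> g_task_runner;  // Stand-in for the delegate.

void DoWork();  // Forward declaration.

void MaybeScheduleImmediateWork() {
  {
    std::lock_guard<std::mutex> lock(g_lock);
    // Unless nested, skip the post if a DoWork is already running or posted:
    // the running/posted DoWork will post its own continuation if needed.
    if (!g_any_thread.is_nested &&
        (g_any_thread.do_work_running_count == 1 ||
         g_any_thread.immediate_do_work_posted_count > 0)) {
      return;
    }
    g_any_thread.immediate_do_work_posted_count++;
  }
  g_task_runner.push_back(&DoWork);  // Post outside the lock, as in the patch.
}

void DoWork() {
  {
    std::lock_guard<std::mutex> lock(g_lock);
    g_any_thread.do_work_running_count++;
    // All DoWorks in this sketch are immediate.
    g_any_thread.immediate_do_work_posted_count--;
  }
  std::cout << "DoWork ran\n";  // The real code drains work queues and posts a continuation here.
  std::lock_guard<std::mutex> lock(g_lock);
  g_any_thread.do_work_running_count--;
}

int main() {
  MaybeScheduleImmediateWork();  // Posts a DoWork.
  MaybeScheduleImmediateWork();  // De-duplicated: one is already posted.
  assert(g_task_runner.size() == 1);
  while (!g_task_runner.empty()) {
    auto task = g_task_runner.front();
    g_task_runner.pop_front();
    task();
  }
  MaybeScheduleImmediateWork();  // Nothing running or posted any more, so this posts again.
  assert(g_task_runner.size() == 1);
}

Counters (rather than the old sets keyed by wake-up time) let a DoWork that is already on the stack absorb later requests, while the is_nested flag deliberately re-enables posting so a nested RunLoop cannot starve.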

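A second ingredient, presumably related to the "Fix lock order inversion" patch set title, is the MoveableAutoLock hand-off: MaybeScheduleImmediateWork acquires any_thread_lock_, passes ownership into MaybeScheduleImmediateWorkLocked, and the lock is released before delegate_->PostTask runs, so nothing is ever posted while the scheduler lock is held. A rough equivalent using std::unique_lock as a standard-library stand-in; Scheduler, PostTask and the single bool flag are illustrative simplifications, not the Chromium classes:

// Sketch of the lock hand-off pattern used by MaybeScheduleImmediateWork /
// MaybeScheduleImmediateWorkLocked, written with std::unique_lock.
#include <iostream>
#include <mutex>
#include <utility>

class Scheduler {
 public:
  void MaybeScheduleImmediateWork() {
    std::unique_lock<std::mutex> lock(any_thread_lock_);
    MaybeScheduleImmediateWorkLocked(std::move(lock));
  }

  // Takes ownership of an already-held lock, so callers that have just done
  // other locked work can reuse it without unlocking and relocking.
  void MaybeScheduleImmediateWorkLocked(std::unique_lock<std::mutex>&& lock) {
    {
      std::unique_lock<std::mutex> auto_lock(std::move(lock));
      if (immediate_do_work_posted_)  // De-duplicate.
        return;
      immediate_do_work_posted_ = true;
    }  // Lock released here, before posting.
    PostTask();  // Never called with any_thread_lock_ held.
  }

 private:
  void PostTask() { std::cout << "DoWork posted\n"; }

  std::mutex any_thread_lock_;
  bool immediate_do_work_posted_ = false;
};

int main() {
  Scheduler scheduler;
  scheduler.MaybeScheduleImmediateWork();  // Posts.
  scheduler.MaybeScheduleImmediateWork();  // De-duplicated, no second post.
}

Dropping the lock before posting means any locks the delegate takes internally never nest inside any_thread_lock_, which is the usual way to break a lock-order cycle.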