Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(211)

Side by Side Diff: third_party/WebKit/Source/platform/scheduler/base/task_queue_manager.cc

Issue 2590593002: Revert of [Reland] Scheduler refactoring to virtually eliminate redundant DoWorks (Closed)
Patch Set: Created 3 years, 12 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2014 The Chromium Authors. All rights reserved. 1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "platform/scheduler/base/task_queue_manager.h" 5 #include "platform/scheduler/base/task_queue_manager.h"
6 6
7 #include <queue> 7 #include <queue>
8 #include <set> 8 #include <set>
9 9
10 #include "base/bind.h" 10 #include "base/bind.h"
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after
51 } 51 }
52 52
53 TaskQueueManager::TaskQueueManager( 53 TaskQueueManager::TaskQueueManager(
54 scoped_refptr<TaskQueueManagerDelegate> delegate, 54 scoped_refptr<TaskQueueManagerDelegate> delegate,
55 const char* tracing_category, 55 const char* tracing_category,
56 const char* disabled_by_default_tracing_category, 56 const char* disabled_by_default_tracing_category,
57 const char* disabled_by_default_verbose_tracing_category) 57 const char* disabled_by_default_verbose_tracing_category)
58 : real_time_domain_(new RealTimeDomain(tracing_category)), 58 : real_time_domain_(new RealTimeDomain(tracing_category)),
59 delegate_(delegate), 59 delegate_(delegate),
60 task_was_run_on_quiescence_monitored_queue_(false), 60 task_was_run_on_quiescence_monitored_queue_(false),
61 record_task_delay_histograms_(true), 61 other_thread_pending_wakeup_(false),
62 work_batch_size_(1), 62 work_batch_size_(1),
63 task_count_(0), 63 task_count_(0),
64 tracing_category_(tracing_category), 64 tracing_category_(tracing_category),
65 disabled_by_default_tracing_category_( 65 disabled_by_default_tracing_category_(
66 disabled_by_default_tracing_category), 66 disabled_by_default_tracing_category),
67 disabled_by_default_verbose_tracing_category_( 67 disabled_by_default_verbose_tracing_category_(
68 disabled_by_default_verbose_tracing_category), 68 disabled_by_default_verbose_tracing_category),
69 currently_executing_task_queue_(nullptr), 69 currently_executing_task_queue_(nullptr),
70 observer_(nullptr), 70 observer_(nullptr),
71 deletion_sentinel_(new DeletionSentinel()), 71 deletion_sentinel_(new DeletionSentinel()),
72 weak_factory_(this) { 72 weak_factory_(this) {
73 DCHECK(delegate->RunsTasksOnCurrentThread()); 73 DCHECK(delegate->RunsTasksOnCurrentThread());
74 TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category, 74 TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category,
75 "TaskQueueManager", this); 75 "TaskQueueManager", this);
76 selector_.SetTaskQueueSelectorObserver(this); 76 selector_.SetTaskQueueSelectorObserver(this);
77 77
78 delayed_do_work_closure_ = 78 from_main_thread_immediate_do_work_closure_ =
79 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), true); 79 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(),
80 immediate_do_work_closure_ = 80 base::TimeTicks(), true);
81 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), false); 81 from_other_thread_immediate_do_work_closure_ =
82 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(),
83 base::TimeTicks(), false);
82 84
83 // TODO(alexclarke): Change this to be a parameter that's passed in. 85 // TODO(alexclarke): Change this to be a parameter that's passed in.
84 RegisterTimeDomain(real_time_domain_.get()); 86 RegisterTimeDomain(real_time_domain_.get());
85 87
86 delegate_->AddNestingObserver(this); 88 delegate_->AddNestingObserver(this);
87 } 89 }
88 90
89 TaskQueueManager::~TaskQueueManager() { 91 TaskQueueManager::~TaskQueueManager() {
90 TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_, 92 TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_,
91 "TaskQueueManager", this); 93 "TaskQueueManager", this);
92 94
93 while (!queues_.empty()) 95 while (!queues_.empty())
94 (*queues_.begin())->UnregisterTaskQueue(); 96 (*queues_.begin())->UnregisterTaskQueue();
95 97
96 selector_.SetTaskQueueSelectorObserver(nullptr); 98 selector_.SetTaskQueueSelectorObserver(nullptr);
97 99
98 delegate_->RemoveNestingObserver(this); 100 delegate_->RemoveNestingObserver(this);
99 } 101 }
100 102
101 TaskQueueManager::AnyThread::AnyThread()
102 : do_work_running_count(0),
103 immediate_do_work_posted_count(0),
104 is_nested(false) {}
105
106 void TaskQueueManager::RegisterTimeDomain(TimeDomain* time_domain) { 103 void TaskQueueManager::RegisterTimeDomain(TimeDomain* time_domain) {
107 time_domains_.insert(time_domain); 104 time_domains_.insert(time_domain);
108 time_domain->OnRegisterWithTaskQueueManager(this); 105 time_domain->OnRegisterWithTaskQueueManager(this);
109 } 106 }
110 107
111 void TaskQueueManager::UnregisterTimeDomain(TimeDomain* time_domain) { 108 void TaskQueueManager::UnregisterTimeDomain(TimeDomain* time_domain) {
112 time_domains_.erase(time_domain); 109 time_domains_.erase(time_domain);
113 } 110 }
114 111
115 scoped_refptr<internal::TaskQueueImpl> TaskQueueManager::NewTaskQueue( 112 scoped_refptr<internal::TaskQueueImpl> TaskQueueManager::NewTaskQueue(
(...skipping 25 matching lines...) Expand all
141 DCHECK(main_thread_checker_.CalledOnValidThread()); 138 DCHECK(main_thread_checker_.CalledOnValidThread());
142 if (observer_) 139 if (observer_)
143 observer_->OnUnregisterTaskQueue(task_queue); 140 observer_->OnUnregisterTaskQueue(task_queue);
144 141
145 // Add |task_queue| to |queues_to_delete_| so we can prevent it from being 142 // Add |task_queue| to |queues_to_delete_| so we can prevent it from being
146 // freed while any of our structures hold a raw pointer to it. 143 // freed while any of our structures hold a raw pointer to it.
147 queues_to_delete_.insert(task_queue); 144 queues_to_delete_.insert(task_queue);
148 queues_.erase(task_queue); 145 queues_.erase(task_queue);
149 146
150 selector_.RemoveQueue(task_queue.get()); 147 selector_.RemoveQueue(task_queue.get());
151
152 {
153 base::AutoLock lock(any_thread_lock_);
154 any_thread().has_incoming_immediate_work.erase(task_queue.get());
155 }
156 } 148 }
157 149
158 void TaskQueueManager::UpdateWorkQueues( 150 void TaskQueueManager::UpdateWorkQueues(LazyNow lazy_now) {
159 const std::set<internal::TaskQueueImpl*>* queues_to_reload, 151 TRACE_EVENT0(disabled_by_default_tracing_category_,
160 LazyNow* lazy_now) { 152 "TaskQueueManager::UpdateWorkQueues");
161 if (queues_to_reload) {
162 for (internal::TaskQueueImpl* queue : *queues_to_reload) {
163 queue->ReloadImmediateWorkQueueIfEmpty();
164 }
165 }
166 153
167 for (TimeDomain* time_domain : time_domains_) { 154 for (TimeDomain* time_domain : time_domains_) {
168 if (time_domain == real_time_domain_.get()) { 155 LazyNow lazy_now_in_domain = time_domain == real_time_domain_.get()
169 time_domain->WakeupReadyDelayedQueues(lazy_now); 156 ? lazy_now
170 continue; 157 : time_domain->CreateLazyNow();
171 } 158 time_domain->UpdateWorkQueues(lazy_now_in_domain);
172 LazyNow time_domain_lazy_now = time_domain->CreateLazyNow();
173 time_domain->WakeupReadyDelayedQueues(&time_domain_lazy_now);
174 } 159 }
175 } 160 }
176 161
177 void TaskQueueManager::OnBeginNestedMessageLoop() { 162 void TaskQueueManager::OnBeginNestedMessageLoop() {
178 // We just entered a nested message loop, make sure there's a DoWork posted or 163 // We just entered a nested message loop, make sure there's a DoWork posted or
179 // the system will grind to a halt. 164 // the system will grind to a halt.
180 { 165 delegate_->PostTask(FROM_HERE, from_main_thread_immediate_do_work_closure_);
181 base::AutoLock lock(any_thread_lock_);
182 any_thread().immediate_do_work_posted_count++;
183 any_thread().is_nested = true;
184 }
185 delegate_->PostTask(FROM_HERE, immediate_do_work_closure_);
186 }
187
188 void TaskQueueManager::OnQueueHasImmediateWork(internal::TaskQueueImpl* queue,
189 bool ensure_do_work_posted) {
190 MoveableAutoLock lock(any_thread_lock_);
191 any_thread().has_incoming_immediate_work.insert(queue);
192 if (ensure_do_work_posted)
193 MaybeScheduleImmediateWorkLocked(FROM_HERE, std::move(lock));
194 } 166 }
195 167
196 void TaskQueueManager::MaybeScheduleImmediateWork( 168 void TaskQueueManager::MaybeScheduleImmediateWork(
197 const tracked_objects::Location& from_here) { 169 const tracked_objects::Location& from_here) {
198 MoveableAutoLock lock(any_thread_lock_); 170 bool on_main_thread = delegate_->BelongsToCurrentThread();
199 MaybeScheduleImmediateWorkLocked(from_here, std::move(lock)); 171 // De-duplicate DoWork posts.
200 } 172 if (on_main_thread) {
201 173 if (!main_thread_pending_wakeups_.insert(base::TimeTicks()).second) {
202 void TaskQueueManager::MaybeScheduleImmediateWorkLocked(
203 const tracked_objects::Location& from_here,
204 MoveableAutoLock&& lock) {
205 {
206 MoveableAutoLock auto_lock(std::move(lock));
207 // Unless we're nested, try to avoid posting redundant DoWorks.
208 if (!any_thread().is_nested &&
209 (any_thread().do_work_running_count == 1 ||
210 any_thread().immediate_do_work_posted_count > 0)) {
211 return; 174 return;
212 } 175 }
213 176 delegate_->PostTask(from_here, from_main_thread_immediate_do_work_closure_);
214 any_thread().immediate_do_work_posted_count++; 177 } else {
178 {
179 base::AutoLock lock(other_thread_lock_);
180 if (other_thread_pending_wakeup_)
181 return;
182 other_thread_pending_wakeup_ = true;
183 }
184 delegate_->PostTask(from_here,
185 from_other_thread_immediate_do_work_closure_);
215 } 186 }
216 delegate_->PostTask(from_here, immediate_do_work_closure_);
217 } 187 }
218 188
219 void TaskQueueManager::MaybeScheduleDelayedWork( 189 void TaskQueueManager::MaybeScheduleDelayedWork(
220 const tracked_objects::Location& from_here, 190 const tracked_objects::Location& from_here,
221 base::TimeTicks now, 191 base::TimeTicks now,
222 base::TimeDelta delay) { 192 base::TimeDelta delay) {
223 DCHECK(main_thread_checker_.CalledOnValidThread()); 193 DCHECK(main_thread_checker_.CalledOnValidThread());
224 DCHECK_GE(delay, base::TimeDelta()); 194 DCHECK_GE(delay, base::TimeDelta());
225 {
226 base::AutoLock lock(any_thread_lock_);
227 195
228 // Unless we're nested, don't post a delayed DoWork if there's an immediate 196 // If there's a pending immediate DoWork then we rely on
229 // DoWork in flight or we're inside a DoWork. We can rely on DoWork posting 197 // TryAdvanceTimeDomains getting the TimeDomain to call
230 // a delayed continuation as needed. 198 // MaybeScheduleDelayedWork again when the immediate DoWork is complete.
231 if (!any_thread().is_nested && 199 if (main_thread_pending_wakeups_.find(base::TimeTicks()) !=
232 (any_thread().immediate_do_work_posted_count > 0 || 200 main_thread_pending_wakeups_.end()) {
233 any_thread().do_work_running_count == 1)) { 201 return;
234 return; 202 }
235 } 203 // De-duplicate DoWork posts.
204 base::TimeTicks run_time = now + delay;
205 if (!main_thread_pending_wakeups_.empty() &&
206 *main_thread_pending_wakeups_.begin() <= run_time) {
207 return;
208 }
209 main_thread_pending_wakeups_.insert(run_time);
210 delegate_->PostDelayedTask(
211 from_here, base::Bind(&TaskQueueManager::DoWork,
212 weak_factory_.GetWeakPtr(), run_time, true),
213 delay);
214 }
215
216 void TaskQueueManager::DoWork(base::TimeTicks run_time, bool from_main_thread) {
217 DCHECK(main_thread_checker_.CalledOnValidThread());
218 TRACE_EVENT1(tracing_category_, "TaskQueueManager::DoWork",
219 "from_main_thread", from_main_thread);
220
221 if (from_main_thread) {
222 main_thread_pending_wakeups_.erase(run_time);
223 } else {
224 base::AutoLock lock(other_thread_lock_);
225 other_thread_pending_wakeup_ = false;
236 } 226 }
237 227
238 // De-duplicate DoWork posts. 228 // Posting a DoWork while a DoWork is running leads to spurious DoWorks.
239 base::TimeTicks run_time = now + delay; 229 main_thread_pending_wakeups_.insert(base::TimeTicks());
240 if (next_delayed_do_work_ <= run_time && !next_delayed_do_work_.is_null())
241 return;
242 230
243 TRACE_EVENT1(tracing_category_, "MaybeScheduleDelayedWorkInternal", 231 if (!delegate_->IsNested())
244 "delay_ms", delay.InMillisecondsF());
245
246 cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_);
247 next_delayed_do_work_ = run_time;
248 delegate_->PostDelayedTask(
249 from_here, cancelable_delayed_do_work_closure_.callback(), delay);
250 }
251
252 void TaskQueueManager::DoWork(bool delayed) {
253 DCHECK(main_thread_checker_.CalledOnValidThread());
254 TRACE_EVENT1(tracing_category_, "TaskQueueManager::DoWork", "delayed",
255 delayed);
256 LazyNow lazy_now(real_time_domain()->CreateLazyNow());
257
258 bool is_nested = delegate_->IsNested();
259 if (!is_nested)
260 queues_to_delete_.clear(); 232 queues_to_delete_.clear();
261 233
262 // This must be done before running any tasks because they could invoke a 234 LazyNow lazy_now(real_time_domain()->CreateLazyNow());
263 // nested message loop and we risk having a stale |next_delayed_do_work_|. 235 UpdateWorkQueues(lazy_now);
264 if (delayed)
265 next_delayed_do_work_ = base::TimeTicks();
266 236
267 for (int i = 0; i < work_batch_size_; i++) { 237 for (int i = 0; i < work_batch_size_; i++) {
268 std::set<internal::TaskQueueImpl*> queues_to_reload; 238 internal::WorkQueue* work_queue;
269
270 {
271 base::AutoLock lock(any_thread_lock_);
272 any_thread().is_nested = is_nested;
273 DCHECK_EQ(any_thread().is_nested, delegate_->IsNested());
274
275 if (i == 0) {
276 any_thread().do_work_running_count++;
277
278 if (!delayed) {
279 any_thread().immediate_do_work_posted_count--;
280 DCHECK_GE(any_thread().immediate_do_work_posted_count, 0);
281 }
282 }
283 std::swap(queues_to_reload, any_thread().has_incoming_immediate_work);
284 }
285
286 UpdateWorkQueues(&queues_to_reload, &lazy_now);
287
288 internal::WorkQueue* work_queue = nullptr;
289 if (!SelectWorkQueueToService(&work_queue)) 239 if (!SelectWorkQueueToService(&work_queue))
290 break; 240 break;
291 241
292 // NB this may unregister the queue. 242 switch (ProcessTaskFromWorkQueue(work_queue, &lazy_now)) {
293 switch (ProcessTaskFromWorkQueue(work_queue, is_nested, &lazy_now)) {
294 case ProcessTaskResult::DEFERRED: 243 case ProcessTaskResult::DEFERRED:
295 // If a task was deferred, try again with another task. 244 // If a task was deferred, try again with another task.
296 continue; 245 continue;
297 case ProcessTaskResult::EXECUTED: 246 case ProcessTaskResult::EXECUTED:
298 break; 247 break;
299 case ProcessTaskResult::TASK_QUEUE_MANAGER_DELETED: 248 case ProcessTaskResult::TASK_QUEUE_MANAGER_DELETED:
300 return; // The TaskQueueManager got deleted, we must bail out. 249 return; // The TaskQueueManager got deleted, we must bail out.
301 } 250 }
302 251
252 work_queue = nullptr; // The queue may have been unregistered.
253
254 UpdateWorkQueues(lazy_now);
255
303 // Only run a single task per batch in nested run loops so that we can 256 // Only run a single task per batch in nested run loops so that we can
304 // properly exit the nested loop when someone calls RunLoop::Quit(). 257 // properly exit the nested loop when someone calls RunLoop::Quit().
305 if (is_nested) 258 if (delegate_->IsNested())
306 break; 259 break;
307 } 260 }
308 261
262 main_thread_pending_wakeups_.erase(base::TimeTicks());
263
309 // TODO(alexclarke): Consider refactoring the above loop to terminate only 264 // TODO(alexclarke): Consider refactoring the above loop to terminate only
310 // when there's no more work left to be done, rather than posting a 265 // when there's no more work left to be done, rather than posting a
311 // continuation task. 266 // continuation task.
312 267 if (!selector_.EnabledWorkQueuesEmpty() || TryAdvanceTimeDomains())
313 { 268 MaybeScheduleImmediateWork(FROM_HERE);
314 MoveableAutoLock lock(any_thread_lock_);
315 base::Optional<base::TimeDelta> next_delay =
316 ComputeDelayTillNextTaskLocked(&lazy_now);
317
318 any_thread().do_work_running_count--;
319 DCHECK_GE(any_thread().do_work_running_count, 0);
320
321 any_thread().is_nested = is_nested;
322 DCHECK_EQ(any_thread().is_nested, delegate_->IsNested());
323
324 PostDoWorkContinuationLocked(next_delay, &lazy_now, std::move(lock));
325 }
326 } 269 }
327 270
328 void TaskQueueManager::PostDoWorkContinuationLocked( 271 bool TaskQueueManager::TryAdvanceTimeDomains() {
329 base::Optional<base::TimeDelta> next_delay, 272 bool can_advance = false;
330 LazyNow* lazy_now, 273 for (TimeDomain* time_domain : time_domains_) {
331 MoveableAutoLock&& lock) { 274 can_advance |= time_domain->MaybeAdvanceTime();
332 base::TimeDelta delay;
333
334 {
335 MoveableAutoLock auto_lock(std::move(lock));
336
337 // If there are no tasks left then we don't need to post a continuation.
338 if (!next_delay) {
339 // If there's a pending delayed DoWork, cancel it because it's not needed.
340 if (!next_delayed_do_work_.is_null()) {
341 next_delayed_do_work_ = base::TimeTicks();
342 cancelable_delayed_do_work_closure_.Cancel();
343 }
344 return;
345 }
346
347 // If an immediate DoWork is posted, we don't need to post a continuation.
348 if (any_thread().immediate_do_work_posted_count > 0)
349 return;
350
351 delay = next_delay.value();
352
353 // This isn't supposed to happen, but in case it does convert to
354 // non-delayed.
355 if (delay < base::TimeDelta())
356 delay = base::TimeDelta();
357
358 if (delay.is_zero()) {
359 // If a delayed DoWork is pending then we don't need to post a
360 // continuation because it should run immediately.
361 if (!next_delayed_do_work_.is_null() &&
362 next_delayed_do_work_ <= lazy_now->Now()) {
363 return;
364 }
365
366 any_thread().immediate_do_work_posted_count++;
367 } else {
368 base::TimeTicks run_time = lazy_now->Now() + delay;
369 if (next_delayed_do_work_ == run_time)
370 return;
371
372 next_delayed_do_work_ = run_time;
373 }
374 } 275 }
375 276 return can_advance;
376 // We avoid holding |any_thread_lock_| while posting the task.
377 if (delay.is_zero()) {
378 delegate_->PostTask(FROM_HERE, immediate_do_work_closure_);
379 } else {
380 cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_);
381 delegate_->PostDelayedTask(
382 FROM_HERE, cancelable_delayed_do_work_closure_.callback(), delay);
383 }
384 }
385
386 base::Optional<base::TimeDelta>
387 TaskQueueManager::ComputeDelayTillNextTaskLocked(LazyNow* lazy_now) {
388 // If we have incoming immediate work for any enabled queue, we know there is
389 // immediate work to be done.
390 for (internal::TaskQueueImpl* queue :
391 any_thread().has_incoming_immediate_work) {
392 if (queue->IsQueueEnabled())
393 return base::TimeDelta();
394 }
395
396 // If the selector has non-empty queues we trivially know there is immediate
397 // work to be done.
398 if (!selector_.EnabledWorkQueuesEmpty())
399 return base::TimeDelta();
400
401 UpdateWorkQueues(nullptr, lazy_now);
402
403 // Otherwise we need to find the shortest delay, if any.
404 base::Optional<base::TimeDelta> next_continuation;
405 for (TimeDomain* time_domain : time_domains_) {
406 base::Optional<base::TimeDelta> continuation =
407 time_domain->DelayTillNextTask(lazy_now);
408 if (!continuation)
409 continue;
410 if (!next_continuation || next_continuation.value() > continuation.value())
411 next_continuation = continuation;
412 }
413 return next_continuation;
414 } 277 }
415 278
416 bool TaskQueueManager::SelectWorkQueueToService( 279 bool TaskQueueManager::SelectWorkQueueToService(
417 internal::WorkQueue** out_work_queue) { 280 internal::WorkQueue** out_work_queue) {
418 bool should_run = selector_.SelectWorkQueueToService(out_work_queue); 281 bool should_run = selector_.SelectWorkQueueToService(out_work_queue);
419 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID( 282 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
420 disabled_by_default_tracing_category_, "TaskQueueManager", this, 283 disabled_by_default_tracing_category_, "TaskQueueManager", this,
421 AsValueWithSelectorResult(should_run, *out_work_queue)); 284 AsValueWithSelectorResult(should_run, *out_work_queue));
422 return should_run; 285 return should_run;
423 } 286 }
424 287
425 void TaskQueueManager::DidQueueTask( 288 void TaskQueueManager::DidQueueTask(
426 const internal::TaskQueueImpl::Task& pending_task) { 289 const internal::TaskQueueImpl::Task& pending_task) {
427 task_annotator_.DidQueueTask("TaskQueueManager::PostTask", pending_task); 290 task_annotator_.DidQueueTask("TaskQueueManager::PostTask", pending_task);
428 } 291 }
429 292
430 TaskQueueManager::ProcessTaskResult TaskQueueManager::ProcessTaskFromWorkQueue( 293 TaskQueueManager::ProcessTaskResult TaskQueueManager::ProcessTaskFromWorkQueue(
431 internal::WorkQueue* work_queue, 294 internal::WorkQueue* work_queue,
432 bool is_nested,
433 LazyNow* lazy_now) { 295 LazyNow* lazy_now) {
434 DCHECK(main_thread_checker_.CalledOnValidThread()); 296 DCHECK(main_thread_checker_.CalledOnValidThread());
435 scoped_refptr<DeletionSentinel> protect(deletion_sentinel_); 297 scoped_refptr<DeletionSentinel> protect(deletion_sentinel_);
436 internal::TaskQueueImpl::Task pending_task = 298 internal::TaskQueueImpl::Task pending_task =
437 work_queue->TakeTaskFromWorkQueue(); 299 work_queue->TakeTaskFromWorkQueue();
438 300
439 // It's possible the task was canceled, if so bail out. 301 // It's possible the task was canceled, if so bail out.
440 if (pending_task.task.IsCancelled()) 302 if (pending_task.task.IsCancelled())
441 return ProcessTaskResult::EXECUTED; 303 return ProcessTaskResult::EXECUTED;
442 304
443 internal::TaskQueueImpl* queue = work_queue->task_queue(); 305 internal::TaskQueueImpl* queue = work_queue->task_queue();
444 if (queue->GetQuiescenceMonitored()) 306 if (queue->GetQuiescenceMonitored())
445 task_was_run_on_quiescence_monitored_queue_ = true; 307 task_was_run_on_quiescence_monitored_queue_ = true;
446 308
447 if (!pending_task.nestable && is_nested) { 309 if (!pending_task.nestable && delegate_->IsNested()) {
448 // Defer non-nestable work to the main task runner. NOTE these tasks can be 310 // Defer non-nestable work to the main task runner. NOTE these tasks can be
449 // arbitrarily delayed so the additional delay should not be a problem. 311 // arbitrarily delayed so the additional delay should not be a problem.
450 // TODO(skyostil): Figure out a way to not forget which task queue the 312 // TODO(skyostil): Figure out a way to not forget which task queue the
451 // task is associated with. See http://crbug.com/522843. 313 // task is associated with. See http://crbug.com/522843.
452 // TODO(tzik): Remove base::UnsafeConvertOnceClosureToRepeating once 314 // TODO(tzik): Remove base::UnsafeConvertOnceClosureToRepeating once
453 // TaskRunners have migrated to OnceClosure. 315 // TaskRunners have migrated to OnceClosure.
454 delegate_->PostNonNestableTask( 316 delegate_->PostNonNestableTask(
455 pending_task.posted_from, 317 pending_task.posted_from,
456 UnsafeConvertOnceClosureToRepeating(std::move(pending_task.task))); 318 UnsafeConvertOnceClosureToRepeating(std::move(pending_task.task)));
457 return ProcessTaskResult::DEFERRED; 319 return ProcessTaskResult::DEFERRED;
458 } 320 }
459 321
460 if (record_task_delay_histograms_) 322 MaybeRecordTaskDelayHistograms(pending_task, queue);
461 MaybeRecordTaskDelayHistograms(pending_task, queue);
462 323
463 double task_start_time = 0; 324 double task_start_time = 0;
464 TRACE_TASK_EXECUTION("TaskQueueManager::ProcessTaskFromWorkQueue", 325 TRACE_TASK_EXECUTION("TaskQueueManager::ProcessTaskFromWorkQueue",
465 pending_task); 326 pending_task);
466 if (queue->GetShouldNotifyObservers()) { 327 if (queue->GetShouldNotifyObservers()) {
467 for (auto& observer : task_observers_) 328 for (auto& observer : task_observers_)
468 observer.WillProcessTask(pending_task); 329 observer.WillProcessTask(pending_task);
469 queue->NotifyWillProcessTask(pending_task); 330 queue->NotifyWillProcessTask(pending_task);
470 331
471 bool notify_time_observers = 332 bool notify_time_observers =
(...skipping 130 matching lines...) Expand 10 before | Expand all | Expand 10 after
602 if (should_run) { 463 if (should_run) {
603 state->SetString("selected_queue", 464 state->SetString("selected_queue",
604 selected_work_queue->task_queue()->GetName()); 465 selected_work_queue->task_queue()->GetName());
605 state->SetString("work_queue_name", selected_work_queue->name()); 466 state->SetString("work_queue_name", selected_work_queue->name());
606 } 467 }
607 468
608 state->BeginArray("time_domains"); 469 state->BeginArray("time_domains");
609 for (auto* time_domain : time_domains_) 470 for (auto* time_domain : time_domains_)
610 time_domain->AsValueInto(state.get()); 471 time_domain->AsValueInto(state.get());
611 state->EndArray(); 472 state->EndArray();
612
613 {
614 base::AutoLock lock(any_thread_lock_);
615 state->SetBoolean("is_nested", any_thread().is_nested);
616 state->SetInteger("do_work_running_count",
617 any_thread().do_work_running_count);
618 state->SetInteger("immediate_do_work_posted_count",
619 any_thread().immediate_do_work_posted_count);
620 }
621 return std::move(state); 473 return std::move(state);
622 } 474 }
623 475
624 void TaskQueueManager::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) { 476 void TaskQueueManager::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) {
625 DCHECK(main_thread_checker_.CalledOnValidThread()); 477 DCHECK(main_thread_checker_.CalledOnValidThread());
626 // Only schedule DoWork if there's something to do. 478 // Only schedule DoWork if there's something to do.
627 if (queue->HasPendingImmediateWork() && !queue->BlockedByFence()) 479 if (queue->HasPendingImmediateWork())
628 MaybeScheduleImmediateWork(FROM_HERE); 480 MaybeScheduleImmediateWork(FROM_HERE);
629 } 481 }
630 482
631 void TaskQueueManager::OnTriedToSelectBlockedWorkQueue( 483 void TaskQueueManager::OnTriedToSelectBlockedWorkQueue(
632 internal::WorkQueue* work_queue) { 484 internal::WorkQueue* work_queue) {
633 DCHECK(main_thread_checker_.CalledOnValidThread()); 485 DCHECK(main_thread_checker_.CalledOnValidThread());
634 DCHECK(!work_queue->Empty()); 486 DCHECK(!work_queue->Empty());
635 if (observer_) { 487 if (observer_) {
636 observer_->OnTriedToExecuteBlockedTask(*work_queue->task_queue(), 488 observer_->OnTriedToExecuteBlockedTask(*work_queue->task_queue(),
637 *work_queue->GetFrontTask()); 489 *work_queue->GetFrontTask());
638 } 490 }
639 } 491 }
640 492
641 bool TaskQueueManager::HasImmediateWorkForTesting() const { 493 bool TaskQueueManager::HasImmediateWorkForTesting() const {
642 return !selector_.EnabledWorkQueuesEmpty(); 494 return !selector_.EnabledWorkQueuesEmpty();
643 } 495 }
644 496
645 void TaskQueueManager::SetRecordTaskDelayHistograms(
646 bool record_task_delay_histograms) {
647 DCHECK(main_thread_checker_.CalledOnValidThread());
648 record_task_delay_histograms_ = record_task_delay_histograms;
649 }
650
651 } // namespace scheduler 497 } // namespace scheduler
652 } // namespace blink 498 } // namespace blink
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698