Chromium Code Reviews

Side by Side Diff: third_party/WebKit/Source/platform/scheduler/base/task_queue_manager.cc

Issue 2546423002: [Try # 3] Scheduler refactoring to virtually eliminate redundant DoWorks (Closed)
Patch Set: Added the MoveableAutoLock per Sami's suggestion (created 4 years ago)
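
The patch set title refers to a MoveableAutoLock: an RAII holder for a base::Lock that can be moved into a callee, so a critical section can start in the caller and be released inside the function it delegates to (see MaybeScheduleImmediateWorkLocked and PostDoWorkContinuationLocked in the diff below). A minimal sketch of the idea, assuming this general shape rather than quoting the actual helper's header:

    #include "base/synchronization/lock.h"

    // Sketch only: acquires the lock on construction and releases it on
    // destruction, unless ownership has been moved to another instance.
    class MoveableAutoLock {
     public:
      explicit MoveableAutoLock(base::Lock& lock) : lock_(lock), moved_(false) {
        lock_.Acquire();
      }

      MoveableAutoLock(MoveableAutoLock&& other)
          : lock_(other.lock_), moved_(other.moved_) {
        // The moved-from object must not release the lock in its destructor.
        other.moved_ = true;
      }

      ~MoveableAutoLock() {
        if (!moved_)
          lock_.Release();
      }

     private:
      base::Lock& lock_;
      bool moved_;
    };

Moving the lock into MaybeScheduleImmediateWorkLocked and PostDoWorkContinuationLocked lets those functions release |any_thread_lock_| in an inner scope before posting a task, matching the "We avoid holding |any_thread_lock_| while posting the task" comment in the diff.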
1 // Copyright 2014 The Chromium Authors. All rights reserved. 1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "platform/scheduler/base/task_queue_manager.h" 5 #include "platform/scheduler/base/task_queue_manager.h"
6 6
7 #include <queue> 7 #include <queue>
8 #include <set> 8 #include <set>
9 9
10 #include "base/bind.h" 10 #include "base/bind.h"
(...skipping 40 matching lines...)
51 } 51 }
52 52
53 TaskQueueManager::TaskQueueManager( 53 TaskQueueManager::TaskQueueManager(
54 scoped_refptr<TaskQueueManagerDelegate> delegate, 54 scoped_refptr<TaskQueueManagerDelegate> delegate,
55 const char* tracing_category, 55 const char* tracing_category,
56 const char* disabled_by_default_tracing_category, 56 const char* disabled_by_default_tracing_category,
57 const char* disabled_by_default_verbose_tracing_category) 57 const char* disabled_by_default_verbose_tracing_category)
58 : real_time_domain_(new RealTimeDomain(tracing_category)), 58 : real_time_domain_(new RealTimeDomain(tracing_category)),
59 delegate_(delegate), 59 delegate_(delegate),
60 task_was_run_on_quiescence_monitored_queue_(false), 60 task_was_run_on_quiescence_monitored_queue_(false),
61 other_thread_pending_wakeup_(false), 61 record_task_delay_histograms_(true),
62 work_batch_size_(1), 62 work_batch_size_(1),
63 task_count_(0), 63 task_count_(0),
64 tracing_category_(tracing_category), 64 tracing_category_(tracing_category),
65 disabled_by_default_tracing_category_( 65 disabled_by_default_tracing_category_(
66 disabled_by_default_tracing_category), 66 disabled_by_default_tracing_category),
67 disabled_by_default_verbose_tracing_category_( 67 disabled_by_default_verbose_tracing_category_(
68 disabled_by_default_verbose_tracing_category), 68 disabled_by_default_verbose_tracing_category),
69 currently_executing_task_queue_(nullptr), 69 currently_executing_task_queue_(nullptr),
70 observer_(nullptr), 70 observer_(nullptr),
71 deletion_sentinel_(new DeletionSentinel()), 71 deletion_sentinel_(new DeletionSentinel()),
72 weak_factory_(this) { 72 weak_factory_(this) {
73 DCHECK(delegate->RunsTasksOnCurrentThread()); 73 DCHECK(delegate->RunsTasksOnCurrentThread());
74 TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category, 74 TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category,
75 "TaskQueueManager", this); 75 "TaskQueueManager", this);
76 selector_.SetTaskQueueSelectorObserver(this); 76 selector_.SetTaskQueueSelectorObserver(this);
77 77
78 from_main_thread_immediate_do_work_closure_ = 78 delayed_do_work_closure_ =
79 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), 79 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), true);
80 base::TimeTicks(), true); 80 immediate_do_work_closure_ =
81 from_other_thread_immediate_do_work_closure_ = 81 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), false);
82 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(),
83 base::TimeTicks(), false);
84 82
85 // TODO(alexclarke): Change this to be a parameter that's passed in. 83 // TODO(alexclarke): Change this to be a parameter that's passed in.
86 RegisterTimeDomain(real_time_domain_.get()); 84 RegisterTimeDomain(real_time_domain_.get());
87 85
88 delegate_->AddNestingObserver(this); 86 delegate_->AddNestingObserver(this);
89 } 87 }
90 88
91 TaskQueueManager::~TaskQueueManager() { 89 TaskQueueManager::~TaskQueueManager() {
92 TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_, 90 TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_,
93 "TaskQueueManager", this); 91 "TaskQueueManager", this);
94 92
95 while (!queues_.empty()) 93 while (!queues_.empty())
96 (*queues_.begin())->UnregisterTaskQueue(); 94 (*queues_.begin())->UnregisterTaskQueue();
97 95
98 selector_.SetTaskQueueSelectorObserver(nullptr); 96 selector_.SetTaskQueueSelectorObserver(nullptr);
99 97
100 delegate_->RemoveNestingObserver(this); 98 delegate_->RemoveNestingObserver(this);
101 } 99 }
102 100
101 TaskQueueManager::AnyThread::AnyThread()
102 : do_work_running_count(0),
103 immediate_do_work_posted_count(0),
104 is_nested(false) {}
105
103 void TaskQueueManager::RegisterTimeDomain(TimeDomain* time_domain) { 106 void TaskQueueManager::RegisterTimeDomain(TimeDomain* time_domain) {
104 time_domains_.insert(time_domain); 107 time_domains_.insert(time_domain);
105 time_domain->OnRegisterWithTaskQueueManager(this); 108 time_domain->OnRegisterWithTaskQueueManager(this);
106 } 109 }
107 110
108 void TaskQueueManager::UnregisterTimeDomain(TimeDomain* time_domain) { 111 void TaskQueueManager::UnregisterTimeDomain(TimeDomain* time_domain) {
109 time_domains_.erase(time_domain); 112 time_domains_.erase(time_domain);
110 } 113 }
111 114
112 scoped_refptr<internal::TaskQueueImpl> TaskQueueManager::NewTaskQueue( 115 scoped_refptr<internal::TaskQueueImpl> TaskQueueManager::NewTaskQueue(
(...skipping 25 matching lines...)
138 DCHECK(main_thread_checker_.CalledOnValidThread()); 141 DCHECK(main_thread_checker_.CalledOnValidThread());
139 if (observer_) 142 if (observer_)
140 observer_->OnUnregisterTaskQueue(task_queue); 143 observer_->OnUnregisterTaskQueue(task_queue);
141 144
142 // Add |task_queue| to |queues_to_delete_| so we can prevent it from being 145 // Add |task_queue| to |queues_to_delete_| so we can prevent it from being
143 // freed while any of our structures hold a raw pointer to it. 146 // freed while any of our structures hold a raw pointer to it.
144 queues_to_delete_.insert(task_queue); 147 queues_to_delete_.insert(task_queue);
145 queues_.erase(task_queue); 148 queues_.erase(task_queue);
146 149
147 selector_.RemoveQueue(task_queue.get()); 150 selector_.RemoveQueue(task_queue.get());
151
152 {
153 base::AutoLock lock(any_thread_lock_);
154 any_thread().has_incoming_immediate_work.erase(task_queue.get());
155 }
148 } 156 }
149 157
150 void TaskQueueManager::UpdateWorkQueues(LazyNow lazy_now) { 158 void TaskQueueManager::UpdateWorkQueues(LazyNow* lazy_now) {
151 TRACE_EVENT0(disabled_by_default_tracing_category_,
152 "TaskQueueManager::UpdateWorkQueues");
153
154 for (TimeDomain* time_domain : time_domains_) { 159 for (TimeDomain* time_domain : time_domains_) {
155 LazyNow lazy_now_in_domain = time_domain == real_time_domain_.get() 160 if (time_domain == real_time_domain_.get()) {
156 ? lazy_now 161 time_domain->WakeupReadyDelayedQueues(lazy_now);
157 : time_domain->CreateLazyNow(); 162 continue;
158 time_domain->UpdateWorkQueues(lazy_now_in_domain); 163 }
164 LazyNow time_domain_lazy_now = time_domain->CreateLazyNow();
165 time_domain->WakeupReadyDelayedQueues(&time_domain_lazy_now);
159 } 166 }
160 } 167 }
161 168
162 void TaskQueueManager::OnBeginNestedMessageLoop() { 169 void TaskQueueManager::OnBeginNestedMessageLoop() {
163 // We just entered a nested message loop, make sure there's a DoWork posted or 170 // We just entered a nested message loop, make sure there's a DoWork posted or
164 // the system will grind to a halt. 171 // the system will grind to a halt.
165 delegate_->PostTask(FROM_HERE, from_main_thread_immediate_do_work_closure_); 172 {
173 base::AutoLock lock(any_thread_lock_);
174 any_thread().immediate_do_work_posted_count++;
175 any_thread().is_nested = true;
176 }
177 delegate_->PostTask(FROM_HERE, immediate_do_work_closure_);
178 }
179
180 void TaskQueueManager::OnQueueHasImmediateWork(internal::TaskQueueImpl* queue,
181 bool ensure_do_work_posted) {
182 MoveableAutoLock lock(any_thread_lock_);
183 any_thread().has_incoming_immediate_work.insert(queue);
184 if (ensure_do_work_posted)
185 MaybeScheduleImmediateWorkLocked(FROM_HERE, std::move(lock));
186 }
187
188 void TaskQueueManager::NotifyQueuesOfIncomingImmediateWorkOnMainThreadLocked() {
189 for (internal::TaskQueueImpl* queue :
190 any_thread().has_incoming_immediate_work) {
191 queue->ReloadImmediateWorkQueueIfEmpty();
192 }
193 any_thread().has_incoming_immediate_work.clear();
166 } 194 }
167 195
168 void TaskQueueManager::MaybeScheduleImmediateWork( 196 void TaskQueueManager::MaybeScheduleImmediateWork(
169 const tracked_objects::Location& from_here) { 197 const tracked_objects::Location& from_here) {
170 bool on_main_thread = delegate_->BelongsToCurrentThread(); 198 MoveableAutoLock lock(any_thread_lock_);
171 // De-duplicate DoWork posts. 199 MaybeScheduleImmediateWorkLocked(from_here, std::move(lock));
172 if (on_main_thread) { 200 }
173 if (!main_thread_pending_wakeups_.insert(base::TimeTicks()).second) { 201
202 void TaskQueueManager::MaybeScheduleImmediateWorkLocked(
203 const tracked_objects::Location& from_here,
204 MoveableAutoLock&& lock) {
205 {
206 MoveableAutoLock auto_lock(std::move(lock));
207 // Unless we're nested, try to avoid posting redundant DoWorks.
208 if (!any_thread().is_nested &&
209 (any_thread().do_work_running_count == 1 ||
210 any_thread().immediate_do_work_posted_count > 0)) {
174 return; 211 return;
175 } 212 }
176 delegate_->PostTask(from_here, from_main_thread_immediate_do_work_closure_); 213
177 } else { 214 any_thread().immediate_do_work_posted_count++;
178 {
179 base::AutoLock lock(other_thread_lock_);
180 if (other_thread_pending_wakeup_)
181 return;
182 other_thread_pending_wakeup_ = true;
183 }
184 delegate_->PostTask(from_here,
185 from_other_thread_immediate_do_work_closure_);
186 } 215 }
216 delegate_->PostTask(from_here, immediate_do_work_closure_);
187 } 217 }
188 218
189 void TaskQueueManager::MaybeScheduleDelayedWork( 219 void TaskQueueManager::MaybeScheduleDelayedWork(
190 const tracked_objects::Location& from_here, 220 const tracked_objects::Location& from_here,
191 base::TimeTicks now, 221 base::TimeTicks now,
192 base::TimeDelta delay) { 222 base::TimeDelta delay) {
193 DCHECK(main_thread_checker_.CalledOnValidThread()); 223 DCHECK(main_thread_checker_.CalledOnValidThread());
194 DCHECK_GE(delay, base::TimeDelta()); 224 DCHECK_GE(delay, base::TimeDelta());
225 {
226 base::AutoLock lock(any_thread_lock_);
195 227
196 // If there's a pending immediate DoWork then we rely on 228 // If there's a pending immediate DoWork then we rely on the logic in DoWork
197 // TryAdvanceTimeDomains getting the TimeDomain to call 229 // to post a continuation as needed.
198 // MaybeScheduleDelayedWork again when the immediate DoWork is complete. 230 if (any_thread().immediate_do_work_posted_count > 0)
199 if (main_thread_pending_wakeups_.find(base::TimeTicks()) != 231 return;
200 main_thread_pending_wakeups_.end()) { 232
201 return; 233 // If a non-nested DoWork is running we can also rely on the logic in DoWork
234 // to post a continuation as needed.
235 if (any_thread().do_work_running_count == 1 && !any_thread().is_nested)
236 return;
202 } 237 }
238
203 // De-duplicate DoWork posts. 239 // De-duplicate DoWork posts.
204 base::TimeTicks run_time = now + delay; 240 base::TimeTicks run_time = now + delay;
205 if (!main_thread_pending_wakeups_.empty() && 241 if (next_delayed_do_work_ <= run_time && !next_delayed_do_work_.is_null())
206 *main_thread_pending_wakeups_.begin() <= run_time) {
207 return; 242 return;
208 } 243
209 main_thread_pending_wakeups_.insert(run_time); 244 TRACE_EVENT1("tracing_category_", "MaybeScheduleDelayedWorkInternal", "delay",
Sami 2016/12/09 11:04:02 Probably did not mean to add quotes around tracing_category_.
alex clarke (OOO till 29th) 2016/12/12 11:45:04 Done.
245 delay.InSecondsF());
Sami 2016/12/09 11:04:02 nit: Could you do InMillisecondsF() and "delay_ms"
alex clarke (OOO till 29th) 2016/12/12 11:45:04 Done.
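Taken together, the two review threads above suggest the trace call would end up roughly as follows; this is a sketch of the expected follow-up, not the code that actually landed:

    TRACE_EVENT1(tracing_category_, "MaybeScheduleDelayedWorkInternal",
                 "delay_ms", delay.InMillisecondsF());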
246
247 cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_);
248 next_delayed_do_work_ = run_time;
210 delegate_->PostDelayedTask( 249 delegate_->PostDelayedTask(
211 from_here, base::Bind(&TaskQueueManager::DoWork, 250 from_here, cancelable_delayed_do_work_closure_.callback(), delay);
212 weak_factory_.GetWeakPtr(), run_time, true),
213 delay);
214 } 251 }
215 252
216 void TaskQueueManager::DoWork(base::TimeTicks run_time, bool from_main_thread) { 253 void TaskQueueManager::DoWork(bool delayed) {
217 DCHECK(main_thread_checker_.CalledOnValidThread()); 254 DCHECK(main_thread_checker_.CalledOnValidThread());
218 TRACE_EVENT1(tracing_category_, "TaskQueueManager::DoWork", 255 TRACE_EVENT1("tracing_category_", "TaskQueueManager::DoWork", "delayed",
219 "from_main_thread", from_main_thread); 256 delayed);
257 LazyNow lazy_now(real_time_domain()->CreateLazyNow());
220 258
221 if (from_main_thread) { 259 bool is_nested = delegate_->IsNested();
222 main_thread_pending_wakeups_.erase(run_time); 260 if (!is_nested)
223 } else {
224 base::AutoLock lock(other_thread_lock_);
225 other_thread_pending_wakeup_ = false;
226 }
227
228 // Posting a DoWork while a DoWork is running leads to spurious DoWorks.
229 main_thread_pending_wakeups_.insert(base::TimeTicks());
230
231 if (!delegate_->IsNested())
232 queues_to_delete_.clear(); 261 queues_to_delete_.clear();
233 262
234 LazyNow lazy_now(real_time_domain()->CreateLazyNow()); 263 for (int i = 0; i < work_batch_size_; i++) {
235 UpdateWorkQueues(lazy_now); 264 {
265 base::AutoLock lock(any_thread_lock_);
266 any_thread().is_nested = is_nested;
267 DCHECK_EQ(any_thread().is_nested, delegate_->IsNested());
236 268
237 for (int i = 0; i < work_batch_size_; i++) { 269 if (i == 0) {
238 internal::WorkQueue* work_queue; 270 any_thread().do_work_running_count++;
271
272 if (!delayed) {
273 any_thread().immediate_do_work_posted_count--;
274 DCHECK_GE(any_thread().immediate_do_work_posted_count, 0);
275 }
276 }
277
278 NotifyQueuesOfIncomingImmediateWorkOnMainThreadLocked();
279 }
280
281 UpdateWorkQueues(&lazy_now);
282
283 internal::WorkQueue* work_queue = nullptr;
239 if (!SelectWorkQueueToService(&work_queue)) 284 if (!SelectWorkQueueToService(&work_queue))
240 break; 285 break;
241 286
242 switch (ProcessTaskFromWorkQueue(work_queue, &lazy_now)) { 287 switch (ProcessTaskFromWorkQueue(work_queue, is_nested, &lazy_now)) {
243 case ProcessTaskResult::DEFERRED: 288 case ProcessTaskResult::DEFERRED:
244 // If a task was deferred, try again with another task. 289 // If a task was deferred, try again with another task.
245 continue; 290 continue;
246 case ProcessTaskResult::EXECUTED: 291 case ProcessTaskResult::EXECUTED:
247 break; 292 break;
248 case ProcessTaskResult::TASK_QUEUE_MANAGER_DELETED: 293 case ProcessTaskResult::TASK_QUEUE_MANAGER_DELETED:
249 return; // The TaskQueueManager got deleted, we must bail out. 294 return; // The TaskQueueManager got deleted, we must bail out.
250 } 295 }
251 296
252 work_queue = nullptr; // The queue may have been unregistered. 297 work_queue = nullptr; // The queue may have been unregistered.
Sami 2016/12/09 11:04:02 Looks like this line could be removed now (even be
alex clarke (OOO till 29th) 2016/12/12 11:45:04 Done.
253 298
254 UpdateWorkQueues(lazy_now);
255
256 // Only run a single task per batch in nested run loops so that we can 299 // Only run a single task per batch in nested run loops so that we can
257 // properly exit the nested loop when someone calls RunLoop::Quit(). 300 // properly exit the nested loop when someone calls RunLoop::Quit().
258 if (delegate_->IsNested()) 301 if (is_nested)
259 break; 302 break;
260 } 303 }
261 304
262 main_thread_pending_wakeups_.erase(base::TimeTicks());
263
264 // TODO(alexclarke): Consider refactoring the above loop to terminate only 305 // TODO(alexclarke): Consider refactoring the above loop to terminate only
265 // when there's no more work left to be done, rather than posting a 306 // when there's no more work left to be done, rather than posting a
266 // continuation task. 307 // continuation task.
267 if (!selector_.EnabledWorkQueuesEmpty() || TryAdvanceTimeDomains()) 308 if (delayed)
268 MaybeScheduleImmediateWork(FROM_HERE); 309 next_delayed_do_work_ = base::TimeTicks();
310
311 {
312 MoveableAutoLock lock(any_thread_lock_);
313 base::Optional<base::TimeDelta> next_delay =
314 ComputeDelayTillNextTaskLocked(&lazy_now);
315
316 any_thread().do_work_running_count--;
317 DCHECK_GE(any_thread().do_work_running_count, 0);
318
319 any_thread().is_nested = is_nested;
320 DCHECK_EQ(any_thread().is_nested, delegate_->IsNested());
321
322 PostDoWorkContinuationLocked(next_delay, &lazy_now, std::move(lock));
323 }
269 } 324 }
270 325
271 bool TaskQueueManager::TryAdvanceTimeDomains() { 326 void TaskQueueManager::PostDoWorkContinuationLocked(
272 bool can_advance = false; 327 base::Optional<base::TimeDelta> next_delay,
328 LazyNow* lazy_now,
329 MoveableAutoLock&& lock) {
330 base::TimeDelta delay;
331
332 {
333 MoveableAutoLock auto_lock(std::move(lock));
334
335 // If there are no tasks left then we don't need to post a continuation.
336 if (!next_delay) {
337 // If there's a pending delayed DoWork, cancel it because it's not needed.
338 if (!next_delayed_do_work_.is_null()) {
339 next_delayed_do_work_ = base::TimeTicks();
340 cancelable_delayed_do_work_closure_.Cancel();
341 }
342 return;
343 }
344
345 // If an immediate DoWork is posted, we don't need to post a continuation.
346 if (any_thread().immediate_do_work_posted_count > 0)
347 return;
348
349 delay = next_delay.value();
350 if (delay.is_zero()) {
351 // If a delayed DoWork is pending then we don't need to post a
Sami 2016/12/09 11:04:02 nit: reflow
alex clarke (OOO till 29th) 2016/12/12 11:45:04 Done.
352 // continuation
353 // because it should run immediately.
354 if (!next_delayed_do_work_.is_null() &&
355 next_delayed_do_work_ <= lazy_now->Now()) {
356 return;
357 }
358
359 any_thread().immediate_do_work_posted_count++;
360 } else {
361 base::TimeTicks run_time = lazy_now->Now() + delay;
362 if (next_delayed_do_work_ == run_time)
363 return;
364
365 cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_);
366 next_delayed_do_work_ = run_time;
367 }
368 }
369
370 // We avoid holding |any_thread_lock_| while posting the task.
371 if (delay.is_zero()) {
372 delegate_->PostTask(FROM_HERE, immediate_do_work_closure_);
373 } else {
374 delegate_->PostDelayedTask(
375 FROM_HERE, cancelable_delayed_do_work_closure_.callback(), delay);
376 }
377 }
378
379 base::Optional<base::TimeDelta>
380 TaskQueueManager::ComputeDelayTillNextTaskLocked(LazyNow* lazy_now) {
381 NotifyQueuesOfIncomingImmediateWorkOnMainThreadLocked();
382
383 // If the selector has non-empty queues we trivially know there is immediate
 384 // work to be done.
385 if (!selector_.EnabledWorkQueuesEmpty())
386 return base::TimeDelta();
387
388 UpdateWorkQueues(lazy_now);
389
390 // Otherwise we need to find the shortest delay, if any.
391 base::Optional<base::TimeDelta> next_continuation;
273 for (TimeDomain* time_domain : time_domains_) { 392 for (TimeDomain* time_domain : time_domains_) {
274 can_advance |= time_domain->MaybeAdvanceTime(); 393 base::Optional<base::TimeDelta> continuation =
394 time_domain->DelayTillNextTask(lazy_now);
395 if (!continuation)
396 continue;
397 if (!next_continuation || next_continuation.value() > continuation.value())
398 next_continuation = continuation;
275 } 399 }
276 return can_advance; 400 return next_continuation;
277 } 401 }
278 402
279 bool TaskQueueManager::SelectWorkQueueToService( 403 bool TaskQueueManager::SelectWorkQueueToService(
280 internal::WorkQueue** out_work_queue) { 404 internal::WorkQueue** out_work_queue) {
281 bool should_run = selector_.SelectWorkQueueToService(out_work_queue); 405 bool should_run = selector_.SelectWorkQueueToService(out_work_queue);
282 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID( 406 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
283 disabled_by_default_tracing_category_, "TaskQueueManager", this, 407 disabled_by_default_tracing_category_, "TaskQueueManager", this,
284 AsValueWithSelectorResult(should_run, *out_work_queue)); 408 AsValueWithSelectorResult(should_run, *out_work_queue));
285 return should_run; 409 return should_run;
286 } 410 }
287 411
288 void TaskQueueManager::DidQueueTask( 412 void TaskQueueManager::DidQueueTask(
289 const internal::TaskQueueImpl::Task& pending_task) { 413 const internal::TaskQueueImpl::Task& pending_task) {
290 task_annotator_.DidQueueTask("TaskQueueManager::PostTask", pending_task); 414 task_annotator_.DidQueueTask("TaskQueueManager::PostTask", pending_task);
291 } 415 }
292 416
293 TaskQueueManager::ProcessTaskResult TaskQueueManager::ProcessTaskFromWorkQueue( 417 TaskQueueManager::ProcessTaskResult TaskQueueManager::ProcessTaskFromWorkQueue(
294 internal::WorkQueue* work_queue, 418 internal::WorkQueue* work_queue,
419 bool is_nested,
295 LazyNow* lazy_now) { 420 LazyNow* lazy_now) {
296 DCHECK(main_thread_checker_.CalledOnValidThread()); 421 DCHECK(main_thread_checker_.CalledOnValidThread());
297 scoped_refptr<DeletionSentinel> protect(deletion_sentinel_); 422 scoped_refptr<DeletionSentinel> protect(deletion_sentinel_);
298 internal::TaskQueueImpl::Task pending_task = 423 internal::TaskQueueImpl::Task pending_task =
299 work_queue->TakeTaskFromWorkQueue(); 424 work_queue->TakeTaskFromWorkQueue();
300 425
301 // It's possible the task was canceled, if so bail out. 426 // It's possible the task was canceled, if so bail out.
302 if (pending_task.task.IsCancelled()) 427 if (pending_task.task.IsCancelled())
303 return ProcessTaskResult::EXECUTED; 428 return ProcessTaskResult::EXECUTED;
304 429
305 internal::TaskQueueImpl* queue = work_queue->task_queue(); 430 internal::TaskQueueImpl* queue = work_queue->task_queue();
306 if (queue->GetQuiescenceMonitored()) 431 if (queue->GetQuiescenceMonitored())
307 task_was_run_on_quiescence_monitored_queue_ = true; 432 task_was_run_on_quiescence_monitored_queue_ = true;
308 433
309 if (!pending_task.nestable && delegate_->IsNested()) { 434 if (!pending_task.nestable && is_nested) {
310 // Defer non-nestable work to the main task runner. NOTE these tasks can be 435 // Defer non-nestable work to the main task runner. NOTE these tasks can be
311 // arbitrarily delayed so the additional delay should not be a problem. 436 // arbitrarily delayed so the additional delay should not be a problem.
312 // TODO(skyostil): Figure out a way to not forget which task queue the 437 // TODO(skyostil): Figure out a way to not forget which task queue the
313 // task is associated with. See http://crbug.com/522843. 438 // task is associated with. See http://crbug.com/522843.
314 // TODO(tzik): Remove base::UnsafeConvertOnceClosureToRepeating once 439 // TODO(tzik): Remove base::UnsafeConvertOnceClosureToRepeating once
315 // TaskRunners have migrated to OnceClosure. 440 // TaskRunners have migrated to OnceClosure.
316 delegate_->PostNonNestableTask( 441 delegate_->PostNonNestableTask(
317 pending_task.posted_from, 442 pending_task.posted_from,
318 UnsafeConvertOnceClosureToRepeating(std::move(pending_task.task))); 443 UnsafeConvertOnceClosureToRepeating(std::move(pending_task.task)));
319 return ProcessTaskResult::DEFERRED; 444 return ProcessTaskResult::DEFERRED;
320 } 445 }
321 446
322 MaybeRecordTaskDelayHistograms(pending_task, queue); 447 if (record_task_delay_histograms_)
448 MaybeRecordTaskDelayHistograms(pending_task, queue);
323 449
324 double task_start_time = 0; 450 double task_start_time = 0;
325 TRACE_TASK_EXECUTION("TaskQueueManager::ProcessTaskFromWorkQueue", 451 TRACE_TASK_EXECUTION("TaskQueueManager::ProcessTaskFromWorkQueue",
326 pending_task); 452 pending_task);
327 if (queue->GetShouldNotifyObservers()) { 453 if (queue->GetShouldNotifyObservers()) {
328 for (auto& observer : task_observers_) 454 for (auto& observer : task_observers_)
329 observer.WillProcessTask(pending_task); 455 observer.WillProcessTask(pending_task);
330 queue->NotifyWillProcessTask(pending_task); 456 queue->NotifyWillProcessTask(pending_task);
331 457
332 bool notify_time_observers = 458 bool notify_time_observers =
(...skipping 130 matching lines...)
463 if (should_run) { 589 if (should_run) {
464 state->SetString("selected_queue", 590 state->SetString("selected_queue",
465 selected_work_queue->task_queue()->GetName()); 591 selected_work_queue->task_queue()->GetName());
466 state->SetString("work_queue_name", selected_work_queue->name()); 592 state->SetString("work_queue_name", selected_work_queue->name());
467 } 593 }
468 594
469 state->BeginArray("time_domains"); 595 state->BeginArray("time_domains");
470 for (auto* time_domain : time_domains_) 596 for (auto* time_domain : time_domains_)
471 time_domain->AsValueInto(state.get()); 597 time_domain->AsValueInto(state.get());
472 state->EndArray(); 598 state->EndArray();
599
600 {
601 base::AutoLock lock(any_thread_lock_);
602 state->SetBoolean("is_nested", any_thread().is_nested);
603 state->SetInteger("do_work_count", any_thread().do_work_running_count);
604 state->SetInteger("immediate_do_work_posted",
Sami 2016/12/09 11:04:02 nit: _count
alex clarke (OOO till 29th) 2016/12/12 11:45:04 Done.
605 any_thread().immediate_do_work_posted_count);
606 }
473 return std::move(state); 607 return std::move(state);
474 } 608 }
475 609
476 void TaskQueueManager::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) { 610 void TaskQueueManager::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) {
477 DCHECK(main_thread_checker_.CalledOnValidThread()); 611 DCHECK(main_thread_checker_.CalledOnValidThread());
478 // Only schedule DoWork if there's something to do. 612 // Only schedule DoWork if there's something to do.
479 if (queue->HasPendingImmediateWork()) 613 if (queue->HasPendingImmediateWork())
480 MaybeScheduleImmediateWork(FROM_HERE); 614 MaybeScheduleImmediateWork(FROM_HERE);
481 } 615 }
482 616
483 void TaskQueueManager::OnTriedToSelectBlockedWorkQueue( 617 void TaskQueueManager::OnTriedToSelectBlockedWorkQueue(
484 internal::WorkQueue* work_queue) { 618 internal::WorkQueue* work_queue) {
485 DCHECK(main_thread_checker_.CalledOnValidThread()); 619 DCHECK(main_thread_checker_.CalledOnValidThread());
486 DCHECK(!work_queue->Empty()); 620 DCHECK(!work_queue->Empty());
487 if (observer_) { 621 if (observer_) {
488 observer_->OnTriedToExecuteBlockedTask(*work_queue->task_queue(), 622 observer_->OnTriedToExecuteBlockedTask(*work_queue->task_queue(),
489 *work_queue->GetFrontTask()); 623 *work_queue->GetFrontTask());
490 } 624 }
491 } 625 }
492 626
493 bool TaskQueueManager::HasImmediateWorkForTesting() const { 627 bool TaskQueueManager::HasImmediateWorkForTesting() const {
494 return !selector_.EnabledWorkQueuesEmpty(); 628 return !selector_.EnabledWorkQueuesEmpty();
495 } 629 }
496 630
631 void TaskQueueManager::SetRecordTaskDelayHistograms(
632 bool record_task_delay_histograms) {
633 DCHECK(main_thread_checker_.CalledOnValidThread());
634 record_task_delay_histograms_ = record_task_delay_histograms;
635 }
636
497 } // namespace scheduler 637 } // namespace scheduler
498 } // namespace blink 638 } // namespace blink
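
In outline, the redundant-DoWork elimination in this patch rests on the AnyThread bookkeeping introduced above. A simplified, self-contained sketch of the posting decision made by MaybeScheduleImmediateWorkLocked, with a hypothetical struct and free function standing in for the member logic:

    // Mirrors TaskQueueManager::AnyThread from the patch (names are illustrative).
    struct AnyThreadState {
      int do_work_running_count = 0;
      int immediate_do_work_posted_count = 0;
      bool is_nested = false;
    };

    // Returns true if an immediate DoWork should be posted. Outside a nested
    // message loop, a running non-nested DoWork (count == 1) will post its own
    // continuation, and an already-posted immediate DoWork makes another post
    // redundant, so in both cases we skip it.
    bool ShouldPostImmediateDoWork(const AnyThreadState& state) {
      if (!state.is_nested &&
          (state.do_work_running_count == 1 ||
           state.immediate_do_work_posted_count > 0)) {
        return false;
      }
      return true;
    }

DoWork itself increments do_work_running_count on entry, decrements immediate_do_work_posted_count for immediate (non-delayed) runs, and posts at most one continuation via PostDoWorkContinuationLocked, which is how duplicate wake-ups are avoided without the old per-wakeup main_thread_pending_wakeups_ set.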