Chromium Code Reviews
Unified Diff: third_party/WebKit/Source/platform/scheduler/base/task_queue_manager.cc

Issue 2546423002: [Try # 3] Scheduler refactoring to virtually eliminate redundant DoWorks (Closed)
Patch Set: Few tweaks Created 4 years ago
Index: third_party/WebKit/Source/platform/scheduler/base/task_queue_manager.cc
diff --git a/third_party/WebKit/Source/platform/scheduler/base/task_queue_manager.cc b/third_party/WebKit/Source/platform/scheduler/base/task_queue_manager.cc
index a72e1459cce9effc23e1d91ff6b2732b869d3c12..79b869f64261704e250bef04b381fd1f55638bc0 100644
--- a/third_party/WebKit/Source/platform/scheduler/base/task_queue_manager.cc
+++ b/third_party/WebKit/Source/platform/scheduler/base/task_queue_manager.cc
@@ -58,7 +58,11 @@ TaskQueueManager::TaskQueueManager(
: real_time_domain_(new RealTimeDomain(tracing_category)),
delegate_(delegate),
task_was_run_on_quiescence_monitored_queue_(false),
- other_thread_pending_wakeup_(false),
+ do_work_pending_lock_(), // NOTE this calls the constructor!
+ do_work_running_count_(0),
+ immediate_do_work_posted_count_(0),
+ is_nested_(false),
+ record_task_delay_histograms_(true),
work_batch_size_(1),
task_count_(0),
tracing_category_(tracing_category),
@@ -75,12 +79,10 @@ TaskQueueManager::TaskQueueManager(
"TaskQueueManager", this);
selector_.SetTaskQueueSelectorObserver(this);
- from_main_thread_immediate_do_work_closure_ =
- base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(),
- base::TimeTicks(), true);
- from_other_thread_immediate_do_work_closure_ =
- base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(),
- base::TimeTicks(), false);
+ delayed_do_work_closure_ =
+ base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), true);
+ immediate_do_work_closure_ =
+ base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), false);
// TODO(alexclarke): Change this to be a parameter that's passed in.
RegisterTimeDomain(real_time_domain_.get());
@@ -145,45 +147,70 @@ void TaskQueueManager::UnregisterTaskQueue(
queues_.erase(task_queue);
selector_.RemoveQueue(task_queue.get());
-}
-void TaskQueueManager::UpdateWorkQueues(LazyNow lazy_now) {
- TRACE_EVENT0(disabled_by_default_tracing_category_,
- "TaskQueueManager::UpdateWorkQueues");
+ {
+ SpinLock::Guard guard(do_work_pending_lock_);
+ has_incomming_immediate_work_.erase(task_queue.get());
+ }
+}
+void TaskQueueManager::UpdateWorkQueues(LazyNow* lazy_now) {
for (TimeDomain* time_domain : time_domains_) {
- LazyNow lazy_now_in_domain = time_domain == real_time_domain_.get()
- ? lazy_now
- : time_domain->CreateLazyNow();
- time_domain->UpdateWorkQueues(lazy_now_in_domain);
+ if (time_domain == real_time_domain_.get()) {
+ time_domain->WakeupReadyDelayedQueues(lazy_now);
+ continue;
+ }
+ LazyNow time_domain_lazy_now = time_domain->CreateLazyNow();
+ time_domain->WakeupReadyDelayedQueues(&time_domain_lazy_now);
}
}
void TaskQueueManager::OnBeginNestedMessageLoop() {
// We just entered a nested message loop, make sure there's a DoWork posted or
// the system will grind to a halt.
- delegate_->PostTask(FROM_HERE, from_main_thread_immediate_do_work_closure_);
+ SpinLock::Guard guard(do_work_pending_lock_);
+ delegate_->PostTask(FROM_HERE, immediate_do_work_closure_);
+ immediate_do_work_posted_count_++;
+ is_nested_ = true;
+}
+
+void TaskQueueManager::OnQueueHasImmediateWork(internal::TaskQueueImpl* queue) {
+ SpinLock::Guard guard(do_work_pending_lock_);
+ has_incomming_immediate_work_.insert(queue);
+
+ // There's no point posting a DoWork for a blocked or disabled queue, although
+ // we can only determine that on the main thread.
+ if (queue->RunsTasksOnCurrentThread() &&
+ (!queue->IsQueueEnabled() || queue->BlockedByFenceLocked())) {
Sami 2016/12/05 17:52:54 Hmm, we're not holding the any thread lock here, a
alex clarke (OOO till 29th) 2016/12/06 17:37:54 Actually we are but that's perhaps a little fragil
Sami 2016/12/07 16:13:42 Yeah, something like that would be better. The cou
+ return;
+ }
+
+ MaybeScheduleImmediateWorkLocked(FROM_HERE);
+}
+
+void TaskQueueManager::NotifyQueuesOfIncomingImmediateWorkOnMainThreadLocked() {
+ for (internal::TaskQueueImpl* queue : has_incomming_immediate_work_) {
+ queue->OnIncomingImmediateTaskAvailable();
+ }
+ has_incomming_immediate_work_.clear();
}
void TaskQueueManager::MaybeScheduleImmediateWork(
const tracked_objects::Location& from_here) {
- bool on_main_thread = delegate_->BelongsToCurrentThread();
- // De-duplicate DoWork posts.
- if (on_main_thread) {
- if (!main_thread_pending_wakeups_.insert(base::TimeTicks()).second) {
- return;
- }
- delegate_->PostTask(from_here, from_main_thread_immediate_do_work_closure_);
- } else {
- {
- base::AutoLock lock(other_thread_lock_);
- if (other_thread_pending_wakeup_)
- return;
- other_thread_pending_wakeup_ = true;
- }
- delegate_->PostTask(from_here,
- from_other_thread_immediate_do_work_closure_);
+ SpinLock::Guard guard(do_work_pending_lock_);
+ MaybeScheduleImmediateWorkLocked(from_here);
+}
+
+void TaskQueueManager::MaybeScheduleImmediateWorkLocked(
+ const tracked_objects::Location& from_here) {
Sami 2016/12/05 17:52:54 Can we check that a spinlock is held? Not sure...
alex clarke (OOO till 29th) 2016/12/06 17:37:54 Currently no it's not possible to do so in a good
Sami 2016/12/07 16:13:42 I was thinking something like a checked spinlock t
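For context, a "checked" spinlock along these lines would remember its owning thread in debug builds so the *Locked helpers can assert the lock is really held. A minimal sketch with illustrative names only (this is not the scheduler's actual SpinLock class):

  #include <atomic>
  #include <cassert>
  #include <thread>

  // Sketch of a debug-checked spinlock: Acquire() records the owner so that
  // helpers suffixed *Locked can call AssertAcquired() to verify the caller
  // holds the lock.
  class CheckedSpinLock {
   public:
    void Acquire() {
      while (locked_.test_and_set(std::memory_order_acquire)) {
        // Spin until the lock is released.
      }
  #ifndef NDEBUG
      owner_ = std::this_thread::get_id();
  #endif
    }

    void Release() {
  #ifndef NDEBUG
      owner_ = std::thread::id();
  #endif
      locked_.clear(std::memory_order_release);
    }

    // Called from *Locked helpers to verify the lock is held by this thread.
    void AssertAcquired() const {
  #ifndef NDEBUG
      assert(owner_ == std::this_thread::get_id());
  #endif
    }

    class Guard {
     public:
      explicit Guard(CheckedSpinLock& lock) : lock_(lock) { lock_.Acquire(); }
      ~Guard() { lock_.Release(); }

     private:
      CheckedSpinLock& lock_;
    };

   private:
    std::atomic_flag locked_ = ATOMIC_FLAG_INIT;
  #ifndef NDEBUG
    std::thread::id owner_;
  #endif
  };

With something like this, MaybeScheduleImmediateWorkLocked() could open with do_work_pending_lock_.AssertAcquired() rather than relying on the naming convention alone.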
+  // Unless we're nested, try to avoid posting redundant DoWorks.
+ if (!is_nested_ &&
+ (do_work_running_count_ == 1 || immediate_do_work_posted_count_ > 0)) {
+ return;
}
+
+ delegate_->PostTask(from_here, immediate_do_work_closure_);
+ immediate_do_work_posted_count_++;
}
void TaskQueueManager::MaybeScheduleDelayedWork(
@@ -191,56 +218,75 @@ void TaskQueueManager::MaybeScheduleDelayedWork(
base::TimeTicks now,
base::TimeDelta delay) {
DCHECK(main_thread_checker_.CalledOnValidThread());
- DCHECK_GE(delay, base::TimeDelta());
+ {
+ SpinLock::Guard guard(do_work_pending_lock_);
- // If there's a pending immediate DoWork then we rely on
- // TryAdvanceTimeDomains getting the TimeDomain to call
- // MaybeScheduleDelayedWork again when the immediate DoWork is complete.
- if (main_thread_pending_wakeups_.find(base::TimeTicks()) !=
- main_thread_pending_wakeups_.end()) {
- return;
+ // If there's a pending immediate DoWork then we rely on the logic in DoWork
+ // to post a continuation as needed.
+ if (immediate_do_work_posted_count_ > 0)
+ return;
+
+ // If a non-nested DoWork is running we can also rely on the logic in DoWork
+ // to post a continuation as needed.
+ if (do_work_running_count_ == 1 && !is_nested_)
+ return;
}
+ MaybeScheduleDelayedWorkInternal(from_here, now, delay);
+}
+
+void TaskQueueManager::MaybeScheduleDelayedWorkInternal(
+ const tracked_objects::Location& from_here,
+ base::TimeTicks now,
+ base::TimeDelta delay) {
+ DCHECK_GE(delay, base::TimeDelta());
// De-duplicate DoWork posts.
base::TimeTicks run_time = now + delay;
- if (!main_thread_pending_wakeups_.empty() &&
- *main_thread_pending_wakeups_.begin() <= run_time) {
+ if (next_delayed_do_work_ <= run_time && !next_delayed_do_work_.is_null())
return;
- }
- main_thread_pending_wakeups_.insert(run_time);
+
+ cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_);
+ next_delayed_do_work_ = run_time;
delegate_->PostDelayedTask(
- from_here, base::Bind(&TaskQueueManager::DoWork,
- weak_factory_.GetWeakPtr(), run_time, true),
- delay);
+ from_here, cancelable_delayed_do_work_closure_.callback(), delay);
}
-void TaskQueueManager::DoWork(base::TimeTicks run_time, bool from_main_thread) {
+void TaskQueueManager::DoWork(bool delayed) {
Sami 2016/12/05 17:52:54 meta-comment: This function is getting long -- may
alex clarke (OOO till 29th) 2016/12/06 17:37:54 Done.
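One way to act on this (hypothetical helper name; not necessarily how the later patch sets split it) is to hoist the continuation-posting logic from the end of DoWork() into its own method, mirroring the code further down in this hunk:

  // Hypothetical split: DoWork() runs the task batch, then a helper decides
  // whether a follow-up DoWork needs to be posted.
  void TaskQueueManager::DoWork(bool delayed) {
    // ... run up to |work_batch_size_| tasks ...
    LazyNow continuation_lazy_now(real_time_domain()->CreateLazyNow());
    UpdateWorkQueues(&continuation_lazy_now);
    PostDoWorkContinuationIfNeeded(&continuation_lazy_now, delayed);
  }

  void TaskQueueManager::PostDoWorkContinuationIfNeeded(LazyNow* lazy_now,
                                                        bool delayed) {
    SpinLock::Guard guard(do_work_pending_lock_);
    base::Optional<base::TimeDelta> next_delay = DelayTillNextTask(lazy_now);
    if (!next_delay)
      return;  // No work left, so no continuation is needed.
    if (next_delay.value().is_zero()) {
      delegate_->PostTask(FROM_HERE, immediate_do_work_closure_);
      immediate_do_work_posted_count_++;
    } else {
      MaybeScheduleDelayedWorkInternal(FROM_HERE, lazy_now->Now(),
                                       next_delay.value());
    }
  }

The bookkeeping on do_work_running_count_ and the de-duplication checks against already-posted DoWorks would move along with it.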
DCHECK(main_thread_checker_.CalledOnValidThread());
- TRACE_EVENT1(tracing_category_, "TaskQueueManager::DoWork",
- "from_main_thread", from_main_thread);
+  TRACE_EVENT1(tracing_category_, "TaskQueueManager::DoWork", "delayed",
+               delayed ? "true" : "false");
Sami 2016/12/05 17:52:54 nit: you can just pass |delayed| here and tracing
alex clarke (OOO till 29th) 2016/12/06 17:37:54 Done.
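For reference, the simplification Sami suggests amounts to handing the bool straight to the macro and letting the tracing machinery stringify it (sketch only, assuming the usual TRACE_EVENT1 argument handling):

  // Sketch: pass |delayed| through directly; no manual string conversion.
  TRACE_EVENT1(tracing_category_, "TaskQueueManager::DoWork", "delayed",
               delayed);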
+ LazyNow lazy_now(real_time_domain()->CreateLazyNow());
+ base::TimeTicks task_start_time;
- if (from_main_thread) {
- main_thread_pending_wakeups_.erase(run_time);
- } else {
- base::AutoLock lock(other_thread_lock_);
- other_thread_pending_wakeup_ = false;
+ bool is_nested = delegate_->IsNested();
+ if (!delegate_->IsNested()) {
+ if (task_time_observers_.might_have_observers())
+ task_start_time = lazy_now.Now();
+
+ queues_to_delete_.clear();
}
- // Posting a DoWork while a DoWork is running leads to spurious DoWorks.
- main_thread_pending_wakeups_.insert(base::TimeTicks());
+ for (int i = 0; i < work_batch_size_; i++) {
+ {
+ SpinLock::Guard guard(do_work_pending_lock_);
- if (!delegate_->IsNested())
- queues_to_delete_.clear();
+ is_nested_ = is_nested;
+ DCHECK_EQ(is_nested_, delegate_->IsNested());
- LazyNow lazy_now(real_time_domain()->CreateLazyNow());
- base::TimeTicks task_start_time;
+ if (i == 0) {
+ do_work_running_count_++;
- if (!delegate_->IsNested() && task_time_observers_.might_have_observers())
- task_start_time = lazy_now.Now();
+ if (!delayed) {
+ immediate_do_work_posted_count_--;
+ DCHECK_GE(immediate_do_work_posted_count_, 0);
+ }
+ }
- UpdateWorkQueues(lazy_now);
+ NotifyQueuesOfIncomingImmediateWorkOnMainThreadLocked();
+ }
- for (int i = 0; i < work_batch_size_; i++) {
- internal::WorkQueue* work_queue;
+ UpdateWorkQueues(&lazy_now);
+
+ internal::WorkQueue* work_queue = nullptr;
if (!SelectWorkQueueToService(&work_queue))
break;
@@ -259,7 +305,7 @@ void TaskQueueManager::DoWork(base::TimeTicks run_time, bool from_main_thread) {
}
lazy_now = real_time_domain()->CreateLazyNow();
- if (!delegate_->IsNested() && task_start_time != base::TimeTicks()) {
+ if (!is_nested && task_start_time != base::TimeTicks()) {
// Only report top level task durations.
base::TimeTicks task_end_time = lazy_now.Now();
for (auto& observer : task_time_observers_) {
@@ -272,29 +318,72 @@ void TaskQueueManager::DoWork(base::TimeTicks run_time, bool from_main_thread) {
work_queue = nullptr; // The queue may have been unregistered.
- UpdateWorkQueues(lazy_now);
-
// Only run a single task per batch in nested run loops so that we can
// properly exit the nested loop when someone calls RunLoop::Quit().
- if (delegate_->IsNested())
+ if (is_nested)
break;
}
- main_thread_pending_wakeups_.erase(base::TimeTicks());
-
// TODO(alexclarke): Consider refactoring the above loop to terminate only
// when there's no more work left to be done, rather than posting a
// continuation task.
- if (!selector_.EnabledWorkQueuesEmpty() || TryAdvanceTimeDomains())
- MaybeScheduleImmediateWork(FROM_HERE);
+ if (delayed)
+ next_delayed_do_work_ = base::TimeTicks();
+
+ LazyNow continuation_lazy_now(real_time_domain()->CreateLazyNow());
+ UpdateWorkQueues(&continuation_lazy_now);
+
+ SpinLock::Guard guard(do_work_pending_lock_);
+ NotifyQueuesOfIncomingImmediateWorkOnMainThreadLocked();
+ base::Optional<base::TimeDelta> next_delay =
+ DelayTillNextTask(&continuation_lazy_now);
+
+ do_work_running_count_--;
+ DCHECK_GE(do_work_running_count_, 0);
+
+ is_nested_ = is_nested;
+ DCHECK_EQ(is_nested_, delegate_->IsNested());
+
+ // If there are no tasks left then we don't need to post a continuation.
+ if (!next_delay)
+ return;
+
+ // If either an immediate DoWork or a delayed DoWork is pending then we don't
+ // need to post a continuation.
+ if (immediate_do_work_posted_count_ > 0 ||
+ (!next_delayed_do_work_.is_null() &&
+ next_delayed_do_work_ < continuation_lazy_now.Now())) {
+ return;
+ }
+
+ // Post a continuation task based on the delay till the next task.
+ if (next_delay.value().is_zero()) {
+ delegate_->PostTask(FROM_HERE, immediate_do_work_closure_);
+ immediate_do_work_posted_count_++;
+ } else {
+ MaybeScheduleDelayedWorkInternal(FROM_HERE, lazy_now.Now(),
+ next_delay.value());
+ }
}
-bool TaskQueueManager::TryAdvanceTimeDomains() {
- bool can_advance = false;
+base::Optional<base::TimeDelta> TaskQueueManager::DelayTillNextTask(
+ LazyNow* lazy_now) {
+  // If the selector has non-empty queues we trivially know there is immediate
+  // work to be done.
+ if (!selector_.EnabledWorkQueuesEmpty())
+ return base::TimeDelta();
+
+ // Otherwise we need to find the shortest delay, if any.
+ base::Optional<base::TimeDelta> next_continuation;
for (TimeDomain* time_domain : time_domains_) {
- can_advance |= time_domain->MaybeAdvanceTime();
+ base::Optional<base::TimeDelta> continuation =
+ time_domain->DelayTillNextTask(lazy_now);
+ if (!continuation)
+ continue;
+ if (!next_continuation || next_continuation.value() < continuation.value())
+ next_continuation = continuation;
}
- return can_advance;
+ return next_continuation;
}
bool TaskQueueManager::SelectWorkQueueToService(
@@ -326,7 +415,8 @@ TaskQueueManager::ProcessTaskResult TaskQueueManager::ProcessTaskFromWorkQueue(
if (queue->GetQuiescenceMonitored())
task_was_run_on_quiescence_monitored_queue_ = true;
- if (!pending_task.nestable && delegate_->IsNested()) {
+ DCHECK_EQ(is_nested_, delegate_->IsNested());
+ if (!pending_task.nestable && is_nested_) {
// Defer non-nestable work to the main task runner. NOTE these tasks can be
// arbitrarily delayed so the additional delay should not be a problem.
// TODO(skyostil): Figure out a way to not forget which task queue the
@@ -339,7 +429,8 @@ TaskQueueManager::ProcessTaskResult TaskQueueManager::ProcessTaskFromWorkQueue(
return ProcessTaskResult::DEFERRED;
}
- MaybeRecordTaskDelayHistograms(pending_task, queue);
+ if (record_task_delay_histograms_)
+ MaybeRecordTaskDelayHistograms(pending_task, queue);
TRACE_TASK_EXECUTION("TaskQueueManager::ProcessTaskFromWorkQueue",
pending_task);
@@ -472,6 +563,13 @@ TaskQueueManager::AsValueWithSelectorResult(
for (auto* time_domain : time_domains_)
time_domain->AsValueInto(state.get());
state->EndArray();
+
+ state->SetBoolean("is_nested", is_nested_);
+
+ SpinLock::Guard guard(do_work_pending_lock_);
+ state->SetInteger("do_work_count", do_work_running_count_);
+ state->SetInteger("immediate_do_work_posted",
+ immediate_do_work_posted_count_);
return std::move(state);
}
@@ -496,5 +594,11 @@ bool TaskQueueManager::HasImmediateWorkForTesting() const {
return !selector_.EnabledWorkQueuesEmpty();
}
+void TaskQueueManager::SetRecordTaskDelayHistograms(
+ bool record_task_delay_histograms) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ record_task_delay_histograms_ = record_task_delay_histograms;
+}
+
} // namespace scheduler
} // namespace blink
