Chromium Code Reviews
| Index: third_party/WebKit/Source/platform/scheduler/renderer/task_queue_throttler.cc |
| diff --git a/third_party/WebKit/Source/platform/scheduler/renderer/task_queue_throttler.cc b/third_party/WebKit/Source/platform/scheduler/renderer/task_queue_throttler.cc |
| index e9744e259d6f4b2ed021c828e9b90c8a4e1df9e1..fe25b85a505999d99e6987336ff7d216c0b3bd21 100644 |
| --- a/third_party/WebKit/Source/platform/scheduler/renderer/task_queue_throttler.cc |
| +++ b/third_party/WebKit/Source/platform/scheduler/renderer/task_queue_throttler.cc |
| @@ -144,10 +144,12 @@ void TaskQueueThrottler::IncreaseThrottleRefCount(TaskQueue* task_queue) { |
| void TaskQueueThrottler::DecreaseThrottleRefCount(TaskQueue* task_queue) { |
| TaskQueueMap::iterator iter = queue_details_.find(task_queue); |
| - if (iter == queue_details_.end() || |
| - --iter->second.throttling_ref_count != 0) { |
| + if (iter == queue_details_.end()) |
| + return; |
| + if (iter->second.throttling_ref_count == 0) |
|
alex clarke (OOO till 29th)
2017/04/21 09:14:19
That looks really weird. Is this papering over a
altimin
2017/04/25 13:22:35
There is a special test ensuring that extra Decrea
|
| + return; |
| + if (--iter->second.throttling_ref_count != 0) |
| return; |
| - } |
| TRACE_EVENT1(tracing_category_, "TaskQueueThrottler_TaskQueueUnthrottled", |
| "task_queue", task_queue); |
| @@ -178,10 +180,9 @@ void TaskQueueThrottler::UnregisterTaskQueue(TaskQueue* task_queue) { |
| if (find_it == queue_details_.end()) |
| return; |
| - LazyNow lazy_now(tick_clock_); |
| std::unordered_set<BudgetPool*> budget_pools = find_it->second.budget_pools; |
| for (BudgetPool* budget_pool : budget_pools) { |
| - budget_pool->RemoveQueue(lazy_now.Now(), task_queue); |
| + budget_pool->UnregisterQueue(task_queue); |
| } |
| // Iterator may have been deleted by BudgetPool::RemoveQueue, so don't |
| @@ -210,9 +211,20 @@ void TaskQueueThrottler::OnQueueNextWakeUpChanged( |
| return; |
| base::TimeTicks now = tick_clock_->NowTicks(); |
| + |
| + auto find_it = queue_details_.find(queue); |
|
alex clarke (OOO till 29th)
2017/04/21 09:14:19
I don't remember off hand if return value optimiza
altimin
2017/04/25 13:22:35
auto find_it is a common pattern here. I don't wan
|
| + if (find_it == queue_details_.end()) |
| + return; |
| + |
| + for (BudgetPool* budget_pool : find_it->second.budget_pools) { |
| + budget_pool->OnTaskQueueHasWork(queue, now, next_wake_up); |
| + } |
| + |
| + base::TimeTicks next_allowed_run_time = |
| + GetNextAllowedRunTime(queue, now, next_wake_up); |
| + // TODO(altimin): Remove after moving to budget pools completely. |
| MaybeSchedulePumpThrottledTasks( |
| - FROM_HERE, now, |
| - std::max(GetNextAllowedRunTime(now, queue), next_wake_up)); |
| + FROM_HERE, now, std::max(next_wake_up, next_allowed_run_time)); |
| } |
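
The reworked scheduling in OnQueueNextWakeUpChanged boils down to one rule: pump no earlier than the queue's next wake-up (there is nothing to run before that) and no earlier than the latest time any of its budget pools allows. A minimal sketch of that combination, with std::chrono standing in for base::TimeTicks and hypothetical names throughout:

    #include <algorithm>
    #include <chrono>
    #include <vector>

    using TimeTicks = std::chrono::steady_clock::time_point;

    // The pump time is the desired wake-up, pushed back until every budget
    // pool the queue belongs to is willing to let it run.
    TimeTicks EffectivePumpTime(TimeTicks now,
                                TimeTicks next_wake_up,
                                const std::vector<TimeTicks>& pool_allowed_times) {
      TimeTicks next_allowed_run_time = now;
      for (TimeTicks allowed : pool_allowed_times)
        next_allowed_run_time = std::max(next_allowed_run_time, allowed);
      return std::max(next_wake_up, next_allowed_run_time);
    }
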
| void TaskQueueThrottler::PumpThrottledTasks() { |
| @@ -222,37 +234,57 @@ void TaskQueueThrottler::PumpThrottledTasks() { |
| LazyNow lazy_now(tick_clock_); |
| base::Optional<base::TimeTicks> next_scheduled_delayed_task; |
| + for (const auto& it : budget_pools_) |
|
alex clarke (OOO till 29th)
2017/04/21 09:14:19
uber nit: s/it/pair ;)
altimin
2017/04/25 13:22:35
Done.
|
| + it.first->OnWakeup(lazy_now.Now()); |
| + |
| for (const TaskQueueMap::value_type& map_entry : queue_details_) { |
| TaskQueue* task_queue = map_entry.first; |
| - if (task_queue->IsEmpty() || !IsThrottled(task_queue)) |
| + if (task_queue->IsEmpty() || !task_queue->IsQueueEnabled() || |
| + !IsThrottled(task_queue)) { |
| continue; |
| + } |
| - // Don't enable queues whose budget pool doesn't allow them to run now. |
| - base::TimeTicks next_allowed_run_time = |
| - GetNextAllowedRunTime(lazy_now.Now(), task_queue); |
| base::Optional<base::TimeTicks> next_desired_run_time = |
| NextTaskRunTime(&lazy_now, task_queue); |
| - if (next_desired_run_time && |
| - next_allowed_run_time > next_desired_run_time.value()) { |
| + if (!next_desired_run_time) { |
| + // This task queue does not any tasks. |
|
alex clarke (OOO till 29th)
2017/04/21 09:14:19
Looks like you're missing a word in the comment.
altimin
2017/04/25 13:22:35
Done.
|
| + task_queue->InsertFence(TaskQueue::InsertFencePosition::NOW); |
|
alex clarke (OOO till 29th)
2017/04/21 09:14:19
Is it possible to get here? The queue has to be e
altimin
2017/04/25 13:22:35
Done.
|
| + continue; |
| + } |
| + |
| + base::TimeTicks next_run_time = GetNextAllowedRunTime( |
| + task_queue, lazy_now.Now(), next_desired_run_time.value()); |
| + |
| + if (next_run_time > lazy_now.Now()) { |
| TRACE_EVENT1( |
| tracing_category_, |
| "TaskQueueThrottler::PumpThrottledTasks_ExpensiveTaskThrottled", |
| "throttle_time_in_seconds", |
| - (next_allowed_run_time - next_desired_run_time.value()).InSecondsF()); |
| + (next_run_time - next_desired_run_time.value()).InSecondsF()); |
| - // Schedule a pump for queue which was disabled because of time budget. |
| next_scheduled_delayed_task = |
| - Min(next_scheduled_delayed_task, next_allowed_run_time); |
| + Min(next_scheduled_delayed_task, next_run_time); |
| continue; |
| } |
| - next_scheduled_delayed_task = |
| - Min(next_scheduled_delayed_task, task_queue->GetNextScheduledWakeUp()); |
| + base::Optional<base::TimeTicks> next_wake_up = |
| + task_queue->GetNextScheduledWakeUp(); |
| + |
| + if (next_wake_up) { |
| + next_scheduled_delayed_task = |
| + Min(next_scheduled_delayed_task, |
| + GetNextAllowedRunTime(task_queue, lazy_now.Now(), |
| + next_wake_up.value())); |
| + } |
| - if (next_allowed_run_time > lazy_now.Now()) |
| + if (CanRunTasksUntil(task_queue, lazy_now.Now(), |
| + next_desired_run_time.value())) { |
| + // Remove fence if we can run new tasks until next wakeup. |
| + task_queue->RemoveFence(); |
| continue; |
| + } |
| // Remove previous fence and install a new one, allowing all tasks posted |
| // on |task_queue| up until this point to run and block all further tasks. |
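
The per-queue logic in the new PumpThrottledTasks body reduces to a four-way decision: leave the queue alone, schedule a later pump, unblock it entirely, or re-fence it so that only already-posted work runs. A condensed sketch of that decision; PumpAction, DecidePumpAction and the bool parameters are hypothetical condensations of the real checks:

    #include <optional>

    using TimeTicks = long long;  // stand-in for base::TimeTicks

    enum class PumpAction {
      kSkip,           // empty, disabled, or not throttled
      kScheduleLater,  // budget pools forbid running yet: pump again later
      kRemoveFence,    // enough budget until the next wake-up: run freely
      kFenceNow,       // run what is already posted, block newer tasks
    };

    PumpAction DecidePumpAction(bool throttled_enabled_non_empty,
                                std::optional<TimeTicks> next_desired_run_time,
                                TimeTicks now,
                                TimeTicks next_allowed_run_time,
                                bool can_run_until_next_wake_up) {
      if (!throttled_enabled_non_empty)
        return PumpAction::kSkip;
      // No pending task was found; the patch fences at NOW here, and the
      // review thread above asks whether this branch is reachable at all.
      if (!next_desired_run_time)
        return PumpAction::kFenceNow;
      if (next_allowed_run_time > now)
        return PumpAction::kScheduleLater;
      if (can_run_until_next_wake_up)
        return PumpAction::kRemoveFence;
      return PumpAction::kFenceNow;
    }
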
| @@ -315,6 +347,13 @@ CPUTimeBudgetPool* TaskQueueThrottler::CreateCPUTimeBudgetPool( |
| return time_budget_pool; |
| } |
| +WakeupBudgetPool* TaskQueueThrottler::CreateWakeupBudgetPool(const char* name) { |
| + WakeupBudgetPool* wakeup_budget_pool = |
| + new WakeupBudgetPool(name, this, tick_clock_->NowTicks()); |
| + budget_pools_[wakeup_budget_pool] = base::WrapUnique(wakeup_budget_pool); |
| + return wakeup_budget_pool; |
| +} |
| + |
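
CreateWakeupBudgetPool follows the same ownership pattern as CreateCPUTimeBudgetPool above it: budget_pools_ owns the pool through a unique_ptr keyed by the raw pointer, and the caller gets a non-owning pointer back. A standalone sketch of that pattern (Pool, Owner and Create are placeholder names):

    #include <memory>
    #include <string>
    #include <unordered_map>
    #include <utility>

    struct Pool {
      explicit Pool(std::string pool_name) : name(std::move(pool_name)) {}
      std::string name;
    };

    class Owner {
     public:
      // Returns a non-owning pointer; the map keeps the pool alive until it
      // is unregistered, mirroring budget_pools_ in the patch.
      Pool* Create(const char* name) {
        auto pool = std::make_unique<Pool>(name);
        Pool* raw = pool.get();
        pools_[raw] = std::move(pool);
        return raw;
      }

      void Unregister(Pool* pool) { pools_.erase(pool); }

     private:
      std::unordered_map<Pool*, std::unique_ptr<Pool>> pools_;
    };

Keying by the raw pointer keeps UnregisterBudgetPool an O(1) erase while still giving callers a plain pointer to hand around.
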
| void TaskQueueThrottler::OnTaskRunTimeReported(TaskQueue* task_queue, |
| base::TimeTicks start_time, |
| base::TimeTicks end_time) { |
| @@ -326,17 +365,25 @@ void TaskQueueThrottler::OnTaskRunTimeReported(TaskQueue* task_queue, |
| return; |
| for (BudgetPool* budget_pool : find_it->second.budget_pools) { |
| - budget_pool->RecordTaskRunTime(start_time, end_time); |
| - if (!budget_pool->HasEnoughBudgetToRun(end_time)) |
| + budget_pool->RecordTaskRunTime(task_queue, start_time, end_time); |
| + if (!budget_pool->CanRunTasksAt(end_time)) |
| budget_pool->BlockThrottledQueues(end_time); |
| } |
| } |
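
This loop is the budget-accounting hook: each pool is told how much time the task consumed and, if that leaves it over budget, it blocks its queues until the budget recovers. A deliberately simplified model of that contract; ToyCpuBudget and its seconds-based API are illustrative and not the real CPUTimeBudgetPool arithmetic:

    #include <algorithm>

    // Budget refills at |recovery_rate| (e.g. 0.01 == 1% of wall time) and is
    // spent one-for-one by task run time; it never accumulates above zero.
    class ToyCpuBudget {
     public:
      ToyCpuBudget(double recovery_rate, double now_seconds)
          : recovery_rate_(recovery_rate), last_update_(now_seconds) {}

      void RecordTaskRunTime(double start_seconds, double end_seconds) {
        Advance(end_seconds);
        budget_seconds_ -= (end_seconds - start_seconds);
      }

      bool CanRunTasksAt(double moment_seconds) {
        Advance(moment_seconds);
        return budget_seconds_ >= 0;
      }

      // Earliest moment at which the balance is back to zero.
      double GetNextAllowedRunTime(double now_seconds) {
        Advance(now_seconds);
        if (budget_seconds_ >= 0)
          return now_seconds;
        return now_seconds - budget_seconds_ / recovery_rate_;
      }

     private:
      void Advance(double now_seconds) {
        if (now_seconds <= last_update_)
          return;
        budget_seconds_ = std::min(
            0.0, budget_seconds_ + (now_seconds - last_update_) * recovery_rate_);
        last_update_ = now_seconds;
      }

      double recovery_rate_;
      double budget_seconds_ = 0;
      double last_update_;
    };
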
| -void TaskQueueThrottler::BlockQueue(base::TimeTicks now, TaskQueue* queue) { |
| +void TaskQueueThrottler::BlockQueue(QueueBlockType block_type, |
| + base::TimeTicks now, |
| + TaskQueue* queue) { |
| if (!IsThrottled(queue)) |
| return; |
| - queue->InsertFence(TaskQueue::InsertFencePosition::BEGINNING_OF_TIME); |
| + if (block_type == QueueBlockType::FULL) { |
| + queue->InsertFence(TaskQueue::InsertFencePosition::BEGINNING_OF_TIME); |
| + } else { // block_type == BlockType::NEW_TASKS_ONLY |
| + if (!queue->HasFence()) { |
| + queue->InsertFence(TaskQueue::InsertFencePosition::NOW); |
| + } |
| + } |
| SchedulePumpQueue(FROM_HERE, now, queue); |
| } |
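
The new QueueBlockType distinguishes two degrees of blocking: FULL (a fence at BEGINNING_OF_TIME) stops every task, while NEW_TASKS_ONLY (a fence at NOW, inserted only if no fence is present yet) lets already-posted tasks drain but holds back anything posted later. A toy model of fences as sequence-number cutoffs; ToyQueue and its members are placeholders, not the real TaskQueue API:

    #include <cstdint>
    #include <optional>

    class ToyQueue {
     public:
      // Every posted task gets an increasing sequence number.
      uint64_t Post() { return next_sequence_++; }

      // FULL block: cutoff before the first task, so nothing may run.
      void InsertFenceBeginningOfTime() { fence_ = 0; }

      // NEW_TASKS_ONLY: tasks posted so far may run, later ones may not.
      void InsertFenceNow() { fence_ = next_sequence_; }

      void RemoveFence() { fence_.reset(); }
      bool HasFence() const { return fence_.has_value(); }

      // A task runs only if it was posted before the fence cutoff.
      bool MayRun(uint64_t sequence) const {
        return !fence_ || sequence < *fence_;
      }

     private:
      uint64_t next_sequence_ = 0;
      std::optional<uint64_t> fence_;
    };

In this model a fresh NOW fence would move an existing, stricter cutoff forward and release tasks it was holding back, which is consistent with the patch only inserting a NOW fence when the queue has no fence yet.
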
| @@ -399,30 +446,36 @@ void TaskQueueThrottler::UnregisterBudgetPool(BudgetPool* budget_pool) { |
| } |
| void TaskQueueThrottler::UnblockQueue(base::TimeTicks now, TaskQueue* queue) { |
| - SchedulePumpQueue(FROM_HERE, now, queue); |
| + if (queue->HasFence()) |
|
alex clarke (OOO till 29th)
2017/04/21 09:14:19
I hope we don't get tempted to use fences for some
altimin
2017/04/25 13:22:36
Actually, we don't need this condition here.
|
| + SchedulePumpQueue(FROM_HERE, now, queue); |
| } |
| void TaskQueueThrottler::SchedulePumpQueue( |
| const tracked_objects::Location& from_here, |
| base::TimeTicks now, |
| TaskQueue* queue) { |
| - if (!IsThrottled(queue)) |
| + if (!IsThrottled(queue) || !queue->IsQueueEnabled()) |
| return; |
| LazyNow lazy_now(now); |
| base::Optional<base::TimeTicks> next_desired_run_time = |
| NextTaskRunTime(&lazy_now, queue); |
| - if (!next_desired_run_time) |
| + if (!next_desired_run_time) { |
| return; |
| - |
| - base::Optional<base::TimeTicks> next_run_time = |
| - Max(next_desired_run_time, GetNextAllowedRunTime(now, queue)); |
| + } |
| + base::Optional<base::TimeTicks> next_run_time; |
| + if (next_desired_run_time) { |
| + next_run_time = |
| + GetNextAllowedRunTime(queue, now, next_desired_run_time.value()); |
| + } |
| MaybeSchedulePumpThrottledTasks(from_here, now, next_run_time.value()); |
| } |
| -base::TimeTicks TaskQueueThrottler::GetNextAllowedRunTime(base::TimeTicks now, |
| - TaskQueue* queue) { |
| +base::TimeTicks TaskQueueThrottler::GetNextAllowedRunTime( |
| + TaskQueue* queue, |
| + base::TimeTicks now, |
|
alex clarke (OOO till 29th)
2017/04/21 09:14:19
I'm not 100% on this, but I wonder if things would
altimin
2017/04/25 13:22:35
Agreed.
|
| + base::TimeTicks desired_run_time) { |
| base::TimeTicks next_run_time = now; |
| auto find_it = queue_details_.find(queue); |
| @@ -430,13 +483,42 @@ base::TimeTicks TaskQueueThrottler::GetNextAllowedRunTime(base::TimeTicks now, |
| return next_run_time; |
| for (BudgetPool* budget_pool : find_it->second.budget_pools) { |
| - next_run_time = |
| - std::max(next_run_time, budget_pool->GetNextAllowedRunTime()); |
| + next_run_time = std::max( |
| + next_run_time, budget_pool->GetNextAllowedRunTime(desired_run_time)); |
| } |
| return next_run_time; |
| } |
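
Passing |desired_run_time| down to the pools is presumably what makes a wakeup-based pool workable: a CPU budget only needs to know when its balance recovers, but a wakeup pool has to align the time the queue wants to run with the wakeups it permits. A deliberately simplified sketch, assuming a pool that allows wakeups only on a fixed interval (an illustration, not the real WakeupBudgetPool policy):

    #include <cmath>

    // Toy wakeup pool: wakeups are only allowed at multiples of |interval_|
    // seconds, so a desired run time is rounded up to the next allowed slot.
    class ToyWakeupPool {
     public:
      explicit ToyWakeupPool(double interval_seconds)
          : interval_(interval_seconds) {}

      double GetNextAllowedRunTime(double desired_run_time) const {
        return std::ceil(desired_run_time / interval_) * interval_;
      }

     private:
      double interval_;
    };

GetNextAllowedRunTime above then takes the maximum over every pool the queue belongs to, so a queue that sits in both a CPU pool and a wakeup pool runs at the later of the two constraints.
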
| +bool TaskQueueThrottler::CanRunTasksAt(TaskQueue* queue, |
| + base::TimeTicks moment) { |
| + auto find_it = queue_details_.find(queue); |
| + if (find_it == queue_details_.end()) |
| + return true; |
| + |
| + for (BudgetPool* budget_pool : find_it->second.budget_pools) { |
| + if (!budget_pool->CanRunTasksAt(moment)) |
| + return false; |
| + } |
| + |
| + return true; |
| +} |
| + |
| +bool TaskQueueThrottler::CanRunTasksUntil(TaskQueue* queue, |
| + base::TimeTicks now, |
| + base::TimeTicks moment) { |
| + auto find_it = queue_details_.find(queue); |
| + if (find_it == queue_details_.end()) |
| + return true; |
| + |
| + for (BudgetPool* budget_pool : find_it->second.budget_pools) { |
| + if (!budget_pool->CanRunTasksUntil(now, moment)) |
| + return false; |
| + } |
| + |
| + return true; |
| +} |
| + |
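
Both new helpers share the same shape: an unregistered queue is unthrottled, so the default answer is "yes", and otherwise every pool the queue belongs to has a veto. The same shape written generically (ToyPool and CanRun are placeholders):

    #include <algorithm>
    #include <vector>

    struct ToyPool {
      bool can_run = true;  // stands in for CanRunTasksAt / CanRunTasksUntil
    };

    // No pools means no limits; otherwise every pool must agree.
    bool CanRun(const std::vector<const ToyPool*>& pools) {
      return std::all_of(pools.begin(), pools.end(),
                         [](const ToyPool* pool) { return pool->can_run; });
    }
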
| void TaskQueueThrottler::MaybeDeleteQueueMetadata(TaskQueueMap::iterator it) { |
| if (it->second.throttling_ref_count == 0 && it->second.budget_pools.empty()) |
| queue_details_.erase(it); |