OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "platform/scheduler/renderer/task_queue_throttler.h" | 5 #include "platform/scheduler/renderer/task_queue_throttler.h" |
6 | 6 |
7 #include <cstdint> | 7 #include <cstdint> |
8 | 8 |
9 #include "base/format_macros.h" | 9 #include "base/format_macros.h" |
10 #include "base/logging.h" | 10 #include "base/logging.h" |
(...skipping 61 matching lines...) |
72 | 72 |
73 } // namespace | 73 } // namespace |
74 | 74 |
75 TaskQueueThrottler::TaskQueueThrottler( | 75 TaskQueueThrottler::TaskQueueThrottler( |
76 RendererSchedulerImpl* renderer_scheduler, | 76 RendererSchedulerImpl* renderer_scheduler, |
77 const char* tracing_category) | 77 const char* tracing_category) |
78 : task_runner_(renderer_scheduler->ControlTaskRunner()), | 78 : task_runner_(renderer_scheduler->ControlTaskRunner()), |
79 renderer_scheduler_(renderer_scheduler), | 79 renderer_scheduler_(renderer_scheduler), |
80 tick_clock_(renderer_scheduler->tick_clock()), | 80 tick_clock_(renderer_scheduler->tick_clock()), |
81 tracing_category_(tracing_category), | 81 tracing_category_(tracing_category), |
82 time_domain_(new ThrottledTimeDomain(this, tracing_category)), | 82 time_domain_(new ThrottledTimeDomain(tracing_category)), |
83 allow_throttling_(true), | 83 allow_throttling_(true), |
84 weak_factory_(this) { | 84 weak_factory_(this) { |
85 pump_throttled_tasks_closure_.Reset(base::Bind( | 85 pump_throttled_tasks_closure_.Reset(base::Bind( |
86 &TaskQueueThrottler::PumpThrottledTasks, weak_factory_.GetWeakPtr())); | 86 &TaskQueueThrottler::PumpThrottledTasks, weak_factory_.GetWeakPtr())); |
87 forward_immediate_work_callback_ = | 87 forward_immediate_work_callback_ = |
88 base::Bind(&TaskQueueThrottler::OnTimeDomainHasImmediateWork, | 88 base::Bind(&TaskQueueThrottler::OnQueueNextWakeUpChanged, |
89 weak_factory_.GetWeakPtr()); | 89 weak_factory_.GetWeakPtr()); |
90 | 90 |
91 renderer_scheduler_->RegisterTimeDomain(time_domain_.get()); | 91 renderer_scheduler_->RegisterTimeDomain(time_domain_.get()); |
92 } | 92 } |
93 | 93 |
94 TaskQueueThrottler::~TaskQueueThrottler() { | 94 TaskQueueThrottler::~TaskQueueThrottler() { |
95 // It's possible for queues to still be throttled, so we need to tidy up | 95 // It's possible for queues to still be throttled, so we need to tidy up |
96 // before unregistering the time domain. | 96 // before unregistering the time domain. |
97 for (const TaskQueueMap::value_type& map_entry : queue_details_) { | 97 for (const TaskQueueMap::value_type& map_entry : queue_details_) { |
98 TaskQueue* task_queue = map_entry.first; | 98 TaskQueue* task_queue = map_entry.first; |
99 if (IsThrottled(task_queue)) { | 99 if (IsThrottled(task_queue)) { |
100 task_queue->SetTimeDomain(renderer_scheduler_->real_time_domain()); | 100 task_queue->SetTimeDomain(renderer_scheduler_->real_time_domain()); |
101 task_queue->RemoveFence(); | 101 task_queue->RemoveFence(); |
102 } | 102 } |
| 103 if (map_entry.second.throttling_ref_count != 0) |
| 104 task_queue->SetObserver(nullptr); |
103 } | 105 } |
104 | 106 |
105 renderer_scheduler_->UnregisterTimeDomain(time_domain_.get()); | 107 renderer_scheduler_->UnregisterTimeDomain(time_domain_.get()); |
106 } | 108 } |
107 | 109 |
108 void TaskQueueThrottler::IncreaseThrottleRefCount(TaskQueue* task_queue) { | 110 void TaskQueueThrottler::IncreaseThrottleRefCount(TaskQueue* task_queue) { |
109 DCHECK_NE(task_queue, task_runner_.get()); | 111 DCHECK_NE(task_queue, task_runner_.get()); |
110 | 112 |
111 std::pair<TaskQueueMap::iterator, bool> insert_result = | 113 std::pair<TaskQueueMap::iterator, bool> insert_result = |
112 queue_details_.insert(std::make_pair(task_queue, Metadata())); | 114 queue_details_.insert(std::make_pair(task_queue, Metadata())); |
113 insert_result.first->second.throttling_ref_count++; | 115 insert_result.first->second.throttling_ref_count++; |
114 | 116 |
115 // If ref_count is 1, the task queue is newly throttled. | 117 // If ref_count is 1, the task queue is newly throttled. |
116 if (insert_result.first->second.throttling_ref_count != 1) | 118 if (insert_result.first->second.throttling_ref_count != 1) |
117 return; | 119 return; |
118 | 120 |
119 TRACE_EVENT1(tracing_category_, "TaskQueueThrottler_TaskQueueThrottled", | 121 TRACE_EVENT1(tracing_category_, "TaskQueueThrottler_TaskQueueThrottled", |
120 "task_queue", task_queue); | 122 "task_queue", task_queue); |
121 | 123 |
| 124 task_queue->SetObserver(this); |
| 125 |
122 if (!allow_throttling_) | 126 if (!allow_throttling_) |
123 return; | 127 return; |
124 | 128 |
125 task_queue->SetTimeDomain(time_domain_.get()); | 129 task_queue->SetTimeDomain(time_domain_.get()); |
126 // This blocks any tasks from |task_queue| until PumpThrottledTasks() runs, | 130 // This blocks any tasks from |task_queue| until PumpThrottledTasks() runs, |
127 // to enforce task alignment. | 131 // to enforce task alignment. |
128 task_queue->InsertFence(TaskQueue::InsertFencePosition::BEGINNING_OF_TIME); | 132 task_queue->InsertFence(TaskQueue::InsertFencePosition::BEGINNING_OF_TIME); |
129 | 133 |
130 if (!task_queue->IsQueueEnabled()) | 134 if (!task_queue->IsQueueEnabled()) |
131 return; | 135 return; |
132 | 136 |
133 if (!task_queue->IsEmpty()) { | 137 if (!task_queue->IsEmpty()) { |
134 if (task_queue->HasPendingImmediateWork()) { | 138 LazyNow lazy_now(tick_clock_); |
135 OnTimeDomainHasImmediateWork(task_queue); | 139 OnQueueNextWakeUpChanged(task_queue, |
136 } else { | 140 NextTaskRunTime(&lazy_now, task_queue).value()); |
137 OnTimeDomainHasDelayedWork(task_queue); | |
138 } | |
139 } | 141 } |
140 } | 142 } |
141 | 143 |
142 void TaskQueueThrottler::DecreaseThrottleRefCount(TaskQueue* task_queue) { | 144 void TaskQueueThrottler::DecreaseThrottleRefCount(TaskQueue* task_queue) { |
143 TaskQueueMap::iterator iter = queue_details_.find(task_queue); | 145 TaskQueueMap::iterator iter = queue_details_.find(task_queue); |
144 | 146 |
145 if (iter == queue_details_.end() || | 147 if (iter == queue_details_.end() || |
146 --iter->second.throttling_ref_count != 0) { | 148 --iter->second.throttling_ref_count != 0) { |
147 return; | 149 return; |
148 } | 150 } |
149 | 151 |
150 TRACE_EVENT1(tracing_category_, "TaskQueueThrottler_TaskQueueUnthrottled", | 152 TRACE_EVENT1(tracing_category_, "TaskQueueThrottler_TaskQueueUnthrottled", |
151 "task_queue", task_queue); | 153 "task_queue", task_queue); |
152 | 154 |
| 155 task_queue->SetObserver(nullptr); |
| 156 |
153 MaybeDeleteQueueMetadata(iter); | 157 MaybeDeleteQueueMetadata(iter); |
154 | 158 |
155 if (!allow_throttling_) | 159 if (!allow_throttling_) |
156 return; | 160 return; |
157 | 161 |
158 task_queue->SetTimeDomain(renderer_scheduler_->real_time_domain()); | 162 task_queue->SetTimeDomain(renderer_scheduler_->real_time_domain()); |
159 task_queue->RemoveFence(); | 163 task_queue->RemoveFence(); |
160 } | 164 } |
161 | 165 |
162 bool TaskQueueThrottler::IsThrottled(TaskQueue* task_queue) const { | 166 bool TaskQueueThrottler::IsThrottled(TaskQueue* task_queue) const { |
(...skipping 13 matching lines...) |
176 | 180 |
177 LazyNow lazy_now(tick_clock_); | 181 LazyNow lazy_now(tick_clock_); |
178 std::unordered_set<BudgetPool*> budget_pools = find_it->second.budget_pools; | 182 std::unordered_set<BudgetPool*> budget_pools = find_it->second.budget_pools; |
179 for (BudgetPool* budget_pool : budget_pools) { | 183 for (BudgetPool* budget_pool : budget_pools) { |
180 budget_pool->RemoveQueue(lazy_now.Now(), task_queue); | 184 budget_pool->RemoveQueue(lazy_now.Now(), task_queue); |
181 } | 185 } |
182 | 186 |
183 // Iterator may have been deleted by BudgetPool::RemoveQueue, so don't | 187 // Iterator may have been deleted by BudgetPool::RemoveQueue, so don't |
184 // use it here. | 188 // use it here. |
185 queue_details_.erase(task_queue); | 189 queue_details_.erase(task_queue); |
| 190 |
| 191 // NOTE: The observer is automatically unregistered when the task queue is unregistered. |
186 } | 192 } |
187 | 193 |
188 void TaskQueueThrottler::OnTimeDomainHasImmediateWork(TaskQueue* queue) { | 194 void TaskQueueThrottler::OnQueueNextWakeUpChanged( |
189 // Forward to the main thread if called from another thread | 195 TaskQueue* queue, |
| 196 base::TimeTicks next_wake_up) { |
190 if (!task_runner_->RunsTasksOnCurrentThread()) { | 197 if (!task_runner_->RunsTasksOnCurrentThread()) { |
191 task_runner_->PostTask(FROM_HERE, | 198 task_runner_->PostTask( |
192 base::Bind(forward_immediate_work_callback_, queue)); | 199 FROM_HERE, |
| 200 base::Bind(forward_immediate_work_callback_, queue, next_wake_up)); |
193 return; | 201 return; |
194 } | 202 } |
| 203 |
195 TRACE_EVENT0(tracing_category_, | 204 TRACE_EVENT0(tracing_category_, |
196 "TaskQueueThrottler::OnTimeDomainHasImmediateWork"); | 205 "TaskQueueThrottler::OnQueueNextWakeUpChanged"); |
197 | 206 |
198 // We don't expect this to get called for disabled queues, but we can't DCHECK | 207 // We don't expect this to get called for disabled queues, but we can't DCHECK |
199 // because of the above thread hop. Just bail out if the queue is disabled. | 208 // because of the above thread hop. Just bail out if the queue is disabled. |
200 if (!queue->IsQueueEnabled()) | 209 if (!queue->IsQueueEnabled()) |
201 return; | 210 return; |
202 | 211 |
203 base::TimeTicks now = tick_clock_->NowTicks(); | 212 base::TimeTicks now = tick_clock_->NowTicks(); |
204 base::TimeTicks next_allowed_run_time = GetNextAllowedRunTime(now, queue); | 213 MaybeSchedulePumpThrottledTasks( |
205 MaybeSchedulePumpThrottledTasks(FROM_HERE, now, next_allowed_run_time); | 214 FROM_HERE, now, |
206 } | 215 std::max(GetNextAllowedRunTime(now, queue), next_wake_up)); |
207 | |
208 void TaskQueueThrottler::OnTimeDomainHasDelayedWork(TaskQueue* queue) { | |
209 TRACE_EVENT0(tracing_category_, | |
210 "TaskQueueThrottler::OnTimeDomainHasDelayedWork"); | |
211 DCHECK(queue->IsQueueEnabled()); | |
212 base::TimeTicks now = tick_clock_->NowTicks(); | |
213 LazyNow lazy_now(now); | |
214 | |
215 base::Optional<base::TimeTicks> next_scheduled_delayed_task = | |
216 NextTaskRunTime(&lazy_now, queue); | |
217 DCHECK(next_scheduled_delayed_task); | |
218 MaybeSchedulePumpThrottledTasks(FROM_HERE, now, | |
219 next_scheduled_delayed_task.value()); | |
220 } | 216 } |
221 | 217 |
222 void TaskQueueThrottler::PumpThrottledTasks() { | 218 void TaskQueueThrottler::PumpThrottledTasks() { |
223 TRACE_EVENT0(tracing_category_, "TaskQueueThrottler::PumpThrottledTasks"); | 219 TRACE_EVENT0(tracing_category_, "TaskQueueThrottler::PumpThrottledTasks"); |
224 pending_pump_throttled_tasks_runtime_.reset(); | 220 pending_pump_throttled_tasks_runtime_.reset(); |
225 | 221 |
226 LazyNow lazy_now(tick_clock_); | 222 LazyNow lazy_now(tick_clock_); |
227 base::Optional<base::TimeTicks> next_scheduled_delayed_task; | 223 base::Optional<base::TimeTicks> next_scheduled_delayed_task; |
228 | 224 |
229 for (const TaskQueueMap::value_type& map_entry : queue_details_) { | 225 for (const TaskQueueMap::value_type& map_entry : queue_details_) { |
(...skipping 258 matching lines...) |
488 queue->InsertFence(TaskQueue::InsertFencePosition::BEGINNING_OF_TIME); | 484 queue->InsertFence(TaskQueue::InsertFencePosition::BEGINNING_OF_TIME); |
489 queue->SetTimeDomain(time_domain_.get()); | 485 queue->SetTimeDomain(time_domain_.get()); |
490 SchedulePumpQueue(FROM_HERE, lazy_now.Now(), queue); | 486 SchedulePumpQueue(FROM_HERE, lazy_now.Now(), queue); |
491 } | 487 } |
492 | 488 |
493 TRACE_EVENT0(tracing_category_, "TaskQueueThrottler_EnableThrottling"); | 489 TRACE_EVENT0(tracing_category_, "TaskQueueThrottler_EnableThrottling"); |
494 } | 490 } |
495 | 491 |
496 } // namespace scheduler | 492 } // namespace scheduler |
497 } // namespace blink | 493 } // namespace blink |
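
The core of this change is that the throttler now observes each throttled queue directly: IncreaseThrottleRefCount() calls task_queue->SetObserver(this), and the queue reports its earliest desired wake-up through a single OnQueueNextWakeUpChanged(queue, next_wake_up) callback, replacing the old OnTimeDomainHasImmediateWork()/OnTimeDomainHasDelayedWork() pair. The throttler then pumps no earlier than std::max(GetNextAllowedRunTime(now, queue), next_wake_up). Below is a minimal, self-contained sketch of that clamping logic; QueueObserver, ThrottlerSketch, and the std::chrono-based TimeTicks are simplified stand-ins for illustration, not the real Chromium types.

#include <algorithm>
#include <chrono>
#include <iostream>

using TimeTicks = std::chrono::steady_clock::time_point;

// Stand-in for TaskQueue::Observer: a queue reports the earliest time at
// which it wants to wake up.
class QueueObserver {
 public:
  virtual ~QueueObserver() = default;
  virtual void OnQueueNextWakeUpChanged(TimeTicks next_wake_up) = 0;
};

// Stand-in for the throttler: it never schedules a pump earlier than the
// next allowed run time, mirroring
// std::max(GetNextAllowedRunTime(now, queue), next_wake_up) in the CL.
class ThrottlerSketch : public QueueObserver {
 public:
  explicit ThrottlerSketch(TimeTicks next_allowed_run_time)
      : next_allowed_run_time_(next_allowed_run_time) {}

  void OnQueueNextWakeUpChanged(TimeTicks next_wake_up) override {
    TimeTicks pump_time = std::max(next_allowed_run_time_, next_wake_up);
    // The real class would call MaybeSchedulePumpThrottledTasks() here.
    auto delay = std::chrono::duration_cast<std::chrono::milliseconds>(
        pump_time - std::chrono::steady_clock::now());
    std::cout << "pump scheduled in " << delay.count() << " ms\n";
  }

 private:
  TimeTicks next_allowed_run_time_;
};

int main() {
  TimeTicks now = std::chrono::steady_clock::now();
  ThrottlerSketch throttler(now + std::chrono::seconds(1));
  // A wake-up requested before the next allowed run time is deferred to it.
  throttler.OnQueueNextWakeUpChanged(now + std::chrono::milliseconds(10));
  // A wake-up after the allowed time is honoured as-is.
  throttler.OnQueueNextWakeUpChanged(now + std::chrono::seconds(5));
  return 0;
}

One apparent benefit of collapsing the two notification paths into one observer callback is that the queue hands the throttler a concrete wake-up time, so the thread hop in OnQueueNextWakeUpChanged() can forward that time along with the queue instead of recomputing NextTaskRunTime() on the control thread, as the removed OnTimeDomainHasDelayedWork() did.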