OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "platform/scheduler/renderer/task_queue_throttler.h" | 5 #include "platform/scheduler/renderer/task_queue_throttler.h" |
6 | 6 |
7 #include <cstdint> | 7 #include <cstdint> |
8 | 8 |
9 #include "base/format_macros.h" | 9 #include "base/format_macros.h" |
10 #include "base/logging.h" | 10 #include "base/logging.h" |
(...skipping 61 matching lines...) |
72 | 72 |
73 } // namespace | 73 } // namespace |
74 | 74 |
75 TaskQueueThrottler::TaskQueueThrottler( | 75 TaskQueueThrottler::TaskQueueThrottler( |
76 RendererSchedulerImpl* renderer_scheduler, | 76 RendererSchedulerImpl* renderer_scheduler, |
77 const char* tracing_category) | 77 const char* tracing_category) |
78 : task_runner_(renderer_scheduler->ControlTaskRunner()), | 78 : task_runner_(renderer_scheduler->ControlTaskRunner()), |
79 renderer_scheduler_(renderer_scheduler), | 79 renderer_scheduler_(renderer_scheduler), |
80 tick_clock_(renderer_scheduler->tick_clock()), | 80 tick_clock_(renderer_scheduler->tick_clock()), |
81 tracing_category_(tracing_category), | 81 tracing_category_(tracing_category), |
82 time_domain_(new ThrottledTimeDomain(tracing_category)), | 82 time_domain_(new ThrottledTimeDomain(this, tracing_category)), |
83 allow_throttling_(true), | 83 allow_throttling_(true), |
84 weak_factory_(this) { | 84 weak_factory_(this) { |
85 pump_throttled_tasks_closure_.Reset(base::Bind( | 85 pump_throttled_tasks_closure_.Reset(base::Bind( |
86 &TaskQueueThrottler::PumpThrottledTasks, weak_factory_.GetWeakPtr())); | 86 &TaskQueueThrottler::PumpThrottledTasks, weak_factory_.GetWeakPtr())); |
87 forward_immediate_work_callback_ = | 87 forward_immediate_work_callback_ = |
88 base::Bind(&TaskQueueThrottler::OnQueueNextWakeUpChanged, | 88 base::Bind(&TaskQueueThrottler::OnTimeDomainHasImmediateWork, |
89 weak_factory_.GetWeakPtr()); | 89 weak_factory_.GetWeakPtr()); |
90 | 90 |
91 renderer_scheduler_->RegisterTimeDomain(time_domain_.get()); | 91 renderer_scheduler_->RegisterTimeDomain(time_domain_.get()); |
92 } | 92 } |
93 | 93 |
94 TaskQueueThrottler::~TaskQueueThrottler() { | 94 TaskQueueThrottler::~TaskQueueThrottler() { |
95 // It's possible for queues to be still throttled, so we need to tidy up | 95 // It's possible for queues to be still throttled, so we need to tidy up |
96 // before unregistering the time domain. | 96 // before unregistering the time domain. |
97 for (const TaskQueueMap::value_type& map_entry : queue_details_) { | 97 for (const TaskQueueMap::value_type& map_entry : queue_details_) { |
98 TaskQueue* task_queue = map_entry.first; | 98 TaskQueue* task_queue = map_entry.first; |
99 if (IsThrottled(task_queue)) { | 99 if (IsThrottled(task_queue)) { |
100 task_queue->SetTimeDomain(renderer_scheduler_->real_time_domain()); | 100 task_queue->SetTimeDomain(renderer_scheduler_->real_time_domain()); |
101 task_queue->RemoveFence(); | 101 task_queue->RemoveFence(); |
102 } | 102 } |
103 if (map_entry.second.throttling_ref_count != 0) | |
104 task_queue->SetObserver(nullptr); | |
105 } | 103 } |
106 | 104 |
107 renderer_scheduler_->UnregisterTimeDomain(time_domain_.get()); | 105 renderer_scheduler_->UnregisterTimeDomain(time_domain_.get()); |
108 } | 106 } |
109 | 107 |
110 void TaskQueueThrottler::IncreaseThrottleRefCount(TaskQueue* task_queue) { | 108 void TaskQueueThrottler::IncreaseThrottleRefCount(TaskQueue* task_queue) { |
111 DCHECK_NE(task_queue, task_runner_.get()); | 109 DCHECK_NE(task_queue, task_runner_.get()); |
112 | 110 |
113 std::pair<TaskQueueMap::iterator, bool> insert_result = | 111 std::pair<TaskQueueMap::iterator, bool> insert_result = |
114 queue_details_.insert(std::make_pair(task_queue, Metadata())); | 112 queue_details_.insert(std::make_pair(task_queue, Metadata())); |
115 insert_result.first->second.throttling_ref_count++; | 113 insert_result.first->second.throttling_ref_count++; |
116 | 114 |
117 // If ref_count is 1, the task queue is newly throttled. | 115 // If ref_count is 1, the task queue is newly throttled. |
118 if (insert_result.first->second.throttling_ref_count != 1) | 116 if (insert_result.first->second.throttling_ref_count != 1) |
119 return; | 117 return; |
120 | 118 |
121 TRACE_EVENT1(tracing_category_, "TaskQueueThrottler_TaskQueueThrottled", | 119 TRACE_EVENT1(tracing_category_, "TaskQueueThrottler_TaskQueueThrottled", |
122 "task_queue", task_queue); | 120 "task_queue", task_queue); |
123 | 121 |
124 task_queue->SetObserver(this); | |
125 | |
126 if (!allow_throttling_) | 122 if (!allow_throttling_) |
127 return; | 123 return; |
128 | 124 |
129 task_queue->SetTimeDomain(time_domain_.get()); | 125 task_queue->SetTimeDomain(time_domain_.get()); |
130 // This blocks any tasks from |task_queue| until PumpThrottledTasks() to | 126 // This blocks any tasks from |task_queue| until PumpThrottledTasks() to |
131 // enforce task alignment. | 127 // enforce task alignment. |
132 task_queue->InsertFence(TaskQueue::InsertFencePosition::BEGINNING_OF_TIME); | 128 task_queue->InsertFence(TaskQueue::InsertFencePosition::BEGINNING_OF_TIME); |
133 | 129 |
134 if (!task_queue->IsQueueEnabled()) | 130 if (!task_queue->IsQueueEnabled()) |
135 return; | 131 return; |
136 | 132 |
137 if (!task_queue->IsEmpty()) { | 133 if (!task_queue->IsEmpty()) { |
138 LazyNow lazy_now(tick_clock_); | 134 if (task_queue->HasPendingImmediateWork()) { |
139 OnQueueNextWakeUpChanged(task_queue, | 135 OnTimeDomainHasImmediateWork(task_queue); |
140 NextTaskRunTime(&lazy_now, task_queue).value()); | 136 } else { |
| 137 OnTimeDomainHasDelayedWork(task_queue); |
| 138 } |
141 } | 139 } |
142 } | 140 } |
143 | 141 |
144 void TaskQueueThrottler::DecreaseThrottleRefCount(TaskQueue* task_queue) { | 142 void TaskQueueThrottler::DecreaseThrottleRefCount(TaskQueue* task_queue) { |
145 TaskQueueMap::iterator iter = queue_details_.find(task_queue); | 143 TaskQueueMap::iterator iter = queue_details_.find(task_queue); |
146 | 144 |
147 if (iter == queue_details_.end() || | 145 if (iter == queue_details_.end() || |
148 --iter->second.throttling_ref_count != 0) { | 146 --iter->second.throttling_ref_count != 0) { |
149 return; | 147 return; |
150 } | 148 } |
151 | 149 |
152 TRACE_EVENT1(tracing_category_, "TaskQueueThrottler_TaskQueueUnthrottled", | 150 TRACE_EVENT1(tracing_category_, "TaskQueueThrottler_TaskQueueUnthrottled", |
153 "task_queue", task_queue); | 151 "task_queue", task_queue); |
154 | 152 |
155 task_queue->SetObserver(nullptr); | |
156 | |
157 MaybeDeleteQueueMetadata(iter); | 153 MaybeDeleteQueueMetadata(iter); |
158 | 154 |
159 if (!allow_throttling_) | 155 if (!allow_throttling_) |
160 return; | 156 return; |
161 | 157 |
162 task_queue->SetTimeDomain(renderer_scheduler_->real_time_domain()); | 158 task_queue->SetTimeDomain(renderer_scheduler_->real_time_domain()); |
163 task_queue->RemoveFence(); | 159 task_queue->RemoveFence(); |
164 } | 160 } |
165 | 161 |
166 bool TaskQueueThrottler::IsThrottled(TaskQueue* task_queue) const { | 162 bool TaskQueueThrottler::IsThrottled(TaskQueue* task_queue) const { |
(...skipping 13 matching lines...) |
180 | 176 |
181 LazyNow lazy_now(tick_clock_); | 177 LazyNow lazy_now(tick_clock_); |
182 std::unordered_set<BudgetPool*> budget_pools = find_it->second.budget_pools; | 178 std::unordered_set<BudgetPool*> budget_pools = find_it->second.budget_pools; |
183 for (BudgetPool* budget_pool : budget_pools) { | 179 for (BudgetPool* budget_pool : budget_pools) { |
184 budget_pool->RemoveQueue(lazy_now.Now(), task_queue); | 180 budget_pool->RemoveQueue(lazy_now.Now(), task_queue); |
185 } | 181 } |
186 | 182 |
187 // Iterator may have been deleted by BudgetPool::RemoveQueue, so don't | 183 // Iterator may have been deleted by BudgetPool::RemoveQueue, so don't |
188 // use it here. | 184 // use it here. |
189 queue_details_.erase(task_queue); | 185 queue_details_.erase(task_queue); |
190 | |
191 // NOTE: Observer is automatically unregistered when unregistering task queue. | |
192 } | 186 } |
193 | 187 |
194 void TaskQueueThrottler::OnQueueNextWakeUpChanged( | 188 void TaskQueueThrottler::OnTimeDomainHasImmediateWork(TaskQueue* queue) { |
195 TaskQueue* queue, | 189 // Forward to the main thread if called from another thread |
196 base::TimeTicks next_wake_up) { | |
197 if (!task_runner_->RunsTasksOnCurrentThread()) { | 190 if (!task_runner_->RunsTasksOnCurrentThread()) { |
198 task_runner_->PostTask( | 191 task_runner_->PostTask(FROM_HERE, |
199 FROM_HERE, | 192 base::Bind(forward_immediate_work_callback_, queue)); |
200 base::Bind(forward_immediate_work_callback_, queue, next_wake_up)); | |
201 return; | 193 return; |
202 } | 194 } |
203 | |
204 TRACE_EVENT0(tracing_category_, | 195 TRACE_EVENT0(tracing_category_, |
205 "TaskQueueThrottler::OnQueueNextWakeUpChanged"); | 196 "TaskQueueThrottler::OnTimeDomainHasImmediateWork"); |
206 | 197 |
207 // We don't expect this to get called for disabled queues, but we can't DCHECK | 198 // We don't expect this to get called for disabled queues, but we can't DCHECK |
208 // because of the above thread hop. Just bail out if the queue is disabled. | 199 // because of the above thread hop. Just bail out if the queue is disabled. |
209 if (!queue->IsQueueEnabled()) | 200 if (!queue->IsQueueEnabled()) |
210 return; | 201 return; |
211 | 202 |
212 base::TimeTicks now = tick_clock_->NowTicks(); | 203 base::TimeTicks now = tick_clock_->NowTicks(); |
213 MaybeSchedulePumpThrottledTasks( | 204 base::TimeTicks next_allowed_run_time = GetNextAllowedRunTime(now, queue); |
214 FROM_HERE, now, | 205 MaybeSchedulePumpThrottledTasks(FROM_HERE, now, next_allowed_run_time); |
215 std::max(GetNextAllowedRunTime(now, queue), next_wake_up)); | 206 } |
| 207 |
| 208 void TaskQueueThrottler::OnTimeDomainHasDelayedWork(TaskQueue* queue) { |
| 209 TRACE_EVENT0(tracing_category_, |
| 210 "TaskQueueThrottler::OnTimeDomainHasDelayedWork"); |
| 211 DCHECK(queue->IsQueueEnabled()); |
| 212 base::TimeTicks now = tick_clock_->NowTicks(); |
| 213 LazyNow lazy_now(now); |
| 214 |
| 215 base::Optional<base::TimeTicks> next_scheduled_delayed_task = |
| 216 NextTaskRunTime(&lazy_now, queue); |
| 217 DCHECK(next_scheduled_delayed_task); |
| 218 MaybeSchedulePumpThrottledTasks(FROM_HERE, now, |
| 219 next_scheduled_delayed_task.value()); |
216 } | 220 } |
217 | 221 |
218 void TaskQueueThrottler::PumpThrottledTasks() { | 222 void TaskQueueThrottler::PumpThrottledTasks() { |
219 TRACE_EVENT0(tracing_category_, "TaskQueueThrottler::PumpThrottledTasks"); | 223 TRACE_EVENT0(tracing_category_, "TaskQueueThrottler::PumpThrottledTasks"); |
220 pending_pump_throttled_tasks_runtime_.reset(); | 224 pending_pump_throttled_tasks_runtime_.reset(); |
221 | 225 |
222 LazyNow lazy_now(tick_clock_); | 226 LazyNow lazy_now(tick_clock_); |
223 base::Optional<base::TimeTicks> next_scheduled_delayed_task; | 227 base::Optional<base::TimeTicks> next_scheduled_delayed_task; |
224 | 228 |
225 for (const TaskQueueMap::value_type& map_entry : queue_details_) { | 229 for (const TaskQueueMap::value_type& map_entry : queue_details_) { |
(...skipping 258 matching lines...) |
484 queue->InsertFence(TaskQueue::InsertFencePosition::BEGINNING_OF_TIME); | 488 queue->InsertFence(TaskQueue::InsertFencePosition::BEGINNING_OF_TIME); |
485 queue->SetTimeDomain(time_domain_.get()); | 489 queue->SetTimeDomain(time_domain_.get()); |
486 SchedulePumpQueue(FROM_HERE, lazy_now.Now(), queue); | 490 SchedulePumpQueue(FROM_HERE, lazy_now.Now(), queue); |
487 } | 491 } |
488 | 492 |
489 TRACE_EVENT0(tracing_category_, "TaskQueueThrottler_EnableThrottling"); | 493 TRACE_EVENT0(tracing_category_, "TaskQueueThrottler_EnableThrottling"); |
490 } | 494 } |
491 | 495 |
492 } // namespace scheduler | 496 } // namespace scheduler |
493 } // namespace blink | 497 } // namespace blink |
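
The right-hand column replaces the TaskQueue::Observer hook OnQueueNextWakeUpChanged(queue, next_wake_up) with two notifications routed through ThrottledTimeDomain: OnTimeDomainHasImmediateWork and OnTimeDomainHasDelayedWork. The sketch below models that split in isolation, assuming a simple whole-second alignment policy; apart from those two method names, every type and helper here (FakeQueue, NextAllowedRunTime, SchedulePump) is a hypothetical stand-in, not the real scheduler API.

// Minimal sketch of the immediate/delayed notification split shown in the
// right-hand column. Everything except the two On* method names is a
// hypothetical stand-in for the real scheduler types.
#include <algorithm>
#include <cstdio>

using TimeTicks = long long;  // Stand-in for base::TimeTicks (milliseconds here).

struct FakeQueue {
  bool enabled = true;
  TimeTicks next_delayed_run_time = 0;  // Earliest pending delayed task.
};

class ThrottlerSketch {
 public:
  // Immediate work arrived: pump as soon as throttling allows.
  void OnTimeDomainHasImmediateWork(FakeQueue* queue, TimeTicks now) {
    if (!queue->enabled)
      return;  // Mirrors the early-out for disabled queues in the diff.
    SchedulePump(NextAllowedRunTime(now));
  }

  // A delayed task became the next wake-up: pump at that task's run time,
  // but never earlier than throttling allows.
  void OnTimeDomainHasDelayedWork(FakeQueue* queue, TimeTicks now) {
    SchedulePump(std::max(queue->next_delayed_run_time, NextAllowedRunTime(now)));
  }

 private:
  // Aligns wake-ups to whole-second boundaries; the real policy also
  // consults budget pools, which this sketch omits.
  TimeTicks NextAllowedRunTime(TimeTicks now) const {
    const TimeTicks interval = 1000;
    return ((now / interval) + 1) * interval;
  }

  void SchedulePump(TimeTicks when) {
    std::printf("pump throttled tasks at %lld ms\n", when);
  }
};

int main() {
  ThrottlerSketch throttler;
  FakeQueue queue;
  throttler.OnTimeDomainHasImmediateWork(&queue, /*now=*/2345);  // pumps at 3000
  queue.next_delayed_run_time = 7500;
  throttler.OnTimeDomainHasDelayedWork(&queue, /*now=*/2345);    // pumps at 7500
  return 0;
}

Built as a standalone translation unit, the sketch prints the wake-up times each path would request, which is the essence of what MaybeSchedulePumpThrottledTasks receives in the diff.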