Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "platform/scheduler/renderer/throttling_helper.h" | 5 #include "platform/scheduler/renderer/task_queue_throttler.h" |
| 6 | 6 |
| 7 #include <cstdint> | |
| 8 | |
| 9 #include "base/format_macros.h" | |
| 7 #include "base/logging.h" | 10 #include "base/logging.h" |
| 11 #include "base/memory/ptr_util.h" | |
| 12 #include "base/optional.h" | |
| 13 #include "base/strings/stringprintf.h" | |
| 8 #include "platform/scheduler/base/real_time_domain.h" | 14 #include "platform/scheduler/base/real_time_domain.h" |
| 9 #include "platform/scheduler/child/scheduler_tqm_delegate.h" | 15 #include "platform/scheduler/child/scheduler_tqm_delegate.h" |
| 10 #include "platform/scheduler/renderer/auto_advancing_virtual_time_domain.h" | 16 #include "platform/scheduler/renderer/auto_advancing_virtual_time_domain.h" |
| 11 #include "platform/scheduler/renderer/renderer_scheduler_impl.h" | 17 #include "platform/scheduler/renderer/renderer_scheduler_impl.h" |
| 12 #include "platform/scheduler/renderer/throttled_time_domain.h" | 18 #include "platform/scheduler/renderer/throttled_time_domain.h" |
| 13 #include "platform/scheduler/renderer/web_frame_scheduler_impl.h" | 19 #include "platform/scheduler/renderer/web_frame_scheduler_impl.h" |
| 14 #include "public/platform/WebFrameScheduler.h" | 20 #include "public/platform/WebFrameScheduler.h" |
| 15 | 21 |
| 16 namespace blink { | 22 namespace blink { |
| 17 namespace scheduler { | 23 namespace scheduler { |
| 18 | 24 |
| 19 ThrottlingHelper::ThrottlingHelper(RendererSchedulerImpl* renderer_scheduler, | 25 namespace { |
| 20 const char* tracing_category) | 26 const int kMaxBudgetLevelInSeconds = 1; |
| 27 | |
| 28 base::Optional<base::TimeTicks> NextTaskRunTime(LazyNow* lazy_now, | |
| 29 TaskQueue* queue) { | |
| 30 if (queue->HasPendingImmediateWork()) | |
| 31 return lazy_now->Now(); | |
| 32 return queue->GetNextScheduledWakeUp(); | |
| 33 } | |
| 34 } | |
| 35 | |
| 36 TaskQueueThrottler::TimeBudgetPool::TimeBudgetPool( | |
| 37 const char* name, | |
| 38 TaskQueueThrottler* task_queue_throttler, | |
| 39 base::TimeTicks now) | |
| 40 : name_(name), | |
| 41 task_queue_throttler_(task_queue_throttler), | |
| 42 max_budget_level_(base::TimeDelta::FromSeconds(kMaxBudgetLevelInSeconds)), | |
| 43 last_checkpoint_(now), | |
| 44 cpu_percentage_(1), | |
| 45 is_enabled_(true) {} | |
| 46 | |
| 47 TaskQueueThrottler::TimeBudgetPool::~TimeBudgetPool() {} | |
| 48 | |
| 49 void TaskQueueThrottler::TimeBudgetPool::SetTimeBudget(base::TimeTicks now, | |
| 50 double cpu_percentage) { | |
| 51 Advance(now); | |
| 52 cpu_percentage_ = cpu_percentage; | |
| 53 } | |
| 54 | |
| 55 void TaskQueueThrottler::TimeBudgetPool::AddQueue(base::TimeTicks now, | |
| 56 TaskQueue* queue) { | |
| 57 DCHECK(task_queue_throttler_->time_budget_pool_for_queue_.find(queue) == | |
| 58 task_queue_throttler_->time_budget_pool_for_queue_.end()); | |
| 59 task_queue_throttler_->time_budget_pool_for_queue_[queue] = this; | |
| 60 | |
| 61 associated_task_queues_.insert(queue); | |
| 62 | |
| 63 if (!task_queue_throttler_->IsThrottled(queue)) | |
|
alex clarke (OOO till 29th)
2016/09/15 12:19:35
Note: if |time_budget_pool_for_queue_| gets rolled
altimin
2016/09/15 15:52:11
It's not feasible to do this, because queue can be
alex clarke (OOO till 29th)
2016/09/15 16:18:40
That's not true. Just because |throttled_queues_|
altimin
2016/09/16 13:38:48
Done.
alex clarke (OOO till 29th)
2016/09/16 14:36:18
I'm not seeing this change, did you forget up load
altimin
2016/09/16 14:46:59
Sorry, fixed now.
| |
| 64 return; | |
| 65 | |
| 66 queue->SetQueueEnabled(false); | |
| 67 | |
| 68 task_queue_throttler_->MaybeSchedulePumpQueue(FROM_HERE, now, queue, | |
| 69 GetNextAllowedRunTime()); | |
| 70 } | |
| 71 | |
| 72 void TaskQueueThrottler::TimeBudgetPool::RemoveQueue(base::TimeTicks now, | |
| 73 TaskQueue* queue) { | |
| 74 DCHECK_EQ(task_queue_throttler_->time_budget_pool_for_queue_[queue], this); | |
| 75 task_queue_throttler_->time_budget_pool_for_queue_.erase(queue); | |
| 76 | |
| 77 associated_task_queues_.erase(queue); | |
| 78 | |
| 79 if (!task_queue_throttler_->IsThrottled(queue)) | |
| 80 return; | |
| 81 | |
| 82 task_queue_throttler_->MaybeSchedulePumpQueue(FROM_HERE, now, queue); | |
| 83 } | |
| 84 | |
| 85 void TaskQueueThrottler::TimeBudgetPool::EnableThrottling(LazyNow* lazy_now) { | |
| 86 if (is_enabled_) | |
| 87 return; | |
| 88 is_enabled_ = true; | |
| 89 | |
| 90 for (TaskQueue* queue : associated_task_queues_) { | |
| 91 if (!task_queue_throttler_->IsThrottled(queue)) | |
| 92 continue; | |
| 93 | |
| 94 queue->SetQueueEnabled(false); | |
| 95 | |
| 96 task_queue_throttler_->MaybeSchedulePumpQueue( | |
| 97 FROM_HERE, lazy_now->Now(), queue, GetNextAllowedRunTime()); | |
| 98 } | |
| 99 } | |
| 100 | |
| 101 void TaskQueueThrottler::TimeBudgetPool::DisableThrottling(LazyNow* lazy_now) { | |
| 102 if (!is_enabled_) | |
| 103 return; | |
| 104 is_enabled_ = false; | |
| 105 | |
| 106 for (TaskQueue* queue : associated_task_queues_) { | |
| 107 if (!task_queue_throttler_->IsThrottled(queue)) | |
| 108 continue; | |
| 109 | |
| 110 task_queue_throttler_->MaybeSchedulePumpQueue(FROM_HERE, lazy_now->Now(), | |
| 111 queue); | |
| 112 } | |
| 113 } | |
| 114 | |
| 115 bool TaskQueueThrottler::TimeBudgetPool::IsThrottlingEnabled() const { | |
| 116 return is_enabled_; | |
| 117 } | |
| 118 | |
| 119 void TaskQueueThrottler::TimeBudgetPool::Close() { | |
| 120 DCHECK_EQ(0u, associated_task_queues_.size()); | |
| 121 | |
| 122 task_queue_throttler_->time_budget_pools_.erase(this); | |
| 123 } | |
| 124 | |
| 125 bool TaskQueueThrottler::TimeBudgetPool::IsAllowedToRun(base::TimeTicks now) { | |
|
alex clarke (OOO till 29th)
2016/09/15 12:19:35
I wonder if HasEnoughBudgetToRun is a better name,
altimin
2016/09/15 15:52:11
Done.
| |
| 126 Advance(now); | |
| 127 return !is_enabled_ || current_budget_level_.InMicroseconds() >= 0; | |
| 128 } | |
| 129 | |
| 130 base::TimeTicks TaskQueueThrottler::TimeBudgetPool::GetNextAllowedRunTime() { | |
| 131 if (!is_enabled_ || current_budget_level_.InMicroseconds() >= 0) { | |
| 132 return last_checkpoint_; | |
| 133 } else { | |
| 134 // Subtract because current_budget is negative. | |
| 135 return last_checkpoint_ - current_budget_level_ / cpu_percentage_; | |
| 136 } | |
| 137 } | |
| 138 | |
| 139 void TaskQueueThrottler::TimeBudgetPool::RecordTaskRunTime( | |
| 140 base::TimeDelta task_run_time) { | |
| 141 if (is_enabled_) { | |
| 142 current_budget_level_ -= task_run_time; | |
| 143 } | |
| 144 } | |
| 145 | |
| 146 const char* TaskQueueThrottler::TimeBudgetPool::Name() const { | |
| 147 return name_; | |
| 148 } | |
| 149 | |
| 150 void TaskQueueThrottler::TimeBudgetPool::AsValueInto( | |
| 151 base::trace_event::TracedValue* state, | |
| 152 base::TimeTicks now) const { | |
| 153 state->BeginDictionary(); | |
| 154 | |
| 155 state->SetString("name", name_); | |
| 156 state->SetDouble("time_budget", cpu_percentage_); | |
| 157 state->SetDouble("time_budget_level_in_seconds", | |
| 158 current_budget_level_.InSecondsF()); | |
| 159 state->SetDouble("last_checkpoint_seconds_ago", | |
| 160 (now - last_checkpoint_).InSecondsF()); | |
| 161 | |
| 162 state->BeginArray("task_queues"); | |
| 163 for (TaskQueue* queue : associated_task_queues_) { | |
| 164 state->AppendString(base::StringPrintf( | |
| 165 "%" PRIx64, static_cast<uint64_t>(reinterpret_cast<uintptr_t>(queue)))); | |
| 166 } | |
| 167 state->EndArray(); | |
| 168 | |
| 169 state->EndDictionary(); | |
| 170 } | |
| 171 | |
| 172 void TaskQueueThrottler::TimeBudgetPool::Advance(base::TimeTicks now) { | |
| 173 if (now > last_checkpoint_) { | |
| 174 if (is_enabled_) { | |
| 175 current_budget_level_ = std::min( | |
| 176 current_budget_level_ + cpu_percentage_ * (now - last_checkpoint_), | |
| 177 max_budget_level_); | |
| 178 } | |
| 179 last_checkpoint_ = now; | |
| 180 } | |
| 181 } | |
| 182 | |
| 183 TaskQueueThrottler::TaskQueueThrottler( | |
| 184 RendererSchedulerImpl* renderer_scheduler, | |
| 185 const char* tracing_category) | |
| 21 : task_runner_(renderer_scheduler->ControlTaskRunner()), | 186 : task_runner_(renderer_scheduler->ControlTaskRunner()), |
| 22 renderer_scheduler_(renderer_scheduler), | 187 renderer_scheduler_(renderer_scheduler), |
| 23 tick_clock_(renderer_scheduler->tick_clock()), | 188 tick_clock_(renderer_scheduler->tick_clock()), |
| 24 tracing_category_(tracing_category), | 189 tracing_category_(tracing_category), |
| 25 time_domain_(new ThrottledTimeDomain(this, tracing_category)), | 190 time_domain_(new ThrottledTimeDomain(this, tracing_category)), |
| 26 virtual_time_(false), | 191 virtual_time_(false), |
| 27 weak_factory_(this) { | 192 weak_factory_(this) { |
| 28 pump_throttled_tasks_closure_.Reset(base::Bind( | 193 pump_throttled_tasks_closure_.Reset(base::Bind( |
| 29 &ThrottlingHelper::PumpThrottledTasks, weak_factory_.GetWeakPtr())); | 194 &TaskQueueThrottler::PumpThrottledTasks, weak_factory_.GetWeakPtr())); |
| 30 forward_immediate_work_closure_ = | 195 forward_immediate_work_callback_ = |
| 31 base::Bind(&ThrottlingHelper::OnTimeDomainHasImmediateWork, | 196 base::Bind(&TaskQueueThrottler::OnTimeDomainHasImmediateWork, |
| 32 weak_factory_.GetWeakPtr()); | 197 weak_factory_.GetWeakPtr()); |
| 33 | 198 |
| 34 renderer_scheduler_->RegisterTimeDomain(time_domain_.get()); | 199 renderer_scheduler_->RegisterTimeDomain(time_domain_.get()); |
| 35 } | 200 } |
| 36 | 201 |
| 37 ThrottlingHelper::~ThrottlingHelper() { | 202 TaskQueueThrottler::~TaskQueueThrottler() { |
| 38 // It's possible for queues to be still throttled, so we need to tidy up | 203 // It's possible for queues to be still throttled, so we need to tidy up |
| 39 // before unregistering the time domain. | 204 // before unregistering the time domain. |
| 40 for (const TaskQueueMap::value_type& map_entry : throttled_queues_) { | 205 for (const TaskQueueMap::value_type& map_entry : throttled_queues_) { |
| 41 TaskQueue* task_queue = map_entry.first; | 206 TaskQueue* task_queue = map_entry.first; |
| 42 task_queue->SetTimeDomain(renderer_scheduler_->real_time_domain()); | 207 task_queue->SetTimeDomain(renderer_scheduler_->real_time_domain()); |
| 43 task_queue->RemoveFence(); | 208 task_queue->RemoveFence(); |
| 44 } | 209 } |
| 45 | 210 |
| 46 renderer_scheduler_->UnregisterTimeDomain(time_domain_.get()); | 211 renderer_scheduler_->UnregisterTimeDomain(time_domain_.get()); |
| 47 } | 212 } |
| 48 | 213 |
| 49 void ThrottlingHelper::SetQueueEnabled(TaskQueue* task_queue, bool enabled) { | 214 void TaskQueueThrottler::SetQueueEnabled(TaskQueue* task_queue, bool enabled) { |
| 50 TaskQueueMap::iterator find_it = throttled_queues_.find(task_queue); | 215 TaskQueueMap::iterator find_it = throttled_queues_.find(task_queue); |
| 51 | 216 |
| 52 if (find_it == throttled_queues_.end()) { | 217 if (find_it == throttled_queues_.end()) { |
| 53 task_queue->SetQueueEnabled(enabled); | 218 task_queue->SetQueueEnabled(enabled); |
| 54 return; | 219 return; |
| 55 } | 220 } |
| 56 | 221 |
| 57 find_it->second.enabled = enabled; | 222 find_it->second.enabled = enabled; |
| 58 | 223 |
| 59 // We don't enable the queue here because it's throttled and there might be | 224 // We don't enable the queue here because it's throttled and there might be |
| 60 // tasks in its work queue that would execute immediately rather than after | 225 // tasks in its work queue that would execute immediately rather than after |
| 61 // PumpThrottledTasks runs. | 226 // PumpThrottledTasks runs. |
| 62 if (!enabled) | 227 if (!enabled) |
| 63 task_queue->SetQueueEnabled(false); | 228 task_queue->SetQueueEnabled(false); |
| 64 } | 229 } |
| 65 | 230 |
| 66 void ThrottlingHelper::IncreaseThrottleRefCount(TaskQueue* task_queue) { | 231 void TaskQueueThrottler::IncreaseThrottleRefCount(TaskQueue* task_queue) { |
| 67 DCHECK_NE(task_queue, task_runner_.get()); | 232 DCHECK_NE(task_queue, task_runner_.get()); |
| 68 | 233 |
| 69 if (virtual_time_) | 234 if (virtual_time_) |
| 70 return; | 235 return; |
| 71 | 236 |
| 72 std::pair<TaskQueueMap::iterator, bool> insert_result = | 237 std::pair<TaskQueueMap::iterator, bool> insert_result = |
| 73 throttled_queues_.insert(std::make_pair( | 238 throttled_queues_.insert(std::make_pair( |
| 74 task_queue, Metadata(1, task_queue->IsQueueEnabled()))); | 239 task_queue, Metadata(1, task_queue->IsQueueEnabled()))); |
| 75 | 240 |
| 76 if (insert_result.second) { | 241 if (insert_result.second) { |
| 77 // The insert was succesful so we need to throttle the queue. | 242 // The insert was successful so we need to throttle the queue. |
| 78 task_queue->SetTimeDomain(time_domain_.get()); | 243 task_queue->SetTimeDomain(time_domain_.get()); |
| 79 task_queue->RemoveFence(); | 244 task_queue->RemoveFence(); |
| 80 task_queue->SetQueueEnabled(false); | 245 task_queue->SetQueueEnabled(false); |
| 81 | 246 |
| 82 if (!task_queue->IsEmpty()) { | 247 if (!task_queue->IsEmpty()) { |
| 83 if (task_queue->HasPendingImmediateWork()) { | 248 if (task_queue->HasPendingImmediateWork()) { |
| 84 OnTimeDomainHasImmediateWork(); | 249 OnTimeDomainHasImmediateWork(task_queue); |
| 85 } else { | 250 } else { |
| 86 OnTimeDomainHasDelayedWork(); | 251 OnTimeDomainHasDelayedWork(task_queue); |
| 87 } | 252 } |
| 88 } | 253 } |
| 254 | |
| 255 TRACE_EVENT1(tracing_category_, "TaskQueueThrottler_TaskQueueThrottled", | |
| 256 "task_queue", task_queue); | |
| 89 } else { | 257 } else { |
| 90 // An entry already existed in the map so we need to increment the refcount. | 258 // An entry already existed in the map so we need to increment the refcount. |
| 91 insert_result.first->second.throttling_ref_count++; | 259 insert_result.first->second.throttling_ref_count++; |
| 92 } | 260 } |
| 93 } | 261 } |
| 94 | 262 |
| 95 void ThrottlingHelper::DecreaseThrottleRefCount(TaskQueue* task_queue) { | 263 void TaskQueueThrottler::DecreaseThrottleRefCount(TaskQueue* task_queue) { |
| 96 if (virtual_time_) | 264 if (virtual_time_) |
| 97 return; | 265 return; |
| 98 | 266 |
| 99 TaskQueueMap::iterator iter = throttled_queues_.find(task_queue); | 267 TaskQueueMap::iterator iter = throttled_queues_.find(task_queue); |
| 100 | 268 |
| 101 if (iter != throttled_queues_.end() && | 269 if (iter != throttled_queues_.end() && |
| 102 --iter->second.throttling_ref_count == 0) { | 270 --iter->second.throttling_ref_count == 0) { |
| 103 bool enabled = iter->second.enabled; | 271 bool enabled = iter->second.enabled; |
| 104 // The refcount has become zero, we need to unthrottle the queue. | 272 // The refcount has become zero, we need to unthrottle the queue. |
| 105 throttled_queues_.erase(iter); | 273 throttled_queues_.erase(iter); |
| 106 | 274 |
| 107 task_queue->SetTimeDomain(renderer_scheduler_->real_time_domain()); | 275 task_queue->SetTimeDomain(renderer_scheduler_->real_time_domain()); |
| 108 task_queue->RemoveFence(); | 276 task_queue->RemoveFence(); |
| 109 task_queue->SetQueueEnabled(enabled); | 277 task_queue->SetQueueEnabled(enabled); |
| 278 | |
| 279 TRACE_EVENT1(tracing_category_, "TaskQueueThrottler_TaskQueueUnthrottled", | |
| 280 "task_queue", task_queue); | |
| 110 } | 281 } |
| 111 } | 282 } |
| 112 | 283 |
| 113 bool ThrottlingHelper::IsThrottled(TaskQueue* task_queue) const { | 284 bool TaskQueueThrottler::IsThrottled(TaskQueue* task_queue) const { |
| 114 return throttled_queues_.find(task_queue) != throttled_queues_.end(); | 285 return throttled_queues_.find(task_queue) != throttled_queues_.end(); |
| 115 } | 286 } |
| 116 | 287 |
| 117 void ThrottlingHelper::UnregisterTaskQueue(TaskQueue* task_queue) { | 288 void TaskQueueThrottler::UnregisterTaskQueue(TaskQueue* task_queue) { |
| 118 throttled_queues_.erase(task_queue); | 289 throttled_queues_.erase(task_queue); |
| 119 } | 290 } |
| 120 | 291 |
| 121 void ThrottlingHelper::OnTimeDomainHasImmediateWork() { | 292 void TaskQueueThrottler::OnTimeDomainHasImmediateWork(TaskQueue* queue) { |
| 122 // Forward to the main thread if called from another thread. | 293 // Forward to the main thread if called from another thread |
| 123 if (!task_runner_->RunsTasksOnCurrentThread()) { | 294 if (!task_runner_->RunsTasksOnCurrentThread()) { |
| 124 task_runner_->PostTask(FROM_HERE, forward_immediate_work_closure_); | 295 task_runner_->PostTask(FROM_HERE, |
| 296 base::Bind(forward_immediate_work_callback_, queue)); | |
| 125 return; | 297 return; |
| 126 } | 298 } |
| 127 TRACE_EVENT0(tracing_category_, | 299 TRACE_EVENT0(tracing_category_, |
| 128 "ThrottlingHelper::OnTimeDomainHasImmediateWork"); | 300 "TaskQueueThrottler::OnTimeDomainHasImmediateWork"); |
| 301 | |
| 129 base::TimeTicks now = tick_clock_->NowTicks(); | 302 base::TimeTicks now = tick_clock_->NowTicks(); |
| 130 MaybeSchedulePumpThrottledTasksLocked(FROM_HERE, now, now); | 303 base::TimeTicks next_allowed_run_time = GetNextAllowedRunTime(now, queue); |
| 304 MaybeSchedulePumpThrottledTasks(FROM_HERE, now, next_allowed_run_time); | |
| 131 } | 305 } |
| 132 | 306 |
| 133 void ThrottlingHelper::OnTimeDomainHasDelayedWork() { | 307 void TaskQueueThrottler::OnTimeDomainHasDelayedWork(TaskQueue* queue) { |
| 134 TRACE_EVENT0(tracing_category_, | 308 TRACE_EVENT0(tracing_category_, |
| 135 "ThrottlingHelper::OnTimeDomainHasDelayedWork"); | 309 "TaskQueueThrottler::OnTimeDomainHasDelayedWork"); |
| 136 base::TimeTicks next_scheduled_delayed_task; | |
| 137 bool has_delayed_task = | |
| 138 time_domain_->NextScheduledRunTime(&next_scheduled_delayed_task); | |
| 139 DCHECK(has_delayed_task); | |
| 140 base::TimeTicks now = tick_clock_->NowTicks(); | 310 base::TimeTicks now = tick_clock_->NowTicks(); |
| 141 MaybeSchedulePumpThrottledTasksLocked(FROM_HERE, now, | 311 LazyNow lazy_now(now); |
| 142 next_scheduled_delayed_task); | 312 |
| 313 base::Optional<base::TimeTicks> next_scheduled_delayed_task = | |
| 314 NextTaskRunTime(&lazy_now, queue); | |
| 315 DCHECK(next_scheduled_delayed_task); | |
| 316 MaybeSchedulePumpThrottledTasks(FROM_HERE, now, | |
| 317 next_scheduled_delayed_task.value()); | |
| 143 } | 318 } |
| 144 | 319 |
| 145 void ThrottlingHelper::PumpThrottledTasks() { | 320 namespace { |
| 146 TRACE_EVENT0(tracing_category_, "ThrottlingHelper::PumpThrottledTasks"); | |
| 147 pending_pump_throttled_tasks_runtime_ = base::TimeTicks(); | |
| 148 | 321 |
| 149 LazyNow lazy_low(tick_clock_); | 322 template <class T> |
| 323 T Min(const base::Optional<T>& optional, const T& value) { | |
| 324 if (!optional) { | |
| 325 return value; | |
| 326 } | |
| 327 return std::min(optional.value(), value); | |
| 328 } | |
| 329 | |
| 330 template <class T> | |
| 331 base::Optional<T> Min(const base::Optional<T>& a, const base::Optional<T>& b) { | |
| 332 if (!b) | |
| 333 return a; | |
| 334 if (!a) | |
| 335 return b; | |
| 336 return std::min(a.value(), b.value()); | |
| 337 } | |
| 338 | |
| 339 template <class T> | |
| 340 T Max(const base::Optional<T>& optional, const T& value) { | |
| 341 if (!optional) | |
| 342 return value; | |
| 343 return std::max(optional.value(), value); | |
| 344 } | |
| 345 | |
| 346 template <class T> | |
| 347 base::Optional<T> Max(const base::Optional<T>& a, const base::Optional<T>& b) { | |
| 348 if (!b) | |
| 349 return a; | |
| 350 if (!a) | |
| 351 return b; | |
| 352 return std::max(a.value(), b.value()); | |
| 353 } | |
| 354 | |
| 355 } // namespace | |
| 356 | |
| 357 void TaskQueueThrottler::PumpThrottledTasks() { | |
| 358 TRACE_EVENT0("renderer.scheduler", "TaskQueueThrottler::PumpThrottledTasks"); | |
|
alex clarke (OOO till 29th)
2016/09/15 12:19:35
Why change tracing_category_?
altimin
2016/09/15 15:52:10
Done.
| |
| 359 pending_pump_throttled_tasks_runtime_.reset(); | |
| 360 | |
| 361 LazyNow lazy_now(tick_clock_); | |
| 362 base::Optional<base::TimeTicks> next_scheduled_delayed_task; | |
| 363 | |
| 150 for (const TaskQueueMap::value_type& map_entry : throttled_queues_) { | 364 for (const TaskQueueMap::value_type& map_entry : throttled_queues_) { |
| 151 TaskQueue* task_queue = map_entry.first; | 365 TaskQueue* task_queue = map_entry.first; |
| 152 if (!map_entry.second.enabled || task_queue->IsEmpty()) | 366 if (!map_entry.second.enabled || task_queue->IsEmpty()) |
| 153 continue; | 367 continue; |
| 154 | 368 |
| 155 task_queue->SetQueueEnabled(true); | 369 base::TimeTicks next_allowed_run_time = |
|
alex clarke (OOO till 29th)
2016/09/15 12:19:35
Please add a comment here saying: Don't pump queue
altimin
2016/09/15 15:52:10
Done.
| |
| 156 task_queue->InsertFence(); | 370 GetNextAllowedRunTime(lazy_now.Now(), task_queue); |
| 371 base::Optional<base::TimeTicks> next_desired_run_time = | |
| 372 NextTaskRunTime(&lazy_now, task_queue); | |
| 373 | |
| 374 if (next_desired_run_time && | |
| 375 next_allowed_run_time > next_desired_run_time.value()) { | |
| 376 TRACE_EVENT1( | |
| 377 "renderer.scheduler", | |
|
alex clarke (OOO till 29th)
2016/09/15 12:19:35
tracing_category_?
altimin
2016/09/15 15:52:11
Done.
| |
| 378 "TaskQueueThrottler::PumpThrottledTasks_ExpensiveTaskThrottled", | |
| 379 "throttle_time_in_seconds", | |
| 380 (next_allowed_run_time - next_desired_run_time.value()).InSecondsF()); | |
| 381 | |
| 382 next_scheduled_delayed_task = | |
| 383 Min(next_scheduled_delayed_task, next_allowed_run_time); | |
| 384 } | |
|
alex clarke (OOO till 29th)
2016/09/15 12:19:35
can we put continue; in there for clarity?
altimin
2016/09/15 15:52:10
Done.
| |
| 385 | |
| 386 next_scheduled_delayed_task = | |
| 387 Min(next_scheduled_delayed_task, task_queue->GetNextScheduledWakeUp()); | |
| 388 | |
| 389 if (next_allowed_run_time == lazy_now.Now()) { | |
|
alex clarke (OOO till 29th)
2016/09/15 12:19:35
For readability I would be tempted instead to writ
altimin
2016/09/15 15:52:11
Done.
| |
| 390 task_queue->SetQueueEnabled(true); | |
| 391 task_queue->InsertFence(); | |
| 392 } | |
| 157 } | 393 } |
| 158 // Make sure NextScheduledRunTime gives us an up-to date result. | |
| 159 time_domain_->ClearExpiredWakeups(); | |
| 160 | 394 |
| 161 base::TimeTicks next_scheduled_delayed_task; | 395 // Maybe schedule a call to TaskQueueThrottler::PumpThrottledTasks if there is |
| 162 // Maybe schedule a call to ThrottlingHelper::PumpThrottledTasks if there is | 396 // a pending delayed task or a throttled task ready to run. |
| 163 // a pending delayed task. NOTE posting a non-delayed task in the future will | 397 // NOTE: posting a non-delayed task in the future will result in |
| 164 // result in ThrottlingHelper::OnTimeDomainHasImmediateWork being called. | 398 // TaskQueueThrottler::OnTimeDomainHasImmediateWork being called. |
| 165 if (time_domain_->NextScheduledRunTime(&next_scheduled_delayed_task)) { | 399 if (next_scheduled_delayed_task) { |
| 166 MaybeSchedulePumpThrottledTasksLocked(FROM_HERE, lazy_low.Now(), | 400 MaybeSchedulePumpThrottledTasks(FROM_HERE, lazy_now.Now(), |
| 167 next_scheduled_delayed_task); | 401 *next_scheduled_delayed_task); |
| 168 } | 402 } |
| 169 } | 403 } |
| 170 | 404 |
| 171 /* static */ | 405 /* static */ |
| 172 base::TimeTicks ThrottlingHelper::ThrottledRunTime( | 406 base::TimeTicks TaskQueueThrottler::AlignedThrottledRunTime( |
| 173 base::TimeTicks unthrottled_runtime) { | 407 base::TimeTicks unthrottled_runtime) { |
| 174 const base::TimeDelta one_second = base::TimeDelta::FromSeconds(1); | 408 const base::TimeDelta one_second = base::TimeDelta::FromSeconds(1); |
| 175 return unthrottled_runtime + one_second - | 409 return unthrottled_runtime + one_second - |
| 176 ((unthrottled_runtime - base::TimeTicks()) % one_second); | 410 ((unthrottled_runtime - base::TimeTicks()) % one_second); |
| 177 } | 411 } |
| 178 | 412 |
| 179 void ThrottlingHelper::MaybeSchedulePumpThrottledTasksLocked( | 413 void TaskQueueThrottler::MaybeSchedulePumpThrottledTasks( |
| 180 const tracked_objects::Location& from_here, | 414 const tracked_objects::Location& from_here, |
| 181 base::TimeTicks now, | 415 base::TimeTicks now, |
| 182 base::TimeTicks unthrottled_runtime) { | 416 base::TimeTicks runtime) { |
|
alex clarke (OOO till 29th)
2016/09/15 12:19:35
unaligned_runtiume
altimin
2016/09/15 15:52:11
Done.
| |
| 183 if (virtual_time_) | 417 if (virtual_time_) |
| 184 return; | 418 return; |
| 185 | 419 |
| 186 base::TimeTicks throttled_runtime = | 420 runtime = std::max(now, AlignedThrottledRunTime(runtime)); |
| 187 ThrottledRunTime(std::max(now, unthrottled_runtime)); | 421 |
| 188 // If there is a pending call to PumpThrottledTasks and it's sooner than | 422 // If there is a pending call to PumpThrottledTasks and it's sooner than |
| 189 // |unthrottled_runtime| then return. | 423 // |runtime| then return. |
| 190 if (!pending_pump_throttled_tasks_runtime_.is_null() && | 424 if (pending_pump_throttled_tasks_runtime_ && |
| 191 throttled_runtime >= pending_pump_throttled_tasks_runtime_) { | 425 runtime >= pending_pump_throttled_tasks_runtime_.value()) { |
| 192 return; | 426 return; |
| 193 } | 427 } |
| 194 | 428 |
| 195 pending_pump_throttled_tasks_runtime_ = throttled_runtime; | 429 pending_pump_throttled_tasks_runtime_ = runtime; |
| 196 | 430 |
| 197 pump_throttled_tasks_closure_.Cancel(); | 431 pump_throttled_tasks_closure_.Cancel(); |
| 198 | 432 |
| 199 base::TimeDelta delay = pending_pump_throttled_tasks_runtime_ - now; | 433 base::TimeDelta delay = pending_pump_throttled_tasks_runtime_.value() - now; |
| 200 TRACE_EVENT1(tracing_category_, | 434 TRACE_EVENT1(tracing_category_, |
| 201 "ThrottlingHelper::MaybeSchedulePumpThrottledTasksLocked", | 435 "TaskQueueThrottler::MaybeSchedulePumpThrottledTasks", |
| 202 "delay_till_next_pump_ms", delay.InMilliseconds()); | 436 "delay_till_next_pump_ms", delay.InMilliseconds()); |
| 203 task_runner_->PostDelayedTask( | 437 task_runner_->PostDelayedTask( |
| 204 from_here, pump_throttled_tasks_closure_.callback(), delay); | 438 from_here, pump_throttled_tasks_closure_.callback(), delay); |
| 205 } | 439 } |
| 206 | 440 |
| 207 void ThrottlingHelper::EnableVirtualTime() { | 441 void TaskQueueThrottler::EnableVirtualTime() { |
| 208 virtual_time_ = true; | 442 virtual_time_ = true; |
| 209 | 443 |
| 210 pump_throttled_tasks_closure_.Cancel(); | 444 pump_throttled_tasks_closure_.Cancel(); |
| 211 | 445 |
| 212 while (!throttled_queues_.empty()) { | 446 while (!throttled_queues_.empty()) { |
| 213 TaskQueue* task_queue = throttled_queues_.begin()->first; | 447 TaskQueue* task_queue = throttled_queues_.begin()->first; |
| 214 bool enabled = throttled_queues_.begin()->second.enabled; | 448 bool enabled = throttled_queues_.begin()->second.enabled; |
| 215 | 449 |
| 216 throttled_queues_.erase(throttled_queues_.begin()); | 450 throttled_queues_.erase(throttled_queues_.begin()); |
| 217 | 451 |
| 218 task_queue->SetTimeDomain(renderer_scheduler_->GetVirtualTimeDomain()); | 452 task_queue->SetTimeDomain(renderer_scheduler_->GetVirtualTimeDomain()); |
| 219 task_queue->RemoveFence(); | 453 task_queue->RemoveFence(); |
| 220 task_queue->SetQueueEnabled(enabled); | 454 task_queue->SetQueueEnabled(enabled); |
| 221 } | 455 } |
| 222 } | 456 } |
| 223 | 457 |
| 458 TaskQueueThrottler::TimeBudgetPool* TaskQueueThrottler::CreateTimeBudgetPool( | |
| 459 const char* name) { | |
| 460 TimeBudgetPool* time_budget_pool = | |
| 461 new TimeBudgetPool(name, this, tick_clock_->NowTicks()); | |
| 462 time_budget_pools_[time_budget_pool] = base::WrapUnique(time_budget_pool); | |
| 463 return time_budget_pool; | |
| 464 } | |
| 465 | |
| 466 void TaskQueueThrottler::OnTaskRunTimeReported(TaskQueue* task_queue, | |
| 467 base::TimeTicks start_time, | |
| 468 base::TimeTicks end_time) { | |
| 469 if (!IsThrottled(task_queue)) | |
| 470 return; | |
| 471 | |
| 472 TimeBudgetPool* time_budget_pool = GetTimeBudgetPoolForQueue(task_queue); | |
| 473 if (time_budget_pool) { | |
|
alex clarke (OOO till 29th)
2016/09/15 12:19:35
For readability I think we should prefer:
if (!ti
altimin
2016/09/15 15:52:10
Done.
| |
| 474 time_budget_pool->RecordTaskRunTime(end_time - start_time); | |
| 475 if (!time_budget_pool->IsAllowedToRun(end_time)) { | |
| 476 // This task was too expensive and all following tasks are throttled | |
| 477 // until explicitly allowed. | |
| 478 task_queue->SetQueueEnabled(false); | |
| 479 | |
| 480 if (task_queue->HasPendingImmediateWork()) { | |
| 481 MaybeSchedulePumpThrottledTasks( | |
| 482 FROM_HERE, end_time, | |
| 483 std::max(end_time, time_budget_pool->GetNextAllowedRunTime())); | |
| 484 } | |
| 485 } | |
| 486 } | |
| 487 } | |
| 488 | |
| 489 void TaskQueueThrottler::AsValueInto(base::trace_event::TracedValue* state, | |
| 490 base::TimeTicks now) const { | |
| 491 if (pending_pump_throttled_tasks_runtime_) { | |
| 492 state->SetDouble( | |
| 493 "next_throttled_tasks_pump_in_seconds", | |
| 494 (pending_pump_throttled_tasks_runtime_.value() - now).InSecondsF()); | |
| 495 } | |
| 496 | |
| 497 state->BeginDictionary("time_budget_pools"); | |
| 498 | |
| 499 for (const auto& map_entry : time_budget_pools_) { | |
| 500 TaskQueueThrottler::TimeBudgetPool* pool = map_entry.first; | |
| 501 pool->AsValueInto(state, now); | |
| 502 } | |
| 503 | |
| 504 state->EndDictionary(); | |
| 505 } | |
| 506 | |
| 507 TaskQueueThrottler::TimeBudgetPool* | |
| 508 TaskQueueThrottler::GetTimeBudgetPoolForQueue(TaskQueue* queue) { | |
| 509 auto find_it = time_budget_pool_for_queue_.find(queue); | |
| 510 if (find_it == time_budget_pool_for_queue_.end()) { | |
| 511 return nullptr; | |
| 512 } else { | |
| 513 TimeBudgetPool* result = find_it->second; | |
| 514 DCHECK(result); | |
| 515 return result; | |
| 516 } | |
| 517 } | |
| 518 | |
| 519 void TaskQueueThrottler::MaybeSchedulePumpQueue( | |
| 520 const tracked_objects::Location& from_here, | |
| 521 base::TimeTicks now, | |
| 522 TaskQueue* queue, | |
| 523 base::Optional<base::TimeTicks> next_possible_run_time) { | |
| 524 LazyNow lazy_now(now); | |
| 525 base::Optional<base::TimeTicks> next_run_time = | |
| 526 Max(NextTaskRunTime(&lazy_now, queue), next_possible_run_time); | |
| 527 | |
| 528 if (next_run_time) { | |
|
alex clarke (OOO till 29th)
2016/09/15 12:19:35
Can this ever actually be false? Should we DCHECK
altimin
2016/09/15 15:52:10
Yes. We're calling MaybeSchedulePumpQueue when we'
| |
| 529 MaybeSchedulePumpThrottledTasks(from_here, now, next_run_time.value()); | |
| 530 } | |
| 531 } | |
| 532 | |
| 533 base::TimeTicks TaskQueueThrottler::GetNextAllowedRunTime(base::TimeTicks now, | |
| 534 TaskQueue* queue) { | |
| 535 TimeBudgetPool* time_budget_pool = GetTimeBudgetPoolForQueue(queue); | |
| 536 if (!time_budget_pool) { | |
| 537 return now; | |
| 538 } else { | |
| 539 return std::max(now, time_budget_pool->GetNextAllowedRunTime()); | |
| 540 } | |
| 541 } | |
| 542 | |
| 224 } // namespace scheduler | 543 } // namespace scheduler |
| 225 } // namespace blink | 544 } // namespace blink |
| OLD | NEW |