Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "platform/scheduler/renderer/throttling_helper.h" | 5 #include "platform/scheduler/renderer/task_queue_throttler.h" |
| 6 | 6 |
| 7 #include <cstdint> | |
| 8 | |
| 9 #include "base/format_macros.h" | |
| 7 #include "base/logging.h" | 10 #include "base/logging.h" |
| 11 #include "base/memory/ptr_util.h" | |
| 12 #include "base/optional.h" | |
| 13 #include "base/strings/stringprintf.h" | |
| 8 #include "platform/scheduler/base/real_time_domain.h" | 14 #include "platform/scheduler/base/real_time_domain.h" |
| 9 #include "platform/scheduler/child/scheduler_tqm_delegate.h" | 15 #include "platform/scheduler/child/scheduler_tqm_delegate.h" |
| 10 #include "platform/scheduler/renderer/auto_advancing_virtual_time_domain.h" | 16 #include "platform/scheduler/renderer/auto_advancing_virtual_time_domain.h" |
| 11 #include "platform/scheduler/renderer/renderer_scheduler_impl.h" | 17 #include "platform/scheduler/renderer/renderer_scheduler_impl.h" |
| 12 #include "platform/scheduler/renderer/throttled_time_domain.h" | 18 #include "platform/scheduler/renderer/throttled_time_domain.h" |
| 13 #include "platform/scheduler/renderer/web_frame_scheduler_impl.h" | 19 #include "platform/scheduler/renderer/web_frame_scheduler_impl.h" |
| 14 #include "public/platform/WebFrameScheduler.h" | 20 #include "public/platform/WebFrameScheduler.h" |
| 15 | 21 |
| 22 #include <iostream> // FIXME | |
| 23 | |
| 16 namespace blink { | 24 namespace blink { |
| 17 namespace scheduler { | 25 namespace scheduler { |
| 18 | 26 |
| 19 ThrottlingHelper::ThrottlingHelper(RendererSchedulerImpl* renderer_scheduler, | 27 namespace { |
| 20 const char* tracing_category) | 28 const int kMaxBudgetLevelInSeconds = 1; |
| 29 } | |
| 30 | |
| 31 TaskQueueThrottler::TimeBudgetPool::TimeBudgetPool( | |
| 32 const char* name, | |
| 33 TaskQueueThrottler* task_queue_throttler, | |
| 34 base::TimeTicks now) | |
| 35 : name_(name), | |
| 36 task_queue_throttler_(task_queue_throttler), | |
| 37 max_budget_level_(base::TimeDelta::FromSeconds(kMaxBudgetLevelInSeconds)), | |
| 38 last_checkpoint_(now), | |
| 39 cpu_percentage_(1), | |
| 40 is_enabled_(true) {} | |
| 41 | |
| 42 TaskQueueThrottler::TimeBudgetPool::~TimeBudgetPool() {} | |
| 43 | |
| 44 void TaskQueueThrottler::TimeBudgetPool::SetTimeBudget(base::TimeTicks now, | |
| 45 double cpu_percentage) { | |
| 46 Advance(now); | |
| 47 cpu_percentage_ = cpu_percentage; | |
| 48 } | |
| 49 | |
| 50 void TaskQueueThrottler::TimeBudgetPool::AddQueue(base::TimeTicks now, | |
| 51 TaskQueue* queue) { | |
| 52 DCHECK(task_queue_throttler_->time_budget_pool_for_queue_.find(queue) == | |
| 53 task_queue_throttler_->time_budget_pool_for_queue_.end()); | |
| 54 task_queue_throttler_->time_budget_pool_for_queue_[queue] = this; | |
| 55 | |
| 56 associated_task_queues_.insert(queue); | |
| 57 | |
| 58 if (!task_queue_throttler_->IsThrottled(queue)) | |
| 59 return; | |
| 60 | |
| 61 queue->SetQueueEnabled(false); | |
| 62 | |
| 63 task_queue_throttler_->MaybeSchedulePumpQueue(FROM_HERE, now, queue, | |
| 64 NextAllowedRunTime()); | |
| 65 } | |
| 66 | |
| 67 void TaskQueueThrottler::TimeBudgetPool::RemoveQueue(base::TimeTicks now, | |
| 68 TaskQueue* queue) { | |
| 69 DCHECK_EQ(task_queue_throttler_->time_budget_pool_for_queue_[queue], this); | |
| 70 task_queue_throttler_->time_budget_pool_for_queue_.erase(queue); | |
| 71 | |
| 72 associated_task_queues_.erase(queue); | |
| 73 | |
| 74 if (!task_queue_throttler_->IsThrottled(queue)) | |
| 75 return; | |
| 76 | |
| 77 task_queue_throttler_->MaybeSchedulePumpQueue(FROM_HERE, now, queue); | |
| 78 } | |
| 79 | |
| 80 void TaskQueueThrottler::TimeBudgetPool::Enable(LazyNow* lazy_now) { | |
| 81 if (is_enabled_) | |
| 82 return; | |
| 83 is_enabled_ = true; | |
| 84 | |
| 85 for (TaskQueue* queue : associated_task_queues_) { | |
| 86 if (!task_queue_throttler_->IsThrottled(queue)) | |
| 87 continue; | |
| 88 | |
| 89 queue->SetQueueEnabled(false); | |
| 90 | |
| 91 task_queue_throttler_->MaybeSchedulePumpQueue(FROM_HERE, lazy_now->Now(), | |
| 92 queue, NextAllowedRunTime()); | |
| 93 } | |
| 94 } | |
| 95 | |
| 96 void TaskQueueThrottler::TimeBudgetPool::Disable(LazyNow* lazy_now) { | |
| 97 if (!is_enabled_) | |
| 98 return; | |
| 99 is_enabled_ = false; | |
| 100 | |
| 101 for (TaskQueue* queue : associated_task_queues_) { | |
| 102 if (!task_queue_throttler_->IsThrottled(queue)) | |
| 103 continue; | |
| 104 | |
| 105 task_queue_throttler_->MaybeSchedulePumpQueue(FROM_HERE, lazy_now->Now(), | |
| 106 queue); | |
| 107 } | |
| 108 } | |
| 109 | |
| 110 bool TaskQueueThrottler::TimeBudgetPool::IsEnabled() const { | |
| 111 return is_enabled_; | |
| 112 } | |
| 113 | |
| 114 void TaskQueueThrottler::TimeBudgetPool::Close() { | |
| 115 DCHECK_EQ(0u, associated_task_queues_.size()); | |
| 116 | |
| 117 task_queue_throttler_->time_budget_pools_.erase(this); | |
| 118 } | |
| 119 | |
| 120 bool TaskQueueThrottler::TimeBudgetPool::IsAllowedToRun(base::TimeTicks now) { | |
| 121 Advance(now); | |
| 122 return !is_enabled_ || current_budget_level_.InMicroseconds() >= 0; | |
| 123 } | |
| 124 | |
| 125 base::TimeTicks TaskQueueThrottler::TimeBudgetPool::NextAllowedRunTime() { | |
| 126 if (!is_enabled_ || current_budget_level_.InMicroseconds() >= 0) { | |
| 127 return last_checkpoint_; | |
| 128 } else { | |
| 129 // Subtract because current_budget_level_ is negative. | |
| 130 return last_checkpoint_ - current_budget_level_ / cpu_percentage_; | |
| 131 } | |
| 132 } | |
| 133 | |
| 134 void TaskQueueThrottler::TimeBudgetPool::RecordTaskRunTime( | |
| 135 base::TimeDelta task_run_time) { | |
| 136 if (is_enabled_) { | |
| 137 current_budget_level_ -= task_run_time; | |
| 138 } | |
| 139 } | |
| 140 | |
| 141 const char* TaskQueueThrottler::TimeBudgetPool::Name() const { | |
| 142 return name_; | |
| 143 } | |
| 144 | |
| 145 void TaskQueueThrottler::TimeBudgetPool::AsValueInto( | |
| 146 base::trace_event::TracedValue* state, | |
| 147 base::TimeTicks now) const { | |
| 148 state->BeginDictionary(); | |
| 149 | |
| 150 state->SetString("name", name_); | |
| 151 state->SetDouble("time_budget", cpu_percentage_); | |
| 152 state->SetDouble("time_budget_level_in_seconds", | |
| 153 current_budget_level_.InSecondsF()); | |
| 154 state->SetDouble("last_checkpoint_seconds_ago", | |
| 155 (now - last_checkpoint_).InSecondsF()); | |
| 156 | |
| 157 state->BeginArray("task_queues"); | |
| 158 for (TaskQueue* queue : associated_task_queues_) { | |
| 159 state->AppendString(base::StringPrintf( | |
| 160 "%" PRIx64, static_cast<uint64_t>(reinterpret_cast<uintptr_t>(queue)))); | |
| 161 } | |
| 162 state->EndArray(); | |
| 163 | |
| 164 state->EndDictionary(); | |
| 165 } | |
| 166 | |
| 167 void TaskQueueThrottler::TimeBudgetPool::Advance(base::TimeTicks now) { | |
| 168 if (now > last_checkpoint_) { | |
| 169 if (is_enabled_) { | |
| 170 current_budget_level_ = std::min( | |
| 171 current_budget_level_ + cpu_percentage_ * (now - last_checkpoint_), | |
| 172 max_budget_level_); | |
| 173 } | |
| 174 last_checkpoint_ = now; | |
| 175 } | |
| 176 } | |
| 177 | |
| 178 TaskQueueThrottler::TaskQueueThrottler( | |
| 179 RendererSchedulerImpl* renderer_scheduler, | |
| 180 const char* tracing_category) | |
| 21 : task_runner_(renderer_scheduler->ControlTaskRunner()), | 181 : task_runner_(renderer_scheduler->ControlTaskRunner()), |
| 22 renderer_scheduler_(renderer_scheduler), | 182 renderer_scheduler_(renderer_scheduler), |
| 23 tick_clock_(renderer_scheduler->tick_clock()), | 183 tick_clock_(renderer_scheduler->tick_clock()), |
| 24 tracing_category_(tracing_category), | 184 tracing_category_(tracing_category), |
| 25 time_domain_(new ThrottledTimeDomain(this, tracing_category)), | 185 time_domain_(new ThrottledTimeDomain(this, tracing_category)), |
| 26 virtual_time_(false), | 186 virtual_time_(false), |
| 27 weak_factory_(this) { | 187 weak_factory_(this) { |
| 28 pump_throttled_tasks_closure_.Reset(base::Bind( | 188 pump_throttled_tasks_closure_.Reset(base::Bind( |
| 29 &ThrottlingHelper::PumpThrottledTasks, weak_factory_.GetWeakPtr())); | 189 &TaskQueueThrottler::PumpThrottledTasks, weak_factory_.GetWeakPtr())); |
| 30 forward_immediate_work_closure_ = | 190 forward_immediate_work_closure_ = |
| 31 base::Bind(&ThrottlingHelper::OnTimeDomainHasImmediateWork, | 191 base::Bind(&TaskQueueThrottler::OnTimeDomainHasImmediateWork, |
| 32 weak_factory_.GetWeakPtr()); | 192 weak_factory_.GetWeakPtr()); |
| 33 | 193 |
| 34 renderer_scheduler_->RegisterTimeDomain(time_domain_.get()); | 194 renderer_scheduler_->RegisterTimeDomain(time_domain_.get()); |
| 35 } | 195 } |
| 36 | 196 |
| 37 ThrottlingHelper::~ThrottlingHelper() { | 197 TaskQueueThrottler::~TaskQueueThrottler() { |
| 38 // It's possible for queues to be still throttled, so we need to tidy up | 198 // It's possible for queues to be still throttled, so we need to tidy up |
| 39 // before unregistering the time domain. | 199 // before unregistering the time domain. |
| 40 for (const TaskQueueMap::value_type& map_entry : throttled_queues_) { | 200 for (const TaskQueueMap::value_type& map_entry : throttled_queues_) { |
| 41 TaskQueue* task_queue = map_entry.first; | 201 TaskQueue* task_queue = map_entry.first; |
| 42 task_queue->SetTimeDomain(renderer_scheduler_->real_time_domain()); | 202 task_queue->SetTimeDomain(renderer_scheduler_->real_time_domain()); |
| 43 task_queue->RemoveFence(); | 203 task_queue->RemoveFence(); |
| 44 } | 204 } |
| 45 | 205 |
| 46 renderer_scheduler_->UnregisterTimeDomain(time_domain_.get()); | 206 renderer_scheduler_->UnregisterTimeDomain(time_domain_.get()); |
| 47 } | 207 } |
| 48 | 208 |
| 49 void ThrottlingHelper::SetQueueEnabled(TaskQueue* task_queue, bool enabled) { | 209 void TaskQueueThrottler::SetQueueEnabled(TaskQueue* task_queue, bool enabled) { |
| 50 TaskQueueMap::iterator find_it = throttled_queues_.find(task_queue); | 210 TaskQueueMap::iterator find_it = throttled_queues_.find(task_queue); |
| 51 | 211 |
| 52 if (find_it == throttled_queues_.end()) { | 212 if (find_it == throttled_queues_.end()) { |
| 53 task_queue->SetQueueEnabled(enabled); | 213 task_queue->SetQueueEnabled(enabled); |
| 54 return; | 214 return; |
| 55 } | 215 } |
| 56 | 216 |
| 57 find_it->second.enabled = enabled; | 217 find_it->second.enabled = enabled; |
| 58 | 218 |
| 59 // We don't enable the queue here because it's throttled and there might be | 219 // We don't enable the queue here because it's throttled and there might be |
| 60 // tasks in its work queue that would execute immediately rather than after | 220 // tasks in its work queue that would execute immediately rather than after |
| 61 // PumpThrottledTasks runs. | 221 // PumpThrottledTasks runs. |
| 62 if (!enabled) | 222 if (!enabled) |
| 63 task_queue->SetQueueEnabled(false); | 223 task_queue->SetQueueEnabled(false); |
| 64 } | 224 } |
| 65 | 225 |
| 66 void ThrottlingHelper::IncreaseThrottleRefCount(TaskQueue* task_queue) { | 226 void TaskQueueThrottler::IncreaseThrottleRefCount(TaskQueue* task_queue) { |
| 67 DCHECK_NE(task_queue, task_runner_.get()); | 227 DCHECK_NE(task_queue, task_runner_.get()); |
| 68 | 228 |
| 69 if (virtual_time_) | 229 if (virtual_time_) |
| 70 return; | 230 return; |
| 71 | 231 |
| 72 std::pair<TaskQueueMap::iterator, bool> insert_result = | 232 std::pair<TaskQueueMap::iterator, bool> insert_result = |
| 73 throttled_queues_.insert(std::make_pair( | 233 throttled_queues_.insert(std::make_pair( |
| 74 task_queue, Metadata(1, task_queue->IsQueueEnabled()))); | 234 task_queue, Metadata(1, task_queue->IsQueueEnabled()))); |
| 75 | 235 |
| 76 if (insert_result.second) { | 236 if (insert_result.second) { |
| 77 // The insert was succesful so we need to throttle the queue. | 237 // The insert was successful so we need to throttle the queue. |
| 78 task_queue->SetTimeDomain(time_domain_.get()); | 238 task_queue->SetTimeDomain(time_domain_.get()); |
| 79 task_queue->RemoveFence(); | 239 task_queue->RemoveFence(); |
| 80 task_queue->SetQueueEnabled(false); | 240 task_queue->SetQueueEnabled(false); |
| 81 | 241 |
| 82 if (!task_queue->IsEmpty()) { | 242 base::Optional<base::TimeTicks> next_run_time = |
| 83 if (task_queue->HasPendingImmediateWork()) { | 243 task_queue->GetNextTaskRunTime(); |
|
alex clarke (OOO till 29th)
2016/09/14 12:36:34
Do we need to change this?
altimin
2016/09/14 18:34:49
Done.
| |
| 84 OnTimeDomainHasImmediateWork(); | 244 |
| 85 } else { | 245 if (next_run_time) { |
| 86 OnTimeDomainHasDelayedWork(); | 246 MaybeSchedulePumpThrottledTasks(FROM_HERE, tick_clock_->NowTicks(), |
| 87 } | 247 next_run_time.value()); |
| 248 | |
| 249 TRACE_EVENT1(tracing_category_, "TaskQueueThrottler_TaskQueueThrottled", | |
| 250 "task_queue", task_queue); | |
| 88 } | 251 } |
| 89 } else { | 252 } else { |
| 90 // An entry already existed in the map so we need to increment the refcount. | 253 // An entry already existed in the map so we need to increment the refcount. |
| 91 insert_result.first->second.throttling_ref_count++; | 254 insert_result.first->second.throttling_ref_count++; |
| 92 } | 255 } |
| 93 } | 256 } |
| 94 | 257 |
| 95 void ThrottlingHelper::DecreaseThrottleRefCount(TaskQueue* task_queue) { | 258 void TaskQueueThrottler::DecreaseThrottleRefCount(TaskQueue* task_queue) { |
| 96 if (virtual_time_) | 259 if (virtual_time_) |
| 97 return; | 260 return; |
| 98 | 261 |
| 99 TaskQueueMap::iterator iter = throttled_queues_.find(task_queue); | 262 TaskQueueMap::iterator iter = throttled_queues_.find(task_queue); |
| 100 | 263 |
| 101 if (iter != throttled_queues_.end() && | 264 if (iter != throttled_queues_.end() && |
| 102 --iter->second.throttling_ref_count == 0) { | 265 --iter->second.throttling_ref_count == 0) { |
| 103 bool enabled = iter->second.enabled; | 266 bool enabled = iter->second.enabled; |
| 104 // The refcount has become zero, we need to unthrottle the queue. | 267 // The refcount has become zero, we need to unthrottle the queue. |
| 105 throttled_queues_.erase(iter); | 268 throttled_queues_.erase(iter); |
| 106 | 269 |
| 107 task_queue->SetTimeDomain(renderer_scheduler_->real_time_domain()); | 270 task_queue->SetTimeDomain(renderer_scheduler_->real_time_domain()); |
| 108 task_queue->RemoveFence(); | 271 task_queue->RemoveFence(); |
| 109 task_queue->SetQueueEnabled(enabled); | 272 task_queue->SetQueueEnabled(enabled); |
| 273 | |
| 274 TRACE_EVENT1(tracing_category_, "TaskQueueThrottler_TaskQueueUnthrottled", | |
| 275 "task_queue", task_queue); | |
| 110 } | 276 } |
| 111 } | 277 } |
| 112 | 278 |
| 113 bool ThrottlingHelper::IsThrottled(TaskQueue* task_queue) const { | 279 bool TaskQueueThrottler::IsThrottled(TaskQueue* task_queue) const { |
| 114 return throttled_queues_.find(task_queue) != throttled_queues_.end(); | 280 return throttled_queues_.find(task_queue) != throttled_queues_.end(); |
| 115 } | 281 } |
| 116 | 282 |
| 117 void ThrottlingHelper::UnregisterTaskQueue(TaskQueue* task_queue) { | 283 void TaskQueueThrottler::UnregisterTaskQueue(TaskQueue* task_queue) { |
| 118 throttled_queues_.erase(task_queue); | 284 throttled_queues_.erase(task_queue); |
| 119 } | 285 } |
| 120 | 286 |
| 121 void ThrottlingHelper::OnTimeDomainHasImmediateWork() { | 287 void TaskQueueThrottler::OnTimeDomainHasImmediateWork() { |
| 122 // Forward to the main thread if called from another thread. | 288 // Forward to the main thread if called from another thread. |
| 123 if (!task_runner_->RunsTasksOnCurrentThread()) { | 289 if (!task_runner_->RunsTasksOnCurrentThread()) { |
| 124 task_runner_->PostTask(FROM_HERE, forward_immediate_work_closure_); | 290 task_runner_->PostTask(FROM_HERE, forward_immediate_work_closure_); |
| 125 return; | 291 return; |
| 126 } | 292 } |
| 127 TRACE_EVENT0(tracing_category_, | 293 TRACE_EVENT0(tracing_category_, |
| 128 "ThrottlingHelper::OnTimeDomainHasImmediateWork"); | 294 "TaskQueueThrottler::OnTimeDomainHasImmediateWork"); |
| 129 base::TimeTicks now = tick_clock_->NowTicks(); | 295 base::TimeTicks now = tick_clock_->NowTicks(); |
| 130 MaybeSchedulePumpThrottledTasksLocked(FROM_HERE, now, now); | 296 MaybeSchedulePumpThrottledTasks(FROM_HERE, now, now); |
| 131 } | 297 } |
| 132 | 298 |
| 133 void ThrottlingHelper::OnTimeDomainHasDelayedWork() { | 299 void TaskQueueThrottler::OnTimeDomainHasDelayedWork() { |
| 134 TRACE_EVENT0(tracing_category_, | 300 TRACE_EVENT0(tracing_category_, |
| 135 "ThrottlingHelper::OnTimeDomainHasDelayedWork"); | 301 "TaskQueueThrottler::OnTimeDomainHasDelayedWork"); |
| 302 // TODO(altimin): Consider using TaskQueue::GetNextTaskRunTime here. | |
| 303 // to avoid unnecessary wakeups. Currently it's not possible because | |
| 304 // GetNextTaskRunTime requires a lock on the queue and | |
| 305 // OnTimeDomainHasDelayedWork can be called from TaskQueueImpl::SetTimeDomain, | |
| 306 // which acquires lock. | |
| 136 base::TimeTicks next_scheduled_delayed_task; | 307 base::TimeTicks next_scheduled_delayed_task; |
| 137 bool has_delayed_task = | 308 bool has_delayed_task = |
| 138 time_domain_->NextScheduledRunTime(&next_scheduled_delayed_task); | 309 time_domain_->NextScheduledRunTime(&next_scheduled_delayed_task); |
| 139 DCHECK(has_delayed_task); | 310 DCHECK(has_delayed_task); |
| 140 base::TimeTicks now = tick_clock_->NowTicks(); | 311 base::TimeTicks now = tick_clock_->NowTicks(); |
| 141 MaybeSchedulePumpThrottledTasksLocked(FROM_HERE, now, | 312 MaybeSchedulePumpThrottledTasks(FROM_HERE, now, next_scheduled_delayed_task); |
| 142 next_scheduled_delayed_task); | |
| 143 } | 313 } |
| 144 | 314 |
| 145 void ThrottlingHelper::PumpThrottledTasks() { | 315 namespace { |
| 146 TRACE_EVENT0(tracing_category_, "ThrottlingHelper::PumpThrottledTasks"); | |
| 147 pending_pump_throttled_tasks_runtime_ = base::TimeTicks(); | |
| 148 | 316 |
| 149 LazyNow lazy_low(tick_clock_); | 317 template <class T> |
| 318 T Min(const base::Optional<T>& optional, const T& value) { | |
| 319 if (!optional) { | |
| 320 return value; | |
| 321 } | |
| 322 return std::min(optional.value(), value); | |
| 323 } | |
| 324 | |
| 325 template <class T> | |
| 326 base::Optional<T> Min(const base::Optional<T>& a, const base::Optional<T>& b) { | |
| 327 if (!b) | |
| 328 return a; | |
| 329 if (!a) | |
| 330 return b; | |
| 331 return std::min(a.value(), b.value()); | |
| 332 } | |
| 333 | |
| 334 template <class T> | |
| 335 base::Optional<T> Max(const base::Optional<T>& a, const base::Optional<T>& b) { | |
| 336 if (!b) | |
| 337 return a; | |
| 338 if (!a) | |
| 339 return b; | |
| 340 return std::max(a.value(), b.value()); | |
| 341 } | |
| 342 | |
| 343 } // namespace | |
| 344 | |
| 345 void TaskQueueThrottler::PumpThrottledTasks() { | |
| 346 TRACE_EVENT0("renderer.scheduler", "TaskQueueThrottler::PumpThrottledTasks"); | |
| 347 pending_pump_throttled_tasks_runtime_.reset(); | |
| 348 | |
| 349 LazyNow lazy_now(tick_clock_); | |
| 350 base::Optional<base::TimeTicks> next_scheduled_delayed_task; | |
| 351 | |
| 150 for (const TaskQueueMap::value_type& map_entry : throttled_queues_) { | 352 for (const TaskQueueMap::value_type& map_entry : throttled_queues_) { |
| 151 TaskQueue* task_queue = map_entry.first; | 353 TaskQueue* task_queue = map_entry.first; |
| 152 if (!map_entry.second.enabled || task_queue->IsEmpty()) | 354 if (!map_entry.second.enabled || task_queue->IsEmpty()) |
| 153 continue; | 355 continue; |
| 154 | 356 |
| 357 TimeBudgetPool* time_budget_pool = GetTimeBudgetPoolForQueue(task_queue); | |
| 358 if (time_budget_pool && !time_budget_pool->IsAllowedToRun(lazy_now.Now())) { | |
| 359 base::TimeTicks next_run_time = | |
| 360 std::max(time_budget_pool->NextAllowedRunTime(), lazy_now.Now()); | |
| 361 | |
| 362 next_scheduled_delayed_task = | |
| 363 Min(next_scheduled_delayed_task, next_run_time); | |
| 364 | |
| 365 TRACE_EVENT1( | |
| 366 "renderer.scheduler", | |
| 367 "TaskQueueThrottler::PumpThrottledTasks_ExpensiveTaskThrottled", | |
| 368 "throttle_time_in_seconds", | |
| 369 (next_run_time - lazy_now.Now()).InSecondsF()); | |
| 370 | |
| 371 renderer_scheduler_->CreateTraceEventObjectSnapshot(); | |
| 372 | |
| 373 continue; | |
| 374 } | |
| 375 | |
| 376 base::Optional<base::TimeTicks> wake_up = | |
| 377 task_queue->GetNextScheduledWakeUp(); | |
| 378 next_scheduled_delayed_task = Min(next_scheduled_delayed_task, wake_up); | |
| 379 | |
| 380 // GetNextScheduledWakeUp() moves delayed tasks from incoming queue to work | |
| 381 // queue, so InsertFence() should be called after it in order to move | |
| 382 // ready tasks to delayed work queue before installing a fence. | |
| 155 task_queue->SetQueueEnabled(true); | 383 task_queue->SetQueueEnabled(true); |
| 156 task_queue->InsertFence(); | 384 task_queue->InsertFence(); |
| 157 } | 385 } |
| 158 // Make sure NextScheduledRunTime gives us an up-to date result. | |
| 159 time_domain_->ClearExpiredWakeups(); | |
| 160 | 386 |
| 161 base::TimeTicks next_scheduled_delayed_task; | 387 // Maybe schedule a call to TaskQueueThrottler::PumpThrottledTasks if there is |
| 162 // Maybe schedule a call to ThrottlingHelper::PumpThrottledTasks if there is | 388 // a pending delayed task or a throttled task ready to run. |
| 163 // a pending delayed task. NOTE posting a non-delayed task in the future will | 389 // NOTE: posting a non-delayed task in the future will result in |
| 164 // result in ThrottlingHelper::OnTimeDomainHasImmediateWork being called. | 390 // TaskQueueThrottler::OnTimeDomainHasImmediateWork being called. |
| 165 if (time_domain_->NextScheduledRunTime(&next_scheduled_delayed_task)) { | 391 if (next_scheduled_delayed_task) { |
| 166 MaybeSchedulePumpThrottledTasksLocked(FROM_HERE, lazy_low.Now(), | 392 MaybeSchedulePumpThrottledTasks(FROM_HERE, lazy_now.Now(), |
| 167 next_scheduled_delayed_task); | 393 *next_scheduled_delayed_task); |
| 168 } | 394 } |
| 169 } | 395 } |
| 170 | 396 |
| 171 /* static */ | 397 /* static */ |
| 172 base::TimeTicks ThrottlingHelper::ThrottledRunTime( | 398 base::TimeTicks TaskQueueThrottler::AlignedThrottledRunTime( |
| 173 base::TimeTicks unthrottled_runtime) { | 399 base::TimeTicks unthrottled_runtime) { |
| 174 const base::TimeDelta one_second = base::TimeDelta::FromSeconds(1); | 400 const base::TimeDelta one_second = base::TimeDelta::FromSeconds(1); |
| 175 return unthrottled_runtime + one_second - | 401 return unthrottled_runtime + one_second - |
| 176 ((unthrottled_runtime - base::TimeTicks()) % one_second); | 402 ((unthrottled_runtime - base::TimeTicks()) % one_second); |
| 177 } | 403 } |
| 178 | 404 |
| 179 void ThrottlingHelper::MaybeSchedulePumpThrottledTasksLocked( | 405 void TaskQueueThrottler::MaybeSchedulePumpThrottledTasks( |
| 180 const tracked_objects::Location& from_here, | 406 const tracked_objects::Location& from_here, |
| 181 base::TimeTicks now, | 407 base::TimeTicks now, |
| 182 base::TimeTicks unthrottled_runtime) { | 408 base::TimeTicks runtime) { |
| 183 if (virtual_time_) | 409 if (virtual_time_) |
| 184 return; | 410 return; |
| 185 | 411 |
| 186 base::TimeTicks throttled_runtime = | 412 runtime = std::max(now, AlignedThrottledRunTime(runtime)); |
| 187 ThrottledRunTime(std::max(now, unthrottled_runtime)); | 413 |
| 188 // If there is a pending call to PumpThrottledTasks and it's sooner than | 414 // If there is a pending call to PumpThrottledTasks and it's sooner than |
| 189 // |unthrottled_runtime| then return. | 415 // |runtime| then return. |
| 190 if (!pending_pump_throttled_tasks_runtime_.is_null() && | 416 if (pending_pump_throttled_tasks_runtime_ && |
| 191 throttled_runtime >= pending_pump_throttled_tasks_runtime_) { | 417 runtime >= pending_pump_throttled_tasks_runtime_.value()) { |
| 192 return; | 418 return; |
| 193 } | 419 } |
| 194 | 420 |
| 195 pending_pump_throttled_tasks_runtime_ = throttled_runtime; | 421 pending_pump_throttled_tasks_runtime_ = runtime; |
| 196 | 422 |
| 197 pump_throttled_tasks_closure_.Cancel(); | 423 pump_throttled_tasks_closure_.Cancel(); |
| 198 | 424 |
| 199 base::TimeDelta delay = pending_pump_throttled_tasks_runtime_ - now; | 425 base::TimeDelta delay = pending_pump_throttled_tasks_runtime_.value() - now; |
| 200 TRACE_EVENT1(tracing_category_, | 426 TRACE_EVENT1(tracing_category_, |
| 201 "ThrottlingHelper::MaybeSchedulePumpThrottledTasksLocked", | 427 "TaskQueueThrottler::MaybeSchedulePumpThrottledTasks", |
| 202 "delay_till_next_pump_ms", delay.InMilliseconds()); | 428 "delay_till_next_pump_ms", delay.InMilliseconds()); |
| 203 task_runner_->PostDelayedTask( | 429 task_runner_->PostDelayedTask( |
| 204 from_here, pump_throttled_tasks_closure_.callback(), delay); | 430 from_here, pump_throttled_tasks_closure_.callback(), delay); |
| 205 } | 431 } |
| 206 | 432 |
| 207 void ThrottlingHelper::EnableVirtualTime() { | 433 void TaskQueueThrottler::EnableVirtualTime() { |
| 208 virtual_time_ = true; | 434 virtual_time_ = true; |
| 209 | 435 |
| 210 pump_throttled_tasks_closure_.Cancel(); | 436 pump_throttled_tasks_closure_.Cancel(); |
| 211 | 437 |
| 212 while (!throttled_queues_.empty()) { | 438 while (!throttled_queues_.empty()) { |
| 213 TaskQueue* task_queue = throttled_queues_.begin()->first; | 439 TaskQueue* task_queue = throttled_queues_.begin()->first; |
| 214 bool enabled = throttled_queues_.begin()->second.enabled; | 440 bool enabled = throttled_queues_.begin()->second.enabled; |
| 215 | 441 |
| 216 throttled_queues_.erase(throttled_queues_.begin()); | 442 throttled_queues_.erase(throttled_queues_.begin()); |
| 217 | 443 |
| 218 task_queue->SetTimeDomain(renderer_scheduler_->GetVirtualTimeDomain()); | 444 task_queue->SetTimeDomain(renderer_scheduler_->GetVirtualTimeDomain()); |
| 219 task_queue->RemoveFence(); | 445 task_queue->RemoveFence(); |
| 220 task_queue->SetQueueEnabled(enabled); | 446 task_queue->SetQueueEnabled(enabled); |
| 221 } | 447 } |
| 222 } | 448 } |
| 223 | 449 |
| 450 TaskQueueThrottler::TimeBudgetPool* TaskQueueThrottler::CreateTimeBudgetPool( | |
| 451 const char* name) { | |
| 452 TimeBudgetPool* time_budget_pool = | |
| 453 new TimeBudgetPool(name, this, tick_clock_->NowTicks()); | |
| 454 time_budget_pools_[time_budget_pool] = base::WrapUnique(time_budget_pool); | |
| 455 return time_budget_pool; | |
| 456 } | |
| 457 | |
| 458 void TaskQueueThrottler::OnTaskRunTimeReported(TaskQueue* task_queue, | |
| 459 base::TimeTicks start_time, | |
| 460 base::TimeTicks end_time) { | |
| 461 if (!IsThrottled(task_queue)) | |
| 462 return; | |
| 463 | |
| 464 TimeBudgetPool* time_budget_pool = GetTimeBudgetPoolForQueue(task_queue); | |
| 465 if (time_budget_pool) { | |
| 466 time_budget_pool->RecordTaskRunTime(end_time - start_time); | |
| 467 if (!time_budget_pool->IsAllowedToRun(end_time)) { | |
| 468 // This task was too expensive and all following tasks are throttled | |
| 469 // until explicitly allowed. | |
| 470 task_queue->SetQueueEnabled(false); | |
| 471 | |
| 472 if (task_queue->HasPendingImmediateWork()) { | |
| 473 MaybeSchedulePumpThrottledTasks( | |
| 474 FROM_HERE, end_time, | |
| 475 std::max(end_time, time_budget_pool->NextAllowedRunTime())); | |
| 476 } | |
| 477 } | |
| 478 } | |
| 479 } | |
| 480 | |
| 481 void TaskQueueThrottler::AsValueInto(base::trace_event::TracedValue* state, | |
| 482 base::TimeTicks now) const { | |
| 483 if (pending_pump_throttled_tasks_runtime_) { | |
| 484 state->SetDouble( | |
| 485 "next_throttled_tasks_pump_in_seconds", | |
| 486 (pending_pump_throttled_tasks_runtime_.value() - now).InSecondsF()); | |
| 487 } | |
| 488 | |
| 489 state->BeginDictionary("time_budget_pools"); | |
| 490 | |
| 491 for (const auto& map_entry : time_budget_pools_) { | |
| 492 TaskQueueThrottler::TimeBudgetPool* pool = map_entry.first; | |
| 493 pool->AsValueInto(state, now); | |
| 494 } | |
| 495 | |
| 496 state->EndDictionary(); | |
| 497 } | |
| 498 | |
| 499 TaskQueueThrottler::TimeBudgetPool* | |
| 500 TaskQueueThrottler::GetTimeBudgetPoolForQueue(TaskQueue* queue) { | |
| 501 auto find_it = time_budget_pool_for_queue_.find(queue); | |
| 502 if (find_it == time_budget_pool_for_queue_.end()) { | |
| 503 return nullptr; | |
| 504 } else { | |
| 505 TimeBudgetPool* result = find_it->second; | |
| 506 DCHECK(result); | |
| 507 return result; | |
| 508 } | |
| 509 } | |
| 510 | |
| 511 void TaskQueueThrottler::MaybeSchedulePumpQueue( | |
| 512 const tracked_objects::Location& from_here, | |
| 513 base::TimeTicks now, | |
| 514 TaskQueue* queue, | |
| 515 base::Optional<base::TimeTicks> next_possible_run_time) { | |
| 516 base::Optional<base::TimeTicks> next_run_time = | |
| 517 Max(queue->GetNextTaskRunTime(), next_possible_run_time); | |
| 518 | |
| 519 if (next_run_time) { | |
| 520 MaybeSchedulePumpThrottledTasks(from_here, now, next_run_time.value()); | |
| 521 } | |
| 522 } | |
| 523 | |
| 224 } // namespace scheduler | 524 } // namespace scheduler |
| 225 } // namespace blink | 525 } // namespace blink |
| OLD | NEW |