The old side of the diff is empty; this change adds `platform/scheduler/renderer/task_queue_throttler.cc` as a new file:

```cpp
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "platform/scheduler/renderer/task_queue_throttler.h"

#include <cstdint>

#include "base/format_macros.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/optional.h"
#include "base/strings/stringprintf.h"
#include "platform/scheduler/base/real_time_domain.h"
#include "platform/scheduler/child/scheduler_tqm_delegate.h"
#include "platform/scheduler/renderer/auto_advancing_virtual_time_domain.h"
#include "platform/scheduler/renderer/renderer_scheduler_impl.h"
#include "platform/scheduler/renderer/throttled_time_domain.h"
#include "platform/scheduler/renderer/web_frame_scheduler_impl.h"
#include "public/platform/WebFrameScheduler.h"

namespace blink {
namespace scheduler {

namespace {
const int kMaxBudgetLevelInSeconds = 1;

base::Optional<base::TimeTicks> NextTaskRunTime(LazyNow* lazy_now,
                                                TaskQueue* queue) {
  if (queue->HasPendingImmediateWork())
    return lazy_now->Now();
  return queue->GetNextScheduledWakeUp();
}
}  // namespace

TaskQueueThrottler::TimeBudgetPool::TimeBudgetPool(
    const char* name,
    TaskQueueThrottler* task_queue_throttler,
    base::TimeTicks now)
    : name_(name),
      task_queue_throttler_(task_queue_throttler),
      max_budget_level_(base::TimeDelta::FromSeconds(kMaxBudgetLevelInSeconds)),
      last_checkpoint_(now),
      cpu_percentage_(1),
      is_enabled_(true) {}

TaskQueueThrottler::TimeBudgetPool::~TimeBudgetPool() {}

void TaskQueueThrottler::TimeBudgetPool::SetTimeBudget(base::TimeTicks now,
                                                       double cpu_percentage) {
  Advance(now);
  cpu_percentage_ = cpu_percentage;
}

void TaskQueueThrottler::TimeBudgetPool::AddQueue(base::TimeTicks now,
                                                  TaskQueue* queue) {
  DCHECK(!task_queue_throttler_->queue_details_[queue].time_budget_pool);
  task_queue_throttler_->queue_details_[queue].time_budget_pool = this;

  associated_task_queues_.insert(queue);

  if (!task_queue_throttler_->IsThrottled(queue))
    return;

  queue->SetQueueEnabled(false);

  task_queue_throttler_->MaybeSchedulePumpQueue(FROM_HERE, now, queue,
                                                GetNextAllowedRunTime());
}

void TaskQueueThrottler::TimeBudgetPool::RemoveQueue(base::TimeTicks now,
                                                     TaskQueue* queue) {
  auto find_it = task_queue_throttler_->queue_details_.find(queue);
  DCHECK(find_it != task_queue_throttler_->queue_details_.end() &&
         find_it->second.time_budget_pool == this);
  find_it->second.time_budget_pool = nullptr;
  bool is_throttled = find_it->second.throttling_ref_count > 0;

  task_queue_throttler_->MaybeDeleteQueueMetadata(find_it);
  associated_task_queues_.erase(queue);

  if (is_throttled)
    return;

  task_queue_throttler_->MaybeSchedulePumpQueue(FROM_HERE, now, queue,
                                                base::nullopt);
}

void TaskQueueThrottler::TimeBudgetPool::EnableThrottling(LazyNow* lazy_now) {
  if (is_enabled_)
    return;
  is_enabled_ = true;

  for (TaskQueue* queue : associated_task_queues_) {
    if (!task_queue_throttler_->IsThrottled(queue))
      continue;

    queue->SetQueueEnabled(false);

    task_queue_throttler_->MaybeSchedulePumpQueue(
        FROM_HERE, lazy_now->Now(), queue, GetNextAllowedRunTime());
  }
}

void TaskQueueThrottler::TimeBudgetPool::DisableThrottling(LazyNow* lazy_now) {
  if (!is_enabled_)
    return;
  is_enabled_ = false;

  for (TaskQueue* queue : associated_task_queues_) {
    if (!task_queue_throttler_->IsThrottled(queue))
      continue;

    task_queue_throttler_->MaybeSchedulePumpQueue(FROM_HERE, lazy_now->Now(),
                                                  queue, base::nullopt);
  }
}

bool TaskQueueThrottler::TimeBudgetPool::IsThrottlingEnabled() const {
  return is_enabled_;
}

void TaskQueueThrottler::TimeBudgetPool::Close() {
  DCHECK_EQ(0u, associated_task_queues_.size());

  task_queue_throttler_->time_budget_pools_.erase(this);
}

bool TaskQueueThrottler::TimeBudgetPool::HasEnoughBudgetToRun(
    base::TimeTicks now) {
  Advance(now);
  return !is_enabled_ || current_budget_level_.InMicroseconds() >= 0;
}

base::TimeTicks TaskQueueThrottler::TimeBudgetPool::GetNextAllowedRunTime() {
  if (!is_enabled_ || current_budget_level_.InMicroseconds() >= 0) {
    return last_checkpoint_;
  } else {
    // Subtract because current_budget_level_ is negative.
    return last_checkpoint_ - current_budget_level_ / cpu_percentage_;
  }
}

void TaskQueueThrottler::TimeBudgetPool::RecordTaskRunTime(
    base::TimeDelta task_run_time) {
  if (is_enabled_) {
    current_budget_level_ -= task_run_time;
  }
}

const char* TaskQueueThrottler::TimeBudgetPool::Name() const {
  return name_;
}

void TaskQueueThrottler::TimeBudgetPool::AsValueInto(
    base::trace_event::TracedValue* state,
    base::TimeTicks now) const {
  state->BeginDictionary();

  state->SetString("name", name_);
  state->SetDouble("time_budget", cpu_percentage_);
  state->SetDouble("time_budget_level_in_seconds",
                   current_budget_level_.InSecondsF());
  state->SetDouble("last_checkpoint_seconds_ago",
                   (now - last_checkpoint_).InSecondsF());

  state->BeginArray("task_queues");
  for (TaskQueue* queue : associated_task_queues_) {
    state->AppendString(base::StringPrintf(
        "%" PRIx64, static_cast<uint64_t>(reinterpret_cast<uintptr_t>(queue))));
  }
  state->EndArray();

  state->EndDictionary();
}

void TaskQueueThrottler::TimeBudgetPool::Advance(base::TimeTicks now) {
  if (now > last_checkpoint_) {
    if (is_enabled_) {
      current_budget_level_ = std::min(
          current_budget_level_ + cpu_percentage_ * (now - last_checkpoint_),
          max_budget_level_);
    }
    last_checkpoint_ = now;
  }
}

void TaskQueueThrottler::TimeBudgetPool::BlockQueues(base::TimeTicks now) {
  for (TaskQueue* queue : associated_task_queues_) {
    if (!task_queue_throttler_->IsThrottled(queue))
      continue;

    queue->SetQueueEnabled(false);
    task_queue_throttler_->MaybeSchedulePumpQueue(FROM_HERE, now, queue,
                                                  base::nullopt);
  }
}

TaskQueueThrottler::TaskQueueThrottler(
    RendererSchedulerImpl* renderer_scheduler,
    const char* tracing_category)
    : task_runner_(renderer_scheduler->ControlTaskRunner()),
      renderer_scheduler_(renderer_scheduler),
      tick_clock_(renderer_scheduler->tick_clock()),
      tracing_category_(tracing_category),
      time_domain_(new ThrottledTimeDomain(this, tracing_category)),
      virtual_time_(false),
      weak_factory_(this) {
  pump_throttled_tasks_closure_.Reset(base::Bind(
      &TaskQueueThrottler::PumpThrottledTasks, weak_factory_.GetWeakPtr()));
  forward_immediate_work_callback_ =
      base::Bind(&TaskQueueThrottler::OnTimeDomainHasImmediateWork,
                 weak_factory_.GetWeakPtr());

  renderer_scheduler_->RegisterTimeDomain(time_domain_.get());
}

TaskQueueThrottler::~TaskQueueThrottler() {
  // It's possible for queues to still be throttled, so we need to tidy up
  // before unregistering the time domain.
  for (const TaskQueueMap::value_type& map_entry : queue_details_) {
    if (map_entry.second.throttling_ref_count > 0) {
      TaskQueue* task_queue = map_entry.first;
      task_queue->SetTimeDomain(renderer_scheduler_->real_time_domain());
      task_queue->RemoveFence();
    }
  }

  renderer_scheduler_->UnregisterTimeDomain(time_domain_.get());
}

void TaskQueueThrottler::SetQueueEnabled(TaskQueue* task_queue, bool enabled) {
  TaskQueueMap::iterator find_it = queue_details_.find(task_queue);

  if (find_it == queue_details_.end()) {
    task_queue->SetQueueEnabled(enabled);
    return;
  }

  find_it->second.enabled = enabled;

  if (find_it->second.throttling_ref_count == 0)
    return;

  // We don't enable the queue here because it's throttled and there might be
  // tasks in its work queue that would execute immediately rather than after
  // PumpThrottledTasks runs.
  if (!enabled) {
    task_queue->SetQueueEnabled(false);
    MaybeSchedulePumpQueue(FROM_HERE, tick_clock_->NowTicks(), task_queue,
                           base::nullopt);
  }
}

void TaskQueueThrottler::IncreaseThrottleRefCount(TaskQueue* task_queue) {
  DCHECK_NE(task_queue, task_runner_.get());

  if (virtual_time_)
    return;

  std::pair<TaskQueueMap::iterator, bool> insert_result =
      queue_details_.insert(std::make_pair(task_queue, Metadata()));

  if (insert_result.first->second.throttling_ref_count == 0) {
    // The ref count was zero, so the queue is not yet throttled and we need
    // to throttle it now.
    insert_result.first->second.enabled = task_queue->IsQueueEnabled();

    task_queue->SetTimeDomain(time_domain_.get());
    task_queue->RemoveFence();
    task_queue->SetQueueEnabled(false);

    if (!task_queue->IsEmpty()) {
      if (task_queue->HasPendingImmediateWork()) {
        OnTimeDomainHasImmediateWork(task_queue);
      } else {
        OnTimeDomainHasDelayedWork(task_queue);
      }
    }

    TRACE_EVENT1(tracing_category_, "TaskQueueThrottler_TaskQueueThrottled",
                 "task_queue", task_queue);
  }

  insert_result.first->second.throttling_ref_count++;
}

void TaskQueueThrottler::DecreaseThrottleRefCount(TaskQueue* task_queue) {
  if (virtual_time_)
    return;

  TaskQueueMap::iterator iter = queue_details_.find(task_queue);

  if (iter != queue_details_.end() &&
      --iter->second.throttling_ref_count == 0) {
    bool enabled = iter->second.enabled;

    MaybeDeleteQueueMetadata(iter);

    task_queue->SetTimeDomain(renderer_scheduler_->real_time_domain());
    task_queue->RemoveFence();
    task_queue->SetQueueEnabled(enabled);

    TRACE_EVENT1(tracing_category_, "TaskQueueThrottler_TaskQueueUnthrottled",
                 "task_queue", task_queue);
  }
}

bool TaskQueueThrottler::IsThrottled(TaskQueue* task_queue) const {
  auto find_it = queue_details_.find(task_queue);
  if (find_it == queue_details_.end())
    return false;
  return find_it->second.throttling_ref_count > 0;
}

void TaskQueueThrottler::UnregisterTaskQueue(TaskQueue* task_queue) {
  LazyNow lazy_now(tick_clock_);
  auto find_it = queue_details_.find(task_queue);

  if (find_it == queue_details_.end())
    return;

  if (find_it->second.time_budget_pool)
    find_it->second.time_budget_pool->RemoveQueue(lazy_now.Now(), task_queue);

  queue_details_.erase(find_it);
}

void TaskQueueThrottler::OnTimeDomainHasImmediateWork(TaskQueue* queue) {
  // Forward to the main thread if called from another thread.
  if (!task_runner_->RunsTasksOnCurrentThread()) {
    task_runner_->PostTask(FROM_HERE,
                           base::Bind(forward_immediate_work_callback_, queue));
    return;
  }
  TRACE_EVENT0(tracing_category_,
               "TaskQueueThrottler::OnTimeDomainHasImmediateWork");

  base::TimeTicks now = tick_clock_->NowTicks();
  base::TimeTicks next_allowed_run_time = GetNextAllowedRunTime(now, queue);
  MaybeSchedulePumpThrottledTasks(FROM_HERE, now, next_allowed_run_time);
}

void TaskQueueThrottler::OnTimeDomainHasDelayedWork(TaskQueue* queue) {
  TRACE_EVENT0(tracing_category_,
               "TaskQueueThrottler::OnTimeDomainHasDelayedWork");
  base::TimeTicks now = tick_clock_->NowTicks();
  LazyNow lazy_now(now);

  base::Optional<base::TimeTicks> next_scheduled_delayed_task =
      NextTaskRunTime(&lazy_now, queue);
  DCHECK(next_scheduled_delayed_task);
  MaybeSchedulePumpThrottledTasks(FROM_HERE, now,
                                  next_scheduled_delayed_task.value());
}

namespace {

template <class T>
T Min(const base::Optional<T>& optional, const T& value) {
  if (!optional) {
    return value;
  }
  return std::min(optional.value(), value);
}

template <class T>
base::Optional<T> Min(const base::Optional<T>& a, const base::Optional<T>& b) {
  if (!b)
    return a;
  if (!a)
    return b;
  return std::min(a.value(), b.value());
}

template <class T>
T Max(const base::Optional<T>& optional, const T& value) {
  if (!optional)
    return value;
  return std::max(optional.value(), value);
}

template <class T>
base::Optional<T> Max(const base::Optional<T>& a, const base::Optional<T>& b) {
  if (!b)
    return a;
  if (!a)
    return b;
  return std::max(a.value(), b.value());
}

}  // namespace

void TaskQueueThrottler::PumpThrottledTasks() {
  TRACE_EVENT0(tracing_category_, "TaskQueueThrottler::PumpThrottledTasks");
  pending_pump_throttled_tasks_runtime_.reset();

  LazyNow lazy_now(tick_clock_);
  base::Optional<base::TimeTicks> next_scheduled_delayed_task;

  for (const TaskQueueMap::value_type& map_entry : queue_details_) {
    TaskQueue* task_queue = map_entry.first;
    if (!map_entry.second.enabled || task_queue->IsEmpty() ||
        map_entry.second.throttling_ref_count == 0)
      continue;

    // Don't enable queues whose budget pool doesn't allow them to run now.
    base::TimeTicks next_allowed_run_time =
        GetNextAllowedRunTime(lazy_now.Now(), task_queue);
    base::Optional<base::TimeTicks> next_desired_run_time =
        NextTaskRunTime(&lazy_now, task_queue);

    if (next_desired_run_time &&
        next_allowed_run_time > next_desired_run_time.value()) {
      TRACE_EVENT1(
          tracing_category_,
          "TaskQueueThrottler::PumpThrottledTasks_ExpensiveTaskThrottled",
          "throttle_time_in_seconds",
          (next_allowed_run_time - next_desired_run_time.value()).InSecondsF());

      // Schedule a pump for the queue, which was disabled because of the
      // time budget.
      next_scheduled_delayed_task =
          Min(next_scheduled_delayed_task, next_allowed_run_time);

      continue;
    }

    next_scheduled_delayed_task =
        Min(next_scheduled_delayed_task, task_queue->GetNextScheduledWakeUp());

    if (next_allowed_run_time > lazy_now.Now())
      continue;

    task_queue->SetQueueEnabled(true);
    task_queue->InsertFence();
  }

  // Maybe schedule a call to TaskQueueThrottler::PumpThrottledTasks if there
  // is a pending delayed task or a throttled task ready to run.
  // NOTE: posting a non-delayed task in the future will result in
  // TaskQueueThrottler::OnTimeDomainHasImmediateWork being called.
  if (next_scheduled_delayed_task) {
    MaybeSchedulePumpThrottledTasks(FROM_HERE, lazy_now.Now(),
                                    *next_scheduled_delayed_task);
  }
}

/* static */
base::TimeTicks TaskQueueThrottler::AlignedThrottledRunTime(
    base::TimeTicks unthrottled_runtime) {
  const base::TimeDelta one_second = base::TimeDelta::FromSeconds(1);
  return unthrottled_runtime + one_second -
         ((unthrottled_runtime - base::TimeTicks()) % one_second);
}

void TaskQueueThrottler::MaybeSchedulePumpThrottledTasks(
    const tracked_objects::Location& from_here,
    base::TimeTicks now,
    base::TimeTicks unaligned_runtime) {
  if (virtual_time_)
    return;

  base::TimeTicks runtime =
      std::max(now, AlignedThrottledRunTime(unaligned_runtime));

  // If there is a pending call to PumpThrottledTasks and it's sooner than
  // |runtime| then return.
  if (pending_pump_throttled_tasks_runtime_ &&
      runtime >= pending_pump_throttled_tasks_runtime_.value()) {
    return;
  }

  pending_pump_throttled_tasks_runtime_ = runtime;

  pump_throttled_tasks_closure_.Cancel();

  base::TimeDelta delay = pending_pump_throttled_tasks_runtime_.value() - now;
  TRACE_EVENT1(tracing_category_,
               "TaskQueueThrottler::MaybeSchedulePumpThrottledTasks",
               "delay_till_next_pump_ms", delay.InMilliseconds());
  task_runner_->PostDelayedTask(
      from_here, pump_throttled_tasks_closure_.callback(), delay);
}

void TaskQueueThrottler::EnableVirtualTime() {
  virtual_time_ = true;

  pump_throttled_tasks_closure_.Cancel();

  for (auto it = queue_details_.begin(); it != queue_details_.end();) {
    TaskQueue* task_queue = it->first;
    bool enabled = it->second.enabled;

    if (!it->second.time_budget_pool) {
      it = queue_details_.erase(it);
    } else {
      // Fall back to default values.
      it->second.throttling_ref_count = 0;
      it->second.enabled = false;
      it++;
    }

    task_queue->SetTimeDomain(renderer_scheduler_->GetVirtualTimeDomain());
    task_queue->RemoveFence();
    task_queue->SetQueueEnabled(enabled);
  }
}

TaskQueueThrottler::TimeBudgetPool* TaskQueueThrottler::CreateTimeBudgetPool(
    const char* name) {
  TimeBudgetPool* time_budget_pool =
      new TimeBudgetPool(name, this, tick_clock_->NowTicks());
  time_budget_pools_[time_budget_pool] = base::WrapUnique(time_budget_pool);
  return time_budget_pool;
}

void TaskQueueThrottler::OnTaskRunTimeReported(TaskQueue* task_queue,
                                               base::TimeTicks start_time,
                                               base::TimeTicks end_time) {
  if (!IsThrottled(task_queue))
    return;

  TimeBudgetPool* time_budget_pool = GetTimeBudgetPoolForQueue(task_queue);
  if (!time_budget_pool)
    return;

  time_budget_pool->RecordTaskRunTime(end_time - start_time);
  if (!time_budget_pool->HasEnoughBudgetToRun(end_time))
    time_budget_pool->BlockQueues(end_time);
}

void TaskQueueThrottler::AsValueInto(base::trace_event::TracedValue* state,
                                     base::TimeTicks now) const {
  if (pending_pump_throttled_tasks_runtime_) {
    state->SetDouble(
        "next_throttled_tasks_pump_in_seconds",
        (pending_pump_throttled_tasks_runtime_.value() - now).InSecondsF());
  }

  state->BeginDictionary("time_budget_pools");

  for (const auto& map_entry : time_budget_pools_) {
    TaskQueueThrottler::TimeBudgetPool* pool = map_entry.first;
    pool->AsValueInto(state, now);
  }

  state->EndDictionary();
}

TaskQueueThrottler::TimeBudgetPool*
TaskQueueThrottler::GetTimeBudgetPoolForQueue(TaskQueue* queue) {
  auto find_it = queue_details_.find(queue);
  if (find_it == queue_details_.end()) {
    return nullptr;
  } else {
    return find_it->second.time_budget_pool;
  }
}

void TaskQueueThrottler::MaybeSchedulePumpQueue(
    const tracked_objects::Location& from_here,
    base::TimeTicks now,
    TaskQueue* queue,
    base::Optional<base::TimeTicks> next_possible_run_time) {
  LazyNow lazy_now(now);
  base::Optional<base::TimeTicks> next_run_time =
      Max(NextTaskRunTime(&lazy_now, queue), next_possible_run_time);

  if (next_run_time) {
    MaybeSchedulePumpThrottledTasks(from_here, now, next_run_time.value());
  }
}

base::TimeTicks TaskQueueThrottler::GetNextAllowedRunTime(base::TimeTicks now,
                                                          TaskQueue* queue) {
  TimeBudgetPool* time_budget_pool = GetTimeBudgetPoolForQueue(queue);
  if (!time_budget_pool) {
    return now;
  } else {
    return std::max(now, time_budget_pool->GetNextAllowedRunTime());
  }
}

void TaskQueueThrottler::MaybeDeleteQueueMetadata(TaskQueueMap::iterator it) {
  if (it->second.throttling_ref_count == 0 && !it->second.time_budget_pool)
    queue_details_.erase(it);
}

}  // namespace scheduler
}  // namespace blink
```
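For orientation, here is a hedged usage sketch of the interfaces defined above. It is not part of the change: `throttler`, `timer_queue`, and `tick_clock` are hypothetical names assumed to exist in the caller, and the 0.01 CPU fraction is an arbitrary example value.

```cpp
// Hypothetical sketch: throttle a timer queue against a 1% CPU-time budget.
// Assumes a TaskQueueThrottler* |throttler|, a TaskQueue* |timer_queue| and a
// base::TickClock* |tick_clock| are available.
TaskQueueThrottler::TimeBudgetPool* pool =
    throttler->CreateTimeBudgetPool("background");

// Let queues in this pool use roughly 1% of wall-clock time. The budget
// accrues at that rate in Advance() (capped at kMaxBudgetLevelInSeconds) and
// is drained by OnTaskRunTimeReported() after each task.
pool->SetTimeBudget(tick_clock->NowTicks(), 0.01);
pool->AddQueue(tick_clock->NowTicks(), timer_queue);

// Start throttling: the queue moves to the throttled time domain and is only
// pumped on one-second-aligned boundaries when the budget allows it.
throttler->IncreaseThrottleRefCount(timer_queue);

// ...later, undo everything in reverse order.
throttler->DecreaseThrottleRefCount(timer_queue);
pool->RemoveQueue(tick_clock->NowTicks(), timer_queue);
pool->Close();
```

The ref-count API presumably exists so that multiple clients can request throttling of the same queue independently; as the code shows, the queue is only returned to the real time domain and re-enabled once the count drops back to zero.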