Chromium Code Reviews

Side by Side Diff: third_party/WebKit/Source/platform/scheduler/renderer/task_queue_throttler.cc

Issue 2258133002: [scheduler] Implement time-based cpu throttling. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Use double instead of base::TimeTicks Created 4 years, 3 months ago
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "platform/scheduler/renderer/throttling_helper.h" 5 #include "platform/scheduler/renderer/task_queue_throttler.h"
6 6
7 #include <cstdint>
8
9 #include "base/format_macros.h"
7 #include "base/logging.h" 10 #include "base/logging.h"
11 #include "base/memory/ptr_util.h"
12 #include "base/optional.h"
13 #include "base/strings/stringprintf.h"
8 #include "platform/scheduler/base/real_time_domain.h" 14 #include "platform/scheduler/base/real_time_domain.h"
9 #include "platform/scheduler/child/scheduler_tqm_delegate.h" 15 #include "platform/scheduler/child/scheduler_tqm_delegate.h"
10 #include "platform/scheduler/renderer/auto_advancing_virtual_time_domain.h" 16 #include "platform/scheduler/renderer/auto_advancing_virtual_time_domain.h"
11 #include "platform/scheduler/renderer/renderer_scheduler_impl.h" 17 #include "platform/scheduler/renderer/renderer_scheduler_impl.h"
12 #include "platform/scheduler/renderer/throttled_time_domain.h" 18 #include "platform/scheduler/renderer/throttled_time_domain.h"
13 #include "platform/scheduler/renderer/web_frame_scheduler_impl.h" 19 #include "platform/scheduler/renderer/web_frame_scheduler_impl.h"
14 #include "public/platform/WebFrameScheduler.h" 20 #include "public/platform/WebFrameScheduler.h"
15 21
22 #include <iostream> // FIXME
23
16 namespace blink { 24 namespace blink {
17 namespace scheduler { 25 namespace scheduler {
18 26
19 ThrottlingHelper::ThrottlingHelper(RendererSchedulerImpl* renderer_scheduler, 27 namespace {
20 const char* tracing_category) 28 const int kMaxBudgetLevelInSeconds = 1;
29 }
30
31 TaskQueueThrottler::TimeBudgetPool::TimeBudgetPool(
32 const char* name,
33 TaskQueueThrottler* task_queue_throttler,
34 base::TimeTicks now)
35 : name_(name),
36 task_queue_throttler_(task_queue_throttler),
37 max_budget_level_(base::TimeDelta::FromSeconds(kMaxBudgetLevelInSeconds)),
38 last_checkpoint_(now),
39 cpu_percentage_(1),
40 is_enabled_(true) {}
41
42 TaskQueueThrottler::TimeBudgetPool::~TimeBudgetPool() {}
43
44 void TaskQueueThrottler::TimeBudgetPool::SetTimeBudget(base::TimeTicks now,
45 double cpu_percentage) {
46 Advance(now);
47 cpu_percentage_ = cpu_percentage;
48 }
49
50 void TaskQueueThrottler::TimeBudgetPool::AddQueue(base::TimeTicks now,
51 TaskQueue* queue) {
52 DCHECK(task_queue_throttler_->time_budget_pool_for_queue_.find(queue) ==
53 task_queue_throttler_->time_budget_pool_for_queue_.end());
54 task_queue_throttler_->time_budget_pool_for_queue_[queue] = this;
55
56 assosiated_task_queues_.insert(queue);
57
58 if (!task_queue_throttler_->IsThrottled(queue))
59 return;
60
61 queue->SetQueueEnabled(false);
alex clarke (OOO till 29th) 2016/09/12 17:45:26 Is this safe? See TaskQueueThrottler::SetQueueEnabled.
altimin 2016/09/14 11:23:16 Yes, it's safe to disable a throttled queue. For e
62
63 task_queue_throttler_->MaybeSchedulePumpQueueWithBudget(FROM_HERE, now, queue,
64 this);
65 }
66
67 void TaskQueueThrottler::TimeBudgetPool::RemoveQueue(base::TimeTicks now,
68 TaskQueue* queue) {
69 DCHECK_EQ(task_queue_throttler_->time_budget_pool_for_queue_[queue], this);
70 task_queue_throttler_->time_budget_pool_for_queue_.erase(queue);
71
72 assosiated_task_queues_.erase(queue);
73
74 if (!task_queue_throttler_->IsThrottled(queue))
75 return;
76
77 task_queue_throttler_->MaybeSchedulePumpQueue(FROM_HERE, now, queue);
78 }
79
80 void TaskQueueThrottler::TimeBudgetPool::Enable(base::TimeTicks now) {
alex clarke (OOO till 29th) 2016/09/12 17:45:26 Lazy now?
altimin 2016/09/14 11:23:17 Done.
81 if (is_enabled_)
82 return;
83 is_enabled_ = true;
84
85 for (TaskQueue* queue : assosiated_task_queues_) {
86 if (!task_queue_throttler_->IsThrottled(queue))
87 continue;
88
89 queue->SetQueueEnabled(false);
alex clarke (OOO till 29th) 2016/09/12 17:45:26 Ditto.
altimin 2016/09/14 11:23:17 Same as above.
90
91 task_queue_throttler_->MaybeSchedulePumpQueueWithBudget(FROM_HERE, now,
92 queue, this);
93 }
94 }
95
96 void TaskQueueThrottler::TimeBudgetPool::Disable(base::TimeTicks now) {
alex clarke (OOO till 29th) 2016/09/12 17:45:26 Lazy now?
alex clarke (OOO till 29th) 2016/09/12 17:45:26 I can't see how this is going to be used but I not
altimin 2016/09/14 11:23:16 We can't enable the queue here because of timer alignment.
altimin 2016/09/14 11:23:17 Done.
97 if (!is_enabled_)
98 return;
99 is_enabled_ = false;
100
101 for (TaskQueue* queue : assosiated_task_queues_) {
102 if (!task_queue_throttler_->IsThrottled(queue))
103 continue;
104
105 task_queue_throttler_->MaybeSchedulePumpQueue(FROM_HERE, now, queue);
106 }
107 }
108
109 bool TaskQueueThrottler::TimeBudgetPool::IsEnabled() const {
110 return is_enabled_;
111 }
112
113 void TaskQueueThrottler::TimeBudgetPool::Close() {
114 DCHECK_EQ(0u, assosiated_task_queues_.size());
115
116 task_queue_throttler_->time_budget_pools_.erase(this);
117 }
118
119 bool TaskQueueThrottler::TimeBudgetPool::IsAllowedToRun(base::TimeTicks now) {
120 Advance(now);
121 return !is_enabled_ || current_budget_level_.InMicroseconds() >= 0;
122 }
123
124 base::TimeTicks TaskQueueThrottler::TimeBudgetPool::NextAllowedRunTime() {
125 if (!is_enabled_ || current_budget_level_.InMicroseconds() >= 0) {
126 return last_checkpoint_;
127 } else {
128 // Subtract because current_budget is negative.
129 return last_checkpoint_ - current_budget_level_ / cpu_percentage_;
130 }
131 }
132
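
Note: the budget level goes negative after an expensive task, and NextAllowedRunTime() projects when it climbs back to zero — the deficit is repaid at cpu_percentage_ budget-seconds per second of wall-clock time. A minimal standalone sketch of that arithmetic, using plain double seconds and hypothetical names rather than base::TimeTicks/TimeDelta:

    #include <cassert>

    // Sketch only: |last_checkpoint| and |budget_level| are in seconds.
    double NextAllowedRunTime(double last_checkpoint,
                              double budget_level,
                              double cpu_percentage) {
      if (budget_level >= 0)
        return last_checkpoint;  // Still in credit: may run immediately.
      // The deficit is repaid at |cpu_percentage| budget-seconds per real
      // second, so dividing the (negative) level by the rate gives the wait.
      return last_checkpoint - budget_level / cpu_percentage;
    }

    int main() {
      // A 100 ms task against a 1% budget leaves a -0.1 s level, which
      // takes 10 s of wall-clock time to pay back.
      assert(NextAllowedRunTime(0.0, -0.1, 0.01) == 10.0);
      return 0;
    }
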
133 void TaskQueueThrottler::TimeBudgetPool::RecordTaskRunTime(
134 base::TimeDelta task_run_time) {
135 if (is_enabled_) {
136 current_budget_level_ -= task_run_time;
137 }
138 }
139
140 const char* TaskQueueThrottler::TimeBudgetPool::Name() const {
141 return name_;
142 }
143
144 void TaskQueueThrottler::TimeBudgetPool::AsValueInto(
145 base::trace_event::TracedValue* state,
146 base::TimeTicks now) const {
147 state->BeginDictionary();
148
149 state->SetString("name", name_);
150 state->SetDouble("time_budget", cpu_percentage_);
151 state->SetDouble("time_budget_level_in_seconds",
152 current_budget_level_.InSecondsF());
153 state->SetDouble("last_checkpoint_seconds_ago",
154 (now - last_checkpoint_).InSecondsF());
155
156 state->BeginArray("task_queues");
157 for (TaskQueue* queue : assosiated_task_queues_) {
158 // Yes, that's memory-infra-approved way of tracing pointers. Deal with it.
Sami 2016/09/12 17:49:10 I think we can do without this comment :)
altimin 2016/09/14 11:23:16 Done.
159 state->AppendString(base::StringPrintf(
160 "%" PRIx64, static_cast<uint64_t>(reinterpret_cast<uintptr_t>(queue))));
161 }
162 state->EndArray();
163
164 state->EndDictionary();
165 }
166
167 void TaskQueueThrottler::TimeBudgetPool::Advance(base::TimeTicks now) {
168 if (now > last_checkpoint_) {
169 if (is_enabled_) {
170 current_budget_level_ = std::min(
171 current_budget_level_ + cpu_percentage_ * (now - last_checkpoint_),
172 max_budget_level_);
173 }
174 last_checkpoint_ = now;
175 }
176 }
177
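
Note: Advance() is the replenishment half of the budget — elapsed wall-clock time earns cpu_percentage_ seconds of budget per second, clamped so at most kMaxBudgetLevelInSeconds (one second) of credit can be banked. A standalone sketch of the same rule in plain double seconds (hypothetical form, not the Chromium types):

    #include <algorithm>

    const double kMaxBudgetLevelSeconds = 1.0;  // Mirrors kMaxBudgetLevelInSeconds.

    // Returns the new budget level after |elapsed| seconds of wall-clock time.
    double AdvanceBudget(double budget_level,
                         double cpu_percentage,
                         double elapsed) {
      return std::min(budget_level + cpu_percentage * elapsed,
                      kMaxBudgetLevelSeconds);
    }

    // AdvanceBudget(-0.1, 0.01, 10.0)  == 0.0  (a 100 ms overdraft repaid in 10 s)
    // AdvanceBudget( 0.0, 0.01, 500.0) == 1.0  (credit is clamped at one second)
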
178 TaskQueueThrottler::TaskQueueThrottler(
179 RendererSchedulerImpl* renderer_scheduler,
180 const char* tracing_category)
21 : task_runner_(renderer_scheduler->ControlTaskRunner()), 181 : task_runner_(renderer_scheduler->ControlTaskRunner()),
22 renderer_scheduler_(renderer_scheduler), 182 renderer_scheduler_(renderer_scheduler),
23 tick_clock_(renderer_scheduler->tick_clock()), 183 tick_clock_(renderer_scheduler->tick_clock()),
24 tracing_category_(tracing_category), 184 tracing_category_(tracing_category),
25 time_domain_(new ThrottledTimeDomain(this, tracing_category)), 185 time_domain_(new ThrottledTimeDomain(this, tracing_category)),
26 virtual_time_(false), 186 virtual_time_(false),
27 weak_factory_(this) { 187 weak_factory_(this) {
28 pump_throttled_tasks_closure_.Reset(base::Bind( 188 pump_throttled_tasks_closure_.Reset(base::Bind(
29 &ThrottlingHelper::PumpThrottledTasks, weak_factory_.GetWeakPtr())); 189 &TaskQueueThrottler::PumpThrottledTasks, weak_factory_.GetWeakPtr()));
30 forward_immediate_work_closure_ = 190 forward_immediate_work_closure_ =
31 base::Bind(&ThrottlingHelper::OnTimeDomainHasImmediateWork, 191 base::Bind(&TaskQueueThrottler::OnTimeDomainHasImmediateWork,
32 weak_factory_.GetWeakPtr()); 192 weak_factory_.GetWeakPtr());
33 193
34 renderer_scheduler_->RegisterTimeDomain(time_domain_.get()); 194 renderer_scheduler_->RegisterTimeDomain(time_domain_.get());
35 } 195 }
36 196
37 ThrottlingHelper::~ThrottlingHelper() { 197 TaskQueueThrottler::~TaskQueueThrottler() {
38 // It's possible for queues to be still throttled, so we need to tidy up 198 // It's possible for queues to be still throttled, so we need to tidy up
39 // before unregistering the time domain. 199 // before unregistering the time domain.
40 for (const TaskQueueMap::value_type& map_entry : throttled_queues_) { 200 for (const TaskQueueMap::value_type& map_entry : throttled_queues_) {
41 TaskQueue* task_queue = map_entry.first; 201 TaskQueue* task_queue = map_entry.first;
42 task_queue->SetTimeDomain(renderer_scheduler_->real_time_domain()); 202 task_queue->SetTimeDomain(renderer_scheduler_->real_time_domain());
43 task_queue->RemoveFence(); 203 task_queue->RemoveFence();
44 } 204 }
45 205
46 renderer_scheduler_->UnregisterTimeDomain(time_domain_.get()); 206 renderer_scheduler_->UnregisterTimeDomain(time_domain_.get());
47 } 207 }
48 208
49 void ThrottlingHelper::SetQueueEnabled(TaskQueue* task_queue, bool enabled) { 209 void TaskQueueThrottler::SetQueueEnabled(TaskQueue* task_queue, bool enabled) {
50 TaskQueueMap::iterator find_it = throttled_queues_.find(task_queue); 210 TaskQueueMap::iterator find_it = throttled_queues_.find(task_queue);
51 211
52 if (find_it == throttled_queues_.end()) { 212 if (find_it == throttled_queues_.end()) {
53 task_queue->SetQueueEnabled(enabled); 213 task_queue->SetQueueEnabled(enabled);
54 return; 214 return;
55 } 215 }
56 216
57 find_it->second.enabled = enabled; 217 find_it->second.enabled = enabled;
58 218
59 // We don't enable the queue here because it's throttled and there might be 219 // We don't enable the queue here because it's throttled and there might be
60 // tasks in its work queue that would execute immediately rather than after 220 // tasks in its work queue that would execute immediately rather than after
61 // PumpThrottledTasks runs. 221 // PumpThrottledTasks runs.
62 if (!enabled) 222 if (!enabled)
63 task_queue->SetQueueEnabled(false); 223 task_queue->SetQueueEnabled(false);
64 } 224 }
65 225
66 void ThrottlingHelper::IncreaseThrottleRefCount(TaskQueue* task_queue) { 226 void TaskQueueThrottler::IncreaseThrottleRefCount(TaskQueue* task_queue) {
67 DCHECK_NE(task_queue, task_runner_.get()); 227 DCHECK_NE(task_queue, task_runner_.get());
68 228
69 if (virtual_time_) 229 if (virtual_time_)
70 return; 230 return;
71 231
72 std::pair<TaskQueueMap::iterator, bool> insert_result = 232 std::pair<TaskQueueMap::iterator, bool> insert_result =
73 throttled_queues_.insert(std::make_pair( 233 throttled_queues_.insert(std::make_pair(
74 task_queue, Metadata(1, task_queue->IsQueueEnabled()))); 234 task_queue, Metadata(1, task_queue->IsQueueEnabled())));
75 235
76 if (insert_result.second) { 236 if (insert_result.second) {
77 // The insert was succesful so we need to throttle the queue. 237 // The insert was successful so we need to throttle the queue.
78 task_queue->SetTimeDomain(time_domain_.get()); 238 task_queue->SetTimeDomain(time_domain_.get());
79 task_queue->RemoveFence(); 239 task_queue->RemoveFence();
80 task_queue->SetQueueEnabled(false); 240 task_queue->SetQueueEnabled(false);
81 241
82 if (!task_queue->IsEmpty()) { 242 base::Optional<base::TimeTicks> next_run_time =
83 if (task_queue->HasPendingImmediateWork()) { 243 task_queue->GetNextTaskRunTime();
84 OnTimeDomainHasImmediateWork(); 244
85 } else { 245 if (next_run_time) {
86 OnTimeDomainHasDelayedWork(); 246 MaybeSchedulePumpThrottledTasks(FROM_HERE, tick_clock_->NowTicks(),
87 } 247 next_run_time.value());
248
249 TRACE_EVENT1(tracing_category_, "TaskQueueThrottler_TaskQueueThrottled",
250 "task_queue", task_queue);
88 } 251 }
89 } else { 252 } else {
90 // An entry already existed in the map so we need to increment the refcount. 253 // An entry already existed in the map so we need to increment the refcount.
91 insert_result.first->second.throttling_ref_count++; 254 insert_result.first->second.throttling_ref_count++;
92 } 255 }
93 } 256 }
94 257
95 void ThrottlingHelper::DecreaseThrottleRefCount(TaskQueue* task_queue) { 258 void TaskQueueThrottler::DecreaseThrottleRefCount(TaskQueue* task_queue) {
96 if (virtual_time_) 259 if (virtual_time_)
97 return; 260 return;
98 261
99 TaskQueueMap::iterator iter = throttled_queues_.find(task_queue); 262 TaskQueueMap::iterator iter = throttled_queues_.find(task_queue);
100 263
101 if (iter != throttled_queues_.end() && 264 if (iter != throttled_queues_.end() &&
102 --iter->second.throttling_ref_count == 0) { 265 --iter->second.throttling_ref_count == 0) {
103 bool enabled = iter->second.enabled; 266 bool enabled = iter->second.enabled;
104 // The refcount has become zero, we need to unthrottle the queue. 267 // The refcount has become zero, we need to unthrottle the queue.
105 throttled_queues_.erase(iter); 268 throttled_queues_.erase(iter);
106 269
107 task_queue->SetTimeDomain(renderer_scheduler_->real_time_domain()); 270 task_queue->SetTimeDomain(renderer_scheduler_->real_time_domain());
108 task_queue->RemoveFence(); 271 task_queue->RemoveFence();
109 task_queue->SetQueueEnabled(enabled); 272 task_queue->SetQueueEnabled(enabled);
273
274 TRACE_EVENT1(tracing_category_, "TaskQueueThrottler_TaskQueueUntrottled",
275 "task_queue", task_queue);
110 } 276 }
111 } 277 }
112 278
113 bool ThrottlingHelper::IsThrottled(TaskQueue* task_queue) const { 279 bool TaskQueueThrottler::IsThrottled(TaskQueue* task_queue) const {
114 return throttled_queues_.find(task_queue) != throttled_queues_.end(); 280 return throttled_queues_.find(task_queue) != throttled_queues_.end();
115 } 281 }
116 282
117 void ThrottlingHelper::UnregisterTaskQueue(TaskQueue* task_queue) { 283 void TaskQueueThrottler::UnregisterTaskQueue(TaskQueue* task_queue) {
118 throttled_queues_.erase(task_queue); 284 throttled_queues_.erase(task_queue);
119 } 285 }
120 286
121 void ThrottlingHelper::OnTimeDomainHasImmediateWork() { 287 void TaskQueueThrottler::OnTimeDomainHasImmediateWork() {
122 // Forward to the main thread if called from another thread. 288 // Forward to the main thread if called from another thread.
123 if (!task_runner_->RunsTasksOnCurrentThread()) { 289 if (!task_runner_->RunsTasksOnCurrentThread()) {
124 task_runner_->PostTask(FROM_HERE, forward_immediate_work_closure_); 290 task_runner_->PostTask(FROM_HERE, forward_immediate_work_closure_);
125 return; 291 return;
126 } 292 }
127 TRACE_EVENT0(tracing_category_, 293 TRACE_EVENT0(tracing_category_,
128 "ThrottlingHelper::OnTimeDomainHasImmediateWork"); 294 "TaskQueueThrottler::OnTimeDomainHasImmediateWork");
129 base::TimeTicks now = tick_clock_->NowTicks(); 295 base::TimeTicks now = tick_clock_->NowTicks();
130 MaybeSchedulePumpThrottledTasksLocked(FROM_HERE, now, now); 296 MaybeSchedulePumpThrottledTasks(FROM_HERE, now, now);
131 } 297 }
132 298
133 void ThrottlingHelper::OnTimeDomainHasDelayedWork() { 299 void TaskQueueThrottler::OnTimeDomainHasDelayedWork() {
134 TRACE_EVENT0(tracing_category_, 300 TRACE_EVENT0(tracing_category_,
135 "ThrottlingHelper::OnTimeDomainHasDelayedWork"); 301 "TaskQueueThrottler::OnTimeDomainHasDelayedWork");
302 // TODO(altimin): Consider using TaskQueue::GetNextTaskRunTime here.
303 // to avoid unnecessary wakeups. Currently it's not possible because
304 // GetNextTaskRunTime requires a lock and OnTimeDomainHasDelayedWork
Sami 2016/09/12 17:49:10 nit: a lock on the queue?
altimin 2016/09/14 11:23:16 Done.
305 // can be called from TaskQueueImpl::SetTimeDomain, which acquires lock.
136 base::TimeTicks next_scheduled_delayed_task; 306 base::TimeTicks next_scheduled_delayed_task;
137 bool has_delayed_task = 307 bool has_delayed_task =
138 time_domain_->NextScheduledRunTime(&next_scheduled_delayed_task); 308 time_domain_->NextScheduledRunTime(&next_scheduled_delayed_task);
139 DCHECK(has_delayed_task); 309 DCHECK(has_delayed_task);
140 base::TimeTicks now = tick_clock_->NowTicks(); 310 base::TimeTicks now = tick_clock_->NowTicks();
141 MaybeSchedulePumpThrottledTasksLocked(FROM_HERE, now, 311 MaybeSchedulePumpThrottledTasks(FROM_HERE, now, next_scheduled_delayed_task);
142 next_scheduled_delayed_task);
143 } 312 }
144 313
145 void ThrottlingHelper::PumpThrottledTasks() { 314 namespace {
146 TRACE_EVENT0(tracing_category_, "ThrottlingHelper::PumpThrottledTasks");
147 pending_pump_throttled_tasks_runtime_ = base::TimeTicks();
148 315
149 LazyNow lazy_low(tick_clock_); 316 template <class T>
317 T Min(const base::Optional<T>& optional, const T& value) {
318 if (!optional) {
319 return value;
320 }
321 return std::min(optional.value(), value);
322 }
323
324 template <class T>
325 base::Optional<T> Min(const base::Optional<T>& a, const base::Optional<T>& b) {
326 if (!b)
327 return a;
328 if (!a)
329 return b;
330 return std::min(a.value(), b.value());
331 }
332
333 } // namespace
334
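
Note: the Min() overloads above treat an unset base::Optional as "no wake-up needed yet", so it never wins the comparison; folding wake-ups over every throttled queue therefore yields the earliest pending one, or stays unset if all queues were idle. A small standalone illustration of the same fold with std::optional (an assumption; the real code uses base::Optional):

    #include <algorithm>
    #include <optional>
    #include <vector>

    // Returns the earliest wake-up, or std::nullopt if there were none.
    std::optional<double> EarliestWakeUp(const std::vector<double>& wake_ups) {
      std::optional<double> earliest;
      for (double t : wake_ups)
        earliest = earliest ? std::min(*earliest, t) : t;
      return earliest;
    }
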
335 void TaskQueueThrottler::PumpThrottledTasks() {
336 TRACE_EVENT0("renderer.scheduler", "TaskQueueThrottler::PumpThrottledTasks");
337 pending_pump_throttled_tasks_runtime_.reset();
338
339 LazyNow lazy_now(tick_clock_);
340 base::Optional<base::TimeTicks> next_scheduled_delayed_task;
341
150 for (const TaskQueueMap::value_type& map_entry : throttled_queues_) { 342 for (const TaskQueueMap::value_type& map_entry : throttled_queues_) {
151 TaskQueue* task_queue = map_entry.first; 343 TaskQueue* task_queue = map_entry.first;
152 if (!map_entry.second.enabled || task_queue->IsEmpty()) 344 if (!map_entry.second.enabled || task_queue->IsEmpty())
153 continue; 345 continue;
154 346
347 TimeBudgetPool* time_budget_pool = GetTimeBudgetPoolForQueue(task_queue);
348 if (time_budget_pool && !time_budget_pool->IsAllowedToRun(lazy_now.Now())) {
349 base::TimeTicks next_run_time =
350 std::max(time_budget_pool->NextAllowedRunTime(), lazy_now.Now());
351
352 next_scheduled_delayed_task =
353 Min(next_scheduled_delayed_task, next_run_time);
354
355 TRACE_EVENT1(
356 "renderer.scheduler",
357 "TaskQueueThrottler::PumpThrottledTasks_ExpensiveTaskThrottled",
358 "throttle_time_in_seconds",
359 (next_run_time - lazy_now.Now()).InSecondsF());
360
361 renderer_scheduler_->CreateTraceEventObjectSnapshot();
362
363 continue;
364 }
365
366 base::Optional<base::TimeTicks> wake_up =
367 task_queue->GetNextScheduledWakeUp();
368 next_scheduled_delayed_task = Min(next_scheduled_delayed_task, wake_up);
369
370 // GetNextScheduledWakeUp() moves tasks from incoming queue to delayed
alex clarke (OOO till 29th) 2016/09/12 17:45:26 1. I'm not sure that's right, InsertFence should n
altimin 2016/09/14 11:23:16 My comment was badly phrased. We just want to move
371 // queue,
Sami 2016/09/12 17:49:10 nit: please reformat
altimin 2016/09/14 11:23:16 Done.
372 // so InsertFence() should be called after it in order to correctly
373 // get next wake up time.
155 task_queue->SetQueueEnabled(true); 374 task_queue->SetQueueEnabled(true);
156 task_queue->InsertFence(); 375 task_queue->InsertFence();
157 } 376 }
158 // Make sure NextScheduledRunTime gives us an up-to date result.
159 time_domain_->ClearExpiredWakeups();
160 377
161 base::TimeTicks next_scheduled_delayed_task; 378 // Maybe schedule a call to TaskQueueThrottler::PumpThrottledTasks if there is
162 // Maybe schedule a call to ThrottlingHelper::PumpThrottledTasks if there is 379 // a pending delayed task or a throttled task ready to run.
163 // a pending delayed task. NOTE posting a non-delayed task in the future will 380 // NOTE: posting a non-delayed task in the future will result in
164 // result in ThrottlingHelper::OnTimeDomainHasImmediateWork being called. 381 // TaskQueueThrottler::OnTimeDomainHasImmediateWork being called.
165 if (time_domain_->NextScheduledRunTime(&next_scheduled_delayed_task)) { 382 if (next_scheduled_delayed_task) {
166 MaybeSchedulePumpThrottledTasksLocked(FROM_HERE, lazy_low.Now(), 383 MaybeSchedulePumpThrottledTasks(FROM_HERE, lazy_now.Now(),
167 next_scheduled_delayed_task); 384 *next_scheduled_delayed_task);
168 } 385 }
169 } 386 }
170 387
171 /* static */ 388 /* static */
172 base::TimeTicks ThrottlingHelper::ThrottledRunTime( 389 base::TimeTicks TaskQueueThrottler::AlignedThrottledRunTime(
173 base::TimeTicks unthrottled_runtime) { 390 base::TimeTicks unthrottled_runtime) {
174 const base::TimeDelta one_second = base::TimeDelta::FromSeconds(1); 391 const base::TimeDelta one_second = base::TimeDelta::FromSeconds(1);
175 return unthrottled_runtime + one_second - 392 return unthrottled_runtime + one_second -
176 ((unthrottled_runtime - base::TimeTicks()) % one_second); 393 ((unthrottled_runtime - base::TimeTicks()) % one_second);
177 } 394 }
178 395
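
Note: AlignedThrottledRunTime() rounds a wake-up time up to the next whole-second boundary, and a time already on a boundary still moves a full second later (the subtraction removes a zero remainder). A standalone sketch of the same rule over non-negative double seconds since an arbitrary origin (hypothetical names):

    #include <cassert>
    #include <cmath>

    // Align |t| (seconds since some origin, t >= 0) to the next one-second
    // boundary; the result is always strictly later than |t|.
    double AlignToNextSecond(double t) {
      return t + 1.0 - std::fmod(t, 1.0);
    }

    int main() {
      assert(AlignToNextSecond(2.25) == 3.0);
      assert(AlignToNextSecond(5.0) == 6.0);  // On-boundary times still move up.
      return 0;
    }
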
179 void ThrottlingHelper::MaybeSchedulePumpThrottledTasksLocked( 396 void TaskQueueThrottler::MaybeSchedulePumpThrottledTasks(
180 const tracked_objects::Location& from_here, 397 const tracked_objects::Location& from_here,
181 base::TimeTicks now, 398 base::TimeTicks now,
182 base::TimeTicks unthrottled_runtime) { 399 base::TimeTicks runtime) {
183 if (virtual_time_) 400 if (virtual_time_)
184 return; 401 return;
185 402
186 base::TimeTicks throttled_runtime = 403 runtime = std::max(now, AlignedThrottledRunTime(runtime));
187 ThrottledRunTime(std::max(now, unthrottled_runtime)); 404
188 // If there is a pending call to PumpThrottledTasks and it's sooner than 405 // If there is a pending call to PumpThrottledTasks and it's sooner than
189 // |unthrottled_runtime| then return. 406 // |runtime| then return.
190 if (!pending_pump_throttled_tasks_runtime_.is_null() && 407 if (pending_pump_throttled_tasks_runtime_ &&
191 throttled_runtime >= pending_pump_throttled_tasks_runtime_) { 408 runtime >= pending_pump_throttled_tasks_runtime_.value()) {
192 return; 409 return;
193 } 410 }
194 411
195 pending_pump_throttled_tasks_runtime_ = throttled_runtime; 412 pending_pump_throttled_tasks_runtime_ = runtime;
196 413
197 pump_throttled_tasks_closure_.Cancel(); 414 pump_throttled_tasks_closure_.Cancel();
198 415
199 base::TimeDelta delay = pending_pump_throttled_tasks_runtime_ - now; 416 base::TimeDelta delay = pending_pump_throttled_tasks_runtime_.value() - now;
200 TRACE_EVENT1(tracing_category_, 417 TRACE_EVENT1(tracing_category_,
201 "ThrottlingHelper::MaybeSchedulePumpThrottledTasksLocked", 418 "TaskQueueThrottler::MaybeSchedulePumpThrottledTasks",
202 "delay_till_next_pump_ms", delay.InMilliseconds()); 419 "delay_till_next_pump_ms", delay.InMilliseconds());
203 task_runner_->PostDelayedTask( 420 task_runner_->PostDelayedTask(
204 from_here, pump_throttled_tasks_closure_.callback(), delay); 421 from_here, pump_throttled_tasks_closure_.callback(), delay);
205 } 422 }
206 423
207 void ThrottlingHelper::EnableVirtualTime() { 424 void TaskQueueThrottler::EnableVirtualTime() {
208 virtual_time_ = true; 425 virtual_time_ = true;
209 426
210 pump_throttled_tasks_closure_.Cancel(); 427 pump_throttled_tasks_closure_.Cancel();
211 428
212 while (!throttled_queues_.empty()) { 429 while (!throttled_queues_.empty()) {
213 TaskQueue* task_queue = throttled_queues_.begin()->first; 430 TaskQueue* task_queue = throttled_queues_.begin()->first;
214 bool enabled = throttled_queues_.begin()->second.enabled; 431 bool enabled = throttled_queues_.begin()->second.enabled;
215 432
216 throttled_queues_.erase(throttled_queues_.begin()); 433 throttled_queues_.erase(throttled_queues_.begin());
217 434
218 task_queue->SetTimeDomain(renderer_scheduler_->GetVirtualTimeDomain()); 435 task_queue->SetTimeDomain(renderer_scheduler_->GetVirtualTimeDomain());
219 task_queue->RemoveFence(); 436 task_queue->RemoveFence();
220 task_queue->SetQueueEnabled(enabled); 437 task_queue->SetQueueEnabled(enabled);
221 } 438 }
222 } 439 }
223 440
441 TaskQueueThrottler::TimeBudgetPool* TaskQueueThrottler::CreateTimeBudgetPool(
442 const char* name) {
443 TimeBudgetPool* time_budget_pool =
444 new TimeBudgetPool(name, this, tick_clock_->NowTicks());
445 time_budget_pools_[time_budget_pool] = base::WrapUnique(time_budget_pool);
446 return time_budget_pool;
447 }
448
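
Note: for context, a sketch of how a client is expected to drive this API — create a pool, give it a CPU fraction, and attach the queues it should govern. The call site and variable names below are illustrative only, not taken from this patch:

    // Illustrative only; the real call site and error handling are omitted.
    TaskQueueThrottler::TimeBudgetPool* pool =
        task_queue_throttler->CreateTimeBudgetPool("background");
    base::TimeTicks now = tick_clock->NowTicks();
    pool->SetTimeBudget(now, 0.01);         // Throttle to 1% of wall-clock time.
    pool->AddQueue(now, timer_task_queue);  // Budget only gates queues already
                                            // throttled via IncreaseThrottleRefCount().
    ...
    pool->RemoveQueue(tick_clock->NowTicks(), timer_task_queue);
    pool->Close();                          // Requires all queues removed first.
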
449 void TaskQueueThrottler::OnTaskRunTimeReported(TaskQueue* task_queue,
450 base::TimeTicks start_time,
451 base::TimeTicks end_time) {
452 if (throttled_queues_.find(task_queue) == throttled_queues_.end())
alex clarke (OOO till 29th) 2016/09/12 17:45:26 This is going to get called a lot, currently TaskQ
altimin 2016/09/14 11:23:16 Done.
453 return;
454
455 TimeBudgetPool* time_budget_pool = GetTimeBudgetPoolForQueue(task_queue);
456 if (time_budget_pool) {
457 time_budget_pool->RecordTaskRunTime(end_time - start_time);
458 if (!time_budget_pool->IsAllowedToRun(end_time)) {
459 // This task was too expensive and all following tasks are throttled
460 // until explicitly allowed.
461 task_queue->SetQueueEnabled(false);
462
463 if (task_queue->HasPendingImmediateWork()) {
464 MaybeSchedulePumpThrottledTasks(
465 FROM_HERE, end_time,
466 std::max(end_time, time_budget_pool->NextAllowedRunTime()));
467 }
468 }
469 }
470 }
471
472 void TaskQueueThrottler::AsValueInto(base::trace_event::TracedValue* state,
473 base::TimeTicks now) const {
474 if (pending_pump_throttled_tasks_runtime_) {
475 state->SetDouble(
476 "next_throttled_tasks_pump_in_seconds",
477 (pending_pump_throttled_tasks_runtime_.value() - now).InSecondsF());
478 }
479
480 state->BeginDictionary("time_budget_pools");
481
482 for (const auto& map_entry : time_budget_pools_) {
483 TaskQueueThrottler::TimeBudgetPool* pool = map_entry.first;
484 pool->AsValueInto(state, now);
485 }
486
487 state->EndDictionary();
488 }
489
490 TaskQueueThrottler::TimeBudgetPool*
491 TaskQueueThrottler::GetTimeBudgetPoolForQueue(TaskQueue* queue) {
alex clarke (OOO till 29th) 2016/09/12 17:45:26 If you move TimeBudgetPool* into the Metadata stru
altimin 2016/09/14 11:23:16 It will break the assumption that we have only thr
492 auto find_it = time_budget_pool_for_queue_.find(queue);
493 if (find_it == time_budget_pool_for_queue_.end()) {
494 return nullptr;
495 } else {
496 TimeBudgetPool* result = find_it->second;
497 DCHECK(result);
498 return result;
499 }
500 }
501
502 void TaskQueueThrottler::MaybeSchedulePumpQueue(
503 const tracked_objects::Location& from_here,
504 base::TimeTicks now,
505 TaskQueue* queue) {
506 base::Optional<base::TimeTicks> next_run_time = queue->GetNextTaskRunTime();
507 if (next_run_time) {
508 MaybeSchedulePumpThrottledTasks(from_here, now, next_run_time.value());
509 }
510 }
511
512 void TaskQueueThrottler::MaybeSchedulePumpQueueWithBudget(
513 const tracked_objects::Location& from_here,
514 base::TimeTicks now,
515 TaskQueue* queue,
516 TaskQueueThrottler::TimeBudgetPool* budget) {
517 base::Optional<base::TimeTicks> next_run_time = budget->NextAllowedRunTime();
Sami 2016/09/12 17:49:10 I think it's a little weird for this to ask inform
altimin 2016/09/14 11:23:16 Done.
518
519 if (!budget->IsAllowedToRun(now)) {
520 next_run_time = Min(next_run_time, queue->GetNextTaskRunTime());
521 }
522
523 if (next_run_time) {
524 MaybeSchedulePumpThrottledTasks(from_here, now, next_run_time.value());
525 }
526 }
527
224 } // namespace scheduler 528 } // namespace scheduler
225 } // namespace blink 529 } // namespace blink