Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "platform/scheduler/renderer/task_queue_throttler.h" | 5 #include "platform/scheduler/renderer/task_queue_throttler.h" |
| 6 | 6 |
| 7 #include <cstdint> | 7 #include <cstdint> |
| 8 | 8 |
| 9 #include "base/format_macros.h" | 9 #include "base/format_macros.h" |
| 10 #include "base/logging.h" | 10 #include "base/logging.h" |
| (...skipping 126 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 137 if (!task_queue->IsEmpty()) { | 137 if (!task_queue->IsEmpty()) { |
| 138 LazyNow lazy_now(tick_clock_); | 138 LazyNow lazy_now(tick_clock_); |
| 139 OnQueueNextWakeUpChanged(task_queue, | 139 OnQueueNextWakeUpChanged(task_queue, |
| 140 NextTaskRunTime(&lazy_now, task_queue).value()); | 140 NextTaskRunTime(&lazy_now, task_queue).value()); |
| 141 } | 141 } |
| 142 } | 142 } |
| 143 | 143 |
| 144 void TaskQueueThrottler::DecreaseThrottleRefCount(TaskQueue* task_queue) { | 144 void TaskQueueThrottler::DecreaseThrottleRefCount(TaskQueue* task_queue) { |
| 145 TaskQueueMap::iterator iter = queue_details_.find(task_queue); | 145 TaskQueueMap::iterator iter = queue_details_.find(task_queue); |
| 146 | 146 |
| 147 if (iter == queue_details_.end() || | 147 if (iter == queue_details_.end()) |
| 148 --iter->second.throttling_ref_count != 0) { | |
| 149 return; | 148 return; |
| 150 } | 149 if (iter->second.throttling_ref_count == 0) |
| 150 return; | |
| 151 if (--iter->second.throttling_ref_count != 0) | |
| 152 return; | |
| 151 | 153 |
| 152 TRACE_EVENT1(tracing_category_, "TaskQueueThrottler_TaskQueueUnthrottled", | 154 TRACE_EVENT1(tracing_category_, "TaskQueueThrottler_TaskQueueUnthrottled", |
| 153 "task_queue", task_queue); | 155 "task_queue", task_queue); |
| 154 | 156 |
| 155 task_queue->SetObserver(nullptr); | 157 task_queue->SetObserver(nullptr); |
| 156 | 158 |
| 157 MaybeDeleteQueueMetadata(iter); | 159 MaybeDeleteQueueMetadata(iter); |
| 158 | 160 |
| 159 if (!allow_throttling_) | 161 if (!allow_throttling_) |
| 160 return; | 162 return; |
| (...skipping 10 matching lines...) Expand all Loading... | |
| 171 if (find_it == queue_details_.end()) | 173 if (find_it == queue_details_.end()) |
| 172 return false; | 174 return false; |
| 173 return find_it->second.throttling_ref_count > 0; | 175 return find_it->second.throttling_ref_count > 0; |
| 174 } | 176 } |
| 175 | 177 |
| 176 void TaskQueueThrottler::UnregisterTaskQueue(TaskQueue* task_queue) { | 178 void TaskQueueThrottler::UnregisterTaskQueue(TaskQueue* task_queue) { |
| 177 auto find_it = queue_details_.find(task_queue); | 179 auto find_it = queue_details_.find(task_queue); |
| 178 if (find_it == queue_details_.end()) | 180 if (find_it == queue_details_.end()) |
| 179 return; | 181 return; |
| 180 | 182 |
| 181 LazyNow lazy_now(tick_clock_); | |
| 182 std::unordered_set<BudgetPool*> budget_pools = find_it->second.budget_pools; | 183 std::unordered_set<BudgetPool*> budget_pools = find_it->second.budget_pools; |
| 183 for (BudgetPool* budget_pool : budget_pools) { | 184 for (BudgetPool* budget_pool : budget_pools) { |
| 184 budget_pool->RemoveQueue(lazy_now.Now(), task_queue); | 185 budget_pool->UnregisterQueue(task_queue); |
| 185 } | 186 } |
| 186 | 187 |
| 187 // Iterator may have been deleted by BudgetPool::RemoveQueue, so don't | 188 // Iterator may have been deleted by BudgetPool::UnregisterQueue, so don't |
| 188 // use it here. | 189 // use it here. |
| 189 queue_details_.erase(task_queue); | 190 queue_details_.erase(task_queue); |
| 190 | 191 |
| 191 // NOTE: Observer is automatically unregistered when unregistering task queue. | 192 // NOTE: Observer is automatically unregistered when unregistering task queue. |
| 192 } | 193 } |
| 193 | 194 |
| 194 void TaskQueueThrottler::OnQueueNextWakeUpChanged( | 195 void TaskQueueThrottler::OnQueueNextWakeUpChanged( |
| 195 TaskQueue* queue, | 196 TaskQueue* queue, |
| 196 base::TimeTicks next_wake_up) { | 197 base::TimeTicks next_wake_up) { |
| 197 if (!task_runner_->RunsTasksOnCurrentThread()) { | 198 if (!task_runner_->RunsTasksOnCurrentThread()) { |
| 198 task_runner_->PostTask( | 199 task_runner_->PostTask( |
| 199 FROM_HERE, | 200 FROM_HERE, |
| 200 base::Bind(forward_immediate_work_callback_, queue, next_wake_up)); | 201 base::Bind(forward_immediate_work_callback_, queue, next_wake_up)); |
| 201 return; | 202 return; |
| 202 } | 203 } |
| 203 | 204 |
| 204 TRACE_EVENT0(tracing_category_, | 205 TRACE_EVENT0(tracing_category_, |
| 205 "TaskQueueThrottler::OnQueueNextWakeUpChanged"); | 206 "TaskQueueThrottler::OnQueueNextWakeUpChanged"); |
| 206 | 207 |
| 207 // We don't expect this to get called for disabled queues, but we can't DCHECK | 208 // We don't expect this to get called for disabled queues, but we can't DCHECK |
| 208 // because of the above thread hop. Just bail out if the queue is disabled. | 209 // because of the above thread hop. Just bail out if the queue is disabled. |
| 209 if (!queue->IsQueueEnabled()) | 210 if (!queue->IsQueueEnabled()) |
| 210 return; | 211 return; |
| 211 | 212 |
| 212 base::TimeTicks now = tick_clock_->NowTicks(); | 213 base::TimeTicks now = tick_clock_->NowTicks(); |
| 214 | |
| 215 auto find_it = queue_details_.find(queue); | |
| 216 if (find_it == queue_details_.end()) | |
| 217 return; | |
| 218 | |
| 219 for (BudgetPool* budget_pool : find_it->second.budget_pools) { | |
| 220 budget_pool->OnTaskQueueHasWork(queue, now, next_wake_up); | |
|
Sami
2017/04/26 13:09:08
Could we also call this OnQueueNextWakeUpChanged b
altimin
2017/04/26 14:31:29
Done.
| |
| 221 } | |
| 222 | |
| 223 base::TimeTicks next_allowed_run_time = | |
| 224 GetNextAllowedRunTime(queue, next_wake_up); | |
| 225 // TODO(altimin): Remove after moving to budget pools completely. | |
|
Sami
2017/04/26 13:09:08
Could you expand this comment? I'm not sure what i
altimin
2017/04/26 14:31:29
Done.
| |
| 213 MaybeSchedulePumpThrottledTasks( | 226 MaybeSchedulePumpThrottledTasks( |
| 214 FROM_HERE, now, | 227 FROM_HERE, now, std::max(next_wake_up, next_allowed_run_time)); |
| 215 std::max(GetNextAllowedRunTime(now, queue), next_wake_up)); | |
| 216 } | 228 } |
| 217 | 229 |
| 218 void TaskQueueThrottler::PumpThrottledTasks() { | 230 void TaskQueueThrottler::PumpThrottledTasks() { |
| 219 TRACE_EVENT0(tracing_category_, "TaskQueueThrottler::PumpThrottledTasks"); | 231 TRACE_EVENT0(tracing_category_, "TaskQueueThrottler::PumpThrottledTasks"); |
| 220 pending_pump_throttled_tasks_runtime_.reset(); | 232 pending_pump_throttled_tasks_runtime_.reset(); |
| 221 | 233 |
| 222 LazyNow lazy_now(tick_clock_); | 234 LazyNow lazy_now(tick_clock_); |
| 223 base::Optional<base::TimeTicks> next_scheduled_delayed_task; | 235 base::Optional<base::TimeTicks> next_scheduled_delayed_task; |
| 224 | 236 |
| 237 for (const auto& pair : budget_pools_) | |
| 238 pair.first->OnWakeUp(lazy_now.Now()); | |
| 239 | |
| 225 for (const TaskQueueMap::value_type& map_entry : queue_details_) { | 240 for (const TaskQueueMap::value_type& map_entry : queue_details_) { |
| 226 TaskQueue* task_queue = map_entry.first; | 241 TaskQueue* task_queue = map_entry.first; |
| 227 if (task_queue->IsEmpty() || !IsThrottled(task_queue)) | 242 UpdateQueueThrottlingStateInternal(lazy_now.Now(), task_queue, true); |
| 228 continue; | |
| 229 | |
| 230 // Don't enable queues whose budget pool doesn't allow them to run now. | |
| 231 base::TimeTicks next_allowed_run_time = | |
| 232 GetNextAllowedRunTime(lazy_now.Now(), task_queue); | |
| 233 base::Optional<base::TimeTicks> next_desired_run_time = | |
| 234 NextTaskRunTime(&lazy_now, task_queue); | |
| 235 | |
| 236 if (next_desired_run_time && | |
| 237 next_allowed_run_time > next_desired_run_time.value()) { | |
| 238 TRACE_EVENT1( | |
| 239 tracing_category_, | |
| 240 "TaskQueueThrottler::PumpThrottledTasks_ExpensiveTaskThrottled", | |
| 241 "throttle_time_in_seconds", | |
| 242 (next_allowed_run_time - next_desired_run_time.value()).InSecondsF()); | |
| 243 | |
| 244 // Schedule a pump for queue which was disabled because of time budget. | |
| 245 next_scheduled_delayed_task = | |
| 246 Min(next_scheduled_delayed_task, next_allowed_run_time); | |
| 247 | |
| 248 continue; | |
| 249 } | |
| 250 | |
| 251 next_scheduled_delayed_task = | |
| 252 Min(next_scheduled_delayed_task, task_queue->GetNextScheduledWakeUp()); | |
| 253 | |
| 254 if (next_allowed_run_time > lazy_now.Now()) | |
| 255 continue; | |
| 256 | |
| 257 // Remove previous fence and install a new one, allowing all tasks posted | |
| 258 // on |task_queue| up until this point to run and block all further tasks. | |
| 259 task_queue->InsertFence(TaskQueue::InsertFencePosition::NOW); | |
| 260 } | |
| 261 | |
| 262 // Maybe schedule a call to TaskQueueThrottler::PumpThrottledTasks if there is | |
| 263 // a pending delayed task or a throttled task ready to run. | |
| 264 // NOTE: posting a non-delayed task in the future will result in | |
| 265 // TaskQueueThrottler::OnTimeDomainHasImmediateWork being called. | |
| 266 if (next_scheduled_delayed_task) { | |
| 267 MaybeSchedulePumpThrottledTasks(FROM_HERE, lazy_now.Now(), | |
| 268 *next_scheduled_delayed_task); | |
| 269 } | 243 } |
| 270 } | 244 } |
| 271 | 245 |
| 272 /* static */ | 246 /* static */ |
| 273 base::TimeTicks TaskQueueThrottler::AlignedThrottledRunTime( | 247 base::TimeTicks TaskQueueThrottler::AlignedThrottledRunTime( |
| 274 base::TimeTicks unthrottled_runtime) { | 248 base::TimeTicks unthrottled_runtime) { |
| 275 const base::TimeDelta one_second = base::TimeDelta::FromSeconds(1); | 249 const base::TimeDelta one_second = base::TimeDelta::FromSeconds(1); |
| 276 return unthrottled_runtime + one_second - | 250 return unthrottled_runtime + one_second - |
| 277 ((unthrottled_runtime - base::TimeTicks()) % one_second); | 251 ((unthrottled_runtime - base::TimeTicks()) % one_second); |
| 278 } | 252 } |
| (...skipping 29 matching lines...) Expand all Loading... | |
| 308 } | 282 } |
| 309 | 283 |
| 310 CPUTimeBudgetPool* TaskQueueThrottler::CreateCPUTimeBudgetPool( | 284 CPUTimeBudgetPool* TaskQueueThrottler::CreateCPUTimeBudgetPool( |
| 311 const char* name) { | 285 const char* name) { |
| 312 CPUTimeBudgetPool* time_budget_pool = | 286 CPUTimeBudgetPool* time_budget_pool = |
| 313 new CPUTimeBudgetPool(name, this, tick_clock_->NowTicks()); | 287 new CPUTimeBudgetPool(name, this, tick_clock_->NowTicks()); |
| 314 budget_pools_[time_budget_pool] = base::WrapUnique(time_budget_pool); | 288 budget_pools_[time_budget_pool] = base::WrapUnique(time_budget_pool); |
| 315 return time_budget_pool; | 289 return time_budget_pool; |
| 316 } | 290 } |
| 317 | 291 |
| 292 WakeUpBudgetPool* TaskQueueThrottler::CreateWakeUpBudgetPool(const char* name) { | |
| 293 WakeUpBudgetPool* wake_up_budget_pool = | |
| 294 new WakeUpBudgetPool(name, this, tick_clock_->NowTicks()); | |
| 295 budget_pools_[wake_up_budget_pool] = base::WrapUnique(wake_up_budget_pool); | |
| 296 return wake_up_budget_pool; | |
| 297 } | |
| 298 | |
| 318 void TaskQueueThrottler::OnTaskRunTimeReported(TaskQueue* task_queue, | 299 void TaskQueueThrottler::OnTaskRunTimeReported(TaskQueue* task_queue, |
| 319 base::TimeTicks start_time, | 300 base::TimeTicks start_time, |
| 320 base::TimeTicks end_time) { | 301 base::TimeTicks end_time) { |
| 321 if (!IsThrottled(task_queue)) | 302 if (!IsThrottled(task_queue)) |
| 322 return; | 303 return; |
| 323 | 304 |
| 324 auto find_it = queue_details_.find(task_queue); | 305 auto find_it = queue_details_.find(task_queue); |
| 325 if (find_it == queue_details_.end()) | 306 if (find_it == queue_details_.end()) |
| 326 return; | 307 return; |
| 327 | 308 |
| 328 for (BudgetPool* budget_pool : find_it->second.budget_pools) { | 309 for (BudgetPool* budget_pool : find_it->second.budget_pools) { |
| 329 budget_pool->RecordTaskRunTime(start_time, end_time); | 310 budget_pool->RecordTaskRunTime(task_queue, start_time, end_time); |
| 330 if (!budget_pool->HasEnoughBudgetToRun(end_time)) | |
| 331 budget_pool->BlockThrottledQueues(end_time); | |
| 332 } | 311 } |
| 333 } | 312 } |
| 334 | 313 |
| 335 void TaskQueueThrottler::BlockQueue(base::TimeTicks now, TaskQueue* queue) { | 314 void TaskQueueThrottler::UpdateQueueThrottlingState(base::TimeTicks now, |
| 336 if (!IsThrottled(queue)) | 315 TaskQueue* queue) { |
| 316 UpdateQueueThrottlingStateInternal(now, queue, false); | |
| 317 } | |
| 318 | |
| 319 void TaskQueueThrottler::UpdateQueueThrottlingStateInternal(base::TimeTicks now, | |
| 320 TaskQueue* queue, | |
| 321 bool is_wake_up) { | |
| 322 if (!queue->IsQueueEnabled() || !IsThrottled(queue)) { | |
| 337 return; | 323 return; |
| 324 } | |
| 338 | 325 |
| 339 queue->InsertFence(TaskQueue::InsertFencePosition::BEGINNING_OF_TIME); | 326 LazyNow lazy_now(now); |
| 340 SchedulePumpQueue(FROM_HERE, now, queue); | 327 |
| 328 base::Optional<base::TimeTicks> next_desired_run_time = | |
| 329 NextTaskRunTime(&lazy_now, queue); | |
| 330 | |
| 331 if (!next_desired_run_time) { | |
| 332 // This queue is empty. Given that a new task can arrive at any moment, | |
| 333 // block the queue completely and update the state upon the notification | |
| 334 // about a new task. | |
| 335 queue->InsertFence(TaskQueue::InsertFencePosition::NOW); | |
| 336 return; | |
| 337 } | |
| 338 | |
| 339 if (CanRunTasksUntil(queue, now, next_desired_run_time.value())) { | |
|
Sami
2017/04/27 12:12:30
Reading this more, I guess I'm still a bit confuse
altimin
2017/04/27 12:23:50
Yes, but it will be at least as complex as fence i
| |
| 340 // We can run up until the next task uninterrupted. Remove the fence | |
| 341 // to allow new tasks to run immediately. | |
|
Sami
2017/04/26 13:09:08
This is a little confusing: the next task doesn't
altimin
2017/04/26 14:31:29
Yes. Also there can be new tasks coming from diffe
Sami
2017/04/27 12:12:30
I see. I wonder if a fence with a timestamp would
altimin
2017/04/27 12:23:51
See comment above.
| |
| 342 queue->RemoveFence(); | |
| 343 | |
| 344 // TaskQueueThrottler does not schedule wake-ups implicitly, we need | |
| 345 // to be explicit. | |
| 346 if (next_desired_run_time.value() != now) { | |
| 347 time_domain_->SetNextTask(next_desired_run_time.value()); | |
|
Sami
2017/04/26 13:09:08
SetNextTaskRunTime? (also, why doesn't this just s
altimin
2017/04/26 14:31:29
ThrottledTimeDomain does not schedule wake-ups (do
| |
| 348 } | |
| 349 return; | |
| 350 } | |
| 351 | |
| 352 if (CanRunTasksAt(queue, now, is_wake_up)) { | |
| 353 // We can run a task now, but we can't run until the next scheduled task. | |
| 354 // Insert a fresh fence to unblock queue and schedule a pump for the | |
| 355 // next wake-up. | |
| 356 queue->InsertFence(TaskQueue::InsertFencePosition::NOW); | |
| 357 | |
| 358 base::Optional<base::TimeTicks> next_wake_up = | |
|
Sami
2017/04/26 13:09:08
What's the difference between next_desired_run_tim
altimin
2017/04/26 14:31:29
next_desired_run_time takes into account tasks tha
| |
| 359 queue->GetNextScheduledWakeUp(); | |
| 360 if (next_wake_up) { | |
| 361 MaybeSchedulePumpThrottledTasks( | |
| 362 FROM_HERE, now, GetNextAllowedRunTime(queue, next_wake_up.value())); | |
| 363 } | |
| 364 return; | |
| 365 } | |
| 366 | |
| 367 // Ensure that the correct type of fence is blocking a queue that can't run. | |
| 368 base::Optional<QueueBlockType> block_type = GetQueueBlockType(now, queue); | |
| 369 if (block_type == QueueBlockType::kAllTasks) { | |
| 370 queue->InsertFence(TaskQueue::InsertFencePosition::BEGINNING_OF_TIME); | |
| 371 } else if (block_type == QueueBlockType::kNewTasksOnly && | |
| 372 !queue->HasFence()) { | |
| 373 // Insert a new non-fully blocking fence only when there is no fence already | |
| 374 // in order to avoid undesired unblocking of old tasks. | |
| 375 queue->InsertFence(TaskQueue::InsertFencePosition::NOW); | |
| 376 } | |
| 377 DCHECK(block_type); | |
|
Sami
2017/04/26 13:09:08
nit: DCHECK before actually using |block_type|
Sami
2017/04/27 12:12:30
Missed this?
altimin
2017/04/27 12:23:51
Done, forgot to reply.
| |
| 378 | |
| 379 // Schedule a pump. | |
| 380 base::TimeTicks next_run_time = | |
| 381 GetNextAllowedRunTime(queue, next_desired_run_time.value()); | |
| 382 MaybeSchedulePumpThrottledTasks(FROM_HERE, now, next_run_time); | |
| 383 } | |
| 384 | |
| 385 base::Optional<QueueBlockType> TaskQueueThrottler::GetQueueBlockType( | |
| 386 base::TimeTicks now, | |
| 387 TaskQueue* queue) { | |
| 388 auto find_it = queue_details_.find(queue); | |
| 389 if (find_it == queue_details_.end()) | |
| 390 return base::nullopt; | |
| 391 | |
| 392 bool has_new_tasks_only_block = false; | |
| 393 | |
| 394 for (BudgetPool* budget_pool : find_it->second.budget_pools) { | |
| 395 if (!budget_pool->CanRunTasksAt(now, false)) { | |
| 396 if (budget_pool->GetBlockType() == QueueBlockType::kAllTasks) | |
| 397 return QueueBlockType::kAllTasks; | |
| 398 // GetBlockType() == QueueBlockType::kNewTasksOnly | |
|
Sami
2017/04/26 13:09:08
DCHECK?
altimin
2017/04/26 14:31:29
Done.
| |
| 399 has_new_tasks_only_block = true; | |
|
Sami
2017/04/26 13:09:08
break?
altimin
2017/04/26 14:31:29
No, we can still encounter QueueBlockType::kAllTas
Sami
2017/04/27 12:12:30
Ah got it.
| |
| 400 } | |
| 401 } | |
| 402 | |
| 403 if (has_new_tasks_only_block) | |
| 404 return QueueBlockType::kNewTasksOnly; | |
| 405 return base::nullopt; | |
| 341 } | 406 } |
| 342 | 407 |
| 343 void TaskQueueThrottler::AsValueInto(base::trace_event::TracedValue* state, | 408 void TaskQueueThrottler::AsValueInto(base::trace_event::TracedValue* state, |
| 344 base::TimeTicks now) const { | 409 base::TimeTicks now) const { |
| 345 if (pending_pump_throttled_tasks_runtime_) { | 410 if (pending_pump_throttled_tasks_runtime_) { |
| 346 state->SetDouble( | 411 state->SetDouble( |
| 347 "next_throttled_tasks_pump_in_seconds", | 412 "next_throttled_tasks_pump_in_seconds", |
| 348 (pending_pump_throttled_tasks_runtime_.value() - now).InSecondsF()); | 413 (pending_pump_throttled_tasks_runtime_.value() - now).InSecondsF()); |
| 349 } | 414 } |
| 350 | 415 |
| (...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 391 | 456 |
| 392 find_it->second.budget_pools.erase(budget_pool); | 457 find_it->second.budget_pools.erase(budget_pool); |
| 393 | 458 |
| 394 MaybeDeleteQueueMetadata(find_it); | 459 MaybeDeleteQueueMetadata(find_it); |
| 395 } | 460 } |
| 396 | 461 |
| 397 void TaskQueueThrottler::UnregisterBudgetPool(BudgetPool* budget_pool) { | 462 void TaskQueueThrottler::UnregisterBudgetPool(BudgetPool* budget_pool) { |
| 398 budget_pools_.erase(budget_pool); | 463 budget_pools_.erase(budget_pool); |
| 399 } | 464 } |
| 400 | 465 |
| 401 void TaskQueueThrottler::UnblockQueue(base::TimeTicks now, TaskQueue* queue) { | 466 base::TimeTicks TaskQueueThrottler::GetNextAllowedRunTime( |
| 402 SchedulePumpQueue(FROM_HERE, now, queue); | 467 TaskQueue* queue, |
| 403 } | 468 base::TimeTicks desired_run_time) { |
| 404 | 469 base::TimeTicks next_run_time = desired_run_time; |
| 405 void TaskQueueThrottler::SchedulePumpQueue( | |
| 406 const tracked_objects::Location& from_here, | |
| 407 base::TimeTicks now, | |
| 408 TaskQueue* queue) { | |
| 409 if (!IsThrottled(queue)) | |
| 410 return; | |
| 411 | |
| 412 LazyNow lazy_now(now); | |
| 413 base::Optional<base::TimeTicks> next_desired_run_time = | |
| 414 NextTaskRunTime(&lazy_now, queue); | |
| 415 if (!next_desired_run_time) | |
| 416 return; | |
| 417 | |
| 418 base::Optional<base::TimeTicks> next_run_time = | |
| 419 Max(next_desired_run_time, GetNextAllowedRunTime(now, queue)); | |
| 420 | |
| 421 MaybeSchedulePumpThrottledTasks(from_here, now, next_run_time.value()); | |
| 422 } | |
| 423 | |
| 424 base::TimeTicks TaskQueueThrottler::GetNextAllowedRunTime(base::TimeTicks now, | |
| 425 TaskQueue* queue) { | |
| 426 base::TimeTicks next_run_time = now; | |
| 427 | 470 |
| 428 auto find_it = queue_details_.find(queue); | 471 auto find_it = queue_details_.find(queue); |
| 429 if (find_it == queue_details_.end()) | 472 if (find_it == queue_details_.end()) |
| 430 return next_run_time; | 473 return next_run_time; |
| 431 | 474 |
| 432 for (BudgetPool* budget_pool : find_it->second.budget_pools) { | 475 for (BudgetPool* budget_pool : find_it->second.budget_pools) { |
| 433 next_run_time = | 476 next_run_time = std::max( |
| 434 std::max(next_run_time, budget_pool->GetNextAllowedRunTime()); | 477 next_run_time, budget_pool->GetNextAllowedRunTime(desired_run_time)); |
| 435 } | 478 } |
| 436 | 479 |
| 437 return next_run_time; | 480 return next_run_time; |
| 438 } | 481 } |
| 439 | 482 |
| 483 bool TaskQueueThrottler::CanRunTasksAt(TaskQueue* queue, | |
| 484 base::TimeTicks moment, | |
| 485 bool is_wake_up) { | |
| 486 auto find_it = queue_details_.find(queue); | |
| 487 if (find_it == queue_details_.end()) | |
| 488 return true; | |
| 489 | |
| 490 for (BudgetPool* budget_pool : find_it->second.budget_pools) { | |
| 491 if (!budget_pool->CanRunTasksAt(moment, is_wake_up)) | |
| 492 return false; | |
| 493 } | |
| 494 | |
| 495 return true; | |
| 496 } | |
| 497 | |
| 498 bool TaskQueueThrottler::CanRunTasksUntil(TaskQueue* queue, | |
| 499 base::TimeTicks now, | |
| 500 base::TimeTicks moment) { | |
| 501 auto find_it = queue_details_.find(queue); | |
| 502 if (find_it == queue_details_.end()) | |
| 503 return true; | |
| 504 | |
| 505 for (BudgetPool* budget_pool : find_it->second.budget_pools) { | |
| 506 if (!budget_pool->CanRunTasksUntil(now, moment)) | |
| 507 return false; | |
| 508 } | |
| 509 | |
| 510 return true; | |
| 511 } | |
| 512 | |
| 440 void TaskQueueThrottler::MaybeDeleteQueueMetadata(TaskQueueMap::iterator it) { | 513 void TaskQueueThrottler::MaybeDeleteQueueMetadata(TaskQueueMap::iterator it) { |
| 441 if (it->second.throttling_ref_count == 0 && it->second.budget_pools.empty()) | 514 if (it->second.throttling_ref_count == 0 && it->second.budget_pools.empty()) |
| 442 queue_details_.erase(it); | 515 queue_details_.erase(it); |
| 443 } | 516 } |
| 444 | 517 |
| 445 void TaskQueueThrottler::DisableThrottling() { | 518 void TaskQueueThrottler::DisableThrottling() { |
| 446 if (!allow_throttling_) | 519 if (!allow_throttling_) |
| 447 return; | 520 return; |
| 448 | 521 |
| 449 allow_throttling_ = false; | 522 allow_throttling_ = false; |
| (...skipping 26 matching lines...) Expand all Loading... | |
| 476 for (const auto& map_entry : queue_details_) { | 549 for (const auto& map_entry : queue_details_) { |
| 477 if (map_entry.second.throttling_ref_count == 0) | 550 if (map_entry.second.throttling_ref_count == 0) |
| 478 continue; | 551 continue; |
| 479 | 552 |
| 480 TaskQueue* queue = map_entry.first; | 553 TaskQueue* queue = map_entry.first; |
| 481 | 554 |
| 482 // Throttling is enabled and task queue should be blocked immediately | 555 // Throttling is enabled and task queue should be blocked immediately |
| 483 // to enforce task alignment. | 556 // to enforce task alignment. |
| 484 queue->InsertFence(TaskQueue::InsertFencePosition::BEGINNING_OF_TIME); | 557 queue->InsertFence(TaskQueue::InsertFencePosition::BEGINNING_OF_TIME); |
| 485 queue->SetTimeDomain(time_domain_.get()); | 558 queue->SetTimeDomain(time_domain_.get()); |
| 486 SchedulePumpQueue(FROM_HERE, lazy_now.Now(), queue); | 559 UpdateQueueThrottlingState(lazy_now.Now(), queue); |
| 487 } | 560 } |
| 488 | 561 |
| 489 TRACE_EVENT0(tracing_category_, "TaskQueueThrottler_EnableThrottling"); | 562 TRACE_EVENT0(tracing_category_, "TaskQueueThrottler_EnableThrottling"); |
| 490 } | 563 } |
| 491 | 564 |
| 492 } // namespace scheduler | 565 } // namespace scheduler |
| 493 } // namespace blink | 566 } // namespace blink |
| OLD | NEW |