Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "platform/scheduler/renderer/task_queue_throttler.h" | 5 #include "platform/scheduler/renderer/task_queue_throttler.h" |
| 6 | 6 |
| 7 #include <cstdint> | 7 #include <cstdint> |
| 8 | 8 |
| 9 #include "base/format_macros.h" | 9 #include "base/format_macros.h" |
| 10 #include "base/logging.h" | 10 #include "base/logging.h" |
| (...skipping 126 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 137 if (!task_queue->IsEmpty()) { | 137 if (!task_queue->IsEmpty()) { |
| 138 LazyNow lazy_now(tick_clock_); | 138 LazyNow lazy_now(tick_clock_); |
| 139 OnQueueNextWakeUpChanged(task_queue, | 139 OnQueueNextWakeUpChanged(task_queue, |
| 140 NextTaskRunTime(&lazy_now, task_queue).value()); | 140 NextTaskRunTime(&lazy_now, task_queue).value()); |
| 141 } | 141 } |
| 142 } | 142 } |
| 143 | 143 |
| 144 void TaskQueueThrottler::DecreaseThrottleRefCount(TaskQueue* task_queue) { | 144 void TaskQueueThrottler::DecreaseThrottleRefCount(TaskQueue* task_queue) { |
| 145 TaskQueueMap::iterator iter = queue_details_.find(task_queue); | 145 TaskQueueMap::iterator iter = queue_details_.find(task_queue); |
| 146 | 146 |
| 147 if (iter == queue_details_.end() || | 147 if (iter == queue_details_.end()) |
| 148 --iter->second.throttling_ref_count != 0) { | |
| 149 return; | 148 return; |
| 150 } | 149 if (iter->second.throttling_ref_count == 0) |
| 150 return; | |
| 151 if (--iter->second.throttling_ref_count != 0) | |
| 152 return; | |
| 151 | 153 |
| 152 TRACE_EVENT1(tracing_category_, "TaskQueueThrottler_TaskQueueUnthrottled", | 154 TRACE_EVENT1(tracing_category_, "TaskQueueThrottler_TaskQueueUnthrottled", |
| 153 "task_queue", task_queue); | 155 "task_queue", task_queue); |
| 154 | 156 |
| 155 task_queue->SetObserver(nullptr); | 157 task_queue->SetObserver(nullptr); |
| 156 | 158 |
| 157 MaybeDeleteQueueMetadata(iter); | 159 MaybeDeleteQueueMetadata(iter); |
| 158 | 160 |
| 159 if (!allow_throttling_) | 161 if (!allow_throttling_) |
| 160 return; | 162 return; |
| (...skipping 10 matching lines...) Expand all Loading... | |
| 171 if (find_it == queue_details_.end()) | 173 if (find_it == queue_details_.end()) |
| 172 return false; | 174 return false; |
| 173 return find_it->second.throttling_ref_count > 0; | 175 return find_it->second.throttling_ref_count > 0; |
| 174 } | 176 } |
| 175 | 177 |
| 176 void TaskQueueThrottler::UnregisterTaskQueue(TaskQueue* task_queue) { | 178 void TaskQueueThrottler::UnregisterTaskQueue(TaskQueue* task_queue) { |
| 177 auto find_it = queue_details_.find(task_queue); | 179 auto find_it = queue_details_.find(task_queue); |
| 178 if (find_it == queue_details_.end()) | 180 if (find_it == queue_details_.end()) |
| 179 return; | 181 return; |
| 180 | 182 |
| 181 LazyNow lazy_now(tick_clock_); | |
| 182 std::unordered_set<BudgetPool*> budget_pools = find_it->second.budget_pools; | 183 std::unordered_set<BudgetPool*> budget_pools = find_it->second.budget_pools; |
| 183 for (BudgetPool* budget_pool : budget_pools) { | 184 for (BudgetPool* budget_pool : budget_pools) { |
| 184 budget_pool->RemoveQueue(lazy_now.Now(), task_queue); | 185 budget_pool->UnregisterQueue(task_queue); |
| 185 } | 186 } |
| 186 | 187 |
| 187 // Iterator may have been deleted by BudgetPool::RemoveQueue, so don't | 188 // Iterator may have been deleted by BudgetPool::UnregisterQueue, so don't |
| 188 // use it here. | 189 // use it here. |
| 189 queue_details_.erase(task_queue); | 190 queue_details_.erase(task_queue); |
| 190 | 191 |
| 191 // NOTE: Observer is automatically unregistered when unregistering task queue. | 192 // NOTE: Observer is automatically unregistered when unregistering task queue. |
| 192 } | 193 } |
| 193 | 194 |
| 194 void TaskQueueThrottler::OnQueueNextWakeUpChanged( | 195 void TaskQueueThrottler::OnQueueNextWakeUpChanged( |
| 195 TaskQueue* queue, | 196 TaskQueue* queue, |
| 196 base::TimeTicks next_wake_up) { | 197 base::TimeTicks next_wake_up) { |
| 197 if (!task_runner_->RunsTasksOnCurrentThread()) { | 198 if (!task_runner_->RunsTasksOnCurrentThread()) { |
| 198 task_runner_->PostTask( | 199 task_runner_->PostTask( |
| 199 FROM_HERE, | 200 FROM_HERE, |
| 200 base::Bind(forward_immediate_work_callback_, queue, next_wake_up)); | 201 base::Bind(forward_immediate_work_callback_, queue, next_wake_up)); |
| 201 return; | 202 return; |
| 202 } | 203 } |
| 203 | 204 |
| 204 TRACE_EVENT0(tracing_category_, | 205 TRACE_EVENT0(tracing_category_, |
| 205 "TaskQueueThrottler::OnQueueNextWakeUpChanged"); | 206 "TaskQueueThrottler::OnQueueNextWakeUpChanged"); |
| 206 | 207 |
| 207 // We don't expect this to get called for disabled queues, but we can't DCHECK | 208 // We don't expect this to get called for disabled queues, but we can't DCHECK |
| 208 // because of the above thread hop. Just bail out if the queue is disabled. | 209 // because of the above thread hop. Just bail out if the queue is disabled. |
| 209 if (!queue->IsQueueEnabled()) | 210 if (!queue->IsQueueEnabled()) |
| 210 return; | 211 return; |
| 211 | 212 |
| 212 base::TimeTicks now = tick_clock_->NowTicks(); | 213 base::TimeTicks now = tick_clock_->NowTicks(); |
| 214 | |
| 215 auto find_it = queue_details_.find(queue); | |
| 216 if (find_it == queue_details_.end()) | |
| 217 return; | |
| 218 | |
| 219 for (BudgetPool* budget_pool : find_it->second.budget_pools) { | |
| 220 budget_pool->OnQueueNextWakeUpChanged(queue, now, next_wake_up); | |
| 221 } | |
| 222 | |
| 223 // TODO(altimin): This probably can be removed -- budget pools should | |
| 224 // schedule this. | |
| 225 base::TimeTicks next_allowed_run_time = | |
| 226 GetNextAllowedRunTime(queue, next_wake_up); | |
| 213 MaybeSchedulePumpThrottledTasks( | 227 MaybeSchedulePumpThrottledTasks( |
| 214 FROM_HERE, now, | 228 FROM_HERE, now, std::max(next_wake_up, next_allowed_run_time)); |
| 215 std::max(GetNextAllowedRunTime(now, queue), next_wake_up)); | |
| 216 } | 229 } |
| 217 | 230 |
| 218 void TaskQueueThrottler::PumpThrottledTasks() { | 231 void TaskQueueThrottler::PumpThrottledTasks() { |
| 219 TRACE_EVENT0(tracing_category_, "TaskQueueThrottler::PumpThrottledTasks"); | 232 TRACE_EVENT0(tracing_category_, "TaskQueueThrottler::PumpThrottledTasks"); |
| 220 pending_pump_throttled_tasks_runtime_.reset(); | 233 pending_pump_throttled_tasks_runtime_.reset(); |
| 221 | 234 |
| 222 LazyNow lazy_now(tick_clock_); | 235 LazyNow lazy_now(tick_clock_); |
| 223 base::Optional<base::TimeTicks> next_scheduled_delayed_task; | 236 base::Optional<base::TimeTicks> next_scheduled_delayed_task; |
| 224 | 237 |
| 238 for (const auto& pair : budget_pools_) | |
| 239 pair.first->OnWakeUp(lazy_now.Now()); | |
| 240 | |
| 225 for (const TaskQueueMap::value_type& map_entry : queue_details_) { | 241 for (const TaskQueueMap::value_type& map_entry : queue_details_) { |
| 226 TaskQueue* task_queue = map_entry.first; | 242 TaskQueue* task_queue = map_entry.first; |
| 227 if (task_queue->IsEmpty() || !IsThrottled(task_queue)) | 243 UpdateQueueThrottlingStateInternal(lazy_now.Now(), task_queue, true); |
| 228 continue; | |
| 229 | |
| 230 // Don't enable queues whose budget pool doesn't allow them to run now. | |
| 231 base::TimeTicks next_allowed_run_time = | |
| 232 GetNextAllowedRunTime(lazy_now.Now(), task_queue); | |
| 233 base::Optional<base::TimeTicks> next_desired_run_time = | |
| 234 NextTaskRunTime(&lazy_now, task_queue); | |
| 235 | |
| 236 if (next_desired_run_time && | |
| 237 next_allowed_run_time > next_desired_run_time.value()) { | |
| 238 TRACE_EVENT1( | |
| 239 tracing_category_, | |
| 240 "TaskQueueThrottler::PumpThrottledTasks_ExpensiveTaskThrottled", | |
| 241 "throttle_time_in_seconds", | |
| 242 (next_allowed_run_time - next_desired_run_time.value()).InSecondsF()); | |
| 243 | |
| 244 // Schedule a pump for queue which was disabled because of time budget. | |
| 245 next_scheduled_delayed_task = | |
| 246 Min(next_scheduled_delayed_task, next_allowed_run_time); | |
| 247 | |
| 248 continue; | |
| 249 } | |
| 250 | |
| 251 next_scheduled_delayed_task = | |
| 252 Min(next_scheduled_delayed_task, task_queue->GetNextScheduledWakeUp()); | |
| 253 | |
| 254 if (next_allowed_run_time > lazy_now.Now()) | |
| 255 continue; | |
| 256 | |
| 257 // Remove previous fence and install a new one, allowing all tasks posted | |
| 258 // on |task_queue| up until this point to run and block all further tasks. | |
| 259 task_queue->InsertFence(TaskQueue::InsertFencePosition::NOW); | |
| 260 } | |
| 261 | |
| 262 // Maybe schedule a call to TaskQueueThrottler::PumpThrottledTasks if there is | |
| 263 // a pending delayed task or a throttled task ready to run. | |
| 264 // NOTE: posting a non-delayed task in the future will result in | |
| 265 // TaskQueueThrottler::OnTimeDomainHasImmediateWork being called. | |
| 266 if (next_scheduled_delayed_task) { | |
| 267 MaybeSchedulePumpThrottledTasks(FROM_HERE, lazy_now.Now(), | |
| 268 *next_scheduled_delayed_task); | |
| 269 } | 244 } |
| 270 } | 245 } |
| 271 | 246 |
| 272 /* static */ | 247 /* static */ |
| 273 base::TimeTicks TaskQueueThrottler::AlignedThrottledRunTime( | 248 base::TimeTicks TaskQueueThrottler::AlignedThrottledRunTime( |
| 274 base::TimeTicks unthrottled_runtime) { | 249 base::TimeTicks unthrottled_runtime) { |
| 275 const base::TimeDelta one_second = base::TimeDelta::FromSeconds(1); | 250 const base::TimeDelta one_second = base::TimeDelta::FromSeconds(1); |
| 276 return unthrottled_runtime + one_second - | 251 return unthrottled_runtime + one_second - |
| 277 ((unthrottled_runtime - base::TimeTicks()) % one_second); | 252 ((unthrottled_runtime - base::TimeTicks()) % one_second); |
| 278 } | 253 } |
| (...skipping 29 matching lines...) Expand all Loading... | |
| 308 } | 283 } |
| 309 | 284 |
| 310 CPUTimeBudgetPool* TaskQueueThrottler::CreateCPUTimeBudgetPool( | 285 CPUTimeBudgetPool* TaskQueueThrottler::CreateCPUTimeBudgetPool( |
| 311 const char* name) { | 286 const char* name) { |
| 312 CPUTimeBudgetPool* time_budget_pool = | 287 CPUTimeBudgetPool* time_budget_pool = |
| 313 new CPUTimeBudgetPool(name, this, tick_clock_->NowTicks()); | 288 new CPUTimeBudgetPool(name, this, tick_clock_->NowTicks()); |
| 314 budget_pools_[time_budget_pool] = base::WrapUnique(time_budget_pool); | 289 budget_pools_[time_budget_pool] = base::WrapUnique(time_budget_pool); |
| 315 return time_budget_pool; | 290 return time_budget_pool; |
| 316 } | 291 } |
| 317 | 292 |
| 293 WakeUpBudgetPool* TaskQueueThrottler::CreateWakeUpBudgetPool(const char* name) { | |
| 294 WakeUpBudgetPool* wake_up_budget_pool = | |
| 295 new WakeUpBudgetPool(name, this, tick_clock_->NowTicks()); | |
| 296 budget_pools_[wake_up_budget_pool] = base::WrapUnique(wake_up_budget_pool); | |
| 297 return wake_up_budget_pool; | |
| 298 } | |
| 299 | |
| 318 void TaskQueueThrottler::OnTaskRunTimeReported(TaskQueue* task_queue, | 300 void TaskQueueThrottler::OnTaskRunTimeReported(TaskQueue* task_queue, |
| 319 base::TimeTicks start_time, | 301 base::TimeTicks start_time, |
| 320 base::TimeTicks end_time) { | 302 base::TimeTicks end_time) { |
| 321 if (!IsThrottled(task_queue)) | 303 if (!IsThrottled(task_queue)) |
| 322 return; | 304 return; |
| 323 | 305 |
| 324 auto find_it = queue_details_.find(task_queue); | 306 auto find_it = queue_details_.find(task_queue); |
| 325 if (find_it == queue_details_.end()) | 307 if (find_it == queue_details_.end()) |
| 326 return; | 308 return; |
| 327 | 309 |
| 328 for (BudgetPool* budget_pool : find_it->second.budget_pools) { | 310 for (BudgetPool* budget_pool : find_it->second.budget_pools) { |
| 329 budget_pool->RecordTaskRunTime(start_time, end_time); | 311 budget_pool->RecordTaskRunTime(task_queue, start_time, end_time); |
| 330 if (!budget_pool->HasEnoughBudgetToRun(end_time)) | |
| 331 budget_pool->BlockThrottledQueues(end_time); | |
| 332 } | 312 } |
| 333 } | 313 } |
| 334 | 314 |
| 335 void TaskQueueThrottler::BlockQueue(base::TimeTicks now, TaskQueue* queue) { | 315 void TaskQueueThrottler::UpdateQueueThrottlingState(base::TimeTicks now, |
| 336 if (!IsThrottled(queue)) | 316 TaskQueue* queue) { |
| 317 UpdateQueueThrottlingStateInternal(now, queue, false); | |
| 318 } | |
| 319 | |
| 320 void TaskQueueThrottler::UpdateQueueThrottlingStateInternal(base::TimeTicks now, | |
| 321 TaskQueue* queue, | |
| 322 bool is_wake_up) { | |
| 323 if (!queue->IsQueueEnabled() || !IsThrottled(queue)) { | |
| 337 return; | 324 return; |
| 325 } | |
| 338 | 326 |
| 339 queue->InsertFence(TaskQueue::InsertFencePosition::BEGINNING_OF_TIME); | 327 LazyNow lazy_now(now); |
| 340 SchedulePumpQueue(FROM_HERE, now, queue); | 328 |
| 329 base::Optional<base::TimeTicks> next_desired_run_time = | |
| 330 NextTaskRunTime(&lazy_now, queue); | |
| 331 | |
| 332 if (!next_desired_run_time) { | |
| 333 // This queue is empty. Given that new task can arrive at any moment, | |
| 334 // block the queue completely and update the state upon the notification | |
| 335 // about a new task. | |
| 336 queue->InsertFence(TaskQueue::InsertFencePosition::NOW); | |
| 337 return; | |
| 338 } | |
| 339 | |
| 340 if (CanRunTasksUntil(queue, now, next_desired_run_time.value())) { | |
| 341 // We can run up until the next task uninterrupted. Remove the fence | |
| 342 // to allow new tasks to run immediately. | |
| 343 queue->RemoveFence(); | |
|
alex clarke (OOO till 29th)
2017/05/02 10:51:54
Can we DCHECK(queue->HasFence()) or is that going
altimin
2017/05/02 18:16:58
It is going to fail when the fence is removed and
| |
| 344 | |
| 345 // TaskQueueThrottler does not schedule wake-ups implicitly, we need | |
| 346 // to be explicit. | |
| 347 if (next_desired_run_time.value() != now) { | |
| 348 time_domain_->SetNextTaskRunTime(next_desired_run_time.value()); | |
| 349 } | |
| 350 return; | |
| 351 } | |
| 352 | |
| 353 if (CanRunTasksAt(queue, now, is_wake_up)) { | |
| 354 // We can run task now, but we can't run until the next scheduled task. | |
| 355 // Insert a fresh fence to unblock queue and schedule a pump for the | |
| 356 // next wake-up. | |
| 357 queue->InsertFence(TaskQueue::InsertFencePosition::NOW); | |
| 358 | |
| 359 base::Optional<base::TimeTicks> next_wake_up = | |
| 360 queue->GetNextScheduledWakeUp(); | |
| 361 if (next_wake_up) { | |
| 362 MaybeSchedulePumpThrottledTasks( | |
| 363 FROM_HERE, now, GetNextAllowedRunTime(queue, next_wake_up.value())); | |
| 364 } | |
| 365 return; | |
| 366 } | |
| 367 | |
| 368 base::TimeTicks next_run_time = | |
| 369 GetNextAllowedRunTime(queue, next_desired_run_time.value()); | |
| 370 | |
| 371 // Ensure that correct type of a fence is blocking queue which can't run. | |
|
alex clarke (OOO till 29th)
2017/05/02 10:51:54
Could you please re-word this comment? It doesn't
altimin
2017/05/02 18:16:58
Done.
| |
| 372 base::Optional<QueueBlockType> block_type = GetQueueBlockType(now, queue); | |
| 373 DCHECK(block_type); | |
| 374 | |
| 375 if (block_type == QueueBlockType::kAllTasks) { | |
|
alex clarke (OOO till 29th)
2017/05/02 10:51:54
Are you planning on adding more QueueBlockTypes?
altimin
2017/05/02 18:16:58
Agreed.
| |
| 376 queue->InsertFence(TaskQueue::InsertFencePosition::BEGINNING_OF_TIME); | |
| 377 | |
| 378 TRACE_EVENT1( | |
| 379 "renderer.scheduler", | |
| 380 "TaskQueueThrottler::PumpThrottledTasks_ExpensiveTaskThrottled", | |
| 381 "throttle_time_in_seconds", | |
| 382 (next_run_time - next_desired_run_time.value()).InSecondsF()); | |
| 383 } else if (block_type == QueueBlockType::kNewTasksOnly && | |
| 384 !queue->HasFence()) { | |
| 385 // Insert a new non-fully blocking fence only when there is no fence already | |
| 386 // in order to avoid undesired unblocking of old tasks. | |
| 387 queue->InsertFence(TaskQueue::InsertFencePosition::NOW); | |
| 388 } | |
|
alex clarke (OOO till 29th)
2017/05/02 10:51:54
So just to make sure I understand, if block_type ==
| |
| 389 | |
| 390 // Schedule a pump. | |
| 391 MaybeSchedulePumpThrottledTasks(FROM_HERE, now, next_run_time); | |
| 392 } | |
| 393 | |
| 394 base::Optional<QueueBlockType> TaskQueueThrottler::GetQueueBlockType( | |
|
alex clarke (OOO till 29th)
2017/05/02 10:51:54
Do we need this function? Could we instead expose
| |
| 395 base::TimeTicks now, | |
| 396 TaskQueue* queue) { | |
| 397 auto find_it = queue_details_.find(queue); | |
| 398 if (find_it == queue_details_.end()) | |
| 399 return base::nullopt; | |
| 400 | |
| 401 bool has_new_tasks_only_block = false; | |
| 402 | |
| 403 for (BudgetPool* budget_pool : find_it->second.budget_pools) { | |
| 404 if (!budget_pool->CanRunTasksAt(now, false)) { | |
| 405 if (budget_pool->GetBlockType() == QueueBlockType::kAllTasks) | |
| 406 return QueueBlockType::kAllTasks; | |
| 407 DCHECK_EQ(budget_pool->GetBlockType(), QueueBlockType::kNewTasksOnly); | |
| 408 has_new_tasks_only_block = true; | |
| 409 } | |
| 410 } | |
| 411 | |
| 412 if (has_new_tasks_only_block) | |
| 413 return QueueBlockType::kNewTasksOnly; | |
| 414 return base::nullopt; | |
| 341 } | 415 } |
| 342 | 416 |
| 343 void TaskQueueThrottler::AsValueInto(base::trace_event::TracedValue* state, | 417 void TaskQueueThrottler::AsValueInto(base::trace_event::TracedValue* state, |
| 344 base::TimeTicks now) const { | 418 base::TimeTicks now) const { |
| 345 if (pending_pump_throttled_tasks_runtime_) { | 419 if (pending_pump_throttled_tasks_runtime_) { |
| 346 state->SetDouble( | 420 state->SetDouble( |
| 347 "next_throttled_tasks_pump_in_seconds", | 421 "next_throttled_tasks_pump_in_seconds", |
| 348 (pending_pump_throttled_tasks_runtime_.value() - now).InSecondsF()); | 422 (pending_pump_throttled_tasks_runtime_.value() - now).InSecondsF()); |
| 349 } | 423 } |
| 350 | 424 |
| (...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 391 | 465 |
| 392 find_it->second.budget_pools.erase(budget_pool); | 466 find_it->second.budget_pools.erase(budget_pool); |
| 393 | 467 |
| 394 MaybeDeleteQueueMetadata(find_it); | 468 MaybeDeleteQueueMetadata(find_it); |
| 395 } | 469 } |
| 396 | 470 |
| 397 void TaskQueueThrottler::UnregisterBudgetPool(BudgetPool* budget_pool) { | 471 void TaskQueueThrottler::UnregisterBudgetPool(BudgetPool* budget_pool) { |
| 398 budget_pools_.erase(budget_pool); | 472 budget_pools_.erase(budget_pool); |
| 399 } | 473 } |
| 400 | 474 |
| 401 void TaskQueueThrottler::UnblockQueue(base::TimeTicks now, TaskQueue* queue) { | 475 base::TimeTicks TaskQueueThrottler::GetNextAllowedRunTime( |
| 402 SchedulePumpQueue(FROM_HERE, now, queue); | 476 TaskQueue* queue, |
| 403 } | 477 base::TimeTicks desired_run_time) { |
| 404 | 478 base::TimeTicks next_run_time = desired_run_time; |
| 405 void TaskQueueThrottler::SchedulePumpQueue( | |
| 406 const tracked_objects::Location& from_here, | |
| 407 base::TimeTicks now, | |
| 408 TaskQueue* queue) { | |
| 409 if (!IsThrottled(queue)) | |
| 410 return; | |
| 411 | |
| 412 LazyNow lazy_now(now); | |
| 413 base::Optional<base::TimeTicks> next_desired_run_time = | |
| 414 NextTaskRunTime(&lazy_now, queue); | |
| 415 if (!next_desired_run_time) | |
| 416 return; | |
| 417 | |
| 418 base::Optional<base::TimeTicks> next_run_time = | |
| 419 Max(next_desired_run_time, GetNextAllowedRunTime(now, queue)); | |
| 420 | |
| 421 MaybeSchedulePumpThrottledTasks(from_here, now, next_run_time.value()); | |
| 422 } | |
| 423 | |
| 424 base::TimeTicks TaskQueueThrottler::GetNextAllowedRunTime(base::TimeTicks now, | |
| 425 TaskQueue* queue) { | |
| 426 base::TimeTicks next_run_time = now; | |
| 427 | 479 |
| 428 auto find_it = queue_details_.find(queue); | 480 auto find_it = queue_details_.find(queue); |
| 429 if (find_it == queue_details_.end()) | 481 if (find_it == queue_details_.end()) |
| 430 return next_run_time; | 482 return next_run_time; |
| 431 | 483 |
| 432 for (BudgetPool* budget_pool : find_it->second.budget_pools) { | 484 for (BudgetPool* budget_pool : find_it->second.budget_pools) { |
| 433 next_run_time = | 485 next_run_time = std::max( |
| 434 std::max(next_run_time, budget_pool->GetNextAllowedRunTime()); | 486 next_run_time, budget_pool->GetNextAllowedRunTime(desired_run_time)); |
| 435 } | 487 } |
| 436 | 488 |
| 437 return next_run_time; | 489 return next_run_time; |
| 438 } | 490 } |
| 439 | 491 |
| 492 bool TaskQueueThrottler::CanRunTasksAt(TaskQueue* queue, | |
| 493 base::TimeTicks moment, | |
| 494 bool is_wake_up) { | |
| 495 auto find_it = queue_details_.find(queue); | |
| 496 if (find_it == queue_details_.end()) | |
| 497 return true; | |
| 498 | |
| 499 for (BudgetPool* budget_pool : find_it->second.budget_pools) { | |
| 500 if (!budget_pool->CanRunTasksAt(moment, is_wake_up)) | |
| 501 return false; | |
| 502 } | |
| 503 | |
| 504 return true; | |
| 505 } | |
| 506 | |
| 507 bool TaskQueueThrottler::CanRunTasksUntil(TaskQueue* queue, | |
| 508 base::TimeTicks now, | |
| 509 base::TimeTicks moment) { | |
| 510 auto find_it = queue_details_.find(queue); | |
| 511 if (find_it == queue_details_.end()) | |
| 512 return true; | |
| 513 | |
| 514 for (BudgetPool* budget_pool : find_it->second.budget_pools) { | |
| 515 if (!budget_pool->CanRunTasksUntil(now, moment)) | |
| 516 return false; | |
| 517 } | |
| 518 | |
| 519 return true; | |
| 520 } | |
| 521 | |
| 440 void TaskQueueThrottler::MaybeDeleteQueueMetadata(TaskQueueMap::iterator it) { | 522 void TaskQueueThrottler::MaybeDeleteQueueMetadata(TaskQueueMap::iterator it) { |
| 441 if (it->second.throttling_ref_count == 0 && it->second.budget_pools.empty()) | 523 if (it->second.throttling_ref_count == 0 && it->second.budget_pools.empty()) |
| 442 queue_details_.erase(it); | 524 queue_details_.erase(it); |
| 443 } | 525 } |
| 444 | 526 |
| 445 void TaskQueueThrottler::DisableThrottling() { | 527 void TaskQueueThrottler::DisableThrottling() { |
| 446 if (!allow_throttling_) | 528 if (!allow_throttling_) |
| 447 return; | 529 return; |
| 448 | 530 |
| 449 allow_throttling_ = false; | 531 allow_throttling_ = false; |
| (...skipping 26 matching lines...) Expand all Loading... | |
| 476 for (const auto& map_entry : queue_details_) { | 558 for (const auto& map_entry : queue_details_) { |
| 477 if (map_entry.second.throttling_ref_count == 0) | 559 if (map_entry.second.throttling_ref_count == 0) |
| 478 continue; | 560 continue; |
| 479 | 561 |
| 480 TaskQueue* queue = map_entry.first; | 562 TaskQueue* queue = map_entry.first; |
| 481 | 563 |
| 482 // Throttling is enabled and task queue should be blocked immediately | 564 // Throttling is enabled and task queue should be blocked immediately |
| 483 // to enforce task alignment. | 565 // to enforce task alignment. |
| 484 queue->InsertFence(TaskQueue::InsertFencePosition::BEGINNING_OF_TIME); | 566 queue->InsertFence(TaskQueue::InsertFencePosition::BEGINNING_OF_TIME); |
| 485 queue->SetTimeDomain(time_domain_.get()); | 567 queue->SetTimeDomain(time_domain_.get()); |
| 486 SchedulePumpQueue(FROM_HERE, lazy_now.Now(), queue); | 568 UpdateQueueThrottlingState(lazy_now.Now(), queue); |
| 487 } | 569 } |
| 488 | 570 |
| 489 TRACE_EVENT0(tracing_category_, "TaskQueueThrottler_EnableThrottling"); | 571 TRACE_EVENT0(tracing_category_, "TaskQueueThrottler_EnableThrottling"); |
| 490 } | 572 } |
| 491 | 573 |
| 492 } // namespace scheduler | 574 } // namespace scheduler |
| 493 } // namespace blink | 575 } // namespace blink |
| OLD | NEW |