// Copyright (c) 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/service/scheduler.h"

#include <algorithm>

#include "base/callback.h"
#include "base/memory/ptr_util.h"
#include "base/stl_util.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
#include "gpu/command_buffer/service/sync_point_manager.h"

namespace gpu {
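// Example client flow for the Scheduler API declared in scheduler.h. This is
// an illustrative sketch only (DoSomeWork and the chosen priority are
// hypothetical), not code from this file:
//
//   SequenceId id = scheduler->CreateSequence(SchedulingPriority::kNormal);
//   scheduler->ScheduleTask(id, base::BindOnce(&DoSomeWork), sync_token_fences);
//   ...
//   scheduler->DestroySequence(id);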

class Scheduler::Sequence {
 public:
  Sequence(SequenceId sequence_id,
           SchedulingPriority priority,
           scoped_refptr<SyncPointOrderData> order_data);

  ~Sequence();

  SequenceId sequence_id() const { return sequence_id_; }

  const SchedulingState& scheduling_state() const { return scheduling_state_; }

  bool enabled() const { return enabled_; }

  bool scheduled() const { return running_state_ == SCHEDULED; }

  bool running() const { return running_state_ == RUNNING; }

  // The sequence is runnable if it's enabled and has tasks that are not
  // blocked by wait fences.
  bool IsRunnable() const;

  bool NeedsRescheduling() const;

  void UpdateSchedulingState();

  // Returns true if this sequence should run before |other|.
  bool RunsBefore(const Sequence* other) const;

  void SetEnabled(bool enabled);

  // Sets running state to SCHEDULED.
  void SetScheduled();

  // Called before running the next task on the sequence. Returns the closure
  // for the task. Sets running state to RUNNING.
  base::OnceClosure BeginTask();

  // Called after running the closure returned by BeginTask. Sets running state
  // to IDLE.
  void FinishTask();

  // Enqueues a task in the sequence and returns the generated order number.
  uint32_t ScheduleTask(base::OnceClosure closure);

  // Continues running the current task with the given closure. Must be called
  // between |BeginTask| and |FinishTask|.
  void ContinueTask(base::OnceClosure closure);

  // Add a sync token fence that this sequence should wait on.
  void AddWaitFence(const SyncToken& sync_token, uint32_t order_num);

  // Remove a waiting sync token fence.
  void RemoveWaitFence(const SyncToken& sync_token, uint32_t order_num);

  // Add a sync token fence that this sequence is expected to release.
  void AddReleaseFence(const SyncToken& sync_token, uint32_t order_num);

  // Remove a release sync token fence.
  void RemoveReleaseFence(const SyncToken& sync_token, uint32_t order_num);

 private:
  enum RunningState { IDLE, SCHEDULED, RUNNING };

  struct Fence {
    SyncToken sync_token;
    uint32_t order_num;

    bool operator==(const Fence& other) const {
      return std::tie(sync_token, order_num) ==
             std::tie(other.sync_token, other.order_num);
    }
  };

  struct Task {
    base::OnceClosure closure;
    uint32_t order_num;
  };

  SchedulingPriority GetSchedulingPriority() const;

  // Whether the sequence is enabled. Sequences are disabled/enabled based on
  // when the command buffer is descheduled/scheduled.
  bool enabled_ = true;

  RunningState running_state_ = IDLE;

  // Cached scheduling state used for comparison with other sequences using
  // |RunsBefore|. Updated in |UpdateSchedulingState|.
  SchedulingState scheduling_state_;

  const SequenceId sequence_id_;

  const SchedulingPriority priority_;

  scoped_refptr<SyncPointOrderData> order_data_;

  // Deque of tasks. Tasks are inserted at the back with increasing order
  // numbers generated from SyncPointOrderData. If a running task needs to be
  // continued, it is inserted at the front with the same order number.
  std::deque<Task> tasks_;

  // List of fences that this sequence is waiting on. Fences are inserted in
  // increasing order number but may be removed out of order. Tasks are blocked
  // if there's a wait fence with order number less than or equal to the task's
  // order number.
  std::vector<Fence> wait_fences_;

  // List of fences that this sequence is expected to release. If this list is
  // non-empty, the priority of the sequence is raised.
  std::vector<Fence> release_fences_;

  DISALLOW_COPY_AND_ASSIGN(Sequence);
};

Scheduler::SchedulingState::SchedulingState() = default;
Scheduler::SchedulingState::SchedulingState(const SchedulingState& other) =
    default;
Scheduler::SchedulingState::~SchedulingState() = default;

std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
Scheduler::SchedulingState::AsValue() const {
  std::unique_ptr<base::trace_event::TracedValue> state(
      new base::trace_event::TracedValue());
  state->SetInteger("sequence_id", sequence_id.GetUnsafeValue());
  state->SetString("priority", SchedulingPriorityToString(priority));
  state->SetInteger("order_num", order_num);
  return std::move(state);
}

Scheduler::Sequence::Sequence(SequenceId sequence_id,
                              SchedulingPriority priority,
                              scoped_refptr<SyncPointOrderData> order_data)
    : sequence_id_(sequence_id),
      priority_(priority),
      order_data_(std::move(order_data)) {}

Scheduler::Sequence::~Sequence() {
  order_data_->Destroy();
}

bool Scheduler::Sequence::NeedsRescheduling() const {
  return running_state_ != IDLE &&
         scheduling_state_.priority != GetSchedulingPriority();
}

bool Scheduler::Sequence::IsRunnable() const {
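  // |wait_fences_| is kept in increasing order-number order (out-of-order
  // removal preserves that), so comparing the front fence against the front
  // task is enough to tell whether the first task is blocked.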
  return enabled_ && !tasks_.empty() &&
         (wait_fences_.empty() ||
          wait_fences_.front().order_num > tasks_.front().order_num);
}

SchedulingPriority Scheduler::Sequence::GetSchedulingPriority() const {
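  // A sequence that other sequences are waiting on (non-empty
  // |release_fences_|) is promoted to at least kHigh so that waiters are not
  // starved. std::min() is used on the assumption that more urgent priorities
  // have smaller enum values.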
  if (!release_fences_.empty())
    return std::min(priority_, SchedulingPriority::kHigh);
  return priority_;
}

bool Scheduler::Sequence::RunsBefore(const Scheduler::Sequence* other) const {
  return scheduling_state_.RunsBefore(other->scheduling_state());
}

void Scheduler::Sequence::SetEnabled(bool enabled) {
  if (enabled_ == enabled)
    return;
  DCHECK_EQ(running_state_, enabled ? IDLE : RUNNING);
  enabled_ = enabled;
}

void Scheduler::Sequence::SetScheduled() {
  DCHECK_NE(running_state_, RUNNING);
  running_state_ = SCHEDULED;
  UpdateSchedulingState();
}

void Scheduler::Sequence::UpdateSchedulingState() {
  scheduling_state_.sequence_id = sequence_id_;
  scheduling_state_.priority = GetSchedulingPriority();

  uint32_t order_num = UINT32_MAX;  // IDLE
  if (running_state_ == SCHEDULED) {
    DCHECK(!tasks_.empty());
    order_num = tasks_.front().order_num;
  } else if (running_state_ == RUNNING) {
    order_num = order_data_->current_order_num();
  }
  scheduling_state_.order_num = order_num;
}

void Scheduler::Sequence::ContinueTask(base::OnceClosure closure) {
  DCHECK_EQ(running_state_, RUNNING);
  tasks_.push_front({std::move(closure), order_data_->current_order_num()});
}

uint32_t Scheduler::Sequence::ScheduleTask(base::OnceClosure closure) {
  uint32_t order_num = order_data_->GenerateUnprocessedOrderNumber();
  tasks_.push_back({std::move(closure), order_num});
  return order_num;
}

base::OnceClosure Scheduler::Sequence::BeginTask() {
  DCHECK(!tasks_.empty());

  DCHECK_EQ(running_state_, SCHEDULED);
  running_state_ = RUNNING;

  base::OnceClosure closure = std::move(tasks_.front().closure);
  uint32_t order_num = tasks_.front().order_num;
  tasks_.pop_front();

  order_data_->BeginProcessingOrderNumber(order_num);

  UpdateSchedulingState();

  return closure;
}

void Scheduler::Sequence::FinishTask() {
  DCHECK_EQ(running_state_, RUNNING);
  running_state_ = IDLE;
  uint32_t order_num = order_data_->current_order_num();
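  // If ContinueTask() queued a continuation with the same order number, only
  // pause processing of that order number so the next BeginTask() can resume
  // it; otherwise the order number is fully finished.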
  if (!tasks_.empty() && tasks_.front().order_num == order_num) {
    order_data_->PauseProcessingOrderNumber(order_num);
  } else {
    order_data_->FinishProcessingOrderNumber(order_num);
  }
  UpdateSchedulingState();
}

void Scheduler::Sequence::AddWaitFence(const SyncToken& sync_token,
                                       uint32_t order_num) {
  wait_fences_.push_back({sync_token, order_num});
}

void Scheduler::Sequence::RemoveWaitFence(const SyncToken& sync_token,
                                          uint32_t order_num) {
  base::Erase(wait_fences_, Fence{sync_token, order_num});
}

void Scheduler::Sequence::AddReleaseFence(const SyncToken& sync_token,
                                          uint32_t order_num) {
  release_fences_.push_back({sync_token, order_num});
}

void Scheduler::Sequence::RemoveReleaseFence(const SyncToken& sync_token,
                                             uint32_t order_num) {
  base::Erase(release_fences_, Fence{sync_token, order_num});
}

Scheduler::Scheduler(scoped_refptr<base::SingleThreadTaskRunner> task_runner,
                     SyncPointManager* sync_point_manager)
    : task_runner_(std::move(task_runner)),
      sync_point_manager_(sync_point_manager),
      weak_factory_(this) {
  DCHECK(thread_checker_.CalledOnValidThread());
}

Scheduler::~Scheduler() {
  DCHECK(thread_checker_.CalledOnValidThread());
}

SequenceId Scheduler::CreateSequence(SchedulingPriority priority) {
  DCHECK(thread_checker_.CalledOnValidThread());
  base::AutoLock auto_lock(lock_);
  scoped_refptr<SyncPointOrderData> order_data =
      sync_point_manager_->CreateSyncPointOrderData();
  SequenceId sequence_id = order_data->sequence_id();
  auto sequence =
      base::MakeUnique<Sequence>(sequence_id, priority, std::move(order_data));
  sequences_.emplace(sequence_id, std::move(sequence));
  return sequence_id;
}

void Scheduler::DestroySequence(SequenceId sequence_id) {
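  // If the sequence is still in |scheduling_queue_|, its entry becomes stale
  // once the sequence is erased, so the queue is marked for a rebuild before
  // the next task is picked.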
  DCHECK(thread_checker_.CalledOnValidThread());
  base::AutoLock auto_lock(lock_);

  Sequence* sequence = GetSequence(sequence_id);
  DCHECK(sequence);
  if (sequence->scheduled())
    rebuild_scheduling_queue_ = true;

  sequences_.erase(sequence_id);
}

Scheduler::Sequence* Scheduler::GetSequence(SequenceId sequence_id) {
  lock_.AssertAcquired();
  auto it = sequences_.find(sequence_id);
  if (it != sequences_.end())
    return it->second.get();
  return nullptr;
}

void Scheduler::EnableSequence(SequenceId sequence_id) {
  DCHECK(thread_checker_.CalledOnValidThread());
  base::AutoLock auto_lock(lock_);
  Sequence* sequence = GetSequence(sequence_id);
  DCHECK(sequence);
  sequence->SetEnabled(true);
  TryScheduleSequence(sequence);
}

void Scheduler::DisableSequence(SequenceId sequence_id) {
  DCHECK(thread_checker_.CalledOnValidThread());
  base::AutoLock auto_lock(lock_);
  Sequence* sequence = GetSequence(sequence_id);
  DCHECK(sequence);
  sequence->SetEnabled(false);
}

void Scheduler::ScheduleTask(SequenceId sequence_id,
                             base::OnceClosure closure,
                             const std::vector<SyncToken>& sync_token_fences) {
  base::AutoLock auto_lock(lock_);
  Sequence* sequence = GetSequence(sequence_id);
  DCHECK(sequence);

  uint32_t order_num = sequence->ScheduleTask(std::move(closure));

  for (const SyncToken& sync_token : sync_token_fences) {
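    // A wait fence is only recorded when SyncPointManager actually queues the
    // wait, i.e. Wait() returns true; if the sync token has already been
    // released (or is invalid) there is nothing to wait on.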
    SequenceId release_id =
        sync_point_manager_->GetSyncTokenReleaseSequenceId(sync_token);
    Sequence* release_sequence = GetSequence(release_id);
    if (!release_sequence)
      continue;
    if (sync_point_manager_->Wait(
            sync_token, order_num,
            base::Bind(&Scheduler::SyncTokenFenceReleased,
                       weak_factory_.GetWeakPtr(), sync_token, order_num,
                       release_id, sequence_id))) {
      sequence->AddWaitFence(sync_token, order_num);
      release_sequence->AddReleaseFence(sync_token, order_num);
      TryScheduleSequence(release_sequence);
    }
  }

  TryScheduleSequence(sequence);
}

void Scheduler::ContinueTask(SequenceId sequence_id,
                             base::OnceClosure closure) {
  DCHECK(thread_checker_.CalledOnValidThread());
  base::AutoLock auto_lock(lock_);
  Sequence* sequence = GetSequence(sequence_id);
  DCHECK(sequence);
  sequence->ContinueTask(std::move(closure));
}

bool Scheduler::ShouldYield(SequenceId sequence_id) {
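  // |should_yield_| is sticky: once a higher-priority sequence is observed it
  // stays set until RunNextTask() clears it, so the current task keeps being
  // told to yield.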
  DCHECK(thread_checker_.CalledOnValidThread());
  base::AutoLock auto_lock(lock_);

  Sequence* sequence = GetSequence(sequence_id);
  DCHECK(sequence);
  DCHECK(sequence->running());

  if (should_yield_)
    return true;

  RebuildSchedulingQueue();

  sequence->UpdateSchedulingState();

  if (!scheduling_queue_.empty()) {
    Sequence* next_sequence =
        GetSequence(scheduling_queue_.front().sequence_id);
    DCHECK(next_sequence);
    if (next_sequence->RunsBefore(sequence))
      should_yield_ = true;
  }

  return should_yield_;
}

void Scheduler::SyncTokenFenceReleased(const SyncToken& sync_token,
                                       uint32_t order_num,
                                       SequenceId release_sequence_id,
                                       SequenceId waiting_sequence_id) {
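  // Runs as the callback bound in ScheduleTask(). Either sequence may have
  // been destroyed by the time the sync token is released, hence the null
  // checks on both lookups.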
  base::AutoLock auto_lock(lock_);
  Sequence* sequence = GetSequence(waiting_sequence_id);
  if (sequence) {
    sequence->RemoveWaitFence(sync_token, order_num);
    TryScheduleSequence(sequence);
  }
  Sequence* release_sequence = GetSequence(release_sequence_id);
  if (release_sequence) {
    release_sequence->RemoveReleaseFence(sync_token, order_num);
    TryScheduleSequence(release_sequence);
  }
}

void Scheduler::TryScheduleSequence(Sequence* sequence) {
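  // Three cases: a running sequence needs no queue update here; a queued
  // sequence whose effective priority changed marks the queue for a rebuild
  // (its cached heap entry is stale); a runnable, unqueued sequence is pushed
  // onto the heap. SchedulingState::Comparator is assumed to order entries so
  // that the sequence that should run first ends up at the front of the heap.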
  lock_.AssertAcquired();

  if (sequence->running())
    return;

  if (sequence->NeedsRescheduling()) {
    DCHECK(sequence->IsRunnable());
    rebuild_scheduling_queue_ = true;
  } else if (!sequence->scheduled() && sequence->IsRunnable()) {
    sequence->SetScheduled();
    scheduling_queue_.push_back(sequence->scheduling_state());
    std::push_heap(scheduling_queue_.begin(), scheduling_queue_.end(),
                   &SchedulingState::Comparator);
  }

  if (!running_) {
    TRACE_EVENT_ASYNC_BEGIN0("gpu", "Scheduler::Running", this);
    running_ = true;
    task_runner_->PostTask(FROM_HERE, base::Bind(&Scheduler::RunNextTask,
                                                 weak_factory_.GetWeakPtr()));
  }
}

void Scheduler::RebuildSchedulingQueue() {
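  // Rebuilds the heap from scratch from every runnable, non-running sequence.
  // |rebuild_scheduling_queue_| coalesces several invalidations (priority
  // changes, destroyed sequences) into a single rebuild.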
  DCHECK(thread_checker_.CalledOnValidThread());
  lock_.AssertAcquired();

  if (!rebuild_scheduling_queue_)
    return;
  rebuild_scheduling_queue_ = false;

  scheduling_queue_.clear();
  for (const auto& kv : sequences_) {
    Sequence* sequence = kv.second.get();
    if (!sequence->IsRunnable() || sequence->running())
      continue;
    sequence->SetScheduled();
    scheduling_queue_.push_back(sequence->scheduling_state());
  }

  std::make_heap(scheduling_queue_.begin(), scheduling_queue_.end(),
                 &SchedulingState::Comparator);
}

void Scheduler::RunNextTask() {
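  // Runs the task at the front of the heap, then posts itself again. The lock
  // is released while the client closure runs, so sequences (including this
  // one) may be created, scheduled, or destroyed concurrently.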
  DCHECK(thread_checker_.CalledOnValidThread());
  base::AutoLock auto_lock(lock_);

  should_yield_ = false;

  RebuildSchedulingQueue();

  if (scheduling_queue_.empty()) {
    TRACE_EVENT_ASYNC_END0("gpu", "Scheduler::Running", this);
    running_ = false;
    return;
  }

  std::pop_heap(scheduling_queue_.begin(), scheduling_queue_.end(),
                &SchedulingState::Comparator);
  SchedulingState state = scheduling_queue_.back();
  scheduling_queue_.pop_back();

  TRACE_EVENT1("gpu", "Scheduler::RunNextTask", "state", state.AsValue());

  DCHECK(GetSequence(state.sequence_id));
  base::OnceClosure closure = GetSequence(state.sequence_id)->BeginTask();

  {
    base::AutoUnlock auto_unlock(lock_);
    std::move(closure).Run();
  }

  // Check that the sequence wasn't destroyed while the closure was running
  // (the lock was released above).
  Sequence* sequence = GetSequence(state.sequence_id);
  if (sequence) {
    sequence->FinishTask();
    if (sequence->IsRunnable()) {
      sequence->SetScheduled();
      scheduling_queue_.push_back(sequence->scheduling_state());
      std::push_heap(scheduling_queue_.begin(), scheduling_queue_.end(),
                     &SchedulingState::Comparator);
    }
  }

  task_runner_->PostTask(FROM_HERE, base::Bind(&Scheduler::RunNextTask,
                                               weak_factory_.GetWeakPtr()));
}

}  // namespace gpu