// Copyright (c) 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/service/scheduler.h"

#include <algorithm>
#include <deque>
#include <tuple>

#include "base/callback.h"
#include "base/memory/ptr_util.h"
#include "base/stl_util.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
#include "gpu/command_buffer/service/sync_point_manager.h"

namespace gpu {

class Scheduler::Sequence {
 public:
  Sequence(SequenceId sequence_id,
           SchedulingPriority priority,
           scoped_refptr<SyncPointOrderData> order_data);

  ~Sequence();

  SequenceId sequence_id() const { return sequence_id_; }

  const SchedulingState& scheduling_state() const { return scheduling_state_; }

  bool enabled() const { return enabled_; }

  bool scheduled() const { return running_state_ == SCHEDULED; }

  bool running() const { return running_state_ == RUNNING; }

  // The sequence is runnable if it's enabled and has tasks that are not
  // blocked by wait fences.
  bool IsRunnable() const;

  bool NeedsRescheduling() const;

  void UpdateSchedulingState();

  // Returns true if this sequence runs before the other sequence.
  bool RunsBefore(const Sequence* other) const;

  void SetEnabled(bool enabled);

  // Sets running state to SCHEDULED.
  void SetScheduled();

  // Called before running the next task on the sequence. Returns the closure
  // for the task. Sets running state to RUNNING.
  base::OnceClosure BeginTask();

  // Called after running the closure returned by BeginTask. Sets running state
  // to IDLE.
  void FinishTask();

  // Enqueues a task in the sequence and returns the generated order number.
  uint32_t ScheduleTask(base::OnceClosure closure);

  // Continue running the current task with the given closure. Must be called
  // in between |BeginTask| and |FinishTask|.
  void ContinueTask(base::OnceClosure closure);

  // Add a sync token fence that this sequence should wait on.
  void AddWaitFence(const SyncToken& sync_token, uint32_t order_num);

  // Remove a waiting sync token fence.
  void RemoveWaitFence(const SyncToken& sync_token, uint32_t order_num);

  // Add a sync token fence that this sequence is expected to release.
  void AddReleaseFence(const SyncToken& sync_token, uint32_t order_num);

  // Remove a release sync token fence.
  void RemoveReleaseFence(const SyncToken& sync_token, uint32_t order_num);

 private:
  enum RunningState { IDLE, SCHEDULED, RUNNING };

  struct Fence {
    SyncToken sync_token;
    uint32_t order_num;

    bool operator==(const Fence& other) const {
      return std::tie(sync_token, order_num) ==
             std::tie(other.sync_token, other.order_num);
    }
  };

  struct Task {
    base::OnceClosure closure;
    uint32_t order_num;
  };

  SchedulingPriority GetSchedulingPriority() const;

  // Whether the sequence is enabled. Sequences are disabled/enabled based on
  // when the command buffer is descheduled/scheduled.
  bool enabled_ = true;

  // Initialized to IDLE so the state is well defined before the first
  // SetScheduled() / BeginTask().
  RunningState running_state_ = IDLE;

  // Cached scheduling state used for comparison with other sequences using
  // |RunsBefore|. Updated in |UpdateSchedulingState|.
  SchedulingState scheduling_state_;

  const SequenceId sequence_id_;

  const SchedulingPriority priority_;

  scoped_refptr<SyncPointOrderData> order_data_;

  // Deque of tasks. Tasks are inserted at the back with increasing order
  // numbers generated from SyncPointOrderData. If a running task needs to be
  // continued, it is inserted at the front with the same order number.
  std::deque<Task> tasks_;

  // List of fences that this sequence is waiting on. Fences are inserted in
  // increasing order number but may be removed out of order. Tasks are
  // blocked if there's a wait fence with order number less than or equal to
  // the task's order number.
  std::vector<Fence> wait_fences_;

  // List of fences that this sequence is expected to release. If this list is
  // non-empty, the priority of the sequence is raised.
  std::vector<Fence> release_fences_;

  DISALLOW_COPY_AND_ASSIGN(Sequence);
};
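// Illustrative sketch (not from the original file): the expected state
// transitions for a task on a Sequence, assuming all calls except the closure
// itself happen on the scheduler thread with |lock_| held:
//
//   uint32_t order_num = sequence->ScheduleTask(std::move(closure));
//   sequence->SetScheduled();                        // IDLE -> SCHEDULED
//   base::OnceClosure task = sequence->BeginTask();  // SCHEDULED -> RUNNING
//   std::move(task).Run();                           // runs outside the lock
//   sequence->FinishTask();                          // RUNNING -> IDLE
//
// ContinueTask() may be called from inside the closure to push the remainder
// of the task to the front of |tasks_| with the same order number.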

Scheduler::SchedulingState::SchedulingState() = default;
Scheduler::SchedulingState::SchedulingState(const SchedulingState& other) =
    default;
Scheduler::SchedulingState::~SchedulingState() = default;

std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
Scheduler::SchedulingState::AsValue() const {
  std::unique_ptr<base::trace_event::TracedValue> state(
      new base::trace_event::TracedValue());
  state->SetInteger("sequence_id", sequence_id.GetUnsafeValue());
  state->SetString("priority", SchedulingPriorityToString(priority));
  state->SetInteger("order_num", order_num);
  return std::move(state);
}

Scheduler::Sequence::Sequence(SequenceId sequence_id,
                              SchedulingPriority priority,
                              scoped_refptr<SyncPointOrderData> order_data)
    : sequence_id_(sequence_id), priority_(priority), order_data_(order_data) {}
dcheng 2017/05/11 03:31:47: Nit: std::move(order_data) since it's passed by value.
sunnyps 2017/05/11 20:58:55: Done.

Scheduler::Sequence::~Sequence() {
  order_data_->Destroy();
}

bool Scheduler::Sequence::NeedsRescheduling() const {
  return running_state_ != IDLE &&
         scheduling_state_.priority != GetSchedulingPriority();
}

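// Worked example (illustrative): if |tasks_| holds tasks with order numbers
// {7, 9} and |wait_fences_| holds a fence added at order number 7, then the
// front task is blocked (7 > 7 is false) and the sequence is not runnable
// until that fence is removed.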
bool Scheduler::Sequence::IsRunnable() const {
  return enabled_ && !tasks_.empty() &&
         (wait_fences_.empty() ||
          wait_fences_.front().order_num > tasks_.front().order_num);
}

SchedulingPriority Scheduler::Sequence::GetSchedulingPriority() const {
  if (!release_fences_.empty())
    return std::min(priority_, SchedulingPriority::kHigh);
  return priority_;
}

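// Note (assumption; the comparator lives in SchedulingState in scheduler.h):
// ordering is by (priority, order_num), so e.g. a kHigh sequence at order 10
// runs before a kNormal sequence at order 5, and between two kNormal
// sequences the lower (older) order number runs first.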
bool Scheduler::Sequence::RunsBefore(const Scheduler::Sequence* other) const {
  return scheduling_state_.RunsBefore(other->scheduling_state());
}

void Scheduler::Sequence::SetEnabled(bool enabled) {
  if (enabled_ == enabled)
    return;
  DCHECK_EQ(running_state_, enabled ? IDLE : RUNNING);
  enabled_ = enabled;
}

void Scheduler::Sequence::SetScheduled() {
  DCHECK_NE(running_state_, RUNNING);
  running_state_ = SCHEDULED;
  UpdateSchedulingState();
}

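// The cached order number is the sort key snapshot: UINT32_MAX when idle (so
// an idle sequence sorts last), the front task's order number when scheduled,
// and the current order number while running.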
void Scheduler::Sequence::UpdateSchedulingState() {
  scheduling_state_.sequence_id = sequence_id_;
  scheduling_state_.priority = GetSchedulingPriority();

  uint32_t order_num = UINT32_MAX;  // IDLE
  if (running_state_ == SCHEDULED) {
    DCHECK(!tasks_.empty());
    order_num = tasks_.front().order_num;
  } else if (running_state_ == RUNNING) {
    order_num = order_data_->current_order_num();
  }
  scheduling_state_.order_num = order_num;
}

void Scheduler::Sequence::ContinueTask(base::OnceClosure closure) {
  DCHECK_EQ(running_state_, RUNNING);
  tasks_.push_front({std::move(closure), order_data_->current_order_num()});
}

uint32_t Scheduler::Sequence::ScheduleTask(base::OnceClosure closure) {
  uint32_t order_num = order_data_->GenerateUnprocessedOrderNumber();
  tasks_.push_back({std::move(closure), order_num});
  return order_num;
}

base::OnceClosure Scheduler::Sequence::BeginTask() {
  DCHECK(!tasks_.empty());

  DCHECK_EQ(running_state_, SCHEDULED);
  running_state_ = RUNNING;

  base::OnceClosure closure = std::move(tasks_.front().closure);
  uint32_t order_num = tasks_.front().order_num;
  tasks_.pop_front();

  order_data_->BeginProcessingOrderNumber(order_num);

  UpdateSchedulingState();

  return closure;
}

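// Illustrative: if the task's closure called ContinueTask(), the front of
// |tasks_| now carries the current order number, so the order number is only
// paused here and is resumed by the continuation's next BeginTask();
// otherwise it is finished for good.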
void Scheduler::Sequence::FinishTask() {
  DCHECK_EQ(running_state_, RUNNING);
  running_state_ = IDLE;
  uint32_t order_num = order_data_->current_order_num();
  if (!tasks_.empty() && tasks_.front().order_num == order_num) {
    order_data_->PauseProcessingOrderNumber(order_num);
  } else {
    order_data_->FinishProcessingOrderNumber(order_num);
  }
  UpdateSchedulingState();
}

void Scheduler::Sequence::AddWaitFence(const SyncToken& sync_token,
                                       uint32_t order_num) {
  wait_fences_.push_back({sync_token, order_num});
}

void Scheduler::Sequence::RemoveWaitFence(const SyncToken& sync_token,
                                          uint32_t order_num) {
  base::Erase(wait_fences_, Fence{sync_token, order_num});
}

void Scheduler::Sequence::AddReleaseFence(const SyncToken& sync_token,
                                          uint32_t order_num) {
  release_fences_.push_back({sync_token, order_num});
}

void Scheduler::Sequence::RemoveeReleaseFence(const SyncToken& sync_token,
                                              uint32_t order_num) {
  base::Erase(release_fences_, Fence{sync_token, order_num});
}

Scheduler::Scheduler(scoped_refptr<base::SingleThreadTaskRunner> task_runner,
                     SyncPointManager* sync_point_manager)
    : task_runner_(std::move(task_runner)),
      sync_point_manager_(sync_point_manager),
      weak_factory_(this) {
  DCHECK(thread_checker_.CalledOnValidThread());
}

Scheduler::~Scheduler() {
  DCHECK(thread_checker_.CalledOnValidThread());
}

SequenceId Scheduler::CreateSequence(SchedulingPriority priority) {
  DCHECK(thread_checker_.CalledOnValidThread());
  base::AutoLock auto_lock(lock_);
  scoped_refptr<SyncPointOrderData> order_data =
      sync_point_manager_->CreateSyncPointOrderData();
  SequenceId sequence_id = order_data->sequence_id();
  auto sequence =
      base::MakeUnique<Sequence>(sequence_id, priority, std::move(order_data));
  sequences_.emplace(sequence_id, std::move(sequence));
  return sequence_id;
}

void Scheduler::DestroySequence(SequenceId sequence_id) {
  DCHECK(thread_checker_.CalledOnValidThread());
  base::AutoLock auto_lock(lock_);

  Sequence* sequence = GetSequence(sequence_id);
  DCHECK(sequence);
  if (sequence->scheduled())
    rebuild_scheduling_queue_ = true;

  sequences_.erase(sequence_id);
}

Scheduler::Sequence* Scheduler::GetSequence(SequenceId sequence_id) {
  lock_.AssertAcquired();
  auto it = sequences_.find(sequence_id);
  if (it != sequences_.end())
    return it->second.get();
  return nullptr;
}

void Scheduler::EnableSequence(SequenceId sequence_id) {
  DCHECK(thread_checker_.CalledOnValidThread());
  base::AutoLock auto_lock(lock_);
  Sequence* sequence = GetSequence(sequence_id);
  DCHECK(sequence);
  sequence->SetEnabled(true);
  TryScheduleSequence(sequence);
}

void Scheduler::DisableSequence(SequenceId sequence_id) {
  DCHECK(thread_checker_.CalledOnValidThread());
  base::AutoLock auto_lock(lock_);
  Sequence* sequence = GetSequence(sequence_id);
  DCHECK(sequence);
  sequence->SetEnabled(false);
}

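// Rough usage sketch (illustrative; the stub and callback names are
// hypothetical, not from this CL): a client posts work that waits on another
// sequence's sync token, which registers a wait fence on the waiting sequence
// and a release fence on the releasing sequence so the latter's priority can
// be boosted:
//
//   scheduler->ScheduleTask(sequence_id,
//                           base::BindOnce(&Stub::ExecuteDeferred, stub),
//                           {sync_token_from_other_sequence});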
void Scheduler::ScheduleTask(SequenceId sequence_id,
                             base::OnceClosure closure,
                             const std::vector<SyncToken>& sync_token_fences) {
  base::AutoLock auto_lock(lock_);
  Sequence* sequence = GetSequence(sequence_id);
  DCHECK(sequence);

  uint32_t order_num = sequence->ScheduleTask(std::move(closure));

  for (const SyncToken& sync_token : sync_token_fences) {
    SequenceId release_id =
        sync_point_manager_->GetSyncTokenReleaseSequenceId(sync_token);
    Sequence* release_sequence = GetSequence(release_id);
    if (!release_sequence)
      continue;
    if (sync_point_manager_->Wait(
            sync_token, order_num,
            base::Bind(&Scheduler::SyncTokenFenceReleased,
                       weak_factory_.GetWeakPtr(), sync_token, order_num,
                       release_id, sequence_id))) {
      sequence->AddWaitFence(sync_token, order_num);
      release_sequence->AddReleaseFence(sync_token, order_num);
      TryScheduleSequence(release_sequence);
    }
  }

  TryScheduleSequence(sequence);
}

void Scheduler::ContinueTask(SequenceId sequence_id,
                             base::OnceClosure closure) {
  DCHECK(thread_checker_.CalledOnValidThread());
  base::AutoLock auto_lock(lock_);
  Sequence* sequence = GetSequence(sequence_id);
  DCHECK(sequence);
  sequence->ContinueTask(std::move(closure));
}

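// Illustrative: a long-running task polls ShouldYield() and, if it returns
// true, calls ContinueTask() with a closure for the remaining work and
// returns, letting a sequence that would run before this one (e.g. one with a
// higher priority) take the thread.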
bool Scheduler::ShouldYield(SequenceId sequence_id) {
  DCHECK(thread_checker_.CalledOnValidThread());
  base::AutoLock auto_lock(lock_);

  Sequence* sequence = GetSequence(sequence_id);
  DCHECK(sequence);
  DCHECK(sequence->running());

  if (should_yield_)
    return true;

  RebuildSchedulingQueue();

  sequence->UpdateSchedulingState();

  if (!scheduling_queue_.empty()) {
    Sequence* next_sequence =
        GetSequence(scheduling_queue_.front().sequence_id);
    DCHECK(next_sequence);
    if (next_sequence->RunsBefore(sequence))
      should_yield_ = true;
  }

  return should_yield_;
}

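// Runs when a sync token waited on in ScheduleTask() is released: the wait
// fence on the waiting sequence and the release fence on the releasing
// sequence are removed, and both sequences get a chance to be (re)scheduled
// at their updated priorities.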
void Scheduler::SyncTokenFenceReleased(const SyncToken& sync_token,
                                       uint32_t order_num,
                                       SequenceId release_sequence_id,
                                       SequenceId waiting_sequence_id) {
  base::AutoLock auto_lock(lock_);
  Sequence* sequence = GetSequence(waiting_sequence_id);
  if (sequence) {
    sequence->RemoveWaitFence(sync_token, order_num);
    TryScheduleSequence(sequence);
  }
  Sequence* release_sequence = GetSequence(release_sequence_id);
  if (release_sequence) {
    release_sequence->RemoveeReleaseFence(sync_token, order_num);
    TryScheduleSequence(release_sequence);
  }
}

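// Note: if a sequence's effective priority changed (NeedsRescheduling), its
// stale entry may still sit in |scheduling_queue_|, so the queue is flagged
// for a full rebuild instead of being patched in place.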
void Scheduler::TryScheduleSequence(Sequence* sequence) {
  lock_.AssertAcquired();

  if (sequence->running())
    return;

  if (sequence->NeedsRescheduling()) {
    DCHECK(sequence->IsRunnable());
    rebuild_scheduling_queue_ = true;
  } else if (!sequence->scheduled() && sequence->IsRunnable()) {
    sequence->SetScheduled();
    scheduling_queue_.push_back(sequence->scheduling_state());
    std::push_heap(scheduling_queue_.begin(), scheduling_queue_.end(),
                   &SchedulingState::Comparator);
  }

  if (!running_) {
    TRACE_EVENT_ASYNC_BEGIN0("gpu", "Scheduler::Running", this);
    running_ = true;
    task_runner_->PostTask(FROM_HERE, base::Bind(&Scheduler::RunNextTask,
                                                 weak_factory_.GetWeakPtr()));
  }
}

void Scheduler::RebuildSchedulingQueue() {
  DCHECK(thread_checker_.CalledOnValidThread());
  lock_.AssertAcquired();

  if (!rebuild_scheduling_queue_)
    return;
  rebuild_scheduling_queue_ = false;

  scheduling_queue_.clear();
  for (const auto& kv : sequences_) {
    Sequence* sequence = kv.second.get();
    if (!sequence->IsRunnable() || sequence->running())
      continue;
    sequence->SetScheduled();
    scheduling_queue_.push_back(sequence->scheduling_state());
  }

  std::make_heap(scheduling_queue_.begin(), scheduling_queue_.end(),
                 &SchedulingState::Comparator);
}

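// Scheduling loop sketch (conceptual; each posted RunNextTask() call handles
// exactly one task):
//
//   pop the best SchedulingState off the heap
//   closure = sequence->BeginTask()
//   run the closure with |lock_| released
//   sequence->FinishTask(); re-insert the sequence if it is still runnable
//   post another RunNextTask()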
void Scheduler::RunNextTask() {
  DCHECK(thread_checker_.CalledOnValidThread());
  base::AutoLock auto_lock(lock_);

  should_yield_ = false;

  RebuildSchedulingQueue();

  if (scheduling_queue_.empty()) {
    TRACE_EVENT_ASYNC_END0("gpu", "Scheduler::Running", this);
    running_ = false;
    return;
  }

  std::pop_heap(scheduling_queue_.begin(), scheduling_queue_.end(),
                &SchedulingState::Comparator);
  SchedulingState state = scheduling_queue_.back();
  scheduling_queue_.pop_back();

  TRACE_EVENT1("gpu", "Scheduler::RunNextTask", "state", state.AsValue());

  DCHECK(GetSequence(state.sequence_id));
  base::OnceClosure closure = GetSequence(state.sequence_id)->BeginTask();

  {
    base::AutoUnlock auto_unlock(lock_);
    std::move(closure).Run();
  }

  // The sequence might have been destroyed while the closure was running.
  Sequence* sequence = GetSequence(state.sequence_id);
  if (sequence) {
    sequence->FinishTask();
    if (sequence->IsRunnable()) {
      sequence->SetScheduled();
      scheduling_queue_.push_back(sequence->scheduling_state());
      std::push_heap(scheduling_queue_.begin(), scheduling_queue_.end(),
                     &SchedulingState::Comparator);
    }
  }

  task_runner_->PostTask(FROM_HERE, base::Bind(&Scheduler::RunNextTask,
                                               weak_factory_.GetWeakPtr()));
}

}  // namespace gpu