| OLD | NEW |
| (Empty) | |
| 1 // Copyright (c) 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "gpu/ipc/service/gpu_scheduler.h" |
| 6 |
| 7 #include <algorithm> |
| 8 |
| 9 #include "base/callback.h" |
| 10 #include "base/memory/ptr_util.h" |
| 11 #include "base/timer/timer.h" |
| 12 #include "base/trace_event/trace_event.h" |
| 13 #include "base/trace_event/trace_event_argument.h" |
| 14 #include "gpu/command_buffer/service/command_executor.h" |
| 15 #include "gpu/ipc/service/gpu_command_stream.h" |
| 16 |
| 17 namespace gpu { |
| 18 |
namespace {

// Scheduling quantum bounds, in microseconds. A running stream is allowed at
// least the minimum slice (to limit context-switch overhead) and at most the
// maximum slice (to share the thread with other tasks) — see
// GpuScheduler::ShouldYield() for how these are applied.
const int64_t kMinTimeSliceUs = 4000;
const int64_t kMaxTimeSliceUs = 12000;

}  // anonymous namespace
| 25 |
| 26 std::unique_ptr<GpuScheduler> GpuScheduler::Create( |
| 27 scoped_refptr<base::SingleThreadTaskRunner> task_runner) { |
| 28 GpuSchedulerSettings settings; |
| 29 settings.min_time_slice = base::TimeDelta::FromMicroseconds(kMinTimeSliceUs); |
| 30 settings.max_time_slice = base::TimeDelta::FromMicroseconds(kMaxTimeSliceUs); |
| 31 return base::WrapUnique(new GpuScheduler(settings, std::move(task_runner))); |
| 32 } |
| 33 |
// Constructed via Create(). Must be constructed on the thread the scheduler
// will subsequently be driven from (enforced by |thread_checker_|).
GpuScheduler::GpuScheduler(
    const GpuSchedulerSettings& settings,
    scoped_refptr<base::SingleThreadTaskRunner> task_runner)
    : settings_(settings),
      task_runner_(std::move(task_runner)),
      weak_factory_(this) {
  DCHECK(thread_checker_.CalledOnValidThread());
}
| 42 |
// Must be destroyed on the same thread the scheduler was created on;
// destroying |weak_factory_| here invalidates any pending RunNextStream tasks.
GpuScheduler::~GpuScheduler() {
  DCHECK(thread_checker_.CalledOnValidThread());
}
| 46 |
// Time source for time-slice accounting (see ShouldYield() and
// RunNextStream()). Kept as a separate member function — presumably so tests
// can substitute a fake clock; confirm against the header.
base::TimeTicks GpuScheduler::Now() {
  return base::TimeTicks::Now();
}
| 50 |
| 51 void GpuScheduler::AddStream(GpuCommandStream* stream, |
| 52 GpuStreamPriority priority) { |
| 53 base::AutoLock lock(lock_); |
| 54 DCHECK(thread_checker_.CalledOnValidThread()); |
| 55 StreamInfo stream_info; |
| 56 stream_info.stream = stream; |
| 57 stream_info.priority = priority; |
| 58 stream_info.ready = false; |
| 59 streams_.push_back(stream_info); |
| 60 } |
| 61 |
| 62 void GpuScheduler::RemoveStream(GpuCommandStream* stream) { |
| 63 DCHECK(thread_checker_.CalledOnValidThread()); |
| 64 |
| 65 auto it = FindStream(stream); |
| 66 DCHECK(it != streams_.end()); |
| 67 streams_.erase(it); |
| 68 |
| 69 auto work_queue_it = FindScheduledStream(stream); |
| 70 if (work_queue_it != work_queue_.end()) { |
| 71 work_queue_.erase(work_queue_it); |
| 72 std::make_heap(work_queue_.begin(), work_queue_.end()); |
| 73 } |
| 74 } |
| 75 |
| 76 std::vector<GpuScheduler::ScheduledStream>::iterator |
| 77 GpuScheduler::FindScheduledStream(GpuCommandStream* stream) { |
| 78 return std::find_if(work_queue_.begin(), work_queue_.end(), |
| 79 [&stream](const ScheduledStream& sched_stream) { |
| 80 return sched_stream.stream == stream; |
| 81 }); |
| 82 } |
| 83 |
| 84 std::vector<GpuScheduler::StreamInfo>::iterator GpuScheduler::FindStream( |
| 85 GpuCommandStream* stream) { |
| 86 return std::find_if(streams_.begin(), streams_.end(), |
| 87 [&stream](const StreamInfo& stream_info) { |
| 88 return stream_info.stream == stream; |
| 89 }); |
| 90 } |
| 91 |
| 92 void GpuScheduler::ScheduleStream(GpuCommandStream* stream) { |
| 93 base::AutoLock lock(lock_); |
| 94 |
| 95 auto it = FindStream(stream); |
| 96 DCHECK(it != streams_.end()); |
| 97 it->ready = true; |
| 98 if (it->priority == GpuStreamPriority::REAL_TIME && |
| 99 FindScheduledStream(stream) == work_queue_.end()) { |
| 100 needs_rescheduling_ = true; |
| 101 } |
| 102 |
| 103 if (!running_) { |
| 104 TRACE_EVENT_ASYNC_BEGIN0("gpu", "GpuScheduler::Running", this); |
| 105 running_ = true; |
| 106 task_runner_->PostTask(FROM_HERE, base::Bind(&GpuScheduler::RunNextStream, |
| 107 weak_factory_.GetWeakPtr())); |
| 108 } |
| 109 } |
| 110 |
| 111 void GpuScheduler::DescheduleStream(GpuCommandStream* stream) { |
| 112 base::AutoLock lock(lock_); |
| 113 |
| 114 auto it = FindStream(stream); |
| 115 DCHECK(it != streams_.end()); |
| 116 it->ready = false; |
| 117 |
| 118 auto work_queue_it = FindScheduledStream(stream); |
| 119 if (work_queue_it != work_queue_.end()) { |
| 120 work_queue_.erase(work_queue_it); |
| 121 std::make_heap(work_queue_.begin(), work_queue_.end()); |
| 122 } |
| 123 } |
| 124 |
| 125 bool GpuScheduler::ShouldYield() { |
| 126 base::AutoLock lock(lock_); |
| 127 base::TimeTicks now = Now(); |
| 128 // Don't run for less than a minimum time slice to minimize context switches. |
| 129 if (now - running_stream_start_time_ < settings_.min_time_slice) |
| 130 return false; |
| 131 |
| 132 // If we need to reschedule, don't run streams for longer than minimum. |
| 133 if (needs_rescheduling_) |
| 134 return true; |
| 135 |
| 136 // Don't run for too long to share the thread with other tasks. |
| 137 if (now - running_stream_start_time_ >= settings_.max_time_slice) |
| 138 return true; |
| 139 |
| 140 return false; |
| 141 } |
| 142 |
| 143 void GpuScheduler::RunNextStream() { |
| 144 DCHECK(thread_checker_.CalledOnValidThread()); |
| 145 |
| 146 ScheduledStream sched_stream = GetNextStream(); |
| 147 if (sched_stream.stream) { |
| 148 running_stream_start_time_ = Now(); |
| 149 TRACE_EVENT2("gpu", "GpuScheduler::RunNextStream", "priority", |
| 150 GpuStreamPriorityToString(sched_stream.priority), "start_time", |
| 151 running_stream_start_time_); |
| 152 sched_stream.stream->Run(); |
| 153 |
| 154 task_runner_->PostTask(FROM_HERE, base::Bind(&GpuScheduler::RunNextStream, |
| 155 weak_factory_.GetWeakPtr())); |
| 156 } |
| 157 } |
| 158 |
| 159 GpuScheduler::ScheduledStream GpuScheduler::GetNextStream() { |
| 160 base::AutoLock lock(lock_); |
| 161 |
| 162 if (work_queue_.empty()) { |
| 163 BuildWorkQueue(); |
| 164 needs_rescheduling_ = false; |
| 165 } |
| 166 |
| 167 if (work_queue_.empty()) { |
| 168 TRACE_EVENT_ASYNC_END0("gpu", "GpuScheduler::Running", this); |
| 169 running_ = false; |
| 170 return ScheduledStream(); |
| 171 } |
| 172 |
| 173 ScheduledStream sched_stream = work_queue_.front(); |
| 174 std::pop_heap(work_queue_.begin(), work_queue_.end()); |
| 175 work_queue_.pop_back(); |
| 176 |
| 177 return sched_stream; |
| 178 } |
| 179 |
| 180 void GpuScheduler::BuildWorkQueue() { |
| 181 TRACE_EVENT0("gpu", "GpuScheduler::BuildWorkQueue"); |
| 182 work_queue_.clear(); |
| 183 for (const StreamInfo& stream_info : streams_) { |
| 184 if (stream_info.ready) { |
| 185 ScheduledStream sched_stream; |
| 186 sched_stream.stream = stream_info.stream; |
| 187 sched_stream.priority = stream_info.priority; |
| 188 work_queue_.push_back(sched_stream); |
| 189 } |
| 190 } |
| 191 std::make_heap(work_queue_.begin(), work_queue_.end()); |
| 192 } |
| 193 |
| 194 } // namespace gpu |
| OLD | NEW |