Index: gpu/ipc/service/gpu_scheduler.cc
diff --git a/gpu/ipc/service/gpu_scheduler.cc b/gpu/ipc/service/gpu_scheduler.cc
new file mode 100644
index 0000000000000000000000000000000000000000..596a57d118917fdb3139cc39c3bf9c66612113d9
--- /dev/null
+++ b/gpu/ipc/service/gpu_scheduler.cc
@@ -0,0 +1,213 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/service/gpu_scheduler.h"
+
+#include <algorithm>
+
| +#include "base/callback.h"
|
| +#include "base/memory/ptr_util.h"
|
| +#include "base/timer/timer.h"
|
| +#include "base/trace_event/trace_event.h"
|
| +#include "base/trace_event/trace_event_argument.h"
|
| +#include "gpu/command_buffer/service/command_executor.h"
|
| +#include "gpu/ipc/service/gpu_command_stream.h"
|
| +
|
| +namespace gpu {
|
| +
|
| +namespace {
|
| +
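+// Default bounds on how long a single stream may run before ShouldYield()
+// tells it to give up the thread.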
+const int64_t kMinTimeSliceUs = 4000;
+const int64_t kMaxTimeSliceUs = 12000;
+
+}  // anonymous namespace
+
+std::unique_ptr<GpuScheduler> GpuScheduler::Create(
+    scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
+  GpuSchedulerSettings settings;
+  settings.min_time_slice = base::TimeDelta::FromMicroseconds(kMinTimeSliceUs);
+  settings.max_time_slice = base::TimeDelta::FromMicroseconds(kMaxTimeSliceUs);
+  return base::WrapUnique(new GpuScheduler(settings, std::move(task_runner)));
+}
+
+GpuScheduler::GpuScheduler(
+    const GpuSchedulerSettings& settings,
+    scoped_refptr<base::SingleThreadTaskRunner> task_runner)
+    : settings_(settings),
+      task_runner_(std::move(task_runner)),
+      weak_factory_(this) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+}
+
+GpuScheduler::~GpuScheduler() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+}
+
+base::TimeTicks GpuScheduler::Now() {
+  return base::TimeTicks::Now();
+}
+
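+// Registers |stream| with the scheduler. A new stream starts out not ready
+// and is not run until ScheduleStream() is called for it.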
+void GpuScheduler::AddStream(GpuCommandStream* stream,
+                             GpuStreamPriority priority) {
+  base::AutoLock lock(lock_);
+  DCHECK(thread_checker_.CalledOnValidThread());
+  StreamInfo stream_info;
+  stream_info.stream = stream;
+  stream_info.priority = priority;
+  stream_info.ready = false;
+  streams_.push_back(stream_info);
+}
+
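+// Unregisters |stream| and drops it from the pending work queue if present.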
+void GpuScheduler::RemoveStream(GpuCommandStream* stream) {
+  base::AutoLock lock(lock_);
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  auto it = FindStream(stream);
+  DCHECK(it != streams_.end());
+  streams_.erase(it);
+
+  auto work_queue_it = FindScheduledStream(stream);
+  if (work_queue_it != work_queue_.end()) {
+    work_queue_.erase(work_queue_it);
+    std::make_heap(work_queue_.begin(), work_queue_.end());
+  }
+}
+
+std::vector<GpuScheduler::ScheduledStream>::iterator
+GpuScheduler::FindScheduledStream(GpuCommandStream* stream) {
+  return std::find_if(work_queue_.begin(), work_queue_.end(),
+                      [&stream](const ScheduledStream& sched_stream) {
+                        return sched_stream.stream == stream;
+                      });
+}
+
+std::vector<GpuScheduler::StreamInfo>::iterator GpuScheduler::FindStream(
+    GpuCommandStream* stream) {
+  return std::find_if(streams_.begin(), streams_.end(),
+                      [&stream](const StreamInfo& stream_info) {
+                        return stream_info.stream == stream;
+                      });
+}
+
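+// Marks |stream| as ready. A real-time stream that is not already queued sets
+// |needs_rescheduling_| so that running streams yield after the minimum time
+// slice. Starts the run loop if it is idle.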
+void GpuScheduler::ScheduleStream(GpuCommandStream* stream) {
+  base::AutoLock lock(lock_);
+
+  auto it = FindStream(stream);
+  DCHECK(it != streams_.end());
+  it->ready = true;
+  if (it->priority == GpuStreamPriority::REAL_TIME &&
+      FindScheduledStream(stream) == work_queue_.end()) {
+    needs_rescheduling_ = true;
+  }
+
+  if (!running_) {
+    TRACE_EVENT_ASYNC_BEGIN0("gpu", "GpuScheduler::Running", this);
+    running_ = true;
+    task_runner_->PostTask(FROM_HERE, base::Bind(&GpuScheduler::RunNextStream,
+                                                 weak_factory_.GetWeakPtr()));
+  }
+}
+
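+// Marks |stream| as not ready and removes it from the pending work queue.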
+void GpuScheduler::DescheduleStream(GpuCommandStream* stream) {
+  base::AutoLock lock(lock_);
+
+  auto it = FindStream(stream);
+  DCHECK(it != streams_.end());
+  it->ready = false;
+
+  auto work_queue_it = FindScheduledStream(stream);
+  if (work_queue_it != work_queue_.end()) {
+    work_queue_.erase(work_queue_it);
+    std::make_heap(work_queue_.begin(), work_queue_.end());
+  }
+}
+
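+// Returns true if the stream that is currently running should stop and give
+// other streams, and other tasks on this thread, a chance to run.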
+bool GpuScheduler::ShouldYield() {
+  base::AutoLock lock(lock_);
+  base::TimeTicks now = Now();
+  // Don't run for less than a minimum time slice to minimize context switches.
+  if (now - running_stream_start_time_ < settings_.min_time_slice)
+    return false;
+
+  // If we need to reschedule, don't run streams for longer than the minimum.
+  if (needs_rescheduling_)
+    return true;
+
+  // Don't run for too long to share the thread with other tasks.
+  if (now - running_stream_start_time_ >= settings_.max_time_slice)
+    return true;
+
+  return false;
+}
+
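+// Pops the next stream off the work queue and runs it, then reposts itself
+// as long as a stream was available to run.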
+void GpuScheduler::RunNextStream() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  ScheduledStream sched_stream = GetNextStream();
+  if (sched_stream.stream) {
+    running_stream_start_time_ = Now();
+    TRACE_EVENT2("gpu", "GpuScheduler::RunNextStream", "priority",
+                 GpuStreamPriorityToString(sched_stream.priority),
+                 "start_time", running_stream_start_time_);
+    sched_stream.stream->Run();
+
+    task_runner_->PostTask(FROM_HERE, base::Bind(&GpuScheduler::RunNextStream,
+                                                 weak_factory_.GetWeakPtr()));
+  }
+}
+
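+// Pops the front of the work queue heap, rebuilding the queue from the ready
+// streams if it is empty. Returns a default ScheduledStream and stops the run
+// loop when there is no ready work.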
+GpuScheduler::ScheduledStream GpuScheduler::GetNextStream() {
+  base::AutoLock lock(lock_);
+
+  if (work_queue_.empty()) {
+    BuildWorkQueue();
+    needs_rescheduling_ = false;
+  }
+
+  if (work_queue_.empty()) {
+    TRACE_EVENT_ASYNC_END0("gpu", "GpuScheduler::Running", this);
+    running_ = false;
+    return ScheduledStream();
+  }
+
+  ScheduledStream sched_stream = work_queue_.front();
+  std::pop_heap(work_queue_.begin(), work_queue_.end());
+  work_queue_.pop_back();
+
+  return sched_stream;
+}
+
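+// Rebuilds the work queue heap from all streams that are currently ready.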
+void GpuScheduler::BuildWorkQueue() {
+  TRACE_EVENT0("gpu", "GpuScheduler::BuildWorkQueue");
+  work_queue_.clear();
+  for (const StreamInfo& stream_info : streams_) {
+    if (stream_info.ready) {
+      ScheduledStream sched_stream;
+      sched_stream.stream = stream_info.stream;
+      sched_stream.priority = stream_info.priority;
+      work_queue_.push_back(sched_stream);
+    }
+  }
+  std::make_heap(work_queue_.begin(), work_queue_.end());
+}
+
+}  // namespace gpu