Index: gpu/ipc/service/gpu_channel.cc
diff --git a/gpu/ipc/service/gpu_channel.cc b/gpu/ipc/service/gpu_channel.cc
index 1d67b33ae2d0022e75a54dbdefcb57551c25c2d9..37781d45f01464ec5d19edaa2d8e3e98b6acdf2b 100644
--- a/gpu/ipc/service/gpu_channel.cc
+++ b/gpu/ipc/service/gpu_channel.cc
@@ -34,6 +34,7 @@
#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/preemption_flag.h"
+#include "gpu/command_buffer/service/scheduler.h"
#include "gpu/ipc/common/gpu_messages.h"
#include "gpu/ipc/service/gpu_channel_manager.h"
#include "gpu/ipc/service/gpu_channel_manager_delegate.h"
@@ -99,21 +100,20 @@ void SyncChannelFilteredSender::RemoveFilter(IPC::MessageFilter* filter) {
GpuChannelMessageQueue::GpuChannelMessageQueue(
GpuChannel* channel,
+ scoped_refptr<SyncPointOrderData> sync_point_order_data,
scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
scoped_refptr<PreemptionFlag> preempting_flag,
- scoped_refptr<PreemptionFlag> preempted_flag,
- SyncPointManager* sync_point_manager)
+ scoped_refptr<PreemptionFlag> preempted_flag)
: channel_(channel),
max_preemption_time_(
base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs)),
timer_(new base::OneShotTimer),
- sync_point_order_data_(sync_point_manager->CreateSyncPointOrderData()),
+ sync_point_order_data_(std::move(sync_point_order_data)),
main_task_runner_(std::move(main_task_runner)),
io_task_runner_(std::move(io_task_runner)),
preempting_flag_(std::move(preempting_flag)),
- preempted_flag_(std::move(preempted_flag)),
- sync_point_manager_(sync_point_manager) {
+ preempted_flag_(std::move(preempted_flag)) {
timer_->SetTaskRunner(io_task_runner_);
io_thread_checker_.DetachFromThread();
}
@@ -123,20 +123,6 @@ GpuChannelMessageQueue::~GpuChannelMessageQueue() {
}
void GpuChannelMessageQueue::Destroy() {
- // We guarantee that the queue will no longer be modified after Destroy is
- // called, it is now safe to modify the queue without the lock. All public
- // facing modifying functions check enabled_ while all private modifying
- // functions DCHECK(enabled_) to enforce this.
- while (!channel_messages_.empty()) {
- const IPC::Message& msg = channel_messages_.front()->message;
- if (msg.is_sync()) {
- IPC::Message* reply = IPC::SyncMessage::GenerateReply(&msg);
- reply->set_reply_error();
- channel_->Send(reply);
- }
- channel_messages_.pop_front();
- }
-
sync_point_order_data_->Destroy();
if (preempting_flag_)
@@ -439,9 +425,11 @@ void GpuChannelMessageQueue::TransitionToWouldPreemptDescheduled() {
GpuChannelMessageFilter::GpuChannelMessageFilter(
GpuChannel* gpu_channel,
+ Scheduler* scheduler,
scoped_refptr<GpuChannelMessageQueue> message_queue,
scoped_refptr<base::SingleThreadTaskRunner> main_task_runner)
: gpu_channel_(gpu_channel),
+ scheduler_(scheduler),
message_queue_(std::move(message_queue)),
main_task_runner_(std::move(main_task_runner)) {}
@@ -454,6 +442,21 @@ void GpuChannelMessageFilter::Destroy() {
gpu_channel_ = nullptr;
}
+void GpuChannelMessageFilter::AddRoute(int32_t route_id,
+ SequenceId sequence_id) {
+ base::AutoLock auto_lock(gpu_channel_lock_);
+ DCHECK(gpu_channel_);
+ DCHECK(scheduler_);
+ route_sequences_[route_id] = sequence_id;
+}
+
+void GpuChannelMessageFilter::RemoveRoute(int32_t route_id) {
+ base::AutoLock auto_lock(gpu_channel_lock_);
+ DCHECK(gpu_channel_);
+ DCHECK(scheduler_);
+ route_sequences_.erase(route_id);
+}
+
void GpuChannelMessageFilter::OnFilterAdded(IPC::Channel* channel) {
DCHECK(!ipc_channel_);
ipc_channel_ = channel;
@@ -504,6 +507,9 @@ void GpuChannelMessageFilter::RemoveChannelFilter(
bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
DCHECK(ipc_channel_);
+ if (!gpu_channel_)
+ return MessageErrorHandler(message, "Channel destroyed");
+
if (message.should_unblock() || message.is_reply())
return MessageErrorHandler(message, "Unexpected message type");
@@ -530,6 +536,23 @@ bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
main_task_runner_->PostTask(FROM_HERE,
base::Bind(&GpuChannel::HandleOutOfOrderMessage,
gpu_channel_->AsWeakPtr(), message));
+ } else if (scheduler_) {
+ SequenceId sequence_id = route_sequences_[message.routing_id()];
+ if (sequence_id.is_null())
+ return MessageErrorHandler(message, "Invalid route");
+
+ std::vector<SyncToken> sync_token_fences;
+ if (message.type() == GpuCommandBufferMsg_AsyncFlush::ID) {
+ GpuCommandBufferMsg_AsyncFlush::Param params;
+ if (!GpuCommandBufferMsg_AsyncFlush::Read(&message, &params))
+ return MessageErrorHandler(message, "Invalid flush message");
+ sync_token_fences = std::get<3>(params);
+ }
+
+ scheduler_->ScheduleTask(sequence_id,
+ base::BindOnce(&GpuChannel::HandleMessage,
+ gpu_channel_->AsWeakPtr(), message),
+ sync_token_fences);
} else {
// Message queue takes care of PostTask.
message_queue_->PushBackMessage(message);
@@ -561,6 +584,7 @@ FilteredSender::~FilteredSender() = default;
GpuChannel::GpuChannel(
GpuChannelManager* gpu_channel_manager,
+ Scheduler* scheduler,
SyncPointManager* sync_point_manager,
GpuWatchdogThread* watchdog,
scoped_refptr<gl::GLShareGroup> share_group,
@@ -573,6 +597,7 @@ GpuChannel::GpuChannel(
uint64_t client_tracing_id,
bool is_gpu_host)
: gpu_channel_manager_(gpu_channel_manager),
+ scheduler_(scheduler),
sync_point_manager_(sync_point_manager),
preempting_flag_(preempting_flag),
preempted_flag_(preempted_flag),
@@ -585,14 +610,17 @@ GpuChannel::GpuChannel(
watchdog_(watchdog),
is_gpu_host_(is_gpu_host),
weak_factory_(this) {
- DCHECK(gpu_channel_manager);
- DCHECK(client_id);
+ DCHECK(gpu_channel_manager_);
+ DCHECK(client_id_);
- message_queue_ = new GpuChannelMessageQueue(this, task_runner, io_task_runner,
- preempting_flag, preempted_flag,
- sync_point_manager);
+ if (!scheduler_) {
+ message_queue_ = new GpuChannelMessageQueue(
+ this, sync_point_manager->CreateSyncPointOrderData(), task_runner,
+ io_task_runner, preempting_flag, preempted_flag);
+ }
- filter_ = new GpuChannelMessageFilter(this, message_queue_, task_runner);
+ filter_ =
+ new GpuChannelMessageFilter(this, scheduler, message_queue_, task_runner);
}
GpuChannel::~GpuChannel() {
@@ -602,7 +630,12 @@ GpuChannel::~GpuChannel() {
// Destroy the filter first so that the message queue gets no more messages.
filter_->Destroy();
- message_queue_->Destroy();
+ if (scheduler_) {
+ for (const auto& kv : stream_sequences_)
+ scheduler_->DestroySequence(kv.second);
+ } else {
+ message_queue_->Destroy();
+ }
DCHECK(!preempting_flag_ || !preempting_flag_->IsSet());
}
@@ -656,13 +689,19 @@ bool GpuChannel::Send(IPC::Message* message) {
}
void GpuChannel::OnCommandBufferScheduled(GpuCommandBufferStub* stub) {
- message_queue_->SetScheduled(true);
- // TODO(sunnyps): Enable gpu scheduler task queue for stub's sequence.
+ if (scheduler_) {
+ scheduler_->EnableSequence(stub->sequence_id());
+ } else {
+ message_queue_->SetScheduled(true);
+ }
}
void GpuChannel::OnCommandBufferDescheduled(GpuCommandBufferStub* stub) {
- message_queue_->SetScheduled(false);
- // TODO(sunnyps): Disable gpu scheduler task queue for stub's sequence.
+ if (scheduler_) {
+ scheduler_->DisableSequence(stub->sequence_id());
+ } else {
+ message_queue_->SetScheduled(false);
+ }
}
GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32_t route_id) {
@@ -685,11 +724,14 @@ void GpuChannel::MarkAllContextsLost() {
bool GpuChannel::AddRoute(int32_t route_id,
SequenceId sequence_id,
IPC::Listener* listener) {
- // TODO(sunnyps): Add route id to sequence id mapping to filter.
+ if (scheduler_)
+ filter_->AddRoute(route_id, sequence_id);
return router_.AddRoute(route_id, listener);
}
void GpuChannel::RemoveRoute(int32_t route_id) {
+ if (scheduler_)
+ filter_->RemoveRoute(route_id);
router_.RemoveRoute(route_id);
}
@@ -707,6 +749,27 @@ bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
return handled;
}
+void GpuChannel::HandleMessage(const IPC::Message& msg) {
+ int32_t routing_id = msg.routing_id();
+ GpuCommandBufferStub* stub = LookupCommandBuffer(routing_id);
+
+ DCHECK(!stub || stub->IsScheduled());
+
+ DVLOG(1) << "received message @" << &msg << " on channel @" << this
+ << " with type " << msg.type();
+
+ HandleMessageHelper(msg);
+
+ // If we got descheduled or yielded while processing the message, continue
+ // it later as a new task on the same sequence.
+ if (stub && (stub->HasUnprocessedCommands() || !stub->IsScheduled())) {
+ DCHECK((uint32_t)GpuCommandBufferMsg_AsyncFlush::ID == msg.type() ||
+ (uint32_t)GpuCommandBufferMsg_WaitSyncToken::ID == msg.type());
+ scheduler_->ContinueTask(
+ stub->sequence_id(),
+ base::BindOnce(&GpuChannel::HandleMessage, AsWeakPtr(), msg));
+ }
+}
+
void GpuChannel::HandleMessageOnQueue() {
const GpuChannelMessage* channel_msg =
message_queue_->BeginMessageProcessing();
@@ -799,7 +862,7 @@ std::unique_ptr<GpuCommandBufferStub> GpuChannel::CreateCommandBuffer(
std::unique_ptr<base::SharedMemory> shared_state_shm) {
if (init_params.surface_handle != kNullSurfaceHandle && !is_gpu_host_) {
DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): attempt to create a "
- "view context on a non-priviledged channel";
+ "view context on a non-privileged channel";
return nullptr;
}
@@ -818,10 +881,10 @@ std::unique_ptr<GpuCommandBufferStub> GpuChannel::CreateCommandBuffer(
return nullptr;
}
- GpuStreamPriority stream_priority = init_params.stream_priority;
- if (stream_priority == GpuStreamPriority::REAL_TIME && !is_gpu_host_) {
- DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): real time stream "
- "priority not allowed";
+ SchedulingPriority stream_priority = init_params.stream_priority;
+ if (stream_priority <= SchedulingPriority::kHigh && !is_gpu_host_) {
+ DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): high priority stream "
+ "not allowed on a non-privileged channel";
+ return nullptr;
}
@@ -842,8 +905,16 @@ std::unique_ptr<GpuCommandBufferStub> GpuChannel::CreateCommandBuffer(
CommandBufferId command_buffer_id =
GenerateCommandBufferId(client_id_, route_id);
- // TODO(sunnyps): Lookup sequence id using stream id to sequence id map.
- SequenceId sequence_id = message_queue_->sequence_id();
+ SequenceId sequence_id;
+ if (scheduler_) {
+ sequence_id = stream_sequences_[stream_id];
+ if (sequence_id.is_null()) {
+ sequence_id = scheduler_->CreateSequence(stream_priority);
+ stream_sequences_[stream_id] = sequence_id;
+ }
+ } else {
+ sequence_id = message_queue_->sequence_id();
+ }
std::unique_ptr<GpuCommandBufferStub> stub(GpuCommandBufferStub::Create(
this, share_group, init_params, command_buffer_id, sequence_id, stream_id,