Index: gpu/ipc/service/gpu_channel.cc
diff --git a/gpu/ipc/service/gpu_channel.cc b/gpu/ipc/service/gpu_channel.cc
index 1d67b33ae2d0022e75a54dbdefcb57551c25c2d9..0d500deac425d4251d6a76e66dd7ccdd82418421 100644
--- a/gpu/ipc/service/gpu_channel.cc
+++ b/gpu/ipc/service/gpu_channel.cc
@@ -34,6 +34,7 @@
 #include "gpu/command_buffer/service/image_factory.h"
 #include "gpu/command_buffer/service/mailbox_manager.h"
 #include "gpu/command_buffer/service/preemption_flag.h"
+#include "gpu/command_buffer/service/scheduler.h"
 #include "gpu/ipc/common/gpu_messages.h"
 #include "gpu/ipc/service/gpu_channel_manager.h"
 #include "gpu/ipc/service/gpu_channel_manager_delegate.h"
@@ -99,21 +100,20 @@ void SyncChannelFilteredSender::RemoveFilter(IPC::MessageFilter* filter) {
 GpuChannelMessageQueue::GpuChannelMessageQueue(
     GpuChannel* channel,
+    scoped_refptr<SyncPointOrderData> sync_point_order_data,
     scoped_refptr<base::SingleThreadTaskRunner> main_task_runner,
     scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
     scoped_refptr<PreemptionFlag> preempting_flag,
-    scoped_refptr<PreemptionFlag> preempted_flag,
-    SyncPointManager* sync_point_manager)
+    scoped_refptr<PreemptionFlag> preempted_flag)
     : channel_(channel),
       max_preemption_time_(
          base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs)),
       timer_(new base::OneShotTimer),
-      sync_point_order_data_(sync_point_manager->CreateSyncPointOrderData()),
+      sync_point_order_data_(std::move(sync_point_order_data)),
       main_task_runner_(std::move(main_task_runner)),
       io_task_runner_(std::move(io_task_runner)),
       preempting_flag_(std::move(preempting_flag)),
-      preempted_flag_(std::move(preempted_flag)),
-      sync_point_manager_(sync_point_manager) {
+      preempted_flag_(std::move(preempted_flag)) {
   timer_->SetTaskRunner(io_task_runner_);
   io_thread_checker_.DetachFromThread();
 }
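
The queue no longer reaches into SyncPointManager itself; its SyncPointOrderData is injected, so the caller decides where the order data comes from. A minimal sketch of the new call-site shape (it mirrors the GpuChannel constructor change further down in this patch):

    // Sketch: the caller creates the order data and hands it in.
    message_queue_ = new GpuChannelMessageQueue(
        this, sync_point_manager->CreateSyncPointOrderData(), task_runner,
        io_task_runner, preempting_flag, preempted_flag);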
@@ -439,9 +439,11 @@ void GpuChannelMessageQueue::TransitionToWouldPreemptDescheduled() {
 GpuChannelMessageFilter::GpuChannelMessageFilter(
     GpuChannel* gpu_channel,
+    Scheduler* scheduler,
     scoped_refptr<GpuChannelMessageQueue> message_queue,
     scoped_refptr<base::SingleThreadTaskRunner> main_task_runner)
     : gpu_channel_(gpu_channel),
+      scheduler_(scheduler),
       message_queue_(std::move(message_queue)),
       main_task_runner_(std::move(main_task_runner)) {}
@@ -454,6 +456,21 @@ void GpuChannelMessageFilter::Destroy() {
   gpu_channel_ = nullptr;
 }
 
+void GpuChannelMessageFilter::AddRoute(int32_t route_id,
+                                       SequenceId sequence_id) {
+  base::AutoLock auto_lock(gpu_channel_lock_);
+  DCHECK(gpu_channel_);
+  DCHECK(scheduler_);
+  route_sequences_[route_id] = sequence_id;
+}
+
+void GpuChannelMessageFilter::RemoveRoute(int32_t route_id) {
+  base::AutoLock auto_lock(gpu_channel_lock_);
+  DCHECK(gpu_channel_);
+  DCHECK(scheduler_);
+  route_sequences_.erase(route_id);
+}
+
 void GpuChannelMessageFilter::OnFilterAdded(IPC::Channel* channel) {
   DCHECK(!ipc_channel_);
   ipc_channel_ = channel;
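
AddRoute and RemoveRoute keep a route-id-to-sequence-id map on the filter so the IO thread can resolve a scheduler sequence without a hop to the main thread. A sketch of the state being maintained, assuming a member declared along these lines in gpu_channel.h (the header is not part of this diff, and the lookup helper below is purely illustrative):

    // Assumed member, guarded by gpu_channel_lock_:
    //   base::flat_map<int32_t, SequenceId> route_sequences_;
    SequenceId GpuChannelMessageFilter::LookupSequence(int32_t route_id) {
      base::AutoLock auto_lock(gpu_channel_lock_);
      auto it = route_sequences_.find(route_id);
      return it == route_sequences_.end() ? SequenceId() : it->second;
    }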
@@ -504,6 +521,9 @@ void GpuChannelMessageFilter::RemoveChannelFilter(
 bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
   DCHECK(ipc_channel_);
+  if (!gpu_channel_)
+    return MessageErrorHandler(message, "Channel destroyed");
+
   if (message.should_unblock() || message.is_reply())
     return MessageErrorHandler(message, "Unexpected message type");
@@ -530,6 +550,23 @@ bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
     main_task_runner_->PostTask(FROM_HERE,
                                 base::Bind(&GpuChannel::HandleOutOfOrderMessage,
                                            gpu_channel_->AsWeakPtr(), message));
+  } else if (scheduler_) {
+    SequenceId sequence_id = route_sequences_[message.routing_id()];
+    if (sequence_id.is_null())
+      return MessageErrorHandler(message, "Invalid route");
+
+    std::vector<SyncToken> sync_token_fences;
+    if (message.type() == GpuCommandBufferMsg_AsyncFlush::ID) {
+      GpuCommandBufferMsg_AsyncFlush::Param params;
+      if (!GpuCommandBufferMsg_AsyncFlush::Read(&message, &params))
+        return MessageErrorHandler(message, "Invalid flush message");
+      sync_token_fences = std::get<3>(params);
+    }
+
+    scheduler_->ScheduleTask(sequence_id,
+                             base::BindOnce(&GpuChannel::HandleMessage,
+                                            gpu_channel_->AsWeakPtr(), message),
+                             sync_token_fences);
   } else {
     // Message queue takes care of PostTask.
     message_queue_->PushBackMessage(message);
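
On the scheduler path every routed message becomes a task on its route's sequence, and an AsyncFlush additionally carries sync token fences that gate when the task may run. A hedged restatement of the contract being relied on here (gpu/command_buffer/service/scheduler.h is the authoritative source):

    // The task is appended to |sequence_id|'s queue and is not run until
    // every SyncToken in |sync_token_fences| has been released, so a flush
    // cannot execute ahead of the fences it waits on.
    scheduler_->ScheduleTask(sequence_id, std::move(task), sync_token_fences);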
@@ -561,6 +598,7 @@ FilteredSender::~FilteredSender() = default;
 GpuChannel::GpuChannel(
     GpuChannelManager* gpu_channel_manager,
+    Scheduler* scheduler,
     SyncPointManager* sync_point_manager,
     GpuWatchdogThread* watchdog,
     scoped_refptr<gl::GLShareGroup> share_group,
@@ -573,6 +611,7 @@ GpuChannel::GpuChannel(
     uint64_t client_tracing_id,
     bool is_gpu_host)
     : gpu_channel_manager_(gpu_channel_manager),
+      scheduler_(scheduler),
       sync_point_manager_(sync_point_manager),
       preempting_flag_(preempting_flag),
       preempted_flag_(preempted_flag),
@@ -585,14 +624,17 @@ GpuChannel::GpuChannel(
       watchdog_(watchdog),
       is_gpu_host_(is_gpu_host),
       weak_factory_(this) {
-  DCHECK(gpu_channel_manager);
-  DCHECK(client_id);
+  DCHECK(gpu_channel_manager_);
+  DCHECK(client_id_);
 
-  message_queue_ = new GpuChannelMessageQueue(this, task_runner, io_task_runner,
-                                              preempting_flag, preempted_flag,
-                                              sync_point_manager);
+  if (!scheduler_) {
+    message_queue_ = new GpuChannelMessageQueue(
+        this, sync_point_manager->CreateSyncPointOrderData(), task_runner,
+        io_task_runner, preempting_flag, preempted_flag);
+  }
 
-  filter_ = new GpuChannelMessageFilter(this, message_queue_, task_runner);
+  filter_ =
+      new GpuChannelMessageFilter(this, scheduler, message_queue_, task_runner);
 }
 
 GpuChannel::~GpuChannel() {
@@ -602,7 +644,15 @@ GpuChannel::~GpuChannel() {
   // Destroy the filter first so that the message queue gets no more messages.
   filter_->Destroy();
 
-  message_queue_->Destroy();
+  // Release the IPC channel. This acts like a flag for any posted tasks.
+  channel_ = nullptr;
+
+  if (scheduler_) {
+    for (const auto& kv : stream_sequences_)
+      scheduler_->DestroySequence(kv.second);
+  } else {
+    message_queue_->Destroy();
+  }
 
   DCHECK(!preempting_flag_ || !preempting_flag_->IsSet());
 }
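
Teardown order matters: the filter is destroyed first so no new IPC arrives, channel_ is nulled so already-posted tasks become no-ops (sync messages still get error replies, see HandleMessage below), and only then are the scheduler sequences destroyed. The loop assumes a member declared in gpu_channel.h roughly as follows (the header is not in this diff):

    // One lazily created scheduler sequence per stream id; populated in
    // CreateCommandBuffer below, destroyed here.
    base::flat_map<int32_t, SequenceId> stream_sequences_;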
@@ -656,13 +706,19 @@ bool GpuChannel::Send(IPC::Message* message) {
 }
 
 void GpuChannel::OnCommandBufferScheduled(GpuCommandBufferStub* stub) {
-  message_queue_->SetScheduled(true);
-  // TODO(sunnyps): Enable gpu scheduler task queue for stub's sequence.
+  if (scheduler_) {
+    scheduler_->EnableSequence(stub->sequence_id());
+  } else {
+    message_queue_->SetScheduled(true);
+  }
 }
 
 void GpuChannel::OnCommandBufferDescheduled(GpuCommandBufferStub* stub) {
-  message_queue_->SetScheduled(false);
-  // TODO(sunnyps): Disable gpu scheduler task queue for stub's sequence.
+  if (scheduler_) {
+    scheduler_->DisableSequence(stub->sequence_id());
+  } else {
+    message_queue_->SetScheduled(false);
+  }
 }
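
Scheduling state becomes per-sequence instead of per-channel: descheduling one stub disables only that stub's sequence, so other streams on the same channel keep running. Illustrative only (the stub names are hypothetical):

    // Two stubs on different streams of the same channel:
    scheduler_->DisableSequence(stub_a->sequence_id());  // blocks only A
    // stub_b's sequence stays enabled and keeps executing tasks, whereas
    // the legacy message_queue_->SetScheduled(false) stalled every stub
    // on the channel.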
 GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32_t route_id) {
@@ -685,11 +741,14 @@ void GpuChannel::MarkAllContextsLost() {
 bool GpuChannel::AddRoute(int32_t route_id,
                           SequenceId sequence_id,
                           IPC::Listener* listener) {
-  // TODO(sunnyps): Add route id to sequence id mapping to filter.
+  if (scheduler_)
+    filter_->AddRoute(route_id, sequence_id);
   return router_.AddRoute(route_id, listener);
 }
 
 void GpuChannel::RemoveRoute(int32_t route_id) {
+  if (scheduler_)
+    filter_->RemoveRoute(route_id);
   router_.RemoveRoute(route_id);
 }
@@ -707,6 +766,38 @@ bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) {
   return handled;
 }
+void GpuChannel::HandleMessage(const IPC::Message& msg) {
+  // Even if we're in the process of being destroyed, be sure to reply to sync
+  // messages.
+  if (!channel_) {
+    if (msg.is_sync()) {
+      IPC::Message* reply = IPC::SyncMessage::GenerateReply(&msg);
+      reply->set_reply_error();
+      Send(reply);

    [piman, 2017/05/09 00:48:38]
    nit: if channel_ is nullptr, Send won't do anything.

    [sunnyps, 2017/05/10 23:15:15]
    Done.

+    }
+    return;
+  }
+
+  int32_t routing_id = msg.routing_id();
+  GpuCommandBufferStub* stub = LookupCommandBuffer(routing_id);
+
+  DCHECK(!stub || stub->IsScheduled());
+
+  DVLOG(1) << "received message @" << &msg << " on channel @" << this
+           << " with type " << msg.type();
+
+  HandleMessageHelper(msg);
+
+  // If the stub was descheduled or yielded while processing the message,
+  // reschedule the remainder as a continuation on the same sequence.
+  if (stub && (stub->HasUnprocessedCommands() || !stub->IsScheduled())) {
+    DCHECK((uint32_t)GpuCommandBufferMsg_AsyncFlush::ID == msg.type() ||
+           (uint32_t)GpuCommandBufferMsg_WaitSyncToken::ID == msg.type());
+    scheduler_->ContinueTask(
+        stub->sequence_id(),
+        base::BindOnce(&GpuChannel::HandleMessage, AsWeakPtr(), msg));
+  }
+}
+
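
When a flush leaves unprocessed commands, or the stub is descheduled mid-message, HandleMessage re-posts itself with ContinueTask, which enqueues the closure as the continuation of the currently running task so the remaining work stays ahead of anything scheduled later on that sequence (scheduler.h defines the exact contract). The generic shape of this cooperative-yield pattern, sketched with hypothetical names:

    // A task that does a bounded chunk of work, then reschedules itself.
    void RunSome(Scheduler* scheduler, SequenceId sequence_id, Work* work) {
      work->ProcessChunk();
      if (!work->Done()) {
        scheduler->ContinueTask(
            sequence_id,
            base::BindOnce(&RunSome, scheduler, sequence_id, work));
      }
    }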
 void GpuChannel::HandleMessageOnQueue() {
   const GpuChannelMessage* channel_msg =
       message_queue_->BeginMessageProcessing();
@@ -799,7 +890,7 @@ std::unique_ptr<GpuCommandBufferStub> GpuChannel::CreateCommandBuffer(
     std::unique_ptr<base::SharedMemory> shared_state_shm) {
   if (init_params.surface_handle != kNullSurfaceHandle && !is_gpu_host_) {
     DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): attempt to create a "
-                   "view context on a non-priviledged channel";
+                   "view context on a non-privileged channel";
     return nullptr;
   }
@@ -819,9 +910,9 @@ std::unique_ptr<GpuCommandBufferStub> GpuChannel::CreateCommandBuffer(
   }
 
   GpuStreamPriority stream_priority = init_params.stream_priority;
-  if (stream_priority == GpuStreamPriority::REAL_TIME && !is_gpu_host_) {
-    DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): real time stream "
-                   "priority not allowed";
+  if (stream_priority <= GpuStreamPriority::HIGH && !is_gpu_host_) {
+    DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): high priority stream "
+                   "not allowed on a non-privileged channel";
     return nullptr;
   }
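
The new check, stream_priority <= GpuStreamPriority::HIGH, widens the old REAL_TIME-only restriction to cover HIGH as well, and it relies on the enum placing higher priorities at smaller values. A sketch of the assumed ordering (the real definition, in gpu/ipc/common, is authoritative):

    enum class GpuStreamPriority { REAL_TIME, HIGH, NORMAL, LOW, LAST = LOW };
    // priority <= HIGH matches REAL_TIME and HIGH, so unprivileged clients
    // are limited to NORMAL and LOW streams.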
@@ -842,8 +933,16 @@ std::unique_ptr<GpuCommandBufferStub> GpuChannel::CreateCommandBuffer(
   CommandBufferId command_buffer_id =
       GenerateCommandBufferId(client_id_, route_id);
 
-  // TODO(sunnyps): Lookup sequence id using stream id to sequence id map.
-  SequenceId sequence_id = message_queue_->sequence_id();
+  SequenceId sequence_id;
+  if (scheduler_) {
+    sequence_id = stream_sequences_[stream_id];
+    if (sequence_id.is_null()) {
+      sequence_id = scheduler_->CreateSequence(stream_priority);
+      stream_sequences_[stream_id] = sequence_id;
+    }
+  } else {
+    sequence_id = message_queue_->sequence_id();
+  }
 
   std::unique_ptr<GpuCommandBufferStub> stub(GpuCommandBufferStub::Create(
       this, share_group, init_params, command_buffer_id, sequence_id, stream_id,
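
Sequence lookup is a lazy get-or-create keyed by stream id: the first command buffer on a stream creates the sequence at that stream's priority, and later command buffers on the same stream reuse it (their stream_priority is ignored; the first creator wins). The same logic, factored into a hypothetical helper for clarity:

    SequenceId GpuChannel::GetOrCreateSequence(int32_t stream_id,
                                               GpuStreamPriority priority) {
      SequenceId& sequence_id = stream_sequences_[stream_id];  // null if new
      if (sequence_id.is_null())
        sequence_id = scheduler_->CreateSequence(priority);
      return sequence_id;
    }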