Chromium Code Reviews

| Index: gpu/ipc/service/gpu_channel.cc |
| diff --git a/gpu/ipc/service/gpu_channel.cc b/gpu/ipc/service/gpu_channel.cc |
| index 08213b66d18ef02b6e3cb504b3d9d3a2c84876d5..dad0dcdd95dece2691a1d077703da3a459744506 100644 |
| --- a/gpu/ipc/service/gpu_channel.cc |
| +++ b/gpu/ipc/service/gpu_channel.cc |
| @@ -88,8 +88,7 @@ GpuChannelMessageQueue::GpuChannelMessageQueue( |
| scoped_refptr<PreemptionFlag> preempting_flag, |
| scoped_refptr<PreemptionFlag> preempted_flag, |
| SyncPointManager* sync_point_manager) |
| - : enabled_(true), |
| - scheduled_(true), |
| + : scheduled_(true), |
| channel_(channel), |
| preemption_state_(IDLE), |
| max_preemption_time_( |
| @@ -105,17 +104,10 @@ GpuChannelMessageQueue::GpuChannelMessageQueue( |
| } |
| GpuChannelMessageQueue::~GpuChannelMessageQueue() { |
| - DCHECK(!enabled_); |
| DCHECK(channel_messages_.empty()); |
| } |
| -void GpuChannelMessageQueue::Disable() { |
| - { |
| - base::AutoLock auto_lock(channel_lock_); |
| - DCHECK(enabled_); |
| - enabled_ = false; |
| - } |
| - |
| +void GpuChannelMessageQueue::Destroy() { |
| // We guarantee that the queues will no longer be modified after enabled_ |
| // is set to false, it is now safe to modify the queue without the lock. |
| // All public facing modifying functions check enabled_ while all |
| @@ -130,18 +122,12 @@ void GpuChannelMessageQueue::Disable() { |
| channel_messages_.pop_front(); |
| } |
| - if (sync_point_order_data_) { |
| - sync_point_order_data_->Destroy(); |
| - sync_point_order_data_ = nullptr; |
| - } |
| + sync_point_order_data_->Destroy(); |
| + // Destroy timer on io thread. |
| io_task_runner_->PostTask( |
| - FROM_HERE, base::Bind(&GpuChannelMessageQueue::DisableIO, this)); |
| -} |
| - |
| -void GpuChannelMessageQueue::DisableIO() { |
| - DCHECK(io_thread_checker_.CalledOnValidThread()); |
| - timer_ = nullptr; |
| + FROM_HERE, base::Bind([](std::unique_ptr<base::OneShotTimer>) {}, |
| + base::Passed(&timer_))); |
| } |
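For reference: the empty lambda bound with base::Passed above exists only to move ownership of the timer into a task that runs on the IO thread, where the base::OneShotTimer must be destroyed because it was created and started there. A roughly equivalent spelling, assuming base::SequencedTaskRunner::DeleteSoon is available in this tree, would be:

  // Hypothetical alternative to the no-op lambda: hand the raw pointer to the
  // IO task runner and let the timer be deleted over there.
  io_task_runner_->DeleteSoon(FROM_HERE, timer_.release());

Either form guarantees the timer's destructor runs on the thread that owns it.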
| bool GpuChannelMessageQueue::IsScheduled() const { |
| @@ -151,12 +137,11 @@ bool GpuChannelMessageQueue::IsScheduled() const { |
| void GpuChannelMessageQueue::SetScheduled(bool scheduled) { |
| base::AutoLock lock(channel_lock_); |
| - DCHECK(enabled_); |
| if (scheduled_ == scheduled) |
| return; |
| scheduled_ = scheduled; |
| if (scheduled) |
| - channel_->PostHandleMessage(); |
| + channel_->PostHandleMessageOnQueue(); |
| if (preempting_flag_) { |
| io_task_runner_->PostTask( |
| FROM_HERE, |
| @@ -164,41 +149,28 @@ void GpuChannelMessageQueue::SetScheduled(bool scheduled) { |
| } |
| } |
| -bool GpuChannelMessageQueue::PushBackMessage(const IPC::Message& message) { |
| +void GpuChannelMessageQueue::PushBackMessage(const IPC::Message& message) { |
| base::AutoLock auto_lock(channel_lock_); |
| - if (enabled_) { |
| - if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || |
| - message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { |
| - channel_->PostHandleOutOfOrderMessage(message); |
| - return true; |
| - } |
| - |
| - uint32_t order_num = |
| - sync_point_order_data_->GenerateUnprocessedOrderNumber(); |
| - std::unique_ptr<GpuChannelMessage> msg( |
| - new GpuChannelMessage(message, order_num, base::TimeTicks::Now())); |
| + uint32_t order_num = sync_point_order_data_->GenerateUnprocessedOrderNumber(); |
| + std::unique_ptr<GpuChannelMessage> msg( |
| + new GpuChannelMessage(message, order_num, base::TimeTicks::Now())); |
| - if (channel_messages_.empty()) { |
| - DCHECK(scheduled_); |
| - channel_->PostHandleMessage(); |
| - } |
| - |
| - channel_messages_.push_back(std::move(msg)); |
| + if (channel_messages_.empty()) { |
| + DCHECK(scheduled_); |
| + channel_->PostHandleMessageOnQueue(); |
| + } |
| - if (preempting_flag_) |
| - UpdatePreemptionStateHelper(); |
| + channel_messages_.push_back(std::move(msg)); |
| - return true; |
| - } |
| - return false; |
| + if (preempting_flag_) |
| + UpdatePreemptionStateHelper(); |
| } |
| const GpuChannelMessage* GpuChannelMessageQueue::BeginMessageProcessing() { |
| base::AutoLock auto_lock(channel_lock_); |
| - DCHECK(enabled_); |
| // If we have been preempted by another channel, just post a task to wake up. |
| if (preempted_flag_ && preempted_flag_->IsSet()) { |
| - channel_->PostHandleMessage(); |
| + channel_->PostHandleMessageOnQueue(); |
| return nullptr; |
| } |
| if (channel_messages_.empty()) |
| @@ -214,7 +186,7 @@ void GpuChannelMessageQueue::PauseMessageProcessing() { |
| // If we have been preempted by another channel, just post a task to wake up. |
| if (scheduled_) |
| - channel_->PostHandleMessage(); |
| + channel_->PostHandleMessageOnQueue(); |
| sync_point_order_data_->PauseProcessingOrderNumber( |
| channel_messages_.front()->order_number); |
| @@ -230,7 +202,7 @@ void GpuChannelMessageQueue::FinishMessageProcessing() { |
| channel_messages_.pop_front(); |
| if (!channel_messages_.empty()) |
| - channel_->PostHandleMessage(); |
| + channel_->PostHandleMessageOnQueue(); |
| if (preempting_flag_) { |
| io_task_runner_->PostTask( |
| @@ -430,69 +402,79 @@ void GpuChannelMessageQueue::TransitionToWouldPreemptDescheduled() { |
| TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0); |
| } |
| -GpuChannelMessageFilter::GpuChannelMessageFilter( |
| - scoped_refptr<GpuChannelMessageQueue> message_queue) |
| - : message_queue_(std::move(message_queue)), |
| - channel_(nullptr), |
| - peer_pid_(base::kNullProcessId) {} |
| +GpuChannelMessageFilter::GpuChannelMessageFilter(GpuChannel* gpu_channel) |
| + : gpu_channel_(gpu_channel) {} |
| -GpuChannelMessageFilter::~GpuChannelMessageFilter() {} |
| +GpuChannelMessageFilter::~GpuChannelMessageFilter() { |
| + DCHECK(!gpu_channel_); |
| +} |
| + |
| +void GpuChannelMessageFilter::Destroy() { |
| + base::AutoLock auto_lock(lock_); |
| + gpu_channel_ = nullptr; |
| +} |
| void GpuChannelMessageFilter::OnFilterAdded(IPC::Channel* channel) { |
| - DCHECK(!channel_); |
| - channel_ = channel; |
| - for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { |
| - filter->OnFilterAdded(channel_); |
| - } |
| + base::AutoLock auto_lock(lock_); |
| + DCHECK(!ipc_channel_); |
| + ipc_channel_ = channel; |
| + for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) |
| + filter->OnFilterAdded(ipc_channel_); |
| } |
| void GpuChannelMessageFilter::OnFilterRemoved() { |
| - for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { |
| + base::AutoLock auto_lock(lock_); |
| + for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) |
| filter->OnFilterRemoved(); |
| - } |
| - channel_ = nullptr; |
| + ipc_channel_ = nullptr; |
| peer_pid_ = base::kNullProcessId; |
| } |
| void GpuChannelMessageFilter::OnChannelConnected(int32_t peer_pid) { |
| + base::AutoLock auto_lock(lock_); |
| DCHECK(peer_pid_ == base::kNullProcessId); |
| peer_pid_ = peer_pid; |
| - for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { |
| + for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) |
| filter->OnChannelConnected(peer_pid); |
| - } |
| } |
| void GpuChannelMessageFilter::OnChannelError() { |
| - for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { |
| + base::AutoLock auto_lock(lock_); |
| + for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) |
| filter->OnChannelError(); |
| - } |
| } |
| void GpuChannelMessageFilter::OnChannelClosing() { |
| - for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { |
| + base::AutoLock auto_lock(lock_); |
| + for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) |
| filter->OnChannelClosing(); |
| - } |
| } |
| void GpuChannelMessageFilter::AddChannelFilter( |
| scoped_refptr<IPC::MessageFilter> filter) { |
| + base::AutoLock auto_lock(lock_); |
| channel_filters_.push_back(filter); |
| - if (channel_) |
| - filter->OnFilterAdded(channel_); |
| + if (ipc_channel_) |
| + filter->OnFilterAdded(ipc_channel_); |
| if (peer_pid_ != base::kNullProcessId) |
| filter->OnChannelConnected(peer_pid_); |
| } |
| void GpuChannelMessageFilter::RemoveChannelFilter( |
| scoped_refptr<IPC::MessageFilter> filter) { |
| - if (channel_) |
| + base::AutoLock auto_lock(lock_); |
| + if (ipc_channel_) |
| filter->OnFilterRemoved(); |
| - channel_filters_.erase( |
| - std::find(channel_filters_.begin(), channel_filters_.end(), filter)); |
| + base::Erase(channel_filters_, filter); |
| } |
| bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) { |
| - DCHECK(channel_); |
| + base::AutoLock auto_lock(lock_); |
| + |
| + DCHECK(ipc_channel_); |
| + |
| + if (!gpu_channel_) |
| + return MessageErrorHandler(message, "Channel destroyed"); |
| if (message.should_unblock() || message.is_reply()) |
| return MessageErrorHandler(message, "Unexpected message type"); |
| @@ -508,14 +490,12 @@ bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) { |
| return true; |
| } |
| - if (!message_queue_->PushBackMessage(message)) |
| - return MessageErrorHandler(message, "Channel destroyed"); |
| - |
| + gpu_channel_->HandleMessageOnIOThread(message); |
| return true; |
| } |
| bool GpuChannelMessageFilter::Send(IPC::Message* message) { |
| - return channel_->Send(message); |
| + return ipc_channel_->Send(message); |
| } |
| bool GpuChannelMessageFilter::MessageErrorHandler(const IPC::Message& message, |
| @@ -561,8 +541,7 @@ GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager, |
| mailbox_manager_(mailbox), |
| watchdog_(watchdog), |
| allow_view_command_buffers_(allow_view_command_buffers), |
| - allow_real_time_streams_(allow_real_time_streams), |
| - weak_factory_(this) { |
| + allow_real_time_streams_(allow_real_time_streams) { |
| DCHECK(gpu_channel_manager); |
| DCHECK(client_id); |
| @@ -570,14 +549,16 @@ GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager, |
| GpuChannelMessageQueue::Create(this, io_task_runner, preempting_flag, |
| preempted_flag, sync_point_manager); |
| - filter_ = new GpuChannelMessageFilter(message_queue_); |
| + filter_ = new GpuChannelMessageFilter(this); |
| } |
| GpuChannel::~GpuChannel() { |
| // Clear stubs first because of dependencies. |
| stubs_.clear(); |
| - message_queue_->Disable(); |
| + message_queue_->Destroy(); |
|
piman, 2017/03/24 04:31:40:
This has a race, because the message queue gets de…

sunnyps, 2017/03/24 21:27:22:
That's a mistake, I meant to do filter_->Destroy f…
|
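For reference, the ordering the (truncated) exchange above points at is a sketch along these lines. It assumes GpuChannelMessageFilter::Destroy() takes the filter's lock and clears gpu_channel_, as added earlier in this patch, so that once it returns no OnMessageReceived call can reach the channel or its queue:

GpuChannel::~GpuChannel() {
  // Clear stubs first because of dependencies.
  stubs_.clear();

  // Detach the IO-thread filter first: Destroy() blocks on the filter's lock
  // until any in-flight OnMessageReceived finishes, and clears gpu_channel_
  // so later messages fail with "Channel destroyed" instead of pushing onto
  // the queue.
  filter_->Destroy();

  // Only now is it safe to tear down the message queue.
  message_queue_->Destroy();

  if (preempting_flag_.get())
    preempting_flag_->Reset();
}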
| + |
| + filter_->Destroy(); |
| if (preempting_flag_.get()) |
| preempting_flag_->Reset(); |
| @@ -601,10 +582,6 @@ void GpuChannel::SetUnhandledMessageListener(IPC::Listener* listener) { |
| unhandled_message_listener_ = listener; |
| } |
| -base::WeakPtr<GpuChannel> GpuChannel::AsWeakPtr() { |
| - return weak_factory_.GetWeakPtr(); |
| -} |
| - |
| base::ProcessId GpuChannel::GetClientPID() const { |
| DCHECK_NE(peer_pid_, base::kNullProcessId); |
| return peer_pid_; |
| @@ -692,18 +669,30 @@ bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) { |
| return handled; |
| } |
| -void GpuChannel::PostHandleMessage() { |
| - task_runner_->PostTask(FROM_HERE, base::Bind(&GpuChannel::HandleMessage, |
| - weak_factory_.GetWeakPtr())); |
| +void GpuChannel::PostHandleMessageOnQueue() { |
| + task_runner_->PostTask( |
| + FROM_HERE, base::Bind(&GpuChannel::HandleMessageOnQueue, AsWeakPtr())); |
| } |
| void GpuChannel::PostHandleOutOfOrderMessage(const IPC::Message& msg) { |
| - task_runner_->PostTask(FROM_HERE, |
| - base::Bind(&GpuChannel::HandleOutOfOrderMessage, |
| - weak_factory_.GetWeakPtr(), msg)); |
| + task_runner_->PostTask( |
| + FROM_HERE, |
| + base::Bind(&GpuChannel::HandleOutOfOrderMessage, AsWeakPtr(), msg)); |
| } |
| -void GpuChannel::HandleMessage() { |
| +void GpuChannel::HandleMessageOnIOThread(const IPC::Message& msg) { |
| + if (msg.routing_id() == MSG_ROUTING_CONTROL || |
|
sunnyps, 2017/03/24 21:27:22:
Actually control messages should not be PostTask'd
|
| + msg.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || |
| + msg.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { |
| + PostHandleOutOfOrderMessage(msg); |
| + } else { |
| + // TODO(sunnyps): Lookup sequence id and post task to that sequence in the |
| + // scheduler. |
| + message_queue_->PushBackMessage(msg); |
| + } |
| +} |
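One reading of the self-review note above, consistent with the pre-patch PushBackMessage() behavior where only the two WaitFor* messages bypassed the queue, is a hypothetical variant like the following (not what this patch set lands): control messages go back onto the ordered queue so they are processed in order with command buffer messages.

void GpuChannel::HandleMessageOnIOThread(const IPC::Message& msg) {
  // Only the synchronous wait messages jump the queue; everything else,
  // including control messages, stays on the ordered queue as before this
  // patch.
  if (msg.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
      msg.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
    PostHandleOutOfOrderMessage(msg);
  } else {
    message_queue_->PushBackMessage(msg);
  }
}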
| + |
| +void GpuChannel::HandleMessageOnQueue() { |
| const GpuChannelMessage* channel_msg = |
| message_queue_->BeginMessageProcessing(); |
| if (!channel_msg) |
| @@ -895,15 +884,11 @@ void GpuChannel::CacheShader(const std::string& key, |
| } |
| void GpuChannel::AddFilter(IPC::MessageFilter* filter) { |
| - io_task_runner_->PostTask( |
| - FROM_HERE, base::Bind(&GpuChannelMessageFilter::AddChannelFilter, filter_, |
| - make_scoped_refptr(filter))); |
| + filter_->AddChannelFilter(filter); |
|
piman, 2017/03/24 04:31:40:
I don't think it's generally safe to call filters…

sunnyps, 2017/03/24 21:27:22:
Done.
|
| } |
| void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) { |
| - io_task_runner_->PostTask( |
| - FROM_HERE, base::Bind(&GpuChannelMessageFilter::RemoveChannelFilter, |
| - filter_, make_scoped_refptr(filter))); |
| + filter_->RemoveChannelFilter(filter); |
|
piman, 2017/03/24 04:31:40:
ditto

sunnyps, 2017/03/24 21:27:22:
Done.
|
| } |
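Given the two "Done" replies above, the follow-up presumably restores the PostTask form this patch set removed, since filter callbacks such as OnFilterAdded and OnChannelConnected are only safe to invoke on the IO thread. A sketch using the original bindings:

void GpuChannel::AddFilter(IPC::MessageFilter* filter) {
  // Hop to the IO thread before touching the filter.
  io_task_runner_->PostTask(
      FROM_HERE, base::Bind(&GpuChannelMessageFilter::AddChannelFilter,
                            filter_, make_scoped_refptr(filter)));
}

void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) {
  io_task_runner_->PostTask(
      FROM_HERE, base::Bind(&GpuChannelMessageFilter::RemoveChannelFilter,
                            filter_, make_scoped_refptr(filter)));
}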
| uint64_t GpuChannel::GetMemoryUsage() { |