| Index: content/common/gpu/gpu_channel.cc |
| diff --git a/content/common/gpu/gpu_channel.cc b/content/common/gpu/gpu_channel.cc |
| index c16db443bebd86e23724f6d10408f224840c5524..8f1597e5397731743eddd8c3bc0ef3646ce243a0 100644 |
| --- a/content/common/gpu/gpu_channel.cc |
| +++ b/content/common/gpu/gpu_channel.cc |
| @@ -72,210 +72,173 @@ const uint32_t kOutOfOrderNumber = static_cast<uint32_t>(-1); |
| } // anonymous namespace |
| -struct GpuChannelMessage { |
| - uint32_t order_number; |
| - base::TimeTicks time_received; |
| - IPC::Message message; |
| - |
| - // TODO(dyen): Temporary sync point data, remove once new sync point lands. |
| - bool retire_sync_point; |
| - uint32 sync_point_number; |
| - |
| - GpuChannelMessage(uint32_t order_num, const IPC::Message& msg) |
| - : order_number(order_num), |
| - time_received(base::TimeTicks::Now()), |
| - message(msg), |
| - retire_sync_point(false), |
| - sync_point_number(0) {} |
| -}; |
| - |
| -class GpuChannelMessageQueue |
| - : public base::RefCountedThreadSafe<GpuChannelMessageQueue> { |
| - public: |
| - static scoped_refptr<GpuChannelMessageQueue> Create( |
| - base::WeakPtr<GpuChannel> gpu_channel, |
| - scoped_refptr<base::SingleThreadTaskRunner> task_runner) { |
| - return new GpuChannelMessageQueue(gpu_channel, task_runner); |
| - } |
| +scoped_refptr<GpuChannelMessageQueue> GpuChannelMessageQueue::Create( |
| + const base::WeakPtr<GpuChannel>& gpu_channel, |
| + base::SingleThreadTaskRunner* task_runner) { |
| + return new GpuChannelMessageQueue(gpu_channel, task_runner); |
| +} |
| - uint32_t GetUnprocessedOrderNum() { |
| - base::AutoLock auto_lock(channel_messages_lock_); |
| - return unprocessed_order_num_; |
| - } |
| +GpuChannelMessageQueue::GpuChannelMessageQueue( |
| + const base::WeakPtr<GpuChannel>& gpu_channel, |
| + base::SingleThreadTaskRunner* task_runner) |
| + : enabled_(true), |
| + unprocessed_order_num_(0), |
| + processed_order_num_(0), |
| + gpu_channel_(gpu_channel), |
| + task_runner_(task_runner) {} |
| - void PushBackMessage(uint32_t order_number, const IPC::Message& message) { |
| - base::AutoLock auto_lock(channel_messages_lock_); |
| - if (enabled_) { |
| - PushMessageHelper(order_number, |
| - new GpuChannelMessage(order_number, message)); |
| - } |
| - } |
| +GpuChannelMessageQueue::~GpuChannelMessageQueue() { |
| + DCHECK(channel_messages_.empty()); |
| + DCHECK(out_of_order_messages_.empty()); |
| +} |
| - void PushOutOfOrderMessage(const IPC::Message& message) { |
| - // These are pushed out of order so should not have any order messages. |
| - base::AutoLock auto_lock(channel_messages_lock_); |
| - if (enabled_) { |
| - PushOutOfOrderHelper(new GpuChannelMessage(kOutOfOrderNumber, message)); |
| - } |
| - } |
| +uint32_t GpuChannelMessageQueue::GetUnprocessedOrderNum() const { |
| + base::AutoLock auto_lock(channel_messages_lock_); |
| + return unprocessed_order_num_; |
| +} |
| - bool GenerateSyncPointMessage(gpu::SyncPointManager* sync_point_manager, |
| - uint32_t order_number, |
| - const IPC::Message& message, |
| - bool retire_sync_point, |
| - uint32_t* sync_point_number) { |
| - DCHECK(message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID); |
| - base::AutoLock auto_lock(channel_messages_lock_); |
| - if (enabled_) { |
| - const uint32 sync_point = sync_point_manager->GenerateSyncPoint(); |
| +void GpuChannelMessageQueue::PushBackMessage(uint32_t order_number, |
| + const IPC::Message& message) { |
| + base::AutoLock auto_lock(channel_messages_lock_); |
| + if (enabled_) |
|
piman
2015/09/11 00:38:33
nit: needs {} per style.
sunnyps
2015/09/11 01:53:22
Done.
|
| + PushMessageHelper( |
| + make_scoped_ptr(new GpuChannelMessage(order_number, message))); |
| +} |
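The braced form the nit above asks for would read roughly as follows (a sketch of the fixed-up lines only; the follow-up patchset itself is not part of this diff):

    if (enabled_) {
      PushMessageHelper(
          make_scoped_ptr(new GpuChannelMessage(order_number, message)));
    }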
| - GpuChannelMessage* msg = new GpuChannelMessage(order_number, message); |
| - msg->retire_sync_point = retire_sync_point; |
| - msg->sync_point_number = sync_point; |
| +bool GpuChannelMessageQueue::GenerateSyncPointMessage( |
| + gpu::SyncPointManager* sync_point_manager, |
| + uint32_t order_number, |
| + const IPC::Message& message, |
| + bool retire_sync_point, |
| + uint32_t* sync_point) { |
| + DCHECK_EQ(message.type(), GpuCommandBufferMsg_InsertSyncPoint::ID); |
| + DCHECK(sync_point); |
| + base::AutoLock auto_lock(channel_messages_lock_); |
| + if (enabled_) { |
| + *sync_point = sync_point_manager->GenerateSyncPoint(); |
| - *sync_point_number = sync_point; |
| - PushMessageHelper(order_number, msg); |
| - return true; |
| - } |
| - return false; |
| - } |
| + GpuChannelMessage* msg = new GpuChannelMessage(order_number, message); |
| + msg->retire_sync_point = retire_sync_point; |
| + msg->sync_point = *sync_point; |
| - bool HasQueuedMessages() { |
| - base::AutoLock auto_lock(channel_messages_lock_); |
| - return HasQueuedMessagesLocked(); |
| + PushMessageHelper(make_scoped_ptr(msg)); |
| + return true; |
| } |
| + return false; |
| +} |
| - base::TimeTicks GetNextMessageTimeTick() { |
| - base::AutoLock auto_lock(channel_messages_lock_); |
| +bool GpuChannelMessageQueue::HasQueuedMessages() const { |
| + base::AutoLock auto_lock(channel_messages_lock_); |
| + return HasQueuedMessagesHelper(); |
| +} |
| - base::TimeTicks next_message_tick; |
| - if (!channel_messages_.empty()) |
| - next_message_tick = channel_messages_.front()->time_received; |
| +base::TimeTicks GpuChannelMessageQueue::GetNextMessageTimeTick() const { |
| + base::AutoLock auto_lock(channel_messages_lock_); |
| - base::TimeTicks next_out_of_order_tick; |
| - if (!out_of_order_messages_.empty()) |
| - next_out_of_order_tick = out_of_order_messages_.front()->time_received; |
| + base::TimeTicks next_message_tick; |
| + if (!channel_messages_.empty()) |
| + next_message_tick = channel_messages_.front()->time_received; |
| - if (next_message_tick.is_null()) |
| - return next_out_of_order_tick; |
| - else if (next_out_of_order_tick.is_null()) |
| - return next_message_tick; |
| - else |
| - return std::min(next_message_tick, next_out_of_order_tick); |
| - } |
| + base::TimeTicks next_out_of_order_tick; |
| + if (!out_of_order_messages_.empty()) |
| + next_out_of_order_tick = out_of_order_messages_.front()->time_received; |
| - protected: |
| - virtual ~GpuChannelMessageQueue() { |
| - DCHECK(channel_messages_.empty()); |
| - DCHECK(out_of_order_messages_.empty()); |
| - } |
| + if (next_message_tick.is_null()) |
| + return next_out_of_order_tick; |
| + else if (next_out_of_order_tick.is_null()) |
| + return next_message_tick; |
| + else |
| + return std::min(next_message_tick, next_out_of_order_tick); |
| +} |
| - private: |
| - friend class GpuChannel; |
| - friend class base::RefCountedThreadSafe<GpuChannelMessageQueue>; |
| - |
| - GpuChannelMessageQueue( |
| - base::WeakPtr<GpuChannel> gpu_channel, |
| - scoped_refptr<base::SingleThreadTaskRunner> task_runner) |
| - : enabled_(true), |
| - unprocessed_order_num_(0), |
| - gpu_channel_(gpu_channel), |
| - task_runner_(task_runner) {} |
| - |
| - void DeleteAndDisableMessages(GpuChannelManager* gpu_channel_manager) { |
| - { |
| - base::AutoLock auto_lock(channel_messages_lock_); |
| - DCHECK(enabled_); |
| - enabled_ = false; |
| - } |
| +GpuChannelMessage* GpuChannelMessageQueue::GetNextMessage() const { |
| + base::AutoLock auto_lock(channel_messages_lock_); |
| + if (!out_of_order_messages_.empty()) { |
| + return out_of_order_messages_.front(); |
| + } else if (!channel_messages_.empty()) { |
| + return channel_messages_.front(); |
| + } else { |
| + return nullptr; |
| + } |
| +} |
| - // We guarantee that the queues will no longer be modified after enabled_ |
| - // is set to false, it is now safe to modify the queue without the lock. |
| - // All public facing modifying functions check enabled_ while all |
| - // private modifying functions DCHECK(enabled_) to enforce this. |
| - while (!channel_messages_.empty()) { |
| - GpuChannelMessage* msg = channel_messages_.front(); |
| - // This needs to clean up both GpuCommandBufferMsg_InsertSyncPoint and |
| - // GpuCommandBufferMsg_RetireSyncPoint messages, safer to just check |
| - // if we have a sync point number here. |
| - if (msg->sync_point_number) { |
| - gpu_channel_manager->sync_point_manager()->RetireSyncPoint( |
| - msg->sync_point_number); |
| - } |
| - delete msg; |
| - channel_messages_.pop_front(); |
| - } |
| - STLDeleteElements(&out_of_order_messages_); |
| +bool GpuChannelMessageQueue::MessageProcessed(uint32_t order_number) { |
| + base::AutoLock auto_lock(channel_messages_lock_); |
| + if (order_number != kOutOfOrderNumber) { |
| + DCHECK(!channel_messages_.empty()); |
| + GpuChannelMessage* msg = channel_messages_.front(); |
| + DCHECK_EQ(order_number, msg->order_number); |
| + processed_order_num_ = order_number; |
| + channel_messages_.pop_front(); |
| + delete msg; |
| + } else { |
| + DCHECK(!out_of_order_messages_.empty()); |
| + GpuChannelMessage* msg = out_of_order_messages_.front(); |
| + out_of_order_messages_.pop_front(); |
| + delete msg; |
| } |
| + return HasQueuedMessagesHelper(); |
| +} |
| - void PushUnfinishedMessage(uint32_t order_number, |
| - const IPC::Message& message) { |
| - // This is pushed only if it was unfinished, so order number is kept. |
| - GpuChannelMessage* msg = new GpuChannelMessage(order_number, message); |
| +void GpuChannelMessageQueue::DeleteAndDisableMessages( |
| + GpuChannelManager* gpu_channel_manager) { |
| + { |
| base::AutoLock auto_lock(channel_messages_lock_); |
| DCHECK(enabled_); |
| - const bool had_messages = HasQueuedMessagesLocked(); |
| - if (order_number == kOutOfOrderNumber) |
| - out_of_order_messages_.push_front(msg); |
| - else |
| - channel_messages_.push_front(msg); |
| - |
| - if (!had_messages) |
| - ScheduleHandleMessage(); |
| - } |
| - |
| - void ScheduleHandleMessage() { |
| - task_runner_->PostTask( |
| - FROM_HERE, base::Bind(&GpuChannel::HandleMessage, gpu_channel_)); |
| + enabled_ = false; |
| } |
| - void PushMessageHelper(uint32_t order_number, GpuChannelMessage* msg) { |
| - channel_messages_lock_.AssertAcquired(); |
| - DCHECK(enabled_); |
| - unprocessed_order_num_ = order_number; |
| - const bool had_messages = HasQueuedMessagesLocked(); |
| - channel_messages_.push_back(msg); |
| - if (!had_messages) |
| - ScheduleHandleMessage(); |
| + // We guarantee that the queues will no longer be modified after enabled_ |
| + // is set to false, it is now safe to modify the queue without the lock. |
| + // All public facing modifying functions check enabled_ while all |
| + // private modifying functions DCHECK(enabled_) to enforce this. |
| + while (!channel_messages_.empty()) { |
| + GpuChannelMessage* msg = channel_messages_.front(); |
| + // This needs to clean up both GpuCommandBufferMsg_InsertSyncPoint and |
| + // GpuCommandBufferMsg_RetireSyncPoint messages, safer to just check |
| + // if we have a sync point number here. |
| + if (msg->sync_point) { |
| + gpu_channel_manager->sync_point_manager()->RetireSyncPoint( |
| + msg->sync_point); |
| + } |
| + delete msg; |
| + channel_messages_.pop_front(); |
| } |
| + STLDeleteElements(&out_of_order_messages_); |
| +} |
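The comment in DeleteAndDisableMessages describes a disable-then-drain pattern: the lock is held only long enough to flip enabled_, and because every producer checks enabled_ under that same lock, the drain that follows can walk the deques unlocked. The same check is why GenerateSyncPointMessage can return false once teardown has started. A minimal standalone model of the pattern, using standard-library stand-ins rather than the Chromium types:

    #include <cassert>
    #include <deque>
    #include <mutex>

    class DrainableQueue {
     public:
      // Producer side: refuses new work once the queue is disabled.
      bool Push(int item) {
        std::lock_guard<std::mutex> lock(mutex_);
        if (!enabled_)
          return false;
        items_.push_back(item);
        return true;
      }

      // Teardown: flip enabled_ under the lock, then drain without it.
      void DisableAndDrain() {
        {
          std::lock_guard<std::mutex> lock(mutex_);
          assert(enabled_);
          enabled_ = false;
        }
        // No producer can observe enabled_ == true anymore, so nothing
        // else touches items_; per-item cleanup (e.g. retiring queued
        // sync points) can run here without holding the lock.
        while (!items_.empty())
          items_.pop_front();
      }

     private:
      bool enabled_ = true;
      std::deque<int> items_;
      std::mutex mutex_;
    };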
| - void PushOutOfOrderHelper(GpuChannelMessage* msg) { |
| - channel_messages_lock_.AssertAcquired(); |
| - DCHECK(enabled_); |
| - const bool had_messages = HasQueuedMessagesLocked(); |
| - out_of_order_messages_.push_back(msg); |
| - if (!had_messages) |
| - ScheduleHandleMessage(); |
| - } |
| +void GpuChannelMessageQueue::ScheduleHandleMessage() { |
| + task_runner_->PostTask(FROM_HERE, |
| + base::Bind(&GpuChannel::HandleMessage, gpu_channel_)); |
| +} |
| - bool HasQueuedMessagesLocked() { |
| - channel_messages_lock_.AssertAcquired(); |
| - return !channel_messages_.empty() || !out_of_order_messages_.empty(); |
| +void GpuChannelMessageQueue::PushMessageHelper( |
| + scoped_ptr<GpuChannelMessage> msg) { |
| + channel_messages_lock_.AssertAcquired(); |
| + DCHECK(enabled_); |
| + bool had_messages = HasQueuedMessagesHelper(); |
| + if (msg->order_number != kOutOfOrderNumber) { |
| + unprocessed_order_num_ = msg->order_number; |
| + channel_messages_.push_back(msg.release()); |
| + } else { |
| + out_of_order_messages_.push_back(msg.release()); |
| } |
| + if (!had_messages) |
| + ScheduleHandleMessage(); |
| +} |
| - bool enabled_; |
| - |
| - // Highest IPC order number seen, set when queued on the IO thread. |
| - uint32_t unprocessed_order_num_; |
| - std::deque<GpuChannelMessage*> channel_messages_; |
| - std::deque<GpuChannelMessage*> out_of_order_messages_; |
| - |
| - // This lock protects enabled_, unprocessed_order_num_, and both deques. |
| - base::Lock channel_messages_lock_; |
| - |
| - base::WeakPtr<GpuChannel> gpu_channel_; |
| - scoped_refptr<base::SingleThreadTaskRunner> task_runner_; |
| - |
| - DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageQueue); |
| -}; |
| +bool GpuChannelMessageQueue::HasQueuedMessagesHelper() const { |
| + channel_messages_lock_.AssertAcquired(); |
| + return !channel_messages_.empty() || !out_of_order_messages_.empty(); |
| +} |
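Taken together, the refactored queue has a small contract: the IO thread appends messages (in-order ones carry a monotonically increasing order number, while wait messages go to a separate out-of-order deque that is always served first), a HandleMessage task is posted only on the empty-to-non-empty transition, and the main thread peeks, handles, then pops. A rough model of the consumer-facing half, with illustrative standard-library types in place of the Chromium ones:

    #include <cstdint>
    #include <deque>

    constexpr uint32_t kOutOfOrder = UINT32_MAX;  // mirrors kOutOfOrderNumber

    struct Msg {
      uint32_t order_number;  // kOutOfOrder for wait messages
    };

    struct Queues {
      std::deque<Msg> in_order;
      std::deque<Msg> out_of_order;

      // Wait messages are always served before ordered ones,
      // mirroring GetNextMessage().
      const Msg* Next() const {
        if (!out_of_order.empty())
          return &out_of_order.front();
        if (!in_order.empty())
          return &in_order.front();
        return nullptr;
      }

      // Pops the message that was just handled and reports whether more
      // work remains, mirroring MessageProcessed().
      bool Processed(uint32_t order_number) {
        if (order_number != kOutOfOrder)
          in_order.pop_front();
        else
          out_of_order.pop_front();
        return !in_order.empty() || !out_of_order.empty();
      }
    };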
| // Begin order numbers at 1 so 0 can mean no orders. |
| uint32_t GpuChannelMessageFilter::global_order_counter_ = 1; |
| GpuChannelMessageFilter::GpuChannelMessageFilter( |
| - scoped_refptr<GpuChannelMessageQueue> message_queue, |
| + GpuChannelMessageQueue* message_queue, |
| gpu::SyncPointManager* sync_point_manager, |
| - scoped_refptr<base::SingleThreadTaskRunner> task_runner, |
| + base::SingleThreadTaskRunner* task_runner, |
| bool future_sync_points) |
| : preemption_state_(IDLE), |
| message_queue_(message_queue), |
| @@ -362,15 +325,16 @@ bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) { |
| } |
| if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { |
| - base::Tuple<bool> retire; |
| + base::Tuple<bool> params; |
| IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); |
| if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message, |
| - &retire)) { |
| + ¶ms)) { |
| reply->set_reply_error(); |
| Send(reply); |
| return true; |
| } |
| - if (!future_sync_points_ && !base::get<0>(retire)) { |
| + bool retire_sync_point = base::get<0>(params); |
| + if (!future_sync_points_ && !retire_sync_point) { |
| LOG(ERROR) << "Untrusted contexts can't create future sync points"; |
| reply->set_reply_error(); |
| Send(reply); |
| @@ -381,7 +345,7 @@ bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) { |
| // message queue could be disabled from the main thread during generation. |
| uint32_t sync_point = 0u; |
| if (!message_queue_->GenerateSyncPointMessage( |
| - sync_point_manager_, order_number, message, base::get<0>(retire), |
| + sync_point_manager_, order_number, message, retire_sync_point, |
| &sync_point)) { |
| LOG(ERROR) << "GpuChannel has been destroyed."; |
| reply->set_reply_error(); |
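This error path is reliable because sync point generation and enqueueing happen atomically under the queue lock: if the main thread disables the queue first, GenerateSyncPointMessage returns false before any sync point exists, so there is nothing to retire. A standalone model of that pattern (standard-library stand-ins; the counter is illustrative, not the real SyncPointManager):

    #include <cstdint>
    #include <deque>
    #include <mutex>

    class SyncPointQueue {
     public:
      // Returns false if already disabled; no sync point is created in
      // that case, so the caller can simply reply with an error.
      bool GenerateAndEnqueue(uint32_t order_number, uint32_t* sync_point) {
        std::lock_guard<std::mutex> lock(mutex_);
        if (!enabled_)
          return false;
        *sync_point = ++next_sync_point_;  // created under the lock...
        queue_.push_back({order_number, *sync_point});  // ...with the enqueue
        return true;
      }

     private:
      struct Entry {
        uint32_t order_number;
        uint32_t sync_point;
      };
      bool enabled_ = true;
      uint32_t next_sync_point_ = 0;
      std::deque<Entry> queue_;
      std::mutex mutex_;
    };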
| @@ -401,7 +365,7 @@ bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) { |
| message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { |
| // Move Wait commands to the head of the queue, so the renderer |
| // doesn't have to wait any longer than necessary. |
| - message_queue_->PushOutOfOrderMessage(message); |
| + message_queue_->PushBackMessage(kOutOfOrderNumber, message); |
| } else { |
| message_queue_->PushBackMessage(order_number, message); |
| } |
| @@ -618,8 +582,6 @@ GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager, |
| pending_valuebuffer_state_(new gpu::ValueStateMap), |
| watchdog_(watchdog), |
| software_(software), |
| - current_order_num_(0), |
| - processed_order_num_(0), |
| num_stubs_descheduled_(0), |
| allow_future_sync_points_(allow_future_sync_points), |
| allow_real_time_streams_(allow_real_time_streams), |
| @@ -631,8 +593,8 @@ GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager, |
| GpuChannelMessageQueue::Create(weak_factory_.GetWeakPtr(), task_runner); |
| filter_ = new GpuChannelMessageFilter( |
| - message_queue_, gpu_channel_manager_->sync_point_manager(), task_runner_, |
| - allow_future_sync_points_); |
| + message_queue_.get(), gpu_channel_manager_->sync_point_manager(), |
| + task_runner_.get(), allow_future_sync_points_); |
| subscription_ref_set_->AddObserver(this); |
| } |
| @@ -676,6 +638,14 @@ base::ProcessId GpuChannel::GetClientPID() const { |
| return channel_->GetPeerPID(); |
| } |
| +uint32_t GpuChannel::GetProcessedOrderNum() const { |
| + return message_queue_->processed_order_num(); |
| +} |
| + |
| +uint32_t GpuChannel::GetUnprocessedOrderNum() const { |
| + return message_queue_->GetUnprocessedOrderNum(); |
| +} |
| + |
| bool GpuChannel::OnMessageReceived(const IPC::Message& message) { |
| // All messages should be pushed to channel_messages_ and handled separately. |
| NOTREACHED(); |
| @@ -716,7 +686,7 @@ void GpuChannel::StubSchedulingChanged(bool scheduled) { |
| bool a_stub_was_descheduled = num_stubs_descheduled_ > 0; |
| if (scheduled) { |
| num_stubs_descheduled_--; |
| - message_queue_->ScheduleHandleMessage(); |
| + ScheduleHandleMessage(); |
| } else { |
| num_stubs_descheduled_++; |
| } |
| @@ -864,117 +834,75 @@ bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) { |
| } |
| void GpuChannel::HandleMessage() { |
| - GpuChannelMessage* m = nullptr; |
| - GpuCommandBufferStub* stub = nullptr; |
| - bool has_more_messages = false; |
| - { |
| - base::AutoLock auto_lock(message_queue_->channel_messages_lock_); |
| - if (!message_queue_->out_of_order_messages_.empty()) { |
| - m = message_queue_->out_of_order_messages_.front(); |
| - DCHECK(m->order_number == kOutOfOrderNumber); |
| - message_queue_->out_of_order_messages_.pop_front(); |
| - } else if (!message_queue_->channel_messages_.empty()) { |
| - m = message_queue_->channel_messages_.front(); |
| - DCHECK(m->order_number != kOutOfOrderNumber); |
| - message_queue_->channel_messages_.pop_front(); |
| - } else { |
| - // No messages to process |
| - return; |
| - } |
| + GpuChannelMessage* m = message_queue_->GetNextMessage(); |
| - has_more_messages = message_queue_->HasQueuedMessagesLocked(); |
| - } |
| + // TODO(sunnyps): This could be a DCHECK maybe? |
| + if (!m) |
| + return; |
| - bool retry_message = false; |
| - stub = stubs_.get(m->message.routing_id()); |
| - if (stub) { |
| - if (!stub->IsScheduled()) { |
| - retry_message = true; |
| - } |
| - if (stub->IsPreempted()) { |
| - retry_message = true; |
| - message_queue_->ScheduleHandleMessage(); |
| - } |
| - } |
| + uint32_t order_number = m->order_number; |
| + IPC::Message& message = m->message; |
| + int32_t routing_id = message.routing_id(); |
| + GpuCommandBufferStub* stub = stubs_.get(routing_id); |
| - if (retry_message) { |
| - base::AutoLock auto_lock(message_queue_->channel_messages_lock_); |
| - if (m->order_number == kOutOfOrderNumber) |
| - message_queue_->out_of_order_messages_.push_front(m); |
| - else |
| - message_queue_->channel_messages_.push_front(m); |
| - return; |
| - } else if (has_more_messages) { |
| - message_queue_->ScheduleHandleMessage(); |
| - } |
| + DVLOG(1) << "received message @" << &message << " on channel @" << this |
| + << " with type " << message.type(); |
|
piman
2015/09/11 00:38:33
Are you saying that we have a strong guarantee tha
sunnyps
2015/09/11 01:53:22
I think this is true most of the time - the messag
piman
2015/09/11 02:30:07
What about the example I mentioned?
If on a previo
sunnyps
2015/09/11 06:21:59
I misunderstood your original comment thinking it
|
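The exchange above concerns the case handled just below: a stub can be descheduled only in the middle of an AsyncFlush, and in that case the message is deliberately left at the head of the queue so the flush resumes where it stopped. A sketch of one turn of the resulting pump loop (generic over any queue with the Next()/Processed() shape modeled earlier; illustrative names, not the Chromium code):

    struct Stub {
      bool has_unprocessed_commands = false;
      bool preempted = false;
    };

    // Models one HandleMessage() turn: peek, handle, and only pop once
    // the message has actually been fully processed.
    template <typename Queue, typename Reschedule>
    void PumpOnce(Queue& queue, Stub* stub, Reschedule reschedule) {
      auto* msg = queue.Next();
      if (!msg)
        return;
      // ... route *msg to the stub or the control handler ...
      if (stub && stub->has_unprocessed_commands) {
        // Descheduled mid-flush: keep the message at the head. If the
        // stub was preempted nothing else will wake the channel, so a
        // new task must be posted; otherwise some other event (e.g.
        // sync point completion) wakes it.
        if (stub->preempted)
          reschedule();
        return;
      }
      if (queue.Processed(msg->order_number))
        reschedule();  // more messages pending; keep pumping
    }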
| + |
| + bool handled = false; |
| - scoped_ptr<GpuChannelMessage> scoped_message(m); |
| - const uint32_t order_number = m->order_number; |
| - const int32_t routing_id = m->message.routing_id(); |
| - |
| - // TODO(dyen): Temporary handling of old sync points. |
| - // This must ensure that the sync point will be retired. Normally we'll |
| - // find the stub based on the routing ID, and associate the sync point |
| - // with it, but if that fails for any reason (channel or stub already |
| - // deleted, invalid routing id), we need to retire the sync point |
| - // immediately. |
| - if (m->message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { |
| - const bool retire = m->retire_sync_point; |
| - const uint32_t sync_point = m->sync_point_number; |
| + if (routing_id == MSG_ROUTING_CONTROL) { |
| + handled = OnControlMessageReceived(message); |
| + } else if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { |
| + // TODO(dyen): Temporary handling of old sync points. |
| + // This must ensure that the sync point will be retired. Normally we'll |
| + // find the stub based on the routing ID, and associate the sync point |
| + // with it, but if that fails for any reason (channel or stub already |
| + // deleted, invalid routing id), we need to retire the sync point |
| + // immediately. |
| if (stub) { |
| - stub->AddSyncPoint(sync_point); |
| - if (retire) { |
| - m->message = |
| - GpuCommandBufferMsg_RetireSyncPoint(routing_id, sync_point); |
| - } |
| + stub->AddSyncPoint(m->sync_point, m->retire_sync_point); |
| } else { |
| - current_order_num_ = order_number; |
| - gpu_channel_manager_->sync_point_manager()->RetireSyncPoint(sync_point); |
| - MessageProcessed(order_number); |
| - return; |
| + gpu_channel_manager_->sync_point_manager()->RetireSyncPoint( |
| + m->sync_point); |
| } |
| + handled = true; |
| + } else { |
| + handled = router_.RouteMessage(message); |
| } |
| - IPC::Message* message = &m->message; |
| - bool message_processed = true; |
| - |
| - DVLOG(1) << "received message @" << message << " on channel @" << this |
| - << " with type " << message->type(); |
| + // Respond to sync messages even if router failed to route. |
| + if (!handled && message.is_sync()) { |
| + IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); |
| + reply->set_reply_error(); |
| + Send(reply); |
| + handled = true; |
| + } |
| - if (order_number != kOutOfOrderNumber) { |
| - // Make sure this is a valid unprocessed order number. |
| - DCHECK(order_number <= GetUnprocessedOrderNum() && |
| - order_number >= GetProcessedOrderNum()); |
| + // A command buffer may be descheduled or preempted but only in the middle of |
| + // a flush. In this case we should not pop the message from the queue. |
| + if (stub && stub->HasUnprocessedCommands()) { |
| + DCHECK(message.type() == GpuCommandBufferMsg_AsyncFlush::ID); |
| + // If the stub was preempted then we need to schedule a wakeup otherwise |
| + // some other event will wake us up e.g. sync point completion. |
| + if (stub->IsPreempted()) |
| + ScheduleHandleMessage(); |
| + return; |
| + } |
| - current_order_num_ = order_number; |
| + if (message_queue_->MessageProcessed(order_number)) { |
| + ScheduleHandleMessage(); |
| } |
| - bool result = false; |
| - if (routing_id == MSG_ROUTING_CONTROL) |
| - result = OnControlMessageReceived(*message); |
| - else |
| - result = router_.RouteMessage(*message); |
| - if (!result) { |
| - // Respond to sync messages even if router failed to route. |
| - if (message->is_sync()) { |
| - IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message); |
| - reply->set_reply_error(); |
| - Send(reply); |
| - } |
| - } else { |
| - // If the command buffer becomes unscheduled as a result of handling the |
| - // message but still has more commands to process, synthesize an IPC |
| - // message to flush that command buffer. |
| - if (stub) { |
| - if (stub->HasUnprocessedCommands()) { |
| - message_queue_->PushUnfinishedMessage( |
| - order_number, GpuCommandBufferMsg_Rescheduled(stub->route_id())); |
| - message_processed = false; |
| - } |
| - } |
| + if (preempting_flag_) { |
| + io_task_runner_->PostTask( |
| + FROM_HERE, |
| + base::Bind(&GpuChannelMessageFilter::OnMessageProcessed, filter_)); |
| } |
| - if (message_processed) |
| - MessageProcessed(order_number); |
| +} |
| + |
| +void GpuChannel::ScheduleHandleMessage() { |
| + task_runner_->PostTask(FROM_HERE, base::Bind(&GpuChannel::HandleMessage, |
| + weak_factory_.GetWeakPtr())); |
| } |
| void GpuChannel::OnCreateOffscreenCommandBuffer( |
| @@ -1078,19 +1006,6 @@ void GpuChannel::OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg) { |
| jpeg_decoder_->AddClient(route_id, reply_msg); |
| } |
| -void GpuChannel::MessageProcessed(uint32_t order_number) { |
| - if (order_number != kOutOfOrderNumber) { |
| - DCHECK(current_order_num_ == order_number); |
| - DCHECK(processed_order_num_ < order_number); |
| - processed_order_num_ = order_number; |
| - } |
| - if (preempting_flag_.get()) { |
| - io_task_runner_->PostTask( |
| - FROM_HERE, |
| - base::Bind(&GpuChannelMessageFilter::OnMessageProcessed, filter_)); |
| - } |
| -} |
| - |
| void GpuChannel::CacheShader(const std::string& key, |
| const std::string& shader) { |
| gpu_channel_manager_->Send( |
| @@ -1160,8 +1075,4 @@ void GpuChannel::HandleUpdateValueState( |
| pending_valuebuffer_state_->UpdateState(target, state); |
| } |
| -uint32_t GpuChannel::GetUnprocessedOrderNum() const { |
| - return message_queue_->GetUnprocessedOrderNum(); |
| -} |
| - |
| } // namespace content |
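Finally, the order-number bookkeeping that moved from GpuChannel into the queue follows the invariants stated in the old DCHECKs: numbers begin at 1 so 0 can mean "no orders", unprocessed_order_num_ records the highest number queued on the IO thread, and an in-order message is recorded as processed only when processed_order_num_ < n <= unprocessed_order_num_. A compact model of those invariants (illustrative, not the Chromium class):

    #include <cassert>
    #include <cstdint>

    struct OrderCounters {
      uint32_t unprocessed = 0;  // highest order number queued (IO thread)
      uint32_t processed = 0;    // highest order number handled (main thread)

      void Queued(uint32_t n) {
        assert(n > unprocessed);  // order numbers only grow
        unprocessed = n;
      }
      void Handled(uint32_t n) {
        assert(n > processed && n <= unprocessed);
        processed = n;
      }
    };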