Index: content/common/gpu/gpu_channel.cc |
diff --git a/content/common/gpu/gpu_channel.cc b/content/common/gpu/gpu_channel.cc |
index c16db443bebd86e23724f6d10408f224840c5524..a4b277c2dec77c5c8b3d9a7fe990b50b0a4359b0 100644 |
--- a/content/common/gpu/gpu_channel.cc |
+++ b/content/common/gpu/gpu_channel.cc |
@@ -72,202 +72,163 @@ const uint32_t kOutOfOrderNumber = static_cast<uint32_t>(-1); |
} // anonymous namespace |
-struct GpuChannelMessage { |
- uint32_t order_number; |
- base::TimeTicks time_received; |
- IPC::Message message; |
- |
- // TODO(dyen): Temporary sync point data, remove once new sync point lands. |
- bool retire_sync_point; |
- uint32 sync_point_number; |
- |
- GpuChannelMessage(uint32_t order_num, const IPC::Message& msg) |
- : order_number(order_num), |
- time_received(base::TimeTicks::Now()), |
- message(msg), |
- retire_sync_point(false), |
- sync_point_number(0) {} |
-}; |
- |
-class GpuChannelMessageQueue |
- : public base::RefCountedThreadSafe<GpuChannelMessageQueue> { |
- public: |
- static scoped_refptr<GpuChannelMessageQueue> Create( |
- base::WeakPtr<GpuChannel> gpu_channel, |
- scoped_refptr<base::SingleThreadTaskRunner> task_runner) { |
- return new GpuChannelMessageQueue(gpu_channel, task_runner); |
- } |
+scoped_refptr<GpuChannelMessageQueue> GpuChannelMessageQueue::Create( |
+ base::WeakPtr<GpuChannel> gpu_channel, |
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner) { |
+ return new GpuChannelMessageQueue(gpu_channel, task_runner); |
+} |
- uint32_t GetUnprocessedOrderNum() { |
- base::AutoLock auto_lock(channel_messages_lock_); |
- return unprocessed_order_num_; |
- } |
+GpuChannelMessageQueue::GpuChannelMessageQueue( |
+ base::WeakPtr<GpuChannel> gpu_channel, |
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner) |
+ : enabled_(true), |
+ unprocessed_order_num_(0), |
+ processed_order_num_(0), |
+ gpu_channel_(gpu_channel), |
+ task_runner_(task_runner) {} |
- void PushBackMessage(uint32_t order_number, const IPC::Message& message) { |
- base::AutoLock auto_lock(channel_messages_lock_); |
- if (enabled_) { |
- PushMessageHelper(order_number, |
- new GpuChannelMessage(order_number, message)); |
- } |
- } |
+GpuChannelMessageQueue::~GpuChannelMessageQueue() { |
+ DCHECK(channel_messages_.empty()); |
+ DCHECK(out_of_order_messages_.empty()); |
+} |
- void PushOutOfOrderMessage(const IPC::Message& message) { |
- // These are pushed out of order so should not have any order messages. |
- base::AutoLock auto_lock(channel_messages_lock_); |
- if (enabled_) { |
- PushOutOfOrderHelper(new GpuChannelMessage(kOutOfOrderNumber, message)); |
- } |
- } |
+uint32_t GpuChannelMessageQueue::GetUnprocessedOrderNum() const { |
+ base::AutoLock auto_lock(channel_messages_lock_); |
+ return unprocessed_order_num_; |
+} |
- bool GenerateSyncPointMessage(gpu::SyncPointManager* sync_point_manager, |
- uint32_t order_number, |
- const IPC::Message& message, |
- bool retire_sync_point, |
- uint32_t* sync_point_number) { |
- DCHECK(message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID); |
- base::AutoLock auto_lock(channel_messages_lock_); |
- if (enabled_) { |
- const uint32 sync_point = sync_point_manager->GenerateSyncPoint(); |
+uint32_t GpuChannelMessageQueue::GetProccessedOrderNum() const { |
+ return processed_order_num_; |
+} |
+ |
+void GpuChannelMessageQueue::PushBackMessage(uint32_t order_number, |
+ const IPC::Message& message) { |
+ base::AutoLock auto_lock(channel_messages_lock_); |
+ if (enabled_) |
+ PushMessageHelper(new GpuChannelMessage(order_number, message)); |
+} |
- GpuChannelMessage* msg = new GpuChannelMessage(order_number, message); |
- msg->retire_sync_point = retire_sync_point; |
- msg->sync_point_number = sync_point; |
+bool GpuChannelMessageQueue::GenerateSyncPointMessage( |
+ gpu::SyncPointManager* sync_point_manager, |
+ uint32_t order_number, |
+ const IPC::Message& message, |
+ bool retire_sync_point, |
+ uint32_t* sync_point) { |
+ DCHECK(message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID); |
dcheng
2015/09/10 21:30:37
DCHECK_EQ
sunnyps
2015/09/10 23:38:36
Done.
|
+ DCHECK(sync_point); |
+ base::AutoLock auto_lock(channel_messages_lock_); |
+ if (enabled_) { |
+ *sync_point = sync_point_manager->GenerateSyncPoint(); |
- *sync_point_number = sync_point; |
- PushMessageHelper(order_number, msg); |
- return true; |
- } |
- return false; |
- } |
+ GpuChannelMessage* msg = new GpuChannelMessage(order_number, message); |
+ msg->retire_sync_point = retire_sync_point; |
+ msg->sync_point = *sync_point; |
- bool HasQueuedMessages() { |
- base::AutoLock auto_lock(channel_messages_lock_); |
- return HasQueuedMessagesLocked(); |
+ PushMessageHelper(msg); |
+ return true; |
} |
+ return false; |
+} |
- base::TimeTicks GetNextMessageTimeTick() { |
- base::AutoLock auto_lock(channel_messages_lock_); |
+bool GpuChannelMessageQueue::HasQueuedMessages() const { |
+ base::AutoLock auto_lock(channel_messages_lock_); |
+ return HasQueuedMessagesHelper(); |
+} |
- base::TimeTicks next_message_tick; |
- if (!channel_messages_.empty()) |
- next_message_tick = channel_messages_.front()->time_received; |
+base::TimeTicks GpuChannelMessageQueue::GetNextMessageTimeTick() const { |
+ base::AutoLock auto_lock(channel_messages_lock_); |
- base::TimeTicks next_out_of_order_tick; |
- if (!out_of_order_messages_.empty()) |
- next_out_of_order_tick = out_of_order_messages_.front()->time_received; |
+ base::TimeTicks next_message_tick; |
+ if (!channel_messages_.empty()) |
+ next_message_tick = channel_messages_.front()->time_received; |
- if (next_message_tick.is_null()) |
- return next_out_of_order_tick; |
- else if (next_out_of_order_tick.is_null()) |
- return next_message_tick; |
- else |
- return std::min(next_message_tick, next_out_of_order_tick); |
- } |
+ base::TimeTicks next_out_of_order_tick; |
+ if (!out_of_order_messages_.empty()) |
+ next_out_of_order_tick = out_of_order_messages_.front()->time_received; |
- protected: |
- virtual ~GpuChannelMessageQueue() { |
- DCHECK(channel_messages_.empty()); |
- DCHECK(out_of_order_messages_.empty()); |
- } |
+ if (next_message_tick.is_null()) |
+ return next_out_of_order_tick; |
+ else if (next_out_of_order_tick.is_null()) |
+ return next_message_tick; |
+ else |
+ return std::min(next_message_tick, next_out_of_order_tick); |
+} |
- private: |
- friend class GpuChannel; |
- friend class base::RefCountedThreadSafe<GpuChannelMessageQueue>; |
- |
- GpuChannelMessageQueue( |
- base::WeakPtr<GpuChannel> gpu_channel, |
- scoped_refptr<base::SingleThreadTaskRunner> task_runner) |
- : enabled_(true), |
- unprocessed_order_num_(0), |
- gpu_channel_(gpu_channel), |
- task_runner_(task_runner) {} |
- |
- void DeleteAndDisableMessages(GpuChannelManager* gpu_channel_manager) { |
- { |
- base::AutoLock auto_lock(channel_messages_lock_); |
- DCHECK(enabled_); |
- enabled_ = false; |
- } |
+GpuChannelMessage* GpuChannelMessageQueue::GetNextMessage() const { |
+ base::AutoLock auto_lock(channel_messages_lock_); |
+ if (!out_of_order_messages_.empty()) { |
+ return out_of_order_messages_.front(); |
+ } else if (!channel_messages_.empty()) { |
+ return channel_messages_.front(); |
+ } else { |
+ return nullptr; |
+ } |
+} |
- // We guarantee that the queues will no longer be modified after enabled_ |
- // is set to false, it is now safe to modify the queue without the lock. |
- // All public facing modifying functions check enabled_ while all |
- // private modifying functions DCHECK(enabled_) to enforce this. |
- while (!channel_messages_.empty()) { |
- GpuChannelMessage* msg = channel_messages_.front(); |
- // This needs to clean up both GpuCommandBufferMsg_InsertSyncPoint and |
- // GpuCommandBufferMsg_RetireSyncPoint messages, safer to just check |
- // if we have a sync point number here. |
- if (msg->sync_point_number) { |
- gpu_channel_manager->sync_point_manager()->RetireSyncPoint( |
- msg->sync_point_number); |
- } |
- delete msg; |
- channel_messages_.pop_front(); |
- } |
- STLDeleteElements(&out_of_order_messages_); |
+bool GpuChannelMessageQueue::MessageProcessed(uint32_t order_number) { |
+ base::AutoLock auto_lock(channel_messages_lock_); |
+ if (order_number != kOutOfOrderNumber) { |
+ DCHECK(!channel_messages_.empty()); |
+ DCHECK(order_number == channel_messages_.front()->order_number); |
+ processed_order_num_ = order_number; |
+ channel_messages_.pop_front(); |
+ } else { |
+ DCHECK(!out_of_order_messages_.empty()); |
+ out_of_order_messages_.pop_front(); |
} |
dcheng
2015/09/10 21:30:37
Won't this leak the popped element?
sunnyps
2015/09/10 23:38:36
Thanks for catching this. Fixed this for now but I will clean it up properly in a follow-up.
|
+ return HasQueuedMessagesHelper(); |
+} |
- void PushUnfinishedMessage(uint32_t order_number, |
- const IPC::Message& message) { |
- // This is pushed only if it was unfinished, so order number is kept. |
- GpuChannelMessage* msg = new GpuChannelMessage(order_number, message); |
+void GpuChannelMessageQueue::DeleteAndDisableMessages( |
+ GpuChannelManager* gpu_channel_manager) { |
+ { |
base::AutoLock auto_lock(channel_messages_lock_); |
DCHECK(enabled_); |
- const bool had_messages = HasQueuedMessagesLocked(); |
- if (order_number == kOutOfOrderNumber) |
- out_of_order_messages_.push_front(msg); |
- else |
- channel_messages_.push_front(msg); |
- |
- if (!had_messages) |
- ScheduleHandleMessage(); |
+ enabled_ = false; |
} |
- void ScheduleHandleMessage() { |
- task_runner_->PostTask( |
- FROM_HERE, base::Bind(&GpuChannel::HandleMessage, gpu_channel_)); |
+ // We guarantee that the queues will no longer be modified after enabled_ |
+ // is set to false, it is now safe to modify the queue without the lock. |
+ // All public facing modifying functions check enabled_ while all |
+ // private modifying functions DCHECK(enabled_) to enforce this. |
+ while (!channel_messages_.empty()) { |
+ GpuChannelMessage* msg = channel_messages_.front(); |
+ // This needs to clean up both GpuCommandBufferMsg_InsertSyncPoint and |
+ // GpuCommandBufferMsg_RetireSyncPoint messages, safer to just check |
+ // if we have a sync point number here. |
+ if (msg->sync_point) { |
+ gpu_channel_manager->sync_point_manager()->RetireSyncPoint( |
+ msg->sync_point); |
+ } |
+ delete msg; |
+ channel_messages_.pop_front(); |
} |
+ STLDeleteElements(&out_of_order_messages_); |
+} |
- void PushMessageHelper(uint32_t order_number, GpuChannelMessage* msg) { |
- channel_messages_lock_.AssertAcquired(); |
- DCHECK(enabled_); |
- unprocessed_order_num_ = order_number; |
- const bool had_messages = HasQueuedMessagesLocked(); |
- channel_messages_.push_back(msg); |
- if (!had_messages) |
- ScheduleHandleMessage(); |
- } |
+void GpuChannelMessageQueue::ScheduleHandleMessage() { |
+ task_runner_->PostTask(FROM_HERE, |
+ base::Bind(&GpuChannel::HandleMessage, gpu_channel_)); |
+} |
- void PushOutOfOrderHelper(GpuChannelMessage* msg) { |
- channel_messages_lock_.AssertAcquired(); |
- DCHECK(enabled_); |
- const bool had_messages = HasQueuedMessagesLocked(); |
+void GpuChannelMessageQueue::PushMessageHelper(GpuChannelMessage* msg) { |
+ channel_messages_lock_.AssertAcquired(); |
+ DCHECK(enabled_); |
+ bool had_messages = HasQueuedMessagesHelper(); |
+ if (msg->order_number != kOutOfOrderNumber) { |
+ unprocessed_order_num_ = msg->order_number; |
+ channel_messages_.push_back(msg); |
+ } else { |
out_of_order_messages_.push_back(msg); |
- if (!had_messages) |
- ScheduleHandleMessage(); |
- } |
- |
- bool HasQueuedMessagesLocked() { |
- channel_messages_lock_.AssertAcquired(); |
- return !channel_messages_.empty() || !out_of_order_messages_.empty(); |
} |
+ if (!had_messages) |
+ ScheduleHandleMessage(); |
+} |
- bool enabled_; |
- |
- // Highest IPC order number seen, set when queued on the IO thread. |
- uint32_t unprocessed_order_num_; |
- std::deque<GpuChannelMessage*> channel_messages_; |
- std::deque<GpuChannelMessage*> out_of_order_messages_; |
- |
- // This lock protects enabled_, unprocessed_order_num_, and both deques. |
- base::Lock channel_messages_lock_; |
- |
- base::WeakPtr<GpuChannel> gpu_channel_; |
- scoped_refptr<base::SingleThreadTaskRunner> task_runner_; |
- |
- DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageQueue); |
-}; |
+bool GpuChannelMessageQueue::HasQueuedMessagesHelper() const { |
+ channel_messages_lock_.AssertAcquired(); |
+ return !channel_messages_.empty() || !out_of_order_messages_.empty(); |
+} |
// Begin order numbers at 1 so 0 can mean no orders. |
uint32_t GpuChannelMessageFilter::global_order_counter_ = 1; |
@@ -362,15 +323,16 @@ bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) { |
} |
if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { |
- base::Tuple<bool> retire; |
+ base::Tuple<bool> params; |
IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); |
if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message, |
- &retire)) { |
+ ¶ms)) { |
reply->set_reply_error(); |
Send(reply); |
return true; |
} |
- if (!future_sync_points_ && !base::get<0>(retire)) { |
+ bool retire_sync_point = base::get<0>(params); |
+ if (!future_sync_points_ && !retire_sync_point) { |
LOG(ERROR) << "Untrusted contexts can't create future sync points"; |
reply->set_reply_error(); |
Send(reply); |
@@ -381,7 +343,7 @@ bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) { |
// message queue could be disabled from the main thread during generation. |
uint32_t sync_point = 0u; |
if (!message_queue_->GenerateSyncPointMessage( |
- sync_point_manager_, order_number, message, base::get<0>(retire), |
+ sync_point_manager_, order_number, message, retire_sync_point, |
&sync_point)) { |
LOG(ERROR) << "GpuChannel has been destroyed."; |
reply->set_reply_error(); |
@@ -401,7 +363,7 @@ bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) { |
message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { |
// Move Wait commands to the head of the queue, so the renderer |
// doesn't have to wait any longer than necessary. |
- message_queue_->PushOutOfOrderMessage(message); |
+ message_queue_->PushBackMessage(kOutOfOrderNumber, message); |
} else { |
message_queue_->PushBackMessage(order_number, message); |
} |
@@ -618,8 +580,6 @@ GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager, |
pending_valuebuffer_state_(new gpu::ValueStateMap), |
watchdog_(watchdog), |
software_(software), |
- current_order_num_(0), |
- processed_order_num_(0), |
num_stubs_descheduled_(0), |
allow_future_sync_points_(allow_future_sync_points), |
allow_real_time_streams_(allow_real_time_streams), |
@@ -676,6 +636,14 @@ base::ProcessId GpuChannel::GetClientPID() const { |
return channel_->GetPeerPID(); |
} |
+uint32_t GpuChannel::GetProcessedOrderNum() const { |
+ return message_queue_->GetProccessedOrderNum(); |
+} |
+ |
+uint32_t GpuChannel::GetUnprocessedOrderNum() const { |
+ return message_queue_->GetUnprocessedOrderNum(); |
+} |
+ |
bool GpuChannel::OnMessageReceived(const IPC::Message& message) { |
// All messages should be pushed to channel_messages_ and handled separately. |
NOTREACHED(); |
@@ -716,7 +684,7 @@ void GpuChannel::StubSchedulingChanged(bool scheduled) { |
bool a_stub_was_descheduled = num_stubs_descheduled_ > 0; |
if (scheduled) { |
num_stubs_descheduled_--; |
- message_queue_->ScheduleHandleMessage(); |
+ ScheduleHandleMessage(); |
} else { |
num_stubs_descheduled_++; |
} |
@@ -864,117 +832,75 @@ bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) { |
} |
void GpuChannel::HandleMessage() { |
- GpuChannelMessage* m = nullptr; |
- GpuCommandBufferStub* stub = nullptr; |
- bool has_more_messages = false; |
- { |
- base::AutoLock auto_lock(message_queue_->channel_messages_lock_); |
- if (!message_queue_->out_of_order_messages_.empty()) { |
- m = message_queue_->out_of_order_messages_.front(); |
- DCHECK(m->order_number == kOutOfOrderNumber); |
- message_queue_->out_of_order_messages_.pop_front(); |
- } else if (!message_queue_->channel_messages_.empty()) { |
- m = message_queue_->channel_messages_.front(); |
- DCHECK(m->order_number != kOutOfOrderNumber); |
- message_queue_->channel_messages_.pop_front(); |
- } else { |
- // No messages to process |
- return; |
- } |
+ GpuChannelMessage* m = message_queue_->GetNextMessage(); |
- has_more_messages = message_queue_->HasQueuedMessagesLocked(); |
- } |
+ // TODO(sunnyps): This could be a DCHECK maybe? |
+ if (!m) |
+ return; |
- bool retry_message = false; |
- stub = stubs_.get(m->message.routing_id()); |
- if (stub) { |
- if (!stub->IsScheduled()) { |
- retry_message = true; |
- } |
- if (stub->IsPreempted()) { |
- retry_message = true; |
- message_queue_->ScheduleHandleMessage(); |
- } |
- } |
+ uint32_t order_number = m->order_number; |
+ IPC::Message& message = m->message; |
+ int32_t routing_id = message.routing_id(); |
+ GpuCommandBufferStub* stub = stubs_.get(routing_id); |
- if (retry_message) { |
- base::AutoLock auto_lock(message_queue_->channel_messages_lock_); |
- if (m->order_number == kOutOfOrderNumber) |
- message_queue_->out_of_order_messages_.push_front(m); |
- else |
- message_queue_->channel_messages_.push_front(m); |
- return; |
- } else if (has_more_messages) { |
- message_queue_->ScheduleHandleMessage(); |
- } |
+ DVLOG(1) << "received message @" << &message << " on channel @" << this |
+ << " with type " << message.type(); |
- scoped_ptr<GpuChannelMessage> scoped_message(m); |
- const uint32_t order_number = m->order_number; |
- const int32_t routing_id = m->message.routing_id(); |
- |
- // TODO(dyen): Temporary handling of old sync points. |
- // This must ensure that the sync point will be retired. Normally we'll |
- // find the stub based on the routing ID, and associate the sync point |
- // with it, but if that fails for any reason (channel or stub already |
- // deleted, invalid routing id), we need to retire the sync point |
- // immediately. |
- if (m->message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { |
- const bool retire = m->retire_sync_point; |
- const uint32_t sync_point = m->sync_point_number; |
+ bool handled = false; |
+ |
+ if (routing_id == MSG_ROUTING_CONTROL) { |
+ handled = OnControlMessageReceived(message); |
+ } else if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { |
+ // TODO(dyen): Temporary handling of old sync points. |
+ // This must ensure that the sync point will be retired. Normally we'll |
+ // find the stub based on the routing ID, and associate the sync point |
+ // with it, but if that fails for any reason (channel or stub already |
+ // deleted, invalid routing id), we need to retire the sync point |
+ // immediately. |
if (stub) { |
- stub->AddSyncPoint(sync_point); |
- if (retire) { |
- m->message = |
- GpuCommandBufferMsg_RetireSyncPoint(routing_id, sync_point); |
- } |
+ stub->AddSyncPoint(m->sync_point, m->retire_sync_point); |
} else { |
- current_order_num_ = order_number; |
- gpu_channel_manager_->sync_point_manager()->RetireSyncPoint(sync_point); |
- MessageProcessed(order_number); |
- return; |
+ gpu_channel_manager_->sync_point_manager()->RetireSyncPoint( |
+ m->sync_point); |
} |
+ handled = true; |
+ } else { |
+ handled = router_.RouteMessage(message); |
} |
- IPC::Message* message = &m->message; |
- bool message_processed = true; |
- |
- DVLOG(1) << "received message @" << message << " on channel @" << this |
- << " with type " << message->type(); |
+ // Respond to sync messages even if router failed to route. |
+ if (!handled && message.is_sync()) { |
+ IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); |
+ reply->set_reply_error(); |
+ Send(reply); |
+ handled = true; |
+ } |
- if (order_number != kOutOfOrderNumber) { |
- // Make sure this is a valid unprocessed order number. |
- DCHECK(order_number <= GetUnprocessedOrderNum() && |
- order_number >= GetProcessedOrderNum()); |
+ // A command buffer may be descheduled or preempted but only in the middle of |
+ // a flush. In this case we should not pop the message from the queue. |
+ if (stub && stub->HasUnprocessedCommands()) { |
+ DCHECK(message.type() == GpuCommandBufferMsg_AsyncFlush::ID); |
+ // If the stub was preempted then we need to schedule a wakeup otherwise |
+ // some other event will wake us up e.g. sync point completion. |
+ if (stub->IsPreempted()) |
David Yen
2015/09/10 22:42:06
Don't we want to check for preemption before we handle the message?
sunnyps
2015/09/10 23:38:36
The flush will early out if the stub is preempted
|
+ ScheduleHandleMessage(); |
+ return; |
+ } |
- current_order_num_ = order_number; |
+ if (message_queue_->MessageProcessed(order_number)) { |
+ ScheduleHandleMessage(); |
David Yen
2015/09/10 22:42:06
Did you intend to make messages in a channel only be handled one at a time?
sunnyps
2015/09/10 23:38:36
The first HandleMessage is called from the message queue when a message is pushed.
|
} |
- bool result = false; |
- if (routing_id == MSG_ROUTING_CONTROL) |
- result = OnControlMessageReceived(*message); |
- else |
- result = router_.RouteMessage(*message); |
- if (!result) { |
- // Respond to sync messages even if router failed to route. |
- if (message->is_sync()) { |
- IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message); |
- reply->set_reply_error(); |
- Send(reply); |
- } |
- } else { |
- // If the command buffer becomes unscheduled as a result of handling the |
- // message but still has more commands to process, synthesize an IPC |
- // message to flush that command buffer. |
- if (stub) { |
- if (stub->HasUnprocessedCommands()) { |
- message_queue_->PushUnfinishedMessage( |
- order_number, GpuCommandBufferMsg_Rescheduled(stub->route_id())); |
- message_processed = false; |
- } |
- } |
+ if (preempting_flag_.get()) { |
dcheng
2015/09/10 21:30:37
if (preempting_flag_) {
sunnyps
2015/09/10 23:38:36
Done.
|
+ io_task_runner_->PostTask( |
+ FROM_HERE, |
+ base::Bind(&GpuChannelMessageFilter::OnMessageProcessed, filter_)); |
} |
- if (message_processed) |
- MessageProcessed(order_number); |
+} |
+ |
+void GpuChannel::ScheduleHandleMessage() { |
+ task_runner_->PostTask(FROM_HERE, base::Bind(&GpuChannel::HandleMessage, |
+ weak_factory_.GetWeakPtr())); |
} |
void GpuChannel::OnCreateOffscreenCommandBuffer( |
@@ -1078,19 +1004,6 @@ void GpuChannel::OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg) { |
jpeg_decoder_->AddClient(route_id, reply_msg); |
} |
-void GpuChannel::MessageProcessed(uint32_t order_number) { |
- if (order_number != kOutOfOrderNumber) { |
- DCHECK(current_order_num_ == order_number); |
- DCHECK(processed_order_num_ < order_number); |
- processed_order_num_ = order_number; |
- } |
- if (preempting_flag_.get()) { |
- io_task_runner_->PostTask( |
- FROM_HERE, |
- base::Bind(&GpuChannelMessageFilter::OnMessageProcessed, filter_)); |
- } |
-} |
- |
void GpuChannel::CacheShader(const std::string& key, |
const std::string& shader) { |
gpu_channel_manager_->Send( |
@@ -1160,8 +1073,4 @@ void GpuChannel::HandleUpdateValueState( |
pending_valuebuffer_state_->UpdateState(target, state); |
} |
-uint32_t GpuChannel::GetUnprocessedOrderNum() const { |
- return message_queue_->GetUnprocessedOrderNum(); |
-} |
- |
} // namespace content |