Chromium Code Reviews

Unified Diff: content/common/gpu/gpu_channel.cc

Issue 1355643003: content/gpu: Remove separate queue for out-of-order messages. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@stub_scheduling
Patch Set: piman's review (created 5 years, 3 months ago)
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "content/common/gpu/gpu_channel.h"
 
 #if defined(OS_WIN)
 #include <windows.h>
 #endif
 
(...skipping 50 matching lines...)
 const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;
 
 // Once we trigger a preemption, the maximum duration that we will wait
 // before clearing the preemption.
 const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;
 
 // Stop the preemption once the time for the longest pending IPC drops
 // below this threshold.
 const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;
 
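All three thresholds are multiples of kVsyncIntervalMs, which is defined in the lines elided above. A worked example of the resulting policy, assuming the typical 60 Hz value of roughly 17 ms (an assumption for illustration, not taken from this diff):

    // Preemption policy arithmetic; kVsyncIntervalMs = 17 is an assumed
    // stand-in for the constant defined in the elided lines above.
    #include <cstdint>
    #include <iostream>

    int main() {
      const int64_t kVsyncIntervalMs = 17;
      std::cout << "trigger preemption once an IPC has waited "
                << 2 * kVsyncIntervalMs << " ms\n"   // 34 ms
                << "hold preemption for at most " << kVsyncIntervalMs
                << " ms\n"                           // 17 ms
                << "stop once the oldest pending IPC is under "
                << kVsyncIntervalMs << " ms\n";      // 17 ms
    }
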
-const uint32_t kOutOfOrderNumber = static_cast<uint32_t>(-1);
-
 }  // anonymous namespace
 
+// Begin order numbers at 1 so 0 can mean no orders.
+uint32_t GpuChannelMessageQueue::global_order_counter_ = 1;
+
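This patch set moves the order counter from GpuChannelMessageFilter into the queue itself; PushMessageHelper() below stamps each message while holding the queue lock. A minimal standalone sketch (hypothetical names and std types, not the Chromium classes) of the bookkeeping and of the invariant that GetNextMessage() DCHECKs, processed_order_num_ < front order number <= unprocessed_order_num_:

    #include <cassert>
    #include <cstdint>
    #include <deque>

    struct OrderedQueue {
      uint32_t next_order = 1;   // Starts at 1; 0 means "no orders yet".
      uint32_t processed = 0;    // processed_order_num_ analogue.
      uint32_t unprocessed = 0;  // unprocessed_order_num_ analogue.
      std::deque<uint32_t> pending;

      void Push() {
        pending.push_back(next_order);
        unprocessed = next_order++;
      }

      void Pop() {
        // The invariant the DCHECKs in GetNextMessage() encode:
        assert(processed < pending.front());
        assert(pending.front() <= unprocessed);
        processed = pending.front();
        pending.pop_front();
      }
    };

    int main() {
      OrderedQueue q;
      q.Push();
      q.Push();
      q.Pop();  // processed == 1, unprocessed == 2.
    }
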
 scoped_refptr<GpuChannelMessageQueue> GpuChannelMessageQueue::Create(
     const base::WeakPtr<GpuChannel>& gpu_channel,
     base::SingleThreadTaskRunner* task_runner) {
   return new GpuChannelMessageQueue(gpu_channel, task_runner);
 }
 
 GpuChannelMessageQueue::GpuChannelMessageQueue(
     const base::WeakPtr<GpuChannel>& gpu_channel,
     base::SingleThreadTaskRunner* task_runner)
     : enabled_(true),
       unprocessed_order_num_(0),
       processed_order_num_(0),
       gpu_channel_(gpu_channel),
       task_runner_(task_runner) {}
 
 GpuChannelMessageQueue::~GpuChannelMessageQueue() {
   DCHECK(channel_messages_.empty());
-  DCHECK(out_of_order_messages_.empty());
 }
 
 uint32_t GpuChannelMessageQueue::GetUnprocessedOrderNum() const {
   base::AutoLock auto_lock(channel_messages_lock_);
   return unprocessed_order_num_;
 }
 
-void GpuChannelMessageQueue::PushBackMessage(uint32_t order_number,
-                                             const IPC::Message& message) {
+void GpuChannelMessageQueue::PushBackMessage(const IPC::Message& message) {
   base::AutoLock auto_lock(channel_messages_lock_);
-  if (enabled_) {
-    PushMessageHelper(
-        make_scoped_ptr(new GpuChannelMessage(order_number, message)));
-  }
+  if (enabled_)
+    PushMessageHelper(make_scoped_ptr(new GpuChannelMessage(message)));
 }
 
 bool GpuChannelMessageQueue::GenerateSyncPointMessage(
     gpu::SyncPointManager* sync_point_manager,
-    uint32_t order_number,
     const IPC::Message& message,
     bool retire_sync_point,
     uint32_t* sync_point) {
   DCHECK_EQ((uint32_t)GpuCommandBufferMsg_InsertSyncPoint::ID, message.type());
   DCHECK(sync_point);
   base::AutoLock auto_lock(channel_messages_lock_);
   if (enabled_) {
     *sync_point = sync_point_manager->GenerateSyncPoint();
 
-    scoped_ptr<GpuChannelMessage> msg(
-        new GpuChannelMessage(order_number, message));
+    scoped_ptr<GpuChannelMessage> msg(new GpuChannelMessage(message));
     msg->retire_sync_point = retire_sync_point;
     msg->sync_point = *sync_point;
 
     PushMessageHelper(msg.Pass());
     return true;
   }
   return false;
 }
 
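GenerateSyncPointMessage() generates the sync point and enqueues the message under a single hold of channel_messages_lock_; the comment in OnMessageReceived() below gives the reason: the main thread can disable the queue during teardown, and a sync point generated after that check but never enqueued would also never be retired. A simplified sketch of that atomicity requirement (std::mutex and invented names, not the real SyncPointManager API):

    #include <cstdint>
    #include <deque>
    #include <mutex>

    class SyncQueue {
     public:
      // Generation and enqueueing happen under one lock, so a concurrent
      // Disable() can never strand a generated-but-unqueued sync point.
      bool GenerateAndEnqueue(uint32_t* sync_point) {
        std::lock_guard<std::mutex> lock(mutex_);
        if (!enabled_)
          return false;  // Caller replies with an error instead.
        *sync_point = next_sync_point_++;  // GenerateSyncPoint() stand-in.
        pending_.push_back(*sync_point);
        return true;
      }

      void Disable() {
        std::lock_guard<std::mutex> lock(mutex_);
        enabled_ = false;
        // Whatever is still in pending_ is retired during teardown.
      }

     private:
      std::mutex mutex_;
      bool enabled_ = true;
      uint32_t next_sync_point_ = 1;
      std::deque<uint32_t> pending_;
    };
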
 bool GpuChannelMessageQueue::HasQueuedMessages() const {
   base::AutoLock auto_lock(channel_messages_lock_);
-  return HasQueuedMessagesHelper();
+  return !channel_messages_.empty();
 }
 
 base::TimeTicks GpuChannelMessageQueue::GetNextMessageTimeTick() const {
   base::AutoLock auto_lock(channel_messages_lock_);
-
-  base::TimeTicks next_message_tick;
   if (!channel_messages_.empty())
-    next_message_tick = channel_messages_.front()->time_received;
-
-  base::TimeTicks next_out_of_order_tick;
-  if (!out_of_order_messages_.empty())
-    next_out_of_order_tick = out_of_order_messages_.front()->time_received;
-
-  if (next_message_tick.is_null())
-    return next_out_of_order_tick;
-  else if (next_out_of_order_tick.is_null())
-    return next_message_tick;
-  else
-    return std::min(next_message_tick, next_out_of_order_tick);
+    return channel_messages_.front()->time_received;
+  return base::TimeTicks();
 }
 
 GpuChannelMessage* GpuChannelMessageQueue::GetNextMessage() const {
   base::AutoLock auto_lock(channel_messages_lock_);
-  if (!out_of_order_messages_.empty()) {
-    DCHECK_EQ(out_of_order_messages_.front()->order_number, kOutOfOrderNumber);
-    return out_of_order_messages_.front();
-  } else if (!channel_messages_.empty()) {
+  if (!channel_messages_.empty()) {
     DCHECK_GT(channel_messages_.front()->order_number, processed_order_num_);
     DCHECK_LE(channel_messages_.front()->order_number, unprocessed_order_num_);
     return channel_messages_.front();
-  } else {
-    return nullptr;
   }
+  return nullptr;
 }
 
-bool GpuChannelMessageQueue::MessageProcessed(uint32_t order_number) {
+bool GpuChannelMessageQueue::MessageProcessed() {
   base::AutoLock auto_lock(channel_messages_lock_);
-  if (order_number != kOutOfOrderNumber) {
-    DCHECK(!channel_messages_.empty());
-    scoped_ptr<GpuChannelMessage> msg(channel_messages_.front());
-    channel_messages_.pop_front();
-    DCHECK_EQ(order_number, msg->order_number);
-    processed_order_num_ = order_number;
-  } else {
-    DCHECK(!out_of_order_messages_.empty());
-    scoped_ptr<GpuChannelMessage> msg(out_of_order_messages_.front());
-    out_of_order_messages_.pop_front();
-  }
-  return HasQueuedMessagesHelper();
+  DCHECK(!channel_messages_.empty());
+  scoped_ptr<GpuChannelMessage> msg(channel_messages_.front());
+  channel_messages_.pop_front();
+  processed_order_num_ = msg->order_number;
+  return !channel_messages_.empty();
 }
 
 void GpuChannelMessageQueue::DeleteAndDisableMessages(
     GpuChannelManager* gpu_channel_manager) {
   {
     base::AutoLock auto_lock(channel_messages_lock_);
     DCHECK(enabled_);
     enabled_ = false;
   }
 
   // We guarantee that the queues will no longer be modified after enabled_
   // is set to false, it is now safe to modify the queue without the lock.
   // All public facing modifying functions check enabled_ while all
   // private modifying functions DCHECK(enabled_) to enforce this.
   while (!channel_messages_.empty()) {
     scoped_ptr<GpuChannelMessage> msg(channel_messages_.front());
     channel_messages_.pop_front();
     // This needs to clean up both GpuCommandBufferMsg_InsertSyncPoint and
     // GpuCommandBufferMsg_RetireSyncPoint messages, safer to just check
     // if we have a sync point number here.
     if (msg->sync_point) {
       gpu_channel_manager->sync_point_manager()->RetireSyncPoint(
           msg->sync_point);
     }
   }
-  STLDeleteElements(&out_of_order_messages_);
 }
 
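DeleteAndDisableMessages() is a two-phase teardown: flip enabled_ under the lock, then drain without it. That is sound only because every public mutator rechecks enabled_ under the same lock, exactly as the comment above states. A standalone sketch of the protocol (invented names, std types):

    #include <deque>
    #include <mutex>

    struct Message { unsigned sync_point = 0; };

    class TeardownQueue {
     public:
      void Push(const Message& m) {
        std::lock_guard<std::mutex> lock(mutex_);
        if (enabled_)  // Producers refuse to push once disabled.
          queue_.push_back(m);
      }

      void DeleteAndDisable() {
        {
          std::lock_guard<std::mutex> lock(mutex_);
          enabled_ = false;  // Phase 1: stop all producers.
        }
        // Phase 2: no lock needed; nobody else can touch the queue now.
        while (!queue_.empty()) {
          Message m = queue_.front();
          queue_.pop_front();
          if (m.sync_point)
            RetireSyncPoint(m.sync_point);  // Hypothetical stand-in.
        }
      }

     private:
      static void RetireSyncPoint(unsigned) {}
      std::mutex mutex_;
      bool enabled_ = true;
      std::deque<Message> queue_;
    };
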
 void GpuChannelMessageQueue::ScheduleHandleMessage() {
   task_runner_->PostTask(FROM_HERE,
                          base::Bind(&GpuChannel::HandleMessage, gpu_channel_));
 }
 
 void GpuChannelMessageQueue::PushMessageHelper(
     scoped_ptr<GpuChannelMessage> msg) {
   channel_messages_lock_.AssertAcquired();
   DCHECK(enabled_);
-  bool had_messages = HasQueuedMessagesHelper();
-  if (msg->order_number != kOutOfOrderNumber) {
-    unprocessed_order_num_ = msg->order_number;
-    channel_messages_.push_back(msg.release());
-  } else {
-    out_of_order_messages_.push_back(msg.release());
-  }
+
+  msg->order_number = global_order_counter_++;
+  msg->time_received = base::TimeTicks::Now();
+
+  unprocessed_order_num_ = msg->order_number;
+
+  bool had_messages = !channel_messages_.empty();
+  channel_messages_.push_back(msg.release());
   if (!had_messages)
     ScheduleHandleMessage();
 }
 
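PushMessageHelper() now stamps the order number and timestamp itself and only schedules HandleMessage on the empty-to-non-empty transition, so a burst of pushes costs a single posted task. A minimal sketch of that transition test (std types, hypothetical Schedule callback; the real code posts while still holding the lock, which is equally safe since posting a task is thread-safe):

    #include <deque>
    #include <functional>
    #include <mutex>

    class CoalescingQueue {
     public:
      explicit CoalescingQueue(std::function<void()> schedule)
          : schedule_(std::move(schedule)) {}

      void Push(int msg) {
        bool need_schedule = false;
        {
          std::lock_guard<std::mutex> lock(mutex_);
          need_schedule = queue_.empty();  // Empty -> non-empty transition?
          queue_.push_back(msg);
        }
        if (need_schedule)
          schedule_();  // ScheduleHandleMessage() analogue.
      }

     private:
      std::mutex mutex_;
      std::deque<int> queue_;
      std::function<void()> schedule_;
    };
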
-bool GpuChannelMessageQueue::HasQueuedMessagesHelper() const {
-  channel_messages_lock_.AssertAcquired();
-  return !channel_messages_.empty() || !out_of_order_messages_.empty();
-}
-
-// Begin order numbers at 1 so 0 can mean no orders.
-uint32_t GpuChannelMessageFilter::global_order_counter_ = 1;
-
 GpuChannelMessageFilter::GpuChannelMessageFilter(
+    const base::WeakPtr<GpuChannel>& gpu_channel,
     GpuChannelMessageQueue* message_queue,
     gpu::SyncPointManager* sync_point_manager,
     base::SingleThreadTaskRunner* task_runner,
     bool future_sync_points)
     : preemption_state_(IDLE),
+      gpu_channel_(gpu_channel),
       message_queue_(message_queue),
       sender_(nullptr),
       peer_pid_(base::kNullProcessId),
       sync_point_manager_(sync_point_manager),
       task_runner_(task_runner),
       a_stub_is_descheduled_(false),
       future_sync_points_(future_sync_points) {}
 
 GpuChannelMessageFilter::~GpuChannelMessageFilter() {}
 
(...skipping 48 matching lines...)
 void GpuChannelMessageFilter::RemoveChannelFilter(
     scoped_refptr<IPC::MessageFilter> filter) {
   if (sender_)
     filter->OnFilterRemoved();
   channel_filters_.erase(
       std::find(channel_filters_.begin(), channel_filters_.end(), filter));
 }
 
 bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) {
   DCHECK(sender_);
+
+  if (message.should_unblock() || message.is_reply()) {
+    LOG(ERROR) << "Unexpected message type";
        [piman 2015/09/18 20:03:40] nit: DLOG/DVLOG. No need to include the string and …
        [sunnyps 2015/09/18 21:02:08] Done.
+    return true;
+  }
+
   for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) {
     if (filter->OnMessageReceived(message)) {
       return true;
     }
   }
 
-  const uint32_t order_number = global_order_counter_++;
   bool handled = false;
   if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) &&
       !future_sync_points_) {
-    DLOG(ERROR) << "Untrusted client should not send "
-                   "GpuCommandBufferMsg_RetireSyncPoint message";
+    LOG(ERROR) << "Untrusted client should not send "
+                  "GpuCommandBufferMsg_RetireSyncPoint message";
        [piman 2015/09/18 20:03:40] same here.
        [sunnyps 2015/09/18 21:02:08] Done.
     return true;
   }
 
   if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
     base::Tuple<bool> params;
     IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
     if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message,
                                                             &params)) {
       reply->set_reply_error();
       Send(reply);
       return true;
     }
     bool retire_sync_point = base::get<0>(params);
     if (!future_sync_points_ && !retire_sync_point) {
       LOG(ERROR) << "Untrusted contexts can't create future sync points";
       reply->set_reply_error();
       Send(reply);
       return true;
     }
 
     // Message queue must handle the entire sync point generation because the
     // message queue could be disabled from the main thread during generation.
     uint32_t sync_point = 0u;
     if (!message_queue_->GenerateSyncPointMessage(
-            sync_point_manager_, order_number, message, retire_sync_point,
-            &sync_point)) {
+            sync_point_manager_, message, retire_sync_point, &sync_point)) {
       LOG(ERROR) << "GpuChannel has been destroyed.";
       reply->set_reply_error();
       Send(reply);
       return true;
     }
 
     DCHECK_NE(sync_point, 0u);
     GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
     Send(reply);
     handled = true;
   }
 
   // Forward all other messages to the GPU Channel.
-  if (!handled && !message.is_reply() && !message.should_unblock()) {
+  if (!handled) {
     if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
         message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
-      // Move Wait commands to the head of the queue, so the renderer
-      // doesn't have to wait any longer than necessary.
-      message_queue_->PushBackMessage(kOutOfOrderNumber, message);
+      task_runner_->PostTask(FROM_HERE,
+                             base::Bind(&GpuChannel::HandleOutOfOrderMessage,
+                                        gpu_channel_, message));
     } else {
-      message_queue_->PushBackMessage(order_number, message);
+      message_queue_->PushBackMessage(message);
     }
     handled = true;
   }
 
   UpdatePreemptionState();
   return handled;
 }
 
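The forwarding branch above is the heart of this CL: Wait* messages no longer get a sentinel order number and a second queue; the filter posts them from the IO thread straight to the main thread, where the new GpuChannel::HandleOutOfOrderMessage (added further down) routes them. A toy model of the split, with stand-in types for the message, the ordered queue, and the main-thread task runner:

    #include <functional>
    #include <iostream>
    #include <queue>
    #include <string>

    struct Msg { std::string type; };

    struct TaskRunner {
      std::queue<std::function<void()>> tasks;
      void Post(std::function<void()> t) { tasks.push(std::move(t)); }
      void RunAll() {
        while (!tasks.empty()) { tasks.front()(); tasks.pop(); }
      }
    };

    int main() {
      TaskRunner main_thread;
      std::queue<Msg> ordered_queue;

      auto on_message_received = [&](const Msg& msg) {
        if (msg.type == "WaitForTokenInRange" ||
            msg.type == "WaitForGetOffsetInRange") {
          // Out-of-order path: bypasses the queue, gets no order number.
          main_thread.Post([msg] {
            std::cout << "handled out of order: " << msg.type << "\n";
          });
        } else {
          ordered_queue.push(msg);  // Ordered path (PushBackMessage).
        }
      };

      on_message_received({"AsyncFlush"});
      on_message_received({"WaitForTokenInRange"});
      // The wait is serviced even though the flush is still queued.
      main_thread.RunAll();
    }
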
 void GpuChannelMessageFilter::OnMessageProcessed() {
   UpdatePreemptionState();
 
(...skipping 205 matching lines...)
       allow_future_sync_points_(allow_future_sync_points),
       allow_real_time_streams_(allow_real_time_streams),
       weak_factory_(this) {
   DCHECK(gpu_channel_manager);
   DCHECK(client_id);
 
   message_queue_ =
       GpuChannelMessageQueue::Create(weak_factory_.GetWeakPtr(), task_runner);
 
   filter_ = new GpuChannelMessageFilter(
-      message_queue_.get(), gpu_channel_manager_->sync_point_manager(),
-      task_runner_.get(), allow_future_sync_points_);
+      weak_factory_.GetWeakPtr(), message_queue_.get(),
+      gpu_channel_manager_->sync_point_manager(), task_runner_.get(),
+      allow_future_sync_points_);
 
   subscription_ref_set_->AddObserver(this);
 }
 
 GpuChannel::~GpuChannel() {
   // Clear stubs first because of dependencies.
   stubs_.clear();
 
   message_queue_->DeleteAndDisableMessages(gpu_channel_manager_);
 
(...skipping 231 matching lines...)
     ScheduleHandleMessage();
     return;
   }
 
   GpuChannelMessage* m = message_queue_->GetNextMessage();
 
   // TODO(sunnyps): This could be a DCHECK maybe?
   if (!m)
     return;
 
-  uint32_t order_number = m->order_number;
+  current_order_num_ = m->order_number;
   const IPC::Message& message = m->message;
   int32_t routing_id = message.routing_id();
   GpuCommandBufferStub* stub = stubs_.get(routing_id);
 
   DCHECK(!stub || stub->IsScheduled());
 
   DVLOG(1) << "received message @" << &message << " on channel @" << this
            << " with type " << message.type();
 
-  current_order_num_ = order_number;
-
   bool handled = false;
 
   if (routing_id == MSG_ROUTING_CONTROL) {
     handled = OnControlMessageReceived(message);
   } else if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
     // TODO(dyen): Temporary handling of old sync points.
     // This must ensure that the sync point will be retired. Normally we'll
     // find the stub based on the routing ID, and associate the sync point
     // with it, but if that fails for any reason (channel or stub already
     // deleted, invalid routing id), we need to retire the sync point
(...skipping 12 matching lines...)
   // Respond to sync messages even if router failed to route.
   if (!handled && message.is_sync()) {
     IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
     reply->set_reply_error();
     Send(reply);
     handled = true;
   }
 
   // A command buffer may be descheduled or preempted but only in the middle of
   // a flush. In this case we should not pop the message from the queue.
-  if (stub && stub->HasUnprocessedCommands() &&
-      order_number != kOutOfOrderNumber) {
+  if (stub && stub->HasUnprocessedCommands()) {
     DCHECK_EQ((uint32_t)GpuCommandBufferMsg_AsyncFlush::ID, message.type());
     // If the stub is still scheduled then we were preempted and need to
     // schedule a wakeup otherwise some other event will wake us up e.g. sync
     // point completion. No DCHECK for preemption flag because that can change
     // any time.
     if (stub->IsScheduled())
       ScheduleHandleMessage();
     return;
   }
 
-  if (message_queue_->MessageProcessed(order_number)) {
+  if (message_queue_->MessageProcessed())
     ScheduleHandleMessage();
-  }
 
   if (preempting_flag_) {
     io_task_runner_->PostTask(
         FROM_HERE,
         base::Bind(&GpuChannelMessageFilter::OnMessageProcessed, filter_));
   }
 }
 
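HandleMessage() processes at most one message per posted task: it reposts itself when MessageProcessed() reports more queued work, and also when a preempted or descheduled flush leaves the front message in place. A compact single-threaded model of that repost loop (invented names, std types):

    #include <deque>
    #include <functional>
    #include <queue>

    struct TaskRunner {
      std::queue<std::function<void()>> tasks;
      void Post(std::function<void()> t) { tasks.push(std::move(t)); }
      void RunAll() {
        while (!tasks.empty()) {
          auto t = std::move(tasks.front());
          tasks.pop();
          t();
        }
      }
    };

    class Channel {
     public:
      explicit Channel(TaskRunner* runner) : runner_(runner) {}

      void Push(int msg) {
        bool was_empty = queue_.empty();
        queue_.push_back(msg);
        if (was_empty)
          Schedule();
      }

     private:
      void Schedule() { runner_->Post([this] { HandleOne(); }); }

      void HandleOne() {
        if (queue_.empty())
          return;
        Process(queue_.front());
        queue_.pop_front();
        if (!queue_.empty())  // MessageProcessed() analogue.
          Schedule();
      }

      static void Process(int) {}

      std::deque<int> queue_;
      TaskRunner* runner_;
    };

    int main() {
      TaskRunner runner;
      Channel channel(&runner);
      channel.Push(1);
      channel.Push(2);  // No extra task; HandleOne reposts itself.
      runner.RunAll();
    }
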
 void GpuChannel::ScheduleHandleMessage() {
   task_runner_->PostTask(FROM_HERE, base::Bind(&GpuChannel::HandleMessage,
                                                weak_factory_.GetWeakPtr()));
 }
 
+void GpuChannel::HandleOutOfOrderMessage(const IPC::Message& msg) {
+  switch (msg.type()) {
+    case GpuCommandBufferMsg_WaitForGetOffsetInRange::ID:
+    case GpuCommandBufferMsg_WaitForTokenInRange::ID:
+      router_.RouteMessage(msg);
+      break;
+    default:
+      NOTREACHED();
+  }
+}
+
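The new handler accepts only the two Wait* message types and hits NOTREACHED() for anything else, so any future caller that tries to route a new type around the ordered queue fails loudly in debug builds. The same guard pattern in miniature (illustrative enum, assert in place of NOTREACHED):

    #include <cassert>

    enum class MsgType { kWaitForTokenInRange, kWaitForGetOffsetInRange, kOther };

    void HandleOutOfOrder(MsgType type) {
      switch (type) {
        case MsgType::kWaitForTokenInRange:
        case MsgType::kWaitForGetOffsetInRange:
          break;  // router_.RouteMessage(msg) in the real code.
        default:
          assert(false && "only Wait* messages may skip the ordered queue");
      }
    }
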
 void GpuChannel::OnCreateOffscreenCommandBuffer(
     const gfx::Size& size,
     const GPUCreateCommandBufferConfig& init_params,
     int32 route_id,
     bool* succeeded) {
   TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer", "route_id",
                route_id);
 
   int32 share_group_id = init_params.share_group_id;
   GpuCommandBufferStub* share_group = stubs_.get(share_group_id);
 
(...skipping 151 matching lines...)
     }
   }
 }
 
 void GpuChannel::HandleUpdateValueState(
     unsigned int target, const gpu::ValueState& state) {
   pending_valuebuffer_state_->UpdateState(target, state);
 }
 
 }  // namespace content