OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/common/gpu/gpu_channel.h" | 5 #include "content/common/gpu/gpu_channel.h" |
6 | 6 |
7 #if defined(OS_WIN) | 7 #if defined(OS_WIN) |
8 #include <windows.h> | 8 #include <windows.h> |
9 #endif | 9 #endif |
10 | 10 |
(...skipping 54 matching lines...) | |
65 const int64 kMaxPreemptTimeMs = kVsyncIntervalMs; | 65 const int64 kMaxPreemptTimeMs = kVsyncIntervalMs; |
66 | 66 |
67 // Stop the preemption once the time for the longest pending IPC drops | 67 // Stop the preemption once the time for the longest pending IPC drops |
68 // below this threshold. | 68 // below this threshold. |
69 const int64 kStopPreemptThresholdMs = kVsyncIntervalMs; | 69 const int64 kStopPreemptThresholdMs = kVsyncIntervalMs; |
70 | 70 |
71 const uint32_t kOutOfOrderNumber = static_cast<uint32_t>(-1); | 71 const uint32_t kOutOfOrderNumber = static_cast<uint32_t>(-1); |
72 | 72 |
73 } // anonymous namespace | 73 } // anonymous namespace |
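A minimal sketch of how kStopPreemptThresholdMs is meant to be consulted, assuming a TransitionToIdle() helper on the filter; the real state machine lives in UpdatePreemptionState(), which is elided from this diff:

    // Stop preempting once the oldest pending IPC is younger than the
    // threshold. The oldest tick comes from GetNextMessageTimeTick().
    base::TimeTicks oldest = message_queue_->GetNextMessageTimeTick();
    base::TimeDelta age = base::TimeTicks::Now() - oldest;
    if (age.InMilliseconds() < kStopPreemptThresholdMs)
      TransitionToIdle();  // assumed helper, not shown in this diff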
74 | 74 |
75 struct GpuChannelMessage { | 75 scoped_refptr<GpuChannelMessageQueue> GpuChannelMessageQueue::Create( |
76 uint32_t order_number; | 76 const base::WeakPtr<GpuChannel>& gpu_channel, |
77 base::TimeTicks time_received; | 77 base::SingleThreadTaskRunner* task_runner) { |
78 IPC::Message message; | 78 return new GpuChannelMessageQueue(gpu_channel, task_runner); |
79 } | |
79 | 80 |
80 // TODO(dyen): Temporary sync point data, remove once new sync point lands. | 81 GpuChannelMessageQueue::GpuChannelMessageQueue( |
81 bool retire_sync_point; | 82 const base::WeakPtr<GpuChannel>& gpu_channel, |
82 uint32 sync_point_number; | 83 base::SingleThreadTaskRunner* task_runner) |
84 : enabled_(true), | |
85 unprocessed_order_num_(0), | |
86 processed_order_num_(0), | |
87 gpu_channel_(gpu_channel), | |
88 task_runner_(task_runner) {} | |
83 | 89 |
84 GpuChannelMessage(uint32_t order_num, const IPC::Message& msg) | 90 GpuChannelMessageQueue::~GpuChannelMessageQueue() { |
85 : order_number(order_num), | 91 DCHECK(channel_messages_.empty()); |
86 time_received(base::TimeTicks::Now()), | 92 DCHECK(out_of_order_messages_.empty()); |
87 message(msg), | 93 } |
88 retire_sync_point(false), | |
89 sync_point_number(0) {} | |
90 }; | |
91 | 94 |
92 class GpuChannelMessageQueue | 95 uint32_t GpuChannelMessageQueue::GetUnprocessedOrderNum() const { |
93 : public base::RefCountedThreadSafe<GpuChannelMessageQueue> { | 96 base::AutoLock auto_lock(channel_messages_lock_); |
94 public: | 97 return unprocessed_order_num_; |
95 static scoped_refptr<GpuChannelMessageQueue> Create( | 98 } |
96 base::WeakPtr<GpuChannel> gpu_channel, | 99 |
97 scoped_refptr<base::SingleThreadTaskRunner> task_runner) { | 100 void GpuChannelMessageQueue::PushBackMessage(uint32_t order_number, |
98 return new GpuChannelMessageQueue(gpu_channel, task_runner); | 101 const IPC::Message& message) { |
102 base::AutoLock auto_lock(channel_messages_lock_); | |
103 if (enabled_) { | |
104 PushMessageHelper( | |
105 make_scoped_ptr(new GpuChannelMessage(order_number, message))); | |
106 } | |
107 } | |
108 | |
109 bool GpuChannelMessageQueue::GenerateSyncPointMessage( | |
110 gpu::SyncPointManager* sync_point_manager, | |
111 uint32_t order_number, | |
112 const IPC::Message& message, | |
113 bool retire_sync_point, | |
114 uint32_t* sync_point) { | |
115 DCHECK_EQ(message.type(), GpuCommandBufferMsg_InsertSyncPoint::ID); | |
dcheng
2015/09/17 00:15:18
For consistency with gtest, it's nice to put the expected value first.
sunnyps
2015/09/17 00:32:39
Done.
| |
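The gtest convention dcheng mentions puts the expected value first, so failure messages read "expected vs. actual"; applied to the DCHECK_EQ above, the fix presumably became:

    // Expected value (the message ID) first, actual value second.
    DCHECK_EQ(GpuCommandBufferMsg_InsertSyncPoint::ID, message.type());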
116 DCHECK(sync_point); | |
117 base::AutoLock auto_lock(channel_messages_lock_); | |
118 if (enabled_) { | |
119 *sync_point = sync_point_manager->GenerateSyncPoint(); | |
120 | |
121 GpuChannelMessage* msg = new GpuChannelMessage(order_number, message); | |
dcheng
2015/09/17 00:15:18
Just make this a scoped_ptr.
sunnyps
2015/09/17 00:32:39
Done.
| |
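A sketch of the requested scoped_ptr change, consistent with the make_scoped_ptr() call this function already uses below:

    // Own the message from the moment of allocation; no raw owning pointer.
    scoped_ptr<GpuChannelMessage> msg(
        new GpuChannelMessage(order_number, message));
    msg->retire_sync_point = retire_sync_point;
    msg->sync_point = *sync_point;
    PushMessageHelper(msg.Pass());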
122 msg->retire_sync_point = retire_sync_point; | |
123 msg->sync_point = *sync_point; | |
124 | |
125 PushMessageHelper(make_scoped_ptr(msg)); | |
126 return true; | |
127 } | |
128 return false; | |
129 } | |
130 | |
131 bool GpuChannelMessageQueue::HasQueuedMessages() const { | |
132 base::AutoLock auto_lock(channel_messages_lock_); | |
133 return HasQueuedMessagesHelper(); | |
134 } | |
135 | |
136 base::TimeTicks GpuChannelMessageQueue::GetNextMessageTimeTick() const { | |
137 base::AutoLock auto_lock(channel_messages_lock_); | |
138 | |
139 base::TimeTicks next_message_tick; | |
140 if (!channel_messages_.empty()) | |
141 next_message_tick = channel_messages_.front()->time_received; | |
142 | |
143 base::TimeTicks next_out_of_order_tick; | |
144 if (!out_of_order_messages_.empty()) | |
145 next_out_of_order_tick = out_of_order_messages_.front()->time_received; | |
146 | |
147 if (next_message_tick.is_null()) | |
148 return next_out_of_order_tick; | |
149 else if (next_out_of_order_tick.is_null()) | |
150 return next_message_tick; | |
151 else | |
152 return std::min(next_message_tick, next_out_of_order_tick); | |
153 } | |
154 | |
155 GpuChannelMessage* GpuChannelMessageQueue::GetNextMessage() const { | |
156 base::AutoLock auto_lock(channel_messages_lock_); | |
157 if (!out_of_order_messages_.empty()) { | |
158 DCHECK_EQ(out_of_order_messages_.front()->order_number, kOutOfOrderNumber); | |
159 return out_of_order_messages_.front(); | |
160 } else if (!channel_messages_.empty()) { | |
161 DCHECK_GT(channel_messages_.front()->order_number, processed_order_num_); | |
162 DCHECK_LE(channel_messages_.front()->order_number, unprocessed_order_num_); | |
163 return channel_messages_.front(); | |
164 } else { | |
165 return nullptr; | |
166 } | |
167 } | |
168 | |
169 bool GpuChannelMessageQueue::MessageProcessed(uint32_t order_number) { | |
170 base::AutoLock auto_lock(channel_messages_lock_); | |
171 if (order_number != kOutOfOrderNumber) { | |
172 DCHECK(!channel_messages_.empty()); | |
173 GpuChannelMessage* msg = channel_messages_.front(); | |
174 DCHECK_EQ(order_number, msg->order_number); | |
175 processed_order_num_ = order_number; | |
176 channel_messages_.pop_front(); | |
177 delete msg; | |
178 } else { | |
179 DCHECK(!out_of_order_messages_.empty()); | |
180 GpuChannelMessage* msg = out_of_order_messages_.front(); | |
dcheng
2015/09/17 00:15:18
Just make |msg| here and above a scoped_ptr<> and the manual delete goes away.
sunnyps
2015/09/17 00:32:39
Done.
| |
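What the scoped_ptr version asked for here might look like, given that the deques hold raw pointers (see the member declarations below):

    // Take ownership of the front element; pop_front() then drops the raw
    // pointer and |msg| is deleted automatically at end of scope.
    scoped_ptr<GpuChannelMessage> msg(channel_messages_.front());
    channel_messages_.pop_front();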
181 out_of_order_messages_.pop_front(); | |
182 delete msg; | |
183 } | |
184 return HasQueuedMessagesHelper(); | |
185 } | |
186 | |
187 void GpuChannelMessageQueue::DeleteAndDisableMessages( | |
188 GpuChannelManager* gpu_channel_manager) { | |
189 { | |
190 base::AutoLock auto_lock(channel_messages_lock_); | |
191 DCHECK(enabled_); | |
192 enabled_ = false; | |
99 } | 193 } |
100 | 194 |
101 uint32_t GetUnprocessedOrderNum() { | 195 // We guarantee that the queues will no longer be modified after enabled_ |
102 base::AutoLock auto_lock(channel_messages_lock_); | 196 // is set to false, so it is now safe to modify the queues without the lock. |
103 return unprocessed_order_num_; | 197 // All public facing modifying functions check enabled_ while all |
198 // private modifying functions DCHECK(enabled_) to enforce this. | |
199 while (!channel_messages_.empty()) { | |
200 GpuChannelMessage* msg = channel_messages_.front(); | |
dcheng
2015/09/17 00:15:18
Ditto.
sunnyps
2015/09/17 00:32:39
Done.
| |
201 // This needs to clean up both GpuCommandBufferMsg_InsertSyncPoint and | |
202 // GpuCommandBufferMsg_RetireSyncPoint messages; it is safer to just check | |
203 // if we have a sync point number here. | |
204 if (msg->sync_point) { | |
205 gpu_channel_manager->sync_point_manager()->RetireSyncPoint( | |
206 msg->sync_point); | |
207 } | |
208 delete msg; | |
209 channel_messages_.pop_front(); | |
104 } | 210 } |
211 STLDeleteElements(&out_of_order_messages_); | |
212 } | |
105 | 213 |
106 void PushBackMessage(uint32_t order_number, const IPC::Message& message) { | 214 void GpuChannelMessageQueue::ScheduleHandleMessage() { |
107 base::AutoLock auto_lock(channel_messages_lock_); | 215 task_runner_->PostTask(FROM_HERE, |
108 if (enabled_) { | 216 base::Bind(&GpuChannel::HandleMessage, gpu_channel_)); |
109 PushMessageHelper(order_number, | 217 } |
110 new GpuChannelMessage(order_number, message)); | 218 |
111 } | 219 void GpuChannelMessageQueue::PushMessageHelper( |
220 scoped_ptr<GpuChannelMessage> msg) { | |
221 channel_messages_lock_.AssertAcquired(); | |
222 DCHECK(enabled_); | |
223 bool had_messages = HasQueuedMessagesHelper(); | |
224 if (msg->order_number != kOutOfOrderNumber) { | |
225 unprocessed_order_num_ = msg->order_number; | |
226 channel_messages_.push_back(msg.release()); | |
227 } else { | |
228 out_of_order_messages_.push_back(msg.release()); | |
112 } | 229 } |
230 if (!had_messages) | |
231 ScheduleHandleMessage(); | |
232 } | |
113 | 233 |
114 void PushOutOfOrderMessage(const IPC::Message& message) { | 234 bool GpuChannelMessageQueue::HasQueuedMessagesHelper() const { |
115 // These are pushed out of order so should not have an order number. | 235 channel_messages_lock_.AssertAcquired(); |
116 base::AutoLock auto_lock(channel_messages_lock_); | 236 return !channel_messages_.empty() || !out_of_order_messages_.empty(); |
117 if (enabled_) { | 237 } |
118 PushOutOfOrderHelper(new GpuChannelMessage(kOutOfOrderNumber, message)); | |
119 } | |
120 } | |
121 | |
122 bool GenerateSyncPointMessage(gpu::SyncPointManager* sync_point_manager, | |
123 uint32_t order_number, | |
124 const IPC::Message& message, | |
125 bool retire_sync_point, | |
126 uint32_t* sync_point_number) { | |
127 DCHECK(message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID); | |
128 base::AutoLock auto_lock(channel_messages_lock_); | |
129 if (enabled_) { | |
130 const uint32 sync_point = sync_point_manager->GenerateSyncPoint(); | |
131 | |
132 GpuChannelMessage* msg = new GpuChannelMessage(order_number, message); | |
133 msg->retire_sync_point = retire_sync_point; | |
134 msg->sync_point_number = sync_point; | |
135 | |
136 *sync_point_number = sync_point; | |
137 PushMessageHelper(order_number, msg); | |
138 return true; | |
139 } | |
140 return false; | |
141 } | |
142 | |
143 bool HasQueuedMessages() { | |
144 base::AutoLock auto_lock(channel_messages_lock_); | |
145 return HasQueuedMessagesLocked(); | |
146 } | |
147 | |
148 base::TimeTicks GetNextMessageTimeTick() { | |
149 base::AutoLock auto_lock(channel_messages_lock_); | |
150 | |
151 base::TimeTicks next_message_tick; | |
152 if (!channel_messages_.empty()) | |
153 next_message_tick = channel_messages_.front()->time_received; | |
154 | |
155 base::TimeTicks next_out_of_order_tick; | |
156 if (!out_of_order_messages_.empty()) | |
157 next_out_of_order_tick = out_of_order_messages_.front()->time_received; | |
158 | |
159 if (next_message_tick.is_null()) | |
160 return next_out_of_order_tick; | |
161 else if (next_out_of_order_tick.is_null()) | |
162 return next_message_tick; | |
163 else | |
164 return std::min(next_message_tick, next_out_of_order_tick); | |
165 } | |
166 | |
167 protected: | |
168 virtual ~GpuChannelMessageQueue() { | |
169 DCHECK(channel_messages_.empty()); | |
170 DCHECK(out_of_order_messages_.empty()); | |
171 } | |
172 | |
173 private: | |
174 friend class GpuChannel; | |
175 friend class base::RefCountedThreadSafe<GpuChannelMessageQueue>; | |
176 | |
177 GpuChannelMessageQueue( | |
178 base::WeakPtr<GpuChannel> gpu_channel, | |
179 scoped_refptr<base::SingleThreadTaskRunner> task_runner) | |
180 : enabled_(true), | |
181 unprocessed_order_num_(0), | |
182 gpu_channel_(gpu_channel), | |
183 task_runner_(task_runner) {} | |
184 | |
185 void DeleteAndDisableMessages(GpuChannelManager* gpu_channel_manager) { | |
186 { | |
187 base::AutoLock auto_lock(channel_messages_lock_); | |
188 DCHECK(enabled_); | |
189 enabled_ = false; | |
190 } | |
191 | |
192 // We guarantee that the queues will no longer be modified after enabled_ | |
193 // is set to false, it is now safe to modify the queue without the lock. | |
194 // All public facing modifying functions check enabled_ while all | |
195 // private modifying functions DCHECK(enabled_) to enforce this. | |
196 while (!channel_messages_.empty()) { | |
197 GpuChannelMessage* msg = channel_messages_.front(); | |
198 // This needs to clean up both GpuCommandBufferMsg_InsertSyncPoint and | |
199 // GpuCommandBufferMsg_RetireSyncPoint messages, safer to just check | |
200 // if we have a sync point number here. | |
201 if (msg->sync_point_number) { | |
202 gpu_channel_manager->sync_point_manager()->RetireSyncPoint( | |
203 msg->sync_point_number); | |
204 } | |
205 delete msg; | |
206 channel_messages_.pop_front(); | |
207 } | |
208 STLDeleteElements(&out_of_order_messages_); | |
209 } | |
210 | |
211 void PushUnfinishedMessage(uint32_t order_number, | |
212 const IPC::Message& message) { | |
213 // This is pushed only if it was unfinished, so order number is kept. | |
214 GpuChannelMessage* msg = new GpuChannelMessage(order_number, message); | |
215 base::AutoLock auto_lock(channel_messages_lock_); | |
216 DCHECK(enabled_); | |
217 const bool had_messages = HasQueuedMessagesLocked(); | |
218 if (order_number == kOutOfOrderNumber) | |
219 out_of_order_messages_.push_front(msg); | |
220 else | |
221 channel_messages_.push_front(msg); | |
222 | |
223 if (!had_messages) | |
224 ScheduleHandleMessage(); | |
225 } | |
226 | |
227 void ScheduleHandleMessage() { | |
228 task_runner_->PostTask( | |
229 FROM_HERE, base::Bind(&GpuChannel::HandleMessage, gpu_channel_)); | |
230 } | |
231 | |
232 void PushMessageHelper(uint32_t order_number, GpuChannelMessage* msg) { | |
233 channel_messages_lock_.AssertAcquired(); | |
234 DCHECK(enabled_); | |
235 unprocessed_order_num_ = order_number; | |
236 const bool had_messages = HasQueuedMessagesLocked(); | |
237 channel_messages_.push_back(msg); | |
238 if (!had_messages) | |
239 ScheduleHandleMessage(); | |
240 } | |
241 | |
242 void PushOutOfOrderHelper(GpuChannelMessage* msg) { | |
243 channel_messages_lock_.AssertAcquired(); | |
244 DCHECK(enabled_); | |
245 const bool had_messages = HasQueuedMessagesLocked(); | |
246 out_of_order_messages_.push_back(msg); | |
247 if (!had_messages) | |
248 ScheduleHandleMessage(); | |
249 } | |
250 | |
251 bool HasQueuedMessagesLocked() { | |
252 channel_messages_lock_.AssertAcquired(); | |
253 return !channel_messages_.empty() || !out_of_order_messages_.empty(); | |
254 } | |
255 | |
256 bool enabled_; | |
257 | |
258 // Highest IPC order number seen, set when queued on the IO thread. | |
259 uint32_t unprocessed_order_num_; | |
260 std::deque<GpuChannelMessage*> channel_messages_; | |
261 std::deque<GpuChannelMessage*> out_of_order_messages_; | |
262 | |
263 // This lock protects enabled_, unprocessed_order_num_, and both deques. | |
264 base::Lock channel_messages_lock_; | |
265 | |
266 base::WeakPtr<GpuChannel> gpu_channel_; | |
267 scoped_refptr<base::SingleThreadTaskRunner> task_runner_; | |
268 | |
269 DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageQueue); | |
270 }; | |
271 | 238 |
272 // Begin order numbers at 1 so 0 can mean no orders. | 239 // Begin order numbers at 1 so 0 can mean no orders. |
273 uint32_t GpuChannelMessageFilter::global_order_counter_ = 1; | 240 uint32_t GpuChannelMessageFilter::global_order_counter_ = 1; |
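For reference, the order-number scheme these two lines and kOutOfOrderNumber set up (a summary, not code from the patch):

    // 0                       - nothing assigned/processed yet
    // 1, 2, 3, ...            - global, monotonically increasing IPC order
    // kOutOfOrderNumber (~0u) - wait messages that jump to the queue head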
274 | 241 |
275 GpuChannelMessageFilter::GpuChannelMessageFilter( | 242 GpuChannelMessageFilter::GpuChannelMessageFilter( |
276 scoped_refptr<GpuChannelMessageQueue> message_queue, | 243 GpuChannelMessageQueue* message_queue, |
277 gpu::SyncPointManager* sync_point_manager, | 244 gpu::SyncPointManager* sync_point_manager, |
278 scoped_refptr<base::SingleThreadTaskRunner> task_runner, | 245 base::SingleThreadTaskRunner* task_runner, |
279 bool future_sync_points) | 246 bool future_sync_points) |
280 : preemption_state_(IDLE), | 247 : preemption_state_(IDLE), |
281 message_queue_(message_queue), | 248 message_queue_(message_queue), |
282 sender_(nullptr), | 249 sender_(nullptr), |
283 peer_pid_(base::kNullProcessId), | 250 peer_pid_(base::kNullProcessId), |
284 sync_point_manager_(sync_point_manager), | 251 sync_point_manager_(sync_point_manager), |
285 task_runner_(task_runner), | 252 task_runner_(task_runner), |
286 a_stub_is_descheduled_(false), | 253 a_stub_is_descheduled_(false), |
287 future_sync_points_(future_sync_points) {} | 254 future_sync_points_(future_sync_points) {} |
288 | 255 |
(...skipping 66 matching lines...) | |
355 const uint32_t order_number = global_order_counter_++; | 322 const uint32_t order_number = global_order_counter_++; |
356 bool handled = false; | 323 bool handled = false; |
357 if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) && | 324 if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) && |
358 !future_sync_points_) { | 325 !future_sync_points_) { |
359 DLOG(ERROR) << "Untrusted client should not send " | 326 DLOG(ERROR) << "Untrusted client should not send " |
360 "GpuCommandBufferMsg_RetireSyncPoint message"; | 327 "GpuCommandBufferMsg_RetireSyncPoint message"; |
361 return true; | 328 return true; |
362 } | 329 } |
363 | 330 |
364 if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { | 331 if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { |
365 base::Tuple<bool> retire; | 332 base::Tuple<bool> params; |
366 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); | 333 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); |
367 if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message, | 334 if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message, |
368 &retire)) { | 335 ¶ms)) { |
369 reply->set_reply_error(); | 336 reply->set_reply_error(); |
370 Send(reply); | 337 Send(reply); |
371 return true; | 338 return true; |
372 } | 339 } |
373 if (!future_sync_points_ && !base::get<0>(retire)) { | 340 bool retire_sync_point = base::get<0>(params); |
341 if (!future_sync_points_ && !retire_sync_point) { | |
374 LOG(ERROR) << "Untrusted contexts can't create future sync points"; | 342 LOG(ERROR) << "Untrusted contexts can't create future sync points"; |
375 reply->set_reply_error(); | 343 reply->set_reply_error(); |
376 Send(reply); | 344 Send(reply); |
377 return true; | 345 return true; |
378 } | 346 } |
379 | 347 |
380 // Message queue must handle the entire sync point generation because the | 348 // Message queue must handle the entire sync point generation because the |
381 // message queue could be disabled from the main thread during generation. | 349 // message queue could be disabled from the main thread during generation. |
382 uint32_t sync_point = 0u; | 350 uint32_t sync_point = 0u; |
383 if (!message_queue_->GenerateSyncPointMessage( | 351 if (!message_queue_->GenerateSyncPointMessage( |
384 sync_point_manager_, order_number, message, base::get<0>(retire), | 352 sync_point_manager_, order_number, message, retire_sync_point, |
385 &sync_point)) { | 353 &sync_point)) { |
386 LOG(ERROR) << "GpuChannel has been destroyed."; | 354 LOG(ERROR) << "GpuChannel has been destroyed."; |
387 reply->set_reply_error(); | 355 reply->set_reply_error(); |
388 Send(reply); | 356 Send(reply); |
389 return true; | 357 return true; |
390 } | 358 } |
391 | 359 |
392 DCHECK_NE(sync_point, 0u); | 360 DCHECK_NE(sync_point, 0u); |
393 GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point); | 361 GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point); |
394 Send(reply); | 362 Send(reply); |
395 handled = true; | 363 handled = true; |
396 } | 364 } |
397 | 365 |
398 // Forward all other messages to the GPU Channel. | 366 // Forward all other messages to the GPU Channel. |
399 if (!handled && !message.is_reply() && !message.should_unblock()) { | 367 if (!handled && !message.is_reply() && !message.should_unblock()) { |
400 if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || | 368 if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || |
401 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { | 369 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { |
402 // Move Wait commands to the head of the queue, so the renderer | 370 // Move Wait commands to the head of the queue, so the renderer |
403 // doesn't have to wait any longer than necessary. | 371 // doesn't have to wait any longer than necessary. |
404 message_queue_->PushOutOfOrderMessage(message); | 372 message_queue_->PushBackMessage(kOutOfOrderNumber, message); |
405 } else { | 373 } else { |
406 message_queue_->PushBackMessage(order_number, message); | 374 message_queue_->PushBackMessage(order_number, message); |
407 } | 375 } |
408 handled = true; | 376 handled = true; |
409 } | 377 } |
410 | 378 |
411 UpdatePreemptionState(); | 379 UpdatePreemptionState(); |
412 return handled; | 380 return handled; |
413 } | 381 } |
414 | 382 |
(...skipping 196 matching lines...) | |
611 task_runner_(task_runner), | 579 task_runner_(task_runner), |
612 io_task_runner_(io_task_runner), | 580 io_task_runner_(io_task_runner), |
613 share_group_(share_group ? share_group : new gfx::GLShareGroup), | 581 share_group_(share_group ? share_group : new gfx::GLShareGroup), |
614 mailbox_manager_(mailbox | 582 mailbox_manager_(mailbox |
615 ? scoped_refptr<gpu::gles2::MailboxManager>(mailbox) | 583 ? scoped_refptr<gpu::gles2::MailboxManager>(mailbox) |
616 : gpu::gles2::MailboxManager::Create()), | 584 : gpu::gles2::MailboxManager::Create()), |
617 subscription_ref_set_(new gpu::gles2::SubscriptionRefSet), | 585 subscription_ref_set_(new gpu::gles2::SubscriptionRefSet), |
618 pending_valuebuffer_state_(new gpu::ValueStateMap), | 586 pending_valuebuffer_state_(new gpu::ValueStateMap), |
619 watchdog_(watchdog), | 587 watchdog_(watchdog), |
620 software_(software), | 588 software_(software), |
621 current_order_num_(0), | |
622 processed_order_num_(0), | |
623 num_stubs_descheduled_(0), | 589 num_stubs_descheduled_(0), |
624 allow_future_sync_points_(allow_future_sync_points), | 590 allow_future_sync_points_(allow_future_sync_points), |
625 allow_real_time_streams_(allow_real_time_streams), | 591 allow_real_time_streams_(allow_real_time_streams), |
626 weak_factory_(this) { | 592 weak_factory_(this) { |
627 DCHECK(gpu_channel_manager); | 593 DCHECK(gpu_channel_manager); |
628 DCHECK(client_id); | 594 DCHECK(client_id); |
629 | 595 |
630 message_queue_ = | 596 message_queue_ = |
631 GpuChannelMessageQueue::Create(weak_factory_.GetWeakPtr(), task_runner); | 597 GpuChannelMessageQueue::Create(weak_factory_.GetWeakPtr(), task_runner); |
632 | 598 |
633 filter_ = new GpuChannelMessageFilter( | 599 filter_ = new GpuChannelMessageFilter( |
634 message_queue_, gpu_channel_manager_->sync_point_manager(), task_runner_, | 600 message_queue_.get(), gpu_channel_manager_->sync_point_manager(), |
635 allow_future_sync_points_); | 601 task_runner_.get(), allow_future_sync_points_); |
636 | 602 |
637 subscription_ref_set_->AddObserver(this); | 603 subscription_ref_set_->AddObserver(this); |
638 } | 604 } |
639 | 605 |
640 GpuChannel::~GpuChannel() { | 606 GpuChannel::~GpuChannel() { |
641 // Clear stubs first because of dependencies. | 607 // Clear stubs first because of dependencies. |
642 stubs_.clear(); | 608 stubs_.clear(); |
643 | 609 |
644 message_queue_->DeleteAndDisableMessages(gpu_channel_manager_); | 610 message_queue_->DeleteAndDisableMessages(gpu_channel_manager_); |
645 | 611 |
(...skipping 23 matching lines...) | |
669 | 635 |
670 channel_->AddFilter(filter_.get()); | 636 channel_->AddFilter(filter_.get()); |
671 | 637 |
672 return channel_handle; | 638 return channel_handle; |
673 } | 639 } |
674 | 640 |
675 base::ProcessId GpuChannel::GetClientPID() const { | 641 base::ProcessId GpuChannel::GetClientPID() const { |
676 return channel_->GetPeerPID(); | 642 return channel_->GetPeerPID(); |
677 } | 643 } |
678 | 644 |
645 uint32_t GpuChannel::GetProcessedOrderNum() const { | |
646 return message_queue_->processed_order_num(); | |
647 } | |
648 | |
649 uint32_t GpuChannel::GetUnprocessedOrderNum() const { | |
650 return message_queue_->GetUnprocessedOrderNum(); | |
651 } | |
652 | |
679 bool GpuChannel::OnMessageReceived(const IPC::Message& message) { | 653 bool GpuChannel::OnMessageReceived(const IPC::Message& message) { |
680 // All messages should be pushed to channel_messages_ and handled separately. | 654 // All messages should be pushed to channel_messages_ and handled separately. |
681 NOTREACHED(); | 655 NOTREACHED(); |
682 return false; | 656 return false; |
683 } | 657 } |
684 | 658 |
685 void GpuChannel::OnChannelError() { | 659 void GpuChannel::OnChannelError() { |
686 gpu_channel_manager_->RemoveChannel(client_id_); | 660 gpu_channel_manager_->RemoveChannel(client_id_); |
687 } | 661 } |
688 | 662 |
(...skipping 20 matching lines...) | |
709 | 683 |
710 void GpuChannel::OnRemoveSubscription(unsigned int target) { | 684 void GpuChannel::OnRemoveSubscription(unsigned int target) { |
711 gpu_channel_manager()->Send( | 685 gpu_channel_manager()->Send( |
712 new GpuHostMsg_RemoveSubscription(client_id_, target)); | 686 new GpuHostMsg_RemoveSubscription(client_id_, target)); |
713 } | 687 } |
714 | 688 |
715 void GpuChannel::StubSchedulingChanged(bool scheduled) { | 689 void GpuChannel::StubSchedulingChanged(bool scheduled) { |
716 bool a_stub_was_descheduled = num_stubs_descheduled_ > 0; | 690 bool a_stub_was_descheduled = num_stubs_descheduled_ > 0; |
717 if (scheduled) { | 691 if (scheduled) { |
718 num_stubs_descheduled_--; | 692 num_stubs_descheduled_--; |
719 message_queue_->ScheduleHandleMessage(); | 693 ScheduleHandleMessage(); |
720 } else { | 694 } else { |
721 num_stubs_descheduled_++; | 695 num_stubs_descheduled_++; |
722 } | 696 } |
723 DCHECK_LE(num_stubs_descheduled_, stubs_.size()); | 697 DCHECK_LE(num_stubs_descheduled_, stubs_.size()); |
724 bool a_stub_is_descheduled = num_stubs_descheduled_ > 0; | 698 bool a_stub_is_descheduled = num_stubs_descheduled_ > 0; |
725 | 699 |
726 if (a_stub_is_descheduled != a_stub_was_descheduled) { | 700 if (a_stub_is_descheduled != a_stub_was_descheduled) { |
727 if (preempting_flag_.get()) { | 701 if (preempting_flag_.get()) { |
728 io_task_runner_->PostTask( | 702 io_task_runner_->PostTask( |
729 FROM_HERE, | 703 FROM_HERE, |
(...skipping 127 matching lines...) | |
857 OnDestroyCommandBuffer) | 831 OnDestroyCommandBuffer) |
858 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuMsg_CreateJpegDecoder, | 832 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuMsg_CreateJpegDecoder, |
859 OnCreateJpegDecoder) | 833 OnCreateJpegDecoder) |
860 IPC_MESSAGE_UNHANDLED(handled = false) | 834 IPC_MESSAGE_UNHANDLED(handled = false) |
861 IPC_END_MESSAGE_MAP() | 835 IPC_END_MESSAGE_MAP() |
862 DCHECK(handled) << msg.type(); | 836 DCHECK(handled) << msg.type(); |
863 return handled; | 837 return handled; |
864 } | 838 } |
865 | 839 |
866 void GpuChannel::HandleMessage() { | 840 void GpuChannel::HandleMessage() { |
867 GpuChannelMessage* m = nullptr; | 841 // If we have been preempted by another channel, just post a task to wake up. |
868 GpuCommandBufferStub* stub = nullptr; | 842 if (preempted_flag_ && preempted_flag_->IsSet()) { |
869 bool has_more_messages = false; | 843 ScheduleHandleMessage(); |
870 { | 844 return; |
871 base::AutoLock auto_lock(message_queue_->channel_messages_lock_); | |
872 if (!message_queue_->out_of_order_messages_.empty()) { | |
873 m = message_queue_->out_of_order_messages_.front(); | |
874 DCHECK(m->order_number == kOutOfOrderNumber); | |
875 message_queue_->out_of_order_messages_.pop_front(); | |
876 } else if (!message_queue_->channel_messages_.empty()) { | |
877 m = message_queue_->channel_messages_.front(); | |
878 DCHECK(m->order_number != kOutOfOrderNumber); | |
879 message_queue_->channel_messages_.pop_front(); | |
880 } else { | |
881 // No messages to process | |
882 return; | |
883 } | |
884 | |
885 has_more_messages = message_queue_->HasQueuedMessagesLocked(); | |
886 } | 845 } |
887 | 846 |
888 bool retry_message = false; | 847 GpuChannelMessage* m = message_queue_->GetNextMessage(); |
889 stub = stubs_.get(m->message.routing_id()); | 848 |
890 if (stub) { | 849 // TODO(sunnyps): This could be a DCHECK maybe? |
891 if (!stub->IsScheduled()) { | 850 if (!m) |
892 retry_message = true; | 851 return; |
852 | |
853 uint32_t order_number = m->order_number; | |
854 IPC::Message& message = m->message; | |
dcheng
2015/09/17 00:15:18
Why does this need to be a mutable ref?
| |
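Nothing after this point mutates |message| in the new code (the old code reassigned m->message for RetireSyncPoint, which is gone), so a const ref would presumably suffice:

    const IPC::Message& message = m->message;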
855 int32_t routing_id = message.routing_id(); | |
856 GpuCommandBufferStub* stub = stubs_.get(routing_id); | |
857 | |
858 DCHECK(!stub || stub->IsScheduled()); | |
859 | |
860 DVLOG(1) << "received message @" << &message << " on channel @" << this | |
861 << " with type " << message.type(); | |
862 | |
863 current_order_num_ = order_number; | |
864 | |
865 bool handled = false; | |
866 | |
867 if (routing_id == MSG_ROUTING_CONTROL) { | |
868 handled = OnControlMessageReceived(message); | |
869 } else if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { | |
870 // TODO(dyen): Temporary handling of old sync points. | |
871 // This must ensure that the sync point will be retired. Normally we'll | |
872 // find the stub based on the routing ID, and associate the sync point | |
873 // with it, but if that fails for any reason (channel or stub already | |
874 // deleted, invalid routing id), we need to retire the sync point | |
875 // immediately. | |
876 if (stub) { | |
877 stub->AddSyncPoint(m->sync_point, m->retire_sync_point); | |
878 } else { | |
879 gpu_channel_manager_->sync_point_manager()->RetireSyncPoint( | |
880 m->sync_point); | |
893 } | 881 } |
894 if (stub->IsPreempted()) { | 882 handled = true; |
895 retry_message = true; | 883 } else { |
896 message_queue_->ScheduleHandleMessage(); | 884 handled = router_.RouteMessage(message); |
897 } | |
898 } | 885 } |
899 | 886 |
900 if (retry_message) { | 887 // Respond to sync messages even if router failed to route. |
901 base::AutoLock auto_lock(message_queue_->channel_messages_lock_); | 888 if (!handled && message.is_sync()) { |
902 if (m->order_number == kOutOfOrderNumber) | 889 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); |
903 message_queue_->out_of_order_messages_.push_front(m); | 890 reply->set_reply_error(); |
904 else | 891 Send(reply); |
905 message_queue_->channel_messages_.push_front(m); | 892 handled = true; |
906 return; | |
907 } else if (has_more_messages) { | |
908 message_queue_->ScheduleHandleMessage(); | |
909 } | 893 } |
910 | 894 |
911 scoped_ptr<GpuChannelMessage> scoped_message(m); | 895 // A command buffer may be descheduled or preempted but only in the middle of |
912 const uint32_t order_number = m->order_number; | 896 // a flush. In this case we should not pop the message from the queue. |
913 const int32_t routing_id = m->message.routing_id(); | 897 if (stub && stub->HasUnprocessedCommands()) { |
914 | 898 DCHECK(message.type() == GpuCommandBufferMsg_AsyncFlush::ID); |
915 // TODO(dyen): Temporary handling of old sync points. | 899 // If the stub is still scheduled then we were preempted and need to |
916 // This must ensure that the sync point will be retired. Normally we'll | 900 // schedule a wakeup; otherwise some other event will wake us up, e.g. sync |
917 // find the stub based on the routing ID, and associate the sync point | 901 // point completion. No DCHECK for preemption flag because that can change |
918 // with it, but if that fails for any reason (channel or stub already | 902 // any time. |
919 // deleted, invalid routing id), we need to retire the sync point | 903 if (stub->IsScheduled()) |
920 // immediately. | 904 ScheduleHandleMessage(); |
921 if (m->message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { | 905 return; |
922 const bool retire = m->retire_sync_point; | |
923 const uint32_t sync_point = m->sync_point_number; | |
924 if (stub) { | |
925 stub->AddSyncPoint(sync_point); | |
926 if (retire) { | |
927 m->message = | |
928 GpuCommandBufferMsg_RetireSyncPoint(routing_id, sync_point); | |
929 } | |
930 } else { | |
931 current_order_num_ = order_number; | |
932 gpu_channel_manager_->sync_point_manager()->RetireSyncPoint(sync_point); | |
933 MessageProcessed(order_number); | |
934 return; | |
935 } | |
936 } | 906 } |
937 | 907 |
938 IPC::Message* message = &m->message; | 908 if (message_queue_->MessageProcessed(order_number)) { |
939 bool message_processed = true; | 909 ScheduleHandleMessage(); |
910 } | |
940 | 911 |
941 DVLOG(1) << "received message @" << message << " on channel @" << this | 912 if (preempting_flag_) { |
942 << " with type " << message->type(); | 913 io_task_runner_->PostTask( |
914 FROM_HERE, | |
915 base::Bind(&GpuChannelMessageFilter::OnMessageProcessed, filter_)); | |
916 } | |
917 } | |
943 | 918 |
944 if (order_number != kOutOfOrderNumber) { | 919 void GpuChannel::ScheduleHandleMessage() { |
945 // Make sure this is a valid unprocessed order number. | 920 task_runner_->PostTask(FROM_HERE, base::Bind(&GpuChannel::HandleMessage, |
946 DCHECK(order_number <= GetUnprocessedOrderNum() && | 921 weak_factory_.GetWeakPtr())); |
947 order_number >= GetProcessedOrderNum()); | |
948 | |
949 current_order_num_ = order_number; | |
950 } | |
951 bool result = false; | |
952 if (routing_id == MSG_ROUTING_CONTROL) | |
953 result = OnControlMessageReceived(*message); | |
954 else | |
955 result = router_.RouteMessage(*message); | |
956 | |
957 if (!result) { | |
958 // Respond to sync messages even if router failed to route. | |
959 if (message->is_sync()) { | |
960 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message); | |
961 reply->set_reply_error(); | |
962 Send(reply); | |
963 } | |
964 } else { | |
965 // If the command buffer becomes unscheduled as a result of handling the | |
966 // message but still has more commands to process, synthesize an IPC | |
967 // message to flush that command buffer. | |
968 if (stub) { | |
969 if (stub->HasUnprocessedCommands()) { | |
970 message_queue_->PushUnfinishedMessage( | |
971 order_number, GpuCommandBufferMsg_Rescheduled(stub->route_id())); | |
972 message_processed = false; | |
973 } | |
974 } | |
975 } | |
976 if (message_processed) | |
977 MessageProcessed(order_number); | |
978 } | 922 } |
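The resulting pump, in miniature: HandleMessage() handles at most one message per posted task and re-posts itself while work remains, so one busy channel cannot monopolize the GPU main thread. An assumed trace, not patch code:

    // queue: [A, B] -> HandleMessage() processes A;
    //   MessageProcessed(A) returns true (B pending) -> ScheduleHandleMessage()
    // queue: [B]    -> HandleMessage() processes B;
    //   MessageProcessed(B) returns false -> pump idles until the next push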
979 | 923 |
980 void GpuChannel::OnCreateOffscreenCommandBuffer( | 924 void GpuChannel::OnCreateOffscreenCommandBuffer( |
981 const gfx::Size& size, | 925 const gfx::Size& size, |
982 const GPUCreateCommandBufferConfig& init_params, | 926 const GPUCreateCommandBufferConfig& init_params, |
983 int32 route_id, | 927 int32 route_id, |
984 bool* succeeded) { | 928 bool* succeeded) { |
985 TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer", "route_id", | 929 TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer", "route_id", |
986 route_id); | 930 route_id); |
987 | 931 |
(...skipping 83 matching lines...) | |
1071 } | 1015 } |
1072 } | 1016 } |
1073 | 1017 |
1074 void GpuChannel::OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg) { | 1018 void GpuChannel::OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg) { |
1075 if (!jpeg_decoder_) { | 1019 if (!jpeg_decoder_) { |
1076 jpeg_decoder_.reset(new GpuJpegDecodeAccelerator(this, io_task_runner_)); | 1020 jpeg_decoder_.reset(new GpuJpegDecodeAccelerator(this, io_task_runner_)); |
1077 } | 1021 } |
1078 jpeg_decoder_->AddClient(route_id, reply_msg); | 1022 jpeg_decoder_->AddClient(route_id, reply_msg); |
1079 } | 1023 } |
1080 | 1024 |
1081 void GpuChannel::MessageProcessed(uint32_t order_number) { | |
1082 if (order_number != kOutOfOrderNumber) { | |
1083 DCHECK(current_order_num_ == order_number); | |
1084 DCHECK(processed_order_num_ < order_number); | |
1085 processed_order_num_ = order_number; | |
1086 } | |
1087 if (preempting_flag_.get()) { | |
1088 io_task_runner_->PostTask( | |
1089 FROM_HERE, | |
1090 base::Bind(&GpuChannelMessageFilter::OnMessageProcessed, filter_)); | |
1091 } | |
1092 } | |
1093 | |
1094 void GpuChannel::CacheShader(const std::string& key, | 1025 void GpuChannel::CacheShader(const std::string& key, |
1095 const std::string& shader) { | 1026 const std::string& shader) { |
1096 gpu_channel_manager_->Send( | 1027 gpu_channel_manager_->Send( |
1097 new GpuHostMsg_CacheShader(client_id_, key, shader)); | 1028 new GpuHostMsg_CacheShader(client_id_, key, shader)); |
1098 } | 1029 } |
1099 | 1030 |
1100 void GpuChannel::AddFilter(IPC::MessageFilter* filter) { | 1031 void GpuChannel::AddFilter(IPC::MessageFilter* filter) { |
1101 io_task_runner_->PostTask( | 1032 io_task_runner_->PostTask( |
1102 FROM_HERE, base::Bind(&GpuChannelMessageFilter::AddChannelFilter, | 1033 FROM_HERE, base::Bind(&GpuChannelMessageFilter::AddChannelFilter, |
1103 filter_, make_scoped_refptr(filter))); | 1034 filter_, make_scoped_refptr(filter))); |
(...skipping 49 matching lines...) | |
1153 client_id_); | 1084 client_id_); |
1154 } | 1085 } |
1155 } | 1086 } |
1156 } | 1087 } |
1157 | 1088 |
1158 void GpuChannel::HandleUpdateValueState( | 1089 void GpuChannel::HandleUpdateValueState( |
1159 unsigned int target, const gpu::ValueState& state) { | 1090 unsigned int target, const gpu::ValueState& state) { |
1160 pending_valuebuffer_state_->UpdateState(target, state); | 1091 pending_valuebuffer_state_->UpdateState(target, state); |
1161 } | 1092 } |
1162 | 1093 |
1163 uint32_t GpuChannel::GetUnprocessedOrderNum() const { | |
1164 return message_queue_->GetUnprocessedOrderNum(); | |
1165 } | |
1166 | |
1167 } // namespace content | 1094 } // namespace content |