Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/common/gpu/gpu_channel.h" | 5 #include "content/common/gpu/gpu_channel.h" |
| 6 | 6 |
| 7 #if defined(OS_WIN) | 7 #if defined(OS_WIN) |
| 8 #include <windows.h> | 8 #include <windows.h> |
| 9 #endif | 9 #endif |
| 10 | 10 |
| (...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 65 const int64 kMaxPreemptTimeMs = kVsyncIntervalMs; | 65 const int64 kMaxPreemptTimeMs = kVsyncIntervalMs; |
| 66 | 66 |
| 67 // Stop the preemption once the time for the longest pending IPC drops | 67 // Stop the preemption once the time for the longest pending IPC drops |
| 68 // below this threshold. | 68 // below this threshold. |
| 69 const int64 kStopPreemptThresholdMs = kVsyncIntervalMs; | 69 const int64 kStopPreemptThresholdMs = kVsyncIntervalMs; |
| 70 | 70 |
| 71 const uint32_t kOutOfOrderNumber = static_cast<uint32_t>(-1); | 71 const uint32_t kOutOfOrderNumber = static_cast<uint32_t>(-1); |
| 72 | 72 |
| 73 } // anonymous namespace | 73 } // anonymous namespace |
| 74 | 74 |
| 75 struct GpuChannelMessage { | 75 scoped_refptr<GpuChannelMessageQueue> GpuChannelMessageQueue::Create( |
| 76 uint32_t order_number; | 76 base::WeakPtr<GpuChannel> gpu_channel, |
| 77 base::TimeTicks time_received; | 77 scoped_refptr<base::SingleThreadTaskRunner> task_runner) { |
| 78 IPC::Message message; | 78 return new GpuChannelMessageQueue(gpu_channel, task_runner); |
| 79 } | |
| 79 | 80 |
| 80 // TODO(dyen): Temporary sync point data, remove once new sync point lands. | 81 GpuChannelMessageQueue::GpuChannelMessageQueue( |
| 81 bool retire_sync_point; | 82 base::WeakPtr<GpuChannel> gpu_channel, |
| 82 uint32 sync_point_number; | 83 scoped_refptr<base::SingleThreadTaskRunner> task_runner) |
| 84 : enabled_(true), | |
| 85 unprocessed_order_num_(0), | |
| 86 processed_order_num_(0), | |
| 87 gpu_channel_(gpu_channel), | |
| 88 task_runner_(task_runner) {} | |
| 83 | 89 |
| 84 GpuChannelMessage(uint32_t order_num, const IPC::Message& msg) | 90 GpuChannelMessageQueue::~GpuChannelMessageQueue() { |
| 85 : order_number(order_num), | 91 DCHECK(channel_messages_.empty()); |
| 86 time_received(base::TimeTicks::Now()), | 92 DCHECK(out_of_order_messages_.empty()); |
| 87 message(msg), | 93 } |
| 88 retire_sync_point(false), | |
| 89 sync_point_number(0) {} | |
| 90 }; | |
| 91 | 94 |
| 92 class GpuChannelMessageQueue | 95 uint32_t GpuChannelMessageQueue::GetUnprocessedOrderNum() const { |
| 93 : public base::RefCountedThreadSafe<GpuChannelMessageQueue> { | 96 base::AutoLock auto_lock(channel_messages_lock_); |
| 94 public: | 97 return unprocessed_order_num_; |
| 95 static scoped_refptr<GpuChannelMessageQueue> Create( | 98 } |
| 96 base::WeakPtr<GpuChannel> gpu_channel, | 99 |
| 97 scoped_refptr<base::SingleThreadTaskRunner> task_runner) { | 100 uint32_t GpuChannelMessageQueue::GetProccessedOrderNum() const { |
| 98 return new GpuChannelMessageQueue(gpu_channel, task_runner); | 101 return processed_order_num_; |
| 102 } | |
| 103 | |
| 104 void GpuChannelMessageQueue::PushBackMessage(uint32_t order_number, | |
| 105 const IPC::Message& message) { | |
| 106 base::AutoLock auto_lock(channel_messages_lock_); | |
| 107 if (enabled_) | |
| 108 PushMessageHelper(new GpuChannelMessage(order_number, message)); | |
| 109 } | |
| 110 | |
| 111 bool GpuChannelMessageQueue::GenerateSyncPointMessage( | |
| 112 gpu::SyncPointManager* sync_point_manager, | |
| 113 uint32_t order_number, | |
| 114 const IPC::Message& message, | |
| 115 bool retire_sync_point, | |
| 116 uint32_t* sync_point) { | |
| 117 DCHECK(message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID); | |
|
dcheng 2015/09/10 21:30:37: DCHECK_EQ
sunnyps 2015/09/10 23:38:36: Done.
| |
| 118 DCHECK(sync_point); | |
| 119 base::AutoLock auto_lock(channel_messages_lock_); | |
| 120 if (enabled_) { | |
| 121 *sync_point = sync_point_manager->GenerateSyncPoint(); | |
| 122 | |
| 123 GpuChannelMessage* msg = new GpuChannelMessage(order_number, message); | |
| 124 msg->retire_sync_point = retire_sync_point; | |
| 125 msg->sync_point = *sync_point; | |
| 126 | |
| 127 PushMessageHelper(msg); | |
| 128 return true; | |
| 129 } | |
| 130 return false; | |
| 131 } | |
| 132 | |
| 133 bool GpuChannelMessageQueue::HasQueuedMessages() const { | |
| 134 base::AutoLock auto_lock(channel_messages_lock_); | |
| 135 return HasQueuedMessagesHelper(); | |
| 136 } | |
| 137 | |
| 138 base::TimeTicks GpuChannelMessageQueue::GetNextMessageTimeTick() const { | |
| 139 base::AutoLock auto_lock(channel_messages_lock_); | |
| 140 | |
| 141 base::TimeTicks next_message_tick; | |
| 142 if (!channel_messages_.empty()) | |
| 143 next_message_tick = channel_messages_.front()->time_received; | |
| 144 | |
| 145 base::TimeTicks next_out_of_order_tick; | |
| 146 if (!out_of_order_messages_.empty()) | |
| 147 next_out_of_order_tick = out_of_order_messages_.front()->time_received; | |
| 148 | |
| 149 if (next_message_tick.is_null()) | |
| 150 return next_out_of_order_tick; | |
| 151 else if (next_out_of_order_tick.is_null()) | |
| 152 return next_message_tick; | |
| 153 else | |
| 154 return std::min(next_message_tick, next_out_of_order_tick); | |
| 155 } | |
| 156 | |
| 157 GpuChannelMessage* GpuChannelMessageQueue::GetNextMessage() const { | |
| 158 base::AutoLock auto_lock(channel_messages_lock_); | |
| 159 if (!out_of_order_messages_.empty()) { | |
| 160 return out_of_order_messages_.front(); | |
| 161 } else if (!channel_messages_.empty()) { | |
| 162 return channel_messages_.front(); | |
| 163 } else { | |
| 164 return nullptr; | |
| 165 } | |
| 166 } | |
| 167 | |
| 168 bool GpuChannelMessageQueue::MessageProcessed(uint32_t order_number) { | |
| 169 base::AutoLock auto_lock(channel_messages_lock_); | |
| 170 if (order_number != kOutOfOrderNumber) { | |
| 171 DCHECK(!channel_messages_.empty()); | |
| 172 DCHECK(order_number == channel_messages_.front()->order_number); | |
| 173 processed_order_num_ = order_number; | |
| 174 channel_messages_.pop_front(); | |
| 175 } else { | |
| 176 DCHECK(!out_of_order_messages_.empty()); | |
| 177 out_of_order_messages_.pop_front(); | |
| 178 } | |
|
dcheng 2015/09/10 21:30:37: Won't this leak the popped element?
sunnyps 2015/09/10 23:38:36: Thanks for catching this. Fixed this for now but I
| |
| 179 return HasQueuedMessagesHelper(); | |
| 180 } | |
| 181 | |
| 182 void GpuChannelMessageQueue::DeleteAndDisableMessages( | |
| 183 GpuChannelManager* gpu_channel_manager) { | |
| 184 { | |
| 185 base::AutoLock auto_lock(channel_messages_lock_); | |
| 186 DCHECK(enabled_); | |
| 187 enabled_ = false; | |
| 99 } | 188 } |
| 100 | 189 |
| 101 uint32_t GetUnprocessedOrderNum() { | 190 // We guarantee that the queues will no longer be modified after enabled_ |
| 102 base::AutoLock auto_lock(channel_messages_lock_); | 191 // is set to false, it is now safe to modify the queue without the lock. |
| 103 return unprocessed_order_num_; | 192 // All public facing modifying functions check enabled_ while all |
| 193 // private modifying functions DCHECK(enabled_) to enforce this. | |
| 194 while (!channel_messages_.empty()) { | |
| 195 GpuChannelMessage* msg = channel_messages_.front(); | |
| 196 // This needs to clean up both GpuCommandBufferMsg_InsertSyncPoint and | |
| 197 // GpuCommandBufferMsg_RetireSyncPoint messages, safer to just check | |
| 198 // if we have a sync point number here. | |
| 199 if (msg->sync_point) { | |
| 200 gpu_channel_manager->sync_point_manager()->RetireSyncPoint( | |
| 201 msg->sync_point); | |
| 202 } | |
| 203 delete msg; | |
| 204 channel_messages_.pop_front(); | |
| 104 } | 205 } |
| 206 STLDeleteElements(&out_of_order_messages_); | |
| 207 } | |
| 105 | 208 |
| 106 void PushBackMessage(uint32_t order_number, const IPC::Message& message) { | 209 void GpuChannelMessageQueue::ScheduleHandleMessage() { |
| 107 base::AutoLock auto_lock(channel_messages_lock_); | 210 task_runner_->PostTask(FROM_HERE, |
| 108 if (enabled_) { | 211 base::Bind(&GpuChannel::HandleMessage, gpu_channel_)); |
| 109 PushMessageHelper(order_number, | 212 } |
| 110 new GpuChannelMessage(order_number, message)); | 213 |
| 111 } | 214 void GpuChannelMessageQueue::PushMessageHelper(GpuChannelMessage* msg) { |
| 215 channel_messages_lock_.AssertAcquired(); | |
| 216 DCHECK(enabled_); | |
| 217 bool had_messages = HasQueuedMessagesHelper(); | |
| 218 if (msg->order_number != kOutOfOrderNumber) { | |
| 219 unprocessed_order_num_ = msg->order_number; | |
| 220 channel_messages_.push_back(msg); | |
| 221 } else { | |
| 222 out_of_order_messages_.push_back(msg); | |
| 112 } | 223 } |
| 224 if (!had_messages) | |
| 225 ScheduleHandleMessage(); | |
| 226 } | |
| 113 | 227 |
| 114 void PushOutOfOrderMessage(const IPC::Message& message) { | 228 bool GpuChannelMessageQueue::HasQueuedMessagesHelper() const { |
| 115 // These are pushed out of order so should not have any order messages. | 229 channel_messages_lock_.AssertAcquired(); |
| 116 base::AutoLock auto_lock(channel_messages_lock_); | 230 return !channel_messages_.empty() || !out_of_order_messages_.empty(); |
| 117 if (enabled_) { | 231 } |
| 118 PushOutOfOrderHelper(new GpuChannelMessage(kOutOfOrderNumber, message)); | |
| 119 } | |
| 120 } | |
| 121 | |
| 122 bool GenerateSyncPointMessage(gpu::SyncPointManager* sync_point_manager, | |
| 123 uint32_t order_number, | |
| 124 const IPC::Message& message, | |
| 125 bool retire_sync_point, | |
| 126 uint32_t* sync_point_number) { | |
| 127 DCHECK(message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID); | |
| 128 base::AutoLock auto_lock(channel_messages_lock_); | |
| 129 if (enabled_) { | |
| 130 const uint32 sync_point = sync_point_manager->GenerateSyncPoint(); | |
| 131 | |
| 132 GpuChannelMessage* msg = new GpuChannelMessage(order_number, message); | |
| 133 msg->retire_sync_point = retire_sync_point; | |
| 134 msg->sync_point_number = sync_point; | |
| 135 | |
| 136 *sync_point_number = sync_point; | |
| 137 PushMessageHelper(order_number, msg); | |
| 138 return true; | |
| 139 } | |
| 140 return false; | |
| 141 } | |
| 142 | |
| 143 bool HasQueuedMessages() { | |
| 144 base::AutoLock auto_lock(channel_messages_lock_); | |
| 145 return HasQueuedMessagesLocked(); | |
| 146 } | |
| 147 | |
| 148 base::TimeTicks GetNextMessageTimeTick() { | |
| 149 base::AutoLock auto_lock(channel_messages_lock_); | |
| 150 | |
| 151 base::TimeTicks next_message_tick; | |
| 152 if (!channel_messages_.empty()) | |
| 153 next_message_tick = channel_messages_.front()->time_received; | |
| 154 | |
| 155 base::TimeTicks next_out_of_order_tick; | |
| 156 if (!out_of_order_messages_.empty()) | |
| 157 next_out_of_order_tick = out_of_order_messages_.front()->time_received; | |
| 158 | |
| 159 if (next_message_tick.is_null()) | |
| 160 return next_out_of_order_tick; | |
| 161 else if (next_out_of_order_tick.is_null()) | |
| 162 return next_message_tick; | |
| 163 else | |
| 164 return std::min(next_message_tick, next_out_of_order_tick); | |
| 165 } | |
| 166 | |
| 167 protected: | |
| 168 virtual ~GpuChannelMessageQueue() { | |
| 169 DCHECK(channel_messages_.empty()); | |
| 170 DCHECK(out_of_order_messages_.empty()); | |
| 171 } | |
| 172 | |
| 173 private: | |
| 174 friend class GpuChannel; | |
| 175 friend class base::RefCountedThreadSafe<GpuChannelMessageQueue>; | |
| 176 | |
| 177 GpuChannelMessageQueue( | |
| 178 base::WeakPtr<GpuChannel> gpu_channel, | |
| 179 scoped_refptr<base::SingleThreadTaskRunner> task_runner) | |
| 180 : enabled_(true), | |
| 181 unprocessed_order_num_(0), | |
| 182 gpu_channel_(gpu_channel), | |
| 183 task_runner_(task_runner) {} | |
| 184 | |
| 185 void DeleteAndDisableMessages(GpuChannelManager* gpu_channel_manager) { | |
| 186 { | |
| 187 base::AutoLock auto_lock(channel_messages_lock_); | |
| 188 DCHECK(enabled_); | |
| 189 enabled_ = false; | |
| 190 } | |
| 191 | |
| 192 // We guarantee that the queues will no longer be modified after enabled_ | |
| 193 // is set to false, it is now safe to modify the queue without the lock. | |
| 194 // All public facing modifying functions check enabled_ while all | |
| 195 // private modifying functions DCHECK(enabled_) to enforce this. | |
| 196 while (!channel_messages_.empty()) { | |
| 197 GpuChannelMessage* msg = channel_messages_.front(); | |
| 198 // This needs to clean up both GpuCommandBufferMsg_InsertSyncPoint and | |
| 199 // GpuCommandBufferMsg_RetireSyncPoint messages, safer to just check | |
| 200 // if we have a sync point number here. | |
| 201 if (msg->sync_point_number) { | |
| 202 gpu_channel_manager->sync_point_manager()->RetireSyncPoint( | |
| 203 msg->sync_point_number); | |
| 204 } | |
| 205 delete msg; | |
| 206 channel_messages_.pop_front(); | |
| 207 } | |
| 208 STLDeleteElements(&out_of_order_messages_); | |
| 209 } | |
| 210 | |
| 211 void PushUnfinishedMessage(uint32_t order_number, | |
| 212 const IPC::Message& message) { | |
| 213 // This is pushed only if it was unfinished, so order number is kept. | |
| 214 GpuChannelMessage* msg = new GpuChannelMessage(order_number, message); | |
| 215 base::AutoLock auto_lock(channel_messages_lock_); | |
| 216 DCHECK(enabled_); | |
| 217 const bool had_messages = HasQueuedMessagesLocked(); | |
| 218 if (order_number == kOutOfOrderNumber) | |
| 219 out_of_order_messages_.push_front(msg); | |
| 220 else | |
| 221 channel_messages_.push_front(msg); | |
| 222 | |
| 223 if (!had_messages) | |
| 224 ScheduleHandleMessage(); | |
| 225 } | |
| 226 | |
| 227 void ScheduleHandleMessage() { | |
| 228 task_runner_->PostTask( | |
| 229 FROM_HERE, base::Bind(&GpuChannel::HandleMessage, gpu_channel_)); | |
| 230 } | |
| 231 | |
| 232 void PushMessageHelper(uint32_t order_number, GpuChannelMessage* msg) { | |
| 233 channel_messages_lock_.AssertAcquired(); | |
| 234 DCHECK(enabled_); | |
| 235 unprocessed_order_num_ = order_number; | |
| 236 const bool had_messages = HasQueuedMessagesLocked(); | |
| 237 channel_messages_.push_back(msg); | |
| 238 if (!had_messages) | |
| 239 ScheduleHandleMessage(); | |
| 240 } | |
| 241 | |
| 242 void PushOutOfOrderHelper(GpuChannelMessage* msg) { | |
| 243 channel_messages_lock_.AssertAcquired(); | |
| 244 DCHECK(enabled_); | |
| 245 const bool had_messages = HasQueuedMessagesLocked(); | |
| 246 out_of_order_messages_.push_back(msg); | |
| 247 if (!had_messages) | |
| 248 ScheduleHandleMessage(); | |
| 249 } | |
| 250 | |
| 251 bool HasQueuedMessagesLocked() { | |
| 252 channel_messages_lock_.AssertAcquired(); | |
| 253 return !channel_messages_.empty() || !out_of_order_messages_.empty(); | |
| 254 } | |
| 255 | |
| 256 bool enabled_; | |
| 257 | |
| 258 // Highest IPC order number seen, set when queued on the IO thread. | |
| 259 uint32_t unprocessed_order_num_; | |
| 260 std::deque<GpuChannelMessage*> channel_messages_; | |
| 261 std::deque<GpuChannelMessage*> out_of_order_messages_; | |
| 262 | |
| 263 // This lock protects enabled_, unprocessed_order_num_, and both deques. | |
| 264 base::Lock channel_messages_lock_; | |
| 265 | |
| 266 base::WeakPtr<GpuChannel> gpu_channel_; | |
| 267 scoped_refptr<base::SingleThreadTaskRunner> task_runner_; | |
| 268 | |
| 269 DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageQueue); | |
| 270 }; | |
| 271 | 232 |
| 272 // Begin order numbers at 1 so 0 can mean no orders. | 233 // Begin order numbers at 1 so 0 can mean no orders. |
| 273 uint32_t GpuChannelMessageFilter::global_order_counter_ = 1; | 234 uint32_t GpuChannelMessageFilter::global_order_counter_ = 1; |
| 274 | 235 |
| 275 GpuChannelMessageFilter::GpuChannelMessageFilter( | 236 GpuChannelMessageFilter::GpuChannelMessageFilter( |
| 276 scoped_refptr<GpuChannelMessageQueue> message_queue, | 237 scoped_refptr<GpuChannelMessageQueue> message_queue, |
| 277 gpu::SyncPointManager* sync_point_manager, | 238 gpu::SyncPointManager* sync_point_manager, |
| 278 scoped_refptr<base::SingleThreadTaskRunner> task_runner, | 239 scoped_refptr<base::SingleThreadTaskRunner> task_runner, |
| 279 bool future_sync_points) | 240 bool future_sync_points) |
| 280 : preemption_state_(IDLE), | 241 : preemption_state_(IDLE), |
| (...skipping 64 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 345 } | 306 } |
| 346 | 307 |
| 347 bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) { | 308 bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) { |
| 348 DCHECK(sender_); | 309 DCHECK(sender_); |
| 349 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { | 310 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { |
| 350 if (filter->OnMessageReceived(message)) { | 311 if (filter->OnMessageReceived(message)) { |
| 351 return true; | 312 return true; |
| 352 } | 313 } |
| 353 } | 314 } |
| 354 | 315 |
| 355 const uint32_t order_number = global_order_counter_++; | 316 const uint32_t order_number = global_order_counter_++; |
|
David Yen 2015/09/10 22:42:06: Did you mean to use GpuChannelManager::GetNextUnpr
sunnyps 2015/09/10 23:38:36: Yes. Thanks for catching this. I've removed the sh
| |
| 356 bool handled = false; | 317 bool handled = false; |
| 357 if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) && | 318 if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) && |
| 358 !future_sync_points_) { | 319 !future_sync_points_) { |
| 359 DLOG(ERROR) << "Untrusted client should not send " | 320 DLOG(ERROR) << "Untrusted client should not send " |
| 360 "GpuCommandBufferMsg_RetireSyncPoint message"; | 321 "GpuCommandBufferMsg_RetireSyncPoint message"; |
| 361 return true; | 322 return true; |
| 362 } | 323 } |
| 363 | 324 |
| 364 if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { | 325 if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { |
| 365 base::Tuple<bool> retire; | 326 base::Tuple<bool> params; |
| 366 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); | 327 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); |
| 367 if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message, | 328 if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message, |
| 368 &retire)) { | 329 ¶ms)) { |
| 369 reply->set_reply_error(); | 330 reply->set_reply_error(); |
| 370 Send(reply); | 331 Send(reply); |
| 371 return true; | 332 return true; |
| 372 } | 333 } |
| 373 if (!future_sync_points_ && !base::get<0>(retire)) { | 334 bool retire_sync_point = base::get<0>(params); |
| 335 if (!future_sync_points_ && !retire_sync_point) { | |
| 374 LOG(ERROR) << "Untrusted contexts can't create future sync points"; | 336 LOG(ERROR) << "Untrusted contexts can't create future sync points"; |
| 375 reply->set_reply_error(); | 337 reply->set_reply_error(); |
| 376 Send(reply); | 338 Send(reply); |
| 377 return true; | 339 return true; |
| 378 } | 340 } |
| 379 | 341 |
| 380 // Message queue must handle the entire sync point generation because the | 342 // Message queue must handle the entire sync point generation because the |
| 381 // message queue could be disabled from the main thread during generation. | 343 // message queue could be disabled from the main thread during generation. |
| 382 uint32_t sync_point = 0u; | 344 uint32_t sync_point = 0u; |
| 383 if (!message_queue_->GenerateSyncPointMessage( | 345 if (!message_queue_->GenerateSyncPointMessage( |
| 384 sync_point_manager_, order_number, message, base::get<0>(retire), | 346 sync_point_manager_, order_number, message, retire_sync_point, |
| 385 &sync_point)) { | 347 &sync_point)) { |
| 386 LOG(ERROR) << "GpuChannel has been destroyed."; | 348 LOG(ERROR) << "GpuChannel has been destroyed."; |
| 387 reply->set_reply_error(); | 349 reply->set_reply_error(); |
| 388 Send(reply); | 350 Send(reply); |
| 389 return true; | 351 return true; |
| 390 } | 352 } |
| 391 | 353 |
| 392 DCHECK_NE(sync_point, 0u); | 354 DCHECK_NE(sync_point, 0u); |
| 393 GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point); | 355 GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point); |
| 394 Send(reply); | 356 Send(reply); |
| 395 handled = true; | 357 handled = true; |
| 396 } | 358 } |
| 397 | 359 |
| 398 // Forward all other messages to the GPU Channel. | 360 // Forward all other messages to the GPU Channel. |
| 399 if (!handled && !message.is_reply() && !message.should_unblock()) { | 361 if (!handled && !message.is_reply() && !message.should_unblock()) { |
| 400 if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || | 362 if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || |
| 401 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { | 363 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { |
| 402 // Move Wait commands to the head of the queue, so the renderer | 364 // Move Wait commands to the head of the queue, so the renderer |
| 403 // doesn't have to wait any longer than necessary. | 365 // doesn't have to wait any longer than necessary. |
| 404 message_queue_->PushOutOfOrderMessage(message); | 366 message_queue_->PushBackMessage(kOutOfOrderNumber, message); |
| 405 } else { | 367 } else { |
| 406 message_queue_->PushBackMessage(order_number, message); | 368 message_queue_->PushBackMessage(order_number, message); |
| 407 } | 369 } |
| 408 handled = true; | 370 handled = true; |
| 409 } | 371 } |
| 410 | 372 |
| 411 UpdatePreemptionState(); | 373 UpdatePreemptionState(); |
| 412 return handled; | 374 return handled; |
| 413 } | 375 } |
| 414 | 376 |
| (...skipping 196 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 611 task_runner_(task_runner), | 573 task_runner_(task_runner), |
| 612 io_task_runner_(io_task_runner), | 574 io_task_runner_(io_task_runner), |
| 613 share_group_(share_group ? share_group : new gfx::GLShareGroup), | 575 share_group_(share_group ? share_group : new gfx::GLShareGroup), |
| 614 mailbox_manager_(mailbox | 576 mailbox_manager_(mailbox |
| 615 ? scoped_refptr<gpu::gles2::MailboxManager>(mailbox) | 577 ? scoped_refptr<gpu::gles2::MailboxManager>(mailbox) |
| 616 : gpu::gles2::MailboxManager::Create()), | 578 : gpu::gles2::MailboxManager::Create()), |
| 617 subscription_ref_set_(new gpu::gles2::SubscriptionRefSet), | 579 subscription_ref_set_(new gpu::gles2::SubscriptionRefSet), |
| 618 pending_valuebuffer_state_(new gpu::ValueStateMap), | 580 pending_valuebuffer_state_(new gpu::ValueStateMap), |
| 619 watchdog_(watchdog), | 581 watchdog_(watchdog), |
| 620 software_(software), | 582 software_(software), |
| 621 current_order_num_(0), | |
| 622 processed_order_num_(0), | |
| 623 num_stubs_descheduled_(0), | 583 num_stubs_descheduled_(0), |
| 624 allow_future_sync_points_(allow_future_sync_points), | 584 allow_future_sync_points_(allow_future_sync_points), |
| 625 allow_real_time_streams_(allow_real_time_streams), | 585 allow_real_time_streams_(allow_real_time_streams), |
| 626 weak_factory_(this) { | 586 weak_factory_(this) { |
| 627 DCHECK(gpu_channel_manager); | 587 DCHECK(gpu_channel_manager); |
| 628 DCHECK(client_id); | 588 DCHECK(client_id); |
| 629 | 589 |
| 630 message_queue_ = | 590 message_queue_ = |
| 631 GpuChannelMessageQueue::Create(weak_factory_.GetWeakPtr(), task_runner); | 591 GpuChannelMessageQueue::Create(weak_factory_.GetWeakPtr(), task_runner); |
| 632 | 592 |
| (...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 669 | 629 |
| 670 channel_->AddFilter(filter_.get()); | 630 channel_->AddFilter(filter_.get()); |
| 671 | 631 |
| 672 return channel_handle; | 632 return channel_handle; |
| 673 } | 633 } |
| 674 | 634 |
| 675 base::ProcessId GpuChannel::GetClientPID() const { | 635 base::ProcessId GpuChannel::GetClientPID() const { |
| 676 return channel_->GetPeerPID(); | 636 return channel_->GetPeerPID(); |
| 677 } | 637 } |
| 678 | 638 |
| 639 uint32_t GpuChannel::GetProcessedOrderNum() const { | |
| 640 return message_queue_->GetProccessedOrderNum(); | |
| 641 } | |
| 642 | |
| 643 uint32_t GpuChannel::GetUnprocessedOrderNum() const { | |
| 644 return message_queue_->GetUnprocessedOrderNum(); | |
| 645 } | |
| 646 | |
| 679 bool GpuChannel::OnMessageReceived(const IPC::Message& message) { | 647 bool GpuChannel::OnMessageReceived(const IPC::Message& message) { |
| 680 // All messages should be pushed to channel_messages_ and handled separately. | 648 // All messages should be pushed to channel_messages_ and handled separately. |
| 681 NOTREACHED(); | 649 NOTREACHED(); |
| 682 return false; | 650 return false; |
| 683 } | 651 } |
| 684 | 652 |
| 685 void GpuChannel::OnChannelError() { | 653 void GpuChannel::OnChannelError() { |
| 686 gpu_channel_manager_->RemoveChannel(client_id_); | 654 gpu_channel_manager_->RemoveChannel(client_id_); |
| 687 } | 655 } |
| 688 | 656 |
| (...skipping 20 matching lines...) Expand all Loading... | |
| 709 | 677 |
| 710 void GpuChannel::OnRemoveSubscription(unsigned int target) { | 678 void GpuChannel::OnRemoveSubscription(unsigned int target) { |
| 711 gpu_channel_manager()->Send( | 679 gpu_channel_manager()->Send( |
| 712 new GpuHostMsg_RemoveSubscription(client_id_, target)); | 680 new GpuHostMsg_RemoveSubscription(client_id_, target)); |
| 713 } | 681 } |
| 714 | 682 |
| 715 void GpuChannel::StubSchedulingChanged(bool scheduled) { | 683 void GpuChannel::StubSchedulingChanged(bool scheduled) { |
| 716 bool a_stub_was_descheduled = num_stubs_descheduled_ > 0; | 684 bool a_stub_was_descheduled = num_stubs_descheduled_ > 0; |
| 717 if (scheduled) { | 685 if (scheduled) { |
| 718 num_stubs_descheduled_--; | 686 num_stubs_descheduled_--; |
| 719 message_queue_->ScheduleHandleMessage(); | 687 ScheduleHandleMessage(); |
| 720 } else { | 688 } else { |
| 721 num_stubs_descheduled_++; | 689 num_stubs_descheduled_++; |
| 722 } | 690 } |
| 723 DCHECK_LE(num_stubs_descheduled_, stubs_.size()); | 691 DCHECK_LE(num_stubs_descheduled_, stubs_.size()); |
| 724 bool a_stub_is_descheduled = num_stubs_descheduled_ > 0; | 692 bool a_stub_is_descheduled = num_stubs_descheduled_ > 0; |
| 725 | 693 |
| 726 if (a_stub_is_descheduled != a_stub_was_descheduled) { | 694 if (a_stub_is_descheduled != a_stub_was_descheduled) { |
| 727 if (preempting_flag_.get()) { | 695 if (preempting_flag_.get()) { |
| 728 io_task_runner_->PostTask( | 696 io_task_runner_->PostTask( |
| 729 FROM_HERE, | 697 FROM_HERE, |
| (...skipping 127 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 857 OnDestroyCommandBuffer) | 825 OnDestroyCommandBuffer) |
| 858 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuMsg_CreateJpegDecoder, | 826 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuMsg_CreateJpegDecoder, |
| 859 OnCreateJpegDecoder) | 827 OnCreateJpegDecoder) |
| 860 IPC_MESSAGE_UNHANDLED(handled = false) | 828 IPC_MESSAGE_UNHANDLED(handled = false) |
| 861 IPC_END_MESSAGE_MAP() | 829 IPC_END_MESSAGE_MAP() |
| 862 DCHECK(handled) << msg.type(); | 830 DCHECK(handled) << msg.type(); |
| 863 return handled; | 831 return handled; |
| 864 } | 832 } |
| 865 | 833 |
| 866 void GpuChannel::HandleMessage() { | 834 void GpuChannel::HandleMessage() { |
| 867 GpuChannelMessage* m = nullptr; | 835 GpuChannelMessage* m = message_queue_->GetNextMessage(); |
| 868 GpuCommandBufferStub* stub = nullptr; | 836 |
| 869 bool has_more_messages = false; | 837 // TODO(sunnyps): This could be a DCHECK maybe? |
| 870 { | 838 if (!m) |
| 871 base::AutoLock auto_lock(message_queue_->channel_messages_lock_); | 839 return; |
| 872 if (!message_queue_->out_of_order_messages_.empty()) { | 840 |
| 873 m = message_queue_->out_of_order_messages_.front(); | 841 uint32_t order_number = m->order_number; |
| 874 DCHECK(m->order_number == kOutOfOrderNumber); | 842 IPC::Message& message = m->message; |
| 875 message_queue_->out_of_order_messages_.pop_front(); | 843 int32_t routing_id = message.routing_id(); |
| 876 } else if (!message_queue_->channel_messages_.empty()) { | 844 GpuCommandBufferStub* stub = stubs_.get(routing_id); |
| 877 m = message_queue_->channel_messages_.front(); | 845 |
| 878 DCHECK(m->order_number != kOutOfOrderNumber); | 846 DVLOG(1) << "received message @" << &message << " on channel @" << this |
| 879 message_queue_->channel_messages_.pop_front(); | 847 << " with type " << message.type(); |
| 848 | |
| 849 bool handled = false; | |
| 850 | |
| 851 if (routing_id == MSG_ROUTING_CONTROL) { | |
| 852 handled = OnControlMessageReceived(message); | |
| 853 } else if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { | |
| 854 // TODO(dyen): Temporary handling of old sync points. | |
| 855 // This must ensure that the sync point will be retired. Normally we'll | |
| 856 // find the stub based on the routing ID, and associate the sync point | |
| 857 // with it, but if that fails for any reason (channel or stub already | |
| 858 // deleted, invalid routing id), we need to retire the sync point | |
| 859 // immediately. | |
| 860 if (stub) { | |
| 861 stub->AddSyncPoint(m->sync_point, m->retire_sync_point); | |
| 880 } else { | 862 } else { |
| 881 // No messages to process | 863 gpu_channel_manager_->sync_point_manager()->RetireSyncPoint( |
| 882 return; | 864 m->sync_point); |
| 883 } | 865 } |
| 884 | 866 handled = true; |
| 885 has_more_messages = message_queue_->HasQueuedMessagesLocked(); | 867 } else { |
| 868 handled = router_.RouteMessage(message); | |
| 886 } | 869 } |
| 887 | 870 |
| 888 bool retry_message = false; | 871 // Respond to sync messages even if router failed to route. |
| 889 stub = stubs_.get(m->message.routing_id()); | 872 if (!handled && message.is_sync()) { |
| 890 if (stub) { | 873 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); |
| 891 if (!stub->IsScheduled()) { | 874 reply->set_reply_error(); |
| 892 retry_message = true; | 875 Send(reply); |
| 893 } | 876 handled = true; |
| 894 if (stub->IsPreempted()) { | |
| 895 retry_message = true; | |
| 896 message_queue_->ScheduleHandleMessage(); | |
| 897 } | |
| 898 } | 877 } |
| 899 | 878 |
| 900 if (retry_message) { | 879 // A command buffer may be descheduled or preempted but only in the middle of |
| 901 base::AutoLock auto_lock(message_queue_->channel_messages_lock_); | 880 // a flush. In this case we should not pop the message from the queue. |
| 902 if (m->order_number == kOutOfOrderNumber) | 881 if (stub && stub->HasUnprocessedCommands()) { |
| 903 message_queue_->out_of_order_messages_.push_front(m); | 882 DCHECK(message.type() == GpuCommandBufferMsg_AsyncFlush::ID); |
| 904 else | 883 // If the stub was preempted then we need to schedule a wakeup otherwise |
| 905 message_queue_->channel_messages_.push_front(m); | 884 // some other event will wake us up e.g. sync point completion. |
| 885 if (stub->IsPreempted()) | |
|
David Yen 2015/09/10 22:42:06: Don't we want to check for preemption before we han
sunnyps 2015/09/10 23:38:36: The flush will early out if the stub is preempted
| |
| 886 ScheduleHandleMessage(); | |
| 906 return; | 887 return; |
| 907 } else if (has_more_messages) { | |
| 908 message_queue_->ScheduleHandleMessage(); | |
| 909 } | 888 } |
| 910 | 889 |
| 911 scoped_ptr<GpuChannelMessage> scoped_message(m); | 890 if (message_queue_->MessageProcessed(order_number)) { |
| 912 const uint32_t order_number = m->order_number; | 891 ScheduleHandleMessage(); |
|
David Yen 2015/09/10 22:42:06: Did you intend to make messages in a channel onl
sunnyps 2015/09/10 23:38:36: The first HandleMessage is called from the message
| |
| 913 const int32_t routing_id = m->message.routing_id(); | |
| 914 | |
| 915 // TODO(dyen): Temporary handling of old sync points. | |
| 916 // This must ensure that the sync point will be retired. Normally we'll | |
| 917 // find the stub based on the routing ID, and associate the sync point | |
| 918 // with it, but if that fails for any reason (channel or stub already | |
| 919 // deleted, invalid routing id), we need to retire the sync point | |
| 920 // immediately. | |
| 921 if (m->message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { | |
| 922 const bool retire = m->retire_sync_point; | |
| 923 const uint32_t sync_point = m->sync_point_number; | |
| 924 if (stub) { | |
| 925 stub->AddSyncPoint(sync_point); | |
| 926 if (retire) { | |
| 927 m->message = | |
| 928 GpuCommandBufferMsg_RetireSyncPoint(routing_id, sync_point); | |
| 929 } | |
| 930 } else { | |
| 931 current_order_num_ = order_number; | |
| 932 gpu_channel_manager_->sync_point_manager()->RetireSyncPoint(sync_point); | |
| 933 MessageProcessed(order_number); | |
| 934 return; | |
| 935 } | |
| 936 } | 892 } |
| 937 | 893 |
| 938 IPC::Message* message = &m->message; | 894 if (preempting_flag_.get()) { |
|
dcheng
2015/09/10 21:30:37
if (preempting_flag_) {
sunnyps
2015/09/10 23:38:36
Done.
| |
| 939 bool message_processed = true; | 895 io_task_runner_->PostTask( |
| 896 FROM_HERE, | |
| 897 base::Bind(&GpuChannelMessageFilter::OnMessageProcessed, filter_)); | |
| 898 } | |
| 899 } | |
| 940 | 900 |
| 941 DVLOG(1) << "received message @" << message << " on channel @" << this | 901 void GpuChannel::ScheduleHandleMessage() { |
| 942 << " with type " << message->type(); | 902 task_runner_->PostTask(FROM_HERE, base::Bind(&GpuChannel::HandleMessage, |
| 943 | 903 weak_factory_.GetWeakPtr())); |
| 944 if (order_number != kOutOfOrderNumber) { | |
| 945 // Make sure this is a valid unprocessed order number. | |
| 946 DCHECK(order_number <= GetUnprocessedOrderNum() && | |
| 947 order_number >= GetProcessedOrderNum()); | |
| 948 | |
| 949 current_order_num_ = order_number; | |
| 950 } | |
| 951 bool result = false; | |
| 952 if (routing_id == MSG_ROUTING_CONTROL) | |
| 953 result = OnControlMessageReceived(*message); | |
| 954 else | |
| 955 result = router_.RouteMessage(*message); | |
| 956 | |
| 957 if (!result) { | |
| 958 // Respond to sync messages even if router failed to route. | |
| 959 if (message->is_sync()) { | |
| 960 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message); | |
| 961 reply->set_reply_error(); | |
| 962 Send(reply); | |
| 963 } | |
| 964 } else { | |
| 965 // If the command buffer becomes unscheduled as a result of handling the | |
| 966 // message but still has more commands to process, synthesize an IPC | |
| 967 // message to flush that command buffer. | |
| 968 if (stub) { | |
| 969 if (stub->HasUnprocessedCommands()) { | |
| 970 message_queue_->PushUnfinishedMessage( | |
| 971 order_number, GpuCommandBufferMsg_Rescheduled(stub->route_id())); | |
| 972 message_processed = false; | |
| 973 } | |
| 974 } | |
| 975 } | |
| 976 if (message_processed) | |
| 977 MessageProcessed(order_number); | |
| 978 } | 904 } |
| 979 | 905 |
| 980 void GpuChannel::OnCreateOffscreenCommandBuffer( | 906 void GpuChannel::OnCreateOffscreenCommandBuffer( |
| 981 const gfx::Size& size, | 907 const gfx::Size& size, |
| 982 const GPUCreateCommandBufferConfig& init_params, | 908 const GPUCreateCommandBufferConfig& init_params, |
| 983 int32 route_id, | 909 int32 route_id, |
| 984 bool* succeeded) { | 910 bool* succeeded) { |
| 985 TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer", "route_id", | 911 TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer", "route_id", |
| 986 route_id); | 912 route_id); |
| 987 | 913 |
| (...skipping 83 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1071 } | 997 } |
| 1072 } | 998 } |
| 1073 | 999 |
| 1074 void GpuChannel::OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg) { | 1000 void GpuChannel::OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg) { |
| 1075 if (!jpeg_decoder_) { | 1001 if (!jpeg_decoder_) { |
| 1076 jpeg_decoder_.reset(new GpuJpegDecodeAccelerator(this, io_task_runner_)); | 1002 jpeg_decoder_.reset(new GpuJpegDecodeAccelerator(this, io_task_runner_)); |
| 1077 } | 1003 } |
| 1078 jpeg_decoder_->AddClient(route_id, reply_msg); | 1004 jpeg_decoder_->AddClient(route_id, reply_msg); |
| 1079 } | 1005 } |
| 1080 | 1006 |
| 1081 void GpuChannel::MessageProcessed(uint32_t order_number) { | |
| 1082 if (order_number != kOutOfOrderNumber) { | |
| 1083 DCHECK(current_order_num_ == order_number); | |
| 1084 DCHECK(processed_order_num_ < order_number); | |
| 1085 processed_order_num_ = order_number; | |
| 1086 } | |
| 1087 if (preempting_flag_.get()) { | |
| 1088 io_task_runner_->PostTask( | |
| 1089 FROM_HERE, | |
| 1090 base::Bind(&GpuChannelMessageFilter::OnMessageProcessed, filter_)); | |
| 1091 } | |
| 1092 } | |
| 1093 | |
| 1094 void GpuChannel::CacheShader(const std::string& key, | 1007 void GpuChannel::CacheShader(const std::string& key, |
| 1095 const std::string& shader) { | 1008 const std::string& shader) { |
| 1096 gpu_channel_manager_->Send( | 1009 gpu_channel_manager_->Send( |
| 1097 new GpuHostMsg_CacheShader(client_id_, key, shader)); | 1010 new GpuHostMsg_CacheShader(client_id_, key, shader)); |
| 1098 } | 1011 } |
| 1099 | 1012 |
| 1100 void GpuChannel::AddFilter(IPC::MessageFilter* filter) { | 1013 void GpuChannel::AddFilter(IPC::MessageFilter* filter) { |
| 1101 io_task_runner_->PostTask( | 1014 io_task_runner_->PostTask( |
| 1102 FROM_HERE, base::Bind(&GpuChannelMessageFilter::AddChannelFilter, | 1015 FROM_HERE, base::Bind(&GpuChannelMessageFilter::AddChannelFilter, |
| 1103 filter_, make_scoped_refptr(filter))); | 1016 filter_, make_scoped_refptr(filter))); |
| (...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1153 client_id_); | 1066 client_id_); |
| 1154 } | 1067 } |
| 1155 } | 1068 } |
| 1156 } | 1069 } |
| 1157 | 1070 |
| 1158 void GpuChannel::HandleUpdateValueState( | 1071 void GpuChannel::HandleUpdateValueState( |
| 1159 unsigned int target, const gpu::ValueState& state) { | 1072 unsigned int target, const gpu::ValueState& state) { |
| 1160 pending_valuebuffer_state_->UpdateState(target, state); | 1073 pending_valuebuffer_state_->UpdateState(target, state); |
| 1161 } | 1074 } |
| 1162 | 1075 |
| 1163 uint32_t GpuChannel::GetUnprocessedOrderNum() const { | |
| 1164 return message_queue_->GetUnprocessedOrderNum(); | |
| 1165 } | |
| 1166 | |
| 1167 } // namespace content | 1076 } // namespace content |
| OLD | NEW |