Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/common/gpu/gpu_channel.h" | 5 #include "content/common/gpu/gpu_channel.h" |
| 6 | 6 |
| 7 #if defined(OS_WIN) | 7 #if defined(OS_WIN) |
| 8 #include <windows.h> | 8 #include <windows.h> |
| 9 #endif | 9 #endif |
| 10 | 10 |
| (...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 61 const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs; | 61 const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs; |
| 62 | 62 |
| 63 // Once we trigger a preemption, the maximum duration that we will wait | 63 // Once we trigger a preemption, the maximum duration that we will wait |
| 64 // before clearing the preemption. | 64 // before clearing the preemption. |
| 65 const int64 kMaxPreemptTimeMs = kVsyncIntervalMs; | 65 const int64 kMaxPreemptTimeMs = kVsyncIntervalMs; |
| 66 | 66 |
| 67 // Stop the preemption once the time for the longest pending IPC drops | 67 // Stop the preemption once the time for the longest pending IPC drops |
| 68 // below this threshold. | 68 // below this threshold. |
| 69 const int64 kStopPreemptThresholdMs = kVsyncIntervalMs; | 69 const int64 kStopPreemptThresholdMs = kVsyncIntervalMs; |
| 70 | 70 |
| 71 const uint32_t kOutOfOrderNumber = static_cast<uint32_t>(-1); | 71 } // anonymous namespace |
| 72 | 72 |
| 73 } // anonymous namespace | 73 // Begin order numbers at 1 so 0 can mean no orders. |
| 74 uint32_t GpuChannelMessageQueue::global_order_counter_ = 1; | |
| 74 | 75 |
| 75 scoped_refptr<GpuChannelMessageQueue> GpuChannelMessageQueue::Create( | 76 scoped_refptr<GpuChannelMessageQueue> GpuChannelMessageQueue::Create( |
| 76 const base::WeakPtr<GpuChannel>& gpu_channel, | 77 const base::WeakPtr<GpuChannel>& gpu_channel, |
| 77 base::SingleThreadTaskRunner* task_runner) { | 78 base::SingleThreadTaskRunner* task_runner) { |
| 78 return new GpuChannelMessageQueue(gpu_channel, task_runner); | 79 return new GpuChannelMessageQueue(gpu_channel, task_runner); |
| 79 } | 80 } |
| 80 | 81 |
| 81 GpuChannelMessageQueue::GpuChannelMessageQueue( | 82 GpuChannelMessageQueue::GpuChannelMessageQueue( |
| 82 const base::WeakPtr<GpuChannel>& gpu_channel, | 83 const base::WeakPtr<GpuChannel>& gpu_channel, |
| 83 base::SingleThreadTaskRunner* task_runner) | 84 base::SingleThreadTaskRunner* task_runner) |
| 84 : enabled_(true), | 85 : enabled_(true), |
| 85 unprocessed_order_num_(0), | 86 unprocessed_order_num_(0), |
| 86 processed_order_num_(0), | 87 processed_order_num_(0), |
| 87 gpu_channel_(gpu_channel), | 88 gpu_channel_(gpu_channel), |
| 88 task_runner_(task_runner) {} | 89 task_runner_(task_runner) {} |
| 89 | 90 |
| 90 GpuChannelMessageQueue::~GpuChannelMessageQueue() { | 91 GpuChannelMessageQueue::~GpuChannelMessageQueue() { |
| 91 DCHECK(channel_messages_.empty()); | 92 DCHECK(channel_messages_.empty()); |
| 92 DCHECK(out_of_order_messages_.empty()); | |
| 93 } | 93 } |
| 94 | 94 |
| 95 uint32_t GpuChannelMessageQueue::GetUnprocessedOrderNum() const { | 95 uint32_t GpuChannelMessageQueue::GetUnprocessedOrderNum() const { |
| 96 base::AutoLock auto_lock(channel_messages_lock_); | 96 base::AutoLock auto_lock(channel_messages_lock_); |
| 97 return unprocessed_order_num_; | 97 return unprocessed_order_num_; |
| 98 } | 98 } |
| 99 | 99 |
| 100 void GpuChannelMessageQueue::PushBackMessage(uint32_t order_number, | 100 void GpuChannelMessageQueue::PushBackMessage(const IPC::Message& message) { |
| 101 const IPC::Message& message) { | |
| 102 base::AutoLock auto_lock(channel_messages_lock_); | 101 base::AutoLock auto_lock(channel_messages_lock_); |
| 103 if (enabled_) { | 102 if (enabled_) |
| 104 PushMessageHelper( | 103 PushMessageHelper(make_scoped_ptr(new GpuChannelMessage(message))); |
| 105 make_scoped_ptr(new GpuChannelMessage(order_number, message))); | |
| 106 } | |
| 107 } | 104 } |
| 108 | 105 |
| 109 bool GpuChannelMessageQueue::GenerateSyncPointMessage( | 106 bool GpuChannelMessageQueue::GenerateSyncPointMessage( |
| 110 gpu::SyncPointManager* sync_point_manager, | 107 gpu::SyncPointManager* sync_point_manager, |
| 111 uint32_t order_number, | |
| 112 const IPC::Message& message, | 108 const IPC::Message& message, |
| 113 bool retire_sync_point, | 109 bool retire_sync_point, |
| 114 uint32_t* sync_point) { | 110 uint32_t* sync_point) { |
| 115 DCHECK_EQ((uint32_t)GpuCommandBufferMsg_InsertSyncPoint::ID, message.type()); | 111 DCHECK_EQ((uint32_t)GpuCommandBufferMsg_InsertSyncPoint::ID, message.type()); |
| 116 DCHECK(sync_point); | 112 DCHECK(sync_point); |
| 117 base::AutoLock auto_lock(channel_messages_lock_); | 113 base::AutoLock auto_lock(channel_messages_lock_); |
| 118 if (enabled_) { | 114 if (enabled_) { |
| 119 *sync_point = sync_point_manager->GenerateSyncPoint(); | 115 *sync_point = sync_point_manager->GenerateSyncPoint(); |
| 120 | 116 |
| 121 scoped_ptr<GpuChannelMessage> msg( | 117 scoped_ptr<GpuChannelMessage> msg(new GpuChannelMessage(message)); |
| 122 new GpuChannelMessage(order_number, message)); | |
| 123 msg->retire_sync_point = retire_sync_point; | 118 msg->retire_sync_point = retire_sync_point; |
| 124 msg->sync_point = *sync_point; | 119 msg->sync_point = *sync_point; |
| 125 | 120 |
| 126 PushMessageHelper(msg.Pass()); | 121 PushMessageHelper(msg.Pass()); |
| 127 return true; | 122 return true; |
| 128 } | 123 } |
| 129 return false; | 124 return false; |
| 130 } | 125 } |
| 131 | 126 |
| 132 bool GpuChannelMessageQueue::HasQueuedMessages() const { | 127 bool GpuChannelMessageQueue::HasQueuedMessages() const { |
| 133 base::AutoLock auto_lock(channel_messages_lock_); | 128 base::AutoLock auto_lock(channel_messages_lock_); |
| 134 return HasQueuedMessagesHelper(); | 129 return !channel_messages_.empty(); |
| 135 } | 130 } |
| 136 | 131 |
| 137 base::TimeTicks GpuChannelMessageQueue::GetNextMessageTimeTick() const { | 132 base::TimeTicks GpuChannelMessageQueue::GetNextMessageTimeTick() const { |
| 138 base::AutoLock auto_lock(channel_messages_lock_); | 133 base::AutoLock auto_lock(channel_messages_lock_); |
| 139 | |
| 140 base::TimeTicks next_message_tick; | |
| 141 if (!channel_messages_.empty()) | 134 if (!channel_messages_.empty()) |
| 142 next_message_tick = channel_messages_.front()->time_received; | 135 return channel_messages_.front()->time_received; |
| 143 | 136 return base::TimeTicks(); |
| 144 base::TimeTicks next_out_of_order_tick; | |
| 145 if (!out_of_order_messages_.empty()) | |
| 146 next_out_of_order_tick = out_of_order_messages_.front()->time_received; | |
| 147 | |
| 148 if (next_message_tick.is_null()) | |
| 149 return next_out_of_order_tick; | |
| 150 else if (next_out_of_order_tick.is_null()) | |
| 151 return next_message_tick; | |
| 152 else | |
| 153 return std::min(next_message_tick, next_out_of_order_tick); | |
| 154 } | 137 } |
| 155 | 138 |
| 156 GpuChannelMessage* GpuChannelMessageQueue::GetNextMessage() const { | 139 GpuChannelMessage* GpuChannelMessageQueue::GetNextMessage() const { |
| 157 base::AutoLock auto_lock(channel_messages_lock_); | 140 base::AutoLock auto_lock(channel_messages_lock_); |
| 158 if (!out_of_order_messages_.empty()) { | 141 if (!channel_messages_.empty()) { |
| 159 DCHECK_EQ(out_of_order_messages_.front()->order_number, kOutOfOrderNumber); | |
| 160 return out_of_order_messages_.front(); | |
| 161 } else if (!channel_messages_.empty()) { | |
| 162 DCHECK_GT(channel_messages_.front()->order_number, processed_order_num_); | 142 DCHECK_GT(channel_messages_.front()->order_number, processed_order_num_); |
| 163 DCHECK_LE(channel_messages_.front()->order_number, unprocessed_order_num_); | 143 DCHECK_LE(channel_messages_.front()->order_number, unprocessed_order_num_); |
| 164 return channel_messages_.front(); | 144 return channel_messages_.front(); |
| 165 } else { | |
| 166 return nullptr; | |
| 167 } | 145 } |
| 146 return nullptr; | |
| 168 } | 147 } |
| 169 | 148 |
| 170 bool GpuChannelMessageQueue::MessageProcessed(uint32_t order_number) { | 149 bool GpuChannelMessageQueue::MessageProcessed() { |
| 171 base::AutoLock auto_lock(channel_messages_lock_); | 150 base::AutoLock auto_lock(channel_messages_lock_); |
| 172 if (order_number != kOutOfOrderNumber) { | 151 DCHECK(!channel_messages_.empty()); |
| 173 DCHECK(!channel_messages_.empty()); | 152 scoped_ptr<GpuChannelMessage> msg(channel_messages_.front()); |
| 174 scoped_ptr<GpuChannelMessage> msg(channel_messages_.front()); | 153 channel_messages_.pop_front(); |
| 175 channel_messages_.pop_front(); | 154 processed_order_num_ = msg->order_number; |
| 176 DCHECK_EQ(order_number, msg->order_number); | 155 return !channel_messages_.empty(); |
| 177 processed_order_num_ = order_number; | |
| 178 } else { | |
| 179 DCHECK(!out_of_order_messages_.empty()); | |
| 180 scoped_ptr<GpuChannelMessage> msg(out_of_order_messages_.front()); | |
| 181 out_of_order_messages_.pop_front(); | |
| 182 } | |
| 183 return HasQueuedMessagesHelper(); | |
| 184 } | 156 } |
| 185 | 157 |
| 186 void GpuChannelMessageQueue::DeleteAndDisableMessages( | 158 void GpuChannelMessageQueue::DeleteAndDisableMessages( |
| 187 GpuChannelManager* gpu_channel_manager) { | 159 GpuChannelManager* gpu_channel_manager) { |
| 188 { | 160 { |
| 189 base::AutoLock auto_lock(channel_messages_lock_); | 161 base::AutoLock auto_lock(channel_messages_lock_); |
| 190 DCHECK(enabled_); | 162 DCHECK(enabled_); |
| 191 enabled_ = false; | 163 enabled_ = false; |
| 192 } | 164 } |
| 193 | 165 |
| 194 // We guarantee that the queues will no longer be modified after enabled_ | 166 // We guarantee that the queues will no longer be modified after enabled_ |
| 195 // is set to false, it is now safe to modify the queue without the lock. | 167 // is set to false, it is now safe to modify the queue without the lock. |
| 196 // All public facing modifying functions check enabled_ while all | 168 // All public facing modifying functions check enabled_ while all |
| 197 // private modifying functions DCHECK(enabled_) to enforce this. | 169 // private modifying functions DCHECK(enabled_) to enforce this. |
| 198 while (!channel_messages_.empty()) { | 170 while (!channel_messages_.empty()) { |
| 199 scoped_ptr<GpuChannelMessage> msg(channel_messages_.front()); | 171 scoped_ptr<GpuChannelMessage> msg(channel_messages_.front()); |
| 200 channel_messages_.pop_front(); | 172 channel_messages_.pop_front(); |
| 201 // This needs to clean up both GpuCommandBufferMsg_InsertSyncPoint and | 173 // This needs to clean up both GpuCommandBufferMsg_InsertSyncPoint and |
| 202 // GpuCommandBufferMsg_RetireSyncPoint messages, safer to just check | 174 // GpuCommandBufferMsg_RetireSyncPoint messages, safer to just check |
| 203 // if we have a sync point number here. | 175 // if we have a sync point number here. |
| 204 if (msg->sync_point) { | 176 if (msg->sync_point) { |
| 205 gpu_channel_manager->sync_point_manager()->RetireSyncPoint( | 177 gpu_channel_manager->sync_point_manager()->RetireSyncPoint( |
| 206 msg->sync_point); | 178 msg->sync_point); |
| 207 } | 179 } |
| 208 } | 180 } |
| 209 STLDeleteElements(&out_of_order_messages_); | |
| 210 } | 181 } |
| 211 | 182 |
| 212 void GpuChannelMessageQueue::ScheduleHandleMessage() { | 183 void GpuChannelMessageQueue::ScheduleHandleMessage() { |
| 213 task_runner_->PostTask(FROM_HERE, | 184 task_runner_->PostTask(FROM_HERE, |
| 214 base::Bind(&GpuChannel::HandleMessage, gpu_channel_)); | 185 base::Bind(&GpuChannel::HandleMessage, gpu_channel_)); |
| 215 } | 186 } |
| 216 | 187 |
| 217 void GpuChannelMessageQueue::PushMessageHelper( | 188 void GpuChannelMessageQueue::PushMessageHelper( |
| 218 scoped_ptr<GpuChannelMessage> msg) { | 189 scoped_ptr<GpuChannelMessage> msg) { |
| 219 channel_messages_lock_.AssertAcquired(); | 190 channel_messages_lock_.AssertAcquired(); |
| 220 DCHECK(enabled_); | 191 DCHECK(enabled_); |
| 221 bool had_messages = HasQueuedMessagesHelper(); | 192 |
| 222 if (msg->order_number != kOutOfOrderNumber) { | 193 msg->order_number = global_order_counter_++; |
| 223 unprocessed_order_num_ = msg->order_number; | 194 msg->time_received = base::TimeTicks::Now(); |
| 224 channel_messages_.push_back(msg.release()); | 195 |
| 225 } else { | 196 unprocessed_order_num_ = msg->order_number; |
| 226 out_of_order_messages_.push_back(msg.release()); | 197 |
| 227 } | 198 bool had_messages = !channel_messages_.empty(); |
| 199 channel_messages_.push_back(msg.release()); | |
| 228 if (!had_messages) | 200 if (!had_messages) |
| 229 ScheduleHandleMessage(); | 201 ScheduleHandleMessage(); |
| 230 } | 202 } |
| 231 | 203 |
| 232 bool GpuChannelMessageQueue::HasQueuedMessagesHelper() const { | |
| 233 channel_messages_lock_.AssertAcquired(); | |
| 234 return !channel_messages_.empty() || !out_of_order_messages_.empty(); | |
| 235 } | |
| 236 | |
| 237 // Begin order numbers at 1 so 0 can mean no orders. | |
| 238 uint32_t GpuChannelMessageFilter::global_order_counter_ = 1; | |
| 239 | |
| 240 GpuChannelMessageFilter::GpuChannelMessageFilter( | 204 GpuChannelMessageFilter::GpuChannelMessageFilter( |
| 205 const base::WeakPtr<GpuChannel>& gpu_channel, | |
| 241 GpuChannelMessageQueue* message_queue, | 206 GpuChannelMessageQueue* message_queue, |
| 242 gpu::SyncPointManager* sync_point_manager, | 207 gpu::SyncPointManager* sync_point_manager, |
| 243 base::SingleThreadTaskRunner* task_runner, | 208 base::SingleThreadTaskRunner* task_runner, |
| 244 bool future_sync_points) | 209 bool future_sync_points) |
| 245 : preemption_state_(IDLE), | 210 : preemption_state_(IDLE), |
| 211 gpu_channel_(gpu_channel), | |
| 246 message_queue_(message_queue), | 212 message_queue_(message_queue), |
| 247 sender_(nullptr), | 213 sender_(nullptr), |
| 248 peer_pid_(base::kNullProcessId), | 214 peer_pid_(base::kNullProcessId), |
| 249 sync_point_manager_(sync_point_manager), | 215 sync_point_manager_(sync_point_manager), |
| 250 task_runner_(task_runner), | 216 task_runner_(task_runner), |
| 251 a_stub_is_descheduled_(false), | 217 a_stub_is_descheduled_(false), |
| 252 future_sync_points_(future_sync_points) {} | 218 future_sync_points_(future_sync_points) {} |
| 253 | 219 |
| 254 GpuChannelMessageFilter::~GpuChannelMessageFilter() {} | 220 GpuChannelMessageFilter::~GpuChannelMessageFilter() {} |
| 255 | 221 |
| (...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 304 void GpuChannelMessageFilter::RemoveChannelFilter( | 270 void GpuChannelMessageFilter::RemoveChannelFilter( |
| 305 scoped_refptr<IPC::MessageFilter> filter) { | 271 scoped_refptr<IPC::MessageFilter> filter) { |
| 306 if (sender_) | 272 if (sender_) |
| 307 filter->OnFilterRemoved(); | 273 filter->OnFilterRemoved(); |
| 308 channel_filters_.erase( | 274 channel_filters_.erase( |
| 309 std::find(channel_filters_.begin(), channel_filters_.end(), filter)); | 275 std::find(channel_filters_.begin(), channel_filters_.end(), filter)); |
| 310 } | 276 } |
| 311 | 277 |
| 312 bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) { | 278 bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) { |
| 313 DCHECK(sender_); | 279 DCHECK(sender_); |
| 280 DCHECK(!message.should_unblock() && !message.is_reply()); | |
|
piman
2015/09/18 03:44:15
nit: it's not really a valid DCHECK, a compromised client could send messages that violate it, so it can't be treated as an invariant.
sunnyps
2015/09/18 18:47:28
Done.
| |
| 314 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { | 281 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { |
| 315 if (filter->OnMessageReceived(message)) { | 282 if (filter->OnMessageReceived(message)) { |
| 316 return true; | 283 return true; |
| 317 } | 284 } |
| 318 } | 285 } |
| 319 | 286 |
| 320 const uint32_t order_number = global_order_counter_++; | |
| 321 bool handled = false; | 287 bool handled = false; |
| 322 if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) && | 288 if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) && |
| 323 !future_sync_points_) { | 289 !future_sync_points_) { |
| 324 DLOG(ERROR) << "Untrusted client should not send " | 290 DLOG(ERROR) << "Untrusted client should not send " |
| 325 "GpuCommandBufferMsg_RetireSyncPoint message"; | 291 "GpuCommandBufferMsg_RetireSyncPoint message"; |
| 326 return true; | 292 return true; |
| 327 } | 293 } |
| 328 | 294 |
| 329 if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { | 295 if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { |
| 330 base::Tuple<bool> params; | 296 base::Tuple<bool> params; |
| 331 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); | 297 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); |
| 332 if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message, | 298 if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message, |
| 333 ¶ms)) { | 299 ¶ms)) { |
| 334 reply->set_reply_error(); | 300 reply->set_reply_error(); |
| 335 Send(reply); | 301 Send(reply); |
| 336 return true; | 302 return true; |
| 337 } | 303 } |
| 338 bool retire_sync_point = base::get<0>(params); | 304 bool retire_sync_point = base::get<0>(params); |
| 339 if (!future_sync_points_ && !retire_sync_point) { | 305 if (!future_sync_points_ && !retire_sync_point) { |
| 340 LOG(ERROR) << "Untrusted contexts can't create future sync points"; | 306 LOG(ERROR) << "Untrusted contexts can't create future sync points"; |
| 341 reply->set_reply_error(); | 307 reply->set_reply_error(); |
| 342 Send(reply); | 308 Send(reply); |
| 343 return true; | 309 return true; |
| 344 } | 310 } |
| 345 | 311 |
| 346 // Message queue must handle the entire sync point generation because the | 312 // Message queue must handle the entire sync point generation because the |
| 347 // message queue could be disabled from the main thread during generation. | 313 // message queue could be disabled from the main thread during generation. |
| 348 uint32_t sync_point = 0u; | 314 uint32_t sync_point = 0u; |
| 349 if (!message_queue_->GenerateSyncPointMessage( | 315 if (!message_queue_->GenerateSyncPointMessage( |
| 350 sync_point_manager_, order_number, message, retire_sync_point, | 316 sync_point_manager_, message, retire_sync_point, &sync_point)) { |
| 351 &sync_point)) { | |
| 352 LOG(ERROR) << "GpuChannel has been destroyed."; | 317 LOG(ERROR) << "GpuChannel has been destroyed."; |
| 353 reply->set_reply_error(); | 318 reply->set_reply_error(); |
| 354 Send(reply); | 319 Send(reply); |
| 355 return true; | 320 return true; |
| 356 } | 321 } |
| 357 | 322 |
| 358 DCHECK_NE(sync_point, 0u); | 323 DCHECK_NE(sync_point, 0u); |
| 359 GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point); | 324 GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point); |
| 360 Send(reply); | 325 Send(reply); |
| 361 handled = true; | 326 handled = true; |
| 362 } | 327 } |
| 363 | 328 |
| 364 // Forward all other messages to the GPU Channel. | 329 // Forward all other messages to the GPU Channel. |
| 365 if (!handled && !message.is_reply() && !message.should_unblock()) { | 330 if (!handled) { |
| 366 if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || | 331 if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || |
| 367 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { | 332 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { |
| 368 // Move Wait commands to the head of the queue, so the renderer | 333 task_runner_->PostTask(FROM_HERE, |
| 369 // doesn't have to wait any longer than necessary. | 334 base::Bind(&GpuChannel::HandleOutOfOrderMessage, |
| 370 message_queue_->PushBackMessage(kOutOfOrderNumber, message); | 335 gpu_channel_, message)); |
| 371 } else { | 336 } else { |
| 372 message_queue_->PushBackMessage(order_number, message); | 337 message_queue_->PushBackMessage(message); |
| 373 } | 338 } |
| 374 handled = true; | 339 handled = true; |
| 375 } | 340 } |
| 376 | 341 |
| 377 UpdatePreemptionState(); | 342 UpdatePreemptionState(); |
| 378 return handled; | 343 return handled; |
| 379 } | 344 } |
| 380 | 345 |
| 381 void GpuChannelMessageFilter::OnMessageProcessed() { | 346 void GpuChannelMessageFilter::OnMessageProcessed() { |
| 382 UpdatePreemptionState(); | 347 UpdatePreemptionState(); |
| (...skipping 205 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 588 allow_future_sync_points_(allow_future_sync_points), | 553 allow_future_sync_points_(allow_future_sync_points), |
| 589 allow_real_time_streams_(allow_real_time_streams), | 554 allow_real_time_streams_(allow_real_time_streams), |
| 590 weak_factory_(this) { | 555 weak_factory_(this) { |
| 591 DCHECK(gpu_channel_manager); | 556 DCHECK(gpu_channel_manager); |
| 592 DCHECK(client_id); | 557 DCHECK(client_id); |
| 593 | 558 |
| 594 message_queue_ = | 559 message_queue_ = |
| 595 GpuChannelMessageQueue::Create(weak_factory_.GetWeakPtr(), task_runner); | 560 GpuChannelMessageQueue::Create(weak_factory_.GetWeakPtr(), task_runner); |
| 596 | 561 |
| 597 filter_ = new GpuChannelMessageFilter( | 562 filter_ = new GpuChannelMessageFilter( |
| 598 message_queue_.get(), gpu_channel_manager_->sync_point_manager(), | 563 weak_factory_.GetWeakPtr(), message_queue_.get(), |
| 599 task_runner_.get(), allow_future_sync_points_); | 564 gpu_channel_manager_->sync_point_manager(), task_runner_.get(), |
| 565 allow_future_sync_points_); | |
| 600 | 566 |
| 601 subscription_ref_set_->AddObserver(this); | 567 subscription_ref_set_->AddObserver(this); |
| 602 } | 568 } |
| 603 | 569 |
| 604 GpuChannel::~GpuChannel() { | 570 GpuChannel::~GpuChannel() { |
| 605 // Clear stubs first because of dependencies. | 571 // Clear stubs first because of dependencies. |
| 606 stubs_.clear(); | 572 stubs_.clear(); |
| 607 | 573 |
| 608 message_queue_->DeleteAndDisableMessages(gpu_channel_manager_); | 574 message_queue_->DeleteAndDisableMessages(gpu_channel_manager_); |
| 609 | 575 |
| (...skipping 232 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 842 ScheduleHandleMessage(); | 808 ScheduleHandleMessage(); |
| 843 return; | 809 return; |
| 844 } | 810 } |
| 845 | 811 |
| 846 GpuChannelMessage* m = message_queue_->GetNextMessage(); | 812 GpuChannelMessage* m = message_queue_->GetNextMessage(); |
| 847 | 813 |
| 848 // TODO(sunnyps): This could be a DCHECK maybe? | 814 // TODO(sunnyps): This could be a DCHECK maybe? |
| 849 if (!m) | 815 if (!m) |
| 850 return; | 816 return; |
| 851 | 817 |
| 852 uint32_t order_number = m->order_number; | 818 current_order_num_ = m->order_number; |
| 853 const IPC::Message& message = m->message; | 819 const IPC::Message& message = m->message; |
| 854 int32_t routing_id = message.routing_id(); | 820 int32_t routing_id = message.routing_id(); |
| 855 GpuCommandBufferStub* stub = stubs_.get(routing_id); | 821 GpuCommandBufferStub* stub = stubs_.get(routing_id); |
| 856 | 822 |
| 857 DCHECK(!stub || stub->IsScheduled()); | 823 DCHECK(!stub || stub->IsScheduled()); |
| 858 | 824 |
| 859 DVLOG(1) << "received message @" << &message << " on channel @" << this | 825 DVLOG(1) << "received message @" << &message << " on channel @" << this |
| 860 << " with type " << message.type(); | 826 << " with type " << message.type(); |
| 861 | 827 |
| 862 current_order_num_ = order_number; | |
| 863 | |
| 864 bool handled = false; | 828 bool handled = false; |
| 865 | 829 |
| 866 if (routing_id == MSG_ROUTING_CONTROL) { | 830 if (routing_id == MSG_ROUTING_CONTROL) { |
| 867 handled = OnControlMessageReceived(message); | 831 handled = OnControlMessageReceived(message); |
| 868 } else if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { | 832 } else if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { |
| 869 // TODO(dyen): Temporary handling of old sync points. | 833 // TODO(dyen): Temporary handling of old sync points. |
| 870 // This must ensure that the sync point will be retired. Normally we'll | 834 // This must ensure that the sync point will be retired. Normally we'll |
| 871 // find the stub based on the routing ID, and associate the sync point | 835 // find the stub based on the routing ID, and associate the sync point |
| 872 // with it, but if that fails for any reason (channel or stub already | 836 // with it, but if that fails for any reason (channel or stub already |
| 873 // deleted, invalid routing id), we need to retire the sync point | 837 // deleted, invalid routing id), we need to retire the sync point |
| (...skipping 12 matching lines...) Expand all Loading... | |
| 886 // Respond to sync messages even if router failed to route. | 850 // Respond to sync messages even if router failed to route. |
| 887 if (!handled && message.is_sync()) { | 851 if (!handled && message.is_sync()) { |
| 888 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); | 852 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); |
| 889 reply->set_reply_error(); | 853 reply->set_reply_error(); |
| 890 Send(reply); | 854 Send(reply); |
| 891 handled = true; | 855 handled = true; |
| 892 } | 856 } |
| 893 | 857 |
| 894 // A command buffer may be descheduled or preempted but only in the middle of | 858 // A command buffer may be descheduled or preempted but only in the middle of |
| 895 // a flush. In this case we should not pop the message from the queue. | 859 // a flush. In this case we should not pop the message from the queue. |
| 896 if (stub && stub->HasUnprocessedCommands() && | 860 if (stub && stub->HasUnprocessedCommands()) { |
| 897 order_number != kOutOfOrderNumber) { | |
| 898 DCHECK_EQ((uint32_t)GpuCommandBufferMsg_AsyncFlush::ID, message.type()); | 861 DCHECK_EQ((uint32_t)GpuCommandBufferMsg_AsyncFlush::ID, message.type()); |
| 899 // If the stub is still scheduled then we were preempted and need to | 862 // If the stub is still scheduled then we were preempted and need to |
| 900 // schedule a wakeup otherwise some other event will wake us up e.g. sync | 863 // schedule a wakeup otherwise some other event will wake us up e.g. sync |
| 901 // point completion. No DCHECK for preemption flag because that can change | 864 // point completion. No DCHECK for preemption flag because that can change |
| 902 // any time. | 865 // any time. |
| 903 if (stub->IsScheduled()) | 866 if (stub->IsScheduled()) |
| 904 ScheduleHandleMessage(); | 867 ScheduleHandleMessage(); |
| 905 return; | 868 return; |
| 906 } | 869 } |
| 907 | 870 |
| 908 if (message_queue_->MessageProcessed(order_number)) { | 871 if (message_queue_->MessageProcessed()) |
| 909 ScheduleHandleMessage(); | 872 ScheduleHandleMessage(); |
| 910 } | |
| 911 | 873 |
| 912 if (preempting_flag_) { | 874 if (preempting_flag_) { |
| 913 io_task_runner_->PostTask( | 875 io_task_runner_->PostTask( |
| 914 FROM_HERE, | 876 FROM_HERE, |
| 915 base::Bind(&GpuChannelMessageFilter::OnMessageProcessed, filter_)); | 877 base::Bind(&GpuChannelMessageFilter::OnMessageProcessed, filter_)); |
| 916 } | 878 } |
| 917 } | 879 } |
| 918 | 880 |
| 919 void GpuChannel::ScheduleHandleMessage() { | 881 void GpuChannel::ScheduleHandleMessage() { |
| 920 task_runner_->PostTask(FROM_HERE, base::Bind(&GpuChannel::HandleMessage, | 882 task_runner_->PostTask(FROM_HERE, base::Bind(&GpuChannel::HandleMessage, |
| 921 weak_factory_.GetWeakPtr())); | 883 weak_factory_.GetWeakPtr())); |
| 922 } | 884 } |
| 923 | 885 |
| 886 void GpuChannel::HandleOutOfOrderMessage(const IPC::Message& msg) { | |
| 887 DCHECK((msg.type() == | |
| 888 (uint32_t)GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) || | |
|
piman
2015/09/18 03:44:15
nit: no c-style cast
sunnyps
2015/09/18 18:47:28
Replaced this with a switch statement which doesn't need C-style casts.
| |
| 889 (msg.type() == (uint32_t)GpuCommandBufferMsg_WaitForTokenInRange::ID)); | |
| 890 router_.RouteMessage(msg); | |
| 891 } | |
| 892 | |
| 924 void GpuChannel::OnCreateOffscreenCommandBuffer( | 893 void GpuChannel::OnCreateOffscreenCommandBuffer( |
| 925 const gfx::Size& size, | 894 const gfx::Size& size, |
| 926 const GPUCreateCommandBufferConfig& init_params, | 895 const GPUCreateCommandBufferConfig& init_params, |
| 927 int32 route_id, | 896 int32 route_id, |
| 928 bool* succeeded) { | 897 bool* succeeded) { |
| 929 TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer", "route_id", | 898 TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer", "route_id", |
| 930 route_id); | 899 route_id); |
| 931 | 900 |
| 932 int32 share_group_id = init_params.share_group_id; | 901 int32 share_group_id = init_params.share_group_id; |
| 933 GpuCommandBufferStub* share_group = stubs_.get(share_group_id); | 902 GpuCommandBufferStub* share_group = stubs_.get(share_group_id); |
| (...skipping 151 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1085 } | 1054 } |
| 1086 } | 1055 } |
| 1087 } | 1056 } |
| 1088 | 1057 |
| 1089 void GpuChannel::HandleUpdateValueState( | 1058 void GpuChannel::HandleUpdateValueState( |
| 1090 unsigned int target, const gpu::ValueState& state) { | 1059 unsigned int target, const gpu::ValueState& state) { |
| 1091 pending_valuebuffer_state_->UpdateState(target, state); | 1060 pending_valuebuffer_state_->UpdateState(target, state); |
| 1092 } | 1061 } |
| 1093 | 1062 |
| 1094 } // namespace content | 1063 } // namespace content |
| OLD | NEW |