| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/common/gpu/gpu_channel.h" |
| 6 |
| 5 #if defined(OS_WIN) | 7 #if defined(OS_WIN) |
| 6 #include <windows.h> | 8 #include <windows.h> |
| 7 #endif | 9 #endif |
| 8 | 10 |
| 9 #include "content/common/gpu/gpu_channel.h" | |
| 10 | |
| 11 #include <algorithm> | 11 #include <algorithm> |
| 12 #include <queue> | 12 #include <deque> |
| 13 #include <set> |
| 13 #include <vector> | 14 #include <vector> |
| 14 | 15 |
| 16 #include "base/atomicops.h" |
| 15 #include "base/bind.h" | 17 #include "base/bind.h" |
| 16 #include "base/command_line.h" | 18 #include "base/command_line.h" |
| 17 #include "base/location.h" | 19 #include "base/location.h" |
| 18 #include "base/single_thread_task_runner.h" | 20 #include "base/single_thread_task_runner.h" |
| 19 #include "base/stl_util.h" | 21 #include "base/stl_util.h" |
| 20 #include "base/strings/string_util.h" | 22 #include "base/strings/string_util.h" |
| 23 #include "base/synchronization/lock.h" |
| 21 #include "base/thread_task_runner_handle.h" | 24 #include "base/thread_task_runner_handle.h" |
| 22 #include "base/timer/timer.h" | 25 #include "base/timer/timer.h" |
| 23 #include "base/trace_event/memory_dump_manager.h" | 26 #include "base/trace_event/memory_dump_manager.h" |
| 24 #include "base/trace_event/process_memory_dump.h" | 27 #include "base/trace_event/process_memory_dump.h" |
| 25 #include "base/trace_event/trace_event.h" | 28 #include "base/trace_event/trace_event.h" |
| 26 #include "content/common/gpu/gpu_channel_manager.h" | 29 #include "content/common/gpu/gpu_channel_manager.h" |
| 27 #include "content/common/gpu/gpu_memory_buffer_factory.h" | 30 #include "content/common/gpu/gpu_memory_buffer_factory.h" |
| 28 #include "content/common/gpu/gpu_messages.h" | 31 #include "content/common/gpu/gpu_messages.h" |
| 29 #include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h" | 32 #include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h" |
| 30 #include "content/public/common/content_switches.h" | 33 #include "content/public/common/content_switches.h" |
| (...skipping 27 matching lines...) |
| 58 const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs; | 61 const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs; |
| 59 | 62 |
| 60 // Once we trigger a preemption, the maximum duration that we will wait | 63 // Once we trigger a preemption, the maximum duration that we will wait |
| 61 // before clearing the preemption. | 64 // before clearing the preemption. |
| 62 const int64 kMaxPreemptTimeMs = kVsyncIntervalMs; | 65 const int64 kMaxPreemptTimeMs = kVsyncIntervalMs; |
| 63 | 66 |
| 64 // Stop the preemption once the time for the longest pending IPC drops | 67 // Stop the preemption once the time for the longest pending IPC drops |
| 65 // below this threshold. | 68 // below this threshold. |
| 66 const int64 kStopPreemptThresholdMs = kVsyncIntervalMs; | 69 const int64 kStopPreemptThresholdMs = kVsyncIntervalMs; |
| 67 | 70 |
| 71 const uint32_t kOutOfOrderNumber = static_cast<uint32_t>(-1); |
| 72 |
| 68 } // anonymous namespace | 73 } // anonymous namespace |
| 69 | 74 |
| 75 struct GpuChannelMessage { |
| 76 uint32_t order_number; |
| 77 base::TimeTicks time_received; |
| 78 IPC::Message message; |
| 79 |
| 80 // TODO(dyen): Temporary sync point data, remove once new sync point lands. |
| 81 bool retire_sync_point; |
| 82 uint32 sync_point_number; |
| 83 |
| 84 GpuChannelMessage(uint32_t order_num, const IPC::Message& msg) |
| 85 : order_number(order_num), |
| 86 time_received(base::TimeTicks::Now()), |
| 87 message(msg), |
| 88 retire_sync_point(false), |
| 89 sync_point_number(0) {} |
| 90 }; |
| 91 |
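The hunk above introduces `GpuChannelMessage`, which tags every IPC with a monotonically increasing order number and a receipt timestamp, with `kOutOfOrderNumber` (`static_cast<uint32_t>(-1)`, i.e. `UINT32_MAX`) as the sentinel for messages that bypass ordering. A minimal standalone sketch of the same tagging idea, with `std::chrono` and `std::string` standing in for `base::TimeTicks` and `IPC::Message`:

```cpp
// Sketch only, not Chromium code: mirrors GpuChannelMessage's tagging.
#include <chrono>
#include <cstdint>
#include <string>
#include <utility>

constexpr uint32_t kOutOfOrder = static_cast<uint32_t>(-1);  // sentinel value

struct TaggedMessage {
  uint32_t order_number;                                // position in the stream
  std::chrono::steady_clock::time_point time_received;  // drives preemption timing
  std::string payload;                                  // stands in for IPC::Message

  TaggedMessage(uint32_t order_num, std::string msg)
      : order_number(order_num),
        time_received(std::chrono::steady_clock::now()),
        payload(std::move(msg)) {}
};

// Begin order numbers at 1 so 0 can mean "no orders yet", as the patch does.
uint32_t g_order_counter = 1;

TaggedMessage TagInOrder(std::string msg) {
  return TaggedMessage(g_order_counter++, std::move(msg));
}
```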
| 92 class GpuChannelMessageQueue |
| 93 : public base::RefCountedThreadSafe<GpuChannelMessageQueue> { |
| 94 public: |
| 95 static scoped_refptr<GpuChannelMessageQueue> Create( |
| 96 base::WeakPtr<GpuChannel> gpu_channel, |
| 97 scoped_refptr<base::SingleThreadTaskRunner> task_runner) { |
| 98 return new GpuChannelMessageQueue(gpu_channel, task_runner); |
| 99 } |
| 100 |
| 101 uint32_t GetUnprocessedOrderNum() { |
| 102 base::AutoLock auto_lock(channel_messages_lock_); |
| 103 return unprocessed_order_num_; |
| 104 } |
| 105 |
| 106 void PushBackMessage(uint32_t order_number, const IPC::Message& message) { |
| 107 base::AutoLock auto_lock(channel_messages_lock_); |
| 108 if (enabled_) { |
| 109 PushMessageHelper(order_number, |
| 110 new GpuChannelMessage(order_number, message)); |
| 111 } |
| 112 } |
| 113 |
| 114 void PushOutOfOrderMessage(const IPC::Message& message) { |
| 115 // These are pushed out of order, so they are not assigned an order number. |
| 116 base::AutoLock auto_lock(channel_messages_lock_); |
| 117 if (enabled_) { |
| 118 PushOutOfOrderHelper(new GpuChannelMessage(kOutOfOrderNumber, message)); |
| 119 } |
| 120 } |
| 121 |
| 122 bool GenerateSyncPointMessage(gpu::SyncPointManager* sync_point_manager, |
| 123 uint32_t order_number, |
| 124 const IPC::Message& message, |
| 125 bool retire_sync_point, |
| 126 uint32_t* sync_point_number) { |
| 127 DCHECK(message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID); |
| 128 base::AutoLock auto_lock(channel_messages_lock_); |
| 129 if (enabled_) { |
| 130 const uint32 sync_point = sync_point_manager->GenerateSyncPoint(); |
| 131 |
| 132 GpuChannelMessage* msg = new GpuChannelMessage(order_number, message); |
| 133 msg->retire_sync_point = retire_sync_point; |
| 134 msg->sync_point_number = sync_point; |
| 135 |
| 136 *sync_point_number = sync_point; |
| 137 PushMessageHelper(order_number, msg); |
| 138 return true; |
| 139 } |
| 140 return false; |
| 141 } |
| 142 |
| 143 bool HasQueuedMessages() { |
| 144 base::AutoLock auto_lock(channel_messages_lock_); |
| 145 return HasQueuedMessagesLocked(); |
| 146 } |
| 147 |
| 148 base::TimeTicks GetNextMessageTimeTick() { |
| 149 base::AutoLock auto_lock(channel_messages_lock_); |
| 150 |
| 151 base::TimeTicks next_message_tick; |
| 152 if (!channel_messages_.empty()) |
| 153 next_message_tick = channel_messages_.front()->time_received; |
| 154 |
| 155 base::TimeTicks next_out_of_order_tick; |
| 156 if (!out_of_order_messages_.empty()) |
| 157 next_out_of_order_tick = out_of_order_messages_.front()->time_received; |
| 158 |
| 159 if (next_message_tick.is_null()) |
| 160 return next_out_of_order_tick; |
| 161 else if (next_out_of_order_tick.is_null()) |
| 162 return next_message_tick; |
| 163 else |
| 164 return std::min(next_message_tick, next_out_of_order_tick); |
| 165 } |
| 166 |
| 167 protected: |
| 168 virtual ~GpuChannelMessageQueue() { |
| 169 DCHECK(channel_messages_.empty()); |
| 170 DCHECK(out_of_order_messages_.empty()); |
| 171 } |
| 172 |
| 173 private: |
| 174 friend class GpuChannel; |
| 175 friend class base::RefCountedThreadSafe<GpuChannelMessageQueue>; |
| 176 |
| 177 GpuChannelMessageQueue( |
| 178 base::WeakPtr<GpuChannel> gpu_channel, |
| 179 scoped_refptr<base::SingleThreadTaskRunner> task_runner) |
| 180 : enabled_(true), |
| 181 unprocessed_order_num_(0), |
| 182 gpu_channel_(gpu_channel), |
| 183 task_runner_(task_runner) {} |
| 184 |
| 185 void DeleteAndDisableMessages(GpuChannelManager* gpu_channel_manager) { |
| 186 { |
| 187 base::AutoLock auto_lock(channel_messages_lock_); |
| 188 DCHECK(enabled_); |
| 189 enabled_ = false; |
| 190 } |
| 191 |
| 192 // We guarantee that the queues will no longer be modified after enabled_ |
| 193 // is set to false, so it is now safe to modify the queues without the lock. |
| 194 // All public-facing modifying functions check enabled_, while all |
| 195 // private modifying functions DCHECK(enabled_) to enforce this. |
| 196 while (!channel_messages_.empty()) { |
| 197 GpuChannelMessage* msg = channel_messages_.front(); |
| 198 // This needs to clean up both GpuCommandBufferMsg_InsertSyncPoint and |
| 199 // GpuCommandBufferMsg_RetireSyncPoint messages, so it is safer to just |
| 200 // check whether we have a sync point number here. |
| 201 if (msg->sync_point_number) { |
| 202 gpu_channel_manager->sync_point_manager()->RetireSyncPoint( |
| 203 msg->sync_point_number); |
| 204 } |
| 205 delete msg; |
| 206 channel_messages_.pop_front(); |
| 207 } |
| 208 STLDeleteElements(&out_of_order_messages_); |
| 209 } |
| 210 |
| 211 void PushUnfinishedMessage(uint32_t order_number, |
| 212 const IPC::Message& message) { |
| 213 // This message is re-pushed only because it was unfinished, so its order number is kept. |
| 214 GpuChannelMessage* msg = new GpuChannelMessage(order_number, message); |
| 215 base::AutoLock auto_lock(channel_messages_lock_); |
| 216 DCHECK(enabled_); |
| 217 const bool had_messages = HasQueuedMessagesLocked(); |
| 218 if (order_number == kOutOfOrderNumber) |
| 219 out_of_order_messages_.push_front(msg); |
| 220 else |
| 221 channel_messages_.push_front(msg); |
| 222 |
| 223 if (!had_messages) |
| 224 ScheduleHandleMessage(); |
| 225 } |
| 226 |
| 227 void ScheduleHandleMessage() { |
| 228 task_runner_->PostTask( |
| 229 FROM_HERE, base::Bind(&GpuChannel::HandleMessage, gpu_channel_)); |
| 230 } |
| 231 |
| 232 void PushMessageHelper(uint32_t order_number, GpuChannelMessage* msg) { |
| 233 channel_messages_lock_.AssertAcquired(); |
| 234 DCHECK(enabled_); |
| 235 unprocessed_order_num_ = order_number; |
| 236 const bool had_messages = HasQueuedMessagesLocked(); |
| 237 channel_messages_.push_back(msg); |
| 238 if (!had_messages) |
| 239 ScheduleHandleMessage(); |
| 240 } |
| 241 |
| 242 void PushOutOfOrderHelper(GpuChannelMessage* msg) { |
| 243 channel_messages_lock_.AssertAcquired(); |
| 244 DCHECK(enabled_); |
| 245 const bool had_messages = HasQueuedMessagesLocked(); |
| 246 out_of_order_messages_.push_back(msg); |
| 247 if (!had_messages) |
| 248 ScheduleHandleMessage(); |
| 249 } |
| 250 |
| 251 bool HasQueuedMessagesLocked() { |
| 252 channel_messages_lock_.AssertAcquired(); |
| 253 return !channel_messages_.empty() || !out_of_order_messages_.empty(); |
| 254 } |
| 255 |
| 256 bool enabled_; |
| 257 |
| 258 // Highest IPC order number seen, set when queued on the IO thread. |
| 259 uint32_t unprocessed_order_num_; |
| 260 std::deque<GpuChannelMessage*> channel_messages_; |
| 261 std::deque<GpuChannelMessage*> out_of_order_messages_; |
| 262 |
| 263 // This lock protects enabled_, unprocessed_order_num_, and both deques. |
| 264 base::Lock channel_messages_lock_; |
| 265 |
| 266 base::WeakPtr<GpuChannel> gpu_channel_; |
| 267 scoped_refptr<base::SingleThreadTaskRunner> task_runner_; |
| 268 |
| 269 DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageQueue); |
| 270 }; |
| 271 |
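The locking contract is the subtle part of the class above: every public push checks `enabled_` under `channel_messages_lock_`, and `DeleteAndDisableMessages()` flips the flag under the lock before draining both deques without it. Below is a sketch of that handoff under stated assumptions, with `std::mutex` and `std::deque<int>` standing in for `base::Lock` and the message deques:

```cpp
// Sketch of the enabled_ handoff used by GpuChannelMessageQueue.
#include <deque>
#include <mutex>

class QueueSketch {
 public:
  // Called from any thread (the IO thread in the real code).
  bool Push(int msg) {
    std::lock_guard<std::mutex> lock(lock_);
    if (!enabled_)
      return false;  // Channel is shutting down; refuse the message.
    messages_.push_back(msg);
    return true;
  }

  // Called once on the owning thread during teardown.
  std::deque<int> DisableAndTake() {
    {
      std::lock_guard<std::mutex> lock(lock_);
      enabled_ = false;
    }
    // Any producer that acquires the lock from here on sees
    // enabled_ == false and never touches the deque, so draining
    // it without the lock is race-free.
    return std::move(messages_);
  }

 private:
  std::mutex lock_;
  bool enabled_ = true;
  std::deque<int> messages_;
};
```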
| 70 // This filter does three things: | 272 // This filter does three things: |
| 71 // - it counts and timestamps each message forwarded to the channel | 273 // - it counts and timestamps each message forwarded to the channel |
| 72 // so that we can preempt other channels if a message takes too long to | 274 // so that we can preempt other channels if a message takes too long to |
| 73 // process. To guarantee fairness, we must wait a minimum amount of time | 275 // process. To guarantee fairness, we must wait a minimum amount of time |
| 74 // before preempting and we limit the amount of time that we can preempt in | 276 // before preempting and we limit the amount of time that we can preempt in |
| 75 // one shot (see constants above). | 277 // one shot (see constants above). |
| 76 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO | 278 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO |
| 77 // thread, generating the sync point ID and responding immediately, and then | 279 // thread, generating the sync point ID and responding immediately, and then |
| 78 // posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message | 280 // posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message |
| 79 // into the channel's queue. | 281 // into the channel's queue. |
| 80 // - it generates mailbox names for clients of the GPU process on the IO thread. | 282 // - it generates mailbox names for clients of the GPU process on the IO thread. |
| 81 class GpuChannelMessageFilter : public IPC::MessageFilter { | 283 class GpuChannelMessageFilter : public IPC::MessageFilter { |
| 82 public: | 284 public: |
| 83 GpuChannelMessageFilter( | 285 GpuChannelMessageFilter( |
| 84 base::WeakPtr<GpuChannel> gpu_channel, | 286 scoped_refptr<GpuChannelMessageQueue> message_queue, |
| 85 gpu::SyncPointManager* sync_point_manager, | 287 gpu::SyncPointManager* sync_point_manager, |
| 86 scoped_refptr<base::SingleThreadTaskRunner> task_runner, | 288 scoped_refptr<base::SingleThreadTaskRunner> task_runner, |
| 87 bool future_sync_points) | 289 bool future_sync_points) |
| 88 : preemption_state_(IDLE), | 290 : preemption_state_(IDLE), |
| 89 gpu_channel_(gpu_channel), | 291 message_queue_(message_queue), |
| 90 sender_(nullptr), | 292 sender_(nullptr), |
| 91 peer_pid_(base::kNullProcessId), | 293 peer_pid_(base::kNullProcessId), |
| 92 sync_point_manager_(sync_point_manager), | 294 sync_point_manager_(sync_point_manager), |
| 93 task_runner_(task_runner), | 295 task_runner_(task_runner), |
| 94 messages_forwarded_to_channel_(0), | |
| 95 a_stub_is_descheduled_(false), | 296 a_stub_is_descheduled_(false), |
| 96 future_sync_points_(future_sync_points) {} | 297 future_sync_points_(future_sync_points) {} |
| 97 | 298 |
| 98 void OnFilterAdded(IPC::Sender* sender) override { | 299 void OnFilterAdded(IPC::Sender* sender) override { |
| 99 DCHECK(!sender_); | 300 DCHECK(!sender_); |
| 100 sender_ = sender; | 301 sender_ = sender; |
| 101 timer_ = make_scoped_ptr(new base::OneShotTimer<GpuChannelMessageFilter>); | 302 timer_ = make_scoped_ptr(new base::OneShotTimer<GpuChannelMessageFilter>); |
| 102 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { | 303 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { |
| 103 filter->OnFilterAdded(sender_); | 304 filter->OnFilterAdded(sender_); |
| 104 } | 305 } |
| (...skipping 39 matching lines...) |
| 144 | 345 |
| 145 void RemoveChannelFilter(scoped_refptr<IPC::MessageFilter> filter) { | 346 void RemoveChannelFilter(scoped_refptr<IPC::MessageFilter> filter) { |
| 146 if (sender_) | 347 if (sender_) |
| 147 filter->OnFilterRemoved(); | 348 filter->OnFilterRemoved(); |
| 148 channel_filters_.erase( | 349 channel_filters_.erase( |
| 149 std::find(channel_filters_.begin(), channel_filters_.end(), filter)); | 350 std::find(channel_filters_.begin(), channel_filters_.end(), filter)); |
| 150 } | 351 } |
| 151 | 352 |
| 152 bool OnMessageReceived(const IPC::Message& message) override { | 353 bool OnMessageReceived(const IPC::Message& message) override { |
| 153 DCHECK(sender_); | 354 DCHECK(sender_); |
| 154 | |
| 155 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { | 355 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { |
| 156 if (filter->OnMessageReceived(message)) { | 356 if (filter->OnMessageReceived(message)) { |
| 157 return true; | 357 return true; |
| 158 } | 358 } |
| 159 } | 359 } |
| 160 | 360 |
| 361 const uint32_t order_number = global_order_counter_++; |
| 161 bool handled = false; | 362 bool handled = false; |
| 162 if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) && | 363 if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) && |
| 163 !future_sync_points_) { | 364 !future_sync_points_) { |
| 164 DLOG(ERROR) << "Untrusted client should not send " | 365 DLOG(ERROR) << "Untrusted client should not send " |
| 165 "GpuCommandBufferMsg_RetireSyncPoint message"; | 366 "GpuCommandBufferMsg_RetireSyncPoint message"; |
| 166 return true; | 367 return true; |
| 167 } | 368 } |
| 168 | 369 |
| 169 if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { | 370 if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { |
| 170 base::Tuple<bool> retire; | 371 base::Tuple<bool> retire; |
| 171 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); | 372 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); |
| 172 if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message, | 373 if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message, |
| 173 &retire)) { | 374 &retire)) { |
| 174 reply->set_reply_error(); | 375 reply->set_reply_error(); |
| 175 Send(reply); | 376 Send(reply); |
| 176 return true; | 377 return true; |
| 177 } | 378 } |
| 178 if (!future_sync_points_ && !base::get<0>(retire)) { | 379 if (!future_sync_points_ && !base::get<0>(retire)) { |
| 179 LOG(ERROR) << "Untrusted contexts can't create future sync points"; | 380 LOG(ERROR) << "Untrusted contexts can't create future sync points"; |
| 180 reply->set_reply_error(); | 381 reply->set_reply_error(); |
| 181 Send(reply); | 382 Send(reply); |
| 182 return true; | 383 return true; |
| 183 } | 384 } |
| 184 uint32 sync_point = sync_point_manager_->GenerateSyncPoint(); | 385 |
| 386 // Message queue must handle the entire sync point generation because the |
| 387 // message queue could be disabled from the main thread during generation. |
| 388 uint32_t sync_point = 0u; |
| 389 if (!message_queue_->GenerateSyncPointMessage( |
| 390 sync_point_manager_, order_number, message, base::get<0>(retire), |
| 391 &sync_point)) { |
| 392 LOG(ERROR) << "GpuChannel has been destroyed."; |
| 393 reply->set_reply_error(); |
| 394 Send(reply); |
| 395 return true; |
| 396 } |
| 397 |
| 398 DCHECK_NE(sync_point, 0u); |
| 185 GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point); | 399 GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point); |
| 186 Send(reply); | 400 Send(reply); |
| 187 task_runner_->PostTask( | |
| 188 FROM_HERE, | |
| 189 base::Bind(&GpuChannelMessageFilter::InsertSyncPointOnMainThread, | |
| 190 gpu_channel_, sync_point_manager_, message.routing_id(), | |
| 191 base::get<0>(retire), sync_point)); | |
| 192 handled = true; | 401 handled = true; |
| 193 } | 402 } |
| 194 | 403 |
| 195 // All other messages get processed by the GpuChannel. | 404 // Forward all other messages to the GpuChannel. |
| 196 messages_forwarded_to_channel_++; | 405 if (!handled && !message.is_reply() && !message.should_unblock()) { |
| 197 if (preempting_flag_.get()) | 406 if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || |
| 198 pending_messages_.push(PendingMessage(messages_forwarded_to_channel_)); | 407 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { |
| 408 // Move Wait commands to the head of the queue, so the renderer |
| 409 // doesn't have to wait any longer than necessary. |
| 410 message_queue_->PushOutOfOrderMessage(message); |
| 411 } else { |
| 412 message_queue_->PushBackMessage(order_number, message); |
| 413 } |
| 414 handled = true; |
| 415 } |
| 416 |
| 199 UpdatePreemptionState(); | 417 UpdatePreemptionState(); |
| 200 | |
| 201 return handled; | 418 return handled; |
| 202 } | 419 } |
| 203 | 420 |
| 204 void MessageProcessed(uint64 messages_processed) { | 421 void OnMessageProcessed() { UpdatePreemptionState(); } |
| 205 while (!pending_messages_.empty() && | |
| 206 pending_messages_.front().message_number <= messages_processed) | |
| 207 pending_messages_.pop(); | |
| 208 UpdatePreemptionState(); | |
| 209 } | |
| 210 | 422 |
| 211 void SetPreemptingFlagAndSchedulingState( | 423 void SetPreemptingFlagAndSchedulingState( |
| 212 gpu::PreemptionFlag* preempting_flag, | 424 gpu::PreemptionFlag* preempting_flag, |
| 213 bool a_stub_is_descheduled) { | 425 bool a_stub_is_descheduled) { |
| 214 preempting_flag_ = preempting_flag; | 426 preempting_flag_ = preempting_flag; |
| 215 a_stub_is_descheduled_ = a_stub_is_descheduled; | 427 a_stub_is_descheduled_ = a_stub_is_descheduled; |
| 216 } | 428 } |
| 217 | 429 |
| 218 void UpdateStubSchedulingState(bool a_stub_is_descheduled) { | 430 void UpdateStubSchedulingState(bool a_stub_is_descheduled) { |
| 219 a_stub_is_descheduled_ = a_stub_is_descheduled; | 431 a_stub_is_descheduled_ = a_stub_is_descheduled; |
| (...skipping 23 matching lines...) |
| 243 // We would like to preempt, but some stub is descheduled. | 455 // We would like to preempt, but some stub is descheduled. |
| 244 WOULD_PREEMPT_DESCHEDULED, | 456 WOULD_PREEMPT_DESCHEDULED, |
| 245 }; | 457 }; |
| 246 | 458 |
| 247 PreemptionState preemption_state_; | 459 PreemptionState preemption_state_; |
| 248 | 460 |
| 249 // Maximum amount of time that we can spend in PREEMPTING. | 461 // Maximum amount of time that we can spend in PREEMPTING. |
| 250 // It is reset when we transition to IDLE. | 462 // It is reset when we transition to IDLE. |
| 251 base::TimeDelta max_preemption_time_; | 463 base::TimeDelta max_preemption_time_; |
| 252 | 464 |
| 253 struct PendingMessage { | |
| 254 uint64 message_number; | |
| 255 base::TimeTicks time_received; | |
| 256 | |
| 257 explicit PendingMessage(uint64 message_number) | |
| 258 : message_number(message_number), | |
| 259 time_received(base::TimeTicks::Now()) { | |
| 260 } | |
| 261 }; | |
| 262 | |
| 263 void UpdatePreemptionState() { | 465 void UpdatePreemptionState() { |
| 264 switch (preemption_state_) { | 466 switch (preemption_state_) { |
| 265 case IDLE: | 467 case IDLE: |
| 266 if (preempting_flag_.get() && !pending_messages_.empty()) | 468 if (preempting_flag_.get() && message_queue_->HasQueuedMessages()) |
| 267 TransitionToWaiting(); | 469 TransitionToWaiting(); |
| 268 break; | 470 break; |
| 269 case WAITING: | 471 case WAITING: |
| 270 // A timer will transition us to CHECKING. | 472 // A timer will transition us to CHECKING. |
| 271 DCHECK(timer_->IsRunning()); | 473 DCHECK(timer_->IsRunning()); |
| 272 break; | 474 break; |
| 273 case CHECKING: | 475 case CHECKING: { |
| 274 if (!pending_messages_.empty()) { | 476 base::TimeTicks time_tick = message_queue_->GetNextMessageTimeTick(); |
| 275 base::TimeDelta time_elapsed = | 477 if (!time_tick.is_null()) { |
| 276 base::TimeTicks::Now() - pending_messages_.front().time_received; | 478 base::TimeDelta time_elapsed = base::TimeTicks::Now() - time_tick; |
| 277 if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) { | 479 if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) { |
| 278 // Schedule another check for when the IPC may go long. | 480 // Schedule another check for when the IPC may go long. |
| 279 timer_->Start( | 481 timer_->Start( |
| 280 FROM_HERE, | 482 FROM_HERE, |
| 281 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) - | 483 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) - |
| 282 time_elapsed, | 484 time_elapsed, |
| 283 this, &GpuChannelMessageFilter::UpdatePreemptionState); | 485 this, &GpuChannelMessageFilter::UpdatePreemptionState); |
| 284 } else { | 486 } else { |
| 285 if (a_stub_is_descheduled_) | 487 if (a_stub_is_descheduled_) |
| 286 TransitionToWouldPreemptDescheduled(); | 488 TransitionToWouldPreemptDescheduled(); |
| 287 else | 489 else |
| 288 TransitionToPreempting(); | 490 TransitionToPreempting(); |
| 289 } | 491 } |
| 290 } | 492 } |
| 291 break; | 493 } break; |
| 292 case PREEMPTING: | 494 case PREEMPTING: |
| 293 // A TransitionToIdle() timer should always be running in this state. | 495 // A TransitionToIdle() timer should always be running in this state. |
| 294 DCHECK(timer_->IsRunning()); | 496 DCHECK(timer_->IsRunning()); |
| 295 if (a_stub_is_descheduled_) | 497 if (a_stub_is_descheduled_) |
| 296 TransitionToWouldPreemptDescheduled(); | 498 TransitionToWouldPreemptDescheduled(); |
| 297 else | 499 else |
| 298 TransitionToIdleIfCaughtUp(); | 500 TransitionToIdleIfCaughtUp(); |
| 299 break; | 501 break; |
| 300 case WOULD_PREEMPT_DESCHEDULED: | 502 case WOULD_PREEMPT_DESCHEDULED: |
| 301 // A TransitionToIdle() timer should never be running in this state. | 503 // A TransitionToIdle() timer should never be running in this state. |
| 302 DCHECK(!timer_->IsRunning()); | 504 DCHECK(!timer_->IsRunning()); |
| 303 if (!a_stub_is_descheduled_) | 505 if (!a_stub_is_descheduled_) |
| 304 TransitionToPreempting(); | 506 TransitionToPreempting(); |
| 305 else | 507 else |
| 306 TransitionToIdleIfCaughtUp(); | 508 TransitionToIdleIfCaughtUp(); |
| 307 break; | 509 break; |
| 308 default: | 510 default: |
| 309 NOTREACHED(); | 511 NOTREACHED(); |
| 310 } | 512 } |
| 311 } | 513 } |
| 312 | 514 |
| 313 void TransitionToIdleIfCaughtUp() { | 515 void TransitionToIdleIfCaughtUp() { |
| 314 DCHECK(preemption_state_ == PREEMPTING || | 516 DCHECK(preemption_state_ == PREEMPTING || |
| 315 preemption_state_ == WOULD_PREEMPT_DESCHEDULED); | 517 preemption_state_ == WOULD_PREEMPT_DESCHEDULED); |
| 316 if (pending_messages_.empty()) { | 518 base::TimeTicks next_tick = message_queue_->GetNextMessageTimeTick(); |
| 519 if (next_tick.is_null()) { |
| 317 TransitionToIdle(); | 520 TransitionToIdle(); |
| 318 } else { | 521 } else { |
| 319 base::TimeDelta time_elapsed = | 522 base::TimeDelta time_elapsed = base::TimeTicks::Now() - next_tick; |
| 320 base::TimeTicks::Now() - pending_messages_.front().time_received; | |
| 321 if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs) | 523 if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs) |
| 322 TransitionToIdle(); | 524 TransitionToIdle(); |
| 323 } | 525 } |
| 324 } | 526 } |
| 325 | 527 |
| 326 void TransitionToIdle() { | 528 void TransitionToIdle() { |
| 327 DCHECK(preemption_state_ == PREEMPTING || | 529 DCHECK(preemption_state_ == PREEMPTING || |
| 328 preemption_state_ == WOULD_PREEMPT_DESCHEDULED); | 530 preemption_state_ == WOULD_PREEMPT_DESCHEDULED); |
| 329 // Stop any outstanding timer set to force us from PREEMPTING to IDLE. | 531 // Stop any outstanding timer set to force us from PREEMPTING to IDLE. |
| 330 timer_->Stop(); | 532 timer_->Stop(); |
| (...skipping 65 matching lines...) |
| 396 } | 598 } |
| 397 } | 599 } |
| 398 | 600 |
| 399 preemption_state_ = WOULD_PREEMPT_DESCHEDULED; | 601 preemption_state_ = WOULD_PREEMPT_DESCHEDULED; |
| 400 preempting_flag_->Reset(); | 602 preempting_flag_->Reset(); |
| 401 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0); | 603 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0); |
| 402 | 604 |
| 403 UpdatePreemptionState(); | 605 UpdatePreemptionState(); |
| 404 } | 606 } |
| 405 | 607 |
| 406 static void InsertSyncPointOnMainThread( | 608 // The message_queue_ is used to handle messages on the main thread. |
| 407 base::WeakPtr<GpuChannel> gpu_channel, | 609 scoped_refptr<GpuChannelMessageQueue> message_queue_; |
| 408 gpu::SyncPointManager* manager, | |
| 409 int32 routing_id, | |
| 410 bool retire, | |
| 411 uint32 sync_point) { | |
| 412 // This function must ensure that the sync point will be retired. Normally | |
| 413 // we'll find the stub based on the routing ID, and associate the sync point | |
| 414 // with it, but if that fails for any reason (channel or stub already | |
| 415 // deleted, invalid routing id), we need to retire the sync point | |
| 416 // immediately. | |
| 417 if (gpu_channel) { | |
| 418 GpuCommandBufferStub* stub = gpu_channel->LookupCommandBuffer(routing_id); | |
| 419 if (stub) { | |
| 420 stub->AddSyncPoint(sync_point); | |
| 421 if (retire) { | |
| 422 GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point); | |
| 423 gpu_channel->OnMessageReceived(message); | |
| 424 } | |
| 425 return; | |
| 426 } else { | |
| 427 gpu_channel->MessageProcessed(); | |
| 428 } | |
| 429 } | |
| 430 manager->RetireSyncPoint(sync_point); | |
| 431 } | |
| 432 | |
| 433 // NOTE: this weak pointer is never dereferenced on the IO thread, it's only | |
| 434 // passed through - therefore the WeakPtr assumptions are respected. | |
| 435 base::WeakPtr<GpuChannel> gpu_channel_; | |
| 436 IPC::Sender* sender_; | 610 IPC::Sender* sender_; |
| 437 base::ProcessId peer_pid_; | 611 base::ProcessId peer_pid_; |
| 438 gpu::SyncPointManager* sync_point_manager_; | 612 gpu::SyncPointManager* sync_point_manager_; |
| 439 scoped_refptr<base::SingleThreadTaskRunner> task_runner_; | 613 scoped_refptr<base::SingleThreadTaskRunner> task_runner_; |
| 440 scoped_refptr<gpu::PreemptionFlag> preempting_flag_; | 614 scoped_refptr<gpu::PreemptionFlag> preempting_flag_; |
| 441 | |
| 442 std::queue<PendingMessage> pending_messages_; | |
| 443 std::vector<scoped_refptr<IPC::MessageFilter> > channel_filters_; | 615 std::vector<scoped_refptr<IPC::MessageFilter> > channel_filters_; |
| 444 | 616 |
| 445 // Count of the number of IPCs forwarded to the GpuChannel. | |
| 446 uint64 messages_forwarded_to_channel_; | |
| 447 | |
| 448 // This timer is created and destroyed on the IO thread. | 617 // This timer is created and destroyed on the IO thread. |
| 449 scoped_ptr<base::OneShotTimer<GpuChannelMessageFilter>> timer_; | 618 scoped_ptr<base::OneShotTimer<GpuChannelMessageFilter>> timer_; |
| 450 | 619 |
| 451 bool a_stub_is_descheduled_; | 620 bool a_stub_is_descheduled_; |
| 452 | 621 |
| 453 // True if this channel can create future sync points. | 622 // True if this channel can create future sync points. |
| 454 bool future_sync_points_; | 623 bool future_sync_points_; |
| 624 |
| 625 // This number is only ever incremented/read on the IO thread. |
| 626 static uint32_t global_order_counter_; |
| 455 }; | 627 }; |
| 456 | 628 |
| 629 // Begin order numbers at 1 so 0 can mean no orders. |
| 630 uint32_t GpuChannelMessageFilter::global_order_counter_ = 1; |
| 631 |
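With the `pending_messages_` queue gone, the filter's preemption machinery is driven by message age: `UpdatePreemptionState()` asks the shared queue for the timestamp of the oldest pending IPC and compares its age against the constants at the top of the file. A timing-only sketch of that decision follows; the full IDLE/WAITING/CHECKING/PREEMPTING state machine is elided, and the 17 ms vsync interval is an assumed placeholder for `kVsyncIntervalMs`:

```cpp
// Sketch of the preemption timing decision, not the full state machine.
#include <chrono>
#include <optional>

constexpr auto kVsync = std::chrono::milliseconds(17);  // assumed interval
constexpr auto kPreemptWait = 2 * kVsync;      // cf. kPreemptWaitTimeMs
constexpr auto kStopThreshold = kVsync;        // cf. kStopPreemptThresholdMs

enum class Action { kNone, kStartPreempting, kStopPreempting };

// |oldest| is the receipt time of the oldest queued message, if any.
Action Decide(bool preempting,
              std::optional<std::chrono::steady_clock::time_point> oldest) {
  const auto now = std::chrono::steady_clock::now();
  if (!preempting) {
    // Wait at least two vsyncs before preempting other channels.
    if (oldest && now - *oldest >= kPreemptWait)
      return Action::kStartPreempting;
  } else {
    // Stop once the longest-pending IPC drops below the threshold.
    if (!oldest || now - *oldest < kStopThreshold)
      return Action::kStopPreempting;
  }
  return Action::kNone;
}
```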
| 457 GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager, | 632 GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager, |
| 458 GpuWatchdog* watchdog, | 633 GpuWatchdog* watchdog, |
| 459 gfx::GLShareGroup* share_group, | 634 gfx::GLShareGroup* share_group, |
| 460 gpu::gles2::MailboxManager* mailbox, | 635 gpu::gles2::MailboxManager* mailbox, |
| 461 base::SingleThreadTaskRunner* task_runner, | 636 base::SingleThreadTaskRunner* task_runner, |
| 462 base::SingleThreadTaskRunner* io_task_runner, | 637 base::SingleThreadTaskRunner* io_task_runner, |
| 463 int client_id, | 638 int client_id, |
| 464 uint64_t client_tracing_id, | 639 uint64_t client_tracing_id, |
| 465 bool software, | 640 bool software, |
| 466 bool allow_future_sync_points) | 641 bool allow_future_sync_points) |
| 467 : gpu_channel_manager_(gpu_channel_manager), | 642 : gpu_channel_manager_(gpu_channel_manager), |
| 468 channel_id_(IPC::Channel::GenerateVerifiedChannelID("gpu")), | 643 channel_id_(IPC::Channel::GenerateVerifiedChannelID("gpu")), |
| 469 messages_processed_(0), | |
| 470 client_id_(client_id), | 644 client_id_(client_id), |
| 471 client_tracing_id_(client_tracing_id), | 645 client_tracing_id_(client_tracing_id), |
| 472 task_runner_(task_runner), | 646 task_runner_(task_runner), |
| 473 io_task_runner_(io_task_runner), | 647 io_task_runner_(io_task_runner), |
| 474 share_group_(share_group ? share_group : new gfx::GLShareGroup), | 648 share_group_(share_group ? share_group : new gfx::GLShareGroup), |
| 475 mailbox_manager_(mailbox | 649 mailbox_manager_(mailbox |
| 476 ? scoped_refptr<gpu::gles2::MailboxManager>(mailbox) | 650 ? scoped_refptr<gpu::gles2::MailboxManager>(mailbox) |
| 477 : gpu::gles2::MailboxManager::Create()), | 651 : gpu::gles2::MailboxManager::Create()), |
| 478 subscription_ref_set_(new gpu::gles2::SubscriptionRefSet), | 652 subscription_ref_set_(new gpu::gles2::SubscriptionRefSet), |
| 479 pending_valuebuffer_state_(new gpu::ValueStateMap), | 653 pending_valuebuffer_state_(new gpu::ValueStateMap), |
| 480 watchdog_(watchdog), | 654 watchdog_(watchdog), |
| 481 software_(software), | 655 software_(software), |
| 482 handle_messages_scheduled_(false), | 656 current_order_num_(0), |
| 483 currently_processing_message_(nullptr), | 657 processed_order_num_(0), |
| 484 num_stubs_descheduled_(0), | 658 num_stubs_descheduled_(0), |
| 485 allow_future_sync_points_(allow_future_sync_points), | 659 allow_future_sync_points_(allow_future_sync_points), |
| 486 weak_factory_(this) { | 660 weak_factory_(this) { |
| 487 DCHECK(gpu_channel_manager); | 661 DCHECK(gpu_channel_manager); |
| 488 DCHECK(client_id); | 662 DCHECK(client_id); |
| 489 | 663 |
| 664 message_queue_ = |
| 665 GpuChannelMessageQueue::Create(weak_factory_.GetWeakPtr(), task_runner); |
| 666 |
| 490 filter_ = new GpuChannelMessageFilter( | 667 filter_ = new GpuChannelMessageFilter( |
| 491 weak_factory_.GetWeakPtr(), gpu_channel_manager_->sync_point_manager(), | 668 message_queue_, gpu_channel_manager_->sync_point_manager(), task_runner_, |
| 492 task_runner_, allow_future_sync_points_); | 669 allow_future_sync_points_); |
| 493 | 670 |
| 494 subscription_ref_set_->AddObserver(this); | 671 subscription_ref_set_->AddObserver(this); |
| 495 } | 672 } |
| 496 | 673 |
| 497 GpuChannel::~GpuChannel() { | 674 GpuChannel::~GpuChannel() { |
| 498 // Clear stubs first because of dependencies. | 675 // Clear stubs first because of dependencies. |
| 499 stubs_.clear(); | 676 stubs_.clear(); |
| 500 | 677 |
| 501 STLDeleteElements(&deferred_messages_); | 678 message_queue_->DeleteAndDisableMessages(gpu_channel_manager_); |
| 679 |
| 502 subscription_ref_set_->RemoveObserver(this); | 680 subscription_ref_set_->RemoveObserver(this); |
| 503 if (preempting_flag_.get()) | 681 if (preempting_flag_.get()) |
| 504 preempting_flag_->Reset(); | 682 preempting_flag_->Reset(); |
| 505 } | 683 } |
| 506 | 684 |
| 507 IPC::ChannelHandle GpuChannel::Init(base::WaitableEvent* shutdown_event, | 685 IPC::ChannelHandle GpuChannel::Init(base::WaitableEvent* shutdown_event, |
| 508 IPC::AttachmentBroker* attachment_broker) { | 686 IPC::AttachmentBroker* attachment_broker) { |
| 509 DCHECK(shutdown_event); | 687 DCHECK(shutdown_event); |
| 510 DCHECK(!channel_); | 688 DCHECK(!channel_); |
| 511 | 689 |
| (...skipping 14 matching lines...) |
| 526 channel_->AddFilter(filter_.get()); | 704 channel_->AddFilter(filter_.get()); |
| 527 | 705 |
| 528 return channel_handle; | 706 return channel_handle; |
| 529 } | 707 } |
| 530 | 708 |
| 531 base::ProcessId GpuChannel::GetClientPID() const { | 709 base::ProcessId GpuChannel::GetClientPID() const { |
| 532 return channel_->GetPeerPID(); | 710 return channel_->GetPeerPID(); |
| 533 } | 711 } |
| 534 | 712 |
| 535 bool GpuChannel::OnMessageReceived(const IPC::Message& message) { | 713 bool GpuChannel::OnMessageReceived(const IPC::Message& message) { |
| 536 DVLOG(1) << "received message @" << &message << " on channel @" << this | 714 // All messages should be pushed to channel_messages_ and handled separately. |
| 537 << " with type " << message.type(); | 715 NOTREACHED(); |
| 538 | 716 return false; |
| 539 if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || | |
| 540 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { | |
| 541 // Move Wait commands to the head of the queue, so the renderer | |
| 542 // doesn't have to wait any longer than necessary. | |
| 543 deferred_messages_.push_front(new IPC::Message(message)); | |
| 544 } else { | |
| 545 deferred_messages_.push_back(new IPC::Message(message)); | |
| 546 } | |
| 547 | |
| 548 OnScheduled(); | |
| 549 | |
| 550 return true; | |
| 551 } | 717 } |
| 552 | 718 |
| 553 void GpuChannel::OnChannelError() { | 719 void GpuChannel::OnChannelError() { |
| 554 gpu_channel_manager_->RemoveChannel(client_id_); | 720 gpu_channel_manager_->RemoveChannel(client_id_); |
| 555 } | 721 } |
| 556 | 722 |
| 557 bool GpuChannel::Send(IPC::Message* message) { | 723 bool GpuChannel::Send(IPC::Message* message) { |
| 558 // The GPU process must never send a synchronous IPC message to the renderer | 724 // The GPU process must never send a synchronous IPC message to the renderer |
| 559 // process. This could result in deadlock. | 725 // process. This could result in deadlock. |
| 560 DCHECK(!message->is_sync()); | 726 DCHECK(!message->is_sync()); |
| (...skipping 12 matching lines...) |
| 573 void GpuChannel::OnAddSubscription(unsigned int target) { | 739 void GpuChannel::OnAddSubscription(unsigned int target) { |
| 574 gpu_channel_manager()->Send( | 740 gpu_channel_manager()->Send( |
| 575 new GpuHostMsg_AddSubscription(client_id_, target)); | 741 new GpuHostMsg_AddSubscription(client_id_, target)); |
| 576 } | 742 } |
| 577 | 743 |
| 578 void GpuChannel::OnRemoveSubscription(unsigned int target) { | 744 void GpuChannel::OnRemoveSubscription(unsigned int target) { |
| 579 gpu_channel_manager()->Send( | 745 gpu_channel_manager()->Send( |
| 580 new GpuHostMsg_RemoveSubscription(client_id_, target)); | 746 new GpuHostMsg_RemoveSubscription(client_id_, target)); |
| 581 } | 747 } |
| 582 | 748 |
| 583 void GpuChannel::RequeueMessage() { | |
| 584 DCHECK(currently_processing_message_); | |
| 585 deferred_messages_.push_front( | |
| 586 new IPC::Message(*currently_processing_message_)); | |
| 587 messages_processed_--; | |
| 588 currently_processing_message_ = NULL; | |
| 589 } | |
| 590 | |
| 591 void GpuChannel::OnScheduled() { | |
| 592 if (handle_messages_scheduled_) | |
| 593 return; | |
| 594 // Post a task to handle any deferred messages. The deferred message queue is | |
| 595 // not emptied here, which ensures that OnMessageReceived will continue to | |
| 596 // defer newly received messages until the ones in the queue have all been | |
| 597 // handled by HandleMessage. HandleMessage is invoked as a | |
| 598 // task to prevent reentrancy. | |
| 599 task_runner_->PostTask(FROM_HERE, base::Bind(&GpuChannel::HandleMessage, | |
| 600 weak_factory_.GetWeakPtr())); | |
| 601 handle_messages_scheduled_ = true; | |
| 602 } | |
| 603 | |
| 604 void GpuChannel::StubSchedulingChanged(bool scheduled) { | 749 void GpuChannel::StubSchedulingChanged(bool scheduled) { |
| 605 bool a_stub_was_descheduled = num_stubs_descheduled_ > 0; | 750 bool a_stub_was_descheduled = num_stubs_descheduled_ > 0; |
| 606 if (scheduled) { | 751 if (scheduled) { |
| 607 num_stubs_descheduled_--; | 752 num_stubs_descheduled_--; |
| 608 OnScheduled(); | 753 message_queue_->ScheduleHandleMessage(); |
| 609 } else { | 754 } else { |
| 610 num_stubs_descheduled_++; | 755 num_stubs_descheduled_++; |
| 611 } | 756 } |
| 612 DCHECK_LE(num_stubs_descheduled_, stubs_.size()); | 757 DCHECK_LE(num_stubs_descheduled_, stubs_.size()); |
| 613 bool a_stub_is_descheduled = num_stubs_descheduled_ > 0; | 758 bool a_stub_is_descheduled = num_stubs_descheduled_ > 0; |
| 614 | 759 |
| 615 if (a_stub_is_descheduled != a_stub_was_descheduled) { | 760 if (a_stub_is_descheduled != a_stub_was_descheduled) { |
| 616 if (preempting_flag_.get()) { | 761 if (preempting_flag_.get()) { |
| 617 io_task_runner_->PostTask( | 762 io_task_runner_->PostTask( |
| 618 FROM_HERE, | 763 FROM_HERE, |
| (...skipping 105 matching lines...) |
| 724 OnDestroyCommandBuffer) | 869 OnDestroyCommandBuffer) |
| 725 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuMsg_CreateJpegDecoder, | 870 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuMsg_CreateJpegDecoder, |
| 726 OnCreateJpegDecoder) | 871 OnCreateJpegDecoder) |
| 727 IPC_MESSAGE_UNHANDLED(handled = false) | 872 IPC_MESSAGE_UNHANDLED(handled = false) |
| 728 IPC_END_MESSAGE_MAP() | 873 IPC_END_MESSAGE_MAP() |
| 729 DCHECK(handled) << msg.type(); | 874 DCHECK(handled) << msg.type(); |
| 730 return handled; | 875 return handled; |
| 731 } | 876 } |
| 732 | 877 |
| 733 void GpuChannel::HandleMessage() { | 878 void GpuChannel::HandleMessage() { |
| 734 handle_messages_scheduled_ = false; | 879 GpuChannelMessage* m = nullptr; |
| 735 if (deferred_messages_.empty()) | 880 GpuCommandBufferStub* stub = nullptr; |
| 881 bool has_more_messages = false; |
| 882 { |
| 883 base::AutoLock auto_lock(message_queue_->channel_messages_lock_); |
| 884 if (!message_queue_->out_of_order_messages_.empty()) { |
| 885 m = message_queue_->out_of_order_messages_.front(); |
| 886 DCHECK(m->order_number == kOutOfOrderNumber); |
| 887 message_queue_->out_of_order_messages_.pop_front(); |
| 888 } else if (!message_queue_->channel_messages_.empty()) { |
| 889 m = message_queue_->channel_messages_.front(); |
| 890 DCHECK(m->order_number != kOutOfOrderNumber); |
| 891 message_queue_->channel_messages_.pop_front(); |
| 892 } else { |
| 893 // No messages to process |
| 894 return; |
| 895 } |
| 896 |
| 897 has_more_messages = message_queue_->HasQueuedMessagesLocked(); |
| 898 } |
| 899 |
| 900 bool retry_message = false; |
| 901 stub = stubs_.get(m->message.routing_id()); |
| 902 if (stub) { |
| 903 if (!stub->IsScheduled()) { |
| 904 retry_message = true; |
| 905 } |
| 906 if (stub->IsPreempted()) { |
| 907 retry_message = true; |
| 908 message_queue_->ScheduleHandleMessage(); |
| 909 } |
| 910 } |
| 911 |
| 912 if (retry_message) { |
| 913 base::AutoLock auto_lock(message_queue_->channel_messages_lock_); |
| 914 if (m->order_number == kOutOfOrderNumber) |
| 915 message_queue_->out_of_order_messages_.push_front(m); |
| 916 else |
| 917 message_queue_->channel_messages_.push_front(m); |
| 736 return; | 918 return; |
| 919 } else if (has_more_messages) { |
| 920 message_queue_->ScheduleHandleMessage(); |
| 921 } |
| 737 | 922 |
| 738 IPC::Message* m = NULL; | 923 scoped_ptr<GpuChannelMessage> scoped_message(m); |
| 739 GpuCommandBufferStub* stub = NULL; | 924 const uint32_t order_number = m->order_number; |
| 925 const int32_t routing_id = m->message.routing_id(); |
| 740 | 926 |
| 741 m = deferred_messages_.front(); | 927 // TODO(dyen): Temporary handling of old sync points. |
| 742 stub = stubs_.get(m->routing_id()); | 928 // This must ensure that the sync point will be retired. Normally we'll |
| 743 if (stub) { | 929 // find the stub based on the routing ID, and associate the sync point |
| 744 if (!stub->IsScheduled()) | 930 // with it, but if that fails for any reason (channel or stub already |
| 745 return; | 931 // deleted, invalid routing id), we need to retire the sync point |
| 746 if (stub->IsPreempted()) { | 932 // immediately. |
| 747 OnScheduled(); | 933 if (m->message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { |
| 934 const bool retire = m->retire_sync_point; |
| 935 const uint32_t sync_point = m->sync_point_number; |
| 936 if (stub) { |
| 937 stub->AddSyncPoint(sync_point); |
| 938 if (retire) { |
| 939 m->message = |
| 940 GpuCommandBufferMsg_RetireSyncPoint(routing_id, sync_point); |
| 941 } |
| 942 } else { |
| 943 current_order_num_ = order_number; |
| 944 gpu_channel_manager_->sync_point_manager()->RetireSyncPoint(sync_point); |
| 945 MessageProcessed(order_number); |
| 748 return; | 946 return; |
| 749 } | 947 } |
| 750 } | 948 } |
| 751 | 949 |
| 752 scoped_ptr<IPC::Message> message(m); | 950 IPC::Message* message = &m->message; |
| 753 deferred_messages_.pop_front(); | |
| 754 bool message_processed = true; | 951 bool message_processed = true; |
| 755 | 952 |
| 756 currently_processing_message_ = message.get(); | 953 DVLOG(1) << "received message @" << message << " on channel @" << this |
| 757 bool result; | 954 << " with type " << message->type(); |
| 758 if (message->routing_id() == MSG_ROUTING_CONTROL) | 955 |
| 956 if (order_number != kOutOfOrderNumber) { |
| 957 // Make sure this is a valid unprocessed order number. |
| 958 DCHECK(order_number <= GetUnprocessedOrderNum() && |
| 959 order_number >= GetProcessedOrderNum()); |
| 960 |
| 961 current_order_num_ = order_number; |
| 962 } |
| 963 bool result = false; |
| 964 if (routing_id == MSG_ROUTING_CONTROL) |
| 759 result = OnControlMessageReceived(*message); | 965 result = OnControlMessageReceived(*message); |
| 760 else | 966 else |
| 761 result = router_.RouteMessage(*message); | 967 result = router_.RouteMessage(*message); |
| 762 currently_processing_message_ = NULL; | |
| 763 | 968 |
| 764 if (!result) { | 969 if (!result) { |
| 765 // Respond to sync messages even if router failed to route. | 970 // Respond to sync messages even if router failed to route. |
| 766 if (message->is_sync()) { | 971 if (message->is_sync()) { |
| 767 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message); | 972 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message); |
| 768 reply->set_reply_error(); | 973 reply->set_reply_error(); |
| 769 Send(reply); | 974 Send(reply); |
| 770 } | 975 } |
| 771 } else { | 976 } else { |
| 772 // If the command buffer becomes unscheduled as a result of handling the | 977 // If the command buffer becomes unscheduled as a result of handling the |
| 773 // message but still has more commands to process, synthesize an IPC | 978 // message but still has more commands to process, synthesize an IPC |
| 774 // message to flush that command buffer. | 979 // message to flush that command buffer. |
| 775 if (stub) { | 980 if (stub) { |
| 776 if (stub->HasUnprocessedCommands()) { | 981 if (stub->HasUnprocessedCommands()) { |
| 777 deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled( | 982 message_queue_->PushUnfinishedMessage( |
| 778 stub->route_id())); | 983 order_number, GpuCommandBufferMsg_Rescheduled(stub->route_id())); |
| 779 message_processed = false; | 984 message_processed = false; |
| 780 } | 985 } |
| 781 } | 986 } |
| 782 } | 987 } |
| 783 if (message_processed) | 988 if (message_processed) |
| 784 MessageProcessed(); | 989 MessageProcessed(order_number); |
| 785 | |
| 786 if (!deferred_messages_.empty()) { | |
| 787 OnScheduled(); | |
| 788 } | |
| 789 } | 990 } |
| 790 | 991 |
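`HandleMessage()` now pops from the shared queue itself: out-of-order messages take priority, an unscheduled or preempted stub causes the popped message to be pushed back onto the front of its deque, and only a fully processed message advances the order-number watermark. A simplified sketch of the pop/retry skeleton, where `IsStubReady` and `ProcessOne` are hypothetical stand-ins for the stub checks and routing:

```cpp
// Sketch of HandleMessage's pop-then-maybe-requeue behavior.
#include <deque>
#include <iostream>

struct Msg { int routing_id = 0; };

// Hypothetical stand-ins for stub->IsScheduled()/IsPreempted() and routing.
bool IsStubReady(int /*routing_id*/) { return true; }
void ProcessOne(const Msg& m) { std::cout << "route " << m.routing_id << "\n"; }

void HandleOne(std::deque<Msg>& queue) {
  if (queue.empty())
    return;  // Nothing to do.
  Msg m = queue.front();
  queue.pop_front();
  if (!IsStubReady(m.routing_id)) {
    queue.push_front(m);  // Retry later without losing FIFO order.
    return;
  }
  ProcessOne(m);
}
```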
| 791 void GpuChannel::OnCreateOffscreenCommandBuffer( | 992 void GpuChannel::OnCreateOffscreenCommandBuffer( |
| 792 const gfx::Size& size, | 993 const gfx::Size& size, |
| 793 const GPUCreateCommandBufferConfig& init_params, | 994 const GPUCreateCommandBufferConfig& init_params, |
| 794 int32 route_id, | 995 int32 route_id, |
| 795 bool* succeeded) { | 996 bool* succeeded) { |
| 796 TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer", "route_id", | 997 TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer", "route_id", |
| 797 route_id); | 998 route_id); |
| 798 | 999 |
| (...skipping 48 matching lines...) |
| 847 } | 1048 } |
| 848 } | 1049 } |
| 849 | 1050 |
| 850 void GpuChannel::OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg) { | 1051 void GpuChannel::OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg) { |
| 851 if (!jpeg_decoder_) { | 1052 if (!jpeg_decoder_) { |
| 852 jpeg_decoder_.reset(new GpuJpegDecodeAccelerator(this, io_task_runner_)); | 1053 jpeg_decoder_.reset(new GpuJpegDecodeAccelerator(this, io_task_runner_)); |
| 853 } | 1054 } |
| 854 jpeg_decoder_->AddClient(route_id, reply_msg); | 1055 jpeg_decoder_->AddClient(route_id, reply_msg); |
| 855 } | 1056 } |
| 856 | 1057 |
| 857 void GpuChannel::MessageProcessed() { | 1058 void GpuChannel::MessageProcessed(uint32_t order_number) { |
| 858 messages_processed_++; | 1059 if (order_number != kOutOfOrderNumber) { |
| 1060 DCHECK(current_order_num_ == order_number); |
| 1061 DCHECK(processed_order_num_ < order_number); |
| 1062 processed_order_num_ = order_number; |
| 1063 } |
| 859 if (preempting_flag_.get()) { | 1064 if (preempting_flag_.get()) { |
| 860 io_task_runner_->PostTask( | 1065 io_task_runner_->PostTask( |
| 861 FROM_HERE, base::Bind(&GpuChannelMessageFilter::MessageProcessed, | 1066 FROM_HERE, |
| 862 filter_, messages_processed_)); | 1067 base::Bind(&GpuChannelMessageFilter::OnMessageProcessed, filter_)); |
| 863 } | 1068 } |
| 864 } | 1069 } |
| 865 | 1070 |
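`MessageProcessed()` enforces two invariants on the in-order stream: the channel retires exactly the message it is currently executing, and the processed watermark only moves forward; out-of-order messages leave it untouched. A compact sketch of those checks:

```cpp
// Sketch of the order-number bookkeeping invariants.
#include <cassert>
#include <cstdint>

constexpr uint32_t kOutOfOrder = static_cast<uint32_t>(-1);

struct OrderTracker {
  uint32_t current = 0;    // set when a message starts executing
  uint32_t processed = 0;  // last fully processed in-order message

  void OnProcessed(uint32_t order) {
    if (order == kOutOfOrder)
      return;  // Out-of-order messages never advance the watermark.
    assert(order == current);   // We finish the message we started.
    assert(processed < order);  // The watermark is strictly increasing.
    processed = order;
  }
};
```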
| 866 void GpuChannel::CacheShader(const std::string& key, | 1071 void GpuChannel::CacheShader(const std::string& key, |
| 867 const std::string& shader) { | 1072 const std::string& shader) { |
| 868 gpu_channel_manager_->Send( | 1073 gpu_channel_manager_->Send( |
| 869 new GpuHostMsg_CacheShader(client_id_, key, shader)); | 1074 new GpuHostMsg_CacheShader(client_id_, key, shader)); |
| 870 } | 1075 } |
| 871 | 1076 |
| 872 void GpuChannel::AddFilter(IPC::MessageFilter* filter) { | 1077 void GpuChannel::AddFilter(IPC::MessageFilter* filter) { |
| (...skipping 52 matching lines...) |
| 925 client_id_); | 1130 client_id_); |
| 926 } | 1131 } |
| 927 } | 1132 } |
| 928 } | 1133 } |
| 929 | 1134 |
| 930 void GpuChannel::HandleUpdateValueState( | 1135 void GpuChannel::HandleUpdateValueState( |
| 931 unsigned int target, const gpu::ValueState& state) { | 1136 unsigned int target, const gpu::ValueState& state) { |
| 932 pending_valuebuffer_state_->UpdateState(target, state); | 1137 pending_valuebuffer_state_->UpdateState(target, state); |
| 933 } | 1138 } |
| 934 | 1139 |
| 1140 uint32_t GpuChannel::GetUnprocessedOrderNum() const { |
| 1141 return message_queue_->GetUnprocessedOrderNum(); |
| 1142 } |
| 1143 |
| 935 } // namespace content | 1144 } // namespace content |