Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #if defined(OS_WIN) | 5 #if defined(OS_WIN) |
| 6 #include <windows.h> | 6 #include <windows.h> |
| 7 #endif | 7 #endif |
| 8 | 8 |
| 9 #include "content/common/gpu/gpu_channel.h" | 9 #include "content/common/gpu/gpu_channel.h" |
| 10 | 10 |
| 11 #include <algorithm> | 11 #include <algorithm> |
| 12 #include <queue> | 12 #include <deque> |
| 13 #include <vector> | 13 #include <vector> |
| 14 | 14 |
| 15 #include "base/bind.h" | 15 #include "base/bind.h" |
| 16 #include "base/command_line.h" | 16 #include "base/command_line.h" |
| 17 #include "base/location.h" | 17 #include "base/location.h" |
| 18 #include "base/single_thread_task_runner.h" | 18 #include "base/single_thread_task_runner.h" |
| 19 #include "base/stl_util.h" | 19 #include "base/stl_util.h" |
| 20 #include "base/strings/string_util.h" | 20 #include "base/strings/string_util.h" |
| 21 #include "base/synchronization/lock.h" | |
| 21 #include "base/thread_task_runner_handle.h" | 22 #include "base/thread_task_runner_handle.h" |
| 22 #include "base/timer/timer.h" | |
| 23 #include "base/trace_event/memory_dump_manager.h" | 23 #include "base/trace_event/memory_dump_manager.h" |
| 24 #include "base/trace_event/process_memory_dump.h" | 24 #include "base/trace_event/process_memory_dump.h" |
| 25 #include "base/trace_event/trace_event.h" | 25 #include "base/trace_event/trace_event.h" |
| 26 #include "content/common/gpu/gpu_channel_manager.h" | 26 #include "content/common/gpu/gpu_channel_manager.h" |
| 27 #include "content/common/gpu/gpu_memory_buffer_factory.h" | 27 #include "content/common/gpu/gpu_memory_buffer_factory.h" |
| 28 #include "content/common/gpu/gpu_messages.h" | 28 #include "content/common/gpu/gpu_messages.h" |
| 29 #include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h" | 29 #include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h" |
| 30 #include "content/public/common/content_switches.h" | 30 #include "content/public/common/content_switches.h" |
| 31 #include "gpu/command_buffer/common/mailbox.h" | 31 #include "gpu/command_buffer/common/mailbox.h" |
| 32 #include "gpu/command_buffer/common/value_state.h" | 32 #include "gpu/command_buffer/common/value_state.h" |
| (...skipping 27 matching lines...) | |
| 60 // Once we trigger a preemption, the maximum duration that we will wait | 60 // Once we trigger a preemption, the maximum duration that we will wait |
| 61 // before clearing the preemption. | 61 // before clearing the preemption. |
| 62 const int64 kMaxPreemptTimeMs = kVsyncIntervalMs; | 62 const int64 kMaxPreemptTimeMs = kVsyncIntervalMs; |
| 63 | 63 |
| 64 // Stop the preemption once the time for the longest pending IPC drops | 64 // Stop the preemption once the time for the longest pending IPC drops |
| 65 // below this threshold. | 65 // below this threshold. |
| 66 const int64 kStopPreemptThresholdMs = kVsyncIntervalMs; | 66 const int64 kStopPreemptThresholdMs = kVsyncIntervalMs; |
| 67 | 67 |
| 68 } // anonymous namespace | 68 } // anonymous namespace |
| 69 | 69 |
| 70 struct ChannelMessage { | |
| 71 uint32_t order_number; | |
| 72 base::TimeTicks time_received; | |
| 73 IPC::Message message; | |
| 74 | |
| 75 // TODO(dyen): Temporary sync point data, remove once new sync point lands. | |
| 76 bool retire_sync_point; | |
| 77 uint32 sync_point_number; | |
| 78 | |
| 79 ChannelMessage(uint32_t order_num, const IPC::Message& msg) | |
| 80 : order_number(order_num), | |
| 81 time_received(base::TimeTicks::Now()), | |
| 82 message(msg), | |
| 83 retire_sync_point(false), | |
| 84 sync_point_number(0) {} | |
| 85 }; | |
| 86 | |
| 87 class GpuChannelMessageQueue : public base::RefCounted<GpuChannelMessageQueue> { | |
piman (2015/09/01 03:55:26): Needs to be RefCountedThreadSafe
David Yen (2015/09/01 19:08:02): Done.
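piman's request matters because the queue ends up retained on both the IO thread (by the filter) and the main thread (by GpuChannel), so the last Release() can happen on either one. A minimal, self-contained sketch of why that forces the thread-safe variant: the reference count itself must be atomic. This models base::RefCountedThreadSafe with std::atomic rather than the real base classes.

```cpp
#include <atomic>
#include <cstdio>

// Simplified model of base::RefCountedThreadSafe. The count must be
// atomic because AddRef()/Release() may run on the IO and main threads
// concurrently; with a plain int (as in base::RefCounted), two racing
// Release() calls can both observe the same old count and either leak
// the object or delete it twice.
class ThreadSafeRefCounted {
 public:
  void AddRef() { ref_count_.fetch_add(1, std::memory_order_relaxed); }
  void Release() {
    // acq_rel on the decrement so the deleting thread sees all writes
    // made by other threads before they dropped their references.
    if (ref_count_.fetch_sub(1, std::memory_order_acq_rel) == 1)
      delete this;
  }

 protected:
  virtual ~ThreadSafeRefCounted() { std::puts("deleted once, on last ref"); }

 private:
  std::atomic<int> ref_count_{1};
};

int main() {
  auto* obj = new ThreadSafeRefCounted;  // refcount == 1 (creator's ref)
  obj->AddRef();                         // e.g. retained by the IO thread
  obj->Release();                        // IO thread drops its ref
  obj->Release();                        // main thread drops the last ref
}
```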
| 88 public: | |
| 89 static scoped_refptr<GpuChannelMessageQueue> Create( | |
| 90 GpuChannelManager* gpu_channel_manager, | |
| 91 base::WeakPtr<GpuChannel> gpu_channel, | |
| 92 scoped_refptr<base::SingleThreadTaskRunner> task_runner) { | |
| 93 return new GpuChannelMessageQueue(gpu_channel_manager, gpu_channel, | |
| 94 task_runner); | |
| 95 } | |
| 96 | |
| 97 void PushBackMessage(uint32_t order_number, | |
| 98 const IPC::Message& message) { | |
piman (2015/09/01 03:55:26): nit: indent, here and below (run git cl format)
David Yen (2015/09/01 19:08:02): Done.
| 99 base::subtle::Release_Store(&unprocessed_order_num_, order_number); | |
| 100 | |
| 101 base::AutoLock auto_lock(channel_messages_lock_); | |
| 102 const bool was_empty = channel_messages_.empty(); | |
| 103 channel_messages_.push_back(new ChannelMessage(order_number, message)); | |
| 104 if (was_empty) | |
| 105 ScheduleHandleMessageLocked(); | |
| 106 } | |
| 107 | |
| 108 void PushFrontMessage(const IPC::Message& message) { | |
| 109 // These are pushed out of order, so they are not given an order number. | |
| 110 base::AutoLock auto_lock(channel_messages_lock_); | |
| 111 const bool was_empty = channel_messages_.empty(); | |
| 112 channel_messages_.push_front(new ChannelMessage(static_cast<uint32_t>(-1), | |
piman (2015/09/01 03:55:26): nit: maybe define a const uint32_t kOutOfOrderNume…
David Yen (2015/09/01 19:08:02): Done.
| 113 message)); | |
| 114 if (was_empty) | |
| 115 ScheduleHandleMessageLocked(); | |
| 116 } | |
| 117 | |
| 118 void PushSyncPointMessage(uint32_t order_number, | |
| 119 const IPC::Message& message, | |
| 120 bool retire_sync_point, | |
| 121 uint32_t sync_point_num) { | |
| 122 DCHECK(message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID); | |
| 123 | |
| 124 base::subtle::Release_Store(&unprocessed_order_num_, order_number); | |
| 125 ChannelMessage* msg = new ChannelMessage(order_number, message); | |
| 126 msg->retire_sync_point = retire_sync_point; | |
| 127 msg->sync_point_number = sync_point_num; | |
| 128 | |
| 129 base::AutoLock auto_lock(channel_messages_lock_); | |
| 130 const bool was_empty = channel_messages_.empty(); | |
| 131 channel_messages_.push_back(msg); | |
| 132 if (was_empty) | |
| 133 ScheduleHandleMessageLocked(); | |
| 134 } | |
piman (2015/09/01 03:55:26): nit: all 3 functions (and PushUnfinishedMessage) c…
David Yen (2015/09/01 19:08:02): Done.
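All four Push* methods repeat the same lock / check-empty / push / schedule sequence, which is presumably the deduplication piman is asking for. A minimal sketch of a shared helper, with std::mutex and std::deque standing in for the base:: primitives; MessageQueue and PushMessageHelper are illustrative names, not necessarily what the CL landed.

```cpp
#include <cstdint>
#include <deque>
#include <memory>
#include <mutex>

struct ChannelMessage { uint32_t order_number; };  // trimmed stand-in

class MessageQueue {
 public:
  void PushBackMessage(std::unique_ptr<ChannelMessage> msg) {
    PushMessageHelper(std::move(msg), /*front=*/false);
  }
  void PushFrontMessage(std::unique_ptr<ChannelMessage> msg) {
    PushMessageHelper(std::move(msg), /*front=*/true);
  }

 private:
  // The single locked path: the empty -> non-empty detection and the
  // scheduling decision live in exactly one place instead of four.
  void PushMessageHelper(std::unique_ptr<ChannelMessage> msg, bool front) {
    std::lock_guard<std::mutex> lock(lock_);
    const bool was_empty = messages_.empty();
    if (front)
      messages_.push_front(std::move(msg));
    else
      messages_.push_back(std::move(msg));
    if (was_empty)
      ScheduleHandleMessageLocked();
  }
  void ScheduleHandleMessageLocked() {}  // PostTask(HandleMessage) in the CL

  std::mutex lock_;
  std::deque<std::unique_ptr<ChannelMessage>> messages_;
};

int main() {
  MessageQueue q;
  q.PushBackMessage(std::unique_ptr<ChannelMessage>(new ChannelMessage{1}));
  q.PushFrontMessage(std::unique_ptr<ChannelMessage>(new ChannelMessage{2}));
}
```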
| 135 | |
| 136 bool HasQueuedMessages() { | |
| 137 base::AutoLock auto_lock(channel_messages_lock_); | |
| 138 return !channel_messages_.empty(); | |
| 139 } | |
| 140 | |
| 141 base::TimeTicks GetNextMessageTimeTick() { | |
| 142 // We have to account for messages that are pushed out of order; those | |
| 143 // are pushed to the front of the queue and have an order number of -1. | |
| 144 base::TimeTicks next_time_tick; | |
| 145 base::AutoLock auto_lock(channel_messages_lock_); | |
| 146 for (const auto& msg : channel_messages_) { | |
| 147 if (msg->order_number != static_cast<uint32_t>(-1)) { | |
| 148 // Return the earliest time tick if we have some out of order ones. | |
| 149 return next_time_tick.is_null() ? | |
| 150 msg->time_received : | |
| 151 std::min(msg->time_received, next_time_tick); | |
| 152 } else { | |
| 153 // Store the last out of order message in next_time_tick. | |
| 154 next_time_tick = msg->time_received; | |
| 155 } | |
| 156 } | |
| 157 return next_time_tick; | |
| 158 } | |
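GetNextMessageTimeTick has a subtlety worth restating: out-of-order messages sit at the front of the deque, most recently pushed first, so the last one the loop sees before an in-order message is the oldest of them, and the function returns the earlier of that tick and the first in-order tick. A compilable restatement, with int64_t standing in for base::TimeTicks and 0 playing the role of is_null():

```cpp
#include <algorithm>
#include <cstdint>
#include <deque>

using TimeTick = int64_t;  // 0 == "is_null()"
constexpr uint32_t kOutOfOrder = static_cast<uint32_t>(-1);

struct Msg { uint32_t order; TimeTick received; };

// Mirrors GetNextMessageTimeTick(): scan from the front; keep
// overwriting `next` while in the out-of-order segment (ending on its
// oldest entry), then take the min with the first in-order tick.
TimeTick NextMessageTimeTick(const std::deque<Msg>& q) {
  TimeTick next = 0;
  for (const Msg& m : q) {
    if (m.order != kOutOfOrder)
      return next == 0 ? m.received : std::min(m.received, next);
    next = m.received;
  }
  return next;
}

int main() {
  // Front segment built by push_front: tick 20 was pushed before 30.
  std::deque<Msg> q = {{kOutOfOrder, 30}, {kOutOfOrder, 20}, {5, 10}, {6, 40}};
  // Oldest out-of-order tick is 20, first in-order tick is 10 -> 10.
  return NextMessageTimeTick(q) == 10 ? 0 : 1;
}
```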
| 159 | |
| 160 protected: | |
| 161 virtual ~GpuChannelMessageQueue() { | |
piman (2015/09/01 03:55:26): This logic needs to happen on the main thread. Si…
David Yen (2015/09/01 19:08:02): Ah, I just noticed that too. I've moved it to the…
| 162 base::AutoLock auto_lock(channel_messages_lock_); | |
| 163 for (const auto& msg : channel_messages_) { | |
piman (2015/09/01 03:55:26): I'd suggest making this ChannelMessage* msg for clarity.
David Yen (2015/09/01 19:08:02): Done.
| 164 const uint32_t sync_point = msg->sync_point_number; | |
| 165 gpu_channel_manager_->sync_point_manager()->RetireSyncPoint(sync_point); | |
piman (2015/09/01 03:55:26):
    if (sync_point)
      gpu_channel_manager_->sync_point_manager()->RetireSyncPoint(sync_point);
David Yen (2015/09/01 19:08:02): Done.
| 166 } | |
| 167 STLDeleteElements(&channel_messages_); | |
| 168 } | |
| 169 | |
| 170 private: | |
| 171 friend class GpuChannel; | |
| 172 friend class base::RefCounted<GpuChannelMessageQueue>; | |
| 173 | |
| 174 GpuChannelMessageQueue( | |
| 175 GpuChannelManager* gpu_channel_manager, | |
| 176 base::WeakPtr<GpuChannel> gpu_channel, | |
| 177 scoped_refptr<base::SingleThreadTaskRunner> task_runner) | |
| 178 : unprocessed_order_num_(0), | |
| 179 handle_messages_scheduled_(false), | |
| 180 gpu_channel_manager_(gpu_channel_manager), | |
| 181 gpu_channel_(gpu_channel), | |
| 182 task_runner_(task_runner) { | |
| 183 } | |
| 184 | |
| 185 void PushUnfinishedMessage(uint32_t order_number, | |
| 186 const IPC::Message& message) { | |
| 187 // This is pushed only if it was unfinished, so order number is kept. | |
| 188 base::AutoLock auto_lock(channel_messages_lock_); | |
| 189 const bool was_empty = channel_messages_.empty(); | |
| 190 channel_messages_.push_front(new ChannelMessage(order_number, message)); | |
| 191 if (was_empty) | |
| 192 ScheduleHandleMessageLocked(); | |
| 193 } | |
| 194 | |
| 195 void ScheduleHandleMessage() { | |
| 196 base::AutoLock auto_lock(channel_messages_lock_); | |
| 197 ScheduleHandleMessageLocked(); | |
| 198 } | |
| 199 | |
| 200 void ScheduleHandleMessageLocked() { | |
| 201 channel_messages_lock_.AssertAcquired(); | |
| 202 if (!handle_messages_scheduled_) { | |
| 203 task_runner_->PostTask( | |
| 204 FROM_HERE, | |
| 205 base::Bind(&GpuChannel::HandleMessage, gpu_channel_)); | |
| 206 handle_messages_scheduled_ = true; | |
| 207 } | |
| 208 } | |
| 209 | |
| 210 // Highest IPC order number seen, set when queued on the IO thread. | |
| 211 base::subtle::Atomic32 unprocessed_order_num_; | |
piman (2015/09/01 03:55:26): If we really need this, I'd prefer it to be a uint…
David Yen (2015/09/01 19:08:02): Done.
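The Release_Store into unprocessed_order_num_ on the IO thread pairs with the Acquire_Load in GpuChannel::GetUnprocessedOrderNum near the bottom of this file. In portable C++ the same publish/observe pattern looks like the sketch below; it uses std::atomic rather than the base::subtle API, and the names are illustrative.

```cpp
#include <atomic>
#include <cassert>
#include <cstdint>
#include <thread>

// Models unprocessed_order_num_: the IO thread publishes the newest
// order number with release semantics; the main thread reads it with
// acquire semantics, so every write made before the store is visible
// to a load that observes the stored value.
std::atomic<uint32_t> unprocessed_order_num{0};
uint32_t payload = 0;  // stands in for the queued message state

int main() {
  std::thread io([] {
    payload = 123;  // write the message state first
    unprocessed_order_num.store(7, std::memory_order_release);
  });
  std::thread main_thread([] {
    if (unprocessed_order_num.load(std::memory_order_acquire) == 7)
      assert(payload == 123);  // guaranteed by the release/acquire pair
  });
  io.join();
  main_thread.join();
}
```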
| 212 | |
| 213 std::deque<ChannelMessage*> channel_messages_; | |
| 214 bool handle_messages_scheduled_; | |
| 215 | |
| 216 // This lock protects both handle_messages_scheduled_ and channel_messages_. | |
| 217 base::Lock channel_messages_lock_; | |
| 218 | |
| 219 GpuChannelManager* gpu_channel_manager_; | |
| 220 base::WeakPtr<GpuChannel> gpu_channel_; | |
| 221 scoped_refptr<base::SingleThreadTaskRunner> task_runner_; | |
| 222 | |
| 223 DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageQueue); | |
| 224 }; | |
| 225 | |
| 70 // This filter does three things: | 226 // This filter does three things: |
| 71 // - it counts and timestamps each message forwarded to the channel | 227 // - it counts and timestamps each message forwarded to the channel |
| 72 // so that we can preempt other channels if a message takes too long to | 228 // so that we can preempt other channels if a message takes too long to |
| 73 // process. To guarantee fairness, we must wait a minimum amount of time | 229 // process. To guarantee fairness, we must wait a minimum amount of time |
| 74 // before preempting and we limit the amount of time that we can preempt in | 230 // before preempting and we limit the amount of time that we can preempt in |
| 75 // one shot (see constants above). | 231 // one shot (see constants above). |
| 76 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO | 232 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO |
| 77 // thread, generating the sync point ID and responding immediately, and then | 233 // thread, generating the sync point ID and responding immediately, and then |
| 78 // posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message | 234 // posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message |
| 79 // into the channel's queue. | 235 // into the channel's queue. |
| 80 // - it generates mailbox names for clients of the GPU process on the IO thread. | 236 // - it generates mailbox names for clients of the GPU process on the IO thread. |
| 81 class GpuChannelMessageFilter : public IPC::MessageFilter { | 237 class GpuChannelMessageFilter : public IPC::MessageFilter { |
| 82 public: | 238 public: |
| 83 GpuChannelMessageFilter( | 239 GpuChannelMessageFilter( |
| 240 scoped_refptr<GpuChannelMessageQueue> message_queue, | |
| 84 base::WeakPtr<GpuChannel> gpu_channel, | 241 base::WeakPtr<GpuChannel> gpu_channel, |
| 85 gpu::SyncPointManager* sync_point_manager, | 242 gpu::SyncPointManager* sync_point_manager, |
| 86 scoped_refptr<base::SingleThreadTaskRunner> task_runner, | 243 scoped_refptr<base::SingleThreadTaskRunner> task_runner, |
| 87 bool future_sync_points) | 244 bool future_sync_points) |
| 88 : preemption_state_(IDLE), | 245 : preemption_state_(IDLE), |
| 246 message_queue_(message_queue), | |
| 89 gpu_channel_(gpu_channel), | 247 gpu_channel_(gpu_channel), |
| 90 sender_(nullptr), | 248 sender_(nullptr), |
| 91 sync_point_manager_(sync_point_manager), | 249 sync_point_manager_(sync_point_manager), |
| 92 task_runner_(task_runner), | 250 task_runner_(task_runner), |
| 93 messages_forwarded_to_channel_(0), | |
| 94 a_stub_is_descheduled_(false), | 251 a_stub_is_descheduled_(false), |
| 95 future_sync_points_(future_sync_points) {} | 252 future_sync_points_(future_sync_points) {} |
| 96 | 253 |
| 97 void OnFilterAdded(IPC::Sender* sender) override { | 254 void OnFilterAdded(IPC::Sender* sender) override { |
| 98 DCHECK(!sender_); | 255 DCHECK(!sender_); |
| 99 sender_ = sender; | 256 sender_ = sender; |
| 100 timer_ = make_scoped_ptr(new base::OneShotTimer<GpuChannelMessageFilter>); | 257 timer_ = make_scoped_ptr(new base::OneShotTimer<GpuChannelMessageFilter>); |
| 101 } | 258 } |
| 102 | 259 |
| 103 void OnFilterRemoved() override { | 260 void OnFilterRemoved() override { |
| 104 DCHECK(sender_); | 261 DCHECK(sender_); |
| 105 sender_ = nullptr; | 262 sender_ = nullptr; |
| 106 timer_ = nullptr; | 263 timer_ = nullptr; |
| 107 } | 264 } |
| 108 | 265 |
| 109 bool OnMessageReceived(const IPC::Message& message) override { | 266 bool OnMessageReceived(const IPC::Message& message) override { |
| 110 DCHECK(sender_); | 267 DCHECK(sender_); |
| 111 | 268 |
| 269 const uint32_t order_number = global_order_counter_++; | |
| 112 bool handled = false; | 270 bool handled = false; |
| 113 if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) && | 271 if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) && |
| 114 !future_sync_points_) { | 272 !future_sync_points_) { |
| 115 DLOG(ERROR) << "Untrusted client should not send " | 273 DLOG(ERROR) << "Untrusted client should not send " |
| 116 "GpuCommandBufferMsg_RetireSyncPoint message"; | 274 "GpuCommandBufferMsg_RetireSyncPoint message"; |
| 117 return true; | 275 return true; |
| 118 } | 276 } |
| 119 | 277 |
| 120 if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { | 278 if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { |
| 121 base::Tuple<bool> retire; | 279 base::Tuple<bool> retire; |
| 122 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); | 280 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); |
| 123 if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message, | 281 if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message, |
| 124 &retire)) { | 282 &retire)) { |
| 125 reply->set_reply_error(); | 283 reply->set_reply_error(); |
| 126 Send(reply); | 284 Send(reply); |
| 127 return true; | 285 return true; |
| 128 } | 286 } |
| 129 if (!future_sync_points_ && !base::get<0>(retire)) { | 287 if (!future_sync_points_ && !base::get<0>(retire)) { |
| 130 LOG(ERROR) << "Untrusted contexts can't create future sync points"; | 288 LOG(ERROR) << "Untrusted contexts can't create future sync points"; |
| 131 reply->set_reply_error(); | 289 reply->set_reply_error(); |
| 132 Send(reply); | 290 Send(reply); |
| 133 return true; | 291 return true; |
| 134 } | 292 } |
| 135 uint32 sync_point = sync_point_manager_->GenerateSyncPoint(); | 293 uint32 sync_point = sync_point_manager_->GenerateSyncPoint(); |
| 136 GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point); | 294 GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point); |
| 137 Send(reply); | 295 Send(reply); |
| 138 task_runner_->PostTask( | 296 |
| 139 FROM_HERE, | 297 message_queue_->PushSyncPointMessage(order_number, message, |
| 140 base::Bind(&GpuChannelMessageFilter::InsertSyncPointOnMainThread, | 298 base::get<0>(retire), sync_point); |
| 141 gpu_channel_, sync_point_manager_, message.routing_id(), | |
| 142 base::get<0>(retire), sync_point)); | |
| 143 handled = true; | 299 handled = true; |
| 144 } | 300 } |
| 145 | 301 |
| 146 // These are handled by GpuJpegDecodeAccelerator and | 302 // These are handled by GpuJpegDecodeAccelerator and |
| 147 // GpuVideoDecodeAccelerator. | 303 // GpuVideoDecodeAccelerator. |
| 148 // TODO(kcwu) Modify GpuChannel::AddFilter to handle additional filters by | 304 // TODO(kcwu) Modify GpuChannel::AddFilter to handle additional filters by |
| 149 // GpuChannelMessageFilter instead of by IPC::SyncChannel directly. Then we | 305 // GpuChannelMessageFilter instead of by IPC::SyncChannel directly. Then we |
| 150 // don't need to exclude them one by one here. | 306 // don't need to exclude them one by one here. |
| 151 if (message.type() == AcceleratedJpegDecoderMsg_Decode::ID || | 307 if (message.type() == AcceleratedJpegDecoderMsg_Decode::ID || |
| 152 message.type() == AcceleratedJpegDecoderMsg_Destroy::ID || | 308 message.type() == AcceleratedJpegDecoderMsg_Destroy::ID || |
| 153 message.type() == AcceleratedVideoDecoderMsg_Decode::ID) { | 309 message.type() == AcceleratedVideoDecoderMsg_Decode::ID) { |
| 154 return false; | 310 return false; |
| 155 } | 311 } |
| 156 | 312 |
| 157 // All other messages get processed by the GpuChannel. | 313 // Forward all other messages to the GPU Channel. |
| 158 messages_forwarded_to_channel_++; | 314 if (!handled && !message.is_reply() && !message.should_unblock()) { |
| 159 if (preempting_flag_.get()) | 315 if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || |
| 160 pending_messages_.push(PendingMessage(messages_forwarded_to_channel_)); | 316 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { |
| 317 // Move Wait commands to the head of the queue, so the renderer | |
| 318 // doesn't have to wait any longer than necessary. | |
| 319 message_queue_->PushFrontMessage(message); | |
| 320 } else { | |
| 321 message_queue_->PushBackMessage(order_number, message); | |
| 322 } | |
| 323 handled = true; | |
| 324 } | |
| 325 | |
| 161 UpdatePreemptionState(); | 326 UpdatePreemptionState(); |
| 162 | |
| 163 return handled; | 327 return handled; |
| 164 } | 328 } |
| 165 | 329 |
| 166 void MessageProcessed(uint64 messages_processed) { | 330 void OnMessageProcessed() { |
| 167 while (!pending_messages_.empty() && | |
| 168 pending_messages_.front().message_number <= messages_processed) | |
| 169 pending_messages_.pop(); | |
| 170 UpdatePreemptionState(); | 331 UpdatePreemptionState(); |
| 171 } | 332 } |
| 172 | 333 |
| 173 void SetPreemptingFlagAndSchedulingState( | 334 void SetPreemptingFlagAndSchedulingState( |
| 174 gpu::PreemptionFlag* preempting_flag, | 335 gpu::PreemptionFlag* preempting_flag, |
| 175 bool a_stub_is_descheduled) { | 336 bool a_stub_is_descheduled) { |
| 176 preempting_flag_ = preempting_flag; | 337 preempting_flag_ = preempting_flag; |
| 177 a_stub_is_descheduled_ = a_stub_is_descheduled; | 338 a_stub_is_descheduled_ = a_stub_is_descheduled; |
| 178 } | 339 } |
| 179 | 340 |
| (...skipping 25 matching lines...) | |
| 205 // We would like to preempt, but some stub is descheduled. | 366 // We would like to preempt, but some stub is descheduled. |
| 206 WOULD_PREEMPT_DESCHEDULED, | 367 WOULD_PREEMPT_DESCHEDULED, |
| 207 }; | 368 }; |
| 208 | 369 |
| 209 PreemptionState preemption_state_; | 370 PreemptionState preemption_state_; |
| 210 | 371 |
| 211 // Maximum amount of time that we can spend in PREEMPTING. | 372 // Maximum amount of time that we can spend in PREEMPTING. |
| 212 // It is reset when we transition to IDLE. | 373 // It is reset when we transition to IDLE. |
| 213 base::TimeDelta max_preemption_time_; | 374 base::TimeDelta max_preemption_time_; |
| 214 | 375 |
| 215 struct PendingMessage { | |
| 216 uint64 message_number; | |
| 217 base::TimeTicks time_received; | |
| 218 | |
| 219 explicit PendingMessage(uint64 message_number) | |
| 220 : message_number(message_number), | |
| 221 time_received(base::TimeTicks::Now()) { | |
| 222 } | |
| 223 }; | |
| 224 | |
| 225 void UpdatePreemptionState() { | 376 void UpdatePreemptionState() { |
| 226 switch (preemption_state_) { | 377 switch (preemption_state_) { |
| 227 case IDLE: | 378 case IDLE: |
| 228 if (preempting_flag_.get() && !pending_messages_.empty()) | 379 if (preempting_flag_.get() && message_queue_->HasQueuedMessages()) |
| 229 TransitionToWaiting(); | 380 TransitionToWaiting(); |
| 230 break; | 381 break; |
| 231 case WAITING: | 382 case WAITING: |
| 232 // A timer will transition us to CHECKING. | 383 // A timer will transition us to CHECKING. |
| 233 DCHECK(timer_->IsRunning()); | 384 DCHECK(timer_->IsRunning()); |
| 234 break; | 385 break; |
| 235 case CHECKING: | 386 case CHECKING: |
| 236 if (!pending_messages_.empty()) { | 387 { |
| 237 base::TimeDelta time_elapsed = | 388 base::TimeTicks time_tick = message_queue_->GetNextMessageTimeTick(); |
| 238 base::TimeTicks::Now() - pending_messages_.front().time_received; | 389 if (!time_tick.is_null()) { |
| 239 if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) { | 390 base::TimeDelta time_elapsed = base::TimeTicks::Now() - time_tick; |
| 240 // Schedule another check for when the IPC may go long. | 391 if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) { |
| 241 timer_->Start( | 392 // Schedule another check for when the IPC may go long. |
| 242 FROM_HERE, | 393 timer_->Start( |
| 243 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) - | 394 FROM_HERE, |
| 244 time_elapsed, | 395 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) - |
| 245 this, &GpuChannelMessageFilter::UpdatePreemptionState); | 396 time_elapsed, |
| 246 } else { | 397 this, &GpuChannelMessageFilter::UpdatePreemptionState); |
| 247 if (a_stub_is_descheduled_) | 398 } else { |
| 248 TransitionToWouldPreemptDescheduled(); | 399 if (a_stub_is_descheduled_) |
| 249 else | 400 TransitionToWouldPreemptDescheduled(); |
| 250 TransitionToPreempting(); | 401 else |
| 402 TransitionToPreempting(); | |
| 403 } | |
| 251 } | 404 } |
| 252 } | 405 } |
| 253 break; | 406 break; |
| 254 case PREEMPTING: | 407 case PREEMPTING: |
| 255 // A TransitionToIdle() timer should always be running in this state. | 408 // A TransitionToIdle() timer should always be running in this state. |
| 256 DCHECK(timer_->IsRunning()); | 409 DCHECK(timer_->IsRunning()); |
| 257 if (a_stub_is_descheduled_) | 410 if (a_stub_is_descheduled_) |
| 258 TransitionToWouldPreemptDescheduled(); | 411 TransitionToWouldPreemptDescheduled(); |
| 259 else | 412 else |
| 260 TransitionToIdleIfCaughtUp(); | 413 TransitionToIdleIfCaughtUp(); |
| 261 break; | 414 break; |
| 262 case WOULD_PREEMPT_DESCHEDULED: | 415 case WOULD_PREEMPT_DESCHEDULED: |
| 263 // A TransitionToIdle() timer should never be running in this state. | 416 // A TransitionToIdle() timer should never be running in this state. |
| 264 DCHECK(!timer_->IsRunning()); | 417 DCHECK(!timer_->IsRunning()); |
| 265 if (!a_stub_is_descheduled_) | 418 if (!a_stub_is_descheduled_) |
| 266 TransitionToPreempting(); | 419 TransitionToPreempting(); |
| 267 else | 420 else |
| 268 TransitionToIdleIfCaughtUp(); | 421 TransitionToIdleIfCaughtUp(); |
| 269 break; | 422 break; |
| 270 default: | 423 default: |
| 271 NOTREACHED(); | 424 NOTREACHED(); |
| 272 } | 425 } |
| 273 } | 426 } |
| 274 | 427 |
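Taken together with the Transition* helpers, the switch above implements a small state machine. A summary of the transitions visible in this diff (the bodies of the skipped helpers may refine the exact conditions):

```text
IDLE        -> WAITING                   preempting flag set and a message
                                         is queued
WAITING     -> CHECKING                  one-shot timer fires
CHECKING    -> PREEMPTING                oldest queued IPC is older than
                                         kPreemptWaitTimeMs
CHECKING    -> WOULD_PREEMPT_DESCHEDULED as above, but a stub is descheduled
PREEMPTING <-> WOULD_PREEMPT_DESCHEDULED a stub deschedules / reschedules
PREEMPTING,
WOULD_PREEMPT_DESCHEDULED -> IDLE        queue empty, or oldest IPC under
                                         kStopPreemptThresholdMs; PREEMPTING
                                         is also force-ended by a timer
                                         after kMaxPreemptTimeMs
```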
| 275 void TransitionToIdleIfCaughtUp() { | 428 void TransitionToIdleIfCaughtUp() { |
| 276 DCHECK(preemption_state_ == PREEMPTING || | 429 DCHECK(preemption_state_ == PREEMPTING || |
| 277 preemption_state_ == WOULD_PREEMPT_DESCHEDULED); | 430 preemption_state_ == WOULD_PREEMPT_DESCHEDULED); |
| 278 if (pending_messages_.empty()) { | 431 base::TimeTicks next_tick = message_queue_->GetNextMessageTimeTick(); |
| 432 if (next_tick.is_null()) { | |
| 279 TransitionToIdle(); | 433 TransitionToIdle(); |
| 280 } else { | 434 } else { |
| 281 base::TimeDelta time_elapsed = | 435 base::TimeDelta time_elapsed = base::TimeTicks::Now() - next_tick; |
| 282 base::TimeTicks::Now() - pending_messages_.front().time_received; | |
| 283 if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs) | 436 if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs) |
| 284 TransitionToIdle(); | 437 TransitionToIdle(); |
| 285 } | 438 } |
| 286 } | 439 } |
| 287 | 440 |
| 288 void TransitionToIdle() { | 441 void TransitionToIdle() { |
| 289 DCHECK(preemption_state_ == PREEMPTING || | 442 DCHECK(preemption_state_ == PREEMPTING || |
| 290 preemption_state_ == WOULD_PREEMPT_DESCHEDULED); | 443 preemption_state_ == WOULD_PREEMPT_DESCHEDULED); |
| 291 // Stop any outstanding timer set to force us from PREEMPTING to IDLE. | 444 // Stop any outstanding timer set to force us from PREEMPTING to IDLE. |
| 292 timer_->Stop(); | 445 timer_->Stop(); |
| (...skipping 65 matching lines...) | |
| 358 } | 511 } |
| 359 } | 512 } |
| 360 | 513 |
| 361 preemption_state_ = WOULD_PREEMPT_DESCHEDULED; | 514 preemption_state_ = WOULD_PREEMPT_DESCHEDULED; |
| 362 preempting_flag_->Reset(); | 515 preempting_flag_->Reset(); |
| 363 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0); | 516 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0); |
| 364 | 517 |
| 365 UpdatePreemptionState(); | 518 UpdatePreemptionState(); |
| 366 } | 519 } |
| 367 | 520 |
| 368 static void InsertSyncPointOnMainThread( | 521 // NOTE: The message_queue_ is used to handle messages on the main thread. |
| 369 base::WeakPtr<GpuChannel> gpu_channel, | 522 // The gpu_channel_ weak pointer is only dereferenced on the |
| 370 gpu::SyncPointManager* manager, | 523 // main thread - therefore the WeakPtr assumptions are respected. |
| 371 int32 routing_id, | 524 scoped_refptr<GpuChannelMessageQueue> message_queue_; |
| 372 bool retire, | |
| 373 uint32 sync_point) { | |
| 374 // This function must ensure that the sync point will be retired. Normally | |
| 375 // we'll find the stub based on the routing ID, and associate the sync point | |
| 376 // with it, but if that fails for any reason (channel or stub already | |
| 377 // deleted, invalid routing id), we need to retire the sync point | |
| 378 // immediately. | |
| 379 if (gpu_channel) { | |
| 380 GpuCommandBufferStub* stub = gpu_channel->LookupCommandBuffer(routing_id); | |
| 381 if (stub) { | |
| 382 stub->AddSyncPoint(sync_point); | |
| 383 if (retire) { | |
| 384 GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point); | |
| 385 gpu_channel->OnMessageReceived(message); | |
| 386 } | |
| 387 return; | |
| 388 } else { | |
| 389 gpu_channel->MessageProcessed(); | |
| 390 } | |
| 391 } | |
| 392 manager->RetireSyncPoint(sync_point); | |
| 393 } | |
| 394 | |
| 395 // NOTE: this weak pointer is never dereferenced on the IO thread, it's only | |
| 396 // passed through - therefore the WeakPtr assumptions are respected. | |
| 397 base::WeakPtr<GpuChannel> gpu_channel_; | 525 base::WeakPtr<GpuChannel> gpu_channel_; |
piman (2015/09/01 03:55:26): I don't think you need this any more.
David Yen (2015/09/01 19:08:02): Done.
| 398 IPC::Sender* sender_; | 526 IPC::Sender* sender_; |
| 399 gpu::SyncPointManager* sync_point_manager_; | 527 gpu::SyncPointManager* sync_point_manager_; |
| 400 scoped_refptr<base::SingleThreadTaskRunner> task_runner_; | 528 scoped_refptr<base::SingleThreadTaskRunner> task_runner_; |
| 401 scoped_refptr<gpu::PreemptionFlag> preempting_flag_; | 529 scoped_refptr<gpu::PreemptionFlag> preempting_flag_; |
| 402 | 530 |
| 403 std::queue<PendingMessage> pending_messages_; | |
| 404 | |
| 405 // Count of the number of IPCs forwarded to the GpuChannel. | |
| 406 uint64 messages_forwarded_to_channel_; | |
| 407 | |
| 408 // This timer is created and destroyed on the IO thread. | 531 // This timer is created and destroyed on the IO thread. |
| 409 scoped_ptr<base::OneShotTimer<GpuChannelMessageFilter>> timer_; | 532 scoped_ptr<base::OneShotTimer<GpuChannelMessageFilter>> timer_; |
| 410 | 533 |
| 411 bool a_stub_is_descheduled_; | 534 bool a_stub_is_descheduled_; |
| 412 | 535 |
| 413 // True if this channel can create future sync points. | 536 // True if this channel can create future sync points. |
| 414 bool future_sync_points_; | 537 bool future_sync_points_; |
| 538 | |
| 539 // This number is only ever incremented/read on the IO thread. | |
| 540 static uint32_t global_order_counter_; | |
| 415 }; | 541 }; |
| 416 | 542 |
| 543 uint32_t GpuChannelMessageFilter::global_order_counter_ = 0; | |
| 544 | |
| 417 GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager, | 545 GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager, |
| 418 GpuWatchdog* watchdog, | 546 GpuWatchdog* watchdog, |
| 419 gfx::GLShareGroup* share_group, | 547 gfx::GLShareGroup* share_group, |
| 420 gpu::gles2::MailboxManager* mailbox, | 548 gpu::gles2::MailboxManager* mailbox, |
| 421 base::SingleThreadTaskRunner* task_runner, | 549 base::SingleThreadTaskRunner* task_runner, |
| 422 base::SingleThreadTaskRunner* io_task_runner, | 550 base::SingleThreadTaskRunner* io_task_runner, |
| 423 int client_id, | 551 int client_id, |
| 424 uint64_t client_tracing_id, | 552 uint64_t client_tracing_id, |
| 425 bool software, | 553 bool software, |
| 426 bool allow_future_sync_points) | 554 bool allow_future_sync_points) |
| 427 : gpu_channel_manager_(gpu_channel_manager), | 555 : gpu_channel_manager_(gpu_channel_manager), |
| 428 channel_id_(IPC::Channel::GenerateVerifiedChannelID("gpu")), | 556 channel_id_(IPC::Channel::GenerateVerifiedChannelID("gpu")), |
| 429 messages_processed_(0), | |
| 430 client_id_(client_id), | 557 client_id_(client_id), |
| 431 client_tracing_id_(client_tracing_id), | 558 client_tracing_id_(client_tracing_id), |
| 432 task_runner_(task_runner), | 559 task_runner_(task_runner), |
| 433 io_task_runner_(io_task_runner), | 560 io_task_runner_(io_task_runner), |
| 434 share_group_(share_group ? share_group : new gfx::GLShareGroup), | 561 share_group_(share_group ? share_group : new gfx::GLShareGroup), |
| 435 mailbox_manager_(mailbox | 562 mailbox_manager_(mailbox |
| 436 ? scoped_refptr<gpu::gles2::MailboxManager>(mailbox) | 563 ? scoped_refptr<gpu::gles2::MailboxManager>(mailbox) |
| 437 : gpu::gles2::MailboxManager::Create()), | 564 : gpu::gles2::MailboxManager::Create()), |
| 438 subscription_ref_set_(new gpu::gles2::SubscriptionRefSet), | 565 subscription_ref_set_(new gpu::gles2::SubscriptionRefSet), |
| 439 pending_valuebuffer_state_(new gpu::ValueStateMap), | 566 pending_valuebuffer_state_(new gpu::ValueStateMap), |
| 440 watchdog_(watchdog), | 567 watchdog_(watchdog), |
| 441 software_(software), | 568 software_(software), |
| 442 handle_messages_scheduled_(false), | 569 current_order_num_(0), |
| 443 currently_processing_message_(nullptr), | 570 processed_order_num_(0), |
| 444 num_stubs_descheduled_(0), | 571 num_stubs_descheduled_(0), |
| 445 allow_future_sync_points_(allow_future_sync_points), | 572 allow_future_sync_points_(allow_future_sync_points), |
| 446 weak_factory_(this) { | 573 weak_factory_(this) { |
| 447 DCHECK(gpu_channel_manager); | 574 DCHECK(gpu_channel_manager); |
| 448 DCHECK(client_id); | 575 DCHECK(client_id); |
| 449 | 576 |
| 577 message_queue_ = GpuChannelMessageQueue::Create(gpu_channel_manager, | |
| 578 weak_factory_.GetWeakPtr(), | |
| 579 task_runner); | |
| 580 | |
| 450 filter_ = new GpuChannelMessageFilter( | 581 filter_ = new GpuChannelMessageFilter( |
| 451 weak_factory_.GetWeakPtr(), gpu_channel_manager_->sync_point_manager(), | 582 message_queue_, weak_factory_.GetWeakPtr(), |
| 583 gpu_channel_manager_->sync_point_manager(), | |
| 452 task_runner_, allow_future_sync_points_); | 584 task_runner_, allow_future_sync_points_); |
| 453 | 585 |
| 454 subscription_ref_set_->AddObserver(this); | 586 subscription_ref_set_->AddObserver(this); |
| 455 } | 587 } |
| 456 | 588 |
| 457 GpuChannel::~GpuChannel() { | 589 GpuChannel::~GpuChannel() { |
| 458 // Clear stubs first because of dependencies. | 590 // Clear stubs first because of dependencies. |
| 459 stubs_.clear(); | 591 stubs_.clear(); |
| 460 | 592 |
| 461 STLDeleteElements(&deferred_messages_); | |
| 462 subscription_ref_set_->RemoveObserver(this); | 593 subscription_ref_set_->RemoveObserver(this); |
| 463 if (preempting_flag_.get()) | 594 if (preempting_flag_.get()) |
| 464 preempting_flag_->Reset(); | 595 preempting_flag_->Reset(); |
| 465 } | 596 } |
| 466 | 597 |
| 467 IPC::ChannelHandle GpuChannel::Init(base::WaitableEvent* shutdown_event, | 598 IPC::ChannelHandle GpuChannel::Init(base::WaitableEvent* shutdown_event, |
| 468 IPC::AttachmentBroker* attachment_broker) { | 599 IPC::AttachmentBroker* attachment_broker) { |
| 469 DCHECK(shutdown_event); | 600 DCHECK(shutdown_event); |
| 470 DCHECK(!channel_); | 601 DCHECK(!channel_); |
| 471 | 602 |
| (...skipping 14 matching lines...) | |
| 486 channel_->AddFilter(filter_.get()); | 617 channel_->AddFilter(filter_.get()); |
| 487 | 618 |
| 488 return channel_handle; | 619 return channel_handle; |
| 489 } | 620 } |
| 490 | 621 |
| 491 base::ProcessId GpuChannel::GetClientPID() const { | 622 base::ProcessId GpuChannel::GetClientPID() const { |
| 492 return channel_->GetPeerPID(); | 623 return channel_->GetPeerPID(); |
| 493 } | 624 } |
| 494 | 625 |
| 495 bool GpuChannel::OnMessageReceived(const IPC::Message& message) { | 626 bool GpuChannel::OnMessageReceived(const IPC::Message& message) { |
| 496 DVLOG(1) << "received message @" << &message << " on channel @" << this | 627 // All messages should be pushed to channel_messages_ and handled separately. |
| 497 << " with type " << message.type(); | 628 NOTREACHED(); |
| 498 | 629 return false; |
| 499 if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || | |
| 500 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { | |
| 501 // Move Wait commands to the head of the queue, so the renderer | |
| 502 // doesn't have to wait any longer than necessary. | |
| 503 deferred_messages_.push_front(new IPC::Message(message)); | |
| 504 } else { | |
| 505 deferred_messages_.push_back(new IPC::Message(message)); | |
| 506 } | |
| 507 | |
| 508 OnScheduled(); | |
| 509 | |
| 510 return true; | |
| 511 } | 630 } |
| 512 | 631 |
| 513 void GpuChannel::OnChannelError() { | 632 void GpuChannel::OnChannelError() { |
| 514 gpu_channel_manager_->RemoveChannel(client_id_); | 633 gpu_channel_manager_->RemoveChannel(client_id_); |
| 515 } | 634 } |
| 516 | 635 |
| 517 bool GpuChannel::Send(IPC::Message* message) { | 636 bool GpuChannel::Send(IPC::Message* message) { |
| 518 // The GPU process must never send a synchronous IPC message to the renderer | 637 // The GPU process must never send a synchronous IPC message to the renderer |
| 519 // process. This could result in deadlock. | 638 // process. This could result in deadlock. |
| 520 DCHECK(!message->is_sync()); | 639 DCHECK(!message->is_sync()); |
| (...skipping 12 matching lines...) | |
| 533 void GpuChannel::OnAddSubscription(unsigned int target) { | 652 void GpuChannel::OnAddSubscription(unsigned int target) { |
| 534 gpu_channel_manager()->Send( | 653 gpu_channel_manager()->Send( |
| 535 new GpuHostMsg_AddSubscription(client_id_, target)); | 654 new GpuHostMsg_AddSubscription(client_id_, target)); |
| 536 } | 655 } |
| 537 | 656 |
| 538 void GpuChannel::OnRemoveSubscription(unsigned int target) { | 657 void GpuChannel::OnRemoveSubscription(unsigned int target) { |
| 539 gpu_channel_manager()->Send( | 658 gpu_channel_manager()->Send( |
| 540 new GpuHostMsg_RemoveSubscription(client_id_, target)); | 659 new GpuHostMsg_RemoveSubscription(client_id_, target)); |
| 541 } | 660 } |
| 542 | 661 |
| 543 void GpuChannel::RequeueMessage() { | |
| 544 DCHECK(currently_processing_message_); | |
| 545 deferred_messages_.push_front( | |
| 546 new IPC::Message(*currently_processing_message_)); | |
| 547 messages_processed_--; | |
| 548 currently_processing_message_ = NULL; | |
| 549 } | |
| 550 | |
| 551 void GpuChannel::OnScheduled() { | |
| 552 if (handle_messages_scheduled_) | |
| 553 return; | |
| 554 // Post a task to handle any deferred messages. The deferred message queue is | |
| 555 // not emptied here, which ensures that OnMessageReceived will continue to | |
| 556 // defer newly received messages until the ones in the queue have all been | |
| 557 // handled by HandleMessage. HandleMessage is invoked as a | |
| 558 // task to prevent reentrancy. | |
| 559 task_runner_->PostTask(FROM_HERE, base::Bind(&GpuChannel::HandleMessage, | |
| 560 weak_factory_.GetWeakPtr())); | |
| 561 handle_messages_scheduled_ = true; | |
| 562 } | |
| 563 | |
| 564 void GpuChannel::StubSchedulingChanged(bool scheduled) { | 662 void GpuChannel::StubSchedulingChanged(bool scheduled) { |
| 565 bool a_stub_was_descheduled = num_stubs_descheduled_ > 0; | 663 bool a_stub_was_descheduled = num_stubs_descheduled_ > 0; |
| 566 if (scheduled) { | 664 if (scheduled) { |
| 567 num_stubs_descheduled_--; | 665 num_stubs_descheduled_--; |
| 568 OnScheduled(); | 666 message_queue_->ScheduleHandleMessage(); |
| 569 } else { | 667 } else { |
| 570 num_stubs_descheduled_++; | 668 num_stubs_descheduled_++; |
| 571 } | 669 } |
| 572 DCHECK_LE(num_stubs_descheduled_, stubs_.size()); | 670 DCHECK_LE(num_stubs_descheduled_, stubs_.size()); |
| 573 bool a_stub_is_descheduled = num_stubs_descheduled_ > 0; | 671 bool a_stub_is_descheduled = num_stubs_descheduled_ > 0; |
| 574 | 672 |
| 575 if (a_stub_is_descheduled != a_stub_was_descheduled) { | 673 if (a_stub_is_descheduled != a_stub_was_descheduled) { |
| 576 if (preempting_flag_.get()) { | 674 if (preempting_flag_.get()) { |
| 577 io_task_runner_->PostTask( | 675 io_task_runner_->PostTask( |
| 578 FROM_HERE, | 676 FROM_HERE, |
| (...skipping 76 matching lines...) | |
| 655 preempting_flag_ = new gpu::PreemptionFlag; | 753 preempting_flag_ = new gpu::PreemptionFlag; |
| 656 io_task_runner_->PostTask( | 754 io_task_runner_->PostTask( |
| 657 FROM_HERE, | 755 FROM_HERE, |
| 658 base::Bind( | 756 base::Bind( |
| 659 &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState, | 757 &GpuChannelMessageFilter::SetPreemptingFlagAndSchedulingState, |
| 660 filter_, preempting_flag_, num_stubs_descheduled_ > 0)); | 758 filter_, preempting_flag_, num_stubs_descheduled_ > 0)); |
| 661 } | 759 } |
| 662 return preempting_flag_.get(); | 760 return preempting_flag_.get(); |
| 663 } | 761 } |
| 664 | 762 |
| 763 bool GpuChannel::handle_messages_scheduled() const { | |
| 764 base::AutoLock auto_lock(message_queue_->channel_messages_lock_); | |
| 765 return message_queue_->handle_messages_scheduled_; | |
| 766 } | |
| 767 | |
| 665 void GpuChannel::SetPreemptByFlag( | 768 void GpuChannel::SetPreemptByFlag( |
| 666 scoped_refptr<gpu::PreemptionFlag> preempted_flag) { | 769 scoped_refptr<gpu::PreemptionFlag> preempted_flag) { |
| 667 preempted_flag_ = preempted_flag; | 770 preempted_flag_ = preempted_flag; |
| 668 | 771 |
| 669 for (auto& kv : stubs_) | 772 for (auto& kv : stubs_) |
| 670 kv.second->SetPreemptByFlag(preempted_flag_); | 773 kv.second->SetPreemptByFlag(preempted_flag_); |
| 671 } | 774 } |
| 672 | 775 |
| 673 void GpuChannel::OnDestroy() { | 776 void GpuChannel::OnDestroy() { |
| 674 TRACE_EVENT0("gpu", "GpuChannel::OnDestroy"); | 777 TRACE_EVENT0("gpu", "GpuChannel::OnDestroy"); |
| 675 gpu_channel_manager_->RemoveChannel(client_id_); | 778 gpu_channel_manager_->RemoveChannel(client_id_); |
| 676 } | 779 } |
| 677 | 780 |
| 678 bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) { | 781 bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) { |
| 679 bool handled = true; | 782 bool handled = true; |
| 680 IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg) | 783 IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg) |
| 681 IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer, | 784 IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateOffscreenCommandBuffer, |
| 682 OnCreateOffscreenCommandBuffer) | 785 OnCreateOffscreenCommandBuffer) |
| 683 IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer, | 786 IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer, |
| 684 OnDestroyCommandBuffer) | 787 OnDestroyCommandBuffer) |
| 685 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuMsg_CreateJpegDecoder, | 788 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuMsg_CreateJpegDecoder, |
| 686 OnCreateJpegDecoder) | 789 OnCreateJpegDecoder) |
| 687 IPC_MESSAGE_UNHANDLED(handled = false) | 790 IPC_MESSAGE_UNHANDLED(handled = false) |
| 688 IPC_END_MESSAGE_MAP() | 791 IPC_END_MESSAGE_MAP() |
| 689 DCHECK(handled) << msg.type(); | 792 DCHECK(handled) << msg.type(); |
| 690 return handled; | 793 return handled; |
| 691 } | 794 } |
| 692 | 795 |
| 693 void GpuChannel::HandleMessage() { | 796 void GpuChannel::HandleMessage() { |
| 694 handle_messages_scheduled_ = false; | 797 ChannelMessage* m = nullptr; |
| 695 if (deferred_messages_.empty()) | 798 GpuCommandBufferStub* stub = nullptr; |
| 696 return; | 799 uint32_t sync_point_retired = 0; |
| 697 | 800 { |
| 698 IPC::Message* m = NULL; | 801 base::AutoLock auto_lock(message_queue_->channel_messages_lock_); |
| 699 GpuCommandBufferStub* stub = NULL; | 802 message_queue_->handle_messages_scheduled_ = false; |
| 700 | 803 if (message_queue_->channel_messages_.empty()) { |
| 701 m = deferred_messages_.front(); | |
| 702 stub = stubs_.get(m->routing_id()); | |
| 703 if (stub) { | |
| 704 if (!stub->IsScheduled()) | |
| 705 return; | |
| 706 if (stub->IsPreempted()) { | |
| 707 OnScheduled(); | |
| 708 return; | 804 return; |
| 709 } | 805 } |
| 806 m = message_queue_->channel_messages_.front(); | |
| 807 stub = stubs_.get(m->message.routing_id()); | |
| 808 | |
| 809 // TODO(dyen): Temporary handling of old sync points. | |
| 810 // This must ensure that the sync point will be retired. Normally we'll | |
| 811 // find the stub based on the routing ID, and associate the sync point | |
| 812 // with it, but if that fails for any reason (channel or stub already | |
| 813 // deleted, invalid routing id), we need to retire the sync point | |
| 814 // immediately. | |
| 815 if (m->message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { | |
piman (2015/09/01 03:55:26): Why not move that part of the logic after the lock…
David Yen (2015/09/01 19:08:02): Done.
| 816 const bool retire = m->retire_sync_point; | |
| 817 const uint32_t sync_point = m->sync_point_number; | |
| 818 const int32_t routing_id = m->message.routing_id(); | |
| 819 if (stub) { | |
| 820 stub->AddSyncPoint(sync_point); | |
| 821 if (retire) { | |
| 822 m->message = GpuCommandBufferMsg_RetireSyncPoint(routing_id, | |
| 823 sync_point); | |
| 824 } | |
| 825 } else { | |
| 826 sync_point_retired = sync_point; | |
| 827 } | |
| 828 } | |
| 829 | |
| 830 if (!sync_point_retired && stub) { | |
| 831 if (!stub->IsScheduled()) | |
| 832 return; | |
| 833 if (stub->IsPreempted()) { | |
| 834 message_queue_->ScheduleHandleMessageLocked(); | |
| 835 return; | |
| 836 } | |
| 837 } | |
| 838 | |
| 839 message_queue_->channel_messages_.pop_front(); | |
| 840 if (!message_queue_->channel_messages_.empty()) | |
| 841 message_queue_->ScheduleHandleMessageLocked(); | |
| 710 } | 842 } |
| 711 | 843 |
| 712 scoped_ptr<IPC::Message> message(m); | 844 // TODO(dyen): Temporary handling of old sync points. |
| 713 deferred_messages_.pop_front(); | 845 if (sync_point_retired) { |
| 846 current_order_num_ = m->order_number; | |
| 847 gpu_channel_manager_->sync_point_manager()->RetireSyncPoint( | |
| 848 sync_point_retired); | |
| 849 MessageProcessed(m->order_number); | |
| 850 return; | |
| 851 } | |
| 852 | |
| 853 scoped_ptr<ChannelMessage> scoped_message(m); | |
| 854 const uint32_t order_number = m->order_number; | |
| 855 IPC::Message* message = &m->message; | |
| 856 | |
| 857 DVLOG(1) << "received message @" << message << " on channel @" << this | |
| 858 << " with type " << message->type(); | |
| 859 | |
| 714 bool message_processed = true; | 860 bool message_processed = true; |
| 715 | 861 |
| 716 currently_processing_message_ = message.get(); | 862 if (order_number != static_cast<uint32_t>(-1)) { |
| 863 // Make sure this is a valid unprocessed order number. | |
| 864 DCHECK(order_number <= GetUnprocessedOrderNum() && | |
| 865 order_number >= GetProcessedOrderNum()); | |
| 866 | |
| 867 current_order_num_ = order_number; | |
| 868 } | |
| 717 bool result; | 869 bool result; |
| 718 if (message->routing_id() == MSG_ROUTING_CONTROL) | 870 if (message->routing_id() == MSG_ROUTING_CONTROL) |
| 719 result = OnControlMessageReceived(*message); | 871 result = OnControlMessageReceived(*message); |
| 720 else | 872 else |
| 721 result = router_.RouteMessage(*message); | 873 result = router_.RouteMessage(*message); |
| 722 currently_processing_message_ = NULL; | |
| 723 | 874 |
| 724 if (!result) { | 875 if (!result) { |
| 725 // Respond to sync messages even if router failed to route. | 876 // Respond to sync messages even if router failed to route. |
| 726 if (message->is_sync()) { | 877 if (message->is_sync()) { |
| 727 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message); | 878 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message); |
| 728 reply->set_reply_error(); | 879 reply->set_reply_error(); |
| 729 Send(reply); | 880 Send(reply); |
| 730 } | 881 } |
| 731 } else { | 882 } else { |
| 732 // If the command buffer becomes unscheduled as a result of handling the | 883 // If the command buffer becomes unscheduled as a result of handling the |
| 733 // message but still has more commands to process, synthesize an IPC | 884 // message but still has more commands to process, synthesize an IPC |
| 734 // message to flush that command buffer. | 885 // message to flush that command buffer. |
| 735 if (stub) { | 886 if (stub) { |
| 736 if (stub->HasUnprocessedCommands()) { | 887 if (stub->HasUnprocessedCommands()) { |
| 737 deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled( | 888 message_queue_->PushUnfinishedMessage( |
| 738 stub->route_id())); | 889 order_number, GpuCommandBufferMsg_Rescheduled(stub->route_id())); |
| 739 message_processed = false; | 890 message_processed = false; |
| 740 } | 891 } |
| 741 } | 892 } |
| 742 } | 893 } |
| 743 if (message_processed) | 894 if (message_processed) |
| 744 MessageProcessed(); | 895 MessageProcessed(order_number); |
| 745 | |
| 746 if (!deferred_messages_.empty()) { | |
| 747 OnScheduled(); | |
| 748 } | |
| 749 } | 896 } |
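HandleMessage above follows a common single-consumer pattern: take the lock only to inspect and pop the front message (and reschedule if more work remains), then run the potentially slow dispatch with the lock released so a long handler never blocks the producer, which in this CL is the IO thread. A distilled sketch with std:: types; Consumer and QueuedTask are illustrative names, and the real code also handles the peek-without-pop cases for unscheduled or preempted stubs.

```cpp
#include <deque>
#include <functional>
#include <memory>
#include <mutex>

struct QueuedTask { std::function<void()> run; };

class Consumer {
 public:
  void Push(std::unique_ptr<QueuedTask> t) {
    std::lock_guard<std::mutex> lock(lock_);
    queue_.push_back(std::move(t));
  }

  // Pop under the lock, dispatch outside it.
  void HandleOne() {
    std::unique_ptr<QueuedTask> task;
    {
      std::lock_guard<std::mutex> lock(lock_);
      if (queue_.empty())
        return;
      task = std::move(queue_.front());
      queue_.pop_front();
      // Real code: if (!queue_.empty()) ScheduleHandleMessageLocked();
    }
    task->run();  // lock released: a slow handler can't block Push()
  }

 private:
  std::mutex lock_;
  std::deque<std::unique_ptr<QueuedTask>> queue_;
};

int main() {
  Consumer c;
  c.Push(std::unique_ptr<QueuedTask>(new QueuedTask{[] {}}));
  c.HandleOne();
}
```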
| 750 | 897 |
| 751 void GpuChannel::OnCreateOffscreenCommandBuffer( | 898 void GpuChannel::OnCreateOffscreenCommandBuffer( |
| 752 const gfx::Size& size, | 899 const gfx::Size& size, |
| 753 const GPUCreateCommandBufferConfig& init_params, | 900 const GPUCreateCommandBufferConfig& init_params, |
| 754 int32 route_id, | 901 int32 route_id, |
| 755 bool* succeeded) { | 902 bool* succeeded) { |
| 756 TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer", "route_id", | 903 TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer", "route_id", |
| 757 route_id); | 904 route_id); |
| 758 | 905 |
| (...skipping 48 matching lines...) | |
| 807 } | 954 } |
| 808 } | 955 } |
| 809 | 956 |
| 810 void GpuChannel::OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg) { | 957 void GpuChannel::OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg) { |
| 811 if (!jpeg_decoder_) { | 958 if (!jpeg_decoder_) { |
| 812 jpeg_decoder_.reset(new GpuJpegDecodeAccelerator(this, io_task_runner_)); | 959 jpeg_decoder_.reset(new GpuJpegDecodeAccelerator(this, io_task_runner_)); |
| 813 } | 960 } |
| 814 jpeg_decoder_->AddClient(route_id, reply_msg); | 961 jpeg_decoder_->AddClient(route_id, reply_msg); |
| 815 } | 962 } |
| 816 | 963 |
| 817 void GpuChannel::MessageProcessed() { | 964 void GpuChannel::MessageProcessed(uint32_t order_number) { |
| 818 messages_processed_++; | 965 if (order_number != static_cast<uint32_t>(-1)) { |
| 966 DCHECK(current_order_num_ == order_number); | |
| 967 processed_order_num_ = order_number; | |
| 968 } | |
| 819 if (preempting_flag_.get()) { | 969 if (preempting_flag_.get()) { |
| 820 io_task_runner_->PostTask( | 970 io_task_runner_->PostTask( |
| 821 FROM_HERE, base::Bind(&GpuChannelMessageFilter::MessageProcessed, | 971 FROM_HERE, base::Bind(&GpuChannelMessageFilter::OnMessageProcessed, |
| 822 filter_, messages_processed_)); | 972 filter_)); |
| 823 } | 973 } |
| 824 } | 974 } |
| 825 | 975 |
| 826 void GpuChannel::CacheShader(const std::string& key, | 976 void GpuChannel::CacheShader(const std::string& key, |
| 827 const std::string& shader) { | 977 const std::string& shader) { |
| 828 gpu_channel_manager_->Send( | 978 gpu_channel_manager_->Send( |
| 829 new GpuHostMsg_CacheShader(client_id_, key, shader)); | 979 new GpuHostMsg_CacheShader(client_id_, key, shader)); |
| 830 } | 980 } |
| 831 | 981 |
| 832 void GpuChannel::AddFilter(IPC::MessageFilter* filter) { | 982 void GpuChannel::AddFilter(IPC::MessageFilter* filter) { |
| (...skipping 48 matching lines...) | |
| 881 client_id_); | 1031 client_id_); |
| 882 } | 1032 } |
| 883 } | 1033 } |
| 884 } | 1034 } |
| 885 | 1035 |
| 886 void GpuChannel::HandleUpdateValueState( | 1036 void GpuChannel::HandleUpdateValueState( |
| 887 unsigned int target, const gpu::ValueState& state) { | 1037 unsigned int target, const gpu::ValueState& state) { |
| 888 pending_valuebuffer_state_->UpdateState(target, state); | 1038 pending_valuebuffer_state_->UpdateState(target, state); |
| 889 } | 1039 } |
| 890 | 1040 |
| 1041 uint32_t GpuChannel::GetUnprocessedOrderNum() const { | |
| 1042 return base::subtle::Acquire_Load(&message_queue_->unprocessed_order_num_); | |
| 1043 } | |
| 1044 | |
| 891 } // namespace content | 1045 } // namespace content |