OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if defined(OS_WIN) | 5 #if defined(OS_WIN) |
6 #include <windows.h> | 6 #include <windows.h> |
7 #endif | 7 #endif |
8 | 8 |
9 #include "content/common/gpu/gpu_channel.h" | 9 #include "content/common/gpu/gpu_channel.h" |
10 | 10 |
11 #include <algorithm> | 11 #include <algorithm> |
12 #include <queue> | 12 #include <deque> |
13 #include <set> | |
13 #include <vector> | 14 #include <vector> |
14 | 15 |
15 #include "base/bind.h" | 16 #include "base/bind.h" |
16 #include "base/command_line.h" | 17 #include "base/command_line.h" |
17 #include "base/location.h" | 18 #include "base/location.h" |
18 #include "base/single_thread_task_runner.h" | 19 #include "base/single_thread_task_runner.h" |
19 #include "base/stl_util.h" | 20 #include "base/stl_util.h" |
20 #include "base/strings/string_util.h" | 21 #include "base/strings/string_util.h" |
22 #include "base/synchronization/lock.h" | |
21 #include "base/thread_task_runner_handle.h" | 23 #include "base/thread_task_runner_handle.h" |
22 #include "base/timer/timer.h" | |
23 #include "base/trace_event/memory_dump_manager.h" | 24 #include "base/trace_event/memory_dump_manager.h" |
24 #include "base/trace_event/process_memory_dump.h" | 25 #include "base/trace_event/process_memory_dump.h" |
25 #include "base/trace_event/trace_event.h" | 26 #include "base/trace_event/trace_event.h" |
26 #include "content/common/gpu/gpu_channel_manager.h" | 27 #include "content/common/gpu/gpu_channel_manager.h" |
27 #include "content/common/gpu/gpu_memory_buffer_factory.h" | 28 #include "content/common/gpu/gpu_memory_buffer_factory.h" |
28 #include "content/common/gpu/gpu_messages.h" | 29 #include "content/common/gpu/gpu_messages.h" |
29 #include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h" | 30 #include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h" |
30 #include "content/public/common/content_switches.h" | 31 #include "content/public/common/content_switches.h" |
31 #include "gpu/command_buffer/common/mailbox.h" | 32 #include "gpu/command_buffer/common/mailbox.h" |
32 #include "gpu/command_buffer/common/value_state.h" | 33 #include "gpu/command_buffer/common/value_state.h" |
(...skipping 25 matching lines...) | |
58 const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs; | 59 const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs; |
59 | 60 |
60 // Once we trigger a preemption, the maximum duration that we will wait | 61 // Once we trigger a preemption, the maximum duration that we will wait |
61 // before clearing the preemption. | 62 // before clearing the preemption. |
62 const int64 kMaxPreemptTimeMs = kVsyncIntervalMs; | 63 const int64 kMaxPreemptTimeMs = kVsyncIntervalMs; |
63 | 64 |
64 // Stop the preemption once the time for the longest pending IPC drops | 65 // Stop the preemption once the time for the longest pending IPC drops |
65 // below this threshold. | 66 // below this threshold. |
66 const int64 kStopPreemptThresholdMs = kVsyncIntervalMs; | 67 const int64 kStopPreemptThresholdMs = kVsyncIntervalMs; |
67 | 68 |
69 const uint32_t kOutOfOrderNumber = -1u; | |
70 | |
68 } // anonymous namespace | 71 } // anonymous namespace |
69 | 72 |
73 struct GpuChannelMessage { | |
74 uint32_t order_number; | |
75 base::TimeTicks time_received; | |
76 IPC::Message message; | |
77 | |
78 // TODO(dyen): Temporary sync point data, remove once new sync point lands. | |
79 bool retire_sync_point; | |
80 uint32 sync_point_number; | |
81 | |
82 GpuChannelMessage(uint32_t order_num, const IPC::Message& msg) | |
83 : order_number(order_num), | |
84 time_received(base::TimeTicks::Now()), | |
85 message(msg), | |
86 retire_sync_point(false), | |
87 sync_point_number(0) {} | |
88 }; | |
89 | |
90 class GpuChannelMessageQueue | |
91 : public base::RefCountedThreadSafe<GpuChannelMessageQueue> { | |
92 public: | |
93 static scoped_refptr<GpuChannelMessageQueue> Create( | |
94 GpuChannelManager* gpu_channel_manager, | |
95 base::WeakPtr<GpuChannel> gpu_channel, | |
96 scoped_refptr<base::SingleThreadTaskRunner> task_runner) { | |
97 return new GpuChannelMessageQueue(gpu_channel_manager, gpu_channel, | |
98 task_runner); | |
99 } | |
100 | |
101 uint32_t GetUnprocessedOrderNum() { | |
102 base::AutoLock auto_lock(channel_messages_lock_); | |
103 return unprocessed_order_num_; | |
104 } | |
105 | |
106 void PushBackMessage(uint32_t order_number, const IPC::Message& message) { | |
107 base::AutoLock auto_lock(channel_messages_lock_); | |
108 PushMessageHelper(order_number, | |
109 new GpuChannelMessage(order_number, message)); | |
110 } | |
111 | |
112 void PushOutOfOrderMessage(const IPC::Message& message) { | |
113 // These are pushed out of order, so they do not carry an order number. | |
114 base::AutoLock auto_lock(channel_messages_lock_); | |
115 PushOutOfOrderHelper(new GpuChannelMessage(kOutOfOrderNumber, message)); | |
116 } | |
117 | |
118 bool GenerateSyncPointMessage(gpu::SyncPointManager* sync_point_manager, | |
119 uint32_t order_number, | |
120 const IPC::Message& message, | |
121 bool retire_sync_point, | |
122 uint32_t* sync_point_number) { | |
123 DCHECK(message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID); | |
124 base::AutoLock auto_lock(channel_messages_lock_); | |
125 if (enabled_) { | |
126 const uint32 sync_point = sync_point_manager->GenerateSyncPoint(); | |
127 | |
128 GpuChannelMessage* msg = new GpuChannelMessage(order_number, message); | |
129 msg->retire_sync_point = retire_sync_point; | |
130 msg->sync_point_number = sync_point; | |
131 | |
132 *sync_point_number = sync_point; | |
133 PushMessageHelper(order_number, msg); | |
134 return true; | |
135 } | |
136 return false; | |
137 } | |
138 | |
139 bool HasQueuedMessages() { | |
140 base::AutoLock auto_lock(channel_messages_lock_); | |
141 return !channel_messages_.empty(); | |
142 } | |
143 | |
144 base::TimeTicks GetNextMessageTimeTick() { | |
145 base::AutoLock auto_lock(channel_messages_lock_); | |
146 | |
147 base::TimeTicks next_message_tick; | |
148 if (!channel_messages_.empty()) | |
149 next_message_tick = channel_messages_.front()->time_received; | |
150 | |
151 base::TimeTicks next_out_of_order_tick; | |
152 if (!out_of_order_messages_.empty()) | |
153 next_out_of_order_tick = out_of_order_messages_.front()->time_received; | |
154 | |
155 if (next_message_tick.is_null()) | |
156 return next_out_of_order_tick; | |
157 else if (next_out_of_order_tick.is_null()) | |
158 return next_message_tick; | |
159 else | |
160 return std::min(next_message_tick, next_out_of_order_tick); | |
161 } | |
162 | |
163 protected: | |
164 virtual ~GpuChannelMessageQueue() {} | |
165 | |
166 private: | |
167 friend class GpuChannel; | |
168 friend class base::RefCountedThreadSafe<GpuChannelMessageQueue>; | |
169 | |
170 GpuChannelMessageQueue( | |
171 GpuChannelManager* gpu_channel_manager, | |
172 base::WeakPtr<GpuChannel> gpu_channel, | |
173 scoped_refptr<base::SingleThreadTaskRunner> task_runner) | |
174 : unprocessed_order_num_(0), | |
175 enabled_(true), | |
176 gpu_channel_manager_(gpu_channel_manager), | |
177 gpu_channel_(gpu_channel), | |
178 task_runner_(task_runner) {} | |
179 | |
180 void DeleteAndDisableMessages() { | |
181 base::AutoLock auto_lock(channel_messages_lock_); | |
182 enabled_ = false; | |
183 | |
184 while (!channel_messages_.empty()) { | |
185 GpuChannelMessage* msg = channel_messages_.front(); | |
186 // This needs to clean up both GpuCommandBufferMsg_InsertSyncPoint and | |
187 // GpuCommandBufferMsg_RetireSyncPoint messages; it is safer to just | |
188 // check whether we have a sync point number here. | |
189 if (msg->sync_point_number) { | |
190 gpu_channel_manager_->sync_point_manager()->RetireSyncPoint( | |
191 msg->sync_point_number); | |
piman 2015/09/01 19:23:44: Same comment as previous patch, we should probably… | |
David Yen 2015/09/01 19:34:34: I thought of another issue, we will leak anything… | |
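A minimal sketch of the cleanup this thread is circling, assuming it is safe to retire a sync point found in either queue; the loop shape and the treatment of out_of_order_messages_ are assumptions, not the landed CL:

// Hypothetical sketch: retire pending sync points in both queues before
// deleting, so neither InsertSyncPoint nor RetireSyncPoint work is leaked.
void GpuChannelMessageQueue::DeleteAndDisableMessages() {
  base::AutoLock auto_lock(channel_messages_lock_);
  enabled_ = false;
  for (std::deque<GpuChannelMessage*>* queue :
       {&channel_messages_, &out_of_order_messages_}) {
    for (GpuChannelMessage* msg : *queue) {
      if (msg->sync_point_number) {
        gpu_channel_manager_->sync_point_manager()->RetireSyncPoint(
            msg->sync_point_number);
      }
      delete msg;
    }
    queue->clear();
  }
}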
192 } | |
193 delete msg; | |
194 channel_messages_.pop_front(); | |
195 } | |
196 STLDeleteElements(&out_of_order_messages_); | |
197 } | |
198 | |
199 void PushUnfinishedMessage(uint32_t order_number, | |
200 const IPC::Message& message) { | |
201 // This is only pushed if the message was unfinished, so its order number is kept. | |
202 GpuChannelMessage* msg = new GpuChannelMessage(order_number, message); | |
203 base::AutoLock auto_lock(channel_messages_lock_); | |
204 const bool was_empty = | |
205 (channel_messages_.empty() && out_of_order_messages_.empty()); | |
206 if (order_number == kOutOfOrderNumber) | |
207 out_of_order_messages_.push_front(msg); | |
208 else | |
209 channel_messages_.push_front(msg); | |
210 | |
211 if (was_empty) | |
212 ScheduleHandleMessage(); | |
213 } | |
214 | |
215 void ScheduleHandleMessage() { | |
216 task_runner_->PostTask( | |
217 FROM_HERE, base::Bind(&GpuChannel::HandleMessage, gpu_channel_)); | |
218 } | |
219 | |
220 void PushMessageHelper(uint32_t order_number, GpuChannelMessage* msg) { | |
221 channel_messages_lock_.AssertAcquired(); | |
222 unprocessed_order_num_ = order_number; | |
223 const bool was_empty = | |
224 channel_messages_.empty() && out_of_order_messages_.empty(); | |
225 channel_messages_.push_back(msg); | |
226 if (was_empty) | |
227 ScheduleHandleMessage(); | |
228 } | |
229 | |
230 void PushOutOfOrderHelper(GpuChannelMessage* msg) { | |
231 channel_messages_lock_.AssertAcquired(); | |
232 const bool was_empty = | |
233 channel_messages_.empty() && out_of_order_messages_.empty(); | |
234 out_of_order_messages_.push_back(msg); | |
235 if (was_empty) | |
236 ScheduleHandleMessage(); | |
237 } | |
238 | |
239 // Highest IPC order number seen, set when queued on the IO thread. | |
240 uint32_t unprocessed_order_num_; | |
241 | |
242 bool enabled_; | |
243 std::deque<GpuChannelMessage*> channel_messages_; | |
244 std::deque<GpuChannelMessage*> out_of_order_messages_; | |
245 base::Lock channel_messages_lock_; | |
246 | |
247 GpuChannelManager* gpu_channel_manager_; | |
248 base::WeakPtr<GpuChannel> gpu_channel_; | |
249 scoped_refptr<base::SingleThreadTaskRunner> task_runner_; | |
250 | |
251 DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageQueue); | |
252 }; | |
253 | |
70 // This filter does three things: | 254 // This filter does three things: |
71 // - it counts and timestamps each message forwarded to the channel | 255 // - it counts and timestamps each message forwarded to the channel |
72 // so that we can preempt other channels if a message takes too long to | 256 // so that we can preempt other channels if a message takes too long to |
73 // process. To guarantee fairness, we must wait a minimum amount of time | 257 // process. To guarantee fairness, we must wait a minimum amount of time |
74 // before preempting and we limit the amount of time that we can preempt in | 258 // before preempting and we limit the amount of time that we can preempt in |
75 // one shot (see constants above). | 259 // one shot (see constants above). |
76 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO | 260 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO |
77 // thread, generating the sync point ID and responding immediately, and then | 261 // thread, generating the sync point ID and responding immediately, and then |
78 // posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message | 262 // posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message |
79 // into the channel's queue. | 263 // into the channel's queue. |
80 // - it generates mailbox names for clients of the GPU process on the IO thread. | 264 // - it generates mailbox names for clients of the GPU process on the IO thread. |
81 class GpuChannelMessageFilter : public IPC::MessageFilter { | 265 class GpuChannelMessageFilter : public IPC::MessageFilter { |
82 public: | 266 public: |
83 GpuChannelMessageFilter( | 267 GpuChannelMessageFilter( |
84 base::WeakPtr<GpuChannel> gpu_channel, | 268 scoped_refptr<GpuChannelMessageQueue> message_queue, |
85 gpu::SyncPointManager* sync_point_manager, | 269 gpu::SyncPointManager* sync_point_manager, |
86 scoped_refptr<base::SingleThreadTaskRunner> task_runner, | 270 scoped_refptr<base::SingleThreadTaskRunner> task_runner, |
87 bool future_sync_points) | 271 bool future_sync_points) |
88 : preemption_state_(IDLE), | 272 : preemption_state_(IDLE), |
89 gpu_channel_(gpu_channel), | 273 message_queue_(message_queue), |
90 sender_(nullptr), | 274 sender_(nullptr), |
91 sync_point_manager_(sync_point_manager), | 275 sync_point_manager_(sync_point_manager), |
92 task_runner_(task_runner), | 276 task_runner_(task_runner), |
93 messages_forwarded_to_channel_(0), | |
94 a_stub_is_descheduled_(false), | 277 a_stub_is_descheduled_(false), |
95 future_sync_points_(future_sync_points) {} | 278 future_sync_points_(future_sync_points) {} |
96 | 279 |
97 void OnFilterAdded(IPC::Sender* sender) override { | 280 void OnFilterAdded(IPC::Sender* sender) override { |
98 DCHECK(!sender_); | 281 DCHECK(!sender_); |
99 sender_ = sender; | 282 sender_ = sender; |
100 timer_ = make_scoped_ptr(new base::OneShotTimer<GpuChannelMessageFilter>); | 283 timer_ = make_scoped_ptr(new base::OneShotTimer<GpuChannelMessageFilter>); |
101 } | 284 } |
102 | 285 |
103 void OnFilterRemoved() override { | 286 void OnFilterRemoved() override { |
104 DCHECK(sender_); | 287 DCHECK(sender_); |
105 sender_ = nullptr; | 288 sender_ = nullptr; |
106 timer_ = nullptr; | 289 timer_ = nullptr; |
107 } | 290 } |
108 | 291 |
109 bool OnMessageReceived(const IPC::Message& message) override { | 292 bool OnMessageReceived(const IPC::Message& message) override { |
110 DCHECK(sender_); | 293 DCHECK(sender_); |
111 | 294 |
295 const uint32_t order_number = global_order_counter_++; | |
112 bool handled = false; | 296 bool handled = false; |
113 if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) && | 297 if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) && |
114 !future_sync_points_) { | 298 !future_sync_points_) { |
115 DLOG(ERROR) << "Untrusted client should not send " | 299 DLOG(ERROR) << "Untrusted client should not send " |
116 "GpuCommandBufferMsg_RetireSyncPoint message"; | 300 "GpuCommandBufferMsg_RetireSyncPoint message"; |
117 return true; | 301 return true; |
118 } | 302 } |
119 | 303 |
120 if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { | 304 if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { |
121 base::Tuple<bool> retire; | 305 base::Tuple<bool> retire; |
122 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); | 306 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); |
123 if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message, | 307 if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message, |
124 &retire)) { | 308 &retire)) { |
125 reply->set_reply_error(); | 309 reply->set_reply_error(); |
126 Send(reply); | 310 Send(reply); |
127 return true; | 311 return true; |
128 } | 312 } |
129 if (!future_sync_points_ && !base::get<0>(retire)) { | 313 if (!future_sync_points_ && !base::get<0>(retire)) { |
130 LOG(ERROR) << "Untrusted contexts can't create future sync points"; | 314 LOG(ERROR) << "Untrusted contexts can't create future sync points"; |
131 reply->set_reply_error(); | 315 reply->set_reply_error(); |
132 Send(reply); | 316 Send(reply); |
133 return true; | 317 return true; |
134 } | 318 } |
135 uint32 sync_point = sync_point_manager_->GenerateSyncPoint(); | 319 |
320 // Message queue must handle the entire sync point generation because the | |
321 // message queue could be disabled from the main thread during generation. | |
322 uint32_t sync_point = 0; | |
323 if (!message_queue_->GenerateSyncPointMessage( | |
324 sync_point_manager_, order_number, message, base::get<0>(retire), | |
325 &sync_point)) { | |
326 LOG(ERROR) << "GpuChannel has been destroyed."; | |
327 reply->set_reply_error(); | |
328 Send(reply); | |
329 return true; | |
330 } | |
331 | |
332 DCHECK_NE(sync_point, 0); | |
136 GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point); | 333 GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point); |
137 Send(reply); | 334 Send(reply); |
138 task_runner_->PostTask( | |
139 FROM_HERE, | |
140 base::Bind(&GpuChannelMessageFilter::InsertSyncPointOnMainThread, | |
141 gpu_channel_, sync_point_manager_, message.routing_id(), | |
142 base::get<0>(retire), sync_point)); | |
143 handled = true; | 335 handled = true; |
144 } | 336 } |
145 | 337 |
146 // These are handled by GpuJpegDecodeAccelerator and | 338 // These are handled by GpuJpegDecodeAccelerator and |
147 // GpuVideoDecodeAccelerator. | 339 // GpuVideoDecodeAccelerator. |
148 // TODO(kcwu) Modify GpuChannel::AddFilter to handle additional filters by | 340 // TODO(kcwu) Modify GpuChannel::AddFilter to handle additional filters by |
149 // GpuChannelMessageFilter instead of by IPC::SyncChannel directly. Then we | 341 // GpuChannelMessageFilter instead of by IPC::SyncChannel directly. Then we |
150 // don't need to exclude them one by one here. | 342 // don't need to exclude them one by one here. |
151 if (message.type() == AcceleratedJpegDecoderMsg_Decode::ID || | 343 if (message.type() == AcceleratedJpegDecoderMsg_Decode::ID || |
152 message.type() == AcceleratedJpegDecoderMsg_Destroy::ID || | 344 message.type() == AcceleratedJpegDecoderMsg_Destroy::ID || |
153 message.type() == AcceleratedVideoDecoderMsg_Decode::ID) { | 345 message.type() == AcceleratedVideoDecoderMsg_Decode::ID) { |
154 return false; | 346 return false; |
155 } | 347 } |
156 | 348 |
157 // All other messages get processed by the GpuChannel. | 349 // Forward all other messages to the GpuChannel. |
158 messages_forwarded_to_channel_++; | 350 if (!handled && !message.is_reply() && !message.should_unblock()) { |
159 if (preempting_flag_.get()) | 351 if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || |
160 pending_messages_.push(PendingMessage(messages_forwarded_to_channel_)); | 352 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { |
353 // Move Wait commands to the head of the queue, so the renderer | |
354 // doesn't have to wait any longer than necessary. | |
355 message_queue_->PushOutOfOrderMessage(message); | |
356 } else { | |
357 message_queue_->PushBackMessage(order_number, message); | |
358 } | |
359 handled = true; | |
360 } | |
361 | |
161 UpdatePreemptionState(); | 362 UpdatePreemptionState(); |
162 | |
163 return handled; | 363 return handled; |
164 } | 364 } |
165 | 365 |
166 void MessageProcessed(uint64 messages_processed) { | 366 void OnMessageProcessed() { UpdatePreemptionState(); } |
167 while (!pending_messages_.empty() && | |
168 pending_messages_.front().message_number <= messages_processed) | |
169 pending_messages_.pop(); | |
170 UpdatePreemptionState(); | |
171 } | |
172 | 367 |
173 void SetPreemptingFlagAndSchedulingState( | 368 void SetPreemptingFlagAndSchedulingState( |
174 gpu::PreemptionFlag* preempting_flag, | 369 gpu::PreemptionFlag* preempting_flag, |
175 bool a_stub_is_descheduled) { | 370 bool a_stub_is_descheduled) { |
176 preempting_flag_ = preempting_flag; | 371 preempting_flag_ = preempting_flag; |
177 a_stub_is_descheduled_ = a_stub_is_descheduled; | 372 a_stub_is_descheduled_ = a_stub_is_descheduled; |
178 } | 373 } |
179 | 374 |
180 void UpdateStubSchedulingState(bool a_stub_is_descheduled) { | 375 void UpdateStubSchedulingState(bool a_stub_is_descheduled) { |
181 a_stub_is_descheduled_ = a_stub_is_descheduled; | 376 a_stub_is_descheduled_ = a_stub_is_descheduled; |
(...skipping 23 matching lines...) | |
205 // We would like to preempt, but some stub is descheduled. | 400 // We would like to preempt, but some stub is descheduled. |
206 WOULD_PREEMPT_DESCHEDULED, | 401 WOULD_PREEMPT_DESCHEDULED, |
207 }; | 402 }; |
208 | 403 |
209 PreemptionState preemption_state_; | 404 PreemptionState preemption_state_; |
210 | 405 |
211 // Maximum amount of time that we can spend in PREEMPTING. | 406 // Maximum amount of time that we can spend in PREEMPTING. |
212 // It is reset when we transition to IDLE. | 407 // It is reset when we transition to IDLE. |
213 base::TimeDelta max_preemption_time_; | 408 base::TimeDelta max_preemption_time_; |
214 | 409 |
215 struct PendingMessage { | |
216 uint64 message_number; | |
217 base::TimeTicks time_received; | |
218 | |
219 explicit PendingMessage(uint64 message_number) | |
220 : message_number(message_number), | |
221 time_received(base::TimeTicks::Now()) { | |
222 } | |
223 }; | |
224 | |
225 void UpdatePreemptionState() { | 410 void UpdatePreemptionState() { |
226 switch (preemption_state_) { | 411 switch (preemption_state_) { |
227 case IDLE: | 412 case IDLE: |
228 if (preempting_flag_.get() && !pending_messages_.empty()) | 413 if (preempting_flag_.get() && message_queue_->HasQueuedMessages()) |
229 TransitionToWaiting(); | 414 TransitionToWaiting(); |
230 break; | 415 break; |
231 case WAITING: | 416 case WAITING: |
232 // A timer will transition us to CHECKING. | 417 // A timer will transition us to CHECKING. |
233 DCHECK(timer_->IsRunning()); | 418 DCHECK(timer_->IsRunning()); |
234 break; | 419 break; |
235 case CHECKING: | 420 case CHECKING: { |
236 if (!pending_messages_.empty()) { | 421 base::TimeTicks time_tick = message_queue_->GetNextMessageTimeTick(); |
237 base::TimeDelta time_elapsed = | 422 if (!time_tick.is_null()) { |
238 base::TimeTicks::Now() - pending_messages_.front().time_received; | 423 base::TimeDelta time_elapsed = base::TimeTicks::Now() - time_tick; |
239 if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) { | 424 if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) { |
240 // Schedule another check for when the IPC may go long. | 425 // Schedule another check for when the IPC may go long. |
241 timer_->Start( | 426 timer_->Start( |
242 FROM_HERE, | 427 FROM_HERE, |
243 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) - | 428 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) - |
244 time_elapsed, | 429 time_elapsed, |
245 this, &GpuChannelMessageFilter::UpdatePreemptionState); | 430 this, &GpuChannelMessageFilter::UpdatePreemptionState); |
246 } else { | 431 } else { |
247 if (a_stub_is_descheduled_) | 432 if (a_stub_is_descheduled_) |
248 TransitionToWouldPreemptDescheduled(); | 433 TransitionToWouldPreemptDescheduled(); |
249 else | 434 else |
250 TransitionToPreempting(); | 435 TransitionToPreempting(); |
251 } | 436 } |
252 } | 437 } |
253 break; | 438 } break; |
254 case PREEMPTING: | 439 case PREEMPTING: |
255 // A TransitionToIdle() timer should always be running in this state. | 440 // A TransitionToIdle() timer should always be running in this state. |
256 DCHECK(timer_->IsRunning()); | 441 DCHECK(timer_->IsRunning()); |
257 if (a_stub_is_descheduled_) | 442 if (a_stub_is_descheduled_) |
258 TransitionToWouldPreemptDescheduled(); | 443 TransitionToWouldPreemptDescheduled(); |
259 else | 444 else |
260 TransitionToIdleIfCaughtUp(); | 445 TransitionToIdleIfCaughtUp(); |
261 break; | 446 break; |
262 case WOULD_PREEMPT_DESCHEDULED: | 447 case WOULD_PREEMPT_DESCHEDULED: |
263 // A TransitionToIdle() timer should never be running in this state. | 448 // A TransitionToIdle() timer should never be running in this state. |
264 DCHECK(!timer_->IsRunning()); | 449 DCHECK(!timer_->IsRunning()); |
265 if (!a_stub_is_descheduled_) | 450 if (!a_stub_is_descheduled_) |
266 TransitionToPreempting(); | 451 TransitionToPreempting(); |
267 else | 452 else |
268 TransitionToIdleIfCaughtUp(); | 453 TransitionToIdleIfCaughtUp(); |
269 break; | 454 break; |
270 default: | 455 default: |
271 NOTREACHED(); | 456 NOTREACHED(); |
272 } | 457 } |
273 } | 458 } |
274 | 459 |
275 void TransitionToIdleIfCaughtUp() { | 460 void TransitionToIdleIfCaughtUp() { |
276 DCHECK(preemption_state_ == PREEMPTING || | 461 DCHECK(preemption_state_ == PREEMPTING || |
277 preemption_state_ == WOULD_PREEMPT_DESCHEDULED); | 462 preemption_state_ == WOULD_PREEMPT_DESCHEDULED); |
278 if (pending_messages_.empty()) { | 463 base::TimeTicks next_tick = message_queue_->GetNextMessageTimeTick(); |
464 if (next_tick.is_null()) { | |
279 TransitionToIdle(); | 465 TransitionToIdle(); |
280 } else { | 466 } else { |
281 base::TimeDelta time_elapsed = | 467 base::TimeDelta time_elapsed = base::TimeTicks::Now() - next_tick; |
282 base::TimeTicks::Now() - pending_messages_.front().time_received; | |
283 if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs) | 468 if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs) |
284 TransitionToIdle(); | 469 TransitionToIdle(); |
285 } | 470 } |
286 } | 471 } |
287 | 472 |
288 void TransitionToIdle() { | 473 void TransitionToIdle() { |
289 DCHECK(preemption_state_ == PREEMPTING || | 474 DCHECK(preemption_state_ == PREEMPTING || |
290 preemption_state_ == WOULD_PREEMPT_DESCHEDULED); | 475 preemption_state_ == WOULD_PREEMPT_DESCHEDULED); |
291 // Stop any outstanding timer set to force us from PREEMPTING to IDLE. | 476 // Stop any outstanding timer set to force us from PREEMPTING to IDLE. |
292 timer_->Stop(); | 477 timer_->Stop(); |
(...skipping 65 matching lines...) | |
358 } | 543 } |
359 } | 544 } |
360 | 545 |
361 preemption_state_ = WOULD_PREEMPT_DESCHEDULED; | 546 preemption_state_ = WOULD_PREEMPT_DESCHEDULED; |
362 preempting_flag_->Reset(); | 547 preempting_flag_->Reset(); |
363 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0); | 548 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0); |
364 | 549 |
365 UpdatePreemptionState(); | 550 UpdatePreemptionState(); |
366 } | 551 } |
367 | 552 |
368 static void InsertSyncPointOnMainThread( | 553 // The message_queue_ is used to handle messages on the main thread. |
369 base::WeakPtr<GpuChannel> gpu_channel, | 554 scoped_refptr<GpuChannelMessageQueue> message_queue_; |
370 gpu::SyncPointManager* manager, | |
371 int32 routing_id, | |
372 bool retire, | |
373 uint32 sync_point) { | |
374 // This function must ensure that the sync point will be retired. Normally | |
375 // we'll find the stub based on the routing ID, and associate the sync point | |
376 // with it, but if that fails for any reason (channel or stub already | |
377 // deleted, invalid routing id), we need to retire the sync point | |
378 // immediately. | |
379 if (gpu_channel) { | |
380 GpuCommandBufferStub* stub = gpu_channel->LookupCommandBuffer(routing_id); | |
381 if (stub) { | |
382 stub->AddSyncPoint(sync_point); | |
383 if (retire) { | |
384 GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point); | |
385 gpu_channel->OnMessageReceived(message); | |
386 } | |
387 return; | |
388 } else { | |
389 gpu_channel->MessageProcessed(); | |
390 } | |
391 } | |
392 manager->RetireSyncPoint(sync_point); | |
393 } | |
394 | |
395 // NOTE: this weak pointer is never dereferenced on the IO thread, it's only | |
396 // passed through - therefore the WeakPtr assumptions are respected. | |
397 base::WeakPtr<GpuChannel> gpu_channel_; | |
398 IPC::Sender* sender_; | 555 IPC::Sender* sender_; |
399 gpu::SyncPointManager* sync_point_manager_; | 556 gpu::SyncPointManager* sync_point_manager_; |
400 scoped_refptr<base::SingleThreadTaskRunner> task_runner_; | 557 scoped_refptr<base::SingleThreadTaskRunner> task_runner_; |
401 scoped_refptr<gpu::PreemptionFlag> preempting_flag_; | 558 scoped_refptr<gpu::PreemptionFlag> preempting_flag_; |
402 | 559 |
403 std::queue<PendingMessage> pending_messages_; | |
404 | |
405 // Count of the number of IPCs forwarded to the GpuChannel. | |
406 uint64 messages_forwarded_to_channel_; | |
407 | |
408 // This timer is created and destroyed on the IO thread. | 560 // This timer is created and destroyed on the IO thread. |
409 scoped_ptr<base::OneShotTimer<GpuChannelMessageFilter>> timer_; | 561 scoped_ptr<base::OneShotTimer<GpuChannelMessageFilter>> timer_; |
410 | 562 |
411 bool a_stub_is_descheduled_; | 563 bool a_stub_is_descheduled_; |
412 | 564 |
413 // True if this channel can create future sync points. | 565 // True if this channel can create future sync points. |
414 bool future_sync_points_; | 566 bool future_sync_points_; |
567 | |
568 // This number is only ever incremented/read on the IO thread. | |
569 static uint32_t global_order_counter_; | |
415 }; | 570 }; |
416 | 571 |
572 uint32_t GpuChannelMessageFilter::global_order_counter_ = 0; | |
573 | |
417 GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager, | 574 GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager, |
418 GpuWatchdog* watchdog, | 575 GpuWatchdog* watchdog, |
419 gfx::GLShareGroup* share_group, | 576 gfx::GLShareGroup* share_group, |
420 gpu::gles2::MailboxManager* mailbox, | 577 gpu::gles2::MailboxManager* mailbox, |
421 base::SingleThreadTaskRunner* task_runner, | 578 base::SingleThreadTaskRunner* task_runner, |
422 base::SingleThreadTaskRunner* io_task_runner, | 579 base::SingleThreadTaskRunner* io_task_runner, |
423 int client_id, | 580 int client_id, |
424 uint64_t client_tracing_id, | 581 uint64_t client_tracing_id, |
425 bool software, | 582 bool software, |
426 bool allow_future_sync_points) | 583 bool allow_future_sync_points) |
427 : gpu_channel_manager_(gpu_channel_manager), | 584 : gpu_channel_manager_(gpu_channel_manager), |
428 channel_id_(IPC::Channel::GenerateVerifiedChannelID("gpu")), | 585 channel_id_(IPC::Channel::GenerateVerifiedChannelID("gpu")), |
429 messages_processed_(0), | |
430 client_id_(client_id), | 586 client_id_(client_id), |
431 client_tracing_id_(client_tracing_id), | 587 client_tracing_id_(client_tracing_id), |
432 task_runner_(task_runner), | 588 task_runner_(task_runner), |
433 io_task_runner_(io_task_runner), | 589 io_task_runner_(io_task_runner), |
434 share_group_(share_group ? share_group : new gfx::GLShareGroup), | 590 share_group_(share_group ? share_group : new gfx::GLShareGroup), |
435 mailbox_manager_(mailbox | 591 mailbox_manager_(mailbox |
436 ? scoped_refptr<gpu::gles2::MailboxManager>(mailbox) | 592 ? scoped_refptr<gpu::gles2::MailboxManager>(mailbox) |
437 : gpu::gles2::MailboxManager::Create()), | 593 : gpu::gles2::MailboxManager::Create()), |
438 subscription_ref_set_(new gpu::gles2::SubscriptionRefSet), | 594 subscription_ref_set_(new gpu::gles2::SubscriptionRefSet), |
439 pending_valuebuffer_state_(new gpu::ValueStateMap), | 595 pending_valuebuffer_state_(new gpu::ValueStateMap), |
440 watchdog_(watchdog), | 596 watchdog_(watchdog), |
441 software_(software), | 597 software_(software), |
442 handle_messages_scheduled_(false), | 598 current_order_num_(0), |
443 currently_processing_message_(nullptr), | 599 processed_order_num_(0), |
444 num_stubs_descheduled_(0), | 600 num_stubs_descheduled_(0), |
445 allow_future_sync_points_(allow_future_sync_points), | 601 allow_future_sync_points_(allow_future_sync_points), |
446 weak_factory_(this) { | 602 weak_factory_(this) { |
447 DCHECK(gpu_channel_manager); | 603 DCHECK(gpu_channel_manager); |
448 DCHECK(client_id); | 604 DCHECK(client_id); |
449 | 605 |
606 message_queue_ = GpuChannelMessageQueue::Create( | |
607 gpu_channel_manager, weak_factory_.GetWeakPtr(), task_runner); | |
608 | |
450 filter_ = new GpuChannelMessageFilter( | 609 filter_ = new GpuChannelMessageFilter( |
451 weak_factory_.GetWeakPtr(), gpu_channel_manager_->sync_point_manager(), | 610 message_queue_, gpu_channel_manager_->sync_point_manager(), task_runner_, |
452 task_runner_, allow_future_sync_points_); | 611 allow_future_sync_points_); |
453 | 612 |
454 subscription_ref_set_->AddObserver(this); | 613 subscription_ref_set_->AddObserver(this); |
455 } | 614 } |
456 | 615 |
457 GpuChannel::~GpuChannel() { | 616 GpuChannel::~GpuChannel() { |
458 // Clear stubs first because of dependencies. | 617 // Clear stubs first because of dependencies. |
459 stubs_.clear(); | 618 stubs_.clear(); |
460 | 619 |
461 STLDeleteElements(&deferred_messages_); | 620 message_queue_->DeleteAndDisableMessages(); |
621 | |
462 subscription_ref_set_->RemoveObserver(this); | 622 subscription_ref_set_->RemoveObserver(this); |
463 if (preempting_flag_.get()) | 623 if (preempting_flag_.get()) |
464 preempting_flag_->Reset(); | 624 preempting_flag_->Reset(); |
465 } | 625 } |
466 | 626 |
467 IPC::ChannelHandle GpuChannel::Init(base::WaitableEvent* shutdown_event, | 627 IPC::ChannelHandle GpuChannel::Init(base::WaitableEvent* shutdown_event, |
468 IPC::AttachmentBroker* attachment_broker) { | 628 IPC::AttachmentBroker* attachment_broker) { |
469 DCHECK(shutdown_event); | 629 DCHECK(shutdown_event); |
470 DCHECK(!channel_); | 630 DCHECK(!channel_); |
471 | 631 |
(...skipping 13 matching lines...) | |
485 | 645 |
486 channel_->AddFilter(filter_.get()); | 646 channel_->AddFilter(filter_.get()); |
487 | 647 |
488 return channel_handle; | 648 return channel_handle; |
489 } | 649 } |
490 | 650 |
491 base::ProcessId GpuChannel::GetClientPID() const { | 651 base::ProcessId GpuChannel::GetClientPID() const { |
492 return channel_->GetPeerPID(); | 652 return channel_->GetPeerPID(); |
493 } | 653 } |
494 | 654 |
495 bool GpuChannel::OnMessageReceived(const IPC::Message& message) { | 655 bool GpuChannel::OnMessageReceived(const IPC::Message& message) { |
jbauman 2015/09/01 20:03:06: Eventually we should restructure the code to remove… | |
496 DVLOG(1) << "received message @" << &message << " on channel @" << this | 656 // All messages should be pushed to channel_messages_ and handled separately. |
497 << " with type " << message.type(); | 657 NOTREACHED(); |
498 | 658 return false; |
499 if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || | |
500 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { | |
501 // Move Wait commands to the head of the queue, so the renderer | |
502 // doesn't have to wait any longer than necessary. | |
503 deferred_messages_.push_front(new IPC::Message(message)); | |
504 } else { | |
505 deferred_messages_.push_back(new IPC::Message(message)); | |
506 } | |
507 | |
508 OnScheduled(); | |
509 | |
510 return true; | |
511 } | 659 } |
512 | 660 |
513 void GpuChannel::OnChannelError() { | 661 void GpuChannel::OnChannelError() { |
514 gpu_channel_manager_->RemoveChannel(client_id_); | 662 gpu_channel_manager_->RemoveChannel(client_id_); |
515 } | 663 } |
516 | 664 |
517 bool GpuChannel::Send(IPC::Message* message) { | 665 bool GpuChannel::Send(IPC::Message* message) { |
518 // The GPU process must never send a synchronous IPC message to the renderer | 666 // The GPU process must never send a synchronous IPC message to the renderer |
519 // process. This could result in deadlock. | 667 // process. This could result in deadlock. |
520 DCHECK(!message->is_sync()); | 668 DCHECK(!message->is_sync()); |
(...skipping 12 matching lines...) Expand all Loading... | |
533 void GpuChannel::OnAddSubscription(unsigned int target) { | 681 void GpuChannel::OnAddSubscription(unsigned int target) { |
534 gpu_channel_manager()->Send( | 682 gpu_channel_manager()->Send( |
535 new GpuHostMsg_AddSubscription(client_id_, target)); | 683 new GpuHostMsg_AddSubscription(client_id_, target)); |
536 } | 684 } |
537 | 685 |
538 void GpuChannel::OnRemoveSubscription(unsigned int target) { | 686 void GpuChannel::OnRemoveSubscription(unsigned int target) { |
539 gpu_channel_manager()->Send( | 687 gpu_channel_manager()->Send( |
540 new GpuHostMsg_RemoveSubscription(client_id_, target)); | 688 new GpuHostMsg_RemoveSubscription(client_id_, target)); |
541 } | 689 } |
542 | 690 |
543 void GpuChannel::RequeueMessage() { | |
544 DCHECK(currently_processing_message_); | |
545 deferred_messages_.push_front( | |
546 new IPC::Message(*currently_processing_message_)); | |
547 messages_processed_--; | |
548 currently_processing_message_ = NULL; | |
549 } | |
550 | |
551 void GpuChannel::OnScheduled() { | |
552 if (handle_messages_scheduled_) | |
553 return; | |
554 // Post a task to handle any deferred messages. The deferred message queue is | |
555 // not emptied here, which ensures that OnMessageReceived will continue to | |
556 // defer newly received messages until the ones in the queue have all been | |
557 // handled by HandleMessage. HandleMessage is invoked as a | |
558 // task to prevent reentrancy. | |
559 task_runner_->PostTask(FROM_HERE, base::Bind(&GpuChannel::HandleMessage, | |
560 weak_factory_.GetWeakPtr())); | |
561 handle_messages_scheduled_ = true; | |
562 } | |
563 | |
564 void GpuChannel::StubSchedulingChanged(bool scheduled) { | 691 void GpuChannel::StubSchedulingChanged(bool scheduled) { |
565 bool a_stub_was_descheduled = num_stubs_descheduled_ > 0; | 692 bool a_stub_was_descheduled = num_stubs_descheduled_ > 0; |
566 if (scheduled) { | 693 if (scheduled) { |
567 num_stubs_descheduled_--; | 694 num_stubs_descheduled_--; |
568 OnScheduled(); | 695 message_queue_->ScheduleHandleMessage(); |
569 } else { | 696 } else { |
570 num_stubs_descheduled_++; | 697 num_stubs_descheduled_++; |
571 } | 698 } |
572 DCHECK_LE(num_stubs_descheduled_, stubs_.size()); | 699 DCHECK_LE(num_stubs_descheduled_, stubs_.size()); |
573 bool a_stub_is_descheduled = num_stubs_descheduled_ > 0; | 700 bool a_stub_is_descheduled = num_stubs_descheduled_ > 0; |
574 | 701 |
575 if (a_stub_is_descheduled != a_stub_was_descheduled) { | 702 if (a_stub_is_descheduled != a_stub_was_descheduled) { |
576 if (preempting_flag_.get()) { | 703 if (preempting_flag_.get()) { |
577 io_task_runner_->PostTask( | 704 io_task_runner_->PostTask( |
578 FROM_HERE, | 705 FROM_HERE, |
(...skipping 105 matching lines...) | |
684 OnDestroyCommandBuffer) | 811 OnDestroyCommandBuffer) |
685 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuMsg_CreateJpegDecoder, | 812 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuMsg_CreateJpegDecoder, |
686 OnCreateJpegDecoder) | 813 OnCreateJpegDecoder) |
687 IPC_MESSAGE_UNHANDLED(handled = false) | 814 IPC_MESSAGE_UNHANDLED(handled = false) |
688 IPC_END_MESSAGE_MAP() | 815 IPC_END_MESSAGE_MAP() |
689 DCHECK(handled) << msg.type(); | 816 DCHECK(handled) << msg.type(); |
690 return handled; | 817 return handled; |
691 } | 818 } |
692 | 819 |
693 void GpuChannel::HandleMessage() { | 820 void GpuChannel::HandleMessage() { |
694 handle_messages_scheduled_ = false; | 821 GpuChannelMessage* m = nullptr; |
695 if (deferred_messages_.empty()) | 822 GpuCommandBufferStub* stub = nullptr; |
823 { | |
824 base::AutoLock auto_lock(message_queue_->channel_messages_lock_); | |
825 if (!message_queue_->out_of_order_messages_.empty()) { | |
826 m = message_queue_->out_of_order_messages_.front(); | |
827 DCHECK(m->order_number == kOutOfOrderNumber); | |
828 message_queue_->out_of_order_messages_.pop_front(); | |
829 } else if (!message_queue_->channel_messages_.empty()) { | |
830 m = message_queue_->channel_messages_.front(); | |
831 DCHECK(m->order_number != kOutOfOrderNumber); | |
832 message_queue_->channel_messages_.pop_front(); | |
833 } else { | |
834 // No messages to process | |
835 return; | |
836 } | |
837 | |
838 if (!message_queue_->out_of_order_messages_.empty() || | |
839 !message_queue_->channel_messages_.empty()) { | |
840 message_queue_->ScheduleHandleMessage(); | |
jbauman 2015/09/01 20:03:06: If the stub is descheduled and there's at least two… | |
piman 2015/09/01 20:36:36: Good point. | |
David Yen 2015/09/01 21:36:38: I think it's easier to store whether the message queue… | |
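A possible shape for the fix being suggested, assuming a boolean guarded by channel_messages_lock_; handle_message_scheduled_ is a hypothetical member name, and in this sketch callers must already hold the lock:

// Hypothetical sketch: track whether a HandleMessage task is pending so a
// descheduled stub does not cause HandleMessage to be reposted in a loop.
// GpuChannel::HandleMessage would clear the flag, under the lock, before
// popping a message.
void GpuChannelMessageQueue::ScheduleHandleMessage() {
  channel_messages_lock_.AssertAcquired();
  if (handle_message_scheduled_)
    return;
  handle_message_scheduled_ = true;
  task_runner_->PostTask(
      FROM_HERE, base::Bind(&GpuChannel::HandleMessage, gpu_channel_));
}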
841 } | |
842 } | |
843 | |
844 bool retry_message = false; | |
845 stub = stubs_.get(m->message.routing_id()); | |
846 if (stub) { | |
847 if (!stub->IsScheduled()) { | |
848 retry_message = true; | |
849 } | |
850 if (stub->IsPreempted()) { | |
851 retry_message = true; | |
852 message_queue_->ScheduleHandleMessage(); | |
853 } | |
854 } | |
855 | |
856 if (retry_message) { | |
857 base::AutoLock auto_lock(message_queue_->channel_messages_lock_); | |
858 if (m->order_number == kOutOfOrderNumber) | |
859 message_queue_->out_of_order_messages_.push_front(m); | |
860 else | |
861 message_queue_->channel_messages_.push_front(m); | |
696 return; | 862 return; |
863 } | |
697 | 864 |
698 IPC::Message* m = NULL; | 865 scoped_ptr<GpuChannelMessage> scoped_message(m); |
699 GpuCommandBufferStub* stub = NULL; | 866 const uint32_t order_number = m->order_number; |
867 const int32_t routing_id = m->message.routing_id(); | |
700 | 868 |
701 m = deferred_messages_.front(); | 869 // TODO(dyen): Temporary handling of old sync points. |
702 stub = stubs_.get(m->routing_id()); | 870 // This must ensure that the sync point will be retired. Normally we'll |
703 if (stub) { | 871 // find the stub based on the routing ID, and associate the sync point |
704 if (!stub->IsScheduled()) | 872 // with it, but if that fails for any reason (channel or stub already |
705 return; | 873 // deleted, invalid routing id), we need to retire the sync point |
706 if (stub->IsPreempted()) { | 874 // immediately. |
707 OnScheduled(); | 875 if (m->message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { |
876 const bool retire = m->retire_sync_point; | |
877 const uint32_t sync_point = m->sync_point_number; | |
878 if (stub) { | |
879 stub->AddSyncPoint(sync_point); | |
880 if (retire) { | |
881 m->message = | |
882 GpuCommandBufferMsg_RetireSyncPoint(routing_id, sync_point); | |
883 } | |
884 } else { | |
885 current_order_num_ = order_number; | |
886 gpu_channel_manager_->sync_point_manager()->RetireSyncPoint(sync_point); | |
887 MessageProcessed(order_number); | |
708 return; | 888 return; |
709 } | 889 } |
710 } | 890 } |
711 | 891 |
712 scoped_ptr<IPC::Message> message(m); | 892 IPC::Message* message = &m->message; |
713 deferred_messages_.pop_front(); | |
714 bool message_processed = true; | 893 bool message_processed = true; |
715 | 894 |
716 currently_processing_message_ = message.get(); | 895 DVLOG(1) << "received message @" << message << " on channel @" << this |
717 bool result; | 896 << " with type " << message->type(); |
718 if (message->routing_id() == MSG_ROUTING_CONTROL) | 897 |
898 if (order_number != kOutOfOrderNumber) { | |
899 // Make sure this is a valid unprocessed order number. | |
900 DCHECK(order_number <= GetUnprocessedOrderNum() && | |
901 order_number >= GetProcessedOrderNum()); | |
902 | |
903 current_order_num_ = order_number; | |
904 } | |
905 bool result = false; | |
906 if (routing_id == MSG_ROUTING_CONTROL) | |
719 result = OnControlMessageReceived(*message); | 907 result = OnControlMessageReceived(*message); |
720 else | 908 else |
721 result = router_.RouteMessage(*message); | 909 result = router_.RouteMessage(*message); |
722 currently_processing_message_ = NULL; | |
723 | 910 |
724 if (!result) { | 911 if (!result) { |
725 // Respond to sync messages even if router failed to route. | 912 // Respond to sync messages even if router failed to route. |
726 if (message->is_sync()) { | 913 if (message->is_sync()) { |
727 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message); | 914 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message); |
728 reply->set_reply_error(); | 915 reply->set_reply_error(); |
729 Send(reply); | 916 Send(reply); |
730 } | 917 } |
731 } else { | 918 } else { |
732 // If the command buffer becomes unscheduled as a result of handling the | 919 // If the command buffer becomes unscheduled as a result of handling the |
733 // message but still has more commands to process, synthesize an IPC | 920 // message but still has more commands to process, synthesize an IPC |
734 // message to flush that command buffer. | 921 // message to flush that command buffer. |
735 if (stub) { | 922 if (stub) { |
736 if (stub->HasUnprocessedCommands()) { | 923 if (stub->HasUnprocessedCommands()) { |
737 deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled( | 924 message_queue_->PushUnfinishedMessage( |
738 stub->route_id())); | 925 order_number, GpuCommandBufferMsg_Rescheduled(stub->route_id())); |
739 message_processed = false; | 926 message_processed = false; |
740 } | 927 } |
741 } | 928 } |
742 } | 929 } |
743 if (message_processed) | 930 if (message_processed) |
744 MessageProcessed(); | 931 MessageProcessed(order_number); |
745 | |
746 if (!deferred_messages_.empty()) { | |
747 OnScheduled(); | |
748 } | |
749 } | 932 } |
750 | 933 |
751 void GpuChannel::OnCreateOffscreenCommandBuffer( | 934 void GpuChannel::OnCreateOffscreenCommandBuffer( |
752 const gfx::Size& size, | 935 const gfx::Size& size, |
753 const GPUCreateCommandBufferConfig& init_params, | 936 const GPUCreateCommandBufferConfig& init_params, |
754 int32 route_id, | 937 int32 route_id, |
755 bool* succeeded) { | 938 bool* succeeded) { |
756 TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer", "route_id", | 939 TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer", "route_id", |
757 route_id); | 940 route_id); |
758 | 941 |
(...skipping 48 matching lines...) | |
807 } | 990 } |
808 } | 991 } |
809 | 992 |
810 void GpuChannel::OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg) { | 993 void GpuChannel::OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg) { |
811 if (!jpeg_decoder_) { | 994 if (!jpeg_decoder_) { |
812 jpeg_decoder_.reset(new GpuJpegDecodeAccelerator(this, io_task_runner_)); | 995 jpeg_decoder_.reset(new GpuJpegDecodeAccelerator(this, io_task_runner_)); |
813 } | 996 } |
814 jpeg_decoder_->AddClient(route_id, reply_msg); | 997 jpeg_decoder_->AddClient(route_id, reply_msg); |
815 } | 998 } |
816 | 999 |
817 void GpuChannel::MessageProcessed() { | 1000 void GpuChannel::MessageProcessed(uint32_t order_number) { |
818 messages_processed_++; | 1001 if (order_number != kOutOfOrderNumber) { |
1002 DCHECK(current_order_num_ == order_number); | |
1003 processed_order_num_ = order_number; | |
1004 } | |
819 if (preempting_flag_.get()) { | 1005 if (preempting_flag_.get()) { |
820 io_task_runner_->PostTask( | 1006 io_task_runner_->PostTask( |
821 FROM_HERE, base::Bind(&GpuChannelMessageFilter::MessageProcessed, | 1007 FROM_HERE, |
822 filter_, messages_processed_)); | 1008 base::Bind(&GpuChannelMessageFilter::OnMessageProcessed, filter_)); |
823 } | 1009 } |
824 } | 1010 } |
825 | 1011 |
826 void GpuChannel::CacheShader(const std::string& key, | 1012 void GpuChannel::CacheShader(const std::string& key, |
827 const std::string& shader) { | 1013 const std::string& shader) { |
828 gpu_channel_manager_->Send( | 1014 gpu_channel_manager_->Send( |
829 new GpuHostMsg_CacheShader(client_id_, key, shader)); | 1015 new GpuHostMsg_CacheShader(client_id_, key, shader)); |
830 } | 1016 } |
831 | 1017 |
832 void GpuChannel::AddFilter(IPC::MessageFilter* filter) { | 1018 void GpuChannel::AddFilter(IPC::MessageFilter* filter) { |
(...skipping 48 matching lines...) | |
881 client_id_); | 1067 client_id_); |
882 } | 1068 } |
883 } | 1069 } |
884 } | 1070 } |
885 | 1071 |
886 void GpuChannel::HandleUpdateValueState( | 1072 void GpuChannel::HandleUpdateValueState( |
887 unsigned int target, const gpu::ValueState& state) { | 1073 unsigned int target, const gpu::ValueState& state) { |
888 pending_valuebuffer_state_->UpdateState(target, state); | 1074 pending_valuebuffer_state_->UpdateState(target, state); |
889 } | 1075 } |
890 | 1076 |
1077 uint32_t GpuChannel::GetUnprocessedOrderNum() const { | |
1078 return message_queue_->GetUnprocessedOrderNum(); | |
1079 } | |
1080 | |
891 } // namespace content | 1081 } // namespace content |