Chromium Code Reviews

Side by Side Diff: content/common/gpu/gpu_channel.cc

Issue 1308913004: GPU channels now maintain a global order number for each processed IPC. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: No need to forward declare GpuChannelMessage Created 5 years, 3 months ago
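
At its core, the change works like this: a single counter on the IO thread stamps every forwarded IPC with a monotonically increasing global order number, and each channel tracks the highest number it has queued (unprocessed) versus the highest it has finished handling (processed). Below is a minimal sketch of that bookkeeping with Chromium types reduced to placeholders and the locking the real queue does omitted; the counter and field names mirror the patch, everything else is simplified for illustration:

#include <cstdint>
#include <deque>

// Placeholder for IPC::Message plus the metadata the patch attaches.
struct GpuChannelMessage {
  uint32_t order_number;
  // Payload, receive timestamp, and sync point data elided.
};

// Incremented and read only on the IO thread, so it needs no lock.
static uint32_t g_global_order_counter = 0;

class ChannelQueue {
 public:
  // IO thread: stamp an incoming IPC and queue it for the main thread.
  void PushBackMessage(GpuChannelMessage msg) {
    msg.order_number = g_global_order_counter++;
    unprocessed_order_num_ = msg.order_number;
    messages_.push_back(msg);
  }

  // Main thread: record that the IPC with this order number completed.
  void MessageProcessed(uint32_t order_number) {
    processed_order_num_ = order_number;
  }

 private:
  std::deque<GpuChannelMessage> messages_;
  uint32_t unprocessed_order_num_ = 0;  // highest number queued
  uint32_t processed_order_num_ = 0;    // highest number completed
};

The invariant the patch later DCHECKs in HandleMessage follows directly: for any message being handled, processed_order_num_ <= order_number <= unprocessed_order_num_.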
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #if defined(OS_WIN) 5 #if defined(OS_WIN)
6 #include <windows.h> 6 #include <windows.h>
7 #endif 7 #endif
8 8
9 #include "content/common/gpu/gpu_channel.h" 9 #include "content/common/gpu/gpu_channel.h"
10 10
11 #include <algorithm> 11 #include <algorithm>
12 #include <queue> 12 #include <deque>
13 #include <set>
13 #include <vector> 14 #include <vector>
14 15
16 #include "base/atomicops.h"
15 #include "base/bind.h" 17 #include "base/bind.h"
16 #include "base/command_line.h" 18 #include "base/command_line.h"
17 #include "base/location.h" 19 #include "base/location.h"
18 #include "base/single_thread_task_runner.h" 20 #include "base/single_thread_task_runner.h"
19 #include "base/stl_util.h" 21 #include "base/stl_util.h"
20 #include "base/strings/string_util.h" 22 #include "base/strings/string_util.h"
23 #include "base/synchronization/lock.h"
21 #include "base/thread_task_runner_handle.h" 24 #include "base/thread_task_runner_handle.h"
22 #include "base/timer/timer.h" 25 #include "base/timer/timer.h"
23 #include "base/trace_event/memory_dump_manager.h" 26 #include "base/trace_event/memory_dump_manager.h"
24 #include "base/trace_event/process_memory_dump.h" 27 #include "base/trace_event/process_memory_dump.h"
25 #include "base/trace_event/trace_event.h" 28 #include "base/trace_event/trace_event.h"
26 #include "content/common/gpu/gpu_channel_manager.h" 29 #include "content/common/gpu/gpu_channel_manager.h"
27 #include "content/common/gpu/gpu_memory_buffer_factory.h" 30 #include "content/common/gpu/gpu_memory_buffer_factory.h"
28 #include "content/common/gpu/gpu_messages.h" 31 #include "content/common/gpu/gpu_messages.h"
29 #include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h" 32 #include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h"
30 #include "content/public/common/content_switches.h" 33 #include "content/public/common/content_switches.h"
(...skipping 27 matching lines...)
58 const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs; 61 const int64 kPreemptWaitTimeMs = 2 * kVsyncIntervalMs;
59 62
60 // Once we trigger a preemption, the maximum duration that we will wait 63 // Once we trigger a preemption, the maximum duration that we will wait
61 // before clearing the preemption. 64 // before clearing the preemption.
62 const int64 kMaxPreemptTimeMs = kVsyncIntervalMs; 65 const int64 kMaxPreemptTimeMs = kVsyncIntervalMs;
63 66
64 // Stop the preemption once the time for the longest pending IPC drops 67 // Stop the preemption once the time for the longest pending IPC drops
65 // below this threshold. 68 // below this threshold.
66 const int64 kStopPreemptThresholdMs = kVsyncIntervalMs; 69 const int64 kStopPreemptThresholdMs = kVsyncIntervalMs;
67 70
71 const uint32_t kOutOfOrderNumber = -1u;
72
68 } // anonymous namespace 73 } // anonymous namespace
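
A note on the sentinel above: -1u is well defined in C++ and converts modularly to the largest uint32_t, so kOutOfOrderNumber cannot collide with any order number the incrementing counter realistically produces (the counter would have to wrap first). A two-line illustration:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kOutOfOrderNumber = -1u;  // converts to 0xFFFFFFFF
  std::printf("%u\n", kOutOfOrderNumber);  // prints 4294967295
  return 0;
}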
69 74
75 struct GpuChannelMessage {
76 uint32_t order_number;
77 base::TimeTicks time_received;
78 IPC::Message message;
79
80 // TODO(dyen): Temporary sync point data, remove once new sync point lands.
81 bool retire_sync_point;
82 uint32 sync_point_number;
83
84 GpuChannelMessage(uint32_t order_num, const IPC::Message& msg)
85 : order_number(order_num),
86 time_received(base::TimeTicks::Now()),
87 message(msg),
88 retire_sync_point(false),
89 sync_point_number(0) {}
90 };
91
92 class GpuChannelMessageQueue
93 : public base::RefCountedThreadSafe<GpuChannelMessageQueue> {
94 public:
95 static scoped_refptr<GpuChannelMessageQueue> Create(
96 base::WeakPtr<GpuChannel> gpu_channel,
97 scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
98 return new GpuChannelMessageQueue(gpu_channel, task_runner);
99 }
100
101 uint32_t GetUnprocessedOrderNum() {
102 base::AutoLock auto_lock(channel_messages_lock_);
103 return unprocessed_order_num_;
104 }
105
106 void PushBackMessage(uint32_t order_number, const IPC::Message& message) {
107 base::AutoLock auto_lock(channel_messages_lock_);
108 if (enabled_) {
109 PushMessageHelper(order_number,
110 new GpuChannelMessage(order_number, message));
111 }
112 }
113
114 void PushOutOfOrderMessage(const IPC::Message& message) {
115 // These are pushed out of order, so they carry no order number.
116 base::AutoLock auto_lock(channel_messages_lock_);
117 if (enabled_) {
118 PushOutOfOrderHelper(new GpuChannelMessage(kOutOfOrderNumber, message));
119 }
120 }
121
122 bool GenerateSyncPointMessage(gpu::SyncPointManager* sync_point_manager,
123 uint32_t order_number,
124 const IPC::Message& message,
125 bool retire_sync_point,
126 uint32_t* sync_point_number) {
127 DCHECK(message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID);
128 base::AutoLock auto_lock(channel_messages_lock_);
129 if (enabled_) {
130 const uint32 sync_point = sync_point_manager->GenerateSyncPoint();
131
132 GpuChannelMessage* msg = new GpuChannelMessage(order_number, message);
133 msg->retire_sync_point = retire_sync_point;
134 msg->sync_point_number = sync_point;
135
136 *sync_point_number = sync_point;
137 PushMessageHelper(order_number, msg);
138 return true;
139 }
140 return false;
141 }
142
143 bool HasQueuedMessages() {
144 base::AutoLock auto_lock(channel_messages_lock_);
145 return !channel_messages_.empty();
146 }
147
148 base::TimeTicks GetNextMessageTimeTick() {
149 base::AutoLock auto_lock(channel_messages_lock_);
150
151 base::TimeTicks next_message_tick;
152 if (!channel_messages_.empty())
153 next_message_tick = channel_messages_.front()->time_received;
154
155 base::TimeTicks next_out_of_order_tick;
156 if (!out_of_order_messages_.empty())
157 next_out_of_order_tick = out_of_order_messages_.front()->time_received;
158
159 if (next_message_tick.is_null())
160 return next_out_of_order_tick;
161 else if (next_out_of_order_tick.is_null())
162 return next_message_tick;
163 else
164 return std::min(next_message_tick, next_out_of_order_tick);
165 }
166
167 protected:
168 virtual ~GpuChannelMessageQueue() {}
jbauman 2015/09/01 22:30:07 DCHECK(channel_messages_.empty()); DCHECK(out_of_order_messages_.empty());
David Yen 2015/09/01 22:46:52 Done.
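Given the "Done" reply, the next patch set presumably adds the suggested emptiness checks to the destructor, along these lines (a sketch of the requested change, not the landed code):

virtual ~GpuChannelMessageQueue() {
  // DeleteAndDisableMessages() must have drained both queues before
  // destruction; a leftover message could leak an unretired sync point.
  DCHECK(channel_messages_.empty());
  DCHECK(out_of_order_messages_.empty());
}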
169
170 private:
171 friend class GpuChannel;
172 friend class base::RefCountedThreadSafe<GpuChannelMessageQueue>;
173
174 GpuChannelMessageQueue(
175 base::WeakPtr<GpuChannel> gpu_channel,
176 scoped_refptr<base::SingleThreadTaskRunner> task_runner)
177 : enabled_(true),
178 unprocessed_order_num_(0),
179 gpu_channel_(gpu_channel),
180 task_runner_(task_runner) {}
181
182 void DeleteAndDisableMessages(GpuChannelManager* gpu_channel_manager) {
183 {
184 base::AutoLock auto_lock(channel_messages_lock_);
185 DCHECK(enabled_);
186 enabled_ = false;
187 }
188
 189 // We guarantee that the queues will no longer be modified after enabled_
 190 // is set to false, so it is now safe to modify them without the lock.
191 // All public facing modifying functions check enabled_ while all
192 // private modifying functions DCHECK(enabled_) to enforce this.
193 while (!channel_messages_.empty()) {
194 GpuChannelMessage* msg = channel_messages_.front();
195 // This needs to clean up both GpuCommandBufferMsg_InsertSyncPoint and
196 // GpuCommandBufferMsg_RetireSyncPoint messages, safer to just check
197 // if we have a sync point number here.
198 if (msg->sync_point_number) {
199 gpu_channel_manager->sync_point_manager()->RetireSyncPoint(
200 msg->sync_point_number);
201 }
202 delete msg;
203 channel_messages_.pop_front();
204 }
205 STLDeleteElements(&out_of_order_messages_);
206 }
207
208 void PushUnfinishedMessage(uint32_t order_number,
209 const IPC::Message& message) {
 210 // This is pushed only if it was unfinished, so its order number is kept.
211 GpuChannelMessage* msg = new GpuChannelMessage(order_number, message);
212 base::AutoLock auto_lock(channel_messages_lock_);
213 DCHECK(enabled_);
214 const bool was_empty =
215 (channel_messages_.empty() && out_of_order_messages_.empty());
216 if (order_number == kOutOfOrderNumber)
217 out_of_order_messages_.push_front(msg);
218 else
219 channel_messages_.push_front(msg);
220
221 if (was_empty)
222 ScheduleHandleMessage();
223 }
224
225 void ScheduleHandleMessage() {
226 task_runner_->PostTask(
227 FROM_HERE, base::Bind(&GpuChannel::HandleMessage, gpu_channel_));
228 }
229
230 void PushMessageHelper(uint32_t order_number, GpuChannelMessage* msg) {
231 channel_messages_lock_.AssertAcquired();
232 DCHECK(enabled_);
233 unprocessed_order_num_ = order_number;
234 const bool was_empty =
235 channel_messages_.empty() && out_of_order_messages_.empty();
236 channel_messages_.push_back(msg);
237 if (was_empty)
238 ScheduleHandleMessage();
239 }
240
241 void PushOutOfOrderHelper(GpuChannelMessage* msg) {
242 channel_messages_lock_.AssertAcquired();
243 DCHECK(enabled_);
244 const bool was_empty =
245 channel_messages_.empty() && out_of_order_messages_.empty();
246 out_of_order_messages_.push_back(msg);
247 if (was_empty)
248 ScheduleHandleMessage();
249 }
250
251 bool enabled_;
252
253 // Highest IPC order number seen, set when queued on the IO thread.
254 uint32_t unprocessed_order_num_;
255 std::deque<GpuChannelMessage*> channel_messages_;
256 std::deque<GpuChannelMessage*> out_of_order_messages_;
257
258 // This lock protects enabled_, unprocessed_order_num_, and both deques.
259 base::Lock channel_messages_lock_;
260
261 base::WeakPtr<GpuChannel> gpu_channel_;
262 scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
263
264 DISALLOW_COPY_AND_ASSIGN(GpuChannelMessageQueue);
265 };
266
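The push helpers above share one idiom worth calling out: a HandleMessage task is posted only on the empty-to-non-empty transition, and the consumer reposts itself while work remains, so at most one wake-up is ever pending. A standalone sketch of the producer side, using std::mutex in place of base::Lock, with ScheduleHandleMessage() standing in for the PostTask call:

#include <deque>
#include <mutex>

void ScheduleHandleMessage() {
  // Stand-in: the real code posts GpuChannel::HandleMessage to the
  // main-thread task runner here.
}

class MessageQueue {
 public:
  void PushBack(int msg) {
    std::lock_guard<std::mutex> lock(mutex_);
    const bool was_empty = messages_.empty();
    messages_.push_back(msg);
    // Only the first message of a batch needs a wake-up; the consumer
    // reposts itself for as long as the queue stays non-empty.
    if (was_empty)
      ScheduleHandleMessage();
  }

 private:
  std::mutex mutex_;
  std::deque<int> messages_;
};
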
70 // This filter does three things: 267 // This filter does three things:
71 // - it counts and timestamps each message forwarded to the channel 268 // - it counts and timestamps each message forwarded to the channel
72 // so that we can preempt other channels if a message takes too long to 269 // so that we can preempt other channels if a message takes too long to
73 // process. To guarantee fairness, we must wait a minimum amount of time 270 // process. To guarantee fairness, we must wait a minimum amount of time
74 // before preempting and we limit the amount of time that we can preempt in 271 // before preempting and we limit the amount of time that we can preempt in
75 // one shot (see constants above). 272 // one shot (see constants above).
76 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO 273 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
77 // thread, generating the sync point ID and responding immediately, and then 274 // thread, generating the sync point ID and responding immediately, and then
78 // posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message 275 // posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
79 // into the channel's queue. 276 // into the channel's queue.
80 // - it generates mailbox names for clients of the GPU process on the IO thread. 277 // - it generates mailbox names for clients of the GPU process on the IO thread.
81 class GpuChannelMessageFilter : public IPC::MessageFilter { 278 class GpuChannelMessageFilter : public IPC::MessageFilter {
82 public: 279 public:
83 GpuChannelMessageFilter( 280 GpuChannelMessageFilter(
84 base::WeakPtr<GpuChannel> gpu_channel, 281 scoped_refptr<GpuChannelMessageQueue> message_queue,
85 gpu::SyncPointManager* sync_point_manager, 282 gpu::SyncPointManager* sync_point_manager,
86 scoped_refptr<base::SingleThreadTaskRunner> task_runner, 283 scoped_refptr<base::SingleThreadTaskRunner> task_runner,
87 bool future_sync_points) 284 bool future_sync_points)
88 : preemption_state_(IDLE), 285 : preemption_state_(IDLE),
89 gpu_channel_(gpu_channel), 286 message_queue_(message_queue),
90 sender_(nullptr), 287 sender_(nullptr),
91 sync_point_manager_(sync_point_manager), 288 sync_point_manager_(sync_point_manager),
92 task_runner_(task_runner), 289 task_runner_(task_runner),
93 messages_forwarded_to_channel_(0),
94 a_stub_is_descheduled_(false), 290 a_stub_is_descheduled_(false),
95 future_sync_points_(future_sync_points) {} 291 future_sync_points_(future_sync_points) {}
96 292
97 void OnFilterAdded(IPC::Sender* sender) override { 293 void OnFilterAdded(IPC::Sender* sender) override {
98 DCHECK(!sender_); 294 DCHECK(!sender_);
99 sender_ = sender; 295 sender_ = sender;
100 timer_ = make_scoped_ptr(new base::OneShotTimer<GpuChannelMessageFilter>); 296 timer_ = make_scoped_ptr(new base::OneShotTimer<GpuChannelMessageFilter>);
101 } 297 }
102 298
103 void OnFilterRemoved() override { 299 void OnFilterRemoved() override {
104 DCHECK(sender_); 300 DCHECK(sender_);
105 sender_ = nullptr; 301 sender_ = nullptr;
106 timer_ = nullptr; 302 timer_ = nullptr;
107 } 303 }
108 304
109 bool OnMessageReceived(const IPC::Message& message) override { 305 bool OnMessageReceived(const IPC::Message& message) override {
110 DCHECK(sender_); 306 DCHECK(sender_);
111 307
308 const uint32_t order_number = global_order_counter_++;
112 bool handled = false; 309 bool handled = false;
113 if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) && 310 if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) &&
114 !future_sync_points_) { 311 !future_sync_points_) {
115 DLOG(ERROR) << "Untrusted client should not send " 312 DLOG(ERROR) << "Untrusted client should not send "
116 "GpuCommandBufferMsg_RetireSyncPoint message"; 313 "GpuCommandBufferMsg_RetireSyncPoint message";
117 return true; 314 return true;
118 } 315 }
119 316
120 if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) { 317 if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
121 base::Tuple<bool> retire; 318 base::Tuple<bool> retire;
122 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); 319 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
123 if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message, 320 if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message,
124 &retire)) { 321 &retire)) {
125 reply->set_reply_error(); 322 reply->set_reply_error();
126 Send(reply); 323 Send(reply);
127 return true; 324 return true;
128 } 325 }
129 if (!future_sync_points_ && !base::get<0>(retire)) { 326 if (!future_sync_points_ && !base::get<0>(retire)) {
130 LOG(ERROR) << "Untrusted contexts can't create future sync points"; 327 LOG(ERROR) << "Untrusted contexts can't create future sync points";
131 reply->set_reply_error(); 328 reply->set_reply_error();
132 Send(reply); 329 Send(reply);
133 return true; 330 return true;
134 } 331 }
135 uint32 sync_point = sync_point_manager_->GenerateSyncPoint(); 332
333 // Message queue must handle the entire sync point generation because the
334 // message queue could be disabled from the main thread during generation.
335 uint32_t sync_point = 0u;
336 if (!message_queue_->GenerateSyncPointMessage(
337 sync_point_manager_, order_number, message, base::get<0>(retire),
338 &sync_point)) {
339 LOG(ERROR) << "GpuChannel has been destroyed.";
340 reply->set_reply_error();
341 Send(reply);
342 return true;
343 }
344
345 DCHECK_NE(sync_point, 0u);
136 GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point); 346 GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
137 Send(reply); 347 Send(reply);
138 task_runner_->PostTask(
139 FROM_HERE,
140 base::Bind(&GpuChannelMessageFilter::InsertSyncPointOnMainThread,
141 gpu_channel_, sync_point_manager_, message.routing_id(),
142 base::get<0>(retire), sync_point));
143 handled = true; 348 handled = true;
144 } 349 }
145 350
146 // These are handled by GpuJpegDecodeAccelerator and 351 // These are handled by GpuJpegDecodeAccelerator and
147 // GpuVideoDecodeAccelerator. 352 // GpuVideoDecodeAccelerator.
148 // TODO(kcwu) Modify GpuChannel::AddFilter to handle additional filters by 353 // TODO(kcwu) Modify GpuChannel::AddFilter to handle additional filters by
149 // GpuChannelMessageFilter instead of by IPC::SyncChannel directly. Then we 354 // GpuChannelMessageFilter instead of by IPC::SyncChannel directly. Then we
150 // don't need to exclude them one by one here. 355 // don't need to exclude them one by one here.
151 if (message.type() == AcceleratedJpegDecoderMsg_Decode::ID || 356 if (message.type() == AcceleratedJpegDecoderMsg_Decode::ID ||
152 message.type() == AcceleratedJpegDecoderMsg_Destroy::ID || 357 message.type() == AcceleratedJpegDecoderMsg_Destroy::ID ||
153 message.type() == AcceleratedVideoDecoderMsg_Decode::ID) { 358 message.type() == AcceleratedVideoDecoderMsg_Decode::ID) {
154 return false; 359 return false;
155 } 360 }
156 361
157 // All other messages get processed by the GpuChannel. 362 // Forward all other messages to the GPU Channel.
158 messages_forwarded_to_channel_++; 363 if (!handled && !message.is_reply() && !message.should_unblock()) {
159 if (preempting_flag_.get()) 364 if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
160 pending_messages_.push(PendingMessage(messages_forwarded_to_channel_)); 365 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
366 // Move Wait commands to the head of the queue, so the renderer
367 // doesn't have to wait any longer than necessary.
368 message_queue_->PushOutOfOrderMessage(message);
369 } else {
370 message_queue_->PushBackMessage(order_number, message);
371 }
372 handled = true;
373 }
374
161 UpdatePreemptionState(); 375 UpdatePreemptionState();
162
163 return handled; 376 return handled;
164 } 377 }
165 378
166 void MessageProcessed(uint64 messages_processed) { 379 void OnMessageProcessed() { UpdatePreemptionState(); }
167 while (!pending_messages_.empty() &&
168 pending_messages_.front().message_number <= messages_processed)
169 pending_messages_.pop();
170 UpdatePreemptionState();
171 }
172 380
173 void SetPreemptingFlagAndSchedulingState( 381 void SetPreemptingFlagAndSchedulingState(
174 gpu::PreemptionFlag* preempting_flag, 382 gpu::PreemptionFlag* preempting_flag,
175 bool a_stub_is_descheduled) { 383 bool a_stub_is_descheduled) {
176 preempting_flag_ = preempting_flag; 384 preempting_flag_ = preempting_flag;
177 a_stub_is_descheduled_ = a_stub_is_descheduled; 385 a_stub_is_descheduled_ = a_stub_is_descheduled;
178 } 386 }
179 387
180 void UpdateStubSchedulingState(bool a_stub_is_descheduled) { 388 void UpdateStubSchedulingState(bool a_stub_is_descheduled) {
181 a_stub_is_descheduled_ = a_stub_is_descheduled; 389 a_stub_is_descheduled_ = a_stub_is_descheduled;
(...skipping 23 matching lines...)
205 // We would like to preempt, but some stub is descheduled. 413 // We would like to preempt, but some stub is descheduled.
206 WOULD_PREEMPT_DESCHEDULED, 414 WOULD_PREEMPT_DESCHEDULED,
207 }; 415 };
208 416
209 PreemptionState preemption_state_; 417 PreemptionState preemption_state_;
210 418
211 // Maximum amount of time that we can spend in PREEMPTING. 419 // Maximum amount of time that we can spend in PREEMPTING.
212 // It is reset when we transition to IDLE. 420 // It is reset when we transition to IDLE.
213 base::TimeDelta max_preemption_time_; 421 base::TimeDelta max_preemption_time_;
214 422
215 struct PendingMessage {
216 uint64 message_number;
217 base::TimeTicks time_received;
218
219 explicit PendingMessage(uint64 message_number)
220 : message_number(message_number),
221 time_received(base::TimeTicks::Now()) {
222 }
223 };
224
225 void UpdatePreemptionState() { 423 void UpdatePreemptionState() {
226 switch (preemption_state_) { 424 switch (preemption_state_) {
227 case IDLE: 425 case IDLE:
228 if (preempting_flag_.get() && !pending_messages_.empty()) 426 if (preempting_flag_.get() && message_queue_->HasQueuedMessages())
229 TransitionToWaiting(); 427 TransitionToWaiting();
230 break; 428 break;
231 case WAITING: 429 case WAITING:
232 // A timer will transition us to CHECKING. 430 // A timer will transition us to CHECKING.
233 DCHECK(timer_->IsRunning()); 431 DCHECK(timer_->IsRunning());
234 break; 432 break;
235 case CHECKING: 433 case CHECKING: {
236 if (!pending_messages_.empty()) { 434 base::TimeTicks time_tick = message_queue_->GetNextMessageTimeTick();
237 base::TimeDelta time_elapsed = 435 if (!time_tick.is_null()) {
238 base::TimeTicks::Now() - pending_messages_.front().time_received; 436 base::TimeDelta time_elapsed = base::TimeTicks::Now() - time_tick;
239 if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) { 437 if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
240 // Schedule another check for when the IPC may go long. 438 // Schedule another check for when the IPC may go long.
241 timer_->Start( 439 timer_->Start(
242 FROM_HERE, 440 FROM_HERE,
243 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) - 441 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
244 time_elapsed, 442 time_elapsed,
245 this, &GpuChannelMessageFilter::UpdatePreemptionState); 443 this, &GpuChannelMessageFilter::UpdatePreemptionState);
246 } else { 444 } else {
247 if (a_stub_is_descheduled_) 445 if (a_stub_is_descheduled_)
248 TransitionToWouldPreemptDescheduled(); 446 TransitionToWouldPreemptDescheduled();
249 else 447 else
250 TransitionToPreempting(); 448 TransitionToPreempting();
251 } 449 }
252 } 450 }
253 break; 451 } break;
254 case PREEMPTING: 452 case PREEMPTING:
255 // A TransitionToIdle() timer should always be running in this state. 453 // A TransitionToIdle() timer should always be running in this state.
256 DCHECK(timer_->IsRunning()); 454 DCHECK(timer_->IsRunning());
257 if (a_stub_is_descheduled_) 455 if (a_stub_is_descheduled_)
258 TransitionToWouldPreemptDescheduled(); 456 TransitionToWouldPreemptDescheduled();
259 else 457 else
260 TransitionToIdleIfCaughtUp(); 458 TransitionToIdleIfCaughtUp();
261 break; 459 break;
262 case WOULD_PREEMPT_DESCHEDULED: 460 case WOULD_PREEMPT_DESCHEDULED:
263 // A TransitionToIdle() timer should never be running in this state. 461 // A TransitionToIdle() timer should never be running in this state.
264 DCHECK(!timer_->IsRunning()); 462 DCHECK(!timer_->IsRunning());
265 if (!a_stub_is_descheduled_) 463 if (!a_stub_is_descheduled_)
266 TransitionToPreempting(); 464 TransitionToPreempting();
267 else 465 else
268 TransitionToIdleIfCaughtUp(); 466 TransitionToIdleIfCaughtUp();
269 break; 467 break;
270 default: 468 default:
271 NOTREACHED(); 469 NOTREACHED();
272 } 470 }
273 } 471 }
274 472
275 void TransitionToIdleIfCaughtUp() { 473 void TransitionToIdleIfCaughtUp() {
276 DCHECK(preemption_state_ == PREEMPTING || 474 DCHECK(preemption_state_ == PREEMPTING ||
277 preemption_state_ == WOULD_PREEMPT_DESCHEDULED); 475 preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
278 if (pending_messages_.empty()) { 476 base::TimeTicks next_tick = message_queue_->GetNextMessageTimeTick();
477 if (next_tick.is_null()) {
279 TransitionToIdle(); 478 TransitionToIdle();
280 } else { 479 } else {
281 base::TimeDelta time_elapsed = 480 base::TimeDelta time_elapsed = base::TimeTicks::Now() - next_tick;
282 base::TimeTicks::Now() - pending_messages_.front().time_received;
283 if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs) 481 if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
284 TransitionToIdle(); 482 TransitionToIdle();
285 } 483 }
286 } 484 }
287 485
288 void TransitionToIdle() { 486 void TransitionToIdle() {
289 DCHECK(preemption_state_ == PREEMPTING || 487 DCHECK(preemption_state_ == PREEMPTING ||
290 preemption_state_ == WOULD_PREEMPT_DESCHEDULED); 488 preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
291 // Stop any outstanding timer set to force us from PREEMPTING to IDLE. 489 // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
292 timer_->Stop(); 490 timer_->Stop();
(...skipping 65 matching lines...)
358 } 556 }
359 } 557 }
360 558
361 preemption_state_ = WOULD_PREEMPT_DESCHEDULED; 559 preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
362 preempting_flag_->Reset(); 560 preempting_flag_->Reset();
363 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0); 561 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);
364 562
365 UpdatePreemptionState(); 563 UpdatePreemptionState();
366 } 564 }
367 565
368 static void InsertSyncPointOnMainThread( 566 // The message_queue_ is used to handle messages on the main thread.
369 base::WeakPtr<GpuChannel> gpu_channel, 567 scoped_refptr<GpuChannelMessageQueue> message_queue_;
370 gpu::SyncPointManager* manager,
371 int32 routing_id,
372 bool retire,
373 uint32 sync_point) {
374 // This function must ensure that the sync point will be retired. Normally
375 // we'll find the stub based on the routing ID, and associate the sync point
376 // with it, but if that fails for any reason (channel or stub already
377 // deleted, invalid routing id), we need to retire the sync point
378 // immediately.
379 if (gpu_channel) {
380 GpuCommandBufferStub* stub = gpu_channel->LookupCommandBuffer(routing_id);
381 if (stub) {
382 stub->AddSyncPoint(sync_point);
383 if (retire) {
384 GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
385 gpu_channel->OnMessageReceived(message);
386 }
387 return;
388 } else {
389 gpu_channel->MessageProcessed();
390 }
391 }
392 manager->RetireSyncPoint(sync_point);
393 }
394
395 // NOTE: this weak pointer is never dereferenced on the IO thread, it's only
396 // passed through - therefore the WeakPtr assumptions are respected.
397 base::WeakPtr<GpuChannel> gpu_channel_;
398 IPC::Sender* sender_; 568 IPC::Sender* sender_;
399 gpu::SyncPointManager* sync_point_manager_; 569 gpu::SyncPointManager* sync_point_manager_;
400 scoped_refptr<base::SingleThreadTaskRunner> task_runner_; 570 scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
401 scoped_refptr<gpu::PreemptionFlag> preempting_flag_; 571 scoped_refptr<gpu::PreemptionFlag> preempting_flag_;
402 572
403 std::queue<PendingMessage> pending_messages_;
404
405 // Count of the number of IPCs forwarded to the GpuChannel.
406 uint64 messages_forwarded_to_channel_;
407
408 // This timer is created and destroyed on the IO thread. 573 // This timer is created and destroyed on the IO thread.
409 scoped_ptr<base::OneShotTimer<GpuChannelMessageFilter>> timer_; 574 scoped_ptr<base::OneShotTimer<GpuChannelMessageFilter>> timer_;
410 575
411 bool a_stub_is_descheduled_; 576 bool a_stub_is_descheduled_;
412 577
413 // True if this channel can create future sync points. 578 // True if this channel can create future sync points.
414 bool future_sync_points_; 579 bool future_sync_points_;
580
581 // This number is only ever incremented/read on the IO thread.
582 static uint32_t global_order_counter_;
415 }; 583 };
416 584
585 uint32_t GpuChannelMessageFilter::global_order_counter_ = 0;
586
417 GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager, 587 GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
418 GpuWatchdog* watchdog, 588 GpuWatchdog* watchdog,
419 gfx::GLShareGroup* share_group, 589 gfx::GLShareGroup* share_group,
420 gpu::gles2::MailboxManager* mailbox, 590 gpu::gles2::MailboxManager* mailbox,
421 base::SingleThreadTaskRunner* task_runner, 591 base::SingleThreadTaskRunner* task_runner,
422 base::SingleThreadTaskRunner* io_task_runner, 592 base::SingleThreadTaskRunner* io_task_runner,
423 int client_id, 593 int client_id,
424 uint64_t client_tracing_id, 594 uint64_t client_tracing_id,
425 bool software, 595 bool software,
426 bool allow_future_sync_points) 596 bool allow_future_sync_points)
427 : gpu_channel_manager_(gpu_channel_manager), 597 : gpu_channel_manager_(gpu_channel_manager),
428 channel_id_(IPC::Channel::GenerateVerifiedChannelID("gpu")), 598 channel_id_(IPC::Channel::GenerateVerifiedChannelID("gpu")),
429 messages_processed_(0),
430 client_id_(client_id), 599 client_id_(client_id),
431 client_tracing_id_(client_tracing_id), 600 client_tracing_id_(client_tracing_id),
432 task_runner_(task_runner), 601 task_runner_(task_runner),
433 io_task_runner_(io_task_runner), 602 io_task_runner_(io_task_runner),
434 share_group_(share_group ? share_group : new gfx::GLShareGroup), 603 share_group_(share_group ? share_group : new gfx::GLShareGroup),
435 mailbox_manager_(mailbox 604 mailbox_manager_(mailbox
436 ? scoped_refptr<gpu::gles2::MailboxManager>(mailbox) 605 ? scoped_refptr<gpu::gles2::MailboxManager>(mailbox)
437 : gpu::gles2::MailboxManager::Create()), 606 : gpu::gles2::MailboxManager::Create()),
438 subscription_ref_set_(new gpu::gles2::SubscriptionRefSet), 607 subscription_ref_set_(new gpu::gles2::SubscriptionRefSet),
439 pending_valuebuffer_state_(new gpu::ValueStateMap), 608 pending_valuebuffer_state_(new gpu::ValueStateMap),
440 watchdog_(watchdog), 609 watchdog_(watchdog),
441 software_(software), 610 software_(software),
442 handle_messages_scheduled_(false), 611 current_order_num_(0),
443 currently_processing_message_(nullptr), 612 processed_order_num_(0),
444 num_stubs_descheduled_(0), 613 num_stubs_descheduled_(0),
445 allow_future_sync_points_(allow_future_sync_points), 614 allow_future_sync_points_(allow_future_sync_points),
446 weak_factory_(this) { 615 weak_factory_(this) {
447 DCHECK(gpu_channel_manager); 616 DCHECK(gpu_channel_manager);
448 DCHECK(client_id); 617 DCHECK(client_id);
449 618
619 message_queue_ =
620 GpuChannelMessageQueue::Create(weak_factory_.GetWeakPtr(), task_runner);
621
450 filter_ = new GpuChannelMessageFilter( 622 filter_ = new GpuChannelMessageFilter(
451 weak_factory_.GetWeakPtr(), gpu_channel_manager_->sync_point_manager(), 623 message_queue_, gpu_channel_manager_->sync_point_manager(), task_runner_,
452 task_runner_, allow_future_sync_points_); 624 allow_future_sync_points_);
453 625
454 subscription_ref_set_->AddObserver(this); 626 subscription_ref_set_->AddObserver(this);
455 } 627 }
456 628
457 GpuChannel::~GpuChannel() { 629 GpuChannel::~GpuChannel() {
458 // Clear stubs first because of dependencies. 630 // Clear stubs first because of dependencies.
459 stubs_.clear(); 631 stubs_.clear();
460 632
461 STLDeleteElements(&deferred_messages_); 633 message_queue_->DeleteAndDisableMessages(gpu_channel_manager_);
634
462 subscription_ref_set_->RemoveObserver(this); 635 subscription_ref_set_->RemoveObserver(this);
463 if (preempting_flag_.get()) 636 if (preempting_flag_.get())
464 preempting_flag_->Reset(); 637 preempting_flag_->Reset();
465 } 638 }
466 639
467 IPC::ChannelHandle GpuChannel::Init(base::WaitableEvent* shutdown_event, 640 IPC::ChannelHandle GpuChannel::Init(base::WaitableEvent* shutdown_event,
468 IPC::AttachmentBroker* attachment_broker) { 641 IPC::AttachmentBroker* attachment_broker) {
469 DCHECK(shutdown_event); 642 DCHECK(shutdown_event);
470 DCHECK(!channel_); 643 DCHECK(!channel_);
471 644
(...skipping 14 matching lines...)
486 channel_->AddFilter(filter_.get()); 659 channel_->AddFilter(filter_.get());
487 660
488 return channel_handle; 661 return channel_handle;
489 } 662 }
490 663
491 base::ProcessId GpuChannel::GetClientPID() const { 664 base::ProcessId GpuChannel::GetClientPID() const {
492 return channel_->GetPeerPID(); 665 return channel_->GetPeerPID();
493 } 666 }
494 667
495 bool GpuChannel::OnMessageReceived(const IPC::Message& message) { 668 bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
496 DVLOG(1) << "received message @" << &message << " on channel @" << this 669 // All messages should be pushed to channel_messages_ and handled separately.
497 << " with type " << message.type(); 670 NOTREACHED();
498 671 return false;
499 if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
500 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
501 // Move Wait commands to the head of the queue, so the renderer
502 // doesn't have to wait any longer than necessary.
503 deferred_messages_.push_front(new IPC::Message(message));
504 } else {
505 deferred_messages_.push_back(new IPC::Message(message));
506 }
507
508 OnScheduled();
509
510 return true;
511 } 672 }
512 673
513 void GpuChannel::OnChannelError() { 674 void GpuChannel::OnChannelError() {
514 gpu_channel_manager_->RemoveChannel(client_id_); 675 gpu_channel_manager_->RemoveChannel(client_id_);
515 } 676 }
516 677
517 bool GpuChannel::Send(IPC::Message* message) { 678 bool GpuChannel::Send(IPC::Message* message) {
518 // The GPU process must never send a synchronous IPC message to the renderer 679 // The GPU process must never send a synchronous IPC message to the renderer
519 // process. This could result in deadlock. 680 // process. This could result in deadlock.
520 DCHECK(!message->is_sync()); 681 DCHECK(!message->is_sync());
(...skipping 12 matching lines...)
533 void GpuChannel::OnAddSubscription(unsigned int target) { 694 void GpuChannel::OnAddSubscription(unsigned int target) {
534 gpu_channel_manager()->Send( 695 gpu_channel_manager()->Send(
535 new GpuHostMsg_AddSubscription(client_id_, target)); 696 new GpuHostMsg_AddSubscription(client_id_, target));
536 } 697 }
537 698
538 void GpuChannel::OnRemoveSubscription(unsigned int target) { 699 void GpuChannel::OnRemoveSubscription(unsigned int target) {
539 gpu_channel_manager()->Send( 700 gpu_channel_manager()->Send(
540 new GpuHostMsg_RemoveSubscription(client_id_, target)); 701 new GpuHostMsg_RemoveSubscription(client_id_, target));
541 } 702 }
542 703
543 void GpuChannel::RequeueMessage() {
544 DCHECK(currently_processing_message_);
545 deferred_messages_.push_front(
546 new IPC::Message(*currently_processing_message_));
547 messages_processed_--;
548 currently_processing_message_ = NULL;
549 }
550
551 void GpuChannel::OnScheduled() {
552 if (handle_messages_scheduled_)
553 return;
554 // Post a task to handle any deferred messages. The deferred message queue is
555 // not emptied here, which ensures that OnMessageReceived will continue to
556 // defer newly received messages until the ones in the queue have all been
557 // handled by HandleMessage. HandleMessage is invoked as a
558 // task to prevent reentrancy.
559 task_runner_->PostTask(FROM_HERE, base::Bind(&GpuChannel::HandleMessage,
560 weak_factory_.GetWeakPtr()));
561 handle_messages_scheduled_ = true;
jbauman 2015/09/01 22:30:07 One thing I am a bit worried about is that we may
562 }
563
564 void GpuChannel::StubSchedulingChanged(bool scheduled) { 704 void GpuChannel::StubSchedulingChanged(bool scheduled) {
565 bool a_stub_was_descheduled = num_stubs_descheduled_ > 0; 705 bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
566 if (scheduled) { 706 if (scheduled) {
567 num_stubs_descheduled_--; 707 num_stubs_descheduled_--;
568 OnScheduled(); 708 message_queue_->ScheduleHandleMessage();
569 } else { 709 } else {
570 num_stubs_descheduled_++; 710 num_stubs_descheduled_++;
571 } 711 }
572 DCHECK_LE(num_stubs_descheduled_, stubs_.size()); 712 DCHECK_LE(num_stubs_descheduled_, stubs_.size());
573 bool a_stub_is_descheduled = num_stubs_descheduled_ > 0; 713 bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;
574 714
575 if (a_stub_is_descheduled != a_stub_was_descheduled) { 715 if (a_stub_is_descheduled != a_stub_was_descheduled) {
576 if (preempting_flag_.get()) { 716 if (preempting_flag_.get()) {
577 io_task_runner_->PostTask( 717 io_task_runner_->PostTask(
578 FROM_HERE, 718 FROM_HERE,
(...skipping 105 matching lines...)
684 OnDestroyCommandBuffer) 824 OnDestroyCommandBuffer)
685 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuMsg_CreateJpegDecoder, 825 IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuMsg_CreateJpegDecoder,
686 OnCreateJpegDecoder) 826 OnCreateJpegDecoder)
687 IPC_MESSAGE_UNHANDLED(handled = false) 827 IPC_MESSAGE_UNHANDLED(handled = false)
688 IPC_END_MESSAGE_MAP() 828 IPC_END_MESSAGE_MAP()
689 DCHECK(handled) << msg.type(); 829 DCHECK(handled) << msg.type();
690 return handled; 830 return handled;
691 } 831 }
692 832
693 void GpuChannel::HandleMessage() { 833 void GpuChannel::HandleMessage() {
694 handle_messages_scheduled_ = false; 834 GpuChannelMessage* m = nullptr;
695 if (deferred_messages_.empty()) 835 GpuCommandBufferStub* stub = nullptr;
836 bool message_queue_empty = true;
837 {
838 base::AutoLock auto_lock(message_queue_->channel_messages_lock_);
839 if (!message_queue_->out_of_order_messages_.empty()) {
840 m = message_queue_->out_of_order_messages_.front();
841 DCHECK(m->order_number == kOutOfOrderNumber);
842 message_queue_->out_of_order_messages_.pop_front();
843 } else if (!message_queue_->channel_messages_.empty()) {
844 m = message_queue_->channel_messages_.front();
845 DCHECK(m->order_number != kOutOfOrderNumber);
846 message_queue_->channel_messages_.pop_front();
847 } else {
848 // No messages to process
849 return;
850 }
851
852 message_queue_empty = message_queue_->out_of_order_messages_.empty() &&
853 message_queue_->channel_messages_.empty();
854 }
855
856 bool retry_message = false;
857 stub = stubs_.get(m->message.routing_id());
858 if (stub) {
859 if (!stub->IsScheduled()) {
860 retry_message = true;
861 }
862 if (stub->IsPreempted()) {
863 retry_message = true;
864 message_queue_->ScheduleHandleMessage();
865 }
866 }
867
868 if (retry_message) {
869 base::AutoLock auto_lock(message_queue_->channel_messages_lock_);
870 if (m->order_number == kOutOfOrderNumber)
871 message_queue_->out_of_order_messages_.push_front(m);
872 else
873 message_queue_->channel_messages_.push_front(m);
696 return; 874 return;
875 } else if (!message_queue_empty) {
876 message_queue_->ScheduleHandleMessage();
877 }
697 878
698 IPC::Message* m = NULL; 879 scoped_ptr<GpuChannelMessage> scoped_message(m);
699 GpuCommandBufferStub* stub = NULL; 880 const uint32_t order_number = m->order_number;
881 const int32_t routing_id = m->message.routing_id();
700 882
701 m = deferred_messages_.front(); 883 // TODO(dyen): Temporary handling of old sync points.
702 stub = stubs_.get(m->routing_id()); 884 // This must ensure that the sync point will be retired. Normally we'll
703 if (stub) { 885 // find the stub based on the routing ID, and associate the sync point
704 if (!stub->IsScheduled()) 886 // with it, but if that fails for any reason (channel or stub already
705 return; 887 // deleted, invalid routing id), we need to retire the sync point
706 if (stub->IsPreempted()) { 888 // immediately.
707 OnScheduled(); 889 if (m->message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
890 const bool retire = m->retire_sync_point;
891 const uint32_t sync_point = m->sync_point_number;
892 if (stub) {
893 stub->AddSyncPoint(sync_point);
894 if (retire) {
895 m->message =
896 GpuCommandBufferMsg_RetireSyncPoint(routing_id, sync_point);
897 }
898 } else {
899 current_order_num_ = order_number;
900 gpu_channel_manager_->sync_point_manager()->RetireSyncPoint(sync_point);
901 MessageProcessed(order_number);
708 return; 902 return;
709 } 903 }
710 } 904 }
711 905
712 scoped_ptr<IPC::Message> message(m); 906 IPC::Message* message = &m->message;
713 deferred_messages_.pop_front();
714 bool message_processed = true; 907 bool message_processed = true;
715 908
716 currently_processing_message_ = message.get(); 909 DVLOG(1) << "received message @" << message << " on channel @" << this
717 bool result; 910 << " with type " << message->type();
718 if (message->routing_id() == MSG_ROUTING_CONTROL) 911
912 if (order_number != kOutOfOrderNumber) {
913 // Make sure this is a valid unprocessed order number.
914 DCHECK(order_number <= GetUnprocessedOrderNum() &&
915 order_number >= GetProcessedOrderNum());
916
917 current_order_num_ = order_number;
918 }
919 bool result = false;
920 if (routing_id == MSG_ROUTING_CONTROL)
719 result = OnControlMessageReceived(*message); 921 result = OnControlMessageReceived(*message);
720 else 922 else
721 result = router_.RouteMessage(*message); 923 result = router_.RouteMessage(*message);
722 currently_processing_message_ = NULL;
723 924
724 if (!result) { 925 if (!result) {
725 // Respond to sync messages even if router failed to route. 926 // Respond to sync messages even if router failed to route.
726 if (message->is_sync()) { 927 if (message->is_sync()) {
727 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message); 928 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message);
728 reply->set_reply_error(); 929 reply->set_reply_error();
729 Send(reply); 930 Send(reply);
730 } 931 }
731 } else { 932 } else {
732 // If the command buffer becomes unscheduled as a result of handling the 933 // If the command buffer becomes unscheduled as a result of handling the
733 // message but still has more commands to process, synthesize an IPC 934 // message but still has more commands to process, synthesize an IPC
734 // message to flush that command buffer. 935 // message to flush that command buffer.
735 if (stub) { 936 if (stub) {
736 if (stub->HasUnprocessedCommands()) { 937 if (stub->HasUnprocessedCommands()) {
737 deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled( 938 message_queue_->PushUnfinishedMessage(
738 stub->route_id())); 939 order_number, GpuCommandBufferMsg_Rescheduled(stub->route_id()));
739 message_processed = false; 940 message_processed = false;
740 } 941 }
741 } 942 }
742 } 943 }
743 if (message_processed) 944 if (message_processed)
744 MessageProcessed(); 945 MessageProcessed(order_number);
745
746 if (!deferred_messages_.empty()) {
747 OnScheduled();
748 }
749 } 946 }
750 947
751 void GpuChannel::OnCreateOffscreenCommandBuffer( 948 void GpuChannel::OnCreateOffscreenCommandBuffer(
752 const gfx::Size& size, 949 const gfx::Size& size,
753 const GPUCreateCommandBufferConfig& init_params, 950 const GPUCreateCommandBufferConfig& init_params,
754 int32 route_id, 951 int32 route_id,
755 bool* succeeded) { 952 bool* succeeded) {
756 TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer", "route_id", 953 TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer", "route_id",
757 route_id); 954 route_id);
758 955
(...skipping 48 matching lines...)
807 } 1004 }
808 } 1005 }
809 1006
810 void GpuChannel::OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg) { 1007 void GpuChannel::OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg) {
811 if (!jpeg_decoder_) { 1008 if (!jpeg_decoder_) {
812 jpeg_decoder_.reset(new GpuJpegDecodeAccelerator(this, io_task_runner_)); 1009 jpeg_decoder_.reset(new GpuJpegDecodeAccelerator(this, io_task_runner_));
813 } 1010 }
814 jpeg_decoder_->AddClient(route_id, reply_msg); 1011 jpeg_decoder_->AddClient(route_id, reply_msg);
815 } 1012 }
816 1013
817 void GpuChannel::MessageProcessed() { 1014 void GpuChannel::MessageProcessed(uint32_t order_number) {
818 messages_processed_++; 1015 if (order_number != kOutOfOrderNumber) {
1016 DCHECK(current_order_num_ == order_number);
1017 processed_order_num_ = order_number;
1018 }
819 if (preempting_flag_.get()) { 1019 if (preempting_flag_.get()) {
820 io_task_runner_->PostTask( 1020 io_task_runner_->PostTask(
821 FROM_HERE, base::Bind(&GpuChannelMessageFilter::MessageProcessed, 1021 FROM_HERE,
822 filter_, messages_processed_)); 1022 base::Bind(&GpuChannelMessageFilter::OnMessageProcessed, filter_));
823 } 1023 }
824 } 1024 }
825 1025
826 void GpuChannel::CacheShader(const std::string& key, 1026 void GpuChannel::CacheShader(const std::string& key,
827 const std::string& shader) { 1027 const std::string& shader) {
828 gpu_channel_manager_->Send( 1028 gpu_channel_manager_->Send(
829 new GpuHostMsg_CacheShader(client_id_, key, shader)); 1029 new GpuHostMsg_CacheShader(client_id_, key, shader));
830 } 1030 }
831 1031
832 void GpuChannel::AddFilter(IPC::MessageFilter* filter) { 1032 void GpuChannel::AddFilter(IPC::MessageFilter* filter) {
(...skipping 48 matching lines...)
881 client_id_); 1081 client_id_);
882 } 1082 }
883 } 1083 }
884 } 1084 }
885 1085
886 void GpuChannel::HandleUpdateValueState( 1086 void GpuChannel::HandleUpdateValueState(
887 unsigned int target, const gpu::ValueState& state) { 1087 unsigned int target, const gpu::ValueState& state) {
888 pending_valuebuffer_state_->UpdateState(target, state); 1088 pending_valuebuffer_state_->UpdateState(target, state);
889 } 1089 }
890 1090
1091 uint32_t GpuChannel::GetUnprocessedOrderNum() const {
1092 return message_queue_->GetUnprocessedOrderNum();
1093 }
1094
891 } // namespace content 1095 } // namespace content
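
Taken together, the new flow is: the IO-thread filter stamps and queues each IPC, and the main thread handles exactly one message per HandleMessage task, pushing a message back to the front of its queue when the target stub is descheduled or preempted so channel order is preserved. A condensed, self-contained sketch of that consumer loop; every type here is a simplified stand-in, and a plain std::queue plays the role of the single-threaded task runner:

#include <cstdio>
#include <deque>
#include <functional>
#include <queue>

struct Msg {
  const char* name;
  bool stub_ready;  // stand-in for IsScheduled() && !IsPreempted()
};

struct Channel {
  std::deque<Msg> messages;
  std::queue<std::function<void()>> tasks;  // fake main-thread task runner

  void ScheduleHandleMessage() {
    tasks.push([this] { HandleMessage(); });
  }

  // Handles at most one message per invocation, as in the patch.
  void HandleMessage() {
    if (messages.empty())
      return;
    Msg m = messages.front();
    messages.pop_front();
    if (!m.stub_ready) {
      messages.push_front(m);  // retry later without losing channel order
      return;
    }
    std::printf("dispatch %s\n", m.name);
    if (!messages.empty())
      ScheduleHandleMessage();  // keep draining, one message per task
  }
};

int main() {
  Channel ch;
  ch.messages = {{"AsyncFlush", true}, {"InsertSyncPoint", true}};
  ch.ScheduleHandleMessage();
  while (!ch.tasks.empty()) {  // drain the fake task runner
    ch.tasks.front()();
    ch.tasks.pop();
  }
  return 0;
}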