Chromium Code Reviews

Unified Diff: content/common/gpu/gpu_channel.cc

Issue 1308913004: GPU channels now maintain a global order number for each processed IPC. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Updated command buffer stub to use 32-bit order numbers Created 5 years, 3 months ago
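
The global order number this patch introduces is a process-wide, monotonically increasing counter handed out by GpuChannelManager; every IPC routed through any channel's filter gets one. A minimal sketch of what the GenerateGlobalOrderNumber() call site below implies (the counter member and its exact form are assumptions; only the call appears in this diff):

// Assumed sketch: all GpuChannel filters run on the single GPU IO thread,
// so a plain increment is enough to hand every routed IPC a unique,
// monotonically increasing order number.
uint32_t GpuChannelManager::GenerateGlobalOrderNumber() {
  return ++global_order_counter_;  // Assumed uint32_t member, starting at 0.
}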
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #if defined(OS_WIN)
 #include <windows.h>
 #endif

 #include "content/common/gpu/gpu_channel.h"

 #include <algorithm>
 #include <queue>
 #include <vector>

 #include "base/bind.h"
 #include "base/command_line.h"
 #include "base/location.h"
 #include "base/single_thread_task_runner.h"
 #include "base/stl_util.h"
 #include "base/strings/string_util.h"
 #include "base/thread_task_runner_handle.h"
-#include "base/timer/timer.h"
 #include "base/trace_event/memory_dump_manager.h"
 #include "base/trace_event/process_memory_dump.h"
 #include "base/trace_event/trace_event.h"
 #include "content/common/gpu/gpu_channel_manager.h"
 #include "content/common/gpu/gpu_memory_buffer_factory.h"
 #include "content/common/gpu/gpu_messages.h"
 #include "content/common/gpu/media/gpu_jpeg_decode_accelerator.h"
 #include "content/public/common/content_switches.h"
 #include "gpu/command_buffer/common/mailbox.h"
 #include "gpu/command_buffer/common/value_state.h"
(...skipping 41 matching lines...)
 // before preempting and we limit the amount of time that we can preempt in
 // one shot (see constants above).
 // - it handles the GpuCommandBufferMsg_InsertSyncPoint message on the IO
 //   thread, generating the sync point ID and responding immediately, and then
 //   posting a task to insert the GpuCommandBufferMsg_RetireSyncPoint message
 //   into the channel's queue.
 // - it generates mailbox names for clients of the GPU process on the IO thread.
 class GpuChannelMessageFilter : public IPC::MessageFilter {
  public:
   GpuChannelMessageFilter(
+      GpuChannel* gpu_channel_io,
       base::WeakPtr<GpuChannel> gpu_channel,
       gpu::SyncPointManager* sync_point_manager,
       scoped_refptr<base::SingleThreadTaskRunner> task_runner,
       bool future_sync_points)
       : preemption_state_(IDLE),
+        gpu_channel_io_(gpu_channel_io),
         gpu_channel_(gpu_channel),
         sender_(nullptr),
         sync_point_manager_(sync_point_manager),
         task_runner_(task_runner),
-        messages_forwarded_to_channel_(0),
         a_stub_is_descheduled_(false),
         future_sync_points_(future_sync_points) {}

   void OnFilterAdded(IPC::Sender* sender) override {
     DCHECK(!sender_);
     sender_ = sender;
     timer_ = make_scoped_ptr(new base::OneShotTimer<GpuChannelMessageFilter>);
   }

   void OnFilterRemoved() override {
     DCHECK(sender_);
     sender_ = nullptr;
     timer_ = nullptr;
   }

   bool OnMessageReceived(const IPC::Message& message) override {
     DCHECK(sender_);

+    const uint32_t order_number =
+        gpu_channel_io_->gpu_channel_manager()->GenerateGlobalOrderNumber();

piman 2015/08/31 23:15:04: I don't think gpu_channel_io_ is safe to use, beca…
David Yen 2015/09/01 02:01:52: Having the filter own the messages didn't feel rig…
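
piman's (truncated) concern is about lifetime: the filter is refcounted and keeps receiving messages on the IO thread, while the GpuChannel behind the raw gpu_channel_io_ pointer is owned and destroyed on the main thread. One way to bound the raw pointer, shown purely as an assumed sketch and not something in this patch set, is to clear it in the filter's last IO-thread callback:

void GpuChannelMessageFilter::OnFilterRemoved() {
  DCHECK(sender_);
  sender_ = nullptr;
  timer_ = nullptr;
  // Assumed guard, not in this patch: OnFilterRemoved() is the last
  // IO-thread callback, so the raw channel pointer must never be
  // dereferenced after this point.
  gpu_channel_io_ = nullptr;
}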
     bool handled = false;
     if ((message.type() == GpuCommandBufferMsg_RetireSyncPoint::ID) &&
         !future_sync_points_) {
       DLOG(ERROR) << "Untrusted client should not send "
                      "GpuCommandBufferMsg_RetireSyncPoint message";
       return true;
     }

     if (message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
       base::Tuple<bool> retire;
       IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message);
       if (!GpuCommandBufferMsg_InsertSyncPoint::ReadSendParam(&message,
                                                               &retire)) {
         reply->set_reply_error();
         Send(reply);
         return true;
       }
       if (!future_sync_points_ && !base::get<0>(retire)) {
         LOG(ERROR) << "Untrusted contexts can't create future sync points";
         reply->set_reply_error();
         Send(reply);
         return true;
       }
       uint32 sync_point = sync_point_manager_->GenerateSyncPoint();
       GpuCommandBufferMsg_InsertSyncPoint::WriteReplyParams(reply, sync_point);
       Send(reply);
-      task_runner_->PostTask(
-          FROM_HERE,
-          base::Bind(&GpuChannelMessageFilter::InsertSyncPointOnMainThread,
-                     gpu_channel_, sync_point_manager_, message.routing_id(),
-                     base::get<0>(retire), sync_point));
+
+      gpu_channel_io_->PushSyncPointMessage(order_number, message,
+                                            base::get<0>(retire), sync_point);
       handled = true;
     }

     // These are handled by GpuJpegDecodeAccelerator and
     // GpuVideoDecodeAccelerator.
     // TODO(kcwu) Modify GpuChannel::AddFilter to handle additional filters by
     // GpuChannelMessageFilter instead of by IPC::SyncChannel directly. Then we
     // don't need to exclude them one by one here.
     if (message.type() == AcceleratedJpegDecoderMsg_Decode::ID ||
         message.type() == AcceleratedJpegDecoderMsg_Destroy::ID ||
         message.type() == AcceleratedVideoDecoderMsg_Decode::ID) {
       return false;
     }

-    // All other messages get processed by the GpuChannel.
-    messages_forwarded_to_channel_++;
-    if (preempting_flag_.get())
-      pending_messages_.push(PendingMessage(messages_forwarded_to_channel_));
+    // Forward all other messages to the GPU Channel.
+    if (!handled && !message.is_reply() && !message.should_unblock()) {
+      if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
+          message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
+        // Move Wait commands to the head of the queue, so the renderer
+        // doesn't have to wait any longer than necessary.
+        gpu_channel_io_->PushFrontMessage(message);
+      } else {
+        gpu_channel_io_->PushBackMessage(order_number, message);
+      }
+      handled = true;
+    }
+
     UpdatePreemptionState();
-
     return handled;
   }
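
The forwarding rule above amounts to a two-priority deque: Wait messages jump to the front because the renderer is blocked on their replies, while everything else appends in global order. A self-contained sketch of the same pattern, with the payload reduced to the order number (the names here are illustrative, not from the patch):

#include <cstdint>
#include <deque>

// Sentinel the patch uses for messages pushed out of order.
const uint32_t kOutOfOrderNum = static_cast<uint32_t>(-1);

struct QueuedMsg {
  uint32_t order_number;
};

// Wait-style messages bypass the global ordering; everything else keeps it.
void Push(std::deque<QueuedMsg>* queue, uint32_t order_number, bool is_wait) {
  if (is_wait)
    queue->push_front({kOutOfOrderNum});
  else
    queue->push_back({order_number});
}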

-  void MessageProcessed(uint64 messages_processed) {
-    while (!pending_messages_.empty() &&
-           pending_messages_.front().message_number <= messages_processed)
-      pending_messages_.pop();
+  void OnMessageProcessed() {
     UpdatePreemptionState();
   }

   void SetPreemptingFlagAndSchedulingState(
       gpu::PreemptionFlag* preempting_flag,
       bool a_stub_is_descheduled) {
     preempting_flag_ = preempting_flag;
     a_stub_is_descheduled_ = a_stub_is_descheduled;
   }

(...skipping 25 matching lines...)
     // We would like to preempt, but some stub is descheduled.
     WOULD_PREEMPT_DESCHEDULED,
   };

   PreemptionState preemption_state_;

   // Maximum amount of time that we can spend in PREEMPTING.
   // It is reset when we transition to IDLE.
   base::TimeDelta max_preemption_time_;

-  struct PendingMessage {
-    uint64 message_number;
-    base::TimeTicks time_received;
-
-    explicit PendingMessage(uint64 message_number)
-        : message_number(message_number),
-          time_received(base::TimeTicks::Now()) {
-    }
-  };
-
   void UpdatePreemptionState() {
     switch (preemption_state_) {
       case IDLE:
-        if (preempting_flag_.get() && !pending_messages_.empty())
+        if (preempting_flag_.get() && gpu_channel_io_->HasQueuedMessages())
           TransitionToWaiting();
         break;
       case WAITING:
         // A timer will transition us to CHECKING.
         DCHECK(timer_->IsRunning());
         break;
       case CHECKING:
-        if (!pending_messages_.empty()) {
-          base::TimeDelta time_elapsed =
-              base::TimeTicks::Now() - pending_messages_.front().time_received;
-          if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
-            // Schedule another check for when the IPC may go long.
-            timer_->Start(
-                FROM_HERE,
-                base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
-                    time_elapsed,
-                this, &GpuChannelMessageFilter::UpdatePreemptionState);
-          } else {
-            if (a_stub_is_descheduled_)
-              TransitionToWouldPreemptDescheduled();
-            else
-              TransitionToPreempting();
+        {
+          base::TimeTicks time_tick = gpu_channel_io_->GetNextMessageTimeTick();
+          if (!time_tick.is_null()) {
+            base::TimeDelta time_elapsed = base::TimeTicks::Now() - time_tick;
+            if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) {
+              // Schedule another check for when the IPC may go long.
+              timer_->Start(
+                  FROM_HERE,
+                  base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) -
+                      time_elapsed,
+                  this, &GpuChannelMessageFilter::UpdatePreemptionState);
+            } else {
+              if (a_stub_is_descheduled_)
+                TransitionToWouldPreemptDescheduled();
+              else
+                TransitionToPreempting();
+            }
           }
         }
         break;
       case PREEMPTING:
         // A TransitionToIdle() timer should always be running in this state.
         DCHECK(timer_->IsRunning());
         if (a_stub_is_descheduled_)
           TransitionToWouldPreemptDescheduled();
         else
           TransitionToIdleIfCaughtUp();
         break;
       case WOULD_PREEMPT_DESCHEDULED:
         // A TransitionToIdle() timer should never be running in this state.
         DCHECK(!timer_->IsRunning());
         if (!a_stub_is_descheduled_)
           TransitionToPreempting();
         else
           TransitionToIdleIfCaughtUp();
         break;
       default:
         NOTREACHED();
     }
   }

   void TransitionToIdleIfCaughtUp() {
     DCHECK(preemption_state_ == PREEMPTING ||
            preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
-    if (pending_messages_.empty()) {
+    base::TimeTicks next_tick = gpu_channel_io_->GetNextMessageTimeTick();
+    if (next_tick.is_null()) {
       TransitionToIdle();
     } else {
-      base::TimeDelta time_elapsed =
-          base::TimeTicks::Now() - pending_messages_.front().time_received;
+      base::TimeDelta time_elapsed = base::TimeTicks::Now() - next_tick;
       if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs)
         TransitionToIdle();
     }
   }

   void TransitionToIdle() {
     DCHECK(preemption_state_ == PREEMPTING ||
            preemption_state_ == WOULD_PREEMPT_DESCHEDULED);
     // Stop any outstanding timer set to force us from PREEMPTING to IDLE.
     timer_->Stop();
(...skipping 65 matching lines...)
       }
     }

     preemption_state_ = WOULD_PREEMPT_DESCHEDULED;
     preempting_flag_->Reset();
     TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0);

     UpdatePreemptionState();
   }

-  static void InsertSyncPointOnMainThread(
-      base::WeakPtr<GpuChannel> gpu_channel,
-      gpu::SyncPointManager* manager,
-      int32 routing_id,
-      bool retire,
-      uint32 sync_point) {
-    // This function must ensure that the sync point will be retired. Normally
-    // we'll find the stub based on the routing ID, and associate the sync point
-    // with it, but if that fails for any reason (channel or stub already
-    // deleted, invalid routing id), we need to retire the sync point
-    // immediately.
-    if (gpu_channel) {
-      GpuCommandBufferStub* stub = gpu_channel->LookupCommandBuffer(routing_id);
-      if (stub) {
-        stub->AddSyncPoint(sync_point);
-        if (retire) {
-          GpuCommandBufferMsg_RetireSyncPoint message(routing_id, sync_point);
-          gpu_channel->OnMessageReceived(message);
-        }
-        return;
-      } else {
-        gpu_channel->MessageProcessed();
-      }
-    }
-    manager->RetireSyncPoint(sync_point);
-  }
-
-  // NOTE: this weak pointer is never dereferenced on the IO thread, it's only
-  // passed through - therefore the WeakPtr assumptions are respected.
+  // NOTE: The gpu_channel_io_ pointer should only be used for thread-safe
+  // function calls. The gpu_channel_ weak pointer is only dereferenced on the
+  // main thread - therefore the WeakPtr assumptions are respected.
+  GpuChannel* gpu_channel_io_;
   base::WeakPtr<GpuChannel> gpu_channel_;
   IPC::Sender* sender_;
   gpu::SyncPointManager* sync_point_manager_;
   scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
   scoped_refptr<gpu::PreemptionFlag> preempting_flag_;

-  std::queue<PendingMessage> pending_messages_;
-
-  // Count of the number of IPCs forwarded to the GpuChannel.
-  uint64 messages_forwarded_to_channel_;
-
   // This timer is created and destroyed on the IO thread.
   scoped_ptr<base::OneShotTimer<GpuChannelMessageFilter>> timer_;

   bool a_stub_is_descheduled_;

   // True if this channel can create future sync points.
   bool future_sync_points_;
 };

 GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager,
                        GpuWatchdog* watchdog,
                        gfx::GLShareGroup* share_group,
                        gpu::gles2::MailboxManager* mailbox,
                        base::SingleThreadTaskRunner* task_runner,
                        base::SingleThreadTaskRunner* io_task_runner,
                        int client_id,
                        uint64_t client_tracing_id,
                        bool software,
                        bool allow_future_sync_points)
     : gpu_channel_manager_(gpu_channel_manager),
       channel_id_(IPC::Channel::GenerateVerifiedChannelID("gpu")),
-      messages_processed_(0),
+      handle_messages_scheduled_(false),
       client_id_(client_id),
       client_tracing_id_(client_tracing_id),
       task_runner_(task_runner),
       io_task_runner_(io_task_runner),
       share_group_(share_group ? share_group : new gfx::GLShareGroup),
       mailbox_manager_(mailbox
                            ? scoped_refptr<gpu::gles2::MailboxManager>(mailbox)
                            : gpu::gles2::MailboxManager::Create()),
       subscription_ref_set_(new gpu::gles2::SubscriptionRefSet),
       pending_valuebuffer_state_(new gpu::ValueStateMap),
       watchdog_(watchdog),
       software_(software),
-      handle_messages_scheduled_(false),
-      currently_processing_message_(nullptr),
+      current_order_num_(0),
+      processed_order_num_(0),
+      unprocessed_order_num_(0),
       num_stubs_descheduled_(0),
       allow_future_sync_points_(allow_future_sync_points),
       weak_factory_(this) {
   DCHECK(gpu_channel_manager);
   DCHECK(client_id);

   filter_ = new GpuChannelMessageFilter(
-      weak_factory_.GetWeakPtr(), gpu_channel_manager_->sync_point_manager(),
+      this, weak_factory_.GetWeakPtr(),
+      gpu_channel_manager_->sync_point_manager(),
       task_runner_, allow_future_sync_points_);

   subscription_ref_set_->AddObserver(this);
 }

 GpuChannel::~GpuChannel() {
   // Clear stubs first because of dependencies.
   stubs_.clear();

-  STLDeleteElements(&deferred_messages_);
+  {
+    base::AutoLock auto_lock(channel_messages_lock_);
+    STLDeleteElements(&channel_messages_);

piman 2015/08/31 23:15:04: We need to handle unprocessed InsertSyncPoint here…
David Yen 2015/09/01 02:01:52: Done. Although isn't this the same as how it was d…
piman 2015/09/01 03:55:26: Before, InsertSyncPointOnMainThread would always r…
David Yen 2015/09/01 19:08:02: Oh I see, I didn't notice that it was static befor…

+  }
   subscription_ref_set_->RemoveObserver(this);
   if (preempting_flag_.get())
     preempting_flag_->Reset();
 }
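
What piman is asking for, and what the "Done." suggests a later patch set adds, is retiring any sync point still sitting in the queue so that waiters in other contexts do not hang forever. Roughly (a sketch under that assumption, not necessarily the code that landed):

GpuChannel::~GpuChannel() {
  ...
  base::AutoLock auto_lock(channel_messages_lock_);
  for (ChannelMessage* msg : channel_messages_) {
    // Assumed: sync_point_number is nonzero only for unprocessed
    // InsertSyncPoint messages.
    if (msg->sync_point_number) {
      gpu_channel_manager_->sync_point_manager()->RetireSyncPoint(
          msg->sync_point_number);
    }
  }
  STLDeleteElements(&channel_messages_);
}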

 IPC::ChannelHandle GpuChannel::Init(base::WaitableEvent* shutdown_event,
                                     IPC::AttachmentBroker* attachment_broker) {
   DCHECK(shutdown_event);
   DCHECK(!channel_);

 (...skipping 13 matching lines...)

   channel_->AddFilter(filter_.get());

   return channel_handle;
 }

 base::ProcessId GpuChannel::GetClientPID() const {
   return channel_->GetPeerPID();
 }

+bool GpuChannel::HasQueuedMessages() {
+  base::AutoLock auto_lock(channel_messages_lock_);
+  return !channel_messages_.empty();
+}
+
+base::TimeTicks GpuChannel::GetNextMessageTimeTick() {
+  // We have to account for messages that are pushed out of order; those
+  // messages are pushed to the front and have order numbers of -1.
+  base::TimeTicks next_time_tick;
+  base::AutoLock auto_lock(channel_messages_lock_);
+  for (const auto& msg : channel_messages_) {
+    if (msg->order_number != static_cast<uint32_t>(-1)) {
+      // Return the earliest time tick if we have some out of order ones.
+      return next_time_tick.is_null() ?
+          msg->time_received :
+          std::min(msg->time_received, next_time_tick);
+    } else {
+      // Store the last out of order message in next_time_tick.
+      next_time_tick = msg->time_received;
+    }
+  }
+  return next_time_tick;
+}
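
A worked example of the traversal (times illustrative): with a queue of [wait received at t=5, wait at t=3, ordered at t=4] from front to back, the loop stores t=5, overwrites it with t=3, then hits the ordered message and returns min(4, 3) = 3, the earlier of the first ordered message's tick and the last out-of-order tick stored. If only out-of-order messages are queued, the loop falls through and returns the tick of the last one.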

+void GpuChannel::PushBackMessage(uint32_t order_number,
+                                 const IPC::Message& message) {
+  base::subtle::Release_Store(&unprocessed_order_num_, order_number);
+
+  base::AutoLock auto_lock(channel_messages_lock_);
+  channel_messages_.push_back(new ChannelMessage(order_number, message));
+  ScheduleHandleMessageLocked();
+}
+
+void GpuChannel::PushFrontMessage(const IPC::Message& message) {
+  // These are pushed out of order, so they do not have order numbers.
+  base::AutoLock auto_lock(channel_messages_lock_);
+  channel_messages_.push_front(new ChannelMessage(static_cast<uint32_t>(-1),
+                                                  message));
+  ScheduleHandleMessageLocked();
+}
+
+void GpuChannel::PushSyncPointMessage(uint32_t order_number,
+                                      const IPC::Message& message,
+                                      bool retire_sync_point,
+                                      uint32_t sync_point_num) {
+  DCHECK(message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID);
+
+  base::subtle::Release_Store(&unprocessed_order_num_, order_number);
+  ChannelMessage* msg = new ChannelMessage(order_number, message);
+  msg->retire_sync_point = retire_sync_point;
+  msg->sync_point_number = sync_point_num;
+
+  base::AutoLock auto_lock(channel_messages_lock_);
+  channel_messages_.push_back(msg);
+  ScheduleHandleMessageLocked();
+}
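
unprocessed_order_num_ is published with base::subtle::Release_Store, which only pays off if the matching reader uses an acquire load. GetUnprocessedOrderNum() is referenced later in this diff but its body is not shown; presumably it is the matching half, along the lines of this sketch (the Atomic32 member type is an assumption):

uint32_t GpuChannel::GetUnprocessedOrderNum() const {
  // Pairs with the Release_Store in PushBackMessage()/PushSyncPointMessage()
  // so a reader on another thread never sees a stale order number.
  return static_cast<uint32_t>(
      base::subtle::Acquire_Load(&unprocessed_order_num_));
}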

 bool GpuChannel::OnMessageReceived(const IPC::Message& message) {
-  DVLOG(1) << "received message @" << &message << " on channel @" << this
-           << " with type " << message.type();
-
-  if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID ||
-      message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) {
-    // Move Wait commands to the head of the queue, so the renderer
-    // doesn't have to wait any longer than necessary.
-    deferred_messages_.push_front(new IPC::Message(message));
-  } else {
-    deferred_messages_.push_back(new IPC::Message(message));
-  }
-
-  OnScheduled();
-
-  return true;
+  // All messages should be pushed to channel_messages_ and handled separately.
+  NOTREACHED();
+  return false;
 }

 void GpuChannel::OnChannelError() {
   gpu_channel_manager_->RemoveChannel(client_id_);
 }

 bool GpuChannel::Send(IPC::Message* message) {
   // The GPU process must never send a synchronous IPC message to the renderer
   // process. This could result in deadlock.
   DCHECK(!message->is_sync());

 (...skipping 12 matching lines...)

 void GpuChannel::OnAddSubscription(unsigned int target) {
   gpu_channel_manager()->Send(
       new GpuHostMsg_AddSubscription(client_id_, target));
 }

 void GpuChannel::OnRemoveSubscription(unsigned int target) {
   gpu_channel_manager()->Send(
       new GpuHostMsg_RemoveSubscription(client_id_, target));
 }

-void GpuChannel::RequeueMessage() {
-  DCHECK(currently_processing_message_);
-  deferred_messages_.push_front(
-      new IPC::Message(*currently_processing_message_));
-  messages_processed_--;
-  currently_processing_message_ = NULL;
-}
-
-void GpuChannel::OnScheduled() {
-  if (handle_messages_scheduled_)
-    return;
-  // Post a task to handle any deferred messages. The deferred message queue is
-  // not emptied here, which ensures that OnMessageReceived will continue to
-  // defer newly received messages until the ones in the queue have all been
-  // handled by HandleMessage. HandleMessage is invoked as a
-  // task to prevent reentrancy.
-  task_runner_->PostTask(FROM_HERE, base::Bind(&GpuChannel::HandleMessage,
-                                               weak_factory_.GetWeakPtr()));
-  handle_messages_scheduled_ = true;
-}
-
 void GpuChannel::StubSchedulingChanged(bool scheduled) {
   bool a_stub_was_descheduled = num_stubs_descheduled_ > 0;
   if (scheduled) {
     num_stubs_descheduled_--;
-    OnScheduled();
+    ScheduleHandleMessage();
   } else {
     num_stubs_descheduled_++;
   }
   DCHECK_LE(num_stubs_descheduled_, stubs_.size());
   bool a_stub_is_descheduled = num_stubs_descheduled_ > 0;

   if (a_stub_is_descheduled != a_stub_was_descheduled) {
     if (preempting_flag_.get()) {
       io_task_runner_->PostTask(
           FROM_HERE,
(...skipping 105 matching lines...)
                                     OnDestroyCommandBuffer)
     IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuMsg_CreateJpegDecoder,
                                     OnCreateJpegDecoder)
     IPC_MESSAGE_UNHANDLED(handled = false)
   IPC_END_MESSAGE_MAP()
   DCHECK(handled) << msg.type();
   return handled;
 }

 void GpuChannel::HandleMessage() {
-  handle_messages_scheduled_ = false;
-  if (deferred_messages_.empty())
-    return;
-
-  IPC::Message* m = NULL;
-  GpuCommandBufferStub* stub = NULL;
-
-  m = deferred_messages_.front();
-  stub = stubs_.get(m->routing_id());
-  if (stub) {
-    if (!stub->IsScheduled())
-      return;
-    if (stub->IsPreempted()) {
-      OnScheduled();
-      return;
-    }
-  }
+  ChannelMessage* m = nullptr;
+  GpuCommandBufferStub* stub = nullptr;
+  {
+    base::AutoLock auto_lock(channel_messages_lock_);
+    handle_messages_scheduled_ = false;
+    if (channel_messages_.empty()) {
+      return;
+    }
+    m = channel_messages_.front();

piman 2015/08/31 23:15:04: Can we pop_front and release the lock? RetireSyncP…
David Yen 2015/09/01 02:01:52: I would have to think about it a bit if we can pop…
piman 2015/09/01 03:55:26: That's fine. A possibility for out-of-order messa…
David Yen 2015/09/01 19:08:02: Done.
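
The pattern piman is suggesting, and that the final "Done." indicates was adopted in a later patch set, pops under the lock and processes outside it, so that routing and sync point retirement never run while channel_messages_lock_ is held. Roughly:

ChannelMessage* m = nullptr;
{
  base::AutoLock auto_lock(channel_messages_lock_);
  if (channel_messages_.empty())
    return;
  m = channel_messages_.front();
  channel_messages_.pop_front();
}
// Lock released: route m->message, retire sync points, etc. The caveat
// piman's (truncated) reply hints at: out-of-order messages can be pushed
// to the front while m is still being processed.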
+    stub = stubs_.get(m->message.routing_id());
+
+    // TODO(dyen): Temporary handling of old sync points.
+    // This must ensure that the sync point will be retired. Normally we'll
+    // find the stub based on the routing ID, and associate the sync point
+    // with it, but if that fails for any reason (channel or stub already
+    // deleted, invalid routing id), we need to retire the sync point
+    // immediately.
+    if (m->message.type() == GpuCommandBufferMsg_InsertSyncPoint::ID) {
+      const bool retire = m->retire_sync_point;
+      const uint32_t sync_point = m->sync_point_number;
+      const int32_t routing_id = m->message.routing_id();
+      if (stub) {
+        stub->AddSyncPoint(sync_point);
+        if (retire) {
+          m->message = GpuCommandBufferMsg_RetireSyncPoint(routing_id,
+                                                           sync_point);
+        }
+      } else {
+        current_order_num_ = m->order_number;
+        gpu_channel_manager_->sync_point_manager()->RetireSyncPoint(
+            sync_point);
+        channel_messages_.pop_front();
+        MessageProcessed(m->order_number);
+        return;
+      }
+    }
+
+    if (stub) {
+      if (!stub->IsScheduled())
+        return;
+      if (stub->IsPreempted()) {
+        ScheduleHandleMessage();
+        return;
+      }
+    }
+
+    channel_messages_.pop_front();
+  }

-  scoped_ptr<IPC::Message> message(m);
-  deferred_messages_.pop_front();
+  scoped_ptr<ChannelMessage> scoped_message(m);
+  const uint32_t order_number = m->order_number;
+  IPC::Message* message = &m->message;
+
+  DVLOG(1) << "received message @" << message << " on channel @" << this
+           << " with type " << message->type();
+
   bool message_processed = true;

-  currently_processing_message_ = message.get();
+  if (order_number != static_cast<uint32_t>(-1)) {
+    // Make sure this is a valid unprocessed order number.
+    DCHECK(order_number <= GetUnprocessedOrderNum() &&
+           order_number >= GetProcessedOrderNum());
+
+    current_order_num_ = order_number;
+  }
   bool result;
   if (message->routing_id() == MSG_ROUTING_CONTROL)
     result = OnControlMessageReceived(*message);
   else
     result = router_.RouteMessage(*message);
-  currently_processing_message_ = NULL;

   if (!result) {
     // Respond to sync messages even if router failed to route.
     if (message->is_sync()) {
       IPC::Message* reply = IPC::SyncMessage::GenerateReply(&*message);
       reply->set_reply_error();
       Send(reply);
     }
   } else {
     // If the command buffer becomes unscheduled as a result of handling the
     // message but still has more commands to process, synthesize an IPC
     // message to flush that command buffer.
     if (stub) {
       if (stub->HasUnprocessedCommands()) {
-        deferred_messages_.push_front(new GpuCommandBufferMsg_Rescheduled(
-            stub->route_id()));
+        PushUnfinishedMessage(
+            order_number, GpuCommandBufferMsg_Rescheduled(stub->route_id()));
         message_processed = false;
       }
     }
   }
   if (message_processed)
-    MessageProcessed();
+    MessageProcessed(order_number);

-  if (!deferred_messages_.empty()) {
-    OnScheduled();
-  }
+  base::AutoLock auto_lock(channel_messages_lock_);
+  if (!channel_messages_.empty())
+    ScheduleHandleMessageLocked();

piman 2015/08/31 23:15:04: Arguably we could do this while the lock is taken…
David Yen 2015/09/01 02:01:52: Good point, I guess I was trying to replicate old…
 }

 void GpuChannel::OnCreateOffscreenCommandBuffer(
     const gfx::Size& size,
     const GPUCreateCommandBufferConfig& init_params,
     int32 route_id,
     bool* succeeded) {
   TRACE_EVENT1("gpu", "GpuChannel::OnCreateOffscreenCommandBuffer", "route_id",
                route_id);

 (...skipping 48 matching lines...)

   }
 }

 void GpuChannel::OnCreateJpegDecoder(int32 route_id, IPC::Message* reply_msg) {
   if (!jpeg_decoder_) {
     jpeg_decoder_.reset(new GpuJpegDecodeAccelerator(this, io_task_runner_));
   }
   jpeg_decoder_->AddClient(route_id, reply_msg);
 }

-void GpuChannel::MessageProcessed() {
-  messages_processed_++;
-  if (preempting_flag_.get()) {
-    io_task_runner_->PostTask(
-        FROM_HERE, base::Bind(&GpuChannelMessageFilter::MessageProcessed,
-                              filter_, messages_processed_));
-  }
-}
+void GpuChannel::PushUnfinishedMessage(uint32_t order_number,
+                                       const IPC::Message& message) {
+  // This is pushed in front only if it was unfinished, so the order number
+  // is kept.
+  base::AutoLock auto_lock(channel_messages_lock_);
+  channel_messages_.push_front(new ChannelMessage(order_number, message));
+  ScheduleHandleMessageLocked();
+}
+
+void GpuChannel::ScheduleHandleMessage() {
+  base::AutoLock auto_lock(channel_messages_lock_);
+  ScheduleHandleMessageLocked();
+}
+
+void GpuChannel::ScheduleHandleMessageLocked() {
+  channel_messages_lock_.AssertAcquired();
+  if (!handle_messages_scheduled_) {

piman 2015/08/31 23:15:04: You really only need to post a task if the queue w…
David Yen 2015/09/01 02:01:52: Done. Although we cannot get rid of handle_message…
piman 2015/09/01 03:55:26: I think the only place this is used is GpuCommandB…
David Yen 2015/09/01 19:08:02: Actually looking at GpuCommandBufferStub::PollWork…

+    task_runner_->PostTask(
+        FROM_HERE,
+        base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
+    handle_messages_scheduled_ = true;
+  }
+}
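
piman's suggestion (truncated above) is to post only on the empty-to-non-empty transition and let HandleMessage drain the queue, which would make handle_messages_scheduled_ mostly redundant. An assumed sketch of that alternative, not what this patch set does:

void GpuChannel::PushBackMessage(uint32_t order_number,
                                 const IPC::Message& message) {
  base::AutoLock auto_lock(channel_messages_lock_);
  const bool was_empty = channel_messages_.empty();
  channel_messages_.push_back(new ChannelMessage(order_number, message));
  // Post at most one drain task per burst: only the push that makes the
  // queue non-empty schedules HandleMessage.
  if (was_empty) {
    task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&GpuChannel::HandleMessage, weak_factory_.GetWeakPtr()));
  }
}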

+void GpuChannel::MessageProcessed(uint32_t order_number) {
+  if (order_number != static_cast<uint32_t>(-1)) {
+    DCHECK(current_order_num_ == order_number);
+    processed_order_num_ = order_number;
+  }
+  if (preempting_flag_.get()) {
+    io_task_runner_->PostTask(
+        FROM_HERE, base::Bind(&GpuChannelMessageFilter::OnMessageProcessed,
+                              filter_));
+  }
+}

 void GpuChannel::CacheShader(const std::string& key,
                              const std::string& shader) {
   gpu_channel_manager_->Send(
       new GpuHostMsg_CacheShader(client_id_, key, shader));
 }

 void GpuChannel::AddFilter(IPC::MessageFilter* filter) {
   channel_->AddFilter(filter);
 }

 (...skipping 46 matching lines...)

     }
   }
 }

 void GpuChannel::HandleUpdateValueState(
     unsigned int target, const gpu::ValueState& state) {
   pending_valuebuffer_state_->UpdateState(target, state);
 }

 }  // namespace content