OLD | NEW |
| (Empty) |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "content/common/gpu/gpu_channel.h" | |
6 | |
7 #include <utility> | |
8 | |
9 #if defined(OS_WIN) | |
10 #include <windows.h> | |
11 #endif | |
12 | |
13 #include <algorithm> | |
14 #include <deque> | |
15 #include <set> | |
16 #include <vector> | |
17 | |
18 #include "base/atomicops.h" | |
19 #include "base/bind.h" | |
20 #include "base/command_line.h" | |
21 #include "base/location.h" | |
22 #include "base/numerics/safe_conversions.h" | |
23 #include "base/single_thread_task_runner.h" | |
24 #include "base/stl_util.h" | |
25 #include "base/strings/string_util.h" | |
26 #include "base/synchronization/lock.h" | |
27 #include "base/thread_task_runner_handle.h" | |
28 #include "base/timer/timer.h" | |
29 #include "base/trace_event/memory_dump_manager.h" | |
30 #include "base/trace_event/process_memory_dump.h" | |
31 #include "base/trace_event/trace_event.h" | |
32 #include "build/build_config.h" | |
33 #include "content/common/gpu/gpu_channel_manager.h" | |
34 #include "content/common/gpu/gpu_channel_manager_delegate.h" | |
35 #include "content/common/gpu/gpu_memory_buffer_factory.h" | |
36 #include "gpu/command_buffer/common/mailbox.h" | |
37 #include "gpu/command_buffer/common/value_state.h" | |
38 #include "gpu/command_buffer/service/command_executor.h" | |
39 #include "gpu/command_buffer/service/image_factory.h" | |
40 #include "gpu/command_buffer/service/mailbox_manager.h" | |
41 #include "gpu/command_buffer/service/sync_point_manager.h" | |
42 #include "gpu/command_buffer/service/valuebuffer_manager.h" | |
43 #include "gpu/ipc/common/gpu_messages.h" | |
44 #include "ipc/ipc_channel.h" | |
45 #include "ipc/message_filter.h" | |
46 #include "ui/gl/gl_context.h" | |
47 #include "ui/gl/gl_image_shared_memory.h" | |
48 #include "ui/gl/gl_surface.h" | |
49 | |
50 #if defined(OS_POSIX) | |
51 #include "ipc/ipc_channel_posix.h" | |
52 #endif | |
53 | |
54 namespace content { | |
55 namespace { | |
56 | |
57 // Number of milliseconds between successive vsync. Many GL commands block | |
58 // on vsync, so thresholds for preemption should be multiples of this. | |
59 const int64_t kVsyncIntervalMs = 17; | |
60 | |
61 // Amount of time that we will wait for an IPC to be processed before | |
62 // preempting. After a preemption, we must wait this long before triggering | |
63 // another preemption. | |
64 const int64_t kPreemptWaitTimeMs = 2 * kVsyncIntervalMs; | |
65 | |
66 // Once we trigger a preemption, the maximum duration that we will wait | |
67 // before clearing the preemption. | |
68 const int64_t kMaxPreemptTimeMs = kVsyncIntervalMs; | |
69 | |
70 // Stop the preemption once the time for the longest pending IPC drops | |
71 // below this threshold. | |
72 const int64_t kStopPreemptThresholdMs = kVsyncIntervalMs; | |
73 | |
74 } // anonymous namespace | |
75 | |
// Factory for a ref-counted per-stream message queue. |preempting_flag| is
// the flag this queue raises to preempt others; |preempted_flag| is the flag
// it checks to see whether it has itself been preempted.
76 scoped_refptr<GpuChannelMessageQueue> GpuChannelMessageQueue::Create( | |
77 int32_t stream_id, | |
78 gpu::GpuStreamPriority stream_priority, | |
79 GpuChannel* channel, | |
80 const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner, | |
81 const scoped_refptr<gpu::PreemptionFlag>& preempting_flag, | |
82 const scoped_refptr<gpu::PreemptionFlag>& preempted_flag, | |
83 gpu::SyncPointManager* sync_point_manager) { | |
84 return new GpuChannelMessageQueue(stream_id, stream_priority, channel, | |
85 io_task_runner, preempting_flag, | |
86 preempted_flag, sync_point_manager); | |
87 } | |
88 | |
// Returns the per-queue sync point ordering data; null after Disable().
89 scoped_refptr<gpu::SyncPointOrderData> | |
90 GpuChannelMessageQueue::GetSyncPointOrderData() { | |
91 return sync_point_order_data_; | |
92 } | |
93 | |
// Constructed on the main thread. The preemption timer is bound to the IO
// task runner, and the IO-thread checker is detached here so that it binds
// to the first IO-thread call instead of this (main) thread.
94 GpuChannelMessageQueue::GpuChannelMessageQueue( | |
95 int32_t stream_id, | |
96 gpu::GpuStreamPriority stream_priority, | |
97 GpuChannel* channel, | |
98 const scoped_refptr<base::SingleThreadTaskRunner>& io_task_runner, | |
99 const scoped_refptr<gpu::PreemptionFlag>& preempting_flag, | |
100 const scoped_refptr<gpu::PreemptionFlag>& preempted_flag, | |
101 gpu::SyncPointManager* sync_point_manager) | |
102 : stream_id_(stream_id), | |
103 stream_priority_(stream_priority), | |
104 enabled_(true), | |
105 scheduled_(true), | |
106 channel_(channel), | |
107 preemption_state_(IDLE), | |
108 max_preemption_time_( | |
109 base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs)), | |
110 timer_(new base::OneShotTimer), | |
111 sync_point_order_data_(gpu::SyncPointOrderData::Create()), | |
112 io_task_runner_(io_task_runner), | |
113 preempting_flag_(preempting_flag), | |
114 preempted_flag_(preempted_flag), | |
115 sync_point_manager_(sync_point_manager) { | |
// Timer tasks must fire on the IO thread where preemption state is managed.
116 timer_->SetTaskRunner(io_task_runner); | |
117 io_thread_checker_.DetachFromThread(); | |
118 } | |
119 | |
// The queue must have been Disable()d (which also drains pending messages)
// before the last reference goes away.
120 GpuChannelMessageQueue::~GpuChannelMessageQueue() { | |
121 DCHECK(!enabled_); | |
122 DCHECK(channel_messages_.empty()); | |
123 } | |
124 | |
// Shuts this queue down on the main thread: stops accepting new messages,
// fails all pending sync IPCs with error replies, destroys the sync point
// ordering data, and posts timer teardown to the IO thread.
125 void GpuChannelMessageQueue::Disable() { | |
126 { | |
127 base::AutoLock auto_lock(channel_lock_); | |
128 DCHECK(enabled_); | |
129 enabled_ = false; | |
130 } | |
131 | |
132 // We guarantee that the queues will no longer be modified after enabled_ | |
133 // is set to false, it is now safe to modify the queue without the lock. | |
134 // All public facing modifying functions check enabled_ while all | |
135 // private modifying functions DCHECK(enabled_) to enforce this. | |
136 while (!channel_messages_.empty()) { | |
137 const IPC::Message& msg = channel_messages_.front()->message; | |
// Sync messages must always be answered or the renderer will hang waiting.
138 if (msg.is_sync()) { | |
139 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&msg); | |
140 reply->set_reply_error(); | |
141 channel_->Send(reply); | |
142 } | |
143 channel_messages_.pop_front(); | |
144 } | |
145 | |
146 sync_point_order_data_->Destroy(); | |
147 sync_point_order_data_ = nullptr; | |
148 | |
// The timer lives on the IO thread, so it must be torn down there.
149 io_task_runner_->PostTask( | |
150 FROM_HERE, base::Bind(&GpuChannelMessageQueue::DisableIO, this)); | |
151 } | |
152 | |
// IO-thread half of Disable(): releases the preemption timer.
153 void GpuChannelMessageQueue::DisableIO() { | |
154 DCHECK(io_thread_checker_.CalledOnValidThread()); | |
155 timer_ = nullptr; | |
156 } | |
157 | |
// Returns whether this stream is currently allowed to process messages.
158 bool GpuChannelMessageQueue::IsScheduled() const { | |
159 base::AutoLock lock(channel_lock_); | |
160 return scheduled_; | |
161 } | |
162 | |
// Flips the scheduled state. On becoming scheduled again, message handling
// is re-posted; either way the preemption state machine is poked on the IO
// thread (scheduled-ness affects PREEMPTING vs WOULD_PREEMPT_DESCHEDULED).
163 void GpuChannelMessageQueue::OnRescheduled(bool scheduled) { | |
164 base::AutoLock lock(channel_lock_); | |
165 DCHECK(enabled_); | |
166 if (scheduled_ == scheduled) | |
167 return; | |
168 scheduled_ = scheduled; | |
169 if (scheduled) | |
170 channel_->PostHandleMessage(this); | |
171 if (preempting_flag_) { | |
172 io_task_runner_->PostTask( | |
173 FROM_HERE, | |
174 base::Bind(&GpuChannelMessageQueue::UpdatePreemptionState, this)); | |
175 } | |
176 } | |
177 | |
// Highest order number that has been enqueued but not yet processed.
178 uint32_t GpuChannelMessageQueue::GetUnprocessedOrderNum() const { | |
179 return sync_point_order_data_->unprocessed_order_num(); | |
180 } | |
181 | |
// Highest order number that has finished processing on this stream.
182 uint32_t GpuChannelMessageQueue::GetProcessedOrderNum() const { | |
183 return sync_point_order_data_->processed_order_num(); | |
184 } | |
185 | |
// Called on the IO thread to enqueue an incoming IPC. Returns false if the
// queue has been disabled (caller replies with an error). Wait-for-token /
// wait-for-get-offset messages bypass the queue so they can be answered
// while a blocking command buffer message is in flight.
186 bool GpuChannelMessageQueue::PushBackMessage(const IPC::Message& message) { | |
187 base::AutoLock auto_lock(channel_lock_); | |
188 if (enabled_) { | |
189 if (message.type() == GpuCommandBufferMsg_WaitForTokenInRange::ID || | |
190 message.type() == GpuCommandBufferMsg_WaitForGetOffsetInRange::ID) { | |
191 channel_->PostHandleOutOfOrderMessage(message); | |
192 return true; | |
193 } | |
194 | |
// Every ordered message gets a global order number for sync point waits.
195 uint32_t order_num = sync_point_order_data_->GenerateUnprocessedOrderNumber( | |
196 sync_point_manager_); | |
197 scoped_ptr<GpuChannelMessage> msg( | |
198 new GpuChannelMessage(message, order_num, base::TimeTicks::Now())); | |
199 | |
// Only kick the main thread when the queue transitions empty -> non-empty;
// otherwise a handler task is already pending or will be re-posted.
200 if (channel_messages_.empty()) { | |
201 DCHECK(scheduled_); | |
202 channel_->PostHandleMessage(this); | |
203 } | |
204 | |
205 channel_messages_.push_back(std::move(msg)); | |
206 | |
// Already on the IO thread with the lock held, so update preemption inline.
207 if (preempting_flag_) | |
208 UpdatePreemptionStateHelper(); | |
209 | |
210 return true; | |
211 } | |
212 return false; | |
213 } | |
214 | |
// Main-thread entry: returns the front message and marks its order number
// as in-progress, or null if there is nothing to do right now.
215 const GpuChannelMessage* GpuChannelMessageQueue::BeginMessageProcessing() { | |
216 base::AutoLock auto_lock(channel_lock_); | |
217 DCHECK(enabled_); | |
218 // If we have been preempted by another channel, just post a task to wake up. | |
219 if (preempted_flag_ && preempted_flag_->IsSet()) { | |
220 channel_->PostHandleMessage(this); | |
221 return nullptr; | |
222 } | |
223 if (channel_messages_.empty()) | |
224 return nullptr; | |
225 sync_point_order_data_->BeginProcessingOrderNumber( | |
226 channel_messages_.front()->order_number); | |
227 return channel_messages_.front().get(); | |
228 } | |
229 | |
// Called when the front message yielded (e.g. an AsyncFlush with commands
// left). The message stays at the front so it is retried later.
230 void GpuChannelMessageQueue::PauseMessageProcessing() { | |
231 base::AutoLock auto_lock(channel_lock_); | |
232 DCHECK(!channel_messages_.empty()); | |
233 | |
// If still scheduled, immediately post a task to resume processing; if
// descheduled, OnRescheduled() will re-post when we are scheduled again.
235 if (scheduled_) | |
236 channel_->PostHandleMessage(this); | |
237 | |
238 sync_point_order_data_->PauseProcessingOrderNumber( | |
239 channel_messages_.front()->order_number); | |
240 } | |
241 | |
// Pops the completed front message, retires its order number, keeps the
// processing loop going if more messages remain, and pokes the preemption
// state machine on the IO thread.
242 void GpuChannelMessageQueue::FinishMessageProcessing() { | |
243 base::AutoLock auto_lock(channel_lock_); | |
244 DCHECK(!channel_messages_.empty()); | |
245 DCHECK(scheduled_); | |
246 | |
247 sync_point_order_data_->FinishProcessingOrderNumber( | |
248 channel_messages_.front()->order_number); | |
249 channel_messages_.pop_front(); | |
250 | |
251 if (!channel_messages_.empty()) | |
252 channel_->PostHandleMessage(this); | |
253 | |
254 if (preempting_flag_) { | |
255 io_task_runner_->PostTask( | |
256 FROM_HERE, | |
257 base::Bind(&GpuChannelMessageQueue::UpdatePreemptionState, this)); | |
258 } | |
259 } | |
260 | |
// IO-thread entry point for the preemption state machine: takes the lock
// and dispatches to the helper. Also used as the timer callback.
261 void GpuChannelMessageQueue::UpdatePreemptionState() { | |
262 DCHECK(io_thread_checker_.CalledOnValidThread()); | |
263 DCHECK(preempting_flag_); | |
264 base::AutoLock lock(channel_lock_); | |
265 UpdatePreemptionStateHelper(); | |
266 } | |
267 | |
// Dispatches to the handler for the current preemption state. Caller must
// hold channel_lock_ and be on the IO thread.
268 void GpuChannelMessageQueue::UpdatePreemptionStateHelper() { | |
269 DCHECK(io_thread_checker_.CalledOnValidThread()); | |
270 DCHECK(preempting_flag_); | |
271 channel_lock_.AssertAcquired(); | |
272 switch (preemption_state_) { | |
273 case IDLE: | |
274 UpdateStateIdle(); | |
275 break; | |
276 case WAITING: | |
277 UpdateStateWaiting(); | |
278 break; | |
279 case CHECKING: | |
280 UpdateStateChecking(); | |
281 break; | |
282 case PREEMPTING: | |
283 UpdateStatePreempting(); | |
284 break; | |
285 case WOULD_PREEMPT_DESCHEDULED: | |
286 UpdateStateWouldPreemptDescheduled(); | |
287 break; | |
288 default: | |
289 NOTREACHED(); | |
290 } | |
291 } | |
292 | |
// IDLE: nothing pending. As soon as a message arrives, start the wait timer.
293 void GpuChannelMessageQueue::UpdateStateIdle() { | |
294 DCHECK(io_thread_checker_.CalledOnValidThread()); | |
295 DCHECK(preempting_flag_); | |
296 channel_lock_.AssertAcquired(); | |
297 DCHECK(!timer_->IsRunning()); | |
298 if (!channel_messages_.empty()) | |
299 TransitionToWaiting(); | |
300 } | |
301 | |
// WAITING: the grace-period timer is running; when it fires (timer no longer
// running) we move to CHECKING to decide whether to preempt.
302 void GpuChannelMessageQueue::UpdateStateWaiting() { | |
303 DCHECK(io_thread_checker_.CalledOnValidThread()); | |
304 DCHECK(preempting_flag_); | |
305 channel_lock_.AssertAcquired(); | |
306 // Transition to CHECKING if timer fired. | |
307 if (!timer_->IsRunning()) | |
308 TransitionToChecking(); | |
309 } | |
310 | |
// CHECKING: decide whether the oldest pending IPC has waited long enough
// (kPreemptWaitTimeMs) to justify preempting other channels. If not yet,
// re-arm the timer for the remaining time; if so, preempt (or record the
// intent if this stream is currently descheduled).
311 void GpuChannelMessageQueue::UpdateStateChecking() { | |
312 DCHECK(io_thread_checker_.CalledOnValidThread()); | |
313 DCHECK(preempting_flag_); | |
314 channel_lock_.AssertAcquired(); | |
315 if (!channel_messages_.empty()) { | |
316 base::TimeTicks time_recv = channel_messages_.front()->time_received; | |
317 base::TimeDelta time_elapsed = base::TimeTicks::Now() - time_recv; | |
318 if (time_elapsed.InMilliseconds() < kPreemptWaitTimeMs) { | |
319 // Schedule another check for when the IPC may go long. | |
320 timer_->Start( | |
321 FROM_HERE, | |
322 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs) - time_elapsed, | |
323 this, &GpuChannelMessageQueue::UpdatePreemptionState); | |
324 } else { | |
325 timer_->Stop(); | |
326 if (!scheduled_) | |
327 TransitionToWouldPreemptDescheduled(); | |
328 else | |
329 TransitionToPreempting(); | |
330 } | |
331 } | |
332 } | |
333 | |
// PREEMPTING: the preempting flag is raised and a max-duration timer runs.
// Stop preempting when the timer expires or the backlog has drained; if the
// stream gets descheduled, park in WOULD_PREEMPT_DESCHEDULED, banking the
// unused preemption time for when we are rescheduled.
334 void GpuChannelMessageQueue::UpdateStatePreempting() { | |
335 DCHECK(io_thread_checker_.CalledOnValidThread()); | |
336 DCHECK(preempting_flag_); | |
337 channel_lock_.AssertAcquired(); | |
338 // We should stop preempting if the timer fired or for other conditions. | |
339 if (!timer_->IsRunning() || ShouldTransitionToIdle()) { | |
340 TransitionToIdle(); | |
341 } else if (!scheduled_) { | |
342 // Save the remaining preemption time before stopping the timer. | |
343 max_preemption_time_ = timer_->desired_run_time() - base::TimeTicks::Now(); | |
344 timer_->Stop(); | |
345 TransitionToWouldPreemptDescheduled(); | |
346 } | |
347 } | |
348 | |
// WOULD_PREEMPT_DESCHEDULED: we want to preempt but the stream is not
// scheduled (so preempting would starve everyone without progress). Resume
// preempting once rescheduled, or go idle if the backlog clears.
349 void GpuChannelMessageQueue::UpdateStateWouldPreemptDescheduled() { | |
350 DCHECK(io_thread_checker_.CalledOnValidThread()); | |
351 DCHECK(preempting_flag_); | |
352 channel_lock_.AssertAcquired(); | |
353 DCHECK(!timer_->IsRunning()); | |
354 if (ShouldTransitionToIdle()) { | |
355 TransitionToIdle(); | |
356 } else if (scheduled_) { | |
357 TransitionToPreempting(); | |
358 } | |
359 } | |
360 | |
// True when preemption is no longer warranted: the queue is empty, or the
// oldest pending IPC has been waiting less than kStopPreemptThresholdMs.
361 bool GpuChannelMessageQueue::ShouldTransitionToIdle() const { | |
362 DCHECK(io_thread_checker_.CalledOnValidThread()); | |
363 DCHECK(preempting_flag_); | |
364 channel_lock_.AssertAcquired(); | |
365 DCHECK(preemption_state_ == PREEMPTING || | |
366 preemption_state_ == WOULD_PREEMPT_DESCHEDULED); | |
367 if (channel_messages_.empty()) { | |
368 return true; | |
369 } else { | |
370 base::TimeTicks next_tick = channel_messages_.front()->time_received; | |
371 base::TimeDelta time_elapsed = base::TimeTicks::Now() - next_tick; | |
372 if (time_elapsed.InMilliseconds() < kStopPreemptThresholdMs) | |
373 return true; | |
374 } | |
375 return false; | |
376 } | |
377 | |
// Clears the preempting flag, restores the full preemption budget, stops
// the timer, and immediately re-evaluates (a pending message restarts the
// WAITING cycle).
378 void GpuChannelMessageQueue::TransitionToIdle() { | |
379 DCHECK(io_thread_checker_.CalledOnValidThread()); | |
380 DCHECK(preempting_flag_); | |
381 channel_lock_.AssertAcquired(); | |
382 DCHECK(preemption_state_ == PREEMPTING || | |
383 preemption_state_ == WOULD_PREEMPT_DESCHEDULED); | |
384 | |
385 preemption_state_ = IDLE; | |
386 preempting_flag_->Reset(); | |
387 | |
388 max_preemption_time_ = base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs); | |
389 timer_->Stop(); | |
390 | |
391 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0); | |
392 | |
393 UpdateStateIdle(); | |
394 } | |
395 | |
// Arms the grace-period timer: if the pending IPC is still unprocessed when
// it fires, CHECKING decides whether to preempt.
396 void GpuChannelMessageQueue::TransitionToWaiting() { | |
397 DCHECK(io_thread_checker_.CalledOnValidThread()); | |
398 DCHECK(preempting_flag_); | |
399 channel_lock_.AssertAcquired(); | |
400 DCHECK_EQ(preemption_state_, IDLE); | |
401 DCHECK(!timer_->IsRunning()); | |
402 | |
403 preemption_state_ = WAITING; | |
404 | |
405 timer_->Start(FROM_HERE, | |
406 base::TimeDelta::FromMilliseconds(kPreemptWaitTimeMs), this, | |
407 &GpuChannelMessageQueue::UpdatePreemptionState); | |
408 } | |
409 | |
// Enters CHECKING and immediately evaluates whether to preempt.
410 void GpuChannelMessageQueue::TransitionToChecking() { | |
411 DCHECK(io_thread_checker_.CalledOnValidThread()); | |
412 DCHECK(preempting_flag_); | |
413 channel_lock_.AssertAcquired(); | |
414 DCHECK_EQ(preemption_state_, WAITING); | |
415 DCHECK(!timer_->IsRunning()); | |
416 | |
417 preemption_state_ = CHECKING; | |
418 | |
419 UpdateStateChecking(); | |
420 } | |
421 | |
// Raises the preempting flag and starts a timer for the remaining budget
// (max_preemption_time_ may be less than the full budget if an earlier
// PREEMPTING period was interrupted by descheduling).
422 void GpuChannelMessageQueue::TransitionToPreempting() { | |
423 DCHECK(io_thread_checker_.CalledOnValidThread()); | |
424 DCHECK(preempting_flag_); | |
425 channel_lock_.AssertAcquired(); | |
426 DCHECK(preemption_state_ == CHECKING || | |
427 preemption_state_ == WOULD_PREEMPT_DESCHEDULED); | |
428 DCHECK(scheduled_); | |
429 | |
430 preemption_state_ = PREEMPTING; | |
431 preempting_flag_->Set(); | |
432 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 1); | |
433 | |
434 DCHECK_LE(max_preemption_time_, | |
435 base::TimeDelta::FromMilliseconds(kMaxPreemptTimeMs)); | |
436 timer_->Start(FROM_HERE, max_preemption_time_, this, | |
437 &GpuChannelMessageQueue::UpdatePreemptionState); | |
438 } | |
439 | |
// Lowers the preempting flag while this stream is descheduled; preemption
// resumes (with the banked budget) once the stream is rescheduled.
440 void GpuChannelMessageQueue::TransitionToWouldPreemptDescheduled() { | |
441 DCHECK(io_thread_checker_.CalledOnValidThread()); | |
442 DCHECK(preempting_flag_); | |
443 channel_lock_.AssertAcquired(); | |
444 DCHECK(preemption_state_ == CHECKING || preemption_state_ == PREEMPTING); | |
445 DCHECK(!scheduled_); | |
446 | |
447 preemption_state_ = WOULD_PREEMPT_DESCHEDULED; | |
448 preempting_flag_->Reset(); | |
449 TRACE_COUNTER_ID1("gpu", "GpuChannel::Preempting", this, 0); | |
450 } | |
451 | |
// IO-thread message filter: starts with no sender and no known peer PID.
452 GpuChannelMessageFilter::GpuChannelMessageFilter() | |
453 : sender_(nullptr), peer_pid_(base::kNullProcessId) {} | |
454 | |
455 GpuChannelMessageFilter::~GpuChannelMessageFilter() {} | |
456 | |
// Records the sender and forwards the event to every nested channel filter.
457 void GpuChannelMessageFilter::OnFilterAdded(IPC::Sender* sender) { | |
458 DCHECK(!sender_); | |
459 sender_ = sender; | |
460 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { | |
461 filter->OnFilterAdded(sender_); | |
462 } | |
463 } | |
464 | |
// Forwards removal to nested filters, then clears the sender and peer PID.
465 void GpuChannelMessageFilter::OnFilterRemoved() { | |
466 DCHECK(sender_); | |
467 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { | |
468 filter->OnFilterRemoved(); | |
469 } | |
470 sender_ = nullptr; | |
471 peer_pid_ = base::kNullProcessId; | |
472 } | |
473 | |
// Captures the renderer's PID and forwards the event to nested filters.
474 void GpuChannelMessageFilter::OnChannelConnected(int32_t peer_pid) { | |
475 DCHECK(peer_pid_ == base::kNullProcessId); | |
476 peer_pid_ = peer_pid; | |
477 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { | |
478 filter->OnChannelConnected(peer_pid); | |
479 } | |
480 } | |
481 | |
// Forwards channel errors to nested filters.
482 void GpuChannelMessageFilter::OnChannelError() { | |
483 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { | |
484 filter->OnChannelError(); | |
485 } | |
486 } | |
487 | |
// Forwards channel-closing notification to nested filters.
488 void GpuChannelMessageFilter::OnChannelClosing() { | |
489 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { | |
490 filter->OnChannelClosing(); | |
491 } | |
492 } | |
493 | |
// Registers a nested filter, replaying any lifecycle events (filter added,
// channel connected) that have already happened.
494 void GpuChannelMessageFilter::AddChannelFilter( | |
495 scoped_refptr<IPC::MessageFilter> filter) { | |
496 channel_filters_.push_back(filter); | |
497 if (sender_) | |
498 filter->OnFilterAdded(sender_); | |
499 if (peer_pid_ != base::kNullProcessId) | |
500 filter->OnChannelConnected(peer_pid_); | |
501 } | |
502 | |
503 void GpuChannelMessageFilter::RemoveChannelFilter( | |
504 scoped_refptr<IPC::MessageFilter> filter) { | |
505 if (sender_) | |
506 filter->OnFilterRemoved(); | |
507 channel_filters_.erase( | |
508 std::find(channel_filters_.begin(), channel_filters_.end(), filter)); | |
509 } | |
510 | |
511 // This gets called from the main thread and assumes that all messages which | |
512 // lead to creation of a new route are synchronous messages. | |
513 // TODO(sunnyps): Create routes (and streams) on the IO thread so that we can | |
514 // make the CreateCommandBuffer/VideoDecoder/VideoEncoder messages asynchronous. | |
// Maps |route_id| to the stream queue that should receive its messages.
515 void GpuChannelMessageFilter::AddRoute( | |
516 int32_t route_id, | |
517 const scoped_refptr<GpuChannelMessageQueue>& queue) { | |
518 base::AutoLock lock(routes_lock_); | |
519 routes_.insert(std::make_pair(route_id, queue)); | |
520 } | |
521 | |
// Drops the route-to-queue mapping; subsequent messages for this route are
// rejected in OnMessageReceived().
522 void GpuChannelMessageFilter::RemoveRoute(int32_t route_id) { | |
523 base::AutoLock lock(routes_lock_); | |
524 routes_.erase(route_id); | |
525 } | |
526 | |
// IO-thread dispatch for every incoming IPC on this channel: rejects
// unexpected kinds, answers Nop synchronously, gives nested filters first
// refusal, then routes the message into its stream's queue.
527 bool GpuChannelMessageFilter::OnMessageReceived(const IPC::Message& message) { | |
528 DCHECK(sender_); | |
529 | |
530 if (message.should_unblock() || message.is_reply()) | |
531 return MessageErrorHandler(message, "Unexpected message type"); | |
532 | |
// Nop is answered directly on the IO thread; used to flush the channel.
533 if (message.type() == GpuChannelMsg_Nop::ID) { | |
534 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); | |
535 Send(reply); | |
536 return true; | |
537 } | |
538 | |
539 for (scoped_refptr<IPC::MessageFilter>& filter : channel_filters_) { | |
540 if (filter->OnMessageReceived(message)) | |
541 return true; | |
542 } | |
543 | |
544 scoped_refptr<GpuChannelMessageQueue> message_queue = | |
545 LookupStreamByRoute(message.routing_id()); | |
546 | |
547 if (!message_queue) | |
548 return MessageErrorHandler(message, "Could not find message queue"); | |
549 | |
550 if (!message_queue->PushBackMessage(message)) | |
551 return MessageErrorHandler(message, "Channel destroyed"); | |
552 | |
553 return true; | |
554 } | |
555 | |
// Forwards a reply through the attached sender.
// NOTE(review): assumes sender_ is non-null — callers appear to only reach
// this after OnFilterAdded(); confirm before calling from elsewhere.
556 bool GpuChannelMessageFilter::Send(IPC::Message* message) { | |
557 return sender_->Send(message); | |
558 } | |
559 | |
560 scoped_refptr<GpuChannelMessageQueue> | |
561 GpuChannelMessageFilter::LookupStreamByRoute(int32_t route_id) { | |
562 base::AutoLock lock(routes_lock_); | |
563 auto it = routes_.find(route_id); | |
564 if (it != routes_.end()) | |
565 return it->second; | |
566 return nullptr; | |
567 } | |
568 | |
// Logs |error_msg| and, for sync messages, sends an error reply so the
// renderer is not left blocked. Always returns true (message consumed).
569 bool GpuChannelMessageFilter::MessageErrorHandler(const IPC::Message& message, | |
570 const char* error_msg) { | |
571 DLOG(ERROR) << error_msg; | |
572 if (message.is_sync()) { | |
573 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&message); | |
574 reply->set_reply_error(); | |
575 Send(reply); | |
576 } | |
577 return true; | |
578 } | |
579 | |
// One GpuChannel per renderer client. Sets up the IO-thread filter, the
// default (high priority) stream with the control route, and subscribes to
// value-state notifications.
580 GpuChannel::GpuChannel(GpuChannelManager* gpu_channel_manager, | |
581 gpu::SyncPointManager* sync_point_manager, | |
582 GpuWatchdog* watchdog, | |
583 gfx::GLShareGroup* share_group, | |
584 gpu::gles2::MailboxManager* mailbox, | |
585 gpu::PreemptionFlag* preempting_flag, | |
586 gpu::PreemptionFlag* preempted_flag, | |
587 base::SingleThreadTaskRunner* task_runner, | |
588 base::SingleThreadTaskRunner* io_task_runner, | |
589 int32_t client_id, | |
590 uint64_t client_tracing_id, | |
591 bool allow_view_command_buffers, | |
592 bool allow_real_time_streams) | |
593 : gpu_channel_manager_(gpu_channel_manager), | |
594 sync_point_manager_(sync_point_manager), | |
595 unhandled_message_listener_(nullptr), | |
596 channel_id_(IPC::Channel::GenerateVerifiedChannelID("gpu")), | |
597 preempting_flag_(preempting_flag), | |
598 preempted_flag_(preempted_flag), | |
599 client_id_(client_id), | |
600 client_tracing_id_(client_tracing_id), | |
601 task_runner_(task_runner), | |
602 io_task_runner_(io_task_runner), | |
603 share_group_(share_group), | |
604 mailbox_manager_(mailbox), | |
605 subscription_ref_set_(new gpu::gles2::SubscriptionRefSet), | |
606 pending_valuebuffer_state_(new gpu::ValueStateMap), | |
607 watchdog_(watchdog), | |
608 allow_view_command_buffers_(allow_view_command_buffers), | |
609 allow_real_time_streams_(allow_real_time_streams), | |
610 weak_factory_(this) { | |
611 DCHECK(gpu_channel_manager); | |
612 DCHECK(client_id); | |
613 | |
614 filter_ = new GpuChannelMessageFilter(); | |
615 | |
// The default stream is created eagerly; CreateStream() also stores it in
// streams_, so the local ref here is only needed for its lifetime effect.
616 scoped_refptr<GpuChannelMessageQueue> control_queue = | |
617 CreateStream(gpu::GPU_STREAM_DEFAULT, gpu::GpuStreamPriority::HIGH); | |
618 AddRouteToStream(MSG_ROUTING_CONTROL, gpu::GPU_STREAM_DEFAULT); | |
619 | |
620 subscription_ref_set_->AddObserver(this); | |
621 } | |
622 | |
// Tears down stubs before streams (stubs depend on the streams' sync point
// data), disables every stream queue, and clears any active preemption.
623 GpuChannel::~GpuChannel() { | |
624 // Clear stubs first because of dependencies. | |
625 stubs_.clear(); | |
626 | |
627 for (auto& kv : streams_) | |
628 kv.second->Disable(); | |
629 | |
630 subscription_ref_set_->RemoveObserver(this); | |
631 if (preempting_flag_.get()) | |
632 preempting_flag_->Reset(); | |
633 } | |
634 | |
// Creates the server-side sync channel, attaches the IO-thread filter, and
// returns the handle the renderer uses to connect.
635 IPC::ChannelHandle GpuChannel::Init(base::WaitableEvent* shutdown_event) { | |
636 DCHECK(shutdown_event); | |
637 DCHECK(!channel_); | |
638 | |
639 IPC::ChannelHandle channel_handle(channel_id_); | |
640 | |
641 channel_ = | |
642 IPC::SyncChannel::Create(channel_handle, IPC::Channel::MODE_SERVER, this, | |
643 io_task_runner_, false, shutdown_event); | |
644 | |
645 #if defined(OS_POSIX) | |
646 // On POSIX, pass the renderer-side FD. Also mark it as auto-close so | |
647 // that it gets closed after it has been sent. | |
648 base::ScopedFD renderer_fd = channel_->TakeClientFileDescriptor(); | |
649 DCHECK(renderer_fd.is_valid()); | |
650 channel_handle.socket = base::FileDescriptor(std::move(renderer_fd)); | |
651 #endif | |
652 | |
653 channel_->AddFilter(filter_.get()); | |
654 | |
655 return channel_handle; | |
656 } | |
657 | |
// Installs a fallback listener consulted when routing fails (see
// HandleMessageHelper). Not owned.
658 void GpuChannel::SetUnhandledMessageListener(IPC::Listener* listener) { | |
659 unhandled_message_listener_ = listener; | |
660 } | |
661 | |
// Weak pointer for tasks that must not outlive this channel.
662 base::WeakPtr<GpuChannel> GpuChannel::AsWeakPtr() { | |
663 return weak_factory_.GetWeakPtr(); | |
664 } | |
665 | |
// PID of the connected renderer process, as reported by the IPC channel.
666 base::ProcessId GpuChannel::GetClientPID() const { | |
667 return channel_->GetPeerPID(); | |
668 } | |
669 | |
// Maximum processed order number across all of this channel's streams.
670 uint32_t GpuChannel::GetProcessedOrderNum() const { | |
671 uint32_t processed_order_num = 0; | |
672 for (auto& kv : streams_) { | |
673 processed_order_num = | |
674 std::max(processed_order_num, kv.second->GetProcessedOrderNum()); | |
675 } | |
676 return processed_order_num; | |
677 } | |
678 | |
// Maximum unprocessed order number across all of this channel's streams.
679 uint32_t GpuChannel::GetUnprocessedOrderNum() const { | |
680 uint32_t unprocessed_order_num = 0; | |
681 for (auto& kv : streams_) { | |
682 unprocessed_order_num = | |
683 std::max(unprocessed_order_num, kv.second->GetUnprocessedOrderNum()); | |
684 } | |
685 return unprocessed_order_num; | |
686 } | |
687 | |
// Never called: the IO-thread filter intercepts every message and enqueues
// it into a stream queue instead of delivering it here.
688 bool GpuChannel::OnMessageReceived(const IPC::Message& msg) { | |
689 // All messages should be pushed to channel_messages_ and handled separately. | |
690 NOTREACHED(); | |
691 return false; | |
692 } | |
693 | |
// A channel error (renderer gone) destroys this channel via the manager.
694 void GpuChannel::OnChannelError() { | |
695 gpu_channel_manager_->RemoveChannel(client_id_); | |
696 } | |
697 | |
// Sends |message| to the renderer. Takes ownership; deletes the message and
// returns false if the channel has not been initialized (or was torn down).
698 bool GpuChannel::Send(IPC::Message* message) { | |
699 // The GPU process must never send a synchronous IPC message to the renderer | |
700 // process. This could result in deadlock. | |
701 DCHECK(!message->is_sync()); | |
702 | |
703 DVLOG(1) << "sending message @" << message << " on channel @" << this | |
704 << " with type " << message->type(); | |
705 | |
706 if (!channel_) { | |
707 delete message; | |
708 return false; | |
709 } | |
710 | |
711 return channel_->Send(message); | |
712 } | |
713 | |
// SubscriptionRefSet observer: forwards a new subscription target upstream.
714 void GpuChannel::OnAddSubscription(unsigned int target) { | |
715 gpu_channel_manager()->delegate()->AddSubscription(client_id_, target); | |
716 } | |
717 | |
// SubscriptionRefSet observer: forwards subscription removal upstream.
718 void GpuChannel::OnRemoveSubscription(unsigned int target) { | |
719 gpu_channel_manager()->delegate()->RemoveSubscription(client_id_, target); | |
720 } | |
721 | |
// Relays a stub's (de)scheduling to the owning stream queue.
722 void GpuChannel::OnStreamRescheduled(int32_t stream_id, bool scheduled) { | |
723 scoped_refptr<GpuChannelMessageQueue> queue = LookupStream(stream_id); | |
724 DCHECK(queue); | |
725 queue->OnRescheduled(scheduled); | |
726 } | |
727 | |
// Returns the stub for |route_id|, or null if none exists. Not owned by
// the caller.
728 GpuCommandBufferStub* GpuChannel::LookupCommandBuffer(int32_t route_id) { | |
729 return stubs_.get(route_id); | |
730 } | |
731 | |
// Escalates context loss to every channel via the manager.
732 void GpuChannel::LoseAllContexts() { | |
733 gpu_channel_manager_->LoseAllContexts(); | |
734 } | |
735 | |
// Marks every stub's context lost on this channel only.
736 void GpuChannel::MarkAllContextsLost() { | |
737 for (auto& kv : stubs_) | |
738 kv.second->MarkContextLost(); | |
739 } | |
740 | |
// Registers |listener| for |route_id| in the router, and on success binds
// the route to |stream_id| for IO-thread dispatch.
741 bool GpuChannel::AddRoute(int32_t route_id, | |
742 int32_t stream_id, | |
743 IPC::Listener* listener) { | |
744 if (router_.AddRoute(route_id, listener)) { | |
745 AddRouteToStream(route_id, stream_id); | |
746 return true; | |
747 } | |
748 return false; | |
749 } | |
750 | |
// Unregisters the route from the router and from its stream (which may
// destroy the stream if it was the last route).
751 void GpuChannel::RemoveRoute(int32_t route_id) { | |
752 router_.RemoveRoute(route_id); | |
753 RemoveRouteFromStream(route_id); | |
754 } | |
755 | |
// Handles MSG_ROUTING_CONTROL messages (channel-level commands).
756 bool GpuChannel::OnControlMessageReceived(const IPC::Message& msg) { | |
757 bool handled = true; | |
758 IPC_BEGIN_MESSAGE_MAP(GpuChannel, msg) | |
759 IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateCommandBuffer, | |
760 OnCreateCommandBuffer) | |
761 IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroyCommandBuffer, | |
762 OnDestroyCommandBuffer) | |
763 IPC_MESSAGE_HANDLER(GpuChannelMsg_GetDriverBugWorkArounds, | |
764 OnGetDriverBugWorkArounds) | |
765 IPC_MESSAGE_UNHANDLED(handled = false) | |
766 IPC_END_MESSAGE_MAP() | |
767 return handled; | |
768 } | |
769 | |
// Returns the sync point ordering data for an existing stream; the stream
// must exist (DCHECKed).
770 scoped_refptr<gpu::SyncPointOrderData> GpuChannel::GetSyncPointOrderData( | |
771 int32_t stream_id) { | |
772 auto it = streams_.find(stream_id); | |
773 DCHECK(it != streams_.end()); | |
774 DCHECK(it->second); | |
775 return it->second->GetSyncPointOrderData(); | |
776 } | |
777 | |
// Schedules HandleMessage for |queue| on the main thread; the weak pointer
// drops the task if this channel is destroyed first.
778 void GpuChannel::PostHandleMessage( | |
779 const scoped_refptr<GpuChannelMessageQueue>& queue) { | |
780 task_runner_->PostTask(FROM_HERE, | |
781 base::Bind(&GpuChannel::HandleMessage, | |
782 weak_factory_.GetWeakPtr(), queue)); | |
783 } | |
784 | |
// Schedules immediate handling of a message that bypasses stream ordering
// (wait-for-token / wait-for-get-offset).
785 void GpuChannel::PostHandleOutOfOrderMessage(const IPC::Message& msg) { | |
786 task_runner_->PostTask(FROM_HERE, | |
787 base::Bind(&GpuChannel::HandleOutOfOrderMessage, | |
788 weak_factory_.GetWeakPtr(), msg)); | |
789 } | |
790 | |
// Main-thread processing of the front message of |message_queue|. If the
// stub still has unprocessed commands afterwards (an AsyncFlush that
// yielded), the message is paused and retried; otherwise it is retired.
791 void GpuChannel::HandleMessage( | |
792 const scoped_refptr<GpuChannelMessageQueue>& message_queue) { | |
793 const GpuChannelMessage* channel_msg = | |
794 message_queue->BeginMessageProcessing(); | |
795 if (!channel_msg) | |
796 return; | |
797 | |
798 const IPC::Message& msg = channel_msg->message; | |
799 int32_t routing_id = msg.routing_id(); | |
800 GpuCommandBufferStub* stub = stubs_.get(routing_id); | |
801 | |
802 DCHECK(!stub || stub->IsScheduled()); | |
803 | |
804 DVLOG(1) << "received message @" << &msg << " on channel @" << this | |
805 << " with type " << msg.type(); | |
806 | |
807 HandleMessageHelper(msg); | |
808 | |
809 // If we get descheduled or yield while processing a message. | |
810 if (stub && stub->HasUnprocessedCommands()) { | |
811 DCHECK_EQ((uint32_t)GpuCommandBufferMsg_AsyncFlush::ID, msg.type()); | |
812 message_queue->PauseMessageProcessing(); | |
813 } else { | |
814 message_queue->FinishMessageProcessing(); | |
815 } | |
816 } | |
817 | |
// Routes |msg| to the control handler or the message router, then to the
// unhandled-message listener; unrouted sync messages still get an error
// reply so the sender is never left blocked.
818 void GpuChannel::HandleMessageHelper(const IPC::Message& msg) { | |
819 int32_t routing_id = msg.routing_id(); | |
820 | |
821 bool handled = false; | |
822 if (routing_id == MSG_ROUTING_CONTROL) { | |
823 handled = OnControlMessageReceived(msg); | |
824 } else { | |
825 handled = router_.RouteMessage(msg); | |
826 } | |
827 | |
828 if (!handled && unhandled_message_listener_) | |
829 handled = unhandled_message_listener_->OnMessageReceived(msg); | |
830 | |
831 // Respond to sync messages even if router failed to route. | |
832 if (!handled && msg.is_sync()) { | |
833 IPC::Message* reply = IPC::SyncMessage::GenerateReply(&msg); | |
834 reply->set_reply_error(); | |
835 Send(reply); | |
836 } | |
837 } | |
838 | |
// Handles a message that skipped its stream queue; no order-number
// bookkeeping is performed for these.
839 void GpuChannel::HandleOutOfOrderMessage(const IPC::Message& msg) { | |
840 HandleMessageHelper(msg); | |
841 } | |
842 | |
// Test hook: dispatches a message synchronously, bypassing the queues.
843 void GpuChannel::HandleMessageForTesting(const IPC::Message& msg) { | |
844 HandleMessageHelper(msg); | |
845 } | |
846 | |
// Creates a stream queue for |stream_id|. Only the default stream gets the
// preempting flag, so only it can preempt other channels. The route count
// starts at zero; DestroyStreamIfNecessary() reaps unused streams.
847 scoped_refptr<GpuChannelMessageQueue> GpuChannel::CreateStream( | |
848 int32_t stream_id, | |
849 gpu::GpuStreamPriority stream_priority) { | |
850 DCHECK(streams_.find(stream_id) == streams_.end()); | |
851 scoped_refptr<GpuChannelMessageQueue> queue = GpuChannelMessageQueue::Create( | |
852 stream_id, stream_priority, this, io_task_runner_, | |
853 (stream_id == gpu::GPU_STREAM_DEFAULT) ? preempting_flag_ : nullptr, | |
854 preempted_flag_, sync_point_manager_); | |
855 streams_.insert(std::make_pair(stream_id, queue)); | |
856 streams_to_num_routes_.insert(std::make_pair(stream_id, 0)); | |
857 return queue; | |
858 } | |
859 | |
860 scoped_refptr<GpuChannelMessageQueue> GpuChannel::LookupStream( | |
861 int32_t stream_id) { | |
862 auto stream_it = streams_.find(stream_id); | |
863 if (stream_it != streams_.end()) | |
864 return stream_it->second; | |
865 return nullptr; | |
866 } | |
867 | |
868 void GpuChannel::DestroyStreamIfNecessary( | |
869 const scoped_refptr<GpuChannelMessageQueue>& queue) { | |
870 int32_t stream_id = queue->stream_id(); | |
871 if (streams_to_num_routes_[stream_id] == 0) { | |
872 queue->Disable(); | |
873 streams_to_num_routes_.erase(stream_id); | |
874 streams_.erase(stream_id); | |
875 } | |
876 } | |
877 | |
878 void GpuChannel::AddRouteToStream(int32_t route_id, int32_t stream_id) { | |
879 DCHECK(streams_.find(stream_id) != streams_.end()); | |
880 DCHECK(routes_to_streams_.find(route_id) == routes_to_streams_.end()); | |
881 streams_to_num_routes_[stream_id]++; | |
882 routes_to_streams_.insert(std::make_pair(route_id, stream_id)); | |
883 filter_->AddRoute(route_id, streams_[stream_id]); | |
884 } | |
885 | |
886 void GpuChannel::RemoveRouteFromStream(int32_t route_id) { | |
887 DCHECK(routes_to_streams_.find(route_id) != routes_to_streams_.end()); | |
888 int32_t stream_id = routes_to_streams_[route_id]; | |
889 DCHECK(streams_.find(stream_id) != streams_.end()); | |
890 routes_to_streams_.erase(route_id); | |
891 streams_to_num_routes_[stream_id]--; | |
892 filter_->RemoveRoute(route_id); | |
893 DestroyStreamIfNecessary(streams_[stream_id]); | |
894 } | |
895 | |
#if defined(OS_ANDROID)
// Returns an arbitrary stub whose decoder context has not been lost, or
// null when no such stub exists.
const GpuCommandBufferStub* GpuChannel::GetOneStub() const {
  for (const auto& entry : stubs_) {
    const GpuCommandBufferStub* candidate = entry.second;
    if (!candidate->decoder())
      continue;
    if (candidate->decoder()->WasContextLost())
      continue;
    return candidate;
  }
  return nullptr;
}
#endif
906 | |
907 void GpuChannel::OnCreateCommandBuffer( | |
908 gpu::SurfaceHandle surface_handle, | |
909 const gfx::Size& size, | |
910 const GPUCreateCommandBufferConfig& init_params, | |
911 int32_t route_id, | |
912 bool* succeeded) { | |
913 TRACE_EVENT2("gpu", "GpuChannel::OnCreateCommandBuffer", "route_id", route_id, | |
914 "offscreen", (surface_handle == gpu::kNullSurfaceHandle)); | |
915 *succeeded = false; | |
916 if (surface_handle != gpu::kNullSurfaceHandle && | |
917 !allow_view_command_buffers_) { | |
918 DLOG(ERROR) << "GpuChannel::CreateCommandBuffer(): attempt to create a " | |
919 "view context on a non-priviledged channel"; | |
920 return; | |
921 } | |
922 | |
923 int32_t share_group_id = init_params.share_group_id; | |
924 GpuCommandBufferStub* share_group = stubs_.get(share_group_id); | |
925 | |
926 if (!share_group && share_group_id != MSG_ROUTING_NONE) { | |
927 DLOG(ERROR) | |
928 << "GpuChannel::OnCreateCommandBuffer(): invalid share group id"; | |
929 return; | |
930 } | |
931 | |
932 int32_t stream_id = init_params.stream_id; | |
933 if (share_group && stream_id != share_group->stream_id()) { | |
934 DLOG(ERROR) << "GpuChannel::OnCreateCommandBuffer(): stream id does not " | |
935 "match share group stream id"; | |
936 return; | |
937 } | |
938 | |
939 gpu::GpuStreamPriority stream_priority = init_params.stream_priority; | |
940 if (!allow_real_time_streams_ && | |
941 stream_priority == gpu::GpuStreamPriority::REAL_TIME) { | |
942 DLOG(ERROR) << "GpuChannel::OnCreateCommandBuffer(): real time stream " | |
943 "priority not allowed"; | |
944 return; | |
945 } | |
946 | |
947 scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub( | |
948 this, sync_point_manager_, task_runner_.get(), share_group, | |
949 surface_handle, mailbox_manager_.get(), preempted_flag_.get(), | |
950 subscription_ref_set_.get(), pending_valuebuffer_state_.get(), size, | |
951 disallowed_features_, init_params.attribs, init_params.gpu_preference, | |
952 init_params.stream_id, route_id, watchdog_, init_params.active_url)); | |
953 | |
954 scoped_refptr<GpuChannelMessageQueue> queue = LookupStream(stream_id); | |
955 if (!queue) | |
956 queue = CreateStream(stream_id, stream_priority); | |
957 | |
958 if (!AddRoute(route_id, stream_id, stub.get())) { | |
959 DestroyStreamIfNecessary(queue); | |
960 DLOG(ERROR) << "GpuChannel::OnCreateCommandBuffer(): failed to add route"; | |
961 return; | |
962 } | |
963 | |
964 stubs_.set(route_id, std::move(stub)); | |
965 *succeeded = true; | |
966 } | |
967 | |
// Destroys the stub for |route_id|. The stub is removed from |stubs_| first
// so its destructor runs before the route is unregistered; the stream is
// explicitly rescheduled when the stub dies while descheduled.
void GpuChannel::OnDestroyCommandBuffer(int32_t route_id) {
  TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
               "route_id", route_id);

  scoped_ptr<GpuCommandBufferStub> stub = stubs_.take_and_erase(route_id);
  // In case the renderer is currently blocked waiting for a sync reply from the
  // stub, we need to make sure to reschedule the correct stream here.
  if (stub && !stub->IsScheduled()) {
    // This stub won't get a chance to reschedule the stream so do that now.
    OnStreamRescheduled(stub->stream_id(), true);
  }

  RemoveRoute(route_id);
}
982 | |
// Fills |gpu_driver_bug_workarounds| with the name of every driver bug
// workaround that is enabled in a freshly constructed FeatureInfo.
void GpuChannel::OnGetDriverBugWorkArounds(
    std::vector<std::string>* gpu_driver_bug_workarounds) {
  // TODO(j.isorce): http://crbug.com/599964 Do the extraction of workarounds in
  // the GpuChannelManager constructor. Currently it is done in the FeatureInfo
  // constructor. There is no need to extract them from the command-line every
  // time a new FeatureInfo is created (i.e. per ContextGroup) since parsing
  // result is a constant.
  scoped_refptr<gpu::gles2::FeatureInfo> feature_info =
      new gpu::gles2::FeatureInfo;
  gpu_driver_bug_workarounds->clear();
  // X-macro expansion: for each known workaround, append its stringified
  // name when the corresponding flag is set on |feature_info|.
#define GPU_OP(type, name) \
  if (feature_info->workarounds().name) \
    gpu_driver_bug_workarounds->push_back(#name);
  GPU_DRIVER_BUG_WORKAROUNDS(GPU_OP)
#undef GPU_OP
}
999 | |
1000 void GpuChannel::CacheShader(const std::string& key, | |
1001 const std::string& shader) { | |
1002 gpu_channel_manager_->delegate()->StoreShaderToDisk(client_id_, key, shader); | |
1003 } | |
1004 | |
1005 void GpuChannel::AddFilter(IPC::MessageFilter* filter) { | |
1006 io_task_runner_->PostTask( | |
1007 FROM_HERE, base::Bind(&GpuChannelMessageFilter::AddChannelFilter, | |
1008 filter_, make_scoped_refptr(filter))); | |
1009 } | |
1010 | |
1011 void GpuChannel::RemoveFilter(IPC::MessageFilter* filter) { | |
1012 io_task_runner_->PostTask( | |
1013 FROM_HERE, base::Bind(&GpuChannelMessageFilter::RemoveChannelFilter, | |
1014 filter_, make_scoped_refptr(filter))); | |
1015 } | |
1016 | |
1017 uint64_t GpuChannel::GetMemoryUsage() { | |
1018 // Collect the unique memory trackers in use by the |stubs_|. | |
1019 std::set<gpu::gles2::MemoryTracker*> unique_memory_trackers; | |
1020 for (auto& kv : stubs_) | |
1021 unique_memory_trackers.insert(kv.second->GetMemoryTracker()); | |
1022 | |
1023 // Sum the memory usage for all unique memory trackers. | |
1024 uint64_t size = 0; | |
1025 for (auto* tracker : unique_memory_trackers) { | |
1026 size += gpu_channel_manager()->gpu_memory_manager()->GetTrackerMemoryUsage( | |
1027 tracker); | |
1028 } | |
1029 | |
1030 return size; | |
1031 } | |
1032 | |
1033 scoped_refptr<gl::GLImage> GpuChannel::CreateImageForGpuMemoryBuffer( | |
1034 const gfx::GpuMemoryBufferHandle& handle, | |
1035 const gfx::Size& size, | |
1036 gfx::BufferFormat format, | |
1037 uint32_t internalformat) { | |
1038 switch (handle.type) { | |
1039 case gfx::SHARED_MEMORY_BUFFER: { | |
1040 if (!base::IsValueInRangeForNumericType<size_t>(handle.stride)) | |
1041 return nullptr; | |
1042 scoped_refptr<gl::GLImageSharedMemory> image( | |
1043 new gl::GLImageSharedMemory(size, internalformat)); | |
1044 if (!image->Initialize(handle.handle, handle.id, format, handle.offset, | |
1045 handle.stride)) { | |
1046 return nullptr; | |
1047 } | |
1048 | |
1049 return image; | |
1050 } | |
1051 default: { | |
1052 GpuChannelManager* manager = gpu_channel_manager(); | |
1053 if (!manager->gpu_memory_buffer_factory()) | |
1054 return nullptr; | |
1055 | |
1056 return manager->gpu_memory_buffer_factory() | |
1057 ->AsImageFactory() | |
1058 ->CreateImageForGpuMemoryBuffer(handle, | |
1059 size, | |
1060 format, | |
1061 internalformat, | |
1062 client_id_); | |
1063 } | |
1064 } | |
1065 } | |
1066 | |
// Records the latest value state for |target| in the channel's pending
// valuebuffer state.
void GpuChannel::HandleUpdateValueState(
    unsigned int target, const gpu::ValueState& state) {
  pending_valuebuffer_state_->UpdateState(target, state);
}
1071 | |
1072 } // namespace content | |
OLD | NEW |