OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/common/gpu/client/gpu_channel_host.h" | 5 #include "content/common/gpu/client/gpu_channel_host.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 | 8 |
9 #include "base/bind.h" | 9 #include "base/bind.h" |
10 #include "base/message_loop/message_loop.h" | 10 #include "base/location.h" |
11 #include "base/message_loop/message_loop_proxy.h" | |
12 #include "base/posix/eintr_wrapper.h" | 11 #include "base/posix/eintr_wrapper.h" |
| 12 #include "base/single_thread_task_runner.h" |
| 13 #include "base/thread_task_runner_handle.h" |
13 #include "base/threading/thread_restrictions.h" | 14 #include "base/threading/thread_restrictions.h" |
14 #include "base/trace_event/trace_event.h" | 15 #include "base/trace_event/trace_event.h" |
15 #include "content/common/gpu/client/command_buffer_proxy_impl.h" | 16 #include "content/common/gpu/client/command_buffer_proxy_impl.h" |
16 #include "content/common/gpu/gpu_messages.h" | 17 #include "content/common/gpu/gpu_messages.h" |
17 #include "ipc/ipc_sync_message_filter.h" | 18 #include "ipc/ipc_sync_message_filter.h" |
18 #include "url/gurl.h" | 19 #include "url/gurl.h" |
19 | 20 |
20 #if defined(OS_WIN) | 21 #if defined(OS_WIN) |
21 #include "content/public/common/sandbox_init.h" | 22 #include "content/public/common/sandbox_init.h" |
22 #endif | 23 #endif |
23 | 24 |
24 using base::AutoLock; | 25 using base::AutoLock; |
25 using base::MessageLoopProxy; | |
26 | 26 |
27 namespace content { | 27 namespace content { |
28 | 28 |
29 GpuListenerInfo::GpuListenerInfo() {} | 29 GpuListenerInfo::GpuListenerInfo() {} |
30 | 30 |
31 GpuListenerInfo::~GpuListenerInfo() {} | 31 GpuListenerInfo::~GpuListenerInfo() {} |
32 | 32 |
33 ProxyFlushInfo::ProxyFlushInfo() | 33 ProxyFlushInfo::ProxyFlushInfo() |
34 : flush_pending(false), | 34 : flush_pending(false), |
35 route_id(MSG_ROUTING_NONE), | 35 route_id(MSG_ROUTING_NONE), |
(...skipping 28 matching lines...) |
64 next_transfer_buffer_id_.GetNext(); | 64 next_transfer_buffer_id_.GetNext(); |
65 next_image_id_.GetNext(); | 65 next_image_id_.GetNext(); |
66 next_route_id_.GetNext(); | 66 next_route_id_.GetNext(); |
67 } | 67 } |
68 | 68 |
69 void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle, | 69 void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle, |
70 base::WaitableEvent* shutdown_event) { | 70 base::WaitableEvent* shutdown_event) { |
71 DCHECK(factory_->IsMainThread()); | 71 DCHECK(factory_->IsMainThread()); |
72 // Open a channel to the GPU process. We pass NULL as the main listener here | 72 // Open a channel to the GPU process. We pass NULL as the main listener here |
73 // since we need to filter everything to route it to the right thread. | 73 // since we need to filter everything to route it to the right thread. |
74 scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy(); | 74 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner = |
75 channel_ = IPC::SyncChannel::Create(channel_handle, | 75 factory_->GetIOThreadTaskRunner(); |
76 IPC::Channel::MODE_CLIENT, | 76 channel_ = |
77 NULL, | 77 IPC::SyncChannel::Create(channel_handle, IPC::Channel::MODE_CLIENT, NULL, |
78 io_loop.get(), | 78 io_task_runner.get(), true, shutdown_event); |
79 true, | |
80 shutdown_event); | |
81 | 79 |
82 sync_filter_ = new IPC::SyncMessageFilter(shutdown_event); | 80 sync_filter_ = new IPC::SyncMessageFilter(shutdown_event); |
83 | 81 |
84 channel_->AddFilter(sync_filter_.get()); | 82 channel_->AddFilter(sync_filter_.get()); |
85 | 83 |
86 channel_filter_ = new MessageFilter(); | 84 channel_filter_ = new MessageFilter(); |
87 | 85 |
88 // Install the filter last, because we intercept all leftover | 86 // Install the filter last, because we intercept all leftover |
89 // messages. | 87 // messages. |
90 channel_->AddFilter(channel_filter_.get()); | 88 channel_->AddFilter(channel_filter_.get()); |
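Note: the Connect() hunk above shows the shape of the whole CL: the IO-thread MessageLoopProxy from GetIOLoopProxy() becomes a base::SingleThreadTaskRunner from GetIOThreadTaskRunner(), and that runner is what IPC::SyncChannel::Create() and every later PostTask() call receive. MessageLoopProxy was itself a SingleThreadTaskRunner implementation, so call sites mostly just change which type they name. A minimal before/after sketch of the posting pattern, using only names that already appear in this file (the |task| closure is a placeholder):

    // Before this CL: the IO thread was addressed through MessageLoopProxy.
    //   scoped_refptr<base::MessageLoopProxy> io_loop =
    //       factory_->GetIOLoopProxy();
    //   io_loop->PostTask(FROM_HERE, task);

    // After this CL: the same thread is addressed through its task runner.
    scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
        factory_->GetIOThreadTaskRunner();
    io_task_runner->PostTask(FROM_HERE, task);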
(...skipping 88 matching lines...) |
179 CreateCommandBufferResult result = factory_->CreateViewCommandBuffer( | 177 CreateCommandBufferResult result = factory_->CreateViewCommandBuffer( |
180 surface_id, init_params, route_id); | 178 surface_id, init_params, route_id); |
181 if (result != CREATE_COMMAND_BUFFER_SUCCEEDED) { | 179 if (result != CREATE_COMMAND_BUFFER_SUCCEEDED) { |
182 LOG(ERROR) << "GpuChannelHost::CreateViewCommandBuffer failed."; | 180 LOG(ERROR) << "GpuChannelHost::CreateViewCommandBuffer failed."; |
183 | 181 |
184 if (result == CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST) { | 182 if (result == CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST) { |
185 // The GPU channel needs to be considered lost. The caller will | 183 // The GPU channel needs to be considered lost. The caller will |
186 // then set up a new connection, and the GPU channel and any | 184 // then set up a new connection, and the GPU channel and any |
187 // view command buffers will all be associated with the same GPU | 185 // view command buffers will all be associated with the same GPU |
188 // process. | 186 // process. |
189 DCHECK(MessageLoopProxy::current().get()); | 187 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner = |
190 | 188 factory_->GetIOThreadTaskRunner(); |
191 scoped_refptr<base::MessageLoopProxy> io_loop = | 189 io_task_runner->PostTask( |
192 factory_->GetIOLoopProxy(); | 190 FROM_HERE, base::Bind(&GpuChannelHost::MessageFilter::OnChannelError, |
193 io_loop->PostTask( | 191 channel_filter_.get())); |
194 FROM_HERE, | |
195 base::Bind(&GpuChannelHost::MessageFilter::OnChannelError, | |
196 channel_filter_.get())); | |
197 } | 192 } |
198 | 193 |
199 return NULL; | 194 return NULL; |
200 } | 195 } |
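Note: in the failure path just above, MessageFilter::OnChannelError is bound with channel_filter_.get(), a raw pointer. Since the filter is ref-counted (it is an IPC::MessageFilter) and base::Bind() takes a reference when given a raw pointer to a ref-counted type, the filter stays alive until the posted task has run on the IO thread. A stand-alone sketch of that lifetime guarantee, with hypothetical class and function names that are not part of this CL:

    class Worker : public base::RefCountedThreadSafe<Worker> {
     public:
      void OnError() { /* runs on whichever thread the task lands on */ }

     private:
      friend class base::RefCountedThreadSafe<Worker>;
      ~Worker() {}
    };

    void PostError(scoped_refptr<base::SingleThreadTaskRunner> io_task_runner,
                   Worker* worker) {
      // base::Bind() AddRef()s |worker|, so the object outlives the task even
      // if the caller drops its own reference right after posting.
      io_task_runner->PostTask(FROM_HERE,
                               base::Bind(&Worker::OnError, worker));
    }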
201 | 196 |
202 CommandBufferProxyImpl* command_buffer = | 197 CommandBufferProxyImpl* command_buffer = |
203 new CommandBufferProxyImpl(this, route_id); | 198 new CommandBufferProxyImpl(this, route_id); |
204 AddRoute(route_id, command_buffer->AsWeakPtr()); | 199 AddRoute(route_id, command_buffer->AsWeakPtr()); |
205 | 200 |
206 AutoLock lock(context_lock_); | 201 AutoLock lock(context_lock_); |
(...skipping 73 matching lines...) |
280 } | 275 } |
281 | 276 |
282 void GpuChannelHost::DestroyChannel() { | 277 void GpuChannelHost::DestroyChannel() { |
283 DCHECK(factory_->IsMainThread()); | 278 DCHECK(factory_->IsMainThread()); |
284 AutoLock lock(context_lock_); | 279 AutoLock lock(context_lock_); |
285 channel_.reset(); | 280 channel_.reset(); |
286 } | 281 } |
287 | 282 |
288 void GpuChannelHost::AddRoute( | 283 void GpuChannelHost::AddRoute( |
289 int route_id, base::WeakPtr<IPC::Listener> listener) { | 284 int route_id, base::WeakPtr<IPC::Listener> listener) { |
290 DCHECK(MessageLoopProxy::current().get()); | 285 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner = |
291 | 286 factory_->GetIOThreadTaskRunner(); |
292 scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy(); | 287 io_task_runner->PostTask(FROM_HERE, |
293 io_loop->PostTask(FROM_HERE, | 288 base::Bind(&GpuChannelHost::MessageFilter::AddRoute, |
294 base::Bind(&GpuChannelHost::MessageFilter::AddRoute, | 289 channel_filter_.get(), route_id, listener, |
295 channel_filter_.get(), route_id, listener, | 290 base::ThreadTaskRunnerHandle::Get())); |
296 MessageLoopProxy::current())); | |
297 } | 291 } |
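Note: AddRoute() now captures the registering thread with base::ThreadTaskRunnerHandle::Get() where the old code passed MessageLoopProxy::current(); the filter later uses that runner to bounce messages for the route back to the thread that added it. A small capture-and-post-back sketch, assuming Chromium's base library (Reply(), DoOnIOThread() and RegisterFromCurrentThread() are hypothetical helpers):

    void Reply();  // hypothetical; runs on the registering thread

    void DoOnIOThread(
        scoped_refptr<base::SingleThreadTaskRunner> origin_runner) {
      // Work happens on the IO thread, then the reply is posted back to the
      // thread whose runner was captured at registration time.
      origin_runner->PostTask(FROM_HERE, base::Bind(&Reply));
    }

    void RegisterFromCurrentThread(
        scoped_refptr<base::SingleThreadTaskRunner> io_task_runner) {
      // ThreadTaskRunnerHandle::Get() returns the runner for the thread this
      // function is called on, much as MessageLoopProxy::current() used to.
      io_task_runner->PostTask(
          FROM_HERE,
          base::Bind(&DoOnIOThread, base::ThreadTaskRunnerHandle::Get()));
    }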
298 | 292 |
299 void GpuChannelHost::RemoveRoute(int route_id) { | 293 void GpuChannelHost::RemoveRoute(int route_id) { |
300 scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy(); | 294 scoped_refptr<base::SingleThreadTaskRunner> io_task_runner = |
301 io_loop->PostTask(FROM_HERE, | 295 factory_->GetIOThreadTaskRunner(); |
302 base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute, | 296 io_task_runner->PostTask( |
303 channel_filter_.get(), route_id)); | 297 FROM_HERE, base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute, |
| 298 channel_filter_.get(), route_id)); |
304 } | 299 } |
305 | 300 |
306 base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess( | 301 base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess( |
307 base::SharedMemoryHandle source_handle) { | 302 base::SharedMemoryHandle source_handle) { |
308 if (IsLost()) | 303 if (IsLost()) |
309 return base::SharedMemory::NULLHandle(); | 304 return base::SharedMemory::NULLHandle(); |
310 | 305 |
311 #if defined(OS_WIN) | 306 #if defined(OS_WIN) |
312 // Windows needs to explicitly duplicate the handle out to another process. | 307 // Windows needs to explicitly duplicate the handle out to another process. |
313 base::SharedMemoryHandle target_handle; | 308 base::SharedMemoryHandle target_handle; |
(...skipping 66 matching lines...) |
380 | 375 |
381 GpuChannelHost::MessageFilter::MessageFilter() | 376 GpuChannelHost::MessageFilter::MessageFilter() |
382 : lost_(false) { | 377 : lost_(false) { |
383 } | 378 } |
384 | 379 |
385 GpuChannelHost::MessageFilter::~MessageFilter() {} | 380 GpuChannelHost::MessageFilter::~MessageFilter() {} |
386 | 381 |
387 void GpuChannelHost::MessageFilter::AddRoute( | 382 void GpuChannelHost::MessageFilter::AddRoute( |
388 int route_id, | 383 int route_id, |
389 base::WeakPtr<IPC::Listener> listener, | 384 base::WeakPtr<IPC::Listener> listener, |
390 scoped_refptr<MessageLoopProxy> loop) { | 385 scoped_refptr<base::SingleThreadTaskRunner> task_runner) { |
391 DCHECK(listeners_.find(route_id) == listeners_.end()); | 386 DCHECK(listeners_.find(route_id) == listeners_.end()); |
| 387 DCHECK(task_runner); |
392 GpuListenerInfo info; | 388 GpuListenerInfo info; |
393 info.listener = listener; | 389 info.listener = listener; |
394 info.loop = loop; | 390 info.task_runner = task_runner; |
395 listeners_[route_id] = info; | 391 listeners_[route_id] = info; |
396 } | 392 } |
397 | 393 |
398 void GpuChannelHost::MessageFilter::RemoveRoute(int route_id) { | 394 void GpuChannelHost::MessageFilter::RemoveRoute(int route_id) { |
399 ListenerMap::iterator it = listeners_.find(route_id); | 395 ListenerMap::iterator it = listeners_.find(route_id); |
400 if (it != listeners_.end()) | 396 if (it != listeners_.end()) |
401 listeners_.erase(it); | 397 listeners_.erase(it); |
402 } | 398 } |
403 | 399 |
404 bool GpuChannelHost::MessageFilter::OnMessageReceived( | 400 bool GpuChannelHost::MessageFilter::OnMessageReceived( |
405 const IPC::Message& message) { | 401 const IPC::Message& message) { |
406 // Never handle sync message replies or we will deadlock here. | 402 // Never handle sync message replies or we will deadlock here. |
407 if (message.is_reply()) | 403 if (message.is_reply()) |
408 return false; | 404 return false; |
409 | 405 |
410 ListenerMap::iterator it = listeners_.find(message.routing_id()); | 406 ListenerMap::iterator it = listeners_.find(message.routing_id()); |
411 if (it == listeners_.end()) | 407 if (it == listeners_.end()) |
412 return false; | 408 return false; |
413 | 409 |
414 const GpuListenerInfo& info = it->second; | 410 const GpuListenerInfo& info = it->second; |
415 info.loop->PostTask( | 411 info.task_runner->PostTask( |
416 FROM_HERE, | 412 FROM_HERE, |
417 base::Bind( | 413 base::Bind(base::IgnoreResult(&IPC::Listener::OnMessageReceived), |
418 base::IgnoreResult(&IPC::Listener::OnMessageReceived), | 414 info.listener, message)); |
419 info.listener, | |
420 message)); | |
421 return true; | 415 return true; |
422 } | 416 } |
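Note: OnMessageReceived() forwards each routed message to its listener's own task runner. Two details carry over unchanged from the old code: IPC::Listener::OnMessageReceived() returns bool, so it is wrapped in base::IgnoreResult() to produce a void closure, and because info.listener is a base::WeakPtr the task quietly becomes a no-op if the listener has already been destroyed, which is also why it must run on the listener's own thread. A short sketch of the IgnoreResult() part, with a hypothetical bool-returning function:

    bool HandleValue(int value);  // hypothetical; returns false on failure

    void PostIgnoringResult(
        scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
      // PostTask() wants a void() closure; IgnoreResult() drops the bool.
      task_runner->PostTask(
          FROM_HERE, base::Bind(base::IgnoreResult(&HandleValue), 42));
    }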
423 | 417 |
424 void GpuChannelHost::MessageFilter::OnChannelError() { | 418 void GpuChannelHost::MessageFilter::OnChannelError() { |
425 // Set the lost state before signalling the proxies. That way, if they | 419 // Set the lost state before signalling the proxies. That way, if they |
426 // themselves post a task to recreate the context, they will not try to re-use | 420 // themselves post a task to recreate the context, they will not try to re-use |
427 // this channel host. | 421 // this channel host. |
428 { | 422 { |
429 AutoLock lock(lock_); | 423 AutoLock lock(lock_); |
430 lost_ = true; | 424 lost_ = true; |
431 } | 425 } |
432 | 426 |
433 // Inform all the proxies that an error has occurred. This will be reported | 427 // Inform all the proxies that an error has occurred. This will be reported |
434 // via OpenGL as a lost context. | 428 // via OpenGL as a lost context. |
435 for (ListenerMap::iterator it = listeners_.begin(); | 429 for (ListenerMap::iterator it = listeners_.begin(); |
436 it != listeners_.end(); | 430 it != listeners_.end(); |
437 it++) { | 431 it++) { |
438 const GpuListenerInfo& info = it->second; | 432 const GpuListenerInfo& info = it->second; |
439 info.loop->PostTask( | 433 info.task_runner->PostTask( |
440 FROM_HERE, | 434 FROM_HERE, base::Bind(&IPC::Listener::OnChannelError, info.listener)); |
441 base::Bind(&IPC::Listener::OnChannelError, info.listener)); | |
442 } | 435 } |
443 | 436 |
444 listeners_.clear(); | 437 listeners_.clear(); |
445 } | 438 } |
446 | 439 |
447 bool GpuChannelHost::MessageFilter::IsLost() const { | 440 bool GpuChannelHost::MessageFilter::IsLost() const { |
448 AutoLock lock(lock_); | 441 AutoLock lock(lock_); |
449 return lost_; | 442 return lost_; |
450 } | 443 } |
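Note: IsLost() reads lost_ under the same lock that OnChannelError() takes when setting it, and it does so from a const method, which implies the lock member is declared mutable in the header (the header is not part of this diff, so that declaration is assumed here). The pattern in isolation, as a sketch using base/synchronization/lock.h:

    class LostFlag {
     public:
      LostFlag() : lost_(false) {}

      void MarkLost() {
        base::AutoLock lock(lock_);
        lost_ = true;
      }

      bool IsLost() const {
        base::AutoLock lock(lock_);  // compiles because |lock_| is mutable
        return lost_;
      }

     private:
      mutable base::Lock lock_;  // assumed to mirror the real header
      bool lost_;
    };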
451 | 444 |
452 } // namespace content | 445 } // namespace content |