Chromium Code Reviews

Unified Diff: content/common/gpu/client/gpu_channel_host.cc

Issue 1135943005: Revert of content/common: Remove use of MessageLoopProxy and deprecated MessageLoop APIs (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years, 7 months ago
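For orientation before reading the hunks: this revert swaps the newer task-runner idiom back to the older MessageLoopProxy idiom. The hypothetical helpers below are not part of this CL; they only compile inside a Chromium checkout of this era, with a message loop running on the calling thread. They sketch the two equivalent forms, and every call in them also appears verbatim in the diff.

#include "base/bind.h"
#include "base/callback.h"
#include "base/location.h"
#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/single_thread_task_runner.h"
#include "base/thread_task_runner_handle.h"

namespace {

// Older idiom, restored by this revert: MessageLoopProxy is the handle to the
// current thread's message loop.
void PostToCurrentThreadOldStyle(const base::Closure& task) {
  scoped_refptr<base::MessageLoopProxy> loop =
      base::MessageLoopProxy::current();
  loop->PostTask(FROM_HERE, task);
}

// Newer idiom, removed by this revert: the same queue, reached through the
// SingleThreadTaskRunner / ThreadTaskRunnerHandle types.
void PostToCurrentThreadNewStyle(const base::Closure& task) {
  scoped_refptr<base::SingleThreadTaskRunner> runner =
      base::ThreadTaskRunnerHandle::Get();
  runner->PostTask(FROM_HERE, task);
}

}  // namespace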
('-' lines are the task-runner code this revert removes; '+' lines are the MessageLoopProxy code it restores. Unchanged regions elided by the review tool are marked as in the original.)

 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "content/common/gpu/client/gpu_channel_host.h"
 
 #include <algorithm>
 
 #include "base/bind.h"
-#include "base/location.h"
+#include "base/message_loop/message_loop.h"
+#include "base/message_loop/message_loop_proxy.h"
 #include "base/posix/eintr_wrapper.h"
-#include "base/single_thread_task_runner.h"
-#include "base/thread_task_runner_handle.h"
 #include "base/threading/thread_restrictions.h"
 #include "base/trace_event/trace_event.h"
 #include "content/common/gpu/client/command_buffer_proxy_impl.h"
 #include "content/common/gpu/gpu_messages.h"
 #include "ipc/ipc_sync_message_filter.h"
 #include "url/gurl.h"
 
 #if defined(OS_WIN)
 #include "content/public/common/sandbox_init.h"
 #endif
 
 using base::AutoLock;
+using base::MessageLoopProxy;
 
 namespace content {
 
 GpuListenerInfo::GpuListenerInfo() {}
 
 GpuListenerInfo::~GpuListenerInfo() {}
 
 ProxyFlushInfo::ProxyFlushInfo()
     : flush_pending(false),
       route_id(MSG_ROUTING_NONE),
(...skipping 28 matching lines...)
   next_transfer_buffer_id_.GetNext();
   next_image_id_.GetNext();
   next_route_id_.GetNext();
 }
 
 void GpuChannelHost::Connect(const IPC::ChannelHandle& channel_handle,
                              base::WaitableEvent* shutdown_event) {
   DCHECK(factory_->IsMainThread());
   // Open a channel to the GPU process. We pass NULL as the main listener here
   // since we need to filter everything to route it to the right thread.
-  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
-      factory_->GetIOThreadTaskRunner();
-  channel_ =
-      IPC::SyncChannel::Create(channel_handle, IPC::Channel::MODE_CLIENT, NULL,
-                               io_task_runner.get(), true, shutdown_event);
+  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
+  channel_ = IPC::SyncChannel::Create(channel_handle,
+                                      IPC::Channel::MODE_CLIENT,
+                                      NULL,
+                                      io_loop.get(),
+                                      true,
+                                      shutdown_event);
 
   sync_filter_ = new IPC::SyncMessageFilter(shutdown_event);
 
   channel_->AddFilter(sync_filter_.get());
 
   channel_filter_ = new MessageFilter();
 
   // Install the filter last, because we intercept all leftover
   // messages.
   channel_->AddFilter(channel_filter_.get());
(...skipping 88 matching lines...)
   CreateCommandBufferResult result = factory_->CreateViewCommandBuffer(
       surface_id, init_params, route_id);
   if (result != CREATE_COMMAND_BUFFER_SUCCEEDED) {
     LOG(ERROR) << "GpuChannelHost::CreateViewCommandBuffer failed.";
 
     if (result == CREATE_COMMAND_BUFFER_FAILED_AND_CHANNEL_LOST) {
       // The GPU channel needs to be considered lost. The caller will
       // then set up a new connection, and the GPU channel and any
       // view command buffers will all be associated with the same GPU
       // process.
-      scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
-          factory_->GetIOThreadTaskRunner();
-      io_task_runner->PostTask(
-          FROM_HERE, base::Bind(&GpuChannelHost::MessageFilter::OnChannelError,
-                                channel_filter_.get()));
+      DCHECK(MessageLoopProxy::current().get());
+
+      scoped_refptr<base::MessageLoopProxy> io_loop =
+          factory_->GetIOLoopProxy();
+      io_loop->PostTask(
+          FROM_HERE,
+          base::Bind(&GpuChannelHost::MessageFilter::OnChannelError,
+                     channel_filter_.get()));
     }
 
     return NULL;
   }
 
   CommandBufferProxyImpl* command_buffer =
       new CommandBufferProxyImpl(this, route_id);
   AddRoute(route_id, command_buffer->AsWeakPtr());
 
   AutoLock lock(context_lock_);
(...skipping 73 matching lines...)
 }
 
 void GpuChannelHost::DestroyChannel() {
   DCHECK(factory_->IsMainThread());
   AutoLock lock(context_lock_);
   channel_.reset();
 }
 
 void GpuChannelHost::AddRoute(
     int route_id, base::WeakPtr<IPC::Listener> listener) {
-  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
-      factory_->GetIOThreadTaskRunner();
-  io_task_runner->PostTask(FROM_HERE,
-                           base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
-                                      channel_filter_.get(), route_id, listener,
-                                      base::ThreadTaskRunnerHandle::Get()));
+  DCHECK(MessageLoopProxy::current().get());
+
+  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
+  io_loop->PostTask(FROM_HERE,
+                    base::Bind(&GpuChannelHost::MessageFilter::AddRoute,
+                               channel_filter_.get(), route_id, listener,
+                               MessageLoopProxy::current()));
 }
 
 void GpuChannelHost::RemoveRoute(int route_id) {
-  scoped_refptr<base::SingleThreadTaskRunner> io_task_runner =
-      factory_->GetIOThreadTaskRunner();
-  io_task_runner->PostTask(
-      FROM_HERE, base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute,
-                            channel_filter_.get(), route_id));
+  scoped_refptr<base::MessageLoopProxy> io_loop = factory_->GetIOLoopProxy();
+  io_loop->PostTask(FROM_HERE,
+                    base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute,
+                               channel_filter_.get(), route_id));
 }
 
 base::SharedMemoryHandle GpuChannelHost::ShareToGpuProcess(
     base::SharedMemoryHandle source_handle) {
   if (IsLost())
     return base::SharedMemory::NULLHandle();
 
 #if defined(OS_WIN)
   // Windows needs to explicitly duplicate the handle out to another process.
   base::SharedMemoryHandle target_handle;
(...skipping 66 matching lines...)
 
 GpuChannelHost::MessageFilter::MessageFilter()
     : lost_(false) {
 }
 
 GpuChannelHost::MessageFilter::~MessageFilter() {}
 
 void GpuChannelHost::MessageFilter::AddRoute(
     int route_id,
     base::WeakPtr<IPC::Listener> listener,
-    scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
+    scoped_refptr<MessageLoopProxy> loop) {
   DCHECK(listeners_.find(route_id) == listeners_.end());
-  DCHECK(task_runner);
   GpuListenerInfo info;
   info.listener = listener;
-  info.task_runner = task_runner;
+  info.loop = loop;
   listeners_[route_id] = info;
 }
 
 void GpuChannelHost::MessageFilter::RemoveRoute(int route_id) {
   ListenerMap::iterator it = listeners_.find(route_id);
   if (it != listeners_.end())
     listeners_.erase(it);
 }
 
 bool GpuChannelHost::MessageFilter::OnMessageReceived(
     const IPC::Message& message) {
   // Never handle sync message replies or we will deadlock here.
   if (message.is_reply())
     return false;
 
   ListenerMap::iterator it = listeners_.find(message.routing_id());
   if (it == listeners_.end())
     return false;
 
   const GpuListenerInfo& info = it->second;
-  info.task_runner->PostTask(
-      FROM_HERE,
-      base::Bind(base::IgnoreResult(&IPC::Listener::OnMessageReceived),
-                 info.listener, message));
+  info.loop->PostTask(
+      FROM_HERE,
+      base::Bind(
+          base::IgnoreResult(&IPC::Listener::OnMessageReceived),
+          info.listener,
+          message));
   return true;
 }
 
 void GpuChannelHost::MessageFilter::OnChannelError() {
   // Set the lost state before signalling the proxies. That way, if they
   // themselves post a task to recreate the context, they will not try to re-use
   // this channel host.
   {
     AutoLock lock(lock_);
     lost_ = true;
   }
 
   // Inform all the proxies that an error has occurred. This will be reported
   // via OpenGL as a lost context.
   for (ListenerMap::iterator it = listeners_.begin();
        it != listeners_.end();
        it++) {
     const GpuListenerInfo& info = it->second;
-    info.task_runner->PostTask(
-        FROM_HERE, base::Bind(&IPC::Listener::OnChannelError, info.listener));
+    info.loop->PostTask(
+        FROM_HERE,
+        base::Bind(&IPC::Listener::OnChannelError, info.listener));
   }
 
   listeners_.clear();
 }
 
 bool GpuChannelHost::MessageFilter::IsLost() const {
   AutoLock lock(lock_);
   return lost_;
 }
 
 }  // namespace content
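The comments in Connect() and in MessageFilter::AddRoute()/OnMessageReceived() above describe the routing scheme: IPC traffic arrives on the IO thread, and each message is posted back to the registered listener's own thread through the MessageLoopProxy (previously task runner) captured at AddRoute() time. As a standalone illustration of that underlying pattern, here is a minimal single-thread task queue in plain standard C++; it is not Chromium's base library, and all names in it are invented for the sketch.

#include <condition_variable>
#include <deque>
#include <functional>
#include <iostream>
#include <mutex>
#include <thread>

// A queue whose tasks always run on the single thread that calls Run(),
// no matter which thread posted them -- the property GpuChannelHost relies
// on when it hops between the main thread and the IO thread.
class SingleThreadTaskQueue {
 public:
  // May be called from any thread.
  void PostTask(std::function<void()> task) {
    {
      std::lock_guard<std::mutex> lock(mutex_);
      tasks_.push_back(std::move(task));
    }
    cv_.notify_one();
  }

  // Stops Run() once all previously posted tasks have executed.
  void Quit() {
    PostTask([this] { running_ = false; });
  }

  // Drains tasks on the calling (owning) thread until Quit() is processed.
  void Run() {
    running_ = true;
    while (running_) {
      std::function<void()> task;
      {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return !tasks_.empty(); });
        task = std::move(tasks_.front());
        tasks_.pop_front();
      }
      task();  // Run outside the lock, like a message loop would.
    }
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  std::deque<std::function<void()>> tasks_;
  bool running_ = false;  // Touched only on the owning thread.
};

int main() {
  SingleThreadTaskQueue io_queue;
  std::thread io_thread([&io_queue] { io_queue.Run(); });

  // Analogous to GpuChannelHost::RemoveRoute(): the caller is on another
  // thread, but the work must happen on the queue's owning ("IO") thread.
  io_queue.PostTask([] { std::cout << "ran on the IO thread\n"; });

  io_queue.Quit();
  io_thread.join();
  return 0;
}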
