OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/gpu/gpu_channel_host.h" | 5 #include "content/renderer/gpu/gpu_channel_host.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/message_loop.h" | 8 #include "base/message_loop.h" |
9 #include "base/message_loop_proxy.h" | 9 #include "base/message_loop_proxy.h" |
10 #include "content/common/child_process.h" | 10 #include "content/common/child_thread.h" |
11 #include "content/common/gpu/gpu_messages.h" | 11 #include "content/common/gpu/gpu_messages.h" |
12 #include "content/renderer/gpu/command_buffer_proxy.h" | 12 #include "content/renderer/gpu/command_buffer_proxy.h" |
13 #include "content/renderer/render_process.h" | |
14 #include "content/renderer/render_thread_impl.h" | |
15 #include "googleurl/src/gurl.h" | 13 #include "googleurl/src/gurl.h" |
16 #include "ipc/ipc_sync_message_filter.h" | 14 #include "ipc/ipc_sync_message_filter.h" |
17 | 15 |
| 16 GpuChannelHostFactory* GpuChannelHostFactory::instance_ = NULL; |
| 17 |
| 18 GpuChannelHostFactory::~GpuChannelHostFactory() { |
| 19 DCHECK(!instance_); |
| 20 } |
| 21 |
18 using base::AutoLock; | 22 using base::AutoLock; |
19 using base::MessageLoopProxy; | 23 using base::MessageLoopProxy; |
20 | 24 |
21 GpuListenerInfo::GpuListenerInfo() { | 25 GpuListenerInfo::GpuListenerInfo() { |
22 } | 26 } |
23 | 27 |
24 GpuListenerInfo::~GpuListenerInfo() { | 28 GpuListenerInfo::~GpuListenerInfo() { |
25 } | 29 } |
26 | 30 |
27 GpuChannelHost::MessageFilter::MessageFilter(GpuChannelHost* parent) | 31 GpuChannelHost::MessageFilter::MessageFilter(GpuChannelHost* parent) |
28 : parent_(parent) { | 32 : parent_(parent) { |
29 } | 33 } |
30 | 34 |
31 GpuChannelHost::MessageFilter::~MessageFilter() { | 35 GpuChannelHost::MessageFilter::~MessageFilter() { |
32 | 36 |
33 } | 37 } |
34 | 38 |
35 void GpuChannelHost::MessageFilter::AddRoute( | 39 void GpuChannelHost::MessageFilter::AddRoute( |
36 int route_id, | 40 int route_id, |
37 base::WeakPtr<IPC::Channel::Listener> listener, | 41 base::WeakPtr<IPC::Channel::Listener> listener, |
38 scoped_refptr<MessageLoopProxy> loop) { | 42 scoped_refptr<MessageLoopProxy> loop) { |
39 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | 43 DCHECK(parent_->factory_->IsIOThread()); |
40 DCHECK(listeners_.find(route_id) == listeners_.end()); | 44 DCHECK(listeners_.find(route_id) == listeners_.end()); |
41 GpuListenerInfo info; | 45 GpuListenerInfo info; |
42 info.listener = listener; | 46 info.listener = listener; |
43 info.loop = loop; | 47 info.loop = loop; |
44 listeners_[route_id] = info; | 48 listeners_[route_id] = info; |
45 } | 49 } |
46 | 50 |
47 void GpuChannelHost::MessageFilter::RemoveRoute(int route_id) { | 51 void GpuChannelHost::MessageFilter::RemoveRoute(int route_id) { |
48 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | 52 DCHECK(parent_->factory_->IsIOThread()); |
49 ListenerMap::iterator it = listeners_.find(route_id); | 53 ListenerMap::iterator it = listeners_.find(route_id); |
50 if (it != listeners_.end()) | 54 if (it != listeners_.end()) |
51 listeners_.erase(it); | 55 listeners_.erase(it); |
52 } | 56 } |
53 | 57 |
54 bool GpuChannelHost::MessageFilter::OnMessageReceived( | 58 bool GpuChannelHost::MessageFilter::OnMessageReceived( |
55 const IPC::Message& message) { | 59 const IPC::Message& message) { |
56 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | 60 DCHECK(parent_->factory_->IsIOThread()); |
57 // Never handle sync message replies or we will deadlock here. | 61 // Never handle sync message replies or we will deadlock here. |
58 if (message.is_reply()) | 62 if (message.is_reply()) |
59 return false; | 63 return false; |
60 | 64 |
61 DCHECK(message.routing_id() != MSG_ROUTING_CONTROL); | 65 DCHECK(message.routing_id() != MSG_ROUTING_CONTROL); |
62 | 66 |
63 ListenerMap::iterator it = listeners_.find(message.routing_id()); | 67 ListenerMap::iterator it = listeners_.find(message.routing_id()); |
64 | 68 |
65 if (it != listeners_.end()) { | 69 if (it != listeners_.end()) { |
66 const GpuListenerInfo& info = it->second; | 70 const GpuListenerInfo& info = it->second; |
67 info.loop->PostTask( | 71 info.loop->PostTask( |
68 FROM_HERE, | 72 FROM_HERE, |
69 base::Bind( | 73 base::Bind( |
70 base::IgnoreResult(&IPC::Channel::Listener::OnMessageReceived), | 74 base::IgnoreResult(&IPC::Channel::Listener::OnMessageReceived), |
71 info.listener, | 75 info.listener, |
72 message)); | 76 message)); |
73 } | 77 } |
74 | 78 |
75 return true; | 79 return true; |
76 } | 80 } |
77 | 81 |
78 void GpuChannelHost::MessageFilter::OnChannelError() { | 82 void GpuChannelHost::MessageFilter::OnChannelError() { |
79 DCHECK(MessageLoop::current() == ChildProcess::current()->io_message_loop()); | 83 DCHECK(parent_->factory_->IsIOThread()); |
80 // Inform all the proxies that an error has occurred. This will be reported | 84 // Inform all the proxies that an error has occurred. This will be reported |
81 // via OpenGL as a lost context. | 85 // via OpenGL as a lost context. |
82 for (ListenerMap::iterator it = listeners_.begin(); | 86 for (ListenerMap::iterator it = listeners_.begin(); |
83 it != listeners_.end(); | 87 it != listeners_.end(); |
84 it++) { | 88 it++) { |
85 const GpuListenerInfo& info = it->second; | 89 const GpuListenerInfo& info = it->second; |
86 info.loop->PostTask( | 90 info.loop->PostTask( |
87 FROM_HERE, | 91 FROM_HERE, |
88 base::Bind(&IPC::Channel::Listener::OnChannelError, info.listener)); | 92 base::Bind(&IPC::Channel::Listener::OnChannelError, info.listener)); |
89 } | 93 } |
90 | 94 |
91 listeners_.clear(); | 95 listeners_.clear(); |
92 | 96 |
93 ChildThread* main_thread = RenderProcess::current()->main_thread(); | 97 MessageLoop* main_loop = parent_->factory_->GetMainLoop(); |
94 MessageLoop* main_loop = main_thread->message_loop(); | |
95 main_loop->PostTask(FROM_HERE, | 98 main_loop->PostTask(FROM_HERE, |
96 base::Bind(&GpuChannelHost::OnChannelError, parent_)); | 99 base::Bind(&GpuChannelHost::OnChannelError, parent_)); |
97 } | 100 } |
98 | 101 |
99 GpuChannelHost::GpuChannelHost() | 102 GpuChannelHost::GpuChannelHost(GpuChannelHostFactory* factory) |
100 : state_(kUnconnected) { | 103 : factory_(factory), |
| 104 state_(kUnconnected) { |
101 } | 105 } |
102 | 106 |
103 GpuChannelHost::~GpuChannelHost() { | 107 GpuChannelHost::~GpuChannelHost() { |
104 } | 108 } |
105 | 109 |
106 void GpuChannelHost::Connect( | 110 void GpuChannelHost::Connect( |
107 const IPC::ChannelHandle& channel_handle, | 111 const IPC::ChannelHandle& channel_handle, |
108 base::ProcessHandle renderer_process_for_gpu) { | 112 base::ProcessHandle renderer_process_for_gpu) { |
109 DCHECK(RenderThreadImpl::current()); | 113 DCHECK(factory_->IsMainThread()); |
110 // Open a channel to the GPU process. We pass NULL as the main listener here | 114 // Open a channel to the GPU process. We pass NULL as the main listener here |
111 // since we need to filter everything to route it to the right thread. | 115 // since we need to filter everything to route it to the right thread. |
112 channel_.reset(new IPC::SyncChannel( | 116 channel_.reset(new IPC::SyncChannel( |
113 channel_handle, IPC::Channel::MODE_CLIENT, NULL, | 117 channel_handle, IPC::Channel::MODE_CLIENT, NULL, |
114 ChildProcess::current()->io_message_loop_proxy(), true, | 118 factory_->GetIOLoopProxy(), true, |
115 ChildProcess::current()->GetShutDownEvent())); | 119 factory_->GetShutDownEvent())); |
116 | 120 |
117 sync_filter_ = new IPC::SyncMessageFilter( | 121 sync_filter_ = new IPC::SyncMessageFilter( |
118 ChildProcess::current()->GetShutDownEvent()); | 122 factory_->GetShutDownEvent()); |
119 | 123 |
120 channel_->AddFilter(sync_filter_.get()); | 124 channel_->AddFilter(sync_filter_.get()); |
121 | 125 |
122 channel_filter_ = new MessageFilter(this); | 126 channel_filter_ = new MessageFilter(this); |
123 | 127 |
124 // Install the filter last, because we intercept all leftover | 128 // Install the filter last, because we intercept all leftover |
125 // messages. | 129 // messages. |
126 channel_->AddFilter(channel_filter_.get()); | 130 channel_->AddFilter(channel_filter_.get()); |
127 | 131 |
128 // It is safe to send IPC messages before the channel completes the connection | 132 // It is safe to send IPC messages before the channel completes the connection |
(...skipping 27 matching lines...) Expand all Loading... |
156 } | 160 } |
157 | 161 |
158 bool GpuChannelHost::Send(IPC::Message* message) { | 162 bool GpuChannelHost::Send(IPC::Message* message) { |
159 // The GPU process never sends synchronous IPCs so clear the unblock flag to | 163 // The GPU process never sends synchronous IPCs so clear the unblock flag to |
160 // preserve order. | 164 // preserve order. |
161 message->set_unblock(false); | 165 message->set_unblock(false); |
162 | 166 |
163 // Currently we need to choose between two different mechanisms for sending. | 167 // Currently we need to choose between two different mechanisms for sending. |
164 // On the main thread we use the regular channel Send() method, on another | 168 // On the main thread we use the regular channel Send() method, on another |
165 // thread we use SyncMessageFilter. We also have to be careful interpreting | 169 // thread we use SyncMessageFilter. We also have to be careful interpreting |
166 // RenderThreadImpl::current() since it might return NULL during shutdown, | 170 // IsMainThread() since it might return false during shutdown, |
167 // while we are actually calling from the main thread (discard message then). | 171 // while we are actually calling from the main thread (discard message then). |
168 // | 172 // |
169 // TODO: Can we just always use sync_filter_ since we setup the channel | 173 // TODO: Can we just always use sync_filter_ since we setup the channel |
170 // without a main listener? | 174 // without a main listener? |
171 if (RenderThreadImpl::current()) { | 175 if (factory_->IsMainThread()) { |
172 if (channel_.get()) | 176 if (channel_.get()) |
173 return channel_->Send(message); | 177 return channel_->Send(message); |
174 } else if (MessageLoop::current()) { | 178 } else if (MessageLoop::current()) { |
175 return sync_filter_->Send(message); | 179 return sync_filter_->Send(message); |
176 } | 180 } |
177 | 181 |
178 // Callee takes ownership of message, regardless of whether Send is | 182 // Callee takes ownership of message, regardless of whether Send is |
179 // successful. See IPC::Message::Sender. | 183 // successful. See IPC::Message::Sender. |
180 delete message; | 184 delete message; |
181 return false; | 185 return false; |
182 } | 186 } |
183 | 187 |
184 CommandBufferProxy* GpuChannelHost::CreateViewCommandBuffer( | 188 CommandBufferProxy* GpuChannelHost::CreateViewCommandBuffer( |
185 int32 surface_id, | 189 int32 surface_id, |
186 CommandBufferProxy* share_group, | 190 CommandBufferProxy* share_group, |
187 const std::string& allowed_extensions, | 191 const std::string& allowed_extensions, |
188 const std::vector<int32>& attribs, | 192 const std::vector<int32>& attribs, |
189 const GURL& active_url, | 193 const GURL& active_url, |
190 gfx::GpuPreference gpu_preference) { | 194 gfx::GpuPreference gpu_preference) { |
191 DCHECK(ChildThread::current()); | 195 DCHECK(factory_->IsMainThread()); |
192 #if defined(ENABLE_GPU) | 196 #if defined(ENABLE_GPU) |
193 AutoLock lock(context_lock_); | 197 AutoLock lock(context_lock_); |
194 // An error occurred. Need to get the host again to reinitialize it. | 198 // An error occurred. Need to get the host again to reinitialize it. |
195 if (!channel_.get()) | 199 if (!channel_.get()) |
196 return NULL; | 200 return NULL; |
197 | 201 |
198 GPUCreateCommandBufferConfig init_params; | 202 GPUCreateCommandBufferConfig init_params; |
199 init_params.share_group_id = | 203 init_params.share_group_id = |
200 share_group ? share_group->route_id() : MSG_ROUTING_NONE; | 204 share_group ? share_group->route_id() : MSG_ROUTING_NONE; |
201 init_params.allowed_extensions = allowed_extensions; | 205 init_params.allowed_extensions = allowed_extensions; |
202 init_params.attribs = attribs; | 206 init_params.attribs = attribs; |
203 init_params.active_url = active_url; | 207 init_params.active_url = active_url; |
204 init_params.gpu_preference = gpu_preference; | 208 init_params.gpu_preference = gpu_preference; |
205 int32 route_id; | 209 int32 route_id = factory_->CreateViewCommandBuffer(surface_id, init_params); |
206 if (!ChildThread::current()->Send( | |
207 new GpuHostMsg_CreateViewCommandBuffer( | |
208 surface_id, | |
209 init_params, | |
210 &route_id))) { | |
211 return NULL; | |
212 } | |
213 | |
214 if (route_id == MSG_ROUTING_NONE) | 210 if (route_id == MSG_ROUTING_NONE) |
215 return NULL; | 211 return NULL; |
216 | 212 |
217 CommandBufferProxy* command_buffer = new CommandBufferProxy(this, route_id); | 213 CommandBufferProxy* command_buffer = new CommandBufferProxy(this, route_id); |
218 AddRoute(route_id, command_buffer->AsWeakPtr()); | 214 AddRoute(route_id, command_buffer->AsWeakPtr()); |
219 proxies_[route_id] = command_buffer; | 215 proxies_[route_id] = command_buffer; |
220 return command_buffer; | 216 return command_buffer; |
221 #else | 217 #else |
222 return NULL; | 218 return NULL; |
223 #endif | 219 #endif |
(...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
284 proxies_.erase(route_id); | 280 proxies_.erase(route_id); |
285 RemoveRoute(route_id); | 281 RemoveRoute(route_id); |
286 delete command_buffer; | 282 delete command_buffer; |
287 #endif | 283 #endif |
288 } | 284 } |
289 | 285 |
290 void GpuChannelHost::AddRoute( | 286 void GpuChannelHost::AddRoute( |
291 int route_id, base::WeakPtr<IPC::Channel::Listener> listener) { | 287 int route_id, base::WeakPtr<IPC::Channel::Listener> listener) { |
292 DCHECK(MessageLoopProxy::current()); | 288 DCHECK(MessageLoopProxy::current()); |
293 | 289 |
294 MessageLoopProxy* io_loop = RenderProcess::current()->io_message_loop_proxy(); | 290 MessageLoopProxy* io_loop = factory_->GetIOLoopProxy(); |
295 io_loop->PostTask(FROM_HERE, | 291 io_loop->PostTask(FROM_HERE, |
296 base::Bind(&GpuChannelHost::MessageFilter::AddRoute, | 292 base::Bind(&GpuChannelHost::MessageFilter::AddRoute, |
297 channel_filter_.get(), route_id, listener, | 293 channel_filter_.get(), route_id, listener, |
298 MessageLoopProxy::current())); | 294 MessageLoopProxy::current())); |
299 } | 295 } |
300 | 296 |
301 void GpuChannelHost::RemoveRoute(int route_id) { | 297 void GpuChannelHost::RemoveRoute(int route_id) { |
302 MessageLoopProxy* io_loop = RenderProcess::current()->io_message_loop_proxy(); | 298 MessageLoopProxy* io_loop = factory_->GetIOLoopProxy(); |
303 io_loop->PostTask(FROM_HERE, | 299 io_loop->PostTask(FROM_HERE, |
304 base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute, | 300 base::Bind(&GpuChannelHost::MessageFilter::RemoveRoute, |
305 channel_filter_.get(), route_id)); | 301 channel_filter_.get(), route_id)); |
306 } | 302 } |
307 | 303 |
308 bool GpuChannelHost::WillGpuSwitchOccur( | 304 bool GpuChannelHost::WillGpuSwitchOccur( |
309 bool is_creating_context, gfx::GpuPreference gpu_preference) { | 305 bool is_creating_context, gfx::GpuPreference gpu_preference) { |
310 bool result = false; | 306 bool result = false; |
311 if (!Send(new GpuChannelMsg_WillGpuSwitchOccur(is_creating_context, | 307 if (!Send(new GpuChannelMsg_WillGpuSwitchOccur(is_creating_context, |
312 gpu_preference, | 308 gpu_preference, |
313 &result))) { | 309 &result))) { |
314 return false; | 310 return false; |
315 } | 311 } |
316 return result; | 312 return result; |
317 } | 313 } |
318 | 314 |
319 void GpuChannelHost::ForciblyCloseChannel() { | 315 void GpuChannelHost::ForciblyCloseChannel() { |
320 Send(new GpuChannelMsg_CloseChannel()); | 316 Send(new GpuChannelMsg_CloseChannel()); |
321 SetStateLost(); | 317 SetStateLost(); |
322 } | 318 } |
OLD | NEW |