// Copyright (c) 2009 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/logging.h"
#include "base/process_util.h"
#include "base/shared_memory.h"
#include "base/task.h"
#include "chrome/common/render_messages.h"
#include "chrome/renderer/command_buffer_proxy.h"
#include "chrome/renderer/plugin_channel_host.h"
#include "chrome/renderer/render_thread.h"
#include "content/common/gpu_messages.h"
#include "content/common/plugin_messages.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "ui/gfx/size.h"

using gpu::Buffer;

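// CommandBufferProxy is the renderer-side proxy for a command buffer owned by
// the GPU process: the gpu::CommandBuffer calls below are forwarded over IPC
// through |channel_| using |route_id_|.
//
// Rough usage sketch (illustrative only; |channel|, |route_id|, kRingBufferSize
// and put_offset are placeholders supplied by the caller, typically the
// renderer's GL context implementation):
//
//   CommandBufferProxy* proxy = new CommandBufferProxy(channel, route_id);
//   if (!proxy->Initialize(kRingBufferSize))
//     return false;                          // GPU channel unavailable.
//   gpu::Buffer ring = proxy->GetRingBuffer();
//   int32 transfer_id = proxy->CreateTransferBuffer(64 * 1024);
//   proxy->AsyncFlush(put_offset, NULL);     // Fire-and-forget flush.
//   gpu::CommandBuffer::State state = proxy->FlushSync(put_offset);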
CommandBufferProxy::CommandBufferProxy(
    IPC::Channel::Sender* channel,
    int route_id)
    : num_entries_(0),
      channel_(channel),
      route_id_(route_id) {
}

CommandBufferProxy::~CommandBufferProxy() {
  // Delete all the locally cached shared memory objects, closing the handle
  // in this process.
  for (TransferBufferMap::iterator it = transfer_buffers_.begin();
       it != transfer_buffers_.end();
       ++it) {
    delete it->second.shared_memory;
    it->second.shared_memory = NULL;
  }
}

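// Routes messages arriving from the GPU process for this route id to the
// handlers below; a message that no handler claims trips the DCHECK.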
bool CommandBufferProxy::OnMessageReceived(const IPC::Message& message) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(CommandBufferProxy, message)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_UpdateState, OnUpdateState);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SwapBuffers, OnSwapBuffers);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_NotifyRepaint,
                        OnNotifyRepaint);
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  DCHECK(handled);
  return handled;
}

void CommandBufferProxy::OnChannelError() {
  // Prevent any further messages from being sent.
  channel_ = NULL;

  // When the client sees that the context is lost, they should delete this
  // CommandBufferProxy and create a new one.
  last_state_.error = gpu::error::kLostContext;

  if (channel_error_callback_.get())
    channel_error_callback_->Run();
}

void CommandBufferProxy::SetChannelErrorCallback(Callback0::Type* callback) {
  channel_error_callback_.reset(callback);
}

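// Asks the browser process for a shared memory segment of |size| bytes and
// hands it to the two-argument Initialize() overload below.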
bool CommandBufferProxy::Initialize(int32 size) {
  DCHECK(!ring_buffer_.get());

  RenderThread* render_thread = RenderThread::current();
  if (!render_thread)
    return false;

  base::SharedMemoryHandle handle;
  if (!render_thread->Send(new ViewHostMsg_AllocateSharedMemoryBuffer(
      size,
      &handle))) {
    return false;
  }

  if (!base::SharedMemory::IsHandleValid(handle))
    return false;

#if defined(OS_POSIX)
  handle.auto_close = false;
#endif

  // Take ownership of shared memory. This will close the handle if Send below
  // fails. Otherwise, callee takes ownership before this variable
  // goes out of scope.
  base::SharedMemory shared_memory(handle, false);

  return Initialize(&shared_memory, size);
}

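// Tells the command buffer service in the GPU process to use |buffer| as the
// shared ring buffer, then duplicates and maps the buffer locally so commands
// can be written into it from this process.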
bool CommandBufferProxy::Initialize(base::SharedMemory* buffer, int32 size) {
  bool result;
  if (!Send(new GpuCommandBufferMsg_Initialize(route_id_,
                                               buffer->handle(),
                                               size,
                                               &result))) {
    LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize.";
    return false;
  }

  if (!result) {
    LOG(ERROR) << "Failed to initialize command buffer service.";
    return false;
  }

  base::SharedMemoryHandle handle;
  if (!buffer->GiveToProcess(base::GetCurrentProcessHandle(), &handle)) {
    LOG(ERROR) << "Failed to duplicate command buffer handle.";
    return false;
  }

  ring_buffer_.reset(new base::SharedMemory(handle, false));
  if (!ring_buffer_->Map(size)) {
    LOG(ERROR) << "Failed to map shared memory for command buffer.";
    ring_buffer_.reset();
    return false;
  }

  num_entries_ = size / sizeof(gpu::CommandBufferEntry);
  return true;
}

Buffer CommandBufferProxy::GetRingBuffer() {
  DCHECK(ring_buffer_.get());
  // Return locally cached ring buffer.
  Buffer buffer;
  buffer.ptr = ring_buffer_->memory();
  buffer.size = num_entries_ * sizeof(gpu::CommandBufferEntry);
  buffer.shared_memory = ring_buffer_.get();
  return buffer;
}

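// Synchronously fetches the latest command buffer state from the GPU process;
// if the context is already lost the cached |last_state_| is returned as-is.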
gpu::CommandBuffer::State CommandBufferProxy::GetState() {
  // Send will flag state with lost context if IPC fails.
  if (last_state_.error == gpu::error::kNoError)
    Send(new GpuCommandBufferMsg_GetState(route_id_, &last_state_));

  return last_state_;
}

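// Fire-and-forget flush; equivalent to AsyncFlush() with no completion task.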
void CommandBufferProxy::Flush(int32 put_offset) {
  AsyncFlush(put_offset, NULL);
}

gpu::CommandBuffer::State CommandBufferProxy::FlushSync(int32 put_offset) {
  // Send will flag state with lost context if IPC fails.
  if (last_state_.error == gpu::error::kNoError) {
    Send(new GpuCommandBufferMsg_Flush(route_id_,
                                       put_offset,
                                       &last_state_));
  }

  return last_state_;
}

void CommandBufferProxy::SetGetOffset(int32 get_offset) {
  // Not implemented in proxy.
  NOTREACHED();
}

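// Allocates a shared memory segment through the browser process and registers
// it with the command buffer service, returning the service-assigned id or -1
// on failure.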
int32 CommandBufferProxy::CreateTransferBuffer(size_t size) {
  if (last_state_.error != gpu::error::kNoError)
    return -1;

  RenderThread* render_thread = RenderThread::current();
  if (!render_thread)
    return -1;

  base::SharedMemoryHandle handle;
  if (!render_thread->Send(new ViewHostMsg_AllocateSharedMemoryBuffer(
      size,
      &handle))) {
    return -1;
  }

  if (!base::SharedMemory::IsHandleValid(handle))
    return -1;

  // Handle is closed by the SharedMemory object below. This stops
  // base::FileDescriptor from closing it as well.
#if defined(OS_POSIX)
  handle.auto_close = false;
#endif

  // Take ownership of the shared memory. This closes the handle here if the
  // Send below fails; otherwise the callee dupes the handle and takes
  // ownership before this variable goes out of scope.
  base::SharedMemory shared_memory(handle, false);

  int32 id;
  if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_,
                                                           handle,
                                                           size,
                                                           &id))) {
    return -1;
  }

  return id;
}

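// Registers a caller-owned shared memory segment with the command buffer
// service, returning the service-assigned id or -1 on failure.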
int32 CommandBufferProxy::RegisterTransferBuffer(
    base::SharedMemory* shared_memory,
    size_t size) {
  if (last_state_.error != gpu::error::kNoError)
    return -1;

  int32 id;
  if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(
      route_id_,
      shared_memory->handle(),  // Returns FileDescriptor with auto_close off.
      size,
      &id))) {
    return -1;
  }

  return id;
}

void CommandBufferProxy::DestroyTransferBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Remove the transfer buffer from the client side cache.
  TransferBufferMap::iterator it = transfer_buffers_.find(id);
  if (it != transfer_buffers_.end()) {
    delete it->second.shared_memory;
    transfer_buffers_.erase(it);
  }

  Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
}

Buffer CommandBufferProxy::GetTransferBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return Buffer();

  // Check local cache to see if there is already a client side shared memory
  // object for this id.
  TransferBufferMap::iterator it = transfer_buffers_.find(id);
  if (it != transfer_buffers_.end()) {
    return it->second;
  }

  // Assuming we are in the renderer process, the service is responsible for
  // duplicating the handle. This might not be true for NaCl.
  base::SharedMemoryHandle handle;
  uint32 size;
  if (!Send(new GpuCommandBufferMsg_GetTransferBuffer(route_id_,
                                                      id,
                                                      &handle,
                                                      &size))) {
    return Buffer();
  }

  // Cache the transfer buffer shared memory object client side.
  base::SharedMemory* shared_memory = new base::SharedMemory(handle, false);

  // Map the shared memory on demand.
  if (!shared_memory->memory()) {
    if (!shared_memory->Map(size)) {
      delete shared_memory;
      return Buffer();
    }
  }

  Buffer buffer;
  buffer.ptr = shared_memory->memory();
  buffer.size = size;
  buffer.shared_memory = shared_memory;
  transfer_buffers_[id] = buffer;

  return buffer;
}

void CommandBufferProxy::SetToken(int32 token) {
  // Not implemented in proxy.
  NOTREACHED();
}

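// Called when the GPU process requests a repaint; hands the client-supplied
// repaint task (if any) to the outermost message loop.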
void CommandBufferProxy::OnNotifyRepaint() {
  if (notify_repaint_task_.get())
    MessageLoop::current()->PostNonNestableTask(
        FROM_HERE, notify_repaint_task_.release());
}

void CommandBufferProxy::SetParseError(
    gpu::error::Error error) {
  // Not implemented in proxy.
  NOTREACHED();
}

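// Called when the GPU process reports that a buffer swap has completed; runs
// the client-supplied swap buffers callback, if one is set.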
void CommandBufferProxy::OnSwapBuffers() {
  if (swap_buffers_callback_.get())
    swap_buffers_callback_->Run();
}

void CommandBufferProxy::SetSwapBuffersCallback(Callback0::Type* callback) {
  swap_buffers_callback_.reset(callback);
}

void CommandBufferProxy::ResizeOffscreenFrameBuffer(const gfx::Size& size) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  IPC::Message* message =
      new GpuCommandBufferMsg_ResizeOffscreenFrameBuffer(route_id_, size);

  // Set the unblock flag on this message to guarantee the order in which it
  // is processed in the GPU process. Ordinarily, while a synchronous message
  // is being handled, other synchronous messages may be processed before
  // asynchronous ones. During some page reloads WebGL sends three messages
  // (sync, async, sync) in rapid succession, and the sync message
  // (GpuCommandBufferMsg_Flush, sent on behalf of SwapBuffers) is sometimes
  // processed before the async message
  // (GpuCommandBufferMsg_ResizeOffscreenFrameBuffer). This causes the WebGL
  // content to disappear because the back buffer is not correctly resized.
  message->set_unblock(true);
  Send(message);
}

void CommandBufferProxy::SetNotifyRepaintTask(Task* task) {
  notify_repaint_task_.reset(task);
}

#if defined(OS_MACOSX)
void CommandBufferProxy::SetWindowSize(const gfx::Size& size) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetWindowSize(route_id_, size));
}
#endif

void CommandBufferProxy::AsyncGetState(Task* completion_task) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  IPC::Message* message = new GpuCommandBufferMsg_AsyncGetState(route_id_);

  // Do not let a synchronous flush hold up this message. If this handler is
  // deferred until after the synchronous flush completes, it will overwrite
  // the cached last_state_ with out-of-date data.
  message->set_unblock(true);

  if (Send(message))
    pending_async_flush_tasks_.push(linked_ptr<Task>(completion_task));
}

void CommandBufferProxy::AsyncFlush(int32 put_offset, Task* completion_task) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  IPC::Message* message = new GpuCommandBufferMsg_AsyncFlush(route_id_,
                                                             put_offset);

  // Do not let a synchronous flush hold up this message. If this handler is
  // deferred until after the synchronous flush completes, it will overwrite
  // the cached last_state_ with out-of-date data.
  message->set_unblock(true);

  if (Send(message))
    pending_async_flush_tasks_.push(linked_ptr<Task>(completion_task));
}

bool CommandBufferProxy::Send(IPC::Message* msg) {
  // Caller should not intentionally send a message if the context is lost.
  DCHECK(last_state_.error == gpu::error::kNoError);

  if (channel_) {
    if (channel_->Send(msg)) {
      return true;
    } else {
      // Flag the command buffer as lost. Defer deleting the channel until
      // OnChannelError is called after returning to the message loop in case
      // it is referenced elsewhere.
      last_state_.error = gpu::error::kLostContext;
      return false;
    }
  }

  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Message::Sender.
  delete msg;
  return false;
}

void CommandBufferProxy::OnUpdateState(const gpu::CommandBuffer::State& state) {
  last_state_ = state;

  linked_ptr<Task> task = pending_async_flush_tasks_.front();
  pending_async_flush_tasks_.pop();

  if (task.get()) {
    // Although we need to update last_state_ while potentially waiting for a
    // synchronous flush to complete, we do not need to invoke the callback
    // synchronously. Also, post it as a non-nestable task so it is always
    // invoked by the outermost message loop.
    MessageLoop::current()->PostNonNestableTask(FROM_HERE, task.release());
  }
}