// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/command_buffer_proxy.h"

#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/process_util.h"
#include "base/shared_memory.h"
#include "base/task.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/plugin_messages.h"
#include "content/common/view_messages.h"
#include "content/renderer/plugin_channel_host.h"
#include "content/renderer/render_thread.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "ui/gfx/size.h"

using gpu::Buffer;

CommandBufferProxy::CommandBufferProxy(
    IPC::Channel::Sender* channel,
    int route_id)
    : num_entries_(0),
      channel_(channel),
      route_id_(route_id) {
}

CommandBufferProxy::~CommandBufferProxy() {
  // Delete all the locally cached shared memory objects, closing the handle
  // in this process.
  for (TransferBufferMap::iterator it = transfer_buffers_.begin();
       it != transfer_buffers_.end();
       ++it) {
    delete it->second.shared_memory;
    it->second.shared_memory = NULL;
  }
}

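// Dispatches incoming messages from the GPU process for this command buffer's
// route; unhandled message types trip the DCHECK below.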
bool CommandBufferProxy::OnMessageReceived(const IPC::Message& message) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(CommandBufferProxy, message)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_UpdateState, OnUpdateState);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SwapBuffers, OnSwapBuffers);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_NotifyRepaint,
                        OnNotifyRepaint);
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  DCHECK(handled);
  return handled;
}

void CommandBufferProxy::OnChannelError() {
  // Prevent any further messages from being sent.
  channel_ = NULL;

  // When the client sees that the context is lost, they should delete this
  // CommandBufferProxy and create a new one.
  last_state_.error = gpu::error::kLostContext;

  if (channel_error_callback_.get())
    channel_error_callback_->Run();
}

void CommandBufferProxy::SetChannelErrorCallback(Callback0::Type* callback) {
  channel_error_callback_.reset(callback);
}

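// Allocates the ring buffer by asking the browser process for a shared memory
// segment of |size| bytes, then hands it to the other Initialize() overload.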
bool CommandBufferProxy::Initialize(int32 size) {
  DCHECK(!ring_buffer_.get());

  RenderThread* render_thread = RenderThread::current();
  if (!render_thread)
    return false;

  base::SharedMemoryHandle handle;
  if (!render_thread->Send(new ViewHostMsg_AllocateSharedMemoryBuffer(
      size,
      &handle))) {
    return false;
  }

  if (!base::SharedMemory::IsHandleValid(handle))
    return false;

#if defined(OS_POSIX)
  handle.auto_close = false;
#endif

  // Take ownership of shared memory. This will close the handle if Send below
  // fails. Otherwise, callee takes ownership before this variable
  // goes out of scope.
  base::SharedMemory shared_memory(handle, false);

  return Initialize(&shared_memory, size);
}

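// Shares |buffer| with the command buffer service in the GPU process, then
// duplicates the handle and maps it locally as the ring buffer.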
bool CommandBufferProxy::Initialize(base::SharedMemory* buffer, int32 size) {
  bool result;
  if (!Send(new GpuCommandBufferMsg_Initialize(route_id_,
                                               buffer->handle(),
                                               size,
                                               &result))) {
    LOG(ERROR) << "Could not send GpuCommandBufferMsg_Initialize.";
    return false;
  }

  if (!result) {
    LOG(ERROR) << "Failed to initialize command buffer service.";
    return false;
  }

  base::SharedMemoryHandle handle;
  if (!buffer->GiveToProcess(base::GetCurrentProcessHandle(), &handle)) {
    LOG(ERROR) << "Failed to duplicate command buffer handle.";
    return false;
  }

  ring_buffer_.reset(new base::SharedMemory(handle, false));
  if (!ring_buffer_->Map(size)) {
    LOG(ERROR) << "Failed to map shared memory for command buffer.";
    ring_buffer_.reset();
    return false;
  }

  num_entries_ = size / sizeof(gpu::CommandBufferEntry);
  return true;
}

Buffer CommandBufferProxy::GetRingBuffer() {
  DCHECK(ring_buffer_.get());
  // Return locally cached ring buffer.
  Buffer buffer;
  buffer.ptr = ring_buffer_->memory();
  buffer.size = num_entries_ * sizeof(gpu::CommandBufferEntry);
  buffer.shared_memory = ring_buffer_.get();
  return buffer;
}

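// Synchronously queries the GPU process for the latest command buffer state,
// unless the context is already lost.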
gpu::CommandBuffer::State CommandBufferProxy::GetState() {
  // Send will flag state with lost context if IPC fails.
  if (last_state_.error == gpu::error::kNoError) {
    gpu::CommandBuffer::State state;
    if (Send(new GpuCommandBufferMsg_GetState(route_id_, &state)))
      OnUpdateState(state);
  }

  return last_state_;
}

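// Asynchronously notifies the GPU process of the new put offset; does not
// wait for a state update.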
void CommandBufferProxy::Flush(int32 put_offset) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_AsyncFlush(route_id_, put_offset));
}

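// Like Flush(), but waits for an updated state when the get offset has not
// advanced past |last_known_get|; otherwise falls back to an async flush.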
gpu::CommandBuffer::State CommandBufferProxy::FlushSync(int32 put_offset,
                                                        int32 last_known_get) {
  TRACE_EVENT0("gpu", "CommandBufferProxy::FlushSync");
  if (last_known_get == last_state_.get_offset) {
    // Send will flag state with lost context if IPC fails.
    if (last_state_.error == gpu::error::kNoError) {
      gpu::CommandBuffer::State state;
      if (Send(new GpuCommandBufferMsg_Flush(route_id_,
                                             put_offset,
                                             last_known_get,
                                             &state)))
        OnUpdateState(state);
    }
  } else {
    Flush(put_offset);
  }

  return last_state_;
}

void CommandBufferProxy::SetGetOffset(int32 get_offset) {
  // Not implemented in proxy.
  NOTREACHED();
}

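// Allocates a shared memory transfer buffer via the browser process and
// registers it with the GPU process. Returns the assigned id, or -1 on
// failure.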
int32 CommandBufferProxy::CreateTransferBuffer(size_t size, int32 id_request) {
  if (last_state_.error != gpu::error::kNoError)
    return -1;

  RenderThread* render_thread = RenderThread::current();
  if (!render_thread)
    return -1;

  base::SharedMemoryHandle handle;
  if (!render_thread->Send(new ViewHostMsg_AllocateSharedMemoryBuffer(
      size,
      &handle))) {
    return -1;
  }

  if (!base::SharedMemory::IsHandleValid(handle))
    return -1;

  // Handle is closed by the SharedMemory object below. This stops
  // base::FileDescriptor from closing it as well.
#if defined(OS_POSIX)
  handle.auto_close = false;
#endif

  // Take ownership of shared memory. This will close the handle if Send below
  // fails. Otherwise, callee takes ownership before this variable
  // goes out of scope by duping the handle.
  base::SharedMemory shared_memory(handle, false);

  int32 id;
  if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(route_id_,
                                                           handle,
                                                           size,
                                                           id_request,
                                                           &id))) {
    return -1;
  }

  return id;
}

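// Registers an existing shared memory object with the GPU process as a
// transfer buffer. Returns the assigned id, or -1 on failure.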
int32 CommandBufferProxy::RegisterTransferBuffer(
    base::SharedMemory* shared_memory,
    size_t size,
    int32 id_request) {
  if (last_state_.error != gpu::error::kNoError)
    return -1;

  int32 id;
  if (!Send(new GpuCommandBufferMsg_RegisterTransferBuffer(
      route_id_,
      shared_memory->handle(),  // Returns FileDescriptor with auto_close off.
      size,
      id_request,
      &id))) {
    return -1;
  }

  return id;
}

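// Drops the client-side cache entry for |id| and tells the GPU process to
// release the transfer buffer.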
void CommandBufferProxy::DestroyTransferBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  // Remove the transfer buffer from the client side cache.
  TransferBufferMap::iterator it = transfer_buffers_.find(id);
  if (it != transfer_buffers_.end()) {
    delete it->second.shared_memory;
    transfer_buffers_.erase(it);
  }

  Send(new GpuCommandBufferMsg_DestroyTransferBuffer(route_id_, id));
}

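// Returns the transfer buffer for |id|, fetching and mapping the shared
// memory from the GPU process on first use and caching it for later calls.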
Buffer CommandBufferProxy::GetTransferBuffer(int32 id) {
  if (last_state_.error != gpu::error::kNoError)
    return Buffer();

  // Check local cache to see if there is already a client side shared memory
  // object for this id.
  TransferBufferMap::iterator it = transfer_buffers_.find(id);
  if (it != transfer_buffers_.end()) {
    return it->second;
  }

  // Assuming we are in the renderer process, the service is responsible for
  // duplicating the handle. This might not be true for NaCl.
  base::SharedMemoryHandle handle;
  uint32 size;
  if (!Send(new GpuCommandBufferMsg_GetTransferBuffer(route_id_,
                                                      id,
                                                      &handle,
                                                      &size))) {
    return Buffer();
  }

  // Cache the transfer buffer shared memory object client side.
  base::SharedMemory* shared_memory = new base::SharedMemory(handle, false);

  // Map the shared memory on demand.
  if (!shared_memory->memory()) {
    if (!shared_memory->Map(size)) {
      delete shared_memory;
      return Buffer();
    }
  }

  Buffer buffer;
  buffer.ptr = shared_memory->memory();
  buffer.size = size;
  buffer.shared_memory = shared_memory;
  transfer_buffers_[id] = buffer;

  return buffer;
}

void CommandBufferProxy::SetToken(int32 token) {
  // Not implemented in proxy.
  NOTREACHED();
}

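// Posts the client-supplied repaint task, if any, to the current message loop
// as a non-nestable task.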
void CommandBufferProxy::OnNotifyRepaint() {
  if (notify_repaint_task_.get())
    MessageLoop::current()->PostNonNestableTask(
        FROM_HERE, notify_repaint_task_.release());
}

void CommandBufferProxy::SetParseError(
    gpu::error::Error error) {
  // Not implemented in proxy.
  NOTREACHED();
}

void CommandBufferProxy::OnSwapBuffers() {
  if (swap_buffers_callback_.get())
    swap_buffers_callback_->Run();
}

void CommandBufferProxy::SetSwapBuffersCallback(Callback0::Type* callback) {
  swap_buffers_callback_.reset(callback);
}

void CommandBufferProxy::ResizeOffscreenFrameBuffer(const gfx::Size& size) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  IPC::Message* message =
      new GpuCommandBufferMsg_ResizeOffscreenFrameBuffer(route_id_, size);

  // We need to set the unblock flag on this message to guarantee the
  // order in which it is processed in the GPU process. Ordinarily, if a
  // synchronous message is being processed, other synchronous messages
  // may be processed before asynchronous messages. During some page
  // reloads WebGL seems to send three messages (sync, async, sync) in
  // rapid succession in that order, and the sync message
  // (GpuCommandBufferMsg_Flush, on behalf of SwapBuffers) is sometimes
  // processed before the async message
  // (GpuCommandBufferMsg_ResizeOffscreenFrameBuffer). This causes the
  // WebGL content to disappear because the back buffer is not correctly
  // resized.
  message->set_unblock(true);
  Send(message);
}

void CommandBufferProxy::SetNotifyRepaintTask(Task* task) {
  notify_repaint_task_.reset(task);
}

#if defined(OS_MACOSX)
void CommandBufferProxy::SetWindowSize(const gfx::Size& size) {
  if (last_state_.error != gpu::error::kNoError)
    return;

  Send(new GpuCommandBufferMsg_SetWindowSize(route_id_, size));
}
#endif

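// Sends |msg| to the GPU process. The message is always consumed, whether or
// not the send succeeds; a send failure on a live channel flags the context
// as lost.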
bool CommandBufferProxy::Send(IPC::Message* msg) {
  // Caller should not intentionally send a message if the context is lost.
  DCHECK(last_state_.error == gpu::error::kNoError);

  if (channel_) {
    if (channel_->Send(msg)) {
      return true;
    } else {
      // Flag the command buffer as lost. Defer deleting the channel until
      // OnChannelError is called after returning to the message loop in case
      // it is referenced elsewhere.
      last_state_.error = gpu::error::kLostContext;
      return false;
    }
  }

  // Callee takes ownership of message, regardless of whether Send is
  // successful. See IPC::Message::Sender.
  delete msg;
  return false;
}

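// Adopts |state| as the latest known state, ignoring updates that arrive out
// of order (detected via the generation counter, modulo wraparound).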
void CommandBufferProxy::OnUpdateState(const gpu::CommandBuffer::State& state) {
  // Handle wraparound. It works as long as we don't have more than 2B state
  // updates in flight across which reordering occurs.
  if (state.generation - last_state_.generation < 0x80000000U)
    last_state_ = state;
}