Chromium Code Reviews

Diff: content/common/gpu/gpu_surface_stub.cc

Issue 6992010: Added proxy / stub pair for GPU surfaces. (Closed) Base URL: svn://chrome-svn/chrome/trunk/src/
Patch Set: '' Created 9 years, 7 months ago
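
This patch introduces the stub half of a proxy / stub pair: a surface proxy in the client process and this GpuSurfaceStub in the GPU process, tied together by a shared route ID on a GpuChannel. Below is a minimal sketch of how such a pair is typically wired up, assuming a MessageRouter-style AddRoute() on the channel; OnCreateSurface and the GpuSurfaceProxy class shown here are illustrative assumptions, not code from this patch.

// GPU process side: register the stub so that IPC messages carrying its
// route ID are dispatched to GpuSurfaceStub::OnMessageReceived.
// (Hypothetical helper; the router_ member is assumed.)
void GpuChannel::OnCreateSurface(int route_id, gfx::GLSurface* surface) {
  GpuSurfaceStub* stub = new GpuSurfaceStub(this, route_id, surface);
  router_.AddRoute(route_id, stub);
}

// Client process side: the proxy holds the same route ID and forwards
// messages over the shared channel; replies from the stub come back the
// same way. (Hypothetical class for illustration.)
class GpuSurfaceProxy {
 public:
  GpuSurfaceProxy(IPC::Message::Sender* channel, int route_id)
      : channel_(channel), route_id_(route_id) {}

  bool Send(IPC::Message* message) { return channel_->Send(message); }

 private:
  IPC::Message::Sender* channel_;
  int route_id_;
};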
New file (right-hand column of the diff): content/common/gpu/gpu_surface_stub.cc

// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if defined(ENABLE_GPU)

#include "content/common/gpu/gpu_surface_stub.h"

#include "content/common/gpu/gpu_channel.h"
#include "ipc/ipc_message_macros.h"

GpuSurfaceStub::GpuSurfaceStub(GpuChannel* channel,
                               int route_id,
                               gfx::GLSurface* surface)
    : channel_(channel),
      route_id_(route_id),
      surface_(surface) {
}

GpuSurfaceStub::~GpuSurfaceStub() {
}

bool GpuSurfaceStub::OnMessageReceived(const IPC::Message& message) {
  bool handled = true;
  //IPC_BEGIN_MESSAGE_MAP(GpuSurfaceStub, message)
  //  IPC_MESSAGE_UNHANDLED(handled = false)
  //IPC_END_MESSAGE_MAP()
  DCHECK(handled);
  return handled;
}

bool GpuSurfaceStub::Send(IPC::Message* message) {
  return channel_->Send(message);
}

#endif  // defined(ENABLE_GPU)
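
The message map in the new stub is commented out because the patch does not yet define any surface-specific IPC messages. Once the first one lands, the map would presumably mirror the pattern used by GpuCommandBufferStub in the old file below; here is a sketch under that assumption (GpuSurfaceMsg_SwapBuffers and OnSwapBuffers are placeholder names, not messages defined by this patch).

bool GpuSurfaceStub::OnMessageReceived(const IPC::Message& message) {
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuSurfaceStub, message)
    // Hypothetical handler; a real message would first be declared in
    // content/common/gpu/gpu_messages.h.
    IPC_MESSAGE_HANDLER(GpuSurfaceMsg_SwapBuffers, OnSwapBuffers)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}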
Old file (left-hand column of the diff; the GpuCommandBufferStub implementation the new stub was diffed against):

// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if defined(ENABLE_GPU)

#include "base/bind.h"
#include "base/debug/trace_event.h"
#include "base/process_util.h"
#include "base/shared_memory.h"
#include "build/build_config.h"
#include "content/common/child_thread.h"
#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_channel_manager.h"
#include "content/common/gpu/gpu_command_buffer_stub.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/common/gpu/gpu_watchdog.h"
#include "gpu/command_buffer/common/constants.h"
#include "ui/gfx/gl/gl_context.h"
#include "ui/gfx/gl/gl_surface.h"

#if defined(OS_WIN)
#include "base/win/wrapped_window_proc.h"
#endif

using gpu::Buffer;

#if defined(OS_WIN)
#define kCompositorWindowOwner L"CompositorWindowOwner"
#endif  // defined(OS_WIN)

GpuCommandBufferStub::GpuCommandBufferStub(
    GpuChannel* channel,
    gfx::PluginWindowHandle handle,
    GpuCommandBufferStub* parent,
    const gfx::Size& size,
    const gpu::gles2::DisallowedExtensions& disallowed_extensions,
    const std::string& allowed_extensions,
    const std::vector<int32>& attribs,
    uint32 parent_texture_id,
    int32 route_id,
    int32 renderer_id,
    int32 render_view_id,
    GpuWatchdog* watchdog)
    : channel_(channel),
      handle_(handle),
      parent_(
          parent ? parent->AsWeakPtr() : base::WeakPtr<GpuCommandBufferStub>()),
      initial_size_(size),
      disallowed_extensions_(disallowed_extensions),
      allowed_extensions_(allowed_extensions),
      requested_attribs_(attribs),
      parent_texture_id_(parent_texture_id),
      route_id_(route_id),
      renderer_id_(renderer_id),
      render_view_id_(render_view_id),
      watchdog_(watchdog),
      task_factory_(ALLOW_THIS_IN_INITIALIZER_LIST(this)) {
}

GpuCommandBufferStub::~GpuCommandBufferStub() {
  if (scheduler_.get()) {
    scheduler_->Destroy();
  }

  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  gpu_channel_manager->Send(new GpuHostMsg_DestroyCommandBuffer(
      handle_, renderer_id_, render_view_id_));
}

bool GpuCommandBufferStub::OnMessageReceived(const IPC::Message& message) {
  // If the scheduler is unscheduled, defer sync and async messages until it is
  // rescheduled. Also, even if the scheduler is scheduled, do not allow newly
  // received messages to be handled before previously received deferred ones;
  // append them to the deferred queue as well.
  if ((scheduler_.get() && !scheduler_->IsScheduled()) ||
      !deferred_messages_.empty()) {
    deferred_messages_.push(new IPC::Message(message));
    return true;
  }

  // Always use IPC_MESSAGE_HANDLER_DELAY_REPLY for synchronous message handlers
  // here. This is so the reply can be delayed if the scheduler is unscheduled.
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuCommandBufferStub, message)
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Initialize,
                                    OnInitialize);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetState, OnGetState);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_Flush, OnFlush);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_AsyncFlush, OnAsyncFlush);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_CreateTransferBuffer,
                                    OnCreateTransferBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_RegisterTransferBuffer,
                                    OnRegisterTransferBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_DestroyTransferBuffer,
                                    OnDestroyTransferBuffer);
    IPC_MESSAGE_HANDLER_DELAY_REPLY(GpuCommandBufferMsg_GetTransferBuffer,
                                    OnGetTransferBuffer);
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_ResizeOffscreenFrameBuffer,
                        OnResizeOffscreenFrameBuffer);
#if defined(OS_MACOSX)
    IPC_MESSAGE_HANDLER(GpuCommandBufferMsg_SetWindowSize, OnSetWindowSize);
#endif  // defined(OS_MACOSX)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  DCHECK(handled);
  return handled;
}

bool GpuCommandBufferStub::Send(IPC::Message* message) {
  return channel_->Send(message);
}

void GpuCommandBufferStub::OnInitialize(
    base::SharedMemoryHandle ring_buffer,
    int32 size,
    IPC::Message* reply_message) {
  DCHECK(!command_buffer_.get());

  bool result = false;

  command_buffer_.reset(new gpu::CommandBufferService);

#if defined(OS_WIN)
  // Windows dups the shared memory handle it receives into the current process
  // and closes it when this variable goes out of scope.
  base::SharedMemory shared_memory(ring_buffer,
                                   false,
                                   channel_->renderer_process());
#else
  // POSIX receives a dup of the shared memory handle and closes the dup when
  // this variable goes out of scope.
  base::SharedMemory shared_memory(ring_buffer, false);
#endif

  // Initialize the CommandBufferService and GpuScheduler.
  if (command_buffer_->Initialize(&shared_memory, size)) {
    gpu::GpuScheduler* parent_processor =
        parent_ ? parent_->scheduler_.get() : NULL;
    scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(), NULL));
    if (scheduler_->Initialize(
        handle_,
        initial_size_,
        disallowed_extensions_,
        allowed_extensions_.c_str(),
        requested_attribs_,
        parent_processor,
        parent_texture_id_)) {
      command_buffer_->SetPutOffsetChangeCallback(
          NewCallback(scheduler_.get(),
                      &gpu::GpuScheduler::PutChanged));
      scheduler_->SetSwapBuffersCallback(
          NewCallback(this, &GpuCommandBufferStub::OnSwapBuffers));
      scheduler_->SetLatchCallback(base::Bind(
          &GpuChannel::OnLatchCallback, base::Unretained(channel_), route_id_));
      scheduler_->SetScheduledCallback(
          NewCallback(this, &GpuCommandBufferStub::OnScheduled));
      if (watchdog_)
        scheduler_->SetCommandProcessedCallback(
            NewCallback(this, &GpuCommandBufferStub::OnCommandProcessed));

#if defined(OS_MACOSX)
      if (handle_) {
        // This context conceptually puts its output directly on the
        // screen, rendered by the accelerated plugin layer in
        // RenderWidgetHostViewMac. Set up a pathway to notify the
        // browser process when its contents change.
        scheduler_->SetSwapBuffersCallback(
            NewCallback(this,
                        &GpuCommandBufferStub::SwapBuffersCallback));
      }
#endif  // defined(OS_MACOSX)

      // Set up a pathway for resizing the output window or framebuffer at the
      // right time relative to other GL commands.
      scheduler_->SetResizeCallback(
          NewCallback(this, &GpuCommandBufferStub::ResizeCallback));

      result = true;
    } else {
      scheduler_.reset();
      command_buffer_.reset();
    }
  }

  GpuCommandBufferMsg_Initialize::WriteReplyParams(reply_message, result);
  Send(reply_message);
}

void GpuCommandBufferStub::OnGetState(IPC::Message* reply_message) {
  gpu::CommandBuffer::State state = command_buffer_->GetState();
  if (state.error == gpu::error::kLostContext &&
      gfx::GLContext::LosesAllContextsOnContextLost())
    channel_->LoseAllContexts();

  GpuCommandBufferMsg_GetState::WriteReplyParams(reply_message, state);
  Send(reply_message);
}

void GpuCommandBufferStub::OnFlush(int32 put_offset,
                                   int32 last_known_get,
                                   IPC::Message* reply_message) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnFlush");
  gpu::CommandBuffer::State state = command_buffer_->FlushSync(put_offset,
                                                               last_known_get);
  if (state.error == gpu::error::kLostContext &&
      gfx::GLContext::LosesAllContextsOnContextLost())
    channel_->LoseAllContexts();

  GpuCommandBufferMsg_Flush::WriteReplyParams(reply_message, state);
  Send(reply_message);
}

void GpuCommandBufferStub::OnAsyncFlush(int32 put_offset) {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnAsyncFlush");
  command_buffer_->Flush(put_offset);
  // TODO(piman): Do this every time the scheduler finishes processing a batch
  // of commands.
  MessageLoop::current()->PostTask(FROM_HERE,
      task_factory_.NewRunnableMethod(&GpuCommandBufferStub::ReportState));
}

void GpuCommandBufferStub::OnCreateTransferBuffer(int32 size,
                                                  int32 id_request,
                                                  IPC::Message* reply_message) {
  int32 id = command_buffer_->CreateTransferBuffer(size, id_request);
  GpuCommandBufferMsg_CreateTransferBuffer::WriteReplyParams(reply_message, id);
  Send(reply_message);
}

void GpuCommandBufferStub::OnRegisterTransferBuffer(
    base::SharedMemoryHandle transfer_buffer,
    size_t size,
    int32 id_request,
    IPC::Message* reply_message) {
#if defined(OS_WIN)
  // Windows dups the shared memory handle it receives into the current process
  // and closes it when this variable goes out of scope.
  base::SharedMemory shared_memory(transfer_buffer,
                                   false,
                                   channel_->renderer_process());
#else
  // POSIX receives a dup of the shared memory handle and closes the dup when
  // this variable goes out of scope.
  base::SharedMemory shared_memory(transfer_buffer, false);
#endif

  int32 id = command_buffer_->RegisterTransferBuffer(&shared_memory,
                                                     size,
                                                     id_request);

  GpuCommandBufferMsg_RegisterTransferBuffer::WriteReplyParams(reply_message,
                                                               id);
  Send(reply_message);
}

void GpuCommandBufferStub::OnDestroyTransferBuffer(
    int32 id,
    IPC::Message* reply_message) {
  command_buffer_->DestroyTransferBuffer(id);
  Send(reply_message);
}

void GpuCommandBufferStub::OnGetTransferBuffer(
    int32 id,
    IPC::Message* reply_message) {
  base::SharedMemoryHandle transfer_buffer = base::SharedMemoryHandle();
  uint32 size = 0;

  // Fail if the renderer process has not provided its process handle.
  if (!channel_->renderer_process())
    return;

  Buffer buffer = command_buffer_->GetTransferBuffer(id);
  if (buffer.shared_memory) {
    // Assume service is responsible for duplicating the handle to the calling
    // process.
    buffer.shared_memory->ShareToProcess(channel_->renderer_process(),
                                         &transfer_buffer);
    size = buffer.size;
  }

  GpuCommandBufferMsg_GetTransferBuffer::WriteReplyParams(reply_message,
                                                          transfer_buffer,
                                                          size);
  Send(reply_message);
}

void GpuCommandBufferStub::OnResizeOffscreenFrameBuffer(const gfx::Size& size) {
  scheduler_->ResizeOffscreenFrameBuffer(size);
}

void GpuCommandBufferStub::OnSwapBuffers() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSwapBuffers");
  ReportState();
  Send(new GpuCommandBufferMsg_SwapBuffers(route_id_));
}

void GpuCommandBufferStub::OnCommandProcessed() {
  if (watchdog_)
    watchdog_->CheckArmed();
}

void GpuCommandBufferStub::HandleDeferredMessages() {
  // Empty the deferred queue so OnMessageReceived does not defer on that
  // account and to prevent an infinite loop if the scheduler is unscheduled
  // as a result of handling already deferred messages.
  std::queue<IPC::Message*> deferred_messages_copy;
  std::swap(deferred_messages_copy, deferred_messages_);

  while (!deferred_messages_copy.empty()) {
    scoped_ptr<IPC::Message> message(deferred_messages_copy.front());
    deferred_messages_copy.pop();

    OnMessageReceived(*message);
  }
}

void GpuCommandBufferStub::OnScheduled() {
  // Post a task to handle any deferred messages. The deferred message queue is
  // not emptied here, which ensures that OnMessageReceived will continue to
  // defer newly received messages until the ones in the queue have all been
  // handled by HandleDeferredMessages. HandleDeferredMessages is invoked as a
  // task to prevent reentrancy.
  MessageLoop::current()->PostTask(
      FROM_HERE,
      task_factory_.NewRunnableMethod(
          &GpuCommandBufferStub::HandleDeferredMessages));
}

#if defined(OS_MACOSX)
void GpuCommandBufferStub::OnSetWindowSize(const gfx::Size& size) {
  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  // Try using the IOSurface version first.
  uint64 new_backing_store = scheduler_->SetWindowSizeForIOSurface(size);
  if (new_backing_store) {
    GpuHostMsg_AcceleratedSurfaceSetIOSurface_Params params;
    params.renderer_id = renderer_id_;
    params.render_view_id = render_view_id_;
    params.window = handle_;
    params.width = size.width();
    params.height = size.height();
    params.identifier = new_backing_store;
    gpu_channel_manager->Send(
        new GpuHostMsg_AcceleratedSurfaceSetIOSurface(params));
  } else {
    // TODO(kbr): figure out what to do here. It wouldn't be difficult
    // to support the compositor on 10.5, but the performance would be
    // questionable.
    NOTREACHED();
  }
}

void GpuCommandBufferStub::SwapBuffersCallback() {
  TRACE_EVENT0("gpu", "GpuCommandBufferStub::SwapBuffersCallback");
  GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
  GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params params;
  params.renderer_id = renderer_id_;
  params.render_view_id = render_view_id_;
  params.window = handle_;
  params.surface_id = scheduler_->GetSurfaceId();
  params.route_id = route_id();
  params.swap_buffers_count = scheduler_->swap_buffers_count();
  gpu_channel_manager->Send(
      new GpuHostMsg_AcceleratedSurfaceBuffersSwapped(params));

  scheduler_->SetScheduled(false);
}

void GpuCommandBufferStub::AcceleratedSurfaceBuffersSwapped(
    uint64 swap_buffers_count) {
  TRACE_EVENT0("gpu",
               "GpuCommandBufferStub::AcceleratedSurfaceBuffersSwapped");

  // Multiple swapbuffers may get consolidated together into a single
  // AcceleratedSurfaceBuffersSwapped call. Since OnSwapBuffers expects to be
  // called one time for every swap, make up the difference here.
  uint64 delta = swap_buffers_count -
      scheduler_->acknowledged_swap_buffers_count();

  scheduler_->set_acknowledged_swap_buffers_count(swap_buffers_count);

  for (uint64 i = 0; i < delta; i++)
    OnSwapBuffers();

  // Wake up the GpuScheduler to start doing work again.
  scheduler_->SetScheduled(true);
}
#endif  // defined(OS_MACOSX)

void GpuCommandBufferStub::ResizeCallback(gfx::Size size) {
  if (handle_ == gfx::kNullPluginWindow) {
    scheduler_->decoder()->ResizeOffscreenFrameBuffer(size);
    scheduler_->decoder()->UpdateOffscreenFrameBufferSize();
  } else {
#if defined(OS_LINUX) && !defined(TOUCH_UI) || defined(OS_WIN)
    GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager();
    gpu_channel_manager->Send(
        new GpuHostMsg_ResizeView(renderer_id_,
                                  render_view_id_,
                                  route_id_,
                                  size));

    scheduler_->SetScheduled(false);
#endif
  }
}

void GpuCommandBufferStub::ViewResized() {
#if defined(OS_LINUX) && !defined(TOUCH_UI) || defined(OS_WIN)
  DCHECK(handle_ != gfx::kNullPluginWindow);
  scheduler_->SetScheduled(true);

  // Recreate the view surface to match the window size. TODO(apatrick): this
  // is likely not necessary on all platforms.
  gfx::GLContext* context = scheduler_->decoder()->GetGLContext();
  gfx::GLSurface* surface = scheduler_->decoder()->GetGLSurface();
  context->ReleaseCurrent(surface);
  if (surface) {
    surface->Destroy();
    surface->Initialize();
  }
#endif
}

void GpuCommandBufferStub::ReportState() {
  gpu::CommandBuffer::State state = command_buffer_->GetState();
  if (state.error == gpu::error::kLostContext &&
      gfx::GLContext::LosesAllContextsOnContextLost()) {
    channel_->LoseAllContexts();
  } else {
    IPC::Message* msg = new GpuCommandBufferMsg_UpdateState(route_id_, state);
    msg->set_unblock(true);
    Send(msg);
  }
}

#endif  // defined(ENABLE_GPU)
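
The most subtle part of the old stub is the defer-and-replay logic spread across OnMessageReceived, HandleDeferredMessages, and OnScheduled above: while the scheduler is unscheduled, or while older deferred messages are still queued, new messages are copied into a FIFO, and a posted task later replays them in arrival order. A self-contained sketch of the same pattern (Message and Dispatcher are stand-ins for illustration, not Chromium types):

#include <memory>
#include <queue>

struct Message { int id; };  // Stand-in for IPC::Message.

class Dispatcher {
 public:
  void OnMessageReceived(const Message& message) {
    // Defer while unscheduled, and also while older deferred messages
    // exist, so that arrival order is preserved.
    if (!scheduled_ || !deferred_.empty()) {
      deferred_.push(std::make_unique<Message>(message));
      return;
    }
    Handle(message);
  }

  void OnScheduled() {
    scheduled_ = true;
    // The real code posts HandleDeferredMessages as a task to avoid
    // reentrancy; calling it directly keeps the sketch short.
    HandleDeferredMessages();
  }

 private:
  void HandleDeferredMessages() {
    // Swap the queue out first: if handling a message unschedules the
    // dispatcher again, OnMessageReceived defers into a fresh queue
    // instead of looping forever over this one.
    std::queue<std::unique_ptr<Message>> copy;
    std::swap(copy, deferred_);
    while (!copy.empty()) {
      OnMessageReceived(*copy.front());
      copy.pop();
    }
  }

  void Handle(const Message& /*message*/) {}  // Dispatch to handlers.

  bool scheduled_ = true;
  std::queue<std::unique_ptr<Message>> deferred_;
};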
