OLD | NEW |
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if defined(ENABLE_GPU) | 5 #if defined(ENABLE_GPU) |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/callback.h" | 8 #include "base/callback.h" |
9 #include "base/debug/trace_event.h" | 9 #include "base/debug/trace_event.h" |
10 #include "base/process_util.h" | 10 #include "base/process_util.h" |
11 #include "base/shared_memory.h" | 11 #include "base/shared_memory.h" |
12 #include "build/build_config.h" | 12 #include "build/build_config.h" |
13 #include "content/common/child_thread.h" | 13 #include "content/common/child_thread.h" |
14 #include "content/common/gpu/gpu_channel.h" | 14 #include "content/common/gpu/gpu_channel.h" |
15 #include "content/common/gpu/gpu_channel_manager.h" | 15 #include "content/common/gpu/gpu_channel_manager.h" |
16 #include "content/common/gpu/gpu_command_buffer_stub.h" | 16 #include "content/common/gpu/gpu_command_buffer_stub.h" |
17 #include "content/common/gpu/gpu_messages.h" | 17 #include "content/common/gpu/gpu_messages.h" |
18 #include "content/common/gpu/gpu_watchdog.h" | 18 #include "content/common/gpu/gpu_watchdog.h" |
19 #include "gpu/command_buffer/common/constants.h" | 19 #include "gpu/command_buffer/common/constants.h" |
20 #include "ui/gfx/gl/gl_context.h" | 20 #include "ui/gfx/gl/gl_context.h" |
21 #include "ui/gfx/gl/gl_surface.h" | 21 #include "ui/gfx/gl/gl_surface.h" |
22 | 22 |
23 #if defined(OS_WIN) | 23 #if defined(OS_WIN) |
24 #include "base/win/wrapped_window_proc.h" | 24 #include "base/win/wrapped_window_proc.h" |
| 25 #elif defined(TOUCH_UI) |
| 26 #include "content/common/gpu/image_transport_surface_linux.h" |
25 #endif | 27 #endif |
26 | 28 |
27 using gpu::Buffer; | 29 using gpu::Buffer; |
28 | 30 |
29 GpuCommandBufferStub::GpuCommandBufferStub( | 31 GpuCommandBufferStub::GpuCommandBufferStub( |
30 GpuChannel* channel, | 32 GpuChannel* channel, |
31 gfx::PluginWindowHandle handle, | 33 gfx::PluginWindowHandle handle, |
32 const gfx::Size& size, | 34 const gfx::Size& size, |
33 const gpu::gles2::DisallowedExtensions& disallowed_extensions, | 35 const gpu::gles2::DisallowedExtensions& disallowed_extensions, |
34 const std::string& allowed_extensions, | 36 const std::string& allowed_extensions, |
(...skipping 76 matching lines...)
111 handled = video_decoder_->OnMessageReceived(message); | 113 handled = video_decoder_->OnMessageReceived(message); |
112 | 114 |
113 DCHECK(handled); | 115 DCHECK(handled); |
114 return handled; | 116 return handled; |
115 } | 117 } |
116 | 118 |
117 bool GpuCommandBufferStub::Send(IPC::Message* message) { | 119 bool GpuCommandBufferStub::Send(IPC::Message* message) { |
118 return channel_->Send(message); | 120 return channel_->Send(message); |
119 } | 121 } |
120 | 122 |
| 123 void GpuCommandBufferStub::OnInitializeFailed(IPC::Message* reply_message) { |
| 124 scheduler_.reset(); |
| 125 command_buffer_.reset(); |
| 126 GpuCommandBufferMsg_Initialize::WriteReplyParams(reply_message, false); |
| 127 Send(reply_message); |
| 128 } |
| 129 |
121 void GpuCommandBufferStub::OnInitialize( | 130 void GpuCommandBufferStub::OnInitialize( |
122 base::SharedMemoryHandle ring_buffer, | 131 base::SharedMemoryHandle ring_buffer, |
123 int32 size, | 132 int32 size, |
124 IPC::Message* reply_message) { | 133 IPC::Message* reply_message) { |
125 DCHECK(!command_buffer_.get()); | 134 DCHECK(!command_buffer_.get()); |
126 | 135 |
127 bool result = false; | |
128 | |
129 command_buffer_.reset(new gpu::CommandBufferService); | 136 command_buffer_.reset(new gpu::CommandBufferService); |
130 | 137 |
131 #if defined(OS_WIN) | 138 #if defined(OS_WIN) |
132 // Windows dups the shared memory handle it receives into the current process | 139 // Windows dups the shared memory handle it receives into the current process |
133 // and closes it when this variable goes out of scope. | 140 // and closes it when this variable goes out of scope. |
134 base::SharedMemory shared_memory(ring_buffer, | 141 base::SharedMemory shared_memory(ring_buffer, |
135 false, | 142 false, |
136 channel_->renderer_process()); | 143 channel_->renderer_process()); |
137 #else | 144 #else |
138 // POSIX receives a dup of the shared memory handle and closes the dup when | 145 // POSIX receives a dup of the shared memory handle and closes the dup when |
139 // this variable goes out of scope. | 146 // this variable goes out of scope. |
140 base::SharedMemory shared_memory(ring_buffer, false); | 147 base::SharedMemory shared_memory(ring_buffer, false); |
141 #endif | 148 #endif |
142 | 149 |
143 // Initialize the CommandBufferService and GpuScheduler. | 150 // Initialize the CommandBufferService and GpuScheduler. |
144 if (command_buffer_->Initialize(&shared_memory, size)) { | 151 if (command_buffer_->Initialize(&shared_memory, size)) { |
145 scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(), | 152 scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(), |
146 channel_, | 153 channel_, |
147 NULL)); | 154 NULL)); |
| 155 #if defined(TOUCH_UI) |
| 156 scoped_refptr<gfx::GLSurface> surface; |
| 157 if (handle_) |
| 158 surface = ImageTransportSurface::CreateSurface(this); |
| 159 else |
| 160 surface = gfx::GLSurface::CreateOffscreenGLSurface(gfx::Size(1, 1)); |
| 161 |
| 162 if (!surface.get()) { |
| 163 LOG(ERROR) << "GpuCommandBufferStub: failed to create surface."; |
| 164 OnInitializeFailed(reply_message); |
| 165 return; |
| 166 } |
| 167 |
| 168 scoped_refptr<gfx::GLContext> context( |
| 169 gfx::GLContext::CreateGLContext(channel_->share_group(), |
| 170 surface.get())); |
| 171 |
| 172 if (!context.get()) { |
| 173 LOG(ERROR) << "GpuCommandBufferStub: failed to create context."; |
| 174 OnInitializeFailed(reply_message); |
| 175 return; |
| 176 } |
| 177 |
| 178 if (scheduler_->InitializeCommon( |
| 179 surface, |
| 180 context, |
| 181 initial_size_, |
| 182 disallowed_extensions_, |
| 183 allowed_extensions_.c_str(), |
| 184 requested_attribs_)) { |
| 185 #else |
148 if (scheduler_->Initialize( | 186 if (scheduler_->Initialize( |
149 handle_, | 187 handle_, |
150 initial_size_, | 188 initial_size_, |
151 disallowed_extensions_, | 189 disallowed_extensions_, |
152 allowed_extensions_.c_str(), | 190 allowed_extensions_.c_str(), |
153 requested_attribs_, | 191 requested_attribs_, |
154 channel_->share_group())) { | 192 channel_->share_group())) { |
| 193 #endif |
155 command_buffer_->SetPutOffsetChangeCallback( | 194 command_buffer_->SetPutOffsetChangeCallback( |
156 NewCallback(scheduler_.get(), | 195 NewCallback(scheduler_.get(), |
157 &gpu::GpuScheduler::PutChanged)); | 196 &gpu::GpuScheduler::PutChanged)); |
158 command_buffer_->SetParseErrorCallback( | 197 command_buffer_->SetParseErrorCallback( |
159 NewCallback(this, &GpuCommandBufferStub::OnParseError)); | 198 NewCallback(this, &GpuCommandBufferStub::OnParseError)); |
160 scheduler_->SetSwapBuffersCallback( | 199 scheduler_->SetSwapBuffersCallback( |
161 NewCallback(this, &GpuCommandBufferStub::OnSwapBuffers)); | 200 NewCallback(this, &GpuCommandBufferStub::OnSwapBuffers)); |
162 scheduler_->SetLatchCallback(base::Bind( | 201 scheduler_->SetLatchCallback(base::Bind( |
163 &GpuChannel::OnLatchCallback, base::Unretained(channel_), route_id_)); | 202 &GpuChannel::OnLatchCallback, base::Unretained(channel_), route_id_)); |
164 scheduler_->SetScheduledCallback( | 203 scheduler_->SetScheduledCallback( |
165 NewCallback(this, &GpuCommandBufferStub::OnScheduled)); | 204 NewCallback(this, &GpuCommandBufferStub::OnScheduled)); |
166 scheduler_->SetTokenCallback(base::Bind( | 205 scheduler_->SetTokenCallback(base::Bind( |
167 &GpuCommandBufferStub::OnSetToken, base::Unretained(this))); | 206 &GpuCommandBufferStub::OnSetToken, base::Unretained(this))); |
168 if (watchdog_) | 207 if (watchdog_) |
169 scheduler_->SetCommandProcessedCallback( | 208 scheduler_->SetCommandProcessedCallback( |
170 NewCallback(this, &GpuCommandBufferStub::OnCommandProcessed)); | 209 NewCallback(this, &GpuCommandBufferStub::OnCommandProcessed)); |
171 | 210 |
172 #if defined(OS_MACOSX) || defined(TOUCH_UI) | 211 #if defined(OS_MACOSX) |
173 if (handle_) { | 212 if (handle_) { |
174 // This context conceptually puts its output directly on the | 213 // This context conceptually puts its output directly on the |
175 // screen, rendered by the accelerated plugin layer in | 214 // screen, rendered by the accelerated plugin layer in |
176 // RenderWidgetHostViewMac. Set up a pathway to notify the | 215 // RenderWidgetHostViewMac. Set up a pathway to notify the |
177 // browser process when its contents change. | 216 // browser process when its contents change. |
178 scheduler_->SetSwapBuffersCallback( | 217 scheduler_->SetSwapBuffersCallback( |
179 NewCallback(this, | 218 NewCallback(this, |
180 &GpuCommandBufferStub::SwapBuffersCallback)); | 219 &GpuCommandBufferStub::SwapBuffersCallback)); |
181 } | 220 } |
182 #endif // defined(OS_MACOSX) || defined(TOUCH_UI) | 221 #endif // defined(OS_MACOSX) |
183 | 222 |
184 // Set up a pathway for resizing the output window or framebuffer at the | 223 // Set up a pathway for resizing the output window or framebuffer at the |
185 // right time relative to other GL commands. | 224 // right time relative to other GL commands. |
| 225 #if defined(TOUCH_UI) |
| 226 if (handle_ == gfx::kNullPluginWindow) { |
| 227 scheduler_->SetResizeCallback( |
| 228 NewCallback(this, &GpuCommandBufferStub::ResizeCallback)); |
| 229 } |
| 230 #else |
186 scheduler_->SetResizeCallback( | 231 scheduler_->SetResizeCallback( |
187 NewCallback(this, &GpuCommandBufferStub::ResizeCallback)); | 232 NewCallback(this, &GpuCommandBufferStub::ResizeCallback)); |
| 233 #endif |
188 | 234 |
189 if (parent_stub_for_initialization_) { | 235 if (parent_stub_for_initialization_) { |
190 scheduler_->SetParent(parent_stub_for_initialization_->scheduler_.get(), | 236 scheduler_->SetParent(parent_stub_for_initialization_->scheduler_.get(), |
191 parent_texture_for_initialization_); | 237 parent_texture_for_initialization_); |
192 parent_stub_for_initialization_.reset(); | 238 parent_stub_for_initialization_.reset(); |
193 parent_texture_for_initialization_ = 0; | 239 parent_texture_for_initialization_ = 0; |
194 } | 240 } |
195 | 241 |
196 result = true; | |
197 } else { | 242 } else { |
198 scheduler_.reset(); | 243 OnInitializeFailed(reply_message); |
199 command_buffer_.reset(); | 244 return; |
200 } | 245 } |
201 } | 246 } |
202 | 247 |
203 GpuCommandBufferMsg_Initialize::WriteReplyParams(reply_message, result); | 248 GpuCommandBufferMsg_Initialize::WriteReplyParams(reply_message, true); |
204 Send(reply_message); | 249 Send(reply_message); |
205 } | 250 } |
206 | 251 |
207 void GpuCommandBufferStub::OnSetParent(int32 parent_route_id, | 252 void GpuCommandBufferStub::OnSetParent(int32 parent_route_id, |
208 uint32 parent_texture_id, | 253 uint32 parent_texture_id, |
209 IPC::Message* reply_message) { | 254 IPC::Message* reply_message) { |
210 | 255 |
211 GpuCommandBufferStub* parent_stub = NULL; | 256 GpuCommandBufferStub* parent_stub = NULL; |
212 if (parent_route_id != MSG_ROUTING_NONE) { | 257 if (parent_route_id != MSG_ROUTING_NONE) { |
213 parent_stub = channel_->LookupCommandBuffer(parent_route_id); | 258 parent_stub = channel_->LookupCommandBuffer(parent_route_id); |
(...skipping 220 matching lines...)
434 params.render_view_id = render_view_id_; | 479 params.render_view_id = render_view_id_; |
435 params.window = handle_; | 480 params.window = handle_; |
436 params.surface_id = scheduler_->GetSurfaceId(); | 481 params.surface_id = scheduler_->GetSurfaceId(); |
437 params.route_id = route_id(); | 482 params.route_id = route_id(); |
438 params.swap_buffers_count = scheduler_->swap_buffers_count(); | 483 params.swap_buffers_count = scheduler_->swap_buffers_count(); |
439 gpu_channel_manager->Send( | 484 gpu_channel_manager->Send( |
440 new GpuHostMsg_AcceleratedSurfaceBuffersSwapped(params)); | 485 new GpuHostMsg_AcceleratedSurfaceBuffersSwapped(params)); |
441 | 486 |
442 scheduler_->SetScheduled(false); | 487 scheduler_->SetScheduled(false); |
443 } | 488 } |
444 #endif // defined(OS_MACOSX) | |
445 | |
446 #if defined(TOUCH_UI) | |
447 void GpuCommandBufferStub::SwapBuffersCallback() { | |
448 TRACE_EVENT0("gpu", "GpuCommandBufferStub::SwapBuffersCallback"); | |
449 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager(); | |
450 GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params params; | |
451 params.renderer_id = renderer_id_; | |
452 params.render_view_id = render_view_id_; | |
453 params.surface_id = scheduler_->GetFrontSurfaceId(); | |
454 params.route_id = route_id(); | |
455 params.swap_buffers_count = scheduler_->swap_buffers_count(); | |
456 gpu_channel_manager->Send( | |
457 new GpuHostMsg_AcceleratedSurfaceBuffersSwapped(params)); | |
458 | |
459 scheduler_->SetScheduled(false); | |
460 } | |
461 | 489 |
462 void GpuCommandBufferStub::AcceleratedSurfaceIOSurfaceSet(uint64 surface_id) { | |
463 scheduler_->SetScheduled(true); | |
464 } | |
465 | |
466 void GpuCommandBufferStub::AcceleratedSurfaceReleased(uint64 surface_id) { | |
467 scheduler_->ReleaseSurface(surface_id); | |
468 } | |
469 #endif // defined(TOUCH_UI) | |
470 | |
471 #if defined(OS_MACOSX) || defined(TOUCH_UI) | |
472 void GpuCommandBufferStub::AcceleratedSurfaceBuffersSwapped( | 490 void GpuCommandBufferStub::AcceleratedSurfaceBuffersSwapped( |
473 uint64 swap_buffers_count) { | 491 uint64 swap_buffers_count) { |
474 TRACE_EVENT1("gpu", | 492 TRACE_EVENT1("gpu", |
475 "GpuCommandBufferStub::AcceleratedSurfaceBuffersSwapped", | 493 "GpuCommandBufferStub::AcceleratedSurfaceBuffersSwapped", |
476 "frame", swap_buffers_count); | 494 "frame", swap_buffers_count); |
477 | 495 |
478 // Multiple swapbuffers may get consolidated together into a single | 496 // Multiple swapbuffers may get consolidated together into a single |
479 // AcceleratedSurfaceBuffersSwapped call. Since OnSwapBuffers expects to be | 497 // AcceleratedSurfaceBuffersSwapped call. Since OnSwapBuffers expects to be |
480 // called one time for every swap, make up the difference here. | 498 // called one time for every swap, make up the difference here. |
481 uint64 delta = swap_buffers_count - | 499 uint64 delta = swap_buffers_count - |
482 scheduler_->acknowledged_swap_buffers_count(); | 500 scheduler_->acknowledged_swap_buffers_count(); |
483 scheduler_->set_acknowledged_swap_buffers_count(swap_buffers_count); | 501 scheduler_->set_acknowledged_swap_buffers_count(swap_buffers_count); |
484 | 502 |
485 for(uint64 i = 0; i < delta; i++) { | 503 for(uint64 i = 0; i < delta; i++) { |
486 OnSwapBuffers(); | 504 OnSwapBuffers(); |
487 // Wake up the GpuScheduler to start doing work again. | 505 // Wake up the GpuScheduler to start doing work again. |
488 scheduler_->SetScheduled(true); | 506 scheduler_->SetScheduled(true); |
489 } | 507 } |
490 } | 508 } |
491 #endif // defined(OS_MACOSX) || defined(TOUCH_UI) | 509 #endif // defined(OS_MACOSX) |
492 | 510 |
493 void GpuCommandBufferStub::CommandBufferWasDestroyed() { | 511 void GpuCommandBufferStub::CommandBufferWasDestroyed() { |
494 TRACE_EVENT0("gpu", "GpuCommandBufferStub::CommandBufferWasDestroyed"); | 512 TRACE_EVENT0("gpu", "GpuCommandBufferStub::CommandBufferWasDestroyed"); |
495 // In case the renderer is currently blocked waiting for a sync reply from | 513 // In case the renderer is currently blocked waiting for a sync reply from |
496 // the stub, this method allows us to cleanup and unblock pending messages. | 514 // the stub, this method allows us to cleanup and unblock pending messages. |
497 if (scheduler_.get()) { | 515 if (scheduler_.get()) { |
498 while (!scheduler_->IsScheduled()) | 516 while (!scheduler_->IsScheduled()) |
499 scheduler_->SetScheduled(true); | 517 scheduler_->SetScheduled(true); |
500 } | 518 } |
501 // Handle any deferred messages now that the scheduler is not blocking | 519 // Handle any deferred messages now that the scheduler is not blocking |
(...skipping 18 matching lines...)
520 } else { | 538 } else { |
521 #if defined(TOOLKIT_USES_GTK) && !defined(TOUCH_UI) || defined(OS_WIN) | 539 #if defined(TOOLKIT_USES_GTK) && !defined(TOUCH_UI) || defined(OS_WIN) |
522 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager(); | 540 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager(); |
523 gpu_channel_manager->Send( | 541 gpu_channel_manager->Send( |
524 new GpuHostMsg_ResizeView(renderer_id_, | 542 new GpuHostMsg_ResizeView(renderer_id_, |
525 render_view_id_, | 543 render_view_id_, |
526 route_id_, | 544 route_id_, |
527 size)); | 545 size)); |
528 | 546 |
529 scheduler_->SetScheduled(false); | 547 scheduler_->SetScheduled(false); |
530 #elif defined(TOUCH_UI) | |
531 if (scheduler_->GetBackSurfaceId()) { | |
532 GpuHostMsg_AcceleratedSurfaceRelease_Params params; | |
533 params.renderer_id = renderer_id_; | |
534 params.render_view_id = render_view_id_; | |
535 params.identifier = scheduler_->GetBackSurfaceId(); | |
536 params.route_id = route_id(); | |
537 | |
538 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager(); | |
539 gpu_channel_manager->Send( | |
540 new GpuHostMsg_AcceleratedSurfaceRelease(params)); | |
541 } | |
542 scheduler_->CreateBackSurface(size); | |
543 GpuHostMsg_AcceleratedSurfaceSetIOSurface_Params params; | |
544 params.renderer_id = renderer_id_; | |
545 params.render_view_id = render_view_id_; | |
546 params.width = size.width(); | |
547 params.height = size.height(); | |
548 params.identifier = scheduler_->GetBackSurfaceId(); | |
549 params.route_id = route_id(); | |
550 | |
551 GpuChannelManager* gpu_channel_manager = channel_->gpu_channel_manager(); | |
552 gpu_channel_manager->Send( | |
553 new GpuHostMsg_AcceleratedSurfaceSetIOSurface(params)); | |
554 scheduler_->SetScheduled(false); | |
555 #endif | 548 #endif |
556 } | 549 } |
557 } | 550 } |
558 | 551 |
559 void GpuCommandBufferStub::ViewResized() { | 552 void GpuCommandBufferStub::ViewResized() { |
560 #if defined(TOOLKIT_USES_GTK) && !defined(TOUCH_UI) || defined(OS_WIN) | 553 #if defined(TOOLKIT_USES_GTK) && !defined(TOUCH_UI) || defined(OS_WIN) |
561 DCHECK(handle_ != gfx::kNullPluginWindow); | 554 DCHECK(handle_ != gfx::kNullPluginWindow); |
562 scheduler_->SetScheduled(true); | 555 scheduler_->SetScheduled(true); |
563 | 556 |
564 // Recreate the view surface to match the window size. TODO(apatrick): this is | 557 // Recreate the view surface to match the window size. TODO(apatrick): this is |
(...skipping 26 matching lines...)
591 new GpuVideoDecodeAccelerator(this, route_id_, this)); | 584 new GpuVideoDecodeAccelerator(this, route_id_, this)); |
592 video_decoder_->Initialize(configs); | 585 video_decoder_->Initialize(configs); |
593 } | 586 } |
594 | 587 |
595 void GpuCommandBufferStub::OnDestroyVideoDecoder() { | 588 void GpuCommandBufferStub::OnDestroyVideoDecoder() { |
596 LOG(ERROR) << "GpuCommandBufferStub::OnDestroyVideoDecoder"; | 589 LOG(ERROR) << "GpuCommandBufferStub::OnDestroyVideoDecoder"; |
597 video_decoder_.reset(); | 590 video_decoder_.reset(); |
598 } | 591 } |
599 | 592 |
600 #endif // defined(ENABLE_GPU) | 593 #endif // defined(ENABLE_GPU) |