Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // TODO(jam): move this file to src/content once we have an interface that the | 1 // TODO(jam): move this file to src/content once we have an interface that the |
| 2 // embedder provides. We can then use it to get the resource and resize the | 2 // embedder provides. We can then use it to get the resource and resize the |
| 3 // window. | 3 // window. |
| 4 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 4 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
| 5 // Use of this source code is governed by a BSD-style license that can be | 5 // Use of this source code is governed by a BSD-style license that can be |
| 6 // found in the LICENSE file. | 6 // found in the LICENSE file. |
| 7 | 7 |
| 8 #include "chrome/browser/gpu_process_host_ui_shim.h" | 8 #include "chrome/browser/gpu_process_host_ui_shim.h" |
| 9 | 9 |
| 10 #include "base/command_line.h" | 10 #include "base/command_line.h" |
| (...skipping 94 matching lines...) | |
| 105 | 105 |
| 106 RouteToGpuProcessHostUIShimTask::~RouteToGpuProcessHostUIShimTask() { | 106 RouteToGpuProcessHostUIShimTask::~RouteToGpuProcessHostUIShimTask() { |
| 107 } | 107 } |
| 108 | 108 |
| 109 void RouteToGpuProcessHostUIShimTask::Run() { | 109 void RouteToGpuProcessHostUIShimTask::Run() { |
| 110 GpuProcessHostUIShim* ui_shim = GpuProcessHostUIShim::FromID(host_id_); | 110 GpuProcessHostUIShim* ui_shim = GpuProcessHostUIShim::FromID(host_id_); |
| 111 if (ui_shim) | 111 if (ui_shim) |
| 112 ui_shim->OnMessageReceived(msg_); | 112 ui_shim->OnMessageReceived(msg_); |
| 113 } | 113 } |
| 114 | 114 |
| 115 class GpuProcessHostUIShim::ViewSurface { | 115 #if defined(OS_LINUX) |
| 116 // Used to put a lock on surfaces so that the window to which the GPU | |
| 117 // process is drawing to doesn't disappear while it is drawing when | |
| 118 // a tab is closed. | |
| 119 class GpuProcessHostUIShim::SurfaceLock { | |
jam (2011/04/14 22:42:00): nit: lock implies that locking is taking place, wh…
jonathan.backer (2011/04/15 19:02:51): Done.
| 116 public: | 120 public: |
| 117 explicit ViewSurface(ViewID view_id); | 121 explicit SurfaceLock(gfx::PluginWindowHandle surface); |
| 118 ~ViewSurface(); | 122 ~SurfaceLock(); |
| 119 gfx::PluginWindowHandle surface() { return surface_; } | |
| 120 private: | 123 private: |
| 121 RenderWidgetHostView* GetRenderWidgetHostView(); | |
| 122 ViewID view_id_; | |
| 123 gfx::PluginWindowHandle surface_; | 124 gfx::PluginWindowHandle surface_; |
| 124 }; | 125 }; |
| 125 | 126 |
| 126 GpuProcessHostUIShim::ViewSurface::ViewSurface(ViewID view_id) | 127 GpuProcessHostUIShim::SurfaceLock::SurfaceLock(gfx::PluginWindowHandle surface) |
| 127 : view_id_(view_id), surface_(gfx::kNullPluginWindow) { | 128 : surface_(surface) { |
| 128 RenderWidgetHostView* view = GetRenderWidgetHostView(); | 129 GtkNativeViewManager* manager = GtkNativeViewManager::GetInstance(); |
| 129 if (view) | 130 DCHECK(manager->AddRefPermanentXID(surface_)); |
jam (2011/04/14 22:42:00): DCHECK is compiled away in release builds, so you'…
jonathan.backer (2011/04/15 19:02:51): Good catch! Done.
(See the sketch after the diff for the release-build-safe pattern.)
| 130 surface_ = view->AcquireCompositingSurface(); | |
| 131 } | 131 } |
| 132 | 132 |
| 133 GpuProcessHostUIShim::ViewSurface::~ViewSurface() { | 133 GpuProcessHostUIShim::SurfaceLock::~SurfaceLock() { |
| 134 if (!surface_) | 134 // TODO(backer): ReleasePermanentXID has to be done on the UI thread. |
| 135 return; | 135 // Post task to release once we move this code to the IO thread. |
| 136 | 136 GtkNativeViewManager* manager = GtkNativeViewManager::GetInstance(); |
| 137 RenderWidgetHostView* view = GetRenderWidgetHostView(); | 137 manager->ReleasePermanentXID(surface_); |
| 138 if (view) | |
| 139 view->ReleaseCompositingSurface(surface_); | |
| 140 } | 138 } |
| 141 | 139 #endif // defined(OS_LINUX) |
| 142 // We do separate lookups for the RenderWidgetHostView when acquiring | |
| 143 // and releasing surfaces (rather than caching) because the | |
| 144 // RenderWidgetHostView could die without warning. In such a case, | |
| 145 // it's the RenderWidgetHostView's responsibility to cleanup. | |
| 146 RenderWidgetHostView* GpuProcessHostUIShim::ViewSurface:: | |
| 147 GetRenderWidgetHostView() { | |
| 148 RenderProcessHost* process = RenderProcessHost::FromID(view_id_.first); | |
| 149 RenderWidgetHost* host = NULL; | |
| 150 if (process) { | |
| 151 host = static_cast<RenderWidgetHost*>( | |
| 152 process->GetListenerByID(view_id_.second)); | |
| 153 } | |
| 154 | |
| 155 RenderWidgetHostView* view = NULL; | |
| 156 if (host) | |
| 157 view = host->view(); | |
| 158 | |
| 159 return view; | |
| 160 } | |
| 161 | 140 |
| 162 GpuProcessHostUIShim::GpuProcessHostUIShim(int host_id, | 141 GpuProcessHostUIShim::GpuProcessHostUIShim(int host_id, |
| 163 content::CauseForGpuLaunch cause_for_gpu_launch) | 142 content::CauseForGpuLaunch cause_for_gpu_launch) |
| 164 : host_id_(host_id), | 143 : host_id_(host_id), |
| 165 gpu_process_(base::kNullProcessHandle), | 144 gpu_process_(base::kNullProcessHandle), |
| 166 gpu_channel_manager_(NULL), | 145 gpu_channel_manager_(NULL), |
| 167 ui_thread_sender_(NULL) { | 146 ui_thread_sender_(NULL) { |
| 168 g_hosts_by_id.AddWithID(this, host_id_); | 147 g_hosts_by_id.AddWithID(this, host_id_); |
| 169 gpu_data_manager_ = GpuDataManager::GetInstance(); | 148 gpu_data_manager_ = GpuDataManager::GetInstance(); |
| 170 DCHECK(gpu_data_manager_); | 149 DCHECK(gpu_data_manager_); |
| (...skipping 237 matching lines...) | |
| 408 linked_ptr<SynchronizeCallback> wrapped_callback(callback); | 387 linked_ptr<SynchronizeCallback> wrapped_callback(callback); |
| 409 | 388 |
| 410 if (Send(new GpuMsg_Synchronize())) { | 389 if (Send(new GpuMsg_Synchronize())) { |
| 411 synchronize_requests_.push(wrapped_callback); | 390 synchronize_requests_.push(wrapped_callback); |
| 412 } else { | 391 } else { |
| 413 SynchronizeError(wrapped_callback.release()); | 392 SynchronizeError(wrapped_callback.release()); |
| 414 } | 393 } |
| 415 } | 394 } |
| 416 | 395 |
| 417 void GpuProcessHostUIShim::CreateViewCommandBuffer( | 396 void GpuProcessHostUIShim::CreateViewCommandBuffer( |
| 397 gfx::PluginWindowHandle compositing_surface, | |
| 418 int32 render_view_id, | 398 int32 render_view_id, |
| 419 int32 renderer_id, | 399 int32 renderer_id, |
| 420 const GPUCreateCommandBufferConfig& init_params, | 400 const GPUCreateCommandBufferConfig& init_params, |
| 421 CreateCommandBufferCallback* callback) { | 401 CreateCommandBufferCallback* callback) { |
| 402 // FIXME(backer): We're gonna have to fix the ref counting on the | |
| 403 // compositing_surface being passed in (at least for Linux). | |
| 422 DCHECK(CalledOnValidThread()); | 404 DCHECK(CalledOnValidThread()); |
| 423 linked_ptr<CreateCommandBufferCallback> wrapped_callback(callback); | 405 linked_ptr<CreateCommandBufferCallback> wrapped_callback(callback); |
| 406 | |
| 407 #if defined(OS_LINUX) | |
| 424 ViewID view_id(renderer_id, render_view_id); | 408 ViewID view_id(renderer_id, render_view_id); |
| 425 | 409 |
| 426 // There should only be one such command buffer (for the compositor). In | 410 // There should only be one such command buffer (for the compositor). In |
| 427 // practice, if the GPU process lost a context, GraphicsContext3D with | 411 // practice, if the GPU process lost a context, GraphicsContext3D with |
| 428 // associated command buffer and view surface will not be gone until new | 412 // associated command buffer and view surface will not be gone until new |
| 429 // one is in place and all layers are reattached. | 413 // one is in place and all layers are reattached. |
| 430 linked_ptr<ViewSurface> view_surface; | 414 linked_ptr<SurfaceLock> surface_lock; |
| 431 ViewSurfaceMap::iterator it = acquired_surfaces_.find(view_id); | 415 SurfaceLockMap::iterator it = surface_locks_.find(view_id); |
| 432 if (it != acquired_surfaces_.end()) | 416 if (it != surface_locks_.end()) |
| 433 view_surface = (*it).second; | 417 surface_lock = (*it).second; |
| 434 else | 418 else |
| 435 view_surface.reset(new ViewSurface(view_id)); | 419 surface_lock.reset(new SurfaceLock(compositing_surface)); |
| 420 #endif // defined(OS_LINUX) | |
| 436 | 421 |
| 437 if (view_surface->surface() != gfx::kNullPluginWindow && | 422 if (compositing_surface != gfx::kNullPluginWindow && |
| 438 Send(new GpuMsg_CreateViewCommandBuffer( | 423 Send(new GpuMsg_CreateViewCommandBuffer( |
| 439 view_surface->surface(), render_view_id, renderer_id, init_params))) { | 424 compositing_surface, render_view_id, renderer_id, init_params))) { |
| 440 create_command_buffer_requests_.push(wrapped_callback); | 425 create_command_buffer_requests_.push(wrapped_callback); |
| 441 acquired_surfaces_.insert(std::pair<ViewID, linked_ptr<ViewSurface> >( | 426 #if defined(OS_LINUX) |
| 442 view_id, view_surface)); | 427 surface_locks_.insert(std::pair<ViewID, linked_ptr<SurfaceLock> >( |
| 428 view_id, surface_lock)); | |
| 429 #endif // defined(OS_LINUX) | |
| 443 } else { | 430 } else { |
| 444 CreateCommandBufferError(wrapped_callback.release(), MSG_ROUTING_NONE); | 431 CreateCommandBufferError(wrapped_callback.release(), MSG_ROUTING_NONE); |
| 445 } | 432 } |
| 446 } | 433 } |
| 447 | 434 |
| 448 #if defined(OS_MACOSX) | 435 #if defined(OS_MACOSX) |
| 449 | 436 |
| 450 void GpuProcessHostUIShim::DidDestroyAcceleratedSurface(int renderer_id, | 437 void GpuProcessHostUIShim::DidDestroyAcceleratedSurface(int renderer_id, |
| 451 int render_view_id) { | 438 int render_view_id) { |
| 452 // Destroy the command buffer that owns the accelerated surface. | 439 // Destroy the command buffer that owns the accelerated surface. |
| (...skipping 131 matching lines...) | |
| 584 if (route_id == MSG_ROUTING_NONE) | 571 if (route_id == MSG_ROUTING_NONE) |
| 585 CreateCommandBufferError(callback.release(), route_id); | 572 CreateCommandBufferError(callback.release(), route_id); |
| 586 else | 573 else |
| 587 callback->Run(route_id); | 574 callback->Run(route_id); |
| 588 } | 575 } |
| 589 } | 576 } |
| 590 | 577 |
| 591 void GpuProcessHostUIShim::OnDestroyCommandBuffer( | 578 void GpuProcessHostUIShim::OnDestroyCommandBuffer( |
| 592 gfx::PluginWindowHandle window, int32 renderer_id, | 579 gfx::PluginWindowHandle window, int32 renderer_id, |
| 593 int32 render_view_id) { | 580 int32 render_view_id) { |
| 581 #if defined(OS_LINUX) | |
| 594 ViewID view_id(renderer_id, render_view_id); | 582 ViewID view_id(renderer_id, render_view_id); |
| 595 ViewSurfaceMap::iterator it = acquired_surfaces_.find(view_id); | 583 SurfaceLockMap::iterator it = surface_locks_.find(view_id); |
| 596 if (it != acquired_surfaces_.end()) | 584 if (it != surface_locks_.end()) |
| 597 acquired_surfaces_.erase(it); | 585 surface_locks_.erase(it); |
| 586 #endif // defined(OS_LINUX) | |
| 598 } | 587 } |
| 599 | 588 |
| 600 void GpuProcessHostUIShim::OnGraphicsInfoCollected(const GPUInfo& gpu_info) { | 589 void GpuProcessHostUIShim::OnGraphicsInfoCollected(const GPUInfo& gpu_info) { |
| 601 gpu_data_manager_->UpdateGpuInfo(gpu_info); | 590 gpu_data_manager_->UpdateGpuInfo(gpu_info); |
| 602 } | 591 } |
| 603 | 592 |
| 604 void GpuProcessHostUIShim::OnLogMessage(int level, | 593 void GpuProcessHostUIShim::OnLogMessage(int level, |
| 605 const std::string& header, | 594 const std::string& header, |
| 606 const std::string& message) { | 595 const std::string& message) { |
| 607 DictionaryValue* dict = new DictionaryValue(); | 596 DictionaryValue* dict = new DictionaryValue(); |
| (...skipping 61 matching lines...) | |
| 669 int render_view_id) { | 658 int render_view_id) { |
| 670 RenderViewHost* host = RenderViewHost::FromID(renderer_id, | 659 RenderViewHost* host = RenderViewHost::FromID(renderer_id, |
| 671 render_view_id); | 660 render_view_id); |
| 672 if (!host) { | 661 if (!host) { |
| 673 return; | 662 return; |
| 674 } | 663 } |
| 675 host->ScheduleComposite(); | 664 host->ScheduleComposite(); |
| 676 } | 665 } |
| 677 | 666 |
| 678 #endif | 667 #endif |
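
Follow-up to jam's comment on the `SurfaceLock` constructor: wrapping `manager->AddRefPermanentXID(surface_)` directly in `DCHECK()` drops the call entirely in release builds, because `DCHECK` compiles to a no-op there. The author fixed this in a later patch set ("Good catch! Done."), which is not shown in this diff. The standalone sketch below illustrates the pitfall and the usual fix, using plain `assert()` as a stand-in for `DCHECK` and a hypothetical `AddRef()` in place of `AddRefPermanentXID()`; it is an illustration, not the landed Chromium code.

```cpp
#include <cassert>
#include <cstdio>

static int g_ref_count = 0;

// Stand-in for AddRefPermanentXID(): has a side effect and reports success.
static bool AddRef() {
  ++g_ref_count;
  return true;
}

int main() {
  // Buggy pattern: when NDEBUG is defined, assert() expands to nothing,
  // so AddRef() is never called and the reference count silently drifts.
  assert(AddRef());

  // Safe pattern: make the call unconditionally, assert only on the result.
  const bool ok = AddRef();
  assert(ok);
  (void)ok;  // Silence the unused-variable warning in release builds.

  // Prints 2 in a debug build, but only 1 in a release build because the
  // first (buggy) call vanished along with the assert.
  std::printf("ref count: %d\n", g_ref_count);
  return 0;
}
```

In Chromium terms, the fix is the same shape: store the return value of `AddRefPermanentXID()` and `DCHECK` the stored value, so the side-effecting call itself survives release builds while only the check compiles away.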
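
Separately, the TODO(backer) in the `SurfaceLock` destructor records that `ReleasePermanentXID()` must run on the UI thread, and that the release should be posted as a task once this code moves to the IO thread. A minimal sketch of what that might look like, assuming the Task-based `BrowserThread::PostTask` API of this file's era (the `RouteToGpuProcessHostUIShimTask` above is a `Task` subclass) and the surrounding file's includes; `ReleaseSurfaceOnUIThread` is an illustrative helper, not code from this CL.

```cpp
namespace {

// Hypothetical helper: runs on the UI thread and drops the XID reference.
void ReleaseSurfaceOnUIThread(gfx::PluginWindowHandle surface) {
  GtkNativeViewManager::GetInstance()->ReleasePermanentXID(surface);
}

}  // namespace

GpuProcessHostUIShim::SurfaceLock::~SurfaceLock() {
  // Once the shim lives on the IO thread, the destructor can no longer call
  // ReleasePermanentXID() directly; bounce the release to the UI thread.
  BrowserThread::PostTask(
      BrowserThread::UI, FROM_HERE,
      NewRunnableFunction(&ReleaseSurfaceOnUIThread, surface_));
}
```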