OLD | NEW |
1 // TODO(jam): move this file to src/content once we have an interface that the | 1 // TODO(jam): move this file to src/content once we have an interface that the |
2 // embedder provides. We can then use it to get the resource and resize the | 2 // embedder provides. We can then use it to get the resource and resize the |
3 // window. | 3 // window. |
4 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 4 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
5 // Use of this source code is governed by a BSD-style license that can be | 5 // Use of this source code is governed by a BSD-style license that can be |
6 // found in the LICENSE file. | 6 // found in the LICENSE file. |
7 | 7 |
8 #include "chrome/browser/gpu_process_host_ui_shim.h" | 8 #include "chrome/browser/gpu_process_host_ui_shim.h" |
9 | 9 |
10 #include "base/command_line.h" | 10 #include "base/command_line.h" |
(...skipping 94 matching lines...)
105 | 105 |
106 RouteToGpuProcessHostUIShimTask::~RouteToGpuProcessHostUIShimTask() { | 106 RouteToGpuProcessHostUIShimTask::~RouteToGpuProcessHostUIShimTask() { |
107 } | 107 } |
108 | 108 |
109 void RouteToGpuProcessHostUIShimTask::Run() { | 109 void RouteToGpuProcessHostUIShimTask::Run() { |
110 GpuProcessHostUIShim* ui_shim = GpuProcessHostUIShim::FromID(host_id_); | 110 GpuProcessHostUIShim* ui_shim = GpuProcessHostUIShim::FromID(host_id_); |
111 if (ui_shim) | 111 if (ui_shim) |
112 ui_shim->OnMessageReceived(msg_); | 112 ui_shim->OnMessageReceived(msg_); |
113 } | 113 } |
114 | 114 |
115 class GpuProcessHostUIShim::ViewSurface { | 115 #if defined(OS_LINUX) |
 | 116 // Used to put a lock on surfaces so that the window the GPU process |
 | 117 // is drawing to doesn't disappear while it is drawing when a tab is |
 | 118 // closed. |
| 119 class GpuProcessHostUIShim::SurfaceRef { |
116 public: | 120 public: |
117 explicit ViewSurface(ViewID view_id); | 121 explicit SurfaceRef(gfx::PluginWindowHandle surface); |
118 ~ViewSurface(); | 122 ~SurfaceRef(); |
119 gfx::PluginWindowHandle surface() { return surface_; } | |
120 private: | 123 private: |
121 RenderWidgetHostView* GetRenderWidgetHostView(); | |
122 ViewID view_id_; | |
123 gfx::PluginWindowHandle surface_; | 124 gfx::PluginWindowHandle surface_; |
124 }; | 125 }; |
125 | 126 |
126 GpuProcessHostUIShim::ViewSurface::ViewSurface(ViewID view_id) | 127 GpuProcessHostUIShim::SurfaceRef::SurfaceRef(gfx::PluginWindowHandle surface) |
127 : view_id_(view_id), surface_(gfx::kNullPluginWindow) { | 128 : surface_(surface) { |
128 RenderWidgetHostView* view = GetRenderWidgetHostView(); | 129 GtkNativeViewManager* manager = GtkNativeViewManager::GetInstance(); |
129 if (view) | 130 CHECK(manager->AddRefPermanentXID(surface_)); |
130 surface_ = view->AcquireCompositingSurface(); | |
131 } | 131 } |
132 | 132 |
133 GpuProcessHostUIShim::ViewSurface::~ViewSurface() { | 133 GpuProcessHostUIShim::SurfaceRef::~SurfaceRef() { |
134 if (!surface_) | 134 // TODO(backer): ReleasePermanentXID has to be done on the UI thread. |
135 return; | 135 // Post task to release once we move this code to the IO thread. |
136 | 136 GtkNativeViewManager* manager = GtkNativeViewManager::GetInstance(); |
137 RenderWidgetHostView* view = GetRenderWidgetHostView(); | 137 manager->ReleasePermanentXID(surface_); |
138 if (view) | |
139 view->ReleaseCompositingSurface(surface_); | |
140 } | 138 } |
141 | 139 #endif // defined(OS_LINUX) |
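
A note on the new SurfaceRef: it is an RAII holder around a per-window reference count, so the X window stays alive for as long as any SurfaceRef for it exists. Below is a minimal, self-contained sketch of that idea only; XidRegistry and ScopedXidRef are illustrative stand-ins, not Chromium API (GtkNativeViewManager::AddRefPermanentXID/ReleasePermanentXID play this role in the patch).

#include <cassert>
#include <map>

typedef unsigned long XID;  // stand-in for an X11 window handle

// Illustrative registry of reference counts keyed by XID.
class XidRegistry {
 public:
  bool AddRef(XID xid) {
    ++refs_[xid];
    return true;  // the real call can fail if the window is already gone
  }
  void Release(XID xid) {
    std::map<XID, int>::iterator it = refs_.find(xid);
    if (it != refs_.end() && --it->second == 0)
      refs_.erase(it);  // last reference dropped; the window may now go away
  }
 private:
  std::map<XID, int> refs_;
};

// RAII holder mirroring SurfaceRef: one reference for the holder's lifetime.
class ScopedXidRef {
 public:
  ScopedXidRef(XidRegistry* registry, XID xid)
      : registry_(registry), xid_(xid) {
    bool added = registry_->AddRef(xid_);
    assert(added);
    (void)added;
  }
  ~ScopedXidRef() { registry_->Release(xid_); }
 private:
  XidRegistry* registry_;
  XID xid_;
};

Holding such a ref in a shared pointer keyed by view, as the patch does with linked_ptr<SurfaceRef>, lets several requests for the same view share one lock.
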
142 // We do separate lookups for the RenderWidgetHostView when acquiring | |
143 // and releasing surfaces (rather than caching) because the | |
144 // RenderWidgetHostView could die without warning. In such a case, | |
145 // it's the RenderWidgetHostView's responsibility to cleanup. | |
146 RenderWidgetHostView* GpuProcessHostUIShim::ViewSurface:: | |
147 GetRenderWidgetHostView() { | |
148 RenderProcessHost* process = RenderProcessHost::FromID(view_id_.first); | |
149 RenderWidgetHost* host = NULL; | |
150 if (process) { | |
151 host = static_cast<RenderWidgetHost*>( | |
152 process->GetListenerByID(view_id_.second)); | |
153 } | |
154 | |
155 RenderWidgetHostView* view = NULL; | |
156 if (host) | |
157 view = host->view(); | |
158 | |
159 return view; | |
160 } | |
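
The comment on the removed ViewSurface helper captures a lookup-don't-cache rule: a RenderWidgetHostView can die without warning, so it is re-resolved from its IDs on every use rather than held as a pointer. A rough sketch of that pattern, assuming an illustrative WidgetMap in place of the real RenderProcessHost::FromID()/GetListenerByID() lookup:

#include <cstddef>
#include <map>
#include <utility>

struct Widget { int state; };

// Illustrative registry keyed by (process id, widget id).
typedef std::map<std::pair<int, int>, Widget*> WidgetMap;

// Resolve from IDs at every use and never cache the returned pointer: the
// widget can be torn down at any time, and its owner handles that cleanup.
Widget* LookupWidget(const WidgetMap& widgets, int process_id, int widget_id) {
  WidgetMap::const_iterator it =
      widgets.find(std::make_pair(process_id, widget_id));
  return it == widgets.end() ? NULL : it->second;
}
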
161 | 140 |
162 GpuProcessHostUIShim::GpuProcessHostUIShim(int host_id, | 141 GpuProcessHostUIShim::GpuProcessHostUIShim(int host_id, |
163 content::CauseForGpuLaunch cause_for_gpu_launch) | 142 content::CauseForGpuLaunch cause_for_gpu_launch) |
164 : host_id_(host_id), | 143 : host_id_(host_id), |
165 gpu_process_(base::kNullProcessHandle), | 144 gpu_process_(base::kNullProcessHandle), |
166 gpu_channel_manager_(NULL), | 145 gpu_channel_manager_(NULL), |
167 ui_thread_sender_(NULL) { | 146 ui_thread_sender_(NULL) { |
168 g_hosts_by_id.AddWithID(this, host_id_); | 147 g_hosts_by_id.AddWithID(this, host_id_); |
169 gpu_data_manager_ = GpuDataManager::GetInstance(); | 148 gpu_data_manager_ = GpuDataManager::GetInstance(); |
170 DCHECK(gpu_data_manager_); | 149 DCHECK(gpu_data_manager_); |
(...skipping 240 matching lines...)
411 linked_ptr<SynchronizeCallback> wrapped_callback(callback); | 390 linked_ptr<SynchronizeCallback> wrapped_callback(callback); |
412 | 391 |
413 if (Send(new GpuMsg_Synchronize())) { | 392 if (Send(new GpuMsg_Synchronize())) { |
414 synchronize_requests_.push(wrapped_callback); | 393 synchronize_requests_.push(wrapped_callback); |
415 } else { | 394 } else { |
416 SynchronizeError(wrapped_callback.release()); | 395 SynchronizeError(wrapped_callback.release()); |
417 } | 396 } |
418 } | 397 } |
419 | 398 |
420 void GpuProcessHostUIShim::CreateViewCommandBuffer( | 399 void GpuProcessHostUIShim::CreateViewCommandBuffer( |
| 400 gfx::PluginWindowHandle compositing_surface, |
421 int32 render_view_id, | 401 int32 render_view_id, |
422 int32 renderer_id, | 402 int32 renderer_id, |
423 const GPUCreateCommandBufferConfig& init_params, | 403 const GPUCreateCommandBufferConfig& init_params, |
424 CreateCommandBufferCallback* callback) { | 404 CreateCommandBufferCallback* callback) { |
425 DCHECK(CalledOnValidThread()); | 405 DCHECK(CalledOnValidThread()); |
426 linked_ptr<CreateCommandBufferCallback> wrapped_callback(callback); | 406 linked_ptr<CreateCommandBufferCallback> wrapped_callback(callback); |
| 407 |
| 408 #if defined(OS_LINUX) |
427 ViewID view_id(renderer_id, render_view_id); | 409 ViewID view_id(renderer_id, render_view_id); |
428 | 410 |
429 // There should only be one such command buffer (for the compositor). In | 411 // There should only be one such command buffer (for the compositor). In |
430 // practice, if the GPU process lost a context, the GraphicsContext3D with | 412 // practice, if the GPU process lost a context, the GraphicsContext3D with |
431 // its associated command buffer and view surface will not be gone until a | 413 // its associated command buffer and view surface will not be gone until a |
432 // new one is in place and all layers are reattached. | 414 // new one is in place and all layers are reattached. |
433 linked_ptr<ViewSurface> view_surface; | 415 linked_ptr<SurfaceRef> surface_ref; |
434 ViewSurfaceMap::iterator it = acquired_surfaces_.find(view_id); | 416 SurfaceRefMap::iterator it = surface_refs_.find(view_id); |
435 if (it != acquired_surfaces_.end()) | 417 if (it != surface_refs_.end()) |
436 view_surface = (*it).second; | 418 surface_ref = (*it).second; |
437 else | 419 else |
438 view_surface.reset(new ViewSurface(view_id)); | 420 surface_ref.reset(new SurfaceRef(compositing_surface)); |
| 421 #endif // defined(OS_LINUX) |
439 | 422 |
440 if (view_surface->surface() != gfx::kNullPluginWindow && | 423 if (compositing_surface != gfx::kNullPluginWindow && |
441 Send(new GpuMsg_CreateViewCommandBuffer( | 424 Send(new GpuMsg_CreateViewCommandBuffer( |
442 view_surface->surface(), render_view_id, renderer_id, init_params))) { | 425 compositing_surface, render_view_id, renderer_id, init_params))) { |
443 create_command_buffer_requests_.push(wrapped_callback); | 426 create_command_buffer_requests_.push(wrapped_callback); |
444 acquired_surfaces_.insert(std::pair<ViewID, linked_ptr<ViewSurface> >( | 427 #if defined(OS_LINUX) |
445 view_id, view_surface)); | 428 surface_refs_.insert(std::pair<ViewID, linked_ptr<SurfaceRef> >( |
| 429 view_id, surface_ref)); |
| 430 #endif // defined(OS_LINUX) |
446 } else { | 431 } else { |
447 CreateCommandBufferError(wrapped_callback.release(), MSG_ROUTING_NONE); | 432 CreateCommandBufferError(wrapped_callback.release(), MSG_ROUTING_NONE); |
448 } | 433 } |
449 } | 434 } |
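
The flow above is lookup-or-create followed by insert-on-success: reuse the SurfaceRef already held for this (renderer id, render view id) pair if there is one, otherwise create a new one, and only record it in surface_refs_ once the GpuMsg_CreateViewCommandBuffer send has gone out. A simplified, self-contained sketch of that flow; ViewKey, SurfaceLock, SendCreateMessage and CreateForView are illustrative names, and std::shared_ptr stands in for Chromium's linked_ptr.

#include <map>
#include <memory>
#include <utility>

typedef std::pair<int, int> ViewKey;  // (renderer_id, render_view_id)

// Stand-in for SurfaceRef: something that pins the surface while it is alive.
struct SurfaceLock {
  explicit SurfaceLock(unsigned long surface) : surface_(surface) {}
  unsigned long surface_;
};

typedef std::map<ViewKey, std::shared_ptr<SurfaceLock> > SurfaceLockMap;

// Stand-in for Send(new GpuMsg_CreateViewCommandBuffer(...)).
bool SendCreateMessage(unsigned long surface) { return surface != 0; }

bool CreateForView(SurfaceLockMap* locks, ViewKey key, unsigned long surface) {
  // Reuse the lock already held for this view (e.g. after a lost context),
  // otherwise take a fresh one.
  std::shared_ptr<SurfaceLock> lock;
  SurfaceLockMap::iterator it = locks->find(key);
  if (it != locks->end())
    lock = it->second;
  else
    lock.reset(new SurfaceLock(surface));

  // Record the lock only if the request was actually sent.
  if (surface != 0 && SendCreateMessage(surface)) {
    locks->insert(std::make_pair(key, lock));
    return true;
  }
  return false;
}

On the failure path a freshly created lock is simply dropped, so a surface is never pinned for a command buffer that was never requested.
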
450 | 435 |
451 #if defined(OS_MACOSX) | 436 #if defined(OS_MACOSX) |
452 | 437 |
453 void GpuProcessHostUIShim::DidDestroyAcceleratedSurface(int renderer_id, | 438 void GpuProcessHostUIShim::DidDestroyAcceleratedSurface(int renderer_id, |
454 int render_view_id) { | 439 int render_view_id) { |
455 // Destroy the command buffer that owns the accelerated surface. | 440 // Destroy the command buffer that owns the accelerated surface. |
(...skipping 131 matching lines...)
587 if (route_id == MSG_ROUTING_NONE) | 572 if (route_id == MSG_ROUTING_NONE) |
588 CreateCommandBufferError(callback.release(), route_id); | 573 CreateCommandBufferError(callback.release(), route_id); |
589 else | 574 else |
590 callback->Run(route_id); | 575 callback->Run(route_id); |
591 } | 576 } |
592 } | 577 } |
593 | 578 |
594 void GpuProcessHostUIShim::OnDestroyCommandBuffer( | 579 void GpuProcessHostUIShim::OnDestroyCommandBuffer( |
595 gfx::PluginWindowHandle window, int32 renderer_id, | 580 gfx::PluginWindowHandle window, int32 renderer_id, |
596 int32 render_view_id) { | 581 int32 render_view_id) { |
| 582 #if defined(OS_LINUX) |
597 ViewID view_id(renderer_id, render_view_id); | 583 ViewID view_id(renderer_id, render_view_id); |
598 ViewSurfaceMap::iterator it = acquired_surfaces_.find(view_id); | 584 SurfaceRefMap::iterator it = surface_refs_.find(view_id); |
599 if (it != acquired_surfaces_.end()) | 585 if (it != surface_refs_.end()) |
600 acquired_surfaces_.erase(it); | 586 surface_refs_.erase(it); |
| 587 #endif // defined(OS_LINUX) |
601 } | 588 } |
602 | 589 |
603 void GpuProcessHostUIShim::OnGraphicsInfoCollected(const GPUInfo& gpu_info) { | 590 void GpuProcessHostUIShim::OnGraphicsInfoCollected(const GPUInfo& gpu_info) { |
604 gpu_data_manager_->UpdateGpuInfo(gpu_info); | 591 gpu_data_manager_->UpdateGpuInfo(gpu_info); |
605 } | 592 } |
606 | 593 |
607 void GpuProcessHostUIShim::OnLogMessage(int level, | 594 void GpuProcessHostUIShim::OnLogMessage(int level, |
608 const std::string& header, | 595 const std::string& header, |
609 const std::string& message) { | 596 const std::string& message) { |
610 DictionaryValue* dict = new DictionaryValue(); | 597 DictionaryValue* dict = new DictionaryValue(); |
(...skipping 61 matching lines...)
672 int render_view_id) { | 659 int render_view_id) { |
673 RenderViewHost* host = RenderViewHost::FromID(renderer_id, | 660 RenderViewHost* host = RenderViewHost::FromID(renderer_id, |
674 render_view_id); | 661 render_view_id); |
675 if (!host) { | 662 if (!host) { |
676 return; | 663 return; |
677 } | 664 } |
678 host->ScheduleComposite(); | 665 host->ScheduleComposite(); |
679 } | 666 } |
680 | 667 |
681 #endif | 668 #endif |