// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/browser/renderer_host/context_provider_factory_impl_android.h"

#include <queue>
#include <string>
#include <utility>

#include "base/auto_reset.h"
#include "base/command_line.h"
#include "base/lazy_instance.h"
#include "base/memory/ref_counted.h"
#include "base/memory/singleton.h"
#include "cc/output/context_provider.h"
#include "cc/output/vulkan_in_process_context_provider.h"
#include "cc/surfaces/surface_manager.h"
#include "content/browser/gpu/browser_gpu_channel_host_factory.h"
#include "content/browser/gpu/browser_gpu_memory_buffer_manager.h"
#include "content/browser/gpu/compositor_util.h"
#include "content/browser/gpu/gpu_surface_tracker.h"
#include "content/common/gpu/client/context_provider_command_buffer.h"
#include "content/common/host_shared_bitmap_manager.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
#include "gpu/ipc/client/gpu_channel_host.h"
| 25 namespace content { | 25 namespace content { |
| 26 | 26 |
| 27 namespace { | 27 namespace { |
| 28 | 28 |
| 29 command_buffer_metrics::ContextType ToCommandBufferContextType( | 29 command_buffer_metrics::ContextType ToCommandBufferContextType( |
| 30 ui::ContextProviderFactory::ContextType context_type) { | 30 ui::ContextProviderFactory::ContextType context_type) { |
| 31 switch (context_type) { | 31 switch (context_type) { |
| 32 case ui::ContextProviderFactory::ContextType:: | 32 case ui::ContextProviderFactory::ContextType:: |
| 33 BLIMP_RENDER_COMPOSITOR_CONTEXT: | 33 BLIMP_RENDER_COMPOSITOR_CONTEXT: |
| 34 return command_buffer_metrics::BLIMP_RENDER_COMPOSITOR_CONTEXT; | 34 return command_buffer_metrics::BLIMP_RENDER_COMPOSITOR_CONTEXT; |
| 35 case ui::ContextProviderFactory::ContextType::BLIMP_RENDER_WORKER_CONTEXT: | 35 case ui::ContextProviderFactory::ContextType::BLIMP_RENDER_WORKER_CONTEXT: |
| 36 return command_buffer_metrics::BLIMP_RENDER_WORKER_CONTEXT; | 36 return command_buffer_metrics::BLIMP_RENDER_WORKER_CONTEXT; |
| 37 } | 37 } |
| 38 NOTREACHED(); | 38 NOTREACHED(); |
| 39 return command_buffer_metrics::CONTEXT_TYPE_UNKNOWN; | 39 return command_buffer_metrics::CONTEXT_TYPE_UNKNOWN; |
| 40 } | 40 } |
| 41 | 41 |
| 42 ContextProviderFactoryImpl* instance = nullptr; |
| 43 |
| 42 } // namespace | 44 } // namespace |
| 43 | 45 |
| 44 // static | 46 // static |
| 45 ContextProviderFactoryImpl* ContextProviderFactoryImpl::GetInstance() { | 47 void ContextProviderFactoryImpl::Initialize( |
| 46 return base::Singleton<ContextProviderFactoryImpl>::get(); | 48 gpu::GpuChannelEstablishFactory* gpu_channel_factory) { |
| 49 DCHECK(!instance); |
| 50 instance = new ContextProviderFactoryImpl(gpu_channel_factory); |
| 47 } | 51 } |
| 48 | 52 |
| 49 ContextProviderFactoryImpl::ContextProviderFactoryImpl() | 53 void ContextProviderFactoryImpl::Terminate() { |
| 50 : in_handle_pending_requests_(false), | 54 DCHECK(instance); |
| 55 delete instance; |
| 56 instance = nullptr; |
| 57 } |
| 58 |
| 59 // static |
| 60 ContextProviderFactoryImpl* ContextProviderFactoryImpl::GetInstance() { |
| 61 return instance; |
| 62 } |
| 63 |
| 64 ContextProviderFactoryImpl::ContextProviderFactoryImpl( |
| 65 gpu::GpuChannelEstablishFactory* gpu_channel_factory) |
| 66 : gpu_channel_factory_(gpu_channel_factory), |
| 67 in_handle_pending_requests_(false), |
| 68 in_shutdown_(false), |
| 51 surface_client_id_(0), | 69 surface_client_id_(0), |
| 52 weak_factory_(this) {} | 70 weak_factory_(this) { |
| 71 DCHECK(gpu_channel_factory_); |
| 72 } |
| 53 | 73 |
| 54 ContextProviderFactoryImpl::~ContextProviderFactoryImpl() {} | 74 ContextProviderFactoryImpl::~ContextProviderFactoryImpl() { |
| 55 | 75 in_shutdown_ = true; |
| 56 ContextProviderFactoryImpl::ContextProvidersRequest::ContextProvidersRequest() | 76 if (!gpu_channel_requests_.empty()) |
| 57 : context_type(command_buffer_metrics::CONTEXT_TYPE_UNKNOWN), | 77 HandlePendingRequests(nullptr, |
| 58 surface_handle(gpu::kNullSurfaceHandle), | 78 GpuChannelHostResult::FAILURE_FACTORY_SHUTDOWN); |
| 59 support_locking(false), | 79 } |
| 60 automatic_flushes(false), | |
| 61 shared_context_provider(nullptr) {} | |
| 62 | |
| 63 ContextProviderFactoryImpl::ContextProvidersRequest::ContextProvidersRequest( | |
| 64 const ContextProvidersRequest& other) = default; | |
| 65 | |
| 66 ContextProviderFactoryImpl::ContextProvidersRequest:: | |
| 67 ~ContextProvidersRequest() = default; | |
| 68 | 80 |
| 69 scoped_refptr<cc::VulkanContextProvider> | 81 scoped_refptr<cc::VulkanContextProvider> |
| 70 ContextProviderFactoryImpl::GetSharedVulkanContextProvider() { | 82 ContextProviderFactoryImpl::GetSharedVulkanContextProvider() { |
| 71 if (!shared_vulkan_context_provider_) | 83 if (!shared_vulkan_context_provider_) |
| 72 shared_vulkan_context_provider_ = | 84 shared_vulkan_context_provider_ = |
| 73 cc::VulkanInProcessContextProvider::Create(); | 85 cc::VulkanInProcessContextProvider::Create(); |
| 74 | 86 |
| 75 return shared_vulkan_context_provider_.get(); | 87 return shared_vulkan_context_provider_.get(); |
| 76 } | 88 } |
| 77 | 89 |
| 78 void ContextProviderFactoryImpl::CreateDisplayContextProvider( | 90 void ContextProviderFactoryImpl::RequestGpuChannelHost( |
| 91 GpuChannelHostCallback callback) { |
| 92 DCHECK(!in_shutdown_) |
| 93 << "The factory is shutting down, can't handle new requests"; |
| 94 |
| 95 gpu_channel_requests_.push(callback); |
| 96 // If the channel is available, the factory will run the callback |
| 97 // synchronously so we'll handle this request there. |
| 98 EstablishGpuChannel(); |
| 99 } |
| 100 |
| 101 scoped_refptr<cc::ContextProvider> |
| 102 ContextProviderFactoryImpl::CreateDisplayContextProvider( |
| 79 gpu::SurfaceHandle surface_handle, | 103 gpu::SurfaceHandle surface_handle, |
| 80 gpu::SharedMemoryLimits shared_memory_limits, | 104 gpu::SharedMemoryLimits shared_memory_limits, |
| 81 gpu::gles2::ContextCreationAttribHelper attributes, | 105 gpu::gles2::ContextCreationAttribHelper attributes, |
| 82 bool support_locking, | 106 bool support_locking, |
| 83 bool automatic_flushes, | 107 bool automatic_flushes, |
| 84 ContextProviderCallback result_callback) { | 108 scoped_refptr<gpu::GpuChannelHost> gpu_channel_host) { |
| 85 DCHECK(surface_handle != gpu::kNullSurfaceHandle); | 109 DCHECK(surface_handle != gpu::kNullSurfaceHandle); |
| 86 CreateContextProviderInternal( | 110 return CreateContextProviderInternal( |
| 87 command_buffer_metrics::DISPLAY_COMPOSITOR_ONSCREEN_CONTEXT, | 111 command_buffer_metrics::DISPLAY_COMPOSITOR_ONSCREEN_CONTEXT, |
| 88 surface_handle, shared_memory_limits, attributes, support_locking, | 112 surface_handle, shared_memory_limits, attributes, support_locking, |
| 89 automatic_flushes, nullptr, result_callback); | 113 automatic_flushes, nullptr, std::move(gpu_channel_host)); |
| 90 } | 114 } |
| 91 | 115 |
| 92 void ContextProviderFactoryImpl::CreateOffscreenContextProvider( | 116 scoped_refptr<cc::ContextProvider> |
| 117 ContextProviderFactoryImpl::CreateOffscreenContextProvider( |
| 93 ContextType context_type, | 118 ContextType context_type, |
| 94 gpu::SharedMemoryLimits shared_memory_limits, | 119 gpu::SharedMemoryLimits shared_memory_limits, |
| 95 gpu::gles2::ContextCreationAttribHelper attributes, | 120 gpu::gles2::ContextCreationAttribHelper attributes, |
| 96 bool support_locking, | 121 bool support_locking, |
| 97 bool automatic_flushes, | 122 bool automatic_flushes, |
| 98 cc::ContextProvider* shared_context_provider, | 123 cc::ContextProvider* shared_context_provider, |
| 99 ContextProviderCallback result_callback) { | 124 scoped_refptr<gpu::GpuChannelHost> gpu_channel_host) { |
| 100 CreateContextProviderInternal(ToCommandBufferContextType(context_type), | 125 return CreateContextProviderInternal( |
| 101 gpu::kNullSurfaceHandle, shared_memory_limits, | 126 ToCommandBufferContextType(context_type), gpu::kNullSurfaceHandle, |
| 102 attributes, support_locking, automatic_flushes, | 127 shared_memory_limits, attributes, support_locking, automatic_flushes, |
| 103 shared_context_provider, result_callback); | 128 shared_context_provider, std::move(gpu_channel_host)); |
| 104 } | 129 } |
| 105 | 130 |
| 106 cc::SurfaceManager* ContextProviderFactoryImpl::GetSurfaceManager() { | 131 cc::SurfaceManager* ContextProviderFactoryImpl::GetSurfaceManager() { |
| 107 if (!surface_manager_) | 132 if (!surface_manager_) |
| 108 surface_manager_ = base::WrapUnique(new cc::SurfaceManager); | 133 surface_manager_ = base::WrapUnique(new cc::SurfaceManager); |
| 109 | 134 |
| 110 return surface_manager_.get(); | 135 return surface_manager_.get(); |
| 111 } | 136 } |
| 112 | 137 |
| 113 uint32_t ContextProviderFactoryImpl::AllocateSurfaceClientId() { | 138 uint32_t ContextProviderFactoryImpl::AllocateSurfaceClientId() { |
| 114 return ++surface_client_id_; | 139 return ++surface_client_id_; |
| 115 } | 140 } |
| 116 | 141 |
| 117 cc::SharedBitmapManager* ContextProviderFactoryImpl::GetSharedBitmapManager() { | 142 cc::SharedBitmapManager* ContextProviderFactoryImpl::GetSharedBitmapManager() { |
| 118 return HostSharedBitmapManager::current(); | 143 return HostSharedBitmapManager::current(); |
| 119 } | 144 } |
| 120 | 145 |
| 121 gpu::GpuMemoryBufferManager* | 146 gpu::GpuMemoryBufferManager* |
| 122 ContextProviderFactoryImpl::GetGpuMemoryBufferManager() { | 147 ContextProviderFactoryImpl::GetGpuMemoryBufferManager() { |
| 123 return BrowserGpuMemoryBufferManager::current(); | 148 return BrowserGpuMemoryBufferManager::current(); |
| 124 } | 149 } |
| 125 | 150 |
| 126 void ContextProviderFactoryImpl::CreateContextProviderInternal( | 151 scoped_refptr<cc::ContextProvider> |
| 152 ContextProviderFactoryImpl::CreateContextProviderInternal( |
| 127 command_buffer_metrics::ContextType context_type, | 153 command_buffer_metrics::ContextType context_type, |
| 128 gpu::SurfaceHandle surface_handle, | 154 gpu::SurfaceHandle surface_handle, |
| 129 gpu::SharedMemoryLimits shared_memory_limits, | 155 gpu::SharedMemoryLimits shared_memory_limits, |
| 130 gpu::gles2::ContextCreationAttribHelper attributes, | 156 gpu::gles2::ContextCreationAttribHelper attributes, |
| 131 bool support_locking, | 157 bool support_locking, |
| 132 bool automatic_flushes, | 158 bool automatic_flushes, |
| 133 cc::ContextProvider* shared_context_provider, | 159 cc::ContextProvider* shared_context_provider, |
| 134 ContextProviderCallback result_callback) { | 160 scoped_refptr<gpu::GpuChannelHost> gpu_channel_host) { |
| 135 DCHECK(!result_callback.is_null()); | 161 return make_scoped_refptr(new ContextProviderCommandBuffer( |
| 136 | 162 std::move(gpu_channel_host), gpu::GPU_STREAM_DEFAULT, |
| 137 ContextProvidersRequest context_request; | 163 gpu::GpuStreamPriority::NORMAL, surface_handle, |
| 138 context_request.context_type = context_type; | 164 GURL(std::string("chrome://gpu/ContextProviderFactoryImpl::") + |
| 139 context_request.surface_handle = surface_handle; | 165 std::string("CompositorContextProvider")), |
| 140 context_request.shared_memory_limits = shared_memory_limits; | 166 automatic_flushes, support_locking, shared_memory_limits, attributes, |
| 141 context_request.attributes = attributes; | 167 static_cast<ContextProviderCommandBuffer*>(shared_context_provider), |
| 142 context_request.support_locking = support_locking; | 168 context_type)); |
| 143 context_request.automatic_flushes = automatic_flushes; | |
| 144 context_request.shared_context_provider = shared_context_provider; | |
| 145 context_request.result_callback = result_callback; | |
| 146 | |
| 147 context_provider_requests_.push_back(context_request); | |
| 148 HandlePendingRequests(); | |
| 149 } | 169 } |
| 150 | 170 |
| 151 void ContextProviderFactoryImpl::HandlePendingRequests() { | 171 void ContextProviderFactoryImpl::HandlePendingRequests( |
| 152 DCHECK(!context_provider_requests_.empty()) | 172 scoped_refptr<gpu::GpuChannelHost> gpu_channel_host, |
| 173 GpuChannelHostResult result) { |
| 174 DCHECK(!gpu_channel_requests_.empty()) |
| 153 << "We don't have any pending requests?"; | 175 << "We don't have any pending requests?"; |
| 176 DCHECK(gpu_channel_host || result != GpuChannelHostResult::SUCCESS); |
| 154 | 177 |
| 155 // Failure to initialize the context could result in new requests. Handle | 178 // Failure to initialize the channel could result in new requests. Handle |
| 156 // them after going through the current list. | 179 // them after going through the current list. |
| 157 if (in_handle_pending_requests_) | 180 if (in_handle_pending_requests_) |
| 158 return; | 181 return; |
| 159 | 182 |
| 160 { | 183 { |
| 161 base::AutoReset<bool> auto_reset_in_handle_requests( | 184 base::AutoReset<bool> auto_reset_in_handle_requests( |
| 162 &in_handle_pending_requests_, true); | 185 &in_handle_pending_requests_, true); |
| 163 | 186 |
| 164 scoped_refptr<gpu::GpuChannelHost> gpu_channel_host( | 187 std::queue<GpuChannelHostCallback> current_gpu_channel_requests; |
| 165 EnsureGpuChannelEstablished()); | 188 current_gpu_channel_requests.swap(gpu_channel_requests_); |
| 166 | 189 |
| 167 // If we don't have a Gpu Channel Host, we will come back here when the Gpu | 190 while (!current_gpu_channel_requests.empty()) { |
| 168 // channel is established, since OnGpuChannelEstablished triggers handling | 191 current_gpu_channel_requests.front().Run(gpu_channel_host, result); |
| 169 // of the requests we couldn't process right now. | 192 current_gpu_channel_requests.pop(); |
| 170 if (!gpu_channel_host) | |
| 171 return; | |
| 172 | |
| 173 std::list<ContextProvidersRequest> context_requests = | |
| 174 context_provider_requests_; | |
| 175 context_provider_requests_.clear(); | |
| 176 | |
| 177 for (ContextProvidersRequest& context_request : context_requests) { | |
| 178 scoped_refptr<cc::ContextProvider> context_provider; | |
| 179 | |
| 180 const bool create_onscreen_context = | |
| 181 context_request.surface_handle != gpu::kNullSurfaceHandle; | |
| 182 | |
| 183 // Is the request for an onscreen context? Make sure the surface is | |
| 184 // still valid in that case. DO NOT run the callback if we don't have a | |
| 185 // valid surface. | |
| 186 if (create_onscreen_context && | |
| 187 !GpuSurfaceTracker::GetInstance()->IsValidSurfaceHandle( | |
| 188 context_request.surface_handle)) { | |
| 189 continue; | |
| 190 } | |
| 191 | |
| 192 context_provider = new ContextProviderCommandBuffer( | |
| 193 gpu_channel_host, gpu::GPU_STREAM_DEFAULT, | |
| 194 gpu::GpuStreamPriority::NORMAL, context_request.surface_handle, | |
| 195 GURL(std::string("chrome://gpu/ContextProviderFactoryImpl::") + | |
| 196 std::string("CompositorContextProvider")), | |
| 197 context_request.automatic_flushes, context_request.support_locking, | |
| 198 context_request.shared_memory_limits, context_request.attributes, | |
| 199 static_cast<ContextProviderCommandBuffer*>( | |
| 200 context_request.shared_context_provider), | |
| 201 context_request.context_type); | |
| 202 context_request.result_callback.Run(context_provider); | |
| 203 } | 193 } |
| 204 } | 194 } |
| 205 | 195 |
| 206 if (!context_provider_requests_.empty()) | 196 if (!gpu_channel_requests_.empty()) |
| 207 HandlePendingRequests(); | 197 EstablishGpuChannel(); |
| 208 } | 198 } |
| 209 | 199 |
| 210 gpu::GpuChannelHost* ContextProviderFactoryImpl::EnsureGpuChannelEstablished() { | 200 void ContextProviderFactoryImpl::EstablishGpuChannel() { |
| 211 #if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || \ | 201 #if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || \ |
| 212 defined(SYZYASAN) || defined(CYGPROFILE_INSTRUMENTATION) | 202 defined(SYZYASAN) || defined(CYGPROFILE_INSTRUMENTATION) |
| 213 const int64_t kGpuChannelTimeoutInSeconds = 40; | 203 const int64_t kGpuChannelTimeoutInSeconds = 40; |
| 214 #else | 204 #else |
| 215 const int64_t kGpuChannelTimeoutInSeconds = 10; | 205 const int64_t kGpuChannelTimeoutInSeconds = 10; |
| 216 #endif | 206 #endif |
| 217 | 207 |
| 218 BrowserGpuChannelHostFactory* factory = | 208 // Start the timer first, if the result comes synchronously, we want it to |
| 219 BrowserGpuChannelHostFactory::instance(); | 209 // stop in the callback. |
| 220 | |
| 221 if (factory->GetGpuChannel()) | |
| 222 return factory->GetGpuChannel(); | |
| 223 | |
| 224 factory->EstablishGpuChannel( | |
| 225 base::Bind(&ContextProviderFactoryImpl::OnGpuChannelEstablished, | |
| 226 weak_factory_.GetWeakPtr())); | |
| 227 establish_gpu_channel_timeout_.Start( | 210 establish_gpu_channel_timeout_.Start( |
| 228 FROM_HERE, base::TimeDelta::FromSeconds(kGpuChannelTimeoutInSeconds), | 211 FROM_HERE, base::TimeDelta::FromSeconds(kGpuChannelTimeoutInSeconds), |
| 229 this, &ContextProviderFactoryImpl::OnGpuChannelTimeout); | 212 this, &ContextProviderFactoryImpl::OnGpuChannelTimeout); |
| 230 | 213 |
| 231 return nullptr; | 214 gpu_channel_factory_->EstablishGpuChannel( |
| 215 base::Bind(&ContextProviderFactoryImpl::OnGpuChannelEstablished, |
| 216 weak_factory_.GetWeakPtr())); |
| 232 } | 217 } |
| 233 | 218 |
| 234 void ContextProviderFactoryImpl::OnGpuChannelEstablished( | 219 void ContextProviderFactoryImpl::OnGpuChannelEstablished( |
| 235 scoped_refptr<gpu::GpuChannelHost> gpu_channel) { | 220 scoped_refptr<gpu::GpuChannelHost> gpu_channel) { |
| 236 establish_gpu_channel_timeout_.Stop(); | 221 establish_gpu_channel_timeout_.Stop(); |
| 237 | 222 |
| 238 // This should happen only during shutdown. So early out instead of queuing | |
| 239 // more requests with the factory. | |
| 240 if (!gpu_channel) | |
| 241 return; | |
| 242 | |
| 243 // We can queue the Gpu Channel initialization requests multiple times as | 223 // We can queue the Gpu Channel initialization requests multiple times as |
| 244 // we get context requests. So we might have already handled any pending | 224 // we get context requests. So we might have already handled any pending |
| 245 // requests when this callback runs. | 225 // requests when this callback runs. |
| 246 if (!context_provider_requests_.empty()) | 226 if (gpu_channel_requests_.empty()) |
| 247 HandlePendingRequests(); | 227 return; |
| 228 |
| 229 if (gpu_channel) { |
| 230 HandlePendingRequests(std::move(gpu_channel), |
| 231 GpuChannelHostResult::SUCCESS); |
| 232 } else { |
| 233 HandlePendingRequests( |
| 234 nullptr, |
| 235 GpuChannelHostResult::FAILURE_GPU_PROCESS_INITIALIZATION_FAILED); |
| 236 } |
| 248 } | 237 } |
| 249 | 238 |
| 250 void ContextProviderFactoryImpl::OnGpuChannelTimeout() { | 239 void ContextProviderFactoryImpl::OnGpuChannelTimeout() { |
| 251 LOG(FATAL) << "Timed out waiting for GPU channel."; | 240 LOG(FATAL) << "Timed out waiting for GPU channel."; |
| 252 } | 241 } |
| 253 | 242 |
| 254 } // namespace content | 243 } // namespace content |
| OLD | NEW |