Chromium Code Reviews| Index: content/browser/renderer_host/context_provider_factory_impl_android.cc |
| diff --git a/content/browser/renderer_host/context_provider_factory_impl_android.cc b/content/browser/renderer_host/context_provider_factory_impl_android.cc |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..341a6172509ab67bfc6d833b54c9973aa23507b3 |
| --- /dev/null |
| +++ b/content/browser/renderer_host/context_provider_factory_impl_android.cc |
| @@ -0,0 +1,262 @@ |
| +// Copyright 2016 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
#include "content/browser/renderer_host/context_provider_factory_impl_android.h"

#include "base/auto_reset.h"
#include "base/command_line.h"
#include "base/lazy_instance.h"
#include "base/memory/ptr_util.h"
#include "base/memory/singleton.h"
#include "cc/output/context_provider.h"
#include "cc/output/vulkan_in_process_context_provider.h"
#include "cc/surfaces/surface_manager.h"
#include "content/browser/gpu/browser_gpu_channel_host_factory.h"
#include "content/browser/gpu/compositor_util.h"
#include "content/common/gpu/client/context_provider_command_buffer.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "gpu/ipc/client/gpu_channel_host.h"
| + |
| +namespace content { |
| + |
| +base::LazyInstance<scoped_refptr<cc::VulkanInProcessContextProvider>> |
| + g_shared_vulkan_context_provider_android = LAZY_INSTANCE_INITIALIZER; |
| + |
| +// static |
| +ContextProviderFactoryImpl* ContextProviderFactoryImpl::GetInstance() { |
| + return base::Singleton<ContextProviderFactoryImpl>::get(); |
| +} |
| + |
| +ContextProviderFactoryImpl::ContextProviderFactoryImpl() |
| + : in_handle_pending_requests_(false), |
| + surface_client_id_(0), |
| + weak_factory_(this) {} |
| + |
| +ContextProviderFactoryImpl::~ContextProviderFactoryImpl() {} |
| + |
| +void ContextProviderFactoryImpl::RequestRenderContextProvider( |
| + ContextProviderCallback result_callback) { |
|
no sievers
2016/07/29 19:14:30
DCHECK(!in_handle_pending_requests_) since it woul
Khushal
2016/07/29 21:30:55
We copy the list before iterating. I figured that
|
| + render_context_request_callbacks_.push_back(result_callback); |
| + |
| + HandlePendingRequests(); |
| +} |
| + |
| +cc::SurfaceManager* ContextProviderFactoryImpl::GetSurfaceManager() { |
| + if (!surface_manager_) |
| + surface_manager_ = base::WrapUnique(new cc::SurfaceManager); |
| + |
| + return surface_manager_.get(); |
| +} |
| + |
| +uint32_t ContextProviderFactoryImpl::AllocateSurfaceClientId() { |
| + return ++surface_client_id_; |
| +} |
| + |
| +void ContextProviderFactoryImpl::RequestGpuChannel( |
| + GpuChannelRequestCallback gpu_channel_request_callback) { |
| + gpu_channel_request_callback_ = gpu_channel_request_callback; |
| + |
| + HandlePendingRequests(); |
| +} |
| + |
| +scoped_refptr<cc::VulkanInProcessContextProvider> |
| +ContextProviderFactoryImpl::GetSharedVulkanContextProviderAndroid() { |
| + if (base::CommandLine::ForCurrentProcess()->HasSwitch( |
| + switches::kEnableVulkan)) { |
| + scoped_refptr<cc::VulkanInProcessContextProvider>* context_provider = |
| + g_shared_vulkan_context_provider_android.Pointer(); |
| + if (!*context_provider) |
| + *context_provider = cc::VulkanInProcessContextProvider::Create(); |
| + return *context_provider; |
| + } |
| + return nullptr; |
| +} |
| + |
// Drains the queued render-context and gpu-channel requests. Re-entrant calls
// (e.g. from a request callback that queues a new request) are deferred via
// |in_handle_pending_requests_| and picked up by the tail call at the bottom.
void ContextProviderFactoryImpl::HandlePendingRequests() {
  // Failure to initialize the context could result in new requests. Handle
  // them after going through the current list.
  if (in_handle_pending_requests_)
    return;

  {
    base::AutoReset<bool> auto_reset_in_handle_requests(
        &in_handle_pending_requests_, true);

    // When Vulkan is enabled the shared Vulkan provider satisfies render
    // context requests, so no GPU channel is requested for them.
    scoped_refptr<cc::VulkanInProcessContextProvider> vulkan_context_provider =
        GetSharedVulkanContextProviderAndroid();

    scoped_refptr<gpu::GpuChannelHost> gpu_channel_host(
        vulkan_context_provider ? nullptr : EnsureGpuChannelEstablished());

    // Nothing can be satisfied right now; OnGpuChannelEstablished() will
    // re-run this method once the channel comes up.
    if (!vulkan_context_provider && !gpu_channel_host)
      return;

    if (!render_context_request_callbacks_.empty()) {
      // Iterate over a copy so that callbacks may queue new requests without
      // invalidating the iteration.
      std::list<ContextProviderCallback> context_requests =
          render_context_request_callbacks_;
      render_context_request_callbacks_.clear();

      for (ContextProviderCallback& context_request : context_requests) {
        ContextProviders context_providers;

        if (vulkan_context_provider)
          context_providers.vulkan_context_provider = vulkan_context_provider;
        else
          CreateRenderCompositorContexts(context_providers, gpu_channel_host);

        context_request.Run(context_providers);
      }
    }

    if (!gpu_channel_request_callback_.is_null()) {
      // The Vulkan path above skips establishing a channel, but the channel
      // request still needs a real one.
      if (!gpu_channel_host)
        gpu_channel_host = EnsureGpuChannelEstablished();

      if (!gpu_channel_host)
        return;

      // Clear the stored callback before running it so the callback itself
      // may queue a fresh channel request.
      GpuChannelRequestCallback gpu_request_callback =
          gpu_channel_request_callback_;
      gpu_channel_request_callback_ = GpuChannelRequestCallback();
      gpu_request_callback.Run(gpu_channel_host);
    }
  }

  // Requests queued while the block above ran are handled now that
  // |in_handle_pending_requests_| has been reset.
  if (!gpu_channel_request_callback_.is_null() ||
      !render_context_request_callbacks_.empty())
    HandlePendingRequests();
}
| + |
| +void ContextProviderFactoryImpl::CreateRenderCompositorContexts( |
|
no sievers
2016/07/29 19:14:30
nit: this is mainly used for the browser composito
Khushal
2016/07/29 21:30:55
This is going to be used for Blimp compositor too.
no sievers
2016/07/29 21:42:41
Hmm or how about just passing the config bits as a
Khushal
2016/07/29 22:02:26
That's cool too. Let me put up a patch and see wha
|
| + ContextProviders& context_providers, |
| + scoped_refptr<gpu::GpuChannelHost> gpu_channel_host) { |
| + DCHECK(gpu_channel_host); |
| + |
| + // TODO(khushalsagar): Figure out if we can share this code with the |
| + // renderer compositor. |
| + scoped_refptr<ContextProviderCommandBuffer> worker_context_provider = |
| + SharedCompositorWorkerContextProvider(gpu_channel_host); |
| + if (!worker_context_provider) { |
| + // Cause the compositor to wait and try again. |
| + return; |
| + } |
| + |
| + // The renderer compositor context doesn't do a lot of stuff, so we don't |
| + // expect it to need a lot of space for commands or transfer. Raster and |
| + // uploads happen on the worker context instead. |
| + gpu::SharedMemoryLimits limits = gpu::SharedMemoryLimits::ForMailboxContext(); |
|
no sievers
2016/07/29 19:14:30
Why would 'ForMailboxContext' be correct for any s
danakj
2016/07/29 19:23:24
The (non-display) compositor context doesn't do an
|
| + |
| + // This is for an offscreen context for the compositor. So the default |
| + // framebuffer doesn't need alpha, depth, stencil, antialiasing. |
| + gpu::gles2::ContextCreationAttribHelper attributes; |
| + attributes.alpha_size = -1; |
| + attributes.depth_size = 0; |
| + attributes.stencil_size = 0; |
| + attributes.samples = 0; |
| + attributes.sample_buffers = 0; |
| + attributes.bind_generates_resource = false; |
| + attributes.lose_context_when_out_of_memory = true; |
| + |
| + constexpr bool automatic_flushes = false; |
| + constexpr bool support_locking = false; |
| + |
| + // The compositor context shares resources with the worker context unless |
| + // the worker is async. |
| + ContextProviderCommandBuffer* share_context = worker_context_provider.get(); |
| + if (IsAsyncWorkerContextEnabled()) |
| + share_context = nullptr; |
| + |
| + scoped_refptr<ContextProviderCommandBuffer> context_provider( |
| + new ContextProviderCommandBuffer( |
| + gpu_channel_host, gpu::GPU_STREAM_DEFAULT, |
| + gpu::GpuStreamPriority::NORMAL, gpu::kNullSurfaceHandle, |
| + GURL(std::string("chrome://gpu/ContextProviderFactoryImpl::") + |
| + std::string("CompositorContextProvider")), |
| + automatic_flushes, support_locking, limits, attributes, share_context, |
| + command_buffer_metrics::RENDER_COMPOSITOR_CONTEXT)); |
| + |
| + context_providers.compositor_context_provider = std::move(context_provider); |
| + context_providers.worker_context_provider = |
| + std::move(worker_context_provider); |
| +} |
| + |
| +scoped_refptr<ContextProviderCommandBuffer> |
| +ContextProviderFactoryImpl::SharedCompositorWorkerContextProvider( |
| + scoped_refptr<gpu::GpuChannelHost> gpu_channel_host) { |
| + // Try to reuse existing shared worker context provider. |
| + if (shared_worker_context_provider_) { |
| + // Note: If context is lost, delete reference after releasing the lock. |
| + cc::ContextProvider::ScopedContextLock lock( |
| + shared_worker_context_provider_.get()); |
| + if (shared_worker_context_provider_->ContextGL() |
| + ->GetGraphicsResetStatusKHR() == GL_NO_ERROR) |
| + return shared_worker_context_provider_; |
| + } |
| + |
| + int32_t stream_id = gpu::GPU_STREAM_DEFAULT; |
| + gpu::GpuStreamPriority stream_priority = gpu::GpuStreamPriority::NORMAL; |
| + if (IsAsyncWorkerContextEnabled()) { |
| + stream_id = gpu_channel_host->GenerateStreamID(); |
| + stream_priority = gpu::GpuStreamPriority::LOW; |
| + } |
| + |
| + bool support_locking = true; |
| + gpu::gles2::ContextCreationAttribHelper attributes; |
| + attributes.alpha_size = -1; |
| + attributes.depth_size = 0; |
| + attributes.stencil_size = 0; |
| + attributes.samples = 0; |
| + attributes.sample_buffers = 0; |
| + attributes.bind_generates_resource = false; |
| + attributes.lose_context_when_out_of_memory = true; |
| + const bool automatic_flushes = false; |
| + |
| + shared_worker_context_provider_ = new ContextProviderCommandBuffer( |
| + std::move(gpu_channel_host), stream_id, stream_priority, |
| + gpu::kNullSurfaceHandle, |
| + GURL(std::string("chrome://gpu/ContextProviderFactoryImpl::") + |
| + std::string("SharedCompositorWorkerContextProvider")), |
| + automatic_flushes, support_locking, gpu::SharedMemoryLimits(), attributes, |
| + nullptr, command_buffer_metrics::RENDER_WORKER_CONTEXT); |
| + |
| + if (!shared_worker_context_provider_->BindToCurrentThread()) |
| + shared_worker_context_provider_ = nullptr; |
| + return shared_worker_context_provider_; |
| +} |
| + |
| +gpu::GpuChannelHost* ContextProviderFactoryImpl::EnsureGpuChannelEstablished() { |
| +#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || \ |
| + defined(SYZYASAN) || defined(CYGPROFILE_INSTRUMENTATION) |
| + const int64_t kGpuChannelTimeoutInSeconds = 40; |
| +#else |
| + const int64_t kGpuChannelTimeoutInSeconds = 10; |
| +#endif |
| + |
| + BrowserGpuChannelHostFactory* factory = |
| + BrowserGpuChannelHostFactory::instance(); |
| + |
| + if (factory->GetGpuChannel()) |
| + return factory->GetGpuChannel(); |
| + |
| + factory->EstablishGpuChannel( |
| + CAUSE_FOR_GPU_LAUNCH_DISPLAY_COMPOSITOR_CONTEXT, |
| + base::Bind(&ContextProviderFactoryImpl::OnGpuChannelEstablished, |
| + weak_factory_.GetWeakPtr())); |
| + establish_gpu_channel_timeout_.Start( |
| + FROM_HERE, base::TimeDelta::FromSeconds(kGpuChannelTimeoutInSeconds), |
| + this, &ContextProviderFactoryImpl::OnGpuChannelTimeout); |
| + |
| + return nullptr; |
| +} |
| + |
| +void ContextProviderFactoryImpl::OnGpuChannelEstablished() { |
| + establish_gpu_channel_timeout_.Stop(); |
| + HandlePendingRequests(); |
| +} |
| + |
| +void ContextProviderFactoryImpl::OnGpuChannelTimeout() { |
| + LOG(FATAL) << "Timed out waiting for GPU channel."; |
| +} |
| + |
| +} // namespace content |