Index: services/ui/gpu/gpu_service_internal.cc
diff --git a/services/ui/gpu/gpu_service_internal.cc b/services/ui/gpu/gpu_service_internal.cc
index 2eaf203452a8cd89d47e29901b42340924e183d5..d771c280d2ab3a09a64a845c0e4b0f8a573fef92 100644
--- a/services/ui/gpu/gpu_service_internal.cc
+++ b/services/ui/gpu/gpu_service_internal.cc
@@ -5,6 +5,7 @@
 #include "services/ui/gpu/gpu_service_internal.h"
 #include "base/memory/shared_memory.h"
+#include "base/synchronization/waitable_event.h"
 #include "base/threading/thread_task_runner_handle.h"
 #include "build/build_config.h"
 #include "gpu/command_buffer/service/gpu_switches.h"
@@ -33,6 +34,15 @@
 #endif
 namespace ui {
+namespace {
+
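+// Reply callback run on the originating thread once the GPU thread has filled
+// in |channel_handle|; it simply forwards the pipe to the original callback.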
+void EstablishGpuChannelDone(
+    mojo::ScopedMessagePipeHandle* channel_handle,
+    const GpuServiceInternal::EstablishGpuChannelCallback& callback) {
+  callback.Run(std::move(*channel_handle));
+}
+
+}  // namespace
 GpuServiceInternal::GpuServiceInternal(
     const gpu::GPUInfo& gpu_info,
@@ -46,13 +56,154 @@
       watchdog_thread_(watchdog_thread),
       gpu_memory_buffer_factory_(gpu_memory_buffer_factory),
       gpu_info_(gpu_info),
-      binding_(this) {
+      binding_(this) {}
+
+GpuServiceInternal::~GpuServiceInternal() {
+  // Signal this event before destroying the child process. That way all
+  // background threads can clean up.
+  // For example, in the renderer the RenderThread instances will be able to
+  // notice shutdown before the render process begins waiting for them to exit.
+  shutdown_event_.Signal();
+  io_thread_.Stop();
+}
+
+void GpuServiceInternal::Add(mojom::GpuServiceInternalRequest request) {
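+  // Close any existing binding first so the service can be re-bound to the
+  // incoming request.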
+  binding_.Close();
+  binding_.Bind(std::move(request));
+}
+
+void GpuServiceInternal::EstablishGpuChannelInternal(
+    int32_t client_id,
+    uint64_t client_tracing_id,
+    bool preempts,
+    bool allow_view_command_buffers,
+    bool allow_real_time_streams,
+    const EstablishGpuChannelCallback& callback) {
+  DCHECK(CalledOnValidThread());
+
+  if (!gpu_channel_manager_) {
+    callback.Run(mojo::ScopedMessagePipeHandle());
+    return;
+  }
+
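+  // |channel_handle| is filled in on the GPU thread and handed back to this
+  // thread via PostTaskAndReply(); base::Owned() deletes it once the reply
+  // (EstablishGpuChannelDone) has run.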
+  auto* channel_handle = new mojo::ScopedMessagePipeHandle;
+  gpu_thread_.task_runner()->PostTaskAndReply(
+      FROM_HERE,
+      base::Bind(&GpuServiceInternal::EstablishGpuChannelOnGpuThread,
+                 base::Unretained(this), client_id, client_tracing_id, preempts,
+                 allow_view_command_buffers, allow_real_time_streams,
+                 base::Unretained(channel_handle)),
+      base::Bind(&EstablishGpuChannelDone, base::Owned(channel_handle),
+                 callback));
+}
+
+gfx::GpuMemoryBufferHandle GpuServiceInternal::CreateGpuMemoryBuffer(
+    gfx::GpuMemoryBufferId id,
+    const gfx::Size& size,
+    gfx::BufferFormat format,
+    gfx::BufferUsage usage,
+    int client_id,
+    gpu::SurfaceHandle surface_handle) {
+  DCHECK(CalledOnValidThread());
+  return gpu_memory_buffer_factory_->CreateGpuMemoryBuffer(
+      id, size, format, usage, client_id, surface_handle);
+}
+
+void GpuServiceInternal::DestroyGpuMemoryBuffer(
+    gfx::GpuMemoryBufferId id,
+    int client_id,
+    const gpu::SyncToken& sync_token) {
+  DCHECK(CalledOnValidThread());
+
+  if (gpu_channel_manager_)
+    gpu_channel_manager_->DestroyGpuMemoryBuffer(id, client_id, sync_token);
+}
+
+void GpuServiceInternal::DidCreateOffscreenContext(const GURL& active_url) {
+  NOTIMPLEMENTED();
+}
+
+void GpuServiceInternal::DidDestroyChannel(int client_id) {
+  media_service_->RemoveChannel(client_id);
+  NOTIMPLEMENTED();
+}
+
+void GpuServiceInternal::DidDestroyOffscreenContext(const GURL& active_url) {
+  NOTIMPLEMENTED();
+}
+
+void GpuServiceInternal::DidLoseContext(bool offscreen,
+                                        gpu::error::ContextLostReason reason,
+                                        const GURL& active_url) {
+  NOTIMPLEMENTED();
+}
+
+void GpuServiceInternal::StoreShaderToDisk(int client_id,
+                                           const std::string& key,
+                                           const std::string& shader) {
+  NOTIMPLEMENTED();
+}
+
+#if defined(OS_WIN)
+void GpuServiceInternal::SendAcceleratedSurfaceCreatedChildWindow(
+    gpu::SurfaceHandle parent_window,
+    gpu::SurfaceHandle child_window) {
+  ::SetParent(child_window, parent_window);
+}
+#endif
+
+void GpuServiceInternal::SetActiveURL(const GURL& url) {
+  // TODO(penghuang): implement this function.
+}
+
+void GpuServiceInternal::InitializeOnGpuThread(base::WaitableEvent* event) {
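+  // Runs on the GPU thread. |event| is signaled once initialization has
+  // finished so the calling thread can unblock in Initialize().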
+  gpu_info_.video_decode_accelerator_capabilities =
+      media::GpuVideoDecodeAccelerator::GetCapabilities(gpu_preferences_);
+  gpu_info_.video_encode_accelerator_supported_profiles =
+      media::GpuVideoEncodeAccelerator::GetSupportedProfiles(gpu_preferences_);
+  gpu_info_.jpeg_decode_accelerator_supported =
+      media::GpuJpegDecodeAccelerator::IsSupported();
+
+  DCHECK(!owned_sync_point_manager_);
+  const bool allow_threaded_wait = false;
+  owned_sync_point_manager_.reset(
+      new gpu::SyncPointManager(allow_threaded_wait));
+
+  // Defer creation of the render thread. This is to prevent it from handling
+  // IPC messages before the sandbox has been enabled and all other necessary
+  // initialization has succeeded.
+  gpu_channel_manager_.reset(new gpu::GpuChannelManager(
+      gpu_preferences_, this, watchdog_thread_,
+      base::ThreadTaskRunnerHandle::Get().get(), io_thread_.task_runner().get(),
+      &shutdown_event_, owned_sync_point_manager_.get(),
+      gpu_memory_buffer_factory_));
+
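+  // The media service is told about channel lifetimes via AddChannel() in
+  // EstablishGpuChannelOnGpuThread() and RemoveChannel() in DidDestroyChannel().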
+  media_service_.reset(new media::MediaService(gpu_channel_manager_.get()));
+  event->Signal();
+}
+
+void GpuServiceInternal::EstablishGpuChannelOnGpuThread(
+    int client_id,
+    uint64_t client_tracing_id,
+    bool preempts,
+    bool allow_view_command_buffers,
+    bool allow_real_time_streams,
+    mojo::ScopedMessagePipeHandle* channel_handle) {
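+  // Runs on the GPU thread; the new channel's message pipe is written into
+  // |channel_handle|, which remains owned by the reply posted from
+  // EstablishGpuChannelInternal().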
+  if (gpu_channel_manager_) {
+    auto handle = gpu_channel_manager_->EstablishChannel(
+        client_id, client_tracing_id, preempts, allow_view_command_buffers,
+        allow_real_time_streams);
+    channel_handle->reset(handle.mojo_handle);
+    media_service_->AddChannel(client_id);
+  }
+}
+
+void GpuServiceInternal::Initialize(const InitializeCallback& callback) {
+  DCHECK(CalledOnValidThread());
   base::Thread::Options thread_options(base::MessageLoop::TYPE_DEFAULT, 0);
   thread_options.priority = base::ThreadPriority::NORMAL;
   CHECK(gpu_thread_.StartWithOptions(thread_options));
-  // TODO(sad): We do not need the IO thread once gpu has a separate process. It
-  // should be possible to use |main_task_runner_| for doing IO tasks.
   thread_options = base::Thread::Options(base::MessageLoop::TYPE_IO, 0);
   thread_options.priority = base::ThreadPriority::NORMAL;
 #if defined(OS_ANDROID)
@@ -61,121 +212,14 @@
   thread_options.priority = base::ThreadPriority::DISPLAY;
 #endif
   CHECK(io_thread_.StartWithOptions(thread_options));
-}
-
-GpuServiceInternal::~GpuServiceInternal() {
-  if (binding_.is_bound()) {
-    // Tear down the binding in the gpu thread.
-    gpu_thread_.task_runner()->PostTask(FROM_HERE,
-        base::Bind(&mojo::Binding<mojom::GpuServiceInternal>::Close,
-                   base::Unretained(&binding_)));
-    gpu_thread_.Stop();
-  }
-
-  // Signal this event before destroying the child process. That way all
-  // background threads can cleanup.
-  // For example, in the renderer the RenderThread instances will be able to
-  // notice shutdown before the render process begins waiting for them to exit.
-  shutdown_event_.Signal();
-  io_thread_.Stop();
-}
-
-void GpuServiceInternal::Add(mojom::GpuServiceInternalRequest request) {
-  // Unretained() is OK here since the thread/task runner is owned by |this|.
+
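+  // Block until InitializeOnGpuThread() has run on the GPU thread so that
+  // |gpu_info_| is fully populated before it is passed to |callback|.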
+  base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
+                            base::WaitableEvent::InitialState::NOT_SIGNALED);
   gpu_thread_.task_runner()->PostTask(
-      FROM_HERE,
-      base::Bind(&GpuServiceInternal::BindOnGpuThread, base::Unretained(this),
-                 base::Passed(std::move(request))));
-}
-
-void GpuServiceInternal::BindOnGpuThread(
-    mojom::GpuServiceInternalRequest request) {
-  binding_.Close();
-  binding_.Bind(std::move(request));
-}
-
-gfx::GpuMemoryBufferHandle GpuServiceInternal::CreateGpuMemoryBuffer(
-    gfx::GpuMemoryBufferId id,
-    const gfx::Size& size,
-    gfx::BufferFormat format,
-    gfx::BufferUsage usage,
-    int client_id,
-    gpu::SurfaceHandle surface_handle) {
-  DCHECK(gpu_thread_.task_runner()->BelongsToCurrentThread());
-  return gpu_memory_buffer_factory_->CreateGpuMemoryBuffer(
-      id, size, format, usage, client_id, surface_handle);
-}
-
-void GpuServiceInternal::DestroyGpuMemoryBuffer(
-    gfx::GpuMemoryBufferId id,
-    int client_id,
-    const gpu::SyncToken& sync_token) {
-  DCHECK(gpu_thread_.task_runner()->BelongsToCurrentThread());
-  if (gpu_channel_manager_)
-    gpu_channel_manager_->DestroyGpuMemoryBuffer(id, client_id, sync_token);
-}
-
-void GpuServiceInternal::DidCreateOffscreenContext(const GURL& active_url) {
-  NOTIMPLEMENTED();
-}
-
-void GpuServiceInternal::DidDestroyChannel(int client_id) {
-  media_service_->RemoveChannel(client_id);
-  NOTIMPLEMENTED();
-}
-
-void GpuServiceInternal::DidDestroyOffscreenContext(const GURL& active_url) {
-  NOTIMPLEMENTED();
-}
-
-void GpuServiceInternal::DidLoseContext(bool offscreen,
-                                        gpu::error::ContextLostReason reason,
-                                        const GURL& active_url) {
-  NOTIMPLEMENTED();
-}
-
-void GpuServiceInternal::StoreShaderToDisk(int client_id,
-                                           const std::string& key,
-                                           const std::string& shader) {
-  NOTIMPLEMENTED();
-}
-
-#if defined(OS_WIN)
-void GpuServiceInternal::SendAcceleratedSurfaceCreatedChildWindow(
-    gpu::SurfaceHandle parent_window,
-    gpu::SurfaceHandle child_window) {
-  ::SetParent(child_window, parent_window);
-}
-#endif
-
-void GpuServiceInternal::SetActiveURL(const GURL& url) {
-  // TODO(penghuang): implement this function.
-}
-
-void GpuServiceInternal::Initialize(const InitializeCallback& callback) {
-  DCHECK(gpu_thread_.task_runner()->BelongsToCurrentThread());
-  gpu_info_.video_decode_accelerator_capabilities =
-      media::GpuVideoDecodeAccelerator::GetCapabilities(gpu_preferences_);
-  gpu_info_.video_encode_accelerator_supported_profiles =
-      media::GpuVideoEncodeAccelerator::GetSupportedProfiles(gpu_preferences_);
-  gpu_info_.jpeg_decode_accelerator_supported =
-      media::GpuJpegDecodeAccelerator::IsSupported();
-
-  DCHECK(!owned_sync_point_manager_);
-  const bool allow_threaded_wait = false;
-  owned_sync_point_manager_.reset(
-      new gpu::SyncPointManager(allow_threaded_wait));
-
-  // Defer creation of the render thread. This is to prevent it from handling
-  // IPC messages before the sandbox has been enabled and all other necessary
-  // initialization has succeeded.
-  gpu_channel_manager_.reset(new gpu::GpuChannelManager(
-      gpu_preferences_, this, watchdog_thread_,
-      base::ThreadTaskRunnerHandle::Get().get(), io_thread_.task_runner().get(),
-      &shutdown_event_, owned_sync_point_manager_.get(),
-      gpu_memory_buffer_factory_));
-
-  media_service_.reset(new media::MediaService(gpu_channel_manager_.get()));
+      FROM_HERE, base::Bind(&GpuServiceInternal::InitializeOnGpuThread,
+                            base::Unretained(this), &event));
+  event.Wait();
+
   callback.Run(gpu_info_);
 }
@@ -184,23 +228,12 @@
     uint64_t client_tracing_id,
     bool is_gpu_host,
     const EstablishGpuChannelCallback& callback) {
-  DCHECK(gpu_thread_.task_runner()->BelongsToCurrentThread());
-
-  if (!gpu_channel_manager_) {
-    callback.Run(mojo::ScopedMessagePipeHandle());
-    return;
-  }
-
   const bool preempts = is_gpu_host;
   const bool allow_view_command_buffers = is_gpu_host;
   const bool allow_real_time_streams = is_gpu_host;
-  mojo::ScopedMessagePipeHandle channel_handle;
-  IPC::ChannelHandle handle = gpu_channel_manager_->EstablishChannel(
-      client_id, client_tracing_id, preempts, allow_view_command_buffers,
-      allow_real_time_streams);
-  channel_handle.reset(handle.mojo_handle);
-  media_service_->AddChannel(client_id);
-  callback.Run(std::move(channel_handle));
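+  // Delegate to EstablishGpuChannelInternal(), which hops to the GPU thread
+  // and replies to |callback| with the channel's message pipe.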
+  EstablishGpuChannelInternal(client_id, client_tracing_id, preempts,
+                              allow_view_command_buffers,
+                              allow_real_time_streams, callback);
 }
 }  // namespace ui