| Index: gpu/ipc/client/gpu_host_memory_buffer_manager.cc
|
| diff --git a/gpu/ipc/client/gpu_host_memory_buffer_manager.cc b/gpu/ipc/client/gpu_host_memory_buffer_manager.cc
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..c8093593b72294ff463019d537134f4c24a21d2c
|
| --- /dev/null
|
| +++ b/gpu/ipc/client/gpu_host_memory_buffer_manager.cc
|
| @@ -0,0 +1,367 @@
|
| +// Copyright 2016 The Chromium Authors. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
| +#include "gpu/ipc/client/gpu_host_memory_buffer_manager.h"
|
| +
|
| +#include "base/atomic_sequence_num.h"
|
| +#include "base/bind.h"
|
| +#include "base/synchronization/waitable_event.h"
|
| +#include "base/threading/thread_restrictions.h"
|
| +#include "base/threading/thread_task_runner_handle.h"
|
| +#include "base/trace_event/trace_event.h"
|
| +#include "gpu/ipc/client/gpu_memory_buffer_impl.h"
|
| +#include "gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.h"
|
| +#include "mojo/public/cpp/system/buffer.h"
|
| +#include "mojo/public/cpp/system/platform_handle.h"
|
| +#include "ui/gfx/buffer_format_util.h"
|
| +
|
namespace {

// Monotonic, process-wide source of unique ids for buffers created by this
// manager. Shared by both allocation paths (fresh allocation and import from
// an existing handle).
base::StaticAtomicSequenceNumber g_next_generic_shared_memory_id;

// Returns the next unused buffer id.
gfx::GenericSharedMemoryId GetNextGenericSharedMemoryId() {
  return gfx::GenericSharedMemoryId(g_next_generic_shared_memory_id.GetNext());
}

// Destruction trampoline. A GpuMemoryBufferImpl may be destroyed on an
// arbitrary thread; this hops its destruction callback (carrying
// |sync_token|) over to |destruction_task_runner| — in this file, the IO task
// runner, where the manager's bookkeeping lives.
void GpuMemoryBufferDeleted(
    scoped_refptr<base::SingleThreadTaskRunner> destruction_task_runner,
    const gpu::GpuMemoryBufferImpl::DestructionCallback& destruction_callback,
    const gpu::SyncToken& sync_token) {
  destruction_task_runner->PostTask(
      FROM_HERE, base::Bind(destruction_callback, sync_token));
}

}  // namespace
|
| +
|
| +namespace gpu {
|
| +
|
| +////////////////////////////////////////////////////////////////////////////////
|
| +// GpuHostMemoryBufferManager::CreateGpuMemoryBufferRequest
|
| +
|
| +struct GpuHostMemoryBufferManager::CreateGpuMemoryBufferRequest {
|
| + CreateGpuMemoryBufferRequest(const gfx::Size& size,
|
| + gfx::BufferFormat format,
|
| + gfx::BufferUsage usage,
|
| + int client_id,
|
| + gpu::SurfaceHandle surface_handle)
|
| + : event(base::WaitableEvent::ResetPolicy::MANUAL,
|
| + base::WaitableEvent::InitialState::NOT_SIGNALED),
|
| + size(size),
|
| + format(format),
|
| + usage(usage),
|
| + client_id(client_id),
|
| + surface_handle(surface_handle) {}
|
| + ~CreateGpuMemoryBufferRequest() {}
|
| + base::WaitableEvent event;
|
| + gfx::Size size;
|
| + gfx::BufferFormat format;
|
| + gfx::BufferUsage usage;
|
| + int client_id;
|
| + gpu::SurfaceHandle surface_handle;
|
| + std::unique_ptr<gfx::GpuMemoryBuffer> result;
|
| +};
|
| +
|
| +////////////////////////////////////////////////////////////////////////////////
|
| +// GpuHostMemoryBufferManager::CreateGpuMemoryBufferFromHandleRequest
|
| +
|
| +struct GpuHostMemoryBufferManager::CreateGpuMemoryBufferFromHandleRequest
|
| + : public CreateGpuMemoryBufferRequest {
|
| + CreateGpuMemoryBufferFromHandleRequest(
|
| + const gfx::GpuMemoryBufferHandle& handle,
|
| + const gfx::Size& size,
|
| + gfx::BufferFormat format,
|
| + int client_id)
|
| + : CreateGpuMemoryBufferRequest(size,
|
| + format,
|
| + gfx::BufferUsage::GPU_READ,
|
| + client_id,
|
| + gpu::kNullSurfaceHandle),
|
| + handle(handle) {}
|
| + ~CreateGpuMemoryBufferFromHandleRequest() {}
|
| + gfx::GpuMemoryBufferHandle handle;
|
| +};
|
| +
|
| +////////////////////////////////////////////////////////////////////////////////
|
| +// GpuHostMemoryBufferManager::BufferInfo
|
| +
|
| +GpuHostMemoryBufferManager::BufferInfo::BufferInfo() = default;
|
| +
|
| +GpuHostMemoryBufferManager::BufferInfo::BufferInfo(
|
| + const gfx::Size& size,
|
| + gfx::GpuMemoryBufferType type,
|
| + gfx::BufferFormat format,
|
| + gfx::BufferUsage usage,
|
| + int gpu_host_id)
|
| + : size(size),
|
| + type(type),
|
| + format(format),
|
| + usage(usage),
|
| + gpu_host_id(gpu_host_id) {}
|
| +
|
| +GpuHostMemoryBufferManager::BufferInfo::BufferInfo(const BufferInfo& other) =
|
| + default;
|
| +
|
| +GpuHostMemoryBufferManager::BufferInfo::~BufferInfo() {}
|
| +
|
| +////////////////////////////////////////////////////////////////////////////////
|
| +// GpuMemoryBufferManager
|
| +
|
// NOTE(review): |io_task_runner_| is not initialized here; presumably it is
// set in the header or via a setter before any entry point below runs —
// confirm, since every method dereferences it unconditionally.
GpuHostMemoryBufferManager::GpuHostMemoryBufferManager() : gpu_client_id_(0) {}

GpuHostMemoryBufferManager::~GpuHostMemoryBufferManager() {}
|
| +
|
| +void Run(const GpuHostMemoryBufferManager::AllocationCallback& callback,
|
| + gfx::GpuMemoryBufferHandle handle) {
|
| + callback.Run(handle);
|
| +}
|
| +
|
| +void Proxy(scoped_refptr<base::SingleThreadTaskRunner> runner,
|
| + const GpuHostMemoryBufferManager::AllocationCallback& callback,
|
| + const gfx::GpuMemoryBufferHandle& handle) {
|
| + runner->PostTask(FROM_HERE, base::Bind(&Run, callback, handle));
|
| +}
|
| +
|
| +gfx::GpuMemoryBufferHandle X(gfx::GpuMemoryBufferId id,
|
| + const gfx::Size& size,
|
| + gfx::BufferFormat format) {
|
| + size_t bytes = gfx::BufferSizeForBufferFormat(size, format);
|
| +
|
| + mojo::ScopedSharedBufferHandle scoped_handle =
|
| + mojo::SharedBufferHandle::Create(bytes);
|
| + DCHECK(scoped_handle.is_valid());
|
| +
|
| + base::SharedMemoryHandle platform_handle;
|
| + size_t shared_memory_size;
|
| + bool readonly;
|
| + MojoResult result = mojo::UnwrapSharedMemoryHandle(
|
| + std::move(scoped_handle), &platform_handle, &shared_memory_size, &readonly);
|
| + DCHECK_EQ(MOJO_RESULT_OK, result);
|
| + DCHECK_EQ(shared_memory_size, bytes);
|
| +
|
| + gfx::GpuMemoryBufferHandle handle;
|
| + handle.type = gfx::SHARED_MEMORY_BUFFER;
|
| + handle.id = id;
|
| + handle.offset = 0;
|
| + handle.stride = static_cast<int32_t>(
|
| + gfx::RowSizeForBufferFormat(size.width(), format, 0));
|
| + handle.handle = platform_handle;
|
| + return handle;
|
| +}
|
| +
|
| +void GpuHostMemoryBufferManager::AllocateGpuMemoryBufferForClient(
|
| + gfx::GpuMemoryBufferId id,
|
| + const gfx::Size& size,
|
| + gfx::BufferFormat format,
|
| + gfx::BufferUsage usage,
|
| + int child_client_id,
|
| + const AllocationCallback& callback) {
|
| +#if 0
|
| + DCHECK(io_task_runner_->BelongsToCurrentThread());
|
| +#else
|
| + if (!io_task_runner_->BelongsToCurrentThread()) {
|
| + io_task_runner_->PostTask(
|
| + FROM_HERE,
|
| + base::Bind(
|
| + &GpuHostMemoryBufferManager::AllocateGpuMemoryBufferForClient,
|
| + base::Unretained(this), id, size, format, usage, child_client_id,
|
| + base::Bind(&Proxy, base::ThreadTaskRunnerHandle::Get(), callback)));
|
| + return;
|
| + }
|
| +#endif
|
| +
|
| +#if 0
|
| + // Use service side allocation for native configurations.
|
| + if (IsNativeGpuMemoryBufferConfiguration(format, usage)) {
|
| + CreateGpuMemoryBufferOnIO(
|
| + base::Bind(&HostCreateGpuMemoryBuffer, gpu::kNullSurfaceHandle), id,
|
| + size, format, usage, child_client_id, false, callback);
|
| + return;
|
| + }
|
| +#endif
|
| +
|
| + // Early out if we cannot fallback to shared memory buffer.
|
| + if (!gpu::GpuMemoryBufferImplSharedMemory::IsUsageSupported(usage) ||
|
| + !gpu::GpuMemoryBufferImplSharedMemory::IsSizeValidForFormat(size,
|
| + format)) {
|
| + callback.Run(gfx::GpuMemoryBufferHandle());
|
| + return;
|
| + }
|
| +
|
| + BufferMap& buffers = clients_[child_client_id];
|
| +
|
| + // Allocate shared memory buffer as fallback.
|
| + auto insert_result = buffers.insert(std::make_pair(
|
| + id, BufferInfo(size, gfx::SHARED_MEMORY_BUFFER, format, usage, 0)));
|
| + if (!insert_result.second) {
|
| + DLOG(ERROR) << "Child process attempted to allocate a GpuMemoryBuffer with "
|
| + "an existing ID.";
|
| + callback.Run(gfx::GpuMemoryBufferHandle());
|
| + return;
|
| + }
|
| +
|
| + // TODO(sad): We need a valid ProcessHandle, but it is required only on win32.
|
| + base::ProcessHandle child_process_handle = base::kNullProcessHandle;
|
| + callback.Run(gpu::GpuMemoryBufferImplSharedMemory::AllocateForChildProcess(
|
| + id, size, format, child_process_handle));
|
| +}
|
| +
|
| +void GpuHostMemoryBufferManager::HandleCreateGpuMemoryBufferOnIO(
|
| + CreateGpuMemoryBufferRequest* request) {
|
| + DCHECK(io_task_runner_->BelongsToCurrentThread());
|
| +
|
| + gfx::GpuMemoryBufferId new_id = GetNextGenericSharedMemoryId();
|
| +
|
| +#if 0
|
| + // Use service side allocation for native configurations.
|
| + if (IsNativeGpuMemoryBufferConfiguration(request->format, request->usage)) {
|
| + // Note: Unretained is safe as this is only used for synchronous allocation
|
| + // from a non-IO thread.
|
| + CreateGpuMemoryBufferOnIO(
|
| + base::Bind(&HostCreateGpuMemoryBuffer, request->surface_handle), new_id,
|
| + request->size, request->format, request->usage, request->client_id,
|
| + false,
|
| + base::Bind(
|
| + &GpuHostMemoryBufferManager::HandleGpuMemoryBufferCreatedOnIO,
|
| + base::Unretained(this), base::Unretained(request)));
|
| + return;
|
| + }
|
| +#endif
|
| +
|
| + DCHECK(GpuMemoryBufferImplSharedMemory::IsUsageSupported(request->usage))
|
| + << static_cast<int>(request->usage);
|
| +
|
| + BufferMap& buffers = clients_[request->client_id];
|
| +
|
| + // Allocate shared memory buffer as fallback.
|
| + auto insert_result = buffers.insert(std::make_pair(
|
| + new_id, BufferInfo(request->size, gfx::SHARED_MEMORY_BUFFER,
|
| + request->format, request->usage, 0)));
|
| + DCHECK(insert_result.second);
|
| +
|
| + // Note: Unretained is safe as IO thread is stopped before manager is
|
| + // destroyed.
|
| + request->result = GpuMemoryBufferImplSharedMemory::Create(
|
| + new_id, request->size, request->format,
|
| + base::Bind(
|
| + &GpuMemoryBufferDeleted, io_task_runner_,
|
| + base::Bind(&GpuHostMemoryBufferManager::DestroyGpuMemoryBufferOnIO,
|
| + base::Unretained(this), new_id, request->client_id)));
|
| + request->event.Signal();
|
| +}
|
| +
|
| +void GpuHostMemoryBufferManager::HandleCreateGpuMemoryBufferFromHandleOnIO(
|
| + CreateGpuMemoryBufferFromHandleRequest* request) {
|
| + DCHECK(io_task_runner_->BelongsToCurrentThread());
|
| +
|
| + gfx::GpuMemoryBufferId new_id = GetNextGenericSharedMemoryId();
|
| +
|
| + BufferMap& buffers = clients_[request->client_id];
|
| + auto insert_result = buffers.insert(
|
| + std::make_pair(new_id, BufferInfo(request->size, request->handle.type,
|
| + request->format, request->usage, 0)));
|
| + DCHECK(insert_result.second);
|
| +
|
| + gfx::GpuMemoryBufferHandle handle = request->handle;
|
| + handle.id = new_id;
|
| +
|
| + // Note: Unretained is safe as IO thread is stopped before manager is
|
| + // destroyed.
|
| + request->result = GpuMemoryBufferImpl::CreateFromHandle(
|
| + handle, request->size, request->format, request->usage,
|
| + base::Bind(
|
| + &GpuMemoryBufferDeleted, io_task_runner_,
|
| + base::Bind(&GpuHostMemoryBufferManager::DestroyGpuMemoryBufferOnIO,
|
| + base::Unretained(this), new_id, request->client_id)));
|
| + request->event.Signal();
|
| +}
|
| +
|
| +void GpuHostMemoryBufferManager::DestroyGpuMemoryBufferOnIO(
|
| + gfx::GpuMemoryBufferId id,
|
| + int client_id,
|
| + const gpu::SyncToken& sync_token) {
|
| + DCHECK(io_task_runner_->BelongsToCurrentThread());
|
| + DCHECK(clients_.find(client_id) != clients_.end());
|
| +
|
| + BufferMap& buffers = clients_[client_id];
|
| +
|
| + BufferMap::iterator buffer_it = buffers.find(id);
|
| + if (buffer_it == buffers.end()) {
|
| + LOG(ERROR) << "Invalid GpuMemoryBuffer ID for client.";
|
| + return;
|
| + }
|
| +
|
| + // This can happen if a client managed to call this while a buffer is in the
|
| + // process of being allocated.
|
| + if (buffer_it->second.type == gfx::EMPTY_BUFFER) {
|
| + LOG(ERROR) << "Invalid GpuMemoryBuffer type.";
|
| + return;
|
| + }
|
| +
|
| +#if 0
|
| + GpuProcessHost* host = GpuProcessHost::FromID(buffer_it->second.gpu_host_id);
|
| + if (host)
|
| + host->DestroyGpuMemoryBuffer(id, client_id, sync_token);
|
| +#endif
|
| +
|
| + buffers.erase(buffer_it);
|
| +}
|
| +
|
| +std::unique_ptr<gfx::GpuMemoryBuffer>
|
| +GpuHostMemoryBufferManager::AllocateGpuMemoryBuffer(
|
| + const gfx::Size& size,
|
| + gfx::BufferFormat format,
|
| + gfx::BufferUsage usage,
|
| + SurfaceHandle surface_handle) {
|
| + DCHECK(!io_task_runner_->BelongsToCurrentThread());
|
| +
|
| + CreateGpuMemoryBufferRequest request(size, format, usage, gpu_client_id_,
|
| + surface_handle);
|
| + io_task_runner_->PostTask(
|
| + FROM_HERE,
|
| + base::Bind(&GpuHostMemoryBufferManager::HandleCreateGpuMemoryBufferOnIO,
|
| + base::Unretained(this), // Safe as we wait for result below.
|
| + base::Unretained(&request)));
|
| + TRACE_EVENT0("browser",
|
| + "GpuHostMemoryBufferManager::AllocateGpuMemoryBufferForSurface");
|
| + base::ThreadRestrictions::ScopedAllowWait allow_wait;
|
| + request.event.Wait();
|
| + return std::move(request.result);
|
| +}
|
| +
|
| +std::unique_ptr<gfx::GpuMemoryBuffer>
|
| +GpuHostMemoryBufferManager::CreateGpuMemoryBufferFromHandle(
|
| + const gfx::GpuMemoryBufferHandle& handle,
|
| + const gfx::Size& size,
|
| + gfx::BufferFormat format) {
|
| + DCHECK(!io_task_runner_->BelongsToCurrentThread());
|
| +
|
| + CreateGpuMemoryBufferFromHandleRequest request(handle, size, format,
|
| + gpu_client_id_);
|
| + io_task_runner_->PostTask(
|
| + FROM_HERE,
|
| + base::Bind(&GpuHostMemoryBufferManager::
|
| + HandleCreateGpuMemoryBufferFromHandleOnIO,
|
| + base::Unretained(this), // Safe as we wait for result below.
|
| + base::Unretained(&request)));
|
| +
|
| + // We're blocking the UI thread, which is generally undesirable.
|
| + TRACE_EVENT0(
|
| + "browser",
|
| + "GpuHostMemoryBufferManager::CreateGpuMemoryBufferFromHandle");
|
| + base::ThreadRestrictions::ScopedAllowWait allow_wait;
|
| + request.event.Wait();
|
| + return std::move(request.result);
|
| +}
|
| +
|
| +gfx::GpuMemoryBuffer*
|
| +GpuHostMemoryBufferManager::GpuMemoryBufferFromClientBuffer(
|
| + ClientBuffer buffer) {
|
| + return gpu::GpuMemoryBufferImpl::FromClientBuffer(buffer);
|
| +}
|
| +
|
// No-op: both arguments are ignored in this implementation.
// NOTE(review): callers expecting destruction to be ordered after
// |sync_token| is released will not get that guarantee here — confirm this
// is handled elsewhere (or not needed) before relying on it.
void GpuHostMemoryBufferManager::SetDestructionSyncToken(
    gfx::GpuMemoryBuffer* buffer,
    const SyncToken& sync_token) {}
|
| +
|
| +} // namespace gpu
|
|
|