Chromium Code Reviews
Issue 2941933002: viz: Convert a sync api in ServerGpuMemoryBufferManager into async. (Closed)
Index: components/viz/common/server_gpu_memory_buffer_manager.cc
diff --git a/components/viz/common/server_gpu_memory_buffer_manager.cc b/components/viz/common/server_gpu_memory_buffer_manager.cc
index 23fe8942da670b8c84f1c0e9d4cbd04a6faad4fd..f9392494ff8bd9972dd5077a25614b887cccecf0 100644
--- a/components/viz/common/server_gpu_memory_buffer_manager.cc
+++ b/components/viz/common/server_gpu_memory_buffer_manager.cc
@@ -5,6 +5,7 @@
 #include "components/viz/common/server_gpu_memory_buffer_manager.h"

 #include "base/logging.h"
+#include "base/threading/thread_task_runner_handle.h"
 #include "gpu/ipc/client/gpu_memory_buffer_impl.h"
 #include "gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.h"
 #include "gpu/ipc/common/gpu_memory_buffer_support.h"
@@ -18,36 +19,45 @@ ServerGpuMemoryBufferManager::ServerGpuMemoryBufferManager(
     : gpu_service_(gpu_service),
       client_id_(client_id),
       native_configurations_(gpu::GetNativeGpuMemoryBufferConfigurations()),
+      task_runner_(base::ThreadTaskRunnerHandle::Get()),
       weak_factory_(this) {}

 ServerGpuMemoryBufferManager::~ServerGpuMemoryBufferManager() {}

-gfx::GpuMemoryBufferHandle
-ServerGpuMemoryBufferManager::CreateGpuMemoryBufferHandle(
+void ServerGpuMemoryBufferManager::AllocateGpuMemoryBuffer(
     gfx::GpuMemoryBufferId id,
     int client_id,
     const gfx::Size& size,
     gfx::BufferFormat format,
     gfx::BufferUsage usage,
-    gpu::SurfaceHandle surface_handle) {
-  DCHECK(CalledOnValidThread());
+    gpu::SurfaceHandle surface_handle,
+    base::OnceCallback<void(const gfx::GpuMemoryBufferHandle&)> callback) {
+  DCHECK(task_runner_->BelongsToCurrentThread());
   if (gpu::GetNativeGpuMemoryBufferType() != gfx::EMPTY_BUFFER) {
     const bool is_native = native_configurations_.find(std::make_pair(
                                format, usage)) != native_configurations_.end();
     if (is_native) {
-      gfx::GpuMemoryBufferHandle handle;
-      gpu_service_->CreateGpuMemoryBuffer(id, size, format, usage, client_id,
-                                          surface_handle, &handle);
-      if (!handle.is_null())
-        native_buffers_[client_id].insert(handle.id);
-      return handle;
+      pending_buffers_.insert(client_id);
+      gpu_service_->CreateGpuMemoryBuffer(
+          id, size, format, usage, client_id, surface_handle,
+          base::Bind(&ServerGpuMemoryBufferManager::OnGpuMemoryBufferAllocated,
+                     weak_factory_.GetWeakPtr(), client_id,
+                     base::Passed(std::move(callback))));
+      return;
     }
   }

-  DCHECK(gpu::GpuMemoryBufferImplSharedMemory::IsUsageSupported(usage))
-      << static_cast<int>(usage);
-  return gpu::GpuMemoryBufferImplSharedMemory::CreateGpuMemoryBuffer(id, size,
-                                                                     format);
+  // Early out if we cannot fallback to shared memory buffer.
+  if (!gpu::GpuMemoryBufferImplSharedMemory::IsUsageSupported(usage) ||
+      !gpu::GpuMemoryBufferImplSharedMemory::IsSizeValidForFormat(size,
+                                                                  format)) {
+    std::move(callback).Run(gfx::GpuMemoryBufferHandle());
+    return;
+  }
+
+  std::move(callback).Run(
+      gpu::GpuMemoryBufferImplSharedMemory::CreateGpuMemoryBuffer(id, size,
+                                                                  format));
 }

 std::unique_ptr<gfx::GpuMemoryBuffer>
@@ -57,8 +67,23 @@ ServerGpuMemoryBufferManager::CreateGpuMemoryBuffer(
     gfx::BufferUsage usage,
     gpu::SurfaceHandle surface_handle) {
   gfx::GpuMemoryBufferId id(next_gpu_memory_id_++);
-  gfx::GpuMemoryBufferHandle handle = CreateGpuMemoryBufferHandle(
-      id, client_id_, size, format, usage, surface_handle);
+  gfx::GpuMemoryBufferHandle handle;
+  base::WaitableEvent wait_event(
+      base::WaitableEvent::ResetPolicy::MANUAL,
+      base::WaitableEvent::InitialState::NOT_SIGNALED);
+  // We block with a WaitableEvent until the callbacks are run. So using
+  // base::Unretained() is safe here.
+  DCHECK(!task_runner_->BelongsToCurrentThread());
+  auto reply_callback = base::BindOnce(
+      &ServerGpuMemoryBufferManager::SignalGpuMemoryBufferAllocated,
+      base::Unretained(this), &handle, &wait_event);
+  auto allocate_callback =
+      base::BindOnce(&ServerGpuMemoryBufferManager::AllocateGpuMemoryBuffer,
+                     base::Unretained(this), id, client_id_, size, format,
+                     usage, surface_handle, std::move(reply_callback));
+  task_runner_->PostTask(FROM_HERE, std::move(allocate_callback));
+  base::ThreadRestrictions::ScopedAllowWait allow_wait;
+  wait_event.Wait();
| 
danakj (2017/06/15 21:58:57): How is this not a deadlock?

sadrul (2017/06/15 22:01:32): |task_runner_| is not running in this thread. See

danakj (2017/06/15 22:06:00): What thread owns/creates this class? What threads

sadrul (2017/06/15 22:45:17): In mus-ws, this is created on the main-thread. It
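To make the threading model discussed in this thread concrete, here is a minimal sketch (not part of the CL) of the post-and-wait pattern used above: the DCHECK(!task_runner_->BelongsToCurrentThread()) guarantees the blocking caller is never the thread that services |task_runner_|, so the posted allocation task can still run and signal the event. The helper names below are illustrative assumptions, not code from this change.

#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/single_thread_task_runner.h"
#include "base/synchronization/waitable_event.h"

namespace {

// Runs on the manager's thread; performs the (elided) allocation and then
// unblocks the waiting caller.
void AllocateOnManagerThread(base::WaitableEvent* done) {
  // ... allocate the buffer here ...
  done->Signal();
}

// Runs on any *other* thread, mirroring CreateGpuMemoryBuffer() above.
void BlockingAllocate(
    scoped_refptr<base::SingleThreadTaskRunner> manager_task_runner) {
  // Never called on the manager's own thread, so Wait() cannot starve the
  // task posted below.
  DCHECK(!manager_task_runner->BelongsToCurrentThread());
  base::WaitableEvent done(base::WaitableEvent::ResetPolicy::MANUAL,
                           base::WaitableEvent::InitialState::NOT_SIGNALED);
  manager_task_runner->PostTask(
      FROM_HERE, base::BindOnce(&AllocateOnManagerThread, &done));
  done.Wait();  // Unblocked once the manager's thread runs the task.
}

}  // namespace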
   if (handle.is_null())
     return nullptr;
   return gpu::GpuMemoryBufferImpl::CreateFromHandle(
@@ -70,7 +95,6 @@ ServerGpuMemoryBufferManager::CreateGpuMemoryBuffer(
 void ServerGpuMemoryBufferManager::SetDestructionSyncToken(
     gfx::GpuMemoryBuffer* buffer,
     const gpu::SyncToken& sync_token) {
-  DCHECK(CalledOnValidThread());
   static_cast<gpu::GpuMemoryBufferImpl*>(buffer)->set_destruction_sync_token(
       sync_token);
 }
@@ -79,17 +103,44 @@ void ServerGpuMemoryBufferManager::DestroyGpuMemoryBuffer(
     gfx::GpuMemoryBufferId id,
     int client_id,
     const gpu::SyncToken& sync_token) {
-  DCHECK(CalledOnValidThread());
+  DCHECK(task_runner_->BelongsToCurrentThread());
   if (native_buffers_[client_id].erase(id))
     gpu_service_->DestroyGpuMemoryBuffer(id, client_id, sync_token);
 }

 void ServerGpuMemoryBufferManager::DestroyAllGpuMemoryBufferForClient(
     int client_id) {
-  DCHECK(CalledOnValidThread());
+  DCHECK(task_runner_->BelongsToCurrentThread());
   for (gfx::GpuMemoryBufferId id : native_buffers_[client_id])
     gpu_service_->DestroyGpuMemoryBuffer(id, client_id, gpu::SyncToken());
   native_buffers_.erase(client_id);
+  pending_buffers_.erase(client_id);
+}
+
+void ServerGpuMemoryBufferManager::OnGpuMemoryBufferAllocated(
+    int client_id,
+    base::OnceCallback<void(const gfx::GpuMemoryBufferHandle&)> callback,
+    const gfx::GpuMemoryBufferHandle& handle) {
+  DCHECK(task_runner_->BelongsToCurrentThread());
+  if (pending_buffers_.find(client_id) == pending_buffers_.end()) {
+    // The client has been destroyed since the allocation request was made.
+    if (!handle.is_null())
+      gpu_service_->DestroyGpuMemoryBuffer(handle.id, client_id,
+                                           gpu::SyncToken());
+    std::move(callback).Run(gfx::GpuMemoryBufferHandle());
+    return;
+  }
+  if (!handle.is_null())
+    native_buffers_[client_id].insert(handle.id);
+  std::move(callback).Run(handle);
+}
+
+void ServerGpuMemoryBufferManager::SignalGpuMemoryBufferAllocated(
+    gfx::GpuMemoryBufferHandle* handle,
+    base::WaitableEvent* wait_event,
+    const gfx::GpuMemoryBufferHandle& allocated_handle) {
+  *handle = allocated_handle;
+  wait_event->Signal();
+}

 }  // namespace viz
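For completeness, a hedged sketch of how a caller that already runs on the manager's thread might use the new asynchronous AllocateGpuMemoryBuffer() entry point introduced by this patch. HandleAllocated, RequestBufferOnManagerThread, and the specific format/usage values are assumptions for illustration only; the AllocateGpuMemoryBuffer signature is the one shown in the diff above.

// Illustrative usage only; not part of the CL.
void HandleAllocated(const gfx::GpuMemoryBufferHandle& handle) {
  if (handle.is_null()) {
    // Allocation failed, or the client was destroyed while the request was
    // in flight (see OnGpuMemoryBufferAllocated() above).
    return;
  }
  // ... hand |handle| off to the requesting client ...
}

void RequestBufferOnManagerThread(viz::ServerGpuMemoryBufferManager* manager,
                                  gfx::GpuMemoryBufferId id,
                                  int client_id,
                                  const gfx::Size& size) {
  // The callback is invoked either synchronously (shared memory fallback) or
  // once the GPU service replies (native buffers).
  manager->AllocateGpuMemoryBuffer(
      id, client_id, size, gfx::BufferFormat::RGBA_8888,
      gfx::BufferUsage::GPU_READ, gpu::kNullSurfaceHandle,
      base::BindOnce(&HandleAllocated));
}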