Chromium Code Reviews

Unified diff: components/viz/common/server_gpu_memory_buffer_manager.cc

Issue 2941933002: viz: Convert a sync api in ServerGpuMemoryBufferManager into async. (Closed)
Patch Set: fix test build on Windows (created 3 years, 6 months ago)
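The patch replaces the synchronous CreateGpuMemoryBufferHandle() with an asynchronous AllocateGpuMemoryBuffer() that reports the handle through a base::OnceCallback. As a rough illustration of the new calling convention (the RequestBuffer/OnHandleReady helpers and the RGBA_8888/GPU_READ arguments are hypothetical and not part of this patch; only AllocateGpuMemoryBuffer() and its signature come from the diff below, and this assumes the method is callable from the caller shown):

// Hypothetical caller sketch; not Chromium code from this CL.
void OnHandleReady(const gfx::GpuMemoryBufferHandle& handle) {
  // A null handle means the allocation failed or the client went away.
}

void RequestBuffer(viz::ServerGpuMemoryBufferManager* manager,
                   gfx::GpuMemoryBufferId id,
                   int client_id,
                   const gfx::Size& size) {
  // Must run on the manager's sequence; the callback is also run there.
  manager->AllocateGpuMemoryBuffer(
      id, client_id, size, gfx::BufferFormat::RGBA_8888,
      gfx::BufferUsage::GPU_READ, gpu::kNullSurfaceHandle,
      base::BindOnce(&OnHandleReady));
}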
--- a/components/viz/common/server_gpu_memory_buffer_manager.cc
+++ b/components/viz/common/server_gpu_memory_buffer_manager.cc
 // Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "components/viz/common/server_gpu_memory_buffer_manager.h"
 
 #include "base/logging.h"
+#include "base/threading/sequenced_task_runner_handle.h"
 #include "gpu/ipc/client/gpu_memory_buffer_impl.h"
 #include "gpu/ipc/client/gpu_memory_buffer_impl_shared_memory.h"
 #include "gpu/ipc/common/gpu_memory_buffer_support.h"
 #include "services/ui/gpu/interfaces/gpu_service.mojom.h"
 
 namespace viz {
 
 ServerGpuMemoryBufferManager::ServerGpuMemoryBufferManager(
     ui::mojom::GpuService* gpu_service,
     int client_id)
     : gpu_service_(gpu_service),
       client_id_(client_id),
       native_configurations_(gpu::GetNativeGpuMemoryBufferConfigurations()),
+      task_runner_(base::SequencedTaskRunnerHandle::Get()),
       weak_factory_(this) {}
 
 ServerGpuMemoryBufferManager::~ServerGpuMemoryBufferManager() {}
 
-gfx::GpuMemoryBufferHandle
-ServerGpuMemoryBufferManager::CreateGpuMemoryBufferHandle(
+void ServerGpuMemoryBufferManager::AllocateGpuMemoryBuffer(
     gfx::GpuMemoryBufferId id,
     int client_id,
     const gfx::Size& size,
     gfx::BufferFormat format,
     gfx::BufferUsage usage,
-    gpu::SurfaceHandle surface_handle) {
-  DCHECK(CalledOnValidThread());
+    gpu::SurfaceHandle surface_handle,
+    base::OnceCallback<void(const gfx::GpuMemoryBufferHandle&)> callback) {
+  DCHECK(task_runner_->RunsTasksInCurrentSequence());
   if (gpu::GetNativeGpuMemoryBufferType() != gfx::EMPTY_BUFFER) {
     const bool is_native = native_configurations_.find(std::make_pair(
         format, usage)) != native_configurations_.end();
     if (is_native) {
-      gfx::GpuMemoryBufferHandle handle;
-      gpu_service_->CreateGpuMemoryBuffer(id, size, format, usage, client_id,
-                                          surface_handle, &handle);
-      if (!handle.is_null())
-        native_buffers_[client_id].insert(handle.id);
-      return handle;
+      pending_buffers_.insert(client_id);
+      gpu_service_->CreateGpuMemoryBuffer(
+          id, size, format, usage, client_id, surface_handle,
+          base::Bind(&ServerGpuMemoryBufferManager::OnGpuMemoryBufferAllocated,
+                     weak_factory_.GetWeakPtr(), client_id,
+                     base::Passed(std::move(callback))));
+      return;
     }
   }
 
   DCHECK(gpu::GpuMemoryBufferImplSharedMemory::IsUsageSupported(usage))
       << static_cast<int>(usage);
-  return gpu::GpuMemoryBufferImplSharedMemory::CreateGpuMemoryBuffer(id, size,
-                                                                     format);
+  task_runner_->PostTask(
+      FROM_HERE,
+      base::BindOnce(
+          std::move(callback),
+          gpu::GpuMemoryBufferImplSharedMemory::CreateGpuMemoryBuffer(id, size,
+                                                                      format)));
 }
 
 std::unique_ptr<gfx::GpuMemoryBuffer>
 ServerGpuMemoryBufferManager::CreateGpuMemoryBuffer(
     const gfx::Size& size,
     gfx::BufferFormat format,
     gfx::BufferUsage usage,
     gpu::SurfaceHandle surface_handle) {
   gfx::GpuMemoryBufferId id(next_gpu_memory_id_++);
-  gfx::GpuMemoryBufferHandle handle = CreateGpuMemoryBufferHandle(
-      id, client_id_, size, format, usage, surface_handle);
+  gfx::GpuMemoryBufferHandle handle;
+  base::WaitableEvent wait_event(
+      base::WaitableEvent::ResetPolicy::MANUAL,
+      base::WaitableEvent::InitialState::NOT_SIGNALED);
+  DCHECK(!task_runner_->RunsTasksInCurrentSequence());
+  auto reply_callback = base::BindOnce(
+      [](gfx::GpuMemoryBufferHandle* handle, base::WaitableEvent* wait_event,
+         const gfx::GpuMemoryBufferHandle& allocated_buffer_handle) {
+        *handle = allocated_buffer_handle;
+        wait_event->Signal();
+      },
+      &handle, &wait_event);
+  // We block with a WaitableEvent until the callback is run. So using
+  // base::Unretained() is safe here.
+  auto allocate_callback =
+      base::BindOnce(&ServerGpuMemoryBufferManager::AllocateGpuMemoryBuffer,
+                     base::Unretained(this), id, client_id_, size, format,
+                     usage, surface_handle, std::move(reply_callback));
+  task_runner_->PostTask(FROM_HERE, std::move(allocate_callback));
+  base::ThreadRestrictions::ScopedAllowWait allow_wait;
+  wait_event.Wait();
   if (handle.is_null())
     return nullptr;
   return gpu::GpuMemoryBufferImpl::CreateFromHandle(
       handle, size, format, usage,
       base::Bind(&ServerGpuMemoryBufferManager::DestroyGpuMemoryBuffer,
                  weak_factory_.GetWeakPtr(), id, client_id_));
 }
 
 void ServerGpuMemoryBufferManager::SetDestructionSyncToken(
     gfx::GpuMemoryBuffer* buffer,
     const gpu::SyncToken& sync_token) {
-  DCHECK(CalledOnValidThread());
   static_cast<gpu::GpuMemoryBufferImpl*>(buffer)->set_destruction_sync_token(
       sync_token);
 }
 
 void ServerGpuMemoryBufferManager::DestroyGpuMemoryBuffer(
     gfx::GpuMemoryBufferId id,
     int client_id,
     const gpu::SyncToken& sync_token) {
-  DCHECK(CalledOnValidThread());
+  DCHECK(task_runner_->RunsTasksInCurrentSequence());
   if (native_buffers_[client_id].erase(id))
     gpu_service_->DestroyGpuMemoryBuffer(id, client_id, sync_token);
 }
 
 void ServerGpuMemoryBufferManager::DestroyAllGpuMemoryBufferForClient(
     int client_id) {
-  DCHECK(CalledOnValidThread());
+  DCHECK(task_runner_->RunsTasksInCurrentSequence());
   for (gfx::GpuMemoryBufferId id : native_buffers_[client_id])
     gpu_service_->DestroyGpuMemoryBuffer(id, client_id, gpu::SyncToken());
   native_buffers_.erase(client_id);
+  pending_buffers_.erase(client_id);
+}
+
+void ServerGpuMemoryBufferManager::OnGpuMemoryBufferAllocated(
+    int client_id,
+    base::OnceCallback<void(const gfx::GpuMemoryBufferHandle&)> callback,
+    const gfx::GpuMemoryBufferHandle& handle) {
+  DCHECK(task_runner_->RunsTasksInCurrentSequence());
+  if (pending_buffers_.find(client_id) == pending_buffers_.end()) {
+    // The client has been destroyed since the allocation request was made.
+    if (!handle.is_null()) {
+      gpu_service_->DestroyGpuMemoryBuffer(handle.id, client_id,
+                                           gpu::SyncToken());
+    }
+    std::move(callback).Run(gfx::GpuMemoryBufferHandle());
+    return;
+  }
+  if (!handle.is_null())
+    native_buffers_[client_id].insert(handle.id);
+  std::move(callback).Run(handle);
 }
 
 }  // namespace viz
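The remaining synchronous entry point, CreateGpuMemoryBuffer(), now bridges back to the async path: it posts AllocateGpuMemoryBuffer() to the manager's task runner and blocks on a base::WaitableEvent until the reply callback delivers the handle. The following standalone sketch shows the same block-until-callback shape using only the C++ standard library; std::promise and std::thread stand in for base::WaitableEvent and the task runner, and AllocateAsync/AllocateBlocking are made-up names, not Chromium code.

// Standalone illustration of the sync-over-async pattern; not Chromium code.
#include <functional>
#include <future>
#include <string>
#include <thread>
#include <utility>

// Stand-in for the async side: does the work and reports the result through a
// callback, like AllocateGpuMemoryBuffer() running on the manager's sequence.
void AllocateAsync(int request_id,
                   std::function<void(std::string)> callback) {
  callback("handle-for-" + std::to_string(request_id));
}

// Stand-in for the sync wrapper: hands the request to another thread, then
// blocks until the callback delivers the result, like CreateGpuMemoryBuffer()
// waiting on wait_event.Wait().
std::string AllocateBlocking(int request_id) {
  std::promise<std::string> reply;
  std::future<std::string> reply_future = reply.get_future();
  std::thread worker(AllocateAsync, request_id,
                     [&reply](std::string handle) {
                       reply.set_value(std::move(handle));
                     });
  std::string handle = reply_future.get();  // Blocks, like wait_event.Wait().
  worker.join();
  return handle;
}

int main() {
  return AllocateBlocking(1).empty() ? 1 : 0;
}

In the patch itself the wait is guarded by base::ThreadRestrictions::ScopedAllowWait plus a DCHECK that the caller is not on the manager's own sequence, since blocking on that sequence would deadlock the reply callback.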