// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/mus/public/cpp/lib/command_buffer_client_impl.h"

#include <stddef.h>
#include <stdint.h>

#include <limits>
#include <utility>

#include "base/logging.h"
#include "base/process/process_handle.h"
#include "base/threading/thread_restrictions.h"
#include "components/mus/common/gpu_type_converters.h"
#include "components/mus/common/mojo_buffer_backing.h"
#include "components/mus/common/mojo_gpu_memory_buffer.h"
#include "gpu/command_buffer/client/gpu_control_client.h"
#include "gpu/command_buffer/common/command_buffer_id.h"
#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "mojo/public/cpp/system/platform_handle.h"

namespace mus {

namespace {

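// Creates a Mojo shared buffer of |size| bytes and maps it into the current
// process. Returns false if either allocation or mapping fails.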
bool CreateAndMapSharedBuffer(size_t size,
                              mojo::ScopedSharedBufferMapping* mapping,
                              mojo::ScopedSharedBufferHandle* handle) {
  *handle = mojo::SharedBufferHandle::Create(size);
  if (!handle->is_valid())
    return false;

  *mapping = (*handle)->Map(size);
  if (!*mapping)
    return false;

  return true;
}

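// Reply callbacks used with the synchronous WaitForIncomingResponse() calls
// below: each one stores the reply into a local owned by the blocking caller.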
void MakeProgressCallback(gpu::CommandBuffer::State* output,
                          const gpu::CommandBuffer::State& input) {
  *output = input;
}

void InitializeCallback(mus::mojom::CommandBufferInitializeResultPtr* output,
                        mus::mojom::CommandBufferInitializeResultPtr input) {
  *output = std::move(input);
}

}  // namespace

CommandBufferClientImpl::CommandBufferClientImpl(
    const std::vector<int32_t>& attribs,
    mus::mojom::CommandBufferPtr command_buffer_ptr)
    : gpu_control_client_(nullptr),
      destroyed_(false),
      attribs_(attribs),
      client_binding_(this),
      command_buffer_(std::move(command_buffer_ptr)),
      command_buffer_id_(),
      last_put_offset_(-1),
      next_transfer_buffer_id_(0),
      next_image_id_(0),
      next_fence_sync_release_(1),
      flushed_fence_sync_release_(0) {
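  // Treat a broken message pipe as a lost context so the client observes the
  // standard context-lost path.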
  command_buffer_.set_connection_error_handler(
      base::Bind(&CommandBufferClientImpl::Destroyed, base::Unretained(this),
                 gpu::error::kUnknown, gpu::error::kLostContext));
}

CommandBufferClientImpl::~CommandBufferClientImpl() {}

bool CommandBufferClientImpl::Initialize() {
  const size_t kSharedStateSize = sizeof(gpu::CommandBufferSharedState);
  mojo::ScopedSharedBufferHandle handle;
  bool result =
      CreateAndMapSharedBuffer(kSharedStateSize, &shared_state_, &handle);
  if (!result)
    return false;

  shared_state()->Initialize();

  mus::mojom::CommandBufferClientPtr client_ptr;
  client_binding_.Bind(GetProxy(&client_ptr));

  mus::mojom::CommandBufferInitializeResultPtr initialize_result;
  command_buffer_->Initialize(
      std::move(client_ptr), std::move(handle),
      mojo::Array<int32_t>::From(attribs_),
      base::Bind(&InitializeCallback, &initialize_result));

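  // Initialization is synchronous: block this thread until the service
  // replies (or the pipe breaks).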
  base::ThreadRestrictions::ScopedAllowWait wait;
  if (!command_buffer_.WaitForIncomingResponse()) {
    VLOG(1) << "Channel encountered error while creating command buffer.";
    return false;
  }

  if (!initialize_result) {
    VLOG(1) << "Command buffer initialization failed.";
    return false;
  }

  DCHECK_EQ(gpu::CommandBufferNamespace::MOJO,
            initialize_result->command_buffer_namespace);
  command_buffer_id_ = gpu::CommandBufferId::FromUnsafeValue(
      initialize_result->command_buffer_id);
  capabilities_ = initialize_result->capabilities;
  return true;
}

gpu::CommandBuffer::State CommandBufferClientImpl::GetLastState() {
  return last_state_;
}

int32_t CommandBufferClientImpl::GetLastToken() {
  TryUpdateState();
  return last_state_.token;
}

void CommandBufferClientImpl::Flush(int32_t put_offset) {
  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;
  command_buffer_->Flush(put_offset);
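  // Every release generated so far is now flushed. Releases are handed out
  // starting at 1, so the last one generated is |next_fence_sync_release_| - 1.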
  flushed_fence_sync_release_ = next_fence_sync_release_ - 1;
}

void CommandBufferClientImpl::OrderingBarrier(int32_t put_offset) {
  // TODO(jamesr): Implement this more efficiently.
  Flush(put_offset);
}

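// Both waits below spin on synchronous MakeProgress round-trips until the
// value of interest enters [start, end] or the context is lost.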
void CommandBufferClientImpl::WaitForTokenInRange(int32_t start, int32_t end) {
  TryUpdateState();
  while (!InRange(start, end, last_state_.token) &&
         last_state_.error == gpu::error::kNoError) {
    MakeProgressAndUpdateState();
  }
}

void CommandBufferClientImpl::WaitForGetOffsetInRange(int32_t start,
                                                      int32_t end) {
  TryUpdateState();
  while (!InRange(start, end, last_state_.get_offset) &&
         last_state_.error == gpu::error::kNoError) {
    MakeProgressAndUpdateState();
  }
}

void CommandBufferClientImpl::SetGetBuffer(int32_t shm_id) {
  command_buffer_->SetGetBuffer(shm_id);
  last_put_offset_ = -1;
}

scoped_refptr<gpu::Buffer> CommandBufferClientImpl::CreateTransferBuffer(
    size_t size,
    int32_t* id) {
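  // The size is transmitted as a uint32_t (see the cast in
  // RegisterTransferBuffer below), so reject sizes that cannot be represented.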
  if (size >= std::numeric_limits<uint32_t>::max())
    return nullptr;

  mojo::ScopedSharedBufferMapping mapping;
  mojo::ScopedSharedBufferHandle handle;
  if (!CreateAndMapSharedBuffer(size, &mapping, &handle)) {
    if (last_state_.error == gpu::error::kNoError)
      last_state_.error = gpu::error::kLostContext;
    return nullptr;
  }

  *id = ++next_transfer_buffer_id_;

  command_buffer_->RegisterTransferBuffer(*id, std::move(handle),
                                          static_cast<uint32_t>(size));

  std::unique_ptr<gpu::BufferBacking> backing(
      new mus::MojoBufferBacking(std::move(mapping), size));
  scoped_refptr<gpu::Buffer> buffer(new gpu::Buffer(std::move(backing)));
  return buffer;
}

void CommandBufferClientImpl::DestroyTransferBuffer(int32_t id) {
  command_buffer_->DestroyTransferBuffer(id);
}

void CommandBufferClientImpl::SetGpuControlClient(gpu::GpuControlClient* c) {
  gpu_control_client_ = c;
}

gpu::Capabilities CommandBufferClientImpl::GetCapabilities() {
  return capabilities_;
}

int32_t CommandBufferClientImpl::CreateImage(ClientBuffer buffer,
                                             size_t width,
                                             size_t height,
                                             unsigned internalformat) {
  int32_t new_id = ++next_image_id_;

  gfx::Size size(static_cast<int32_t>(width), static_cast<int32_t>(height));

  mus::MojoGpuMemoryBufferImpl* gpu_memory_buffer =
      mus::MojoGpuMemoryBufferImpl::FromClientBuffer(buffer);
  gfx::GpuMemoryBufferHandle handle = gpu_memory_buffer->GetHandle();

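  // Only shared-memory-backed buffers are supported so far; any other handle
  // type bails out here, which also leaves the |requires_sync_point| branch
  // below unreachable for now.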
  bool requires_sync_point = false;
  if (handle.type != gfx::SHARED_MEMORY_BUFFER) {
    requires_sync_point = true;
    NOTIMPLEMENTED();
    return -1;
  }

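  // Duplicate the shared memory handle so the service side receives its own
  // reference, then wrap the platform file as a Mojo handle for transport.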
  base::SharedMemoryHandle dupd_handle =
      base::SharedMemory::DuplicateHandle(handle.handle);
#if defined(OS_WIN)
  HANDLE platform_handle = dupd_handle.GetHandle();
#else
  int platform_handle = dupd_handle.fd;
#endif

  mojo::ScopedHandle scoped_handle = mojo::WrapPlatformFile(platform_handle);
  command_buffer_->CreateImage(
      new_id, std::move(scoped_handle), handle.type, std::move(size),
      static_cast<int32_t>(gpu_memory_buffer->GetFormat()), internalformat);
  if (requires_sync_point) {
    NOTIMPLEMENTED();
    // TODO(jam): need to support this if we support types other than
    // SHARED_MEMORY_BUFFER.
    // gpu_memory_buffer_manager->SetDestructionSyncPoint(gpu_memory_buffer,
    //                                                    InsertSyncPoint());
  }

  return new_id;
}

void CommandBufferClientImpl::DestroyImage(int32_t id) {
  command_buffer_->DestroyImage(id);
}

int32_t CommandBufferClientImpl::CreateGpuMemoryBufferImage(
    size_t width,
    size_t height,
    unsigned internalformat,
    unsigned usage) {
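  // Note that |usage| is ignored here: the buffer is always created with
  // SCANOUT usage.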
  std::unique_ptr<gfx::GpuMemoryBuffer> buffer(
      mus::MojoGpuMemoryBufferImpl::Create(
          gfx::Size(static_cast<int>(width), static_cast<int>(height)),
          gpu::DefaultBufferFormatForImageFormat(internalformat),
          gfx::BufferUsage::SCANOUT));
  if (!buffer)
    return -1;

  return CreateImage(buffer->AsClientBuffer(), width, height, internalformat);
}

int32_t CommandBufferClientImpl::GetImageGpuMemoryBufferId(unsigned image_id) {
  // TODO(erikchen): Once this class supports IOSurface GpuMemoryBuffer backed
  // images, it will also need to keep a local cache from image id to
  // GpuMemoryBuffer id.
  NOTIMPLEMENTED();
  return -1;
}

void CommandBufferClientImpl::SignalQuery(uint32_t query,
                                          const base::Closure& callback) {
  // TODO(piman)
  NOTIMPLEMENTED();
}

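// Invoked on connection error or an explicit context loss. Records the lost
// state and notifies the client; calls after the first are no-ops.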
void CommandBufferClientImpl::Destroyed(int32_t lost_reason, int32_t error) {
  if (destroyed_)
    return;
  last_state_.context_lost_reason =
      static_cast<gpu::error::ContextLostReason>(lost_reason);
  last_state_.error = static_cast<gpu::error::Error>(error);
  if (gpu_control_client_)
    gpu_control_client_->OnGpuControlLostContext();
  destroyed_ = true;
}

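// Client-side notification stubs. Command buffer state is read out of shared
// memory via TryUpdateState(), so these pushed updates can be ignored here.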
void CommandBufferClientImpl::SignalAck(uint32_t id) {}

void CommandBufferClientImpl::SwapBuffersCompleted(int32_t result) {}

void CommandBufferClientImpl::UpdateState(
    const gpu::CommandBuffer::State& state) {}

void CommandBufferClientImpl::UpdateVSyncParameters(int64_t timebase,
                                                    int64_t interval) {}

void CommandBufferClientImpl::TryUpdateState() {
  if (last_state_.error == gpu::error::kNoError)
    shared_state()->Read(&last_state_);
}

void CommandBufferClientImpl::MakeProgressAndUpdateState() {
  gpu::CommandBuffer::State state;
  command_buffer_->MakeProgress(last_state_.get_offset,
                                base::Bind(&MakeProgressCallback, &state));

  base::ThreadRestrictions::ScopedAllowWait wait;
  if (!command_buffer_.WaitForIncomingResponse()) {
    VLOG(1) << "Channel encountered error while waiting for command buffer.";
    // TODO(piman): is it ok for this to re-enter?
    Destroyed(gpu::error::kUnknown, gpu::error::kLostContext);
    return;
  }

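  // Generation numbers are 32-bit serials that may wrap, so use unsigned
  // subtraction to accept the reply only if it is not older than the state we
  // already have.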
  if (state.generation - last_state_.generation < 0x80000000U)
    last_state_ = state;
}

void CommandBufferClientImpl::SetLock(base::Lock* lock) {}

void CommandBufferClientImpl::EnsureWorkVisible() {
  // This is only relevant for out-of-process command buffers.
}

gpu::CommandBufferNamespace CommandBufferClientImpl::GetNamespaceID() const {
  return gpu::CommandBufferNamespace::MOJO;
}

gpu::CommandBufferId CommandBufferClientImpl::GetCommandBufferID() const {
  return command_buffer_id_;
}

int32_t CommandBufferClientImpl::GetExtraCommandBufferData() const {
  return 0;
}

uint64_t CommandBufferClientImpl::GenerateFenceSyncRelease() {
  return next_fence_sync_release_++;
}

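// Fence sync releases are allocated sequentially starting at 1, so a nonzero
// release below |next_fence_sync_release_| has been generated, and one at or
// below |flushed_fence_sync_release_| has also been flushed.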
bool CommandBufferClientImpl::IsFenceSyncRelease(uint64_t release) {
  return release != 0 && release < next_fence_sync_release_;
}

bool CommandBufferClientImpl::IsFenceSyncFlushed(uint64_t release) {
  return release != 0 && release <= flushed_fence_sync_release_;
}

bool CommandBufferClientImpl::IsFenceSyncFlushReceived(uint64_t release) {
  return IsFenceSyncFlushed(release);
}

void CommandBufferClientImpl::SignalSyncToken(const gpu::SyncToken& sync_token,
                                              const base::Closure& callback) {
  // TODO(dyen)
  NOTIMPLEMENTED();
}

bool CommandBufferClientImpl::CanWaitUnverifiedSyncToken(
    const gpu::SyncToken* sync_token) {
  // Right now, MOJO_LOCAL is only used by trusted code, so it is safe to wait
  // on a sync token from a MOJO_LOCAL command buffer.
  if (sync_token->namespace_id() == gpu::CommandBufferNamespace::MOJO_LOCAL)
    return true;

  // It is also safe to wait on a sync token from the same context.
  if (sync_token->namespace_id() == gpu::CommandBufferNamespace::MOJO &&
      sync_token->command_buffer_id() == GetCommandBufferID())
    return true;

  return false;
}

}  // namespace mus