| OLD | NEW |
| (Empty) |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "components/mus/gles2/command_buffer_local.h" | |
| 6 | |
| 7 #include "base/atomic_sequence_num.h" | |
| 8 #include "base/bind.h" | |
| 9 #include "base/memory/shared_memory.h" | |
| 10 #include "base/synchronization/waitable_event.h" | |
| 11 #include "base/threading/thread_task_runner_handle.h" | |
| 12 #include "components/mus/common/gpu_type_converters.h" | |
| 13 #include "components/mus/common/mojo_buffer_backing.h" | |
| 14 #include "components/mus/common/mojo_gpu_memory_buffer.h" | |
| 15 #include "components/mus/gles2/command_buffer_driver.h" | |
| 16 #include "components/mus/gles2/command_buffer_local_client.h" | |
| 17 #include "components/mus/gles2/gpu_memory_tracker.h" | |
| 18 #include "components/mus/gles2/gpu_state.h" | |
| 19 #include "gpu/command_buffer/client/gpu_control_client.h" | |
| 20 #include "gpu/command_buffer/common/gpu_memory_buffer_support.h" | |
| 21 #include "gpu/command_buffer/common/sync_token.h" | |
| 22 #include "gpu/command_buffer/service/command_buffer_service.h" | |
| 23 #include "gpu/command_buffer/service/context_group.h" | |
| 24 #include "gpu/command_buffer/service/image_manager.h" | |
| 25 #include "gpu/command_buffer/service/memory_tracking.h" | |
| 26 #include "gpu/command_buffer/service/shader_translator_cache.h" | |
| 27 #include "gpu/command_buffer/service/transfer_buffer_manager.h" | |
| 28 #include "mojo/public/cpp/system/platform_handle.h" | |
| 29 #include "ui/gfx/buffer_format_util.h" | |
| 30 #include "ui/gfx/vsync_provider.h" | |
| 31 #include "ui/gl/gl_context.h" | |
| 32 #include "ui/gl/gl_image_shared_memory.h" | |
| 33 #include "ui/gl/gl_surface.h" | |
| 34 | |
| 35 namespace mus { | |
| 36 | |
| 37 namespace { | |
| 38 | |
// Source of unique CommandBufferId values; incremented in
// InitializeOnGpuThread() each time a CommandBufferDriver is created.
uint64_t g_next_command_buffer_id = 0;
| 40 | |
| 41 bool CreateAndMapSharedBuffer(size_t size, | |
| 42 mojo::ScopedSharedBufferMapping* mapping, | |
| 43 mojo::ScopedSharedBufferHandle* handle) { | |
| 44 *handle = mojo::SharedBufferHandle::Create(size); | |
| 45 if (!handle->is_valid()) | |
| 46 return false; | |
| 47 | |
| 48 *mapping = (*handle)->Map(size); | |
| 49 if (!*mapping) | |
| 50 return false; | |
| 51 | |
| 52 return true; | |
| 53 } | |
| 54 | |
// Trampoline used to bounce a closure onto |task_runner|; bound into
// callbacks handed to the driver so they complete on the client thread.
void PostTask(const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
              const base::Closure& callback) {
  task_runner->PostTask(FROM_HERE, callback);
}
| 59 } | |
| 60 | |
// CHROMIUM-extension usage token; the only usage value accepted by
// CreateGpuMemoryBufferImage() (enforced by a DCHECK there).
const unsigned int GL_READ_WRITE_CHROMIUM = 0x78F2;
| 62 | |
// Constructed on the client thread; GPU-side state (|driver_| etc.) is set
// up later by Initialize()/InitializeOnGpuThread().
CommandBufferLocal::CommandBufferLocal(CommandBufferLocalClient* client,
                                       gfx::AcceleratedWidget widget,
                                       const scoped_refptr<GpuState>& gpu_state)
    : widget_(widget),
      gpu_state_(gpu_state),
      client_(client),
      client_thread_task_runner_(base::ThreadTaskRunnerHandle::Get()),
      gpu_control_client_(nullptr),
      next_transfer_buffer_id_(0),
      next_image_id_(0),
      next_fence_sync_release_(1),  // 0 is reserved as "no release".
      flushed_fence_sync_release_(0),
      lost_context_(false),
      sync_point_client_waiter_(
          gpu_state->sync_point_manager()->CreateSyncPointClientWaiter()),
      weak_factory_(this) {
  // Cache the weak pointer so it can be handed to other threads later
  // without touching |weak_factory_| off the client thread.
  weak_ptr_ = weak_factory_.GetWeakPtr();
}
| 81 | |
// Tears this object down. Blocks the client thread until deletion has
// completed on the GPU thread (see DeleteOnGpuThread()).
void CommandBufferLocal::Destroy() {
  DCHECK(CalledOnValidThread());
  // After this |Destroy()| call, this object will not be used by client anymore
  // and it will be deleted on the GPU thread. So we have to detach it from the
  // client thread first.
  DetachFromThread();

  weak_factory_.InvalidateWeakPtrs();
  // CommandBufferLocal is initialized on the GPU thread with
  // InitializeOnGpuThread(), so we need to delete members on the GPU thread
  // too. Additionally we need to make sure we are deleted before returning,
  // otherwise we may attempt to use the AcceleratedWidget which has since been
  // destroyed.
  base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
                            base::WaitableEvent::InitialState::NOT_SIGNALED);
  gpu_state_->command_buffer_task_runner()->PostTask(
      driver_.get(), base::Bind(&CommandBufferLocal::DeleteOnGpuThread,
                                base::Unretained(this), &event));
  event.Wait();
}
| 102 | |
// Synchronously initializes the GPU-side driver. Returns true on success.
// Blocks the client thread until InitializeOnGpuThread() signals |event|.
bool CommandBufferLocal::Initialize() {
  DCHECK(CalledOnValidThread());
  base::ThreadRestrictions::ScopedAllowWait allow_wait;
  base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
                            base::WaitableEvent::InitialState::NOT_SIGNALED);
  bool result = false;
  // Posted to the raw task runner (not via driver_) because |driver_| does
  // not exist until InitializeOnGpuThread() creates it.
  gpu_state_->command_buffer_task_runner()->task_runner()->PostTask(
      FROM_HERE,
      base::Bind(&CommandBufferLocal::InitializeOnGpuThread,
                 base::Unretained(this), base::Unretained(&event),
                 base::Unretained(&result)));
  event.Wait();
  return result;
}
| 117 | |
// Returns the most recently cached command buffer state (no refresh).
gpu::CommandBuffer::State CommandBufferLocal::GetLastState() {
  DCHECK(CalledOnValidThread());
  return last_state_;
}
| 122 | |
// Refreshes |last_state_| from shared memory, then returns its token.
int32_t CommandBufferLocal::GetLastToken() {
  DCHECK(CalledOnValidThread());
  TryUpdateState();
  return last_state_.token;
}
| 128 | |
// Schedules processing of commands up to |put_offset| on the GPU thread.
// No-op if the put offset has not advanced since the last flush.
void CommandBufferLocal::Flush(int32_t put_offset) {
  DCHECK(CalledOnValidThread());
  if (last_put_offset_ == put_offset)
    return;

  last_put_offset_ = put_offset;
  gpu::SyncPointManager* sync_point_manager = gpu_state_->sync_point_manager();
  // Reserve a global processing-order number before posting so sync point
  // waits observe this flush in submission order.
  const uint32_t order_num =
      driver_->sync_point_order_data()->GenerateUnprocessedOrderNumber(
          sync_point_manager);
  gpu_state_->command_buffer_task_runner()->PostTask(
      driver_.get(), base::Bind(&CommandBufferLocal::FlushOnGpuThread,
                                base::Unretained(this), put_offset, order_num));
  // Every fence sync generated so far is now considered flushed.
  flushed_fence_sync_release_ = next_fence_sync_release_ - 1;
}
| 144 | |
// Ordering barrier is currently implemented as a full flush.
void CommandBufferLocal::OrderingBarrier(int32_t put_offset) {
  DCHECK(CalledOnValidThread());
  // TODO(penghuang): Implement this more efficiently.
  Flush(put_offset);
}
| 150 | |
// Spins (blocking on the GPU thread each iteration) until the token enters
// [start, end] or the context is lost.
void CommandBufferLocal::WaitForTokenInRange(int32_t start, int32_t end) {
  DCHECK(CalledOnValidThread());
  TryUpdateState();
  while (!InRange(start, end, last_state_.token) &&
         last_state_.error == gpu::error::kNoError) {
    MakeProgressAndUpdateState();
  }
}
| 159 | |
// Spins until the get offset enters [start, end] or the context is lost.
void CommandBufferLocal::WaitForGetOffsetInRange(int32_t start, int32_t end) {
  DCHECK(CalledOnValidThread());
  TryUpdateState();
  while (!InRange(start, end, last_state_.get_offset) &&
         last_state_.error == gpu::error::kNoError) {
    MakeProgressAndUpdateState();
  }
}
| 168 | |
// Switches the ring buffer the GPU reads commands from.
void CommandBufferLocal::SetGetBuffer(int32_t buffer) {
  DCHECK(CalledOnValidThread());
  gpu_state_->command_buffer_task_runner()->PostTask(
      driver_.get(), base::Bind(&CommandBufferLocal::SetGetBufferOnGpuThread,
                                base::Unretained(this), buffer));
  // Force the next Flush() to actually post, whatever offset it carries.
  last_put_offset_ = -1;
}
| 176 | |
| 177 scoped_refptr<gpu::Buffer> CommandBufferLocal::CreateTransferBuffer( | |
| 178 size_t size, | |
| 179 int32_t* id) { | |
| 180 DCHECK(CalledOnValidThread()); | |
| 181 if (size >= std::numeric_limits<uint32_t>::max()) | |
| 182 return nullptr; | |
| 183 | |
| 184 mojo::ScopedSharedBufferMapping mapping; | |
| 185 mojo::ScopedSharedBufferHandle handle; | |
| 186 if (!CreateAndMapSharedBuffer(size, &mapping, &handle)) { | |
| 187 if (last_state_.error == gpu::error::kNoError) | |
| 188 last_state_.error = gpu::error::kLostContext; | |
| 189 return nullptr; | |
| 190 } | |
| 191 | |
| 192 *id = ++next_transfer_buffer_id_; | |
| 193 | |
| 194 gpu_state_->command_buffer_task_runner()->PostTask( | |
| 195 driver_.get(), | |
| 196 base::Bind(&CommandBufferLocal::RegisterTransferBufferOnGpuThread, | |
| 197 base::Unretained(this), *id, base::Passed(&handle), | |
| 198 static_cast<uint32_t>(size))); | |
| 199 std::unique_ptr<gpu::BufferBacking> backing( | |
| 200 new mus::MojoBufferBacking(std::move(mapping), size)); | |
| 201 scoped_refptr<gpu::Buffer> buffer(new gpu::Buffer(std::move(backing))); | |
| 202 return buffer; | |
| 203 } | |
| 204 | |
// Unregisters transfer buffer |id| on the GPU thread (fire-and-forget).
void CommandBufferLocal::DestroyTransferBuffer(int32_t id) {
  DCHECK(CalledOnValidThread());
  gpu_state_->command_buffer_task_runner()->PostTask(
      driver_.get(),
      base::Bind(&CommandBufferLocal::DestroyTransferBufferOnGpuThread,
                 base::Unretained(this), id));
}
| 212 | |
| 213 void CommandBufferLocal::SetGpuControlClient(gpu::GpuControlClient* client) { | |
| 214 gpu_control_client_ = client; | |
| 215 } | |
| 216 | |
// Returns the capabilities captured from the driver during initialization.
gpu::Capabilities CommandBufferLocal::GetCapabilities() {
  DCHECK(CalledOnValidThread());
  return capabilities_;
}
| 221 | |
// Creates a GL image from |buffer| on the GPU thread and returns its id, or
// -1 if the buffer type is unsupported. Only shared-memory buffers (and,
// under Ozone, native pixmaps) are handled.
int32_t CommandBufferLocal::CreateImage(ClientBuffer buffer,
                                        size_t width,
                                        size_t height,
                                        unsigned internal_format) {
  DCHECK(CalledOnValidThread());
  int32_t new_id = ++next_image_id_;
  gfx::Size size(static_cast<int32_t>(width), static_cast<int32_t>(height));

  mus::MojoGpuMemoryBufferImpl* gpu_memory_buffer =
      mus::MojoGpuMemoryBufferImpl::FromClientBuffer(buffer);

  // Never set on the paths implemented below; kept for the TODO at the end.
  bool requires_sync_point = false;

  if (gpu_memory_buffer->GetBufferType() == gfx::SHARED_MEMORY_BUFFER) {
    gfx::GpuMemoryBufferHandle handle = gpu_memory_buffer->GetHandle();
    // TODO(rjkroege): Verify that this is required and update appropriately.
    base::SharedMemoryHandle dupd_handle =
        base::SharedMemory::DuplicateHandle(handle.handle);
#if defined(OS_WIN)
    HANDLE platform_file = dupd_handle.GetHandle();
#else
    int platform_file = dupd_handle.fd;
#endif

    // Wrap the duplicated platform file so ownership transfers to the GPU
    // thread with the posted task.
    mojo::ScopedHandle scoped_handle = mojo::WrapPlatformFile(platform_file);
    const int32_t format = static_cast<int32_t>(gpu_memory_buffer->GetFormat());
    gpu_state_->command_buffer_task_runner()->PostTask(
        driver_.get(),
        base::Bind(&CommandBufferLocal::CreateImageOnGpuThread,
                   base::Unretained(this), new_id, base::Passed(&scoped_handle),
                   handle.type, base::Passed(&size), format, internal_format));
#if defined(USE_OZONE)
  } else if (gpu_memory_buffer->GetBufferType() == gfx::OZONE_NATIVE_PIXMAP) {
    gpu_state_->command_buffer_task_runner()->PostTask(
        driver_.get(),
        base::Bind(&CommandBufferLocal::CreateImageNativeOzoneOnGpuThread,
                   base::Unretained(this), new_id,
                   gpu_memory_buffer->GetBufferType(),
                   gpu_memory_buffer->GetSize(), gpu_memory_buffer->GetFormat(),
                   internal_format,
                   base::RetainedRef(gpu_memory_buffer->GetNativePixmap())));
#endif
  } else {
    NOTIMPLEMENTED();
    return -1;
  }

  if (requires_sync_point) {
    NOTIMPLEMENTED() << "Require sync points";
    // TODO(jam): need to support this if we support types other than
    // SHARED_MEMORY_BUFFER.
    // gpu_memory_buffer_manager->SetDestructionSyncPoint(gpu_memory_buffer,
    //                                                    InsertSyncPoint());
  }

  return new_id;
}
| 279 | |
// Destroys image |id| on the GPU thread (fire-and-forget).
void CommandBufferLocal::DestroyImage(int32_t id) {
  DCHECK(CalledOnValidThread());
  gpu_state_->command_buffer_task_runner()->PostTask(
      driver_.get(), base::Bind(&CommandBufferLocal::DestroyImageOnGpuThread,
                                base::Unretained(this), id));
}
| 286 | |
// Allocates a scanout GpuMemoryBuffer of the requested dimensions and turns
// it into a GL image via CreateImage(). Returns -1 if allocation fails.
// Only GL_READ_WRITE_CHROMIUM usage is supported.
int32_t CommandBufferLocal::CreateGpuMemoryBufferImage(size_t width,
                                                       size_t height,
                                                       unsigned internal_format,
                                                       unsigned usage) {
  DCHECK(CalledOnValidThread());
  DCHECK_EQ(usage, static_cast<unsigned>(GL_READ_WRITE_CHROMIUM));
  std::unique_ptr<gfx::GpuMemoryBuffer> buffer(MojoGpuMemoryBufferImpl::Create(
      gfx::Size(static_cast<int>(width), static_cast<int>(height)),
      gpu::DefaultBufferFormatForImageFormat(internal_format),
      gfx::BufferUsage::SCANOUT));
  if (!buffer)
    return -1;
  return CreateImage(buffer->AsClientBuffer(), width, height, internal_format);
}
| 301 | |
// Not implemented; always returns -1.
int32_t CommandBufferLocal::GetImageGpuMemoryBufferId(unsigned image_id) {
  // TODO(erikchen): Once this class supports IOSurface GpuMemoryBuffer backed
  // images, it will also need to keep a local cache from image id to
  // GpuMemoryBuffer id.
  NOTIMPLEMENTED();
  return -1;
}
| 309 | |
// Requests that |callback| run (on the client thread) once query |query_id|
// completes; the thread hop happens in SignalQueryOnGpuThread().
void CommandBufferLocal::SignalQuery(uint32_t query_id,
                                     const base::Closure& callback) {
  DCHECK(CalledOnValidThread());

  gpu_state_->command_buffer_task_runner()->PostTask(
      driver_.get(), base::Bind(&CommandBufferLocal::SignalQueryOnGpuThread,
                                base::Unretained(this), query_id, callback));
}
| 318 | |
// Cross-thread locking is not supported by this command buffer.
void CommandBufferLocal::SetLock(base::Lock* lock) {
  DCHECK(CalledOnValidThread());
  NOTIMPLEMENTED();
}
| 323 | |
// Intentionally empty: nothing to make visible for an in-process buffer.
void CommandBufferLocal::EnsureWorkVisible() {
  // This is only relevant for out-of-process command buffers.
}
| 327 | |
// Sync tokens from this buffer live in the MOJO_LOCAL namespace.
gpu::CommandBufferNamespace CommandBufferLocal::GetNamespaceID() const {
  DCHECK(CalledOnValidThread());
  return gpu::CommandBufferNamespace::MOJO_LOCAL;
}
| 332 | |
// Delegates to the driver, which owns the id assigned at initialization.
gpu::CommandBufferId CommandBufferLocal::GetCommandBufferID() const {
  DCHECK(CalledOnValidThread());
  return driver_->GetCommandBufferID();
}
| 337 | |
// No extra data is carried by this command buffer type.
int32_t CommandBufferLocal::GetExtraCommandBufferData() const {
  DCHECK(CalledOnValidThread());
  return 0;
}
| 342 | |
// Hands out monotonically increasing fence sync release counts, starting
// at 1 (0 means "no release").
uint64_t CommandBufferLocal::GenerateFenceSyncRelease() {
  DCHECK(CalledOnValidThread());
  return next_fence_sync_release_++;
}
| 347 | |
// True if |release| was generated by this buffer (non-zero and already
// handed out by GenerateFenceSyncRelease()).
bool CommandBufferLocal::IsFenceSyncRelease(uint64_t release) {
  DCHECK(CalledOnValidThread());
  return release != 0 && release < next_fence_sync_release_;
}
| 352 | |
// True if |release| has been covered by a Flush() (see Flush(), which
// advances |flushed_fence_sync_release_|).
bool CommandBufferLocal::IsFenceSyncFlushed(uint64_t release) {
  DCHECK(CalledOnValidThread());
  return release != 0 && release <= flushed_fence_sync_release_;
}
| 357 | |
// In-process: a flushed sync is immediately considered received.
bool CommandBufferLocal::IsFenceSyncFlushReceived(uint64_t release) {
  DCHECK(CalledOnValidThread());
  return IsFenceSyncFlushed(release);
}
| 362 | |
// Runs |callback| on the client thread once |sync_token| is released. If the
// token's owner is gone or the release already happened, runs it right away.
void CommandBufferLocal::SignalSyncToken(const gpu::SyncToken& sync_token,
                                         const base::Closure& callback) {
  DCHECK(CalledOnValidThread());
  scoped_refptr<gpu::SyncPointClientState> release_state =
      gpu_state_->sync_point_manager()->GetSyncPointClientState(
          sync_token.namespace_id(), sync_token.command_buffer_id());
  if (!release_state ||
      release_state->IsFenceSyncReleased(sync_token.release_count())) {
    // Already satisfied (or unsatisfiable): invoke synchronously.
    callback.Run();
    return;
  }

  sync_point_client_waiter_->WaitOutOfOrderNonThreadSafe(
      release_state.get(), sync_token.release_count(),
      client_thread_task_runner_, callback);
}
| 379 | |
// Unverified sync tokens may only be waited on if they come from the same
// trusted MOJO_LOCAL namespace.
bool CommandBufferLocal::CanWaitUnverifiedSyncToken(
    const gpu::SyncToken* sync_token) {
  DCHECK(CalledOnValidThread());
  // Right now, MOJO_LOCAL is only used by trusted code, so it is safe to wait
  // on a sync token in MOJO_LOCAL command buffer.
  return sync_token->namespace_id() == gpu::CommandBufferNamespace::MOJO_LOCAL;
}
| 387 | |
// Driver callback (GPU thread, presumably — TODO confirm): detaches this
// object from the driver and forwards the loss to the client thread.
void CommandBufferLocal::DidLoseContext(uint32_t reason) {
  if (client_) {
    // Stop receiving further driver callbacks before notifying the client.
    driver_->set_client(nullptr);
    client_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&CommandBufferLocal::DidLoseContextOnClientThread,
                   weak_ptr_, reason));
  }
}
| 397 | |
// Driver callback: bounces new vsync timing to the client thread.
void CommandBufferLocal::UpdateVSyncParameters(
    const base::TimeTicks& timebase,
    const base::TimeDelta& interval) {
  if (client_) {
    client_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&CommandBufferLocal::UpdateVSyncParametersOnClientThread,
                   weak_ptr_, timebase, interval));
  }
}
| 408 | |
// Driver callback: bounces the swap-completion result to the client thread.
void CommandBufferLocal::OnGpuCompletedSwapBuffers(gfx::SwapResult result) {
  if (client_) {
    client_thread_task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&CommandBufferLocal::OnGpuCompletedSwapBuffersOnClientThread,
                   weak_ptr_, result));
  }
}
| 417 | |
// Private: destruction happens via Destroy() -> DeleteOnGpuThread().
CommandBufferLocal::~CommandBufferLocal() {}
| 419 | |
// Refreshes |last_state_| from the shared-memory state unless the context
// is already in error (errors are sticky).
void CommandBufferLocal::TryUpdateState() {
  if (last_state_.error == gpu::error::kNoError)
    shared_state()->Read(&last_state_);
}
| 424 | |
// Synchronously fetches the authoritative state from the GPU thread. Used by
// the WaitFor*InRange() loops to block until the GPU makes progress.
void CommandBufferLocal::MakeProgressAndUpdateState() {
  base::ThreadRestrictions::ScopedAllowWait allow_wait;
  base::WaitableEvent event(base::WaitableEvent::ResetPolicy::MANUAL,
                            base::WaitableEvent::InitialState::NOT_SIGNALED);
  gpu::CommandBuffer::State state;
  gpu_state_->command_buffer_task_runner()->PostTask(
      driver_.get(),
      base::Bind(&CommandBufferLocal::MakeProgressOnGpuThread,
                 base::Unretained(this), base::Unretained(&event),
                 base::Unretained(&state)));
  event.Wait();
  // Accept the new state only if its generation is not older than ours,
  // tolerating wrap-around of the 32-bit generation counter.
  if (state.generation - last_state_.generation < 0x80000000U)
    last_state_ = state;
}
| 439 | |
| 440 void CommandBufferLocal::InitializeOnGpuThread(base::WaitableEvent* event, | |
| 441 bool* result) { | |
| 442 driver_.reset(new CommandBufferDriver( | |
| 443 gpu::CommandBufferNamespace::MOJO_LOCAL, | |
| 444 gpu::CommandBufferId::FromUnsafeValue(++g_next_command_buffer_id), | |
| 445 widget_, gpu_state_)); | |
| 446 driver_->set_client(this); | |
| 447 const size_t kSharedStateSize = sizeof(gpu::CommandBufferSharedState); | |
| 448 mojo::ScopedSharedBufferMapping mapping; | |
| 449 mojo::ScopedSharedBufferHandle handle; | |
| 450 *result = CreateAndMapSharedBuffer(kSharedStateSize, &shared_state_, &handle); | |
| 451 | |
| 452 if (!*result) { | |
| 453 event->Signal(); | |
| 454 return; | |
| 455 } | |
| 456 | |
| 457 shared_state()->Initialize(); | |
| 458 | |
| 459 *result = | |
| 460 driver_->Initialize(std::move(handle), mojo::Array<int32_t>::New(0)); | |
| 461 if (*result) | |
| 462 capabilities_ = driver_->GetCapabilities(); | |
| 463 event->Signal(); | |
| 464 } | |
| 465 | |
// GPU-thread half of Flush(). Returns whether the flush ran to completion;
// the task runner re-queues the task while this returns false.
bool CommandBufferLocal::FlushOnGpuThread(int32_t put_offset,
                                          uint32_t order_num) {
  DCHECK(driver_->IsScheduled());
  driver_->sync_point_order_data()->BeginProcessingOrderNumber(order_num);
  driver_->Flush(put_offset);

  // Return false if the Flush is not finished, so the CommandBufferTaskRunner
  // will not remove this task from the task queue.
  const bool complete = !driver_->HasUnprocessedCommands();
  if (complete)
    driver_->sync_point_order_data()->FinishProcessingOrderNumber(order_num);
  return complete;
}
| 479 | |
// GPU-thread half of SetGetBuffer(); always completes in one pass.
bool CommandBufferLocal::SetGetBufferOnGpuThread(int32_t buffer) {
  DCHECK(driver_->IsScheduled());
  driver_->SetGetBuffer(buffer);
  return true;
}
| 485 | |
// GPU-thread half of CreateTransferBuffer(); registers the shared handle
// with the driver. Always completes in one pass.
bool CommandBufferLocal::RegisterTransferBufferOnGpuThread(
    int32_t id,
    mojo::ScopedSharedBufferHandle transfer_buffer,
    uint32_t size) {
  DCHECK(driver_->IsScheduled());
  driver_->RegisterTransferBuffer(id, std::move(transfer_buffer), size);
  return true;
}
| 494 | |
// GPU-thread half of DestroyTransferBuffer(); always completes in one pass.
bool CommandBufferLocal::DestroyTransferBufferOnGpuThread(int32_t id) {
  DCHECK(driver_->IsScheduled());
  driver_->DestroyTransferBuffer(id);
  return true;
}
| 500 | |
| 501 bool CommandBufferLocal::CreateImageOnGpuThread( | |
| 502 int32_t id, | |
| 503 mojo::ScopedHandle memory_handle, | |
| 504 int32_t type, | |
| 505 const gfx::Size& size, | |
| 506 int32_t format, | |
| 507 int32_t internal_format) { | |
| 508 DCHECK(driver_->IsScheduled()); | |
| 509 driver_->CreateImage(id, std::move(memory_handle), type, std::move(size), | |
| 510 format, internal_format); | |
| 511 return true; | |
| 512 } | |
| 513 | |
// GPU-thread half of CreateImage() for Ozone native pixmaps; always
// completes in one pass. |pixmap| is kept alive by base::RetainedRef at the
// posting site.
bool CommandBufferLocal::CreateImageNativeOzoneOnGpuThread(
    int32_t id,
    int32_t type,
    gfx::Size size,
    gfx::BufferFormat format,
    uint32_t internal_format,
    ui::NativePixmap* pixmap) {
  DCHECK(driver_->IsScheduled());
  driver_->CreateImageNativeOzone(id, type, size, format, internal_format,
                                  pixmap);
  return true;
}
| 526 | |
// GPU-thread half of DestroyImage(); always completes in one pass.
bool CommandBufferLocal::DestroyImageOnGpuThread(int32_t id) {
  DCHECK(driver_->IsScheduled());
  driver_->DestroyImage(id);
  return true;
}
| 532 | |
// GPU-thread half of MakeProgressAndUpdateState(): writes the current state
// into caller-owned |*state| and wakes the blocked client thread.
bool CommandBufferLocal::MakeProgressOnGpuThread(
    base::WaitableEvent* event,
    gpu::CommandBuffer::State* state) {
  DCHECK(driver_->IsScheduled());
  *state = driver_->GetLastState();
  event->Signal();
  return true;
}
| 541 | |
// GPU-thread half of Destroy(). |event| lives on the stack of Destroy(),
// which is blocked in Wait(), so signaling it after |delete this| is safe.
bool CommandBufferLocal::DeleteOnGpuThread(base::WaitableEvent* event) {
  delete this;
  event->Signal();
  return true;
}
| 547 | |
// GPU-thread half of SignalQuery(): registers a completion callback that is
// trampolined back to the client thread via PostTask().
bool CommandBufferLocal::SignalQueryOnGpuThread(uint32_t query_id,
                                                const base::Closure& callback) {
  // |callback| should run on the client thread.
  driver_->SignalQuery(
      query_id, base::Bind(&PostTask, client_thread_task_runner_, callback));
  return true;
}
| 555 | |
// Client-thread half of DidLoseContext(): notifies the GpuControlClient at
// most once (|lost_context_| makes the notification idempotent).
void CommandBufferLocal::DidLoseContextOnClientThread(uint32_t reason) {
  DCHECK(gpu_control_client_);
  if (!lost_context_)
    gpu_control_client_->OnGpuControlLostContext();
  lost_context_ = true;
}
| 562 | |
// Client-thread half of UpdateVSyncParameters().
void CommandBufferLocal::UpdateVSyncParametersOnClientThread(
    const base::TimeTicks& timebase,
    const base::TimeDelta& interval) {
  if (client_)
    client_->UpdateVSyncParameters(timebase, interval);
}
| 569 | |
// Client-thread half of OnGpuCompletedSwapBuffers().
void CommandBufferLocal::OnGpuCompletedSwapBuffersOnClientThread(
    gfx::SwapResult result) {
  if (client_)
    client_->GpuCompletedSwapBuffers(result);
}
| 575 | |
| 576 } // namespace mus | |
| OLD | NEW |