OLD | NEW |
(Empty) | |
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #include "services/ui/gles2/command_buffer_driver.h" |
| 6 |
| 7 #include <stddef.h> |
| 8 #include <utility> |
| 9 |
| 10 #include "base/bind.h" |
| 11 #include "base/memory/shared_memory.h" |
| 12 #include "base/threading/thread_task_runner_handle.h" |
| 13 #include "build/build_config.h" |
| 14 #include "gpu/command_buffer/common/gpu_memory_buffer_support.h" |
| 15 #include "gpu/command_buffer/service/command_buffer_service.h" |
| 16 #include "gpu/command_buffer/service/command_executor.h" |
| 17 #include "gpu/command_buffer/service/context_group.h" |
| 18 #include "gpu/command_buffer/service/gles2_cmd_decoder.h" |
| 19 #include "gpu/command_buffer/service/image_manager.h" |
| 20 #include "gpu/command_buffer/service/mailbox_manager.h" |
| 21 #include "gpu/command_buffer/service/query_manager.h" |
| 22 #include "gpu/command_buffer/service/sync_point_manager.h" |
| 23 #include "gpu/command_buffer/service/transfer_buffer_manager.h" |
| 24 #include "mojo/public/cpp/system/platform_handle.h" |
| 25 #include "services/ui/common/mojo_buffer_backing.h" |
| 26 #include "services/ui/gles2/gl_surface_adapter.h" |
| 27 #include "services/ui/gles2/gpu_memory_tracker.h" |
| 28 #include "services/ui/gles2/gpu_state.h" |
| 29 #include "ui/gfx/buffer_format_util.h" |
| 30 #include "ui/gfx/gpu_memory_buffer.h" |
| 31 #include "ui/gfx/vsync_provider.h" |
| 32 #include "ui/gl/gl_context.h" |
| 33 #include "ui/gl/gl_image_shared_memory.h" |
| 34 #include "ui/gl/gl_surface.h" |
| 35 #include "ui/gl/init/gl_factory.h" |
| 36 |
| 37 #if defined(USE_OZONE) |
| 38 #include "ui/ozone/gl/gl_image_ozone_native_pixmap.h" |
| 39 #endif |
| 40 |
| 41 namespace ui { |
| 42 |
namespace {

// Delays, in milliseconds, used when scheduling PollWork()/PerformWork()
// (see ScheduleDelayedWork()).
//
// The first time polling a fence, delay some extra time to allow other
// stubs to process some work, or else the timing of the fences could
// allow a pattern of alternating fast and slow frames to occur.
const int64_t kHandleMoreWorkPeriodMs = 2;
const int64_t kHandleMoreWorkPeriodBusyMs = 1;

// Prevents idle work from being starved: PerformWork() forces an idle
// pass once this much time has elapsed since the last idle moment.
const int64_t kMaxTimeSinceIdleMs = 10;

}  // namespace
| 55 |
| 56 CommandBufferDriver::Client::~Client() {} |
| 57 |
// Constructs a driver for the given command buffer identity. |widget| may
// be gfx::kNullAcceleratedWidget, in which case Initialize() creates an
// offscreen surface instead of a view surface.
CommandBufferDriver::CommandBufferDriver(
    gpu::CommandBufferNamespace command_buffer_namespace,
    gpu::CommandBufferId command_buffer_id,
    gfx::AcceleratedWidget widget,
    scoped_refptr<GpuState> gpu_state)
    : command_buffer_namespace_(command_buffer_namespace),
      command_buffer_id_(command_buffer_id),
      widget_(widget),
      client_(nullptr),
      gpu_state_(gpu_state),
      previous_processed_num_(0),
      weak_factory_(this) {
  // Must be constructed on the GpuState's command buffer thread; all
  // later calls are CalledOnValidThread()-checked against it.
  DCHECK_EQ(base::ThreadTaskRunnerHandle::Get(),
            gpu_state_->command_buffer_task_runner()->task_runner());
}
| 73 |
CommandBufferDriver::~CommandBufferDriver() {
  DCHECK(CalledOnValidThread());
  // Tears down the decoder and unregisters from the driver manager.
  DestroyDecoder();
}
| 78 |
// Creates the GL surface and context for |widget_| (or an offscreen
// surface when there is no widget), then builds the service-side command
// buffer machinery: ContextGroup, CommandBufferService, GLES2Decoder,
// CommandExecutor and the sync point client. Returns false if any step
// fails; on success the driver is registered with the driver manager.
bool CommandBufferDriver::Initialize(
    mojo::ScopedSharedBufferHandle shared_state,
    mojo::Array<int32_t> attribs) {
  DCHECK(CalledOnValidThread());
  gpu::gles2::ContextCreationAttribHelper attrib_helper;
  if (!attrib_helper.Parse(attribs.storage()))
    return false;
  // TODO(piman): attribs can't currently represent gpu_preference.

  const bool offscreen = widget_ == gfx::kNullAcceleratedWidget;
  if (offscreen) {
    surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size(0, 0));
  } else {
#if defined(USE_OZONE)
    // On Ozone prefer a surfaceless surface, falling back to a regular
    // view surface when surfaceless is unavailable.
    scoped_refptr<gl::GLSurface> underlying_surface =
        gl::init::CreateSurfacelessViewGLSurface(widget_);
    if (!underlying_surface)
      underlying_surface = gl::init::CreateViewGLSurface(widget_);
#else
    scoped_refptr<gl::GLSurface> underlying_surface =
        gl::init::CreateViewGLSurface(widget_);
#endif
    // Wrap the surface so OnGpuCompletedSwapBuffers() is invoked when the
    // GPU finishes each swap.
    scoped_refptr<GLSurfaceAdapterMus> surface_adapter =
        new GLSurfaceAdapterMus(underlying_surface);
    surface_adapter->SetGpuCompletedSwapBuffersCallback(
        base::Bind(&CommandBufferDriver::OnGpuCompletedSwapBuffers,
                   weak_factory_.GetWeakPtr()));
    surface_ = surface_adapter;

    // Forward vsync timing to the client if the surface exposes a
    // provider.
    gfx::VSyncProvider* vsync_provider =
        surface_ ? surface_->GetVSyncProvider() : nullptr;
    if (vsync_provider) {
      vsync_provider->GetVSyncParameters(
          base::Bind(&CommandBufferDriver::OnUpdateVSyncParameters,
                     weak_factory_.GetWeakPtr()));
    }
  }

  if (!surface_.get())
    return false;

  // TODO(piman): virtual contexts.
  context_ = gl::init::CreateGLContext(
      gpu_state_->share_group(), surface_.get(), attrib_helper.gpu_preference);
  if (!context_.get())
    return false;

  if (!context_->MakeCurrent(surface_.get()))
    return false;

  // TODO(piman): ShaderTranslatorCache is currently per-ContextGroup but
  // only needs to be per-thread.
  const bool bind_generates_resource = attrib_helper.bind_generates_resource;
  scoped_refptr<gpu::gles2::FeatureInfo> feature_info =
      new gpu::gles2::FeatureInfo(gpu_state_->gpu_driver_bug_workarounds());
  // TODO(erikchen): The ContextGroup needs a reference to the
  // GpuMemoryBufferManager.
  scoped_refptr<gpu::gles2::ContextGroup> context_group =
      new gpu::gles2::ContextGroup(
          gpu_state_->gpu_preferences(), gpu_state_->mailbox_manager(),
          new GpuMemoryTracker,
          new gpu::gles2::ShaderTranslatorCache(gpu_state_->gpu_preferences()),
          new gpu::gles2::FramebufferCompletenessCache, feature_info,
          bind_generates_resource, nullptr);

  command_buffer_.reset(
      new gpu::CommandBufferService(context_group->transfer_buffer_manager()));

  decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group.get()));
  executor_.reset(new gpu::CommandExecutor(command_buffer_.get(),
                                           decoder_.get(), decoder_.get()));
  sync_point_order_data_ = gpu::SyncPointOrderData::Create();
  sync_point_client_ = gpu_state_->sync_point_manager()->CreateSyncPointClient(
      sync_point_order_data_, GetNamespaceID(), command_buffer_id_);
  decoder_->set_engine(executor_.get());
  // base::Unretained(this) is safe for decoder callbacks: |decoder_| is
  // owned by this object and reset in DestroyDecoder() before |this| dies.
  decoder_->SetFenceSyncReleaseCallback(base::Bind(
      &CommandBufferDriver::OnFenceSyncRelease, base::Unretained(this)));
  decoder_->SetWaitFenceSyncCallback(base::Bind(
      &CommandBufferDriver::OnWaitFenceSync, base::Unretained(this)));
  decoder_->SetDescheduleUntilFinishedCallback(base::Bind(
      &CommandBufferDriver::OnDescheduleUntilFinished, base::Unretained(this)));
  decoder_->SetRescheduleAfterFinishedCallback(base::Bind(
      &CommandBufferDriver::OnRescheduleAfterFinished, base::Unretained(this)));

  gpu::gles2::DisallowedFeatures disallowed_features;

  if (!decoder_->Initialize(surface_, context_, offscreen, disallowed_features,
                            attrib_helper))
    return false;

  command_buffer_->SetPutOffsetChangeCallback(base::Bind(
      &gpu::CommandExecutor::PutChanged, base::Unretained(executor_.get())));
  command_buffer_->SetGetBufferChangeCallback(base::Bind(
      &gpu::CommandExecutor::SetGetBuffer, base::Unretained(executor_.get())));
  command_buffer_->SetParseErrorCallback(
      base::Bind(&CommandBufferDriver::OnParseError, base::Unretained(this)));

  // TODO(piman): other callbacks

  // Map the client-provided shared-state buffer; MojoBufferBacking
  // validates that the handle is at least kSize bytes.
  const size_t kSize = sizeof(gpu::CommandBufferSharedState);
  std::unique_ptr<gpu::BufferBacking> backing(
      MojoBufferBacking::Create(std::move(shared_state), kSize));
  if (!backing)
    return false;

  command_buffer_->SetSharedStateBuffer(std::move(backing));
  gpu_state_->driver_manager()->AddDriver(this);
  return true;
}
| 188 |
// Selects which registered transfer buffer commands are read from.
void CommandBufferDriver::SetGetBuffer(int32_t buffer) {
  DCHECK(CalledOnValidThread());
  command_buffer_->SetGetBuffer(buffer);
}
| 193 |
// Advances the put offset (executing newly submitted commands) and then
// schedules pending/idle follow-up work.
void CommandBufferDriver::Flush(int32_t put_offset) {
  DCHECK(CalledOnValidThread());
  // A failed MakeCurrent() has already marked the context lost and
  // notified the client, so the flush is simply dropped.
  if (!MakeCurrent())
    return;

  command_buffer_->Flush(put_offset);
  ProcessPendingAndIdleWork();
}
| 202 |
// Registers a client-supplied shared buffer under |id| so commands can
// reference it for bulk data transfer.
void CommandBufferDriver::RegisterTransferBuffer(
    int32_t id,
    mojo::ScopedSharedBufferHandle transfer_buffer,
    uint32_t size) {
  DCHECK(CalledOnValidThread());
  // Take ownership of the memory and map it into this process.
  // This validates the size.
  std::unique_ptr<gpu::BufferBacking> backing(
      MojoBufferBacking::Create(std::move(transfer_buffer), size));
  if (!backing) {
    // Mapping failure is logged but otherwise silently ignored here; the
    // client will observe the missing buffer through the command stream.
    DVLOG(0) << "Failed to map shared memory.";
    return;
  }
  command_buffer_->RegisterTransferBuffer(id, std::move(backing));
}
| 218 |
// Unregisters the transfer buffer previously registered under |id|.
void CommandBufferDriver::DestroyTransferBuffer(int32_t id) {
  DCHECK(CalledOnValidThread());
  command_buffer_->DestroyTransferBuffer(id);
}
| 223 |
// Creates a GL image of |size| backed by the shared memory carried in
// |memory_handle| and registers it with the decoder's ImageManager under
// |id|. Only gfx::SHARED_MEMORY_BUFFER is supported; all other types hit
// NOTIMPLEMENTED(). Errors are logged and the request dropped.
void CommandBufferDriver::CreateImage(int32_t id,
                                      mojo::ScopedHandle memory_handle,
                                      int32_t type,
                                      const gfx::Size& size,
                                      int32_t format,
                                      int32_t internal_format) {
  DCHECK(CalledOnValidThread());
  if (!MakeCurrent())
    return;

  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  if (image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image already exists with same ID.";
    return;
  }

  // Validate the requested buffer format against the decoder's
  // capabilities, the image size, and the GL internal format.
  gfx::BufferFormat gpu_format = static_cast<gfx::BufferFormat>(format);
  if (!gpu::IsGpuMemoryBufferFormatSupported(gpu_format,
                                             decoder_->GetCapabilities())) {
    LOG(ERROR) << "Format is not supported.";
    return;
  }

  if (!gpu::IsImageSizeValidForGpuMemoryBufferFormat(size, gpu_format)) {
    LOG(ERROR) << "Invalid image size for format.";
    return;
  }

  if (!gpu::IsImageFormatCompatibleWithGpuMemoryBufferFormat(internal_format,
                                                             gpu_format)) {
    LOG(ERROR) << "Incompatible image format.";
    return;
  }

  if (type != gfx::SHARED_MEMORY_BUFFER) {
    NOTIMPLEMENTED();
    return;
  }

  // Extract the platform file from the mojo handle so it can be wrapped
  // in a platform shared-memory handle below.
  base::PlatformFile platform_file;
  MojoResult unwrap_result = mojo::UnwrapPlatformFile(std::move(memory_handle),
                                                      &platform_file);
  if (unwrap_result != MOJO_RESULT_OK) {
    NOTREACHED();
    return;
  }

#if defined(OS_WIN)
  base::SharedMemoryHandle handle(platform_file, base::GetCurrentProcId());
#else
  // |auto_close| is false: the descriptor's lifetime is managed by the
  // image initialization below.
  base::FileDescriptor handle(platform_file, false);
#endif

  scoped_refptr<gl::GLImageSharedMemory> image =
      new gl::GLImageSharedMemory(size, internal_format);
  // TODO(jam): also need a mojo enum for this enum
  if (!image->Initialize(
          handle, gfx::GpuMemoryBufferId(id), gpu_format, 0,
          gfx::RowSizeForBufferFormat(size.width(), gpu_format, 0))) {
    NOTREACHED();
    return;
  }

  image_manager->AddImage(image.get(), id);
}
| 289 |
| 290 // TODO(rjkroege): It is conceivable that this code belongs in |
| 291 // ozone_gpu_memory_buffer.cc |
| 292 void CommandBufferDriver::CreateImageNativeOzone(int32_t id, |
| 293 int32_t type, |
| 294 gfx::Size size, |
| 295 gfx::BufferFormat format, |
| 296 uint32_t internal_format, |
| 297 ui::NativePixmap* pixmap) { |
| 298 #if defined(USE_OZONE) |
| 299 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager(); |
| 300 if (image_manager->LookupImage(id)) { |
| 301 LOG(ERROR) << "Image already exists with same ID."; |
| 302 return; |
| 303 } |
| 304 |
| 305 scoped_refptr<ui::GLImageOzoneNativePixmap> image = |
| 306 new ui::GLImageOzoneNativePixmap(size, internal_format); |
| 307 if (!image->Initialize(pixmap, format)) { |
| 308 NOTREACHED(); |
| 309 return; |
| 310 } |
| 311 |
| 312 image_manager->AddImage(image.get(), id); |
| 313 #endif |
| 314 } |
| 315 |
// Removes the image registered under |id| from the decoder's
// ImageManager. Logs and returns if no such image exists.
void CommandBufferDriver::DestroyImage(int32_t id) {
  DCHECK(CalledOnValidThread());
  gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager();
  if (!image_manager->LookupImage(id)) {
    LOG(ERROR) << "Image with ID doesn't exist.";
    return;
  }
  // A current context is required before removing the image.
  if (!MakeCurrent())
    return;
  image_manager->RemoveImage(id);
}
| 327 |
// Returns whether the executor is currently scheduled, i.e. not
// descheduled by a fence-sync wait or a deschedule-until-finished
// request.
bool CommandBufferDriver::IsScheduled() const {
  DCHECK(CalledOnValidThread());
  DCHECK(executor_);
  return executor_->scheduled();
}
| 333 |
| 334 bool CommandBufferDriver::HasUnprocessedCommands() const { |
| 335 DCHECK(CalledOnValidThread()); |
| 336 if (command_buffer_) { |
| 337 gpu::CommandBuffer::State state = GetLastState(); |
| 338 return command_buffer_->GetPutOffset() != state.get_offset && |
| 339 !gpu::error::IsError(state.error); |
| 340 } |
| 341 return false; |
| 342 } |
| 343 |
// Returns the decoder's reported GL capabilities.
gpu::Capabilities CommandBufferDriver::GetCapabilities() const {
  DCHECK(CalledOnValidThread());
  return decoder_->GetCapabilities();
}
| 348 |
// Returns the command buffer's last published state snapshot.
gpu::CommandBuffer::State CommandBufferDriver::GetLastState() const {
  DCHECK(CalledOnValidThread());
  return command_buffer_->GetLastState();
}
| 353 |
// Returns the highest order number that has been enqueued but not yet
// processed, per the sync point ordering data.
uint32_t CommandBufferDriver::GetUnprocessedOrderNum() const {
  DCHECK(CalledOnValidThread());
  return sync_point_order_data_->unprocessed_order_num();
}
| 358 |
// Returns the highest order number that has been fully processed, per
// the sync point ordering data.
uint32_t CommandBufferDriver::GetProcessedOrderNum() const {
  DCHECK(CalledOnValidThread());
  return sync_point_order_data_->processed_order_num();
}
| 363 |
// Makes the decoder's GL context current. On failure the context-lost
// reason is recorded on the command buffer, the buffer is poisoned with
// kLostContext, and the client is notified via OnContextLost(). Returns
// false when there is no decoder or MakeCurrent failed.
bool CommandBufferDriver::MakeCurrent() {
  DCHECK(CalledOnValidThread());
  if (!decoder_)
    return false;
  if (decoder_->MakeCurrent())
    return true;
  DLOG(ERROR) << "Context lost because MakeCurrent failed.";
  gpu::error::ContextLostReason reason =
      static_cast<gpu::error::ContextLostReason>(
          decoder_->GetContextLostReason());
  command_buffer_->SetContextLostReason(reason);
  command_buffer_->SetParseError(gpu::error::kLostContext);
  OnContextLost(reason);
  return false;
}
| 379 |
// Processes any completed queries immediately and schedules a delayed
// pass for remaining pending/idle work.
void CommandBufferDriver::ProcessPendingAndIdleWork() {
  DCHECK(CalledOnValidThread());
  executor_->ProcessPendingQueries();
  ScheduleDelayedWork(
      base::TimeDelta::FromMilliseconds(kHandleMoreWorkPeriodMs));
}
| 386 |
// Schedules PollWork() to run after |delay| if the executor still has
// pending queries or idle work. At most one delayed task is in flight:
// if one is already scheduled, only |process_delayed_work_time_| is
// refreshed and PollWork() re-posts itself as needed.
void CommandBufferDriver::ScheduleDelayedWork(base::TimeDelta delay) {
  DCHECK(CalledOnValidThread());
  const bool has_more_work =
      executor_->HasPendingQueries() || executor_->HasMoreIdleWork();
  if (!has_more_work) {
    // Nothing left to do; clear the idle timestamp so the next burst of
    // work starts a fresh idle measurement.
    last_idle_time_ = base::TimeTicks();
    return;
  }

  const base::TimeTicks current_time = base::TimeTicks::Now();
  // |process_delayed_work_time_| is set if processing of delayed work is
  // already scheduled. Just update the time if already scheduled.
  if (!process_delayed_work_time_.is_null()) {
    process_delayed_work_time_ = current_time + delay;
    return;
  }

  // Idle when no messages are processed between now and when PollWork is
  // called.
  previous_processed_num_ =
      gpu_state_->driver_manager()->GetProcessedOrderNum();

  if (last_idle_time_.is_null())
    last_idle_time_ = current_time;

  // scheduled() returns true after passing all unschedule fences and this is
  // when we can start performing idle work. Idle work is done synchronously
  // so we can set delay to 0 and instead poll for more work at the rate idle
  // work is performed. This also ensures that idle work is done as
  // efficiently as possible without any unnecessary delays.
  if (executor_->scheduled() && executor_->HasMoreIdleWork())
    delay = base::TimeDelta();

  process_delayed_work_time_ = current_time + delay;
  gpu_state_->command_buffer_task_runner()->task_runner()->PostDelayedTask(
      FROM_HERE,
      base::Bind(&CommandBufferDriver::PollWork, weak_factory_.GetWeakPtr()),
      delay);
}
| 426 |
// Delayed-task target: re-posts itself until |process_delayed_work_time_|
// has been reached (the deadline may have been pushed back by
// ScheduleDelayedWork() in the meantime), then runs PerformWork().
void CommandBufferDriver::PollWork() {
  DCHECK(CalledOnValidThread());
  // Post another delayed task if we have not yet reached the time at which
  // we should process delayed work.
  base::TimeTicks current_time = base::TimeTicks::Now();
  DCHECK(!process_delayed_work_time_.is_null());
  if (process_delayed_work_time_ > current_time) {
    gpu_state_->command_buffer_task_runner()->task_runner()->PostDelayedTask(
        FROM_HERE,
        base::Bind(&CommandBufferDriver::PollWork, weak_factory_.GetWeakPtr()),
        process_delayed_work_time_ - current_time);
    return;
  }
  // Clearing the deadline lets ScheduleDelayedWork() post a new task.
  process_delayed_work_time_ = base::TimeTicks();
  PerformWork();
}
| 443 |
// Runs idle work when the driver appears idle (or has been starved of
// idleness longer than kMaxTimeSinceIdleMs), processes pending queries,
// and reschedules itself at the "busy" cadence.
void CommandBufferDriver::PerformWork() {
  DCHECK(CalledOnValidThread());
  if (!MakeCurrent())
    return;

  if (executor_) {
    const uint32_t current_unprocessed_num =
        gpu_state_->driver_manager()->GetUnprocessedOrderNum();
    // We're idle when no messages were processed or scheduled.
    bool is_idle = (previous_processed_num_ == current_unprocessed_num);
    if (!is_idle && !last_idle_time_.is_null()) {
      base::TimeDelta time_since_idle =
          base::TimeTicks::Now() - last_idle_time_;
      base::TimeDelta max_time_since_idle =
          base::TimeDelta::FromMilliseconds(kMaxTimeSinceIdleMs);
      // Force idle when it's been too long since last time we were idle.
      if (time_since_idle > max_time_since_idle)
        is_idle = true;
    }

    if (is_idle) {
      last_idle_time_ = base::TimeTicks::Now();
      executor_->PerformIdleWork();
    }
    executor_->ProcessPendingQueries();
  }

  ScheduleDelayedWork(
      base::TimeDelta::FromMilliseconds(kHandleMoreWorkPeriodBusyMs));
}
| 474 |
// Destroys the decoder, first unregistering this driver and attempting
// to make the context current so GL-side resources can be released
// (Destroy() is told whether a context was available).
void CommandBufferDriver::DestroyDecoder() {
  DCHECK(CalledOnValidThread());
  if (decoder_) {
    gpu_state_->driver_manager()->RemoveDriver(this);
    bool have_context = decoder_->MakeCurrent();
    decoder_->Destroy(have_context);
    decoder_.reset();
  }
}
| 484 |
// Forwards vsync timing updates (requested in Initialize()) to the
// client, if one is attached.
void CommandBufferDriver::OnUpdateVSyncParameters(
    const base::TimeTicks timebase,
    const base::TimeDelta interval) {
  DCHECK(CalledOnValidThread());
  if (client_)
    client_->UpdateVSyncParameters(timebase, interval);
}
| 492 |
// Decoder callback: releases the fence sync for |release| unless it has
// already been observed as released.
void CommandBufferDriver::OnFenceSyncRelease(uint64_t release) {
  DCHECK(CalledOnValidThread());
  if (!sync_point_client_->client_state()->IsFenceSyncReleased(release))
    sync_point_client_->ReleaseFenceSync(release);
}
| 498 |
// Decoder callback: the command stream wants to wait on a fence sync
// owned by (|namespace_id|, |command_buffer_id|). If the releasing
// command buffer no longer exists, returns true (nothing to wait for).
// Otherwise deschedules the executor and registers a wait that
// reschedules it when |release| is signaled; the return value is the
// executor's scheduled state after arranging the wait (true if the wait
// resolved synchronously).
bool CommandBufferDriver::OnWaitFenceSync(
    gpu::CommandBufferNamespace namespace_id,
    gpu::CommandBufferId command_buffer_id,
    uint64_t release) {
  DCHECK(CalledOnValidThread());
  DCHECK(IsScheduled());
  gpu::SyncPointManager* sync_point_manager = gpu_state_->sync_point_manager();
  DCHECK(sync_point_manager);

  scoped_refptr<gpu::SyncPointClientState> release_state =
      sync_point_manager->GetSyncPointClientState(namespace_id,
                                                  command_buffer_id);

  if (!release_state)
    return true;

  executor_->SetScheduled(false);
  sync_point_client_->Wait(release_state.get(), release,
                           base::Bind(&gpu::CommandExecutor::SetScheduled,
                                      executor_->AsWeakPtr(), true));
  return executor_->scheduled();
}
| 521 |
// Decoder callback: deschedules the executor until its outstanding idle
// work finishes. Requires that the executor is scheduled and actually
// has idle work left (both DCHECKed).
void CommandBufferDriver::OnDescheduleUntilFinished() {
  DCHECK(CalledOnValidThread());
  DCHECK(IsScheduled());
  DCHECK(executor_->HasMoreIdleWork());

  executor_->SetScheduled(false);
}
| 529 |
// Decoder callback: re-schedules the executor after a
// deschedule-until-finished request completes. The executor must
// currently be descheduled (DCHECKed).
void CommandBufferDriver::OnRescheduleAfterFinished() {
  DCHECK(CalledOnValidThread());
  DCHECK(!executor_->scheduled());

  executor_->SetScheduled(true);
}
| 536 |
// Command-buffer callback invoked on a parse error: propagates the
// recorded context-lost reason to the client.
void CommandBufferDriver::OnParseError() {
  DCHECK(CalledOnValidThread());
  gpu::CommandBuffer::State state = GetLastState();
  OnContextLost(state.context_lost_reason);
}
| 542 |
// Notifies the attached client (if any) that the context was lost,
// passing the gpu::error::ContextLostReason as an integer.
void CommandBufferDriver::OnContextLost(uint32_t reason) {
  DCHECK(CalledOnValidThread());
  if (client_)
    client_->DidLoseContext(reason);
}
| 548 |
| 549 void CommandBufferDriver::SignalQuery(uint32_t query_id, |
| 550 const base::Closure& callback) { |
| 551 DCHECK(CalledOnValidThread()); |
| 552 |
| 553 gpu::gles2::QueryManager* query_manager = decoder_->GetQueryManager(); |
| 554 gpu::gles2::QueryManager::Query* query = query_manager->GetQuery(query_id); |
| 555 if (query) |
| 556 query->AddCallback(callback); |
| 557 else |
| 558 callback.Run(); |
| 559 } |
| 560 |
// Relays GPU swap-completion notifications from the GLSurfaceAdapterMus
// wrapper (hooked up in Initialize()) to the client, if one is attached.
void CommandBufferDriver::OnGpuCompletedSwapBuffers(gfx::SwapResult result) {
  DCHECK(CalledOnValidThread());
  if (client_) {
    client_->OnGpuCompletedSwapBuffers(result);
  }
}
| 567 |
| 568 } // namespace ui |
OLD | NEW |