| OLD | NEW |
| 1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "gpu/ipc/in_process_command_buffer.h" | 5 #include "gpu/ipc/in_process_command_buffer.h" |
| 6 | 6 |
| 7 #include <stddef.h> | 7 #include <stddef.h> |
| 8 #include <stdint.h> | 8 #include <stdint.h> |
| 9 | 9 |
| 10 #include <queue> | 10 #include <queue> |
| 11 #include <set> | 11 #include <set> |
| 12 #include <utility> | 12 #include <utility> |
| 13 | 13 |
| 14 #include "base/bind.h" | 14 #include "base/bind.h" |
| 15 #include "base/bind_helpers.h" | 15 #include "base/bind_helpers.h" |
| 16 #include "base/command_line.h" | 16 #include "base/command_line.h" |
| 17 #include "base/lazy_instance.h" | 17 #include "base/lazy_instance.h" |
| 18 #include "base/location.h" | 18 #include "base/location.h" |
| 19 #include "base/logging.h" | 19 #include "base/logging.h" |
| 20 #include "base/memory/ptr_util.h" |
| 20 #include "base/memory/weak_ptr.h" | 21 #include "base/memory/weak_ptr.h" |
| 21 #include "base/numerics/safe_conversions.h" | 22 #include "base/numerics/safe_conversions.h" |
| 22 #include "base/sequence_checker.h" | 23 #include "base/sequence_checker.h" |
| 23 #include "base/single_thread_task_runner.h" | 24 #include "base/single_thread_task_runner.h" |
| 24 #include "base/threading/thread_task_runner_handle.h" | 25 #include "base/threading/thread_task_runner_handle.h" |
| 25 #include "gpu/command_buffer/client/gpu_control_client.h" | 26 #include "gpu/command_buffer/client/gpu_control_client.h" |
| 26 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" | 27 #include "gpu/command_buffer/client/gpu_memory_buffer_manager.h" |
| 27 #include "gpu/command_buffer/common/gpu_memory_buffer_support.h" | 28 #include "gpu/command_buffer/common/gpu_memory_buffer_support.h" |
| 28 #include "gpu/command_buffer/common/sync_token.h" | 29 #include "gpu/command_buffer/common/sync_token.h" |
| 29 #include "gpu/command_buffer/service/command_buffer_service.h" | 30 #include "gpu/command_buffer/service/command_buffer_service.h" |
| 30 #include "gpu/command_buffer/service/command_executor.h" | 31 #include "gpu/command_buffer/service/command_executor.h" |
| 31 #include "gpu/command_buffer/service/context_group.h" | 32 #include "gpu/command_buffer/service/context_group.h" |
| 32 #include "gpu/command_buffer/service/gl_context_virtual.h" | 33 #include "gpu/command_buffer/service/gl_context_virtual.h" |
| 33 #include "gpu/command_buffer/service/gpu_preferences.h" | 34 #include "gpu/command_buffer/service/gpu_preferences.h" |
| 34 #include "gpu/command_buffer/service/image_factory.h" | 35 #include "gpu/command_buffer/service/image_factory.h" |
| 35 #include "gpu/command_buffer/service/image_manager.h" | 36 #include "gpu/command_buffer/service/image_manager.h" |
| 36 #include "gpu/command_buffer/service/mailbox_manager.h" | 37 #include "gpu/command_buffer/service/mailbox_manager.h" |
| 37 #include "gpu/command_buffer/service/memory_program_cache.h" | 38 #include "gpu/command_buffer/service/memory_program_cache.h" |
| 38 #include "gpu/command_buffer/service/memory_tracking.h" | 39 #include "gpu/command_buffer/service/memory_tracking.h" |
| 39 #include "gpu/command_buffer/service/query_manager.h" | 40 #include "gpu/command_buffer/service/query_manager.h" |
| 40 #include "gpu/command_buffer/service/service_utils.h" | 41 #include "gpu/command_buffer/service/service_utils.h" |
| 41 #include "gpu/command_buffer/service/sync_point_manager.h" | 42 #include "gpu/command_buffer/service/sync_point_manager.h" |
| 42 #include "gpu/command_buffer/service/transfer_buffer_manager.h" | 43 #include "gpu/command_buffer/service/transfer_buffer_manager.h" |
| 44 #include "gpu/ipc/gpu_in_process_thread_service.h" |
| 45 #include "gpu/ipc/service/image_transport_surface.h" |
| 43 #include "ui/gfx/geometry/size.h" | 46 #include "ui/gfx/geometry/size.h" |
| 44 #include "ui/gl/gl_context.h" | 47 #include "ui/gl/gl_context.h" |
| 45 #include "ui/gl/gl_image.h" | 48 #include "ui/gl/gl_image.h" |
| 46 #include "ui/gl/gl_image_shared_memory.h" | 49 #include "ui/gl/gl_image_shared_memory.h" |
| 47 #include "ui/gl/gl_share_group.h" | 50 #include "ui/gl/gl_share_group.h" |
| 48 #include "ui/gl/init/gl_factory.h" | 51 #include "ui/gl/init/gl_factory.h" |
| 49 | 52 |
| 50 #if defined(OS_WIN) | 53 #if defined(OS_WIN) |
| 51 #include <windows.h> | 54 #include <windows.h> |
| 52 #include "base/process/process_handle.h" | 55 #include "base/process/process_handle.h" |
| 53 #endif | 56 #endif |
| 54 | 57 |
| 55 namespace gpu { | 58 namespace gpu { |
| 56 | 59 |
| 57 namespace { | 60 namespace { |
| 58 | 61 |
| 59 base::StaticAtomicSequenceNumber g_next_command_buffer_id; | 62 base::StaticAtomicSequenceNumber g_next_command_buffer_id; |
| 60 | 63 |
| 61 template <typename T> | 64 template <typename T> |
| 62 static void RunTaskWithResult(base::Callback<T(void)> task, | 65 static void RunTaskWithResult(base::Callback<T(void)> task, |
| 63 T* result, | 66 T* result, |
| 64 base::WaitableEvent* completion) { | 67 base::WaitableEvent* completion) { |
| 65 *result = task.Run(); | 68 *result = task.Run(); |
| 66 completion->Signal(); | 69 completion->Signal(); |
| 67 } | 70 } |
| 68 | 71 |
| 69 struct ScopedOrderNumberProcessor { | 72 class GpuInProcessThreadHolder : public base::Thread { |
| 70 ScopedOrderNumberProcessor(SyncPointOrderData* order_data, uint32_t order_num) | 73 public: |
| 71 : order_data_(order_data), order_num_(order_num) { | 74 GpuInProcessThreadHolder() |
| 72 order_data_->BeginProcessingOrderNumber(order_num_); | 75 : base::Thread("GpuThread"), |
| 76 sync_point_manager_(new SyncPointManager(false)) { |
| 77 Start(); |
| 73 } | 78 } |
| 74 | 79 |
| 75 ~ScopedOrderNumberProcessor() { | 80 ~GpuInProcessThreadHolder() override { Stop(); } |
| 76 order_data_->FinishProcessingOrderNumber(order_num_); | 81 |
| 82 const scoped_refptr<InProcessCommandBuffer::Service>& GetGpuThreadService() { |
| 83 if (!gpu_thread_service_) { |
| 84 gpu_thread_service_ = new GpuInProcessThreadService( |
| 85 task_runner(), sync_point_manager_.get(), nullptr, nullptr); |
| 86 } |
| 87 return gpu_thread_service_; |
| 77 } | 88 } |
| 78 | 89 |
| 79 private: | 90 private: |
| 80 SyncPointOrderData* order_data_; | 91 std::unique_ptr<SyncPointManager> sync_point_manager_; |
| 81 uint32_t order_num_; | 92 scoped_refptr<InProcessCommandBuffer::Service> gpu_thread_service_; |
| 82 }; | |
| 83 | |
| 84 struct GpuInProcessThreadHolder { | |
| 85 GpuInProcessThreadHolder() | |
| 86 : sync_point_manager(new SyncPointManager(false)), | |
| 87 gpu_thread(new GpuInProcessThread(sync_point_manager.get())) {} | |
| 88 std::unique_ptr<SyncPointManager> sync_point_manager; | |
| 89 scoped_refptr<InProcessCommandBuffer::Service> gpu_thread; | |
| 90 }; | 93 }; |
| 91 | 94 |
| 92 base::LazyInstance<GpuInProcessThreadHolder> g_default_service = | 95 base::LazyInstance<GpuInProcessThreadHolder> g_default_service = |
| 93 LAZY_INSTANCE_INITIALIZER; | 96 LAZY_INSTANCE_INITIALIZER; |
| 94 | 97 |
| 95 class ScopedEvent { | 98 class ScopedEvent { |
| 96 public: | 99 public: |
| 97 explicit ScopedEvent(base::WaitableEvent* event) : event_(event) {} | 100 explicit ScopedEvent(base::WaitableEvent* event) : event_(event) {} |
| 98 ~ScopedEvent() { event_->Signal(); } | 101 ~ScopedEvent() { event_->Signal(); } |
| 99 | 102 |
| (...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 134 if (service) | 137 if (service) |
| 135 return service; | 138 return service; |
| 136 | 139 |
| 137 // Call base::ThreadTaskRunnerHandle::IsSet() to ensure that it is | 140 // Call base::ThreadTaskRunnerHandle::IsSet() to ensure that it is |
| 138 // instantiated before we create the GPU thread, otherwise shutdown order will | 141 // instantiated before we create the GPU thread, otherwise shutdown order will |
| 139 // delete the ThreadTaskRunnerHandle before the GPU thread's message loop, | 142 // delete the ThreadTaskRunnerHandle before the GPU thread's message loop, |
| 140 // and when the message loop is shut down, it will recreate | 143 // and when the message loop is shut down, it will recreate |
| 141 // ThreadTaskRunnerHandle, which will re-add a new task to the AtExitManager, | 144 // ThreadTaskRunnerHandle, which will re-add a new task to the AtExitManager, |
| 142 // which causes a deadlock because it's already locked. | 145 // which causes a deadlock because it's already locked. |
| 143 base::ThreadTaskRunnerHandle::IsSet(); | 146 base::ThreadTaskRunnerHandle::IsSet(); |
| 144 return g_default_service.Get().gpu_thread; | 147 return g_default_service.Get().GetGpuThreadService(); |
| 145 } | 148 } |
| 146 | 149 |
| 147 } // anonymous namespace | 150 } // anonymous namespace |
| 148 | 151 |
| 149 InProcessCommandBuffer::Service::Service() | 152 InProcessCommandBuffer::Service::Service() |
| 150 : gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()) {} | 153 : gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()) {} |
| 151 | 154 |
| 152 InProcessCommandBuffer::Service::Service(const GpuPreferences& gpu_preferences) | 155 InProcessCommandBuffer::Service::Service(const GpuPreferences& gpu_preferences) |
| 153 : gpu_preferences_(gpu_preferences), | 156 : gpu_preferences_(gpu_preferences), |
| 154 gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()) {} | 157 gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()) {} |
| 155 | 158 |
| 159 InProcessCommandBuffer::Service::Service( |
| 160 gpu::gles2::MailboxManager* mailbox_manager, |
| 161 scoped_refptr<gl::GLShareGroup> share_group) |
| 162 : gpu_driver_bug_workarounds_(base::CommandLine::ForCurrentProcess()), |
| 163 mailbox_manager_(mailbox_manager), |
| 164 share_group_(share_group) {} |
| 165 |
| 156 InProcessCommandBuffer::Service::~Service() {} | 166 InProcessCommandBuffer::Service::~Service() {} |
| 157 | 167 |
| 158 const gpu::GpuPreferences& InProcessCommandBuffer::Service::gpu_preferences() { | 168 const gpu::GpuPreferences& InProcessCommandBuffer::Service::gpu_preferences() { |
| 159 return gpu_preferences_; | 169 return gpu_preferences_; |
| 160 } | 170 } |
| 161 | 171 |
| 162 const gpu::GpuDriverBugWorkarounds& | 172 const gpu::GpuDriverBugWorkarounds& |
| 163 InProcessCommandBuffer::Service::gpu_driver_bug_workarounds() { | 173 InProcessCommandBuffer::Service::gpu_driver_bug_workarounds() { |
| 164 return gpu_driver_bug_workarounds_; | 174 return gpu_driver_bug_workarounds_; |
| 165 } | 175 } |
| (...skipping 113 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 279 &capabilities, share_group, image_factory); | 289 &capabilities, share_group, image_factory); |
| 280 | 290 |
| 281 base::Callback<bool(void)> init_task = | 291 base::Callback<bool(void)> init_task = |
| 282 base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread, | 292 base::Bind(&InProcessCommandBuffer::InitializeOnGpuThread, |
| 283 base::Unretained(this), params); | 293 base::Unretained(this), params); |
| 284 | 294 |
| 285 base::WaitableEvent completion( | 295 base::WaitableEvent completion( |
| 286 base::WaitableEvent::ResetPolicy::MANUAL, | 296 base::WaitableEvent::ResetPolicy::MANUAL, |
| 287 base::WaitableEvent::InitialState::NOT_SIGNALED); | 297 base::WaitableEvent::InitialState::NOT_SIGNALED); |
| 288 bool result = false; | 298 bool result = false; |
| 289 QueueTask( | 299 QueueTask(true, base::Bind(&RunTaskWithResult<bool>, init_task, &result, |
| 290 base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion)); | 300 &completion)); |
| 291 completion.Wait(); | 301 completion.Wait(); |
| 292 | 302 |
| 293 gpu_memory_buffer_manager_ = gpu_memory_buffer_manager; | 303 gpu_memory_buffer_manager_ = gpu_memory_buffer_manager; |
| 294 | 304 |
| 295 if (result) | 305 if (result) |
| 296 capabilities_ = capabilities; | 306 capabilities_ = capabilities; |
| 297 | 307 |
| 298 return result; | 308 return result; |
| 299 } | 309 } |
| 300 | 310 |
| (...skipping 12 matching lines...) Expand all Loading... |
| 313 &InProcessCommandBuffer::PumpCommandsOnGpuThread, gpu_thread_weak_ptr_)); | 323 &InProcessCommandBuffer::PumpCommandsOnGpuThread, gpu_thread_weak_ptr_)); |
| 314 command_buffer->SetParseErrorCallback(base::Bind( | 324 command_buffer->SetParseErrorCallback(base::Bind( |
| 315 &InProcessCommandBuffer::OnContextLostOnGpuThread, gpu_thread_weak_ptr_)); | 325 &InProcessCommandBuffer::OnContextLostOnGpuThread, gpu_thread_weak_ptr_)); |
| 316 | 326 |
| 317 gl_share_group_ = params.context_group ? params.context_group->gl_share_group_ | 327 gl_share_group_ = params.context_group ? params.context_group->gl_share_group_ |
| 318 : service_->share_group(); | 328 : service_->share_group(); |
| 319 | 329 |
| 320 bool bind_generates_resource = false; | 330 bool bind_generates_resource = false; |
| 321 scoped_refptr<gles2::FeatureInfo> feature_info = | 331 scoped_refptr<gles2::FeatureInfo> feature_info = |
| 322 new gles2::FeatureInfo(service_->gpu_driver_bug_workarounds()); | 332 new gles2::FeatureInfo(service_->gpu_driver_bug_workarounds()); |
| 323 decoder_.reset(gles2::GLES2Decoder::Create( | 333 |
| 334 context_group_ = |
| 324 params.context_group | 335 params.context_group |
| 325 ? params.context_group->decoder_->GetContextGroup() | 336 ? params.context_group->decoder_->GetContextGroup() |
| 326 : new gles2::ContextGroup( | 337 : new gles2::ContextGroup( |
| 327 service_->gpu_preferences(), service_->mailbox_manager(), NULL, | 338 service_->gpu_preferences(), service_->mailbox_manager(), NULL, |
| 328 service_->shader_translator_cache(), | 339 service_->shader_translator_cache(), |
| 329 service_->framebuffer_completeness_cache(), feature_info, | 340 service_->framebuffer_completeness_cache(), feature_info, |
| 330 bind_generates_resource, nullptr, nullptr))); | 341 bind_generates_resource, nullptr, nullptr); |
| 342 |
| 343 decoder_.reset(gles2::GLES2Decoder::Create(context_group_.get())); |
| 331 | 344 |
| 332 executor_.reset(new CommandExecutor(command_buffer.get(), decoder_.get(), | 345 executor_.reset(new CommandExecutor(command_buffer.get(), decoder_.get(), |
| 333 decoder_.get())); | 346 decoder_.get())); |
| 334 command_buffer->SetGetBufferChangeCallback(base::Bind( | 347 command_buffer->SetGetBufferChangeCallback(base::Bind( |
| 335 &CommandExecutor::SetGetBuffer, base::Unretained(executor_.get()))); | 348 &CommandExecutor::SetGetBuffer, base::Unretained(executor_.get()))); |
| 336 command_buffer_ = std::move(command_buffer); | 349 command_buffer_ = std::move(command_buffer); |
| 337 | 350 |
| 338 decoder_->set_engine(executor_.get()); | 351 decoder_->set_engine(executor_.get()); |
| 339 | 352 |
| 340 if (!surface_.get()) { | 353 if (!surface_.get()) { |
| 341 if (params.is_offscreen) | 354 if (params.is_offscreen) { |
| 342 surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size()); | 355 surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size()); |
| 343 else | 356 } else { |
| 344 surface_ = gl::init::CreateViewGLSurface(params.window); | 357 surface_ = ImageTransportSurface::CreateNativeSurface( |
| 358 gpu_thread_weak_ptr_factory_.GetWeakPtr(), params.window, |
| 359 gl::GLSurface::SURFACE_DEFAULT); |
| 360 if (!surface_ || !surface_->Initialize(gl::GLSurface::SURFACE_DEFAULT)) { |
| 361 surface_ = nullptr; |
| 362 DLOG(ERROR) << "Failed to create surface."; |
| 363 return false; |
| 364 } |
| 365 } |
| 345 } | 366 } |
| 346 | 367 |
| 347 if (!surface_.get()) { | 368 if (!surface_.get()) { |
| 348 LOG(ERROR) << "Could not create GLSurface."; | 369 LOG(ERROR) << "Could not create GLSurface."; |
| 349 DestroyOnGpuThread(); | 370 DestroyOnGpuThread(); |
| 350 return false; | 371 return false; |
| 351 } | 372 } |
| 352 | 373 |
| 353 sync_point_order_data_ = SyncPointOrderData::Create(); | 374 sync_point_order_data_ = SyncPointOrderData::Create(); |
| 354 sync_point_client_ = service_->sync_point_manager()->CreateSyncPointClient( | 375 sync_point_client_ = service_->sync_point_manager()->CreateSyncPointClient( |
| (...skipping 82 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 437 void InProcessCommandBuffer::Destroy() { | 458 void InProcessCommandBuffer::Destroy() { |
| 438 CheckSequencedThread(); | 459 CheckSequencedThread(); |
| 439 client_thread_weak_ptr_factory_.InvalidateWeakPtrs(); | 460 client_thread_weak_ptr_factory_.InvalidateWeakPtrs(); |
| 440 gpu_control_client_ = nullptr; | 461 gpu_control_client_ = nullptr; |
| 441 base::WaitableEvent completion( | 462 base::WaitableEvent completion( |
| 442 base::WaitableEvent::ResetPolicy::MANUAL, | 463 base::WaitableEvent::ResetPolicy::MANUAL, |
| 443 base::WaitableEvent::InitialState::NOT_SIGNALED); | 464 base::WaitableEvent::InitialState::NOT_SIGNALED); |
| 444 bool result = false; | 465 bool result = false; |
| 445 base::Callback<bool(void)> destroy_task = base::Bind( | 466 base::Callback<bool(void)> destroy_task = base::Bind( |
| 446 &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this)); | 467 &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this)); |
| 447 QueueTask( | 468 QueueTask(false, base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, |
| 448 base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion)); | 469 &completion)); |
| 449 completion.Wait(); | 470 completion.Wait(); |
| 450 } | 471 } |
| 451 | 472 |
| 452 bool InProcessCommandBuffer::DestroyOnGpuThread() { | 473 bool InProcessCommandBuffer::DestroyOnGpuThread() { |
| 453 CheckSequencedThread(); | 474 CheckSequencedThread(); |
| 454 gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs(); | 475 gpu_thread_weak_ptr_factory_.InvalidateWeakPtrs(); |
| 455 command_buffer_.reset(); | 476 command_buffer_.reset(); |
| 456 // Clean up GL resources if possible. | 477 // Clean up GL resources if possible. |
| 457 bool have_context = context_.get() && context_->MakeCurrent(surface_.get()); | 478 bool have_context = context_.get() && context_->MakeCurrent(surface_.get()); |
| 458 if (decoder_) { | 479 if (decoder_) { |
| (...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 497 } | 518 } |
| 498 | 519 |
| 499 CommandBuffer::State InProcessCommandBuffer::GetStateFast() { | 520 CommandBuffer::State InProcessCommandBuffer::GetStateFast() { |
| 500 CheckSequencedThread(); | 521 CheckSequencedThread(); |
| 501 base::AutoLock lock(state_after_last_flush_lock_); | 522 base::AutoLock lock(state_after_last_flush_lock_); |
| 502 if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U) | 523 if (state_after_last_flush_.generation - last_state_.generation < 0x80000000U) |
| 503 last_state_ = state_after_last_flush_; | 524 last_state_ = state_after_last_flush_; |
| 504 return last_state_; | 525 return last_state_; |
| 505 } | 526 } |
| 506 | 527 |
| 528 void InProcessCommandBuffer::QueueTask(bool out_of_order, |
| 529 const base::Closure& task) { |
| 530 if (out_of_order) { |
| 531 service_->ScheduleTask(task); |
| 532 return; |
| 533 } |
| 534 base::AutoLock lock(task_queue_lock_); |
| 535 SyncPointManager* sync_manager = service_->sync_point_manager(); |
| 536 uint32_t order_num = |
| 537 sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager); |
| 538 task_queue_.push(base::MakeUnique<GpuTask>(task, order_num)); |
| 539 service_->ScheduleTask( |
| 540 base::Bind(&InProcessCommandBuffer::ProcessTasksOnGpuThread, |
| 541 base::Unretained(this))); |
| 542 } |
| 543 |
| 544 void InProcessCommandBuffer::ProcessTasksOnGpuThread() { |
| 545 while (executor_->scheduled()) { |
| 546 base::AutoLock lock(task_queue_lock_); |
| 547 if (task_queue_.empty()) |
| 548 break; |
| 549 GpuTask* task = task_queue_.front().get(); |
| 550 sync_point_order_data_->BeginProcessingOrderNumber(task->order_number); |
| 551 task->callback.Run(); |
| 552 if (!executor_->scheduled()) { |
| 553 sync_point_order_data_->PauseProcessingOrderNumber(task->order_number); |
| 554 return; |
| 555 } |
| 556 sync_point_order_data_->FinishProcessingOrderNumber(task->order_number); |
| 557 task_queue_.pop(); |
| 558 } |
| 559 } |
| 560 |
| 507 CommandBuffer::State InProcessCommandBuffer::GetLastState() { | 561 CommandBuffer::State InProcessCommandBuffer::GetLastState() { |
| 508 CheckSequencedThread(); | 562 CheckSequencedThread(); |
| 509 return last_state_; | 563 return last_state_; |
| 510 } | 564 } |
| 511 | 565 |
| 512 int32_t InProcessCommandBuffer::GetLastToken() { | 566 int32_t InProcessCommandBuffer::GetLastToken() { |
| 513 CheckSequencedThread(); | 567 CheckSequencedThread(); |
| 514 GetStateFast(); | 568 GetStateFast(); |
| 515 return last_state_.token; | 569 return last_state_.token; |
| 516 } | 570 } |
| 517 | 571 |
| 518 void InProcessCommandBuffer::FlushOnGpuThread(int32_t put_offset, | 572 void InProcessCommandBuffer::FlushOnGpuThread(int32_t put_offset) { |
| 519 uint32_t order_num) { | |
| 520 CheckSequencedThread(); | 573 CheckSequencedThread(); |
| 521 ScopedEvent handle_flush(&flush_event_); | 574 ScopedEvent handle_flush(&flush_event_); |
| 522 base::AutoLock lock(command_buffer_lock_); | 575 base::AutoLock lock(command_buffer_lock_); |
| 523 | 576 |
| 524 { | 577 { |
| 525 ScopedOrderNumberProcessor scoped_order_num(sync_point_order_data_.get(), | |
| 526 order_num); | |
| 527 command_buffer_->Flush(put_offset); | 578 command_buffer_->Flush(put_offset); |
| 528 { | 579 { |
| 529 // Update state before signaling the flush event. | 580 // Update state before signaling the flush event. |
| 530 base::AutoLock lock(state_after_last_flush_lock_); | 581 base::AutoLock lock(state_after_last_flush_lock_); |
| 531 state_after_last_flush_ = command_buffer_->GetLastState(); | 582 state_after_last_flush_ = command_buffer_->GetLastState(); |
| 532 } | 583 } |
| 533 | |
| 534 // Currently the in process command buffer does not support being | |
| 535 // descheduled, if it does we would need to back off on calling the finish | |
| 536 // processing number function until the message is rescheduled and finished | |
| 537 // processing. This DCHECK is to enforce this. | |
| 538 DCHECK(error::IsError(state_after_last_flush_.error) || | |
| 539 put_offset == state_after_last_flush_.get_offset); | |
| 540 } | 584 } |
| 541 | 585 |
| 542 // If we've processed all pending commands but still have pending queries, | 586 // If we've processed all pending commands but still have pending queries, |
| 543 // pump idle work until the query is passed. | 587 // pump idle work until the query is passed. |
| 544 if (put_offset == state_after_last_flush_.get_offset && | 588 if (put_offset == state_after_last_flush_.get_offset && |
| 545 (executor_->HasMoreIdleWork() || executor_->HasPendingQueries())) { | 589 (executor_->HasMoreIdleWork() || executor_->HasPendingQueries())) { |
| 546 ScheduleDelayedWorkOnGpuThread(); | 590 ScheduleDelayedWorkOnGpuThread(); |
| 547 } | 591 } |
| 548 } | 592 } |
| 549 | 593 |
| (...skipping 21 matching lines...) Expand all Loading... |
| 571 } | 615 } |
| 572 | 616 |
| 573 void InProcessCommandBuffer::Flush(int32_t put_offset) { | 617 void InProcessCommandBuffer::Flush(int32_t put_offset) { |
| 574 CheckSequencedThread(); | 618 CheckSequencedThread(); |
| 575 if (last_state_.error != gpu::error::kNoError) | 619 if (last_state_.error != gpu::error::kNoError) |
| 576 return; | 620 return; |
| 577 | 621 |
| 578 if (last_put_offset_ == put_offset) | 622 if (last_put_offset_ == put_offset) |
| 579 return; | 623 return; |
| 580 | 624 |
| 581 SyncPointManager* sync_manager = service_->sync_point_manager(); | |
| 582 const uint32_t order_num = | |
| 583 sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager); | |
| 584 last_put_offset_ = put_offset; | 625 last_put_offset_ = put_offset; |
| 585 base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread, | 626 base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread, |
| 586 gpu_thread_weak_ptr_, put_offset, order_num); | 627 gpu_thread_weak_ptr_, put_offset); |
| 587 QueueTask(task); | 628 QueueTask(false, task); |
| 588 | 629 |
| 589 flushed_fence_sync_release_ = next_fence_sync_release_ - 1; | 630 flushed_fence_sync_release_ = next_fence_sync_release_ - 1; |
| 590 } | 631 } |
| 591 | 632 |
| 592 void InProcessCommandBuffer::OrderingBarrier(int32_t put_offset) { | 633 void InProcessCommandBuffer::OrderingBarrier(int32_t put_offset) { |
| 593 Flush(put_offset); | 634 Flush(put_offset); |
| 594 } | 635 } |
| 595 | 636 |
| 596 void InProcessCommandBuffer::WaitForTokenInRange(int32_t start, int32_t end) { | 637 void InProcessCommandBuffer::WaitForTokenInRange(int32_t start, int32_t end) { |
| 597 CheckSequencedThread(); | 638 CheckSequencedThread(); |
| 598 while (!InRange(start, end, GetLastToken()) && | 639 while (!InRange(start, end, GetLastToken()) && |
| 599 last_state_.error == gpu::error::kNoError) | 640 last_state_.error == gpu::error::kNoError) { |
| 600 flush_event_.Wait(); | 641 flush_event_.Wait(); |
| 642 } |
| 601 } | 643 } |
| 602 | 644 |
| 603 void InProcessCommandBuffer::WaitForGetOffsetInRange(int32_t start, | 645 void InProcessCommandBuffer::WaitForGetOffsetInRange(int32_t start, |
| 604 int32_t end) { | 646 int32_t end) { |
| 605 CheckSequencedThread(); | 647 CheckSequencedThread(); |
| 606 | 648 |
| 607 GetStateFast(); | 649 GetStateFast(); |
| 608 while (!InRange(start, end, last_state_.get_offset) && | 650 while (!InRange(start, end, last_state_.get_offset) && |
| 609 last_state_.error == gpu::error::kNoError) { | 651 last_state_.error == gpu::error::kNoError) { |
| 610 flush_event_.Wait(); | 652 flush_event_.Wait(); |
| 611 GetStateFast(); | 653 GetStateFast(); |
| 612 } | 654 } |
| 613 } | 655 } |
| 614 | 656 |
| 615 void InProcessCommandBuffer::SetGetBuffer(int32_t shm_id) { | 657 void InProcessCommandBuffer::SetGetBuffer(int32_t shm_id) { |
| 616 CheckSequencedThread(); | 658 CheckSequencedThread(); |
| 617 if (last_state_.error != gpu::error::kNoError) | 659 if (last_state_.error != gpu::error::kNoError) |
| 618 return; | 660 return; |
| 619 | 661 |
| 620 base::WaitableEvent completion( | 662 base::WaitableEvent completion( |
| 621 base::WaitableEvent::ResetPolicy::MANUAL, | 663 base::WaitableEvent::ResetPolicy::MANUAL, |
| 622 base::WaitableEvent::InitialState::NOT_SIGNALED); | 664 base::WaitableEvent::InitialState::NOT_SIGNALED); |
| 623 base::Closure task = | 665 base::Closure task = |
| 624 base::Bind(&InProcessCommandBuffer::SetGetBufferOnGpuThread, | 666 base::Bind(&InProcessCommandBuffer::SetGetBufferOnGpuThread, |
| 625 base::Unretained(this), shm_id, &completion); | 667 base::Unretained(this), shm_id, &completion); |
| 626 QueueTask(task); | 668 QueueTask(false, task); |
| 627 completion.Wait(); | 669 completion.Wait(); |
| 628 | 670 |
| 629 { | 671 { |
| 630 base::AutoLock lock(state_after_last_flush_lock_); | 672 base::AutoLock lock(state_after_last_flush_lock_); |
| 631 state_after_last_flush_ = command_buffer_->GetLastState(); | 673 state_after_last_flush_ = command_buffer_->GetLastState(); |
| 632 } | 674 } |
| 633 } | 675 } |
| 634 | 676 |
| 635 void InProcessCommandBuffer::SetGetBufferOnGpuThread( | 677 void InProcessCommandBuffer::SetGetBufferOnGpuThread( |
| 636 int32_t shm_id, | 678 int32_t shm_id, |
| (...skipping 11 matching lines...) Expand all Loading... |
| 648 base::AutoLock lock(command_buffer_lock_); | 690 base::AutoLock lock(command_buffer_lock_); |
| 649 return command_buffer_->CreateTransferBuffer(size, id); | 691 return command_buffer_->CreateTransferBuffer(size, id); |
| 650 } | 692 } |
| 651 | 693 |
| 652 void InProcessCommandBuffer::DestroyTransferBuffer(int32_t id) { | 694 void InProcessCommandBuffer::DestroyTransferBuffer(int32_t id) { |
| 653 CheckSequencedThread(); | 695 CheckSequencedThread(); |
| 654 base::Closure task = | 696 base::Closure task = |
| 655 base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread, | 697 base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread, |
| 656 base::Unretained(this), id); | 698 base::Unretained(this), id); |
| 657 | 699 |
| 658 QueueTask(task); | 700 QueueTask(false, task); |
| 659 } | 701 } |
| 660 | 702 |
| 661 void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32_t id) { | 703 void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32_t id) { |
| 662 base::AutoLock lock(command_buffer_lock_); | 704 base::AutoLock lock(command_buffer_lock_); |
| 663 command_buffer_->DestroyTransferBuffer(id); | 705 command_buffer_->DestroyTransferBuffer(id); |
| 664 } | 706 } |
| 665 | 707 |
| 666 void InProcessCommandBuffer::SetGpuControlClient(GpuControlClient* client) { | 708 void InProcessCommandBuffer::SetGpuControlClient(GpuControlClient* client) { |
| 667 gpu_control_client_ = client; | 709 gpu_control_client_ = client; |
| 668 } | 710 } |
| (...skipping 20 matching lines...) Expand all Loading... |
| 689 DCHECK(gpu::IsImageFormatCompatibleWithGpuMemoryBufferFormat( | 731 DCHECK(gpu::IsImageFormatCompatibleWithGpuMemoryBufferFormat( |
| 690 internalformat, gpu_memory_buffer->GetFormat())); | 732 internalformat, gpu_memory_buffer->GetFormat())); |
| 691 | 733 |
| 692 // This handle is owned by the GPU thread and must be passed to it or it | 734 // This handle is owned by the GPU thread and must be passed to it or it |
| 693 // will leak. In other words, do not early out on error between here and the | 735 // will leak. In other words, do not early out on error between here and the |
| 694 // queuing of the CreateImage task below. | 736 // queuing of the CreateImage task below. |
| 695 bool requires_sync_point = false; | 737 bool requires_sync_point = false; |
| 696 gfx::GpuMemoryBufferHandle handle = ShareGpuMemoryBufferToGpuThread( | 738 gfx::GpuMemoryBufferHandle handle = ShareGpuMemoryBufferToGpuThread( |
| 697 gpu_memory_buffer->GetHandle(), &requires_sync_point); | 739 gpu_memory_buffer->GetHandle(), &requires_sync_point); |
| 698 | 740 |
| 699 SyncPointManager* sync_manager = service_->sync_point_manager(); | |
| 700 const uint32_t order_num = | |
| 701 sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager); | |
| 702 | |
| 703 uint64_t fence_sync = 0; | 741 uint64_t fence_sync = 0; |
| 704 if (requires_sync_point) { | 742 if (requires_sync_point) { |
| 705 fence_sync = GenerateFenceSyncRelease(); | 743 fence_sync = GenerateFenceSyncRelease(); |
| 706 | 744 |
| 707 // Previous fence syncs should be flushed already. | 745 // Previous fence syncs should be flushed already. |
| 708 DCHECK_EQ(fence_sync - 1, flushed_fence_sync_release_); | 746 DCHECK_EQ(fence_sync - 1, flushed_fence_sync_release_); |
| 709 } | 747 } |
| 710 | 748 |
| 711 QueueTask(base::Bind( | 749 QueueTask(false, base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread, |
| 712 &InProcessCommandBuffer::CreateImageOnGpuThread, base::Unretained(this), | 750 base::Unretained(this), new_id, handle, |
| 713 new_id, handle, gfx::Size(base::checked_cast<int>(width), | 751 gfx::Size(base::checked_cast<int>(width), |
| 714 base::checked_cast<int>(height)), | 752 base::checked_cast<int>(height)), |
| 715 gpu_memory_buffer->GetFormat(), | 753 gpu_memory_buffer->GetFormat(), |
| 716 base::checked_cast<uint32_t>(internalformat), order_num, fence_sync)); | 754 base::checked_cast<uint32_t>(internalformat), |
| 755 fence_sync)); |
| 717 | 756 |
| 718 if (fence_sync) { | 757 if (fence_sync) { |
| 719 flushed_fence_sync_release_ = fence_sync; | 758 flushed_fence_sync_release_ = fence_sync; |
| 720 SyncToken sync_token(GetNamespaceID(), GetExtraCommandBufferData(), | 759 SyncToken sync_token(GetNamespaceID(), GetExtraCommandBufferData(), |
| 721 GetCommandBufferID(), fence_sync); | 760 GetCommandBufferID(), fence_sync); |
| 722 sync_token.SetVerifyFlush(); | 761 sync_token.SetVerifyFlush(); |
| 723 gpu_memory_buffer_manager_->SetDestructionSyncToken(gpu_memory_buffer, | 762 gpu_memory_buffer_manager_->SetDestructionSyncToken(gpu_memory_buffer, |
| 724 sync_token); | 763 sync_token); |
| 725 } | 764 } |
| 726 | 765 |
| 727 return new_id; | 766 return new_id; |
| 728 } | 767 } |
| 729 | 768 |
| 730 void InProcessCommandBuffer::CreateImageOnGpuThread( | 769 void InProcessCommandBuffer::CreateImageOnGpuThread( |
| 731 int32_t id, | 770 int32_t id, |
| 732 const gfx::GpuMemoryBufferHandle& handle, | 771 const gfx::GpuMemoryBufferHandle& handle, |
| 733 const gfx::Size& size, | 772 const gfx::Size& size, |
| 734 gfx::BufferFormat format, | 773 gfx::BufferFormat format, |
| 735 uint32_t internalformat, | 774 uint32_t internalformat, |
| 736 uint32_t order_num, | |
| 737 uint64_t fence_sync) { | 775 uint64_t fence_sync) { |
| 738 ScopedOrderNumberProcessor scoped_order_num(sync_point_order_data_.get(), | |
| 739 order_num); | |
| 740 if (!decoder_) | 776 if (!decoder_) |
| 741 return; | 777 return; |
| 742 | 778 |
| 743 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager(); | 779 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager(); |
| 744 DCHECK(image_manager); | 780 DCHECK(image_manager); |
| 745 if (image_manager->LookupImage(id)) { | 781 if (image_manager->LookupImage(id)) { |
| 746 LOG(ERROR) << "Image already exists with same ID."; | 782 LOG(ERROR) << "Image already exists with same ID."; |
| 747 return; | 783 return; |
| 748 } | 784 } |
| 749 | 785 |
| (...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 788 } | 824 } |
| 789 | 825 |
| 790 if (fence_sync) { | 826 if (fence_sync) { |
| 791 sync_point_client_->ReleaseFenceSync(fence_sync); | 827 sync_point_client_->ReleaseFenceSync(fence_sync); |
| 792 } | 828 } |
| 793 } | 829 } |
| 794 | 830 |
| 795 void InProcessCommandBuffer::DestroyImage(int32_t id) { | 831 void InProcessCommandBuffer::DestroyImage(int32_t id) { |
| 796 CheckSequencedThread(); | 832 CheckSequencedThread(); |
| 797 | 833 |
| 798 QueueTask(base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread, | 834 QueueTask(false, base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread, |
| 799 base::Unretained(this), id)); | 835 base::Unretained(this), id)); |
| 800 } | 836 } |
| 801 | 837 |
| 802 void InProcessCommandBuffer::DestroyImageOnGpuThread(int32_t id) { | 838 void InProcessCommandBuffer::DestroyImageOnGpuThread(int32_t id) { |
| 803 if (!decoder_) | 839 if (!decoder_) |
| 804 return; | 840 return; |
| 805 | 841 |
| 806 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager(); | 842 gpu::gles2::ImageManager* image_manager = decoder_->GetImageManager(); |
| 807 DCHECK(image_manager); | 843 DCHECK(image_manager); |
| 808 if (!image_manager->LookupImage(id)) { | 844 if (!image_manager->LookupImage(id)) { |
| 809 LOG(ERROR) << "Image with ID doesn't exist."; | 845 LOG(ERROR) << "Image with ID doesn't exist."; |
| (...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 843 mailbox_manager->PushTextureUpdates(sync_token); | 879 mailbox_manager->PushTextureUpdates(sync_token); |
| 844 } | 880 } |
| 845 | 881 |
| 846 sync_point_client_->ReleaseFenceSync(release); | 882 sync_point_client_->ReleaseFenceSync(release); |
| 847 } | 883 } |
| 848 | 884 |
| 849 bool InProcessCommandBuffer::WaitFenceSyncOnGpuThread( | 885 bool InProcessCommandBuffer::WaitFenceSyncOnGpuThread( |
| 850 gpu::CommandBufferNamespace namespace_id, | 886 gpu::CommandBufferNamespace namespace_id, |
| 851 gpu::CommandBufferId command_buffer_id, | 887 gpu::CommandBufferId command_buffer_id, |
| 852 uint64_t release) { | 888 uint64_t release) { |
| 889 DCHECK(!waiting_for_sync_point_); |
| 853 gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager(); | 890 gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager(); |
| 854 DCHECK(sync_point_manager); | 891 DCHECK(sync_point_manager); |
| 855 | 892 |
| 856 scoped_refptr<gpu::SyncPointClientState> release_state = | 893 scoped_refptr<gpu::SyncPointClientState> release_state = |
| 857 sync_point_manager->GetSyncPointClientState(namespace_id, | 894 sync_point_manager->GetSyncPointClientState(namespace_id, |
| 858 command_buffer_id); | 895 command_buffer_id); |
| 859 | 896 |
| 860 if (!release_state) | 897 if (!release_state) |
| 861 return true; | 898 return true; |
| 862 | 899 |
| 863 if (!release_state->IsFenceSyncReleased(release)) { | 900 if (release_state->IsFenceSyncReleased(release)) { |
| 901 gles2::MailboxManager* mailbox_manager = |
| 902 decoder_->GetContextGroup()->mailbox_manager(); |
| 903 SyncToken sync_token(namespace_id, 0, command_buffer_id, release); |
| 904 mailbox_manager->PullTextureUpdates(sync_token); |
| 905 return true; |
| 906 } |
| 907 |
| 908 if (service_->BlockThreadOnWaitSyncToken()) { |
| 864 // Use waitable event which is signalled when the release fence is released. | 909 // Use waitable event which is signalled when the release fence is released. |
| 865 sync_point_client_->Wait( | 910 sync_point_client_->Wait( |
| 866 release_state.get(), release, | 911 release_state.get(), release, |
| 867 base::Bind(&base::WaitableEvent::Signal, | 912 base::Bind(&base::WaitableEvent::Signal, |
| 868 base::Unretained(&fence_sync_wait_event_))); | 913 base::Unretained(&fence_sync_wait_event_))); |
| 869 fence_sync_wait_event_.Wait(); | 914 fence_sync_wait_event_.Wait(); |
| 915 return true; |
| 870 } | 916 } |
| 871 | 917 |
| 918 waiting_for_sync_point_ = true; |
| 919 sync_point_client_->Wait( |
| 920 release_state.get(), release, |
| 921 base::Bind(&InProcessCommandBuffer::OnWaitFenceSyncCompleted, |
| 922 gpu_thread_weak_ptr_factory_.GetWeakPtr(), namespace_id, |
| 923 command_buffer_id, release)); |
| 924 |
| 925 if (!waiting_for_sync_point_) |
| 926 return true; |
| 927 |
| 928 executor_->SetScheduled(false); |
| 929 return false; |
| 930 } |
| 931 |
| 932 void InProcessCommandBuffer::OnWaitFenceSyncCompleted( |
| 933 CommandBufferNamespace namespace_id, |
| 934 CommandBufferId command_buffer_id, |
| 935 uint64_t release) { |
| 936 DCHECK(waiting_for_sync_point_); |
| 872 gles2::MailboxManager* mailbox_manager = | 937 gles2::MailboxManager* mailbox_manager = |
| 873 decoder_->GetContextGroup()->mailbox_manager(); | 938 decoder_->GetContextGroup()->mailbox_manager(); |
| 874 SyncToken sync_token(namespace_id, 0, command_buffer_id, release); | 939 SyncToken sync_token(namespace_id, 0, command_buffer_id, release); |
| 875 mailbox_manager->PullTextureUpdates(sync_token); | 940 mailbox_manager->PullTextureUpdates(sync_token); |
| 876 return true; | 941 waiting_for_sync_point_ = false; |
| 942 executor_->SetScheduled(true); |
| 943 QueueTask(false, base::Bind(&InProcessCommandBuffer::FlushOnGpuThread, |
| 944 gpu_thread_weak_ptr_, last_put_offset_)); |
| 877 } | 945 } |
| 878 | 946 |
| 879 void InProcessCommandBuffer::DescheduleUntilFinishedOnGpuThread() { | 947 void InProcessCommandBuffer::DescheduleUntilFinishedOnGpuThread() { |
| 880 NOTIMPLEMENTED(); | 948 DCHECK(executor_->scheduled()); |
| 949 DCHECK(executor_->HasPollingWork()); |
| 950 |
| 951 executor_->SetScheduled(false); |
| 881 } | 952 } |
| 882 | 953 |
| 883 void InProcessCommandBuffer::RescheduleAfterFinishedOnGpuThread() { | 954 void InProcessCommandBuffer::RescheduleAfterFinishedOnGpuThread() { |
| 884 NOTIMPLEMENTED(); | 955 DCHECK(!executor_->scheduled()); |
| 956 |
| 957 executor_->SetScheduled(true); |
| 958 ProcessTasksOnGpuThread(); |
| 885 } | 959 } |
| 886 | 960 |
| 887 void InProcessCommandBuffer::SignalSyncTokenOnGpuThread( | 961 void InProcessCommandBuffer::SignalSyncTokenOnGpuThread( |
| 888 const SyncToken& sync_token, | 962 const SyncToken& sync_token, |
| 889 const base::Closure& callback) { | 963 const base::Closure& callback) { |
| 890 gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager(); | 964 gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager(); |
| 891 DCHECK(sync_point_manager); | 965 DCHECK(sync_point_manager); |
| 892 | 966 |
| 893 scoped_refptr<gpu::SyncPointClientState> release_state = | 967 scoped_refptr<gpu::SyncPointClientState> release_state = |
| 894 sync_point_manager->GetSyncPointClientState( | 968 sync_point_manager->GetSyncPointClientState( |
| 895 sync_token.namespace_id(), sync_token.command_buffer_id()); | 969 sync_token.namespace_id(), sync_token.command_buffer_id()); |
| 896 | 970 |
| 897 if (!release_state) { | 971 if (!release_state) { |
| 898 callback.Run(); | 972 callback.Run(); |
| 899 return; | 973 return; |
| 900 } | 974 } |
| 901 | 975 |
| 902 sync_point_client_->WaitOutOfOrder( | 976 sync_point_client_->WaitOutOfOrder( |
| 903 release_state.get(), sync_token.release_count(), WrapCallback(callback)); | 977 release_state.get(), sync_token.release_count(), WrapCallback(callback)); |
| 904 } | 978 } |
| 905 | 979 |
| 906 void InProcessCommandBuffer::SignalQuery(unsigned query_id, | 980 void InProcessCommandBuffer::SignalQuery(unsigned query_id, |
| 907 const base::Closure& callback) { | 981 const base::Closure& callback) { |
| 908 CheckSequencedThread(); | 982 CheckSequencedThread(); |
| 909 QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread, | 983 QueueTask(false, base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread, |
| 910 base::Unretained(this), query_id, | 984 base::Unretained(this), query_id, |
| 911 WrapCallback(callback))); | 985 WrapCallback(callback))); |
| 912 } | 986 } |
| 913 | 987 |
| 914 void InProcessCommandBuffer::SignalQueryOnGpuThread( | 988 void InProcessCommandBuffer::SignalQueryOnGpuThread( |
| 915 unsigned query_id, | 989 unsigned query_id, |
| 916 const base::Closure& callback) { | 990 const base::Closure& callback) { |
| 917 gles2::QueryManager* query_manager_ = decoder_->GetQueryManager(); | 991 gles2::QueryManager* query_manager_ = decoder_->GetQueryManager(); |
| 918 DCHECK(query_manager_); | 992 DCHECK(query_manager_); |
| 919 | 993 |
| 920 gles2::QueryManager::Query* query = query_manager_->GetQuery(query_id); | 994 gles2::QueryManager::Query* query = query_manager_->GetQuery(query_id); |
| 921 if (!query) | 995 if (!query) |
| (...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 957 return release <= flushed_fence_sync_release_; | 1031 return release <= flushed_fence_sync_release_; |
| 958 } | 1032 } |
| 959 | 1033 |
| 960 bool InProcessCommandBuffer::IsFenceSyncFlushReceived(uint64_t release) { | 1034 bool InProcessCommandBuffer::IsFenceSyncFlushReceived(uint64_t release) { |
| 961 return IsFenceSyncFlushed(release); | 1035 return IsFenceSyncFlushed(release); |
| 962 } | 1036 } |
| 963 | 1037 |
| 964 void InProcessCommandBuffer::SignalSyncToken(const SyncToken& sync_token, | 1038 void InProcessCommandBuffer::SignalSyncToken(const SyncToken& sync_token, |
| 965 const base::Closure& callback) { | 1039 const base::Closure& callback) { |
| 966 CheckSequencedThread(); | 1040 CheckSequencedThread(); |
| 967 QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncTokenOnGpuThread, | 1041 QueueTask( |
| 968 base::Unretained(this), sync_token, | 1042 true, |
| 969 WrapCallback(callback))); | 1043 base::Bind(&InProcessCommandBuffer::SignalSyncTokenOnGpuThread, |
| 1044 base::Unretained(this), sync_token, WrapCallback(callback))); |
| 970 } | 1045 } |
| 971 | 1046 |
| 972 bool InProcessCommandBuffer::CanWaitUnverifiedSyncToken( | 1047 bool InProcessCommandBuffer::CanWaitUnverifiedSyncToken( |
| 973 const SyncToken* sync_token) { | 1048 const SyncToken* sync_token) { |
| 974 return sync_token->namespace_id() == GetNamespaceID(); | 1049 return sync_token->namespace_id() == GetNamespaceID(); |
| 975 } | 1050 } |
| 976 | 1051 |
| 1052 void InProcessCommandBuffer::DidSwapBuffersComplete( |
| 1053 SwapBuffersCompleteParams params) { |
| 1054 #if defined(OS_MACOSX) |
| 1055 gpu::GpuProcessHostedCALayerTreeParamsMac params_mac; |
| 1056 params_mac.ca_context_id = params.ca_context_id; |
| 1057 params_mac.fullscreen_low_power_ca_context_valid = |
| 1058 params.fullscreen_low_power_ca_context_valid; |
| 1059 params_mac.fullscreen_low_power_ca_context_id = |
| 1060 params.fullscreen_low_power_ca_context_id; |
| 1061 params_mac.io_surface.reset(IOSurfaceLookupFromMachPort(params.io_surface)); |
| 1062 params_mac.pixel_size = params.pixel_size; |
| 1063 params_mac.scale_factor = params.scale_factor; |
| 1064 params_mac.responses = std::move(params.in_use_responses); |
| 1065 gpu::GpuProcessHostedCALayerTreeParamsMac* mac_frame_ptr = ¶ms_mac; |
| 1066 #else |
| 1067 gpu::GpuProcessHostedCALayerTreeParamsMac* mac_frame_ptr = nullptr; |
| 1068 #endif |
| 1069 if (!swap_buffers_completion_callback_.is_null()) { |
| 1070 if (!ui::LatencyInfo::Verify( |
| 1071 params.latency_info, |
| 1072 "InProcessCommandBuffer::DidSwapBuffersComplete")) { |
| 1073 swap_buffers_completion_callback_.Run(std::vector<ui::LatencyInfo>(), |
| 1074 params.result, mac_frame_ptr); |
| 1075 } else { |
| 1076 swap_buffers_completion_callback_.Run(params.latency_info, params.result, |
| 1077 mac_frame_ptr); |
| 1078 } |
| 1079 } |
| 1080 } |
| 1081 |
| 1082 const gles2::FeatureInfo* InProcessCommandBuffer::GetFeatureInfo() const { |
| 1083 return context_group_->feature_info(); |
| 1084 } |
| 1085 |
| 1086 void InProcessCommandBuffer::SetLatencyInfoCallback( |
| 1087 const LatencyInfoCallback& callback) { |
| 1088 // TODO(fsamuel): Implement this. |
| 1089 } |
| 1090 |
| 1091 void InProcessCommandBuffer::UpdateVSyncParameters(base::TimeTicks timebase, |
| 1092 base::TimeDelta interval) { |
| 1093 if (!update_vsync_parameters_completion_callback_.is_null()) |
| 1094 update_vsync_parameters_completion_callback_.Run(timebase, interval); |
| 1095 } |
| 1096 |
| 1097 void InProcessCommandBuffer::SetSwapBuffersCompletionCallback( |
| 1098 const SwapBuffersCompletionCallback& callback) { |
| 1099 swap_buffers_completion_callback_ = callback; |
| 1100 } |
| 1101 |
| 1102 void InProcessCommandBuffer::SetUpdateVSyncParametersCallback( |
| 1103 const UpdateVSyncParametersCallback& callback) { |
| 1104 update_vsync_parameters_completion_callback_ = callback; |
| 1105 } |
| 1106 |
| 977 gpu::error::Error InProcessCommandBuffer::GetLastError() { | 1107 gpu::error::Error InProcessCommandBuffer::GetLastError() { |
| 978 CheckSequencedThread(); | 1108 CheckSequencedThread(); |
| 979 return last_state_.error; | 1109 return last_state_.error; |
| 980 } | 1110 } |
| 981 | 1111 |
| 982 namespace { | 1112 namespace { |
| 983 | 1113 |
| 984 void PostCallback( | 1114 void PostCallback( |
| 985 const scoped_refptr<base::SingleThreadTaskRunner>& task_runner, | 1115 const scoped_refptr<base::SingleThreadTaskRunner>& task_runner, |
| 986 const base::Closure& callback) { | 1116 const base::Closure& callback) { |
| (...skipping 21 matching lines...) Expand all Loading... |
| 1008 base::Closure callback_on_client_thread = | 1138 base::Closure callback_on_client_thread = |
| 1009 base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback)); | 1139 base::Bind(&RunOnTargetThread, base::Passed(&scoped_callback)); |
| 1010 base::Closure wrapped_callback = | 1140 base::Closure wrapped_callback = |
| 1011 base::Bind(&PostCallback, base::ThreadTaskRunnerHandle::IsSet() | 1141 base::Bind(&PostCallback, base::ThreadTaskRunnerHandle::IsSet() |
| 1012 ? base::ThreadTaskRunnerHandle::Get() | 1142 ? base::ThreadTaskRunnerHandle::Get() |
| 1013 : nullptr, | 1143 : nullptr, |
| 1014 callback_on_client_thread); | 1144 callback_on_client_thread); |
| 1015 return wrapped_callback; | 1145 return wrapped_callback; |
| 1016 } | 1146 } |
| 1017 | 1147 |
| 1018 GpuInProcessThread::GpuInProcessThread(SyncPointManager* sync_point_manager) | 1148 InProcessCommandBuffer::GpuTask::GpuTask(const base::Closure& callback, |
| 1019 : base::Thread("GpuThread"), sync_point_manager_(sync_point_manager) { | 1149 uint32_t order_number) |
| 1020 Start(); | 1150 : callback(callback), order_number(order_number) {} |
| 1021 } | |
| 1022 | 1151 |
| 1023 GpuInProcessThread::~GpuInProcessThread() { | 1152 InProcessCommandBuffer::GpuTask::~GpuTask() {} |
| 1024 Stop(); | |
| 1025 } | |
| 1026 | |
| 1027 void GpuInProcessThread::AddRef() const { | |
| 1028 base::RefCountedThreadSafe<GpuInProcessThread>::AddRef(); | |
| 1029 } | |
| 1030 void GpuInProcessThread::Release() const { | |
| 1031 base::RefCountedThreadSafe<GpuInProcessThread>::Release(); | |
| 1032 } | |
| 1033 | |
| 1034 void GpuInProcessThread::ScheduleTask(const base::Closure& task) { | |
| 1035 task_runner()->PostTask(FROM_HERE, task); | |
| 1036 } | |
| 1037 | |
| 1038 void GpuInProcessThread::ScheduleDelayedWork(const base::Closure& callback) { | |
| 1039 // Match delay with GpuCommandBufferStub. | |
| 1040 task_runner()->PostDelayedTask(FROM_HERE, callback, | |
| 1041 base::TimeDelta::FromMilliseconds(2)); | |
| 1042 } | |
| 1043 | |
| 1044 bool GpuInProcessThread::UseVirtualizedGLContexts() { | |
| 1045 return false; | |
| 1046 } | |
| 1047 | |
| 1048 scoped_refptr<gles2::ShaderTranslatorCache> | |
| 1049 GpuInProcessThread::shader_translator_cache() { | |
| 1050 if (!shader_translator_cache_.get()) { | |
| 1051 shader_translator_cache_ = | |
| 1052 new gpu::gles2::ShaderTranslatorCache(gpu_preferences()); | |
| 1053 } | |
| 1054 return shader_translator_cache_; | |
| 1055 } | |
| 1056 | |
| 1057 scoped_refptr<gles2::FramebufferCompletenessCache> | |
| 1058 GpuInProcessThread::framebuffer_completeness_cache() { | |
| 1059 if (!framebuffer_completeness_cache_.get()) | |
| 1060 framebuffer_completeness_cache_ = | |
| 1061 new gpu::gles2::FramebufferCompletenessCache; | |
| 1062 return framebuffer_completeness_cache_; | |
| 1063 } | |
| 1064 | |
| 1065 SyncPointManager* GpuInProcessThread::sync_point_manager() { | |
| 1066 return sync_point_manager_; | |
| 1067 } | |
| 1068 | 1153 |
| 1069 } // namespace gpu | 1154 } // namespace gpu |
| OLD | NEW |