Chromium Code Reviews
| Index: gpu/ipc/in_process_command_buffer.cc |
| diff --git a/gpu/ipc/in_process_command_buffer.cc b/gpu/ipc/in_process_command_buffer.cc |
| index 54c0f0d3ebd463c6fcb5afd59fe04afc3ebac11a..d15e7faa474970f24aa9017b13d65839e417ee72 100644 |
| --- a/gpu/ipc/in_process_command_buffer.cc |
| +++ b/gpu/ipc/in_process_command_buffer.cc |
| @@ -17,6 +17,7 @@ |
| #include "base/lazy_instance.h" |
| #include "base/location.h" |
| #include "base/logging.h" |
| +#include "base/memory/ptr_util.h" |
| #include "base/memory/weak_ptr.h" |
| #include "base/numerics/safe_conversions.h" |
| #include "base/sequence_checker.h" |
| @@ -40,6 +41,7 @@ |
| #include "gpu/command_buffer/service/service_utils.h" |
| #include "gpu/command_buffer/service/sync_point_manager.h" |
| #include "gpu/command_buffer/service/transfer_buffer_manager.h" |
| +#include "gpu/ipc/service/image_transport_surface.h" |
| #include "ui/gfx/geometry/size.h" |
| #include "ui/gl/gl_context.h" |
| #include "ui/gl/gl_image.h" |
| @@ -66,21 +68,6 @@ static void RunTaskWithResult(base::Callback<T(void)> task, |
| completion->Signal(); |
| } |
| -struct ScopedOrderNumberProcessor { |
| - ScopedOrderNumberProcessor(SyncPointOrderData* order_data, uint32_t order_num) |
| - : order_data_(order_data), order_num_(order_num) { |
| - order_data_->BeginProcessingOrderNumber(order_num_); |
| - } |
| - |
| - ~ScopedOrderNumberProcessor() { |
| - order_data_->FinishProcessingOrderNumber(order_num_); |
| - } |
| - |
| - private: |
| - SyncPointOrderData* order_data_; |
| - uint32_t order_num_; |
| -}; |
| - |
| struct GpuInProcessThreadHolder { |
| GpuInProcessThreadHolder() |
| : sync_point_manager(new SyncPointManager(false)), |
| @@ -164,12 +151,6 @@ InProcessCommandBuffer::Service::gpu_driver_bug_workarounds() { |
| return gpu_driver_bug_workarounds_; |
| } |
| -scoped_refptr<gl::GLShareGroup> InProcessCommandBuffer::Service::share_group() { |
| - if (!share_group_.get()) |
| - share_group_ = new gl::GLShareGroup(); |
| - return share_group_; |
| -} |
| - |
| scoped_refptr<gles2::MailboxManager> |
| InProcessCommandBuffer::Service::mailbox_manager() { |
| if (!mailbox_manager_.get()) { |
| @@ -286,8 +267,8 @@ bool InProcessCommandBuffer::Initialize( |
| base::WaitableEvent::ResetPolicy::MANUAL, |
| base::WaitableEvent::InitialState::NOT_SIGNALED); |
| bool result = false; |
| - QueueTask( |
| - base::Bind(&RunTaskWithResult<bool>, init_task, &result, &completion)); |
| + QueueTask(true, base::Bind(&RunTaskWithResult<bool>, init_task, &result, |
| + &completion)); |
| completion.Wait(); |
| gpu_memory_buffer_manager_ = gpu_memory_buffer_manager; |
| @@ -315,19 +296,22 @@ bool InProcessCommandBuffer::InitializeOnGpuThread( |
| &InProcessCommandBuffer::OnContextLostOnGpuThread, gpu_thread_weak_ptr_)); |
| gl_share_group_ = params.context_group ? params.context_group->gl_share_group_ |
| - : service_->share_group(); |
| + : service_->GetShareGroup(); |
| bool bind_generates_resource = false; |
| scoped_refptr<gles2::FeatureInfo> feature_info = |
| new gles2::FeatureInfo(service_->gpu_driver_bug_workarounds()); |
| - decoder_.reset(gles2::GLES2Decoder::Create( |
| + |
| + context_group_ = |
| params.context_group |
| ? params.context_group->decoder_->GetContextGroup() |
| : new gles2::ContextGroup( |
| service_->gpu_preferences(), service_->mailbox_manager(), NULL, |
| service_->shader_translator_cache(), |
| service_->framebuffer_completeness_cache(), feature_info, |
| - bind_generates_resource, nullptr, nullptr))); |
| + bind_generates_resource, nullptr, nullptr); |
| + |
| + decoder_.reset(gles2::GLES2Decoder::Create(context_group_.get())); |
| executor_.reset(new CommandExecutor(command_buffer.get(), decoder_.get(), |
| decoder_.get())); |
| @@ -338,10 +322,14 @@ bool InProcessCommandBuffer::InitializeOnGpuThread( |
| decoder_->set_engine(executor_.get()); |
| if (!surface_.get()) { |
| - if (params.is_offscreen) |
| - surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size()); |

piman, 2016/11/15 21:57:32:
We need to keep the offscreen path, and only use I
Fady Samuel, 2016/11/16 00:33:35:
Done.
| - else |
| - surface_ = gl::init::CreateViewGLSurface(params.window); |
| + surface_ = ImageTransportSurface::CreateNativeSurface( |
| + gpu_thread_weak_ptr_factory_.GetWeakPtr(), params.window, |
| + gl::GLSurface::SURFACE_DEFAULT); |
| + if (!surface_ || !surface_->Initialize(gl::GLSurface::SURFACE_DEFAULT)) { |
| + surface_ = nullptr; |
| + DLOG(ERROR) << "Failed to create surface."; |
| + return false; |
| + } |
| } |
| if (!surface_.get()) { |
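
Regarding piman's note in the thread above about keeping the offscreen path: the sketch below is not the committed patch, only an illustration of how the branch might look once the offscreen case is preserved and ImageTransportSurface is used solely for onscreen windows. It reuses names visible in this file (params.is_offscreen, surface_, gpu_thread_weak_ptr_factory_); the actual resolution lives in a later patch set not shown here.

// Hedged sketch only, not the code landed in this CL.
if (!surface_) {
  if (params.is_offscreen) {
    // Offscreen contexts keep the plain GL surface, as before.
    surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size());
  } else {
    // Onscreen windows go through ImageTransportSurface so swap
    // completion and vsync callbacks route back to this class.
    surface_ = ImageTransportSurface::CreateNativeSurface(
        gpu_thread_weak_ptr_factory_.GetWeakPtr(), params.window,
        gl::GLSurface::SURFACE_DEFAULT);
    if (!surface_ || !surface_->Initialize(gl::GLSurface::SURFACE_DEFAULT)) {
      surface_ = nullptr;
      DLOG(ERROR) << "Failed to create surface.";
      return false;
    }
  }
}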
| @@ -444,8 +432,8 @@ void InProcessCommandBuffer::Destroy() { |
| bool result = false; |
| base::Callback<bool(void)> destroy_task = base::Bind( |
| &InProcessCommandBuffer::DestroyOnGpuThread, base::Unretained(this)); |
| - QueueTask( |
| - base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, &completion)); |
| + QueueTask(false, base::Bind(&RunTaskWithResult<bool>, destroy_task, &result, |
| + &completion)); |
| completion.Wait(); |
| } |
| @@ -504,6 +492,39 @@ CommandBuffer::State InProcessCommandBuffer::GetStateFast() { |
| return last_state_; |
| } |
| +void InProcessCommandBuffer::QueueTask(bool out_of_order, |
| + const base::Closure& task) { |
| + if (out_of_order) { |
| + service_->ScheduleTask(task); |
| + return; |
| + } |
| + base::AutoLock lock(task_queue_lock_); |
| + SyncPointManager* sync_manager = service_->sync_point_manager(); |
| + uint32_t order_num = |
| + sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager); |
| + task_queue_.push(base::MakeUnique<GpuTask>(task, order_num)); |
| + service_->ScheduleTask( |
| + base::Bind(&InProcessCommandBuffer::ProcessTasksOnGpuThread, |
| + base::Unretained(this))); |
| +} |
| + |
| +void InProcessCommandBuffer::ProcessTasksOnGpuThread() { |
| + while (executor_->scheduled()) { |
| + base::AutoLock lock(task_queue_lock_); |
| + if (task_queue_.empty()) |
| + break; |
| + GpuTask* task = task_queue_.front().get(); |
| + sync_point_order_data_->BeginProcessingOrderNumber(task->order_number); |
| + task->callback.Run(); |
| + if (!executor_->scheduled()) { |
| + sync_point_order_data_->PauseProcessingOrderNumber(task->order_number); |
| + return; |
| + } |
| + sync_point_order_data_->FinishProcessingOrderNumber(task->order_number); |
| + task_queue_.pop(); |
| + } |
| +} |
| + |
| CommandBuffer::State InProcessCommandBuffer::GetLastState() { |
| CheckSequencedThread(); |
| return last_state_; |
| @@ -515,28 +536,18 @@ int32_t InProcessCommandBuffer::GetLastToken() { |
| return last_state_.token; |
| } |
| -void InProcessCommandBuffer::FlushOnGpuThread(int32_t put_offset, |
| - uint32_t order_num) { |
| +void InProcessCommandBuffer::FlushOnGpuThread(int32_t put_offset) { |
| CheckSequencedThread(); |
| ScopedEvent handle_flush(&flush_event_); |
| base::AutoLock lock(command_buffer_lock_); |
| { |
| - ScopedOrderNumberProcessor scoped_order_num(sync_point_order_data_.get(), |
| - order_num); |
| command_buffer_->Flush(put_offset); |
| { |
| // Update state before signaling the flush event. |
| base::AutoLock lock(state_after_last_flush_lock_); |
| state_after_last_flush_ = command_buffer_->GetLastState(); |
| } |
| - |
| - // Currently the in process command buffer does not support being |
| - // descheduled, if it does we would need to back off on calling the finish |
| - // processing number function until the message is rescheduled and finished |
| - // processing. This DCHECK is to enforce this. |
| - DCHECK(error::IsError(state_after_last_flush_.error) || |
| - put_offset == state_after_last_flush_.get_offset); |
| } |
| // If we've processed all pending commands but still have pending queries, |
| @@ -578,13 +589,10 @@ void InProcessCommandBuffer::Flush(int32_t put_offset) { |
| if (last_put_offset_ == put_offset) |
| return; |
| - SyncPointManager* sync_manager = service_->sync_point_manager(); |
| - const uint32_t order_num = |
| - sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager); |
| last_put_offset_ = put_offset; |
| base::Closure task = base::Bind(&InProcessCommandBuffer::FlushOnGpuThread, |
| - gpu_thread_weak_ptr_, put_offset, order_num); |
| - QueueTask(task); |
| + gpu_thread_weak_ptr_, put_offset); |
| + QueueTask(false, task); |
| flushed_fence_sync_release_ = next_fence_sync_release_ - 1; |
| } |
| @@ -596,8 +604,9 @@ void InProcessCommandBuffer::OrderingBarrier(int32_t put_offset) { |
| void InProcessCommandBuffer::WaitForTokenInRange(int32_t start, int32_t end) { |
| CheckSequencedThread(); |
| while (!InRange(start, end, GetLastToken()) && |
| - last_state_.error == gpu::error::kNoError) |
| + last_state_.error == gpu::error::kNoError) { |
| flush_event_.Wait(); |
| + } |
| } |
| void InProcessCommandBuffer::WaitForGetOffsetInRange(int32_t start, |
| @@ -623,7 +632,7 @@ void InProcessCommandBuffer::SetGetBuffer(int32_t shm_id) { |
| base::Closure task = |
| base::Bind(&InProcessCommandBuffer::SetGetBufferOnGpuThread, |
| base::Unretained(this), shm_id, &completion); |
| - QueueTask(task); |
| + QueueTask(false, task); |
| completion.Wait(); |
| { |
| @@ -655,7 +664,7 @@ void InProcessCommandBuffer::DestroyTransferBuffer(int32_t id) { |
| base::Bind(&InProcessCommandBuffer::DestroyTransferBufferOnGpuThread, |
| base::Unretained(this), id); |
| - QueueTask(task); |
| + QueueTask(false, task); |
| } |
| void InProcessCommandBuffer::DestroyTransferBufferOnGpuThread(int32_t id) { |
| @@ -696,10 +705,6 @@ int32_t InProcessCommandBuffer::CreateImage(ClientBuffer buffer, |
| gfx::GpuMemoryBufferHandle handle = ShareGpuMemoryBufferToGpuThread( |
| gpu_memory_buffer->GetHandle(), &requires_sync_point); |
| - SyncPointManager* sync_manager = service_->sync_point_manager(); |
| - const uint32_t order_num = |
| - sync_point_order_data_->GenerateUnprocessedOrderNumber(sync_manager); |
| - |
| uint64_t fence_sync = 0; |
| if (requires_sync_point) { |
| fence_sync = GenerateFenceSyncRelease(); |
| @@ -708,12 +713,13 @@ int32_t InProcessCommandBuffer::CreateImage(ClientBuffer buffer, |
| DCHECK_EQ(fence_sync - 1, flushed_fence_sync_release_); |
| } |
| - QueueTask(base::Bind( |
| - &InProcessCommandBuffer::CreateImageOnGpuThread, base::Unretained(this), |
| - new_id, handle, gfx::Size(base::checked_cast<int>(width), |
| - base::checked_cast<int>(height)), |
| - gpu_memory_buffer->GetFormat(), |
| - base::checked_cast<uint32_t>(internalformat), order_num, fence_sync)); |
| + QueueTask(false, base::Bind(&InProcessCommandBuffer::CreateImageOnGpuThread, |
| + base::Unretained(this), new_id, handle, |
| + gfx::Size(base::checked_cast<int>(width), |
| + base::checked_cast<int>(height)), |
| + gpu_memory_buffer->GetFormat(), |
| + base::checked_cast<uint32_t>(internalformat), |
| + fence_sync)); |
| if (fence_sync) { |
| flushed_fence_sync_release_ = fence_sync; |
| @@ -733,10 +739,7 @@ void InProcessCommandBuffer::CreateImageOnGpuThread( |
| const gfx::Size& size, |
| gfx::BufferFormat format, |
| uint32_t internalformat, |
| - uint32_t order_num, |
| uint64_t fence_sync) { |
| - ScopedOrderNumberProcessor scoped_order_num(sync_point_order_data_.get(), |
| - order_num); |
| if (!decoder_) |
| return; |
| @@ -795,8 +798,8 @@ void InProcessCommandBuffer::CreateImageOnGpuThread( |
| void InProcessCommandBuffer::DestroyImage(int32_t id) { |
| CheckSequencedThread(); |
| - QueueTask(base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread, |
| - base::Unretained(this), id)); |
| + QueueTask(false, base::Bind(&InProcessCommandBuffer::DestroyImageOnGpuThread, |
| + base::Unretained(this), id)); |
| } |
| void InProcessCommandBuffer::DestroyImageOnGpuThread(int32_t id) { |
| @@ -850,6 +853,7 @@ bool InProcessCommandBuffer::WaitFenceSyncOnGpuThread( |
| gpu::CommandBufferNamespace namespace_id, |
| gpu::CommandBufferId command_buffer_id, |
| uint64_t release) { |
| + DCHECK(!waiting_for_sync_point_); |
| gpu::SyncPointManager* sync_point_manager = service_->sync_point_manager(); |
| DCHECK(sync_point_manager); |
| @@ -860,28 +864,65 @@ bool InProcessCommandBuffer::WaitFenceSyncOnGpuThread( |
| if (!release_state) |
| return true; |
| - if (!release_state->IsFenceSyncReleased(release)) { |
| + if (release_state->IsFenceSyncReleased(release)) { |
| + gles2::MailboxManager* mailbox_manager = |
| + decoder_->GetContextGroup()->mailbox_manager(); |
| + SyncToken sync_token(namespace_id, 0, command_buffer_id, release); |
| + mailbox_manager->PullTextureUpdates(sync_token); |
| + return true; |
| + } |
| + |
| + if (service_->BlockThreadOnWaitSyncToken()) { |
| // Use waitable event which is signalled when the release fence is released. |
| sync_point_client_->Wait( |
| release_state.get(), release, |
| base::Bind(&base::WaitableEvent::Signal, |
| base::Unretained(&fence_sync_wait_event_))); |
| fence_sync_wait_event_.Wait(); |
| + return true; |
| } |
| + waiting_for_sync_point_ = true; |
| + sync_point_client_->Wait( |
| + release_state.get(), release, |
| + base::Bind(&InProcessCommandBuffer::OnWaitFenceSyncCompleted, |
| + gpu_thread_weak_ptr_factory_.GetWeakPtr(), namespace_id, |
| + command_buffer_id, release)); |
| + |
| + if (!waiting_for_sync_point_) |
| + return true; |
| + |
| + executor_->SetScheduled(false); |
| + return false; |
| +} |
| + |
| +void InProcessCommandBuffer::OnWaitFenceSyncCompleted( |
| + CommandBufferNamespace namespace_id, |
| + CommandBufferId command_buffer_id, |
| + uint64_t release) { |
| + DCHECK(waiting_for_sync_point_); |
| gles2::MailboxManager* mailbox_manager = |
| decoder_->GetContextGroup()->mailbox_manager(); |
| SyncToken sync_token(namespace_id, 0, command_buffer_id, release); |
| mailbox_manager->PullTextureUpdates(sync_token); |
| - return true; |
| + waiting_for_sync_point_ = false; |
| + executor_->SetScheduled(true); |
| + QueueTask(false, base::Bind(&InProcessCommandBuffer::FlushOnGpuThread, |
| + gpu_thread_weak_ptr_, last_put_offset_)); |
| } |
| void InProcessCommandBuffer::DescheduleUntilFinishedOnGpuThread() { |
| - NOTIMPLEMENTED(); |
| + DCHECK(executor_->scheduled()); |
| + DCHECK(executor_->HasPollingWork()); |
| + |
| + executor_->SetScheduled(false); |
| } |
| void InProcessCommandBuffer::RescheduleAfterFinishedOnGpuThread() { |
| - NOTIMPLEMENTED(); |
| + DCHECK(!executor_->scheduled()); |
| + |
| + executor_->SetScheduled(true); |
| + ProcessTasksOnGpuThread(); |
| } |
| void InProcessCommandBuffer::SignalSyncTokenOnGpuThread( |
| @@ -906,9 +947,9 @@ void InProcessCommandBuffer::SignalSyncTokenOnGpuThread( |
| void InProcessCommandBuffer::SignalQuery(unsigned query_id, |
| const base::Closure& callback) { |
| CheckSequencedThread(); |
| - QueueTask(base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread, |
| - base::Unretained(this), query_id, |
| - WrapCallback(callback))); |
| + QueueTask(false, base::Bind(&InProcessCommandBuffer::SignalQueryOnGpuThread, |
| + base::Unretained(this), query_id, |
| + WrapCallback(callback))); |
| } |
| void InProcessCommandBuffer::SignalQueryOnGpuThread( |
| @@ -964,9 +1005,10 @@ bool InProcessCommandBuffer::IsFenceSyncFlushReceived(uint64_t release) { |
| void InProcessCommandBuffer::SignalSyncToken(const SyncToken& sync_token, |
| const base::Closure& callback) { |
| CheckSequencedThread(); |
| - QueueTask(base::Bind(&InProcessCommandBuffer::SignalSyncTokenOnGpuThread, |
| - base::Unretained(this), sync_token, |
| - WrapCallback(callback))); |
| + QueueTask( |
| + true, |
| + base::Bind(&InProcessCommandBuffer::SignalSyncTokenOnGpuThread, |
| + base::Unretained(this), sync_token, WrapCallback(callback))); |
| } |
| bool InProcessCommandBuffer::CanWaitUnverifiedSyncToken( |
| @@ -974,6 +1016,61 @@ bool InProcessCommandBuffer::CanWaitUnverifiedSyncToken( |
| return sync_token->namespace_id() == GetNamespaceID(); |
| } |
| +void InProcessCommandBuffer::DidSwapBuffersComplete( |
| + SwapBuffersCompleteParams params) { |
| +#if defined(OS_MACOSX) |
| + gpu::GpuProcessHostedCALayerTreeParamsMac params_mac; |
| + params_mac.ca_context_id = params.ca_context_id; |
| + params_mac.fullscreen_low_power_ca_context_valid = |
| + params.fullscreen_low_power_ca_context_valid; |
| + params_mac.fullscreen_low_power_ca_context_id = |
| + params.fullscreen_low_power_ca_context_id; |
| + params_mac.io_surface.reset(IOSurfaceLookupFromMachPort(params.io_surface)); |
| + params_mac.pixel_size = params.pixel_size; |
| + params_mac.scale_factor = params.scale_factor; |
| + params_mac.responses = std::move(params.in_use_responses); |
| + gpu::GpuProcessHostedCALayerTreeParamsMac* mac_frame_ptr = ¶ms_mac; |
| +#else |
| + gpu::GpuProcessHostedCALayerTreeParamsMac* mac_frame_ptr = nullptr; |
| +#endif |
| + if (!swap_buffers_completion_callback_.is_null()) { |
| + if (!ui::LatencyInfo::Verify( |
| + params.latency_info, |
| + "InProcessCommandBuffer::DidSwapBuffersComplete")) { |
| + swap_buffers_completion_callback_.Run(std::vector<ui::LatencyInfo>(), |
| + params.result, mac_frame_ptr); |
| + } else { |
| + swap_buffers_completion_callback_.Run(params.latency_info, params.result, |
| + mac_frame_ptr); |
| + } |
| + } |
| +} |
| + |
| +const gles2::FeatureInfo* InProcessCommandBuffer::GetFeatureInfo() const { |
| + return context_group_->feature_info(); |
| +} |
| + |
| +void InProcessCommandBuffer::SetLatencyInfoCallback( |
| + const LatencyInfoCallback& callback) { |
| + // TODO(fsamuel): Implement this. |
| +} |
| + |
| +void InProcessCommandBuffer::UpdateVSyncParameters(base::TimeTicks timebase, |
| + base::TimeDelta interval) { |
| + if (!update_vsync_parameters_completion_callback_.is_null()) |
| + update_vsync_parameters_completion_callback_.Run(timebase, interval); |
| +} |
| + |
| +void InProcessCommandBuffer::SetSwapBuffersCompletionCallback( |
| + const SwapBuffersCompletionCallback& callback) { |
| + swap_buffers_completion_callback_ = callback; |
| +} |
| + |
| +void InProcessCommandBuffer::SetUpdateVSyncParametersCallback( |
| + const UpdateVSyncParametersCallback& callback) { |
| + update_vsync_parameters_completion_callback_ = callback; |
| +} |
| + |
| gpu::error::Error InProcessCommandBuffer::GetLastError() { |
| CheckSequencedThread(); |
| return last_state_.error; |
| @@ -1066,4 +1163,19 @@ SyncPointManager* GpuInProcessThread::sync_point_manager() { |
| return sync_point_manager_; |
| } |
| +const scoped_refptr<gl::GLShareGroup>& GpuInProcessThread::GetShareGroup() |
| + const { |
| + return share_group_; |
| +} |
| + |
| +bool GpuInProcessThread::BlockThreadOnWaitSyncToken() const { |
| + return true; |

piman, 2016/11/15 21:57:32:
I think this should be false here, but set to true
Fady Samuel, 2016/11/16 00:33:35:
Done.
| +} |
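
A sketch of the default piman suggests in the thread above: BlockThreadOnWaitSyncToken() would return false for the generic GpuInProcessThread, and only a service that genuinely cannot deschedule would override it to true. The subclass name below is hypothetical and purely illustrative; this snapshot of the patch still returns true.

// Hedged sketch only, assuming a later patch set splits the behavior.
bool GpuInProcessThread::BlockThreadOnWaitSyncToken() const {
  return false;  // Generic case: deschedule instead of blocking the thread.
}

// "BlockingInProcessService" is a hypothetical override for a service
// that must block the GPU thread while waiting on a sync token.
bool BlockingInProcessService::BlockThreadOnWaitSyncToken() const {
  return true;
}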
| + |
| +InProcessCommandBuffer::GpuTask::GpuTask(const base::Closure& callback, |
| + uint32_t order_number) |
| + : callback(callback), order_number(order_number) {} |
| + |
| +InProcessCommandBuffer::GpuTask::~GpuTask() {} |
| + |
| } // namespace gpu |