Chromium Code Reviews
| Index: gpu/ipc/client/command_buffer_proxy_impl.cc |
| diff --git a/gpu/ipc/client/command_buffer_proxy_impl.cc b/gpu/ipc/client/command_buffer_proxy_impl.cc |
| index 4564553ed21f9c799d0885bd84302955594752e9..8657ee97ade65a58409e88d504a0c1d03ce98681 100644 |
| --- a/gpu/ipc/client/command_buffer_proxy_impl.cc |
| +++ b/gpu/ipc/client/command_buffer_proxy_impl.cc |
| @@ -130,9 +130,10 @@ bool CommandBufferProxyImpl::OnMessageReceived(const IPC::Message& message) { |
| } |
| void CommandBufferProxyImpl::OnChannelError() { |
| - std::unique_ptr<base::AutoLock> lock; |
| + base::Optional<base::AutoLock> lock; |
| if (lock_) |
| - lock.reset(new base::AutoLock(*lock_)); |
| + lock.emplace(*lock_); |
| + base::AutoLock state_lock(state_lock_); |
| gpu::error::ContextLostReason context_lost_reason = |
| gpu::error::kGpuChannelLost; |
| @@ -210,7 +211,6 @@ bool CommandBufferProxyImpl::Initialize( |
| if (!base::SharedMemory::IsHandleValid(handle)) |
| return false; |
| - |
| // TODO(vadimt): Remove ScopedTracker below once crbug.com/125248 is fixed. |
| tracked_objects::ScopedTracker tracking_profile( |
| FROM_HERE_WITH_EXPLICIT_FUNCTION( |
| @@ -240,16 +240,20 @@ bool CommandBufferProxyImpl::Initialize( |
| } |
| gpu::CommandBuffer::State CommandBufferProxyImpl::GetLastState() { |
| + base::AutoLock lock(state_lock_); |
| + TryUpdateState(); |
| return last_state_; |
| } |
| int32_t CommandBufferProxyImpl::GetLastToken() { |
|
piman
2016/12/02 19:21:48
nit: maybe return GetLastState().token ?
Maybe we
sunnyps
2016/12/07 03:31:22
Got rid of both GetLastToken and GetLastError. Thi
|
| + base::AutoLock lock(state_lock_); |
| TryUpdateState(); |
| return last_state_.token; |
| } |
| void CommandBufferProxyImpl::Flush(int32_t put_offset) { |
| CheckLock(); |
| + base::AutoLock lock(state_lock_); |
| if (last_state_.error != gpu::error::kNoError) |
| return; |
| @@ -283,6 +287,7 @@ void CommandBufferProxyImpl::Flush(int32_t put_offset) { |
| void CommandBufferProxyImpl::OrderingBarrier(int32_t put_offset) { |
| CheckLock(); |
| + base::AutoLock lock(state_lock_); |
| if (last_state_.error != gpu::error::kNoError) |
| return; |
| @@ -333,30 +338,51 @@ void CommandBufferProxyImpl::SetUpdateVSyncParametersCallback( |
| update_vsync_parameters_completion_callback_ = callback; |
| } |
| -void CommandBufferProxyImpl::WaitForTokenInRange(int32_t start, int32_t end) { |
| +gpu::CommandBuffer::State CommandBufferProxyImpl::WaitForTokenInRange( |
| + int32_t start, |
| + int32_t end) { |
| CheckLock(); |
| + base::AutoLock lock(state_lock_); |
| TRACE_EVENT2("gpu", "CommandBufferProxyImpl::WaitForToken", "start", start, |
| "end", end); |
| + // Error needs to be checked in case the state was updated on another thread. |
| + // We need to make sure that the reentrant context loss callback is called so |
| + // that the share group is also lost before this method returns. |
| + if (last_state_.error != gpu::error::kNoError) { |
| + OnGpuStateError(); |
|
piman
2016/12/02 19:21:48
I don't think OnGpuStateError is right, because th
sunnyps
2016/12/07 03:31:22
DisconnectChannel (the posted task) early outs if
|
| + return last_state_; |
| + } |
| TryUpdateState(); |
| if (!InRange(start, end, last_state_.token) && |
| last_state_.error == gpu::error::kNoError) { |
| gpu::CommandBuffer::State state; |
| if (Send(new GpuCommandBufferMsg_WaitForTokenInRange(route_id_, start, end, |
| - &state))) |
| + &state))) { |
| SetStateFromSyncReply(state); |
| + } |
| } |
| if (!InRange(start, end, last_state_.token) && |
| last_state_.error == gpu::error::kNoError) { |
| LOG(ERROR) << "GPU state invalid after WaitForTokenInRange."; |
| OnGpuSyncReplyError(); |
| } |
| + return last_state_; |
| } |
| -void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32_t start, |
| - int32_t end) { |
| +gpu::CommandBuffer::State CommandBufferProxyImpl::WaitForGetOffsetInRange( |
| + int32_t start, |
| + int32_t end) { |
| CheckLock(); |
| + base::AutoLock lock(state_lock_); |
| TRACE_EVENT2("gpu", "CommandBufferProxyImpl::WaitForGetOffset", "start", |
| start, "end", end); |
| + // Error needs to be checked in case the state was updated on another thread. |
| + // We need to make sure that the reentrant context loss callback is called so |
| + // that the share group is also lost before this method returns. |
| + if (last_state_.error != gpu::error::kNoError) { |
| + OnGpuStateError(); |
|
piman
2016/12/02 19:21:48
Ditto
sunnyps
2016/12/07 03:31:22
Done.
|
| + return last_state_; |
| + } |
| TryUpdateState(); |
| if (!InRange(start, end, last_state_.get_offset) && |
| last_state_.error == gpu::error::kNoError) { |
| @@ -370,10 +396,12 @@ void CommandBufferProxyImpl::WaitForGetOffsetInRange(int32_t start, |
| LOG(ERROR) << "GPU state invalid after WaitForGetOffsetInRange."; |
| OnGpuSyncReplyError(); |
| } |
| + return last_state_; |
| } |
| void CommandBufferProxyImpl::SetGetBuffer(int32_t shm_id) { |
| CheckLock(); |
| + base::AutoLock lock(state_lock_); |
| if (last_state_.error != gpu::error::kNoError) |
| return; |
| @@ -385,6 +413,7 @@ scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer( |
| size_t size, |
| int32_t* id) { |
| CheckLock(); |
| + base::AutoLock lock(state_lock_); |
| *id = -1; |
| if (last_state_.error != gpu::error::kNoError) |
| @@ -428,6 +457,7 @@ scoped_refptr<gpu::Buffer> CommandBufferProxyImpl::CreateTransferBuffer( |
| void CommandBufferProxyImpl::DestroyTransferBuffer(int32_t id) { |
| CheckLock(); |
| + base::AutoLock lock(state_lock_); |
| if (last_state_.error != gpu::error::kNoError) |
| return; |
| @@ -448,6 +478,7 @@ int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer, |
| size_t height, |
| unsigned internal_format) { |
| CheckLock(); |
| + base::AutoLock lock(state_lock_); |
| if (last_state_.error != gpu::error::kNoError) |
| return -1; |
| @@ -508,6 +539,7 @@ int32_t CommandBufferProxyImpl::CreateImage(ClientBuffer buffer, |
| void CommandBufferProxyImpl::DestroyImage(int32_t id) { |
| CheckLock(); |
| + base::AutoLock lock(state_lock_); |
| if (last_state_.error != gpu::error::kNoError) |
| return; |
| @@ -535,6 +567,7 @@ int32_t CommandBufferProxyImpl::CreateGpuMemoryBufferImage( |
| uint32_t CommandBufferProxyImpl::CreateStreamTexture(uint32_t texture_id) { |
| CheckLock(); |
| + base::AutoLock lock(state_lock_); |
| if (last_state_.error != gpu::error::kNoError) |
| return 0; |
| @@ -587,6 +620,7 @@ bool CommandBufferProxyImpl::IsFenceSyncFlushed(uint64_t release) { |
| bool CommandBufferProxyImpl::IsFenceSyncFlushReceived(uint64_t release) { |
| CheckLock(); |
| + base::AutoLock lock(state_lock_); |
| if (last_state_.error != gpu::error::kNoError) |
| return false; |
| @@ -610,9 +644,16 @@ bool CommandBufferProxyImpl::IsFenceSyncFlushReceived(uint64_t release) { |
| return false; |
| } |
| +bool CommandBufferProxyImpl::IsFenceSyncReleased(uint64_t release) { |
| + base::AutoLock lock(state_lock_); |
| + TryUpdateStateThreadSafe(); |
| + return release <= last_state_.release_count; |
| +} |
| + |
| void CommandBufferProxyImpl::SignalSyncToken(const gpu::SyncToken& sync_token, |
| const base::Closure& callback) { |
| CheckLock(); |
| + base::AutoLock lock(state_lock_); |
| if (last_state_.error != gpu::error::kNoError) |
| return; |
| @@ -647,6 +688,7 @@ bool CommandBufferProxyImpl::CanWaitUnverifiedSyncToken( |
| void CommandBufferProxyImpl::SignalQuery(uint32_t query, |
| const base::Closure& callback) { |
| CheckLock(); |
| + base::AutoLock lock(state_lock_); |
| if (last_state_.error != gpu::error::kNoError) |
| return; |
| @@ -665,6 +707,7 @@ void CommandBufferProxyImpl::SignalQuery(uint32_t query, |
| void CommandBufferProxyImpl::TakeFrontBuffer(const gpu::Mailbox& mailbox) { |
| CheckLock(); |
| + base::AutoLock lock(state_lock_); |
| if (last_state_.error != gpu::error::kNoError) |
| return; |
| @@ -675,6 +718,7 @@ void CommandBufferProxyImpl::ReturnFrontBuffer(const gpu::Mailbox& mailbox, |
| const gpu::SyncToken& sync_token, |
| bool is_lost) { |
| CheckLock(); |
| + base::AutoLock lock(state_lock_); |
| if (last_state_.error != gpu::error::kNoError) |
| return; |
| @@ -683,35 +727,49 @@ void CommandBufferProxyImpl::ReturnFrontBuffer(const gpu::Mailbox& mailbox, |
| } |
| gpu::error::Error CommandBufferProxyImpl::GetLastError() { |
|
piman
2016/12/02 19:21:48
nit: same as GetLastToken, I don't think we need t
|
| + base::AutoLock lock(state_lock_); |
| + TryUpdateState(); |
| return last_state_.error; |
| } |
| bool CommandBufferProxyImpl::Send(IPC::Message* msg) { |
| - // Caller should not intentionally send a message if the context is lost. |
| - DCHECK(last_state_.error == gpu::error::kNoError); |
| DCHECK(channel_); |
| + state_lock_.AssertAcquired(); |
| + DCHECK_EQ(gpu::error::kNoError, last_state_.error); |
| - if (!msg->is_sync()) { |
| - bool result = channel_->Send(msg); |
| - // Send() should always return true for async messages. |
| - DCHECK(result); |
| - return true; |
| + state_lock_.Release(); |
| + |
| + // Call is_sync() before sending message. |
| + bool is_sync = msg->is_sync(); |
| + bool result = channel_->Send(msg); |
| + // Send() should always return true for async messages. |
| + DCHECK(is_sync || result); |
| + |
| + state_lock_.Acquire(); |
| + |
| + if (last_state_.error != gpu::error::kNoError) { |
| + // Context might have been lost on another thread while we were waiting for |
| + // message send/reply. |
| + OnGpuStateError(); |
|
piman
2016/12/02 19:21:48
Ditto, I think you only want to call OnGpuControlL
sunnyps
2016/12/07 03:31:22
Done.
|
| + return false; |
| } |
| - if (channel_->Send(msg)) |
| - return true; |
| + if (!result) { |
| + // Flag the command buffer as lost. Defer deleting the channel until |
| + // OnChannelError is called after returning to the message loop in case it |
| + // is referenced elsewhere. |
| + DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context."; |
| + OnClientError(gpu::error::kLostContext); |
| + return false; |
| + } |
| - // Flag the command buffer as lost. Defer deleting the channel until |
| - // OnChannelError is called after returning to the message loop in case |
| - // it is referenced elsewhere. |
| - DVLOG(1) << "CommandBufferProxyImpl::Send failed. Losing context."; |
| - OnClientError(gpu::error::kLostContext); |
| - return false; |
| + return true; |
| } |
| void CommandBufferProxyImpl::SetStateFromSyncReply( |
| const gpu::CommandBuffer::State& state) { |
| - DCHECK(last_state_.error == gpu::error::kNoError); |
| + CheckLock(); |
| + state_lock_.AssertAcquired(); |
| // Handle wraparound. It works as long as we don't have more than 2B state |
| // updates in flight across which reordering occurs. |
| if (state.generation - last_state_.generation < 0x80000000U) |
| @@ -720,17 +778,37 @@ void CommandBufferProxyImpl::SetStateFromSyncReply( |
| OnGpuStateError(); |
| } |
| -void CommandBufferProxyImpl::TryUpdateState() { |
| +gpu::CommandBuffer::State CommandBufferProxyImpl::TryUpdateState() { |
|
piman
2016/12/02 19:21:48
I don't think it's necessary for this (and other T
sunnyps
2016/12/07 03:31:22
Done.
|
| + CheckLock(); |
| + state_lock_.AssertAcquired(); |
| if (last_state_.error == gpu::error::kNoError) { |
| shared_state()->Read(&last_state_); |
| if (last_state_.error != gpu::error::kNoError) |
| OnGpuStateError(); |
| } |
| + return last_state_; |
| } |
| -void CommandBufferProxyImpl::TryUpdateStateDontReportError() { |
| +gpu::CommandBuffer::State CommandBufferProxyImpl::TryUpdateStateThreadSafe() { |
| + state_lock_.AssertAcquired(); |
| + if (last_state_.error == gpu::error::kNoError) { |
| + shared_state()->Read(&last_state_); |
| + if (last_state_.error != gpu::error::kNoError) { |
| + callback_thread_->PostTask( |
| + FROM_HERE, |
| + base::Bind(&CommandBufferProxyImpl::LockAndDisconnectChannel, |
| + weak_this_)); |
| + } |
| + } |
| + return last_state_; |
| +} |
| + |
| +gpu::CommandBuffer::State |
| +CommandBufferProxyImpl::TryUpdateStateDontReportError() { |
| + state_lock_.AssertAcquired(); |
| if (last_state_.error == gpu::error::kNoError) |
| shared_state()->Read(&last_state_); |
| + return last_state_; |
| } |
| void CommandBufferProxyImpl::UpdateVerifiedReleases(uint32_t verified_flush) { |
| @@ -799,6 +877,8 @@ void CommandBufferProxyImpl::OnUpdateVSyncParameters(base::TimeTicks timebase, |
| } |
| void CommandBufferProxyImpl::OnGpuSyncReplyError() { |
| + CheckLock(); |
| + state_lock_.AssertAcquired(); |
| last_state_.error = gpu::error::kLostContext; |
| last_state_.context_lost_reason = gpu::error::kInvalidGpuMessage; |
| // This method may be inside a callstack from the GpuControlClient (we got a |
| @@ -811,6 +891,7 @@ void CommandBufferProxyImpl::OnGpuAsyncMessageError( |
| gpu::error::ContextLostReason reason, |
| gpu::error::Error error) { |
| CheckLock(); |
| + state_lock_.AssertAcquired(); |
| last_state_.error = error; |
| last_state_.context_lost_reason = reason; |
| // This method only occurs when receiving IPC messages, so we know it's not in |
| @@ -819,7 +900,9 @@ void CommandBufferProxyImpl::OnGpuAsyncMessageError( |
| } |
| void CommandBufferProxyImpl::OnGpuStateError() { |
| - DCHECK(last_state_.error != gpu::error::kNoError); |
| + CheckLock(); |
| + state_lock_.AssertAcquired(); |
| + DCHECK_NE(gpu::error::kNoError, last_state_.error); |
| // This method may be inside a callstack from the GpuControlClient (we |
| // encountered an error while trying to perform some action). So avoid |
| // re-entering the GpuControlClient here. |
| @@ -828,6 +911,7 @@ void CommandBufferProxyImpl::OnGpuStateError() { |
| void CommandBufferProxyImpl::OnClientError(gpu::error::Error error) { |
| CheckLock(); |
| + state_lock_.AssertAcquired(); |
| last_state_.error = error; |
| last_state_.context_lost_reason = gpu::error::kUnknown; |
| // This method may be inside a callstack from the GpuControlClient (we |
| @@ -838,6 +922,7 @@ void CommandBufferProxyImpl::OnClientError(gpu::error::Error error) { |
| void CommandBufferProxyImpl::DisconnectChannelInFreshCallStack() { |
| CheckLock(); |
| + state_lock_.AssertAcquired(); |
| // Inform the GpuControlClient of the lost state immediately, though this may |
| // be a re-entrant call to the client so we use the MaybeReentrant variant. |
| if (gpu_control_client_) |