Index: content/common/gpu/client/command_buffer_proxy_impl.cc
diff --git a/content/common/gpu/client/command_buffer_proxy_impl.cc b/content/common/gpu/client/command_buffer_proxy_impl.cc
index 17ecca2741cc2db4748c4f974cb56c5dacc27614..f0aa345f0876b994edcbcca8c79005df767356dd 100644
--- a/content/common/gpu/client/command_buffer_proxy_impl.cc
+++ b/content/common/gpu/client/command_buffer_proxy_impl.cc
@@ -46,6 +46,9 @@ CommandBufferProxyImpl::CommandBufferProxyImpl(GpuChannelHost* channel,
       flush_count_(0),
       last_put_offset_(-1),
       last_barrier_put_offset_(-1),
+      next_fence_sync_release_(1),
+      flushed_fence_sync_release_(0),
+      verified_fence_sync_release_(0),
       next_signal_id_(0) {
   DCHECK(channel);
 }
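
The three counters added above stage each fence sync release through its lifetime: next_fence_sync_release_ is the next value GenerateFenceSyncRelease() will hand out, flushed_fence_sync_release_ is the highest release already covered by an ordering barrier, and verified_fence_sync_release_ is the highest release known to have reached the GPU process. next_fence_sync_release_ starts at 1 so that 0 can mean "no release" (see IsFenceSyncRelease() below), giving the invariant verified <= flushed < next throughout.
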
@@ -231,8 +234,19 @@ void CommandBufferProxyImpl::Flush(int32 put_offset) {
   last_barrier_put_offset_ = put_offset;
   if (channel_) {
-    channel_->OrderingBarrier(route_id_, stream_id_, put_offset, ++flush_count_,
-                              latency_info_, put_offset_changed, true);
+    const uint32_t flush_id =
+        channel_->OrderingBarrier(route_id_, stream_id_, put_offset,
+                                  ++flush_count_, latency_info_,
+                                  put_offset_changed, true);
+    if (put_offset_changed) {
+      DCHECK(flush_id);
+      const uint32_t fence_sync_release = next_fence_sync_release_ - 1;
+      if (fence_sync_release > flushed_fence_sync_release_) {
+        flushed_fence_sync_release_ = fence_sync_release;
+        flushed_release_flush_id_.push(std::make_pair(fence_sync_release,
+                                                      flush_id));
piman, 2015/09/26 00:09:51:
  TBH all this tracking seems costly, and most of it …

David Yen, 2015/09/26 00:24:56:
  None of these variables are atomic so there is not …
+      }
+    }
   }
   if (put_offset_changed)
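
Both Flush() and OrderingBarrier() now pair the highest fence sync release generated so far with the flush id returned by the channel, but only for barriers that actually advance the put offset past a release no earlier barrier covered. That queue of (release, flush id) pairs is what later lets the proxy translate "flush N reached the server" into "all releases up to R are verified". Below is a minimal standalone sketch of just this bookkeeping; FlushBookkeeping and OnOrderingBarrier are illustrative names, GpuChannelHost is out of the picture, and the counters mirror the members added in this patch:

#include <cassert>
#include <cstdint>
#include <queue>
#include <utility>

struct FlushBookkeeping {
  uint32_t next_release = 1;     // 0 is reserved to mean "no release".
  uint32_t flushed_release = 0;  // Highest release covered by a barrier.
  std::queue<std::pair<uint32_t, uint32_t>> release_to_flush;

  // Mirrors the queueing above: called for each barrier that moved the
  // put offset, with the flush id the channel returned for it.
  void OnOrderingBarrier(uint32_t flush_id) {
    const uint32_t release = next_release - 1;  // Highest generated so far.
    if (release > flushed_release) {
      flushed_release = release;
      release_to_flush.push(std::make_pair(release, flush_id));
    }
  }
};

int main() {
  FlushBookkeeping book;
  book.next_release = 3;       // Pretend releases 1 and 2 were generated.
  book.OnOrderingBarrier(7);   // Both ride on flush 7: one queue entry.
  book.OnOrderingBarrier(8);   // No new release since: queue unchanged.
  assert(book.release_to_flush.size() == 1);
  assert(book.flushed_release == 2);
  return 0;
}

Note the queue grows by at most one entry per distinct release high-water mark, so its size is bounded by the number of fence syncs still awaiting verification.
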
@@ -250,8 +264,19 @@ void CommandBufferProxyImpl::OrderingBarrier(int32 put_offset) {
   last_barrier_put_offset_ = put_offset;
   if (channel_) {
-    channel_->OrderingBarrier(route_id_, stream_id_, put_offset, ++flush_count_,
-                              latency_info_, put_offset_changed, false);
+    const uint32_t flush_id =
+        channel_->OrderingBarrier(route_id_, stream_id_, put_offset,
+                                  ++flush_count_, latency_info_,
+                                  put_offset_changed, false);
+    if (put_offset_changed) {
+      DCHECK(flush_id);
+      const uint32_t fence_sync_release = next_fence_sync_release_ - 1;
+      if (fence_sync_release > flushed_fence_sync_release_) {
+        flushed_fence_sync_release_ = fence_sync_release;
+        flushed_release_flush_id_.push(std::make_pair(fence_sync_release,
+                                                      flush_id));
+      }
+    }
   }
   if (put_offset_changed)
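
This path duplicates the release bookkeeping from Flush() verbatim; the only difference between the two is the final argument to GpuChannelHost::OrderingBarrier (true there, false here), which, judging by the call sites, controls whether the channel is flushed immediately or the barrier is merely queued.
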
@@ -492,6 +517,34 @@ uint64_t CommandBufferProxyImpl::GetCommandBufferID() const {
   return command_buffer_id_;
 }
+uint32_t CommandBufferProxyImpl::GenerateFenceSyncRelease() {
+  return next_fence_sync_release_++;
+}
+
+bool CommandBufferProxyImpl::IsFenceSyncRelease(uint32_t release) {
+  return release != 0 && release < next_fence_sync_release_;
piman, 2015/09/26 00:09:51:
  Do we need to handle wraparound? 1 per frame would …

David Yen, 2015/09/28 17:38:18:
  What about just using a uint64? This number doesn't …
+} |
+ |
+bool CommandBufferProxyImpl::IsFenceSyncFlushed(uint32_t release) { |
+ if (release <= verified_fence_sync_release_) |
+ return true; |
+ |
+ // Check if we have actually flushed the fence sync release. |
+ if (release <= flushed_fence_sync_release_) { |
+ DCHECK(!flushed_release_flush_id_.empty()); |
+ // Check if it has already been validated by another context. |
+ UpdateVerifiedReleases(channel_->GetHighestValidatedFlushID(stream_id_)); |
+ if (release <= verified_fence_sync_release_) |
+ return true; |
+ |
+ // Has not been validated, validate it now. |
+ UpdateVerifiedReleases(channel_->ValidateFlushIDReachedServer(stream_id_)); |
+ return (release <= verified_fence_sync_release_); |
+ } |
+ |
+ return false; |
+} |
+ |
uint32 CommandBufferProxyImpl::InsertSyncPoint() { |
CheckLock(); |
if (last_state_.error != gpu::error::kNoError) |
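
IsFenceSyncFlushed() is deliberately tiered: a release at or below verified_fence_sync_release_ returns immediately; otherwise the cached high-water mark from GetHighestValidatedFlushID() is consulted, since another context on the same stream may already have paid for validation; only if that still falls short does ValidateFlushIDReachedServer() force a synchronous confirmation. On the wraparound question in the review thread: a uint32_t incremented once per frame at 60 fps wraps after roughly 2^32 / 60 / 86400 ≈ 828 days, but at 10,000 releases per second it wraps in about five days, whereas a uint64_t at even a billion increments per second lasts over 500 years, which is why David Yen proposes uint64. A hedged sketch of the tiered lookup, with GpuChannelHost reduced to a stub whose internals (sent/validated fields) and the Verifier/OnFlushedRelease names are illustrative only:

#include <cstdint>
#include <iostream>
#include <queue>
#include <utility>

// Stub standing in for GpuChannelHost.
struct StubChannel {
  uint32_t sent = 0;       // Highest flush id handed out so far.
  uint32_t validated = 0;  // Highest flush id confirmed by the server.
  uint32_t GetHighestValidatedFlushID() { return validated; }
  uint32_t ValidateFlushIDReachedServer() {
    validated = sent;  // Pretend a synchronous round trip confirmed all.
    return validated;
  }
};

class Verifier {
 public:
  explicit Verifier(StubChannel* channel) : channel_(channel) {}

  // Record that |release| was covered by the barrier with |flush_id|.
  void OnFlushedRelease(uint32_t release, uint32_t flush_id) {
    flushed_release_ = release;
    queue_.push(std::make_pair(release, flush_id));
  }

  // Tiered check mirroring IsFenceSyncFlushed() above.
  bool IsFenceSyncFlushed(uint32_t release) {
    if (release <= verified_release_)
      return true;   // Fast path: already verified.
    if (release > flushed_release_)
      return false;  // Not even flushed yet.
    // Cheap cached query first; only pay for the round trip if needed.
    UpdateVerified(channel_->GetHighestValidatedFlushID());
    if (release <= verified_release_)
      return true;
    UpdateVerified(channel_->ValidateFlushIDReachedServer());
    return release <= verified_release_;
  }

 private:
  void UpdateVerified(uint32_t verified_flush) {
    while (!queue_.empty() && queue_.front().second <= verified_flush) {
      verified_release_ = queue_.front().first;
      queue_.pop();
    }
  }

  StubChannel* channel_;
  uint32_t flushed_release_ = 0;
  uint32_t verified_release_ = 0;
  std::queue<std::pair<uint32_t, uint32_t>> queue_;
};

int main() {
  StubChannel channel;
  Verifier verifier(&channel);
  channel.sent = 3;
  verifier.OnFlushedRelease(1, 3);  // Release 1 rode on flush 3.
  std::cout << verifier.IsFenceSyncFlushed(1)   // 1: forced a round trip.
            << verifier.IsFenceSyncFlushed(1)   // 1: fast path this time.
            << verifier.IsFenceSyncFlushed(2)   // 0: never flushed.
            << "\n";
  return 0;
}
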
@@ -638,6 +691,17 @@ void CommandBufferProxyImpl::TryUpdateState() {
     shared_state()->Read(&last_state_);
 }
+void CommandBufferProxyImpl::UpdateVerifiedReleases(uint32_t verified_flush) {
+  while (!flushed_release_flush_id_.empty()) {
+    const std::pair<uint32_t, uint32_t>& front_item =
+        flushed_release_flush_id_.front();
+    if (front_item.second > verified_flush)
+      break;
+    verified_fence_sync_release_ = front_item.first;
+    flushed_release_flush_id_.pop();
+  }
+}
+
 gpu::CommandBufferSharedState* CommandBufferProxyImpl::shared_state() const {
   return reinterpret_cast<gpu::CommandBufferSharedState*>(
       shared_state_shm_->memory());
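
UpdateVerifiedReleases() can drain the queue front-to-back because flush ids are handed out in increasing order, so entries are sorted by their second field. For example, with the queue holding (release 2, flush 7) and (release 5, flush 9), a call with verified_flush = 8 pops the first entry, sets verified_fence_sync_release_ to 2, and stops at the second entry since flush 9 has not been confirmed yet. Each pair is pushed once and popped at most once, so the cost is amortized O(1) per flush, which goes some way toward the reviewer's concern about tracking overhead.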