Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(718)

Unified Diff: content/common/gpu/gpu_command_buffer_stub.cc

Issue 1331843005: Implemented new fence syncs which replace the old sync points. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Some fixes Created 5 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: content/common/gpu/gpu_command_buffer_stub.cc
diff --git a/content/common/gpu/gpu_command_buffer_stub.cc b/content/common/gpu/gpu_command_buffer_stub.cc
index d91a65259efff40f2100f525e8bcab7dd98e8555..6d9c4db6663d29f30b19690a206063cdce4fde1d 100644
--- a/content/common/gpu/gpu_command_buffer_stub.cc
+++ b/content/common/gpu/gpu_command_buffer_stub.cc
@@ -450,6 +450,8 @@ void GpuCommandBufferStub::Destroy() {
// destroy it before those.
scheduler_.reset();
+ fence_sync_manager_.reset();
+
bool have_context = false;
if (decoder_ && decoder_->GetGLContext()) {
// Try to make the context current regardless of whether it was lost, so we
@@ -497,6 +499,7 @@ void GpuCommandBufferStub::OnInitialize(
scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
decoder_.get(),
decoder_.get()));
+ fence_sync_manager_.reset(new gpu::FenceSyncManager);
if (preemption_flag_.get())
scheduler_->SetPreemptByFlag(preemption_flag_);
@@ -613,6 +616,10 @@ void GpuCommandBufferStub::OnInitialize(
decoder_->SetWaitSyncPointCallback(
base::Bind(&GpuCommandBufferStub::OnWaitSyncPoint,
base::Unretained(this)));
+ decoder_->SetFenceSyncReleaseCallback(base::Bind(
+ &GpuCommandBufferStub::OnFenceSenceRelease, base::Unretained(this)));
+ decoder_->SetWaitFenceSyncCallback(base::Bind(
+ &GpuCommandBufferStub::OnWaitFenceSync, base::Unretained(this)));
command_buffer_->SetPutOffsetChangeCallback(
base::Bind(&GpuCommandBufferStub::PutChanged, base::Unretained(this)));
@@ -670,6 +677,16 @@ void GpuCommandBufferStub::SetLatencyInfoCallback(
latency_info_callback_ = callback;
}
+// Registers an upper bound on when fence sync |release| must have been
+// released: once this channel has processed message |max_order_number|,
+// OnOrderNumberReached will force-release the fence so that waiters are not
+// blocked forever by a release that never arrives (a misbehaving client).
+void GpuCommandBufferStub::ValidateFenceSyncRelease(uint32_t max_order_number,
+ uint32_t release) {
+ // The bound must lie strictly in the still-unprocessed message range.
+ DCHECK(max_order_number > channel_->GetProcessedOrderNum());
+ DCHECK(max_order_number <= channel_->GetUnprocessedOrderNum());
+ // NOTE(review): OnOrderNumberReached pops entries from top() while
+ // top().first <= order_number, which assumes order_fence_queue_ yields the
+ // SMALLEST max_order_number first — verify its comparator (a default
+ // std::priority_queue of pairs is a max-heap).
+ order_fence_queue_.push(std::make_pair(max_order_number, release));
+ // Weak pointer: the callback must not fire after this stub is destroyed.
+ channel_->AddProcessedOrderNumberCallback(
+ max_order_number, base::Bind(&GpuCommandBufferStub::OnOrderNumberReached,
+ this->AsWeakPtr(), max_order_number));
+}
+
int32 GpuCommandBufferStub::GetRequestedAttribute(int attr) const {
// The command buffer is pairs of enum, value
// search for the requested attribute, return the value.
@@ -896,8 +913,13 @@ void GpuCommandBufferStub::OnRetireSyncPoint(uint32 sync_point) {
gpu::gles2::MailboxManager* mailbox_manager =
context_group_->mailbox_manager();
- if (mailbox_manager->UsesSync() && MakeCurrent())
- mailbox_manager->PushTextureUpdates(sync_point);
+ if (mailbox_manager->UsesSync() && MakeCurrent()) {
+ // Old sync points are global and not namespaced by gpu_channel & route_id.
+ // We can simply use the global sync point number as the release count with
+ // 0 for both gpu_channel and route_id. This will all be removed once the
+ // old sync points are replaced.
+ mailbox_manager->PushTextureUpdates(0, 0, sync_point);
+ }
GpuChannelManager* manager = channel_->gpu_channel_manager();
manager->sync_point_manager()->RetireSyncPoint(sync_point);
@@ -908,7 +930,11 @@ bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
return true;
GpuChannelManager* manager = channel_->gpu_channel_manager();
if (manager->sync_point_manager()->IsSyncPointRetired(sync_point)) {
- PullTextureUpdates(sync_point);
+ // Old sync points are global and not namespaced by gpu_channel & route_id.
+ // We can simply use the global sync point number as the release count with
+ // 0 for both gpu_channel and route_id. This will all be removed once the
+ // old sync points are replaced.
+ PullTextureUpdates(0, 0, sync_point);
return true;
}
@@ -927,7 +953,11 @@ bool GpuCommandBufferStub::OnWaitSyncPoint(uint32 sync_point) {
}
void GpuCommandBufferStub::OnWaitSyncPointCompleted(uint32 sync_point) {
- PullTextureUpdates(sync_point);
+ // Old sync points are global and not namespaced by gpu_channel & route_id.
+ // We can simply use the global sync point number as the release count with
+ // 0 for both gpu_channel and route_id. This will all be removed once the
+ // old sync points are replaced.
+ PullTextureUpdates(0, 0, sync_point);
--sync_point_wait_count_;
if (sync_point_wait_count_ == 0) {
TRACE_EVENT_ASYNC_END1("gpu", "WaitSyncPoint", this,
@@ -936,11 +966,14 @@ void GpuCommandBufferStub::OnWaitSyncPointCompleted(uint32 sync_point) {
scheduler_->SetScheduled(true);
}
-void GpuCommandBufferStub::PullTextureUpdates(uint32 sync_point) {
+void GpuCommandBufferStub::PullTextureUpdates(int channel_id,
+ uint32_t route_id,
+ uint32 release) {
gpu::gles2::MailboxManager* mailbox_manager =
context_group_->mailbox_manager();
- if (mailbox_manager->UsesSync() && MakeCurrent())
- mailbox_manager->PullTextureUpdates(sync_point);
+ if (mailbox_manager->UsesSync() && MakeCurrent()) {
+ mailbox_manager->PullTextureUpdates(channel_id, route_id, release);
+ }
}
void GpuCommandBufferStub::OnSignalSyncPoint(uint32 sync_point, uint32 id) {
@@ -975,6 +1008,99 @@ void GpuCommandBufferStub::OnSignalQuery(uint32 query_id, uint32 id) {
OnSignalSyncPointAck(id);
}
+// Called by the decoder when the command stream releases fence sync
+// |release|. Pushes pending texture updates — namespaced by this channel's
+// client id and this stub's route id — before marking the fence released, so
+// that any waiter observing the release also observes the texture state.
+// NOTE(review): "Sence" looks like a typo for "Sync"; rename here, in the
+// header, and at the SetFenceSyncReleaseCallback Bind site together.
+void GpuCommandBufferStub::OnFenceSenceRelease(uint32_t release) {
+ if (!fence_sync_manager_->IsFenceSyncReleased(release)) {
+ gpu::gles2::MailboxManager* mailbox_manager =
+ context_group_->mailbox_manager();
+ if (mailbox_manager->UsesSync() && MakeCurrent()) {
+ mailbox_manager->PushTextureUpdates(channel_->client_id(), route_id_,
+ release);
+ }
+
+ // Release only after the push so waiters never see a released fence with
+ // stale mailbox state.
+ fence_sync_manager_->ReleaseFenceSync(release);
+ }
+}
+
+// Handles a wait on fence sync |release| owned by the command buffer at
+// (|channel_client_id|, |route_id|). Returns true when the wait is already
+// satisfied or is invalid (unknown channel/stub, self-wait, release channel
+// that can never release the fence); otherwise deschedules this stub until
+// OnWaitFenceSyncCompleted fires and returns the (now-false) scheduled state.
+bool GpuCommandBufferStub::OnWaitFenceSync(int channel_client_id,
+ uint32_t route_id,
+ uint32_t release) {
+ GpuChannel* release_channel =
+ channel_->gpu_channel_manager()->LookupChannel(channel_client_id);
+ if (!release_channel)
+ return true;
+
+ GpuCommandBufferStub* release_stub =
+ release_channel->LookupCommandBuffer(route_id);
+ if (!release_stub || release_stub == this)
+ return true;
+
+ gpu::FenceSyncManager* release_fence_sync_manager =
+ release_stub->fence_sync_manager();
+ // Check if it has already been released.
+ if (release_fence_sync_manager->IsFenceSyncReleased(release)) {
+ PullTextureUpdates(channel_client_id, route_id, release);
+ return true;
+ }
+
+ // Check if wait is valid, wait channel's order number should not be higher.
+ const uint32_t wait_order_num = channel_->GetCurrentOrderNum();
+ const uint32_t release_processed_num =
+ release_channel->GetProcessedOrderNum();
piman 2015/09/10 23:55:32 I think it would be good to start reasoning in ter
David Yen 2015/09/23 18:30:34 No longer relevant, although the ideas here were f
+ if (release_processed_num > wait_order_num)
+ return true;
+
+ // Check if wait is valid, wait should have unprocessed messages.
+ const uint32_t release_unprocessed_num =
+ release_channel->GetUnprocessedOrderNum();
piman 2015/09/10 23:55:32 I think same here, this should be a per-stream sta
David Yen 2015/09/23 18:30:34 Done.
+ // BUG FIX: this condition was inverted. The original code returned early
+ // (skipping the wait) exactly when the release channel still had pending
+ // messages — the normal case in which we must wait — and proceeded to block
+ // forever when it had none. If nothing remains to be processed on the
+ // release channel the fence can never be released, so that is the invalid
+ // wait to skip. The DCHECKs in ValidateFenceSyncRelease
+ // (max_order_number > processed) also only hold on the
+ // unprocessed > processed path.
+ if (release_unprocessed_num <= release_processed_num)
+ return true;
+
+ if (sync_point_wait_count_ == 0) {
+ TRACE_EVENT_ASYNC_BEGIN1("gpu", "WaitFenceSync", this,
+ "GpuCommandBufferStub", this);
+ }
+ // Deschedule ourselves until the fence is released (or force-released).
+ scheduler_->SetScheduled(false);
+ ++sync_point_wait_count_;
+
+ // Add callback when fence sync is signalled.
+ release_fence_sync_manager->AddFenceSyncCallback(
+ wait_order_num, release,
+ base::Bind(&GpuCommandBufferStub::OnWaitFenceSyncCompleted,
+ this->AsWeakPtr(), channel_client_id, route_id, release));
+
+ // Keep track of the processed order number on the signalling channel so that
+ // we can catch misbehaved fences which do not exist. The fence sync must be
+ // released by the current wait order number, or latest unprocessed order
+ // number on the channel.
+ const uint32_t max_signal_order_num =
+ std::min(release_unprocessed_num, wait_order_num);
+ release_stub->ValidateFenceSyncRelease(max_signal_order_num, release);
+
+ return scheduler_->IsScheduled();
+}
+
+// Completion callback registered in OnWaitFenceSync: runs once the awaited
+// fence sync is released. Pulls the releaser's texture updates, then
+// reschedules this stub when the last outstanding wait finishes.
+void GpuCommandBufferStub::OnWaitFenceSyncCompleted(int channel_client_id,
+ uint32_t route_id,
+ uint32_t release) {
+ PullTextureUpdates(channel_client_id, route_id, release);
+ // Counter is shared with the old sync-point wait path; only the last
+ // completed wait ends the trace and resumes scheduling.
+ --sync_point_wait_count_;
+ if (sync_point_wait_count_ == 0) {
+ TRACE_EVENT_ASYNC_END1("gpu", "WaitFenceSync", this, "GpuCommandBufferStub",
+ this);
+ }
+ scheduler_->SetScheduled(true);
+}
+
+// Invoked (via the callback registered in ValidateFenceSyncRelease) when this
+// channel has processed message |order_number|. Force-releases every queued
+// fence whose deadline has passed so waiters on misbehaved fences unblock.
+void GpuCommandBufferStub::OnOrderNumberReached(uint32_t order_number) {
+ // NOTE(review): assumes top() is the entry with the SMALLEST deadline —
+ // confirm order_fence_queue_ uses a min-ordering comparator, otherwise
+ // expired entries behind a larger top() are never drained here.
+ while (!order_fence_queue_.empty() &&
+ order_fence_queue_.top().first <= order_number) {
+ const uint32_t fence_sync_release = order_fence_queue_.top().second;
+ if (!fence_sync_manager_->IsFenceSyncReleased(fence_sync_release)) {
+ fence_sync_manager_->ReleaseFenceSync(fence_sync_release);
+ }
+ order_fence_queue_.pop();
+ }
+}
void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
bool has_callback) {

Powered by Google App Engine
This is Rietveld 408576698