Chromium Code Reviews

Index: components/mus/gles2/command_buffer_local.cc
diff --git a/components/mus/gles2/command_buffer_local.cc b/components/mus/gles2/command_buffer_local.cc
index 9dc82bca267e5196201a1597e59b079889756fb4..c4e076ff8de434ce129b21bdd8e2d2799c9915f6 100644
--- a/components/mus/gles2/command_buffer_local.cc
+++ b/components/mus/gles2/command_buffer_local.cc
@@ -4,6 +4,7 @@
 #include "components/mus/gles2/command_buffer_local.h"
+#include "base/atomic_sequence_num.h"
 #include "base/bind.h"
 #include "base/memory/shared_memory.h"
 #include "components/mus/gles2/command_buffer_local_client.h"
@@ -26,12 +27,19 @@
 namespace mus {
+namespace {
+
+base::StaticAtomicSequenceNumber g_next_command_buffer_id;

David Yen (2015/11/19 18:12:52):
Same question as above.

Peng (2015/11/20 17:41:15):
All instances will be in the same process (mus process).

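
For context, base::StaticAtomicSequenceNumber::GetNext() returns a monotonically increasing value, so every CommandBufferLocal created in the mus process gets a distinct ID without locking. A minimal stand-alone sketch of the same idea (illustrative only, not the Chromium class):

#include <atomic>
#include <cstdint>

// Stand-in for g_next_command_buffer_id.GetNext(): a process-wide counter
// that hands out a fresh, unique ID on every call.
uint64_t NextCommandBufferId() {
  static std::atomic<uint64_t> next_id{0};
  return next_id.fetch_add(1, std::memory_order_relaxed);
}
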
+
+}
+
 const unsigned int GL_READ_WRITE_CHROMIUM = 0x78F2;
 CommandBufferLocal::CommandBufferLocal(CommandBufferLocalClient* client,
                                        gfx::AcceleratedWidget widget,
                                        const scoped_refptr<GpuState>& gpu_state)
-    : widget_(widget),
+    : command_buffer_id_(g_next_command_buffer_id.GetNext()),
+      widget_(widget),
       gpu_state_(gpu_state),
       client_(client),
       next_fence_sync_release_(1),
@@ -90,6 +98,9 @@ bool CommandBufferLocal::Initialize() {
   decoder_.reset(::gpu::gles2::GLES2Decoder::Create(context_group.get()));
   scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(), decoder_.get(),
                                          decoder_.get()));
+  sync_point_order_data_ = gpu::SyncPointOrderData::Create();

David Yen (2015/11/19 18:12:52):
Same comment about order data as above.

Peng (2015/11/20 20:32:15):
Done.

+  sync_point_client_ = gpu_state_->sync_point_manager()->CreateSyncPointClient(
+      sync_point_order_data_, GetNamespaceID(), GetCommandBufferID());
   decoder_->set_engine(scheduler_.get());
   decoder_->SetWaitSyncPointCallback(
       base::Bind(&CommandBufferLocal::OnWaitSyncPoint, base::Unretained(this)));
@@ -212,13 +223,11 @@ bool CommandBufferLocal::IsGpuChannelLost() {
 }
 gpu::CommandBufferNamespace CommandBufferLocal::GetNamespaceID() const {
-  NOTIMPLEMENTED();
-  return gpu::CommandBufferNamespace::INVALID;
+  return gpu::CommandBufferNamespace::MOJO_LOCAL;
 }
 uint64_t CommandBufferLocal::GetCommandBufferID() const {
-  NOTIMPLEMENTED();
-  return 0;
+  return command_buffer_id_;
 }
 uint64_t CommandBufferLocal::GenerateFenceSyncRelease() {
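
With GetNamespaceID() and GetCommandBufferID() now returning real values, a fence sync produced by this command buffer can be identified by the usual triple of namespace, command buffer ID, and release count. A rough, illustrative sketch of that identity (field names are assumptions, not the exact gpu sync token layout):

#include <cstdint>

// Illustrative only: the three values that name a fence sync release.
struct FenceSyncId {
  int32_t namespace_id;        // e.g. gpu::CommandBufferNamespace::MOJO_LOCAL
  uint64_t command_buffer_id;  // from GetCommandBufferID()
  uint64_t release_count;      // from GenerateFenceSyncRelease()
};
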
@@ -270,29 +279,24 @@ bool CommandBufferLocal::OnWaitSyncPoint(uint32_t sync_point) {
   if (!sync_point)
     return true;
-  bool context_changed = false;
-  while (!gpu_state_->sync_point_manager()->IsSyncPointRetired(sync_point)) {
+  if (gpu_state_->sync_point_manager()->IsSyncPointRetired(sync_point))
+    return true;
+
+  do {
     gpu_state_->command_buffer_task_runner()->RunOneTask();
-    context_changed = true;
-  }
+  } while (!gpu_state_->sync_point_manager()->IsSyncPointRetired(sync_point));
   // RunOneTask() changes the current GL context, so we have to recover it.
-  if (context_changed) {
-    if (!decoder_->MakeCurrent()) {
-      command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
-      command_buffer_->SetParseError(::gpu::error::kLostContext);
-    }
+  if (!decoder_->MakeCurrent()) {
+    command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
+    command_buffer_->SetParseError(::gpu::error::kLostContext);
   }
   return true;
 }
 void CommandBufferLocal::OnFenceSyncRelease(uint64_t release) {
-  // TODO(dyen): Implement once CommandBufferID has been figured out and
-  // we have a SyncPointClient. It would probably look like what is commented
-  // out below:
-  // if (!sync_point_client_->client_state()->IsFenceSyncReleased(release))
-  //   sync_point_client_->ReleaseFenceSync(release);
-  NOTIMPLEMENTED();
+  if (!sync_point_client_->client_state()->IsFenceSyncReleased(release))
+    sync_point_client_->ReleaseFenceSync(release);
 }
 bool CommandBufferLocal::OnWaitFenceSync(
@@ -312,17 +316,16 @@ bool CommandBufferLocal::OnWaitFenceSync(
   if (release_state->IsFenceSyncReleased(release))
     return true;
-  // TODO(dyen): Implement once CommandBufferID has been figured out and
-  // we have a SyncPointClient. It would probably look like what is commented
-  // out below:
-  // scheduler_->SetScheduled(false);
-  // sync_point_client_->Wait(
-  //     release_state.get(),
-  //     release,
-  //     base::Bind(&CommandBufferLocal::OnSyncPointRetired,
-  //                weak_factory_.GetWeakPtr()));
-  NOTIMPLEMENTED();
-  return scheduler_->scheduled();
+  do {
+    gpu_state_->command_buffer_task_runner()->RunOneTask();
+  } while (!release_state->IsFenceSyncReleased(release));
+
+  // RunOneTask() changes the current GL context, so we have to recover it.
+  if (!decoder_->MakeCurrent()) {
+    command_buffer_->SetContextLostReason(decoder_->GetContextLostReason());
+    command_buffer_->SetParseError(::gpu::error::kLostContext);
+  }
+  return true;
 }
 void CommandBufferLocal::OnParseError() {
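
After this change, OnWaitSyncPoint() and OnWaitFenceSync() share the same shape: pump other command buffers' tasks until the wait is satisfied, then restore this decoder's GL context. A hypothetical helper capturing that pattern (name and parameters are assumptions, not part of this CL):

// Hypothetical sketch, not part of this CL.
// |is_satisfied| reports whether the wait is already over,
// |run_one_task| pumps one scheduled command buffer task, and
// |recover_context| re-establishes this decoder's GL context.
template <typename IsSatisfied, typename RunOneTask, typename RecoverContext>
void PumpTasksUntil(IsSatisfied is_satisfied,
                    RunOneTask run_one_task,
                    RecoverContext recover_context) {
  if (is_satisfied())
    return;  // Already signalled; the current GL context is untouched.

  do {
    run_one_task();  // e.g. command_buffer_task_runner()->RunOneTask()
  } while (!is_satisfied());

  // Running other tasks may have switched the current GL context.
  recover_context();  // e.g. decoder_->MakeCurrent() plus lost-context handling
}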