Index: content/common/gpu/gpu_command_buffer_stub.cc
diff --git a/content/common/gpu/gpu_command_buffer_stub.cc b/content/common/gpu/gpu_command_buffer_stub.cc
index a1167c9325562791e4ac88a6ddb5d41bf039d8f2..ceecbe4312e0e22be477bc44991ff7e61b3c0aee 100644
--- a/content/common/gpu/gpu_command_buffer_stub.cc
+++ b/content/common/gpu/gpu_command_buffer_stub.cc
@@ -243,7 +243,7 @@ void GpuCommandBufferStub::PollWork() {
   if (decoder_.get() && !MakeCurrent())
     return;
 
-  if (scheduler_.get()) {
+  if (scheduler_) {
     bool fences_complete = scheduler_->PollUnscheduleFences();
     // Perform idle work if all fences are complete.
     if (fences_complete) {
@@ -274,7 +274,7 @@ void GpuCommandBufferStub::PollWork() {
 }
 
 bool GpuCommandBufferStub::HasUnprocessedCommands() {
-  if (command_buffer_.get()) {
+  if (command_buffer_) {
     gpu::CommandBuffer::State state = command_buffer_->GetLastState();
     return state.put_offset != state.get_offset &&
         !gpu::error::IsError(state.error);
@@ -346,7 +346,7 @@ void GpuCommandBufferStub::Destroy() {
   while (!sync_points_.empty())
     OnRetireSyncPoint(sync_points_.front());
 
-  if (decoder_.get())
+  if (decoder_)
     decoder_->set_engine(NULL);
 
   // The scheduler has raw references to the decoder and the command buffer so
@@ -354,14 +354,14 @@ void GpuCommandBufferStub::Destroy() {
   scheduler_.reset();
 
   bool have_context = false;
-  if (decoder_.get())
+  if (decoder_)
     have_context = decoder_->MakeCurrent();
   FOR_EACH_OBSERVER(DestructionObserver,
                     destruction_observers_,
                     OnWillDestroyStub(this));
 
   scoped_refptr<gfx::GLContext> context;
-  if (decoder_.get()) {
+  if (decoder_) {
     context = decoder_->GetGLContext();
     decoder_->Destroy(have_context);
     decoder_.reset();
@@ -408,7 +408,7 @@ void GpuCommandBufferStub::OnInitialize(
   scheduler_.reset(new gpu::GpuScheduler(command_buffer_.get(),
                                          decoder_.get(),
                                          decoder_.get()));
-  if (preemption_flag_.get())
+  if (preemption_flag_)
     scheduler_->SetPreemptByFlag(preemption_flag_);
 
   decoder_->set_engine(scheduler_.get());
@@ -431,7 +431,7 @@ void GpuCommandBufferStub::OnInitialize(
     surface_ = manager->GetDefaultOffscreenSurface();
   }
 
-  if (!surface_.get()) {
+  if (!surface_) {
     DLOG(ERROR) << "Failed to create surface.\n";
     OnInitializeFailed(reply_message);
     return;
@@ -480,7 +480,7 @@ void GpuCommandBufferStub::OnInitialize(
     return;
   }
 
-  if (!context->MakeCurrent(surface_.get())) {
+  if (!context->MakeCurrent(surface_)) {
     LOG(ERROR) << "Failed to make context current.";
     OnInitializeFailed(reply_message);
     return;
@@ -580,7 +580,7 @@ void GpuCommandBufferStub::SetLatencyInfoCallback(
 void GpuCommandBufferStub::OnSetGetBuffer(int32 shm_id,
                                           IPC::Message* reply_message) {
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetGetBuffer");
-  if (command_buffer_.get())
+  if (command_buffer_)
     command_buffer_->SetGetBuffer(shm_id);
   Send(reply_message);
 }
@@ -595,7 +595,7 @@ void GpuCommandBufferStub::OnSetParent(int32 parent_route_id,
   }
 
   bool result = true;
-  if (scheduler_.get()) {
+  if (scheduler_) {
     gpu::gles2::GLES2Decoder* parent_decoder =
         parent_stub ? parent_stub->decoder_.get() : NULL;
     result = decoder_->SetParent(parent_decoder, parent_texture_id);
@@ -613,7 +613,7 @@ void GpuCommandBufferStub::OnSetParent(int32 parent_route_id,
 
 void GpuCommandBufferStub::OnGetState(IPC::Message* reply_message) {
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetState");
-  if (command_buffer_.get()) {
+  if (command_buffer_) {
     gpu::CommandBuffer::State state = command_buffer_->GetState();
     if (state.error == gpu::error::kLostContext &&
         gfx::GLContext::LosesAllContextsOnContextLost())
@@ -689,14 +689,14 @@ void GpuCommandBufferStub::OnRegisterTransferBuffer(
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnRegisterTransferBuffer");
   base::SharedMemory shared_memory(transfer_buffer, false);
 
-  if (command_buffer_.get())
+  if (command_buffer_)
     command_buffer_->RegisterTransferBuffer(id, &shared_memory, size);
 }
 
 void GpuCommandBufferStub::OnDestroyTransferBuffer(int32 id) {
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnDestroyTransferBuffer");
 
-  if (command_buffer_.get())
+  if (command_buffer_)
     command_buffer_->DestroyTransferBuffer(id);
 }
 
@@ -704,7 +704,7 @@ void GpuCommandBufferStub::OnGetTransferBuffer(
     int32 id,
     IPC::Message* reply_message) {
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnGetTransferBuffer");
-  if (command_buffer_.get()) {
+  if (command_buffer_) {
     base::SharedMemoryHandle transfer_buffer = base::SharedMemoryHandle();
     uint32 size = 0;
 
@@ -765,7 +765,7 @@ void GpuCommandBufferStub::OnCreateVideoDecoder(
 
 void GpuCommandBufferStub::OnSetSurfaceVisible(bool visible) {
   TRACE_EVENT0("gpu", "GpuCommandBufferStub::OnSetSurfaceVisible");
-  if (memory_manager_client_state_.get())
+  if (memory_manager_client_state_)
     memory_manager_client_state_->SetVisible(visible);
 }
 
@@ -848,7 +848,7 @@ void GpuCommandBufferStub::OnReceivedClientManagedMemoryStats(
   TRACE_EVENT0(
       "gpu",
       "GpuCommandBufferStub::OnReceivedClientManagedMemoryStats");
-  if (memory_manager_client_state_.get())
+  if (memory_manager_client_state_)
     memory_manager_client_state_->SetManagedMemoryStats(stats);
 }
 
@@ -858,7 +858,7 @@ void GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback(
       "gpu",
       "GpuCommandBufferStub::OnSetClientHasMemoryAllocationChangedCallback");
   if (has_callback) {
-    if (!memory_manager_client_state_.get()) {
+    if (!memory_manager_client_state_) {
       memory_manager_client_state_.reset(GetMemoryManager()->CreateClientState(
           this, surface_id_ != 0, true));
     }
@@ -897,7 +897,7 @@ void GpuCommandBufferStub::RemoveDestructionObserver(
 void GpuCommandBufferStub::SetPreemptByFlag(
     scoped_refptr<gpu::PreemptionFlag> flag) {
   preemption_flag_ = flag;
-  if (scheduler_.get())
+  if (scheduler_)
     scheduler_->SetPreemptByFlag(preemption_flag_);
 }
 