Index: content/common/gpu/gpu_command_buffer_stub.h
diff --git a/content/common/gpu/gpu_command_buffer_stub.h b/content/common/gpu/gpu_command_buffer_stub.h
index ed4724388ca7403be0717f4bb3f7f2c48c304565..932a0ce6dc0315e7ac0600755c9f57cb12829e88 100644
--- a/content/common/gpu/gpu_command_buffer_stub.h
+++ b/content/common/gpu/gpu_command_buffer_stub.h
@@ -104,11 +104,6 @@ class GpuCommandBufferStub
   // Whether this command buffer can currently handle IPC messages.
   bool IsScheduled();
 
-  // If the command buffer is pre-empted and cannot process commands.
-  bool IsPreempted() const {
-    return scheduler_.get() && scheduler_->IsPreempted();
-  }
-
   // Whether there are commands in the buffer that haven't been processed.
   bool HasUnprocessedCommands();
 
@@ -142,7 +137,7 @@ class GpuCommandBufferStub
 
   // Associates a sync point to this stub. When the stub is destroyed, it will
   // retire all sync points that haven't been previously retired.
-  void AddSyncPoint(uint32 sync_point);
+  void AddSyncPoint(uint32 sync_point, bool retire);
 
   void SetPreemptByFlag(scoped_refptr<gpu::PreemptionFlag> flag);
 
@@ -182,7 +177,6 @@ class GpuCommandBufferStub
                                  IPC::Message* reply_message);
   void OnAsyncFlush(int32 put_offset, uint32 flush_count,
                     const std::vector<ui::LatencyInfo>& latency_info);
-  void OnRescheduled();
  void OnRegisterTransferBuffer(int32 id,
                                 base::SharedMemoryHandle transfer_buffer,
                                 uint32 size);