Index: gpu/ipc/service/gpu_channel.cc
diff --git a/gpu/ipc/service/gpu_channel.cc b/gpu/ipc/service/gpu_channel.cc
index 7661318de18f0a31e4997dd5267986cf54d0579a..dc485c0f9930f8dd6e2907a1af33d3442d2fc491 100644
--- a/gpu/ipc/service/gpu_channel.cc
+++ b/gpu/ipc/service/gpu_channel.cc
@@ -194,7 +194,7 @@ bool GpuChannelMessageQueue::PushBackMessage(const IPC::Message& message) {
 
   uint32_t order_num = sync_point_order_data_->GenerateUnprocessedOrderNumber(
       sync_point_manager_);
-  scoped_ptr<GpuChannelMessage> msg(
+  std::unique_ptr<GpuChannelMessage> msg(
       new GpuChannelMessage(message, order_num, base::TimeTicks::Now()));
 
   if (channel_messages_.empty()) {
@@ -944,7 +944,7 @@ void GpuChannel::OnCreateCommandBuffer(
     return;
   }
 
-  scoped_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
+  std::unique_ptr<GpuCommandBufferStub> stub(new GpuCommandBufferStub(
       this, sync_point_manager_, task_runner_.get(), share_group,
       surface_handle, mailbox_manager_.get(), preempted_flag_.get(),
       subscription_ref_set_.get(), pending_valuebuffer_state_.get(), size,
@@ -969,7 +969,7 @@ void GpuChannel::OnDestroyCommandBuffer(int32_t route_id) {
   TRACE_EVENT1("gpu", "GpuChannel::OnDestroyCommandBuffer",
                "route_id", route_id);
 
-  scoped_ptr<GpuCommandBufferStub> stub = stubs_.take_and_erase(route_id);
+  std::unique_ptr<GpuCommandBufferStub> stub = stubs_.take_and_erase(route_id);
   // In case the renderer is currently blocked waiting for a sync reply from the
   // stub, we need to make sure to reschedule the correct stream here.
   if (stub && !stub->IsScheduled()) {
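
For context, the hunks above swap Chromium's legacy scoped_ptr<T> for the standard std::unique_ptr<T>; both are move-only owning smart pointers, so the ownership semantics of the channel code are unchanged. The sketch below is a minimal, standalone illustration of that ownership pattern in plain C++14, not Chromium code: Message, MessageQueue, PushBack, and TakeFront are hypothetical stand-ins for GpuChannelMessage, the channel's message queue, PushBackMessage, and stubs_.take_and_erase.

// Minimal standalone sketch of the std::unique_ptr ownership pattern used in
// the diff above (plain C++14, not Chromium code). The queue owns each
// heap-allocated message and frees it automatically unless ownership is
// transferred back to the caller.
#include <cstdint>
#include <deque>
#include <memory>
#include <utility>

// Hypothetical stand-in for GpuChannelMessage.
struct Message {
  explicit Message(uint32_t order_num) : order_number(order_num) {}
  uint32_t order_number;
};

// Hypothetical stand-in for the channel's message queue.
class MessageQueue {
 public:
  // Analogous to PushBackMessage: wrap the allocation in a unique_ptr so the
  // queue owns it; no manual delete is ever needed.
  void PushBack(uint32_t order_num) {
    std::unique_ptr<Message> msg = std::make_unique<Message>(order_num);
    messages_.push_back(std::move(msg));
  }

  // Analogous to stubs_.take_and_erase: remove the front entry and transfer
  // ownership to the caller (returns nullptr if the queue is empty).
  std::unique_ptr<Message> TakeFront() {
    if (messages_.empty())
      return nullptr;
    std::unique_ptr<Message> msg = std::move(messages_.front());
    messages_.pop_front();
    return msg;
  }

 private:
  std::deque<std::unique_ptr<Message>> messages_;
};

int main() {
  MessageQueue queue;
  queue.PushBack(1u);
  std::unique_ptr<Message> msg = queue.TakeFront();  // caller now owns it
  return (msg && msg->order_number == 1u) ? 0 : 1;
}  // msg goes out of scope here and the Message is freed automatically.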