Index: media/cast/receiver/frame_receiver.cc
diff --git a/media/cast/receiver/frame_receiver.cc b/media/cast/receiver/frame_receiver.cc
index b3ad1b91f375a316081e118bc615fd39468c60d8..8f35966679307452a06cceb98b12769b4651c6ef 100644
--- a/media/cast/receiver/frame_receiver.cc
+++ b/media/cast/receiver/frame_receiver.cc
@@ -81,7 +81,7 @@ void FrameReceiver::RequestEncodedFrame(
EmitAvailableEncodedFrames();
}
-bool FrameReceiver::ProcessPacket(scoped_ptr<Packet> packet) {
+bool FrameReceiver::ProcessPacket(std::unique_ptr<Packet> packet) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (IsRtcpPacket(&packet->front(), packet->size())) {
@@ -121,7 +121,7 @@ void FrameReceiver::ProcessParsedPacket(const RtpCastHeader& rtp_header,
frame_id_to_rtp_timestamp_[rtp_header.frame_id & 0xff] =
rtp_header.rtp_timestamp;
- scoped_ptr<PacketEvent> receive_event(new PacketEvent());
+ std::unique_ptr<PacketEvent> receive_event(new PacketEvent());
receive_event->timestamp = now;
receive_event->type = PACKET_RECEIVED;
receive_event->media_type = event_media_type_;
@@ -184,7 +184,7 @@ void FrameReceiver::CastFeedback(const RtcpCastMessage& cast_message) {
RtpTimeTicks rtp_timestamp =
frame_id_to_rtp_timestamp_[cast_message.ack_frame_id & 0xff];
- scoped_ptr<FrameEvent> ack_sent_event(new FrameEvent());
+ std::unique_ptr<FrameEvent> ack_sent_event(new FrameEvent());
ack_sent_event->timestamp = now;
ack_sent_event->type = FRAME_ACK_SENT;
ack_sent_event->media_type = event_media_type_;
@@ -206,8 +206,7 @@ void FrameReceiver::EmitAvailableEncodedFrames() {
// Attempt to peek at the next completed frame from the |framer_|.
// TODO(miu): We should only be peeking at the metadata, and not copying the
// payload yet! Or, at least, peek using a StringPiece instead of a copy.
- scoped_ptr<EncodedFrame> encoded_frame(
- new EncodedFrame());
+ std::unique_ptr<EncodedFrame> encoded_frame(new EncodedFrame());
bool is_consecutively_next_frame = false;
bool have_multiple_complete_frames = false;
if (!framer_.GetEncodedFrame(encoded_frame.get(),
@@ -293,8 +292,9 @@ void FrameReceiver::EmitAvailableEncodedFramesAfterWaiting() {
EmitAvailableEncodedFrames();
}
-void FrameReceiver::EmitOneFrame(const ReceiveEncodedFrameCallback& callback,
-                                 scoped_ptr<EncodedFrame> encoded_frame) const {
+void FrameReceiver::EmitOneFrame(
+    const ReceiveEncodedFrameCallback& callback,
+    std::unique_ptr<EncodedFrame> encoded_frame) const {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (!callback.is_null())
callback.Run(std::move(encoded_frame));