Chromium Code Reviews

Unified Diff: media/cast/video_receiver/video_receiver.cc

Issue 225023010: [Cast] Refactor/clean-up VideoReceiver to match AudioReceiver as closely as possible. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: rebase Created 6 years, 8 months ago
Index: media/cast/video_receiver/video_receiver.cc
diff --git a/media/cast/video_receiver/video_receiver.cc b/media/cast/video_receiver/video_receiver.cc
index bb07b7940cae812eea3dc3218c5f17f9cdbfbf77..862733b7622a4d20269ce3c3d2ab417264b522e7 100644
--- a/media/cast/video_receiver/video_receiver.cc
+++ b/media/cast/video_receiver/video_receiver.cc
@@ -10,45 +10,23 @@
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
-#include "media/cast/cast_defines.h"
-#include "media/cast/framer/framer.h"
-#include "media/cast/rtcp/receiver_rtcp_event_subscriber.h"
-#include "media/cast/rtcp/rtcp_sender.h"
+#include "media/base/video_frame.h"
+#include "media/cast/logging/logging_defines.h"
+#include "media/cast/transport/cast_transport_defines.h"
#include "media/cast/video_receiver/video_decoder.h"
namespace {
-
-static const int64 kMinSchedulingDelayMs = 1;
-static const int64 kMinTimeBetweenOffsetUpdatesMs = 1000;
-static const int kTimeOffsetMaxCounter = 10;
-
+const int kMinSchedulingDelayMs = 1;
+const int kMinTimeBetweenOffsetUpdatesMs = 1000;
+const int kTimeOffsetMaxCounter = 10;
} // namespace
namespace media {
namespace cast {
-// Local implementation of RtpPayloadFeedback (defined in rtp_defines.h)
-// Used to convey cast-specific feedback from receiver to sender.
-// Callback triggered by the Framer (cast message builder).
-class LocalRtpVideoFeedback : public RtpPayloadFeedback {
- public:
- explicit LocalRtpVideoFeedback(VideoReceiver* video_receiver)
- : video_receiver_(video_receiver) {}
-
- virtual void CastFeedback(const RtcpCastMessage& cast_message) OVERRIDE {
- video_receiver_->CastFeedback(cast_message);
- }
-
- private:
- VideoReceiver* video_receiver_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(LocalRtpVideoFeedback);
-};
-
VideoReceiver::VideoReceiver(scoped_refptr<CastEnvironment> cast_environment,
const VideoReceiverConfig& video_config,
- transport::PacedPacketSender* const packet_sender,
- const SetTargetDelayCallback& target_delay_cb)
+ transport::PacedPacketSender* const packet_sender)
: RtpReceiver(cast_environment->Clock(), NULL, &video_config),
cast_environment_(cast_environment),
event_subscriber_(kReceiverRtcpEventHistorySize,
@@ -56,48 +34,52 @@ VideoReceiver::VideoReceiver(scoped_refptr<CastEnvironment> cast_environment,
codec_(video_config.codec),
target_delay_delta_(
base::TimeDelta::FromMilliseconds(video_config.rtp_max_delay_ms)),
- frame_delay_(base::TimeDelta::FromMilliseconds(
- 1000 / video_config.max_frame_rate)),
- incoming_payload_feedback_(new LocalRtpVideoFeedback(this)),
+ expected_frame_duration_(
+ base::TimeDelta::FromSeconds(1) / video_config.max_frame_rate),
+ framer_(cast_environment->Clock(),
+ this,
+ video_config.incoming_ssrc,
+ video_config.decoder_faster_than_max_frame_rate,
+ video_config.rtp_max_delay_ms * video_config.max_frame_rate /
+ 1000),
+ rtcp_(cast_environment_,
+ NULL,
+ NULL,
+ packet_sender,
+ GetStatistics(),
+ video_config.rtcp_mode,
+ base::TimeDelta::FromMilliseconds(video_config.rtcp_interval),
+ video_config.feedback_ssrc,
+ video_config.incoming_ssrc,
+ video_config.rtcp_c_name),
time_offset_counter_(0),
- decryptor_(),
time_incoming_packet_updated_(false),
incoming_rtp_timestamp_(0),
- target_delay_cb_(target_delay_cb),
+ is_waiting_for_consecutive_frame_(false),
weak_factory_(this) {
- int max_unacked_frames =
- video_config.rtp_max_delay_ms * video_config.max_frame_rate / 1000;
- DCHECK(max_unacked_frames) << "Invalid argument";
-
- decryptor_.Initialize(video_config.aes_key, video_config.aes_iv_mask);
- framer_.reset(new Framer(cast_environment->Clock(),
- incoming_payload_feedback_.get(),
- video_config.incoming_ssrc,
- video_config.decoder_faster_than_max_frame_rate,
- max_unacked_frames));
-
+ DCHECK_GT(video_config.rtp_max_delay_ms, 0);
+ DCHECK_GT(video_config.max_frame_rate, 0);
if (!video_config.use_external_decoder) {
- video_decoder_.reset(new VideoDecoder(video_config, cast_environment));
+ video_decoder_.reset(new VideoDecoder(cast_environment, video_config));
}
-
- rtcp_.reset(
- new Rtcp(cast_environment_,
- NULL,
- NULL,
- packet_sender,
- GetStatistics(),
- video_config.rtcp_mode,
- base::TimeDelta::FromMilliseconds(video_config.rtcp_interval),
- video_config.feedback_ssrc,
- video_config.incoming_ssrc,
- video_config.rtcp_c_name));
- // Set the target delay that will be conveyed to the sender.
- rtcp_->SetTargetDelay(target_delay_delta_);
+ decryptor_.Initialize(video_config.aes_key, video_config.aes_iv_mask);
+ rtcp_.SetTargetDelay(target_delay_delta_);
cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber_);
memset(frame_id_to_rtp_timestamp_, 0, sizeof(frame_id_to_rtp_timestamp_));
}
VideoReceiver::~VideoReceiver() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+ // If any callbacks for encoded video frames are queued, flush them out now.
+ // This is critical because some Closures in |frame_request_queue_| may have
+ // Unretained references to |this|.
+ while (!frame_request_queue_.empty()) {
+ frame_request_queue_.front().Run(
+ make_scoped_ptr<transport::EncodedVideoFrame>(NULL), base::TimeTicks());
+ frame_request_queue_.pop_front();
+ }
+
cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber_);
}
@@ -110,209 +92,155 @@ void VideoReceiver::InitializeTimers() {
void VideoReceiver::GetRawVideoFrame(
const VideoFrameDecodedCallback& callback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ DCHECK(!callback.is_null());
+ DCHECK(video_decoder_.get());
GetEncodedVideoFrame(base::Bind(
- &VideoReceiver::DecodeVideoFrame, base::Unretained(this), callback));
+ &VideoReceiver::DecodeEncodedVideoFrame,
+ // Note: Use of Unretained is safe since this Closure is guaranteed to be
+ // invoked before destruction of |this|.
+ base::Unretained(this),
+ callback));
}
-// Called when we have a frame to decode.
-void VideoReceiver::DecodeVideoFrame(
+void VideoReceiver::DecodeEncodedVideoFrame(
const VideoFrameDecodedCallback& callback,
scoped_ptr<transport::EncodedVideoFrame> encoded_frame,
- const base::TimeTicks& render_time) {
+ const base::TimeTicks& playout_time) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-
- // Hand the ownership of the encoded frame to the decode thread.
- cast_environment_->PostTask(CastEnvironment::VIDEO,
- FROM_HERE,
- base::Bind(&VideoReceiver::DecodeVideoFrameThread,
- base::Unretained(this),
- base::Passed(&encoded_frame),
- render_time,
- callback));
-}
-
-// Utility function to run the decoder on a designated decoding thread.
-void VideoReceiver::DecodeVideoFrameThread(
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame,
- const base::TimeTicks render_time,
- const VideoFrameDecodedCallback& frame_decoded_callback) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::VIDEO));
- DCHECK(video_decoder_);
-
- if (!(video_decoder_->DecodeVideoFrame(
- encoded_frame.get(), render_time, frame_decoded_callback))) {
- // This will happen if we decide to decode but not show a frame.
- cast_environment_->PostTask(CastEnvironment::MAIN,
- FROM_HERE,
- base::Bind(&VideoReceiver::GetRawVideoFrame,
- base::Unretained(this),
- frame_decoded_callback));
+ if (!encoded_frame) {
+ callback.Run(make_scoped_refptr<VideoFrame>(NULL), playout_time, false);
+ return;
}
+ const uint32 frame_id = encoded_frame->frame_id;
+ const uint32 rtp_timestamp = encoded_frame->rtp_timestamp;
+ video_decoder_->DecodeFrame(encoded_frame.Pass(),
+ base::Bind(&VideoReceiver::EmitRawVideoFrame,
+ cast_environment_,
+ callback,
+ frame_id,
+ rtp_timestamp,
+ playout_time));
}
-bool VideoReceiver::DecryptVideoFrame(
- scoped_ptr<transport::EncodedVideoFrame>* video_frame) {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-
- if (!decryptor_.initialized())
- return false;
-
- std::string decrypted_video_data;
- if (!decryptor_.Decrypt((*video_frame)->frame_id,
- (*video_frame)->data,
- &decrypted_video_data)) {
- // Give up on this frame, release it from jitter buffer.
- framer_->ReleaseFrame((*video_frame)->frame_id);
- return false;
+// static
+void VideoReceiver::EmitRawVideoFrame(
+ const scoped_refptr<CastEnvironment>& cast_environment,
+ const VideoFrameDecodedCallback& callback,
+ uint32 frame_id,
+ uint32 rtp_timestamp,
+ const base::TimeTicks& playout_time,
+ const scoped_refptr<VideoFrame>& video_frame,
+ bool is_continuous) {
+ DCHECK(cast_environment->CurrentlyOn(CastEnvironment::MAIN));
+ if (video_frame) {
+ const base::TimeTicks now = cast_environment->Clock()->NowTicks();
+ cast_environment->Logging()->InsertFrameEvent(
+ now, kVideoFrameDecoded, rtp_timestamp, frame_id);
+ cast_environment->Logging()->InsertFrameEventWithDelay(
+ now, kVideoRenderDelay, rtp_timestamp, frame_id,
+ playout_time - now);
+ // Used by chrome/browser/extension/api/cast_streaming/performance_test.cc
+ TRACE_EVENT_INSTANT1(
+ "cast_perf_test", "FrameDecoded",
+ TRACE_EVENT_SCOPE_THREAD,
+ "rtp_timestamp", rtp_timestamp);
}
- (*video_frame)->data.swap(decrypted_video_data);
- return true;
+ callback.Run(video_frame, playout_time, is_continuous);
}
-// Called from the main cast thread.
void VideoReceiver::GetEncodedVideoFrame(
const VideoFrameEncodedCallback& callback) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame(
- new transport::EncodedVideoFrame());
- bool next_frame = false;
-
- if (!framer_->GetEncodedVideoFrame(encoded_frame.get(), &next_frame)) {
- // We have no video frames. Wait for new packet(s).
- queued_encoded_callbacks_.push_back(callback);
- return;
- }
-
- if (decryptor_.initialized() && !DecryptVideoFrame(&encoded_frame)) {
- // Logging already done.
- queued_encoded_callbacks_.push_back(callback);
- return;
- }
-
- base::TimeTicks render_time;
- if (PullEncodedVideoFrame(next_frame, &encoded_frame, &render_time)) {
- cast_environment_->PostTask(
- CastEnvironment::MAIN,
- FROM_HERE,
- base::Bind(callback, base::Passed(&encoded_frame), render_time));
- } else {
- // We have a video frame; however we are missing packets and we have time
- // to wait for new packet(s).
- queued_encoded_callbacks_.push_back(callback);
- }
+ frame_request_queue_.push_back(callback);
+ EmitAvailableEncodedFrames();
}
-// Should we pull the encoded video frame from the framer? decided by if this is
-// the next frame or we are running out of time and have to pull the following
-// frame.
-// If the frame is too old to be rendered we set the don't show flag in the
-// video bitstream where possible.
-bool VideoReceiver::PullEncodedVideoFrame(
- bool next_frame,
- scoped_ptr<transport::EncodedVideoFrame>* encoded_frame,
- base::TimeTicks* render_time) {
+void VideoReceiver::EmitAvailableEncodedFrames() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- base::TimeTicks now = cast_environment_->Clock()->NowTicks();
- *render_time = GetRenderTime(now, (*encoded_frame)->rtp_timestamp);
- // TODO(mikhal): Store actual render time and not diff.
- cast_environment_->Logging()->InsertFrameEventWithDelay(
- now,
- kVideoRenderDelay,
- (*encoded_frame)->rtp_timestamp,
- (*encoded_frame)->frame_id,
- now - *render_time);
-
- // Minimum time before a frame is due to be rendered before we pull it for
- // decode.
- base::TimeDelta min_wait_delta = frame_delay_;
- base::TimeDelta time_until_render = *render_time - now;
- if (!next_frame && (time_until_render > min_wait_delta)) {
- // Example:
- // We have decoded frame 1 and we have received the complete frame 3, but
- // not frame 2. If we still have time before frame 3 should be rendered we
- // will wait for 2 to arrive, however if 2 never show up this timer will hit
- // and we will pull out frame 3 for decoding and rendering.
- base::TimeDelta time_until_release = time_until_render - min_wait_delta;
- cast_environment_->PostDelayedTask(
- CastEnvironment::MAIN,
- FROM_HERE,
- base::Bind(&VideoReceiver::PlayoutTimeout, weak_factory_.GetWeakPtr()),
- time_until_release);
- VLOG(1) << "Wait before releasing frame "
- << static_cast<int>((*encoded_frame)->frame_id) << " time "
- << time_until_release.InMilliseconds();
- return false;
- }
-
- base::TimeDelta dont_show_timeout_delta =
- base::TimeDelta::FromMilliseconds(-kDontShowTimeoutMs);
- if (codec_ == transport::kVp8 &&
- time_until_render < dont_show_timeout_delta) {
- (*encoded_frame)->data[0] &= 0xef;
- VLOG(1) << "Don't show frame "
- << static_cast<int>((*encoded_frame)->frame_id)
- << " time_until_render:" << time_until_render.InMilliseconds();
- } else {
- VLOG(2) << "Show frame " << static_cast<int>((*encoded_frame)->frame_id)
- << " time_until_render:" << time_until_render.InMilliseconds();
- }
- // We have a copy of the frame, release this one.
- framer_->ReleaseFrame((*encoded_frame)->frame_id);
- (*encoded_frame)->codec = codec_;
-
- // Used by chrome/browser/extension/api/cast_streaming/performance_test.cc
- TRACE_EVENT_INSTANT2(
- "cast_perf_test", "PullEncodedVideoFrame",
- TRACE_EVENT_SCOPE_THREAD,
- "rtp_timestamp", (*encoded_frame)->rtp_timestamp,
- "render_time", render_time->ToInternalValue());
-
- return true;
-}
-
-void VideoReceiver::PlayoutTimeout() {
- DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- if (queued_encoded_callbacks_.empty())
- return;
+ while (!frame_request_queue_.empty()) {
+ // Attempt to peek at the next completed frame from the |framer_|.
+ // TODO(miu): We should only be peeking at the metadata, and not copying the
+ // payload yet! Or, at least, peek using a StringPiece instead of a copy.
+ scoped_ptr<transport::EncodedVideoFrame> encoded_frame(
+ new transport::EncodedVideoFrame());
+ bool is_consecutively_next_frame = false;
+ if (!framer_.GetEncodedVideoFrame(encoded_frame.get(),
+ &is_consecutively_next_frame)) {
+ VLOG(1) << "Wait for more video packets to produce a completed frame.";
+ return; // OnReceivedPayloadData() will invoke this method in the future.
+ }
- bool next_frame = false;
- scoped_ptr<transport::EncodedVideoFrame> encoded_frame(
- new transport::EncodedVideoFrame());
+ // If |framer_| has a frame ready that is out of sequence, examine the
+ // playout time to determine whether it's acceptable to continue, thereby
+ // skipping one or more frames. Skip if the missing frame wouldn't complete
+ // playing before the start of playback of the available frame.
+ const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+ const base::TimeTicks playout_time =
+ GetPlayoutTime(now, encoded_frame->rtp_timestamp);
+ if (!is_consecutively_next_frame) {
+ // TODO(miu): Also account for expected decode time here?
+ const base::TimeTicks earliest_possible_end_time_of_missing_frame =
+ now + expected_frame_duration_;
+ if (earliest_possible_end_time_of_missing_frame < playout_time) {
+ VLOG(1) << "Wait for next consecutive frame instead of skipping.";
+ if (!is_waiting_for_consecutive_frame_) {
+ is_waiting_for_consecutive_frame_ = true;
+ cast_environment_->PostDelayedTask(
+ CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(&VideoReceiver::EmitAvailableEncodedFramesAfterWaiting,
+ weak_factory_.GetWeakPtr()),
+ playout_time - now);
+ }
+ return;
+ }
+ }
- if (!framer_->GetEncodedVideoFrame(encoded_frame.get(), &next_frame)) {
- // We have no video frames. Wait for new packet(s).
- // Since the application can post multiple VideoFrameEncodedCallback and
- // we only check the next frame to play out we might have multiple timeout
- // events firing after each other; however this should be a rare event.
- VLOG(1) << "Failed to retrieved a complete frame at this point in time";
- return;
- }
- VLOG(2) << "PlayoutTimeout retrieved frame "
- << static_cast<int>(encoded_frame->frame_id);
+ // Decrypt the payload data in the frame, if crypto is being used.
+ if (decryptor_.initialized()) {
+ std::string decrypted_video_data;
+ if (!decryptor_.Decrypt(encoded_frame->frame_id,
+ encoded_frame->data,
+ &decrypted_video_data)) {
+ // Decryption failed. Give up on this frame, releasing it from the
+ // jitter buffer.
+ framer_.ReleaseFrame(encoded_frame->frame_id);
+ continue;
+ }
+ encoded_frame->data.swap(decrypted_video_data);
+ }
- if (decryptor_.initialized() && !DecryptVideoFrame(&encoded_frame)) {
- // Logging already done.
- return;
+ // At this point, we have a decrypted EncodedVideoFrame ready to be emitted.
+ encoded_frame->codec = codec_;
+ framer_.ReleaseFrame(encoded_frame->frame_id);
+ // Used by chrome/browser/extension/api/cast_streaming/performance_test.cc
+ TRACE_EVENT_INSTANT2(
+ "cast_perf_test", "PullEncodedVideoFrame",
+ TRACE_EVENT_SCOPE_THREAD,
+ "rtp_timestamp", encoded_frame->rtp_timestamp,
+ // TODO(miu): Need to find an alternative to using ToInternalValue():
+ "render_time", playout_time.ToInternalValue());
+ cast_environment_->PostTask(CastEnvironment::MAIN,
+ FROM_HERE,
+ base::Bind(frame_request_queue_.front(),
+ base::Passed(&encoded_frame),
+ playout_time));
+ frame_request_queue_.pop_front();
}
+}
- base::TimeTicks render_time;
- if (PullEncodedVideoFrame(next_frame, &encoded_frame, &render_time)) {
- if (!queued_encoded_callbacks_.empty()) {
- VideoFrameEncodedCallback callback = queued_encoded_callbacks_.front();
- queued_encoded_callbacks_.pop_front();
- cast_environment_->PostTask(
- CastEnvironment::MAIN,
- FROM_HERE,
- base::Bind(callback, base::Passed(&encoded_frame), render_time));
- }
- }
- // Else we have a video frame; however we are missing packets and we have time
- // to wait for new packet(s).
+void VideoReceiver::EmitAvailableEncodedFramesAfterWaiting() {
+ DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+ DCHECK(is_waiting_for_consecutive_frame_);
+ is_waiting_for_consecutive_frame_ = false;
+ EmitAvailableEncodedFrames();
}
-base::TimeTicks VideoReceiver::GetRenderTime(base::TimeTicks now,
- uint32 rtp_timestamp) {
+base::TimeTicks VideoReceiver::GetPlayoutTime(base::TimeTicks now,
+ uint32 rtp_timestamp) {
+ // TODO(miu): This and AudioReceiver::GetPlayoutTime() need to be reconciled!
+
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
// Senders time in ms when this frame was captured.
// Note: the senders clock and our local clock might not be synced.
@@ -321,17 +249,15 @@ base::TimeTicks VideoReceiver::GetRenderTime(base::TimeTicks now,
// Compute the time offset_in_ticks based on the incoming_rtp_timestamp_.
if (time_offset_counter_ == 0) {
// Check for received RTCP to sync the stream play it out asap.
- if (rtcp_->RtpTimestampInSenderTime(kVideoFrequency,
- incoming_rtp_timestamp_,
- &rtp_timestamp_in_ticks)) {
-
+ if (rtcp_.RtpTimestampInSenderTime(kVideoFrequency,
+ incoming_rtp_timestamp_,
+ &rtp_timestamp_in_ticks)) {
++time_offset_counter_;
}
- return now;
} else if (time_incoming_packet_updated_) {
- if (rtcp_->RtpTimestampInSenderTime(kVideoFrequency,
- incoming_rtp_timestamp_,
- &rtp_timestamp_in_ticks)) {
+ if (rtcp_.RtpTimestampInSenderTime(kVideoFrequency,
+ incoming_rtp_timestamp_,
+ &rtp_timestamp_in_ticks)) {
// Time to update the time_offset.
base::TimeDelta time_offset =
time_incoming_packet_ - rtp_timestamp_in_ticks;
@@ -342,33 +268,47 @@ base::TimeTicks VideoReceiver::GetRenderTime(base::TimeTicks now,
// very slow, and negligible for this use case.
if (time_offset_counter_ == 1)
time_offset_ = time_offset;
- else if (time_offset_counter_ < kTimeOffsetMaxCounter) {
+ else if (time_offset_counter_ < kTimeOffsetMaxCounter) {
time_offset_ = std::min(time_offset_, time_offset);
}
- ++time_offset_counter_;
+ if (time_offset_counter_ < kTimeOffsetMaxCounter)
+ ++time_offset_counter_;
}
}
// Reset |time_incoming_packet_updated_| to enable a future measurement.
time_incoming_packet_updated_ = false;
// Compute the actual rtp_timestamp_in_ticks based on the current timestamp.
- if (!rtcp_->RtpTimestampInSenderTime(
+ if (!rtcp_.RtpTimestampInSenderTime(
kVideoFrequency, rtp_timestamp, &rtp_timestamp_in_ticks)) {
// This can fail if we have not received any RTCP packets in a long time.
- return now;
+ // BUG: These calculations are a placeholder, and to be revisited in a
+ // soon-upcoming change. http://crbug.com/356942
+ const int frequency_khz = kVideoFrequency / 1000;
+ const base::TimeDelta delta_based_on_rtp_timestamps =
+ base::TimeDelta::FromMilliseconds(
+ static_cast<int32>(rtp_timestamp - incoming_rtp_timestamp_) /
+ frequency_khz);
+ return time_incoming_packet_ + delta_based_on_rtp_timestamps;
}
base::TimeTicks render_time =
rtp_timestamp_in_ticks + time_offset_ + target_delay_delta_;
+ // TODO(miu): This is broken since this "getter" method may be called on
+ // frames received out-of-order, which means the playout times for earlier
+ // frames will be computed incorrectly.
+#if 0
if (last_render_time_ > render_time)
render_time = last_render_time_;
last_render_time_ = render_time;
+#endif
+
return render_time;
}
void VideoReceiver::IncomingPacket(scoped_ptr<Packet> packet) {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
if (Rtcp::IsRtcpPacket(&packet->front(), packet->size())) {
- rtcp_->IncomingRtcpPacket(&packet->front(), packet->size());
+ rtcp_.IncomingRtcpPacket(&packet->front(), packet->size());
} else {
ReceivedPacket(&packet->front(), packet->size());
}
@@ -407,9 +347,8 @@ void VideoReceiver::OnReceivedPayloadData(const uint8* payload_data,
payload_size);
bool duplicate = false;
- bool complete =
- framer_->InsertPacket(payload_data, payload_size, rtp_header, &duplicate);
-
+ const bool complete =
+ framer_.InsertPacket(payload_data, payload_size, rtp_header, &duplicate);
if (duplicate) {
cast_environment_->Logging()->InsertPacketEvent(
now,
@@ -424,16 +363,8 @@ void VideoReceiver::OnReceivedPayloadData(const uint8* payload_data,
}
if (!complete)
return; // Video frame not complete; wait for more packets.
- if (queued_encoded_callbacks_.empty())
- return; // No pending callback.
-
- VideoFrameEncodedCallback callback = queued_encoded_callbacks_.front();
- queued_encoded_callbacks_.pop_front();
- cast_environment_->PostTask(CastEnvironment::MAIN,
- FROM_HERE,
- base::Bind(&VideoReceiver::GetEncodedVideoFrame,
- weak_factory_.GetWeakPtr(),
- callback));
+
+ EmitAvailableEncodedFrames();
}
// Send a cast feedback message. Actual message created in the framer (cast
@@ -449,7 +380,7 @@ void VideoReceiver::CastFeedback(const RtcpCastMessage& cast_message) {
ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
event_subscriber_.GetRtcpEventsAndReset(&rtcp_events);
- rtcp_->SendRtcpFromRtpReceiver(&cast_message, &rtcp_events);
+ rtcp_.SendRtcpFromRtpReceiver(&cast_message, &rtcp_events);
}
// Cast messages should be sent within a maximum interval. Schedule a call
@@ -457,8 +388,7 @@ void VideoReceiver::CastFeedback(const RtcpCastMessage& cast_message) {
void VideoReceiver::ScheduleNextCastMessage() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
base::TimeTicks send_time;
- framer_->TimeToSendNextCastMessage(&send_time);
-
+ framer_.TimeToSendNextCastMessage(&send_time);
base::TimeDelta time_to_send =
send_time - cast_environment_->Clock()->NowTicks();
time_to_send = std::max(
@@ -473,14 +403,13 @@ void VideoReceiver::ScheduleNextCastMessage() {
void VideoReceiver::SendNextCastMessage() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- framer_->SendCastMessage(); // Will only send a message if it is time.
+ framer_.SendCastMessage(); // Will only send a message if it is time.
ScheduleNextCastMessage();
}
-// Schedule the next RTCP report to be sent back to the sender.
void VideoReceiver::ScheduleNextRtcpReport() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- base::TimeDelta time_to_next = rtcp_->TimeToSendNextRtcpReport() -
+ base::TimeDelta time_to_next = rtcp_.TimeToSendNextRtcpReport() -
cast_environment_->Clock()->NowTicks();
time_to_next = std::max(
@@ -496,15 +425,9 @@ void VideoReceiver::ScheduleNextRtcpReport() {
void VideoReceiver::SendNextRtcpReport() {
DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
- rtcp_->SendRtcpFromRtpReceiver(NULL, NULL);
+ rtcp_.SendRtcpFromRtpReceiver(NULL, NULL);
ScheduleNextRtcpReport();
}
-void VideoReceiver::UpdateTargetDelay() {
- NOTIMPLEMENTED();
- rtcp_->SetTargetDelay(target_delay_delta_);
- target_delay_cb_.Run(target_delay_delta_);
-}
-
} // namespace cast
} // namespace media
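
The following is a standalone sketch, not code from this patch, of the pull-model contract the change introduces: GetEncodedVideoFrame() only enqueues the request, EmitAvailableEncodedFrames() drains the queue as frames complete, and the destructor flushes any still-queued requests with a NULL frame, which is why the base::Unretained(this) bind in GetRawVideoFrame() is stated to be safe. Types here are simplified stand-ins (std::function, std::deque, std::unique_ptr) rather than the Cast classes, and the names ReceiverSketch, GetEncodedFrame, and OnFrameCompleted are illustrative only.

// Illustrative sketch only; simplified from the patch's request-queue flow.
#include <deque>
#include <functional>
#include <iostream>
#include <memory>
#include <string>

struct EncodedFrame { std::string data; };
using FrameCallback = std::function<void(std::unique_ptr<EncodedFrame>)>;

class ReceiverSketch {
 public:
  ~ReceiverSketch() {
    // Flush queued callbacks so no request is silently dropped at teardown.
    while (!frame_request_queue_.empty()) {
      frame_request_queue_.front()(nullptr);
      frame_request_queue_.pop_front();
    }
  }

  void GetEncodedFrame(FrameCallback callback) {
    frame_request_queue_.push_back(std::move(callback));
    EmitAvailableFrames();
  }

  void OnFrameCompleted(std::unique_ptr<EncodedFrame> frame) {
    completed_frames_.push_back(std::move(frame));
    EmitAvailableFrames();
  }

 private:
  void EmitAvailableFrames() {
    // Pair queued requests with completed frames, oldest first.
    while (!frame_request_queue_.empty() && !completed_frames_.empty()) {
      frame_request_queue_.front()(std::move(completed_frames_.front()));
      frame_request_queue_.pop_front();
      completed_frames_.pop_front();
    }
  }

  std::deque<FrameCallback> frame_request_queue_;
  std::deque<std::unique_ptr<EncodedFrame>> completed_frames_;
};

int main() {
  ReceiverSketch receiver;
  receiver.GetEncodedFrame([](std::unique_ptr<EncodedFrame> frame) {
    std::cout << (frame ? "got frame: " + frame->data : "flushed with NULL")
              << "\n";
  });
  receiver.OnFrameCompleted(
      std::unique_ptr<EncodedFrame>(new EncodedFrame{"keyframe"}));
  receiver.GetEncodedFrame([](std::unique_ptr<EncodedFrame> frame) {
    std::cout << (frame ? "got frame: " + frame->data : "flushed with NULL")
              << "\n";
  });
  return 0;  // Destructor flushes the second, unfulfilled request with NULL.
}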
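
The next sketch, also illustrative rather than patch code, isolates the wait-or-skip rule in EmitAvailableEncodedFrames() for an out-of-order complete frame: keep waiting for the missing consecutive frame only while it could still finish playing before the available frame's playout time, otherwise skip ahead. It uses std::chrono in place of base::TimeTicks/base::TimeDelta, and the helper name ShouldWaitForMissingFrame is an assumption for illustration; expected_frame_duration_ in the patch is one second divided by max_frame_rate.

// Illustrative sketch only; mirrors the comparison against
// earliest_possible_end_time_of_missing_frame in the patch.
#include <chrono>
#include <iostream>

using Clock = std::chrono::steady_clock;

// Returns true if the receiver should keep waiting for the missing frame(s)
// rather than skipping ahead to the already-complete, out-of-order frame.
bool ShouldWaitForMissingFrame(Clock::time_point now,
                               Clock::time_point playout_time_of_available_frame,
                               Clock::duration expected_frame_duration) {
  // Earliest moment at which the missing frame could finish playing if it
  // arrived right now.  If that is still before the available frame's playout
  // time, the missing frame could still be shown on time, so wait.
  const Clock::time_point earliest_possible_end_time_of_missing_frame =
      now + expected_frame_duration;
  return earliest_possible_end_time_of_missing_frame <
         playout_time_of_available_frame;
}

int main() {
  const auto now = Clock::now();
  const auto frame_duration = std::chrono::milliseconds(33);  // ~30 fps.

  // Available (out-of-order) frame plays out 100 ms from now: waiting is safe.
  std::cout << ShouldWaitForMissingFrame(
                   now, now + std::chrono::milliseconds(100), frame_duration)
            << "\n";  // prints 1

  // Available frame plays out 10 ms from now: skip the missing frame.
  std::cout << ShouldWaitForMissingFrame(
                   now, now + std::chrono::milliseconds(10), frame_duration)
            << "\n";  // prints 0
  return 0;
}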

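Finally, a small sketch of the placeholder playout-time fallback added to GetPlayoutTime() for the case where no sender-time mapping is available from RTCP (flagged in the patch as a BUG, http://crbug.com/356942): the playout time is estimated from the last incoming packet's arrival time plus the RTP timestamp delta converted to milliseconds. The helper name EstimatePlayoutTime is an assumption, std::chrono stands in for base::TimeTicks, and the 90000 default assumes the usual 90 kHz video RTP clock used for kVideoFrequency.

// Illustrative sketch only; same arithmetic as the patch's fallback path.
#include <chrono>
#include <cstdint>
#include <iostream>

using Clock = std::chrono::steady_clock;

// Estimate the playout time of |rtp_timestamp| relative to the last incoming
// packet, converting the (possibly wrapped) RTP timestamp difference to
// milliseconds at the video RTP clock rate.
Clock::time_point EstimatePlayoutTime(Clock::time_point time_incoming_packet,
                                      uint32_t incoming_rtp_timestamp,
                                      uint32_t rtp_timestamp,
                                      int rtp_frequency_hz = 90000) {
  const int frequency_khz = rtp_frequency_hz / 1000;
  // The subtraction is done in uint32 and reinterpreted as int32 so that RTP
  // timestamp wraparound still yields a small signed delta.
  const int32_t delta_ticks =
      static_cast<int32_t>(rtp_timestamp - incoming_rtp_timestamp);
  return time_incoming_packet +
         std::chrono::duration_cast<Clock::duration>(
             std::chrono::milliseconds(delta_ticks / frequency_khz));
}

int main() {
  const auto base = Clock::now();
  // A frame whose RTP timestamp is 9000 ticks ahead of the last packet's
  // timestamp is roughly 9000 / 90 = 100 ms later.
  const auto playout = EstimatePlayoutTime(base, 1000u, 10000u);
  std::cout << std::chrono::duration_cast<std::chrono::milliseconds>(playout -
                                                                     base)
                   .count()
            << " ms\n";  // prints 100 ms
  return 0;
}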