Index: media/cast/receiver/frame_receiver.cc
diff --git a/media/cast/audio_receiver/audio_receiver.cc b/media/cast/receiver/frame_receiver.cc
similarity index 60%
rename from media/cast/audio_receiver/audio_receiver.cc
rename to media/cast/receiver/frame_receiver.cc
index 1f47827ec648535185cf970af146b94fdc716c46..95f990c840f4feb6d053431717967aa6a43dec94 100644
--- a/media/cast/audio_receiver/audio_receiver.cc
+++ b/media/cast/receiver/frame_receiver.cc
@@ -2,15 +2,15 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "media/cast/audio_receiver/audio_receiver.h"
+#include "media/cast/receiver/frame_receiver.h"
 
 #include <algorithm>
 
+#include "base/big_endian.h"
 #include "base/bind.h"
 #include "base/logging.h"
 #include "base/message_loop/message_loop.h"
-#include "media/cast/audio_receiver/audio_decoder.h"
-#include "media/cast/transport/cast_transport_defines.h"
+#include "media/cast/cast_environment.h"
 
 namespace {
 const int kMinSchedulingDelayMs = 1;
@@ -19,56 +19,102 @@ const int kMinSchedulingDelayMs = 1;
 namespace media {
 namespace cast {
 
-AudioReceiver::AudioReceiver(scoped_refptr<CastEnvironment> cast_environment,
-                             const FrameReceiverConfig& audio_config,
-                             transport::PacedPacketSender* const packet_sender)
-    : RtpReceiver(cast_environment->Clock(), &audio_config, NULL),
-      cast_environment_(cast_environment),
-      event_subscriber_(kReceiverRtcpEventHistorySize, AUDIO_EVENT),
-      codec_(audio_config.codec.audio),
-      frequency_(audio_config.frequency),
+FrameReceiver::FrameReceiver(
+    const scoped_refptr<CastEnvironment>& cast_environment,
+    const FrameReceiverConfig& config,
+    EventMediaType event_media_type,
+    transport::PacedPacketSender* const packet_sender)
+    : cast_environment_(cast_environment),
+      packet_parser_(config.incoming_ssrc, config.rtp_payload_type),
+      stats_(cast_environment->Clock()),
+      event_media_type_(event_media_type),
+      event_subscriber_(kReceiverRtcpEventHistorySize, event_media_type),
+      rtp_timebase_(config.frequency),
       target_playout_delay_(
-          base::TimeDelta::FromMilliseconds(audio_config.rtp_max_delay_ms)),
+          base::TimeDelta::FromMilliseconds(config.rtp_max_delay_ms)),
       expected_frame_duration_(
-          base::TimeDelta::FromSeconds(1) / audio_config.max_frame_rate),
+          base::TimeDelta::FromSeconds(1) / config.max_frame_rate),
       reports_are_scheduled_(false),
       framer_(cast_environment->Clock(),
               this,
-              audio_config.incoming_ssrc,
+              config.incoming_ssrc,
               true,
-              audio_config.rtp_max_delay_ms * audio_config.max_frame_rate /
-                  1000),
-      rtcp_(cast_environment,
+              config.rtp_max_delay_ms * config.max_frame_rate / 1000),
+      rtcp_(cast_environment_,
             NULL,
             NULL,
             packet_sender,
-            GetStatistics(),
-            audio_config.rtcp_mode,
-            base::TimeDelta::FromMilliseconds(audio_config.rtcp_interval),
-            audio_config.feedback_ssrc,
-            audio_config.incoming_ssrc,
-            audio_config.rtcp_c_name,
-            true),
+            &stats_,
+            config.rtcp_mode,
+            base::TimeDelta::FromMilliseconds(config.rtcp_interval),
+            config.feedback_ssrc,
+            config.incoming_ssrc,
+            config.rtcp_c_name,
+            event_media_type),
       is_waiting_for_consecutive_frame_(false),
       lip_sync_drift_(ClockDriftSmoother::GetDefaultTimeConstant()),
       weak_factory_(this) {
-  DCHECK_GT(audio_config.rtp_max_delay_ms, 0);
-  DCHECK_GT(audio_config.max_frame_rate, 0);
-  audio_decoder_.reset(new AudioDecoder(cast_environment, audio_config));
-  decryptor_.Initialize(audio_config.aes_key, audio_config.aes_iv_mask);
+  DCHECK_GT(config.rtp_max_delay_ms, 0);
+  DCHECK_GT(config.max_frame_rate, 0);
+  decryptor_.Initialize(config.aes_key, config.aes_iv_mask);
   rtcp_.SetTargetDelay(target_playout_delay_);
   cast_environment_->Logging()->AddRawEventSubscriber(&event_subscriber_);
   memset(frame_id_to_rtp_timestamp_, 0, sizeof(frame_id_to_rtp_timestamp_));
 }
 
-AudioReceiver::~AudioReceiver() {
+FrameReceiver::~FrameReceiver() {
   DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
   cast_environment_->Logging()->RemoveRawEventSubscriber(&event_subscriber_);
 }
 
-void AudioReceiver::OnReceivedPayloadData(const uint8* payload_data,
-                                          size_t payload_size,
-                                          const RtpCastHeader& rtp_header) {
+void FrameReceiver::RequestEncodedFrame(const FrameEncodedCallback& callback) {
+  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+  frame_request_queue_.push_back(callback);
+  EmitAvailableEncodedFrames();
+}
+
+bool FrameReceiver::ProcessPacket(scoped_ptr<Packet> packet) {
+  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
+
+  if (Rtcp::IsRtcpPacket(&packet->front(), packet->size())) {
+    rtcp_.IncomingRtcpPacket(&packet->front(), packet->size());
+  } else {
+    RtpCastHeader rtp_header;
+    const uint8* payload_data;
+    size_t payload_size;
+    if (!packet_parser_.ParsePacket(&packet->front(),
+                                    packet->size(),
+                                    &rtp_header,
+                                    &payload_data,
+                                    &payload_size)) {
+      return false;
+    }
+
+    ProcessParsedPacket(rtp_header, payload_data, payload_size);
+    stats_.UpdateStatistics(rtp_header);
+  }
+
+  if (!reports_are_scheduled_) {
+    ScheduleNextRtcpReport();
+    ScheduleNextCastMessage();
+    reports_are_scheduled_ = true;
+  }
+
+  return true;
+}
+
+// static
+bool FrameReceiver::ParseSenderSsrc(const uint8* packet,
+                                    size_t length,
+                                    uint32* ssrc) {
+  base::BigEndianReader big_endian_reader(
+      reinterpret_cast<const char*>(packet), length);
+  return big_endian_reader.Skip(8) && big_endian_reader.ReadU32(ssrc);
+}
+
+void FrameReceiver::ProcessParsedPacket(const RtpCastHeader& rtp_header,
+                                        const uint8* payload_data,
+                                        size_t payload_size) {
   DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
 
   const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
@@ -76,7 +122,7 @@ void AudioReceiver::OnReceivedPayloadData(const uint8* payload_data,
   frame_id_to_rtp_timestamp_[rtp_header.frame_id & 0xff] =
       rtp_header.rtp_timestamp;
   cast_environment_->Logging()->InsertPacketEvent(
-      now, PACKET_RECEIVED, AUDIO_EVENT, rtp_header.rtp_timestamp,
+      now, PACKET_RECEIVED, event_media_type_, rtp_header.rtp_timestamp,
       rtp_header.frame_id, rtp_header.packet_id, rtp_header.max_packet_id,
       payload_size);
 
@@ -111,81 +157,35 @@ void AudioReceiver::OnReceivedPayloadData(const uint8* payload_data,
     } else {
       lip_sync_reference_time_ += RtpDeltaToTimeDelta(
           static_cast<int32>(fresh_sync_rtp - lip_sync_rtp_timestamp_),
-          frequency_);
+          rtp_timebase_);
     }
     lip_sync_rtp_timestamp_ = fresh_sync_rtp;
     lip_sync_drift_.Update(
         now, fresh_sync_reference - lip_sync_reference_time_);
   }
 
-  // Frame not complete; wait for more packets.
-  if (!complete)
-    return;
-
-  EmitAvailableEncodedFrames();
-}
-
-void AudioReceiver::GetRawAudioFrame(
-    const AudioFrameDecodedCallback& callback) {
-  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-  DCHECK(!callback.is_null());
-  DCHECK(audio_decoder_.get());
-  GetEncodedAudioFrame(base::Bind(
-      &AudioReceiver::DecodeEncodedAudioFrame,
-      // Note: Use of Unretained is safe since this Closure is guaranteed to be
-      // invoked before destruction of |this|.
-      base::Unretained(this),
-      callback));
+  // Another frame is complete from a non-duplicate packet. Attempt to emit
+  // more frames to satisfy enqueued requests.
+  if (complete)
+    EmitAvailableEncodedFrames();
 }
 
-void AudioReceiver::DecodeEncodedAudioFrame(
-    const AudioFrameDecodedCallback& callback,
-    scoped_ptr<transport::EncodedFrame> encoded_frame) {
+void FrameReceiver::CastFeedback(const RtcpCastMessage& cast_message) {
   DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-  if (!encoded_frame) {
-    callback.Run(make_scoped_ptr<AudioBus>(NULL), base::TimeTicks(), false);
-    return;
-  }
-  const uint32 frame_id = encoded_frame->frame_id;
-  const uint32 rtp_timestamp = encoded_frame->rtp_timestamp;
-  const base::TimeTicks playout_time = encoded_frame->reference_time;
-  audio_decoder_->DecodeFrame(encoded_frame.Pass(),
-                              base::Bind(&AudioReceiver::EmitRawAudioFrame,
-                                         cast_environment_,
-                                         callback,
-                                         frame_id,
-                                         rtp_timestamp,
-                                         playout_time));
-}
 
-// static
-void AudioReceiver::EmitRawAudioFrame(
-    const scoped_refptr<CastEnvironment>& cast_environment,
-    const AudioFrameDecodedCallback& callback,
-    uint32 frame_id,
-    uint32 rtp_timestamp,
-    const base::TimeTicks& playout_time,
-    scoped_ptr<AudioBus> audio_bus,
-    bool is_continuous) {
-  DCHECK(cast_environment->CurrentlyOn(CastEnvironment::MAIN));
-  if (audio_bus.get()) {
-    const base::TimeTicks now = cast_environment->Clock()->NowTicks();
-    cast_environment->Logging()->InsertFrameEvent(
-        now, FRAME_DECODED, AUDIO_EVENT, rtp_timestamp, frame_id);
-    cast_environment->Logging()->InsertFrameEventWithDelay(
-        now, FRAME_PLAYOUT, AUDIO_EVENT, rtp_timestamp, frame_id,
-        playout_time - now);
-  }
-  callback.Run(audio_bus.Pass(), playout_time, is_continuous);
-}
+  base::TimeTicks now = cast_environment_->Clock()->NowTicks();
+  RtpTimestamp rtp_timestamp =
+      frame_id_to_rtp_timestamp_[cast_message.ack_frame_id_ & 0xff];
+  cast_environment_->Logging()->InsertFrameEvent(
+      now, FRAME_ACK_SENT, event_media_type_,
+      rtp_timestamp, cast_message.ack_frame_id_);
 
-void AudioReceiver::GetEncodedAudioFrame(const FrameEncodedCallback& callback) {
-  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-  frame_request_queue_.push_back(callback);
-  EmitAvailableEncodedFrames();
+  ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
+  event_subscriber_.GetRtcpEventsAndReset(&rtcp_events);
+  rtcp_.SendRtcpFromRtpReceiver(&cast_message, &rtcp_events);
 }
 
-void AudioReceiver::EmitAvailableEncodedFrames() {
+void FrameReceiver::EmitAvailableEncodedFrames() {
   DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
 
   while (!frame_request_queue_.empty()) {
@@ -199,8 +199,8 @@ void AudioReceiver::EmitAvailableEncodedFrames() {
     if (!framer_.GetEncodedFrame(encoded_frame.get(),
                                  &is_consecutively_next_frame,
                                  &have_multiple_complete_frames)) {
-      VLOG(1) << "Wait for more audio packets to produce a completed frame.";
-      return;  // OnReceivedPayloadData() will invoke this method in the future.
+      VLOG(1) << "Wait for more packets to produce a completed frame.";
+      return;  // ProcessParsedPacket() will invoke this method in the future.
     }
 
     const base::TimeTicks now = cast_environment_->Clock()->NowTicks();
@@ -229,7 +229,7 @@ void AudioReceiver::EmitAvailableEncodedFrames() {
       cast_environment_->PostDelayedTask(
           CastEnvironment::MAIN,
           FROM_HERE,
-          base::Bind(&AudioReceiver::EmitAvailableEncodedFramesAfterWaiting,
+          base::Bind(&FrameReceiver::EmitAvailableEncodedFramesAfterWaiting,
                      weak_factory_.GetWeakPtr()),
          playout_time - now);
     }
@@ -239,16 +239,15 @@ void AudioReceiver::EmitAvailableEncodedFrames() {
 
     // Decrypt the payload data in the frame, if crypto is being used.
     if (decryptor_.initialized()) {
-      std::string decrypted_audio_data;
+      std::string decrypted_data;
       if (!decryptor_.Decrypt(encoded_frame->frame_id,
                               encoded_frame->data,
-                              &decrypted_audio_data)) {
-        // Decryption failed. Give up on this frame, releasing it from the
-        // jitter buffer.
+                              &decrypted_data)) {
+        // Decryption failed. Give up on this frame.
         framer_.ReleaseFrame(encoded_frame->frame_id);
         continue;
       }
-      encoded_frame->data.swap(decrypted_audio_data);
+      encoded_frame->data.swap(decrypted_data);
     }
 
     // At this point, we have a decrypted EncodedFrame ready to be emitted.
@@ -262,95 +261,64 @@ void AudioReceiver::EmitAvailableEncodedFrames() {
   }
 }
 
-void AudioReceiver::EmitAvailableEncodedFramesAfterWaiting() {
+void FrameReceiver::EmitAvailableEncodedFramesAfterWaiting() {
   DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
   DCHECK(is_waiting_for_consecutive_frame_);
   is_waiting_for_consecutive_frame_ = false;
   EmitAvailableEncodedFrames();
 }
 
-base::TimeTicks AudioReceiver::GetPlayoutTime(uint32 rtp_timestamp) const {
+base::TimeTicks FrameReceiver::GetPlayoutTime(uint32 rtp_timestamp) const {
   return lip_sync_reference_time_ +
       lip_sync_drift_.Current() +
       RtpDeltaToTimeDelta(
           static_cast<int32>(rtp_timestamp - lip_sync_rtp_timestamp_),
-          frequency_) +
+          rtp_timebase_) +
       target_playout_delay_;
 }
 
-void AudioReceiver::IncomingPacket(scoped_ptr<Packet> packet) {
+void FrameReceiver::ScheduleNextCastMessage() {
   DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-  if (Rtcp::IsRtcpPacket(&packet->front(), packet->size())) {
-    rtcp_.IncomingRtcpPacket(&packet->front(), packet->size());
-  } else {
-    ReceivedPacket(&packet->front(), packet->size());
-  }
-  if (!reports_are_scheduled_) {
-    ScheduleNextRtcpReport();
-    ScheduleNextCastMessage();
-    reports_are_scheduled_ = true;
-  }
-}
-
-void AudioReceiver::CastFeedback(const RtcpCastMessage& cast_message) {
-  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-  base::TimeTicks now = cast_environment_->Clock()->NowTicks();
-  RtpTimestamp rtp_timestamp =
-      frame_id_to_rtp_timestamp_[cast_message.ack_frame_id_ & 0xff];
-  cast_environment_->Logging()->InsertFrameEvent(
-      now, FRAME_ACK_SENT, AUDIO_EVENT, rtp_timestamp,
-      cast_message.ack_frame_id_);
-
-  ReceiverRtcpEventSubscriber::RtcpEventMultiMap rtcp_events;
-  event_subscriber_.GetRtcpEventsAndReset(&rtcp_events);
-  rtcp_.SendRtcpFromRtpReceiver(&cast_message, &rtcp_events);
-}
-
-void AudioReceiver::ScheduleNextRtcpReport() {
-  DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-  base::TimeDelta time_to_send = rtcp_.TimeToSendNextRtcpReport() -
-      cast_environment_->Clock()->NowTicks();
-
+  base::TimeTicks send_time;
+  framer_.TimeToSendNextCastMessage(&send_time);
+  base::TimeDelta time_to_send =
+      send_time - cast_environment_->Clock()->NowTicks();
   time_to_send = std::max(
       time_to_send, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
-
   cast_environment_->PostDelayedTask(
       CastEnvironment::MAIN,
       FROM_HERE,
-      base::Bind(&AudioReceiver::SendNextRtcpReport,
+      base::Bind(&FrameReceiver::SendNextCastMessage,
                  weak_factory_.GetWeakPtr()),
      time_to_send);
 }
 
-void AudioReceiver::SendNextRtcpReport() {
+void FrameReceiver::SendNextCastMessage() {
   DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-  // TODO(pwestin): add logging.
-  rtcp_.SendRtcpFromRtpReceiver(NULL, NULL);
-  ScheduleNextRtcpReport();
+  framer_.SendCastMessage();  // Will only send a message if it is time.
+  ScheduleNextCastMessage();
 }
 
-// Cast messages should be sent within a maximum interval. Schedule a call
-// if not triggered elsewhere, e.g. by the cast message_builder.
-void AudioReceiver::ScheduleNextCastMessage() {
+void FrameReceiver::ScheduleNextRtcpReport() {
   DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-  base::TimeTicks send_time;
-  framer_.TimeToSendNextCastMessage(&send_time);
-  base::TimeDelta time_to_send =
-      send_time - cast_environment_->Clock()->NowTicks();
-  time_to_send = std::max(
-      time_to_send, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
+  base::TimeDelta time_to_next = rtcp_.TimeToSendNextRtcpReport() -
+      cast_environment_->Clock()->NowTicks();
+
+  time_to_next = std::max(
+      time_to_next, base::TimeDelta::FromMilliseconds(kMinSchedulingDelayMs));
+
   cast_environment_->PostDelayedTask(
       CastEnvironment::MAIN,
       FROM_HERE,
-      base::Bind(&AudioReceiver::SendNextCastMessage,
+      base::Bind(&FrameReceiver::SendNextRtcpReport,
                  weak_factory_.GetWeakPtr()),
-      time_to_send);
+      time_to_next);
 }
 
-void AudioReceiver::SendNextCastMessage() {
+void FrameReceiver::SendNextRtcpReport() {
   DCHECK(cast_environment_->CurrentlyOn(CastEnvironment::MAIN));
-  framer_.SendCastMessage();  // Will only send a message if it is time.
-  ScheduleNextCastMessage();
+  rtcp_.SendRtcpFromRtpReceiver(NULL, NULL);
+  ScheduleNextRtcpReport();
 }
 
 }  // namespace cast
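Note on the resulting API (not part of the patch itself): after this rename, packet input and frame output on the receiver are driven entirely through ProcessPacket() and RequestEncodedFrame(). The sketch below is a minimal illustration of how a caller might exercise that pair. The helper names OnEncodedFrame() and PumpOnePacket() and the surrounding setup are invented for illustration; only the FrameReceiver, FrameReceiverConfig, and callback shapes visible in the diff are assumed.

// Hypothetical usage sketch -- not part of this change.  Only the
// FrameReceiver calls come from the patch; the helper names and setup
// around them are illustrative assumptions.
#include "base/bind.h"
#include "base/logging.h"
#include "media/cast/cast_environment.h"
#include "media/cast/receiver/frame_receiver.h"

namespace media {
namespace cast {

// Matches the FrameEncodedCallback shape implied by the patch: a single
// scoped_ptr<transport::EncodedFrame>, which may be NULL if no frame
// could be produced.
void OnEncodedFrame(scoped_ptr<transport::EncodedFrame> frame) {
  if (!frame) {
    VLOG(1) << "No encoded frame delivered.";
    return;
  }
  // Hand the decrypted, encoded frame to a decoder or renderer here.
}

void PumpOnePacket(const scoped_refptr<CastEnvironment>& cast_environment,
                   const FrameReceiverConfig& config,
                   transport::PacedPacketSender* packet_sender,
                   scoped_ptr<Packet> packet) {
  FrameReceiver receiver(cast_environment, config, AUDIO_EVENT, packet_sender);

  // Push raw UDP payloads in; RTCP vs. RTP dispatch happens internally.
  if (!receiver.ProcessPacket(packet.Pass()))
    VLOG(1) << "Malformed packet ignored.";

  // Pull the next complete frame; the callback runs once a frame is ready.
  receiver.RequestEncodedFrame(base::Bind(&OnEncodedFrame));
}

}  // namespace cast
}  // namespace media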