| OLD | NEW |
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "media/cast/sender/audio_encoder.h" | 5 #include "media/cast/sender/audio_encoder.h" |
| 6 | 6 |
| 7 #include <stdint.h> | 7 #include <stdint.h> |
| 8 | 8 |
| 9 #include <algorithm> | 9 #include <algorithm> |
| 10 #include <limits> | 10 #include <limits> |
| 11 #include <string> | 11 #include <string> |
| 12 | 12 |
| 13 #include "base/bind.h" | 13 #include "base/bind.h" |
| 14 #include "base/bind_helpers.h" | 14 #include "base/bind_helpers.h" |
| 15 #include "base/location.h" | 15 #include "base/location.h" |
| 16 #include "base/macros.h" | 16 #include "base/macros.h" |
| 17 #include "base/stl_util.h" | 17 #include "base/stl_util.h" |
| 18 #include "base/sys_byteorder.h" | 18 #include "base/sys_byteorder.h" |
| 19 #include "base/time/time.h" | 19 #include "base/time/time.h" |
| 20 #include "base/trace_event/trace_event.h" | 20 #include "base/trace_event/trace_event.h" |
| 21 #include "build/build_config.h" | 21 #include "build/build_config.h" |
| 22 #include "media/cast/common/rtp_time.h" |
| 23 #include "media/cast/constants.h" |
| 22 | 24 |
| 23 #if !defined(OS_IOS) | 25 #if !defined(OS_IOS) |
| 24 #include "third_party/opus/src/include/opus.h" | 26 #include "third_party/opus/src/include/opus.h" |
| 25 #endif | 27 #endif |
| 26 | 28 |
| 27 #if defined(OS_MACOSX) | 29 #if defined(OS_MACOSX) |
| 28 #include <AudioToolbox/AudioToolbox.h> | 30 #include <AudioToolbox/AudioToolbox.h> |
| 29 #endif | 31 #endif |
| 30 | 32 |
| 31 namespace media { | 33 namespace media { |
| (...skipping 24 matching lines...) |
| 56 : cast_environment_(cast_environment), | 58 : cast_environment_(cast_environment), |
| 57 codec_(codec), | 59 codec_(codec), |
| 58 num_channels_(num_channels), | 60 num_channels_(num_channels), |
| 59 samples_per_frame_(samples_per_frame), | 61 samples_per_frame_(samples_per_frame), |
| 60 callback_(callback), | 62 callback_(callback), |
| 61 operational_status_(STATUS_UNINITIALIZED), | 63 operational_status_(STATUS_UNINITIALIZED), |
| 62 frame_duration_(base::TimeDelta::FromMicroseconds( | 64 frame_duration_(base::TimeDelta::FromMicroseconds( |
| 63 base::Time::kMicrosecondsPerSecond * samples_per_frame_ / | 65 base::Time::kMicrosecondsPerSecond * samples_per_frame_ / |
| 64 sampling_rate)), | 66 sampling_rate)), |
| 65 buffer_fill_end_(0), | 67 buffer_fill_end_(0), |
| 66 frame_id_(0), | 68 frame_id_(kFirstFrameId), |
| 67 frame_rtp_timestamp_(0), | |
| 68 samples_dropped_from_buffer_(0) { | 69 samples_dropped_from_buffer_(0) { |
| 69 // Support for max sampling rate of 48KHz, 2 channels, 100 ms duration. | 70 // Support for max sampling rate of 48KHz, 2 channels, 100 ms duration. |
| 70 const int kMaxSamplesTimesChannelsPerFrame = 48 * 2 * 100; | 71 const int kMaxSamplesTimesChannelsPerFrame = 48 * 2 * 100; |
| 71 if (num_channels_ <= 0 || samples_per_frame_ <= 0 || | 72 if (num_channels_ <= 0 || samples_per_frame_ <= 0 || |
| 72 frame_duration_ == base::TimeDelta() || | 73 frame_duration_ == base::TimeDelta() || |
| 73 samples_per_frame_ * num_channels_ > kMaxSamplesTimesChannelsPerFrame) { | 74 samples_per_frame_ * num_channels_ > kMaxSamplesTimesChannelsPerFrame) { |
| 74 operational_status_ = STATUS_INVALID_CONFIGURATION; | 75 operational_status_ = STATUS_INVALID_CONFIGURATION; |
| 75 } | 76 } |
| 76 } | 77 } |
| 77 | 78 |
| (...skipping 22 matching lines...) |
| 100 buffer_fill_end_ * frame_duration_ / samples_per_frame_; | 101 buffer_fill_end_ * frame_duration_ / samples_per_frame_; |
| 101 if (!frame_capture_time_.is_null()) { | 102 if (!frame_capture_time_.is_null()) { |
| 102 const base::TimeDelta amount_ahead_by = | 103 const base::TimeDelta amount_ahead_by = |
| 103 recorded_time - (frame_capture_time_ + buffer_fill_duration); | 104 recorded_time - (frame_capture_time_ + buffer_fill_duration); |
| 104 const int64_t num_frames_missed = amount_ahead_by / frame_duration_; | 105 const int64_t num_frames_missed = amount_ahead_by / frame_duration_; |
| 105 if (num_frames_missed > kUnderrunSkipThreshold) { | 106 if (num_frames_missed > kUnderrunSkipThreshold) { |
| 106 samples_dropped_from_buffer_ += buffer_fill_end_; | 107 samples_dropped_from_buffer_ += buffer_fill_end_; |
| 107 buffer_fill_end_ = 0; | 108 buffer_fill_end_ = 0; |
| 108 buffer_fill_duration = base::TimeDelta(); | 109 buffer_fill_duration = base::TimeDelta(); |
| 109 frame_rtp_timestamp_ += | 110 frame_rtp_timestamp_ += |
| 110 static_cast<uint32_t>(num_frames_missed * samples_per_frame_); | 111 RtpTimeDelta::FromTicks(num_frames_missed * samples_per_frame_); |
| 111 DVLOG(1) << "Skipping RTP timestamp ahead to account for " | 112 DVLOG(1) << "Skipping RTP timestamp ahead to account for " |
| 112 << num_frames_missed * samples_per_frame_ | 113 << num_frames_missed * samples_per_frame_ |
| 113 << " samples' worth of underrun."; | 114 << " samples' worth of underrun."; |
| 114 TRACE_EVENT_INSTANT2("cast.stream", "Audio Skip", | 115 TRACE_EVENT_INSTANT2("cast.stream", "Audio Skip", |
| 115 TRACE_EVENT_SCOPE_THREAD, | 116 TRACE_EVENT_SCOPE_THREAD, |
| 116 "frames missed", num_frames_missed, | 117 "frames missed", num_frames_missed, |
| 117 "samples dropped", samples_dropped_from_buffer_); | 118 "samples dropped", samples_dropped_from_buffer_); |
| 118 } | 119 } |
| 119 } | 120 } |
| 120 frame_capture_time_ = recorded_time - buffer_fill_duration; | 121 frame_capture_time_ = recorded_time - buffer_fill_duration; |
| (...skipping 18 matching lines...) |
| 139 break; | 140 break; |
| 140 | 141 |
| 141 scoped_ptr<SenderEncodedFrame> audio_frame( | 142 scoped_ptr<SenderEncodedFrame> audio_frame( |
| 142 new SenderEncodedFrame()); | 143 new SenderEncodedFrame()); |
| 143 audio_frame->dependency = EncodedFrame::KEY; | 144 audio_frame->dependency = EncodedFrame::KEY; |
| 144 audio_frame->frame_id = frame_id_; | 145 audio_frame->frame_id = frame_id_; |
| 145 audio_frame->referenced_frame_id = frame_id_; | 146 audio_frame->referenced_frame_id = frame_id_; |
| 146 audio_frame->rtp_timestamp = frame_rtp_timestamp_; | 147 audio_frame->rtp_timestamp = frame_rtp_timestamp_; |
| 147 audio_frame->reference_time = frame_capture_time_; | 148 audio_frame->reference_time = frame_capture_time_; |
| 148 | 149 |
| 149 TRACE_EVENT_ASYNC_BEGIN2("cast.stream", "Audio Encode", audio_frame.get(), | 150 TRACE_EVENT_ASYNC_BEGIN2( |
| 150 "frame_id", frame_id_, | 151 "cast.stream", |
| 151 "rtp_timestamp", frame_rtp_timestamp_); | 152 "Audio Encode", audio_frame.get(), |
| 153 "frame_id", frame_id_, |
| 154 "rtp_timestamp", frame_rtp_timestamp_.lower_32_bits()); |
| 152 if (EncodeFromFilledBuffer(&audio_frame->data)) { | 155 if (EncodeFromFilledBuffer(&audio_frame->data)) { |
| 153 // Compute deadline utilization as the real-world time elapsed divided | 156 // Compute deadline utilization as the real-world time elapsed divided |
| 154 // by the signal duration. | 157 // by the signal duration. |
| 155 audio_frame->deadline_utilization = | 158 audio_frame->deadline_utilization = |
| 156 (base::TimeTicks::Now() - start_time).InSecondsF() / | 159 (base::TimeTicks::Now() - start_time).InSecondsF() / |
| 157 frame_duration_.InSecondsF(); | 160 frame_duration_.InSecondsF(); |
| 158 | 161 |
| 159 TRACE_EVENT_ASYNC_END1("cast.stream", "Audio Encode", audio_frame.get(), | 162 TRACE_EVENT_ASYNC_END1("cast.stream", "Audio Encode", audio_frame.get(), |
| 160 "Deadline utilization", | 163 "Deadline utilization", |
| 161 audio_frame->deadline_utilization); | 164 audio_frame->deadline_utilization); |
| 162 | 165 |
| 163 audio_frame->encode_completion_time = | 166 audio_frame->encode_completion_time = |
| 164 cast_environment_->Clock()->NowTicks(); | 167 cast_environment_->Clock()->NowTicks(); |
| 165 cast_environment_->PostTask( | 168 cast_environment_->PostTask( |
| 166 CastEnvironment::MAIN, | 169 CastEnvironment::MAIN, |
| 167 FROM_HERE, | 170 FROM_HERE, |
| 168 base::Bind(callback_, | 171 base::Bind(callback_, |
| 169 base::Passed(&audio_frame), | 172 base::Passed(&audio_frame), |
| 170 samples_dropped_from_buffer_)); | 173 samples_dropped_from_buffer_)); |
| 171 samples_dropped_from_buffer_ = 0; | 174 samples_dropped_from_buffer_ = 0; |
| 172 } | 175 } |
| 173 | 176 |
| 174 // Reset the internal buffer, frame ID, and timestamps for the next frame. | 177 // Reset the internal buffer, frame ID, and timestamps for the next frame. |
| 175 buffer_fill_end_ = 0; | 178 buffer_fill_end_ = 0; |
| 176 ++frame_id_; | 179 ++frame_id_; |
| 177 frame_rtp_timestamp_ += samples_per_frame_; | 180 frame_rtp_timestamp_ += RtpTimeDelta::FromTicks(samples_per_frame_); |
| 178 frame_capture_time_ += frame_duration_; | 181 frame_capture_time_ += frame_duration_; |
| 179 } | 182 } |
| 180 } | 183 } |
| 181 | 184 |
| 182 protected: | 185 protected: |
| 183 friend class base::RefCountedThreadSafe<ImplBase>; | 186 friend class base::RefCountedThreadSafe<ImplBase>; |
| 184 virtual ~ImplBase() {} | 187 virtual ~ImplBase() {} |
| 185 | 188 |
| 186 virtual void TransferSamplesIntoBuffer(const AudioBus* audio_bus, | 189 virtual void TransferSamplesIntoBuffer(const AudioBus* audio_bus, |
| 187 int source_offset, | 190 int source_offset, |
| (...skipping 19 matching lines...) |
| 207 // buffer, this points to the position at which to populate data in a later | 210 // buffer, this points to the position at which to populate data in a later |
| 208 // call. | 211 // call. |
| 209 int buffer_fill_end_; | 212 int buffer_fill_end_; |
| 210 | 213 |
| 211 // A counter used to label EncodedFrames. | 214 // A counter used to label EncodedFrames. |
| 212 uint32_t frame_id_; | 215 uint32_t frame_id_; |
| 213 | 216 |
| 214 // The RTP timestamp for the next frame of encoded audio. This is defined as | 217 // The RTP timestamp for the next frame of encoded audio. This is defined as |
| 215 // the number of audio samples encoded so far, plus the estimated number of | 218 // the number of audio samples encoded so far, plus the estimated number of |
| 216 // samples that were missed due to data underruns. A receiver uses this value | 219 // samples that were missed due to data underruns. A receiver uses this value |
| 217 // to detect gaps in the audio signal data being provided. Per the spec, RTP | 220 // to detect gaps in the audio signal data being provided. |
| 218 // timestamp values are allowed to overflow and roll around past zero. | 221 RtpTimeTicks frame_rtp_timestamp_; |
| 219 uint32_t frame_rtp_timestamp_; | |
| 220 | 222 |
| 221 // The local system time associated with the start of the next frame of | 223 // The local system time associated with the start of the next frame of |
| 222 // encoded audio. This value is passed on to a receiver as a reference clock | 224 // encoded audio. This value is passed on to a receiver as a reference clock |
| 223 // timestamp for the purposes of synchronizing audio and video. Its | 225 // timestamp for the purposes of synchronizing audio and video. Its |
| 224 // progression is expected to drift relative to the elapsed time implied by | 226 // progression is expected to drift relative to the elapsed time implied by |
| 225 // the RTP timestamps. | 227 // the RTP timestamps. |
| 226 base::TimeTicks frame_capture_time_; | 228 base::TimeTicks frame_capture_time_; |
| 227 | 229 |
| 228 // Set to non-zero to indicate the next output frame skipped over audio | 230 // Set to non-zero to indicate the next output frame skipped over audio |
| 229 // samples in order to recover from an input underrun. | 231 // samples in order to recover from an input underrun. |
| (...skipping 636 matching lines...) |
| 866 cast_environment_->PostTask(CastEnvironment::AUDIO, | 868 cast_environment_->PostTask(CastEnvironment::AUDIO, |
| 867 FROM_HERE, | 869 FROM_HERE, |
| 868 base::Bind(&AudioEncoder::ImplBase::EncodeAudio, | 870 base::Bind(&AudioEncoder::ImplBase::EncodeAudio, |
| 869 impl_, | 871 impl_, |
| 870 base::Passed(&audio_bus), | 872 base::Passed(&audio_bus), |
| 871 recorded_time)); | 873 recorded_time)); |
| 872 } | 874 } |
| 873 | 875 |
| 874 } // namespace cast | 876 } // namespace cast |
| 875 } // namespace media | 877 } // namespace media |
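The constructor hunk above derives `frame_duration_` as `kMicrosecondsPerSecond * samples_per_frame_ / sampling_rate` and rejects configurations exceeding 48 kHz × 2 channels × 100 ms. A minimal standalone sketch of that arithmetic follows; the 48 kHz sample rate and 10 ms frame size are assumed example values, not taken from the diff:

```cpp
// Standalone illustration of the constructor's timing math.
// sampling_rate and samples_per_frame below are assumed example values.
#include <cstdint>
#include <iostream>

int main() {
  const int64_t kMicrosecondsPerSecond = 1000000;  // mirrors base::Time
  const int sampling_rate = 48000;                 // assumed: 48 kHz
  const int samples_per_frame = 480;               // assumed: 10 ms at 48 kHz
  const int num_channels = 2;

  // frame_duration_ = kMicrosecondsPerSecond * samples_per_frame / rate.
  const int64_t frame_duration_us =
      kMicrosecondsPerSecond * samples_per_frame / sampling_rate;

  // Buffer cap from the constructor: 48 kHz * 2 channels * 100 ms.
  const int kMaxSamplesTimesChannelsPerFrame = 48 * 2 * 100;
  const bool valid_config =
      samples_per_frame * num_channels <= kMaxSamplesTimesChannelsPerFrame;

  // Prints: 10000 us per frame, valid=1
  std::cout << frame_duration_us << " us per frame, valid="
            << valid_config << "\n";
  return 0;
}
```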
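The central change in this file replaces the raw `uint32_t frame_rtp_timestamp_` with the `RtpTimeTicks`/`RtpTimeDelta` types from `media/cast/common/rtp_time.h`: advancement goes through `RtpTimeDelta::FromTicks()` and the 32-bit wire value is obtained via `lower_32_bits()` only where it is actually needed (e.g. the trace event). The following is a simplified stand-in, not the real media::cast classes, sketching that design under the assumption that the type keeps full-width storage internally and truncates only at the RTP boundary:

```cpp
// Simplified stand-in for the RtpTimeTicks/RtpTimeDelta idea: keep a wide
// tick count internally and expose only the truncated 32-bit RTP value.
// Illustrative only; the real types in media/cast/common/rtp_time.h have a
// richer API.
#include <cstdint>
#include <iostream>

class RtpTicks {
 public:
  // Advance by a number of RTP ticks (audio samples, for this encoder).
  RtpTicks& operator+=(int64_t ticks) {
    value_ += ticks;
    return *this;
  }
  // RTP timestamps are 32 bits on the wire and are allowed to wrap; the
  // truncation happens here rather than in every arithmetic expression.
  uint32_t lower_32_bits() const { return static_cast<uint32_t>(value_); }

 private:
  int64_t value_ = 0;  // Full-width storage avoids ambiguous wrap-around math.
};

int main() {
  RtpTicks rtp;
  const int samples_per_frame = 480;  // assumed: 10 ms at 48 kHz
  for (int frame = 0; frame < 3; ++frame)
    rtp += samples_per_frame;         // mirrors frame_rtp_timestamp_ += ...
  std::cout << rtp.lower_32_bits() << "\n";  // 1440
  return 0;
}
```

The typed counter also explains the smaller edits in the diff: the comment about RTP overflow semantics moves out of this file because wrap-around is now a property of the type, and the trace macro switches to `lower_32_bits()` since it needs a plain integer argument.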