Chromium Code Reviews

Unified Diff: media/cast/sender/audio_encoder.cc

Issue 1515433002: Replace uses of raw uint32's with a type-checked RtpTimeTicks data type. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years ago
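
For orientation while reading the diff below: the patch swaps the raw uint32 RTP timestamp for the RtpTimeTicks and RtpTimeDelta types declared in the newly included media/cast/common/rtp_time.h. The following is only a minimal sketch of what such a type-checked wrapper can look like, limited to the operations visible in this diff (RtpTimeDelta::FromTicks(), operator+= on RtpTimeTicks, and lower_32_bits()); the int64_t storage and member names are illustrative assumptions, not the real header.

#include <cstdint>

// NOTE: illustrative sketch only; not the contents of media/cast/common/rtp_time.h.
class RtpTimeDelta {
 public:
  static RtpTimeDelta FromTicks(int64_t ticks) { return RtpTimeDelta(ticks); }
  int64_t ticks() const { return ticks_; }

 private:
  explicit RtpTimeDelta(int64_t ticks) : ticks_(ticks) {}
  int64_t ticks_;
};

class RtpTimeTicks {
 public:
  RtpTimeTicks() : value_(0) {}

  // Adding a delta produces a new point on the RTP timeline; adding a raw
  // integer no longer compiles, which is the point of the type check.
  RtpTimeTicks& operator+=(RtpTimeDelta delta) {
    value_ += delta.ticks();
    return *this;
  }

  // RTP packets carry a 32-bit timestamp field, so only the low 32 bits are
  // exposed where a raw integer is genuinely needed (e.g. trace events).
  uint32_t lower_32_bits() const { return static_cast<uint32_t>(value_); }

 private:
  int64_t value_;  // 64-bit storage here is an assumption for the sketch.
};
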
 // Copyright 2014 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "media/cast/sender/audio_encoder.h"
 
 #include <algorithm>
 #include <limits>
 #include <string>
 
 #include "base/bind.h"
 #include "base/bind_helpers.h"
 #include "base/location.h"
 #include "base/stl_util.h"
 #include "base/sys_byteorder.h"
 #include "base/time/time.h"
 #include "base/trace_event/trace_event.h"
+#include "media/cast/common/rtp_time.h"
+#include "media/cast/constants.h"
 
 #if !defined(OS_IOS)
 #include "third_party/opus/src/include/opus.h"
 #endif
 
 #if defined(OS_MACOSX)
 #include <AudioToolbox/AudioToolbox.h>
 #endif
 
 namespace media {
(...skipping 24 matching lines...)
       : cast_environment_(cast_environment),
         codec_(codec),
         num_channels_(num_channels),
         samples_per_frame_(samples_per_frame),
         callback_(callback),
         operational_status_(STATUS_UNINITIALIZED),
         frame_duration_(base::TimeDelta::FromMicroseconds(
             base::Time::kMicrosecondsPerSecond * samples_per_frame_ /
             sampling_rate)),
         buffer_fill_end_(0),
-        frame_id_(0),
-        frame_rtp_timestamp_(0),
+        frame_id_(kFirstFrameId),
         samples_dropped_from_buffer_(0) {
     // Support for max sampling rate of 48KHz, 2 channels, 100 ms duration.
     const int kMaxSamplesTimesChannelsPerFrame = 48 * 2 * 100;
     if (num_channels_ <= 0 || samples_per_frame_ <= 0 ||
         frame_duration_ == base::TimeDelta() ||
         samples_per_frame_ * num_channels_ > kMaxSamplesTimesChannelsPerFrame) {
       operational_status_ = STATUS_INVALID_CONFIGURATION;
     }
   }
 
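As a quick sanity check of the frame_duration_ expression in the initializer list above (unchanged by this patch), the value is simply the frame length re-expressed as wall-clock time. A standalone version of the same arithmetic, using 480 samples at 48 kHz purely as example values:

#include <cstdint>
#include <iostream>

int main() {
  const int64_t kMicrosecondsPerSecond = 1000000;  // mirrors base::Time's constant
  const int samples_per_frame = 480;   // example value: 10 ms frames
  const int sampling_rate = 48000;     // example value: 48 kHz
  // Same expression as the initializer list above.
  const int64_t frame_duration_us =
      kMicrosecondsPerSecond * samples_per_frame / sampling_rate;
  std::cout << frame_duration_us << " us\n";  // prints 10000 (10 ms)
  return 0;
}
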
(...skipping 22 matching lines...)
         buffer_fill_end_ * frame_duration_ / samples_per_frame_;
     if (!frame_capture_time_.is_null()) {
       const base::TimeDelta amount_ahead_by =
           recorded_time - (frame_capture_time_ + buffer_fill_duration);
       const int64 num_frames_missed = amount_ahead_by / frame_duration_;
       if (num_frames_missed > kUnderrunSkipThreshold) {
         samples_dropped_from_buffer_ += buffer_fill_end_;
         buffer_fill_end_ = 0;
         buffer_fill_duration = base::TimeDelta();
         frame_rtp_timestamp_ +=
-            static_cast<uint32>(num_frames_missed * samples_per_frame_);
+            RtpTimeDelta::FromTicks(num_frames_missed * samples_per_frame_);
         DVLOG(1) << "Skipping RTP timestamp ahead to account for "
                  << num_frames_missed * samples_per_frame_
                  << " samples' worth of underrun.";
         TRACE_EVENT_INSTANT2("cast.stream", "Audio Skip",
                              TRACE_EVENT_SCOPE_THREAD,
                              "frames missed", num_frames_missed,
                              "samples dropped", samples_dropped_from_buffer_);
       }
     }
     frame_capture_time_ = recorded_time - buffer_fill_duration;
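The hunk above is the underrun-recovery path: when captured audio arrives far later than the end of the buffered signal, the buffer is dropped and the RTP timestamp is skipped ahead by whole frames, now via RtpTimeDelta::FromTicks() instead of a raw cast. A self-contained sketch of the same arithmetic, with a 10 ms / 480-sample frame, a 65 ms gap, and a placeholder threshold chosen purely for illustration:

#include <chrono>
#include <cstdint>
#include <iostream>

int main() {
  using std::chrono::milliseconds;
  const milliseconds frame_duration(10);     // example: 480 samples at 48 kHz
  const int samples_per_frame = 480;         // example value
  const int64_t kUnderrunSkipThreshold = 3;  // placeholder, not the real constant

  // How far the newly recorded audio is ahead of where the encoder expected
  // the next sample to start.
  const milliseconds amount_ahead_by(65);
  const int64_t num_frames_missed = amount_ahead_by / frame_duration;  // 6

  if (num_frames_missed > kUnderrunSkipThreshold) {
    // Skip the RTP timestamp ahead by the missed samples, as the hunk above
    // does via RtpTimeDelta::FromTicks().
    const int64_t rtp_ticks_to_skip = num_frames_missed * samples_per_frame;
    std::cout << "skip " << rtp_ticks_to_skip << " RTP ticks\n";  // 2880
  }
  return 0;
}
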
(...skipping 18 matching lines...)
         break;
 
       scoped_ptr<SenderEncodedFrame> audio_frame(
           new SenderEncodedFrame());
       audio_frame->dependency = EncodedFrame::KEY;
       audio_frame->frame_id = frame_id_;
       audio_frame->referenced_frame_id = frame_id_;
       audio_frame->rtp_timestamp = frame_rtp_timestamp_;
       audio_frame->reference_time = frame_capture_time_;
 
-      TRACE_EVENT_ASYNC_BEGIN2("cast.stream", "Audio Encode", audio_frame.get(),
-                               "frame_id", frame_id_,
-                               "rtp_timestamp", frame_rtp_timestamp_);
+      TRACE_EVENT_ASYNC_BEGIN2(
+          "cast.stream",
+          "Audio Encode", audio_frame.get(),
+          "frame_id", frame_id_,
+          "rtp_timestamp", frame_rtp_timestamp_.lower_32_bits());
       if (EncodeFromFilledBuffer(&audio_frame->data)) {
         // Compute deadline utilization as the real-world time elapsed divided
         // by the signal duration.
         audio_frame->deadline_utilization =
             (base::TimeTicks::Now() - start_time).InSecondsF() /
             frame_duration_.InSecondsF();
 
         TRACE_EVENT_ASYNC_END1("cast.stream", "Audio Encode", audio_frame.get(),
                                "Deadline utilization",
                                audio_frame->deadline_utilization);
 
         audio_frame->encode_completion_time =
             cast_environment_->Clock()->NowTicks();
         cast_environment_->PostTask(
             CastEnvironment::MAIN,
             FROM_HERE,
             base::Bind(callback_,
                        base::Passed(&audio_frame),
                        samples_dropped_from_buffer_));
         samples_dropped_from_buffer_ = 0;
       }
 
       // Reset the internal buffer, frame ID, and timestamps for the next frame.
       buffer_fill_end_ = 0;
       ++frame_id_;
-      frame_rtp_timestamp_ += samples_per_frame_;
+      frame_rtp_timestamp_ += RtpTimeDelta::FromTicks(samples_per_frame_);
       frame_capture_time_ += frame_duration_;
     }
   }
 
  protected:
   friend class base::RefCountedThreadSafe<ImplBase>;
   virtual ~ImplBase() {}
 
   virtual void TransferSamplesIntoBuffer(const AudioBus* audio_bus,
                                          int source_offset,
(...skipping 19 matching lines...)
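In the encode hunk above (before the skipped region), deadline_utilization is the wall-clock time spent encoding divided by the duration of audio the frame represents, so values below 1.0 mean the encoder keeps up with real time. A toy version of the same ratio, with a 2 ms encode of a 10 ms frame assumed purely as example numbers:

#include <iostream>

int main() {
  const double encode_seconds = 0.002;  // example: encoding took 2 ms of real time
  const double frame_seconds = 0.010;   // example: the frame covers 10 ms of audio
  const double deadline_utilization = encode_seconds / frame_seconds;
  std::cout << deadline_utilization << "\n";  // 0.2 -> plenty of headroom
  return 0;
}
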
   // buffer, this points to the position at which to populate data in a later
   // call.
   int buffer_fill_end_;
 
   // A counter used to label EncodedFrames.
   uint32 frame_id_;
 
   // The RTP timestamp for the next frame of encoded audio. This is defined as
   // the number of audio samples encoded so far, plus the estimated number of
   // samples that were missed due to data underruns. A receiver uses this value
-  // to detect gaps in the audio signal data being provided. Per the spec, RTP
-  // timestamp values are allowed to overflow and roll around past zero.
-  uint32 frame_rtp_timestamp_;
+  // to detect gaps in the audio signal data being provided.
+  RtpTimeTicks frame_rtp_timestamp_;
 
   // The local system time associated with the start of the next frame of
   // encoded audio. This value is passed on to a receiver as a reference clock
   // timestamp for the purposes of synchronizing audio and video. Its
   // progression is expected to drift relative to the elapsed time implied by
   // the RTP timestamps.
   base::TimeTicks frame_capture_time_;
 
   // Set to non-zero to indicate the next output frame skipped over audio
   // samples in order to recover from an input underrun.
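The comment on frame_rtp_timestamp_ above notes that a receiver uses the RTP timestamp to detect gaps in the audio signal. A hedged sketch of that check, written with plain integers rather than the real RtpTimeTicks class and with all names invented for illustration: since the sender advances the timestamp by exactly samples_per_frame per frame (plus any underrun skip), any larger advance implies missing samples.

#include <cstdint>

// Hypothetical helper: how many samples were skipped between two consecutive
// frames, given each frame's RTP timestamp and the nominal frame size.
// Returns 0 for a contiguous signal.
int64_t SamplesMissingBetweenFrames(int64_t previous_rtp_timestamp,
                                    int64_t current_rtp_timestamp,
                                    int samples_per_frame) {
  const int64_t advance = current_rtp_timestamp - previous_rtp_timestamp;
  return advance - samples_per_frame;
}
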
(...skipping 641 matching lines...)
   cast_environment_->PostTask(CastEnvironment::AUDIO,
                               FROM_HERE,
                               base::Bind(&AudioEncoder::ImplBase::EncodeAudio,
                                          impl_,
                                          base::Passed(&audio_bus),
                                          recorded_time));
 }
 
 }  // namespace cast
 }  // namespace media
