OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media/audio_track_recorder.h" | 5 #include "content/renderer/media/audio_track_recorder.h" |
6 | 6 |
7 #include <stdint.h> | 7 #include <stdint.h> |
8 #include <utility> | 8 #include <utility> |
9 | 9 |
10 #include "base/bind.h" | 10 #include "base/bind.h" |
11 #include "base/macros.h" | 11 #include "base/macros.h" |
12 #include "base/stl_util.h" | 12 #include "base/stl_util.h" |
13 #include "media/audio/audio_parameters.h" | 13 #include "media/audio/audio_parameters.h" |
14 #include "media/base/audio_bus.h" | 14 #include "media/base/audio_bus.h" |
15 #include "media/base/audio_converter.h" | |
16 #include "media/base/audio_fifo.h" | |
15 #include "media/base/bind_to_current_loop.h" | 17 #include "media/base/bind_to_current_loop.h" |
16 #include "third_party/opus/src/include/opus.h" | 18 #include "third_party/opus/src/include/opus.h" |
17 | 19 |
18 // Note that this code follows the Chrome media convention of defining a "frame" | 20 // Note that this code follows the Chrome media convention of defining a "frame" |
19 // as "one multi-channel sample" as opposed to another common definition | 21 // as "one multi-channel sample" as opposed to another common definition meaning |
20 // meaning "a chunk of samples". Here this second definition of "frame" is | 22 // "a chunk of samples". Here this second definition of "frame" is called a |
21 // called a "buffer"; so what might be called "frame duration" is instead | 23 // "buffer"; so what might be called "frame duration" is instead "buffer |
22 // "buffer duration", and so on. | 24 // duration", and so on. |
23 | 25 |
24 namespace content { | 26 namespace content { |
25 | 27 |
26 namespace { | 28 namespace { |
27 | 29 |
28 enum { | 30 // Maximum number of buffers that can be held in the AudioFifo of AudioEncoder. |
29 // This is the recommended value, according to documentation in | 31 static const size_t kMaxNumberOfFifoBuffers = 2; |
30 // third_party/opus/src/include/opus.h, so that the Opus encoder does not | |
31 // degrade the audio due to memory constraints. | |
32 OPUS_MAX_PAYLOAD_SIZE = 4000, | |
33 | 32 |
34 // Support for max sampling rate of 48KHz, 2 channels, 60 ms duration. | 33 // Recommended value for opus_encode_float(), according to documentation in |
35 MAX_SAMPLES_PER_BUFFER = 48 * 2 * 60, | 34 // third_party/opus/src/include/opus.h, so that the Opus encoder does not |
36 }; | 35 // degrade the audio due to memory constraints. |
36 static const int kOpusMaxDataBytes = 4000; | |
37 | |
38 // Opus preferred sampling rate for encoding. This is also the one webm likes to | |
39 // have: https://wiki.xiph.org/MatroskaOpus. | |
40 static const int kOpusPreferredSamplingRate = 48000; | |
41 | |
42 // Media Stream Audio Tracks always send 10ms worth of Audio, which happens to | |
43 // be Opus-friendly (https://tools.ietf.org/html/rfc6716#section-2.1.4). | |
44 static const int kMediaStreamTrackBufferDurationMs = 10; | |
45 | |
46 // The number of frames in a 10 ms buffer @ 48000 samples/second. | |
47 static const int kOpusPreferredFramesPerBuffer = 480; | |
miu
2016/01/22 00:14:53
Opus will produce higher quality audio if encoding
mcasas
2016/01/22 22:03:53
Done.
Note that the input will still be chunks of
| |
48 | |
49 static_assert(kOpusPreferredFramesPerBuffer == | |
50 kOpusPreferredSamplingRate * | |
51 kMediaStreamTrackBufferDurationMs / | |
52 1000, | |
53 "kOpusPreferredFramesPerBuffer should be ==" | |
54 "kOpusPreferredSamplingRate * kMediaStreamTrackBufferDurationMs"); | |
55 | |
56 // Tries to encode |data_in|'s |num_samples| into |data_out|. | |
57 bool DoEncode(OpusEncoder* opus_encoder, | |
58 float* data_in, | |
59 int num_samples, | |
60 std::string* data_out) { | |
61 DCHECK_EQ(kOpusPreferredFramesPerBuffer, num_samples); | |
62 | |
63 data_out->resize(kOpusMaxDataBytes); | |
64 const opus_int32 result = opus_encode_float( | |
65 opus_encoder, data_in, num_samples, | |
66 reinterpret_cast<uint8_t*>(string_as_array(data_out)), kOpusMaxDataBytes); | |
67 | |
68 if (result > 1) { | |
69 // TODO(ajose): Investigate improving this. http://crbug.com/547918 | |
70 data_out->resize(result); | |
71 return true; | |
72 } | |
73 // If |result| in {0,1}, do nothing; the documentation says that a return | |
74 // value of zero or one means the packet does not need to be transmitted. | |
75 // Otherwise, we have an error. | |
76 DLOG_IF(ERROR, result < 0) << " encode failed: " << opus_strerror(result); | |
77 return false; | |
78 } | |
79 | |
80 // Interleaves |audio_bus| channels() of floats into a single output linear | |
81 // |buffer|. (AudioBus::ToInterleaved() does not support float). | |
miu
2016/01/22 00:14:53
It probably should! ;)
This code was originally
mcasas
2016/01/22 22:03:53
Bug it is. Happy to do it or it can a GoodFirstBug
| |
82 void ToInterleaved(media::AudioBus* audio_bus, float* buffer) { | |
83 for (int ch = 0; ch < audio_bus->channels(); ++ch) { | |
84 const float* src = audio_bus->channel(ch); | |
85 const float* const src_end = src + audio_bus->frames(); | |
86 float* dest = buffer + ch; | |
87 for (; src < src_end; ++src, dest += audio_bus->channels()) | |
88 *dest = *src; | |
89 } | |
90 } | |
37 | 91 |
38 } // anonymous namespace | 92 } // anonymous namespace |
39 | 93 |
40 // Nested class encapsulating opus-related encoding details. | 94 // Nested class encapsulating opus-related encoding details. It contains an |
41 // AudioEncoder is created and destroyed on ATR's main thread (usually the | 95 // AudioConverter to adapt incoming data to the format Opus likes to have. |
42 // main render thread) but otherwise should operate entirely on | 96 // AudioEncoder is created and destroyed on ATR's main thread (usually the main |
43 // |encoder_thread_|, which is owned by AudioTrackRecorder. Be sure to delete | 97 // render thread) but otherwise should operate entirely on |encoder_thread_|, |
44 // |encoder_thread_| before deleting the AudioEncoder using it. | 98 // which is owned by AudioTrackRecorder. Be sure to delete |encoder_thread_| |
99 // before deleting the AudioEncoder using it. | |
45 class AudioTrackRecorder::AudioEncoder | 100 class AudioTrackRecorder::AudioEncoder |
46 : public base::RefCountedThreadSafe<AudioEncoder> { | 101 : public base::RefCountedThreadSafe<AudioEncoder>, |
102 public media::AudioConverter::InputCallback { | |
47 public: | 103 public: |
48 explicit AudioEncoder(const OnEncodedAudioCB& on_encoded_audio_cb) | 104 explicit AudioEncoder(const OnEncodedAudioCB& on_encoded_audio_cb) |
49 : on_encoded_audio_cb_(on_encoded_audio_cb), opus_encoder_(nullptr) { | 105 : on_encoded_audio_cb_(on_encoded_audio_cb), opus_encoder_(nullptr) { |
50 // AudioEncoder is constructed on the thread that ATR lives on, but should | 106 // AudioEncoder is constructed on the thread that ATR lives on, but should |
51 // operate only on the encoder thread after that. Reset | 107 // operate only on the encoder thread after that. Reset |
52 // |encoder_thread_checker_| here, as the next call to CalledOnValidThread() | 108 // |encoder_thread_checker_| here, as the next call to CalledOnValidThread() |
53 // will be from the encoder thread. | 109 // will be from the encoder thread. |
54 encoder_thread_checker_.DetachFromThread(); | 110 encoder_thread_checker_.DetachFromThread(); |
55 } | 111 } |
56 | 112 |
113 // media::AudioConverter::InputCallback implementation. | |
114 double ProvideInput(media::AudioBus* audio_bus, | |
miu
2016/01/22 00:14:53
This should be private, since it's only meant to b
mcasas
2016/01/22 22:03:53
Done.
| |
115 base::TimeDelta buffer_delay) override; | |
116 | |
57 void OnSetFormat(const media::AudioParameters& params); | 117 void OnSetFormat(const media::AudioParameters& params); |
58 | 118 |
59 void EncodeAudio(scoped_ptr<media::AudioBus> audio_bus, | 119 void EncodeAudio(scoped_ptr<media::AudioBus> audio_bus, |
60 const base::TimeTicks& capture_time); | 120 const base::TimeTicks& capture_time); |
61 | 121 |
62 private: | 122 private: |
63 friend class base::RefCountedThreadSafe<AudioEncoder>; | 123 friend class base::RefCountedThreadSafe<AudioEncoder>; |
64 | 124 |
65 ~AudioEncoder(); | 125 ~AudioEncoder() override; |
66 | 126 |
67 bool is_initialized() const { return !!opus_encoder_; } | 127 bool is_initialized() const { return !!opus_encoder_; } |
68 | 128 |
69 void DestroyExistingOpusEncoder(); | 129 void DestroyExistingOpusEncoder(); |
70 | 130 |
71 void TransferSamplesIntoBuffer(const media::AudioBus* audio_bus, | |
72 int source_offset, | |
73 int buffer_fill_offset, | |
74 int num_samples); | |
75 bool EncodeFromFilledBuffer(std::string* out); | |
76 | |
77 const OnEncodedAudioCB on_encoded_audio_cb_; | 131 const OnEncodedAudioCB on_encoded_audio_cb_; |
78 | 132 |
79 base::ThreadChecker encoder_thread_checker_; | 133 base::ThreadChecker encoder_thread_checker_; |
80 | 134 |
81 // In the case where a call to EncodeAudio() cannot completely fill the | 135 // Track Audio (ingress) and Opus encoder input parameters, respectively. They |
82 // buffer, this points to the position at which to populate data in a later | 136 // only differ in their sample_rate() and frames_per_buffer(): output is |
83 // call. | 137 // 48ksamples/s and 480, respectively. |
84 int buffer_fill_end_; | 138 media::AudioParameters input_params_; |
139 media::AudioParameters output_params_; | |
85 | 140 |
86 int frames_per_buffer_; | 141 // Sampling rate adapter between an OpusEncoder supported and the provided. |
87 | 142 scoped_ptr<media::AudioConverter> converter_; |
88 // The duration of one set of frames of encoded audio samples. | 143 scoped_ptr<media::AudioFifo> fifo_; |
89 base::TimeDelta buffer_duration_; | |
90 | |
91 media::AudioParameters audio_params_; | |
92 | 144 |
93 // Buffer for passing AudioBus data to OpusEncoder. | 145 // Buffer for passing AudioBus data to OpusEncoder. |
94 scoped_ptr<float[]> buffer_; | 146 scoped_ptr<float[]> buffer_; |
95 | 147 |
96 OpusEncoder* opus_encoder_; | 148 OpusEncoder* opus_encoder_; |
97 | 149 |
98 DISALLOW_COPY_AND_ASSIGN(AudioEncoder); | 150 DISALLOW_COPY_AND_ASSIGN(AudioEncoder); |
99 }; | 151 }; |
100 | 152 |
101 AudioTrackRecorder::AudioEncoder::~AudioEncoder() { | 153 AudioTrackRecorder::AudioEncoder::~AudioEncoder() { |
102 // We don't DCHECK that we're on the encoder thread here, as it should have | 154 // We don't DCHECK that we're on the encoder thread here, as it should have |
103 // already been deleted at this point. | 155 // already been deleted at this point. |
104 DestroyExistingOpusEncoder(); | 156 DestroyExistingOpusEncoder(); |
105 } | 157 } |
106 | 158 |
159 double AudioTrackRecorder::AudioEncoder::ProvideInput( | |
160 media::AudioBus* audio_bus, | |
161 base::TimeDelta buffer_delay) { | |
162 if (fifo_->frames() >= audio_bus->frames()) | |
163 fifo_->Consume(audio_bus, 0, audio_bus->frames()); | |
164 else | |
165 audio_bus->Zero(); | |
166 // Return volume greater than zero to indicate we have more data. | |
167 return 1.0; | |
168 } | |
169 | |
107 void AudioTrackRecorder::AudioEncoder::OnSetFormat( | 170 void AudioTrackRecorder::AudioEncoder::OnSetFormat( |
108 const media::AudioParameters& params) { | 171 const media::AudioParameters& input_params) { |
172 DVLOG(1) << __FUNCTION__; | |
109 DCHECK(encoder_thread_checker_.CalledOnValidThread()); | 173 DCHECK(encoder_thread_checker_.CalledOnValidThread()); |
110 if (audio_params_.Equals(params)) | 174 if (input_params_.Equals(input_params)) |
111 return; | 175 return; |
112 | 176 |
113 DestroyExistingOpusEncoder(); | 177 DestroyExistingOpusEncoder(); |
114 | 178 |
115 if (!params.IsValid()) { | 179 if (!input_params.IsValid() || input_params.channels() > 2) { |
116 DLOG(ERROR) << "Invalid audio params: " << params.AsHumanReadableString(); | 180 DLOG(ERROR) << "Invalid params: " << input_params.AsHumanReadableString(); |
181 return; | |
182 } | |
183 input_params_ = input_params; | |
184 input_params_.set_frames_per_buffer(input_params_.sample_rate() * | |
miu
2016/01/22 00:14:53
Shouldn't this be:
input_params_.set_frames_per
mcasas
2016/01/22 22:03:53
No, input is always 10ms, the input sampling rate
| |
185 kMediaStreamTrackBufferDurationMs / | |
186 1000); | |
187 | |
188 output_params_ = media::AudioParameters( | |
189 media::AudioParameters::AUDIO_PCM_LOW_LATENCY, | |
190 media::GuessChannelLayout(input_params_.channels()), | |
191 kOpusPreferredSamplingRate, | |
192 input_params_.bits_per_sample(), | |
193 kOpusPreferredFramesPerBuffer); | |
194 DVLOG(1) << "|input_params_|:" << input_params_.AsHumanReadableString() | |
195 << " -->|output_params_|:" << output_params_.AsHumanReadableString(); | |
196 | |
197 converter_.reset(new media::AudioConverter(input_params_, output_params_, | |
198 false /* disable_fifo */)); | |
199 converter_->AddInput(this); | |
mcasas
2016/01/22 22:03:53
add here
|converter_->PrimeWithSilence()|
| |
200 | |
201 fifo_.reset(new media::AudioFifo( | |
202 input_params_.channels(), | |
203 kMaxNumberOfFifoBuffers * input_params_.frames_per_buffer())); | |
204 | |
205 buffer_.reset( | |
206 new float[output_params_.channels() * kOpusPreferredFramesPerBuffer * | |
207 output_params_.bits_per_sample() / 8]); | |
miu
2016/01/22 00:14:53
bits_per_sample() is erroneous (it really should b
mcasas
2016/01/22 22:03:53
Done.
| |
208 | |
209 // Initialize OpusEncoder. | |
210 int opus_result; | |
211 opus_encoder_ = opus_encoder_create(output_params_.sample_rate(), | |
212 output_params_.channels(), | |
213 OPUS_APPLICATION_AUDIO, | |
214 &opus_result); | |
215 if (opus_result < 0) { | |
216 DLOG(ERROR) << "Couldn't init opus encoder: " << opus_strerror(opus_result) | |
217 << ", sample rate: " << output_params_.sample_rate() | |
218 << ", channels: " << output_params_.channels(); | |
117 return; | 219 return; |
118 } | 220 } |
119 | 221 |
120 buffer_duration_ = base::TimeDelta::FromMilliseconds( | |
121 AudioTrackRecorder::GetOpusBufferDuration(params.sample_rate())); | |
122 if (buffer_duration_ == base::TimeDelta()) { | |
123 DLOG(ERROR) << "Could not find a valid |buffer_duration| for the given " | |
124 << "sample rate: " << params.sample_rate(); | |
125 return; | |
126 } | |
127 | |
128 frames_per_buffer_ = | |
129 params.sample_rate() * buffer_duration_.InMilliseconds() / 1000; | |
130 if (frames_per_buffer_ * params.channels() > MAX_SAMPLES_PER_BUFFER) { | |
131 DLOG(ERROR) << "Invalid |frames_per_buffer_|: " << frames_per_buffer_; | |
132 return; | |
133 } | |
134 | |
135 // Initialize AudioBus buffer for OpusEncoder. | |
136 buffer_fill_end_ = 0; | |
137 buffer_.reset(new float[params.channels() * frames_per_buffer_]); | |
138 | |
139 // Initialize OpusEncoder. | |
140 int opus_result; | |
141 opus_encoder_ = opus_encoder_create(params.sample_rate(), params.channels(), | |
142 OPUS_APPLICATION_AUDIO, &opus_result); | |
143 if (opus_result < 0) { | |
144 DLOG(ERROR) << "Couldn't init opus encoder: " << opus_strerror(opus_result) | |
145 << ", sample rate: " << params.sample_rate() | |
146 << ", channels: " << params.channels(); | |
147 return; | |
148 } | |
149 | |
150 // Note: As of 2013-10-31, the encoder in "auto bitrate" mode would use a | 222 // Note: As of 2013-10-31, the encoder in "auto bitrate" mode would use a |
151 // variable bitrate up to 102kbps for 2-channel, 48 kHz audio and a 10 ms | 223 // variable bitrate up to 102kbps for 2-channel, 48 kHz audio and a 10 ms |
152 // buffer duration. The opus library authors may, of course, adjust this in | 224 // buffer duration. The opus library authors may, of course, adjust this in |
153 // later versions. | 225 // later versions. |
154 if (opus_encoder_ctl(opus_encoder_, OPUS_SET_BITRATE(OPUS_AUTO)) != OPUS_OK) { | 226 if (opus_encoder_ctl(opus_encoder_, OPUS_SET_BITRATE(OPUS_AUTO)) != OPUS_OK) { |
155 DLOG(ERROR) << "Failed to set opus bitrate."; | 227 DLOG(ERROR) << "Failed to set opus bitrate."; |
156 return; | 228 return; |
157 } | 229 } |
158 | |
159 audio_params_ = params; | |
160 } | 230 } |
161 | 231 |
162 void AudioTrackRecorder::AudioEncoder::EncodeAudio( | 232 void AudioTrackRecorder::AudioEncoder::EncodeAudio( |
163 scoped_ptr<media::AudioBus> audio_bus, | 233 scoped_ptr<media::AudioBus> input_bus, |
164 const base::TimeTicks& capture_time) { | 234 const base::TimeTicks& capture_time) { |
235 DVLOG(1) << __FUNCTION__ << ", #frames " << input_bus->frames(); | |
165 DCHECK(encoder_thread_checker_.CalledOnValidThread()); | 236 DCHECK(encoder_thread_checker_.CalledOnValidThread()); |
166 DCHECK_EQ(audio_bus->channels(), audio_params_.channels()); | 237 DCHECK_EQ(input_bus->channels(), input_params_.channels()); |
238 DCHECK_EQ(input_bus->frames(), input_params_.frames_per_buffer()); | |
239 DCHECK(!capture_time.is_null()); | |
240 DCHECK(converter_); | |
167 | 241 |
168 if (!is_initialized()) | 242 if (!is_initialized()) |
169 return; | 243 return; |
244 fifo_->Push(input_bus.release()); // Push received data into |fifo_|. | |
170 | 245 |
171 base::TimeDelta buffer_fill_duration = | 246 scoped_ptr<media::AudioBus> audio_bus = media::AudioBus::Create( |
172 buffer_fill_end_ * buffer_duration_ / frames_per_buffer_; | 247 output_params_.channels(), kOpusPreferredFramesPerBuffer); |
173 base::TimeTicks buffer_capture_time = capture_time - buffer_fill_duration; | 248 converter_->Convert(audio_bus.get()); |
miu
2016/01/22 00:14:53
You can't call convert until you know there are en
mcasas
2016/01/22 22:03:53
Noted. This is experimented in another unrelated C
| |
249 ToInterleaved(audio_bus.release(), buffer_.get()); | |
174 | 250 |
175 // Encode all audio in |audio_bus| into zero or more packets. | 251 scoped_ptr<std::string> encoded_data(new std::string()); |
176 int src_pos = 0; | 252 if (DoEncode(opus_encoder_, buffer_.get(), kOpusPreferredFramesPerBuffer, |
177 while (src_pos < audio_bus->frames()) { | 253 encoded_data.get())) { |
178 const int num_samples_to_xfer = std::min( | 254 on_encoded_audio_cb_.Run(output_params_, std::move(encoded_data), |
179 frames_per_buffer_ - buffer_fill_end_, audio_bus->frames() - src_pos); | 255 capture_time); |
180 TransferSamplesIntoBuffer(audio_bus.get(), src_pos, buffer_fill_end_, | |
181 num_samples_to_xfer); | |
182 src_pos += num_samples_to_xfer; | |
183 buffer_fill_end_ += num_samples_to_xfer; | |
184 | |
185 if (buffer_fill_end_ < frames_per_buffer_) | |
186 break; | |
187 | |
188 scoped_ptr<std::string> encoded_data(new std::string()); | |
189 if (EncodeFromFilledBuffer(encoded_data.get())) { | |
190 on_encoded_audio_cb_.Run(audio_params_, std::move(encoded_data), | |
191 buffer_capture_time); | |
192 } | |
193 | |
194 // Reset the capture timestamp and internal buffer for next set of frames. | |
195 buffer_capture_time += buffer_duration_; | |
196 buffer_fill_end_ = 0; | |
197 } | 256 } |
198 } | 257 } |
199 | 258 |
200 void AudioTrackRecorder::AudioEncoder::DestroyExistingOpusEncoder() { | 259 void AudioTrackRecorder::AudioEncoder::DestroyExistingOpusEncoder() { |
201 // We don't DCHECK that we're on the encoder thread here, as this could be | 260 // We don't DCHECK that we're on the encoder thread here, as this could be |
202 // called from the dtor (main thread) or from OnSetFormat() (render thread); | 261 // called from the dtor (main thread) or from OnSetFormat() (render thread); |
203 if (opus_encoder_) { | 262 if (opus_encoder_) { |
204 opus_encoder_destroy(opus_encoder_); | 263 opus_encoder_destroy(opus_encoder_); |
205 opus_encoder_ = nullptr; | 264 opus_encoder_ = nullptr; |
206 } | 265 } |
207 } | 266 } |
208 | 267 |
209 void AudioTrackRecorder::AudioEncoder::TransferSamplesIntoBuffer( | |
210 const media::AudioBus* audio_bus, | |
211 int source_offset, | |
212 int buffer_fill_offset, | |
213 int num_samples) { | |
214 // TODO(ajose): Consider replacing with AudioBus::ToInterleaved(). | |
215 // http://crbug.com/547918 | |
216 DCHECK(encoder_thread_checker_.CalledOnValidThread()); | |
217 DCHECK(is_initialized()); | |
218 // Opus requires channel-interleaved samples in a single array. | |
219 for (int ch = 0; ch < audio_bus->channels(); ++ch) { | |
220 const float* src = audio_bus->channel(ch) + source_offset; | |
221 const float* const src_end = src + num_samples; | |
222 float* dest = | |
223 buffer_.get() + buffer_fill_offset * audio_params_.channels() + ch; | |
224 for (; src < src_end; ++src, dest += audio_params_.channels()) | |
225 *dest = *src; | |
226 } | |
227 } | |
228 | |
229 bool AudioTrackRecorder::AudioEncoder::EncodeFromFilledBuffer( | |
230 std::string* out) { | |
231 DCHECK(encoder_thread_checker_.CalledOnValidThread()); | |
232 DCHECK(is_initialized()); | |
233 | |
234 out->resize(OPUS_MAX_PAYLOAD_SIZE); | |
235 const opus_int32 result = opus_encode_float( | |
236 opus_encoder_, buffer_.get(), frames_per_buffer_, | |
237 reinterpret_cast<uint8_t*>(string_as_array(out)), OPUS_MAX_PAYLOAD_SIZE); | |
238 if (result > 1) { | |
239 // TODO(ajose): Investigate improving this. http://crbug.com/547918 | |
240 out->resize(result); | |
241 return true; | |
242 } | |
243 // If |result| in {0,1}, do nothing; the documentation says that a return | |
244 // value of zero or one means the packet does not need to be transmitted. | |
245 // Otherwise, we have an error. | |
246 DLOG_IF(ERROR, result < 0) << __FUNCTION__ | |
247 << " failed: " << opus_strerror(result); | |
248 return false; | |
249 } | |
250 | |
251 AudioTrackRecorder::AudioTrackRecorder( | 268 AudioTrackRecorder::AudioTrackRecorder( |
252 const blink::WebMediaStreamTrack& track, | 269 const blink::WebMediaStreamTrack& track, |
253 const OnEncodedAudioCB& on_encoded_audio_cb) | 270 const OnEncodedAudioCB& on_encoded_audio_cb) |
254 : track_(track), | 271 : track_(track), |
255 encoder_(new AudioEncoder(media::BindToCurrentLoop(on_encoded_audio_cb))), | 272 encoder_(new AudioEncoder(media::BindToCurrentLoop(on_encoded_audio_cb))), |
256 encoder_thread_("AudioEncoderThread") { | 273 encoder_thread_("AudioEncoderThread") { |
257 DCHECK(main_render_thread_checker_.CalledOnValidThread()); | 274 DCHECK(main_render_thread_checker_.CalledOnValidThread()); |
258 DCHECK(!track_.isNull()); | 275 DCHECK(!track_.isNull()); |
259 DCHECK(track_.extraData()); | 276 DCHECK(track_.extraData()); |
260 | 277 |
(...skipping 29 matching lines...) Expand all Loading... | |
290 | 307 |
291 scoped_ptr<media::AudioBus> audio_data = | 308 scoped_ptr<media::AudioBus> audio_data = |
292 media::AudioBus::Create(audio_bus.channels(), audio_bus.frames()); | 309 media::AudioBus::Create(audio_bus.channels(), audio_bus.frames()); |
293 audio_bus.CopyTo(audio_data.get()); | 310 audio_bus.CopyTo(audio_data.get()); |
294 | 311 |
295 encoder_thread_.task_runner()->PostTask( | 312 encoder_thread_.task_runner()->PostTask( |
296 FROM_HERE, base::Bind(&AudioEncoder::EncodeAudio, encoder_, | 313 FROM_HERE, base::Bind(&AudioEncoder::EncodeAudio, encoder_, |
297 base::Passed(&audio_data), capture_time)); | 314 base::Passed(&audio_data), capture_time)); |
298 } | 315 } |
299 | 316 |
300 int AudioTrackRecorder::GetOpusBufferDuration(int sample_rate) { | |
301 // Valid buffer durations in millseconds. Note there are other valid | |
302 // durations for Opus, see https://tools.ietf.org/html/rfc6716#section-2.1.4 | |
303 // Descending order as longer durations can increase compression performance. | |
304 const std::vector<int> opus_valid_buffer_durations_ms = {60, 40, 20, 10}; | |
305 | |
306 // Search for a duration such that |sample_rate| % |buffers_per_second| == 0, | |
307 // where |buffers_per_second| = 1000ms / |possible_duration|. | |
308 for (auto possible_duration : opus_valid_buffer_durations_ms) { | |
309 if (sample_rate * possible_duration % 1000 == 0) { | |
310 return possible_duration; | |
311 } | |
312 } | |
313 | |
314 // Otherwise, couldn't find a good duration. | |
315 return 0; | |
316 } | |
317 | |
318 } // namespace content | 317 } // namespace content |
OLD | NEW |