| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "content/renderer/media/audio_track_recorder.h" | 5 #include "content/renderer/media/audio_track_recorder.h" |
| 6 | 6 |
| 7 #include <stdint.h> | 7 #include <stdint.h> |
| 8 #include <utility> | 8 #include <utility> |
| 9 | 9 |
| 10 #include "base/bind.h" | 10 #include "base/bind.h" |
| 11 #include "base/macros.h" | 11 #include "base/macros.h" |
| 12 #include "base/stl_util.h" | 12 #include "base/stl_util.h" |
| 13 #include "content/renderer/media/media_stream_audio_track.h" | 13 #include "content/renderer/media/media_stream_audio_track.h" |
| 14 #include "media/base/audio_bus.h" | 14 #include "media/base/audio_bus.h" |
| 15 #include "media/base/audio_converter.h" | 15 #include "media/base/audio_converter.h" |
| 16 #include "media/base/audio_fifo.h" | 16 #include "media/base/audio_fifo.h" |
| 17 #include "media/base/audio_parameters.h" | 17 #include "media/base/audio_parameters.h" |
| 18 #include "media/base/audio_sample_types.h" |
| 18 #include "media/base/bind_to_current_loop.h" | 19 #include "media/base/bind_to_current_loop.h" |
| 19 #include "third_party/opus/src/include/opus.h" | 20 #include "third_party/opus/src/include/opus.h" |
| 20 | 21 |
| 21 // Note that this code follows the Chrome media convention of defining a "frame" | 22 // Note that this code follows the Chrome media convention of defining a "frame" |
| 22 // as "one multi-channel sample" as opposed to another common definition meaning | 23 // as "one multi-channel sample" as opposed to another common definition meaning |
| 23 // "a chunk of samples". Here this second definition of "frame" is called a | 24 // "a chunk of samples". Here this second definition of "frame" is called a |
| 24 // "buffer"; so what might be called "frame duration" is instead "buffer | 25 // "buffer"; so what might be called "frame duration" is instead "buffer |
| 25 // duration", and so on. | 26 // duration", and so on. |
| 26 | 27 |
| 27 namespace content { | 28 namespace content { |
| (...skipping 41 matching lines...) |
| 69 data_out->resize(result); | 70 data_out->resize(result); |
| 70 return true; | 71 return true; |
| 71 } | 72 } |
| 72 // If |result| in {0,1}, do nothing; the documentation says that a return | 73 // If |result| in {0,1}, do nothing; the documentation says that a return |
| 73 // value of zero or one means the packet does not need to be transmitted. | 74 // value of zero or one means the packet does not need to be transmitted. |
| 74 // Otherwise, we have an error. | 75 // Otherwise, we have an error. |
| 75 DLOG_IF(ERROR, result < 0) << " encode failed: " << opus_strerror(result); | 76 DLOG_IF(ERROR, result < 0) << " encode failed: " << opus_strerror(result); |
| 76 return false; | 77 return false; |
| 77 } | 78 } |
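
The return-value handling above mirrors the opus_encode_float() contract as described in the comment: a value greater than one is the packet length in bytes, zero or one means the packet does not need to be transmitted, and a negative value is an error code. A freestanding sketch of the same handling, assuming libopus is available and a 4000-byte maximum packet size (both assumptions, not values from this CL):

```cpp
#include <opus.h>

#include <string>
#include <vector>

// Hedged sketch: returns true and fills |packet| only when opus_encode_float()
// produced a packet worth transmitting, mirroring DoEncode()'s handling above.
bool EncodeOneBuffer(OpusEncoder* encoder, const float* interleaved_pcm,
                     int frames_per_buffer, std::string* packet) {
  std::vector<unsigned char> out(4000);  // assumed maximum packet size
  const opus_int32 result = opus_encode_float(
      encoder, interleaved_pcm, frames_per_buffer, out.data(),
      static_cast<opus_int32>(out.size()));
  if (result > 1) {
    packet->assign(reinterpret_cast<const char*>(out.data()), result);
    return true;  // |result| is the encoded packet length in bytes.
  }
  // 0 or 1: the packet does not need to be transmitted; < 0: an Opus error
  // (opus_strerror(result) gives a readable description).
  return false;
}
```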
| 78 | 79 |
| 79 // Interleaves |audio_bus| channels() of floats into a single output linear | |
| 80 // |buffer|. | |
| 81 // TODO(mcasas) https://crbug.com/580391 use AudioBus::ToInterleavedFloat(). | |
| 82 void ToInterleaved(media::AudioBus* audio_bus, float* buffer) { | |
| 83 for (int ch = 0; ch < audio_bus->channels(); ++ch) { | |
| 84 const float* src = audio_bus->channel(ch); | |
| 85 const float* const src_end = src + audio_bus->frames(); | |
| 86 float* dest = buffer + ch; | |
| 87 for (; src < src_end; ++src, dest += audio_bus->channels()) | |
| 88 *dest = *src; | |
| 89 } | |
| 90 } | |
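
The deleted ToInterleaved() helper above, and the AudioBus::ToInterleaved<media::Float32SampleTypeTraits>() call that replaces it later in this CL, both convert planar channel data into a single interleaved buffer. A freestanding sketch of that layout change, with made-up sample values and no dependency on media::AudioBus:

```cpp
#include <cstdio>
#include <vector>

// Planar-to-interleaved copy: channel |c| of frame |f| lands at index
// f * channels + c of the output, which is the layout Opus expects.
void Interleave(const std::vector<std::vector<float>>& planar,
                std::vector<float>* interleaved) {
  const size_t channels = planar.size();
  const size_t frames = channels ? planar[0].size() : 0;
  interleaved->resize(channels * frames);
  for (size_t c = 0; c < channels; ++c) {
    for (size_t f = 0; f < frames; ++f)
      (*interleaved)[f * channels + c] = planar[c][f];
  }
}

int main() {
  // Two channels, three frames: L = {1, 2, 3}, R = {4, 5, 6}.
  const std::vector<std::vector<float>> planar = {{1, 2, 3}, {4, 5, 6}};
  std::vector<float> interleaved;
  Interleave(planar, &interleaved);
  for (float s : interleaved)
    std::printf("%g ", s);  // Prints: 1 4 2 5 3 6
  std::printf("\n");
  return 0;
}
```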
| 91 | |
| 92 } // anonymous namespace | 80 } // anonymous namespace |
| 93 | 81 |
| 94 // Nested class encapsulating opus-related encoding details. It contains an | 82 // Nested class encapsulating opus-related encoding details. It contains an |
| 95 // AudioConverter to adapt incoming data to the format Opus likes to have. | 83 // AudioConverter to adapt incoming data to the format Opus likes to have. |
| 96 // AudioEncoder is created and destroyed on ATR's main thread (usually the main | 84 // AudioEncoder is created and destroyed on ATR's main thread (usually the main |
| 97 // render thread) but otherwise should operate entirely on |encoder_thread_|, | 85 // render thread) but otherwise should operate entirely on |encoder_thread_|, |
| 98 // which is owned by AudioTrackRecorder. Be sure to delete |encoder_thread_| | 86 // which is owned by AudioTrackRecorder. Be sure to delete |encoder_thread_| |
| 99 // before deleting the AudioEncoder using it. | 87 // before deleting the AudioEncoder using it. |
| 100 class AudioTrackRecorder::AudioEncoder | 88 class AudioTrackRecorder::AudioEncoder |
| 101 : public base::RefCountedThreadSafe<AudioEncoder>, | 89 : public base::RefCountedThreadSafe<AudioEncoder>, |
| (...skipping 150 matching lines...) |
| 252 // instead of | 240 // instead of |
| 253 // an AudioFifo, to avoid copying data needlessly since we know the sizes of | 241 // an AudioFifo, to avoid copying data needlessly since we know the sizes of |
| 254 // both input and output and they are multiples. | 242 // both input and output and they are multiples. |
| 255 fifo_->Push(input_bus.get()); | 243 fifo_->Push(input_bus.get()); |
| 256 | 244 |
| 257 // Wait to have enough |input_bus|s to guarantee a satisfactory conversion. | 245 // Wait to have enough |input_bus|s to guarantee a satisfactory conversion. |
| 258 while (fifo_->frames() >= input_params_.frames_per_buffer()) { | 246 while (fifo_->frames() >= input_params_.frames_per_buffer()) { |
| 259 std::unique_ptr<media::AudioBus> audio_bus = media::AudioBus::Create( | 247 std::unique_ptr<media::AudioBus> audio_bus = media::AudioBus::Create( |
| 260 output_params_.channels(), kOpusPreferredFramesPerBuffer); | 248 output_params_.channels(), kOpusPreferredFramesPerBuffer); |
| 261 converter_->Convert(audio_bus.get()); | 249 converter_->Convert(audio_bus.get()); |
| 262 ToInterleaved(audio_bus.get(), buffer_.get()); | 250 audio_bus->ToInterleaved<media::Float32SampleTypeTraits>( |
| | 251 audio_bus->frames(), buffer_.get()); |
| 263 | 252 |
| 264 std::unique_ptr<std::string> encoded_data(new std::string()); | 253 std::unique_ptr<std::string> encoded_data(new std::string()); |
| 265 if (DoEncode(opus_encoder_, buffer_.get(), kOpusPreferredFramesPerBuffer, | 254 if (DoEncode(opus_encoder_, buffer_.get(), kOpusPreferredFramesPerBuffer, |
| 266 encoded_data.get())) { | 255 encoded_data.get())) { |
| 267 const base::TimeTicks capture_time_of_first_sample = | 256 const base::TimeTicks capture_time_of_first_sample = |
| 268 capture_time - | 257 capture_time - |
| 269 base::TimeDelta::FromMicroseconds(fifo_->frames() * | 258 base::TimeDelta::FromMicroseconds(fifo_->frames() * |
| 270 base::Time::kMicrosecondsPerSecond / | 259 base::Time::kMicrosecondsPerSecond / |
| 271 input_params_.sample_rate()); | 260 input_params_.sample_rate()); |
| 272 on_encoded_audio_cb_.Run(output_params_, std::move(encoded_data), | 261 on_encoded_audio_cb_.Run(output_params_, std::move(encoded_data), |
| (...skipping 77 matching lines...) |
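
The capture_time_of_first_sample computation above backdates |capture_time| by the duration of the frames still queued in the FIFO, converted to microseconds via the input sample rate. A small arithmetic sketch of that adjustment; the 48 kHz rate and the leftover frame count are assumptions for illustration:

```cpp
#include <cstdio>

int main() {
  const int kSampleRateHz = 48000;       // assumed input sample rate
  const int frames_still_in_fifo = 960;  // assumed fifo_->frames() leftover

  // Same arithmetic as in the CL: duration = frames / sample_rate,
  // expressed in microseconds.
  const long long backdate_us =
      frames_still_in_fifo * 1000000LL / kSampleRateHz;

  // Prints "backdate capture_time by 20000 us (20 ms)".
  std::printf("backdate capture_time by %lld us (%lld ms)\n", backdate_us,
              backdate_us / 1000);
  return 0;
}
```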
| 350 } | 339 } |
| 351 | 340 |
| 352 void AudioTrackRecorder::Resume() { | 341 void AudioTrackRecorder::Resume() { |
| 353 DCHECK(main_render_thread_checker_.CalledOnValidThread()); | 342 DCHECK(main_render_thread_checker_.CalledOnValidThread()); |
| 354 DCHECK(encoder_); | 343 DCHECK(encoder_); |
| 355 encoder_thread_.task_runner()->PostTask( | 344 encoder_thread_.task_runner()->PostTask( |
| 356 FROM_HERE, base::Bind(&AudioEncoder::set_paused, encoder_, false)); | 345 FROM_HERE, base::Bind(&AudioEncoder::set_paused, encoder_, false)); |
| 357 } | 346 } |
| 358 | 347 |
| 359 } // namespace content | 348 } // namespace content |