OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media/webrtc_audio_device_impl.h" | 5 #include "content/renderer/media/webrtc_audio_device_impl.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/metrics/histogram.h" | 8 #include "base/metrics/histogram.h" |
9 #include "base/strings/string_util.h" | 9 #include "base/strings/string_util.h" |
10 #include "base/win/windows_version.h" | 10 #include "base/win/windows_version.h" |
11 #include "content/renderer/media/media_stream_audio_processor.h" | 11 #include "content/renderer/media/media_stream_audio_processor.h" |
12 #include "content/renderer/media/webrtc_audio_capturer.h" | 12 #include "content/renderer/media/webrtc_audio_capturer.h" |
13 #include "content/renderer/media/webrtc_audio_renderer.h" | 13 #include "content/renderer/media/webrtc_audio_renderer.h" |
14 #include "content/renderer/render_thread_impl.h" | 14 #include "content/renderer/render_thread_impl.h" |
15 #include "media/audio/audio_parameters.h" | 15 #include "media/audio/audio_parameters.h" |
16 #include "media/audio/sample_rates.h" | 16 #include "media/audio/sample_rates.h" |
17 | 17 |
18 using media::AudioParameters; | 18 using media::AudioParameters; |
19 using media::ChannelLayout; | 19 using media::ChannelLayout; |
20 | 20 |
21 namespace content { | 21 namespace content { |
22 | 22 |
// Constructs the device in an idle, un-initialized state. No audio paths
// are active until WebRTC calls Init()/Start* through the ADM interface.
// NOTE(review): this is the NEW side of the diff — |input_delay_ms_| and the
// audio-track-processing flag have been removed from the initializer list.
WebRtcAudioDeviceImpl::WebRtcAudioDeviceImpl()
    : ref_count_(0),
      audio_transport_callback_(NULL),  // Registered later via the ADM API.
      output_delay_ms_(0),
      initialized_(false),
      playing_(false),
      recording_(false),
      microphone_volume_(0) {
  DVLOG(1) << "WebRtcAudioDeviceImpl::WebRtcAudioDeviceImpl()";
}
36 | 33 |
// Destruction must happen on the construction thread (enforced by the
// thread checker); Terminate() is invoked so teardown logic is shared with
// explicit termination through the ADM interface.
WebRtcAudioDeviceImpl::~WebRtcAudioDeviceImpl() {
  DVLOG(1) << "WebRtcAudioDeviceImpl::~WebRtcAudioDeviceImpl()";
  DCHECK(thread_checker_.CalledOnValidThread());
  Terminate();
}
42 | 39 |
43 int32_t WebRtcAudioDeviceImpl::AddRef() { | 40 int32_t WebRtcAudioDeviceImpl::AddRef() { |
44 DCHECK(thread_checker_.CalledOnValidThread()); | 41 DCHECK(thread_checker_.CalledOnValidThread()); |
45 return base::subtle::Barrier_AtomicIncrement(&ref_count_, 1); | 42 return base::subtle::Barrier_AtomicIncrement(&ref_count_, 1); |
46 } | 43 } |
47 | 44 |
48 int32_t WebRtcAudioDeviceImpl::Release() { | 45 int32_t WebRtcAudioDeviceImpl::Release() { |
49 DCHECK(thread_checker_.CalledOnValidThread()); | 46 DCHECK(thread_checker_.CalledOnValidThread()); |
50 int ret = base::subtle::Barrier_AtomicIncrement(&ref_count_, -1); | 47 int ret = base::subtle::Barrier_AtomicIncrement(&ref_count_, -1); |
51 if (ret == 0) { | 48 if (ret == 0) { |
52 delete this; | 49 delete this; |
53 } | 50 } |
54 return ret; | 51 return ret; |
55 } | 52 } |
// Capture-side data callback (OLD side of the diff — deleted in the NEW
// revision). Receives a chunk of recorded 16-bit linear PCM and forwards it
// to the registered webrtc::AudioTransport in 10 ms slices. Returns the
// latest non-zero microphone level suggested by the sink, or 0 if no change
// was requested.
// NOTE(review): presumably runs on a capture audio thread, not the creation
// thread — there is no thread-checker here; confirm against the caller.
int WebRtcAudioDeviceImpl::OnData(const int16* audio_data,
                                  int sample_rate,
                                  int number_of_channels,
                                  int number_of_frames,
                                  const std::vector<int>& channels,
                                  int audio_delay_milliseconds,
                                  int current_volume,
                                  bool need_audio_processing,
                                  bool key_pressed) {
  int total_delay_ms = 0;
  {
    base::AutoLock auto_lock(lock_);
    // Return immediately when not recording or |channels| is empty.
    // See crbug.com/274017: renderer crash dereferencing invalid channels[0].
    if (!recording_ || channels.empty())
      return 0;

    // Store the reported audio delay locally; the total delay handed to the
    // sink combines the capture delay with the last reported render delay.
    input_delay_ms_ = audio_delay_milliseconds;
    total_delay_ms = input_delay_ms_ + output_delay_ms_;
    DVLOG(2) << "total delay: " << input_delay_ms_ + output_delay_ms_;
  }

  // Write audio frames in blocks of 10 milliseconds to the registered
  // webrtc::AudioTransport sink. Keep writing until our internal byte
  // buffer is empty.
  const int16* audio_buffer = audio_data;
  const int frames_per_10_ms = (sample_rate / 100);
  // The chunk must be an exact multiple of 10 ms worth of frames.
  CHECK_EQ(number_of_frames % frames_per_10_ms, 0);
  int accumulated_audio_frames = 0;
  uint32_t new_volume = 0;

  // The lock here is to protect a race in the resampler inside webrtc when
  // there are more than one input stream calling OnData(), which can happen
  // when the users setup two getUserMedia, one for the microphone, another
  // for WebAudio. Currently we don't have a better way to fix it except for
  // adding a lock here to sequence the call.
  // TODO(xians): Remove this workaround after we move the
  // webrtc::AudioProcessing module to Chrome. See http://crbug/264611 for
  // details.
  base::AutoLock auto_lock(capture_callback_lock_);
  while (accumulated_audio_frames < number_of_frames) {
    // Deliver 10ms of recorded 16-bit linear PCM audio.
    int new_mic_level = audio_transport_callback_->OnDataAvailable(
        &channels[0],
        channels.size(),
        audio_buffer,
        sample_rate,
        number_of_channels,
        frames_per_10_ms,
        total_delay_ms,
        current_volume,
        key_pressed,
        need_audio_processing);

    accumulated_audio_frames += frames_per_10_ms;
    // Interleaved samples: advance by frames * channels, not bytes.
    audio_buffer += frames_per_10_ms * number_of_channels;

    // The latest non-zero new microphone level will be returned.
    if (new_mic_level)
      new_volume = new_mic_level;
  }

  return new_volume;
}
121 | |
// Capture-format-change notification (OLD side of the diff — deleted in the
// NEW revision). The new parameters are only logged; nothing is stored.
void WebRtcAudioDeviceImpl::OnSetFormat(
    const media::AudioParameters& params) {
  DVLOG(1) << "WebRtcAudioDeviceImpl::OnSetFormat()";
}
126 | 53 |
127 void WebRtcAudioDeviceImpl::RenderData(media::AudioBus* audio_bus, | 54 void WebRtcAudioDeviceImpl::RenderData(media::AudioBus* audio_bus, |
128 int sample_rate, | 55 int sample_rate, |
129 int audio_delay_milliseconds, | 56 int audio_delay_milliseconds, |
130 base::TimeDelta* current_time) { | 57 base::TimeDelta* current_time) { |
131 render_buffer_.resize(audio_bus->frames() * audio_bus->channels()); | 58 render_buffer_.resize(audio_bus->frames() * audio_bus->channels()); |
132 | 59 |
133 { | 60 { |
134 base::AutoLock auto_lock(lock_); | 61 base::AutoLock auto_lock(lock_); |
135 DCHECK(audio_transport_callback_); | 62 DCHECK(audio_transport_callback_); |
136 // Store the reported audio delay locally. | 63 // Store the reported audio delay locally. |
137 output_delay_ms_ = audio_delay_milliseconds; | 64 output_delay_ms_ = audio_delay_milliseconds; |
138 } | 65 } |
139 | 66 |
140 int frames_per_10_ms = (sample_rate / 100); | 67 int frames_per_10_ms = (sample_rate / 100); |
141 int bytes_per_sample = sizeof(render_buffer_[0]); | 68 int bytes_per_sample = sizeof(render_buffer_[0]); |
142 const int bytes_per_10_ms = | 69 const int bytes_per_10_ms = |
143 audio_bus->channels() * frames_per_10_ms * bytes_per_sample; | 70 audio_bus->channels() * frames_per_10_ms * bytes_per_sample; |
144 DCHECK_EQ(audio_bus->frames() % frames_per_10_ms, 0); | 71 DCHECK_EQ(audio_bus->frames() % frames_per_10_ms, 0); |
145 | 72 |
146 // Get audio frames in blocks of 10 milliseconds from the registered | 73 // Get audio frames in blocks of 10 milliseconds from the registered |
147 // webrtc::AudioTransport source. Keep reading until our internal buffer | 74 // webrtc::AudioTransport source. Keep reading until our internal buffer |
148 // is full. | 75 // is full. |
149 uint32_t num_audio_frames = 0; | |
150 int accumulated_audio_frames = 0; | 76 int accumulated_audio_frames = 0; |
151 int16* audio_data = &render_buffer_[0]; | 77 int16* audio_data = &render_buffer_[0]; |
152 while (accumulated_audio_frames < audio_bus->frames()) { | 78 while (accumulated_audio_frames < audio_bus->frames()) { |
153 // Get 10ms and append output to temporary byte buffer. | 79 // Get 10ms and append output to temporary byte buffer. |
154 int64_t elapsed_time_ms = -1; | 80 int64_t elapsed_time_ms = -1; |
155 int64_t ntp_time_ms = -1; | 81 int64_t ntp_time_ms = -1; |
156 if (is_audio_track_processing_enabled_) { | 82 static const int kBitsPerByte = 8; |
157 // When audio processing is enabled in the audio track, we use | 83 audio_transport_callback_->PullRenderData(bytes_per_sample * kBitsPerByte, |
158 // PullRenderData() instead of NeedMorePlayData() to avoid passing the | 84 sample_rate, |
159 // render data to the APM in WebRTC as reference signal for echo | 85 audio_bus->channels(), |
160 // cancellation. | 86 frames_per_10_ms, |
161 static const int kBitsPerByte = 8; | 87 audio_data, |
162 audio_transport_callback_->PullRenderData(bytes_per_sample * kBitsPerByte, | 88 &elapsed_time_ms, |
163 sample_rate, | 89 &ntp_time_ms); |
164 audio_bus->channels(), | 90 accumulated_audio_frames += frames_per_10_ms; |
165 frames_per_10_ms, | |
166 audio_data, | |
167 &elapsed_time_ms, | |
168 &ntp_time_ms); | |
169 accumulated_audio_frames += frames_per_10_ms; | |
170 } else { | |
171 // TODO(xians): Remove the following code after the APM in WebRTC is | |
172 // deprecated. | |
173 audio_transport_callback_->NeedMorePlayData(frames_per_10_ms, | |
174 bytes_per_sample, | |
175 audio_bus->channels(), | |
176 sample_rate, | |
177 audio_data, | |
178 num_audio_frames, | |
179 &elapsed_time_ms, | |
180 &ntp_time_ms); | |
181 accumulated_audio_frames += num_audio_frames; | |
182 } | |
183 if (elapsed_time_ms >= 0) { | 91 if (elapsed_time_ms >= 0) { |
184 *current_time = base::TimeDelta::FromMilliseconds(elapsed_time_ms); | 92 *current_time = base::TimeDelta::FromMilliseconds(elapsed_time_ms); |
185 } | 93 } |
186 audio_data += bytes_per_10_ms; | 94 audio_data += bytes_per_10_ms; |
187 } | 95 } |
188 | 96 |
189 // De-interleave each channel and convert to 32-bit floating-point | 97 // De-interleave each channel and convert to 32-bit floating-point |
190 // with nominal range -1.0 -> +1.0 to match the callback format. | 98 // with nominal range -1.0 -> +1.0 to match the callback format. |
191 audio_bus->FromInterleaved(&render_buffer_[0], | 99 audio_bus->FromInterleaved(&render_buffer_[0], |
192 audio_bus->frames(), | 100 audio_bus->frames(), |
(...skipping 225 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
418 return 0; | 326 return 0; |
419 } | 327 } |
420 | 328 |
// Reports the most recent render-side delay in milliseconds, as last stored
// by RenderData() under |lock_|. Always succeeds (returns 0).
int32_t WebRtcAudioDeviceImpl::PlayoutDelay(uint16_t* delay_ms) const {
  base::AutoLock auto_lock(lock_);
  *delay_ms = static_cast<uint16_t>(output_delay_ms_);
  return 0;
}
426 | 334 |
// NEW side of the diff: the per-device recording delay can no longer be
// reported from here, so this entry point is now a hard failure.
int32_t WebRtcAudioDeviceImpl::RecordingDelay(uint16_t* delay_ms) const {
  // There is no way to report a correct delay value to WebRTC since there
  // might be multiple WebRtcAudioCapturer instances.
  NOTREACHED();
  return -1;
}
432 | 341 |
433 int32_t WebRtcAudioDeviceImpl::RecordingSampleRate( | 342 int32_t WebRtcAudioDeviceImpl::RecordingSampleRate( |
434 uint32_t* sample_rate) const { | 343 uint32_t* sample_rate) const { |
435 // We use the default capturer as the recording sample rate. | 344 // We use the default capturer as the recording sample rate. |
436 scoped_refptr<WebRtcAudioCapturer> capturer(GetDefaultCapturer()); | 345 scoped_refptr<WebRtcAudioCapturer> capturer(GetDefaultCapturer()); |
437 if (!capturer.get()) | 346 if (!capturer.get()) |
438 return -1; | 347 return -1; |
439 | 348 |
440 *sample_rate = static_cast<uint32_t>( | 349 *sample_rate = static_cast<uint32_t>( |
(...skipping 79 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
520 // If there is no capturer or there are more than one open capture devices, | 429 // If there is no capturer or there are more than one open capture devices, |
521 // return false. | 430 // return false. |
522 if (capturers_.empty() || capturers_.size() > 1) | 431 if (capturers_.empty() || capturers_.size() > 1) |
523 return false; | 432 return false; |
524 | 433 |
525 return GetDefaultCapturer()->GetPairedOutputParameters( | 434 return GetDefaultCapturer()->GetPairedOutputParameters( |
526 session_id, output_sample_rate, output_frames_per_buffer); | 435 session_id, output_sample_rate, output_frames_per_buffer); |
527 } | 436 } |
528 | 437 |
529 } // namespace content | 438 } // namespace content |
OLD | NEW |