OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "content/renderer/media/webrtc_audio_device_impl.h" | 5 #include "content/renderer/media/webrtc_audio_device_impl.h" |
6 | 6 |
7 #include "base/bind.h" | 7 #include "base/bind.h" |
8 #include "base/metrics/histogram.h" | 8 #include "base/metrics/histogram.h" |
9 #include "base/strings/string_util.h" | 9 #include "base/strings/string_util.h" |
10 #include "base/win/windows_version.h" | 10 #include "base/win/windows_version.h" |
11 #include "content/renderer/media/media_stream_audio_processor.h" | 11 #include "content/renderer/media/media_stream_audio_processor.h" |
12 #include "content/renderer/media/webrtc_audio_capturer.h" | 12 #include "content/renderer/media/webrtc_audio_capturer.h" |
13 #include "content/renderer/media/webrtc_audio_renderer.h" | 13 #include "content/renderer/media/webrtc_audio_renderer.h" |
14 #include "content/renderer/render_thread_impl.h" | 14 #include "content/renderer/render_thread_impl.h" |
15 #include "media/audio/audio_parameters.h" | 15 #include "media/audio/audio_parameters.h" |
16 #include "media/audio/sample_rates.h" | 16 #include "media/audio/sample_rates.h" |
17 | 17 |
18 using media::AudioParameters; | 18 using media::AudioParameters; |
19 using media::ChannelLayout; | 19 using media::ChannelLayout; |
20 | 20 |
21 namespace content { | 21 namespace content { |
22 | 22 |
23 WebRtcAudioDeviceImpl::WebRtcAudioDeviceImpl() | 23 WebRtcAudioDeviceImpl::WebRtcAudioDeviceImpl() |
24 : ref_count_(0), | 24 : ref_count_(0), |
25 audio_transport_callback_(NULL), | 25 audio_transport_callback_(NULL), |
26 input_delay_ms_(0), | |
27 output_delay_ms_(0), | 26 output_delay_ms_(0), |
28 initialized_(false), | 27 initialized_(false), |
29 playing_(false), | 28 playing_(false), |
30 recording_(false), | 29 recording_(false), |
31 microphone_volume_(0), | 30 microphone_volume_(0) { |
32 is_audio_track_processing_enabled_( | |
33 MediaStreamAudioProcessor::IsAudioTrackProcessingEnabled()) { | |
34 DVLOG(1) << "WebRtcAudioDeviceImpl::WebRtcAudioDeviceImpl()"; | 31 DVLOG(1) << "WebRtcAudioDeviceImpl::WebRtcAudioDeviceImpl()"; |
35 // This object can be constructed on either the signaling thread or the main | 32 // This object can be constructed on either the signaling thread or the main |
36 // thread, so we need to detach these thread checkers here and have them | 33 // thread, so we need to detach these thread checkers here and have them |
37 // initialize automatically when the first methods are called. | 34 // initialize automatically when the first methods are called. |
38 signaling_thread_checker_.DetachFromThread(); | 35 signaling_thread_checker_.DetachFromThread(); |
39 main_thread_checker_.DetachFromThread(); | 36 main_thread_checker_.DetachFromThread(); |
40 | 37 |
41 worker_thread_checker_.DetachFromThread(); | 38 worker_thread_checker_.DetachFromThread(); |
42 } | 39 } |
43 | 40 |
(...skipping 12 matching lines...) |
56 int32_t WebRtcAudioDeviceImpl::Release() { | 53 int32_t WebRtcAudioDeviceImpl::Release() { |
57 // We can be AddRefed and released on both the UI thread and | 54 // We can be AddRefed and released on both the UI thread and |
58 // libjingle's signaling thread. | 55 // libjingle's signaling thread. |
59 int ret = base::subtle::Barrier_AtomicIncrement(&ref_count_, -1); | 56 int ret = base::subtle::Barrier_AtomicIncrement(&ref_count_, -1); |
60 if (ret == 0) { | 57 if (ret == 0) { |
61 delete this; | 58 delete this; |
62 } | 59 } |
63 return ret; | 60 return ret; |
64 } | 61 } |
65 | 62 |
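Note on the AddRef()/Release() pair kept above: because the object can be retained and released from both the UI thread and libjingle's signaling thread, the ref count is maintained with lock-free atomics and the thread that drops it to zero deletes the object. A minimal standalone sketch of that pattern, with std::atomic standing in for base::subtle::Barrier_AtomicIncrement (illustrative only, not Chromium's implementation):

#include <atomic>

class RefCountedAudioDevice {
 public:
  // Relaxed ordering is enough for the increment; the caller already
  // holds a reference, so the object cannot go away underneath it.
  int AddRef() { return ref_count_.fetch_add(1, std::memory_order_relaxed) + 1; }

  // Acquire-release ordering makes writes done by other threads visible
  // to the thread that performs the final release and the delete.
  int Release() {
    int ret = ref_count_.fetch_sub(1, std::memory_order_acq_rel) - 1;
    if (ret == 0)
      delete this;
    return ret;
  }

 private:
  ~RefCountedAudioDevice() = default;  // Only Release() may destroy the object.
  std::atomic<int> ref_count_{0};
};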
66 int WebRtcAudioDeviceImpl::OnData(const int16* audio_data, | |
67 int sample_rate, | |
68 int number_of_channels, | |
69 int number_of_frames, | |
70 const std::vector<int>& channels, | |
71 int audio_delay_milliseconds, | |
72 int current_volume, | |
73 bool need_audio_processing, | |
74 bool key_pressed) { | |
75 DCHECK(worker_thread_checker_.CalledOnValidThread()); | |
76 int total_delay_ms = 0; | |
77 { | |
78 base::AutoLock auto_lock(lock_); | |
79 // Return immediately when not recording or |channels| is empty. | |
80 // See crbug.com/274017: renderer crash dereferencing invalid channels[0]. | |
81 if (!recording_ || channels.empty()) | |
82 return 0; | |
83 | |
84 // Store the reported audio delay locally. | |
85 input_delay_ms_ = audio_delay_milliseconds; | |
86 total_delay_ms = input_delay_ms_ + output_delay_ms_; | |
87 DVLOG(2) << "total delay: " << input_delay_ms_ + output_delay_ms_; | |
88 } | |
89 | |
90 // Write audio frames in blocks of 10 milliseconds to the registered | |
91 // webrtc::AudioTransport sink. Keep writing until our internal byte | |
92 // buffer is empty. | |
93 const int16* audio_buffer = audio_data; | |
94 const int frames_per_10_ms = (sample_rate / 100); | |
95 CHECK_EQ(number_of_frames % frames_per_10_ms, 0); | |
96 int accumulated_audio_frames = 0; | |
97 uint32_t new_volume = 0; | |
98 | |
99 // The lock here protects against a race in the resampler inside webrtc when |
100 // more than one input stream is calling OnData(), which can happen when the |
101 // user sets up two getUserMedia streams, one for the microphone and another |
102 // for WebAudio. Currently we don't have a better way to fix it except for |
103 // adding a lock here to sequence the calls. |
104 // TODO(xians): Remove this workaround after we move the | |
105 // webrtc::AudioProcessing module to Chrome. See http://crbug/264611 for | |
106 // details. | |
107 base::AutoLock auto_lock(capture_callback_lock_); | |
108 while (accumulated_audio_frames < number_of_frames) { | |
109 // Deliver 10ms of recorded 16-bit linear PCM audio. | |
110 int new_mic_level = audio_transport_callback_->OnDataAvailable( | |
111 &channels[0], | |
112 channels.size(), | |
113 audio_buffer, | |
114 sample_rate, | |
115 number_of_channels, | |
116 frames_per_10_ms, | |
117 total_delay_ms, | |
118 current_volume, | |
119 key_pressed, | |
120 need_audio_processing); | |
121 | |
122 accumulated_audio_frames += frames_per_10_ms; | |
123 audio_buffer += frames_per_10_ms * number_of_channels; | |
124 | |
125 // The latest non-zero new microphone level will be returned. | |
126 if (new_mic_level) | |
127 new_volume = new_mic_level; | |
128 } | |
129 | |
130 return new_volume; | |
131 } | |
132 | |
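For context on the OnData() path deleted above: captured audio was handed to the webrtc::AudioTransport sink in fixed 10 ms blocks, where one block is sample_rate / 100 frames and the interleaved pointer advances by frames_per_10_ms * number_of_channels samples per block. A self-contained sketch of that chunking, assuming a hypothetical |deliver| callback in place of OnDataAvailable():

#include <cassert>
#include <cstdint>
#include <functional>

// Forwards an interleaved capture buffer to |deliver| in 10 ms blocks.
// |deliver| is a hypothetical stand-in for OnDataAvailable(); arguments are
// (block pointer, frames in block, channel count).
void ForwardCaptureInTenMsBlocks(
    const int16_t* interleaved_audio,
    int sample_rate,
    int channels,
    int frames,
    const std::function<void(const int16_t*, int, int)>& deliver) {
  const int frames_per_10_ms = sample_rate / 100;  // e.g. 480 frames at 48 kHz.
  assert(frames % frames_per_10_ms == 0);          // Mirrors the CHECK_EQ above.
  const int16_t* block = interleaved_audio;
  for (int delivered = 0; delivered < frames; delivered += frames_per_10_ms) {
    deliver(block, frames_per_10_ms, channels);
    // Interleaved layout: advance by frames * channels samples per block.
    block += frames_per_10_ms * channels;
  }
}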
133 void WebRtcAudioDeviceImpl::OnSetFormat( | |
134 const media::AudioParameters& params) { | |
135 DVLOG(1) << "WebRtcAudioDeviceImpl::OnSetFormat()"; | |
136 } | |
137 | |
138 void WebRtcAudioDeviceImpl::RenderData(media::AudioBus* audio_bus, | 63 void WebRtcAudioDeviceImpl::RenderData(media::AudioBus* audio_bus, |
139 int sample_rate, | 64 int sample_rate, |
140 int audio_delay_milliseconds, | 65 int audio_delay_milliseconds, |
141 base::TimeDelta* current_time) { | 66 base::TimeDelta* current_time) { |
142 render_buffer_.resize(audio_bus->frames() * audio_bus->channels()); | 67 render_buffer_.resize(audio_bus->frames() * audio_bus->channels()); |
143 | 68 |
144 { | 69 { |
145 base::AutoLock auto_lock(lock_); | 70 base::AutoLock auto_lock(lock_); |
146 DCHECK(audio_transport_callback_); | 71 DCHECK(audio_transport_callback_); |
147 // Store the reported audio delay locally. | 72 // Store the reported audio delay locally. |
148 output_delay_ms_ = audio_delay_milliseconds; | 73 output_delay_ms_ = audio_delay_milliseconds; |
149 } | 74 } |
150 | 75 |
151 int frames_per_10_ms = (sample_rate / 100); | 76 int frames_per_10_ms = (sample_rate / 100); |
152 int bytes_per_sample = sizeof(render_buffer_[0]); | 77 int bytes_per_sample = sizeof(render_buffer_[0]); |
153 const int bytes_per_10_ms = | 78 const int bytes_per_10_ms = |
154 audio_bus->channels() * frames_per_10_ms * bytes_per_sample; | 79 audio_bus->channels() * frames_per_10_ms * bytes_per_sample; |
155 DCHECK_EQ(audio_bus->frames() % frames_per_10_ms, 0); | 80 DCHECK_EQ(audio_bus->frames() % frames_per_10_ms, 0); |
156 | 81 |
157 // Get audio frames in blocks of 10 milliseconds from the registered | 82 // Get audio frames in blocks of 10 milliseconds from the registered |
158 // webrtc::AudioTransport source. Keep reading until our internal buffer | 83 // webrtc::AudioTransport source. Keep reading until our internal buffer |
159 // is full. | 84 // is full. |
160 uint32_t num_audio_frames = 0; | |
161 int accumulated_audio_frames = 0; | 85 int accumulated_audio_frames = 0; |
162 int16* audio_data = &render_buffer_[0]; | 86 int16* audio_data = &render_buffer_[0]; |
163 while (accumulated_audio_frames < audio_bus->frames()) { | 87 while (accumulated_audio_frames < audio_bus->frames()) { |
164 // Get 10ms and append output to temporary byte buffer. | 88 // Get 10ms and append output to temporary byte buffer. |
165 int64_t elapsed_time_ms = -1; | 89 int64_t elapsed_time_ms = -1; |
166 int64_t ntp_time_ms = -1; | 90 int64_t ntp_time_ms = -1; |
167 if (is_audio_track_processing_enabled_) { | 91 static const int kBitsPerByte = 8; |
168 // When audio processing is enabled in the audio track, we use | 92 audio_transport_callback_->PullRenderData(bytes_per_sample * kBitsPerByte, |
169 // PullRenderData() instead of NeedMorePlayData() to avoid passing the | 93 sample_rate, |
170 // render data to the APM in WebRTC as reference signal for echo | 94 audio_bus->channels(), |
171 // cancellation. | 95 frames_per_10_ms, |
172 static const int kBitsPerByte = 8; | 96 audio_data, |
173 audio_transport_callback_->PullRenderData(bytes_per_sample * kBitsPerByte, | 97 &elapsed_time_ms, |
174 sample_rate, | 98 &ntp_time_ms); |
175 audio_bus->channels(), | 99 accumulated_audio_frames += frames_per_10_ms; |
176 frames_per_10_ms, | |
177 audio_data, | |
178 &elapsed_time_ms, | |
179 &ntp_time_ms); | |
180 accumulated_audio_frames += frames_per_10_ms; | |
181 } else { | |
182 // TODO(xians): Remove the following code after the APM in WebRTC is | |
183 // deprecated. | |
184 audio_transport_callback_->NeedMorePlayData(frames_per_10_ms, | |
185 bytes_per_sample, | |
186 audio_bus->channels(), | |
187 sample_rate, | |
188 audio_data, | |
189 num_audio_frames, | |
190 &elapsed_time_ms, | |
191 &ntp_time_ms); | |
192 accumulated_audio_frames += num_audio_frames; | |
193 } | |
194 if (elapsed_time_ms >= 0) { | 100 if (elapsed_time_ms >= 0) { |
195 *current_time = base::TimeDelta::FromMilliseconds(elapsed_time_ms); | 101 *current_time = base::TimeDelta::FromMilliseconds(elapsed_time_ms); |
196 } | 102 } |
197 audio_data += bytes_per_10_ms; | 103 audio_data += bytes_per_10_ms; |
198 } | 104 } |
199 | 105 |
200 // De-interleave each channel and convert to 32-bit floating-point | 106 // De-interleave each channel and convert to 32-bit floating-point |
201 // with nominal range -1.0 -> +1.0 to match the callback format. | 107 // with nominal range -1.0 -> +1.0 to match the callback format. |
202 audio_bus->FromInterleaved(&render_buffer_[0], | 108 audio_bus->FromInterleaved(&render_buffer_[0], |
203 audio_bus->frames(), | 109 audio_bus->frames(), |
(...skipping 249 matching lines...) |
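The media::AudioBus::FromInterleaved() call at the end of RenderData() above de-interleaves the int16 render buffer into per-channel planes and converts the samples to 32-bit floats in the nominal -1.0 to +1.0 range. A rough illustrative re-implementation of that conversion, not Chromium's actual AudioBus code; the single 1/32768 scale factor is an assumption:

#include <cstdint>
#include <vector>

// De-interleaves |interleaved| (frame-major, |channels| samples per frame)
// into one float vector per channel, scaled to roughly -1.0..+1.0.
std::vector<std::vector<float>> DeinterleaveToFloat(const int16_t* interleaved,
                                                    int frames,
                                                    int channels) {
  std::vector<std::vector<float>> planar(channels, std::vector<float>(frames));
  constexpr float kScale = 1.0f / 32768.0f;  // Assumed symmetric int16 scaling.
  for (int frame = 0; frame < frames; ++frame) {
    for (int ch = 0; ch < channels; ++ch) {
      planar[ch][frame] = interleaved[frame * channels + ch] * kScale;
    }
  }
  return planar;
}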
453 | 359 |
454 int32_t WebRtcAudioDeviceImpl::PlayoutDelay(uint16_t* delay_ms) const { | 360 int32_t WebRtcAudioDeviceImpl::PlayoutDelay(uint16_t* delay_ms) const { |
455 DCHECK(worker_thread_checker_.CalledOnValidThread()); | 361 DCHECK(worker_thread_checker_.CalledOnValidThread()); |
456 base::AutoLock auto_lock(lock_); | 362 base::AutoLock auto_lock(lock_); |
457 *delay_ms = static_cast<uint16_t>(output_delay_ms_); | 363 *delay_ms = static_cast<uint16_t>(output_delay_ms_); |
458 return 0; | 364 return 0; |
459 } | 365 } |
460 | 366 |
461 int32_t WebRtcAudioDeviceImpl::RecordingDelay(uint16_t* delay_ms) const { | 367 int32_t WebRtcAudioDeviceImpl::RecordingDelay(uint16_t* delay_ms) const { |
462 DCHECK(signaling_thread_checker_.CalledOnValidThread()); | 368 DCHECK(signaling_thread_checker_.CalledOnValidThread()); |
463 base::AutoLock auto_lock(lock_); | 369 |
464 *delay_ms = static_cast<uint16_t>(input_delay_ms_); | 370 // There is no way to report a correct delay value to WebRTC since there |
465 return 0; | 371 // might be multiple WebRtcAudioCapturer instances. |
| 372 NOTREACHED(); |
| 373 return -1; |
466 } | 374 } |
467 | 375 |
468 int32_t WebRtcAudioDeviceImpl::RecordingSampleRate( | 376 int32_t WebRtcAudioDeviceImpl::RecordingSampleRate( |
469 uint32_t* sample_rate) const { | 377 uint32_t* sample_rate) const { |
470 DCHECK(signaling_thread_checker_.CalledOnValidThread()); | 378 DCHECK(signaling_thread_checker_.CalledOnValidThread()); |
471 // We use the default capturer's sample rate as the recording sample rate. | 379 // We use the default capturer's sample rate as the recording sample rate. |
472 scoped_refptr<WebRtcAudioCapturer> capturer(GetDefaultCapturer()); | 380 scoped_refptr<WebRtcAudioCapturer> capturer(GetDefaultCapturer()); |
473 if (!capturer.get()) | 381 if (!capturer.get()) |
474 return -1; | 382 return -1; |
475 | 383 |
(...skipping 87 matching lines...) |
563 // If there is no capturer or there is more than one open capture device, | 471 // If there is no capturer or there is more than one open capture device, |
564 // return false. | 472 // return false. |
565 if (capturers_.size() != 1) | 473 if (capturers_.size() != 1) |
566 return false; | 474 return false; |
567 | 475 |
568 return capturers_.back()->GetPairedOutputParameters( | 476 return capturers_.back()->GetPairedOutputParameters( |
569 session_id, output_sample_rate, output_frames_per_buffer); | 477 session_id, output_sample_rate, output_frames_per_buffer); |
570 } | 478 } |
571 | 479 |
572 } // namespace content | 480 } // namespace content |