Chromium Code Reviews| Index: webrtc/modules/audio_processing/audio_processing_impl.cc |
| diff --git a/webrtc/modules/audio_processing/audio_processing_impl.cc b/webrtc/modules/audio_processing/audio_processing_impl.cc |
| index 21629c53158a2fc800a735d4baf923a0e1c38003..1c7b55cb223b51a356cceb439a0e5a29d5b2f5af 100644 |
| --- a/webrtc/modules/audio_processing/audio_processing_impl.cc |
| +++ b/webrtc/modules/audio_processing/audio_processing_impl.cc |
| @@ -37,7 +37,6 @@ extern "C" { |
| #include "webrtc/modules/audio_processing/transient/transient_suppressor.h" |
| #include "webrtc/modules/audio_processing/voice_detection_impl.h" |
| #include "webrtc/modules/include/module_common_types.h" |
| -#include "webrtc/system_wrappers/include/critical_section_wrapper.h" |
| #include "webrtc/system_wrappers/include/file_wrapper.h" |
| #include "webrtc/system_wrappers/include/logging.h" |
| #include "webrtc/system_wrappers/include/metrics.h" |
| @@ -75,9 +74,41 @@ static bool LayoutHasKeyboard(AudioProcessing::ChannelLayout layout) { |
| assert(false); |
| return false; |
| } |
| - |
| } // namespace |
| +struct ApmPublicSubmodules { |
| + ApmPublicSubmodules() |
| + : echo_cancellation(NULL), |
| + echo_control_mobile(NULL), |
| + gain_control(NULL), |
| + high_pass_filter(NULL), |
| + level_estimator(NULL), |
| + noise_suppression(NULL), |
| + voice_detection(NULL) {} |
|
kwiberg-webrtc
2015/11/23 22:15:10
nullptr
peah-webrtc
2015/11/24 21:42:23
Agree, and in the spirit of a boyscout I did a sea
|
| + // Accessed externally of APM without any lock acquired. |
| + EchoCancellationImpl* echo_cancellation; |
| + EchoControlMobileImpl* echo_control_mobile; |
| + GainControlImpl* gain_control; |
| + HighPassFilterImpl* high_pass_filter; |
| + LevelEstimatorImpl* level_estimator; |
| + NoiseSuppressionImpl* noise_suppression; |
| + VoiceDetectionImpl* voice_detection; |
| + rtc::scoped_ptr<GainControlForNewAgc> gain_control_for_new_agc; |
| + |
| + // Accessed internally from both render and capture. |
| + rtc::scoped_ptr<TransientSuppressor> transient_suppressor; |
| + rtc::scoped_ptr<IntelligibilityEnhancer> intelligibility_enhancer; |
| +}; |
| + |
| +struct ApmPrivateSubmodules { |
| + explicit ApmPrivateSubmodules(Beamformer<float>* beamformer) |
| + : beamformer(beamformer) {} |
| + // Accessed internally from capture or during initialization |
| + std::list<ProcessingComponent*> component_list; |
| + rtc::scoped_ptr<Beamformer<float>> beamformer; |
| + rtc::scoped_ptr<AgcManagerDirect> agc_manager; |
| +}; |
| + |
| // Throughout webrtc, it's assumed that success is represented by zero. |
| static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero"); |
| @@ -183,106 +214,100 @@ AudioProcessingImpl::AudioProcessingImpl(const Config& config) |
| AudioProcessingImpl::AudioProcessingImpl(const Config& config, |
| Beamformer<float>* beamformer) |
| - : echo_cancellation_(NULL), |
| - echo_control_mobile_(NULL), |
| - gain_control_(NULL), |
| - high_pass_filter_(NULL), |
| - level_estimator_(NULL), |
| - noise_suppression_(NULL), |
| - voice_detection_(NULL), |
| - crit_(CriticalSectionWrapper::CreateCriticalSection()), |
| -#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| - debug_file_(FileWrapper::Create()), |
| - event_msg_(new audioproc::Event()), |
| -#endif |
| - fwd_proc_format_(kSampleRate16kHz), |
| - rev_proc_format_(kSampleRate16kHz, 1), |
| - split_rate_(kSampleRate16kHz), |
| - stream_delay_ms_(0), |
| - delay_offset_ms_(0), |
| - was_stream_delay_set_(false), |
| - last_stream_delay_ms_(0), |
| - last_aec_system_delay_ms_(0), |
| - stream_delay_jumps_(-1), |
| - aec_system_delay_jumps_(-1), |
| - output_will_be_muted_(false), |
| - key_pressed_(false), |
| + : public_submodules_(new ApmPublicSubmodules()), |
| + private_submodules_(new ApmPrivateSubmodules(beamformer)), |
| #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) |
| - use_new_agc_(false), |
| + constants_(config.Get<ExperimentalAgc>().startup_min_volume, |
| + config.Get<Beamforming>().array_geometry, |
| + config.Get<Beamforming>().target_direction, |
| + false, |
| + config.Get<Intelligibility>().enabled, |
| + config.Get<Beamforming>().enabled), |
| #else |
| - use_new_agc_(config.Get<ExperimentalAgc>().enabled), |
| + constants_(config.Get<ExperimentalAgc>().startup_min_volume, |
| + config.Get<Beamforming>().array_geometry, |
| + config.Get<Beamforming>().target_direction, |
| + config.Get<ExperimentalAgc>().enabled, |
| + config.Get<Intelligibility>().enabled, |
| + config.Get<Beamforming>().enabled), |
| #endif |
|
kwiberg-webrtc
2015/11/23 22:15:10
You can shrink the ifdef region to one line, right
peah-webrtc
2015/11/24 21:42:23
Done.
|
| - agc_startup_min_volume_(config.Get<ExperimentalAgc>().startup_min_volume), |
| + |
| #if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS) |
| - transient_suppressor_enabled_(false), |
| + capture_(false) |
| #else |
| - transient_suppressor_enabled_(config.Get<ExperimentalNs>().enabled), |
| + capture_(config.Get<ExperimentalNs>().enabled) |
| #endif |
| - beamformer_enabled_(config.Get<Beamforming>().enabled), |
| - beamformer_(beamformer), |
| - array_geometry_(config.Get<Beamforming>().array_geometry), |
| - target_direction_(config.Get<Beamforming>().target_direction), |
| - intelligibility_enabled_(config.Get<Intelligibility>().enabled) { |
| +{ |
| render_thread_checker_.DetachFromThread(); |
| signal_thread_checker_.DetachFromThread(); |
| capture_thread_checker_.DetachFromThread(); |
| - echo_cancellation_ = |
| - new EchoCancellationImpl(this, crit_, &render_thread_checker_); |
| - component_list_.push_back(echo_cancellation_); |
| - |
| - echo_control_mobile_ = |
| - new EchoControlMobileImpl(this, crit_, &render_thread_checker_); |
| - component_list_.push_back(echo_control_mobile_); |
| - |
| - gain_control_ = new GainControlImpl(this, crit_, &render_thread_checker_, |
| - &capture_thread_checker_); |
| - component_list_.push_back(gain_control_); |
| - |
| - high_pass_filter_ = new HighPassFilterImpl(this, crit_); |
| - component_list_.push_back(high_pass_filter_); |
| - |
| - level_estimator_ = new LevelEstimatorImpl(this, crit_); |
| - component_list_.push_back(level_estimator_); |
| - |
| - noise_suppression_ = new NoiseSuppressionImpl(this, crit_); |
| - component_list_.push_back(noise_suppression_); |
| - |
| - voice_detection_ = new VoiceDetectionImpl(this, crit_); |
| - component_list_.push_back(voice_detection_); |
| - |
| - gain_control_for_new_agc_.reset(new GainControlForNewAgc(gain_control_)); |
| + { |
| + rtc::CritScope cs_render(&crit_render_); |
| + rtc::CritScope cs_capture(&crit_capture_); |
| + |
| + public_submodules_->echo_cancellation = new EchoCancellationImpl( |
| + this, &crit_render_, &crit_capture_, &render_thread_checker_); |
| + public_submodules_->echo_control_mobile = new EchoControlMobileImpl( |
| + this, &crit_render_, &crit_capture_, &render_thread_checker_); |
| + public_submodules_->gain_control = |
| + new GainControlImpl(this, &crit_capture_, &crit_capture_, |
| + &render_thread_checker_, &capture_thread_checker_); |
| + public_submodules_->high_pass_filter = |
| + new HighPassFilterImpl(this, &crit_capture_); |
| + public_submodules_->level_estimator = new LevelEstimatorImpl(this); |
| + public_submodules_->noise_suppression = |
| + new NoiseSuppressionImpl(this, &crit_capture_); |
| + public_submodules_->voice_detection = |
| + new VoiceDetectionImpl(this, &crit_capture_); |
| + public_submodules_->gain_control_for_new_agc.reset( |
| + new GainControlForNewAgc(public_submodules_->gain_control)); |
| + |
| + private_submodules_->component_list.push_back( |
| + public_submodules_->echo_cancellation); |
| + private_submodules_->component_list.push_back( |
| + public_submodules_->echo_control_mobile); |
| + private_submodules_->component_list.push_back( |
| + public_submodules_->gain_control); |
| + private_submodules_->component_list.push_back( |
| + public_submodules_->high_pass_filter); |
| + private_submodules_->component_list.push_back( |
| + public_submodules_->level_estimator); |
| + private_submodules_->component_list.push_back( |
| + public_submodules_->noise_suppression); |
| + private_submodules_->component_list.push_back( |
| + public_submodules_->voice_detection); |
| + } |
| SetExtraOptions(config); |
| } |
| AudioProcessingImpl::~AudioProcessingImpl() { |
| - { |
| - CriticalSectionScoped crit_scoped(crit_); |
| - // Depends on gain_control_ and gain_control_for_new_agc_. |
| - agc_manager_.reset(); |
| - // Depends on gain_control_. |
| - gain_control_for_new_agc_.reset(); |
| - while (!component_list_.empty()) { |
| - ProcessingComponent* component = component_list_.front(); |
| - component->Destroy(); |
| - delete component; |
| - component_list_.pop_front(); |
| - } |
| + // Depends on gain_control_ and |
| + // public_submodules_->gain_control_for_new_agc. |
| + private_submodules_->agc_manager.reset(); |
| + // Depends on gain_control_. |
| + public_submodules_->gain_control_for_new_agc.reset(); |
| + while (!private_submodules_->component_list.empty()) { |
| + ProcessingComponent* component = |
| + private_submodules_->component_list.front(); |
| + component->Destroy(); |
| + delete component; |
| + private_submodules_->component_list.pop_front(); |
| + } |
| #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| - if (debug_file_->Open()) { |
| - debug_file_->CloseFile(); |
| - } |
| -#endif |
| + if (debug_dump_.debug_file->Open()) { |
| + debug_dump_.debug_file->CloseFile(); |
| } |
| - delete crit_; |
| - crit_ = NULL; |
| +#endif |
| } |
| int AudioProcessingImpl::Initialize() { |
| + // Run in a single-threaded manner during initialization. |
| + rtc::CritScope cs_render(&crit_render_); |
| + rtc::CritScope cs_capture(&crit_capture_); |
| RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); |
| - CriticalSectionScoped crit_scoped(crit_); |
| return InitializeLocked(); |
| } |
| @@ -292,6 +317,9 @@ int AudioProcessingImpl::Initialize(int input_sample_rate_hz, |
| ChannelLayout input_layout, |
| ChannelLayout output_layout, |
| ChannelLayout reverse_layout) { |
| + // Run in a single-threaded manner during initialization. |
| + rtc::CritScope cs_render(&crit_render_); |
| + rtc::CritScope cs_capture(&crit_capture_); |
| RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); |
| const ProcessingConfig processing_config = { |
| {{input_sample_rate_hz, |
| @@ -311,58 +339,64 @@ int AudioProcessingImpl::Initialize(int input_sample_rate_hz, |
| } |
| int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) { |
| + // Run in a single-threaded manner during initialization. |
| + rtc::CritScope cs_render(&crit_render_); |
| + rtc::CritScope cs_capture(&crit_capture_); |
| RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); |
| - CriticalSectionScoped crit_scoped(crit_); |
| return InitializeLocked(processing_config); |
| } |
| // Calls InitializeLocked() if any of the audio parameters have changed from |
| -// their current values. |
| -int AudioProcessingImpl::MaybeInitializeLocked( |
| +// their current values (needs to be called while holding the crit_render_lock). |
| +int AudioProcessingImpl::MaybeInitialize( |
| const ProcessingConfig& processing_config) { |
| // Called from both threads. Thread check is therefore not possible. |
| - if (processing_config == shared_state_.api_format_) { |
| + if (processing_config == formats_.api_format) { |
| return kNoError; |
| } |
| + |
| + rtc::CritScope cs_capture(&crit_capture_); |
| return InitializeLocked(processing_config); |
| } |
| int AudioProcessingImpl::InitializeLocked() { |
| const int fwd_audio_buffer_channels = |
| - beamformer_enabled_ |
| - ? shared_state_.api_format_.input_stream().num_channels() |
| - : shared_state_.api_format_.output_stream().num_channels(); |
| + constants_.beamformer_enabled |
| + ? formats_.api_format.input_stream().num_channels() |
| + : formats_.api_format.output_stream().num_channels(); |
| const int rev_audio_buffer_out_num_frames = |
| - shared_state_.api_format_.reverse_output_stream().num_frames() == 0 |
| - ? rev_proc_format_.num_frames() |
| - : shared_state_.api_format_.reverse_output_stream().num_frames(); |
| - if (shared_state_.api_format_.reverse_input_stream().num_channels() > 0) { |
| - render_audio_.reset(new AudioBuffer( |
| - shared_state_.api_format_.reverse_input_stream().num_frames(), |
| - shared_state_.api_format_.reverse_input_stream().num_channels(), |
| - rev_proc_format_.num_frames(), rev_proc_format_.num_channels(), |
| + formats_.api_format.reverse_output_stream().num_frames() == 0 |
| + ? formats_.rev_proc_format.num_frames() |
| + : formats_.api_format.reverse_output_stream().num_frames(); |
| + if (formats_.api_format.reverse_input_stream().num_channels() > 0) { |
| + render_.render_audio.reset(new AudioBuffer( |
| + formats_.api_format.reverse_input_stream().num_frames(), |
| + formats_.api_format.reverse_input_stream().num_channels(), |
| + formats_.rev_proc_format.num_frames(), |
| + formats_.rev_proc_format.num_channels(), |
| rev_audio_buffer_out_num_frames)); |
| if (rev_conversion_needed()) { |
| - render_converter_ = AudioConverter::Create( |
| - shared_state_.api_format_.reverse_input_stream().num_channels(), |
| - shared_state_.api_format_.reverse_input_stream().num_frames(), |
| - shared_state_.api_format_.reverse_output_stream().num_channels(), |
| - shared_state_.api_format_.reverse_output_stream().num_frames()); |
| + render_.render_converter = AudioConverter::Create( |
| + formats_.api_format.reverse_input_stream().num_channels(), |
| + formats_.api_format.reverse_input_stream().num_frames(), |
| + formats_.api_format.reverse_output_stream().num_channels(), |
| + formats_.api_format.reverse_output_stream().num_frames()); |
| } else { |
| - render_converter_.reset(nullptr); |
| + render_.render_converter.reset(nullptr); |
| } |
| } else { |
| - render_audio_.reset(nullptr); |
| - render_converter_.reset(nullptr); |
| + render_.render_audio.reset(nullptr); |
| + render_.render_converter.reset(nullptr); |
| } |
| - capture_audio_.reset( |
| - new AudioBuffer(shared_state_.api_format_.input_stream().num_frames(), |
| - shared_state_.api_format_.input_stream().num_channels(), |
| - fwd_proc_format_.num_frames(), fwd_audio_buffer_channels, |
| - shared_state_.api_format_.output_stream().num_frames())); |
| + capture_.capture_audio.reset( |
| + new AudioBuffer(formats_.api_format.input_stream().num_frames(), |
| + formats_.api_format.input_stream().num_channels(), |
| + capture_nonlocked_.fwd_proc_format.num_frames(), |
| + fwd_audio_buffer_channels, |
| + formats_.api_format.output_stream().num_frames())); |
| // Initialize all components. |
| - for (auto item : component_list_) { |
| + for (auto item : private_submodules_->component_list) { |
| int err = item->Initialize(); |
| if (err != kNoError) { |
| return err; |
| @@ -378,7 +412,7 @@ int AudioProcessingImpl::InitializeLocked() { |
| InitializeIntelligibility(); |
| #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| - if (debug_file_->Open()) { |
| + if (debug_dump_.debug_file->Open()) { |
| int err = WriteInitMessage(); |
| if (err != kNoError) { |
| return err; |
| @@ -411,18 +445,18 @@ int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { |
| return kBadNumberChannelsError; |
| } |
| - if (beamformer_enabled_ && |
| - (static_cast<size_t>(num_in_channels) != array_geometry_.size() || |
| - num_out_channels > 1)) { |
| + if (constants_.beamformer_enabled && (static_cast<size_t>(num_in_channels) != |
| + constants_.array_geometry.size() || |
| + num_out_channels > 1)) { |
| return kBadNumberChannelsError; |
| } |
| - shared_state_.api_format_ = config; |
| + formats_.api_format = config; |
| // We process at the closest native rate >= min(input rate, output rate)... |
| const int min_proc_rate = |
| - std::min(shared_state_.api_format_.input_stream().sample_rate_hz(), |
| - shared_state_.api_format_.output_stream().sample_rate_hz()); |
| + std::min(formats_.api_format.input_stream().sample_rate_hz(), |
| + formats_.api_format.output_stream().sample_rate_hz()); |
| int fwd_proc_rate; |
| for (size_t i = 0; i < kNumNativeSampleRates; ++i) { |
| fwd_proc_rate = kNativeSampleRatesHz[i]; |
| @@ -431,20 +465,20 @@ int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { |
| } |
| } |
| // ...with one exception. |
| - if (echo_control_mobile_->is_enabled() && |
| + if (public_submodules_->echo_control_mobile->is_enabled() && |
| min_proc_rate > kMaxAECMSampleRateHz) { |
| fwd_proc_rate = kMaxAECMSampleRateHz; |
| } |
| - fwd_proc_format_ = StreamConfig(fwd_proc_rate); |
| + capture_nonlocked_.fwd_proc_format = StreamConfig(fwd_proc_rate); |
| // We normally process the reverse stream at 16 kHz. Unless... |
| int rev_proc_rate = kSampleRate16kHz; |
| - if (fwd_proc_format_.sample_rate_hz() == kSampleRate8kHz) { |
| + if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate8kHz) { |
| // ...the forward stream is at 8 kHz. |
| rev_proc_rate = kSampleRate8kHz; |
| } else { |
| - if (shared_state_.api_format_.reverse_input_stream().sample_rate_hz() == |
| + if (formats_.api_format.reverse_input_stream().sample_rate_hz() == |
| kSampleRate32kHz) { |
| // ...or the input is at 32 kHz, in which case we use the splitting |
| // filter rather than the resampler. |
| @@ -454,63 +488,73 @@ int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) { |
| // Always downmix the reverse stream to mono for analysis. This has been |
| // demonstrated to work well for AEC in most practical scenarios. |
| - rev_proc_format_ = StreamConfig(rev_proc_rate, 1); |
| + formats_.rev_proc_format = StreamConfig(rev_proc_rate, 1); |
| - if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || |
| - fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) { |
| - split_rate_ = kSampleRate16kHz; |
| + if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate32kHz || |
| + capture_nonlocked_.fwd_proc_format.sample_rate_hz() == kSampleRate48kHz) { |
| + capture_nonlocked_.split_rate = kSampleRate16kHz; |
| } else { |
| - split_rate_ = fwd_proc_format_.sample_rate_hz(); |
| + capture_nonlocked_.split_rate = |
| + capture_nonlocked_.fwd_proc_format.sample_rate_hz(); |
| } |
| return InitializeLocked(); |
| } |
| void AudioProcessingImpl::SetExtraOptions(const Config& config) { |
| - CriticalSectionScoped crit_scoped(crit_); |
| + // Run in a single-threaded manner when setting the extra options. |
| + rtc::CritScope cs_render(&crit_render_); |
| + rtc::CritScope cs_capture(&crit_capture_); |
| RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); |
| - for (auto item : component_list_) { |
| + for (auto item : private_submodules_->component_list) { |
| item->SetExtraOptions(config); |
| } |
| - if (transient_suppressor_enabled_ != config.Get<ExperimentalNs>().enabled) { |
| - transient_suppressor_enabled_ = config.Get<ExperimentalNs>().enabled; |
| + if (capture_.transient_suppressor_enabled != |
| + config.Get<ExperimentalNs>().enabled) { |
| + capture_.transient_suppressor_enabled = |
| + config.Get<ExperimentalNs>().enabled; |
| InitializeTransient(); |
| } |
| } |
| int AudioProcessingImpl::proc_sample_rate_hz() const { |
| + // Only called from submodules beneath APM, hence locking is not needed. |
| // TODO(peah): Add threadchecker when possible. |
| - return fwd_proc_format_.sample_rate_hz(); |
| + return capture_nonlocked_.fwd_proc_format.sample_rate_hz(); |
| } |
| int AudioProcessingImpl::proc_split_sample_rate_hz() const { |
| + // Only called from submodules/tests beneath APM, hence locking is not needed. |
| // TODO(peah): Add threadchecker when possible. |
| - return split_rate_; |
| + return capture_nonlocked_.split_rate; |
| } |
| int AudioProcessingImpl::num_reverse_channels() const { |
| - // TODO(peah): Add threadchecker when possible. |
| - return rev_proc_format_.num_channels(); |
| + // Only called from submodules/tests beneath APM, hence locking is not needed. |
| + return formats_.rev_proc_format.num_channels(); |
| } |
| int AudioProcessingImpl::num_input_channels() const { |
| + // Only called from submodules/tests beneath APM, hence locking is not needed. |
| RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| - return shared_state_.api_format_.input_stream().num_channels(); |
| + return formats_.api_format.input_stream().num_channels(); |
| } |
| int AudioProcessingImpl::num_output_channels() const { |
| + // Only called from submodules/tests beneath APM, hence locking is not needed. |
| // TODO(peah): Add appropriate thread checker when possible. |
| - return shared_state_.api_format_.output_stream().num_channels(); |
| + return formats_.api_format.output_stream().num_channels(); |
| } |
| void AudioProcessingImpl::set_output_will_be_muted(bool muted) { |
| - CriticalSectionScoped lock(crit_); |
| + rtc::CritScope cs(&crit_capture_); |
| RTC_DCHECK(signal_thread_checker_.CalledOnValidThread()); |
| - output_will_be_muted_ = muted; |
| - if (agc_manager_.get()) { |
| - agc_manager_->SetCaptureMuted(output_will_be_muted_); |
| + capture_.output_will_be_muted = muted; |
| + if (private_submodules_->agc_manager.get()) { |
| + private_submodules_->agc_manager->SetCaptureMuted( |
| + capture_.output_will_be_muted); |
| } |
| } |
| @@ -522,14 +566,21 @@ int AudioProcessingImpl::ProcessStream(const float* const* src, |
| int output_sample_rate_hz, |
| ChannelLayout output_layout, |
| float* const* dest) { |
| - CriticalSectionScoped crit_scoped(crit_); |
| RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| - StreamConfig input_stream = shared_state_.api_format_.input_stream(); |
| + StreamConfig input_stream; |
| + StreamConfig output_stream; |
| + { |
| + // Access the formats_.api_format.input_stream beneath the capture lock. |
| + // The lock must be released as it is later required in the call |
| + // to ProcessStream(...); |
|
kwiberg-webrtc
2015/11/23 22:15:10
Are the locks reentrant or not? Above in AudioProc
peah-webrtc
2015/11/24 21:42:23
Great find!
I think they are probably not reentra
kwiberg-webrtc
2015/11/25 10:17:14
(As we later found out, the locks are in fact reen
|
| + rtc::CritScope cs(&crit_capture_); |
| + input_stream = formats_.api_format.input_stream(); |
| + output_stream = formats_.api_format.output_stream(); |
| + } |
| + |
| input_stream.set_sample_rate_hz(input_sample_rate_hz); |
| input_stream.set_num_channels(ChannelsFromLayout(input_layout)); |
| input_stream.set_has_keyboard(LayoutHasKeyboard(input_layout)); |
| - |
| - StreamConfig output_stream = shared_state_.api_format_.output_stream(); |
| output_stream.set_sample_rate_hz(output_sample_rate_hz); |
| output_stream.set_num_channels(ChannelsFromLayout(output_layout)); |
| output_stream.set_has_keyboard(LayoutHasKeyboard(output_layout)); |
| @@ -544,51 +595,60 @@ int AudioProcessingImpl::ProcessStream(const float* const* src, |
| const StreamConfig& input_config, |
| const StreamConfig& output_config, |
| float* const* dest) { |
| - CriticalSectionScoped crit_scoped(crit_); |
| RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| + { |
| + // Acquire the capture lock in order to safely call the function |
| + // that retrieves the render side data. This function accesses apm |
| + // getters that need the capture lock held when being called. |
| + rtc::CritScope cs_capture(&crit_capture_); |
| + public_submodules_->echo_cancellation->ReadQueuedRenderData(); |
| + public_submodules_->echo_control_mobile->ReadQueuedRenderData(); |
| + public_submodules_->gain_control->ReadQueuedRenderData(); |
| + } |
| if (!src || !dest) { |
| return kNullPointerError; |
| } |
| - echo_cancellation_->ReadQueuedRenderData(); |
| - echo_control_mobile_->ReadQueuedRenderData(); |
| - gain_control_->ReadQueuedRenderData(); |
| - |
| - ProcessingConfig processing_config = shared_state_.api_format_; |
| + ProcessingConfig processing_config = formats_.api_format; |
| processing_config.input_stream() = input_config; |
| processing_config.output_stream() = output_config; |
| - RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); |
| + { |
| + // Do conditional reinitialization. |
| + rtc::CritScope cs_render(&crit_render_); |
| + RETURN_ON_ERR(MaybeInitialize(processing_config)); |
| + } |
| + rtc::CritScope cs_capture(&crit_capture_); |
| + |
| assert(processing_config.input_stream().num_frames() == |
| - shared_state_.api_format_.input_stream().num_frames()); |
| + formats_.api_format.input_stream().num_frames()); |
| #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| - if (debug_file_->Open()) { |
| + if (debug_dump_.debug_file->Open()) { |
| RETURN_ON_ERR(WriteConfigMessage(false)); |
| - event_msg_->set_type(audioproc::Event::STREAM); |
| - audioproc::Stream* msg = event_msg_->mutable_stream(); |
| + debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
| + audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| const size_t channel_size = |
| - sizeof(float) * shared_state_.api_format_.input_stream().num_frames(); |
| - for (int i = 0; i < shared_state_.api_format_.input_stream().num_channels(); |
| - ++i) |
| + sizeof(float) * formats_.api_format.input_stream().num_frames(); |
| + for (int i = 0; i < formats_.api_format.input_stream().num_channels(); ++i) |
| msg->add_input_channel(src[i], channel_size); |
| } |
| #endif |
| - capture_audio_->CopyFrom(src, shared_state_.api_format_.input_stream()); |
| + capture_.capture_audio->CopyFrom(src, formats_.api_format.input_stream()); |
| RETURN_ON_ERR(ProcessStreamLocked()); |
| - capture_audio_->CopyTo(shared_state_.api_format_.output_stream(), dest); |
| + capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest); |
| #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| - if (debug_file_->Open()) { |
| - audioproc::Stream* msg = event_msg_->mutable_stream(); |
| + if (debug_dump_.debug_file->Open()) { |
| + audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| const size_t channel_size = |
| - sizeof(float) * shared_state_.api_format_.output_stream().num_frames(); |
| - for (int i = 0; |
| - i < shared_state_.api_format_.output_stream().num_channels(); ++i) |
| + sizeof(float) * formats_.api_format.output_stream().num_frames(); |
| + for (int i = 0; i < formats_.api_format.output_stream().num_channels(); ++i) |
| msg->add_output_channel(dest[i], channel_size); |
| - RETURN_ON_ERR(WriteMessageToDebugFile()); |
| + RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| + &crit_debug_, &debug_dump_.capture)); |
| } |
| #endif |
| @@ -596,11 +656,19 @@ int AudioProcessingImpl::ProcessStream(const float* const* src, |
| } |
| int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { |
| - CriticalSectionScoped crit_scoped(crit_); |
| RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| - echo_cancellation_->ReadQueuedRenderData(); |
| - echo_control_mobile_->ReadQueuedRenderData(); |
| - gain_control_->ReadQueuedRenderData(); |
| + { |
| + // Acquire the capture lock in order to safely call the function |
| + // that retrieves the render side data. This function accesses apm |
| + // getters that need the capture lock held when being called. |
| + // The lock needs to be released as |
| + // public_submodules_->echo_control_mobile->is_enabled() acquires this lock |
| + // as well. |
| + rtc::CritScope cs_capture(&crit_capture_); |
| + public_submodules_->echo_cancellation->ReadQueuedRenderData(); |
| + public_submodules_->echo_control_mobile->ReadQueuedRenderData(); |
| + public_submodules_->gain_control->ReadQueuedRenderData(); |
| + } |
| if (!frame) { |
| return kNullPointerError; |
| @@ -613,47 +681,61 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { |
| return kBadSampleRateError; |
| } |
| - if (echo_control_mobile_->is_enabled() && |
| + if (public_submodules_->echo_control_mobile->is_enabled() && |
| frame->sample_rate_hz_ > kMaxAECMSampleRateHz) { |
| LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates"; |
| return kUnsupportedComponentError; |
| } |
| - // TODO(ajm): The input and output rates and channels are currently |
| - // constrained to be identical in the int16 interface. |
| - ProcessingConfig processing_config = shared_state_.api_format_; |
| + ProcessingConfig processing_config; |
| + { |
| + // Acquire lock for the access of api_format. |
| + // The lock is released immediately due to the conditional |
| + // reinitialization. |
| + rtc::CritScope cs_capture(&crit_capture_); |
| + // TODO(ajm): The input and output rates and channels are currently |
| + // constrained to be identical in the int16 interface. |
| + processing_config = formats_.api_format; |
| + } |
| processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_); |
| processing_config.input_stream().set_num_channels(frame->num_channels_); |
| processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_); |
| processing_config.output_stream().set_num_channels(frame->num_channels_); |
| - RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); |
| + { |
| + // Do conditional reinitialization. |
| + rtc::CritScope cs_render(&crit_render_); |
| + RETURN_ON_ERR(MaybeInitialize(processing_config)); |
| + } |
| + rtc::CritScope cs_capture(&crit_capture_); |
| if (frame->samples_per_channel_ != |
| - shared_state_.api_format_.input_stream().num_frames()) { |
| + formats_.api_format.input_stream().num_frames()) { |
| return kBadDataLengthError; |
| } |
| #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| - if (debug_file_->Open()) { |
| - event_msg_->set_type(audioproc::Event::STREAM); |
| - audioproc::Stream* msg = event_msg_->mutable_stream(); |
| + if (debug_dump_.debug_file->Open()) { |
| + debug_dump_.capture.event_msg->set_type(audioproc::Event::STREAM); |
| + audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| const size_t data_size = |
| sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| msg->set_input_data(frame->data_, data_size); |
| } |
| #endif |
| - capture_audio_->DeinterleaveFrom(frame); |
| + capture_.capture_audio->DeinterleaveFrom(frame); |
| RETURN_ON_ERR(ProcessStreamLocked()); |
| - capture_audio_->InterleaveTo(frame, output_copy_needed(is_data_processed())); |
| + capture_.capture_audio->InterleaveTo(frame, |
| + output_copy_needed(is_data_processed())); |
| #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| - if (debug_file_->Open()) { |
| - audioproc::Stream* msg = event_msg_->mutable_stream(); |
| + if (debug_dump_.debug_file->Open()) { |
| + audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| const size_t data_size = |
| sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| msg->set_output_data(frame->data_, data_size); |
| - RETURN_ON_ERR(WriteMessageToDebugFile()); |
| + RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| + &crit_debug_, &debug_dump_.capture)); |
| } |
| #endif |
| @@ -663,22 +745,25 @@ int AudioProcessingImpl::ProcessStream(AudioFrame* frame) { |
| int AudioProcessingImpl::ProcessStreamLocked() { |
| RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| - if (debug_file_->Open()) { |
| - audioproc::Stream* msg = event_msg_->mutable_stream(); |
| - msg->set_delay(stream_delay_ms_); |
| - msg->set_drift(echo_cancellation_->stream_drift_samples()); |
| + if (debug_dump_.debug_file->Open()) { |
| + audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream(); |
| + msg->set_delay(capture_nonlocked_.stream_delay_ms); |
| + msg->set_drift( |
| + public_submodules_->echo_cancellation->stream_drift_samples()); |
| msg->set_level(gain_control()->stream_analog_level()); |
| - msg->set_keypress(key_pressed_); |
| + msg->set_keypress(capture_.key_pressed); |
| } |
| #endif |
| MaybeUpdateHistograms(); |
| - AudioBuffer* ca = capture_audio_.get(); // For brevity. |
| + AudioBuffer* ca = capture_.capture_audio.get(); // For brevity. |
| - if (use_new_agc_ && gain_control_->is_enabled()) { |
| - agc_manager_->AnalyzePreProcess(ca->channels()[0], ca->num_channels(), |
| - fwd_proc_format_.num_frames()); |
| + if (constants_.use_new_agc && |
| + public_submodules_->gain_control->is_enabled()) { |
| + private_submodules_->agc_manager->AnalyzePreProcess( |
| + ca->channels()[0], ca->num_channels(), |
| + capture_nonlocked_.fwd_proc_format.num_frames()); |
| } |
| bool data_processed = is_data_processed(); |
| @@ -686,34 +771,41 @@ int AudioProcessingImpl::ProcessStreamLocked() { |
| ca->SplitIntoFrequencyBands(); |
| } |
| - if (intelligibility_enabled_) { |
| - intelligibility_enhancer_->AnalyzeCaptureAudio( |
| - ca->split_channels_f(kBand0To8kHz), split_rate_, ca->num_channels()); |
| + if (constants_.intelligibility_enabled) { |
| + public_submodules_->intelligibility_enhancer->AnalyzeCaptureAudio( |
| + ca->split_channels_f(kBand0To8kHz), capture_nonlocked_.split_rate, |
| + ca->num_channels()); |
| } |
| - if (beamformer_enabled_) { |
| - beamformer_->ProcessChunk(*ca->split_data_f(), ca->split_data_f()); |
| + if (constants_.beamformer_enabled) { |
| + private_submodules_->beamformer->ProcessChunk(*ca->split_data_f(), |
| + ca->split_data_f()); |
| ca->set_num_channels(1); |
| } |
| - RETURN_ON_ERR(high_pass_filter_->ProcessCaptureAudio(ca)); |
| - RETURN_ON_ERR(gain_control_->AnalyzeCaptureAudio(ca)); |
| - RETURN_ON_ERR(noise_suppression_->AnalyzeCaptureAudio(ca)); |
| - RETURN_ON_ERR(echo_cancellation_->ProcessCaptureAudio(ca)); |
| + RETURN_ON_ERR(public_submodules_->high_pass_filter->ProcessCaptureAudio(ca)); |
| + RETURN_ON_ERR(public_submodules_->gain_control->AnalyzeCaptureAudio(ca)); |
| + RETURN_ON_ERR(public_submodules_->noise_suppression->AnalyzeCaptureAudio(ca)); |
| + RETURN_ON_ERR(public_submodules_->echo_cancellation->ProcessCaptureAudio(ca)); |
| - if (echo_control_mobile_->is_enabled() && noise_suppression_->is_enabled()) { |
| + if (public_submodules_->echo_control_mobile->is_enabled() && |
| + public_submodules_->noise_suppression->is_enabled()) { |
| ca->CopyLowPassToReference(); |
| } |
| - RETURN_ON_ERR(noise_suppression_->ProcessCaptureAudio(ca)); |
| - RETURN_ON_ERR(echo_control_mobile_->ProcessCaptureAudio(ca)); |
| - RETURN_ON_ERR(voice_detection_->ProcessCaptureAudio(ca)); |
| + RETURN_ON_ERR(public_submodules_->noise_suppression->ProcessCaptureAudio(ca)); |
| + RETURN_ON_ERR( |
| + public_submodules_->echo_control_mobile->ProcessCaptureAudio(ca)); |
| + RETURN_ON_ERR(public_submodules_->voice_detection->ProcessCaptureAudio(ca)); |
| - if (use_new_agc_ && gain_control_->is_enabled() && |
| - (!beamformer_enabled_ || beamformer_->is_target_present())) { |
| - agc_manager_->Process(ca->split_bands_const(0)[kBand0To8kHz], |
| - ca->num_frames_per_band(), split_rate_); |
| + if (constants_.use_new_agc && |
| + public_submodules_->gain_control->is_enabled() && |
| + (!constants_.beamformer_enabled || |
| + private_submodules_->beamformer->is_target_present())) { |
| + private_submodules_->agc_manager->Process( |
| + ca->split_bands_const(0)[kBand0To8kHz], ca->num_frames_per_band(), |
| + capture_nonlocked_.split_rate); |
| } |
| - RETURN_ON_ERR(gain_control_->ProcessCaptureAudio(ca)); |
| + RETURN_ON_ERR(public_submodules_->gain_control->ProcessCaptureAudio(ca)); |
| if (synthesis_needed(data_processed)) { |
| ca->MergeFrequencyBands(); |
| @@ -721,21 +813,23 @@ int AudioProcessingImpl::ProcessStreamLocked() { |
| // TODO(aluebs): Investigate if the transient suppression placement should be |
| // before or after the AGC. |
| - if (transient_suppressor_enabled_) { |
| + if (capture_.transient_suppressor_enabled) { |
| float voice_probability = |
| - agc_manager_.get() ? agc_manager_->voice_probability() : 1.f; |
| + private_submodules_->agc_manager.get() |
| + ? private_submodules_->agc_manager->voice_probability() |
| + : 1.f; |
| - transient_suppressor_->Suppress( |
| + public_submodules_->transient_suppressor->Suppress( |
| ca->channels_f()[0], ca->num_frames(), ca->num_channels(), |
| ca->split_bands_const_f(0)[kBand0To8kHz], ca->num_frames_per_band(), |
| ca->keyboard_data(), ca->num_keyboard_frames(), voice_probability, |
| - key_pressed_); |
| + capture_.key_pressed); |
| } |
| // The level estimator operates on the recombined data. |
| - RETURN_ON_ERR(level_estimator_->ProcessStream(ca)); |
| + RETURN_ON_ERR(public_submodules_->level_estimator->ProcessStream(ca)); |
| - was_stream_delay_set_ = false; |
| + capture_.was_stream_delay_set = false; |
| return kNoError; |
| } |
| @@ -744,13 +838,14 @@ int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data, |
| int rev_sample_rate_hz, |
| ChannelLayout layout) { |
| RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
| + rtc::CritScope cs(&crit_render_); |
| const StreamConfig reverse_config = { |
| rev_sample_rate_hz, ChannelsFromLayout(layout), LayoutHasKeyboard(layout), |
| }; |
| if (samples_per_channel != reverse_config.num_frames()) { |
| return kBadDataLengthError; |
| } |
| - return AnalyzeReverseStream(data, reverse_config, reverse_config); |
| + return AnalyzeReverseStreamLocked(data, reverse_config, reverse_config); |
| } |
| int AudioProcessingImpl::ProcessReverseStream( |
| @@ -759,14 +854,16 @@ int AudioProcessingImpl::ProcessReverseStream( |
| const StreamConfig& reverse_output_config, |
| float* const* dest) { |
| RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
| - RETURN_ON_ERR( |
| - AnalyzeReverseStream(src, reverse_input_config, reverse_output_config)); |
| + rtc::CritScope cs(&crit_render_); |
| + RETURN_ON_ERR(AnalyzeReverseStreamLocked(src, reverse_input_config, |
| + reverse_output_config)); |
| if (is_rev_processed()) { |
| - render_audio_->CopyTo(shared_state_.api_format_.reverse_output_stream(), |
| - dest); |
| + render_.render_audio->CopyTo(formats_.api_format.reverse_output_stream(), |
| + dest); |
| } else if (rev_conversion_needed()) { |
| - render_converter_->Convert(src, reverse_input_config.num_samples(), dest, |
| - reverse_output_config.num_samples()); |
| + render_.render_converter->Convert(src, reverse_input_config.num_samples(), |
| + dest, |
| + reverse_output_config.num_samples()); |
| } else { |
| CopyAudioIfNeeded(src, reverse_input_config.num_frames(), |
| reverse_input_config.num_channels(), dest); |
| @@ -775,11 +872,10 @@ int AudioProcessingImpl::ProcessReverseStream( |
| return kNoError; |
| } |
| -int AudioProcessingImpl::AnalyzeReverseStream( |
| +int AudioProcessingImpl::AnalyzeReverseStreamLocked( |
| const float* const* src, |
| const StreamConfig& reverse_input_config, |
| const StreamConfig& reverse_output_config) { |
| - CriticalSectionScoped crit_scoped(crit_); |
| RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
| if (src == NULL) { |
| return kNullPointerError; |
| @@ -789,39 +885,40 @@ int AudioProcessingImpl::AnalyzeReverseStream( |
| return kBadNumberChannelsError; |
| } |
| - ProcessingConfig processing_config = shared_state_.api_format_; |
| + ProcessingConfig processing_config = formats_.api_format; |
| processing_config.reverse_input_stream() = reverse_input_config; |
| processing_config.reverse_output_stream() = reverse_output_config; |
| - RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); |
| + RETURN_ON_ERR(MaybeInitialize(processing_config)); |
| assert(reverse_input_config.num_frames() == |
| - shared_state_.api_format_.reverse_input_stream().num_frames()); |
| + formats_.api_format.reverse_input_stream().num_frames()); |
| #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| - if (debug_file_->Open()) { |
| - event_msg_->set_type(audioproc::Event::REVERSE_STREAM); |
| - audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); |
| + if (debug_dump_.debug_file->Open()) { |
| + debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM); |
| + audioproc::ReverseStream* msg = |
| + debug_dump_.render.event_msg->mutable_reverse_stream(); |
| const size_t channel_size = |
| - sizeof(float) * |
| - shared_state_.api_format_.reverse_input_stream().num_frames(); |
| + sizeof(float) * formats_.api_format.reverse_input_stream().num_frames(); |
| for (int i = 0; |
| - i < shared_state_.api_format_.reverse_input_stream().num_channels(); |
| - ++i) |
| + i < formats_.api_format.reverse_input_stream().num_channels(); ++i) |
| msg->add_channel(src[i], channel_size); |
| - RETURN_ON_ERR(WriteMessageToDebugFile()); |
| + RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| + &crit_debug_, &debug_dump_.render)); |
| } |
| #endif |
| - render_audio_->CopyFrom(src, |
| - shared_state_.api_format_.reverse_input_stream()); |
| + render_.render_audio->CopyFrom(src, |
| + formats_.api_format.reverse_input_stream()); |
| return ProcessReverseStreamLocked(); |
| } |
| int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { |
| RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
| RETURN_ON_ERR(AnalyzeReverseStream(frame)); |
| + rtc::CritScope cs(&crit_render_); |
| if (is_rev_processed()) { |
| - render_audio_->InterleaveTo(frame, true); |
| + render_.render_audio->InterleaveTo(frame, true); |
| } |
| return kNoError; |
| @@ -829,7 +926,7 @@ int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) { |
| int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { |
| RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
| - CriticalSectionScoped crit_scoped(crit_); |
| + rtc::CritScope cs(&crit_render_); |
| if (frame == NULL) { |
| return kNullPointerError; |
| } |
| @@ -842,7 +939,7 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { |
| } |
| // This interface does not tolerate different forward and reverse rates. |
| if (frame->sample_rate_hz_ != |
| - shared_state_.api_format_.input_stream().sample_rate_hz()) { |
| + formats_.api_format.input_stream().sample_rate_hz()) { |
| return kBadSampleRateError; |
| } |
| @@ -850,7 +947,7 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { |
| return kBadNumberChannelsError; |
| } |
| - ProcessingConfig processing_config = shared_state_.api_format_; |
| + ProcessingConfig processing_config = formats_.api_format; |
| processing_config.reverse_input_stream().set_sample_rate_hz( |
| frame->sample_rate_hz_); |
| processing_config.reverse_input_stream().set_num_channels( |
| @@ -860,45 +957,53 @@ int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) { |
| processing_config.reverse_output_stream().set_num_channels( |
| frame->num_channels_); |
| - RETURN_ON_ERR(MaybeInitializeLocked(processing_config)); |
| + RETURN_ON_ERR(MaybeInitialize(processing_config)); |
| if (frame->samples_per_channel_ != |
| - shared_state_.api_format_.reverse_input_stream().num_frames()) { |
| + formats_.api_format.reverse_input_stream().num_frames()) { |
| return kBadDataLengthError; |
| } |
| #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| - if (debug_file_->Open()) { |
| - event_msg_->set_type(audioproc::Event::REVERSE_STREAM); |
| - audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream(); |
| + if (debug_dump_.debug_file->Open()) { |
| + debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM); |
| + audioproc::ReverseStream* msg = |
| + debug_dump_.render.event_msg->mutable_reverse_stream(); |
| const size_t data_size = |
| sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_; |
| msg->set_data(frame->data_, data_size); |
| - RETURN_ON_ERR(WriteMessageToDebugFile()); |
| + RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| + &crit_debug_, &debug_dump_.render)); |
| } |
| #endif |
| - render_audio_->DeinterleaveFrom(frame); |
| + render_.render_audio->DeinterleaveFrom(frame); |
| return ProcessReverseStreamLocked(); |
| } |
| int AudioProcessingImpl::ProcessReverseStreamLocked() { |
| RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
| - AudioBuffer* ra = render_audio_.get(); // For brevity. |
| - if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz) { |
| + AudioBuffer* ra = render_.render_audio.get(); // For brevity. |
| + if (formats_.rev_proc_format.sample_rate_hz() == kSampleRate32kHz) { |
| ra->SplitIntoFrequencyBands(); |
| } |
| - if (intelligibility_enabled_) { |
| - intelligibility_enhancer_->ProcessRenderAudio( |
| - ra->split_channels_f(kBand0To8kHz), split_rate_, ra->num_channels()); |
| + if (constants_.intelligibility_enabled) { |
| + // Currently run in single-threaded mode when the intelligibility |
| + // enhancer is activated. |
| + // TODO(peah): Fix to be properly multi-threaded. |
| + rtc::CritScope cs(&crit_capture_); |
| + public_submodules_->intelligibility_enhancer->ProcessRenderAudio( |
| + ra->split_channels_f(kBand0To8kHz), capture_nonlocked_.split_rate, |
| + ra->num_channels()); |
| } |
| - RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(ra)); |
| - RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(ra)); |
| - if (!use_new_agc_) { |
| - RETURN_ON_ERR(gain_control_->ProcessRenderAudio(ra)); |
| + RETURN_ON_ERR(public_submodules_->echo_cancellation->ProcessRenderAudio(ra)); |
| + RETURN_ON_ERR( |
| + public_submodules_->echo_control_mobile->ProcessRenderAudio(ra)); |
| + if (!constants_.use_new_agc) { |
| + RETURN_ON_ERR(public_submodules_->gain_control->ProcessRenderAudio(ra)); |
| } |
| - if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz && |
| + if (formats_.rev_proc_format.sample_rate_hz() == kSampleRate32kHz && |
| is_rev_processed()) { |
| ra->MergeFrequencyBands(); |
| } |
| @@ -908,9 +1013,10 @@ int AudioProcessingImpl::ProcessReverseStreamLocked() { |
| int AudioProcessingImpl::set_stream_delay_ms(int delay) { |
| RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| + rtc::CritScope cs(&crit_capture_); |
| Error retval = kNoError; |
| - was_stream_delay_set_ = true; |
| - delay += delay_offset_ms_; |
| + capture_.was_stream_delay_set = true; |
| + delay += capture_.delay_offset_ms; |
| if (delay < 0) { |
| delay = 0; |
| @@ -923,39 +1029,43 @@ int AudioProcessingImpl::set_stream_delay_ms(int delay) { |
| retval = kBadStreamParameterWarning; |
| } |
| - stream_delay_ms_ = delay; |
| + capture_nonlocked_.stream_delay_ms = delay; |
| return retval; |
| } |
| int AudioProcessingImpl::stream_delay_ms() const { |
| RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| - return stream_delay_ms_; |
| + return capture_nonlocked_.stream_delay_ms; |
| } |
| bool AudioProcessingImpl::was_stream_delay_set() const { |
| RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| - return was_stream_delay_set_; |
| + return capture_.was_stream_delay_set; |
| } |
| void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) { |
| + rtc::CritScope cs(&crit_capture_); |
| RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| - key_pressed_ = key_pressed; |
| + capture_.key_pressed = key_pressed; |
| } |
| void AudioProcessingImpl::set_delay_offset_ms(int offset) { |
| RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| - CriticalSectionScoped crit_scoped(crit_); |
| - delay_offset_ms_ = offset; |
| + rtc::CritScope cs(&crit_capture_); |
| + capture_.delay_offset_ms = offset; |
| } |
| int AudioProcessingImpl::delay_offset_ms() const { |
| RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| - return delay_offset_ms_; |
| + rtc::CritScope cs(&crit_capture_); |
| + return capture_.delay_offset_ms; |
| } |
| int AudioProcessingImpl::StartDebugRecording( |
| const char filename[AudioProcessing::kMaxFilenameSize]) { |
| - CriticalSectionScoped crit_scoped(crit_); |
| + // Run in a single-threaded manner. |
| + rtc::CritScope cs_render(&crit_render_); |
| + rtc::CritScope cs_capture(&crit_capture_); |
| RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, ""); |
| @@ -965,14 +1075,14 @@ int AudioProcessingImpl::StartDebugRecording( |
| #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| // Stop any ongoing recording. |
| - if (debug_file_->Open()) { |
| - if (debug_file_->CloseFile() == -1) { |
| + if (debug_dump_.debug_file->Open()) { |
| + if (debug_dump_.debug_file->CloseFile() == -1) { |
| return kFileError; |
| } |
| } |
| - if (debug_file_->OpenFile(filename, false) == -1) { |
| - debug_file_->CloseFile(); |
| + if (debug_dump_.debug_file->OpenFile(filename, false) == -1) { |
| + debug_dump_.debug_file->CloseFile(); |
| return kFileError; |
| } |
| @@ -985,7 +1095,9 @@ int AudioProcessingImpl::StartDebugRecording( |
| } |
| int AudioProcessingImpl::StartDebugRecording(FILE* handle) { |
| - CriticalSectionScoped crit_scoped(crit_); |
| + // Run in a single-threaded manner. |
| + rtc::CritScope cs_render(&crit_render_); |
| + rtc::CritScope cs_capture(&crit_capture_); |
| RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| if (handle == NULL) { |
| @@ -994,13 +1106,13 @@ int AudioProcessingImpl::StartDebugRecording(FILE* handle) { |
| #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| // Stop any ongoing recording. |
| - if (debug_file_->Open()) { |
| - if (debug_file_->CloseFile() == -1) { |
| + if (debug_dump_.debug_file->Open()) { |
| + if (debug_dump_.debug_file->CloseFile() == -1) { |
| return kFileError; |
| } |
| } |
| - if (debug_file_->OpenFromFileHandle(handle, true, false) == -1) { |
| + if (debug_dump_.debug_file->OpenFromFileHandle(handle, true, false) == -1) { |
| return kFileError; |
| } |
| @@ -1014,19 +1126,24 @@ int AudioProcessingImpl::StartDebugRecording(FILE* handle) { |
| int AudioProcessingImpl::StartDebugRecordingForPlatformFile( |
| rtc::PlatformFile handle) { |
| + // Run in a single-threaded manner. |
| + rtc::CritScope cs_render(&crit_render_); |
| + rtc::CritScope cs_capture(&crit_capture_); |
| RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| FILE* stream = rtc::FdopenPlatformFileForWriting(handle); |
| return StartDebugRecording(stream); |
| } |
| int AudioProcessingImpl::StopDebugRecording() { |
| - CriticalSectionScoped crit_scoped(crit_); |
| + // Run in a single-threaded manner. |
| + rtc::CritScope cs_render(&crit_render_); |
| + rtc::CritScope cs_capture(&crit_capture_); |
| RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| // We just return if recording hasn't started. |
| - if (debug_file_->Open()) { |
| - if (debug_file_->CloseFile() == -1) { |
| + if (debug_dump_.debug_file->Open()) { |
| + if (debug_dump_.debug_file->CloseFile() == -1) { |
| return kFileError; |
| } |
| } |
| @@ -1037,59 +1154,76 @@ int AudioProcessingImpl::StopDebugRecording() { |
| } |
| EchoCancellation* AudioProcessingImpl::echo_cancellation() const { |
| - return echo_cancellation_; |
| + // Adding a lock here has no effect as it allows any access to the submodule |
| + // from the returned pointer. |
| + return public_submodules_->echo_cancellation; |
| } |
| EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const { |
| - return echo_control_mobile_; |
| + // Adding a lock here has no effect as it allows any access to the submodule |
| + // from the returned pointer. |
| + return public_submodules_->echo_control_mobile; |
| } |
| GainControl* AudioProcessingImpl::gain_control() const { |
| - if (use_new_agc_) { |
| - return gain_control_for_new_agc_.get(); |
| + // Adding a lock here has no effect as it allows any access to the submodule |
| + // from the returned pointer. |
| + if (constants_.use_new_agc) { |
| + return public_submodules_->gain_control_for_new_agc.get(); |
| } |
| - return gain_control_; |
| + return public_submodules_->gain_control; |
| } |
| HighPassFilter* AudioProcessingImpl::high_pass_filter() const { |
| - return high_pass_filter_; |
| + // Adding a lock here has no effect as it allows any access to the submodule |
| + // from the returned pointer. |
| + return public_submodules_->high_pass_filter; |
| } |
| LevelEstimator* AudioProcessingImpl::level_estimator() const { |
| - return level_estimator_; |
| + // Adding a lock here has no effect as it allows any access to the submodule |
| + // from the returned pointer. |
| + return public_submodules_->level_estimator; |
| } |
| NoiseSuppression* AudioProcessingImpl::noise_suppression() const { |
| - return noise_suppression_; |
| + // Adding a lock here has no effect as it allows any access to the submodule |
| + // from the returned pointer. |
| + return public_submodules_->noise_suppression; |
| } |
| VoiceDetection* AudioProcessingImpl::voice_detection() const { |
| - return voice_detection_; |
| + // Adding a lock here has no effect as it allows any access to the submodule |
| + // from the returned pointer. |
| + return public_submodules_->voice_detection; |
| } |
| bool AudioProcessingImpl::is_data_processed() const { |
| RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| - if (beamformer_enabled_) { |
| + if (constants_.beamformer_enabled) { |
| return true; |
| } |
| int enabled_count = 0; |
| - for (auto item : component_list_) { |
| + for (auto item : private_submodules_->component_list) { |
| if (item->is_component_enabled()) { |
| enabled_count++; |
| } |
| } |
| - // Data is unchanged if no components are enabled, or if only level_estimator_ |
| - // or voice_detection_ is enabled. |
| + // Data is unchanged if no components are enabled, or if only |
| + // public_submodules_->level_estimator |
| + // or public_submodules_->voice_detection is enabled. |
| if (enabled_count == 0) { |
| return false; |
| } else if (enabled_count == 1) { |
| - if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) { |
| + if (public_submodules_->level_estimator->is_enabled() || |
| + public_submodules_->voice_detection->is_enabled()) { |
| return false; |
| } |
| } else if (enabled_count == 2) { |
| - if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) { |
| + if (public_submodules_->level_estimator->is_enabled() && |
| + public_submodules_->voice_detection->is_enabled()) { |
| return false; |
| } |
| } |
| @@ -1099,27 +1233,33 @@ bool AudioProcessingImpl::is_data_processed() const { |
| bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const { |
| RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| // Check if we've upmixed or downmixed the audio. |
| - return ((shared_state_.api_format_.output_stream().num_channels() != |
| - shared_state_.api_format_.input_stream().num_channels()) || |
| - is_data_processed || transient_suppressor_enabled_); |
| + return ((formats_.api_format.output_stream().num_channels() != |
| + formats_.api_format.input_stream().num_channels()) || |
| + is_data_processed || capture_.transient_suppressor_enabled); |
| } |
| bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const { |
| RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| return (is_data_processed && |
| - (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || |
| - fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz)); |
| + (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == |
| + kSampleRate32kHz || |
| + capture_nonlocked_.fwd_proc_format.sample_rate_hz() == |
| + kSampleRate48kHz)); |
| } |
| bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const { |
| RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| - if (!is_data_processed && !voice_detection_->is_enabled() && |
| - !transient_suppressor_enabled_) { |
| - // Only level_estimator_ is enabled. |
| + if (!is_data_processed && |
| + !public_submodules_->voice_detection->is_enabled() && |
| + !capture_.transient_suppressor_enabled) { |
| + // Only public_submodules_->level_estimator is enabled. |
| return false; |
| - } else if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz || |
| - fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) { |
| - // Something besides level_estimator_ is enabled, and we have super-wb. |
| + } else if (capture_nonlocked_.fwd_proc_format.sample_rate_hz() == |
| + kSampleRate32kHz || |
| + capture_nonlocked_.fwd_proc_format.sample_rate_hz() == |
| + kSampleRate48kHz) { |
| + // Something besides public_submodules_->level_estimator is enabled, and we |
| + // have super-wb. |
| return true; |
| } |
| return false; |
| @@ -1127,59 +1267,65 @@ bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const { |
| bool AudioProcessingImpl::is_rev_processed() const { |
| RTC_DCHECK(render_thread_checker_.CalledOnValidThread()); |
| - return intelligibility_enabled_ && intelligibility_enhancer_->active(); |
| + return constants_.intelligibility_enabled && |
| + public_submodules_->intelligibility_enhancer->active(); |
| } |
| bool AudioProcessingImpl::rev_conversion_needed() const { |
| // Called from several threads, thread check not possible. |
| - return (shared_state_.api_format_.reverse_input_stream() != |
| - shared_state_.api_format_.reverse_output_stream()); |
| + return (formats_.api_format.reverse_input_stream() != |
| + formats_.api_format.reverse_output_stream()); |
| } |
| void AudioProcessingImpl::InitializeExperimentalAgc() { |
| // Called from several threads, thread check not possible. |
| - if (use_new_agc_) { |
| - if (!agc_manager_.get()) { |
| - agc_manager_.reset(new AgcManagerDirect(gain_control_, |
| - gain_control_for_new_agc_.get(), |
| - agc_startup_min_volume_)); |
| + if (constants_.use_new_agc) { |
| + if (!private_submodules_->agc_manager.get()) { |
| + private_submodules_->agc_manager.reset(new AgcManagerDirect( |
| + public_submodules_->gain_control, |
| + public_submodules_->gain_control_for_new_agc.get(), |
| + constants_.agc_startup_min_volume)); |
| } |
| - agc_manager_->Initialize(); |
| - agc_manager_->SetCaptureMuted(output_will_be_muted_); |
| + private_submodules_->agc_manager->Initialize(); |
| + private_submodules_->agc_manager->SetCaptureMuted( |
| + capture_.output_will_be_muted); |
| } |
| } |
| void AudioProcessingImpl::InitializeTransient() { |
| // Called from several threads, thread check not possible. |
| - if (transient_suppressor_enabled_) { |
| - if (!transient_suppressor_.get()) { |
| - transient_suppressor_.reset(new TransientSuppressor()); |
| + if (capture_.transient_suppressor_enabled) { |
| + if (!public_submodules_->transient_suppressor.get()) { |
| + public_submodules_->transient_suppressor.reset(new TransientSuppressor()); |
| } |
| - transient_suppressor_->Initialize( |
| - fwd_proc_format_.sample_rate_hz(), split_rate_, |
| - shared_state_.api_format_.output_stream().num_channels()); |
| + public_submodules_->transient_suppressor->Initialize( |
| + capture_nonlocked_.fwd_proc_format.sample_rate_hz(), |
| + capture_nonlocked_.split_rate, |
| + formats_.api_format.output_stream().num_channels()); |
| } |
| } |
| void AudioProcessingImpl::InitializeBeamformer() { |
| // Called from several threads, thread check not possible. |
| - if (beamformer_enabled_) { |
| - if (!beamformer_) { |
| - beamformer_.reset( |
| - new NonlinearBeamformer(array_geometry_, target_direction_)); |
| + if (constants_.beamformer_enabled) { |
| + if (!private_submodules_->beamformer) { |
| + private_submodules_->beamformer.reset(new NonlinearBeamformer( |
| + constants_.array_geometry, constants_.target_direction)); |
| } |
| - beamformer_->Initialize(kChunkSizeMs, split_rate_); |
| + private_submodules_->beamformer->Initialize(kChunkSizeMs, |
| + capture_nonlocked_.split_rate); |
| } |
| } |
| void AudioProcessingImpl::InitializeIntelligibility() { |
| // Called from several threads, thread check not possible. |
| - if (intelligibility_enabled_) { |
| + if (constants_.intelligibility_enabled) { |
| IntelligibilityEnhancer::Config config; |
| - config.sample_rate_hz = split_rate_; |
| - config.num_capture_channels = capture_audio_->num_channels(); |
| - config.num_render_channels = render_audio_->num_channels(); |
| - intelligibility_enhancer_.reset(new IntelligibilityEnhancer(config)); |
| + config.sample_rate_hz = capture_nonlocked_.split_rate; |
| + config.num_capture_channels = capture_.capture_audio->num_channels(); |
| + config.num_render_channels = render_.render_audio->num_channels(); |
| + public_submodules_->intelligibility_enhancer.reset( |
| + new IntelligibilityEnhancer(config)); |
| } |
| } |
| @@ -1190,69 +1336,79 @@ void AudioProcessingImpl::MaybeUpdateHistograms() { |
| if (echo_cancellation()->is_enabled()) { |
| // Activate delay_jumps_ counters if we know echo_cancellation is runnning. |
| // If a stream has echo we know that the echo_cancellation is in process. |
| - if (stream_delay_jumps_ == -1 && echo_cancellation()->stream_has_echo()) { |
| - stream_delay_jumps_ = 0; |
| + if (capture_.stream_delay_jumps == -1 && |
| + echo_cancellation()->stream_has_echo()) { |
| + capture_.stream_delay_jumps = 0; |
| } |
| - if (aec_system_delay_jumps_ == -1 && |
| + if (capture_.aec_system_delay_jumps == -1 && |
| echo_cancellation()->stream_has_echo()) { |
| - aec_system_delay_jumps_ = 0; |
| + capture_.aec_system_delay_jumps = 0; |
| } |
| // Detect a jump in platform reported system delay and log the difference. |
| - const int diff_stream_delay_ms = stream_delay_ms_ - last_stream_delay_ms_; |
| - if (diff_stream_delay_ms > kMinDiffDelayMs && last_stream_delay_ms_ != 0) { |
| + const int diff_stream_delay_ms = |
| + capture_nonlocked_.stream_delay_ms - capture_.last_stream_delay_ms; |
| + if (diff_stream_delay_ms > kMinDiffDelayMs && |
| + capture_.last_stream_delay_ms != 0) { |
| RTC_HISTOGRAM_COUNTS("WebRTC.Audio.PlatformReportedStreamDelayJump", |
| diff_stream_delay_ms, kMinDiffDelayMs, 1000, 100); |
| - if (stream_delay_jumps_ == -1) { |
| - stream_delay_jumps_ = 0; // Activate counter if needed. |
| + if (capture_.stream_delay_jumps == -1) { |
| + capture_.stream_delay_jumps = 0; // Activate counter if needed. |
| } |
| - stream_delay_jumps_++; |
| + capture_.stream_delay_jumps++; |
| } |
| - last_stream_delay_ms_ = stream_delay_ms_; |
| + capture_.last_stream_delay_ms = capture_nonlocked_.stream_delay_ms; |
| // Detect a jump in AEC system delay and log the difference. |
| - const int frames_per_ms = rtc::CheckedDivExact(split_rate_, 1000); |
| + const int frames_per_ms = |
| + rtc::CheckedDivExact(capture_nonlocked_.split_rate, 1000); |
| const int aec_system_delay_ms = |
| WebRtcAec_system_delay(echo_cancellation()->aec_core()) / frames_per_ms; |
| const int diff_aec_system_delay_ms = |
| - aec_system_delay_ms - last_aec_system_delay_ms_; |
| + aec_system_delay_ms - capture_.last_aec_system_delay_ms; |
| if (diff_aec_system_delay_ms > kMinDiffDelayMs && |
| - last_aec_system_delay_ms_ != 0) { |
| + capture_.last_aec_system_delay_ms != 0) { |
| RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AecSystemDelayJump", |
| diff_aec_system_delay_ms, kMinDiffDelayMs, 1000, |
| 100); |
| - if (aec_system_delay_jumps_ == -1) { |
| - aec_system_delay_jumps_ = 0; // Activate counter if needed. |
| + if (capture_.aec_system_delay_jumps == -1) { |
| + capture_.aec_system_delay_jumps = 0; // Activate counter if needed. |
| } |
| - aec_system_delay_jumps_++; |
| + capture_.aec_system_delay_jumps++; |
| } |
| - last_aec_system_delay_ms_ = aec_system_delay_ms; |
| + capture_.last_aec_system_delay_ms = aec_system_delay_ms; |
| } |
| } |
| void AudioProcessingImpl::UpdateHistogramsOnCallEnd() { |
| + // Run in a single-threaded manner. |
| + rtc::CritScope cs_render(&crit_render_); |
| + rtc::CritScope cs_capture(&crit_capture_); |
| RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| - CriticalSectionScoped crit_scoped(crit_); |
| - if (stream_delay_jumps_ > -1) { |
| + |
| + if (capture_.stream_delay_jumps > -1) { |
| RTC_HISTOGRAM_ENUMERATION( |
| "WebRTC.Audio.NumOfPlatformReportedStreamDelayJumps", |
| - stream_delay_jumps_, 51); |
| + capture_.stream_delay_jumps, 51); |
| } |
| - stream_delay_jumps_ = -1; |
| - last_stream_delay_ms_ = 0; |
| + capture_.stream_delay_jumps = -1; |
| + capture_.last_stream_delay_ms = 0; |
| - if (aec_system_delay_jumps_ > -1) { |
| + if (capture_.aec_system_delay_jumps > -1) { |
| RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps", |
| - aec_system_delay_jumps_, 51); |
| + capture_.aec_system_delay_jumps, 51); |
| } |
| - aec_system_delay_jumps_ = -1; |
| - last_aec_system_delay_ms_ = 0; |
| + capture_.aec_system_delay_jumps = -1; |
| + capture_.last_aec_system_delay_ms = 0; |
| } |
| #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP |
| -int AudioProcessingImpl::WriteMessageToDebugFile() { |
| - RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| - int32_t size = event_msg_->ByteSize(); |
| +int AudioProcessingImpl::WriteMessageToDebugFile( |
| + FileWrapper* debug_file, |
| + rtc::CriticalSection* crit_debug, |
| + ApmDebugDumpThreadState* debug_state) { |
| + // Thread checker not possible due to function being static. |
| + int32_t size = debug_state->event_msg->ByteSize(); |
| if (size <= 0) { |
| return kUnspecifiedError; |
| } |
| @@ -1261,42 +1417,48 @@ int AudioProcessingImpl::WriteMessageToDebugFile() { |
| // pretty safe in assuming little-endian. |
| #endif |
| - if (!event_msg_->SerializeToString(&event_str_)) { |
| + if (!debug_state->event_msg->SerializeToString(&debug_state->event_str)) { |
| return kUnspecifiedError; |
| } |
| - // Write message preceded by its size. |
| - if (!debug_file_->Write(&size, sizeof(int32_t))) { |
| - return kFileError; |
| - } |
| - if (!debug_file_->Write(event_str_.data(), event_str_.length())) { |
| - return kFileError; |
| + { |
| + // Ensure atomic writes of the message. |
| + rtc::CritScope cs_capture(crit_debug); |
| + // Write message preceded by its size. |
| + if (!debug_file->Write(&size, sizeof(int32_t))) { |
| + return kFileError; |
| + } |
| + if (!debug_file->Write(debug_state->event_str.data(), |
| + debug_state->event_str.length())) { |
| + return kFileError; |
| + } |
| } |
| - event_msg_->Clear(); |
| + debug_state->event_msg->Clear(); |
| return kNoError; |
| } |
| int AudioProcessingImpl::WriteInitMessage() { |
| // Called from both render and capture threads, not threadchecker possible. |
| - event_msg_->set_type(audioproc::Event::INIT); |
| - audioproc::Init* msg = event_msg_->mutable_init(); |
| - msg->set_sample_rate( |
| - shared_state_.api_format_.input_stream().sample_rate_hz()); |
| + debug_dump_.capture.event_msg->set_type(audioproc::Event::INIT); |
| + audioproc::Init* msg = debug_dump_.capture.event_msg->mutable_init(); |
| + msg->set_sample_rate(formats_.api_format.input_stream().sample_rate_hz()); |
| msg->set_num_input_channels( |
| - shared_state_.api_format_.input_stream().num_channels()); |
| + formats_.api_format.input_stream().num_channels()); |
| msg->set_num_output_channels( |
| - shared_state_.api_format_.output_stream().num_channels()); |
| + formats_.api_format.output_stream().num_channels()); |
| msg->set_num_reverse_channels( |
| - shared_state_.api_format_.reverse_input_stream().num_channels()); |
| + formats_.api_format.reverse_input_stream().num_channels()); |
| msg->set_reverse_sample_rate( |
| - shared_state_.api_format_.reverse_input_stream().sample_rate_hz()); |
| + formats_.api_format.reverse_input_stream().sample_rate_hz()); |
| msg->set_output_sample_rate( |
| - shared_state_.api_format_.output_stream().sample_rate_hz()); |
| - // TODO(ekmeyerson): Add reverse output fields to event_msg_. |
| + formats_.api_format.output_stream().sample_rate_hz()); |
| + // TODO(ekmeyerson): Add reverse output fields to |
| + // debug_dump_.capture.event_msg. |
| - RETURN_ON_ERR(WriteMessageToDebugFile()); |
| + RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| + &crit_debug_, &debug_dump_.capture)); |
| return kNoError; |
| } |
| @@ -1304,45 +1466,52 @@ int AudioProcessingImpl::WriteConfigMessage(bool forced) { |
| RTC_DCHECK(capture_thread_checker_.CalledOnValidThread()); |
| audioproc::Config config; |
| - config.set_aec_enabled(echo_cancellation_->is_enabled()); |
| + config.set_aec_enabled(public_submodules_->echo_cancellation->is_enabled()); |
| config.set_aec_delay_agnostic_enabled( |
| - echo_cancellation_->is_delay_agnostic_enabled()); |
| + public_submodules_->echo_cancellation->is_delay_agnostic_enabled()); |
| config.set_aec_drift_compensation_enabled( |
| - echo_cancellation_->is_drift_compensation_enabled()); |
| + public_submodules_->echo_cancellation->is_drift_compensation_enabled()); |
| config.set_aec_extended_filter_enabled( |
| - echo_cancellation_->is_extended_filter_enabled()); |
| - config.set_aec_suppression_level( |
| - static_cast<int>(echo_cancellation_->suppression_level())); |
| + public_submodules_->echo_cancellation->is_extended_filter_enabled()); |
| + config.set_aec_suppression_level(static_cast<int>( |
| + public_submodules_->echo_cancellation->suppression_level())); |
| - config.set_aecm_enabled(echo_control_mobile_->is_enabled()); |
| + config.set_aecm_enabled( |
| + public_submodules_->echo_control_mobile->is_enabled()); |
| config.set_aecm_comfort_noise_enabled( |
| - echo_control_mobile_->is_comfort_noise_enabled()); |
| - config.set_aecm_routing_mode( |
| - static_cast<int>(echo_control_mobile_->routing_mode())); |
| + public_submodules_->echo_control_mobile->is_comfort_noise_enabled()); |
| + config.set_aecm_routing_mode(static_cast<int>( |
| + public_submodules_->echo_control_mobile->routing_mode())); |
| - config.set_agc_enabled(gain_control_->is_enabled()); |
| - config.set_agc_mode(static_cast<int>(gain_control_->mode())); |
| - config.set_agc_limiter_enabled(gain_control_->is_limiter_enabled()); |
| - config.set_noise_robust_agc_enabled(use_new_agc_); |
| + config.set_agc_enabled(public_submodules_->gain_control->is_enabled()); |
| + config.set_agc_mode( |
| + static_cast<int>(public_submodules_->gain_control->mode())); |
| + config.set_agc_limiter_enabled( |
| + public_submodules_->gain_control->is_limiter_enabled()); |
| + config.set_noise_robust_agc_enabled(constants_.use_new_agc); |
| - config.set_hpf_enabled(high_pass_filter_->is_enabled()); |
| + config.set_hpf_enabled(public_submodules_->high_pass_filter->is_enabled()); |
| - config.set_ns_enabled(noise_suppression_->is_enabled()); |
| - config.set_ns_level(static_cast<int>(noise_suppression_->level())); |
| + config.set_ns_enabled(public_submodules_->noise_suppression->is_enabled()); |
| + config.set_ns_level( |
| + static_cast<int>(public_submodules_->noise_suppression->level())); |
| - config.set_transient_suppression_enabled(transient_suppressor_enabled_); |
| + config.set_transient_suppression_enabled( |
| + capture_.transient_suppressor_enabled); |
| std::string serialized_config = config.SerializeAsString(); |
| - if (!forced && last_serialized_config_ == serialized_config) { |
| + if (!forced && |
| + debug_dump_.capture.last_serialized_config == serialized_config) { |
| return kNoError; |
| } |
| - last_serialized_config_ = serialized_config; |
| + debug_dump_.capture.last_serialized_config = serialized_config; |
| - event_msg_->set_type(audioproc::Event::CONFIG); |
| - event_msg_->mutable_config()->CopyFrom(config); |
| + debug_dump_.capture.event_msg->set_type(audioproc::Event::CONFIG); |
| + debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config); |
| - RETURN_ON_ERR(WriteMessageToDebugFile()); |
| + RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(), |
| + &crit_debug_, &debug_dump_.capture)); |
| return kNoError; |
| } |
| #endif // WEBRTC_AUDIOPROC_DEBUG_DUMP |