Index: content/renderer/media/webrtc_audio_capturer.cc |
diff --git a/content/renderer/media/webrtc_audio_capturer.cc b/content/renderer/media/webrtc_audio_capturer.cc |
index 113233ebefd60e3db1ac9a3c08c5e04fc6d18349..0414fc01997354d947caf089b9033ecf9f8fae19 100644 |
--- a/content/renderer/media/webrtc_audio_capturer.cc |
+++ b/content/renderer/media/webrtc_audio_capturer.cc |
@@ -26,11 +26,6 @@ namespace content { |
namespace { |
-// Audio buffer sizes are specified in milliseconds. |
-const char kAudioLatency[] = "latencyMs"; |
-const int kMinAudioLatencyMs = 0; |
-const int kMaxAudioLatencyMs = 10000; |
- |
// Method to check if any of the data in |audio_source| has energy. |
bool HasDataEnergy(const media::AudioBus& audio_source) { |
[inline review] mcasas 2016/02/26 01:28:19: Isn't this method exactly a !audio_source.AreFramesZero()?
[inline review] miu 2016/02/27 03:46:37: Done.
|
for (int ch = 0; ch < audio_source.channels(); ++ch) { |
@@ -56,13 +51,10 @@ class WebRtcAudioCapturer::TrackOwner |
: delegate_(track) {} |
void Capture(const media::AudioBus& audio_bus, |
- base::TimeTicks estimated_capture_time, |
- bool force_report_nonzero_energy) { |
+ base::TimeTicks estimated_capture_time) { |
base::AutoLock lock(lock_); |
if (delegate_) { |
- delegate_->Capture(audio_bus, |
- estimated_capture_time, |
- force_report_nonzero_energy); |
+ delegate_->Capture(audio_bus, estimated_capture_time); |
} |
} |
@@ -124,14 +116,14 @@ class WebRtcAudioCapturer::TrackOwner |
}; |
// static |
-scoped_refptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer( |
+scoped_ptr<WebRtcAudioCapturer> WebRtcAudioCapturer::CreateCapturer( |
int render_frame_id, |
const StreamDeviceInfo& device_info, |
const blink::WebMediaConstraints& constraints, |
WebRtcAudioDeviceImpl* audio_device, |
MediaStreamAudioSource* audio_source) { |
- scoped_refptr<WebRtcAudioCapturer> capturer = new WebRtcAudioCapturer( |
- render_frame_id, device_info, constraints, audio_device, audio_source); |
+ scoped_ptr<WebRtcAudioCapturer> capturer(new WebRtcAudioCapturer( |
+ render_frame_id, device_info, constraints, audio_device, audio_source)); |
if (capturer->Initialize()) |
return capturer; |
@@ -207,27 +199,10 @@ bool WebRtcAudioCapturer::Initialize() { |
device_info_.device.input.sample_rate); |
} |
- // Initialize the buffer size to zero, which means it wasn't specified. |
- // If it is out of range, we return it to zero. |
- int buffer_size_ms = 0; |
- int buffer_size_samples = 0; |
- GetConstraintValueAsInteger(constraints_, kAudioLatency, &buffer_size_ms); |
- if (buffer_size_ms < kMinAudioLatencyMs || |
- buffer_size_ms > kMaxAudioLatencyMs) { |
- DVLOG(1) << "Ignoring out of range buffer size " << buffer_size_ms; |
- } else { |
- buffer_size_samples = |
- device_info_.device.input.sample_rate * buffer_size_ms / 1000; |
- } |
- DVLOG_IF(1, buffer_size_samples > 0) |
- << "Custom audio buffer size: " << buffer_size_samples << " samples"; |
- |
// Create and configure the default audio capturing source. |
SetCapturerSourceInternal( |
- AudioDeviceFactory::NewInputDevice(render_frame_id_), |
- channel_layout, |
- device_info_.device.input.sample_rate, |
- buffer_size_samples); |
+ AudioDeviceFactory::NewInputDevice(render_frame_id_), channel_layout, |
+ device_info_.device.input.sample_rate); |
// Add the capturer to the WebRtcAudioDeviceImpl since it needs some hardware |
// information from the capturer. |
@@ -254,7 +229,8 @@ WebRtcAudioCapturer::WebRtcAudioCapturer( |
volume_(0), |
peer_connection_mode_(false), |
audio_device_(audio_device), |
- audio_source_(audio_source) { |
+ audio_source_(audio_source), |
+ weak_factory_(this) { |
DVLOG(1) << "WebRtcAudioCapturer::WebRtcAudioCapturer()"; |
} |
@@ -266,9 +242,14 @@ WebRtcAudioCapturer::~WebRtcAudioCapturer() { |
} |
void WebRtcAudioCapturer::AddTrack(WebRtcLocalAudioTrack* track) { |
+ DCHECK(thread_checker_.CalledOnValidThread()); |
DCHECK(track); |
DVLOG(1) << "WebRtcAudioCapturer::AddTrack()"; |
+ track->AddStopObserver(base::Bind(&WebRtcAudioCapturer::RemoveTrack, |
+ weak_factory_.GetWeakPtr(), track)); |
+ track->SetLevel(level_calculator_.level()); |
+ |
{ |
base::AutoLock auto_lock(lock_); |
// Verify that |track| is not already added to the list. |
@@ -312,8 +293,7 @@ void WebRtcAudioCapturer::RemoveTrack(WebRtcLocalAudioTrack* track) { |
void WebRtcAudioCapturer::SetCapturerSourceInternal( |
const scoped_refptr<media::AudioCapturerSource>& source, |
media::ChannelLayout channel_layout, |
- int sample_rate, |
- int buffer_size) { |
+ int sample_rate) { |
DCHECK(thread_checker_.CalledOnValidThread()); |
DVLOG(1) << "SetCapturerSource(channel_layout=" << channel_layout << "," |
<< "sample_rate=" << sample_rate << ")"; |
@@ -334,19 +314,16 @@ void WebRtcAudioCapturer::SetCapturerSourceInternal( |
if (old_source.get()) |
old_source->Stop(); |
- // If the buffer size is zero, it has not been specified. |
- // We either default to 10ms, or use the hardware buffer size. |
- if (buffer_size == 0) |
- buffer_size = GetBufferSize(sample_rate); |
- |
// Dispatch the new parameters both to the sink(s) and to the new source, |
// also apply the new |constraints|. |
// The idea is to get rid of any dependency of the microphone parameters |
// which would normally be used by default. |
// bits_per_sample is always 16 for now. |
media::AudioParameters params(media::AudioParameters::AUDIO_PCM_LOW_LATENCY, |
- channel_layout, sample_rate, 16, buffer_size); |
+ channel_layout, sample_rate, 16, |
+ GetBufferSize(sample_rate)); |
params.set_effects(device_info_.device.input.effects); |
+ DCHECK(params.IsValid()); |
{ |
base::AutoLock auto_lock(lock_); |
@@ -358,7 +335,7 @@ void WebRtcAudioCapturer::SetCapturerSourceInternal( |
} |
if (source.get()) |
- source->Initialize(params, this, session_id()); |
+ source->Initialize(params, this, device_info_.session_id); |
Start(); |
} |
@@ -394,8 +371,7 @@ void WebRtcAudioCapturer::EnablePeerConnectionMode() { |
// WebRtc native buffer size. |
SetCapturerSourceInternal(AudioDeviceFactory::NewInputDevice(render_frame_id), |
input_params.channel_layout(), |
- input_params.sample_rate(), |
- 0); |
+ input_params.sample_rate()); |
} |
void WebRtcAudioCapturer::Start() { |
@@ -432,6 +408,10 @@ void WebRtcAudioCapturer::Stop() { |
if (audio_device_) |
audio_device_->RemoveAudioCapturer(this); |
+ // Invalidate the weak pointers since we don't need the tracks to call our |
+ // RemoveTrack() method when their Stop() method is called. |
+ weak_factory_.InvalidateWeakPtrs(); |
+ |
for (TrackList::ItemList::const_iterator it = tracks.begin(); |
it != tracks.end(); |
++it) { |
@@ -523,10 +503,9 @@ void WebRtcAudioCapturer::Capture(const media::AudioBus* audio_source, |
track->SetAudioProcessor(audio_processor_); |
} |
- // Figure out if the pre-processed data has any energy or not, the |
- // information will be passed to the track to force the calculator |
- // to report energy in case the post-processed data is zeroed by the audio |
- // processing. |
+ // Figure out if the pre-processed data has any energy or not. This |
+ // information will be passed to the level calculator to force it to report |
+ // energy in case the post-processed data is zeroed by the audio processing. |
const bool force_report_nonzero_energy = HasDataEnergy(*audio_source); |
// Push the data to the processor for processing. |
@@ -543,13 +522,13 @@ void WebRtcAudioCapturer::Capture(const media::AudioBus* audio_source, |
current_volume, key_pressed, |
&processed_data, &processed_data_audio_delay, &new_volume)) { |
DCHECK(processed_data); |
+ |
+ level_calculator_.Calculate(*processed_data, force_report_nonzero_energy); |
+ |
const base::TimeTicks processed_data_capture_time = |
reference_clock_snapshot - processed_data_audio_delay; |
- for (const auto& track : tracks) { |
- track->Capture(*processed_data, |
- processed_data_capture_time, |
- force_report_nonzero_energy); |
- } |
+ for (const auto& track : tracks) |
+ track->Capture(*processed_data, processed_data_capture_time); |
if (new_volume) { |
SetVolume(new_volume); |
@@ -564,28 +543,9 @@ void WebRtcAudioCapturer::OnCaptureError(const std::string& message) { |
WebRtcLogMessage("WAC::OnCaptureError: " + message); |
} |
-media::AudioParameters WebRtcAudioCapturer::source_audio_parameters() const { |
+media::AudioParameters WebRtcAudioCapturer::GetInputFormat() const { |
base::AutoLock auto_lock(lock_); |
- return audio_processor_.get() ? audio_processor_->InputFormat() |
- : media::AudioParameters(); |
-} |
- |
-bool WebRtcAudioCapturer::GetPairedOutputParameters( |
- int* session_id, |
- int* output_sample_rate, |
- int* output_frames_per_buffer) const { |
- // Don't set output parameters unless all of them are valid. |
- if (device_info_.session_id <= 0 || |
- !device_info_.device.matched_output.sample_rate || |
- !device_info_.device.matched_output.frames_per_buffer) |
- return false; |
- |
- *session_id = device_info_.session_id; |
- *output_sample_rate = device_info_.device.matched_output.sample_rate; |
- *output_frames_per_buffer = |
- device_info_.device.matched_output.frames_per_buffer; |
- |
- return true; |
+ return audio_processor_->InputFormat(); |
} |
int WebRtcAudioCapturer::GetBufferSize(int sample_rate) const { |
@@ -618,10 +578,8 @@ void WebRtcAudioCapturer::SetCapturerSource( |
const scoped_refptr<media::AudioCapturerSource>& source, |
media::AudioParameters params) { |
// Create a new audio stream as source which uses the new source. |
- SetCapturerSourceInternal(source, |
- params.channel_layout(), |
- params.sample_rate(), |
- 0); |
+ SetCapturerSourceInternal(source, params.channel_layout(), |
+ params.sample_rate()); |
} |
} // namespace content |