Index: content/renderer/media/audio_renderer_mixer_manager.cc |
diff --git a/content/renderer/media/audio_renderer_mixer_manager.cc b/content/renderer/media/audio_renderer_mixer_manager.cc |
index d03e615b66ed55d18cf71de6e970b1366f099656..789a7d8b7232bc83827a7291cef80ea0370c6b4e 100644 |
--- a/content/renderer/media/audio_renderer_mixer_manager.cc |
+++ b/content/renderer/media/audio_renderer_mixer_manager.cc |
@@ -4,6 +4,7 @@ |
#include "content/renderer/media/audio_renderer_mixer_manager.h" |
+#include <algorithm> |
#include <string> |
#include "base/bind.h" |
@@ -16,6 +17,74 @@ |
#include "media/base/audio_renderer_mixer.h" |
#include "media/base/audio_renderer_mixer_input.h" |
+namespace { |
+// Calculates the mixer output parameters based on the input parameters and |
+// the audio hardware configuration. |
+media::AudioParameters GetMixerOutputParams( |
+ const media::AudioParameters& input_params, |
+ const media::AudioParameters& hardware_params, |
+ media::AudioLatency::LatencyType latency) { |
+ int output_sample_rate = input_params.sample_rate(); |
+ bool valid_not_fake_hardware_params = |
+ hardware_params.format() != media::AudioParameters::AUDIO_FAKE && |
+ hardware_params.IsValid(); |
+  int preferred_high_latency_output_buffer_size = 0; |
+ |
+#if !defined(OS_CHROMEOS) |
+  // On ChromeOS, as well as when a fake device is used, we can rely on the |
+  // playback device to handle resampling, so don't waste cycles on it here. |
+  // On other systems, if the hardware parameters are valid and the device is |
+  // not fake, resample to the hardware sample rate. Otherwise, pass the input |
+  // sample rate through and let the browser side handle automatic fallback. |
+ if (valid_not_fake_hardware_params) { |
+ output_sample_rate = hardware_params.sample_rate(); |
+    preferred_high_latency_output_buffer_size = |
+ hardware_params.frames_per_buffer(); |
+ } |
+#endif |
+ |
+ int output_buffer_size = input_params.frames_per_buffer(); |
o1ka
2016/06/23 16:36:15
Note that here we rely on the current situation wh
chcunningham
2016/06/27 23:12:24
You bring up an interesting point - now I'm confus
o1ka
2016/06/28 13:04:57
Unfortunately I did not let you know: I changed it
chcunningham
2016/06/28 18:43:49
Ack - my bad. This method looks good in the new PS
DaleCurtis
2016/06/28 21:44:07
I don't think it matters either way; the size is "
|
+ |
+ if (output_sample_rate != input_params.sample_rate()) { |
chcunningham
2016/06/27 23:12:25
Why do we only go down this path when in/out sampl
|
+ // Adjust output buffer size according to the latency requirement. |
+ switch (latency) { |
+ case media::AudioLatency::LATENCY_EXACT_MS: |
o1ka
2016/06/21 15:16:40
It's not used right now, but we'll need it for htt
|
+ // Keep the provided buffer duration. |
+ output_buffer_size = input_params.GetBufferDuration().InMicroseconds() * |
+ output_sample_rate / |
+ base::Time::kMicrosecondsPerSecond; |
+ break; |
+ case media::AudioLatency::LATENCY_INTERACTIVE: |
+      // WebAudio should provide the correct callback size in frames; it does |
+      // not depend on the sample rate. |
+ DCHECK_EQ(output_buffer_size, |
+ media::AudioLatency::GetInteractiveBufferSize( |
o1ka
2016/06/21 15:16:41
Also not sure about this check, probably should be
chcunningham
2016/06/22 04:34:07
I think this would be wrong for android, where Get
o1ka
2016/06/23 16:36:15
Currently WebAudio sets the buffer size in AudioDe
chcunningham
2016/06/27 23:12:25
I think this gets at my comment on line 46 above -
o1ka
2016/06/28 13:04:57
Agreed.
|
+ hardware_params.frames_per_buffer())); |
+ break; |
+ case media::AudioLatency::LATENCY_RTC: |
+ output_buffer_size = media::AudioLatency::GetRtcBufferSize( |
+ output_sample_rate, valid_not_fake_hardware_params |
+ ? hardware_params.frames_per_buffer() |
+ : 0); |
+ break; |
+ case media::AudioLatency::LATENCY_PLAYBACK: |
chcunningham
2016/06/22 04:34:07
Could you combine this case with the default? Woul
|
+ output_buffer_size = media::AudioLatency::GetHighLatencyBufferSize( |
+            output_sample_rate, preferred_high_latency_output_buffer_size); |
+ break; |
+ default: |
+ DCHECK(false); |
chcunningham
2016/06/22 04:34:07
If you don't combine with LATENCY_PLAYBACK, this s
o1ka
2016/06/23 16:36:15
I would prefer to not combine, because it may be t
chcunningham
2016/06/27 23:12:25
Sounds good.
|
+ } |
+ } |
+ |
+ // Force to 16-bit output for now since we know that works everywhere; |
+ // ChromeOS does not support other bit depths. |
+ return media::AudioParameters(input_params.format(), |
+ input_params.channel_layout(), |
+ output_sample_rate, 16, output_buffer_size); |
+} |
+ |
+} // namespace |
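A standalone sketch (not part of this CL) of the LATENCY_EXACT_MS arithmetic above: the buffer duration is held constant while the frame count is converted to the output sample rate. The numbers below are hypothetical.

  #include <cstdint>
  #include <iostream>

  int main() {
    // Hypothetical input: 441 frames at 44.1 kHz (10 ms), resampled to 48 kHz.
    const int64_t input_frames = 441;
    const int64_t input_sample_rate = 44100;
    const int64_t output_sample_rate = 48000;
    const int64_t kMicrosecondsPerSecond = 1000000;

    // Same math as the LATENCY_EXACT_MS branch: duration in microseconds,
    // then back to frames at the output rate.
    const int64_t duration_us =
        input_frames * kMicrosecondsPerSecond / input_sample_rate;
    const int64_t output_buffer_size =
        duration_us * output_sample_rate / kMicrosecondsPerSecond;
    std::cout << output_buffer_size << std::endl;  // Prints 480.
  }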
+ |
namespace content { |
AudioRendererMixerManager::AudioRendererMixerManager( |
@@ -40,7 +109,8 @@ media::AudioRendererMixerInput* AudioRendererMixerManager::CreateInput( |
int source_render_frame_id, |
int session_id, |
const std::string& device_id, |
- const url::Origin& security_origin) { |
+ const url::Origin& security_origin, |
+ media::AudioLatency::LatencyType latency) { |
chcunningham
2016/06/22 04:34:07
Has the spec settled on latency coming in through
o1ka
2016/06/23 16:36:15
Yes, it is passed in AudioContextOptions as a cons
chcunningham
2016/06/27 23:12:25
Acknowledged.
|
// AudioRendererMixerManager lives on the renderer thread and is destroyed on |
// renderer thread destruction, so it's safe to pass its pointer to a mixer |
// input. |
@@ -52,19 +122,20 @@ media::AudioRendererMixerInput* AudioRendererMixerManager::CreateInput( |
security_origin) |
.device_id() |
: device_id, |
- security_origin); |
+ security_origin, latency); |
} |
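With the new signature, callers of CreateInput() pass the latency hint explicitly. A hypothetical call-site sketch (variable names are illustrative, not from this CL):

  // E.g. when wiring up an RTC audio track in the renderer.
  media::AudioRendererMixerInput* input = mixer_manager->CreateInput(
      source_render_frame_id, /*session_id=*/0, device_id, security_origin,
      media::AudioLatency::LATENCY_RTC);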
media::AudioRendererMixer* AudioRendererMixerManager::GetMixer( |
int source_render_frame_id, |
- const media::AudioParameters& params, |
+ const media::AudioParameters& input_params, |
+ media::AudioLatency::LatencyType latency, |
const std::string& device_id, |
const url::Origin& security_origin, |
media::OutputDeviceStatus* device_status) { |
// Effects are not passed through to output creation, so ensure none are set. |
- DCHECK_EQ(params.effects(), media::AudioParameters::NO_EFFECTS); |
+ DCHECK_EQ(input_params.effects(), media::AudioParameters::NO_EFFECTS); |
- const MixerKey key(source_render_frame_id, params, device_id, |
+ const MixerKey key(source_render_frame_id, input_params, latency, device_id, |
security_origin); |
base::AutoLock auto_lock(mixers_lock_); |
@@ -74,6 +145,7 @@ media::AudioRendererMixer* AudioRendererMixerManager::GetMixer( |
*device_status = media::OUTPUT_DEVICE_STATUS_OK; |
it->second.ref_count++; |
+ DVLOG(1) << "Reusing mixer: " << it->second.mixer; |
return it->second.mixer; |
} |
@@ -89,50 +161,26 @@ media::AudioRendererMixer* AudioRendererMixerManager::GetMixer( |
return nullptr; |
} |
- // On ChromeOS as well as when a fake device is used, we can rely on the |
- // playback device to handle resampling, so don't waste cycles on it here. |
- int sample_rate = params.sample_rate(); |
- int buffer_size = |
- media::AudioHardwareConfig::GetHighLatencyBufferSize(sample_rate, 0); |
- |
-#if !defined(OS_CHROMEOS) |
- const media::AudioParameters& hardware_params = device_info.output_params(); |
- |
- // If we have valid, non-fake hardware parameters, use them. Otherwise, pass |
- // on the input params and let the browser side handle automatic fallback. |
- if (hardware_params.format() != media::AudioParameters::AUDIO_FAKE && |
- hardware_params.IsValid()) { |
- sample_rate = hardware_params.sample_rate(); |
- buffer_size = media::AudioHardwareConfig::GetHighLatencyBufferSize( |
- sample_rate, hardware_params.frames_per_buffer()); |
- } |
-#endif |
- |
- // Create output parameters based on the audio hardware configuration for |
- // passing on to the output sink. Force to 16-bit output for now since we |
- // know that works everywhere; ChromeOS does not support other bit depths. |
- media::AudioParameters output_params( |
- media::AudioParameters::AUDIO_PCM_LOW_LATENCY, params.channel_layout(), |
- sample_rate, 16, buffer_size); |
- DCHECK(output_params.IsValid()); |
- |
+ const media::AudioParameters& mixer_output_params = |
+ GetMixerOutputParams(input_params, device_info.output_params(), latency); |
media::AudioRendererMixer* mixer = |
- new media::AudioRendererMixer(output_params, sink); |
+ new media::AudioRendererMixer(mixer_output_params, sink); |
AudioRendererMixerReference mixer_reference = {mixer, 1, sink.get()}; |
mixers_[key] = mixer_reference; |
+ DVLOG(1) << "GetMixer: mixer " << mixer << " latency " << latency |
chcunningham
2016/06/22 04:34:07
nit: use __FUNCTION__.
nit: can you format this l
o1ka
2016/06/23 16:36:15
Done.
|
+ << "\n input " << input_params.AsHumanReadableString() << "\noutput " |
+ << mixer_output_params.AsHumanReadableString(); |
return mixer; |
} |
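Since the latency type is now part of MixerKey, two GetMixer() calls that differ only in latency produce separate mixers. A hypothetical usage sketch (variable names are illustrative):

  media::OutputDeviceStatus status = media::OUTPUT_DEVICE_STATUS_OK;
  media::AudioRendererMixer* rtc_mixer = manager->GetMixer(
      frame_id, params, media::AudioLatency::LATENCY_RTC, device_id, origin,
      &status);
  media::AudioRendererMixer* playback_mixer = manager->GetMixer(
      frame_id, params, media::AudioLatency::LATENCY_PLAYBACK, device_id,
      origin, &status);
  // rtc_mixer != playback_mixer: the MixerKeys differ only in latency, so two
  // mixers are created and tracked under separate keys.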
void AudioRendererMixerManager::ReturnMixer( |
- int source_render_frame_id, |
- const media::AudioParameters& params, |
- const std::string& device_id, |
- const url::Origin& security_origin) { |
- const MixerKey key(source_render_frame_id, params, device_id, |
- security_origin); |
+ const media::AudioRendererMixer* mixer) { |
base::AutoLock auto_lock(mixers_lock_); |
- |
- AudioRendererMixerMap::iterator it = mixers_.find(key); |
+ AudioRendererMixerMap::iterator it = std::find_if( |
+ mixers_.begin(), mixers_.end(), |
+      [mixer](const AudioRendererMixerMap::value_type& val) { |
+ return val.second.mixer == mixer; |
+ }); |
DCHECK(it != mixers_.end()); |
// Only remove the mixer if AudioRendererMixerManager is the last owner. |
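ReturnMixer() now does a reverse lookup: it searches the map by the mixer pointer stored in the value instead of rebuilding the key. A minimal illustration of the pattern (not Chromium code), using plain standard-library types:

  #include <algorithm>
  #include <map>
  #include <string>

  struct Ref {
    int* mixer;  // Stand-in for media::AudioRendererMixer*.
    int ref_count;
  };
  using RefMap = std::map<std::string, Ref>;

  // Finds the entry whose value holds |mixer|; returns refs.end() if absent.
  RefMap::iterator FindByMixer(RefMap& refs, const int* mixer) {
    return std::find_if(refs.begin(), refs.end(),
                        [mixer](const RefMap::value_type& val) {
                          return val.second.mixer == mixer;
                        });
  }

The lookup is O(n) in the number of live mixers, which is presumably acceptable given how few mixers a renderer typically holds.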
@@ -157,10 +205,12 @@ media::OutputDeviceInfo AudioRendererMixerManager::GetOutputDeviceInfo( |
AudioRendererMixerManager::MixerKey::MixerKey( |
int source_render_frame_id, |
const media::AudioParameters& params, |
+ media::AudioLatency::LatencyType latency, |
const std::string& device_id, |
const url::Origin& security_origin) |
: source_render_frame_id(source_render_frame_id), |
params(params), |
+ latency(latency), |
device_id(device_id), |
security_origin(security_origin) {} |
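Because MixerKey now carries the latency type, the key comparison (defined in the header, which is not part of this diff) also has to take latency into account; otherwise inputs requesting different latencies would share a mixer. A hypothetical sketch of such a comparator, for illustration only:

  // Illustrative only; the ordering of |params|, |device_id| and
  // |security_origin| follows whatever the existing comparator already does.
  struct MixerKeyCompare {
    bool operator()(const MixerKey& a, const MixerKey& b) const {
      if (a.source_render_frame_id != b.source_render_frame_id)
        return a.source_render_frame_id < b.source_render_frame_id;
      if (a.latency != b.latency)
        return a.latency < b.latency;
      if (a.device_id != b.device_id)
        return a.device_id < b.device_id;
      // ... plus params and security_origin, as in the existing comparator.
      return false;
    }
  };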