Index: third_party/WebKit/Source/platform/audio/AudioDestination.cpp |
diff --git a/third_party/WebKit/Source/platform/audio/AudioDestination.cpp b/third_party/WebKit/Source/platform/audio/AudioDestination.cpp |
index 733bd66cfd913e0e1c7cda19e1afdc63b003700a..ebfac7705996db6d1c021fb821e1e7a25a38525a 100644 |
--- a/third_party/WebKit/Source/platform/audio/AudioDestination.cpp |
+++ b/third_party/WebKit/Source/platform/audio/AudioDestination.cpp |
@@ -70,7 +70,9 @@ AudioDestination::AudioDestination(AudioIOCallback& callback, |
m_renderBus( |
AudioBus::create(numberOfOutputChannels, renderBufferSize, false)), |
m_sampleRate(sampleRate), |
- m_isPlaying(false) { |
+ m_isPlaying(false), |
+ m_framesElapsed(0), |
+ m_outputPosition() { |
// Histogram for audioHardwareBufferSize |
DEFINE_STATIC_LOCAL(SparseHistogram, hardwareBufferSizeHistogram, |
("WebAudio.AudioDestination.HardwareBufferSize")); |
@@ -165,7 +167,10 @@ unsigned long AudioDestination::maxChannelCount() { |
void AudioDestination::render(const WebVector<float*>& sourceData, |
const WebVector<float*>& audioData, |
- size_t numberOfFrames) { |
+ size_t numberOfFrames, |
+ double delay, |
+ double delayTimestamp, |
+ size_t priorFramesSkipped) { |
bool isNumberOfChannelsGood = audioData.size() == m_numberOfOutputChannels; |
if (!isNumberOfChannelsGood) { |
ASSERT_NOT_REACHED(); |
@@ -178,6 +183,13 @@ void AudioDestination::render(const WebVector<float*>& sourceData, |
return; |
} |
+ m_framesElapsed -= std::min(m_framesElapsed, priorFramesSkipped); |
+ double outputPosition = |
+ m_framesElapsed / static_cast<double>(m_sampleRate) - delay; |
+ m_outputPosition.position = outputPosition; |
+ m_outputPosition.timestamp = delayTimestamp; |
+ m_outputPositionReceivedTimestamp = base::TimeTicks::Now(); |
+ |
// Buffer optional live input. |
if (sourceData.size() >= 2) { |
// FIXME: handle multi-channel input and don't hard-code to stereo. |
@@ -191,6 +203,8 @@ void AudioDestination::render(const WebVector<float*>& sourceData, |
m_renderBus->setChannelMemory(i, audioData[i], numberOfFrames); |
m_fifo->consume(m_renderBus.get(), numberOfFrames); |
+ |
+ m_framesElapsed += numberOfFrames; |
} |
void AudioDestination::provideInput(AudioBus* bus, size_t framesToProcess) { |
@@ -200,7 +214,24 @@ void AudioDestination::provideInput(AudioBus* bus, size_t framesToProcess) { |
sourceBus = m_inputBus.get(); |
} |
- m_callback.render(sourceBus, bus, framesToProcess); |
+ AudioIOPosition outputPosition = m_outputPosition; |
+ |
+ // If the platform buffer is more than two times longer than |framesToProcess|, |
+ // we do not want the output position to get stuck, so we advance it |
+ // using the elapsed time from the moment it was initially obtained. |
+ if (m_callbackBufferSize > framesToProcess * 2) { |
Raymond Toy
2016/11/04 17:51:40
What happens if framesToProcess <= m_callbackBuffe
Mikhail
2016/11/07 18:28:03
the 'outputPosition' is initialized in 'render' fu
miu
2016/11/07 23:49:40
OOC, why not unconditionally advance outputPositio
Raymond Toy
2016/11/08 20:47:01
Ah, that makes sense. (Note that a huge number of
Mikhail
2016/11/28 15:15:59
I think it is fine as long as linear estimation is
Mikhail
2016/11/28 15:15:59
I am more keen on accuracy, as far as I understand
miu
2016/11/29 20:51:04
Right, but consider:
1. The outputPosition will a
|
+ double delta = (base::TimeTicks::Now() - m_outputPositionReceivedTimestamp) |
+ .InSecondsF(); |
+ outputPosition.position += delta; |
+ outputPosition.timestamp += delta; |
+ } |
+ |
+ // Some implementations give only a rough estimate of |delay|, so |
+ // the estimated |outputPosition| value might be negative. |
+ if (outputPosition.position < 0.0) |
+ outputPosition.position = 0.0; |
+ |
+ m_callback.render(sourceBus, bus, framesToProcess, outputPosition); |
} |
} // namespace blink |