Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2010 Google Inc. All rights reserved. | 2 * Copyright (C) 2010 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
| 6 * are met: | 6 * are met: |
| 7 * | 7 * |
| 8 * 1. Redistributions of source code must retain the above copyright | 8 * 1. Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * 2. Redistributions in binary form must reproduce the above copyright | 10 * 2. Redistributions in binary form must reproduce the above copyright |
| (...skipping 52 matching lines...) | |
| 63 unsigned numberOfInputChannels, | 63 unsigned numberOfInputChannels, |
| 64 unsigned numberOfOutputChannels, | 64 unsigned numberOfOutputChannels, |
| 65 float sampleRate, | 65 float sampleRate, |
| 66 PassRefPtr<SecurityOrigin> securityOrigin) | 66 PassRefPtr<SecurityOrigin> securityOrigin) |
| 67 : m_callback(callback), | 67 : m_callback(callback), |
| 68 m_numberOfOutputChannels(numberOfOutputChannels), | 68 m_numberOfOutputChannels(numberOfOutputChannels), |
| 69 m_inputBus(AudioBus::create(numberOfInputChannels, renderBufferSize)), | 69 m_inputBus(AudioBus::create(numberOfInputChannels, renderBufferSize)), |
| 70 m_renderBus( | 70 m_renderBus( |
| 71 AudioBus::create(numberOfOutputChannels, renderBufferSize, false)), | 71 AudioBus::create(numberOfOutputChannels, renderBufferSize, false)), |
| 72 m_sampleRate(sampleRate), | 72 m_sampleRate(sampleRate), |
| 73 m_isPlaying(false) { | 73 m_isPlaying(false), |
| 74 m_framesElapsed(0), | |
| 75 m_outputPosition() { | |
| 74 // Histogram for audioHardwareBufferSize | 76 // Histogram for audioHardwareBufferSize |
| 75 DEFINE_STATIC_LOCAL(SparseHistogram, hardwareBufferSizeHistogram, | 77 DEFINE_STATIC_LOCAL(SparseHistogram, hardwareBufferSizeHistogram, |
| 76 ("WebAudio.AudioDestination.HardwareBufferSize")); | 78 ("WebAudio.AudioDestination.HardwareBufferSize")); |
| 77 // Histogram for the actual callback size used. Typically, this is the same | 79 // Histogram for the actual callback size used. Typically, this is the same |
| 78 // as audioHardwareBufferSize, but can be adjusted depending on some | 80 // as audioHardwareBufferSize, but can be adjusted depending on some |
| 79 // heuristics below. | 81 // heuristics below. |
| 80 DEFINE_STATIC_LOCAL(SparseHistogram, callbackBufferSizeHistogram, | 82 DEFINE_STATIC_LOCAL(SparseHistogram, callbackBufferSizeHistogram, |
| 81 ("WebAudio.AudioDestination.CallbackBufferSize")); | 83 ("WebAudio.AudioDestination.CallbackBufferSize")); |
| 82 | 84 |
| 83 // Use the optimal buffer size recommended by the audio backend. | 85 // Use the optimal buffer size recommended by the audio backend. |
| (...skipping 74 matching lines...) | |
| 158 float AudioDestination::hardwareSampleRate() { | 160 float AudioDestination::hardwareSampleRate() { |
| 159 return static_cast<float>(Platform::current()->audioHardwareSampleRate()); | 161 return static_cast<float>(Platform::current()->audioHardwareSampleRate()); |
| 160 } | 162 } |
| 161 | 163 |
| 162 unsigned long AudioDestination::maxChannelCount() { | 164 unsigned long AudioDestination::maxChannelCount() { |
| 163 return static_cast<float>(Platform::current()->audioHardwareOutputChannels()); | 165 return static_cast<float>(Platform::current()->audioHardwareOutputChannels()); |
| 164 } | 166 } |
| 165 | 167 |
| 166 void AudioDestination::render(const WebVector<float*>& sourceData, | 168 void AudioDestination::render(const WebVector<float*>& sourceData, |
| 167 const WebVector<float*>& audioData, | 169 const WebVector<float*>& audioData, |
| 168 size_t numberOfFrames) { | 170 size_t numberOfFrames, |
| 171 double delay, | |
| 172 double delayTimestamp, | |
| 173 size_t priorFramesSkipped) { | |
| 169 bool isNumberOfChannelsGood = audioData.size() == m_numberOfOutputChannels; | 174 bool isNumberOfChannelsGood = audioData.size() == m_numberOfOutputChannels; |
| 170 if (!isNumberOfChannelsGood) { | 175 if (!isNumberOfChannelsGood) { |
| 171 ASSERT_NOT_REACHED(); | 176 ASSERT_NOT_REACHED(); |
| 172 return; | 177 return; |
| 173 } | 178 } |
| 174 | 179 |
| 175 bool isBufferSizeGood = numberOfFrames == m_callbackBufferSize; | 180 bool isBufferSizeGood = numberOfFrames == m_callbackBufferSize; |
| 176 if (!isBufferSizeGood) { | 181 if (!isBufferSizeGood) { |
| 177 ASSERT_NOT_REACHED(); | 182 ASSERT_NOT_REACHED(); |
| 178 return; | 183 return; |
| 179 } | 184 } |
| 180 | 185 |
| 186 m_framesElapsed -= std::min(m_framesElapsed, priorFramesSkipped); | |
| 187 double outputPosition = | |
| 188 m_framesElapsed / static_cast<double>(m_sampleRate) - delay; | |
| 189 m_outputPosition.position = outputPosition; | |
| 190 m_outputPosition.timestamp = delayTimestamp; | |
| 191 m_outputPositionReceivedTimestamp = base::TimeTicks::Now(); | |
| 192 | |
| 181 // Buffer optional live input. | 193 // Buffer optional live input. |
| 182 if (sourceData.size() >= 2) { | 194 if (sourceData.size() >= 2) { |
| 183 // FIXME: handle multi-channel input and don't hard-code to stereo. | 195 // FIXME: handle multi-channel input and don't hard-code to stereo. |
| 184 RefPtr<AudioBus> wrapperBus = AudioBus::create(2, numberOfFrames, false); | 196 RefPtr<AudioBus> wrapperBus = AudioBus::create(2, numberOfFrames, false); |
| 185 wrapperBus->setChannelMemory(0, sourceData[0], numberOfFrames); | 197 wrapperBus->setChannelMemory(0, sourceData[0], numberOfFrames); |
| 186 wrapperBus->setChannelMemory(1, sourceData[1], numberOfFrames); | 198 wrapperBus->setChannelMemory(1, sourceData[1], numberOfFrames); |
| 187 m_inputFifo->push(wrapperBus.get()); | 199 m_inputFifo->push(wrapperBus.get()); |
| 188 } | 200 } |
| 189 | 201 |
| 190 for (unsigned i = 0; i < m_numberOfOutputChannels; ++i) | 202 for (unsigned i = 0; i < m_numberOfOutputChannels; ++i) |
| 191 m_renderBus->setChannelMemory(i, audioData[i], numberOfFrames); | 203 m_renderBus->setChannelMemory(i, audioData[i], numberOfFrames); |
| 192 | 204 |
| 193 m_fifo->consume(m_renderBus.get(), numberOfFrames); | 205 m_fifo->consume(m_renderBus.get(), numberOfFrames); |
| 206 | |
| 207 m_framesElapsed += numberOfFrames; | |
| 194 } | 208 } |
| 195 | 209 |
| 196 void AudioDestination::provideInput(AudioBus* bus, size_t framesToProcess) { | 210 void AudioDestination::provideInput(AudioBus* bus, size_t framesToProcess) { |
| 197 AudioBus* sourceBus = nullptr; | 211 AudioBus* sourceBus = nullptr; |
| 198 if (m_inputFifo->framesInFifo() >= framesToProcess) { | 212 if (m_inputFifo->framesInFifo() >= framesToProcess) { |
| 199 m_inputFifo->consume(m_inputBus.get(), framesToProcess); | 213 m_inputFifo->consume(m_inputBus.get(), framesToProcess); |
| 200 sourceBus = m_inputBus.get(); | 214 sourceBus = m_inputBus.get(); |
| 201 } | 215 } |
| 202 | 216 |
| 203 m_callback.render(sourceBus, bus, framesToProcess); | 217 AudioIOPosition outputPosition = m_outputPosition; |
| 218 | |
| 219 // If the platform buffer is more than two times longer than |framesToProcess|, | |
| 220 // we do not want the output position to get stuck, so we advance it | |
| 221 // using the elapsed time from the moment it was initially obtained. | |
| 222 if (m_callbackBufferSize > framesToProcess * 2) { | |

Raymond Toy (2016/11/04 17:51:40): What happens if framesToProcess <= m_callbackBufferSize…
Mikhail (2016/11/07 18:28:03): the 'outputPosition' is initialized in 'render' function…
miu (2016/11/07 23:49:40): OOC, why not unconditionally advance outputPosition…
Raymond Toy (2016/11/08 20:47:01): Ah, that makes sense. (Note that a huge number of…
Mikhail (2016/11/28 15:15:59): I think it is fine as long as linear estimation is…
Mikhail (2016/11/28 15:15:59): I am more keen on accuracy, as far as I understand…
miu (2016/11/29 20:51:04): Right, but consider: 1. The outputPosition will a…

| 223 double delta = (base::TimeTicks::Now() - m_outputPositionReceivedTimestamp) | |
| 224 .InSecondsF(); | |
| 225 outputPosition.position += delta; | |
| 226 outputPosition.timestamp += delta; | |
| 227 } | |
| 228 | |
| 229 // Some implementations give only a rough estimation of |delay|, so | |
| 230 // the estimated |outputPosition| value might be negative. | |
| 231 if (outputPosition.position < 0.0) | |
| 232 outputPosition.position = 0.0; | |
| 233 | |
| 234 m_callback.render(sourceBus, bus, framesToProcess, outputPosition); | |
| 204 } | 235 } |
| 205 | 236 |
| 206 } // namespace blink | 237 } // namespace blink |
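A minimal standalone sketch of the output-position bookkeeping discussed in the thread above, for readers following along. This is not the Blink code: it substitutes std::chrono::steady_clock for base::TimeTicks, drops the FIFO and histogram machinery, and the OutputPosition and PositionTracker names are illustrative only. The device callback records how many frames have been handed to the hardware and when the delay estimate was captured; each render quantum then reads that estimate, advances it by elapsed wall-clock time when the device buffer spans several quanta, and clamps a negative position that a coarse |delay| estimate can produce.

```cpp
// Illustrative sketch only -- mirrors the patch's output-position logic with
// std::chrono in place of base::TimeTicks; names are not from the Blink tree.
#include <algorithm>
#include <chrono>
#include <cstddef>
#include <cstdio>

struct OutputPosition {
  double position = 0.0;   // seconds of audio that have reached the output
  double timestamp = 0.0;  // platform timestamp the estimate refers to
};

class PositionTracker {
 public:
  explicit PositionTracker(double sampleRate) : m_sampleRate(sampleRate) {}

  // Device render callback, mirroring AudioDestination::render(): account for
  // skipped frames, derive the position from frames elapsed minus the reported
  // delay, and remember when the estimate was taken.
  void onRender(size_t numberOfFrames, double delay, double delayTimestamp,
                size_t priorFramesSkipped) {
    m_framesElapsed -= std::min(m_framesElapsed, priorFramesSkipped);
    m_outputPosition.position = m_framesElapsed / m_sampleRate - delay;
    m_outputPosition.timestamp = delayTimestamp;
    m_receivedAt = std::chrono::steady_clock::now();
    m_framesElapsed += numberOfFrames;
  }

  // Per render quantum, mirroring AudioDestination::provideInput().
  OutputPosition currentPosition(size_t callbackBufferSize,
                                 size_t framesToProcess) const {
    OutputPosition outputPosition = m_outputPosition;
    // When the platform buffer spans several render quanta, advance the
    // estimate by the wall-clock time since it was captured so it does not
    // appear frozen between device callbacks.
    if (callbackBufferSize > framesToProcess * 2) {
      double delta = std::chrono::duration<double>(
                         std::chrono::steady_clock::now() - m_receivedAt)
                         .count();
      outputPosition.position += delta;
      outputPosition.timestamp += delta;
    }
    // A rough |delay| estimate can push the position below zero; clamp it.
    outputPosition.position = std::max(outputPosition.position, 0.0);
    return outputPosition;
  }

 private:
  double m_sampleRate;
  size_t m_framesElapsed = 0;
  OutputPosition m_outputPosition;
  std::chrono::steady_clock::time_point m_receivedAt{};
};

int main() {
  PositionTracker tracker(44100.0);
  // One device callback of 2048 frames with a 10 ms reported delay.
  tracker.onRender(2048, 0.010, 0.0, 0);
  // The graph then pulls 128-frame quanta; the stored estimate is extrapolated.
  OutputPosition p = tracker.currentPosition(2048, 128);
  std::printf("position=%.6f s, timestamp=%.6f s\n", p.position, p.timestamp);
  return 0;
}
```

Extrapolating only when callbackBufferSize > framesToProcess * 2 follows the condition in the patch; whether to advance the estimate unconditionally, as raised in the thread, is left open here.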