OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2010 Google Inc. All rights reserved. | 2 * Copyright (C) 2010 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
6 * are met: | 6 * are met: |
7 * | 7 * |
8 * 1. Redistributions of source code must retain the above copyright | 8 * 1. Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * 2. Redistributions in binary form must reproduce the above copyright | 10 * 2. Redistributions in binary form must reproduce the above copyright |
(...skipping 52 matching lines...)
63 float sampleRate, | 63 float sampleRate, |
64 PassRefPtr<SecurityOrigin> securityOrigin) | 64 PassRefPtr<SecurityOrigin> securityOrigin) |
65 : m_callback(callback), | 65 : m_callback(callback), |
66 m_numberOfOutputChannels(numberOfOutputChannels), | 66 m_numberOfOutputChannels(numberOfOutputChannels), |
67 m_inputBus(AudioBus::create(numberOfInputChannels, | 67 m_inputBus(AudioBus::create(numberOfInputChannels, |
68 AudioUtilities::kRenderQuantumFrames)), | 68 AudioUtilities::kRenderQuantumFrames)), |
69 m_renderBus(AudioBus::create(numberOfOutputChannels, | 69 m_renderBus(AudioBus::create(numberOfOutputChannels, |
70 AudioUtilities::kRenderQuantumFrames, | 70 AudioUtilities::kRenderQuantumFrames, |
71 false)), | 71 false)), |
72 m_sampleRate(sampleRate), | 72 m_sampleRate(sampleRate), |
73 m_isPlaying(false) { | 73 m_isPlaying(false), |
| 74 m_framesElapsed(0), |
| 75 m_outputPosition() { |
74 // Histogram for audioHardwareBufferSize | 76 // Histogram for audioHardwareBufferSize |
75 DEFINE_STATIC_LOCAL(SparseHistogram, hardwareBufferSizeHistogram, | 77 DEFINE_STATIC_LOCAL(SparseHistogram, hardwareBufferSizeHistogram, |
76 ("WebAudio.AudioDestination.HardwareBufferSize")); | 78 ("WebAudio.AudioDestination.HardwareBufferSize")); |
77 // Histogram for the actual callback size used. Typically, this is the same | 79 // Histogram for the actual callback size used. Typically, this is the same |
78 // as audioHardwareBufferSize, but can be adjusted depending on some | 80 // as audioHardwareBufferSize, but can be adjusted depending on some |
79 // heuristics below. | 81 // heuristics below. |
80 DEFINE_STATIC_LOCAL(SparseHistogram, callbackBufferSizeHistogram, | 82 DEFINE_STATIC_LOCAL(SparseHistogram, callbackBufferSizeHistogram, |
81 ("WebAudio.AudioDestination.CallbackBufferSize")); | 83 ("WebAudio.AudioDestination.CallbackBufferSize")); |
82 | 84 |
83 // Use the optimal buffer size recommended by the audio backend. | 85 // Use the optimal buffer size recommended by the audio backend. |
(...skipping 76 matching lines...)
160 float AudioDestination::hardwareSampleRate() { | 162 float AudioDestination::hardwareSampleRate() { |
161 return static_cast<float>(Platform::current()->audioHardwareSampleRate()); | 163 return static_cast<float>(Platform::current()->audioHardwareSampleRate()); |
162 } | 164 } |
163 | 165 |
164 unsigned long AudioDestination::maxChannelCount() { | 166 unsigned long AudioDestination::maxChannelCount() { |
165 return static_cast<unsigned long>(Platform::current()->audioHardwareOutputChannels()); | 167 return static_cast<unsigned long>(Platform::current()->audioHardwareOutputChannels()); |
166 } | 168 } |
167 | 169 |
168 void AudioDestination::render(const WebVector<float*>& sourceData, | 170 void AudioDestination::render(const WebVector<float*>& sourceData, |
169 const WebVector<float*>& audioData, | 171 const WebVector<float*>& audioData, |
170 size_t numberOfFrames) { | 172 size_t numberOfFrames, |
| 173 double delay, |
| 174 double delayTimestamp, |
| 175 size_t priorFramesSkipped) { |
171 bool isNumberOfChannelsGood = audioData.size() == m_numberOfOutputChannels; | 176 bool isNumberOfChannelsGood = audioData.size() == m_numberOfOutputChannels; |
172 if (!isNumberOfChannelsGood) { | 177 if (!isNumberOfChannelsGood) { |
173 ASSERT_NOT_REACHED(); | 178 ASSERT_NOT_REACHED(); |
174 return; | 179 return; |
175 } | 180 } |
176 | 181 |
177 bool isBufferSizeGood = numberOfFrames == m_callbackBufferSize; | 182 bool isBufferSizeGood = numberOfFrames == m_callbackBufferSize; |
178 if (!isBufferSizeGood) { | 183 if (!isBufferSizeGood) { |
179 ASSERT_NOT_REACHED(); | 184 ASSERT_NOT_REACHED(); |
180 return; | 185 return; |
181 } | 186 } |
182 | 187 |
| 188 m_framesElapsed -= std::min(m_framesElapsed, priorFramesSkipped); |
| 189 double outputPosition = |
| 190 m_framesElapsed / static_cast<double>(m_sampleRate) - delay; |
| 191 m_outputPosition.position = outputPosition; |
| 192 m_outputPosition.timestamp = delayTimestamp; |
| 193 m_outputPositionReceivedTimestamp = base::TimeTicks::Now(); |
| 194 |
183 // Buffer optional live input. | 195 // Buffer optional live input. |
184 if (sourceData.size() >= 2) { | 196 if (sourceData.size() >= 2) { |
185 // FIXME: handle multi-channel input and don't hard-code to stereo. | 197 // FIXME: handle multi-channel input and don't hard-code to stereo. |
186 RefPtr<AudioBus> wrapperBus = AudioBus::create(2, numberOfFrames, false); | 198 RefPtr<AudioBus> wrapperBus = AudioBus::create(2, numberOfFrames, false); |
187 wrapperBus->setChannelMemory(0, sourceData[0], numberOfFrames); | 199 wrapperBus->setChannelMemory(0, sourceData[0], numberOfFrames); |
188 wrapperBus->setChannelMemory(1, sourceData[1], numberOfFrames); | 200 wrapperBus->setChannelMemory(1, sourceData[1], numberOfFrames); |
189 m_inputFifo->push(wrapperBus.get()); | 201 m_inputFifo->push(wrapperBus.get()); |
190 } | 202 } |
191 | 203 |
192 for (unsigned i = 0; i < m_numberOfOutputChannels; ++i) | 204 for (unsigned i = 0; i < m_numberOfOutputChannels; ++i) |
193 m_renderBus->setChannelMemory(i, audioData[i], numberOfFrames); | 205 m_renderBus->setChannelMemory(i, audioData[i], numberOfFrames); |
194 | 206 |
195 m_fifo->consume(m_renderBus.get(), numberOfFrames); | 207 m_fifo->consume(m_renderBus.get(), numberOfFrames); |
| 208 |
| 209 m_framesElapsed += numberOfFrames; |
196 } | 210 } |
197 | 211 |
198 void AudioDestination::provideInput(AudioBus* bus, size_t framesToProcess) { | 212 void AudioDestination::provideInput(AudioBus* bus, size_t framesToProcess) { |
199 AudioBus* sourceBus = nullptr; | 213 AudioBus* sourceBus = nullptr; |
200 if (m_inputFifo->framesInFifo() >= framesToProcess) { | 214 if (m_inputFifo->framesInFifo() >= framesToProcess) { |
201 m_inputFifo->consume(m_inputBus.get(), framesToProcess); | 215 m_inputFifo->consume(m_inputBus.get(), framesToProcess); |
202 sourceBus = m_inputBus.get(); | 216 sourceBus = m_inputBus.get(); |
203 } | 217 } |
204 | 218 |
205 m_callback.render(sourceBus, bus, framesToProcess); | 219 AudioIOPosition outputPosition = m_outputPosition; |
| 220 |
| 221 // If the platform buffer is more than two times longer than |
| 222 // |framesToProcess|, we do not want the output position to get stuck, so |
| 223 // we advance it by the time elapsed since it was initially obtained. |
| 224 if (m_callbackBufferSize > framesToProcess * 2) { |
| 225 double delta = (base::TimeTicks::Now() - m_outputPositionReceivedTimestamp) |
| 226 .InSecondsF(); |
| 227 outputPosition.position += delta; |
| 228 outputPosition.timestamp += delta; |
| 229 } |
| 230 |
| 231 // Some implementations give only a rough estimation of |delay|, so we |
| 232 // might end up with a negative |outputPosition| value. |
| 233 if (outputPosition.position < 0.0) |
| 234 outputPosition.position = 0.0; |
| 235 |
| 236 m_callback.render(sourceBus, bus, framesToProcess, outputPosition); |
206 } | 237 } |
207 | 238 |
208 } // namespace blink | 239 } // namespace blink |
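Note on the new output-position bookkeeping in this change: render() subtracts priorFramesSkipped (clamped so the counter never goes negative), converts the elapsed frame count to seconds, subtracts the reported output |delay|, and records when that estimate was received; provideInput() then extrapolates the estimate by the time elapsed since it was captured whenever the platform callback buffer spans more than two render quanta, and clamps negative positions to zero. Below is a minimal standalone sketch of that logic, using std::chrono in place of base::TimeTicks; the OutputPosition and OutputPositionTracker names are illustrative, not Blink's types.

// Standalone approximation of the position bookkeeping above, using
// std::chrono in place of base::TimeTicks. Struct and class names here
// (OutputPosition, OutputPositionTracker) are illustrative, not Blink's.
#include <algorithm>
#include <chrono>
#include <cstddef>
#include <cstdio>

struct OutputPosition {
  double position = 0;   // seconds of audio that have left the pipeline
  double timestamp = 0;  // monotonic time (seconds) the estimate refers to
};

class OutputPositionTracker {
 public:
  explicit OutputPositionTracker(float sampleRate) : m_sampleRate(sampleRate) {}

  // Mirrors the bookkeeping added to AudioDestination::render().
  void onRenderCallback(size_t numberOfFrames,
                        double delay,           // reported output latency (s)
                        double delayTimestamp,  // when |delay| was measured
                        size_t priorFramesSkipped) {
    m_framesElapsed -= std::min(m_framesElapsed, priorFramesSkipped);
    m_position.position =
        m_framesElapsed / static_cast<double>(m_sampleRate) - delay;
    m_position.timestamp = delayTimestamp;
    m_receivedAt = std::chrono::steady_clock::now();
    m_framesElapsed += numberOfFrames;
  }

  // Mirrors the extrapolation in AudioDestination::provideInput(): if the
  // platform buffer spans more than two render quanta, advance the estimate
  // by the time elapsed since it was captured, and clamp negative positions.
  OutputPosition currentPosition(size_t callbackBufferSize,
                                 size_t framesToProcess) const {
    OutputPosition result = m_position;
    if (callbackBufferSize > framesToProcess * 2) {
      double delta = std::chrono::duration<double>(
                         std::chrono::steady_clock::now() - m_receivedAt)
                         .count();
      result.position += delta;
      result.timestamp += delta;
    }
    if (result.position < 0.0)
      result.position = 0.0;
    return result;
  }

 private:
  float m_sampleRate;
  size_t m_framesElapsed = 0;
  OutputPosition m_position;
  std::chrono::steady_clock::time_point m_receivedAt;
};

int main() {
  OutputPositionTracker tracker(44100.0f);
  // One 512-frame device callback with 10 ms of reported output latency.
  tracker.onRenderCallback(512, 0.010, 0.0, 0);
  OutputPosition p = tracker.currentPosition(/*callbackBufferSize=*/2048,
                                             /*framesToProcess=*/128);
  std::printf("position=%f timestamp=%f\n", p.position, p.timestamp);
  return 0;
}

With the example numbers in main(), the first estimate comes out at -10 ms (the reported latency has not been played out yet) and is clamped to zero, which is exactly the case the "rough estimation of |delay|" comment guards against.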
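The render() hunk also relies on FIFOs to decouple the device callback size from WebAudio's 128-frame render quantum: m_fifo->consume() fills the device buffer and pulls further render quanta via provideInput() whenever it runs low, while live input is pushed into m_inputFifo and consumed in provideInput() once enough frames have accumulated. The exact FIFO types are outside this hunk; the following is a simplified, mono, single-threaded sketch of the pull-FIFO idea with an illustrative name (PullFifo), not Blink's AudioPullFIFO API.

// Simplified pull FIFO: when the buffered data cannot satisfy a consumer
// request, pull fixed-size quanta from a provider until it can.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <deque>
#include <functional>
#include <vector>

class PullFifo {
 public:
  PullFifo(size_t quantumFrames,
           std::function<void(std::vector<float>&)> provider)
      : m_quantumFrames(quantumFrames), m_provider(std::move(provider)) {}

  // Fill |destination| with exactly destination.size() frames, pulling
  // quantum-sized chunks from the provider as needed.
  void consume(std::vector<float>& destination) {
    while (m_buffered.size() < destination.size()) {
      std::vector<float> quantum(m_quantumFrames);
      m_provider(quantum);  // e.g. the role of provideInput() above
      m_buffered.insert(m_buffered.end(), quantum.begin(), quantum.end());
    }
    for (size_t i = 0; i < destination.size(); ++i)
      destination[i] = m_buffered[i];
    m_buffered.erase(m_buffered.begin(),
                     m_buffered.begin() + destination.size());
  }

 private:
  size_t m_quantumFrames;
  std::function<void(std::vector<float>&)> m_provider;
  std::deque<float> m_buffered;
};

int main() {
  size_t pulls = 0;
  PullFifo fifo(128, [&pulls](std::vector<float>& quantum) {
    ++pulls;
    std::fill(quantum.begin(), quantum.end(), 0.0f);  // render silence
  });
  std::vector<float> deviceBuffer(512);
  fifo.consume(deviceBuffer);
  std::printf("provider pulls: %zu\n", pulls);  // 512/128 = 4 pulls
  return 0;
}

A 512-frame device request with a 128-frame quantum triggers four provider pulls before the copy-out, which is why the callback buffer size and the render quantum can be tuned independently in the constructor above.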