OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2010 Google Inc. All rights reserved. | 2 * Copyright (C) 2010 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
6 * are met: | 6 * are met: |
7 * | 7 * |
8 * 1. Redistributions of source code must retain the above copyright | 8 * 1. Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * 2. Redistributions in binary form must reproduce the above copyright | 10 * 2. Redistributions in binary form must reproduce the above copyright |
(...skipping 52 matching lines...)
63 float sampleRate, | 63 float sampleRate, |
64 PassRefPtr<SecurityOrigin> securityOrigin) | 64 PassRefPtr<SecurityOrigin> securityOrigin) |
65 : m_callback(callback), | 65 : m_callback(callback), |
66 m_numberOfOutputChannels(numberOfOutputChannels), | 66 m_numberOfOutputChannels(numberOfOutputChannels), |
67 m_inputBus(AudioBus::create(numberOfInputChannels, | 67 m_inputBus(AudioBus::create(numberOfInputChannels, |
68 AudioUtilities::kRenderQuantumFrames)), | 68 AudioUtilities::kRenderQuantumFrames)), |
69 m_renderBus(AudioBus::create(numberOfOutputChannels, | 69 m_renderBus(AudioBus::create(numberOfOutputChannels, |
70 AudioUtilities::kRenderQuantumFrames, | 70 AudioUtilities::kRenderQuantumFrames, |
71 false)), | 71 false)), |
72 m_sampleRate(sampleRate), | 72 m_sampleRate(sampleRate), |
73 m_isPlaying(false) { | 73 m_isPlaying(false), |
| 74 m_framesElapsed(0), |
| 75 m_outputPosition() { |
74 // Histogram for audioHardwareBufferSize | 76 // Histogram for audioHardwareBufferSize |
75 DEFINE_STATIC_LOCAL(SparseHistogram, hardwareBufferSizeHistogram, | 77 DEFINE_STATIC_LOCAL(SparseHistogram, hardwareBufferSizeHistogram, |
76 ("WebAudio.AudioDestination.HardwareBufferSize")); | 78 ("WebAudio.AudioDestination.HardwareBufferSize")); |
77 // Histogram for the actual callback size used. Typically, this is the same | 79 // Histogram for the actual callback size used. Typically, this is the same |
78 // as audioHardwareBufferSize, but can be adjusted depending on some | 80 // as audioHardwareBufferSize, but can be adjusted depending on some |
79 // heuristics below. | 81 // heuristics below. |
80 DEFINE_STATIC_LOCAL(SparseHistogram, callbackBufferSizeHistogram, | 82 DEFINE_STATIC_LOCAL(SparseHistogram, callbackBufferSizeHistogram, |
81 ("WebAudio.AudioDestination.CallbackBufferSize")); | 83 ("WebAudio.AudioDestination.CallbackBufferSize")); |
82 | 84 |
83 // Use the optimal buffer size recommended by the audio backend. | 85 // Use the optimal buffer size recommended by the audio backend. |
(...skipping 77 matching lines...)
161 float AudioDestination::hardwareSampleRate() { | 163 float AudioDestination::hardwareSampleRate() { |
162 return static_cast<float>(Platform::current()->audioHardwareSampleRate()); | 164 return static_cast<float>(Platform::current()->audioHardwareSampleRate()); |
163 } | 165 } |
164 | 166 |
165 unsigned long AudioDestination::maxChannelCount() { | 167 unsigned long AudioDestination::maxChannelCount() { |
166 return Platform::current()->audioHardwareOutputChannels(); | 168 return Platform::current()->audioHardwareOutputChannels(); |
167 } | 169 } |
168 | 170 |
169 void AudioDestination::render(const WebVector<float*>& sourceData, | 171 void AudioDestination::render(const WebVector<float*>& sourceData, |
170 const WebVector<float*>& audioData, | 172 const WebVector<float*>& audioData, |
171 size_t numberOfFrames) { | 173 size_t numberOfFrames, |
| 174 double delay, |
| 175 double delayTimestamp, |
| 176 size_t priorFramesSkipped) { |
172 bool isNumberOfChannelsGood = audioData.size() == m_numberOfOutputChannels; | 177 bool isNumberOfChannelsGood = audioData.size() == m_numberOfOutputChannels; |
173 if (!isNumberOfChannelsGood) { | 178 if (!isNumberOfChannelsGood) { |
174 ASSERT_NOT_REACHED(); | 179 ASSERT_NOT_REACHED(); |
175 return; | 180 return; |
176 } | 181 } |
177 | 182 |
178 bool isBufferSizeGood = numberOfFrames == m_callbackBufferSize; | 183 bool isBufferSizeGood = numberOfFrames == m_callbackBufferSize; |
179 if (!isBufferSizeGood) { | 184 if (!isBufferSizeGood) { |
180 ASSERT_NOT_REACHED(); | 185 ASSERT_NOT_REACHED(); |
181 return; | 186 return; |
182 } | 187 } |
183 | 188 |
| 189 m_framesElapsed -= std::min(m_framesElapsed, priorFramesSkipped); |
| 190 double outputPosition = |
| 191 m_framesElapsed / static_cast<double>(m_sampleRate) - delay; |
| 192 m_outputPosition.position = outputPosition; |
| 193 m_outputPosition.timestamp = delayTimestamp; |
| 194 m_outputPositionReceivedTimestamp = base::TimeTicks::Now(); |
| 195 |
184 // Buffer optional live input. | 196 // Buffer optional live input. |
185 if (sourceData.size() >= 2) { | 197 if (sourceData.size() >= 2) { |
186 // FIXME: handle multi-channel input and don't hard-code to stereo. | 198 // FIXME: handle multi-channel input and don't hard-code to stereo. |
187 RefPtr<AudioBus> wrapperBus = AudioBus::create(2, numberOfFrames, false); | 199 RefPtr<AudioBus> wrapperBus = AudioBus::create(2, numberOfFrames, false); |
188 wrapperBus->setChannelMemory(0, sourceData[0], numberOfFrames); | 200 wrapperBus->setChannelMemory(0, sourceData[0], numberOfFrames); |
189 wrapperBus->setChannelMemory(1, sourceData[1], numberOfFrames); | 201 wrapperBus->setChannelMemory(1, sourceData[1], numberOfFrames); |
190 m_inputFifo->push(wrapperBus.get()); | 202 m_inputFifo->push(wrapperBus.get()); |
191 } | 203 } |
192 | 204 |
193 for (unsigned i = 0; i < m_numberOfOutputChannels; ++i) | 205 for (unsigned i = 0; i < m_numberOfOutputChannels; ++i) |
194 m_renderBus->setChannelMemory(i, audioData[i], numberOfFrames); | 206 m_renderBus->setChannelMemory(i, audioData[i], numberOfFrames); |
195 | 207 |
196 m_fifo->consume(m_renderBus.get(), numberOfFrames); | 208 m_fifo->consume(m_renderBus.get(), numberOfFrames); |
| 209 |
| 210 m_framesElapsed += numberOfFrames; |
197 } | 211 } |
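For intuition, the new bookkeeping in render() boils down to "frames handed to the hardware so far, converted to seconds, minus the reported device delay", with priorFramesSkipped discounted first so that frames the device never played are not counted. A self-contained sketch of that arithmetic, using made-up numbers (not taken from the patch):

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  // Hypothetical values: one second of audio rendered at 48 kHz, the platform
  // reports 10 ms of output latency and no skipped frames.
  std::size_t framesElapsed = 48000;
  const std::size_t priorFramesSkipped = 0;
  const double sampleRate = 48000.0;
  const double delay = 0.010;  // seconds

  // Same arithmetic as render(): discount skipped frames, convert the elapsed
  // frame count to seconds, subtract the hardware delay.
  framesElapsed -= std::min(framesElapsed, priorFramesSkipped);
  const double outputPosition =
      framesElapsed / sampleRate - delay;  // 1.0 - 0.01 = 0.99 s

  std::printf("output position: %.3f s\n", outputPosition);
  return 0;
}

The corresponding timestamp (delayTimestamp in the new signature) is stored alongside the position so that later readers can tell how stale the value is.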
198 | 212 |
199 void AudioDestination::provideInput(AudioBus* bus, size_t framesToProcess) { | 213 void AudioDestination::provideInput(AudioBus* bus, size_t framesToProcess) { |
200 AudioBus* sourceBus = nullptr; | 214 AudioBus* sourceBus = nullptr; |
201 if (m_inputFifo->framesInFifo() >= framesToProcess) { | 215 if (m_inputFifo->framesInFifo() >= framesToProcess) { |
202 m_inputFifo->consume(m_inputBus.get(), framesToProcess); | 216 m_inputFifo->consume(m_inputBus.get(), framesToProcess); |
203 sourceBus = m_inputBus.get(); | 217 sourceBus = m_inputBus.get(); |
204 } | 218 } |
205 | 219 |
206 m_callback.render(sourceBus, bus, framesToProcess); | 220 AudioIOPosition outputPosition = m_outputPosition; |
| 221 |
| 222   // If the platform buffer is more than two times longer than |
| 223   // |framesToProcess|, we do not want the output position to get stuck, |
| 224   // so we advance it by the elapsed time since it was initially obtained. |
| 225 if (m_callbackBufferSize > framesToProcess * 2) { |
| 226 double delta = (base::TimeTicks::Now() - m_outputPositionReceivedTimestamp) |
| 227 .InSecondsF(); |
| 228 outputPosition.position += delta; |
| 229 outputPosition.timestamp += delta; |
| 230 } |
| 231 |
| 232   // Some implementations give only a rough estimate of |delay|, so the |
| 233   // computed |outputPosition| value may end up negative; clamp it to zero. |
| 234 if (outputPosition.position < 0.0) |
| 235 outputPosition.position = 0.0; |
| 236 |
| 237 m_callback.render(sourceBus, bus, framesToProcess, outputPosition); |
207 } | 238 } |
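The extrapolation branch above matters when one platform callback spans several render quanta: the FIFO then calls provideInput() repeatedly against a single cached m_outputPosition, which would otherwise appear stuck. A standalone sketch of that step, substituting std::chrono for base::TimeTicks; the helper name is hypothetical and the struct only mirrors the two fields the patch assigns:

#include <algorithm>
#include <chrono>
#include <cstddef>

// Mirrors the two fields the patch assigns to m_outputPosition (both seconds).
struct AudioIOPosition {
  double position = 0;
  double timestamp = 0;
};

// Illustrative helper, not part of the patch: advance a cached position by the
// wall-clock time elapsed since it was captured, then clamp it at zero because
// a rough |delay| estimate can push it negative.
AudioIOPosition extrapolatePosition(
    AudioIOPosition cached,
    std::chrono::steady_clock::time_point receivedAt,
    std::size_t callbackBufferSize,
    std::size_t framesToProcess) {
  if (callbackBufferSize > framesToProcess * 2) {
    const double delta = std::chrono::duration<double>(
                             std::chrono::steady_clock::now() - receivedAt)
                             .count();
    cached.position += delta;
    cached.timestamp += delta;
  }
  cached.position = std::max(cached.position, 0.0);
  return cached;
}

A caller would capture receivedAt at the same moment render() stores m_outputPositionReceivedTimestamp, so the delta measures how long the cached position has been sitting in the FIFO path.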
208 | 239 |
209 } // namespace blink | 240 } // namespace blink |