| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2010, Google Inc. All rights reserved. | 2 * Copyright (C) 2010, Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
| 6 * are met: | 6 * are met: |
| 7 * 1. Redistributions of source code must retain the above copyright | 7 * 1. Redistributions of source code must retain the above copyright |
| 8 * notice, this list of conditions and the following disclaimer. | 8 * notice, this list of conditions and the following disclaimer. |
| 9 * 2. Redistributions in binary form must reproduce the above copyright | 9 * 2. Redistributions in binary form must reproduce the above copyright |
| 10 * notice, this list of conditions and the following disclaimer in the | 10 * notice, this list of conditions and the following disclaimer in the |
| 11 * documentation and/or other materials provided with the distribution. | 11 * documentation and/or other materials provided with the distribution. |
| 12 * | 12 * |
| 13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY | 13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND |
| 14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 15 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 16 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY | 16 * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE |
| 17 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| 18 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
| 19 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | 19 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER |
| 20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 20 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| 21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| 22 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH |
| 23 * DAMAGE. |
| 23 */ | 24 */ |
| 24 | 25 |
| 25 #include "platform/audio/HRTFPanner.h" | 26 #include "platform/audio/HRTFPanner.h" |
| 26 #include "platform/audio/AudioBus.h" | 27 #include "platform/audio/AudioBus.h" |
| 27 #include "platform/audio/AudioUtilities.h" | 28 #include "platform/audio/AudioUtilities.h" |
| 28 #include "platform/audio/HRTFDatabase.h" | 29 #include "platform/audio/HRTFDatabase.h" |
| 29 #include "wtf/MathExtras.h" | 30 #include "wtf/MathExtras.h" |
| 30 #include "wtf/RefPtr.h" | 31 #include "wtf/RefPtr.h" |
| 31 | 32 |
| 32 namespace blink { | 33 namespace blink { |
| 33 | 34 |
| 34 // The value of 2 milliseconds is larger than the largest delay which exists in any HRTFKernel from the default HRTFDatabase (0.0136 seconds). | 35 // The value of 2 milliseconds is larger than the largest delay which exists in |
| 36 // any HRTFKernel from the default HRTFDatabase (0.0136 seconds). |
| 35 // We ASSERT the delay values used in process() with this value. | 37 // We ASSERT the delay values used in process() with this value. |
| 36 const double MaxDelayTimeSeconds = 0.002; | 38 const double MaxDelayTimeSeconds = 0.002; |
| 37 | 39 |
| 38 const int UninitializedAzimuth = -1; | 40 const int UninitializedAzimuth = -1; |
| 39 const unsigned RenderingQuantum = 128; | 41 const unsigned RenderingQuantum = 128; |
| 40 | 42 |
| 41 HRTFPanner::HRTFPanner(float sampleRate, HRTFDatabaseLoader* databaseLoader) | 43 HRTFPanner::HRTFPanner(float sampleRate, HRTFDatabaseLoader* databaseLoader) |
| 42 : Panner(PanningModelHRTF), | 44 : Panner(PanningModelHRTF), |
| 43 m_databaseLoader(databaseLoader), | 45 m_databaseLoader(databaseLoader), |
| 44 m_sampleRate(sampleRate), | 46 m_sampleRate(sampleRate), |
| (...skipping 13 matching lines...) |
| 58 m_tempL1(RenderingQuantum), | 60 m_tempL1(RenderingQuantum), |
| 59 m_tempR1(RenderingQuantum), | 61 m_tempR1(RenderingQuantum), |
| 60 m_tempL2(RenderingQuantum), | 62 m_tempL2(RenderingQuantum), |
| 61 m_tempR2(RenderingQuantum) { | 63 m_tempR2(RenderingQuantum) { |
| 62 ASSERT(databaseLoader); | 64 ASSERT(databaseLoader); |
| 63 } | 65 } |
| 64 | 66 |
| 65 HRTFPanner::~HRTFPanner() {} | 67 HRTFPanner::~HRTFPanner() {} |
| 66 | 68 |
| 67 size_t HRTFPanner::fftSizeForSampleRate(float sampleRate) { | 69 size_t HRTFPanner::fftSizeForSampleRate(float sampleRate) { |
| 68 // The HRTF impulse responses (loaded as audio resources) are 512 sample-frames @44.1KHz. | 70 // The HRTF impulse responses (loaded as audio resources) are 512 |
| 69 // Currently, we truncate the impulse responses to half this size, | 71 // sample-frames @44.1KHz. Currently, we truncate the impulse responses to |
| 70 // but an FFT-size of twice impulse response size is needed (for convolution). | 72 // half this size, but an FFT-size of twice impulse response size is needed |
| 71 // So for sample rates around 44.1KHz an FFT size of 512 is good. | 73 // (for convolution). So for sample rates around 44.1KHz an FFT size of 512 |
| 72 // For different sample rates, the truncated response is resampled. | 74 // is good. For different sample rates, the truncated response is resampled. |
| 73 // The resampled length is used to compute the FFT size by choosing a power of two that is | 75 // The resampled length is used to compute the FFT size by choosing a power |
| 74 // greater than or equal the resampled length. This power of two is doubled to get the actual FFT size. | 76 // of two is greater than or equal to the resampled length. This power of |
| 77 // two is doubled to get the actual FFT size. |
| 75 | 78 |
| 76 ASSERT(AudioUtilities::isValidAudioBufferSampleRate(sampleRate)); | 79 ASSERT(AudioUtilities::isValidAudioBufferSampleRate(sampleRate)); |
| 77 | 80 |
| 78 int truncatedImpulseLength = 256; | 81 int truncatedImpulseLength = 256; |
| 79 double sampleRateRatio = sampleRate / 44100; | 82 double sampleRateRatio = sampleRate / 44100; |
| 80 double resampledLength = truncatedImpulseLength * sampleRateRatio; | 83 double resampledLength = truncatedImpulseLength * sampleRateRatio; |
| 81 | 84 |
| 82 return 2 * (1 << static_cast<unsigned>(log2(resampledLength))); | 85 return 2 * (1 << static_cast<unsigned>(log2(resampledLength))); |
| 83 } | 86 } |
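
To make the size selection concrete, here is a small standalone sketch (not part of the CL) that reproduces the arithmetic above and prints the resulting FFT sizes for a few common rates. Note that the truncation in static_cast<unsigned>(log2(...)) picks the power of two at or below the resampled length (e.g., 256 < 278.6 at 48 kHz); it is the final doubling that supplies the convolution headroom.

```cpp
#include <cmath>
#include <cstddef>
#include <cstdio>

// Standalone restatement of fftSizeForSampleRate() above, for illustration
// only: truncate log2 of the resampled impulse length, then double.
static size_t fftSizeFor(float sampleRate) {
  const int truncatedImpulseLength = 256;
  const double resampledLength =
      truncatedImpulseLength * (sampleRate / 44100.0);
  return 2 * (1u << static_cast<unsigned>(std::log2(resampledLength)));
}

int main() {
  const float rates[] = {22050, 44100, 48000, 96000};
  for (float rate : rates) {
    // Prints: 22050 -> 256, 44100 -> 512, 48000 -> 512, 96000 -> 1024.
    std::printf("%g Hz -> FFT size %zu\n", rate, fftSizeFor(rate));
  }
}
```
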
| 84 | 87 |
| 85 void HRTFPanner::reset() { | 88 void HRTFPanner::reset() { |
| 86 m_convolverL1.reset(); | 89 m_convolverL1.reset(); |
| 87 m_convolverR1.reset(); | 90 m_convolverR1.reset(); |
| 88 m_convolverL2.reset(); | 91 m_convolverL2.reset(); |
| 89 m_convolverR2.reset(); | 92 m_convolverR2.reset(); |
| 90 m_delayLineL.reset(); | 93 m_delayLineL.reset(); |
| 91 m_delayLineR.reset(); | 94 m_delayLineR.reset(); |
| 92 } | 95 } |
| 93 | 96 |
| 94 int HRTFPanner::calculateDesiredAzimuthIndexAndBlend(double azimuth, | 97 int HRTFPanner::calculateDesiredAzimuthIndexAndBlend(double azimuth, |
| 95 double& azimuthBlend) { | 98 double& azimuthBlend) { |
| 96 // Convert the azimuth angle from the range -180 -> +180 into the range 0 -> 360. | 99 // Convert the azimuth angle from the range -180 -> +180 into the range 0 -> |
| 97 // The azimuth index may then be calculated from this positive value. | 100 // 360. The azimuth index may then be calculated from this positive value. |
| 98 if (azimuth < 0) | 101 if (azimuth < 0) |
| 99 azimuth += 360.0; | 102 azimuth += 360.0; |
| 100 | 103 |
| 101 int numberOfAzimuths = HRTFDatabase::numberOfAzimuths(); | 104 int numberOfAzimuths = HRTFDatabase::numberOfAzimuths(); |
| 102 const double angleBetweenAzimuths = 360.0 / numberOfAzimuths; | 105 const double angleBetweenAzimuths = 360.0 / numberOfAzimuths; |
| 103 | 106 |
| 104 // Calculate the azimuth index and the blend (0 -> 1) for interpolation. | 107 // Calculate the azimuth index and the blend (0 -> 1) for interpolation. |
| 105 double desiredAzimuthIndexFloat = azimuth / angleBetweenAzimuths; | 108 double desiredAzimuthIndexFloat = azimuth / angleBetweenAzimuths; |
| 106 int desiredAzimuthIndex = static_cast<int>(desiredAzimuthIndexFloat); | 109 int desiredAzimuthIndex = static_cast<int>(desiredAzimuthIndexFloat); |
| 107 azimuthBlend = | 110 azimuthBlend = |
| 108 desiredAzimuthIndexFloat - static_cast<double>(desiredAzimuthIndex); | 111 desiredAzimuthIndexFloat - static_cast<double>(desiredAzimuthIndex); |
| 109 | 112 |
| 110 // We don't immediately start using this azimuth index, but instead approach this index from the last index we rendered at. | 113 // We don't immediately start using this azimuth index, but instead approach |
| 111 // This minimizes the clicks and graininess for moving sources which occur otherwise. | 114 // this index from the last index we rendered at. This minimizes the clicks |
| 115 // and graininess for moving sources which occur otherwise. |
| 112 desiredAzimuthIndex = clampTo(desiredAzimuthIndex, 0, numberOfAzimuths - 1); | 116 desiredAzimuthIndex = clampTo(desiredAzimuthIndex, 0, numberOfAzimuths - 1); |
| 113 return desiredAzimuthIndex; | 117 return desiredAzimuthIndex; |
| 114 } | 118 } |
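
For a concrete feel of the index/blend split, the sketch below re-implements the same mapping outside Blink. kNumberOfAzimuths = 24 is a stand-in value chosen for illustration, not necessarily what HRTFDatabase::numberOfAzimuths() returns.

```cpp
#include <algorithm>
#include <cstdio>

// Hypothetical stand-in for HRTFDatabase::numberOfAzimuths().
const int kNumberOfAzimuths = 24;

// Sketch of the azimuth -> (index, blend) mapping above.
int azimuthIndexAndBlend(double azimuth, double& azimuthBlend) {
  if (azimuth < 0)
    azimuth += 360.0;  // Map -180..+180 onto 0..360.
  const double angleBetweenAzimuths = 360.0 / kNumberOfAzimuths;
  const double indexFloat = azimuth / angleBetweenAzimuths;
  const int index = static_cast<int>(indexFloat);
  azimuthBlend = indexFloat - index;  // Fractional part for interpolation.
  return std::min(std::max(index, 0), kNumberOfAzimuths - 1);
}

int main() {
  double blend;
  // -37.5 degrees wraps to 322.5; 322.5 / 15 = 21.5 -> index 21, blend 0.5.
  const int index = azimuthIndexAndBlend(-37.5, blend);
  std::printf("index=%d blend=%.2f\n", index, blend);  // index=21 blend=0.50
}
```
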
| 115 | 119 |
| 116 void HRTFPanner::pan(double desiredAzimuth, | 120 void HRTFPanner::pan(double desiredAzimuth, |
| 117 double elevation, | 121 double elevation, |
| 118 const AudioBus* inputBus, | 122 const AudioBus* inputBus, |
| 119 AudioBus* outputBus, | 123 AudioBus* outputBus, |
| 120 size_t framesToProcess, | 124 size_t framesToProcess, |
| 121 AudioBus::ChannelInterpretation channelInterpretation) { | 125 AudioBus::ChannelInterpretation channelInterpretation) { |
| (...skipping 11 matching lines...) |
| 133 outputBus->zero(); | 137 outputBus->zero(); |
| 134 return; | 138 return; |
| 135 } | 139 } |
| 136 | 140 |
| 137 HRTFDatabase* database = m_databaseLoader->database(); | 141 HRTFDatabase* database = m_databaseLoader->database(); |
| 138 if (!database) { | 142 if (!database) { |
| 139 outputBus->copyFrom(*inputBus, channelInterpretation); | 143 outputBus->copyFrom(*inputBus, channelInterpretation); |
| 140 return; | 144 return; |
| 141 } | 145 } |
| 142 | 146 |
| 143 // IRCAM HRTF azimuths values from the loaded database is reversed from the panner's notion of azimuth. | 147 // IRCAM HRTF azimuth values from the loaded database are reversed from the |
| 148 // panner's notion of azimuth. |
| 144 double azimuth = -desiredAzimuth; | 149 double azimuth = -desiredAzimuth; |
| 145 | 150 |
| 146 bool isAzimuthGood = azimuth >= -180.0 && azimuth <= 180.0; | 151 bool isAzimuthGood = azimuth >= -180.0 && azimuth <= 180.0; |
| 147 ASSERT(isAzimuthGood); | 152 ASSERT(isAzimuthGood); |
| 148 if (!isAzimuthGood) { | 153 if (!isAzimuthGood) { |
| 149 outputBus->zero(); | 154 outputBus->zero(); |
| 150 return; | 155 return; |
| 151 } | 156 } |
| 152 | 157 |
| 153 // Normally, we'll just be dealing with mono sources. | 158 // Normally, we'll just be dealing with mono sources. |
| 154 // If we have a stereo input, implement stereo panning with left source processed by left HRTF, and right source by right HRTF. | 159 // If we have a stereo input, implement stereo panning with left source |
| 160 // processed by left HRTF, and right source by right HRTF. |
| 155 const AudioChannel* inputChannelL = | 161 const AudioChannel* inputChannelL = |
| 156 inputBus->channelByType(AudioBus::ChannelLeft); | 162 inputBus->channelByType(AudioBus::ChannelLeft); |
| 157 const AudioChannel* inputChannelR = | 163 const AudioChannel* inputChannelR = |
| 158 numInputChannels > 1 ? inputBus->channelByType(AudioBus::ChannelRight) | 164 numInputChannels > 1 ? inputBus->channelByType(AudioBus::ChannelRight) |
| 159 : nullptr; | 165 : nullptr; |
| 160 | 166 |
| 161 // Get source and destination pointers. | 167 // Get source and destination pointers. |
| 162 const float* sourceL = inputChannelL->data(); | 168 const float* sourceL = inputChannelL->data(); |
| 163 const float* sourceR = numInputChannels > 1 ? inputChannelR->data() : sourceL; | 169 const float* sourceR = numInputChannels > 1 ? inputChannelR->data() : sourceL; |
| 164 float* destinationL = | 170 float* destinationL = |
| (...skipping 31 matching lines...) |
| 196 } | 202 } |
| 197 if (m_crossfadeX == 1 && m_crossfadeSelection == CrossfadeSelection2) { | 203 if (m_crossfadeX == 1 && m_crossfadeSelection == CrossfadeSelection2) { |
| 198 if (desiredAzimuthIndex != m_azimuthIndex2 || elevation != m_elevation2) { | 204 if (desiredAzimuthIndex != m_azimuthIndex2 || elevation != m_elevation2) { |
| 199 // Cross-fade from 2 -> 1 | 205 // Cross-fade from 2 -> 1 |
| 200 m_crossfadeIncr = -1 / fadeFrames; | 206 m_crossfadeIncr = -1 / fadeFrames; |
| 201 m_azimuthIndex1 = desiredAzimuthIndex; | 207 m_azimuthIndex1 = desiredAzimuthIndex; |
| 202 m_elevation1 = elevation; | 208 m_elevation1 = elevation; |
| 203 } | 209 } |
| 204 } | 210 } |
| 205 | 211 |
| 206 // This algorithm currently requires that we process in power-of-two size chunks at least RenderingQuantum. | 212 // This algorithm currently requires that we process in power-of-two size |
| 213 // chunks at least RenderingQuantum. |
| 207 ASSERT(1UL << static_cast<int>(log2(framesToProcess)) == framesToProcess); | 214 ASSERT(1UL << static_cast<int>(log2(framesToProcess)) == framesToProcess); |
| 208 ASSERT(framesToProcess >= RenderingQuantum); | 215 ASSERT(framesToProcess >= RenderingQuantum); |
| 209 | 216 |
| 210 const unsigned framesPerSegment = RenderingQuantum; | 217 const unsigned framesPerSegment = RenderingQuantum; |
| 211 const unsigned numberOfSegments = framesToProcess / framesPerSegment; | 218 const unsigned numberOfSegments = framesToProcess / framesPerSegment; |
| 212 | 219 |
| 213 for (unsigned segment = 0; segment < numberOfSegments; ++segment) { | 220 for (unsigned segment = 0; segment < numberOfSegments; ++segment) { |
| 214 // Get the HRTFKernels and interpolated delays. | 221 // Get the HRTFKernels and interpolated delays. |
| 215 HRTFKernel* kernelL1; | 222 HRTFKernel* kernelL1; |
| 216 HRTFKernel* kernelR1; | 223 HRTFKernel* kernelR1; |
| (...skipping 36 matching lines...) |
| 253 float* segmentDestinationR = destinationR + offset; | 260 float* segmentDestinationR = destinationR + offset; |
| 254 | 261 |
| 255 // First run through delay lines for inter-aural time difference. | 262 // First run through delay lines for inter-aural time difference. |
| 256 m_delayLineL.setDelayFrames(frameDelayL); | 263 m_delayLineL.setDelayFrames(frameDelayL); |
| 257 m_delayLineR.setDelayFrames(frameDelayR); | 264 m_delayLineR.setDelayFrames(frameDelayR); |
| 258 m_delayLineL.process(segmentSourceL, segmentDestinationL, framesPerSegment); | 265 m_delayLineL.process(segmentSourceL, segmentDestinationL, framesPerSegment); |
| 259 m_delayLineR.process(segmentSourceR, segmentDestinationR, framesPerSegment); | 266 m_delayLineR.process(segmentSourceR, segmentDestinationR, framesPerSegment); |
| 260 | 267 |
| 261 bool needsCrossfading = m_crossfadeIncr; | 268 bool needsCrossfading = m_crossfadeIncr; |
| 262 | 269 |
| 263 // Have the convolvers render directly to the final destination if we're not cross-fading. | 270 // Have the convolvers render directly to the final destination if we're not |
| 271 // cross-fading. |
| 264 float* convolutionDestinationL1 = | 272 float* convolutionDestinationL1 = |
| 265 needsCrossfading ? m_tempL1.data() : segmentDestinationL; | 273 needsCrossfading ? m_tempL1.data() : segmentDestinationL; |
| 266 float* convolutionDestinationR1 = | 274 float* convolutionDestinationR1 = |
| 267 needsCrossfading ? m_tempR1.data() : segmentDestinationR; | 275 needsCrossfading ? m_tempR1.data() : segmentDestinationR; |
| 268 float* convolutionDestinationL2 = | 276 float* convolutionDestinationL2 = |
| 269 needsCrossfading ? m_tempL2.data() : segmentDestinationL; | 277 needsCrossfading ? m_tempL2.data() : segmentDestinationL; |
| 270 float* convolutionDestinationR2 = | 278 float* convolutionDestinationR2 = |
| 271 needsCrossfading ? m_tempR2.data() : segmentDestinationR; | 279 needsCrossfading ? m_tempR2.data() : segmentDestinationR; |
| 272 | 280 |
| 273 // Now do the convolutions. | 281 // Now do the convolutions. |
| 274 // Note that we avoid doing convolutions on both sets of convolvers if we're not currently cross-fading. | 282 // Note that we avoid doing convolutions on both sets of convolvers if we're |
| 283 // not currently cross-fading. |
| 275 | 284 |
| 276 if (m_crossfadeSelection == CrossfadeSelection1 || needsCrossfading) { | 285 if (m_crossfadeSelection == CrossfadeSelection1 || needsCrossfading) { |
| 277 m_convolverL1.process(kernelL1->fftFrame(), segmentDestinationL, | 286 m_convolverL1.process(kernelL1->fftFrame(), segmentDestinationL, |
| 278 convolutionDestinationL1, framesPerSegment); | 287 convolutionDestinationL1, framesPerSegment); |
| 279 m_convolverR1.process(kernelR1->fftFrame(), segmentDestinationR, | 288 m_convolverR1.process(kernelR1->fftFrame(), segmentDestinationR, |
| 280 convolutionDestinationR1, framesPerSegment); | 289 convolutionDestinationR1, framesPerSegment); |
| 281 } | 290 } |
| 282 | 291 |
| 283 if (m_crossfadeSelection == CrossfadeSelection2 || needsCrossfading) { | 292 if (m_crossfadeSelection == CrossfadeSelection2 || needsCrossfading) { |
| 284 m_convolverL2.process(kernelL2->fftFrame(), segmentDestinationL, | 293 m_convolverL2.process(kernelL2->fftFrame(), segmentDestinationL, |
| (...skipping 31 matching lines...) |
| 316 } | 325 } |
| 317 } | 326 } |
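
The cross-fade body elided above amounts to a per-frame linear ramp of m_crossfadeX between the two convolver outputs. A minimal sketch of that mixing step, under the assumption of a simple equal-gain linear fade (names here are illustrative, not Blink's, and the elided code may differ in detail):

```cpp
#include <cstdio>

// Equal-gain linear cross-fade between two rendered segments, in the spirit
// of the m_crossfadeX / m_crossfadeIncr logic elided above.
void crossfade(const float* from, const float* to, float* out,
               unsigned frames, float& x, float incr) {
  for (unsigned i = 0; i < frames; ++i) {
    out[i] = (1 - x) * from[i] + x * to[i];
    x += incr;
  }
  if (x < 0) x = 0;  // Park exactly on convolver set 1...
  if (x > 1) x = 1;  // ...or set 2 once the fade completes.
}

int main() {
  float a[4] = {1, 1, 1, 1}, b[4] = {0, 0, 0, 0}, out[4];
  float x = 0;
  crossfade(a, b, out, 4, x, 0.25f);  // out = {1.0, 0.75, 0.5, 0.25}
  std::printf("%g %g %g %g (x=%g)\n", out[0], out[1], out[2], out[3], x);
}
```
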
| 318 | 327 |
| 319 void HRTFPanner::panWithSampleAccurateValues( | 328 void HRTFPanner::panWithSampleAccurateValues( |
| 320 double* desiredAzimuth, | 329 double* desiredAzimuth, |
| 321 double* elevation, | 330 double* elevation, |
| 322 const AudioBus* inputBus, | 331 const AudioBus* inputBus, |
| 323 AudioBus* outputBus, | 332 AudioBus* outputBus, |
| 324 size_t framesToProcess, | 333 size_t framesToProcess, |
| 325 AudioBus::ChannelInterpretation channelInterpretation) { | 334 AudioBus::ChannelInterpretation channelInterpretation) { |
| 326 // Sample-accurate (a-rate) HRTF panner is not implemented, just k-rate. Just grab the current | 335 // Sample-accurate (a-rate) HRTF panner is not implemented, just k-rate. Just |
| 327 // azimuth/elevation and use that. | 336 // grab the current azimuth/elevation and use that. |
| 328 // | 337 // |
| 329 // We are assuming that the inherent smoothing in the HRTF processing is good enough, and we | 338 // We are assuming that the inherent smoothing in the HRTF processing is good |
| 330 // don't want to increase the complexity of the HRTF panner by 15-20 times. (We need to cmopute | 339 // enough, and we don't want to increase the complexity of the HRTF panner by |
| 331 // one output sample for each possibly different impulse response. That N^2. Previously, we | 340 // 15-20 times. (We need to compute one output sample for each possibly |
| 332 // used an FFT to do them all at once for a complexity of N/log2(N). Hence, N/log2(N) times | 341 // different impulse response. That's N^2. Previously, we used an FFT to do |
| 342 // them all at once for a complexity of N/log2(N). Hence, N/log2(N) times |
| 333 // more complex.) | 343 // more complex.) |
| 334 pan(desiredAzimuth[0], elevation[0], inputBus, outputBus, framesToProcess, | 344 pan(desiredAzimuth[0], elevation[0], inputBus, outputBus, framesToProcess, |
| 335 channelInterpretation); | 345 channelInterpretation); |
| 336 } | 346 } |
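
The "15-20 times" figure is consistent with the quoted N/log2(N) ratio if N is taken to be the 128-frame rendering quantum; that interpretation is our assumption, not stated explicitly in the comment. A one-line sanity check:

```cpp
#include <cmath>
#include <cstdio>

int main() {
  // Assuming N is the 128-frame rendering quantum: the per-quantum cost of
  // one impulse response per sample (~N^2) over the FFT-based path
  // (~N*log2(N)) is N/log2(N) = 128/7, inside the quoted 15-20x range.
  const double N = 128;
  std::printf("N/log2(N) = %.1f\n", N / std::log2(N));  // ~18.3
}
```
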
| 337 | 347 |
| 338 double HRTFPanner::tailTime() const { | 348 double HRTFPanner::tailTime() const { |
| 339 // Because HRTFPanner is implemented with a DelayKernel and a FFTConvolver, the tailTime of the HRTFPanner | 349 // Because HRTFPanner is implemented with a DelayKernel and a FFTConvolver, |
| 340 // is the sum of the tailTime of the DelayKernel and the tailTime of the FFTConvolver, which is MaxDelayTimeSeconds | 350 // the tailTime of the HRTFPanner is the sum of the tailTime of the |
| 341 // and fftSize() / 2, respectively. | 351 // DelayKernel and the tailTime of the FFTConvolver, which is |
| 352 // MaxDelayTimeSeconds and fftSize() / 2, respectively. |
| 342 return MaxDelayTimeSeconds + | 353 return MaxDelayTimeSeconds + |
| 343 (fftSize() / 2) / static_cast<double>(sampleRate()); | 354 (fftSize() / 2) / static_cast<double>(sampleRate()); |
| 344 } | 355 } |
| 345 | 356 |
| 346 double HRTFPanner::latencyTime() const { | 357 double HRTFPanner::latencyTime() const { |
| 347 // The latency of a FFTConvolver is also fftSize() / 2, and is in addition to its tailTime of the | 358 // The latency of a FFTConvolver is also fftSize() / 2, and is in addition to |
| 348 // same value. | 359 // its tailTime of the same value. |
| 349 return (fftSize() / 2) / static_cast<double>(sampleRate()); | 360 return (fftSize() / 2) / static_cast<double>(sampleRate()); |
| 350 } | 361 } |
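
Plugging in the 44.1 kHz numbers from fftSizeForSampleRate() above (FFT size 512, so fftSize() / 2 = 256 frames) gives tailTime = 0.002 + 256/44100, about 7.8 ms, and latencyTime = 256/44100, about 5.8 ms. A sketch of that arithmetic:

```cpp
#include <cstdio>

int main() {
  const double maxDelayTimeSeconds = 0.002;  // MaxDelayTimeSeconds above.
  const double sampleRate = 44100;
  const double halfFFTFrames = 512 / 2;  // fftSize() is 512 at 44.1 kHz.
  // Prints tail = 0.0078 s and latency = 0.0058 s.
  std::printf("tail    = %.4f s\n",
              maxDelayTimeSeconds + halfFFTFrames / sampleRate);
  std::printf("latency = %.4f s\n", halfFFTFrames / sampleRate);
}
```
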
| 351 | 362 |
| 352 } // namespace blink | 363 } // namespace blink |