| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2010 Google Inc. All rights reserved. | 2 * Copyright (C) 2010 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
| 6 * are met: | 6 * are met: |
| 7 * | 7 * |
| 8 * 1. Redistributions of source code must retain the above copyright | 8 * 1. Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * 2. Redistributions in binary form must reproduce the above copyright | 10 * 2. Redistributions in binary form must reproduce the above copyright |
| (...skipping 71 matching lines...) |
| 82 if (response->sampleRate()) | 82 if (response->sampleRate()) |
| 83 scale *= GainCalibrationSampleRate / response->sampleRate(); | 83 scale *= GainCalibrationSampleRate / response->sampleRate(); |
| 84 | 84 |
| 85 // True-stereo compensation | 85 // True-stereo compensation |
| 86 if (response->numberOfChannels() == 4) | 86 if (response->numberOfChannels() == 4) |
| 87 scale *= 0.5f; | 87 scale *= 0.5f; |
| 88 | 88 |
| 89 return scale; | 89 return scale; |
| 90 } | 90 } |
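
The tail of calculateNormalizationScale() shown above rescales the normalization gain by the ratio of a fixed calibration sample rate to the impulse response's own rate, then halves it for four-channel ("true" stereo) responses. The standalone sketch below restates just that tail outside of Blink: the power-based part of the scale lives in the elided lines and is taken as an input here, and the 44100 Hz calibration rate is only an illustrative placeholder, not a value taken from this hunk.

    #include <cstddef>

    // Hypothetical stand-in for the tail of calculateNormalizationScale();
    // "scale" already holds the power-based normalization computed in the
    // elided lines of the diff.
    float adjustNormalizationScale(float scale, float responseSampleRate, size_t responseChannels)
    {
        const float gainCalibrationSampleRate = 44100; // placeholder calibration rate (assumption)

        // Compensate for impulse responses recorded at a different sample rate.
        if (responseSampleRate)
            scale *= gainCalibrationSampleRate / responseSampleRate;

        // A four-channel ("true" stereo) response sums two convolution paths per
        // output channel, so halve the gain to keep the overall level comparable.
        if (responseChannels == 4)
            scale *= 0.5f;

        return scale;
    }
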
| 91 | 91 |
| 92 Reverb::Reverb(AudioBus* impulseResponse, size_t renderSliceSize, size_t maxFFTSize, size_t numberOfChannels, bool useBackgroundThreads, bool normalize) | 92 Reverb::Reverb(AudioBus* impulseResponse, size_t renderSliceSize, size_t maxFFTSize, size_t numberOfChannels, bool hasRealtimeConstraint, bool normalize) |
| 93 { | 93 { |
| 94 float scale = 1; | 94 float scale = 1; |
| 95 | 95 |
| 96 if (normalize) { | 96 if (normalize) { |
| 97 scale = calculateNormalizationScale(impulseResponse); | 97 scale = calculateNormalizationScale(impulseResponse); |
| 98 | 98 |
| 99 if (scale) | 99 if (scale) |
| 100 impulseResponse->scale(scale); | 100 impulseResponse->scale(scale); |
| 101 } | 101 } |
| 102 | 102 |
| 103 initialize(impulseResponse, renderSliceSize, maxFFTSize, numberOfChannels, useBackgroundThreads); | 103 initialize(impulseResponse, renderSliceSize, maxFFTSize, numberOfChannels, hasRealtimeConstraint); |
| 104 | 104 |
| 105 // Undo scaling since this shouldn't be a destructive operation on impulseResponse. | 105 // Undo scaling since this shouldn't be a destructive operation on impulseResponse. |
| 106 // FIXME: What about roundoff? Perhaps consider making a temporary scaled copy | 106 // FIXME: What about roundoff? Perhaps consider making a temporary scaled copy |
| 107 // instead of scaling and unscaling in place. | 107 // instead of scaling and unscaling in place. |
| 108 if (normalize && scale) | 108 if (normalize && scale) |
| 109 impulseResponse->scale(1 / scale); | 109 impulseResponse->scale(1 / scale); |
| 110 } | 110 } |
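
The FIXME above suggests convolving from a temporary scaled copy rather than scaling the caller's buffer and undoing it, which avoids any roundoff left behind by the in-place round trip. A minimal sketch of that idea, using std::vector<float> as a stand-in for Blink's AudioBus (the helper name is hypothetical):

    #include <cstddef>
    #include <vector>

    // Build a scaled copy of one impulse-response channel; the caller's data is
    // never modified, so no unscaling (and no roundoff) is needed afterwards.
    std::vector<float> makeScaledCopy(const std::vector<float>& impulseResponse, float scale)
    {
        std::vector<float> scaled(impulseResponse.size());
        for (size_t i = 0; i < impulseResponse.size(); ++i)
            scaled[i] = impulseResponse[i] * scale;
        return scaled; // hand this copy to initialization instead of the original
    }
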
| 111 | 111 |
| 112 void Reverb::initialize(AudioBus* impulseResponseBuffer, size_t renderSliceSize, size_t maxFFTSize, size_t numberOfChannels, bool useBackgroundThreads) | 112 void Reverb::initialize(AudioBus* impulseResponseBuffer, size_t renderSliceSize, size_t maxFFTSize, size_t numberOfChannels, bool hasRealtimeConstraint) |
| 113 { | 113 { |
| 114 m_impulseResponseLength = impulseResponseBuffer->length(); | 114 m_impulseResponseLength = impulseResponseBuffer->length(); |
| 115 | 115 |
| 116 // The reverb can handle a mono impulse response and still do stereo processing | 116 // The reverb can handle a mono impulse response and still do stereo processing |
| 117 size_t numResponseChannels = impulseResponseBuffer->numberOfChannels(); | 117 size_t numResponseChannels = impulseResponseBuffer->numberOfChannels(); |
| 118 m_convolvers.reserveCapacity(numberOfChannels); | 118 m_convolvers.reserveCapacity(numberOfChannels); |
| 119 | 119 |
| 120 int convolverRenderPhase = 0; | 120 int convolverRenderPhase = 0; |
| 121 for (size_t i = 0; i < numResponseChannels; ++i) { | 121 for (size_t i = 0; i < numResponseChannels; ++i) { |
| 122 AudioChannel* channel = impulseResponseBuffer->channel(i); | 122 AudioChannel* channel = impulseResponseBuffer->channel(i); |
| 123 | 123 |
| 124 OwnPtr<ReverbConvolver> convolver = adoptPtr(new ReverbConvolver(channel, renderSliceSize, maxFFTSize, convolverRenderPhase, useBackgroundThreads)); | 124 OwnPtr<ReverbConvolver> convolver = adoptPtr(new ReverbConvolver(channel, renderSliceSize, maxFFTSize, convolverRenderPhase, hasRealtimeConstraint)); |
| 125 m_convolvers.append(convolver.release()); | 125 m_convolvers.append(convolver.release()); |
| 126 | 126 |
| 127 convolverRenderPhase += renderSliceSize; | 127 convolverRenderPhase += renderSliceSize; |
| 128 } | 128 } |
| 129 | 129 |
| 130 // For "True" stereo processing we allocate a temporary buffer to avoid repe
atedly allocating it in the process() method. | 130 // For "True" stereo processing we allocate a temporary buffer to avoid repe
atedly allocating it in the process() method. |
| 131 // It can be bad to allocate memory in a real-time thread. | 131 // It can be bad to allocate memory in a real-time thread. |
| 132 if (numResponseChannels == 4) | 132 if (numResponseChannels == 4) |
| 133 m_tempBuffer = AudioBus::create(2, MaxFrameSize); | 133 m_tempBuffer = AudioBus::create(2, MaxFrameSize); |
| 134 } | 134 } |
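
In the loop above, each per-channel ReverbConvolver is handed a render phase that grows by one render slice per channel, presumably so that the convolvers' heavier periodic FFT work comes due in different render quanta rather than all at once. A small illustration of the offsets that loop produces (names here are illustrative, not Blink API):

    #include <cstddef>
    #include <vector>

    // Compute the render-phase offsets assigned in the loop above: channel i
    // starts i render slices "later", staggering the convolvers' periodic work.
    std::vector<size_t> staggeredRenderPhases(size_t numResponseChannels, size_t renderSliceSize)
    {
        std::vector<size_t> phases;
        phases.reserve(numResponseChannels);
        for (size_t i = 0; i < numResponseChannels; ++i)
            phases.push_back(i * renderSliceSize);
        return phases; // e.g. renderSliceSize = 128, 4 channels -> {0, 128, 256, 384}
    }
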
| (...skipping 97 matching lines...) |
| 232 } | 232 } |
| 233 | 233 |
| 234 size_t Reverb::latencyFrames() const | 234 size_t Reverb::latencyFrames() const |
| 235 { | 235 { |
| 236 return !m_convolvers.isEmpty() ? m_convolvers.first()->latencyFrames() : 0; | 236 return !m_convolvers.isEmpty() ? m_convolvers.first()->latencyFrames() : 0; |
| 237 } | 237 } |
| 238 | 238 |
| 239 } // namespace blink | 239 } // namespace blink |
| 240 | 240 |
| 241 #endif // ENABLE(WEB_AUDIO) | 241 #endif // ENABLE(WEB_AUDIO) |