| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2010 Google Inc. All rights reserved. | 2 * Copyright (C) 2010 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
| 6 * are met: | 6 * are met: |
| 7 * | 7 * |
| 8 * 1. Redistributions of source code must retain the above copyright | 8 * 1. Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * 2. Redistributions in binary form must reproduce the above copyright | 10 * 2. Redistributions in binary form must reproduce the above copyright |
| (...skipping 39 matching lines...) |
| 50 // Empirically, this has been found to be a good compromise between giving enough time for scheduling slop, | 50 // Empirically, this has been found to be a good compromise between giving enough time for scheduling slop, |
| 51 // while still minimizing the amount of processing done in the primary (high-priority) thread. | 51 // while still minimizing the amount of processing done in the primary (high-priority) thread. |
| 52 // This was found to be a good value on Mac OS X, and may work well on other platforms as well, assuming | 52 // This was found to be a good value on Mac OS X, and may work well on other platforms as well, assuming |
| 53 // the very rough scheduling latencies are similar on these time-scales. Of course, this code may need to be | 53 // the very rough scheduling latencies are similar on these time-scales. Of course, this code may need to be |
| 54 // tuned for individual platforms if this assumption is found to be incorrect. | 54 // tuned for individual platforms if this assumption is found to be incorrect. |
| 55 const size_t RealtimeFrameLimit = 8192 + 4096; // ~278msec @ 44.1KHz | 55 const size_t RealtimeFrameLimit = 8192 + 4096; // ~278msec @ 44.1KHz |
| 56 | 56 |
| 57 const size_t MinFFTSize = 128; | 57 const size_t MinFFTSize = 128; |
| 58 const size_t MaxRealtimeFFTSize = 2048; | 58 const size_t MaxRealtimeFFTSize = 2048; |
| 59 | 59 |
| 60 ReverbConvolver::ReverbConvolver(AudioChannel* impulseResponse, size_t renderSliceSize, size_t maxFFTSize, size_t convolverRenderPhase, bool useBackgroundThreads) | 60 ReverbConvolver::ReverbConvolver(AudioChannel* impulseResponse, size_t renderSliceSize, size_t maxFFTSize, size_t convolverRenderPhase, bool hasRealtimeConstraint) |
| 61 : m_impulseResponseLength(impulseResponse->length()) | 61 : m_impulseResponseLength(impulseResponse->length()) |
| 62 , m_accumulationBuffer(impulseResponse->length() + renderSliceSize) | 62 , m_accumulationBuffer(impulseResponse->length() + renderSliceSize) |
| 63 , m_inputBuffer(InputBufferSize) | 63 , m_inputBuffer(InputBufferSize) |
| 64 , m_minFFTSize(MinFFTSize) // First stage will have this size - successive stages will double in size each time | 64 , m_minFFTSize(MinFFTSize) // First stage will have this size - successive stages will double in size each time |
| 65 , m_maxFFTSize(maxFFTSize) // until we hit m_maxFFTSize | 65 , m_maxFFTSize(maxFFTSize) // until we hit m_maxFFTSize |
| 66 { | 66 { |
| 67 // If we are using background threads then don't exceed this FFT size for the | 67 // If we are using background threads then don't exceed this FFT size for the |
| 68 // stages which run in the real-time thread. This avoids having only one or two | 68 // stages which run in the real-time thread. This avoids having only one or two |
| 69 // large stages (size 16384 or so) at the end which take a lot of time every several | 69 // large stages (size 16384 or so) at the end which take a lot of time every several |
| 70 // processing slices. This way we amortize the cost over more processing slices. | 70 // processing slices. This way we amortize the cost over more processing slices. |
| 71 m_maxRealtimeFFTSize = MaxRealtimeFFTSize; | 71 m_maxRealtimeFFTSize = MaxRealtimeFFTSize; |
| 72 | 72 |
| 73 // For the moment, a good way to know if we have real-time constraint is to check if we're using background threads. | |
| 74 // Otherwise, assume we're being run from a command-line tool. | |
| 75 bool hasRealtimeConstraint = useBackgroundThreads; | |
| 76 | |
| 77 const float* response = impulseResponse->data(); | 73 const float* response = impulseResponse->data(); |
| 78 size_t totalResponseLength = impulseResponse->length(); | 74 size_t totalResponseLength = impulseResponse->length(); |
| 79 | 75 |
| 80 // The total latency is zero because the direct-convolution is used in the leading portion. | 76 // The total latency is zero because the direct-convolution is used in the leading portion. |
| 81 size_t reverbTotalLatency = 0; | 77 size_t reverbTotalLatency = 0; |
| 82 | 78 |
| 83 size_t stageOffset = 0; | 79 size_t stageOffset = 0; |
| 84 int i = 0; | 80 int i = 0; |
| 85 size_t fftSize = m_minFFTSize; | 81 size_t fftSize = m_minFFTSize; |
| 86 while (stageOffset < totalResponseLength) { | 82 while (stageOffset < totalResponseLength) { |
| 87 size_t stageSize = fftSize / 2; | 83 size_t stageSize = fftSize / 2; |
| 88 | 84 |
| 89 // For the last stage, it's possible that stageOffset is such that we're straddling the end | 85 // For the last stage, it's possible that stageOffset is such that we're straddling the end |
| 90 // of the impulse response buffer (if we use stageSize), so reduce the last stage's length... | 86 // of the impulse response buffer (if we use stageSize), so reduce the last stage's length... |
| 91 if (stageSize + stageOffset > totalResponseLength) | 87 if (stageSize + stageOffset > totalResponseLength) |
| 92 stageSize = totalResponseLength - stageOffset; | 88 stageSize = totalResponseLength - stageOffset; |
| 93 | 89 |
| 94 // This "staggers" the time when each FFT happens so they don't all happ
en at the same time | 90 // This "staggers" the time when each FFT happens so they don't all happ
en at the same time |
| 95 int renderPhase = convolverRenderPhase + i * renderSliceSize; | 91 int renderPhase = convolverRenderPhase + i * renderSliceSize; |
| 96 | 92 |
| 97 bool useDirectConvolver = !stageOffset; | 93 bool useDirectConvolver = !stageOffset; |
| 98 | 94 |
| 99 OwnPtr<ReverbConvolverStage> stage = adoptPtr(new ReverbConvolverStage(response, totalResponseLength, reverbTotalLatency, stageOffset, stageSize, fftSize, renderPhase, renderSliceSize, &m_accumulationBuffer, useDirectConvolver)); | 95 OwnPtr<ReverbConvolverStage> stage = adoptPtr(new ReverbConvolverStage(response, totalResponseLength, reverbTotalLatency, stageOffset, stageSize, fftSize, renderPhase, renderSliceSize, &m_accumulationBuffer, useDirectConvolver)); |
| 100 | 96 |
| 101 bool isBackgroundStage = false; | 97 bool isBackgroundStage = false; |
| 102 | 98 |
| 103 if (useBackgroundThreads && stageOffset > RealtimeFrameLimit) { | 99 if (hasRealtimeConstraint && stageOffset > RealtimeFrameLimit) { |
| 104 m_backgroundStages.append(stage.release()); | 100 m_backgroundStages.append(stage.release()); |
| 105 isBackgroundStage = true; | 101 isBackgroundStage = true; |
| 106 } else | 102 } else |
| 107 m_stages.append(stage.release()); | 103 m_stages.append(stage.release()); |
| 108 | 104 |
| 109 stageOffset += stageSize; | 105 stageOffset += stageSize; |
| 110 ++i; | 106 ++i; |
| 111 | 107 |
| 112 if (!useDirectConvolver) { | 108 if (!useDirectConvolver) { |
| 113 // Figure out next FFT size | 109 // Figure out next FFT size |
| 114 fftSize *= 2; | 110 fftSize *= 2; |
| 115 } | 111 } |
| 116 | 112 |
| 117 if (hasRealtimeConstraint && !isBackgroundStage && fftSize > m_maxRealtimeFFTSize) | 113 if (hasRealtimeConstraint && !isBackgroundStage && fftSize > m_maxRealtimeFFTSize) |
| 118 fftSize = m_maxRealtimeFFTSize; | 114 fftSize = m_maxRealtimeFFTSize; |
| 119 if (fftSize > m_maxFFTSize) | 115 if (fftSize > m_maxFFTSize) |
| 120 fftSize = m_maxFFTSize; | 116 fftSize = m_maxFFTSize; |
| 121 } | 117 } |
| 122 | 118 |
| 123 // Start up background thread | 119 // Start up background thread |
| 124 // FIXME: would be better to up the thread priority here. It doesn't need to be real-time, but higher than the default... | 120 // FIXME: would be better to up the thread priority here. It doesn't need to be real-time, but higher than the default... |
| 125 if (useBackgroundThreads && m_backgroundStages.size() > 0) | 121 if (hasRealtimeConstraint && m_backgroundStages.size() > 0) |
| 126 m_backgroundThread = adoptPtr(Platform::current()->createThread("Reverb convolution background thread")); | 122 m_backgroundThread = adoptPtr(Platform::current()->createThread("Reverb convolution background thread")); |
| 127 } | 123 } |
| 128 | 124 |
| 129 ReverbConvolver::~ReverbConvolver() | 125 ReverbConvolver::~ReverbConvolver() |
| 130 { | 126 { |
| 131 // Wait for background thread to stop | 127 // Wait for background thread to stop |
| 132 m_backgroundThread.clear(); | 128 m_backgroundThread.clear(); |
| 133 } | 129 } |
| 134 | 130 |
| 135 void ReverbConvolver::processInBackground() | 131 void ReverbConvolver::processInBackground() |
| (...skipping 57 matching lines...) |
| 193 } | 189 } |
| 194 | 190 |
| 195 size_t ReverbConvolver::latencyFrames() const | 191 size_t ReverbConvolver::latencyFrames() const |
| 196 { | 192 { |
| 197 return 0; | 193 return 0; |
| 198 } | 194 } |
| 199 | 195 |
| 200 } // namespace blink | 196 } // namespace blink |
| 201 | 197 |
| 202 #endif // ENABLE(WEB_AUDIO) | 198 #endif // ENABLE(WEB_AUDIO) |
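
For reviewers skimming the hunk above, here is a minimal, self-contained sketch of the stage-partitioning logic the constructor implements: each FFT stage covers fftSize / 2 frames, sizes double from MinFFTSize (128) up to maxFFTSize, render phases are staggered by one render slice per stage, and, when hasRealtimeConstraint is set, any stage starting past RealtimeFrameLimit (8192 + 4096 = 12288 frames, i.e. 12288 / 44100 ≈ 278 ms at 44.1 kHz) is handed to the background thread while real-time stages are capped at MaxRealtimeFFTSize (2048). The StagePlan struct and planStages helper are hypothetical names used only for illustration; they are not part of this patch.

#include <cstddef>
#include <vector>

using std::size_t;

// Hypothetical plan entry: mirrors the per-stage parameters the constructor
// passes to ReverbConvolverStage, without any of the FFT machinery.
struct StagePlan {
    size_t offset;      // start frame within the impulse response
    size_t size;        // frames this stage covers (fftSize / 2, clamped at the end)
    size_t fftSize;     // FFT size used by the stage
    int renderPhase;    // staggered so stages don't all run their FFT on the same slice
    bool direct;        // first stage uses direct convolution (zero latency)
    bool background;    // stage would be handed to the background thread
};

// Sketch of the partitioning loop, using the constants from the diff above.
std::vector<StagePlan> planStages(size_t responseLength, size_t renderSliceSize,
                                  size_t maxFFTSize, size_t renderPhaseBase,
                                  bool hasRealtimeConstraint)
{
    const size_t minFFTSize = 128;                  // MinFFTSize
    const size_t maxRealtimeFFTSize = 2048;         // MaxRealtimeFFTSize
    const size_t realtimeFrameLimit = 8192 + 4096;  // 12288 frames, ~278 ms @ 44.1 kHz

    std::vector<StagePlan> plan;
    size_t offset = 0;
    size_t fftSize = minFFTSize;
    int i = 0;

    while (offset < responseLength) {
        size_t stageSize = fftSize / 2;
        if (offset + stageSize > responseLength)
            stageSize = responseLength - offset;    // clamp the final stage

        bool direct = !offset;                      // leading portion uses direct convolution
        bool background = hasRealtimeConstraint && offset > realtimeFrameLimit;

        plan.push_back({ offset, stageSize, fftSize,
                         static_cast<int>(renderPhaseBase + static_cast<size_t>(i) * renderSliceSize),
                         direct, background });

        offset += stageSize;
        ++i;

        if (!direct)
            fftSize *= 2;                           // successive stages double in size
        if (hasRealtimeConstraint && !background && fftSize > maxRealtimeFFTSize)
            fftSize = maxRealtimeFFTSize;           // cap stages that stay on the real-time thread
        if (fftSize > maxFFTSize)
            fftSize = maxFFTSize;
    }
    return plan;
}

Run against a multi-second impulse response, this yields small leading real-time stages (64, 64, 128, 256, 512 frames, then 1024-frame stages once the real-time FFT cap is reached) and, once the offset passes 12288 frames, background stages whose FFT size keeps doubling up to maxFFTSize, which matches the amortization rationale in the comments above.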