Chromium Code Reviews

Unified Diff: third_party/WebKit/Source/platform/audio/ReverbConvolver.cpp

Issue 2384073002: reflow comments in platform/audio (Closed)
Patch Set: Created 4 years, 2 months ago
Index: third_party/WebKit/Source/platform/audio/ReverbConvolver.cpp
diff --git a/third_party/WebKit/Source/platform/audio/ReverbConvolver.cpp b/third_party/WebKit/Source/platform/audio/ReverbConvolver.cpp
index 412f643b22b2901f44408f3ae01d8c4e89dbdfa1..5fc5317c66147ba17d8346a37f0e91cb81cfdb04 100644
--- a/third_party/WebKit/Source/platform/audio/ReverbConvolver.cpp
+++ b/third_party/WebKit/Source/platform/audio/ReverbConvolver.cpp
@@ -44,13 +44,16 @@ using namespace VectorMath;
const int InputBufferSize = 8 * 16384;
-// We only process the leading portion of the impulse response in the real-time thread. We don't exceed this length.
-// It turns out then, that the background thread has about 278msec of scheduling slop.
-// Empirically, this has been found to be a good compromise between giving enough time for scheduling slop,
-// while still minimizing the amount of processing done in the primary (high-priority) thread.
-// This was found to be a good value on Mac OS X, and may work well on other platforms as well, assuming
-// the very rough scheduling latencies are similar on these time-scales. Of course, this code may need to be
-// tuned for individual platforms if this assumption is found to be incorrect.
+// We only process the leading portion of the impulse response in the real-time
+// thread. We don't exceed this length. It turns out then, that the
+// background thread has about 278msec of scheduling slop. Empirically, this
+// has been found to be a good compromise between giving enough time for
+// scheduling slop, while still minimizing the amount of processing done in the
+// primary (high-priority) thread. This was found to be a good value on Mac OS
+// X, and may work well on other platforms as well, assuming the very rough
+// scheduling latencies are similar on these time-scales. Of course, this code
+// may need to be tuned for individual platforms if this assumption is found to
+// be incorrect.
const size_t RealtimeFrameLimit = 8192 + 4096; // ~278msec @ 44.1KHz
const size_t MinFFTSize = 128;
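The "~278msec @ 44.1KHz" note above is just arithmetic on the two constants: 8192 + 4096 = 12288 frames at a 44.1 kHz sample rate is about 0.279 s. A minimal standalone sketch of that calculation (illustrative only, not part of this patch):

    #include <cstdio>

    int main() {
      const double realtimeFrameLimit = 8192 + 4096;  // 12288 frames, as above
      const double sampleRate = 44100.0;              // 44.1 kHz
      // 12288 / 44100 ≈ 0.2786 s, i.e. the ~278 msec of real-time coverage.
      std::printf("%.1f msec\n", 1000.0 * realtimeFrameLimit / sampleRate);
      return 0;
    }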
@@ -64,21 +67,22 @@ ReverbConvolver::ReverbConvolver(AudioChannel* impulseResponse,
: m_impulseResponseLength(impulseResponse->length()),
m_accumulationBuffer(impulseResponse->length() + renderSliceSize),
m_inputBuffer(InputBufferSize),
- m_minFFTSize(
- MinFFTSize) // First stage will have this size - successive stages will double in size each time
- ,
- m_maxFFTSize(maxFFTSize) // until we hit m_maxFFTSize
+ m_minFFTSize(MinFFTSize), // First stage will have this size - successive
+ // stages will double in size each time
dcheng 2016/10/03 01:21:04 Nit: Might be more readable if comments aren't at the end of the line
Nico 2016/10/03 02:15:24 I think this is pretty good as-is.
+ m_maxFFTSize(maxFFTSize) // until we hit m_maxFFTSize
{
// If we are using background threads then don't exceed this FFT size for the
- // stages which run in the real-time thread. This avoids having only one or two
- // large stages (size 16384 or so) at the end which take a lot of time every several
- // processing slices. This way we amortize the cost over more processing slices.
+ // stages which run in the real-time thread. This avoids having only one or
+ // two large stages (size 16384 or so) at the end which take a lot of time
+ // every several processing slices. This way we amortize the cost over more
+ // processing slices.
m_maxRealtimeFFTSize = MaxRealtimeFFTSize;
const float* response = impulseResponse->data();
size_t totalResponseLength = impulseResponse->length();
- // The total latency is zero because the direct-convolution is used in the leading portion.
+ // The total latency is zero because the direct-convolution is used in the
+ // leading portion.
size_t reverbTotalLatency = 0;
size_t stageOffset = 0;
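As the reflowed init-list comments say, the first stage uses MinFFTSize and successive stages double in size until m_maxFFTSize is reached (with real-time stages further capped at MaxRealtimeFFTSize when background threads are used). A standalone sketch of that progression, using a made-up cap value (illustrative only, not part of this patch):

    #include <cstdio>

    int main() {
      const size_t minFFTSize = 128;    // matches MinFFTSize above
      const size_t maxFFTSize = 32768;  // hypothetical caller-supplied cap
      // First stage uses minFFTSize; each later stage doubles until the cap.
      for (size_t fftSize = minFFTSize; fftSize <= maxFFTSize; fftSize *= 2)
        std::printf("stage FFT size: %zu\n", fftSize);
      return 0;
    }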
@@ -87,12 +91,14 @@ ReverbConvolver::ReverbConvolver(AudioChannel* impulseResponse,
while (stageOffset < totalResponseLength) {
size_t stageSize = fftSize / 2;
- // For the last stage, it's possible that stageOffset is such that we're straddling the end
- // of the impulse response buffer (if we use stageSize), so reduce the last stage's length...
+ // For the last stage, it's possible that stageOffset is such that we're
+ // straddling the end of the impulse response buffer (if we use stageSize),
+ // so reduce the last stage's length...
if (stageSize + stageOffset > totalResponseLength)
stageSize = totalResponseLength - stageOffset;
- // This "staggers" the time when each FFT happens so they don't all happen at the same time
+ // This "staggers" the time when each FFT happens so they don't all happen
+ // at the same time
int renderPhase = convolverRenderPhase + i * renderSliceSize;
bool useDirectConvolver = !stageOffset;
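To make the clamping above concrete with made-up numbers: for a 10000-frame response, a final stage that nominally starts at offset 9000 with stageSize 2048 would run past the end of the buffer, so it is shortened to the 1000 frames that remain (illustrative only, not part of this patch):

    #include <cstdio>

    int main() {
      const size_t totalResponseLength = 10000;  // hypothetical impulse length
      size_t stageOffset = 9000;                 // hypothetical last-stage start
      size_t stageSize = 4096 / 2;               // fftSize / 2, as in the loop
      if (stageSize + stageOffset > totalResponseLength)
        stageSize = totalResponseLength - stageOffset;  // clamps to 1000 frames
      std::printf("last stage size: %zu\n", stageSize);
      return 0;
    }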
@@ -128,7 +134,8 @@ ReverbConvolver::ReverbConvolver(AudioChannel* impulseResponse,
}
// Start up background thread
- // FIXME: would be better to up the thread priority here. It doesn't need to be real-time, but higher than the default...
+ // FIXME: would be better to up the thread priority here. It doesn't need to
+ // be real-time, but higher than the default...
if (useBackgroundThreads && m_backgroundStages.size() > 0)
m_backgroundThread = wrapUnique(Platform::current()->createThread(
"Reverb convolution background thread"));
@@ -140,16 +147,19 @@ ReverbConvolver::~ReverbConvolver() {
}
void ReverbConvolver::processInBackground() {
- // Process all of the stages until their read indices reach the input buffer's write index
+ // Process all of the stages until their read indices reach the input buffer's
+ // write index
int writeIndex = m_inputBuffer.writeIndex();
- // Even though it doesn't seem like every stage needs to maintain its own version of readIndex
- // we do this in case we want to run in more than one background thread.
+ // Even though it doesn't seem like every stage needs to maintain its own
+ // version of readIndex we do this in case we want to run in more than one
+ // background thread.
int readIndex;
while ((readIndex = m_backgroundStages[0]->inputReadIndex()) !=
writeIndex) { // FIXME: do better to detect buffer overrun...
- // The ReverbConvolverStages need to process in amounts which evenly divide half the FFT size
+ // The ReverbConvolverStages need to process in amounts which evenly divide
+ // half the FFT size
const int SliceSize = MinFFTSize / 2;
// Accumulate contributions from each stage
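The reason SliceSize = MinFFTSize / 2 works for every stage is that all stage FFT sizes are powers of two no smaller than MinFFTSize, so half of each one is a multiple of 64. A small standalone check of that property, with an arbitrary upper bound (illustrative only, not part of this patch):

    #include <cassert>

    int main() {
      const int minFFTSize = 128;
      const int sliceSize = minFFTSize / 2;  // 64 frames per background slice
      // Half of every power-of-two FFT size >= minFFTSize divides evenly.
      for (int fftSize = minFFTSize; fftSize <= 32768; fftSize *= 2)
        assert((fftSize / 2) % sliceSize == 0);
      return 0;
    }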
@@ -185,7 +195,8 @@ void ReverbConvolver::process(const AudioChannel* sourceChannel,
// Finally read from accumulation buffer
m_accumulationBuffer.readAndClear(destination, framesToProcess);
- // Now that we've buffered more input, post another task to the background thread.
+ // Now that we've buffered more input, post another task to the background
+ // thread.
if (m_backgroundThread)
m_backgroundThread->getWebTaskRunner()->postTask(
BLINK_FROM_HERE, crossThreadBind(&ReverbConvolver::processInBackground,
