Index: third_party/WebKit/Source/platform/audio/HRTFKernel.cpp
diff --git a/third_party/WebKit/Source/platform/audio/HRTFKernel.cpp b/third_party/WebKit/Source/platform/audio/HRTFKernel.cpp
index 32e5a4f3841a5409bc680ab168509ff5ab050f6c..f6277e16081d8aac9f84dd2b365ad36c3f3060e6 100644
--- a/third_party/WebKit/Source/platform/audio/HRTFKernel.cpp
+++ b/third_party/WebKit/Source/platform/audio/HRTFKernel.cpp
@@ -53,8 +53,8 @@ static float extractAverageGroupDelay(AudioChannel* channel,
 return 0;
 // Check for power-of-2.
- ASSERT(1UL << static_cast<unsigned>(log2(analysisFFTSize)) ==
-        analysisFFTSize);
+ DCHECK_EQ(1UL << static_cast<unsigned>(log2(analysisFFTSize)),
+           analysisFFTSize);
 FFTFrame estimationFrame(analysisFFTSize);
 estimationFrame.doFFT(impulseP);
@@ -83,7 +83,7 @@ HRTFKernel::HRTFKernel(AudioChannel* channel, size_t fftSize, float sampleRate)
 // Quick fade-out (apply window) at truncation point
 unsigned numberOfFadeOutFrames = static_cast<unsigned>(
     sampleRate / 4410);  // 10 sample-frames @44.1KHz sample-rate
- ASSERT(numberOfFadeOutFrames < truncatedResponseLength);
+ DCHECK_LT(numberOfFadeOutFrames, truncatedResponseLength);
 if (numberOfFadeOutFrames < truncatedResponseLength) {
   for (unsigned i = truncatedResponseLength - numberOfFadeOutFrames;
        i < truncatedResponseLength; ++i) {
@@ -116,16 +116,18 @@ std::unique_ptr<HRTFKernel> HRTFKernel::createInterpolatedKernel(
 HRTFKernel* kernel1,
 HRTFKernel* kernel2,
 float x) {
- ASSERT(kernel1 && kernel2);
+ DCHECK(kernel1);
+ DCHECK(kernel2);
 if (!kernel1 || !kernel2)
   return nullptr;
- ASSERT(x >= 0.0 && x < 1.0);
+ DCHECK_GE(x, 0.0);
+ DCHECK_LT(x, 1.0);
 x = clampTo(x, 0.0f, 1.0f);
 float sampleRate1 = kernel1->sampleRate();
 float sampleRate2 = kernel2->sampleRate();
- ASSERT(sampleRate1 == sampleRate2);
+ DCHECK_EQ(sampleRate1, sampleRate2);
 if (sampleRate1 != sampleRate2)
   return nullptr;
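
Reviewer note, not part of the patch: below is a minimal sketch of the conversion pattern this CL applies, assuming Chromium's base/logging.h DCHECK macros (available to Blink code at the time of this change). The interpolate() helper is hypothetical and exists only to illustrate the ASSERT -> DCHECK_* split; it is not code from HRTFKernel.cpp.

#include "base/logging.h"

// Hypothetical helper, for illustration only.
float interpolate(float a, float b, float x) {
  // Old style: ASSERT(x >= 0.0 && x < 1.0) reports only pass/fail.
  // New style: comparison DCHECKs log both operands when they fire.
  DCHECK_GE(x, 0.0f);
  DCHECK_LT(x, 1.0f);
  return (1 - x) * a + x * b;
}

The comparison forms (DCHECK_EQ, DCHECK_GE, DCHECK_LT) are preferred over a bare DCHECK(a == b) because a failing check prints both values, which makes debug-build failures easier to diagnose. Like ASSERT, they are compiled out of release builds, so the explicit runtime fallbacks in the patched code (the `if (...) return nullptr` paths) are kept.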