Chromium Code Reviews

Unified Diff: third_party/WebKit/Source/platform/audio/HRTFElevation.cpp

Issue 2803733002: Convert ASSERT(foo) to DCHECK(foo) in platform/audio (Closed)
Patch Set: Mechanical change from ASSERT(foo) to DCHECK(foo). Created 3 years, 8 months ago.
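Background for the change: Blink's legacy ASSERT and Chromium's DCHECK are both debug-only assertions, so the conversion is a one-for-one textual replacement and every existing release-mode early-return guard stays in place. Below is a minimal sketch of the pattern, using a simplified hypothetical stand-in macro rather than the real DCHECK from base/logging.h (which additionally supports message streaming and DCHECK_ALWAYS_ON):

// Illustrative sketch only, not part of this CL. DCHECK_SKETCH is a
// hypothetical stand-in for Chromium's DCHECK.
#include <cassert>
#include <cstddef>

#ifdef NDEBUG
#define DCHECK_SKETCH(condition) ((void)0)          // compiled out in release
#else
#define DCHECK_SKETCH(condition) assert(condition)  // fires in debug builds
#endif

const float* checkedLoad(const float* data, size_t length, size_t expected) {
  bool isBusGood = data && length == expected;
  // Before this CL: ASSERT(isBusGood);  after: DCHECK(isBusGood).
  DCHECK_SKETCH(isBusGood);
  if (!isBusGood)  // still required: the assertion above is a no-op in release
    return nullptr;
  return data;
}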
 /*
  * Copyright (C) 2010 Google Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
(...skipping 62 matching lines...)
   DEFINE_THREAD_SAFE_STATIC_LOCAL(AudioBusMap, audioBusMap, new AudioBusMap());
   DEFINE_THREAD_SAFE_STATIC_LOCAL(Mutex, mutex, new Mutex());

   MutexLocker locker(mutex);
   RefPtr<AudioBus> bus;
   AudioBusMap::iterator iterator = audioBusMap.find(subjectName);
   if (iterator == audioBusMap.end()) {
     RefPtr<AudioBus> concatenatedImpulseResponses(
         AudioBus::loadPlatformResource(subjectName.utf8().data(),
                                        ResponseSampleRate));
-    ASSERT(concatenatedImpulseResponses);
+    DCHECK(concatenatedImpulseResponses);
     if (!concatenatedImpulseResponses)
       return nullptr;

     bus = concatenatedImpulseResponses;
     audioBusMap.set(subjectName, bus);
   } else
     bus = iterator->value;

   size_t responseLength = bus->length();
   size_t expectedLength =
       static_cast<size_t>(TotalNumberOfResponses * ResponseFrameSize);

   // Check number of channels and length. For now these are fixed and known.
   bool isBusGood =
       responseLength == expectedLength && bus->numberOfChannels() == 2;
-  ASSERT(isBusGood);
+  DCHECK(isBusGood);
   if (!isBusGood)
     return nullptr;

   return bus;
 }
 #endif

 bool HRTFElevation::calculateKernelsForAzimuthElevation(
     int azimuth,
     int elevation,
     float sampleRate,
     const String& subjectName,
     std::unique_ptr<HRTFKernel>& kernelL,
     std::unique_ptr<HRTFKernel>& kernelR) {
   // Valid values for azimuth are 0 -> 345 in 15 degree increments.
   // Valid values for elevation are -45 -> +90 in 15 degree increments.

   bool isAzimuthGood =
       azimuth >= 0 && azimuth <= 345 && (azimuth / 15) * 15 == azimuth;
-  ASSERT(isAzimuthGood);
+  DCHECK(isAzimuthGood);
   if (!isAzimuthGood)
     return false;

   bool isElevationGood =
       elevation >= -45 && elevation <= 90 && (elevation / 15) * 15 == elevation;
-  ASSERT(isElevationGood);
+  DCHECK(isElevationGood);
   if (!isElevationGood)
     return false;

   // Construct the resource name from the subject name, azimuth, and elevation,
   // for example:
   // "IRC_Composite_C_R0195_T015_P000"
   // Note: the passed in subjectName is not a string passed in via JavaScript or
   // the web. It's passed in as an internal ASCII identifier and is an
   // implementation detail.
   int positiveElevation = elevation < 0 ? elevation + 360 : elevation;

 #if USE(CONCATENATED_IMPULSE_RESPONSES)
   RefPtr<AudioBus> bus(getConcatenatedImpulseResponsesForSubject(subjectName));

   if (!bus)
     return false;

   // Just sequentially search the table to find the correct index.
   int elevationIndex = -1;

   for (int k = 0; k < ElevationIndexTableSize; ++k) {
     if (ElevationIndexTable[k] == positiveElevation) {
       elevationIndex = k;
       break;
     }
   }

   bool isElevationIndexGood =
       (elevationIndex >= 0) && (elevationIndex < ElevationIndexTableSize);
-  ASSERT(isElevationIndexGood);
+  DCHECK(isElevationIndexGood);
   if (!isElevationIndexGood)
     return false;

   // The concatenated impulse response is a bus containing all
   // the elevations per azimuth, for all azimuths by increasing
   // order. So for a given azimuth and elevation we need to compute
   // the index of the wanted audio frames in the concatenated table.
   unsigned index =
       ((azimuth / AzimuthSpacing) * HRTFDatabase::NumberOfRawElevations) +
       elevationIndex;
   bool isIndexGood = index < TotalNumberOfResponses;
-  ASSERT(isIndexGood);
+  DCHECK(isIndexGood);
   if (!isIndexGood)
     return false;

   // Extract the individual impulse response from the concatenated
   // responses and potentially sample-rate convert it to the desired
   // (hardware) sample-rate.
   unsigned startFrame = index * ResponseFrameSize;
   unsigned stopFrame = startFrame + ResponseFrameSize;
   RefPtr<AudioBus> preSampleRateConvertedResponse(
       AudioBus::createBufferFromRange(bus.get(), startFrame, stopFrame));
   RefPtr<AudioBus> response(AudioBus::createBySampleRateConverting(
       preSampleRateConvertedResponse.get(), false, sampleRate));
   AudioChannel* leftEarImpulseResponse =
       response->channel(AudioBus::ChannelLeft);
   AudioChannel* rightEarImpulseResponse =
       response->channel(AudioBus::ChannelRight);
 #else
   String resourceName =
       String::format("IRC_%s_C_R0195_T%03d_P%03d", subjectName.utf8().data(),
                      azimuth, positiveElevation);

   RefPtr<AudioBus> impulseResponse(
       AudioBus::loadPlatformResource(resourceName.utf8().data(), sampleRate));

-  ASSERT(impulseResponse.get());
+  DCHECK(impulseResponse.get());
   if (!impulseResponse.get())
     return false;

   size_t responseLength = impulseResponse->length();
   size_t expectedLength = static_cast<size_t>(256 * (sampleRate / 44100.0));

   // Check number of channels and length. For now these are fixed and known.
   bool isBusGood = responseLength == expectedLength &&
                    impulseResponse->numberOfChannels() == 2;
-  ASSERT(isBusGood);
+  DCHECK(isBusGood);
   if (!isBusGood)
     return false;

   AudioChannel* leftEarImpulseResponse =
       impulseResponse->channelByType(AudioBus::ChannelLeft);
   AudioChannel* rightEarImpulseResponse =
       impulseResponse->channelByType(AudioBus::ChannelRight);
 #endif

   // Note that depending on the fftSize returned by the panner, we may be
(...skipping 37 matching lines...)
     60,  // 330
     45   // 345
 };

 std::unique_ptr<HRTFElevation> HRTFElevation::createForSubject(
     const String& subjectName,
     int elevation,
     float sampleRate) {
   bool isElevationGood =
       elevation >= -45 && elevation <= 90 && (elevation / 15) * 15 == elevation;
-  ASSERT(isElevationGood);
+  DCHECK(isElevationGood);
   if (!isElevationGood)
     return nullptr;

   std::unique_ptr<HRTFKernelList> kernelListL =
       WTF::makeUnique<HRTFKernelList>(NumberOfTotalAzimuths);
   std::unique_ptr<HRTFKernelList> kernelListR =
       WTF::makeUnique<HRTFKernelList>(NumberOfTotalAzimuths);

   // Load convolution kernels from HRTF files.
   int interpolatedIndex = 0;
(...skipping 72 matching lines...)
   return hrtfElevation;
 }

 void HRTFElevation::getKernelsFromAzimuth(double azimuthBlend,
                                           unsigned azimuthIndex,
                                           HRTFKernel*& kernelL,
                                           HRTFKernel*& kernelR,
                                           double& frameDelayL,
                                           double& frameDelayR) {
   bool checkAzimuthBlend = azimuthBlend >= 0.0 && azimuthBlend < 1.0;
-  ASSERT(checkAzimuthBlend);
+  DCHECK(checkAzimuthBlend);
   if (!checkAzimuthBlend)
     azimuthBlend = 0.0;

   unsigned numKernels = m_kernelListL->size();

   bool isIndexGood = azimuthIndex < numKernels;
-  ASSERT(isIndexGood);
+  DCHECK(isIndexGood);
   if (!isIndexGood) {
     kernelL = 0;
     kernelR = 0;
     return;
   }

   // Return the left and right kernels.
   kernelL = m_kernelListL->at(azimuthIndex).get();
   kernelR = m_kernelListR->at(azimuthIndex).get();

   frameDelayL = m_kernelListL->at(azimuthIndex)->frameDelay();
   frameDelayR = m_kernelListR->at(azimuthIndex)->frameDelay();

   int azimuthIndex2 = (azimuthIndex + 1) % numKernels;
   double frameDelay2L = m_kernelListL->at(azimuthIndex2)->frameDelay();
   double frameDelay2R = m_kernelListR->at(azimuthIndex2)->frameDelay();

   // Linearly interpolate delays.
   frameDelayL =
       (1.0 - azimuthBlend) * frameDelayL + azimuthBlend * frameDelay2L;
   frameDelayR =
       (1.0 - azimuthBlend) * frameDelayR + azimuthBlend * frameDelay2R;
 }

 }  // namespace blink
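As background for the isIndexGood check converted above, here is a standalone sketch of the frame-index arithmetic it guards in the concatenated-responses path. The constants below are assumptions chosen to match the comments in the file (15 degree azimuth steps, elevations -45 to +90 in 15 degree steps, 256-frame responses), not values copied from the Chromium headers:

// Hypothetical illustration, not part of this CL: mapping (azimuth,
// elevationIndex) to the frame range of its impulse response inside the
// concatenated bus, mirroring calculateKernelsForAzimuthElevation().
#include <cassert>

constexpr unsigned kAzimuthSpacing = 15;         // assumed: 15 degree steps
constexpr unsigned kNumberOfRawElevations = 10;  // assumed: -45..+90 in 15 degree steps
constexpr unsigned kResponseFrameSize = 256;     // assumed: frames per response
constexpr unsigned kTotalNumberOfResponses =
    (360 / kAzimuthSpacing) * kNumberOfRawElevations;  // 24 * 10 = 240

struct FrameRange {
  unsigned start;
  unsigned stop;
};

FrameRange responseFrames(unsigned azimuth, unsigned elevationIndex) {
  unsigned index =
      (azimuth / kAzimuthSpacing) * kNumberOfRawElevations + elevationIndex;
  assert(index < kTotalNumberOfResponses);  // what DCHECK(isIndexGood) guards
  return {index * kResponseFrameSize, (index + 1) * kResponseFrameSize};
}

// Example: azimuth 30, elevationIndex 3 -> index 23, frames [5888, 6144).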
