Index: Source/core/platform/audio/HRTFElevation.cpp |
diff --git a/Source/core/platform/audio/HRTFElevation.cpp b/Source/core/platform/audio/HRTFElevation.cpp |
index 4278891593ebb2855ede7bf6e625ce3cd7481994..66a81d46c02d5f4e3d5534812e850f8af101f107 100644 |
--- a/Source/core/platform/audio/HRTFElevation.cpp |
+++ b/Source/core/platform/audio/HRTFElevation.cpp |
@@ -62,6 +62,38 @@ const size_t ResponseFrameSize = 256; |
// The impulse responses may be resampled to a different sample-rate (depending on the audio hardware) when they are loaded. |
const float ResponseSampleRate = 44100; |
+// Lazily load the concatenated HRTF database for a given subject and store it in a |
+// local hash table to ensure quick and efficient future retrievals. |
+static AudioBus* getConcatenatedImpulseResponsesForSubject(const String& subjectName) |
+{ |
+ typedef HashMap<String, AudioBus*> AudioBusMap; |
+ DEFINE_STATIC_LOCAL(AudioBusMap, audioBusMap, ()); |
+ |
+ AudioBus* bus; |
+ AudioBusMap::iterator iterator = audioBusMap.find(subjectName); |
+ if (iterator == audioBusMap.end()) { |
+ OwnPtr<AudioBus> concatenatedImpulseResponses = AudioBus::loadPlatformResource(subjectName.utf8().data(), ResponseSampleRate); |
+ ASSERT(concatenatedImpulseResponses); |
+ if (!concatenatedImpulseResponses) |
+ return 0; |
+ |
+ bus = concatenatedImpulseResponses.leakPtr(); |
+ audioBusMap.set(subjectName, bus); |
+ } else |
+ bus = iterator->value; |
+ |
+ size_t responseLength = bus->length(); |
+ size_t expectedLength = static_cast<size_t>(TotalNumberOfResponses * ResponseFrameSize); |
+ |
+ // Check number of channels and length. For now these are fixed and known. |
+ bool isBusGood = responseLength == expectedLength && bus->numberOfChannels() == 2; |
+ ASSERT(isBusGood); |
+ if (!isBusGood) |
+ return 0; |
+ |
+ return bus; |
+} |
+ |
// Takes advantage of the symmetry and creates a composite version of the two measured versions. For example, we have both azimuth 30 and -30 degrees |
// where the roles of left and right ears are reversed with respect to each other. |
bool HRTFElevation::calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName, |
@@ -105,31 +137,38 @@ bool HRTFElevation::calculateKernelsForAzimuthElevation(int azimuth, int elevati |
if (!isElevationGood) |
return false; |
- // Construct the resource name from the subject name, azimuth, and elevation, for example: |
- // "IRC_Composite_C_R0195_T015_P000" |
+ int positiveElevation = elevation < 0 ? elevation + 360 : elevation; |
+ |
// Note: the passed in subjectName is not a string passed in via JavaScript or the web. |
// It's passed in as an internal ASCII identifier and is an implementation detail. |
[Review comment — Chris Rogers, 2013/04/23 19:51:44]
This comment used to be just before line 140 and y… [remainder of the comment was truncated in the review export]
 |
- int positiveElevation = elevation < 0 ? elevation + 360 : elevation; |
+ AudioBus* bus(getConcatenatedImpulseResponsesForSubject(subjectName)); |
- String resourceName = String::format("IRC_%s_C_R0195_T%03d_P%03d", subjectName.utf8().data(), azimuth, positiveElevation); |
+ if (!bus) |
+ return false; |
- OwnPtr<AudioBus> impulseResponse(AudioBus::loadPlatformResource(resourceName.utf8().data(), sampleRate)); |
+ int elevationIndex = positiveElevation / AzimuthSpacing; |
+ if (positiveElevation > 90) |
+ elevationIndex -= AzimuthSpacing; |
- ASSERT(impulseResponse.get()); |
- if (!impulseResponse.get()) |
+ // The concatenated impulse response is a bus containing all |
+ // the elevations per azimuth, for all azimuths by increasing |
+ // order. So for a given azimuth and elevation we need to compute |
+ // the index of the wanted audio frames in the concatenated table. |
+ unsigned index = ((azimuth / AzimuthSpacing) * HRTFDatabase::NumberOfRawElevations) + elevationIndex; |
+ bool isIndexGood = index < TotalNumberOfResponses; |
+ ASSERT(isIndexGood); |
+ if (!isIndexGood) |
return false; |
- |
- size_t responseLength = impulseResponse->length(); |
- size_t expectedLength = static_cast<size_t>(256 * (sampleRate / 44100.0)); |
- // Check number of channels and length. For now these are fixed and known. |
- bool isBusGood = responseLength == expectedLength && impulseResponse->numberOfChannels() == 2; |
- ASSERT(isBusGood); |
- if (!isBusGood) |
- return false; |
- |
- AudioChannel* leftEarImpulseResponse = impulseResponse->channelByType(AudioBus::ChannelLeft); |
- AudioChannel* rightEarImpulseResponse = impulseResponse->channelByType(AudioBus::ChannelRight); |
+ // Extract the individual impulse response from the concatenated |
+ // responses and potentially sample-rate convert it to the desired |
+ // (hardware) sample-rate. |
+ unsigned startFrame = index * ResponseFrameSize; |
+ unsigned stopFrame = startFrame + ResponseFrameSize; |
+ OwnPtr<AudioBus> preSampleRateConvertedResponse = AudioBus::createBufferFromRange(bus, startFrame, stopFrame); |
+ OwnPtr<AudioBus> response = AudioBus::createBySampleRateConverting(preSampleRateConvertedResponse.get(), false, sampleRate); |
+ AudioChannel* leftEarImpulseResponse = response->channel(AudioBus::ChannelLeft); |
+ AudioChannel* rightEarImpulseResponse = response->channel(AudioBus::ChannelRight); |
// Note that depending on the fftSize returned by the panner, we may be truncating the impulse response we just loaded in. |
const size_t fftSize = HRTFPanner::fftSizeForSampleRate(sampleRate); |