| Index: Source/core/platform/audio/HRTFElevation.cpp
| diff --git a/Source/core/platform/audio/HRTFElevation.cpp b/Source/core/platform/audio/HRTFElevation.cpp
| index c3faba6f4340d4d1cc0ae68b5c8b5647e2153b5b..4278891593ebb2855ede7bf6e625ce3cd7481994 100644
| --- a/Source/core/platform/audio/HRTFElevation.cpp
| +++ b/Source/core/platform/audio/HRTFElevation.cpp
| @@ -62,44 +62,6 @@ const size_t ResponseFrameSize = 256;
|  // The impulse responses may be resampled to a different sample-rate (depending on the audio hardware) when they are loaded.
|  const float ResponseSampleRate = 44100;
| 
| -#if USE(WEBAUDIO_GSTREAMER)
| -#define USE_CONCATENATED_IMPULSE_RESPONSES
| -#endif
| -
| -#ifdef USE_CONCATENATED_IMPULSE_RESPONSES
| -// Lazily load a concatenated HRTF database for given subject and store it in a
| -// local hash table to ensure quick efficient future retrievals.
| -static AudioBus* getConcatenatedImpulseResponsesForSubject(const String& subjectName)
| -{
| -    typedef HashMap<String, AudioBus*> AudioBusMap;
| -    DEFINE_STATIC_LOCAL(AudioBusMap, audioBusMap, ());
| -
| -    AudioBus* bus;
| -    AudioBusMap::iterator iterator = audioBusMap.find(subjectName);
| -    if (iterator == audioBusMap.end()) {
| -        OwnPtr<AudioBus> concatenatedImpulseResponses = AudioBus::loadPlatformResource(subjectName.utf8().data(), ResponseSampleRate);
| -        ASSERT(concatenatedImpulseResponses);
| -        if (!concatenatedImpulseResponses)
| -            return 0;
| -
| -        bus = concatenatedImpulseResponses.leakPtr();
| -        audioBusMap.set(subjectName, bus);
| -    } else
| -        bus = iterator->value;
| -
| -    size_t responseLength = bus->length();
| -    size_t expectedLength = static_cast<size_t>(TotalNumberOfResponses * ResponseFrameSize);
| -
| -    // Check number of channels and length. For now these are fixed and known.
| -    bool isBusGood = responseLength == expectedLength && bus->numberOfChannels() == 2;
| -    ASSERT(isBusGood);
| -    if (!isBusGood)
| -        return 0;
| -
| -    return bus;
| -}
| -#endif
| -
|  // Takes advantage of the symmetry and creates a composite version of the two measured versions. For example, we have both azimuth 30 and -30 degrees
|  // where the roles of left and right ears are reversed with respect to each other.
|  bool HRTFElevation::calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
|
| @@ -149,36 +111,6 @@ bool HRTFElevation::calculateKernelsForAzimuthElevation(int azimuth, int elevati
|      // It's passed in as an internal ASCII identifier and is an implementation detail.
|      int positiveElevation = elevation < 0 ? elevation + 360 : elevation;
| 
| -#ifdef USE_CONCATENATED_IMPULSE_RESPONSES
| -    AudioBus* bus(getConcatenatedImpulseResponsesForSubject(subjectName));
| -
| -    if (!bus)
| -        return false;
| -
| -    int elevationIndex = positiveElevation / AzimuthSpacing;
| -    if (positiveElevation > 90)
| -        elevationIndex -= AzimuthSpacing;
| -
| -    // The concatenated impulse response is a bus containing all
| -    // the elevations per azimuth, for all azimuths by increasing
| -    // order. So for a given azimuth and elevation we need to compute
| -    // the index of the wanted audio frames in the concatenated table.
| -    unsigned index = ((azimuth / AzimuthSpacing) * HRTFDatabase::NumberOfRawElevations) + elevationIndex;
| -    bool isIndexGood = index < TotalNumberOfResponses;
| -    ASSERT(isIndexGood);
| -    if (!isIndexGood)
| -        return false;
| -
| -    // Extract the individual impulse response from the concatenated
| -    // responses and potentially sample-rate convert it to the desired
| -    // (hardware) sample-rate.
| -    unsigned startFrame = index * ResponseFrameSize;
| -    unsigned stopFrame = startFrame + ResponseFrameSize;
| -    OwnPtr<AudioBus> preSampleRateConvertedResponse = AudioBus::createBufferFromRange(bus, startFrame, stopFrame);
| -    OwnPtr<AudioBus> response = AudioBus::createBySampleRateConverting(preSampleRateConvertedResponse.get(), false, sampleRate);
| -    AudioChannel* leftEarImpulseResponse = response->channel(AudioBus::ChannelLeft);
| -    AudioChannel* rightEarImpulseResponse = response->channel(AudioBus::ChannelRight);
| -#else
|      String resourceName = String::format("IRC_%s_C_R0195_T%03d_P%03d", subjectName.utf8().data(), azimuth, positiveElevation);
| 
|      OwnPtr<AudioBus> impulseResponse(AudioBus::loadPlatformResource(resourceName.utf8().data(), sampleRate));
|
| @@ -198,7 +130,6 @@ bool HRTFElevation::calculateKernelsForAzimuthElevation(int azimuth, int elevati
| 
|      AudioChannel* leftEarImpulseResponse = impulseResponse->channelByType(AudioBus::ChannelLeft);
|      AudioChannel* rightEarImpulseResponse = impulseResponse->channelByType(AudioBus::ChannelRight);
| -#endif
| 
|      // Note that depending on the fftSize returned by the panner, we may be truncating the impulse response we just loaded in.
|      const size_t fftSize = HRTFPanner::fftSizeForSampleRate(sampleRate);
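
The helper deleted in the first hunk, getConcatenatedImpulseResponsesForSubject, memoizes one expensive resource load per subject in a function-local static hash map and deliberately leaks the cached buses (via leakPtr()), since they live for the lifetime of the process. A rough standard-library sketch of that pattern follows; AudioBus, loadPlatformResource, and the subject name below are stand-ins for the WTF/Blink types, not the real API:

    #include <memory>
    #include <string>
    #include <unordered_map>

    // Stand-in for the Blink AudioBus type; the real one holds channel data.
    struct AudioBus {
        size_t length = 0;
    };

    // Stand-in for AudioBus::loadPlatformResource(); returns null on failure.
    static std::unique_ptr<AudioBus> loadPlatformResource(const std::string& /*name*/)
    {
        return std::make_unique<AudioBus>();
    }

    // Analogue of the removed helper: load the concatenated bus once per
    // subject, cache the raw pointer, and return the cached value afterwards.
    // Entries are intentionally never freed, mirroring the original's use of
    // leakPtr() for a process-lifetime cache.
    static AudioBus* concatenatedResponsesForSubject(const std::string& subjectName)
    {
        static std::unordered_map<std::string, AudioBus*> cache;

        auto it = cache.find(subjectName);
        if (it != cache.end())
            return it->second;

        std::unique_ptr<AudioBus> bus = loadPlatformResource(subjectName);
        if (!bus)
            return nullptr;

        AudioBus* raw = bus.release(); // process-lifetime cache entry
        cache.emplace(subjectName, raw);
        return raw;
    }

    int main()
    {
        AudioBus* first = concatenatedResponsesForSubject("ExampleSubject");
        AudioBus* second = concatenatedResponsesForSubject("ExampleSubject");
        return first == second ? 0 : 1; // same cached pointer both times
    }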
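The second hunk removed the arithmetic that located a single 256-frame response inside the concatenated bus. The sketch below reproduces that lookup as standalone code. ResponseFrameSize = 256 is taken from the hunk context above; AzimuthSpacing = 15, NumberOfRawElevations = 10, and TotalNumberOfResponses = 240 are assumed values for illustration, since their definitions fall outside this diff:

    #include <cassert>
    #include <cstddef>

    const size_t ResponseFrameSize = 256;       // from the patch context above
    const int AzimuthSpacing = 15;              // assumed: 360 / 15 = 24 raw azimuths
    const unsigned NumberOfRawElevations = 10;  // assumed: -45 to 90 in 15-degree steps
    const unsigned TotalNumberOfResponses = 24 * NumberOfRawElevations; // assumed: 240

    // Mirrors the removed lookup: map (azimuth, elevation) to the frame range
    // of one response inside the concatenated bus, which stores every
    // elevation per azimuth, with azimuths in increasing order.
    bool frameRangeForAzimuthElevation(int azimuth, int elevation, unsigned& startFrame, unsigned& stopFrame)
    {
        int positiveElevation = elevation < 0 ? elevation + 360 : elevation;

        int elevationIndex = positiveElevation / AzimuthSpacing;
        if (positiveElevation > 90)
            elevationIndex -= AzimuthSpacing;

        unsigned index = ((azimuth / AzimuthSpacing) * NumberOfRawElevations) + elevationIndex;
        if (index >= TotalNumberOfResponses)
            return false;

        startFrame = index * ResponseFrameSize;
        stopFrame = startFrame + ResponseFrameSize;
        return true;
    }

    int main()
    {
        unsigned start = 0;
        unsigned stop = 0;
        // Azimuth 30 is azimuth slot 2 and elevation 15 is elevation slot 1,
        // so the response index is 2 * 10 + 1 = 21 and the frame range is
        // [21 * 256, 22 * 256) = [5376, 5632).
        assert(frameRangeForAzimuthElevation(30, 15, start, stop));
        assert(start == 21 * ResponseFrameSize);
        assert(stop == start + ResponseFrameSize);
        return 0;
    }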