Chromium Code Reviews

Unified Diff: Source/core/platform/audio/HRTFElevation.cpp

Issue 14304002: Support concatenated spatialization data (Closed) Base URL: https://chromium.googlesource.com/chromium/blink.git@master
Patch Set: Rebase (created 7 years, 8 months ago)
Index: Source/core/platform/audio/HRTFElevation.cpp
diff --git a/Source/core/platform/audio/HRTFElevation.cpp b/Source/core/platform/audio/HRTFElevation.cpp
index 9bf6e5f742f48f51016249e5091de74fcdeca20f..e7092ec9927e8ea64a6a321f7ab07225dddd1d5c 100644
--- a/Source/core/platform/audio/HRTFElevation.cpp
+++ b/Source/core/platform/audio/HRTFElevation.cpp
@@ -62,6 +62,40 @@ const size_t ResponseFrameSize = 256;
// The impulse responses may be resampled to a different sample-rate (depending on the audio hardware) when they are loaded.
const float ResponseSampleRate = 44100;
+#if USE(CONCATENATED_IMPULSE_RESPONSES)
+// Lazily load the concatenated HRTF database for a given subject and store it in a
+// local hash table so that future retrievals are quick and efficient.
+static PassRefPtr<AudioBus> getConcatenatedImpulseResponsesForSubject(const String& subjectName)
+{
+    typedef HashMap<String, RefPtr<AudioBus> > AudioBusMap;
+    DEFINE_STATIC_LOCAL(AudioBusMap, audioBusMap, ());
+
+    RefPtr<AudioBus> bus;
+    AudioBusMap::iterator iterator = audioBusMap.find(subjectName);
+    if (iterator == audioBusMap.end()) {
+        RefPtr<AudioBus> concatenatedImpulseResponses(AudioBus::loadPlatformResource(subjectName.utf8().data(), ResponseSampleRate));
+        ASSERT(concatenatedImpulseResponses);
+        if (!concatenatedImpulseResponses)
+            return 0;
+
+        bus = concatenatedImpulseResponses;
+        audioBusMap.set(subjectName, bus);
+    } else
+        bus = iterator->value;
+
+    size_t responseLength = bus->length();
+    size_t expectedLength = static_cast<size_t>(TotalNumberOfResponses * ResponseFrameSize);
+
+    // Check the number of channels and length. For now these are fixed and known.
+    bool isBusGood = responseLength == expectedLength && bus->numberOfChannels() == 2;
+    ASSERT(isBusGood);
+    if (!isBusGood)
+        return 0;
+
+    return bus;
+}
+#endif
+
// Takes advantage of the symmetry and creates a composite version of the two measured versions. For example, we have both azimuth 30 and -30 degrees
// where the roles of left and right ears are reversed with respect to each other.
bool HRTFElevation::calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
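
The hunk above adds a lazily populated, function-local cache keyed by subject name, so the concatenated resource is decoded at most once per subject. A minimal standalone sketch of that lookup-or-load pattern (not part of the patch), using std::unordered_map and std::shared_ptr in place of WTF::HashMap and RefPtr, follows; AudioBusStub, loadConcatenatedResource, and the sizes are invented stand-ins rather than Blink API:

// Standalone illustration of the caching pattern in
// getConcatenatedImpulseResponsesForSubject(); all names below are stand-ins.
#include <cstddef>
#include <cstdio>
#include <memory>
#include <string>
#include <unordered_map>

struct AudioBusStub {
    std::size_t length = 0;
    unsigned numberOfChannels = 0;
};

// Stand-in for AudioBus::loadPlatformResource(); a real loader would decode
// the bundled HRTF resource at the requested sample rate.
static std::shared_ptr<AudioBusStub> loadConcatenatedResource(const std::string& /*subjectName*/, float /*sampleRate*/)
{
    auto bus = std::make_shared<AudioBusStub>();
    bus->numberOfChannels = 2;
    bus->length = 240 * 256; // assumed TotalNumberOfResponses * ResponseFrameSize
    return bus;
}

static std::shared_ptr<AudioBusStub> concatenatedResponsesForSubject(const std::string& subjectName)
{
    // Function-local static cache, populated on first use and shared by all
    // later calls (the role DEFINE_STATIC_LOCAL plays in the patch).
    static std::unordered_map<std::string, std::shared_ptr<AudioBusStub>> cache;

    auto it = cache.find(subjectName);
    if (it != cache.end())
        return it->second; // cache hit: no resource reload

    std::shared_ptr<AudioBusStub> bus = loadConcatenatedResource(subjectName, 44100);
    if (!bus)
        return nullptr;

    // Reject buses with an unexpected shape (the patch checks the stereo
    // layout and total length as well).
    if (bus->numberOfChannels != 2 || bus->length != 240u * 256u)
        return nullptr;

    cache.emplace(subjectName, bus);
    return bus;
}

int main()
{
    auto first = concatenatedResponsesForSubject("Composite");
    auto second = concatenatedResponsesForSubject("Composite"); // served from the cache
    std::printf("same bus reused: %s\n", first.get() == second.get() ? "yes" : "no");
    return 0;
}

The design point is the same as in the patch: pay the resource-decoding cost once per subject, validate the bus shape, and hand out the shared bus on every later call.
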
@@ -111,6 +145,36 @@ bool HRTFElevation::calculateKernelsForAzimuthElevation(int azimuth, int elevati
    // It's passed in as an internal ASCII identifier and is an implementation detail.
    int positiveElevation = elevation < 0 ? elevation + 360 : elevation;
+#if USE(CONCATENATED_IMPULSE_RESPONSES)
+    RefPtr<AudioBus> bus(getConcatenatedImpulseResponsesForSubject(subjectName));
+
+    if (!bus)
+        return false;
+
+    int elevationIndex = positiveElevation / AzimuthSpacing;
+    if (positiveElevation > 90)
+        elevationIndex -= AzimuthSpacing;
+
+    // The concatenated impulse response is a bus containing all
+    // the elevations for each azimuth, with the azimuths in increasing
+    // order. So for a given azimuth and elevation we need to compute
+    // the index of the desired audio frames in the concatenated table.
+    unsigned index = ((azimuth / AzimuthSpacing) * HRTFDatabase::NumberOfRawElevations) + elevationIndex;
+    bool isIndexGood = index < TotalNumberOfResponses;
+    ASSERT(isIndexGood);
+    if (!isIndexGood)
+        return false;
+
+    // Extract the individual impulse response from the concatenated
+    // responses and sample-rate convert it to the desired
+    // (hardware) sample rate if necessary.
+    unsigned startFrame = index * ResponseFrameSize;
+    unsigned stopFrame = startFrame + ResponseFrameSize;
+    RefPtr<AudioBus> preSampleRateConvertedResponse(AudioBus::createBufferFromRange(bus.get(), startFrame, stopFrame));
+    RefPtr<AudioBus> response(AudioBus::createBySampleRateConverting(preSampleRateConvertedResponse.get(), false, sampleRate));
+    AudioChannel* leftEarImpulseResponse = response->channel(AudioBus::ChannelLeft);
+    AudioChannel* rightEarImpulseResponse = response->channel(AudioBus::ChannelRight);
+#else
    String resourceName = String::format("IRC_%s_C_R0195_T%03d_P%03d", subjectName.utf8().data(), azimuth, positiveElevation);
    RefPtr<AudioBus> impulseResponse(AudioBus::loadPlatformResource(resourceName.utf8().data(), sampleRate));
@@ -130,6 +194,7 @@ bool HRTFElevation::calculateKernelsForAzimuthElevation(int azimuth, int elevati
    AudioChannel* leftEarImpulseResponse = impulseResponse->channelByType(AudioBus::ChannelLeft);
    AudioChannel* rightEarImpulseResponse = impulseResponse->channelByType(AudioBus::ChannelRight);
+#endif
    // Note that depending on the fftSize returned by the panner, we may be truncating the impulse response we just loaded in.
    const size_t fftSize = HRTFPanner::fftSizeForSampleRate(sampleRate);
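
For the indexing step in the USE(CONCATENATED_IMPULSE_RESPONSES) branch above, the following self-contained sketch (not part of the patch) walks through the (azimuth, elevation) to frame-range arithmetic. The constants are placeholders chosen for illustration; the real AzimuthSpacing, HRTFDatabase::NumberOfRawElevations, and TotalNumberOfResponses are defined elsewhere in HRTFElevation.cpp and HRTFDatabase.h, and the adjustment for elevations stored above 90 degrees simply mirrors the patch.

// Illustration of the flat-index computation used to slice one impulse
// response out of the concatenated bus. Constant values are assumptions
// for the example, not the values used by Blink.
#include <cstdio>

const int kAzimuthSpacing = 15;              // assumed spacing between measurements (degrees)
const unsigned kNumberOfRawElevations = 10;  // assumed HRTFDatabase::NumberOfRawElevations
const unsigned kResponseFrameSize = 256;     // matches ResponseFrameSize in the file
const unsigned kTotalNumberOfResponses = 24 * kNumberOfRawElevations; // assumed

// Computes [startFrame, stopFrame) for a given azimuth/elevation, following
// the same scheme as the patch: responses are stored elevation by elevation
// within each azimuth, with azimuths in increasing order.
bool frameRangeForAzimuthElevation(int azimuth, int elevation, unsigned& startFrame, unsigned& stopFrame)
{
    // Negative elevations are encoded as 360 + elevation (e.g. -45 -> 315),
    // as in calculateKernelsForAzimuthElevation().
    int positiveElevation = elevation < 0 ? elevation + 360 : elevation;

    int elevationIndex = positiveElevation / kAzimuthSpacing;
    if (positiveElevation > 90)
        elevationIndex -= kAzimuthSpacing; // same adjustment as the patch for wrapped elevations

    unsigned index = (azimuth / kAzimuthSpacing) * kNumberOfRawElevations + elevationIndex;
    if (elevationIndex < 0 || index >= kTotalNumberOfResponses)
        return false;

    startFrame = index * kResponseFrameSize;
    stopFrame = startFrame + kResponseFrameSize;
    return true;
}

int main()
{
    unsigned start = 0, stop = 0;
    // Example: azimuth 30, elevation 45 -> index (30 / 15) * 10 + 3 = 23,
    // i.e. frames [23 * 256, 24 * 256) = [5888, 6144).
    if (frameRangeForAzimuthElevation(30, 45, start, stop))
        std::printf("frames [%u, %u)\n", start, stop);
    return 0;
}

In the patch, AudioBus::createBufferFromRange() then copies exactly those ResponseFrameSize frames out of the concatenated bus, and AudioBus::createBySampleRateConverting() resamples them to the hardware sample rate when it differs from the 44.1 kHz source data.
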