OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2010 Google Inc. All rights reserved. | 2 * Copyright (C) 2010 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
6 * are met: | 6 * are met: |
7 * | 7 * |
8 * 1. Redistributions of source code must retain the above copyright | 8 * 1. Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * 2. Redistributions in binary form must reproduce the above copyright | 10 * 2. Redistributions in binary form must reproduce the above copyright |
(...skipping 44 matching lines...)
55 // Total number of components of an HRTF database. | 55 // Total number of components of an HRTF database. |
56 const size_t TotalNumberOfResponses = 240; | 56 const size_t TotalNumberOfResponses = 240; |
57 | 57 |
58 // Number of frames in an individual impulse response. | 58 // Number of frames in an individual impulse response. |
59 const size_t ResponseFrameSize = 256; | 59 const size_t ResponseFrameSize = 256; |
60 | 60 |
61 // Sample-rate of the spatialization impulse responses as stored in the resource file. | 61 // Sample-rate of the spatialization impulse responses as stored in the resource file. |
62 // The impulse responses may be resampled to a different sample-rate (depending on the audio hardware) when they are loaded. | 62 // The impulse responses may be resampled to a different sample-rate (depending on the audio hardware) when they are loaded. |
63 const float ResponseSampleRate = 44100; | 63 const float ResponseSampleRate = 44100; |
64 | 64 |
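These constants pin down the layout of the concatenated HRTF resource used below: with 15-degree azimuth steps (24 raw azimuths) and 10 raw elevations per azimuth, the 240 responses of 256 frames each add up to a 61440-frame bus stored at 44.1 kHz. A minimal sketch of that arithmetic, assuming those spacings (they are defined in HRTFElevation/HRTFDatabase, not in this hunk):

    #include <cstddef>

    // Sketch only: how the totals above are assumed to be composed.
    // 24 azimuths (360 / 15) * 10 elevations (-45..90 in 15-degree steps) = 240 responses.
    static const size_t assumedRawAzimuths = 24;    // assumption, defined outside this hunk
    static const size_t assumedRawElevations = 10;  // assumption, defined outside this hunk
    static const size_t totalResponses = assumedRawAzimuths * assumedRawElevations;  // 240
    static const size_t concatenatedFrames = totalResponses * 256;                   // 61440 frames at 44.1 kHz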
| 65 #if USE(CONCATENATED_IMPULSE_RESPONSES) |
| 66 // Lazily load a concatenated HRTF database for given subject and store it in a |
| 67 // local hash table to ensure quick efficient future retrievals. |
| 68 static PassRefPtr<AudioBus> getConcatenatedImpulseResponsesForSubject(const String& subjectName) |
| 69 { |
| 70 typedef HashMap<String, RefPtr<AudioBus> > AudioBusMap; |
| 71 DEFINE_STATIC_LOCAL(AudioBusMap, audioBusMap, ()); |
| 72 |
| 73 RefPtr<AudioBus> bus; |
| 74 AudioBusMap::iterator iterator = audioBusMap.find(subjectName); |
| 75 if (iterator == audioBusMap.end()) { |
| 76 RefPtr<AudioBus> concatenatedImpulseResponses(AudioBus::loadPlatformResource(subjectName.utf8().data(), ResponseSampleRate)); |
| 77 ASSERT(concatenatedImpulseResponses); |
| 78 if (!concatenatedImpulseResponses) |
| 79 return 0; |
| 80 |
| 81 bus = concatenatedImpulseResponses; |
| 82 audioBusMap.set(subjectName, bus); |
| 83 } else |
| 84 bus = iterator->value; |
| 85 |
| 86 size_t responseLength = bus->length(); |
| 87 size_t expectedLength = static_cast<size_t>(TotalNumberOfResponses * ResponseFrameSize); |
| 88 |
| 89 // Check number of channels and length. For now these are fixed and known. |
| 90 bool isBusGood = responseLength == expectedLength && bus->numberOfChannels() == 2; |
| 91 ASSERT(isBusGood); |
| 92 if (!isBusGood) |
| 93 return 0; |
| 94 |
| 95 return bus; |
| 96 } |
| 97 #endif |
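From the caller's side, the helper above loads the concatenated resource once per subject and serves the cached bus on every later call. A hedged usage sketch, relying on the WebCore types already in this file; the "Composite" subject name is taken from the resource-name example further down and may not be the only subject shipped:

    // First call loads the platform resource and caches it in the static hash map;
    // a second call for the same subject returns the same AudioBus without reloading.
    RefPtr<AudioBus> first = getConcatenatedImpulseResponsesForSubject("Composite");
    RefPtr<AudioBus> second = getConcatenatedImpulseResponsesForSubject("Composite");
    ASSERT(first.get() == second.get());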
| 98 |
65 // Takes advantage of the symmetry and creates a composite version of the two measured versions. For example, we have both azimuth 30 and -30 degrees | 99 // Takes advantage of the symmetry and creates a composite version of the two measured versions. For example, we have both azimuth 30 and -30 degrees |
66 // where the roles of left and right ears are reversed with respect to each other. | 100 // where the roles of left and right ears are reversed with respect to each other. |
67 bool HRTFElevation::calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName, | 101 bool HRTFElevation::calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName, |
68 RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR) | 102 RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR) |
69 { | 103 { |
70 RefPtr<HRTFKernel> kernelL1; | 104 RefPtr<HRTFKernel> kernelL1; |
71 RefPtr<HRTFKernel> kernelR1; | 105 RefPtr<HRTFKernel> kernelR1; |
72 bool success = calculateKernelsForAzimuthElevation(azimuth, elevation, sampleRate, subjectName, kernelL1, kernelR1); | 106 bool success = calculateKernelsForAzimuthElevation(azimuth, elevation, sampleRate, subjectName, kernelL1, kernelR1); |
73 if (!success) | 107 if (!success) |
74 return false; | 108 return false; |
(...skipping 29 matching lines...)
104 ASSERT(isElevationGood); | 138 ASSERT(isElevationGood); |
105 if (!isElevationGood) | 139 if (!isElevationGood) |
106 return false; | 140 return false; |
107 | 141 |
108 // Construct the resource name from the subject name, azimuth, and elevation, for example: | 142 // Construct the resource name from the subject name, azimuth, and elevation, for example: |
109 // "IRC_Composite_C_R0195_T015_P000" | 143 // "IRC_Composite_C_R0195_T015_P000" |
110 // Note: the passed in subjectName is not a string passed in via JavaScript or the web. | 144 // Note: the passed in subjectName is not a string passed in via JavaScript or the web. |
111 // It's passed in as an internal ASCII identifier and is an implementation detail. | 145 // It's passed in as an internal ASCII identifier and is an implementation detail. |
112 int positiveElevation = elevation < 0 ? elevation + 360 : elevation; | 146 int positiveElevation = elevation < 0 ? elevation + 360 : elevation; |
113 | 147 |
| 148 #if USE(CONCATENATED_IMPULSE_RESPONSES) |
| 149 RefPtr<AudioBus> bus(getConcatenatedImpulseResponsesForSubject(subjectName)); |
| 150 |
| 151 if (!bus) |
| 152 return false; |
| 153 |
| 154 int elevationIndex = positiveElevation / AzimuthSpacing; |
| 155 if (positiveElevation > 90) |
| 156 elevationIndex -= AzimuthSpacing; |
| 157 |
| 158 // The concatenated impulse response is a bus containing all |
| 159 // the elevations per azimuth, for all azimuths by increasing |
| 160 // order. So for a given azimuth and elevation we need to compute |
| 161 // the index of the wanted audio frames in the concatenated table. |
| 162 unsigned index = ((azimuth / AzimuthSpacing) * HRTFDatabase::NumberOfRawElevations) + elevationIndex; |
| 163 bool isIndexGood = index < TotalNumberOfResponses; |
| 164 ASSERT(isIndexGood); |
| 165 if (!isIndexGood) |
| 166 return false; |
| 167 |
| 168 // Extract the individual impulse response from the concatenated |
| 169 // responses and potentially sample-rate convert it to the desired |
| 170 // (hardware) sample-rate. |
| 171 unsigned startFrame = index * ResponseFrameSize; |
| 172 unsigned stopFrame = startFrame + ResponseFrameSize; |
| 173 RefPtr<AudioBus> preSampleRateConvertedResponse(AudioBus::createBufferFromRange(bus.get(), startFrame, stopFrame)); |
| 174 RefPtr<AudioBus> response(AudioBus::createBySampleRateConverting(preSampleRateConvertedResponse.get(), false, sampleRate)); |
| 175 AudioChannel* leftEarImpulseResponse = response->channel(AudioBus::ChannelLeft); |
| 176 AudioChannel* rightEarImpulseResponse = response->channel(AudioBus::ChannelRight); |
| 177 #else |
114 String resourceName = String::format("IRC_%s_C_R0195_T%03d_P%03d", subjectName.utf8().data(), azimuth, positiveElevation); | 178 String resourceName = String::format("IRC_%s_C_R0195_T%03d_P%03d", subjectName.utf8().data(), azimuth, positiveElevation); |
115 | 179 |
116 RefPtr<AudioBus> impulseResponse(AudioBus::loadPlatformResource(resourceName.utf8().data(), sampleRate)); | 180 RefPtr<AudioBus> impulseResponse(AudioBus::loadPlatformResource(resourceName.utf8().data(), sampleRate)); |
117 | 181 |
118 ASSERT(impulseResponse.get()); | 182 ASSERT(impulseResponse.get()); |
119 if (!impulseResponse.get()) | 183 if (!impulseResponse.get()) |
120 return false; | 184 return false; |
121 | 185 |
122 size_t responseLength = impulseResponse->length(); | 186 size_t responseLength = impulseResponse->length(); |
123 size_t expectedLength = static_cast<size_t>(256 * (sampleRate / 44100.0)); | 187 size_t expectedLength = static_cast<size_t>(256 * (sampleRate / 44100.0)); |
124 | 188 |
125 // Check number of channels and length. For now these are fixed and known. | 189 // Check number of channels and length. For now these are fixed and known. |
126 bool isBusGood = responseLength == expectedLength && impulseResponse->numberOfChannels() == 2; | 190 bool isBusGood = responseLength == expectedLength && impulseResponse->numberOfChannels() == 2; |
127 ASSERT(isBusGood); | 191 ASSERT(isBusGood); |
128 if (!isBusGood) | 192 if (!isBusGood) |
129 return false; | 193 return false; |
130 | 194 |
131 AudioChannel* leftEarImpulseResponse = impulseResponse->channelByType(AudioBus::ChannelLeft); | 195 AudioChannel* leftEarImpulseResponse = impulseResponse->channelByType(AudioBus::ChannelLeft); |
132 AudioChannel* rightEarImpulseResponse = impulseResponse->channelByType(AudioBus::ChannelRight); | 196 AudioChannel* rightEarImpulseResponse = impulseResponse->channelByType(AudioBus::ChannelRight); |
| 197 #endif |
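The index arithmetic in the concatenated branch maps an (azimuth, elevation) pair to a 256-frame slice of the big bus. A self-contained sketch with a worked example, assuming AzimuthSpacing is 15 degrees and HRTFDatabase::NumberOfRawElevations is 10 (both defined outside this hunk, but consistent with the 240-response total):

    // Mirrors the slice computation above with the assumed constants inlined.
    static unsigned startFrameForResponse(int azimuth, int elevation)
    {
        const int azimuthSpacing = 15;        // assumed value of AzimuthSpacing
        const unsigned rawElevations = 10;    // assumed value of HRTFDatabase::NumberOfRawElevations
        const unsigned responseFrameSize = 256;

        int positiveElevation = elevation < 0 ? elevation + 360 : elevation;
        int elevationIndex = positiveElevation / azimuthSpacing;
        if (positiveElevation > 90)
            elevationIndex -= azimuthSpacing;

        unsigned index = (azimuth / azimuthSpacing) * rawElevations + elevationIndex;
        return index * responseFrameSize;
    }

    // Worked example: azimuth 30, elevation 45 -> elevationIndex 3, index 23,
    // so this response occupies frames [5888, 6144) of the concatenated bus.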
133 | 198 |
134 // Note that depending on the fftSize returned by the panner, we may be truncating the impulse response we just loaded in. | 199 // Note that depending on the fftSize returned by the panner, we may be truncating the impulse response we just loaded in. |
135 const size_t fftSize = HRTFPanner::fftSizeForSampleRate(sampleRate); | 200 const size_t fftSize = HRTFPanner::fftSizeForSampleRate(sampleRate); |
136 kernelL = HRTFKernel::create(leftEarImpulseResponse, fftSize, sampleRate); | 201 kernelL = HRTFKernel::create(leftEarImpulseResponse, fftSize, sampleRate); |
137 kernelR = HRTFKernel::create(rightEarImpulseResponse, fftSize, sampleRate); | 202 kernelR = HRTFKernel::create(rightEarImpulseResponse, fftSize, sampleRate); |
138 | 203 |
139 return true; | 204 return true; |
140 } | 205 } |
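On the fallback (per-response) path, the length check scales the stored 256-frame response by the ratio of the hardware rate to 44.1 kHz, so at a 48 kHz hardware rate the loader is expected to hand back 278 frames per channel. A small sketch of that expectation, using the same arithmetic as the expectedLength line above:

    #include <cstddef>

    // floor(256 * sampleRate / 44100): 256 at 44.1 kHz, 278 at 48 kHz.
    static size_t expectedResampledLength(float sampleRate)
    {
        return static_cast<size_t>(256 * (sampleRate / 44100.0));
    }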
141 | 206 |
142 // The range of elevations for the IRCAM impulse responses varies depending on azimuth, but the minimum elevation appears to always be -45. | 207 // The range of elevations for the IRCAM impulse responses varies depending on azimuth, but the minimum elevation appears to always be -45. |
(...skipping 134 matching lines...)
277 void HRTFElevation::reportMemoryUsage(MemoryObjectInfo* memoryObjectInfo) const | 342 void HRTFElevation::reportMemoryUsage(MemoryObjectInfo* memoryObjectInfo) const |
278 { | 343 { |
279 MemoryClassInfo info(memoryObjectInfo, this, PlatformMemoryTypes::AudioSharedData); | 344 MemoryClassInfo info(memoryObjectInfo, this, PlatformMemoryTypes::AudioSharedData); |
280 info.addMember(m_kernelListL, "kernelListL"); | 345 info.addMember(m_kernelListL, "kernelListL"); |
281 info.addMember(m_kernelListR, "kernelListR"); | 346 info.addMember(m_kernelListR, "kernelListR"); |
282 } | 347 } |
283 | 348 |
284 } // namespace WebCore | 349 } // namespace WebCore |
285 | 350 |
286 #endif // ENABLE(WEB_AUDIO) | 351 #endif // ENABLE(WEB_AUDIO) |