/*
 * Copyright (C) 2010 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(WEB_AUDIO)

#include "HRTFElevation.h"

#include "AudioBus.h"
#include "AudioFileReader.h"
#include "Biquad.h"
#include "FFTFrame.h"
#include "HRTFDatabaseLoader.h"
#include "HRTFPanner.h"
#include <algorithm>
#include <math.h>
#include <wtf/NeverDestroyed.h>

namespace WebCore {

const unsigned HRTFElevation::AzimuthSpacing = 15;
const unsigned HRTFElevation::NumberOfRawAzimuths = 360 / AzimuthSpacing;
const unsigned HRTFElevation::InterpolationFactor = 8;
const unsigned HRTFElevation::NumberOfTotalAzimuths = NumberOfRawAzimuths * InterpolationFactor;
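// That is: azimuths are measured every 15 degrees (24 raw measurements per elevation); each gap between
// adjacent raw azimuths is subdivided by a factor of 8, giving 192 total azimuths per elevation.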

// Total number of individual impulse responses in an HRTF database (one response per raw azimuth/elevation pair).
const size_t TotalNumberOfResponses = 240;

// Number of frames in an individual impulse response.
const size_t ResponseFrameSize = 256;

// Sample-rate of the spatialization impulse responses as stored in the resource file.
// The impulse responses may be resampled to a different sample-rate (depending on the audio hardware) when they are loaded.
const float ResponseSampleRate = 44100;

#if PLATFORM(COCOA) || USE(WEBAUDIO_GSTREAMER)
#define USE_CONCATENATED_IMPULSE_RESPONSES
#endif

#ifdef USE_CONCATENATED_IMPULSE_RESPONSES
// Lazily load the concatenated HRTF database for a given subject and cache it in a
// function-local hash table so that future retrievals are fast.
static AudioBus* getConcatenatedImpulseResponsesForSubject(const String& subjectName)
{
    typedef HashMap<String, AudioBus*> AudioBusMap;
    static NeverDestroyed<AudioBusMap> audioBusMap;

    AudioBus* bus;
    AudioBusMap::iterator iterator = audioBusMap.get().find(subjectName);
    if (iterator == audioBusMap.get().end()) {
        auto concatenatedImpulseResponses = AudioBus::loadPlatformResource(subjectName.utf8().data(), ResponseSampleRate);
        ASSERT(concatenatedImpulseResponses);
        if (!concatenatedImpulseResponses)
            return nullptr;

        bus = concatenatedImpulseResponses.leakRef();
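        // The bus is leaked into the never-destroyed map on purpose: cached impulse responses persist for the lifetime of the process.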
        audioBusMap.get().set(subjectName, bus);
    } else
        bus = iterator->value;

    size_t responseLength = bus->length();
    size_t expectedLength = static_cast<size_t>(TotalNumberOfResponses * ResponseFrameSize);

    // Check number of channels and length. For now these are fixed and known.
    bool isBusGood = responseLength == expectedLength && bus->numberOfChannels() == 2;
    ASSERT(isBusGood);
    if (!isBusGood)
        return nullptr;

    return bus;
}
#endif

// Takes advantage of the left/right symmetry of the measurements by compositing a pair of mirrored measurements.
// For example, azimuths 30 and -30 degrees are mirror images of each other, with the roles of the left and right ears reversed.
bool HRTFElevation::calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
    RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR)
{
    RefPtr<HRTFKernel> kernelL1;
    RefPtr<HRTFKernel> kernelR1;
    bool success = calculateKernelsForAzimuthElevation(azimuth, elevation, sampleRate, subjectName, kernelL1, kernelR1);
    if (!success)
        return false;

    // And the symmetric (mirrored) version.
    int symmetricAzimuth = !azimuth ? 0 : 360 - azimuth;
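    // Azimuth 0 is its own mirror image; every other azimuth is mirrored at (360 - azimuth) degrees.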

    RefPtr<HRTFKernel> kernelL2;
    RefPtr<HRTFKernel> kernelR2;
    success = calculateKernelsForAzimuthElevation(symmetricAzimuth, elevation, sampleRate, subjectName, kernelL2, kernelR2);
    if (!success)
        return false;

    // Notice the L/R reversal in the symmetric version.
    kernelL = HRTFKernel::createInterpolatedKernel(kernelL1.get(), kernelR2.get(), 0.5f);
    kernelR = HRTFKernel::createInterpolatedKernel(kernelR1.get(), kernelL2.get(), 0.5f);

    return true;
}

bool HRTFElevation::calculateKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
    RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR)
{
    // Valid values for azimuth are 0 -> 345 in 15-degree increments.
    // Valid values for elevation are -45 -> +90 in 15-degree increments.

    bool isAzimuthGood = azimuth >= 0 && azimuth <= 345 && (azimuth / 15) * 15 == azimuth;
    ASSERT(isAzimuthGood);
    if (!isAzimuthGood)
        return false;

    bool isElevationGood = elevation >= -45 && elevation <= 90 && (elevation / 15) * 15 == elevation;
    ASSERT(isElevationGood);
    if (!isElevationGood)
        return false;

    // Construct the resource name from the subject name, azimuth, and elevation, for example:
    // "IRC_Composite_C_R0195_T015_P000"
    // Note: the passed-in subjectName is not a string coming from JavaScript or the web.
    // It's an internal ASCII identifier and an implementation detail.
    int positiveElevation = elevation < 0 ? elevation + 360 : elevation;
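    // Negative elevations are encoded as 315 -> 345 in the resource names (elevation + 360), e.g. "_P345" for -15 degrees.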

#ifdef USE_CONCATENATED_IMPULSE_RESPONSES
    AudioBus* bus(getConcatenatedImpulseResponsesForSubject(subjectName));

    if (!bus)
        return false;

    int elevationIndex = positiveElevation / AzimuthSpacing;
    if (positiveElevation > 90)
        elevationIndex -= AzimuthSpacing;

    // The concatenated impulse response is a bus containing all
    // the elevations per azimuth, for all azimuths by increasing
    // order. So for a given azimuth and elevation we need to compute
    // the index of the wanted audio frames in the concatenated table.
    unsigned index = ((azimuth / AzimuthSpacing) * HRTFDatabase::NumberOfRawElevations) + elevationIndex;
    bool isIndexGood = index < TotalNumberOfResponses;
    ASSERT(isIndexGood);
    if (!isIndexGood)
        return false;

    // Extract the individual impulse response from the concatenated
    // responses and potentially sample-rate convert it to the desired
    // (hardware) sample-rate.
    unsigned startFrame = index * ResponseFrameSize;
    unsigned stopFrame = startFrame + ResponseFrameSize;
    auto preSampleRateConvertedResponse = AudioBus::createBufferFromRange(bus, startFrame, stopFrame);
    auto response = AudioBus::createBySampleRateConverting(preSampleRateConvertedResponse.get(), false, sampleRate);
    AudioChannel* leftEarImpulseResponse = response->channel(AudioBus::ChannelLeft);
    AudioChannel* rightEarImpulseResponse = response->channel(AudioBus::ChannelRight);
#else
    String resourceName = makeString("IRC_", subjectName, "_C_R0195_T", pad('0', 3, azimuth), "_P", pad('0', 3, positiveElevation));

    RefPtr<AudioBus> impulseResponse(AudioBus::loadPlatformResource(resourceName.utf8().data(), sampleRate));

    ASSERT(impulseResponse.get());
    if (!impulseResponse.get())
        return false;

    size_t responseLength = impulseResponse->length();
    size_t expectedLength = static_cast<size_t>(256 * (sampleRate / 44100.0));

    // Check number of channels and length. For now these are fixed and known.
    bool isBusGood = responseLength == expectedLength && impulseResponse->numberOfChannels() == 2;
    ASSERT(isBusGood);
    if (!isBusGood)
        return false;

    AudioChannel* leftEarImpulseResponse = impulseResponse->channelByType(AudioBus::ChannelLeft);
    AudioChannel* rightEarImpulseResponse = impulseResponse->channelByType(AudioBus::ChannelRight);
#endif

    // Note that depending on the fftSize returned by the panner, we may be truncating the impulse response we just loaded in.
    const size_t fftSize = HRTFPanner::fftSizeForSampleRate(sampleRate);
    kernelL = HRTFKernel::create(leftEarImpulseResponse, fftSize, sampleRate);
    kernelR = HRTFKernel::create(rightEarImpulseResponse, fftSize, sampleRate);

    return true;
}

// The range of elevations for the IRCAM impulse responses varies depending on azimuth, but the minimum elevation appears to always be -45.
//
// maxElevations gives the maximum available elevation (in degrees) for each raw azimuth, in 15-degree azimuth steps:
static const int maxElevations[] = {
        // Azimuth
        //
    90, // 0
    45, // 15
    60, // 30
    45, // 45
    75, // 60
    45, // 75
    60, // 90
    45, // 105
    75, // 120
    45, // 135
    60, // 150
    45, // 165
    75, // 180
    45, // 195
    60, // 210
    45, // 225
    75, // 240
    45, // 255
    60, // 270
    45, // 285
    75, // 300
    45, // 315
    60, // 330
    45 // 345
};

std::unique_ptr<HRTFElevation> HRTFElevation::createForSubject(const String& subjectName, int elevation, float sampleRate)
{
    bool isElevationGood = elevation >= -45 && elevation <= 90 && (elevation / 15) * 15 == elevation;
    ASSERT(isElevationGood);
    if (!isElevationGood)
        return nullptr;

    auto kernelListL = std::make_unique<HRTFKernelList>(NumberOfTotalAzimuths);
    auto kernelListR = std::make_unique<HRTFKernelList>(NumberOfTotalAzimuths);

    // Load convolution kernels from HRTF files.
    int interpolatedIndex = 0;
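    // Kernels for the raw (measured) azimuths land at every InterpolationFactor-th slot of the kernel lists;
    // the slots in between are filled by interpolation further below.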
    for (unsigned rawIndex = 0; rawIndex < NumberOfRawAzimuths; ++rawIndex) {
        // Don't let elevation exceed maximum for this azimuth.
        int maxElevation = maxElevations[rawIndex];
        int actualElevation = std::min(elevation, maxElevation);

        bool success = calculateKernelsForAzimuthElevation(rawIndex * AzimuthSpacing, actualElevation, sampleRate, subjectName, kernelListL->at(interpolatedIndex), kernelListR->at(interpolatedIndex));
        if (!success)
            return nullptr;

        interpolatedIndex += InterpolationFactor;
    }

    // Now go back and interpolate intermediate azimuth values.
    for (unsigned i = 0; i < NumberOfTotalAzimuths; i += InterpolationFactor) {
        int j = (i + InterpolationFactor) % NumberOfTotalAzimuths;
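        // j is the index of the next raw azimuth, wrapping around to the first one past the last.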

        // Create the interpolated convolution kernels and delays.
        for (unsigned jj = 1; jj < InterpolationFactor; ++jj) {
            float x = float(jj) / float(InterpolationFactor); // interpolate from 0 -> 1

            (*kernelListL)[i + jj] = HRTFKernel::createInterpolatedKernel(kernelListL->at(i).get(), kernelListL->at(j).get(), x);
            (*kernelListR)[i + jj] = HRTFKernel::createInterpolatedKernel(kernelListR->at(i).get(), kernelListR->at(j).get(), x);
        }
    }

    return std::make_unique<HRTFElevation>(WTFMove(kernelListL), WTFMove(kernelListR), elevation, sampleRate);
}

std::unique_ptr<HRTFElevation> HRTFElevation::createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, float x, float sampleRate)
{
    ASSERT(hrtfElevation1 && hrtfElevation2);
    if (!hrtfElevation1 || !hrtfElevation2)
        return nullptr;

    ASSERT(x >= 0.0 && x < 1.0);

    auto kernelListL = std::make_unique<HRTFKernelList>(NumberOfTotalAzimuths);
    auto kernelListR = std::make_unique<HRTFKernelList>(NumberOfTotalAzimuths);

    HRTFKernelList* kernelListL1 = hrtfElevation1->kernelListL();
    HRTFKernelList* kernelListR1 = hrtfElevation1->kernelListR();
    HRTFKernelList* kernelListL2 = hrtfElevation2->kernelListL();
    HRTFKernelList* kernelListR2 = hrtfElevation2->kernelListR();

    // Interpolate kernels of corresponding azimuths of the two elevations.
    for (unsigned i = 0; i < NumberOfTotalAzimuths; ++i) {
        (*kernelListL)[i] = HRTFKernel::createInterpolatedKernel(kernelListL1->at(i).get(), kernelListL2->at(i).get(), x);
        (*kernelListR)[i] = HRTFKernel::createInterpolatedKernel(kernelListR1->at(i).get(), kernelListR2->at(i).get(), x);
    }

    // Interpolate elevation angle.
    double angle = (1.0 - x) * hrtfElevation1->elevationAngle() + x * hrtfElevation2->elevationAngle();

    return std::make_unique<HRTFElevation>(WTFMove(kernelListL), WTFMove(kernelListR), static_cast<int>(angle), sampleRate);
}

void HRTFElevation::getKernelsFromAzimuth(double azimuthBlend, unsigned azimuthIndex, HRTFKernel*& kernelL, HRTFKernel*& kernelR, double& frameDelayL, double& frameDelayR)
{
    bool checkAzimuthBlend = azimuthBlend >= 0.0 && azimuthBlend < 1.0;
    ASSERT(checkAzimuthBlend);
    if (!checkAzimuthBlend)
        azimuthBlend = 0.0;

    unsigned numKernels = m_kernelListL->size();

    bool isIndexGood = azimuthIndex < numKernels;
    ASSERT(isIndexGood);
    if (!isIndexGood) {
        kernelL = nullptr;
        kernelR = nullptr;
        return;
    }

    // Return the left and right kernels.
    kernelL = m_kernelListL->at(azimuthIndex).get();
    kernelR = m_kernelListR->at(azimuthIndex).get();

    frameDelayL = m_kernelListL->at(azimuthIndex)->frameDelay();
    frameDelayR = m_kernelListR->at(azimuthIndex)->frameDelay();

    int azimuthIndex2 = (azimuthIndex + 1) % numKernels;
    double frameDelay2L = m_kernelListL->at(azimuthIndex2)->frameDelay();
    double frameDelay2R = m_kernelListR->at(azimuthIndex2)->frameDelay();

    // Linearly interpolate delays.
    frameDelayL = (1.0 - azimuthBlend) * frameDelayL + azimuthBlend * frameDelay2L;
    frameDelayR = (1.0 - azimuthBlend) * frameDelayR + azimuthBlend * frameDelay2R;
}

} // namespace WebCore

#endif // ENABLE(WEB_AUDIO)