b=815643 Import the HRTF panner implementation from Blink r=ehsan
author: Karl Tomlinson <karlt+@karlt.net>
date: Thu, 08 Aug 2013 21:37:36 +1200
changeset 142351 1e796e343ef843d890c2d58a0d52f2d09299631a
parent 142350 2900a53981346f53491b7f4296f4d761b31ffc4f
child 142352 5ddd533fc54cba690c22235c67a70b66b45d2bc5
push id: 32374
push user: ktomlinson@mozilla.com
push date: Tue, 13 Aug 2013 02:49:14 +0000
treeherder: mozilla-inbound@62ad090a94a4 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: ehsan
bugs: 815643, 153183
milestone: 26.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
b=815643 Import the HRTF panner implementation from Blink r=ehsan This code was copied from Blink SVN revision 153183.
content/media/webaudio/blink/FFTFrame.cpp
content/media/webaudio/blink/HRTFDatabase.cpp
content/media/webaudio/blink/HRTFDatabase.h
content/media/webaudio/blink/HRTFDatabaseLoader.cpp
content/media/webaudio/blink/HRTFDatabaseLoader.h
content/media/webaudio/blink/HRTFElevation.cpp
content/media/webaudio/blink/HRTFElevation.h
content/media/webaudio/blink/HRTFKernel.cpp
content/media/webaudio/blink/HRTFKernel.h
content/media/webaudio/blink/HRTFPanner.cpp
content/media/webaudio/blink/HRTFPanner.h
new file mode 100644
--- /dev/null
+++ b/content/media/webaudio/blink/FFTFrame.cpp
@@ -0,0 +1,311 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(WEB_AUDIO)
+
+#include "core/platform/audio/FFTFrame.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+#include "core/platform/Logging.h"
+#include "core/platform/PlatformMemoryInstrumentation.h"
+#include <wtf/Complex.h>
+#include <wtf/MathExtras.h>
+#include <wtf/MemoryObjectInfo.h>
+#include <wtf/OwnPtr.h>
+
+#if !USE_ACCELERATE_FFT && USE(WEBAUDIO_FFMPEG)
+void reportMemoryUsage(const RDFTContext* const&, WTF::MemoryObjectInfo*);
+#endif // USE(WEBAUDIO_FFMPEG)
+
+namespace WebCore {
+
+// Takes the forward FFT of |data| zero-padded out to fftSize() samples,
+// leaving the frequency-domain result in this frame.
+// Assumes dataSize <= fftSize() -- TODO(review): confirm copyToRange clamps.
+void FFTFrame::doPaddedFFT(const float* data, size_t dataSize)
+{
+    // Zero-pad the impulse response
+    AudioFloatArray paddedResponse(fftSize()); // zero-initialized
+    paddedResponse.copyToRange(data, 0, dataSize);
+
+    // Get the frequency-domain version of padded response
+    doFFT(paddedResponse.data());
+}
+
+// Returns a new frame whose spectrum interpolates between frame1 and frame2
+// (x in [0, 1]; 0 -> frame1, 1 -> frame2).  After the spectral interpolation
+// the result is forced back to a causal, half-length impulse response by
+// zeroing the second half of the time-domain signal, then re-transformed.
+// frame2 is indexed using frame1's size (see interpolateFrequencyComponents),
+// so both inputs are expected to share the same FFT size.
+PassOwnPtr<FFTFrame> FFTFrame::createInterpolatedFrame(const FFTFrame& frame1, const FFTFrame& frame2, double x)
+{
+    OwnPtr<FFTFrame> newFrame = adoptPtr(new FFTFrame(frame1.fftSize()));
+
+    newFrame->interpolateFrequencyComponents(frame1, frame2, x);
+
+    // In the time-domain, the 2nd half of the response must be zero, to avoid circular convolution aliasing...
+    int fftSize = newFrame->fftSize();
+    AudioFloatArray buffer(fftSize);
+    newFrame->doInverseFFT(buffer.data());
+    buffer.zeroRange(fftSize / 2, fftSize);
+
+    // Put back into frequency domain.
+    newFrame->doFFT(buffer.data());
+
+    return newFrame.release();
+}
+
+// Sets this frame's spectrum to an interpolation of frame1 and frame2.
+// Magnitudes are blended in the decibel domain (with an empirical bias that
+// favors the frame holding a spectral zero, to preserve notches); phases are
+// blended via unwrapped per-bin phase deltas -- i.e. group delay -- which is
+// accumulated across bins so responses with different delays blend cleanly.
+// interp is in [0, 1]: 0 -> frame1, 1 -> frame2.
+// NOTE(review): frame2's arrays are read up to frame1.fftSize() / 2, so the
+// two frames are presumably the same size -- confirm at the call sites.
+void FFTFrame::interpolateFrequencyComponents(const FFTFrame& frame1, const FFTFrame& frame2, double interp)
+{
+    // FIXME : with some work, this method could be optimized
+
+    float* realP = realData();
+    float* imagP = imagData();
+
+    const float* realP1 = frame1.realData();
+    const float* imagP1 = frame1.imagData();
+    const float* realP2 = frame2.realData();
+    const float* imagP2 = frame2.imagData();
+
+    // Adopt frame1's dimensions for this frame.
+    m_FFTSize = frame1.fftSize();
+    m_log2FFTSize = frame1.log2FFTSize();
+
+    // Base linear crossfade weights (may be biased per-bin below).
+    double s1base = (1.0 - interp);
+    double s2base = interp;
+
+    double phaseAccum = 0.0;
+    double lastPhase1 = 0.0;
+    double lastPhase2 = 0.0;
+
+    // Bin 0 packs DC in realP[0] and nyquist in imagP[0] (see FFTFrame::print);
+    // both are blended linearly, outside the magnitude/phase loop below.
+    realP[0] = static_cast<float>(s1base * realP1[0] + s2base * realP2[0]);
+    imagP[0] = static_cast<float>(s1base * imagP1[0] + s2base * imagP2[0]);
+
+    int n = m_FFTSize / 2;
+
+    for (int i = 1; i < n; ++i) {
+        Complex c1(realP1[i], imagP1[i]);
+        Complex c2(realP2[i], imagP2[i]);
+
+        double mag1 = abs(c1);
+        double mag2 = abs(c2);
+
+        // Interpolate magnitudes in decibels
+        // NOTE(review): a zero-magnitude bin yields log10(0) = -inf here;
+        // presumably HRTF kernels are never exactly zero -- confirm.
+        double mag1db = 20.0 * log10(mag1);
+        double mag2db = 20.0 * log10(mag2);
+
+        double s1 = s1base;
+        double s2 = s2base;
+
+        double magdbdiff = mag1db - mag2db;
+
+        // Empirical tweak to retain higher-frequency zeroes
+        double threshold =  (i > 16) ? 5.0 : 2.0;
+
+        // When one frame is much quieter in this bin (a spectral notch),
+        // bias the weights toward it so interpolation doesn't fill the notch.
+        if (magdbdiff < -threshold && mag1db < 0.0) {
+            s1 = pow(s1, 0.75);
+            s2 = 1.0 - s1;
+        } else if (magdbdiff > threshold && mag2db < 0.0) {
+            s2 = pow(s2, 0.75);
+            s1 = 1.0 - s2;
+        }
+
+        // Average magnitude by decibels instead of linearly
+        double magdb = s1 * mag1db + s2 * mag2db;
+        double mag = pow(10.0, 0.05 * magdb);
+
+        // Now, deal with phase
+        double phase1 = arg(c1);
+        double phase2 = arg(c2);
+
+        double deltaPhase1 = phase1 - lastPhase1;
+        double deltaPhase2 = phase2 - lastPhase2;
+        lastPhase1 = phase1;
+        lastPhase2 = phase2;
+
+        // Unwrap phase deltas into (-pi, pi].
+        if (deltaPhase1 > piDouble)
+            deltaPhase1 -= 2.0 * piDouble;
+        if (deltaPhase1 < -piDouble)
+            deltaPhase1 += 2.0 * piDouble;
+        if (deltaPhase2 > piDouble)
+            deltaPhase2 -= 2.0 * piDouble;
+        if (deltaPhase2 < -piDouble)
+            deltaPhase2 += 2.0 * piDouble;
+
+        // Blend group-delays; when the two deltas are more than pi apart,
+        // shift one by a full turn first so the blend takes the short way
+        // around the circle.
+        double deltaPhaseBlend;
+
+        if (deltaPhase1 - deltaPhase2 > piDouble)
+            deltaPhaseBlend = s1 * deltaPhase1 + s2 * (2.0 * piDouble + deltaPhase2);
+        else if (deltaPhase2 - deltaPhase1 > piDouble)
+            deltaPhaseBlend = s1 * (2.0 * piDouble + deltaPhase1) + s2 * deltaPhase2;
+        else
+            deltaPhaseBlend = s1 * deltaPhase1 + s2 * deltaPhase2;
+
+        phaseAccum += deltaPhaseBlend;
+
+        // Unwrap the accumulated phase back into (-pi, pi].
+        if (phaseAccum > piDouble)
+            phaseAccum -= 2.0 * piDouble;
+        if (phaseAccum < -piDouble)
+            phaseAccum += 2.0 * piDouble;
+
+        // Recombine blended magnitude and accumulated phase for this bin.
+        Complex c = complexFromMagnitudePhase(mag, phaseAccum);
+
+        realP[i] = static_cast<float>(c.real());
+        imagP[i] = static_cast<float>(c.imag());
+    }
+}
+
+// Computes the magnitude-weighted average group delay over the lower half of
+// the spectrum, removes that delay from this frame (keeping 20 sample-frames
+// of headroom so the leading edge of the impulse is preserved), zeroes the DC
+// bin, and returns the delay that was removed, in sample-frames.
+double FFTFrame::extractAverageGroupDelay()
+{
+    float* realP = realData();
+    float* imagP = imagData();
+
+    double aveSum = 0.0;
+    double weightSum = 0.0;
+    double lastPhase = 0.0;
+
+    int halfSize = fftSize() / 2;
+
+    // Phase advance per sample-frame of delay, per bin index.
+    const double kSamplePhaseDelay = (2.0 * piDouble) / double(fftSize());
+
+    // Calculate weighted average group delay
+    for (int i = 0; i < halfSize; i++) {
+        Complex c(realP[i], imagP[i]);
+        double mag = abs(c);
+        double phase = arg(c);
+
+        double deltaPhase = phase - lastPhase;
+        lastPhase = phase;
+
+        // Unwrap the per-bin phase delta into (-pi, pi].
+        if (deltaPhase < -piDouble)
+            deltaPhase += 2.0 * piDouble;
+        if (deltaPhase > piDouble)
+            deltaPhase -= 2.0 * piDouble;
+
+        aveSum += mag * deltaPhase;
+        weightSum += mag;
+    }
+
+    // Note how we invert the phase delta wrt frequency since this is how group delay is defined
+    // NOTE(review): weightSum is 0 for an all-zero spectrum, making this NaN;
+    // presumably callers never pass a silent frame -- confirm.
+    double ave = aveSum / weightSum;
+    double aveSampleDelay = -ave / kSamplePhaseDelay;
+
+    // Leave 20 sample headroom (for leading edge of impulse)
+    if (aveSampleDelay > 20.0)
+        aveSampleDelay -= 20.0;
+
+    // Remove average group delay (minus 20 samples for headroom)
+    addConstantGroupDelay(-aveSampleDelay);
+
+    // Remove DC offset
+    realP[0] = 0.0f;
+
+    return aveSampleDelay;
+}
+
+// Applies a linear-phase term corresponding to a constant group delay of
+// |sampleFrameDelay| sample-frames: each bin i>0 gets its phase shifted by
+// -i * sampleFrameDelay * 2*pi / fftSize().  Magnitudes are unchanged, and
+// bin 0 (DC / packed nyquist) is left untouched.
+void FFTFrame::addConstantGroupDelay(double sampleFrameDelay)
+{
+    int halfSize = fftSize() / 2;
+
+    float* realP = realData();
+    float* imagP = imagData();
+
+    // Phase advance per sample-frame of delay, per bin index.
+    const double kSamplePhaseDelay = (2.0 * piDouble) / double(fftSize());
+
+    double phaseAdj = -sampleFrameDelay * kSamplePhaseDelay;
+
+    // Add constant group delay
+    for (int i = 1; i < halfSize; i++) {
+        Complex c(realP[i], imagP[i]);
+        double mag = abs(c);
+        double phase = arg(c);
+
+        // Phase shift grows linearly with bin index => constant group delay.
+        phase += i * phaseAdj;
+
+        Complex c2 = complexFromMagnitudePhase(mag, phase);
+
+        realP[i] = static_cast<float>(c2.real());
+        imagP[i] = static_cast<float>(c2.imag());
+    }
+}
+
+// Memory-instrumentation hook: registers this frame's buffers with the
+// memory profiler.  Which members exist (and are reported) depends on the
+// FFT backend selected at compile time (Accelerate, FFmpeg, or IPP).
+void FFTFrame::reportMemoryUsage(MemoryObjectInfo* memoryObjectInfo) const
+{
+    MemoryClassInfo info(memoryObjectInfo, this, PlatformMemoryTypes::AudioSharedData);
+#if USE_ACCELERATE_FFT
+    info.addMember(m_frame, "frame");
+    info.addMember(m_realData, "realData");
+    info.addMember(m_imagData, "imagData");
+#else // !USE_ACCELERATE_FFT
+
+#if USE(WEBAUDIO_FFMPEG)
+    info.addMember(m_forwardContext, "forwardContext");
+    info.addMember(m_inverseContext, "inverseContext");
+    info.addMember(m_complexData, "complexData");
+    info.addMember(m_realData, "realData");
+    info.addMember(m_imagData, "imagData");
+#endif // USE(WEBAUDIO_FFMPEG)
+
+#if USE(WEBAUDIO_IPP)
+    // IPP does not expose buffer sizes directly; query them and report the
+    // raw allocations.
+    int size = 0;
+    ippsDFTGetBufSize_R_32f(m_DFTSpec, &size);
+    info.addRawBuffer(m_buffer, size * sizeof(Ipp8u), "buffer");
+    ippsDFTGetSize_R_32f(m_FFTSize, IPP_FFT_NODIV_BY_ANY, ippAlgHintFast, &size, 0, 0);
+    info.addRawBuffer(m_DFTSpec, size, "DFTSpec");
+    info.addMember(m_complexData, "complexData");
+    info.addMember(m_realData, "realData");
+    info.addMember(m_imagData, "imagData");
+#endif // USE(WEBAUDIO_IPP)
+
+#endif // !USE_ACCELERATE_FFT
+}
+
+#ifndef NDEBUG
+// Debug-only: logs DC/nyquist plus the magnitude and phase of every bin in
+// the lower half of the spectrum.
+void FFTFrame::print()
+{
+    FFTFrame& frame = *this;
+    float* realP = frame.realData();
+    float* imagP = frame.imagData();
+    LOG(WebAudio, "**** \n");
+    LOG(WebAudio, "DC = %f : nyquist = %f\n", realP[0], imagP[0]);
+
+    int n = m_FFTSize / 2;
+
+    for (int i = 1; i < n; i++) {
+        double mag = sqrt(realP[i] * realP[i] + imagP[i] * imagP[i]);
+        // NOTE(review): atan2 takes (y, x); the conventional phase of
+        // real + i*imag is atan2(imag, real), but this passes (real, imag).
+        // Debug output only, yet the printed phase looks swapped -- confirm
+        // against upstream Blink before changing.
+        double phase = atan2(realP[i], imagP[i]);
+
+        LOG(WebAudio, "[%d] (%f %f)\n", i, mag, phase);
+    }
+    LOG(WebAudio, "****\n");
+}
+#endif // NDEBUG
+
+} // namespace WebCore
+
+#endif // ENABLE(WEB_AUDIO)
new file mode 100644
--- /dev/null
+++ b/content/media/webaudio/blink/HRTFDatabase.cpp
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(WEB_AUDIO)
+
+#include "core/platform/audio/HRTFDatabase.h"
+
+#include "core/platform/PlatformMemoryInstrumentation.h"
+#include "core/platform/audio/HRTFElevation.h"
+#include <wtf/MemoryInstrumentationVector.h>
+
+using namespace std;
+
+namespace WebCore {
+
+// Geometry of the built-in HRTF measurement set: raw elevations run from
+// -45 to +90 degrees in 15-degree steps (10 slices); InterpolationFactor
+// controls how many synthetic slices are inserted between raw ones (1 = none).
+const int HRTFDatabase::MinElevation = -45;
+const int HRTFDatabase::MaxElevation = 90;
+const unsigned HRTFDatabase::RawElevationAngleSpacing = 15;
+const unsigned HRTFDatabase::NumberOfRawElevations = 10; // -45 -> +90 (each 15 degrees)
+const unsigned HRTFDatabase::InterpolationFactor = 1;
+const unsigned HRTFDatabase::NumberOfTotalElevations = NumberOfRawElevations * InterpolationFactor;
+
+// Factory: synchronously builds the full database for the given sample-rate
+// and transfers ownership to the caller.
+PassOwnPtr<HRTFDatabase> HRTFDatabase::create(float sampleRate)
+{
+    OwnPtr<HRTFDatabase> hrtfDatabase = adoptPtr(new HRTFDatabase(sampleRate));
+    return hrtfDatabase.release();
+}
+
+// Loads one HRTFElevation per raw elevation angle (MinElevation..MaxElevation
+// in RawElevationAngleSpacing steps) for the "Composite" subject, then -- when
+// InterpolationFactor > 1 -- fills the gaps by interpolating adjacent slices.
+// If any raw elevation fails to load, construction stops early and the
+// remaining m_elevations slots stay null (getKernelsFromAzimuthElevation
+// null-checks for exactly this case).
+HRTFDatabase::HRTFDatabase(float sampleRate)
+    : m_elevations(NumberOfTotalElevations)
+    , m_sampleRate(sampleRate)
+{
+    unsigned elevationIndex = 0;
+    for (int elevation = MinElevation; elevation <= MaxElevation; elevation += RawElevationAngleSpacing) {
+        OwnPtr<HRTFElevation> hrtfElevation = HRTFElevation::createForSubject("Composite", elevation, sampleRate);
+        ASSERT(hrtfElevation.get());
+        if (!hrtfElevation.get())
+            return;
+        
+        // Raw slices land at every InterpolationFactor'th slot.
+        m_elevations[elevationIndex] = hrtfElevation.release();
+        elevationIndex += InterpolationFactor;
+    }
+
+    // Now, go back and interpolate elevations.
+    if (InterpolationFactor > 1) {
+        for (unsigned i = 0; i < NumberOfTotalElevations; i += InterpolationFactor) {
+            unsigned j = (i + InterpolationFactor);
+            if (j >= NumberOfTotalElevations)
+                j = i; // for last elevation interpolate with itself
+
+            // Create the interpolated convolution kernels and delays.
+            for (unsigned jj = 1; jj < InterpolationFactor; ++jj) {
+                float x = static_cast<float>(jj) / static_cast<float>(InterpolationFactor);
+                m_elevations[i + jj] = HRTFElevation::createByInterpolatingSlices(m_elevations[i].get(), m_elevations[j].get(), x, sampleRate);
+                ASSERT(m_elevations[i + jj].get());
+            }
+        }
+    }
+}
+
+// Looks up the left/right ear kernels and frame delays for the given azimuth
+// and elevation, delegating the azimuth interpolation to the matching
+// HRTFElevation.  On any failure (empty or partially-loaded database) the
+// kernel out-params are set to null and the delays are left untouched.
+void HRTFDatabase::getKernelsFromAzimuthElevation(double azimuthBlend, unsigned azimuthIndex, double elevationAngle, HRTFKernel* &kernelL, HRTFKernel* &kernelR,
+                                                  double& frameDelayL, double& frameDelayR)
+{
+    unsigned elevationIndex = indexFromElevationAngle(elevationAngle);
+    ASSERT_WITH_SECURITY_IMPLICATION(elevationIndex < m_elevations.size() && m_elevations.size() > 0);
+    
+    if (!m_elevations.size()) {
+        kernelL = 0;
+        kernelR = 0;
+        return;
+    }
+    
+    // Clamp the index defensively even though indexFromElevationAngle clamps
+    // the angle; keeps the subsequent access in-bounds.
+    if (elevationIndex > m_elevations.size() - 1)
+        elevationIndex = m_elevations.size() - 1;    
+    
+    // Slot may be null if the constructor bailed out mid-load.
+    HRTFElevation* hrtfElevation = m_elevations[elevationIndex].get();
+    ASSERT(hrtfElevation);
+    if (!hrtfElevation) {
+        kernelL = 0;
+        kernelR = 0;
+        return;
+    }
+    
+    hrtfElevation->getKernelsFromAzimuth(azimuthBlend, azimuthIndex, kernelL, kernelR, frameDelayL, frameDelayR);
+}                                                     
+
+// Maps an elevation angle (clamped to [MinElevation, MaxElevation]) to an
+// index into m_elevations, accounting for InterpolationFactor.
+// NOTE(review): the intermediate static_cast<int> is then converted to
+// unsigned; the preceding clamp keeps the value non-negative, so this is
+// safe, just slightly indirect.
+unsigned HRTFDatabase::indexFromElevationAngle(double elevationAngle)
+{
+    // Clamp to allowed range.
+    elevationAngle = max(static_cast<double>(MinElevation), elevationAngle);
+    elevationAngle = min(static_cast<double>(MaxElevation), elevationAngle);
+
+    unsigned elevationIndex = static_cast<int>(InterpolationFactor * (elevationAngle - MinElevation) / RawElevationAngleSpacing);    
+    return elevationIndex;
+}
+
+// Memory-instrumentation hook: reports the owned elevation slices.
+void HRTFDatabase::reportMemoryUsage(MemoryObjectInfo* memoryObjectInfo) const
+{
+    MemoryClassInfo info(memoryObjectInfo, this, PlatformMemoryTypes::AudioSharedData);
+    info.addMember(m_elevations, "elevations");
+}
+
+} // namespace WebCore
+
+#endif // ENABLE(WEB_AUDIO)
new file mode 100644
--- /dev/null
+++ b/content/media/webaudio/blink/HRTFDatabase.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HRTFDatabase_h
+#define HRTFDatabase_h
+
+#include "core/platform/audio/HRTFElevation.h"
+#include <wtf/Forward.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/OwnPtr.h>
+#include <wtf/PassRefPtr.h>
+#include <wtf/Vector.h>
+
+namespace WebCore {
+
+class HRTFKernel;
+
+// HRTFDatabase owns the full set of HRTFElevation slices (impulse-response
+// kernels plus frame delays) for one sample-rate, and resolves an
+// azimuth/elevation pair to the pair of ear kernels to convolve with.
+class HRTFDatabase {
+    WTF_MAKE_NONCOPYABLE(HRTFDatabase);
+public:
+    static PassOwnPtr<HRTFDatabase> create(float sampleRate);
+
+    // getKernelsFromAzimuthElevation() returns a left and right ear kernel, and an interpolated left and right frame delay for the given azimuth and elevation.
+    // azimuthBlend must be in the range 0 -> 1.
+    // Valid values for azimuthIndex are 0 -> HRTFElevation::NumberOfTotalAzimuths - 1 (corresponding to angles of 0 -> 360).
+    // Valid values for elevationAngle are MinElevation -> MaxElevation.
+    void getKernelsFromAzimuthElevation(double azimuthBlend, unsigned azimuthIndex, double elevationAngle, HRTFKernel* &kernelL, HRTFKernel* &kernelR, double& frameDelayL, double& frameDelayR);
+
+    // Returns the number of different azimuth angles.
+    static unsigned numberOfAzimuths() { return HRTFElevation::NumberOfTotalAzimuths; }
+
+    float sampleRate() const { return m_sampleRate; }
+
+    // Number of elevations loaded from resource.
+    static const unsigned NumberOfRawElevations;
+
+    void reportMemoryUsage(MemoryObjectInfo*) const;
+
+private:
+    explicit HRTFDatabase(float sampleRate);
+
+    // Minimum and maximum elevation angles (inclusive) for a HRTFDatabase.
+    static const int MinElevation;
+    static const int MaxElevation;
+    static const unsigned RawElevationAngleSpacing;
+
+    // Interpolates by this factor to get the total number of elevations from every elevation loaded from resource.
+    static const unsigned InterpolationFactor;
+    
+    // Total number of elevations after interpolation.
+    static const unsigned NumberOfTotalElevations;
+
+    // Returns the index for the correct HRTFElevation given the elevation angle.
+    static unsigned indexFromElevationAngle(double);
+
+    // Slices indexed by indexFromElevationAngle(); entries may be null if
+    // loading failed part-way (see the constructor).
+    Vector<OwnPtr<HRTFElevation> > m_elevations;                                            
+    float m_sampleRate;
+};
+
+} // namespace WebCore
+
+#endif // HRTFDatabase_h
new file mode 100644
--- /dev/null
+++ b/content/media/webaudio/blink/HRTFDatabaseLoader.cpp
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(WEB_AUDIO)
+
+#include "core/platform/audio/HRTFDatabaseLoader.h"
+
+#include "core/platform/PlatformMemoryInstrumentation.h"
+#include "core/platform/audio/HRTFDatabase.h"
+#include "wtf/MainThread.h"
+#include "wtf/MemoryInstrumentationHashMap.h"
+
+namespace WebCore {
+
+// Singleton
+// Process-wide map from sample-rate to live loader; entries are raw pointers
+// because each loader removes itself from the map in its destructor.
+HRTFDatabaseLoader::LoaderMap* HRTFDatabaseLoader::s_loaderMap = 0;
+
+// Main-thread-only factory: returns the existing loader for this sample-rate
+// if one is alive, otherwise creates one, registers it, and kicks off the
+// asynchronous database load.
+PassRefPtr<HRTFDatabaseLoader> HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(float sampleRate)
+{
+    ASSERT(isMainThread());
+
+    RefPtr<HRTFDatabaseLoader> loader;
+    
+    // The map itself is deliberately leaked (leakPtr) -- it lives for the
+    // whole process.
+    if (!s_loaderMap)
+        s_loaderMap = adoptPtr(new LoaderMap()).leakPtr();
+
+    loader = s_loaderMap->get(sampleRate);
+    if (loader) {
+        ASSERT(sampleRate == loader->databaseSampleRate());
+        return loader;
+    }
+
+    // The map holds a bare pointer, not a reference; the returned RefPtr is
+    // what keeps the loader alive.
+    loader = adoptRef(new HRTFDatabaseLoader(sampleRate));
+    s_loaderMap->add(sampleRate, loader.get());
+
+    loader->loadAsynchronously();
+
+    return loader;
+}
+
+// Main-thread-only constructor; the database itself is loaded later, on a
+// worker thread (see loadAsynchronously()).
+HRTFDatabaseLoader::HRTFDatabaseLoader(float sampleRate)
+    : m_databaseLoaderThread(0)
+    , m_databaseSampleRate(sampleRate)
+{
+    ASSERT(isMainThread());
+}
+
+// Main-thread-only destructor: joins the loader thread before tearing down
+// the database, then unregisters from the singleton map.
+HRTFDatabaseLoader::~HRTFDatabaseLoader()
+{
+    ASSERT(isMainThread());
+
+    waitForLoaderThreadCompletion();
+    m_hrtfDatabase.clear();
+
+    // Remove ourself from the map.
+    if (s_loaderMap)
+        s_loaderMap->remove(m_databaseSampleRate);
+}
+
+// Asynchronously load the database in this thread.
+// Thread entry point: threadData is the HRTFDatabaseLoader that spawned us.
+static void databaseLoaderEntry(void* threadData)
+{
+    HRTFDatabaseLoader* loader = reinterpret_cast<HRTFDatabaseLoader*>(threadData);
+    ASSERT(loader);
+    loader->load();
+}
+
+// Runs on the loader thread only; builds the database once (no-op if a
+// database is already present).
+void HRTFDatabaseLoader::load()
+{
+    ASSERT(!isMainThread());
+    if (!m_hrtfDatabase.get()) {
+        // Load the default HRTF database.
+        m_hrtfDatabase = HRTFDatabase::create(m_databaseSampleRate);
+    }
+}
+
+// Main-thread-only: spawns the loader thread unless the database is already
+// loaded or a load is already in flight.  m_threadLock guards
+// m_databaseLoaderThread against waitForLoaderThreadCompletion().
+void HRTFDatabaseLoader::loadAsynchronously()
+{
+    ASSERT(isMainThread());
+
+    MutexLocker locker(m_threadLock);
+    
+    if (!m_hrtfDatabase.get() && !m_databaseLoaderThread) {
+        // Start the asynchronous database loading process.
+        m_databaseLoaderThread = createThread(databaseLoaderEntry, this, "HRTF database loader");
+    }
+}
+
+// True once the loader thread has populated m_hrtfDatabase.
+// NOTE(review): this reads m_hrtfDatabase without taking m_threadLock while
+// the loader thread may be writing it -- presumably tolerated as a benign
+// race by the original Blink code; confirm before relying on it.
+bool HRTFDatabaseLoader::isLoaded() const
+{
+    return m_hrtfDatabase.get();
+}
+
+// Joins the loader thread if one was started; safe to call repeatedly
+// because the thread handle is cleared under m_threadLock after the join.
+void HRTFDatabaseLoader::waitForLoaderThreadCompletion()
+{
+    MutexLocker locker(m_threadLock);
+    
+    // waitForThreadCompletion() should not be called twice for the same thread.
+    if (m_databaseLoaderThread)
+        waitForThreadCompletion(m_databaseLoaderThread);
+    m_databaseLoaderThread = 0;
+}
+
+// Memory-instrumentation hook: reports the owned database and the shared
+// (retained, not owned) singleton loader map.
+void HRTFDatabaseLoader::reportMemoryUsage(MemoryObjectInfo* memoryObjectInfo) const
+{
+    MemoryClassInfo info(memoryObjectInfo, this, PlatformMemoryTypes::AudioSharedData);
+    info.addMember(m_hrtfDatabase, "hrtfDatabase");
+    info.addMember(s_loaderMap, "loaderMap", WTF::RetainingPointer);
+}
+
+} // namespace WebCore
+
+#endif // ENABLE(WEB_AUDIO)
new file mode 100644
--- /dev/null
+++ b/content/media/webaudio/blink/HRTFDatabaseLoader.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HRTFDatabaseLoader_h
+#define HRTFDatabaseLoader_h
+
+#include "core/platform/audio/HRTFDatabase.h"
+#include "wtf/HashMap.h"
+#include "wtf/PassRefPtr.h"
+#include "wtf/RefCounted.h"
+#include "wtf/RefPtr.h"
+#include "wtf/Threading.h"
+
+namespace WebCore {
+
+// HRTFDatabaseLoader will asynchronously load the default HRTFDatabase in a new thread.
+
+class HRTFDatabaseLoader : public RefCounted<HRTFDatabaseLoader> {
+public:
+    // Lazily creates a HRTFDatabaseLoader (if not already created) for the given sample-rate
+    // and starts loading asynchronously (when created the first time).
+    // Returns the HRTFDatabaseLoader.
+    // Must be called from the main thread.
+    static PassRefPtr<HRTFDatabaseLoader> createAndLoadAsynchronouslyIfNecessary(float sampleRate);
+
+    // Both constructor and destructor must be called from the main thread.
+    ~HRTFDatabaseLoader();
+    
+    // Returns true once the default database has been completely loaded.
+    bool isLoaded() const;
+
+    // waitForLoaderThreadCompletion() may be called more than once and is thread-safe.
+    void waitForLoaderThreadCompletion();
+    
+    // May return null until loading has completed (see isLoaded()).
+    HRTFDatabase* database() { return m_hrtfDatabase.get(); }
+
+    float databaseSampleRate() const { return m_databaseSampleRate; }
+    
+    // Called in asynchronous loading thread.
+    void load();
+
+    void reportMemoryUsage(MemoryObjectInfo*) const;
+
+private:
+    // Both constructor and destructor must be called from the main thread.
+    explicit HRTFDatabaseLoader(float sampleRate);
+    
+    // If it hasn't already been loaded, creates a new thread and initiates asynchronous loading of the default database.
+    // This must be called from the main thread.
+    void loadAsynchronously();
+
+    // Map from sample-rate to loader.
+    // NOTE(review): keyed by double although sample-rates are floats
+    // throughout; the float key is implicitly widened on every lookup, which
+    // is consistent but slightly surprising.
+    typedef HashMap<double, HRTFDatabaseLoader*> LoaderMap;
+
+    // Keeps track of loaders on a per-sample-rate basis.
+    static LoaderMap* s_loaderMap; // singleton
+
+    OwnPtr<HRTFDatabase> m_hrtfDatabase;
+
+    // Holding a m_threadLock is required when accessing m_databaseLoaderThread.
+    Mutex m_threadLock;
+    ThreadIdentifier m_databaseLoaderThread;
+
+    float m_databaseSampleRate;
+};
+
+} // namespace WebCore
+
+#endif // HRTFDatabaseLoader_h
new file mode 100644
--- /dev/null
+++ b/content/media/webaudio/blink/HRTFElevation.cpp
@@ -0,0 +1,348 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(WEB_AUDIO)
+
+#include "core/platform/audio/HRTFElevation.h"
+
+#include <math.h>
+#include <algorithm>
+#include "core/platform/PlatformMemoryInstrumentation.h"
+#include "core/platform/audio/AudioBus.h"
+#include "core/platform/audio/HRTFPanner.h"
+#include <wtf/MemoryInstrumentationVector.h>
+#include <wtf/OwnPtr.h>
+
+using namespace std;
+ 
+namespace WebCore {
+
+const unsigned HRTFElevation::AzimuthSpacing = 15;
+const unsigned HRTFElevation::NumberOfRawAzimuths = 360 / AzimuthSpacing;
+const unsigned HRTFElevation::InterpolationFactor = 8;
+const unsigned HRTFElevation::NumberOfTotalAzimuths = NumberOfRawAzimuths * InterpolationFactor;
+
+// Total number of components of an HRTF database.
+const size_t TotalNumberOfResponses = 240;
+
+// Number of frames in an individual impulse response.
+const size_t ResponseFrameSize = 256;
+
+// Sample-rate of the spatialization impulse responses as stored in the resource file.
+// The impulse responses may be resampled to a different sample-rate (depending on the audio hardware) when they are loaded.
+const float ResponseSampleRate = 44100;
+
+#if USE(CONCATENATED_IMPULSE_RESPONSES)
+// Lazily load a concatenated HRTF database for given subject and store it in a
+// local hash table to ensure quick efficient future retrievals.
+static PassRefPtr<AudioBus> getConcatenatedImpulseResponsesForSubject(const String& subjectName)
+{
+    typedef HashMap<String, RefPtr<AudioBus> > AudioBusMap;
+    // Process-lifetime cache keyed by subject name; entries are only ever added, never removed.
+    DEFINE_STATIC_LOCAL(AudioBusMap, audioBusMap, ());
+
+    RefPtr<AudioBus> bus;
+    AudioBusMap::iterator iterator = audioBusMap.find(subjectName);
+    if (iterator == audioBusMap.end()) {
+        // Cache miss: load the whole concatenated response resource once.
+        RefPtr<AudioBus> concatenatedImpulseResponses(AudioBus::loadPlatformResource(subjectName.utf8().data(), ResponseSampleRate));
+        ASSERT(concatenatedImpulseResponses);
+        if (!concatenatedImpulseResponses)
+            return 0; // null PassRefPtr signals failure to the caller
+
+        bus = concatenatedImpulseResponses;
+        audioBusMap.set(subjectName, bus);
+    } else
+        bus = iterator->value;
+
+    size_t responseLength = bus->length();
+    size_t expectedLength = static_cast<size_t>(TotalNumberOfResponses * ResponseFrameSize);
+
+    // Check number of channels and length. For now these are fixed and known.
+    bool isBusGood = responseLength == expectedLength && bus->numberOfChannels() == 2;
+    ASSERT(isBusGood);
+    if (!isBusGood)
+        return 0;
+
+    return bus;
+}
+#endif
+
+// Takes advantage of the symmetry and creates a composite version of the two measured versions.  For example, we have both azimuth 30 and -30 degrees
+// where the roles of left and right ears are reversed with respect to each other.
+bool HRTFElevation::calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
+                                                                 RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR)
+{
+    // First load the kernels for the requested azimuth as-is.
+    RefPtr<HRTFKernel> kernelL1;
+    RefPtr<HRTFKernel> kernelR1;
+    bool success = calculateKernelsForAzimuthElevation(azimuth, elevation, sampleRate, subjectName, kernelL1, kernelR1);
+    if (!success)
+        return false;
+        
+    // And symmetric version
+    int symmetricAzimuth = !azimuth ? 0 : 360 - azimuth;
+                                                              
+    RefPtr<HRTFKernel> kernelL2;
+    RefPtr<HRTFKernel> kernelR2;
+    success = calculateKernelsForAzimuthElevation(symmetricAzimuth, elevation, sampleRate, subjectName, kernelL2, kernelR2);
+    if (!success)
+        return false;
+        
+    // Notice L/R reversal in symmetric version.
+    // Averaging with x = 0.5 blends the measured and mirrored responses equally.
+    kernelL = HRTFKernel::createInterpolatedKernel(kernelL1.get(), kernelR2.get(), 0.5f);
+    kernelR = HRTFKernel::createInterpolatedKernel(kernelR1.get(), kernelL2.get(), 0.5f);
+    
+    return true;
+}
+
+bool HRTFElevation::calculateKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
+                                                        RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR)
+{
+    // Valid values for azimuth are 0 -> 345 in 15 degree increments.
+    // Valid values for elevation are -45 -> +90 in 15 degree increments.
+
+    bool isAzimuthGood = azimuth >= 0 && azimuth <= 345 && (azimuth / 15) * 15 == azimuth;
+    ASSERT(isAzimuthGood);
+    if (!isAzimuthGood)
+        return false;
+
+    bool isElevationGood = elevation >= -45 && elevation <= 90 && (elevation / 15) * 15 == elevation;
+    ASSERT(isElevationGood);
+    if (!isElevationGood)
+        return false;
+    
+    // Construct the resource name from the subject name, azimuth, and elevation, for example:
+    // "IRC_Composite_C_R0195_T015_P000"
+    // Note: the passed in subjectName is not a string passed in via JavaScript or the web.
+    // It's passed in as an internal ASCII identifier and is an implementation detail.
+    // Negative elevations are encoded as 360 + elevation (IRCAM naming convention).
+    int positiveElevation = elevation < 0 ? elevation + 360 : elevation;
+
+#if USE(CONCATENATED_IMPULSE_RESPONSES)
+    RefPtr<AudioBus> bus(getConcatenatedImpulseResponsesForSubject(subjectName));
+
+    if (!bus)
+        return false;
+
+    // NOTE(review): elevation index is computed with AzimuthSpacing (15), which
+    // doubles as the elevation spacing here; the adjustment for values > 90
+    // re-maps the wrapped negative elevations — confirm against the resource layout.
+    int elevationIndex = positiveElevation / AzimuthSpacing;
+    if (positiveElevation > 90)
+        elevationIndex -= AzimuthSpacing;
+
+    // The concatenated impulse response is a bus containing all
+    // the elevations per azimuth, for all azimuths by increasing
+    // order. So for a given azimuth and elevation we need to compute
+    // the index of the wanted audio frames in the concatenated table.
+    unsigned index = ((azimuth / AzimuthSpacing) * HRTFDatabase::NumberOfRawElevations) + elevationIndex;
+    bool isIndexGood = index < TotalNumberOfResponses;
+    ASSERT(isIndexGood);
+    if (!isIndexGood)
+        return false;
+
+    // Extract the individual impulse response from the concatenated
+    // responses and potentially sample-rate convert it to the desired
+    // (hardware) sample-rate.
+    unsigned startFrame = index * ResponseFrameSize;
+    unsigned stopFrame = startFrame + ResponseFrameSize;
+    RefPtr<AudioBus> preSampleRateConvertedResponse(AudioBus::createBufferFromRange(bus.get(), startFrame, stopFrame));
+    RefPtr<AudioBus> response(AudioBus::createBySampleRateConverting(preSampleRateConvertedResponse.get(), false, sampleRate));
+    AudioChannel* leftEarImpulseResponse = response->channel(AudioBus::ChannelLeft);
+    AudioChannel* rightEarImpulseResponse = response->channel(AudioBus::ChannelRight);
+#else
+    String resourceName = String::format("IRC_%s_C_R0195_T%03d_P%03d", subjectName.utf8().data(), azimuth, positiveElevation);
+
+    RefPtr<AudioBus> impulseResponse(AudioBus::loadPlatformResource(resourceName.utf8().data(), sampleRate));
+
+    ASSERT(impulseResponse.get());
+    if (!impulseResponse.get())
+        return false;
+    
+    size_t responseLength = impulseResponse->length();
+    // Expected length scales with the target sample-rate relative to the stored 44.1kHz responses.
+    size_t expectedLength = static_cast<size_t>(256 * (sampleRate / 44100.0));
+
+    // Check number of channels and length.  For now these are fixed and known.
+    bool isBusGood = responseLength == expectedLength && impulseResponse->numberOfChannels() == 2;
+    ASSERT(isBusGood);
+    if (!isBusGood)
+        return false;
+    
+    AudioChannel* leftEarImpulseResponse = impulseResponse->channelByType(AudioBus::ChannelLeft);
+    AudioChannel* rightEarImpulseResponse = impulseResponse->channelByType(AudioBus::ChannelRight);
+#endif
+
+    // Note that depending on the fftSize returned by the panner, we may be truncating the impulse response we just loaded in.
+    const size_t fftSize = HRTFPanner::fftSizeForSampleRate(sampleRate);
+    kernelL = HRTFKernel::create(leftEarImpulseResponse, fftSize, sampleRate);
+    kernelR = HRTFKernel::create(rightEarImpulseResponse, fftSize, sampleRate);
+    
+    return true;
+}
+
+// The range of elevations for the IRCAM impulse responses varies depending on azimuth, but the minimum elevation appears to always be -45.
+//
+// Here's how it goes:
+// Indexed by azimuth / 15 (see createForSubject); value is the maximum
+// elevation (degrees) for which a measured response exists at that azimuth.
+static int maxElevations[] = {
+        //  Azimuth
+        //
+    90, // 0  
+    45, // 15 
+    60, // 30 
+    45, // 45 
+    75, // 60 
+    45, // 75 
+    60, // 90 
+    45, // 105 
+    75, // 120 
+    45, // 135 
+    60, // 150 
+    45, // 165 
+    75, // 180 
+    45, // 195 
+    60, // 210 
+    45, // 225 
+    75, // 240 
+    45, // 255 
+    60, // 270 
+    45, // 285 
+    75, // 300 
+    45, // 315 
+    60, // 330 
+    45 //  345 
+};
+
+PassOwnPtr<HRTFElevation> HRTFElevation::createForSubject(const String& subjectName, int elevation, float sampleRate)
+{
+    bool isElevationGood = elevation >= -45 && elevation <= 90 && (elevation / 15) * 15 == elevation;
+    ASSERT(isElevationGood);
+    if (!isElevationGood)
+        return nullptr;
+        
+    OwnPtr<HRTFKernelList> kernelListL = adoptPtr(new HRTFKernelList(NumberOfTotalAzimuths));
+    OwnPtr<HRTFKernelList> kernelListR = adoptPtr(new HRTFKernelList(NumberOfTotalAzimuths));
+
+    // Load convolution kernels from HRTF files.
+    // Measured kernels land at every InterpolationFactor-th slot; the gaps are filled below.
+    int interpolatedIndex = 0;
+    for (unsigned rawIndex = 0; rawIndex < NumberOfRawAzimuths; ++rawIndex) {
+        // Don't let elevation exceed maximum for this azimuth.
+        int maxElevation = maxElevations[rawIndex];
+        int actualElevation = min(elevation, maxElevation);
+
+        bool success = calculateKernelsForAzimuthElevation(rawIndex * AzimuthSpacing, actualElevation, sampleRate, subjectName, kernelListL->at(interpolatedIndex), kernelListR->at(interpolatedIndex));
+        if (!success)
+            return nullptr;
+            
+        interpolatedIndex += InterpolationFactor;
+    }
+
+    // Now go back and interpolate intermediate azimuth values.
+    for (unsigned i = 0; i < NumberOfTotalAzimuths; i += InterpolationFactor) {
+        // j is the next measured azimuth, wrapping around at 360 degrees.
+        int j = (i + InterpolationFactor) % NumberOfTotalAzimuths;
+
+        // Create the interpolated convolution kernels and delays.
+        for (unsigned jj = 1; jj < InterpolationFactor; ++jj) {
+            float x = float(jj) / float(InterpolationFactor); // interpolate from 0 -> 1
+
+            (*kernelListL)[i + jj] = HRTFKernel::createInterpolatedKernel(kernelListL->at(i).get(), kernelListL->at(j).get(), x);
+            (*kernelListR)[i + jj] = HRTFKernel::createInterpolatedKernel(kernelListR->at(i).get(), kernelListR->at(j).get(), x);
+        }
+    }
+    
+    OwnPtr<HRTFElevation> hrtfElevation = adoptPtr(new HRTFElevation(kernelListL.release(), kernelListR.release(), elevation, sampleRate));
+    return hrtfElevation.release();
+}
+
+PassOwnPtr<HRTFElevation> HRTFElevation::createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, float x, float sampleRate)
+{
+    ASSERT(hrtfElevation1 && hrtfElevation2);
+    if (!hrtfElevation1 || !hrtfElevation2)
+        return nullptr;
+        
+    // x is debug-asserted only; in release builds an out-of-range x is passed
+    // straight through to createInterpolatedKernel (which clamps it).
+    ASSERT(x >= 0.0 && x < 1.0);
+    
+    OwnPtr<HRTFKernelList> kernelListL = adoptPtr(new HRTFKernelList(NumberOfTotalAzimuths));
+    OwnPtr<HRTFKernelList> kernelListR = adoptPtr(new HRTFKernelList(NumberOfTotalAzimuths));
+
+    HRTFKernelList* kernelListL1 = hrtfElevation1->kernelListL();
+    HRTFKernelList* kernelListR1 = hrtfElevation1->kernelListR();
+    HRTFKernelList* kernelListL2 = hrtfElevation2->kernelListL();
+    HRTFKernelList* kernelListR2 = hrtfElevation2->kernelListR();
+    
+    // Interpolate kernels of corresponding azimuths of the two elevations.
+    for (unsigned i = 0; i < NumberOfTotalAzimuths; ++i) {
+        (*kernelListL)[i] = HRTFKernel::createInterpolatedKernel(kernelListL1->at(i).get(), kernelListL2->at(i).get(), x);
+        (*kernelListR)[i] = HRTFKernel::createInterpolatedKernel(kernelListR1->at(i).get(), kernelListR2->at(i).get(), x);
+    }
+
+    // Interpolate elevation angle.
+    double angle = (1.0 - x) * hrtfElevation1->elevationAngle() + x * hrtfElevation2->elevationAngle();
+    
+    OwnPtr<HRTFElevation> hrtfElevation = adoptPtr(new HRTFElevation(kernelListL.release(), kernelListR.release(), static_cast<int>(angle), sampleRate));
+    return hrtfElevation.release();  
+}
+
+void HRTFElevation::getKernelsFromAzimuth(double azimuthBlend, unsigned azimuthIndex, HRTFKernel* &kernelL, HRTFKernel* &kernelR, double& frameDelayL, double& frameDelayR)
+{
+    // Out-of-range blends are clamped to 0 in release builds (assert-only check).
+    bool checkAzimuthBlend = azimuthBlend >= 0.0 && azimuthBlend < 1.0;
+    ASSERT(checkAzimuthBlend);
+    if (!checkAzimuthBlend)
+        azimuthBlend = 0.0;
+    
+    unsigned numKernels = m_kernelListL->size();
+
+    bool isIndexGood = azimuthIndex < numKernels;
+    ASSERT(isIndexGood);
+    if (!isIndexGood) {
+        kernelL = 0;
+        kernelR = 0;
+        return;
+    }
+    
+    // Return the left and right kernels.
+    kernelL = m_kernelListL->at(azimuthIndex).get();
+    kernelR = m_kernelListR->at(azimuthIndex).get();
+
+    frameDelayL = m_kernelListL->at(azimuthIndex)->frameDelay();
+    frameDelayR = m_kernelListR->at(azimuthIndex)->frameDelay();
+
+    // Neighboring azimuth, wrapping around at the last index.
+    int azimuthIndex2 = (azimuthIndex + 1) % numKernels;
+    double frameDelay2L = m_kernelListL->at(azimuthIndex2)->frameDelay();
+    double frameDelay2R = m_kernelListR->at(azimuthIndex2)->frameDelay();
+
+    // Linearly interpolate delays.
+    frameDelayL = (1.0 - azimuthBlend) * frameDelayL + azimuthBlend * frameDelay2L;
+    frameDelayR = (1.0 - azimuthBlend) * frameDelayR + azimuthBlend * frameDelay2R;
+}
+
+// Reports this object's memory footprint to the memory instrumentation system.
+void HRTFElevation::reportMemoryUsage(MemoryObjectInfo* memoryObjectInfo) const
+{
+    MemoryClassInfo info(memoryObjectInfo, this, PlatformMemoryTypes::AudioSharedData);
+    info.addMember(m_kernelListL, "kernelListL");
+    info.addMember(m_kernelListR, "kernelListR");
+}
+
+} // namespace WebCore
+
+#endif // ENABLE(WEB_AUDIO)
new file mode 100644
--- /dev/null
+++ b/content/media/webaudio/blink/HRTFElevation.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HRTFElevation_h
+#define HRTFElevation_h
+
+#include "core/platform/audio/HRTFKernel.h"
+#include <wtf/Noncopyable.h>
+#include <wtf/OwnPtr.h>
+#include <wtf/PassOwnPtr.h>
+#include <wtf/PassRefPtr.h>
+#include <wtf/RefCounted.h>
+#include <wtf/RefPtr.h>
+#include <wtf/text/CString.h>
+#include <wtf/text/WTFString.h>
+
+namespace WebCore {
+
+// HRTFElevation contains all of the HRTFKernels (one left ear and one right ear per azimuth angle) for a particular elevation.
+
+class HRTFElevation {
+    WTF_MAKE_NONCOPYABLE(HRTFElevation);
+public:
+    // Loads and returns an HRTFElevation with the given HRTF database subject name and elevation from browser (or WebKit.framework) resources.
+    // Normally, there will only be a single HRTF database set, but this API supports the possibility of multiple ones with different names.
+    // Interpolated azimuths will be generated based on InterpolationFactor.
+    // Valid values for elevation are -45 -> +90 in 15 degree increments.
+    // Returns nullptr on failure.
+    static PassOwnPtr<HRTFElevation> createForSubject(const String& subjectName, int elevation, float sampleRate);
+
+    // Given two HRTFElevations, and an interpolation factor x: 0 -> 1, returns an interpolated HRTFElevation.
+    static PassOwnPtr<HRTFElevation> createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, float x, float sampleRate);
+
+    // Returns the list of left or right ear HRTFKernels for all the azimuths going from 0 to 360 degrees.
+    // The returned pointers are non-owning; this object retains ownership.
+    HRTFKernelList* kernelListL() { return m_kernelListL.get(); }
+    HRTFKernelList* kernelListR() { return m_kernelListR.get(); }
+
+    double elevationAngle() const { return m_elevationAngle; }
+    unsigned numberOfAzimuths() const { return NumberOfTotalAzimuths; }
+    float sampleRate() const { return m_sampleRate; }
+    
+    // Returns the left and right kernels for the given azimuth index.
+    // The interpolated delays based on azimuthBlend: 0 -> 1 are returned in frameDelayL and frameDelayR.
+    void getKernelsFromAzimuth(double azimuthBlend, unsigned azimuthIndex, HRTFKernel* &kernelL, HRTFKernel* &kernelR, double& frameDelayL, double& frameDelayR);
+    
+    // Spacing, in degrees, between every azimuth loaded from resource.
+    static const unsigned AzimuthSpacing;
+    
+    // Number of azimuths loaded from resource.
+    static const unsigned NumberOfRawAzimuths;
+
+    // Interpolates by this factor to get the total number of azimuths from every azimuth loaded from resource.
+    static const unsigned InterpolationFactor;
+    
+    // Total number of azimuths after interpolation.
+    static const unsigned NumberOfTotalAzimuths;
+
+    // Given a specific azimuth and elevation angle, returns the left and right HRTFKernel.
+    // Valid values for azimuth are 0 -> 345 in 15 degree increments.
+    // Valid values for elevation are -45 -> +90 in 15 degree increments.
+    // Returns true on success.
+    static bool calculateKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
+                                                    RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR);
+
+    // Given a specific azimuth and elevation angle, returns the left and right HRTFKernel in kernelL and kernelR.
+    // This method averages the measured response using symmetry of azimuth (for example by averaging the -30.0 and +30.0 azimuth responses).
+    // Returns true on success.
+    static bool calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
+                                                             RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR);
+
+    void reportMemoryUsage(MemoryObjectInfo*) const;
+
+private:
+    // Takes ownership of both kernel lists.
+    HRTFElevation(PassOwnPtr<HRTFKernelList> kernelListL, PassOwnPtr<HRTFKernelList> kernelListR, int elevation, float sampleRate)
+        : m_kernelListL(kernelListL)
+        , m_kernelListR(kernelListR)
+        , m_elevationAngle(elevation)
+        , m_sampleRate(sampleRate)
+    {
+    }
+
+    OwnPtr<HRTFKernelList> m_kernelListL;
+    OwnPtr<HRTFKernelList> m_kernelListR;
+    double m_elevationAngle;
+    float m_sampleRate;
+};
+
+} // namespace WebCore
+
+#endif // HRTFElevation_h
new file mode 100644
--- /dev/null
+++ b/content/media/webaudio/blink/HRTFKernel.cpp
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(WEB_AUDIO)
+
+#include "core/platform/audio/HRTFKernel.h"
+
+#include "core/platform/FloatConversion.h"
+#include "core/platform/PlatformMemoryInstrumentation.h"
+#include "core/platform/audio/AudioChannel.h"
+#include "core/platform/audio/FFTFrame.h"
+#include <wtf/MathExtras.h>
+
+using namespace std;
+
+namespace WebCore {
+
+// Takes the input AudioChannel as an input impulse response and calculates the average group delay.
+// This represents the initial delay before the most energetic part of the impulse response.
+// The sample-frame delay is removed from the impulseP impulse response, and this value  is returned.
+// the length of the passed in AudioChannel must be a power of 2.
+static float extractAverageGroupDelay(AudioChannel* channel, size_t analysisFFTSize)
+{
+    ASSERT(channel);
+        
+    float* impulseP = channel->mutableData();
+    
+    bool isSizeGood = channel->length() >= analysisFFTSize;
+    ASSERT(isSizeGood);
+    if (!isSizeGood)
+        return 0;
+    
+    // Check for power-of-2.
+    ASSERT(1UL << static_cast<unsigned>(log2(analysisFFTSize)) == analysisFFTSize);
+
+    // Round-trip through the FFT: the delay is extracted in the frequency
+    // domain, and the inverse FFT writes the delay-compensated response
+    // back into impulseP (destructive on the channel's data).
+    FFTFrame estimationFrame(analysisFFTSize);
+    estimationFrame.doFFT(impulseP);
+
+    float frameDelay = narrowPrecisionToFloat(estimationFrame.extractAverageGroupDelay());
+    estimationFrame.doInverseFFT(impulseP);
+
+    return frameDelay;
+}
+
+// Builds the frequency-domain kernel from a time-domain impulse response.
+// Destructive on the passed-in channel (delay removal and fade-out window).
+HRTFKernel::HRTFKernel(AudioChannel* channel, size_t fftSize, float sampleRate)
+    : m_frameDelay(0)
+    , m_sampleRate(sampleRate)
+{
+    ASSERT(channel);
+
+    // Determine the leading delay (average group delay) for the response.
+    m_frameDelay = extractAverageGroupDelay(channel, fftSize / 2);
+
+    float* impulseResponse = channel->mutableData();
+    size_t responseLength = channel->length();
+
+    // We need to truncate to fit into 1/2 the FFT size (with zero padding) in order to do proper convolution.
+    size_t truncatedResponseLength = min(responseLength, fftSize / 2); // truncate if necessary to max impulse response length allowed by FFT
+
+    // Quick fade-out (apply window) at truncation point
+    // Linear ramp down to zero over the last numberOfFadeOutFrames samples
+    // to avoid a discontinuity where the response is cut off.
+    unsigned numberOfFadeOutFrames = static_cast<unsigned>(sampleRate / 4410); // 10 sample-frames @44.1KHz sample-rate
+    ASSERT(numberOfFadeOutFrames < truncatedResponseLength);
+    if (numberOfFadeOutFrames < truncatedResponseLength) {
+        for (unsigned i = truncatedResponseLength - numberOfFadeOutFrames; i < truncatedResponseLength; ++i) {
+            float x = 1.0f - static_cast<float>(i - (truncatedResponseLength - numberOfFadeOutFrames)) / numberOfFadeOutFrames;
+            impulseResponse[i] *= x;
+        }
+    }
+
+    m_fftFrame = adoptPtr(new FFTFrame(fftSize));
+    m_fftFrame->doPaddedFFT(impulseResponse, truncatedResponseLength);
+}
+
+// Converts the kernel back into time-domain form, re-applying the leading
+// delay that was stripped out at construction time.
+PassOwnPtr<AudioChannel> HRTFKernel::createImpulseResponse()
+{
+    OwnPtr<AudioChannel> channel = adoptPtr(new AudioChannel(fftSize()));
+    // Work on a copy so m_fftFrame is left untouched.
+    FFTFrame fftFrame(*m_fftFrame);
+
+    // Add leading delay back in.
+    fftFrame.addConstantGroupDelay(m_frameDelay);
+    fftFrame.doInverseFFT(channel->mutableData());
+
+    return channel.release();
+}
+
+// Interpolates two kernels with x: 0 -> 1 and returns the result.
+// Interpolates two kernels with x: 0 -> 1 and returns the result.
+// Returns a null PassRefPtr if either kernel is null or their sample-rates differ.
+PassRefPtr<HRTFKernel> HRTFKernel::createInterpolatedKernel(HRTFKernel* kernel1, HRTFKernel* kernel2, float x)
+{
+    ASSERT(kernel1 && kernel2);
+    if (!kernel1 || !kernel2)
+        return 0;
+ 
+    // Debug-assert the expected range, but clamp in release builds.
+    ASSERT(x >= 0.0 && x < 1.0);
+    x = min(1.0f, max(0.0f, x));
+    
+    float sampleRate1 = kernel1->sampleRate();
+    float sampleRate2 = kernel2->sampleRate();
+    ASSERT(sampleRate1 == sampleRate2);
+    if (sampleRate1 != sampleRate2)
+        return 0;
+    
+    // Delays blend linearly; spectra are blended by FFTFrame.
+    float frameDelay = (1 - x) * kernel1->frameDelay() + x * kernel2->frameDelay();
+    
+    OwnPtr<FFTFrame> interpolatedFrame = FFTFrame::createInterpolatedFrame(*kernel1->fftFrame(), *kernel2->fftFrame(), x);
+    return HRTFKernel::create(interpolatedFrame.release(), frameDelay, sampleRate1);
+}
+
+// Reports this object's memory footprint to the memory instrumentation system.
+void HRTFKernel::reportMemoryUsage(MemoryObjectInfo* memoryObjectInfo) const
+{
+    MemoryClassInfo info(memoryObjectInfo, this, PlatformMemoryTypes::AudioSharedData);
+    info.addMember(m_fftFrame, "fftFrame");
+}
+
+} // namespace WebCore
+
+#endif // ENABLE(WEB_AUDIO)
new file mode 100644
--- /dev/null
+++ b/content/media/webaudio/blink/HRTFKernel.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2010 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1.  Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ *     its contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HRTFKernel_h
+#define HRTFKernel_h
+
+#include "core/platform/audio/FFTFrame.h"
+#include <wtf/OwnPtr.h>
+#include <wtf/PassOwnPtr.h>
+#include <wtf/PassRefPtr.h>
+#include <wtf/RefCounted.h>
+#include <wtf/RefPtr.h>
+#include <wtf/Vector.h>
+
+namespace WebCore {
+
+class AudioChannel;
+    
+// HRTF stands for Head-Related Transfer Function.
+// HRTFKernel is a frequency-domain representation of an impulse-response used as part of the spatialized panning system.
+// For a given azimuth / elevation angle there will be one HRTFKernel for the left ear transfer function, and one for the right ear.
+// The leading delay (average group delay) for each impulse response is extracted:
+//      m_fftFrame is the frequency-domain representation of the impulse response with the delay removed
+//      m_frameDelay is the leading delay of the original impulse response.
+class HRTFKernel : public RefCounted<HRTFKernel> {
+public:
+    // Note: this is destructive on the passed in AudioChannel.
+    // The length of channel must be a power of two.
+    static PassRefPtr<HRTFKernel> create(AudioChannel* channel, size_t fftSize, float sampleRate)
+    {
+        return adoptRef(new HRTFKernel(channel, fftSize, sampleRate));
+    }
+
+    // Takes ownership of the passed-in FFTFrame.
+    static PassRefPtr<HRTFKernel> create(PassOwnPtr<FFTFrame> fftFrame, float frameDelay, float sampleRate)
+    {
+        return adoptRef(new HRTFKernel(fftFrame, frameDelay, sampleRate));
+    }
+
+    // Given two HRTFKernels, and an interpolation factor x: 0 -> 1, returns an interpolated HRTFKernel.
+    // Returns null if either input is null or their sample-rates differ.
+    static PassRefPtr<HRTFKernel> createInterpolatedKernel(HRTFKernel* kernel1, HRTFKernel* kernel2, float x);
+  
+    FFTFrame* fftFrame() { return m_fftFrame.get(); }
+    
+    size_t fftSize() const { return m_fftFrame->fftSize(); }
+    float frameDelay() const { return m_frameDelay; }
+
+    float sampleRate() const { return m_sampleRate; }
+    double nyquist() const { return 0.5 * sampleRate(); }
+
+    // Converts back into impulse-response form.
+    PassOwnPtr<AudioChannel> createImpulseResponse();
+
+    void reportMemoryUsage(MemoryObjectInfo*) const;
+
+private:
+    // Note: this is destructive on the passed in AudioChannel.
+    HRTFKernel(AudioChannel*, size_t fftSize, float sampleRate);
+    
+    HRTFKernel(PassOwnPtr<FFTFrame> fftFrame, float frameDelay, float sampleRate)
+        : m_fftFrame(fftFrame)
+        , m_frameDelay(frameDelay)
+        , m_sampleRate(sampleRate)
+    {
+    }
+    
+    OwnPtr<FFTFrame> m_fftFrame;     // frequency-domain response, delay removed
+    float m_frameDelay;              // leading delay (in sample-frames) of the original response
+    float m_sampleRate;
+};
+
+typedef Vector<RefPtr<HRTFKernel> > HRTFKernelList;
+
+} // namespace WebCore
+
+#endif // HRTFKernel_h
new file mode 100644
--- /dev/null
+++ b/content/media/webaudio/blink/HRTFPanner.cpp
@@ -0,0 +1,314 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1.  Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(WEB_AUDIO)
+
+#include "core/platform/audio/HRTFPanner.h"
+
+#include <algorithm>
+#include "core/platform/audio/AudioBus.h"
+#include "core/platform/audio/FFTConvolver.h"
+#include "core/platform/audio/HRTFDatabase.h"
+#include <wtf/MathExtras.h>
+#include <wtf/RefPtr.h>
+
+using namespace std;
+
+namespace WebCore {
+
+// The value of 2 milliseconds is larger than the largest delay which exists in any HRTFKernel from the default HRTFDatabase (0.00136 seconds).
+// We ASSERT the delay values used in process() with this value.
+const double MaxDelayTimeSeconds = 0.002;
+
+// Sentinel meaning "no azimuth rendered yet"; pan() snaps straight to the
+// first azimuth/elevation it sees while an index is uninitialized.
+const int UninitializedAzimuth = -1;
+// Number of sample-frames processed per segment in pan(); also the size of
+// the temporary cross-fade buffers.
+const unsigned RenderingQuantum = 128;
+
+// databaseLoader must be non-null; the panner retains a reference to it
+// (RefPtr member) and fetches HRTF kernels from its database in pan().
+// The convolver FFT size is derived from the sample rate, and the temp
+// buffers each hold one RenderingQuantum of frames for cross-fade mixing.
+HRTFPanner::HRTFPanner(float sampleRate, HRTFDatabaseLoader* databaseLoader)
+    : Panner(PanningModelHRTF)
+    , m_databaseLoader(databaseLoader)
+    , m_sampleRate(sampleRate)
+    , m_crossfadeSelection(CrossfadeSelection1)
+    , m_azimuthIndex1(UninitializedAzimuth)
+    , m_elevation1(0)
+    , m_azimuthIndex2(UninitializedAzimuth)
+    , m_elevation2(0)
+    , m_crossfadeX(0)
+    , m_crossfadeIncr(0)
+    , m_convolverL1(fftSizeForSampleRate(sampleRate))
+    , m_convolverR1(fftSizeForSampleRate(sampleRate))
+    , m_convolverL2(fftSizeForSampleRate(sampleRate))
+    , m_convolverR2(fftSizeForSampleRate(sampleRate))
+    , m_delayLineL(MaxDelayTimeSeconds, sampleRate)
+    , m_delayLineR(MaxDelayTimeSeconds, sampleRate)
+    , m_tempL1(RenderingQuantum)
+    , m_tempR1(RenderingQuantum)
+    , m_tempL2(RenderingQuantum)
+    , m_tempR2(RenderingQuantum)
+{
+    ASSERT(databaseLoader);
+}
+
+// Empty: member destructors release the convolvers, delay lines, scratch
+// buffers, and the reference to the database loader.
+HRTFPanner::~HRTFPanner()
+{
+}
+
+// Returns the FFT size used by the convolvers for the given sample rate:
+// 512 for rates in [44.1 kHz, 88.2 kHz), 1024 for [88.2 kHz, 96 kHz].
+size_t HRTFPanner::fftSizeForSampleRate(float sampleRate)
+{
+    // The HRTF impulse responses (loaded as audio resources) are 512 sample-frames @44.1KHz.
+    // Currently, we truncate the impulse responses to half this size, but an FFT-size of twice impulse response size is needed (for convolution).
+    // So for sample rates around 44.1KHz an FFT size of 512 is good. We double the FFT-size only for sample rates at least double this.
+    ASSERT(sampleRate >= 44100 && sampleRate <= 96000.0);
+    return (sampleRate < 88200.0) ? 512 : 1024;
+}
+
+// Discards the DSP history of both convolver sets and both interaural delay
+// lines so subsequent processing starts from a clean state. Note that the
+// cross-fade state (m_crossfadeX et al.) is intentionally left untouched.
+void HRTFPanner::reset()
+{
+    m_convolverL1.reset();
+    m_convolverR1.reset();
+    m_convolverL2.reset();
+    m_convolverR2.reset();
+    m_delayLineL.reset();
+    m_delayLineR.reset();
+}
+
+// Maps an azimuth angle in degrees (range -180 -> +180) to the nearest
+// database azimuth index, clamped to [0, numberOfAzimuths - 1]. The 0 -> 1
+// fractional position toward the next index is written to azimuthBlend for
+// kernel interpolation.
+int HRTFPanner::calculateDesiredAzimuthIndexAndBlend(double azimuth, double& azimuthBlend)
+{
+    // Convert the azimuth angle from the range -180 -> +180 into the range 0 -> 360.
+    // The azimuth index may then be calculated from this positive value.
+    if (azimuth < 0)
+        azimuth += 360.0;
+
+    HRTFDatabase* database = m_databaseLoader->database();
+    ASSERT(database);
+
+    int numberOfAzimuths = database->numberOfAzimuths();
+    const double angleBetweenAzimuths = 360.0 / numberOfAzimuths;
+
+    // Calculate the azimuth index and the blend (0 -> 1) for interpolation.
+    double desiredAzimuthIndexFloat = azimuth / angleBetweenAzimuths;
+    int desiredAzimuthIndex = static_cast<int>(desiredAzimuthIndexFloat);
+    azimuthBlend = desiredAzimuthIndexFloat - static_cast<double>(desiredAzimuthIndex);
+
+    // We don't immediately start using this azimuth index, but instead approach this index from the last index we rendered at.
+    // This minimizes the clicks and graininess for moving sources which occur otherwise.
+    desiredAzimuthIndex = max(0, desiredAzimuthIndex);
+    desiredAzimuthIndex = min(numberOfAzimuths - 1, desiredAzimuthIndex);
+    return desiredAzimuthIndex;
+}
+
+// Renders framesToProcess sample-frames of inputBus (mono or stereo) through
+// the HRTF for (desiredAzimuth, elevation) into the stereo outputBus.
+// Processing runs in RenderingQuantum-sized segments: each segment applies the
+// interaural time difference via the delay lines, convolves with the HRTF
+// kernel pair(s), and linearly cross-fades between the two convolver sets
+// whenever a transition to a new azimuth/elevation is in progress. On any
+// invalid input/output/state, the output is zeroed and the call returns.
+void HRTFPanner::pan(double desiredAzimuth, double elevation, const AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess)
+{
+    unsigned numInputChannels = inputBus ? inputBus->numberOfChannels() : 0;
+
+    bool isInputGood = inputBus &&  numInputChannels >= 1 && numInputChannels <= 2;
+    ASSERT(isInputGood);
+
+    bool isOutputGood = outputBus && outputBus->numberOfChannels() == 2 && framesToProcess <= outputBus->length();
+    ASSERT(isOutputGood);
+
+    if (!isInputGood || !isOutputGood) {
+        if (outputBus)
+            outputBus->zero();
+        return;
+    }
+
+    HRTFDatabase* database = m_databaseLoader->database();
+    ASSERT(database);
+    if (!database) {
+        outputBus->zero();
+        return;
+    }
+
+    // The IRCAM HRTF azimuth values in the loaded database are reversed from the panner's notion of azimuth.
+    double azimuth = -desiredAzimuth;
+
+    bool isAzimuthGood = azimuth >= -180.0 && azimuth <= 180.0;
+    ASSERT(isAzimuthGood);
+    if (!isAzimuthGood) {
+        outputBus->zero();
+        return;
+    }
+
+    // Normally, we'll just be dealing with mono sources.
+    // If we have a stereo input, implement stereo panning with left source processed by left HRTF, and right source by right HRTF.
+    const AudioChannel* inputChannelL = inputBus->channelByType(AudioBus::ChannelLeft);
+    const AudioChannel* inputChannelR = numInputChannels > 1 ? inputBus->channelByType(AudioBus::ChannelRight) : 0;
+
+    // Get source and destination pointers.
+    const float* sourceL = inputChannelL->data();
+    const float* sourceR = numInputChannels > 1 ? inputChannelR->data() : sourceL;
+    float* destinationL = outputBus->channelByType(AudioBus::ChannelLeft)->mutableData();
+    float* destinationR = outputBus->channelByType(AudioBus::ChannelRight)->mutableData();
+
+    double azimuthBlend;
+    int desiredAzimuthIndex = calculateDesiredAzimuthIndexAndBlend(azimuth, azimuthBlend);
+
+    // Initially snap azimuth and elevation values to first values encountered.
+    if (m_azimuthIndex1 == UninitializedAzimuth) {
+        m_azimuthIndex1 = desiredAzimuthIndex;
+        m_elevation1 = elevation;
+    }
+    if (m_azimuthIndex2 == UninitializedAzimuth) {
+        m_azimuthIndex2 = desiredAzimuthIndex;
+        m_elevation2 = elevation;
+    }
+
+    // Cross-fade / transition over a period of around 45 milliseconds.
+    // This is an empirical value tuned to be a reasonable trade-off between
+    // smoothness and speed.
+    const double fadeFrames = sampleRate() <= 48000 ? 2048 : 4096;
+
+    // Check for azimuth and elevation changes, initiating a cross-fade if needed.
+    // A new transition only starts when no cross-fade is currently in progress
+    // (m_crossfadeX pinned at 0 or 1 with the matching selection).
+    if (!m_crossfadeX && m_crossfadeSelection == CrossfadeSelection1) {
+        if (desiredAzimuthIndex != m_azimuthIndex1 || elevation != m_elevation1) {
+            // Cross-fade from 1 -> 2
+            m_crossfadeIncr = 1 / fadeFrames;
+            m_azimuthIndex2 = desiredAzimuthIndex;
+            m_elevation2 = elevation;
+        }
+    }
+    if (m_crossfadeX == 1 && m_crossfadeSelection == CrossfadeSelection2) {
+        if (desiredAzimuthIndex != m_azimuthIndex2 || elevation != m_elevation2) {
+            // Cross-fade from 2 -> 1
+            m_crossfadeIncr = -1 / fadeFrames;
+            m_azimuthIndex1 = desiredAzimuthIndex;
+            m_elevation1 = elevation;
+        }
+    }
+
+    // This algorithm currently requires that we process in power-of-two size chunks at least RenderingQuantum.
+    ASSERT(1UL << static_cast<int>(log2(framesToProcess)) == framesToProcess);
+    ASSERT(framesToProcess >= RenderingQuantum);
+
+    const unsigned framesPerSegment = RenderingQuantum;
+    const unsigned numberOfSegments = framesToProcess / framesPerSegment;
+
+    for (unsigned segment = 0; segment < numberOfSegments; ++segment) {
+        // Get the HRTFKernels and interpolated delays.
+        HRTFKernel* kernelL1;
+        HRTFKernel* kernelR1;
+        HRTFKernel* kernelL2;
+        HRTFKernel* kernelR2;
+        double frameDelayL1;
+        double frameDelayR1;
+        double frameDelayL2;
+        double frameDelayR2;
+        database->getKernelsFromAzimuthElevation(azimuthBlend, m_azimuthIndex1, m_elevation1, kernelL1, kernelR1, frameDelayL1, frameDelayR1);
+        database->getKernelsFromAzimuthElevation(azimuthBlend, m_azimuthIndex2, m_elevation2, kernelL2, kernelR2, frameDelayL2, frameDelayR2);
+
+        bool areKernelsGood = kernelL1 && kernelR1 && kernelL2 && kernelR2;
+        ASSERT(areKernelsGood);
+        if (!areKernelsGood) {
+            outputBus->zero();
+            return;
+        }
+
+        ASSERT(frameDelayL1 / sampleRate() < MaxDelayTimeSeconds && frameDelayR1 / sampleRate() < MaxDelayTimeSeconds);
+        ASSERT(frameDelayL2 / sampleRate() < MaxDelayTimeSeconds && frameDelayR2 / sampleRate() < MaxDelayTimeSeconds);
+
+        // Crossfade inter-aural delays based on transitions.
+        double frameDelayL = (1 - m_crossfadeX) * frameDelayL1 + m_crossfadeX * frameDelayL2;
+        double frameDelayR = (1 - m_crossfadeX) * frameDelayR1 + m_crossfadeX * frameDelayR2;
+
+        // Calculate the source and destination pointers for the current segment.
+        unsigned offset = segment * framesPerSegment;
+        const float* segmentSourceL = sourceL + offset;
+        const float* segmentSourceR = sourceR + offset;
+        float* segmentDestinationL = destinationL + offset;
+        float* segmentDestinationR = destinationR + offset;
+
+        // First run through delay lines for inter-aural time difference.
+        m_delayLineL.setDelayFrames(frameDelayL);
+        m_delayLineR.setDelayFrames(frameDelayR);
+        m_delayLineL.process(segmentSourceL, segmentDestinationL, framesPerSegment);
+        m_delayLineR.process(segmentSourceR, segmentDestinationR, framesPerSegment);
+
+        // A non-zero increment means a cross-fade is currently in progress.
+        bool needsCrossfading = m_crossfadeIncr;
+        
+        // Have the convolvers render directly to the final destination if we're not cross-fading.
+        float* convolutionDestinationL1 = needsCrossfading ? m_tempL1.data() : segmentDestinationL;
+        float* convolutionDestinationR1 = needsCrossfading ? m_tempR1.data() : segmentDestinationR;
+        float* convolutionDestinationL2 = needsCrossfading ? m_tempL2.data() : segmentDestinationL;
+        float* convolutionDestinationR2 = needsCrossfading ? m_tempR2.data() : segmentDestinationR;
+
+        // Now do the convolutions.
+        // Note that we avoid doing convolutions on both sets of convolvers if we're not currently cross-fading.
+        
+        if (m_crossfadeSelection == CrossfadeSelection1 || needsCrossfading) {
+            m_convolverL1.process(kernelL1->fftFrame(), segmentDestinationL, convolutionDestinationL1, framesPerSegment);
+            m_convolverR1.process(kernelR1->fftFrame(), segmentDestinationR, convolutionDestinationR1, framesPerSegment);
+        }
+
+        if (m_crossfadeSelection == CrossfadeSelection2 || needsCrossfading) {
+            m_convolverL2.process(kernelL2->fftFrame(), segmentDestinationL, convolutionDestinationL2, framesPerSegment);
+            m_convolverR2.process(kernelR2->fftFrame(), segmentDestinationR, convolutionDestinationR2, framesPerSegment);
+        }
+        
+        if (needsCrossfading) {
+            // Apply linear cross-fade.
+            float x = m_crossfadeX;
+            float incr = m_crossfadeIncr;
+            for (unsigned i = 0; i < framesPerSegment; ++i) {
+                segmentDestinationL[i] = (1 - x) * convolutionDestinationL1[i] + x * convolutionDestinationL2[i];
+                segmentDestinationR[i] = (1 - x) * convolutionDestinationR1[i] + x * convolutionDestinationR2[i];
+                x += incr;
+            }
+            // Update cross-fade value from local.
+            m_crossfadeX = x;
+
+            if (m_crossfadeIncr > 0 && fabs(m_crossfadeX - 1) < m_crossfadeIncr) {
+                // We've fully made the crossfade transition from 1 -> 2.
+                m_crossfadeSelection = CrossfadeSelection2;
+                m_crossfadeX = 1;
+                m_crossfadeIncr = 0;
+            } else if (m_crossfadeIncr < 0 && fabs(m_crossfadeX) < -m_crossfadeIncr) {
+                // We've fully made the crossfade transition from 2 -> 1.
+                m_crossfadeSelection = CrossfadeSelection1;
+                m_crossfadeX = 0;
+                m_crossfadeIncr = 0;
+            }
+        }
+    }
+}
+
+double HRTFPanner::tailTime() const
+{
+    // Because HRTFPanner is implemented with a DelayDSPKernel and a FFTConvolver, the tailTime of the HRTFPanner
+    // is the sum of the tailTime of the DelayDSPKernel and the tailTime of the FFTConvolver, which is MaxDelayTimeSeconds
+    // and fftSize() / 2 frames, respectively.
+    return MaxDelayTimeSeconds + (fftSize() / 2) / static_cast<double>(sampleRate());
+}
+
+double HRTFPanner::latencyTime() const
+{
+    // The latency of a FFTConvolver is also fftSize() / 2 frames, and is in addition to its tailTime of the
+    // same value. Converted here to seconds at the panner's sample rate.
+    return (fftSize() / 2) / static_cast<double>(sampleRate());
+}
+
+} // namespace WebCore
+
+#endif // ENABLE(WEB_AUDIO)
new file mode 100644
--- /dev/null
+++ b/content/media/webaudio/blink/HRTFPanner.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2010, Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1.  Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2.  Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HRTFPanner_h
+#define HRTFPanner_h
+
+#include "core/platform/audio/FFTConvolver.h"
+#include "core/platform/audio/HRTFDatabaseLoader.h"
+#include "core/platform/audio/Panner.h"
+#include "modules/webaudio/DelayDSPKernel.h"
+
+namespace WebCore {
+
+class HRTFPanner : public Panner {
+public:
+    HRTFPanner(float sampleRate, HRTFDatabaseLoader*);
+    virtual ~HRTFPanner();
+
+    // Panner
+    virtual void pan(double azimuth, double elevation, const AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess);
+    virtual void reset();
+
+    size_t fftSize() const { return fftSizeForSampleRate(m_sampleRate); }
+    static size_t fftSizeForSampleRate(float sampleRate);
+
+    float sampleRate() const { return m_sampleRate; }
+
+    virtual double tailTime() const OVERRIDE;
+    virtual double latencyTime() const OVERRIDE;
+
+private:
+    // Given an azimuth angle in the range -180 -> +180, returns the corresponding azimuth index for the database,
+    // and azimuthBlend which is an interpolation value from 0 -> 1.
+    int calculateDesiredAzimuthIndexAndBlend(double azimuth, double& azimuthBlend);
+
+    // Keeps the loader (and the HRTF database it provides) alive for the lifetime of the panner.
+    RefPtr<HRTFDatabaseLoader> m_databaseLoader;
+
+    float m_sampleRate;
+
+    // We maintain two sets of convolvers for smooth cross-faded interpolations when
+    // the azimuth and elevation are dynamically changing.
+    // When the azimuth and elevation are not changing, we simply process with one of the two sets.
+    // Initially we use CrossfadeSelection1 corresponding to m_convolverL1 and m_convolverR1.
+    // Whenever the azimuth or elevation changes, a crossfade is initiated to transition
+    // to the new position. So if we're currently processing with CrossfadeSelection1, then
+    // we transition to CrossfadeSelection2 (and vice versa).
+    // If we're in the middle of a transition, then we wait until it is complete before
+    // initiating a new transition.
+
+    // Selects either the convolver set (m_convolverL1, m_convolverR1) or (m_convolverL2, m_convolverR2).
+    enum CrossfadeSelection {
+        CrossfadeSelection1,
+        CrossfadeSelection2
+    };
+
+    CrossfadeSelection m_crossfadeSelection;
+
+    // azimuth/elevation for CrossfadeSelection1.
+    int m_azimuthIndex1;
+    double m_elevation1;
+
+    // azimuth/elevation for CrossfadeSelection2.
+    int m_azimuthIndex2;
+    double m_elevation2;
+
+    // A crossfade value 0 <= m_crossfadeX <= 1.
+    float m_crossfadeX;
+
+    // Per-sample-frame crossfade value increment.
+    float m_crossfadeIncr;
+
+    // Convolver set 1 (left/right ears) and set 2 (left/right ears).
+    FFTConvolver m_convolverL1;
+    FFTConvolver m_convolverR1;
+    FFTConvolver m_convolverL2;
+    FFTConvolver m_convolverR2;
+
+    // Delay lines implementing the inter-aural time difference.
+    DelayDSPKernel m_delayLineL;
+    DelayDSPKernel m_delayLineR;
+
+    // Scratch buffers used as convolver output while cross-fading.
+    AudioFloatArray m_tempL1;
+    AudioFloatArray m_tempR1;
+    AudioFloatArray m_tempL2;
+    AudioFloatArray m_tempR2;
+};
+
+} // namespace WebCore
+
+#endif // HRTFPanner_h