b=815643 remove unused HRTF code r=ehsan
author Karl Tomlinson <karlt+@karlt.net>
Thu, 08 Aug 2013 21:37:36 +1200
changeset 142352 5ddd533fc54c
parent 142351 1e796e343ef8
child 142353 07e1d8379a30
push id 32374
push user ktomlinson@mozilla.com
push date Tue, 13 Aug 2013 02:49:14 +0000
treeherder mozilla-inbound@62ad090a94a4 [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers ehsan
bugs 815643
milestone26.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
b=815643 remove unused HRTF code r=ehsan
content/media/webaudio/blink/HRTFElevation.cpp
content/media/webaudio/blink/HRTFElevation.h
content/media/webaudio/blink/HRTFKernel.cpp
content/media/webaudio/blink/HRTFKernel.h
--- a/content/media/webaudio/blink/HRTFElevation.cpp
+++ b/content/media/webaudio/blink/HRTFElevation.cpp
@@ -44,87 +44,19 @@ using namespace std;
  
 namespace WebCore {
 
 const unsigned HRTFElevation::AzimuthSpacing = 15;
 const unsigned HRTFElevation::NumberOfRawAzimuths = 360 / AzimuthSpacing;
 const unsigned HRTFElevation::InterpolationFactor = 8;
 const unsigned HRTFElevation::NumberOfTotalAzimuths = NumberOfRawAzimuths * InterpolationFactor;
 
-// Total number of components of an HRTF database.
-const size_t TotalNumberOfResponses = 240;
-
 // Number of frames in an individual impulse response.
 const size_t ResponseFrameSize = 256;
 
-// Sample-rate of the spatialization impulse responses as stored in the resource file.
-// The impulse responses may be resampled to a different sample-rate (depending on the audio hardware) when they are loaded.
-const float ResponseSampleRate = 44100;
-
-#if USE(CONCATENATED_IMPULSE_RESPONSES)
-// Lazily load a concatenated HRTF database for given subject and store it in a
-// local hash table to ensure quick efficient future retrievals.
-static PassRefPtr<AudioBus> getConcatenatedImpulseResponsesForSubject(const String& subjectName)
-{
-    typedef HashMap<String, RefPtr<AudioBus> > AudioBusMap;
-    DEFINE_STATIC_LOCAL(AudioBusMap, audioBusMap, ());
-
-    RefPtr<AudioBus> bus;
-    AudioBusMap::iterator iterator = audioBusMap.find(subjectName);
-    if (iterator == audioBusMap.end()) {
-        RefPtr<AudioBus> concatenatedImpulseResponses(AudioBus::loadPlatformResource(subjectName.utf8().data(), ResponseSampleRate));
-        ASSERT(concatenatedImpulseResponses);
-        if (!concatenatedImpulseResponses)
-            return 0;
-
-        bus = concatenatedImpulseResponses;
-        audioBusMap.set(subjectName, bus);
-    } else
-        bus = iterator->value;
-
-    size_t responseLength = bus->length();
-    size_t expectedLength = static_cast<size_t>(TotalNumberOfResponses * ResponseFrameSize);
-
-    // Check number of channels and length. For now these are fixed and known.
-    bool isBusGood = responseLength == expectedLength && bus->numberOfChannels() == 2;
-    ASSERT(isBusGood);
-    if (!isBusGood)
-        return 0;
-
-    return bus;
-}
-#endif
-
-// Takes advantage of the symmetry and creates a composite version of the two measured versions.  For example, we have both azimuth 30 and -30 degrees
-// where the roles of left and right ears are reversed with respect to each other.
-bool HRTFElevation::calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
-                                                                 RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR)
-{
-    RefPtr<HRTFKernel> kernelL1;
-    RefPtr<HRTFKernel> kernelR1;
-    bool success = calculateKernelsForAzimuthElevation(azimuth, elevation, sampleRate, subjectName, kernelL1, kernelR1);
-    if (!success)
-        return false;
-        
-    // And symmetric version
-    int symmetricAzimuth = !azimuth ? 0 : 360 - azimuth;
-                                                              
-    RefPtr<HRTFKernel> kernelL2;
-    RefPtr<HRTFKernel> kernelR2;
-    success = calculateKernelsForAzimuthElevation(symmetricAzimuth, elevation, sampleRate, subjectName, kernelL2, kernelR2);
-    if (!success)
-        return false;
-        
-    // Notice L/R reversal in symmetric version.
-    kernelL = HRTFKernel::createInterpolatedKernel(kernelL1.get(), kernelR2.get(), 0.5f);
-    kernelR = HRTFKernel::createInterpolatedKernel(kernelR1.get(), kernelL2.get(), 0.5f);
-    
-    return true;
-}
-
 bool HRTFElevation::calculateKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
                                                         RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR)
 {
     // Valid values for azimuth are 0 -> 345 in 15 degree increments.
     // Valid values for elevation are -45 -> +90 in 15 degree increments.
 
     bool isAzimuthGood = azimuth >= 0 && azimuth <= 345 && (azimuth / 15) * 15 == azimuth;
     ASSERT(isAzimuthGood);
@@ -137,46 +69,16 @@ bool HRTFElevation::calculateKernelsForA
         return false;
     
     // Construct the resource name from the subject name, azimuth, and elevation, for example:
     // "IRC_Composite_C_R0195_T015_P000"
     // Note: the passed in subjectName is not a string passed in via JavaScript or the web.
     // It's passed in as an internal ASCII identifier and is an implementation detail.
     int positiveElevation = elevation < 0 ? elevation + 360 : elevation;
 
-#if USE(CONCATENATED_IMPULSE_RESPONSES)
-    RefPtr<AudioBus> bus(getConcatenatedImpulseResponsesForSubject(subjectName));
-
-    if (!bus)
-        return false;
-
-    int elevationIndex = positiveElevation / AzimuthSpacing;
-    if (positiveElevation > 90)
-        elevationIndex -= AzimuthSpacing;
-
-    // The concatenated impulse response is a bus containing all
-    // the elevations per azimuth, for all azimuths by increasing
-    // order. So for a given azimuth and elevation we need to compute
-    // the index of the wanted audio frames in the concatenated table.
-    unsigned index = ((azimuth / AzimuthSpacing) * HRTFDatabase::NumberOfRawElevations) + elevationIndex;
-    bool isIndexGood = index < TotalNumberOfResponses;
-    ASSERT(isIndexGood);
-    if (!isIndexGood)
-        return false;
-
-    // Extract the individual impulse response from the concatenated
-    // responses and potentially sample-rate convert it to the desired
-    // (hardware) sample-rate.
-    unsigned startFrame = index * ResponseFrameSize;
-    unsigned stopFrame = startFrame + ResponseFrameSize;
-    RefPtr<AudioBus> preSampleRateConvertedResponse(AudioBus::createBufferFromRange(bus.get(), startFrame, stopFrame));
-    RefPtr<AudioBus> response(AudioBus::createBySampleRateConverting(preSampleRateConvertedResponse.get(), false, sampleRate));
-    AudioChannel* leftEarImpulseResponse = response->channel(AudioBus::ChannelLeft);
-    AudioChannel* rightEarImpulseResponse = response->channel(AudioBus::ChannelRight);
-#else
     String resourceName = String::format("IRC_%s_C_R0195_T%03d_P%03d", subjectName.utf8().data(), azimuth, positiveElevation);
 
     RefPtr<AudioBus> impulseResponse(AudioBus::loadPlatformResource(resourceName.utf8().data(), sampleRate));
 
     ASSERT(impulseResponse.get());
     if (!impulseResponse.get())
         return false;
     
@@ -186,17 +88,16 @@ bool HRTFElevation::calculateKernelsForA
     // Check number of channels and length.  For now these are fixed and known.
     bool isBusGood = responseLength == expectedLength && impulseResponse->numberOfChannels() == 2;
     ASSERT(isBusGood);
     if (!isBusGood)
         return false;
     
     AudioChannel* leftEarImpulseResponse = impulseResponse->channelByType(AudioBus::ChannelLeft);
     AudioChannel* rightEarImpulseResponse = impulseResponse->channelByType(AudioBus::ChannelRight);
-#endif
 
     // Note that depending on the fftSize returned by the panner, we may be truncating the impulse response we just loaded in.
     const size_t fftSize = HRTFPanner::fftSizeForSampleRate(sampleRate);
     kernelL = HRTFKernel::create(leftEarImpulseResponse, fftSize, sampleRate);
     kernelR = HRTFKernel::create(rightEarImpulseResponse, fftSize, sampleRate);
     
     return true;
 }
--- a/content/media/webaudio/blink/HRTFElevation.h
+++ b/content/media/webaudio/blink/HRTFElevation.h
@@ -81,22 +81,16 @@ public:
 
     // Given a specific azimuth and elevation angle, returns the left and right HRTFKernel.
     // Valid values for azimuth are 0 -> 345 in 15 degree increments.
     // Valid values for elevation are -45 -> +90 in 15 degree increments.
     // Returns true on success.
     static bool calculateKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
                                                     RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR);
 
-    // Given a specific azimuth and elevation angle, returns the left and right HRTFKernel in kernelL and kernelR.
-    // This method averages the measured response using symmetry of azimuth (for example by averaging the -30.0 and +30.0 azimuth responses).
-    // Returns true on success.
-    static bool calculateSymmetricKernelsForAzimuthElevation(int azimuth, int elevation, float sampleRate, const String& subjectName,
-                                                             RefPtr<HRTFKernel>& kernelL, RefPtr<HRTFKernel>& kernelR);
-
     void reportMemoryUsage(MemoryObjectInfo*) const;
 
 private:
     HRTFElevation(PassOwnPtr<HRTFKernelList> kernelListL, PassOwnPtr<HRTFKernelList> kernelListR, int elevation, float sampleRate)
         : m_kernelListL(kernelListL)
         , m_kernelListR(kernelListR)
         , m_elevationAngle(elevation)
         , m_sampleRate(sampleRate)
--- a/content/media/webaudio/blink/HRTFKernel.cpp
+++ b/content/media/webaudio/blink/HRTFKernel.cpp
@@ -93,28 +93,16 @@ HRTFKernel::HRTFKernel(AudioChannel* cha
             impulseResponse[i] *= x;
         }
     }
 
     m_fftFrame = adoptPtr(new FFTFrame(fftSize));
     m_fftFrame->doPaddedFFT(impulseResponse, truncatedResponseLength);
 }
 
-PassOwnPtr<AudioChannel> HRTFKernel::createImpulseResponse()
-{
-    OwnPtr<AudioChannel> channel = adoptPtr(new AudioChannel(fftSize()));
-    FFTFrame fftFrame(*m_fftFrame);
-
-    // Add leading delay back in.
-    fftFrame.addConstantGroupDelay(m_frameDelay);
-    fftFrame.doInverseFFT(channel->mutableData());
-
-    return channel.release();
-}
-
 // Interpolates two kernels with x: 0 -> 1 and returns the result.
 PassRefPtr<HRTFKernel> HRTFKernel::createInterpolatedKernel(HRTFKernel* kernel1, HRTFKernel* kernel2, float x)
 {
     ASSERT(kernel1 && kernel2);
     if (!kernel1 || !kernel2)
         return 0;
  
     ASSERT(x >= 0.0 && x < 1.0);
--- a/content/media/webaudio/blink/HRTFKernel.h
+++ b/content/media/webaudio/blink/HRTFKernel.h
@@ -67,19 +67,16 @@ public:
     FFTFrame* fftFrame() { return m_fftFrame.get(); }
     
     size_t fftSize() const { return m_fftFrame->fftSize(); }
     float frameDelay() const { return m_frameDelay; }
 
     float sampleRate() const { return m_sampleRate; }
     double nyquist() const { return 0.5 * sampleRate(); }
 
-    // Converts back into impulse-response form.
-    PassOwnPtr<AudioChannel> createImpulseResponse();
-
     void reportMemoryUsage(MemoryObjectInfo*) const;
 
 private:
     // Note: this is destructive on the passed in AudioChannel.
     HRTFKernel(AudioChannel*, size_t fftSize, float sampleRate);
     
     HRTFKernel(PassOwnPtr<FFTFrame> fftFrame, float frameDelay, float sampleRate)
         : m_fftFrame(fftFrame)