b=815643 Add Blink's HRTFPanner to the build r=ehsan
author: Karl Tomlinson <karlt+@karlt.net>
date: Fri, 09 Aug 2013 10:07:49 +1200
changeset: 142366 746b2ba6cf30
parent: 142365 d92240f69c48
child: 142367 62ad090a94a4
push id: 32374
push user: ktomlinson@mozilla.com
push date: Tue, 13 Aug 2013 02:49:14 +0000
treeherder: mozilla-inbound@62ad090a94a4
reviewers: ehsan
bugs: 815643
milestone: 26.0a1
content/media/webaudio/blink/HRTFPanner.cpp
content/media/webaudio/blink/HRTFPanner.h
content/media/webaudio/blink/Makefile.in
content/media/webaudio/blink/moz.build
--- a/content/media/webaudio/blink/HRTFPanner.cpp
+++ b/content/media/webaudio/blink/HRTFPanner.cpp
@@ -17,147 +17,146 @@
  * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include "config.h"
-
-#if ENABLE(WEB_AUDIO)
-
-#include "core/platform/audio/HRTFPanner.h"
+#include "HRTFPanner.h"
 
-#include <algorithm>
-#include "core/platform/audio/AudioBus.h"
-#include "core/platform/audio/FFTConvolver.h"
-#include "core/platform/audio/HRTFDatabase.h"
-#include <wtf/MathExtras.h>
-#include <wtf/RefPtr.h>
+#include "FFTConvolver.h"
+#include "HRTFDatabase.h"
+#include "WebAudioUtils.h"
 
 using namespace std;
+using namespace mozilla;
+using mozilla::dom::WebAudioUtils;
 
 namespace WebCore {
 
 // The value of 2 milliseconds is larger than the largest delay which exists in any HRTFKernel from the default HRTFDatabase (0.0136 seconds).
 // We ASSERT the delay values used in process() with this value.
 const double MaxDelayTimeSeconds = 0.002;
 
 const int UninitializedAzimuth = -1;
 const unsigned RenderingQuantum = 128;
 
 HRTFPanner::HRTFPanner(float sampleRate, HRTFDatabaseLoader* databaseLoader)
-    : Panner(PanningModelHRTF)
-    , m_databaseLoader(databaseLoader)
+    : m_databaseLoader(databaseLoader)
     , m_sampleRate(sampleRate)
     , m_crossfadeSelection(CrossfadeSelection1)
     , m_azimuthIndex1(UninitializedAzimuth)
     , m_elevation1(0)
     , m_azimuthIndex2(UninitializedAzimuth)
     , m_elevation2(0)
     , m_crossfadeX(0)
     , m_crossfadeIncr(0)
     , m_convolverL1(HRTFElevation::fftSizeForSampleRate(sampleRate))
-    , m_convolverR1(convolverL1.fftSize())
-    , m_convolverL2(convolverL1.fftSize())
-    , m_convolverR2(convolverL1.fftSize())
-    , m_delayLineL(MaxDelayTimeSeconds, sampleRate)
-    , m_delayLineR(MaxDelayTimeSeconds, sampleRate)
-    , m_tempL1(RenderingQuantum)
-    , m_tempR1(RenderingQuantum)
-    , m_tempL2(RenderingQuantum)
-    , m_tempR2(RenderingQuantum)
+    , m_convolverR1(m_convolverL1.fftSize())
+    , m_convolverL2(m_convolverL1.fftSize())
+    , m_convolverR2(m_convolverL1.fftSize())
+    , m_delayLineL(ceilf(MaxDelayTimeSeconds * sampleRate),
+                   WebAudioUtils::ComputeSmoothingRate(0.02, sampleRate))
+    , m_delayLineR(ceilf(MaxDelayTimeSeconds * sampleRate),
+                   WebAudioUtils::ComputeSmoothingRate(0.02, sampleRate))
 {
-    ASSERT(databaseLoader);
+    MOZ_ASSERT(databaseLoader);
+    m_tempL1.SetLength(RenderingQuantum);
+    m_tempR1.SetLength(RenderingQuantum);
+    m_tempL2.SetLength(RenderingQuantum);
+    m_tempR2.SetLength(RenderingQuantum);
 }
 
 HRTFPanner::~HRTFPanner()
 {
 }
 
 void HRTFPanner::reset()
 {
     m_convolverL1.reset();
     m_convolverR1.reset();
     m_convolverL2.reset();
     m_convolverR2.reset();
-    m_delayLineL.reset();
-    m_delayLineR.reset();
+    m_delayLineL.Reset();
+    m_delayLineR.Reset();
 }
 
 int HRTFPanner::calculateDesiredAzimuthIndexAndBlend(double azimuth, double& azimuthBlend)
 {
     // Convert the azimuth angle from the range -180 -> +180 into the range 0 -> 360.
     // The azimuth index may then be calculated from this positive value.
     if (azimuth < 0)
         azimuth += 360.0;
 
     HRTFDatabase* database = m_databaseLoader->database();
-    ASSERT(database);
+    MOZ_ASSERT(database);
 
     int numberOfAzimuths = database->numberOfAzimuths();
     const double angleBetweenAzimuths = 360.0 / numberOfAzimuths;
 
     // Calculate the azimuth index and the blend (0 -> 1) for interpolation.
     double desiredAzimuthIndexFloat = azimuth / angleBetweenAzimuths;
     int desiredAzimuthIndex = static_cast<int>(desiredAzimuthIndexFloat);
     azimuthBlend = desiredAzimuthIndexFloat - static_cast<double>(desiredAzimuthIndex);
 
     // We don't immediately start using this azimuth index, but instead approach this index from the last index we rendered at.
     // This minimizes the clicks and graininess for moving sources which occur otherwise.
     desiredAzimuthIndex = max(0, desiredAzimuthIndex);
     desiredAzimuthIndex = min(numberOfAzimuths - 1, desiredAzimuthIndex);
     return desiredAzimuthIndex;
 }
 
-void HRTFPanner::pan(double desiredAzimuth, double elevation, const AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess)
+void HRTFPanner::pan(double desiredAzimuth, double elevation, const AudioChunk* inputBus, AudioChunk* outputBus, TrackTicks framesToProcess)
 {
-    unsigned numInputChannels = inputBus ? inputBus->numberOfChannels() : 0;
+    unsigned numInputChannels =
+        inputBus->IsNull() ? 0 : inputBus->mChannelData.Length();
 
     bool isInputGood = inputBus &&  numInputChannels >= 1 && numInputChannels <= 2;
-    ASSERT(isInputGood);
+    MOZ_ASSERT(isInputGood);
+    MOZ_ASSERT(framesToProcess <= inputBus->mDuration);
 
-    bool isOutputGood = outputBus && outputBus->numberOfChannels() == 2 && framesToProcess <= outputBus->length();
-    ASSERT(isOutputGood);
+    bool isOutputGood = outputBus && outputBus->mChannelData.Length() == 2 && framesToProcess <= outputBus->mDuration;
+    MOZ_ASSERT(isOutputGood);
 
     if (!isInputGood || !isOutputGood) {
         if (outputBus)
-            outputBus->zero();
+            outputBus->SetNull(outputBus->mDuration);
         return;
     }
 
     HRTFDatabase* database = m_databaseLoader->database();
-    ASSERT(database);
+    MOZ_ASSERT(database);
     if (!database) {
-        outputBus->zero();
+        outputBus->SetNull(outputBus->mDuration);
         return;
     }
 
     // IRCAM HRTF azimuths values from the loaded database is reversed from the panner's notion of azimuth.
     double azimuth = -desiredAzimuth;
 
     bool isAzimuthGood = azimuth >= -180.0 && azimuth <= 180.0;
-    ASSERT(isAzimuthGood);
+    MOZ_ASSERT(isAzimuthGood);
     if (!isAzimuthGood) {
-        outputBus->zero();
+        outputBus->SetNull(outputBus->mDuration);
         return;
     }
 
     // Normally, we'll just be dealing with mono sources.
     // If we have a stereo input, implement stereo panning with left source processed by left HRTF, and right source by right HRTF.
-    const AudioChannel* inputChannelL = inputBus->channelByType(AudioBus::ChannelLeft);
-    const AudioChannel* inputChannelR = numInputChannels > 1 ? inputBus->channelByType(AudioBus::ChannelRight) : 0;
 
     // Get source and destination pointers.
-    const float* sourceL = inputChannelL->data();
-    const float* sourceR = numInputChannels > 1 ? inputChannelR->data() : sourceL;
-    float* destinationL = outputBus->channelByType(AudioBus::ChannelLeft)->mutableData();
-    float* destinationR = outputBus->channelByType(AudioBus::ChannelRight)->mutableData();
+    const float* sourceL = static_cast<const float*>(inputBus->mChannelData[0]);
+    const float* sourceR = numInputChannels > 1 ?
+        static_cast<const float*>(inputBus->mChannelData[1]) : sourceL;
+    float* destinationL =
+        static_cast<float*>(const_cast<void*>(outputBus->mChannelData[0]));
+    float* destinationR =
+        static_cast<float*>(const_cast<void*>(outputBus->mChannelData[1]));
 
     double azimuthBlend;
     int desiredAzimuthIndex = calculateDesiredAzimuthIndexAndBlend(azimuth, azimuthBlend);
 
     // Initially snap azimuth and elevation values to first values encountered.
     if (m_azimuthIndex1 == UninitializedAzimuth) {
         m_azimuthIndex1 = desiredAzimuthIndex;
         m_elevation1 = elevation;
@@ -186,18 +185,18 @@ void HRTFPanner::pan(double desiredAzimu
             // Cross-fade from 2 -> 1
             m_crossfadeIncr = -1 / fadeFrames;
             m_azimuthIndex1 = desiredAzimuthIndex;
             m_elevation1 = elevation;
         }
     }
 
     // This algorithm currently requires that we process in power-of-two size chunks at least RenderingQuantum.
-    ASSERT(1UL << static_cast<int>(log2(framesToProcess)) == framesToProcess);
-    ASSERT(framesToProcess >= RenderingQuantum);
+    MOZ_ASSERT(framesToProcess && 0 == (framesToProcess & (framesToProcess - 1)));
+    MOZ_ASSERT(framesToProcess >= RenderingQuantum);
 
     const unsigned framesPerSegment = RenderingQuantum;
     const unsigned numberOfSegments = framesToProcess / framesPerSegment;
 
     for (unsigned segment = 0; segment < numberOfSegments; ++segment) {
         // Get the HRTFKernels and interpolated delays.
         HRTFKernel* kernelL1;
         HRTFKernel* kernelR1;
@@ -206,49 +205,47 @@ void HRTFPanner::pan(double desiredAzimu
         double frameDelayL1;
         double frameDelayR1;
         double frameDelayL2;
         double frameDelayR2;
         database->getKernelsFromAzimuthElevation(azimuthBlend, m_azimuthIndex1, m_elevation1, kernelL1, kernelR1, frameDelayL1, frameDelayR1);
         database->getKernelsFromAzimuthElevation(azimuthBlend, m_azimuthIndex2, m_elevation2, kernelL2, kernelR2, frameDelayL2, frameDelayR2);
 
         bool areKernelsGood = kernelL1 && kernelR1 && kernelL2 && kernelR2;
-        ASSERT(areKernelsGood);
+        MOZ_ASSERT(areKernelsGood);
         if (!areKernelsGood) {
-            outputBus->zero();
+            outputBus->SetNull(outputBus->mDuration);
             return;
         }
 
-        ASSERT(frameDelayL1 / sampleRate() < MaxDelayTimeSeconds && frameDelayR1 / sampleRate() < MaxDelayTimeSeconds);
-        ASSERT(frameDelayL2 / sampleRate() < MaxDelayTimeSeconds && frameDelayR2 / sampleRate() < MaxDelayTimeSeconds);
+        MOZ_ASSERT(frameDelayL1 / sampleRate() < MaxDelayTimeSeconds && frameDelayR1 / sampleRate() < MaxDelayTimeSeconds);
+        MOZ_ASSERT(frameDelayL2 / sampleRate() < MaxDelayTimeSeconds && frameDelayR2 / sampleRate() < MaxDelayTimeSeconds);
 
         // Crossfade inter-aural delays based on transitions.
         double frameDelayL = (1 - m_crossfadeX) * frameDelayL1 + m_crossfadeX * frameDelayL2;
         double frameDelayR = (1 - m_crossfadeX) * frameDelayR1 + m_crossfadeX * frameDelayR2;
 
         // Calculate the source and destination pointers for the current segment.
         unsigned offset = segment * framesPerSegment;
         const float* segmentSourceL = sourceL + offset;
         const float* segmentSourceR = sourceR + offset;
         float* segmentDestinationL = destinationL + offset;
         float* segmentDestinationR = destinationR + offset;
 
         // First run through delay lines for inter-aural time difference.
-        m_delayLineL.setDelayFrames(frameDelayL);
-        m_delayLineR.setDelayFrames(frameDelayR);
-        m_delayLineL.process(segmentSourceL, segmentDestinationL, framesPerSegment);
-        m_delayLineR.process(segmentSourceR, segmentDestinationR, framesPerSegment);
+        m_delayLineL.Process(frameDelayL, &segmentSourceL, &segmentDestinationL, 1, framesPerSegment);
+        m_delayLineR.Process(frameDelayR, &segmentSourceR, &segmentDestinationR, 1, framesPerSegment);
 
         bool needsCrossfading = m_crossfadeIncr;
         
         // Have the convolvers render directly to the final destination if we're not cross-fading.
-        float* convolutionDestinationL1 = needsCrossfading ? m_tempL1.data() : segmentDestinationL;
-        float* convolutionDestinationR1 = needsCrossfading ? m_tempR1.data() : segmentDestinationR;
-        float* convolutionDestinationL2 = needsCrossfading ? m_tempL2.data() : segmentDestinationL;
-        float* convolutionDestinationR2 = needsCrossfading ? m_tempR2.data() : segmentDestinationR;
+        float* convolutionDestinationL1 = needsCrossfading ? m_tempL1.Elements() : segmentDestinationL;
+        float* convolutionDestinationR1 = needsCrossfading ? m_tempR1.Elements() : segmentDestinationR;
+        float* convolutionDestinationL2 = needsCrossfading ? m_tempL2.Elements() : segmentDestinationL;
+        float* convolutionDestinationR2 = needsCrossfading ? m_tempR2.Elements() : segmentDestinationR;
 
         // Now do the convolutions.
         // Note that we avoid doing convolutions on both sets of convolvers if we're not currently cross-fading.
         
         if (m_crossfadeSelection == CrossfadeSelection1 || needsCrossfading) {
             m_convolverL1.process(kernelL1->fftFrame(), segmentDestinationL, convolutionDestinationL1, framesPerSegment);
             m_convolverR1.process(kernelR1->fftFrame(), segmentDestinationR, convolutionDestinationR1, framesPerSegment);
         }
@@ -296,10 +293,8 @@ double HRTFPanner::tailTime() const
 double HRTFPanner::latencyTime() const
 {
     // The latency of a FFTConvolver is also fftSize() / 2, and is in addition to its tailTime of the
     // same value.
     return (fftSize() / 2) / static_cast<double>(sampleRate());
 }
 
 } // namespace WebCore
-
-#endif // ENABLE(WEB_AUDIO)
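
The delay-line rewrite above is the least mechanical change in this file: Blink's DelayDSPKernel was constructed from a maximum delay in seconds plus the sample rate, and was given its target via setDelayFrames() before each process() call, whereas Gecko's DelayProcessor takes a maximum delay in ticks (hence the ceilf(MaxDelayTimeSeconds * sampleRate)) and a smoothing rate at construction, and receives the target delay as the first argument of Process(). A minimal sketch of what that smoothing rate implies, assuming WebAudioUtils::ComputeSmoothingRate(duration, sampleRate) has the usual one-pole form (an assumption for illustration, not a copy of the Gecko helper):

#include <cmath>
#include <cstdio>

// Hypothetical stand-in for WebAudioUtils::ComputeSmoothingRate(): the
// per-tick coefficient of a one-pole lowpass with a time constant of
// `duration` seconds at `sampleRate` (assumed form, not the Gecko source).
static double ComputeSmoothingRateSketch(double duration, double sampleRate)
{
    return 1.0 - std::exp(-1.0 / (duration * sampleRate));
}

int main()
{
    double rate = ComputeSmoothingRateSketch(0.02, 44100.0);
    // Each tick the delay moves toward the target passed to Process():
    //     currentDelay += (targetDelay - currentDelay) * rate;
    // so the 0.02 above smooths delay changes over roughly 20 ms rather
    // than jumping, which is what keeps moving sources from clicking.
    std::printf("smoothing rate at 44.1 kHz: %f\n", rate);
    return 0;
}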
--- a/content/media/webaudio/blink/HRTFPanner.h
+++ b/content/media/webaudio/blink/HRTFPanner.h
@@ -20,45 +20,50 @@
  * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef HRTFPanner_h
 #define HRTFPanner_h
 
-#include "core/platform/audio/FFTConvolver.h"
-#include "core/platform/audio/HRTFDatabaseLoader.h"
-#include "core/platform/audio/Panner.h"
-#include "modules/webaudio/DelayDSPKernel.h"
+#include "FFTConvolver.h"
+#include "HRTFDatabaseLoader.h"
+#include "DelayProcessor.h"
+
+namespace mozilla {
+struct AudioChunk;
+}
 
 namespace WebCore {
 
-class HRTFPanner : public Panner {
+using mozilla::AudioChunk;
+
+class HRTFPanner {
 public:
     HRTFPanner(float sampleRate, HRTFDatabaseLoader*);
-    virtual ~HRTFPanner();
+    ~HRTFPanner();
 
-    // Panner
-    virtual void pan(double azimuth, double elevation, const AudioBus* inputBus, AudioBus* outputBus, size_t framesToProcess);
-    virtual void reset();
+    // framesToProcess must be a power of 2 and greater than 128
+    void pan(double azimuth, double elevation, const AudioChunk* inputBus, AudioChunk* outputBus, mozilla::TrackTicks framesToProcess);
+    void reset();
 
     size_t fftSize() const { return m_convolverL1.fftSize(); }
 
     float sampleRate() const { return m_sampleRate; }
 
-    virtual double tailTime() const OVERRIDE;
-    virtual double latencyTime() const OVERRIDE;
+    double tailTime() const;
+    double latencyTime() const;
 
 private:
     // Given an azimuth angle in the range -180 -> +180, returns the corresponding azimuth index for the database,
     // and azimuthBlend which is an interpolation value from 0 -> 1.
     int calculateDesiredAzimuthIndexAndBlend(double azimuth, double& azimuthBlend);
 
-    RefPtr<HRTFDatabaseLoader> m_databaseLoader;
+    mozilla::RefPtr<HRTFDatabaseLoader> m_databaseLoader;
 
     float m_sampleRate;
 
     // We maintain two sets of convolvers for smooth cross-faded interpolations when
     // then azimuth and elevation are dynamically changing.
     // When the azimuth and elevation are not changing, we simply process with one of the two sets.
     // Initially we use CrossfadeSelection1 corresponding to m_convolverL1 and m_convolverR1.
     // Whenever the azimuth or elevation changes, a crossfade is initiated to transition
@@ -89,18 +94,18 @@ private:
     // Per-sample-frame crossfade value increment.
     float m_crossfadeIncr;
 
     FFTConvolver m_convolverL1;
     FFTConvolver m_convolverR1;
     FFTConvolver m_convolverL2;
     FFTConvolver m_convolverR2;
 
-    DelayDSPKernel m_delayLineL;
-    DelayDSPKernel m_delayLineR;
+    mozilla::DelayProcessor m_delayLineL;
+    mozilla::DelayProcessor m_delayLineR;
 
     AudioFloatArray m_tempL1;
     AudioFloatArray m_tempR1;
     AudioFloatArray m_tempL2;
     AudioFloatArray m_tempR2;
 };
 
 } // namespace WebCore
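
One detail worth calling out from the pan() hunk above: the old assertion round-tripped framesToProcess through floating-point log2(), while the replacement uses the standard bit trick for the power-of-two requirement that this header documents. A self-contained sketch of why the two forms agree:

#include <cassert>
#include <cstdint>

// A power of two has exactly one set bit, so subtracting 1 flips every
// bit below it and x & (x - 1) becomes zero; for any other nonzero x the
// highest set bit survives. The leading "x &&" makes zero return false.
static bool IsPowerOfTwo(uint64_t x)
{
    return x && (x & (x - 1)) == 0;
}

int main()
{
    assert(IsPowerOfTwo(128));    // RenderingQuantum, the smallest legal size
    assert(IsPowerOfTwo(256));
    assert(!IsPowerOfTwo(96));
    assert(!IsPowerOfTwo(0));
    return 0;
}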
--- a/content/media/webaudio/blink/Makefile.in
+++ b/content/media/webaudio/blink/Makefile.in
@@ -7,10 +7,12 @@ topsrcdir        := @top_srcdir@
 srcdir           := @srcdir@
 VPATH            := @srcdir@
 
 include $(DEPTH)/config/autoconf.mk
 
 LIBRARY_NAME   := gkconwebaudio_blink_s
 LIBXUL_LIBRARY := 1
 
+LOCAL_INCLUDES += -I$(topsrcdir)/content/media/webaudio
+
 include $(topsrcdir)/config/rules.mk
 include $(topsrcdir)/ipc/chromium/chromium-config.mk
--- a/content/media/webaudio/blink/moz.build
+++ b/content/media/webaudio/blink/moz.build
@@ -11,16 +11,17 @@ CPP_SOURCES += [
     'DirectConvolver.cpp',
     'DynamicsCompressor.cpp',
     'DynamicsCompressorKernel.cpp',
     'FFTConvolver.cpp',
     'HRTFDatabase.cpp',
     'HRTFDatabaseLoader.cpp',
     'HRTFElevation.cpp',
     'HRTFKernel.cpp',
+    'HRTFPanner.cpp',
     'Reverb.cpp',
     'ReverbAccumulationBuffer.cpp',
     'ReverbConvolver.cpp',
     'ReverbConvolverStage.cpp',
     'ReverbInputBuffer.cpp',
     'ZeroPole.cpp',
 ]
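
Finally, to make calculateDesiredAzimuthIndexAndBlend() concrete, here is a standalone rendition with a worked value. The azimuth count used below is hypothetical; the real one comes from HRTFDatabase::numberOfAzimuths():

#include <algorithm>
#include <cstdio>

// Standalone rendition of calculateDesiredAzimuthIndexAndBlend(): map the
// azimuth into 0..360, split it into a database index plus a 0..1 blend
// for interpolating between adjacent HRTFKernels, and clamp the index.
int DesiredAzimuthIndexAndBlend(double azimuth, int numberOfAzimuths,
                                double& azimuthBlend)
{
    if (azimuth < 0)
        azimuth += 360.0;                      // -180..180 becomes 0..360
    const double angleBetweenAzimuths = 360.0 / numberOfAzimuths;
    double indexFloat = azimuth / angleBetweenAzimuths;
    int index = static_cast<int>(indexFloat);
    azimuthBlend = indexFloat - index;         // fractional part, 0..1
    return std::min(numberOfAzimuths - 1, std::max(0, index));
}

int main()
{
    double blend;
    // With, say, 240 azimuths (1.5 degrees apart), an azimuth of 100
    // degrees is 100 / 1.5 = 66.67 steps: index 66, blend 2/3.
    int index = DesiredAzimuthIndexAndBlend(100.0, 240, blend);
    std::printf("index=%d blend=%.3f\n", index, blend); // index=66 blend=0.667
    return 0;
}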