Bug 948267. Part 1 - add the interface DataSource to implement pull model and remove members no longer useful in the pull model. r=kinetik.
authorJW Wang <jwwang@mozilla.com>
Tue, 12 Jan 2016 21:48:25 +0800
changeset 320912 ec013d04843680fdf78e4865557dbd99734b3c3e
parent 320911 151695836c37eb591dab55cdb696d620b7092039
child 320913 9ce51229f0f9556c9e11e1ea201a4248ccfce989
push id9315
push useratolfsen@mozilla.com
push dateTue, 12 Jan 2016 19:08:25 +0000
reviewerskinetik
bugs948267
milestone46.0a1
Bug 948267. Part 1 - add the interface DataSource to implement pull model and remove members no longer useful in the pull model. r=kinetik.
dom/media/AudioStream.cpp
dom/media/AudioStream.h
--- a/dom/media/AudioStream.cpp
+++ b/dom/media/AudioStream.cpp
@@ -113,29 +113,29 @@ public:
     }
   }
 private:
   nsAutoTArray<Chunk, 7> mChunks;
   int64_t mBaseOffset;
   double mBasePosition;
 };
 
-AudioStream::AudioStream()
+AudioStream::AudioStream(DataSource& aSource)
   : mMonitor("AudioStream")
   , mInRate(0)
   , mOutRate(0)
   , mChannels(0)
   , mOutChannels(0)
-  , mWritten(0)
   , mAudioClock(this)
   , mTimeStretcher(nullptr)
   , mDumpFile(nullptr)
   , mBytesPerFrame(0)
   , mState(INITIALIZED)
   , mIsMonoAudioEnabled(gfxPrefs::MonoAudio())
+  , mDataSource(aSource)
 {
 }
 
 AudioStream::~AudioStream()
 {
   LOG(("AudioStream: delete %p, state %d", this, mState));
   MOZ_ASSERT(mState == SHUTDOWN && !mCubebStream,
              "Should've called Shutdown() before deleting an AudioStream");
@@ -151,18 +151,16 @@ size_t
 AudioStream::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
 {
   size_t amount = aMallocSizeOf(this);
 
   // Possibly add in the future:
   // - mTimeStretcher
   // - mCubebStream
 
-  amount += mBuffer.SizeOfExcludingThis(aMallocSizeOf);
-
   return amount;
 }
 
 nsresult AudioStream::EnsureTimeStretcherInitializedUnlocked()
 {
   mMonitor.AssertCurrentThreadOwns();
   if (!mTimeStretcher) {
     mTimeStretcher = soundtouch::createSoundTouchObj();
@@ -227,22 +225,16 @@ nsresult AudioStream::SetPreservesPitch(
     mTimeStretcher->setRate(mAudioClock.GetPlaybackRate());
   }
 
   mAudioClock.SetPreservesPitch(aPreservesPitch);
 
   return NS_OK;
 }
 
-int64_t AudioStream::GetWritten()
-{
-  MonitorAutoLock mon(mMonitor);
-  return mWritten;
-}
-
 static void SetUint16LE(uint8_t* aDest, uint16_t aValue)
 {
   aDest[0] = aValue & 0xFF;
   aDest[1] = aValue >> 8;
 }
 
 static void SetUint32LE(uint8_t* aDest, uint32_t aValue)
 {
@@ -346,23 +338,16 @@ AudioStream::Init(int32_t aNumChannels, 
     params.format = CUBEB_SAMPLE_S16NE;
   } else {
     params.format = CUBEB_SAMPLE_FLOAT32NE;
   }
   mBytesPerFrame = sizeof(AudioDataValue) * mOutChannels;
 
   mAudioClock.Init();
 
-  // Size mBuffer for one second of audio.  This value is arbitrary, and was
-  // selected based on the observed behaviour of the existing AudioStream
-  // implementations.
-  uint32_t bufferLimit = FramesToBytes(aRate);
-  MOZ_ASSERT(bufferLimit % mBytesPerFrame == 0, "Must buffer complete frames");
-  mBuffer.SetCapacity(bufferLimit);
-
   return OpenCubeb(params);
 }
 
 // This code used to live inside AudioStream::Init(), but on Mac (others?)
 // it has been known to take 300-800 (or even 8500) ms to execute(!)
 nsresult
 AudioStream::OpenCubeb(cubeb_stream_params &aParams)
 {
@@ -402,135 +387,54 @@ AudioStream::OpenCubeb(cubeb_stream_para
           (uint32_t) timeDelta.ToMilliseconds()));
     Telemetry::Accumulate(mIsFirst ? Telemetry::AUDIOSTREAM_FIRST_OPEN_MS :
         Telemetry::AUDIOSTREAM_LATER_OPEN_MS, timeDelta.ToMilliseconds());
   }
 
   return NS_OK;
 }
 
-// aTime is the time in ms the samples were inserted into MediaStreamGraph
-nsresult
-AudioStream::Write(const AudioDataValue* aBuf, uint32_t aFrames)
-{
-  MonitorAutoLock mon(mMonitor);
-
-  if (mState == ERRORED) {
-    return NS_ERROR_FAILURE;
-  }
-  NS_ASSERTION(mState == INITIALIZED || mState == STARTED || mState == RUNNING,
-    "Stream write in unexpected state.");
-
-  // Downmix to Stereo.
-  if (mChannels > 2 && mChannels <= 8) {
-    DownmixAudioToStereo(const_cast<AudioDataValue*> (aBuf), mChannels, aFrames);
-  } else if (mChannels > 8) {
-    return NS_ERROR_FAILURE;
-  }
-
-  if (mChannels >= 2 && mIsMonoAudioEnabled) {
-    DownmixStereoToMono(const_cast<AudioDataValue*> (aBuf), aFrames);
-  }
-
-  const uint8_t* src = reinterpret_cast<const uint8_t*>(aBuf);
-  uint32_t bytesToCopy = FramesToBytes(aFrames);
-
-  while (bytesToCopy > 0) {
-    uint32_t available = std::min(bytesToCopy, mBuffer.Available());
-    MOZ_ASSERT(available % mBytesPerFrame == 0,
-               "Must copy complete frames.");
-
-    mBuffer.AppendElements(src, available);
-    src += available;
-    bytesToCopy -= available;
-
-    if (bytesToCopy > 0) {
-     // If we are not playing, but our buffer is full, start playing to make
-     // room for soon-to-be-decoded data.
-     if (mState != STARTED && mState != RUNNING) {
-       MOZ_LOG(gAudioStreamLog, LogLevel::Warning, ("Starting stream %p in Write (%u waiting)",
-                                              this, bytesToCopy));
-       StartUnlocked();
-       if (mState == ERRORED) {
-         return NS_ERROR_FAILURE;
-       }
-     }
-     MOZ_LOG(gAudioStreamLog, LogLevel::Warning, ("Stream %p waiting in Write() (%u waiting)",
-                                              this, bytesToCopy));
-     mon.Wait();
-    }
-  }
-
-  mWritten += aFrames;
-  return NS_OK;
-}
-
-uint32_t
-AudioStream::Available()
-{
-  MonitorAutoLock mon(mMonitor);
-  MOZ_ASSERT(mBuffer.Length() % mBytesPerFrame == 0, "Buffer invariant violated.");
-  return BytesToFrames(mBuffer.Available());
-}
-
 void
 AudioStream::SetVolume(double aVolume)
 {
   MOZ_ASSERT(aVolume >= 0.0 && aVolume <= 1.0, "Invalid volume");
 
   if (cubeb_stream_set_volume(mCubebStream.get(), aVolume * CubebUtils::GetVolumeScale()) != CUBEB_OK) {
     NS_WARNING("Could not change volume on cubeb stream.");
   }
 }
 
 void
-AudioStream::Cancel()
-{
-  MonitorAutoLock mon(mMonitor);
-  mState = ERRORED;
-  mon.NotifyAll();
-}
-
-void
-AudioStream::Drain()
-{
-  MonitorAutoLock mon(mMonitor);
-  LOG(("AudioStream::Drain() for %p, state %d, avail %u", this, mState, mBuffer.Available()));
-  if (mState != STARTED && mState != RUNNING) {
-    NS_ASSERTION(mState == ERRORED || mBuffer.Available() == 0, "Draining without full buffer of unplayed audio");
-    return;
-  }
-  mState = DRAINING;
-  while (mState == DRAINING) {
-    mon.Wait();
-  }
-}
-
-void
 AudioStream::Start()
 {
   MonitorAutoLock mon(mMonitor);
   StartUnlocked();
 }
 
 void
 AudioStream::StartUnlocked()
 {
   mMonitor.AssertCurrentThreadOwns();
   if (!mCubebStream) {
     return;
   }
 
   if (mState == INITIALIZED) {
+    mState = STARTED;
     int r;
     {
       MonitorAutoUnlock mon(mMonitor);
       r = cubeb_stream_start(mCubebStream.get());
+      // DataCallback might be called before we exit this scope
+      // if cubeb_stream_start() succeeds. mState must be set to STARTED
+      // beforehand.
     }
-    mState = r == CUBEB_OK ? STARTED : ERRORED;
+    if (r != CUBEB_OK) {
+      mState = ERRORED;
+    }
     LOG(("AudioStream: started %p, state %s", this, mState == STARTED ? "STARTED" : "ERRORED"));
   }
 }
 
 void
 AudioStream::Pause()
 {
   MonitorAutoLock mon(mMonitor);
@@ -626,148 +530,177 @@ AudioStream::GetPositionInFramesUnlocked
 
 bool
 AudioStream::IsPaused()
 {
   MonitorAutoLock mon(mMonitor);
   return mState == STOPPED;
 }
 
+bool
+AudioStream::Downmix(AudioDataValue* aBuffer, uint32_t aFrames)
+{
+  if (mChannels > 8) {
+    return false;
+  }
+
+  if (mChannels > 2 && mChannels <= 8) {
+    DownmixAudioToStereo(aBuffer, mChannels, aFrames);
+  }
+
+  if (mChannels >= 2 && mIsMonoAudioEnabled) {
+    DownmixStereoToMono(aBuffer, aFrames);
+  }
+
+  return true;
+}
+
 long
 AudioStream::GetUnprocessed(void* aBuffer, long aFrames)
 {
   mMonitor.AssertCurrentThreadOwns();
   uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer);
 
   // Flush the timestretcher pipeline, if we were playing using a playback rate
   // other than 1.0.
   uint32_t flushedFrames = 0;
   if (mTimeStretcher && mTimeStretcher->numSamples()) {
     flushedFrames = mTimeStretcher->receiveSamples(reinterpret_cast<AudioDataValue*>(wpos), aFrames);
     wpos += FramesToBytes(flushedFrames);
+
+    // TODO: There might still be unprocessed samples in the stretcher.
+    // We should either remove or flush them so they won't end up in the
+    // output the next time we switch to a playback rate other than 1.0.
+    NS_WARN_IF(mTimeStretcher->numUnprocessedSamples() > 0);
   }
-  uint32_t toPopBytes = FramesToBytes(aFrames - flushedFrames);
-  uint32_t available = std::min(toPopBytes, mBuffer.Length());
 
-  void* input[2];
-  uint32_t input_size[2];
-  mBuffer.PopElements(available, &input[0], &input_size[0], &input[1], &input_size[1]);
-  memcpy(wpos, input[0], input_size[0]);
-  wpos += input_size[0];
-  memcpy(wpos, input[1], input_size[1]);
+  uint32_t toPopFrames = aFrames - flushedFrames;
+  while (toPopFrames > 0) {
+    UniquePtr<Chunk> c = mDataSource.PopFrames(toPopFrames);
+    if (c->Frames() == 0) {
+      break;
+    }
+    MOZ_ASSERT(c->Frames() <= toPopFrames);
+    if (Downmix(c->GetWritable(), c->Frames())) {
+      memcpy(wpos, c->Data(), FramesToBytes(c->Frames()));
+    } else {
+      // Write silence if downmixing fails.
+      memset(wpos, 0, FramesToBytes(c->Frames()));
+    }
+    wpos += FramesToBytes(c->Frames());
+    toPopFrames -= c->Frames();
+  }
 
-  return BytesToFrames(available) + flushedFrames;
+  return aFrames - toPopFrames;
 }
 
 long
 AudioStream::GetTimeStretched(void* aBuffer, long aFrames)
 {
   mMonitor.AssertCurrentThreadOwns();
-  long processedFrames = 0;
 
   // We need to call the non-locking version, because we already have the lock.
   if (EnsureTimeStretcherInitializedUnlocked() != NS_OK) {
     return 0;
   }
 
   uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer);
   double playbackRate = static_cast<double>(mInRate) / mOutRate;
-  uint32_t toPopBytes = FramesToBytes(ceil(aFrames * playbackRate));
-  uint32_t available = 0;
-  bool lowOnBufferedData = false;
-  do {
-    // Check if we already have enough data in the time stretcher pipeline.
-    if (mTimeStretcher->numSamples() <= static_cast<uint32_t>(aFrames)) {
-      void* input[2];
-      uint32_t input_size[2];
-      available = std::min(mBuffer.Length(), toPopBytes);
-      if (available != toPopBytes) {
-        lowOnBufferedData = true;
-      }
-      mBuffer.PopElements(available, &input[0], &input_size[0],
-                                     &input[1], &input_size[1]);
-      for(uint32_t i = 0; i < 2; i++) {
-        mTimeStretcher->putSamples(reinterpret_cast<AudioDataValue*>(input[i]), BytesToFrames(input_size[i]));
-      }
+  uint32_t toPopFrames = ceil(aFrames * playbackRate);
+
+  while (mTimeStretcher->numSamples() < static_cast<uint32_t>(aFrames)) {
+    UniquePtr<Chunk> c = mDataSource.PopFrames(toPopFrames);
+    if (c->Frames() == 0) {
+      break;
     }
-    uint32_t receivedFrames = mTimeStretcher->receiveSamples(reinterpret_cast<AudioDataValue*>(wpos), aFrames - processedFrames);
-    wpos += FramesToBytes(receivedFrames);
-    processedFrames += receivedFrames;
-  } while (processedFrames < aFrames && !lowOnBufferedData);
+    MOZ_ASSERT(c->Frames() <= toPopFrames);
+    if (Downmix(c->GetWritable(), c->Frames())) {
+      mTimeStretcher->putSamples(c->Data(), c->Frames());
+    } else {
+      // Write silence if downmixing fails.
+      nsAutoTArray<AudioDataValue, 1000> buf;
+      buf.SetLength(mOutChannels * c->Frames());
+      memset(buf.Elements(), 0, buf.Length() * sizeof(AudioDataValue));
+      mTimeStretcher->putSamples(buf.Elements(), c->Frames());
+    }
+  }
 
-  return processedFrames;
+  uint32_t receivedFrames = mTimeStretcher->receiveSamples(reinterpret_cast<AudioDataValue*>(wpos), aFrames);
+  wpos += FramesToBytes(receivedFrames);
+  return receivedFrames;
 }
 
 long
 AudioStream::DataCallback(void* aBuffer, long aFrames)
 {
   MonitorAutoLock mon(mMonitor);
   MOZ_ASSERT(mState != SHUTDOWN, "No data callback after shutdown");
-  uint32_t available = std::min(static_cast<uint32_t>(FramesToBytes(aFrames)), mBuffer.Length());
-  MOZ_ASSERT(available % mBytesPerFrame == 0, "Must copy complete frames");
-  AudioDataValue* output = reinterpret_cast<AudioDataValue*>(aBuffer);
   uint32_t underrunFrames = 0;
   uint32_t servicedFrames = 0;
 
+  // FIXME: cubeb_pulse sometimes calls us before cubeb_stream_start() is called.
+  // We don't want to consume audio data until Start() is called by the client.
+  if (mState == INITIALIZED) {
+    NS_WARNING("data callback fires before cubeb_stream_start() is called");
+    mAudioClock.UpdateFrameHistory(0, aFrames);
+    memset(aBuffer, 0, FramesToBytes(aFrames));
+    return aFrames;
+  }
+
   // NOTE: wasapi (others?) can call us back *after* stop()/Shutdown() (mState == SHUTDOWN)
   // Bug 996162
 
   // callback tells us cubeb succeeded initializing
   if (mState == STARTED) {
     mState = RUNNING;
   }
 
-  if (available) {
-    if (mInRate == mOutRate) {
-      servicedFrames = GetUnprocessed(output, aFrames);
-    } else {
-      servicedFrames = GetTimeStretched(output, aFrames);
-    }
-
-    MOZ_ASSERT(mBuffer.Length() % mBytesPerFrame == 0, "Must copy complete frames");
-
-    // Notify any blocked Write() call that more space is available in mBuffer.
-    mon.NotifyAll();
+  if (mInRate == mOutRate) {
+    servicedFrames = GetUnprocessed(aBuffer, aFrames);
+  } else {
+    servicedFrames = GetTimeStretched(aBuffer, aFrames);
   }
 
   underrunFrames = aFrames - servicedFrames;
 
   // Always send audible frames first, and silent frames later.
   // Otherwise it will break the assumption of FrameHistory.
-  if (mState != DRAINING) {
+  if (!mDataSource.Ended()) {
     mAudioClock.UpdateFrameHistory(servicedFrames, underrunFrames);
     uint8_t* rpos = static_cast<uint8_t*>(aBuffer) + FramesToBytes(aFrames - underrunFrames);
     memset(rpos, 0, FramesToBytes(underrunFrames));
     if (underrunFrames) {
       MOZ_LOG(gAudioStreamLog, LogLevel::Warning,
              ("AudioStream %p lost %d frames", this, underrunFrames));
     }
     servicedFrames += underrunFrames;
   } else {
+    // No more new data in the data source. Don't send silent frames so the
+    // cubeb stream can start draining.
     mAudioClock.UpdateFrameHistory(servicedFrames, 0);
   }
 
   WriteDumpFile(mDumpFile, this, aFrames, aBuffer);
 
   return servicedFrames;
 }
 
 void
 AudioStream::StateCallback(cubeb_state aState)
 {
   MonitorAutoLock mon(mMonitor);
   MOZ_ASSERT(mState != SHUTDOWN, "No state callback after shutdown");
   LOG(("AudioStream: StateCallback %p, mState=%d cubeb_state=%d", this, mState, aState));
   if (aState == CUBEB_STATE_DRAINED) {
     mState = DRAINED;
+    mDataSource.Drained();
   } else if (aState == CUBEB_STATE_ERROR) {
     LOG(("AudioStream::StateCallback() state %d cubeb error", mState));
     mState = ERRORED;
   }
-  mon.NotifyAll();
 }
 
 AudioClock::AudioClock(AudioStream* aStream)
  :mAudioStream(aStream),
   mOutRate(0),
   mInRate(0),
   mPreservesPitch(true),
   mFrameHistory(new FrameHistory())
--- a/dom/media/AudioStream.h
+++ b/dom/media/AudioStream.h
@@ -153,55 +153,61 @@ private:
 // GetPosition, GetPositionInFrames, SetVolume, and Get{Rate,Channels},
 // SetMicrophoneActive is thread-safe without external synchronization.
 class AudioStream final
 {
   virtual ~AudioStream();
 
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioStream)
-  AudioStream();
+
+  class Chunk {
+  public:
+    // Return a pointer to the audio data.
+    virtual const AudioDataValue* Data() const = 0;
+    // Return the number of frames in this chunk.
+    virtual uint32_t Frames() const = 0;
+    // Return a writable pointer for downmixing.
+    virtual AudioDataValue* GetWritable() const = 0;
+    virtual ~Chunk() {}
+  };
+
+  class DataSource {
+  public:
+    // Return a chunk containing at most aFrames frames, or a chunk with
+    // zero frames if the source has no frames at all.
+    virtual UniquePtr<Chunk> PopFrames(uint32_t aFrames) = 0;
+    // Return true if no more data will be added to the source.
+    virtual bool Ended() const = 0;
+    // Notify that all data is drained by the AudioStream.
+    virtual void Drained() = 0;
+  protected:
+    virtual ~DataSource() {}
+  };
+
+  explicit AudioStream(DataSource& aSource);
 
   // Initialize the audio stream. aNumChannels is the number of audio
   // channels (1 for mono, 2 for stereo, etc) and aRate is the sample rate
   // (22050Hz, 44100Hz, etc).
   nsresult Init(int32_t aNumChannels, int32_t aRate,
                 const dom::AudioChannel aAudioStreamChannel);
 
   // Closes the stream. All future use of the stream is an error.
   void Shutdown();
 
   void Reset();
 
-  // Write audio data to the audio hardware.  aBuf is an array of AudioDataValues
-  // AudioDataValue of length aFrames*mChannels.  If aFrames is larger
-  // than the result of Available(), the write will block until sufficient
-  // buffer space is available.
-  nsresult Write(const AudioDataValue* aBuf, uint32_t aFrames);
-
-  // Return the number of audio frames that can be written without blocking.
-  uint32_t Available();
-
   // Set the current volume of the audio playback. This is a value from
   // 0 (meaning muted) to 1 (meaning full volume).  Thread-safe.
   void SetVolume(double aVolume);
 
-  // Block until buffered audio data has been consumed.
-  void Drain();
-
-  // Break any blocking operation and set the stream to shutdown.
-  void Cancel();
-
   // Start the stream.
   void Start();
 
-  // Return the number of frames written so far in the stream. This allow the
-  // caller to check if it is safe to start the stream, if needed.
-  int64_t GetWritten();
-
   // Pause audio playback.
   void Pause();
 
   // Resume audio playback.
   void Resume();
 
   // Return the position in microseconds of the audio frame being played by
   // the audio hardware, compensated for playback rate change. Thread-safe.
@@ -249,53 +255,45 @@ private:
   }
 
 
   long DataCallback(void* aBuffer, long aFrames);
   void StateCallback(cubeb_state aState);
 
   nsresult EnsureTimeStretcherInitializedUnlocked();
 
+  // Return true if downmixing succeeds, otherwise false.
+  bool Downmix(AudioDataValue* aBuffer, uint32_t aFrames);
+
   long GetUnprocessed(void* aBuffer, long aFrames);
   long GetTimeStretched(void* aBuffer, long aFrames);
 
   void StartUnlocked();
 
-  // The monitor is held to protect all access to member variables.  Write()
-  // waits while mBuffer is full; DataCallback() notifies as it consumes
-  // data from mBuffer.  Drain() waits while mState is DRAINING;
-  // StateCallback() notifies when mState is DRAINED.
+  // The monitor is held to protect all access to member variables.
   Monitor mMonitor;
 
   // Input rate in Hz (characteristic of the media being played)
   int mInRate;
   // Output rate in Hz (characteristic of the playback rate)
   int mOutRate;
   int mChannels;
   int mOutChannels;
 #if defined(__ANDROID__)
   dom::AudioChannel mAudioChannel;
 #endif
-  // Number of frames written to the buffers.
-  int64_t mWritten;
   AudioClock mAudioClock;
   soundtouch::SoundTouch* mTimeStretcher;
 
   // Stream start time for stream open delay telemetry.
   TimeStamp mStartTime;
 
   // Output file for dumping audio
   FILE* mDumpFile;
 
-  // Temporary audio buffer.  Filled by Write() and consumed by
-  // DataCallback().  Once mBuffer is full, Write() blocks until sufficient
-  // space becomes available in mBuffer.  mBuffer is sized in bytes, not
-  // frames.
-  CircularByteBuffer mBuffer;
-
   // Owning reference to a cubeb_stream.
   UniquePtr<cubeb_stream, CubebDestroyPolicy> mCubebStream;
 
   uint32_t mBytesPerFrame;
 
   uint32_t BytesToFrames(uint32_t aBytes) {
     NS_ASSERTION(aBytes % mBytesPerFrame == 0,
                  "Byte count not aligned on frames size.");
@@ -306,26 +304,24 @@ private:
     return aFrames * mBytesPerFrame;
   }
 
   enum StreamState {
     INITIALIZED, // Initialized, playback has not begun.
     STARTED,     // cubeb started, but callbacks haven't started
     RUNNING,     // DataCallbacks have started after STARTED, or after Resume().
     STOPPED,     // Stopped by a call to Pause().
-    DRAINING,    // Drain requested.  DataCallback will indicate end of stream
-                 // once the remaining contents of mBuffer are requested by
-                 // cubeb, after which StateCallback will indicate drain
-                 // completion.
     DRAINED,     // StateCallback has indicated that the drain is complete.
     ERRORED,     // Stream disabled due to an internal error.
     SHUTDOWN     // Shutdown has been called
   };
 
   StreamState mState;
   bool mIsFirst;
   // Get this value from the preferece, if true, we would downmix the stereo.
   bool mIsMonoAudioEnabled;
+
+  DataSource& mDataSource;
 };
 
 } // namespace mozilla
 
 #endif