Bug 1404997 - P4. Make AudioProxyThread use AutoTaskQueue. r=pehrsons
authorJean-Yves Avenard <jyavenard@mozilla.com>
Thu, 30 Nov 2017 16:27:37 +0100
changeset 448406 749e749345cd40f6798c6af865eeb4232bde4e5a
parent 448405 844325b36ac8659fc8df3d3a46d75cadbd16bc62
child 448407 40967b7fe56bec7cf2f70f81d88c0244fb9a61e6
push id: 8527
push user: Callek@gmail.com
push date: Thu, 11 Jan 2018 21:05:50 +0000
treeherder: mozilla-beta@95342d212a7a [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: pehrsons
bugs: 1404997
milestone: 59.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1404997 - P4. Make AudioProxyThread use AutoTaskQueue. r=pehrsons Also, pass arguments by const reference. We also rename class members as per the coding style. MozReview-Commit-ID: 9IkV8wCMpz7
media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -493,34 +493,28 @@ protected:
 // use multiple threads and a TaskQueue.
 class AudioProxyThread
 {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioProxyThread)
 
   explicit AudioProxyThread(AudioSessionConduit* aConduit)
     : mConduit(aConduit)
+    , mTaskQueue(new AutoTaskQueue(
+        SharedThreadPool::Get(NS_LITERAL_CSTRING("AudioProxy"), 1)))
   {
     MOZ_ASSERT(mConduit);
     MOZ_COUNT_CTOR(AudioProxyThread);
-
-    // Use only 1 thread; also forces FIFO operation
-    // We could use multiple threads, but that may be dicier with the webrtc.org
-    // code.  If so we'd need to use TaskQueues like the videoframe converter
-    RefPtr<SharedThreadPool> pool =
-      SharedThreadPool::Get(NS_LITERAL_CSTRING("AudioProxy"), 1);
-
-    mThread = pool.get();
   }
 
-  // called on mThread
   void InternalProcessAudioChunk(TrackRate rate,
-                                 AudioChunk& chunk,
+                                 const AudioChunk& chunk,
                                  bool enabled)
   {
+    MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
 
     // Convert to interleaved, 16-bits integer audio, with a maximum of two
     // channels (since the WebRTC.org code below makes the assumption that the
     // input audio is either mono or stereo).
     uint32_t outputChannels = chunk.ChannelCount() == 1 ? 1 : 2;
     const int16_t* samples = nullptr;
     UniquePtr<int16_t[]> convertedSamples;
 
@@ -557,60 +551,59 @@ public:
 
     // Check if the rate or the number of channels has changed since the last
     // time we came through. I realize it may be overkill to check if the rate
     // has changed, but I believe it is possible (e.g. if we change sources) and
     // it costs us very little to handle this case.
 
     uint32_t audio_10ms = rate / 100;
 
-    if (!packetizer_ || packetizer_->PacketSize() != audio_10ms ||
-        packetizer_->Channels() != outputChannels) {
+    if (!mPacketizer || mPacketizer->PacketSize() != audio_10ms ||
+        mPacketizer->Channels() != outputChannels) {
       // It's ok to drop the audio still in the packetizer here.
-      packetizer_ =
-        new AudioPacketizer<int16_t, int16_t>(audio_10ms, outputChannels);
+      mPacketizer = MakeUnique<AudioPacketizer<int16_t, int16_t>>(
+        audio_10ms, outputChannels);
     }
 
-    packetizer_->Input(samples, chunk.mDuration);
+    mPacketizer->Input(samples, chunk.mDuration);
 
-    while (packetizer_->PacketsAvailable()) {
-      packetizer_->Output(packet_);
+    while (mPacketizer->PacketsAvailable()) {
+      mPacketizer->Output(mPacket);
       mConduit->SendAudioFrame(
-        packet_, packetizer_->PacketSize(), rate, packetizer_->Channels(), 0);
+        mPacket, mPacketizer->PacketSize(), rate, mPacketizer->Channels(), 0);
     }
   }
 
-  void QueueAudioChunk(TrackRate rate, AudioChunk& chunk, bool enabled)
+  void QueueAudioChunk(TrackRate rate, const AudioChunk& chunk, bool enabled)
   {
-    RUN_ON_THREAD(mThread,
-                  WrapRunnable(RefPtr<AudioProxyThread>(this),
-                               &AudioProxyThread::InternalProcessAudioChunk,
-                               rate,
-                               chunk,
-                               enabled),
-                  NS_DISPATCH_NORMAL);
+    RefPtr<AudioProxyThread> self = this;
+    nsresult rv = mTaskQueue->Dispatch(NS_NewRunnableFunction(
+      "AudioProxyThread::QueueAudioChunk", [self, rate, chunk, enabled]() {
+        self->InternalProcessAudioChunk(rate, chunk, enabled);
+      }));
+    MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
   }
 
 protected:
   virtual ~AudioProxyThread()
   {
     // Conduits must be released on MainThread, and we might have the last
     // reference We don't need to worry about runnables still trying to access
     // the conduit, since the runnables hold a ref to AudioProxyThread.
     NS_ReleaseOnMainThreadSystemGroup("AudioProxyThread::mConduit",
                                       mConduit.forget());
     MOZ_COUNT_DTOR(AudioProxyThread);
   }
 
   RefPtr<AudioSessionConduit> mConduit;
-  nsCOMPtr<nsIEventTarget> mThread;
-  // Only accessed on mThread
-  nsAutoPtr<AudioPacketizer<int16_t, int16_t>> packetizer_;
+  RefPtr<AutoTaskQueue> mTaskQueue;
+  // Only accessed on mTaskQueue
+  UniquePtr<AudioPacketizer<int16_t, int16_t>> mPacketizer;
   // A buffer to hold a single packet of audio.
-  int16_t packet_[AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t)];
+  int16_t mPacket[AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t)];
 };
 
 static char kDTLSExporterLabel[] = "EXTRACTOR-dtls_srtp";
 
 MediaPipeline::MediaPipeline(const std::string& pc,
                              Direction direction,
                              nsCOMPtr<nsIEventTarget> main_thread,
                              nsCOMPtr<nsIEventTarget> sts_thread,