Bug 1404997 - P4. Make AudioProxyThread use AutoTaskQueue. r?pehrsons draft
authorJean-Yves Avenard <jyavenard@mozilla.com>
Thu, 30 Nov 2017 16:27:37 +0100
changeset 710534 a14d16aba30c72a2f1a60f6db2812cfe68423393
parent 710533 e4d4a60c2a27fcf0963af3d3890d32a4b0c57291
child 710535 08ca501afd5f337e94abed2470d52a06c5383de6
push id92845
push userbmo:jyavenard@mozilla.com
push dateSun, 10 Dec 2017 22:43:10 +0000
reviewerspehrsons
bugs1404997
milestone59.0a1
Bug 1404997 - P4. Make AudioProxyThread use AutoTaskQueue. r?pehrsons Also, pass arguments as const references. We also rename class members as per coding style. MozReview-Commit-ID: 9IkV8wCMpz7
media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -493,34 +493,28 @@ protected:
 // use multiple threads and a TaskQueue.
 class AudioProxyThread
 {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioProxyThread)
 
   explicit AudioProxyThread(AudioSessionConduit* aConduit)
     : mConduit(aConduit)
+    , mTaskQueue(new AutoTaskQueue(
+        SharedThreadPool::Get(NS_LITERAL_CSTRING("AudioProxy"), 1)))
   {
     MOZ_ASSERT(mConduit);
     MOZ_COUNT_CTOR(AudioProxyThread);
-
-    // Use only 1 thread; also forces FIFO operation
-    // We could use multiple threads, but that may be dicier with the webrtc.org
-    // code.  If so we'd need to use TaskQueues like the videoframe converter
-    RefPtr<SharedThreadPool> pool =
-      SharedThreadPool::Get(NS_LITERAL_CSTRING("AudioProxy"), 1);
-
-    mThread = pool.get();
   }
 
-  // called on mThread
   void InternalProcessAudioChunk(TrackRate rate,
-                                 AudioChunk& chunk,
+                                 const AudioChunk& chunk,
                                  bool enabled)
   {
+    MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
 
     // Convert to interleaved, 16-bits integer audio, with a maximum of two
     // channels (since the WebRTC.org code below makes the assumption that the
     // input audio is either mono or stereo).
     uint32_t outputChannels = chunk.ChannelCount() == 1 ? 1 : 2;
     const int16_t* samples = nullptr;
     UniquePtr<int16_t[]> convertedSamples;
 
@@ -557,60 +551,59 @@ public:
 
     // Check if the rate or the number of channels has changed since the last
     // time we came through. I realize it may be overkill to check if the rate
     // has changed, but I believe it is possible (e.g. if we change sources) and
     // it costs us very little to handle this case.
 
     uint32_t audio_10ms = rate / 100;
 
-    if (!packetizer_ || packetizer_->PacketSize() != audio_10ms ||
-        packetizer_->Channels() != outputChannels) {
+    if (!mPacketizer || mPacketizer->PacketSize() != audio_10ms ||
+        mPacketizer->Channels() != outputChannels) {
       // It's ok to drop the audio still in the packetizer here.
-      packetizer_ =
-        new AudioPacketizer<int16_t, int16_t>(audio_10ms, outputChannels);
+      mPacketizer = MakeUnique<AudioPacketizer<int16_t, int16_t>>(
+        audio_10ms, outputChannels);
     }
 
-    packetizer_->Input(samples, chunk.mDuration);
+    mPacketizer->Input(samples, chunk.mDuration);
 
-    while (packetizer_->PacketsAvailable()) {
-      packetizer_->Output(packet_);
+    while (mPacketizer->PacketsAvailable()) {
+      mPacketizer->Output(mPacket);
       mConduit->SendAudioFrame(
-        packet_, packetizer_->PacketSize(), rate, packetizer_->Channels(), 0);
+        mPacket, mPacketizer->PacketSize(), rate, mPacketizer->Channels(), 0);
     }
   }
 
-  void QueueAudioChunk(TrackRate rate, AudioChunk& chunk, bool enabled)
+  void QueueAudioChunk(TrackRate rate, const AudioChunk& chunk, bool enabled)
   {
-    RUN_ON_THREAD(mThread,
-                  WrapRunnable(RefPtr<AudioProxyThread>(this),
-                               &AudioProxyThread::InternalProcessAudioChunk,
-                               rate,
-                               chunk,
-                               enabled),
-                  NS_DISPATCH_NORMAL);
+    RefPtr<AudioProxyThread> self = this;
+    nsresult rv = mTaskQueue->Dispatch(NS_NewRunnableFunction(
+      "AudioProxyThread::QueueAudioChunk", [self, rate, chunk, enabled]() {
+        self->InternalProcessAudioChunk(rate, chunk, enabled);
+      }));
+    MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
   }
 
 protected:
   virtual ~AudioProxyThread()
   {
     // Conduits must be released on MainThread, and we might have the last
     // reference We don't need to worry about runnables still trying to access
     // the conduit, since the runnables hold a ref to AudioProxyThread.
     NS_ReleaseOnMainThreadSystemGroup("AudioProxyThread::mConduit",
                                       mConduit.forget());
     MOZ_COUNT_DTOR(AudioProxyThread);
   }
 
   RefPtr<AudioSessionConduit> mConduit;
-  nsCOMPtr<nsIEventTarget> mThread;
-  // Only accessed on mThread
-  nsAutoPtr<AudioPacketizer<int16_t, int16_t>> packetizer_;
+  RefPtr<AutoTaskQueue> mTaskQueue;
+  // Only accessed on mTaskQueue
+  UniquePtr<AudioPacketizer<int16_t, int16_t>> mPacketizer;
   // A buffer to hold a single packet of audio.
-  int16_t packet_[AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t)];
+  int16_t mPacket[AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t)];
 };
 
 static char kDTLSExporterLabel[] = "EXTRACTOR-dtls_srtp";
 
 MediaPipeline::MediaPipeline(const std::string& pc,
                              Direction direction,
                              nsCOMPtr<nsIEventTarget> main_thread,
                              nsCOMPtr<nsIEventTarget> sts_thread,