Backed out changeset ba3e7b53306b (bug 1271585)
author Sebastian Hengst <archaeopteryx@coole-files.de>
Mon, 16 May 2016 16:18:57 +0200
changeset 338540 10ea1cabf6d4676c3a4f6f07948eb41ed97395e0
parent 338539 8fcdc48065b79993547064cdca678827d3f55031
child 338541 349f5ef87e5be1c7be0a24c60895c0efe3fa2495
push id 1183
push user raliiev@mozilla.com
push date Mon, 05 Sep 2016 20:01:49 +0000
treeherder mozilla-release@3148731bed45
bugs 1271585
milestone 49.0a1
backs out ba3e7b53306bbd0e1e758ce14073895ebd86ff5e
media/webrtc/signaling/src/media-conduit/VideoConduit.h
media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
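
In summary (all of this is visible in the hunks below): the backout reverts bug 1271585's move of audio-chunk processing off the MediaStreamGraph thread. The AudioProxyThread helper class, which bounced each AudioChunk to a single-threaded "AudioProxy" pool, is deleted from MediaPipeline.cpp; PipelineListener regains its ProcessAudioChunk() method and its packetizer_ member; and the mozilla/SharedThreadPool.h include moves back from MediaPipeline.cpp to VideoConduit.h.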
--- a/media/webrtc/signaling/src/media-conduit/VideoConduit.h
+++ b/media/webrtc/signaling/src/media-conduit/VideoConduit.h
@@ -3,16 +3,17 @@
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef VIDEO_SESSION_H_
 #define VIDEO_SESSION_H_
 
 #include "nsAutoPtr.h"
 #include "mozilla/Attributes.h"
 #include "mozilla/Atomics.h"
+#include "mozilla/SharedThreadPool.h"
 
 #include "MediaConduitInterface.h"
 #include "MediaEngineWrapper.h"
 #include "CodecStatistics.h"
 #include "LoadManagerFactory.h"
 #include "LoadManager.h"
 #include "runnable_utils.h"
 
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -37,17 +37,16 @@
 #include "MediaPipelineFilter.h"
 #include "databuffer.h"
 #include "transportflow.h"
 #include "transportlayer.h"
 #include "transportlayerdtls.h"
 #include "transportlayerice.h"
 #include "runnable_utils.h"
 #include "libyuv/convert.h"
-#include "mozilla/SharedThreadPool.h"
 #if !defined(MOZILLA_EXTERNAL_LINKAGE)
 #include "mozilla/PeerIdentity.h"
 #include "mozilla/TaskQueue.h"
 #endif
 #include "mozilla/gfx/Point.h"
 #include "mozilla/gfx/Types.h"
 #include "mozilla/UniquePtr.h"
 #include "mozilla/UniquePtrExtensions.h"
@@ -453,139 +452,16 @@ protected:
 #endif
 
   // mMutex guards the below variables.
   Mutex mMutex;
   nsTArray<RefPtr<VideoConverterListener>> mListeners;
 };
 #endif
 
-// An async inserter for audio data, to avoid running audio codec encoders
-// on the MSG/input audio thread.  Basically just bounces all the audio
-// data to a single audio processing/input queue.  We could if we wanted to
-// use multiple threads and a TaskQueue.
-class AudioProxyThread
-{
-public:
-  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioProxyThread)
-
-  AudioProxyThread()
-  {
-    MOZ_COUNT_CTOR(AudioProxyThread);
-
-#if !defined(MOZILLA_EXTERNAL_LINKAGE)
-    // Use only 1 thread; also forces FIFO operation
-    // We could use multiple threads, but that may be dicier with the webrtc.org
-    // code.  If so we'd need to use TaskQueues like the videoframe converter
-    RefPtr<SharedThreadPool> pool =
-      SharedThreadPool::Get(NS_LITERAL_CSTRING("AudioProxy"), 1);
-
-    mThread = pool.get();
-#else
-    nsCOMPtr<nsIThread> thread;
-    if (!NS_WARN_IF(NS_FAILED(NS_NewNamedThread("AudioProxy", getter_AddRefs(thread))))) {
-      mThread = thread;
-    }
-#endif
-  }
-
-  // called on mThread
-  void InternalProcessAudioChunk(
-    AudioSessionConduit *conduit,
-    TrackRate rate,
-    AudioChunk& chunk,
-    bool enabled) {
-
-    // Convert to interleaved, 16-bits integer audio, with a maximum of two
-    // channels (since the WebRTC.org code below makes the assumption that the
-    // input audio is either mono or stereo).
-    uint32_t outputChannels = chunk.ChannelCount() == 1 ? 1 : 2;
-    const int16_t* samples = nullptr;
-    UniquePtr<int16_t[]> convertedSamples;
-
-    // We take advantage of the fact that the common case (microphone directly to
-    // PeerConnection, that is, a normal call), the samples are already 16-bits
-    // mono, so the representation in interleaved and planar is the same, and we
-    // can just use that.
-    if (enabled && outputChannels == 1 && chunk.mBufferFormat == AUDIO_FORMAT_S16) {
-      samples = chunk.ChannelData<int16_t>().Elements()[0];
-    } else {
-      convertedSamples = MakeUnique<int16_t[]>(chunk.mDuration * outputChannels);
-
-      if (!enabled || chunk.mBufferFormat == AUDIO_FORMAT_SILENCE) {
-        PodZero(convertedSamples.get(), chunk.mDuration * outputChannels);
-      } else if (chunk.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
-        DownmixAndInterleave(chunk.ChannelData<float>(),
-                             chunk.mDuration, chunk.mVolume, outputChannels,
-                             convertedSamples.get());
-      } else if (chunk.mBufferFormat == AUDIO_FORMAT_S16) {
-        DownmixAndInterleave(chunk.ChannelData<int16_t>(),
-                             chunk.mDuration, chunk.mVolume, outputChannels,
-                             convertedSamples.get());
-      }
-      samples = convertedSamples.get();
-    }
-
-    MOZ_ASSERT(!(rate%100)); // rate should be a multiple of 100
-
-    // Check if the rate or the number of channels has changed since the last time
-    // we came through. I realize it may be overkill to check if the rate has
-    // changed, but I believe it is possible (e.g. if we change sources) and it
-    // costs us very little to handle this case.
-
-    uint32_t audio_10ms = rate / 100;
-
-    if (!packetizer_ ||
-        packetizer_->PacketSize() != audio_10ms ||
-        packetizer_->Channels() != outputChannels) {
-      // It's ok to drop the audio still in the packetizer here.
-      packetizer_ = new AudioPacketizer<int16_t, int16_t>(audio_10ms, outputChannels);
-    }
-
-    packetizer_->Input(samples, chunk.mDuration);
-
-    while (packetizer_->PacketsAvailable()) {
-      uint32_t samplesPerPacket = packetizer_->PacketSize() *
-                                  packetizer_->Channels();
-
-      // We know that webrtc.org's code going to copy the samples down the line,
-      // so we can just use a stack buffer here instead of malloc-ing.
-      // Max size given stereo is 480*2*2 = 1920 (10ms of 16-bits stereo audio at
-      // 48KHz)
-      const size_t AUDIO_SAMPLE_BUFFER_MAX = 1920;
-      int16_t packet[AUDIO_SAMPLE_BUFFER_MAX];
-
-      packetizer_->Output(packet);
-      conduit->SendAudioFrame(packet,
-                              samplesPerPacket,
-                              rate, 0);
-    }
-  }
-
-  void QueueAudioChunk(AudioSessionConduit *conduit,
-                       TrackRate rate, AudioChunk& chunk, bool enabled)
-  {
-    RUN_ON_THREAD(mThread,
-                  WrapRunnable(RefPtr<AudioProxyThread>(this),
-                               &AudioProxyThread::InternalProcessAudioChunk,
-                               conduit, rate, chunk, enabled),
-                  NS_DISPATCH_NORMAL);
-  }
-
-protected:
-  virtual ~AudioProxyThread()
-  {
-    MOZ_COUNT_DTOR(AudioProxyThread);
-  }
-
-  nsCOMPtr<nsIEventTarget> mThread;
-  // Only accessed on mThread
-  nsAutoPtr<AudioPacketizer<int16_t, int16_t>> packetizer_;
-};
-
 static char kDTLSExporterLabel[] = "EXTRACTOR-dtls_srtp";
 
 MediaPipeline::MediaPipeline(const std::string& pc,
                              Direction direction,
                              nsCOMPtr<nsIEventTarget> main_thread,
                              nsCOMPtr<nsIEventTarget> sts_thread,
                              const std::string& track_id,
                              int level,
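
For readers outside the Gecko tree: the AudioProxyThread removed above implemented a common pattern, serializing all work onto one worker thread so the audio encoder never runs on the MSG/input thread, with the single thread also guaranteeing FIFO ordering. Below is a minimal standalone sketch of that pattern using only the C++ standard library; the class and method names are illustrative, not Gecko API (the real code used SharedThreadPool limited to one thread, plus RUN_ON_THREAD).

// Sketch of the "bounce everything to one worker thread" pattern that
// AudioProxyThread implemented. Standard library only; names are illustrative.
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <utility>

class SingleThreadProxy {
public:
  SingleThreadProxy() : mDone(false), mWorker([this] { Run(); }) {}

  ~SingleThreadProxy() {
    {
      std::lock_guard<std::mutex> lock(mMutex);
      mDone = true;
    }
    mCondVar.notify_one();
    mWorker.join(); // drains remaining tasks, then exits
  }

  // Analogous to QueueAudioChunk(): enqueue work for the worker thread.
  void Dispatch(std::function<void()> task) {
    {
      std::lock_guard<std::mutex> lock(mMutex);
      mTasks.push(std::move(task));
    }
    mCondVar.notify_one();
  }

private:
  void Run() {
    for (;;) {
      std::function<void()> task;
      {
        std::unique_lock<std::mutex> lock(mMutex);
        mCondVar.wait(lock, [this] { return mDone || !mTasks.empty(); });
        if (mTasks.empty()) {
          return; // mDone was set and the queue is drained
        }
        task = std::move(mTasks.front());
        mTasks.pop();
      }
      task(); // runs off the caller's thread, in FIFO order
    }
  }

  std::mutex mMutex;
  std::condition_variable mCondVar;
  std::queue<std::function<void()>> mTasks; // single consumer => FIFO
  bool mDone;
  std::thread mWorker; // declared last so the queue exists before Run() starts
};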
@@ -1200,17 +1076,18 @@ friend class MediaPipelineTransmit;
 public:
   explicit PipelineListener(const RefPtr<MediaSessionConduit>& conduit)
     : conduit_(conduit),
       track_id_(TRACK_INVALID),
       mMutex("MediaPipelineTransmit::PipelineListener"),
       track_id_external_(TRACK_INVALID),
       active_(false),
       enabled_(false),
-      direct_connect_(false)
+      direct_connect_(false),
+      packetizer_(nullptr)
   {
   }
 
   ~PipelineListener()
   {
     if (!NS_IsMainThread()) {
       // release conduit on mainthread.  Must use forget()!
       nsresult rv = NS_DispatchToMainThread(new
@@ -1233,23 +1110,16 @@ public:
   // graph thread to keep it in sync with other MediaStreamGraph operations
   // like RemoveListener() and AddListener(). The TrackID will be updated on
   // the next NewData() callback.
   void UnsetTrackId(MediaStreamGraphImpl* graph);
 
   void SetActive(bool active) { active_ = active; }
   void SetEnabled(bool enabled) { enabled_ = enabled; }
 
-  // These are needed since nested classes don't have access to any particular
-  // instance of the parent
-  void SetAudioProxy(const RefPtr<AudioProxyThread>& proxy)
-  {
-    audio_processing_ = proxy;
-  }
-
 #if !defined(MOZILLA_EXTERNAL_LINKAGE)
   void SetVideoFrameConverter(const RefPtr<VideoFrameConverter>& converter)
   {
     converter_ = converter;
   }
 
   void OnVideoFrameConverted(unsigned char* aVideoFrame,
                              unsigned int aVideoFrameLength,
@@ -1287,18 +1157,20 @@ private:
     MutexAutoLock lock(mMutex);
     track_id_ = track_id_external_ = TRACK_INVALID;
   }
 
   void NewData(MediaStreamGraph* graph,
                StreamTime offset,
                const MediaSegment& media);
 
+  virtual void ProcessAudioChunk(AudioSessionConduit *conduit,
+                                 TrackRate rate, AudioChunk& chunk);
+
   RefPtr<MediaSessionConduit> conduit_;
-  RefPtr<AudioProxyThread> audio_processing_;
 #if !defined(MOZILLA_EXTERNAL_LINKAGE)
   RefPtr<VideoFrameConverter> converter_;
 #endif
 
   // May be TRACK_INVALID until we see data from the track
   TrackID track_id_; // this is the current TrackID this listener is attached to
   Mutex mMutex;
   // protected by mMutex
@@ -1308,16 +1180,18 @@ private:
   // active is true if there is a transport to send on
   mozilla::Atomic<bool> active_;
   // enabled is true if the media access control permits sending
   // actual content; when false you get black/silence
   mozilla::Atomic<bool> enabled_;
 
   // Written and read on the MediaStreamGraph thread
   bool direct_connect_;
+
+  nsAutoPtr<AudioPacketizer<int16_t, int16_t>> packetizer_;
 };
 
 #if !defined(MOZILLA_EXTERNAL_LINKAGE)
 // Implements VideoConverterListener for MediaPipeline.
 //
 // We pass converted frames on to MediaPipelineTransmit::PipelineListener
 // where they are further forwarded to VideoConduit.
 // MediaPipelineTransmit calls Detach() during shutdown to ensure there is
@@ -1390,22 +1264,18 @@ MediaPipelineTransmit::MediaPipelineTran
     RefPtr<TransportFlow> rtp_transport,
     RefPtr<TransportFlow> rtcp_transport,
     nsAutoPtr<MediaPipelineFilter> filter) :
   MediaPipeline(pc, TRANSMIT, main_thread, sts_thread, track_id, level,
                 conduit, rtp_transport, rtcp_transport, filter),
   listener_(new PipelineListener(conduit)),
   domtrack_(domtrack)
 {
-  if (!IsVideo()) {
-    audio_processing_ = MakeAndAddRef<AudioProxyThread>();
-    listener_->SetAudioProxy(audio_processing_);
-  }
 #if !defined(MOZILLA_EXTERNAL_LINKAGE)
-  else { // Video
+  if (IsVideo()) {
     // For video we send frames to an async VideoFrameConverter that calls
     // back to a VideoFrameFeeder that feeds I420 frames to VideoConduit.
 
     feeder_ = MakeAndAddRef<VideoFrameFeeder>(listener_);
 
     converter_ = MakeAndAddRef<VideoFrameConverter>();
     converter_->AddListener(feeder_);
 
@@ -1759,18 +1629,18 @@ NewData(MediaStreamGraph* graph,
     AudioSegment::ChunkIterator iter(*audio);
     while(!iter.IsEnded()) {
       TrackRate rate;
 #ifdef USE_FAKE_MEDIA_STREAMS
       rate = Fake_MediaStream::GraphRate();
 #else
       rate = graph->GraphRate();
 #endif
-      audio_processing_->QueueAudioChunk(static_cast<AudioSessionConduit*>(conduit_.get()),
-                                         rate, *iter, enabled_);
+      ProcessAudioChunk(static_cast<AudioSessionConduit*>(conduit_.get()),
+                        rate, *iter);
       iter.Next();
     }
   } else if (media.GetType() == MediaSegment::VIDEO) {
 #if !defined(MOZILLA_EXTERNAL_LINKAGE)
     VideoSegment* video = const_cast<VideoSegment *>(
         static_cast<const VideoSegment *>(&media));
 
     VideoSegment::ChunkIterator iter(*video);
@@ -1779,16 +1649,87 @@ NewData(MediaStreamGraph* graph,
       iter.Next();
     }
 #endif
   } else {
     // Ignore
   }
 }
 
+void MediaPipelineTransmit::PipelineListener::ProcessAudioChunk(
+    AudioSessionConduit *conduit,
+    TrackRate rate,
+    AudioChunk& chunk) {
+
+  // Convert to interleaved, 16-bits integer audio, with a maximum of two
+  // channels (since the WebRTC.org code below makes the assumption that the
+  // input audio is either mono or stereo).
+  uint32_t outputChannels = chunk.ChannelCount() == 1 ? 1 : 2;
+  const int16_t* samples = nullptr;
+  UniquePtr<int16_t[]> convertedSamples;
+
+  // We take advantage of the fact that the common case (microphone directly to
+  // PeerConnection, that is, a normal call), the samples are already 16-bits
+  // mono, so the representation in interleaved and planar is the same, and we
+  // can just use that.
+  if (enabled_ && outputChannels == 1 && chunk.mBufferFormat == AUDIO_FORMAT_S16) {
+    samples = chunk.ChannelData<int16_t>().Elements()[0];
+  } else {
+    convertedSamples = MakeUnique<int16_t[]>(chunk.mDuration * outputChannels);
+
+    if (!enabled_ || chunk.mBufferFormat == AUDIO_FORMAT_SILENCE) {
+      PodZero(convertedSamples.get(), chunk.mDuration * outputChannels);
+    } else if (chunk.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
+      DownmixAndInterleave(chunk.ChannelData<float>(),
+                           chunk.mDuration, chunk.mVolume, outputChannels,
+                           convertedSamples.get());
+    } else if (chunk.mBufferFormat == AUDIO_FORMAT_S16) {
+      DownmixAndInterleave(chunk.ChannelData<int16_t>(),
+                           chunk.mDuration, chunk.mVolume, outputChannels,
+                           convertedSamples.get());
+    }
+    samples = convertedSamples.get();
+  }
+
+  MOZ_ASSERT(!(rate%100)); // rate should be a multiple of 100
+
+  // Check if the rate or the number of channels has changed since the last time
+  // we came through. I realize it may be overkill to check if the rate has
+  // changed, but I believe it is possible (e.g. if we change sources) and it
+  // costs us very little to handle this case.
+
+  uint32_t audio_10ms = rate / 100;
+
+  if (!packetizer_ ||
+      packetizer_->PacketSize() != audio_10ms ||
+      packetizer_->Channels() != outputChannels) {
+    // It's ok to drop the audio still in the packetizer here.
+    packetizer_ = new AudioPacketizer<int16_t, int16_t>(audio_10ms, outputChannels);
+   }
+
+  packetizer_->Input(samples, chunk.mDuration);
+
+  while (packetizer_->PacketsAvailable()) {
+    uint32_t samplesPerPacket = packetizer_->PacketSize() *
+                                packetizer_->Channels();
+
+    // We know that webrtc.org's code going to copy the samples down the line,
+    // so we can just use a stack buffer here instead of malloc-ing.
+    // Max size given stereo is 480*2*2 = 1920 (10ms of 16-bits stereo audio at
+    // 48KHz)
+    const size_t AUDIO_SAMPLE_BUFFER_MAX = 1920;
+    int16_t packet[AUDIO_SAMPLE_BUFFER_MAX];
+
+    packetizer_->Output(packet);
+    conduit->SendAudioFrame(packet,
+                            samplesPerPacket,
+                            rate, 0);
+  }
+}
+
 class TrackAddedCallback {
  public:
   virtual void TrackAdded(TrackTicks current_ticks) = 0;
 
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TrackAddedCallback);
 
  protected:
   virtual ~TrackAddedCallback() {}
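
As a worked check of the packet sizing in ProcessAudioChunk() above, under the worst case its comment describes (10 ms of 16-bit stereo audio at 48 kHz): the constants come from the hunk, but the program itself is only an illustration, not part of the changeset.

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t rate = 48000;        // highest graph rate considered
  const uint32_t outputChannels = 2;  // downmix ceiling: mono or stereo
  assert(rate % 100 == 0);            // mirrors the MOZ_ASSERT above

  const uint32_t audio_10ms = rate / 100;                           // 480 frames per 10 ms packet
  const uint32_t samplesPerPacket = audio_10ms * outputChannels;    // 960 int16_t samples
  const size_t bytesPerPacket = samplesPerPacket * sizeof(int16_t); // 1920 bytes: the 480*2*2 in the comment

  // 960 samples fit in the int16_t[1920] stack buffer with room to spare.
  assert(samplesPerPacket <= 1920);
  printf("%u samples/packet, %zu bytes/packet\n", samplesPerPacket, bytesPerPacket);
  return 0;
}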
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
@@ -25,17 +25,16 @@
 
 #include "webrtc/modules/rtp_rtcp/interface/rtp_header_parser.h"
 
 class nsIPrincipal;
 
 namespace mozilla {
 class MediaPipelineFilter;
 class PeerIdentity;
-class AudioProxyThread;
 #if !defined(MOZILLA_EXTERNAL_LINKAGE)
 class VideoFrameConverter;
 #endif
 
 #ifndef USE_FAKE_MEDIA_STREAMS
 namespace dom {
   class MediaStreamTrack;
 } // namespace dom
@@ -340,17 +339,16 @@ public:
   class PipelineListener;
   class VideoFrameFeeder;
 
  protected:
   ~MediaPipelineTransmit();
 
  private:
   RefPtr<PipelineListener> listener_;
-  RefPtr<AudioProxyThread> audio_processing_;
 #if !defined(MOZILLA_EXTERNAL_LINKAGE)
   RefPtr<VideoFrameFeeder> feeder_;
   RefPtr<VideoFrameConverter> converter_;
 #endif
   dom::MediaStreamTrack* domtrack_;
 };