Merge autoland to mozilla-central r=merge a=merge
author: Noemi Erli <nerli@mozilla.com>
Sun, 17 Dec 2017 23:42:42 +0200
changeset 396675 e3048146437d
parent 396631 cd82901b2741 (current diff)
parent 396674 f12c25303026 (diff)
child 396677 e72be4397ade
push id: 33104
push user: nerli@mozilla.com
push date: 2017-12-17 21:43 +0000
treeherder: mozilla-central@e3048146437d [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: merge, merge
milestone: 59.0a1
first release with
nightly linux32
e3048146437d / 59.0a1 / 20171217220404 / files
nightly linux64
e3048146437d / 59.0a1 / 20171217220404 / files
nightly mac
e3048146437d / 59.0a1 / 20171217220404 / files
nightly win32
e3048146437d / 59.0a1 / 20171217220404 / files
nightly win64
e3048146437d / 59.0a1 / 20171217220404 / files
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
releases
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Merge autoland to mozilla-central r=merge a=merge
--- a/browser/components/preferences/in-content/privacy.js
+++ b/browser/components/preferences/in-content/privacy.js
@@ -491,17 +491,17 @@ var gPrivacyPane = {
 
   /**
    * Update the privacy micro-management controls based on the
    * value of the private browsing auto-start checkbox.
    */
   updatePrivacyMicroControls() {
     if (document.getElementById("historyMode").value == "custom") {
       let disabled = this._autoStartPrivateBrowsing =
-        document.getElementById("privateBrowsingAutoStart").checked;
+        document.getElementById("browser.privatebrowsing.autostart").value;
       this.dependentControls.forEach(function(aElement) {
         let control = document.getElementById(aElement);
         let preferenceId = control.getAttribute("preference");
         if (!preferenceId) {
           let dependentControlId = control.getAttribute("control");
           if (dependentControlId) {
             let dependentControl = document.getElementById(dependentControlId);
             preferenceId = dependentControl.getAttribute("preference");
--- a/devtools/client/webconsole/new-console-output/new-console-output-wrapper.js
+++ b/devtools/client/webconsole/new-console-output/new-console-output-wrapper.js
@@ -206,18 +206,18 @@ NewConsoleOutputWrapper.prototype = {
         serviceContainer,
       });
 
       let provider = createElement(
         Provider,
         { store },
         dom.div(
           {className: "webconsole-output-wrapper"},
+          filterBar,
           consoleOutput,
-          filterBar,
           sideBar
         ));
       this.body = ReactDOM.render(provider, this.parentNode);
 
       this.jsterm.focus();
     });
   },
 
--- a/devtools/shared/css/generated/properties-db.js
+++ b/devtools/shared/css/generated/properties-db.js
@@ -9855,88 +9855,16 @@ exports.PREFERENCES = [
     "contain",
     "layout.css.contain.enabled"
   ],
   [
     "font-variation-settings",
     "layout.css.font-variations.enabled"
   ],
   [
-    "grid",
-    "layout.css.grid.enabled"
-  ],
-  [
-    "grid-area",
-    "layout.css.grid.enabled"
-  ],
-  [
-    "grid-auto-columns",
-    "layout.css.grid.enabled"
-  ],
-  [
-    "grid-auto-flow",
-    "layout.css.grid.enabled"
-  ],
-  [
-    "grid-auto-rows",
-    "layout.css.grid.enabled"
-  ],
-  [
-    "grid-column",
-    "layout.css.grid.enabled"
-  ],
-  [
-    "grid-column-end",
-    "layout.css.grid.enabled"
-  ],
-  [
-    "grid-column-gap",
-    "layout.css.grid.enabled"
-  ],
-  [
-    "grid-column-start",
-    "layout.css.grid.enabled"
-  ],
-  [
-    "grid-gap",
-    "layout.css.grid.enabled"
-  ],
-  [
-    "grid-row",
-    "layout.css.grid.enabled"
-  ],
-  [
-    "grid-row-end",
-    "layout.css.grid.enabled"
-  ],
-  [
-    "grid-row-gap",
-    "layout.css.grid.enabled"
-  ],
-  [
-    "grid-row-start",
-    "layout.css.grid.enabled"
-  ],
-  [
-    "grid-template",
-    "layout.css.grid.enabled"
-  ],
-  [
-    "grid-template-areas",
-    "layout.css.grid.enabled"
-  ],
-  [
-    "grid-template-columns",
-    "layout.css.grid.enabled"
-  ],
-  [
-    "grid-template-rows",
-    "layout.css.grid.enabled"
-  ],
-  [
     "initial-letter",
     "layout.css.initial-letter.enabled"
   ],
   [
     "image-orientation",
     "layout.css.image-orientation.enabled"
   ],
   [
--- a/dom/media/DOMMediaStream.cpp
+++ b/dom/media/DOMMediaStream.cpp
@@ -969,32 +969,32 @@ DOMMediaStream::InitInputStreamCommon(Me
 }
 
 void
 DOMMediaStream::InitOwnedStreamCommon(MediaStreamGraph* aGraph)
 {
   MOZ_ASSERT(!mPlaybackStream, "Owned stream must be initialized before playback stream");
 
   mOwnedStream = aGraph->CreateTrackUnionStream();
-  mOwnedStream->SetAutofinish(true);
+  mOwnedStream->QueueSetAutofinish(true);
   mOwnedStream->RegisterUser();
   if (mInputStream) {
     mOwnedPort = mOwnedStream->AllocateInputPort(mInputStream);
   }
 
   // Setup track listeners
   mOwnedListener = new OwnedStreamListener(this);
   mOwnedStream->AddListener(mOwnedListener);
 }
 
 void
 DOMMediaStream::InitPlaybackStreamCommon(MediaStreamGraph* aGraph)
 {
   mPlaybackStream = aGraph->CreateTrackUnionStream();
-  mPlaybackStream->SetAutofinish(true);
+  mPlaybackStream->QueueSetAutofinish(true);
   mPlaybackStream->RegisterUser();
   if (mOwnedStream) {
     mPlaybackPort = mPlaybackStream->AllocateInputPort(mOwnedStream);
   }
 
   mPlaybackListener = new PlaybackStreamListener(this);
   mPlaybackStream->AddListener(mPlaybackListener);
 
--- a/dom/media/MediaRecorder.cpp
+++ b/dom/media/MediaRecorder.cpp
@@ -8,16 +8,17 @@
 
 #include "AudioNodeEngine.h"
 #include "AudioNodeStream.h"
 #include "DOMMediaStream.h"
 #include "GeckoProfiler.h"
 #include "MediaDecoder.h"
 #include "MediaEncoder.h"
 #include "MediaStreamGraphImpl.h"
+#include "VideoUtils.h"
 #include "mozilla/DOMEventTargetHelper.h"
 #include "mozilla/dom/AudioStreamTrack.h"
 #include "mozilla/dom/BlobEvent.h"
 #include "mozilla/dom/File.h"
 #include "mozilla/dom/MediaRecorderErrorEvent.h"
 #include "mozilla/dom/MutableBlobStorage.h"
 #include "mozilla/dom/VideoStreamTrack.h"
 #include "mozilla/media/MediaUtils.h"
@@ -862,25 +863,26 @@ private:
     if (!mRunningState.isOk() || mRunningState.unwrap() != RunningState::Idling) {
       MOZ_ASSERT_UNREACHABLE("Double-init");
       return;
     }
 
     // Create a TaskQueue to read encode media data from MediaEncoder.
     MOZ_RELEASE_ASSERT(!mEncoderThread);
     RefPtr<SharedThreadPool> pool =
-      SharedThreadPool::Get(NS_LITERAL_CSTRING("MediaRecorderReadThread"));
+      GetMediaThreadPool(MediaThreadType::WEBRTC_DECODER);
     if (!pool) {
       LOG(LogLevel::Debug, ("Session.InitEncoder %p Failed to create "
                             "MediaRecorderReadThread thread pool", this));
       DoSessionEndTask(NS_ERROR_FAILURE);
       return;
     }
 
-    mEncoderThread = MakeAndAddRef<TaskQueue>(pool.forget());
+    mEncoderThread =
+      MakeAndAddRef<TaskQueue>(pool.forget(), "MediaRecorderReadThread");
 
     if (!gMediaRecorderShutdownBlocker) {
       // Add a shutdown blocker so mEncoderThread can be shutdown async.
       class Blocker : public ShutdownBlocker
       {
       public:
         Blocker()
           : ShutdownBlocker(NS_LITERAL_STRING(
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -29,16 +29,17 @@
 #include "GeckoProfiler.h"
 #include "VideoFrameContainer.h"
 #include "mozilla/AbstractThread.h"
 #include "mozilla/Unused.h"
 #ifdef MOZ_WEBRTC
 #include "AudioOutputObserver.h"
 #endif
 #include "mtransport/runnable_utils.h"
+#include "VideoUtils.h"
 
 #include "webaudio/blink/DenormalDisabler.h"
 #include "webaudio/blink/HRTFDatabaseLoader.h"
 
 using namespace mozilla::layers;
 using namespace mozilla::dom;
 using namespace mozilla::gfx;
 using namespace mozilla::media;
@@ -68,41 +69,16 @@ MediaStreamGraphImpl::~MediaStreamGraphI
 {
   MOZ_ASSERT(mStreams.IsEmpty() && mSuspendedStreams.IsEmpty(),
              "All streams should have been destroyed by messages from the main thread");
   LOG(LogLevel::Debug, ("MediaStreamGraph %p destroyed", this));
   LOG(LogLevel::Debug, ("MediaStreamGraphImpl::~MediaStreamGraphImpl"));
 }
 
 void
-MediaStreamGraphImpl::FinishStream(MediaStream* aStream)
-{
-  MOZ_ASSERT(OnGraphThreadOrNotRunning());
-  if (aStream->mFinished)
-    return;
-  LOG(LogLevel::Debug, ("MediaStream %p will finish", aStream));
-#ifdef DEBUG
-  for (StreamTracks::TrackIter track(aStream->mTracks);
-         !track.IsEnded(); track.Next()) {
-    if (!track->IsEnded()) {
-      LOG(LogLevel::Error,
-          ("MediaStream %p will finish, but track %d has not ended.",
-           aStream,
-           track->GetID()));
-      NS_ASSERTION(false, "Finished stream cannot contain live track");
-    }
-  }
-#endif
-  aStream->mFinished = true;
-  aStream->mTracks.AdvanceKnownTracksTime(STREAM_TIME_MAX);
-
-  SetStreamOrderDirty();
-}
-
-void
 MediaStreamGraphImpl::AddStreamGraphThread(MediaStream* aStream)
 {
   MOZ_ASSERT(OnGraphThreadOrNotRunning());
   aStream->mTracksStartTime = mProcessedTime;
 
   if (aStream->AsSourceStream()) {
     SourceMediaStream* source = aStream->AsSourceStream();
     TimeStamp currentTimeStamp = CurrentDriver()->GetCurrentTimeStamp();
@@ -167,166 +143,16 @@ MediaStreamGraphImpl::RemoveStreamGraphT
       ("Removed media stream %p from graph %p, count %zu",
        aStream,
        this,
        mStreams.Length()));
 
   NS_RELEASE(aStream); // probably destroying it
 }
 
-void
-MediaStreamGraphImpl::ExtractPendingInput(SourceMediaStream* aStream,
-                                          GraphTime aDesiredUpToTime,
-                                          bool* aEnsureNextIteration)
-{
-  MOZ_ASSERT(OnGraphThread());
-  bool finished;
-  {
-    MutexAutoLock lock(aStream->mMutex);
-    if (aStream->mPullEnabled && !aStream->mFinished &&
-        !aStream->mListeners.IsEmpty()) {
-      // Compute how much stream time we'll need assuming we don't block
-      // the stream at all.
-      StreamTime t = aStream->GraphTimeToStreamTime(aDesiredUpToTime);
-      LOG(LogLevel::Verbose,
-          ("Calling NotifyPull aStream=%p t=%f current end=%f",
-           aStream,
-           MediaTimeToSeconds(t),
-           MediaTimeToSeconds(aStream->mTracks.GetEnd())));
-      if (t > aStream->mTracks.GetEnd()) {
-        *aEnsureNextIteration = true;
-#ifdef DEBUG
-        if (aStream->mListeners.Length() == 0) {
-          LOG(
-            LogLevel::Error,
-            ("No listeners in NotifyPull aStream=%p desired=%f current end=%f",
-             aStream,
-             MediaTimeToSeconds(t),
-             MediaTimeToSeconds(aStream->mTracks.GetEnd())));
-          aStream->DumpTrackInfo();
-        }
-#endif
-        for (uint32_t j = 0; j < aStream->mListeners.Length(); ++j) {
-          MediaStreamListener* l = aStream->mListeners[j];
-          {
-            MutexAutoUnlock unlock(aStream->mMutex);
-            l->NotifyPull(this, t);
-          }
-        }
-      }
-    }
-    finished = aStream->mUpdateFinished;
-    bool shouldNotifyTrackCreated = false;
-    for (int32_t i = aStream->mUpdateTracks.Length() - 1; i >= 0; --i) {
-      SourceMediaStream::TrackData* data = &aStream->mUpdateTracks[i];
-      aStream->ApplyTrackDisabling(data->mID, data->mData);
-      // Dealing with NotifyQueuedTrackChanges and NotifyQueuedAudioData part.
-
-      // The logic is different from the manipulating of aStream->mTracks part.
-      // So it is not combined with the manipulating of aStream->mTracks part.
-      StreamTime offset =
-        (data->mCommands & SourceMediaStream::TRACK_CREATE)
-        ? data->mStart
-        : aStream->mTracks.FindTrack(data->mID)->GetSegment()->GetDuration();
-
-      // Audio case.
-      if (data->mData->GetType() == MediaSegment::AUDIO) {
-        if (data->mCommands) {
-          MOZ_ASSERT(!(data->mCommands & SourceMediaStream::TRACK_UNUSED));
-          for (MediaStreamListener* l : aStream->mListeners) {
-            if (data->mCommands & SourceMediaStream::TRACK_END) {
-              l->NotifyQueuedAudioData(this, data->mID,
-                                       offset, *(static_cast<AudioSegment*>(data->mData.get())));
-            }
-            l->NotifyQueuedTrackChanges(this, data->mID,
-                                        offset, static_cast<TrackEventCommand>(data->mCommands), *data->mData);
-            if (data->mCommands & SourceMediaStream::TRACK_CREATE) {
-              l->NotifyQueuedAudioData(this, data->mID,
-                                       offset, *(static_cast<AudioSegment*>(data->mData.get())));
-            }
-          }
-        } else {
-          for (MediaStreamListener* l : aStream->mListeners) {
-              l->NotifyQueuedAudioData(this, data->mID,
-                                       offset, *(static_cast<AudioSegment*>(data->mData.get())));
-          }
-        }
-      }
-
-      // Video case.
-      if (data->mData->GetType() == MediaSegment::VIDEO) {
-        if (data->mCommands) {
-          MOZ_ASSERT(!(data->mCommands & SourceMediaStream::TRACK_UNUSED));
-          for (MediaStreamListener* l : aStream->mListeners) {
-            l->NotifyQueuedTrackChanges(this, data->mID,
-                                        offset, static_cast<TrackEventCommand>(data->mCommands), *data->mData);
-          }
-        }
-      }
-
-      for (TrackBound<MediaStreamTrackListener>& b : aStream->mTrackListeners) {
-        if (b.mTrackID != data->mID) {
-          continue;
-        }
-        b.mListener->NotifyQueuedChanges(this, offset, *data->mData);
-        if (data->mCommands & SourceMediaStream::TRACK_END) {
-          b.mListener->NotifyEnded();
-        }
-      }
-      if (data->mCommands & SourceMediaStream::TRACK_CREATE) {
-        MediaSegment* segment = data->mData.forget();
-        LOG(LogLevel::Debug,
-            ("SourceMediaStream %p creating track %d, start %" PRId64
-             ", initial end %" PRId64,
-             aStream,
-             data->mID,
-             int64_t(data->mStart),
-             int64_t(segment->GetDuration())));
-
-        data->mEndOfFlushedData += segment->GetDuration();
-        aStream->mTracks.AddTrack(data->mID, data->mStart, segment);
-        // The track has taken ownership of data->mData, so let's replace
-        // data->mData with an empty clone.
-        data->mData = segment->CreateEmptyClone();
-        data->mCommands &= ~SourceMediaStream::TRACK_CREATE;
-        shouldNotifyTrackCreated = true;
-      } else if (data->mData->GetDuration() > 0) {
-        MediaSegment* dest = aStream->mTracks.FindTrack(data->mID)->GetSegment();
-        LOG(LogLevel::Verbose,
-            ("SourceMediaStream %p track %d, advancing end from %" PRId64
-             " to %" PRId64,
-             aStream,
-             data->mID,
-             int64_t(dest->GetDuration()),
-             int64_t(dest->GetDuration() + data->mData->GetDuration())));
-        data->mEndOfFlushedData += data->mData->GetDuration();
-        dest->AppendFrom(data->mData);
-      }
-      if (data->mCommands & SourceMediaStream::TRACK_END) {
-        aStream->mTracks.FindTrack(data->mID)->SetEnded();
-        aStream->mUpdateTracks.RemoveElementAt(i);
-      }
-    }
-    if (shouldNotifyTrackCreated) {
-      for (MediaStreamListener* l : aStream->mListeners) {
-        l->NotifyFinishedTrackCreation(this);
-      }
-    }
-    if (!aStream->mFinished) {
-      aStream->mTracks.AdvanceKnownTracksTime(aStream->mUpdateKnownTracksTime);
-    }
-  }
-  if (aStream->mTracks.GetEnd() > 0) {
-    aStream->mHasCurrentData = true;
-  }
-  if (finished) {
-    FinishStream(aStream);
-  }
-}
-
 StreamTime
 MediaStreamGraphImpl::GraphTimeToStreamTimeWithBlocking(const MediaStream* aStream,
                                                         GraphTime aTime) const
 {
   MOZ_ASSERT(aTime <= mStateComputedTime,
              "Don't ask about times where we haven't made blocking decisions yet");
   return std::max<StreamTime>(0,
       std::min(aTime, aStream->mStartBlocking) - aStream->mTracksStartTime);
@@ -1319,21 +1145,35 @@ MediaStreamGraphImpl::UpdateGraph(GraphT
   // been woken up right after having been to sleep.
   MOZ_ASSERT(aEndBlockingDecisions >= mStateComputedTime);
 
   UpdateStreamOrder();
 
   bool ensureNextIteration = false;
 
   // Grab pending stream input and compute blocking time
+  // TODO: Ensure that heap memory allocations aren't going to be a problem.
+  // Maybe modify code to use nsAutoTArray as out parameters.
+  nsTArray<RefPtr<SourceMediaStream::NotifyPullPromise>> promises;
   for (MediaStream* stream : mStreams) {
     if (SourceMediaStream* is = stream->AsSourceStream()) {
-      ExtractPendingInput(is, aEndBlockingDecisions, &ensureNextIteration);
+      promises.AppendElements(
+        is->PullNewData(aEndBlockingDecisions, &ensureNextIteration));
     }
-
+  }
+
+  // Wait until all PullEnabled stream's listeners have completed.
+  if (!promises.IsEmpty()) {
+    AwaitAll(do_AddRef(mThreadPool), promises);
+  }
+
+  for (MediaStream* stream : mStreams) {
+    if (SourceMediaStream* is = stream->AsSourceStream()) {
+      is->ExtractPendingInput();
+    }
     if (stream->mFinished) {
       // The stream's not suspended, and since it's finished, underruns won't
       // stop it playing out. So there's no blocking other than what we impose
       // here.
       GraphTime endTime = stream->GetStreamTracks().GetAllTracksEnd() +
           stream->mTracksStartTime;
       if (endTime <= mStateComputedTime) {
         LOG(LogLevel::Verbose,
@@ -1608,16 +1448,20 @@ public:
     // objects owning streams, or for expiration of mGraph->mShutdownTimer,
     // which won't otherwise release its reference on the graph until
     // nsTimerImpl::Shutdown(), which runs after xpcom-shutdown-threads.
     {
       MonitorAutoLock mon(mGraph->mMonitor);
       mGraph->SetCurrentDriver(nullptr);
     }
 
+    // Do not hold on to our thread pool; otherwise global shutdown will hang
+    // as it waits for all thread pools to shut down.
+    mGraph->mThreadPool = nullptr;
+
     // Safe to access these without the monitor since the graph isn't running.
     // We may be one of several graphs. Drop ticket to eventually unblock shutdown.
     if (mGraph->mShutdownTimer && !mGraph->mForceShutdownTicket) {
       MOZ_ASSERT(false,
         "AudioCallbackDriver took too long to shut down and we let shutdown"
         " continue - freezing and leaking");
 
       // The timer fired, so we may be deeper in shutdown now.  Block any further
@@ -1627,17 +1471,17 @@ public:
 
     // mGraph's thread is not running so it's OK to do whatever here
     for (MediaStream* stream : mGraph->AllStreams()) {
       // Clean up all MediaSegments since we cannot release Images too
       // late during shutdown. Also notify listeners that they were removed
       // so they can clean up any gfx resources.
       if (SourceMediaStream* source = stream->AsSourceStream()) {
         // Finishing a SourceStream prevents new data from being appended.
-        source->Finish();
+        source->FinishOnGraphThread();
       }
       stream->GetStreamTracks().Clear();
       stream->RemoveAllListenersImpl();
     }
 
     mGraph->mForceShutdownTicket = nullptr;
 
     // We can't block past the final LIFECYCLE_WAITING_FOR_STREAM_DESTRUCTION
@@ -2095,17 +1939,37 @@ StreamTime
 MediaStream::GraphTimeToStreamTimeWithBlocking(GraphTime aTime) const
 {
   return GraphImpl()->GraphTimeToStreamTimeWithBlocking(this, aTime);
 }
 
 void
 MediaStream::FinishOnGraphThread()
 {
-  GraphImpl()->FinishStream(this);
+  if (mFinished) {
+    return;
+  }
+  LOG(LogLevel::Debug, ("MediaStream %p will finish", this));
+#ifdef DEBUG
+  for (StreamTracks::TrackIter track(mTracks); !track.IsEnded(); track.Next()) {
+    if (!track->IsEnded()) {
+      LOG(LogLevel::Error,
+          ("MediaStream %p will finish, but track %d has not ended.",
+           this,
+           track->GetID()));
+      NS_ASSERTION(false, "Finished stream cannot contain live track");
+    }
+  }
+#endif
+  mFinished = true;
+  mTracks.AdvanceKnownTracksTime(STREAM_TIME_MAX);
+
+  // Let the MSG know that this stream can be destroyed if necessary, to avoid
+  // unnecessarily processing it in the future.
+  GraphImpl()->SetStreamOrderDirty();
 }
 
 StreamTracks::Track*
 MediaStream::FindTrack(TrackID aID) const
 {
   return mTracks.FindTrack(aID);
 }
 
@@ -2789,17 +2653,17 @@ MediaStream::AddMainThreadListener(MainT
   GraphImpl()->Dispatch(runnable.forget());
 }
 
 SourceMediaStream::SourceMediaStream()
   : MediaStream()
   , mMutex("mozilla::media::SourceMediaStream")
   , mUpdateKnownTracksTime(0)
   , mPullEnabled(false)
-  , mUpdateFinished(false)
+  , mFinishPending(false)
   , mNeedsMixing(false)
 {
 }
 
 nsresult
 SourceMediaStream::OpenAudioInput(int aID,
                                   AudioDataListener *aListener)
 {
@@ -2843,16 +2707,193 @@ SourceMediaStream::SetPullEnabled(bool a
 {
   MutexAutoLock lock(mMutex);
   mPullEnabled = aEnabled;
   if (mPullEnabled && GraphImpl()) {
     GraphImpl()->EnsureNextIteration();
   }
 }
 
+nsTArray<RefPtr<SourceMediaStream::NotifyPullPromise>>
+SourceMediaStream::PullNewData(StreamTime aDesiredUpToTime,
+                               bool* aEnsureNextIteration)
+{
+  // 2 is the average number of listeners per SourceMediaStream.
+  nsTArray<RefPtr<SourceMediaStream::NotifyPullPromise>> promises(2);
+  MutexAutoLock lock(mMutex);
+  if (!mPullEnabled || mFinished) {
+    return promises;
+  }
+  // Compute how much stream time we'll need assuming we don't block
+  // the stream at all.
+  StreamTime t = GraphTimeToStreamTime(aDesiredUpToTime);
+  StreamTime current = mTracks.GetEnd();
+  LOG(LogLevel::Verbose,
+      ("Calling NotifyPull aStream=%p t=%f current end=%f",
+        this,
+        GraphImpl()->MediaTimeToSeconds(t),
+        GraphImpl()->MediaTimeToSeconds(current)));
+  if (t <= current) {
+    return promises;
+  }
+  *aEnsureNextIteration = true;
+#ifdef DEBUG
+  if (mListeners.Length() == 0) {
+    LOG(
+      LogLevel::Error,
+      ("No listeners in NotifyPull aStream=%p desired=%f current end=%f",
+        this,
+        GraphImpl()->MediaTimeToSeconds(t),
+        GraphImpl()->MediaTimeToSeconds(current)));
+    DumpTrackInfo();
+  }
+#endif
+  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
+    MediaStreamListener* l = mListeners[j];
+    {
+      MutexAutoUnlock unlock(mMutex);
+      promises.AppendElement(l->AsyncNotifyPull(GraphImpl(), t));
+    }
+  }
+  return promises;
+}
+
+void
+SourceMediaStream::ExtractPendingInput()
+{
+  MutexAutoLock lock(mMutex);
+
+  bool finished = mFinishPending;
+  bool shouldNotifyTrackCreated = false;
+
+  for (int32_t i = mUpdateTracks.Length() - 1; i >= 0; --i) {
+    SourceMediaStream::TrackData* data = &mUpdateTracks[i];
+    ApplyTrackDisabling(data->mID, data->mData);
+    // Dealing with NotifyQueuedTrackChanges and NotifyQueuedAudioData part.
+
+    // This logic differs from the code below that manipulates mTracks,
+    // so it is deliberately kept separate from it.
+    StreamTime offset =
+      (data->mCommands & SourceMediaStream::TRACK_CREATE)
+      ? data->mStart
+      : mTracks.FindTrack(data->mID)->GetSegment()->GetDuration();
+
+    // Audio case.
+    if (data->mData->GetType() == MediaSegment::AUDIO) {
+      if (data->mCommands) {
+        MOZ_ASSERT(!(data->mCommands & SourceMediaStream::TRACK_UNUSED));
+        for (MediaStreamListener* l : mListeners) {
+          if (data->mCommands & SourceMediaStream::TRACK_END) {
+            l->NotifyQueuedAudioData(
+              GraphImpl(),
+              data->mID,
+              offset,
+              *(static_cast<AudioSegment*>(data->mData.get())));
+          }
+          l->NotifyQueuedTrackChanges(
+            GraphImpl(),
+            data->mID,
+            offset,
+            static_cast<TrackEventCommand>(data->mCommands),
+            *data->mData);
+          if (data->mCommands & SourceMediaStream::TRACK_CREATE) {
+            l->NotifyQueuedAudioData(
+              GraphImpl(),
+              data->mID,
+              offset,
+              *(static_cast<AudioSegment*>(data->mData.get())));
+          }
+        }
+      } else {
+        for (MediaStreamListener* l : mListeners) {
+          l->NotifyQueuedAudioData(
+            GraphImpl(),
+            data->mID,
+            offset,
+            *(static_cast<AudioSegment*>(data->mData.get())));
+        }
+      }
+    }
+
+    // Video case.
+    if (data->mData->GetType() == MediaSegment::VIDEO) {
+      if (data->mCommands) {
+        MOZ_ASSERT(!(data->mCommands & SourceMediaStream::TRACK_UNUSED));
+        for (MediaStreamListener* l : mListeners) {
+          l->NotifyQueuedTrackChanges(
+            GraphImpl(),
+            data->mID,
+            offset,
+            static_cast<TrackEventCommand>(data->mCommands),
+            *data->mData);
+        }
+      }
+    }
+
+    for (TrackBound<MediaStreamTrackListener>& b : mTrackListeners) {
+      if (b.mTrackID != data->mID) {
+        continue;
+      }
+      b.mListener->NotifyQueuedChanges(GraphImpl(), offset, *data->mData);
+      if (data->mCommands & SourceMediaStream::TRACK_END) {
+        b.mListener->NotifyEnded();
+      }
+    }
+    if (data->mCommands & SourceMediaStream::TRACK_CREATE) {
+      MediaSegment* segment = data->mData.forget();
+      LOG(LogLevel::Debug,
+          ("SourceMediaStream %p creating track %d, start %" PRId64
+            ", initial end %" PRId64,
+            this,
+            data->mID,
+            int64_t(data->mStart),
+            int64_t(segment->GetDuration())));
+
+      data->mEndOfFlushedData += segment->GetDuration();
+      mTracks.AddTrack(data->mID, data->mStart, segment);
+      // The track has taken ownership of data->mData, so let's replace
+      // data->mData with an empty clone.
+      data->mData = segment->CreateEmptyClone();
+      data->mCommands &= ~SourceMediaStream::TRACK_CREATE;
+      shouldNotifyTrackCreated = true;
+    } else if (data->mData->GetDuration() > 0) {
+      MediaSegment* dest = mTracks.FindTrack(data->mID)->GetSegment();
+      LOG(LogLevel::Verbose,
+          ("SourceMediaStream %p track %d, advancing end from %" PRId64
+            " to %" PRId64,
+            this,
+            data->mID,
+            int64_t(dest->GetDuration()),
+            int64_t(dest->GetDuration() + data->mData->GetDuration())));
+      data->mEndOfFlushedData += data->mData->GetDuration();
+      dest->AppendFrom(data->mData);
+    }
+    if (data->mCommands & SourceMediaStream::TRACK_END) {
+      mTracks.FindTrack(data->mID)->SetEnded();
+      mUpdateTracks.RemoveElementAt(i);
+    }
+  }
+  if (shouldNotifyTrackCreated) {
+    for (MediaStreamListener* l : mListeners) {
+      l->NotifyFinishedTrackCreation(GraphImpl());
+    }
+  }
+  if (!mFinished) {
+    mTracks.AdvanceKnownTracksTime(mUpdateKnownTracksTime);
+  }
+
+  if (mTracks.GetEnd() > 0) {
+    mHasCurrentData = true;
+  }
+
+  if (finished) {
+    FinishOnGraphThread();
+  }
+}
+
 void
 SourceMediaStream::AddTrackInternal(TrackID aID, TrackRate aRate, StreamTime aStart,
                                     MediaSegment* aSegment, uint32_t aFlags)
 {
   MutexAutoLock lock(mMutex);
   nsTArray<TrackData> *track_data = (aFlags & ADDTRACK_QUEUED) ?
                                     &mPendingTracks : &mUpdateTracks;
   TrackData* data = track_data->AppendElement();
@@ -3131,20 +3172,20 @@ SourceMediaStream::AdvanceKnownTracksTim
   MOZ_ASSERT(aKnownTime >= mUpdateKnownTracksTime);
   mUpdateKnownTracksTime = aKnownTime;
   if (auto graph = GraphImpl()) {
     graph->EnsureNextIteration();
   }
 }
 
 void
-SourceMediaStream::FinishWithLockHeld()
+SourceMediaStream::FinishPendingWithLockHeld()
 {
   mMutex.AssertCurrentThreadOwns();
-  mUpdateFinished = true;
+  mFinishPending = true;
   if (auto graph = GraphImpl()) {
     graph->EnsureNextIteration();
   }
 }
 
 void
 SourceMediaStream::SetTrackEnabledImpl(TrackID aTrackID, DisabledTrackMode aMode)
 {
@@ -3180,17 +3221,17 @@ void
 SourceMediaStream::EndAllTrackAndFinish()
 {
   MutexAutoLock lock(mMutex);
   for (uint32_t i = 0; i < mUpdateTracks.Length(); ++i) {
     SourceMediaStream::TrackData* data = &mUpdateTracks[i];
     data->mCommands |= TrackEventCommand::TRACK_EVENT_ENDED;
   }
   mPendingTracks.Clear();
-  FinishWithLockHeld();
+  FinishPendingWithLockHeld();
   // we will call NotifyEvent() to let GetUserMedia know
 }
 
 void
 SourceMediaStream::RemoveAllDirectListeners()
 {
   GraphImpl()->AssertOnGraphThreadOrNotRunning();
 
@@ -3448,32 +3489,32 @@ ProcessedMediaStream::AllocateInputPort(
     }
   }
   port->SetGraphImpl(GraphImpl());
   GraphImpl()->AppendMessage(MakeUnique<Message>(port));
   return port.forget();
 }
 
 void
-ProcessedMediaStream::Finish()
+ProcessedMediaStream::QueueFinish()
 {
   class Message : public ControlMessage {
   public:
     explicit Message(ProcessedMediaStream* aStream)
       : ControlMessage(aStream) {}
     void Run() override
     {
-      mStream->GraphImpl()->FinishStream(mStream);
+      mStream->FinishOnGraphThread();
     }
   };
   GraphImpl()->AppendMessage(MakeUnique<Message>(this));
 }
 
 void
-ProcessedMediaStream::SetAutofinish(bool aAutofinish)
+ProcessedMediaStream::QueueSetAutofinish(bool aAutofinish)
 {
   class Message : public ControlMessage {
   public:
     Message(ProcessedMediaStream* aStream, bool aAutofinish)
       : ControlMessage(aStream), mAutofinish(aAutofinish) {}
     void Run() override
     {
       static_cast<ProcessedMediaStream*>(mStream)->SetAutofinishImpl(mAutofinish);
@@ -3519,16 +3560,17 @@ MediaStreamGraphImpl::MediaStreamGraphIm
   , mPostedRunInStableStateEvent(false)
   , mDetectedNotRunning(false)
   , mPostedRunInStableState(false)
   , mRealtime(aDriverRequested != OFFLINE_THREAD_DRIVER)
   , mNonRealtimeProcessing(false)
   , mStreamOrderDirty(false)
   , mLatencyLog(AsyncLatencyLogger::Get())
   , mAbstractMainThread(aMainThread)
+  , mThreadPool(GetMediaThreadPool(MediaThreadType::MSG_CONTROL))
 #ifdef MOZ_WEBRTC
   , mFarendObserverRef(nullptr)
 #endif
   , mSelfRef(this)
   , mOutputChannels(std::min<uint32_t>(8, CubebUtils::MaxNumberOfChannels()))
 #ifdef DEBUG
   , mCanRunMessagesSynchronously(false)
 #endif
--- a/dom/media/MediaStreamGraph.h
+++ b/dom/media/MediaStreamGraph.h
@@ -7,16 +7,17 @@
 #define MOZILLA_MEDIASTREAMGRAPH_H_
 
 #include "AudioStream.h"
 #include "MainThreadUtils.h"
 #include "MediaStreamTypes.h"
 #include "StreamTracks.h"
 #include "VideoSegment.h"
 #include "mozilla/LinkedList.h"
+#include "mozilla/MozPromise.h"
 #include "mozilla/Mutex.h"
 #include "mozilla/TaskQueue.h"
 #include "nsAutoPtr.h"
 #include "nsAutoRef.h"
 #include "nsIRunnable.h"
 #include "nsTArray.h"
 #include <speex/speex_resampler.h>
 
@@ -500,17 +501,17 @@ public:
    * Convert stream time to graph time. This assumes there is no blocking time
    * to take account of, which is always true except between a stream
    * having its blocking time calculated in UpdateGraph and its blocking time
    * taken account of in UpdateCurrentTimeForStreams.
    */
   GraphTime StreamTimeToGraphTime(StreamTime aTime) const;
 
   bool IsFinishedOnGraphThread() const { return mFinished; }
-  void FinishOnGraphThread();
+  virtual void FinishOnGraphThread();
 
   bool HasCurrentData() const { return mHasCurrentData; }
 
   /**
    * Find track by track id.
    */
   StreamTracks::Track* FindTrack(TrackID aID) const;
 
@@ -621,16 +622,17 @@ protected:
    * Number of outstanding suspend operations on this stream. Stream is
    * suspended when this is > 0.
    */
   int32_t mSuspendedCount;
 
   /**
    * When true, this means the stream will be finished once all
    * buffered data has been consumed.
+   * Only accessed on the graph thread
    */
   bool mFinished;
   /**
    * When true, mFinished is true and we've played all the data in this stream
    * and fired NotifyFinished notifications.
    */
   bool mNotifiedFinished;
   /**
@@ -693,16 +695,29 @@ public:
    * gets called on MediaStreamListeners for this stream during the
    * MediaStreamGraph control loop. Pulling is initially disabled.
    * Due to unavoidable race conditions, after a call to SetPullEnabled(false)
    * it is still possible for a NotifyPull to occur.
    */
   void SetPullEnabled(bool aEnabled);
 
   /**
+   * Call all MediaStreamListeners to request new data via the NotifyPull API
+   * (if enabled).
+   */
+  typedef MozPromise<bool, bool, true /* is exclusive */ > NotifyPullPromise;
+  nsTArray<RefPtr<NotifyPullPromise>> PullNewData(StreamTime aDesiredUpToTime,
+                                                  bool* aEnsureNextIteration);
+
+  /**
+   * Extract any state updates pending in the stream, and apply them.
+   */
+  void ExtractPendingInput();
+
+  /**
    * These add/remove DirectListeners, which allow bypassing the graph and any
    * synchronization delays for e.g. PeerConnection, which wants the data ASAP
    * and lets the far-end handle sync and playout timing.
    */
   void NotifyListenersEventImpl(MediaStreamGraphEvent aEvent);
   void NotifyListenersEvent(MediaStreamGraphEvent aEvent);
 
   enum {
@@ -752,26 +767,27 @@ public:
    * Ignored if the track does not exist.
    */
   void EndTrack(TrackID aID);
   /**
    * Indicate that no tracks will be added starting before time aKnownTime.
    * aKnownTime must be >= its value at the last call to AdvanceKnownTracksTime.
    */
   void AdvanceKnownTracksTime(StreamTime aKnownTime);
+  void AdvanceKnownTracksTimeWithLockHeld(StreamTime aKnownTime);
   /**
    * Indicate that this stream should enter the "finished" state. All tracks
    * must have been ended via EndTrack. The finish time of the stream is
    * when all tracks have ended.
    */
-  void FinishWithLockHeld();
-  void Finish()
+  void FinishPendingWithLockHeld();
+  void FinishPending()
   {
     MutexAutoLock lock(mMutex);
-    FinishWithLockHeld();
+    FinishPendingWithLockHeld();
   }
 
   // Overriding allows us to hold the mMutex lock while changing the track enable status
   void SetTrackEnabledImpl(TrackID aTrackID, DisabledTrackMode aMode) override;
 
   // Overriding allows us to ensure mMutex is locked while changing the track enable status
   void
   ApplyTrackDisabling(TrackID aTrackID, MediaSegment* aSegment,
@@ -896,17 +912,17 @@ protected:
   // This time stamp will be updated in adding and blocked SourceMediaStream,
   // |AddStreamGraphThread| and |AdvanceTimeVaryingValuesToCurrentTime| in
   // particularly.
   TimeStamp mStreamTracksStartTimeStamp;
   nsTArray<TrackData> mUpdateTracks;
   nsTArray<TrackData> mPendingTracks;
   nsTArray<TrackBound<DirectMediaStreamTrackListener>> mDirectTrackListeners;
   bool mPullEnabled;
-  bool mUpdateFinished;
+  bool mFinishPending;
   bool mNeedsMixing;
 };
 
 /**
  * The blocking mode decides how a track should be blocked in a MediaInputPort.
  */
 enum class BlockingMode
 {
@@ -1153,25 +1169,26 @@ public:
   already_AddRefed<MediaInputPort>
   AllocateInputPort(MediaStream* aStream,
                     TrackID aTrackID = TRACK_ANY,
                     TrackID aDestTrackID = TRACK_ANY,
                     uint16_t aInputNumber = 0,
                     uint16_t aOutputNumber = 0,
                     nsTArray<TrackID>* aBlockedTracks = nullptr);
   /**
-   * Force this stream into the finished state.
+   * Queue a message to force this stream into the finished state.
    */
-  void Finish();
+  void QueueFinish();
   /**
-   * Set the autofinish flag on this stream (defaults to false). When this flag
-   * is set, and all input streams are in the finished state (including if there
-   * are no input streams), this stream automatically enters the finished state.
+   * Queue a message to set the autofinish flag on this stream (defaults to
+   * false). When this flag is set, and all input streams are in the finished
+   * state (including if there are no input streams), this stream automatically
+   * enters the finished state.
    */
-  void SetAutofinish(bool aAutofinish);
+  void QueueSetAutofinish(bool aAutofinish);
 
   ProcessedMediaStream* AsProcessedStream() override { return this; }
 
   friend class MediaStreamGraphImpl;
 
   // Do not call these from outside MediaStreamGraph.cpp!
   virtual void AddInput(MediaInputPort* aPort);
   virtual void RemoveInput(MediaInputPort* aPort)
--- a/dom/media/MediaStreamGraphImpl.h
+++ b/dom/media/MediaStreamGraphImpl.h
@@ -274,22 +274,16 @@ public:
     mMonitor.AssertCurrentThreadOwns();
     mFrontMessageQueue.SwapElements(mBackMessageQueue);
   }
   /**
    * Do all the processing and play the audio and video, from
    * mProcessedTime to mStateComputedTime.
    */
   void Process();
-  /**
-   * Extract any state updates pending in aStream, and apply them.
-   */
-  void ExtractPendingInput(SourceMediaStream* aStream,
-                           GraphTime aDesiredUpToTime,
-                           bool* aEnsureNextIteration);
 
   /**
    * For use during ProcessedMediaStream::ProcessInput() or
    * MediaStreamListener callbacks, when graph state cannot be changed.
    * Schedules |aMessage| to run after processing, at a time when graph state
    * can be changed.  Graph thread.
    */
   void RunMessageAfterProcessing(UniquePtr<ControlMessage> aMessage);
@@ -389,17 +383,16 @@ public:
    */
   void OpenAudioInputImpl(int aID,
                           AudioDataListener *aListener);
   virtual nsresult OpenAudioInput(int aID,
                                   AudioDataListener *aListener) override;
   void CloseAudioInputImpl(AudioDataListener *aListener);
   virtual void CloseAudioInput(AudioDataListener *aListener) override;
 
-  void FinishStream(MediaStream* aStream);
   /**
    * Compute how much stream data we would like to buffer for aStream.
    */
   StreamTime GetDesiredBufferEnd(MediaStream* aStream);
   /**
    * Returns true when there are no active streams.
    */
   bool IsEmpty() const
@@ -819,16 +812,17 @@ public:
    */
   bool mStreamOrderDirty;
   /**
    * Hold a ref to the Latency logger
    */
   RefPtr<AsyncLatencyLogger> mLatencyLog;
   AudioMixer mMixer;
   const RefPtr<AbstractThread> mAbstractMainThread;
+  RefPtr<SharedThreadPool> mThreadPool;
 #ifdef MOZ_WEBRTC
   RefPtr<AudioOutputObserver> mFarendObserverRef;
 #endif
 
   // used to limit graph shutdown time
   // Only accessed on the main thread.
   nsCOMPtr<nsITimer> mShutdownTimer;
 
--- a/dom/media/MediaStreamListener.h
+++ b/dom/media/MediaStreamListener.h
@@ -34,17 +34,18 @@ class VideoSegment;
  * You should do something non-blocking and non-reentrant (e.g. dispatch an
  * event to some thread) and return.
  * The listener is not allowed to add/remove any listeners from the stream.
  *
  * When a listener is first attached, we guarantee to send a NotifyBlockingChanged
  * callback to notify of the initial blocking state. Also, if a listener is
  * attached to a stream that has already finished, we'll call NotifyFinished.
  */
-class MediaStreamListener {
+class MediaStreamListener
+{
 protected:
   // Protected destructor, to discourage deletion outside of Release():
   virtual ~MediaStreamListener() {}
 
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaStreamListener)
 
   /**
@@ -55,16 +56,24 @@ public:
    * calls to add or remove MediaStreamListeners. It is not allowed to block
    * for any length of time.
    * aDesiredTime is the stream time we would like to get data up to. Data
    * beyond this point will not be played until NotifyPull runs again, so there's
    * not much point in providing it. Note that if the stream is blocked for
    * some reason, then data before aDesiredTime may not be played immediately.
    */
   virtual void NotifyPull(MediaStreamGraph* aGraph, StreamTime aDesiredTime) {}
+  virtual RefPtr<SourceMediaStream::NotifyPullPromise> AsyncNotifyPull(
+    MediaStreamGraph* aGraph,
+    StreamTime aDesiredTime)
+  {
+    NotifyPull(aGraph, aDesiredTime);
+    return SourceMediaStream::NotifyPullPromise::CreateAndResolve(true,
+                                                                  __func__);
+  }
 
   enum Blocking {
     BLOCKED,
     UNBLOCKED
   };
   /**
    * Notify that the blocking status of the stream changed. The initial state
    * is assumed to be BLOCKED.
--- a/dom/media/VideoUtils.cpp
+++ b/dom/media/VideoUtils.cpp
@@ -198,16 +198,22 @@ IsValidVideoRegion(const gfx::IntSize& a
 
 already_AddRefed<SharedThreadPool> GetMediaThreadPool(MediaThreadType aType)
 {
   const char *name;
   switch (aType) {
     case MediaThreadType::PLATFORM_DECODER:
       name = "MediaPDecoder";
       break;
+    case MediaThreadType::MSG_CONTROL:
+      name = "MSGControl";
+      break;
+    case MediaThreadType::WEBRTC_DECODER:
+      name = "WebRTCPD";
+      break;
     default:
       MOZ_FALLTHROUGH_ASSERT("Unexpected MediaThreadType");
     case MediaThreadType::PLAYBACK:
       name = "MediaPlayback";
       break;
   }
 
   static const uint32_t kMediaThreadPoolDefaultCount = 4;
--- a/dom/media/VideoUtils.h
+++ b/dom/media/VideoUtils.h
@@ -13,16 +13,17 @@
 #include "VideoLimits.h"
 #include "mozilla/gfx/Point.h" // for gfx::IntSize
 #include "mozilla/AbstractThread.h"
 #include "mozilla/Attributes.h"
 #include "mozilla/CheckedInt.h"
 #include "mozilla/MozPromise.h"
 #include "mozilla/ReentrantMonitor.h"
 #include "mozilla/RefPtr.h"
+#include "mozilla/SharedThreadPool.h"
 #include "mozilla/UniquePtr.h"
 #include "nsAutoPtr.h"
 #include "nsCOMPtr.h"
 #include "nsINamed.h"
 #include "nsIThread.h"
 #include "nsITimer.h"
 
 #include "nsThreadUtils.h"
@@ -180,30 +181,21 @@ public:
   ~AutoSetOnScopeExit() {
     mVar = mValue;
   }
 private:
   T& mVar;
   const T mValue;
 };
 
-class SharedThreadPool;
-
-// The MediaDataDecoder API blocks, with implementations waiting on platform
-// decoder tasks.  These platform decoder tasks are queued on a separate
-// thread pool to ensure they can run when the MediaDataDecoder clients'
-// thread pool is blocked.  Tasks on the PLATFORM_DECODER thread pool must not
-// wait on tasks in the PLAYBACK thread pool.
-//
-// No new dependencies on this mechanism should be added, as methods are being
-// made async supported by MozPromise, making this unnecessary and
-// permitting unifying the pool.
 enum class MediaThreadType {
   PLAYBACK, // MediaDecoderStateMachine and MediaFormatReader
-  PLATFORM_DECODER
+  PLATFORM_DECODER, // MediaDataDecoder
+  MSG_CONTROL,
+  WEBRTC_DECODER
 };
 // Returns the thread pool that is shared amongst all decoder state machines
 // for decoding streams.
 already_AddRefed<SharedThreadPool> GetMediaThreadPool(MediaThreadType aType);
 
 enum H264_PROFILE {
   H264_PROFILE_UNKNOWN                     = 0,
   H264_PROFILE_BASE                        = 0x42,
--- a/dom/media/ipc/VideoDecoderManagerParent.cpp
+++ b/dom/media/ipc/VideoDecoderManagerParent.cpp
@@ -1,26 +1,26 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=99: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 #include "VideoDecoderManagerParent.h"
 #include "VideoDecoderParent.h"
+#include "VideoUtils.h"
 #include "base/thread.h"
 #include "mozilla/UniquePtr.h"
 #include "mozilla/Services.h"
 #include "mozilla/Observer.h"
 #include "nsIObserverService.h"
 #include "nsIObserver.h"
 #include "nsIEventTarget.h"
 #include "nsThreadUtils.h"
 #include "ImageContainer.h"
 #include "mozilla/layers/VideoBridgeChild.h"
-#include "mozilla/SharedThreadPool.h"
 #include "mozilla/layers/ImageDataSerializer.h"
 #include "mozilla/SyncRunnable.h"
 
 #if XP_WIN
 #include <objbase.h>
 #endif
 
 namespace mozilla {
@@ -205,17 +205,17 @@ VideoDecoderManagerParent::AllocPVideoDe
                                                     const float& aFramerate,
                                                     const layers::TextureFactoryIdentifier& aIdentifier,
                                                     bool* aSuccess,
                                                     nsCString* aBlacklistedD3D11Driver,
                                                     nsCString* aBlacklistedD3D9Driver,
                                                     nsCString* aErrorDescription)
 {
   RefPtr<TaskQueue> decodeTaskQueue = new TaskQueue(
-    SharedThreadPool::Get(NS_LITERAL_CSTRING("VideoDecoderParent"), 4),
+    GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER),
     "VideoDecoderParent::mDecodeTaskQueue");
 
   auto* parent = new VideoDecoderParent(
     this, aVideoInfo, aFramerate, aIdentifier,
     sManagerTaskQueue, decodeTaskQueue, aSuccess, aErrorDescription);
 
 #ifdef XP_WIN
   *aBlacklistedD3D11Driver = GetFoundD3D11BlacklistedDLL();
--- a/dom/media/mediasink/DecodedStream.cpp
+++ b/dom/media/mediasink/DecodedStream.cpp
@@ -707,17 +707,17 @@ DecodedStream::SendData()
   SendVideo(mSameOrigin, mPrincipalHandle);
   AdvanceTracks();
 
   bool finished = (!mInfo.HasAudio() || mAudioQueue.IsFinished()) &&
                   (!mInfo.HasVideo() || mVideoQueue.IsFinished());
 
   if (finished && !mData->mHaveSentFinish) {
     mData->mHaveSentFinish = true;
-    mData->mStream->Finish();
+    mData->mStream->FinishPending();
   }
 }
 
 TimeUnit
 DecodedStream::GetEndTime(TrackType aType) const
 {
   AssertOwnerThread();
   if (aType == TrackInfo::kAudioTrack && mInfo.HasAudio() && mData) {
--- a/dom/media/mediasink/OutputStreamManager.cpp
+++ b/dom/media/mediasink/OutputStreamManager.cpp
@@ -75,17 +75,17 @@ void
 OutputStreamManager::Add(ProcessedMediaStream* aStream, bool aFinishWhenEnded)
 {
   MOZ_ASSERT(NS_IsMainThread());
   // All streams must belong to the same graph.
   MOZ_ASSERT(!Graph() || Graph() == aStream->Graph());
 
   // Ensure that aStream finishes the moment mDecodedStream does.
   if (aFinishWhenEnded) {
-    aStream->SetAutofinish(true);
+    aStream->QueueSetAutofinish(true);
   }
 
   OutputStreamData* p = mStreams.AppendElement();
   p->Init(this, aStream);
 
   // Connect to the input stream if we have one. Otherwise the output stream
   // will be connected in Connect().
   if (mInputStream) {
--- a/dom/media/mediasource/AutoTaskQueue.h
+++ b/dom/media/mediasource/AutoTaskQueue.h
@@ -3,33 +3,34 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MOZILLA_AUTOTASKQUEUE_H_
 #define MOZILLA_AUTOTASKQUEUE_H_
 
 #include "mozilla/RefPtr.h"
-#include "mozilla/SharedThreadPool.h"
 #include "mozilla/SystemGroup.h"
 #include "mozilla/TaskQueue.h"
 
+class nsIEventTarget;
+
 namespace mozilla {
 
 // A convenience TaskQueue not requiring explicit shutdown.
 class AutoTaskQueue : public AbstractThread
 {
 public:
-  explicit AutoTaskQueue(already_AddRefed<SharedThreadPool> aPool,
+  explicit AutoTaskQueue(already_AddRefed<nsIEventTarget> aPool,
                          bool aSupportsTailDispatch = false)
   : AbstractThread(aSupportsTailDispatch)
   , mTaskQueue(new TaskQueue(Move(aPool), aSupportsTailDispatch))
   {}
 
-  AutoTaskQueue(already_AddRefed<SharedThreadPool> aPool,
+  AutoTaskQueue(already_AddRefed<nsIEventTarget> aPool,
                 const char* aName,
                 bool aSupportsTailDispatch = false)
   : AbstractThread(aSupportsTailDispatch)
   , mTaskQueue(new TaskQueue(Move(aPool), aName, aSupportsTailDispatch))
   {}
 
   TaskDispatcher& TailDispatcher() override
   {
--- a/dom/media/mediasource/MediaSourceDemuxer.cpp
+++ b/dom/media/mediasource/MediaSourceDemuxer.cpp
@@ -5,16 +5,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "MediaSourceDemuxer.h"
 
 #include "MediaSourceUtils.h"
 #include "OpusDecoder.h"
 #include "SourceBufferList.h"
 #include "VorbisDecoder.h"
+#include "VideoUtils.h"
 #include "nsPrintfCString.h"
 
 #include <algorithm>
 #include <limits>
 #include <stdint.h>
 
 namespace mozilla {
 
--- a/dom/media/systemservices/MediaUtils.h
+++ b/dom/media/systemservices/MediaUtils.h
@@ -2,23 +2,30 @@
 /* vim: set sw=2 ts=8 et ft=cpp : */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef mozilla_MediaUtils_h
 #define mozilla_MediaUtils_h
 
+#include "AutoTaskQueue.h"
 #include "mozilla/Assertions.h"
+#include "mozilla/Monitor.h"
+#include "mozilla/MozPromise.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/SharedThreadPool.h"
 #include "mozilla/UniquePtr.h"
 #include "nsCOMPtr.h"
 #include "nsIAsyncShutdown.h"
 #include "nsISupportsImpl.h"
 #include "nsThreadUtils.h"
 
+class nsIEventTarget;
+
 namespace mozilla {
 namespace media {
 
 /*
  * media::Pledge - A promise-like pattern for c++ that takes lambda functions.
  *
  * Asynchronous APIs that proxy to another thread or to the chrome process and
  * back may find it useful to return a pledge to callers who then use
@@ -405,12 +412,134 @@ private:
   {
     nsCOMPtr<nsIAsyncShutdownClient> barrier = GetShutdownBarrier();
     barrier->RemoveBlocker(mBlocker);
   }
 
   nsCOMPtr<nsIAsyncShutdownBlocker> mBlocker;
 };
 
+/**
+ * Await convenience methods to block until the promise has been resolved or
+ * rejected. The Resolve/Reject functions, while called on a different thread,
+ * would be running just as on the current thread thanks to the memory barrier
+ * provided by the monitor.
+ * For now Await can only be used with an exclusive MozPromise if passed a
+ * Resolve/Reject function.
+ */
+template<typename ResolveValueType,
+         typename RejectValueType,
+         typename ResolveFunction,
+         typename RejectFunction>
+void
+Await(
+  already_AddRefed<nsIEventTarget> aPool,
+  RefPtr<MozPromise<ResolveValueType, RejectValueType, true>> aPromise,
+  ResolveFunction&& aResolveFunction,
+  RejectFunction&& aRejectFunction)
+{
+  Monitor mon(__func__);
+  RefPtr<AutoTaskQueue> taskQueue =
+    new AutoTaskQueue(Move(aPool), "MozPromiseAwait");
+  bool done = false;
+
+  aPromise->Then(taskQueue,
+                 __func__,
+                 [&](ResolveValueType&& aResolveValue) {
+                   MonitorAutoLock lock(mon);
+                   aResolveFunction(Forward<ResolveValueType>(aResolveValue));
+                   done = true;
+                   mon.Notify();
+                 },
+                 [&](RejectValueType&& aRejectValue) {
+                   MonitorAutoLock lock(mon);
+                   aRejectFunction(Forward<RejectValueType>(aRejectValue));
+                   done = true;
+                   mon.Notify();
+                 });
+
+  MonitorAutoLock lock(mon);
+  while (!done) {
+    mon.Wait();
+  }
+}
+
+template<typename ResolveValueType, typename RejectValueType, bool Excl>
+typename MozPromise<ResolveValueType, RejectValueType, Excl>::
+  ResolveOrRejectValue
+Await(already_AddRefed<nsIEventTarget> aPool,
+      RefPtr<MozPromise<ResolveValueType, RejectValueType, Excl>> aPromise)
+{
+  Monitor mon(__func__);
+  RefPtr<AutoTaskQueue> taskQueue =
+    new AutoTaskQueue(Move(aPool), "MozPromiseAwait");
+  bool done = false;
+
+  typename MozPromise<ResolveValueType, RejectValueType, Excl>::ResolveOrRejectValue val;
+  aPromise->Then(taskQueue,
+                 __func__,
+                 [&](ResolveValueType aResolveValue) {
+                   val.SetResolve(Move(aResolveValue));
+                   MonitorAutoLock lock(mon);
+                   done = true;
+                   mon.Notify();
+                 },
+                 [&](RejectValueType aRejectValue) {
+                   val.SetReject(Move(aRejectValue));
+                   MonitorAutoLock lock(mon);
+                   done = true;
+                   mon.Notify();
+                 });
+
+  MonitorAutoLock lock(mon);
+  while (!done) {
+    mon.Wait();
+  }
+
+  return val;
+}
+
+/**
+ * Similar to Await, takes an array of promises of the same type.
+ * MozPromise::All is used to handle the resolution/rejection of the promises.
+ */
+template<typename ResolveValueType,
+         typename RejectValueType,
+         typename ResolveFunction,
+         typename RejectFunction>
+void
+AwaitAll(already_AddRefed<nsIEventTarget> aPool,
+         nsTArray<RefPtr<MozPromise<ResolveValueType, RejectValueType, true>>>&
+           aPromises,
+         ResolveFunction&& aResolveFunction,
+         RejectFunction&& aRejectFunction)
+{
+  typedef MozPromise<ResolveValueType, RejectValueType, true> Promise;
+  RefPtr<nsIEventTarget> pool = aPool;
+  RefPtr<AutoTaskQueue> taskQueue =
+    new AutoTaskQueue(do_AddRef(pool), "MozPromiseAwaitAll");
+  RefPtr<typename Promise::AllPromiseType> p = Promise::All(taskQueue, aPromises);
+  Await(pool.forget(), p, Move(aResolveFunction), Move(aRejectFunction));
+}
+
+// Note: only works with exclusive MozPromise, as Promise::All would attempt
+// to perform copy of nsTArrays which are disallowed.
+template<typename ResolveValueType, typename RejectValueType>
+typename MozPromise<ResolveValueType,
+                    RejectValueType,
+                    true>::AllPromiseType::ResolveOrRejectValue
+AwaitAll(already_AddRefed<nsIEventTarget> aPool,
+         nsTArray<RefPtr<MozPromise<ResolveValueType, RejectValueType, true>>>&
+           aPromises)
+{
+  typedef MozPromise<ResolveValueType, RejectValueType, true> Promise;
+  RefPtr<nsIEventTarget> pool = aPool;
+  RefPtr<AutoTaskQueue> taskQueue =
+    new AutoTaskQueue(do_AddRef(pool), "MozPromiseAwaitAll");
+  RefPtr<typename Promise::AllPromiseType> p =
+    Promise::All(taskQueue, aPromises);
+  return Await(pool.forget(), p);
+}
+
 } // namespace media
 } // namespace mozilla
 
 #endif // mozilla_MediaUtils_h
--- a/editor/libeditor/HTMLAnonymousNodeEditor.cpp
+++ b/editor/libeditor/HTMLAnonymousNodeEditor.cpp
@@ -358,36 +358,32 @@ HTMLEditor::CheckSelectionStateForAnonym
   if (mIsAbsolutelyPositioningEnabled) {
     // Absolute Positioning support is enabled, is the selection contained
     // in an absolutely positioned element ?
     rv =
       GetAbsolutelyPositionedSelectionContainer(getter_AddRefs(absPosElement));
     NS_ENSURE_SUCCESS(rv, rv);
   }
 
-  nsCOMPtr<nsIDOMElement> cellElement;
+  RefPtr<Element> cellElement;
   if (mIsObjectResizingEnabled || mIsInlineTableEditingEnabled) {
     // Resizing or Inline Table Editing is enabled, we need to check if the
     // selection is contained in a table cell
-    rv = GetElementOrParentByTagName(NS_LITERAL_STRING("td"),
-                                     nullptr,
-                                     getter_AddRefs(cellElement));
-    NS_ENSURE_SUCCESS(rv, rv);
+    cellElement = GetElementOrParentByTagName(NS_LITERAL_STRING("td"), nullptr);
   }
 
   if (mIsObjectResizingEnabled && cellElement) {
     // we are here because Resizing is enabled AND selection is contained in
     // a cell
 
     // get the enclosing table
     if (nsGkAtoms::img != focusTagAtom) {
       // the element container of the selection is not an image, so we'll show
       // the resizers around the table
-      nsCOMPtr<nsIDOMNode> tableNode = GetEnclosingTable(cellElement);
-      focusElement = do_QueryInterface(tableNode);
+      focusElement = do_QueryInterface(GetEnclosingTable(cellElement));
       focusTagAtom = nsGkAtoms::table;
     }
   }
 
   // we allow resizers only around images, tables, and absolutely positioned
   // elements. If we don't have image/table, let's look at the latter case.
   if (nsGkAtoms::img != focusTagAtom && nsGkAtoms::table != focusTagAtom) {
     focusElement = absPosElement;
@@ -455,17 +451,17 @@ HTMLEditor::CheckSelectionStateForAnonym
       nsresult rv = ShowGrabberOnElement(absPosElement);
       if (NS_WARN_IF(NS_FAILED(rv))) {
         return rv;
       }
     }
   }
 
   if (mIsInlineTableEditingEnabled && cellElement &&
-      IsModifiableNode(cellElement) && cellElement != hostNode) {
+      IsModifiableNode(cellElement) && cellElement != hostContent) {
     if (mInlineEditedCell) {
       nsresult rv = RefreshInlineTableEditingUI();
       if (NS_WARN_IF(NS_FAILED(rv))) {
         return rv;
       }
     } else {
       nsresult rv = ShowInlineTableEditingUI(cellElement);
       if (NS_WARN_IF(NS_FAILED(rv))) {
--- a/editor/libeditor/HTMLEditor.h
+++ b/editor/libeditor/HTMLEditor.h
@@ -584,16 +584,19 @@ protected:
                            int32_t& aNewColCount);
 
   /**
    * Fallback method: Call this after using ClearSelection() and you
    * failed to set selection to some other content in the document.
    */
   nsresult SetSelectionAtDocumentStart(Selection* aSelection);
 
+  nsresult GetTableSize(Element* aTable,
+                        int32_t* aRowCount, int32_t* aColCount);
+
   // End of Table Editing utilities
 
   static Element* GetEnclosingTable(nsINode* aNode);
   static nsIDOMNode* GetEnclosingTable(nsIDOMNode* aNode);
 
   /**
    * Content-based query returns true if <aProperty aAttribute=aValue> effects
    * aNode.  If <aProperty aAttribute=aValue> contains aNode, but
@@ -1093,26 +1096,28 @@ protected:
   void AddPositioningOffset(int32_t& aX, int32_t& aY);
   void SnapToGrid(int32_t& newX, int32_t& newY);
   nsresult GrabberClicked();
   nsresult EndMoving();
   nsresult CheckPositionedElementBGandFG(nsIDOMElement* aElement,
                                          nsAString& aReturn);
 
   // inline table editing
-  nsCOMPtr<nsIDOMElement> mInlineEditedCell;
+  RefPtr<Element> mInlineEditedCell;
 
   ManualNACPtr mAddColumnBeforeButton;
   ManualNACPtr mRemoveColumnButton;
   ManualNACPtr mAddColumnAfterButton;
 
   ManualNACPtr mAddRowBeforeButton;
   ManualNACPtr mRemoveRowButton;
   ManualNACPtr mAddRowAfterButton;
 
+  nsresult ShowInlineTableEditingUI(Element* aCell);
+
   void AddMouseClickListener(Element* aElement);
   void RemoveMouseClickListener(Element* aElement);
 
   nsCOMPtr<nsILinkHandler> mLinkHandler;
 
   ParagraphSeparator mDefaultParagraphSeparator;
 
 public:
--- a/editor/libeditor/HTMLInlineTableEditor.cpp
+++ b/editor/libeditor/HTMLInlineTableEditor.cpp
@@ -40,25 +40,29 @@ HTMLEditor::GetInlineTableEditingEnabled
 {
   *aIsEnabled = mIsInlineTableEditingEnabled;
   return NS_OK;
 }
 
 NS_IMETHODIMP
 HTMLEditor::ShowInlineTableEditingUI(nsIDOMElement* aCell)
 {
-  NS_ENSURE_ARG_POINTER(aCell);
+  nsCOMPtr<Element> cell = do_QueryInterface(aCell);
+  return ShowInlineTableEditingUI(cell);
+}
 
+nsresult
+HTMLEditor::ShowInlineTableEditingUI(Element* aCell)
+{
   // do nothing if aCell is not a table cell...
-  nsCOMPtr<Element> cell = do_QueryInterface(aCell);
-  if (!cell || !HTMLEditUtils::IsTableCell(cell)) {
+  if (!aCell || !HTMLEditUtils::IsTableCell(aCell)) {
     return NS_OK;
   }
 
-  if (NS_WARN_IF(!IsDescendantOfEditorRoot(cell))) {
+  if (NS_WARN_IF(!IsDescendantOfEditorRoot(aCell))) {
     return NS_ERROR_UNEXPECTED;
   }
 
   if (mInlineEditedCell) {
     NS_ERROR("call HideInlineTableEditingUI first");
     return NS_ERROR_UNEXPECTED;
   }
 
@@ -136,24 +140,23 @@ HTMLEditor::DoInlineTableEditingAction(n
     nsAutoString anonclass;
     nsresult rv =
       aElement->GetAttribute(NS_LITERAL_STRING("_moz_anonclass"), anonclass);
     NS_ENSURE_SUCCESS(rv, rv);
 
     if (!StringBeginsWith(anonclass, NS_LITERAL_STRING("mozTable")))
       return NS_OK;
 
-    nsCOMPtr<nsIDOMNode> tableNode = GetEnclosingTable(mInlineEditedCell);
-    nsCOMPtr<nsIDOMElement> tableElement = do_QueryInterface(tableNode);
+    RefPtr<Element> tableElement = GetEnclosingTable(mInlineEditedCell);
     int32_t rowCount, colCount;
     rv = GetTableSize(tableElement, &rowCount, &colCount);
     NS_ENSURE_SUCCESS(rv, rv);
 
     bool hideUI = false;
-    bool hideResizersWithInlineTableUI = (GetAsDOMNode(mResizedObject) == tableElement);
+    bool hideResizersWithInlineTableUI = (mResizedObject == tableElement);
 
     if (anonclass.EqualsLiteral("mozTableAddColumnBefore"))
       InsertTableColumn(1, false);
     else if (anonclass.EqualsLiteral("mozTableAddColumnAfter"))
       InsertTableColumn(1, true);
     else if (anonclass.EqualsLiteral("mozTableAddRowBefore"))
       InsertTableRow(1, false);
     else if (anonclass.EqualsLiteral("mozTableAddRowAfter"))
@@ -206,38 +209,37 @@ HTMLEditor::RemoveMouseClickListener(Ele
     evtTarget->RemoveEventListener(NS_LITERAL_STRING("click"),
                                    mEventListener, true);
   }
 }
 
 NS_IMETHODIMP
 HTMLEditor::RefreshInlineTableEditingUI()
 {
+  if (!mInlineEditedCell) {
+   return NS_OK;
+  }
+
   nsCOMPtr<nsIDOMHTMLElement> htmlElement = do_QueryInterface(mInlineEditedCell);
   if (!htmlElement) {
     return NS_ERROR_NULL_POINTER;
   }
 
   int32_t xCell, yCell, wCell, hCell;
-  nsCOMPtr<Element> element = do_QueryInterface(mInlineEditedCell);
-  if (NS_WARN_IF(!element)) {
-   return NS_ERROR_FAILURE;
-  }
-  GetElementOrigin(*element, xCell, yCell);
+  GetElementOrigin(*mInlineEditedCell, xCell, yCell);
 
   nsresult rv = htmlElement->GetOffsetWidth(&wCell);
   NS_ENSURE_SUCCESS(rv, rv);
   rv = htmlElement->GetOffsetHeight(&hCell);
   NS_ENSURE_SUCCESS(rv, rv);
 
   int32_t xHoriz = xCell + wCell/2;
   int32_t yVert  = yCell + hCell/2;
 
-  nsCOMPtr<nsIDOMNode> tableNode = GetEnclosingTable(mInlineEditedCell);
-  nsCOMPtr<nsIDOMElement> tableElement = do_QueryInterface(tableNode);
+  RefPtr<Element> tableElement = GetEnclosingTable(mInlineEditedCell);
   int32_t rowCount, colCount;
   rv = GetTableSize(tableElement, &rowCount, &colCount);
   NS_ENSURE_SUCCESS(rv, rv);
 
   SetAnonymousElementPosition(xHoriz-10, yCell-7,  mAddColumnBeforeButton);
 #ifdef DISABLE_TABLE_DELETION
   if (colCount== 1) {
     mRemoveColumnButton->SetAttr(kNameSpaceID_None, nsGkAtoms::_class,
--- a/editor/libeditor/HTMLTableEditor.cpp
+++ b/editor/libeditor/HTMLTableEditor.cpp
@@ -2650,28 +2650,35 @@ HTMLEditor::GetNumberOfCellsInRow(nsIDOM
   return cellCount;
 }
 
 NS_IMETHODIMP
 HTMLEditor::GetTableSize(nsIDOMElement* aTable,
                          int32_t* aRowCount,
                          int32_t* aColCount)
 {
+  nsCOMPtr<Element> table = do_QueryInterface(aTable);
+  return GetTableSize(table, aRowCount, aColCount);
+}
+
+nsresult
+HTMLEditor::GetTableSize(Element* aTable,
+                         int32_t* aRowCount,
+                         int32_t* aColCount)
+{
   NS_ENSURE_ARG_POINTER(aRowCount);
   NS_ENSURE_ARG_POINTER(aColCount);
   *aRowCount = 0;
   *aColCount = 0;
-  nsCOMPtr<nsIDOMElement> table;
   // Get the selected talbe or the table enclosing the selection anchor
-  nsresult rv = GetElementOrParentByTagName(NS_LITERAL_STRING("table"), aTable,
-                                            getter_AddRefs(table));
-  NS_ENSURE_SUCCESS(rv, rv);
+  RefPtr<Element> table =
+    GetElementOrParentByTagName(NS_LITERAL_STRING("table"), aTable);
   NS_ENSURE_TRUE(table, NS_ERROR_FAILURE);
 
-  nsTableWrapperFrame* tableFrame = GetTableFrame(table.get());
+  nsTableWrapperFrame* tableFrame = do_QueryFrame(table->GetPrimaryFrame());
   NS_ENSURE_TRUE(tableFrame, NS_ERROR_FAILURE);
 
   *aRowCount = tableFrame->GetRowCount();
   *aColCount = tableFrame->GetColCount();
 
   return NS_OK;
 }
 
--- a/gfx/layers/D3D11ShareHandleImage.cpp
+++ b/gfx/layers/D3D11ShareHandleImage.cpp
@@ -54,17 +54,17 @@ D3D11ShareHandleImage::AllocateTexture(D
     newDesc.MiscFlags = D3D11_RESOURCE_MISC_SHARED;
 
     HRESULT hr = aDevice->CreateTexture2D(&newDesc, nullptr, getter_AddRefs(mTexture));
     return SUCCEEDED(hr);
   }
 }
 
 gfx::IntSize
-D3D11ShareHandleImage::GetSize()
+D3D11ShareHandleImage::GetSize() const
 {
   return mSize;
 }
 
 TextureClient*
 D3D11ShareHandleImage::GetTextureClient(KnowsCompositor* aForwarder)
 {
   return mTextureClient;
--- a/gfx/layers/D3D11ShareHandleImage.h
+++ b/gfx/layers/D3D11ShareHandleImage.h
@@ -26,42 +26,42 @@ public:
     , mDevice(aDevice)
   {}
 
   already_AddRefed<TextureClient>
   CreateOrRecycleClient(gfx::SurfaceFormat aFormat,
                         const gfx::IntSize& aSize);
 
 protected:
-  virtual already_AddRefed<TextureClient>
-  Allocate(gfx::SurfaceFormat aFormat,
-           gfx::IntSize aSize,
-           BackendSelector aSelector,
-           TextureFlags aTextureFlags,
-           TextureAllocationFlags aAllocFlags) override;
+  virtual already_AddRefed<TextureClient> Allocate(
+    gfx::SurfaceFormat aFormat,
+    gfx::IntSize aSize,
+    BackendSelector aSelector,
+    TextureFlags aTextureFlags,
+    TextureAllocationFlags aAllocFlags) override;
 
   RefPtr<ID3D11Device> mDevice;
 };
 
 // Image class that wraps a ID3D11Texture2D. This class copies the image
 // passed into SetData(), so that it can be accessed from other D3D devices.
 // This class also manages the synchronization of the copy, to ensure the
 // resource is ready to use.
 class D3D11ShareHandleImage final : public Image {
 public:
   D3D11ShareHandleImage(const gfx::IntSize& aSize,
                         const gfx::IntRect& aRect);
-  ~D3D11ShareHandleImage() override {}
+  virtual ~D3D11ShareHandleImage() {}
 
   bool AllocateTexture(D3D11RecycleAllocator* aAllocator, ID3D11Device* aDevice);
 
-  gfx::IntSize GetSize() override;
-  virtual already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override;
-  virtual TextureClient* GetTextureClient(KnowsCompositor* aForwarder) override;
-  virtual gfx::IntRect GetPictureRect() override { return mPictureRect; }
+  gfx::IntSize GetSize() const override;
+  already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override;
+  TextureClient* GetTextureClient(KnowsCompositor* aForwarder) override;
+  gfx::IntRect GetPictureRect() const override { return mPictureRect; }
 
   ID3D11Texture2D* GetTexture() const;
 
 private:
   gfx::IntSize mSize;
   gfx::IntRect mPictureRect;
   RefPtr<TextureClient> mTextureClient;
   RefPtr<ID3D11Texture2D> mTexture;
--- a/gfx/layers/D3D11YCbCrImage.cpp
+++ b/gfx/layers/D3D11YCbCrImage.cpp
@@ -118,17 +118,17 @@ D3D11YCbCrImage::SetData(KnowsCompositor
                          aData.mCbCrStride,
                          aData.mCbCrStride * aData.mCbCrSize.height);
 
 
   return true;
 }
 
 IntSize
-D3D11YCbCrImage::GetSize()
+D3D11YCbCrImage::GetSize() const
 {
   return mPictureRect.Size();
 }
 
 TextureClient*
 D3D11YCbCrImage::GetTextureClient(KnowsCompositor* aForwarder)
 {
   return mTextureClient;
--- a/gfx/layers/D3D11YCbCrImage.h
+++ b/gfx/layers/D3D11YCbCrImage.h
@@ -27,18 +27,18 @@ class D3D11YCbCrRecycleAllocator : publi
 public:
   explicit D3D11YCbCrRecycleAllocator(KnowsCompositor* aAllocator,
                                       ID3D11Device* aDevice)
     : TextureClientRecycleAllocator(aAllocator)
     , mDevice(aDevice)
   {
   }
 
-  ID3D11Device* GetDevice() { return mDevice; }
-  KnowsCompositor* GetAllocator() { return mSurfaceAllocator; }
+  ID3D11Device* GetDevice() const { return mDevice; }
+  KnowsCompositor* GetAllocator() const { return mSurfaceAllocator; }
 
 protected:
   already_AddRefed<TextureClient>
   Allocate(gfx::SurfaceFormat aFormat,
            gfx::IntSize aSize,
            BackendSelector aSelector,
            TextureFlags aTextureFlags,
            TextureAllocationFlags aAllocFlags) override;
@@ -54,23 +54,23 @@ public:
   virtual ~D3D11YCbCrImage();
 
   // Copies the surface into a sharable texture's surface, and initializes
   // the image.
   bool SetData(KnowsCompositor* aAllocator,
                ImageContainer* aContainer,
                const PlanarYCbCrData& aData);
 
-  gfx::IntSize GetSize() override;
+  gfx::IntSize GetSize() const override;
 
   already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override;
 
   TextureClient* GetTextureClient(KnowsCompositor* aForwarder) override;
 
-  gfx::IntRect GetPictureRect() override { return mPictureRect; }
+  gfx::IntRect GetPictureRect() const override { return mPictureRect; }
 
 private:
   const DXGIYCbCrTextureData* GetData() const;
 
   gfx::IntSize mYSize;
   gfx::IntSize mCbCrSize;
   gfx::IntRect mPictureRect;
   YUVColorSpace mColorSpace;
--- a/gfx/layers/D3D9SurfaceImage.cpp
+++ b/gfx/layers/D3D9SurfaceImage.cpp
@@ -178,17 +178,17 @@ D3D9SurfaceImage::AllocateAndCopy(D3D9Re
   hr = device->StretchRect(surface, &src, textureSurface, nullptr, D3DTEXF_NONE);
   NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
 
   mSize = aRegion.Size();
   return S_OK;
 }
 
 already_AddRefed<IDirect3DSurface9>
-D3D9SurfaceImage::GetD3D9Surface()
+D3D9SurfaceImage::GetD3D9Surface() const
 {
   RefPtr<IDirect3DSurface9> textureSurface;
   HRESULT hr = mTexture->GetSurfaceLevel(0, getter_AddRefs(textureSurface));
   NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
   return textureSurface.forget();
 }
 
 const D3DSURFACE_DESC&
@@ -199,17 +199,17 @@ D3D9SurfaceImage::GetDesc() const
 
 HANDLE
 D3D9SurfaceImage::GetShareHandle() const
 {
   return mShareHandle;
 }
 
 gfx::IntSize
-D3D9SurfaceImage::GetSize()
+D3D9SurfaceImage::GetSize() const
 {
   return mSize;
 }
 
 TextureClient*
 D3D9SurfaceImage::GetTextureClient(KnowsCompositor* aForwarder)
 {
   MOZ_ASSERT(mTextureClient);
--- a/gfx/layers/D3D9SurfaceImage.h
+++ b/gfx/layers/D3D9SurfaceImage.h
@@ -88,44 +88,44 @@ protected:
   D3DSURFACE_DESC mDesc;
 };
 
 
 // Image class that wraps a IDirect3DSurface9. This class copies the image
 // passed into SetData(), so that it can be accessed from other D3D devices.
 // This class also manages the synchronization of the copy, to ensure the
 // resource is ready to use.
-class D3D9SurfaceImage : public Image {
+class D3D9SurfaceImage : public Image
+{
 public:
-  explicit D3D9SurfaceImage();
+  D3D9SurfaceImage();
   virtual ~D3D9SurfaceImage();
 
   HRESULT AllocateAndCopy(D3D9RecycleAllocator* aAllocator,
                           IDirect3DSurface9* aSurface,
                           const gfx::IntRect& aRegion);
 
   // Returns the description of the shared surface.
   const D3DSURFACE_DESC& GetDesc() const;
 
-  gfx::IntSize GetSize() override;
+  gfx::IntSize GetSize() const override;
 
-  virtual already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override;
+  already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override;
 
-  virtual TextureClient* GetTextureClient(KnowsCompositor* aForwarder) override;
+  TextureClient* GetTextureClient(KnowsCompositor* aForwarder) override;
 
-  already_AddRefed<IDirect3DSurface9> GetD3D9Surface();
+  already_AddRefed<IDirect3DSurface9> GetD3D9Surface() const;
 
   HANDLE GetShareHandle() const;
 
-  virtual bool IsValid() override { return mValid; }
+  bool IsValid() const override { return mValid; }
 
   void Invalidate() { mValid = false; }
 
 private:
-
   gfx::IntSize mSize;
   RefPtr<TextureClient> mTextureClient;
   RefPtr<IDirect3DTexture9> mTexture;
   HANDLE mShareHandle;
   D3DSURFACE_DESC mDesc;
   bool mValid;
 };
 
--- a/gfx/layers/GLImages.h
+++ b/gfx/layers/GLImages.h
@@ -13,56 +13,50 @@
 #include "ImageContainer.h"             // for Image
 #include "ImageTypes.h"                 // for ImageFormat::SHARED_GLTEXTURE
 #include "nsCOMPtr.h"                   // for already_AddRefed
 #include "mozilla/gfx/Point.h"          // for IntSize
 
 namespace mozilla {
 namespace layers {
 
-class GLImage : public Image {
+class GLImage : public Image
+{
 public:
   explicit GLImage(ImageFormat aFormat) : Image(nullptr, aFormat){}
 
-  virtual already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override;
+  already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override;
 
-  GLImage* AsGLImage() override {
-    return this;
-  }
+  GLImage* AsGLImage() override { return this; }
 };
 
 #ifdef MOZ_WIDGET_ANDROID
 
-class SurfaceTextureImage : public GLImage {
+class SurfaceTextureImage : public GLImage
+{
 public:
   SurfaceTextureImage(AndroidSurfaceTextureHandle aHandle,
                       const gfx::IntSize& aSize,
                       bool aContinuous,
                       gl::OriginPos aOriginPos);
 
-  gfx::IntSize GetSize() override { return mSize; }
-  AndroidSurfaceTextureHandle GetHandle() const {
-    return mHandle;
-  }
-  bool GetContinuous() const {
-    return mContinuous;
-  }
-  gl::OriginPos GetOriginPos() const {
-    return mOriginPos;
-  }
+  gfx::IntSize GetSize() const override { return mSize; }
+  AndroidSurfaceTextureHandle GetHandle() const { return mHandle; }
+  bool GetContinuous() const { return mContinuous; }
+  gl::OriginPos GetOriginPos() const { return mOriginPos; }
 
-  virtual already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override {
-    // We can implement this, but currently don't want to because it will cause the
-    // SurfaceTexture to be permanently bound to the snapshot readback context.
+  already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override
+  {
+    // We can implement this, but currently don't want to because it will cause
+    // the SurfaceTexture to be permanently bound to the snapshot readback
+    // context.
     return nullptr;
   }
 
-  SurfaceTextureImage* AsSurfaceTextureImage() override {
-    return this;
-  }
+  SurfaceTextureImage* AsSurfaceTextureImage() override { return this; }
 
 private:
   AndroidSurfaceTextureHandle mHandle;
   gfx::IntSize mSize;
   bool mContinuous;
   gl::OriginPos mOriginPos;
 };
 
--- a/gfx/layers/GPUVideoImage.h
+++ b/gfx/layers/GPUVideoImage.h
@@ -19,17 +19,18 @@ class VideoDecoderManagerChild;
 }
 namespace gl {
 class GLBlitHelper;
 }
 namespace layers {
 
 // Image class that refers to a decoded video frame within
 // the GPU process.
-class GPUVideoImage final : public Image {
+class GPUVideoImage final : public Image
+{
   friend class gl::GLBlitHelper;
 public:
   GPUVideoImage(dom::VideoDecoderManagerChild* aManager,
                 const SurfaceDescriptorGPUVideo& aSD,
                 const gfx::IntSize& aSize)
     : Image(nullptr, ImageFormat::GPU_VIDEO)
     , mSize(aSize)
   {
@@ -41,39 +42,40 @@ public:
     // it too, and we want to make sure we don't send the delete message
     // until we've stopped being used on the compositor.
     mTextureClient =
       TextureClient::CreateWithData(new GPUVideoTextureData(aManager, aSD, aSize),
                                     TextureFlags::RECYCLE,
                                     ImageBridgeChild::GetSingleton().get());
   }
 
-  ~GPUVideoImage() override {}
+  virtual ~GPUVideoImage() {}
 
-  gfx::IntSize GetSize() override { return mSize; }
+  gfx::IntSize GetSize() const override { return mSize; }
 
 private:
-  GPUVideoTextureData* GetData() const {
+  GPUVideoTextureData* GetData() const
+  {
     if (!mTextureClient) {
       return nullptr;
     }
     return mTextureClient->GetInternalData()->AsGPUVideoTextureData();
   }
 
 public:
-  virtual already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override
+  already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override
   {
     GPUVideoTextureData* data = GetData();
     if (!data) {
       return nullptr;
     }
     return data->GetAsSourceSurface();
   }
 
-  virtual TextureClient* GetTextureClient(KnowsCompositor* aForwarder) override
+  TextureClient* GetTextureClient(KnowsCompositor* aForwarder) override
   {
     MOZ_ASSERT(aForwarder == ImageBridgeChild::GetSingleton(), "Must only use GPUVideo on ImageBridge");
     return mTextureClient;
   }
 
 private:
   gfx::IntSize mSize;
   RefPtr<TextureClient> mTextureClient;
--- a/gfx/layers/IMFYCbCrImage.h
+++ b/gfx/layers/IMFYCbCrImage.h
@@ -15,26 +15,26 @@
 namespace mozilla {
 namespace layers {
 
 class IMFYCbCrImage : public RecyclingPlanarYCbCrImage
 {
 public:
   IMFYCbCrImage(IMFMediaBuffer* aBuffer, IMF2DBuffer* a2DBuffer);
 
-  virtual bool IsValid() { return true; }
+  bool IsValid() const override { return true; }
 
-  virtual TextureClient* GetTextureClient(KnowsCompositor* aForwarder) override;
+  TextureClient* GetTextureClient(KnowsCompositor* aForwarder) override;
 
   static DXGIYCbCrTextureData* GetD3D11TextureData(Data aData,
-                                                  gfx::IntSize aSize);
+                                                   gfx::IntSize aSize);
 protected:
   TextureClient* GetD3D11TextureClient(KnowsCompositor* aForwarder);
 
-  ~IMFYCbCrImage();
+  virtual ~IMFYCbCrImage();
 
   RefPtr<IMFMediaBuffer> mBuffer;
   RefPtr<IMF2DBuffer> m2DBuffer;
   RefPtr<TextureClient> mTextureClient;
 };
 
 } // namepace layers
 } // namespace mozilla
--- a/gfx/layers/ImageContainer.cpp
+++ b/gfx/layers/ImageContainer.cpp
@@ -595,17 +595,17 @@ RecyclingPlanarYCbCrImage::CopyData(cons
             mData.mCbCrSize, mData.mCbCrStride, mData.mCrSkip);
 
   mSize = aData.mPicSize;
   mOrigin = gfx::IntPoint(aData.mPicX, aData.mPicY);
   return true;
 }
 
 gfxImageFormat
-PlanarYCbCrImage::GetOffscreenFormat()
+PlanarYCbCrImage::GetOffscreenFormat() const
 {
   return mOffscreenFormat == SurfaceFormat::UNKNOWN ?
     gfxVars::OffscreenFormat() :
     mOffscreenFormat;
 }
 
 bool
 PlanarYCbCrImage::AdoptData(const Data& aData)
@@ -654,23 +654,23 @@ NVImage::NVImage()
   : Image(nullptr, ImageFormat::NV_IMAGE)
   , mBufferSize(0)
 {
 }
 
 NVImage::~NVImage() = default;
 
 IntSize
-NVImage::GetSize()
+NVImage::GetSize() const
 {
   return mSize;
 }
 
 IntRect
-NVImage::GetPictureRect()
+NVImage::GetPictureRect() const
 {
   return mData.GetPictureRect();
 }
 
 already_AddRefed<SourceSurface>
 NVImage::GetAsSourceSurface()
 {
   if (mSourceSurface) {
@@ -735,17 +735,17 @@ NVImage::GetAsSourceSurface()
 
   // Release the temporary buffer.
   delete[] buffer;
 
   return surface.forget();
 }
 
 bool
-NVImage::IsValid()
+NVImage::IsValid() const
 {
   return !!mBufferSize;
 }
 
 uint32_t
 NVImage::GetBufferSize() const
 {
   return mBufferSize;
@@ -807,29 +807,32 @@ NVImage::GetData() const
 
 UniquePtr<uint8_t>
 NVImage::AllocateBuffer(uint32_t aSize)
 {
   UniquePtr<uint8_t> buffer(new uint8_t[aSize]);
   return buffer;
 }
 
-SourceSurfaceImage::SourceSurfaceImage(const gfx::IntSize& aSize, gfx::SourceSurface* aSourceSurface)
-  : Image(nullptr, ImageFormat::CAIRO_SURFACE),
-    mSize(aSize),
-    mSourceSurface(aSourceSurface),
-    mTextureFlags(TextureFlags::DEFAULT)
-{}
+SourceSurfaceImage::SourceSurfaceImage(const gfx::IntSize& aSize,
+                                       gfx::SourceSurface* aSourceSurface)
+  : Image(nullptr, ImageFormat::CAIRO_SURFACE)
+  , mSize(aSize)
+  , mSourceSurface(aSourceSurface)
+  , mTextureFlags(TextureFlags::DEFAULT)
+{
+}
 
 SourceSurfaceImage::SourceSurfaceImage(gfx::SourceSurface* aSourceSurface)
-  : Image(nullptr, ImageFormat::CAIRO_SURFACE),
-    mSize(aSourceSurface->GetSize()),
-    mSourceSurface(aSourceSurface),
-    mTextureFlags(TextureFlags::DEFAULT)
-{}
+  : Image(nullptr, ImageFormat::CAIRO_SURFACE)
+  , mSize(aSourceSurface->GetSize())
+  , mSourceSurface(aSourceSurface)
+  , mTextureFlags(TextureFlags::DEFAULT)
+{
+}
 
 SourceSurfaceImage::~SourceSurfaceImage() = default;
 
 TextureClient*
 SourceSurfaceImage::GetTextureClient(KnowsCompositor* aForwarder)
 {
   if (!aForwarder) {
     return nullptr;
--- a/gfx/layers/ImageContainer.h
+++ b/gfx/layers/ImageContainer.h
@@ -60,17 +60,18 @@ public:
   class SurfaceReleaser : public mozilla::Runnable {
   public:
     explicit SurfaceReleaser(RawRef aRef)
       : mozilla::Runnable(
           "nsAutoRefTraits<nsMainThreadSourceSurfaceRef>::SurfaceReleaser")
       , mRef(aRef)
     {
     }
-    NS_IMETHOD Run() override {
+    NS_IMETHOD Run() override
+    {
       mRef->Release();
       return NS_OK;
     }
     RawRef mRef;
   };
 
   static RawRef Void() { return nullptr; }
   static void Release(RawRef aRawRef)
@@ -88,32 +89,35 @@ public:
                  "Can only add a reference on the main thread");
     aRawRef->AddRef();
   }
 };
 
 class nsOwningThreadSourceSurfaceRef;
 
 template <>
-class nsAutoRefTraits<nsOwningThreadSourceSurfaceRef> {
+class nsAutoRefTraits<nsOwningThreadSourceSurfaceRef>
+{
 public:
   typedef mozilla::gfx::SourceSurface* RawRef;
 
   /**
    * The XPCOM event that will do the actual release on the creation thread.
    */
-  class SurfaceReleaser : public mozilla::Runnable {
+  class SurfaceReleaser : public mozilla::Runnable
+  {
   public:
     explicit SurfaceReleaser(RawRef aRef)
       : mozilla::Runnable(
           "nsAutoRefTraits<nsOwningThreadSourceSurfaceRef>::SurfaceReleaser")
       , mRef(aRef)
     {
     }
-    NS_IMETHOD Run() override {
+    NS_IMETHOD Run() override
+    {
       mRef->Release();
       return NS_OK;
     }
     RawRef mRef;
   };
 
   static RawRef Void() { return nullptr; }
   void Release(RawRef aRawRef)
@@ -191,51 +195,59 @@ class MacIOSurfaceImage;
  * modified after calling SetImage. Image implementations do not need to
  * perform locking; when filling an Image, the Image client is responsible
  * for ensuring only one thread accesses the Image at a time, and after
  * SetImage the image is immutable.
  *
  * When resampling an Image, only pixels within the buffer should be
  * sampled. For example, cairo images should be sampled in EXTEND_PAD mode.
  */
-class Image {
+class Image
+{
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Image)
 
 public:
-  ImageFormat GetFormat() { return mFormat; }
-  void* GetImplData() { return mImplData; }
+  ImageFormat GetFormat() const { return mFormat; }
+  void* GetImplData() const { return mImplData; }
 
-  virtual gfx::IntSize GetSize() = 0;
-  virtual gfx::IntPoint GetOrigin()
+  virtual gfx::IntSize GetSize() const = 0;
+  virtual gfx::IntPoint GetOrigin() const
   {
     return gfx::IntPoint(0, 0);
   }
-  virtual gfx::IntRect GetPictureRect()
+  virtual gfx::IntRect GetPictureRect() const
   {
     return gfx::IntRect(GetOrigin().x, GetOrigin().y, GetSize().width, GetSize().height);
   }
 
   ImageBackendData* GetBackendData(LayersBackend aBackend)
-  { return mBackendData[aBackend]; }
+  {
+    return mBackendData[aBackend];
+  }
   void SetBackendData(LayersBackend aBackend, ImageBackendData* aData)
-  { mBackendData[aBackend] = aData; }
+  {
+    mBackendData[aBackend] = aData;
+  }
 
-  int32_t GetSerial() { return mSerial; }
+  int32_t GetSerial() const { return mSerial; }
 
   virtual already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() = 0;
 
-  virtual bool IsValid() { return true; }
+  virtual bool IsValid() const { return true; }
 
-  virtual uint8_t* GetBuffer() { return nullptr; }
+  virtual uint8_t* GetBuffer() const { return nullptr; }
 
   /**
    * For use with the TextureForwarder only (so that the later can
    * synchronize the TextureClient with the TextureHost).
    */
-  virtual TextureClient* GetTextureClient(KnowsCompositor* aForwarder) { return nullptr; }
+  virtual TextureClient* GetTextureClient(KnowsCompositor* aForwarder)
+  {
+    return nullptr;
+  }
 
   /* Access to derived classes. */
   virtual GLImage* AsGLImage() { return nullptr; }
 #ifdef MOZ_WIDGET_ANDROID
   virtual SurfaceTextureImage* AsSurfaceTextureImage() { return nullptr; }
 #endif
 #ifdef XP_MACOSX
   virtual MacIOSurfaceImage* AsMacIOSurfaceImage() { return nullptr; }
@@ -268,17 +280,18 @@ protected:
 
 /**
  * A RecycleBin is owned by an ImageContainer. We store buffers in it that we
  * want to recycle from one image to the next.It's a separate object from
  * ImageContainer because images need to store a strong ref to their RecycleBin
  * and we must avoid creating a reference loop between an ImageContainer and
  * its active image.
  */
-class BufferRecycleBin final {
+class BufferRecycleBin final
+{
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(BufferRecycleBin)
 
   //typedef mozilla::gl::GLContext GLContext;
 
 public:
   BufferRecycleBin();
 
   void RecycleBuffer(mozilla::UniquePtr<uint8_t[]> aBuffer, uint32_t aSize);
@@ -330,17 +343,18 @@ protected:
   virtual ~ImageFactory() {}
 
   virtual RefPtr<PlanarYCbCrImage> CreatePlanarYCbCrImage(
     const gfx::IntSize& aScaleHint,
     BufferRecycleBin *aRecycleBin);
 };
 
 // Used to notify ImageContainer::NotifyComposite()
-class ImageContainerListener final {
+class ImageContainerListener final
+{
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(ImageContainerListener)
 
 public:
   explicit ImageContainerListener(ImageContainer* aImageContainer);
 
   void NotifyComposite(const ImageCompositeNotification& aNotification);
   void ClearImageContainer();
   void DropImageClient();
@@ -400,17 +414,18 @@ public:
   typedef uint32_t FrameID;
   typedef uint32_t ProducerID;
 
   RefPtr<PlanarYCbCrImage> CreatePlanarYCbCrImage();
 
   // Factory methods for shared image types.
   RefPtr<SharedRGBImage> CreateSharedRGBImage();
 
-  struct NonOwningImage {
+  struct NonOwningImage
+  {
     explicit NonOwningImage(Image* aImage = nullptr,
                             TimeStamp aTimeStamp = TimeStamp(),
                             FrameID aFrameID = 0,
                             ProducerID aProducerID = 0)
       : mImage(aImage), mTimeStamp(aTimeStamp), mFrameID(aFrameID),
         mProducerID(aProducerID) {}
     Image* mImage;
     TimeStamp mTimeStamp;
@@ -535,21 +550,19 @@ public:
 
   /**
    * Sets a size that the image is expected to be rendered at.
    * This is a hint for image backends to optimize scaling.
    * Default implementation in this class is to ignore the hint.
    * Can be called on any thread. This method takes mRecursiveMutex
    * when accessing thread-shared state.
    */
-  void SetScaleHint(const gfx::IntSize& aScaleHint)
-  { mScaleHint = aScaleHint; }
+  void SetScaleHint(const gfx::IntSize& aScaleHint) { mScaleHint = aScaleHint; }
 
-  const gfx::IntSize& GetScaleHint() const
-  { return mScaleHint; }
+  const gfx::IntSize& GetScaleHint() const { return mScaleHint; }
 
   void SetImageFactory(ImageFactory *aFactory)
   {
     RecursiveMutexAutoLock lock(mRecursiveMutex);
     mImageFactory = aFactory ? aFactory : new ImageFactory();
   }
 
   ImageFactory* GetImageFactory() const
@@ -575,17 +588,18 @@ public:
     RecursiveMutexAutoLock lock(mRecursiveMutex);
     return mPaintDelay;
   }
 
   /**
    * Returns the number of images which have been contained in this container
    * and painted at least once.  Can be called from any thread.
    */
-  uint32_t GetPaintCount() {
+  uint32_t GetPaintCount()
+  {
     RecursiveMutexAutoLock lock(mRecursiveMutex);
     return mPaintCount;
   }
 
   /**
    * An entry in the current image list "expires" when the entry has an
    * non-null timestamp, and in a SetCurrentImages call the new image list is
    * non-empty, the timestamp of the first new image is non-null and greater
@@ -721,17 +735,18 @@ public:
 
     return mImages[chosenIndex].mImage.get();
   }
 
 private:
   AutoTArray<ImageContainer::OwningImage,4> mImages;
 };
 
-struct PlanarYCbCrData {
+struct PlanarYCbCrData
+{
   // Luminance buffer
   uint8_t* mYChannel;
   int32_t mYStride;
   gfx::IntSize mYSize;
   int32_t mYSkip;
   // Chroma buffers
   uint8_t* mCbChannel;
   uint8_t* mCrChannel;
@@ -742,17 +757,18 @@ struct PlanarYCbCrData {
   // Picture region
   uint32_t mPicX;
   uint32_t mPicY;
   gfx::IntSize mPicSize;
   StereoMode mStereoMode;
   YUVColorSpace mYUVColorSpace;
   uint32_t mBitDepth;
 
-  gfx::IntRect GetPictureRect() const {
+  gfx::IntRect GetPictureRect() const
+  {
     return gfx::IntRect(mPicX, mPicY,
                      mPicSize.width,
                      mPicSize.height);
   }
 
   PlanarYCbCrData()
     : mYChannel(nullptr), mYStride(0), mYSize(0, 0), mYSkip(0)
     , mCbChannel(nullptr), mCrChannel(nullptr)
@@ -794,21 +810,23 @@ struct PlanarYCbCrData {
  *  0   3   6   9   12  15  18  21                659             669
  * |----------------------------------------------------------------|
  * |Y___Y___Y___Y___Y___Y___Y___Y...                      |%%%%%%%%%|
  * |Y___Y___Y___Y___Y___Y___Y___Y...                      |%%%%%%%%%|
  * |Y___Y___Y___Y___Y___Y___Y___Y...                      |%%%%%%%%%|
  * |            |<->|
  *                mYSkip
  */
-class PlanarYCbCrImage : public Image {
+class PlanarYCbCrImage : public Image
+{
 public:
   typedef PlanarYCbCrData Data;
 
-  enum {
+  enum
+  {
     MAX_DIMENSION = 16384
   };
 
   virtual ~PlanarYCbCrImage() {}
 
   /**
    * This makes a copy of the data buffers, in order to support functioning
    * in all different layer managers.
@@ -820,66 +838,66 @@ public:
    */
   virtual bool AdoptData(const Data& aData);
 
   /**
    * Ask this Image to not convert YUV to RGB during SetData, and make
    * the original data available through GetData. This is optional,
    * and not all PlanarYCbCrImages will support it.
    */
-  virtual void SetDelayedConversion(bool aDelayed) { }
+  virtual void SetDelayedConversion(bool aDelayed) {}
 
   /**
    * Grab the original YUV data. This is optional.
    */
-  virtual const Data* GetData() { return &mData; }
+  virtual const Data* GetData() const { return &mData; }
 
   /**
    * Return the number of bytes of heap memory used to store this image.
    */
-  virtual uint32_t GetDataSize() { return mBufferSize; }
+  uint32_t GetDataSize() const { return mBufferSize; }
 
-  virtual bool IsValid() { return !!mBufferSize; }
+  bool IsValid() const override { return !!mBufferSize; }
 
-  virtual gfx::IntSize GetSize() { return mSize; }
+  gfx::IntSize GetSize() const override { return mSize; }
 
-  virtual gfx::IntPoint GetOrigin() { return mOrigin; }
-
-  explicit PlanarYCbCrImage();
+  gfx::IntPoint GetOrigin() const override { return mOrigin; }
 
-  virtual SharedPlanarYCbCrImage *AsSharedPlanarYCbCrImage() { return nullptr; }
+  PlanarYCbCrImage();
 
-  virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
+  virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
+  {
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }
 
   virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const = 0;
 
-  PlanarYCbCrImage* AsPlanarYCbCrImage() { return this; }
+  PlanarYCbCrImage* AsPlanarYCbCrImage() override { return this; }
 
 protected:
-  already_AddRefed<gfx::SourceSurface> GetAsSourceSurface();
+  already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override;
 
   void SetOffscreenFormat(gfxImageFormat aFormat) { mOffscreenFormat = aFormat; }
-  gfxImageFormat GetOffscreenFormat();
+  gfxImageFormat GetOffscreenFormat() const;
 
   Data mData;
   gfx::IntPoint mOrigin;
   gfx::IntSize mSize;
   gfxImageFormat mOffscreenFormat;
   nsCountedRef<nsMainThreadSourceSurfaceRef> mSourceSurface;
   uint32_t mBufferSize;
 };
 
-class RecyclingPlanarYCbCrImage: public PlanarYCbCrImage {
+class RecyclingPlanarYCbCrImage: public PlanarYCbCrImage
+{
 public:
   explicit RecyclingPlanarYCbCrImage(BufferRecycleBin *aRecycleBin) : mRecycleBin(aRecycleBin) {}
-  virtual ~RecyclingPlanarYCbCrImage() override;
-  virtual bool CopyData(const Data& aData) override;
-  virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override;
+  virtual ~RecyclingPlanarYCbCrImage();
+  bool CopyData(const Data& aData) override;
+  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override;
 protected:
 
   /**
    * Return a buffer to store image data in.
    */
   mozilla::UniquePtr<uint8_t[]> AllocateBuffer(uint32_t aSize);
 
   RefPtr<BufferRecycleBin> mRecycleBin;
@@ -890,34 +908,35 @@ protected:
  * NVImage is used to store YUV420SP_NV12 and YUV420SP_NV21 data natively, which
  * are not supported by PlanarYCbCrImage. (PlanarYCbCrImage only stores YUV444P,
  * YUV422P and YUV420P, it converts YUV420SP_NV12 and YUV420SP_NV21 data into
  * YUV420P in its PlanarYCbCrImage::SetData() method.)
  *
  * PlanarYCbCrData is able to express all the YUV family and so we keep use it
  * in NVImage.
  */
-class NVImage: public Image {
+class NVImage final : public Image
+{
   typedef PlanarYCbCrData Data;
 
 public:
-  explicit NVImage();
-  virtual ~NVImage() override;
+  NVImage();
+  virtual ~NVImage();
 
   // Methods inherited from layers::Image.
-  virtual gfx::IntSize GetSize() override;
-  virtual gfx::IntRect GetPictureRect() override;
-  virtual already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override;
-  virtual bool IsValid() override;
-  virtual NVImage* AsNVImage() override;
+  gfx::IntSize GetSize() const override;
+  gfx::IntRect GetPictureRect() const override;
+  already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override;
+  bool IsValid() const override;
+  NVImage* AsNVImage() override;
 
   // Methods mimic layers::PlanarYCbCrImage.
-  virtual bool SetData(const Data& aData);
-  virtual const Data* GetData() const;
-  virtual uint32_t GetBufferSize() const;
+  bool SetData(const Data& aData) ;
+  const Data* GetData() const;
+  uint32_t GetBufferSize() const;
 
 protected:
 
   /**
    * Return a buffer to store image data in.
    */
   mozilla::UniquePtr<uint8_t> AllocateBuffer(uint32_t aSize);
 
@@ -928,32 +947,33 @@ protected:
   nsCountedRef<nsMainThreadSourceSurfaceRef> mSourceSurface;
 };
 
 /**
  * Currently, the data in a SourceSurfaceImage surface is treated as being in the
  * device output color space. This class is very simple as all backends
  * have to know about how to deal with drawing a cairo image.
  */
-class SourceSurfaceImage final : public Image {
+class SourceSurfaceImage final : public Image
+{
 public:
-  virtual already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override
+  already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override
   {
     RefPtr<gfx::SourceSurface> surface(mSourceSurface);
     return surface.forget();
   }
 
   void SetTextureFlags(TextureFlags aTextureFlags) { mTextureFlags = aTextureFlags; }
-  virtual TextureClient* GetTextureClient(KnowsCompositor* aForwarder) override;
+  TextureClient* GetTextureClient(KnowsCompositor* aForwarder) override;
 
-  virtual gfx::IntSize GetSize() override { return mSize; }
+  gfx::IntSize GetSize() const override { return mSize; }
 
   SourceSurfaceImage(const gfx::IntSize& aSize, gfx::SourceSurface* aSourceSurface);
   explicit SourceSurfaceImage(gfx::SourceSurface* aSourceSurface);
-  ~SourceSurfaceImage();
+  virtual ~SourceSurfaceImage();
 
 private:
   gfx::IntSize mSize;
   nsCountedRef<nsOwningThreadSourceSurfaceRef> mSourceSurface;
   nsDataHashtable<nsUint32HashKey, RefPtr<TextureClient> >  mTextureClients;
   TextureFlags mTextureFlags;
 };
 
--- a/gfx/layers/MacIOSurfaceImage.h
+++ b/gfx/layers/MacIOSurfaceImage.h
@@ -11,35 +11,39 @@
 #include "mozilla/gfx/MacIOSurface.h"
 #include "mozilla/gfx/Point.h"
 #include "mozilla/layers/TextureClient.h"
 
 namespace mozilla {
 
 namespace layers {
 
-class MacIOSurfaceImage : public Image {
+class MacIOSurfaceImage : public Image
+{
 public:
   explicit MacIOSurfaceImage(MacIOSurface* aSurface)
-   : Image(nullptr, ImageFormat::MAC_IOSURFACE),
-     mSurface(aSurface)
-  {}
+    : Image(nullptr, ImageFormat::MAC_IOSURFACE)
+    , mSurface(aSurface)
+  {
+  }
 
   MacIOSurface* GetSurface() { return mSurface; }
 
-  gfx::IntSize GetSize() override {
+  gfx::IntSize GetSize() const override
+  {
     return gfx::IntSize::Truncate(mSurface->GetDevicePixelWidth(),
                                   mSurface->GetDevicePixelHeight());
   }
 
-  virtual already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override;
+  already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override;
 
-  virtual TextureClient* GetTextureClient(KnowsCompositor* aForwarder) override;
+  TextureClient* GetTextureClient(KnowsCompositor* aForwarder) override;
 
-  virtual MacIOSurfaceImage* AsMacIOSurfaceImage() override {
+  MacIOSurfaceImage* AsMacIOSurfaceImage() override
+  {
     return this;
   }
 
 private:
   RefPtr<MacIOSurface> mSurface;
   RefPtr<TextureClient> mTextureClient;
 };
 
--- a/gfx/layers/TextureWrapperImage.cpp
+++ b/gfx/layers/TextureWrapperImage.cpp
@@ -17,23 +17,23 @@ TextureWrapperImage::TextureWrapperImage
 {
 }
 
 TextureWrapperImage::~TextureWrapperImage()
 {
 }
 
 gfx::IntSize
-TextureWrapperImage::GetSize()
+TextureWrapperImage::GetSize() const
 {
   return mTextureClient->GetSize();
 }
 
 gfx::IntRect
-TextureWrapperImage::GetPictureRect()
+TextureWrapperImage::GetPictureRect() const
 {
   return mPictureRect;
 }
 
 already_AddRefed<gfx::SourceSurface>
 TextureWrapperImage::GetAsSourceSurface()
 {
   TextureClientAutoLock autoLock(mTextureClient, OpenMode::OPEN_READ);
--- a/gfx/layers/TextureWrapperImage.h
+++ b/gfx/layers/TextureWrapperImage.h
@@ -15,20 +15,20 @@ namespace mozilla {
 namespace layers {
 
 // Wraps a TextureClient into an Image. This may only be used on the main
 // thread, and only with TextureClients that support BorrowDrawTarget().
 class TextureWrapperImage final : public Image
 {
 public:
   TextureWrapperImage(TextureClient* aClient, const gfx::IntRect& aPictureRect);
-  ~TextureWrapperImage() override;
+  virtual ~TextureWrapperImage();
 
-  gfx::IntSize GetSize() override;
-  gfx::IntRect GetPictureRect() override;
+  gfx::IntSize GetSize() const override;
+  gfx::IntRect GetPictureRect() const override;
   already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override;
   TextureClient* GetTextureClient(KnowsCompositor* aForwarder) override;
 
 private:
   gfx::IntRect mPictureRect;
   RefPtr<TextureClient> mTextureClient;
 };
 
--- a/gfx/layers/client/SingleTiledContentClient.h
+++ b/gfx/layers/client/SingleTiledContentClient.h
@@ -39,40 +39,40 @@ public:
 
   // ClientTiledLayerBuffer
   void PaintThebes(const nsIntRegion& aNewValidRegion,
                    const nsIntRegion& aPaintRegion,
                    const nsIntRegion& aDirtyRegion,
                    LayerManager::DrawPaintedLayerCallback aCallback,
                    void* aCallbackData,
                    TilePaintFlags aFlags = TilePaintFlags::None) override;
- 
+
   bool SupportsProgressiveUpdate() override { return false; }
   bool ProgressiveUpdate(const nsIntRegion& aValidRegion,
                          const nsIntRegion& aInvalidRegion,
                          const nsIntRegion& aOldValidRegion,
                          nsIntRegion& aOutDrawnRegion,
                          BasicTiledLayerPaintData* aPaintData,
                          LayerManager::DrawPaintedLayerCallback aCallback,
                          void* aCallbackData) override
   {
     MOZ_ASSERT(false, "ProgressiveUpdate not supported!");
     return false;
   }
-  
+
   void ResetPaintedAndValidState() override {
     mPaintedRegion.SetEmpty();
     mValidRegion.SetEmpty();
     mTile.DiscardBuffers();
   }
-  
+
   const nsIntRegion& GetValidRegion() override {
     return mValidRegion;
   }
-  
+
   bool IsLowPrecision() const override {
     return false;
   }
 
   void ReleaseTiles();
 
   void DiscardBuffers();
 
--- a/gfx/layers/client/TextureClientPool.h
+++ b/gfx/layers/client/TextureClientPool.h
@@ -37,17 +37,17 @@ public:
    */
   virtual void ReturnTextureClientDeferred(TextureClient *aClient) = 0;
 
   virtual void ReportClientLost() = 0;
 };
 
 class TextureClientPool final : public TextureClientAllocator
 {
-  ~TextureClientPool();
+  virtual ~TextureClientPool();
 
 public:
   TextureClientPool(LayersBackend aBackend,
                     int32_t aMaxTextureSize,
                     gfx::SurfaceFormat aFormat,
                     gfx::IntSize aSize,
                     TextureFlags aFlags,
                     uint32_t aShrinkTimeoutMsec,
@@ -90,17 +90,17 @@ public:
    * mInitialPoolSize outstanding.
    */
   void ShrinkToMaximumSize();
 
   /**
    * Report that a client retrieved via GetTextureClient() has become
    * unusable, so that it will no longer be tracked.
    */
-  virtual void ReportClientLost() override;
+  void ReportClientLost() override;
 
   /**
    * Calling this will cause the pool to attempt to relinquish any unused
    * clients.
    */
   void Clear();
 
   LayersBackend GetBackend() const { return mBackend; }
--- a/gfx/layers/ipc/KnowsCompositor.h
+++ b/gfx/layers/ipc/KnowsCompositor.h
@@ -49,17 +49,18 @@ public:
     aResource->NotifyInactive();
   }
 };
 
 /**
  * An abstract interface for classes that are tied to a specific Compositor across
  * IPDL and uses TextureFactoryIdentifier to describe this Compositor.
  */
-class KnowsCompositor {
+class KnowsCompositor
+{
 public:
   NS_INLINE_DECL_PURE_VIRTUAL_REFCOUNTING
 
   KnowsCompositor();
   ~KnowsCompositor();
 
   void IdentifyTextureHost(const TextureFactoryIdentifier& aIdentifier);
 
@@ -112,21 +113,22 @@ public:
     return mTextureFactoryIdentifier.mCompositorUseANGLE;
   }
 
   const TextureFactoryIdentifier& GetTextureFactoryIdentifier() const
   {
     return mTextureFactoryIdentifier;
   }
 
-  bool DeviceCanReset() const {
+  bool DeviceCanReset() const
+  {
     return GetCompositorBackendType() != LayersBackend::LAYERS_BASIC;
   }
 
-  int32_t GetSerial() { return mSerial; }
+  int32_t GetSerial() const { return mSerial; }
 
   /**
    * Sends a synchronous ping to the compsoitor.
    *
    * This is bad for performance and should only be called as a last resort if the
    * compositor may be blocked for a long period of time, to avoid that the content
    * process accumulates resource allocations that the compositor is not consuming
    * and releasing.
--- a/gfx/layers/ipc/SharedPlanarYCbCrImage.cpp
+++ b/gfx/layers/ipc/SharedPlanarYCbCrImage.cpp
@@ -24,22 +24,23 @@
 #include "mozilla/ipc/Shmem.h"
 
 namespace mozilla {
 namespace layers {
 
 using namespace mozilla::ipc;
 
 SharedPlanarYCbCrImage::SharedPlanarYCbCrImage(ImageClient* aCompositable)
-: mCompositable(aCompositable)
+  : mCompositable(aCompositable)
 {
   MOZ_COUNT_CTOR(SharedPlanarYCbCrImage);
 }
 
-SharedPlanarYCbCrImage::~SharedPlanarYCbCrImage() {
+SharedPlanarYCbCrImage::~SharedPlanarYCbCrImage()
+{
   MOZ_COUNT_DTOR(SharedPlanarYCbCrImage);
 }
 
 size_t
 SharedPlanarYCbCrImage::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
 {
   // NB: Explicitly skipping mTextureClient, the memory is already reported
   //     at time of allocation in GfxMemoryImageReporter.
@@ -50,17 +51,17 @@ SharedPlanarYCbCrImage::SizeOfExcludingT
 
 TextureClient*
 SharedPlanarYCbCrImage::GetTextureClient(KnowsCompositor* aForwarder)
 {
   return mTextureClient.get();
 }
 
 uint8_t*
-SharedPlanarYCbCrImage::GetBuffer()
+SharedPlanarYCbCrImage::GetBuffer() const
 {
   // This should never be used
   MOZ_ASSERT(false);
   return nullptr;
 }
 
 already_AddRefed<gfx::SourceSurface>
 SharedPlanarYCbCrImage::GetAsSourceSurface()
@@ -129,17 +130,18 @@ SharedPlanarYCbCrImage::AdoptData(const 
                                    aData.mYUVColorSpace,
                                    aData.mBitDepth,
                                    hasIntermediateBuffer));
 
   return true;
 }
 
 bool
-SharedPlanarYCbCrImage::IsValid() {
+SharedPlanarYCbCrImage::IsValid() const
+{
   return mTextureClient && mTextureClient->IsValid();
 }
 
 bool
 SharedPlanarYCbCrImage::Allocate(PlanarYCbCrData& aData)
 {
   MOZ_ASSERT(!mTextureClient,
              "This image already has allocated data");
--- a/gfx/layers/ipc/SharedPlanarYCbCrImage.h
+++ b/gfx/layers/ipc/SharedPlanarYCbCrImage.h
@@ -26,33 +26,33 @@ class SharedPlanarYCbCrImage : public Pl
 {
 public:
   explicit SharedPlanarYCbCrImage(ImageClient* aCompositable);
 
 protected:
   virtual ~SharedPlanarYCbCrImage();
 
 public:
-  virtual TextureClient* GetTextureClient(KnowsCompositor* aForwarder) override;
-  virtual uint8_t* GetBuffer() override;
+  TextureClient* GetTextureClient(KnowsCompositor* aForwarder) override;
+  uint8_t* GetBuffer() const override;
 
-  virtual already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override;
-  virtual bool CopyData(const PlanarYCbCrData& aData) override;
-  virtual bool AdoptData(const Data& aData) override;
+  already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override;
+  bool CopyData(const PlanarYCbCrData& aData) override;
+  bool AdoptData(const Data& aData) override;
 
-  virtual bool Allocate(PlanarYCbCrData& aData);
+  bool Allocate(PlanarYCbCrData& aData);
 
-  virtual bool IsValid() override;
+  bool IsValid() const override;
 
-  virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
+  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
   {
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }
 
-  virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override;
+  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override;
 
 private:
   RefPtr<TextureClient> mTextureClient;
   RefPtr<ImageClient> mCompositable;
 };
 
 } // namespace layers
 } // namespace mozilla
--- a/gfx/layers/ipc/SharedRGBImage.cpp
+++ b/gfx/layers/ipc/SharedRGBImage.cpp
@@ -72,27 +72,27 @@ SharedRGBImage::Allocate(gfx::IntSize aS
   mSize = aSize;
   mTextureClient = mCompositable->CreateBufferTextureClient(aFormat, aSize,
                                                             gfx::BackendType::NONE,
                                                             TextureFlags::DEFAULT);
   return !!mTextureClient;
 }
 
 uint8_t*
-SharedRGBImage::GetBuffer()
+SharedRGBImage::GetBuffer() const
 {
   MappedTextureData mapped;
   if (mTextureClient && mTextureClient->BorrowMappedData(mapped)) {
     return mapped.data;
   }
   return 0;
 }
 
 gfx::IntSize
-SharedRGBImage::GetSize()
+SharedRGBImage::GetSize() const
 {
   return mSize;
 }
 
 TextureClient*
 SharedRGBImage::GetTextureClient(KnowsCompositor* aForwarder)
 {
   return mTextureClient.get();
--- a/gfx/layers/ipc/SharedRGBImage.h
+++ b/gfx/layers/ipc/SharedRGBImage.h
@@ -35,21 +35,21 @@ class SharedRGBImage : public Image
 {
 public:
   explicit SharedRGBImage(ImageClient* aCompositable);
 
 protected:
   virtual ~SharedRGBImage();
 
 public:
-  virtual TextureClient* GetTextureClient(KnowsCompositor* aForwarder) override;
+  TextureClient* GetTextureClient(KnowsCompositor* aForwarder) override;
 
-  virtual uint8_t* GetBuffer() override;
+  uint8_t* GetBuffer() const override;
 
-  gfx::IntSize GetSize() override;
+  gfx::IntSize GetSize() const override;
 
   already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override;
 
   bool Allocate(gfx::IntSize aSize, gfx::SurfaceFormat aFormat);
 private:
   gfx::IntSize mSize;
   RefPtr<ImageClient> mCompositable;
   RefPtr<TextureClient> mTextureClient;
--- a/layout/base/crashtests/1156588.html
+++ b/layout/base/crashtests/1156588.html
@@ -1,15 +1,12 @@
 <!DOCTYPE html>
 <html>
 <head>
 <meta charset="UTF-8">
-<!--
-user_pref("layout.css.grid.enabled", true);
--->
 <script>
 
 function boom()
 {
     document.documentElement.offsetHeight;
     document.getElementById("x").style.content = "'x'";
     document.documentElement.offsetHeight;
     document.getElementById("s").remove();
--- a/layout/base/crashtests/crashtests.list
+++ b/layout/base/crashtests/crashtests.list
@@ -465,17 +465,17 @@ load 1009036.html
 load 1043163-1.html
 load 1061028.html
 load 1107508-1.html
 load 1116104.html
 load 1127198-1.html
 load 1140198.html
 load 1143535.html
 load 1153716.html
-pref(layout.css.grid.enabled,true) load 1156588.html
+load 1156588.html
 load 1162813.xul
 load 1163583.html
 load 1234622-1.html
 load 1235467-1.html
 pref(dom.webcomponents.enabled,true) load 1261351.html
 load 1270797-1.html
 load 1278455-1.html
 load 1286889.html
--- a/layout/base/nsLayoutUtils.cpp
+++ b/layout/base/nsLayoutUtils.cpp
@@ -155,17 +155,16 @@
 
 using namespace mozilla;
 using namespace mozilla::dom;
 using namespace mozilla::image;
 using namespace mozilla::layers;
 using namespace mozilla::layout;
 using namespace mozilla::gfx;
 
-#define GRID_ENABLED_PREF_NAME "layout.css.grid.enabled"
 #define WEBKIT_PREFIXES_ENABLED_PREF_NAME "layout.css.prefixes.webkit"
 #define TEXT_ALIGN_UNSAFE_ENABLED_PREF_NAME "layout.css.text-align-unsafe-value.enabled"
 #define FLOAT_LOGICAL_VALUES_ENABLED_PREF_NAME "layout.css.float-logical-values.enabled"
 #define INTERCHARACTER_RUBY_ENABLED_PREF_NAME "layout.css.ruby.intercharacter.enabled"
 
 // The time in number of frames that we estimate for a refresh driver
 // to be quiescent
 #define DEFAULT_QUIESCENT_FRAMES 2
@@ -209,61 +208,16 @@ typedef nsDataHashtable<nsUint64HashKey,
 static ContentMap* sContentMap = nullptr;
 static ContentMap& GetContentMap() {
   if (!sContentMap) {
     sContentMap = new ContentMap();
   }
   return *sContentMap;
 }
 
-// When the pref "layout.css.grid.enabled" changes, this function is invoked
-// to let us update kDisplayKTable, to selectively disable or restore the
-// entries for "grid" and "inline-grid" in that table.
-static void
-GridEnabledPrefChangeCallback(const char* aPrefName, void* aClosure)
-{
-  MOZ_ASSERT(strncmp(aPrefName, GRID_ENABLED_PREF_NAME,
-                     ArrayLength(GRID_ENABLED_PREF_NAME)) == 0,
-             "We only registered this callback for a single pref, so it "
-             "should only be called for that pref");
-
-  static int32_t sIndexOfGridInDisplayTable;
-  static int32_t sIndexOfInlineGridInDisplayTable;
-  static bool sAreGridKeywordIndicesInitialized; // initialized to false
-
-  bool isGridEnabled =
-    Preferences::GetBool(GRID_ENABLED_PREF_NAME, false);
-  if (!sAreGridKeywordIndicesInitialized) {
-    // First run: find the position of "grid" and "inline-grid" in
-    // kDisplayKTable.
-    sIndexOfGridInDisplayTable =
-      nsCSSProps::FindIndexOfKeyword(eCSSKeyword_grid,
-                                     nsCSSProps::kDisplayKTable);
-    MOZ_ASSERT(sIndexOfGridInDisplayTable >= 0,
-               "Couldn't find grid in kDisplayKTable");
-    sIndexOfInlineGridInDisplayTable =
-      nsCSSProps::FindIndexOfKeyword(eCSSKeyword_inline_grid,
-                                     nsCSSProps::kDisplayKTable);
-    MOZ_ASSERT(sIndexOfInlineGridInDisplayTable >= 0,
-               "Couldn't find inline-grid in kDisplayKTable");
-    sAreGridKeywordIndicesInitialized = true;
-  }
-
-  // OK -- now, stomp on or restore the "grid" entries in kDisplayKTable,
-  // depending on whether the grid pref is enabled vs. disabled.
-  if (sIndexOfGridInDisplayTable >= 0) {
-    nsCSSProps::kDisplayKTable[sIndexOfGridInDisplayTable].mKeyword =
-      isGridEnabled ? eCSSKeyword_grid : eCSSKeyword_UNKNOWN;
-  }
-  if (sIndexOfInlineGridInDisplayTable >= 0) {
-    nsCSSProps::kDisplayKTable[sIndexOfInlineGridInDisplayTable].mKeyword =
-      isGridEnabled ? eCSSKeyword_inline_grid : eCSSKeyword_UNKNOWN;
-  }
-}
-
 // When the pref "layout.css.prefixes.webkit" changes, this function is invoked
 // to let us update kDisplayKTable, to selectively disable or restore the
 // entries for "-webkit-box" and "-webkit-inline-box" in that table.
 static void
 WebkitPrefixEnabledPrefChangeCallback(const char* aPrefName, void* aClosure)
 {
   MOZ_ASSERT(strncmp(aPrefName, WEBKIT_PREFIXES_ENABLED_PREF_NAME,
                      ArrayLength(WEBKIT_PREFIXES_ENABLED_PREF_NAME)) == 0,
@@ -8154,18 +8108,16 @@ nsLayoutUtils::SizeOfTextRunsForFrames(n
 }
 
 struct PrefCallbacks
 {
   const char* name;
   PrefChangedFunc func;
 };
 static const PrefCallbacks kPrefCallbacks[] = {
-  { GRID_ENABLED_PREF_NAME,
-    GridEnabledPrefChangeCallback },
   { WEBKIT_PREFIXES_ENABLED_PREF_NAME,
     WebkitPrefixEnabledPrefChangeCallback },
   { TEXT_ALIGN_UNSAFE_ENABLED_PREF_NAME,
     TextAlignUnsafeEnabledPrefChangeCallback },
   { FLOAT_LOGICAL_VALUES_ENABLED_PREF_NAME,
     FloatLogicalValuesEnabledPrefChangeCallback },
 };
 
--- a/layout/generic/crashtests/1015562.html
+++ b/layout/generic/crashtests/1015562.html
@@ -1,17 +1,12 @@
 <!DOCTYPE html>
 <html>
 <head>
 <script>
-
-/*
-user_pref("layout.css.grid.enabled", true);
-*/
-
 function boom()
 {
     document.getElementById("r").appendChild(document.createTextNode("B"));
 }
 
 </script>
 </head>
 <body onload="boom();">
--- a/layout/generic/crashtests/1156257.html
+++ b/layout/generic/crashtests/1156257.html
@@ -1,15 +1,12 @@
 <!DOCTYPE html>
 <html>
 <head>
 <meta charset="UTF-8">
-<!--
-user_pref("layout.css.grid.enabled", true);
--->
 <script>
 function boom()
 {
     document.documentElement.offsetHeight;
     document.getElementById("g").firstChild.remove();
 }
 </script>
 </head>
--- a/layout/generic/crashtests/crashtests.list
+++ b/layout/generic/crashtests/crashtests.list
@@ -569,17 +569,17 @@ load 964078.html
 load 970710.html
 load 973701-1.xhtml
 load 973701-2.xhtml
 load 986899.html
 load 1001233.html
 load 1001258-1.html
 load 1001994.html
 load 1003441.xul
-pref(layout.css.grid.enabled,true) load 1015562.html
+load 1015562.html
 asserts(1-2) load 1015563-1.html
 asserts(1-2) load 1015563-2.html
 asserts(11) asserts-if(stylo&&Android,274) load 1015844.html # bug 574889, bug 1374479
 pref(font.size.inflation.minTwips,200) load 1032450.html
 load 1032613-1.svg
 load 1032613-2.html
 load 1037903.html
 load 1039454-1.html
@@ -595,32 +595,32 @@ load 1140268-1.html
 load 1145768.html
 load 1145931.html
 load 1146103.html
 load 1146107.html
 load 1146114.html
 asserts(0-20) load 1153478.html # bug 1144852
 load 1153695.html
 load 1156222.html
-pref(layout.css.grid.enabled,true) load 1156257.html
+load 1156257.html
 load 1157011.html
 load 1169420-1.html
 load 1169420-2.html
 load 1183431.html
 load 1221112-1.html
 load 1221112-2.html
 load 1221874-1.html
 load 1222783.xhtml
 load 1223522.xhtml
 load 1223568-1.html
 load 1223568-2.html
 load 1224230-1.html
-pref(layout.css.grid.enabled,true) load 1225118.html
-pref(layout.css.grid.enabled,true) load 1225376.html
-pref(layout.css.grid.enabled,true) load 1225592.html
+load 1225118.html
+load 1225376.html
+load 1225592.html
 load 1229437-1.html
 load 1229437-2.html
 load details-containing-only-text.html
 load details-display-none-summary-1.html
 load details-display-none-summary-2.html
 load details-display-none-summary-3.html
 load details-open-overflow-auto.html
 load details-open-overflow-hidden.html
--- a/layout/reftests/bugs/reftest.list
+++ b/layout/reftests/bugs/reftest.list
@@ -1846,17 +1846,17 @@ pref(layout.css.moz-document.content.ena
 pref(layout.css.moz-document.content.enabled,false) == 1035091-2.html 1035091-ref.html
 == 1042104-1.html 1042104-1-ref.html
 == 1043537-1.html 1043537-1-ref.html
 == 1044198-1.html 1044198-1-ref.html
 == 1049499-1.html 1049499-1-ref.html
 == 1050493-1.html 1050493-1-ref.html
 == 1050788-1.html about:blank
 == 1053035-1-flex.html 1053035-1-ref.html
-test-pref(layout.css.grid.enabled,true) == 1053035-1-grid.html 1053035-1-ref.html
+== 1053035-1-grid.html 1053035-1-ref.html
 == 1059167-1.html 1059167-1-ref.html
 == 1059498-1.html 1059498-1-ref.html
 == 1059498-2.html 1059498-1-ref.html
 == 1059498-3.html 1059498-1-ref.html
 == 1062108-1.html 1062108-1-ref.html
 == 1062792-1.html 1062792-1-ref.html
 == 1062963-floatmanager-reflow.html 1062963-floatmanager-reflow-ref.html
 test-pref(dom.webcomponents.enabled,true) == 1066554-1.html 1066554-1-ref.html
--- a/layout/reftests/css-grid/reftest.list
+++ b/layout/reftests/css-grid/reftest.list
@@ -1,10 +1,8 @@
-default-preferences pref(layout.css.grid.enabled,true)
-
 fails == grid-whitespace-handling-1a.xhtml grid-whitespace-handling-1-ref.xhtml
 fails == grid-whitespace-handling-1b.xhtml grid-whitespace-handling-1-ref.xhtml
 == grid-whitespace-handling-2.xhtml  grid-whitespace-handling-2-ref.xhtml
 == grid-placement-definite-001.html grid-placement-definite-001-ref.html
 == grid-placement-definite-002.html grid-placement-definite-002-ref.html
 == grid-placement-definite-003.html grid-placement-definite-003-ref.html
 == grid-placement-negative-lines-001.html grid-placement-negative-lines-001-ref.html
 == grid-placement-auto-row-sparse-001.html grid-placement-auto-row-sparse-001-ref.html
--- a/layout/reftests/list-item/reftest.list
+++ b/layout/reftests/list-item/reftest.list
@@ -1,11 +1,11 @@
 fuzzy-if(OSX,55,4) == numbering-1.html numbering-1-ref.html
 == numbering-2.html numbering-2-ref.html
-pref(layout.css.grid.enabled,true) fuzzy-if(OSX,8,1) == numbering-3.html numbering-3-ref.html
+fuzzy-if(OSX,8,1) == numbering-3.html numbering-3-ref.html
 fuzzy-if(OSX,72,2) == numbering-4.html numbering-4-ref.html
 == numbering-5.html numbering-5-ref.html
 == ol-reversed-1a.html ol-reversed-1-ref.html
 asserts(1) == ol-reversed-1b.html ol-reversed-1-ref.html # bug 478135
 == ol-reversed-1c.html ol-reversed-1-ref.html
 == ol-reversed-2.html ol-reversed-2-ref.html
 == ol-reversed-3.html ol-reversed-3-ref.html
 == bullet-space-1.html bullet-space-1-ref.html
--- a/layout/style/nsCSSPropList.h
+++ b/layout/style/nsCSSPropList.h
@@ -2140,179 +2140,171 @@ CSS_PROP_UIRESET(
     nullptr,
     CSS_PROP_NO_OFFSET,
     eStyleAnimType_Discrete) // bug 58646
 CSS_PROP_SHORTHAND(
     grid,
     grid,
     Grid,
     CSS_PROPERTY_PARSE_FUNCTION,
-    "layout.css.grid.enabled")
+    "")
 CSS_PROP_SHORTHAND(
     grid-area,
     grid_area,
     GridArea,
     CSS_PROPERTY_PARSE_FUNCTION,
-    "layout.css.grid.enabled")
+    "")
 CSS_PROP_POSITION(
     grid-auto-columns,
     grid_auto_columns,
     GridAutoColumns,
     CSS_PROPERTY_PARSE_FUNCTION |
-        CSS_PROPERTY_STORES_CALC |
-        CSS_PROPERTY_ENABLED_IN_UA_SHEETS,
-    "layout.css.grid.enabled",
+        CSS_PROPERTY_STORES_CALC,
+    "",
     0,
     kGridTrackBreadthKTable,
     CSS_PROP_NO_OFFSET,
     eStyleAnimType_Discrete)
 CSS_PROP_POSITION(
     grid-auto-flow,
     grid_auto_flow,
     GridAutoFlow,
-    CSS_PROPERTY_PARSE_FUNCTION |
-        CSS_PROPERTY_ENABLED_IN_UA_SHEETS,
-    "layout.css.grid.enabled",
+    CSS_PROPERTY_PARSE_FUNCTION,
+    "",
     0,
     kGridAutoFlowKTable,
     CSS_PROP_NO_OFFSET,
     eStyleAnimType_Discrete)
 CSS_PROP_POSITION(
     grid-auto-rows,
     grid_auto_rows,
     GridAutoRows,
     CSS_PROPERTY_PARSE_FUNCTION |
-        CSS_PROPERTY_STORES_CALC |
-        CSS_PROPERTY_ENABLED_IN_UA_SHEETS,
-    "layout.css.grid.enabled",
+        CSS_PROPERTY_STORES_CALC,
+    "",
     0,
     kGridTrackBreadthKTable,
     CSS_PROP_NO_OFFSET,
     eStyleAnimType_Discrete)
 CSS_PROP_SHORTHAND(
     grid-column,
     grid_column,
     GridColumn,
     CSS_PROPERTY_PARSE_FUNCTION,
-    "layout.css.grid.enabled")
+    "")
 CSS_PROP_POSITION(
     grid-column-end,
     grid_column_end,
     GridColumnEnd,
     CSS_PROPERTY_PARSE_FUNCTION,
-    "layout.css.grid.enabled",
+    "",
     0,
     nullptr,
     CSS_PROP_NO_OFFSET,
     eStyleAnimType_Discrete)
 CSS_PROP_POSITION(
     grid-column-gap,
     grid_column_gap,
     GridColumnGap,
     CSS_PROPERTY_PARSE_VALUE |
-        CSS_PROPERTY_VALUE_NONNEGATIVE |
-        CSS_PROPERTY_ENABLED_IN_UA_SHEETS,
-    "layout.css.grid.enabled",
+        CSS_PROPERTY_VALUE_NONNEGATIVE,
+    "",
     VARIANT_HLP | VARIANT_CALC,
     nullptr,
     offsetof(nsStylePosition, mGridColumnGap),
     eStyleAnimType_Coord)
 CSS_PROP_POSITION(
     grid-column-start,
     grid_column_start,
     GridColumnStart,
     CSS_PROPERTY_PARSE_FUNCTION,
-    "layout.css.grid.enabled",
+    "",
     0,
     nullptr,
     CSS_PROP_NO_OFFSET,
     eStyleAnimType_Discrete)
 CSS_PROP_SHORTHAND(
     grid-gap,
     grid_gap,
     GridGap,
     CSS_PROPERTY_PARSE_FUNCTION,
-    "layout.css.grid.enabled")
+    "")
 CSS_PROP_SHORTHAND(
     grid-row,
     grid_row,
     GridRow,
     CSS_PROPERTY_PARSE_FUNCTION,
-    "layout.css.grid.enabled")
+    "")
 CSS_PROP_POSITION(
     grid-row-end,
     grid_row_end,
     GridRowEnd,
     CSS_PROPERTY_PARSE_FUNCTION,
-    "layout.css.grid.enabled",
+    "",
     0,
     nullptr,
     CSS_PROP_NO_OFFSET,
     eStyleAnimType_Discrete)
 CSS_PROP_POSITION(
     grid-row-gap,
     grid_row_gap,
     GridRowGap,
     CSS_PROPERTY_PARSE_VALUE |
-        CSS_PROPERTY_VALUE_NONNEGATIVE |
-        CSS_PROPERTY_ENABLED_IN_UA_SHEETS,
-    "layout.css.grid.enabled",
+        CSS_PROPERTY_VALUE_NONNEGATIVE,
+    "",
     VARIANT_HLP | VARIANT_CALC,
     nullptr,
     offsetof(nsStylePosition, mGridRowGap),
     eStyleAnimType_Coord)
 CSS_PROP_POSITION(
     grid-row-start,
     grid_row_start,
     GridRowStart,
     CSS_PROPERTY_PARSE_FUNCTION,
-    "layout.css.grid.enabled",
+    "",
     0,
     nullptr,
     CSS_PROP_NO_OFFSET,
     eStyleAnimType_Discrete)
 CSS_PROP_SHORTHAND(
     grid-template,
     grid_template,
     GridTemplate,
     CSS_PROPERTY_PARSE_FUNCTION,
-    "layout.css.grid.enabled")
+    "")
 CSS_PROP_POSITION(
     grid-template-areas,
     grid_template_areas,
     GridTemplateAreas,
-    CSS_PROPERTY_PARSE_FUNCTION |
-        CSS_PROPERTY_ENABLED_IN_UA_SHEETS,
-    "layout.css.grid.enabled",
+    CSS_PROPERTY_PARSE_FUNCTION,
+    "",
     0,
     nullptr,
     CSS_PROP_NO_OFFSET,
     eStyleAnimType_Discrete)
 CSS_PROP_POSITION(
     grid-template-columns,
     grid_template_columns,
     GridTemplateColumns,
     CSS_PROPERTY_PARSE_FUNCTION |
         CSS_PROPERTY_STORES_CALC |
-        CSS_PROPERTY_GETCS_NEEDS_LAYOUT_FLUSH |
-        CSS_PROPERTY_ENABLED_IN_UA_SHEETS,
-    "layout.css.grid.enabled",
+        CSS_PROPERTY_GETCS_NEEDS_LAYOUT_FLUSH,
+    "",
     0,
     kGridTrackBreadthKTable,
     CSS_PROP_NO_OFFSET,
     eStyleAnimType_Discrete)
 CSS_PROP_POSITION(
     grid-template-rows,
     grid_template_rows,
     GridTemplateRows,
     CSS_PROPERTY_PARSE_FUNCTION |
         CSS_PROPERTY_STORES_CALC |
-        CSS_PROPERTY_GETCS_NEEDS_LAYOUT_FLUSH |
-        CSS_PROPERTY_ENABLED_IN_UA_SHEETS,
-    "layout.css.grid.enabled",
+        CSS_PROPERTY_GETCS_NEEDS_LAYOUT_FLUSH,
+    "",
     0,
     kGridTrackBreadthKTable,
     CSS_PROP_NO_OFFSET,
     eStyleAnimType_Discrete)
 CSS_PROP_POSITION(
     height,
     height,
     Height,
--- a/layout/style/nsCSSProps.cpp
+++ b/layout/style/nsCSSProps.cpp
@@ -1296,17 +1296,16 @@ KTableEntry nsCSSProps::kDisplayKTable[]
 #endif
   { eCSSKeyword_flex,                StyleDisplay::Flex },
   { eCSSKeyword_inline_flex,         StyleDisplay::InlineFlex },
   { eCSSKeyword_ruby,                StyleDisplay::Ruby },
   { eCSSKeyword_ruby_base,           StyleDisplay::RubyBase },
   { eCSSKeyword_ruby_base_container, StyleDisplay::RubyBaseContainer },
   { eCSSKeyword_ruby_text,           StyleDisplay::RubyText },
   { eCSSKeyword_ruby_text_container, StyleDisplay::RubyTextContainer },
-  // The next two entries are controlled by the layout.css.grid.enabled pref.
   { eCSSKeyword_grid,                StyleDisplay::Grid },
   { eCSSKeyword_inline_grid,         StyleDisplay::InlineGrid },
   // The next 4 entries are controlled by the layout.css.prefixes.webkit pref.
   { eCSSKeyword__webkit_box,         StyleDisplay::WebkitBox },
   { eCSSKeyword__webkit_inline_box,  StyleDisplay::WebkitInlineBox },
   { eCSSKeyword__webkit_flex,        StyleDisplay::Flex },
   { eCSSKeyword__webkit_inline_flex, StyleDisplay::InlineFlex },
   { eCSSKeyword_contents,            StyleDisplay::Contents },
--- a/layout/style/nsLayoutStylesheetCache.cpp
+++ b/layout/style/nsLayoutStylesheetCache.cpp
@@ -402,18 +402,16 @@ nsLayoutStylesheetCache::For(StyleBacken
                                  true);
 
     // For each pref that controls a CSS feature that a UA style sheet depends
     // on (such as a pref that enables a property that a UA style sheet uses),
     // register DependentPrefChanged as a callback to ensure that the relevant
     // style sheets will be re-parsed.
     // Preferences::RegisterCallback(&DependentPrefChanged,
     //                               "layout.css.example-pref.enabled");
-    Preferences::RegisterCallback(&DependentPrefChanged,
-                                  "layout.css.grid.enabled");
   }
 
   return cache;
 }
 
 void
 nsLayoutStylesheetCache::InitFromProfile()
 {
@@ -860,17 +858,17 @@ nsLayoutStylesheetCache::DependentPrefCh
   // to be re-parsed by dropping the sheet from gCSSLoader_{Gecko,Servo}'s cache
   // then setting our cached sheet pointer to null.  This will only work for
   // sheets that are loaded lazily.
 
 #define INVALIDATE(sheet_) \
   InvalidateSheet(gStyleCache_Gecko ? &gStyleCache_Gecko->sheet_ : nullptr, \
                   gStyleCache_Servo ? &gStyleCache_Servo->sheet_ : nullptr);
 
-  INVALIDATE(mUASheet);  // for layout.css.grid.enabled
+  // INVALIDATE(mUASheet);  // for layout.css.example-pref.enabled
 
 #undef INVALIDATE
 }
 
 /* static */ void
 nsLayoutStylesheetCache::InvalidatePreferenceSheets()
 {
   if (gStyleCache_Gecko) {
--- a/layout/style/test/property_database.js
+++ b/layout/style/test/property_database.js
@@ -6671,570 +6671,568 @@ if (IsCSSPropertyPrefEnabled("layout.css
   // This is updated in Stylo but not Gecko.
   if (SpecialPowers.DOMWindowUtils.isStyledByServo) {
     gCSSProperties["filter"].other_values.push("hue-rotate(0)");
   } else {
     gCSSProperties["filter"].invalid_values.push("hue-rotate(0)");
   }
 }
 
-if (IsCSSPropertyPrefEnabled("layout.css.grid.enabled")) {
-  var isGridTemplateSubgridValueEnabled =
-    IsCSSPropertyPrefEnabled("layout.css.grid-template-subgrid-value.enabled");
-
-  gCSSProperties["display"].other_values.push("grid", "inline-grid");
-  gCSSProperties["grid-auto-flow"] = {
-    domProp: "gridAutoFlow",
-    inherited: false,
-    type: CSS_TYPE_LONGHAND,
-    initial_values: [ "row" ],
-    other_values: [
-      "column",
-      "column dense",
-      "row dense",
-      "dense column",
-      "dense row",
-      "dense",
-    ],
-    invalid_values: [
-      "",
-      "auto",
-      "none",
-      "10px",
-      "column row",
-      "dense row dense",
-    ]
-  };
-
-  gCSSProperties["grid-auto-columns"] = {
-    domProp: "gridAutoColumns",
-    inherited: false,
-    type: CSS_TYPE_LONGHAND,
-    initial_values: [ "auto" ],
-    other_values: [
-      "40px",
-      "2em",
-      "2.5fr",
-      "12%",
-      "min-content",
-      "max-content",
-      "calc(2px - 99%)",
-      "minmax(20px, max-content)",
-      "minmax(min-content, auto)",
-      "minmax(auto, max-content)",
-      "m\\69nmax(20px, 4Fr)",
-      "MinMax(min-content, calc(20px + 10%))",
-      "fit-content(1px)",
-      "fit-content(calc(1px - 99%))",
-      "fit-content(10%)",
-    ],
-    invalid_values: [
-      "",
-      "normal",
-      "40ms",
-      "-40px",
-      "-12%",
-      "-2em",
-      "-2.5fr",
-      "minmax()",
-      "minmax(20px)",
-      "mİnmax(20px, 100px)",
-      "minmax(20px, 100px, 200px)",
-      "maxmin(100px, 20px)",
-      "minmax(min-content, minmax(30px, max-content))",
-      "fit-content(-1px)",
-      "fit-content(auto)",
-      "fit-content(min-content)",
-    ]
-  };
-  gCSSProperties["grid-auto-rows"] = {
-    domProp: "gridAutoRows",
-    inherited: false,
-    type: CSS_TYPE_LONGHAND,
-    initial_values: gCSSProperties["grid-auto-columns"].initial_values,
-    other_values: gCSSProperties["grid-auto-columns"].other_values,
-    invalid_values: gCSSProperties["grid-auto-columns"].invalid_values
-  };
-
-  gCSSProperties["grid-template-columns"] = {
-    domProp: "gridTemplateColumns",
-    inherited: false,
-    type: CSS_TYPE_LONGHAND,
-    initial_values: [ "none" ],
-    other_values: [
-      "auto",
-      "40px",
-      "2.5fr",
-      "[normal] 40px [] auto [ ] 12%",
-      "[foo] 40px min-content [ bar ] calc(2px - 99%) max-content",
-      "40px min-content calc(20px + 10%) max-content",
-      "minmax(min-content, auto)",
-      "minmax(auto, max-content)",
-      "m\\69nmax(20px, 4Fr)",
-      "40px MinMax(min-content, calc(20px + 10%)) max-content",
-      "40px 2em",
-      "[] 40px [-foo] 2em [bar baz This\ is\ one\ ident]",
-      // TODO bug 978478: "[a] repeat(3, [b] 20px [c] 40px [d]) [e]",
-      "repeat(1, 20px)",
-      "repeat(1, [a] 20px)",
-      "[a] Repeat(4, [a] 20px [] auto [b c]) [d]",
-      "[a] 2.5fr Repeat(4, [a] 20px [] auto [b c]) [d]",
-      "[a] 2.5fr [z] Repeat(4, [a] 20px [] auto [b c]) [d]",
-      "[a] 2.5fr [z] Repeat(4, [a] 20px [] auto) [d]",
-      "[a] 2.5fr [z] Repeat(4, 20px [b c] auto [b c]) [d]",
-      "[a] 2.5fr [z] Repeat(4, 20px auto) [d]",
-      "repeat(auto-fill, 0)",
-      "[a] repeat( Auto-fill,1%)",
-      "minmax(auto,0) [a] repeat(Auto-fit, 0) minmax(0,auto)",
-      "minmax(calc(1% + 1px),auto) repeat(Auto-fit,[] 1%) minmax(auto,1%)",
-      "[a] repeat( auto-fit,[a b] minmax(0,0) )",
-      "[a] 40px repeat(auto-fit,[a b] minmax(1px, 0) [])",
-      "[a] calc(1px - 99%) [b] repeat(auto-fit,[a b] minmax(1mm, 1%) [c]) [c]",
-      "repeat(auto-fill,minmax(1%,auto))",
-      "repeat(auto-fill,minmax(1em,min-content)) minmax(min-content,0)",
-      "repeat(auto-fill,minmax(max-content,1mm))",
-      "repeat(2, fit-content(1px))",
-      "fit-content(1px) 1fr",
-      "[a] fit-content(calc(1px - 99%)) [b]",
-      "[a] fit-content(10%) [b c] fit-content(1em)",
-    ],
-    invalid_values: [
-      "",
-      "normal",
-      "40ms",
-      "-40px",
-      "-12%",
-      "-2fr",
-      "[foo]",
-      "[inherit] 40px",
-      "[initial] 40px",
-      "[unset] 40px",
-      "[default] 40px",
-      "[span] 40px",
-      "[6%] 40px",
-      "[5th] 40px",
-      "[foo[] bar] 40px",
-      "[foo]] 40px",
-      "(foo) 40px",
-      "[foo] [bar] 40px",
-      "40px [foo] [bar]",
-      "minmax()",
-      "minmax(20px)",
-      "mİnmax(20px, 100px)",
-      "minmax(20px, 100px, 200px)",
-      "maxmin(100px, 20px)",
-      "minmax(min-content, minmax(30px, max-content))",
-      "repeat(0, 20px)",
-      "repeat(-3, 20px)",
-      "rêpeat(1, 20px)",
-      "repeat(1)",
-      "repeat(1, )",
-      "repeat(3px, 20px)",
-      "repeat(2.0, 20px)",
-      "repeat(2.5, 20px)",
-      "repeat(2, (foo))",
-      "repeat(2, foo)",
-      "40px calc(0px + rubbish)",
-      "repeat(1, repeat(1, 20px))",
-      "repeat(auto-fill, auto)",
-      "repeat(auto-fit,auto)",
-      "repeat(auto-fill, fit-content(1px))",
-      "repeat(auto-fit, fit-content(1px))",
-      "repeat(auto-fit,[])",
-      "repeat(auto-fill, 0) repeat(auto-fit, 0) ",
-      "repeat(auto-fit, 0) repeat(auto-fill, 0) ",
-      "[a] repeat(auto-fit, 0) repeat(auto-fit, 0) ",
-      "[a] repeat(auto-fill, 0) [a] repeat(auto-fill, 0) ",
-      "repeat(auto-fill, 0 0)",
-      "repeat(auto-fill, 0 [] 0)",
-      "repeat(auto-fill, min-content)",
-      "repeat(auto-fit,max-content)",
-      "repeat(auto-fit,1fr)",
-      "repeat(auto-fit,minmax(auto,auto))",
-      "repeat(auto-fit,minmax(min-content,1fr))",
-      "repeat(auto-fit,minmax(1fr,auto))",
-      "repeat(auto-fill,minmax(1fr,1em))",
-      "repeat(auto-fill, 10px) auto",
-      "auto repeat(auto-fit, 10px)",
-      "minmax(min-content,max-content) repeat(auto-fit, 0)",
-      "10px [a] 10px [b a] 1fr [b] repeat(auto-fill, 0)",
-      "fit-content(-1px)",
-      "fit-content(auto)",
-      "fit-content(min-content)",
-      "fit-content(1px) repeat(auto-fit, 1px)",
-      "fit-content(1px) repeat(auto-fill, 1px)",
-    ],
-    unbalanced_values: [
-      "(foo] 40px",
-    ]
-  };
-  if (isGridTemplateSubgridValueEnabled) {
-    gCSSProperties["grid-template-columns"].other_values.push(
-      // See https://bugzilla.mozilla.org/show_bug.cgi?id=981300
-      "[none auto subgrid min-content max-content foo] 40px",
-
-      "subgrid",
-      "subgrid [] [foo bar]",
-      "subgrid repeat(1, [])",
-      "subgrid Repeat(4, [a] [b c] [] [d])",
-      "subgrid repeat(auto-fill, [])",
-      "subgrid [x] repeat( Auto-fill, [a b c]) []",
-      "subgrid [x] repeat(auto-fill, []) [y z]"
-    );
-    gCSSProperties["grid-template-columns"].invalid_values.push(
-      "subgrid [inherit]",
-      "subgrid [initial]",
-      "subgrid [unset]",
-      "subgrid [default]",
-      "subgrid [span]",
-      "subgrid [foo] 40px",
-      "subgrid [foo 40px]",
-      "[foo] subgrid",
-      "subgrid rêpeat(1, [])",
-      "subgrid repeat(0, [])",
-      "subgrid repeat(-3, [])",
-      "subgrid repeat(2.0, [])",
-      "subgrid repeat(2.5, [])",
-      "subgrid repeat(3px, [])",
-      "subgrid repeat(1)",
-      "subgrid repeat(1, )",
-      "subgrid repeat(2, [40px])",
-      "subgrid repeat(2, foo)",
-      "subgrid repeat(1, repeat(1, []))",
-      "subgrid repeat(auto-fit,[])",
-      "subgrid [] repeat(auto-fit,[])",
-      "subgrid [a] repeat(auto-fit,[])",
-      "subgrid repeat(auto-fill, 1px)",
-      "subgrid repeat(auto-fill, 1px [])",
-      "subgrid repeat(Auto-fill, [a] [b c] [] [d])",
-      "subgrid repeat(auto-fill, []) repeat(auto-fill, [])"
-    );
-  }
-  gCSSProperties["grid-template-rows"] = {
-    domProp: "gridTemplateRows",
-    inherited: false,
-    type: CSS_TYPE_LONGHAND,
-    initial_values: gCSSProperties["grid-template-columns"].initial_values,
-    other_values: gCSSProperties["grid-template-columns"].other_values,
-    invalid_values: gCSSProperties["grid-template-columns"].invalid_values
-  };
-  gCSSProperties["grid-template-areas"] = {
-    domProp: "gridTemplateAreas",
-    inherited: false,
-    type: CSS_TYPE_LONGHAND,
-    initial_values: [ "none" ],
-    other_values: [
-      "''",
-      "'' ''",
-      "'1a-é_ .' \"b .\"",
-      "' Z\t\\aZ' 'Z Z'",
-      " '. . a b'  '. .a b' ",
-      "'a.b' '. . .'",
-      "'.' '..'",
-      "'...' '.'",
-      "'...-blah' '. .'",
-      "'.. ..' '.. ...'",
-    ],
-    invalid_values: [
-      "'a b' 'a/b'",
-      "'a . a'",
-      "'. a a' 'a a a'",
-      "'a a .' 'a a a'",
-      "'a a' 'a .'",
-      "'a a'\n'..'\n'a a'",
-    ]
-  };
-
-  gCSSProperties["grid-template"] = {
-    domProp: "gridTemplate",
-    inherited: false,
-    type: CSS_TYPE_TRUE_SHORTHAND,
-    subproperties: [
-      "grid-template-areas",
-      "grid-template-rows",
-      "grid-template-columns",
-    ],
-    initial_values: [
-      "none",
-      "none / none",
-    ],
-    other_values: [
-      // <'grid-template-rows'> / <'grid-template-columns'>
-      "40px / 100px",
-      "[foo] 40px [bar] / [baz] repeat(auto-fill,100px) [fizz]",
-      " none/100px",
-      "40px/none",
-      // [ <line-names>? <string> <track-size>? <line-names>? ]+ [ / <explicit-track-list> ]?
-      "'fizz'",
-      "[bar] 'fizz'",
-      "'fizz' / [foo] 40px",
-      "[bar] 'fizz' / [foo] 40px",
-      "'fizz' 100px / [foo] 40px",
-      "[bar] 'fizz' 100px / [foo] 40px",
-      "[bar] 'fizz' 100px [buzz] / [foo] 40px",
-      "[bar] 'fizz' 100px [buzz] \n [a] '.' 200px [b] / [foo] 40px",
-    ],
-    invalid_values: [
-      "'fizz' / repeat(1, 100px)",
-      "'fizz' repeat(1, 100px) / 0px",
-      "[foo] [bar] 40px / 100px",
-      "[fizz] [buzz] 100px / 40px",
-      "[fizz] [buzz] 'foo' / 40px",
-      "'foo' / none"
-    ]
-  };
-  if (isGridTemplateSubgridValueEnabled) {
-    gCSSProperties["grid-template"].other_values.push(
-      "subgrid",
-      "subgrid/40px 20px",
-      "subgrid [foo] [] [bar baz] / 40px 20px",
-      "40px 20px/subgrid",
-      "40px 20px/subgrid  [foo] [] repeat(3, [a] [b]) [bar baz]",
-      "subgrid/subgrid",
-      "subgrid [foo] [] [bar baz]/subgrid [foo] [] [bar baz]"
-    );
-    gCSSProperties["grid-template"].invalid_values.push(
-      "subgrid []",
-      "subgrid [] / 'fizz'",
-      "subgrid / 'fizz'"
-    );
-  }
-
-  gCSSProperties["grid"] = {
-    domProp: "grid",
-    inherited: false,
-    type: CSS_TYPE_TRUE_SHORTHAND,
-    subproperties: [
-      "grid-template-areas",
-      "grid-template-rows",
-      "grid-template-columns",
-      "grid-auto-flow",
-      "grid-auto-rows",
-      "grid-auto-columns",
-    ],
-    initial_values: [
-      "none",
-      "none / none",
-    ],
-    other_values: [
-      "auto-flow 40px / none",
-      "auto-flow / 40px",
-      "auto-flow dense auto / auto",
-      "dense auto-flow minmax(min-content, 2fr) / auto",
-      "dense auto-flow / 100px",
-      "none / auto-flow 40px",
-      "40px / auto-flow",
-      "none / dense auto-flow auto",
-    ].concat(
-      gCSSProperties["grid-template"].other_values
-    ),
-    invalid_values: [
-      "auto-flow",
-      " / auto-flow",
-      "dense 0 / 0",
-      "dense dense 40px / 0",
-      "auto-flow / auto-flow",
-      "auto-flow / dense",
-      "auto-flow [a] 0 / 0",
-      "0 / auto-flow [a] 0",
-      "auto-flow -20px / 0",
-      "auto-flow 200ms / 0",
-      "auto-flow 40px 100px / 0",
-    ].concat(
-      gCSSProperties["grid-template"].invalid_values,
-      gCSSProperties["grid-auto-flow"].other_values,
-      gCSSProperties["grid-auto-flow"].invalid_values
-        .filter((v) => v != 'none')
-    )
-  };
-
-  var gridLineOtherValues = [
-    "foo",
-    "2",
-    "2 foo",
-    "foo 2",
-    "-3",
-    "-3 bar",
-    "bar -3",
-    "span 2",
-    "2 span",
-    "span foo",
-    "foo span",
-    "span 2 foo",
-    "span foo 2",
-    "2 foo span",
-    "foo 2 span",
-  ];
-  var gridLineInvalidValues = [
+var isGridTemplateSubgridValueEnabled =
+  IsCSSPropertyPrefEnabled("layout.css.grid-template-subgrid-value.enabled");
+
+gCSSProperties["display"].other_values.push("grid", "inline-grid");
+gCSSProperties["grid-auto-flow"] = {
+  domProp: "gridAutoFlow",
+  inherited: false,
+  type: CSS_TYPE_LONGHAND,
+  initial_values: [ "row" ],
+  other_values: [
+    "column",
+    "column dense",
+    "row dense",
+    "dense column",
+    "dense row",
+    "dense",
+  ],
+  invalid_values: [
+    "",
+    "auto",
+    "none",
+    "10px",
+    "column row",
+    "dense row dense",
+  ]
+};
+
+gCSSProperties["grid-auto-columns"] = {
+  domProp: "gridAutoColumns",
+  inherited: false,
+  type: CSS_TYPE_LONGHAND,
+  initial_values: [ "auto" ],
+  other_values: [
+    "40px",
+    "2em",
+    "2.5fr",
+    "12%",
+    "min-content",
+    "max-content",
+    "calc(2px - 99%)",
+    "minmax(20px, max-content)",
+    "minmax(min-content, auto)",
+    "minmax(auto, max-content)",
+    "m\\69nmax(20px, 4Fr)",
+    "MinMax(min-content, calc(20px + 10%))",
+    "fit-content(1px)",
+    "fit-content(calc(1px - 99%))",
+    "fit-content(10%)",
+  ],
+  invalid_values: [
+    "",
+    "normal",
+    "40ms",
+    "-40px",
+    "-12%",
+    "-2em",
+    "-2.5fr",
+    "minmax()",
+    "minmax(20px)",
+    "mİnmax(20px, 100px)",
+    "minmax(20px, 100px, 200px)",
+    "maxmin(100px, 20px)",
+    "minmax(min-content, minmax(30px, max-content))",
+    "fit-content(-1px)",
+    "fit-content(auto)",
+    "fit-content(min-content)",
+  ]
+};
+gCSSProperties["grid-auto-rows"] = {
+  domProp: "gridAutoRows",
+  inherited: false,
+  type: CSS_TYPE_LONGHAND,
+  initial_values: gCSSProperties["grid-auto-columns"].initial_values,
+  other_values: gCSSProperties["grid-auto-columns"].other_values,
+  invalid_values: gCSSProperties["grid-auto-columns"].invalid_values
+};
+
+gCSSProperties["grid-template-columns"] = {
+  domProp: "gridTemplateColumns",
+  inherited: false,
+  type: CSS_TYPE_LONGHAND,
+  initial_values: [ "none" ],
+  other_values: [
+    "auto",
+    "40px",
+    "2.5fr",
+    "[normal] 40px [] auto [ ] 12%",
+    "[foo] 40px min-content [ bar ] calc(2px - 99%) max-content",
+    "40px min-content calc(20px + 10%) max-content",
+    "minmax(min-content, auto)",
+    "minmax(auto, max-content)",
+    "m\\69nmax(20px, 4Fr)",
+    "40px MinMax(min-content, calc(20px + 10%)) max-content",
+    "40px 2em",
+    "[] 40px [-foo] 2em [bar baz This\ is\ one\ ident]",
+    // TODO bug 978478: "[a] repeat(3, [b] 20px [c] 40px [d]) [e]",
+    "repeat(1, 20px)",
+    "repeat(1, [a] 20px)",
+    "[a] Repeat(4, [a] 20px [] auto [b c]) [d]",
+    "[a] 2.5fr Repeat(4, [a] 20px [] auto [b c]) [d]",
+    "[a] 2.5fr [z] Repeat(4, [a] 20px [] auto [b c]) [d]",
+    "[a] 2.5fr [z] Repeat(4, [a] 20px [] auto) [d]",
+    "[a] 2.5fr [z] Repeat(4, 20px [b c] auto [b c]) [d]",
+    "[a] 2.5fr [z] Repeat(4, 20px auto) [d]",
+    "repeat(auto-fill, 0)",
+    "[a] repeat( Auto-fill,1%)",
+    "minmax(auto,0) [a] repeat(Auto-fit, 0) minmax(0,auto)",
+    "minmax(calc(1% + 1px),auto) repeat(Auto-fit,[] 1%) minmax(auto,1%)",
+    "[a] repeat( auto-fit,[a b] minmax(0,0) )",
+    "[a] 40px repeat(auto-fit,[a b] minmax(1px, 0) [])",
+    "[a] calc(1px - 99%) [b] repeat(auto-fit,[a b] minmax(1mm, 1%) [c]) [c]",
+    "repeat(auto-fill,minmax(1%,auto))",
+    "repeat(auto-fill,minmax(1em,min-content)) minmax(min-content,0)",
+    "repeat(auto-fill,minmax(max-content,1mm))",
+    "repeat(2, fit-content(1px))",
+    "fit-content(1px) 1fr",
+    "[a] fit-content(calc(1px - 99%)) [b]",
+    "[a] fit-content(10%) [b c] fit-content(1em)",
+  ],
+  invalid_values: [
     "",
-    "4th",
-    "span",
-    "inherit 2",
-    "2 inherit",
-    "20px",
-    "2 3",
-    "2.5",
-    "2.0",
-    "0",
-    "0 foo",
-    "span 0",
-    "2 foo 3",
-    "foo 2 foo",
-    "2 span foo",
-    "foo span 2",
-    "span -3",
-    "span -3 bar",
-    "span 2 span",
-    "span foo span",
-    "span 2 foo span",
-  ];
-
-  gCSSProperties["grid-column-start"] = {
-    domProp: "gridColumnStart",
-    inherited: false,
-    type: CSS_TYPE_LONGHAND,
-    initial_values: [ "auto" ],
-    other_values: gridLineOtherValues,
-    invalid_values: gridLineInvalidValues
-  };
-  gCSSProperties["grid-column-end"] = {
-    domProp: "gridColumnEnd",
-    inherited: false,
-    type: CSS_TYPE_LONGHAND,
-    initial_values: [ "auto" ],
-    other_values: gridLineOtherValues,
-    invalid_values: gridLineInvalidValues
-  };
-  gCSSProperties["grid-row-start"] = {
-    domProp: "gridRowStart",
-    inherited: false,
-    type: CSS_TYPE_LONGHAND,
-    initial_values: [ "auto" ],
-    other_values: gridLineOtherValues,
-    invalid_values: gridLineInvalidValues
-  };
-  gCSSProperties["grid-row-end"] = {
-    domProp: "gridRowEnd",
-    inherited: false,
-    type: CSS_TYPE_LONGHAND,
-    initial_values: [ "auto" ],
-    other_values: gridLineOtherValues,
-    invalid_values: gridLineInvalidValues
-  };
-
-  // The grid-column and grid-row shorthands take values of the form
-  //   <grid-line> [ / <grid-line> ]?
-  var gridColumnRowOtherValues = [].concat(gridLineOtherValues);
-  gridLineOtherValues.concat([ "auto" ]).forEach(function(val) {
-    gridColumnRowOtherValues.push(" foo / " + val);
-    gridColumnRowOtherValues.push(val + "/2");
-  });
-  var gridColumnRowInvalidValues = [
-    "foo, bar",
-    "foo / bar / baz",
-  ].concat(gridLineInvalidValues);
-  gridLineInvalidValues.forEach(function(val) {
-    gridColumnRowInvalidValues.push("span 3 / " + val);
-    gridColumnRowInvalidValues.push(val + " / foo");
-  });
-  gCSSProperties["grid-column"] = {
-    domProp: "gridColumn",
-    inherited: false,
-    type: CSS_TYPE_TRUE_SHORTHAND,
-    subproperties: [
-      "grid-column-start",
-      "grid-column-end"
-    ],
-    initial_values: [ "auto", "auto / auto" ],
-    other_values: gridColumnRowOtherValues,
-    invalid_values: gridColumnRowInvalidValues
-  };
-  gCSSProperties["grid-row"] = {
-    domProp: "gridRow",
-    inherited: false,
-    type: CSS_TYPE_TRUE_SHORTHAND,
-    subproperties: [
-      "grid-row-start",
-      "grid-row-end"
-    ],
-    initial_values: [ "auto", "auto / auto" ],
-    other_values: gridColumnRowOtherValues,
-    invalid_values: gridColumnRowInvalidValues
-  };
-
-  var gridAreaOtherValues = gridLineOtherValues.slice();
-  gridLineOtherValues.forEach(function(val) {
-    gridAreaOtherValues.push("foo / " + val);
-    gridAreaOtherValues.push(val + "/2/3");
-    gridAreaOtherValues.push("foo / bar / " + val + " / baz");
-  });
-  var gridAreaInvalidValues = [
-    "foo, bar",
-    "foo / bar / baz / fizz / buzz",
-    "default / foo / bar / baz",
-    "foo / initial / bar / baz",
-    "foo / bar / inherit / baz",
-    "foo / bar / baz / unset",
-  ].concat(gridLineInvalidValues);
-  gridLineInvalidValues.forEach(function(val) {
-    gridAreaInvalidValues.push("foo / " + val);
-    gridAreaInvalidValues.push("foo / bar / " + val);
-    gridAreaInvalidValues.push("foo / 4 / bar / " + val);
-  });
-
-  gCSSProperties["grid-area"] = {
-    domProp: "gridArea",
-    inherited: false,
-    type: CSS_TYPE_TRUE_SHORTHAND,
-    subproperties: [
-      "grid-row-start",
-      "grid-column-start",
-      "grid-row-end",
-      "grid-column-end"
-    ],
-    initial_values: [
-      "auto",
-      "auto / auto",
-      "auto / auto / auto",
-      "auto / auto / auto / auto"
-    ],
-    other_values: gridAreaOtherValues,
-    invalid_values: gridAreaInvalidValues
-  };
-
-  gCSSProperties["grid-column-gap"] = {
-    domProp: "gridColumnGap",
-    inherited: false,
-    type: CSS_TYPE_LONGHAND,
-    initial_values: [ "0" ],
-    other_values: [ "2px", "2%", "1em", "calc(1px + 1em)", "calc(1%)",
-                    "calc(1% + 1ch)" , "calc(1px - 99%)" ],
-    invalid_values: [ "-1px", "auto", "none", "1px 1px", "-1%", "fit-content(1px)" ],
-  };
-  gCSSProperties["grid-row-gap"] = {
-    domProp: "gridRowGap",
-    inherited: false,
-    type: CSS_TYPE_LONGHAND,
-    initial_values: [ "0" ],
-    other_values: [ "2px", "2%", "1em", "calc(1px + 1em)", "calc(1%)",
-                    "calc(1% + 1ch)" , "calc(1px - 99%)" ],
-    invalid_values: [ "-1px", "auto", "none", "1px 1px", "-1%", "min-content" ],
-  };
-  gCSSProperties["grid-gap"] = {
-    domProp: "gridGap",
-    inherited: false,
-    type: CSS_TYPE_TRUE_SHORTHAND,
-    subproperties: [ "grid-column-gap", "grid-row-gap" ],
-    initial_values: [ "0", "0 0" ],
-    other_values: [ "1ch 0", "1px 1%", "1em 1px", "calc(1px) calc(1%)" ],
-    invalid_values: [ "-1px", "1px -1px", "1px 1px 1px", "inherit 1px",
-                      "1px auto" ]
-  };
+    "normal",
+    "40ms",
+    "-40px",
+    "-12%",
+    "-2fr",
+    "[foo]",
+    "[inherit] 40px",
+    "[initial] 40px",
+    "[unset] 40px",
+    "[default] 40px",
+    "[span] 40px",
+    "[6%] 40px",
+    "[5th] 40px",
+    "[foo[] bar] 40px",
+    "[foo]] 40px",
+    "(foo) 40px",
+    "[foo] [bar] 40px",
+    "40px [foo] [bar]",
+    "minmax()",
+    "minmax(20px)",
+    "mİnmax(20px, 100px)",
+    "minmax(20px, 100px, 200px)",
+    "maxmin(100px, 20px)",
+    "minmax(min-content, minmax(30px, max-content))",
+    "repeat(0, 20px)",
+    "repeat(-3, 20px)",
+    "rêpeat(1, 20px)",
+    "repeat(1)",
+    "repeat(1, )",
+    "repeat(3px, 20px)",
+    "repeat(2.0, 20px)",
+    "repeat(2.5, 20px)",
+    "repeat(2, (foo))",
+    "repeat(2, foo)",
+    "40px calc(0px + rubbish)",
+    "repeat(1, repeat(1, 20px))",
+    "repeat(auto-fill, auto)",
+    "repeat(auto-fit,auto)",
+    "repeat(auto-fill, fit-content(1px))",
+    "repeat(auto-fit, fit-content(1px))",
+    "repeat(auto-fit,[])",
+    "repeat(auto-fill, 0) repeat(auto-fit, 0) ",
+    "repeat(auto-fit, 0) repeat(auto-fill, 0) ",
+    "[a] repeat(auto-fit, 0) repeat(auto-fit, 0) ",
+    "[a] repeat(auto-fill, 0) [a] repeat(auto-fill, 0) ",
+    "repeat(auto-fill, 0 0)",
+    "repeat(auto-fill, 0 [] 0)",
+    "repeat(auto-fill, min-content)",
+    "repeat(auto-fit,max-content)",
+    "repeat(auto-fit,1fr)",
+    "repeat(auto-fit,minmax(auto,auto))",
+    "repeat(auto-fit,minmax(min-content,1fr))",
+    "repeat(auto-fit,minmax(1fr,auto))",
+    "repeat(auto-fill,minmax(1fr,1em))",
+    "repeat(auto-fill, 10px) auto",
+    "auto repeat(auto-fit, 10px)",
+    "minmax(min-content,max-content) repeat(auto-fit, 0)",
+    "10px [a] 10px [b a] 1fr [b] repeat(auto-fill, 0)",
+    "fit-content(-1px)",
+    "fit-content(auto)",
+    "fit-content(min-content)",
+    "fit-content(1px) repeat(auto-fit, 1px)",
+    "fit-content(1px) repeat(auto-fill, 1px)",
+  ],
+  unbalanced_values: [
+    "(foo] 40px",
+  ]
+};
+if (isGridTemplateSubgridValueEnabled) {
+  gCSSProperties["grid-template-columns"].other_values.push(
+    // See https://bugzilla.mozilla.org/show_bug.cgi?id=981300
+    "[none auto subgrid min-content max-content foo] 40px",
+
+    "subgrid",
+    "subgrid [] [foo bar]",
+    "subgrid repeat(1, [])",
+    "subgrid Repeat(4, [a] [b c] [] [d])",
+    "subgrid repeat(auto-fill, [])",
+    "subgrid [x] repeat( Auto-fill, [a b c]) []",
+    "subgrid [x] repeat(auto-fill, []) [y z]"
+  );
+  gCSSProperties["grid-template-columns"].invalid_values.push(
+    "subgrid [inherit]",
+    "subgrid [initial]",
+    "subgrid [unset]",
+    "subgrid [default]",
+    "subgrid [span]",
+    "subgrid [foo] 40px",
+    "subgrid [foo 40px]",
+    "[foo] subgrid",
+    "subgrid rêpeat(1, [])",
+    "subgrid repeat(0, [])",
+    "subgrid repeat(-3, [])",
+    "subgrid repeat(2.0, [])",
+    "subgrid repeat(2.5, [])",
+    "subgrid repeat(3px, [])",
+    "subgrid repeat(1)",
+    "subgrid repeat(1, )",
+    "subgrid repeat(2, [40px])",
+    "subgrid repeat(2, foo)",
+    "subgrid repeat(1, repeat(1, []))",
+    "subgrid repeat(auto-fit,[])",
+    "subgrid [] repeat(auto-fit,[])",
+    "subgrid [a] repeat(auto-fit,[])",
+    "subgrid repeat(auto-fill, 1px)",
+    "subgrid repeat(auto-fill, 1px [])",
+    "subgrid repeat(Auto-fill, [a] [b c] [] [d])",
+    "subgrid repeat(auto-fill, []) repeat(auto-fill, [])"
+  );
+}
+gCSSProperties["grid-template-rows"] = {
+  domProp: "gridTemplateRows",
+  inherited: false,
+  type: CSS_TYPE_LONGHAND,
+  initial_values: gCSSProperties["grid-template-columns"].initial_values,
+  other_values: gCSSProperties["grid-template-columns"].other_values,
+  invalid_values: gCSSProperties["grid-template-columns"].invalid_values
+};
+gCSSProperties["grid-template-areas"] = {
+  domProp: "gridTemplateAreas",
+  inherited: false,
+  type: CSS_TYPE_LONGHAND,
+  initial_values: [ "none" ],
+  other_values: [
+    "''",
+    "'' ''",
+    "'1a-é_ .' \"b .\"",
+    "' Z\t\\aZ' 'Z Z'",
+    " '. . a b'  '. .a b' ",
+    "'a.b' '. . .'",
+    "'.' '..'",
+    "'...' '.'",
+    "'...-blah' '. .'",
+    "'.. ..' '.. ...'",
+  ],
+  invalid_values: [
+    "'a b' 'a/b'",
+    "'a . a'",
+    "'. a a' 'a a a'",
+    "'a a .' 'a a a'",
+    "'a a' 'a .'",
+    "'a a'\n'..'\n'a a'",
+  ]
+};
+
+gCSSProperties["grid-template"] = {
+  domProp: "gridTemplate",
+  inherited: false,
+  type: CSS_TYPE_TRUE_SHORTHAND,
+  subproperties: [
+    "grid-template-areas",
+    "grid-template-rows",
+    "grid-template-columns",
+  ],
+  initial_values: [
+    "none",
+    "none / none",
+  ],
+  other_values: [
+    // <'grid-template-rows'> / <'grid-template-columns'>
+    "40px / 100px",
+    "[foo] 40px [bar] / [baz] repeat(auto-fill,100px) [fizz]",
+    " none/100px",
+    "40px/none",
+    // [ <line-names>? <string> <track-size>? <line-names>? ]+ [ / <explicit-track-list> ]?
+    "'fizz'",
+    "[bar] 'fizz'",
+    "'fizz' / [foo] 40px",
+    "[bar] 'fizz' / [foo] 40px",
+    "'fizz' 100px / [foo] 40px",
+    "[bar] 'fizz' 100px / [foo] 40px",
+    "[bar] 'fizz' 100px [buzz] / [foo] 40px",
+    "[bar] 'fizz' 100px [buzz] \n [a] '.' 200px [b] / [foo] 40px",
+  ],
+  invalid_values: [
+    "'fizz' / repeat(1, 100px)",
+    "'fizz' repeat(1, 100px) / 0px",
+    "[foo] [bar] 40px / 100px",
+    "[fizz] [buzz] 100px / 40px",
+    "[fizz] [buzz] 'foo' / 40px",
+    "'foo' / none"
+  ]
+};
+if (isGridTemplateSubgridValueEnabled) {
+  gCSSProperties["grid-template"].other_values.push(
+    "subgrid",
+    "subgrid/40px 20px",
+    "subgrid [foo] [] [bar baz] / 40px 20px",
+    "40px 20px/subgrid",
+    "40px 20px/subgrid  [foo] [] repeat(3, [a] [b]) [bar baz]",
+    "subgrid/subgrid",
+    "subgrid [foo] [] [bar baz]/subgrid [foo] [] [bar baz]"
+  );
+  gCSSProperties["grid-template"].invalid_values.push(
+    "subgrid []",
+    "subgrid [] / 'fizz'",
+    "subgrid / 'fizz'"
+  );
 }
 
+gCSSProperties["grid"] = {
+  domProp: "grid",
+  inherited: false,
+  type: CSS_TYPE_TRUE_SHORTHAND,
+  subproperties: [
+    "grid-template-areas",
+    "grid-template-rows",
+    "grid-template-columns",
+    "grid-auto-flow",
+    "grid-auto-rows",
+    "grid-auto-columns",
+  ],
+  initial_values: [
+    "none",
+    "none / none",
+  ],
+  other_values: [
+    "auto-flow 40px / none",
+    "auto-flow / 40px",
+    "auto-flow dense auto / auto",
+    "dense auto-flow minmax(min-content, 2fr) / auto",
+    "dense auto-flow / 100px",
+    "none / auto-flow 40px",
+    "40px / auto-flow",
+    "none / dense auto-flow auto",
+  ].concat(
+    gCSSProperties["grid-template"].other_values
+  ),
+  invalid_values: [
+    "auto-flow",
+    " / auto-flow",
+    "dense 0 / 0",
+    "dense dense 40px / 0",
+    "auto-flow / auto-flow",
+    "auto-flow / dense",
+    "auto-flow [a] 0 / 0",
+    "0 / auto-flow [a] 0",
+    "auto-flow -20px / 0",
+    "auto-flow 200ms / 0",
+    "auto-flow 40px 100px / 0",
+  ].concat(
+    gCSSProperties["grid-template"].invalid_values,
+    gCSSProperties["grid-auto-flow"].other_values,
+    gCSSProperties["grid-auto-flow"].invalid_values
+      .filter((v) => v != 'none')
+  )
+};
+
+var gridLineOtherValues = [
+  "foo",
+  "2",
+  "2 foo",
+  "foo 2",
+  "-3",
+  "-3 bar",
+  "bar -3",
+  "span 2",
+  "2 span",
+  "span foo",
+  "foo span",
+  "span 2 foo",
+  "span foo 2",
+  "2 foo span",
+  "foo 2 span",
+];
+var gridLineInvalidValues = [
+  "",
+  "4th",
+  "span",
+  "inherit 2",
+  "2 inherit",
+  "20px",
+  "2 3",
+  "2.5",
+  "2.0",
+  "0",
+  "0 foo",
+  "span 0",
+  "2 foo 3",
+  "foo 2 foo",
+  "2 span foo",
+  "foo span 2",
+  "span -3",
+  "span -3 bar",
+  "span 2 span",
+  "span foo span",
+  "span 2 foo span",
+];
+
+gCSSProperties["grid-column-start"] = {
+  domProp: "gridColumnStart",
+  inherited: false,
+  type: CSS_TYPE_LONGHAND,
+  initial_values: [ "auto" ],
+  other_values: gridLineOtherValues,
+  invalid_values: gridLineInvalidValues
+};
+gCSSProperties["grid-column-end"] = {
+  domProp: "gridColumnEnd",
+  inherited: false,
+  type: CSS_TYPE_LONGHAND,
+  initial_values: [ "auto" ],
+  other_values: gridLineOtherValues,
+  invalid_values: gridLineInvalidValues
+};
+gCSSProperties["grid-row-start"] = {
+  domProp: "gridRowStart",
+  inherited: false,
+  type: CSS_TYPE_LONGHAND,
+  initial_values: [ "auto" ],
+  other_values: gridLineOtherValues,
+  invalid_values: gridLineInvalidValues
+};
+gCSSProperties["grid-row-end"] = {
+  domProp: "gridRowEnd",
+  inherited: false,
+  type: CSS_TYPE_LONGHAND,
+  initial_values: [ "auto" ],
+  other_values: gridLineOtherValues,
+  invalid_values: gridLineInvalidValues
+};
+
+// The grid-column and grid-row shorthands take values of the form
+//   <grid-line> [ / <grid-line> ]?
+var gridColumnRowOtherValues = [].concat(gridLineOtherValues);
+gridLineOtherValues.concat([ "auto" ]).forEach(function(val) {
+  gridColumnRowOtherValues.push(" foo / " + val);
+  gridColumnRowOtherValues.push(val + "/2");
+});
+var gridColumnRowInvalidValues = [
+  "foo, bar",
+  "foo / bar / baz",
+].concat(gridLineInvalidValues);
+gridLineInvalidValues.forEach(function(val) {
+  gridColumnRowInvalidValues.push("span 3 / " + val);
+  gridColumnRowInvalidValues.push(val + " / foo");
+});
+gCSSProperties["grid-column"] = {
+  domProp: "gridColumn",
+  inherited: false,
+  type: CSS_TYPE_TRUE_SHORTHAND,
+  subproperties: [
+    "grid-column-start",
+    "grid-column-end"
+  ],
+  initial_values: [ "auto", "auto / auto" ],
+  other_values: gridColumnRowOtherValues,
+  invalid_values: gridColumnRowInvalidValues
+};
+gCSSProperties["grid-row"] = {
+  domProp: "gridRow",
+  inherited: false,
+  type: CSS_TYPE_TRUE_SHORTHAND,
+  subproperties: [
+    "grid-row-start",
+    "grid-row-end"
+  ],
+  initial_values: [ "auto", "auto / auto" ],
+  other_values: gridColumnRowOtherValues,
+  invalid_values: gridColumnRowInvalidValues
+};
+
+var gridAreaOtherValues = gridLineOtherValues.slice();
+gridLineOtherValues.forEach(function(val) {
+  gridAreaOtherValues.push("foo / " + val);
+  gridAreaOtherValues.push(val + "/2/3");
+  gridAreaOtherValues.push("foo / bar / " + val + " / baz");
+});
+var gridAreaInvalidValues = [
+  "foo, bar",
+  "foo / bar / baz / fizz / buzz",
+  "default / foo / bar / baz",
+  "foo / initial / bar / baz",
+  "foo / bar / inherit / baz",
+  "foo / bar / baz / unset",
+].concat(gridLineInvalidValues);
+gridLineInvalidValues.forEach(function(val) {
+  gridAreaInvalidValues.push("foo / " + val);
+  gridAreaInvalidValues.push("foo / bar / " + val);
+  gridAreaInvalidValues.push("foo / 4 / bar / " + val);
+});
+
+gCSSProperties["grid-area"] = {
+  domProp: "gridArea",
+  inherited: false,
+  type: CSS_TYPE_TRUE_SHORTHAND,
+  subproperties: [
+    "grid-row-start",
+    "grid-column-start",
+    "grid-row-end",
+    "grid-column-end"
+  ],
+  initial_values: [
+    "auto",
+    "auto / auto",
+    "auto / auto / auto",
+    "auto / auto / auto / auto"
+  ],
+  other_values: gridAreaOtherValues,
+  invalid_values: gridAreaInvalidValues
+};
+
+gCSSProperties["grid-column-gap"] = {
+  domProp: "gridColumnGap",
+  inherited: false,
+  type: CSS_TYPE_LONGHAND,
+  initial_values: [ "0" ],
+  other_values: [ "2px", "2%", "1em", "calc(1px + 1em)", "calc(1%)",
+                  "calc(1% + 1ch)" , "calc(1px - 99%)" ],
+  invalid_values: [ "-1px", "auto", "none", "1px 1px", "-1%", "fit-content(1px)" ],
+};
+gCSSProperties["grid-row-gap"] = {
+  domProp: "gridRowGap",
+  inherited: false,
+  type: CSS_TYPE_LONGHAND,
+  initial_values: [ "0" ],
+  other_values: [ "2px", "2%", "1em", "calc(1px + 1em)", "calc(1%)",
+                  "calc(1% + 1ch)" , "calc(1px - 99%)" ],
+  invalid_values: [ "-1px", "auto", "none", "1px 1px", "-1%", "min-content" ],
+};
+gCSSProperties["grid-gap"] = {
+  domProp: "gridGap",
+  inherited: false,
+  type: CSS_TYPE_TRUE_SHORTHAND,
+  subproperties: [ "grid-column-gap", "grid-row-gap" ],
+  initial_values: [ "0", "0 0" ],
+  other_values: [ "1ch 0", "1px 1%", "1em 1px", "calc(1px) calc(1%)" ],
+  invalid_values: [ "-1px", "1px -1px", "1px 1px 1px", "inherit 1px",
+                    "1px auto" ]
+};
+
 if (IsCSSPropertyPrefEnabled("layout.css.contain.enabled")) {
   gCSSProperties["contain"] = {
     domProp: "contain",
     inherited: false,
     type: CSS_TYPE_LONGHAND,
     initial_values: [ "none" ],
     other_values: [
       "strict",
--- a/layout/style/test/test_transitions_per_property.html
+++ b/layout/style/test/test_transitions_per_property.html
@@ -2007,19 +2007,16 @@ function test_font_weight(prop) {
   div.style.setProperty("transition-property", prop, "");
   div.style.setProperty(prop, "100", "");
   is(cs.getPropertyValue(prop), "900",
      "font-weight property " + prop + ": clamping of values");
   div.style.setProperty("transition-timing-function", "linear", "");
 }
 
 function test_grid_gap(prop) {
-  if (!SpecialPowers.getBoolPref("layout.css.grid.enabled")) {
-    return;
-  }
   test_length_transition(prop);
   test_length_clamped(prop);
   test_percent_transition(prop);
   test_percent_clamped(prop);
 }
 
 function test_pos_integer_or_auto_transition(prop) {
   div.style.setProperty("transition-property", "none", "");
--- a/media/webrtc/signaling/gtest/mediapipeline_unittest.cpp
+++ b/media/webrtc/signaling/gtest/mediapipeline_unittest.cpp
@@ -296,29 +296,29 @@ class TestAgent {
 
   uint32_t GetLocalSSRC() {
     std::vector<uint32_t> res;
     res = audio_conduit_->GetLocalSSRCs();
     return res.empty() ? 0 : res[0];
   }
 
   int GetAudioRtpCountSent() {
-    return audio_pipeline_->rtp_packets_sent();
+    return audio_pipeline_->RtpPacketsSent();
   }
 
   int GetAudioRtpCountReceived() {
-    return audio_pipeline_->rtp_packets_received();
+    return audio_pipeline_->RtpPacketsReceived();
   }
 
   int GetAudioRtcpCountSent() {
-    return audio_pipeline_->rtcp_packets_sent();
+    return audio_pipeline_->RtcpPacketsSent();
   }
 
   int GetAudioRtcpCountReceived() {
-    return audio_pipeline_->rtcp_packets_received();
+    return audio_pipeline_->RtcpPacketsReceived();
   }
 
 
   void SetUsingBundle(bool use_bundle) {
     use_bundle_ = use_bundle;
   }
 
  protected:
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
@@ -710,28 +710,27 @@ WebrtcAudioConduit::SendAudioFrame(const
 MediaConduitErrorCode
 WebrtcAudioConduit::GetAudioFrame(int16_t speechData[],
                                   int32_t samplingFreqHz,
                                   int32_t capture_delay,
                                   int& lengthSamples)
 {
 
   CSFLogDebug(LOGTAG,  "%s ", __FUNCTION__);
-  unsigned int numSamples = 0;
 
   //validate params
   if(!speechData )
   {
     CSFLogError(LOGTAG,"%s Null Audio Buffer Pointer", __FUNCTION__);
     MOZ_ASSERT(PR_FALSE);
     return kMediaConduitMalformedArgument;
   }
 
   // Validate sample length
-  if((numSamples = GetNum10msSamplesForFrequency(samplingFreqHz)) == 0  )
+  if(GetNum10msSamplesForFrequency(samplingFreqHz) == 0)
   {
     CSFLogError(LOGTAG,"%s Invalid Sampling Frequency ", __FUNCTION__);
     MOZ_ASSERT(PR_FALSE);
     return kMediaConduitMalformedArgument;
   }
 
   //validate capture time
   if(capture_delay < 0 )
@@ -744,35 +743,35 @@ WebrtcAudioConduit::GetAudioFrame(int16_
   //Conduit should have reception enabled before we ask for decoded
   // samples
   if(!mEngineReceiving)
   {
     CSFLogError(LOGTAG, "%s Engine not Receiving ", __FUNCTION__);
     return kMediaConduitSessionNotInited;
   }
 
-
+  int lengthSamplesAllowed = lengthSamples;
   lengthSamples = 0;  //output paramter
 
   if (mPtrVoEXmedia->GetAudioFrame(mChannel,
                                    samplingFreqHz,
                                    &mAudioFrame) != 0) {
     int error = mPtrVoEBase->LastError();
     CSFLogError(LOGTAG,  "%s Getting audio data Failed %d", __FUNCTION__, error);
     if(error == VE_RUNTIME_PLAY_ERROR)
     {
       return kMediaConduitPlayoutError;
     }
     return kMediaConduitUnknownError;
   }
 
   // XXX Annoying, have to copy to our buffers -- refactor?
   lengthSamples = mAudioFrame.samples_per_channel_ * mAudioFrame.num_channels_;
-  PodCopy(speechData, mAudioFrame.data_,
-          lengthSamples);
+  MOZ_RELEASE_ASSERT(lengthSamples <= lengthSamplesAllowed);
+  PodCopy(speechData, mAudioFrame.data_, lengthSamples);
 
   // Not #ifdef DEBUG or on a log module so we can use it for about:webrtc/etc
   mSamples += lengthSamples;
   if (mSamples >= mLastSyncLog + samplingFreqHz) {
     int jitter_buffer_delay_ms;
     int playout_buffer_delay_ms;
     int avsync_offset_ms;
     if (GetAVStats(&jitter_buffer_delay_ms,
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.h
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.h
@@ -135,16 +135,17 @@ public:
 
   /**
    * Function to grab a decoded audio-sample from the media engine for rendering
    * / playoutof length 10 milliseconds.
    *
    * @param speechData [in]: Pointer to a array to which a 10ms frame of audio will be copied
    * @param samplingFreqHz [in]: Frequency of the sampling for playback in Hertz (16000, 32000,..)
    * @param capture_delay [in]: Estimated Time between reading of the samples to rendering/playback
+   * @param lengthSamples [in]: Contains the maximum length of the speechData array.
    * @param lengthSamples [out]: Will contain length of the audio frame in samples at return.
                                  Ex: A value of 160 implies 160 samples each of 16-bits was copied
                                      into speechData
    * NOTE: This function should be invoked every 10 milliseconds for the best
    *          peformance
    * NOTE: ConfigureRecvMediaCodec() SHOULD be called before this function can be invoked
    *       This ensures the decoded samples are ready for reading and playout is enabled.
    *
--- a/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
+++ b/media/webrtc/signaling/src/media-conduit/MediaConduitInterface.h
@@ -380,23 +380,24 @@ public:
    * @param video_frame_length: size of the frame
    * @param width, height: dimensions of the frame
    * @param video_type: Type of the video frame - I420, RAW
    * @param captured_time: timestamp when the frame was captured.
    *                       if 0 timestamp is automatcally generated
    * NOTE: ConfigureSendMediaCodec() MUST be called before this function can be invoked
    *       This ensures the inserted video-frames can be transmitted by the conduit
    */
-  virtual MediaConduitErrorCode SendVideoFrame(unsigned char* video_frame,
+  virtual MediaConduitErrorCode SendVideoFrame(const unsigned char* video_frame,
                                                unsigned int video_frame_length,
                                                unsigned short width,
                                                unsigned short height,
                                                VideoType video_type,
                                                uint64_t capture_time) = 0;
-  virtual MediaConduitErrorCode SendVideoFrame(webrtc::VideoFrame& frame) = 0;
+  virtual MediaConduitErrorCode SendVideoFrame(
+    const webrtc::VideoFrame& frame) = 0;
 
   virtual MediaConduitErrorCode ConfigureCodecMode(webrtc::VideoCodecMode) = 0;
   /**
    * Function to configure send codec for the video session
    * @param sendSessionConfig: CodecConfiguration
    * @result: On Success, the video engine is configured with passed in codec for send
    *          On failure, video engine transmit functionality is disabled.
    * NOTE: This API can be invoked multiple time. Invoking this API may involve restarting
--- a/media/webrtc/signaling/src/media-conduit/VideoConduit.cpp
+++ b/media/webrtc/signaling/src/media-conduit/VideoConduit.cpp
@@ -1687,17 +1687,17 @@ WebrtcVideoConduit::SelectBitrates(
 
 // XXX we need to figure out how to feed back changes in preferred capture
 // resolution to the getUserMedia source.
 // Returns boolean if we've submitted an async change (and took ownership
 // of *frame's data)
 bool
 WebrtcVideoConduit::SelectSendResolution(unsigned short width,
                                          unsigned short height,
-                                         webrtc::VideoFrame* frame) // may be null
+                                         const webrtc::VideoFrame* frame) // may be null
 {
   mCodecMutex.AssertCurrentThreadOwns();
   // XXX This will do bandwidth-resolution adaptation as well - bug 877954
 
   mLastWidth = width;
   mLastHeight = height;
   // Enforce constraints
   if (mCurSendCodecConfig) {
@@ -1789,17 +1789,17 @@ WebrtcVideoConduit::SelectSendResolution
     }
   }
   return false;
 }
 
 nsresult
 WebrtcVideoConduit::ReconfigureSendCodec(unsigned short width,
                                          unsigned short height,
-                                         webrtc::VideoFrame* frame)
+                                         const webrtc::VideoFrame* frame)
 {
   mCodecMutex.AssertCurrentThreadOwns();
 
   // Test in case the stream hasn't started yet!  We could get a frame in
   // before we get around to StartTransmitting(), and that would dispatch a
   // runnable to call this.
   mInReconfig = false;
   if (mSendStream) {
@@ -1834,17 +1834,17 @@ WebrtcVideoConduit::SelectSendFrameRate(
 
       new_framerate = MinIgnoreZero(new_framerate, codecConfig->mEncodingConstraints.maxFps);
     }
   }
   return new_framerate;
 }
 
 MediaConduitErrorCode
-WebrtcVideoConduit::SendVideoFrame(unsigned char* video_buffer,
+WebrtcVideoConduit::SendVideoFrame(const unsigned char* video_buffer,
                                    unsigned int video_length,
                                    unsigned short width,
                                    unsigned short height,
                                    VideoType video_type,
                                    uint64_t capture_time)
 {
   // check for parameter sanity
   if (!video_buffer || video_length == 0 || width == 0 || height == 0) {
@@ -1940,17 +1940,17 @@ WebrtcVideoConduit::OnSinkWantsChanged(
     }
 
     mVideoAdapter->OnResolutionRequest(max_pixel_count,
                                        max_pixel_count_step_up);
   }
 }
 
 MediaConduitErrorCode
-WebrtcVideoConduit::SendVideoFrame(webrtc::VideoFrame& frame)
+WebrtcVideoConduit::SendVideoFrame(const webrtc::VideoFrame& frame)
 {
   // XXX Google uses a "timestamp_aligner" to translate timestamps from the
   // camera via TranslateTimestamp(); we should look at doing the same.  This
   // avoids sampling error when capturing frames, but google had to deal with some
   // broken cameras, include Logitech c920's IIRC.
 
   CSFLogVerbose(LOGTAG, "%s (send SSRC %u (0x%x))", __FUNCTION__,
               mSendStreamConfig.rtp.ssrcs.front(), mSendStreamConfig.rtp.ssrcs.front());
--- a/media/webrtc/signaling/src/media-conduit/VideoConduit.h
+++ b/media/webrtc/signaling/src/media-conduit/VideoConduit.h
@@ -168,27 +168,27 @@ public:
   /**
    * Function to select and change the encoding resolution based on incoming frame size
    * and current available bandwidth.
    * @param width, height: dimensions of the frame
    * @param frame: optional frame to submit for encoding after reconfig
    */
   bool SelectSendResolution(unsigned short width,
                             unsigned short height,
-                            webrtc::VideoFrame* frame);
+                            const webrtc::VideoFrame* frame);
 
   /**
    * Function to reconfigure the current send codec for a different
    * width/height/framerate/etc.
    * @param width, height: dimensions of the frame
    * @param frame: optional frame to submit for encoding after reconfig
    */
   nsresult ReconfigureSendCodec(unsigned short width,
                                 unsigned short height,
-                                webrtc::VideoFrame* frame);
+                                const webrtc::VideoFrame* frame);
 
   /**
    * Function to select and change the encoding frame rate based on incoming frame rate
    * and max-mbps setting.
    * @param current framerate
    * @result new framerate
    */
   unsigned int SelectSendFrameRate(const VideoCodecConfig* codecConfig,
@@ -202,25 +202,26 @@ public:
    * @param video_frame_length: size of the frame
    * @param width, height: dimensions of the frame
    * @param video_type: Type of the video frame - I420, RAW
    * @param captured_time: timestamp when the frame was captured.
    *                       if 0 timestamp is automatcally generated by the engine.
    *NOTE: ConfigureSendMediaCodec() SHOULD be called before this function can be invoked
    *       This ensures the inserted video-frames can be transmitted by the conduit
    */
-  virtual MediaConduitErrorCode SendVideoFrame(unsigned char* video_frame,
+  virtual MediaConduitErrorCode SendVideoFrame(const unsigned char* video_frame,
                                                unsigned int video_frame_length,
                                                unsigned short width,
                                                unsigned short height,
                                                VideoType video_type,
                                                uint64_t capture_time) override;
-  virtual MediaConduitErrorCode SendVideoFrame(webrtc::VideoFrame& frame) override;
+  virtual MediaConduitErrorCode SendVideoFrame(
+    const webrtc::VideoFrame& frame) override;
 
- /**
+  /**
    * webrtc::Transport method implementation
    * ---------------------------------------
    * Webrtc transport implementation to send and receive RTP packet.
    * VideoConduit registers itself as ExternalTransport to the VideoStream
    */
   virtual bool SendRtp(const uint8_t* packet, size_t length,
                        const webrtc::PacketOptions& options) override;
 
--- a/media/webrtc/signaling/src/media-conduit/WebrtcMediaDataDecoderCodec.cpp
+++ b/media/webrtc/signaling/src/media-conduit/WebrtcMediaDataDecoderCodec.cpp
@@ -2,29 +2,29 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "WebrtcMediaDataDecoderCodec.h"
 #include "ImageContainer.h"
 #include "Layers.h"
 #include "PDMFactory.h"
 #include "VideoUtils.h"
+#include "mozilla/media/MediaUtils.h"
 #include "mozilla/layers/ImageBridgeChild.h"
 #include "webrtc/base/keep_ref_until_done.h"
 
 namespace mozilla {
 
 WebrtcMediaDataDecoder::WebrtcMediaDataDecoder()
-  : mTaskQueue(
-      new TaskQueue(GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER),
-                    "WebrtcMediaDataDecoder::mTaskQueue"))
+  : mThreadPool(GetMediaThreadPool(MediaThreadType::PLATFORM_DECODER))
+  , mTaskQueue(new TaskQueue(do_AddRef(mThreadPool),
+                             "WebrtcMediaDataDecoder::mTaskQueue"))
   , mImageContainer(layers::LayerManager::CreateImageContainer(
       layers::ImageContainer::ASYNCHRONOUS))
   , mFactory(new PDMFactory())
-  , mMonitor("WebrtcMediaDataDecoder")
 {
 }
 
 WebrtcMediaDataDecoder::~WebrtcMediaDataDecoder()
 {
   mTaskQueue->BeginShutdown();
   mTaskQueue->AwaitShutdownAndIdle();
 }
@@ -63,35 +63,20 @@ WebrtcMediaDataDecoder::InitDecode(const
       mTrackType,
       mImageContainer,
       knowsCompositor });
 
   if (!mDecoder) {
     return WEBRTC_VIDEO_CODEC_ERROR;
   }
 
-  MonitorAutoLock lock(mMonitor);
-  bool done = false;
-  mDecoder->Init()->Then(mTaskQueue,
-                         __func__,
-                         [&](TrackInfo::TrackType) {
-                           MonitorAutoLock lock(mMonitor);
-                           done = true;
-                           mMonitor.Notify();
-                         },
-                         [&](const MediaResult& aError) {
-                           MonitorAutoLock lock(mMonitor);
-                           done = true;
-                           mError = aError;
-                           mMonitor.Notify();
-                         });
-
-  while (!done) {
-    mMonitor.Wait();
-  }
+  media::Await(do_AddRef(mThreadPool),
+               mDecoder->Init(),
+               [](TrackInfo::TrackType) {},
+               [&](const MediaResult& aError) { mError = aError; });
 
   return NS_SUCCEEDED(mError) ? WEBRTC_VIDEO_CODEC_OK : WEBRTC_VIDEO_CODEC_ERROR;
 }
 
 int32_t
 WebrtcMediaDataDecoder::Decode(
   const webrtc::EncodedImage& aInputImage,
   bool aMissingFrames,
@@ -127,37 +112,22 @@ WebrtcMediaDataDecoder::Decode(
 
   compressedFrame->mTime =
     media::TimeUnit::FromMicroseconds(aInputImage._timeStamp);
   compressedFrame->mTimecode =
     media::TimeUnit::FromMicroseconds(aRenderTimeMs * 1000);
   compressedFrame->mKeyframe =
     aInputImage._frameType == webrtc::FrameType::kVideoFrameKey;
   {
-    MonitorAutoLock lock(mMonitor);
-    bool done = false;
-    mDecoder->Decode(compressedFrame)->Then(
-      mTaskQueue,
-      __func__,
-      [&](const MediaDataDecoder::DecodedData& aResults) {
-        MonitorAutoLock lock(mMonitor);
-        mResults = aResults;
-        done = true;
-        mMonitor.Notify();
-      },
-      [&](const MediaResult& aError) {
-        MonitorAutoLock lock(mMonitor);
-        mError = aError;
-        done = true;
-        mMonitor.Notify();
-      });
-
-    while (!done) {
-      mMonitor.Wait();
-    }
+    media::Await(do_AddRef(mThreadPool),
+                 mDecoder->Decode(compressedFrame),
+                 [&](const MediaDataDecoder::DecodedData& aResults) {
+                   mResults = aResults;
+                 },
+                 [&](const MediaResult& aError) { mError = aError; });
 
     for (auto& frame : mResults) {
       MOZ_ASSERT(frame->mType == MediaData::VIDEO_DATA);
       RefPtr<VideoData> video = frame->As<VideoData>();
       MOZ_ASSERT(video);
       if (!video->mImage) {
         // Nothing to display.
         continue;
@@ -183,35 +153,22 @@ WebrtcMediaDataDecoder::RegisterDecodeCo
 {
   mCallback = aCallback;
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
 int32_t
 WebrtcMediaDataDecoder::Release()
 {
-  MonitorAutoLock lock(mMonitor);
-  bool done = false;
-  mDecoder->Flush()
-    ->Then(mTaskQueue,
-           __func__,
-           [this]() { return mDecoder->Shutdown(); },
-           [this](const MediaResult& aError) { return mDecoder->Shutdown(); })
-    ->Then(mTaskQueue,
-           __func__,
-           [&]() {
-             MonitorAutoLock lock(mMonitor);
-             done = true;
-             mMonitor.Notify();
-           },
-           []() { MOZ_ASSERT_UNREACHABLE("Shutdown promise always resolved"); });
-
-  while (!done) {
-    mMonitor.Wait();
-  }
+  RefPtr<ShutdownPromise> p =
+    mDecoder->Flush()->Then(mTaskQueue,
+                            __func__,
+                            [this]() { return mDecoder->Shutdown(); },
+                            [this]() { return mDecoder->Shutdown(); });
+  media::Await(do_AddRef(mThreadPool), p);
 
   mDecoder = nullptr;
   mNeedKeyframe = true;
 
   return WEBRTC_VIDEO_CODEC_OK;
 }
 
 bool
--- a/media/webrtc/signaling/src/media-conduit/WebrtcMediaDataDecoderCodec.h
+++ b/media/webrtc/signaling/src/media-conduit/WebrtcMediaDataDecoderCodec.h
@@ -17,16 +17,17 @@ namespace webrtc {
 }
 namespace mozilla {
 namespace layers {
   class Image;
   class ImageContainer;
 }
 
 class PDMFactory;
+class SharedThreadPool;
 class TaskQueue;
 
 class ImageBuffer : public webrtc::NativeHandleBuffer
 {
 public:
   explicit ImageBuffer(RefPtr<layers::Image>&& aImage);
   rtc::scoped_refptr<VideoFrameBuffer> NativeToI420Buffer() override;
 
@@ -57,27 +58,26 @@ public:
   int32_t Release() override;
 
 private:
   ~WebrtcMediaDataDecoder();
   void QueueFrame(MediaRawData* aFrame);
   AbstractThread* OwnerThread() const { return mTaskQueue; }
   bool OnTaskQueue() const;
 
+  const RefPtr<SharedThreadPool> mThreadPool;
   const RefPtr<TaskQueue> mTaskQueue;
   const RefPtr<layers::ImageContainer> mImageContainer;
   const RefPtr<PDMFactory> mFactory;
   RefPtr<MediaDataDecoder> mDecoder;
   webrtc::DecodedImageCallback* mCallback = nullptr;
   VideoInfo mInfo;
   TrackInfo::TrackType mTrackType;
   bool mNeedKeyframe = true;
   MozPromiseRequestHolder<MediaDataDecoder::DecodePromise> mDecodeRequest;
 
-  Monitor mMonitor;
-  // Members below are accessed via mMonitor
   MediaResult mError = NS_OK;
   MediaDataDecoder::DecodedData mResults;
 };
 
 } // namespace mozilla
 
 #endif // WebrtcMediaDataDecoderCodec_h__
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -2,111 +2,108 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 // Original author: ekr@rtfm.com
 
 #include "MediaPipeline.h"
 
-#include "MediaStreamGraphImpl.h"
-
 #include <inttypes.h>
 #include <math.h>
 
-#include "nspr.h"
-#include "srtp.h"
-
-#include "VideoSegment.h"
+#include "AudioSegment.h"
+#include "AutoTaskQueue.h"
+#include "CSFLog.h"
+#include "DOMMediaStream.h"
+#include "ImageContainer.h"
+#include "ImageTypes.h"
 #include "Layers.h"
 #include "LayersLogging.h"
-#include "ImageTypes.h"
-#include "ImageContainer.h"
+#include "MediaEngine.h"
+#include "MediaPipelineFilter.h"
+#include "MediaSegment.h"
+#include "MediaStreamGraphImpl.h"
+#include "MediaStreamListener.h"
 #include "MediaStreamTrack.h"
-#include "MediaStreamListener.h"
 #include "MediaStreamVideoSink.h"
-#include "VideoUtils.h"
+#include "RtpLogger.h"
+#include "VideoSegment.h"
 #include "VideoStreamTrack.h"
-#include "MediaEngine.h"
-
+#include "VideoUtils.h"
+#include "databuffer.h"
+#include "libyuv/convert.h"
+#include "mozilla/PeerIdentity.h"
+#include "mozilla/Preferences.h"
+#include "mozilla/SharedThreadPool.h"
+#include "mozilla/Sprintf.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/UniquePtrExtensions.h"
+#include "mozilla/dom/RTCStatsReportBinding.h"
+#include "mozilla/gfx/Point.h"
+#include "mozilla/gfx/Types.h"
 #include "nsError.h"
-#include "AudioSegment.h"
-#include "MediaSegment.h"
-#include "MediaPipelineFilter.h"
-#include "RtpLogger.h"
-#include "databuffer.h"
+#include "nsThreadUtils.h"
+#include "nspr.h"
+#include "runnable_utils.h"
+#include "srtp.h"
 #include "transportflow.h"
 #include "transportlayer.h"
 #include "transportlayerdtls.h"
 #include "transportlayerice.h"
-#include "runnable_utils.h"
-#include "libyuv/convert.h"
-#include "mozilla/dom/RTCStatsReportBinding.h"
-#include "mozilla/SharedThreadPool.h"
-#include "mozilla/PeerIdentity.h"
-#include "mozilla/Preferences.h"
-#include "mozilla/TaskQueue.h"
-#include "mozilla/gfx/Point.h"
-#include "mozilla/gfx/Types.h"
-#include "mozilla/UniquePtr.h"
-#include "mozilla/UniquePtrExtensions.h"
-#include "mozilla/Sprintf.h"
 
+#include "webrtc/base/bind.h"
 #include "webrtc/common_types.h"
-#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
 #include "webrtc/common_video/include/video_frame_buffer.h"
-#include "webrtc/base/bind.h"
-
-#include "nsThreadUtils.h"
-
-#include "CSFLog.h"
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
 
 // Max size given stereo is 480*2*2 = 1920 (10ms of 16-bits stereo audio at
 // 48KHz)
-#define AUDIO_SAMPLE_BUFFER_MAX_BYTES 480*2*2
-static_assert((WEBRTC_MAX_SAMPLE_RATE/100)*sizeof(uint16_t) * 2
+#define AUDIO_SAMPLE_BUFFER_MAX_BYTES (480 * 2 * 2)
+static_assert((WEBRTC_MAX_SAMPLE_RATE / 100) * sizeof(uint16_t) * 2
                <= AUDIO_SAMPLE_BUFFER_MAX_BYTES,
                "AUDIO_SAMPLE_BUFFER_MAX_BYTES is not large enough");
 
 using namespace mozilla;
 using namespace mozilla::dom;
 using namespace mozilla::gfx;
 using namespace mozilla::layers;
 
 static const char* mpLogTag = "MediaPipeline";
 #ifdef LOGTAG
 #undef LOGTAG
 #endif
 #define LOGTAG mpLogTag
 
 namespace mozilla {
-extern mozilla::LogModule* AudioLogModule();
+extern mozilla::LogModule*
+AudioLogModule();
 
 class VideoConverterListener
 {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoConverterListener)
 
-  virtual void OnVideoFrameConverted(unsigned char* aVideoFrame,
+  virtual void OnVideoFrameConverted(const unsigned char* aVideoFrame,
                                      unsigned int aVideoFrameLength,
                                      unsigned short aWidth,
                                      unsigned short aHeight,
                                      VideoType aVideoType,
                                      uint64_t aCaptureTime) = 0;
 
-  virtual void OnVideoFrameConverted(webrtc::VideoFrame& aVideoFrame) = 0;
+  virtual void OnVideoFrameConverted(const webrtc::VideoFrame& aVideoFrame) = 0;
 
 protected:
   virtual ~VideoConverterListener() {}
 };
 
 // I420 buffer size macros
-#define YSIZE(x,y) (CheckedInt<int>(x)*(y))
-#define CRSIZE(x,y) ((((x)+1) >> 1) * (((y)+1) >> 1))
-#define I420SIZE(x,y) (YSIZE((x),(y)) + 2 * CRSIZE((x),(y)))
+#define YSIZE(x, y) (CheckedInt<int>(x) * (y))
+#define CRSIZE(x, y) ((((x) + 1) >> 1) * (((y) + 1) >> 1))
+#define I420SIZE(x, y) (YSIZE((x), (y)) + 2 * CRSIZE((x), (y)))
 
 // An async video frame format converter.
 //
 // Input is typically a MediaStream(Track)Listener driven by MediaStreamGraph.
 //
 // We keep track of the size of the TaskQueue so we can drop frames if
 // conversion is taking too long.
 //
@@ -114,104 +111,113 @@ protected:
 // thread whenever a frame is converted.
 class VideoFrameConverter
 {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(VideoFrameConverter)
 
   VideoFrameConverter()
     : mLength(0)
-    , last_img_(-1) // -1 is not a guaranteed invalid serial. See bug 1262134.
+    , mTaskQueue(
+        new AutoTaskQueue(GetMediaThreadPool(MediaThreadType::WEBRTC_DECODER),
+                          "VideoFrameConverter"))
+    , mLastImage(-1) // -1 is not a guaranteed invalid serial. See bug 1262134.
 #ifdef DEBUG
     , mThrottleCount(0)
     , mThrottleRecord(0)
 #endif
     , mMutex("VideoFrameConverter")
   {
     MOZ_COUNT_CTOR(VideoFrameConverter);
-
-    RefPtr<SharedThreadPool> pool =
-      SharedThreadPool::Get(NS_LITERAL_CSTRING("VideoFrameConverter"));
-
-    mTaskQueue = MakeAndAddRef<TaskQueue>(pool.forget());
   }
 
-  void QueueVideoChunk(VideoChunk& aChunk, bool aForceBlack)
+  void QueueVideoChunk(const VideoChunk& aChunk, bool aForceBlack)
   {
     if (aChunk.IsNull()) {
       return;
     }
 
     // We get passed duplicate frames every ~10ms even with no frame change.
     int32_t serial = aChunk.mFrame.GetImage()->GetSerial();
-    if (serial == last_img_) {
+    if (serial == mLastImage) {
       return;
     }
-    last_img_ = serial;
+    mLastImage = serial;
 
     // A throttling limit of 1 allows us to convert 2 frames concurrently.
     // It's short enough to not build up too significant a delay, while
     // giving us a margin to not cause some machines to drop every other frame.
     const int32_t queueThrottlingLimit = 1;
     if (mLength > queueThrottlingLimit) {
-      CSFLogDebug(LOGTAG, "VideoFrameConverter %p queue is full. Throttling by throwing away a frame.",
+      CSFLogDebug(LOGTAG,
+                  "VideoFrameConverter %p queue is full. Throttling by "
+                  "throwing away a frame.",
                   this);
 #ifdef DEBUG
       ++mThrottleCount;
       mThrottleRecord = std::max(mThrottleCount, mThrottleRecord);
 #endif
       return;
     }
 
 #ifdef DEBUG
     if (mThrottleCount > 0) {
       if (mThrottleCount > 5) {
         // Log at a higher level when we have large drops.
-        CSFLogInfo(LOGTAG, "VideoFrameConverter %p stopped throttling after throwing away %d frames. Longest throttle so far was %d frames.",
-                   this, mThrottleCount, mThrottleRecord);
+        CSFLogInfo(LOGTAG,
+                   "VideoFrameConverter %p stopped throttling after throwing "
+                   "away %d frames. Longest throttle so far was %d frames.",
+                   this,
+                   mThrottleCount,
+                   mThrottleRecord);
       } else {
-        CSFLogDebug(LOGTAG, "VideoFrameConverter %p stopped throttling after throwing away %d frames. Longest throttle so far was %d frames.",
-                    this, mThrottleCount, mThrottleRecord);
+        CSFLogDebug(LOGTAG,
+                    "VideoFrameConverter %p stopped throttling after throwing "
+                    "away %d frames. Longest throttle so far was %d frames.",
+                    this,
+                    mThrottleCount,
+                    mThrottleRecord);
       }
       mThrottleCount = 0;
     }
 #endif
 
     bool forceBlack = aForceBlack || aChunk.mFrame.GetForceBlack();
 
     if (forceBlack) {
       // Reset the last-img check.
       // -1 is not a guaranteed invalid serial. See bug 1262134.
-      last_img_ = -1;
+      mLastImage = -1;
 
       // After disabling, we still want *some* frames to flow to the other side.
       // It could happen that we drop the packet that carried the first disabled
       // frame, for instance. Note that this still requires the application to
       // send a frame, or it doesn't trigger at all.
       const double disabledMinFps = 1.0;
       TimeStamp t = aChunk.mTimeStamp;
       MOZ_ASSERT(!t.IsNull());
-      if (!disabled_frame_sent_.IsNull() &&
-          (t - disabled_frame_sent_).ToSeconds() < (1.0 / disabledMinFps)) {
+      if (!mDisabledFrameSent.IsNull() &&
+          (t - mDisabledFrameSent).ToSeconds() < (1.0 / disabledMinFps)) {
         return;
       }
-
-      disabled_frame_sent_ = t;
+      mDisabledFrameSent = t;
     } else {
       // This sets it to the Null time.
-      disabled_frame_sent_ = TimeStamp();
+      mDisabledFrameSent = TimeStamp();
     }
 
     ++mLength; // Atomic
 
     nsCOMPtr<nsIRunnable> runnable =
       NewRunnableMethod<StoreRefPtrPassByPtr<Image>, bool>(
         "VideoFrameConverter::ProcessVideoFrame",
-        this, &VideoFrameConverter::ProcessVideoFrame,
-        aChunk.mFrame.GetImage(), forceBlack);
+        this,
+        &VideoFrameConverter::ProcessVideoFrame,
+        aChunk.mFrame.GetImage(),
+        forceBlack);
     nsresult rv = mTaskQueue->Dispatch(runnable.forget());
     MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
   }
 
   void AddListener(VideoConverterListener* aListener)
   {
     MutexAutoLock lock(mMutex);
 
@@ -223,33 +229,27 @@ public:
   {
     MutexAutoLock lock(mMutex);
 
     return mListeners.RemoveElement(aListener);
   }
 
   void Shutdown()
   {
-    mTaskQueue->BeginShutdown();
-    mTaskQueue->AwaitShutdownAndIdle();
+    MutexAutoLock lock(mMutex);
+    mListeners.Clear();
   }
 
 protected:
-  virtual ~VideoFrameConverter()
-  {
-    MOZ_COUNT_DTOR(VideoFrameConverter);
-  }
+  virtual ~VideoFrameConverter() { MOZ_COUNT_DTOR(VideoFrameConverter); }
 
-  static void DeleteBuffer(uint8 *data)
-  {
-    delete[] data;
-  }
+  static void DeleteBuffer(uint8* aData) { delete[] aData; }
 
-  // This takes ownership of the buffer and attached it to the VideoFrame we send
-  // to the listeners
+  // This takes ownership of the buffer and attaches it to the VideoFrame we
+  // send to the listeners
   void VideoFrameConverted(UniquePtr<uint8[]> aBuffer,
                            unsigned int aVideoFrameLength,
                            unsigned short aWidth,
                            unsigned short aHeight,
                            VideoType aVideoType,
                            uint64_t aCaptureTime)
   {
     // check for parameter sanity
@@ -263,28 +263,34 @@ protected:
     const int stride_y = aWidth;
     const int stride_uv = (aWidth + 1) / 2;
 
     const uint8_t* buffer_y = aBuffer.get();
     const uint8_t* buffer_u = buffer_y + stride_y * aHeight;
     const uint8_t* buffer_v = buffer_u + stride_uv * ((aHeight + 1) / 2);
     rtc::scoped_refptr<webrtc::WrappedI420Buffer> video_frame_buffer(
       new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
-        aWidth, aHeight,
-        buffer_y, stride_y,
-        buffer_u, stride_uv,
-        buffer_v, stride_uv,
+        aWidth,
+        aHeight,
+        buffer_y,
+        stride_y,
+        buffer_u,
+        stride_uv,
+        buffer_v,
+        stride_uv,
         rtc::Bind(&DeleteBuffer, aBuffer.release())));
 
-    webrtc::VideoFrame video_frame(video_frame_buffer, aCaptureTime,
-                                   aCaptureTime, webrtc::kVideoRotation_0); // XXX
+    webrtc::VideoFrame video_frame(video_frame_buffer,
+                                   aCaptureTime,
+                                   aCaptureTime,
+                                   webrtc::kVideoRotation_0); // XXX
     VideoFrameConverted(video_frame);
   }
 
-  void VideoFrameConverted(webrtc::VideoFrame& aVideoFrame)
+  void VideoFrameConverted(const webrtc::VideoFrame& aVideoFrame)
   {
     MutexAutoLock lock(mMutex);
 
     for (RefPtr<VideoConverterListener>& listener : mListeners) {
       listener->OnVideoFrameConverted(aVideoFrame);
     }
   }
 
@@ -308,69 +314,79 @@ protected:
       auto pixelData = MakeUniqueFallible<uint8_t[]>(length.value());
       if (pixelData) {
         // YCrCb black = 0x10 0x80 0x80
         memset(pixelData.get(), 0x10, yPlaneLen.value());
         // Fill Cb/Cr planes
         memset(pixelData.get() + yPlaneLen.value(), 0x80, cbcrPlaneLen);
 
         CSFLogDebug(LOGTAG, "Sending a black video frame");
-        VideoFrameConverted(Move(pixelData), length.value(),
-                            size.width, size.height,
-                            mozilla::kVideoI420, 0);
+        VideoFrameConverted(Move(pixelData),
+                            length.value(),
+                            size.width,
+                            size.height,
+                            mozilla::kVideoI420,
+                            0);
       }
       return;
     }
 
     ImageFormat format = aImage->GetFormat();
     if (format == ImageFormat::PLANAR_YCBCR) {
       // Cast away constness b/c some of the accessors are non-const
-      PlanarYCbCrImage* yuv = const_cast<PlanarYCbCrImage *>(
-          static_cast<const PlanarYCbCrImage *>(aImage));
-
-      const PlanarYCbCrData *data = yuv->GetData();
+      const PlanarYCbCrData* data =
+        static_cast<const PlanarYCbCrImage*>(aImage)->GetData();
       if (data) {
-        uint8_t *y = data->mYChannel;
-        uint8_t *cb = data->mCbChannel;
-        uint8_t *cr = data->mCrChannel;
+        uint8_t* y = data->mYChannel;
+        uint8_t* cb = data->mCbChannel;
+        uint8_t* cr = data->mCrChannel;
         int32_t yStride = data->mYStride;
         int32_t cbCrStride = data->mCbCrStride;
-        uint32_t width = yuv->GetSize().width;
-        uint32_t height = yuv->GetSize().height;
+        uint32_t width = aImage->GetSize().width;
+        uint32_t height = aImage->GetSize().height;
 
         rtc::Callback0<void> callback_unused;
         rtc::scoped_refptr<webrtc::WrappedI420Buffer> video_frame_buffer(
           new rtc::RefCountedObject<webrtc::WrappedI420Buffer>(
-            width, height,
-            y, yStride,
-            cb, cbCrStride,
-            cr, cbCrStride,
+            width,
+            height,
+            y,
+            yStride,
+            cb,
+            cbCrStride,
+            cr,
+            cbCrStride,
             callback_unused));
 
         webrtc::VideoFrame i420_frame(video_frame_buffer,
-                                      0, 0, // not setting timestamps
+                                      0,
+                                      0, // not setting timestamps
                                       webrtc::kVideoRotation_0);
         CSFLogDebug(LOGTAG, "Sending an I420 video frame");
         VideoFrameConverted(i420_frame);
         return;
       }
     }
 
     RefPtr<SourceSurface> surf = aImage->GetAsSourceSurface();
     if (!surf) {
-      CSFLogError(LOGTAG, "Getting surface from %s image failed",
+      CSFLogError(LOGTAG,
+                  "Getting surface from %s image failed",
                   Stringify(format).c_str());
       return;
     }
 
     RefPtr<DataSourceSurface> data = surf->GetDataSurface();
     if (!data) {
-      CSFLogError(LOGTAG, "Getting data surface from %s image with %s (%s) surface failed",
-                  Stringify(format).c_str(), Stringify(surf->GetType()).c_str(),
-                  Stringify(surf->GetFormat()).c_str());
+      CSFLogError(
+        LOGTAG,
+        "Getting data surface from %s image with %s (%s) surface failed",
+        Stringify(format).c_str(),
+        Stringify(surf->GetType()).c_str(),
+        Stringify(surf->GetFormat()).c_str());
       return;
     }
 
     IntSize size = aImage->GetSize();
     // these don't need to be CheckedInt, any overflow will be caught by YSIZE
     int half_width = (size.width + 1) >> 1;
     int half_height = (size.height + 1) >> 1;
     int c_size = half_width * half_height;
@@ -383,65 +399,84 @@ protected:
     auto yuv_scoped = MakeUniqueFallible<uint8[]>(buffer_size.value());
     if (!yuv_scoped) {
       return;
     }
     uint8* yuv = yuv_scoped.get();
 
     DataSourceSurface::ScopedMap map(data, DataSourceSurface::READ);
     if (!map.IsMapped()) {
-      CSFLogError(LOGTAG, "Reading DataSourceSurface from %s image with %s (%s) surface failed",
-                  Stringify(format).c_str(), Stringify(surf->GetType()).c_str(),
-                  Stringify(surf->GetFormat()).c_str());
+      CSFLogError(
+        LOGTAG,
+        "Reading DataSourceSurface from %s image with %s (%s) surface failed",
+        Stringify(format).c_str(),
+        Stringify(surf->GetType()).c_str(),
+        Stringify(surf->GetFormat()).c_str());
       return;
     }
 
     int rv;
     int cb_offset = YSIZE(size.width, size.height).value();
     int cr_offset = cb_offset + c_size;
     switch (surf->GetFormat()) {
       case SurfaceFormat::B8G8R8A8:
       case SurfaceFormat::B8G8R8X8:
         rv = libyuv::ARGBToI420(static_cast<uint8*>(map.GetData()),
                                 map.GetStride(),
-                                yuv, size.width,
-                                yuv + cb_offset, half_width,
-                                yuv + cr_offset, half_width,
-                                size.width, size.height);
+                                yuv,
+                                size.width,
+                                yuv + cb_offset,
+                                half_width,
+                                yuv + cr_offset,
+                                half_width,
+                                size.width,
+                                size.height);
         break;
       case SurfaceFormat::R5G6B5_UINT16:
         rv = libyuv::RGB565ToI420(static_cast<uint8*>(map.GetData()),
                                   map.GetStride(),
-                                  yuv, size.width,
-                                  yuv + cb_offset, half_width,
-                                  yuv + cr_offset, half_width,
-                                  size.width, size.height);
+                                  yuv,
+                                  size.width,
+                                  yuv + cb_offset,
+                                  half_width,
+                                  yuv + cr_offset,
+                                  half_width,
+                                  size.width,
+                                  size.height);
         break;
       default:
-        CSFLogError(LOGTAG, "Unsupported RGB video format %s",
+        CSFLogError(LOGTAG,
+                    "Unsupported RGB video format %s",
                     Stringify(surf->GetFormat()).c_str());
         MOZ_ASSERT(PR_FALSE);
         return;
     }
     if (rv != 0) {
-      CSFLogError(LOGTAG, "%s to I420 conversion failed",
+      CSFLogError(LOGTAG,
+                  "%s to I420 conversion failed",
                   Stringify(surf->GetFormat()).c_str());
       return;
     }
-    CSFLogDebug(LOGTAG, "Sending an I420 video frame converted from %s",
+    CSFLogDebug(LOGTAG,
+                "Sending an I420 video frame converted from %s",
                 Stringify(surf->GetFormat()).c_str());
-    VideoFrameConverted(Move(yuv_scoped), buffer_size.value(), size.width, size.height, mozilla::kVideoI420, 0);
+    VideoFrameConverted(Move(yuv_scoped),
+                        buffer_size.value(),
+                        size.width,
+                        size.height,
+                        mozilla::kVideoI420,
+                        0);
   }
 
   Atomic<int32_t, Relaxed> mLength;
-  RefPtr<TaskQueue> mTaskQueue;
+  const RefPtr<AutoTaskQueue> mTaskQueue;
 
   // Written and read from the queueing thread (normally MSG).
-  int32_t last_img_; // serial number of last Image
-  TimeStamp disabled_frame_sent_; // The time we sent the last disabled frame.
+  int32_t mLastImage;           // serial number of last Image
+  TimeStamp mDisabledFrameSent; // The time we sent the last disabled frame.
 #ifdef DEBUG
   uint32_t mThrottleCount;
   uint32_t mThrottleRecord;
 #endif
 
   // mMutex guards the below variables.
   Mutex mMutex;
   nsTArray<RefPtr<VideoConverterListener>> mListeners;
@@ -451,860 +486,911 @@ protected:
 // on the MSG/input audio thread.  Basically just bounces all the audio
 // data to a single audio processing/input queue.  We could if we wanted to
 // use multiple threads and a TaskQueue.
 class AudioProxyThread
 {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioProxyThread)
 
-  explicit AudioProxyThread(AudioSessionConduit *aConduit)
+  explicit AudioProxyThread(AudioSessionConduit* aConduit)
     : mConduit(aConduit)
+    , mTaskQueue(
+        new AutoTaskQueue(GetMediaThreadPool(MediaThreadType::WEBRTC_DECODER),
+                          "AudioProxy"))
   {
     MOZ_ASSERT(mConduit);
     MOZ_COUNT_CTOR(AudioProxyThread);
-
-    // Use only 1 thread; also forces FIFO operation
-    // We could use multiple threads, but that may be dicier with the webrtc.org
-    // code.  If so we'd need to use TaskQueues like the videoframe converter
-    RefPtr<SharedThreadPool> pool =
-      SharedThreadPool::Get(NS_LITERAL_CSTRING("AudioProxy"), 1);
-
-    mThread = pool.get();
   }
 
-  // called on mThread
-  void InternalProcessAudioChunk(
-    TrackRate rate,
-    AudioChunk& chunk,
-    bool enabled) {
+  void InternalProcessAudioChunk(TrackRate rate,
+                                 const AudioChunk& chunk,
+                                 bool enabled)
+  {
+    MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
 
     // Convert to interleaved, 16-bits integer audio, with a maximum of two
     // channels (since the WebRTC.org code below makes the assumption that the
     // input audio is either mono or stereo).
     uint32_t outputChannels = chunk.ChannelCount() == 1 ? 1 : 2;
     const int16_t* samples = nullptr;
     UniquePtr<int16_t[]> convertedSamples;
 
-    // We take advantage of the fact that the common case (microphone directly to
-    // PeerConnection, that is, a normal call), the samples are already 16-bits
-    // mono, so the representation in interleaved and planar is the same, and we
-    // can just use that.
-    if (enabled && outputChannels == 1 && chunk.mBufferFormat == AUDIO_FORMAT_S16) {
+    // We take advantage of the fact that in the common case (microphone
+    // to PeerConnection, that is, a normal call), the samples are already
+    // 16-bits mono, so the representation in interleaved and planar is the
+    // same, and we can just use that.
+    if (enabled && outputChannels == 1 &&
+        chunk.mBufferFormat == AUDIO_FORMAT_S16) {
       samples = chunk.ChannelData<int16_t>().Elements()[0];
     } else {
-      convertedSamples = MakeUnique<int16_t[]>(chunk.mDuration * outputChannels);
+      convertedSamples =
+        MakeUnique<int16_t[]>(chunk.mDuration * outputChannels);
 
       if (!enabled || chunk.mBufferFormat == AUDIO_FORMAT_SILENCE) {
         PodZero(convertedSamples.get(), chunk.mDuration * outputChannels);
       } else if (chunk.mBufferFormat == AUDIO_FORMAT_FLOAT32) {
         DownmixAndInterleave(chunk.ChannelData<float>(),
-                             chunk.mDuration, chunk.mVolume, outputChannels,
+                             chunk.mDuration,
+                             chunk.mVolume,
+                             outputChannels,
                              convertedSamples.get());
       } else if (chunk.mBufferFormat == AUDIO_FORMAT_S16) {
         DownmixAndInterleave(chunk.ChannelData<int16_t>(),
-                             chunk.mDuration, chunk.mVolume, outputChannels,
+                             chunk.mDuration,
+                             chunk.mVolume,
+                             outputChannels,
                              convertedSamples.get());
       }
       samples = convertedSamples.get();
     }
 
-    MOZ_ASSERT(!(rate%100)); // rate should be a multiple of 100
+    MOZ_ASSERT(!(rate % 100)); // rate should be a multiple of 100
 
-    // Check if the rate or the number of channels has changed since the last time
-    // we came through. I realize it may be overkill to check if the rate has
-    // changed, but I believe it is possible (e.g. if we change sources) and it
-    // costs us very little to handle this case.
+    // Check if the rate or the number of channels has changed since the last
+    // time we came through. I realize it may be overkill to check if the rate
+    // has changed, but I believe it is possible (e.g. if we change sources) and
+    // it costs us very little to handle this case.
 
     uint32_t audio_10ms = rate / 100;
 
-    if (!packetizer_ ||
-        packetizer_->PacketSize() != audio_10ms ||
-        packetizer_->Channels() != outputChannels) {
+    if (!mPacketizer || mPacketizer->PacketSize() != audio_10ms ||
+        mPacketizer->Channels() != outputChannels) {
       // It's ok to drop the audio still in the packetizer here.
-      packetizer_ = new AudioPacketizer<int16_t, int16_t>(audio_10ms, outputChannels);
+      mPacketizer = MakeUnique<AudioPacketizer<int16_t, int16_t>>(
+        audio_10ms, outputChannels);
     }
 
-    packetizer_->Input(samples, chunk.mDuration);
+    mPacketizer->Input(samples, chunk.mDuration);
 
-    while (packetizer_->PacketsAvailable()) {
-      packetizer_->Output(packet_);
-      mConduit->SendAudioFrame(packet_, packetizer_->PacketSize(), rate, packetizer_->Channels(), 0);
+    while (mPacketizer->PacketsAvailable()) {
+      mPacketizer->Output(mPacket);
+      mConduit->SendAudioFrame(
+        mPacket, mPacketizer->PacketSize(), rate, mPacketizer->Channels(), 0);
     }
   }
 
-  void QueueAudioChunk(TrackRate rate, AudioChunk& chunk, bool enabled)
+  void QueueAudioChunk(TrackRate aRate, const AudioChunk& aChunk, bool aEnabled)
   {
-    RUN_ON_THREAD(mThread,
-                  WrapRunnable(RefPtr<AudioProxyThread>(this),
-                               &AudioProxyThread::InternalProcessAudioChunk,
-                               rate, chunk, enabled),
-                  NS_DISPATCH_NORMAL);
+    RefPtr<AudioProxyThread> self = this;
+    nsresult rv = mTaskQueue->Dispatch(NS_NewRunnableFunction(
+      "AudioProxyThread::QueueAudioChunk", [self, aRate, aChunk, aEnabled]() {
+        self->InternalProcessAudioChunk(aRate, aChunk, aEnabled);
+      }));
+    MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
   }
 
 protected:
   virtual ~AudioProxyThread()
   {
-    // Conduits must be released on MainThread, and we might have the last reference
-    // We don't need to worry about runnables still trying to access the conduit, since
-    // the runnables hold a ref to AudioProxyThread.
-    NS_ReleaseOnMainThreadSystemGroup(
-      "AudioProxyThread::mConduit", mConduit.forget());
+    // Conduits must be released on MainThread, and we might have the last
+    // reference. We don't need to worry about runnables still trying to access
+    // the conduit, since the runnables hold a ref to AudioProxyThread.
+    NS_ReleaseOnMainThreadSystemGroup("AudioProxyThread::mConduit",
+                                      mConduit.forget());
     MOZ_COUNT_DTOR(AudioProxyThread);
   }
 
   RefPtr<AudioSessionConduit> mConduit;
-  nsCOMPtr<nsIEventTarget> mThread;
-  // Only accessed on mThread
-  nsAutoPtr<AudioPacketizer<int16_t, int16_t>> packetizer_;
+  const RefPtr<AutoTaskQueue> mTaskQueue;
+  // Only accessed on mTaskQueue
+  UniquePtr<AudioPacketizer<int16_t, int16_t>> mPacketizer;
   // A buffer to hold a single packet of audio.
-  int16_t packet_[AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t)];
+  int16_t mPacket[AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t)];
 };
 
 static char kDTLSExporterLabel[] = "EXTRACTOR-dtls_srtp";
 
-MediaPipeline::MediaPipeline(const std::string& pc,
-                             Direction direction,
-                             nsCOMPtr<nsIEventTarget> main_thread,
-                             nsCOMPtr<nsIEventTarget> sts_thread,
-                             RefPtr<MediaSessionConduit> conduit)
-  : direction_(direction),
-    level_(0),
-    conduit_(conduit),
-    rtp_(nullptr, RTP),
-    rtcp_(nullptr, RTCP),
-    main_thread_(main_thread),
-    sts_thread_(sts_thread),
-    rtp_packets_sent_(0),
-    rtcp_packets_sent_(0),
-    rtp_packets_received_(0),
-    rtcp_packets_received_(0),
-    rtp_bytes_sent_(0),
-    rtp_bytes_received_(0),
-    pc_(pc),
-    description_(),
-    rtp_parser_(webrtc::RtpHeaderParser::Create()){
-  // PipelineTransport() will access this->sts_thread_; moved here for safety
-  transport_ = new PipelineTransport(this);
-  packet_dumper_ = new PacketDumper(pc_);
-
-  if (direction_ == RECEIVE) {
-    conduit_->SetReceiverTransport(transport_);
+MediaPipeline::MediaPipeline(const std::string& aPc,
+                             DirectionType aDirection,
+                             nsCOMPtr<nsIEventTarget> aMainThread,
+                             nsCOMPtr<nsIEventTarget> aStsThread,
+                             RefPtr<MediaSessionConduit> aConduit)
+  : mDirection(aDirection)
+  , mLevel(0)
+  , mConduit(aConduit)
+  , mRtp(nullptr, RTP)
+  , mRtcp(nullptr, RTCP)
+  , mMainThread(aMainThread)
+  , mStsThread(aStsThread)
+  , mTransport(new PipelineTransport(this)) // PipelineTransport() will access
+                                            // this->mStsThread; moved here
+                                            // for safety
+  , mRtpPacketsSent(0)
+  , mRtcpPacketsSent(0)
+  , mRtpPacketsReceived(0)
+  , mRtcpPacketsReceived(0)
+  , mRtpBytesSent(0)
+  , mRtpBytesReceived(0)
+  , mPc(aPc)
+  , mRtpParser(webrtc::RtpHeaderParser::Create())
+  , mPacketDumper(new PacketDumper(mPc))
+{
+  if (mDirection == DirectionType::RECEIVE) {
+    mConduit->SetReceiverTransport(mTransport);
   } else {
-    conduit_->SetTransmitterTransport(transport_);
+    mConduit->SetTransmitterTransport(mTransport);
   }
 }
 
-MediaPipeline::~MediaPipeline() {
-  CSFLogInfo(LOGTAG, "Destroying MediaPipeline: %s", description_.c_str());
-  // MediaSessionConduit insists that it be released on main.
-  RUN_ON_THREAD(main_thread_, WrapRelease(conduit_.forget()),
-      NS_DISPATCH_NORMAL);
+MediaPipeline::~MediaPipeline()
+{
+  CSFLogInfo(LOGTAG, "Destroying MediaPipeline: %s", mDescription.c_str());
+  NS_ReleaseOnMainThreadSystemGroup("MediaPipeline::mConduit",
+                                    mConduit.forget());
 }
 
 void
 MediaPipeline::Shutdown_m()
 {
-  CSFLogInfo(LOGTAG, "%s in %s", description_.c_str(), __FUNCTION__);
+  CSFLogInfo(LOGTAG, "%s in %s", mDescription.c_str(), __FUNCTION__);
 
   Stop();
   DetachMedia();
 
-  RUN_ON_THREAD(sts_thread_,
-                WrapRunnable(
-                    RefPtr<MediaPipeline>(this),
-                    &MediaPipeline::DetachTransport_s),
+  RUN_ON_THREAD(mStsThread,
+                WrapRunnable(RefPtr<MediaPipeline>(this),
+                             &MediaPipeline::DetachTransport_s),
                 NS_DISPATCH_NORMAL);
 }
 
 void
 MediaPipeline::DetachTransport_s()
 {
-  ASSERT_ON_THREAD(sts_thread_);
+  ASSERT_ON_THREAD(mStsThread);
 
   disconnect_all();
-  transport_->Detach();
-  rtp_.Detach();
-  rtcp_.Detach();
+  mTransport->Detach();
+  mRtp.Detach();
+  mRtcp.Detach();
 
   // Make sure any cycles are broken
-  packet_dumper_ = nullptr;
+  mPacketDumper = nullptr;
 }
 
 nsresult
 MediaPipeline::AttachTransport_s()
 {
-  ASSERT_ON_THREAD(sts_thread_);
+  ASSERT_ON_THREAD(mStsThread);
   nsresult res;
-  MOZ_ASSERT(rtp_.transport_);
-  MOZ_ASSERT(rtcp_.transport_);
-  res = ConnectTransport_s(rtp_);
+  MOZ_ASSERT(mRtp.mTransport);
+  MOZ_ASSERT(mRtcp.mTransport);
+  res = ConnectTransport_s(mRtp);
   if (NS_FAILED(res)) {
     return res;
   }
 
-  if (rtcp_.transport_ != rtp_.transport_) {
-    res = ConnectTransport_s(rtcp_);
+  if (mRtcp.mTransport != mRtp.mTransport) {
+    res = ConnectTransport_s(mRtcp);
     if (NS_FAILED(res)) {
       return res;
     }
   }
 
-  transport_->Attach(this);
+  mTransport->Attach(this);
 
   return NS_OK;
 }
 
 void
-MediaPipeline::UpdateTransport_m(RefPtr<TransportFlow> rtp_transport,
-                                 RefPtr<TransportFlow> rtcp_transport,
-                                 nsAutoPtr<MediaPipelineFilter> filter)
+MediaPipeline::UpdateTransport_m(RefPtr<TransportFlow> aRtpTransport,
+                                 RefPtr<TransportFlow> aRtcpTransport,
+                                 nsAutoPtr<MediaPipelineFilter> aFilter)
 {
-  RUN_ON_THREAD(sts_thread_,
-                WrapRunnable(
-                    RefPtr<MediaPipeline>(this),
-                    &MediaPipeline::UpdateTransport_s,
-                    rtp_transport,
-                    rtcp_transport,
-                    filter),
+  RUN_ON_THREAD(mStsThread,
+                WrapRunnable(RefPtr<MediaPipeline>(this),
+                             &MediaPipeline::UpdateTransport_s,
+                             aRtpTransport,
+                             aRtcpTransport,
+                             aFilter),
                 NS_DISPATCH_NORMAL);
 }
 
 void
-MediaPipeline::UpdateTransport_s(RefPtr<TransportFlow> rtp_transport,
-                                 RefPtr<TransportFlow> rtcp_transport,
-                                 nsAutoPtr<MediaPipelineFilter> filter)
+MediaPipeline::UpdateTransport_s(RefPtr<TransportFlow> aRtpTransport,
+                                 RefPtr<TransportFlow> aRtcpTransport,
+                                 nsAutoPtr<MediaPipelineFilter> aFilter)
 {
   bool rtcp_mux = false;
-  if (!rtcp_transport) {
-    rtcp_transport = rtp_transport;
+  if (!aRtcpTransport) {
+    aRtcpTransport = aRtpTransport;
     rtcp_mux = true;
   }
 
-  if ((rtp_transport != rtp_.transport_) ||
-      (rtcp_transport != rtcp_.transport_)) {
+  if ((aRtpTransport != mRtp.mTransport) ||
+      (aRtcpTransport != mRtcp.mTransport)) {
     disconnect_all();
-    transport_->Detach();
-    rtp_.Detach();
-    rtcp_.Detach();
-    if (rtp_transport && rtcp_transport) {
-      rtp_ = TransportInfo(rtp_transport, rtcp_mux ? MUX : RTP);
-      rtcp_ = TransportInfo(rtcp_transport, rtcp_mux ? MUX : RTCP);
+    mTransport->Detach();
+    mRtp.Detach();
+    mRtcp.Detach();
+    if (aRtpTransport && aRtcpTransport) {
+      mRtp = TransportInfo(aRtpTransport, rtcp_mux ? MUX : RTP);
+      mRtcp = TransportInfo(aRtcpTransport, rtcp_mux ? MUX : RTCP);
       AttachTransport_s();
     }
   }
 
-  if (filter_ && filter) {
+  if (mFilter && aFilter) {
     // Use the new filter, but don't forget any remote SSRCs that we've learned
     // by receiving traffic.
-    filter_->Update(*filter);
+    mFilter->Update(*aFilter);
   } else {
-    filter_ = filter;
+    mFilter = aFilter;
   }
 }
 
 void
-MediaPipeline::AddRIDExtension_m(size_t extension_id)
+MediaPipeline::AddRIDExtension_m(size_t aExtensionId)
 {
-  RUN_ON_THREAD(sts_thread_,
+  RUN_ON_THREAD(mStsThread,
                 WrapRunnable(RefPtr<MediaPipeline>(this),
                              &MediaPipeline::AddRIDExtension_s,
-                             extension_id),
+                             aExtensionId),
                 NS_DISPATCH_NORMAL);
 }
 
 void
-MediaPipeline::AddRIDExtension_s(size_t extension_id)
+MediaPipeline::AddRIDExtension_s(size_t aExtensionId)
 {
-  rtp_parser_->RegisterRtpHeaderExtension(webrtc::kRtpExtensionRtpStreamId,
-                                          extension_id);
+  mRtpParser->RegisterRtpHeaderExtension(webrtc::kRtpExtensionRtpStreamId,
+                                         aExtensionId);
 }
 
 void
-MediaPipeline::AddRIDFilter_m(const std::string& rid)
+MediaPipeline::AddRIDFilter_m(const std::string& aRid)
 {
-  RUN_ON_THREAD(sts_thread_,
+  RUN_ON_THREAD(mStsThread,
                 WrapRunnable(RefPtr<MediaPipeline>(this),
                              &MediaPipeline::AddRIDFilter_s,
-                             rid),
+                             aRid),
                 NS_DISPATCH_NORMAL);
 }
 
 void
-MediaPipeline::AddRIDFilter_s(const std::string& rid)
+MediaPipeline::AddRIDFilter_s(const std::string& aRid)
 {
-  filter_ = new MediaPipelineFilter;
-  filter_->AddRemoteRtpStreamId(rid);
+  mFilter = new MediaPipelineFilter;
+  mFilter->AddRemoteRtpStreamId(aRid);
 }
 
 void
 MediaPipeline::GetContributingSourceStats(
-    const nsString& aInboundRtpStreamId,
-    FallibleTArray<dom::RTCRTPContributingSourceStats>& aArr) const
+  const nsString& aInboundRtpStreamId,
+  FallibleTArray<dom::RTCRTPContributingSourceStats>& aArr) const
 {
   // Get the expiry from now
   DOMHighResTimeStamp expiry = RtpCSRCStats::GetExpiryFromTime(GetNow());
-  for (auto info : csrc_stats_) {
+  for (auto info : mCsrcStats) {
     if (!info.second.Expired(expiry)) {
       RTCRTPContributingSourceStats stats;
       info.second.GetWebidlInstance(stats, aInboundRtpStreamId);
       aArr.AppendElement(stats, fallible);
     }
   }
 }
 
-void MediaPipeline::StateChange(TransportFlow *flow, TransportLayer::State state) {
-  TransportInfo* info = GetTransportInfo_s(flow);
+void
+MediaPipeline::StateChange(TransportFlow* aFlow, TransportLayer::State aState)
+{
+  TransportInfo* info = GetTransportInfo_s(aFlow);
   MOZ_ASSERT(info);
 
-  if (state == TransportLayer::TS_OPEN) {
+  if (aState == TransportLayer::TS_OPEN) {
     CSFLogInfo(LOGTAG, "Flow is ready");
     TransportReady_s(*info);
-  } else if (state == TransportLayer::TS_CLOSED ||
-             state == TransportLayer::TS_ERROR) {
+  } else if (aState == TransportLayer::TS_CLOSED ||
+             aState == TransportLayer::TS_ERROR) {
     TransportFailed_s(*info);
   }
 }
 
-static bool MakeRtpTypeToStringArray(const char** array) {
+static bool
+MakeRtpTypeToStringArray(const char** aArray)
+{
   static const char* RTP_str = "RTP";
   static const char* RTCP_str = "RTCP";
   static const char* MUX_str = "RTP/RTCP mux";
-  array[MediaPipeline::RTP] = RTP_str;
-  array[MediaPipeline::RTCP] = RTCP_str;
-  array[MediaPipeline::MUX] = MUX_str;
+  aArray[MediaPipeline::RTP] = RTP_str;
+  aArray[MediaPipeline::RTCP] = RTCP_str;
+  aArray[MediaPipeline::MUX] = MUX_str;
   return true;
 }
 
-static const char* ToString(MediaPipeline::RtpType type) {
-  static const char* array[(int)MediaPipeline::MAX_RTP_TYPE] = {nullptr};
+static const char*
+ToString(MediaPipeline::RtpType type)
+{
+  static const char* array[(int)MediaPipeline::MAX_RTP_TYPE] = { nullptr };
   // Dummy variable to cause init to happen only on first call
   static bool dummy = MakeRtpTypeToStringArray(array);
   (void)dummy;
   return array[type];
 }
 
-nsresult MediaPipeline::TransportReady_s(TransportInfo &info) {
-  MOZ_ASSERT(!description_.empty());
+nsresult
+MediaPipeline::TransportReady_s(TransportInfo& aInfo)
+{
+  MOZ_ASSERT(!mDescription.empty());
 
   // TODO(ekr@rtfm.com): implement some kind of notification on
   // failure. bug 852665.
-  if (info.state_ != MP_CONNECTING) {
-    CSFLogError(LOGTAG, "Transport ready for flow in wrong state:%s :%s",
-                description_.c_str(), ToString(info.type_));
+  if (aInfo.mState != StateType::MP_CONNECTING) {
+    CSFLogError(LOGTAG,
+                "Transport ready for flow in wrong state:%s :%s",
+                mDescription.c_str(),
+                ToString(aInfo.mType));
     return NS_ERROR_FAILURE;
   }
 
-  CSFLogInfo(LOGTAG, "Transport ready for pipeline %p flow %s: %s", this,
-             description_.c_str(), ToString(info.type_));
+  CSFLogInfo(LOGTAG,
+             "Transport ready for pipeline %p flow %s: %s",
+             this,
+             mDescription.c_str(),
+             ToString(aInfo.mType));
 
   // TODO(bcampen@mozilla.com): Should we disconnect from the flow on failure?
   nsresult res;
 
   // Now instantiate the SRTP objects
-  TransportLayerDtls *dtls = static_cast<TransportLayerDtls *>(
-      info.transport_->GetLayer(TransportLayerDtls::ID()));
-  MOZ_ASSERT(dtls);  // DTLS is mandatory
+  TransportLayerDtls* dtls = static_cast<TransportLayerDtls*>(
+    aInfo.mTransport->GetLayer(TransportLayerDtls::ID()));
+  MOZ_ASSERT(dtls); // DTLS is mandatory
 
   uint16_t cipher_suite;
   res = dtls->GetSrtpCipher(&cipher_suite);
   if (NS_FAILED(res)) {
     CSFLogError(LOGTAG, "Failed to negotiate DTLS-SRTP. This is an error");
-    info.state_ = MP_CLOSED;
-    UpdateRtcpMuxState(info);
+    aInfo.mState = StateType::MP_CLOSED;
+    UpdateRtcpMuxState(aInfo);
     return res;
   }
 
   // SRTP Key Exporter as per RFC 5764 S 4.2
   unsigned char srtp_block[SRTP_TOTAL_KEY_LENGTH * 2];
-  res = dtls->ExportKeyingMaterial(kDTLSExporterLabel, false, "",
-                                   srtp_block, sizeof(srtp_block));
+  res = dtls->ExportKeyingMaterial(
+    kDTLSExporterLabel, false, "", srtp_block, sizeof(srtp_block));
   if (NS_FAILED(res)) {
     CSFLogError(LOGTAG, "Failed to compute DTLS-SRTP keys. This is an error");
-    info.state_ = MP_CLOSED;
-    UpdateRtcpMuxState(info);
-    MOZ_CRASH();  // TODO: Remove once we have enough field experience to
-                  // know it doesn't happen. bug 798797. Note that the
-                  // code after this never executes.
+    aInfo.mState = StateType::MP_CLOSED;
+    UpdateRtcpMuxState(aInfo);
+    MOZ_CRASH(); // TODO: Remove once we have enough field experience to
+                 // know it doesn't happen. bug 798797. Note that the
+                 // code after this never executes.
     return res;
   }
 
   // Slice and dice as per RFC 5764 S 4.2
   unsigned char client_write_key[SRTP_TOTAL_KEY_LENGTH];
   unsigned char server_write_key[SRTP_TOTAL_KEY_LENGTH];
   int offset = 0;
   memcpy(client_write_key, srtp_block + offset, SRTP_MASTER_KEY_LENGTH);
   offset += SRTP_MASTER_KEY_LENGTH;
   memcpy(server_write_key, srtp_block + offset, SRTP_MASTER_KEY_LENGTH);
   offset += SRTP_MASTER_KEY_LENGTH;
   memcpy(client_write_key + SRTP_MASTER_KEY_LENGTH,
-         srtp_block + offset, SRTP_MASTER_SALT_LENGTH);
+         srtp_block + offset,
+         SRTP_MASTER_SALT_LENGTH);
   offset += SRTP_MASTER_SALT_LENGTH;
   memcpy(server_write_key + SRTP_MASTER_KEY_LENGTH,
-         srtp_block + offset, SRTP_MASTER_SALT_LENGTH);
+         srtp_block + offset,
+         SRTP_MASTER_SALT_LENGTH);
   offset += SRTP_MASTER_SALT_LENGTH;
   MOZ_ASSERT(offset == sizeof(srtp_block));
 
-  unsigned char *write_key;
-  unsigned char *read_key;
+  unsigned char* write_key;
+  unsigned char* read_key;
 
   if (dtls->role() == TransportLayerDtls::CLIENT) {
     write_key = client_write_key;
     read_key = server_write_key;
   } else {
     write_key = server_write_key;
     read_key = client_write_key;
   }
 
-  MOZ_ASSERT(!info.send_srtp_ && !info.recv_srtp_);
-  info.send_srtp_ = SrtpFlow::Create(cipher_suite, false, write_key,
-                                     SRTP_TOTAL_KEY_LENGTH);
-  info.recv_srtp_ = SrtpFlow::Create(cipher_suite, true, read_key,
-                                     SRTP_TOTAL_KEY_LENGTH);
-  if (!info.send_srtp_ || !info.recv_srtp_) {
-    CSFLogError(LOGTAG, "Couldn't create SRTP flow for %s",
-                ToString(info.type_));
-    info.state_ = MP_CLOSED;
-    UpdateRtcpMuxState(info);
+  MOZ_ASSERT(!aInfo.mSendSrtp && !aInfo.mRecvSrtp);
+  aInfo.mSendSrtp =
+    SrtpFlow::Create(cipher_suite, false, write_key, SRTP_TOTAL_KEY_LENGTH);
+  aInfo.mRecvSrtp =
+    SrtpFlow::Create(cipher_suite, true, read_key, SRTP_TOTAL_KEY_LENGTH);
+  if (!aInfo.mSendSrtp || !aInfo.mRecvSrtp) {
+    CSFLogError(
+      LOGTAG, "Couldn't create SRTP flow for %s", ToString(aInfo.mType));
+    aInfo.mState = StateType::MP_CLOSED;
+    UpdateRtcpMuxState(aInfo);
     return NS_ERROR_FAILURE;
   }
 
-  if (direction_ == RECEIVE) {
-    CSFLogInfo(LOGTAG, "Listening for %s packets received on %p",
-               ToString(info.type_), dtls->downward());
+  if (mDirection == DirectionType::RECEIVE) {
+    CSFLogInfo(LOGTAG,
+               "Listening for %s packets received on %p",
+               ToString(aInfo.mType),
+               dtls->downward());
 
-    switch (info.type_) {
+    switch (aInfo.mType) {
       case RTP:
         dtls->downward()->SignalPacketReceived.connect(
-            this,
-            &MediaPipeline::RtpPacketReceived);
+          this, &MediaPipeline::RtpPacketReceived);
         break;
       case RTCP:
         dtls->downward()->SignalPacketReceived.connect(
-            this,
-            &MediaPipeline::RtcpPacketReceived);
+          this, &MediaPipeline::RtcpPacketReceived);
         break;
       case MUX:
         dtls->downward()->SignalPacketReceived.connect(
-            this,
-            &MediaPipeline::PacketReceived);
+          this, &MediaPipeline::PacketReceived);
         break;
       default:
         MOZ_CRASH();
     }
   }
 
-  info.state_ = MP_OPEN;
-  UpdateRtcpMuxState(info);
+  aInfo.mState = StateType::MP_OPEN;
+  UpdateRtcpMuxState(aInfo);
   return NS_OK;
 }
 
-nsresult MediaPipeline::TransportFailed_s(TransportInfo &info) {
-  ASSERT_ON_THREAD(sts_thread_);
+nsresult
+MediaPipeline::TransportFailed_s(TransportInfo& aInfo)
+{
+  ASSERT_ON_THREAD(mStsThread);
 
-  info.state_ = MP_CLOSED;
-  UpdateRtcpMuxState(info);
+  aInfo.mState = StateType::MP_CLOSED;
+  UpdateRtcpMuxState(aInfo);
 
-  CSFLogInfo(LOGTAG, "Transport closed for flow %s", ToString(info.type_));
+  CSFLogInfo(LOGTAG, "Transport closed for flow %s", ToString(aInfo.mType));
 
   NS_WARNING(
-      "MediaPipeline Transport failed. This is not properly cleaned up yet");
+    "MediaPipeline Transport failed. This is not properly cleaned up yet");
 
   // TODO(ekr@rtfm.com): SECURITY: Figure out how to clean up if the
   // connection was good and now it is bad.
   // TODO(ekr@rtfm.com): Report up so that the PC knows we
   // have experienced an error.
 
   return NS_OK;
 }
 
-void MediaPipeline::UpdateRtcpMuxState(TransportInfo &info) {
-  if (info.type_ == MUX) {
-    if (info.transport_ == rtcp_.transport_) {
-      rtcp_.state_ = info.state_;
-      if (!rtcp_.send_srtp_) {
-        rtcp_.send_srtp_ = info.send_srtp_;
-        rtcp_.recv_srtp_ = info.recv_srtp_;
+void
+MediaPipeline::UpdateRtcpMuxState(TransportInfo& aInfo)
+{
+  if (aInfo.mType == MUX) {
+    if (aInfo.mTransport == mRtcp.mTransport) {
+      mRtcp.mState = aInfo.mState;
+      if (!mRtcp.mSendSrtp) {
+        mRtcp.mSendSrtp = aInfo.mSendSrtp;
+        mRtcp.mRecvSrtp = aInfo.mRecvSrtp;
       }
     }
   }
 }
 
-nsresult MediaPipeline::SendPacket(TransportFlow *flow, const void *data,
-                                   int len) {
-  ASSERT_ON_THREAD(sts_thread_);
+nsresult
+MediaPipeline::SendPacket(const TransportFlow* aFlow, const void* aData, int aLen)
+{
+  ASSERT_ON_THREAD(mStsThread);
 
   // Note that we bypass the DTLS layer here
-  TransportLayerDtls *dtls = static_cast<TransportLayerDtls *>(
-      flow->GetLayer(TransportLayerDtls::ID()));
+  TransportLayerDtls* dtls =
+    static_cast<TransportLayerDtls*>(aFlow->GetLayer(TransportLayerDtls::ID()));
   MOZ_ASSERT(dtls);
 
-  TransportResult res = dtls->downward()->
-      SendPacket(static_cast<const unsigned char *>(data), len);
+  TransportResult res =
+    dtls->downward()->SendPacket(static_cast<const unsigned char*>(aData), aLen);
 
-  if (res != len) {
+  if (res != aLen) {
     // Ignore blocking indications
     if (res == TE_WOULDBLOCK)
       return NS_OK;
 
-    CSFLogError(LOGTAG, "Failed write on stream %s", description_.c_str());
+    CSFLogError(LOGTAG, "Failed write on stream %s", mDescription.c_str());
     return NS_BASE_STREAM_CLOSED;
   }
 
   return NS_OK;
 }
 
-void MediaPipeline::increment_rtp_packets_sent(int32_t bytes) {
-  ++rtp_packets_sent_;
-  rtp_bytes_sent_ += bytes;
+void
+MediaPipeline::IncrementRtpPacketsSent(int32_t aBytes)
+{
+  ++mRtpPacketsSent;
+  mRtpBytesSent += aBytes;
 
-  if (!(rtp_packets_sent_ % 100)) {
-    CSFLogInfo(LOGTAG, "RTP sent packet count for %s Pipeline %p Flow: %p: %u (%" PRId64 " bytes)",
-               description_.c_str(), this, static_cast<void *>(rtp_.transport_),
-               rtp_packets_sent_, rtp_bytes_sent_);
+  if (!(mRtpPacketsSent % 100)) {
+    CSFLogInfo(LOGTAG,
+               "RTP sent packet count for %s Pipeline %p Flow: %p: %u (%" PRId64
+               " bytes)",
+               mDescription.c_str(),
+               this,
+               static_cast<void*>(mRtp.mTransport),
+               mRtpPacketsSent,
+               mRtpBytesSent);
   }
 }
 
-void MediaPipeline::increment_rtcp_packets_sent() {
-  ++rtcp_packets_sent_;
-  if (!(rtcp_packets_sent_ % 100)) {
-    CSFLogInfo(LOGTAG, "RTCP sent packet count for %s Pipeline %p Flow: %p: %u",
-               description_.c_str(), this, static_cast<void *>(rtp_.transport_),
-               rtcp_packets_sent_);
+void
+MediaPipeline::IncrementRtcpPacketsSent()
+{
+  ++mRtcpPacketsSent;
+  if (!(mRtcpPacketsSent % 100)) {
+    CSFLogInfo(LOGTAG,
+               "RTCP sent packet count for %s Pipeline %p Flow: %p: %u",
+               mDescription.c_str(),
+               this,
+               static_cast<void*>(mRtp.mTransport),
+               mRtcpPacketsSent);
   }
 }
 
-void MediaPipeline::increment_rtp_packets_received(int32_t bytes) {
-  ++rtp_packets_received_;
-  rtp_bytes_received_ += bytes;
-  if (!(rtp_packets_received_ % 100)) {
-    CSFLogInfo(LOGTAG, "RTP received packet count for %s Pipeline %p Flow: %p: %u (%" PRId64 " bytes)",
-               description_.c_str(), this, static_cast<void *>(rtp_.transport_),
-               rtp_packets_received_, rtp_bytes_received_);
+void
+MediaPipeline::IncrementRtpPacketsReceived(int32_t aBytes)
+{
+  ++mRtpPacketsReceived;
+  mRtpBytesReceived += aBytes;
+  if (!(mRtpPacketsReceived % 100)) {
+    CSFLogInfo(
+      LOGTAG,
+      "RTP received packet count for %s Pipeline %p Flow: %p: %u (%" PRId64
+      " bytes)",
+      mDescription.c_str(),
+      this,
+      static_cast<void*>(mRtp.mTransport),
+      mRtpPacketsReceived,
+      mRtpBytesReceived);
   }
 }
 
-void MediaPipeline::increment_rtcp_packets_received() {
-  ++rtcp_packets_received_;
-  if (!(rtcp_packets_received_ % 100)) {
-    CSFLogInfo(LOGTAG, "RTCP received packet count for %s Pipeline %p Flow: %p: %u",
-               description_.c_str(), this, static_cast<void *>(rtp_.transport_),
-               rtcp_packets_received_);
+void
+MediaPipeline::IncrementRtcpPacketsReceived()
+{
+  ++mRtcpPacketsReceived;
+  if (!(mRtcpPacketsReceived % 100)) {
+    CSFLogInfo(LOGTAG,
+               "RTCP received packet count for %s Pipeline %p Flow: %p: %u",
+               mDescription.c_str(),
+               this,
+               static_cast<void*>(mRtp.mTransport),
+               mRtcpPacketsReceived);
   }
 }
 
-void MediaPipeline::RtpPacketReceived(TransportLayer *layer,
-                                      const unsigned char *data,
-                                      size_t len) {
-  if (direction_ == TRANSMIT) {
+void
+MediaPipeline::RtpPacketReceived(TransportLayer* aLayer,
+                                 const unsigned char* aData,
+                                 size_t aLen)
+{
+  if (mDirection == DirectionType::TRANSMIT) {
     return;
   }
 
-  if (!transport_->pipeline()) {
+  if (!mTransport->Pipeline()) {
     CSFLogError(LOGTAG, "Discarding incoming packet; transport disconnected");
     return;
   }
 
-  if (!conduit_) {
+  if (!mConduit) {
     CSFLogDebug(LOGTAG, "Discarding incoming packet; media disconnected");
     return;
   }
 
-  if (rtp_.state_ != MP_OPEN) {
+  if (mRtp.mState != StateType::MP_OPEN) {
     CSFLogError(LOGTAG, "Discarding incoming packet; pipeline not open");
     return;
   }
 
-  if (rtp_.transport_->state() != TransportLayer::TS_OPEN) {
+  if (mRtp.mTransport->state() != TransportLayer::TS_OPEN) {
     CSFLogError(LOGTAG, "Discarding incoming packet; transport not open");
     return;
   }
 
   // This should never happen.
-  MOZ_ASSERT(rtp_.recv_srtp_);
+  MOZ_ASSERT(mRtp.mRecvSrtp);
 
-  if (!len) {
+  if (!aLen) {
     return;
   }
 
   // Filter out everything but RTP/RTCP
-  if (data[0] < 128 || data[0] > 191) {
+  if (aData[0] < 128 || aData[0] > 191) {
     return;
   }
 
   webrtc::RTPHeader header;
-  if (!rtp_parser_->Parse(data, len, &header)) {
+  if (!mRtpParser->Parse(aData, aLen, &header)) {
     return;
   }
 
-  if (filter_ && !filter_->Filter(header)) {
+  if (mFilter && !mFilter->Filter(header)) {
     return;
   }
 
   // Make sure to only get the time once, and only if we need it by
   // using getTimestamp() for access
   DOMHighResTimeStamp now = 0.0;
   bool hasTime = false;
 
   // Remove expired RtpCSRCStats
-  if (!csrc_stats_.empty()) {
+  if (!mCsrcStats.empty()) {
     if (!hasTime) {
       now = GetNow();
       hasTime = true;
     }
     auto expiry = RtpCSRCStats::GetExpiryFromTime(now);
-    for (auto p = csrc_stats_.begin(); p != csrc_stats_.end();) {
+    for (auto p = mCsrcStats.begin(); p != mCsrcStats.end();) {
       if (p->second.Expired(expiry)) {
-        p = csrc_stats_.erase(p);
+        p = mCsrcStats.erase(p);
         continue;
       }
       p++;
     }
   }
 
   // Add new RtpCSRCStats
   if (header.numCSRCs) {
     for (auto i = 0; i < header.numCSRCs; i++) {
       if (!hasTime) {
         now = GetNow();
         hasTime = true;
       }
-      auto csrcInfo = csrc_stats_.find(header.arrOfCSRCs[i]);
-      if (csrcInfo == csrc_stats_.end()) {
-        csrc_stats_.insert(std::make_pair(header.arrOfCSRCs[i],
-            RtpCSRCStats(header.arrOfCSRCs[i],now)));
+      auto csrcInfo = mCsrcStats.find(header.arrOfCSRCs[i]);
+      if (csrcInfo == mCsrcStats.end()) {
+        mCsrcStats.insert(std::make_pair(
+          header.arrOfCSRCs[i], RtpCSRCStats(header.arrOfCSRCs[i], now)));
       } else {
         csrcInfo->second.SetTimestamp(now);
       }
     }
   }
 
-  packet_dumper_->Dump(
-      level_, dom::mozPacketDumpType::Srtp, false, data, len);
+  mPacketDumper->Dump(mLevel, dom::mozPacketDumpType::Srtp, false, aData, aLen);
 
   // Make a copy rather than cast away constness
-  auto inner_data = MakeUnique<unsigned char[]>(len);
-  memcpy(inner_data.get(), data, len);
-  int out_len = 0;
-  nsresult res = rtp_.recv_srtp_->UnprotectRtp(inner_data.get(),
-                                               len, len, &out_len);
+  auto innerData = MakeUnique<unsigned char[]>(aLen);
+  memcpy(innerData.get(), aData, aLen);
+  int outLen = 0;
+  nsresult res =
+    mRtp.mRecvSrtp->UnprotectRtp(innerData.get(), aLen, aLen, &outLen);
   if (!NS_SUCCEEDED(res)) {
     char tmp[16];
 
-    SprintfLiteral(tmp, "%.2x %.2x %.2x %.2x",
-                   inner_data[0],
-                   inner_data[1],
-                   inner_data[2],
-                   inner_data[3]);
+    SprintfLiteral(tmp,
+                   "%.2x %.2x %.2x %.2x",
+                   innerData[0],
+                   innerData[1],
+                   innerData[2],
+                   innerData[3]);
 
-    CSFLogError(LOGTAG, "Error unprotecting RTP in %s len= %zu [%s]",
-                description_.c_str(), len, tmp);
+    CSFLogError(LOGTAG,
+                "Error unprotecting RTP in %s len= %zu [%s]",
+                mDescription.c_str(),
+                aLen,
+                tmp);
     return;
   }
-  CSFLogDebug(LOGTAG, "%s received RTP packet.", description_.c_str());
-  increment_rtp_packets_received(out_len);
+  CSFLogDebug(LOGTAG, "%s received RTP packet.", mDescription.c_str());
+  IncrementRtpPacketsReceived(outLen);
   OnRtpPacketReceived();
 
-  RtpLogger::LogPacket(inner_data.get(), out_len, true, true, header.headerLength,
-                       description_);
+  RtpLogger::LogPacket(
+    innerData.get(), outLen, true, true, header.headerLength, mDescription);
 
-  packet_dumper_->Dump(
-      level_, dom::mozPacketDumpType::Rtp, false, inner_data.get(), out_len);
+  mPacketDumper->Dump(
+    mLevel, dom::mozPacketDumpType::Rtp, false, innerData.get(), outLen);
 
-  (void)conduit_->ReceivedRTPPacket(inner_data.get(), out_len, header.ssrc);  // Ignore error codes
+  (void)mConduit->ReceivedRTPPacket(
+    innerData.get(), outLen, header.ssrc); // Ignore error codes
 }
 
-void MediaPipeline::RtcpPacketReceived(TransportLayer *layer,
-                                       const unsigned char *data,
-                                       size_t len) {
-  if (!transport_->pipeline()) {
+void
+MediaPipeline::RtcpPacketReceived(TransportLayer* aLayer,
+                                  const unsigned char* aData,
+                                  size_t aLen)
+{
+  if (!mTransport->Pipeline()) {
     CSFLogDebug(LOGTAG, "Discarding incoming packet; transport disconnected");
     return;
   }
 
-  if (!conduit_) {
+  if (!mConduit) {
     CSFLogDebug(LOGTAG, "Discarding incoming packet; media disconnected");
     return;
   }
 
-  if (rtcp_.state_ != MP_OPEN) {
+  if (mRtcp.mState != StateType::MP_OPEN) {
     CSFLogDebug(LOGTAG, "Discarding incoming packet; pipeline not open");
     return;
   }
 
-  if (rtcp_.transport_->state() != TransportLayer::TS_OPEN) {
+  if (mRtcp.mTransport->state() != TransportLayer::TS_OPEN) {
     CSFLogError(LOGTAG, "Discarding incoming packet; transport not open");
     return;
   }
 
-  if (!len) {
+  if (!aLen) {
     return;
   }
 
   // Filter out everything but RTP/RTCP
-  if (data[0] < 128 || data[0] > 191) {
+  if (aData[0] < 128 || aData[0] > 191) {
     return;
   }
 
   // We do not filter receiver reports, since the webrtc.org code for
   // senders already has logic to ignore RRs that do not apply.
   // TODO bug 1279153: remove SR check for reduced size RTCP
-  if (filter_ && !filter_->FilterSenderReport(data, len)) {
+  if (mFilter && !mFilter->FilterSenderReport(aData, aLen)) {
     CSFLogWarn(LOGTAG, "Dropping incoming RTCP packet; filtered out");
     return;
   }
 
-  packet_dumper_->Dump(
-      level_, dom::mozPacketDumpType::Srtcp, false, data, len);
+  mPacketDumper->Dump(mLevel, dom::mozPacketDumpType::Srtcp, false, aData, aLen);
 
   // Make a copy rather than cast away constness
-  auto inner_data = MakeUnique<unsigned char[]>(len);
-  memcpy(inner_data.get(), data, len);
-  int out_len;
+  auto innerData = MakeUnique<unsigned char[]>(aLen);
+  memcpy(innerData.get(), aData, aLen);
+  int outLen;
 
-  nsresult res = rtcp_.recv_srtp_->UnprotectRtcp(inner_data.get(),
-                                                 len,
-                                                 len,
-                                                 &out_len);
+  nsresult res =
+    mRtcp.mRecvSrtp->UnprotectRtcp(innerData.get(), aLen, aLen, &outLen);
 
   if (!NS_SUCCEEDED(res))
     return;
 
-  CSFLogDebug(LOGTAG, "%s received RTCP packet.", description_.c_str());
-  increment_rtcp_packets_received();
+  CSFLogDebug(LOGTAG, "%s received RTCP packet.", mDescription.c_str());
+  IncrementRtcpPacketsReceived();
 
-  RtpLogger::LogPacket(inner_data.get(), out_len, true, false, 0, description_);
+  RtpLogger::LogPacket(innerData.get(), outLen, true, false, 0, mDescription);
 
-  packet_dumper_->Dump(
-      level_, dom::mozPacketDumpType::Rtcp, false, data, len);
+  mPacketDumper->Dump(mLevel, dom::mozPacketDumpType::Rtcp, false, aData, aLen);
 
-  MOZ_ASSERT(rtcp_.recv_srtp_);  // This should never happen
+  MOZ_ASSERT(mRtcp.mRecvSrtp); // This should never happen
 
-  (void)conduit_->ReceivedRTCPPacket(inner_data.get(), out_len);  // Ignore error codes
+  (void)mConduit->ReceivedRTCPPacket(innerData.get(),
+                                     outLen); // Ignore error codes
 }
 
-bool MediaPipeline::IsRtp(const unsigned char *data, size_t len) {
-  if (len < 2)
+bool
+MediaPipeline::IsRtp(const unsigned char* aData, size_t aLen) const
+{
+  if (aLen < 2)
     return false;
 
   // Check if this is a RTCP packet. Logic based on the types listed in
   // media/webrtc/trunk/src/modules/rtp_rtcp/source/rtp_utility.cc
 
   // Anything outside this range is RTP.
-  if ((data[1] < 192) || (data[1] > 207))
+  if ((aData[1] < 192) || (aData[1] > 207))
     return true;
 
-  if (data[1] == 192)  // FIR
+  if (aData[1] == 192) // FIR
     return false;
 
-  if (data[1] == 193)  // NACK, but could also be RTP. This makes us sad
-    return true;       // but it's how webrtc.org behaves.
+  if (aData[1] == 193) // NACK, but could also be RTP. This makes us sad
+    return true;      // but it's how webrtc.org behaves.
 
-  if (data[1] == 194)
+  if (aData[1] == 194)
     return true;
 
-  if (data[1] == 195)  // IJ.
+  if (aData[1] == 195) // IJ.
     return false;
 
-  if ((data[1] > 195) && (data[1] < 200))  // the > 195 is redundant
+  if ((aData[1] > 195) && (aData[1] < 200)) // the > 195 is redundant
     return true;
 
-  if ((data[1] >= 200) && (data[1] <= 207))  // SR, RR, SDES, BYE,
-    return false;                            // APP, RTPFB, PSFB, XR
+  if ((aData[1] >= 200) && (aData[1] <= 207)) // SR, RR, SDES, BYE,
+    return false;                           // APP, RTPFB, PSFB, XR
 
-  MOZ_ASSERT(false);  // Not reached, belt and suspenders.
+  MOZ_ASSERT(false); // Not reached, belt and suspenders.
   return true;
 }
 
-void MediaPipeline::PacketReceived(TransportLayer *layer,
-                                   const unsigned char *data,
-                                   size_t len) {
-  if (!transport_->pipeline()) {
+void
+MediaPipeline::PacketReceived(TransportLayer* aLayer,
+                              const unsigned char* aData,
+                              size_t aLen)
+{
+  if (!mTransport->Pipeline()) {
     CSFLogDebug(LOGTAG, "Discarding incoming packet; transport disconnected");
     return;
   }
 
-  if (IsRtp(data, len)) {
-    RtpPacketReceived(layer, data, len);
+  if (IsRtp(aData, aLen)) {
+    RtpPacketReceived(aLayer, aData, aLen);
   } else {
-    RtcpPacketReceived(layer, data, len);
+    RtcpPacketReceived(aLayer, aData, aLen);
   }
 }
 
-class MediaPipelineTransmit::PipelineListener
-  : public MediaStreamVideoSink
+class MediaPipelineTransmit::PipelineListener : public MediaStreamVideoSink
 {
-friend class MediaPipelineTransmit;
+  friend class MediaPipelineTransmit;
+
 public:
-  explicit PipelineListener(const RefPtr<MediaSessionConduit>& conduit)
-    : conduit_(conduit),
-      track_id_(TRACK_INVALID),
-      mMutex("MediaPipelineTransmit::PipelineListener"),
-      track_id_external_(TRACK_INVALID),
-      active_(false),
-      enabled_(false),
-      direct_connect_(false)
+  explicit PipelineListener(const RefPtr<MediaSessionConduit>& aConduit)
+    : mConduit(aConduit)
+    , mTrackId(TRACK_INVALID)
+    , mMutex("MediaPipelineTransmit::PipelineListener")
+    , mTrackIdexternal(TRACK_INVALID)
+    , mActive(false)
+    , mEnabled(false)
+    , mDirectConnect(false)
   {
   }
 
   ~PipelineListener()
   {
-    if (!NS_IsMainThread()) {
-      // release conduit on mainthread.  Must use forget()!
-      nsresult rv = NS_DispatchToMainThread(new
-                                            ConduitDeleteEvent(conduit_.forget()));
-      MOZ_ASSERT(!NS_FAILED(rv),"Could not dispatch conduit shutdown to main");
-      if (NS_FAILED(rv)) {
-        MOZ_CRASH();
-      }
-    } else {
-      conduit_ = nullptr;
-    }
-    if (converter_) {
-      converter_->Shutdown();
+    NS_ReleaseOnMainThreadSystemGroup("MediaPipeline::mConduit",
+                                      mConduit.forget());
+    if (mConverter) {
+      mConverter->Shutdown();
     }
   }
 
   // Dispatches setting the internal TrackID to TRACK_INVALID to the media
   // graph thread to keep it in sync with other MediaStreamGraph operations
   // like RemoveListener() and AddListener(). The TrackID will be updated on
   // the next NewData() callback.
-  void UnsetTrackId(MediaStreamGraphImpl* graph);
+  void UnsetTrackId(MediaStreamGraphImpl* aGraph);
 
-  void SetActive(bool active) { active_ = active; }
-  void SetEnabled(bool enabled) { enabled_ = enabled; }
+  void SetActive(bool aActive) { mActive = aActive; }
+  void SetEnabled(bool aEnabled) { mEnabled = aEnabled; }
 
   // These are needed since nested classes don't have access to any particular
   // instance of the parent
-  void SetAudioProxy(const RefPtr<AudioProxyThread>& proxy)
+  void SetAudioProxy(const RefPtr<AudioProxyThread>& aProxy)
   {
-    audio_processing_ = proxy;
+    mAudioProcessing = aProxy;
   }
 
-  void SetVideoFrameConverter(const RefPtr<VideoFrameConverter>& converter)
+  void SetVideoFrameConverter(const RefPtr<VideoFrameConverter>& aConverter)
   {
-    converter_ = converter;
+    mConverter = aConverter;
   }
 
-  void OnVideoFrameConverted(unsigned char* aVideoFrame,
+  void OnVideoFrameConverted(const unsigned char* aVideoFrame,
                              unsigned int aVideoFrameLength,
                              unsigned short aWidth,
                              unsigned short aHeight,
                              VideoType aVideoType,
                              uint64_t aCaptureTime)
   {
-    MOZ_RELEASE_ASSERT(conduit_->type() == MediaSessionConduit::VIDEO);
-    static_cast<VideoSessionConduit*>(conduit_.get())->SendVideoFrame(
-      aVideoFrame, aVideoFrameLength, aWidth, aHeight, aVideoType, aCaptureTime);
+    MOZ_RELEASE_ASSERT(mConduit->type() == MediaSessionConduit::VIDEO);
+    static_cast<VideoSessionConduit*>(mConduit.get())
+      ->SendVideoFrame(aVideoFrame,
+                       aVideoFrameLength,
+                       aWidth,
+                       aHeight,
+                       aVideoType,
+                       aCaptureTime);
   }
 
-  void OnVideoFrameConverted(webrtc::VideoFrame& aVideoFrame)
+  void OnVideoFrameConverted(const webrtc::VideoFrame& aVideoFrame)
   {
-    MOZ_RELEASE_ASSERT(conduit_->type() == MediaSessionConduit::VIDEO);
-    static_cast<VideoSessionConduit*>(conduit_.get())->SendVideoFrame(aVideoFrame);
+    MOZ_RELEASE_ASSERT(mConduit->type() == MediaSessionConduit::VIDEO);
+    static_cast<VideoSessionConduit*>(mConduit.get())
+      ->SendVideoFrame(aVideoFrame);
   }
 
   // Implement MediaStreamTrackListener
   void NotifyQueuedChanges(MediaStreamGraph* aGraph,
                            StreamTime aTrackOffset,
                            const MediaSegment& aQueuedMedia) override;
 
   // Implement DirectMediaStreamTrackListener
@@ -1314,1113 +1400,1212 @@ public:
   void NotifyDirectListenerInstalled(InstallationResult aResult) override;
   void NotifyDirectListenerUninstalled() override;
 
   // Implement MediaStreamVideoSink
   void SetCurrentFrames(const VideoSegment& aSegment) override;
   void ClearFrames() override {}
 
 private:
-  void UnsetTrackIdImpl() {
+  void UnsetTrackIdImpl()
+  {
     MutexAutoLock lock(mMutex);
-    track_id_ = track_id_external_ = TRACK_INVALID;
+    mTrackId = mTrackIdexternal = TRACK_INVALID;
   }
 
-  void NewData(const MediaSegment& media, TrackRate aRate = 0);
+  void NewData(const MediaSegment& aMedia, TrackRate aRate = 0);
 
-  RefPtr<MediaSessionConduit> conduit_;
-  RefPtr<AudioProxyThread> audio_processing_;
-  RefPtr<VideoFrameConverter> converter_;
+  RefPtr<MediaSessionConduit> mConduit;
+  RefPtr<AudioProxyThread> mAudioProcessing;
+  RefPtr<VideoFrameConverter> mConverter;
 
   // May be TRACK_INVALID until we see data from the track
-  TrackID track_id_; // this is the current TrackID this listener is attached to
+  TrackID mTrackId; // this is the current TrackID this listener is attached to
   Mutex mMutex;
   // protected by mMutex
   // May be TRACK_INVALID until we see data from the track
-  TrackID track_id_external_; // this is queried from other threads
+  TrackID mTrackIdexternal; // this is queried from other threads
 
   // active is true if there is a transport to send on
-  mozilla::Atomic<bool> active_;
+  mozilla::Atomic<bool> mActive;
   // enabled is true if the media access control permits sending
   // actual content; when false you get black/silence
-  mozilla::Atomic<bool> enabled_;
+  mozilla::Atomic<bool> mEnabled;
 
   // Written and read on the MediaStreamGraph thread
-  bool direct_connect_;
+  bool mDirectConnect;
 };
 
 // Implements VideoConverterListener for MediaPipeline.
 //
 // We pass converted frames on to MediaPipelineTransmit::PipelineListener
 // where they are further forwarded to VideoConduit.
 // MediaPipelineTransmit calls Detach() during shutdown to ensure there is
 // no cyclic dependencies between us and PipelineListener.
-class MediaPipelineTransmit::VideoFrameFeeder
-  : public VideoConverterListener
+class MediaPipelineTransmit::VideoFrameFeeder : public VideoConverterListener
 {
 public:
-  explicit VideoFrameFeeder(const RefPtr<PipelineListener>& listener)
-    : listener_(listener),
-      mutex_("VideoFrameFeeder")
+  explicit VideoFrameFeeder(const RefPtr<PipelineListener>& aListener)
+    : mListener(aListener)
+    , mMutex("VideoFrameFeeder")
   {
     MOZ_COUNT_CTOR(VideoFrameFeeder);
   }
 
   void Detach()
   {
-    MutexAutoLock lock(mutex_);
+    MutexAutoLock lock(mMutex);
 
-    listener_ = nullptr;
+    mListener = nullptr;
   }
 
-  void OnVideoFrameConverted(unsigned char* aVideoFrame,
+  void OnVideoFrameConverted(const unsigned char* aVideoFrame,
                              unsigned int aVideoFrameLength,
                              unsigned short aWidth,
                              unsigned short aHeight,
                              VideoType aVideoType,
                              uint64_t aCaptureTime) override
   {
-    MutexAutoLock lock(mutex_);
+    MutexAutoLock lock(mMutex);
 
-    if (!listener_) {
+    if (!mListener) {
       return;
     }
 
-    listener_->OnVideoFrameConverted(aVideoFrame, aVideoFrameLength,
-                                     aWidth, aHeight, aVideoType, aCaptureTime);
+    mListener->OnVideoFrameConverted(aVideoFrame,
+                                     aVideoFrameLength,
+                                     aWidth,
+                                     aHeight,
+                                     aVideoType,
+                                     aCaptureTime);
   }
 
-  void OnVideoFrameConverted(webrtc::VideoFrame& aVideoFrame) override
+  void OnVideoFrameConverted(const webrtc::VideoFrame& aVideoFrame) override
   {
-    MutexAutoLock lock(mutex_);
+    MutexAutoLock lock(mMutex);
 
-    if (!listener_) {
+    if (!mListener) {
       return;
     }
 
-    listener_->OnVideoFrameConverted(aVideoFrame);
+    mListener->OnVideoFrameConverted(aVideoFrame);
   }
 
 protected:
-  virtual ~VideoFrameFeeder()
-  {
-    MOZ_COUNT_DTOR(VideoFrameFeeder);
-  }
+  virtual ~VideoFrameFeeder() { MOZ_COUNT_DTOR(VideoFrameFeeder); }
 
-  RefPtr<PipelineListener> listener_;
-  Mutex mutex_;
+  RefPtr<PipelineListener> mListener;
+  Mutex mMutex;
 };
 
 MediaPipelineTransmit::MediaPipelineTransmit(
-    const std::string& pc,
-    nsCOMPtr<nsIEventTarget> main_thread,
-    nsCOMPtr<nsIEventTarget> sts_thread,
-    bool is_video,
-    dom::MediaStreamTrack* domtrack,
-    RefPtr<MediaSessionConduit> conduit) :
-  MediaPipeline(pc, TRANSMIT, main_thread, sts_thread, conduit),
-  listener_(new PipelineListener(conduit)),
-  is_video_(is_video),
-  domtrack_(domtrack),
-  transmitting_(false)
+  const std::string& aPc,
+  nsCOMPtr<nsIEventTarget> aMainThread,
+  nsCOMPtr<nsIEventTarget> aStsThread,
+  bool aIsVideo,
+  dom::MediaStreamTrack* aDomTrack,
+  RefPtr<MediaSessionConduit> aConduit)
+  : MediaPipeline(aPc,
+                  DirectionType::TRANSMIT,
+                  aMainThread,
+                  aStsThread,
+                  aConduit)
+  , mIsVideo(aIsVideo)
+  , mListener(new PipelineListener(aConduit))
+  , mFeeder(aIsVideo ? MakeAndAddRef<VideoFrameFeeder>(mListener)
+                     : nullptr) // For video we send frames to an
+                                // async VideoFrameConverter that
+                                // calls back to a VideoFrameFeeder
+                                // that feeds I420 frames to
+                                // VideoConduit.
+  , mDomTrack(aDomTrack)
+  , mTransmitting(false)
 {
   SetDescription();
   if (!IsVideo()) {
-    audio_processing_ = MakeAndAddRef<AudioProxyThread>(static_cast<AudioSessionConduit*>(conduit.get()));
-    listener_->SetAudioProxy(audio_processing_);
-  }
-  else { // Video
-    // For video we send frames to an async VideoFrameConverter that calls
-    // back to a VideoFrameFeeder that feeds I420 frames to VideoConduit.
-
-    feeder_ = MakeAndAddRef<VideoFrameFeeder>(listener_);
-
-    converter_ = MakeAndAddRef<VideoFrameConverter>();
-    converter_->AddListener(feeder_);
-
-    listener_->SetVideoFrameConverter(converter_);
+    mAudioProcessing = MakeAndAddRef<AudioProxyThread>(
+      static_cast<AudioSessionConduit*>(aConduit.get()));
+    mListener->SetAudioProxy(mAudioProcessing);
+  } else { // Video
+    mConverter = MakeAndAddRef<VideoFrameConverter>();
+    mConverter->AddListener(mFeeder);
+    mListener->SetVideoFrameConverter(mConverter);
   }
 }
 
 MediaPipelineTransmit::~MediaPipelineTransmit()
 {
-  if (feeder_) {
-    feeder_->Detach();
+  if (mFeeder) {
+    mFeeder->Detach();
   }
 
-  MOZ_ASSERT(!domtrack_);
+  MOZ_ASSERT(!mDomTrack);
 }
 
-void MediaPipelineTransmit::SetDescription() {
-  description_ = pc_ + "| ";
-  description_ += conduit_->type() == MediaSessionConduit::AUDIO ?
-      "Transmit audio[" : "Transmit video[";
+void
+MediaPipelineTransmit::SetDescription()
+{
+  mDescription = mPc + "| ";
+  mDescription += mConduit->type() == MediaSessionConduit::AUDIO
+                    ? "Transmit audio["
+                    : "Transmit video[";
 
-  if (!domtrack_) {
-    description_ += "no track]";
+  if (!mDomTrack) {
+    mDescription += "no track]";
     return;
   }
 
   nsString nsTrackId;
-  domtrack_->GetId(nsTrackId);
-  std::string track_id(NS_ConvertUTF16toUTF8(nsTrackId).get());
-  description_ += track_id;
-  description_ += "]";
+  mDomTrack->GetId(nsTrackId);
+  std::string trackId(NS_ConvertUTF16toUTF8(nsTrackId).get());
+  mDescription += trackId;
+  mDescription += "]";
 }
 
-void MediaPipelineTransmit::Stop() {
-  ASSERT_ON_THREAD(main_thread_);
+void
+MediaPipelineTransmit::Stop()
+{
+  ASSERT_ON_THREAD(mMainThread);
 
-  if (!domtrack_ || !transmitting_) {
+  if (!mDomTrack || !mTransmitting) {
     return;
   }
 
-  transmitting_ = false;
+  mTransmitting = false;
 
-  if (domtrack_->AsAudioStreamTrack()) {
-    domtrack_->RemoveDirectListener(listener_);
-    domtrack_->RemoveListener(listener_);
-  } else if (VideoStreamTrack* video = domtrack_->AsVideoStreamTrack()) {
-    video->RemoveVideoOutput(listener_);
+  if (mDomTrack->AsAudioStreamTrack()) {
+    mDomTrack->RemoveDirectListener(mListener);
+    mDomTrack->RemoveListener(mListener);
+  } else if (VideoStreamTrack* video = mDomTrack->AsVideoStreamTrack()) {
+    video->RemoveVideoOutput(mListener);
   } else {
     MOZ_ASSERT(false, "Unknown track type");
   }
 
-  conduit_->StopTransmitting();
+  mConduit->StopTransmitting();
 }
 
-void MediaPipelineTransmit::Start() {
-  ASSERT_ON_THREAD(main_thread_);
+void
+MediaPipelineTransmit::Start()
+{
+  ASSERT_ON_THREAD(mMainThread);
 
-  if (!domtrack_ || transmitting_) {
+  if (!mDomTrack || mTransmitting) {
     return;
   }
 
-  transmitting_ = true;
+  mTransmitting = true;
 
-  conduit_->StartTransmitting();
+  mConduit->StartTransmitting();
 
   // TODO(ekr@rtfm.com): Check for errors
-  CSFLogDebug(LOGTAG, "Attaching pipeline to track %p conduit type=%s", this,
-              (conduit_->type() == MediaSessionConduit::AUDIO ?"audio":"video"));
+  CSFLogDebug(
+    LOGTAG,
+    "Attaching pipeline to track %p conduit type=%s",
+    this,
+    (mConduit->type() == MediaSessionConduit::AUDIO ? "audio" : "video"));
 
 #if !defined(MOZILLA_EXTERNAL_LINKAGE)
   // With full duplex we don't risk having audio come in late to the MSG
   // so we won't need a direct listener.
   const bool enableDirectListener =
     !Preferences::GetBool("media.navigator.audio.full_duplex", false);
 #else
   const bool enableDirectListener = true;
 #endif
 
-  if (domtrack_->AsAudioStreamTrack()) {
+  if (mDomTrack->AsAudioStreamTrack()) {
     if (enableDirectListener) {
       // Register the Listener directly with the source if we can.
       // We also register it as a non-direct listener so we fall back to that
-      // if installing the direct listener fails. As a direct listener we get access
-      // to direct unqueued (and not resampled) data.
-      domtrack_->AddDirectListener(listener_);
+      // if installing the direct listener fails. As a direct listener we get
+      // access to direct unqueued (and not resampled) data.
+      mDomTrack->AddDirectListener(mListener);
     }
-    domtrack_->AddListener(listener_);
-  } else if (VideoStreamTrack* video = domtrack_->AsVideoStreamTrack()) {
-    video->AddVideoOutput(listener_);
+    mDomTrack->AddListener(mListener);
+  } else if (VideoStreamTrack* video = mDomTrack->AsVideoStreamTrack()) {
+    video->AddVideoOutput(mListener);
   } else {
     MOZ_ASSERT(false, "Unknown track type");
   }
 }
 
 bool
 MediaPipelineTransmit::IsVideo() const
 {
-  return is_video_;
+  return mIsVideo;
 }
 
-void MediaPipelineTransmit::UpdateSinkIdentity_m(MediaStreamTrack* track,
-                                                 nsIPrincipal* principal,
-                                                 const PeerIdentity* sinkIdentity) {
-  ASSERT_ON_THREAD(main_thread_);
+void
+MediaPipelineTransmit::UpdateSinkIdentity_m(const MediaStreamTrack* aTrack,
+                                            nsIPrincipal* aPrincipal,
+                                            const PeerIdentity* aSinkIdentity)
+{
+  ASSERT_ON_THREAD(mMainThread);
 
-  if (track != nullptr && track != domtrack_) {
+  if (aTrack != nullptr && aTrack != mDomTrack) {
     // If a track is specified, then it might not be for this pipeline,
     // since we receive notifications for all tracks on the PC.
     // nullptr means that the PeerIdentity has changed and shall be applied
     // to all tracks of the PC.
     return;
   }
 
-  bool enableTrack = principal->Subsumes(domtrack_->GetPrincipal());
+  bool enableTrack = aPrincipal->Subsumes(mDomTrack->GetPrincipal());
   if (!enableTrack) {
     // first try didn't work, but there's a chance that this is still available
     // if our track is bound to a peerIdentity, and the peer connection (our
     // sink) is bound to the same identity, then we can enable the track.
-    const PeerIdentity* trackIdentity = domtrack_->GetPeerIdentity();
-    if (sinkIdentity && trackIdentity) {
-      enableTrack = (*sinkIdentity == *trackIdentity);
+    const PeerIdentity* trackIdentity = mDomTrack->GetPeerIdentity();
+    if (aSinkIdentity && trackIdentity) {
+      enableTrack = (*aSinkIdentity == *trackIdentity);
     }
   }
 
-  listener_->SetEnabled(enableTrack);
+  mListener->SetEnabled(enableTrack);
 }
 
 void
 MediaPipelineTransmit::DetachMedia()
 {
-  ASSERT_ON_THREAD(main_thread_);
-  domtrack_ = nullptr;
+  ASSERT_ON_THREAD(mMainThread);
+  mDomTrack = nullptr;
   // Let the listener be destroyed with the pipeline (or later).
 }
 
-nsresult MediaPipelineTransmit::TransportReady_s(TransportInfo &info) {
-  ASSERT_ON_THREAD(sts_thread_);
+nsresult
+MediaPipelineTransmit::TransportReady_s(TransportInfo& aInfo)
+{
+  ASSERT_ON_THREAD(mStsThread);
   // Call base ready function.
-  MediaPipeline::TransportReady_s(info);
+  MediaPipeline::TransportReady_s(aInfo);
 
   // Should not be set for a transmitter
-  if (&info == &rtp_) {
-    listener_->SetActive(true);
+  if (&aInfo == &mRtp) {
+    mListener->SetActive(true);
   }
 
   return NS_OK;
 }
 
-nsresult MediaPipelineTransmit::ReplaceTrack(RefPtr<MediaStreamTrack>& domtrack) {
+nsresult
+MediaPipelineTransmit::ReplaceTrack(RefPtr<MediaStreamTrack>& aDomTrack)
+{
   // MainThread, checked in calls we make
-  if (domtrack) {
+  if (aDomTrack) {
     nsString nsTrackId;
-    domtrack->GetId(nsTrackId);
+    aDomTrack->GetId(nsTrackId);
     std::string track_id(NS_ConvertUTF16toUTF8(nsTrackId).get());
-    CSFLogDebug(LOGTAG, "Reattaching pipeline %s to track %p track %s conduit type: %s",
-                description_.c_str(), &domtrack, track_id.c_str(),
-                (conduit_->type() == MediaSessionConduit::AUDIO ?"audio":"video"));
+    CSFLogDebug(
+      LOGTAG,
+      "Reattaching pipeline %s to track %p track %s conduit type: %s",
+      mDescription.c_str(),
+      &aDomTrack,
+      track_id.c_str(),
+      (mConduit->type() == MediaSessionConduit::AUDIO ? "audio" : "video"));
   }
 
-  RefPtr<dom::MediaStreamTrack> oldTrack = domtrack_;
-  bool wasTransmitting = oldTrack && transmitting_;
+  RefPtr<dom::MediaStreamTrack> oldTrack = mDomTrack;
+  bool wasTransmitting = oldTrack && mTransmitting;
   Stop();
-  domtrack_ = domtrack;
+  mDomTrack = aDomTrack;
   SetDescription();
 
   if (oldTrack) {
     // Unsets the track id after RemoveListener() takes effect.
-    listener_->UnsetTrackId(oldTrack->GraphImpl());
+    mListener->UnsetTrackId(oldTrack->GraphImpl());
   }
 
   if (wasTransmitting) {
     Start();
   }
   return NS_OK;
 }
 
-nsresult MediaPipeline::ConnectTransport_s(TransportInfo &info) {
-  MOZ_ASSERT(info.transport_);
-  ASSERT_ON_THREAD(sts_thread_);
+nsresult
+MediaPipeline::ConnectTransport_s(TransportInfo& aInfo)
+{
+  MOZ_ASSERT(aInfo.mTransport);
+  ASSERT_ON_THREAD(mStsThread);
 
   // Look to see if the transport is ready
-  if (info.transport_->state() == TransportLayer::TS_OPEN) {
-    nsresult res = TransportReady_s(info);
+  if (aInfo.mTransport->state() == TransportLayer::TS_OPEN) {
+    nsresult res = TransportReady_s(aInfo);
     if (NS_FAILED(res)) {
-      CSFLogError(LOGTAG, "Error calling TransportReady(); res=%u in %s",
-                  static_cast<uint32_t>(res), __FUNCTION__);
+      CSFLogError(LOGTAG,
+                  "Error calling TransportReady(); res=%u in %s",
+                  static_cast<uint32_t>(res),
+                  __FUNCTION__);
       return res;
     }
-  } else if (info.transport_->state() == TransportLayer::TS_ERROR) {
-    CSFLogError(LOGTAG, "%s transport is already in error state",
-                ToString(info.type_));
-    TransportFailed_s(info);
+  } else if (aInfo.mTransport->state() == TransportLayer::TS_ERROR) {
+    CSFLogError(
+      LOGTAG, "%s transport is already in error state", ToString(aInfo.mType));
+    TransportFailed_s(aInfo);
     return NS_ERROR_FAILURE;
   }
 
-  info.transport_->SignalStateChange.connect(this,
-                                             &MediaPipeline::StateChange);
+  aInfo.mTransport->SignalStateChange.connect(this, &MediaPipeline::StateChange);
 
   return NS_OK;
 }
 
-MediaPipeline::TransportInfo* MediaPipeline::GetTransportInfo_s(
-    TransportFlow *flow) {
-  ASSERT_ON_THREAD(sts_thread_);
-  if (flow == rtp_.transport_) {
-    return &rtp_;
+MediaPipeline::TransportInfo*
+MediaPipeline::GetTransportInfo_s(TransportFlow* aFlow)
+{
+  ASSERT_ON_THREAD(mStsThread);
+  if (aFlow == mRtp.mTransport) {
+    return &mRtp;
   }
 
-  if (flow == rtcp_.transport_) {
-    return &rtcp_;
+  if (aFlow == mRtcp.mTransport) {
+    return &mRtcp;
   }
 
   return nullptr;
 }
 
-nsresult MediaPipeline::PipelineTransport::SendRtpPacket(
-    const uint8_t* data, size_t len) {
+nsresult
+MediaPipeline::PipelineTransport::SendRtpPacket(const uint8_t* aData, size_t aLen)
+{
 
-  nsAutoPtr<DataBuffer> buf(new DataBuffer(data, len, len + SRTP_MAX_EXPANSION));
+  nsAutoPtr<DataBuffer> buf(
+    new DataBuffer(aData, aLen, aLen + SRTP_MAX_EXPANSION));
 
-  RUN_ON_THREAD(sts_thread_,
-                WrapRunnable(
-                             RefPtr<MediaPipeline::PipelineTransport>(this),
-                             &MediaPipeline::PipelineTransport::SendRtpRtcpPacket_s,
-                             buf, true),
-                NS_DISPATCH_NORMAL);
+  RUN_ON_THREAD(
+    mStsThread,
+    WrapRunnable(RefPtr<MediaPipeline::PipelineTransport>(this),
+                 &MediaPipeline::PipelineTransport::SendRtpRtcpPacket_s,
+                 buf,
+                 true),
+    NS_DISPATCH_NORMAL);
 
   return NS_OK;
 }
 
-nsresult MediaPipeline::PipelineTransport::SendRtpRtcpPacket_s(
-    nsAutoPtr<DataBuffer> data,
-    bool is_rtp) {
+nsresult
+MediaPipeline::PipelineTransport::SendRtpRtcpPacket_s(
+  nsAutoPtr<DataBuffer> aData,
+  bool aIsRtp)
+{
 
-  ASSERT_ON_THREAD(sts_thread_);
-  if (!pipeline_) {
-    return NS_OK;  // Detached
+  ASSERT_ON_THREAD(mStsThread);
+  if (!mPipeline) {
+    return NS_OK; // Detached
   }
-  TransportInfo& transport = is_rtp ? pipeline_->rtp_ : pipeline_->rtcp_;
+  TransportInfo& transport = aIsRtp ? mPipeline->mRtp : mPipeline->mRtcp;
 
-  if (!transport.send_srtp_) {
+  if (!transport.mSendSrtp) {
     CSFLogDebug(LOGTAG, "Couldn't write RTP/RTCP packet; SRTP not set up yet");
     return NS_OK;
   }
 
-  MOZ_ASSERT(transport.transport_);
-  NS_ENSURE_TRUE(transport.transport_, NS_ERROR_NULL_POINTER);
+  MOZ_ASSERT(transport.mTransport);
+  NS_ENSURE_TRUE(transport.mTransport, NS_ERROR_NULL_POINTER);
 
   // libsrtp enciphers in place, so we need a big enough buffer.
-  MOZ_ASSERT(data->capacity() >= data->len() + SRTP_MAX_EXPANSION);
+  MOZ_ASSERT(aData->capacity() >= aData->len() + SRTP_MAX_EXPANSION);
 
   if (RtpLogger::IsPacketLoggingOn()) {
-    int header_len = 12;
+    int headerLen = 12;
     webrtc::RTPHeader header;
-    if (pipeline_->rtp_parser_ &&
-        pipeline_->rtp_parser_->Parse(data->data(), data->len(), &header)) {
-        header_len = header.headerLength;
+    if (mPipeline->mRtpParser &&
+        mPipeline->mRtpParser->Parse(aData->data(), aData->len(), &header)) {
+      headerLen = header.headerLength;
     }
-    RtpLogger::LogPacket(data->data(), data->len(), false, is_rtp, header_len,
-                         pipeline_->description_);
+    RtpLogger::LogPacket(aData->data(),
+                         aData->len(),
+                         false,
+                         aIsRtp,
+                         headerLen,
+                         mPipeline->mDescription);
   }
 
   int out_len;
   nsresult res;
-  if (is_rtp) {
-    pipeline_->packet_dumper_->Dump(
-        pipeline_->level(), dom::mozPacketDumpType::Rtp, true, data->data(), data->len());
+  if (aIsRtp) {
+    mPipeline->mPacketDumper->Dump(mPipeline->Level(),
+                                    dom::mozPacketDumpType::Rtp,
+                                    true,
+                                    aData->data(),
+                                    aData->len());
 
-    res = transport.send_srtp_->ProtectRtp(data->data(),
-                                           data->len(),
-                                           data->capacity(),
-                                           &out_len);
+    res = transport.mSendSrtp->ProtectRtp(
+      aData->data(), aData->len(), aData->capacity(), &out_len);
   } else {
-    pipeline_->packet_dumper_->Dump(
-        pipeline_->level(), dom::mozPacketDumpType::Rtcp, true, data->data(), data->len());
+    mPipeline->mPacketDumper->Dump(mPipeline->Level(),
+                                    dom::mozPacketDumpType::Rtcp,
+                                    true,
+                                    aData->data(),
+                                    aData->len());
 
-    res = transport.send_srtp_->ProtectRtcp(data->data(),
-                                            data->len(),
-                                            data->capacity(),
-                                            &out_len);
+    res = transport.mSendSrtp->ProtectRtcp(
+      aData->data(), aData->len(), aData->capacity(), &out_len);
   }
   if (!NS_SUCCEEDED(res)) {
     return res;
   }
 
   // paranoia; don't have uninitialized bytes included in data->len()
-  data->SetLength(out_len);
-
-  CSFLogDebug(LOGTAG, "%s sending %s packet", pipeline_->description_.c_str(),
-              (is_rtp ? "RTP" : "RTCP"));
-  if (is_rtp) {
-    pipeline_->packet_dumper_->Dump(
-        pipeline_->level(), dom::mozPacketDumpType::Srtp, true, data->data(), out_len);
+  aData->SetLength(out_len);
 
-    pipeline_->increment_rtp_packets_sent(out_len);
+  CSFLogDebug(LOGTAG,
+              "%s sending %s packet",
+              mPipeline->mDescription.c_str(),
+              (aIsRtp ? "RTP" : "RTCP"));
+  if (aIsRtp) {
+    mPipeline->mPacketDumper->Dump(mPipeline->Level(),
+                                    dom::mozPacketDumpType::Srtp,
+                                    true,
+                                    aData->data(),
+                                    out_len);
+
+    mPipeline->IncrementRtpPacketsSent(out_len);
   } else {
-    pipeline_->packet_dumper_->Dump(
-        pipeline_->level(), dom::mozPacketDumpType::Srtcp, true, data->data(), out_len);
+    mPipeline->mPacketDumper->Dump(mPipeline->Level(),
+                                    dom::mozPacketDumpType::Srtcp,
+                                    true,
+                                    aData->data(),
+                                    out_len);
 
-    pipeline_->increment_rtcp_packets_sent();
+    mPipeline->IncrementRtcpPacketsSent();
   }
-  return pipeline_->SendPacket(transport.transport_, data->data(), out_len);
+  return mPipeline->SendPacket(transport.mTransport, aData->data(), out_len);
 }
 
-nsresult MediaPipeline::PipelineTransport::SendRtcpPacket(
-    const uint8_t* data, size_t len) {
+nsresult
+MediaPipeline::PipelineTransport::SendRtcpPacket(const uint8_t* aData,
+                                                 size_t aLen)
+{
 
-  nsAutoPtr<DataBuffer> buf(new DataBuffer(data, len, len + SRTP_MAX_EXPANSION));
+  nsAutoPtr<DataBuffer> buf(
+    new DataBuffer(aData, aLen, aLen + SRTP_MAX_EXPANSION));
 
-  RUN_ON_THREAD(sts_thread_,
-                WrapRunnable(
-                             RefPtr<MediaPipeline::PipelineTransport>(this),
-                             &MediaPipeline::PipelineTransport::SendRtpRtcpPacket_s,
-                             buf, false),
-                NS_DISPATCH_NORMAL);
+  RUN_ON_THREAD(
+    mStsThread,
+    WrapRunnable(RefPtr<MediaPipeline::PipelineTransport>(this),
+                 &MediaPipeline::PipelineTransport::SendRtpRtcpPacket_s,
+                 buf,
+                 false),
+    NS_DISPATCH_NORMAL);
 
   return NS_OK;
 }
 
-void MediaPipelineTransmit::PipelineListener::
-UnsetTrackId(MediaStreamGraphImpl* graph) {
-  class Message : public ControlMessage {
+void
+MediaPipelineTransmit::PipelineListener::UnsetTrackId(
+  MediaStreamGraphImpl* aGraph)
+{
+  class Message : public ControlMessage
+  {
   public:
-    explicit Message(PipelineListener* listener) :
-      ControlMessage(nullptr), listener_(listener) {}
-    virtual void Run() override
+    explicit Message(PipelineListener* listener)
+      : ControlMessage(nullptr)
+      , mListener(listener)
     {
-      listener_->UnsetTrackIdImpl();
     }
-    RefPtr<PipelineListener> listener_;
+    virtual void Run() override { mListener->UnsetTrackIdImpl(); }
+    RefPtr<PipelineListener> mListener;
   };
-  graph->AppendMessage(MakeUnique<Message>(this));
+  aGraph->AppendMessage(MakeUnique<Message>(this));
 }
 // Called if we're attached with AddDirectListener()
-void MediaPipelineTransmit::PipelineListener::
-NotifyRealtimeTrackData(MediaStreamGraph* graph,
-                        StreamTime offset,
-                        const MediaSegment& media) {
-  CSFLogDebug(LOGTAG, "MediaPipeline::NotifyRealtimeTrackData() listener=%p, offset=%" PRId64 ", duration=%" PRId64,
-              this, offset, media.GetDuration());
+void
+MediaPipelineTransmit::PipelineListener::NotifyRealtimeTrackData(
+  MediaStreamGraph* aGraph,
+  StreamTime aOffset,
+  const MediaSegment& aMedia)
+{
+  CSFLogDebug(
+    LOGTAG,
+    "MediaPipeline::NotifyRealtimeTrackData() listener=%p, offset=%" PRId64
+    ", duration=%" PRId64,
+    this,
+    aOffset,
+    aMedia.GetDuration());
 
-  if (media.GetType() == MediaSegment::VIDEO) {
+  if (aMedia.GetType() == MediaSegment::VIDEO) {
     // We have to call the upstream NotifyRealtimeTrackData and
     // MediaStreamVideoSink will route them to SetCurrentFrames.
-    MediaStreamVideoSink::NotifyRealtimeTrackData(graph, offset, media);
+    MediaStreamVideoSink::NotifyRealtimeTrackData(aGraph, aOffset, aMedia);
     return;
   }
 
-  NewData(media, graph->GraphRate());
+  NewData(aMedia, aGraph->GraphRate());
 }
 
-void MediaPipelineTransmit::PipelineListener::
-NotifyQueuedChanges(MediaStreamGraph* graph,
-                    StreamTime offset,
-                    const MediaSegment& queued_media) {
+void
+MediaPipelineTransmit::PipelineListener::NotifyQueuedChanges(
+  MediaStreamGraph* aGraph,
+  StreamTime aOffset,
+  const MediaSegment& aQueuedMedia)
+{
   CSFLogDebug(LOGTAG, "MediaPipeline::NotifyQueuedChanges()");
 
-  if (queued_media.GetType() == MediaSegment::VIDEO) {
+  if (aQueuedMedia.GetType() == MediaSegment::VIDEO) {
     // We always get video from SetCurrentFrames().
     return;
   }
 
-  if (direct_connect_) {
+  if (mDirectConnect) {
     // ignore non-direct data if we're also getting direct data
     return;
   }
 
   size_t rate;
-  if (graph) {
-    rate = graph->GraphRate();
+  if (aGraph) {
+    rate = aGraph->GraphRate();
   } else {
     // When running tests, graph may be null. In that case use a default.
     rate = 16000;
   }
-  NewData(queued_media, rate);
+  NewData(aQueuedMedia, rate);
 }
 
-void MediaPipelineTransmit::PipelineListener::
-NotifyDirectListenerInstalled(InstallationResult aResult) {
-  CSFLogInfo(LOGTAG, "MediaPipeline::NotifyDirectListenerInstalled() listener=%p, result=%d",
-             this, static_cast<int32_t>(aResult));
+void
+MediaPipelineTransmit::PipelineListener::NotifyDirectListenerInstalled(
+  InstallationResult aResult)
+{
+  CSFLogInfo(
+    LOGTAG,
+    "MediaPipeline::NotifyDirectListenerInstalled() listener=%p, result=%d",
+    this,
+    static_cast<int32_t>(aResult));
 
-  direct_connect_ = InstallationResult::SUCCESS == aResult;
+  mDirectConnect = InstallationResult::SUCCESS == aResult;
 }
 
-void MediaPipelineTransmit::PipelineListener::
-NotifyDirectListenerUninstalled() {
-  CSFLogInfo(LOGTAG, "MediaPipeline::NotifyDirectListenerUninstalled() listener=%p", this);
+void
+MediaPipelineTransmit::PipelineListener::NotifyDirectListenerUninstalled()
+{
+  CSFLogInfo(LOGTAG,
+             "MediaPipeline::NotifyDirectListenerUninstalled() listener=%p",
+             this);
 
-  direct_connect_ = false;
+  mDirectConnect = false;
 }
 
-void MediaPipelineTransmit::PipelineListener::
-NewData(const MediaSegment& media, TrackRate aRate /* = 0 */) {
-  if (!active_) {
+void
+MediaPipelineTransmit::PipelineListener::NewData(const MediaSegment& aMedia,
+                                                 TrackRate aRate /* = 0 */)
+{
+  if (!mActive) {
     CSFLogDebug(LOGTAG, "Discarding packets because transport not ready");
     return;
   }
 
-  if (conduit_->type() !=
-      (media.GetType() == MediaSegment::AUDIO ? MediaSessionConduit::AUDIO :
-                                                MediaSessionConduit::VIDEO)) {
-    MOZ_ASSERT(false, "The media type should always be correct since the "
-                      "listener is locked to a specific track");
+  if (mConduit->type() != (aMedia.GetType() == MediaSegment::AUDIO
+                             ? MediaSessionConduit::AUDIO
+                             : MediaSessionConduit::VIDEO)) {
+    MOZ_ASSERT(false,
+               "The media type should always be correct since the "
+               "listener is locked to a specific track");
     return;
   }
 
   // TODO(ekr@rtfm.com): For now assume that we have only one
   // track type and it's destined for us
   // See bug 784517
-  if (media.GetType() == MediaSegment::AUDIO) {
+  if (aMedia.GetType() == MediaSegment::AUDIO) {
     MOZ_RELEASE_ASSERT(aRate > 0);
 
-    AudioSegment* audio = const_cast<AudioSegment *>(static_cast<const AudioSegment*>(&media));
-    for(AudioSegment::ChunkIterator iter(*audio); !iter.IsEnded(); iter.Next()) {
-      audio_processing_->QueueAudioChunk(aRate, *iter, enabled_);
+    const AudioSegment* audio = static_cast<const AudioSegment*>(&aMedia);
+    for (AudioSegment::ConstChunkIterator iter(*audio); !iter.IsEnded();
+         iter.Next()) {
+      mAudioProcessing->QueueAudioChunk(aRate, *iter, mEnabled);
     }
   } else {
-    VideoSegment* video = const_cast<VideoSegment *>(static_cast<const VideoSegment*>(&media));
-    VideoSegment::ChunkIterator iter(*video);
-    for(VideoSegment::ChunkIterator iter(*video); !iter.IsEnded(); iter.Next()) {
-      converter_->QueueVideoChunk(*iter, !enabled_);
+    const VideoSegment* video = static_cast<const VideoSegment*>(&aMedia);
+    for (VideoSegment::ConstChunkIterator iter(*video); !iter.IsEnded();
+         iter.Next()) {
+      mConverter->QueueVideoChunk(*iter, !mEnabled);
     }
   }
 }
 
-void MediaPipelineTransmit::PipelineListener::
-SetCurrentFrames(const VideoSegment& aSegment)
+void
+MediaPipelineTransmit::PipelineListener::SetCurrentFrames(
+  const VideoSegment& aSegment)
 {
   NewData(aSegment);
 }
 
-class TrackAddedCallback {
- public:
-  virtual void TrackAdded(TrackTicks current_ticks) = 0;
-
-  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TrackAddedCallback);
-
- protected:
-  virtual ~TrackAddedCallback() {}
-};
-
-class GenericReceiveListener;
-
-class GenericReceiveCallback : public TrackAddedCallback
-{
- public:
-  explicit GenericReceiveCallback(GenericReceiveListener* listener)
-    : listener_(listener) {}
-
-  void TrackAdded(TrackTicks time);
-
- private:
-  RefPtr<GenericReceiveListener> listener_;
-};
-
 class GenericReceiveListener : public MediaStreamListener
 {
- public:
-  explicit GenericReceiveListener(dom::MediaStreamTrack* track)
-    : track_(track),
-      played_ticks_(0),
-      last_log_(0),
-      principal_handle_(PRINCIPAL_HANDLE_NONE),
-      listening_(false),
-      maybe_track_needs_unmute_(true)
+public:
+  explicit GenericReceiveListener(dom::MediaStreamTrack* aTrack)
+    : mTrack(aTrack)
+    , mPlayedTicks(0)
+    , mPrincipalHandle(PRINCIPAL_HANDLE_NONE)
+    , mListening(false)
+    , mMaybeTrackNeedsUnmute(true)
   {
-    MOZ_ASSERT(track->GetInputStream()->AsSourceStream());
+    MOZ_ASSERT(aTrack->GetInputStream()->AsSourceStream());
   }
 
   virtual ~GenericReceiveListener()
   {
     NS_ReleaseOnMainThreadSystemGroup(
-      "GenericReceiveListener::track_", track_.forget());
+      "GenericReceiveListener::track_", mTrack.forget());
   }
 
   void AddSelf()
   {
-    if (!listening_) {
-      listening_ = true;
-      track_->GetInputStream()->AddListener(this);
-      maybe_track_needs_unmute_ = true;
+    if (!mListening) {
+      mListening = true;
+      mTrack->GetInputStream()->AddListener(this);
+      mMaybeTrackNeedsUnmute = true;
     }
   }
 
   void RemoveSelf()
   {
-    if (listening_) {
-      listening_ = false;
-      track_->GetInputStream()->RemoveListener(this);
+    if (mListening) {
+      mListening = false;
+      mTrack->GetInputStream()->RemoveListener(this);
     }
   }
 
   void OnRtpReceived()
   {
-    if (maybe_track_needs_unmute_) {
-      maybe_track_needs_unmute_ = false;
+    if (mMaybeTrackNeedsUnmute) {
+      mMaybeTrackNeedsUnmute = false;
       NS_DispatchToMainThread(NewRunnableMethod(
             "GenericReceiveListener::OnRtpReceived_m",
             this,
             &GenericReceiveListener::OnRtpReceived_m));
     }
   }
 
   void OnRtpReceived_m()
   {
-    if (listening_ && track_->Muted()) {
-      track_->MutedChanged(false);
+    if (mListening && mTrack->Muted()) {
+      mTrack->MutedChanged(false);
     }
   }
 
   void EndTrack()
   {
     CSFLogDebug(LOGTAG, "GenericReceiveListener ending track");
 
     // We do this on MSG to avoid it racing against StartTrack.
     class Message : public ControlMessage
     {
     public:
-      explicit Message(dom::MediaStreamTrack* track)
-        : ControlMessage(track->GetInputStream()),
-          track_id_(track->GetInputTrackId())
-      {}
-
-      void Run() override {
-        mStream->AsSourceStream()->EndTrack(track_id_);
+      explicit Message(dom::MediaStreamTrack* aTrack)
+        : ControlMessage(aTrack->GetInputStream())
+        , mTrackId(aTrack->GetInputTrackId())
+      {
       }
 
-      const TrackID track_id_;
+      void Run() override { mStream->AsSourceStream()->EndTrack(mTrackId); }
+
+      const TrackID mTrackId;
     };
 
-    track_->GraphImpl()->AppendMessage(MakeUnique<Message>(track_));
+    mTrack->GraphImpl()->AppendMessage(MakeUnique<Message>(mTrack));
     // This breaks the cycle with the SourceMediaStream
-    track_->GetInputStream()->RemoveListener(this);
+    mTrack->GetInputStream()->RemoveListener(this);
   }
 
   // Must be called on the main thread
-  void SetPrincipalHandle_m(const PrincipalHandle& principal_handle)
+  void SetPrincipalHandle_m(const PrincipalHandle& aPrincipalHandle)
   {
     class Message : public ControlMessage
     {
     public:
-      Message(GenericReceiveListener* listener,
-              const PrincipalHandle& principal_handle)
-        : ControlMessage(nullptr),
-          listener_(listener),
-          principal_handle_(principal_handle)
-      {}
-
-      void Run() override {
-        listener_->SetPrincipalHandle_msg(principal_handle_);
+      Message(GenericReceiveListener* aListener,
+              const PrincipalHandle& aPrincipalHandle)
+        : ControlMessage(nullptr)
+        , mListener(aListener)
+        , mPrincipalHandle(aPrincipalHandle)
+      {
       }
 
-      RefPtr<GenericReceiveListener> listener_;
-      PrincipalHandle principal_handle_;
+      void Run() override
+      {
+        mListener->SetPrincipalHandle_msg(mPrincipalHandle);
+      }
+
+      const RefPtr<GenericReceiveListener> mListener;
+      PrincipalHandle mPrincipalHandle;
     };
 
-    track_->GraphImpl()->AppendMessage(MakeUnique<Message>(this, principal_handle));
+    mTrack->GraphImpl()->AppendMessage(
+      MakeUnique<Message>(this, aPrincipalHandle));
   }
 
   // Must be called on the MediaStreamGraph thread
-  void SetPrincipalHandle_msg(const PrincipalHandle& principal_handle)
+  void SetPrincipalHandle_msg(const PrincipalHandle& aPrincipalHandle)
   {
-    principal_handle_ = principal_handle;
+    mPrincipalHandle = aPrincipalHandle;
   }
 
- protected:
-  RefPtr<dom::MediaStreamTrack> track_;
-  TrackTicks played_ticks_;
-  TrackTicks last_log_; // played_ticks_ when we last logged
-  PrincipalHandle principal_handle_;
-  bool listening_;
-  Atomic<bool> maybe_track_needs_unmute_;
+protected:
+  RefPtr<dom::MediaStreamTrack> mTrack;
+  TrackTicks mPlayedTicks;
+  PrincipalHandle mPrincipalHandle;
+  bool mListening;
+  Atomic<bool> mMaybeTrackNeedsUnmute;
 };
 
-MediaPipelineReceive::MediaPipelineReceive(
-    const std::string& pc,
-    nsCOMPtr<nsIEventTarget> main_thread,
-    nsCOMPtr<nsIEventTarget> sts_thread,
-    RefPtr<MediaSessionConduit> conduit) :
-  MediaPipeline(pc, RECEIVE, main_thread, sts_thread, conduit),
-  segments_added_(0)
+MediaPipelineReceive::MediaPipelineReceive(const std::string& aPc,
+                                           nsCOMPtr<nsIEventTarget> aMainThread,
+                                           nsCOMPtr<nsIEventTarget> aStsThread,
+                                           RefPtr<MediaSessionConduit> aConduit)
+  : MediaPipeline(aPc,
+                  DirectionType::RECEIVE,
+                  aMainThread,
+                  aStsThread,
+                  aConduit)
 {
 }
 
-MediaPipelineReceive::~MediaPipelineReceive()
-{
-}
+MediaPipelineReceive::~MediaPipelineReceive() {}
 
 class MediaPipelineReceiveAudio::PipelineListener
   : public GenericReceiveListener
 {
 public:
-  PipelineListener(dom::MediaStreamTrack* track,
-                   const RefPtr<MediaSessionConduit>& conduit)
-    : GenericReceiveListener(track),
-      conduit_(conduit)
-  {
-  }
-
-  ~PipelineListener()
+  PipelineListener(dom::MediaStreamTrack* aTrack,
+                   const RefPtr<MediaSessionConduit>& aConduit)
+    : GenericReceiveListener(aTrack)
+    , mConduit(aConduit)
+    , mSource(mTrack->GetInputStream()->AsSourceStream())
+    , mTrackId(mTrack->GetInputTrackId())
+    , mRate(mSource ? mSource->GraphRate() : 0)
+    , mTaskQueue(
+        new AutoTaskQueue(GetMediaThreadPool(MediaThreadType::WEBRTC_DECODER),
+                          "AudioPipelineListener"))
+    , mLastLog(0)
   {
-    if (!NS_IsMainThread()) {
-      // release conduit on mainthread.  Must use forget()!
-      nsresult rv = NS_DispatchToMainThread(new
-                                            ConduitDeleteEvent(conduit_.forget()));
-      MOZ_ASSERT(!NS_FAILED(rv),"Could not dispatch conduit shutdown to main");
-      if (NS_FAILED(rv)) {
-        MOZ_CRASH();
-      }
-    } else {
-      conduit_ = nullptr;
-    }
+    MOZ_ASSERT(mSource);
   }
 
   // Implement MediaStreamListener
-  void NotifyPull(MediaStreamGraph* graph, StreamTime desired_time) override
+  void NotifyPull(MediaStreamGraph* aGraph,
+                  StreamTime aDesiredTime) override
   {
-    RefPtr<SourceMediaStream> source =
-      track_->GetInputStream()->AsSourceStream();
-    MOZ_ASSERT(source);
-    if (!source) {
+    if (!mSource) {
       CSFLogError(LOGTAG, "NotifyPull() called from a non-SourceMediaStream");
       return;
     }
+    NotifyPullImpl(aDesiredTime);
+  }
 
-    TrackRate rate = graph->GraphRate();
-    uint32_t samples_per_10ms = rate/100;
+  RefPtr<SourceMediaStream::NotifyPullPromise> AsyncNotifyPull(
+    MediaStreamGraph* aGraph,
+    StreamTime aDesiredTime) override
+  {
+    if (!mSource) {
+      CSFLogError(LOGTAG, "NotifyPull() called from a non-SourceMediaStream");
+      return SourceMediaStream::NotifyPullPromise::CreateAndReject(true,
+                                                                   __func__);
+    }
+    RefPtr<PipelineListener> self = this;
+    return InvokeAsync(mTaskQueue, __func__, [self, aDesiredTime]() {
+      self->NotifyPullImpl(aDesiredTime);
+      return SourceMediaStream::NotifyPullPromise::CreateAndResolve(true,
+                                                                    __func__);
+    });
+  }
 
-    // This comparison is done in total time to avoid accumulated roundoff errors.
-    while (source->TicksToTimeRoundDown(rate,
-                                        played_ticks_) < desired_time) {
-      int16_t scratch_buffer[AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t)];
+private:
+  ~PipelineListener()
+  {
+    NS_ReleaseOnMainThreadSystemGroup("MediaPipeline::mConduit",
+                                      mConduit.forget());
+  }
 
-      int samples_length;
+  void NotifyPullImpl(StreamTime aDesiredTime)
+  {
+    uint32_t samplesPer10ms = mRate / 100;
+    // Determine how many frames we need.
+    // As we get frames from mConduit at the same rate as the graph's rate,
+    // the number of frames needed follows directly from the elapsed time.
+    TrackTicks framesNeeded = aDesiredTime - mPlayedTicks;
+
+    while (framesNeeded >= 0) {
+      const int scratchBufferLength =
+        AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t);
+      int16_t scratchBuffer[scratchBufferLength];
+
+      int samplesLength = scratchBufferLength;
 
       // This fetches 10ms of data, either mono or stereo
       MediaConduitErrorCode err =
-          static_cast<AudioSessionConduit*>(conduit_.get())->GetAudioFrame(
-              scratch_buffer,
-              rate,
-              0,  // TODO(ekr@rtfm.com): better estimate of "capture" (really playout) delay
-              samples_length);
+        static_cast<AudioSessionConduit*>(mConduit.get())
+          ->GetAudioFrame(scratchBuffer,
+                          mRate,
+                          0, // TODO(ekr@rtfm.com): better estimate of "capture"
+                             // (really playout) delay
+                          samplesLength);
 
       if (err != kMediaConduitNoError) {
         // Insert silence on conduit/GIPS failure (extremely unlikely)
-        CSFLogError(LOGTAG, "Audio conduit failed (%d) to return data @ %" PRId64 " (desired %" PRId64 " -> %f)",
-                    err, played_ticks_, desired_time,
-                    source->StreamTimeToSeconds(desired_time));
+        CSFLogError(LOGTAG,
+                    "Audio conduit failed (%d) to return data @ %" PRId64
+                    " (desired %" PRId64 " -> %f)",
+                    err,
+                    mPlayedTicks,
+                    aDesiredTime,
+                    mSource->StreamTimeToSeconds(aDesiredTime));
         // if this is not enough we'll loop and provide more
-        samples_length = samples_per_10ms;
-        PodArrayZero(scratch_buffer);
+        samplesLength = samplesPer10ms;
+        PodArrayZero(scratchBuffer);
       }
 
-      MOZ_ASSERT(samples_length * sizeof(uint16_t) <= AUDIO_SAMPLE_BUFFER_MAX_BYTES);
+      MOZ_RELEASE_ASSERT(samplesLength <= scratchBufferLength);
 
-      CSFLogDebug(LOGTAG, "Audio conduit returned buffer of length %u",
-                  samples_length);
+      CSFLogDebug(
+        LOGTAG, "Audio conduit returned buffer of length %u", samplesLength);
 
-      RefPtr<SharedBuffer> samples = SharedBuffer::Create(samples_length * sizeof(uint16_t));
-      int16_t *samples_data = static_cast<int16_t *>(samples->Data());
+      RefPtr<SharedBuffer> samples =
+        SharedBuffer::Create(samplesLength * sizeof(uint16_t));
+      int16_t* samplesData = static_cast<int16_t*>(samples->Data());
       AudioSegment segment;
-      // We derive the number of channels of the stream from the number of samples
-      // the AudioConduit gives us, considering it gives us packets of 10ms and we
-      // know the rate.
-      uint32_t channelCount = samples_length / samples_per_10ms;
+      // We derive the number of channels of the stream from the number of
+      // samples the AudioConduit gives us, considering it gives us packets of
+      // 10ms and we know the rate.
+      uint32_t channelCount = samplesLength / samplesPer10ms;
       AutoTArray<int16_t*,2> channels;
       AutoTArray<const int16_t*,2> outputChannels;
-      size_t frames = samples_length / channelCount;
+      size_t frames = samplesLength / channelCount;
 
       channels.SetLength(channelCount);
 
       size_t offset = 0;
       for (size_t i = 0; i < channelCount; i++) {
-        channels[i] = samples_data + offset;
+        channels[i] = samplesData + offset;
         offset += frames;
       }
 
-      DeinterleaveAndConvertBuffer(scratch_buffer,
-                                   frames,
-                                   channelCount,
-                                   channels.Elements());
+      DeinterleaveAndConvertBuffer(
+        scratchBuffer, frames, channelCount, channels.Elements());
 
       outputChannels.AppendElements(channels);
 
-      segment.AppendFrames(samples.forget(), outputChannels, frames,
-                           principal_handle_);
+      segment.AppendFrames(
+        samples.forget(), outputChannels, frames, mPrincipalHandle);
 
       // Handle track not actually added yet or removed/finished
-      if (source->AppendToTrack(track_->GetInputTrackId(), &segment)) {
-        played_ticks_ += frames;
+      if (mSource->AppendToTrack(mTrackId, &segment)) {
+        framesNeeded -= frames;
+        mPlayedTicks += frames;
         if (MOZ_LOG_TEST(AudioLogModule(), LogLevel::Debug)) {
-          if (played_ticks_ > last_log_ + rate) { // ~ 1 second
-            MOZ_LOG(AudioLogModule(), LogLevel::Debug,
-                    ("%p: Inserting %zu samples into track %d, total = %" PRIu64,
-                     (void*) this, frames, track_->GetInputTrackId(),
-                     played_ticks_));
-            last_log_ = played_ticks_;
+          if (mPlayedTicks > mLastLog + mRate) {
+            MOZ_LOG(AudioLogModule(),
+                    LogLevel::Debug,
+                    ("%p: Inserting samples into track %d, total = "
+                     "%" PRIu64,
+                     (void*)this,
+                     mTrackId,
+                     mPlayedTicks));
+            mLastLog = mPlayedTicks;
           }
         }
       } else {
         CSFLogError(LOGTAG, "AppendToTrack failed");
         // we can't un-read the data, but that's ok since we don't want to
         // buffer - but don't i-loop!
-        return;
+        break;
       }
     }
   }
 
-private:
-  RefPtr<MediaSessionConduit> conduit_;
+  RefPtr<MediaSessionConduit> mConduit;
+  const RefPtr<SourceMediaStream> mSource;
+  const TrackID mTrackId;
+  // Graph's current sampling rate
+  const TrackRate mRate;
+  const RefPtr<AutoTaskQueue> mTaskQueue;
+  TrackTicks mLastLog = 0; // mPlayedTicks when we last logged
 };
 
 MediaPipelineReceiveAudio::MediaPipelineReceiveAudio(
-    const std::string& pc,
-    nsCOMPtr<nsIEventTarget> main_thread,
-    nsCOMPtr<nsIEventTarget> sts_thread,
-    RefPtr<AudioSessionConduit> conduit,
-    dom::MediaStreamTrack* aTrack) :
-  MediaPipelineReceive(pc, main_thread, sts_thread, conduit),
-  listener_(aTrack ? new PipelineListener(aTrack, conduit_) : nullptr)
+  const std::string& aPc,
+  nsCOMPtr<nsIEventTarget> aMainThread,
+  nsCOMPtr<nsIEventTarget> aStsThread,
+  RefPtr<AudioSessionConduit> aConduit,
+  dom::MediaStreamTrack* aTrack)
+  : MediaPipelineReceive(aPc, aMainThread, aStsThread, aConduit)
+  , mListener(aTrack ? new PipelineListener(aTrack, mConduit) : nullptr)
 {
-  description_ = pc_ + "| Receive audio";
+  mDescription = mPc + "| Receive audio";
 }
 
-void MediaPipelineReceiveAudio::DetachMedia()
+void
+MediaPipelineReceiveAudio::DetachMedia()
 {
-  ASSERT_ON_THREAD(main_thread_);
-  if (listener_) {
-    listener_->EndTrack();
-    listener_ = nullptr;
+  ASSERT_ON_THREAD(mMainThread);
+  if (mListener) {
+    mListener->EndTrack();
+    mListener = nullptr;
   }
 }
 
-void MediaPipelineReceiveAudio::SetPrincipalHandle_m(const PrincipalHandle& principal_handle)
+void
+MediaPipelineReceiveAudio::SetPrincipalHandle_m(
+  const PrincipalHandle& aPrincipalHandle)
 {
-  if (listener_) {
-    listener_->SetPrincipalHandle_m(principal_handle);
+  if (mListener) {
+    mListener->SetPrincipalHandle_m(aPrincipalHandle);
   }
 }
 
 void
 MediaPipelineReceiveAudio::Start()
 {
-  conduit_->StartReceiving();
-  if (listener_) {
-    listener_->AddSelf();
+  mConduit->StartReceiving();
+  if (mListener) {
+    mListener->AddSelf();
   }
 }
 
 void
 MediaPipelineReceiveAudio::Stop()
 {
-  if (listener_) {
-    listener_->RemoveSelf();
+  if (mListener) {
+    mListener->RemoveSelf();
   }
-  conduit_->StopReceiving();
+  mConduit->StopReceiving();
 }
 
 void
 MediaPipelineReceiveAudio::OnRtpPacketReceived()
 {
-  if (listener_) {
-    listener_->OnRtpReceived();
+  if (mListener) {
+    mListener->OnRtpReceived();
   }
 }
 
 class MediaPipelineReceiveVideo::PipelineListener
-  : public GenericReceiveListener {
+  : public GenericReceiveListener
+{
 public:
-  explicit PipelineListener(dom::MediaStreamTrack* track)
-    : GenericReceiveListener(track)
-    , image_container_()
-    , image_()
-    , mutex_("Video PipelineListener")
+  explicit PipelineListener(dom::MediaStreamTrack* aTrack)
+    : GenericReceiveListener(aTrack)
+    , mImageContainer(
+        LayerManager::CreateImageContainer(ImageContainer::ASYNCHRONOUS))
+    , mMutex("Video PipelineListener")
   {
-    image_container_ =
-      LayerManager::CreateImageContainer(ImageContainer::ASYNCHRONOUS);
   }
 
   // Implement MediaStreamListener
-  void NotifyPull(MediaStreamGraph* graph, StreamTime desired_time) override
+  void NotifyPull(MediaStreamGraph* aGraph, StreamTime aDesiredTime) override
   {
-    MutexAutoLock lock(mutex_);
+    MutexAutoLock lock(mMutex);
 
-    RefPtr<Image> image = image_;
-    StreamTime delta = desired_time - played_ticks_;
+    RefPtr<Image> image = mImage;
+    StreamTime delta = aDesiredTime - mPlayedTicks;
 
     // Don't append if we've already provided a frame that supposedly
     // goes past the current aDesiredTime Doing so means a negative
     // delta and thus messes up handling of the graph
     if (delta > 0) {
       VideoSegment segment;
-      IntSize size = image ? image->GetSize() : IntSize(width_, height_);
-      segment.AppendFrame(image.forget(), delta, size, principal_handle_);
+      IntSize size = image ? image->GetSize() : IntSize(mWidth, mHeight);
+      segment.AppendFrame(image.forget(), delta, size, mPrincipalHandle);
       // Handle track not actually added yet or removed/finished
-      if (track_->GetInputStream()->AsSourceStream()->AppendToTrack(
-            track_->GetInputTrackId(), &segment)) {
-        played_ticks_ = desired_time;
-      } else {
+      if (!mTrack->GetInputStream()->AsSourceStream()->AppendToTrack(
+            mTrack->GetInputTrackId(), &segment)) {
         CSFLogError(LOGTAG, "AppendToTrack failed");
         return;
       }
+      mPlayedTicks = aDesiredTime;
     }
   }
 
   // Accessors for external writes from the renderer
-  void FrameSizeChange(unsigned int width,
-                       unsigned int height,
-                       unsigned int number_of_streams) {
-    MutexAutoLock enter(mutex_);
+  void FrameSizeChange(unsigned int aWidth,
+                       unsigned int aHeight,
+                       unsigned int aNumberOfStreams)
+  {
+    MutexAutoLock enter(mMutex);
 
-    width_ = width;
-    height_ = height;
+    mWidth = aWidth;
+    mHeight = aHeight;
   }
 
-  void RenderVideoFrame(const webrtc::VideoFrameBuffer& buffer,
-                        uint32_t time_stamp,
-                        int64_t render_time)
+  void RenderVideoFrame(const webrtc::VideoFrameBuffer& aBuffer,
+                        uint32_t aTimeStamp,
+                        int64_t aRenderTime)
   {
-    if (buffer.native_handle()) {
+    if (aBuffer.native_handle()) {
       // We assume that only native handles are used with the
       // WebrtcMediaDataDecoderCodec decoder.
-      RefPtr<Image> image = static_cast<Image*>(buffer.native_handle());
-      MutexAutoLock lock(mutex_);
-      image_ = image;
+      RefPtr<Image> image = static_cast<Image*>(aBuffer.native_handle());
+      MutexAutoLock lock(mMutex);
+      mImage = image;
       return;
     }
 
-    MOZ_ASSERT(buffer.DataY());
+    MOZ_ASSERT(aBuffer.DataY());
     // Create a video frame using |buffer|.
     RefPtr<PlanarYCbCrImage> yuvImage =
-      image_container_->CreatePlanarYCbCrImage();
+      mImageContainer->CreatePlanarYCbCrImage();
 
     PlanarYCbCrData yuvData;
-    yuvData.mYChannel = const_cast<uint8_t*>(buffer.DataY());
-    yuvData.mYSize = IntSize(buffer.width(), buffer.height());
-    yuvData.mYStride = buffer.StrideY();
-    MOZ_ASSERT(buffer.StrideU() == buffer.StrideV());
-    yuvData.mCbCrStride = buffer.StrideU();
-    yuvData.mCbChannel = const_cast<uint8_t*>(buffer.DataU());
-    yuvData.mCrChannel = const_cast<uint8_t*>(buffer.DataV());
+    yuvData.mYChannel = const_cast<uint8_t*>(aBuffer.DataY());
+    yuvData.mYSize = IntSize(aBuffer.width(), aBuffer.height());
+    yuvData.mYStride = aBuffer.StrideY();
+    MOZ_ASSERT(aBuffer.StrideU() == aBuffer.StrideV());
+    yuvData.mCbCrStride = aBuffer.StrideU();
+    yuvData.mCbChannel = const_cast<uint8_t*>(aBuffer.DataU());
+    yuvData.mCrChannel = const_cast<uint8_t*>(aBuffer.DataV());
     yuvData.mCbCrSize =
-      IntSize((buffer.width() + 1) >> 1, (buffer.height() + 1) >> 1);
+      IntSize((aBuffer.width() + 1) >> 1, (aBuffer.height() + 1) >> 1);
     yuvData.mPicX = 0;
     yuvData.mPicY = 0;
-    yuvData.mPicSize = IntSize(buffer.width(), buffer.height());
+    yuvData.mPicSize = IntSize(aBuffer.width(), aBuffer.height());
     yuvData.mStereoMode = StereoMode::MONO;
 
     if (!yuvImage->CopyData(yuvData)) {
       MOZ_ASSERT(false);
       return;
     }
 
-    MutexAutoLock lock(mutex_);
-    image_ = yuvImage;
+    MutexAutoLock lock(mMutex);
+    mImage = yuvImage;
   }
 
 private:
-  int width_;
-  int height_;
-  RefPtr<layers::ImageContainer> image_container_;
-  RefPtr<layers::Image> image_;
-  Mutex mutex_; // Mutex for processing WebRTC frames.
-                // Protects image_ against:
+  int mWidth;
+  int mHeight;
+  RefPtr<layers::ImageContainer> mImageContainer;
+  RefPtr<layers::Image> mImage;
+  Mutex mMutex; // Mutex for processing WebRTC frames.
+                // Protects mImage against:
                 // - Writing from the GIPS thread
                 // - Reading from the MSG thread
 };
 
-class MediaPipelineReceiveVideo::PipelineRenderer : public mozilla::VideoRenderer
+class MediaPipelineReceiveVideo::PipelineRenderer
+  : public mozilla::VideoRenderer
 {
 public:
-  explicit PipelineRenderer(MediaPipelineReceiveVideo *pipeline) :
-    pipeline_(pipeline) {}
+  explicit PipelineRenderer(MediaPipelineReceiveVideo* aPipeline)
+    : mPipeline(aPipeline)
+  {
+  }
 
-  void Detach() { pipeline_ = nullptr; }
+  void Detach() { mPipeline = nullptr; }
 
   // Implement VideoRenderer
-  void FrameSizeChange(unsigned int width,
-                       unsigned int height,
-                       unsigned int number_of_streams) override
+  void FrameSizeChange(unsigned int aWidth,
+                       unsigned int aHeight,
+                       unsigned int aNumberOfStreams) override
   {
-    pipeline_->listener_->FrameSizeChange(width, height, number_of_streams);
+    mPipeline->mListener->FrameSizeChange(aWidth, aHeight, aNumberOfStreams);
   }
 
-  void RenderVideoFrame(const webrtc::VideoFrameBuffer& buffer,
-                        uint32_t time_stamp,
-                        int64_t render_time) override
+  void RenderVideoFrame(const webrtc::VideoFrameBuffer& aBuffer,
+                        uint32_t aTimeStamp,
+                        int64_t aRenderTime) override
   {
-    pipeline_->listener_->RenderVideoFrame(buffer, time_stamp, render_time);
+    mPipeline->mListener->RenderVideoFrame(aBuffer, aTimeStamp, aRenderTime);
   }
 
 private:
-  MediaPipelineReceiveVideo *pipeline_;  // Raw pointer to avoid cycles
+  MediaPipelineReceiveVideo* mPipeline; // Raw pointer to avoid cycles
 };
 
-
 MediaPipelineReceiveVideo::MediaPipelineReceiveVideo(
-    const std::string& pc,
-    nsCOMPtr<nsIEventTarget> main_thread,
-    nsCOMPtr<nsIEventTarget> sts_thread,
-    RefPtr<VideoSessionConduit> conduit,
-    dom::MediaStreamTrack* aTrack) :
-  MediaPipelineReceive(pc, main_thread, sts_thread, conduit),
-  renderer_(new PipelineRenderer(this)),
-  listener_(aTrack ? new PipelineListener(aTrack) : nullptr)
+  const std::string& aPc,
+  nsCOMPtr<nsIEventTarget> aMainThread,
+  nsCOMPtr<nsIEventTarget> aStsThread,
+  RefPtr<VideoSessionConduit> aConduit,
+  dom::MediaStreamTrack* aTrack)
+  : MediaPipelineReceive(aPc, aMainThread, aStsThread, aConduit)
+  , mRenderer(new PipelineRenderer(this))
+  , mListener(aTrack ? new PipelineListener(aTrack) : nullptr)
 {
-  description_ = pc_ + "| Receive video";
-  conduit->AttachRenderer(renderer_);
+  mDescription = mPc + "| Receive video";
+  aConduit->AttachRenderer(mRenderer);
 }
 
-void MediaPipelineReceiveVideo::DetachMedia()
+void
+MediaPipelineReceiveVideo::DetachMedia()
 {
-  ASSERT_ON_THREAD(main_thread_);
+  ASSERT_ON_THREAD(mMainThread);
 
   // stop generating video and thus stop invoking the PipelineRenderer
   // and PipelineListener - the renderer has a raw ptr to the Pipeline to
   // avoid cycles, and the render callbacks are invoked from a different
   // thread so simple null-checks would cause TSAN bugs without locks.
-  static_cast<VideoSessionConduit*>(conduit_.get())->DetachRenderer();
-  if (listener_) {
-    listener_->EndTrack();
-    listener_ = nullptr;
+  static_cast<VideoSessionConduit*>(mConduit.get())->DetachRenderer();
+  if (mListener) {
+    mListener->EndTrack();
+    mListener = nullptr;
   }
 }
 
-void MediaPipelineReceiveVideo::SetPrincipalHandle_m(const PrincipalHandle& principal_handle)
+void
+MediaPipelineReceiveVideo::SetPrincipalHandle_m(
+  const PrincipalHandle& aPrincipalHandle)
 {
-  if (listener_) {
-    listener_->SetPrincipalHandle_m(principal_handle);
+  if (mListener) {
+    mListener->SetPrincipalHandle_m(aPrincipalHandle);
   }
 }
 
 void
 MediaPipelineReceiveVideo::Start()
 {
-  conduit_->StartReceiving();
-  if (listener_) {
-    listener_->AddSelf();
+  mConduit->StartReceiving();
+  if (mListener) {
+    mListener->AddSelf();
   }
 }
 
 void
 MediaPipelineReceiveVideo::Stop()
 {
-  if (listener_) {
-    listener_->RemoveSelf();
+  if (mListener) {
+    mListener->RemoveSelf();
   }
-  conduit_->StopReceiving();
+  mConduit->StopReceiving();
 }
 
 void
 MediaPipelineReceiveVideo::OnRtpPacketReceived()
 {
-  if (listener_) {
-    listener_->OnRtpReceived();
+  if (mListener) {
+    mListener->OnRtpReceived();
   }
 }
 
-DOMHighResTimeStamp MediaPipeline::GetNow() {
+DOMHighResTimeStamp
+MediaPipeline::GetNow()
+{
   return webrtc::Clock::GetRealTimeClock()->TimeInMilliseconds();
 }
 
 DOMHighResTimeStamp
-MediaPipeline::RtpCSRCStats::GetExpiryFromTime(
-    const DOMHighResTimeStamp aTime) {
+MediaPipeline::RtpCSRCStats::GetExpiryFromTime(const DOMHighResTimeStamp aTime)
+{
   // DOMHighResTimeStamp is a unit measured in ms
   return aTime - EXPIRY_TIME_MILLISECONDS;
 }
 
 MediaPipeline::RtpCSRCStats::RtpCSRCStats(const uint32_t aCsrc,
                                           const DOMHighResTimeStamp aTime)
   : mCsrc(aCsrc)
-  , mTimestamp(aTime) {}
+  , mTimestamp(aTime)
+{
+}
 
 void
 MediaPipeline::RtpCSRCStats::GetWebidlInstance(
-    dom::RTCRTPContributingSourceStats& aWebidlObj,
-    const nsString &aInboundRtpStreamId) const
+  dom::RTCRTPContributingSourceStats& aWebidlObj,
+  const nsString& aInboundRtpStreamId) const
 {
   nsString statId = NS_LITERAL_STRING("csrc_") + aInboundRtpStreamId;
   statId.AppendLiteral("_");
   statId.AppendInt(mCsrc);
   aWebidlObj.mId.Construct(statId);
   aWebidlObj.mType.Construct(RTCStatsType::Csrc);
   aWebidlObj.mTimestamp.Construct(mTimestamp);
   aWebidlObj.mContributorSsrc.Construct(mCsrc);
   aWebidlObj.mInboundRtpStreamId.Construct(aInboundRtpStreamId);
 }
 
-}  // end namespace
+} // end namespace
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
@@ -33,18 +33,18 @@ class nsIPrincipal;
 
 namespace mozilla {
 class MediaPipelineFilter;
 class PeerIdentity;
 class AudioProxyThread;
 class VideoFrameConverter;
 
 namespace dom {
-  class MediaStreamTrack;
-  struct RTCRTPContributingSourceStats;
+class MediaStreamTrack;
+struct RTCRTPContributingSourceStats;
 } // namespace dom
 
 class SourceMediaStream;
 
 // A class that represents the pipeline of audio and video
 // The dataflow looks like:
 //
 // TRANSMIT
@@ -70,396 +70,389 @@ class SourceMediaStream;
 // One or another GIPS threads
 //   * Receives RTCP messages to send to the other side
 //   * Processes video frames GIPS wants to render
 //
 // For a transmitting conduit, "output" is RTP and "input" is RTCP.
 // For a receiving conduit, "input" is RTP and "output" is RTCP.
 //
 
-class MediaPipeline : public sigslot::has_slots<> {
- public:
-  enum Direction { TRANSMIT, RECEIVE };
-  enum State { MP_CONNECTING, MP_OPEN, MP_CLOSED };
-  MediaPipeline(const std::string& pc,
-                Direction direction,
-                nsCOMPtr<nsIEventTarget> main_thread,
-                nsCOMPtr<nsIEventTarget> sts_thread,
-                RefPtr<MediaSessionConduit> conduit);
+class MediaPipeline : public sigslot::has_slots<>
+{
+public:
+  enum class DirectionType
+  {
+    TRANSMIT,
+    RECEIVE
+  };
+  enum class StateType
+  {
+    MP_CONNECTING,
+    MP_OPEN,
+    MP_CLOSED
+  };
+  MediaPipeline(const std::string& aPc,
+                DirectionType aDirection,
+                nsCOMPtr<nsIEventTarget> aMainThread,
+                nsCOMPtr<nsIEventTarget> aStsThread,
+                RefPtr<MediaSessionConduit> aConduit);
 
   virtual void Start() = 0;
   virtual void Stop() = 0;
   virtual void DetachMedia() {}
 
-  void SetLevel(size_t level) { level_ = level; }
+  void SetLevel(size_t aLevel) { mLevel = aLevel; }
 
   // Must be called on the main thread.
   void Shutdown_m();
 
-  void UpdateTransport_m(RefPtr<TransportFlow> rtp_transport,
-                         RefPtr<TransportFlow> rtcp_transport,
-                         nsAutoPtr<MediaPipelineFilter> filter);
+  void UpdateTransport_m(RefPtr<TransportFlow> aRtpTransport,
+                         RefPtr<TransportFlow> aRtcpTransport,
+                         nsAutoPtr<MediaPipelineFilter> aFilter);
 
-  void UpdateTransport_s(RefPtr<TransportFlow> rtp_transport,
-                         RefPtr<TransportFlow> rtcp_transport,
-                         nsAutoPtr<MediaPipelineFilter> filter);
+  void UpdateTransport_s(RefPtr<TransportFlow> aRtpTransport,
+                         RefPtr<TransportFlow> aRtcpTransport,
+                         nsAutoPtr<MediaPipelineFilter> aFilter);
 
   // Used only for testing; adds RTP header extension for RTP Stream Id with
   // the given id.
-  void AddRIDExtension_m(size_t extension_id);
-  void AddRIDExtension_s(size_t extension_id);
+  void AddRIDExtension_m(size_t aExtensionId);
+  void AddRIDExtension_s(size_t aExtensionId);
   // Used only for testing; installs a MediaPipelineFilter that filters
   // everything but the given RID
-  void AddRIDFilter_m(const std::string& rid);
-  void AddRIDFilter_s(const std::string& rid);
+  void AddRIDFilter_m(const std::string& aRid);
+  void AddRIDFilter_s(const std::string& aRid);
 
-  virtual Direction direction() const { return direction_; }
-  int level() const { return level_; }
+  virtual DirectionType Direction() const { return mDirection; }
+  int Level() const { return mLevel; }
   virtual bool IsVideo() const = 0;
 
-  bool IsDoingRtcpMux() const {
-    return (rtp_.type_ == MUX);
-  }
+  bool IsDoingRtcpMux() const { return mRtp.mType == MUX; }
 
-  class RtpCSRCStats {
+  class RtpCSRCStats
+  {
   public:
     // Gets an expiration time for CRC info given a reference time,
     //   this reference time would normally be the time of calling.
     //   This value can then be used to check if a RtpCSRCStats
     //   has expired via Expired(...)
-    static DOMHighResTimeStamp
-    GetExpiryFromTime(const DOMHighResTimeStamp aTime);
+    static DOMHighResTimeStamp GetExpiryFromTime(
+      const DOMHighResTimeStamp aTime);
 
-    RtpCSRCStats(const uint32_t aCsrc,
-                 const DOMHighResTimeStamp aTime);
-    ~RtpCSRCStats() {};
+    RtpCSRCStats(const uint32_t aCsrc, const DOMHighResTimeStamp aTime);
+    ~RtpCSRCStats(){};
     // Initialize a webidl representation suitable for adding to a report.
     //   This assumes that the webidl object is empty.
     // @param aWebidlObj the webidl binding object to popluate
-    // @param aRtpInboundStreamId the associated RTCInboundRTPStreamStats.id
-    void
-    GetWebidlInstance(dom::RTCRTPContributingSourceStats& aWebidlObj,
-                             const nsString &aInboundRtpStreamId) const;
+    // @param aInboundRtpStreamId the associated RTCInboundRTPStreamStats.id
+    void GetWebidlInstance(dom::RTCRTPContributingSourceStats& aWebidlObj,
+                           const nsString& aInboundRtpStreamId) const;
     void SetTimestamp(const DOMHighResTimeStamp aTime) { mTimestamp = aTime; }
     // Check if the RtpCSRCStats has expired, checks against a
     //   given expiration time.
-    bool Expired(const DOMHighResTimeStamp aExpiry) const {
+    bool Expired(const DOMHighResTimeStamp aExpiry) const
+    {
       return mTimestamp < aExpiry;
     }
+
   private:
     static const double constexpr EXPIRY_TIME_MILLISECONDS = 10 * 1000;
-    uint32_t mCsrc;
+    const uint32_t mCsrc;
     DOMHighResTimeStamp mTimestamp;
   };
 
   // Gets the gathered contributing source stats for the last expiration period.
   // @param aId the stream id to use for populating inboundRtpStreamId field
   // @param aArr the array to append the stats objects to
-  void
-  GetContributingSourceStats(
-      const nsString& aInboundStreamId,
-      FallibleTArray<dom::RTCRTPContributingSourceStats>& aArr) const;
+  void GetContributingSourceStats(
+    const nsString& aInboundStreamId,
+    FallibleTArray<dom::RTCRTPContributingSourceStats>& aArr) const;
 
-  int32_t rtp_packets_sent() const { return rtp_packets_sent_; }
-  int64_t rtp_bytes_sent() const { return rtp_bytes_sent_; }
-  int32_t rtcp_packets_sent() const { return rtcp_packets_sent_; }
-  int32_t rtp_packets_received() const { return rtp_packets_received_; }
-  int64_t rtp_bytes_received() const { return rtp_bytes_received_; }
-  int32_t rtcp_packets_received() const { return rtcp_packets_received_; }
+  int32_t RtpPacketsSent() const { return mRtpPacketsSent; }
+  int64_t RtpBytesSent() const { return mRtpBytesSent; }
+  int32_t RtcpPacketsSent() const { return mRtcpPacketsSent; }
+  int32_t RtpPacketsReceived() const { return mRtpPacketsReceived; }
+  int64_t RtpBytesReceived() const { return mRtpBytesReceived; }
+  int32_t RtcpPacketsReceived() const { return mRtcpPacketsReceived; }
 
-  MediaSessionConduit *Conduit() const { return conduit_; }
+  MediaSessionConduit* Conduit() const { return mConduit; }
 
   // Thread counting
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaPipeline)
 
-  typedef enum {
-    RTP,
-    RTCP,
-    MUX,
-    MAX_RTP_TYPE
-  } RtpType;
+  typedef enum { RTP, RTCP, MUX, MAX_RTP_TYPE } RtpType;
 
   // Separate class to allow ref counting
-  class PipelineTransport : public TransportInterface {
-   public:
+  class PipelineTransport : public TransportInterface
+  {
+  public:
     // Implement the TransportInterface functions
-    explicit PipelineTransport(MediaPipeline *pipeline)
-        : pipeline_(pipeline),
-          sts_thread_(pipeline->sts_thread_) {}
+    explicit PipelineTransport(MediaPipeline* aPipeline)
+      : mPipeline(aPipeline)
+      , mStsThread(aPipeline->mStsThread)
+    {
+    }
 
-    void Attach(MediaPipeline *pipeline) { pipeline_ = pipeline; }
-    void Detach() { pipeline_ = nullptr; }
-    MediaPipeline *pipeline() const { return pipeline_; }
+    void Attach(MediaPipeline* pipeline) { mPipeline = pipeline; }
+    void Detach() { mPipeline = nullptr; }
+    MediaPipeline* Pipeline() const { return mPipeline; }
 
-    virtual nsresult SendRtpPacket(const uint8_t* data, size_t len);
-    virtual nsresult SendRtcpPacket(const uint8_t* data, size_t len);
+    virtual nsresult SendRtpPacket(const uint8_t* aData, size_t aLen);
+    virtual nsresult SendRtcpPacket(const uint8_t* aData, size_t aLen);
 
-   private:
-    nsresult SendRtpRtcpPacket_s(nsAutoPtr<DataBuffer> data,
-                                 bool is_rtp);
+  private:
+    nsresult SendRtpRtcpPacket_s(nsAutoPtr<DataBuffer> aData, bool aIsRtp);
 
     // Creates a cycle, which we break with Detach
-    RefPtr<MediaPipeline> pipeline_;
-    nsCOMPtr<nsIEventTarget> sts_thread_;
+    RefPtr<MediaPipeline> mPipeline;
+    const nsCOMPtr<nsIEventTarget> mStsThread;
   };
 
- protected:
+protected:
   virtual ~MediaPipeline();
   nsresult AttachTransport_s();
   friend class PipelineTransport;
 
-  class TransportInfo {
-    public:
-      TransportInfo(RefPtr<TransportFlow> flow, RtpType type) :
-        transport_(flow),
-        state_(MP_CONNECTING),
-        type_(type) {
-      }
+  struct TransportInfo
+  {
+    TransportInfo(RefPtr<TransportFlow> aFlow, RtpType aType)
+      : mTransport(aFlow)
+      , mState(StateType::MP_CONNECTING)
+      , mType(aType)
+    {
+    }
 
-      void Detach()
-      {
-        transport_ = nullptr;
-        send_srtp_ = nullptr;
-        recv_srtp_ = nullptr;
-      }
+    void Detach()
+    {
+      mTransport = nullptr;
+      mSendSrtp = nullptr;
+      mRecvSrtp = nullptr;
+    }
 
-      RefPtr<TransportFlow> transport_;
-      State state_;
-      RefPtr<SrtpFlow> send_srtp_;
-      RefPtr<SrtpFlow> recv_srtp_;
-      RtpType type_;
+    RefPtr<TransportFlow> mTransport;
+    StateType mState;
+    RefPtr<SrtpFlow> mSendSrtp;
+    RefPtr<SrtpFlow> mRecvSrtp;
+    RtpType mType;
   };
 
   // The transport is down
-  virtual nsresult TransportFailed_s(TransportInfo &info);
+  virtual nsresult TransportFailed_s(TransportInfo& aInfo);
   // The transport is ready
-  virtual nsresult TransportReady_s(TransportInfo &info);
-  void UpdateRtcpMuxState(TransportInfo &info);
+  virtual nsresult TransportReady_s(TransportInfo& aInfo);
+  void UpdateRtcpMuxState(TransportInfo& aInfo);
 
-  nsresult ConnectTransport_s(TransportInfo &info);
+  nsresult ConnectTransport_s(TransportInfo& aInfo);
+
+  TransportInfo* GetTransportInfo_s(TransportFlow* aFlow);
 
-  TransportInfo* GetTransportInfo_s(TransportFlow *flow);
+  void IncrementRtpPacketsSent(int aBytes);
+  void IncrementRtcpPacketsSent();
+  void IncrementRtpPacketsReceived(int aBytes);
+  virtual void OnRtpPacketReceived() {};
+  void IncrementRtcpPacketsReceived();
 
-  void increment_rtp_packets_sent(int bytes);
-  void increment_rtcp_packets_sent();
-  void increment_rtp_packets_received(int bytes);
-  virtual void OnRtpPacketReceived() {};
-  void increment_rtcp_packets_received();
-
-  virtual nsresult SendPacket(TransportFlow *flow, const void *data, int len);
+  virtual nsresult SendPacket(const TransportFlow* aFlow,
+                              const void* aData,
+                              int aLen);
 
   // Process slots on transports
-  void StateChange(TransportFlow *flow, TransportLayer::State);
-  void RtpPacketReceived(TransportLayer *layer, const unsigned char *data,
-                         size_t len);
-  void RtcpPacketReceived(TransportLayer *layer, const unsigned char *data,
-                          size_t len);
-  void PacketReceived(TransportLayer *layer, const unsigned char *data,
-                      size_t len);
+  void StateChange(TransportFlow* aFlow, TransportLayer::State);
+  void RtpPacketReceived(TransportLayer* aLayer,
+                         const unsigned char* aData,
+                         size_t aLen);
+  void RtcpPacketReceived(TransportLayer* aLayer,
+                          const unsigned char* aData,
+                          size_t aLen);
+  void PacketReceived(TransportLayer* aLayer,
+                      const unsigned char* aData,
+                      size_t aLen);
 
-  Direction direction_;
-  size_t level_;
-  RefPtr<MediaSessionConduit> conduit_;  // Our conduit. Written on the main
-                                         // thread. Read on STS thread.
+  const DirectionType mDirection;
+  size_t mLevel;
+  RefPtr<MediaSessionConduit> mConduit; // Our conduit. Written on the main
+                                        // thread. Read on STS thread.
 
   // The transport objects. Read/written on STS thread.
-  TransportInfo rtp_;
-  TransportInfo rtcp_;
+  TransportInfo mRtp;
+  TransportInfo mRtcp;
 
   // Pointers to the threads we need. Initialized at creation
   // and used all over the place.
-  nsCOMPtr<nsIEventTarget> main_thread_;
-  nsCOMPtr<nsIEventTarget> sts_thread_;
+  const nsCOMPtr<nsIEventTarget> mMainThread;
+  const nsCOMPtr<nsIEventTarget> mStsThread;
 
   // Created in c'tor. Referenced by the conduit.
-  RefPtr<PipelineTransport> transport_;
+  RefPtr<PipelineTransport> mTransport;
 
   // Only safe to access from STS thread.
   // Build into TransportInfo?
-  int32_t rtp_packets_sent_;
-  int32_t rtcp_packets_sent_;
-  int32_t rtp_packets_received_;
-  int32_t rtcp_packets_received_;
-  int64_t rtp_bytes_sent_;
-  int64_t rtp_bytes_received_;
+  int32_t mRtpPacketsSent;
+  int32_t mRtcpPacketsSent;
+  int32_t mRtpPacketsReceived;
+  int32_t mRtcpPacketsReceived;
+  int64_t mRtpBytesSent;
+  int64_t mRtpBytesReceived;
 
   // Only safe to access from STS thread.
-  std::map<uint32_t, RtpCSRCStats> csrc_stats_;
+  std::map<uint32_t, RtpCSRCStats> mCsrcStats;
 
   // Written in c'tor. Read on STS thread.
-  std::string pc_;
-  std::string description_;
+  const std::string mPc;
+  std::string mDescription;
 
   // Written in c'tor, all following accesses are on the STS thread.
-  nsAutoPtr<MediaPipelineFilter> filter_;
-  nsAutoPtr<webrtc::RtpHeaderParser> rtp_parser_;
+  nsAutoPtr<MediaPipelineFilter> mFilter;
+  const nsAutoPtr<webrtc::RtpHeaderParser> mRtpParser;
 
-  nsAutoPtr<PacketDumper> packet_dumper_;
+  nsAutoPtr<PacketDumper> mPacketDumper;
 
- private:
+private:
   // Gets the current time as a DOMHighResTimeStamp
   static DOMHighResTimeStamp GetNow();
 
-  bool IsRtp(const unsigned char *data, size_t len);
+  bool IsRtp(const unsigned char* aData, size_t aLen) const;
   // Must be called on the STS thread.  Must be called after DetachMedia().
   void DetachTransport_s();
 };
 
-class ConduitDeleteEvent: public Runnable
+// A specialization of pipeline for reading from an input device
+// and transmitting to the network.
+class MediaPipelineTransmit : public MediaPipeline
 {
 public:
-  explicit ConduitDeleteEvent(already_AddRefed<MediaSessionConduit> aConduit) :
-    Runnable("ConduitDeleteEvent"),
-    mConduit(aConduit) {}
-
-  /* we exist solely to proxy release of the conduit */
-  NS_IMETHOD Run() override { return NS_OK; }
-private:
-  RefPtr<MediaSessionConduit> mConduit;
-};
-
-// A specialization of pipeline for reading from an input device
-// and transmitting to the network.
-class MediaPipelineTransmit : public MediaPipeline {
-public:
-  // Set rtcp_transport to nullptr to use rtcp-mux
-  MediaPipelineTransmit(const std::string& pc,
-                        nsCOMPtr<nsIEventTarget> main_thread,
-                        nsCOMPtr<nsIEventTarget> sts_thread,
-                        bool is_video,
-                        dom::MediaStreamTrack* domtrack,
-                        RefPtr<MediaSessionConduit> conduit);
+  // Set aRtcpTransport to nullptr to use rtcp-mux
+  MediaPipelineTransmit(const std::string& aPc,
+                        nsCOMPtr<nsIEventTarget> aMainThread,
+                        nsCOMPtr<nsIEventTarget> aStsThread,
+                        bool aIsVideo,
+                        dom::MediaStreamTrack* aDomTrack,
+                        RefPtr<MediaSessionConduit> aConduit);
 
   void Start() override;
   void Stop() override;
 
   // written and used from MainThread
   bool IsVideo() const override;
 
   // When the principal of the domtrack changes, it calls through to here
   // so that we can determine whether to enable track transmission.
-  // `track` has to be null or equal `domtrack_` for us to apply the update.
-  virtual void UpdateSinkIdentity_m(dom::MediaStreamTrack* track,
-                                    nsIPrincipal* principal,
-                                    const PeerIdentity* sinkIdentity);
+  // `track` has to be null or equal `mDomTrack` for us to apply the update.
+  virtual void UpdateSinkIdentity_m(const dom::MediaStreamTrack* aTrack,
+                                    nsIPrincipal* aPrincipal,
+                                    const PeerIdentity* aSinkIdentity);
 
   // Called on the main thread.
   void DetachMedia() override;
 
   // Override MediaPipeline::TransportReady.
-  nsresult TransportReady_s(TransportInfo &info) override;
+  nsresult TransportReady_s(TransportInfo& aInfo) override;
 
   // Replace a track with a different one
   // In non-compliance with the likely final spec, allow the new
   // track to be part of a different stream (since we don't support
   // multiple tracks of a type in a stream yet).  bug 1056650
-  virtual nsresult ReplaceTrack(RefPtr<dom::MediaStreamTrack>& domtrack);
+  virtual nsresult ReplaceTrack(RefPtr<dom::MediaStreamTrack>& aDomTrack);
 
   // Separate classes to allow ref counting
   class PipelineListener;
   class VideoFrameFeeder;
 
- protected:
+protected:
   ~MediaPipelineTransmit();
 
   void SetDescription();
 
- private:
-  RefPtr<PipelineListener> listener_;
-  RefPtr<AudioProxyThread> audio_processing_;
-  RefPtr<VideoFrameFeeder> feeder_;
-  RefPtr<VideoFrameConverter> converter_;
-  bool is_video_;
-  RefPtr<dom::MediaStreamTrack> domtrack_;
-  bool transmitting_;
+private:
+  const bool mIsVideo;
+  const RefPtr<PipelineListener> mListener;
+  const RefPtr<VideoFrameFeeder> mFeeder;
+  RefPtr<AudioProxyThread> mAudioProcessing;
+  RefPtr<VideoFrameConverter> mConverter;
+  RefPtr<dom::MediaStreamTrack> mDomTrack;
+  bool mTransmitting;
 };
 
-
 // A specialization of pipeline for reading from the network and
 // rendering media.
-class MediaPipelineReceive : public MediaPipeline {
- public:
-  // Set rtcp_transport to nullptr to use rtcp-mux
-  MediaPipelineReceive(const std::string& pc,
-                       nsCOMPtr<nsIEventTarget> main_thread,
-                       nsCOMPtr<nsIEventTarget> sts_thread,
-                       RefPtr<MediaSessionConduit> conduit);
-
-  int segments_added() const { return segments_added_; }
+class MediaPipelineReceive : public MediaPipeline
+{
+public:
+  // Set aRtcpTransport to nullptr to use rtcp-mux
+  MediaPipelineReceive(const std::string& aPc,
+                       nsCOMPtr<nsIEventTarget> aMainThread,
+                       nsCOMPtr<nsIEventTarget> aStsThread,
+                       RefPtr<MediaSessionConduit> aConduit);
 
   // Sets the PrincipalHandle we set on the media chunks produced by this
   // pipeline. Must be called on the main thread.
-  virtual void SetPrincipalHandle_m(const PrincipalHandle& principal_handle) = 0;
-
- protected:
-  ~MediaPipelineReceive();
+  virtual void SetPrincipalHandle_m(
+    const PrincipalHandle& aPrincipalHandle) = 0;
 
-  int segments_added_;
-
- private:
+protected:
+  ~MediaPipelineReceive();
 };
 
-
 // A specialization of pipeline for reading from the network and
 // rendering audio.
-class MediaPipelineReceiveAudio : public MediaPipelineReceive {
- public:
-  MediaPipelineReceiveAudio(const std::string& pc,
-                            nsCOMPtr<nsIEventTarget> main_thread,
-                            nsCOMPtr<nsIEventTarget> sts_thread,
-                            RefPtr<AudioSessionConduit> conduit,
+class MediaPipelineReceiveAudio : public MediaPipelineReceive
+{
+public:
+  MediaPipelineReceiveAudio(const std::string& aPc,
+                            nsCOMPtr<nsIEventTarget> aMainThread,
+                            nsCOMPtr<nsIEventTarget> aStsThread,
+                            RefPtr<AudioSessionConduit> aConduit,
                             dom::MediaStreamTrack* aTrack);
 
   void DetachMedia() override;
 
   bool IsVideo() const override { return false; }
 
-  void SetPrincipalHandle_m(const PrincipalHandle& principal_handle) override;
+  void SetPrincipalHandle_m(const PrincipalHandle& aPrincipalHandle) override;
 
   void Start() override;
   void Stop() override;
 
   void OnRtpPacketReceived() override;
 
- private:
+private:
   // Separate class to allow ref counting
   class PipelineListener;
 
-  RefPtr<PipelineListener> listener_;
+  RefPtr<PipelineListener> mListener;
 };
 
-
 // A specialization of pipeline for reading from the network and
 // rendering video.
-class MediaPipelineReceiveVideo : public MediaPipelineReceive {
- public:
-  MediaPipelineReceiveVideo(const std::string& pc,
-                            nsCOMPtr<nsIEventTarget> main_thread,
-                            nsCOMPtr<nsIEventTarget> sts_thread,
-                            RefPtr<VideoSessionConduit> conduit,
+class MediaPipelineReceiveVideo : public MediaPipelineReceive
+{
+public:
+  MediaPipelineReceiveVideo(const std::string& aPc,
+                            nsCOMPtr<nsIEventTarget> aMainThread,
+                            nsCOMPtr<nsIEventTarget> aStsThread,
+                            RefPtr<VideoSessionConduit> aConduit,
                             dom::MediaStreamTrack* aTrack);
 
   // Called on the main thread.
   void DetachMedia() override;
 
   bool IsVideo() const override { return true; }
 
-  void SetPrincipalHandle_m(const PrincipalHandle& principal_handle) override;
+  void SetPrincipalHandle_m(const PrincipalHandle& aPrincipalHandle) override;
 
   void Start() override;
   void Stop() override;
 
   void OnRtpPacketReceived() override;
 
- private:
+private:
   class PipelineRenderer;
   friend class PipelineRenderer;
 
   // Separate class to allow ref counting
   class PipelineListener;
 
-  RefPtr<PipelineRenderer> renderer_;
-  RefPtr<PipelineListener> listener_;
+  const RefPtr<PipelineRenderer> mRenderer;
+  RefPtr<PipelineListener> mListener;
 };
 
-
-}  // namespace mozilla
+} // namespace mozilla
 #endif
--- a/media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.cpp
+++ b/media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.cpp
@@ -3464,18 +3464,18 @@ PeerConnectionImpl::ExecuteStatsQuery_s(
         NS_LITERAL_STRING("audio") : NS_LITERAL_STRING("video");
     nsString idstr = mediaType;
     idstr.AppendLiteral("_");
     idstr.AppendInt((uint32_t)p);
 
     // TODO(@@NG):ssrcs handle Conduits having multiple stats at the same level
     // This is pending spec work
     // Gather pipeline stats.
-    switch (mp.direction()) {
-      case MediaPipeline::TRANSMIT: {
+    switch (mp.Direction()) {
+      case MediaPipeline::DirectionType::TRANSMIT: {
         nsString localId = NS_LITERAL_STRING("outbound_rtp_") + idstr;
         nsString remoteId;
         nsString ssrc;
         std::vector<unsigned int> ssrcvals = mp.Conduit()->GetLocalSSRCs();
         if (!ssrcvals.empty()) {
           ssrc.AppendInt(ssrcvals[0]);
         }
         {
@@ -3522,18 +3522,18 @@ PeerConnectionImpl::ExecuteStatsQuery_s(
           s.mId.Construct(localId);
           s.mType.Construct(RTCStatsType::Outbound_rtp);
           if (ssrc.Length()) {
             s.mSsrc.Construct(ssrc);
           }
           s.mMediaType.Construct(mediaType);
           s.mRemoteId.Construct(remoteId);
           s.mIsRemote = false;
-          s.mPacketsSent.Construct(mp.rtp_packets_sent());
-          s.mBytesSent.Construct(mp.rtp_bytes_sent());
+          s.mPacketsSent.Construct(mp.RtpPacketsSent());
+          s.mBytesSent.Construct(mp.RtpBytesSent());
 
           // Fill in packet type statistics
           webrtc::RtcpPacketTypeCounter counters;
           if (mp.Conduit()->GetSendPacketTypeStats(&counters)) {
             s.mNackCount.Construct(counters.nack_packets);
             // Fill in video only packet type stats
             if (!isAudio) {
               s.mFirCount.Construct(counters.fir_packets);
@@ -3563,17 +3563,17 @@ PeerConnectionImpl::ExecuteStatsQuery_s(
               s.mFramesEncoded.Construct(framesEncoded);
             }
           }
           query->report->mOutboundRTPStreamStats.Value().AppendElement(s,
                                                                        fallible);
         }
         break;
       }
-      case MediaPipeline::RECEIVE: {
+      case MediaPipeline::DirectionType::RECEIVE: {
         nsString localId = NS_LITERAL_STRING("inbound_rtp_") + idstr;
         nsString remoteId;
         nsString ssrc;
         unsigned int ssrcval;
         if (mp.Conduit()->GetRemoteSSRC(&ssrcval)) {
           ssrc.AppendInt(ssrcval);
         }
         {
@@ -3613,18 +3613,18 @@ PeerConnectionImpl::ExecuteStatsQuery_s(
         if (mp.Conduit()->GetRTPStats(&jitterMs, &packetsLost)) {
           s.mJitter.Construct(double(jitterMs)/1000);
           s.mPacketsLost.Construct(packetsLost);
         }
         if (remoteId.Length()) {
           s.mRemoteId.Construct(remoteId);
         }
         s.mIsRemote = false;
-        s.mPacketsReceived.Construct(mp.rtp_packets_received());
-        s.mBytesReceived.Construct(mp.rtp_bytes_received());
+        s.mPacketsReceived.Construct(mp.RtpPacketsReceived());
+        s.mBytesReceived.Construct(mp.RtpBytesReceived());
 
         if (query->internalStats && isAudio) {
           int32_t jitterBufferDelay;
           int32_t playoutBufferDelay;
           int32_t avSyncDelta;
           if (mp.Conduit()->GetAVStats(&jitterBufferDelay,
                                        &playoutBufferDelay,
                                        &avSyncDelta)) {
--- a/media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.cpp
+++ b/media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.cpp
@@ -1498,17 +1498,17 @@ PeerConnectionMedia::UpdateRemoteStreamP
   ASSERT_ON_THREAD(mMainThread);
 
   for (RefPtr<TransceiverImpl>& transceiver : mTransceivers) {
     transceiver->UpdatePrincipal(aPrincipal);
   }
 }
 
 void
-PeerConnectionMedia::UpdateSinkIdentity_m(MediaStreamTrack* aTrack,
+PeerConnectionMedia::UpdateSinkIdentity_m(const MediaStreamTrack* aTrack,
                                           nsIPrincipal* aPrincipal,
                                           const PeerIdentity* aSinkIdentity)
 {
   ASSERT_ON_THREAD(mMainThread);
 
   for (RefPtr<TransceiverImpl>& transceiver : mTransceivers) {
     transceiver->UpdateSinkIdentity(aTrack, aPrincipal, aSinkIdentity);
   }
--- a/media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.h
+++ b/media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.h
@@ -135,17 +135,17 @@ class PeerConnectionMedia : public sigsl
   nsresult AddRIDFilter(dom::MediaStreamTrack& aRecvTrack,
                         const nsAString& aRid);
 
   // In cases where the peer isn't yet identified, we disable the pipeline (not
   // the stream, that would potentially affect others), so that it sends
   // black/silence.  Once the peer is identified, re-enable those streams.
   // aTrack will be set if this update came from a principal change on aTrack.
   // TODO: Move to PeerConnectionImpl
-  void UpdateSinkIdentity_m(dom::MediaStreamTrack* aTrack,
+  void UpdateSinkIdentity_m(const dom::MediaStreamTrack* aTrack,
                             nsIPrincipal* aPrincipal,
                             const PeerIdentity* aSinkIdentity);
   // this determines if any track is peerIdentity constrained
   bool AnyLocalTrackHasPeerIdentity() const;
   // When we finally learn who is on the other end, we need to change the ownership
   // on streams
   void UpdateRemoteStreamPrincipals_m(nsIPrincipal* aPrincipal);
 
--- a/media/webrtc/signaling/src/peerconnection/TransceiverImpl.cpp
+++ b/media/webrtc/signaling/src/peerconnection/TransceiverImpl.cpp
@@ -115,17 +115,17 @@ TransceiverImpl::InitVideo()
       mPCHandle,
       mMainThread.get(),
       mStsThread.get(),
       static_cast<VideoSessionConduit*>(mConduit.get()),
       mReceiveTrack);
 }
 
 nsresult
-TransceiverImpl::UpdateSinkIdentity(dom::MediaStreamTrack* aTrack,
+TransceiverImpl::UpdateSinkIdentity(const dom::MediaStreamTrack* aTrack,
                                     nsIPrincipal* aPrincipal,
                                     const PeerIdentity* aSinkIdentity)
 {
   if (mJsepTransceiver->IsStopped()) {
     return NS_OK;
   }
 
   mTransmitPipeline->UpdateSinkIdentity_m(aTrack, aPrincipal, aSinkIdentity);
--- a/media/webrtc/signaling/src/peerconnection/TransceiverImpl.h
+++ b/media/webrtc/signaling/src/peerconnection/TransceiverImpl.h
@@ -60,17 +60,17 @@ public:
 
   bool IsValid() const
   {
     return !!mConduit;
   }
 
   nsresult UpdateSendTrack(dom::MediaStreamTrack* aSendTrack);
 
-  nsresult UpdateSinkIdentity(dom::MediaStreamTrack* aTrack,
+  nsresult UpdateSinkIdentity(const dom::MediaStreamTrack* aTrack,
                               nsIPrincipal* aPrincipal,
                               const PeerIdentity* aSinkIdentity);
 
   nsresult UpdateTransport(PeerConnectionMedia& aTransportManager);
 
   nsresult UpdateConduit();
 
   nsresult UpdatePrincipal(nsIPrincipal* aPrincipal);
--- a/mfbt/LinkedList.h
+++ b/mfbt/LinkedList.h
@@ -546,17 +546,17 @@ public:
    * Measures the memory consumption of the list excluding |this|.  Note that
    * it only measures the list elements themselves.  If the list elements
    * contain pointers to other memory blocks, those blocks must be measured
    * separately during a subsequent iteration over the list.
    */
   size_t sizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
   {
     size_t n = 0;
-    for (const T* t = getFirst(); t; t = t->getNext()) {
+    for (ConstRawType t = getFirst(); t; t = t->getNext()) {
       n += aMallocSizeOf(t);
     }
     return n;
   }
 
   /*
    * Like sizeOfExcludingThis(), but measures |this| as well.
    */
--- a/mfbt/Span.h
+++ b/mfbt/Span.h
@@ -31,21 +31,29 @@
 #include <algorithm>
 #include <array>
 #include <cstring>
 #include <iterator>
 
 // Classifications for reasons why constexpr was removed in C++14 to C++11
 // conversion. Once we upgrade compilers, we can try defining each of these
 // to constexpr to restore a category of constexprs at a time.
+#if !defined(__clang__) && defined(__GNUC__) && __cpp_constexpr < 201304
 #define MOZ_SPAN_ASSERTION_CONSTEXPR
 #define MOZ_SPAN_GCC_CONSTEXPR
 #define MOZ_SPAN_EXPLICITLY_DEFAULTED_CONSTEXPR
 #define MOZ_SPAN_CONSTEXPR_NOT_JUST_RETURN
 #define MOZ_SPAN_NON_CONST_CONSTEXPR
+#else
+#define MOZ_SPAN_ASSERTION_CONSTEXPR constexpr
+#define MOZ_SPAN_GCC_CONSTEXPR constexpr
+#define MOZ_SPAN_EXPLICITLY_DEFAULTED_CONSTEXPR constexpr
+#define MOZ_SPAN_CONSTEXPR_NOT_JUST_RETURN constexpr
+#define MOZ_SPAN_NON_CONST_CONSTEXPR constexpr
+#endif
 
 #ifdef _MSC_VER
 #pragma warning(push)
 
 // turn off some warnings that are noisy about our MOZ_RELEASE_ASSERT statements
 #pragma warning(disable : 4127) // conditional expression is constant
 
 // blanket turn off warnings from CppCoreCheck for now
@@ -186,44 +194,44 @@ public:
   operator=(const span_iterator<Span, IsConst>&) = default;
 
   MOZ_SPAN_GCC_CONSTEXPR reference operator*() const
   {
     MOZ_RELEASE_ASSERT(span_);
     return (*span_)[index_];
   }
 
-  MOZ_SPAN_GCC_CONSTEXPR pointer operator->() const
+  constexpr pointer operator->() const
   {
     MOZ_RELEASE_ASSERT(span_);
     return &((*span_)[index_]);
   }
 
   MOZ_SPAN_NON_CONST_CONSTEXPR span_iterator& operator++()
   {
     MOZ_RELEASE_ASSERT(span_ && index_ >= 0 && index_ < span_->Length());
     ++index_;
     return *this;
   }
 
-  MOZ_SPAN_NON_CONST_CONSTEXPR span_iterator operator++(int)
+  constexpr span_iterator operator++(int)
   {
     auto ret = *this;
     ++(*this);
     return ret;
   }
 
   MOZ_SPAN_NON_CONST_CONSTEXPR span_iterator& operator--()
   {
     MOZ_RELEASE_ASSERT(span_ && index_ > 0 && index_ <= span_->Length());
     --index_;
     return *this;
   }
 
-  MOZ_SPAN_NON_CONST_CONSTEXPR span_iterator operator--(int)
+  constexpr span_iterator operator--(int)
   {
     auto ret = *this;
     --(*this);
     return ret;
   }
 
   MOZ_SPAN_CONSTEXPR_NOT_JUST_RETURN span_iterator
   operator+(difference_type n) const
@@ -235,24 +243,24 @@ public:
   MOZ_SPAN_GCC_CONSTEXPR span_iterator& operator+=(difference_type n)
   {
     MOZ_RELEASE_ASSERT(span_ && (index_ + n) >= 0 &&
                        (index_ + n) <= span_->Length());
     index_ += n;
     return *this;
   }
 
-  MOZ_SPAN_CONSTEXPR_NOT_JUST_RETURN span_iterator
+  constexpr span_iterator
   operator-(difference_type n) const
   {
     auto ret = *this;
     return ret -= n;
   }
 
-  MOZ_SPAN_NON_CONST_CONSTEXPR span_iterator& operator-=(difference_type n)
+  constexpr span_iterator& operator-=(difference_type n)
 
   {
     return *this += -n;
   }
 
   MOZ_SPAN_GCC_CONSTEXPR difference_type
   operator-(const span_iterator& rhs) const
   {
@@ -279,29 +287,29 @@ public:
 
   MOZ_SPAN_GCC_CONSTEXPR friend bool operator<(const span_iterator& lhs,
                                                const span_iterator& rhs)
   {
     MOZ_RELEASE_ASSERT(lhs.span_ == rhs.span_);
     return lhs.index_ < rhs.index_;
   }
 
-  MOZ_SPAN_GCC_CONSTEXPR friend bool operator<=(const span_iterator& lhs,
+  constexpr friend bool operator<=(const span_iterator& lhs,
                                                 const span_iterator& rhs)
   {
     return !(rhs < lhs);
   }
 
-  MOZ_SPAN_GCC_CONSTEXPR friend bool operator>(const span_iterator& lhs,
+  constexpr friend bool operator>(const span_iterator& lhs,
                                                const span_iterator& rhs)
   {
     return rhs < lhs;
   }
 
-  MOZ_SPAN_GCC_CONSTEXPR friend bool operator>=(const span_iterator& lhs,
+  constexpr friend bool operator>=(const span_iterator& lhs,
                                                 const span_iterator& rhs)
   {
     return !(rhs > lhs);
   }
 
   void swap(span_iterator& rhs)
   {
     std::swap(index_, rhs.index_);
@@ -643,105 +651,105 @@ public:
   MOZ_SPAN_EXPLICITLY_DEFAULTED_CONSTEXPR Span& operator=(Span&& other)
     = default;
 
   // [Span.sub], Span subviews
   /**
    * Subspan with first N elements with compile-time N.
    */
   template<size_t Count>
-  MOZ_SPAN_GCC_CONSTEXPR Span<element_type, Count> First() const
+  constexpr Span<element_type, Count> First() const
   {
     MOZ_RELEASE_ASSERT(Count <= size());
     return { data(), Count };
   }
 
   /**
    * Subspan with last N elements with compile-time N.
    */
   template<size_t Count>
-  MOZ_SPAN_GCC_CONSTEXPR Span<element_type, Count> Last() const
+  constexpr Span<element_type, Count> Last() const
   {
     const size_t len = size();
     MOZ_RELEASE_ASSERT(Count <= len);
     return { data() + (len - Count), Count };
   }
 
   /**
    * Subspan with compile-time start index and length.
    */
   template<size_t Offset, size_t Count = dynamic_extent>
-  MOZ_SPAN_GCC_CONSTEXPR Span<element_type, Count> Subspan() const
+  constexpr Span<element_type, Count> Subspan() const
   {
     const size_t len = size();
     MOZ_RELEASE_ASSERT(Offset <= len &&
       (Count == dynamic_extent || (Offset + Count <= len)));
     return { data() + Offset,
              Count == dynamic_extent ? len - Offset : Count };
   }
 
   /**
    * Subspan with first N elements with run-time N.
    */
-  MOZ_SPAN_GCC_CONSTEXPR Span<element_type, dynamic_extent> First(
+  constexpr Span<element_type, dynamic_extent> First(
     index_type aCount) const
   {
     MOZ_RELEASE_ASSERT(aCount <= size());
     return { data(), aCount };
   }
 
   /**
    * Subspan with last N elements with run-time N.
    */
-  MOZ_SPAN_GCC_CONSTEXPR Span<element_type, dynamic_extent> Last(
+  constexpr Span<element_type, dynamic_extent> Last(
     index_type aCount) const
   {
     const size_t len = size();
     MOZ_RELEASE_ASSERT(aCount <= len);
     return { data() + (len - aCount), aCount };
   }
 
   /**
    * Subspan with run-time start index and length.
    */
-  MOZ_SPAN_GCC_CONSTEXPR Span<element_type, dynamic_extent> Subspan(
+  constexpr Span<element_type, dynamic_extent> Subspan(
     index_type aStart,
     index_type aLength = dynamic_extent) const
   {
     const size_t len = size();
     MOZ_RELEASE_ASSERT(aStart <= len &&
                        (aLength == dynamic_extent ||
                         (aStart + aLength <= len)));
     return { data() + aStart,
              aLength == dynamic_extent ? len - aStart : aLength };
   }
 
   /**
    * Subspan with run-time start index. (Rust's &foo[start..])
    */
-  MOZ_SPAN_GCC_CONSTEXPR Span<element_type, dynamic_extent> From(
+  constexpr Span<element_type, dynamic_extent> From(
     index_type aStart) const
   {
     return Subspan(aStart);
   }
 
   /**
    * Subspan with run-time exclusive end index. (Rust's &foo[..end])
    */
-  MOZ_SPAN_GCC_CONSTEXPR Span<element_type, dynamic_extent> To(
+  constexpr Span<element_type, dynamic_extent> To(
     index_type aEnd) const
   {
     return Subspan(0, aEnd);
   }
 
   /**
    * Subspan with run-time start index and exclusive end index.
    * (Rust's &foo[start..end])
    */
-  MOZ_SPAN_GCC_CONSTEXPR Span<element_type, dynamic_extent> FromTo(
+  constexpr Span<element_type, dynamic_extent> FromTo(
     index_type aStart,
     index_type aEnd) const
   {
     MOZ_RELEASE_ASSERT(aStart <= aEnd);
     return Subspan(aStart, aEnd - aStart);
   }
 
   // [Span.obs], Span observers
@@ -775,17 +783,17 @@ public:
 
   /**
    * Checks if the the length of the span is zero (standard-libray duck
    * typing version).
    */
   constexpr bool empty() const { return size() == 0; }
 
   // [Span.elem], Span element access
-  MOZ_SPAN_GCC_CONSTEXPR reference operator[](index_type idx) const
+  constexpr reference operator[](index_type idx) const
   {
     MOZ_RELEASE_ASSERT(idx < storage_.size());
     return data()[idx];
   }
 
   /**
    * Access element of span by index (standard-library duck typing version).
    */
--- a/modules/libpref/init/all.js
+++ b/modules/libpref/init/all.js
@@ -3019,19 +3019,16 @@ pref("layout.css.osx-font-smoothing.enab
 pref("layout.css.unset-value.enabled", true);
 
 // Is support for the "all" shorthand enabled?
 pref("layout.css.all-shorthand.enabled", true);
 
 // Is support for CSS overflow-clip-box enabled for non-UA sheets?
 pref("layout.css.overflow-clip-box.enabled", false);
 
-// Is support for CSS grid enabled?
-pref("layout.css.grid.enabled", true);
-
 // Is support for CSS "grid-template-{columns,rows}: subgrid X" enabled?
 pref("layout.css.grid-template-subgrid-value.enabled", false);
 
 // Is support for CSS contain enabled?
 pref("layout.css.contain.enabled", false);
 
 // Is support for CSS box-decoration-break enabled?
 pref("layout.css.box-decoration-break.enabled", true);
--- a/netwerk/dns/nsDNSService2.cpp
+++ b/netwerk/dns/nsDNSService2.cpp
@@ -290,18 +290,16 @@ nsDNSRecord::ReportUnusable(uint16_t aPo
     return NS_OK;
 }
 
 //-----------------------------------------------------------------------------
 
 class nsDNSAsyncRequest final : public nsResolveHostCallback
                               , public nsICancelable
 {
-    ~nsDNSAsyncRequest() = default;
-
 public:
     NS_DECL_THREADSAFE_ISUPPORTS
     NS_DECL_NSICANCELABLE
 
     nsDNSAsyncRequest(nsHostResolver   *res,
                       const nsACString &host,
                       const OriginAttributes &attrs,
                       nsIDNSListener   *listener,
@@ -326,38 +324,38 @@ public:
 
     RefPtr<nsHostResolver> mResolver;
     nsCString                mHost; // hostname we're resolving
     const OriginAttributes   mOriginAttributes; // The originAttributes for this resolving
     nsCOMPtr<nsIDNSListener> mListener;
     uint16_t                 mFlags;
     uint16_t                 mAF;
     nsCString                mNetworkInterface;
+private:
+    virtual ~nsDNSAsyncRequest() = default;
 };
 
+NS_IMPL_ISUPPORTS(nsDNSAsyncRequest, nsICancelable)
+
 void
 nsDNSAsyncRequest::OnResolveHostComplete(nsHostResolver *resolver,
                                          nsHostRecord   *hostRecord,
                                          nsresult        status)
 {
     // need to have an owning ref when we issue the callback to enable
     // the caller to be able to addref/release multiple times without
     // destroying the record prematurely.
     nsCOMPtr<nsIDNSRecord> rec;
     if (NS_SUCCEEDED(status)) {
         NS_ASSERTION(hostRecord, "no host record");
         rec = new nsDNSRecord(hostRecord);
     }
 
     mListener->OnLookupComplete(this, rec, status);
     mListener = nullptr;
-
-    // release the reference to ourselves that was added before we were
-    // handed off to the host resolver.
-    NS_RELEASE_THIS();
 }
 
 bool
 nsDNSAsyncRequest::EqualsAsyncListener(nsIDNSListener *aListener)
 {
     nsCOMPtr<nsIDNSListenerProxy> wrapper = do_QueryInterface(mListener);
     if (wrapper) {
         nsCOMPtr<nsIDNSListener> originalListener;
@@ -375,50 +373,53 @@ nsDNSAsyncRequest::SizeOfIncludingThis(M
     // The following fields aren't measured.
     // - mHost, because it's a non-owning pointer
     // - mResolver, because it's a non-owning pointer
     // - mListener, because it's a non-owning pointer
 
     return n;
 }
 
-NS_IMPL_ISUPPORTS(nsDNSAsyncRequest, nsICancelable)
-
 NS_IMETHODIMP
 nsDNSAsyncRequest::Cancel(nsresult reason)
 {
     NS_ENSURE_ARG(NS_FAILED(reason));
     mResolver->DetachCallback(mHost.get(), mOriginAttributes, mFlags, mAF,
                               mNetworkInterface.get(), this, reason);
     return NS_OK;
 }
 
 //-----------------------------------------------------------------------------
 
-class nsDNSSyncRequest : public nsResolveHostCallback
+class nsDNSSyncRequest
+    : public nsResolveHostCallback
 {
+    NS_DECL_THREADSAFE_ISUPPORTS
 public:
     explicit nsDNSSyncRequest(PRMonitor *mon)
         : mDone(false)
         , mStatus(NS_OK)
         , mMonitor(mon) {}
-    virtual ~nsDNSSyncRequest() = default;
 
     void OnResolveHostComplete(nsHostResolver *, nsHostRecord *, nsresult) override;
     bool EqualsAsyncListener(nsIDNSListener *aListener) override;
     size_t SizeOfIncludingThis(mozilla::MallocSizeOf) const override;
 
     bool                   mDone;
     nsresult               mStatus;
     RefPtr<nsHostRecord> mHostRecord;
 
 private:
+    virtual ~nsDNSSyncRequest() = default;
+
     PRMonitor             *mMonitor;
 };
 
+NS_IMPL_ISUPPORTS0(nsDNSSyncRequest)
+
 void
 nsDNSSyncRequest::OnResolveHostComplete(nsHostResolver *resolver,
                                         nsHostRecord   *hostRecord,
                                         nsresult        status)
 {
     // store results, and wake up nsDNSService::Resolve to process results.
     PR_EnterMonitor(mMonitor);
     mDone = true;
@@ -793,26 +794,27 @@ nsDNSService::AsyncResolveExtended(const
                                       listener, target_, attrs,
                                       result);
 }
 
 NS_IMETHODIMP
 nsDNSService::AsyncResolveExtendedNative(const nsACString        &aHostname,
                                          uint32_t                 flags,
                                          const nsACString        &aNetworkInterface,
-                                         nsIDNSListener          *listener,
+                                         nsIDNSListener          *aListener,
                                          nsIEventTarget          *target_,
                                          const OriginAttributes  &aOriginAttributes,
                                          nsICancelable          **result)
 {
     // grab reference to global host resolver and IDN service.  beware
     // simultaneous shutdown!!
     RefPtr<nsHostResolver> res;
     nsCOMPtr<nsIIDNService> idn;
     nsCOMPtr<nsIEventTarget> target = target_;
+    nsCOMPtr<nsIDNSListener> listener = aListener;
     bool localDomain = false;
     {
         MutexAutoLock lock(mLock);
 
         if (mDisablePrefetch && (flags & RESOLVE_SPECULATE))
             return NS_ERROR_DNS_LOOKUP_QUEUE_FULL;
 
         res = mResolver;
@@ -845,31 +847,26 @@ nsDNSService::AsyncResolveExtendedNative
     }
 
     if (target) {
         listener = new DNSListenerProxy(listener, target);
     }
 
     uint16_t af = GetAFForLookup(hostname, flags);
 
-    auto *req =
+    MOZ_ASSERT(listener);
+    RefPtr<nsDNSAsyncRequest> req =
         new nsDNSAsyncRequest(res, hostname, aOriginAttributes, listener, flags, af,
                               aNetworkInterface);
     if (!req)
         return NS_ERROR_OUT_OF_MEMORY;
-    NS_ADDREF(*result = req);
 
-    // addref for resolver; will be released when OnResolveHostComplete is called.
-    NS_ADDREF(req);
     rv = res->ResolveHost(req->mHost.get(), req->mOriginAttributes, flags, af,
                           req->mNetworkInterface.get(), req);
-    if (NS_FAILED(rv)) {
-        NS_RELEASE(req);
-        NS_RELEASE(*result);
-    }
+    req.forget(result);
     return rv;
 }
 
 NS_IMETHODIMP
 nsDNSService::CancelAsyncResolve(const nsACString &aHostname,
                                  uint32_t          aFlags,
                                  nsIDNSListener   *aListener,
                                  nsresult          aReason,
@@ -1050,35 +1047,33 @@ nsDNSService::ResolveInternal(const nsAC
     // we need to use a monitor! ;-)
     //
 
     PRMonitor *mon = PR_NewMonitor();
     if (!mon)
         return NS_ERROR_OUT_OF_MEMORY;
 
     PR_EnterMonitor(mon);
-    nsDNSSyncRequest syncReq(mon);
+    RefPtr<nsDNSSyncRequest> syncReq = new nsDNSSyncRequest(mon);
 
     uint16_t af = GetAFForLookup(hostname, flags);
 
-    rv = res->ResolveHost(hostname.get(), aOriginAttributes, flags, af, "", &syncReq);
+    rv = res->ResolveHost(hostname.get(), aOriginAttributes, flags, af, "", syncReq);
     if (NS_SUCCEEDED(rv)) {
         // wait for result
-        while (!syncReq.mDone)
+        while (!syncReq->mDone) {
             PR_Wait(mon, PR_INTERVAL_NO_TIMEOUT);
+        }
 
-        if (NS_FAILED(syncReq.mStatus))
-            rv = syncReq.mStatus;
-        else {
-            NS_ASSERTION(syncReq.mHostRecord, "no host record");
-            auto *rec = new nsDNSRecord(syncReq.mHostRecord);
-            if (!rec)
-                rv = NS_ERROR_OUT_OF_MEMORY;
-            else
-                NS_ADDREF(*result = rec);
+        if (NS_FAILED(syncReq->mStatus)) {
+            rv = syncReq->mStatus;
+        } else {
+            NS_ASSERTION(syncReq->mHostRecord, "no host record");
+            RefPtr<nsDNSRecord> rec = new nsDNSRecord(syncReq->mHostRecord);
+            rec.forget(result);
         }
     }
 
     PR_ExitMonitor(mon);
     PR_DestroyMonitor(mon);
     return rv;
 }
 
--- a/netwerk/dns/nsHostResolver.cpp
+++ b/netwerk/dns/nsHostResolver.cpp
@@ -184,17 +184,16 @@ nsHostRecord::nsHostRecord(const nsHostK
     af = key->af;
     netInterface = host + strlen(key->host) + 1;
     memcpy((char *) netInterface, key->netInterface,
            strlen(key->netInterface) + 1);
     originSuffix = netInterface + strlen(key->netInterface) + 1;
     memcpy((char *) originSuffix, key->originSuffix,
            strlen(key->originSuffix) + 1);
     PR_INIT_CLIST(this);
-    PR_INIT_CLIST(&callbacks);
 }
 
 nsresult
 nsHostRecord::Create(const nsHostKey *key, nsHostRecord **result)
 {
     size_t hostLen = strlen(key->host) + 1;
     size_t netInterfaceLen = strlen(key->netInterface) + 1;
     size_t originSuffixLen = strlen(key->originSuffix) + 1;
@@ -225,16 +224,18 @@ nsHostRecord::CopyExpirationTimesAndFlag
     mValidStart = aFromHostRecord->mValidStart;
     mValidEnd = aFromHostRecord->mValidEnd;
     mGraceStart = aFromHostRecord->mGraceStart;
     mDoomed = aFromHostRecord->mDoomed;
 }
 
 nsHostRecord::~nsHostRecord()
 {
+    mCallbacks.clear();
+
     Telemetry::Accumulate(Telemetry::DNS_BLACKLIST_COUNT, mBlacklistedCount);
     delete addr_info;
 }
 
 bool
 nsHostRecord::Blacklisted(NetAddr *aQuery)
 {
     // must call locked
@@ -321,41 +322,39 @@ nsHostRecord::HasUsableResult(const mozi
     if (CheckExpiration(now) == EXP_EXPIRED) {
         return false;
     }
 
     return addr_info || addr || negative;
 }
 
 static size_t
-SizeOfResolveHostCallbackListExcludingHead(const PRCList *head,
+SizeOfResolveHostCallbackListExcludingHead(const mozilla::LinkedList<RefPtr<nsResolveHostCallback>>& aCallbacks,
                                            MallocSizeOf mallocSizeOf)
 {
-    size_t n = 0;
-    PRCList *curr = head->next;
-    while (curr != head) {
-        nsResolveHostCallback *callback =
-            static_cast<nsResolveHostCallback*>(curr);
-        n += callback->SizeOfIncludingThis(mallocSizeOf);
-        curr = curr->next;
+    size_t n = aCallbacks.sizeOfIncludingThis(mallocSizeOf);
+
+    for (const nsResolveHostCallback* t = aCallbacks.getFirst(); t; t = t->getNext()) {
+      n += t->SizeOfIncludingThis(mallocSizeOf);
     }
+
     return n;
 }
 
 size_t
 nsHostRecord::SizeOfIncludingThis(MallocSizeOf mallocSizeOf) const
 {
     size_t n = mallocSizeOf(this);
 
     // The |host| field (inherited from nsHostKey) actually points to extra
     // memory that is allocated beyond the end of the nsHostRecord (see
     // nsHostRecord::Create()).  So it will be included in the
     // |mallocSizeOf(this)| call above.
 
-    n += SizeOfResolveHostCallbackListExcludingHead(&callbacks, mallocSizeOf);
+    n += SizeOfResolveHostCallbackListExcludingHead(mCallbacks, mallocSizeOf);
     n += addr_info ? addr_info->SizeOfIncludingThis(mallocSizeOf) : 0;
     n += mallocSizeOf(addr.get());
 
     n += mBlacklistedItems.ShallowSizeOfExcludingThis(mallocSizeOf);
     for (size_t i = 0; i < mBlacklistedItems.Length(); i++) {
         n += mBlacklistedItems[i].SizeOfExcludingThisIfUnshared(mallocSizeOf);
     }
     return n;
@@ -728,29 +727,30 @@ nsHostResolver::MoveQueue(nsHostRecord *
 }
 
 nsresult
 nsHostResolver::ResolveHost(const char             *host,
                             const OriginAttributes &aOriginAttributes,
                             uint16_t                flags,
                             uint16_t                af,
                             const char             *netInterface,
-                            nsResolveHostCallback  *callback)
+                            nsResolveHostCallback  *aCallback)
 {
     NS_ENSURE_TRUE(host && *host, NS_ERROR_UNEXPECTED);
     NS_ENSURE_TRUE(netInterface, NS_ERROR_UNEXPECTED);
 
     LOG(("Resolving host [%s%s%s]%s.\n", LOG_HOST(host, netInterface),
          flags & RES_BYPASS_CACHE ? " - bypassing cache" : ""));
 
     // ensure that we are working with a valid hostname before proceeding.  see
     // bug 304904 for details.
     if (!net_IsValidHostName(nsDependentCString(host)))
         return NS_ERROR_UNKNOWN_HOST;
 
+    RefPtr<nsResolveHostCallback> callback(aCallback);
     // if result is set inside the lock, then we need to issue the
     // callback before returning.
     RefPtr<nsHostRecord> result;
     nsresult status = NS_OK, rv = NS_OK;
     {
         MutexAutoLock lock(mLock);
 
         if (mShutdown)
@@ -926,36 +926,36 @@ nsHostResolver::ResolveHost(const char  
                 }
                 // If no valid address was found in the cache or this is an
                 // AF_UNSPEC request, then start a new lookup.
                 if (!result) {
                     LOG(("  No usable address in cache for host [%s%s%s].",
                          LOG_HOST(host, netInterface)));
 
                     // Add callback to the list of pending callbacks.
-                    PR_APPEND_LINK(callback, &he->rec->callbacks);
+                    he->rec->mCallbacks.insertBack(callback);
                     he->rec->flags = flags;
                     rv = IssueLookup(he->rec);
                     Telemetry::Accumulate(Telemetry::DNS_LOOKUP_METHOD2,
                                           METHOD_NETWORK_FIRST);
-                    if (NS_FAILED(rv)) {
-                        PR_REMOVE_AND_INIT_LINK(callback);
+                    if (NS_FAILED(rv) && callback->isInList()) {
+                        callback->remove();
                     }
                     else {
                         LOG(("  DNS lookup for host [%s%s%s] blocking "
                              "pending 'getaddrinfo' query: callback [%p]",
-                             LOG_HOST(host, netInterface), callback));
+                             LOG_HOST(host, netInterface), callback.get()));
                     }
                 }
             }
             else {
                 LOG(("  Host [%s%s%s] is being resolved. Appending callback "
-                     "[%p].", LOG_HOST(host, netInterface), callback));
+                     "[%p].", LOG_HOST(host, netInterface), callback.get()));
 
-                PR_APPEND_LINK(callback, &he->rec->callbacks);
+                he->rec->mCallbacks.insertBack(callback);
                 if (he->rec->onQueue) {
                     Telemetry::Accumulate(Telemetry::DNS_LOOKUP_METHOD2,
                                           METHOD_NETWORK_SHARED);
 
                     // Consider the case where we are on a pending queue of
                     // lower priority than the request is being made at.
                     // In that case we should upgrade to the higher queue.
 
@@ -971,60 +971,66 @@ nsHostResolver::ResolveHost(const char  
                         MoveQueue(he->rec, mMediumQ);
                         he->rec->flags = flags;
                         mIdleThreadCV.Notify();
                     }
                 }
             }
         }
     }
+
     if (result) {
+        if (callback->isInList()) {
+            callback->remove();
+        }
         callback->OnResolveHostComplete(this, result, status);
     }
 
     return rv;
 }
 
 void
 nsHostResolver::DetachCallback(const char             *host,
                                const OriginAttributes &aOriginAttributes,
                                uint16_t                flags,
                                uint16_t                af,
                                const char             *netInterface,
-                               nsResolveHostCallback  *callback,
+                               nsResolveHostCallback  *aCallback,
                                nsresult                status)
 {
     RefPtr<nsHostRecord> rec;
+    RefPtr<nsResolveHostCallback> callback(aCallback);
+
     {
         MutexAutoLock lock(mLock);
 
         nsAutoCString originSuffix;
         aOriginAttributes.CreateSuffix(originSuffix);
 
         nsHostKey key = { host, flags, af, netInterface, originSuffix.get() };
         auto he = static_cast<nsHostDBEnt*>(mDB.Search(&key));
         if (he) {
             // walk list looking for |callback|... we cannot assume
             // that it will be there!
-            PRCList *node = he->rec->callbacks.next;
-            while (node != &he->rec->callbacks) {
-                if (static_cast<nsResolveHostCallback *>(node) == callback) {
-                    PR_REMOVE_LINK(callback);
+
+            for (nsResolveHostCallback* c: he->rec->mCallbacks) {
+                if (c == callback) {
                     rec = he->rec;
+                    c->remove();
                     break;
                 }
-                node = node->next;
             }
         }
     }
 
     // complete callback with the given status code; this would only be done if
     // the record was in the process of being resolved.
-    if (rec)
+    if (rec) {
         callback->OnResolveHostComplete(this, rec, status);
+    }
 }
 
 nsresult
 nsHostResolver::ConditionallyCreateThread(nsHostRecord *rec)
 {
     if (mNumIdleThreads) {
         // wake up idle thread to process this lookup
         mIdleThreadCV.Notify();
@@ -1288,30 +1294,30 @@ different_rrset(AddrInfo *rrset1, AddrIn
 // CompleteLookup() checks if the resolving should be redone and if so it
 // returns LOOKUP_RESOLVEAGAIN, but only if 'status' is not NS_ERROR_ABORT.
 // takes ownership of AddrInfo parameter
 nsHostResolver::LookupStatus
 nsHostResolver::CompleteLookup(nsHostRecord* rec, nsresult status, AddrInfo* newRRSet)
 {
     // get the list of pending callbacks for this lookup, and notify
     // them that the lookup is complete.
-    PRCList cbs;
-    PR_INIT_CLIST(&cbs);
+    mozilla::LinkedList<RefPtr<nsResolveHostCallback>> cbs;
+
     {
         MutexAutoLock lock(mLock);
 
         if (rec->mResolveAgain && (status != NS_ERROR_ABORT)) {
             LOG(("nsHostResolver record %p resolve again due to flushcache\n", rec));
             rec->mResolveAgain = false;
             delete newRRSet;
             return LOOKUP_RESOLVEAGAIN;
         }
 
         // grab list of callbacks to notify
-        MoveCList(rec->callbacks, cbs);
+        cbs = mozilla::Move(rec->mCallbacks);
 
         // update record fields.  We might have a rec->addr_info already if a
         // previous lookup result expired and we're reresolving it..
         AddrInfo  *old_addr_info;
         {
             MutexAutoLock lock(rec->addr_info_lock);
             if (different_rrset(rec->addr_info, newRRSet)) {
                 LOG(("nsHostResolver record %p new gencnt\n", rec));
@@ -1369,25 +1375,18 @@ nsHostResolver::CompleteLookup(nsHostRec
                 NS_WARNING_ASSERTION(
                     NS_SUCCEEDED(rv),
                     "Could not issue second async lookup for TTL.");
             }
 #endif
         }
     }
 
-    if (!PR_CLIST_IS_EMPTY(&cbs)) {
-        PRCList *node = cbs.next;
-        while (node != &cbs) {
-            nsResolveHostCallback *callback =
-                    static_cast<nsResolveHostCallback *>(node);
-            node = node->next;
-            callback->OnResolveHostComplete(this, rec, status);
-            // NOTE: callback must not be dereferenced after this point!!
-        }
+    for (nsResolveHostCallback* c = cbs.getFirst(); c; c = c->removeAndGetNext()) {
+        c->OnResolveHostComplete(this, rec, status);
     }
 
     NS_RELEASE(rec);
 
     return LOOKUP_OK;
 }
 
 void
@@ -1405,34 +1404,28 @@ nsHostResolver::CancelAsyncRequest(const
     nsAutoCString originSuffix;
     aOriginAttributes.CreateSuffix(originSuffix);
 
     // Lookup the host record associated with host, flags & address family
     nsHostKey key = { host, flags, af, netInterface, originSuffix.get() };
     auto he = static_cast<nsHostDBEnt*>(mDB.Search(&key));
     if (he) {
         nsHostRecord* recPtr = nullptr;
-        PRCList *node = he->rec->callbacks.next;
-        // Remove the first nsDNSAsyncRequest callback which matches the
-        // supplied listener object
-        while (node != &he->rec->callbacks) {
-            nsResolveHostCallback *callback
-                = static_cast<nsResolveHostCallback *>(node);
-            if (callback && (callback->EqualsAsyncListener(aListener))) {
-                // Remove from the list of callbacks
-                PR_REMOVE_LINK(callback);
+
+        for (RefPtr<nsResolveHostCallback> c : he->rec->mCallbacks) {
+            if (c->EqualsAsyncListener(aListener)) {
+                c->remove();
                 recPtr = he->rec;
-                callback->OnResolveHostComplete(this, recPtr, status);
+                c->OnResolveHostComplete(this, recPtr, status);
                 break;
             }
-            node = node->next;
         }
 
         // If there are no more callbacks, remove the hash table entry
-        if (recPtr && PR_CLIST_IS_EMPTY(&recPtr->callbacks)) {
+        if (recPtr && recPtr->mCallbacks.isEmpty()) {
             mDB.Remove((nsHostKey *)recPtr);
             // If record is on a Queue, remove it and then deref it
             if (recPtr->next != recPtr) {
                 PR_REMOVE_LINK(recPtr);
                 NS_RELEASE(recPtr);
             }
         }
     }
--- a/netwerk/dns/nsHostResolver.h
+++ b/netwerk/dns/nsHostResolver.h
@@ -15,16 +15,17 @@
 #include "nsISupportsImpl.h"
 #include "nsIDNSListener.h"
 #include "nsIDNSService.h"
 #include "nsString.h"
 #include "nsTArray.h"
 #include "GetAddrInfo.h"
 #include "mozilla/net/DNS.h"
 #include "mozilla/net/DashboardTypes.h"
+#include "mozilla/LinkedList.h"
 #include "mozilla/TimeStamp.h"
 #include "mozilla/UniquePtr.h"
 
 class nsHostResolver;
 class nsHostRecord;
 class nsResolveHostCallback;
 
 #define MAX_RESOLVER_THREADS_FOR_ANY_PRIORITY  3
@@ -125,18 +126,17 @@ public:
     static DnsPriority GetPriority(uint16_t aFlags);
 
     bool RemoveOrRefresh(); // Mark records currently