Bug 1404997 - P8. Follow coding style for members and methods. r?pehrsons,r?drno draft
author Jean-Yves Avenard <jyavenard@mozilla.com>
Fri, 08 Dec 2017 21:32:38 +0100
changeset 712226 8a47c2cb3ee76070c536d2d0636dffc9a3842c5c
parent 712225 cdad4fb6c3ac13149eb3b1e3dc0ebe08fd97ca70
child 712227 d8a32c4811e957d65ae832c2f9e3a45694399ca4
push id 93280
push user bmo:jyavenard@mozilla.com
push date Fri, 15 Dec 2017 20:49:10 +0000
reviewers pehrsons, drno
bugs 1404997
milestone 59.0a1
Bug 1404997 - P8. Follow coding style for members and methods. r?pehrsons,r?drno It allows to more easily distinguish between methods from upstream webrtc.org and our code. MozReview-Commit-ID: ILQhEAYbSmc
media/webrtc/signaling/gtest/mediapipeline_unittest.cpp
media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.cpp
--- a/media/webrtc/signaling/gtest/mediapipeline_unittest.cpp
+++ b/media/webrtc/signaling/gtest/mediapipeline_unittest.cpp
@@ -296,29 +296,29 @@ class TestAgent {
 
   uint32_t GetLocalSSRC() {
     std::vector<uint32_t> res;
     res = audio_conduit_->GetLocalSSRCs();
     return res.empty() ? 0 : res[0];
   }
 
   int GetAudioRtpCountSent() {
-    return audio_pipeline_->rtp_packets_sent();
+    return audio_pipeline_->RtpPacketsSent();
   }
 
   int GetAudioRtpCountReceived() {
-    return audio_pipeline_->rtp_packets_received();
+    return audio_pipeline_->RtpPacketsReceived();
   }
 
   int GetAudioRtcpCountSent() {
-    return audio_pipeline_->rtcp_packets_sent();
+    return audio_pipeline_->RtcpPacketsSent();
   }
 
   int GetAudioRtcpCountReceived() {
-    return audio_pipeline_->rtcp_packets_received();
+    return audio_pipeline_->RtcpPacketsReceived();
   }
 
 
   void SetUsingBundle(bool use_bundle) {
     use_bundle_ = use_bundle;
   }
 
  protected:
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -239,17 +239,17 @@ public:
   {
     MutexAutoLock lock(mMutex);
     mListeners.Clear();
   }
 
 protected:
   virtual ~VideoFrameConverter() { MOZ_COUNT_DTOR(VideoFrameConverter); }
 
-  static void DeleteBuffer(uint8* data) { delete[] data; }
+  static void DeleteBuffer(uint8* aData) { delete[] aData; }
 
   // This takes ownership of the buffer and attached it to the VideoFrame we
   // send to the listeners
   void VideoFrameConverted(UniquePtr<uint8[]> aBuffer,
                            unsigned int aVideoFrameLength,
                            unsigned short aWidth,
                            unsigned short aHeight,
                            VideoType aVideoType,
@@ -567,22 +567,22 @@ public:
 
     while (mPacketizer->PacketsAvailable()) {
       mPacketizer->Output(mPacket);
       mConduit->SendAudioFrame(
         mPacket, mPacketizer->PacketSize(), rate, mPacketizer->Channels(), 0);
     }
   }
 
-  void QueueAudioChunk(TrackRate rate, const AudioChunk& chunk, bool enabled)
+  void QueueAudioChunk(TrackRate aRate, const AudioChunk& aChunk, bool aEnabled)
   {
     RefPtr<AudioProxyThread> self = this;
     nsresult rv = mTaskQueue->Dispatch(NS_NewRunnableFunction(
-      "AudioProxyThread::QueueAudioChunk", [self, rate, chunk, enabled]() {
-        self->InternalProcessAudioChunk(rate, chunk, enabled);
+      "AudioProxyThread::QueueAudioChunk", [self, aRate, aChunk, aEnabled]() {
+        self->InternalProcessAudioChunk(aRate, aChunk, aEnabled);
       }));
     MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
   }
 
 protected:
   virtual ~AudioProxyThread()
   {
     // Conduits must be released on MainThread, and we might have the last
@@ -598,289 +598,289 @@ protected:
   // Only accessed on mTaskQueue
   UniquePtr<AudioPacketizer<int16_t, int16_t>> mPacketizer;
   // A buffer to hold a single packet of audio.
   int16_t mPacket[AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t)];
 };
 
 static char kDTLSExporterLabel[] = "EXTRACTOR-dtls_srtp";
 
-MediaPipeline::MediaPipeline(const std::string& pc,
-                             Direction direction,
-                             nsCOMPtr<nsIEventTarget> main_thread,
-                             nsCOMPtr<nsIEventTarget> sts_thread,
-                             RefPtr<MediaSessionConduit> conduit)
-  : direction_(direction)
-  , level_(0)
-  , conduit_(conduit)
-  , rtp_(nullptr, RTP)
-  , rtcp_(nullptr, RTCP)
-  , main_thread_(main_thread)
-  , sts_thread_(sts_thread)
-  , rtp_packets_sent_(0)
-  , rtcp_packets_sent_(0)
-  , rtp_packets_received_(0)
-  , rtcp_packets_received_(0)
-  , rtp_bytes_sent_(0)
-  , rtp_bytes_received_(0)
-  , pc_(pc)
-  , description_()
-  , rtp_parser_(webrtc::RtpHeaderParser::Create())
+MediaPipeline::MediaPipeline(const std::string& aPc,
+                             DirectionType aDirection,
+                             nsCOMPtr<nsIEventTarget> aMainThread,
+                             nsCOMPtr<nsIEventTarget> aStsThread,
+                             RefPtr<MediaSessionConduit> aConduit)
+  : mDirection(aDirection)
+  , mLevel(0)
+  , mConduit(aConduit)
+  , mRtp(nullptr, RTP)
+  , mRtcp(nullptr, RTCP)
+  , mMainThread(aMainThread)
+  , mStsThread(aStsThread)
+  , mTransport(new PipelineTransport(this)) // PipelineTransport() will access
+                                            // this->mStsThread; moved here
+                                            // for safety
+  , mRtpPacketsSent(0)
+  , mRtcpPacketsSent(0)
+  , mRtpPacketsReceived(0)
+  , mRtcpPacketsReceived(0)
+  , mRtpBytesSent(0)
+  , mRtpBytesReceived(0)
+  , mPc(aPc)
+  , mDescription()
+  , mRtpParser(webrtc::RtpHeaderParser::Create())
+  , mPacketDumper(new PacketDumper(mPc))
 {
-  // PipelineTransport() will access this->sts_thread_; moved here for safety
-  transport_ = new PipelineTransport(this);
-  packet_dumper_ = new PacketDumper(pc_);
-
-  if (direction_ == RECEIVE) {
-    conduit_->SetReceiverTransport(transport_);
+  if (mDirection == RECEIVE) {
+    mConduit->SetReceiverTransport(mTransport);
   } else {
-    conduit_->SetTransmitterTransport(transport_);
+    mConduit->SetTransmitterTransport(mTransport);
   }
 }
 
 MediaPipeline::~MediaPipeline()
 {
-  CSFLogInfo(LOGTAG, "Destroying MediaPipeline: %s", description_.c_str());
+  CSFLogInfo(LOGTAG, "Destroying MediaPipeline: %s", mDescription.c_str());
   // MediaSessionConduit insists that it be released on main.
   RUN_ON_THREAD(
-    main_thread_, WrapRelease(conduit_.forget()), NS_DISPATCH_NORMAL);
+    mMainThread, WrapRelease(mConduit.forget()), NS_DISPATCH_NORMAL);
 }
 
 void
 MediaPipeline::Shutdown_m()
 {
-  CSFLogInfo(LOGTAG, "%s in %s", description_.c_str(), __FUNCTION__);
+  CSFLogInfo(LOGTAG, "%s in %s", mDescription.c_str(), __FUNCTION__);
 
   Stop();
   DetachMedia();
 
-  RUN_ON_THREAD(sts_thread_,
+  RUN_ON_THREAD(mStsThread,
                 WrapRunnable(RefPtr<MediaPipeline>(this),
                              &MediaPipeline::DetachTransport_s),
                 NS_DISPATCH_NORMAL);
 }
 
 void
 MediaPipeline::DetachTransport_s()
 {
-  ASSERT_ON_THREAD(sts_thread_);
+  ASSERT_ON_THREAD(mStsThread);
 
   disconnect_all();
-  transport_->Detach();
-  rtp_.Detach();
-  rtcp_.Detach();
+  mTransport->Detach();
+  mRtp.Detach();
+  mRtcp.Detach();
 
   // Make sure any cycles are broken
-  packet_dumper_ = nullptr;
+  mPacketDumper = nullptr;
 }
 
 nsresult
 MediaPipeline::AttachTransport_s()
 {
-  ASSERT_ON_THREAD(sts_thread_);
+  ASSERT_ON_THREAD(mStsThread);
   nsresult res;
-  MOZ_ASSERT(rtp_.transport_);
-  MOZ_ASSERT(rtcp_.transport_);
-  res = ConnectTransport_s(rtp_);
+  MOZ_ASSERT(mRtp.mTransport);
+  MOZ_ASSERT(mRtcp.mTransport);
+  res = ConnectTransport_s(mRtp);
   if (NS_FAILED(res)) {
     return res;
   }
 
-  if (rtcp_.transport_ != rtp_.transport_) {
-    res = ConnectTransport_s(rtcp_);
+  if (mRtcp.mTransport != mRtp.mTransport) {
+    res = ConnectTransport_s(mRtcp);
     if (NS_FAILED(res)) {
       return res;
     }
   }
 
-  transport_->Attach(this);
+  mTransport->Attach(this);
 
   return NS_OK;
 }
 
 void
-MediaPipeline::UpdateTransport_m(RefPtr<TransportFlow> rtp_transport,
-                                 RefPtr<TransportFlow> rtcp_transport,
-                                 nsAutoPtr<MediaPipelineFilter> filter)
+MediaPipeline::UpdateTransport_m(RefPtr<TransportFlow> aRtpTransport,
+                                 RefPtr<TransportFlow> aRtcpTransport,
+                                 nsAutoPtr<MediaPipelineFilter> aFilter)
 {
-  RUN_ON_THREAD(sts_thread_,
+  RUN_ON_THREAD(mStsThread,
                 WrapRunnable(RefPtr<MediaPipeline>(this),
                              &MediaPipeline::UpdateTransport_s,
-                             rtp_transport,
-                             rtcp_transport,
-                             filter),
+                             aRtpTransport,
+                             aRtcpTransport,
+                             aFilter),
                 NS_DISPATCH_NORMAL);
 }
 
 void
-MediaPipeline::UpdateTransport_s(RefPtr<TransportFlow> rtp_transport,
-                                 RefPtr<TransportFlow> rtcp_transport,
-                                 nsAutoPtr<MediaPipelineFilter> filter)
+MediaPipeline::UpdateTransport_s(RefPtr<TransportFlow> aRtpTransport,
+                                 RefPtr<TransportFlow> aRtcpTransport,
+                                 nsAutoPtr<MediaPipelineFilter> aFilter)
 {
   bool rtcp_mux = false;
-  if (!rtcp_transport) {
-    rtcp_transport = rtp_transport;
+  if (!aRtcpTransport) {
+    aRtcpTransport = aRtpTransport;
     rtcp_mux = true;
   }
 
-  if ((rtp_transport != rtp_.transport_) ||
-      (rtcp_transport != rtcp_.transport_)) {
+  if ((aRtpTransport != mRtp.mTransport) ||
+      (aRtcpTransport != mRtcp.mTransport)) {
     disconnect_all();
-    transport_->Detach();
-    rtp_.Detach();
-    rtcp_.Detach();
-    if (rtp_transport && rtcp_transport) {
-      rtp_ = TransportInfo(rtp_transport, rtcp_mux ? MUX : RTP);
-      rtcp_ = TransportInfo(rtcp_transport, rtcp_mux ? MUX : RTCP);
+    mTransport->Detach();
+    mRtp.Detach();
+    mRtcp.Detach();
+    if (aRtpTransport && aRtcpTransport) {
+      mRtp = TransportInfo(aRtpTransport, rtcp_mux ? MUX : RTP);
+      mRtcp = TransportInfo(aRtcpTransport, rtcp_mux ? MUX : RTCP);
       AttachTransport_s();
     }
   }
 
-  if (filter_ && filter) {
+  if (mFilter && aFilter) {
     // Use the new filter, but don't forget any remote SSRCs that we've learned
     // by receiving traffic.
-    filter_->Update(*filter);
+    mFilter->Update(*aFilter);
   } else {
-    filter_ = filter;
+    mFilter = aFilter;
   }
 }
 
 void
-MediaPipeline::AddRIDExtension_m(size_t extension_id)
+MediaPipeline::AddRIDExtension_m(size_t aExtensionId)
 {
-  RUN_ON_THREAD(sts_thread_,
+  RUN_ON_THREAD(mStsThread,
                 WrapRunnable(RefPtr<MediaPipeline>(this),
                              &MediaPipeline::AddRIDExtension_s,
-                             extension_id),
+                             aExtensionId),
                 NS_DISPATCH_NORMAL);
 }
 
 void
-MediaPipeline::AddRIDExtension_s(size_t extension_id)
+MediaPipeline::AddRIDExtension_s(size_t aExtensionId)
 {
-  rtp_parser_->RegisterRtpHeaderExtension(webrtc::kRtpExtensionRtpStreamId,
-                                          extension_id);
+  mRtpParser->RegisterRtpHeaderExtension(webrtc::kRtpExtensionRtpStreamId,
+                                         aExtensionId);
 }
 
 void
-MediaPipeline::AddRIDFilter_m(const std::string& rid)
+MediaPipeline::AddRIDFilter_m(const std::string& aRid)
 {
-  RUN_ON_THREAD(sts_thread_,
+  RUN_ON_THREAD(mStsThread,
                 WrapRunnable(RefPtr<MediaPipeline>(this),
                              &MediaPipeline::AddRIDFilter_s,
-                             rid),
+                             aRid),
                 NS_DISPATCH_NORMAL);
 }
 
 void
-MediaPipeline::AddRIDFilter_s(const std::string& rid)
+MediaPipeline::AddRIDFilter_s(const std::string& aRid)
 {
-  filter_ = new MediaPipelineFilter;
-  filter_->AddRemoteRtpStreamId(rid);
+  mFilter = new MediaPipelineFilter;
+  mFilter->AddRemoteRtpStreamId(aRid);
 }
 
 void
 MediaPipeline::GetContributingSourceStats(
   const nsString& aInboundRtpStreamId,
   FallibleTArray<dom::RTCRTPContributingSourceStats>& aArr) const
 {
   // Get the expiry from now
   DOMHighResTimeStamp expiry = RtpCSRCStats::GetExpiryFromTime(GetNow());
-  for (auto info : csrc_stats_) {
+  for (auto info : mCsrcStats) {
     if (!info.second.Expired(expiry)) {
       RTCRTPContributingSourceStats stats;
       info.second.GetWebidlInstance(stats, aInboundRtpStreamId);
       aArr.AppendElement(stats, fallible);
     }
   }
 }
 
 void
-MediaPipeline::StateChange(TransportFlow* flow, TransportLayer::State state)
+MediaPipeline::StateChange(TransportFlow* aFlow, TransportLayer::State aState)
 {
-  TransportInfo* info = GetTransportInfo_s(flow);
+  TransportInfo* info = GetTransportInfo_s(aFlow);
   MOZ_ASSERT(info);
 
-  if (state == TransportLayer::TS_OPEN) {
+  if (aState == TransportLayer::TS_OPEN) {
     CSFLogInfo(LOGTAG, "Flow is ready");
     TransportReady_s(*info);
-  } else if (state == TransportLayer::TS_CLOSED ||
-             state == TransportLayer::TS_ERROR) {
+  } else if (aState == TransportLayer::TS_CLOSED ||
+             aState == TransportLayer::TS_ERROR) {
     TransportFailed_s(*info);
   }
 }
 
 static bool
-MakeRtpTypeToStringArray(const char** array)
+MakeRtpTypeToStringArray(const char** aArray)
 {
   static const char* RTP_str = "RTP";
   static const char* RTCP_str = "RTCP";
   static const char* MUX_str = "RTP/RTCP mux";
-  array[MediaPipeline::RTP] = RTP_str;
-  array[MediaPipeline::RTCP] = RTCP_str;
-  array[MediaPipeline::MUX] = MUX_str;
+  aArray[MediaPipeline::RTP] = RTP_str;
+  aArray[MediaPipeline::RTCP] = RTCP_str;
+  aArray[MediaPipeline::MUX] = MUX_str;
   return true;
 }
 
 static const char*
 ToString(MediaPipeline::RtpType type)
 {
   static const char* array[(int)MediaPipeline::MAX_RTP_TYPE] = { nullptr };
   // Dummy variable to cause init to happen only on first call
   static bool dummy = MakeRtpTypeToStringArray(array);
   (void)dummy;
   return array[type];
 }
 
 nsresult
-MediaPipeline::TransportReady_s(TransportInfo& info)
+MediaPipeline::TransportReady_s(TransportInfo& aInfo)
 {
-  MOZ_ASSERT(!description_.empty());
+  MOZ_ASSERT(!mDescription.empty());
 
   // TODO(ekr@rtfm.com): implement some kind of notification on
   // failure. bug 852665.
-  if (info.state_ != MP_CONNECTING) {
+  if (aInfo.mState != MP_CONNECTING) {
     CSFLogError(LOGTAG,
                 "Transport ready for flow in wrong state:%s :%s",
-                description_.c_str(),
-                ToString(info.type_));
+                mDescription.c_str(),
+                ToString(aInfo.mType));
     return NS_ERROR_FAILURE;
   }
 
   CSFLogInfo(LOGTAG,
              "Transport ready for pipeline %p flow %s: %s",
              this,
-             description_.c_str(),
-             ToString(info.type_));
+             mDescription.c_str(),
+             ToString(aInfo.mType));
 
   // TODO(bcampen@mozilla.com): Should we disconnect from the flow on failure?
   nsresult res;
 
   // Now instantiate the SRTP objects
   TransportLayerDtls* dtls = static_cast<TransportLayerDtls*>(
-    info.transport_->GetLayer(TransportLayerDtls::ID()));
+    aInfo.mTransport->GetLayer(TransportLayerDtls::ID()));
   MOZ_ASSERT(dtls); // DTLS is mandatory
 
   uint16_t cipher_suite;
   res = dtls->GetSrtpCipher(&cipher_suite);
   if (NS_FAILED(res)) {
     CSFLogError(LOGTAG, "Failed to negotiate DTLS-SRTP. This is an error");
-    info.state_ = MP_CLOSED;
-    UpdateRtcpMuxState(info);
+    aInfo.mState = MP_CLOSED;
+    UpdateRtcpMuxState(aInfo);
     return res;
   }
 
   // SRTP Key Exporter as per RFC 5764 S 4.2
   unsigned char srtp_block[SRTP_TOTAL_KEY_LENGTH * 2];
   res = dtls->ExportKeyingMaterial(
     kDTLSExporterLabel, false, "", srtp_block, sizeof(srtp_block));
   if (NS_FAILED(res)) {
     CSFLogError(LOGTAG, "Failed to compute DTLS-SRTP keys. This is an error");
-    info.state_ = MP_CLOSED;
-    UpdateRtcpMuxState(info);
+    aInfo.mState = MP_CLOSED;
+    UpdateRtcpMuxState(aInfo);
     MOZ_CRASH(); // TODO: Remove once we have enough field experience to
                  // know it doesn't happen. bug 798797. Note that the
                  // code after this never executes.
     return res;
   }
 
   // Slice and dice as per RFC 5764 S 4.2
   unsigned char client_write_key[SRTP_TOTAL_KEY_LENGTH];
@@ -906,36 +906,36 @@ MediaPipeline::TransportReady_s(Transpor
   if (dtls->role() == TransportLayerDtls::CLIENT) {
     write_key = client_write_key;
     read_key = server_write_key;
   } else {
     write_key = server_write_key;
     read_key = client_write_key;
   }
 
-  MOZ_ASSERT(!info.send_srtp_ && !info.recv_srtp_);
-  info.send_srtp_ =
+  MOZ_ASSERT(!aInfo.mSendSrtp && !aInfo.mRecvSrtp);
+  aInfo.mSendSrtp =
     SrtpFlow::Create(cipher_suite, false, write_key, SRTP_TOTAL_KEY_LENGTH);
-  info.recv_srtp_ =
+  aInfo.mRecvSrtp =
     SrtpFlow::Create(cipher_suite, true, read_key, SRTP_TOTAL_KEY_LENGTH);
-  if (!info.send_srtp_ || !info.recv_srtp_) {
+  if (!aInfo.mSendSrtp || !aInfo.mRecvSrtp) {
     CSFLogError(
-      LOGTAG, "Couldn't create SRTP flow for %s", ToString(info.type_));
-    info.state_ = MP_CLOSED;
-    UpdateRtcpMuxState(info);
+      LOGTAG, "Couldn't create SRTP flow for %s", ToString(aInfo.mType));
+    aInfo.mState = MP_CLOSED;
+    UpdateRtcpMuxState(aInfo);
     return NS_ERROR_FAILURE;
   }
 
-  if (direction_ == RECEIVE) {
+  if (mDirection == RECEIVE) {
     CSFLogInfo(LOGTAG,
                "Listening for %s packets received on %p",
-               ToString(info.type_),
+               ToString(aInfo.mType),
                dtls->downward());
 
-    switch (info.type_) {
+    switch (aInfo.mType) {
       case RTP:
         dtls->downward()->SignalPacketReceived.connect(
           this, &MediaPipeline::RtpPacketReceived);
         break;
       case RTCP:
         dtls->downward()->SignalPacketReceived.connect(
           this, &MediaPipeline::RtcpPacketReceived);
         break;
@@ -943,468 +943,468 @@ MediaPipeline::TransportReady_s(Transpor
         dtls->downward()->SignalPacketReceived.connect(
           this, &MediaPipeline::PacketReceived);
         break;
       default:
         MOZ_CRASH();
     }
   }
 
-  info.state_ = MP_OPEN;
-  UpdateRtcpMuxState(info);
+  aInfo.mState = MP_OPEN;
+  UpdateRtcpMuxState(aInfo);
   return NS_OK;
 }
 
 nsresult
-MediaPipeline::TransportFailed_s(TransportInfo& info)
+MediaPipeline::TransportFailed_s(TransportInfo& aInfo)
 {
-  ASSERT_ON_THREAD(sts_thread_);
+  ASSERT_ON_THREAD(mStsThread);
 
-  info.state_ = MP_CLOSED;
-  UpdateRtcpMuxState(info);
+  aInfo.mState = MP_CLOSED;
+  UpdateRtcpMuxState(aInfo);
 
-  CSFLogInfo(LOGTAG, "Transport closed for flow %s", ToString(info.type_));
+  CSFLogInfo(LOGTAG, "Transport closed for flow %s", ToString(aInfo.mType));
 
   NS_WARNING(
     "MediaPipeline Transport failed. This is not properly cleaned up yet");
 
   // TODO(ekr@rtfm.com): SECURITY: Figure out how to clean up if the
   // connection was good and now it is bad.
   // TODO(ekr@rtfm.com): Report up so that the PC knows we
   // have experienced an error.
 
   return NS_OK;
 }
 
 void
-MediaPipeline::UpdateRtcpMuxState(TransportInfo& info)
+MediaPipeline::UpdateRtcpMuxState(TransportInfo& aInfo)
 {
-  if (info.type_ == MUX) {
-    if (info.transport_ == rtcp_.transport_) {
-      rtcp_.state_ = info.state_;
-      if (!rtcp_.send_srtp_) {
-        rtcp_.send_srtp_ = info.send_srtp_;
-        rtcp_.recv_srtp_ = info.recv_srtp_;
+  if (aInfo.mType == MUX) {
+    if (aInfo.mTransport == mRtcp.mTransport) {
+      mRtcp.mState = aInfo.mState;
+      if (!mRtcp.mSendSrtp) {
+        mRtcp.mSendSrtp = aInfo.mSendSrtp;
+        mRtcp.mRecvSrtp = aInfo.mRecvSrtp;
       }
     }
   }
 }
 
 nsresult
-MediaPipeline::SendPacket(TransportFlow* flow, const void* data, int len)
+MediaPipeline::SendPacket(TransportFlow* aFlow, const void* aData, int aLen)
 {
-  ASSERT_ON_THREAD(sts_thread_);
+  ASSERT_ON_THREAD(mStsThread);
 
   // Note that we bypass the DTLS layer here
   TransportLayerDtls* dtls =
-    static_cast<TransportLayerDtls*>(flow->GetLayer(TransportLayerDtls::ID()));
+    static_cast<TransportLayerDtls*>(aFlow->GetLayer(TransportLayerDtls::ID()));
   MOZ_ASSERT(dtls);
 
   TransportResult res =
-    dtls->downward()->SendPacket(static_cast<const unsigned char*>(data), len);
+    dtls->downward()->SendPacket(static_cast<const unsigned char*>(aData), aLen);
 
-  if (res != len) {
+  if (res != aLen) {
     // Ignore blocking indications
     if (res == TE_WOULDBLOCK)
       return NS_OK;
 
-    CSFLogError(LOGTAG, "Failed write on stream %s", description_.c_str());
+    CSFLogError(LOGTAG, "Failed write on stream %s", mDescription.c_str());
     return NS_BASE_STREAM_CLOSED;
   }
 
   return NS_OK;
 }
 
 void
-MediaPipeline::increment_rtp_packets_sent(int32_t bytes)
+MediaPipeline::IncrementRtpPacketsSent(int32_t aBytes)
 {
-  ++rtp_packets_sent_;
-  rtp_bytes_sent_ += bytes;
+  ++mRtpPacketsSent;
+  mRtpBytesSent += aBytes;
 
-  if (!(rtp_packets_sent_ % 100)) {
+  if (!(mRtpPacketsSent % 100)) {
     CSFLogInfo(LOGTAG,
                "RTP sent packet count for %s Pipeline %p Flow: %p: %u (%" PRId64
                " bytes)",
-               description_.c_str(),
+               mDescription.c_str(),
                this,
-               static_cast<void*>(rtp_.transport_),
-               rtp_packets_sent_,
-               rtp_bytes_sent_);
+               static_cast<void*>(mRtp.mTransport),
+               mRtpPacketsSent,
+               mRtpBytesSent);
   }
 }
 
 void
-MediaPipeline::increment_rtcp_packets_sent()
+MediaPipeline::IncrementRtcpPacketsSent()
 {
-  ++rtcp_packets_sent_;
-  if (!(rtcp_packets_sent_ % 100)) {
+  ++mRtcpPacketsSent;
+  if (!(mRtcpPacketsSent % 100)) {
     CSFLogInfo(LOGTAG,
                "RTCP sent packet count for %s Pipeline %p Flow: %p: %u",
-               description_.c_str(),
+               mDescription.c_str(),
                this,
-               static_cast<void*>(rtp_.transport_),
-               rtcp_packets_sent_);
+               static_cast<void*>(mRtp.mTransport),
+               mRtcpPacketsSent);
   }
 }
 
 void
-MediaPipeline::increment_rtp_packets_received(int32_t bytes)
+MediaPipeline::IncrementRtpPacketsReceived(int32_t aBytes)
 {
-  ++rtp_packets_received_;
-  rtp_bytes_received_ += bytes;
-  if (!(rtp_packets_received_ % 100)) {
+  ++mRtpPacketsReceived;
+  mRtpBytesReceived += aBytes;
+  if (!(mRtpPacketsReceived % 100)) {
     CSFLogInfo(
       LOGTAG,
       "RTP received packet count for %s Pipeline %p Flow: %p: %u (%" PRId64
       " bytes)",
-      description_.c_str(),
+      mDescription.c_str(),
       this,
-      static_cast<void*>(rtp_.transport_),
-      rtp_packets_received_,
-      rtp_bytes_received_);
+      static_cast<void*>(mRtp.mTransport),
+      mRtpPacketsReceived,
+      mRtpBytesReceived);
   }
 }
 
 void
-MediaPipeline::increment_rtcp_packets_received()
+MediaPipeline::IncrementRtcpPacketsReceived()
 {
-  ++rtcp_packets_received_;
-  if (!(rtcp_packets_received_ % 100)) {
+  ++mRtcpPacketsReceived;
+  if (!(mRtcpPacketsReceived % 100)) {
     CSFLogInfo(LOGTAG,
                "RTCP received packet count for %s Pipeline %p Flow: %p: %u",
-               description_.c_str(),
+               mDescription.c_str(),
                this,
-               static_cast<void*>(rtp_.transport_),
-               rtcp_packets_received_);
+               static_cast<void*>(mRtp.mTransport),
+               mRtcpPacketsReceived);
   }
 }
 
 void
-MediaPipeline::RtpPacketReceived(TransportLayer* layer,
-                                 const unsigned char* data,
-                                 size_t len)
+MediaPipeline::RtpPacketReceived(TransportLayer* aLayer,
+                                 const unsigned char* aData,
+                                 size_t aLen)
 {
-  if (direction_ == TRANSMIT) {
+  if (mDirection == TRANSMIT) {
     return;
   }
 
-  if (!transport_->pipeline()) {
+  if (!mTransport->Pipeline()) {
     CSFLogError(LOGTAG, "Discarding incoming packet; transport disconnected");
     return;
   }
 
-  if (!conduit_) {
+  if (!mConduit) {
     CSFLogDebug(LOGTAG, "Discarding incoming packet; media disconnected");
     return;
   }
 
-  if (rtp_.state_ != MP_OPEN) {
+  if (mRtp.mState != MP_OPEN) {
     CSFLogError(LOGTAG, "Discarding incoming packet; pipeline not open");
     return;
   }
 
-  if (rtp_.transport_->state() != TransportLayer::TS_OPEN) {
+  if (mRtp.mTransport->state() != TransportLayer::TS_OPEN) {
     CSFLogError(LOGTAG, "Discarding incoming packet; transport not open");
     return;
   }
 
   // This should never happen.
-  MOZ_ASSERT(rtp_.recv_srtp_);
+  MOZ_ASSERT(mRtp.mRecvSrtp);
 
-  if (!len) {
+  if (!aLen) {
     return;
   }
 
   // Filter out everything but RTP/RTCP
-  if (data[0] < 128 || data[0] > 191) {
+  if (aData[0] < 128 || aData[0] > 191) {
     return;
   }
 
   webrtc::RTPHeader header;
-  if (!rtp_parser_->Parse(data, len, &header)) {
+  if (!mRtpParser->Parse(aData, aLen, &header)) {
     return;
   }
 
-  if (filter_ && !filter_->Filter(header)) {
+  if (mFilter && !mFilter->Filter(header)) {
     return;
   }
 
   // Make sure to only get the time once, and only if we need it by
   // using getTimestamp() for access
   DOMHighResTimeStamp now = 0.0;
   bool hasTime = false;
 
   // Remove expired RtpCSRCStats
-  if (!csrc_stats_.empty()) {
+  if (!mCsrcStats.empty()) {
     if (!hasTime) {
       now = GetNow();
       hasTime = true;
     }
     auto expiry = RtpCSRCStats::GetExpiryFromTime(now);
-    for (auto p = csrc_stats_.begin(); p != csrc_stats_.end();) {
+    for (auto p = mCsrcStats.begin(); p != mCsrcStats.end();) {
       if (p->second.Expired(expiry)) {
-        p = csrc_stats_.erase(p);
+        p = mCsrcStats.erase(p);
         continue;
       }
       p++;
     }
   }
 
   // Add new RtpCSRCStats
   if (header.numCSRCs) {
     for (auto i = 0; i < header.numCSRCs; i++) {
       if (!hasTime) {
         now = GetNow();
         hasTime = true;
       }
-      auto csrcInfo = csrc_stats_.find(header.arrOfCSRCs[i]);
-      if (csrcInfo == csrc_stats_.end()) {
-        csrc_stats_.insert(std::make_pair(
+      auto csrcInfo = mCsrcStats.find(header.arrOfCSRCs[i]);
+      if (csrcInfo == mCsrcStats.end()) {
+        mCsrcStats.insert(std::make_pair(
           header.arrOfCSRCs[i], RtpCSRCStats(header.arrOfCSRCs[i], now)));
       } else {
         csrcInfo->second.SetTimestamp(now);
       }
     }
   }
 
-  packet_dumper_->Dump(level_, dom::mozPacketDumpType::Srtp, false, data, len);
+  mPacketDumper->Dump(mLevel, dom::mozPacketDumpType::Srtp, false, aData, aLen);
 
   // Make a copy rather than cast away constness
-  auto inner_data = MakeUnique<unsigned char[]>(len);
-  memcpy(inner_data.get(), data, len);
-  int out_len = 0;
+  auto innerData = MakeUnique<unsigned char[]>(aLen);
+  memcpy(innerData.get(), aData, aLen);
+  int outLen = 0;
   nsresult res =
-    rtp_.recv_srtp_->UnprotectRtp(inner_data.get(), len, len, &out_len);
+    mRtp.mRecvSrtp->UnprotectRtp(innerData.get(), aLen, aLen, &outLen);
   if (!NS_SUCCEEDED(res)) {
     char tmp[16];
 
     SprintfLiteral(tmp,
                    "%.2x %.2x %.2x %.2x",
-                   inner_data[0],
-                   inner_data[1],
-                   inner_data[2],
-                   inner_data[3]);
+                   innerData[0],
+                   innerData[1],
+                   innerData[2],
+                   innerData[3]);
 
     CSFLogError(LOGTAG,
                 "Error unprotecting RTP in %s len= %zu [%s]",
-                description_.c_str(),
-                len,
+                mDescription.c_str(),
+                aLen,
                 tmp);
     return;
   }
-  CSFLogDebug(LOGTAG, "%s received RTP packet.", description_.c_str());
-  increment_rtp_packets_received(out_len);
+  CSFLogDebug(LOGTAG, "%s received RTP packet.", mDescription.c_str());
+  IncrementRtpPacketsReceived(outLen);
   OnRtpPacketReceived();
 
   RtpLogger::LogPacket(
-    inner_data.get(), out_len, true, true, header.headerLength, description_);
+    innerData.get(), outLen, true, true, header.headerLength, mDescription);
 
-  packet_dumper_->Dump(
-    level_, dom::mozPacketDumpType::Rtp, false, inner_data.get(), out_len);
+  mPacketDumper->Dump(
+    mLevel, dom::mozPacketDumpType::Rtp, false, innerData.get(), outLen);
 
-  (void)conduit_->ReceivedRTPPacket(
-    inner_data.get(), out_len, header.ssrc); // Ignore error codes
+  (void)mConduit->ReceivedRTPPacket(
+    innerData.get(), outLen, header.ssrc); // Ignore error codes
 }
 
 void
-MediaPipeline::RtcpPacketReceived(TransportLayer* layer,
-                                  const unsigned char* data,
-                                  size_t len)
+MediaPipeline::RtcpPacketReceived(TransportLayer* aLayer,
+                                  const unsigned char* aData,
+                                  size_t aLen)
 {
-  if (!transport_->pipeline()) {
+  if (!mTransport->Pipeline()) {
     CSFLogDebug(LOGTAG, "Discarding incoming packet; transport disconnected");
     return;
   }
 
-  if (!conduit_) {
+  if (!mConduit) {
     CSFLogDebug(LOGTAG, "Discarding incoming packet; media disconnected");
     return;
   }
 
-  if (rtcp_.state_ != MP_OPEN) {
+  if (mRtcp.mState != MP_OPEN) {
     CSFLogDebug(LOGTAG, "Discarding incoming packet; pipeline not open");
     return;
   }
 
-  if (rtcp_.transport_->state() != TransportLayer::TS_OPEN) {
+  if (mRtcp.mTransport->state() != TransportLayer::TS_OPEN) {
     CSFLogError(LOGTAG, "Discarding incoming packet; transport not open");
     return;
   }
 
-  if (!len) {
+  if (!aLen) {
     return;
   }
 
   // Filter out everything but RTP/RTCP
-  if (data[0] < 128 || data[0] > 191) {
+  if (aData[0] < 128 || aData[0] > 191) {
     return;
   }
 
   // We do not filter receiver reports, since the webrtc.org code for
   // senders already has logic to ignore RRs that do not apply.
   // TODO bug 1279153: remove SR check for reduced size RTCP
-  if (filter_ && !filter_->FilterSenderReport(data, len)) {
+  if (mFilter && !mFilter->FilterSenderReport(aData, aLen)) {
     CSFLogWarn(LOGTAG, "Dropping incoming RTCP packet; filtered out");
     return;
   }
 
-  packet_dumper_->Dump(level_, dom::mozPacketDumpType::Srtcp, false, data, len);
+  mPacketDumper->Dump(mLevel, dom::mozPacketDumpType::Srtcp, false, aData, aLen);
 
   // Make a copy rather than cast away constness
-  auto inner_data = MakeUnique<unsigned char[]>(len);
-  memcpy(inner_data.get(), data, len);
-  int out_len;
+  auto innerData = MakeUnique<unsigned char[]>(aLen);
+  memcpy(innerData.get(), aData, aLen);
+  int outLen;
 
   nsresult res =
-    rtcp_.recv_srtp_->UnprotectRtcp(inner_data.get(), len, len, &out_len);
+    mRtcp.mRecvSrtp->UnprotectRtcp(innerData.get(), aLen, aLen, &outLen);
 
   if (!NS_SUCCEEDED(res))
     return;
 
-  CSFLogDebug(LOGTAG, "%s received RTCP packet.", description_.c_str());
-  increment_rtcp_packets_received();
+  CSFLogDebug(LOGTAG, "%s received RTCP packet.", mDescription.c_str());
+  IncrementRtcpPacketsReceived();
 
-  RtpLogger::LogPacket(inner_data.get(), out_len, true, false, 0, description_);
+  RtpLogger::LogPacket(innerData.get(), outLen, true, false, 0, mDescription);
 
-  packet_dumper_->Dump(level_, dom::mozPacketDumpType::Rtcp, false, data, len);
+  mPacketDumper->Dump(mLevel, dom::mozPacketDumpType::Rtcp, false, aData, aLen);
 
-  MOZ_ASSERT(rtcp_.recv_srtp_); // This should never happen
+  MOZ_ASSERT(mRtcp.mRecvSrtp); // This should never happen
 
-  (void)conduit_->ReceivedRTCPPacket(inner_data.get(),
-                                     out_len); // Ignore error codes
+  (void)mConduit->ReceivedRTCPPacket(innerData.get(),
+                                     outLen); // Ignore error codes
 }
 
 bool
-MediaPipeline::IsRtp(const unsigned char* data, size_t len)
+MediaPipeline::IsRtp(const unsigned char* aData, size_t aLen)
 {
-  if (len < 2)
+  if (aLen < 2)
     return false;
 
   // Check if this is a RTCP packet. Logic based on the types listed in
   // media/webrtc/trunk/src/modules/rtp_rtcp/source/rtp_utility.cc
 
   // Anything outside this range is RTP.
-  if ((data[1] < 192) || (data[1] > 207))
+  if ((aData[1] < 192) || (aData[1] > 207))
     return true;
 
-  if (data[1] == 192) // FIR
+  if (aData[1] == 192) // FIR
     return false;
 
-  if (data[1] == 193) // NACK, but could also be RTP. This makes us sad
+  if (aData[1] == 193) // NACK, but could also be RTP. This makes us sad
     return true;      // but it's how webrtc.org behaves.
 
-  if (data[1] == 194)
+  if (aData[1] == 194)
     return true;
 
-  if (data[1] == 195) // IJ.
+  if (aData[1] == 195) // IJ.
     return false;
 
-  if ((data[1] > 195) && (data[1] < 200)) // the > 195 is redundant
+  if ((aData[1] > 195) && (aData[1] < 200)) // the > 195 is redundant
     return true;
 
-  if ((data[1] >= 200) && (data[1] <= 207)) // SR, RR, SDES, BYE,
+  if ((aData[1] >= 200) && (aData[1] <= 207)) // SR, RR, SDES, BYE,
     return false;                           // APP, RTPFB, PSFB, XR
 
   MOZ_ASSERT(false); // Not reached, belt and suspenders.
   return true;
 }
 
 void
-MediaPipeline::PacketReceived(TransportLayer* layer,
-                              const unsigned char* data,
-                              size_t len)
+MediaPipeline::PacketReceived(TransportLayer* aLayer,
+                              const unsigned char* aData,
+                              size_t aLen)
 {
-  if (!transport_->pipeline()) {
+  if (!mTransport->Pipeline()) {
     CSFLogDebug(LOGTAG, "Discarding incoming packet; transport disconnected");
     return;
   }
 
-  if (IsRtp(data, len)) {
-    RtpPacketReceived(layer, data, len);
+  if (IsRtp(aData, aLen)) {
+    RtpPacketReceived(aLayer, aData, aLen);
   } else {
-    RtcpPacketReceived(layer, data, len);
+    RtcpPacketReceived(aLayer, aData, aLen);
   }
 }
 
 class MediaPipelineTransmit::PipelineListener : public MediaStreamVideoSink
 {
   friend class MediaPipelineTransmit;
 
 public:
-  explicit PipelineListener(const RefPtr<MediaSessionConduit>& conduit)
-    : conduit_(conduit)
-    , track_id_(TRACK_INVALID)
+  explicit PipelineListener(const RefPtr<MediaSessionConduit>& aConduit)
+    : mConduit(aConduit)
+    , mTrackId(TRACK_INVALID)
     , mMutex("MediaPipelineTransmit::PipelineListener")
-    , track_id_external_(TRACK_INVALID)
-    , active_(false)
-    , enabled_(false)
-    , direct_connect_(false)
+    , mTrackIdExternal(TRACK_INVALID)
+    , mActive(false)
+    , mEnabled(false)
+    , mDirectConnect(false)
   {
   }
 
   ~PipelineListener()
   {
     if (!NS_IsMainThread()) {
       // release conduit on mainthread.  Must use forget()!
       nsresult rv =
-        NS_DispatchToMainThread(new ConduitDeleteEvent(conduit_.forget()));
+        NS_DispatchToMainThread(new ConduitDeleteEvent(mConduit.forget()));
       MOZ_ASSERT(!NS_FAILED(rv), "Could not dispatch conduit shutdown to main");
       if (NS_FAILED(rv)) {
         MOZ_CRASH();
       }
     } else {
-      conduit_ = nullptr;
+      mConduit = nullptr;
     }
-    if (converter_) {
-      converter_->Shutdown();
+    if (mConverter) {
+      mConverter->Shutdown();
     }
   }
 
   // Dispatches setting the internal TrackID to TRACK_INVALID to the media
   // graph thread to keep it in sync with other MediaStreamGraph operations
   // like RemoveListener() and AddListener(). The TrackID will be updated on
   // the next NewData() callback.
-  void UnsetTrackId(MediaStreamGraphImpl* graph);
+  void UnsetTrackId(MediaStreamGraphImpl* aGraph);
 
-  void SetActive(bool active) { active_ = active; }
-  void SetEnabled(bool enabled) { enabled_ = enabled; }
+  void SetActive(bool aActive) { mActive = aActive; }
+  void SetEnabled(bool aEnabled) { mEnabled = aEnabled; }
 
   // These are needed since nested classes don't have access to any particular
   // instance of the parent
-  void SetAudioProxy(const RefPtr<AudioProxyThread>& proxy)
+  void SetAudioProxy(const RefPtr<AudioProxyThread>& aProxy)
   {
-    audio_processing_ = proxy;
+    mAudioProcessing = aProxy;
   }
 
-  void SetVideoFrameConverter(const RefPtr<VideoFrameConverter>& converter)
+  void SetVideoFrameConverter(const RefPtr<VideoFrameConverter>& aConverter)
   {
-    converter_ = converter;
+    mConverter = aConverter;
   }
 
   void OnVideoFrameConverted(const unsigned char* aVideoFrame,
                              unsigned int aVideoFrameLength,
                              unsigned short aWidth,
                              unsigned short aHeight,
                              VideoType aVideoType,
                              uint64_t aCaptureTime)
   {
-    MOZ_RELEASE_ASSERT(conduit_->type() == MediaSessionConduit::VIDEO);
-    static_cast<VideoSessionConduit*>(conduit_.get())
+    MOZ_RELEASE_ASSERT(mConduit->type() == MediaSessionConduit::VIDEO);
+    static_cast<VideoSessionConduit*>(mConduit.get())
       ->SendVideoFrame(aVideoFrame,
                        aVideoFrameLength,
                        aWidth,
                        aHeight,
                        aVideoType,
                        aCaptureTime);
   }
 
   void OnVideoFrameConverted(const webrtc::VideoFrame& aVideoFrame)
   {
-    MOZ_RELEASE_ASSERT(conduit_->type() == MediaSessionConduit::VIDEO);
-    static_cast<VideoSessionConduit*>(conduit_.get())
+    MOZ_RELEASE_ASSERT(mConduit->type() == MediaSessionConduit::VIDEO);
+    static_cast<VideoSessionConduit*>(mConduit.get())
       ->SendVideoFrame(aVideoFrame);
   }
 
   // Implement MediaStreamTrackListener
   void NotifyQueuedChanges(MediaStreamGraph* aGraph,
                            StreamTime aTrackOffset,
                            const MediaSegment& aQueuedMedia) override;
 
@@ -1418,1183 +1418,1179 @@ public:
   // Implement MediaStreamVideoSink
   void SetCurrentFrames(const VideoSegment& aSegment) override;
   void ClearFrames() override {}
 
 private:
   void UnsetTrackIdImpl()
   {
     MutexAutoLock lock(mMutex);
-    track_id_ = track_id_external_ = TRACK_INVALID;
+    mTrackId = mTrackIdExternal = TRACK_INVALID;
   }
 
-  void NewData(const MediaSegment& media, TrackRate aRate = 0);
+  void NewData(const MediaSegment& aMedia, TrackRate aRate = 0);
 
-  RefPtr<MediaSessionConduit> conduit_;
-  RefPtr<AudioProxyThread> audio_processing_;
-  RefPtr<VideoFrameConverter> converter_;
+  RefPtr<MediaSessionConduit> mConduit;
+  RefPtr<AudioProxyThread> mAudioProcessing;
+  RefPtr<VideoFrameConverter> mConverter;
 
   // May be TRACK_INVALID until we see data from the track
-  TrackID track_id_; // this is the current TrackID this listener is attached to
+  TrackID mTrackId; // this is the current TrackID this listener is attached to
   Mutex mMutex;
   // protected by mMutex
   // May be TRACK_INVALID until we see data from the track
-  TrackID track_id_external_; // this is queried from other threads
+  TrackID mTrackIdExternal; // this is queried from other threads
 
   // active is true if there is a transport to send on
-  mozilla::Atomic<bool> active_;
+  mozilla::Atomic<bool> mActive;
   // enabled is true if the media access control permits sending
   // actual content; when false you get black/silence
-  mozilla::Atomic<bool> enabled_;
+  mozilla::Atomic<bool> mEnabled;
 
   // Written and read on the MediaStreamGraph thread
-  bool direct_connect_;
+  bool mDirectConnect;
 };
 
 // Implements VideoConverterListener for MediaPipeline.
 //
 // We pass converted frames on to MediaPipelineTransmit::PipelineListener
 // where they are further forwarded to VideoConduit.
 // MediaPipelineTransmit calls Detach() during shutdown to ensure there is
 // no cyclic dependencies between us and PipelineListener.
 class MediaPipelineTransmit::VideoFrameFeeder : public VideoConverterListener
 {
 public:
-  explicit VideoFrameFeeder(const RefPtr<PipelineListener>& listener)
-    : listener_(listener)
-    , mutex_("VideoFrameFeeder")
+  explicit VideoFrameFeeder(const RefPtr<PipelineListener>& aListener)
+    : mListener(aListener)
+    , mMutex("VideoFrameFeeder")
   {
     MOZ_COUNT_CTOR(VideoFrameFeeder);
   }
 
   void Detach()
   {
-    MutexAutoLock lock(mutex_);
+    MutexAutoLock lock(mMutex);
 
-    listener_ = nullptr;
+    mListener = nullptr;
   }
 
   void OnVideoFrameConverted(const unsigned char* aVideoFrame,
                              unsigned int aVideoFrameLength,
                              unsigned short aWidth,
                              unsigned short aHeight,
                              VideoType aVideoType,
                              uint64_t aCaptureTime) override
   {
-    MutexAutoLock lock(mutex_);
+    MutexAutoLock lock(mMutex);
 
-    if (!listener_) {
+    if (!mListener) {
       return;
     }
 
-    listener_->OnVideoFrameConverted(aVideoFrame,
+    mListener->OnVideoFrameConverted(aVideoFrame,
                                      aVideoFrameLength,
                                      aWidth,
                                      aHeight,
                                      aVideoType,
                                      aCaptureTime);
   }
 
   void OnVideoFrameConverted(const webrtc::VideoFrame& aVideoFrame) override
   {
-    MutexAutoLock lock(mutex_);
+    MutexAutoLock lock(mMutex);
 
-    if (!listener_) {
+    if (!mListener) {
       return;
     }
 
-    listener_->OnVideoFrameConverted(aVideoFrame);
+    mListener->OnVideoFrameConverted(aVideoFrame);
   }
 
 protected:
   virtual ~VideoFrameFeeder() { MOZ_COUNT_DTOR(VideoFrameFeeder); }
 
-  RefPtr<PipelineListener> listener_;
-  Mutex mutex_;
+  RefPtr<PipelineListener> mListener;
+  Mutex mMutex;
 };
 
 MediaPipelineTransmit::MediaPipelineTransmit(
-  const std::string& pc,
-  nsCOMPtr<nsIEventTarget> main_thread,
-  nsCOMPtr<nsIEventTarget> sts_thread,
-  bool is_video,
-  dom::MediaStreamTrack* domtrack,
-  RefPtr<MediaSessionConduit> conduit)
-  : MediaPipeline(pc, TRANSMIT, main_thread, sts_thread, conduit)
-  , listener_(new PipelineListener(conduit))
-  , is_video_(is_video)
-  , domtrack_(domtrack)
-  , transmitting_(false)
+  const std::string& aPc,
+  nsCOMPtr<nsIEventTarget> aMainThread,
+  nsCOMPtr<nsIEventTarget> aStsThread,
+  bool aIsVideo,
+  dom::MediaStreamTrack* aDomTrack,
+  RefPtr<MediaSessionConduit> aConduit)
+  : MediaPipeline(aPc, TRANSMIT, aMainThread, aStsThread, aConduit)
+  , mListener(new PipelineListener(aConduit))
+  , mIsVideo(aIsVideo)
+  , mDomTrack(aDomTrack)
+  , mTransmitting(false)
 {
   SetDescription();
   if (!IsVideo()) {
-    audio_processing_ = MakeAndAddRef<AudioProxyThread>(
-      static_cast<AudioSessionConduit*>(conduit.get()));
-    listener_->SetAudioProxy(audio_processing_);
+    mAudioProcessing = MakeAndAddRef<AudioProxyThread>(
+      static_cast<AudioSessionConduit*>(aConduit.get()));
+    mListener->SetAudioProxy(mAudioProcessing);
   } else { // Video
     // For video we send frames to an async VideoFrameConverter that calls
     // back to a VideoFrameFeeder that feeds I420 frames to VideoConduit.
 
-    feeder_ = MakeAndAddRef<VideoFrameFeeder>(listener_);
+    mFeeder = MakeAndAddRef<VideoFrameFeeder>(mListener);
 
-    converter_ = MakeAndAddRef<VideoFrameConverter>();
-    converter_->AddListener(feeder_);
+    mConverter = MakeAndAddRef<VideoFrameConverter>();
+    mConverter->AddListener(mFeeder);
 
-    listener_->SetVideoFrameConverter(converter_);
+    mListener->SetVideoFrameConverter(mConverter);
   }
 }
 
 MediaPipelineTransmit::~MediaPipelineTransmit()
 {
-  if (feeder_) {
-    feeder_->Detach();
+  if (mFeeder) {
+    mFeeder->Detach();
   }
 
-  MOZ_ASSERT(!domtrack_);
+  MOZ_ASSERT(!mDomTrack);
 }
 
 void
 MediaPipelineTransmit::SetDescription()
 {
-  description_ = pc_ + "| ";
-  description_ += conduit_->type() == MediaSessionConduit::AUDIO
+  mDescription = mPc + "| ";
+  mDescription += mConduit->type() == MediaSessionConduit::AUDIO
                     ? "Transmit audio["
                     : "Transmit video[";
 
-  if (!domtrack_) {
-    description_ += "no track]";
+  if (!mDomTrack) {
+    mDescription += "no track]";
     return;
   }
 
   nsString nsTrackId;
-  domtrack_->GetId(nsTrackId);
-  std::string track_id(NS_ConvertUTF16toUTF8(nsTrackId).get());
-  description_ += track_id;
-  description_ += "]";
+  mDomTrack->GetId(nsTrackId);
+  std::string trackId(NS_ConvertUTF16toUTF8(nsTrackId).get());
+  mDescription += trackId;
+  mDescription += "]";
 }
 
 void
 MediaPipelineTransmit::Stop()
 {
-  ASSERT_ON_THREAD(main_thread_);
+  ASSERT_ON_THREAD(mMainThread);
 
-  if (!domtrack_ || !transmitting_) {
+  if (!mDomTrack || !mTransmitting) {
     return;
   }
 
-  transmitting_ = false;
+  mTransmitting = false;
 
-  if (domtrack_->AsAudioStreamTrack()) {
-    domtrack_->RemoveDirectListener(listener_);
-    domtrack_->RemoveListener(listener_);
-  } else if (VideoStreamTrack* video = domtrack_->AsVideoStreamTrack()) {
-    video->RemoveVideoOutput(listener_);
+  if (mDomTrack->AsAudioStreamTrack()) {
+    mDomTrack->RemoveDirectListener(mListener);
+    mDomTrack->RemoveListener(mListener);
+  } else if (VideoStreamTrack* video = mDomTrack->AsVideoStreamTrack()) {
+    video->RemoveVideoOutput(mListener);
   } else {
     MOZ_ASSERT(false, "Unknown track type");
   }
 
-  conduit_->StopTransmitting();
+  mConduit->StopTransmitting();
 }
 
 void
 MediaPipelineTransmit::Start()
 {
-  ASSERT_ON_THREAD(main_thread_);
+  ASSERT_ON_THREAD(mMainThread);
 
-  if (!domtrack_ || transmitting_) {
+  if (!mDomTrack || mTransmitting) {
     return;
   }
 
-  transmitting_ = true;
+  mTransmitting = true;
 
-  conduit_->StartTransmitting();
+  mConduit->StartTransmitting();
 
   // TODO(ekr@rtfm.com): Check for errors
   CSFLogDebug(
     LOGTAG,
     "Attaching pipeline to track %p conduit type=%s",
     this,
-    (conduit_->type() == MediaSessionConduit::AUDIO ? "audio" : "video"));
+    (mConduit->type() == MediaSessionConduit::AUDIO ? "audio" : "video"));
 
 #if !defined(MOZILLA_EXTERNAL_LINKAGE)
   // With full duplex we don't risk having audio come in late to the MSG
   // so we won't need a direct listener.
   const bool enableDirectListener =
     !Preferences::GetBool("media.navigator.audio.full_duplex", false);
 #else
   const bool enableDirectListener = true;
 #endif
 
-  if (domtrack_->AsAudioStreamTrack()) {
+  if (mDomTrack->AsAudioStreamTrack()) {
     if (enableDirectListener) {
       // Register the Listener directly with the source if we can.
       // We also register it as a non-direct listener so we fall back to that
       // if installing the direct listener fails. As a direct listener we get
       // access to direct unqueued (and not resampled) data.
-      domtrack_->AddDirectListener(listener_);
+      mDomTrack->AddDirectListener(mListener);
     }
-    domtrack_->AddListener(listener_);
-  } else if (VideoStreamTrack* video = domtrack_->AsVideoStreamTrack()) {
-    video->AddVideoOutput(listener_);
+    mDomTrack->AddListener(mListener);
+  } else if (VideoStreamTrack* video = mDomTrack->AsVideoStreamTrack()) {
+    video->AddVideoOutput(mListener);
   } else {
     MOZ_ASSERT(false, "Unknown track type");
   }
 }
 
 bool
 MediaPipelineTransmit::IsVideo() const
 {
-  return is_video_;
+  return mIsVideo;
 }
 
 void
-MediaPipelineTransmit::UpdateSinkIdentity_m(MediaStreamTrack* track,
-                                            nsIPrincipal* principal,
-                                            const PeerIdentity* sinkIdentity)
+MediaPipelineTransmit::UpdateSinkIdentity_m(MediaStreamTrack* aTrack,
+                                            nsIPrincipal* aPrincipal,
+                                            const PeerIdentity* aSinkIdentity)
 {
-  ASSERT_ON_THREAD(main_thread_);
+  ASSERT_ON_THREAD(mMainThread);
 
-  if (track != nullptr && track != domtrack_) {
+  if (aTrack != nullptr && aTrack != mDomTrack) {
     // If a track is specified, then it might not be for this pipeline,
     // since we receive notifications for all tracks on the PC.
     // nullptr means that the PeerIdentity has changed and shall be applied
     // to all tracks of the PC.
     return;
   }
 
-  bool enableTrack = principal->Subsumes(domtrack_->GetPrincipal());
+  bool enableTrack = aPrincipal->Subsumes(mDomTrack->GetPrincipal());
   if (!enableTrack) {
     // first try didn't work, but there's a chance that this is still available
     // if our track is bound to a peerIdentity, and the peer connection (our
     // sink) is bound to the same identity, then we can enable the track.
-    const PeerIdentity* trackIdentity = domtrack_->GetPeerIdentity();
-    if (sinkIdentity && trackIdentity) {
-      enableTrack = (*sinkIdentity == *trackIdentity);
+    const PeerIdentity* trackIdentity = mDomTrack->GetPeerIdentity();
+    if (aSinkIdentity && trackIdentity) {
+      enableTrack = (*aSinkIdentity == *trackIdentity);
     }
   }
 
-  listener_->SetEnabled(enableTrack);
+  mListener->SetEnabled(enableTrack);
 }
 
 void
 MediaPipelineTransmit::DetachMedia()
 {
-  ASSERT_ON_THREAD(main_thread_);
-  domtrack_ = nullptr;
+  ASSERT_ON_THREAD(mMainThread);
+  mDomTrack = nullptr;
   // Let the listener be destroyed with the pipeline (or later).
 }
 
 nsresult
-MediaPipelineTransmit::TransportReady_s(TransportInfo& info)
+MediaPipelineTransmit::TransportReady_s(TransportInfo& aInfo)
 {
-  ASSERT_ON_THREAD(sts_thread_);
+  ASSERT_ON_THREAD(mStsThread);
   // Call base ready function.
-  MediaPipeline::TransportReady_s(info);
+  MediaPipeline::TransportReady_s(aInfo);
 
   // Should not be set for a transmitter
-  if (&info == &rtp_) {
-    listener_->SetActive(true);
+  if (&aInfo == &mRtp) {
+    mListener->SetActive(true);
   }
 
   return NS_OK;
 }
 
 nsresult
-MediaPipelineTransmit::ReplaceTrack(RefPtr<MediaStreamTrack>& domtrack)
+MediaPipelineTransmit::ReplaceTrack(RefPtr<MediaStreamTrack>& aDomTrack)
 {
   // MainThread, checked in calls we make
-  if (domtrack) {
+  if (aDomTrack) {
     nsString nsTrackId;
-    domtrack->GetId(nsTrackId);
+    aDomTrack->GetId(nsTrackId);
     std::string track_id(NS_ConvertUTF16toUTF8(nsTrackId).get());
     CSFLogDebug(
       LOGTAG,
       "Reattaching pipeline %s to track %p track %s conduit type: %s",
-      description_.c_str(),
-      &domtrack,
+      mDescription.c_str(),
+      &aDomTrack,
       track_id.c_str(),
-      (conduit_->type() == MediaSessionConduit::AUDIO ? "audio" : "video"));
+      (mConduit->type() == MediaSessionConduit::AUDIO ? "audio" : "video"));
   }
 
-  RefPtr<dom::MediaStreamTrack> oldTrack = domtrack_;
-  bool wasTransmitting = oldTrack && transmitting_;
+  RefPtr<dom::MediaStreamTrack> oldTrack = mDomTrack;
+  bool wasTransmitting = oldTrack && mTransmitting;
   Stop();
-  domtrack_ = domtrack;
+  mDomTrack = aDomTrack;
   SetDescription();
 
   if (oldTrack) {
     // Unsets the track id after RemoveListener() takes effect.
-    listener_->UnsetTrackId(oldTrack->GraphImpl());
+    mListener->UnsetTrackId(oldTrack->GraphImpl());
   }
 
   if (wasTransmitting) {
     Start();
   }
   return NS_OK;
 }
 
 nsresult
-MediaPipeline::ConnectTransport_s(TransportInfo& info)
+MediaPipeline::ConnectTransport_s(TransportInfo& aInfo)
 {
-  MOZ_ASSERT(info.transport_);
-  ASSERT_ON_THREAD(sts_thread_);
+  MOZ_ASSERT(aInfo.mTransport);
+  ASSERT_ON_THREAD(mStsThread);
 
   // Look to see if the transport is ready
-  if (info.transport_->state() == TransportLayer::TS_OPEN) {
-    nsresult res = TransportReady_s(info);
+  if (aInfo.mTransport->state() == TransportLayer::TS_OPEN) {
+    nsresult res = TransportReady_s(aInfo);
     if (NS_FAILED(res)) {
       CSFLogError(LOGTAG,
                   "Error calling TransportReady(); res=%u in %s",
                   static_cast<uint32_t>(res),
                   __FUNCTION__);
       return res;
     }
-  } else if (info.transport_->state() == TransportLayer::TS_ERROR) {
+  } else if (aInfo.mTransport->state() == TransportLayer::TS_ERROR) {
     CSFLogError(
-      LOGTAG, "%s transport is already in error state", ToString(info.type_));
-    TransportFailed_s(info);
+      LOGTAG, "%s transport is already in error state", ToString(aInfo.mType));
+    TransportFailed_s(aInfo);
     return NS_ERROR_FAILURE;
   }
 
-  info.transport_->SignalStateChange.connect(this, &MediaPipeline::StateChange);
+  aInfo.mTransport->SignalStateChange.connect(this, &MediaPipeline::StateChange);
 
   return NS_OK;
 }
 
 MediaPipeline::TransportInfo*
-MediaPipeline::GetTransportInfo_s(TransportFlow* flow)
+MediaPipeline::GetTransportInfo_s(TransportFlow* aFlow)
 {
-  ASSERT_ON_THREAD(sts_thread_);
-  if (flow == rtp_.transport_) {
-    return &rtp_;
+  ASSERT_ON_THREAD(mStsThread);
+  if (aFlow == mRtp.mTransport) {
+    return &mRtp;
   }
 
-  if (flow == rtcp_.transport_) {
-    return &rtcp_;
+  if (aFlow == mRtcp.mTransport) {
+    return &mRtcp;
   }
 
   return nullptr;
 }
 
 nsresult
-MediaPipeline::PipelineTransport::SendRtpPacket(const uint8_t* data, size_t len)
+MediaPipeline::PipelineTransport::SendRtpPacket(const uint8_t* aData, size_t aLen)
 {
 
   nsAutoPtr<DataBuffer> buf(
-    new DataBuffer(data, len, len + SRTP_MAX_EXPANSION));
+    new DataBuffer(aData, aLen, aLen + SRTP_MAX_EXPANSION));
 
   RUN_ON_THREAD(
-    sts_thread_,
+    mStsThread,
     WrapRunnable(RefPtr<MediaPipeline::PipelineTransport>(this),
                  &MediaPipeline::PipelineTransport::SendRtpRtcpPacket_s,
                  buf,
                  true),
     NS_DISPATCH_NORMAL);
 
   return NS_OK;
 }
 
 nsresult
 MediaPipeline::PipelineTransport::SendRtpRtcpPacket_s(
-  nsAutoPtr<DataBuffer> data,
-  bool is_rtp)
+  nsAutoPtr<DataBuffer> aData,
+  bool aIsRtp)
 {
 
-  ASSERT_ON_THREAD(sts_thread_);
-  if (!pipeline_) {
+  ASSERT_ON_THREAD(mStsThread);
+  if (!mPipeline) {
     return NS_OK; // Detached
   }
-  TransportInfo& transport = is_rtp ? pipeline_->rtp_ : pipeline_->rtcp_;
+  TransportInfo& transport = aIsRtp ? mPipeline->mRtp : mPipeline->mRtcp;
 
-  if (!transport.send_srtp_) {
+  if (!transport.mSendSrtp) {
     CSFLogDebug(LOGTAG, "Couldn't write RTP/RTCP packet; SRTP not set up yet");
     return NS_OK;
   }
 
-  MOZ_ASSERT(transport.transport_);
-  NS_ENSURE_TRUE(transport.transport_, NS_ERROR_NULL_POINTER);
+  MOZ_ASSERT(transport.mTransport);
+  NS_ENSURE_TRUE(transport.mTransport, NS_ERROR_NULL_POINTER);
 
   // libsrtp enciphers in place, so we need a big enough buffer.
-  MOZ_ASSERT(data->capacity() >= data->len() + SRTP_MAX_EXPANSION);
+  MOZ_ASSERT(aData->capacity() >= aData->len() + SRTP_MAX_EXPANSION);
 
   if (RtpLogger::IsPacketLoggingOn()) {
-    int header_len = 12;
+    int headerLen = 12;
     webrtc::RTPHeader header;
-    if (pipeline_->rtp_parser_ &&
-        pipeline_->rtp_parser_->Parse(data->data(), data->len(), &header)) {
-      header_len = header.headerLength;
+    if (mPipeline->mRtpParser &&
+        mPipeline->mRtpParser->Parse(aData->data(), aData->len(), &header)) {
+      headerLen = header.headerLength;
     }
-    RtpLogger::LogPacket(data->data(),
-                         data->len(),
+    RtpLogger::LogPacket(aData->data(),
+                         aData->len(),
                          false,
-                         is_rtp,
-                         header_len,
-                         pipeline_->description_);
+                         aIsRtp,
+                         headerLen,
+                         mPipeline->mDescription);
   }
 
   int out_len;
   nsresult res;
-  if (is_rtp) {
-    pipeline_->packet_dumper_->Dump(pipeline_->level(),
+  if (aIsRtp) {
+    mPipeline->mPacketDumper->Dump(mPipeline->Level(),
                                     dom::mozPacketDumpType::Rtp,
                                     true,
-                                    data->data(),
-                                    data->len());
+                                    aData->data(),
+                                    aData->len());
 
-    res = transport.send_srtp_->ProtectRtp(
-      data->data(), data->len(), data->capacity(), &out_len);
+    res = transport.mSendSrtp->ProtectRtp(
+      aData->data(), aData->len(), aData->capacity(), &out_len);
   } else {
-    pipeline_->packet_dumper_->Dump(pipeline_->level(),
+    mPipeline->mPacketDumper->Dump(mPipeline->Level(),
                                     dom::mozPacketDumpType::Rtcp,
                                     true,
-                                    data->data(),
-                                    data->len());
+                                    aData->data(),
+                                    aData->len());
 
-    res = transport.send_srtp_->ProtectRtcp(
-      data->data(), data->len(), data->capacity(), &out_len);
+    res = transport.mSendSrtp->ProtectRtcp(
+      aData->data(), aData->len(), aData->capacity(), &out_len);
   }
   if (!NS_SUCCEEDED(res)) {
     return res;
   }
 
   // paranoia; don't have uninitialized bytes included in data->len()
-  data->SetLength(out_len);
+  aData->SetLength(out_len);
 
   CSFLogDebug(LOGTAG,
               "%s sending %s packet",
-              pipeline_->description_.c_str(),
-              (is_rtp ? "RTP" : "RTCP"));
-  if (is_rtp) {
-    pipeline_->packet_dumper_->Dump(pipeline_->level(),
+              mPipeline->mDescription.c_str(),
+              (aIsRtp ? "RTP" : "RTCP"));
+  if (aIsRtp) {
+    mPipeline->mPacketDumper->Dump(mPipeline->Level(),
                                     dom::mozPacketDumpType::Srtp,
                                     true,
-                                    data->data(),
+                                    aData->data(),
                                     out_len);
 
-    pipeline_->increment_rtp_packets_sent(out_len);
+    mPipeline->IncrementRtpPacketsSent(out_len);
   } else {
-    pipeline_->packet_dumper_->Dump(pipeline_->level(),
+    mPipeline->mPacketDumper->Dump(mPipeline->Level(),
                                     dom::mozPacketDumpType::Srtcp,
                                     true,
-                                    data->data(),
+                                    aData->data(),
                                     out_len);
 
-    pipeline_->increment_rtcp_packets_sent();
+    mPipeline->IncrementRtcpPacketsSent();
   }
-  return pipeline_->SendPacket(transport.transport_, data->data(), out_len);
+  return mPipeline->SendPacket(transport.mTransport, aData->data(), out_len);
 }
 
 nsresult
-MediaPipeline::PipelineTransport::SendRtcpPacket(const uint8_t* data,
-                                                 size_t len)
+MediaPipeline::PipelineTransport::SendRtcpPacket(const uint8_t* aData,
+                                                 size_t aLen)
 {
 
   nsAutoPtr<DataBuffer> buf(
-    new DataBuffer(data, len, len + SRTP_MAX_EXPANSION));
+    new DataBuffer(aData, aLen, aLen + SRTP_MAX_EXPANSION));
 
   RUN_ON_THREAD(
-    sts_thread_,
+    mStsThread,
     WrapRunnable(RefPtr<MediaPipeline::PipelineTransport>(this),
                  &MediaPipeline::PipelineTransport::SendRtpRtcpPacket_s,
                  buf,
                  false),
     NS_DISPATCH_NORMAL);
 
   return NS_OK;
 }
 
 void
 MediaPipelineTransmit::PipelineListener::UnsetTrackId(
-  MediaStreamGraphImpl* graph)
+  MediaStreamGraphImpl* aGraph)
 {
   class Message : public ControlMessage
   {
   public:
     explicit Message(PipelineListener* listener)
       : ControlMessage(nullptr)
-      , listener_(listener)
+      , mListener(listener)
     {
     }
-    virtual void Run() override { listener_->UnsetTrackIdImpl(); }
-    RefPtr<PipelineListener> listener_;
+    virtual void Run() override { mListener->UnsetTrackIdImpl(); }
+    RefPtr<PipelineListener> mListener;
   };
-  graph->AppendMessage(MakeUnique<Message>(this));
+  aGraph->AppendMessage(MakeUnique<Message>(this));
 }
 // Called if we're attached with AddDirectListener()
 void
 MediaPipelineTransmit::PipelineListener::NotifyRealtimeTrackData(
-  MediaStreamGraph* graph,
-  StreamTime offset,
-  const MediaSegment& media)
+  MediaStreamGraph* aGraph,
+  StreamTime aOffset,
+  const MediaSegment& aMedia)
 {
   CSFLogDebug(
     LOGTAG,
     "MediaPipeline::NotifyRealtimeTrackData() listener=%p, offset=%" PRId64
     ", duration=%" PRId64,
     this,
-    offset,
-    media.GetDuration());
+    aOffset,
+    aMedia.GetDuration());
 
-  if (media.GetType() == MediaSegment::VIDEO) {
+  if (aMedia.GetType() == MediaSegment::VIDEO) {
     // We have to call the upstream NotifyRealtimeTrackData and
     // MediaStreamVideoSink will route them to SetCurrentFrames.
-    MediaStreamVideoSink::NotifyRealtimeTrackData(graph, offset, media);
+    MediaStreamVideoSink::NotifyRealtimeTrackData(aGraph, aOffset, aMedia);
     return;
   }
 
-  NewData(media, graph->GraphRate());
+  NewData(aMedia, aGraph->GraphRate());
 }
 
 void
 MediaPipelineTransmit::PipelineListener::NotifyQueuedChanges(
-  MediaStreamGraph* graph,
-  StreamTime offset,
-  const MediaSegment& queued_media)
+  MediaStreamGraph* aGraph,
+  StreamTime aOffset,
+  const MediaSegment& aQueuedMedia)
 {
   CSFLogDebug(LOGTAG, "MediaPipeline::NotifyQueuedChanges()");
 
-  if (queued_media.GetType() == MediaSegment::VIDEO) {
+  if (aQueuedMedia.GetType() == MediaSegment::VIDEO) {
     // We always get video from SetCurrentFrames().
     return;
   }
 
-  if (direct_connect_) {
+  if (mDirectConnect) {
     // ignore non-direct data if we're also getting direct data
     return;
   }
 
   size_t rate;
-  if (graph) {
-    rate = graph->GraphRate();
+  if (aGraph) {
+    rate = aGraph->GraphRate();
   } else {
     // When running tests, graph may be null. In that case use a default.
     rate = 16000;
   }
-  NewData(queued_media, rate);
+  NewData(aQueuedMedia, rate);
 }
 
 void
 MediaPipelineTransmit::PipelineListener::NotifyDirectListenerInstalled(
   InstallationResult aResult)
 {
   CSFLogInfo(
     LOGTAG,
     "MediaPipeline::NotifyDirectListenerInstalled() listener=%p, result=%d",
     this,
     static_cast<int32_t>(aResult));
 
-  direct_connect_ = InstallationResult::SUCCESS == aResult;
+  mDirectConnect = InstallationResult::SUCCESS == aResult;
 }
 
 void
 MediaPipelineTransmit::PipelineListener::NotifyDirectListenerUninstalled()
 {
   CSFLogInfo(LOGTAG,
              "MediaPipeline::NotifyDirectListenerUninstalled() listener=%p",
              this);
 
-  direct_connect_ = false;
+  mDirectConnect = false;
 }
 
 void
-MediaPipelineTransmit::PipelineListener::NewData(const MediaSegment& media,
+MediaPipelineTransmit::PipelineListener::NewData(const MediaSegment& aMedia,
                                                  TrackRate aRate /* = 0 */)
 {
-  if (!active_) {
+  if (!mActive) {
     CSFLogDebug(LOGTAG, "Discarding packets because transport not ready");
     return;
   }
 
-  if (conduit_->type() != (media.GetType() == MediaSegment::AUDIO
+  if (mConduit->type() != (aMedia.GetType() == MediaSegment::AUDIO
                              ? MediaSessionConduit::AUDIO
                              : MediaSessionConduit::VIDEO)) {
     MOZ_ASSERT(false,
                "The media type should always be correct since the "
                "listener is locked to a specific track");
     return;
   }
 
   // TODO(ekr@rtfm.com): For now assume that we have only one
   // track type and it's destined for us
   // See bug 784517
-  if (media.GetType() == MediaSegment::AUDIO) {
+  if (aMedia.GetType() == MediaSegment::AUDIO) {
     MOZ_RELEASE_ASSERT(aRate > 0);
 
     AudioSegment* audio =
-      const_cast<AudioSegment*>(static_cast<const AudioSegment*>(&media));
+      const_cast<AudioSegment*>(static_cast<const AudioSegment*>(&aMedia));
     for (AudioSegment::ChunkIterator iter(*audio); !iter.IsEnded();
          iter.Next()) {
-      audio_processing_->QueueAudioChunk(aRate, *iter, enabled_);
+      mAudioProcessing->QueueAudioChunk(aRate, *iter, mEnabled);
     }
   } else {
     VideoSegment* video =
-      const_cast<VideoSegment*>(static_cast<const VideoSegment*>(&media));
+      const_cast<VideoSegment*>(static_cast<const VideoSegment*>(&aMedia));
     VideoSegment::ChunkIterator iter(*video);
     for (VideoSegment::ChunkIterator iter(*video); !iter.IsEnded();
          iter.Next()) {
-      converter_->QueueVideoChunk(*iter, !enabled_);
+      mConverter->QueueVideoChunk(*iter, !mEnabled);
     }
   }
 }
 
 void
 MediaPipelineTransmit::PipelineListener::SetCurrentFrames(
   const VideoSegment& aSegment)
 {
   NewData(aSegment);
 }
 
 class TrackAddedCallback
 {
 public:
-  virtual void TrackAdded(TrackTicks current_ticks) = 0;
+  virtual void TrackAdded(TrackTicks aCurrentTicks) = 0;
 
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(TrackAddedCallback);
 
 protected:
   virtual ~TrackAddedCallback() {}
 };
 
 class GenericReceiveListener;
 
 class GenericReceiveCallback : public TrackAddedCallback
 {
 public:
-  explicit GenericReceiveCallback(GenericReceiveListener* listener)
-    : listener_(listener)
+  explicit GenericReceiveCallback(GenericReceiveListener* aListener)
+    : mListener(aListener)
   {
   }
 
-  void TrackAdded(TrackTicks time);
+  void TrackAdded(TrackTicks aTime);
 
 private:
-  RefPtr<GenericReceiveListener> listener_;
+  RefPtr<GenericReceiveListener> mListener;
 };
 
 class GenericReceiveListener : public MediaStreamListener
 {
 public:
-  explicit GenericReceiveListener(dom::MediaStreamTrack* track)
-    : track_(track)
-    , played_ticks_(0)
-    , principal_handle_(PRINCIPAL_HANDLE_NONE)
-    , listening_(false)
-    , maybe_track_needs_unmute_(true)
+  explicit GenericReceiveListener(dom::MediaStreamTrack* aTrack)
+    : mTrack(aTrack)
+    , mPlayedTicks(0)
+    , mPrincipalHandle(PRINCIPAL_HANDLE_NONE)
+    , mListening(false)
+    , mMaybeTrackNeedsUnmute(true)
   {
-    MOZ_ASSERT(track->GetInputStream()->AsSourceStream());
+    MOZ_ASSERT(aTrack->GetInputStream()->AsSourceStream());
   }
 
   virtual ~GenericReceiveListener()
   {
     NS_ReleaseOnMainThreadSystemGroup(
-      "GenericReceiveListener::track_", track_.forget());
+      "GenericReceiveListener::mTrack", mTrack.forget());
   }
 
   void AddSelf()
   {
-    if (!listening_) {
-      listening_ = true;
-      track_->GetInputStream()->AddListener(this);
-      maybe_track_needs_unmute_ = true;
+    if (!mListening) {
+      mListening = true;
+      mTrack->GetInputStream()->AddListener(this);
+      mMaybeTrackNeedsUnmute = true;
     }
   }
 
   void RemoveSelf()
   {
-    if (listening_) {
-      listening_ = false;
-      track_->GetInputStream()->RemoveListener(this);
+    if (mListening) {
+      mListening = false;
+      mTrack->GetInputStream()->RemoveListener(this);
     }
   }
 
   void OnRtpReceived()
   {
-    if (maybe_track_needs_unmute_) {
-      maybe_track_needs_unmute_ = false;
+    if (mMaybeTrackNeedsUnmute) {
+      mMaybeTrackNeedsUnmute = false;
       NS_DispatchToMainThread(NewRunnableMethod(
             "GenericReceiveListener::OnRtpReceived_m",
             this,
             &GenericReceiveListener::OnRtpReceived_m));
     }
   }
 
   void OnRtpReceived_m()
   {
-    if (listening_ && track_->Muted()) {
-      track_->MutedChanged(false);
+    if (mListening && mTrack->Muted()) {
+      mTrack->MutedChanged(false);
     }
   }
 
   void EndTrack()
   {
     CSFLogDebug(LOGTAG, "GenericReceiveListener ending track");
 
     // We do this on MSG to avoid it racing against StartTrack.
     class Message : public ControlMessage
     {
     public:
-      explicit Message(dom::MediaStreamTrack* track)
-        : ControlMessage(track->GetInputStream())
-        , track_id_(track->GetInputTrackId())
+      explicit Message(dom::MediaStreamTrack* aTrack)
+        : ControlMessage(aTrack->GetInputStream())
+        , mTrackId(aTrack->GetInputTrackId())
       {
       }
 
-      void Run() override { mStream->AsSourceStream()->EndTrack(track_id_); }
+      void Run() override { mStream->AsSourceStream()->EndTrack(mTrackId); }
 
-      const TrackID track_id_;
+      const TrackID mTrackId;
     };
 
-    track_->GraphImpl()->AppendMessage(MakeUnique<Message>(track_));
+    mTrack->GraphImpl()->AppendMessage(MakeUnique<Message>(mTrack));
     // This breaks the cycle with the SourceMediaStream
-    track_->GetInputStream()->RemoveListener(this);
+    mTrack->GetInputStream()->RemoveListener(this);
   }
 
   // Must be called on the main thread
-  void SetPrincipalHandle_m(const PrincipalHandle& principal_handle)
+  void SetPrincipalHandle_m(const PrincipalHandle& aPrincipalHandle)
   {
     class Message : public ControlMessage
     {
     public:
-      Message(GenericReceiveListener* listener,
-              const PrincipalHandle& principal_handle)
+      Message(GenericReceiveListener* aListener,
+              const PrincipalHandle& aPrincipalHandle)
         : ControlMessage(nullptr)
-        , listener_(listener)
-        , principal_handle_(principal_handle)
+        , mListener(aListener)
+        , mPrincipalHandle(aPrincipalHandle)
       {
       }
 
       void Run() override
       {
-        listener_->SetPrincipalHandle_msg(principal_handle_);
+        mListener->SetPrincipalHandle_msg(mPrincipalHandle);
       }
 
-      RefPtr<GenericReceiveListener> listener_;
-      PrincipalHandle principal_handle_;
+      RefPtr<GenericReceiveListener> mListener;
+      PrincipalHandle mPrincipalHandle;
     };
 
-    track_->GraphImpl()->AppendMessage(
-      MakeUnique<Message>(this, principal_handle));
+    mTrack->GraphImpl()->AppendMessage(
+      MakeUnique<Message>(this, aPrincipalHandle));
   }
 
   // Must be called on the MediaStreamGraph thread
-  void SetPrincipalHandle_msg(const PrincipalHandle& principal_handle)
+  void SetPrincipalHandle_msg(const PrincipalHandle& aPrincipalHandle)
   {
-    principal_handle_ = principal_handle;
+    mPrincipalHandle = aPrincipalHandle;
   }
 
 protected:
-  RefPtr<dom::MediaStreamTrack> track_;
-  TrackTicks played_ticks_;
-  PrincipalHandle principal_handle_;
-  bool listening_;
-  Atomic<bool> maybe_track_needs_unmute_;
+  RefPtr<dom::MediaStreamTrack> mTrack;
+  TrackTicks mPlayedTicks;
+  PrincipalHandle mPrincipalHandle;
+  bool mListening;
+  Atomic<bool> mMaybeTrackNeedsUnmute;
 };
 
-MediaPipelineReceive::MediaPipelineReceive(const std::string& pc,
-                                           nsCOMPtr<nsIEventTarget> main_thread,
-                                           nsCOMPtr<nsIEventTarget> sts_thread,
-                                           RefPtr<MediaSessionConduit> conduit)
-  : MediaPipeline(pc, RECEIVE, main_thread, sts_thread, conduit)
-  , segments_added_(0)
+MediaPipelineReceive::MediaPipelineReceive(const std::string& aPc,
+                                           nsCOMPtr<nsIEventTarget> aMainThread,
+                                           nsCOMPtr<nsIEventTarget> aStsThread,
+                                           RefPtr<MediaSessionConduit> aConduit)
+  : MediaPipeline(aPc, RECEIVE, aMainThread, aStsThread, aConduit)
 {
 }
 
 MediaPipelineReceive::~MediaPipelineReceive() {}
 
 class MediaPipelineReceiveAudio::PipelineListener
   : public GenericReceiveListener
 {
 public:
-  PipelineListener(dom::MediaStreamTrack* track,
-                   const RefPtr<MediaSessionConduit>& conduit)
-    : GenericReceiveListener(track)
-    , conduit_(conduit)
-    , last_log_(0)
+  PipelineListener(dom::MediaStreamTrack* aTrack,
+                   const RefPtr<MediaSessionConduit>& aConduit)
+    : GenericReceiveListener(aTrack)
+    , mConduit(aConduit)
+    , mLastLog(0)
   {
   }
 
   ~PipelineListener()
   {
     if (!NS_IsMainThread()) {
       // release conduit on mainthread.  Must use forget()!
       nsresult rv =
-        NS_DispatchToMainThread(new ConduitDeleteEvent(conduit_.forget()));
+        NS_DispatchToMainThread(new ConduitDeleteEvent(mConduit.forget()));
       MOZ_ASSERT(!NS_FAILED(rv), "Could not dispatch conduit shutdown to main");
       if (NS_FAILED(rv)) {
         MOZ_CRASH();
       }
     } else {
-      conduit_ = nullptr;
+      mConduit = nullptr;
     }
   }
 
   // Implement MediaStreamListener
-  void NotifyPull(MediaStreamGraph* graph, StreamTime desired_time) override
+  void NotifyPull(MediaStreamGraph* aGraph, StreamTime aDesiredTime) override
   {
     RefPtr<SourceMediaStream> source =
-      track_->GetInputStream()->AsSourceStream();
+      mTrack->GetInputStream()->AsSourceStream();
     MOZ_ASSERT(source);
     if (!source) {
       CSFLogError(LOGTAG, "NotifyPull() called from a non-SourceMediaStream");
       return;
     }
 
-    TrackRate rate = graph->GraphRate();
-    uint32_t samples_per_10ms = rate / 100;
+    const TrackRate rate = aGraph->GraphRate();
+    uint32_t samplesPer10ms = rate / 100;
     // Determine how many frames we need.
     // As we get frames from conduit_ at the same rate as the graph's rate,
     // the number of frames needed straightfully determined.
-    TrackTicks framesNeeded = desired_time - played_ticks_;
+    TrackTicks framesNeeded = aDesiredTime - mPlayedTicks;
 
     while (framesNeeded >= 0) {
-      int16_t scratch_buffer[AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t)];
+      int16_t scratchBuffer[AUDIO_SAMPLE_BUFFER_MAX_BYTES / sizeof(int16_t)];
 
-      int samples_length;
+      int samplesLength;
 
       // This fetches 10ms of data, either mono or stereo
       MediaConduitErrorCode err =
-        static_cast<AudioSessionConduit*>(conduit_.get())
-          ->GetAudioFrame(scratch_buffer,
+        static_cast<AudioSessionConduit*>(mConduit.get())
+          ->GetAudioFrame(scratchBuffer,
                           rate,
                           0, // TODO(ekr@rtfm.com): better estimate of "capture"
                              // (really playout) delay
-                          samples_length);
+                          samplesLength);
 
       if (err != kMediaConduitNoError) {
         // Insert silence on conduit/GIPS failure (extremely unlikely)
         CSFLogError(LOGTAG,
                     "Audio conduit failed (%d) to return data @ %" PRId64
                     " (desired %" PRId64 " -> %f)",
                     err,
-                    played_ticks_,
-                    desired_time,
-                    source->StreamTimeToSeconds(desired_time));
+                    mPlayedTicks,
+                    aDesiredTime,
+                    source->StreamTimeToSeconds(aDesiredTime));
         // if this is not enough we'll loop and provide more
-        samples_length = samples_per_10ms;
-        PodArrayZero(scratch_buffer);
+        samplesLength = samplesPer10ms;
+        PodArrayZero(scratchBuffer);
       }
 
-      MOZ_ASSERT(samples_length * sizeof(uint16_t) <=
+      MOZ_ASSERT(samplesLength * sizeof(uint16_t) <=
                  AUDIO_SAMPLE_BUFFER_MAX_BYTES);
 
       CSFLogDebug(
-        LOGTAG, "Audio conduit returned buffer of length %u", samples_length);
+        LOGTAG, "Audio conduit returned buffer of length %d", samplesLength);
 
       RefPtr<SharedBuffer> samples =
-        SharedBuffer::Create(samples_length * sizeof(uint16_t));
-      int16_t* samples_data = static_cast<int16_t*>(samples->Data());
+        SharedBuffer::Create(samplesLength * sizeof(uint16_t));
+      int16_t* samplesData = static_cast<int16_t*>(samples->Data());
       AudioSegment segment;
       // We derive the number of channels of the stream from the number of
       // samples the AudioConduit gives us, considering it gives us packets of
       // 10ms and we know the rate.
-      uint32_t channelCount = samples_length / samples_per_10ms;
+      uint32_t channelCount = samplesLength / samplesPer10ms;
       AutoTArray<int16_t*,2> channels;
       AutoTArray<const int16_t*,2> outputChannels;
-      size_t frames = samples_length / channelCount;
+      size_t frames = samplesLength / channelCount;
 
       channels.SetLength(channelCount);
 
       size_t offset = 0;
       for (size_t i = 0; i < channelCount; i++) {
-        channels[i] = samples_data + offset;
+        channels[i] = samplesData + offset;
         offset += frames;
       }
 
       DeinterleaveAndConvertBuffer(
-        scratch_buffer, frames, channelCount, channels.Elements());
+        scratchBuffer, frames, channelCount, channels.Elements());
 
       outputChannels.AppendElements(channels);
 
       segment.AppendFrames(
-        samples.forget(), outputChannels, frames, principal_handle_);
+        samples.forget(), outputChannels, frames, mPrincipalHandle);
 
       // Handle track not actually added yet or removed/finished
-      if (source->AppendToTrack(track_->GetInputTrackId(), &segment)) {
+      if (source->AppendToTrack(mTrack->GetInputTrackId(), &segment)) {
         framesNeeded -= frames;
-        played_ticks_ += frames;
+        mPlayedTicks += frames;
         if (MOZ_LOG_TEST(AudioLogModule(), LogLevel::Debug)) {
-          if (played_ticks_ > last_log_ + rate) { // ~ 1 second
+          if (mPlayedTicks > mLastLog + rate) { // ~ 1 second
             MOZ_LOG(
               AudioLogModule(),
               LogLevel::Debug,
               ("%p: Inserting %zu samples into track %d, total = %" PRIu64,
                (void*)this,
                frames,
-               track_->GetInputTrackId(),
-               played_ticks_));
-            last_log_ = played_ticks_;
+               mTrack->GetInputTrackId(),
+               mPlayedTicks));
+            mLastLog = mPlayedTicks;
           }
         }
       } else {
         CSFLogError(LOGTAG, "AppendToTrack failed");
         // we can't un-read the data, but that's ok since we don't want to
         // buffer - but don't i-loop!
         return;
       }
     }
   }
 
 private:
-  RefPtr<MediaSessionConduit> conduit_;
-  TrackTicks last_log_; // played_ticks_ when we last logged
+  RefPtr<MediaSessionConduit> mConduit;
+  TrackTicks mLastLog; // mPlayedTicks when we last logged
 };
 
 MediaPipelineReceiveAudio::MediaPipelineReceiveAudio(
-  const std::string& pc,
-  nsCOMPtr<nsIEventTarget> main_thread,
-  nsCOMPtr<nsIEventTarget> sts_thread,
-  RefPtr<AudioSessionConduit> conduit,
+  const std::string& aPc,
+  nsCOMPtr<nsIEventTarget> aMainThread,
+  nsCOMPtr<nsIEventTarget> aStsThread,
+  RefPtr<AudioSessionConduit> aConduit,
   dom::MediaStreamTrack* aTrack)
-  : MediaPipelineReceive(pc, main_thread, sts_thread, conduit)
-  , listener_(aTrack ? new PipelineListener(aTrack, conduit_) : nullptr)
+  : MediaPipelineReceive(aPc, aMainThread, aStsThread, aConduit)
+  , mListener(aTrack ? new PipelineListener(aTrack, mConduit) : nullptr)
 {
-  description_ = pc_ + "| Receive audio";
+  mDescription = mPc + "| Receive audio";
 }
 
 void
 MediaPipelineReceiveAudio::DetachMedia()
 {
-  ASSERT_ON_THREAD(main_thread_);
-  if (listener_) {
-    listener_->EndTrack();
-    listener_ = nullptr;
+  ASSERT_ON_THREAD(mMainThread);
+  if (mListener) {
+    mListener->EndTrack();
+    mListener = nullptr;
   }
 }
 
 void
 MediaPipelineReceiveAudio::SetPrincipalHandle_m(
-  const PrincipalHandle& principal_handle)
+  const PrincipalHandle& aPrincipalHandle)
 {
-  if (listener_) {
-    listener_->SetPrincipalHandle_m(principal_handle);
+  if (mListener) {
+    mListener->SetPrincipalHandle_m(aPrincipalHandle);
   }
 }
 
 void
 MediaPipelineReceiveAudio::Start()
 {
-  conduit_->StartReceiving();
-  if (listener_) {
-    listener_->AddSelf();
+  mConduit->StartReceiving();
+  if (mListener) {
+    mListener->AddSelf();
   }
 }
 
 void
 MediaPipelineReceiveAudio::Stop()
 {
-  if (listener_) {
-    listener_->RemoveSelf();
+  if (mListener) {
+    mListener->RemoveSelf();
   }
-  conduit_->StopReceiving();
+  mConduit->StopReceiving();
 }
 
 void
 MediaPipelineReceiveAudio::OnRtpPacketReceived()
 {
-  if (listener_) {
-    listener_->OnRtpReceived();
+  if (mListener) {
+    mListener->OnRtpReceived();
   }
 }
 
 class MediaPipelineReceiveVideo::PipelineListener
   : public GenericReceiveListener
 {
 public:
-  explicit PipelineListener(dom::MediaStreamTrack* track)
-    : GenericReceiveListener(track)
-    , image_container_()
-    , image_()
-    , mutex_("Video PipelineListener")
+  explicit PipelineListener(dom::MediaStreamTrack* aTrack)
+    : GenericReceiveListener(aTrack)
+    , mImageContainer(
+        LayerManager::CreateImageContainer(ImageContainer::ASYNCHRONOUS))
+    , mMutex("Video PipelineListener")
   {
-    image_container_ =
-      LayerManager::CreateImageContainer(ImageContainer::ASYNCHRONOUS);
   }
 
   // Implement MediaStreamListener
-  void NotifyPull(MediaStreamGraph* graph, StreamTime desired_time) override
+  void NotifyPull(MediaStreamGraph* aGraph, StreamTime aDesiredTime) override
   {
-    MutexAutoLock lock(mutex_);
+    MutexAutoLock lock(mMutex);
 
-    RefPtr<Image> image = image_;
-    StreamTime delta = desired_time - played_ticks_;
+    RefPtr<Image> image = mImage;
+    StreamTime delta = aDesiredTime - mPlayedTicks;
 
     // Don't append if we've already provided a frame that supposedly
     // goes past the current aDesiredTime Doing so means a negative
     // delta and thus messes up handling of the graph
     if (delta > 0) {
       VideoSegment segment;
-      IntSize size = image ? image->GetSize() : IntSize(width_, height_);
-      segment.AppendFrame(image.forget(), delta, size, principal_handle_);
+      IntSize size = image ? image->GetSize() : IntSize(mWidth, mHeight);
+      segment.AppendFrame(image.forget(), delta, size, mPrincipalHandle);
       // Handle track not actually added yet or removed/finished
-      if (track_->GetInputStream()->AsSourceStream()->AppendToTrack(
-            track_->GetInputTrackId(), &segment)) {
-        played_ticks_ = desired_time;
-      } else {
+      if (!mTrack->GetInputStream()->AsSourceStream()->AppendToTrack(
+            mTrack->GetInputTrackId(), &segment)) {
         CSFLogError(LOGTAG, "AppendToTrack failed");
         return;
       }
+      mPlayedTicks = aDesiredTime;
     }
   }
 
   // Accessors for external writes from the renderer
-  void FrameSizeChange(unsigned int width,
-                       unsigned int height,
-                       unsigned int number_of_streams)
+  void FrameSizeChange(unsigned int aWidth,
+                       unsigned int aHeight,
+                       unsigned int aNumberOfStreams)
   {
-    MutexAutoLock enter(mutex_);
+    MutexAutoLock enter(mMutex);
 
-    width_ = width;
-    height_ = height;
+    mWidth = aWidth;
+    mHeight = aHeight;
   }
 
-  void RenderVideoFrame(const webrtc::VideoFrameBuffer& buffer,
-                        uint32_t time_stamp,
-                        int64_t render_time)
+  void RenderVideoFrame(const webrtc::VideoFrameBuffer& aBuffer,
+                        uint32_t aTimeStamp,
+                        int64_t aRenderTime)
   {
-    if (buffer.native_handle()) {
+    if (aBuffer.native_handle()) {
       // We assume that only native handles are used with the
       // WebrtcMediaDataDecoderCodec decoder.
-      RefPtr<Image> image = static_cast<Image*>(buffer.native_handle());
-      MutexAutoLock lock(mutex_);
-      image_ = image;
+      RefPtr<Image> image = static_cast<Image*>(aBuffer.native_handle());
+      MutexAutoLock lock(mMutex);
+      mImage = image;
       return;
     }
 
-    MOZ_ASSERT(buffer.DataY());
+    MOZ_ASSERT(aBuffer.DataY());
     // Create a video frame using |buffer|.
     RefPtr<PlanarYCbCrImage> yuvImage =
-      image_container_->CreatePlanarYCbCrImage();
+      mImageContainer->CreatePlanarYCbCrImage();
 
     PlanarYCbCrData yuvData;
-    yuvData.mYChannel = const_cast<uint8_t*>(buffer.DataY());
-    yuvData.mYSize = IntSize(buffer.width(), buffer.height());
-    yuvData.mYStride = buffer.StrideY();
-    MOZ_ASSERT(buffer.StrideU() == buffer.StrideV());
-    yuvData.mCbCrStride = buffer.StrideU();
-    yuvData.mCbChannel = const_cast<uint8_t*>(buffer.DataU());
-    yuvData.mCrChannel = const_cast<uint8_t*>(buffer.DataV());
+    yuvData.mYChannel = const_cast<uint8_t*>(aBuffer.DataY());
+    yuvData.mYSize = IntSize(aBuffer.width(), aBuffer.height());
+    yuvData.mYStride = aBuffer.StrideY();
+    MOZ_ASSERT(aBuffer.StrideU() == aBuffer.StrideV());
+    yuvData.mCbCrStride = aBuffer.StrideU();
+    yuvData.mCbChannel = const_cast<uint8_t*>(aBuffer.DataU());
+    yuvData.mCrChannel = const_cast<uint8_t*>(aBuffer.DataV());
     yuvData.mCbCrSize =
-      IntSize((buffer.width() + 1) >> 1, (buffer.height() + 1) >> 1);
+      IntSize((aBuffer.width() + 1) >> 1, (aBuffer.height() + 1) >> 1);
     yuvData.mPicX = 0;
     yuvData.mPicY = 0;
-    yuvData.mPicSize = IntSize(buffer.width(), buffer.height());
+    yuvData.mPicSize = IntSize(aBuffer.width(), aBuffer.height());
     yuvData.mStereoMode = StereoMode::MONO;
 
     if (!yuvImage->CopyData(yuvData)) {
       MOZ_ASSERT(false);
       return;
     }
 
-    MutexAutoLock lock(mutex_);
-    image_ = yuvImage;
+    MutexAutoLock lock(mMutex);
+    mImage = yuvImage;
   }
 
 private:
-  int width_;
-  int height_;
-  RefPtr<layers::ImageContainer> image_container_;
-  RefPtr<layers::Image> image_;
-  Mutex mutex_; // Mutex for processing WebRTC frames.
-                // Protects image_ against:
+  int mWidth = 0;
+  int mHeight = 0;
+  RefPtr<layers::ImageContainer> mImageContainer;
+  RefPtr<layers::Image> mImage;
+  Mutex mMutex; // Mutex for processing WebRTC frames.
+                // Protects mImage against:
                 // - Writing from the GIPS thread
                 // - Reading from the MSG thread
 };
 
 class MediaPipelineReceiveVideo::PipelineRenderer
   : public mozilla::VideoRenderer
 {
 public:
-  explicit PipelineRenderer(MediaPipelineReceiveVideo* pipeline)
-    : pipeline_(pipeline)
+  explicit PipelineRenderer(MediaPipelineReceiveVideo* aPipeline)
+    : mPipeline(aPipeline)
   {
   }
 
-  void Detach() { pipeline_ = nullptr; }
+  void Detach() { mPipeline = nullptr; }
 
   // Implement VideoRenderer
-  void FrameSizeChange(unsigned int width,
-                       unsigned int height,
-                       unsigned int number_of_streams) override
+  void FrameSizeChange(unsigned int aWidth,
+                       unsigned int aHeight,
+                       unsigned int aNumberOfStreams) override
   {
-    pipeline_->listener_->FrameSizeChange(width, height, number_of_streams);
+    mPipeline->mListener->FrameSizeChange(aWidth, aHeight, aNumberOfStreams);
   }
 
-  void RenderVideoFrame(const webrtc::VideoFrameBuffer& buffer,
-                        uint32_t time_stamp,
-                        int64_t render_time) override
+  void RenderVideoFrame(const webrtc::VideoFrameBuffer& aBuffer,
+                        uint32_t aTimeStamp,
+                        int64_t aRenderTime) override
   {
-    pipeline_->listener_->RenderVideoFrame(buffer, time_stamp, render_time);
+    mPipeline->mListener->RenderVideoFrame(aBuffer, aTimeStamp, aRenderTime);
   }
 
 private:
-  MediaPipelineReceiveVideo* pipeline_; // Raw pointer to avoid cycles
+  MediaPipelineReceiveVideo* mPipeline; // Raw pointer to avoid cycles
 };
 
 MediaPipelineReceiveVideo::MediaPipelineReceiveVideo(
-  const std::string& pc,
-  nsCOMPtr<nsIEventTarget> main_thread,
-  nsCOMPtr<nsIEventTarget> sts_thread,
-  RefPtr<VideoSessionConduit> conduit,
+  const std::string& aPc,
+  nsCOMPtr<nsIEventTarget> aMainThread,
+  nsCOMPtr<nsIEventTarget> aStsThread,
+  RefPtr<VideoSessionConduit> aConduit,
   dom::MediaStreamTrack* aTrack)
-  : MediaPipelineReceive(pc, main_thread, sts_thread, conduit)
-  , renderer_(new PipelineRenderer(this))
-  , listener_(aTrack ? new PipelineListener(aTrack) : nullptr)
+  : MediaPipelineReceive(aPc, aMainThread, aStsThread, aConduit)
+  , mRenderer(new PipelineRenderer(this))
+  , mListener(aTrack ? new PipelineListener(aTrack) : nullptr)
 {
-  description_ = pc_ + "| Receive video";
-  conduit->AttachRenderer(renderer_);
+  mDescription = mPc + "| Receive video";
+  aConduit->AttachRenderer(mRenderer);
 }
 
 void
 MediaPipelineReceiveVideo::DetachMedia()
 {
-  ASSERT_ON_THREAD(main_thread_);
+  ASSERT_ON_THREAD(mMainThread);
 
   // stop generating video and thus stop invoking the PipelineRenderer
   // and PipelineListener - the renderer has a raw ptr to the Pipeline to
   // avoid cycles, and the render callbacks are invoked from a different
   // thread so simple null-checks would cause TSAN bugs without locks.
-  static_cast<VideoSessionConduit*>(conduit_.get())->DetachRenderer();
-  if (listener_) {
-    listener_->EndTrack();
-    listener_ = nullptr;
+  static_cast<VideoSessionConduit*>(mConduit.get())->DetachRenderer();
+  if (mListener) {
+    mListener->EndTrack();
+    mListener = nullptr;
   }
 }
 
 void
 MediaPipelineReceiveVideo::SetPrincipalHandle_m(
-  const PrincipalHandle& principal_handle)
+  const PrincipalHandle& aPrincipalHandle)
 {
-  if (listener_) {
-    listener_->SetPrincipalHandle_m(principal_handle);
+  if (mListener) {
+    mListener->SetPrincipalHandle_m(aPrincipalHandle);
   }
 }
 
 void
 MediaPipelineReceiveVideo::Start()
 {
-  conduit_->StartReceiving();
-  if (listener_) {
-    listener_->AddSelf();
+  mConduit->StartReceiving();
+  if (mListener) {
+    mListener->AddSelf();
   }
 }
 
 void
 MediaPipelineReceiveVideo::Stop()
 {
-  if (listener_) {
-    listener_->RemoveSelf();
+  if (mListener) {
+    mListener->RemoveSelf();
   }
-  conduit_->StopReceiving();
+  mConduit->StopReceiving();
 }
 
 void
 MediaPipelineReceiveVideo::OnRtpPacketReceived()
 {
-  if (listener_) {
-    listener_->OnRtpReceived();
+  if (mListener) {
+    mListener->OnRtpReceived();
   }
 }
 
 DOMHighResTimeStamp
 MediaPipeline::GetNow()
 {
   return webrtc::Clock::GetRealTimeClock()->TimeInMilliseconds();
 }
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.h
@@ -73,81 +73,81 @@ class SourceMediaStream;
 //
 // For a transmitting conduit, "output" is RTP and "input" is RTCP.
 // For a receiving conduit, "input" is RTP and "output" is RTCP.
 //
 
 class MediaPipeline : public sigslot::has_slots<>
 {
 public:
-  enum Direction
+  enum DirectionType
   {
     TRANSMIT,
     RECEIVE
   };
   enum State
   {
     MP_CONNECTING,
     MP_OPEN,
     MP_CLOSED
   };
-  MediaPipeline(const std::string& pc,
-                Direction direction,
-                nsCOMPtr<nsIEventTarget> main_thread,
-                nsCOMPtr<nsIEventTarget> sts_thread,
-                RefPtr<MediaSessionConduit> conduit);
+  MediaPipeline(const std::string& aPc,
+                DirectionType aDirection,
+                nsCOMPtr<nsIEventTarget> aMainThread,
+                nsCOMPtr<nsIEventTarget> aStsThread,
+                RefPtr<MediaSessionConduit> aConduit);
 
   virtual void Start() = 0;
   virtual void Stop() = 0;
   virtual void DetachMedia() {}
 
-  void SetLevel(size_t level) { level_ = level; }
+  void SetLevel(size_t aLevel) { mLevel = aLevel; }
 
   // Must be called on the main thread.
   void Shutdown_m();
 
-  void UpdateTransport_m(RefPtr<TransportFlow> rtp_transport,
-                         RefPtr<TransportFlow> rtcp_transport,
-                         nsAutoPtr<MediaPipelineFilter> filter);
+  void UpdateTransport_m(RefPtr<TransportFlow> aRtpTransport,
+                         RefPtr<TransportFlow> aRtcpTransport,
+                         nsAutoPtr<MediaPipelineFilter> aFilter);
 
-  void UpdateTransport_s(RefPtr<TransportFlow> rtp_transport,
-                         RefPtr<TransportFlow> rtcp_transport,
-                         nsAutoPtr<MediaPipelineFilter> filter);
+  void UpdateTransport_s(RefPtr<TransportFlow> aRtpTransport,
+                         RefPtr<TransportFlow> aRtcpTransport,
+                         nsAutoPtr<MediaPipelineFilter> aFilter);
 
   // Used only for testing; adds RTP header extension for RTP Stream Id with
   // the given id.
-  void AddRIDExtension_m(size_t extension_id);
-  void AddRIDExtension_s(size_t extension_id);
+  void AddRIDExtension_m(size_t aExtensionId);
+  void AddRIDExtension_s(size_t aExtensionId);
   // Used only for testing; installs a MediaPipelineFilter that filters
   // everything but the given RID
-  void AddRIDFilter_m(const std::string& rid);
-  void AddRIDFilter_s(const std::string& rid);
+  void AddRIDFilter_m(const std::string& aRid);
+  void AddRIDFilter_s(const std::string& aRid);
 
-  virtual Direction direction() const { return direction_; }
-  int level() const { return level_; }
+  virtual DirectionType Direction() const { return mDirection; }
+  int Level() const { return mLevel; }
   virtual bool IsVideo() const = 0;
 
-  bool IsDoingRtcpMux() const { return (rtp_.type_ == MUX); }
+  bool IsDoingRtcpMux() const { return mRtp.mType == MUX; }
 
   class RtpCSRCStats
   {
   public:
     // Gets an expiration time for CRC info given a reference time,
     //   this reference time would normally be the time of calling.
     //   This value can then be used to check if a RtpCSRCStats
     //   has expired via Expired(...)
     static DOMHighResTimeStamp GetExpiryFromTime(
       const DOMHighResTimeStamp aTime);
 
     RtpCSRCStats(const uint32_t aCsrc, const DOMHighResTimeStamp aTime);
     ~RtpCSRCStats(){};
     // Initialize a webidl representation suitable for adding to a report.
     //   This assumes that the webidl object is empty.
     // @param aWebidlObj the webidl binding object to popluate
-    // @param aRtpInboundStreamId the associated RTCInboundRTPStreamStats.id
+    // @param aInboundRtpStreamId the associated RTCInboundRTPStreamStats.id
     void GetWebidlInstance(dom::RTCRTPContributingSourceStats& aWebidlObj,
                            const nsString& aInboundRtpStreamId) const;
     void SetTimestamp(const DOMHighResTimeStamp aTime) { mTimestamp = aTime; }
     // Check if the RtpCSRCStats has expired, checks against a
     //   given expiration time.
     bool Expired(const DOMHighResTimeStamp aExpiry) const
     {
       return mTimestamp < aExpiry;
@@ -161,159 +161,159 @@ public:
 
   // Gets the gathered contributing source stats for the last expiration period.
   // @param aId the stream id to use for populating inboundRtpStreamId field
   // @param aArr the array to append the stats objects to
   void GetContributingSourceStats(
     const nsString& aInboundStreamId,
     FallibleTArray<dom::RTCRTPContributingSourceStats>& aArr) const;
 
-  int32_t rtp_packets_sent() const { return rtp_packets_sent_; }
-  int64_t rtp_bytes_sent() const { return rtp_bytes_sent_; }
-  int32_t rtcp_packets_sent() const { return rtcp_packets_sent_; }
-  int32_t rtp_packets_received() const { return rtp_packets_received_; }
-  int64_t rtp_bytes_received() const { return rtp_bytes_received_; }
-  int32_t rtcp_packets_received() const { return rtcp_packets_received_; }
+  int32_t RtpPacketsSent() const { return mRtpPacketsSent; }
+  int64_t RtpBytesSent() const { return mRtpBytesSent; }
+  int32_t RtcpPacketsSent() const { return mRtcpPacketsSent; }
+  int32_t RtpPacketsReceived() const { return mRtpPacketsReceived; }
+  int64_t RtpBytesReceived() const { return mRtpBytesReceived; }
+  int32_t RtcpPacketsReceived() const { return mRtcpPacketsReceived; }
 
-  MediaSessionConduit* Conduit() const { return conduit_; }
+  MediaSessionConduit* Conduit() const { return mConduit; }
 
   // Thread counting
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaPipeline)
 
   typedef enum { RTP, RTCP, MUX, MAX_RTP_TYPE } RtpType;
 
   // Separate class to allow ref counting
   class PipelineTransport : public TransportInterface
   {
   public:
     // Implement the TransportInterface functions
-    explicit PipelineTransport(MediaPipeline* pipeline)
-      : pipeline_(pipeline)
-      , sts_thread_(pipeline->sts_thread_)
+    explicit PipelineTransport(MediaPipeline* aPipeline)
+      : mPipeline(aPipeline)
+      , mStsThread(aPipeline->mStsThread)
     {
     }
 
-    void Attach(MediaPipeline* pipeline) { pipeline_ = pipeline; }
-    void Detach() { pipeline_ = nullptr; }
-    MediaPipeline* pipeline() const { return pipeline_; }
+    void Attach(MediaPipeline* aPipeline) { mPipeline = aPipeline; }
+    void Detach() { mPipeline = nullptr; }
+    MediaPipeline* Pipeline() const { return mPipeline; }
 
-    virtual nsresult SendRtpPacket(const uint8_t* data, size_t len);
-    virtual nsresult SendRtcpPacket(const uint8_t* data, size_t len);
+    virtual nsresult SendRtpPacket(const uint8_t* aData, size_t aLen);
+    virtual nsresult SendRtcpPacket(const uint8_t* aData, size_t aLen);
 
   private:
-    nsresult SendRtpRtcpPacket_s(nsAutoPtr<DataBuffer> data, bool is_rtp);
+    nsresult SendRtpRtcpPacket_s(nsAutoPtr<DataBuffer> aData, bool aIsRtp);
 
     // Creates a cycle, which we break with Detach
-    RefPtr<MediaPipeline> pipeline_;
-    nsCOMPtr<nsIEventTarget> sts_thread_;
+    RefPtr<MediaPipeline> mPipeline;
+    nsCOMPtr<nsIEventTarget> mStsThread;
   };
 
 protected:
   virtual ~MediaPipeline();
   nsresult AttachTransport_s();
   friend class PipelineTransport;
 
   class TransportInfo
   {
   public:
-    TransportInfo(RefPtr<TransportFlow> flow, RtpType type)
-      : transport_(flow)
-      , state_(MP_CONNECTING)
-      , type_(type)
+    TransportInfo(RefPtr<TransportFlow> aFlow, RtpType aType)
+      : mTransport(aFlow)
+      , mState(MP_CONNECTING)
+      , mType(aType)
     {
     }
 
     void Detach()
     {
-      transport_ = nullptr;
-      send_srtp_ = nullptr;
-      recv_srtp_ = nullptr;
+      mTransport = nullptr;
+      mSendSrtp = nullptr;
+      mRecvSrtp = nullptr;
     }
 
-    RefPtr<TransportFlow> transport_;
-    State state_;
-    RefPtr<SrtpFlow> send_srtp_;
-    RefPtr<SrtpFlow> recv_srtp_;
-    RtpType type_;
+    RefPtr<TransportFlow> mTransport;
+    State mState;
+    RefPtr<SrtpFlow> mSendSrtp;
+    RefPtr<SrtpFlow> mRecvSrtp;
+    RtpType mType;
   };
 
   // The transport is down
-  virtual nsresult TransportFailed_s(TransportInfo& info);
+  virtual nsresult TransportFailed_s(TransportInfo& aInfo);
   // The transport is ready
-  virtual nsresult TransportReady_s(TransportInfo& info);
-  void UpdateRtcpMuxState(TransportInfo& info);
+  virtual nsresult TransportReady_s(TransportInfo& aInfo);
+  void UpdateRtcpMuxState(TransportInfo& aInfo);
 
-  nsresult ConnectTransport_s(TransportInfo& info);
+  nsresult ConnectTransport_s(TransportInfo& aInfo);
 
-  TransportInfo* GetTransportInfo_s(TransportFlow* flow);
+  TransportInfo* GetTransportInfo_s(TransportFlow* aFlow);
 
-  void increment_rtp_packets_sent(int bytes);
-  void increment_rtcp_packets_sent();
-  void increment_rtp_packets_received(int bytes);
+  void IncrementRtpPacketsSent(int aBytes);
+  void IncrementRtcpPacketsSent();
+  void IncrementRtpPacketsReceived(int aBytes);
   virtual void OnRtpPacketReceived() {};
-  void increment_rtcp_packets_received();
+  void IncrementRtcpPacketsReceived();
 
-  virtual nsresult SendPacket(TransportFlow* flow, const void* data, int len);
+  virtual nsresult SendPacket(TransportFlow* aFlow, const void* aData, int aLen);
 
   // Process slots on transports
   void StateChange(TransportFlow* flow, TransportLayer::State);
-  void RtpPacketReceived(TransportLayer* layer,
-                         const unsigned char* data,
-                         size_t len);
-  void RtcpPacketReceived(TransportLayer* layer,
-                          const unsigned char* data,
-                          size_t len);
-  void PacketReceived(TransportLayer* layer,
-                      const unsigned char* data,
-                      size_t len);
+  void RtpPacketReceived(TransportLayer* aLayer,
+                         const unsigned char* aData,
+                         size_t aLen);
+  void RtcpPacketReceived(TransportLayer* aLayer,
+                          const unsigned char* aData,
+                          size_t aLen);
+  void PacketReceived(TransportLayer* aLayer,
+                      const unsigned char* aData,
+                      size_t aLen);
 
-  Direction direction_;
-  size_t level_;
-  RefPtr<MediaSessionConduit> conduit_; // Our conduit. Written on the main
+  DirectionType mDirection;
+  size_t mLevel;
+  RefPtr<MediaSessionConduit> mConduit; // Our conduit. Written on the main
                                         // thread. Read on STS thread.
 
   // The transport objects. Read/written on STS thread.
-  TransportInfo rtp_;
-  TransportInfo rtcp_;
+  TransportInfo mRtp;
+  TransportInfo mRtcp;
 
   // Pointers to the threads we need. Initialized at creation
   // and used all over the place.
-  nsCOMPtr<nsIEventTarget> main_thread_;
-  nsCOMPtr<nsIEventTarget> sts_thread_;
+  nsCOMPtr<nsIEventTarget> mMainThread;
+  nsCOMPtr<nsIEventTarget> mStsThread;
 
   // Created in c'tor. Referenced by the conduit.
-  RefPtr<PipelineTransport> transport_;
+  RefPtr<PipelineTransport> mTransport;
 
   // Only safe to access from STS thread.
   // Build into TransportInfo?
-  int32_t rtp_packets_sent_;
-  int32_t rtcp_packets_sent_;
-  int32_t rtp_packets_received_;
-  int32_t rtcp_packets_received_;
-  int64_t rtp_bytes_sent_;
-  int64_t rtp_bytes_received_;
+  int32_t mRtpPacketsSent;
+  int32_t mRtcpPacketsSent;
+  int32_t mRtpPacketsReceived;
+  int32_t mRtcpPacketsReceived;
+  int64_t mRtpBytesSent;
+  int64_t mRtpBytesReceived;
 
   // Only safe to access from STS thread.
-  std::map<uint32_t, RtpCSRCStats> csrc_stats_;
+  std::map<uint32_t, RtpCSRCStats> mCsrcStats;
 
   // Written in c'tor. Read on STS thread.
-  std::string pc_;
-  std::string description_;
+  std::string mPc;
+  std::string mDescription;
 
   // Written in c'tor, all following accesses are on the STS thread.
-  nsAutoPtr<MediaPipelineFilter> filter_;
-  nsAutoPtr<webrtc::RtpHeaderParser> rtp_parser_;
+  nsAutoPtr<MediaPipelineFilter> mFilter;
+  nsAutoPtr<webrtc::RtpHeaderParser> mRtpParser;
 
-  nsAutoPtr<PacketDumper> packet_dumper_;
+  nsAutoPtr<PacketDumper> mPacketDumper;
 
 private:
   // Gets the current time as a DOMHighResTimeStamp
   static DOMHighResTimeStamp GetNow();
 
-  bool IsRtp(const unsigned char* data, size_t len);
+  bool IsRtp(const unsigned char* aData, size_t aLen);
   // Must be called on the STS thread.  Must be called after DetachMedia().
   void DetachTransport_s();
 };
 
 class ConduitDeleteEvent : public Runnable
 {
 public:
   explicit ConduitDeleteEvent(already_AddRefed<MediaSessionConduit> aConduit)
@@ -329,151 +329,147 @@ private:
   RefPtr<MediaSessionConduit> mConduit;
 };
 
 // A specialization of pipeline for reading from an input device
 // and transmitting to the network.
 class MediaPipelineTransmit : public MediaPipeline
 {
 public:
-  // Set rtcp_transport to nullptr to use rtcp-mux
-  MediaPipelineTransmit(const std::string& pc,
-                        nsCOMPtr<nsIEventTarget> main_thread,
-                        nsCOMPtr<nsIEventTarget> sts_thread,
-                        bool is_video,
-                        dom::MediaStreamTrack* domtrack,
-                        RefPtr<MediaSessionConduit> conduit);
+  // NOTE: stale comment? This ctor has no aRtcpTransport parameter (rtcp-mux).
+  MediaPipelineTransmit(const std::string& aPc,
+                        nsCOMPtr<nsIEventTarget> aMainThread,
+                        nsCOMPtr<nsIEventTarget> aStsThread,
+                        bool aIsVideo,
+                        dom::MediaStreamTrack* aDomTrack,
+                        RefPtr<MediaSessionConduit> aConduit);
 
   void Start() override;
   void Stop() override;
 
   // written and used from MainThread
   bool IsVideo() const override;
 
   // When the principal of the domtrack changes, it calls through to here
   // so that we can determine whether to enable track transmission.
-  // `track` has to be null or equal `domtrack_` for us to apply the update.
-  virtual void UpdateSinkIdentity_m(dom::MediaStreamTrack* track,
-                                    nsIPrincipal* principal,
-                                    const PeerIdentity* sinkIdentity);
+  // `aTrack` has to be null or equal `mDomTrack` for us to apply the update.
+  virtual void UpdateSinkIdentity_m(dom::MediaStreamTrack* aTrack,
+                                    nsIPrincipal* aPrincipal,
+                                    const PeerIdentity* aSinkIdentity);
 
   // Called on the main thread.
   void DetachMedia() override;
 
   // Override MediaPipeline::TransportReady.
-  nsresult TransportReady_s(TransportInfo& info) override;
+  nsresult TransportReady_s(TransportInfo& aInfo) override;
 
   // Replace a track with a different one
   // In non-compliance with the likely final spec, allow the new
   // track to be part of a different stream (since we don't support
   // multiple tracks of a type in a stream yet).  bug 1056650
-  virtual nsresult ReplaceTrack(RefPtr<dom::MediaStreamTrack>& domtrack);
+  virtual nsresult ReplaceTrack(RefPtr<dom::MediaStreamTrack>& aDomTrack);
 
   // Separate classes to allow ref counting
   class PipelineListener;
   class VideoFrameFeeder;
 
 protected:
   ~MediaPipelineTransmit();
 
   void SetDescription();
 
 private:
-  RefPtr<PipelineListener> listener_;
-  RefPtr<AudioProxyThread> audio_processing_;
-  RefPtr<VideoFrameFeeder> feeder_;
-  RefPtr<VideoFrameConverter> converter_;
-  bool is_video_;
-  RefPtr<dom::MediaStreamTrack> domtrack_;
-  bool transmitting_;
+  RefPtr<PipelineListener> mListener;
+  RefPtr<AudioProxyThread> mAudioProcessing;
+  RefPtr<VideoFrameFeeder> mFeeder;
+  RefPtr<VideoFrameConverter> mConverter;
+  bool mIsVideo;
+  RefPtr<dom::MediaStreamTrack> mDomTrack;
+  bool mTransmitting;
 };
 
 // A specialization of pipeline for reading from the network and
 // rendering media.
 class MediaPipelineReceive : public MediaPipeline
 {
 public:
-  // Set rtcp_transport to nullptr to use rtcp-mux
-  MediaPipelineReceive(const std::string& pc,
-                       nsCOMPtr<nsIEventTarget> main_thread,
-                       nsCOMPtr<nsIEventTarget> sts_thread,
-                       RefPtr<MediaSessionConduit> conduit);
-
-  int segments_added() const { return segments_added_; }
+  // NOTE: stale comment? This ctor has no aRtcpTransport parameter (rtcp-mux).
+  MediaPipelineReceive(const std::string& aPc,
+                       nsCOMPtr<nsIEventTarget> aMainThread,
+                       nsCOMPtr<nsIEventTarget> aStsThread,
+                       RefPtr<MediaSessionConduit> aConduit);
 
   // Sets the PrincipalHandle we set on the media chunks produced by this
   // pipeline. Must be called on the main thread.
   virtual void SetPrincipalHandle_m(
-    const PrincipalHandle& principal_handle) = 0;
+    const PrincipalHandle& aPrincipalHandle) = 0;
 
 protected:
   ~MediaPipelineReceive();
 
-  int segments_added_;
-
 private:
 };
 
 // A specialization of pipeline for reading from the network and
 // rendering audio.
 class MediaPipelineReceiveAudio : public MediaPipelineReceive
 {
 public:
-  MediaPipelineReceiveAudio(const std::string& pc,
-                            nsCOMPtr<nsIEventTarget> main_thread,
-                            nsCOMPtr<nsIEventTarget> sts_thread,
-                            RefPtr<AudioSessionConduit> conduit,
+  MediaPipelineReceiveAudio(const std::string& aPc,
+                            nsCOMPtr<nsIEventTarget> aMainThread,
+                            nsCOMPtr<nsIEventTarget> aStsThread,
+                            RefPtr<AudioSessionConduit> aConduit,
                             dom::MediaStreamTrack* aTrack);
 
   void DetachMedia() override;
 
   bool IsVideo() const override { return false; }
 
-  void SetPrincipalHandle_m(const PrincipalHandle& principal_handle) override;
+  void SetPrincipalHandle_m(const PrincipalHandle& aPrincipalHandle) override;
 
   void Start() override;
   void Stop() override;
 
   void OnRtpPacketReceived() override;
 
 private:
   // Separate class to allow ref counting
   class PipelineListener;
 
-  RefPtr<PipelineListener> listener_;
+  RefPtr<PipelineListener> mListener;
 };
 
 // A specialization of pipeline for reading from the network and
 // rendering video.
 class MediaPipelineReceiveVideo : public MediaPipelineReceive
 {
 public:
-  MediaPipelineReceiveVideo(const std::string& pc,
-                            nsCOMPtr<nsIEventTarget> main_thread,
-                            nsCOMPtr<nsIEventTarget> sts_thread,
-                            RefPtr<VideoSessionConduit> conduit,
+  MediaPipelineReceiveVideo(const std::string& aPc,
+                            nsCOMPtr<nsIEventTarget> aMainThread,
+                            nsCOMPtr<nsIEventTarget> aStsThread,
+                            RefPtr<VideoSessionConduit> aConduit,
                             dom::MediaStreamTrack* aTrack);
 
   // Called on the main thread.
   void DetachMedia() override;
 
   bool IsVideo() const override { return true; }
 
-  void SetPrincipalHandle_m(const PrincipalHandle& principal_handle) override;
+  void SetPrincipalHandle_m(const PrincipalHandle& aPrincipalHandle) override;
 
   void Start() override;
   void Stop() override;
 
   void OnRtpPacketReceived() override;
 
 private:
   class PipelineRenderer;
   friend class PipelineRenderer;
 
   // Separate class to allow ref counting
   class PipelineListener;
 
-  RefPtr<PipelineRenderer> renderer_;
-  RefPtr<PipelineListener> listener_;
+  RefPtr<PipelineRenderer> mRenderer;
+  RefPtr<PipelineListener> mListener;
 };
 
 } // namespace mozilla
 #endif
--- a/media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.cpp
+++ b/media/webrtc/signaling/src/peerconnection/PeerConnectionImpl.cpp
@@ -3464,17 +3464,17 @@ PeerConnectionImpl::ExecuteStatsQuery_s(
         NS_LITERAL_STRING("audio") : NS_LITERAL_STRING("video");
     nsString idstr = mediaType;
     idstr.AppendLiteral("_");
     idstr.AppendInt((uint32_t)p);
 
     // TODO(@@NG):ssrcs handle Conduits having multiple stats at the same level
     // This is pending spec work
     // Gather pipeline stats.
-    switch (mp.direction()) {
+    switch (mp.Direction()) {
       case MediaPipeline::TRANSMIT: {
         nsString localId = NS_LITERAL_STRING("outbound_rtp_") + idstr;
         nsString remoteId;
         nsString ssrc;
         std::vector<unsigned int> ssrcvals = mp.Conduit()->GetLocalSSRCs();
         if (!ssrcvals.empty()) {
           ssrc.AppendInt(ssrcvals[0]);
         }
@@ -3522,18 +3522,18 @@ PeerConnectionImpl::ExecuteStatsQuery_s(
           s.mId.Construct(localId);
           s.mType.Construct(RTCStatsType::Outbound_rtp);
           if (ssrc.Length()) {
             s.mSsrc.Construct(ssrc);
           }
           s.mMediaType.Construct(mediaType);
           s.mRemoteId.Construct(remoteId);
           s.mIsRemote = false;
-          s.mPacketsSent.Construct(mp.rtp_packets_sent());
-          s.mBytesSent.Construct(mp.rtp_bytes_sent());
+          s.mPacketsSent.Construct(mp.RtpPacketsSent());
+          s.mBytesSent.Construct(mp.RtpBytesSent());
 
           // Fill in packet type statistics
           webrtc::RtcpPacketTypeCounter counters;
           if (mp.Conduit()->GetSendPacketTypeStats(&counters)) {
             s.mNackCount.Construct(counters.nack_packets);
             // Fill in video only packet type stats
             if (!isAudio) {
               s.mFirCount.Construct(counters.fir_packets);
@@ -3613,18 +3613,18 @@ PeerConnectionImpl::ExecuteStatsQuery_s(
         if (mp.Conduit()->GetRTPStats(&jitterMs, &packetsLost)) {
           s.mJitter.Construct(double(jitterMs)/1000);
           s.mPacketsLost.Construct(packetsLost);
         }
         if (remoteId.Length()) {
           s.mRemoteId.Construct(remoteId);
         }
         s.mIsRemote = false;
-        s.mPacketsReceived.Construct(mp.rtp_packets_received());
-        s.mBytesReceived.Construct(mp.rtp_bytes_received());
+        s.mPacketsReceived.Construct(mp.RtpPacketsReceived());
+        s.mBytesReceived.Construct(mp.RtpBytesReceived());
 
         if (query->internalStats && isAudio) {
           int32_t jitterBufferDelay;
           int32_t playoutBufferDelay;
           int32_t avSyncDelta;
           if (mp.Conduit()->GetAVStats(&jitterBufferDelay,
                                        &playoutBufferDelay,
                                        &avSyncDelta)) {