Bug 920325: Add WebRTC latency logging from capture to RTP and from RTP to speakers r=padenot
author: Randell Jesup <rjesup@jesup.org>
Fri, 25 Oct 2013 18:13:42 -0400
changeset 167086 c37be651071ce167b0f18cfe23a5d28637c61a24
parent 167085 eef27ae8eb3a8e185c89c99a82d36fdd9fc7ee1d
child 167087 32625a7468b2a107857235b5e23e37afb5c88b80
push id: 428
push user: bbajaj@mozilla.com
push date: Tue, 28 Jan 2014 00:16:25 +0000
reviewers: padenot
bugs: 920325
milestone: 27.0a1
content/media/AudioSegment.cpp
content/media/AudioStream.cpp
content/media/AudioStream.h
content/media/Latency.cpp
content/media/Latency.h
content/media/MediaSegment.h
content/media/MediaStreamGraph.cpp
content/media/webrtc/MediaEngineWebRTCAudio.cpp
dom/media/MediaManager.cpp
media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
media/webrtc/signaling/src/media-conduit/AudioConduit.h
media/webrtc/signaling/test/FakeMediaStreamsImpl.h
media/webrtc/trunk/webrtc/modules/audio_device/audio_device.gypi
media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h
--- a/content/media/AudioSegment.cpp
+++ b/content/media/AudioSegment.cpp
@@ -150,19 +150,22 @@ AudioSegment::WriteTo(uint64_t aID, Audi
                                      duration, c.mVolume,
                                      outputChannels,
                                      buf.Elements());
         }
       } else {
         // Assumes that a bit pattern of zeroes == 0.0f
         memset(buf.Elements(), 0, buf.Length()*sizeof(AudioDataValue));
       }
-      aOutput->Write(buf.Elements(), int32_t(duration));
-      if(!c.mTimeStamp.IsNull())
-        LogLatency(AsyncLatencyLogger::AudioMediaStreamTrack, aID,
-                   (mozilla::TimeStamp::Now() - c.mTimeStamp).ToMilliseconds());
+      aOutput->Write(buf.Elements(), int32_t(duration), &(c.mTimeStamp));
+      if(!c.mTimeStamp.IsNull()) {
+        TimeStamp now = TimeStamp::Now();
+        // It would be more efficient to convert c.mTimeStamp to ms at creation time and pass that here
+        LogTime(AsyncLatencyLogger::AudioMediaStreamTrack, aID,
+                (now - c.mTimeStamp).ToMilliseconds(), c.mTimeStamp);
+      }
       offset += duration;
     }
   }
   aOutput->Start();
 }
 
 }
--- a/content/media/AudioStream.cpp
+++ b/content/media/AudioStream.cpp
@@ -138,17 +138,18 @@ static cubeb_stream_type ConvertChannelT
 }
 #endif
 
 AudioStream::AudioStream()
 : mInRate(0),
   mOutRate(0),
   mChannels(0),
   mWritten(0),
-  mAudioClock(MOZ_THIS_IN_INITIALIZER_LIST())
+  mAudioClock(MOZ_THIS_IN_INITIALIZER_LIST()),
+  mReadPoint(0)
 {}
 
 void AudioStream::InitLibrary()
 {
 #ifdef PR_LOGGING
   gAudioStreamLog = PR_NewLogModule("AudioStream");
 #endif
   gAudioPrefsLock = new Mutex("AudioStream::gAudioPrefsLock");
@@ -323,28 +324,29 @@ class BufferedAudioStream : public Audio
  public:
   BufferedAudioStream();
   ~BufferedAudioStream();
 
   nsresult Init(int32_t aNumChannels, int32_t aRate,
                 const dom::AudioChannelType aAudioChannelType,
                 AudioStream::LatencyRequest aLatencyRequest);
   void Shutdown();
-  nsresult Write(const AudioDataValue* aBuf, uint32_t aFrames);
+  nsresult Write(const AudioDataValue* aBuf, uint32_t aFrames, TimeStamp *aTime = nullptr);
   uint32_t Available();
   void SetVolume(double aVolume);
   void Drain();
   void Start();
   void Pause();
   void Resume();
   int64_t GetPosition();
   int64_t GetPositionInFrames();
   int64_t GetPositionInFramesInternal();
   int64_t GetLatencyInFrames();
   bool IsPaused();
+  void GetBufferInsertTime(int64_t &aTimeMs);
   // This method acquires the monitor and forwards the call to the base
   // class, to prevent a race on |mTimeStretcher|, in
   // |AudioStream::EnsureTimeStretcherInitialized|.
   nsresult EnsureTimeStretcherInitialized();
 
 private:
   static long DataCallback_S(cubeb_stream*, void* aThis, void* aBuffer, long aFrames)
   {
@@ -354,20 +356,19 @@ private:
   static void StateCallback_S(cubeb_stream*, void* aThis, cubeb_state aState)
   {
     static_cast<BufferedAudioStream*>(aThis)->StateCallback(aState);
   }
 
   long DataCallback(void* aBuffer, long aFrames);
   void StateCallback(cubeb_state aState);
 
-  long GetUnprocessed(void* aBuffer, long aFrames);
-
-  long GetTimeStretched(void* aBuffer, long aFrames);
-
+  // aTime is the time in ms at which the samples were inserted into the MediaStreamGraph
+  long GetUnprocessed(void* aBuffer, long aFrames, int64_t &aTime);
+  long GetTimeStretched(void* aBuffer, long aFrames, int64_t &aTime);
 
   // Shared implementation of underflow adjusted position calculation.
   // Caller must own the monitor.
   int64_t GetPositionInFramesUnlocked();
 
   void StartUnlocked();
 
   // The monitor is held to protect all access to member variables.  Write()
@@ -537,26 +538,26 @@ WriteDumpFile(FILE* aDumpFile, AudioStre
   fwrite(output, 2, samples, aDumpFile);
   fflush(aDumpFile);
 }
 
 BufferedAudioStream::BufferedAudioStream()
   : mMonitor("BufferedAudioStream"), mLostFrames(0), mDumpFile(nullptr),
     mVolume(1.0), mBytesPerFrame(0), mState(INITIALIZED)
 {
-  AsyncLatencyLogger::Get(true)->AddRef();
+  // keep a ref in case we shut down later than nsLayoutStatics
+  mLatencyLog = AsyncLatencyLogger::Get(true);
 }
 
 BufferedAudioStream::~BufferedAudioStream()
 {
   Shutdown();
   if (mDumpFile) {
     fclose(mDumpFile);
   }
-  AsyncLatencyLogger::Get()->Release();
 }
 
 nsresult
 BufferedAudioStream::EnsureTimeStretcherInitialized()
 {
   MonitorAutoLock mon(mMonitor);
   return AudioStream::EnsureTimeStretcherInitialized();
 }
@@ -567,16 +568,18 @@ BufferedAudioStream::Init(int32_t aNumCh
                           AudioStream::LatencyRequest aLatencyRequest)
 {
   cubeb* cubebContext = GetCubebContext();
 
   if (!cubebContext || aNumChannels < 0 || aRate < 0) {
     return NS_ERROR_FAILURE;
   }
 
+  PR_LOG(gAudioStreamLog, PR_LOG_DEBUG,
+    ("%s  channels: %d, rate: %d", __FUNCTION__, aNumChannels, aRate));
   mInRate = mOutRate = aRate;
   mChannels = aNumChannels;
 
   mDumpFile = OpenDumpFile(this);
 
   cubeb_stream_params params;
   params.rate = aRate;
   params.channels = aNumChannels;
@@ -640,29 +643,46 @@ BufferedAudioStream::Shutdown()
   if (mState == STARTED) {
     Pause();
   }
   if (mCubebStream) {
     mCubebStream.reset();
   }
 }
 
+// aTime is the time in ms at which the samples were inserted into the MediaStreamGraph
 nsresult
-BufferedAudioStream::Write(const AudioDataValue* aBuf, uint32_t aFrames)
+BufferedAudioStream::Write(const AudioDataValue* aBuf, uint32_t aFrames, TimeStamp *aTime)
 {
   MonitorAutoLock mon(mMonitor);
   if (!mCubebStream || mState == ERRORED) {
     return NS_ERROR_FAILURE;
   }
   NS_ASSERTION(mState == INITIALIZED || mState == STARTED,
     "Stream write in unexpected state.");
 
   const uint8_t* src = reinterpret_cast<const uint8_t*>(aBuf);
   uint32_t bytesToCopy = FramesToBytes(aFrames);
 
+  // XXX this will need to change if we want to enable this on-the-fly!
+  if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG)) {
+    // Record the position and time this data was inserted
+    int64_t timeMs;
+    if (aTime && !aTime->IsNull()) {
+      if (mStartTime.IsNull()) {
+        AsyncLatencyLogger::Get(true)->GetStartTime(mStartTime);
+      }
+      timeMs = (*aTime - mStartTime).ToMilliseconds();
+    } else {
+      timeMs = 0;
+    }
+    struct Inserts insert = { timeMs, aFrames };
+    mInserts.AppendElement(insert);
+  }
+
   while (bytesToCopy > 0) {
     uint32_t available = std::min(bytesToCopy, mBuffer.Available());
     NS_ABORT_IF_FALSE(available % mBytesPerFrame == 0,
         "Must copy complete frames.");
 
     mBuffer.AppendElements(src, available);
     src += available;
     bytesToCopy -= available;
@@ -841,18 +861,35 @@ BufferedAudioStream::GetLatencyInFrames(
 
 bool
 BufferedAudioStream::IsPaused()
 {
   MonitorAutoLock mon(mMonitor);
   return mState == STOPPED;
 }
 
+void
+BufferedAudioStream::GetBufferInsertTime(int64_t &aTimeMs)
+{
+  if (mInserts.Length() > 0) {
+    // Find the right block, but don't leave the array empty
+    while (mInserts.Length() > 1 && mReadPoint >= mInserts[0].mFrames) {
+      mReadPoint -= mInserts[0].mFrames;
+      mInserts.RemoveElementAt(0);
+    }
+    // offset for amount already read
+    // XXX Note: could misreport if we couldn't find a block in the right timeframe
+    aTimeMs = mInserts[0].mTimeMs + ((mReadPoint * 1000) / mOutRate);
+  } else {
+    aTimeMs = INT64_MAX;
+  }
+}
+
 long
-BufferedAudioStream::GetUnprocessed(void* aBuffer, long aFrames)
+BufferedAudioStream::GetUnprocessed(void* aBuffer, long aFrames, int64_t &aTimeMs)
 {
   uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer);
 
   // Flush the timestretcher pipeline, if we were playing using a playback rate
   // other than 1.0.
   uint32_t flushedFrames = 0;
   if (mTimeStretcher && mTimeStretcher->numSamples()) {
     flushedFrames = mTimeStretcher->receiveSamples(reinterpret_cast<AudioDataValue*>(wpos), aFrames);
@@ -862,21 +899,26 @@ BufferedAudioStream::GetUnprocessed(void
   uint32_t available = std::min(toPopBytes, mBuffer.Length());
 
   void* input[2];
   uint32_t input_size[2];
   mBuffer.PopElements(available, &input[0], &input_size[0], &input[1], &input_size[1]);
   memcpy(wpos, input[0], input_size[0]);
   wpos += input_size[0];
   memcpy(wpos, input[1], input_size[1]);
+
+  // The first timing block in mInserts now corresponds to the first sample we returned
+  mReadPoint += BytesToFrames(available);
+  GetBufferInsertTime(aTimeMs);
+
   return BytesToFrames(available) + flushedFrames;
 }
 
 long
-BufferedAudioStream::GetTimeStretched(void* aBuffer, long aFrames)
+BufferedAudioStream::GetTimeStretched(void* aBuffer, long aFrames, int64_t &aTimeMs)
 {
   long processedFrames = 0;
 
   // We need to call the non-locking version, because we already have the lock.
   if (AudioStream::EnsureTimeStretcherInitialized() != NS_OK) {
     return 0;
   }
 
@@ -891,77 +933,87 @@ BufferedAudioStream::GetTimeStretched(vo
       void* input[2];
       uint32_t input_size[2];
       available = std::min(mBuffer.Length(), toPopBytes);
       if (available != toPopBytes) {
         lowOnBufferedData = true;
       }
       mBuffer.PopElements(available, &input[0], &input_size[0],
                                      &input[1], &input_size[1]);
+      mReadPoint += BytesToFrames(available);
       for(uint32_t i = 0; i < 2; i++) {
         mTimeStretcher->putSamples(reinterpret_cast<AudioDataValue*>(input[i]), BytesToFrames(input_size[i]));
       }
     }
     uint32_t receivedFrames = mTimeStretcher->receiveSamples(reinterpret_cast<AudioDataValue*>(wpos), aFrames - processedFrames);
     wpos += FramesToBytes(receivedFrames);
     processedFrames += receivedFrames;
   } while (processedFrames < aFrames && !lowOnBufferedData);
 
+  GetBufferInsertTime(aTimeMs);
+
   return processedFrames;
 }
 
 long
 BufferedAudioStream::DataCallback(void* aBuffer, long aFrames)
 {
   MonitorAutoLock mon(mMonitor);
   uint32_t available = std::min(static_cast<uint32_t>(FramesToBytes(aFrames)), mBuffer.Length());
   NS_ABORT_IF_FALSE(available % mBytesPerFrame == 0, "Must copy complete frames");
+  AudioDataValue* output = reinterpret_cast<AudioDataValue*>(aBuffer);
   uint32_t underrunFrames = 0;
   uint32_t servicedFrames = 0;
+  int64_t insertTime;
 
   if (available) {
-    AudioDataValue* output = reinterpret_cast<AudioDataValue*>(aBuffer);
     if (mInRate == mOutRate) {
-      servicedFrames = GetUnprocessed(output, aFrames);
+      servicedFrames = GetUnprocessed(output, aFrames, insertTime);
     } else {
-      servicedFrames = GetTimeStretched(output, aFrames);
+      servicedFrames = GetTimeStretched(output, aFrames, insertTime);
     }
     float scaled_volume = float(GetVolumeScale() * mVolume);
 
     ScaleAudioSamples(output, aFrames * mChannels, scaled_volume);
 
     NS_ABORT_IF_FALSE(mBuffer.Length() % mBytesPerFrame == 0, "Must copy complete frames");
 
     // Notify any blocked Write() call that more space is available in mBuffer.
     mon.NotifyAll();
+  } else {
+    GetBufferInsertTime(insertTime);
   }
 
   underrunFrames = aFrames - servicedFrames;
 
   if (mState != DRAINING) {
     uint8_t* rpos = static_cast<uint8_t*>(aBuffer) + FramesToBytes(aFrames - underrunFrames);
     memset(rpos, 0, FramesToBytes(underrunFrames));
-#ifdef PR_LOGGING
     if (underrunFrames) {
       PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
              ("AudioStream %p lost %d frames", this, underrunFrames));
     }
-#endif
     mLostFrames += underrunFrames;
     servicedFrames += underrunFrames;
   }
 
   WriteDumpFile(mDumpFile, this, aFrames, aBuffer);
-  if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG)) {
+  // Don't log if we're not interested or if the stream is inactive
+  if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG) &&
+      insertTime != INT64_MAX && servicedFrames > underrunFrames) {
     uint32_t latency = UINT32_MAX;
     if (cubeb_stream_get_latency(mCubebStream, &latency)) {
       NS_WARNING("Could not get latency from cubeb.");
     }
-    mLatencyLog->Log(AsyncLatencyLogger::AudioStream, 0, (mBuffer.Length() * 1000) / mOutRate);
-    mLatencyLog->Log(AsyncLatencyLogger::Cubeb, 0, (latency * 1000) / mOutRate);
+    TimeStamp now = TimeStamp::Now();
+
+    mLatencyLog->Log(AsyncLatencyLogger::AudioStream, reinterpret_cast<uint64_t>(this),
+                     insertTime, now);
+    mLatencyLog->Log(AsyncLatencyLogger::Cubeb, reinterpret_cast<uint64_t>(mCubebStream.get()),
+                     (latency * 1000) / mOutRate, now);
   }
 
   mAudioClock.UpdateWritePosition(servicedFrames);
   return servicedFrames;
 }
 
 void
 BufferedAudioStream::StateCallback(cubeb_state aState)
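
The Write()/DataCallback changes above form a small producer/consumer clock: Write() appends an {insert time, frame count} record to mInserts, the data callback advances mReadPoint as cubeb consumes frames, and GetBufferInsertTime() ages out fully consumed records to estimate when the frames now being handed to cubeb were inserted. Below is a minimal standalone sketch of that bookkeeping, assuming std::deque in place of nsAutoTArray; the names (InsertClock, OnWrite, OnRead) are illustrative, not the patch's API.

    #include <cstdint>
    #include <deque>
    #include <iostream>

    struct Insert {
      int64_t timeMs;  // when the block entered the buffer
      int64_t frames;  // how many frames it contained
    };

    class InsertClock {
     public:
      explicit InsertClock(int64_t outRate) : mOutRate(outRate) {}

      // Producer side: record each write (cf. BufferedAudioStream::Write).
      void OnWrite(int64_t nowMs, int64_t frames) {
        mInserts.push_back({nowMs, frames});
      }

      // Consumer side: account for frames handed to the audio callback
      // (cf. the mReadPoint updates in GetUnprocessed/GetTimeStretched).
      void OnRead(int64_t frames) { mReadPoint += frames; }

      // Estimate the insert time of the next frame to be played
      // (cf. BufferedAudioStream::GetBufferInsertTime).
      int64_t BufferInsertTimeMs() {
        if (mInserts.empty()) {
          return INT64_MAX;  // sentinel: caller skips logging
        }
        // Age out fully consumed blocks, but never empty the queue.
        while (mInserts.size() > 1 && mReadPoint >= mInserts.front().frames) {
          mReadPoint -= mInserts.front().frames;
          mInserts.pop_front();
        }
        // Offset into the current block, converted to milliseconds.
        return mInserts.front().timeMs + (mReadPoint * 1000) / mOutRate;
      }

     private:
      int64_t mOutRate;        // playback rate in frames per second
      int64_t mReadPoint = 0;  // frames consumed from mInserts.front()
      std::deque<Insert> mInserts;
    };

    int main() {
      InsertClock clock(48000);
      clock.OnWrite(100, 480);  // 10 ms block inserted at t = 100 ms
      clock.OnWrite(110, 480);  // next block inserted at t = 110 ms
      clock.OnRead(600);        // the callback consumed 600 frames
      // 600 frames exhausts the first block and reaches 120 frames into
      // the second: 110 + 120 * 1000 / 48000 = 112 (integer division).
      std::cout << clock.BufferInsertTimeMs() << " ms\n";  // "112 ms"
    }

The INT64_MAX sentinel is what DataCallback checks before logging, so an idle stream produces no latency entries.
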
--- a/content/media/AudioStream.h
+++ b/content/media/AudioStream.h
@@ -129,18 +129,19 @@ public:
                         LatencyRequest aLatencyRequest) = 0;
 
   // Closes the stream. All future use of the stream is an error.
   virtual void Shutdown() = 0;
 
   // Write audio data to the audio hardware.  aBuf is an array of AudioDataValues
   // AudioDataValue of length aFrames*mChannels.  If aFrames is larger
   // than the result of Available(), the write will block until sufficient
-  // buffer space is available.
-  virtual nsresult Write(const mozilla::AudioDataValue* aBuf, uint32_t aFrames) = 0;
+  // buffer space is available.  aTime is the time in ms associated with the first sample
+  // for latency calculations.
+  virtual nsresult Write(const mozilla::AudioDataValue* aBuf, uint32_t aFrames, TimeStamp *aTime = nullptr) = 0;
 
   // Return the number of audio frames that can be written without blocking.
   virtual uint32_t Available() = 0;
 
   // Set the current volume of the audio playback. This is a value from
   // 0 (meaning muted) to 1 (meaning full volume).  Thread-safe.
   virtual void SetVolume(double aVolume) = 0;
 
@@ -198,13 +199,26 @@ protected:
   // Output rate in Hz (characteristic of the playback rate)
   int mOutRate;
   int mChannels;
   // Number of frames written to the buffers.
   int64_t mWritten;
   AudioClock mAudioClock;
   nsAutoPtr<soundtouch::SoundTouch> mTimeStretcher;
   nsRefPtr<AsyncLatencyLogger> mLatencyLog;
+
+  // copy of Latency logger's starting time for offset calculations
+  TimeStamp mStartTime;
+  // Where in the current mInserts[0] block cubeb has read to
+  int64_t mReadPoint;
+  // Keep track of each inserted block of samples and the time it was inserted
+  // so we can estimate the clock time for a specific sample's insertion (for when
+  // we send data to cubeb).  Blocks are aged out as needed.
+  struct Inserts {
+    int64_t mTimeMs;
+    int64_t mFrames;
+  };
+  nsAutoTArray<Inserts,8> mInserts;
 };
 
 } // namespace mozilla
 
 #endif
--- a/content/media/Latency.cpp
+++ b/content/media/Latency.cpp
@@ -19,17 +19,28 @@
 
 using namespace mozilla;
 
 const char* LatencyLogIndex2Strings[] = {
   "Audio MediaStreamTrack",
   "Video MediaStreamTrack",
   "Cubeb",
   "AudioStream",
-  "NetStat"
+  "NetEQ",
+  "AudioCapture Base",
+  "AudioCapture Samples",
+  "AudioTrackInsertion",
+  "MediaPipeline Audio Insertion",
+  "AudioTransmit",
+  "AudioReceive",
+  "MediaPipelineAudioPlayout",
+  "MediaStream Create",
+  "AudioStream Create",
+  "AudioSendRTP",
+  "AudioRecvRTP"
 };
 
 static StaticRefPtr<AsyncLatencyLogger> gAsyncLogger;
 
 PRLogModuleInfo*
 GetLatencyLog()
 {
   static PRLogModuleInfo* sLog;
@@ -38,39 +49,72 @@ GetLatencyLog()
   }
   return sLog;
 }
 
 
 class LogEvent : public nsRunnable
 {
 public:
+  LogEvent(AsyncLatencyLogger::LatencyLogIndex aIndex, uint64_t aID, int64_t aValue,
+           TimeStamp aTimeStamp) :
+    mIndex(aIndex),
+    mID(aID),
+    mValue(aValue),
+    mTimeStamp(aTimeStamp)
+  {}
   LogEvent(AsyncLatencyLogger::LatencyLogIndex aIndex, uint64_t aID, int64_t aValue) :
     mIndex(aIndex),
     mID(aID),
-    mValue(aValue)
+    mValue(aValue),
+    mTimeStamp(TimeStamp())
   {}
   ~LogEvent() {}
 
   NS_IMETHOD Run() {
-    AsyncLatencyLogger::Get(true)->WriteLog(mIndex, mID, mValue);
+    AsyncLatencyLogger::Get(true)->WriteLog(mIndex, mID, mValue, mTimeStamp);
     return NS_OK;
   }
 
 protected:
   AsyncLatencyLogger::LatencyLogIndex mIndex;
   uint64_t mID;
   int64_t mValue;
+  TimeStamp mTimeStamp;
 };
 
 void LogLatency(AsyncLatencyLogger::LatencyLogIndex aIndex, uint64_t aID, int64_t aValue)
 {
   AsyncLatencyLogger::Get()->Log(aIndex, aID, aValue);
 }
 
+void LogTime(AsyncLatencyLogger::LatencyLogIndex aIndex, uint64_t aID, int64_t aValue)
+{
+  TimeStamp now = TimeStamp::Now();
+  AsyncLatencyLogger::Get()->Log(aIndex, aID, aValue, now);
+}
+
+void LogTime(AsyncLatencyLogger::LatencyLogIndex aIndex, uint64_t aID, int64_t aValue, TimeStamp &aTime)
+{
+  AsyncLatencyLogger::Get()->Log(aIndex, aID, aValue, aTime);
+}
+
+void LogTime(uint32_t aIndex, uint64_t aID, int64_t aValue)
+{
+  LogTime(static_cast<AsyncLatencyLogger::LatencyLogIndex>(aIndex), aID, aValue);
+}
+void LogTime(uint32_t aIndex, uint64_t aID, int64_t aValue, TimeStamp &aTime)
+{
+  LogTime(static_cast<AsyncLatencyLogger::LatencyLogIndex>(aIndex), aID, aValue, aTime);
+}
+void LogLatency(uint32_t aIndex, uint64_t aID, int64_t aValue)
+{
+  LogLatency(static_cast<AsyncLatencyLogger::LatencyLogIndex>(aIndex), aID, aValue);
+}
+
 /* static */
 void AsyncLatencyLogger::InitializeStatics()
 {
   NS_ASSERTION(NS_IsMainThread(), "Main thread only");
   GetLatencyLog();
   gAsyncLogger = new AsyncLatencyLogger();
 }
 
@@ -123,42 +167,62 @@ void AsyncLatencyLogger::Init()
   MutexAutoLock lock(mMutex);
   if (mStart.IsNull()) {
     nsresult rv = NS_NewNamedThread("Latency Logger", getter_AddRefs(mThread));
     NS_ENSURE_SUCCESS_VOID(rv);
     mStart = TimeStamp::Now();
   }
 }
 
+void AsyncLatencyLogger::GetStartTime(TimeStamp &aStart)
+{
+  MutexAutoLock lock(mMutex);
+  aStart = mStart;
+}
+
 nsresult
 AsyncLatencyLogger::Observe(nsISupports* aSubject, const char* aTopic,
                             const PRUnichar* aData)
 {
   MOZ_ASSERT(NS_IsMainThread());
   if (strcmp(aTopic, NS_XPCOM_SHUTDOWN_OBSERVER_ID) == 0) {
     Shutdown();
   }
   return NS_OK;
 }
 
 // aID is a sub-identifier (in particular a specific MediaStreamTrack)
-void AsyncLatencyLogger::WriteLog(LatencyLogIndex aIndex, uint64_t aID, int64_t aValue)
+void AsyncLatencyLogger::WriteLog(LatencyLogIndex aIndex, uint64_t aID, int64_t aValue,
+                                  TimeStamp aTimeStamp)
 {
-  PR_LOG(GetLatencyLog(), PR_LOG_DEBUG,
-         ("%s,%llu,%lld.,%lld.",
-          LatencyLogIndex2Strings[aIndex], aID, GetTimeStamp(), aValue));
+  if (aTimeStamp.IsNull()) {
+    PR_LOG(GetLatencyLog(), PR_LOG_DEBUG,
+      ("Latency: %s,%llu,%lld,%lld",
+       LatencyLogIndex2Strings[aIndex], aID, GetTimeStamp(), aValue));
+  } else {
+    PR_LOG(GetLatencyLog(), PR_LOG_DEBUG,
+      ("Latency: %s,%llu,%lld,%lld,%lld",
+       LatencyLogIndex2Strings[aIndex], aID, GetTimeStamp(), aValue,
+       static_cast<int64_t>((aTimeStamp - gAsyncLogger->mStart).ToMilliseconds())));
+  }
 }
 
 int64_t AsyncLatencyLogger::GetTimeStamp()
 {
   TimeDuration t = TimeStamp::Now() - mStart;
   return t.ToMilliseconds();
 }
 
 void AsyncLatencyLogger::Log(LatencyLogIndex aIndex, uint64_t aID, int64_t aValue)
 {
+  TimeStamp null;
+  Log(aIndex, aID, aValue, null);
+}
+
+void AsyncLatencyLogger::Log(LatencyLogIndex aIndex, uint64_t aID, int64_t aValue, TimeStamp &aTime)
+{
   if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG)) {
-    nsCOMPtr<nsIRunnable> event = new LogEvent(aIndex, aID, aValue);
+    nsCOMPtr<nsIRunnable> event = new LogEvent(aIndex, aID, aValue, aTime);
     if (mThread) {
       mThread->Dispatch(event, NS_DISPATCH_NORMAL);
     }
   }
 }
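
For reference, WriteLog() now emits one comma-separated line per event: with a null timestamp the fields are label, ID, logger time, and value; with a timestamp, a fifth field carries the event time as a delta from the logger's start. Hypothetical output for the two DataCallback entries above (all numbers invented):

    Latency: AudioStream,139874054646784,5022,4993,5021
    Latency: Cubeb,139874054651904,5022,40,5021

Here 4993 is the estimated buffer-insert time, 40 the cubeb device latency in ms, and 5021 the delta of the shared now timestamp; the IDs are the stream pointers used for correlation.
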
--- a/content/media/Latency.h
+++ b/content/media/Latency.h
@@ -3,17 +3,17 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MOZILLA_LATENCY_H
 #define MOZILLA_LATENCY_H
 
 #include "mozilla/TimeStamp.h"
-#include "prlog.h"
+#include "nspr/prlog.h"
 #include "nsCOMPtr.h"
 #include "nsIThread.h"
 #include "mozilla/Monitor.h"
 #include "nsISupportsImpl.h"
 #include "nsObserverService.h"
 
 class AsyncLatencyLogger;
 class LogEvent;
@@ -24,25 +24,44 @@ PRLogModuleInfo* GetLatencyLog();
 class AsyncLatencyLogger : public nsIObserver
 {
   NS_DECL_THREADSAFE_ISUPPORTS
   NS_DECL_NSIOBSERVER
 
 public:
 
   enum LatencyLogIndex {
-    AudioMediaStreamTrack,
+    AudioMediaStreamTrack = 0,
     VideoMediaStreamTrack,
     Cubeb,
     AudioStream,
     NetEQ,
+    AudioCaptureBase, // base time for capturing an audio stream
+    AudioCapture, // records number of samples captured and the time
+    AudioTrackInsertion, // # of samples inserted into a mediastreamtrack and the time
+    MediaPipelineAudioInsertion, // Timestamp and time of timestamp
+    AudioTransmit, // Timestamp and socket send time
+    AudioReceive, // Timestamp and receive time
+    MediaPipelineAudioPlayout, // Timestamp and playout into MST time
+    MediaStreamCreate, // Source and TrackUnion streams
+    AudioStreamCreate, // TrackUnion stream and AudioStream
+    AudioSendRTP,
+    AudioRecvRTP,
     _MAX_INDEX
   };
-  void Log(LatencyLogIndex index, uint64_t aID, int64_t value);
-  void WriteLog(LatencyLogIndex index, uint64_t aID, int64_t value);
+  // Log with a null timestamp
+  void Log(LatencyLogIndex index, uint64_t aID, int64_t aValue);
+  // Log with a timestamp
+  void Log(LatencyLogIndex index, uint64_t aID, int64_t aValue,
+           mozilla::TimeStamp &aTime);
+  // Write a log message to NSPR
+  void WriteLog(LatencyLogIndex index, uint64_t aID, int64_t aValue,
+                mozilla::TimeStamp timestamp);
+  // Get the base time used by the logger for delta calculations
+  void GetStartTime(mozilla::TimeStamp &aStart);
 
   static AsyncLatencyLogger* Get(bool aStartTimer = false);
   static void InitializeStatics();
   // After this is called, the global log object may go away
   static void ShutdownLogger();
 private:
   AsyncLatencyLogger();
   virtual ~AsyncLatencyLogger();
@@ -58,11 +77,24 @@ private:
   // thread only.
   mozilla::TimeStamp mStart;
   // This monitor protects mStart and mMediaLatencyLog for the
   // initialization sequence. It is initialized at layout startup, and
   // destroyed at layout shutdown.
   mozilla::Mutex mMutex;
 };
 
-void LogLatency(AsyncLatencyLogger::LatencyLogIndex index, uint64_t aID, int64_t value);
+// need uint32_t versions for access from webrtc/trunk code
+// Log without a time delta
+void LogLatency(AsyncLatencyLogger::LatencyLogIndex index, uint64_t aID, int64_t aValue);
+void LogLatency(uint32_t index, uint64_t aID, int64_t aValue);
+// Log TimeStamp::Now() (as delta)
+void LogTime(AsyncLatencyLogger::LatencyLogIndex index, uint64_t aID, int64_t aValue);
+void LogTime(uint32_t index, uint64_t aID, int64_t aValue);
+// Log the specified time (as delta)
+void LogTime(AsyncLatencyLogger::LatencyLogIndex index, uint64_t aID, int64_t aValue,
+             mozilla::TimeStamp &aTime);
+
+// For generating unique-ish ids for logged sources
+#define LATENCY_STREAM_ID(source, trackID) \
+  ((((uint64_t) (source)) & ~0x0F) | (trackID))
 
 #endif
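
LATENCY_STREAM_ID packs a stream pointer and a track ID into one uint64_t by clearing the pointer's low four bits and OR-ing in the track ID; since such objects are typically at least 16-byte aligned those bits are free, which is also why the comment says "unique-ish" (a track ID above 15 would bleed into pointer bits). A hypothetical evaluation:

    LATENCY_STREAM_ID(0x7f3a20c4d2f0, 1)
      = (0x7f3a20c4d2f0 & ~0x0F) | 1
      = 0x7f3a20c4d2f1
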
--- a/content/media/MediaSegment.h
+++ b/content/media/MediaSegment.h
@@ -228,16 +228,23 @@ public:
     MediaSegmentBase<C, Chunk>& mSegment;
     uint32_t mIndex;
   };
 
   void RemoveLeading(TrackTicks aDuration)
   {
     RemoveLeading(aDuration, 0);
   }
+
+#ifdef MOZILLA_INTERNAL_API
+  void GetStartTime(TimeStamp &aTime) {
+    aTime = mChunks[0].mTimeStamp;
+  }
+#endif
+
 protected:
   MediaSegmentBase(Type aType) : MediaSegment(aType) {}
 
   /**
    * Appends the contents of aSource to this segment, clearing aSource.
    */
   void AppendFromInternal(MediaSegmentBase<C, Chunk>* aSource)
   {
--- a/content/media/MediaStreamGraph.cpp
+++ b/content/media/MediaStreamGraph.cpp
@@ -767,16 +767,20 @@ MediaStreamGraphImpl::CreateOrDestroyAud
           aStream->mAudioOutputStreams.AppendElement();
         audioOutputStream->mAudioPlaybackStartTime = aAudioOutputStartTime;
         audioOutputStream->mBlockedAudioTime = 0;
         audioOutputStream->mStream = AudioStream::AllocateStream();
         // XXX for now, allocate stereo output. But we need to fix this to
         // match the system's ideal channel configuration.
         audioOutputStream->mStream->Init(2, tracks->GetRate(), AUDIO_CHANNEL_NORMAL, AudioStream::LowLatency);
         audioOutputStream->mTrackID = tracks->GetID();
+
+        LogLatency(AsyncLatencyLogger::AudioStreamCreate,
+                   reinterpret_cast<uint64_t>(aStream),
+                   reinterpret_cast<int64_t>(audioOutputStream->mStream.get()));
       }
     }
   }
 
   for (int32_t i = audioOutputStreamsFound.Length() - 1; i >= 0; --i) {
     if (!audioOutputStreamsFound[i]) {
       aStream->mAudioOutputStreams[i].mStream->Shutdown();
       aStream->mAudioOutputStreams.RemoveElementAt(i);
@@ -849,18 +853,19 @@ MediaStreamGraphImpl::PlayAudio(MediaStr
         output.AppendNullData(endTicks - sliceEnd);
         NS_ASSERTION(endTicks == sliceEnd || track->IsEnded(),
                      "Ran out of data but track not ended?");
         output.ApplyVolume(volume);
         LOG(PR_LOG_DEBUG+1, ("MediaStream %p writing samples for %f to %f (samples %lld to %lld)",
                              aStream, MediaTimeToSeconds(t), MediaTimeToSeconds(end),
                              startTicks, endTicks));
       }
-      // XXX need unique id for stream & track
-      output.WriteTo((((uint64_t)i) << 32) | track->GetID(), audioOutput.mStream);
+      // Need unique id for stream & track - and we want it to match the inserter
+      output.WriteTo(LATENCY_STREAM_ID(aStream, track->GetID()),
+                     audioOutput.mStream);
       t = end;
     }
   }
 }
 
 static void
 SetImageToBlackPixel(PlanarYCbCrImage* aImage)
 {
--- a/content/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/content/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -151,23 +151,27 @@ MediaEngineWebRTCAudioSource::Start(Sour
   {
     MonitorAutoLock lock(mMonitor);
     mSources.AppendElement(aStream);
   }
 
   AudioSegment* segment = new AudioSegment();
   aStream->AddTrack(aID, SAMPLE_FREQUENCY, 0, segment);
   aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
-  LOG(("Initial audio"));
-  mTrackID = aID;
+  LOG(("Start audio for stream %p", aStream));
 
   if (mState == kStarted) {
+    MOZ_ASSERT(aID == mTrackID);
     return NS_OK;
   }
   mState = kStarted;
+  mTrackID = aID;
+
+  // Make sure logger starts before capture
+  AsyncLatencyLogger::Get(true);
 
   // Configure audio processing in webrtc code
   Config(mEchoOn, webrtc::kEcUnchanged,
          mAgcOn, webrtc::kAgcUnchanged,
          mNoiseOn, webrtc::kNsUnchanged);
 
   if (mVoEBase->StartReceive(mChannel)) {
     return NS_ERROR_FAILURE;
@@ -365,21 +369,28 @@ MediaEngineWebRTCAudioSource::Process(in
 
     sample* dest = static_cast<sample*>(buffer->Data());
     memcpy(dest, audio10ms, length * sizeof(sample));
 
     AudioSegment segment;
     nsAutoTArray<const sample*,1> channels;
     channels.AppendElement(dest);
     segment.AppendFrames(buffer.forget(), channels, length);
+    TimeStamp insertTime;
+    segment.GetStartTime(insertTime);
 
     SourceMediaStream *source = mSources[i];
     if (source) {
       // This is safe from any thread, and is safe if the track is Finished
-      // or Destroyed
+      // or Destroyed.
+      // Make sure we include the stream and the track.
+      // The 0:1 is a flag to note when we've done the final insert for a given input block.
+      LogTime(AsyncLatencyLogger::AudioTrackInsertion, LATENCY_STREAM_ID(source, mTrackID),
+              (i+1 < len) ? 0 : 1, insertTime);
+
       source->AppendToTrack(mTrackID, &segment);
     }
   }
 
   return;
 }
 
 }
--- a/dom/media/MediaManager.cpp
+++ b/dom/media/MediaManager.cpp
@@ -21,16 +21,18 @@
 #include "nsIDocument.h"
 #include "nsISupportsPrimitives.h"
 #include "nsIInterfaceRequestorUtils.h"
 #include "nsIScriptSecurityManager.h"
 #include "mozilla/dom/TabChild.h"
 #include "mozilla/dom/MediaStreamTrackBinding.h"
 #include "mozilla/dom/GetUserMediaRequestBinding.h"
 
+#include "Latency.h"
+
 // For PR_snprintf
 #include "prprf.h"
 
 #include "nsJSUtils.h"
 #include "nsDOMFile.h"
 #include "nsGlobalWindow.h"
 
 #include "mozilla/Preferences.h"
@@ -601,16 +603,22 @@ public:
     nsRefPtr<SourceMediaStream> stream = gm->CreateSourceStream(nullptr);
 
     // connect the source stream to the track union stream to avoid us blocking
     trackunion->GetStream()->AsProcessedStream()->SetAutofinish(true);
     nsRefPtr<MediaInputPort> port = trackunion->GetStream()->AsProcessedStream()->
       AllocateInputPort(stream, MediaInputPort::FLAG_BLOCK_OUTPUT);
     trackunion->mSourceStream = stream;
     trackunion->mPort = port.forget();
+    // Log the relationship between SourceMediaStream and TrackUnion stream
+    // Make sure logger starts before capture
+    AsyncLatencyLogger::Get(true);
+    LogLatency(AsyncLatencyLogger::MediaStreamCreate,
+               reinterpret_cast<uint64_t>(stream.get()),
+               reinterpret_cast<int64_t>(trackunion->GetStream()));
 
     trackunion->CombineWithPrincipal(window->GetExtantDoc()->NodePrincipal());
 
     // The listener was added at the beginning in an inactive state.
     // Activate our listener. We'll call Start() on the source when we get a callback
     // that the MediaStream has started consuming. The listener is freed
     // when the page is invalidated (on navigation or close).
     mListener->Activate(stream.forget(), mAudioSource, mVideoSource);
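
Together with the AudioStreamCreate entry added in MediaStreamGraph.cpp, this gives a log post-processor enough to chain IDs across stages: MediaStreamCreate maps a SourceMediaStream to its TrackUnion stream, and AudioStreamCreate maps that stream to its AudioStream. Hypothetical lines (pointer values invented) in which the value field of the first matches the ID of the second:

    Latency: MediaStream Create,139874054000640,3001,139874054005760
    Latency: AudioStream Create,139874054005760,3456,139874055103488
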
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.cpp
@@ -7,16 +7,19 @@
 
 #include "AudioConduit.h"
 #include "nsCOMPtr.h"
 #include "mozilla/Services.h"
 #include "nsServiceManagerUtils.h"
 #include "nsIPrefService.h"
 #include "nsIPrefBranch.h"
 #include "nsThreadUtils.h"
+#ifdef MOZILLA_INTERNAL_API
+#include "Latency.h"
+#endif
 
 #include "webrtc/voice_engine/include/voe_errors.h"
 
 #ifdef MOZ_WIDGET_ANDROID
 #include "AndroidJNIWrapper.h"
 #endif
 
 namespace mozilla {
@@ -199,16 +202,22 @@ MediaConduitErrorCode WebrtcAudioConduit
   }
 
   if(!(mPtrVoEXmedia = VoEExternalMedia::GetInterface(mVoiceEngine)))
   {
     CSFLogError(logTag, "%s Unable to initialize VoEExternalMedia", __FUNCTION__);
     return kMediaConduitSessionNotInited;
   }
 
+  if(!(mPtrVoEVideoSync = VoEVideoSync::GetInterface(mVoiceEngine)))
+  {
+    CSFLogError(logTag, "%s Unable to initialize VoEVideoSync", __FUNCTION__);
+    return kMediaConduitSessionNotInited;
+  }
+
   if (other) {
     mChannel = other->mChannel;
   } else {
     // init the engine with our audio device layer
     if(mPtrVoEBase->Init() == -1)
     {
       CSFLogError(logTag, "%s VoiceEngine Base Not Initialized", __FUNCTION__);
       return kMediaConduitSessionNotInited;
@@ -505,16 +514,23 @@ WebrtcAudioConduit::SendAudioFrame(const
 
   // if transmission is not started .. conduit cannot insert frames
   if(!mEngineTransmitting)
   {
     CSFLogError(logTag, "%s Engine not transmitting ", __FUNCTION__);
     return kMediaConduitSessionNotInited;
   }
 
+#ifdef MOZILLA_INTERNAL_API
+  if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG)) {
+    struct Processing insert = { TimeStamp::Now(), 0 };
+    mProcessing.AppendElement(insert);
+  }
+#endif
+
   capture_delay = mCaptureDelay;
   //Insert the samples
   if(mPtrVoEXmedia->ExternalRecordingInsertData(audio_data,
                                                 lengthSamples,
                                                 samplingFreqHz,
                                                 capture_delay) == -1)
   {
     int error = mPtrVoEBase->LastError();
@@ -583,29 +599,62 @@ WebrtcAudioConduit::GetAudioFrame(int16_
     CSFLogError(logTag,  "%s Getting audio data Failed %d", __FUNCTION__, error);
     if(error == VE_RUNTIME_PLAY_ERROR)
     {
       return kMediaConduitPlayoutError;
     }
     return kMediaConduitUnknownError;
   }
 
+#ifdef MOZILLA_INTERNAL_API
+  if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG)) {
+    if (mProcessing.Length() > 0) {
+      unsigned int now;
+      mPtrVoEVideoSync->GetPlayoutTimestamp(mChannel, now);
+      if (static_cast<uint32_t>(now) != mLastTimestamp) {
+        mLastTimestamp = static_cast<uint32_t>(now);
+        // Find the block that includes this timestamp in the network input
+        while (mProcessing.Length() > 0) {
+          // FIX! assumes 20ms @ 48000Hz
+          // FIX handle wrap-around
+          if (mProcessing[0].mRTPTimeStamp + 20*(48000/1000) >= now) {
+            TimeDuration t = TimeStamp::Now() - mProcessing[0].mTimeStamp;
+            // Wrap-around?
+            int64_t delta = t.ToMilliseconds() + (now - mProcessing[0].mRTPTimeStamp)/(48000/1000);
+            LogTime(AsyncLatencyLogger::AudioRecvRTP, ((uint64_t) this), delta);
+            break;
+          }
+          mProcessing.RemoveElementAt(0);
+        }
+      }
+    }
+  }
+#endif
   CSFLogDebug(logTag,"%s GetAudioFrame:Got samples: length %d ",__FUNCTION__,
                                                                lengthSamples);
   return kMediaConduitNoError;
 }
 
 // Transport Layer Callbacks
 MediaConduitErrorCode
 WebrtcAudioConduit::ReceivedRTPPacket(const void *data, int len)
 {
   CSFLogDebug(logTag,  "%s : channel %d", __FUNCTION__, mChannel);
 
   if(mEngineReceiving)
   {
+#ifdef MOZILLA_INTERNAL_API
+    if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG)) {
+      // The RTP timestamp is the second 32-bit word of the header, i.e. word [1]
+      struct Processing insert = { TimeStamp::Now(),
+                                   ntohl(static_cast<const uint32_t *>(data)[1]) };
+      mProcessing.AppendElement(insert);
+    }
+#endif
+
     if(mPtrVoENetwork->ReceivedRTPPacket(mChannel,data,len) == -1)
     {
       int error = mPtrVoEBase->LastError();
       CSFLogError(logTag, "%s RTP Processing Error %d", __FUNCTION__, error);
       if(error == VE_RTP_RTCP_MODULE_ERROR)
       {
         return kMediaConduitRTPRTCPModuleError;
       }
@@ -654,16 +703,28 @@ int WebrtcAudioConduit::SendPacket(int c
     if (mOtherDirection)
     {
       return mOtherDirection->SendPacket(channel, data, len);
     }
     CSFLogDebug(logTag,  "%s : Asked to send RTP without an RTP sender on channel %d",
                 __FUNCTION__, channel);
     return -1;
   } else {
+#ifdef MOZILLA_INTERNAL_API
+    if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG)) {
+      if (mProcessing.Length() > 0) {
+        TimeStamp started = mProcessing[0].mTimeStamp;
+        mProcessing.RemoveElementAt(0);
+        mProcessing.RemoveElementAt(0); // 20ms packetization!  Could automate this by watching sizes
+        TimeDuration t = TimeStamp::Now() - started;
+        int64_t delta = t.ToMilliseconds();
+        LogTime(AsyncLatencyLogger::AudioSendRTP, ((uint64_t) this), delta);
+      }
+    }
+#endif
     if(mTransport && (mTransport->SendRtpPacket(data, len) == NS_OK))
     {
       CSFLogDebug(logTag, "%s Sent RTP Packet ", __FUNCTION__);
       return len;
     } else {
       CSFLogError(logTag, "%s RTP Packet Send Failed ", __FUNCTION__);
       return -1;
     }
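
The receive-side matching in GetAudioFrame() compares raw 32-bit RTP timestamps, which the FIX comments flag as unsafe at wrap-around. A sketch of one conventional fix, RFC 3550-style serial-number arithmetic via a signed difference (not code from this patch):

    #include <cassert>
    #include <cstdint>

    // True if RTP timestamp a is at or before b on the 32-bit circular
    // timeline (assumes the two are within 2^31 ticks of each other).
    static bool RtpTimeAtOrBefore(uint32_t a, uint32_t b) {
      return static_cast<int32_t>(b - a) >= 0;
    }

    int main() {
      // Near wrap-around, 0xFFFFFF00 is "earlier" than 0x00000100 even
      // though it is numerically larger.
      assert(RtpTimeAtOrBefore(0xFFFFFF00u, 0x00000100u));
      assert(!RtpTimeAtOrBefore(0x00000100u, 0xFFFFFF00u));
      return 0;
    }

The `mRTPTimeStamp + 20*(48000/1000) >= now` test above could then become RtpTimeAtOrBefore(now, mProcessing[0].mRTPTimeStamp + 20*(48000/1000)), which also behaves when the block end wraps past zero.
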
--- a/media/webrtc/signaling/src/media-conduit/AudioConduit.h
+++ b/media/webrtc/signaling/src/media-conduit/AudioConduit.h
@@ -2,35 +2,39 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 
 #ifndef AUDIO_SESSION_H_
 #define AUDIO_SESSION_H_
 
 #include "mozilla/Attributes.h"
+#include "mozilla/TimeStamp.h"
+#include "nsTArray.h"
 
 #include "MediaConduitInterface.h"
 
 // Audio Engine Includes
 #include "webrtc/common_types.h"
 #include "webrtc/voice_engine/include/voe_base.h"
 #include "webrtc/voice_engine/include/voe_volume_control.h"
 #include "webrtc/voice_engine/include/voe_codec.h"
 #include "webrtc/voice_engine/include/voe_file.h"
 #include "webrtc/voice_engine/include/voe_network.h"
 #include "webrtc/voice_engine/include/voe_external_media.h"
 #include "webrtc/voice_engine/include/voe_audio_processing.h"
+#include "webrtc/voice_engine/include/voe_video_sync.h"
 
 //Some WebRTC types for short notations
  using webrtc::VoEBase;
  using webrtc::VoENetwork;
  using webrtc::VoECodec;
  using webrtc::VoEExternalMedia;
  using webrtc::VoEAudioProcessing;
+ using webrtc::VoEVideoSync;
 
 /** This file hosts several structures identifying different aspects
  * of an RTP Session.
  */
 
 namespace mozilla {
 
 /**
@@ -142,16 +146,17 @@ public:
 
   WebrtcAudioConduit():
                       mOtherDirection(nullptr),
                       mShutDown(false),
                       mVoiceEngine(nullptr),
                       mTransport(nullptr),
                       mEngineTransmitting(false),
                       mEngineReceiving(false),
+                      mLastTimestamp(0),
                       mChannel(-1),
                       mCurSendCodecConfig(nullptr),
                       mCaptureDelay(150),
                       mEchoOn(true),
                       mEchoCancel(webrtc::kEcAec)
   {
   }
 
@@ -204,22 +209,33 @@ private:
   // conduit to die
   webrtc::VoiceEngine* mVoiceEngine;
   mozilla::RefPtr<TransportInterface> mTransport;
   webrtc::VoENetwork*  mPtrVoENetwork;
   webrtc::VoEBase*     mPtrVoEBase;
   webrtc::VoECodec*    mPtrVoECodec;
   webrtc::VoEExternalMedia* mPtrVoEXmedia;
   webrtc::VoEAudioProcessing* mPtrVoEProcessing;
+  webrtc::VoEVideoSync* mPtrVoEVideoSync;
 
   // engine states of interest to us
   bool mEngineTransmitting; // If true => VoiceEngine Send-subsystem is up
   bool mEngineReceiving;    // If true => VoiceEngine Receive-subsystem is up
                             // and playout is enabled
 
+  // Keep track of each inserted RTP block and the time it was inserted
+  // so we can estimate the clock time for a specific TimeStamp coming out
+  // (for when we send data to MediaStreamTracks).  Blocks are aged out as needed.
+  struct Processing {
+    TimeStamp mTimeStamp;
+    uint32_t mRTPTimeStamp; // RTP timestamps received
+  };
+  nsAutoTArray<Processing,8> mProcessing;
+  uint32_t mLastTimestamp;
+
   int mChannel;
   RecvCodecList    mRecvCodecList;
   AudioCodecConfig* mCurSendCodecConfig;
 
   // Current "capture" delay (really output plus input delay)
   int32_t mCaptureDelay;
 
   bool mEchoOn;
--- a/media/webrtc/signaling/test/FakeMediaStreamsImpl.h
+++ b/media/webrtc/signaling/test/FakeMediaStreamsImpl.h
@@ -5,16 +5,19 @@
 #ifndef FAKE_MEDIA_STREAMIMPL_H_
 #define FAKE_MEDIA_STREAMIMPL_H_
 
 #include "FakeMediaStreams.h"
 
 #include "nspr.h"
 #include "nsError.h"
 
+void LogTime(AsyncLatencyLogger::LatencyLogIndex index, uint64_t b, int64_t c) {}
+void LogLatency(AsyncLatencyLogger::LatencyLogIndex index, uint64_t b, int64_t c) {}
+
 static const int AUDIO_BUFFER_SIZE = 1600;
 static const int NUM_CHANNELS      = 2;
 
 NS_IMPL_ISUPPORTS1(Fake_DOMMediaStream, nsIDOMMediaStream)
 
 // Fake_SourceMediaStream
 nsresult Fake_SourceMediaStream::Start() {
   mTimer = do_CreateInstance(NS_TIMER_CONTRACTID);
--- a/media/webrtc/trunk/webrtc/modules/audio_device/audio_device.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/audio_device.gypi
@@ -39,16 +39,22 @@
         'audio_device_utility.h',
         'audio_device_impl.cc',
         'audio_device_impl.h',
         'audio_device_config.h',
         'dummy/audio_device_dummy.h',
         'dummy/audio_device_utility_dummy.h',
       ],
       'conditions': [
+        ['build_with_mozilla==1', {
+          'include_dirs': [
+            '$(DIST)/include',
+            '$(DIST)/include/nspr',
+          ],
+        }],
         ['OS=="linux" or include_alsa_audio==1 or include_pulse_audio==1', {
           'include_dirs': [
             'linux',
           ],
         }], # OS=="linux" or include_alsa_audio==1 or include_pulse_audio==1
         ['OS=="ios"', {
           'include_dirs': [
             'ios',
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.cc
@@ -14,16 +14,23 @@
 #include "audio_device_alsa_linux.h"
 #include "audio_device_config.h"
 
 #include "event_wrapper.h"
 #include "system_wrappers/interface/sleep.h"
 #include "trace.h"
 #include "thread_wrapper.h"
 
+#include "Latency.h"
+
+#define LOG_FIRST_CAPTURE(x) LogTime(AsyncLatencyLogger::AudioCaptureBase, \
+                                     reinterpret_cast<uint64_t>(x), 0)
+#define LOG_CAPTURE_FRAMES(x, frames) LogLatency(AsyncLatencyLogger::AudioCapture, \
+                                                 reinterpret_cast<uint64_t>(x), frames)
+
 webrtc_adm_linux_alsa::AlsaSymbolTable AlsaSymbolTable;
 
 // Accesses ALSA functions through our late-binding symbol table instead of
 // directly. This way we don't have to link to libasound, which means our binary
 // will work on systems that don't have it.
 #define LATE(sym) \
   LATESYM_GET(webrtc_adm_linux_alsa::AlsaSymbolTable, &AlsaSymbolTable, sym)
 
@@ -91,16 +98,17 @@ AudioDeviceLinuxALSA::AudioDeviceLinuxAL
     _playChannels(ALSA_PLAYOUT_CH),
     _recordingBuffer(NULL),
     _playoutBuffer(NULL),
     _recordingFramesLeft(0),
     _playoutFramesLeft(0),
     _playBufType(AudioDeviceModule::kFixedBufferSize),
     _initialized(false),
     _recording(false),
+    _firstRecord(true),
     _playing(false),
     _recIsInitialized(false),
     _playIsInitialized(false),
     _AGC(false),
     _recordingDelay(0),
     _playoutDelay(0),
     _playWarning(0),
     _playError(0),
@@ -1444,16 +1452,17 @@ int32_t AudioDeviceLinuxALSA::StartRecor
     {
         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                      "   failed to alloc recording buffer");
         _recording = false;
         return -1;
     }
     // RECORDING
     const char* threadName = "webrtc_audio_module_capture_thread";
+    _firstRecord = true;
     _ptrThreadRec = ThreadWrapper::CreateThread(RecThreadFunc,
                                                 this,
                                                 kRealtimePriority,
                                                 threadName);
     if (_ptrThreadRec == NULL)
     {
         WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                      "  failed to create the rec audio thread");
@@ -2262,16 +2271,21 @@ bool AudioDeviceLinuxALSA::RecThreadProc
         memcpy(&_recordingBuffer[_recordingBufferSizeIn10MS - left_size],
                buffer, size);
         _recordingFramesLeft -= frames;
 
         if (!_recordingFramesLeft)
         { // buf is full
             _recordingFramesLeft = _recordingFramesIn10MS;
 
+            if (_firstRecord) {
+              LOG_FIRST_CAPTURE(this);
+              _firstRecord = false;
+            }
+            LOG_CAPTURE_FRAMES(this, _recordingFramesIn10MS);
             // store the recorded buffer (no action will be taken if the
             // #recorded samples is not a full buffer)
             _ptrAudioBuffer->SetRecordedBuffer(_recordingBuffer,
                                                _recordingFramesIn10MS);
 
             uint32_t currentMicLevel = 0;
             uint32_t newMicLevel = 0;
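
Between them, the two capture macros let post-processing rebuild an approximate capture timeline: LOG_FIRST_CAPTURE fixes a base point t0 once per recording, and each LOG_CAPTURE_FRAMES entry contributes a 10 ms block's frame count, so sample N maps to roughly t0 + N/rate. A worked example with invented numbers: at 48000 Hz, after 24 logged blocks of 480 frames (11520 frames total) with t0 = 5000 ms, the newest captured sample dates to about 5000 + 11520/48 = 5240 ms.
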
 
--- a/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/linux/audio_device_alsa_linux.h
@@ -229,16 +229,17 @@ private:
     uint32_t _recordingFramesLeft;
     uint32_t _playoutFramesLeft;
 
     AudioDeviceModule::BufferType _playBufType;
 
 private:
     bool _initialized;
     bool _recording;
+    bool _firstRecord;
     bool _playing;
     bool _recIsInitialized;
     bool _playIsInitialized;
     bool _AGC;
 
     snd_pcm_sframes_t _recordingDelay;
     snd_pcm_sframes_t _playoutDelay;