Bug 1444976 - Trace some initial real-time media stuff. r=pehrsons
☠☠ backed out by 0e45c13b34e8 ☠☠
author Paul Adenot <paul@paul.cx>
Thu, 12 Apr 2018 17:51:35 +0200
changeset 467682 79b7ee8b01ee122b3f7122f926e5ae9dc1132a96
parent 467681 35ca96cabe3c19d20868ba2453faf3fffa8df61b
child 467765 4af4ae0aee552a99a995ce4b32198b98294a95f7
child 467766 cde33317ae7ae625f4231372ad7ad122df77d0ba
push id 9165
push user asasaki@mozilla.com
push date Thu, 26 Apr 2018 21:04:54 +0000
treeherder mozilla-beta@064c3804de2e
reviewers pehrsons
bugs 1444976
milestone 61.0a1
Bug 1444976 - Trace some initial real-time media stuff. r=pehrsons MozReview-Commit-ID: 7mmDHVS2t8f
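The TRACE(), TRACE_AUDIO_CALLBACK(), TRACE_COMMENT() and TRACE_AUDIO_CALLBACK_BUDGET() macros used below come from Tracing.h, which is not in this patch's file list; it presumably lands elsewhere in the bug 1444976 series, together with the AsyncLogger.h that MediaStreamGraphImpl.h starts including. As a minimal sketch, assuming an RAII design (the names and the fprintf backend here are illustrative, not the actual Gecko API), such a macro could look like this:

#include <cstdio>

// Illustrative stand-in for a scoped tracing macro: the object logs a
// begin event on construction and an end event on destruction, so a
// single TRACE() statement at the top of a function brackets its whole
// body. A real-time-safe version would hand events to an asynchronous,
// non-blocking logger rather than call fprintf on the audio thread.
class AutoTracer
{
public:
  explicit AutoTracer(const char* aLabel) : mLabel(aLabel)
  {
    fprintf(stderr, "BEGIN %s\n", mLabel);
  }
  ~AutoTracer() { fprintf(stderr, "END %s\n", mLabel); }

private:
  const char* mLabel;
};

#define TRACE() AutoTracer autoTracer(__func__)
#define TRACE_COMMENT(comment) AutoTracer autoTracer(comment)

With that shape, each insertion below is a single statement at the top of the traced function, which keeps the patch mechanical.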
dom/media/GraphDriver.cpp
dom/media/MediaStreamGraph.cpp
dom/media/MediaStreamGraphImpl.h
dom/media/TrackUnionStream.cpp
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
--- a/dom/media/GraphDriver.cpp
+++ b/dom/media/GraphDriver.cpp
@@ -1,19 +1,21 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include <MediaStreamGraphImpl.h>
 #include "mozilla/dom/AudioContext.h"
 #include "mozilla/SharedThreadPool.h"
 #include "mozilla/ClearOnShutdown.h"
 #include "mozilla/Unused.h"
 #include "CubebUtils.h"
+#include "Tracing.h"
 
 #ifdef MOZ_WEBRTC
 #include "webrtc/MediaEngineWebRTC.h"
 #endif
 
 #ifdef XP_MACOSX
 #include <sys/sysctl.h>
 #endif
@@ -876,16 +878,19 @@ AudioCallbackDriver::AutoInCallback::Aut
 AudioCallbackDriver::AutoInCallback::~AutoInCallback() {
   mDriver->mInCallback = false;
 }
 
 long
 AudioCallbackDriver::DataCallback(const AudioDataValue* aInputBuffer,
                                   AudioDataValue* aOutputBuffer, long aFrames)
 {
+  TRACE_AUDIO_CALLBACK_BUDGET(aFrames, mSampleRate);
+  TRACE_AUDIO_CALLBACK();
+
   // Don't add the callback until we're inited and ready
   if (!mAddedMixer) {
     mGraphImpl->mMixer.AddCallback(this);
     mAddedMixer = true;
   }
 
 #ifdef DEBUG
   // DebugOnly<> doesn't work here... it forces an initialization that will cause
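The two macros added to AudioCallbackDriver::DataCallback() above capture both the event and its deadline: a callback asked for aFrames frames at mSampleRate Hz must return within aFrames / mSampleRate seconds, or the stream under-runs. A minimal sketch of that arithmetic (an illustrative helper, not Gecko code):

#include <cstdint>

// The wall-clock budget of one audio callback: producing aFrames frames
// at aRate Hz must complete within aFrames / aRate seconds, or playback
// glitches. For example, 512 frames at 48000 Hz leave about 10.7 ms.
double CallbackBudgetSeconds(long aFrames, uint32_t aRate)
{
  return static_cast<double>(aFrames) / static_cast<double>(aRate);
}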
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -1124,16 +1124,17 @@ MediaStreamGraphImpl::RunMessageAfterPro
   // Only one block is used for messages from the graph thread.
   MOZ_ASSERT(mFrontMessageQueue.Length() == 1);
   mFrontMessageQueue[0].mMessages.AppendElement(Move(aMessage));
 }
 
 void
 MediaStreamGraphImpl::RunMessagesInQueue()
 {
+  TRACE_AUDIO_CALLBACK();
   MOZ_ASSERT(OnGraphThread());
   // Calculate independent action times for each batch of messages (each
   // batch corresponding to an event loop task). This isolates the performance
   // of different scripts to some extent.
   for (uint32_t i = 0; i < mFrontMessageQueue.Length(); ++i) {
     nsTArray<UniquePtr<ControlMessage>>& messages = mFrontMessageQueue[i].mMessages;
 
     for (uint32_t j = 0; j < messages.Length(); ++j) {
@@ -1141,16 +1142,17 @@ MediaStreamGraphImpl::RunMessagesInQueue
     }
   }
   mFrontMessageQueue.Clear();
 }
 
 void
 MediaStreamGraphImpl::UpdateGraph(GraphTime aEndBlockingDecisions)
 {
+  TRACE_AUDIO_CALLBACK();
   MOZ_ASSERT(OnGraphThread());
   MOZ_ASSERT(aEndBlockingDecisions >= mProcessedTime);
   // The next state computed time can be the same as the previous: it
  // means the driver would have been blocking indefinitely, but the graph has
  // been woken up right after having gone to sleep.
   MOZ_ASSERT(aEndBlockingDecisions >= mStateComputedTime);
 
   UpdateStreamOrder();
@@ -1236,16 +1238,17 @@ MediaStreamGraphImpl::UpdateGraph(GraphT
       aEndBlockingDecisions == mStateComputedTime) {
     EnsureNextIteration();
   }
 }
 
 void
 MediaStreamGraphImpl::Process()
 {
+  TRACE_AUDIO_CALLBACK();
   MOZ_ASSERT(OnGraphThread());
   // Play stream contents.
   bool allBlockedForever = true;
   // True when we've done ProcessInput for all processed streams.
   bool doneAllProducing = false;
  // This is the number of frames that are written to the AudioStreams, for
   // this cycle.
   StreamTime ticksPlayed = 0;
@@ -1340,16 +1343,17 @@ MediaStreamGraphImpl::UpdateMainThreadSt
 
   SwapMessageQueues();
   return true;
 }
 
 bool
 MediaStreamGraphImpl::OneIteration(GraphTime aStateEnd)
 {
+  TRACE_AUDIO_CALLBACK();
   // Changes to LIFECYCLE_RUNNING occur before starting or reviving the graph
   // thread, and so the monitor need not be held to check mLifecycleState.
   // LIFECYCLE_THREAD_NOT_STARTED is possible when shutting down offline
   // graphs that have not started.
   MOZ_DIAGNOSTIC_ASSERT(mLifecycleState <= LIFECYCLE_RUNNING);
   MOZ_ASSERT(OnGraphThread());
   WebCore::DenormalDisabler disabler;
 
@@ -1546,16 +1550,17 @@ public:
                                                bool aSourceIsMSG)
     : Runnable("MediaStreamGraphStableStateRunnable")
     , mGraph(aGraph)
     , mSourceIsMSG(aSourceIsMSG)
   {
   }
   NS_IMETHOD Run() override
   {
+    TRACE();
     if (mGraph) {
       mGraph->RunInStableState(mSourceIsMSG);
     }
     return NS_OK;
   }
 private:
   RefPtr<MediaStreamGraphImpl> mGraph;
   bool mSourceIsMSG;
@@ -2767,16 +2772,17 @@ SourceMediaStream::SetPullEnabled(bool a
   GraphImpl()->AppendMessage(MakeUnique<Message>(this, aEnabled));
 }
 
 bool
 SourceMediaStream::PullNewData(
   StreamTime aDesiredUpToTime,
   nsTArray<RefPtr<SourceMediaStream::NotifyPullPromise>>& aPromises)
 {
+  TRACE_AUDIO_CALLBACK();
   MutexAutoLock lock(mMutex);
   if (!mPullEnabled || mFinished) {
     return false;
   }
   // Compute how much stream time we'll need assuming we don't block
   // the stream at all.
   StreamTime t = GraphTimeToStreamTime(aDesiredUpToTime);
   StreamTime current = mTracks.GetEnd();
--- a/dom/media/MediaStreamGraphImpl.h
+++ b/dom/media/MediaStreamGraphImpl.h
@@ -18,16 +18,17 @@
 #include "mozilla/UniquePtr.h"
 #include "mozilla/WeakPtr.h"
 #include "nsDataHashtable.h"
 #include "nsIMemoryReporter.h"
 #include "nsINamed.h"
 #include "nsIRunnable.h"
 #include "nsIThread.h"
 #include "nsITimer.h"
+#include "AsyncLogger.h"
 
 namespace mozilla {
 
 namespace media {
 class ShutdownTicket;
 }
 
 template <typename T>
--- a/dom/media/TrackUnionStream.cpp
+++ b/dom/media/TrackUnionStream.cpp
@@ -63,16 +63,17 @@ TrackUnionStream::TrackUnionStream()
         EndTrack(i);
         mTrackMap.RemoveElementAt(i);
       }
     }
     ProcessedMediaStream::RemoveInput(aPort);
   }
   void TrackUnionStream::ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags)
   {
+    TRACE();
     if (IsFinishedOnGraphThread()) {
       return;
     }
     AutoTArray<bool,8> mappedTracksFinished;
     AutoTArray<bool,8> mappedTracksWithMatchingInputTracks;
     for (uint32_t i = 0; i < mTrackMap.Length(); ++i) {
       mappedTracksFinished.AppendElement(true);
       mappedTracksWithMatchingInputTracks.AppendElement(false);
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -12,16 +12,17 @@
 #include "AudioConverter.h"
 #include "MediaManager.h"
 #include "MediaStreamGraphImpl.h"
 #include "MediaTrackConstraints.h"
 #include "mozilla/Assertions.h"
 #include "mozilla/ErrorNames.h"
 #include "mtransport/runnable_utils.h"
 #include "nsAutoPtr.h"
+#include "Tracing.h"
 
 // scoped_ptr.h uses FF
 #ifdef FF
 #undef FF
 #endif
 #include "webrtc/modules/audio_device/opensl/single_rw_fifo.h"
 #include "webrtc/voice_engine/voice_engine_defines.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
@@ -1178,16 +1179,17 @@ MediaEngineWebRTCMicrophoneSource::Inser
 // Note this can be called back after ::Shutdown()
 void
 MediaEngineWebRTCMicrophoneSource::NotifyInputData(MediaStreamGraph* aGraph,
                                                    const AudioDataValue* aBuffer,
                                                    size_t aFrames,
                                                    TrackRate aRate,
                                                    uint32_t aChannels)
 {
+  TRACE_AUDIO_CALLBACK();
   // If some processing is necessary, packetize and insert in the WebRTC.org
   // code. Otherwise, directly insert the mic data in the MSG, bypassing all processing.
   if (PassThrough()) {
     InsertInGraph<AudioDataValue>(aBuffer, aFrames, aChannels);
   } else {
     PacketizeAndProcess(aGraph, aBuffer, aFrames, aRate, aChannels);
   }
 }
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -45,16 +45,17 @@
 #include "nsThreadUtils.h"
 #include "nspr.h"
 #include "runnable_utils.h"
 #include "srtp.h"
 #include "transportflow.h"
 #include "transportlayer.h"
 #include "transportlayerdtls.h"
 #include "transportlayerice.h"
+#include "Tracing.h"
 
 #include "webrtc/base/bind.h"
 #include "webrtc/base/keep_ref_until_done.h"
 #include "webrtc/common_types.h"
 #include "webrtc/common_video/include/i420_buffer_pool.h"
 #include "webrtc/common_video/include/video_frame_buffer.h"
 #include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
 
@@ -1926,22 +1927,23 @@ MediaPipelineTransmit::PipelineListener:
     LOGTAG,
     "MediaPipeline::NotifyRealtimeTrackData() listener=%p, offset=%" PRId64
     ", duration=%" PRId64,
     this,
     aOffset,
     aMedia.GetDuration());
 
   if (aMedia.GetType() == MediaSegment::VIDEO) {
+    TRACE_COMMENT("Video");
     // We have to call the upstream NotifyRealtimeTrackData and
     // MediaStreamVideoSink will route them to SetCurrentFrames.
     MediaStreamVideoSink::NotifyRealtimeTrackData(aGraph, aOffset, aMedia);
     return;
   }
-
+  TRACE_COMMENT("Audio");
   NewData(aMedia, aGraph->GraphRate());
 }
 
 void
 MediaPipelineTransmit::PipelineListener::NotifyQueuedChanges(
   MediaStreamGraph* aGraph,
   StreamTime aOffset,
   const MediaSegment& aQueuedMedia)
@@ -2017,16 +2019,17 @@ MediaPipelineTransmit::PipelineListener:
 
     const AudioSegment* audio = static_cast<const AudioSegment*>(&aMedia);
     for (AudioSegment::ConstChunkIterator iter(*audio); !iter.IsEnded();
          iter.Next()) {
       mAudioProcessing->QueueAudioChunk(aRate, *iter, mEnabled);
     }
   } else {
     const VideoSegment* video = static_cast<const VideoSegment*>(&aMedia);
+
     for (VideoSegment::ConstChunkIterator iter(*video); !iter.IsEnded();
          iter.Next()) {
       mConverter->QueueVideoChunk(*iter, !mEnabled);
     }
   }
 }
 
 void
@@ -2229,16 +2232,17 @@ private:
   ~PipelineListener()
   {
     NS_ReleaseOnMainThreadSystemGroup("MediaPipeline::mConduit",
                                       mConduit.forget());
   }
 
   void NotifyPullImpl(StreamTime aDesiredTime)
   {
+    TRACE();
     uint32_t samplesPer10ms = mRate / 100;
 
     // mSource's rate is not necessarily the same as the graph rate, since there
     // are sample-rate constraints on the inbound audio: only 16, 32, 44.1 and
  // 48kHz are supported. The audio frames we get here are going to be
     // resampled when inserted into the graph.
     TrackTicks desired = mSource->TimeToTicksRoundUp(mRate, aDesiredTime);
     TrackTicks framesNeeded = desired - mPlayedTicks;
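The samplesPer10ms computation in the last hunk reflects WebRTC.org consuming audio in 10 ms packets, so the packet size in frames is one hundredth of the sample rate. A quick illustration (a hypothetical helper mirroring the mRate / 100 line above):

#include <cstdint>

// WebRTC.org processes audio in 10 ms packets; at aRate Hz that is
// aRate / 100 frames per packet (48000 Hz -> 480, 44100 Hz -> 441).
uint32_t SamplesPer10ms(uint32_t aRate)
{
  return aRate / 100;
}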