Bug 1487057 - Part 10 - Remove MediaEngineWebRTCAudio::mEnabled. r=pehrsons
author Paul Adenot <paul@paul.cx>
Wed, 03 Oct 2018 14:58:28 +0200
changeset 499453 9cf36402deed806fb359c25f633d6dc9f31dc7ea
parent 499452 e1a790218e20b97d32957965379423a67196f54b
child 499454 0190c5793ffeeb0eec281cf377435c7a592415f3
push id 1864
push user ffxbld-merge
push date Mon, 03 Dec 2018 15:51:40 +0000
treeherder mozilla-release@f040763d99ad
reviewers pehrsons
bugs 1487057
milestone 64.0a1
Bug 1487057 - Part 10 - Remove MediaEngineWebRTCAudio::mEnabled. r=pehrsons

It was redundant with mState.

Differential Revision: https://phabricator.services.mozilla.com/D7602
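
The redundancy is easy to see from the source's lifecycle states: a microphone source is "enabled" exactly when it is in the kStarted state, so the boolean carries no extra information. A minimal compiling sketch of that idea (simplified state set; IsEnabled() is a hypothetical helper added purely for illustration and is not part of the patch):

#include <cassert>

// Sketch only: why a separate mEnabled flag is redundant with mState.
// The real MediaEngineWebRTCMicrophoneSource manages allocation, track
// setup and threading assertions as well; this just shows the state logic.
enum State { kReleased, kAllocated, kStarted, kStopped };

class MicrophoneSourceSketch {
public:
  void Allocate() { mState = kAllocated; }
  void Start() { mState = kStarted; }   // the patch drops "mEnabled = true" here
  void Stop()  { mState = kStopped; }   // and "mEnabled = false" here

  // Everything mEnabled used to answer is derivable from mState.
  bool IsEnabled() const { return mState == kStarted; }

private:
  State mState = kReleased;
};

int main() {
  MicrophoneSourceSketch source;
  source.Allocate();
  source.Start();
  assert(source.IsEnabled());
  source.Stop();
  assert(!source.IsEnabled());
  return 0;
}

The diff below removes the flag and the assertions that duplicated what mState already guarantees.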
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -58,17 +58,16 @@ MediaEngineWebRTCMicrophoneSource::Media
   RefPtr<AudioDeviceInfo> aInfo,
   const nsString& aDeviceName,
   const nsCString& aDeviceUUID,
   uint32_t aMaxChannelCount,
   bool aDelayAgnostic,
   bool aExtendedFilter)
   : mTrackID(TRACK_NONE)
   , mPrincipal(PRINCIPAL_HANDLE_NONE)
-  , mEnabled(false)
   , mDeviceInfo(std::move(aInfo))
   , mDelayAgnostic(aDelayAgnostic)
   , mExtendedFilter(aExtendedFilter)
   , mDeviceName(aDeviceName)
   , mDeviceUUID(aDeviceUUID)
   , mDeviceMaxChannelCount(aMaxChannelCount)
   , mSettings(
       new nsMainThreadPtrHolder<media::Refcountable<dom::MediaTrackSettings>>(
@@ -574,19 +573,16 @@ protected:
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::Deallocate(const RefPtr<const AllocationHandle>& aHandle)
 {
   AssertIsOnOwningThread();
 
   MOZ_ASSERT(mState == kStopped);
 
-  MOZ_DIAGNOSTIC_ASSERT(!mEnabled,
-                        "Source should be stopped for the track before removing");
-
   if (mStream && IsTrackIDExplicit(mTrackID)) {
     RefPtr<MediaStream> sourceStream = mStream;
     RefPtr<MediaStreamGraphImpl> graphImpl = mStream->GraphImpl();
     NS_DispatchToMainThread(media::NewRunnableFrom(
       [ graph = std::move(graphImpl),
         stream = std::move(sourceStream),
         trackID = mTrackID]() mutable {
         if (graph) {
@@ -601,17 +597,16 @@ MediaEngineWebRTCMicrophoneSource::Deall
   MOZ_ASSERT(mHandle, "Only deallocate once");
 
   // Reset all state. This is not strictly necessary, as this instance will get
   // destroyed soon.
   mHandle = nullptr;
   mStream = nullptr;
   mTrackID = TRACK_NONE;
   mPrincipal = PRINCIPAL_HANDLE_NONE;
-  mEnabled = false;
 
   // If empty, no callbacks to deliver data should be occurring
   MOZ_ASSERT(mState != kReleased, "Source not allocated");
   MOZ_ASSERT(mState != kStarted, "Source not stopped");
 
   mState = kReleased;
   LOG(("Audio device %s deallocated", NS_ConvertUTF16toUTF8(mDeviceName).get()));
 
@@ -650,36 +645,43 @@ MediaEngineWebRTCMicrophoneSource::SetTr
 
   LOG(("Stream %p registered for microphone capture", aStream.get()));
   return NS_OK;
 }
 
 class StartStopMessage : public ControlMessage
 {
   public:
-    StartStopMessage(AudioInputProcessing* aInputProcessing,
-                     bool aStart)
-    : ControlMessage(nullptr)
-    , mInputProcessing(aInputProcessing)
-    , mStart(aStart)
-  {
+    enum StartStop
+    {
+      Start,
+      Stop
+    };
+
+    StartStopMessage(AudioInputProcessing* aInputProcessing, StartStop aAction)
+      : ControlMessage(nullptr)
+      , mInputProcessing(aInputProcessing)
+      , mAction(aAction)
+    {
   }
 
   void Run() override
   {
-    if (mStart) {
+    if (mAction == StartStopMessage::Start) {
       mInputProcessing->Start();
+    } else if (mAction == StartStopMessage::Stop) {
+      mInputProcessing->Stop();
     } else {
-      mInputProcessing->Stop();
+      MOZ_CRASH("Invalid enum value");
     }
   }
 
 protected:
   RefPtr<AudioInputProcessing> mInputProcessing;
-  bool mStart;
+  StartStop mAction;
 };
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::Start(const RefPtr<const AllocationHandle>& aHandle)
 {
   AssertIsOnOwningThread();
 
   // This spans setting both the enabled state and mState.
@@ -708,39 +710,35 @@ MediaEngineWebRTCMicrophoneSource::Start
       sInputStreamsOpen == CubebUtils::GetMaxInputStreams()) {
     LOG(("%p Already capturing audio in this process, aborting", this));
     return NS_ERROR_FAILURE;
   }
 
   sInputStreamsOpen++;
 #endif
 
-  MOZ_ASSERT(!mEnabled, "Source already started");
-  mEnabled = true;
-
   AssertIsOnOwningThread();
 
   mInputProcessing = new AudioInputProcessing(
     mDeviceMaxChannelCount, mStream, mTrackID, mPrincipal);
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
   RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
   NS_DispatchToMainThread(media::NewRunnableFrom(
-    [ that, graph = std::move(gripGraph), enabled = mEnabled, deviceID ]() mutable {
+    [ that, graph = std::move(gripGraph), deviceID ]() mutable {
 
-    if (graph) {
-      graph->AppendMessage(
-          MakeUnique<StartStopMessage>(that->mInputProcessing, enabled));
-    }
+      if (graph) {
+        graph->AppendMessage(MakeUnique<StartStopMessage>(
+          that->mInputProcessing, StartStopMessage::Start));
+      }
 
-    that->mStream->OpenAudioInput(deviceID, that->mInputProcessing);
+      that->mStream->OpenAudioInput(deviceID, that->mInputProcessing);
 
-    return NS_OK;
-  }));
-
+      return NS_OK;
+    }));
 
   MOZ_ASSERT(mState != kReleased);
   mState = kStarted;
 
   ApplySettings(mNetPrefs, mStream->GraphImpl());
 
   return NS_OK;
 }
@@ -749,44 +747,41 @@ nsresult
 MediaEngineWebRTCMicrophoneSource::Stop(const RefPtr<const AllocationHandle>& aHandle)
 {
   AssertIsOnOwningThread();
 
   LOG(("Mic source %p allocation %p Stop()", this, aHandle.get()));
 
   MOZ_ASSERT(mStream, "SetTrack must have been called before ::Stop");
 
-  if (!mEnabled) {
+  if (mState == kStopped) {
     // Already stopped - this is allowed
     return NS_OK;
   }
 
-  mEnabled = false;
-
 #ifdef MOZ_PULSEAUDIO
     MOZ_ASSERT(sInputStreamsOpen > 0);
     sInputStreamsOpen--;
 #endif
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
   RefPtr<MediaStreamGraphImpl> gripGraph = mStream->GraphImpl();
   NS_DispatchToMainThread(media::NewRunnableFrom(
-    [ that, graph = std::move(gripGraph), enabled = mEnabled, stream = mStream]() mutable {
+    [ that, graph = std::move(gripGraph), stream = mStream ]() mutable {
 
-    if (graph) {
-      graph->AppendMessage(
-          MakeUnique<StartStopMessage>(that->mInputProcessing, enabled));
-    }
+      if (graph) {
+        graph->AppendMessage(MakeUnique<StartStopMessage>(
+          that->mInputProcessing, StartStopMessage::Stop));
+      }
 
-    CubebUtils::AudioDeviceID deviceID = that->mDeviceInfo->DeviceID();
-    Maybe<CubebUtils::AudioDeviceID> id = Some(deviceID);
-    stream->CloseAudioInput(id, that->mInputProcessing);
+      CubebUtils::AudioDeviceID deviceID = that->mDeviceInfo->DeviceID();
+      Maybe<CubebUtils::AudioDeviceID> id = Some(deviceID);
+      stream->CloseAudioInput(id, that->mInputProcessing);
 
-    return NS_OK;
-  }));
-
+      return NS_OK;
+    }));
 
   MOZ_ASSERT(mState == kStarted, "Should be started when stopping");
   mState = kStopped;
 
   return NS_OK;
 }
 
 void
@@ -824,19 +819,17 @@ AudioInputProcessing::Disconnect(MediaSt
 }
 
 void
 MediaEngineWebRTCMicrophoneSource::Shutdown()
 {
   AssertIsOnOwningThread();
 
   if (mState == kStarted) {
-    if (mEnabled) {
-      Stop(mHandle);
-    }
+    Stop(mHandle);
     MOZ_ASSERT(mState == kStopped);
   }
 
   MOZ_ASSERT(mState == kAllocated || mState == kStopped);
   Deallocate(mHandle);
   MOZ_ASSERT(mState == kReleased);
 }
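
As an aside on the StartStopMessage change above: replacing the bool constructor argument with a named StartStop enum makes call sites self-describing and lets Run() fail loudly on an unexpected value. A standalone sketch of the same pattern, with hypothetical names rather than the real ControlMessage/MediaStreamGraph types:

#include <cstdio>

// Hypothetical message class mirroring the enum-over-bool pattern used by
// StartStopMessage in the patch; not the real ControlMessage API.
class PumpMessage {
public:
  enum Action { Start, Stop };

  explicit PumpMessage(Action aAction) : mAction(aAction) {}

  void Run() {
    if (mAction == Start) {
      std::puts("starting capture");
    } else if (mAction == Stop) {
      std::puts("stopping capture");
    }
  }

private:
  Action mAction;
};

int main() {
  // Call sites state the intent explicitly instead of passing a bare `true`.
  PumpMessage(PumpMessage::Start).Run();
  PumpMessage(PumpMessage::Stop).Run();
  return 0;
}

With the enum, a reader of AppendMessage(MakeUnique<StartStopMessage>(that->mInputProcessing, StartStopMessage::Start)) no longer has to look up what a bare true or false meant.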