Bug 1487057 - Part 7 - Flatten mAllocation and fix some locking. r=pehrsons
author: Paul Adenot <paul@paul.cx>
Wed, 05 Sep 2018 16:00:33 +0200
changeset 496738 273c92182c3cccd9ad6ef2abaf30d11fd42b6aea
parent 496737 582e630a2fcb9b5cea5a320bd155bd2123af0982
child 496739 45d2c462dc92c43eed9582c1b309f8df0c66b37f
push id: 9984
push user: ffxbld-merge
push date: Mon, 15 Oct 2018 21:07:35 +0000
treeherder: mozilla-beta@183d27ea8570 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: pehrsons
bugs: 1487057
milestone: 64.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1487057 - Part 7 - Flatten mAllocation and fix some locking. r=pehrsons Differential Revision: https://phabricator.services.mozilla.com/D5441
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
dom/media/webrtc/MediaEngineWebRTCAudio.h
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -104,31 +104,26 @@ WebRTCAudioDataListener::Disconnect(Medi
     mAudioSource = nullptr;
   }
 }
 
 /**
  * WebRTC Microphone MediaEngineSource.
  */
 
-MediaEngineWebRTCMicrophoneSource::Allocation::Allocation(
-    const RefPtr<AllocationHandle>& aHandle)
-  : mHandle(aHandle)
-{}
-
-MediaEngineWebRTCMicrophoneSource::Allocation::~Allocation() = default;
-
 MediaEngineWebRTCMicrophoneSource::MediaEngineWebRTCMicrophoneSource(
     RefPtr<AudioDeviceInfo> aInfo,
     const nsString& aDeviceName,
     const nsCString& aDeviceUUID,
     uint32_t aMaxChannelCount,
     bool aDelayAgnostic,
     bool aExtendedFilter)
-  : mDeviceInfo(std::move(aInfo))
+  : mTrackID(TRACK_NONE)
+  , mPrincipal(PRINCIPAL_HANDLE_NONE)
+  , mDeviceInfo(std::move(aInfo))
   , mDelayAgnostic(aDelayAgnostic)
   , mExtendedFilter(aExtendedFilter)
   , mDeviceName(aDeviceName)
   , mDeviceUUID(aDeviceUUID)
   , mSettings(
       new nsMainThreadPtrHolder<media::Refcountable<dom::MediaTrackSettings>>(
         "MediaEngineWebRTCMicrophoneSource::mSettings",
         new media::Refcountable<dom::MediaTrackSettings>(),
@@ -194,20 +189,19 @@ MediaEngineWebRTCMicrophoneSource::Reeva
     const MediaEnginePrefs& aPrefs,
     const nsString& aDeviceId,
     const char** aOutBadConstraint)
 {
   AssertIsOnOwningThread();
 
   // aHandle and/or aConstraintsUpdate may be nullptr (see below)
   AutoTArray<const NormalizedConstraints*, 10> allConstraints;
-  if (mAllocation) {
-    if (!(aConstraintsUpdate && mAllocation->mHandle == aHandle)) {
-      allConstraints.AppendElement(&mAllocation->mHandle->mConstraints);
-    }
+
+  if (mHandle && !(aConstraintsUpdate && mHandle == aHandle)) {
+    allConstraints.AppendElement(&mHandle->mConstraints);
   }
 
   if (aConstraintsUpdate) {
     allConstraints.AppendElement(aConstraintsUpdate);
   } else if (aHandle) {
     // In the case of AddShareOfSingleSource, the handle isn't registered yet.
     allConstraints.AppendElement(&aHandle->mConstraints);
   }
@@ -236,16 +230,17 @@ nsresult
 MediaEngineWebRTCMicrophoneSource::Reconfigure(const RefPtr<AllocationHandle>& aHandle,
                                                const dom::MediaTrackConstraints& aConstraints,
                                                const MediaEnginePrefs& aPrefs,
                                                const nsString& aDeviceId,
                                                const char** aOutBadConstraint)
 {
   AssertIsOnOwningThread();
   MOZ_ASSERT(aHandle);
+  MOZ_ASSERT(mStream);
 
   LOG(("Mic source %p allocation %p Reconfigure()", this, aHandle.get()));
 
   NormalizedConstraints constraints(aConstraints);
   nsresult rv = ReevaluateAllocation(aHandle, &constraints, aPrefs, aDeviceId,
                                      aOutBadConstraint);
   if (NS_FAILED(rv)) {
     if (aOutBadConstraint) {
@@ -255,17 +250,17 @@ MediaEngineWebRTCMicrophoneSource::Recon
     nsAutoCString name;
     GetErrorName(rv, name);
     LOG(("Mic source %p Reconfigure() failed unexpectedly. rv=%s",
          this, name.Data()));
     Stop(aHandle);
     return NS_ERROR_UNEXPECTED;
   }
 
-  ApplySettings(mNetPrefs, mAllocation->mStream->GraphImpl());
+  ApplySettings(mNetPrefs, mStream->GraphImpl());
 
   return NS_OK;
 }
 
 bool operator == (const MediaEnginePrefs& a, const MediaEnginePrefs& b)
 {
   return !memcmp(&a, &b, sizeof(MediaEnginePrefs));
 };
@@ -502,27 +497,20 @@ MediaEngineWebRTCMicrophoneSource::PassT
   MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
   return mSkipProcessing;
 }
 void
 MediaEngineWebRTCMicrophoneSource::SetPassThrough(bool aPassThrough)
 {
   {
     MutexAutoLock lock(mMutex);
-    if (!mAllocation) {
-      // This can be the case, for now, because we're mixing mutable shared state
-      // and linearization via message queue. This is temporary.
-      return;
-    }
-
     // mStream is always valid because it's set right before ::Start is called.
     // SetPassThrough cannot be called before that, because it's running on the
     // graph thread, and this cannot happen before the source has been started.
-    MOZ_ASSERT(mAllocation->mStream &&
-               mAllocation->mStream->GraphImpl()->CurrentDriver()->OnThread(),
+    MOZ_ASSERT(mStream->GraphImpl()->CurrentDriver()->OnThread(),
                "Wrong calling pattern, don't call this before ::SetTrack.");
   }
   mSkipProcessing = aPassThrough;
 }
 
 uint32_t
 MediaEngineWebRTCMicrophoneSource::GetRequestedInputChannelCount(MediaStreamGraphImpl* aGraphImpl)
 {
@@ -540,24 +528,20 @@ MediaEngineWebRTCMicrophoneSource::GetRe
 }
 
 void
 MediaEngineWebRTCMicrophoneSource::SetRequestedInputChannelCount(
   uint32_t aRequestedInputChannelCount)
 {
   MutexAutoLock lock(mMutex);
 
-  if (!mAllocation) {
-      return;
-  }
-  MOZ_ASSERT(mAllocation->mStream &&
-             mAllocation->mStream->GraphImpl()->CurrentDriver()->OnThread(),
-             "Wrong calling pattern, don't call this before ::SetTrack.");
+  MOZ_ASSERT(mStream->GraphImpl()->CurrentDriver()->OnThread(),
+      "Wrong calling pattern, don't call this before ::SetTrack.");
   mRequestedInputChannelCount = aRequestedInputChannelCount;
-  mAllocation->mStream->GraphImpl()->ReevaluateInputDevice();
+  mStream->GraphImpl()->ReevaluateInputDevice();
 }
 
 void
 MediaEngineWebRTCMicrophoneSource::ApplySettings(const MediaEnginePrefs& aPrefs,
                                                  RefPtr<MediaStreamGraphImpl> aGraph)
 {
   AssertIsOnOwningThread();
   MOZ_DIAGNOSTIC_ASSERT(aGraph);
@@ -620,41 +604,53 @@ MediaEngineWebRTCMicrophoneSource::Alloc
   nsresult rv = ReevaluateAllocation(handle, nullptr, aPrefs, aDeviceId,
                                      aOutBadConstraint);
   if (NS_FAILED(rv)) {
     return rv;
   }
 
   {
     MutexAutoLock lock(mMutex);
-    MOZ_ASSERT(!mAllocation, "Only allocate once.");
-    mAllocation = MakeUnique<Allocation>(Allocation(handle));
+    MOZ_ASSERT(!mHandle, "Only allocate once.");
+    mHandle = handle;
   }
 
   handle.forget(aOutHandle);
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::Deallocate(const RefPtr<const AllocationHandle>& aHandle)
 {
   AssertIsOnOwningThread();
 
   MOZ_ASSERT(mState == kStopped);
 
-  MOZ_DIAGNOSTIC_ASSERT(!mAllocation->mEnabled,
+  MOZ_DIAGNOSTIC_ASSERT(!mEnabled,
                         "Source should be stopped for the track before removing");
 
-  if (mAllocation->mStream && IsTrackIDExplicit(mAllocation->mTrackID)) {
-    mAllocation->mStream->EndTrack(mAllocation->mTrackID);
+  if (mStream && IsTrackIDExplicit(mTrackID)) {
+    mStream->EndTrack(mTrackID);
   }
 
   MutexAutoLock lock(mMutex);
-  MOZ_ASSERT(mAllocation, "Only deallocate once");
-  mAllocation = nullptr;
+  MOZ_ASSERT(mHandle, "Only deallocate once");
+
+  // Reset all state. This is not strictly necessary, this instance will get
+  // destroyed soon.
+#ifdef DEBUG
+  mLastCallbackAppendTime = 0;
+#endif
+  mLiveFramesAppended = false;
+  mLiveSilenceAppended = false;
+  mHandle = nullptr;
+  mStream = nullptr;
+  mTrackID = TRACK_NONE;
+  mPrincipal = PRINCIPAL_HANDLE_NONE;
+  mEnabled = false;
 
   // If empty, no callbacks to deliver data should be occuring
   MOZ_ASSERT(mState != kReleased, "Source not allocated");
   MOZ_ASSERT(mState != kStarted, "Source not stopped");
 
   mState = kReleased;
   LOG(("Audio device %s deallocated", NS_ConvertUTF16toUTF8(mDeviceName).get()));
 
@@ -666,31 +662,30 @@ MediaEngineWebRTCMicrophoneSource::SetTr
                                             const RefPtr<SourceMediaStream>& aStream,
                                             TrackID aTrackID,
                                             const PrincipalHandle& aPrincipal)
 {
   AssertIsOnOwningThread();
   MOZ_ASSERT(aStream);
   MOZ_ASSERT(IsTrackIDExplicit(aTrackID));
 
-  if (mAllocation &&
-      mAllocation->mStream &&
-      mAllocation->mStream->Graph() != aStream->Graph()) {
+
+  MutexAutoLock lock(mMutex);
+
+  if (mStream &&
+      mStream->Graph() != aStream->Graph()) {
     return NS_ERROR_NOT_AVAILABLE;
   }
 
-  MOZ_ASSERT(!mAllocation->mStream);
-  MOZ_ASSERT(mAllocation->mTrackID == TRACK_NONE);
-  MOZ_ASSERT(mAllocation->mPrincipal == PRINCIPAL_HANDLE_NONE);
-  {
-    MutexAutoLock lock(mMutex);
-    mAllocation->mStream = aStream;
-    mAllocation->mTrackID = aTrackID;
-    mAllocation->mPrincipal = aPrincipal;
-  }
+  MOZ_ASSERT(!mStream);
+  MOZ_ASSERT(mTrackID == TRACK_NONE);
+  MOZ_ASSERT(mPrincipal == PRINCIPAL_HANDLE_NONE);
+  mStream = aStream;
+  mTrackID = aTrackID;
+  mPrincipal = aPrincipal;
 
   AudioSegment* segment = new AudioSegment();
 
   aStream->AddAudioTrack(aTrackID,
                          aStream->GraphRate(),
                          0,
                          segment,
                          SourceMediaStream::ADDTRACK_QUEUED);
@@ -698,25 +693,28 @@ MediaEngineWebRTCMicrophoneSource::SetTr
   LOG(("Stream %p registered for microphone capture", aStream.get()));
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::Start(const RefPtr<const AllocationHandle>& aHandle)
 {
   AssertIsOnOwningThread();
+
+  MutexAutoLock lock(mMutex);
+  // This spans setting both the enabled state and mState.
   if (mState == kStarted) {
     return NS_OK;
   }
 
   MOZ_ASSERT(mState == kAllocated || mState == kStopped);
 
   CubebUtils::AudioDeviceID deviceID = mDeviceInfo->DeviceID();
-  if (mAllocation->mStream->GraphImpl()->InputDeviceID() &&
-      mAllocation->mStream->GraphImpl()->InputDeviceID() != deviceID) {
+  if (mStream->GraphImpl()->InputDeviceID() &&
+      mStream->GraphImpl()->InputDeviceID() != deviceID) {
     // For now, we only allow opening a single audio input device per document,
     // because we can only have one MSG per document.
     return NS_ERROR_FAILURE;
   }
 
   // On Linux with PulseAudio, we still only allow a certain number of audio
   // input stream in each content process, because of issues related to audio
   // remoting and PulseAudio.
@@ -728,77 +726,68 @@ MediaEngineWebRTCMicrophoneSource::Start
       sInputStreamsOpen == CubebUtils::GetMaxInputStreams()) {
     LOG(("%p Already capturing audio in this process, aborting", this));
     return NS_ERROR_FAILURE;
   }
 
   sInputStreamsOpen++;
 #endif
 
-  MOZ_ASSERT(!mAllocation->mEnabled, "Source already started");
-  {
-    // This spans setting both the enabled state and mState.
-    MutexAutoLock lock(mMutex);
-    mAllocation->mEnabled = true;
+  MOZ_ASSERT(!mEnabled, "Source already started");
+  mEnabled = true;
 
 #ifdef DEBUG
-    // Ensure that callback-tracking state is reset when callbacks start coming.
-    mAllocation->mLastCallbackAppendTime = 0;
+  // Ensure that callback-tracking state is reset when callbacks start coming.
+  mLastCallbackAppendTime = 0;
 #endif
-    mAllocation->mLiveFramesAppended = false;
-    mAllocation->mLiveSilenceAppended = false;
+  mLiveFramesAppended = false;
+  mLiveSilenceAppended = false;
 
-    if (!mListener) {
-      mListener = new WebRTCAudioDataListener(this);
-    }
-
-    mAllocation->mStream->OpenAudioInput(deviceID, mListener);
-
-    MOZ_ASSERT(mState != kReleased);
-    mState = kStarted;
+  if (!mListener) {
+    mListener = new WebRTCAudioDataListener(this);
   }
 
-  ApplySettings(mNetPrefs, mAllocation->mStream->GraphImpl());
+  mStream->OpenAudioInput(deviceID, mListener);
+
+  MOZ_ASSERT(mState != kReleased);
+  mState = kStarted;
+
+  ApplySettings(mNetPrefs, mStream->GraphImpl());
 
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::Stop(const RefPtr<const AllocationHandle>& aHandle)
 {
   AssertIsOnOwningThread();
 
   LOG(("Mic source %p allocation %p Stop()", this, aHandle.get()));
 
-  MOZ_ASSERT(mAllocation->mStream, "SetTrack must have been called before ::Stop");
-
-  if (!mAllocation->mEnabled) {
-    // Already stopped - this is allowed
-    return NS_OK;
-  }
+  MOZ_ASSERT(mStream, "SetTrack must have been called before ::Stop");
 
   {
     // This spans setting both the enabled state and mState.
     MutexAutoLock lock(mMutex);
-    mAllocation->mEnabled = false;
+    if (!mEnabled) {
+      // Already stopped - this is allowed
+      return NS_OK;
+    }
+
+    mEnabled = false;
 
     CubebUtils::AudioDeviceID deviceID = mDeviceInfo->DeviceID();
     Maybe<CubebUtils::AudioDeviceID> id = Some(deviceID);
-    mAllocation->mStream->CloseAudioInput(id, mListener);
+    mStream->CloseAudioInput(id, mListener);
     mListener = nullptr;
 #ifdef MOZ_PULSEAUDIO
     MOZ_ASSERT(sInputStreamsOpen > 0);
     sInputStreamsOpen--;
 #endif
 
-    if (HasEnabledTrack()) {
-      // Another track is keeping us from stopping
-      return NS_OK;
-    }
-
     MOZ_ASSERT(mState == kStarted, "Should be started when stopping");
     mState = kStopped;
   }
 
   return NS_OK;
 }
 
 void
@@ -816,77 +805,77 @@ MediaEngineWebRTCMicrophoneSource::Pull(
                                         const PrincipalHandle& aPrincipalHandle)
 {
   TRACE_AUDIO_CALLBACK_COMMENT("SourceMediaStream %p track %i",
                                aStream.get(), aTrackID);
   StreamTime delta;
 
   {
     MutexAutoLock lock(mMutex);
-    if (!mAllocation) {
+    if (!mHandle) {
       // Deallocation already happened. Just return.
       return;
     }
 
     // We don't want to GetEndOfAppendedData() above at the declaration if the
     // allocation was removed and the track non-existant. An assert will fail.
     delta = aDesiredTime - aStream->GetEndOfAppendedData(aTrackID);
 
     if (delta < 0) {
       LOG_FRAMES(("Not appending silence for allocation %p; %" PRId64 " frames already buffered",
-                  mAllocation->mHandle.get(), -delta));
+                  mHandle.get(), -delta));
       return;
     }
 
-    if (!mAllocation->mLiveFramesAppended ||
-        !mAllocation->mLiveSilenceAppended) {
+    if (!mLiveFramesAppended ||
+        !mLiveSilenceAppended) {
       // These are the iterations after starting or resuming audio capture.
       // Make sure there's at least one extra block buffered until audio
       // callbacks come in. We also allow appending silence one time after
       // audio callbacks have started, to cover the case where audio callbacks
       // start appending data immediately and there is no extra data buffered.
       delta += WEBAUDIO_BLOCK_SIZE;
 
       // If we're supposed to be packetizing but there's no packetizer yet,
       // there must not have been any live frames appended yet.
       // If there were live frames appended and we haven't appended the
       // right amount of silence, we'll have to append silence once more,
       // failing the other assert below.
       MOZ_ASSERT_IF(!PassThrough(aStream->GraphImpl()) && !mPacketizerInput,
-                    !mAllocation->mLiveFramesAppended);
+                    !mLiveFramesAppended);
 
       if (!PassThrough(aStream->GraphImpl()) && mPacketizerInput) {
         // Processing is active and is processed in chunks of 10ms through the
         // input packetizer. We allow for 10ms of silence on the track to
         // accomodate the buffering worst-case.
         delta += mPacketizerInput->PacketSize();
       }
     }
 
     LOG_FRAMES(("Pulling %" PRId64 " frames of silence for allocation %p",
-                delta, mAllocation->mHandle.get()));
+                delta, mHandle.get()));
 
     // This assertion fails when we append silence here in the same iteration
     // as there were real audio samples already appended by the audio callback.
     // Note that this is exempted until live samples and a subsequent chunk of
     // silence have been appended to the track. This will cover cases like:
     // - After Start(), there is silence (maybe multiple times) appended before
     //   the first audio callback.
     // - After Start(), there is real data (maybe multiple times) appended
     //   before the first graph iteration.
     // And other combinations of order of audio sample sources.
     MOZ_ASSERT_IF(
-      mAllocation->mEnabled &&
-      mAllocation->mLiveFramesAppended &&
-      mAllocation->mLiveSilenceAppended,
+      mEnabled &&
+      mLiveFramesAppended &&
+      mLiveSilenceAppended,
       aStream->GraphImpl()->IterationEnd() >
-      mAllocation->mLastCallbackAppendTime);
+      mLastCallbackAppendTime);
 
-    if (mAllocation->mLiveFramesAppended) {
-      mAllocation->mLiveSilenceAppended = true;
+    if (mLiveFramesAppended) {
+      mLiveSilenceAppended = true;
     }
   }
 
   AudioSegment audio;
   audio.AppendNullData(delta);
   aStream->AppendToTrack(aTrackID, &audio);
 }
 
@@ -1063,82 +1052,80 @@ MediaEngineWebRTCMicrophoneSource::Packe
                                     outputConfig,
                                     processedOutputChannelPointers.Elements());
     MutexAutoLock lock(mMutex);
     if (mState != kStarted) {
       return;
     }
 
     AudioSegment segment;
-    if (!mAllocation->mStream->GraphImpl()) {
-      // The DOMMediaStream that owns mAllocation->mStream has been cleaned up
+    if (!mStream->GraphImpl()) {
+      // The DOMMediaStream that owns mStream has been cleaned up
       // and MediaStream::DestroyImpl() has run in the MSG. This is fine and
       // can happen before the MediaManager thread gets to stop capture for
-      // this allocation.
+      // this MediaStream.
       continue;
     }
 
-    if (!mAllocation->mEnabled) {
+    if (!mEnabled) {
       continue;
     }
 
     LOG_FRAMES(("Appending %" PRIu32 " frames of packetized audio for allocation %p",
-                mPacketizerInput->PacketSize(), mAllocation->mHandle.get()));
+                mPacketizerInput->PacketSize(), mHandle.get()));
 
 #ifdef DEBUG
-    mAllocation->mLastCallbackAppendTime =
-      mAllocation->mStream->GraphImpl()->IterationEnd();
+    mLastCallbackAppendTime = mStream->GraphImpl()->IterationEnd();
 #endif
-    mAllocation->mLiveFramesAppended = true;
+    mLiveFramesAppended = true;
 
     // We already have planar audio data of the right format. Insert into the
     // MSG.
     MOZ_ASSERT(processedOutputChannelPointers.Length() == aChannels);
     RefPtr<SharedBuffer> other = buffer;
     segment.AppendFrames(other.forget(),
                          processedOutputChannelPointersConst,
                          mPacketizerInput->PacketSize(),
-                         mAllocation->mPrincipal);
-    mAllocation->mStream->AppendToTrack(mAllocation->mTrackID, &segment);
+                         mPrincipal);
+    mStream->AppendToTrack(mTrackID, &segment);
   }
 }
 
 template<typename T>
 void
 MediaEngineWebRTCMicrophoneSource::InsertInGraph(const T* aBuffer,
                                                  size_t aFrames,
                                                  uint32_t aChannels)
 {
   MutexAutoLock lock(mMutex);
 
   if (mState != kStarted) {
     return;
   }
 
-  if (!mAllocation->mStream) {
+  if (!mStream) {
     return;
   }
 
-  if (!mAllocation->mStream->GraphImpl()) {
-    // The DOMMediaStream that owns mAllocation->mStream has been cleaned up
+  if (!mStream->GraphImpl()) {
+    // The DOMMediaStream that owns mStream has been cleaned up
     // and MediaStream::DestroyImpl() has run in the MSG. This is fine and
     // can happen before the MediaManager thread gets to stop capture for
-    // this mAllocation->
+    // this MediaStream.
     return;
   }
 
-  if (!mAllocation->mEnabled) {
+  if (!mEnabled) {
     return;
   }
 
 #ifdef DEBUG
-  mAllocation->mLastCallbackAppendTime =
-    mAllocation->mStream->GraphImpl()->IterationEnd();
+  mLastCallbackAppendTime = mStream->GraphImpl()->IterationEnd();
 #endif
-  mAllocation->mLiveFramesAppended = true;
+  mLiveFramesAppended = true;
 
   // Bug 971528 - Support stereo capture in gUM
   MOZ_ASSERT(aChannels >= 1 && aChannels <= 8, "Support up to 8 channels");
 
   AudioSegment segment;
   RefPtr<SharedBuffer> buffer =
     SharedBuffer::Create(aFrames * aChannels * sizeof(T));
   AutoTArray<const T*, 8> channels;
@@ -1159,46 +1146,46 @@ MediaEngineWebRTCMicrophoneSource::Inser
 
     DeinterleaveAndConvertBuffer(aBuffer,
         aFrames,
         aChannels,
         write_channels.Elements());
   }
 
   LOG_FRAMES(("Appending %zu frames of raw audio for allocation %p",
-        aFrames, mAllocation->mHandle.get()));
+        aFrames, mHandle.get()));
 
   MOZ_ASSERT(aChannels == channels.Length());
   segment.AppendFrames(buffer.forget(), channels, aFrames,
-      mAllocation->mPrincipal);
+      mPrincipal);
 
-  mAllocation->mStream->AppendToTrack(mAllocation->mTrackID, &segment);
+  mStream->AppendToTrack(mTrackID, &segment);
 }
 
 // Called back on GraphDriver thread!
 // Note this can be called back after ::Shutdown()
 void
 MediaEngineWebRTCMicrophoneSource::NotifyInputData(MediaStreamGraphImpl* aGraph,
                                                    const AudioDataValue* aBuffer,
                                                    size_t aFrames,
                                                    TrackRate aRate,
                                                    uint32_t aChannels)
 {
   MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
   TRACE_AUDIO_CALLBACK();
 
   {
     MutexAutoLock lock(mMutex);
-    if (!mAllocation) {
-      // This can happen because mAllocation is not yet using message passing, and
-      // is access both on the media manager thread and the MSG thread. This is to
-      // be fixed soon.
-      // When deallocating, the listener is removed via message passing, while the
-      // allocation is removed immediately, so there can be a few iterations where
-      // we need to return early here.
+    if (!mHandle) {
+      // This can happen because this class is not yet using message passing,
+      // and is accessed both on the media manager thread and the MSG thread.
+      // This is to be fixed soon.
+      // When deallocating, the listener is removed via message passing, while
+      // the allocation is removed immediately, so there can be a few iterations
+      // where we need to return early here.
       return;
     }
   }
   // If some processing is necessary, packetize and insert in the WebRTC.org
   // code. Otherwise, directly insert the mic data in the MSG, bypassing all processing.
   if (PassThrough(aGraph)) {
     InsertInGraph<AudioDataValue>(aBuffer, aFrames, aChannels);
   } else {
@@ -1246,24 +1233,24 @@ MediaEngineWebRTCMicrophoneSource::Disco
 }
 
 void
 MediaEngineWebRTCMicrophoneSource::Shutdown()
 {
   AssertIsOnOwningThread();
 
   if (mState == kStarted) {
-    if (mAllocation->mEnabled) {
-      Stop(mAllocation->mHandle);
+    if (mEnabled) {
+      Stop(mHandle);
     }
     MOZ_ASSERT(mState == kStopped);
   }
 
   MOZ_ASSERT(mState == kAllocated || mState == kStopped);
-  Deallocate(mAllocation->mHandle);
+  Deallocate(mHandle);
   MOZ_ASSERT(mState == kReleased);
 }
 
 nsString
 MediaEngineWebRTCAudioCaptureSource::GetName() const
 {
   return NS_LITERAL_STRING(u"AudioCapture");
 }
@@ -1283,24 +1270,16 @@ MediaEngineWebRTCAudioCaptureSource::Get
 
   uuid.ToProvidedString(uuidBuffer);
   asciiString.AssignASCII(uuidBuffer);
 
   // Remove {} and the null terminator
   return nsCString(Substring(asciiString, 1, NSID_LENGTH - 3));
 }
 
-bool
-MediaEngineWebRTCMicrophoneSource::HasEnabledTrack() const
-{
-  AssertIsOnOwningThread();
-  MOZ_ASSERT(mAllocation);
-  return mAllocation->mEnabled;
-}
-
 nsresult
 MediaEngineWebRTCAudioCaptureSource::SetTrack(const RefPtr<const AllocationHandle>& aHandle,
                                               const RefPtr<SourceMediaStream>& aStream,
                                               TrackID aTrackID,
                                               const PrincipalHandle& aPrincipalHandle)
 {
   AssertIsOnOwningThread();
   // Nothing to do here. aStream is a placeholder dummy and not exposed.
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.h
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.h
@@ -135,59 +135,16 @@ public:
 
   void Shutdown() override;
 
 protected:
   ~MediaEngineWebRTCMicrophoneSource() {}
 
 private:
   /**
-   * Representation of data tied to an AllocationHandle rather than to the source.
-   */
-  struct Allocation {
-    Allocation() = delete;
-    explicit Allocation(const RefPtr<AllocationHandle>& aHandle);
-    ~Allocation();
-
-#ifdef DEBUG
-    // The MSGImpl::IterationEnd() of the last time we appended data from an
-    // audio callback.
-    // Guarded by MediaEngineWebRTCMicrophoneSource::mMutex.
-    GraphTime mLastCallbackAppendTime = 0;
-#endif
-    // Set to false by Start(). Becomes true after the first time we append real
-    // audio frames from the audio callback.
-    // Guarded by MediaEngineWebRTCMicrophoneSource::mMutex.
-    bool mLiveFramesAppended = false;
-
-    // Set to false by Start(). Becomes true after the first time we append
-    // silence *after* the first audio callback has appended real frames.
-    // Guarded by MediaEngineWebRTCMicrophoneSource::mMutex.
-    bool mLiveSilenceAppended = false;
-
-    const RefPtr<AllocationHandle> mHandle;
-    RefPtr<SourceMediaStream> mStream;
-    TrackID mTrackID = TRACK_NONE;
-    PrincipalHandle mPrincipal = PRINCIPAL_HANDLE_NONE;
-    bool mEnabled = false;
-  };
-
-  /**
-   * Used with nsTArray<Allocation>::IndexOf to locate an Allocation by a handle.
-   */
-  class AllocationHandleComparator {
-  public:
-    bool Equals(const Allocation& aAllocation,
-                const RefPtr<const AllocationHandle>& aHandle) const
-    {
-      return aHandle == aAllocation.mHandle;
-    }
-  };
-
-  /**
    * Reevaluates the aggregated constraints of all allocations and restarts the
    * underlying device if necessary.
    *
    * If the given AllocationHandle was already registered, its constraints will
    * be updated before reevaluation. If not, they will be added before
    * reevaluation.
    */
   nsresult ReevaluateAllocation(const RefPtr<AllocationHandle>& aHandle,
@@ -235,17 +192,25 @@ private:
   uint32_t GetRequestedInputChannelCount(MediaStreamGraphImpl* aGraphImpl);
   // Graph thread only.
   void SetRequestedInputChannelCount(uint32_t aRequestedInputChannelCount);
   // This is true when all processing is disabled, we can skip
   // packetization, resampling and other processing passes.
   // Graph thread only.
   bool PassThrough(MediaStreamGraphImpl* aGraphImpl) const;
 
-  // Set on construction and then immutable, can be used anywhere.
+  // Those are written on the MediaManager thread, read on either the
+  // MediaManager thread or the MSG thread. Guarded by mMutex.
+  RefPtr<AllocationHandle> mHandle;
+  RefPtr<SourceMediaStream> mStream;
+  TrackID mTrackID = TRACK_NONE;
+  PrincipalHandle mPrincipal = PRINCIPAL_HANDLE_NONE;
+  bool mEnabled = false;
+
+  // Set on construction and then immutable. Used on the MediaManager thread.
   const RefPtr<AudioDeviceInfo> mDeviceInfo;
   // Those four members are set on construction, on the MediaManager thread.
   const bool mDelayAgnostic;
   const bool mExtendedFilter;
   const nsString mDeviceName;
   const nsCString mDeviceUUID;
   // The current settings for the underlying device.
   // Constructed on the MediaManager thread, and then only ever accessed on the
@@ -255,21 +220,19 @@ private:
   // representing the currently applied settings for this source. This is the
   // net result of the prefs across all allocations.
   // Owning thread only.
   MediaEnginePrefs mNetPrefs;
 
   // Current state of the shared resource for this source. Written on the
   // owning thread, read on either the owning thread or the MSG thread.
   Atomic<MediaEngineSourceState> mState;
+  // This mutex must be held to access mAllocation (and its members) and
+  // modifying mListener.
   Mutex mMutex;
-  // We set an allocation in Allocate() and remove it in Deallocate().
-  // Must be set on the MediaManager thread and is then accessed while holding
-  // mMutex on the MSG thread or the MediaManager thread.
-  UniquePtr<Allocation> mAllocation;
   // mListener is created on the MediaManager thread, and then sent to the MSG
   // thread. On shutdown, we send this pointer to the MSG thread again, telling
   // it to clean up.
   RefPtr<WebRTCAudioDataListener> mListener;
   // Created on the MediaManager thread, then used on the graph thread for
   // processing, and on the MediaManager thread when setting parameters (this is
   // thread safe).
   const UniquePtr<webrtc::AudioProcessing> mAudioProcessing;
@@ -292,16 +255,31 @@ private:
   // Stores the mixed audio output for the reverse-stream of the AEC.
   AlignedFloatBuffer mOutputBuffer;
   // Stores the microphone audio, to be processed by the APM.
   AlignedFloatBuffer mInputBuffer;
   // Stores the deinterleaved microphone audio
   AlignedFloatBuffer mDeinterleavedBuffer;
   // Stores the mixed down input audio
   AlignedFloatBuffer mInputDownmixBuffer;
+#ifdef DEBUG
+  // The MSGImpl::IterationEnd() of the last time we appended data from an
+  // audio callback.
+  // Guarded by MediaEngineWebRTCMicrophoneSource::mMutex.
+  GraphTime mLastCallbackAppendTime = 0;
+#endif
+  // Set to false by Start(). Becomes true after the first time we append real
+  // audio frames from the audio callback.
+  // Guarded by MediaEngineWebRTCMicrophoneSource::mMutex.
+  bool mLiveFramesAppended = false;
+
+  // Set to false by Start(). Becomes true after the first time we append
+  // silence *after* the first audio callback has appended real frames.
+  // Guarded by MediaEngineWebRTCMicrophoneSource::mMutex.
+  bool mLiveSilenceAppended = false;
 };
 
 
 class MediaEngineWebRTCAudioCaptureSource : public MediaEngineSource
 {
 public:
   explicit MediaEngineWebRTCAudioCaptureSource(const char* aUuid)
   {