Bug 1487057 - Part 3 - Turn the mAllocations array into an mAllocation UniquePtr, that can be nullptr. r=pehrsons
authorPaul Adenot <paul@paul.cx>
Wed, 29 Aug 2018 19:00:28 +0200
changeset 499446 2ff222ff2a723fc90210e9bd84968333c0fa5f86
parent 499445 7e6e230af6980d38db68c814c840a090bb5a354a
child 499447 304b4f68b942eb98cddfda6c58376332772b3e72
push id1864
push userffxbld-merge
push dateMon, 03 Dec 2018 15:51:40 +0000
treeherdermozilla-release@f040763d99ad [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewerspehrsons
bugs1487057
milestone64.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1487057 - Part 3 - Turn the mAllocations array into an mAllocation UniquePtr, that can be nullptr. r=pehrsons Differential Revision: https://phabricator.services.mozilla.com/D5437
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
dom/media/webrtc/MediaEngineWebRTCAudio.h
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -201,24 +201,23 @@ MediaEngineWebRTCMicrophoneSource::Reeva
     const NormalizedConstraints* aConstraintsUpdate,
     const MediaEnginePrefs& aPrefs,
     const nsString& aDeviceId,
     const char** aOutBadConstraint)
 {
   AssertIsOnOwningThread();
 
   // aHandle and/or aConstraintsUpdate may be nullptr (see below)
+  AutoTArray<const NormalizedConstraints*, 10> allConstraints;
+  if (mAllocation) {
+    if (!(aConstraintsUpdate && mAllocation->mHandle == aHandle)) {
+      allConstraints.AppendElement(&mAllocation->mHandle->mConstraints);
+    }
+  }
 
-  AutoTArray<const NormalizedConstraints*, 10> allConstraints;
-  for (const Allocation& registered : mAllocations) {
-    if (aConstraintsUpdate && registered.mHandle == aHandle) {
-      continue; // Don't count old constraints
-    }
-    allConstraints.AppendElement(&registered.mHandle->mConstraints);
-  }
   if (aConstraintsUpdate) {
     allConstraints.AppendElement(aConstraintsUpdate);
   } else if (aHandle) {
     // In the case of AddShareOfSingleSource, the handle isn't registered yet.
     allConstraints.AppendElement(&aHandle->mConstraints);
   }
 
   NormalizedConstraints netConstraints(allConstraints);
@@ -264,19 +263,17 @@ MediaEngineWebRTCMicrophoneSource::Recon
     nsAutoCString name;
     GetErrorName(rv, name);
     LOG(("Mic source %p Reconfigure() failed unexpectedly. rv=%s",
          this, name.Data()));
     Stop(aHandle);
     return NS_ERROR_UNEXPECTED;
   }
 
-  size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator());
-  MOZ_DIAGNOSTIC_ASSERT(i != mAllocations.NoIndex);
-  ApplySettings(mNetPrefs, mAllocations[i].mStream->GraphImpl());
+  ApplySettings(mNetPrefs, mAllocation->mStream->GraphImpl());
 
   return NS_OK;
 }
 
 bool operator == (const MediaEnginePrefs& a, const MediaEnginePrefs& b)
 {
   return !memcmp(&a, &b, sizeof(MediaEnginePrefs));
 };
@@ -513,28 +510,27 @@ MediaEngineWebRTCMicrophoneSource::PassT
   MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
   return mSkipProcessing;
 }
 void
 MediaEngineWebRTCMicrophoneSource::SetPassThrough(bool aPassThrough)
 {
   {
     MutexAutoLock lock(mMutex);
-    if (mAllocations.IsEmpty()) {
+    if (!mAllocation) {
       // This can be the case, for now, because we're mixing mutable shared state
       // and linearization via message queue. This is temporary.
       return;
     }
 
     // mStream is always valid because it's set right before ::Start is called.
     // SetPassThrough cannot be called before that, because it's running on the
     // graph thread, and this cannot happen before the source has been started.
-    MOZ_ASSERT(mAllocations.Length() == 1 &&
-               mAllocations[0].mStream &&
-               mAllocations[0].mStream->GraphImpl()->CurrentDriver()->OnThread(),
+    MOZ_ASSERT(mAllocation->mStream &&
+               mAllocation->mStream->GraphImpl()->CurrentDriver()->OnThread(),
                "Wrong calling pattern, don't call this before ::SetTrack.");
   }
   mSkipProcessing = aPassThrough;
 }
 
 uint32_t
 MediaEngineWebRTCMicrophoneSource::GetRequestedInputChannelCount(MediaStreamGraphImpl* aGraphImpl)
 {
@@ -552,41 +548,32 @@ MediaEngineWebRTCMicrophoneSource::GetRe
 }
 
 void
 MediaEngineWebRTCMicrophoneSource::SetRequestedInputChannelCount(
   uint32_t aRequestedInputChannelCount)
 {
   MutexAutoLock lock(mMutex);
 
-  MOZ_ASSERT(mAllocations.Length() <= 1);
-
-  if (mAllocations.IsEmpty()) {
+  if (!mAllocation) {
       return;
   }
-  MOZ_ASSERT(mAllocations.Length() == 1 &&
-             mAllocations[0].mStream &&
-             mAllocations[0].mStream->GraphImpl()->CurrentDriver()->OnThread(),
+  MOZ_ASSERT(mAllocation->mStream &&
+             mAllocation->mStream->GraphImpl()->CurrentDriver()->OnThread(),
              "Wrong calling pattern, don't call this before ::SetTrack.");
   mRequestedInputChannelCount = aRequestedInputChannelCount;
-  mAllocations[0].mStream->GraphImpl()->ReevaluateInputDevice();
+  mAllocation->mStream->GraphImpl()->ReevaluateInputDevice();
 }
 
 void
 MediaEngineWebRTCMicrophoneSource::ApplySettings(const MediaEnginePrefs& aPrefs,
                                                  RefPtr<MediaStreamGraphImpl> aGraph)
 {
   AssertIsOnOwningThread();
   MOZ_DIAGNOSTIC_ASSERT(aGraph);
-#ifdef DEBUG
-  {
-    MutexAutoLock lock(mMutex);
-    MOZ_ASSERT(mAllocations.Length() <= 1);
-  }
-#endif
 
   RefPtr<MediaEngineWebRTCMicrophoneSource> that = this;
   NS_DispatchToMainThread(media::NewRunnableFrom([that, graph = std::move(aGraph), aPrefs]() mutable {
     that->mSettings->mEchoCancellation.Value() = aPrefs.mAecOn;
     that->mSettings->mAutoGainControl.Value() = aPrefs.mAgcOn;
     that->mSettings->mNoiseSuppression.Value() = aPrefs.mNoiseOn;
     that->mSettings->mChannelCount.Value() = aPrefs.mChannels;
 
@@ -631,105 +618,86 @@ MediaEngineWebRTCMicrophoneSource::Alloc
                                             const ipc::PrincipalInfo& aPrincipalInfo,
                                             AllocationHandle** aOutHandle,
                                             const char** aOutBadConstraint)
 {
   AssertIsOnOwningThread();
   MOZ_ASSERT(aOutHandle);
   auto handle = MakeRefPtr<AllocationHandle>(aConstraints, aPrincipalInfo,
                                              aDeviceId);
-
-#ifdef DEBUG
-  {
-    MutexAutoLock lock(mMutex);
-    MOZ_ASSERT(mAllocations.Length() <= 1);
-  }
-#endif
   LOG(("Mic source %p allocation %p Allocate()", this, handle.get()));
 
   nsresult rv = ReevaluateAllocation(handle, nullptr, aPrefs, aDeviceId,
                                      aOutBadConstraint);
   if (NS_FAILED(rv)) {
     return rv;
   }
 
   {
     MutexAutoLock lock(mMutex);
-    MOZ_ASSERT(mAllocations.IsEmpty(), "Only allocate once.");
-    mAllocations.AppendElement(Allocation(handle));
+    MOZ_ASSERT(!mAllocation, "Only allocate once.");
+    mAllocation = MakeUnique<Allocation>(handle);
   }
 
   handle.forget(aOutHandle);
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::Deallocate(const RefPtr<const AllocationHandle>& aHandle)
 {
   AssertIsOnOwningThread();
 
   MOZ_ASSERT(mState == kStopped);
 
-  size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator());
-  MOZ_DIAGNOSTIC_ASSERT(i != mAllocations.NoIndex);
-  MOZ_DIAGNOSTIC_ASSERT(!mAllocations[i].mEnabled,
+  MOZ_DIAGNOSTIC_ASSERT(!mAllocation->mEnabled,
                         "Source should be stopped for the track before removing");
 
-  if (mAllocations[i].mStream && IsTrackIDExplicit(mAllocations[i].mTrackID)) {
-    mAllocations[i].mStream->EndTrack(mAllocations[i].mTrackID);
+  if (mAllocation->mStream && IsTrackIDExplicit(mAllocation->mTrackID)) {
+    mAllocation->mStream->EndTrack(mAllocation->mTrackID);
   }
 
-  {
-    MutexAutoLock lock(mMutex);
-    MOZ_ASSERT(mAllocations.Length() == 1, "Only allocate once.");
-    mAllocations.RemoveElementAt(i);
-  }
+  MutexAutoLock lock(mMutex);
+  MOZ_ASSERT(mAllocation, "Only deallocate once");
+  mAllocation = nullptr;
 
-  if (mAllocations.IsEmpty()) {
-    // If empty, no callbacks to deliver data should be occuring
-    MOZ_ASSERT(mState != kReleased, "Source not allocated");
-    MOZ_ASSERT(mState != kStarted, "Source not stopped");
+  // mAllocation is now cleared, so no callbacks delivering data should be occurring
+  MOZ_ASSERT(mState != kReleased, "Source not allocated");
+  MOZ_ASSERT(mState != kStarted, "Source not stopped");
 
-    MutexAutoLock lock(mMutex);
-    mState = kReleased;
-    LOG(("Audio device %s deallocated", NS_ConvertUTF16toUTF8(mDeviceName).get()));
-  } else {
-    LOG(("Audio device %s deallocated but still in use", NS_ConvertUTF16toUTF8(mDeviceName).get()));
-  }
+  mState = kReleased;
+  LOG(("Audio device %s deallocated", NS_ConvertUTF16toUTF8(mDeviceName).get()));
+
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::SetTrack(const RefPtr<const AllocationHandle>& aHandle,
                                             const RefPtr<SourceMediaStream>& aStream,
                                             TrackID aTrackID,
                                             const PrincipalHandle& aPrincipal)
 {
   AssertIsOnOwningThread();
   MOZ_ASSERT(aStream);
   MOZ_ASSERT(IsTrackIDExplicit(aTrackID));
 
-  if (mAllocations.Length() == 1 &&
-      mAllocations[0].mStream &&
-      mAllocations[0].mStream->Graph() != aStream->Graph()) {
+  if (mAllocation &&
+      mAllocation->mStream &&
+      mAllocation->mStream->Graph() != aStream->Graph()) {
     return NS_ERROR_NOT_AVAILABLE;
   }
 
-  MOZ_ASSERT(mAllocations.Length() == 1, "Only allocate once.");
-
-  size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator());
-  MOZ_DIAGNOSTIC_ASSERT(i != mAllocations.NoIndex);
-  MOZ_ASSERT(!mAllocations[i].mStream);
-  MOZ_ASSERT(mAllocations[i].mTrackID == TRACK_NONE);
-  MOZ_ASSERT(mAllocations[i].mPrincipal == PRINCIPAL_HANDLE_NONE);
+  MOZ_ASSERT(!mAllocation->mStream);
+  MOZ_ASSERT(mAllocation->mTrackID == TRACK_NONE);
+  MOZ_ASSERT(mAllocation->mPrincipal == PRINCIPAL_HANDLE_NONE);
   {
     MutexAutoLock lock(mMutex);
-    mAllocations[i].mStream = aStream;
-    mAllocations[i].mTrackID = aTrackID;
-    mAllocations[i].mPrincipal = aPrincipal;
+    mAllocation->mStream = aStream;
+    mAllocation->mTrackID = aTrackID;
+    mAllocation->mPrincipal = aPrincipal;
   }
 
   AudioSegment* segment = new AudioSegment();
 
   aStream->AddAudioTrack(aTrackID,
                          aStream->GraphRate(),
                          0,
                          segment,
@@ -744,24 +712,19 @@ MediaEngineWebRTCMicrophoneSource::Start
 {
   AssertIsOnOwningThread();
   if (mState == kStarted) {
     return NS_OK;
   }
 
   MOZ_ASSERT(mState == kAllocated || mState == kStopped);
 
-  size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator());
-  MOZ_DIAGNOSTIC_ASSERT(i != mAllocations.NoIndex,
-                        "Can't start track that hasn't been added");
-  Allocation& allocation = mAllocations[i];
-
   CubebUtils::AudioDeviceID deviceID = mDeviceInfo->DeviceID();
-  if (allocation.mStream->GraphImpl()->InputDeviceID() &&
-      allocation.mStream->GraphImpl()->InputDeviceID() != deviceID) {
+  if (mAllocation->mStream->GraphImpl()->InputDeviceID() &&
+      mAllocation->mStream->GraphImpl()->InputDeviceID() != deviceID) {
     // For now, we only allow opening a single audio input device per document,
     // because we can only have one MSG per document.
     return NS_ERROR_FAILURE;
   }
 
   // On Linux with PulseAudio, we still only allow a certain number of audio
   // input stream in each content process, because of issues related to audio
   // remoting and PulseAudio.
@@ -773,74 +736,69 @@ MediaEngineWebRTCMicrophoneSource::Start
       sInputStreamsOpen == CubebUtils::GetMaxInputStreams()) {
     LOG(("%p Already capturing audio in this process, aborting", this));
     return NS_ERROR_FAILURE;
   }
 
   sInputStreamsOpen++;
 #endif
 
-  MOZ_ASSERT(!allocation.mEnabled, "Source already started");
+  MOZ_ASSERT(!mAllocation->mEnabled, "Source already started");
   {
     // This spans setting both the enabled state and mState.
     MutexAutoLock lock(mMutex);
-    allocation.mEnabled = true;
+    mAllocation->mEnabled = true;
 
 #ifdef DEBUG
     // Ensure that callback-tracking state is reset when callbacks start coming.
-    allocation.mLastCallbackAppendTime = 0;
+    mAllocation->mLastCallbackAppendTime = 0;
 #endif
-    allocation.mLiveFramesAppended = false;
-    allocation.mLiveSilenceAppended = false;
+    mAllocation->mLiveFramesAppended = false;
+    mAllocation->mLiveSilenceAppended = false;
 
     if (!mListener) {
       mListener = new WebRTCAudioDataListener(this);
     }
 
     // Make sure logger starts before capture
     AsyncLatencyLogger::Get(true);
 
-    allocation.mStream->OpenAudioInput(deviceID, mListener);
+    mAllocation->mStream->OpenAudioInput(deviceID, mListener);
 
     MOZ_ASSERT(mState != kReleased);
     mState = kStarted;
   }
 
-  ApplySettings(mNetPrefs, allocation.mStream->GraphImpl());
+  ApplySettings(mNetPrefs, mAllocation->mStream->GraphImpl());
 
   return NS_OK;
 }
 
 nsresult
 MediaEngineWebRTCMicrophoneSource::Stop(const RefPtr<const AllocationHandle>& aHandle)
 {
-  MOZ_ASSERT(mAllocations.Length() <= 1);
   AssertIsOnOwningThread();
 
   LOG(("Mic source %p allocation %p Stop()", this, aHandle.get()));
 
-  size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator());
-  MOZ_DIAGNOSTIC_ASSERT(i != mAllocations.NoIndex,
-                        "Cannot stop track that we don't know about");
-  Allocation& allocation = mAllocations[i];
-  MOZ_ASSERT(allocation.mStream, "SetTrack must have been called before ::Stop");
+  MOZ_ASSERT(mAllocation->mStream, "SetTrack must have been called before ::Stop");
 
-  if (!allocation.mEnabled) {
+  if (!mAllocation->mEnabled) {
     // Already stopped - this is allowed
     return NS_OK;
   }
 
   {
     // This spans setting both the enabled state and mState.
     MutexAutoLock lock(mMutex);
-    allocation.mEnabled = false;
+    mAllocation->mEnabled = false;
 
     CubebUtils::AudioDeviceID deviceID = mDeviceInfo->DeviceID();
     Maybe<CubebUtils::AudioDeviceID> id = Some(deviceID);
-    allocation.mStream->CloseAudioInput(id, mListener);
+    mAllocation->mStream->CloseAudioInput(id, mListener);
     mListener = nullptr;
 #ifdef MOZ_PULSEAUDIO
     MOZ_ASSERT(sInputStreamsOpen > 0);
     sInputStreamsOpen--;
 #endif
 
     if (HasEnabledTrack()) {
       // Another track is keeping us from stopping
@@ -869,79 +827,77 @@ MediaEngineWebRTCMicrophoneSource::Pull(
                                         const PrincipalHandle& aPrincipalHandle)
 {
   TRACE_AUDIO_CALLBACK_COMMENT("SourceMediaStream %p track %i",
                                aStream.get(), aTrackID);
   StreamTime delta;
 
   {
     MutexAutoLock lock(mMutex);
-    size_t i = mAllocations.IndexOf(aHandle, 0, AllocationHandleComparator());
-    if (i == mAllocations.NoIndex) {
-      // This handle must have been deallocated. That's fine, and its track
-      // will already be ended. No need to do anything.
+    if (!mAllocation) {
+      // Deallocation already happened. Just return.
       return;
     }
 
     // We don't want to GetEndOfAppendedData() above at the declaration if the
     // allocation was removed and the track non-existant. An assert will fail.
     delta = aDesiredTime - aStream->GetEndOfAppendedData(aTrackID);
 
     if (delta < 0) {
       LOG_FRAMES(("Not appending silence for allocation %p; %" PRId64 " frames already buffered",
-                  mAllocations[i].mHandle.get(), -delta));
+                  mAllocation->mHandle.get(), -delta));
       return;
     }
 
-    if (!mAllocations[i].mLiveFramesAppended ||
-        !mAllocations[i].mLiveSilenceAppended) {
+    if (!mAllocation->mLiveFramesAppended ||
+        !mAllocation->mLiveSilenceAppended) {
       // These are the iterations after starting or resuming audio capture.
       // Make sure there's at least one extra block buffered until audio
       // callbacks come in. We also allow appending silence one time after
       // audio callbacks have started, to cover the case where audio callbacks
       // start appending data immediately and there is no extra data buffered.
       delta += WEBAUDIO_BLOCK_SIZE;
 
       // If we're supposed to be packetizing but there's no packetizer yet,
       // there must not have been any live frames appended yet.
       // If there were live frames appended and we haven't appended the
       // right amount of silence, we'll have to append silence once more,
       // failing the other assert below.
       MOZ_ASSERT_IF(!PassThrough(aStream->GraphImpl()) && !mPacketizerInput,
-                    !mAllocations[i].mLiveFramesAppended);
+                    !mAllocation->mLiveFramesAppended);
 
       if (!PassThrough(aStream->GraphImpl()) && mPacketizerInput) {
         // Processing is active and is processed in chunks of 10ms through the
         // input packetizer. We allow for 10ms of silence on the track to
         // accomodate the buffering worst-case.
         delta += mPacketizerInput->PacketSize();
       }
     }
 
     LOG_FRAMES(("Pulling %" PRId64 " frames of silence for allocation %p",
-                delta, mAllocations[i].mHandle.get()));
+                delta, mAllocation->mHandle.get()));
 
     // This assertion fails when we append silence here in the same iteration
     // as there were real audio samples already appended by the audio callback.
     // Note that this is exempted until live samples and a subsequent chunk of
     // silence have been appended to the track. This will cover cases like:
     // - After Start(), there is silence (maybe multiple times) appended before
     //   the first audio callback.
     // - After Start(), there is real data (maybe multiple times) appended
     //   before the first graph iteration.
     // And other combinations of order of audio sample sources.
     MOZ_ASSERT_IF(
-      mAllocations[i].mEnabled &&
-      mAllocations[i].mLiveFramesAppended &&
-      mAllocations[i].mLiveSilenceAppended,
+      mAllocation->mEnabled &&
+      mAllocation->mLiveFramesAppended &&
+      mAllocation->mLiveSilenceAppended,
       aStream->GraphImpl()->IterationEnd() >
-      mAllocations[i].mLastCallbackAppendTime);
+      mAllocation->mLastCallbackAppendTime);
 
-    if (mAllocations[i].mLiveFramesAppended) {
-      mAllocations[i].mLiveSilenceAppended = true;
+    if (mAllocation->mLiveFramesAppended) {
+      mAllocation->mLiveSilenceAppended = true;
     }
   }
 
   AudioSegment audio;
   audio.AppendNullData(delta);
   aStream->AppendToTrack(aTrackID, &audio);
 }
 
@@ -1125,52 +1081,46 @@ MediaEngineWebRTCMicrophoneSource::Packe
                                     outputConfig,
                                     processedOutputChannelPointers.Elements());
     MutexAutoLock lock(mMutex);
     if (mState != kStarted) {
       return;
     }
 
     AudioSegment segment;
-    for (Allocation& allocation : mAllocations) {
-      if (!allocation.mStream) {
-        continue;
-      }
+    if (!mAllocation->mStream->GraphImpl()) {
+      // The DOMMediaStream that owns mAllocation->mStream has been cleaned up
+      // and MediaStream::DestroyImpl() has run in the MSG. This is fine and
+      // can happen before the MediaManager thread gets to stop capture for
+      // this allocation.
+      continue;
+    }
 
-      if (!allocation.mStream->GraphImpl()) {
-        // The DOMMediaStream that owns allocation.mStream has been cleaned up
-        // and MediaStream::DestroyImpl() has run in the MSG. This is fine and
-        // can happen before the MediaManager thread gets to stop capture for
-        // this allocation.
-        continue;
-      }
+    if (!mAllocation->mEnabled) {
+      continue;
+    }
 
-      if (!allocation.mEnabled) {
-        continue;
-      }
-
-      LOG_FRAMES(("Appending %" PRIu32 " frames of packetized audio for allocation %p",
-                  mPacketizerInput->PacketSize(), allocation.mHandle.get()));
+    LOG_FRAMES(("Appending %" PRIu32 " frames of packetized audio for allocation %p",
+                mPacketizerInput->PacketSize(), mAllocation->mHandle.get()));
 
 #ifdef DEBUG
-      allocation.mLastCallbackAppendTime =
-        allocation.mStream->GraphImpl()->IterationEnd();
+    mAllocation->mLastCallbackAppendTime =
+      mAllocation->mStream->GraphImpl()->IterationEnd();
 #endif
-      allocation.mLiveFramesAppended = true;
+    mAllocation->mLiveFramesAppended = true;
 
-      // We already have planar audio data of the right format. Insert into the
-      // MSG.
-      MOZ_ASSERT(processedOutputChannelPointers.Length() == aChannels);
-      RefPtr<SharedBuffer> other = buffer;
-      segment.AppendFrames(other.forget(),
-                           processedOutputChannelPointersConst,
-                           mPacketizerInput->PacketSize(),
-                           allocation.mPrincipal);
-      allocation.mStream->AppendToTrack(allocation.mTrackID, &segment);
-    }
+    // We already have planar audio data of the right format. Insert into the
+    // MSG.
+    MOZ_ASSERT(processedOutputChannelPointers.Length() == aChannels);
+    RefPtr<SharedBuffer> other = buffer;
+    segment.AppendFrames(other.forget(),
+                         processedOutputChannelPointersConst,
+                         mPacketizerInput->PacketSize(),
+                         mAllocation->mPrincipal);
+    mAllocation->mStream->AppendToTrack(mAllocation->mTrackID, &segment);
   }
 }
 
 template<typename T>
 void
 MediaEngineWebRTCMicrophoneSource::InsertInGraph(const T* aBuffer,
                                                  size_t aFrames,
                                                  uint32_t aChannels)
@@ -1178,112 +1128,102 @@ MediaEngineWebRTCMicrophoneSource::Inser
   MutexAutoLock lock(mMutex);
 
   if (mState != kStarted) {
     return;
   }
 
   if (MOZ_LOG_TEST(AudioLogModule(), LogLevel::Debug)) {
     mTotalFrames += aFrames;
-    if (mAllocations[0].mStream &&
+    if (mAllocation->mStream &&
         mTotalFrames > mLastLogFrames +
-                       mAllocations[0].mStream->GraphRate()) { // ~ 1 second
+                       mAllocation->mStream->GraphRate()) { // ~ 1 second
       MOZ_LOG(AudioLogModule(), LogLevel::Debug,
               ("%p: Inserting %zu samples into graph, total frames = %" PRIu64,
                (void*)this, aFrames, mTotalFrames));
       mLastLogFrames = mTotalFrames;
     }
   }
 
-  for (Allocation& allocation : mAllocations) {
-    if (!allocation.mStream) {
-      continue;
-    }
+  if (!mAllocation->mStream) {
+    return;
+  }
 
-    if (!allocation.mStream->GraphImpl()) {
-      // The DOMMediaStream that owns allocation.mStream has been cleaned up
-      // and MediaStream::DestroyImpl() has run in the MSG. This is fine and
-      // can happen before the MediaManager thread gets to stop capture for
-      // this allocation.
-      continue;
-    }
+  if (!mAllocation->mStream->GraphImpl()) {
+    // The DOMMediaStream that owns mAllocation->mStream has been cleaned up
+    // and MediaStream::DestroyImpl() has run in the MSG. This is fine and
+    // can happen before the MediaManager thread gets to stop capture for
+    // this allocation.
+    return;
+  }
 
-    if (!allocation.mEnabled) {
-      continue;
-    }
+  if (!mAllocation->mEnabled) {
+    return;
+  }
 
 #ifdef DEBUG
-    allocation.mLastCallbackAppendTime =
-      allocation.mStream->GraphImpl()->IterationEnd();
+  mAllocation->mLastCallbackAppendTime =
+    mAllocation->mStream->GraphImpl()->IterationEnd();
 #endif
-    allocation.mLiveFramesAppended = true;
+  mAllocation->mLiveFramesAppended = true;
 
-    TimeStamp insertTime;
-    // Make sure we include the stream and the track.
-    // The 0:1 is a flag to note when we've done the final insert for a given input block.
-    LogTime(AsyncLatencyLogger::AudioTrackInsertion,
-            LATENCY_STREAM_ID(allocation.mStream.get(), allocation.mTrackID),
-            (&allocation != &mAllocations.LastElement()) ? 0 : 1, insertTime);
-
-    // Bug 971528 - Support stereo capture in gUM
-    MOZ_ASSERT(aChannels >= 1 && aChannels <= 8, "Support up to 8 channels");
+  // Bug 971528 - Support stereo capture in gUM
+  MOZ_ASSERT(aChannels >= 1 && aChannels <= 8, "Support up to 8 channels");
 
-    AudioSegment segment;
-    RefPtr<SharedBuffer> buffer =
-      SharedBuffer::Create(aFrames * aChannels * sizeof(T));
-    AutoTArray<const T*, 8> channels;
-    if (aChannels == 1) {
-      PodCopy(static_cast<T*>(buffer->Data()), aBuffer, aFrames);
-      channels.AppendElement(static_cast<T*>(buffer->Data()));
-    } else {
-      channels.SetLength(aChannels);
-      AutoTArray<T*, 8> write_channels;
-      write_channels.SetLength(aChannels);
-      T * samples = static_cast<T*>(buffer->Data());
+  AudioSegment segment;
+  RefPtr<SharedBuffer> buffer =
+    SharedBuffer::Create(aFrames * aChannels * sizeof(T));
+  AutoTArray<const T*, 8> channels;
+  if (aChannels == 1) {
+    PodCopy(static_cast<T*>(buffer->Data()), aBuffer, aFrames);
+    channels.AppendElement(static_cast<T*>(buffer->Data()));
+  } else {
+    channels.SetLength(aChannels);
+    AutoTArray<T*, 8> write_channels;
+    write_channels.SetLength(aChannels);
+    T * samples = static_cast<T*>(buffer->Data());
 
-      size_t offset = 0;
-      for(uint32_t i = 0; i < aChannels; ++i) {
-        channels[i] = write_channels[i] = samples + offset;
-        offset += aFrames;
-      }
-
-      DeinterleaveAndConvertBuffer(aBuffer,
-                                   aFrames,
-                                   aChannels,
-                                   write_channels.Elements());
+    size_t offset = 0;
+    for(uint32_t i = 0; i < aChannels; ++i) {
+      channels[i] = write_channels[i] = samples + offset;
+      offset += aFrames;
     }
 
-    LOG_FRAMES(("Appending %zu frames of raw audio for allocation %p",
-                aFrames, allocation.mHandle.get()));
+    DeinterleaveAndConvertBuffer(aBuffer,
+        aFrames,
+        aChannels,
+        write_channels.Elements());
+  }
 
-    MOZ_ASSERT(aChannels == channels.Length());
-    segment.AppendFrames(buffer.forget(), channels, aFrames,
-                          allocation.mPrincipal);
-    segment.GetStartTime(insertTime);
+  LOG_FRAMES(("Appending %zu frames of raw audio for allocation %p",
+        aFrames, mAllocation->mHandle.get()));
 
-    allocation.mStream->AppendToTrack(allocation.mTrackID, &segment);
-  }
+  MOZ_ASSERT(aChannels == channels.Length());
+  segment.AppendFrames(buffer.forget(), channels, aFrames,
+      mAllocation->mPrincipal);
+
+  mAllocation->mStream->AppendToTrack(mAllocation->mTrackID, &segment);
 }
 
 // Called back on GraphDriver thread!
 // Note this can be called back after ::Shutdown()
 void
 MediaEngineWebRTCMicrophoneSource::NotifyInputData(MediaStreamGraphImpl* aGraph,
                                                    const AudioDataValue* aBuffer,
                                                    size_t aFrames,
                                                    TrackRate aRate,
                                                    uint32_t aChannels)
 {
   MOZ_ASSERT(aGraph->CurrentDriver()->OnThread());
   TRACE_AUDIO_CALLBACK();
 
   {
     MutexAutoLock lock(mMutex);
-    if (mAllocations.IsEmpty()) {
-      // This can happen because mAllocations is not yet using message passing, and
+    if (!mAllocation) {
+      // This can happen because mAllocation is not yet using message passing, and
       // is access both on the media manager thread and the MSG thread. This is to
       // be fixed soon.
       // When deallocating, the listener is removed via message passing, while the
       // allocation is removed immediately, so there can be a few iterations where
       // we need to return early here.
       return;
     }
   }
@@ -1336,28 +1276,24 @@ MediaEngineWebRTCMicrophoneSource::Disco
 }
 
 void
 MediaEngineWebRTCMicrophoneSource::Shutdown()
 {
   AssertIsOnOwningThread();
 
   if (mState == kStarted) {
-    for (const Allocation& allocation : mAllocations) {
-      if (allocation.mEnabled) {
-        Stop(allocation.mHandle);
-      }
+    if (mAllocation->mEnabled) {
+      Stop(mAllocation->mHandle);
     }
     MOZ_ASSERT(mState == kStopped);
   }
 
-  while (!mAllocations.IsEmpty()) {
-    MOZ_ASSERT(mState == kAllocated || mState == kStopped);
-    Deallocate(mAllocations[0].mHandle);
-  }
+  MOZ_ASSERT(!mAllocation || mState == kAllocated || mState == kStopped);
+  if (mAllocation) { Deallocate(mAllocation->mHandle); }
   MOZ_ASSERT(mState == kReleased);
 }
 
 nsString
 MediaEngineWebRTCAudioCaptureSource::GetName() const
 {
   return NS_LITERAL_STRING(u"AudioCapture");
 }
@@ -1381,22 +1317,18 @@ MediaEngineWebRTCAudioCaptureSource::Get
   // Remove {} and the null terminator
   return nsCString(Substring(asciiString, 1, NSID_LENGTH - 3));
 }
 
 bool
 MediaEngineWebRTCMicrophoneSource::HasEnabledTrack() const
 {
   AssertIsOnOwningThread();
-  for (const Allocation& allocation : mAllocations) {
-    if (allocation.mEnabled) {
-      return true;
-    }
-  }
-  return false;
+  MOZ_ASSERT(mAllocation);
+  return mAllocation->mEnabled;
 }
 
 nsresult
 MediaEngineWebRTCAudioCaptureSource::SetTrack(const RefPtr<const AllocationHandle>& aHandle,
                                               const RefPtr<SourceMediaStream>& aStream,
                                               TrackID aTrackID,
                                               const PrincipalHandle& aPrincipalHandle)
 {
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.h
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.h
@@ -251,20 +251,19 @@ private:
 
   // accessed from the GraphDriver thread except for deletion.
   nsAutoPtr<AudioPacketizer<AudioDataValue, float>> mPacketizerInput;
   nsAutoPtr<AudioPacketizer<AudioDataValue, float>> mPacketizerOutput;
 
   // mMutex protects some of our members off the owning thread.
   Mutex mMutex;
 
-  // We append an allocation in Allocate() and remove it in Deallocate().
-  // Both the array and the Allocation members are modified under mMutex on
-  // the owning thread. Accessed under one of the two.
-  nsTArray<Allocation> mAllocations;
+  // We set an allocation in Allocate() and remove it in Deallocate().
+  // Must be set and/or accessed while holding mMutex.
+  UniquePtr<Allocation> mAllocation;
 
   // Current state of the shared resource for this source. Written on the
   // owning thread, read on either the owning thread or the MSG thread.
   Atomic<MediaEngineSourceState> mState;
 
   bool mDelayAgnostic;
   bool mExtendedFilter;
   bool mStarted;