Bug 1322095 - Part 2: Remove nsAutoPtr from dom/media. r=jya
authorEric Rahm <erahm@mozilla.com>
Fri, 21 Feb 2020 22:44:00 +0000
changeset 2648951 bb1ce9b0e5f64889ff6faa732414afe81f229e11
parent 2648950 0ea358101f67db9eaefb4ffb6a5c5914fc58d774
child 2648952 4b23f08adbb295c8f6b2da5d2d0a9beab70b2492
push id490874
push userdvarga@mozilla.com
push dateSat, 22 Feb 2020 10:42:44 +0000
treeherdertry@64a8e247759e [default view] [failures only]
reviewersjya
bugs1322095
milestone75.0a1
Bug 1322095 - Part 2: Remove nsAutoPtr from dom/media. r=jya This converts `nsAutoPtr` usage in dom/media to `UniquePtr`. Beyond just a search and replace we also needed to update assignment and access of the `UniquePtr`s. This falls into a few categories: - Assignment from a newly constructed object switches to `MakeUnique` - Assignment from a raw ptr switches to `UniquePtr::reset` - Handing out a raw ptr now requires `UniquePtr::get` - Uses `UniquePtr::release` rather than `nsAutoPtr::forget` - A few spots are updated to return a `UniquePtr` rather than a raw ptr Differential Revision: https://phabricator.services.mozilla.com/D60084
dom/media/AudioStream.h
dom/media/DOMMediaStream.h
dom/media/ForwardedInputTrack.h
dom/media/GetUserMediaRequest.h
dom/media/MediaDecoder.cpp
dom/media/MediaDecoder.h
dom/media/MediaDecoderStateMachine.cpp
dom/media/MediaDecoderStateMachine.h
dom/media/MediaFormatReader.h
dom/media/MediaMetadataManager.h
dom/media/MediaRecorder.cpp
dom/media/MediaTrackGraph.h
dom/media/VideoUtils.h
dom/media/encoder/VP8TrackEncoder.cpp
dom/media/encoder/VP8TrackEncoder.h
dom/media/flac/FlacDemuxer.h
dom/media/flac/FlacFrameParser.cpp
dom/media/flac/FlacFrameParser.h
dom/media/gmp/GMPServiceParent.cpp
dom/media/gmp/GMPTimerParent.cpp
dom/media/gmp/GMPVideoEncodedFrameImpl.h
dom/media/gtest/TestMediaSpan.cpp
dom/media/mediasource/ContainerParser.cpp
dom/media/mediasource/ContainerParser.h
dom/media/mediasource/TrackBuffersManager.h
dom/media/mediasource/gtest/TestContainerParser.cpp
dom/media/mp4/Index.cpp
dom/media/mp4/Index.h
dom/media/mp4/MP4Demuxer.cpp
dom/media/ogg/OggCodecState.cpp
dom/media/ogg/OggCodecState.h
dom/media/ogg/OggDemuxer.cpp
dom/media/platforms/agnostic/OpusDecoder.cpp
dom/media/platforms/agnostic/OpusDecoder.h
dom/media/platforms/apple/AppleVTDecoder.cpp
dom/media/platforms/omx/OmxPromiseLayer.cpp
dom/media/platforms/omx/OmxPromiseLayer.h
dom/media/platforms/wmf/DXVA2Manager.cpp
dom/media/platforms/wmf/DXVA2Manager.h
dom/media/platforms/wmf/WMFDecoderModule.cpp
dom/media/platforms/wmf/WMFMediaDataDecoder.h
dom/media/platforms/wmf/WMFVideoMFTManager.cpp
dom/media/platforms/wmf/WMFVideoMFTManager.h
dom/media/webaudio/AudioNodeTrack.cpp
dom/media/webaudio/AudioNodeTrack.h
dom/media/webaudio/ConvolverNode.cpp
dom/media/webaudio/DynamicsCompressorNode.cpp
dom/media/webaudio/IIRFilterNode.cpp
dom/media/webaudio/PannerNode.cpp
dom/media/webaudio/ScriptProcessorNode.cpp
dom/media/webaudio/blink/DynamicsCompressor.h
dom/media/webaudio/blink/HRTFKernel.cpp
dom/media/webaudio/blink/HRTFKernel.h
dom/media/webaudio/blink/PeriodicWave.cpp
dom/media/webaudio/blink/PeriodicWave.h
dom/media/webaudio/blink/Reverb.cpp
dom/media/webaudio/blink/Reverb.h
dom/media/webaudio/blink/ReverbConvolver.cpp
dom/media/webaudio/blink/ReverbConvolver.h
dom/media/webaudio/blink/ReverbConvolverStage.cpp
dom/media/webaudio/blink/ReverbConvolverStage.h
dom/media/webm/WebMWriter.cpp
dom/media/webm/WebMWriter.h
dom/media/webrtc/MediaEngineDefault.h
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
dom/media/webrtc/MediaEngineWebRTCAudio.h
dom/media/webrtc/WebrtcGlobal.h
dom/media/webspeech/recognition/energy_endpointer.h
--- a/dom/media/AudioStream.h
+++ b/dom/media/AudioStream.h
@@ -8,17 +8,16 @@
 
 #  include "AudioSampleFormat.h"
 #  include "CubebUtils.h"
 #  include "MediaInfo.h"
 #  include "mozilla/Monitor.h"
 #  include "mozilla/RefPtr.h"
 #  include "mozilla/TimeStamp.h"
 #  include "mozilla/UniquePtr.h"
-#  include "nsAutoPtr.h"
 #  include "nsCOMPtr.h"
 #  include "nsThreadUtils.h"
 #  include "WavDumper.h"
 
 #  if defined(XP_WIN)
 #    include "mozilla/audio/AudioNotificationReceiver.h"
 #  endif
 
@@ -83,17 +82,17 @@ class AudioClock {
  private:
   // Output rate in Hz (characteristic of the playback rate)
   uint32_t mOutRate;
   // Input rate in Hz (characteristic of the media being played)
   uint32_t mInRate;
   // True if the we are timestretching, false if we are resampling.
   bool mPreservesPitch;
   // The history of frames sent to the audio engine in each DataCallback.
-  const nsAutoPtr<FrameHistory> mFrameHistory;
+  const UniquePtr<FrameHistory> mFrameHistory;
 };
 
 /*
  * A bookkeeping class to track the read/write position of an audio buffer.
  */
 class AudioBufferCursor {
  public:
   AudioBufferCursor(Span<AudioDataValue> aSpan, uint32_t aChannels,
--- a/dom/media/DOMMediaStream.h
+++ b/dom/media/DOMMediaStream.h
@@ -3,17 +3,16 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef NSDOMMEDIASTREAM_H_
 #define NSDOMMEDIASTREAM_H_
 
 #include "ImageContainer.h"
 
-#include "nsAutoPtr.h"
 #include "nsCycleCollectionParticipant.h"
 #include "nsWrapperCache.h"
 #include "nsIPrincipal.h"
 #include "MediaTrackConstraints.h"
 #include "mozilla/DOMEventTargetHelper.h"
 #include "mozilla/RelativeTimeline.h"
 
 namespace mozilla {
--- a/dom/media/ForwardedInputTrack.h
+++ b/dom/media/ForwardedInputTrack.h
@@ -2,17 +2,16 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MOZILLA_FORWARDEDINPUTTRACK_H_
 #define MOZILLA_FORWARDEDINPUTTRACK_H_
 
 #include "MediaTrackGraph.h"
-#include "nsAutoPtr.h"
 #include <algorithm>
 
 namespace mozilla {
 
 /**
  * See MediaTrackGraph::CreateForwardedInputTrack.
  */
 class ForwardedInputTrack : public ProcessedMediaTrack {
--- a/dom/media/GetUserMediaRequest.h
+++ b/dom/media/GetUserMediaRequest.h
@@ -1,18 +1,18 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef GetUserMediaRequest_h__
 #define GetUserMediaRequest_h__
 
 #include "mozilla/ErrorResult.h"
+#include "mozilla/UniquePtr.h"
 #include "nsISupportsImpl.h"
-#include "nsAutoPtr.h"
 #include "nsWrapperCache.h"
 #include "mozilla/dom/BindingUtils.h"
 #include "nsPIDOMWindow.h"
 
 namespace mozilla {
 namespace dom {
 
 struct MediaStreamConstraints;
@@ -44,17 +44,17 @@ class GetUserMediaRequest : public nsISu
 
  private:
   virtual ~GetUserMediaRequest() = default;
 
   uint64_t mInnerWindowID, mOuterWindowID;
   const nsString mCallID;
   const nsString mRawID;
   const nsString mMediaSource;
-  nsAutoPtr<MediaStreamConstraints> mConstraints;
+  UniquePtr<MediaStreamConstraints> mConstraints;
   bool mIsSecure;
   bool mIsHandlingUserInput;
 };
 
 }  // namespace dom
 }  // namespace mozilla
 
 #endif  // GetUserMediaRequest_h__
--- a/dom/media/MediaDecoder.cpp
+++ b/dom/media/MediaDecoder.cpp
@@ -701,23 +701,23 @@ void MediaDecoder::MetadataLoaded(
 
   LOG("MetadataLoaded, channels=%u rate=%u hasAudio=%d hasVideo=%d",
       aInfo->mAudio.mChannels, aInfo->mAudio.mRate, aInfo->HasAudio(),
       aInfo->HasVideo());
 
   mMediaSeekable = aInfo->mMediaSeekable;
   mMediaSeekableOnlyInBufferedRanges =
       aInfo->mMediaSeekableOnlyInBufferedRanges;
-  mInfo = aInfo.release();
+  mInfo = std::move(aInfo);
 
   // Make sure the element and the frame (if any) are told about
   // our new size.
   if (aEventVisibility != MediaDecoderEventVisibility::Suppressed) {
     mFiredMetadataLoaded = true;
-    GetOwner()->MetadataLoaded(mInfo, std::move(aTags));
+    GetOwner()->MetadataLoaded(mInfo.get(), std::move(aTags));
   }
   // Invalidate() will end up calling GetOwner()->UpdateMediaSize with the last
   // dimensions retrieved from the video frame container. The video frame
   // container contains more up to date dimensions than aInfo.
   // So we call Invalidate() after calling GetOwner()->MetadataLoaded to ensure
   // the media element has the latest dimensions.
   Invalidate();
 
@@ -758,27 +758,27 @@ void MediaDecoder::EnsureTelemetryReport
 
 const char* MediaDecoder::PlayStateStr() {
   MOZ_ASSERT(NS_IsMainThread());
   AbstractThread::AutoEnter context(AbstractMainThread());
   return ToPlayStateStr(mPlayState);
 }
 
 void MediaDecoder::FirstFrameLoaded(
-    nsAutoPtr<MediaInfo> aInfo, MediaDecoderEventVisibility aEventVisibility) {
+    UniquePtr<MediaInfo> aInfo, MediaDecoderEventVisibility aEventVisibility) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_DIAGNOSTIC_ASSERT(!IsShutdown());
   AbstractThread::AutoEnter context(AbstractMainThread());
 
   LOG("FirstFrameLoaded, channels=%u rate=%u hasAudio=%d hasVideo=%d "
       "mPlayState=%s transportSeekable=%d",
       aInfo->mAudio.mChannels, aInfo->mAudio.mRate, aInfo->HasAudio(),
       aInfo->HasVideo(), PlayStateStr(), IsTransportSeekable());
 
-  mInfo = aInfo.forget();
+  mInfo = std::move(aInfo);
 
   Invalidate();
 
   // The element can run javascript via events
   // before reaching here, so only change the
   // state if we're still set to the original
   // loading state.
   if (mPlayState == PLAY_STATE_LOADING) {
--- a/dom/media/MediaDecoder.h
+++ b/dom/media/MediaDecoder.h
@@ -20,17 +20,16 @@
 #  include "TimeUnits.h"
 #  include "mozilla/Atomics.h"
 #  include "mozilla/CDMProxy.h"
 #  include "mozilla/MozPromise.h"
 #  include "mozilla/ReentrantMonitor.h"
 #  include "mozilla/StateMirroring.h"
 #  include "mozilla/StateWatching.h"
 #  include "mozilla/dom/MediaDebugInfoBinding.h"
-#  include "nsAutoPtr.h"
 #  include "nsCOMPtr.h"
 #  include "nsIObserver.h"
 #  include "nsISupports.h"
 #  include "nsITimer.h"
 
 class AudioDeviceInfo;
 class nsIPrincipal;
 
@@ -402,17 +401,17 @@ class MediaDecoder : public DecoderDocto
 
   void GetDebugInfo(dom::MediaDecoderDebugInfo& aInfo);
 
  protected:
   virtual ~MediaDecoder();
 
   // Called when the first audio and/or video from the media file has been
   // loaded by the state machine. Call on the main thread only.
-  virtual void FirstFrameLoaded(nsAutoPtr<MediaInfo> aInfo,
+  virtual void FirstFrameLoaded(UniquePtr<MediaInfo> aInfo,
                                 MediaDecoderEventVisibility aEventVisibility);
 
   void SetStateMachineParameters();
 
   // Called when MediaDecoder shutdown is finished. Subclasses use this to clean
   // up internal structures, and unregister potential shutdown blockers when
   // they're done.
   virtual void ShutdownInternal();
@@ -556,17 +555,17 @@ class MediaDecoder : public DecoderDocto
   bool mMediaSeekable = true;
 
   // True if the media is only seekable within its buffered ranges
   // like WebMs with no cues.
   bool mMediaSeekableOnlyInBufferedRanges = false;
 
   // Stores media info, including info of audio tracks and video tracks, should
   // only be accessed from main thread.
-  nsAutoPtr<MediaInfo> mInfo;
+  UniquePtr<MediaInfo> mInfo;
 
   // Tracks the visibility status of owner element's document.
   bool mIsDocumentVisible;
 
   // Tracks the visibility status of owner element.
   Visibility mElementVisibility;
 
   // Tracks the owner is in-tree or not.
--- a/dom/media/MediaDecoderStateMachine.cpp
+++ b/dom/media/MediaDecoderStateMachine.cpp
@@ -3363,17 +3363,17 @@ void MediaDecoderStateMachine::DecodeErr
 void MediaDecoderStateMachine::EnqueueFirstFrameLoadedEvent() {
   MOZ_ASSERT(OnTaskQueue());
   // Track value of mSentFirstFrameLoadedEvent from before updating it
   bool firstFrameBeenLoaded = mSentFirstFrameLoadedEvent;
   mSentFirstFrameLoadedEvent = true;
   MediaDecoderEventVisibility visibility =
       firstFrameBeenLoaded ? MediaDecoderEventVisibility::Suppressed
                            : MediaDecoderEventVisibility::Observable;
-  mFirstFrameLoadedEvent.Notify(nsAutoPtr<MediaInfo>(new MediaInfo(Info())),
+  mFirstFrameLoadedEvent.Notify(UniquePtr<MediaInfo>(new MediaInfo(Info())),
                                 visibility);
 }
 
 void MediaDecoderStateMachine::FinishDecodeFirstFrame() {
   MOZ_ASSERT(OnTaskQueue());
   MOZ_ASSERT(!mSentFirstFrameLoadedEvent);
   LOG("FinishDecodeFirstFrame");
 
--- a/dom/media/MediaDecoderStateMachine.h
+++ b/dom/media/MediaDecoderStateMachine.h
@@ -93,17 +93,16 @@ hardware (via AudioStream).
 #  include "MediaSink.h"
 #  include "MediaStatistics.h"
 #  include "MediaTimer.h"
 #  include "SeekJob.h"
 #  include "mozilla/Attributes.h"
 #  include "mozilla/ReentrantMonitor.h"
 #  include "mozilla/StateMirroring.h"
 #  include "mozilla/dom/MediaDebugInfoBinding.h"
-#  include "nsAutoPtr.h"
 #  include "nsThreadUtils.h"
 
 namespace mozilla {
 
 class AbstractThread;
 class AudioSegment;
 class DecodedStream;
 class DOMMediaStream;
@@ -239,17 +238,17 @@ class MediaDecoderStateMachine
   MediaEventSource<void>& OnMediaNotSeekable() const;
 
   MediaEventSourceExc<UniquePtr<MediaInfo>, UniquePtr<MetadataTags>,
                       MediaDecoderEventVisibility>&
   MetadataLoadedEvent() {
     return mMetadataLoadedEvent;
   }
 
-  MediaEventSourceExc<nsAutoPtr<MediaInfo>, MediaDecoderEventVisibility>&
+  MediaEventSourceExc<UniquePtr<MediaInfo>, MediaDecoderEventVisibility>&
   FirstFrameLoadedEvent() {
     return mFirstFrameLoadedEvent;
   }
 
   MediaEventSource<MediaPlaybackEvent>& OnPlaybackEvent() {
     return mOnPlaybackEvent;
   }
   MediaEventSource<MediaResult>& OnPlaybackErrorEvent() {
@@ -667,17 +666,17 @@ class MediaDecoderStateMachine
   MediaEventListener mAudioQueueListener;
   MediaEventListener mVideoQueueListener;
   MediaEventListener mAudibleListener;
   MediaEventListener mOnMediaNotSeekable;
 
   MediaEventProducerExc<UniquePtr<MediaInfo>, UniquePtr<MetadataTags>,
                         MediaDecoderEventVisibility>
       mMetadataLoadedEvent;
-  MediaEventProducerExc<nsAutoPtr<MediaInfo>, MediaDecoderEventVisibility>
+  MediaEventProducerExc<UniquePtr<MediaInfo>, MediaDecoderEventVisibility>
       mFirstFrameLoadedEvent;
 
   MediaEventProducer<MediaPlaybackEvent> mOnPlaybackEvent;
   MediaEventProducer<MediaResult> mOnPlaybackErrorEvent;
 
   MediaEventProducer<DecoderDoctorEvent> mOnDecoderDoctorEvent;
 
   MediaEventProducer<NextFrameStatus> mOnNextFrameStatus;
--- a/dom/media/MediaFormatReader.h
+++ b/dom/media/MediaFormatReader.h
@@ -15,17 +15,16 @@
 #  include "mozilla/TaskQueue.h"
 #  include "mozilla/dom/MediaDebugInfoBinding.h"
 
 #  include "FrameStatistics.h"
 #  include "MediaEventSource.h"
 #  include "MediaDataDemuxer.h"
 #  include "MediaMetadataManager.h"
 #  include "MediaPromiseDefs.h"
-#  include "nsAutoPtr.h"
 #  include "PDMFactory.h"
 #  include "SeekTarget.h"
 
 namespace mozilla {
 
 class CDMProxy;
 class GMPCrashHelper;
 class MediaResource;
--- a/dom/media/MediaMetadataManager.h
+++ b/dom/media/MediaMetadataManager.h
@@ -5,33 +5,32 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #if !defined(MediaMetadataManager_h__)
 #  define MediaMetadataManager_h__
 
 #  include "mozilla/AbstractThread.h"
 #  include "mozilla/LinkedList.h"
 
-#  include "nsAutoPtr.h"
 #  include "MediaEventSource.h"
 #  include "TimeUnits.h"
 #  include "VideoUtils.h"
 
 namespace mozilla {
 
 class TimedMetadata;
 typedef MediaEventProducerExc<TimedMetadata> TimedMetadataEventProducer;
 typedef MediaEventSourceExc<TimedMetadata> TimedMetadataEventSource;
 
 // A struct that contains the metadata of a media, and the time at which those
 // metadata should start to be reported.
 class TimedMetadata : public LinkedListElement<TimedMetadata> {
  public:
   TimedMetadata(const media::TimeUnit& aPublishTime,
-                UniquePtr<MetadataTags>&& aTags, nsAutoPtr<MediaInfo>&& aInfo)
+                UniquePtr<MetadataTags>&& aTags, UniquePtr<MediaInfo>&& aInfo)
       : mPublishTime(aPublishTime),
         mTags(std::move(aTags)),
         mInfo(std::move(aInfo)) {}
 
   // Define our move constructor because we don't want to move the members of
   // LinkedListElement to change the list.
   TimedMetadata(TimedMetadata&& aOther)
       : mPublishTime(aOther.mPublishTime),
@@ -41,17 +40,17 @@ class TimedMetadata : public LinkedListE
   // The time, in microseconds, at which those metadata should be available.
   media::TimeUnit mPublishTime;
   // The metadata. The ownership is transfered to the element when dispatching
   // to the main threads.
   UniquePtr<MetadataTags> mTags;
   // The media info, including the info of audio tracks and video tracks.
   // The ownership is transfered to MediaDecoder when dispatching to the
   // main thread.
-  nsAutoPtr<MediaInfo> mInfo;
+  UniquePtr<MediaInfo> mInfo;
 };
 
 // This class encapsulate the logic to give the metadata from the reader to
 // the content, at the right time.
 class MediaMetadataManager {
  public:
   ~MediaMetadataManager() {
     TimedMetadata* element;
--- a/dom/media/MediaRecorder.cpp
+++ b/dom/media/MediaRecorder.cpp
@@ -22,17 +22,16 @@
 #include "mozilla/dom/MediaRecorderErrorEvent.h"
 #include "mozilla/dom/MutableBlobStorage.h"
 #include "mozilla/dom/VideoStreamTrack.h"
 #include "mozilla/media/MediaUtils.h"
 #include "mozilla/MemoryReporting.h"
 #include "mozilla/Preferences.h"
 #include "mozilla/StaticPtr.h"
 #include "mozilla/TaskQueue.h"
-#include "nsAutoPtr.h"
 #include "nsCharSeparatedTokenizer.h"
 #include "nsContentTypeParser.h"
 #include "nsContentUtils.h"
 #include "nsDocShell.h"
 #include "nsError.h"
 #include "mozilla/dom/Document.h"
 #include "nsIPrincipal.h"
 #include "nsIScriptError.h"
--- a/dom/media/MediaTrackGraph.h
+++ b/dom/media/MediaTrackGraph.h
@@ -10,17 +10,16 @@
 #include "CubebUtils.h"
 #include "MainThreadUtils.h"
 #include "MediaSegment.h"
 #include "mozilla/LinkedList.h"
 #include "mozilla/Maybe.h"
 #include "mozilla/Mutex.h"
 #include "mozilla/StateWatching.h"
 #include "mozilla/TaskQueue.h"
-#include "nsAutoPtr.h"
 #include "nsAutoRef.h"
 #include "nsIRunnable.h"
 #include "nsTArray.h"
 #include <speex/speex_resampler.h>
 
 class nsIRunnable;
 class nsIGlobalObject;
 class nsPIDOMWindowInner;
--- a/dom/media/VideoUtils.h
+++ b/dom/media/VideoUtils.h
@@ -16,17 +16,16 @@
 #include "mozilla/AbstractThread.h"
 #include "mozilla/Attributes.h"
 #include "mozilla/CheckedInt.h"
 #include "mozilla/MozPromise.h"
 #include "mozilla/ReentrantMonitor.h"
 #include "mozilla/RefPtr.h"
 #include "mozilla/SharedThreadPool.h"
 #include "mozilla/UniquePtr.h"
-#include "nsAutoPtr.h"
 #include "nsCOMPtr.h"
 #include "nsINamed.h"
 #include "nsIThread.h"
 #include "nsITimer.h"
 
 #include "nsThreadUtils.h"
 #include "prtime.h"
 
--- a/dom/media/encoder/VP8TrackEncoder.cpp
+++ b/dom/media/encoder/VP8TrackEncoder.cpp
@@ -45,21 +45,21 @@ VP8TrackEncoder::VP8TrackEncoder(RefPtr<
 
 VP8TrackEncoder::~VP8TrackEncoder() {
   Destroy();
   MOZ_COUNT_DTOR(VP8TrackEncoder);
 }
 
 void VP8TrackEncoder::Destroy() {
   if (mInitialized) {
-    vpx_codec_destroy(mVPXContext);
+    vpx_codec_destroy(mVPXContext.get());
   }
 
   if (mVPXImageWrapper) {
-    vpx_img_free(mVPXImageWrapper);
+    vpx_img_free(mVPXImageWrapper.get());
   }
   mInitialized = false;
 }
 
 nsresult VP8TrackEncoder::Init(int32_t aWidth, int32_t aHeight,
                                int32_t aDisplayWidth, int32_t aDisplayHeight) {
   if (aWidth < 1 || aHeight < 1 || aDisplayWidth < 1 || aDisplayHeight < 1) {
     return NS_ERROR_FAILURE;
@@ -74,28 +74,29 @@ nsresult VP8TrackEncoder::Init(int32_t a
   vpx_codec_enc_cfg_t config;
   nsresult rv = SetConfigurationValues(aWidth, aHeight, aDisplayWidth,
                                        aDisplayHeight, config);
   NS_ENSURE_SUCCESS(rv, NS_ERROR_FAILURE);
 
   // Creating a wrapper to the image - setting image data to NULL. Actual
   // pointer will be set in encode. Setting align to 1, as it is meaningless
   // (actual memory is not allocated).
-  vpx_img_wrap(mVPXImageWrapper, VPX_IMG_FMT_I420, mFrameWidth, mFrameHeight, 1,
-               nullptr);
+  vpx_img_wrap(mVPXImageWrapper.get(), VPX_IMG_FMT_I420, mFrameWidth,
+               mFrameHeight, 1, nullptr);
 
   vpx_codec_flags_t flags = 0;
   flags |= VPX_CODEC_USE_OUTPUT_PARTITION;
-  if (vpx_codec_enc_init(mVPXContext, vpx_codec_vp8_cx(), &config, flags)) {
+  if (vpx_codec_enc_init(mVPXContext.get(), vpx_codec_vp8_cx(), &config,
+                         flags)) {
     return NS_ERROR_FAILURE;
   }
 
-  vpx_codec_control(mVPXContext, VP8E_SET_STATIC_THRESHOLD, 1);
-  vpx_codec_control(mVPXContext, VP8E_SET_CPUUSED, -6);
-  vpx_codec_control(mVPXContext, VP8E_SET_TOKEN_PARTITIONS,
+  vpx_codec_control(mVPXContext.get(), VP8E_SET_STATIC_THRESHOLD, 1);
+  vpx_codec_control(mVPXContext.get(), VP8E_SET_CPUUSED, -6);
+  vpx_codec_control(mVPXContext.get(), VP8E_SET_TOKEN_PARTITIONS,
                     VP8_ONE_TOKENPARTITION);
 
   SetInitialized();
 
   return NS_OK;
 }
 
 nsresult VP8TrackEncoder::Reconfigure(int32_t aWidth, int32_t aHeight,
@@ -108,18 +109,19 @@ nsresult VP8TrackEncoder::Reconfigure(in
   }
 
   if (!mInitialized) {
     MOZ_ASSERT(false);
     return NS_ERROR_FAILURE;
   }
 
   // Recreate image wrapper
-  vpx_img_free(mVPXImageWrapper);
-  vpx_img_wrap(mVPXImageWrapper, VPX_IMG_FMT_I420, aWidth, aHeight, 1, nullptr);
+  vpx_img_free(mVPXImageWrapper.get());
+  vpx_img_wrap(mVPXImageWrapper.get(), VPX_IMG_FMT_I420, aWidth, aHeight, 1,
+               nullptr);
   // Encoder configuration structure.
   vpx_codec_enc_cfg_t config;
   nsresult rv = SetConfigurationValues(aWidth, aHeight, aDisplayWidth,
                                        aDisplayHeight, config);
   NS_ENSURE_SUCCESS(rv, NS_ERROR_FAILURE);
   // Set new configuration
   if (vpx_codec_enc_config_set(mVPXContext.get(), &config) != VPX_CODEC_OK) {
     VP8LOG(LogLevel::Error, "Failed to set new configuration");
@@ -221,17 +223,17 @@ already_AddRefed<TrackMetadataBase> VP8T
 }
 
 nsresult VP8TrackEncoder::GetEncodedPartitions(
     nsTArray<RefPtr<EncodedFrame>>& aData) {
   vpx_codec_iter_t iter = nullptr;
   EncodedFrame::FrameType frameType = EncodedFrame::VP8_P_FRAME;
   nsTArray<uint8_t> frameData;
   const vpx_codec_cx_pkt_t* pkt = nullptr;
-  while ((pkt = vpx_codec_get_cx_data(mVPXContext, &iter)) != nullptr) {
+  while ((pkt = vpx_codec_get_cx_data(mVPXContext.get(), &iter)) != nullptr) {
     switch (pkt->kind) {
       case VPX_CODEC_CX_FRAME_PKT: {
         // Copy the encoded data from libvpx to frameData
         frameData.AppendElements((uint8_t*)pkt->data.frame.buf,
                                  pkt->data.frame.sz);
         break;
       }
       default: {
@@ -487,19 +489,19 @@ nsresult VP8TrackEncoder::GetEncodedTrac
         if ((mDurationSinceLastKeyframe * 1000 / mTrackRate) >=
             mKeyFrameInterval) {
           mDurationSinceLastKeyframe = 0;
           flags |= VPX_EFLAG_FORCE_KF;
         }
         mDurationSinceLastKeyframe += chunk.GetDuration();
       }
 
-      if (vpx_codec_encode(mVPXContext, mVPXImageWrapper, mEncodedTimestamp,
-                           (unsigned long)chunk.GetDuration(), flags,
-                           VPX_DL_REALTIME)) {
+      if (vpx_codec_encode(
+              mVPXContext.get(), mVPXImageWrapper.get(), mEncodedTimestamp,
+              (unsigned long)chunk.GetDuration(), flags, VPX_DL_REALTIME)) {
         VP8LOG(LogLevel::Error, "vpx_codec_encode failed to encode the frame.");
         return NS_ERROR_FAILURE;
       }
       // Get the encoded data from VP8 encoder.
       rv = GetEncodedPartitions(aData);
       if (rv != NS_OK && rv != NS_ERROR_NOT_AVAILABLE) {
         VP8LOG(LogLevel::Error, "GetEncodedPartitions failed.");
         return NS_ERROR_FAILURE;
@@ -545,17 +547,17 @@ nsresult VP8TrackEncoder::GetEncodedTrac
 
   // End of stream, pull the rest frames in encoder.
   if (mEndOfStream) {
     VP8LOG(LogLevel::Debug, "mEndOfStream is true");
     mEncodingComplete = true;
     // Bug 1243611, keep calling vpx_codec_encode and vpx_codec_get_cx_data
     // until vpx_codec_get_cx_data return null.
     while (true) {
-      if (vpx_codec_encode(mVPXContext, nullptr, mEncodedTimestamp, 0, 0,
+      if (vpx_codec_encode(mVPXContext.get(), nullptr, mEncodedTimestamp, 0, 0,
                            VPX_DL_REALTIME)) {
         return NS_ERROR_FAILURE;
       }
       nsresult rv = GetEncodedPartitions(aData);
       if (rv == NS_ERROR_NOT_AVAILABLE) {
         // End-of-stream
         break;
       }
--- a/dom/media/encoder/VP8TrackEncoder.h
+++ b/dom/media/encoder/VP8TrackEncoder.h
@@ -91,16 +91,16 @@ class VP8TrackEncoder : public VideoTrac
   /**
    * A local segment queue which takes the raw data out from mRawSegment in the
    * call of GetEncodedTrack().
    */
   VideoSegment mSourceSegment;
 
   // VP8 relative members.
   // Codec context structure.
-  nsAutoPtr<vpx_codec_ctx_t> mVPXContext;
+  UniquePtr<vpx_codec_ctx_t> mVPXContext;
   // Image Descriptor.
-  nsAutoPtr<vpx_image_t> mVPXImageWrapper;
+  UniquePtr<vpx_image_t> mVPXImageWrapper;
 };
 
 }  // namespace mozilla
 
 #endif
--- a/dom/media/flac/FlacDemuxer.h
+++ b/dom/media/flac/FlacDemuxer.h
@@ -5,17 +5,16 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef FLAC_DEMUXER_H_
 #define FLAC_DEMUXER_H_
 
 #include "mozilla/Attributes.h"
 #include "MediaDataDemuxer.h"
 #include "MediaResource.h"
-#include "nsAutoPtr.h"
 namespace mozilla {
 
 namespace flac {
 class Frame;
 class FrameParser;
 }  // namespace flac
 class FlacTrackDemuxer;
 
@@ -91,17 +90,17 @@ class FlacTrackDemuxer : public MediaTra
 
   // Returns the average frame length derived from the previously parsed frames.
   double AverageFrameLength() const;
 
   // The (hopefully) Flac resource.
   MediaResourceIndex mSource;
 
   // Flac frame parser used to detect frames and extract side info.
-  nsAutoPtr<flac::FrameParser> mParser;
+  UniquePtr<flac::FrameParser> mParser;
 
   // Total duration of parsed frames.
   media::TimeUnit mParsedFramesDuration;
 
   // Sum of parsed frames' lengths in bytes.
   uint64_t mTotalFrameLen;
 
   // Audio track config info.
--- a/dom/media/flac/FlacFrameParser.cpp
+++ b/dom/media/flac/FlacFrameParser.cpp
@@ -139,17 +139,17 @@ Result<Ok, nsresult> FlacFrameParser::De
 
       mInfo.mMimeType = "audio/flac";
       mInfo.mRate = sampleRate;
       mInfo.mChannels = numChannels;
       mInfo.mBitDepth = bps;
       mInfo.mCodecSpecificConfig->AppendElements(blockDataStart, blockDataSize);
       auto duration = FramesToTimeUnit(mNumFrames, sampleRate);
       mInfo.mDuration = duration.IsValid() ? duration : media::TimeUnit::Zero();
-      mParser = new OpusParser;
+      mParser = MakeUnique<OpusParser>();
       break;
     }
     case FLAC_METADATA_TYPE_VORBIS_COMMENT: {
       if (!mParser) {
         // We must have seen a valid streaminfo first.
         return Err(NS_ERROR_FAILURE);
       }
       nsTArray<uint8_t> comments(blockDataSize + 8);
--- a/dom/media/flac/FlacFrameParser.h
+++ b/dom/media/flac/FlacFrameParser.h
@@ -4,17 +4,16 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef FLAC_FRAME_PARSER_H_
 #define FLAC_FRAME_PARSER_H_
 
 #include "mozilla/Maybe.h"
 #include "mozilla/Result.h"
-#include "nsAutoPtr.h"
 #include "MediaDecoder.h"  // For MetadataTags
 #include "MediaInfo.h"
 #include "MediaResource.h"
 
 namespace mozilla {
 
 #define FLAC_MAX_CHANNELS 8
 #define FLAC_MIN_BLOCKSIZE 16
@@ -60,14 +59,14 @@ class FlacFrameParser {
   uint32_t mMaxBlockSize;
   uint32_t mMinFrameSize;
   uint32_t mMaxFrameSize;
   uint64_t mNumFrames;
   bool mFullMetadata;
   uint32_t mPacketCount;
 
   // Used to decode the vorbis comment metadata.
-  nsAutoPtr<OpusParser> mParser;
+  UniquePtr<OpusParser> mParser;
 };
 
 }  // namespace mozilla
 
 #endif  // FLAC_FRAME_PARSER_H_
--- a/dom/media/gmp/GMPServiceParent.cpp
+++ b/dom/media/gmp/GMPServiceParent.cpp
@@ -22,17 +22,16 @@
 #  include "mozilla/SandboxInfo.h"
 #endif
 #include "mozilla/Services.h"
 #include "mozilla/StaticPrefs_media.h"
 #include "mozilla/SyncRunnable.h"
 #include "mozilla/SystemGroup.h"
 #include "mozilla/Unused.h"
 #include "nsAppDirectoryServiceDefs.h"
-#include "nsAutoPtr.h"
 #include "nsComponentManagerUtils.h"
 #include "nsDirectoryServiceDefs.h"
 #include "nsDirectoryServiceUtils.h"
 #include "nsHashKeys.h"
 #include "nsIFile.h"
 #include "nsIObserverService.h"
 #include "nsIXULRuntime.h"
 #include "nsNativeCharsetUtils.h"
@@ -1730,29 +1729,30 @@ bool GMPServiceParent::Create(Endpoint<P
     // Shutdown is initiated. There is no point creating a new actor.
     return false;
   }
 
   nsCOMPtr<nsIThread> gmpThread;
   nsresult rv = gmp->GetThread(getter_AddRefs(gmpThread));
   NS_ENSURE_SUCCESS(rv, false);
 
-  nsAutoPtr<GMPServiceParent> serviceParent(new GMPServiceParent(gmp));
+  UniquePtr<GMPServiceParent> serviceParent(new GMPServiceParent(gmp));
   bool ok;
-  rv = gmpThread->Dispatch(
-      new OpenPGMPServiceParent(serviceParent, std::move(aGMPService), &ok),
-      NS_DISPATCH_SYNC);
+  rv =
+      gmpThread->Dispatch(new OpenPGMPServiceParent(
+                              serviceParent.get(), std::move(aGMPService), &ok),
+                          NS_DISPATCH_SYNC);
 
   if (NS_WARN_IF(NS_FAILED(rv) || !ok)) {
     return false;
   }
 
   // Now that the service parent is set up, it will be destroyed by
   // ActorDestroy.
-  Unused << serviceParent.forget();
+  Unused << serviceParent.release();
 
   return true;
 }
 
 }  // namespace gmp
 }  // namespace mozilla
 
 #undef NS_DispatchToMainThread
--- a/dom/media/gmp/GMPTimerParent.cpp
+++ b/dom/media/gmp/GMPTimerParent.cpp
@@ -2,17 +2,16 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "GMPTimerParent.h"
 
 #include "GMPLog.h"
 #include "mozilla/Unused.h"
-#include "nsAutoPtr.h"
 #include "nsComponentManagerUtils.h"
 
 namespace mozilla {
 
 extern LogModule* GetGMPLog();
 
 #ifdef __CLASS__
 #  undef __CLASS__
@@ -31,28 +30,28 @@ mozilla::ipc::IPCResult GMPTimerParent::
 
   MOZ_ASSERT(mGMPEventTarget->IsOnCurrentThread());
 
   if (!mIsOpen) {
     return IPC_OK();
   }
 
   nsresult rv;
-  nsAutoPtr<Context> ctx(new Context());
+  UniquePtr<Context> ctx(new Context());
 
   rv = NS_NewTimerWithFuncCallback(
-      getter_AddRefs(ctx->mTimer), &GMPTimerParent::GMPTimerExpired, ctx,
+      getter_AddRefs(ctx->mTimer), &GMPTimerParent::GMPTimerExpired, ctx.get(),
       aTimeoutMs, nsITimer::TYPE_ONE_SHOT, "gmp::GMPTimerParent::RecvSetTimer",
       mGMPEventTarget);
   NS_ENSURE_SUCCESS(rv, IPC_OK());
 
   ctx->mId = aTimerId;
   ctx->mParent = this;
 
-  mTimers.PutEntry(ctx.forget());
+  mTimers.PutEntry(ctx.release());
 
   return IPC_OK();
 }
 
 void GMPTimerParent::Shutdown() {
   GMP_LOG_DEBUG("%s::%s: %p mIsOpen=%d", __CLASS__, __FUNCTION__, this,
                 mIsOpen);
 
@@ -73,20 +72,20 @@ void GMPTimerParent::ActorDestroy(ActorD
                 mIsOpen);
 
   Shutdown();
 }
 
 /* static */
 void GMPTimerParent::GMPTimerExpired(nsITimer* aTimer, void* aClosure) {
   MOZ_ASSERT(aClosure);
-  nsAutoPtr<Context> ctx(static_cast<Context*>(aClosure));
+  UniquePtr<Context> ctx(static_cast<Context*>(aClosure));
   MOZ_ASSERT(ctx->mParent);
   if (ctx->mParent) {
-    ctx->mParent->TimerExpired(ctx);
+    ctx->mParent->TimerExpired(ctx.get());
   }
 }
 
 void GMPTimerParent::TimerExpired(Context* aContext) {
   GMP_LOG_DEBUG("%s::%s: %p mIsOpen=%d", __CLASS__, __FUNCTION__, this,
                 mIsOpen);
   MOZ_ASSERT(mGMPEventTarget->IsOnCurrentThread());
 
--- a/dom/media/gmp/GMPVideoEncodedFrameImpl.h
+++ b/dom/media/gmp/GMPVideoEncodedFrameImpl.h
@@ -30,17 +30,16 @@
 
 #ifndef GMPVideoEncodedFrameImpl_h_
 #define GMPVideoEncodedFrameImpl_h_
 
 #include "gmp-errors.h"
 #include "gmp-video-frame.h"
 #include "gmp-video-frame-encoded.h"
 #include "mozilla/ipc/Shmem.h"
-#include "nsAutoPtr.h"
 
 namespace mozilla {
 class CryptoSample;
 
 namespace gmp {
 
 class GMPVideoHostImpl;
 class GMPVideoEncodedFrameData;
--- a/dom/media/gtest/TestMediaSpan.cpp
+++ b/dom/media/gtest/TestMediaSpan.cpp
@@ -4,17 +4,16 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include <gtest/gtest.h>
 #include <stdint.h>
 
 #include "MediaSpan.h"
 
 #include "mozilla/ArrayUtils.h"
-#include "nsAutoPtr.h"
 
 using namespace mozilla;
 
 already_AddRefed<MediaByteBuffer> makeBuffer(uint8_t aStart, uint8_t aEnd) {
   RefPtr<MediaByteBuffer> buffer(new MediaByteBuffer);
   for (uint8_t i = aStart; i <= aEnd; i++) {
     buffer->AppendElement(i);
   }
--- a/dom/media/mediasource/ContainerParser.cpp
+++ b/dom/media/mediasource/ContainerParser.cpp
@@ -18,17 +18,16 @@
 #include "nsMimeTypes.h"
 #ifdef MOZ_FMP4
 #  include "AtomType.h"
 #  include "BufferReader.h"
 #  include "Index.h"
 #  include "MP4Interval.h"
 #  include "ByteStream.h"
 #endif
-#include "nsAutoPtr.h"
 #include "SourceBufferResource.h"
 #include <algorithm>
 
 extern mozilla::LogModule* GetMediaSourceSamplesLog();
 
 #define MSE_DEBUG(arg, ...)                                            \
   DDMOZ_LOG(GetMediaSourceSamplesLog(), mozilla::LogLevel::Debug,      \
             "(%s)::%s: " arg, mType.OriginalString().Data(), __func__, \
@@ -522,18 +521,18 @@ class MP4ContainerParser : public Contai
     if (initSegment) {
       mResource = new SourceBufferResource();
       DDLINKCHILD("resource", mResource.get());
       mStream = new MP4Stream(mResource);
       // We use a timestampOffset of 0 for ContainerParser, and require
       // consumers of ParseStartAndEndTimestamps to add their timestamp offset
       // manually. This allows the ContainerParser to be shared across different
       // timestampOffsets.
-      mParser = new MoofParser(mStream, AsVariant(ParseAllTracks{}),
-                               /* aIsAudio = */ false);
+      mParser = MakeUnique<MoofParser>(mStream, AsVariant(ParseAllTracks{}),
+                                       /* aIsAudio = */ false);
       DDLINKCHILD("parser", mParser.get());
       mInitData = new MediaByteBuffer();
       mCompleteInitSegmentRange = MediaByteRange();
       mCompleteMediaHeaderRange = MediaByteRange();
       mCompleteMediaSegmentRange = MediaByteRange();
       mGlobalOffset = mTotalParsed;
     } else if (!mStream || !mParser) {
       mTotalParsed += aData.Length();
@@ -586,17 +585,17 @@ class MP4ContainerParser : public Contai
   }
 
   // Gaps of up to 35ms (marginally longer than a single frame at 30fps) are
   // considered to be sequential frames.
   int64_t GetRoundingError() override { return 35000; }
 
  private:
   RefPtr<MP4Stream> mStream;
-  nsAutoPtr<MoofParser> mParser;
+  UniquePtr<MoofParser> mParser;
 };
 #endif  // MOZ_FMP4
 
 #ifdef MOZ_FMP4
 DDLoggedTypeDeclNameAndBase(ADTSContainerParser, ContainerParser);
 
 class ADTSContainerParser
     : public ContainerParser,
@@ -732,33 +731,33 @@ class ADTSContainerParser
 
   // Audio shouldn't have gaps.
   // Especially when we generate the timestamps ourselves.
   int64_t GetRoundingError() override { return 0; }
 };
 #endif  // MOZ_FMP4
 
 /*static*/
-ContainerParser* ContainerParser::CreateForMIMEType(
+UniquePtr<ContainerParser> ContainerParser::CreateForMIMEType(
     const MediaContainerType& aType) {
   if (aType.Type() == MEDIAMIMETYPE(VIDEO_WEBM) ||
       aType.Type() == MEDIAMIMETYPE(AUDIO_WEBM)) {
-    return new WebMContainerParser(aType);
+    return MakeUnique<WebMContainerParser>(aType);
   }
 
 #ifdef MOZ_FMP4
   if (aType.Type() == MEDIAMIMETYPE(VIDEO_MP4) ||
       aType.Type() == MEDIAMIMETYPE(AUDIO_MP4)) {
-    return new MP4ContainerParser(aType);
+    return MakeUnique<MP4ContainerParser>(aType);
   }
   if (aType.Type() == MEDIAMIMETYPE("audio/aac")) {
-    return new ADTSContainerParser(aType);
+    return MakeUnique<ADTSContainerParser>(aType);
   }
 #endif
 
-  return new ContainerParser(aType);
+  return MakeUnique<ContainerParser>(aType);
 }
 
 #undef MSE_DEBUG
 #undef MSE_DEBUGV
 #undef MSE_DEBUGVEX
 
 }  // namespace mozilla
--- a/dom/media/mediasource/ContainerParser.h
+++ b/dom/media/mediasource/ContainerParser.h
@@ -3,16 +3,17 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MOZILLA_CONTAINERPARSER_H_
 #define MOZILLA_CONTAINERPARSER_H_
 
 #include "mozilla/RefPtr.h"
+#include "mozilla/UniquePtr.h"
 #include "MediaSpan.h"
 #include "MediaContainerType.h"
 #include "MediaResource.h"
 #include "MediaResult.h"
 
 namespace mozilla {
 
 class MediaByteBuffer;
@@ -69,17 +70,18 @@ class ContainerParser : public DecoderDo
   MediaByteRange InitSegmentRange();
   // Returns the byte range of the first complete media segment header,
   // or an empty range if not complete.
   MediaByteRange MediaHeaderRange();
   // Returns the byte range of the first complete media segment or an empty
   // range if not complete.
   MediaByteRange MediaSegmentRange();
 
-  static ContainerParser* CreateForMIMEType(const MediaContainerType& aType);
+  static UniquePtr<ContainerParser> CreateForMIMEType(
+      const MediaContainerType& aType);
 
   const MediaContainerType& ContainerType() const { return mType; }
 
  protected:
   RefPtr<MediaByteBuffer> mInitData;
   RefPtr<SourceBufferResource> mResource;
   bool mHasInitData;
   uint64_t mTotalParsed;
--- a/dom/media/mediasource/TrackBuffersManager.h
+++ b/dom/media/mediasource/TrackBuffersManager.h
@@ -17,17 +17,16 @@
 #include "MediaContainerType.h"
 #include "MediaData.h"
 #include "MediaDataDemuxer.h"
 #include "MediaResult.h"
 #include "MediaSourceDecoder.h"
 #include "MediaSpan.h"
 #include "SourceBufferTask.h"
 #include "TimeUnits.h"
-#include "nsAutoPtr.h"
 #include "nsTArray.h"
 
 namespace mozilla {
 
 class AbstractThread;
 class ContainerParser;
 class MediaByteBuffer;
 class MediaRawData;
@@ -217,17 +216,17 @@ class TrackBuffersManager final
   MediaContainerType mType;
 
   // ContainerParser objects and methods.
   // Those are used to parse the incoming input buffer.
 
   // Recreate the ContainerParser and if aReuseInitData is true then
   // feed it with the previous init segment found.
   void RecreateParser(bool aReuseInitData);
-  nsAutoPtr<ContainerParser> mParser;
+  UniquePtr<ContainerParser> mParser;
 
   // Demuxer objects and methods.
   void AppendDataToCurrentInputBuffer(const MediaSpan& aData);
 
   RefPtr<MediaByteBuffer> mInitData;
   // Temporary input buffer to handle partial media segment header.
   // We store the current input buffer content into it should we need to
   // reinitialize the demuxer once we have some samples and a discontinuity is
--- a/dom/media/mediasource/gtest/TestContainerParser.cpp
+++ b/dom/media/mediasource/gtest/TestContainerParser.cpp
@@ -3,25 +3,24 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include <gtest/gtest.h>
 #include <stdint.h>
 
 #include "ContainerParser.h"
 #include "mozilla/ArrayUtils.h"
-#include "nsAutoPtr.h"
 
 using namespace mozilla;
 
 TEST(ContainerParser, MIMETypes)
 {
   const char* containerTypes[] = {"video/webm", "audio/webm", "video/mp4",
                                   "audio/mp4", "audio/aac"};
-  nsAutoPtr<ContainerParser> parser;
+  UniquePtr<ContainerParser> parser;
   for (size_t i = 0; i < ArrayLength(containerTypes); ++i) {
     Maybe<MediaContainerType> containerType =
         MakeMediaContainerType(containerTypes[i]);
     ASSERT_TRUE(containerType.isSome());
     parser = ContainerParser::CreateForMIMEType(*containerType);
     ASSERT_NE(parser, nullptr);
   }
 }
@@ -30,17 +29,17 @@ already_AddRefed<MediaByteBuffer> make_a
   const uint8_t test[] = {0xff, 0xf1, 0x50, 0x80, 0x03, 0x1f, 0xfc};
   RefPtr<MediaByteBuffer> buffer(new MediaByteBuffer);
   buffer->AppendElements(test, ArrayLength(test));
   return buffer.forget();
 }
 
 TEST(ContainerParser, ADTSHeader)
 {
-  nsAutoPtr<ContainerParser> parser;
+  UniquePtr<ContainerParser> parser;
   parser = ContainerParser::CreateForMIMEType(
       MediaContainerType(MEDIAMIMETYPE("audio/aac")));
   ASSERT_NE(parser, nullptr);
 
   // Audio data should have no gaps.
   EXPECT_EQ(parser->GetRoundingError(), 0);
 
   // Test a valid header.
@@ -96,17 +95,17 @@ TEST(ContainerParser, ADTSHeader)
             MediaByteRange(0, int64_t(header->Length())));
   // Media segment range should be empty here.
   EXPECT_EQ(parser->MediaHeaderRange(), MediaByteRange());
   EXPECT_EQ(parser->MediaSegmentRange(), MediaByteRange());
 }
 
 TEST(ContainerParser, ADTSBlankMedia)
 {
-  nsAutoPtr<ContainerParser> parser;
+  UniquePtr<ContainerParser> parser;
   parser = ContainerParser::CreateForMIMEType(
       MediaContainerType(MEDIAMIMETYPE("audio/aac")));
   ASSERT_NE(parser, nullptr);
 
   // Audio data should have no gaps.
   EXPECT_EQ(parser->GetRoundingError(), 0);
 
   // Test the header only.
--- a/dom/media/mp4/Index.cpp
+++ b/dom/media/mp4/Index.cpp
@@ -6,17 +6,16 @@
 
 #include <algorithm>
 #include <limits>
 
 #include "BufferReader.h"
 #include "mozilla/RefPtr.h"
 #include "MP4Interval.h"
 #include "MP4Metadata.h"
-#include "nsAutoPtr.h"
 #include "SinfParser.h"
 
 using namespace mozilla::media;
 
 namespace mozilla {
 
 class MOZ_STACK_CLASS RangeFinder {
  public:
@@ -413,17 +412,18 @@ Microseconds SampleIterator::GetNextKeyf
   }
   return -1;
 }
 
 Index::Index(const IndiceWrapper& aIndices, ByteStream* aSource,
              uint32_t aTrackId, bool aIsAudio)
     : mSource(aSource), mIsAudio(aIsAudio) {
   if (!aIndices.Length()) {
-    mMoofParser = new MoofParser(aSource, AsVariant(aTrackId), aIsAudio);
+    mMoofParser =
+        MakeUnique<MoofParser>(aSource, AsVariant(aTrackId), aIsAudio);
   } else {
     if (!mIndex.SetCapacity(aIndices.Length(), fallible)) {
       // OOM.
       return;
     }
     media::IntervalSet<int64_t> intervalTime;
     MediaByteRange intervalRange;
     bool haveSync = false;
--- a/dom/media/mp4/Index.h
+++ b/dom/media/mp4/Index.h
@@ -6,17 +6,16 @@
 #define INDEX_H_
 
 #include "ByteStream.h"
 #include "MediaData.h"
 #include "MediaResource.h"
 #include "MoofParser.h"
 #include "mozilla/Result.h"
 #include "MP4Interval.h"
-#include "nsAutoPtr.h"
 #include "nsISupportsImpl.h"
 #include "TimeUnits.h"
 
 namespace mozilla {
 class IndiceWrapper;
 struct Sample;
 struct CencSampleEncryptionInfoEntry;
 
@@ -106,29 +105,29 @@ class Index {
   void UpdateMoofIndex(const mozilla::MediaByteRangeSet& aByteRanges,
                        bool aCanEvict);
   void UpdateMoofIndex(const mozilla::MediaByteRangeSet& aByteRanges);
   Microseconds GetEndCompositionIfBuffered(
       const mozilla::MediaByteRangeSet& aByteRanges);
   mozilla::media::TimeIntervals ConvertByteRangesToTimeRanges(
       const mozilla::MediaByteRangeSet& aByteRanges);
   uint64_t GetEvictionOffset(Microseconds aTime);
-  bool IsFragmented() { return mMoofParser; }
+  bool IsFragmented() { return !!mMoofParser; }
 
   friend class SampleIterator;
 
  private:
   ~Index();
   void RegisterIterator(SampleIterator* aIterator);
   void UnregisterIterator(SampleIterator* aIterator);
 
   ByteStream* mSource;
   FallibleTArray<Sample> mIndex;
   FallibleTArray<MP4DataOffset> mDataOffset;
-  nsAutoPtr<MoofParser> mMoofParser;
+  UniquePtr<MoofParser> mMoofParser;
   nsTArray<SampleIterator*> mIterators;
 
   // ConvertByteRangesToTimeRanges cache
   mozilla::MediaByteRangeSet mLastCachedRanges;
   mozilla::media::TimeIntervals mLastBufferedRanges;
   bool mIsAudio;
 };
 }  // namespace mozilla
--- a/dom/media/mp4/MP4Demuxer.cpp
+++ b/dom/media/mp4/MP4Demuxer.cpp
@@ -17,17 +17,16 @@
 #include "MP4Decoder.h"
 #include "MP4Metadata.h"
 #include "MoofParser.h"
 #include "ResourceStream.h"
 #include "VPXDecoder.h"
 #include "mozilla/Span.h"
 #include "mozilla/StaticPrefs_media.h"
 #include "mozilla/Telemetry.h"
-#include "nsAutoPtr.h"
 #include "nsPrintfCString.h"
 
 extern mozilla::LazyLogModule gMediaDemuxerLog;
 mozilla::LogModule* GetDemuxerLog() { return gMediaDemuxerLog; }
 
 #define LOG(arg, ...)                                                 \
   DDMOZ_LOG(gMediaDemuxerLog, mozilla::LogLevel::Debug, "::%s: " arg, \
             __func__, ##__VA_ARGS__)
--- a/dom/media/ogg/OggCodecState.cpp
+++ b/dom/media/ogg/OggCodecState.cpp
@@ -26,31 +26,32 @@ namespace mozilla {
 extern LazyLogModule gMediaDecoderLog;
 #define LOG(type, msg) MOZ_LOG(gMediaDecoderLog, type, msg)
 
 using media::TimeUnit;
 
 /** Decoder base class for Ogg-encapsulated streams. */
 OggCodecState* OggCodecState::Create(ogg_page* aPage) {
   NS_ASSERTION(ogg_page_bos(aPage), "Only call on BOS page!");
-  nsAutoPtr<OggCodecState> codecState;
+  UniquePtr<OggCodecState> codecState;
   if (aPage->body_len > 6 && memcmp(aPage->body + 1, "theora", 6) == 0) {
-    codecState = new TheoraState(aPage);
+    codecState = MakeUnique<TheoraState>(aPage);
   } else if (aPage->body_len > 6 && memcmp(aPage->body + 1, "vorbis", 6) == 0) {
-    codecState = new VorbisState(aPage);
+    codecState = MakeUnique<VorbisState>(aPage);
   } else if (aPage->body_len > 8 && memcmp(aPage->body, "OpusHead", 8) == 0) {
-    codecState = new OpusState(aPage);
+    codecState = MakeUnique<OpusState>(aPage);
   } else if (aPage->body_len > 8 && memcmp(aPage->body, "fishead\0", 8) == 0) {
-    codecState = new SkeletonState(aPage);
+    codecState = MakeUnique<SkeletonState>(aPage);
   } else if (aPage->body_len > 5 && memcmp(aPage->body, "\177FLAC", 5) == 0) {
-    codecState = new FlacState(aPage);
+    codecState = MakeUnique<FlacState>(aPage);
   } else {
-    codecState = new OggCodecState(aPage, false);
+    // Can't use MakeUnique here, OggCodecState is protected.
+    codecState.reset(new OggCodecState(aPage, false));
   }
-  return codecState->OggCodecState::InternalInit() ? codecState.forget()
+  return codecState->OggCodecState::InternalInit() ? codecState.release()
                                                    : nullptr;
 }
 
 OggCodecState::OggCodecState(ogg_page* aBosPage, bool aActive)
     : mPacketCount(0),
       mSerial(ogg_page_serialno(aBosPage)),
       mActive(aActive),
       mDoneReadingHeaders(!aActive) {
@@ -935,17 +936,17 @@ bool OpusState::Init(void) {
 
   return error == OPUS_OK;
 }
 
 bool OpusState::DecodeHeader(OggPacketPtr aPacket) {
   switch (mPacketCount++) {
     // Parse the id header.
     case 0:
-      mParser = new OpusParser;
+      mParser = MakeUnique<OpusParser>();
       if (!mParser->DecodeHeader(aPacket->packet, aPacket->bytes)) {
         return false;
       }
       mHeaders.Append(std::move(aPacket));
       break;
 
     // Parse the metadata header.
     case 1:
@@ -1416,17 +1417,17 @@ bool SkeletonState::DecodeIndex(ogg_pack
     // field is possibly malicious. Don't try decoding this index, we may run
     // out of memory.
     LOG(LogLevel::Debug, ("Possibly malicious number of key points reported "
                           "(%" PRId64 ") in index packet for stream %u.",
                           numKeyPoints, serialno));
     return (mActive = false);
   }
 
-  nsAutoPtr<nsKeyFrameIndex> keyPoints(new nsKeyFrameIndex(startTime, endTime));
+  auto keyPoints = MakeUnique<nsKeyFrameIndex>(startTime, endTime);
 
   p = aPacket->packet + INDEX_KEYPOINT_OFFSET;
   const unsigned char* limit = aPacket->packet + aPacket->bytes;
   int64_t numKeyPointsRead = 0;
   CheckedInt64 offset = 0;
   CheckedInt64 time = 0;
   while (p < limit && numKeyPointsRead < numKeyPoints) {
     int64_t delta = 0;
@@ -1446,17 +1447,17 @@ bool SkeletonState::DecodeIndex(ogg_pack
       return (mActive = false);
     }
     keyPoints->Add(offset.value(), timeUsecs.value());
     numKeyPointsRead++;
   }
 
   int32_t keyPointsRead = keyPoints->Length();
   if (keyPointsRead > 0) {
-    mIndex.Put(serialno, keyPoints.forget());
+    mIndex.Put(serialno, keyPoints.release());
   }
 
   LOG(LogLevel::Debug, ("Loaded %d keypoints for Skeleton on stream %u",
                         keyPointsRead, serialno));
   return true;
 }
 
 nsresult SkeletonState::IndexedSeekTargetForTrack(uint32_t aSerialno,
@@ -1564,17 +1565,17 @@ bool SkeletonState::DecodeFisbone(ogg_pa
       CheckedUint32(FISBONE_MSG_FIELDS_OFFSET) + offsetMsgField;
   if (!checked_fields_pos.isValid() ||
       aPacket->bytes < static_cast<int64_t>(checked_fields_pos.value())) {
     return false;
   }
   int64_t msgLength = aPacket->bytes - checked_fields_pos.value();
   char* msgProbe = (char*)aPacket->packet + checked_fields_pos.value();
   char* msgHead = msgProbe;
-  nsAutoPtr<MessageField> field(new MessageField());
+  auto field = MakeUnique<MessageField>();
 
   const static FieldPatternType kFieldTypeMaps[] = {
       {"Content-Type:", eContentType},
       {"Role:", eRole},
       {"Name:", eName},
       {"Language:", eLanguage},
       {"Title:", eTitle},
       {"Display-hint:", eDisplayHint},
@@ -1621,17 +1622,17 @@ bool SkeletonState::DecodeFisbone(ogg_pa
     msgProbe++;
   }
 
   auto entry = mMsgFieldStore.LookupForAdd(serialno);
   if (entry) {
     // mMsgFieldStore has an entry for serialno already.
     return false;
   }
-  entry.OrInsert([&field]() { return field.forget(); });
+  entry.OrInsert([&field]() { return field.release(); });
   return true;
 }
 
 bool SkeletonState::DecodeHeader(OggPacketPtr aPacket) {
   if (IsSkeletonBOS(aPacket.get())) {
     uint16_t verMajor = LittleEndian::readUint16(aPacket->packet +
                                                  SKELETON_VERSION_MAJOR_OFFSET);
     uint16_t verMinor = LittleEndian::readUint16(aPacket->packet +
--- a/dom/media/ogg/OggCodecState.h
+++ b/dom/media/ogg/OggCodecState.h
@@ -406,17 +406,17 @@ class OpusState : public OggCodecState {
 
   // Returns the end time that a granulepos represents.
   static int64_t Time(int aPreSkip, int64_t aGranulepos);
 
   // Construct and return a table of tags from the metadata header.
   UniquePtr<MetadataTags> GetTags() override;
 
  private:
-  nsAutoPtr<OpusParser> mParser;
+  UniquePtr<OpusParser> mParser;
   OpusMSDecoder* mDecoder;
 
   // Granule position (end sample) of the last decoded Opus packet. This is
   // used to calculate the amount we should trim from the last packet.
   int64_t mPrevPacketGranulepos;
 
   // Reconstructs the granulepos of Opus packets stored in the
   // mUnstamped array. mUnstamped must be filled with consecutive packets from
--- a/dom/media/ogg/OggDemuxer.cpp
+++ b/dom/media/ogg/OggDemuxer.cpp
@@ -577,18 +577,17 @@ bool OggDemuxer::ReadOggChain(const medi
     return false;
   }
 
   int serial = ogg_page_serialno(&page);
   if (mCodecStore.Contains(serial)) {
     return false;
   }
 
-  nsAutoPtr<OggCodecState> codecState;
-  codecState = OggCodecState::Create(&page);
+  UniquePtr<OggCodecState> codecState(OggCodecState::Create(&page));
   if (!codecState) {
     return false;
   }
 
   if (mVorbisState && (codecState->GetType() == OggCodecState::TYPE_VORBIS)) {
     newVorbisState = static_cast<VorbisState*>(codecState.get());
   } else if (mOpusState &&
              (codecState->GetType() == OggCodecState::TYPE_OPUS)) {
@@ -597,17 +596,17 @@ bool OggDemuxer::ReadOggChain(const medi
              (codecState->GetType() == OggCodecState::TYPE_FLAC)) {
     newFlacState = static_cast<FlacState*>(codecState.get());
   } else {
     return false;
   }
 
   OggCodecState* state;
 
-  mCodecStore.Add(serial, codecState.forget());
+  mCodecStore.Add(serial, codecState.release());
   state = mCodecStore.Get(serial);
 
   NS_ENSURE_TRUE(state != nullptr, false);
 
   if (NS_FAILED(state->PageIn(&page))) {
     return false;
   }
 
@@ -665,17 +664,17 @@ bool OggDemuxer::ReadOggChain(const medi
 
   if (chained) {
     SetChained();
     mInfo.mMediaSeekable = false;
     mDecodedAudioDuration += aLastEndTime;
     if (mTimedMetadataEvent) {
       mTimedMetadataEvent->Notify(
           TimedMetadata(mDecodedAudioDuration, std::move(tags),
-                        nsAutoPtr<MediaInfo>(new MediaInfo(mInfo))));
+                        MakeUnique<MediaInfo>(mInfo)));
     }
     // Setup a new TrackInfo so that the MediaFormatReader will flush the
     // current decoder.
     mSharedAudioTrackInfo =
         new TrackInfoSharedPtr(mInfo.mAudio, ++sStreamSourceID);
     return true;
   }
 
--- a/dom/media/platforms/agnostic/OpusDecoder.cpp
+++ b/dom/media/platforms/agnostic/OpusDecoder.cpp
@@ -135,17 +135,17 @@ RefPtr<MediaDataDecoder::InitPromise> Op
 
 nsresult OpusDataDecoder::DecodeHeader(const unsigned char* aData,
                                        size_t aLength) {
   MOZ_ASSERT(!mOpusParser);
   MOZ_ASSERT(!mOpusDecoder);
   MOZ_ASSERT(!mDecodedHeader);
   mDecodedHeader = true;
 
-  mOpusParser = new OpusParser;
+  mOpusParser = MakeUnique<OpusParser>();
   if (!mOpusParser->DecodeHeader(const_cast<unsigned char*>(aData), aLength)) {
     return NS_ERROR_FAILURE;
   }
   int channels = mOpusParser->mChannels;
 
   mMappingTable.SetLength(channels);
   AudioConfig::ChannelLayout vorbisLayout(
       channels, VorbisDataDecoder::VorbisLayout(channels));
--- a/dom/media/platforms/agnostic/OpusDecoder.h
+++ b/dom/media/platforms/agnostic/OpusDecoder.h
@@ -4,17 +4,16 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 #if !defined(OpusDecoder_h_)
 #  define OpusDecoder_h_
 
 #  include "PlatformDecoderModule.h"
 
 #  include "mozilla/Maybe.h"
-#  include "nsAutoPtr.h"
 #  include "nsTArray.h"
 
 struct OpusMSDecoder;
 
 namespace mozilla {
 
 class OpusParser;
 
@@ -49,17 +48,17 @@ class OpusDataDecoder : public MediaData
   nsresult DecodeHeader(const unsigned char* aData, size_t aLength);
 
   RefPtr<DecodePromise> ProcessDecode(MediaRawData* aSample);
 
   const AudioInfo& mInfo;
   const RefPtr<TaskQueue> mTaskQueue;
 
   // Opus decoder state
-  nsAutoPtr<OpusParser> mOpusParser;
+  UniquePtr<OpusParser> mOpusParser;
   OpusMSDecoder* mOpusDecoder;
 
   uint16_t mSkip;  // Samples left to trim before playback.
   bool mDecodedHeader;
 
   // Opus padding should only be discarded on the final packet.  Once this
   // is set to true, if the reader attempts to decode any further packets it
   // will raise an error so we can indicate that the file is invalid.
--- a/dom/media/platforms/apple/AppleVTDecoder.cpp
+++ b/dom/media/platforms/apple/AppleVTDecoder.cpp
@@ -10,17 +10,16 @@
 #include <IOSurface/IOSurface.h>
 
 #include "AppleDecoderModule.h"
 #include "AppleUtils.h"
 #include "MacIOSurfaceImage.h"
 #include "MediaData.h"
 #include "mozilla/ArrayUtils.h"
 #include "H264.h"
-#include "nsAutoPtr.h"
 #include "nsThreadUtils.h"
 #include "mozilla/Logging.h"
 #include "VideoUtils.h"
 #include "gfxPlatform.h"
 #include "MacIOSurfaceImage.h"
 
 #define LOG(...) DDMOZ_LOG(sPDMLog, mozilla::LogLevel::Debug, __VA_ARGS__)
 #define LOGEX(_this, ...) \
@@ -271,17 +270,17 @@ static void PlatformCallback(void* decom
                              VTDecodeInfoFlags flags, CVImageBufferRef image,
                              CMTime presentationTimeStamp,
                              CMTime presentationDuration) {
   AppleVTDecoder* decoder =
       static_cast<AppleVTDecoder*>(decompressionOutputRefCon);
   LOGEX(decoder, "AppleVideoDecoder %s status %d flags %d", __func__,
         static_cast<int>(status), flags);
 
-  nsAutoPtr<AppleVTDecoder::AppleFrameRef> frameRef(
+  UniquePtr<AppleVTDecoder::AppleFrameRef> frameRef(
       static_cast<AppleVTDecoder::AppleFrameRef*>(sourceFrameRefCon));
 
   // Validate our arguments.
   if (status != noErr || !image) {
     NS_WARNING("VideoToolbox decoder returned no data");
     image = nullptr;
   } else if (flags & kVTDecodeInfo_FrameDropped) {
     NS_WARNING("  ...frame tagged as dropped...");
--- a/dom/media/platforms/omx/OmxPromiseLayer.cpp
+++ b/dom/media/platforms/omx/OmxPromiseLayer.cpp
@@ -20,18 +20,18 @@
           ("OmxPromiseLayer(%p)::%s: " arg, this, __func__, ##__VA_ARGS__))
 
 namespace mozilla {
 
 OmxPromiseLayer::OmxPromiseLayer(TaskQueue* aTaskQueue,
                                  OmxDataDecoder* aDataDecoder,
                                  layers::ImageContainer* aImageContainer)
     : mTaskQueue(aTaskQueue) {
-  mPlatformLayer =
-      OmxPlatformLayer::Create(aDataDecoder, this, aTaskQueue, aImageContainer);
+  mPlatformLayer.reset(OmxPlatformLayer::Create(aDataDecoder, this, aTaskQueue,
+                                                aImageContainer));
   MOZ_ASSERT(!!mPlatformLayer);
 }
 
 RefPtr<OmxPromiseLayer::OmxCommandPromise> OmxPromiseLayer::Init(
     const TrackInfo* aInfo) {
   MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn());
 
   OMX_ERRORTYPE err = mPlatformLayer->InitOmxToStateLoaded(aInfo);
--- a/dom/media/platforms/omx/OmxPromiseLayer.h
+++ b/dom/media/platforms/omx/OmxPromiseLayer.h
@@ -4,17 +4,16 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #if !defined(OmxPromiseLayer_h_)
 #  define OmxPromiseLayer_h_
 
 #  include "mozilla/MozPromise.h"
 #  include "mozilla/TaskQueue.h"
-#  include "nsAutoPtr.h"
 
 #  include "OMX_Core.h"
 #  include "OMX_Types.h"
 
 namespace mozilla {
 
 namespace layers {
 class ImageContainer;
@@ -217,17 +216,17 @@ class OmxPromiseLayer {
   MozPromiseHolder<OmxCommandPromise> mPortDisablePromise;
 
   MozPromiseHolder<OmxCommandPromise> mPortEnablePromise;
 
   MozPromiseHolder<OmxCommandPromise> mFlushPromise;
 
   nsTArray<FlushCommand> mFlushCommands;
 
-  nsAutoPtr<OmxPlatformLayer> mPlatformLayer;
+  UniquePtr<OmxPlatformLayer> mPlatformLayer;
 
  private:
   // Elements are added to holders when FillBuffer() or FillBuffer(). And
   // removing element when the promise is resolved. Buffers in these lists
   // should NOT be used by other component; for example, output it to audio
   // output. These lists should be empty when engine is about to shutdown.
   //
   // Note:
--- a/dom/media/platforms/wmf/DXVA2Manager.cpp
+++ b/dom/media/platforms/wmf/DXVA2Manager.cpp
@@ -508,20 +508,20 @@ DXVA2Manager* DXVA2Manager::CreateD3D9DX
   // videos we use DXVA with at any one time.
   uint32_t dxvaLimit = StaticPrefs::media_wmf_dxva_max_videos();
 
   if (sDXVAVideosCount == dxvaLimit) {
     aFailureReason.AssignLiteral("Too many DXVA videos playing");
     return nullptr;
   }
 
-  nsAutoPtr<D3D9DXVA2Manager> d3d9Manager(new D3D9DXVA2Manager());
+  auto d3d9Manager = MakeUnique<D3D9DXVA2Manager>();
   hr = d3d9Manager->Init(aKnowsCompositor, aFailureReason);
   if (SUCCEEDED(hr)) {
-    return d3d9Manager.forget();
+    return d3d9Manager.release();
   }
 
   // No hardware accelerated video decoding. :(
   return nullptr;
 }
 
 bool D3D9DXVA2Manager::CanCreateDecoder(const DXVA2_VideoDesc& aDesc,
                                         const float aFramerate) const {
@@ -1226,21 +1226,21 @@ DXVA2Manager* DXVA2Manager::CreateD3D11D
   // videos we use DXVA with at any one time.
   uint32_t dxvaLimit = StaticPrefs::media_wmf_dxva_max_videos();
 
   if (sDXVAVideosCount == dxvaLimit) {
     aFailureReason.AssignLiteral("Too many DXVA videos playing");
     return nullptr;
   }
 
-  nsAutoPtr<D3D11DXVA2Manager> manager(new D3D11DXVA2Manager());
+  auto manager = MakeUnique<D3D11DXVA2Manager>();
   HRESULT hr = manager->Init(aKnowsCompositor, aFailureReason, aDevice);
   NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
 
-  return manager.forget();
+  return manager.release();
 }
 
 DXVA2Manager::DXVA2Manager() : mLock("DXVA2Manager") {
   if (NS_IsMainThread()) {
     ++sDXVAVideosCount;
   }
 }
 
--- a/dom/media/platforms/wmf/DXVA2Manager.h
+++ b/dom/media/platforms/wmf/DXVA2Manager.h
@@ -4,17 +4,16 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 #if !defined(DXVA2Manager_h_)
 #  define DXVA2Manager_h_
 
 #  include "MediaInfo.h"
 #  include "WMF.h"
 #  include "mozilla/Mutex.h"
-#  include "nsAutoPtr.h"
 #  include "mozilla/gfx/Rect.h"
 #  include "d3d11.h"
 
 namespace mozilla {
 
 namespace layers {
 class Image;
 class ImageContainer;
--- a/dom/media/platforms/wmf/WMFDecoderModule.cpp
+++ b/dom/media/platforms/wmf/WMFDecoderModule.cpp
@@ -19,17 +19,16 @@
 #include "WMFVideoMFTManager.h"
 #include "mozilla/DebugOnly.h"
 #include "mozilla/Maybe.h"
 #include "mozilla/StaticMutex.h"
 #include "mozilla/StaticPrefs_media.h"
 #include "mozilla/WindowsVersion.h"
 #include "mozilla/gfx/gfxVars.h"
 #include "mozilla/mscom/EnsureMTA.h"
-#include "nsAutoPtr.h"
 #include "nsComponentManagerUtils.h"
 #include "nsIXULRuntime.h"
 #include "nsServiceManagerUtils.h"
 #include "nsWindowsHelpers.h"
 #include "prsystem.h"
 
 extern const GUID CLSID_WebmMfVpxDec;
 
@@ -118,45 +117,45 @@ already_AddRefed<MediaDataDecoder> WMFDe
   // Temporary - forces use of VPXDecoder when alpha is present.
   // Bug 1263836 will handle alpha scenario once implemented. It will shift
   // the check for alpha to PDMFactory but not itself remove the need for a
   // check.
   if (aParams.VideoConfig().HasAlpha()) {
     return nullptr;
   }
 
-  nsAutoPtr<WMFVideoMFTManager> manager(new WMFVideoMFTManager(
+  UniquePtr<WMFVideoMFTManager> manager(new WMFVideoMFTManager(
       aParams.VideoConfig(), aParams.mKnowsCompositor, aParams.mImageContainer,
       aParams.mRate.mValue, aParams.mOptions, sDXVAEnabled));
 
   MediaResult result = manager->Init();
   if (NS_FAILED(result)) {
     if (aParams.mError) {
       *aParams.mError = result;
     }
     return nullptr;
   }
 
   RefPtr<MediaDataDecoder> decoder =
-      new WMFMediaDataDecoder(manager.forget(), aParams.mTaskQueue);
+      new WMFMediaDataDecoder(manager.release(), aParams.mTaskQueue);
 
   return decoder.forget();
 }
 
 already_AddRefed<MediaDataDecoder> WMFDecoderModule::CreateAudioDecoder(
     const CreateDecoderParams& aParams) {
-  nsAutoPtr<WMFAudioMFTManager> manager(
+  UniquePtr<WMFAudioMFTManager> manager(
       new WMFAudioMFTManager(aParams.AudioConfig()));
 
   if (!manager->Init()) {
     return nullptr;
   }
 
   RefPtr<MediaDataDecoder> decoder =
-      new WMFMediaDataDecoder(manager.forget(), aParams.mTaskQueue);
+      new WMFMediaDataDecoder(manager.release(), aParams.mTaskQueue);
   return decoder.forget();
 }
 
 template <const GUID& aGuid>
 static bool CanCreateWMFDecoder() {
   static StaticMutex sMutex;
   StaticMutexAutoLock lock(sMutex);
   static Maybe<bool> result;
--- a/dom/media/platforms/wmf/WMFMediaDataDecoder.h
+++ b/dom/media/platforms/wmf/WMFMediaDataDecoder.h
@@ -6,17 +6,16 @@
 
 #if !defined(WMFMediaDataDecoder_h_)
 #  define WMFMediaDataDecoder_h_
 
 #  include "MFTDecoder.h"
 #  include "PlatformDecoderModule.h"
 #  include "WMF.h"
 #  include "mozilla/RefPtr.h"
-#  include "nsAutoPtr.h"
 
 namespace mozilla {
 
 // Encapsulates the initialization of the MFTDecoder appropriate for decoding
 // a given stream, and the process of converting the IMFSample produced
 // by the MFT into a MediaData object.
 class MFTManager {
  public:
@@ -132,17 +131,17 @@ class WMFMediaDataDecoder
   // Called on the task queue. Orders the MFT to drain, and then extracts
   // all available output.
   RefPtr<DecodePromise> ProcessDrain();
 
   RefPtr<ShutdownPromise> ProcessShutdown();
 
   const RefPtr<TaskQueue> mTaskQueue;
 
-  nsAutoPtr<MFTManager> mMFTManager;
+  UniquePtr<MFTManager> mMFTManager;
 
   // The last offset into the media resource that was passed into Input().
   // This is used to approximate the decoder's position in the media resource.
   int64_t mLastStreamOffset;
   Maybe<media::TimeUnit> mLastTime;
   media::TimeUnit mLastDuration;
   int64_t mSamplesCount = 0;
 
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.cpp
@@ -97,31 +97,31 @@ static bool IsWin7H264Decoder4KCapable()
   }
   // Can't get file version -> Assume it's the old DLL.
   return false;
 }
 
 template <class T>
 class DeleteObjectTask : public Runnable {
  public:
-  explicit DeleteObjectTask(nsAutoPtr<T>& aObject)
-      : Runnable("VideoUtils::DeleteObjectTask"), mObject(aObject) {}
+  explicit DeleteObjectTask(UniquePtr<T>&& aObject)
+      : Runnable("VideoUtils::DeleteObjectTask"), mObject(std::move(aObject)) {}
   NS_IMETHOD Run() override {
     NS_ASSERTION(NS_IsMainThread(), "Must be on main thread.");
     mObject = nullptr;
     return NS_OK;
   }
 
  private:
-  nsAutoPtr<T> mObject;
+  UniquePtr<T> mObject;
 };
 
 template <class T>
-void DeleteOnMainThread(nsAutoPtr<T>& aObject) {
-  nsCOMPtr<nsIRunnable> r = new DeleteObjectTask<T>(aObject);
+void DeleteOnMainThread(UniquePtr<T>&& aObject) {
+  nsCOMPtr<nsIRunnable> r = new DeleteObjectTask<T>(std::move(aObject));
   SystemGroup::Dispatch(TaskCategory::Other, r.forget());
 }
 
 LayersBackend GetCompositorBackendType(
     layers::KnowsCompositor* aKnowsCompositor) {
   if (aKnowsCompositor) {
     return aKnowsCompositor->GetCompositorBackendType();
   }
@@ -170,17 +170,17 @@ WMFVideoMFTManager::WMFVideoMFTManager(
     mDecodedImageSize.height += 16 - (mDecodedImageSize.height % 16);
   }
 }
 
 WMFVideoMFTManager::~WMFVideoMFTManager() {
   MOZ_COUNT_DTOR(WMFVideoMFTManager);
   // Ensure DXVA/D3D9 related objects are released on the main thread.
   if (mDXVA2Manager) {
-    DeleteOnMainThread(mDXVA2Manager);
+    DeleteOnMainThread(std::move(mDXVA2Manager));
   }
 }
 
 const GUID& WMFVideoMFTManager::GetMFTGUID() {
   MOZ_ASSERT(mStreamType != Unknown);
   switch (mStreamType) {
     case H264:
       return CLSID_CMSH264DecoderMFT;
@@ -427,41 +427,41 @@ class CreateDXVAManagerEvent : public Ru
     nsCString secondFailureReason;
     if (mBackend == LayersBackend::LAYERS_D3D11 &&
         StaticPrefs::media_wmf_dxva_d3d11_enabled() && IsWin8OrLater()) {
       const nsCString& blacklistedDLL = FindD3D11BlacklistedDLL();
       if (!deblacklistingForTelemetry && !blacklistedDLL.IsEmpty()) {
         failureReason->AppendPrintf("D3D11 blacklisted with DLL %s",
                                     blacklistedDLL.get());
       } else {
-        mDXVA2Manager =
-            DXVA2Manager::CreateD3D11DXVA(mKnowsCompositor, *failureReason);
+        mDXVA2Manager.reset(
+            DXVA2Manager::CreateD3D11DXVA(mKnowsCompositor, *failureReason));
         if (mDXVA2Manager) {
           return NS_OK;
         }
       }
       // Try again with d3d9, but record the failure reason
       // into a new var to avoid overwriting the d3d11 failure.
       failureReason = &secondFailureReason;
       mFailureReason.AppendLiteral("; ");
     }
 
     const nsCString& blacklistedDLL = FindD3D9BlacklistedDLL();
     if (!deblacklistingForTelemetry && !blacklistedDLL.IsEmpty()) {
       mFailureReason.AppendPrintf("D3D9 blacklisted with DLL %s",
                                   blacklistedDLL.get());
     } else {
-      mDXVA2Manager =
-          DXVA2Manager::CreateD3D9DXVA(mKnowsCompositor, *failureReason);
+      mDXVA2Manager.reset(
+          DXVA2Manager::CreateD3D9DXVA(mKnowsCompositor, *failureReason));
       // Make sure we include the messages from both attempts (if applicable).
       mFailureReason.Append(secondFailureReason);
     }
     return NS_OK;
   }
-  nsAutoPtr<DXVA2Manager> mDXVA2Manager;
+  UniquePtr<DXVA2Manager> mDXVA2Manager;
   layers::LayersBackend mBackend;
   layers::KnowsCompositor* mKnowsCompositor;
   nsACString& mFailureReason;
 };
 
 bool WMFVideoMFTManager::InitializeDXVA() {
   // If we use DXVA but aren't running with a D3D layer manager then the
   // readback of decoded video frames from GPU to CPU memory grinds painting
@@ -483,17 +483,17 @@ bool WMFVideoMFTManager::InitializeDXVA(
 
   if (NS_IsMainThread()) {
     event->Run();
   } else {
     // This logic needs to run on the main thread
     mozilla::SyncRunnable::DispatchToThread(
         SystemGroup::EventTargetFor(mozilla::TaskCategory::Other), event);
   }
-  mDXVA2Manager = event->mDXVA2Manager;
+  mDXVA2Manager = std::move(event->mDXVA2Manager);
 
   return mDXVA2Manager != nullptr;
 }
 
 MediaResult WMFVideoMFTManager::ValidateVideoInfo() {
   if (mStreamType != H264 ||
       StaticPrefs::media_wmf_allow_unsupported_resolutions()) {
     return NS_OK;
@@ -598,17 +598,17 @@ MediaResult WMFVideoMFTManager::InitInte
     }
   }
 
   if (!mUseHwAccel) {
     if (mDXVA2Manager) {
       // Either mDXVAEnabled was set to false prior the second call to
       // InitInternal() due to CanUseDXVA() returning false, or
       // MFT_MESSAGE_SET_D3D_MANAGER failed
-      DeleteOnMainThread(mDXVA2Manager);
+      DeleteOnMainThread(std::move(mDXVA2Manager));
     }
     if (mStreamType == VP9 || mStreamType == VP8) {
       return MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
                          RESULT_DETAIL("Use VP8/9 MFT only if HW acceleration "
                                        "is available."));
     }
     Telemetry::Accumulate(Telemetry::MEDIA_DECODER_BACKEND_USED,
                           uint32_t(media::MediaDecoderBackend::WMFSoftware));
@@ -793,17 +793,17 @@ bool WMFVideoMFTManager::CanUseDXVA(IMFM
   // SupportsConfig only checks for valid h264 decoders currently.
   if (mStreamType != H264) {
     return true;
   }
 
   // The supports config check must be done on the main thread since we have
   // a crash guard protecting it.
   RefPtr<SupportsConfigEvent> event =
-      new SupportsConfigEvent(mDXVA2Manager, aType, aFramerate);
+      new SupportsConfigEvent(mDXVA2Manager.get(), aType, aFramerate);
 
   if (NS_IsMainThread()) {
     event->Run();
   } else {
     // This logic needs to run on the main thread
     mozilla::SyncRunnable::DispatchToThread(
         SystemGroup::EventTargetFor(mozilla::TaskCategory::Other), event);
   }
@@ -1120,17 +1120,17 @@ WMFVideoMFTManager::Output(int64_t aStre
     mGotValidOutputAfterNullOutput = true;
   }
 
   return S_OK;
 }
 
 void WMFVideoMFTManager::Shutdown() {
   mDecoder = nullptr;
-  DeleteOnMainThread(mDXVA2Manager);
+  DeleteOnMainThread(std::move(mDXVA2Manager));
 }
 
 bool WMFVideoMFTManager::IsHardwareAccelerated(
     nsACString& aFailureReason) const {
   aFailureReason = mDXVAFailureReason;
   return mDecoder && mUseHwAccel;
 }
 
--- a/dom/media/platforms/wmf/WMFVideoMFTManager.h
+++ b/dom/media/platforms/wmf/WMFVideoMFTManager.h
@@ -8,17 +8,16 @@
 #  define WMFVideoMFTManager_h_
 
 #  include "MFTDecoder.h"
 #  include "MediaResult.h"
 #  include "WMF.h"
 #  include "WMFMediaDataDecoder.h"
 #  include "mozilla/Atomics.h"
 #  include "mozilla/RefPtr.h"
-#  include "nsAutoPtr.h"
 #  include "mozilla/gfx/Rect.h"
 
 namespace mozilla {
 
 class DXVA2Manager;
 
 class WMFVideoMFTManager : public MFTManager {
  public:
@@ -71,17 +70,17 @@ class WMFVideoMFTManager : public MFTMan
   const gfx::IntSize mImageSize;
   gfx::IntSize mDecodedImageSize;
   uint32_t mVideoStride;
   Maybe<gfx::YUVColorSpace> mColorSpace;
   gfx::ColorRange mColorRange;
 
   RefPtr<layers::ImageContainer> mImageContainer;
   RefPtr<layers::KnowsCompositor> mKnowsCompositor;
-  nsAutoPtr<DXVA2Manager> mDXVA2Manager;
+  UniquePtr<DXVA2Manager> mDXVA2Manager;
 
   media::TimeUnit mLastDuration;
 
   bool mDXVAEnabled;
   bool mUseHwAccel;
 
   nsCString mDXVAFailureReason;
 
--- a/dom/media/webaudio/AudioNodeTrack.cpp
+++ b/dom/media/webaudio/AudioNodeTrack.cpp
@@ -226,19 +226,19 @@ void AudioNodeTrack::SetReverb(WebCore::
    public:
     Message(AudioNodeTrack* aTrack, WebCore::Reverb* aReverb,
             uint32_t aImpulseChannelCount)
         : ControlMessage(aTrack),
           mReverb(aReverb),
           mImpulseChanelCount(aImpulseChannelCount) {}
     void Run() override {
       static_cast<AudioNodeTrack*>(mTrack)->Engine()->SetReverb(
-          mReverb.forget(), mImpulseChanelCount);
+          mReverb.release(), mImpulseChanelCount);
     }
-    nsAutoPtr<WebCore::Reverb> mReverb;
+    UniquePtr<WebCore::Reverb> mReverb;
     uint32_t mImpulseChanelCount;
   };
 
   GraphImpl()->AppendMessage(
       MakeUnique<Message>(this, aReverb, aImpulseChannelCount));
 }
 
 void AudioNodeTrack::SetRawArrayData(nsTArray<float>& aData) {
--- a/dom/media/webaudio/AudioNodeTrack.h
+++ b/dom/media/webaudio/AudioNodeTrack.h
@@ -3,17 +3,16 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MOZILLA_AUDIONODETRACK_H_
 #define MOZILLA_AUDIONODETRACK_H_
 
 #include "MediaTrackGraph.h"
 #include "mozilla/dom/AudioNodeBinding.h"
-#include "nsAutoPtr.h"
 #include "AlignedTArray.h"
 #include "AudioBlock.h"
 #include "AudioSegment.h"
 
 namespace WebCore {
 class Reverb;
 }  // namespace WebCore
 
@@ -147,17 +146,17 @@ class AudioNodeTrack : public ProcessedM
 
   const OutputChunks& LastChunks() const { return mLastChunks; }
   bool MainThreadNeedsUpdates() const override {
     return ((mFlags & NEED_MAIN_THREAD_ENDED) && mEnded) ||
            (mFlags & NEED_MAIN_THREAD_CURRENT_TIME);
   }
 
   // Any thread
-  AudioNodeEngine* Engine() { return mEngine; }
+  AudioNodeEngine* Engine() { return mEngine.get(); }
 
   size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override;
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override;
 
   void SizeOfAudioNodesIncludingThis(MallocSizeOf aMallocSizeOf,
                                      AudioNodeSizes& aUsage) const;
 
   /*
@@ -201,17 +200,17 @@ class AudioNodeTrack : public ProcessedM
                          DownmixBufferType& aDownmixBuffer);
 
   uint32_t ComputedNumberOfChannels(uint32_t aInputChannelCount);
   void ObtainInputBlock(AudioBlock& aTmpChunk, uint32_t aPortIndex);
   void IncrementActiveInputCount();
   void DecrementActiveInputCount();
 
   // The engine that will generate output for this node.
-  const nsAutoPtr<AudioNodeEngine> mEngine;
+  const UniquePtr<AudioNodeEngine> mEngine;
   // The mixed input blocks are kept from iteration to iteration to avoid
   // reallocating channel data arrays and any buffers for mixing.
   OutputChunks mInputChunks;
   // The last block produced by this node.
   OutputChunks mLastChunks;
   // Whether this is an internal or external track
   const Flags mFlags;
   // The number of input tracks that may provide non-silent input.
--- a/dom/media/webaudio/ConvolverNode.cpp
+++ b/dom/media/webaudio/ConvolverNode.cpp
@@ -1,17 +1,16 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "ConvolverNode.h"
 #include "mozilla/dom/ConvolverNodeBinding.h"
-#include "nsAutoPtr.h"
 #include "AlignmentUtils.h"
 #include "AudioNodeEngine.h"
 #include "AudioNodeTrack.h"
 #include "blink/Reverb.h"
 #include "PlayingRefChangeHandler.h"
 
 namespace mozilla {
 namespace dom {
@@ -95,17 +94,17 @@ class ConvolverNodeEngine final : public
     if (aReverb) {
       mRightConvolverMode = aImpulseChannelCount == 1
                                 ? RightConvolverMode::Direct
                                 : RightConvolverMode::Always;
     } else {
       mRightConvolverMode = RightConvolverMode::Always;
     }
 
-    mReverb = aReverb;
+    mReverb.reset(aReverb);
   }
 
   void AllocateReverbInput(const AudioBlock& aInput,
                            uint32_t aTotalChannelCount) {
     uint32_t inputChannelCount = aInput.ChannelCount();
     MOZ_ASSERT(inputChannelCount <= aTotalChannelCount);
     mReverbInput.AllocateChannels(aTotalChannelCount);
     // Pre-multiply the input's volume
@@ -141,17 +140,17 @@ class ConvolverNodeEngine final : public
 
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override {
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }
 
  private:
   // Keeping mReverbInput across process calls avoids unnecessary reallocation.
   AudioBlock mReverbInput;
-  nsAutoPtr<WebCore::Reverb> mReverb;
+  UniquePtr<WebCore::Reverb> mReverb;
   // Tracks samples of the tail remaining to be output.  INT32_MIN is a
   // special value to indicate that the end of any previous tail has been
   // handled.
   int32_t mRemainingLeftOutput = INT32_MIN;
   // mRemainingRightOutput and mRemainingRightHistory are only used when
   // mRightOutputMode != Always.  There is no special handling required at the
   // end of tail times and so INT32_MIN is not used.
   // mRemainingRightOutput tracks how much longer this node needs to continue
@@ -454,21 +453,21 @@ void ConvolverNode::SetBuffer(JSContext*
     // audio thread, it's important not to make this too high.  In this case
     // 8192 is a good value. But, the Reverb object is multi-threaded, so we
     // want this as high as possible without losing too much accuracy. Very
     // large FFTs will have worse phase errors. Given these constraints 32768 is
     // a good compromise.
     const size_t MaxFFTSize = 32768;
 
     bool allocationFailure = false;
-    nsAutoPtr<WebCore::Reverb> reverb(new WebCore::Reverb(
+    UniquePtr<WebCore::Reverb> reverb(new WebCore::Reverb(
         data, MaxFFTSize, !Context()->IsOffline(), mNormalize,
         aBuffer->SampleRate(), &allocationFailure));
     if (!allocationFailure) {
-      ns->SetReverb(reverb.forget(), data.ChannelCount());
+      ns->SetReverb(reverb.release(), data.ChannelCount());
     } else {
       aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
       return;
     }
   } else {
     ns->SetReverb(nullptr, 0);
   }
   mBuffer = aBuffer;
--- a/dom/media/webaudio/DynamicsCompressorNode.cpp
+++ b/dom/media/webaudio/DynamicsCompressorNode.cpp
@@ -1,17 +1,16 @@
 /* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "DynamicsCompressorNode.h"
 #include "mozilla/dom/DynamicsCompressorNodeBinding.h"
-#include "nsAutoPtr.h"
 #include "AudioNodeEngine.h"
 #include "AudioNodeTrack.h"
 #include "AudioDestinationNode.h"
 #include "WebAudioUtils.h"
 #include "blink/DynamicsCompressor.h"
 
 using WebCore::DynamicsCompressor;
 
@@ -77,18 +76,18 @@ class DynamicsCompressorNodeEngine final
       // Just output silence
       *aOutput = aInput;
       return;
     }
 
     const uint32_t channelCount = aInput.ChannelCount();
     if (mCompressor->numberOfChannels() != channelCount) {
       // Create a new compressor object with a new channel count
-      mCompressor = new WebCore::DynamicsCompressor(aTrack->mSampleRate,
-                                                    aInput.ChannelCount());
+      mCompressor = MakeUnique<WebCore::DynamicsCompressor>(
+          aTrack->mSampleRate, aInput.ChannelCount());
     }
 
     TrackTime pos = mDestination->GraphTimeToTrackTime(aFrom);
     mCompressor->setParameterValue(DynamicsCompressor::ParamThreshold,
                                    mThreshold.GetValueAtTime(pos));
     mCompressor->setParameterValue(DynamicsCompressor::ParamKnee,
                                    mKnee.GetValueAtTime(pos));
     mCompressor->setParameterValue(DynamicsCompressor::ParamRatio,
@@ -152,17 +151,17 @@ class DynamicsCompressorNodeEngine final
 
  private:
   RefPtr<AudioNodeTrack> mDestination;
   AudioParamTimeline mThreshold;
   AudioParamTimeline mKnee;
   AudioParamTimeline mRatio;
   AudioParamTimeline mAttack;
   AudioParamTimeline mRelease;
-  nsAutoPtr<DynamicsCompressor> mCompressor;
+  UniquePtr<DynamicsCompressor> mCompressor;
 };
 
 DynamicsCompressorNode::DynamicsCompressorNode(AudioContext* aContext)
     : AudioNode(aContext, 2, ChannelCountMode::Clamped_max,
                 ChannelInterpretation::Speakers),
       mReduction(0) {
   CreateAudioParam(mThreshold, DynamicsCompressorNodeEngine::THRESHOLD,
                    "threshold", -24.f, -100.f, 0.f);
--- a/dom/media/webaudio/IIRFilterNode.cpp
+++ b/dom/media/webaudio/IIRFilterNode.cpp
@@ -68,17 +68,18 @@ class IIRFilterNodeEngine final : public
       } else {
         WebAudioUtils::LogToDeveloperConsole(
             mWindowID, "IIRFilterChannelCountChangeWarning");
       }
 
       // Adjust the number of filters based on the number of channels
       mIIRFilters.SetLength(aInput.ChannelCount());
       for (size_t i = 0; i < aInput.ChannelCount(); ++i) {
-        mIIRFilters[i] = new blink::IIRFilter(&mFeedforward, &mFeedback);
+        mIIRFilters[i] =
+            MakeUnique<blink::IIRFilter>(&mFeedforward, &mFeedback);
       }
     }
 
     uint32_t numberOfChannels = mIIRFilters.Length();
     aOutput->AllocateChannels(numberOfChannels);
 
     for (uint32_t i = 0; i < numberOfChannels; ++i) {
       const float* input;
@@ -110,17 +111,17 @@ class IIRFilterNodeEngine final : public
   }
 
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override {
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }
 
  private:
   RefPtr<AudioNodeTrack> mDestination;
-  nsTArray<nsAutoPtr<blink::IIRFilter>> mIIRFilters;
+  nsTArray<UniquePtr<blink::IIRFilter>> mIIRFilters;
   AudioDoubleArray mFeedforward;
   AudioDoubleArray mFeedback;
   uint64_t mWindowID;
 };
 
 IIRFilterNode::IIRFilterNode(AudioContext* aContext,
                              const Sequence<double>& aFeedforward,
                              const Sequence<double>& aFeedback)
--- a/dom/media/webaudio/PannerNode.cpp
+++ b/dom/media/webaudio/PannerNode.cpp
@@ -10,17 +10,16 @@
 #include "AudioNodeEngine.h"
 #include "AudioNodeTrack.h"
 #include "AudioListener.h"
 #include "PanningUtils.h"
 #include "AudioBufferSourceNode.h"
 #include "PlayingRefChangeHandler.h"
 #include "blink/HRTFPanner.h"
 #include "blink/HRTFDatabaseLoader.h"
-#include "nsAutoPtr.h"
 
 using WebCore::HRTFDatabaseLoader;
 using WebCore::HRTFPanner;
 
 namespace mozilla {
 namespace dom {
 
 NS_IMPL_CYCLE_COLLECTION_CLASS(PannerNode)
@@ -97,18 +96,18 @@ class PannerNodeEngine final : public Au
     MOZ_ASSERT(NS_IsMainThread());
     if (mHRTFPanner) {
       return;
     }
     // HRTFDatabaseLoader needs to be fetched on the main thread.
     RefPtr<HRTFDatabaseLoader> loader =
         HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(
             NodeMainThread()->Context()->SampleRate());
-    mHRTFPanner = new HRTFPanner(NodeMainThread()->Context()->SampleRate(),
-                                 loader.forget());
+    mHRTFPanner = MakeUnique<HRTFPanner>(
+        NodeMainThread()->Context()->SampleRate(), loader.forget());
   }
 
   void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override {
     switch (aIndex) {
       case PannerNode::PANNING_MODEL:
         switch (PanningModelType(aParam)) {
           case PanningModelType::Equalpower:
             mPanningModelFunction =
@@ -241,17 +240,17 @@ class PannerNodeEngine final : public Au
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override {
     return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
   }
 
   RefPtr<AudioNodeTrack> mDestination;
   // This member is set on the main thread, but is not accessed on the rendering
   // thread untile mPanningModelFunction has changed, and this happens strictly
   // later, via a MediaTrackGraph ControlMessage.
-  nsAutoPtr<HRTFPanner> mHRTFPanner;
+  UniquePtr<HRTFPanner> mHRTFPanner;
   RefPtr<AudioListenerEngine> mListenerEngine;
   typedef void (PannerNodeEngine::*PanningModelFunction)(
       const AudioBlock& aInput, AudioBlock* aOutput, TrackTime tick);
   PanningModelFunction mPanningModelFunction;
   typedef float (PannerNodeEngine::*DistanceModelFunction)(double aDistance);
   DistanceModelFunction mDistanceModelFunction;
   AudioParamTimeline mPositionX;
   AudioParamTimeline mPositionY;
--- a/dom/media/webaudio/ScriptProcessorNode.cpp
+++ b/dom/media/webaudio/ScriptProcessorNode.cpp
@@ -10,17 +10,16 @@
 #include "AudioDestinationNode.h"
 #include "AudioNodeEngine.h"
 #include "AudioNodeTrack.h"
 #include "AudioProcessingEvent.h"
 #include "WebAudioUtils.h"
 #include "mozilla/dom/ScriptSettings.h"
 #include "mozilla/Mutex.h"
 #include "mozilla/PodOperations.h"
-#include "nsAutoPtr.h"
 #include <deque>
 
 namespace mozilla {
 namespace dom {
 
 // The maximum latency, in seconds, that we can live with before dropping
 // buffers.
 static const float MAX_LATENCY_S = 0.5;
@@ -244,17 +243,17 @@ class ScriptProcessorNodeEngine final : 
                             uint32_t aNumberOfInputChannels)
       : AudioNodeEngine(aNode),
         mDestination(aDestination->Track()),
         mSharedBuffers(new SharedBuffers(mDestination->mSampleRate)),
         mBufferSize(aBufferSize),
         mInputChannelCount(aNumberOfInputChannels),
         mInputWriteIndex(0) {}
 
-  SharedBuffers* GetSharedBuffers() const { return mSharedBuffers; }
+  SharedBuffers* GetSharedBuffers() const { return mSharedBuffers.get(); }
 
   enum {
     IS_CONNECTED,
   };
 
   void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override {
     switch (aIndex) {
       case IS_CONNECTED:
@@ -452,17 +451,17 @@ class ScriptProcessorNodeEngine final : 
     RefPtr<Command> command =
         new Command(aTrack, mInputBuffer.forget(), playbackTime);
     mAbstractMainThread->Dispatch(command.forget());
   }
 
   friend class ScriptProcessorNode;
 
   RefPtr<AudioNodeTrack> mDestination;
-  nsAutoPtr<SharedBuffers> mSharedBuffers;
+  UniquePtr<SharedBuffers> mSharedBuffers;
   RefPtr<ThreadSharedFloatArrayBufferList> mInputBuffer;
   const uint32_t mBufferSize;
   const uint32_t mInputChannelCount;
   // The write index into the current input buffer
   uint32_t mInputWriteIndex;
   bool mIsConnected = false;
 };
 
--- a/dom/media/webaudio/blink/DynamicsCompressor.h
+++ b/dom/media/webaudio/blink/DynamicsCompressor.h
@@ -28,27 +28,27 @@
 
 #ifndef DynamicsCompressor_h
 #define DynamicsCompressor_h
 
 #include "DynamicsCompressorKernel.h"
 #include "ZeroPole.h"
 
 #include "nsTArray.h"
-#include "nsAutoPtr.h"
 #include "mozilla/MemoryReporting.h"
 #include "mozilla/UniquePtr.h"
 
 namespace mozilla {
 class AudioBlock;
 }  // namespace mozilla
 
 namespace WebCore {
 
 using mozilla::AudioBlock;
+using mozilla::UniquePtr;
 
 // DynamicsCompressor implements a flexible audio dynamics compression effect
 // such as is commonly used in musical production and game audio. It lowers the
 // volume of the loudest parts of the signal and raises the volume of the
 // softest parts, making the sound richer, fuller, and more controlled.
 
 class DynamicsCompressor {
  public:
@@ -110,18 +110,18 @@ class DynamicsCompressor {
   typedef struct {
     ZeroPole filters[4];
     size_t sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const {
       return aMallocSizeOf(this);
     }
   } ZeroPoleFilterPack4;
 
   // Per-channel emphasis filters.
-  nsTArray<nsAutoPtr<ZeroPoleFilterPack4> > m_preFilterPacks;
-  nsTArray<nsAutoPtr<ZeroPoleFilterPack4> > m_postFilterPacks;
+  nsTArray<UniquePtr<ZeroPoleFilterPack4> > m_preFilterPacks;
+  nsTArray<UniquePtr<ZeroPoleFilterPack4> > m_postFilterPacks;
 
   mozilla::UniquePtr<const float*[]> m_sourceChannels;
   mozilla::UniquePtr<float*[]> m_destinationChannels;
 
   void setEmphasisStageParameters(unsigned stageIndex, float gain,
                                   float normalizedFrequency /* 0 -> 1 */);
   void setEmphasisParameters(float gain, float anchorFreq,
                              float filterStageRatio);
--- a/dom/media/webaudio/blink/HRTFKernel.cpp
+++ b/dom/media/webaudio/blink/HRTFKernel.cpp
@@ -74,17 +74,17 @@ HRTFKernel::HRTFKernel(float* impulseRes
     for (unsigned i = length - numberOfFadeOutFrames; i < length; ++i) {
       float x =
           1.0f - static_cast<float>(i - (length - numberOfFadeOutFrames)) /
                      numberOfFadeOutFrames;
       impulseResponse[i] *= x;
     }
   }
 
-  m_fftFrame = new FFTBlock(fftSize);
+  m_fftFrame = MakeUnique<FFTBlock>(fftSize);
   m_fftFrame->PadAndMakeScaledDFT(impulseResponse, length);
 }
 
 // Interpolates two kernels with x: 0 -> 1 and returns the result.
 nsReturnRef<HRTFKernel> HRTFKernel::createInterpolatedKernel(
     HRTFKernel* kernel1, HRTFKernel* kernel2, float x) {
   MOZ_ASSERT(kernel1 && kernel2);
   if (!kernel1 || !kernel2) return nsReturnRef<HRTFKernel>();
@@ -95,14 +95,15 @@ nsReturnRef<HRTFKernel> HRTFKernel::crea
   float sampleRate1 = kernel1->sampleRate();
   float sampleRate2 = kernel2->sampleRate();
   MOZ_ASSERT(sampleRate1 == sampleRate2);
   if (sampleRate1 != sampleRate2) return nsReturnRef<HRTFKernel>();
 
   float frameDelay =
       (1 - x) * kernel1->frameDelay() + x * kernel2->frameDelay();
 
-  nsAutoPtr<FFTBlock> interpolatedFrame(FFTBlock::CreateInterpolatedBlock(
+  UniquePtr<FFTBlock> interpolatedFrame(FFTBlock::CreateInterpolatedBlock(
       *kernel1->fftFrame(), *kernel2->fftFrame(), x));
-  return HRTFKernel::create(interpolatedFrame, frameDelay, sampleRate1);
+  return HRTFKernel::create(std::move(interpolatedFrame), frameDelay,
+                            sampleRate1);
 }
 
 }  // namespace WebCore
--- a/dom/media/webaudio/blink/HRTFKernel.h
+++ b/dom/media/webaudio/blink/HRTFKernel.h
@@ -24,25 +24,26 @@
  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef HRTFKernel_h
 #define HRTFKernel_h
 
-#include "nsAutoPtr.h"
 #include "nsAutoRef.h"
 #include "nsTArray.h"
 #include "mozilla/FFTBlock.h"
 #include "mozilla/MemoryReporting.h"
+#include "mozilla/UniquePtr.h"
 
 namespace WebCore {
 
 using mozilla::FFTBlock;
+using mozilla::UniquePtr;
 
 // HRTF stands for Head-Related Transfer Function.
 // HRTFKernel is a frequency-domain representation of an impulse-response used
 // as part of the spatialized panning system. For a given azimuth / elevation
 // angle there will be one HRTFKernel for the left ear transfer function, and
 // one for the right ear. The leading delay (average group delay) for each
 // impulse response is extracted:
 //      m_fftFrame is the frequency-domain representation of the impulse
@@ -51,17 +52,17 @@ using mozilla::FFTBlock;
 class HRTFKernel {
  public:
   // Note: this is destructive on the passed in |impulseResponse|.
   // The |length| of |impulseResponse| must be a power of two.
   // The size of the DFT will be |2 * length|.
   static nsReturnRef<HRTFKernel> create(float* impulseResponse, size_t length,
                                         float sampleRate);
 
-  static nsReturnRef<HRTFKernel> create(nsAutoPtr<FFTBlock> fftFrame,
+  static nsReturnRef<HRTFKernel> create(UniquePtr<FFTBlock> fftFrame,
                                         float frameDelay, float sampleRate);
 
   // Given two HRTFKernels, and an interpolation factor x: 0 -> 1, returns an
   // interpolated HRTFKernel.
   static nsReturnRef<HRTFKernel> createInterpolatedKernel(HRTFKernel* kernel1,
                                                           HRTFKernel* kernel2,
                                                           float x);
 
@@ -81,22 +82,22 @@ class HRTFKernel {
 
  private:
   HRTFKernel(const HRTFKernel& other) = delete;
   void operator=(const HRTFKernel& other) = delete;
 
   // Note: this is destructive on the passed in |impulseResponse|.
   HRTFKernel(float* impulseResponse, size_t fftSize, float sampleRate);
 
-  HRTFKernel(nsAutoPtr<FFTBlock> fftFrame, float frameDelay, float sampleRate)
-      : m_fftFrame(fftFrame),
+  HRTFKernel(UniquePtr<FFTBlock> fftFrame, float frameDelay, float sampleRate)
+      : m_fftFrame(std::move(fftFrame)),
         m_frameDelay(frameDelay),
         m_sampleRate(sampleRate) {}
 
-  nsAutoPtr<FFTBlock> m_fftFrame;
+  UniquePtr<FFTBlock> m_fftFrame;
   float m_frameDelay;
   float m_sampleRate;
 };
 
 typedef nsTArray<nsAutoRef<HRTFKernel> > HRTFKernelList;
 
 }  // namespace WebCore
 
@@ -111,18 +112,18 @@ namespace WebCore {
 
 inline nsReturnRef<HRTFKernel> HRTFKernel::create(float* impulseResponse,
                                                   size_t length,
                                                   float sampleRate) {
   return nsReturnRef<HRTFKernel>(
       new HRTFKernel(impulseResponse, length, sampleRate));
 }
 
-inline nsReturnRef<HRTFKernel> HRTFKernel::create(nsAutoPtr<FFTBlock> fftFrame,
+inline nsReturnRef<HRTFKernel> HRTFKernel::create(UniquePtr<FFTBlock> fftFrame,
                                                   float frameDelay,
                                                   float sampleRate) {
   return nsReturnRef<HRTFKernel>(
-      new HRTFKernel(fftFrame, frameDelay, sampleRate));
+      new HRTFKernel(std::move(fftFrame), frameDelay, sampleRate));
 }
 
 }  // namespace WebCore
 
 #endif  // HRTFKernel_h
--- a/dom/media/webaudio/blink/PeriodicWave.cpp
+++ b/dom/media/webaudio/blink/PeriodicWave.cpp
@@ -52,18 +52,20 @@ already_AddRefed<PeriodicWave> PeriodicW
     RefPtr<PeriodicWave> periodicWave =
         new PeriodicWave(sampleRate, numberOfComponents, disableNormalization);
 
     // Limit the number of components used to those for frequencies below the
     // Nyquist of the fixed length inverse FFT.
     size_t halfSize = periodicWave->m_periodicWaveSize / 2;
     numberOfComponents = std::min(numberOfComponents, halfSize);
     periodicWave->m_numberOfComponents = numberOfComponents;
-    periodicWave->m_realComponents = new AudioFloatArray(numberOfComponents);
-    periodicWave->m_imagComponents = new AudioFloatArray(numberOfComponents);
+    periodicWave->m_realComponents =
+        MakeUnique<AudioFloatArray>(numberOfComponents);
+    periodicWave->m_imagComponents =
+        MakeUnique<AudioFloatArray>(numberOfComponents);
     memcpy(periodicWave->m_realComponents->Elements(), real,
            numberOfComponents * sizeof(float));
     memcpy(periodicWave->m_imagComponents->Elements(), imag,
            numberOfComponents * sizeof(float));
 
     return periodicWave.forget();
   }
   return nullptr;
@@ -252,19 +254,18 @@ void PeriodicWave::createBandLimitedTabl
   }
 
   // Clear any DC-offset.
   frame.RealData(0) = 0;
   // Clear value which has no effect.
   frame.ImagData(0) = 0;
 
   // Create the band-limited table.
-  AlignedAudioFloatArray* table =
-      new AlignedAudioFloatArray(m_periodicWaveSize);
-  m_bandLimitedTables[rangeIndex] = table;
+  m_bandLimitedTables[rangeIndex] =
+      MakeUnique<AlignedAudioFloatArray>(m_periodicWaveSize);
 
   // Apply an inverse FFT to generate the time-domain table data.
   float* data = m_bandLimitedTables[rangeIndex]->Elements();
   frame.GetInverseWithoutScaling(data);
 
   // For the first range (which has the highest power), calculate
   // its peak value then compute normalization scale.
   if (m_disableNormalization) {
@@ -283,18 +284,18 @@ void PeriodicWave::createBandLimitedTabl
 }
 
 void PeriodicWave::generateBasicWaveform(OscillatorType shape) {
   const float piFloat = float(M_PI);
   unsigned fftSize = periodicWaveSize();
   unsigned halfSize = fftSize / 2;
 
   m_numberOfComponents = halfSize;
-  m_realComponents = new AudioFloatArray(halfSize);
-  m_imagComponents = new AudioFloatArray(halfSize);
+  m_realComponents = MakeUnique<AudioFloatArray>(halfSize);
+  m_imagComponents = MakeUnique<AudioFloatArray>(halfSize);
   float* realP = m_realComponents->Elements();
   float* imagP = m_imagComponents->Elements();
 
   // Clear DC and imag value which is ignored.
   realP[0] = 0;
   imagP[0] = 0;
 
   for (unsigned n = 1; n < halfSize; ++n) {
--- a/dom/media/webaudio/blink/PeriodicWave.h
+++ b/dom/media/webaudio/blink/PeriodicWave.h
@@ -25,26 +25,28 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef PeriodicWave_h
 #define PeriodicWave_h
 
 #include "mozilla/dom/OscillatorNodeBinding.h"
-#include <nsAutoPtr.h>
+#include "mozilla/UniquePtr.h"
 #include <nsTArray.h>
 #include "AlignedTArray.h"
 #include "mozilla/MemoryReporting.h"
 
 namespace WebCore {
 
 typedef AlignedTArray<float> AlignedAudioFloatArray;
 typedef nsTArray<float> AudioFloatArray;
 
+using mozilla::UniquePtr;
+
 class PeriodicWave {
  public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WebCore::PeriodicWave);
 
   static already_AddRefed<PeriodicWave> createSine(float sampleRate);
   static already_AddRefed<PeriodicWave> createSquare(float sampleRate);
   static already_AddRefed<PeriodicWave> createSawtooth(float sampleRate);
   static already_AddRefed<PeriodicWave> createTriangle(float sampleRate);
@@ -85,18 +87,18 @@ class PeriodicWave {
 
   void generateBasicWaveform(mozilla::dom::OscillatorType);
 
   float m_sampleRate;
   unsigned m_periodicWaveSize;
   unsigned m_numberOfRanges;
   float m_centsPerRange;
   unsigned m_numberOfComponents;
-  nsAutoPtr<AudioFloatArray> m_realComponents;
-  nsAutoPtr<AudioFloatArray> m_imagComponents;
+  UniquePtr<AudioFloatArray> m_realComponents;
+  UniquePtr<AudioFloatArray> m_imagComponents;
 
   // The lowest frequency (in Hertz) where playback will include all of the
   // partials.  Playing back lower than this frequency will gradually lose
   // more high-frequency information.
   // This frequency is quite low (~10Hz @ // 44.1KHz)
   float m_lowestFundamentalFrequency;
 
   float m_rateScale;
@@ -108,14 +110,14 @@ class PeriodicWave {
 
   unsigned numberOfPartialsForRange(unsigned rangeIndex) const;
 
   // Creates table for specified index based on fundamental frequency.
   void createBandLimitedTables(float fundamentalFrequency, unsigned rangeIndex);
   unsigned m_maxPartialsInBandLimitedTable;
   float m_normalizationScale;
   bool m_disableNormalization;
-  nsTArray<nsAutoPtr<AlignedAudioFloatArray> > m_bandLimitedTables;
+  nsTArray<UniquePtr<AlignedAudioFloatArray> > m_bandLimitedTables;
 };
 
 }  // namespace WebCore
 
 #endif  // PeriodicWave_h
--- a/dom/media/webaudio/blink/Reverb.cpp
+++ b/dom/media/webaudio/blink/Reverb.cpp
@@ -142,20 +142,20 @@ void Reverb::initialize(const nsTArray<c
   m_convolvers.SetCapacity(numConvolvers);
 
   int convolverRenderPhase = 0;
   for (size_t i = 0; i < numConvolvers; ++i) {
     size_t channelIndex = i < numResponseChannels ? i : 0;
     const float* channel = impulseResponseBuffer[channelIndex];
     size_t length = impulseResponseBufferLength;
 
-    nsAutoPtr<ReverbConvolver> convolver(
+    UniquePtr<ReverbConvolver> convolver(
         new ReverbConvolver(channel, length, maxFFTSize, convolverRenderPhase,
                             useBackgroundThreads));
-    m_convolvers.AppendElement(convolver.forget());
+    m_convolvers.AppendElement(std::move(convolver));
 
     convolverRenderPhase += WEBAUDIO_BLOCK_SIZE;
   }
 
   // For "True" stereo processing we allocate a temporary buffer to avoid
   // repeatedly allocating it in the process() method. It can be bad to allocate
   // memory in a real-time thread.
   if (numResponseChannels == 4) {
--- a/dom/media/webaudio/blink/Reverb.h
+++ b/dom/media/webaudio/blink/Reverb.h
@@ -25,23 +25,25 @@
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef Reverb_h
 #define Reverb_h
 
 #include "ReverbConvolver.h"
-#include "nsAutoPtr.h"
 #include "nsTArray.h"
 #include "AudioBlock.h"
 #include "mozilla/MemoryReporting.h"
+#include "mozilla/UniquePtr.h"
 
 namespace WebCore {
 
+using mozilla::UniquePtr;
+
 // Multi-channel convolution reverb with channel matrixing - one or more
 // ReverbConvolver objects are used internally.
 
 class Reverb {
  public:
   enum { MaxFrameSize = 256 };
 
   // renderSliceSize is a rendering hint, so the FFTs can be optimized to not
@@ -62,17 +64,17 @@ class Reverb {
 
  private:
   void initialize(const nsTArray<const float*>& impulseResponseBuffer,
                   size_t impulseResponseBufferLength, size_t maxFFTSize,
                   bool useBackgroundThreads);
 
   size_t m_impulseResponseLength;
 
-  nsTArray<nsAutoPtr<ReverbConvolver> > m_convolvers;
+  nsTArray<UniquePtr<ReverbConvolver> > m_convolvers;
 
   // For "True" stereo processing
   mozilla::AudioBlock m_tempBuffer;
 };
 
 }  // namespace WebCore
 
 #endif  // Reverb_h
--- a/dom/media/webaudio/blink/ReverbConvolver.cpp
+++ b/dom/media/webaudio/blink/ReverbConvolver.cpp
@@ -97,27 +97,27 @@ ReverbConvolver::ReverbConvolver(const f
         fftSize *= 2;
       }
     }
 
     // This "staggers" the time when each FFT happens so they don't all happen
     // at the same time
     int renderPhase = convolverRenderPhase + stagePhase;
 
-    nsAutoPtr<ReverbConvolverStage> stage(new ReverbConvolverStage(
+    UniquePtr<ReverbConvolverStage> stage(new ReverbConvolverStage(
         response, totalResponseLength, reverbTotalLatency, stageOffset,
         stageSize, fftSize, renderPhase, &m_accumulationBuffer));
 
     bool isBackgroundStage = false;
 
     if (this->useBackgroundThreads() && stageOffset > RealtimeFrameLimit) {
-      m_backgroundStages.AppendElement(stage.forget());
+      m_backgroundStages.AppendElement(std::move(stage));
       isBackgroundStage = true;
     } else
-      m_stages.AppendElement(stage.forget());
+      m_stages.AppendElement(std::move(stage));
 
     // Figure out next FFT size
     fftSize *= 2;
 
     stageOffset += stageSize;
 
     if (hasRealtimeConstraint && !isBackgroundStage &&
         fftSize > MaxRealtimeFFTSize) {
--- a/dom/media/webaudio/blink/ReverbConvolver.h
+++ b/dom/media/webaudio/blink/ReverbConvolver.h
@@ -26,26 +26,28 @@
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef ReverbConvolver_h
 #define ReverbConvolver_h
 
 #include "ReverbAccumulationBuffer.h"
 #include "ReverbInputBuffer.h"
-#include "nsAutoPtr.h"
 #include "mozilla/MemoryReporting.h"
 #include "mozilla/Monitor.h"
+#include "mozilla/UniquePtr.h"
 #ifdef LOG
 #  undef LOG
 #endif
 #include "base/thread.h"
 
 namespace WebCore {
 
+using mozilla::UniquePtr;
+
 class ReverbConvolverStage;
 
 class ReverbConvolver {
  public:
   // maxFFTSize can be adjusted (from say 2048 to 32768) depending on how much
   // precision is necessary. For certain tweaky de-convolving applications the
   // phase errors add up quickly and lead to non-sensical results with larger
   // FFT sizes and single-precision floats.  In these cases 2048 is a good size.
@@ -62,18 +64,18 @@ class ReverbConvolver {
   ReverbInputBuffer* inputBuffer() { return &m_inputBuffer; }
 
   bool useBackgroundThreads() const { return m_useBackgroundThreads; }
   void backgroundThreadEntry();
 
   size_t sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
 
  private:
-  nsTArray<nsAutoPtr<ReverbConvolverStage> > m_stages;
-  nsTArray<nsAutoPtr<ReverbConvolverStage> > m_backgroundStages;
+  nsTArray<UniquePtr<ReverbConvolverStage> > m_stages;
+  nsTArray<UniquePtr<ReverbConvolverStage> > m_backgroundStages;
   size_t m_impulseResponseLength;
 
   ReverbAccumulationBuffer m_accumulationBuffer;
 
   // One or more background threads read from this input buffer which is fed
   // from the realtime thread.
   ReverbInputBuffer m_inputBuffer;
 
--- a/dom/media/webaudio/blink/ReverbConvolverStage.cpp
+++ b/dom/media/webaudio/blink/ReverbConvolverStage.cpp
@@ -42,19 +42,19 @@ ReverbConvolverStage::ReverbConvolverSta
     size_t stageOffset, size_t stageLength, size_t fftSize, size_t renderPhase,
     ReverbAccumulationBuffer* accumulationBuffer)
     : m_accumulationBuffer(accumulationBuffer),
       m_accumulationReadIndex(0),
       m_inputReadIndex(0) {
   MOZ_ASSERT(impulseResponse);
   MOZ_ASSERT(accumulationBuffer);
 
-  m_fftKernel = new FFTBlock(fftSize);
+  m_fftKernel = MakeUnique<FFTBlock>(fftSize);
   m_fftKernel->PadAndMakeScaledDFT(impulseResponse + stageOffset, stageLength);
-  m_fftConvolver = new FFTConvolver(fftSize, renderPhase);
+  m_fftConvolver = MakeUnique<FFTConvolver>(fftSize, renderPhase);
 
   // The convolution stage at offset stageOffset needs to have a corresponding
   // delay to cancel out the offset.
   size_t totalDelay = stageOffset + reverbTotalLatency;
 
   // But, the FFT convolution itself incurs latency, so subtract this out...
   size_t fftLatency = m_fftConvolver->latencyFrames();
   MOZ_ASSERT(totalDelay >= fftLatency);
@@ -86,16 +86,16 @@ void ReverbConvolverStage::processInBack
 }
 
 void ReverbConvolverStage::process(const float* source) {
   MOZ_ASSERT(source);
   if (!source) return;
 
   // Now, run the convolution (into the delay buffer).
   // An expensive FFT will happen every fftSize / 2 frames.
-  const float* output = m_fftConvolver->process(m_fftKernel, source);
+  const float* output = m_fftConvolver->process(m_fftKernel.get(), source);
 
   // Now accumulate into reverb's accumulation buffer.
   m_accumulationBuffer->accumulate(output, WEBAUDIO_BLOCK_SIZE,
                                    &m_accumulationReadIndex, m_postDelayLength);
 }
 
 }  // namespace WebCore
--- a/dom/media/webaudio/blink/ReverbConvolverStage.h
+++ b/dom/media/webaudio/blink/ReverbConvolverStage.h
@@ -26,17 +26,16 @@
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #ifndef ReverbConvolverStage_h
 #define ReverbConvolverStage_h
 
 #include "FFTConvolver.h"
 
-#include "nsAutoPtr.h"
 #include "nsTArray.h"
 #include "mozilla/FFTBlock.h"
 #include "mozilla/MemoryReporting.h"
 
 namespace WebCore {
 
 using mozilla::FFTBlock;
 
@@ -62,18 +61,18 @@ class ReverbConvolverStage {
   void processInBackground(ReverbConvolver* convolver);
 
   // Useful for background processing
   int inputReadIndex() const { return m_inputReadIndex; }
 
   size_t sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
 
  private:
-  nsAutoPtr<FFTBlock> m_fftKernel;
-  nsAutoPtr<FFTConvolver> m_fftConvolver;
+  UniquePtr<FFTBlock> m_fftKernel;
+  UniquePtr<FFTConvolver> m_fftConvolver;
 
   ReverbAccumulationBuffer* m_accumulationBuffer;
   int m_accumulationReadIndex;
   int m_inputReadIndex;
 
   size_t m_postDelayLength;
 
   nsTArray<float> m_temporaryBuffer;
--- a/dom/media/webm/WebMWriter.cpp
+++ b/dom/media/webm/WebMWriter.cpp
@@ -5,22 +5,21 @@
 
 #include "WebMWriter.h"
 #include "EbmlComposer.h"
 #include "GeckoProfiler.h"
 #include "OpusTrackEncoder.h"
 
 namespace mozilla {
 
-WebMWriter::WebMWriter() : ContainerWriter() {
-  mEbmlComposer = new EbmlComposer();
-}
+WebMWriter::WebMWriter()
+    : ContainerWriter(), mEbmlComposer(new EbmlComposer()) {}
 
 WebMWriter::~WebMWriter() {
-  // Out-of-line dtor so mEbmlComposer nsAutoPtr can delete a complete type.
+  // Out-of-line dtor so mEbmlComposer UniquePtr can delete a complete type.
 }
 
 nsresult WebMWriter::WriteEncodedTrack(
     const nsTArray<RefPtr<EncodedFrame>>& aData, uint32_t aFlags) {
   AUTO_PROFILER_LABEL("WebMWriter::WriteEncodedTrack", OTHER);
   for (uint32_t i = 0; i < aData.Length(); i++) {
     mEbmlComposer->WriteSimpleBlock(aData.ElementAt(i).get());
   }
--- a/dom/media/webm/WebMWriter.h
+++ b/dom/media/webm/WebMWriter.h
@@ -2,17 +2,16 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef WebMWriter_h_
 #define WebMWriter_h_
 
 #include "ContainerWriter.h"
-#include "nsAutoPtr.h"
 
 namespace mozilla {
 
 class EbmlComposer;
 
 // Vorbis meta data structure
 class VorbisMetadata : public TrackMetadataBase {
  public:
@@ -57,14 +56,14 @@ class WebMWriter : public ContainerWrite
   nsresult GetContainerData(nsTArray<nsTArray<uint8_t>>* aOutputBufs,
                             uint32_t aFlags = 0) override;
 
   // Assign metadata into muxer
   nsresult SetMetadata(
       const nsTArray<RefPtr<TrackMetadataBase>>& aMetadata) override;
 
  private:
-  nsAutoPtr<EbmlComposer> mEbmlComposer;
+  UniquePtr<EbmlComposer> mEbmlComposer;
 };
 
 }  // namespace mozilla
 
 #endif
--- a/dom/media/webrtc/MediaEngineDefault.h
+++ b/dom/media/webrtc/MediaEngineDefault.h
@@ -2,17 +2,16 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef MEDIAENGINEDEFAULT_H_
 #define MEDIAENGINEDEFAULT_H_
 
 #include "nsITimer.h"
 
-#include "nsAutoPtr.h"
 #include "nsCOMPtr.h"
 #include "DOMMediaStream.h"
 #include "nsComponentManagerUtils.h"
 #include "mozilla/Mutex.h"
 
 #include "VideoUtils.h"
 #include "MediaEngine.h"
 #include "MediaEnginePrefs.h"
--- a/dom/media/webrtc/MediaEngineWebRTC.h
+++ b/dom/media/webrtc/MediaEngineWebRTC.h
@@ -22,17 +22,16 @@
 #include "ipc/IPCMessageUtils.h"
 #include "mozilla/Mutex.h"
 #include "mozilla/Mutex.h"
 #include "mozilla/Sprintf.h"
 #include "mozilla/StaticMutex.h"
 #include "mozilla/UniquePtr.h"
 #include "mozilla/dom/File.h"
 #include "mozilla/dom/MediaStreamTrackBinding.h"
-#include "nsAutoPtr.h"
 #include "nsCOMPtr.h"
 #include "nsComponentManagerUtils.h"
 #include "nsDirectoryServiceDefs.h"
 #include "nsRefPtrHashtable.h"
 #include "nsThreadUtils.h"
 #include "prcvar.h"
 #include "prthread.h"
 
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -10,17 +10,16 @@
 
 #include "AudioConverter.h"
 #include "MediaManager.h"
 #include "MediaTrackGraphImpl.h"
 #include "MediaTrackConstraints.h"
 #include "mozilla/Assertions.h"
 #include "mozilla/ErrorNames.h"
 #include "mtransport/runnable_utils.h"
-#include "nsAutoPtr.h"
 #include "Tracing.h"
 
 // scoped_ptr.h uses FF
 #ifdef FF
 #  undef FF
 #endif
 #include "webrtc/voice_engine/voice_engine_defines.h"
 #include "webrtc/modules/audio_processing/include/audio_processing.h"
@@ -794,18 +793,18 @@ void AudioInputProcessing::NotifyOutputD
                                             uint32_t aChannels) {
   MOZ_ASSERT(aGraph->OnGraphThread());
   MOZ_ASSERT(mEnabled);
 
   if (!mPacketizerOutput || mPacketizerOutput->PacketSize() != aRate / 100u ||
       mPacketizerOutput->Channels() != aChannels) {
     // It's ok to drop the audio still in the packetizer here: if this changes,
     // we changed devices or something.
-    mPacketizerOutput =
-        new AudioPacketizer<AudioDataValue, float>(aRate / 100, aChannels);
+    mPacketizerOutput = MakeUnique<AudioPacketizer<AudioDataValue, float>>(
+        aRate / 100, aChannels);
   }
 
   mPacketizerOutput->Input(aBuffer, aFrames);
 
   while (mPacketizerOutput->PacketsAvailable()) {
     uint32_t samplesPerPacket =
         mPacketizerOutput->PacketSize() * mPacketizerOutput->Channels();
     if (mOutputBuffer.Length() < samplesPerPacket) {
@@ -884,18 +883,18 @@ void AudioInputProcessing::PacketizeAndP
   MOZ_ASSERT(!PassThrough(aGraph),
              "This should be bypassed when in PassThrough mode.");
   MOZ_ASSERT(mEnabled);
   size_t offset = 0;
 
   if (!mPacketizerInput || mPacketizerInput->PacketSize() != aRate / 100u ||
       mPacketizerInput->Channels() != aChannels) {
     // It's ok to drop the audio still in the packetizer here.
-    mPacketizerInput =
-        new AudioPacketizer<AudioDataValue, float>(aRate / 100, aChannels);
+    mPacketizerInput = MakeUnique<AudioPacketizer<AudioDataValue, float>>(
+        aRate / 100, aChannels);
   }
 
   // Packetize our input data into 10ms chunks, deinterleave into planar channel
   // buffers, process, and append to the right MediaStreamTrack.
   mPacketizerInput->Input(aBuffer, static_cast<uint32_t>(aFrames));
 
   while (mPacketizerInput->PacketsAvailable()) {
     uint32_t samplesPerPacket =
--- a/dom/media/webrtc/MediaEngineWebRTCAudio.h
+++ b/dom/media/webrtc/MediaEngineWebRTCAudio.h
@@ -202,20 +202,20 @@ class AudioInputProcessing : public Audi
   // This implements the processing algoritm to apply to the input (e.g. a
   // microphone). If all algorithms are disabled, this class in not used. This
   // class only accepts audio chunks of 10ms. It has two inputs and one output:
   // it is fed the speaker data and the microphone data. It outputs processed
   // input data.
   const UniquePtr<webrtc::AudioProcessing> mAudioProcessing;
   // Packetizer to be able to feed 10ms packets to the input side of
   // mAudioProcessing. Not used if the processing is bypassed.
-  nsAutoPtr<AudioPacketizer<AudioDataValue, float>> mPacketizerInput;
+  UniquePtr<AudioPacketizer<AudioDataValue, float>> mPacketizerInput;
   // Packetizer to be able to feed 10ms packets to the output side of
   // mAudioProcessing. Not used if the processing is bypassed.
-  nsAutoPtr<AudioPacketizer<AudioDataValue, float>> mPacketizerOutput;
+  UniquePtr<AudioPacketizer<AudioDataValue, float>> mPacketizerOutput;
   // The number of channels asked for by content, after clamping to the range of
   // legal channel count for this particular device. This is the number of
   // channels of the input buffer passed as parameter in NotifyInputData.
   uint32_t mRequestedInputChannelCount;
   // mSkipProcessing is true if none of the processing passes are enabled,
   // because of prefs or constraints. This allows simply copying the audio into
   // the MTG, skipping resampling and the whole webrtc.org code.
   bool mSkipProcessing;
--- a/dom/media/webrtc/WebrtcGlobal.h
+++ b/dom/media/webrtc/WebrtcGlobal.h
@@ -3,20 +3,20 @@
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef _WEBRTC_GLOBAL_H_
 #define _WEBRTC_GLOBAL_H_
 
 #include "ipc/IPCMessageUtils.h"
 #include "mozilla/dom/BindingDeclarations.h"
 #include "mozilla/dom/RTCStatsReportBinding.h"
-#include "nsAutoPtr.h"
+#include "mozilla/UniquePtr.h"
 
 typedef mozilla::dom::RTCStatsReportInternal StatsReport;
-typedef nsTArray<nsAutoPtr<StatsReport>> RTCReports;
+typedef nsTArray<mozilla::UniquePtr<StatsReport>> RTCReports;
 typedef mozilla::dom::Sequence<nsString> WebrtcGlobalLog;
 
 namespace mozilla {
 namespace dom {
 // webidl dictionaries don't have move semantics, which is something that ipdl
 // needs for async returns. So, we create a "moveable" subclass that just
 // copies. _Really_ lame, but it gets the job done.
 struct NotReallyMovableButLetsPretendItIsRTCStatsCollection
--- a/dom/media/webspeech/recognition/energy_endpointer.h
+++ b/dom/media/webspeech/recognition/energy_endpointer.h
@@ -58,17 +58,17 @@
 // accept. The false accepts can be ignored by setting
 // ep_contamination_rejection_period.
 
 #ifndef CONTENT_BROWSER_SPEECH_ENDPOINTER_ENERGY_ENDPOINTER_H_
 #define CONTENT_BROWSER_SPEECH_ENDPOINTER_ENERGY_ENDPOINTER_H_
 
 #include <vector>
 
-#include "nsAutoPtr.h"
+#include "mozilla/UniquePtr.h"
 
 #include "energy_endpointer_params.h"
 
 namespace mozilla {
 
 // Endpointer status codes
 enum EpStatus {
   EP_PRE_SPEECH = 10,
@@ -137,17 +137,17 @@ class EnergyEndpointer {
   float offset_confirm_dur_sec_;  // max on time allowed to confirm POST_SPEECH
   int64_t endpointer_time_us_;  // Time of the most recently received audio frame.
   int64_t fast_update_frames_; // Number of frames for initial level adaptation.
   int64_t frame_counter_;  // Number of frames seen. Used for initial adaptation.
   float max_window_dur_;  // Largest search window size (seconds)
   float sample_rate_;  // Sampling rate.
 
   // Ring buffers to hold the speech activity history.
-  nsAutoPtr<HistoryRing> history_;
+  UniquePtr<HistoryRing> history_;
 
   // Configuration parameters.
   EnergyEndpointerParams params_;
 
   // RMS which must be exceeded to conclude frame is speech.
   float decision_threshold_;
 
   // Flag to indicate that audio should be used to estimate environment, prior