Bug 1204606 - Reformat of dom/media r=jya
author: Sylvestre Ledru <sledru@mozilla.com>
Mon, 19 Nov 2018 13:25:37 +0000
changeset 446960 0ceae9db9ec0be18daa1a279511ad305723185d4
parent 446959 aed811658fae595beec6344b597ab8bca2ac9c83
child 446992 62c4741625fed62804f105d8d979f4b5b3d0f191
child 446993 c8dd8f4166c9287abf6964e55f3b86754d6cf806
push id: 35063
push user: dvarga@mozilla.com
push date: Mon, 19 Nov 2018 16:59:56 +0000
treeherder: mozilla-central@0ceae9db9ec0 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: jya
bugs: 1204606
milestone: 65.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1204606 - Reformat of dom/media r=jya # skip-blame Differential Revision: https://phabricator.services.mozilla.com/D12251
dom/media/ADTSDecoder.cpp
dom/media/ADTSDecoder.h
dom/media/ADTSDemuxer.cpp
dom/media/ADTSDemuxer.h
dom/media/AsyncLogger.h
dom/media/AudioBufferUtils.h
dom/media/AudioCaptureStream.cpp
dom/media/AudioCaptureStream.h
dom/media/AudioChannelFormat.cpp
dom/media/AudioChannelFormat.h
dom/media/AudioCompactor.cpp
dom/media/AudioCompactor.h
dom/media/AudioConfig.cpp
dom/media/AudioConfig.h
dom/media/AudioConverter.cpp
dom/media/AudioConverter.h
dom/media/AudioDeviceInfo.cpp
dom/media/AudioDeviceInfo.h
dom/media/AudioMixer.h
dom/media/AudioNotificationReceiver.cpp
dom/media/AudioNotificationReceiver.h
dom/media/AudioNotificationSender.cpp
dom/media/AudioNotificationSender.h
dom/media/AudioPacketizer.h
dom/media/AudioSampleFormat.h
dom/media/AudioSegment.cpp
dom/media/AudioSegment.h
dom/media/AudioStream.cpp
dom/media/AudioStream.h
dom/media/AudioStreamTrack.cpp
dom/media/AudioStreamTrack.h
dom/media/AudioTrack.cpp
dom/media/AudioTrack.h
dom/media/AudioTrackList.cpp
dom/media/AudioTrackList.h
dom/media/AutoplayPolicy.cpp
dom/media/AutoplayPolicy.h
dom/media/BackgroundVideoDecodingPermissionObserver.cpp
dom/media/BackgroundVideoDecodingPermissionObserver.h
dom/media/BaseMediaResource.cpp
dom/media/BaseMediaResource.h
dom/media/Benchmark.cpp
dom/media/Benchmark.h
dom/media/BitReader.cpp
dom/media/BitReader.h
dom/media/BitWriter.cpp
dom/media/BitWriter.h
dom/media/BufferMediaResource.h
dom/media/BufferReader.h
dom/media/ByteWriter.h
dom/media/CanvasCaptureMediaStream.cpp
dom/media/CanvasCaptureMediaStream.h
dom/media/ChannelMediaDecoder.cpp
dom/media/ChannelMediaDecoder.h
dom/media/ChannelMediaResource.cpp
dom/media/ChannelMediaResource.h
dom/media/CloneableWithRangeMediaResource.cpp
dom/media/CloneableWithRangeMediaResource.h
dom/media/CubebUtils.cpp
dom/media/CubebUtils.h
dom/media/DOMMediaStream.cpp
dom/media/DOMMediaStream.h
dom/media/DecoderTraits.cpp
dom/media/DecoderTraits.h
dom/media/FileBlockCache.cpp
dom/media/FileBlockCache.h
dom/media/FileMediaResource.cpp
dom/media/FileMediaResource.h
dom/media/FrameStatistics.h
dom/media/GetUserMediaRequest.cpp
dom/media/GetUserMediaRequest.h
dom/media/GraphDriver.cpp
dom/media/GraphDriver.h
dom/media/ImageToI420.cpp
dom/media/ImageToI420.h
dom/media/Intervals.h
dom/media/MediaBlockCacheBase.h
dom/media/MediaCache.cpp
dom/media/MediaCache.h
dom/media/MediaChannelStatistics.h
dom/media/MediaContainerType.cpp
dom/media/MediaContainerType.h
dom/media/MediaData.cpp
dom/media/MediaData.h
dom/media/MediaDataDemuxer.h
dom/media/MediaDecoder.cpp
dom/media/MediaDecoder.h
dom/media/MediaDecoderOwner.h
dom/media/MediaDecoderStateMachine.cpp
dom/media/MediaDecoderStateMachine.h
dom/media/MediaDeviceInfo.cpp
dom/media/MediaDeviceInfo.h
dom/media/MediaDevices.cpp
dom/media/MediaDevices.h
dom/media/MediaEventSource.h
dom/media/MediaFormatReader.cpp
dom/media/MediaFormatReader.h
dom/media/MediaInfo.cpp
dom/media/MediaInfo.h
dom/media/MediaMIMETypes.cpp
dom/media/MediaMIMETypes.h
dom/media/MediaManager.cpp
dom/media/MediaManager.h
dom/media/MediaMetadataManager.h
dom/media/MediaPromiseDefs.h
dom/media/MediaQueue.h
dom/media/MediaRecorder.cpp
dom/media/MediaRecorder.h
dom/media/MediaResource.cpp
dom/media/MediaResource.h
dom/media/MediaResourceCallback.h
dom/media/MediaResult.h
dom/media/MediaSegment.h
dom/media/MediaShutdownManager.cpp
dom/media/MediaShutdownManager.h
dom/media/MediaStatistics.h
dom/media/MediaStreamError.cpp
dom/media/MediaStreamError.h
dom/media/MediaStreamGraph.cpp
dom/media/MediaStreamGraph.h
dom/media/MediaStreamGraphImpl.h
dom/media/MediaStreamListener.cpp
dom/media/MediaStreamListener.h
dom/media/MediaStreamTrack.cpp
dom/media/MediaStreamTrack.h
dom/media/MediaStreamTypes.h
dom/media/MediaStreamVideoSink.cpp
dom/media/MediaStreamVideoSink.h
dom/media/MediaTimer.cpp
dom/media/MediaTimer.h
dom/media/MediaTrack.cpp
dom/media/MediaTrack.h
dom/media/MediaTrackList.cpp
dom/media/MediaTrackList.h
dom/media/MemoryBlockCache.cpp
dom/media/MemoryBlockCache.h
dom/media/PrincipalChangeObserver.h
dom/media/QueueObject.cpp
dom/media/QueueObject.h
dom/media/ReaderProxy.cpp
dom/media/ReaderProxy.h
dom/media/SeekJob.cpp
dom/media/SeekJob.h
dom/media/SeekTarget.h
dom/media/SelfRef.h
dom/media/SharedBuffer.h
dom/media/StreamTracks.cpp
dom/media/StreamTracks.h
dom/media/TextTrack.cpp
dom/media/TextTrack.h
dom/media/TextTrackCue.cpp
dom/media/TextTrackCue.h
dom/media/TextTrackCueList.cpp
dom/media/TextTrackCueList.h
dom/media/TextTrackList.cpp
dom/media/TextTrackList.h
dom/media/TextTrackRegion.cpp
dom/media/TextTrackRegion.h
dom/media/ThreadPoolCOMListener.cpp
dom/media/ThreadPoolCOMListener.h
dom/media/TimeUnits.h
dom/media/Tracing.cpp
dom/media/Tracing.h
dom/media/TrackID.h
dom/media/TrackUnionStream.cpp
dom/media/TrackUnionStream.h
dom/media/VideoFrameContainer.cpp
dom/media/VideoFrameContainer.h
dom/media/VideoLimits.h
dom/media/VideoPlaybackQuality.cpp
dom/media/VideoPlaybackQuality.h
dom/media/VideoSegment.cpp
dom/media/VideoSegment.h
dom/media/VideoStreamTrack.cpp
dom/media/VideoStreamTrack.h
dom/media/VideoTrack.cpp
dom/media/VideoTrack.h
dom/media/VideoTrackList.cpp
dom/media/VideoTrackList.h
dom/media/VideoUtils.cpp
dom/media/VideoUtils.h
dom/media/VorbisUtils.h
dom/media/WebMSample.h
dom/media/WebVTTListener.cpp
dom/media/WebVTTListener.h
dom/media/XiphExtradata.cpp
dom/media/XiphExtradata.h
dom/media/bridge/MediaModule.cpp
dom/media/doctor/DDLifetime.cpp
dom/media/doctor/DDLifetime.h
dom/media/doctor/DDLifetimes.cpp
dom/media/doctor/DDLifetimes.h
dom/media/doctor/DDLogCategory.cpp
dom/media/doctor/DDLogCategory.h
dom/media/doctor/DDLogMessage.cpp
dom/media/doctor/DDLogMessage.h
dom/media/doctor/DDLogObject.cpp
dom/media/doctor/DDLogObject.h
dom/media/doctor/DDLogUtils.h
dom/media/doctor/DDLogValue.cpp
dom/media/doctor/DDLogValue.h
dom/media/doctor/DDLoggedTypeTraits.h
dom/media/doctor/DDMediaLog.cpp
dom/media/doctor/DDMediaLog.h
dom/media/doctor/DDMediaLogs.cpp
dom/media/doctor/DDMediaLogs.h
dom/media/doctor/DDMessageIndex.h
dom/media/doctor/DDTimeStamp.cpp
dom/media/doctor/DDTimeStamp.h
dom/media/doctor/DecoderDoctorDiagnostics.cpp
dom/media/doctor/DecoderDoctorDiagnostics.h
dom/media/doctor/DecoderDoctorLogger.cpp
dom/media/doctor/DecoderDoctorLogger.h
dom/media/doctor/MultiWriterQueue.h
dom/media/doctor/RollingNumber.h
dom/media/doctor/gtest/TestMultiWriterQueue.cpp
dom/media/doctor/gtest/TestRollingNumber.cpp
dom/media/eme/CDMCaps.cpp
dom/media/eme/CDMCaps.h
dom/media/eme/CDMProxy.h
dom/media/eme/DataMutex.h
dom/media/eme/DecryptorProxyCallback.h
dom/media/eme/DetailedPromise.cpp
dom/media/eme/DetailedPromise.h
dom/media/eme/EMEUtils.cpp
dom/media/eme/EMEUtils.h
dom/media/eme/MediaEncryptedEvent.cpp
dom/media/eme/MediaEncryptedEvent.h
dom/media/eme/MediaKeyError.cpp
dom/media/eme/MediaKeyError.h
dom/media/eme/MediaKeyMessageEvent.cpp
dom/media/eme/MediaKeyMessageEvent.h
dom/media/eme/MediaKeySession.cpp
dom/media/eme/MediaKeySession.h
dom/media/eme/MediaKeyStatusMap.cpp
dom/media/eme/MediaKeyStatusMap.h
dom/media/eme/MediaKeySystemAccess.cpp
dom/media/eme/MediaKeySystemAccess.h
dom/media/eme/MediaKeySystemAccessManager.cpp
dom/media/eme/MediaKeySystemAccessManager.h
dom/media/eme/MediaKeys.cpp
dom/media/eme/MediaKeys.h
dom/media/eme/mediadrm/MediaDrmCDMCallbackProxy.cpp
dom/media/eme/mediadrm/MediaDrmCDMCallbackProxy.h
dom/media/eme/mediadrm/MediaDrmCDMProxy.cpp
dom/media/eme/mediadrm/MediaDrmCDMProxy.h
dom/media/eme/mediadrm/MediaDrmProxySupport.cpp
dom/media/eme/mediadrm/MediaDrmProxySupport.h
dom/media/encoder/ContainerWriter.h
dom/media/encoder/EncodedFrameContainer.h
dom/media/encoder/MediaEncoder.cpp
dom/media/encoder/MediaEncoder.h
dom/media/encoder/OpusTrackEncoder.cpp
dom/media/encoder/OpusTrackEncoder.h
dom/media/encoder/TrackEncoder.cpp
dom/media/encoder/TrackEncoder.h
dom/media/encoder/TrackMetadataBase.h
dom/media/encoder/VP8TrackEncoder.cpp
dom/media/encoder/VP8TrackEncoder.h
dom/media/fake-cdm/cdm-fake.cpp
dom/media/fake-cdm/cdm-test-decryptor.cpp
dom/media/fake-cdm/cdm-test-decryptor.h
dom/media/fake-cdm/cdm-test-output-protection.h
dom/media/fake-cdm/cdm-test-storage.cpp
dom/media/fake-cdm/cdm-test-storage.h
dom/media/flac/FlacDecoder.cpp
dom/media/flac/FlacDecoder.h
dom/media/flac/FlacDemuxer.cpp
dom/media/flac/FlacDemuxer.h
dom/media/flac/FlacFrameParser.cpp
dom/media/flac/FlacFrameParser.h
dom/media/gmp-plugin-openh264/gmp-fake-openh264.cpp
dom/media/gmp/CDMStorageIdProvider.cpp
dom/media/gmp/CDMStorageIdProvider.h
dom/media/gmp/ChromiumCDMAdapter.cpp
dom/media/gmp/ChromiumCDMAdapter.h
dom/media/gmp/ChromiumCDMCallback.h
dom/media/gmp/ChromiumCDMCallbackProxy.cpp
dom/media/gmp/ChromiumCDMCallbackProxy.h
dom/media/gmp/ChromiumCDMChild.cpp
dom/media/gmp/ChromiumCDMChild.h
dom/media/gmp/ChromiumCDMParent.cpp
dom/media/gmp/ChromiumCDMParent.h
dom/media/gmp/ChromiumCDMProxy.cpp
dom/media/gmp/ChromiumCDMProxy.h
dom/media/gmp/DecryptJob.cpp
dom/media/gmp/DecryptJob.h
dom/media/gmp/GMPCallbackBase.h
dom/media/gmp/GMPChild.cpp
dom/media/gmp/GMPChild.h
dom/media/gmp/GMPContentChild.cpp
dom/media/gmp/GMPContentChild.h
dom/media/gmp/GMPContentParent.cpp
dom/media/gmp/GMPContentParent.h
dom/media/gmp/GMPCrashHelper.cpp
dom/media/gmp/GMPCrashHelper.h
dom/media/gmp/GMPCrashHelperHolder.cpp
dom/media/gmp/GMPCrashHelperHolder.h
dom/media/gmp/GMPDiskStorage.cpp
dom/media/gmp/GMPLoader.cpp
dom/media/gmp/GMPLoader.h
dom/media/gmp/GMPLog.h
dom/media/gmp/GMPMemoryStorage.cpp
dom/media/gmp/GMPMessageUtils.h
dom/media/gmp/GMPParent.cpp
dom/media/gmp/GMPParent.h
dom/media/gmp/GMPPlatform.cpp
dom/media/gmp/GMPPlatform.h
dom/media/gmp/GMPProcessChild.cpp
dom/media/gmp/GMPProcessChild.h
dom/media/gmp/GMPProcessParent.cpp
dom/media/gmp/GMPProcessParent.h
dom/media/gmp/GMPService.cpp
dom/media/gmp/GMPService.h
dom/media/gmp/GMPServiceChild.cpp
dom/media/gmp/GMPServiceChild.h
dom/media/gmp/GMPServiceParent.cpp
dom/media/gmp/GMPServiceParent.h
dom/media/gmp/GMPSharedMemManager.cpp
dom/media/gmp/GMPSharedMemManager.h
dom/media/gmp/GMPStorage.h
dom/media/gmp/GMPStorageChild.cpp
dom/media/gmp/GMPStorageChild.h
dom/media/gmp/GMPStorageParent.cpp
dom/media/gmp/GMPStorageParent.h
dom/media/gmp/GMPTimerChild.cpp
dom/media/gmp/GMPTimerChild.h
dom/media/gmp/GMPTimerParent.cpp
dom/media/gmp/GMPTimerParent.h
dom/media/gmp/GMPUtils.cpp
dom/media/gmp/GMPUtils.h
dom/media/gmp/GMPVideoDecoderChild.cpp
dom/media/gmp/GMPVideoDecoderChild.h
dom/media/gmp/GMPVideoDecoderParent.cpp
dom/media/gmp/GMPVideoDecoderParent.h
dom/media/gmp/GMPVideoDecoderProxy.h
dom/media/gmp/GMPVideoEncodedFrameImpl.cpp
dom/media/gmp/GMPVideoEncodedFrameImpl.h
dom/media/gmp/GMPVideoEncoderChild.cpp
dom/media/gmp/GMPVideoEncoderChild.h
dom/media/gmp/GMPVideoEncoderParent.cpp
dom/media/gmp/GMPVideoEncoderParent.h
dom/media/gmp/GMPVideoEncoderProxy.h
dom/media/gmp/GMPVideoHost.cpp
dom/media/gmp/GMPVideoHost.h
dom/media/gmp/GMPVideoPlaneImpl.cpp
dom/media/gmp/GMPVideoPlaneImpl.h
dom/media/gmp/GMPVideoi420FrameImpl.cpp
dom/media/gmp/GMPVideoi420FrameImpl.h
dom/media/gmp/gmp-api/gmp-entrypoints.h
dom/media/gmp/gmp-api/gmp-errors.h
dom/media/gmp/gmp-api/gmp-platform.h
dom/media/gmp/gmp-api/gmp-storage.h
dom/media/gmp/gmp-api/gmp-video-codec.h
dom/media/gmp/gmp-api/gmp-video-decode.h
dom/media/gmp/gmp-api/gmp-video-encode.h
dom/media/gmp/gmp-api/gmp-video-frame-encoded.h
dom/media/gmp/gmp-api/gmp-video-frame-i420.h
dom/media/gmp/gmp-api/gmp-video-frame.h
dom/media/gmp/gmp-api/gmp-video-host.h
dom/media/gmp/gmp-api/gmp-video-plane.h
dom/media/gmp/widevine-adapter/WidevineFileIO.cpp
dom/media/gmp/widevine-adapter/WidevineFileIO.h
dom/media/gmp/widevine-adapter/WidevineUtils.cpp
dom/media/gmp/widevine-adapter/WidevineUtils.h
dom/media/gmp/widevine-adapter/WidevineVideoFrame.cpp
dom/media/gmp/widevine-adapter/WidevineVideoFrame.h
dom/media/hls/HLSDecoder.cpp
dom/media/hls/HLSDecoder.h
dom/media/hls/HLSDemuxer.cpp
dom/media/hls/HLSDemuxer.h
dom/media/hls/HLSUtils.cpp
dom/media/hls/HLSUtils.h
dom/media/imagecapture/CaptureTask.cpp
dom/media/imagecapture/CaptureTask.h
dom/media/imagecapture/ImageCapture.cpp
dom/media/imagecapture/ImageCapture.h
dom/media/ipc/GpuDecoderModule.cpp
dom/media/ipc/GpuDecoderModule.h
dom/media/ipc/IRemoteDecoderChild.h
dom/media/ipc/MediaIPCUtils.h
dom/media/ipc/RDDChild.cpp
dom/media/ipc/RDDChild.h
dom/media/ipc/RDDParent.cpp
dom/media/ipc/RDDParent.h
dom/media/ipc/RDDProcessHost.cpp
dom/media/ipc/RDDProcessHost.h
dom/media/ipc/RDDProcessImpl.cpp
dom/media/ipc/RDDProcessImpl.h
dom/media/ipc/RDDProcessManager.cpp
dom/media/ipc/RDDProcessManager.h
dom/media/ipc/RemoteDecoderManagerChild.cpp
dom/media/ipc/RemoteDecoderManagerChild.h
dom/media/ipc/RemoteDecoderManagerParent.cpp
dom/media/ipc/RemoteDecoderManagerParent.h
dom/media/ipc/RemoteDecoderModule.cpp
dom/media/ipc/RemoteDecoderModule.h
dom/media/ipc/RemoteMediaDataDecoder.cpp
dom/media/ipc/RemoteMediaDataDecoder.h
dom/media/ipc/RemoteVideoDecoderChild.cpp
dom/media/ipc/RemoteVideoDecoderChild.h
dom/media/ipc/RemoteVideoDecoderParent.cpp
dom/media/ipc/RemoteVideoDecoderParent.h
dom/media/ipc/VideoDecoderChild.cpp
dom/media/ipc/VideoDecoderChild.h
dom/media/ipc/VideoDecoderManagerChild.cpp
dom/media/ipc/VideoDecoderManagerChild.h
dom/media/ipc/VideoDecoderManagerParent.cpp
dom/media/ipc/VideoDecoderManagerParent.h
dom/media/ipc/VideoDecoderParent.cpp
dom/media/ipc/VideoDecoderParent.h
dom/media/mediacapabilities/MediaCapabilities.cpp
dom/media/mediacapabilities/MediaCapabilities.h
dom/media/mediasink/AudioSink.cpp
dom/media/mediasink/AudioSink.h
dom/media/mediasink/AudioSinkWrapper.cpp
dom/media/mediasink/AudioSinkWrapper.h
dom/media/mediasink/DecodedStream.cpp
dom/media/mediasink/DecodedStream.h
dom/media/mediasink/MediaSink.h
dom/media/mediasink/OutputStreamManager.cpp
dom/media/mediasink/OutputStreamManager.h
dom/media/mediasink/VideoSink.cpp
dom/media/mediasink/VideoSink.h
dom/media/mediasource/AsyncEventRunner.h
dom/media/mediasource/ContainerParser.cpp
dom/media/mediasource/ContainerParser.h
dom/media/mediasource/MediaSource.cpp
dom/media/mediasource/MediaSource.h
dom/media/mediasource/MediaSourceDecoder.cpp
dom/media/mediasource/MediaSourceDecoder.h
dom/media/mediasource/MediaSourceDemuxer.cpp
dom/media/mediasource/MediaSourceDemuxer.h
dom/media/mediasource/MediaSourceUtils.cpp
dom/media/mediasource/MediaSourceUtils.h
dom/media/mediasource/ResourceQueue.cpp
dom/media/mediasource/ResourceQueue.h
dom/media/mediasource/SourceBuffer.cpp
dom/media/mediasource/SourceBuffer.h
dom/media/mediasource/SourceBufferAttributes.h
dom/media/mediasource/SourceBufferList.cpp
dom/media/mediasource/SourceBufferList.h
dom/media/mediasource/SourceBufferResource.cpp
dom/media/mediasource/SourceBufferResource.h
dom/media/mediasource/SourceBufferTask.h
dom/media/mediasource/TrackBuffersManager.cpp
dom/media/mediasource/TrackBuffersManager.h
dom/media/mediasource/gtest/TestContainerParser.cpp
dom/media/mediasource/gtest/TestExtractVPXCodecDetails.cpp
dom/media/mp3/MP3Decoder.cpp
dom/media/mp3/MP3Decoder.h
dom/media/mp3/MP3Demuxer.cpp
dom/media/mp3/MP3Demuxer.h
dom/media/mp3/MP3FrameParser.cpp
dom/media/mp3/MP3FrameParser.h
dom/media/mp4/Atom.h
dom/media/mp4/AtomType.h
dom/media/mp4/Box.cpp
dom/media/mp4/Box.h
dom/media/mp4/BufferStream.cpp
dom/media/mp4/BufferStream.h
dom/media/mp4/ByteStream.h
dom/media/mp4/DecoderData.cpp
dom/media/mp4/DecoderData.h
dom/media/mp4/Index.cpp
dom/media/mp4/Index.h
dom/media/mp4/MP4Decoder.cpp
dom/media/mp4/MP4Decoder.h
dom/media/mp4/MP4Demuxer.cpp
dom/media/mp4/MP4Demuxer.h
dom/media/mp4/MP4Interval.h
dom/media/mp4/MP4Metadata.cpp
dom/media/mp4/MP4Metadata.h
dom/media/mp4/MoofParser.cpp
dom/media/mp4/MoofParser.h
dom/media/mp4/ResourceStream.cpp
dom/media/mp4/ResourceStream.h
dom/media/mp4/SinfParser.cpp
dom/media/mp4/SinfParser.h
dom/media/nsIDocumentActivity.h
dom/media/ogg/OggCodecState.cpp
dom/media/ogg/OggCodecState.h
dom/media/ogg/OggCodecStore.cpp
dom/media/ogg/OggCodecStore.h
dom/media/ogg/OggDecoder.cpp
dom/media/ogg/OggDecoder.h
dom/media/ogg/OggDemuxer.cpp
dom/media/ogg/OggDemuxer.h
dom/media/ogg/OggWriter.cpp
dom/media/ogg/OggWriter.h
dom/media/ogg/OpusParser.cpp
dom/media/ogg/OpusParser.h
dom/media/platforms/AllocationPolicy.cpp
dom/media/platforms/AllocationPolicy.h
dom/media/platforms/MediaTelemetryConstants.h
dom/media/platforms/PDMFactory.cpp
dom/media/platforms/PDMFactory.h
dom/media/platforms/PlatformDecoderModule.h
dom/media/platforms/ReorderQueue.h
dom/media/platforms/SimpleMap.h
dom/media/platforms/agnostic/AOMDecoder.cpp
dom/media/platforms/agnostic/AOMDecoder.h
dom/media/platforms/agnostic/AgnosticDecoderModule.cpp
dom/media/platforms/agnostic/AgnosticDecoderModule.h
dom/media/platforms/agnostic/BlankDecoderModule.cpp
dom/media/platforms/agnostic/BlankDecoderModule.h
dom/media/platforms/agnostic/DummyMediaDataDecoder.cpp
dom/media/platforms/agnostic/DummyMediaDataDecoder.h
dom/media/platforms/agnostic/NullDecoderModule.cpp
dom/media/platforms/agnostic/OpusDecoder.cpp
dom/media/platforms/agnostic/OpusDecoder.h
dom/media/platforms/agnostic/TheoraDecoder.cpp
dom/media/platforms/agnostic/TheoraDecoder.h
dom/media/platforms/agnostic/VPXDecoder.cpp
dom/media/platforms/agnostic/VPXDecoder.h
dom/media/platforms/agnostic/VorbisDecoder.cpp
dom/media/platforms/agnostic/VorbisDecoder.h
dom/media/platforms/agnostic/WAVDecoder.cpp
dom/media/platforms/agnostic/WAVDecoder.h
dom/media/platforms/agnostic/bytestreams/Adts.cpp
dom/media/platforms/agnostic/bytestreams/Adts.h
dom/media/platforms/agnostic/bytestreams/AnnexB.cpp
dom/media/platforms/agnostic/bytestreams/AnnexB.h
dom/media/platforms/agnostic/bytestreams/H264.cpp
dom/media/platforms/agnostic/bytestreams/H264.h
dom/media/platforms/agnostic/eme/ChromiumCDMVideoDecoder.cpp
dom/media/platforms/agnostic/eme/ChromiumCDMVideoDecoder.h
dom/media/platforms/agnostic/eme/DecryptThroughputLimit.h
dom/media/platforms/agnostic/eme/EMEDecoderModule.cpp
dom/media/platforms/agnostic/eme/EMEDecoderModule.h
dom/media/platforms/agnostic/eme/SamplesWaitingForKey.cpp
dom/media/platforms/agnostic/eme/SamplesWaitingForKey.h
dom/media/platforms/agnostic/gmp/GMPDecoderModule.cpp
dom/media/platforms/agnostic/gmp/GMPDecoderModule.h
dom/media/platforms/agnostic/gmp/GMPVideoDecoder.cpp
dom/media/platforms/agnostic/gmp/GMPVideoDecoder.h
dom/media/platforms/android/AndroidDecoderModule.cpp
dom/media/platforms/android/AndroidDecoderModule.h
dom/media/platforms/android/JavaCallbacksSupport.h
dom/media/platforms/android/RemoteDataDecoder.cpp
dom/media/platforms/android/RemoteDataDecoder.h
dom/media/platforms/apple/AppleATDecoder.cpp
dom/media/platforms/apple/AppleATDecoder.h
dom/media/platforms/apple/AppleCMLinker.cpp
dom/media/platforms/apple/AppleCMLinker.h
dom/media/platforms/apple/AppleDecoderModule.cpp
dom/media/platforms/apple/AppleDecoderModule.h
dom/media/platforms/apple/AppleUtils.h
dom/media/platforms/apple/AppleVTDecoder.cpp
dom/media/platforms/apple/AppleVTDecoder.h
dom/media/platforms/apple/AppleVTLinker.cpp
dom/media/platforms/apple/AppleVTLinker.h
dom/media/platforms/apple/VideoToolbox/VideoToolbox.h
dom/media/platforms/ffmpeg/FFmpegAudioDecoder.cpp
dom/media/platforms/ffmpeg/FFmpegAudioDecoder.h
dom/media/platforms/ffmpeg/FFmpegDataDecoder.cpp
dom/media/platforms/ffmpeg/FFmpegDataDecoder.h
dom/media/platforms/ffmpeg/FFmpegDecoderModule.cpp
dom/media/platforms/ffmpeg/FFmpegDecoderModule.h
dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
dom/media/platforms/ffmpeg/FFmpegLibWrapper.h
dom/media/platforms/ffmpeg/FFmpegLibs.h
dom/media/platforms/ffmpeg/FFmpegLog.h
dom/media/platforms/ffmpeg/FFmpegRDFTTypes.h
dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp
dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.h
dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/avcodec.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/avfft.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/vaapi.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/vdpau.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavcodec/version.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/attributes.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/avutil.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/buffer.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/channel_layout.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/common.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/cpu.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/dict.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/error.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/frame.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/intfloat.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/log.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/macros.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/mathematics.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/mem.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/pixfmt.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/rational.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/samplefmt.h
dom/media/platforms/ffmpeg/ffmpeg57/include/libavutil/version.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavcodec/avcodec.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavcodec/avfft.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavcodec/vaapi.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavcodec/vdpau.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavcodec/version.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/attributes.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/avutil.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/buffer.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/channel_layout.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/common.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/cpu.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/dict.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/error.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/frame.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/hwcontext.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/intfloat.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/log.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/macros.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/mathematics.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/mem.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/pixfmt.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/rational.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/samplefmt.h
dom/media/platforms/ffmpeg/ffmpeg58/include/libavutil/version.h
dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.cpp
dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.h
dom/media/platforms/omx/OmxCoreLibLinker.cpp
dom/media/platforms/omx/OmxCoreLibLinker.h
dom/media/platforms/omx/OmxDataDecoder.cpp
dom/media/platforms/omx/OmxDataDecoder.h
dom/media/platforms/omx/OmxDecoderModule.cpp
dom/media/platforms/omx/OmxDecoderModule.h
dom/media/platforms/omx/OmxPlatformLayer.cpp
dom/media/platforms/omx/OmxPlatformLayer.h
dom/media/platforms/omx/OmxPromiseLayer.cpp
dom/media/platforms/omx/OmxPromiseLayer.h
dom/media/platforms/omx/PureOmxPlatformLayer.cpp
dom/media/platforms/omx/PureOmxPlatformLayer.h
dom/media/platforms/wmf/DXVA2Manager.cpp
dom/media/platforms/wmf/DXVA2Manager.h
dom/media/platforms/wmf/MFTDecoder.cpp
dom/media/platforms/wmf/MFTDecoder.h
dom/media/platforms/wmf/WMF.h
dom/media/platforms/wmf/WMFAudioMFTManager.cpp
dom/media/platforms/wmf/WMFAudioMFTManager.h
dom/media/platforms/wmf/WMFDecoderModule.cpp
dom/media/platforms/wmf/WMFDecoderModule.h
dom/media/platforms/wmf/WMFMediaDataDecoder.cpp
dom/media/platforms/wmf/WMFMediaDataDecoder.h
dom/media/platforms/wmf/WMFUtils.cpp
dom/media/platforms/wmf/WMFUtils.h
dom/media/platforms/wmf/WMFVideoMFTManager.cpp
dom/media/platforms/wmf/WMFVideoMFTManager.h
dom/media/platforms/wrappers/MediaChangeMonitor.cpp
dom/media/platforms/wrappers/MediaChangeMonitor.h
dom/media/platforms/wrappers/MediaDataDecoderProxy.cpp
dom/media/platforms/wrappers/MediaDataDecoderProxy.h
dom/media/systemservices/CamerasChild.cpp
dom/media/systemservices/CamerasChild.h
dom/media/systemservices/CamerasParent.cpp
dom/media/systemservices/CamerasParent.h
dom/media/systemservices/CamerasTypes.h
dom/media/systemservices/DeviceChangeCallback.h
dom/media/systemservices/MediaChild.cpp
dom/media/systemservices/MediaChild.h
dom/media/systemservices/MediaParent.cpp
dom/media/systemservices/MediaParent.h
dom/media/systemservices/MediaSystemResourceClient.cpp
dom/media/systemservices/MediaSystemResourceClient.h
dom/media/systemservices/MediaSystemResourceManager.cpp
dom/media/systemservices/MediaSystemResourceManager.h
dom/media/systemservices/MediaSystemResourceManagerChild.cpp
dom/media/systemservices/MediaSystemResourceManagerChild.h
dom/media/systemservices/MediaSystemResourceManagerParent.cpp
dom/media/systemservices/MediaSystemResourceManagerParent.h
dom/media/systemservices/MediaSystemResourceMessageUtils.h
dom/media/systemservices/MediaSystemResourceService.cpp
dom/media/systemservices/MediaSystemResourceService.h
dom/media/systemservices/MediaSystemResourceTypes.h
dom/media/systemservices/MediaTaskUtils.h
dom/media/systemservices/MediaUtils.cpp
dom/media/systemservices/MediaUtils.h
dom/media/systemservices/OSXRunLoopSingleton.cpp
dom/media/systemservices/OSXRunLoopSingleton.h
dom/media/systemservices/OpenSLESProvider.cpp
dom/media/systemservices/OpenSLESProvider.h
dom/media/systemservices/ShmemPool.cpp
dom/media/systemservices/ShmemPool.h
dom/media/systemservices/VideoEngine.cpp
dom/media/systemservices/VideoEngine.h
dom/media/systemservices/VideoFrameUtils.cpp
dom/media/systemservices/VideoFrameUtils.h
dom/media/wave/WaveDecoder.cpp
dom/media/wave/WaveDecoder.h
dom/media/wave/WaveDemuxer.cpp
dom/media/wave/WaveDemuxer.h
dom/media/webaudio/AlignedTArray.h
dom/media/webaudio/AlignmentUtils.h
dom/media/webaudio/AnalyserNode.cpp
dom/media/webaudio/AnalyserNode.h
dom/media/webaudio/AudioBlock.cpp
dom/media/webaudio/AudioBlock.h
dom/media/webaudio/AudioBuffer.cpp
dom/media/webaudio/AudioBuffer.h
dom/media/webaudio/AudioBufferSourceNode.cpp
dom/media/webaudio/AudioBufferSourceNode.h
dom/media/webaudio/AudioContext.cpp
dom/media/webaudio/AudioContext.h
dom/media/webaudio/AudioDestinationNode.cpp
dom/media/webaudio/AudioDestinationNode.h
dom/media/webaudio/AudioEventTimeline.cpp
dom/media/webaudio/AudioEventTimeline.h
dom/media/webaudio/AudioListener.cpp
dom/media/webaudio/AudioListener.h
dom/media/webaudio/AudioNode.cpp
dom/media/webaudio/AudioNode.h
dom/media/webaudio/AudioNodeEngine.cpp
dom/media/webaudio/AudioNodeEngine.h
dom/media/webaudio/AudioNodeEngineNEON.cpp
dom/media/webaudio/AudioNodeEngineNEON.h
dom/media/webaudio/AudioNodeEngineSSE2.cpp
dom/media/webaudio/AudioNodeEngineSSE2.h
dom/media/webaudio/AudioNodeExternalInputStream.cpp
dom/media/webaudio/AudioNodeExternalInputStream.h
dom/media/webaudio/AudioNodeStream.cpp
dom/media/webaudio/AudioNodeStream.h
dom/media/webaudio/AudioParam.cpp
dom/media/webaudio/AudioParam.h
dom/media/webaudio/AudioParamMap.cpp
dom/media/webaudio/AudioParamMap.h
dom/media/webaudio/AudioParamTimeline.h
dom/media/webaudio/AudioProcessingEvent.cpp
dom/media/webaudio/AudioProcessingEvent.h
dom/media/webaudio/AudioScheduledSourceNode.cpp
dom/media/webaudio/AudioScheduledSourceNode.h
dom/media/webaudio/AudioWorkletGlobalScope.cpp
dom/media/webaudio/AudioWorkletGlobalScope.h
dom/media/webaudio/AudioWorkletImpl.cpp
dom/media/webaudio/AudioWorkletImpl.h
dom/media/webaudio/AudioWorkletNode.cpp
dom/media/webaudio/AudioWorkletNode.h
dom/media/webaudio/AudioWorkletProcessor.cpp
dom/media/webaudio/AudioWorkletProcessor.h
dom/media/webaudio/BiquadFilterNode.cpp
dom/media/webaudio/BiquadFilterNode.h
dom/media/webaudio/ChannelMergerNode.cpp
dom/media/webaudio/ChannelMergerNode.h
dom/media/webaudio/ChannelSplitterNode.cpp
dom/media/webaudio/ChannelSplitterNode.h
dom/media/webaudio/ConstantSourceNode.cpp
dom/media/webaudio/ConstantSourceNode.h
dom/media/webaudio/ConvolverNode.cpp
dom/media/webaudio/ConvolverNode.h
dom/media/webaudio/DelayBuffer.cpp
dom/media/webaudio/DelayBuffer.h
dom/media/webaudio/DelayNode.cpp
dom/media/webaudio/DelayNode.h
dom/media/webaudio/DynamicsCompressorNode.cpp
dom/media/webaudio/DynamicsCompressorNode.h
dom/media/webaudio/FFTBlock.h
dom/media/webaudio/GainNode.cpp
dom/media/webaudio/GainNode.h
dom/media/webaudio/IIRFilterNode.cpp
dom/media/webaudio/IIRFilterNode.h
dom/media/webaudio/MediaBufferDecoder.cpp
dom/media/webaudio/MediaBufferDecoder.h
dom/media/webaudio/MediaElementAudioSourceNode.cpp
dom/media/webaudio/MediaElementAudioSourceNode.h
dom/media/webaudio/MediaStreamAudioDestinationNode.cpp
dom/media/webaudio/MediaStreamAudioDestinationNode.h
dom/media/webaudio/MediaStreamAudioSourceNode.cpp
dom/media/webaudio/MediaStreamAudioSourceNode.h
dom/media/webaudio/OscillatorNode.cpp
dom/media/webaudio/OscillatorNode.h
dom/media/webaudio/PannerNode.cpp
dom/media/webaudio/PannerNode.h
dom/media/webaudio/PanningUtils.h
dom/media/webaudio/PeriodicWave.cpp
dom/media/webaudio/PeriodicWave.h
dom/media/webaudio/PlayingRefChangeHandler.h
dom/media/webaudio/ReportDecodeResultTask.h
dom/media/webaudio/ScriptProcessorNode.cpp
dom/media/webaudio/ScriptProcessorNode.h
dom/media/webaudio/StereoPannerNode.cpp
dom/media/webaudio/StereoPannerNode.h
dom/media/webaudio/ThreeDPoint.cpp
dom/media/webaudio/ThreeDPoint.h
dom/media/webaudio/WaveShaperNode.cpp
dom/media/webaudio/WaveShaperNode.h
dom/media/webaudio/WebAudioUtils.cpp
dom/media/webaudio/WebAudioUtils.h
dom/media/webm/EbmlComposer.cpp
dom/media/webm/EbmlComposer.h
dom/media/webm/NesteggPacketHolder.h
dom/media/webm/WebMBufferedParser.cpp
dom/media/webm/WebMBufferedParser.h
dom/media/webm/WebMDecoder.cpp
dom/media/webm/WebMDecoder.h
dom/media/webm/WebMDemuxer.cpp
dom/media/webm/WebMDemuxer.h
dom/media/webm/WebMWriter.cpp
dom/media/webm/WebMWriter.h
dom/media/webrtc/AllocationHandle.h
dom/media/webrtc/CubebDeviceEnumerator.cpp
dom/media/webrtc/CubebDeviceEnumerator.h
dom/media/webrtc/MediaEngine.h
dom/media/webrtc/MediaEngineDefault.cpp
dom/media/webrtc/MediaEngineDefault.h
dom/media/webrtc/MediaEnginePrefs.h
dom/media/webrtc/MediaEngineRemoteVideoSource.cpp
dom/media/webrtc/MediaEngineRemoteVideoSource.h
dom/media/webrtc/MediaEngineSource.cpp
dom/media/webrtc/MediaEngineSource.h
dom/media/webrtc/MediaEngineTabVideoSource.cpp
dom/media/webrtc/MediaEngineTabVideoSource.h
dom/media/webrtc/MediaEngineWebRTC.cpp
dom/media/webrtc/MediaEngineWebRTC.h
dom/media/webrtc/MediaEngineWebRTCAudio.cpp
dom/media/webrtc/MediaEngineWebRTCAudio.h
dom/media/webrtc/MediaTrackConstraints.cpp
dom/media/webrtc/MediaTrackConstraints.h
dom/media/webrtc/PeerIdentity.cpp
dom/media/webrtc/PeerIdentity.h
dom/media/webrtc/RTCCertificate.cpp
dom/media/webrtc/RTCCertificate.h
dom/media/webrtc/RTCIdentityProviderRegistrar.cpp
dom/media/webrtc/RTCIdentityProviderRegistrar.h
dom/media/webrtc/SineWaveGenerator.h
dom/media/webrtc/WebrtcGlobal.h
dom/media/webspeech/recognition/SpeechGrammar.cpp
dom/media/webspeech/recognition/SpeechGrammar.h
dom/media/webspeech/recognition/SpeechGrammarList.cpp
dom/media/webspeech/recognition/SpeechGrammarList.h
dom/media/webspeech/recognition/SpeechRecognition.cpp
dom/media/webspeech/recognition/SpeechRecognition.h
dom/media/webspeech/recognition/SpeechRecognitionAlternative.cpp
dom/media/webspeech/recognition/SpeechRecognitionAlternative.h
dom/media/webspeech/recognition/SpeechRecognitionResult.cpp
dom/media/webspeech/recognition/SpeechRecognitionResult.h
dom/media/webspeech/recognition/SpeechRecognitionResultList.cpp
dom/media/webspeech/recognition/SpeechRecognitionResultList.h
dom/media/webspeech/recognition/SpeechStreamListener.cpp
dom/media/webspeech/recognition/SpeechStreamListener.h
dom/media/webspeech/recognition/test/FakeSpeechRecognitionService.cpp
dom/media/webspeech/recognition/test/FakeSpeechRecognitionService.h
dom/media/webspeech/synth/SpeechSynthesis.cpp
dom/media/webspeech/synth/SpeechSynthesis.h
dom/media/webspeech/synth/SpeechSynthesisUtterance.cpp
dom/media/webspeech/synth/SpeechSynthesisUtterance.h
dom/media/webspeech/synth/SpeechSynthesisVoice.cpp
dom/media/webspeech/synth/SpeechSynthesisVoice.h
dom/media/webspeech/synth/android/AndroidSpeechModule.cpp
dom/media/webspeech/synth/android/SpeechSynthesisService.cpp
dom/media/webspeech/synth/android/SpeechSynthesisService.h
dom/media/webspeech/synth/cocoa/OSXSpeechSynthesizerModule.cpp
dom/media/webspeech/synth/cocoa/OSXSpeechSynthesizerService.h
dom/media/webspeech/synth/ipc/SpeechSynthesisChild.cpp
dom/media/webspeech/synth/ipc/SpeechSynthesisChild.h
dom/media/webspeech/synth/ipc/SpeechSynthesisParent.cpp
dom/media/webspeech/synth/ipc/SpeechSynthesisParent.h
dom/media/webspeech/synth/nsSpeechTask.cpp
dom/media/webspeech/synth/nsSpeechTask.h
dom/media/webspeech/synth/nsSynthVoiceRegistry.cpp
dom/media/webspeech/synth/nsSynthVoiceRegistry.h
dom/media/webspeech/synth/speechd/SpeechDispatcherModule.cpp
dom/media/webspeech/synth/speechd/SpeechDispatcherService.cpp
dom/media/webspeech/synth/speechd/SpeechDispatcherService.h
dom/media/webspeech/synth/test/FakeSynthModule.cpp
dom/media/webspeech/synth/test/nsFakeSynthServices.cpp
dom/media/webspeech/synth/test/nsFakeSynthServices.h
dom/media/webspeech/synth/windows/SapiModule.cpp
dom/media/webspeech/synth/windows/SapiService.cpp
dom/media/webspeech/synth/windows/SapiService.h
--- a/dom/media/ADTSDecoder.cpp
+++ b/dom/media/ADTSDecoder.cpp
@@ -5,45 +5,41 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "ADTSDecoder.h"
 #include "MediaContainerType.h"
 #include "PDMFactory.h"
 
 namespace mozilla {
 
-/* static */ bool
-ADTSDecoder::IsEnabled()
-{
+/* static */ bool ADTSDecoder::IsEnabled() {
   RefPtr<PDMFactory> platform = new PDMFactory();
   return platform->SupportsMimeType(NS_LITERAL_CSTRING("audio/mp4a-latm"),
                                     /* DecoderDoctorDiagnostics* */ nullptr);
 }
 
-/* static */ bool
-ADTSDecoder::IsSupportedType(const MediaContainerType& aContainerType)
-{
+/* static */ bool ADTSDecoder::IsSupportedType(
+    const MediaContainerType& aContainerType) {
   if (aContainerType.Type() == MEDIAMIMETYPE("audio/aac") ||
       aContainerType.Type() == MEDIAMIMETYPE("audio/aacp") ||
       aContainerType.Type() == MEDIAMIMETYPE("audio/x-aac")) {
     return IsEnabled() && (aContainerType.ExtendedType().Codecs().IsEmpty() ||
                            aContainerType.ExtendedType().Codecs() == "aac");
   }
 
   return false;
 }
 
-/* static */ nsTArray<UniquePtr<TrackInfo>>
-ADTSDecoder::GetTracksInfo(const MediaContainerType& aType)
-{
+/* static */ nsTArray<UniquePtr<TrackInfo>> ADTSDecoder::GetTracksInfo(
+    const MediaContainerType& aType) {
   nsTArray<UniquePtr<TrackInfo>> tracks;
   if (!IsSupportedType(aType)) {
     return tracks;
   }
 
   tracks.AppendElement(
-    CreateTrackInfoWithMIMETypeAndContainerTypeExtraParameters(
-      NS_LITERAL_CSTRING("audio/mp4a-latm"), aType));
+      CreateTrackInfoWithMIMETypeAndContainerTypeExtraParameters(
+          NS_LITERAL_CSTRING("audio/mp4a-latm"), aType));
 
   return tracks;
 }
 
-} // namespace mozilla
+}  // namespace mozilla
--- a/dom/media/ADTSDecoder.h
+++ b/dom/media/ADTSDecoder.h
@@ -10,22 +10,21 @@
 #include "mozilla/UniquePtr.h"
 #include "nsTArray.h"
 
 namespace mozilla {
 
 class MediaContainerType;
 class TrackInfo;
 
-class ADTSDecoder
-{
-public:
+class ADTSDecoder {
+ public:
   // Returns true if the ADTS backend is pref'ed on, and we're running on a
   // platform that is likely to have decoders for the format.
   static bool IsEnabled();
   static bool IsSupportedType(const MediaContainerType& aContainerType);
   static nsTArray<UniquePtr<TrackInfo>> GetTracksInfo(
-    const MediaContainerType& aType);
+      const MediaContainerType& aType);
 };
 
-} // namespace mozilla
+}  // namespace mozilla
 
-#endif // !ADTS_DECODER_H_
+#endif  // !ADTS_DECODER_H_
--- a/dom/media/ADTSDemuxer.cpp
+++ b/dom/media/ADTSDemuxer.cpp
@@ -1,36 +1,37 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
-* License, v. 2.0. If a copy of the MPL was not distributed with this
-* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "ADTSDemuxer.h"
 
 #include "TimeUnits.h"
 #include "VideoUtils.h"
 #include "mozilla/UniquePtr.h"
 #include <inttypes.h>
 
 extern mozilla::LazyLogModule gMediaDemuxerLog;
-#define ADTSLOG(msg, ...)                                                      \
+#define ADTSLOG(msg, ...) \
   DDMOZ_LOG(gMediaDemuxerLog, LogLevel::Debug, msg, ##__VA_ARGS__)
-#define ADTSLOGV(msg, ...)                                                     \
+#define ADTSLOGV(msg, ...) \
   DDMOZ_LOG(gMediaDemuxerLog, LogLevel::Verbose, msg, ##__VA_ARGS__)
 
 namespace mozilla {
 namespace adts {
 
 // adts::FrameHeader - Holds the ADTS frame header and its parsing
 // state.
 //
 // ADTS Frame Structure
 //
-// 11111111 1111BCCD EEFFFFGH HHIJKLMM MMMMMMMM MMMOOOOO OOOOOOPP(QQQQQQQQ QQQQQQQQ)
+// 11111111 1111BCCD EEFFFFGH HHIJKLMM MMMMMMMM MMMOOOOO OOOOOOPP(QQQQQQQQ
+// QQQQQQQQ)
 //
 // Header consists of 7 or 9 bytes(without or with CRC).
 // Letter   Length(bits)  Description
 // { sync } 12            syncword 0xFFF, all bits must be 1
 // B        1             MPEG Version: 0 for MPEG-4, 1 for MPEG-2
 // C        2             Layer: always 0
 // D        1             protection absent, Warning, set to 1 if there is no
 //                        CRC and 0 if there is CRC
@@ -41,310 +42,268 @@ namespace adts {
 // M        13            frame length, this value must include 7 or 9 bytes of
 //                        header length: FrameLength =
 //                          (ProtectionAbsent == 1 ? 7 : 9) + size(AACFrame)
 // O        11            Buffer fullness
 // P        2             Number of AAC frames(RDBs) in ADTS frame minus 1, for
 //                        maximum compatibility always use 1 AAC frame per ADTS
 //                        frame
 // Q        16            CRC if protection absent is 0
-class FrameHeader
-{
-public:
+class FrameHeader {
+ public:
   uint32_t mFrameLength;
   uint32_t mSampleRate;
   uint32_t mSamples;
   uint32_t mChannels;
-  uint8_t  mObjectType;
-  uint8_t  mSamplingIndex;
-  uint8_t  mChannelConfig;
-  uint8_t  mNumAACFrames;
-  bool     mHaveCrc;
+  uint8_t mObjectType;
+  uint8_t mSamplingIndex;
+  uint8_t mChannelConfig;
+  uint8_t mNumAACFrames;
+  bool mHaveCrc;
 
   // Returns whether aPtr matches a valid ADTS header sync marker
-  static bool MatchesSync(const uint8_t* aPtr)
-  {
+  static bool MatchesSync(const uint8_t* aPtr) {
     return aPtr[0] == 0xFF && (aPtr[1] & 0xF6) == 0xF0;
   }
 
   FrameHeader() { Reset(); }
 
   // Header size
   size_t HeaderSize() const { return (mHaveCrc) ? 9 : 7; }
 
   bool IsValid() const { return mFrameLength > 0; }
 
   // Resets the state to allow for a new parsing session.
   void Reset() { PodZero(this); }
 
   // Returns whether the byte creates a valid sequence up to this point.
-  bool Parse(const uint8_t* aPtr)
-  {
+  bool Parse(const uint8_t* aPtr) {
     const uint8_t* p = aPtr;
 
     if (!MatchesSync(p)) {
       return false;
     }
 
     // AAC has 1024 samples per frame per channel.
     mSamples = 1024;
 
     mHaveCrc = !(p[1] & 0x01);
     mObjectType = ((p[2] & 0xC0) >> 6) + 1;
     mSamplingIndex = (p[2] & 0x3C) >> 2;
     mChannelConfig = (p[2] & 0x01) << 2 | (p[3] & 0xC0) >> 6;
     mFrameLength =
-      (p[3] & 0x03) << 11 | (p[4] & 0xFF) << 3 | (p[5] & 0xE0) >> 5;
+        (p[3] & 0x03) << 11 | (p[4] & 0xFF) << 3 | (p[5] & 0xE0) >> 5;
     mNumAACFrames = (p[6] & 0x03) + 1;
 
-    static const int32_t SAMPLE_RATES[16] = {
-      96000, 88200, 64000, 48000,
-      44100, 32000, 24000, 22050,
-      16000, 12000, 11025,  8000,
-      7350
-    };
+    static const int32_t SAMPLE_RATES[16] = {96000, 88200, 64000, 48000, 44100,
+                                             32000, 24000, 22050, 16000, 12000,
+                                             11025, 8000,  7350};
     mSampleRate = SAMPLE_RATES[mSamplingIndex];
 
     MOZ_ASSERT(mChannelConfig < 8);
     mChannels = (mChannelConfig == 7) ? 8 : mChannelConfig;
 
     return true;
   }
 };
 
-
 // adts::Frame - Frame meta container used to parse and hold a frame
 // header and side info.
-class Frame
-{
-public:
-  Frame() : mOffset(0), mHeader() { }
+class Frame {
+ public:
+  Frame() : mOffset(0), mHeader() {}
 
   int64_t Offset() const { return mOffset; }
-  size_t Length() const
-  {
+  size_t Length() const {
     // TODO: If fields are zero'd when invalid, this check wouldn't be
     // necessary.
     if (!mHeader.IsValid()) {
       return 0;
     }
 
     return mHeader.mFrameLength;
   }
 
   // Returns the offset to the start of frame's raw data.
   int64_t PayloadOffset() const { return mOffset + mHeader.HeaderSize(); }
 
   // Returns the length of the frame's raw data (excluding the header) in bytes.
-  size_t PayloadLength() const
-  {
+  size_t PayloadLength() const {
     // TODO: If fields are zero'd when invalid, this check wouldn't be
     // necessary.
     if (!mHeader.IsValid()) {
       return 0;
     }
 
     return mHeader.mFrameLength - mHeader.HeaderSize();
   }
 
   // Returns the parsed frame header.
   const FrameHeader& Header() const { return mHeader; }
 
   bool IsValid() const { return mHeader.IsValid(); }
 
   // Resets the frame header and data.
-  void Reset()
-  {
+  void Reset() {
     mHeader.Reset();
     mOffset = 0;
   }
 
   // Returns whether the valid
-  bool Parse(int64_t aOffset, const uint8_t* aStart, const uint8_t* aEnd)
-  {
+  bool Parse(int64_t aOffset, const uint8_t* aStart, const uint8_t* aEnd) {
     MOZ_ASSERT(aStart && aEnd);
 
     bool found = false;
     const uint8_t* ptr = aStart;
     // Require at least 7 bytes of data at the end of the buffer for the minimum
     // ADTS frame header.
     while (ptr < aEnd - 7 && !found) {
       found = mHeader.Parse(ptr);
       ptr++;
     }
 
     mOffset = aOffset + (ptr - aStart) - 1;
 
     return found;
   }
 
-private:
+ private:
   // The offset to the start of the header.
   int64_t mOffset;
 
   // The currently parsed frame header.
   FrameHeader mHeader;
 };
 
-
-class FrameParser
-{
-public:
-
+class FrameParser {
+ public:
   // Returns the currently parsed frame. Reset via Reset or EndFrameSession.
   const Frame& CurrentFrame() const { return mFrame; }
 
-
   // Returns the first parsed frame. Reset via Reset.
   const Frame& FirstFrame() const { return mFirstFrame; }
 
   // Resets the parser. Don't use between frames as first frame data is reset.
-  void Reset()
-  {
+  void Reset() {
     EndFrameSession();
     mFirstFrame.Reset();
   }
 
   // Clear the last parsed frame to allow for next frame parsing, i.e.:
   // - sets PrevFrame to CurrentFrame
   // - resets the CurrentFrame
   // - resets ID3Header if no valid header was parsed yet
-  void EndFrameSession()
-  {
-    mFrame.Reset();
-  }
+  void EndFrameSession() { mFrame.Reset(); }
 
   // Parses contents of given ByteReader for a valid frame header and returns
   // true if one was found. After returning, the variable passed to
   // 'aBytesToSkip' holds the amount of bytes to be skipped (if any) in order to
   // jump across a large ID3v2 tag spanning multiple buffers.
-  bool Parse(int64_t aOffset, const uint8_t* aStart, const uint8_t* aEnd)
-  {
+  bool Parse(int64_t aOffset, const uint8_t* aStart, const uint8_t* aEnd) {
     const bool found = mFrame.Parse(aOffset, aStart, aEnd);
 
     if (mFrame.Length() && !mFirstFrame.Length()) {
       mFirstFrame = mFrame;
     }
 
     return found;
   }
 
-private:
+ private:
   // We keep the first parsed frame around for static info access, the
   // previously parsed frame for debugging and the currently parsed frame.
   Frame mFirstFrame;
   Frame mFrame;
 };
 
 // Initialize the AAC AudioSpecificConfig.
 // Only handles two-byte version for AAC-LC.
-static void
-InitAudioSpecificConfig(const Frame& frame,
-                        MediaByteBuffer* aBuffer)
-{
+static void InitAudioSpecificConfig(const Frame& frame,
+                                    MediaByteBuffer* aBuffer) {
   const FrameHeader& header = frame.Header();
   MOZ_ASSERT(header.IsValid());
 
   int audioObjectType = header.mObjectType;
   int samplingFrequencyIndex = header.mSamplingIndex;
   int channelConfig = header.mChannelConfig;
 
   uint8_t asc[2];
   asc[0] = (audioObjectType & 0x1F) << 3 | (samplingFrequencyIndex & 0x0E) >> 1;
   asc[1] = (samplingFrequencyIndex & 0x01) << 7 | (channelConfig & 0x0F) << 3;
 
   aBuffer->AppendElements(asc, 2);
 }
 
-} // namespace adts
+}  // namespace adts
 
 using media::TimeUnit;
 
 // ADTSDemuxer
 
-ADTSDemuxer::ADTSDemuxer(MediaResource* aSource)
-  : mSource(aSource)
-{
+ADTSDemuxer::ADTSDemuxer(MediaResource* aSource) : mSource(aSource) {
   DDLINKCHILD("source", aSource);
 }
 
-bool
-ADTSDemuxer::InitInternal()
-{
+bool ADTSDemuxer::InitInternal() {
   if (!mTrackDemuxer) {
     mTrackDemuxer = new ADTSTrackDemuxer(mSource);
     DDLINKCHILD("track demuxer", mTrackDemuxer.get());
   }
   return mTrackDemuxer->Init();
 }
 
-RefPtr<ADTSDemuxer::InitPromise>
-ADTSDemuxer::Init()
-{
+RefPtr<ADTSDemuxer::InitPromise> ADTSDemuxer::Init() {
   if (!InitInternal()) {
     ADTSLOG("Init() failure: waiting for data");
 
-    return InitPromise::CreateAndReject(
-      NS_ERROR_DOM_MEDIA_METADATA_ERR, __func__);
+    return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_METADATA_ERR,
+                                        __func__);
   }
 
   ADTSLOG("Init() successful");
   return InitPromise::CreateAndResolve(NS_OK, __func__);
 }
 
-uint32_t
-ADTSDemuxer::GetNumberTracks(TrackInfo::TrackType aType) const
-{
+uint32_t ADTSDemuxer::GetNumberTracks(TrackInfo::TrackType aType) const {
   return (aType == TrackInfo::kAudioTrack) ? 1 : 0;
 }
 
-already_AddRefed<MediaTrackDemuxer>
-ADTSDemuxer::GetTrackDemuxer(TrackInfo::TrackType aType, uint32_t aTrackNumber)
-{
+already_AddRefed<MediaTrackDemuxer> ADTSDemuxer::GetTrackDemuxer(
+    TrackInfo::TrackType aType, uint32_t aTrackNumber) {
   if (!mTrackDemuxer) {
     return nullptr;
   }
 
   return RefPtr<ADTSTrackDemuxer>(mTrackDemuxer).forget();
 }
 
-bool
-ADTSDemuxer::IsSeekable() const
-{
+bool ADTSDemuxer::IsSeekable() const {
   int64_t length = mSource->GetLength();
-  if (length > -1)
-    return true;
+  if (length > -1) return true;
   return false;
 }
 
-
 // ADTSTrackDemuxer
 ADTSTrackDemuxer::ADTSTrackDemuxer(MediaResource* aSource)
-  : mSource(aSource)
-  , mParser(new adts::FrameParser())
-  , mOffset(0)
-  , mNumParsedFrames(0)
-  , mFrameIndex(0)
-  , mTotalFrameLen(0)
-  , mSamplesPerFrame(0)
-  , mSamplesPerSecond(0)
-  , mChannels(0)
-{
+    : mSource(aSource),
+      mParser(new adts::FrameParser()),
+      mOffset(0),
+      mNumParsedFrames(0),
+      mFrameIndex(0),
+      mTotalFrameLen(0),
+      mSamplesPerFrame(0),
+      mSamplesPerSecond(0),
+      mChannels(0) {
   DDLINKCHILD("source", aSource);
   Reset();
 }
 
-ADTSTrackDemuxer::~ADTSTrackDemuxer()
-{
-  delete mParser;
-}
+ADTSTrackDemuxer::~ADTSTrackDemuxer() { delete mParser; }
 
-bool
-ADTSTrackDemuxer::Init()
-{
+bool ADTSTrackDemuxer::Init() {
   FastSeek(TimeUnit::Zero());
   // Read the first frame to fetch sample rate and other meta data.
   RefPtr<MediaRawData> frame(GetNextFrame(FindNextFrame(true)));
 
   ADTSLOG("Init StreamLength()=%" PRId64 " first-frame-found=%d",
           StreamLength(), !!frame);
 
   if (!frame) {
@@ -365,228 +324,202 @@ ADTSTrackDemuxer::Init()
 
   // AAC Specific information
   mInfo->mMimeType = "audio/mp4a-latm";
 
   // Configure AAC codec-specific values.
   // For AAC, mProfile and mExtendedProfile contain the audioObjectType from
   // Table 1.3 -- Audio Profile definition, ISO/IEC 14496-3. Eg. 2 == AAC LC
   mInfo->mProfile = mInfo->mExtendedProfile =
-    mParser->FirstFrame().Header().mObjectType;
+      mParser->FirstFrame().Header().mObjectType;
   InitAudioSpecificConfig(mParser->FirstFrame(), mInfo->mCodecSpecificConfig);
 
   ADTSLOG("Init mInfo={mRate=%u mChannels=%u mBitDepth=%u mDuration=%" PRId64
           "}",
           mInfo->mRate, mInfo->mChannels, mInfo->mBitDepth,
           mInfo->mDuration.ToMicroseconds());
 
   return mSamplesPerSecond && mChannels;
 }
 
-UniquePtr<TrackInfo>
-ADTSTrackDemuxer::GetInfo() const
-{
+UniquePtr<TrackInfo> ADTSTrackDemuxer::GetInfo() const {
   return mInfo->Clone();
 }
 
-RefPtr<ADTSTrackDemuxer::SeekPromise>
-ADTSTrackDemuxer::Seek(const TimeUnit& aTime)
-{
+RefPtr<ADTSTrackDemuxer::SeekPromise> ADTSTrackDemuxer::Seek(
+    const TimeUnit& aTime) {
   // Efficiently seek to the position.
   FastSeek(aTime);
   // Correct seek position by scanning the next frames.
   const TimeUnit seekTime = ScanUntil(aTime);
 
   return SeekPromise::CreateAndResolve(seekTime, __func__);
 }
 
-TimeUnit
-ADTSTrackDemuxer::FastSeek(const TimeUnit& aTime)
-{
+TimeUnit ADTSTrackDemuxer::FastSeek(const TimeUnit& aTime) {
   ADTSLOG("FastSeek(%" PRId64 ") avgFrameLen=%f mNumParsedFrames=%" PRIu64
-         " mFrameIndex=%" PRId64 " mOffset=%" PRIu64,
-         aTime.ToMicroseconds(), AverageFrameLength(), mNumParsedFrames,
-         mFrameIndex, mOffset);
+          " mFrameIndex=%" PRId64 " mOffset=%" PRIu64,
+          aTime.ToMicroseconds(), AverageFrameLength(), mNumParsedFrames,
+          mFrameIndex, mOffset);
 
   const int64_t firstFrameOffset = mParser->FirstFrame().Offset();
   if (!aTime.ToMicroseconds()) {
     // Quick seek to the beginning of the stream.
     mOffset = firstFrameOffset;
   } else if (AverageFrameLength() > 0) {
-    mOffset = firstFrameOffset + FrameIndexFromTime(aTime) *
-      AverageFrameLength();
+    mOffset =
+        firstFrameOffset + FrameIndexFromTime(aTime) * AverageFrameLength();
   }
 
   if (mOffset > firstFrameOffset && StreamLength() > 0) {
     mOffset = std::min(StreamLength() - 1, mOffset);
   }
 
   mFrameIndex = FrameIndexFromOffset(mOffset);
   mParser->EndFrameSession();
 
   ADTSLOG("FastSeek End avgFrameLen=%f mNumParsedFrames=%" PRIu64
-          " mFrameIndex=%" PRId64 " mFirstFrameOffset=%" PRIu64 " mOffset=%" PRIu64
-          " SL=%" PRIu64 "",
-          AverageFrameLength(), mNumParsedFrames, mFrameIndex,
-          firstFrameOffset, mOffset, StreamLength());
+          " mFrameIndex=%" PRId64 " mFirstFrameOffset=%" PRIu64
+          " mOffset=%" PRIu64 " SL=%" PRIu64 "",
+          AverageFrameLength(), mNumParsedFrames, mFrameIndex, firstFrameOffset,
+          mOffset, StreamLength());
 
   return Duration(mFrameIndex);
 }
 
-TimeUnit
-ADTSTrackDemuxer::ScanUntil(const TimeUnit& aTime)
-{
+TimeUnit ADTSTrackDemuxer::ScanUntil(const TimeUnit& aTime) {
   ADTSLOG("ScanUntil(%" PRId64 ") avgFrameLen=%f mNumParsedFrames=%" PRIu64
           " mFrameIndex=%" PRId64 " mOffset=%" PRIu64,
           aTime.ToMicroseconds(), AverageFrameLength(), mNumParsedFrames,
           mFrameIndex, mOffset);
 
   if (!aTime.ToMicroseconds()) {
     return FastSeek(aTime);
   }
 
   if (Duration(mFrameIndex) > aTime) {
     FastSeek(aTime);
   }
 
   while (SkipNextFrame(FindNextFrame()) && Duration(mFrameIndex + 1) < aTime) {
     ADTSLOGV("ScanUntil* avgFrameLen=%f mNumParsedFrames=%" PRIu64
              " mFrameIndex=%" PRId64 " mOffset=%" PRIu64 " Duration=%" PRId64,
-             AverageFrameLength(), mNumParsedFrames, mFrameIndex,
-             mOffset, Duration(mFrameIndex + 1).ToMicroseconds());
+             AverageFrameLength(), mNumParsedFrames, mFrameIndex, mOffset,
+             Duration(mFrameIndex + 1).ToMicroseconds());
   }
 
   ADTSLOG("ScanUntil End avgFrameLen=%f mNumParsedFrames=%" PRIu64
           " mFrameIndex=%" PRId64 " mOffset=%" PRIu64,
           AverageFrameLength(), mNumParsedFrames, mFrameIndex, mOffset);
 
   return Duration(mFrameIndex);
 }
 
-RefPtr<ADTSTrackDemuxer::SamplesPromise>
-ADTSTrackDemuxer::GetSamples(int32_t aNumSamples)
-{
+RefPtr<ADTSTrackDemuxer::SamplesPromise> ADTSTrackDemuxer::GetSamples(
+    int32_t aNumSamples) {
   ADTSLOGV("GetSamples(%d) Begin mOffset=%" PRIu64 " mNumParsedFrames=%" PRIu64
            " mFrameIndex=%" PRId64 " mTotalFrameLen=%" PRIu64
            " mSamplesPerFrame=%d "
            "mSamplesPerSecond=%d mChannels=%d",
            aNumSamples, mOffset, mNumParsedFrames, mFrameIndex, mTotalFrameLen,
            mSamplesPerFrame, mSamplesPerSecond, mChannels);
 
   MOZ_ASSERT(aNumSamples);
 
   RefPtr<SamplesHolder> frames = new SamplesHolder();
 
   while (aNumSamples--) {
     RefPtr<MediaRawData> frame(GetNextFrame(FindNextFrame()));
-    if (!frame)
-      break;
+    if (!frame) break;
 
     frames->mSamples.AppendElement(frame);
   }
 
-  ADTSLOGV("GetSamples() End mSamples.Size()=%zu aNumSamples=%d mOffset=%" PRIu64
-           " mNumParsedFrames=%" PRIu64 " mFrameIndex=%" PRId64
-           " mTotalFrameLen=%" PRIu64
-           " mSamplesPerFrame=%d mSamplesPerSecond=%d "
-           "mChannels=%d",
-           frames->mSamples.Length(), aNumSamples, mOffset, mNumParsedFrames,
-           mFrameIndex, mTotalFrameLen, mSamplesPerFrame, mSamplesPerSecond,
-           mChannels);
+  ADTSLOGV(
+      "GetSamples() End mSamples.Size()=%zu aNumSamples=%d mOffset=%" PRIu64
+      " mNumParsedFrames=%" PRIu64 " mFrameIndex=%" PRId64
+      " mTotalFrameLen=%" PRIu64
+      " mSamplesPerFrame=%d mSamplesPerSecond=%d "
+      "mChannels=%d",
+      frames->mSamples.Length(), aNumSamples, mOffset, mNumParsedFrames,
+      mFrameIndex, mTotalFrameLen, mSamplesPerFrame, mSamplesPerSecond,
+      mChannels);
 
   if (frames->mSamples.IsEmpty()) {
-    return SamplesPromise::CreateAndReject(
-      NS_ERROR_DOM_MEDIA_END_OF_STREAM, __func__);
+    return SamplesPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_END_OF_STREAM,
+                                           __func__);
   }
 
   return SamplesPromise::CreateAndResolve(frames, __func__);
 }
 
-void
-ADTSTrackDemuxer::Reset()
-{
+void ADTSTrackDemuxer::Reset() {
   ADTSLOG("Reset()");
   MOZ_ASSERT(mParser);
   if (mParser) {
     mParser->Reset();
   }
   FastSeek(TimeUnit::Zero());
 }
 
 RefPtr<ADTSTrackDemuxer::SkipAccessPointPromise>
-ADTSTrackDemuxer::SkipToNextRandomAccessPoint(const TimeUnit& aTimeThreshold)
-{
+ADTSTrackDemuxer::SkipToNextRandomAccessPoint(const TimeUnit& aTimeThreshold) {
   // Will not be called for audio-only resources.
   return SkipAccessPointPromise::CreateAndReject(
-    SkipFailureHolder(NS_ERROR_DOM_MEDIA_DEMUXER_ERR, 0), __func__);
+      SkipFailureHolder(NS_ERROR_DOM_MEDIA_DEMUXER_ERR, 0), __func__);
 }
 
-int64_t
-ADTSTrackDemuxer::GetResourceOffset() const
-{
-  return mOffset;
-}
+int64_t ADTSTrackDemuxer::GetResourceOffset() const { return mOffset; }
 
-media::TimeIntervals
-ADTSTrackDemuxer::GetBuffered()
-{
+media::TimeIntervals ADTSTrackDemuxer::GetBuffered() {
   auto duration = Duration();
 
   if (!duration.IsPositive()) {
     return media::TimeIntervals();
   }
 
   AutoPinned<MediaResource> stream(mSource.GetResource());
   return GetEstimatedBufferedTimeRanges(stream, duration.ToMicroseconds());
 }
 
-int64_t
-ADTSTrackDemuxer::StreamLength() const
-{
-  return mSource.GetLength();
-}
+int64_t ADTSTrackDemuxer::StreamLength() const { return mSource.GetLength(); }
 
-TimeUnit
-ADTSTrackDemuxer::Duration() const
-{
+TimeUnit ADTSTrackDemuxer::Duration() const {
   if (!mNumParsedFrames) {
     return TimeUnit::FromMicroseconds(-1);
   }
 
   const int64_t streamLen = StreamLength();
   if (streamLen < 0) {
     // Unknown length, we can't estimate duration.
     return TimeUnit::FromMicroseconds(-1);
   }
   const int64_t firstFrameOffset = mParser->FirstFrame().Offset();
   int64_t numFrames = (streamLen - firstFrameOffset) / AverageFrameLength();
   return Duration(numFrames);
 }
 
-TimeUnit
-ADTSTrackDemuxer::Duration(int64_t aNumFrames) const
-{
+TimeUnit ADTSTrackDemuxer::Duration(int64_t aNumFrames) const {
   if (!mSamplesPerSecond) {
     return TimeUnit::FromMicroseconds(-1);
   }
 
   return FramesToTimeUnit(aNumFrames * mSamplesPerFrame, mSamplesPerSecond);
 }
 
-const adts::Frame&
-ADTSTrackDemuxer::FindNextFrame(bool findFirstFrame /*= false*/)
-{
+const adts::Frame& ADTSTrackDemuxer::FindNextFrame(
+    bool findFirstFrame /*= false*/) {
   static const int BUFFER_SIZE = 4096;
   static const int MAX_SKIPPED_BYTES = 10 * BUFFER_SIZE;
 
   ADTSLOGV("FindNext() Begin mOffset=%" PRIu64 " mNumParsedFrames=%" PRIu64
-          " mFrameIndex=%" PRId64 " mTotalFrameLen=%" PRIu64
-          " mSamplesPerFrame=%d mSamplesPerSecond=%d mChannels=%d",
-          mOffset, mNumParsedFrames, mFrameIndex, mTotalFrameLen,
-          mSamplesPerFrame, mSamplesPerSecond, mChannels);
+           " mFrameIndex=%" PRId64 " mTotalFrameLen=%" PRIu64
+           " mSamplesPerFrame=%d mSamplesPerSecond=%d mChannels=%d",
+           mOffset, mNumParsedFrames, mFrameIndex, mTotalFrameLen,
+           mSamplesPerFrame, mSamplesPerSecond, mChannels);
 
   uint8_t buffer[BUFFER_SIZE];
   int32_t read = 0;
 
   bool foundFrame = false;
   int64_t frameHeaderOffset = mOffset;
 
   // Prepare the parser for the next frame parsing session.
@@ -607,17 +540,17 @@ ADTSTrackDemuxer::FindNextFrame(bool fin
     const adts::Frame& currentFrame = mParser->CurrentFrame();
     foundFrame = mParser->Parse(frameHeaderOffset, buffer, buffer + read);
     if (findFirstFrame && foundFrame) {
       // Check for sync marker after the found frame, since it's
       // possible to find sync marker in AAC data. If sync marker
       // exists after the current frame then we've found a frame
       // header.
       int64_t nextFrameHeaderOffset =
-        currentFrame.Offset() + currentFrame.Length();
+          currentFrame.Offset() + currentFrame.Length();
       int32_t read = Read(buffer, nextFrameHeaderOffset, 2);
       if (read != 2 || !adts::FrameHeader::MatchesSync(buffer)) {
         frameHeaderOffset = currentFrame.Offset() + 1;
         mParser->Reset();
         foundFrame = false;
         continue;
       }
     }
@@ -633,59 +566,59 @@ ADTSTrackDemuxer::FindNextFrame(bool fin
     if (frameHeaderOffset + advance <= frameHeaderOffset) {
       break;
     }
 
     frameHeaderOffset += advance;
   }
 
   if (!foundFrame || !mParser->CurrentFrame().Length()) {
-    ADTSLOG("FindNext() Exit foundFrame=%d mParser->CurrentFrame().Length()=%zu ",
-           foundFrame, mParser->CurrentFrame().Length());
+    ADTSLOG(
+        "FindNext() Exit foundFrame=%d mParser->CurrentFrame().Length()=%zu ",
+        foundFrame, mParser->CurrentFrame().Length());
     mParser->Reset();
     return mParser->CurrentFrame();
   }
 
   ADTSLOGV("FindNext() End mOffset=%" PRIu64 " mNumParsedFrames=%" PRIu64
-          " mFrameIndex=%" PRId64 " frameHeaderOffset=%" PRId64
-          " mTotalFrameLen=%" PRIu64 " mSamplesPerFrame=%d mSamplesPerSecond=%d"
-          " mChannels=%d",
-          mOffset, mNumParsedFrames, mFrameIndex, frameHeaderOffset,
-          mTotalFrameLen, mSamplesPerFrame, mSamplesPerSecond, mChannels);
+           " mFrameIndex=%" PRId64 " frameHeaderOffset=%" PRId64
+           " mTotalFrameLen=%" PRIu64
+           " mSamplesPerFrame=%d mSamplesPerSecond=%d"
+           " mChannels=%d",
+           mOffset, mNumParsedFrames, mFrameIndex, frameHeaderOffset,
+           mTotalFrameLen, mSamplesPerFrame, mSamplesPerSecond, mChannels);
 
   return mParser->CurrentFrame();
 }
 
-bool
-ADTSTrackDemuxer::SkipNextFrame(const adts::Frame& aFrame)
-{
+bool ADTSTrackDemuxer::SkipNextFrame(const adts::Frame& aFrame) {
   if (!mNumParsedFrames || !aFrame.Length()) {
     RefPtr<MediaRawData> frame(GetNextFrame(aFrame));
     return frame;
   }
 
   UpdateState(aFrame);
 
   ADTSLOGV("SkipNext() End mOffset=%" PRIu64 " mNumParsedFrames=%" PRIu64
-          " mFrameIndex=%" PRId64 " mTotalFrameLen=%" PRIu64
-          " mSamplesPerFrame=%d mSamplesPerSecond=%d mChannels=%d",
-          mOffset, mNumParsedFrames, mFrameIndex, mTotalFrameLen,
-          mSamplesPerFrame, mSamplesPerSecond, mChannels);
+           " mFrameIndex=%" PRId64 " mTotalFrameLen=%" PRIu64
+           " mSamplesPerFrame=%d mSamplesPerSecond=%d mChannels=%d",
+           mOffset, mNumParsedFrames, mFrameIndex, mTotalFrameLen,
+           mSamplesPerFrame, mSamplesPerSecond, mChannels);
 
   return true;
 }
 
-already_AddRefed<MediaRawData>
-ADTSTrackDemuxer::GetNextFrame(const adts::Frame& aFrame)
-{
-  ADTSLOG("GetNext() Begin({mOffset=%" PRId64 " HeaderSize()=%zu"
+already_AddRefed<MediaRawData> ADTSTrackDemuxer::GetNextFrame(
+    const adts::Frame& aFrame) {
+  ADTSLOG("GetNext() Begin({mOffset=%" PRId64
+          " HeaderSize()=%zu"
           " Length()=%zu})",
-         aFrame.Offset(), aFrame.Header().HeaderSize(), aFrame.PayloadLength());
-  if (!aFrame.IsValid())
-    return nullptr;
+          aFrame.Offset(), aFrame.Header().HeaderSize(),
+          aFrame.PayloadLength());
+  if (!aFrame.IsValid()) return nullptr;
 
   const int64_t offset = aFrame.PayloadOffset();
   const uint32_t length = aFrame.PayloadLength();
 
   RefPtr<MediaRawData> frame = new MediaRawData();
   frame->mOffset = offset;
 
   UniquePtr<MediaRawDataWriter> frameWriter(frame->CreateWriter());
@@ -714,46 +647,41 @@ ADTSTrackDemuxer::GetNextFrame(const adt
            " mFrameIndex=%" PRId64 " mTotalFrameLen=%" PRIu64
            " mSamplesPerFrame=%d mSamplesPerSecond=%d mChannels=%d",
            mOffset, mNumParsedFrames, mFrameIndex, mTotalFrameLen,
            mSamplesPerFrame, mSamplesPerSecond, mChannels);
 
   return frame.forget();
 }
 
-int64_t
-ADTSTrackDemuxer::FrameIndexFromOffset(int64_t aOffset) const
-{
+int64_t ADTSTrackDemuxer::FrameIndexFromOffset(int64_t aOffset) const {
   int64_t frameIndex = 0;
 
   if (AverageFrameLength() > 0) {
     frameIndex =
-      (aOffset - mParser->FirstFrame().Offset()) / AverageFrameLength();
+        (aOffset - mParser->FirstFrame().Offset()) / AverageFrameLength();
   }
 
-  ADTSLOGV("FrameIndexFromOffset(%" PRId64 ") -> %" PRId64, aOffset, frameIndex);
+  ADTSLOGV("FrameIndexFromOffset(%" PRId64 ") -> %" PRId64, aOffset,
+           frameIndex);
   return std::max<int64_t>(0, frameIndex);
 }
 
-int64_t
-ADTSTrackDemuxer::FrameIndexFromTime(const TimeUnit& aTime) const
-{
+int64_t ADTSTrackDemuxer::FrameIndexFromTime(const TimeUnit& aTime) const {
   int64_t frameIndex = 0;
   if (mSamplesPerSecond > 0 && mSamplesPerFrame > 0) {
     frameIndex = aTime.ToSeconds() * mSamplesPerSecond / mSamplesPerFrame - 1;
   }
 
-  ADTSLOGV("FrameIndexFromOffset(%fs) -> %" PRId64,
-           aTime.ToSeconds(), frameIndex);
+  ADTSLOGV("FrameIndexFromOffset(%fs) -> %" PRId64, aTime.ToSeconds(),
+           frameIndex);
   return std::max<int64_t>(0, frameIndex);
 }
 
-void
-ADTSTrackDemuxer::UpdateState(const adts::Frame& aFrame)
-{
+void ADTSTrackDemuxer::UpdateState(const adts::Frame& aFrame) {
   int32_t frameLength = aFrame.Length();
   // Prevent overflow.
   if (mTotalFrameLen + frameLength < mTotalFrameLen) {
     // These variables have a linear dependency and are only used to derive the
     // average frame length.
     mTotalFrameLen /= 2;
     mNumParsedFrames /= 2;
   }
@@ -769,49 +697,45 @@ ADTSTrackDemuxer::UpdateState(const adts
     mChannels = header.mChannels;
   }
 
   ++mNumParsedFrames;
   ++mFrameIndex;
   MOZ_ASSERT(mFrameIndex > 0);
 }
 
-int32_t
-ADTSTrackDemuxer::Read(uint8_t* aBuffer, int64_t aOffset, int32_t aSize)
-{
-  ADTSLOGV("ADTSTrackDemuxer::Read(%p %" PRId64 " %d)",
-           aBuffer, aOffset, aSize);
+int32_t ADTSTrackDemuxer::Read(uint8_t* aBuffer, int64_t aOffset,
+                               int32_t aSize) {
+  ADTSLOGV("ADTSTrackDemuxer::Read(%p %" PRId64 " %d)", aBuffer, aOffset,
+           aSize);
 
   const int64_t streamLen = StreamLength();
   if (mInfo && streamLen > 0) {
     // Prevent blocking reads after successful initialization.
     aSize = std::min<int64_t>(aSize, streamLen - aOffset);
   }
 
   uint32_t read = 0;
   ADTSLOGV("ADTSTrackDemuxer::Read        -> ReadAt(%d)", aSize);
   const nsresult rv = mSource.ReadAt(aOffset, reinterpret_cast<char*>(aBuffer),
                                      static_cast<uint32_t>(aSize), &read);
   NS_ENSURE_SUCCESS(rv, 0);
   return static_cast<int32_t>(read);
 }
 
-double
-ADTSTrackDemuxer::AverageFrameLength() const
-{
+double ADTSTrackDemuxer::AverageFrameLength() const {
   if (mNumParsedFrames) {
     return static_cast<double>(mTotalFrameLen) / mNumParsedFrames;
   }
 
   return 0.0;
 }
 
-/* static */ bool
-ADTSDemuxer::ADTSSniffer(const uint8_t* aData, const uint32_t aLength)
-{
+/* static */ bool ADTSDemuxer::ADTSSniffer(const uint8_t* aData,
+                                           const uint32_t aLength) {
   if (aLength < 7) {
     return false;
   }
   if (!adts::FrameHeader::MatchesSync(aData)) {
     return false;
   }
   auto parser = MakeUnique<adts::FrameParser>();
 
@@ -824,9 +748,9 @@ ADTSDemuxer::ADTSSniffer(const uint8_t* 
   // exists after the current frame then we've found a frame
   // header.
   int64_t nextFrameHeaderOffset = currentFrame.Offset() + currentFrame.Length();
   return int64_t(aLength) > nextFrameHeaderOffset &&
          aLength - nextFrameHeaderOffset >= 2 &&
          adts::FrameHeader::MatchesSync(aData + nextFrameHeaderOffset);
 }
 
-} // namespace mozilla
+}  // namespace mozilla
--- a/dom/media/ADTSDemuxer.h
+++ b/dom/media/ADTSDemuxer.h
@@ -1,63 +1,59 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
-* License, v. 2.0. If a copy of the MPL was not distributed with this
-* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef ADTS_DEMUXER_H_
 #define ADTS_DEMUXER_H_
 
 #include "mozilla/Attributes.h"
 #include "mozilla/Maybe.h"
 #include "MediaDataDemuxer.h"
 #include "MediaResource.h"
 
 namespace mozilla {
 
 namespace adts {
 class Frame;
 class FrameParser;
-}
+}  // namespace adts
 
 class ADTSTrackDemuxer;
 
 DDLoggedTypeDeclNameAndBase(ADTSDemuxer, MediaDataDemuxer);
 
-class ADTSDemuxer
-  : public MediaDataDemuxer
-  , public DecoderDoctorLifeLogger<ADTSDemuxer>
-{
-public:
+class ADTSDemuxer : public MediaDataDemuxer,
+                    public DecoderDoctorLifeLogger<ADTSDemuxer> {
+ public:
   // MediaDataDemuxer interface.
   explicit ADTSDemuxer(MediaResource* aSource);
   RefPtr<InitPromise> Init() override;
   uint32_t GetNumberTracks(TrackInfo::TrackType aType) const override;
-  already_AddRefed<MediaTrackDemuxer>
-  GetTrackDemuxer(TrackInfo::TrackType aType, uint32_t aTrackNumber) override;
+  already_AddRefed<MediaTrackDemuxer> GetTrackDemuxer(
+      TrackInfo::TrackType aType, uint32_t aTrackNumber) override;
   bool IsSeekable() const override;
 
   // Return true if a valid ADTS frame header could be found.
   static bool ADTSSniffer(const uint8_t* aData, const uint32_t aLength);
 
-private:
+ private:
   bool InitInternal();
 
   RefPtr<MediaResource> mSource;
   RefPtr<ADTSTrackDemuxer> mTrackDemuxer;
 };
 
 DDLoggedTypeNameAndBase(ADTSTrackDemuxer, MediaTrackDemuxer);
 
-class ADTSTrackDemuxer
-  : public MediaTrackDemuxer
-  , public DecoderDoctorLifeLogger<ADTSTrackDemuxer>
-{
-public:
+class ADTSTrackDemuxer : public MediaTrackDemuxer,
+                         public DecoderDoctorLifeLogger<ADTSTrackDemuxer> {
+ public:
   explicit ADTSTrackDemuxer(MediaResource* aSource);
 
   // Initializes the track demuxer by reading the first frame for meta data.
   // Returns initialization success state.
   bool Init();
 
   // Returns the total stream length if known, -1 otherwise.
   int64_t StreamLength() const;
@@ -70,28 +66,29 @@ public:
   media::TimeUnit Duration(int64_t aNumFrames) const;
 
   // MediaTrackDemuxer interface.
   UniquePtr<TrackInfo> GetInfo() const override;
   RefPtr<SeekPromise> Seek(const media::TimeUnit& aTime) override;
   RefPtr<SamplesPromise> GetSamples(int32_t aNumSamples = 1) override;
   void Reset() override;
   RefPtr<SkipAccessPointPromise> SkipToNextRandomAccessPoint(
-    const media::TimeUnit& aTimeThreshold) override;
+      const media::TimeUnit& aTimeThreshold) override;
   int64_t GetResourceOffset() const override;
   media::TimeIntervals GetBuffered() override;
 
-private:
+ private:
   // Destructor.
   ~ADTSTrackDemuxer();
 
   // Fast approximate seeking to given time.
   media::TimeUnit FastSeek(const media::TimeUnit& aTime);
 
-  // Seeks by scanning the stream up to the given time for more accurate results.
+  // Seeks by scanning the stream up to the given time for more accurate
+  // results.
   media::TimeUnit ScanUntil(const media::TimeUnit& aTime);
 
   // Finds the next valid frame and returns its byte range.
   const adts::Frame& FindNextFrame(bool findFirstFrame = false);
 
   // Skips the next frame given the provided byte range.
   bool SkipNextFrame(const adts::Frame& aFrame);
 
@@ -130,21 +127,22 @@ private:
   int64_t mFrameIndex;
 
   // Sum of parsed frames' lengths in bytes.
   uint64_t mTotalFrameLen;
 
   // Samples per frame metric derived from frame headers or 0 if none available.
   uint32_t mSamplesPerFrame;
 
-  // Samples per second metric derived from frame headers or 0 if none available.
+  // Samples per second metric derived from frame headers or 0 if none
+  // available.
   uint32_t mSamplesPerSecond;
 
   // Channel count derived from frame headers or 0 if none available.
   uint32_t mChannels;
 
   // Audio track config info.
   UniquePtr<AudioInfo> mInfo;
 };
 
-} // mozilla
+}  // namespace mozilla
 
-#endif // !ADTS_DEMUXER_H_
+#endif  // !ADTS_DEMUXER_H_
--- a/dom/media/AsyncLogger.h
+++ b/dom/media/AsyncLogger.h
@@ -13,242 +13,223 @@
 #include "mozilla/Attributes.h"
 #include "mozilla/MathAlgorithms.h"
 #include "mozilla/Sprintf.h"
 
 namespace mozilla {
 
 namespace detail {
 
- // This class implements a lock-free multiple producer single consumer queue of
- // fixed size log messages, with the following characteristics:
- // - Unbounded (uses a intrinsic linked list)
- // - Allocates on Push. Push can be called on any thread.
- // - Deallocates on Pop. Pop MUST always be called on the same thread for the
- // life-time of the queue.
- //
- // In our scenario, the producer threads are real-time, they can't block. The
- // consummer thread runs every now and then and empties the queue to a log
- // file, on disk.
- //
- // Having fixed size messages and jemalloc is probably not the fastest, but
- // allows having a simpler design, we count on the fact that jemalloc will get
- // the memory from a thread-local source most of the time.
-template<size_t MESSAGE_LENGTH>
-class MPSCQueue
-{
-public:
-    struct Message {
-        Message()
-        {
-           mNext.store(nullptr, std::memory_order_relaxed);
-        }
-        Message(const Message& aMessage) = delete;
-        void operator=(const Message& aMessage) = delete;
+// This class implements a lock-free multiple producer single consumer queue of
+// fixed size log messages, with the following characteristics:
+// - Unbounded (uses a intrinsic linked list)
+// - Allocates on Push. Push can be called on any thread.
+// - Deallocates on Pop. Pop MUST always be called on the same thread for the
+// life-time of the queue.
+//
+// In our scenario, the producer threads are real-time, they can't block. The
+// consummer thread runs every now and then and empties the queue to a log
+// file, on disk.
+//
+// Having fixed size messages and jemalloc is probably not the fastest, but
+// allows having a simpler design, we count on the fact that jemalloc will get
+// the memory from a thread-local source most of the time.
+template <size_t MESSAGE_LENGTH>
+class MPSCQueue {
+ public:
+  struct Message {
+    Message() { mNext.store(nullptr, std::memory_order_relaxed); }
+    Message(const Message& aMessage) = delete;
+    void operator=(const Message& aMessage) = delete;
+
+    char data[MESSAGE_LENGTH];
+    std::atomic<Message*> mNext;
+  };
+  // Creates a new MPSCQueue. Initially, the queue has a single sentinel node,
+  // pointed to by both mHead and mTail.
+  MPSCQueue()
+      // At construction, the initial message points to nullptr (it has no
+      // successor). It is a sentinel node, that does not contain meaningful
+      // data.
+      : mHead(new Message()), mTail(mHead.load(std::memory_order_relaxed)) {}
+
+  ~MPSCQueue() {
+    Message dummy;
+    while (this->Pop(dummy.data)) {
+    }
+    Message* front = mHead.load(std::memory_order_relaxed);
+    delete front;
+  }
 
-        char data[MESSAGE_LENGTH];
-        std::atomic<Message*> mNext;
-    };
-    // Creates a new MPSCQueue. Initially, the queue has a single sentinel node,
-    // pointed to by both mHead and mTail.
-    MPSCQueue()
-    // At construction, the initial message points to nullptr (it has no
-    // successor). It is a sentinel node, that does not contain meaningful
-    // data.
-    : mHead(new Message())
-    , mTail(mHead.load(std::memory_order_relaxed))
-    { }
-
-    ~MPSCQueue()
-    {
-        Message dummy;
-        while (this->Pop(dummy.data)) {}
-        Message* front = mHead.load(std::memory_order_relaxed);
-        delete front;
-    }
+  void Push(MPSCQueue<MESSAGE_LENGTH>::Message* aMessage) {
+    // The next two non-commented line are called A and B in this paragraph.
+    // Producer threads i, i-1, etc. are numbered in the order they reached
+    // A in time, thread i being the thread that has reached A first.
+    // Atomically, on line A the new `mHead` is set to be the node that was
+    // just allocated, with strong memory order. From now one, any thread
+    // that reaches A will see that the node just allocated is
+    // effectively the head of the list, and will make itself the new head
+    // of the list.
+    // In a bad case (when thread i executes A and then
+    // is not scheduled for a long time), it is possible that thread i-1 and
+    // subsequent threads create a seemingly disconnected set of nodes, but
+    // they all have the correct value for the next node to set as their
+    // mNext member on their respective stacks (in `prev`), and this is
+    // always correct. When the scheduler resumes, and line B is executed,
+    // the correct linkage is resumed.
+    // Before line B, since mNext for the node the was the last element of
+    // the queue still has an mNext of nullptr, Pop will not see the node
+    // added.
+    // For line A, it's critical to have strong ordering both ways (since
+    // it's going to possibly be read and write repeatidly by multiple
+    // threads)
+    // Line B can have weaker guarantees, it's only going to be written by a
+    // single thread, and we just need to ensure it's read properly by a
+    // single other one.
+    Message* prev = mHead.exchange(aMessage, std::memory_order_acq_rel);
+    prev->mNext.store(aMessage, std::memory_order_release);
+  }
 
-    void
-    Push(MPSCQueue<MESSAGE_LENGTH>::Message* aMessage)
-    {
-        // The next two non-commented line are called A and B in this paragraph.
-        // Producer threads i, i-1, etc. are numbered in the order they reached
-        // A in time, thread i being the thread that has reached A first.
-        // Atomically, on line A the new `mHead` is set to be the node that was
-        // just allocated, with strong memory order. From now one, any thread
-        // that reaches A will see that the node just allocated is
-        // effectively the head of the list, and will make itself the new head
-        // of the list.
-        // In a bad case (when thread i executes A and then
-        // is not scheduled for a long time), it is possible that thread i-1 and
-        // subsequent threads create a seemingly disconnected set of nodes, but
-        // they all have the correct value for the next node to set as their
-        // mNext member on their respective stacks (in `prev`), and this is
-        // always correct. When the scheduler resumes, and line B is executed,
-        // the correct linkage is resumed.
-        // Before line B, since mNext for the node the was the last element of
-        // the queue still has an mNext of nullptr, Pop will not see the node
-        // added.
-        // For line A, it's critical to have strong ordering both ways (since
-        // it's going to possibly be read and write repeatidly by multiple
-        // threads)
-        // Line B can have weaker guarantees, it's only going to be written by a
-        // single thread, and we just need to ensure it's read properly by a
-        // single other one.
-        Message* prev = mHead.exchange(aMessage, std::memory_order_acq_rel);
-        prev->mNext.store(aMessage, std::memory_order_release);
+  // Allocates a new node, copy aInput to the new memory location, and pushes
+  // it to the end of the list.
+  void Push(const char aInput[MESSAGE_LENGTH]) {
+    // Create a new message, and copy the messages passed on argument to the
+    // new memory location. We are not touching the queue right now. The
+    // successor for this new node is set to be nullptr.
+    Message* msg = new Message();
+    strncpy(msg->data, aInput, MESSAGE_LENGTH);
+
+    Push(msg);
+  }
+
+  // Copy the content of the first message of the queue to aOutput, and
+  // frees the message. Returns true if there was a message, in which case
+  // `aOutput` contains a valid value. If the queue was empty, returns false,
+  // in which case `aOutput` is left untouched.
+  bool Pop(char aOutput[MESSAGE_LENGTH]) {
+    // Similarly, in this paragraph, the two following lines are called A
+    // and B, and threads are called thread i, i-1, etc. in order of
+    // execution of line A.
+    // On line A, the first element of the queue is acquired. It is simply a
+    // sentinel node.
+    // On line B, we acquire the node that has the data we want. If B is
+    // null, then only the sentinel node was present in the queue, we can
+    // safely return false.
+    // mTail can be loaded with relaxed ordering, since it's not written nor
+    // read by any other thread (this queue is single consumer).
+    // mNext can be written to by one of the producer, so it's necessary to
+    // ensure those writes are seen, hence the stricter ordering.
+    Message* tail = mTail.load(std::memory_order_relaxed);
+    Message* next = tail->mNext.load(std::memory_order_acquire);
+
+    if (next == nullptr) {
+      return false;
     }
 
-    // Allocates a new node, copy aInput to the new memory location, and pushes
-    // it to the end of the list.
-    void
-    Push(const char aInput[MESSAGE_LENGTH])
-    {
-        // Create a new message, and copy the messages passed on argument to the
-        // new memory location. We are not touching the queue right now. The
-        // successor for this new node is set to be nullptr.
-        Message* msg = new Message();
-        strncpy(msg->data, aInput, MESSAGE_LENGTH);
-
-        Push(msg);
-    }
+    strncpy(aOutput, next->data, MESSAGE_LENGTH);
 
-    // Copy the content of the first message of the queue to aOutput, and
-    // frees the message. Returns true if there was a message, in which case
-    // `aOutput` contains a valid value. If the queue was empty, returns false,
-    // in which case `aOutput` is left untouched.
-    bool
-    Pop(char aOutput[MESSAGE_LENGTH])
-    {
-        // Similarly, in this paragraph, the two following lines are called A
-        // and B, and threads are called thread i, i-1, etc. in order of
-        // execution of line A.
-        // On line A, the first element of the queue is acquired. It is simply a
-        // sentinel node.
-        // On line B, we acquire the node that has the data we want. If B is
-        // null, then only the sentinel node was present in the queue, we can
-        // safely return false.
-        // mTail can be loaded with relaxed ordering, since it's not written nor
-        // read by any other thread (this queue is single consumer).
-        // mNext can be written to by one of the producer, so it's necessary to
-        // ensure those writes are seen, hence the stricter ordering.
-        Message* tail = mTail.load(std::memory_order_relaxed);
-        Message* next = tail->mNext.load(std::memory_order_acquire);
+    // Simply shift the queue one node further, so that the sentinel node is
+    // now pointing to the correct most ancient node. It contains stale data,
+    // but this data will never be read again.
+    // It's only necessary to ensure the previous load on this thread is not
+    // reordered past this line, so release ordering is sufficient here.
+    mTail.store(next, std::memory_order_release);
+
+    // This thread is now the only thing that points to `tail`, it can be
+    // safely deleted.
+    delete tail;
+
+    return true;
+  }
 
-        if (next == nullptr) {
-            return false;
-        }
-
-        strncpy(aOutput, next->data, MESSAGE_LENGTH);
+ private:
+  // An atomic pointer to the most recent message in the queue.
+  std::atomic<Message*> mHead;
+  // An atomic pointer to a sentinel node, that points to the oldest message
+  // in the queue.
+  std::atomic<Message*> mTail;
 
-        // Simply shift the queue one node further, so that the sentinel node is
-        // now pointing to the correct most ancient node. It contains stale data,
-        // but this data will never be read again.
-        // It's only necessary to ensure the previous load on this thread is not
-        // reordered past this line, so release ordering is sufficient here.
-        mTail.store(next, std::memory_order_release);
-
-        // This thread is now the only thing that points to `tail`, it can be
-        // safely deleted.
-        delete tail;
-
-        return true;
-    }
+  MPSCQueue(const MPSCQueue&) = delete;
+  void operator=(const MPSCQueue&) = delete;
 
-private:
-    // An atomic pointer to the most recent message in the queue.
-    std::atomic<Message*> mHead;
-    // An atomic pointer to a sentinel node, that points to the oldest message
-    // in the queue.
-    std::atomic<Message*> mTail;
+ public:
+  // The goal here is to make it easy on the allocator. We pack a pointer in the
+  // message struct, and we still want to do power of two allocations to
+  // minimize allocator slop. The allocation size are going to be constant, so
+  // the allocation is probably going to hit the thread local cache in jemalloc,
+  // making it cheap and, more importantly, lock-free enough.
+  static const size_t MESSAGE_PADDING = sizeof(Message::mNext);
 
-    MPSCQueue(const MPSCQueue&) = delete;
-    void operator=(const MPSCQueue&) = delete;
-public:
-    // The goal here is to make it easy on the allocator. We pack a pointer in the
-    // message struct, and we still want to do power of two allocations to
-    // minimize allocator slop. The allocation size are going to be constant, so
-    // the allocation is probably going to hit the thread local cache in jemalloc,
-    // making it cheap and, more importantly, lock-free enough.
-    static const size_t MESSAGE_PADDING = sizeof(Message::mNext);
-private:
-    static_assert(IsPowerOfTwo(MESSAGE_LENGTH + MESSAGE_PADDING),
-                  "MPSCQueue internal allocations must have a size that is a"
-                  "power of two ");
+ private:
+  static_assert(IsPowerOfTwo(MESSAGE_LENGTH + MESSAGE_PADDING),
+                "MPSCQueue internal allocations must have a size that is a"
+                "power of two ");
 };
-} // end namespace detail
+}  // end namespace detail
 
 // This class implements a lock-free asynchronous logger, that outputs to
 // MOZ_LOG.
 // Any thread can use this logger without external synchronization and without
 // being blocked. This log is suitable for use in real-time audio threads.
 // Log formatting is best done externally, this class implements the output
 // mechanism only.
 // This class uses a thread internally, and must be started and stopped
 // manually.
 // If logging is disabled, all the calls are no-op.
-class AsyncLogger
-{
-public:
-  static const uint32_t MAX_MESSAGE_LENGTH = 512 - detail::MPSCQueue<sizeof(void*)>::MESSAGE_PADDING;
+class AsyncLogger {
+ public:
+  static const uint32_t MAX_MESSAGE_LENGTH =
+      512 - detail::MPSCQueue<sizeof(void*)>::MESSAGE_PADDING;
 
   // aLogModuleName is the name of the MOZ_LOG module.
   explicit AsyncLogger(const char* aLogModuleName)
-  : mThread(nullptr)
-  , mLogModule(aLogModuleName)
-  , mRunning(false)
-  { }
+      : mThread(nullptr), mLogModule(aLogModuleName), mRunning(false) {}
 
-  ~AsyncLogger()
-  {
+  ~AsyncLogger() {
     if (Enabled()) {
       Stop();
     }
   }
 
-  void Start()
-  {
+  void Start() {
     MOZ_ASSERT(!mRunning, "Double calls to AsyncLogger::Start");
     if (Enabled()) {
       mRunning = true;
       Run();
     }
   }
 
-  void Stop()
-  {
+  void Stop() {
     if (Enabled()) {
       if (mRunning) {
         mRunning = false;
         mThread->join();
       }
     } else {
       MOZ_ASSERT(!mRunning && !mThread);
     }
   }
 
-  void Log(const char* format, ...) MOZ_FORMAT_PRINTF(2,3)
-  {
+  void Log(const char* format, ...) MOZ_FORMAT_PRINTF(2, 3) {
     if (Enabled()) {
       auto* msg = new detail::MPSCQueue<MAX_MESSAGE_LENGTH>::Message();
       va_list args;
       va_start(args, format);
       VsprintfLiteral(msg->data, format, args);
       va_end(args);
       mMessageQueue.Push(msg);
     }
   }
 
-  bool Enabled()
-  {
+  bool Enabled() {
     return MOZ_LOG_TEST(mLogModule, mozilla::LogLevel::Verbose);
   }
 
-private:
-  void Run()
-  {
+ private:
+  void Run() {
     MOZ_ASSERT(Enabled());
     mThread.reset(new std::thread([this]() {
       while (mRunning) {
         char message[MAX_MESSAGE_LENGTH];
         while (mMessageQueue.Pop(message) && mRunning) {
           MOZ_LOG(mLogModule, mozilla::LogLevel::Verbose, ("%s", message));
         }
         Sleep();
@@ -259,11 +240,11 @@ private:
   void Sleep() { std::this_thread::sleep_for(std::chrono::milliseconds(10)); }
 
   std::unique_ptr<std::thread> mThread;
   mozilla::LazyLogModule mLogModule;
   detail::MPSCQueue<MAX_MESSAGE_LENGTH> mMessageQueue;
   std::atomic<bool> mRunning;
 };
 
-} // end namespace mozilla
+}  // end namespace mozilla
 
-#endif // mozilla_dom_AsyncLogger_h
+#endif  // mozilla_dom_AsyncLogger_h
--- a/dom/media/AudioBufferUtils.h
+++ b/dom/media/AudioBufferUtils.h
@@ -24,39 +24,34 @@ static inline uint32_t SamplesToFrames(u
   return aSamples / aChannels;
 }
 
 /**
  * Class that gets a buffer pointer from an audio callback and provides a safe
  * interface to manipulate this buffer, and to ensure we are not missing frames
  * by the end of the callback.
  */
-template<typename T>
-class AudioCallbackBufferWrapper
-{
-public:
+template <typename T>
+class AudioCallbackBufferWrapper {
+ public:
   AudioCallbackBufferWrapper()
-    : mBuffer(nullptr)
-    , mSamples(0)
-    , mSampleWriteOffset(1)
-    , mChannels(0)
-  {}
+      : mBuffer(nullptr), mSamples(0), mSampleWriteOffset(1), mChannels(0) {}
 
   explicit AudioCallbackBufferWrapper(uint32_t aChannels)
-    : mBuffer(nullptr)
-    , mSamples(0)
-    , mSampleWriteOffset(1)
-    , mChannels(aChannels)
+      : mBuffer(nullptr),
+        mSamples(0),
+        mSampleWriteOffset(1),
+        mChannels(aChannels)
 
   {
     MOZ_ASSERT(aChannels);
   }
 
-  AudioCallbackBufferWrapper& operator=(const AudioCallbackBufferWrapper& aOther)
-  {
+  AudioCallbackBufferWrapper& operator=(
+      const AudioCallbackBufferWrapper& aOther) {
     MOZ_ASSERT(!aOther.mBuffer,
                "Don't use this ctor after AudioCallbackDriver::Init");
     MOZ_ASSERT(aOther.mSamples == 0,
                "Don't use this ctor after AudioCallbackDriver::Init");
     MOZ_ASSERT(aOther.mSampleWriteOffset == 1,
                "Don't use this ctor after AudioCallbackDriver::Init");
     MOZ_ASSERT(aOther.mChannels != 0);
 
@@ -68,33 +63,32 @@ public:
     return *this;
   }
 
   /**
    * Set the buffer in this wrapper. This is to be called at the beginning of
    * the callback.
    */
   void SetBuffer(T* aBuffer, uint32_t aFrames) {
-    MOZ_ASSERT(!mBuffer && !mSamples,
-        "SetBuffer called twice.");
+    MOZ_ASSERT(!mBuffer && !mSamples, "SetBuffer called twice.");
     mBuffer = aBuffer;
     mSamples = FramesToSamples(mChannels, aFrames);
     mSampleWriteOffset = 0;
   }
 
   /**
    * Write some frames to the internal buffer. Free space in the buffer should
    * be check prior to calling this.
    */
   void WriteFrames(T* aBuffer, uint32_t aFrames) {
     MOZ_ASSERT(aFrames <= Available(),
-        "Writing more that we can in the audio buffer.");
+               "Writing more that we can in the audio buffer.");
 
-    PodCopy(mBuffer + mSampleWriteOffset, aBuffer, FramesToSamples(mChannels,
-                                                                   aFrames));
+    PodCopy(mBuffer + mSampleWriteOffset, aBuffer,
+            FramesToSamples(mChannels, aFrames));
     mSampleWriteOffset += FramesToSamples(mChannels, aFrames);
   }
 
   /**
    * Number of frames that can be written to the buffer.
    */
   uint32_t Available() {
     return SamplesToFrames(mChannels, mSamples - mSampleWriteOffset);
@@ -102,36 +96,37 @@ public:
 
   /**
    * Check that the buffer is completly filled, and reset internal state so this
    * instance can be reused.
    */
   void BufferFilled() {
     // It's okay to have exactly zero samples here, it can happen we have an
     // audio callback driver because of a hint on MSG creation, but the
-    // AudioOutputStream has not been created yet, or if all the streams have finished
-    // but we're still running.
-    // Note: it's also ok if we had data in the scratch buffer - and we usually do - and
-    // all the streams were ended (no mixer callback occured).
+    // AudioOutputStream has not been created yet, or if all the streams have
+    // finished but we're still running. Note: it's also ok if we had data in
+    // the scratch buffer - and we usually do - and all the streams were ended
+    // (no mixer callback occured).
     // XXX Remove this warning, or find a way to avoid it if the mixer callback
     // isn't called.
     NS_WARNING_ASSERTION(
-      Available() == 0 || mSampleWriteOffset == 0,
-      "Audio Buffer is not full by the end of the callback.");
+        Available() == 0 || mSampleWriteOffset == 0,
+        "Audio Buffer is not full by the end of the callback.");
     // Make sure the data returned is always set and not random!
     if (Available()) {
-      PodZero(mBuffer + mSampleWriteOffset, FramesToSamples(mChannels, Available()));
+      PodZero(mBuffer + mSampleWriteOffset,
+              FramesToSamples(mChannels, Available()));
     }
     MOZ_ASSERT(mSamples, "Buffer not set.");
     mSamples = 0;
     mSampleWriteOffset = 0;
     mBuffer = nullptr;
   }
 
-private:
+ private:
   /* This is not an owned pointer, but the pointer passed to use via the audio
    * callback. */
   T* mBuffer;
   /* The number of samples of this audio buffer. */
   uint32_t mSamples;
   /* The position at which new samples should be written. We want to return to
    * the audio callback iff this is equal to mSamples. */
   uint32_t mSampleWriteOffset;
@@ -139,91 +134,86 @@ private:
 };
 
 /**
  * This is a class that interfaces with the AudioCallbackBufferWrapper, and is
  * responsible for storing the excess of data produced by the MediaStreamGraph
  * because of different rounding constraints, to be used the next time the audio
  * backend calls back.
  */
-template<typename T, uint32_t BLOCK_SIZE>
-class SpillBuffer
-{
-public:
-  SpillBuffer()
-    : mBuffer(nullptr)
-    , mPosition(0)
-    , mChannels(0)
-  {}
+template <typename T, uint32_t BLOCK_SIZE>
+class SpillBuffer {
+ public:
+  SpillBuffer() : mBuffer(nullptr), mPosition(0), mChannels(0) {}
 
   explicit SpillBuffer(uint32_t aChannels)
-  : mPosition(0)
-  , mChannels(aChannels)
-  {
+      : mPosition(0), mChannels(aChannels) {
     MOZ_ASSERT(aChannels);
     mBuffer = MakeUnique<T[]>(BLOCK_SIZE * mChannels);
     PodZero(mBuffer.get(), BLOCK_SIZE * mChannels);
   }
 
-  SpillBuffer& operator=(SpillBuffer& aOther)
-  {
+  SpillBuffer& operator=(SpillBuffer& aOther) {
     MOZ_ASSERT(aOther.mPosition == 0,
-        "Don't use this ctor after AudioCallbackDriver::Init");
+               "Don't use this ctor after AudioCallbackDriver::Init");
     MOZ_ASSERT(aOther.mChannels != 0);
     MOZ_ASSERT(aOther.mBuffer);
 
     mPosition = aOther.mPosition;
     mChannels = aOther.mChannels;
     mBuffer = std::move(aOther.mBuffer);
 
     return *this;
   }
 
-  SpillBuffer& operator=(SpillBuffer&& aOther)
-  {
+  SpillBuffer& operator=(SpillBuffer&& aOther) {
     return this->operator=(aOther);
   }
 
   /* Empty the spill buffer into the buffer of the audio callback. This returns
    * the number of frames written. */
   uint32_t Empty(AudioCallbackBufferWrapper<T>& aBuffer) {
-    uint32_t framesToWrite = std::min(aBuffer.Available(),
-                                      SamplesToFrames(mChannels, mPosition));
+    uint32_t framesToWrite =
+        std::min(aBuffer.Available(), SamplesToFrames(mChannels, mPosition));
 
     aBuffer.WriteFrames(mBuffer.get(), framesToWrite);
 
     mPosition -= FramesToSamples(mChannels, framesToWrite);
-    // If we didn't empty the spill buffer for some reason, shift the remaining data down
+    // If we didn't empty the spill buffer for some reason, shift the remaining
+    // data down
     if (mPosition > 0) {
-      MOZ_ASSERT(FramesToSamples(mChannels, framesToWrite) + mPosition <= BLOCK_SIZE * mChannels);
-      PodMove(mBuffer.get(), mBuffer.get() + FramesToSamples(mChannels, framesToWrite),
+      MOZ_ASSERT(FramesToSamples(mChannels, framesToWrite) + mPosition <=
+                 BLOCK_SIZE * mChannels);
+      PodMove(mBuffer.get(),
+              mBuffer.get() + FramesToSamples(mChannels, framesToWrite),
               mPosition);
     }
 
     return framesToWrite;
   }
   /* Fill the spill buffer from aInput, containing aFrames frames, return the
    * number of frames written to the spill buffer */
   uint32_t Fill(T* aInput, uint32_t aFrames) {
-    uint32_t framesToWrite = std::min(aFrames,
-                                      BLOCK_SIZE - SamplesToFrames(mChannels,
-                                                                   mPosition));
+    uint32_t framesToWrite =
+        std::min(aFrames, BLOCK_SIZE - SamplesToFrames(mChannels, mPosition));
 
-    MOZ_ASSERT(FramesToSamples(mChannels, framesToWrite) + mPosition <= BLOCK_SIZE * mChannels);
-    PodCopy(mBuffer.get() + mPosition, aInput, FramesToSamples(mChannels,
-                                                         framesToWrite));
+    MOZ_ASSERT(FramesToSamples(mChannels, framesToWrite) + mPosition <=
+               BLOCK_SIZE * mChannels);
+    PodCopy(mBuffer.get() + mPosition, aInput,
+            FramesToSamples(mChannels, framesToWrite));
 
     mPosition += FramesToSamples(mChannels, framesToWrite);
 
     return framesToWrite;
   }
-private:
+
+ private:
   /* The spilled data. */
   UniquePtr<T[]> mBuffer;
   /* The current write position, in samples, in the buffer when filling, or the
    * amount of buffer filled when emptying. */
   uint32_t mPosition;
   uint32_t mChannels;
 };
 
-} // namespace mozilla
+}  // namespace mozilla
 
-#endif // MOZILLA_SCRATCHBUFFER_H_
+#endif  // MOZILLA_SCRATCHBUFFER_H_
--- a/dom/media/AudioCaptureStream.cpp
+++ b/dom/media/AudioCaptureStream.cpp
@@ -19,75 +19,65 @@
 #include "webaudio/MediaStreamAudioDestinationNode.h"
 #include <algorithm>
 #include "DOMMediaStream.h"
 
 using namespace mozilla::layers;
 using namespace mozilla::dom;
 using namespace mozilla::gfx;
 
-namespace mozilla
-{
+namespace mozilla {
 
 // We are mixing to mono until PeerConnection can accept stereo
 static const uint32_t MONO = 1;
 
 AudioCaptureStream::AudioCaptureStream(TrackID aTrackId)
-  : ProcessedMediaStream()
-  , mTrackId(aTrackId)
-  , mStarted(false)
-  , mTrackCreated(false)
-{
+    : ProcessedMediaStream(),
+      mTrackId(aTrackId),
+      mStarted(false),
+      mTrackCreated(false) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_COUNT_CTOR(AudioCaptureStream);
   mMixer.AddCallback(this);
 }
 
-AudioCaptureStream::~AudioCaptureStream()
-{
+AudioCaptureStream::~AudioCaptureStream() {
   MOZ_COUNT_DTOR(AudioCaptureStream);
   mMixer.RemoveCallback(this);
 }
 
-void
-AudioCaptureStream::Start()
-{
+void AudioCaptureStream::Start() {
   class Message : public ControlMessage {
-  public:
+   public:
     explicit Message(AudioCaptureStream* aStream)
-      : ControlMessage(aStream), mStream(aStream) {}
+        : ControlMessage(aStream), mStream(aStream) {}
 
-    virtual void Run()
-    {
-      mStream->mStarted = true;
-    }
+    virtual void Run() { mStream->mStarted = true; }
 
-  protected:
+   protected:
     AudioCaptureStream* mStream;
   };
   GraphImpl()->AppendMessage(MakeUnique<Message>(this));
 }
 
-void
-AudioCaptureStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
-                                 uint32_t aFlags)
-{
+void AudioCaptureStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
+                                      uint32_t aFlags) {
   if (!mStarted) {
     return;
   }
 
   uint32_t inputCount = mInputs.Length();
   StreamTracks::Track* track = EnsureTrack(mTrackId);
   // Notify the DOM everything is in order.
   if (!mTrackCreated) {
     for (uint32_t i = 0; i < mListeners.Length(); i++) {
       MediaStreamListener* l = mListeners[i];
       AudioSegment tmp;
-      l->NotifyQueuedTrackChanges(
-        Graph(), mTrackId, 0, TrackEventCommand::TRACK_EVENT_CREATED, tmp);
+      l->NotifyQueuedTrackChanges(Graph(), mTrackId, 0,
+                                  TrackEventCommand::TRACK_EVENT_CREATED, tmp);
       l->NotifyFinishedTrackCreation(Graph());
     }
     mTrackCreated = true;
   }
 
   if (IsFinishedOnGraphThread()) {
     return;
   }
@@ -132,40 +122,40 @@ AudioCaptureStream::ProcessInput(GraphTi
     // This calls MixerCallback below
     mMixer.FinishMixing();
   }
 
   // Regardless of the status of the input tracks, we go foward.
   mTracks.AdvanceKnownTracksTime(GraphTimeToStreamTimeWithBlocking((aTo)));
 }
 
-void
-AudioCaptureStream::MixerCallback(AudioDataValue* aMixedBuffer,
-                                  AudioSampleFormat aFormat, uint32_t aChannels,
-                                  uint32_t aFrames, uint32_t aSampleRate)
-{
+void AudioCaptureStream::MixerCallback(AudioDataValue* aMixedBuffer,
+                                       AudioSampleFormat aFormat,
+                                       uint32_t aChannels, uint32_t aFrames,
+                                       uint32_t aSampleRate) {
   AutoTArray<nsTArray<AudioDataValue>, MONO> output;
   AutoTArray<const AudioDataValue*, MONO> bufferPtrs;
   output.SetLength(MONO);
   bufferPtrs.SetLength(MONO);
 
   uint32_t written = 0;
   // We need to copy here, because the mixer will reuse the storage, we should
   // not hold onto it. Buffers are in planar format.
   for (uint32_t channel = 0; channel < aChannels; channel++) {
     AudioDataValue* out = output[channel].AppendElements(aFrames);
     PodCopy(out, aMixedBuffer + written, aFrames);
     bufferPtrs[channel] = out;
     written += aFrames;
   }
   AudioChunk chunk;
-  chunk.mBuffer = new mozilla::SharedChannelArrayBuffer<AudioDataValue>(&output);
+  chunk.mBuffer =
+      new mozilla::SharedChannelArrayBuffer<AudioDataValue>(&output);
   chunk.mDuration = aFrames;
   chunk.mBufferFormat = aFormat;
   chunk.mChannelData.SetLength(MONO);
   for (uint32_t channel = 0; channel < aChannels; channel++) {
     chunk.mChannelData[channel] = bufferPtrs[channel];
   }
 
   // Now we have mixed data, simply append it to out track.
   EnsureTrack(mTrackId)->Get<AudioSegment>()->AppendAndConsumeChunk(&chunk);
 }
-}
+}  // namespace mozilla
--- a/dom/media/AudioCaptureStream.h
+++ b/dom/media/AudioCaptureStream.h
@@ -6,40 +6,38 @@
 #ifndef MOZILLA_AUDIOCAPTURESTREAM_H_
 #define MOZILLA_AUDIOCAPTURESTREAM_H_
 
 #include "MediaStreamGraph.h"
 #include "AudioMixer.h"
 #include "StreamTracks.h"
 #include <algorithm>
 
-namespace mozilla
-{
+namespace mozilla {
 
 class AbstractThread;
 class DOMMediaStream;
 
 /**
  * See MediaStreamGraph::CreateAudioCaptureStream.
  */
 class AudioCaptureStream : public ProcessedMediaStream,
-                           public MixerCallbackReceiver
-{
-public:
+                           public MixerCallbackReceiver {
+ public:
   explicit AudioCaptureStream(TrackID aTrackId);
   virtual ~AudioCaptureStream();
 
   void Start();
 
   void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override;
 
-protected:
+ protected:
   void MixerCallback(AudioDataValue* aMixedBuffer, AudioSampleFormat aFormat,
                      uint32_t aChannels, uint32_t aFrames,
                      uint32_t aSampleRate) override;
   AudioMixer mMixer;
   TrackID mTrackId;
   bool mStarted;
   bool mTrackCreated;
 };
-}
+}  // namespace mozilla
 
 #endif /* MOZILLA_AUDIOCAPTURESTREAM_H_ */
--- a/dom/media/AudioChannelFormat.cpp
+++ b/dom/media/AudioChannelFormat.cpp
@@ -4,15 +4,13 @@
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AudioChannelFormat.h"
 
 #include <algorithm>
 
 namespace mozilla {
 
-uint32_t
-GetAudioChannelsSuperset(uint32_t aChannels1, uint32_t aChannels2)
-{
+uint32_t GetAudioChannelsSuperset(uint32_t aChannels1, uint32_t aChannels2) {
   return std::max(aChannels1, aChannels2);
 }
 
-} // namespace mozilla
+}  // namespace mozilla
--- a/dom/media/AudioChannelFormat.h
+++ b/dom/media/AudioChannelFormat.h
@@ -43,25 +43,24 @@ enum {
 const uint32_t CUSTOM_CHANNEL_LAYOUTS = 6;
 
 // This is defined by some Windows SDK header.
 #undef IGNORE
 
 const int IGNORE = CUSTOM_CHANNEL_LAYOUTS;
 const float IGNORE_F = 0.0f;
 
-const int gMixingMatrixIndexByChannels[CUSTOM_CHANNEL_LAYOUTS - 1] =
-  { 0, 5, 9, 12, 14 };
+const int gMixingMatrixIndexByChannels[CUSTOM_CHANNEL_LAYOUTS - 1] = {0, 5, 9,
+                                                                      12, 14};
 
 /**
  * Return a channel count whose channel layout includes all the channels from
  * aChannels1 and aChannels2.
  */
-uint32_t
-GetAudioChannelsSuperset(uint32_t aChannels1, uint32_t aChannels2);
+uint32_t GetAudioChannelsSuperset(uint32_t aChannels1, uint32_t aChannels2);
 
 /**
  * DownMixMatrix represents a conversion matrix efficiently by exploiting the
  * fact that each input channel contributes to at most one output channel,
  * except possibly for the C input channel in layouts that have one. Also,
  * every input channel is multiplied by the same coefficient for every output
  * channel it contributes to.
  */
@@ -72,160 +71,165 @@ struct DownMixMatrix {
   // after multiplying by mInputCoefficient[c].
   uint8_t mInputDestination[CUSTOM_CHANNEL_LAYOUTS];
   // If not IGNORE, then the C channel is copied to this output channel after
   // multiplying by its coefficient.
   uint8_t mCExtraDestination;
   float mInputCoefficient[CUSTOM_CHANNEL_LAYOUTS];
 };
 
-static const DownMixMatrix
-gDownMixMatrices[CUSTOM_CHANNEL_LAYOUTS*(CUSTOM_CHANNEL_LAYOUTS - 1)/2] =
-{
-  // Downmixes to mono
-  { { 0, 0 }, IGNORE, { 0.5f, 0.5f } },
-  { { 0, IGNORE, IGNORE }, IGNORE, { 1.0f, IGNORE_F, IGNORE_F } },
-  { { 0, 0, 0, 0 }, IGNORE, { 0.25f, 0.25f, 0.25f, 0.25f } },
-  { { 0, IGNORE, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, IGNORE_F, IGNORE_F, IGNORE_F, IGNORE_F } },
-  { { 0, 0, 0, IGNORE, 0, 0 }, IGNORE, { SQRT_ONE_HALF, SQRT_ONE_HALF, 1.0f, IGNORE_F, 0.5f, 0.5f } },
-  // Downmixes to stereo
-  { { 0, 1, IGNORE }, IGNORE, { 1.0f, 1.0f, IGNORE_F } },
-  { { 0, 1, 0, 1 }, IGNORE, { 0.5f, 0.5f, 0.5f, 0.5f } },
-  { { 0, 1, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, IGNORE_F, IGNORE_F, IGNORE_F } },
-  { { 0, 1, 0, IGNORE, 0, 1 }, 1, { 1.0f, 1.0f, SQRT_ONE_HALF, IGNORE_F, SQRT_ONE_HALF, SQRT_ONE_HALF } },
-  // Downmixes to 3-channel
-  { { 0, 1, 2, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F } },
-  { { 0, 1, 2, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F, IGNORE_F } },
-  { { 0, 1, 2, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F, IGNORE_F, IGNORE_F } },
-  // Downmixes to quad
-  { { 0, 1, 2, 3, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, 1.0f, IGNORE_F } },
-  { { 0, 1, 0, IGNORE, 2, 3 }, 1, { 1.0f, 1.0f, SQRT_ONE_HALF, IGNORE_F, 1.0f, 1.0f } },
-  // Downmixes to 5-channel
-  { { 0, 1, 2, 3, 4, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, IGNORE_F } }
-};
+static const DownMixMatrix gDownMixMatrices[CUSTOM_CHANNEL_LAYOUTS *
+                                            (CUSTOM_CHANNEL_LAYOUTS - 1) /
+                                            2] = {
+    // Downmixes to mono
+    {{0, 0}, IGNORE, {0.5f, 0.5f}},
+    {{0, IGNORE, IGNORE}, IGNORE, {1.0f, IGNORE_F, IGNORE_F}},
+    {{0, 0, 0, 0}, IGNORE, {0.25f, 0.25f, 0.25f, 0.25f}},
+    {{0, IGNORE, IGNORE, IGNORE, IGNORE},
+     IGNORE,
+     {1.0f, IGNORE_F, IGNORE_F, IGNORE_F, IGNORE_F}},
+    {{0, 0, 0, IGNORE, 0, 0},
+     IGNORE,
+     {SQRT_ONE_HALF, SQRT_ONE_HALF, 1.0f, IGNORE_F, 0.5f, 0.5f}},
+    // Downmixes to stereo
+    {{0, 1, IGNORE}, IGNORE, {1.0f, 1.0f, IGNORE_F}},
+    {{0, 1, 0, 1}, IGNORE, {0.5f, 0.5f, 0.5f, 0.5f}},
+    {{0, 1, IGNORE, IGNORE, IGNORE},
+     IGNORE,
+     {1.0f, 1.0f, IGNORE_F, IGNORE_F, IGNORE_F}},
+    {{0, 1, 0, IGNORE, 0, 1},
+     1,
+     {1.0f, 1.0f, SQRT_ONE_HALF, IGNORE_F, SQRT_ONE_HALF, SQRT_ONE_HALF}},
+    // Downmixes to 3-channel
+    {{0, 1, 2, IGNORE}, IGNORE, {1.0f, 1.0f, 1.0f, IGNORE_F}},
+    {{0, 1, 2, IGNORE, IGNORE}, IGNORE, {1.0f, 1.0f, 1.0f, IGNORE_F, IGNORE_F}},
+    {{0, 1, 2, IGNORE, IGNORE, IGNORE},
+     IGNORE,
+     {1.0f, 1.0f, 1.0f, IGNORE_F, IGNORE_F, IGNORE_F}},
+    // Downmixes to quad
+    {{0, 1, 2, 3, IGNORE}, IGNORE, {1.0f, 1.0f, 1.0f, 1.0f, IGNORE_F}},
+    {{0, 1, 0, IGNORE, 2, 3},
+     1,
+     {1.0f, 1.0f, SQRT_ONE_HALF, IGNORE_F, 1.0f, 1.0f}},
+    // Downmixes to 5-channel
+    {{0, 1, 2, 3, 4, IGNORE},
+     IGNORE,
+     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, IGNORE_F}}};
 
 /**
  * Given an array of input channels, downmix to aOutputChannelCount, and copy
  * the results to the channel buffers in aOutputChannels.  Don't call this with
  * input count <= output count.
  */
-template<typename T>
+template <typename T>
 void AudioChannelsDownMix(const nsTArray<const T*>& aChannelArray,
-                          T** aOutputChannels,
-                          uint32_t aOutputChannelCount,
-                          uint32_t aDuration)
-{
+                          T** aOutputChannels, uint32_t aOutputChannelCount,
+                          uint32_t aDuration) {
   uint32_t inputChannelCount = aChannelArray.Length();
   const T* const* inputChannels = aChannelArray.Elements();
   NS_ASSERTION(inputChannelCount > aOutputChannelCount, "Nothing to do");
 
   if (inputChannelCount > 6) {
     // Just drop the unknown channels.
     for (uint32_t o = 0; o < aOutputChannelCount; ++o) {
       PodCopy(aOutputChannels[o], inputChannels[o], aDuration);
     }
     return;
   }
 
   // Ignore unknown channels, they're just dropped.
   inputChannelCount = std::min<uint32_t>(6, inputChannelCount);
 
-  const DownMixMatrix& m = gDownMixMatrices[
-    gMixingMatrixIndexByChannels[aOutputChannelCount - 1] +
-    inputChannelCount - aOutputChannelCount - 1];
+  const DownMixMatrix& m =
+      gDownMixMatrices[gMixingMatrixIndexByChannels[aOutputChannelCount - 1] +
+                       inputChannelCount - aOutputChannelCount - 1];
 
   // This is slow, but general. We can define custom code for special
   // cases later.
   for (uint32_t s = 0; s < aDuration; ++s) {
     // Reserve an extra junk channel at the end for the cases where we
     // want an input channel to contribute to nothing
     T outputChannels[CUSTOM_CHANNEL_LAYOUTS + 1] = {0};
     for (uint32_t c = 0; c < inputChannelCount; ++c) {
       outputChannels[m.mInputDestination[c]] +=
-        m.mInputCoefficient[c]*(static_cast<const T*>(inputChannels[c]))[s];
+          m.mInputCoefficient[c] * (static_cast<const T*>(inputChannels[c]))[s];
     }
     // Utilize the fact that in every layout, C is the third channel.
     if (m.mCExtraDestination != IGNORE) {
       outputChannels[m.mCExtraDestination] +=
-        m.mInputCoefficient[SURROUND_C]*(static_cast<const T*>(inputChannels[SURROUND_C]))[s];
+          m.mInputCoefficient[SURROUND_C] *
+          (static_cast<const T*>(inputChannels[SURROUND_C]))[s];
     }
 
     for (uint32_t c = 0; c < aOutputChannelCount; ++c) {
       aOutputChannels[c][s] = outputChannels[c];
     }
   }
 }
 
 /**
  * UpMixMatrix represents a conversion matrix by exploiting the fact that
  * each output channel comes from at most one input channel.
  */
 struct UpMixMatrix {
   uint8_t mInputDestination[CUSTOM_CHANNEL_LAYOUTS];
 };
 
-static const UpMixMatrix
-gUpMixMatrices[CUSTOM_CHANNEL_LAYOUTS*(CUSTOM_CHANNEL_LAYOUTS - 1)/2] =
-{
-  // Upmixes from mono
-  { { 0, 0 } },
-  { { 0, IGNORE, IGNORE } },
-  { { 0, 0, IGNORE, IGNORE } },
-  { { 0, IGNORE, IGNORE, IGNORE, IGNORE } },
-  { { IGNORE, IGNORE, 0, IGNORE, IGNORE, IGNORE } },
-  // Upmixes from stereo
-  { { 0, 1, IGNORE } },
-  { { 0, 1, IGNORE, IGNORE } },
-  { { 0, 1, IGNORE, IGNORE, IGNORE } },
-  { { 0, 1, IGNORE, IGNORE, IGNORE, IGNORE } },
-  // Upmixes from 3-channel
-  { { 0, 1, 2, IGNORE } },
-  { { 0, 1, 2, IGNORE, IGNORE } },
-  { { 0, 1, 2, IGNORE, IGNORE, IGNORE } },
-  // Upmixes from quad
-  { { 0, 1, 2, 3, IGNORE } },
-  { { 0, 1, IGNORE, IGNORE, 2, 3 } },
-  // Upmixes from 5-channel
-  { { 0, 1, 2, 3, 4, IGNORE } }
-};
-
+static const UpMixMatrix gUpMixMatrices[CUSTOM_CHANNEL_LAYOUTS *
+                                        (CUSTOM_CHANNEL_LAYOUTS - 1) / 2] = {
+    // Upmixes from mono
+    {{0, 0}},
+    {{0, IGNORE, IGNORE}},
+    {{0, 0, IGNORE, IGNORE}},
+    {{0, IGNORE, IGNORE, IGNORE, IGNORE}},
+    {{IGNORE, IGNORE, 0, IGNORE, IGNORE, IGNORE}},
+    // Upmixes from stereo
+    {{0, 1, IGNORE}},
+    {{0, 1, IGNORE, IGNORE}},
+    {{0, 1, IGNORE, IGNORE, IGNORE}},
+    {{0, 1, IGNORE, IGNORE, IGNORE, IGNORE}},
+    // Upmixes from 3-channel
+    {{0, 1, 2, IGNORE}},
+    {{0, 1, 2, IGNORE, IGNORE}},
+    {{0, 1, 2, IGNORE, IGNORE, IGNORE}},
+    // Upmixes from quad
+    {{0, 1, 2, 3, IGNORE}},
+    {{0, 1, IGNORE, IGNORE, 2, 3}},
+    // Upmixes from 5-channel
+    {{0, 1, 2, 3, 4, IGNORE}}};
 
 /**
  * Given an array of input channel data, and an output channel count,
  * replaces the array with an array of upmixed channels.
  * This shuffles the array and may set some channel buffers to aZeroChannel.
  * Don't call this with input count >= output count.
  * This may return *more* channels than requested. In that case, downmixing
  * is required to to get to aOutputChannelCount. (This is how we handle
  * odd cases like 3 -> 4 upmixing.)
  * If aChannelArray.Length() was the input to one of a series of
  * GetAudioChannelsSuperset calls resulting in aOutputChannelCount,
  * no downmixing will be required.
  */
-template<typename T>
-void
-AudioChannelsUpMix(nsTArray<const T*>* aChannelArray,
-                   uint32_t aOutputChannelCount,
-                   const T* aZeroChannel)
-{
+template <typename T>
+void AudioChannelsUpMix(nsTArray<const T*>* aChannelArray,
+                        uint32_t aOutputChannelCount, const T* aZeroChannel) {
   uint32_t inputChannelCount = aChannelArray->Length();
   uint32_t outputChannelCount =
-    GetAudioChannelsSuperset(aOutputChannelCount, inputChannelCount);
-  NS_ASSERTION(outputChannelCount > inputChannelCount,
-               "No up-mix needed");
+      GetAudioChannelsSuperset(aOutputChannelCount, inputChannelCount);
+  NS_ASSERTION(outputChannelCount > inputChannelCount, "No up-mix needed");
   MOZ_ASSERT(inputChannelCount > 0, "Bad number of channels");
   MOZ_ASSERT(outputChannelCount > 0, "Bad number of channels");
 
   aChannelArray->SetLength(outputChannelCount);
 
   if (inputChannelCount < CUSTOM_CHANNEL_LAYOUTS &&
       outputChannelCount <= CUSTOM_CHANNEL_LAYOUTS) {
-    const UpMixMatrix& m = gUpMixMatrices[
-      gMixingMatrixIndexByChannels[inputChannelCount - 1] +
-      outputChannelCount - inputChannelCount - 1];
+    const UpMixMatrix& m =
+        gUpMixMatrices[gMixingMatrixIndexByChannels[inputChannelCount - 1] +
+                       outputChannelCount - inputChannelCount - 1];
 
     const T* outputChannels[CUSTOM_CHANNEL_LAYOUTS];
 
     for (uint32_t i = 0; i < outputChannelCount; ++i) {
       uint8_t channelIndex = m.mInputDestination[i];
       if (channelIndex == IGNORE) {
         outputChannels[i] = aZeroChannel;
       } else {
@@ -238,11 +242,11 @@ AudioChannelsUpMix(nsTArray<const T*>* a
     return;
   }
 
   for (uint32_t i = inputChannelCount; i < outputChannelCount; ++i) {
     aChannelArray->ElementAt(i) = aZeroChannel;
   }
 }
 
-} // namespace mozilla
+}  // namespace mozilla
 
 #endif /* MOZILLA_AUDIOCHANNELFORMAT_H_ */
--- a/dom/media/AudioCompactor.cpp
+++ b/dom/media/AudioCompactor.cpp
@@ -1,62 +1,54 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 #include "AudioCompactor.h"
 #if defined(MOZ_MEMORY)
-# include "mozmemory.h"
+#include "mozmemory.h"
 #endif
 
 namespace mozilla {
 
-static size_t
-MallocGoodSize(size_t aSize)
-{
-# if defined(MOZ_MEMORY)
+static size_t MallocGoodSize(size_t aSize) {
+#if defined(MOZ_MEMORY)
   return malloc_good_size(aSize);
-# else
+#else
   return aSize;
-# endif
+#endif
 }
 
-static size_t
-TooMuchSlop(size_t aSize, size_t aAllocSize, size_t aMaxSlop)
-{
+static size_t TooMuchSlop(size_t aSize, size_t aAllocSize, size_t aMaxSlop) {
   // If the allocated size is less then our target size, then we
   // are chunking.  This means it will be completely filled with
   // zero slop.
   size_t slop = (aAllocSize > aSize) ? (aAllocSize - aSize) : 0;
   return slop > aMaxSlop;
 }
 
-uint32_t
-AudioCompactor::GetChunkSamples(uint32_t aFrames, uint32_t aChannels,
-                                size_t aMaxSlop)
-{
+uint32_t AudioCompactor::GetChunkSamples(uint32_t aFrames, uint32_t aChannels,
+                                         size_t aMaxSlop) {
   size_t size = AudioDataSize(aFrames, aChannels);
   size_t chunkSize = MallocGoodSize(size);
 
   // Reduce the chunk size until we meet our slop goal or the chunk
   // approaches an unreasonably small size.
   while (chunkSize > 64 && TooMuchSlop(size, chunkSize, aMaxSlop)) {
     chunkSize = MallocGoodSize(chunkSize / 2);
   }
 
   // Calculate the number of samples based on expected malloc size
   // in order to allow as many frames as possible to be packed.
   return chunkSize / sizeof(AudioDataValue);
 }
 
-uint32_t
-AudioCompactor::NativeCopy::operator()(AudioDataValue *aBuffer,
-                                       uint32_t aSamples)
-{
+uint32_t AudioCompactor::NativeCopy::operator()(AudioDataValue *aBuffer,
+                                                uint32_t aSamples) {
   NS_ASSERTION(aBuffer, "cannot copy to null buffer pointer");
   NS_ASSERTION(aSamples, "cannot copy zero values");
 
   size_t bufferBytes = aSamples * sizeof(AudioDataValue);
   size_t maxBytes = std::min(bufferBytes, mSourceBytes - mNextByte);
   uint32_t frames = maxBytes / BytesPerFrame(mChannels);
   size_t bytes = frames * BytesPerFrame(mChannels);
 
@@ -65,9 +57,9 @@ AudioCompactor::NativeCopy::operator()(A
   NS_ASSERTION(bytes <= bufferBytes, "tried to copy beyond destination buffer");
 
   memcpy(aBuffer, mSource + mNextByte, bytes);
 
   mNextByte += bytes;
   return frames;
 }
 
-} // namespace mozilla
+}  // namespace mozilla
--- a/dom/media/AudioCompactor.h
+++ b/dom/media/AudioCompactor.h
@@ -7,22 +7,19 @@
 #define AudioCompactor_h
 
 #include "MediaQueue.h"
 #include "MediaData.h"
 #include "VideoUtils.h"
 
 namespace mozilla {
 
-class AudioCompactor
-{
-public:
-  explicit AudioCompactor(MediaQueue<AudioData>& aQueue)
-    : mQueue(aQueue)
-  {
+class AudioCompactor {
+ public:
+  explicit AudioCompactor(MediaQueue<AudioData>& aQueue) : mQueue(aQueue) {
     // Determine padding size used by AlignedBuffer.
     size_t paddedSize = AlignedAudioBuffer::AlignmentPaddingSize();
     mSamplesPadding = paddedSize / sizeof(AudioDataValue);
     if (mSamplesPadding * sizeof(AudioDataValue) < paddedSize) {
       // Round up.
       mSamplesPadding++;
     }
   }
@@ -34,20 +31,19 @@ public:
   //
   //   uint32_t operator()(AudioDataValue *aBuffer, uint32_t aSamples);
   //
   // The functor must copy as many complete frames as possible to the provided
   // buffer given its length (in AudioDataValue elements).  The number of frames
   // copied must be returned.  This copy functor must support being called
   // multiple times in order to copy the audio data fully.  The copy functor
   // must copy full frames as partial frames will be ignored.
-  template<typename CopyFunc>
+  template <typename CopyFunc>
   bool Push(int64_t aOffset, int64_t aTime, int32_t aSampleRate,
-            uint32_t aFrames, uint32_t aChannels, CopyFunc aCopyFunc)
-  {
+            uint32_t aFrames, uint32_t aChannels, CopyFunc aCopyFunc) {
     auto time = media::TimeUnit::FromMicroseconds(aTime);
 
     // If we are losing more than a reasonable amount to padding, try to chunk
     // the data.
     size_t maxSlop = AudioDataSize(aFrames, aChannels) / MAX_SLOP_DIVISOR;
 
     while (aFrames > 0) {
       uint32_t samples = GetChunkSamples(aFrames, aChannels, maxSlop);
@@ -65,76 +61,66 @@ public:
       NS_ASSERTION(framesCopied <= aFrames, "functor copied too many frames");
       buffer.SetLength(size_t(framesCopied) * aChannels);
 
       auto duration = FramesToTimeUnit(framesCopied, aSampleRate);
       if (!duration.IsValid()) {
         return false;
       }
 
-      mQueue.Push(new AudioData(aOffset,
-                                time,
-                                duration,
-                                framesCopied,
-                                std::move(buffer),
-                                aChannels,
-                                aSampleRate));
+      mQueue.Push(new AudioData(aOffset, time, duration, framesCopied,
+                                std::move(buffer), aChannels, aSampleRate));
 
       // Remove the frames we just pushed into the queue and loop if there is
       // more to be done.
       time += duration;
       aFrames -= framesCopied;
 
       // NOTE: No need to update aOffset as its only an approximation anyway.
     }
 
     return true;
   }
 
   // Copy functor suitable for copying audio samples already in the
   // AudioDataValue format/layout expected by AudioStream on this platform.
-  class NativeCopy
-  {
-  public:
-    NativeCopy(const uint8_t* aSource, size_t aSourceBytes,
-               uint32_t aChannels)
-      : mSource(aSource)
-      , mSourceBytes(aSourceBytes)
-      , mChannels(aChannels)
-      , mNextByte(0)
-    { }
+  class NativeCopy {
+   public:
+    NativeCopy(const uint8_t* aSource, size_t aSourceBytes, uint32_t aChannels)
+        : mSource(aSource),
+          mSourceBytes(aSourceBytes),
+          mChannels(aChannels),
+          mNextByte(0) {}
 
-    uint32_t operator()(AudioDataValue *aBuffer, uint32_t aSamples);
+    uint32_t operator()(AudioDataValue* aBuffer, uint32_t aSamples);
 
-  private:
+   private:
     const uint8_t* const mSource;
     const size_t mSourceBytes;
     const uint32_t mChannels;
     size_t mNextByte;
   };
 
   // Allow 12.5% slop before chunking kicks in.  Public so that the gtest can
   // access it.
   static const size_t MAX_SLOP_DIVISOR = 8;
 
-private:
+ private:
   // Compute the number of AudioDataValue samples that will be fit the most
   // frames while keeping heap allocation slop less than the given threshold.
-  static uint32_t
-  GetChunkSamples(uint32_t aFrames, uint32_t aChannels, size_t aMaxSlop);
+  static uint32_t GetChunkSamples(uint32_t aFrames, uint32_t aChannels,
+                                  size_t aMaxSlop);
 
-  static size_t BytesPerFrame(uint32_t aChannels)
-  {
+  static size_t BytesPerFrame(uint32_t aChannels) {
     return sizeof(AudioDataValue) * aChannels;
   }
 
-  static size_t AudioDataSize(uint32_t aFrames, uint32_t aChannels)
-  {
+  static size_t AudioDataSize(uint32_t aFrames, uint32_t aChannels) {
     return aFrames * BytesPerFrame(aChannels);
   }
 
-  MediaQueue<AudioData> &mQueue;
+  MediaQueue<AudioData>& mQueue;
   size_t mSamplesPadding;
 };
 
-} // namespace mozilla
+}  // namespace mozilla
 
-#endif // AudioCompactor_h
+#endif  // AudioCompactor_h
--- a/dom/media/AudioConfig.cpp
+++ b/dom/media/AudioConfig.cpp
@@ -31,30 +31,26 @@ typedef AudioConfig::ChannelLayout Chann
  2F2            L   R   LS   RS
  2F2-LFE        L   R   LFE  LS   RS
  3F2            L   R   C    LS   RS
  3F2-LFE        L   R   C    LFE  LS   RS
  3F3R-LFE       L   R   C    LFE  BC   LS   RS
  3F4-LFE        L   R   C    LFE  Rls  Rrs  LS   RS
 */
 
-void
-AudioConfig::ChannelLayout::UpdateChannelMap()
-{
+void AudioConfig::ChannelLayout::UpdateChannelMap() {
   mValid = mChannels.Length() <= MAX_CHANNELS;
   mChannelMap = UNKNOWN_MAP;
   if (mValid) {
     mChannelMap = Map();
     mValid = mChannelMap > 0;
   }
 }
 
-auto
-AudioConfig::ChannelLayout::Map() const -> ChannelMap
-{
+auto AudioConfig::ChannelLayout::Map() const -> ChannelMap {
   if (mChannelMap != UNKNOWN_MAP) {
     return mChannelMap;
   }
   if (mChannels.Length() > MAX_CHANNELS) {
     return UNKNOWN_MAP;
   }
   ChannelMap map = UNKNOWN_MAP;
   for (size_t i = 0; i < mChannels.Length(); i++) {
@@ -67,164 +63,148 @@ AudioConfig::ChannelLayout::Map() const 
       return UNKNOWN_MAP;
     }
     map |= mask;
   }
   return map;
 }
 
 const AudioConfig::Channel*
-AudioConfig::ChannelLayout::DefaultLayoutForChannels(uint32_t aChannels) const
-{
+AudioConfig::ChannelLayout::DefaultLayoutForChannels(uint32_t aChannels) const {
   switch (aChannels) {
-    case 1: // MONO
+    case 1:  // MONO
     {
-      static const Channel config[] = { CHANNEL_FRONT_CENTER };
+      static const Channel config[] = {CHANNEL_FRONT_CENTER};
       return config;
     }
-    case 2: // STEREO
+    case 2:  // STEREO
     {
-      static const Channel config[] = { CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT };
+      static const Channel config[] = {CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT};
       return config;
     }
-    case 3: // 3F
+    case 3:  // 3F
     {
-      static const Channel config[] = { CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, CHANNEL_FRONT_CENTER };
+      static const Channel config[] = {CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT,
+                                       CHANNEL_FRONT_CENTER};
+      return config;
+    }
+    case 4:  // QUAD
+    {
+      static const Channel config[] = {CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT,
+                                       CHANNEL_BACK_LEFT, CHANNEL_BACK_RIGHT};
       return config;
     }
-    case 4: // QUAD
+    case 5:  // 3F2
     {
-      static const Channel config[] = { CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, CHANNEL_BACK_LEFT, CHANNEL_BACK_RIGHT };
+      static const Channel config[] = {CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT,
+                                       CHANNEL_FRONT_CENTER, CHANNEL_SIDE_LEFT,
+                                       CHANNEL_SIDE_RIGHT};
       return config;
     }
-    case 5: // 3F2
+    case 6:  // 3F2-LFE
     {
-      static const Channel config[] = { CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, CHANNEL_FRONT_CENTER, CHANNEL_SIDE_LEFT, CHANNEL_SIDE_RIGHT };
+      static const Channel config[] = {
+          CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, CHANNEL_FRONT_CENTER,
+          CHANNEL_LFE,        CHANNEL_SIDE_LEFT,   CHANNEL_SIDE_RIGHT};
       return config;
     }
-    case 6: // 3F2-LFE
+    case 7:  // 3F3R-LFE
     {
-      static const Channel config[] = { CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, CHANNEL_FRONT_CENTER, CHANNEL_LFE, CHANNEL_SIDE_LEFT, CHANNEL_SIDE_RIGHT };
+      static const Channel config[] = {
+          CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, CHANNEL_FRONT_CENTER,
+          CHANNEL_LFE,        CHANNEL_BACK_CENTER, CHANNEL_SIDE_LEFT,
+          CHANNEL_SIDE_RIGHT};
       return config;
     }
-    case 7: // 3F3R-LFE
+    case 8:  // 3F4-LFE
     {
-      static const Channel config[] = { CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, CHANNEL_FRONT_CENTER, CHANNEL_LFE, CHANNEL_BACK_CENTER, CHANNEL_SIDE_LEFT, CHANNEL_SIDE_RIGHT };
-      return config;
-    }
-    case 8: // 3F4-LFE
-    {
-      static const Channel config[] = { CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, CHANNEL_FRONT_CENTER, CHANNEL_LFE, CHANNEL_BACK_LEFT, CHANNEL_BACK_RIGHT, CHANNEL_SIDE_LEFT, CHANNEL_SIDE_RIGHT };
+      static const Channel config[] = {
+          CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, CHANNEL_FRONT_CENTER,
+          CHANNEL_LFE,        CHANNEL_BACK_LEFT,   CHANNEL_BACK_RIGHT,
+          CHANNEL_SIDE_LEFT,  CHANNEL_SIDE_RIGHT};
       return config;
     }
     default:
       return nullptr;
   }
 }
 
 /* static */ AudioConfig::ChannelLayout
-AudioConfig::ChannelLayout::SMPTEDefault(
-  const ChannelLayout& aChannelLayout)
-{
+AudioConfig::ChannelLayout::SMPTEDefault(const ChannelLayout& aChannelLayout) {
   if (!aChannelLayout.IsValid()) {
     return aChannelLayout;
   }
   return SMPTEDefault(aChannelLayout.Map());
 }
 
-/* static */ ChannelLayout
-AudioConfig::ChannelLayout::SMPTEDefault(ChannelMap aMap)
-{
+/* static */ ChannelLayout AudioConfig::ChannelLayout::SMPTEDefault(
+    ChannelMap aMap) {
   // First handle the most common cases.
   switch (aMap) {
     case LMONO_MAP:
-      return ChannelLayout{ CHANNEL_FRONT_CENTER };
+      return ChannelLayout{CHANNEL_FRONT_CENTER};
     case LSTEREO_MAP:
-      return ChannelLayout{ CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT };
+      return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT};
     case L3F_MAP:
-      return ChannelLayout{ CHANNEL_FRONT_LEFT,
-                            CHANNEL_FRONT_RIGHT,
-                            CHANNEL_FRONT_CENTER };
+      return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT,
+                           CHANNEL_FRONT_CENTER};
     case L3F_LFE_MAP:
-      return ChannelLayout{ CHANNEL_FRONT_LEFT,
-                            CHANNEL_FRONT_RIGHT,
-                            CHANNEL_FRONT_CENTER,
-                            CHANNEL_LFE };
+      return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT,
+                           CHANNEL_FRONT_CENTER, CHANNEL_LFE};
     case L2F1_MAP:
-      return ChannelLayout{ CHANNEL_FRONT_LEFT,
-                            CHANNEL_FRONT_RIGHT,
-                            CHANNEL_BACK_CENTER };
+      return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT,
+                           CHANNEL_BACK_CENTER};
     case L2F1_LFE_MAP:
-      return ChannelLayout{ CHANNEL_FRONT_LEFT,
-                            CHANNEL_FRONT_RIGHT,
-                            CHANNEL_LFE,
-                            CHANNEL_BACK_CENTER };
+      return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, CHANNEL_LFE,
+                           CHANNEL_BACK_CENTER};
     case L3F1_MAP:
-      return ChannelLayout{ CHANNEL_FRONT_LEFT,
-                            CHANNEL_FRONT_RIGHT,
-                            CHANNEL_FRONT_CENTER,
-                            CHANNEL_BACK_CENTER };
+      return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT,
+                           CHANNEL_FRONT_CENTER, CHANNEL_BACK_CENTER};
     case L3F1_LFE_MAP:
-      return ChannelLayout{ CHANNEL_FRONT_LEFT,
-                            CHANNEL_FRONT_RIGHT,
-                            CHANNEL_FRONT_CENTER,
-                            CHANNEL_LFE,
-                            CHANNEL_BACK_CENTER };
+      return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT,
+                           CHANNEL_FRONT_CENTER, CHANNEL_LFE,
+                           CHANNEL_BACK_CENTER};
     case L2F2_MAP:
-      return ChannelLayout{ CHANNEL_FRONT_LEFT,
-                            CHANNEL_FRONT_RIGHT,
-                            CHANNEL_SIDE_LEFT,
-                            CHANNEL_SIDE_RIGHT };
+      return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT,
+                           CHANNEL_SIDE_LEFT, CHANNEL_SIDE_RIGHT};
     case L2F2_LFE_MAP:
-      return ChannelLayout{ CHANNEL_FRONT_LEFT,
-                            CHANNEL_FRONT_RIGHT,
-                            CHANNEL_LFE,
-                            CHANNEL_SIDE_LEFT,
-                            CHANNEL_SIDE_RIGHT };
+      return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, CHANNEL_LFE,
+                           CHANNEL_SIDE_LEFT, CHANNEL_SIDE_RIGHT};
     case LQUAD_MAP:
-      return ChannelLayout{ CHANNEL_FRONT_LEFT,
-                            CHANNEL_FRONT_RIGHT,
-                            CHANNEL_BACK_LEFT,
-                            CHANNEL_BACK_RIGHT };
+      return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT,
+                           CHANNEL_BACK_LEFT, CHANNEL_BACK_RIGHT};
     case LQUAD_LFE_MAP:
-      return ChannelLayout{ CHANNEL_FRONT_LEFT,
-                            CHANNEL_FRONT_RIGHT,
-                            CHANNEL_LFE,
-                            CHANNEL_BACK_LEFT,
-                            CHANNEL_BACK_RIGHT };
+      return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT, CHANNEL_LFE,
+                           CHANNEL_BACK_LEFT, CHANNEL_BACK_RIGHT};
     case L3F2_MAP:
-      return ChannelLayout{ CHANNEL_FRONT_LEFT,
-                            CHANNEL_FRONT_RIGHT,
-                            CHANNEL_FRONT_CENTER,
-                            CHANNEL_SIDE_LEFT,
-                            CHANNEL_SIDE_RIGHT };
+      return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT,
+                           CHANNEL_FRONT_CENTER, CHANNEL_SIDE_LEFT,
+                           CHANNEL_SIDE_RIGHT};
     case L3F2_LFE_MAP:
-      return ChannelLayout{ CHANNEL_FRONT_LEFT,   CHANNEL_FRONT_RIGHT,
-                            CHANNEL_FRONT_CENTER, CHANNEL_LFE,
-                            CHANNEL_SIDE_LEFT,    CHANNEL_SIDE_RIGHT };
+      return ChannelLayout{CHANNEL_FRONT_LEFT,   CHANNEL_FRONT_RIGHT,
+                           CHANNEL_FRONT_CENTER, CHANNEL_LFE,
+                           CHANNEL_SIDE_LEFT,    CHANNEL_SIDE_RIGHT};
     case L3F2_BACK_MAP:
-      return ChannelLayout{ CHANNEL_FRONT_LEFT,
-                            CHANNEL_FRONT_RIGHT,
-                            CHANNEL_FRONT_CENTER,
-                            CHANNEL_BACK_LEFT,
-                            CHANNEL_BACK_RIGHT };
+      return ChannelLayout{CHANNEL_FRONT_LEFT, CHANNEL_FRONT_RIGHT,
+                           CHANNEL_FRONT_CENTER, CHANNEL_BACK_LEFT,
+                           CHANNEL_BACK_RIGHT};
     case L3F2_BACK_LFE_MAP:
-      return ChannelLayout{ CHANNEL_FRONT_LEFT,   CHANNEL_FRONT_RIGHT,
-                            CHANNEL_FRONT_CENTER, CHANNEL_LFE,
-                            CHANNEL_BACK_LEFT,    CHANNEL_BACK_RIGHT };
+      return ChannelLayout{CHANNEL_FRONT_LEFT,   CHANNEL_FRONT_RIGHT,
+                           CHANNEL_FRONT_CENTER, CHANNEL_LFE,
+                           CHANNEL_BACK_LEFT,    CHANNEL_BACK_RIGHT};
     case L3F3R_LFE_MAP:
-      return ChannelLayout{ CHANNEL_FRONT_LEFT,   CHANNEL_FRONT_RIGHT,
-                            CHANNEL_FRONT_CENTER, CHANNEL_LFE,
-                            CHANNEL_BACK_CENTER,  CHANNEL_SIDE_LEFT,
-                            CHANNEL_SIDE_RIGHT };
+      return ChannelLayout{CHANNEL_FRONT_LEFT,   CHANNEL_FRONT_RIGHT,
+                           CHANNEL_FRONT_CENTER, CHANNEL_LFE,
+                           CHANNEL_BACK_CENTER,  CHANNEL_SIDE_LEFT,
+                           CHANNEL_SIDE_RIGHT};
     case L3F4_LFE_MAP:
-      return ChannelLayout{ CHANNEL_FRONT_LEFT,   CHANNEL_FRONT_RIGHT,
-                            CHANNEL_FRONT_CENTER, CHANNEL_LFE,
-                            CHANNEL_BACK_LEFT,    CHANNEL_BACK_RIGHT,
-                            CHANNEL_SIDE_LEFT,    CHANNEL_SIDE_RIGHT };
+      return ChannelLayout{CHANNEL_FRONT_LEFT,   CHANNEL_FRONT_RIGHT,
+                           CHANNEL_FRONT_CENTER, CHANNEL_LFE,
+                           CHANNEL_BACK_LEFT,    CHANNEL_BACK_RIGHT,
+                           CHANNEL_SIDE_LEFT,    CHANNEL_SIDE_RIGHT};
     default:
       break;
   }
 
   static_assert(MAX_CHANNELS <= sizeof(ChannelMap) * 8,
                 "Must be able to fit channels on bit mask");
   AutoTArray<Channel, MAX_CHANNELS> layout;
   uint32_t channels = 0;
@@ -239,20 +219,18 @@ AudioConfig::ChannelLayout::SMPTEDefault
       layout.AppendElement(static_cast<Channel>(i));
     }
     aMap >>= 1;
     i++;
   }
   return ChannelLayout(channels, layout.Elements());
 }
 
-bool
-AudioConfig::ChannelLayout::MappingTable(const ChannelLayout& aOther,
-                                         nsTArray<uint8_t>* aMap) const
-{
+bool AudioConfig::ChannelLayout::MappingTable(const ChannelLayout& aOther,
+                                              nsTArray<uint8_t>* aMap) const {
   if (!IsValid() || !aOther.IsValid() || Map() != aOther.Map()) {
     if (aMap) {
       aMap->SetLength(0);
     }
     return false;
   }
   if (!aMap) {
     return true;
@@ -268,88 +246,98 @@ AudioConfig::ChannelLayout::MappingTable
   }
   return true;
 }
 
 /**
  * AudioConfig::ChannelConfig
  */
 
-/* static */ const char*
-AudioConfig::FormatToString(AudioConfig::SampleFormat aFormat)
-{
+/* static */ const char* AudioConfig::FormatToString(
+    AudioConfig::SampleFormat aFormat) {
   switch (aFormat) {
-    case FORMAT_U8:     return "unsigned 8 bit";
-    case FORMAT_S16:    return "signed 16 bit";
-    case FORMAT_S24:    return "signed 24 bit MSB";
-    case FORMAT_S24LSB: return "signed 24 bit LSB";
-    case FORMAT_S32:    return "signed 32 bit";
-    case FORMAT_FLT:    return "32 bit floating point";
-    case FORMAT_NONE:   return "none";
-    default:            return "unknown";
+    case FORMAT_U8:
+      return "unsigned 8 bit";
+    case FORMAT_S16:
+      return "signed 16 bit";
+    case FORMAT_S24:
+      return "signed 24 bit MSB";
+    case FORMAT_S24LSB:
+      return "signed 24 bit LSB";
+    case FORMAT_S32:
+      return "signed 32 bit";
+    case FORMAT_FLT:
+      return "32 bit floating point";
+    case FORMAT_NONE:
+      return "none";
+    default:
+      return "unknown";
   }
 }
-/* static */ uint32_t
-AudioConfig::SampleSize(AudioConfig::SampleFormat aFormat)
-{
+/* static */ uint32_t AudioConfig::SampleSize(
+    AudioConfig::SampleFormat aFormat) {
   switch (aFormat) {
-    case FORMAT_U8:     return 1;
-    case FORMAT_S16:    return 2;
-    case FORMAT_S24:    MOZ_FALLTHROUGH;
-    case FORMAT_S24LSB: MOZ_FALLTHROUGH;
-    case FORMAT_S32:    MOZ_FALLTHROUGH;
-    case FORMAT_FLT:    return 4;
+    case FORMAT_U8:
+      return 1;
+    case FORMAT_S16:
+      return 2;
+    case FORMAT_S24:
+      MOZ_FALLTHROUGH;
+    case FORMAT_S24LSB:
+      MOZ_FALLTHROUGH;
+    case FORMAT_S32:
+      MOZ_FALLTHROUGH;
+    case FORMAT_FLT:
+      return 4;
     case FORMAT_NONE:
-    default:            return 0;
+    default:
+      return 0;
   }
 }
 
-/* static */ uint32_t
-AudioConfig::FormatToBits(AudioConfig::SampleFormat aFormat)
-{
+/* static */ uint32_t AudioConfig::FormatToBits(
+    AudioConfig::SampleFormat aFormat) {
   switch (aFormat) {
-    case FORMAT_U8:     return 8;
-    case FORMAT_S16:    return 16;
-    case FORMAT_S24LSB: MOZ_FALLTHROUGH;
-    case FORMAT_S24:    return 24;
-    case FORMAT_S32:    MOZ_FALLTHROUGH;
-    case FORMAT_FLT:    return 32;
-    case FORMAT_NONE:   MOZ_FALLTHROUGH;
-    default:            return 0;
+    case FORMAT_U8:
+      return 8;
+    case FORMAT_S16:
+      return 16;
+    case FORMAT_S24LSB:
+      MOZ_FALLTHROUGH;
+    case FORMAT_S24:
+      return 24;
+    case FORMAT_S32:
+      MOZ_FALLTHROUGH;
+    case FORMAT_FLT:
+      return 32;
+    case FORMAT_NONE:
+      MOZ_FALLTHROUGH;
+    default:
+      return 0;
   }
 }
 
 AudioConfig::AudioConfig(const ChannelLayout& aChannelLayout, uint32_t aRate,
                          AudioConfig::SampleFormat aFormat, bool aInterleaved)
-  : mChannelLayout(aChannelLayout)
-  , mChannels(aChannelLayout.Count())
-  , mRate(aRate)
-  , mFormat(aFormat)
-  , mInterleaved(aInterleaved)
-{
-}
+    : mChannelLayout(aChannelLayout),
+      mChannels(aChannelLayout.Count()),
+      mRate(aRate),
+      mFormat(aFormat),
+      mInterleaved(aInterleaved) {}
 
 AudioConfig::AudioConfig(const ChannelLayout& aChannelLayout,
-                         uint32_t aChannels,
-                         uint32_t aRate,
-                         AudioConfig::SampleFormat aFormat,
-                         bool aInterleaved)
-  : mChannelLayout(aChannelLayout)
-  , mChannels(aChannels)
-  , mRate(aRate)
-  , mFormat(aFormat)
-  , mInterleaved(aInterleaved)
-{
-}
+                         uint32_t aChannels, uint32_t aRate,
+                         AudioConfig::SampleFormat aFormat, bool aInterleaved)
+    : mChannelLayout(aChannelLayout),
+      mChannels(aChannels),
+      mRate(aRate),
+      mFormat(aFormat),
+      mInterleaved(aInterleaved) {}
 
-AudioConfig::AudioConfig(uint32_t aChannels,
-                         uint32_t aRate,
-                         AudioConfig::SampleFormat aFormat,
-                         bool aInterleaved)
-  : mChannelLayout(aChannels)
-  , mChannels(aChannels)
-  , mRate(aRate)
-  , mFormat(aFormat)
-  , mInterleaved(aInterleaved)
-{
-}
+AudioConfig::AudioConfig(uint32_t aChannels, uint32_t aRate,
+                         AudioConfig::SampleFormat aFormat, bool aInterleaved)
+    : mChannelLayout(aChannels),
+      mChannels(aChannels),
+      mRate(aRate),
+      mFormat(aFormat),
+      mInterleaved(aInterleaved) {}
 
-} // namespace mozilla
+}  // namespace mozilla
--- a/dom/media/AudioConfig.h