--- a/dom/media/platforms/apple/AppleATDecoder.cpp
+++ b/dom/media/platforms/apple/AppleATDecoder.cpp
@@ -271,26 +271,35 @@ AppleATDecoder::DecodeSample(MediaRawDat
}
#ifdef LOG_SAMPLE_DECODE
LOG("pushed audio at time %lfs; duration %lfs\n",
(double)aSample->mTime / USECS_PER_S,
duration.ToSeconds());
#endif
-  AlignedAudioBuffer data(outputData.Length());
-  if (!data) {
+  AudioSampleBuffer data(outputData.Elements(), outputData.Length()); // NOTE(review): assumes AudioSampleBuffer copies/owns the decoded PCM — confirm
+  if (!data.Data()) { // null Data() signals the buffer allocation failed
     return NS_ERROR_OUT_OF_MEMORY;
   }
-  PodCopy(data.get(), &outputData[0], outputData.Length());
+  if (mChannelLayout && !mAudioConverter) { // lazily build the remapping converter once a layout is known
+    AudioConfig in(*mChannelLayout.get(), rate); // source: layout reported by CoreAudio (SetupChannelLayout)
+    AudioConfig out(channels, rate); // destination: default layout for this channel count
+    mAudioConverter = MakeUnique<AudioConverter>(in, out);
+  }
+  if (mAudioConverter) {
+    MOZ_ASSERT(mAudioConverter->CanWorkInPlace()); // Process() below reuses the same buffer
+    mAudioConverter->Process(data);
+  }
+
 RefPtr<AudioData> audio = new AudioData(aSample->mOffset,
                                         aSample->mTime,
                                         duration.ToMicroseconds(),
                                         numFrames,
-                                          Move(data),
+                                          data.Forget(), // transfer buffer ownership to AudioData
                                         channels,
                                         rate);
 mCallback->Output(audio);
 return NS_OK;
}
nsresult
AppleATDecoder::GetInputAudioDescription(AudioStreamBasicDescription& aDesc,
nsresult
AppleATDecoder::GetInputAudioDescription(AudioStreamBasicDescription& aDesc,
@@ -360,16 +369,146 @@ AppleATDecoder::GetInputAudioDescription
return NS_OK;
}
aDesc = formatList[itemIndex].mASBD;
return NS_OK;
}
+AudioConfig::Channel // Maps a CoreAudio AudioChannelLabel to Mozilla's speaker enum.
+ConvertChannelLabel(AudioChannelLabel id)
+{
+  switch (id) {
+    case kAudioChannelLabel_Mono:
+      return AudioConfig::CHANNEL_MONO;
+    case kAudioChannelLabel_Left:
+      return AudioConfig::CHANNEL_LEFT;
+    case kAudioChannelLabel_Right:
+      return AudioConfig::CHANNEL_RIGHT;
+    case kAudioChannelLabel_Center:
+      return AudioConfig::CHANNEL_CENTER;
+    case kAudioChannelLabel_LFEScreen:
+      return AudioConfig::CHANNEL_LFE;
+    case kAudioChannelLabel_LeftSurround:
+      return AudioConfig::CHANNEL_LS;
+    case kAudioChannelLabel_RightSurround:
+      return AudioConfig::CHANNEL_RS;
+    case kAudioChannelLabel_CenterSurround:
+      return AudioConfig::CHANNEL_RCENTER;
+    case kAudioChannelLabel_RearSurroundLeft:
+      return AudioConfig::CHANNEL_RLS;
+    case kAudioChannelLabel_RearSurroundRight:
+      return AudioConfig::CHANNEL_RRS;
+    default: // labels with no AudioConfig equivalent (e.g. height channels)
+      return AudioConfig::CHANNEL_INVALID;
+  }
+}
+
+// Will set mChannelLayout if a channel layout could properly be identified
+// and is supported.
+nsresult
+AppleATDecoder::SetupChannelLayout()
+{
+  // Determine the channel layout.
+  UInt32 propertySize;
+  UInt32 size;
+  OSStatus status =
+    AudioConverterGetPropertyInfo(mConverter,
+                                  kAudioConverterOutputChannelLayout,
+                                  &propertySize, NULL);
+  if (status || !propertySize) {
+    LOG("Couldn't get channel layout property (%s)", FourCC2Str(status));
+    return NS_ERROR_FAILURE;
+  }
+
+  auto data = MakeUnique<uint8_t[]>(propertySize); // AudioChannelLayout is variable-length; fetch into a raw buffer
+  size = propertySize;
+  status =
+    AudioConverterGetProperty(mConverter, kAudioConverterOutputChannelLayout, // must be the same property sized above
+                              &size, data.get());
+  if (status || size != propertySize) {
+    LOG("Couldn't get channel layout property (%s)",
+        FourCC2Str(status));
+    return NS_ERROR_FAILURE;
+  }
+
+  AudioChannelLayout* layout =
+    reinterpret_cast<AudioChannelLayout*>(data.get());
+  AudioChannelLayoutTag tag = layout->mChannelLayoutTag;
+
+  // if tag is kAudioChannelLayoutTag_UseChannelDescriptions then the structure
+  // directly contains the channel layout mapping.
+  // If tag is kAudioChannelLayoutTag_UseChannelBitmap then the layout will
+  // be defined via the bitmap and can be retrieved using
+  // kAudioFormatProperty_ChannelLayoutForBitmap property.
+  // Otherwise the tag itself describes the layout.
+  if (tag != kAudioChannelLayoutTag_UseChannelDescriptions) {
+    AudioFormatPropertyID property =
+      tag == kAudioChannelLayoutTag_UseChannelBitmap
+        ? kAudioFormatProperty_ChannelLayoutForBitmap
+        : kAudioFormatProperty_ChannelLayoutForTag;
+
+    if (property == kAudioFormatProperty_ChannelLayoutForBitmap) {
+      status =
+        AudioFormatGetPropertyInfo(property,
+                                   sizeof(UInt32), &layout->mChannelBitmap,
+                                   &propertySize);
+    } else {
+      status =
+        AudioFormatGetPropertyInfo(property,
+                                   sizeof(AudioChannelLayoutTag), &tag,
+                                   &propertySize);
+    }
+    if (status || !propertySize) {
+      LOG("Couldn't get channel layout property info (%s:%s)",
+          FourCC2Str(property), FourCC2Str(status));
+      return NS_ERROR_FAILURE;
+    }
+    data = MakeUnique<uint8_t[]>(propertySize); // re-allocate: expanded layout may be larger than the tagged one
+    layout = reinterpret_cast<AudioChannelLayout*>(data.get());
+    size = propertySize;
+
+    if (property == kAudioFormatProperty_ChannelLayoutForBitmap) {
+      status = AudioFormatGetProperty(property,
+                                      sizeof(UInt32), &layout->mChannelBitmap,
+                                      &size, layout);
+    } else {
+      status = AudioFormatGetProperty(property,
+                                      sizeof(AudioChannelLayoutTag), &tag,
+                                      &size, layout);
+    }
+    if (status || size != propertySize) {
+      LOG("Couldn't get channel layout property (%s:%s)",
+          FourCC2Str(property), FourCC2Str(status));
+      return NS_ERROR_FAILURE;
+    }
+    // We have retrieved the channel layout from the tag or bitmap.
+    // We can now directly use the channel descriptions.
+    layout->mChannelLayoutTag = kAudioChannelLayoutTag_UseChannelDescriptions;
+  }
+
+  if (layout->mNumberChannelDescriptions > MAX_AUDIO_CHANNELS ||
+      layout->mNumberChannelDescriptions != mOutputFormat.mChannelsPerFrame) {
+    LOG("Nonsensical channel layout or not matching the original channel number");
+    return NS_ERROR_FAILURE;
+  }
+
+  AudioConfig::Channel channels[MAX_AUDIO_CHANNELS];
+  for (uint32_t i = 0; i < layout->mNumberChannelDescriptions; i++) {
+    AudioChannelLabel id = layout->mChannelDescriptions[i].mChannelLabel;
+    AudioConfig::Channel channel = ConvertChannelLabel(id);
+    channels[i] = channel; // may be CHANNEL_INVALID for unmapped labels
+  }
+  mChannelLayout =
+    MakeUnique<AudioConfig::ChannelLayout>(mOutputFormat.mChannelsPerFrame,
+                                           channels);
+  return NS_OK;
+}
+
nsresult
AppleATDecoder::SetupDecoder(MediaRawData* aSample)
{
if (mFormatID == kAudioFormatMPEG4AAC &&
mConfig.mExtendedProfile == 2) {
// Check for implicit SBR signalling if stream is AAC-LC
// This will provide us with an updated magic cookie for use with
// GetInputAudioDescription.
@@ -414,16 +553,21 @@ AppleATDecoder::SetupDecoder(MediaRawDat
= mOutputFormat.mChannelsPerFrame * mOutputFormat.mBitsPerChannel / 8;
OSStatus status = AudioConverterNew(&inputFormat, &mOutputFormat, &mConverter);
if (status) {
LOG("Error %d constructing AudioConverter", status);
mConverter = nullptr;
return NS_ERROR_FAILURE;
}
+
+  if (NS_FAILED(SetupChannelLayout())) { // non-fatal: decoding proceeds with the default layout
+    NS_WARNING("Couldn't retrieve channel layout, will use default layout");
+  }
+
return NS_OK;
}
static void
_MetadataCallback(void* aAppleATDecoder,
AudioFileStreamID aStream,
AudioFileStreamPropertyID aProperty,
UInt32* aFlags)
static void
_MetadataCallback(void* aAppleATDecoder,
AudioFileStreamID aStream,
AudioFileStreamPropertyID aProperty,
UInt32* aFlags)
--- a/dom/media/platforms/apple/AppleATDecoder.h
+++ b/dom/media/platforms/apple/AppleATDecoder.h
@@ -7,16 +7,17 @@
#ifndef mozilla_AppleATDecoder_h
#define mozilla_AppleATDecoder_h
#include <AudioToolbox/AudioToolbox.h>
#include "PlatformDecoderModule.h"
#include "mozilla/ReentrantMonitor.h"
#include "mozilla/Vector.h"
#include "nsIThread.h"
+#include "AudioConverter.h"
namespace mozilla {
class FlushableTaskQueue;
class MediaDataDecoderCallback;
class AppleATDecoder : public MediaDataDecoder {
public:
@@ -48,22 +49,25 @@ public:
private:
RefPtr<FlushableTaskQueue> mTaskQueue;
MediaDataDecoderCallback* mCallback;
AudioConverterRef mConverter;
AudioStreamBasicDescription mOutputFormat;
UInt32 mFormatID;
AudioFileStreamID mStream;
nsTArray<RefPtr<MediaRawData>> mQueuedSamples;
+  UniquePtr<AudioConfig::ChannelLayout> mChannelLayout; // set by SetupChannelLayout(); null if layout is unknown
+  UniquePtr<AudioConverter> mAudioConverter; // created lazily in DecodeSample once mChannelLayout is known
void SubmitSample(MediaRawData* aSample);
nsresult DecodeSample(MediaRawData* aSample);
nsresult GetInputAudioDescription(AudioStreamBasicDescription& aDesc,
const nsTArray<uint8_t>& aExtraData);
// Setup AudioConverter once all information required has been gathered.
// Will return NS_ERROR_NOT_INITIALIZED if more data is required.
nsresult SetupDecoder(MediaRawData* aSample);
nsresult GetImplicitAACMagicCookie(const MediaRawData* aSample);
+  nsresult SetupChannelLayout(); // queries CoreAudio for the converter's output channel layout
};
} // namespace mozilla
#endif // mozilla_AppleATDecoder_h