Bug 830707. Part 3: Don't constrain AudioSegment to a fixed number of channels. r=jesup
author: Robert O'Callahan <robert@ocallahan.org>
date: Mon, 21 Jan 2013 09:44:44 +1300
changeset 130429 aecf9fd2ea567f327a5828bac0e723493c54d80e
parent 130428 7653791740524ffe83e3a7d195b3bc81739b40a9
child 130430 4444d9e3af9a24af8f4081eacc3148756c76a802
push id: 2323
push user: bbajaj@mozilla.com
push date: Mon, 01 Apr 2013 19:47:02 +0000
reviewers: jesup
bugs: 830707
milestone: 21.0a1
content/media/AudioSegment.h
content/media/MediaDecoderStateMachine.cpp
content/media/MediaSegment.h
content/media/MediaStreamGraph.cpp
content/media/VideoSegment.h
content/media/webrtc/MediaEngineDefault.cpp
content/media/webrtc/MediaEngineWebRTCAudio.cpp
media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.h
media/webrtc/signaling/test/FakeMediaStreamsImpl.h
media/webrtc/signaling/test/mediapipeline_unittest.cpp
--- a/content/media/AudioSegment.h
+++ b/content/media/AudioSegment.h
@@ -82,81 +82,54 @@ struct AudioChunk {
 /**
  * A list of audio samples consisting of a sequence of slices of SharedBuffers.
  * The audio rate is determined by the track, not stored in this class.
  */
 class AudioSegment : public MediaSegmentBase<AudioSegment, AudioChunk> {
 public:
   typedef mozilla::AudioSampleFormat SampleFormat;
 
-  AudioSegment() : MediaSegmentBase<AudioSegment, AudioChunk>(AUDIO), mChannels(0) {}
+  AudioSegment() : MediaSegmentBase<AudioSegment, AudioChunk>(AUDIO) {}
 
-  bool IsInitialized()
-  {
-    return mChannels > 0;
-  }
-  void Init(int32_t aChannels)
-  {
-    NS_ASSERTION(aChannels > 0, "Bad number of channels");
-    NS_ASSERTION(!IsInitialized(), "Already initialized");
-    mChannels = aChannels;
-  }
-  int32_t GetChannels()
-  {
-    NS_ASSERTION(IsInitialized(), "Not initialized");
-    return mChannels;
-  }
   void AppendFrames(already_AddRefed<ThreadSharedObject> aBuffer,
                     const nsTArray<const float*>& aChannelData,
                     int32_t aDuration)
   {
-    NS_ASSERTION(mChannels > 0, "Not initialized");
-    NS_ASSERTION(!aBuffer.get() || aChannelData.Length() == uint32_t(mChannels),
-                 "Wrong number of channels");
     AudioChunk* chunk = AppendChunk(aDuration);
     chunk->mBuffer = aBuffer;
     for (uint32_t channel = 0; channel < aChannelData.Length(); ++channel) {
       chunk->mChannelData.AppendElement(aChannelData[channel]);
     }
     chunk->mVolume = 1.0f;
     chunk->mBufferFormat = AUDIO_FORMAT_FLOAT32;
   }
   void AppendFrames(already_AddRefed<ThreadSharedObject> aBuffer,
                     const nsTArray<const int16_t*>& aChannelData,
                     int32_t aDuration)
   {
-    NS_ASSERTION(mChannels > 0, "Not initialized");
-    NS_ASSERTION(!aBuffer.get() || aChannelData.Length() == uint32_t(mChannels),
-                 "Wrong number of channels");
     AudioChunk* chunk = AppendChunk(aDuration);
     chunk->mBuffer = aBuffer;
     for (uint32_t channel = 0; channel < aChannelData.Length(); ++channel) {
       chunk->mChannelData.AppendElement(aChannelData[channel]);
     }
     chunk->mVolume = 1.0f;
     chunk->mBufferFormat = AUDIO_FORMAT_S16;
   }
+  // Consumes aChunk, and returns a pointer to the persistent copy of aChunk
+  // in the segment.
+  AudioChunk* AppendAndConsumeChunk(AudioChunk* aChunk)
+  {
+    AudioChunk* chunk = AppendChunk(aChunk->mDuration);
+    chunk->mBuffer = aChunk->mBuffer.forget();
+    chunk->mChannelData.SwapElements(aChunk->mChannelData);
+    chunk->mVolume = aChunk->mVolume;
+    chunk->mBufferFormat = aChunk->mBufferFormat;
+    return chunk;
+  }
   void ApplyVolume(float aVolume);
-  /**
-   * aOutput must have a matching number of channels, but we will automatically
-   * convert sample formats.
-   */
   void WriteTo(AudioStream* aOutput);
 
-  // Segment-generic methods not in MediaSegmentBase
-  void InitFrom(const AudioSegment& aOther)
-  {
-    NS_ASSERTION(mChannels == 0, "Channels already set");
-    mChannels = aOther.mChannels;
-  }
-  void CheckCompatible(const AudioSegment& aOther) const
-  {
-    NS_ASSERTION(aOther.mChannels == mChannels, "Non-matching channels");
-  }
   static Type StaticType() { return AUDIO; }
-
-protected:
-  int32_t mChannels;
 };
 
 }
 
 #endif /* MOZILLA_AUDIOSEGMENT_H_ */
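
With Init() and the per-segment channel count gone, an AudioSegment no longer has a fixed number of channels; each AudioChunk carries its own count as mChannelData.Length(). A minimal sketch of a producer under the new API, modeled directly on the call sites later in this patch (the planar stereo buffer layout and the helper name are assumptions for illustration, not part of the patch):

  // Hypothetical helper: append aFrames of stereo float silence to aSegment.
  // SharedBuffer and AppendFrames are used as in the call sites below; note
  // that there is no Init(channels) call anywhere.
  static void AppendStereoSilence(AudioSegment* aSegment, int32_t aFrames)
  {
    nsRefPtr<SharedBuffer> buffer =
      SharedBuffer::Create(2 * aFrames * sizeof(float));
    float* samples = static_cast<float*>(buffer->Data());
    memset(samples, 0, 2 * aFrames * sizeof(float));

    nsAutoTArray<const float*,2> channels;
    channels.AppendElement(samples);            // left (planar layout)
    channels.AppendElement(samples + aFrames);  // right

    // The chunk's channel count is simply channels.Length(); a later chunk
    // in the same segment may legally carry a different count.
    aSegment->AppendFrames(buffer.forget(), channels, aFrames);
  }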
--- a/content/media/MediaDecoderStateMachine.cpp
+++ b/content/media/MediaDecoderStateMachine.cpp
@@ -512,32 +512,28 @@ void MediaDecoderStateMachine::SendStrea
 
   if (aAudio->mTime <= aStream->mLastAudioPacketTime) {
     // ignore packet that we've already processed
     return;
   }
   aStream->mLastAudioPacketTime = aAudio->mTime;
   aStream->mLastAudioPacketEndTime = aAudio->GetEnd();
 
-  NS_ASSERTION(aOutput->GetChannels() == int32_t(aAudio->mChannels),
-               "Wrong number of channels");
-
   // This logic has to mimic AudioLoop closely to make sure we write
   // the exact same silences
   CheckedInt64 audioWrittenOffset = UsecsToFrames(mInfo.mAudioRate,
       aStream->mInitialTime + mStartTime) + aStream->mAudioFramesWritten;
   CheckedInt64 frameOffset = UsecsToFrames(mInfo.mAudioRate, aAudio->mTime);
   if (!audioWrittenOffset.isValid() || !frameOffset.isValid())
     return;
   if (audioWrittenOffset.value() < frameOffset.value()) {
     // Write silence to catch up
     LOG(PR_LOG_DEBUG, ("%p Decoder writing %d frames of silence to MediaStream",
                        mDecoder.get(), int32_t(frameOffset.value() - audioWrittenOffset.value())));
     AudioSegment silence;
-    silence.InitFrom(*aOutput);
     silence.InsertNullDataAtStart(frameOffset.value() - audioWrittenOffset.value());
     aStream->mAudioFramesWritten += silence.GetDuration();
     aOutput->AppendFrom(&silence);
   }
 
   int64_t offset;
   if (aStream->mAudioFramesWritten == 0) {
     NS_ASSERTION(frameOffset.value() <= audioWrittenOffset.value(),
@@ -599,33 +595,31 @@ void MediaDecoderStateMachine::SendStrea
 
   int64_t minLastAudioPacketTime = INT64_MAX;
   SourceMediaStream* mediaStream = stream->mStream;
   StreamTime endPosition = 0;
 
   if (!stream->mStreamInitialized) {
     if (mInfo.mHasAudio) {
       AudioSegment* audio = new AudioSegment();
-      audio->Init(mInfo.mAudioChannels);
       mediaStream->AddTrack(TRACK_AUDIO, mInfo.mAudioRate, 0, audio);
     }
     if (mInfo.mHasVideo) {
       VideoSegment* video = new VideoSegment();
       mediaStream->AddTrack(TRACK_VIDEO, RATE_VIDEO, 0, video);
     }
     stream->mStreamInitialized = true;
   }
 
   if (mInfo.mHasAudio) {
     nsAutoTArray<AudioData*,10> audio;
     // It's OK to hold references to the AudioData because while audio
     // is captured, only the decoder thread pops from the queue (see below).
     mReader->AudioQueue().GetElementsAfter(stream->mLastAudioPacketTime, &audio);
     AudioSegment output;
-    output.Init(mInfo.mAudioChannels);
     for (uint32_t i = 0; i < audio.Length(); ++i) {
       SendStreamAudio(audio[i], stream, &output);
     }
     if (output.GetDuration() > 0) {
       mediaStream->AppendToTrack(TRACK_AUDIO, &output);
     }
     if (mReader->AudioQueue().IsFinished() && !stream->mHaveSentFinishAudio) {
       mediaStream->EndTrack(TRACK_AUDIO);
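
The silence catch-up in SendStreamAudio above is easier to follow with concrete numbers. UsecsToFrames converts a microsecond time to a frame count (usecs * rate / 1,000,000, overflow-checked), so, assuming a 44100 Hz track whose stream started at time 0:

  // Worked example with assumed values: 4410 frames (100 ms) already
  // written; the next decoded packet starts at aAudio->mTime = 150000 us.
  //   audioWrittenOffset = UsecsToFrames(44100, 0) + 4410 = 4410 frames
  //   frameOffset        = UsecsToFrames(44100, 150000)   = 6615 frames
  // Since 4410 < 6615, InsertNullDataAtStart(2205) prepends 50 ms of
  // silence, and no per-segment channel setup is needed for it any more.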
--- a/content/media/MediaSegment.h
+++ b/content/media/MediaSegment.h
@@ -114,19 +114,17 @@ protected:
 /**
  * C is the implementation class subclassed from MediaSegmentBase.
  * C must contain a Chunk class.
  */
 template <class C, class Chunk> class MediaSegmentBase : public MediaSegment {
 public:
   virtual MediaSegment* CreateEmptyClone() const
   {
-    C* s = new C();
-    s->InitFrom(*static_cast<const C*>(this));
-    return s;
+    return new C();
   }
   virtual void AppendFrom(MediaSegment* aSource)
   {
     NS_ASSERTION(aSource->GetType() == C::StaticType(), "Wrong type");
     AppendFromInternal(static_cast<C*>(aSource));
   }
   void AppendFrom(C* aSource)
   {
@@ -137,21 +135,16 @@ public:
   {
     NS_ASSERTION(aSource.GetType() == C::StaticType(), "Wrong type");
     AppendSliceInternal(static_cast<const C&>(aSource), aStart, aEnd);
   }
   void AppendSlice(const C& aOther, TrackTicks aStart, TrackTicks aEnd)
   {
     AppendSliceInternal(aOther, aStart, aEnd);
   }
-  void InitToSlice(const C& aOther, TrackTicks aStart, TrackTicks aEnd)
-  {
-    static_cast<C*>(this)->InitFrom(aOther);
-    AppendSliceInternal(aOther, aStart, aEnd);
-  }
   /**
    * Replace the first aDuration ticks with null media data, because the data
    * will not be required again.
    */
   virtual void ForgetUpTo(TrackTicks aDuration)
   {
     if (mChunks.IsEmpty() || aDuration <= 0) {
       return;
@@ -210,32 +203,30 @@ public:
 protected:
   MediaSegmentBase(Type aType) : MediaSegment(aType) {}
 
   /**
    * Appends the contents of aSource to this segment, clearing aSource.
    */
   void AppendFromInternal(MediaSegmentBase<C, Chunk>* aSource)
   {
-    static_cast<C*>(this)->CheckCompatible(*static_cast<C*>(aSource));
     MOZ_ASSERT(aSource->mDuration >= 0);
     mDuration += aSource->mDuration;
     aSource->mDuration = 0;
     if (!mChunks.IsEmpty() && !aSource->mChunks.IsEmpty() &&
         mChunks[mChunks.Length() - 1].CanCombineWithFollowing(aSource->mChunks[0])) {
       mChunks[mChunks.Length() - 1].mDuration += aSource->mChunks[0].mDuration;
       aSource->mChunks.RemoveElementAt(0);
     }
     mChunks.MoveElementsFrom(aSource->mChunks);
   }
 
   void AppendSliceInternal(const MediaSegmentBase<C, Chunk>& aSource,
                            TrackTicks aStart, TrackTicks aEnd)
   {
-    static_cast<C*>(this)->CheckCompatible(static_cast<const C&>(aSource));
     NS_ASSERTION(aStart <= aEnd, "Endpoints inverted");
     NS_ASSERTION(aStart >= 0 && aEnd <= aSource.mDuration,
                  "Slice out of range");
     mDuration += aEnd - aStart;
     TrackTicks offset = 0;
     for (uint32_t i = 0; i < aSource.mChunks.Length() && offset < aEnd; ++i) {
       const Chunk& c = aSource.mChunks[i];
       TrackTicks start = std::max(aStart, offset);
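
A consequence of dropping CheckCompatible() and InitFrom(), visible in AppendFromInternal() above: any two segments of the same type can now be concatenated regardless of the channel counts of their chunks. A sketch (the two source segments are assumed to have been populated elsewhere):

  // Illustrative only: mixing channel counts in one segment is now legal.
  AudioSegment mono;     // assume a 1-channel chunk was appended elsewhere
  AudioSegment stereo;   // assume a 2-channel chunk was appended elsewhere

  AudioSegment combined;
  combined.AppendFrom(&mono);    // formerly asserted "Non-matching channels"
  combined.AppendFrom(&stereo);  // consumers now inspect channels per chunk

  // CreateEmptyClone() likewise reduces to "new AudioSegment()": the clone
  // carries no channel state that later appends would have to match.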
--- a/content/media/MediaStreamGraph.cpp
+++ b/content/media/MediaStreamGraph.cpp
@@ -1153,24 +1153,24 @@ MediaStreamGraphImpl::CreateOrDestroyAud
          // The stream wants to play audio, but nothing will play for the foreseeable
           // future, so don't create the stream.
           continue;
         }
 
        // XXX allocating an AudioStream could be slow so we're going to have to do
         // something here ... preallocation, async allocation, multiplexing onto a single
         // stream ...
-        AudioSegment* audio = tracks->Get<AudioSegment>();
         MediaStream::AudioOutputStream* audioOutputStream =
           aStream->mAudioOutputStreams.AppendElement();
         audioOutputStream->mAudioPlaybackStartTime = aAudioOutputStartTime;
         audioOutputStream->mBlockedAudioTime = 0;
         audioOutputStream->mStream = AudioStream::AllocateStream();
-        audioOutputStream->mStream->Init(audio->GetChannels(),
-                                         tracks->GetRate(), AUDIO_CHANNEL_NORMAL);
+        // XXX for now, allocate stereo output. But we need to fix this to
+        // match the system's ideal channel configuration.
+        audioOutputStream->mStream->Init(2, tracks->GetRate(), AUDIO_CHANNEL_NORMAL);
         audioOutputStream->mTrackID = tracks->GetID();
       }
     }
   }
 
   for (int32_t i = audioOutputStreamsFound.Length() - 1; i >= 0; --i) {
     if (!audioOutputStreamsFound[i]) {
       aStream->mAudioOutputStreams[i].mStream->Shutdown();
@@ -1206,17 +1206,16 @@ MediaStreamGraphImpl::PlayAudio(MediaStr
     // some amount of blocked time after the current time.
     GraphTime t = aFrom;
     while (t < aTo) {
       GraphTime end;
       bool blocked = aStream->mBlocked.GetAt(t, &end);
       end = std::min(end, aTo);
 
       AudioSegment output;
-      output.InitFrom(*audio);
       if (blocked) {
         // Track total blocked time in aStream->mBlockedAudioTime so that
         // the amount of silent samples we've inserted for blocking never gets
         // more than one sample away from the ideal amount.
         TrackTicks startTicks =
             TimeToTicksRoundDown(track->GetRate(), audioOutput.mBlockedAudioTime);
         audioOutput.mBlockedAudioTime += end - t;
         TrackTicks endTicks =
--- a/content/media/VideoSegment.h
+++ b/content/media/VideoSegment.h
@@ -99,20 +99,14 @@ public:
     }
     if (aStart) {
       *aStart = mDuration - c->mDuration;
     }
     return &c->mFrame;
   }
 
   // Segment-generic methods not in MediaSegmentBase
-  void InitFrom(const VideoSegment& aOther)
-  {
-  }
-  void CheckCompatible(const VideoSegment& aOther) const
-  {
-  }
   static Type StaticType() { return VIDEO; }
 };
 
 }
 
 #endif /* MOZILLA_VIDEOSEGMENT_H_ */
--- a/content/media/webrtc/MediaEngineDefault.cpp
+++ b/content/media/webrtc/MediaEngineDefault.cpp
@@ -12,17 +12,16 @@
 #include "ImageTypes.h"
 #include "prmem.h"
 
 #ifdef MOZ_WIDGET_ANDROID
 #include "AndroidBridge.h"
 #include "nsISupportsUtils.h"
 #endif
 
-#define CHANNELS 1
 #define VIDEO_RATE USECS_PER_S
 #define AUDIO_RATE 16000
 
 namespace mozilla {
 
 NS_IMPL_THREADSAFE_ISUPPORTS1(MediaEngineDefaultVideoSource, nsITimerCallback)
 /**
  * Default video source.
@@ -331,17 +330,16 @@ MediaEngineDefaultAudioSource::Start(Sou
   if (!mTimer) {
     return NS_ERROR_FAILURE;
   }
 
   mSource = aStream;
 
   // AddTrack will take ownership of segment
   AudioSegment* segment = new AudioSegment();
-  segment->Init(CHANNELS);
   mSource->AddTrack(aID, AUDIO_RATE, 0, segment);
 
   // We aren't going to add any more tracks
   mSource->AdvanceKnownTracksTime(STREAM_TIME_MAX);
 
   // Remember TrackID so we can finish later
   mTrackID = aID;
 
@@ -377,17 +375,16 @@ MediaEngineDefaultAudioSource::Snapshot(
 {
    return NS_ERROR_NOT_IMPLEMENTED;
 }
 
 NS_IMETHODIMP
 MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
 {
   AudioSegment segment;
-  segment.Init(CHANNELS);
   segment.InsertNullDataAtStart(AUDIO_RATE/100); // 10ms of fake data
 
   mSource->AppendToTrack(mTrackID, &segment);
 
   return NS_OK;
 }
 
 void
--- a/content/media/webrtc/MediaEngineWebRTCAudio.cpp
+++ b/content/media/webrtc/MediaEngineWebRTCAudio.cpp
@@ -144,17 +144,16 @@ MediaEngineWebRTCAudioSource::Start(Sour
   }
 
   {
     ReentrantMonitorAutoEnter enter(mMonitor);
     mSources.AppendElement(aStream);
   }
 
   AudioSegment* segment = new AudioSegment();
-  segment->Init(CHANNELS);
   aStream->AddTrack(aID, SAMPLE_FREQUENCY, 0, segment);
   aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
   LOG(("Initial audio"));
   mTrackID = aID;
 
   if (mState == kStarted) {
     return NS_OK;
   }
@@ -358,17 +357,16 @@ MediaEngineWebRTCAudioSource::Process(co
   uint32_t len = mSources.Length();
   for (uint32_t i = 0; i < len; i++) {
     nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));
 
     sample* dest = static_cast<sample*>(buffer->Data());
     memcpy(dest, audio10ms, length * sizeof(sample));
 
     AudioSegment segment;
-    segment.Init(CHANNELS);
     nsAutoTArray<const sample*,1> channels;
     channels.AppendElement(dest);
     segment.AppendFrames(buffer.forget(), channels, length);
 
     SourceMediaStream *source = mSources[i];
     if (source) {
       // This is safe from any thread, and is safe if the track is Finished
       // or Destroyed
--- a/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
+++ b/media/webrtc/signaling/src/mediapipeline/MediaPipeline.cpp
@@ -837,17 +837,16 @@ nsresult MediaPipelineReceiveAudio::Init
 MediaPipelineReceiveAudio::PipelineListener::PipelineListener(
     SourceMediaStream * source, TrackID track_id,
     const RefPtr<MediaSessionConduit>& conduit)
     : source_(source),
       track_id_(track_id),
       conduit_(conduit),
       played_(0) {
   mozilla::AudioSegment *segment = new mozilla::AudioSegment();
-  segment->Init(1); // 1 Channel
   source_->AddTrack(track_id_, 16000, 0, segment);
   source_->AdvanceKnownTracksTime(STREAM_TIME_MAX);
 }
 
 void MediaPipelineReceiveAudio::PipelineListener::
 NotifyPull(MediaStreamGraph* graph, StreamTime desired_time) {
   MOZ_ASSERT(source_);
   if (!source_) {
@@ -870,17 +869,16 @@ NotifyPull(MediaStreamGraph* graph, Stre
             samples_length);
 
     if (err != kMediaConduitNoError)
       return;
 
     MOZ_MTLOG(PR_LOG_DEBUG, "Audio conduit returned buffer of length " << samples_length);
 
     AudioSegment segment;
-    segment.Init(1);
     nsAutoTArray<const int16_t*,1> channels;
     channels.AppendElement(samples_data);
     segment.AppendFrames(samples.forget(), channels, samples_length);
 
     source_->AppendToTrack(track_id_, &segment);
 
     played_ += 10;
   }
--- a/media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.h
+++ b/media/webrtc/signaling/src/peerconnection/PeerConnectionMedia.h
@@ -48,17 +48,16 @@ class PeerConnectionImpl;
 class Fake_AudioGenerator {
  public:
 Fake_AudioGenerator(nsDOMMediaStream* aStream) : mStream(aStream), mCount(0) {
     mTimer = do_CreateInstance("@mozilla.org/timer;1");
     MOZ_ASSERT(mTimer);
 
     // Make a track
     mozilla::AudioSegment *segment = new mozilla::AudioSegment();
-    segment->Init(1); // 1 Channel
     mStream->GetStream()->AsSourceStream()->AddTrack(1, 16000, 0, segment);
 
     // Set the timer
     mTimer->InitWithFuncCallback(Callback, this, 100, nsITimer::TYPE_REPEATING_PRECISE);
   }
 
   static void Callback(nsITimer* timer, void *arg) {
     Fake_AudioGenerator* gen = static_cast<Fake_AudioGenerator*>(arg);
@@ -66,17 +65,16 @@ Fake_AudioGenerator(nsDOMMediaStream* aS
     nsRefPtr<mozilla::SharedBuffer> samples = mozilla::SharedBuffer::Create(1600 * sizeof(int16_t));
     int16_t* data = static_cast<int16_t*>(samples->Data());
     for (int i=0; i<1600; i++) {
       data[i] = ((gen->mCount % 8) * 4000) - (7*4000)/2;
       ++gen->mCount;
     }
 
     mozilla::AudioSegment segment;
-    segment.Init(1);
     nsAutoTArray<const int16_t*,1> channelData;
     channelData.AppendElement(data);
     segment.AppendFrames(samples.forget(), channelData, 1600);
     gen->mStream->GetStream()->AsSourceStream()->AppendToTrack(1, &segment);
   }
 
  private:
   nsCOMPtr<nsITimer> mTimer;
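
For reference, the generator loop above (and the nearly identical one in FakeMediaStreamsImpl.h below) emits a simple deterministic waveform:

  // data[i] = ((count % 8) * 4000) - (7*4000)/2 works out to:
  //   count % 8 : 0       1       2      ...  7
  //   data[i]   : -14000  -10000  -6000  ...  +14000
  // i.e. an 8-sample sawtooth ramp centered on zero (2 kHz at the 16000 Hz
  // track rate used here).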
--- a/media/webrtc/signaling/test/FakeMediaStreamsImpl.h
+++ b/media/webrtc/signaling/test/FakeMediaStreamsImpl.h
@@ -94,17 +94,16 @@ void Fake_AudioStreamSource::Periodic() 
   int16_t* data = reinterpret_cast<int16_t *>(samples->Data());
   for(int i=0; i<(1600*2); i++) {
     //saw tooth audio sample
     data[i] = ((mCount % 8) * 4000) - (7*4000)/2;
     mCount++;
   }
 
   mozilla::AudioSegment segment;
-  segment.Init(1);
   nsAutoTArray<const int16_t *,1> channels;
   channels.AppendElement(data);
   segment.AppendFrames(samples.forget(), channels, AUDIO_BUFFER_SIZE);
 
   for(std::set<Fake_MediaStreamListener *>::iterator it = mListeners.begin();
        it != mListeners.end(); ++it) {
     (*it)->NotifyQueuedTrackChanges(NULL, // Graph
                                     0, // TrackID
--- a/media/webrtc/signaling/test/mediapipeline_unittest.cpp
+++ b/media/webrtc/signaling/test/mediapipeline_unittest.cpp
@@ -161,17 +161,16 @@ class TestAgentSend : public TestAgent {
 
 class TestAgentReceive : public TestAgent {
  public:
   TestAgentReceive() {
     mozilla::SourceMediaStream *audio = new Fake_SourceMediaStream();
     audio->SetPullEnabled(true);
 
     mozilla::AudioSegment* segment= new mozilla::AudioSegment();
-    segment->Init(1);
     audio->AddTrack(0, 100, 0, segment);
     audio->AdvanceKnownTracksTime(mozilla::STREAM_TIME_MAX);
 
     audio_ = new Fake_nsDOMMediaStream(audio);
 
     std::vector<mozilla::AudioCodecConfig *> codecs;
     codecs.push_back(&audio_config_);