Bug 1508434 - p2: move codec output processing to task queue. r=jya
☠☠ backed out by ef80c62a3070 ☠☠
author: John Lin <jolin@mozilla.com>
Tue, 08 Jan 2019 20:57:47 +0000
changeset 510223 8fbed32432174128f2aa82d82b88b5c386f0c52d
parent 510222 25b67aa0ef55c463fb947630f0313b334aff51f8
child 510224 12424313d637e32e06fd12a758d14908d0099a15
push id: 10547
push user: ffxbld-merge
push date: Mon, 21 Jan 2019 13:03:58 +0000
treeherder: mozilla-beta@24ec1916bffe [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: jya
bugs: 1508434
milestone: 66.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1508434 - p2: move codec output processing to task queue. r=jya Processing MediaCodec output in Android binder threads while flushing on the task queue could cause a race condition and leftover frames. Dispatching the processing to the task queue ensures that all frames prior to flushing will be cleared (by mDecodedData.Clear()) or ignored (by mInputInfos.Clear()). Also consolidate all flushing operations into one task to avoid frame insertion between the emptying of mDecodedData and mInputInfos. Differential Revision: https://phabricator.services.mozilla.com/D15228
dom/media/platforms/android/RemoteDataDecoder.cpp
dom/media/platforms/android/RemoteDataDecoder.h
--- a/dom/media/platforms/android/RemoteDataDecoder.cpp
+++ b/dom/media/platforms/android/RemoteDataDecoder.cpp
@@ -33,18 +33,17 @@ using media::TimeUnit;
 namespace mozilla {
 
 class RemoteVideoDecoder : public RemoteDataDecoder {
  public:
   // Hold an output buffer and render it to the surface when the frame is sent
   // to compositor, or release it if not presented.
   class RenderOrReleaseOutput : public VideoData::Listener {
    public:
-    RenderOrReleaseOutput(java::CodecProxy::Param aCodec,
-                          java::Sample::Param aSample)
+    RenderOrReleaseOutput(CodecProxy::Param aCodec, Sample::Param aSample)
         : mCodec(aCodec), mSample(aSample) {}
 
     ~RenderOrReleaseOutput() { ReleaseOutput(false); }
 
     void OnSentToCompositor() override {
       ReleaseOutput(true);
       mCodec = nullptr;
       mSample = nullptr;
@@ -52,18 +51,18 @@ class RemoteVideoDecoder : public Remote
 
    private:
     void ReleaseOutput(bool aToRender) {
       if (mCodec && mSample) {
         mCodec->ReleaseOutput(mSample, aToRender);
       }
     }
 
-    java::CodecProxy::GlobalRef mCodec;
-    java::Sample::GlobalRef mSample;
+    CodecProxy::GlobalRef mCodec;
+    Sample::GlobalRef mSample;
   };
 
   class InputInfo {
    public:
     InputInfo() {}
 
     InputInfo(const int64_t aDurationUs, const gfx::IntSize& aImageSize,
               const gfx::IntSize& aDisplaySize)
@@ -81,66 +80,18 @@ class RemoteVideoDecoder : public Remote
     explicit CallbacksSupport(RemoteVideoDecoder* aDecoder)
         : mDecoder(aDecoder) {}
 
     void HandleInput(int64_t aTimestamp, bool aProcessed) override {
       mDecoder->UpdateInputStatus(aTimestamp, aProcessed);
     }
 
     void HandleOutput(Sample::Param aSample) override {
-      UniquePtr<VideoData::Listener> releaseSample(
-          new RenderOrReleaseOutput(mDecoder->mJavaDecoder, aSample));
-
-      BufferInfo::LocalRef info = aSample->Info();
-
-      int32_t flags;
-      bool ok = NS_SUCCEEDED(info->Flags(&flags));
-
-      int32_t offset;
-      ok &= NS_SUCCEEDED(info->Offset(&offset));
-
-      int32_t size;
-      ok &= NS_SUCCEEDED(info->Size(&size));
-
-      int64_t presentationTimeUs;
-      ok &= NS_SUCCEEDED(info->PresentationTimeUs(&presentationTimeUs));
-
-      if (!ok) {
-        HandleError(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
-                                RESULT_DETAIL("VideoCallBack::HandleOutput")));
-        return;
-      }
-
-      InputInfo inputInfo;
-      ok = mDecoder->mInputInfos.Find(presentationTimeUs, inputInfo);
-      bool isEOS = !!(flags & MediaCodec::BUFFER_FLAG_END_OF_STREAM);
-      if (!ok && !isEOS) {
-        // Ignore output with no corresponding input.
-        return;
-      }
-
-      if (ok && (size > 0 || presentationTimeUs >= 0)) {
-        RefPtr<layers::Image> img = new SurfaceTextureImage(
-            mDecoder->mSurfaceHandle, inputInfo.mImageSize,
-            false /* NOT continuous */, gl::OriginPos::BottomLeft);
-
-        RefPtr<VideoData> v = VideoData::CreateFromImage(
-            inputInfo.mDisplaySize, offset,
-            TimeUnit::FromMicroseconds(presentationTimeUs),
-            TimeUnit::FromMicroseconds(inputInfo.mDurationUs), img,
-            !!(flags & MediaCodec::BUFFER_FLAG_SYNC_FRAME),
-            TimeUnit::FromMicroseconds(presentationTimeUs));
-
-        v->SetListener(std::move(releaseSample));
-        mDecoder->UpdateOutputStatus(std::move(v));
-      }
-
-      if (isEOS) {
-        mDecoder->DrainComplete();
-      }
+      // aSample will be implicitly converted into a GlobalRef.
+      mDecoder->ProcessOutput(std::move(aSample));
     }
 
     void HandleError(const MediaResult& aError) override {
       mDecoder->Error(aError);
     }
 
     friend class RemoteDataDecoder;
 
@@ -187,24 +138,22 @@ class RemoteVideoDecoder : public Remote
     mIsCodecSupportAdaptivePlayback =
         mJavaDecoder->IsAdaptivePlaybackSupported();
     mIsHardwareAccelerated = mJavaDecoder->IsHardwareAccelerated();
     return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__);
   }
 
   RefPtr<MediaDataDecoder::FlushPromise> Flush() override {
     RefPtr<RemoteVideoDecoder> self = this;
-    return RemoteDataDecoder::Flush()->Then(
-        mTaskQueue, __func__,
-        [self](const FlushPromise::ResolveOrRejectValue& aValue) {
-          self->mInputInfos.Clear();
-          self->mSeekTarget.reset();
-          self->mLatestOutputTime.reset();
-          return FlushPromise::CreateAndResolveOrReject(aValue, __func__);
-        });
+    return InvokeAsync(mTaskQueue, __func__, [self, this]() {
+      mInputInfos.Clear();
+      mSeekTarget.reset();
+      mLatestOutputTime.reset();
+      return RemoteDataDecoder::ProcessFlush();
+    });
   }
 
   RefPtr<MediaDataDecoder::DecodePromise> Decode(
       MediaRawData* aSample) override {
     const VideoInfo* config =
         aSample->mTrackInfo ? aSample->mTrackInfo->GetAsVideoInfo() : &mConfig;
     MOZ_ASSERT(config);
 
@@ -254,38 +203,104 @@ class RemoteVideoDecoder : public Remote
     return mIsHardwareAccelerated;
   }
 
   ConversionRequired NeedsConversion() const override {
     return ConversionRequired::kNeedAnnexB;
   }
 
  private:
+  // Param and LocalRef are only valid for the duration of a JNI method call.
+  // Use GlobalRef as the parameter type to keep the Java object referenced
+  // until running.
+  void ProcessOutput(Sample::GlobalRef&& aSample) {
+    if (!mTaskQueue->IsCurrentThreadIn()) {
+      nsresult rv = mTaskQueue->Dispatch(NewRunnableMethod<Sample::GlobalRef&&>(
+          "RemoteVideoDecoder::ProcessOutput", this,
+          &RemoteVideoDecoder::ProcessOutput, std::move(aSample)));
+      MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
+      Unused << rv;
+      return;
+    }
+
+    AssertOnTaskQueue();
+
+    UniquePtr<VideoData::Listener> releaseSample(
+        new RenderOrReleaseOutput(mJavaDecoder, aSample));
+
+    BufferInfo::LocalRef info = aSample->Info();
+
+    int32_t flags;
+    bool ok = NS_SUCCEEDED(info->Flags(&flags));
+
+    int32_t offset;
+    ok &= NS_SUCCEEDED(info->Offset(&offset));
+
+    int32_t size;
+    ok &= NS_SUCCEEDED(info->Size(&size));
+
+    int64_t presentationTimeUs;
+    ok &= NS_SUCCEEDED(info->PresentationTimeUs(&presentationTimeUs));
+
+    if (!ok) {
+      Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
+                        RESULT_DETAIL("VideoCallBack::HandleOutput")));
+      return;
+    }
+
+    InputInfo inputInfo;
+    ok = mInputInfos.Find(presentationTimeUs, inputInfo);
+    bool isEOS = !!(flags & MediaCodec::BUFFER_FLAG_END_OF_STREAM);
+    if (!ok && !isEOS) {
+      // Ignore output with no corresponding input.
+      return;
+    }
+
+    if (ok && (size > 0 || presentationTimeUs >= 0)) {
+      RefPtr<layers::Image> img = new SurfaceTextureImage(
+          mSurfaceHandle, inputInfo.mImageSize, false /* NOT continuous */,
+          gl::OriginPos::BottomLeft);
+
+      RefPtr<VideoData> v = VideoData::CreateFromImage(
+          inputInfo.mDisplaySize, offset,
+          TimeUnit::FromMicroseconds(presentationTimeUs),
+          TimeUnit::FromMicroseconds(inputInfo.mDurationUs), img,
+          !!(flags & MediaCodec::BUFFER_FLAG_SYNC_FRAME),
+          TimeUnit::FromMicroseconds(presentationTimeUs));
+
+      v->SetListener(std::move(releaseSample));
+      RemoteDataDecoder::UpdateOutputStatus(std::move(v));
+    }
+
+    if (isEOS) {
+      DrainComplete();
+    }
+  }
+
   const VideoInfo mConfig;
   GeckoSurface::GlobalRef mSurface;
   AndroidSurfaceTextureHandle mSurfaceHandle;
   // Only accessed on reader's task queue.
   bool mIsCodecSupportAdaptivePlayback = false;
   // Can be accessed on any thread, but only written on during init.
   bool mIsHardwareAccelerated = false;
-  // Accessed on mTaskQueue, reader's TaskQueue and Java callback tread.
-  // SimpleMap however is thread-safe, so it's okay to do so.
+  // Accessed on mTaskQueue and reader's TaskQueue. SimpleMap however is
+  // thread-safe, so it's okay to do so.
   SimpleMap<InputInfo> mInputInfos;
   // Only accessed on the TaskQueue.
   Maybe<TimeUnit> mSeekTarget;
   Maybe<TimeUnit> mLatestOutputTime;
 };
 
 class RemoteAudioDecoder : public RemoteDataDecoder {
  public:
   RemoteAudioDecoder(const AudioInfo& aConfig, MediaFormat::Param aFormat,
                      const nsString& aDrmStubId, TaskQueue* aTaskQueue)
       : RemoteDataDecoder(MediaData::Type::AUDIO_DATA, aConfig.mMimeType,
-                          aFormat, aDrmStubId, aTaskQueue),
-        mConfig(aConfig) {
+                          aFormat, aDrmStubId, aTaskQueue) {
     JNIEnv* const env = jni::GetEnvForThread();
 
     bool formatHasCSD = false;
     NS_ENSURE_SUCCESS_VOID(
         aFormat->ContainsKey(NS_LITERAL_STRING("csd-0"), &formatHasCSD));
 
     if (!formatHasCSD && aConfig.mCodecSpecificConfig->Length() >= 2) {
       jni::ByteBuffer::LocalRef buffer(env);
@@ -320,92 +335,130 @@ class RemoteAudioDecoder : public Remote
     explicit CallbacksSupport(RemoteAudioDecoder* aDecoder)
         : mDecoder(aDecoder) {}
 
     void HandleInput(int64_t aTimestamp, bool aProcessed) override {
       mDecoder->UpdateInputStatus(aTimestamp, aProcessed);
     }
 
     void HandleOutput(Sample::Param aSample) override {
-      BufferInfo::LocalRef info = aSample->Info();
-
-      int32_t flags;
-      bool ok = NS_SUCCEEDED(info->Flags(&flags));
-
-      int32_t offset;
-      ok &= NS_SUCCEEDED(info->Offset(&offset));
+      // aSample will be implicitly converted into a GlobalRef.
+      mDecoder->ProcessOutput(std::move(aSample));
+    }
 
-      int64_t presentationTimeUs;
-      ok &= NS_SUCCEEDED(info->PresentationTimeUs(&presentationTimeUs));
-
-      int32_t size;
-      ok &= NS_SUCCEEDED(info->Size(&size));
-
-      if (!ok) {
-        HandleError(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR,
-                                RESULT_DETAIL("AudioCallBack::HandleOutput")));
+    void HandleOutputFormatChanged(MediaFormat::Param aFormat) override {
+      int32_t outputChannels = 0;
+      aFormat->GetInteger(NS_LITERAL_STRING("channel-count"), &outputChannels);
+      AudioConfig::ChannelLayout layout(outputChannels);
+      if (!layout.IsValid()) {
+        mDecoder->Error(MediaResult(
+            NS_ERROR_DOM_MEDIA_FATAL_ERR,
+            RESULT_DETAIL("Invalid channel layout:%d", outputChannels)));
         return;
       }
 
-      if (size > 0) {
-#ifdef MOZ_SAMPLE_TYPE_S16
-        const int32_t numSamples = size / 2;
-#else
-#error We only support 16-bit integer PCM
-#endif
-
-        const int32_t numFrames = numSamples / mOutputChannels;
-        AlignedAudioBuffer audio(numSamples);
-        if (!audio) {
-          mDecoder->Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__));
-          return;
-        }
-
-        jni::ByteBuffer::LocalRef dest =
-            jni::ByteBuffer::New(audio.get(), size);
-        aSample->WriteToByteBuffer(dest);
+      int32_t sampleRate = 0;
+      aFormat->GetInteger(NS_LITERAL_STRING("sample-rate"), &sampleRate);
+      LOG("Audio output format changed: channels:%d sample rate:%d",
+          outputChannels, sampleRate);
 
-        RefPtr<AudioData> data = new AudioData(
-            0, TimeUnit::FromMicroseconds(presentationTimeUs),
-            FramesToTimeUnit(numFrames, mOutputSampleRate), numFrames,
-            std::move(audio), mOutputChannels, mOutputSampleRate);
-
-        mDecoder->UpdateOutputStatus(std::move(data));
-      }
-
-      if ((flags & MediaCodec::BUFFER_FLAG_END_OF_STREAM) != 0) {
-        mDecoder->DrainComplete();
-      }
-    }
-
-    void HandleOutputFormatChanged(MediaFormat::Param aFormat) override {
-      aFormat->GetInteger(NS_LITERAL_STRING("channel-count"), &mOutputChannels);
-      AudioConfig::ChannelLayout layout(mOutputChannels);
-      if (!layout.IsValid()) {
-        mDecoder->Error(MediaResult(
-            NS_ERROR_DOM_MEDIA_FATAL_ERR,
-            RESULT_DETAIL("Invalid channel layout:%d", mOutputChannels)));
-        return;
-      }
-      aFormat->GetInteger(NS_LITERAL_STRING("sample-rate"), &mOutputSampleRate);
-      LOG("Audio output format changed: channels:%d sample rate:%d",
-          mOutputChannels, mOutputSampleRate);
+      mDecoder->ProcessOutputFormatChange(outputChannels, sampleRate);
     }
 
     void HandleError(const MediaResult& aError) override {
       mDecoder->Error(aError);
     }
 
    private:
     RemoteAudioDecoder* mDecoder;
-    int32_t mOutputChannels;
-    int32_t mOutputSampleRate;
   };
 
-  const AudioInfo mConfig;
+  // Param and LocalRef are only valid for the duration of a JNI method call.
+  // Use GlobalRef as the parameter type to keep the Java object referenced
+  // until running.
+  void ProcessOutput(Sample::GlobalRef&& aSample) {
+    if (!mTaskQueue->IsCurrentThreadIn()) {
+      nsresult rv = mTaskQueue->Dispatch(NewRunnableMethod<Sample::GlobalRef&&>(
+          "RemoteAudioDecoder::ProcessOutput", this,
+          &RemoteAudioDecoder::ProcessOutput, std::move(aSample)));
+      MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
+      Unused << rv;
+      return;
+    }
+
+    AssertOnTaskQueue();
+
+    BufferInfo::LocalRef info = aSample->Info();
+
+    int32_t flags;
+    bool ok = NS_SUCCEEDED(info->Flags(&flags));
+
+    int32_t offset;
+    ok &= NS_SUCCEEDED(info->Offset(&offset));
+
+    int64_t presentationTimeUs;
+    ok &= NS_SUCCEEDED(info->PresentationTimeUs(&presentationTimeUs));
+
+    int32_t size;
+    ok &= NS_SUCCEEDED(info->Size(&size));
+
+    if (!ok) {
+      Error(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__));
+      return;
+    }
+
+    if (size > 0) {
+#ifdef MOZ_SAMPLE_TYPE_S16
+      const int32_t numSamples = size / 2;
+#else
+#error We only support 16-bit integer PCM
+#endif
+
+      const int32_t numFrames = numSamples / mOutputChannels;
+      AlignedAudioBuffer audio(numSamples);
+      if (!audio) {
+        Error(MediaResult(NS_ERROR_OUT_OF_MEMORY, __func__));
+        return;
+      }
+
+      jni::ByteBuffer::LocalRef dest = jni::ByteBuffer::New(audio.get(), size);
+      aSample->WriteToByteBuffer(dest);
+
+      RefPtr<AudioData> data = new AudioData(
+          0, TimeUnit::FromMicroseconds(presentationTimeUs),
+          FramesToTimeUnit(numFrames, mOutputSampleRate), numFrames,
+          std::move(audio), mOutputChannels, mOutputSampleRate);
+
+      UpdateOutputStatus(std::move(data));
+    }
+
+    if ((flags & MediaCodec::BUFFER_FLAG_END_OF_STREAM) != 0) {
+      DrainComplete();
+    }
+  }
+
+  void ProcessOutputFormatChange(int32_t aChannels, int32_t aSampleRate) {
+    if (!mTaskQueue->IsCurrentThreadIn()) {
+      nsresult rv = mTaskQueue->Dispatch(NewRunnableMethod<int32_t, int32_t>(
+          "RemoteAudioDecoder::ProcessOutputFormatChange", this,
+          &RemoteAudioDecoder::ProcessOutputFormatChange, aChannels,
+          aSampleRate));
+      MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
+      Unused << rv;
+      return;
+    }
+
+    AssertOnTaskQueue();
+
+    mOutputChannels = aChannels;
+    mOutputSampleRate = aSampleRate;
+  }
+
+  int32_t mOutputChannels;
+  int32_t mOutputSampleRate;
 };
 
 already_AddRefed<MediaDataDecoder> RemoteDataDecoder::CreateAudioDecoder(
     const CreateDecoderParams& aParams, const nsString& aDrmStubId,
     CDMProxy* aProxy) {
   const AudioInfo& config = aParams.AudioConfig();
   MediaFormat::LocalRef format;
   NS_ENSURE_SUCCESS(
@@ -448,25 +501,28 @@ RemoteDataDecoder::RemoteDataDecoder(Med
       mMimeType(aMimeType),
       mFormat(aFormat),
       mDrmStubId(aDrmStubId),
       mTaskQueue(aTaskQueue),
       mNumPendingInputs(0) {}
 
 RefPtr<MediaDataDecoder::FlushPromise> RemoteDataDecoder::Flush() {
   RefPtr<RemoteDataDecoder> self = this;
-  return InvokeAsync(mTaskQueue, __func__, [self, this]() {
-    mDecodedData = DecodedData();
-    mNumPendingInputs = 0;
-    mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
-    mDrainPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
-    mDrainStatus = DrainStatus::DRAINED;
-    mJavaDecoder->Flush();
-    return FlushPromise::CreateAndResolve(true, __func__);
-  });
+  return InvokeAsync(mTaskQueue, this, __func__,
+                     &RemoteDataDecoder::ProcessFlush);
+}
+
+RefPtr<MediaDataDecoder::FlushPromise> RemoteDataDecoder::ProcessFlush() {
+  mDecodedData = DecodedData();
+  mNumPendingInputs = 0;
+  mDecodePromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+  mDrainPromise.RejectIfExists(NS_ERROR_DOM_MEDIA_CANCELED, __func__);
+  mDrainStatus = DrainStatus::DRAINED;
+  mJavaDecoder->Flush();
+  return FlushPromise::CreateAndResolve(true, __func__);
 }
 
 RefPtr<MediaDataDecoder::DecodePromise> RemoteDataDecoder::Drain() {
   RefPtr<RemoteDataDecoder> self = this;
   return InvokeAsync(mTaskQueue, __func__, [self, this]() {
     if (mShutdown) {
       return DecodePromise::CreateAndReject(NS_ERROR_DOM_MEDIA_CANCELED,
                                             __func__);
@@ -630,25 +686,16 @@ void RemoteDataDecoder::UpdateInputStatu
   if (mNumPendingInputs ==
           0 ||  // Input has been processed, request the next one.
       !mDecodedData.IsEmpty()) {  // Previous output arrived before Decode().
     ReturnDecodedData();
   }
 }
 
 void RemoteDataDecoder::UpdateOutputStatus(RefPtr<MediaData>&& aSample) {
-  if (!mTaskQueue->IsCurrentThreadIn()) {
-    nsresult rv =
-        mTaskQueue->Dispatch(NewRunnableMethod<const RefPtr<MediaData>>(
-            "RemoteDataDecoder::UpdateOutputStatus", this,
-            &RemoteDataDecoder::UpdateOutputStatus, std::move(aSample)));
-    MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv));
-    Unused << rv;
-    return;
-  }
   AssertOnTaskQueue();
   if (mShutdown) {
     return;
   }
   if (IsUsefulData(aSample)) {
     mDecodedData.AppendElement(std::move(aSample));
   }
   ReturnDecodedData();
--- a/dom/media/platforms/android/RemoteDataDecoder.h
+++ b/dom/media/platforms/android/RemoteDataDecoder.h
@@ -37,16 +37,17 @@ class RemoteDataDecoder : public MediaDa
 
  protected:
   virtual ~RemoteDataDecoder() {}
   RemoteDataDecoder(MediaData::Type aType, const nsACString& aMimeType,
                     java::sdk::MediaFormat::Param aFormat,
                     const nsString& aDrmStubId, TaskQueue* aTaskQueue);
 
   // Methods only called on mTaskQueue.
+  RefPtr<FlushPromise> ProcessFlush();
   RefPtr<ShutdownPromise> ProcessShutdown();
   void UpdateInputStatus(int64_t aTimestamp, bool aProcessed);
   void UpdateOutputStatus(RefPtr<MediaData>&& aSample);
   void ReturnDecodedData();
   void DrainComplete();
   void Error(const MediaResult& aError);
   void AssertOnTaskQueue() { MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn()); }