Bug 1489278 - part1 : show doorhanger when create AudioContext r=padenot
author: alwu <alwu@mozilla.com>
Thu, 13 Sep 2018 16:51:07 +0000
changeset 436373 4cf5ee8d6112bce30a01cce856edf60ed09a3ccc
parent 436372 9bb04fb52dd70c9b868f12220d6dc777546ae527
child 436374 2477110ccfba50833934d4641ccd91a2e88d64a1
push id: 34643
push user: btara@mozilla.com
push date: Fri, 14 Sep 2018 21:48:55 +0000
treeherder: mozilla-central@750e71a8f79b [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: padenot
bugs: 1489278
milestone: 64.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1489278 - part1 : show doorhanger when create AudioContext r=padenot Ideally, sites should create an AudioContext only when they are about to produce sound, so we show a doorhanger asking users whether they want to allow autoplay. We delay the AudioContext's state transition from `suspended` to `running` until either (1) the user clicks the 'Allow' button in the doorhanger, or (2) the user interacts with the site and the AudioContext then calls resume() again. Differential Revision: https://phabricator.services.mozilla.com/D5610
dom/media/AutoplayPolicy.cpp
dom/media/AutoplayPolicy.h
dom/media/webaudio/AudioContext.cpp
dom/media/webaudio/AudioContext.h
--- a/dom/media/AutoplayPolicy.cpp
+++ b/dom/media/AutoplayPolicy.cpp
@@ -190,36 +190,36 @@ AutoplayPolicy::IsAllowedToPlay(const HT
 
   AUTOPLAY_LOG("IsAllowedToPlay, mediaElement=%p, isAllowToPlay=%s",
                 &aElement, AllowAutoplayToStr(result));
 
   return result;
 }
 
 /* static */ bool
-AutoplayPolicy::IsAudioContextAllowedToPlay(NotNull<AudioContext*> aContext)
+AutoplayPolicy::IsAllowedToPlay(const AudioContext& aContext)
 {
   if (!Preferences::GetBool("media.autoplay.block-webaudio", false)) {
     return true;
   }
 
   if (DefaultAutoplayBehaviour() == nsIAutoplay::ALLOWED) {
     return true;
   }
 
   if (!Preferences::GetBool("media.autoplay.enabled.user-gestures-needed", false)) {
     return true;
   }
 
   // Offline context won't directly output sound to audio devices.
-  if (aContext->IsOffline()) {
+  if (aContext.IsOffline()) {
     return true;
   }
 
-  if (IsWindowAllowedToPlay(aContext->GetOwner())) {
+  if (IsWindowAllowedToPlay(aContext.GetParentObject())) {
     return true;
   }
 
   return false;
 }
 
 } // namespace dom
 } // namespace mozilla
--- a/dom/media/AutoplayPolicy.h
+++ b/dom/media/AutoplayPolicy.h
@@ -33,26 +33,26 @@ class AudioContext;
  * 3) Document's origin has the "autoplay-media" permission.
  */
 class AutoplayPolicy
 {
 public:
   // Returns whether a given media element is allowed to play.
   static bool IsAllowedToPlay(const HTMLMediaElement& aElement);
 
+  // Returns whether a given AudioContext is allowed to play.
+  static bool IsAllowedToPlay(const AudioContext& aContext);
+
   // Returns true if a given media element would be allowed to play
   // if block autoplay was enabled. If this returns false, it means we would
   // either block or ask for permission.
   // Note: this is for telemetry purposes, and doesn't check the prefs
   // which enable/disable block autoplay. Do not use for blocking logic!
   static bool WouldBeAllowedToPlayIfAutoplayDisabled(const HTMLMediaElement& aElement);
 
-  // Returns whether a given AudioContext is allowed to play.
-  static bool IsAudioContextAllowedToPlay(NotNull<AudioContext*> aContext);
-
   // Returns the AutoplayPermissionManager that a given document must request on
   // for autoplay permission.
   static already_AddRefed<AutoplayPermissionManager> RequestFor(
     const nsIDocument& aDocument);
 };
 
 } // namespace dom
 } // namespace mozilla
--- a/dom/media/webaudio/AudioContext.cpp
+++ b/dom/media/webaudio/AudioContext.cpp
@@ -3,16 +3,17 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AudioContext.h"
 
 #include "blink/PeriodicWave.h"
 
+#include "mozilla/AutoplayPermissionManager.h"
 #include "mozilla/ErrorResult.h"
 #include "mozilla/NotNull.h"
 #include "mozilla/OwningNonNull.h"
 #include "mozilla/RefPtr.h"
 #include "mozilla/Preferences.h"
 
 #include "mozilla/dom/AnalyserNode.h"
 #include "mozilla/dom/AnalyserNodeBinding.h"
@@ -69,16 +70,21 @@
 #include "nsRFPService.h"
 #include "OscillatorNode.h"
 #include "PannerNode.h"
 #include "PeriodicWave.h"
 #include "ScriptProcessorNode.h"
 #include "StereoPannerNode.h"
 #include "WaveShaperNode.h"
 
+extern mozilla::LazyLogModule gAutoplayPermissionLog;
+
+#define AUTOPLAY_LOG(msg, ...)                                             \
+  MOZ_LOG(gAutoplayPermissionLog, LogLevel::Debug, (msg, ##__VA_ARGS__))
+
 namespace mozilla {
 namespace dom {
 
 // 0 is a special value that MediaStreams use to denote they are not part of a
 // AudioContext.
 static dom::AudioContext::AudioContextId gAudioContextId = 1;
 
 NS_IMPL_CYCLE_COLLECTION_CLASS(AudioContext)
@@ -148,42 +154,70 @@ AudioContext::AudioContext(nsPIDOMWindow
   , mCloseCalled(false)
   , mSuspendCalled(false)
   , mIsDisconnecting(false)
 {
   bool mute = aWindow->AddAudioContext(this);
 
   // Note: AudioDestinationNode needs an AudioContext that must already be
   // bound to the window.
-  bool allowedToStart = AutoplayPolicy::IsAudioContextAllowedToPlay(WrapNotNull(this));
+  bool allowedToStart = AutoplayPolicy::IsAllowedToPlay(*this);
   mDestination = new AudioDestinationNode(this,
                                           aIsOffline,
                                           allowedToStart,
                                           aNumberOfChannels,
                                           aLength,
                                           aSampleRate);
 
   // The context can't be muted until it has a destination.
   if (mute) {
     Mute();
   }
 
-  // If we won't allow audio context to start, we need to suspend all its stream
-  // in order to delay the state changing from 'suspend' to 'start'.
   if (!allowedToStart) {
-    ErrorResult rv;
-    RefPtr<Promise> dummy = Suspend(rv);
-    MOZ_ASSERT(!rv.Failed(), "can't create promise");
-    MOZ_ASSERT(dummy->State() != Promise::PromiseState::Rejected,
-               "suspend failed");
+    // Not allowed to start, delay the transition from `suspended` to `running`.
+    SuspendInternal(nullptr);
+    EnsureAutoplayRequested();
   }
 
   FFTBlock::MainThreadInit();
 }
 
+void
+AudioContext::EnsureAutoplayRequested()
+{
+  nsPIDOMWindowInner* parent = GetParentObject();
+  if (!parent || !parent->AsGlobal()) {
+    return;
+  }
+
+  RefPtr<AutoplayPermissionManager> request =
+    AutoplayPolicy::RequestFor(*(parent->GetExtantDoc()));
+  if (!request) {
+    return;
+  }
+
+  RefPtr<AudioContext> self = this;
+  request->RequestWithPrompt()
+    ->Then(parent->AsGlobal()->AbstractMainThreadFor(TaskCategory::Other),
+           __func__,
+           [ self, request ](
+             bool aApproved) {
+              AUTOPLAY_LOG("%p Autoplay request approved request=%p",
+                          self.get(),
+                          request.get());
+              self->ResumeInternal();
+           },
+           [self, request](nsresult aError) {
+              AUTOPLAY_LOG("%p Autoplay request denied request=%p",
+                          self.get(),
+                          request.get());
+           });
+}
+
 nsresult
 AudioContext::Init()
 {
   if (!mIsOffline) {
     nsresult rv = mDestination->CreateAudioChannelAgent();
     if (NS_WARN_IF(NS_FAILED(rv))) {
       return rv;
     }
@@ -925,21 +959,16 @@ AudioContext::OnStateChanged(void* aProm
             static_cast<int>(mAudioContextState), static_cast<int>(aNewState));
     MOZ_ASSERT(false);
   }
 
 #endif // DEBUG
 #endif // XP_MACOSX
 #endif // WIN32
 
-  MOZ_ASSERT(
-    mIsOffline || aPromise || aNewState == AudioContextState::Running,
-    "We should have a promise here if this is a real-time AudioContext."
-    "Or this is the first time we switch to \"running\".");
-
   if (aPromise) {
     Promise* promise = reinterpret_cast<Promise*>(aPromise);
     // It is possible for the promise to have been removed from
     // mPromiseGripArray if the cycle collector has severed our connections. DO
     // NOT dereference the promise pointer in that case since it may point to
     // already freed memory.
     if (mPromiseGripArray.Contains(promise)) {
       promise->MaybeResolveWithUndefined();
@@ -993,35 +1022,40 @@ AudioContext::Suspend(ErrorResult& aRv)
   }
 
   if (mAudioContextState == AudioContextState::Closed ||
       mCloseCalled) {
     promise->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR);
     return promise.forget();
   }
 
-  Destination()->Suspend();
+  mPromiseGripArray.AppendElement(promise);
+  SuspendInternal(promise);
+  return promise.forget();
+}
 
-  mPromiseGripArray.AppendElement(promise);
+void
+AudioContext::SuspendInternal(void* aPromise)
+{
+  Destination()->Suspend();
 
   nsTArray<MediaStream*> streams;
   // If mSuspendCalled is true then we already suspended all our streams,
   // so don't suspend them again (since suspend(); suspend(); resume(); should
   // cancel both suspends). But we still need to do ApplyAudioContextOperation
   // to ensure our new promise is resolved.
   if (!mSuspendCalled) {
     streams = GetAllStreams();
   }
   Graph()->ApplyAudioContextOperation(DestinationStream()->AsAudioNodeStream(),
                                       streams,
-                                      AudioContextOperation::Suspend, promise);
+                                      AudioContextOperation::Suspend,
+                                      aPromise);
 
   mSuspendCalled = true;
-
-  return promise.forget();
 }
 
 already_AddRefed<Promise>
 AudioContext::Resume(ErrorResult& aRv)
 {
   nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject());
   RefPtr<Promise> promise;
   promise = Promise::Create(parentObject, aRv);
@@ -1037,34 +1071,43 @@ AudioContext::Resume(ErrorResult& aRv)
   if (mAudioContextState == AudioContextState::Closed ||
       mCloseCalled) {
     promise->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR);
     return promise.forget();
   }
 
   mPendingResumePromises.AppendElement(promise);
 
-  if (AutoplayPolicy::IsAudioContextAllowedToPlay(WrapNotNull(this))) {
-    Destination()->Resume();
+  const bool isAllowedToPlay = AutoplayPolicy::IsAllowedToPlay(*this);
+  if (isAllowedToPlay) {
+    ResumeInternal();
+  }
+  AUTOPLAY_LOG("Resume AudioContext %p, IsAllowedToPlay=%d",
+    this, isAllowedToPlay);
+  return promise.forget();
+}
+
+void
+AudioContext::ResumeInternal()
+{
+  Destination()->Resume();
 
-    nsTArray<MediaStream*> streams;
-    // If mSuspendCalled is false then we already resumed all our streams,
-    // so don't resume them again (since suspend(); resume(); resume(); should
-    // be OK). But we still need to do ApplyAudioContextOperation
-    // to ensure our new promise is resolved.
-    if (mSuspendCalled) {
-      streams = GetAllStreams();
-    }
-    Graph()->ApplyAudioContextOperation(DestinationStream()->AsAudioNodeStream(),
-                                        streams,
-                                        AudioContextOperation::Resume, promise);
-    mSuspendCalled = false;
+  nsTArray<MediaStream*> streams;
+  // If mSuspendCalled is false then we already resumed all our streams,
+  // so don't resume them again (since suspend(); resume(); resume(); should
+  // be OK). But we still need to do ApplyAudioContextOperation
+  // to ensure our new promise is resolved.
+  if (mSuspendCalled) {
+    streams = GetAllStreams();
   }
-
-  return promise.forget();
+  Graph()->ApplyAudioContextOperation(DestinationStream()->AsAudioNodeStream(),
+                                      streams,
+                                      AudioContextOperation::Resume,
+                                      nullptr);
+  mSuspendCalled = false;
 }
 
 already_AddRefed<Promise>
 AudioContext::Close(ErrorResult& aRv)
 {
   nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject());
   RefPtr<Promise> promise;
   promise = Promise::Create(parentObject, aRv);
--- a/dom/media/webaudio/AudioContext.h
+++ b/dom/media/webaudio/AudioContext.h
@@ -336,16 +336,22 @@ private:
 
   size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
   NS_DECL_NSIMEMORYREPORTER
 
   friend struct ::mozilla::WebAudioDecodeJob;
 
   nsTArray<MediaStream*> GetAllStreams() const;
 
+  // Request the prompt to ask for user's approval for autoplay.
+  void EnsureAutoplayRequested();
+
+  void ResumeInternal();
+  void SuspendInternal(void* aPromise);
+
 private:
   // Each AudioContext has an id, that is passed down the MediaStreams that
   // back the AudioNodes, so we can easily compute the set of all the
   // MediaStreams for a given context, on the MediasStreamGraph side.
   const AudioContextId mId;
   // Note that it's important for mSampleRate to be initialized before
   // mDestination, as mDestination's constructor needs to access it!
   const float mSampleRate;