dom/media/webaudio/AudioContext.h
author Alastor Wu <alwu@mozilla.com>
Thu, 17 Jan 2019 02:56:22 +0000
changeset 514222 521176ad7ae8c205307c4447c70a4de546c6cac8
parent 513517 c34e287f2f7ca98ad1945804c2a20f61249b36af
child 514223 987aa2594ba62b25ca9a36ae51e8c4d7afb0939e
permissions -rw-r--r--
Bug 1519430 - part1 : add new methods for calling suspend/resume from chrome. r=padenot

In order to separate resume/suspend called from chrome and content side, we
need to create new methods.

Differential Revision: https://phabricator.services.mozilla.com/D16612

/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef AudioContext_h_
#define AudioContext_h_

#include "mozilla/dom/OfflineAudioContextBinding.h"
#include "MediaBufferDecoder.h"
#include "mozilla/Attributes.h"
#include "mozilla/DOMEventTargetHelper.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/dom/TypedArray.h"
#include "mozilla/RelativeTimeline.h"
#include "mozilla/UniquePtr.h"
#include "nsCOMPtr.h"
#include "nsCycleCollectionParticipant.h"
#include "nsHashKeys.h"
#include "nsTHashtable.h"
#include "js/TypeDecls.h"
#include "nsIMemoryReporter.h"

// X11 has a #define for CurrentTime. Unbelievable :-(.
// See dom/media/DOMMediaStream.h for more fun!
#ifdef CurrentTime
#undef CurrentTime
#endif

namespace WebCore {
class PeriodicWave;
}  // namespace WebCore

class nsPIDOMWindowInner;

namespace mozilla {

class DOMMediaStream;
class ErrorResult;
class MediaStream;
class MediaStreamGraph;
class AudioNodeStream;

namespace dom {

enum class AudioContextState : uint8_t;
class AnalyserNode;
class AudioBuffer;
class AudioBufferSourceNode;
class AudioDestinationNode;
class AudioListener;
class AudioNode;
class BiquadFilterNode;
class ChannelMergerNode;
class ChannelSplitterNode;
class ConstantSourceNode;
class ConvolverNode;
class DelayNode;
class DynamicsCompressorNode;
class GainNode;
class GlobalObject;
class HTMLMediaElement;
class IIRFilterNode;
class MediaElementAudioSourceNode;
class MediaStreamAudioDestinationNode;
class MediaStreamAudioSourceNode;
class OscillatorNode;
class PannerNode;
class ScriptProcessorNode;
class StereoPannerNode;
class WaveShaperNode;
class Worklet;
class PeriodicWave;
struct PeriodicWaveConstraints;
class Promise;
enum class OscillatorType : uint8_t;

// This is addrefed by the OscillatorNodeEngine on the main thread
// and then used from the MSG thread.
// It can be released either from the graph thread or the main thread.
class BasicWaveFormCache {
 public:
  explicit BasicWaveFormCache(uint32_t aSampleRate);
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(BasicWaveFormCache)
  WebCore::PeriodicWave* GetBasicWaveForm(OscillatorType aType);

 private:
  ~BasicWaveFormCache();
  RefPtr<WebCore::PeriodicWave> mSawtooth;
  RefPtr<WebCore::PeriodicWave> mSquare;
  RefPtr<WebCore::PeriodicWave> mTriangle;
  uint32_t mSampleRate;
};
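
// A minimal usage sketch (illustrative only; the surrounding engine code is
// assumed, and `aContext` stands in for an AudioContext*): the cache is
// obtained on the main thread, and a waveform may later be fetched from the
// MSG thread.
//
//   RefPtr<BasicWaveFormCache> cache = aContext->GetBasicWaveFormCache();
//   // ... later, possibly on the MSG thread:
//   WebCore::PeriodicWave* wave =
//       cache->GetBasicWaveForm(OscillatorType::Square);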

/* This runnable allows the MSG to notify the main thread when audio is actually
 * flowing */
class StateChangeTask final : public Runnable {
 public:
  /* This constructor should be used when this event is sent from the main
   * thread. */
  StateChangeTask(AudioContext* aAudioContext, void* aPromise,
                  AudioContextState aNewState);

  /* This constructor should be used when this event is sent from the audio
   * thread. */
  StateChangeTask(AudioNodeStream* aStream, void* aPromise,
                  AudioContextState aNewState);

  NS_IMETHOD Run() override;

 private:
  RefPtr<AudioContext> mAudioContext;
  void* mPromise;
  RefPtr<AudioNodeStream> mAudioNodeStream;
  AudioContextState mNewState;
};
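
// A minimal dispatch sketch (illustrative only; the calling engine code is
// assumed): from the audio thread, queue a StateChangeTask so that the state
// flip and the promise resolution happen back on the main thread.
//
//   RefPtr<StateChangeTask> task =
//       new StateChangeTask(aStream, aPromise, AudioContextState::Running);
//   NS_DispatchToMainThread(task.forget());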

enum class AudioContextOperation { Suspend, Resume, Close };
struct AudioContextOptions;

class AudioContext final : public DOMEventTargetHelper,
                           public nsIMemoryReporter,
                           public RelativeTimeline {
  AudioContext(nsPIDOMWindowInner* aParentWindow, bool aIsOffline,
               uint32_t aNumberOfChannels = 0, uint32_t aLength = 0,
               float aSampleRate = 0.0f);
  ~AudioContext();

  nsresult Init();

 public:
  typedef uint64_t AudioContextId;

  NS_DECL_ISUPPORTS_INHERITED
  NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(AudioContext, DOMEventTargetHelper)
  MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf)

  nsPIDOMWindowInner* GetParentObject() const { return GetOwner(); }

  virtual void DisconnectFromOwner() override;
  virtual void BindToOwner(nsIGlobalObject* aNew) override;

  void Shutdown();  // idempotent

  JSObject* WrapObject(JSContext* aCx,
                       JS::Handle<JSObject*> aGivenProto) override;

  using DOMEventTargetHelper::DispatchTrustedEvent;

  // Constructor for regular AudioContext
  static already_AddRefed<AudioContext> Constructor(
      const GlobalObject& aGlobal, const AudioContextOptions& aOptions,
      ErrorResult& aRv);

  // Constructor for offline AudioContext with options object
  static already_AddRefed<AudioContext> Constructor(
      const GlobalObject& aGlobal, const OfflineAudioContextOptions& aOptions,
      ErrorResult& aRv);

  // Constructor for offline AudioContext
  static already_AddRefed<AudioContext> Constructor(const GlobalObject& aGlobal,
                                                    uint32_t aNumberOfChannels,
                                                    uint32_t aLength,
                                                    float aSampleRate,
                                                    ErrorResult& aRv);

  // AudioContext methods

  AudioDestinationNode* Destination() const { return mDestination; }

  float SampleRate() const { return mSampleRate; }

  bool ShouldSuspendNewStream() const { return mSuspendCalled; }

  double CurrentTime();

  AudioListener* Listener();

  AudioContextState State() const { return mAudioContextState; }

  Worklet* GetAudioWorklet(ErrorResult& aRv);

  bool IsRunning() const;

  // Called when an AudioScheduledSourceNode starts. This method may resume
  // the AudioContext if it was previously not allowed to start.
  void StartBlockedAudioContextIfAllowed();

  // These three methods return a promise to content that is resolved when a
  // (possibly long) operation is completed on the MSG (and possibly other)
  // thread(s). To avoid having to match the calls with their asynchronous
  // results when the operation is completed, we keep a reference to the
  // promises on the main thread, and then send the promise pointers down to
  // the MSG thread as void* (to make it very clear that the pointer is
  // merely to be treated as an ID). When back on the main thread, we can
  // resolve or reject the promise by casting it back to a `Promise*`, while
  // asserting we're back on the main thread and removing the reference we
  // added.
  already_AddRefed<Promise> Suspend(ErrorResult& aRv);
  already_AddRefed<Promise> Resume(ErrorResult& aRv);
  already_AddRefed<Promise> Close(ErrorResult& aRv);
  IMPL_EVENT_HANDLER(statechange)
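
  // A sketch of the pattern described above (illustrative only, not the
  // actual implementation; the MSG plumbing and error handling are elided):
  // the promise is gripped on the main thread, its pointer travels through
  // the MSG as an opaque ID, and OnStateChanged() resolves it once the
  // transition is done.
  //
  //   already_AddRefed<Promise> AudioContext::Suspend(ErrorResult& aRv) {
  //     nsCOMPtr<nsIGlobalObject> global = do_QueryInterface(GetParentObject());
  //     RefPtr<Promise> promise = Promise::Create(global, aRv);
  //     mPromiseGripArray.AppendElement(promise);  // keep it alive
  //     SuspendInternal(promise.get());            // pointer goes down as void*
  //     return promise.forget();
  //   }
  //
  //   void AudioContext::OnStateChanged(void* aPromise,
  //                                     AudioContextState aNewState) {
  //     MOZ_ASSERT(NS_IsMainThread());
  //     Promise* promise = reinterpret_cast<Promise*>(aPromise);
  //     promise->MaybeResolveWithUndefined();
  //     mPromiseGripArray.RemoveElement(promise);  // drop the grip
  //   }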

  // These two functions are similar to Suspend() and Resume(); the
  // difference is that they are designed to be called from the chrome side
  // rather than from content (e.g. from the inner window), so they do not
  // need to return a promise to the caller.
  void SuspendFromChrome();
  void ResumeFromChrome();
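
  // A hypothetical caller sketch (`SuspendAllContexts` is illustrative, not
  // a real Gecko function): chrome code, e.g. the inner window, can suspend
  // every context it owns without having to consume a promise.
  //
  //   void SuspendAllContexts(const nsTArray<RefPtr<AudioContext>>& aContexts) {
  //     for (const RefPtr<AudioContext>& context : aContexts) {
  //       context->SuspendFromChrome();  // fire-and-forget, no promise
  //     }
  //   }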

  already_AddRefed<AudioBufferSourceNode> CreateBufferSource(ErrorResult& aRv);

  already_AddRefed<ConstantSourceNode> CreateConstantSource(ErrorResult& aRv);

  already_AddRefed<AudioBuffer> CreateBuffer(uint32_t aNumberOfChannels,
                                             uint32_t aLength,
                                             float aSampleRate,
                                             ErrorResult& aRv);

  already_AddRefed<MediaStreamAudioDestinationNode>
  CreateMediaStreamDestination(ErrorResult& aRv);

  already_AddRefed<ScriptProcessorNode> CreateScriptProcessor(
      uint32_t aBufferSize, uint32_t aNumberOfInputChannels,
      uint32_t aNumberOfOutputChannels, ErrorResult& aRv);

  already_AddRefed<StereoPannerNode> CreateStereoPanner(ErrorResult& aRv);

  already_AddRefed<AnalyserNode> CreateAnalyser(ErrorResult& aRv);

  already_AddRefed<GainNode> CreateGain(ErrorResult& aRv);

  already_AddRefed<WaveShaperNode> CreateWaveShaper(ErrorResult& aRv);

  already_AddRefed<MediaElementAudioSourceNode> CreateMediaElementSource(
      HTMLMediaElement& aMediaElement, ErrorResult& aRv);
  already_AddRefed<MediaStreamAudioSourceNode> CreateMediaStreamSource(
      DOMMediaStream& aMediaStream, ErrorResult& aRv);

  already_AddRefed<DelayNode> CreateDelay(double aMaxDelayTime,
                                          ErrorResult& aRv);

  already_AddRefed<PannerNode> CreatePanner(ErrorResult& aRv);

  already_AddRefed<ConvolverNode> CreateConvolver(ErrorResult& aRv);

  already_AddRefed<ChannelSplitterNode> CreateChannelSplitter(
      uint32_t aNumberOfOutputs, ErrorResult& aRv);

  already_AddRefed<ChannelMergerNode> CreateChannelMerger(
      uint32_t aNumberOfInputs, ErrorResult& aRv);

  already_AddRefed<DynamicsCompressorNode> CreateDynamicsCompressor(
      ErrorResult& aRv);

  already_AddRefed<BiquadFilterNode> CreateBiquadFilter(ErrorResult& aRv);

  already_AddRefed<IIRFilterNode> CreateIIRFilter(
      const Sequence<double>& aFeedforward, const Sequence<double>& aFeedback,
      mozilla::ErrorResult& aRv);

  already_AddRefed<OscillatorNode> CreateOscillator(ErrorResult& aRv);

  already_AddRefed<PeriodicWave> CreatePeriodicWave(
      const Float32Array& aRealData, const Float32Array& aImagData,
      const PeriodicWaveConstraints& aConstraints, ErrorResult& aRv);

  already_AddRefed<Promise> DecodeAudioData(
      const ArrayBuffer& aBuffer,
      const Optional<OwningNonNull<DecodeSuccessCallback>>& aSuccessCallback,
      const Optional<OwningNonNull<DecodeErrorCallback>>& aFailureCallback,
      ErrorResult& aRv);

  // OfflineAudioContext methods
  already_AddRefed<Promise> StartRendering(ErrorResult& aRv);
  IMPL_EVENT_HANDLER(complete)
  unsigned long Length();

  bool IsOffline() const { return mIsOffline; }

  MediaStreamGraph* Graph() const;
  AudioNodeStream* DestinationStream() const;

  // Nodes register here if they will produce sound even if they have silent
  // or no input connections.  The AudioContext will keep registered nodes
  // alive until the context is collected.  This takes care of "playing"
  // references and "tail-time" references.
  void RegisterActiveNode(AudioNode* aNode);
  // Nodes unregister when they have finished producing sound for the
  // foreseeable future.
  // Do NOT call UnregisterActiveNode from an AudioNode destructor.
  // If the destructor is called, then the Node has already been unregistered.
  // The destructor may be called during hashtable enumeration, during which
  // unregistering would not be safe.
  void UnregisterActiveNode(AudioNode* aNode);
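
  // An illustrative lifecycle sketch (`SomeSourceNode` is hypothetical): a
  // source node registers itself when it starts producing sound and
  // unregisters once it has finished, so the context keeps it alive only
  // while it can still be heard.
  //
  //   void SomeSourceNode::Start() {
  //     Context()->RegisterActiveNode(this);    // "playing" reference
  //   }
  //   void SomeSourceNode::NotifyFinished() {
  //     Context()->UnregisterActiveNode(this);  // tail-time over
  //   }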

  uint32_t MaxChannelCount() const;

  uint32_t ActiveNodeCount() const;

  void Mute() const;
  void Unmute() const;

  JSObject* GetGlobalJSObject() const;

  void RegisterNode(AudioNode* aNode);
  void UnregisterNode(AudioNode* aNode);

  void OnStateChanged(void* aPromise, AudioContextState aNewState);

  BasicWaveFormCache* GetBasicWaveFormCache();

  bool CheckClosed(ErrorResult& aRv);

  void Dispatch(already_AddRefed<nsIRunnable>&& aRunnable);

 private:
  void DisconnectFromWindow();
  void RemoveFromDecodeQueue(WebAudioDecodeJob* aDecodeJob);
  void ShutdownDecoder();

  size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
  NS_DECL_NSIMEMORYREPORTER

  friend struct ::mozilla::WebAudioDecodeJob;

  nsTArray<MediaStream*> GetAllStreams() const;

  void ResumeInternal();
  void SuspendInternal(void* aPromise);

  // Reports an error message to the console, and dispatches a testing event
  // if needed, when the AudioContext is blocked by the autoplay policy.
  void ReportBlocked();

  void ReportToConsole(uint32_t aErrorFlags, const char* aMsg) const;

  // This function should be called every time we decide whether to allow the
  // AudioContext to start; it updates Telemetry-related variables.
  void UpdateAutoplayAssumptionStatus();

  // These functions are used for updating Telemetry.
  // - MaybeUpdateAutoplayTelemetry: updates category 'AllowedAfterBlocked'.
  // - MaybeUpdateAutoplayTelemetryWhenShutdown: updates categories
  //   'NeverBlocked' and 'NeverAllowed', so it must be called when the
  //   AudioContext shuts down.
  void MaybeUpdateAutoplayTelemetry();
  void MaybeUpdateAutoplayTelemetryWhenShutdown();

 private:
  // Each AudioContext has an id that is passed down to the MediaStreams that
  // back the AudioNodes, so we can easily compute the set of all the
  // MediaStreams for a given context on the MediaStreamGraph side.
  const AudioContextId mId;
  // Note that it's important for mSampleRate to be initialized before
  // mDestination, as mDestination's constructor needs to access it!
  const float mSampleRate;
  AudioContextState mAudioContextState;
  RefPtr<AudioDestinationNode> mDestination;
  RefPtr<AudioListener> mListener;
  RefPtr<Worklet> mWorklet;
  nsTArray<UniquePtr<WebAudioDecodeJob>> mDecodeJobs;
  // This array is used to keep the suspend/close promises alive until they
  // are resolved, so we can safely pass them across threads.
  nsTArray<RefPtr<Promise>> mPromiseGripArray;
  // This array is used to keep only the resume promises alive until they are
  // resolved, so we can safely pass them across threads. If the AudioContext
  // is not allowed to play, the promise stays pending in this array and is
  // only resolved once the AudioContext has been allowed to start and the
  // user calls resume() again.
  nsTArray<RefPtr<Promise>> mPendingResumePromises;
  // See RegisterActiveNode.  These will keep the AudioContext alive while it
  // is rendering and the window remains alive.
  nsTHashtable<nsRefPtrHashKey<AudioNode>> mActiveNodes;
  // Raw (non-owning) references to all AudioNodes for this AudioContext.
  nsTHashtable<nsPtrHashKey<AudioNode>> mAllNodes;
  // Cache to avoid recomputing basic waveforms all the time.
  RefPtr<BasicWaveFormCache> mBasicWaveFormCache;
  // Number of channels passed in the OfflineAudioContext ctor.
  uint32_t mNumberOfChannels;
  bool mIsOffline;
  bool mIsStarted;
  bool mIsShutDown;
  // Close has been called; suspend and resume calls are rejected.
  bool mCloseCalled;
  // Suspend has been called with no following resume.
  bool mSuspendCalled;
  bool mIsDisconnecting;
  // This flag stores the previous value of the `allowed-to-start` status.
  bool mWasAllowedToStart;

  // These variables are used for telemetry; they do not reflect the actual
  // status of the AudioContext. They are computed under the assumption that
  // blocking web audio is enabled, because we want to record Telemetry
  // whether or not the user has enabled blocking autoplay.
  // - 'mWasEverAllowedToStart' is true if the AudioContext would ever have
  //   been allowed to start, assuming blocking web audio were enabled.
  // - 'mWasEverBlockedToStart' is true if the AudioContext would ever have
  //   been blocked from starting, assuming blocking web audio were enabled.
  // - 'mWouldBeAllowedToStart' stores the previous value of the
  //   `allowed-to-start` status, assuming blocking web audio were enabled.
  bool mWasEverAllowedToStart;
  bool mWasEverBlockedToStart;
  bool mWouldBeAllowedToStart;
};

static const dom::AudioContext::AudioContextId NO_AUDIO_CONTEXT = 0;

}  // namespace dom
}  // namespace mozilla

#endif