Bug 1178738 - Have SpeechSynthesis::mSpeechQueue not contain spoken utterance. r=smaug
☠☠ backed out by 61db0bb716b2 ☠☠
author: Eitan Isaacson <eitan@monotonous.org>
Thu, 26 May 2016 14:22:37 -0700
changeset 339228 ab50796d261607416f6c9b785015d6601edaa348
parent 339227 4c66c0ddf56267ad34c5a3538a66d7f9fcfaafa2
child 339229 5c995bbdfa3ac09234e829d5a8ba91741c89ff35
push id: 6249
push user: jlund@mozilla.com
push date: Mon, 01 Aug 2016 13:59:36 +0000
treeherder: mozilla-beta@bad9d4f5bf7e [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: smaug
bugs: 1178738
milestone49.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1178738 - Have SpeechSynthesis::mSpeechQueue not contain spoken utterance. r=smaug MozReview-Commit-ID: CyXGDbenWtq
dom/media/webspeech/synth/SpeechSynthesis.cpp
dom/media/webspeech/synth/SpeechSynthesis.h
dom/media/webspeech/synth/nsSpeechTask.h
--- a/dom/media/webspeech/synth/SpeechSynthesis.cpp
+++ b/dom/media/webspeech/synth/SpeechSynthesis.cpp
@@ -75,54 +75,61 @@ SpeechSynthesis::~SpeechSynthesis()
 }
 
 JSObject*
 SpeechSynthesis::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
 {
   return SpeechSynthesisBinding::Wrap(aCx, this, aGivenProto);
 }
 
+SpeechSynthesisUtterance*
+SpeechSynthesis::CurrentUtterance() const
+{
+  return mCurrentTask ? mCurrentTask->mUtterance.get() : nullptr;
+}
+
 bool
 SpeechSynthesis::Pending() const
 {
-  switch (mSpeechQueue.Length()) {
-  case 0:
-    return false;
-
-  case 1:
-    return mSpeechQueue.ElementAt(0)->GetState() == SpeechSynthesisUtterance::STATE_PENDING;
-
-  default:
+  if (mSpeechQueue.Length() > 0) {
     return true;
   }
+
+  SpeechSynthesisUtterance* utterance = CurrentUtterance();
+  if (utterance && utterance->GetState() == SpeechSynthesisUtterance::STATE_PENDING) {
+    return true;
+  }
+
+  return false;
 }
 
 bool
 SpeechSynthesis::Speaking() const
 {
-  if (!mSpeechQueue.IsEmpty() &&
-      mSpeechQueue.ElementAt(0)->GetState() == SpeechSynthesisUtterance::STATE_SPEAKING) {
+  SpeechSynthesisUtterance* utterance = CurrentUtterance();
+  if (utterance && utterance->GetState() == SpeechSynthesisUtterance::STATE_SPEAKING) {
     return true;
   }
 
   // Returns global speaking state if global queue is enabled. Or false.
   return nsSynthVoiceRegistry::GetInstance()->IsSpeaking();
 }
 
 bool
 SpeechSynthesis::Paused() const
 {
+  SpeechSynthesisUtterance* utterance = CurrentUtterance();
   return mHoldQueue || (mCurrentTask && mCurrentTask->IsPrePaused()) ||
-         (!mSpeechQueue.IsEmpty() && mSpeechQueue.ElementAt(0)->IsPaused());
+         (utterance && utterance->IsPaused());
 }
 
 bool
 SpeechSynthesis::HasEmptyQueue() const
 {
-  return mSpeechQueue.Length() == 0;
+  return !mCurrentTask && mSpeechQueue.Length() == 0;
 }
 
 bool SpeechSynthesis::HasVoices() const
 {
   uint32_t voiceCount = mVoiceCache.Count();
   if (voiceCount == 0) {
     nsresult rv = nsSynthVoiceRegistry::GetInstance()->GetVoiceCount(&voiceCount);
     if(NS_WARN_IF(NS_FAILED(rv))) {
@@ -157,16 +164,17 @@ SpeechSynthesis::AdvanceQueue()
   LOG(LogLevel::Debug,
       ("SpeechSynthesis::AdvanceQueue length=%d", mSpeechQueue.Length()));
 
   if (mSpeechQueue.IsEmpty()) {
     return;
   }
 
   RefPtr<SpeechSynthesisUtterance> utterance = mSpeechQueue.ElementAt(0);
+  mSpeechQueue.RemoveElementAt(0);
 
   nsAutoString docLang;
   nsCOMPtr<nsPIDOMWindowInner> window = GetOwner();
   nsIDocument* doc = window ? window->GetExtantDoc() : nullptr;
 
   if (doc) {
     Element* elm = doc->GetHtmlElement();
 
@@ -183,39 +191,32 @@ SpeechSynthesis::AdvanceQueue()
   }
 
   return;
 }
 
 void
 SpeechSynthesis::Cancel()
 {
-  if (!mSpeechQueue.IsEmpty() &&
-      mSpeechQueue.ElementAt(0)->GetState() == SpeechSynthesisUtterance::STATE_SPEAKING) {
-    // Remove all queued utterances except for current one, we will remove it
-    // in OnEnd
-    mSpeechQueue.RemoveElementsAt(1, mSpeechQueue.Length() - 1);
-  } else {
-    mSpeechQueue.Clear();
-  }
+  mSpeechQueue.Clear();
 
   if (mCurrentTask) {
     mCurrentTask->Cancel();
   }
 }
 
 void
 SpeechSynthesis::Pause()
 {
   if (Paused()) {
     return;
   }
 
-  if (mCurrentTask && !mSpeechQueue.IsEmpty() &&
-      mSpeechQueue.ElementAt(0)->GetState() != SpeechSynthesisUtterance::STATE_ENDED) {
+  SpeechSynthesisUtterance* utterance = CurrentUtterance();
+  if (utterance && utterance->GetState() != SpeechSynthesisUtterance::STATE_ENDED) {
     mCurrentTask->Pause();
   } else {
     mHoldQueue = true;
   }
 }
 
 void
 SpeechSynthesis::Resume()
@@ -232,20 +233,16 @@ SpeechSynthesis::Resume()
   }
 }
 
 void
 SpeechSynthesis::OnEnd(const nsSpeechTask* aTask)
 {
   MOZ_ASSERT(mCurrentTask == aTask);
 
-  if (!mSpeechQueue.IsEmpty()) {
-    mSpeechQueue.RemoveElementAt(0);
-  }
-
   mCurrentTask = nullptr;
   AdvanceQueue();
 }
 
 void
 SpeechSynthesis::GetVoices(nsTArray< RefPtr<SpeechSynthesisVoice> >& aResult)
 {
   aResult.Clear();
--- a/dom/media/webspeech/synth/SpeechSynthesis.h
+++ b/dom/media/webspeech/synth/SpeechSynthesis.h
@@ -39,32 +39,35 @@ public:
   JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override;
 
   bool Pending() const;
 
   bool Speaking() const;
 
   bool Paused() const;
 
+  /* Returns true if speech queue is empty and there is no speaking utterance */
   bool HasEmptyQueue() const;
 
   void Speak(SpeechSynthesisUtterance& aUtterance);
 
   void Cancel();
 
   void Pause();
 
   void Resume();
 
   void OnEnd(const nsSpeechTask* aTask);
 
   void GetVoices(nsTArray< RefPtr<SpeechSynthesisVoice> >& aResult);
 
   void ForceEnd();
 
+  SpeechSynthesisUtterance* CurrentUtterance() const;
+
   IMPL_EVENT_HANDLER(voiceschanged)
 
 private:
   virtual ~SpeechSynthesis();
 
   void AdvanceQueue();
 
   bool HasVoices() const;
--- a/dom/media/webspeech/synth/nsSpeechTask.h
+++ b/dom/media/webspeech/synth/nsSpeechTask.h
@@ -19,16 +19,17 @@ class SpeechSynthesisUtterance;
 class SpeechSynthesis;
 class SynthStreamListener;
 
 class nsSpeechTask : public nsISpeechTask
                    , public nsIAudioChannelAgentCallback
                    , public nsSupportsWeakReference
 {
   friend class SynthStreamListener;
+  friend class SpeechSynthesis;
 
 public:
   NS_DECL_CYCLE_COLLECTING_ISUPPORTS
   NS_DECL_CYCLE_COLLECTION_CLASS_AMBIGUOUS(nsSpeechTask, nsISpeechTask)
 
   NS_DECL_NSISPEECHTASK
   NS_DECL_NSIAUDIOCHANNELAGENTCALLBACK