Backed out changeset ab50796d2616 (bug 1178738)
author: Sebastian Hengst <archaeopteryx@coole-files.de>
Thu, 02 Jun 2016 20:32:46 +0200
changeset 339253 61db0bb716b2ec54fc79d12d69311115380a7c40
parent 339252 f19dab43cca7b07844907792d861570933f3f500
child 339254 8d2e6a40be15cd9fcb17a3c5e8da11c8b8a85a27
push id: 6249
push user: jlund@mozilla.com
push date: Mon, 01 Aug 2016 13:59:36 +0000
treeherder: mozilla-beta@bad9d4f5bf7e [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
bugs: 1178738
milestone: 49.0a1
backs out: ab50796d261607416f6c9b785015d6601edaa348
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Backed out changeset ab50796d2616 (bug 1178738)
dom/media/webspeech/synth/SpeechSynthesis.cpp
dom/media/webspeech/synth/SpeechSynthesis.h
dom/media/webspeech/synth/nsSpeechTask.h
--- a/dom/media/webspeech/synth/SpeechSynthesis.cpp
+++ b/dom/media/webspeech/synth/SpeechSynthesis.cpp
@@ -75,61 +75,54 @@ SpeechSynthesis::~SpeechSynthesis()
 }
 
 JSObject*
 SpeechSynthesis::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
 {
   return SpeechSynthesisBinding::Wrap(aCx, this, aGivenProto);
 }
 
-SpeechSynthesisUtterance*
-SpeechSynthesis::CurrentUtterance() const
-{
-  return mCurrentTask ? mCurrentTask->mUtterance.get() : nullptr;
-}
-
 bool
 SpeechSynthesis::Pending() const
 {
-  if (mSpeechQueue.Length() > 0) {
+  switch (mSpeechQueue.Length()) {
+  case 0:
+    return false;
+
+  case 1:
+    return mSpeechQueue.ElementAt(0)->GetState() == SpeechSynthesisUtterance::STATE_PENDING;
+
+  default:
     return true;
   }
-
-  SpeechSynthesisUtterance* utterance = CurrentUtterance();
-  if (utterance && utterance->GetState() == SpeechSynthesisUtterance::STATE_PENDING) {
-    return true;
-  }
-
-  return false;
 }
 
 bool
 SpeechSynthesis::Speaking() const
 {
-  SpeechSynthesisUtterance* utterance = CurrentUtterance();
-  if (utterance && utterance->GetState() == SpeechSynthesisUtterance::STATE_SPEAKING) {
+  if (!mSpeechQueue.IsEmpty() &&
+      mSpeechQueue.ElementAt(0)->GetState() == SpeechSynthesisUtterance::STATE_SPEAKING) {
     return true;
   }
 
   // Returns global speaking state if global queue is enabled. Or false.
   return nsSynthVoiceRegistry::GetInstance()->IsSpeaking();
 }
 
 bool
 SpeechSynthesis::Paused() const
 {
-  SpeechSynthesisUtterance* utterance = CurrentUtterance();
   return mHoldQueue || (mCurrentTask && mCurrentTask->IsPrePaused()) ||
-         (utterance && utterance->IsPaused());
+         (!mSpeechQueue.IsEmpty() && mSpeechQueue.ElementAt(0)->IsPaused());
 }
 
 bool
 SpeechSynthesis::HasEmptyQueue() const
 {
-  return !mCurrentTask && mSpeechQueue.Length() == 0;
+  return mSpeechQueue.Length() == 0;
 }
 
 bool SpeechSynthesis::HasVoices() const
 {
   uint32_t voiceCount = mVoiceCache.Count();
   if (voiceCount == 0) {
     nsresult rv = nsSynthVoiceRegistry::GetInstance()->GetVoiceCount(&voiceCount);
     if(NS_WARN_IF(NS_FAILED(rv))) {
@@ -164,17 +157,16 @@ SpeechSynthesis::AdvanceQueue()
   LOG(LogLevel::Debug,
       ("SpeechSynthesis::AdvanceQueue length=%d", mSpeechQueue.Length()));
 
   if (mSpeechQueue.IsEmpty()) {
     return;
   }
 
   RefPtr<SpeechSynthesisUtterance> utterance = mSpeechQueue.ElementAt(0);
-  mSpeechQueue.RemoveElementAt(0);
 
   nsAutoString docLang;
   nsCOMPtr<nsPIDOMWindowInner> window = GetOwner();
   nsIDocument* doc = window ? window->GetExtantDoc() : nullptr;
 
   if (doc) {
     Element* elm = doc->GetHtmlElement();
 
@@ -191,32 +183,39 @@ SpeechSynthesis::AdvanceQueue()
   }
 
   return;
 }
 
 void
 SpeechSynthesis::Cancel()
 {
-  mSpeechQueue.Clear();
+  if (!mSpeechQueue.IsEmpty() &&
+      mSpeechQueue.ElementAt(0)->GetState() == SpeechSynthesisUtterance::STATE_SPEAKING) {
+    // Remove all queued utterances except for current one, we will remove it
+    // in OnEnd
+    mSpeechQueue.RemoveElementsAt(1, mSpeechQueue.Length() - 1);
+  } else {
+    mSpeechQueue.Clear();
+  }
 
   if (mCurrentTask) {
     mCurrentTask->Cancel();
   }
 }
 
 void
 SpeechSynthesis::Pause()
 {
   if (Paused()) {
     return;
   }
 
-  SpeechSynthesisUtterance* utterance = CurrentUtterance();
-  if (utterance && utterance->GetState() != SpeechSynthesisUtterance::STATE_ENDED) {
+  if (mCurrentTask && !mSpeechQueue.IsEmpty() &&
+      mSpeechQueue.ElementAt(0)->GetState() != SpeechSynthesisUtterance::STATE_ENDED) {
     mCurrentTask->Pause();
   } else {
     mHoldQueue = true;
   }
 }
 
 void
 SpeechSynthesis::Resume()
@@ -233,16 +232,20 @@ SpeechSynthesis::Resume()
   }
 }
 
 void
 SpeechSynthesis::OnEnd(const nsSpeechTask* aTask)
 {
   MOZ_ASSERT(mCurrentTask == aTask);
 
+  if (!mSpeechQueue.IsEmpty()) {
+    mSpeechQueue.RemoveElementAt(0);
+  }
+
   mCurrentTask = nullptr;
   AdvanceQueue();
 }
 
 void
 SpeechSynthesis::GetVoices(nsTArray< RefPtr<SpeechSynthesisVoice> >& aResult)
 {
   aResult.Clear();
--- a/dom/media/webspeech/synth/SpeechSynthesis.h
+++ b/dom/media/webspeech/synth/SpeechSynthesis.h
@@ -39,35 +39,32 @@ public:
   JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override;
 
   bool Pending() const;
 
   bool Speaking() const;
 
   bool Paused() const;
 
-  /* Returns true if speech queue is empty and there is no speaking utterance */
   bool HasEmptyQueue() const;
 
   void Speak(SpeechSynthesisUtterance& aUtterance);
 
   void Cancel();
 
   void Pause();
 
   void Resume();
 
   void OnEnd(const nsSpeechTask* aTask);
 
   void GetVoices(nsTArray< RefPtr<SpeechSynthesisVoice> >& aResult);
 
   void ForceEnd();
 
-  SpeechSynthesisUtterance* CurrentUtterance() const;
-
   IMPL_EVENT_HANDLER(voiceschanged)
 
 private:
   virtual ~SpeechSynthesis();
 
   void AdvanceQueue();
 
   bool HasVoices() const;
--- a/dom/media/webspeech/synth/nsSpeechTask.h
+++ b/dom/media/webspeech/synth/nsSpeechTask.h
@@ -19,17 +19,16 @@ class SpeechSynthesisUtterance;
 class SpeechSynthesis;
 class SynthStreamListener;
 
 class nsSpeechTask : public nsISpeechTask
                    , public nsIAudioChannelAgentCallback
                    , public nsSupportsWeakReference
 {
   friend class SynthStreamListener;
-  friend class SpeechSynthesis;
 
 public:
   NS_DECL_CYCLE_COLLECTING_ISUPPORTS
   NS_DECL_CYCLE_COLLECTION_CLASS_AMBIGUOUS(nsSpeechTask, nsISpeechTask)
 
   NS_DECL_NSISPEECHTASK
   NS_DECL_NSIAUDIOCHANNELAGENTCALLBACK