Backed out 4 changesets (bug 1188099) for B2G test failures and crashes.
author: Ryan VanderMeulen <ryanvm@gmail.com>
Wed, 05 Aug 2015 14:36:49 -0400
changeset 288093 25d448978f89083662611831740bbc7308cfb280
parent 288092 27d20d1b6f9a0f0361b2ef60875366d08de6c617
child 288094 a7860794b00e186b56dff097c1727c5d0ab3474e
push id: 5067
push user: raliiev@mozilla.com
push date: Mon, 21 Sep 2015 14:04:52 +0000
treeherder: mozilla-beta@14221ffe5b2f [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
bugs: 1188099
milestone: 42.0a1
backs out: f89e9a209c6c24a35580d8d18bd78204f4ef175b
e5b42fb931a655c9db67c988321f3aa26b526d39
a300bd34d135cc28ec719591aacc33c875984a99
02f399ce296dca053566537ef058270b539bae03
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Backed out 4 changesets (bug 1188099) for B2G test failures and crashes. Backed out changeset f89e9a209c6c (bug 1188099) Backed out changeset e5b42fb931a6 (bug 1188099) Backed out changeset a300bd34d135 (bug 1188099) Backed out changeset 02f399ce296d (bug 1188099)
dom/media/webspeech/synth/SpeechSynthesis.cpp
dom/media/webspeech/synth/SpeechSynthesis.h
dom/media/webspeech/synth/ipc/PSpeechSynthesis.ipdl
dom/media/webspeech/synth/ipc/PSpeechSynthesisRequest.ipdl
dom/media/webspeech/synth/ipc/SpeechSynthesisChild.cpp
dom/media/webspeech/synth/ipc/SpeechSynthesisChild.h
dom/media/webspeech/synth/ipc/SpeechSynthesisParent.cpp
dom/media/webspeech/synth/ipc/SpeechSynthesisParent.h
dom/media/webspeech/synth/nsISynthVoiceRegistry.idl
dom/media/webspeech/synth/nsSpeechTask.cpp
dom/media/webspeech/synth/nsSpeechTask.h
dom/media/webspeech/synth/nsSynthVoiceRegistry.cpp
dom/media/webspeech/synth/nsSynthVoiceRegistry.h
dom/media/webspeech/synth/pico/nsPicoService.cpp
dom/media/webspeech/synth/test/common.js
dom/media/webspeech/synth/test/file_global_queue.html
dom/media/webspeech/synth/test/file_global_queue_cancel.html
dom/media/webspeech/synth/test/file_global_queue_pause.html
dom/media/webspeech/synth/test/file_indirect_service_events.html
dom/media/webspeech/synth/test/file_speech_cancel.html
dom/media/webspeech/synth/test/file_speech_queue.html
dom/media/webspeech/synth/test/mochitest.ini
dom/media/webspeech/synth/test/nsFakeSynthServices.cpp
dom/media/webspeech/synth/test/test_global_queue.html
dom/media/webspeech/synth/test/test_global_queue_cancel.html
dom/media/webspeech/synth/test/test_global_queue_pause.html
dom/media/webspeech/synth/test/test_indirect_service_events.html
dom/media/webspeech/synth/test/test_speech_cancel.html
dom/media/webspeech/synth/test/test_speech_queue.html
dom/media/webspeech/synth/windows/SapiService.cpp
dom/webidl/SpeechSynthesis.webidl
--- a/dom/media/webspeech/synth/SpeechSynthesis.cpp
+++ b/dom/media/webspeech/synth/SpeechSynthesis.cpp
@@ -106,29 +106,27 @@ SpeechSynthesis::Pending() const
   default:
     return true;
   }
 }
 
 bool
 SpeechSynthesis::Speaking() const
 {
-  if (!mSpeechQueue.IsEmpty() &&
-      mSpeechQueue.ElementAt(0)->GetState() == SpeechSynthesisUtterance::STATE_SPEAKING) {
-    return true;
+  if (mSpeechQueue.IsEmpty()) {
+    return false;
   }
 
-  // Returns global speaking state if global queue is enabled. Or false.
-  return nsSynthVoiceRegistry::GetInstance()->IsSpeaking();
+  return mSpeechQueue.ElementAt(0)->GetState() == SpeechSynthesisUtterance::STATE_SPEAKING;
 }
 
 bool
 SpeechSynthesis::Paused() const
 {
-  return mHoldQueue || (mCurrentTask && mCurrentTask->IsPrePaused()) ||
+  return mHoldQueue ||
          (!mSpeechQueue.IsEmpty() && mSpeechQueue.ElementAt(0)->IsPaused());
 }
 
 void
 SpeechSynthesis::Speak(SpeechSynthesisUtterance& aUtterance)
 {
   if (aUtterance.mState != SpeechSynthesisUtterance::STATE_NONE) {
     // XXX: Should probably raise an error
@@ -175,28 +173,25 @@ SpeechSynthesis::AdvanceQueue()
   }
 
   return;
 }
 
 void
 SpeechSynthesis::Cancel()
 {
-  if (!mSpeechQueue.IsEmpty() &&
-      mSpeechQueue.ElementAt(0)->GetState() == SpeechSynthesisUtterance::STATE_SPEAKING) {
-    // Remove all queued utterances except for current one, we will remove it
-    // in OnEnd
-    mSpeechQueue.RemoveElementsAt(1, mSpeechQueue.Length() - 1);
+  if (mCurrentTask) {
+    if (mSpeechQueue.Length() > 1) {
+      // Remove all queued utterances except for current one.
+      mSpeechQueue.RemoveElementsAt(1, mSpeechQueue.Length() - 1);
+    }
+    mCurrentTask->Cancel();
   } else {
     mSpeechQueue.Clear();
   }
-
-  if (mCurrentTask) {
-    mCurrentTask->Cancel();
-  }
 }
 
 void
 SpeechSynthesis::Pause()
 {
   if (Paused()) {
     return;
   }
@@ -269,20 +264,10 @@ SpeechSynthesis::GetVoices(nsTArray< nsR
   mVoiceCache.Clear();
 
   for (uint32_t i = 0; i < aResult.Length(); i++) {
     SpeechSynthesisVoice* voice = aResult[i];
     mVoiceCache.Put(voice->mUri, voice);
   }
 }
 
-// For testing purposes, allows us to cancel the current task that is
-// misbehaving, and flush the queue.
-void
-SpeechSynthesis::ForceEnd()
-{
-  if (mCurrentTask) {
-    mCurrentTask->ForceEnd();
-  }
-}
-
 } // namespace dom
 } // namespace mozilla
--- a/dom/media/webspeech/synth/SpeechSynthesis.h
+++ b/dom/media/webspeech/synth/SpeechSynthesis.h
@@ -49,18 +49,16 @@ public:
   void Pause();
 
   void Resume();
 
   void OnEnd(const nsSpeechTask* aTask);
 
   void GetVoices(nsTArray< nsRefPtr<SpeechSynthesisVoice> >& aResult);
 
-  void ForceEnd();
-
 private:
   virtual ~SpeechSynthesis();
 
   void AdvanceQueue();
 
   nsCOMPtr<nsPIDOMWindow> mParent;
 
   nsTArray<nsRefPtr<SpeechSynthesisUtterance> > mSpeechQueue;
--- a/dom/media/webspeech/synth/ipc/PSpeechSynthesis.ipdl
+++ b/dom/media/webspeech/synth/ipc/PSpeechSynthesis.ipdl
@@ -10,38 +10,33 @@ include protocol PSpeechSynthesisRequest
 namespace mozilla {
 namespace dom {
 
 struct RemoteVoice {
   nsString voiceURI;
   nsString name;
   nsString lang;
   bool localService;
-  bool queued;
 };
 
 sync protocol PSpeechSynthesis
 {
   manager PContent;
   manages PSpeechSynthesisRequest;
 
 child:
 
     VoiceAdded(RemoteVoice aVoice);
 
     VoiceRemoved(nsString aUri);
 
     SetDefaultVoice(nsString aUri, bool aIsDefault);
 
-    IsSpeakingChanged(bool aIsSpeaking);
-
 parent:
     __delete__();
 
     PSpeechSynthesisRequest(nsString aText, nsString aUri, nsString aLang,
                             float aVolume, float aRate, float aPitch);
-
-    sync ReadVoicesAndState() returns (RemoteVoice[] aVoices,
-                                       nsString[] aDefaults, bool aIsSpeaking);
+    sync ReadVoiceList() returns (RemoteVoice[] aVoices, nsString[] aDefaults);
 };
 
 } // namespace dom
 } // namespace mozilla
--- a/dom/media/webspeech/synth/ipc/PSpeechSynthesisRequest.ipdl
+++ b/dom/media/webspeech/synth/ipc/PSpeechSynthesisRequest.ipdl
@@ -16,18 +16,16 @@ async protocol PSpeechSynthesisRequest
  parent:
 
   Pause();
 
   Resume();
 
   Cancel();
 
-  ForceEnd();
-
  child:
 
   __delete__(bool aIsError, float aElapsedTime, uint32_t aCharIndex);
 
   OnStart(nsString aUri);
 
   OnPause(float aElapsedTime, uint32_t aCharIndex);
 
--- a/dom/media/webspeech/synth/ipc/SpeechSynthesisChild.cpp
+++ b/dom/media/webspeech/synth/ipc/SpeechSynthesisChild.cpp
@@ -35,23 +35,16 @@ SpeechSynthesisChild::RecvVoiceRemoved(c
 bool
 SpeechSynthesisChild::RecvSetDefaultVoice(const nsString& aUri,
                                           const bool& aIsDefault)
 {
   nsSynthVoiceRegistry::RecvSetDefaultVoice(aUri, aIsDefault);
   return true;
 }
 
-bool
-SpeechSynthesisChild::RecvIsSpeakingChanged(const bool& aIsSpeaking)
-{
-  nsSynthVoiceRegistry::RecvIsSpeakingChanged(aIsSpeaking);
-  return true;
-}
-
 PSpeechSynthesisRequestChild*
 SpeechSynthesisChild::AllocPSpeechSynthesisRequestChild(const nsString& aText,
                                                         const nsString& aLang,
                                                         const nsString& aUri,
                                                         const float& aVolume,
                                                         const float& aRate,
                                                         const float& aPitch)
 {
@@ -179,17 +172,10 @@ SpeechTaskChild::Resume()
 
 void
 SpeechTaskChild::Cancel()
 {
   MOZ_ASSERT(mActor);
   mActor->SendCancel();
 }
 
-void
-SpeechTaskChild::ForceEnd()
-{
-  MOZ_ASSERT(mActor);
-  mActor->SendForceEnd();
-}
-
 } // namespace dom
 } // namespace mozilla
--- a/dom/media/webspeech/synth/ipc/SpeechSynthesisChild.h
+++ b/dom/media/webspeech/synth/ipc/SpeechSynthesisChild.h
@@ -23,18 +23,16 @@ class SpeechSynthesisChild : public PSpe
 
 public:
   bool RecvVoiceAdded(const RemoteVoice& aVoice) override;
 
   bool RecvVoiceRemoved(const nsString& aUri) override;
 
   bool RecvSetDefaultVoice(const nsString& aUri, const bool& aIsDefault) override;
 
-  bool RecvIsSpeakingChanged(const bool& aIsSpeaking) override;
-
 protected:
   SpeechSynthesisChild();
   virtual ~SpeechSynthesisChild();
 
   PSpeechSynthesisRequestChild* AllocPSpeechSynthesisRequestChild(const nsString& aLang,
                                                                   const nsString& aUri,
                                                                   const nsString& aText,
                                                                   const float& aVolume,
@@ -85,18 +83,16 @@ public:
   NS_IMETHOD SendAudioNative(int16_t* aData, uint32_t aDataLen) override;
 
   virtual void Pause() override;
 
   virtual void Resume() override;
 
   virtual void Cancel() override;
 
-  virtual void ForceEnd() override;
-
 private:
   SpeechSynthesisRequestChild* mActor;
 };
 
 } // namespace dom
 } // namespace mozilla
 
 #endif
--- a/dom/media/webspeech/synth/ipc/SpeechSynthesisParent.cpp
+++ b/dom/media/webspeech/synth/ipc/SpeechSynthesisParent.cpp
@@ -20,22 +20,20 @@ SpeechSynthesisParent::~SpeechSynthesisP
 
 void
 SpeechSynthesisParent::ActorDestroy(ActorDestroyReason aWhy)
 {
   // Implement me! Bug 1005141
 }
 
 bool
-SpeechSynthesisParent::RecvReadVoicesAndState(InfallibleTArray<RemoteVoice>* aVoices,
-                                              InfallibleTArray<nsString>* aDefaults,
-                                              bool* aIsSpeaking)
+SpeechSynthesisParent::RecvReadVoiceList(InfallibleTArray<RemoteVoice>* aVoices,
+                                         InfallibleTArray<nsString>* aDefaults)
 {
-  nsSynthVoiceRegistry::GetInstance()->SendVoicesAndState(aVoices, aDefaults,
-                                                          aIsSpeaking);
+  nsSynthVoiceRegistry::GetInstance()->SendVoices(aVoices, aDefaults);
   return true;
 }
 
 PSpeechSynthesisRequestParent*
 SpeechSynthesisParent::AllocPSpeechSynthesisRequestParent(const nsString& aText,
                                                           const nsString& aLang,
                                                           const nsString& aUri,
                                                           const float& aVolume,
@@ -114,24 +112,16 @@ SpeechSynthesisRequestParent::RecvResume
 bool
 SpeechSynthesisRequestParent::RecvCancel()
 {
   MOZ_ASSERT(mTask);
   mTask->Cancel();
   return true;
 }
 
-bool
-SpeechSynthesisRequestParent::RecvForceEnd()
-{
-  MOZ_ASSERT(mTask);
-  mTask->ForceEnd();
-  return true;
-}
-
 // SpeechTaskParent
 
 nsresult
 SpeechTaskParent::DispatchStartImpl(const nsAString& aUri)
 {
   MOZ_ASSERT(mActor);
   if(NS_WARN_IF(!(mActor->SendOnStart(nsString(aUri))))) {
     return NS_ERROR_FAILURE;
--- a/dom/media/webspeech/synth/ipc/SpeechSynthesisParent.h
+++ b/dom/media/webspeech/synth/ipc/SpeechSynthesisParent.h
@@ -19,19 +19,18 @@ class SpeechSynthesisRequestParent;
 class SpeechSynthesisParent : public PSpeechSynthesisParent
 {
   friend class ContentParent;
   friend class SpeechSynthesisRequestParent;
 
 public:
   virtual void ActorDestroy(ActorDestroyReason aWhy) override;
 
-  bool RecvReadVoicesAndState(InfallibleTArray<RemoteVoice>* aVoices,
-                              InfallibleTArray<nsString>* aDefaults,
-                              bool* aIsSpeaking) override;
+  bool RecvReadVoiceList(InfallibleTArray<RemoteVoice>* aVoices,
+                         InfallibleTArray<nsString>* aDefaults) override;
 
 protected:
   SpeechSynthesisParent();
   virtual ~SpeechSynthesisParent();
   PSpeechSynthesisRequestParent* AllocPSpeechSynthesisRequestParent(const nsString& aText,
                                                                     const nsString& aLang,
                                                                     const nsString& aUri,
                                                                     const float& aVolume,
@@ -62,18 +61,16 @@ protected:
 
   virtual void ActorDestroy(ActorDestroyReason aWhy) override;
 
   virtual bool RecvPause() override;
 
   virtual bool RecvResume() override;
 
   virtual bool RecvCancel() override;
-
-  virtual bool RecvForceEnd() override;
 };
 
 class SpeechTaskParent : public nsSpeechTask
 {
   friend class SpeechSynthesisRequestParent;
 public:
   SpeechTaskParent(float aVolume, const nsAString& aUtterance)
     : nsSpeechTask(aVolume, aUtterance) {}
--- a/dom/media/webspeech/synth/nsISynthVoiceRegistry.idl
+++ b/dom/media/webspeech/synth/nsISynthVoiceRegistry.idl
@@ -2,32 +2,31 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "nsISupports.idl"
 
 interface nsISpeechService;
 
-[scriptable, builtinclass, uuid(dac09c3a-156e-4025-a4ab-bc88b0ea92e7)]
+[scriptable, builtinclass, uuid(53dcc868-4193-4c3c-a1d9-fe5a0a6af2fb)]
 interface nsISynthVoiceRegistry : nsISupports
 {
   /**
    * Register a speech synthesis voice.
    *
-   * @param aService          the service that provides this voice.
-   * @param aUri              a unique identifier for this voice.
-   * @param aName             human-readable name for this voice.
-   * @param aLang             a BCP 47 language tag.
-   * @param aLocalService     true if service does not require network.
-   * @param aQueuesUtterances true if voice only speaks one utterance at a time
+   * @param aService      the service that provides this voice.
+   * @param aUri          a unique identifier for this voice.
+   * @param aName         human-readable name for this voice.
+   * @param aLang         a BCP 47 language tag.
+   * @param aLocalService true if service does not require network.
    */
   void addVoice(in nsISpeechService aService, in DOMString aUri,
                 in DOMString aName, in DOMString aLang,
-                in boolean aLocalService, in boolean aQueuesUtterances);
+                in boolean aLocalService);
 
   /**
    * Remove a speech synthesis voice.
    *
    * @param aService the service that was used to add the voice.
    * @param aUri     a unique identifier of an existing voice.
    */
   void removeVoice(in nsISpeechService aService, in DOMString aUri);
--- a/dom/media/webspeech/synth/nsSpeechTask.cpp
+++ b/dom/media/webspeech/synth/nsSpeechTask.cpp
@@ -2,17 +2,16 @@
 /* vim:set ts=2 sw=2 sts=2 et cindent: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "AudioSegment.h"
 #include "nsSpeechTask.h"
 #include "SpeechSynthesis.h"
-#include "nsSynthVoiceRegistry.h"
 
 // GetCurrentTime is defined in winbase.h as zero argument macro forwarding to
 // GetTickCount() and conflicts with nsSpeechTask::GetCurrentTime().
 #ifdef GetCurrentTime
 #undef GetCurrentTime
 #endif
 
 #undef LOG
@@ -29,25 +28,25 @@ public:
     mSpeechTask(aSpeechTask),
     mStarted(false)
   {
   }
 
   void DoNotifyStarted()
   {
     if (mSpeechTask) {
-      mSpeechTask->DispatchStartInner();
+      mSpeechTask->DispatchStartImpl();
     }
   }
 
   void DoNotifyFinished()
   {
     if (mSpeechTask) {
-      mSpeechTask->DispatchEndInner(mSpeechTask->GetCurrentTime(),
-                                    mSpeechTask->GetCurrentCharOffset());
+      mSpeechTask->DispatchEndImpl(mSpeechTask->GetCurrentTime(),
+                                   mSpeechTask->GetCurrentCharOffset());
     }
   }
 
   virtual void NotifyEvent(MediaStreamGraph* aGraph,
                            MediaStreamListener::MediaStreamGraphEvent event) override
   {
     switch (event) {
       case EVENT_FINISHED:
@@ -92,33 +91,27 @@ NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(
   NS_INTERFACE_MAP_ENTRY_AMBIGUOUS(nsISupports, nsISpeechTask)
 NS_INTERFACE_MAP_END
 
 NS_IMPL_CYCLE_COLLECTING_ADDREF(nsSpeechTask)
 NS_IMPL_CYCLE_COLLECTING_RELEASE(nsSpeechTask)
 
 nsSpeechTask::nsSpeechTask(SpeechSynthesisUtterance* aUtterance)
   : mUtterance(aUtterance)
-  , mInited(false)
-  , mPrePaused(false)
-  , mPreCanceled(false)
   , mCallback(nullptr)
   , mIndirectAudio(false)
 {
   mText = aUtterance->mText;
   mVolume = aUtterance->Volume();
 }
 
 nsSpeechTask::nsSpeechTask(float aVolume, const nsAString& aText)
   : mUtterance(nullptr)
   , mVolume(aVolume)
   , mText(aText)
-  , mInited(false)
-  , mPrePaused(false)
-  , mPreCanceled(false)
   , mCallback(nullptr)
   , mIndirectAudio(false)
 {
 }
 
 nsSpeechTask::~nsSpeechTask()
 {
   LOG(LogLevel::Debug, ("~nsSpeechTask"));
@@ -132,26 +125,20 @@ nsSpeechTask::~nsSpeechTask()
 
   if (mPort) {
     mPort->Destroy();
     mPort = nullptr;
   }
 }
 
 void
-nsSpeechTask::Init(ProcessedMediaStream* aStream)
+nsSpeechTask::BindStream(ProcessedMediaStream* aStream)
 {
-  if (aStream) {
-    mStream = MediaStreamGraph::GetInstance()->CreateSourceStream(nullptr);
-    mPort = aStream->AllocateInputPort(mStream, 0);
-    mIndirectAudio = false;
-  } else {
-    mIndirectAudio = true;
-  }
-  mInited = true;
+  mStream = MediaStreamGraph::GetInstance()->CreateSourceStream(nullptr);
+  mPort = aStream->AllocateInputPort(mStream, 0);
 }
 
 void
 nsSpeechTask::SetChosenVoiceURI(const nsAString& aUri)
 {
   mChosenVoiceURI = aUri;
 }
 
@@ -161,24 +148,23 @@ nsSpeechTask::Setup(nsISpeechTaskCallbac
 {
   MOZ_ASSERT(XRE_IsParentProcess());
 
   LOG(LogLevel::Debug, ("nsSpeechTask::Setup"));
 
   mCallback = aCallback;
 
   if (mIndirectAudio) {
-    MOZ_ASSERT(!mStream);
     if (argc > 0) {
       NS_WARNING("Audio info arguments in Setup() are ignored for indirect audio services.");
     }
     return NS_OK;
   }
 
-  // mStream is set up in Init() that should be called before this.
+  // mStream is set up in BindStream() that should be called before this.
   MOZ_ASSERT(mStream);
 
   mStream->AddListener(new SynthStreamListener(this));
 
   // XXX: Support more than one channel
   if(NS_WARN_IF(!(aChannels == 1))) {
     return NS_ERROR_FAILURE;
   }
@@ -303,23 +289,16 @@ nsSpeechTask::SendAudioImpl(nsRefPtr<moz
 NS_IMETHODIMP
 nsSpeechTask::DispatchStart()
 {
   if (!mIndirectAudio) {
     NS_WARNING("Can't call DispatchStart() from a direct audio speech service");
     return NS_ERROR_FAILURE;
   }
 
-  return DispatchStartInner();
-}
-
-nsresult
-nsSpeechTask::DispatchStartInner()
-{
-  nsSynthVoiceRegistry::GetInstance()->SetIsSpeaking(true);
   return DispatchStartImpl();
 }
 
 nsresult
 nsSpeechTask::DispatchStartImpl()
 {
   return DispatchStartImpl(mChosenVoiceURI);
 }
@@ -345,26 +324,16 @@ nsSpeechTask::DispatchStartImpl(const ns
 NS_IMETHODIMP
 nsSpeechTask::DispatchEnd(float aElapsedTime, uint32_t aCharIndex)
 {
   if (!mIndirectAudio) {
     NS_WARNING("Can't call DispatchEnd() from a direct audio speech service");
     return NS_ERROR_FAILURE;
   }
 
-  return DispatchEndInner(aElapsedTime, aCharIndex);
-}
-
-nsresult
-nsSpeechTask::DispatchEndInner(float aElapsedTime, uint32_t aCharIndex)
-{
-  if (!mPreCanceled) {
-    nsSynthVoiceRegistry::GetInstance()->SpeakNext();
-  }
-
   return DispatchEndImpl(aElapsedTime, aCharIndex);
 }
 
 nsresult
 nsSpeechTask::DispatchEndImpl(float aElapsedTime, uint32_t aCharIndex)
 {
   LOG(LogLevel::Debug, ("nsSpeechTask::DispatchEnd\n"));
 
@@ -415,21 +384,19 @@ nsSpeechTask::DispatchPauseImpl(float aE
   if(NS_WARN_IF(mUtterance->mPaused)) {
     return NS_ERROR_NOT_AVAILABLE;
   }
   if(NS_WARN_IF(mUtterance->mState == SpeechSynthesisUtterance::STATE_ENDED)) {
     return NS_ERROR_NOT_AVAILABLE;
   }
 
   mUtterance->mPaused = true;
-  if (mUtterance->mState == SpeechSynthesisUtterance::STATE_SPEAKING) {
-    mUtterance->DispatchSpeechSynthesisEvent(NS_LITERAL_STRING("pause"),
-                                             aCharIndex, aElapsedTime,
-                                             EmptyString());
-  }
+  mUtterance->DispatchSpeechSynthesisEvent(NS_LITERAL_STRING("pause"),
+                                           aCharIndex, aElapsedTime,
+                                           EmptyString());
   return NS_OK;
 }
 
 NS_IMETHODIMP
 nsSpeechTask::DispatchResume(float aElapsedTime, uint32_t aCharIndex)
 {
   if (!mIndirectAudio) {
     NS_WARNING("Can't call DispatchResume() from a direct audio speech service");
@@ -447,22 +414,19 @@ nsSpeechTask::DispatchResumeImpl(float a
   if(NS_WARN_IF(!(mUtterance->mPaused))) {
     return NS_ERROR_NOT_AVAILABLE;
   }
   if(NS_WARN_IF(mUtterance->mState == SpeechSynthesisUtterance::STATE_ENDED)) {
     return NS_ERROR_NOT_AVAILABLE;
   }
 
   mUtterance->mPaused = false;
-  if (mUtterance->mState == SpeechSynthesisUtterance::STATE_SPEAKING) {
-    mUtterance->DispatchSpeechSynthesisEvent(NS_LITERAL_STRING("resume"),
-                                             aCharIndex, aElapsedTime,
-                                             EmptyString());
-  }
-
+  mUtterance->DispatchSpeechSynthesisEvent(NS_LITERAL_STRING("resume"),
+                                           aCharIndex, aElapsedTime,
+                                           EmptyString());
   return NS_OK;
 }
 
 NS_IMETHODIMP
 nsSpeechTask::DispatchError(float aElapsedTime, uint32_t aCharIndex)
 {
   if (!mIndirectAudio) {
     NS_WARNING("Can't call DispatchError() from a direct audio speech service");
@@ -548,47 +512,32 @@ nsSpeechTask::Pause()
 
   if (mCallback) {
     DebugOnly<nsresult> rv = mCallback->OnPause();
     NS_WARN_IF_FALSE(NS_SUCCEEDED(rv), "Unable to call onPause() callback");
   }
 
   if (mStream) {
     mStream->ChangeExplicitBlockerCount(1);
-  }
-
-  if (!mInited) {
-    mPrePaused = true;
-  }
-
-  if (!mIndirectAudio) {
     DispatchPauseImpl(GetCurrentTime(), GetCurrentCharOffset());
   }
 }
 
 void
 nsSpeechTask::Resume()
 {
   MOZ_ASSERT(XRE_IsParentProcess());
 
   if (mCallback) {
     DebugOnly<nsresult> rv = mCallback->OnResume();
     NS_WARN_IF_FALSE(NS_SUCCEEDED(rv), "Unable to call onResume() callback");
   }
 
   if (mStream) {
     mStream->ChangeExplicitBlockerCount(-1);
-  }
-
-  if (mPrePaused) {
-    mPrePaused = false;
-    nsSynthVoiceRegistry::GetInstance()->ResumeQueue();
-  }
-
-  if (!mIndirectAudio) {
     DispatchResumeImpl(GetCurrentTime(), GetCurrentCharOffset());
   }
 }
 
 void
 nsSpeechTask::Cancel()
 {
   MOZ_ASSERT(XRE_IsParentProcess());
@@ -597,41 +546,20 @@ nsSpeechTask::Cancel()
 
   if (mCallback) {
     DebugOnly<nsresult> rv = mCallback->OnCancel();
     NS_WARN_IF_FALSE(NS_SUCCEEDED(rv), "Unable to call onCancel() callback");
   }
 
   if (mStream) {
     mStream->ChangeExplicitBlockerCount(1);
-  }
-
-  if (!mInited) {
-    mPreCanceled = true;
-  }
-
-  if (!mIndirectAudio) {
-    DispatchEndInner(GetCurrentTime(), GetCurrentCharOffset());
+    DispatchEndImpl(GetCurrentTime(), GetCurrentCharOffset());
   }
 }
 
-void
-nsSpeechTask::ForceEnd()
-{
-  if (mStream) {
-    mStream->ChangeExplicitBlockerCount(1);
-  }
-
-  if (!mInited) {
-    mPreCanceled = true;
-  }
-
-  DispatchEndInner(GetCurrentTime(), GetCurrentCharOffset());
-}
-
 float
 nsSpeechTask::GetCurrentTime()
 {
   return mStream ? (float)(mStream->GetCurrentTime() / 1000000.0) : 0;
 }
 
 uint32_t
 nsSpeechTask::GetCurrentCharOffset()
--- a/dom/media/webspeech/synth/nsSpeechTask.h
+++ b/dom/media/webspeech/synth/nsSpeechTask.h
@@ -32,38 +32,28 @@ public:
   nsSpeechTask(float aVolume, const nsAString& aText);
 
   virtual void Pause();
 
   virtual void Resume();
 
   virtual void Cancel();
 
-  virtual void ForceEnd();
-
   float GetCurrentTime();
 
   uint32_t GetCurrentCharOffset();
 
   void SetSpeechSynthesis(SpeechSynthesis* aSpeechSynthesis);
 
-  void Init(ProcessedMediaStream* aStream);
+  void SetIndirectAudio(bool aIndirectAudio) { mIndirectAudio = aIndirectAudio; }
+
+  void BindStream(ProcessedMediaStream* aStream);
 
   void SetChosenVoiceURI(const nsAString& aUri);
 
-  bool IsPreCanceled()
-  {
-    return mPreCanceled;
-  };
-
-  bool IsPrePaused()
-  {
-    return mPrePaused;
-  }
-
 protected:
   virtual ~nsSpeechTask();
 
   nsresult DispatchStartImpl();
 
   virtual nsresult DispatchStartImpl(const nsAString& aUri);
 
   virtual nsresult DispatchEndImpl(float aElapsedTime, uint32_t aCharIndex);
@@ -82,31 +72,21 @@ protected:
                                     float aElapsedTime, uint32_t aCharIndex);
 
   nsRefPtr<SpeechSynthesisUtterance> mUtterance;
 
   float mVolume;
 
   nsString mText;
 
-  bool mInited;
-
-  bool mPrePaused;
-
-  bool mPreCanceled;
-
 private:
   void End();
 
   void SendAudioImpl(nsRefPtr<mozilla::SharedBuffer>& aSamples, uint32_t aDataLen);
 
-  nsresult DispatchStartInner();
-
-  nsresult DispatchEndInner(float aElapsedTime, uint32_t aCharIndex);
-
   nsRefPtr<SourceMediaStream> mStream;
 
   nsRefPtr<MediaInputPort> mPort;
 
   nsCOMPtr<nsISpeechTaskCallback> mCallback;
 
   uint32_t mChannels;
 
--- a/dom/media/webspeech/synth/nsSynthVoiceRegistry.cpp
+++ b/dom/media/webspeech/synth/nsSynthVoiceRegistry.cpp
@@ -12,17 +12,16 @@
 #include "SpeechSynthesisVoice.h"
 #include "nsSynthVoiceRegistry.h"
 #include "nsSpeechTask.h"
 
 #include "nsString.h"
 #include "mozilla/StaticPtr.h"
 #include "mozilla/dom/ContentChild.h"
 #include "mozilla/dom/ContentParent.h"
-#include "mozilla/Preferences.h"
 #include "mozilla/unused.h"
 
 #include "SpeechSynthesisChild.h"
 #include "SpeechSynthesisParent.h"
 
 #undef LOG
 extern PRLogModuleInfo* GetSpeechSynthLog();
 #define LOG(type, msg) MOZ_LOG(GetSpeechSynthLog(), type, msg)
@@ -68,140 +67,93 @@ namespace dom {
 class VoiceData final
 {
 private:
   // Private destructor, to discourage deletion outside of Release():
   ~VoiceData() {}
 
 public:
   VoiceData(nsISpeechService* aService, const nsAString& aUri,
-            const nsAString& aName, const nsAString& aLang,
-            bool aIsLocal, bool aQueuesUtterances)
+            const nsAString& aName, const nsAString& aLang, bool aIsLocal)
     : mService(aService)
     , mUri(aUri)
     , mName(aName)
     , mLang(aLang)
-    , mIsLocal(aIsLocal)
-    , mIsQueued(aQueuesUtterances) {}
+    , mIsLocal(aIsLocal) {}
 
   NS_INLINE_DECL_REFCOUNTING(VoiceData)
 
   nsCOMPtr<nsISpeechService> mService;
 
   nsString mUri;
 
   nsString mName;
 
   nsString mLang;
 
   bool mIsLocal;
-
-  bool mIsQueued;
-};
-
-// GlobalQueueItem
-
-class GlobalQueueItem final
-{
-private:
-  // Private destructor, to discourage deletion outside of Release():
-  ~GlobalQueueItem() {}
-
-public:
-  GlobalQueueItem(VoiceData* aVoice, nsSpeechTask* aTask, const nsAString& aText,
-                  const float& aVolume, const float& aRate, const float& aPitch)
-    : mVoice(aVoice)
-    , mTask(aTask)
-    , mText(aText)
-    , mVolume(aVolume)
-    , mRate(aRate)
-    , mPitch(aPitch) {}
-
-  NS_INLINE_DECL_REFCOUNTING(GlobalQueueItem)
-
-  nsRefPtr<VoiceData> mVoice;
-
-  nsRefPtr<nsSpeechTask> mTask;
-
-  nsString mText;
-
-  float mVolume;
-
-  float mRate;
-
-  float mPitch;
-
-  bool mIsLocal;
 };
 
 // nsSynthVoiceRegistry
 
 static StaticRefPtr<nsSynthVoiceRegistry> gSynthVoiceRegistry;
-static bool sForceGlobalQueue = false;
 
 NS_IMPL_ISUPPORTS(nsSynthVoiceRegistry, nsISynthVoiceRegistry)
 
 nsSynthVoiceRegistry::nsSynthVoiceRegistry()
   : mSpeechSynthChild(nullptr)
-  , mUseGlobalQueue(false)
-  , mIsSpeaking(false)
 {
   if (XRE_IsContentProcess()) {
 
     mSpeechSynthChild = new SpeechSynthesisChild();
     ContentChild::GetSingleton()->SendPSpeechSynthesisConstructor(mSpeechSynthChild);
 
     InfallibleTArray<RemoteVoice> voices;
     InfallibleTArray<nsString> defaults;
-    bool isSpeaking;
 
-    mSpeechSynthChild->SendReadVoicesAndState(&voices, &defaults, &isSpeaking);
+    mSpeechSynthChild->SendReadVoiceList(&voices, &defaults);
 
     for (uint32_t i = 0; i < voices.Length(); ++i) {
       RemoteVoice voice = voices[i];
       AddVoiceImpl(nullptr, voice.voiceURI(),
                    voice.name(), voice.lang(),
-                   voice.localService(), voice.queued());
+                   voice.localService());
     }
 
     for (uint32_t i = 0; i < defaults.Length(); ++i) {
       SetDefaultVoice(defaults[i], true);
     }
-
-    mIsSpeaking = isSpeaking;
   }
 }
 
 nsSynthVoiceRegistry::~nsSynthVoiceRegistry()
 {
   LOG(LogLevel::Debug, ("~nsSynthVoiceRegistry"));
 
   // mSpeechSynthChild's lifecycle is managed by the Content protocol.
   mSpeechSynthChild = nullptr;
 
   if (mStream) {
     if (!mStream->IsDestroyed()) {
-      mStream->Destroy();
-    }
+     mStream->Destroy();
+   }
 
-    mStream = nullptr;
+   mStream = nullptr;
   }
 
   mUriVoiceMap.Clear();
 }
 
 nsSynthVoiceRegistry*
 nsSynthVoiceRegistry::GetInstance()
 {
   MOZ_ASSERT(NS_IsMainThread());
 
   if (!gSynthVoiceRegistry) {
     gSynthVoiceRegistry = new nsSynthVoiceRegistry();
-    Preferences::AddBoolVarCache(&sForceGlobalQueue,
-                                 "media.webspeech.synth.force_global_queue");
   }
 
   return gSynthVoiceRegistry;
 }
 
 already_AddRefed<nsSynthVoiceRegistry>
 nsSynthVoiceRegistry::GetInstanceForService()
 {
@@ -209,37 +161,34 @@ nsSynthVoiceRegistry::GetInstanceForServ
 
   return registry.forget();
 }
 
 void
 nsSynthVoiceRegistry::Shutdown()
 {
   LOG(LogLevel::Debug, ("[%s] nsSynthVoiceRegistry::Shutdown()",
-                        (XRE_IsContentProcess()) ? "Content" : "Default"));
+                     (XRE_IsContentProcess()) ? "Content" : "Default"));
   gSynthVoiceRegistry = nullptr;
 }
 
 void
-nsSynthVoiceRegistry::SendVoicesAndState(InfallibleTArray<RemoteVoice>* aVoices,
-                                         InfallibleTArray<nsString>* aDefaults,
-                                         bool* aIsSpeaking)
+nsSynthVoiceRegistry::SendVoices(InfallibleTArray<RemoteVoice>* aVoices,
+                                 InfallibleTArray<nsString>* aDefaults)
 {
   for (uint32_t i=0; i < mVoices.Length(); ++i) {
     nsRefPtr<VoiceData> voice = mVoices[i];
 
     aVoices->AppendElement(RemoteVoice(voice->mUri, voice->mName, voice->mLang,
-                                       voice->mIsLocal, voice->mIsQueued));
+                                       voice->mIsLocal));
   }
 
   for (uint32_t i=0; i < mDefaultVoices.Length(); ++i) {
     aDefaults->AppendElement(mDefaultVoices[i]->mUri);
   }
-
-  *aIsSpeaking = IsSpeaking();
 }
 
 void
 nsSynthVoiceRegistry::RecvRemoveVoice(const nsAString& aUri)
 {
   // If we dont have a local instance of the registry yet, we will recieve current
   // voices at contruction time.
   if(!gSynthVoiceRegistry) {
@@ -255,63 +204,50 @@ nsSynthVoiceRegistry::RecvAddVoice(const
   // If we dont have a local instance of the registry yet, we will recieve current
   // voices at contruction time.
   if(!gSynthVoiceRegistry) {
     return;
   }
 
   gSynthVoiceRegistry->AddVoiceImpl(nullptr, aVoice.voiceURI(),
                                     aVoice.name(), aVoice.lang(),
-                                    aVoice.localService(), aVoice.queued());
+                                    aVoice.localService());
 }
 
 void
 nsSynthVoiceRegistry::RecvSetDefaultVoice(const nsAString& aUri, bool aIsDefault)
 {
   // If we dont have a local instance of the registry yet, we will recieve current
   // voices at contruction time.
   if(!gSynthVoiceRegistry) {
     return;
   }
 
   gSynthVoiceRegistry->SetDefaultVoice(aUri, aIsDefault);
 }
 
-void
-nsSynthVoiceRegistry::RecvIsSpeakingChanged(bool aIsSpeaking)
-{
-  // If we dont have a local instance of the registry yet, we will get the
-  // speaking state on construction.
-  if(!gSynthVoiceRegistry) {
-    return;
-  }
-
-  gSynthVoiceRegistry->mIsSpeaking = aIsSpeaking;
-}
-
 NS_IMETHODIMP
 nsSynthVoiceRegistry::AddVoice(nsISpeechService* aService,
                                const nsAString& aUri,
                                const nsAString& aName,
                                const nsAString& aLang,
-                               bool aLocalService,
-                               bool aQueuesUtterances)
+                               bool aLocalService)
 {
   LOG(LogLevel::Debug,
-      ("nsSynthVoiceRegistry::AddVoice uri='%s' name='%s' lang='%s' local=%s queued=%s",
+      ("nsSynthVoiceRegistry::AddVoice uri='%s' name='%s' lang='%s' local=%s",
        NS_ConvertUTF16toUTF8(aUri).get(), NS_ConvertUTF16toUTF8(aName).get(),
        NS_ConvertUTF16toUTF8(aLang).get(),
-       aLocalService ? "true" : "false",
-       aQueuesUtterances ? "true" : "false"));
+       aLocalService ? "true" : "false"));
 
   if(NS_WARN_IF(XRE_IsContentProcess())) {
     return NS_ERROR_NOT_AVAILABLE;
   }
 
-  return AddVoiceImpl(aService, aUri, aName, aLang, aLocalService, aQueuesUtterances);
+  return AddVoiceImpl(aService, aUri, aName, aLang,
+                      aLocalService);
 }
 
 NS_IMETHODIMP
 nsSynthVoiceRegistry::RemoveVoice(nsISpeechService* aService,
                                   const nsAString& aUri)
 {
   LOG(LogLevel::Debug,
       ("nsSynthVoiceRegistry::RemoveVoice uri='%s' (%s)",
@@ -327,32 +263,16 @@ nsSynthVoiceRegistry::RemoveVoice(nsISpe
   if(NS_WARN_IF(!(aService == retval->mService))) {
     return NS_ERROR_INVALID_ARG;
   }
 
   mVoices.RemoveElement(retval);
   mDefaultVoices.RemoveElement(retval);
   mUriVoiceMap.Remove(aUri);
 
-  if (retval->mIsQueued && !sForceGlobalQueue) {
-    // Check if this is the last queued voice, and disable the global queue if
-    // it is.
-    bool queued = false;
-    for (uint32_t i = 0; i < mVoices.Length(); i++) {
-      VoiceData* voice = mVoices[i];
-      if (voice->mIsQueued) {
-        queued = true;
-        break;
-      }
-    }
-    if (!queued) {
-      mUseGlobalQueue = false;
-    }
-  }
-
   nsTArray<SpeechSynthesisParent*> ssplist;
   GetAllSpeechSynthActors(ssplist);
 
   for (uint32_t i = 0; i < ssplist.Length(); ++i)
     unused << ssplist[i]->SendVoiceRemoved(nsString(aUri));
 
   return NS_OK;
 }
@@ -470,41 +390,38 @@ nsSynthVoiceRegistry::GetVoiceName(const
   return NS_OK;
 }
 
 nsresult
 nsSynthVoiceRegistry::AddVoiceImpl(nsISpeechService* aService,
                                    const nsAString& aUri,
                                    const nsAString& aName,
                                    const nsAString& aLang,
-                                   bool aLocalService,
-                                   bool aQueuesUtterances)
+                                   bool aLocalService)
 {
   bool found = false;
   mUriVoiceMap.GetWeak(aUri, &found);
   if(NS_WARN_IF(found)) {
     return NS_ERROR_INVALID_ARG;
   }
 
   nsRefPtr<VoiceData> voice = new VoiceData(aService, aUri, aName, aLang,
-                                            aLocalService, aQueuesUtterances);
+                                            aLocalService);
 
   mVoices.AppendElement(voice);
   mUriVoiceMap.Put(aUri, voice);
-  mUseGlobalQueue |= aQueuesUtterances;
 
   nsTArray<SpeechSynthesisParent*> ssplist;
   GetAllSpeechSynthActors(ssplist);
 
   if (!ssplist.IsEmpty()) {
     mozilla::dom::RemoteVoice ssvoice(nsString(aUri),
                                       nsString(aName),
                                       nsString(aLang),
-                                      aLocalService,
-                                      aQueuesUtterances);
+                                      aLocalService);
 
     for (uint32_t i = 0; i < ssplist.Length(); ++i) {
       unused << ssplist[i]->SendVoiceAdded(ssvoice);
     }
   }
 
   return NS_OK;
 }
@@ -652,139 +569,45 @@ void
 nsSynthVoiceRegistry::Speak(const nsAString& aText,
                             const nsAString& aLang,
                             const nsAString& aUri,
                             const float& aVolume,
                             const float& aRate,
                             const float& aPitch,
                             nsSpeechTask* aTask)
 {
-  MOZ_ASSERT(XRE_IsParentProcess());
+  LOG(LogLevel::Debug,
+      ("nsSynthVoiceRegistry::Speak text='%s' lang='%s' uri='%s' rate=%f pitch=%f",
+       NS_ConvertUTF16toUTF8(aText).get(), NS_ConvertUTF16toUTF8(aLang).get(),
+       NS_ConvertUTF16toUTF8(aUri).get(), aRate, aPitch));
 
   VoiceData* voice = FindBestMatch(aUri, aLang);
 
   if (!voice) {
     NS_WARNING("No voices found.");
     aTask->DispatchError(0, 0);
     return;
   }
 
   aTask->SetChosenVoiceURI(voice->mUri);
 
-  if (mUseGlobalQueue || sForceGlobalQueue) {
-    LOG(LogLevel::Debug,
-        ("nsSynthVoiceRegistry::Speak queueing text='%s' lang='%s' uri='%s' rate=%f pitch=%f",
-         NS_ConvertUTF16toUTF8(aText).get(), NS_ConvertUTF16toUTF8(aLang).get(),
-         NS_ConvertUTF16toUTF8(aUri).get(), aRate, aPitch));
-    nsRefPtr<GlobalQueueItem> item = new GlobalQueueItem(voice, aTask, aText,
-                                                         aVolume, aRate, aPitch);
-    mGlobalQueue.AppendElement(item);
-
-    if (mGlobalQueue.Length() == 1) {
-      SpeakImpl(item->mVoice, item->mTask, item->mText, item->mVolume, item->mRate,
-                item->mPitch);
-    }
-  } else {
-    SpeakImpl(voice, aTask, aText, aVolume, aRate, aPitch);
-  }
-}
-
-void
-nsSynthVoiceRegistry::SpeakNext()
-{
-  MOZ_ASSERT(XRE_IsParentProcess());
-
-  LOG(LogLevel::Debug,
-      ("nsSynthVoiceRegistry::SpeakNext %d", mGlobalQueue.IsEmpty()));
-
-  SetIsSpeaking(false);
-
-  if (mGlobalQueue.IsEmpty()) {
-    return;
-  }
-
-  mGlobalQueue.RemoveElementAt(0);
-
-  while (!mGlobalQueue.IsEmpty()) {
-    nsRefPtr<GlobalQueueItem> item = mGlobalQueue.ElementAt(0);
-    if (item->mTask->IsPreCanceled()) {
-      mGlobalQueue.RemoveElementAt(0);
-      continue;
-    }
-    if (!item->mTask->IsPrePaused()) {
-      SpeakImpl(item->mVoice, item->mTask, item->mText, item->mVolume,
-                item->mRate, item->mPitch);
-    }
-    break;
-  }
-}
-
-void
-nsSynthVoiceRegistry::ResumeQueue()
-{
-  MOZ_ASSERT(XRE_IsParentProcess());
-  LOG(LogLevel::Debug,
-      ("nsSynthVoiceRegistry::ResumeQueue %d", mGlobalQueue.IsEmpty()));
-
-  if (mGlobalQueue.IsEmpty()) {
-    return;
-  }
-
-  nsRefPtr<GlobalQueueItem> item = mGlobalQueue.ElementAt(0);
-  if (!item->mTask->IsPrePaused()) {
-    SpeakImpl(item->mVoice, item->mTask, item->mText, item->mVolume,
-              item->mRate, item->mPitch);
-  }
-}
-
-bool
-nsSynthVoiceRegistry::IsSpeaking()
-{
-  return mIsSpeaking;
-}
-
-void
-nsSynthVoiceRegistry::SetIsSpeaking(bool aIsSpeaking)
-{
-  MOZ_ASSERT(XRE_IsParentProcess());
-
-  // Only set to 'true' if global queue is enabled.
-  mIsSpeaking = aIsSpeaking && (mUseGlobalQueue || sForceGlobalQueue);
-
-  nsTArray<SpeechSynthesisParent*> ssplist;
-  GetAllSpeechSynthActors(ssplist);
-  for (uint32_t i = 0; i < ssplist.Length(); ++i) {
-    unused << ssplist[i]->SendIsSpeakingChanged(aIsSpeaking);
-  }
-}
-
-void
-nsSynthVoiceRegistry::SpeakImpl(VoiceData* aVoice,
-                                nsSpeechTask* aTask,
-                                const nsAString& aText,
-                                const float& aVolume,
-                                const float& aRate,
-                                const float& aPitch)
-{
-  LOG(LogLevel::Debug,
-      ("nsSynthVoiceRegistry::SpeakImpl queueing text='%s' uri='%s' rate=%f pitch=%f",
-       NS_ConvertUTF16toUTF8(aText).get(), NS_ConvertUTF16toUTF8(aVoice->mUri).get(),
-       aRate, aPitch));
+  LOG(LogLevel::Debug, ("nsSynthVoiceRegistry::Speak - Using voice URI: %s",
+                     NS_ConvertUTF16toUTF8(voice->mUri).get()));
 
   SpeechServiceType serviceType;
 
-  DebugOnly<nsresult> rv = aVoice->mService->GetServiceType(&serviceType);
+  DebugOnly<nsresult> rv = voice->mService->GetServiceType(&serviceType);
   NS_WARN_IF_FALSE(NS_SUCCEEDED(rv), "Failed to get speech service type");
 
   if (serviceType == nsISpeechService::SERVICETYPE_INDIRECT_AUDIO) {
-    aTask->Init(nullptr);
+    aTask->SetIndirectAudio(true);
   } else {
     if (!mStream) {
       mStream = MediaStreamGraph::GetInstance()->CreateTrackUnionStream(nullptr);
     }
-    aTask->Init(mStream);
+    aTask->BindStream(mStream);
   }
 
-  aVoice->mService->Speak(aText, aVoice->mUri, aVolume, aRate, aPitch, aTask);
+  voice->mService->Speak(aText, voice->mUri, aVolume, aRate, aPitch, aTask);
 }
 
 } // namespace dom
 } // namespace mozilla
--- a/dom/media/webspeech/synth/nsSynthVoiceRegistry.h
+++ b/dom/media/webspeech/synth/nsSynthVoiceRegistry.h
@@ -18,93 +18,67 @@ class nsISpeechService;
 namespace mozilla {
 namespace dom {
 
 class RemoteVoice;
 class SpeechSynthesisUtterance;
 class SpeechSynthesisChild;
 class nsSpeechTask;
 class VoiceData;
-class GlobalQueueItem;
 
 class nsSynthVoiceRegistry final : public nsISynthVoiceRegistry
 {
 public:
   NS_DECL_ISUPPORTS
   NS_DECL_NSISYNTHVOICEREGISTRY
 
   nsSynthVoiceRegistry();
 
   already_AddRefed<nsSpeechTask> SpeakUtterance(SpeechSynthesisUtterance& aUtterance,
                                                 const nsAString& aDocLang);
 
   void Speak(const nsAString& aText, const nsAString& aLang,
              const nsAString& aUri, const float& aVolume,  const float& aRate,
              const float& aPitch, nsSpeechTask* aTask);
 
-  void SendVoicesAndState(InfallibleTArray<RemoteVoice>* aVoices,
-                          InfallibleTArray<nsString>* aDefaults,
-                          bool* aIsSpeaking);
-
-  void SpeakNext();
-
-  void ResumeQueue();
-
-  bool IsSpeaking();
-
-  void SetIsSpeaking(bool aIsSpeaking);
+  void SendVoices(InfallibleTArray<RemoteVoice>* aVoices,
+                  InfallibleTArray<nsString>* aDefaults);
 
   static nsSynthVoiceRegistry* GetInstance();
 
   static already_AddRefed<nsSynthVoiceRegistry> GetInstanceForService();
 
   static void RecvRemoveVoice(const nsAString& aUri);
 
   static void RecvAddVoice(const RemoteVoice& aVoice);
 
   static void RecvSetDefaultVoice(const nsAString& aUri, bool aIsDefault);
 
-  static void RecvIsSpeakingChanged(bool aIsSpeaking);
-
   static void Shutdown();
 
 private:
   virtual ~nsSynthVoiceRegistry();
 
   VoiceData* FindBestMatch(const nsAString& aUri, const nsAString& lang);
 
   bool FindVoiceByLang(const nsAString& aLang, VoiceData** aRetval);
 
   nsresult AddVoiceImpl(nsISpeechService* aService,
                         const nsAString& aUri,
                         const nsAString& aName,
                         const nsAString& aLang,
-                        bool aLocalService,
-                        bool aQueuesUtterances);
+                        bool aLocalService);
 
-  void SpeakImpl(VoiceData* aVoice,
-                 nsSpeechTask* aTask,
-                 const nsAString& aText,
-                 const float& aVolume,
-                 const float& aRate,
-                 const float& aPitch);
+  nsTArray<nsRefPtr<VoiceData> > mVoices;
 
-  nsTArray<nsRefPtr<VoiceData>> mVoices;
-
-  nsTArray<nsRefPtr<VoiceData>> mDefaultVoices;
+  nsTArray<nsRefPtr<VoiceData> > mDefaultVoices;
 
   nsRefPtrHashtable<nsStringHashKey, VoiceData> mUriVoiceMap;
 
   SpeechSynthesisChild* mSpeechSynthChild;
 
   nsRefPtr<ProcessedMediaStream> mStream;
-
-  bool mUseGlobalQueue;
-
-  nsTArray<nsRefPtr<GlobalQueueItem>> mGlobalQueue;
-
-  bool mIsSpeaking;
 };
 
 } // namespace dom
 } // namespace mozilla
 
 #endif
--- a/dom/media/webspeech/synth/pico/nsPicoService.cpp
+++ b/dom/media/webspeech/synth/pico/nsPicoService.cpp
@@ -514,21 +514,19 @@ PicoAddVoiceTraverser(const nsAString& a
   }
 
   VoiceTraverserData* data = static_cast<VoiceTraverserData*>(aUserArg);
 
   nsAutoString name;
   name.AssignLiteral("Pico ");
   name.Append(aVoice->mLanguage);
 
-  // This service is multi-threaded and can handle more than one utterance at a
-  // time before previous utterances end. So, aQueuesUtterances == false
   DebugOnly<nsresult> rv =
     data->mRegistry->AddVoice(
-      data->mService, aUri, name, aVoice->mLanguage, true, false);
+      data->mService, aUri, name, aVoice->mLanguage, true);
   NS_WARN_IF_FALSE(NS_SUCCEEDED(rv), "Failed to add voice");
 
   return PL_DHASH_NEXT;
 }
 
 void
 nsPicoService::Init()
 {
--- a/dom/media/webspeech/synth/test/common.js
+++ b/dom/media/webspeech/synth/test/common.js
@@ -1,19 +1,18 @@
 function synthTestQueue(aTestArgs, aEndFunc) {
   var utterances = [];
   for (var i in aTestArgs) {
     var uargs = aTestArgs[i][0];
-    var win = uargs.win || window;
-    var u = new win.SpeechSynthesisUtterance(uargs.text);
+    var u = new SpeechSynthesisUtterance(uargs.text);
 
-    if (uargs.args) {
-      for (var attr in uargs.args)
-        u[attr] = uargs.args[attr];
-    }
+    delete uargs.text;
+
+    for (var attr in uargs)
+      u[attr] = uargs[attr];
 
     function onend_handler(e) {
       is(e.target, utterances.shift(), "Target matches utterances");
       ok(!speechSynthesis.speaking, "speechSynthesis is not speaking.");
 
       isnot(e.eventType, 'error', "Error in utterance");
 
       if (utterances.length) {
@@ -39,32 +38,14 @@ function synthTestQueue(aTestArgs, aEndF
     u.addEventListener('error', onend_handler);
 
     u.addEventListener(
       'error', function onerror_handler(e) {
         ok(false, "Error in speech utterance '" + e.target.text + "'");
       });
 
     utterances.push(u);
-    win.speechSynthesis.speak(u);
+    speechSynthesis.speak(u);
   }
 
   ok(!speechSynthesis.speaking, "speechSynthesis is not speaking yet.");
   ok(speechSynthesis.pending, "speechSynthesis has an utterance queued.");
 }
-
-function loadFrame(frameId) {
-  return new Promise(function(resolve, reject) {
-    var frame = document.getElementById(frameId);
-    frame.addEventListener('load', function (e) {
-      frame.contentWindow.document.title = frameId;
-      resolve(frame);
-    });
-    frame1.src = 'data:text/html,' + encodeURI('<html><head></head><body></body></html>');
-  });
-}
-
-function testSynthState(win, expectedState) {
-  for (var attr in expectedState) {
-    is(win.speechSynthesis[attr], expectedState[attr],
-      win.document.title + ": '" + attr + '" does not match');
-  }
-}
\ No newline at end of file
deleted file mode 100644
--- a/dom/media/webspeech/synth/test/file_global_queue.html
+++ /dev/null
@@ -1,69 +0,0 @@
-<!DOCTYPE HTML>
-<html>
-<!--
-https://bugzilla.mozilla.org/show_bug.cgi?id=1188099
--->
-<head>
-  <meta charset="utf-8">
-  <title>Test for Bug 1188099: Global queue should correctly schedule utterances</title>
-  <script type="application/javascript">
-    window.SimpleTest = parent.SimpleTest;
-    window.info = parent.info;
-    window.is = parent.is;
-    window.isnot = parent.isnot;
-    window.ok = parent.ok;
-    window.todo = parent.todo;
-  </script>
-  <script type="application/javascript" src="common.js"></script>
-</head>
-<body>
-<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1188099">Mozilla Bug 1188099</a>
-<iframe id="frame1"></iframe>
-<iframe id="frame2"></iframe>
-<div id="content" style="display: none">
-
-</div>
-<pre id="test">
-<script type="application/javascript">
-  Promise.all([loadFrame('frame1'), loadFrame('frame2')]).then(function ([frame1, frame2]) {
-    var win1 = frame1.contentWindow;
-    var win2 = frame2.contentWindow;
-    var utterance1 = new win1.SpeechSynthesisUtterance("hello, losers");
-    var utterance2 = new win1.SpeechSynthesisUtterance("hello, losers three");
-    var utterance3 = new win2.SpeechSynthesisUtterance("hello, losers too");
-    var eventOrder = ['start1', 'end1', 'start3', 'end3', 'start2', 'end2'];
-    utterance1.addEventListener('start', function(e) {
-      is(eventOrder.shift(), 'start1', 'start1');
-      testSynthState(win1, { speaking: true, pending: true });
-      testSynthState(win2, { speaking: true, pending: true });
-    });
-    utterance1.addEventListener('end', function(e) {
-      is(eventOrder.shift(), 'end1', 'end1');
-    });
-    utterance3.addEventListener('start', function(e) {
-      is(eventOrder.shift(), 'start3', 'start3');
-      testSynthState(win1, { speaking: true, pending: true });
-      testSynthState(win2, { speaking: true, pending: false });
-    });
-    utterance3.addEventListener('end', function(e) {
-      is(eventOrder.shift(), 'end3', 'end3');
-    });
-    utterance2.addEventListener('start', function(e) {
-      is(eventOrder.shift(), 'start2', 'start2');
-      testSynthState(win1, { speaking: true, pending: false });
-      testSynthState(win2, { speaking: true, pending: false });
-    });
-    utterance2.addEventListener('end', function(e) {
-      is(eventOrder.shift(), 'end2', 'end2');
-      testSynthState(win1, { speaking: false, pending: false });
-      testSynthState(win2, { speaking: false, pending: false });
-      SimpleTest.finish();
-    });
-    win1.speechSynthesis.speak(utterance1);
-    win1.speechSynthesis.speak(utterance2);
-    win2.speechSynthesis.speak(utterance3);
-  });
-</script>
-</pre>
-</body>
-</html>
deleted file mode 100644
--- a/dom/media/webspeech/synth/test/file_global_queue_cancel.html
+++ /dev/null
@@ -1,86 +0,0 @@
-<!DOCTYPE HTML>
-<html>
-<!--
-https://bugzilla.mozilla.org/show_bug.cgi?id=1188099
--->
-<head>
-  <meta charset="utf-8">
-  <title>Test for Bug 1188099: Calling cancel() should work correctly with global queue</title>
-  <script type="application/javascript">
-    window.SimpleTest = parent.SimpleTest;
-    window.info = parent.info;
-    window.is = parent.is;
-    window.isnot = parent.isnot;
-    window.ok = parent.ok;
-    window.todo = parent.todo;
-  </script>
-  <script type="application/javascript" src="common.js"></script>
-</head>
-<body>
-<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1188099">Mozilla Bug 1188099</a>
-<iframe id="frame1"></iframe>
-<iframe id="frame2"></iframe>
-<div id="content" style="display: none">
-
-</div>
-<pre id="test">
-<script type="application/javascript">
-  Promise.all([loadFrame('frame1'), loadFrame('frame2')]).then(function ([frame1, frame2]) {
-    var win1 = frame1.contentWindow;
-    var win2 = frame2.contentWindow;
-
-    var utterance1 = new win1.SpeechSynthesisUtterance(
-      "u1: Donec ac nunc feugiat, posuere");
-    var utterance2 = new win1.SpeechSynthesisUtterance("u2: hello, losers too");
-    utterance2.lang = 'it-IT-noend';
-    var utterance3 = new win1.SpeechSynthesisUtterance("u3: hello, losers three");
-
-    var utterance4 = new win2.SpeechSynthesisUtterance("u4: hello, losers same!");
-    utterance4.lang = 'it-IT-noend';
-    var utterance5 = new win2.SpeechSynthesisUtterance("u5: hello, losers too");
-    utterance5.lang = 'it-IT-noend';
-
-    var eventOrder = ['start1', 'end1', 'start2', 'end2'];
-    utterance1.addEventListener('start', function(e) {
-      is(eventOrder.shift(), 'start1', 'start1');
-      testSynthState(win1, { speaking: true, pending: true });
-      testSynthState(win2, { speaking: true, pending: true });
-      win2.speechSynthesis.cancel();
-
-    });
-    utterance1.addEventListener('end', function(e) {
-      is(eventOrder.shift(), 'end1', 'end1');
-      testSynthState(win1, { pending: true });
-      testSynthState(win2, { pending: false });
-    });
-    utterance2.addEventListener('start', function(e) {
-      is(eventOrder.shift(), 'start2', 'start2');
-      testSynthState(win1, { speaking: true, pending: true });
-      testSynthState(win2, { speaking: true, pending: false });
-      win1.speechSynthesis.cancel();
-    });
-    utterance2.addEventListener('end', function(e) {
-      is(eventOrder.shift(), 'end2', 'end2');
-      testSynthState(win1, { speaking: false, pending: false });
-      testSynthState(win2, { speaking: false, pending: false });
-      SimpleTest.finish();
-    });
-
-    function wrongUtterance(e) {
-      ok(false, 'This shall not be uttered: "' + e.target.text + '"');
-    }
-
-    utterance3.addEventListener('start', wrongUtterance);
-    utterance4.addEventListener('start', wrongUtterance);
-    utterance5.addEventListener('start', wrongUtterance);
-
-    win1.speechSynthesis.speak(utterance1);
-    win1.speechSynthesis.speak(utterance2);
-    win1.speechSynthesis.speak(utterance3);
-    win2.speechSynthesis.speak(utterance4);
-    win2.speechSynthesis.speak(utterance5);
-  });
-</script>
-</pre>
-</body>
-</html>
deleted file mode 100644
--- a/dom/media/webspeech/synth/test/file_global_queue_pause.html
+++ /dev/null
@@ -1,128 +0,0 @@
-<!DOCTYPE HTML>
-<html>
-<!--
-https://bugzilla.mozilla.org/show_bug.cgi?id=1188099
--->
-<head>
-  <meta charset="utf-8">
-  <title>Test for Bug 1188099: Calling pause() should work correctly with global queue</title>
-  <script type="application/javascript">
-    window.SimpleTest = parent.SimpleTest;
-    window.info = parent.info;
-    window.is = parent.is;
-    window.isnot = parent.isnot;
-    window.ok = parent.ok;
-    window.todo = parent.todo;
-  </script>
-  <script type="application/javascript" src="common.js"></script>
-</head>
-<body>
-<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1188099">Mozilla Bug 1188099</a>
-<iframe id="frame1"></iframe>
-<iframe id="frame2"></iframe>
-<div id="content" style="display: none">
-
-</div>
-<pre id="test">
-<script type="application/javascript">
-  Promise.all([loadFrame('frame1'), loadFrame('frame2')]).then(function ([frame1, frame2]) {
-    var win1 = frame1.contentWindow;
-    var win2 = frame2.contentWindow;
-
-    var utterance1 = new win1.SpeechSynthesisUtterance("Speak utterance 1.");
-    var utterance2 = new win2.SpeechSynthesisUtterance("Speak utterance 2.");
-    var utterance3 = new win1.SpeechSynthesisUtterance("Speak utterance 3.");
-    var utterance4 = new win2.SpeechSynthesisUtterance("Speak utterance 4.");
-    var eventOrder = ['start1', 'pause1', 'resume1', 'end1', 'start2', 'end2',
-      'start4', 'end4', 'start3', 'end3'];
-
-    utterance1.addEventListener('start', function(e) {
-      is(eventOrder.shift(), 'start1', 'start1');
-      win1.speechSynthesis.pause();
-    });
-    utterance1.addEventListener('pause', function(e) {
-      info('pause??');
-      var expectedEvent = eventOrder.shift()
-      is(expectedEvent, 'pause1', 'pause1');
-      testSynthState(win1, { speaking: true, pending: false, paused: true});
-      testSynthState(win2, { speaking: true, pending: true, paused: false});
-
-      if (expectedEvent == 'pause1') {
-        win1.speechSynthesis.resume();
-      }
-    });
-    utterance1.addEventListener('resume', function(e) {
-      is(eventOrder.shift(), 'resume1', 'resume1');
-      testSynthState(win1, { speaking: true, pending: false, paused: false});
-      testSynthState(win2, { speaking: true, pending: true, paused: false});
-
-      win2.speechSynthesis.pause();
-
-      testSynthState(win1, { speaking: true, pending: false, paused: false});
-      // 1188099: currently, paused state is not gaurenteed to be immediate.
-      testSynthState(win2, { speaking: true, pending: true });
-    });
-    utterance1.addEventListener('end', function(e) {
-      is(eventOrder.shift(), 'end1', 'end1');
-      testSynthState(win1, { speaking: false, pending: false, paused: false});
-      testSynthState(win2, { speaking: false, pending: true, paused: true});
-
-      win2.speechSynthesis.resume();
-    });
-
-    utterance2.addEventListener('start', function(e) {
-      is(eventOrder.shift(), 'start2', 'start2');
-      testSynthState(win1, { speaking: true, pending: false, paused: false});
-      testSynthState(win2, { speaking: true, pending: false, paused: false});
-    });
-    utterance2.addEventListener('end', function(e) {
-      is(eventOrder.shift(), 'end2', 'end2');
-      testSynthState(win1, { speaking: false, pending: false, paused: false});
-      testSynthState(win2, { speaking: false, pending: false, paused: false});
-
-      win1.speechSynthesis.pause();
-
-      testSynthState(win1, { speaking: false, pending: false, paused: true});
-      testSynthState(win2, { speaking: false, pending: false, paused: false});
-
-      win1.speechSynthesis.speak(utterance3);
-      win2.speechSynthesis.speak(utterance4);
-
-      testSynthState(win1, { speaking: false, pending: true, paused: true});
-      testSynthState(win2, { speaking: false, pending: true, paused: false});
-    });
-
-    utterance4.addEventListener('start', function(e) {
-      is(eventOrder.shift(), 'start4', 'start4');
-      testSynthState(win1, { speaking: true, pending: true, paused: true});
-      testSynthState(win2, { speaking: true, pending: false, paused: false});
-
-      win1.speechSynthesis.resume();
-    });
-    utterance4.addEventListener('end', function(e) {
-      is(eventOrder.shift(), 'end4', 'end4');
-      testSynthState(win1, { speaking: false, pending: true, paused: false});
-      testSynthState(win2, { speaking: false, pending: false, paused: false});
-    });
-
-    utterance3.addEventListener('start', function(e) {
-      is(eventOrder.shift(), 'start3', 'start3');
-      testSynthState(win1, { speaking: true, pending: false, paused: false});
-      testSynthState(win2, { speaking: true, pending: false, paused: false});
-    });
-
-    utterance3.addEventListener('end', function(e) {
-      is(eventOrder.shift(), 'end3', 'end3');
-      testSynthState(win1, { speaking: false, pending: false, paused: false});
-      testSynthState(win2, { speaking: false, pending: false, paused: false});
-
-      SimpleTest.finish();
-    });
-
-    win1.speechSynthesis.speak(utterance1);
-    win2.speechSynthesis.speak(utterance2);
-  });
-</script>
-</pre>
-</body>
-</html>
--- a/dom/media/webspeech/synth/test/file_indirect_service_events.html
+++ b/dom/media/webspeech/synth/test/file_indirect_service_events.html
@@ -21,82 +21,73 @@ https://bugzilla.mozilla.org/show_bug.cg
 <div id="content" style="display: none">
 
 </div>
 <pre id="test">
 <script type="application/javascript">
 
 /** Test for Bug 1155034 **/
 
-function testFunc(done_cb) {
-  function test_with_events() {
-    info('test_with_events');
-    var utterance = new SpeechSynthesisUtterance("never end, callback events");
-    utterance.lang = 'it-IT-noend';
-
-    utterance.addEventListener('start', function(e) {
-      info('start test_with_events');
-      speechSynthesis.pause();
-    // Wait to see if we get some bad events we didn't expect.
-    });
+function test_with_events() {
+  info('test_with_events');
+  var utterance = new SpeechSynthesisUtterance("never end, callback events");
+  utterance.lang = 'it-IT-noend';
 
-    utterance.addEventListener('pause', function(e) {
-      is(e.charIndex, 1, 'pause event charIndex matches service arguments');
-      is(e.elapsedTime, 1.5, 'pause event elapsedTime matches service arguments');
-      speechSynthesis.resume();
-    });
-
-    utterance.addEventListener('resume', function(e) {
-      is(e.charIndex, 1, 'resume event charIndex matches service arguments');
-      is(e.elapsedTime, 1.5, 'resume event elapsedTime matches service arguments');
-      speechSynthesis.cancel();
-    });
-
-    utterance.addEventListener('end', function(e) {
-      ok(e.charIndex, 1, 'resume event charIndex matches service arguments');
-      ok(e.elapsedTime, 1.5, 'end event elapsedTime matches service arguments');
-      test_no_events();
-    });
+  utterance.addEventListener('start', function(e) {
+    speechSynthesis.pause();
+  // Wait to see if we get some bad events we didn't expect.
+  });
 
-    info('start speak');
-    speechSynthesis.speak(utterance);
-  }
-
-  function forbiddenEvent(e) {
-    ok(false, 'no "' + e.type + '" event was explicitly dispatched from the service')
-  }
+  utterance.addEventListener('pause', function(e) {
+    is(e.charIndex, 1, 'pause event charIndex matches service arguments');
+    is(e.elapsedTime, 1.5, 'pause event elapsedTime matches service arguments');
+    speechSynthesis.resume();
+  });
 
-  function test_no_events() {
-    info('test_no_events');
-    var utterance = new SpeechSynthesisUtterance("never end");
-    utterance.lang = "it-IT-noevents-noend";
-    utterance.addEventListener('start', function(e) {
-      speechSynthesis.pause();
-      // Wait to see if we get some bad events we didn't expect.
-      setTimeout(function() {
-        ok(true, 'didn\'t get any unwanted events');
-        utterance.removeEventListener('end', forbiddenEvent);
-        SpecialPowers.wrap(speechSynthesis).forceEnd();
-        done_cb();
-      }, 1000);
-    });
+  utterance.addEventListener('resume', function(e) {
+    is(e.charIndex, 1, 'resume event charIndex matches service arguments');
+    is(e.elapsedTime, 1.5, 'resume event elapsedTime matches service arguments');
+    speechSynthesis.cancel();
+  });
 
-    utterance.addEventListener('pause', forbiddenEvent);
-    utterance.addEventListener('end', forbiddenEvent);
+  utterance.addEventListener('end', function(e) {
+    ok(e.charIndex, 1, 'resume event charIndex matches service arguments');
+    ok(e.elapsedTime, 1.5, 'end event elapsedTime matches service arguments');
+    test_no_events();
+  });
 
-    speechSynthesis.speak(utterance);
-  }
-
-  test_with_events();
+  speechSynthesis.speak(utterance);
 }
 
-// Run test with no global queue, and then run it with a global queue.
-testFunc(function() {
-  SpecialPowers.pushPrefEnv(
-    { set: [['media.webspeech.synth.force_global_queue', true]] }, function() {
-      testFunc(SimpleTest.finish)
-    });
-});
+function test_no_events() {
+  var utterance = new SpeechSynthesisUtterance("never end");
+  utterance.lang = "it-IT-noevents-noend";
+  utterance.addEventListener('start', function(e) {
+    speechSynthesis.pause();
+    // Wait to see if we get some bad events we didn't expect.
+    setTimeout(function() {
+      SimpleTest.finish();
+    }, 1000);
+  });
+
+  utterance.addEventListener('pause', function(e) {
+    ok(false, 'no pause event was explicitly dispatched from the service')
+    speechSynthesis.resume();
+  });
+
+  utterance.addEventListener('resume', function(e) {
+    ok(false, 'no resume event was explicitly dispatched from the service')
+    speechSynthesis.cancel();
+  });
+
+  utterance.addEventListener('end', function(e) {
+    ok(false, 'no end event was explicitly dispatched from the service')
+  });
+
+  speechSynthesis.speak(utterance);
+}
+
+test_with_events();
 
 </script>
 </pre>
 </body>
 </html>
--- a/dom/media/webspeech/synth/test/file_speech_cancel.html
+++ b/dom/media/webspeech/synth/test/file_speech_cancel.html
@@ -21,80 +21,69 @@ https://bugzilla.mozilla.org/show_bug.cg
 <div id="content" style="display: none">
 
 </div>
 <pre id="test">
 <script type="application/javascript">
 
 /** Test for Bug 1150315 **/
 
-function testFunc(done_cb) {
-  var gotEndEvent = false;
-  // A long utterance that we will interrupt.
-  var utterance = new SpeechSynthesisUtterance("Donec ac nunc feugiat, posuere " +
-    "mauris id, pharetra velit. Donec fermentum orci nunc, sit amet maximus" +
-    "dui tincidunt ut. Sed ultricies ac nisi a laoreet. Proin interdum," +
-    "libero maximus hendrerit posuere, lorem risus egestas nisl, a" +
-    "ultricies massa justo eu nisi. Duis mattis nibh a ligula tincidunt" +
-    "tincidunt non eu erat. Sed bibendum varius vulputate. Cras leo magna," +
-    "ornare ac posuere vel, luctus id metus. Mauris nec quam ac augue" +
-    "consectetur bibendum. Integer a commodo tortor. Duis semper dolor eu" +
-    "facilisis facilisis. Etiam venenatis turpis est, quis tincidunt velit" +
-    "suscipit a. Cras semper orci in sapien rhoncus bibendum. Suspendisse" +
-    "eu ex lobortis, finibus enim in, condimentum quam. Maecenas eget dui" +
-    "ipsum. Aliquam tortor leo, interdum eget congue ut, tempor id elit.");
-  utterance.addEventListener('start', function(e) {
-    ok(true, 'start utterance 1');
-    speechSynthesis.cancel();
-    info('cancel!');
-    speechSynthesis.speak(utterance2);
-    info('speak??');
-  });
+var gotEndEvent = false;
+// A long utterance that we will interrupt.
+var utterance = new SpeechSynthesisUtterance("Donec ac nunc feugiat, posuere " +
+  "mauris id, pharetra velit. Donec fermentum orci nunc, sit amet maximus" +
+  "dui tincidunt ut. Sed ultricies ac nisi a laoreet. Proin interdum," +
+  "libero maximus hendrerit posuere, lorem risus egestas nisl, a" +
+  "ultricies massa justo eu nisi. Duis mattis nibh a ligula tincidunt" +
+  "tincidunt non eu erat. Sed bibendum varius vulputate. Cras leo magna," +
+  "ornare ac posuere vel, luctus id metus. Mauris nec quam ac augue" +
+  "consectetur bibendum. Integer a commodo tortor. Duis semper dolor eu" +
+  "facilisis facilisis. Etiam venenatis turpis est, quis tincidunt velit" +
+  "suscipit a. Cras semper orci in sapien rhoncus bibendum. Suspendisse" +
+  "eu ex lobortis, finibus enim in, condimentum quam. Maecenas eget dui" +
+  "ipsum. Aliquam tortor leo, interdum eget congue ut, tempor id elit.");
+utterance.addEventListener('start', function(e) {
+  ok(true, 'start utterance 1');
+  speechSynthesis.cancel();
+  speechSynthesis.speak(utterance2);
+});
 
-  var utterance2 = new SpeechSynthesisUtterance("Proin ornare neque vitae " +
-    "risus mattis rutrum. Suspendisse a velit ut est convallis aliquet." +
-    "Nullam ante elit, malesuada vel luctus rutrum, ultricies nec libero." +
-    "Praesent eu iaculis orci. Sed nisl diam, sodales ac purus et," +
-    "volutpat interdum tortor. Nullam aliquam porta elit et maximus. Cras" +
-    "risus lectus, elementum vel sodales vel, ultricies eget lectus." +
-    "Curabitur velit lacus, mollis vel finibus et, molestie sit amet" +
-    "sapien. Proin vitae dolor ac augue posuere efficitur ac scelerisque" +
-    "diam. Nulla sed odio elit.");
-  utterance2.addEventListener('start', function() {
-    info('start');
-    speechSynthesis.cancel();
-    speechSynthesis.speak(utterance3);
-  });
-  utterance2.addEventListener('end', function(e) {
-    gotEndEvent = true;
-  });
+var utterance2 = new SpeechSynthesisUtterance("Proin ornare neque vitae " +
+  "risus mattis rutrum. Suspendisse a velit ut est convallis aliquet." +
+  "Nullam ante elit, malesuada vel luctus rutrum, ultricies nec libero." +
+  "Praesent eu iaculis orci. Sed nisl diam, sodales ac purus et," +
+  "volutpat interdum tortor. Nullam aliquam porta elit et maximus. Cras" +
+  "risus lectus, elementum vel sodales vel, ultricies eget lectus." +
+  "Curabitur velit lacus, mollis vel finibus et, molestie sit amet" +
+  "sapien. Proin vitae dolor ac augue posuere efficitur ac scelerisque" +
+  "diam. Nulla sed odio elit.");
+utterance2.addEventListener('start', function() {
+  speechSynthesis.cancel();
+  speechSynthesis.speak(utterance3);
+});
+utterance2.addEventListener('end', function(e) {
+  gotEndEvent = true;
+});
 
-  var utterance3 = new SpeechSynthesisUtterance("Hello, world 3!");
-  utterance3.addEventListener('start', function() {
-    ok(gotEndEvent, "didn't get start event for this utterance");
-  });
-  utterance3.addEventListener('end', done_cb);
+var utterance3 = new SpeechSynthesisUtterance("Hello, world 3!");
+utterance3.addEventListener('start', function() {
+  ok(gotEndEvent, "didn't get start event for this utterance");
+});
+utterance3.addEventListener('end', function(e) {
+  SimpleTest.finish();
+});
 
-  // Speak/cancel while paused (Bug 1187105)
-  speechSynthesis.pause();
-  speechSynthesis.speak(new SpeechSynthesisUtterance("hello."));
-  ok(speechSynthesis.pending, "paused speechSynthesis has an utterance queued.");
-  speechSynthesis.cancel();
-  ok(!speechSynthesis.pending, "paused speechSynthesis has no utterance queued.");
-  speechSynthesis.resume();
+// Speak/cancel while paused (Bug 1187105)
+speechSynthesis.pause();
+speechSynthesis.speak(new SpeechSynthesisUtterance("hello."));
+ok(speechSynthesis.pending, "paused speechSynthesis has an utterance queued.");
+speechSynthesis.cancel();
+ok(!speechSynthesis.pending, "paused speechSynthesis has no utterance queued.");
+speechSynthesis.resume();
 
-  speechSynthesis.speak(utterance);
-  ok(!speechSynthesis.speaking, "speechSynthesis is not speaking yet.");
-  ok(speechSynthesis.pending, "speechSynthesis has an utterance queued.");
-}
-
-// Run test with no global queue, and then run it with a global queue.
-testFunc(function() {
-  SpecialPowers.pushPrefEnv(
-    { set: [['media.webspeech.synth.force_global_queue', true]] }, function() {
-      testFunc(SimpleTest.finish)
-    });
-});
+speechSynthesis.speak(utterance);
+ok(!speechSynthesis.speaking, "speechSynthesis is not speaking yet.");
+ok(speechSynthesis.pending, "speechSynthesis has an utterance queued.");
 
 </script>
 </pre>
 </body>
 </html>
--- a/dom/media/webspeech/synth/test/file_speech_queue.html
+++ b/dom/media/webspeech/synth/test/file_speech_queue.html
@@ -39,47 +39,40 @@ for (var voice of speechSynthesis.getVoi
 }
 
 ok(langUriMap['en-JM'], 'No English-Jamaican voice');
 ok(langUriMap['en-GB'], 'No English-British voice');
 ok(langUriMap['en-CA'], 'No English-Canadian voice');
 ok(langUriMap['fr-CA'], 'No French-Canadian voice');
 ok(langUriMap['es-MX'], 'No Spanish-Mexican voice');
 
-function testFunc(done_cb) {
-  synthTestQueue(
-    [[{text: "Hello, world."},
-      { uri: langUriMap['en-JM'] }],
-     [{text: "Bonjour tout le monde .",
-       args: { lang: "fr", rate: 0.5, pitch: 0.75 }},
-      { uri: langUriMap['fr-CA'], rate: 0.5, pitch: 0.75}],
-     [{text: "How are you doing?", args: { lang: "en-GB" } },
-      { rate: 1, pitch: 1, uri: langUriMap['en-GB']}],
-     [{text: "¡hasta mañana!", args: { lang: "es-MX" } },
-      { uri: langUriMap['es-MX'] }]],
-    function () {
-      var test_data = [];
-      var voices = speechSynthesis.getVoices();
-      for (var voice of voices) {
-        if (voice.voiceURI.indexOf('urn:moz-tts:fake-direct') < 0) {
-          continue;
-        }
-        test_data.push([{text: "Hello world", args: { voice: voice} },
-                        {uri: voice.voiceURI}]);
+synthTestQueue(
+  [[{text: "Hello, world."},
+    { uri: langUriMap['en-JM'] }],
+   [{text: "Bonjour tout le monde .", lang: "fr", rate: 0.5, pitch: 0.75},
+    { uri: langUriMap['fr-CA'], rate: 0.5, pitch: 0.75}],
+   [{text: "How are you doing?", lang: "en-GB"},
+    { rate: 1, pitch: 1, uri: langUriMap['en-GB']}],
+   [{text: "¡hasta mañana!", lang: "es-MX"},
+    { uri: langUriMap['es-MX'] }]],
+  function () {
+    var test_data = [];
+    var voices = speechSynthesis.getVoices();
+    for (var voice of voices) {
+      if (voice.voiceURI.indexOf('urn:moz-tts:fake-direct') < 0) {
+        continue;
       }
-
-      synthTestQueue(test_data, done_cb);
-    });
-}
+      test_data.push([{text: "Hello world", voice: voice},
+                      {uri: voice.voiceURI}]);
+    }
 
-// Run test with no global queue, and then run it with a global queue.
-testFunc(function() {
-  SpecialPowers.pushPrefEnv(
-    { set: [['media.webspeech.synth.force_global_queue', true]] }, function() {
-      testFunc(SimpleTest.finish)
-    });
-});
+    synthTestQueue(test_data,
+                   function () {
+                     SimpleTest.finish();
+                   });
+  });
+
 
 
 </script>
 </pre>
 </body>
 </html>
--- a/dom/media/webspeech/synth/test/mochitest.ini
+++ b/dom/media/webspeech/synth/test/mochitest.ini
@@ -2,20 +2,14 @@
 tags=msg
 support-files =
   common.js
   file_setup.html
   file_speech_queue.html
   file_speech_simple.html
   file_speech_cancel.html
   file_indirect_service_events.html
-  file_global_queue.html
-  file_global_queue_cancel.html
-  file_global_queue_pause.html
 
 [test_setup.html]
 [test_speech_queue.html]
 [test_speech_simple.html]
 [test_speech_cancel.html]
 [test_indirect_service_events.html]
-[test_global_queue.html]
-[test_global_queue_cancel.html]
-[test_global_queue_pause.html]
--- a/dom/media/webspeech/synth/test/nsFakeSynthServices.cpp
+++ b/dom/media/webspeech/synth/test/nsFakeSynthServices.cpp
@@ -282,19 +282,17 @@ nsFakeSynthServices::~nsFakeSynthService
 static void
 AddVoices(nsISpeechService* aService, const VoiceDetails* aVoices, uint32_t aLength)
 {
   nsSynthVoiceRegistry* registry = nsSynthVoiceRegistry::GetInstance();
   for (uint32_t i = 0; i < aLength; i++) {
     NS_ConvertUTF8toUTF16 name(aVoices[i].name);
     NS_ConvertUTF8toUTF16 uri(aVoices[i].uri);
     NS_ConvertUTF8toUTF16 lang(aVoices[i].lang);
-    // These services can handle more than one utterance at a time and have
-    // several speaking simultaniously. So, aQueuesUtterances == false
-    registry->AddVoice(aService, uri, name, lang, true, false);
+    registry->AddVoice(aService, uri, name, lang, true);
     if (aVoices[i].defaultVoice) {
       registry->SetDefaultVoice(uri, true);
     }
   }
 }
 
 void
 nsFakeSynthServices::Init()
deleted file mode 100644
--- a/dom/media/webspeech/synth/test/test_global_queue.html
+++ /dev/null
@@ -1,34 +0,0 @@
-<!DOCTYPE HTML>
-<html>
-<!--
-https://bugzilla.mozilla.org/show_bug.cgi?id=1188099
--->
-<head>
-  <meta charset="utf-8">
-  <title>Test for Bug 1188099: Global queue should correctly schedule utterances</title>
-  <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
-  <script type="application/javascript" src="common.js"></script>
-  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
-</head>
-<body>
-<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1188099">Mozilla Bug 1188099</a>
-<p id="display"></p>
-<iframe id="testFrame"></iframe>
-<div id="content" style="display: none">
-  
-</div>
-<pre id="test">
-<script type="application/javascript">
-
-/** Test for Bug 525444 **/
-
-SimpleTest.waitForExplicitFinish();
-
-SpecialPowers.pushPrefEnv({ set: [['media.webspeech.synth.enabled', true],
-                                  ['media.webspeech.synth.force_global_queue', true]] },
-                          function() { document.getElementById("testFrame").src = "file_global_queue.html"; });
-
-</script>
-</pre>
-</body>
-</html>
\ No newline at end of file
deleted file mode 100644
--- a/dom/media/webspeech/synth/test/test_global_queue_cancel.html
+++ /dev/null
@@ -1,34 +0,0 @@
-<!DOCTYPE HTML>
-<html>
-<!--
-https://bugzilla.mozilla.org/show_bug.cgi?id=1188099
--->
-<head>
-  <meta charset="utf-8">
-  <title>Test for Bug 1188099: Calling cancel() should work correctly with global queue</title>
-  <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
-  <script type="application/javascript" src="common.js"></script>
-  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
-</head>
-<body>
-<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1188099">Mozilla Bug 1188099</a>
-<p id="display"></p>
-<iframe id="testFrame"></iframe>
-<div id="content" style="display: none">
-
-</div>
-<pre id="test">
-<script type="application/javascript">
-
-/** Test for Bug 525444 **/
-
-SimpleTest.waitForExplicitFinish();
-
-SpecialPowers.pushPrefEnv({ set: [['media.webspeech.synth.enabled', true],
-                                  ['media.webspeech.synth.force_global_queue', true]] },
-                          function() { document.getElementById("testFrame").src = "file_global_queue_cancel.html"; });
-
-</script>
-</pre>
-</body>
-</html>
\ No newline at end of file
deleted file mode 100644
--- a/dom/media/webspeech/synth/test/test_global_queue_pause.html
+++ /dev/null
@@ -1,34 +0,0 @@
-<!DOCTYPE HTML>
-<html>
-<!--
-https://bugzilla.mozilla.org/show_bug.cgi?id=1188099
--->
-<head>
-  <meta charset="utf-8">
-  <title>Test for Bug 1188099: Calling pause() should work correctly with global queue</title>
-  <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
-  <script type="application/javascript" src="common.js"></script>
-  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
-</head>
-<body>
-<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1188099">Mozilla Bug 1188099</a>
-<p id="display"></p>
-<iframe id="testFrame"></iframe>
-<div id="content" style="display: none">
-
-</div>
-<pre id="test">
-<script type="application/javascript">
-
-/** Test for Bug 525444 **/
-
-SimpleTest.waitForExplicitFinish();
-
-SpecialPowers.pushPrefEnv({ set: [['media.webspeech.synth.enabled', true],
-                                  ['media.webspeech.synth.force_global_queue', true]] },
-                          function() { document.getElementById("testFrame").src = "file_global_queue_pause.html"; });
-
-</script>
-</pre>
-</body>
-</html>
\ No newline at end of file
--- a/dom/media/webspeech/synth/test/test_indirect_service_events.html
+++ b/dom/media/webspeech/synth/test/test_indirect_service_events.html
@@ -19,18 +19,15 @@ https://bugzilla.mozilla.org/show_bug.cg
 </div>
 <pre id="test">
 <script type="application/javascript">
 
 /** Test for Bug 1155034 **/
 
 SimpleTest.waitForExplicitFinish();
 
-SpecialPowers.pushPrefEnv(
-  { set: [['media.webspeech.synth.enabled', true],
-          ['media.webspeech.synth.force_global_queue', false]] },
-  function() { document.getElementById("testFrame").src = "file_indirect_service_events.html"; });
-
+SpecialPowers.pushPrefEnv({ set: [['media.webspeech.synth.enabled', true]] },
+                          function() { document.getElementById("testFrame").src = "file_indirect_service_events.html"; });
 
 </script>
 </pre>
 </body>
 </html>
--- a/dom/media/webspeech/synth/test/test_speech_cancel.html
+++ b/dom/media/webspeech/synth/test/test_speech_cancel.html
@@ -19,17 +19,15 @@ https://bugzilla.mozilla.org/show_bug.cg
 </div>
 <pre id="test">
 <script type="application/javascript">
 
 /** Test for Bug 1150315 **/
 
 SimpleTest.waitForExplicitFinish();
 
-SpecialPowers.pushPrefEnv(
-  { set: [['media.webspeech.synth.enabled', true],
-          ['media.webspeech.synth.force_global_queue', false]] },
-  function() { document.getElementById("testFrame").src = "file_speech_cancel.html"; });
+SpecialPowers.pushPrefEnv({ set: [['media.webspeech.synth.enabled', true]] },
+                          function() { document.getElementById("testFrame").src = "file_speech_cancel.html"; });
 
 </script>
 </pre>
 </body>
 </html>
--- a/dom/media/webspeech/synth/test/test_speech_queue.html
+++ b/dom/media/webspeech/synth/test/test_speech_queue.html
@@ -19,17 +19,15 @@ https://bugzilla.mozilla.org/show_bug.cg
 </div>
 <pre id="test">
 <script type="application/javascript">
 
 /** Test for Bug 525444 **/
 
 SimpleTest.waitForExplicitFinish();
 
-SpecialPowers.pushPrefEnv(
-  { set: [['media.webspeech.synth.enabled', true],
-          ['media.webspeech.synth.force_global_queue', false]] },
-  function() { document.getElementById("testFrame").src = "file_speech_queue.html"; });
+SpecialPowers.pushPrefEnv({ set: [['media.webspeech.synth.enabled', true]] },
+                          function() { document.getElementById("testFrame").src = "file_speech_queue.html"; });
 
 </script>
 </pre>
 </body>
 </html>
--- a/dom/media/webspeech/synth/windows/SapiService.cpp
+++ b/dom/media/webspeech/synth/windows/SapiService.cpp
@@ -273,21 +273,18 @@ SapiService::RegisterVoices()
     }
 
     nsAutoString uri;
     uri.AssignLiteral("urn:moz-tts:sapi:");
     uri.Append(description);
     uri.AppendLiteral("?");
     uri.Append(locale);
 
-    // This service can only speak one utterance at a time, se we set
-    // aQueuesUtterances to true in order to track global state and schedule
-    // access to this service.
     rv = registry->AddVoice(this, uri, nsDependentString(description), locale,
-                            true, true);
+                            true);
     CoTaskMemFree(description);
     if (NS_FAILED(rv)) {
       continue;
     }
 
     mVoices.Put(uri, voiceToken);
   }
 
--- a/dom/webidl/SpeechSynthesis.webidl
+++ b/dom/webidl/SpeechSynthesis.webidl
@@ -18,13 +18,9 @@ interface SpeechSynthesis {
 
   [UnsafeInPrerendering]
   void speak(SpeechSynthesisUtterance utterance);
   void cancel();
   void pause();
   [UnsafeInPrerendering]
   void resume();
   sequence<SpeechSynthesisVoice> getVoices();
-
-  [ChromeOnly]
-  // Force an utterance to end. Circumvents bad speech service implementations.
-  void forceEnd();
 };