Backed out changesets b2fb4270e0a6 and 2a0493fb37db (bug 1051148) for introducing new rooting hazards.
author Ryan VanderMeulen <ryanvm@gmail.com>
Wed, 03 Jun 2015 13:14:59 -0400
changeset 246973 51b0f06606ade3299e8cd05c454e6953a4c16adf
parent 246972 486356079205ba0908b9150225426768dc0214d1
child 246974 31b85f5bb71cd3d4cfe696dbd6f3c03221ddfa6d
child 247053 0920f2325a6dd87f10aab26499601eb0c1c8a57d
push id 60586
push user ryanvm@gmail.com
push date Wed, 03 Jun 2015 17:17:42 +0000
treeherder mozilla-inbound@51b0f06606ad
bugs 1051148
milestone 41.0a1
backs out b2fb4270e0a62758946d49cdc7c31be832afe251
2a0493fb37db4366bca761e585544aeeb4650af2
Backed out changesets b2fb4270e0a6 and 2a0493fb37db (bug 1051148) for introducing new rooting hazards. CLOSED TREE IGNORE IDL
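
For context on the backout reason: a "rooting hazard" is what the SpiderMonkey static rooting analysis reports when a GC-managed value (such as a JS::Value) is held in an unrooted stack slot across a call that may trigger garbage collection. The sketch below is illustrative only and is not taken from this changeset; the function names are made up.

    // Minimal C++ sketch of the hazard pattern and the usual fix.
    #include "jsapi.h"

    static void MayGC(JSContext* aCx);  // stands in for any call that can GC

    void Hazardous(JSContext* aCx)
    {
      JS::Value v = JS::UndefinedValue();  // unrooted GC thing on the stack
      MayGC(aCx);                          // GC could move/collect what v refers to,
                                           // so the analysis reports a hazard here
    }

    void Safe(JSContext* aCx)
    {
      JS::Rooted<JS::Value> v(aCx, JS::UndefinedValue());  // rooted: traced by the GC
      MayGC(aCx);                                          // safe while v is live
    }

Keeping such values behind JS::Rooted for as long as they are live is the usual way to satisfy the analysis.
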
b2g/app/b2g.js
b2g/confvars.sh
configure.in
dom/events/test/test_all_synthetic_events.html
dom/media/webspeech/recognition/PocketSphinxSpeechRecognitionService.cpp
dom/media/webspeech/recognition/PocketSphinxSpeechRecognitionService.h
dom/media/webspeech/recognition/SpeechGrammar.cpp
dom/media/webspeech/recognition/SpeechGrammar.h
dom/media/webspeech/recognition/SpeechGrammarList.cpp
dom/media/webspeech/recognition/SpeechGrammarList.h
dom/media/webspeech/recognition/SpeechRecognition.cpp
dom/media/webspeech/recognition/SpeechRecognitionAlternative.cpp
dom/media/webspeech/recognition/SpeechRecognitionResult.cpp
dom/media/webspeech/recognition/SpeechRecognitionResult.h
dom/media/webspeech/recognition/SpeechRecognitionResultList.h
dom/media/webspeech/recognition/moz.build
dom/media/webspeech/recognition/nsISpeechRecognitionService.idl
dom/media/webspeech/recognition/test/FakeSpeechRecognitionService.cpp
dom/media/webspeech/recognition/test/head.js
dom/tests/mochitest/general/test_interfaces.html
dom/webidl/SpeechRecognitionEvent.webidl
dom/webidl/SpeechRecognitionResult.webidl
layout/build/moz.build
layout/build/nsLayoutModule.cpp
--- a/b2g/app/b2g.js
+++ b/b2g/app/b2g.js
@@ -998,20 +998,16 @@ pref("network.proxy.pac_generator", true
 
 // List of app origins to apply browsing traffic proxy setting, separated by
 // comma.  Specify '*' in the list to apply to all apps.
 pref("network.proxy.browsing.app_origins", "app://system.gaiamobile.org");
 
 // Enable Web Speech synthesis API
 pref("media.webspeech.synth.enabled", true);
 
-// Enable Web Speech recognition API
-pref("media.webspeech.recognition.enable", true);
-pref("media.webspeech.service.default", "pocketsphinx");
-
 // Downloads API
 pref("dom.mozDownloads.enabled", true);
 pref("dom.downloads.max_retention_days", 7);
 
 // External Helper Application Handling
 //
 // All external helper application handling can require the docshell to be
 // active before allowing the external helper app service to handle content.
--- a/b2g/confvars.sh
+++ b/b2g/confvars.sh
@@ -28,21 +28,18 @@ MOZ_APP_STATIC_INI=1
 NSS_NO_LIBPKIX=1
 NSS_DISABLE_DBM=1
 MOZ_NO_EV_CERTS=1
 MOZ_DISABLE_EXPORT_JS=1
 
 # Bug 1171082 - Broken on Windows B2G Desktop
 if test "$OS_TARGET" != "WINNT"; then
 MOZ_WEBSPEECH=1
-
-if test -n "$NIGHTLY_BUILD"; then
 MOZ_WEBSPEECH_MODELS=1
 MOZ_WEBSPEECH_POCKETSPHINX=1
-fi # NIGHTLY_BUILD
 MOZ_WEBSPEECH_TEST_BACKEND=1
 fi # !WINNT
 
 if test "$OS_TARGET" = "Android"; then
 MOZ_CAPTURE=1
 MOZ_RAW=1
 MOZ_AUDIO_CHANNEL_MANAGER=1
 fi
--- a/configure.in
+++ b/configure.in
@@ -5179,16 +5179,21 @@ if test -n "$MOZ_WEBSPEECH_TEST_BACKEND"
     AC_DEFINE(MOZ_WEBSPEECH_TEST_BACKEND)
 fi
 
 AC_SUBST(MOZ_WEBSPEECH_TEST_BACKEND)
 
 dnl ========================================================
 dnl = Disable Speech API pocketsphinx backend
 dnl ========================================================
+MOZ_ARG_DISABLE_BOOL(webspeechpocketsphinx,
+[  --disable-webspeechpocketsphinx        Disable support for HTML Speech API Pocketsphinx Backend],
+    MOZ_WEBSPEECH_POCKETSPHINX=,
+    MOZ_WEBSPEECH_POCKETSPHINX=1)
+
 if test -n "$MOZ_WEBSPEECH_POCKETSPHINX"; then
     AC_DEFINE(MOZ_WEBSPEECH_POCKETSPHINX)
 fi
 
 AC_SUBST(MOZ_WEBSPEECH_POCKETSPHINX)
 
 dnl ========================================================
 dnl = Disable Speech API code
@@ -5202,16 +5207,21 @@ if test -n "$MOZ_WEBSPEECH"; then
     AC_DEFINE(MOZ_WEBSPEECH)
 fi
 
 AC_SUBST(MOZ_WEBSPEECH)
 
 dnl ========================================================
 dnl = Disable Speech API models
 dnl ========================================================
+MOZ_ARG_DISABLE_BOOL(webspeechmodels,
+[  --disable-webspeechmodels        Disable support for HTML Speech API Models],
+    MOZ_WEBSPEECH_MODELS=,
+    MOZ_WEBSPEECH_MODELS=1)
+
 if test -n "$MOZ_WEBSPEECH_MODELS"; then
     AC_DEFINE(MOZ_WEBSPEECH_MODELS)
 fi
 
 AC_SUBST(MOZ_WEBSPEECH_MODELS)
 
 dnl ========================================================
 dnl = Enable Raw Codecs
--- a/dom/events/test/test_all_synthetic_events.html
+++ b/dom/events/test/test_all_synthetic_events.html
@@ -422,20 +422,16 @@ const kEventConstructors = {
                                                                                   aProps.clientX, aProps.clientY,
                                                                                   aProps.ctrlKey, aProps.altKey, aProps.shiftKey, aProps.metaKey,
                                                                                   aProps.button, aProps.relatedTarget,
                                                                                   aProps.allowedDirections, aProps.direction, aProps.delta || 0.0,
                                                                                   aProps.clickCount);
                                                          return e;
                                                        },
                                              },
-  SpeechRecognitionError:                    { create: function (aName, aProps) {
-                                                         return new SpeechRecognitionError(aName, aProps);
-                                                       },
-                                             },
   SpeechRecognitionEvent:                    { create: function (aName, aProps) {
                                                          return new SpeechRecognitionEvent(aName, aProps);
                                                        },
                                              },
   SpeechSynthesisEvent:                      { create: function (aName, aProps) {
                                                          return new SpeechSynthesisEvent(aName, aProps);
                                                        },
                                              },
deleted file mode 100644
--- a/dom/media/webspeech/recognition/PocketSphinxSpeechRecognitionService.cpp
+++ /dev/null
@@ -1,345 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "nsThreadUtils.h"
-#include "nsXPCOMCIDInternal.h"
-#include "PocketSphinxSpeechRecognitionService.h"
-#include "nsIFile.h"
-#include "SpeechGrammar.h"
-#include "SpeechRecognition.h"
-#include "SpeechRecognitionAlternative.h"
-#include "SpeechRecognitionResult.h"
-#include "SpeechRecognitionResultList.h"
-#include "nsIObserverService.h"
-#include "mozilla/Services.h"
-#include "nsDirectoryServiceDefs.h"
-#include "nsDirectoryServiceUtils.h"
-#include "nsMemory.h"
-
-extern "C" {
-#include "pocketsphinx/pocketsphinx.h"
-#include "sphinxbase/sphinx_config.h"
-#include "sphinxbase/jsgf.h"
-}
-
-namespace mozilla {
-
-using namespace dom;
-
-class DecodeResultTask : public nsRunnable
-{
-public:
-  DecodeResultTask(const nsString& hypstring,
-                   WeakPtr<dom::SpeechRecognition> recognition)
-      : mResult(hypstring),
-        mRecognition(recognition),
-        mWorkerThread(do_GetCurrentThread())
-  {
-    MOZ_ASSERT(
-      !NS_IsMainThread()); // This should be running on the worker thread
-  }
-
-  NS_IMETHOD
-  Run()
-  {
-    MOZ_ASSERT(NS_IsMainThread()); // This method is supposed to run on the main
-                                   // thread!
-
-    // Declare javascript result events
-    nsRefPtr<SpeechEvent> event = new SpeechEvent(
-      mRecognition, SpeechRecognition::EVENT_RECOGNITIONSERVICE_FINAL_RESULT);
-    SpeechRecognitionResultList* resultList =
-      new SpeechRecognitionResultList(mRecognition);
-    SpeechRecognitionResult* result = new SpeechRecognitionResult(mRecognition);
-    SpeechRecognitionAlternative* alternative =
-      new SpeechRecognitionAlternative(mRecognition);
-
-    alternative->mTranscript = mResult;
-    alternative->mConfidence = 100;
-
-    result->mItems.AppendElement(alternative);
-    resultList->mItems.AppendElement(result);
-
-    event->mRecognitionResultList = resultList;
-    NS_DispatchToMainThread(event);
-
-    // If we don't destroy the thread when we're done with it, it will hang
-    // around forever... bad!
-    // But thread->Shutdown must be called from the main thread, not from the
-    // thread itself.
-    return mWorkerThread->Shutdown();
-  }
-
-private:
-  nsString mResult;
-  WeakPtr<dom::SpeechRecognition> mRecognition;
-  nsCOMPtr<nsIThread> mWorkerThread;
-};
-
-class DecodeTask : public nsRunnable
-{
-public:
-  DecodeTask(WeakPtr<dom::SpeechRecognition> recogntion,
-             const nsTArray<int16_t>& audiovector, ps_decoder_t* ps)
-      : mRecognition(recogntion), mAudiovector(audiovector), mPs(ps)
-  {
-  }
-
-  NS_IMETHOD
-  Run()
-  {
-    char const* hyp;
-    int rv;
-    int32 score;
-    nsAutoCString hypoValue;
-
-    rv = ps_start_utt(mPs);
-    rv = ps_process_raw(mPs, &mAudiovector[0], mAudiovector.Length(), FALSE,
-                        FALSE);
-
-    rv = ps_end_utt(mPs);
-    if (rv >= 0) {
-      hyp = ps_get_hyp(mPs, &score);
-      if (hyp == nullptr) {
-        hypoValue.Assign("ERROR");
-      } else {
-        hypoValue.Assign(hyp);
-      }
-    }
-
-    nsCOMPtr<nsIRunnable> resultrunnable =
-      new DecodeResultTask(NS_ConvertUTF8toUTF16(hypoValue), mRecognition);
-    return NS_DispatchToMainThread(resultrunnable);
-  }
-
-private:
-  WeakPtr<dom::SpeechRecognition> mRecognition;
-  nsTArray<int16_t> mAudiovector;
-  ps_decoder_t* mPs;
-};
-
-NS_IMPL_ISUPPORTS(PocketSphinxSpeechRecognitionService,
-                  nsISpeechRecognitionService, nsIObserver)
-
-PocketSphinxSpeechRecognitionService::PocketSphinxSpeechRecognitionService()
-{
-  mSpeexState = nullptr;
-
-  // get root folder
-  nsCOMPtr<nsIFile> tmpFile;
-  nsAutoString aStringAMPath;   // am folder
-  nsAutoString aStringDictPath; // dict folder
-
-  NS_GetSpecialDirectory(NS_GRE_DIR, getter_AddRefs(tmpFile));
-#if defined(XP_WIN) // for some reason, on windows NS_GRE_DIR is not bin root,
-                    // but bin/browser
-  tmpFile->AppendRelativePath(NS_LITERAL_STRING(".."));
-#endif
-  tmpFile->AppendRelativePath(NS_LITERAL_STRING("models"));
-  tmpFile->AppendRelativePath(NS_LITERAL_STRING("en-us-semi"));
-  tmpFile->GetPath(aStringAMPath);
-
-  NS_GetSpecialDirectory(NS_GRE_DIR, getter_AddRefs(tmpFile));
-#if defined(XP_WIN) // for some reason, on windows NS_GRE_DIR is not bin root,
-                    // but bin/browser
-  tmpFile->AppendRelativePath(NS_LITERAL_STRING(".."));
-#endif
-  tmpFile->AppendRelativePath(NS_LITERAL_STRING("models"));     //
-  tmpFile->AppendRelativePath(NS_LITERAL_STRING("dict"));       //
-  tmpFile->AppendRelativePath(NS_LITERAL_STRING("cmu07a.dic")); //
-  tmpFile->GetPath(aStringDictPath);
-
-  // FOR B2G PATHS HARDCODED (APPEND /DATA ON THE BEGINING, FOR DESKTOP, ONLY
-  // MODELS/ RELATIVE TO ROOT
-  mPSConfig = cmd_ln_init(nullptr, ps_args(), TRUE, "-hmm",
-                          ToNewUTF8String(aStringAMPath), // acoustic model
-                          "-dict", ToNewUTF8String(aStringDictPath), nullptr);
-  if (mPSConfig == nullptr) {
-    ISDecoderCreated = false;
-  } else {
-    mPSHandle = ps_init(mPSConfig);
-    if (mPSHandle == nullptr) {
-      ISDecoderCreated = false;
-    } else {
-      ISDecoderCreated = true;
-    }
-  }
-
-  ISGrammarCompiled = false;
-}
-
-PocketSphinxSpeechRecognitionService::~PocketSphinxSpeechRecognitionService()
-{
-  if (mPSConfig) {
-    free(mPSConfig);
-  }
-  if (mPSHandle) {
-    free(mPSHandle);
-  }
-
-  mSpeexState = nullptr;
-}
-
-// CALL START IN JS FALLS HERE
-NS_IMETHODIMP
-PocketSphinxSpeechRecognitionService::Initialize(
-    WeakPtr<SpeechRecognition> aSpeechRecognition)
-{
-  if (!ISDecoderCreated || !ISGrammarCompiled) {
-    return NS_ERROR_NOT_INITIALIZED;
-  } else {
-    mAudioVector.Clear();
-
-    if (mSpeexState) {
-      mSpeexState = nullptr;
-    }
-
-    mRecognition = aSpeechRecognition;
-    nsCOMPtr<nsIObserverService> obs = services::GetObserverService();
-    obs->AddObserver(this, SPEECH_RECOGNITION_TEST_EVENT_REQUEST_TOPIC, false);
-    obs->AddObserver(this, SPEECH_RECOGNITION_TEST_END_TOPIC, false);
-    return NS_OK;
-  }
-}
-
-NS_IMETHODIMP
-PocketSphinxSpeechRecognitionService::ProcessAudioSegment(
-  AudioSegment* aAudioSegment, int32_t aSampleRate)
-{
-  if (!mSpeexState) {
-    mSpeexState = speex_resampler_init(1, aSampleRate, 16000,
-                                       SPEEX_RESAMPLER_QUALITY_MAX, nullptr);
-  }
-  aAudioSegment->ResampleChunks(mSpeexState, aSampleRate, 16000);
-
-  AudioSegment::ChunkIterator iterator(*aAudioSegment);
-
-  while (!iterator.IsEnded()) {
-    mozilla::AudioChunk& chunk = *(iterator);
-    MOZ_ASSERT(chunk.mBuffer);
-    const int16_t* buf = static_cast<const int16_t*>(chunk.mChannelData[0]);
-
-    for (int i = 0; i < iterator->mDuration; i++) {
-      mAudioVector.AppendElement((int16_t)buf[i]);
-    }
-    iterator.Next();
-  }
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-PocketSphinxSpeechRecognitionService::SoundEnd()
-{
-  speex_resampler_destroy(mSpeexState);
-  mSpeexState = nullptr;
-
-  // To create a new thread, get the thread manager
-  nsCOMPtr<nsIThreadManager> tm = do_GetService(NS_THREADMANAGER_CONTRACTID);
-  nsCOMPtr<nsIThread> decodethread;
-  nsresult rv = tm->NewThread(0, 0, getter_AddRefs(decodethread));
-  if (NS_FAILED(rv)) {
-    // In case of failure, call back immediately with an empty string which
-    // indicates failure
-    return NS_OK;
-  }
-
-  nsCOMPtr<nsIRunnable> r =
-    new DecodeTask(mRecognition, mAudioVector, mPSHandle);
-  decodethread->Dispatch(r, nsIEventTarget::DISPATCH_NORMAL);
-
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-PocketSphinxSpeechRecognitionService::ValidateAndSetGrammarList(
-  SpeechGrammar* aSpeechGrammar,
-  nsISpeechGrammarCompilationCallback* aCallback)
-{
-  if (!ISDecoderCreated) {
-    ISGrammarCompiled = false;
-  } else if (aSpeechGrammar) {
-    nsAutoString grammar;
-    ErrorResult rv;
-    aSpeechGrammar->GetSrc(grammar, rv);
-
-    int result = ps_set_jsgf_string(mPSHandle, "name",
-                                    NS_ConvertUTF16toUTF8(grammar).get());
-
-    ps_set_search(mPSHandle, "name");
-
-    if (result != 0) {
-      ISGrammarCompiled = false;
-    } else {
-      ISGrammarCompiled = true;
-    }
-  } else {
-    ISGrammarCompiled = false;
-  }
-
-  return ISGrammarCompiled ? NS_OK : NS_ERROR_NOT_INITIALIZED;
-}
-
-NS_IMETHODIMP
-PocketSphinxSpeechRecognitionService::Abort()
-{
-  return NS_OK;
-}
-
-NS_IMETHODIMP
-PocketSphinxSpeechRecognitionService::Observe(nsISupports* aSubject,
-                                              const char* aTopic,
-                                              const char16_t* aData)
-{
-  MOZ_ASSERT(mRecognition->mTestConfig.mFakeRecognitionService,
-             "Got request to fake recognition service event, "
-             "but " TEST_PREFERENCE_FAKE_RECOGNITION_SERVICE " is not set");
-
-  if (!strcmp(aTopic, SPEECH_RECOGNITION_TEST_END_TOPIC)) {
-    nsCOMPtr<nsIObserverService> obs = services::GetObserverService();
-    obs->RemoveObserver(this, SPEECH_RECOGNITION_TEST_EVENT_REQUEST_TOPIC);
-    obs->RemoveObserver(this, SPEECH_RECOGNITION_TEST_END_TOPIC);
-
-    return NS_OK;
-  }
-
-  const nsDependentString eventName = nsDependentString(aData);
-
-  if (eventName.EqualsLiteral("EVENT_RECOGNITIONSERVICE_ERROR")) {
-    mRecognition->DispatchError(
-      SpeechRecognition::EVENT_RECOGNITIONSERVICE_ERROR,
-      SpeechRecognitionErrorCode::Network, // TODO different codes?
-      NS_LITERAL_STRING("RECOGNITIONSERVICE_ERROR test event"));
-
-  } else if (eventName.EqualsLiteral("EVENT_RECOGNITIONSERVICE_FINAL_RESULT")) {
-    nsRefPtr<SpeechEvent> event = new SpeechEvent(
-      mRecognition, SpeechRecognition::EVENT_RECOGNITIONSERVICE_FINAL_RESULT);
-
-    event->mRecognitionResultList = BuildMockResultList();
-    NS_DispatchToMainThread(event);
-  }
-
-  return NS_OK;
-}
-
-SpeechRecognitionResultList*
-PocketSphinxSpeechRecognitionService::BuildMockResultList()
-{
-  SpeechRecognitionResultList* resultList =
-    new SpeechRecognitionResultList(mRecognition);
-  SpeechRecognitionResult* result = new SpeechRecognitionResult(mRecognition);
-  SpeechRecognitionAlternative* alternative =
-    new SpeechRecognitionAlternative(mRecognition);
-
-  alternative->mTranscript = NS_LITERAL_STRING("Mock final result");
-  alternative->mConfidence = 0.0f;
-
-  result->mItems.AppendElement(alternative);
-  resultList->mItems.AppendElement(result);
-
-  return resultList;
-}
-
-} // namespace mozilla
deleted file mode 100644
--- a/dom/media/webspeech/recognition/PocketSphinxSpeechRecognitionService.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
-/* vim:set ts=2 sw=2 sts=2 et cindent: */
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef mozilla_dom_PocketSphinxRecognitionService_h
-#define mozilla_dom_PocketSphinxRecognitionService_h
-
-#include "nsCOMPtr.h"
-#include "nsTArray.h"
-#include "nsIObserver.h"
-#include "nsISpeechRecognitionService.h"
-#include "speex/speex_resampler.h"
-
-extern "C" {
-#include <pocketsphinx/pocketsphinx.h>
-#include <sphinxbase/sphinx_config.h>
-}
-
-#define NS_POCKETSPHINX_SPEECH_RECOGNITION_SERVICE_CID                         \
-  {                                                                            \
-    0x0ff5ce56, 0x5b09, 0x4db8, {                                              \
-      0xad, 0xc6, 0x82, 0x66, 0xaf, 0x95, 0xf8, 0x64                           \
-    }                                                                          \
-  };
-
-namespace mozilla {
-
-/**
- * Pocketsphix implementation of the nsISpeechRecognitionService interface
- */
-class PocketSphinxSpeechRecognitionService : public nsISpeechRecognitionService,
-                                             public nsIObserver
-{
-public:
-  // Add XPCOM glue code
-  NS_DECL_ISUPPORTS
-  NS_DECL_NSISPEECHRECOGNITIONSERVICE
-
-  // Add nsIObserver code
-  NS_DECL_NSIOBSERVER
-
-  /**
-   * Default constructs a PocketSphinxSpeechRecognitionService loading default
-   * files
-   */
-  PocketSphinxSpeechRecognitionService();
-
-private:
-  /**
-   * Private destructor to prevent bypassing of reference counting
-   */
-  virtual ~PocketSphinxSpeechRecognitionService();
-
-  /** The associated SpeechRecognition */
-  WeakPtr<dom::SpeechRecognition> mRecognition;
-
-  /**
-   * Builds a mock SpeechRecognitionResultList
-   */
-  dom::SpeechRecognitionResultList* BuildMockResultList();
-
-  /** Speex state */
-  SpeexResamplerState* mSpeexState;
-
-  /** Pocksphix decoder */
-  ps_decoder_t* mPSHandle;
-
-  /** Sphinxbase parsed command line arguments */
-  cmd_ln_t* mPSConfig;
-
-  /** Flag to verify if decoder was created */
-  bool ISDecoderCreated;
-
-  /** Flag to verify if grammar was compiled */
-  bool ISGrammarCompiled;
-
-  /** Audio data */
-  nsTArray<int16_t> mAudioVector;
-};
-
-} // namespace mozilla
-
-#endif
--- a/dom/media/webspeech/recognition/SpeechGrammar.cpp
+++ b/dom/media/webspeech/recognition/SpeechGrammar.cpp
@@ -48,24 +48,24 @@ JSObject*
 SpeechGrammar::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
 {
   return SpeechGrammarBinding::Wrap(aCx, this, aGivenProto);
 }
 
 void
 SpeechGrammar::GetSrc(nsString& aRetVal, ErrorResult& aRv) const
 {
-  aRetVal = mSrc;
+  aRv.Throw(NS_ERROR_NOT_IMPLEMENTED);
   return;
 }
 
 void
 SpeechGrammar::SetSrc(const nsAString& aArg, ErrorResult& aRv)
 {
-  mSrc = aArg;
+  aRv.Throw(NS_ERROR_NOT_IMPLEMENTED);
   return;
 }
 
 float
 SpeechGrammar::GetWeight(ErrorResult& aRv) const
 {
   aRv.Throw(NS_ERROR_NOT_IMPLEMENTED);
   return 0;
--- a/dom/media/webspeech/recognition/SpeechGrammar.h
+++ b/dom/media/webspeech/recognition/SpeechGrammar.h
@@ -44,16 +44,14 @@ public:
   float GetWeight(ErrorResult& aRv) const;
 
   void SetWeight(float aArg, ErrorResult& aRv);
 
 private:
   ~SpeechGrammar();
 
   nsCOMPtr<nsISupports> mParent;
-
-  nsString mSrc;
 };
 
 } // namespace dom
 } // namespace mozilla
 
 #endif
--- a/dom/media/webspeech/recognition/SpeechGrammarList.cpp
+++ b/dom/media/webspeech/recognition/SpeechGrammarList.cpp
@@ -10,17 +10,17 @@
 #include "mozilla/ErrorResult.h"
 #include "nsCOMPtr.h"
 #include "nsXPCOMStrings.h"
 #include "SpeechRecognition.h"
 
 namespace mozilla {
 namespace dom {
 
-NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE(SpeechGrammarList, mParent, mItems)
+NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE(SpeechGrammarList, mParent)
 NS_IMPL_CYCLE_COLLECTING_ADDREF(SpeechGrammarList)
 NS_IMPL_CYCLE_COLLECTING_RELEASE(SpeechGrammarList)
 NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(SpeechGrammarList)
   NS_WRAPPERCACHE_INTERFACE_MAP_ENTRY
   NS_INTERFACE_MAP_ENTRY(nsISupports)
 NS_INTERFACE_MAP_END
 
 SpeechGrammarList::SpeechGrammarList(nsISupports* aParent, nsISpeechRecognitionService* aRecognitionService)
@@ -59,53 +59,45 @@ nsISupports*
 SpeechGrammarList::GetParentObject() const
 {
   return mParent;
 }
 
 uint32_t
 SpeechGrammarList::Length() const
 {
-  return mItems.Length();
+  return 0;
 }
 
 already_AddRefed<SpeechGrammar>
 SpeechGrammarList::Item(uint32_t aIndex, ErrorResult& aRv)
 {
-  nsRefPtr<SpeechGrammar> result = mItems.ElementAt(aIndex);
-  return result.forget();
+  aRv.Throw(NS_ERROR_NOT_IMPLEMENTED);
+  return nullptr;
 }
 
 void
 SpeechGrammarList::AddFromURI(const nsAString& aSrc,
                               const Optional<float>& aWeight,
                               ErrorResult& aRv)
 {
   aRv.Throw(NS_ERROR_NOT_IMPLEMENTED);
   return;
 }
 
 void
 SpeechGrammarList::AddFromString(const nsAString& aString,
                                  const Optional<float>& aWeight,
                                  ErrorResult& aRv)
 {
-  SpeechGrammar* speechGrammar = new SpeechGrammar(mParent);
-  speechGrammar->SetSrc(aString, aRv);
-  mItems.AppendElement(speechGrammar);
-  mRecognitionService->ValidateAndSetGrammarList(speechGrammar, nullptr);
+  mRecognitionService->ValidateAndSetGrammarList(this, nullptr);
   return;
 }
 
 already_AddRefed<SpeechGrammar>
 SpeechGrammarList::IndexedGetter(uint32_t aIndex, bool& aPresent,
                                  ErrorResult& aRv)
 {
-  if (aIndex >= Length()) {
-    aPresent = false;
-    return nullptr;
-  }
-  ErrorResult rv;
-  aPresent = true;
-  return Item(aIndex, rv);
+  aRv.Throw(NS_ERROR_NOT_IMPLEMENTED);
+  return nullptr;
 }
 } // namespace dom
 } // namespace mozilla
--- a/dom/media/webspeech/recognition/SpeechGrammarList.h
+++ b/dom/media/webspeech/recognition/SpeechGrammarList.h
@@ -51,16 +51,14 @@ public:
   already_AddRefed<SpeechGrammar> IndexedGetter(uint32_t aIndex, bool& aPresent, ErrorResult& aRv);
 
   nsCOMPtr<nsISpeechRecognitionService> mRecognitionService;
 
 private:
   ~SpeechGrammarList();
 
   nsCOMPtr<nsISupports> mParent;
-
-  nsTArray<nsRefPtr<SpeechGrammar>> mItems;
 };
 
 } // namespace dom
 } // namespace mozilla
 
 #endif
--- a/dom/media/webspeech/recognition/SpeechRecognition.cpp
+++ b/dom/media/webspeech/recognition/SpeechRecognition.cpp
@@ -29,17 +29,17 @@
 #if defined(XP_WIN) && defined(GetMessage)
 #undef GetMessage
 #endif
 
 namespace mozilla {
 namespace dom {
 
 #define PREFERENCE_DEFAULT_RECOGNITION_SERVICE "media.webspeech.service.default"
-#define DEFAULT_RECOGNITION_SERVICE "pocketsphinx"
+#define DEFAULT_RECOGNITION_SERVICE "google"
 
 #define PREFERENCE_ENDPOINTER_SILENCE_LENGTH "media.webspeech.silence_length"
 #define PREFERENCE_ENDPOINTER_LONG_SILENCE_LENGTH "media.webspeech.long_silence_length"
 #define PREFERENCE_ENDPOINTER_LONG_SPEECH_LENGTH "media.webspeech.long_speech_length"
 
 static const uint32_t kSAMPLE_RATE = 16000;
 static const uint32_t kSPEECH_DETECTION_TIMEOUT_MS = 10000;
 
@@ -79,19 +79,19 @@ GetSpeechRecognitionService()
     speechRecognitionServiceCID =
       NS_LITERAL_CSTRING(NS_SPEECH_RECOGNITION_SERVICE_CONTRACTID_PREFIX) +
       speechRecognitionService;
   } else {
     speechRecognitionServiceCID =
       NS_SPEECH_RECOGNITION_SERVICE_CONTRACTID_PREFIX "fake";
   }
 
-  nsresult rv;
+  nsresult aRv;
   nsCOMPtr<nsISpeechRecognitionService> recognitionService;
-  recognitionService = do_GetService(speechRecognitionServiceCID.get(), &rv);
+  recognitionService = do_GetService(speechRecognitionServiceCID.get(), &aRv);
   return recognitionService.forget();
 }
 
 NS_INTERFACE_MAP_BEGIN(SpeechRecognition)
   NS_INTERFACE_MAP_ENTRY(nsIObserver)
 NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper)
 
 NS_IMPL_ADDREF_INHERITED(SpeechRecognition, DOMEventTargetHelper)
@@ -472,17 +472,17 @@ SpeechRecognition::NotifyFinalResult(Spe
 {
   ResetAndEnd();
 
   SpeechRecognitionEventInit init;
   init.mBubbles = true;
   init.mCancelable = false;
   // init.mResultIndex = 0;
   init.mResults = aEvent->mRecognitionResultList;
-  init.mInterpretation = JS::NullValue();
+  init.mInterpretation = NS_LITERAL_STRING("NOT_IMPLEMENTED");
   // init.mEmma = nullptr;
 
   nsRefPtr<SpeechRecognitionEvent> event =
     SpeechRecognitionEvent::Constructor(this, NS_LITERAL_STRING("result"), init);
   event->SetTrusted(true);
 
   bool defaultActionEnabled;
   this->DispatchEvent(event, &defaultActionEnabled);
@@ -530,19 +530,17 @@ SpeechRecognition::NotifyError(SpeechEve
  **************************************/
 NS_IMETHODIMP
 SpeechRecognition::StartRecording(DOMMediaStream* aDOMStream)
 {
   // hold a reference so that the underlying stream
   // doesn't get Destroy()'ed
   mDOMStream = aDOMStream;
 
-  if (NS_WARN_IF(!mDOMStream->GetStream())) {
-    return NS_ERROR_UNEXPECTED;
-  }
+  NS_ENSURE_STATE(mDOMStream->GetStream());
   mSpeechListener = new SpeechStreamListener(this);
   mDOMStream->GetStream()->AddListener(mSpeechListener);
 
   mEndpointer.StartSession();
 
   return mSpeechDetectionTimer->Init(this, kSPEECH_DETECTION_TIMEOUT_MS,
                                      nsITimer::TYPE_ONE_SHOT);
 }
@@ -695,25 +693,21 @@ void
 SpeechRecognition::Start(const Optional<NonNull<DOMMediaStream>>& aStream, ErrorResult& aRv)
 {
   if (mCurrentState != STATE_IDLE) {
     aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
     return;
   }
 
   mRecognitionService = GetSpeechRecognitionService();
-  if (NS_WARN_IF(!mRecognitionService)) {
-    return;
-  }
+  NS_ENSURE_TRUE_VOID(mRecognitionService);
 
   nsresult rv;
   rv = mRecognitionService->Initialize(this);
-  if (NS_WARN_IF(NS_FAILED(rv))) {
-    return;
-  }
+  NS_ENSURE_SUCCESS_VOID(rv);
 
   MediaStreamConstraints constraints;
   constraints.mAudio.SetAsBoolean() = true;
 
   if (aStream.WasPassed()) {
     StartRecording(&aStream.Value());
   } else {
     AutoNoJSAPI();
@@ -958,25 +952,25 @@ NS_IMETHODIMP
 SpeechRecognition::GetUserMediaErrorCallback::OnError(nsISupports* aError)
 {
   nsRefPtr<MediaStreamError> error = do_QueryObject(aError);
   if (!error) {
     return NS_OK;
   }
   SpeechRecognitionErrorCode errorCode;
 
-  nsAutoString name;
+  nsString name;
   error->GetName(name);
   if (name.EqualsLiteral("PERMISSION_DENIED")) {
     errorCode = SpeechRecognitionErrorCode::Not_allowed;
   } else {
     errorCode = SpeechRecognitionErrorCode::Audio_capture;
   }
 
-  nsAutoString message;
+  nsString message;
   error->GetMessage(message);
   mRecognition->DispatchError(SpeechRecognition::EVENT_AUDIO_ERROR, errorCode,
                               message);
   return NS_OK;
 }
 
 } // namespace dom
 } // namespace mozilla
--- a/dom/media/webspeech/recognition/SpeechRecognitionAlternative.cpp
+++ b/dom/media/webspeech/recognition/SpeechRecognitionAlternative.cpp
@@ -17,17 +17,18 @@ NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE(Sp
 NS_IMPL_CYCLE_COLLECTING_ADDREF(SpeechRecognitionAlternative)
 NS_IMPL_CYCLE_COLLECTING_RELEASE(SpeechRecognitionAlternative)
 NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(SpeechRecognitionAlternative)
   NS_WRAPPERCACHE_INTERFACE_MAP_ENTRY
   NS_INTERFACE_MAP_ENTRY(nsISupports)
 NS_INTERFACE_MAP_END
 
 SpeechRecognitionAlternative::SpeechRecognitionAlternative(SpeechRecognition* aParent)
-  : mConfidence(0)
+  : mTranscript(NS_LITERAL_STRING(""))
+  , mConfidence(0)
   , mParent(aParent)
 {
 }
 
 SpeechRecognitionAlternative::~SpeechRecognitionAlternative()
 {
 }
 
--- a/dom/media/webspeech/recognition/SpeechRecognitionResult.cpp
+++ b/dom/media/webspeech/recognition/SpeechRecognitionResult.cpp
@@ -62,14 +62,14 @@ SpeechRecognitionResult::Length() const
 already_AddRefed<SpeechRecognitionAlternative>
 SpeechRecognitionResult::Item(uint32_t aIndex)
 {
   nsRefPtr<SpeechRecognitionAlternative> alternative = mItems.ElementAt(aIndex);
   return alternative.forget();
 }
 
 bool
-SpeechRecognitionResult::IsFinal() const
+SpeechRecognitionResult::Final() const
 {
   return true; // TODO
 }
 } // namespace dom
 } // namespace mozilla
--- a/dom/media/webspeech/recognition/SpeechRecognitionResult.h
+++ b/dom/media/webspeech/recognition/SpeechRecognitionResult.h
@@ -33,21 +33,21 @@ public:
   nsISupports* GetParentObject() const;
 
   virtual JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override;
 
   uint32_t Length() const;
 
   already_AddRefed<SpeechRecognitionAlternative> Item(uint32_t aIndex);
 
-  bool IsFinal() const;
+  bool Final() const;
 
   already_AddRefed<SpeechRecognitionAlternative> IndexedGetter(uint32_t aIndex, bool& aPresent);
 
-  nsTArray<nsRefPtr<SpeechRecognitionAlternative>> mItems;
+  nsTArray<nsRefPtr<SpeechRecognitionAlternative> > mItems;
 
 private:
   ~SpeechRecognitionResult();
 
   nsRefPtr<SpeechRecognition> mParent;
 };
 
 } // namespace dom
--- a/dom/media/webspeech/recognition/SpeechRecognitionResultList.h
+++ b/dom/media/webspeech/recognition/SpeechRecognitionResultList.h
@@ -36,17 +36,17 @@ public:
   virtual JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override;
 
   uint32_t Length() const;
 
   already_AddRefed<SpeechRecognitionResult> Item(uint32_t aIndex);
 
   already_AddRefed<SpeechRecognitionResult> IndexedGetter(uint32_t aIndex, bool& aPresent);
 
-  nsTArray<nsRefPtr<SpeechRecognitionResult>> mItems;
+  nsTArray<nsRefPtr<SpeechRecognitionResult> > mItems;
 private:
   ~SpeechRecognitionResultList();
 
   nsRefPtr<SpeechRecognition> mParent;
 };
 
 } // namespace dom
 } // namespace mozilla
--- a/dom/media/webspeech/recognition/moz.build
+++ b/dom/media/webspeech/recognition/moz.build
@@ -21,21 +21,16 @@ EXPORTS.mozilla.dom += [
     'SpeechStreamListener.h',
 ]
 
 if CONFIG['MOZ_WEBSPEECH_TEST_BACKEND']:
     EXPORTS.mozilla.dom += [
         'test/FakeSpeechRecognitionService.h',
     ]
 
-if CONFIG['MOZ_WEBSPEECH_POCKETSPHINX']:
-    EXPORTS.mozilla.dom += [
-        'PocketSphinxSpeechRecognitionService.h',
-    ]
-
 UNIFIED_SOURCES += [
     'endpointer.cc',
     'energy_endpointer.cc',
     'energy_endpointer_params.cc',
     'SpeechGrammar.cpp',
     'SpeechGrammarList.cpp',
     'SpeechRecognition.cpp',
     'SpeechRecognitionAlternative.cpp',
@@ -44,28 +39,17 @@ UNIFIED_SOURCES += [
     'SpeechStreamListener.cpp',
 ]
 
 if CONFIG['MOZ_WEBSPEECH_TEST_BACKEND']:
     UNIFIED_SOURCES += [
         'test/FakeSpeechRecognitionService.cpp',
     ]
 
-if CONFIG['MOZ_WEBSPEECH_POCKETSPHINX']:
-    UNIFIED_SOURCES += [
-        'PocketSphinxSpeechRecognitionService.cpp',
-    ]
-
 LOCAL_INCLUDES += [
     '/dom/base',
-    '/media/sphinxbase',
 ]
 
-if CONFIG['MOZ_WEBSPEECH_POCKETSPHINX']:
-    LOCAL_INCLUDES += [
-        '/media/pocketsphinx',
-    ]
-
 include('/ipc/chromium/chromium-config.mozbuild')
 
 FINAL_LIBRARY = 'xul'
 
 FAIL_ON_WARNINGS = True
--- a/dom/media/webspeech/recognition/nsISpeechRecognitionService.idl
+++ b/dom/media/webspeech/recognition/nsISpeechRecognitionService.idl
@@ -16,28 +16,28 @@ class SpeechRecognitionResultList;
 class SpeechGrammarList;
 class SpeechGrammar;
 }
 }
 %}
 
 native SpeechRecognitionWeakPtr(mozilla::WeakPtr<mozilla::dom::SpeechRecognition>);
 [ptr] native AudioSegmentPtr(mozilla::AudioSegment);
+[ptr] native SpeechGrammarListPtr(mozilla::dom::SpeechGrammarList);
 [ptr] native SpeechGrammarPtr(mozilla::dom::SpeechGrammar);
-[ptr] native SpeechGrammarListPtr(mozilla::dom::SpeechGrammarList);
 
 [uuid(374583f0-4507-11e4-a183-164230d1df67)]
 interface nsISpeechGrammarCompilationCallback : nsISupports {
     void grammarCompilationEnd(in SpeechGrammarPtr grammarObject, in boolean success);
 };
 
 [uuid(857f3fa2-a980-4d3e-a959-a2f53af74232)]
 interface nsISpeechRecognitionService : nsISupports {
     void initialize(in SpeechRecognitionWeakPtr aSpeechRecognition);
     void processAudioSegment(in AudioSegmentPtr aAudioSegment, in long aSampleRate);
-    void validateAndSetGrammarList(in SpeechGrammarPtr aSpeechGrammar, in nsISpeechGrammarCompilationCallback aCallback);
+    void validateAndSetGrammarList(in SpeechGrammarListPtr aSpeechGramarList, in nsISpeechGrammarCompilationCallback aCallback);
     void soundEnd();
     void abort();
 };
 
 %{C++
 #define NS_SPEECH_RECOGNITION_SERVICE_CONTRACTID_PREFIX "@mozilla.org/webspeech/service;1?name="
 %}
--- a/dom/media/webspeech/recognition/test/FakeSpeechRecognitionService.cpp
+++ b/dom/media/webspeech/recognition/test/FakeSpeechRecognitionService.cpp
@@ -47,17 +47,17 @@ FakeSpeechRecognitionService::ProcessAud
 
 NS_IMETHODIMP
 FakeSpeechRecognitionService::SoundEnd()
 {
   return NS_OK;
 }
 
 NS_IMETHODIMP
-FakeSpeechRecognitionService::ValidateAndSetGrammarList(mozilla::dom::SpeechGrammar*, nsISpeechGrammarCompilationCallback*)
+FakeSpeechRecognitionService::ValidateAndSetGrammarList(mozilla::dom::SpeechGrammarList*, nsISpeechGrammarCompilationCallback*)
 {
   return NS_OK;
 }
 
 NS_IMETHODIMP
 FakeSpeechRecognitionService::Abort()
 {
   return NS_OK;
--- a/dom/media/webspeech/recognition/test/head.js
+++ b/dom/media/webspeech/recognition/test/head.js
@@ -33,17 +33,17 @@ function EventManager(sr) {
     "nomatch",
     "error",
     "start",
     "end"
   ];
 
   var eventDependencies = {
     "speechend": "speechstart",
-    "soundend": "soundstart",
+    "soundent": "soundstart",
     "audioend": "audiostart"
   };
 
   var isDone = false;
 
   // AUDIO_DATA events are asynchronous,
   // so we queue events requested while they are being
   // issued to make them seem synchronous
--- a/dom/tests/mochitest/general/test_interfaces.html
+++ b/dom/tests/mochitest/general/test_interfaces.html
@@ -934,32 +934,16 @@ var interfaceNamesInGlobalScope =
     "SimpleGestureEvent",
 // IMPORTANT: Do not change this list without review from a DOM peer!
     {name: "SimpleTest", xbl: false},
 // IMPORTANT: Do not change this list without review from a DOM peer!
     {name: "SourceBuffer", linux: false, release: false},
 // IMPORTANT: Do not change this list without review from a DOM peer!
     {name: "SourceBufferList", linux: false, release: false},
 // IMPORTANT: Do not change this list without review from a DOM peer!
-    {name: "SpeechRecognition", b2g: true, nightly: true},
-// IMPORTANT: Do not change this list without review from a DOM peer!
-    {name: "SpeechRecognitionError", b2g: true, nightly: true},
-// IMPORTANT: Do not change this list without review from a DOM peer!
-    {name: "SpeechRecognitionAlternative", b2g: true, nightly: true},
-// IMPORTANT: Do not change this list without review from a DOM peer!
-    {name: "SpeechRecognitionResult", b2g: true, nightly: true},
-// IMPORTANT: Do not change this list without review from a DOM peer!
-    {name: "SpeechRecognitionResultList", b2g: true, nightly: true},
-// IMPORTANT: Do not change this list without review from a DOM peer!
-    {name: "SpeechRecognitionEvent", b2g: true, nightly: true},
-// IMPORTANT: Do not change this list without review from a DOM peer!
-    {name: "SpeechGrammar", b2g: true, nightly: true},
-// IMPORTANT: Do not change this list without review from a DOM peer!
-    {name: "SpeechGrammarList", b2g: true, nightly: true},
-// IMPORTANT: Do not change this list without review from a DOM peer!
     {name: "SpeechSynthesisEvent", b2g: true},
 // IMPORTANT: Do not change this list without review from a DOM peer!
     {name: "SpeechSynthesis", b2g: true},
 // IMPORTANT: Do not change this list without review from a DOM peer!
     {name: "SpeechSynthesisUtterance", b2g: true},
 // IMPORTANT: Do not change this list without review from a DOM peer!
     {name: "SpeechSynthesisVoice", b2g: true},
 // IMPORTANT: Do not change this list without review from a DOM peer!
--- a/dom/webidl/SpeechRecognitionEvent.webidl
+++ b/dom/webidl/SpeechRecognitionEvent.webidl
@@ -5,20 +5,20 @@
  */
 interface nsISupports;
 
 [Pref="media.webspeech.recognition.enable",
  Constructor(DOMString type, optional SpeechRecognitionEventInit eventInitDict)]
 interface SpeechRecognitionEvent : Event
 {
   readonly attribute unsigned long resultIndex;
-  readonly attribute SpeechRecognitionResultList? results;
-  readonly attribute any interpretation;
+  readonly attribute nsISupports? results;
+  readonly attribute DOMString? interpretation;
   readonly attribute Document? emma;
 };
 
 dictionary SpeechRecognitionEventInit : EventInit
 {
   unsigned long resultIndex = 0;
-  SpeechRecognitionResultList? results = null;
-  any interpretation = null;
+  nsISupports? results = null;
+  DOMString interpretation = "";
   Document? emma = null;
 };
--- a/dom/webidl/SpeechRecognitionResult.webidl
+++ b/dom/webidl/SpeechRecognitionResult.webidl
@@ -9,10 +9,10 @@
  * Copyright © 2012 W3C® (MIT, ERCIM, Keio), All Rights Reserved. W3C
  * liability, trademark and document use rules apply.
  */
 
 [Pref="media.webspeech.recognition.enable"]
 interface SpeechRecognitionResult {
     readonly attribute unsigned long length;
     getter SpeechRecognitionAlternative item(unsigned long index);
-    readonly attribute boolean isFinal;
+    readonly attribute boolean final;
 };
--- a/layout/build/moz.build
+++ b/layout/build/moz.build
@@ -114,17 +114,16 @@ if CONFIG['MOZ_B2G_BT']:
         ]
     else:
         LOCAL_INCLUDES += [
             '/dom/bluetooth/bluetooth1',
         ]
 
 if CONFIG['MOZ_WEBSPEECH']:
     LOCAL_INCLUDES += [
-        '/dom/media/webspeech/recognition',
         '/dom/media/webspeech/synth',
     ]
 
 if CONFIG['MOZ_WEBSPEECH_POCKETSPHINX']:
     LOCAL_INCLUDES += [
         '/media/pocketsphinx',
         '/media/sphinxbase',
     ]
--- a/layout/build/nsLayoutModule.cpp
+++ b/layout/build/nsLayoutModule.cpp
@@ -94,19 +94,16 @@
 #include "mozilla/dom/workers/ServiceWorkerPeriodicUpdater.h"
 #include "mozilla/dom/workers/WorkerDebuggerManager.h"
 #include "mozilla/OSFileConstants.h"
 #include "mozilla/Services.h"
 
 #ifdef MOZ_WEBSPEECH_TEST_BACKEND
 #include "mozilla/dom/FakeSpeechRecognitionService.h"
 #endif
-#ifdef MOZ_WEBSPEECH_POCKETSPHINX
-#include "mozilla/dom/PocketSphinxSpeechRecognitionService.h"
-#endif
 #ifdef MOZ_WEBSPEECH
 #include "mozilla/dom/nsSynthVoiceRegistry.h"
 #endif
 
 #ifdef MOZ_WIDGET_GONK
 #include "SystemWorkerManager.h"
 using mozilla::dom::gonk::SystemWorkerManager;
 #define SYSTEMWORKERMANAGER_CID \
@@ -633,19 +630,16 @@ NS_GENERIC_FACTORY_SINGLETON_CONSTRUCTOR
 
 NS_GENERIC_FACTORY_SINGLETON_CONSTRUCTOR(AudioChannelService, AudioChannelService::GetOrCreateAudioChannelService)
 
 NS_GENERIC_FACTORY_SINGLETON_CONSTRUCTOR(DataStoreService, DataStoreService::GetOrCreate)
 
 #ifdef MOZ_WEBSPEECH_TEST_BACKEND
 NS_GENERIC_FACTORY_CONSTRUCTOR(FakeSpeechRecognitionService)
 #endif
-#ifdef MOZ_WEBSPEECH_POCKETSPHINX
-NS_GENERIC_FACTORY_CONSTRUCTOR(PocketSphinxSpeechRecognitionService)
-#endif
 
 NS_GENERIC_FACTORY_CONSTRUCTOR(nsCSPContext)
 NS_GENERIC_FACTORY_CONSTRUCTOR(CSPService)
 NS_GENERIC_FACTORY_CONSTRUCTOR(nsMixedContentBlocker)
 
 NS_GENERIC_FACTORY_CONSTRUCTOR(nsPrincipal)
 NS_GENERIC_FACTORY_SINGLETON_CONSTRUCTOR(nsSystemPrincipal,
     nsScriptSecurityManager::SystemPrincipalSingletonConstructor)
@@ -832,19 +826,16 @@ NS_DEFINE_NAMED_CID(NS_TIMESERVICE_CID);
 NS_DEFINE_NAMED_CID(NS_MEDIASTREAMCONTROLLERSERVICE_CID);
 NS_DEFINE_NAMED_CID(NS_MEDIAMANAGERSERVICE_CID);
 #ifdef MOZ_GAMEPAD
 NS_DEFINE_NAMED_CID(NS_GAMEPAD_TEST_CID);
 #endif
 #ifdef MOZ_WEBSPEECH_TEST_BACKEND
 NS_DEFINE_NAMED_CID(NS_FAKE_SPEECH_RECOGNITION_SERVICE_CID);
 #endif
-#ifdef MOZ_WEBSPEECH_POCKETSPHINX
-NS_DEFINE_NAMED_CID(NS_POCKETSPHINX_SPEECH_RECOGNITION_SERVICE_CID);
-#endif
 #ifdef MOZ_WEBSPEECH
 NS_DEFINE_NAMED_CID(NS_SYNTHVOICEREGISTRY_CID);
 #endif
 
 #ifdef ACCESSIBILITY
 NS_DEFINE_NAMED_CID(NS_ACCESSIBILITY_SERVICE_CID);
 #endif
 NS_DEFINE_NAMED_CID(FAKE_TV_SERVICE_CID);
@@ -1092,19 +1083,16 @@ static const mozilla::Module::CIDEntry k
   { &kNS_GEOLOCATION_SERVICE_CID, false, nullptr, nsGeolocationServiceConstructor },
   { &kNS_GEOLOCATION_CID, false, nullptr, GeolocationConstructor },
   { &kNS_AUDIOCHANNEL_SERVICE_CID, false, nullptr, AudioChannelServiceConstructor },
   { &kNS_DATASTORE_SERVICE_CID, false, nullptr, DataStoreServiceConstructor },
   { &kNS_FOCUSMANAGER_CID, false, nullptr, CreateFocusManager },
 #ifdef MOZ_WEBSPEECH_TEST_BACKEND
   { &kNS_FAKE_SPEECH_RECOGNITION_SERVICE_CID, false, nullptr, FakeSpeechRecognitionServiceConstructor },
 #endif
-#ifdef MOZ_WEBSPEECH_POCKETSPHINX
-  { &kNS_POCKETSPHINX_SPEECH_RECOGNITION_SERVICE_CID, false, nullptr, PocketSphinxSpeechRecognitionServiceConstructor },
-#endif
 #ifdef MOZ_WEBSPEECH
   { &kNS_SYNTHVOICEREGISTRY_CID, true, nullptr, nsSynthVoiceRegistryConstructor },
 #endif
   { &kCSPSERVICE_CID, false, nullptr, CSPServiceConstructor },
   { &kNS_CSPCONTEXT_CID, false, nullptr, nsCSPContextConstructor },
   { &kNS_MIXEDCONTENTBLOCKER_CID, false, nullptr, nsMixedContentBlockerConstructor },
   { &kNS_EVENTLISTENERSERVICE_CID, false, nullptr, CreateEventListenerService },
   { &kNS_GLOBALMESSAGEMANAGER_CID, false, nullptr, CreateGlobalMessageManager },
@@ -1259,19 +1247,16 @@ static const mozilla::Module::ContractID
   { "@mozilla.org/geolocation/service;1", &kNS_GEOLOCATION_SERVICE_CID },
   { "@mozilla.org/geolocation;1", &kNS_GEOLOCATION_CID },
   { "@mozilla.org/audiochannel/service;1", &kNS_AUDIOCHANNEL_SERVICE_CID },
   { "@mozilla.org/datastore-service;1", &kNS_DATASTORE_SERVICE_CID },
   { "@mozilla.org/focus-manager;1", &kNS_FOCUSMANAGER_CID },
 #ifdef MOZ_WEBSPEECH_TEST_BACKEND
   { NS_SPEECH_RECOGNITION_SERVICE_CONTRACTID_PREFIX "fake", &kNS_FAKE_SPEECH_RECOGNITION_SERVICE_CID },
 #endif
-#ifdef MOZ_WEBSPEECH_POCKETSPHINX
-  { NS_SPEECH_RECOGNITION_SERVICE_CONTRACTID_PREFIX "pocketsphinx", &kNS_POCKETSPHINX_SPEECH_RECOGNITION_SERVICE_CID },
-#endif
 #ifdef MOZ_WEBSPEECH
   { NS_SYNTHVOICEREGISTRY_CONTRACTID, &kNS_SYNTHVOICEREGISTRY_CID },
 #endif
   { CSPSERVICE_CONTRACTID, &kCSPSERVICE_CID },
   { NS_CSPCONTEXT_CONTRACTID, &kNS_CSPCONTEXT_CID },
   { NS_MIXEDCONTENTBLOCKER_CONTRACTID, &kNS_MIXEDCONTENTBLOCKER_CID },
   { NS_EVENTLISTENERSERVICE_CONTRACTID, &kNS_EVENTLISTENERSERVICE_CID },
   { NS_GLOBALMESSAGEMANAGER_CONTRACTID, &kNS_GLOBALMESSAGEMANAGER_CID },