Bug 1185018 - Part 1 of 1 - Made speech recognition services language dependent and removed assumption of a single service. r=smaug
authorKelly Davis <kdavis@mozilla.com>
Sun, 02 Aug 2015 13:43:00 +0200
changeset 287516 ee7df762c1950867b6ff57be5dac80266c33882f
parent 287515 5490127f9b99dc432aab33cc01b05362874c30cf
child 287517 dfe577ec4e569e36f4f79d9c9c731ab9d717c093
push id 5067
push user raliiev@mozilla.com
push date Mon, 21 Sep 2015 14:04:52 +0000
treeherder mozilla-beta@14221ffe5b2f [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers smaug
bugs 1185018
milestone 42.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1185018 - Part 1 of 1 - Made speech recognition services language dependent and removed assumption of a single service. r=smaug
b2g/app/b2g.js
dom/media/webspeech/recognition/SpeechGrammarList.cpp
dom/media/webspeech/recognition/SpeechGrammarList.h
dom/media/webspeech/recognition/SpeechRecognition.cpp
dom/media/webspeech/recognition/SpeechRecognition.h
dom/media/webspeech/recognition/test/head.js
dom/media/webspeech/recognition/test/test_audio_capture_error.html
dom/webidl/SpeechRecognition.webidl
layout/build/nsLayoutModule.cpp
--- a/b2g/app/b2g.js
+++ b/b2g/app/b2g.js
@@ -1011,17 +1011,16 @@ pref("network.proxy.pac_generator", true
 // comma.  Specify '*' in the list to apply to all apps.
 pref("network.proxy.browsing.app_origins", "app://system.gaiamobile.org");
 
 // Enable Web Speech synthesis API
 pref("media.webspeech.synth.enabled", true);
 
 // Enable Web Speech recognition API
 pref("media.webspeech.recognition.enable", true);
-pref("media.webspeech.service.default", "pocketsphinx");
 
 // Downloads API
 pref("dom.mozDownloads.enabled", true);
 pref("dom.downloads.max_retention_days", 7);
 
 // External Helper Application Handling
 //
 // All external helper application handling can require the docshell to be
--- a/dom/media/webspeech/recognition/SpeechGrammarList.cpp
+++ b/dom/media/webspeech/recognition/SpeechGrammarList.cpp
@@ -18,40 +18,32 @@ namespace dom {
 NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE(SpeechGrammarList, mParent, mItems)
 NS_IMPL_CYCLE_COLLECTING_ADDREF(SpeechGrammarList)
 NS_IMPL_CYCLE_COLLECTING_RELEASE(SpeechGrammarList)
 NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION(SpeechGrammarList)
   NS_WRAPPERCACHE_INTERFACE_MAP_ENTRY
   NS_INTERFACE_MAP_ENTRY(nsISupports)
 NS_INTERFACE_MAP_END
 
-SpeechGrammarList::SpeechGrammarList(nsISupports* aParent, nsISpeechRecognitionService* aRecognitionService)
+SpeechGrammarList::SpeechGrammarList(nsISupports* aParent)
   : mParent(aParent)
 {
-  this->mRecognitionService = aRecognitionService;
 }
 
 SpeechGrammarList::~SpeechGrammarList()
 {
 }
 
 already_AddRefed<SpeechGrammarList>
 SpeechGrammarList::Constructor(const GlobalObject& aGlobal,
                                ErrorResult& aRv)
 {
-  nsCOMPtr<nsISpeechRecognitionService> recognitionService;
-  recognitionService = GetSpeechRecognitionService();
-  if (!recognitionService) {
-    aRv.Throw(NS_ERROR_NOT_AVAILABLE);
-    return nullptr;
-  } else {
-    nsRefPtr<SpeechGrammarList> speechGrammarList =
-      new SpeechGrammarList(aGlobal.GetAsSupports(), recognitionService);
-    return speechGrammarList.forget();
-  }
+  nsRefPtr<SpeechGrammarList> speechGrammarList =
+    new SpeechGrammarList(aGlobal.GetAsSupports());
+  return speechGrammarList.forget();
 }
 
 JSObject*
 SpeechGrammarList::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
 {
   return SpeechGrammarListBinding::Wrap(aCx, this, aGivenProto);
 }
 
@@ -86,17 +78,16 @@ SpeechGrammarList::AddFromURI(const nsAS
 void
 SpeechGrammarList::AddFromString(const nsAString& aString,
                                  const Optional<float>& aWeight,
                                  ErrorResult& aRv)
 {
   SpeechGrammar* speechGrammar = new SpeechGrammar(mParent);
   speechGrammar->SetSrc(aString, aRv);
   mItems.AppendElement(speechGrammar);
-  mRecognitionService->ValidateAndSetGrammarList(speechGrammar, nullptr);
   return;
 }
 
 already_AddRefed<SpeechGrammar>
 SpeechGrammarList::IndexedGetter(uint32_t aIndex, bool& aPresent,
                                  ErrorResult& aRv)
 {
   if (aIndex >= Length()) {
--- a/dom/media/webspeech/recognition/SpeechGrammarList.h
+++ b/dom/media/webspeech/recognition/SpeechGrammarList.h
@@ -6,17 +6,16 @@
 
 #ifndef mozilla_dom_SpeechGrammarList_h
 #define mozilla_dom_SpeechGrammarList_h
 
 #include "mozilla/Attributes.h"
 #include "nsCOMPtr.h"
 #include "nsCycleCollectionParticipant.h"
 #include "nsWrapperCache.h"
-#include "nsISpeechRecognitionService.h"
 
 struct JSContext;
 
 namespace mozilla {
 
 class ErrorResult;
 
 namespace dom {
@@ -24,17 +23,17 @@ namespace dom {
 class GlobalObject;
 class SpeechGrammar;
 template<typename> class Optional;
 
 class SpeechGrammarList final : public nsISupports,
                                 public nsWrapperCache
 {
 public:
-  explicit SpeechGrammarList(nsISupports* aParent, nsISpeechRecognitionService* aRecognitionService);
+  explicit SpeechGrammarList(nsISupports* aParent);
 
   NS_DECL_CYCLE_COLLECTING_ISUPPORTS
   NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_CLASS(SpeechGrammarList)
 
   static already_AddRefed<SpeechGrammarList> Constructor(const GlobalObject& aGlobal, ErrorResult& aRv);
 
   nsISupports* GetParentObject() const;
 
@@ -45,18 +44,16 @@ public:
   already_AddRefed<SpeechGrammar> Item(uint32_t aIndex, ErrorResult& aRv);
 
   void AddFromURI(const nsAString& aSrc, const Optional<float>& aWeight, ErrorResult& aRv);
 
   void AddFromString(const nsAString& aString, const Optional<float>& aWeight, ErrorResult& aRv);
 
   already_AddRefed<SpeechGrammar> IndexedGetter(uint32_t aIndex, bool& aPresent, ErrorResult& aRv);
 
-  nsCOMPtr<nsISpeechRecognitionService> mRecognitionService;
-
 private:
   ~SpeechGrammarList();
 
   nsCOMPtr<nsISupports> mParent;
 
   nsTArray<nsRefPtr<SpeechGrammar>> mItems;
 };
 
--- a/dom/media/webspeech/recognition/SpeechRecognition.cpp
+++ b/dom/media/webspeech/recognition/SpeechRecognition.cpp
@@ -5,42 +5,46 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "SpeechRecognition.h"
 
 #include "nsCOMPtr.h"
 #include "nsCycleCollectionParticipant.h"
 
 #include "mozilla/dom/BindingUtils.h"
+#include "mozilla/dom/Element.h"
 #include "mozilla/dom/SpeechRecognitionBinding.h"
 #include "mozilla/dom/MediaStreamTrackBinding.h"
 #include "mozilla/dom/MediaStreamError.h"
 #include "mozilla/MediaManager.h"
 #include "mozilla/Services.h"
 
 #include "AudioSegment.h"
 #include "endpointer.h"
 
 #include "mozilla/dom/SpeechRecognitionEvent.h"
+#include "nsIDocument.h"
 #include "nsIObserverService.h"
+#include "nsPIDOMWindow.h"
 #include "nsServiceManagerUtils.h"
 #include "nsQueryObject.h"
 
 #include <algorithm>
 
 // Undo the windows.h damage
 #if defined(XP_WIN) && defined(GetMessage)
 #undef GetMessage
 #endif
 
 namespace mozilla {
 namespace dom {
 
 #define PREFERENCE_DEFAULT_RECOGNITION_SERVICE "media.webspeech.service.default"
-#define DEFAULT_RECOGNITION_SERVICE "pocketsphinx"
+#define DEFAULT_RECOGNITION_SERVICE_PREFIX "pocketsphinx-"
+#define DEFAULT_RECOGNITION_SERVICE "pocketsphinx-en-US"
 
 #define PREFERENCE_ENDPOINTER_SILENCE_LENGTH "media.webspeech.silence_length"
 #define PREFERENCE_ENDPOINTER_LONG_SILENCE_LENGTH "media.webspeech.long_silence_length"
 #define PREFERENCE_ENDPOINTER_LONG_SPEECH_LENGTH "media.webspeech.long_speech_length"
 
 static const uint32_t kSAMPLE_RATE = 16000;
 static const uint32_t kSPEECH_DETECTION_TIMEOUT_MS = 10000;
 
@@ -57,59 +61,66 @@ GetSpeechRecognitionLog()
     sLog = PR_NewLogModule("SpeechRecognition");
   }
 
   return sLog;
 }
 #define SR_LOG(...) MOZ_LOG(GetSpeechRecognitionLog(), mozilla::LogLevel::Debug, (__VA_ARGS__))
 
 already_AddRefed<nsISpeechRecognitionService>
-GetSpeechRecognitionService()
+GetSpeechRecognitionService(const nsAString& aLang)
 {
   nsAutoCString speechRecognitionServiceCID;
 
   nsAdoptingCString prefValue =
   Preferences::GetCString(PREFERENCE_DEFAULT_RECOGNITION_SERVICE);
   nsAutoCString speechRecognitionService;
 
-  if (!prefValue.get() || prefValue.IsEmpty()) {
+  if (!aLang.IsEmpty()) {
+    speechRecognitionService =
+      NS_LITERAL_CSTRING(DEFAULT_RECOGNITION_SERVICE_PREFIX) +
+      NS_ConvertUTF16toUTF8(aLang);
+  } else if (!prefValue.IsEmpty()) {
+    speechRecognitionService = prefValue;
+  } else {
     speechRecognitionService = DEFAULT_RECOGNITION_SERVICE;
-  } else {
-    speechRecognitionService = prefValue;
   }
 
-  if (!SpeechRecognition::mTestConfig.mFakeRecognitionService){
+  if (SpeechRecognition::mTestConfig.mFakeRecognitionService) {
+    speechRecognitionServiceCID =
+      NS_SPEECH_RECOGNITION_SERVICE_CONTRACTID_PREFIX "fake";
+  } else {
     speechRecognitionServiceCID =
       NS_LITERAL_CSTRING(NS_SPEECH_RECOGNITION_SERVICE_CONTRACTID_PREFIX) +
       speechRecognitionService;
-  } else {
-    speechRecognitionServiceCID =
-      NS_SPEECH_RECOGNITION_SERVICE_CONTRACTID_PREFIX "fake";
   }
 
   nsresult rv;
   nsCOMPtr<nsISpeechRecognitionService> recognitionService;
   recognitionService = do_GetService(speechRecognitionServiceCID.get(), &rv);
   return recognitionService.forget();
 }
 
-NS_INTERFACE_MAP_BEGIN(SpeechRecognition)
+NS_IMPL_CYCLE_COLLECTION_INHERITED(SpeechRecognition, DOMEventTargetHelper, mDOMStream, mSpeechGrammarList)
+
+NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(SpeechRecognition)
   NS_INTERFACE_MAP_ENTRY(nsIObserver)
 NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper)
 
 NS_IMPL_ADDREF_INHERITED(SpeechRecognition, DOMEventTargetHelper)
 NS_IMPL_RELEASE_INHERITED(SpeechRecognition, DOMEventTargetHelper)
 
 struct SpeechRecognition::TestConfig SpeechRecognition::mTestConfig;
 
 SpeechRecognition::SpeechRecognition(nsPIDOMWindow* aOwnerWindow)
   : DOMEventTargetHelper(aOwnerWindow)
   , mEndpointer(kSAMPLE_RATE)
   , mAudioSamplesPerChunk(mEndpointer.FrameSize())
   , mSpeechDetectionTimer(do_CreateInstance(NS_TIMER_CONTRACTID))
+  , mSpeechGrammarList(new SpeechGrammarList(GetParentObject()))
 {
   SR_LOG("created SpeechRecognition");
 
   mTestConfig.Init();
   if (mTestConfig.mEnableTests) {
     nsCOMPtr<nsIObserverService> obs = services::GetObserverService();
     obs->AddObserver(this, SPEECH_RECOGNITION_TEST_EVENT_REQUEST_TOPIC, false);
     obs->AddObserver(this, SPEECH_RECOGNITION_TEST_END_TOPIC, false);
@@ -614,41 +625,38 @@ SpeechRecognition::ProcessTestEventReque
 
     // let the fake recognition service handle the request
   }
 
   return;
 }
 
 already_AddRefed<SpeechGrammarList>
-SpeechRecognition::GetGrammars(ErrorResult& aRv) const
+SpeechRecognition::Grammars() const
 {
-  aRv.Throw(NS_ERROR_NOT_IMPLEMENTED);
-  return nullptr;
+  nsRefPtr<SpeechGrammarList> speechGrammarList = mSpeechGrammarList;
+  return speechGrammarList.forget();
 }
 
 void
-SpeechRecognition::SetGrammars(SpeechGrammarList& aArg, ErrorResult& aRv)
+SpeechRecognition::SetGrammars(SpeechGrammarList& aArg)
 {
-  aRv.Throw(NS_ERROR_NOT_IMPLEMENTED);
-  return;
+  mSpeechGrammarList = &aArg;
 }
 
 void
-SpeechRecognition::GetLang(nsString& aRetVal, ErrorResult& aRv) const
+SpeechRecognition::GetLang(nsString& aRetVal) const
 {
-  aRv.Throw(NS_ERROR_NOT_IMPLEMENTED);
-  return;
+  aRetVal = mLang;
 }
 
 void
-SpeechRecognition::SetLang(const nsAString& aArg, ErrorResult& aRv)
+SpeechRecognition::SetLang(const nsAString& aArg)
 {
-  aRv.Throw(NS_ERROR_NOT_IMPLEMENTED);
-  return;
+  mLang = aArg;
 }
 
 bool
 SpeechRecognition::GetContinuous(ErrorResult& aRv) const
 {
   aRv.Throw(NS_ERROR_NOT_IMPLEMENTED);
   return false;
 }
@@ -705,18 +713,21 @@ SpeechRecognition::SetServiceURI(const n
 void
 SpeechRecognition::Start(const Optional<NonNull<DOMMediaStream>>& aStream, ErrorResult& aRv)
 {
   if (mCurrentState != STATE_IDLE) {
     aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
     return;
   }
 
-  mRecognitionService = GetSpeechRecognitionService();
-  if (NS_WARN_IF(!mRecognitionService)) {
+  if (!SetRecognitionService(aRv)) {
+    return;
+  }
+
+  if (!ValidateAndSetGrammarList(aRv)) {
     return;
   }
 
   nsresult rv;
   rv = mRecognitionService->Initialize(this);
   if (NS_WARN_IF(NS_FAILED(rv))) {
     return;
   }
@@ -734,16 +745,87 @@ SpeechRecognition::Start(const Optional<
                           new GetUserMediaSuccessCallback(this),
                           new GetUserMediaErrorCallback(this));
   }
 
   nsRefPtr<SpeechEvent> event = new SpeechEvent(this, EVENT_START);
   NS_DispatchToMainThread(event);
 }
 
+bool
+SpeechRecognition::SetRecognitionService(ErrorResult& aRv)
+{
+  // See: https://dvcs.w3.org/hg/speech-api/raw-file/tip/webspeechapi.html#dfn-lang
+  if (!mLang.IsEmpty()) {
+    mRecognitionService = GetSpeechRecognitionService(mLang);
+
+    if (!mRecognitionService) {
+      aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
+      return false;
+    }
+
+    return true;
+  }
+
+  nsCOMPtr<nsPIDOMWindow> window = GetOwner();
+  if (!window) {
+    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
+    return false;
+  }
+  nsCOMPtr<nsIDocument> document = window->GetExtantDoc();
+  if (!document) {
+    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
+    return false;
+  }
+  nsCOMPtr<Element> element = document->GetRootElement();
+  if (!element) {
+    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
+    return false;
+  }
+
+  nsAutoString lang;
+  element->GetLang(lang);
+  mRecognitionService = GetSpeechRecognitionService(lang);
+
+  if (!mRecognitionService) {
+    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
+    return false;
+  }
+
+  return true;
+}
+
+bool
+SpeechRecognition::ValidateAndSetGrammarList(ErrorResult& aRv)
+{
+  if (!mSpeechGrammarList) {
+    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
+    return false;
+  }
+
+  uint32_t grammarListLength = mSpeechGrammarList->Length();
+  if (0 == grammarListLength) {
+    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
+    return false;
+  }
+
+  for (uint32_t count = 0; count < grammarListLength; ++count) {
+    nsRefPtr<SpeechGrammar> speechGrammar = mSpeechGrammarList->Item(count, aRv);
+    if (aRv.Failed()) {
+      return false;
+    }
+    if (NS_FAILED(mRecognitionService->ValidateAndSetGrammarList(speechGrammar.get(), nullptr))) {
+      aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
+      return false;
+    }
+  }
+
+  return true;
+}
+
 void
 SpeechRecognition::Stop()
 {
   nsRefPtr<SpeechEvent> event = new SpeechEvent(this, EVENT_STOP);
   NS_DispatchToMainThread(event);
 }
 
 void
--- a/dom/media/webspeech/recognition/SpeechRecognition.h
+++ b/dom/media/webspeech/recognition/SpeechRecognition.h
@@ -44,46 +44,45 @@ namespace dom {
 #define SPEECH_RECOGNITION_TEST_END_TOPIC "SpeechRecognitionTest:End"
 
 class GlobalObject;
 class SpeechEvent;
 
 PRLogModuleInfo* GetSpeechRecognitionLog();
 #define SR_LOG(...) MOZ_LOG(GetSpeechRecognitionLog(), mozilla::LogLevel::Debug, (__VA_ARGS__))
 
-already_AddRefed<nsISpeechRecognitionService> GetSpeechRecognitionService();
-
 class SpeechRecognition final : public DOMEventTargetHelper,
                                 public nsIObserver,
                                 public SupportsWeakPtr<SpeechRecognition>
 {
 public:
   MOZ_DECLARE_WEAKREFERENCE_TYPENAME(SpeechRecognition)
   explicit SpeechRecognition(nsPIDOMWindow* aOwnerWindow);
 
   NS_DECL_ISUPPORTS_INHERITED
+  NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(SpeechRecognition, DOMEventTargetHelper)
 
   NS_DECL_NSIOBSERVER
 
   nsISupports* GetParentObject() const;
 
   virtual JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override;
 
   static bool IsAuthorized(JSContext* aCx, JSObject* aGlobal);
 
   static already_AddRefed<SpeechRecognition>
   Constructor(const GlobalObject& aGlobal, ErrorResult& aRv);
 
-  already_AddRefed<SpeechGrammarList> GetGrammars(ErrorResult& aRv) const;
+  already_AddRefed<SpeechGrammarList> Grammars() const;
 
-  void SetGrammars(mozilla::dom::SpeechGrammarList& aArg, ErrorResult& aRv);
+  void SetGrammars(mozilla::dom::SpeechGrammarList& aArg);
 
-  void GetLang(nsString& aRetVal, ErrorResult& aRv) const;
+  void GetLang(nsString& aRetVal) const;
 
-  void SetLang(const nsAString& aArg, ErrorResult& aRv);
+  void SetLang(const nsAString& aArg);
 
   bool GetContinuous(ErrorResult& aRv) const;
 
   void SetContinuous(bool aArg, ErrorResult& aRv);
 
   bool GetInterimResults(ErrorResult& aRv) const;
 
   void SetInterimResults(bool aArg, ErrorResult& aRv);
@@ -171,16 +170,19 @@ private:
     STATE_RECOGNIZING,
     STATE_WAITING_FOR_RESULT,
     STATE_COUNT
   };
 
   void SetState(FSMState state);
   bool StateBetween(FSMState begin, FSMState end);
 
+  bool SetRecognitionService(ErrorResult& aRv);
+  bool ValidateAndSetGrammarList(ErrorResult& aRv);
+
   class GetUserMediaSuccessCallback : public nsIDOMGetUserMediaSuccessCallback
   {
   public:
     NS_DECL_ISUPPORTS
     NS_DECL_NSIDOMGETUSERMEDIASUCCESSCALLBACK
 
     explicit GetUserMediaSuccessCallback(SpeechRecognition* aRecognition)
       : mRecognition(aRecognition)
@@ -244,16 +246,20 @@ private:
   // buffer holds one chunk of mAudioSamplesPerChunk
   // samples before feeding it to mEndpointer
   nsRefPtr<SharedBuffer> mAudioSamplesBuffer;
   uint32_t mBufferedSamples;
 
   nsCOMPtr<nsITimer> mSpeechDetectionTimer;
   bool mAborted;
 
+  nsString mLang;
+
+  nsRefPtr<SpeechGrammarList> mSpeechGrammarList;
+
   void ProcessTestEventRequest(nsISupports* aSubject, const nsAString& aEventName);
 
   const char* GetName(FSMState aId);
   const char* GetName(SpeechEvent* aId);
 };
 
 class SpeechEvent : public nsRunnable
 {
--- a/dom/media/webspeech/recognition/test/head.js
+++ b/dom/media/webspeech/recognition/test/head.js
@@ -39,16 +39,21 @@ function EventManager(sr) {
   var eventDependencies = {
     "speechend": "speechstart",
     "soundend": "soundstart",
     "audioend": "audiostart"
   };
 
   var isDone = false;
 
+  // set up grammar
+  var sgl = new SpeechGrammarList();
+  sgl.addFromString("#JSGF V1.0; grammar test; public <simple> = hello ;", 1);
+  sr.grammars = sgl;
+
   // AUDIO_DATA events are asynchronous,
   // so we queue events requested while they are being
   // issued to make them seem synchronous
   var isSendingAudioData = false;
   var queuedEventRequests = [];
 
   // register default handlers
   for (var i = 0; i < allEvents.length; i++) {
--- a/dom/media/webspeech/recognition/test/test_audio_capture_error.html
+++ b/dom/media/webspeech/recognition/test/test_audio_capture_error.html
@@ -18,18 +18,23 @@ https://bugzilla.mozilla.org/show_bug.cg
 </div>
 <pre id="test">
 <script type="text/javascript">
   SimpleTest.waitForExplicitFinish();
 
   performTest({
     eventsToRequest: ['EVENT_AUDIO_ERROR'],
     expectedEvents: {
+      'start': null,
+      'audiostart': null,
+      'speechstart': null,
+      'speechend': null,
+      'audioend': null,
       'error': buildErrorCallback(errorCodes.AUDIO_CAPTURE),
       'end': null
     },
     doneFunc: SimpleTest.finish,
-    prefs: [["media.webspeech.test.fake_fsm_events", true]]
+    prefs: [["media.webspeech.test.fake_fsm_events", true], ["media.webspeech.test.fake_recognition_service", true]]
   });
 </script>
 </pre>
 </body>
 </html>
--- a/dom/webidl/SpeechRecognition.webidl
+++ b/dom/webidl/SpeechRecognition.webidl
@@ -10,19 +10,17 @@
  * liability, trademark and document use rules apply.
  */
 
 [Constructor,
  Pref="media.webspeech.recognition.enable",
  Func="SpeechRecognition::IsAuthorized"]
 interface SpeechRecognition : EventTarget {
     // recognition parameters
-    [Throws]
     attribute SpeechGrammarList grammars;
-    [Throws]
     attribute DOMString lang;
     [Throws]
     attribute boolean continuous;
     [Throws]
     attribute boolean interimResults;
     [Throws]
     attribute unsigned long maxAlternatives;
     [Throws]
--- a/layout/build/nsLayoutModule.cpp
+++ b/layout/build/nsLayoutModule.cpp
@@ -1260,17 +1260,17 @@ static const mozilla::Module::ContractID
   { "@mozilla.org/geolocation;1", &kNS_GEOLOCATION_CID },
   { "@mozilla.org/audiochannel/service;1", &kNS_AUDIOCHANNEL_SERVICE_CID },
   { "@mozilla.org/datastore-service;1", &kNS_DATASTORE_SERVICE_CID },
   { "@mozilla.org/focus-manager;1", &kNS_FOCUSMANAGER_CID },
 #ifdef MOZ_WEBSPEECH_TEST_BACKEND
   { NS_SPEECH_RECOGNITION_SERVICE_CONTRACTID_PREFIX "fake", &kNS_FAKE_SPEECH_RECOGNITION_SERVICE_CID },
 #endif
 #ifdef MOZ_WEBSPEECH_POCKETSPHINX
-  { NS_SPEECH_RECOGNITION_SERVICE_CONTRACTID_PREFIX "pocketsphinx", &kNS_POCKETSPHINX_SPEECH_RECOGNITION_SERVICE_CID },
+  { NS_SPEECH_RECOGNITION_SERVICE_CONTRACTID_PREFIX "pocketsphinx-en-US", &kNS_POCKETSPHINX_SPEECH_RECOGNITION_SERVICE_CID },
 #endif
 #ifdef MOZ_WEBSPEECH
   { NS_SYNTHVOICEREGISTRY_CONTRACTID, &kNS_SYNTHVOICEREGISTRY_CID },
 #endif
   { CSPSERVICE_CONTRACTID, &kCSPSERVICE_CID },
   { NS_CSPCONTEXT_CONTRACTID, &kNS_CSPCONTEXT_CID },
   { NS_MIXEDCONTENTBLOCKER_CONTRACTID, &kNS_MIXEDCONTENTBLOCKER_CID },
   { NS_EVENTLISTENERSERVICE_CONTRACTID, &kNS_EVENTLISTENERSERVICE_CID },