Bug 1155917 - Added volume argument to nsISpeechService.speak(). r=smaug
authorEitan Isaacson <eitan@monotonous.org>
Thu, 23 Apr 2015 13:06:43 -0700
changeset 240858 365450b3d3eff4ea2fc758f3618ad872009a7c7e
parent 240857 95fd54df106b927e09148720caacf6d979b5489b
child 240859 89f436c87a2fc2c7227d6bfa1587a7d402c91447
push id 28647
push user cbook@mozilla.com
push date Fri, 24 Apr 2015 12:37:38 +0000
treeherder mozilla-central@86d3308ec888 [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers smaug
bugs 1155917
milestone 40.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1155917 - Added volume argument to nsISpeechService.speak(). r=smaug
dom/media/webspeech/synth/ipc/SpeechSynthesisParent.cpp
dom/media/webspeech/synth/nsISpeechService.idl
dom/media/webspeech/synth/nsSynthVoiceRegistry.cpp
dom/media/webspeech/synth/nsSynthVoiceRegistry.h
dom/media/webspeech/synth/pico/nsPicoService.cpp
dom/media/webspeech/synth/test/common.js
--- a/dom/media/webspeech/synth/ipc/SpeechSynthesisParent.cpp
+++ b/dom/media/webspeech/synth/ipc/SpeechSynthesisParent.cpp
@@ -59,17 +59,17 @@ SpeechSynthesisParent::RecvPSpeechSynthe
                                                               const nsString& aUri,
                                                               const float& aVolume,
                                                               const float& aRate,
                                                               const float& aPitch)
 {
   MOZ_ASSERT(aActor);
   SpeechSynthesisRequestParent* actor =
     static_cast<SpeechSynthesisRequestParent*>(aActor);
-  nsSynthVoiceRegistry::GetInstance()->Speak(aText, aLang, aUri, aRate,
+  nsSynthVoiceRegistry::GetInstance()->Speak(aText, aLang, aUri, aVolume, aRate,
                                              aPitch, actor->mTask);
   return true;
 }
 
 // SpeechSynthesisRequestParent
 
 SpeechSynthesisRequestParent::SpeechSynthesisRequestParent(SpeechTaskParent* aTask)
   : mTask(aTask)
--- a/dom/media/webspeech/synth/nsISpeechService.idl
+++ b/dom/media/webspeech/synth/nsISpeechService.idl
@@ -129,32 +129,33 @@ interface nsISpeechTask : nsISupports
  *    The service calls the nsISpeechTask.dispatch* methods directly. Starting
  *    with dispatchStart() and ending with dispatchEnd or dispatchError().
  *
  *  2. Direct audio - the service provides us with PCM-16 data, and we output it.
  *    The service does not call the dispatch task methods directly. Instead,
  *    audio information is provided at setup(), and audio data is sent with
  *    sendAudio(). The utterance is terminated with an empty sendAudio().
  */
-[scriptable, uuid(3952d388-050c-47ba-a70f-5fc1cadf1db0)]
+[scriptable, uuid(9b7d59db-88ff-43d0-b6ee-9f63d042d08f)]
 interface nsISpeechService : nsISupports
 {
   /**
    * Speak the given text using the voice identified byu the given uri. See
    * W3C Speech API spec for information about pitch and rate.
    * https://dvcs.w3.org/hg/speech-api/raw-file/tip/speechapi.html#utterance-attributes
    *
-   * @param aText  text to utter.
-   * @param aUri   unique voice identifier.
-   * @param aRate  rate to speak voice in.
-   * @param aPitch pitch to speak voice in.
+   * @param aText   text to utter.
+   * @param aUri    unique voice identifier.
+   * @param aVolume volume to speak voice in. Only relevant for indirect audio.
+   * @param aRate   rate to speak voice in.
+   * @param aPitch  pitch to speak voice in.
    * @param aTask  task instance for utterance, used for sending events or audio
    *                 data back to browser.
    */
   void speak(in DOMString aText, in DOMString aUri,
-             in float aRate, in float aPitch,
+             in float aVolume, in float aRate, in float aPitch,
              in nsISpeechTask aTask);
 
   const SpeechServiceType SERVICETYPE_DIRECT_AUDIO = 1;
   const SpeechServiceType SERVICETYPE_INDIRECT_AUDIO = 2;
 
   readonly attribute SpeechServiceType serviceType;
 };
--- a/dom/media/webspeech/synth/nsSynthVoiceRegistry.cpp
+++ b/dom/media/webspeech/synth/nsSynthVoiceRegistry.cpp
@@ -534,26 +534,27 @@ nsSynthVoiceRegistry::SpeakUtterance(Spe
                                                               lang,
                                                               uri,
                                                               aUtterance.Volume(),
                                                               aUtterance.Rate(),
                                                               aUtterance.Pitch());
   } else {
     task = new nsSpeechTask(&aUtterance);
     Speak(aUtterance.mText, lang, uri,
-          aUtterance.Rate(), aUtterance.Pitch(), task);
+          aUtterance.Volume(), aUtterance.Rate(), aUtterance.Pitch(), task);
   }
 
   return task.forget();
 }
 
 void
 nsSynthVoiceRegistry::Speak(const nsAString& aText,
                             const nsAString& aLang,
                             const nsAString& aUri,
+                            const float& aVolume,
                             const float& aRate,
                             const float& aPitch,
                             nsSpeechTask* aTask)
 {
   LOG(PR_LOG_DEBUG,
       ("nsSynthVoiceRegistry::Speak text='%s' lang='%s' uri='%s' rate=%f pitch=%f",
        NS_ConvertUTF16toUTF8(aText).get(), NS_ConvertUTF16toUTF8(aLang).get(),
        NS_ConvertUTF16toUTF8(aUri).get(), aRate, aPitch));
@@ -578,13 +579,13 @@ nsSynthVoiceRegistry::Speak(const nsAStr
     aTask->SetIndirectAudio(true);
   } else {
     if (!mStream) {
       mStream = MediaStreamGraph::GetInstance()->CreateTrackUnionStream(nullptr);
     }
     aTask->BindStream(mStream);
   }
 
-  voice->mService->Speak(aText, voice->mUri, aRate, aPitch, aTask);
+  voice->mService->Speak(aText, voice->mUri, aVolume, aRate, aPitch, aTask);
 }
 
 } // namespace dom
 } // namespace mozilla
--- a/dom/media/webspeech/synth/nsSynthVoiceRegistry.h
+++ b/dom/media/webspeech/synth/nsSynthVoiceRegistry.h
@@ -31,18 +31,18 @@ public:
   NS_DECL_NSISYNTHVOICEREGISTRY
 
   nsSynthVoiceRegistry();
 
   already_AddRefed<nsSpeechTask> SpeakUtterance(SpeechSynthesisUtterance& aUtterance,
                                                 const nsAString& aDocLang);
 
   void Speak(const nsAString& aText, const nsAString& aLang,
-             const nsAString& aUri, const float& aRate, const float& aPitch,
-             nsSpeechTask* aTask);
+             const nsAString& aUri, const float& aVolume, const float& aRate,
+             const float& aPitch, nsSpeechTask* aTask);
 
   void SendVoices(InfallibleTArray<RemoteVoice>* aVoices,
                   InfallibleTArray<nsString>* aDefaults);
 
   static nsSynthVoiceRegistry* GetInstance();
 
   static already_AddRefed<nsSynthVoiceRegistry> GetInstanceForService();
 
--- a/dom/media/webspeech/synth/pico/nsPicoService.cpp
+++ b/dom/media/webspeech/synth/pico/nsPicoService.cpp
@@ -457,17 +457,18 @@ nsPicoService::Observe(nsISupports* aSub
   MOZ_ASSERT(NS_SUCCEEDED(rv));
   return mThread->Dispatch(
     NS_NewRunnableMethod(this, &nsPicoService::Init), NS_DISPATCH_NORMAL);
 }
 // nsISpeechService
 
 NS_IMETHODIMP
 nsPicoService::Speak(const nsAString& aText, const nsAString& aUri,
-                     float aRate, float aPitch, nsISpeechTask* aTask)
+                     float aVolume, float aRate, float aPitch,
+                     nsISpeechTask* aTask)
 {
   NS_ENSURE_TRUE(mInitialized, NS_ERROR_NOT_AVAILABLE);
 
   MonitorAutoLock autoLock(mVoicesMonitor);
   bool found = false;
   PicoVoice* voice = mVoices.GetWeak(aUri, &found);
   NS_ENSURE_TRUE(found, NS_ERROR_NOT_AVAILABLE);
 
--- a/dom/media/webspeech/synth/test/common.js
+++ b/dom/media/webspeech/synth/test/common.js
@@ -35,17 +35,17 @@ SpeechTaskCallback.prototype = {
 };
 
 var TestSpeechServiceWithAudio = SpecialPowers.wrapCallbackObject({
   CHANNELS: 1,
   SAMPLE_RATE: 16000,
 
   serviceType: SpecialPowers.Ci.nsISpeechService.SERVICETYPE_DIRECT_AUDIO,
 
-  speak: function speak(aText, aUri, aRate, aPitch, aTask) {
+  speak: function speak(aText, aUri, aVolume, aRate, aPitch, aTask) {
     var task = SpecialPowers.wrap(aTask);
 
     window.setTimeout(
       function () {
         task.setup(SpecialPowers.wrapCallbackObject(new SpeechTaskCallback()), this.CHANNELS, this.SAMPLE_RATE);
         // 0.025 seconds per character.
         task.sendAudio(new Int16Array((this.SAMPLE_RATE/40)*aText.length), []);
         task.sendAudio(new Int16Array(0), []);
@@ -59,17 +59,17 @@ var TestSpeechServiceWithAudio = Special
   getInterfaces: function(c) {},
 
   getScriptableHelper: function() {}
 });
 
 var TestSpeechServiceNoAudio = SpecialPowers.wrapCallbackObject({
   serviceType: SpecialPowers.Ci.nsISpeechService.SERVICETYPE_INDIRECT_AUDIO,
 
-  speak: function speak(aText, aUri, aRate, aPitch, aTask) {
+  speak: function speak(aText, aUri, aVolume, aRate, aPitch, aTask) {
     var pair = this.expectedSpeaks.shift();
     if (pair) {
       // XXX: These tests do not happen in OOP
       var utterance = pair[0];
       var expected = pair[1];
 
       is(aText, utterance.text, "Speak text matches utterance text");