Merge m-c to fx-team
author: Wes Kocher <wkocher@mozilla.com>
Wed, 26 Feb 2014 18:34:07 -0800
changeset 171300 d8a99aa4df22cf799e7732e749119c9378ea7e75
parent 171299 aaf67168e062a182040dbe0e1745cab2c39f23ed (current diff)
parent 171218 de2ce8226ca8b1852486da1b7d861ab46caed020 (diff)
child 171301 85fbd0ccd0977a7b603f59cb8bdbce345b7fa985
push id: 270
push user: pvanderbeken@mozilla.com
push date: Thu, 06 Mar 2014 09:24:21 +0000
milestone: 30.0a1
Merge m-c to fx-team
b2g/chrome/content/settings.js
--- a/b2g/chrome/content/settings.js
+++ b/b2g/chrome/content/settings.js
@@ -718,8 +718,12 @@ SettingsListener.observe('layers.enable-
 SettingsListener.observe('layers.progressive-paint', false, function(value) {
   Services.prefs.setBoolPref('layers.progressive-paint', value);
 });
 
 SettingsListener.observe('layers.draw-tile-borders', false, function(value) {
   Services.prefs.setBoolPref('layers.draw-tile-borders', value);
 });
 
+SettingsListener.observe('layers.dump', false, function(value) {
+  Services.prefs.setBoolPref('layers.dump', value);
+});
+
--- a/b2g/chrome/content/shell.css
+++ b/b2g/chrome/content/shell.css
@@ -9,9 +9,10 @@ html {
   height: 100%;
   padding: 0 !important;
 }
 
 body {
   margin: 0;
   width: 100%;
   height: 100%;
+  overflow: hidden;
 }
--- a/b2g/config/emulator-ics/sources.xml
+++ b/b2g/config/emulator-ics/sources.xml
@@ -14,17 +14,17 @@
   <!--original fetch url was git://github.com/apitrace/-->
   <remote fetch="https://git.mozilla.org/external/apitrace" name="apitrace"/>
   <default remote="caf" revision="refs/tags/android-4.0.4_r2.1" sync-j="4"/>
   <!-- Gonk specific things and forks -->
   <project name="platform_build" path="build" remote="b2g" revision="59605a7c026ff06cc1613af3938579b1dddc6cfe">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
-  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="c8d34e6e98d4b99921fda59ddd89f2dcdce201fc"/>
+  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="548965a5866aa38b5f44c336b3a7fb723164e277"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="15e8982284c4560f9c74c2b9fe8bb361ebfe0cb6"/>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="platform_hardware_ril" path="hardware/ril" remote="b2g" revision="d11f524d00cacf5ba0dfbf25e4aa2158b1c3a036"/>
   <project name="platform_external_qemu" path="external/qemu" remote="b2g" revision="022eadd5917615ff00c47eaaafa792b45e9c8a28"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="3d5c964015967ca8c86abe6dbbebee3cb82b1609"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="52ca41d9fa6ef88e65d9da52e375716c68d48646"/>
   <!-- Stock Android things -->
   <project name="platform/abi/cpp" path="abi/cpp" revision="dd924f92906085b831bf1cbbc7484d3c043d613c"/>
--- a/b2g/config/emulator-jb/sources.xml
+++ b/b2g/config/emulator-jb/sources.xml
@@ -12,17 +12,17 @@
   <!--original fetch url was https://git.mozilla.org/releases-->
   <remote fetch="https://git.mozilla.org/releases" name="mozillaorg"/>
   <!-- B2G specific things. -->
   <project name="platform_build" path="build" remote="b2g" revision="97a5b461686757dbb8ecab2aac5903e41d2e1afe">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
-  <project name="gaia" path="gaia" remote="mozillaorg" revision="c8d34e6e98d4b99921fda59ddd89f2dcdce201fc"/>
+  <project name="gaia" path="gaia" remote="mozillaorg" revision="548965a5866aa38b5f44c336b3a7fb723164e277"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="15e8982284c4560f9c74c2b9fe8bb361ebfe0cb6"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="3d5c964015967ca8c86abe6dbbebee3cb82b1609"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="52ca41d9fa6ef88e65d9da52e375716c68d48646"/>
   <project name="valgrind" path="external/valgrind" remote="b2g" revision="905bfa3548eb75cf1792d0d8412b92113bbd4318"/>
   <project name="vex" path="external/VEX" remote="b2g" revision="c3d7efc45414f1b44cd9c479bb2758c91c4707c0"/>
   <!-- Stock Android things -->
   <project groups="linux" name="platform/prebuilts/clang/linux-x86/3.1" path="prebuilts/clang/linux-x86/3.1" revision="5c45f43419d5582949284eee9cef0c43d866e03b"/>
   <project groups="linux" name="platform/prebuilts/clang/linux-x86/3.2" path="prebuilts/clang/linux-x86/3.2" revision="3748b4168e7bd8d46457d4b6786003bc6a5223ce"/>
--- a/b2g/config/emulator/sources.xml
+++ b/b2g/config/emulator/sources.xml
@@ -14,17 +14,17 @@
   <!--original fetch url was git://github.com/apitrace/-->
   <remote fetch="https://git.mozilla.org/external/apitrace" name="apitrace"/>
   <default remote="caf" revision="refs/tags/android-4.0.4_r2.1" sync-j="4"/>
   <!-- Gonk specific things and forks -->
   <project name="platform_build" path="build" remote="b2g" revision="59605a7c026ff06cc1613af3938579b1dddc6cfe">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
-  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="c8d34e6e98d4b99921fda59ddd89f2dcdce201fc"/>
+  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="548965a5866aa38b5f44c336b3a7fb723164e277"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="15e8982284c4560f9c74c2b9fe8bb361ebfe0cb6"/>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="platform_hardware_ril" path="hardware/ril" remote="b2g" revision="d11f524d00cacf5ba0dfbf25e4aa2158b1c3a036"/>
   <project name="platform_external_qemu" path="external/qemu" remote="b2g" revision="022eadd5917615ff00c47eaaafa792b45e9c8a28"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="3d5c964015967ca8c86abe6dbbebee3cb82b1609"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="52ca41d9fa6ef88e65d9da52e375716c68d48646"/>
   <!-- Stock Android things -->
   <project name="platform/abi/cpp" path="abi/cpp" revision="dd924f92906085b831bf1cbbc7484d3c043d613c"/>
--- a/b2g/config/gaia.json
+++ b/b2g/config/gaia.json
@@ -1,9 +1,9 @@
 {
     "git": {
         "remote": "", 
         "branch": "", 
         "revision": ""
     }, 
-    "revision": "8bb0cf53956e54999a5f876434207216d9d8982a", 
+    "revision": "34d0fa6b3f36fd1a56419726546c9344a4b6222a", 
     "repo_path": "/integration/gaia-central"
 }
--- a/b2g/config/hamachi/sources.xml
+++ b/b2g/config/hamachi/sources.xml
@@ -12,17 +12,17 @@
   <!--original fetch url was git://github.com/apitrace/-->
   <remote fetch="https://git.mozilla.org/external/apitrace" name="apitrace"/>
   <default remote="caf" revision="b2g/ics_strawberry" sync-j="4"/>
   <!-- Gonk specific things and forks -->
   <project name="platform_build" path="build" remote="b2g" revision="59605a7c026ff06cc1613af3938579b1dddc6cfe">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
-  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="c8d34e6e98d4b99921fda59ddd89f2dcdce201fc"/>
+  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="548965a5866aa38b5f44c336b3a7fb723164e277"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="15e8982284c4560f9c74c2b9fe8bb361ebfe0cb6"/>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="librecovery" path="librecovery" remote="b2g" revision="84f2f2fce22605e17d511ff1767e54770067b5b5"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="3d5c964015967ca8c86abe6dbbebee3cb82b1609"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="52ca41d9fa6ef88e65d9da52e375716c68d48646"/>
   <!-- Stock Android things -->
   <project name="platform/abi/cpp" path="abi/cpp" revision="6426040f1be4a844082c9769171ce7f5341a5528"/>
   <project name="platform/bionic" path="bionic" revision="d2eb6c7b6e1bc7643c17df2d9d9bcb1704d0b9ab"/>
--- a/b2g/config/helix/sources.xml
+++ b/b2g/config/helix/sources.xml
@@ -10,17 +10,17 @@
   <!--original fetch url was https://git.mozilla.org/releases-->
   <remote fetch="https://git.mozilla.org/releases" name="mozillaorg"/>
   <default remote="caf" revision="b2g/ics_strawberry" sync-j="4"/>
   <!-- Gonk specific things and forks -->
   <project name="platform_build" path="build" remote="b2g" revision="59605a7c026ff06cc1613af3938579b1dddc6cfe">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
-  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="c8d34e6e98d4b99921fda59ddd89f2dcdce201fc"/>
+  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="548965a5866aa38b5f44c336b3a7fb723164e277"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="15e8982284c4560f9c74c2b9fe8bb361ebfe0cb6"/>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="librecovery" path="librecovery" remote="b2g" revision="84f2f2fce22605e17d511ff1767e54770067b5b5"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="3d5c964015967ca8c86abe6dbbebee3cb82b1609"/>
   <project name="gonk-patches" path="patches" remote="b2g" revision="223a2421006e8f5da33f516f6891c87cae86b0f6"/>
   <!-- Stock Android things -->
   <project name="platform/abi/cpp" path="abi/cpp" revision="6426040f1be4a844082c9769171ce7f5341a5528"/>
   <project name="platform/bionic" path="bionic" revision="d2eb6c7b6e1bc7643c17df2d9d9bcb1704d0b9ab"/>
--- a/b2g/config/inari/sources.xml
+++ b/b2g/config/inari/sources.xml
@@ -14,17 +14,17 @@
   <!--original fetch url was git://github.com/apitrace/-->
   <remote fetch="https://git.mozilla.org/external/apitrace" name="apitrace"/>
   <default remote="caf" revision="ics_chocolate_rb4.2" sync-j="4"/>
   <!-- Gonk specific things and forks -->
   <project name="platform_build" path="build" remote="b2g" revision="59605a7c026ff06cc1613af3938579b1dddc6cfe">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
-  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="c8d34e6e98d4b99921fda59ddd89f2dcdce201fc"/>
+  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="548965a5866aa38b5f44c336b3a7fb723164e277"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="15e8982284c4560f9c74c2b9fe8bb361ebfe0cb6"/>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="librecovery" path="librecovery" remote="b2g" revision="84f2f2fce22605e17d511ff1767e54770067b5b5"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="3d5c964015967ca8c86abe6dbbebee3cb82b1609"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="52ca41d9fa6ef88e65d9da52e375716c68d48646"/>
   <!-- Stock Android things -->
   <project name="platform/abi/cpp" path="abi/cpp" revision="6426040f1be4a844082c9769171ce7f5341a5528"/>
   <project name="platform/bionic" path="bionic" revision="cd5dfce80bc3f0139a56b58aca633202ccaee7f8"/>
--- a/b2g/config/leo/sources.xml
+++ b/b2g/config/leo/sources.xml
@@ -12,17 +12,17 @@
   <!--original fetch url was git://github.com/apitrace/-->
   <remote fetch="https://git.mozilla.org/external/apitrace" name="apitrace"/>
   <default remote="caf" revision="b2g/ics_strawberry" sync-j="4"/>
   <!-- Gonk specific things and forks -->
   <project name="platform_build" path="build" remote="b2g" revision="59605a7c026ff06cc1613af3938579b1dddc6cfe">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
-  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="c8d34e6e98d4b99921fda59ddd89f2dcdce201fc"/>
+  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="548965a5866aa38b5f44c336b3a7fb723164e277"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="15e8982284c4560f9c74c2b9fe8bb361ebfe0cb6"/>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="librecovery" path="librecovery" remote="b2g" revision="84f2f2fce22605e17d511ff1767e54770067b5b5"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="3d5c964015967ca8c86abe6dbbebee3cb82b1609"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="52ca41d9fa6ef88e65d9da52e375716c68d48646"/>
   <project name="gonk-patches" path="patches" remote="b2g" revision="223a2421006e8f5da33f516f6891c87cae86b0f6"/>
   <!-- Stock Android things -->
   <project name="platform/abi/cpp" path="abi/cpp" revision="6426040f1be4a844082c9769171ce7f5341a5528"/>
--- a/b2g/config/mako/sources.xml
+++ b/b2g/config/mako/sources.xml
@@ -12,17 +12,17 @@
   <!--original fetch url was https://git.mozilla.org/releases-->
   <remote fetch="https://git.mozilla.org/releases" name="mozillaorg"/>
   <!-- B2G specific things. -->
   <project name="platform_build" path="build" remote="b2g" revision="97a5b461686757dbb8ecab2aac5903e41d2e1afe">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
-  <project name="gaia" path="gaia" remote="mozillaorg" revision="c8d34e6e98d4b99921fda59ddd89f2dcdce201fc"/>
+  <project name="gaia" path="gaia" remote="mozillaorg" revision="548965a5866aa38b5f44c336b3a7fb723164e277"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="15e8982284c4560f9c74c2b9fe8bb361ebfe0cb6"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="3d5c964015967ca8c86abe6dbbebee3cb82b1609"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="52ca41d9fa6ef88e65d9da52e375716c68d48646"/>
   <project name="valgrind" path="external/valgrind" remote="b2g" revision="905bfa3548eb75cf1792d0d8412b92113bbd4318"/>
   <project name="vex" path="external/VEX" remote="b2g" revision="c3d7efc45414f1b44cd9c479bb2758c91c4707c0"/>
   <!-- Stock Android things -->
   <project groups="linux" name="platform/prebuilts/clang/linux-x86/3.1" path="prebuilts/clang/linux-x86/3.1" revision="5c45f43419d5582949284eee9cef0c43d866e03b"/>
   <project groups="linux" name="platform/prebuilts/clang/linux-x86/3.2" path="prebuilts/clang/linux-x86/3.2" revision="3748b4168e7bd8d46457d4b6786003bc6a5223ce"/>
--- a/b2g/config/wasabi/sources.xml
+++ b/b2g/config/wasabi/sources.xml
@@ -12,17 +12,17 @@
   <!--original fetch url was git://github.com/apitrace/-->
   <remote fetch="https://git.mozilla.org/external/apitrace" name="apitrace"/>
   <default remote="caf" revision="ics_chocolate_rb4.2" sync-j="4"/>
   <!-- Gonk specific things and forks -->
   <project name="platform_build" path="build" remote="b2g" revision="59605a7c026ff06cc1613af3938579b1dddc6cfe">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
-  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="c8d34e6e98d4b99921fda59ddd89f2dcdce201fc"/>
+  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="548965a5866aa38b5f44c336b3a7fb723164e277"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="15e8982284c4560f9c74c2b9fe8bb361ebfe0cb6"/>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="librecovery" path="librecovery" remote="b2g" revision="84f2f2fce22605e17d511ff1767e54770067b5b5"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="3d5c964015967ca8c86abe6dbbebee3cb82b1609"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="52ca41d9fa6ef88e65d9da52e375716c68d48646"/>
   <project name="gonk-patches" path="patches" remote="b2g" revision="223a2421006e8f5da33f516f6891c87cae86b0f6"/>
   <!-- Stock Android things -->
   <project name="platform/abi/cpp" path="abi/cpp" revision="6426040f1be4a844082c9769171ce7f5341a5528"/>
--- a/browser/app/profile/firefox.js
+++ b/browser/app/profile/firefox.js
@@ -799,19 +799,17 @@ pref("browser.safebrowsing.gethashURL", 
 pref("browser.safebrowsing.reportURL", "https://safebrowsing.google.com/safebrowsing/report?");
 pref("browser.safebrowsing.reportGenericURL", "http://%LOCALE%.phish-generic.mozilla.com/?hl=%LOCALE%");
 pref("browser.safebrowsing.reportErrorURL", "http://%LOCALE%.phish-error.mozilla.com/?hl=%LOCALE%");
 pref("browser.safebrowsing.reportPhishURL", "http://%LOCALE%.phish-report.mozilla.com/?hl=%LOCALE%");
 pref("browser.safebrowsing.reportMalwareURL", "http://%LOCALE%.malware-report.mozilla.com/?hl=%LOCALE%");
 pref("browser.safebrowsing.reportMalwareErrorURL", "http://%LOCALE%.malware-error.mozilla.com/?hl=%LOCALE%");
 
 pref("browser.safebrowsing.malware.reportURL", "https://safebrowsing.google.com/safebrowsing/diagnostic?client=%NAME%&hl=%LOCALE%&site=");
-#ifndef MOZILLA_OFFICIAL
 pref("browser.safebrowsing.appRepURL", "https://sb-ssl.google.com/safebrowsing/clientreport/download&key=%GOOGLE_API_KEY%");
-#endif
 
 #ifdef MOZILLA_OFFICIAL
 // Normally the "client ID" sent in updates is appinfo.name, but for
 // official Firefox releases from Mozilla we use a special identifier.
 pref("browser.safebrowsing.id", "navclient-auto-ffox");
 #endif
 
 // Name of the about: page contributed by safebrowsing to handle display of error
@@ -822,18 +820,23 @@ pref("urlclassifier.alternate_error_page
 pref("urlclassifier.gethashnoise", 4);
 
 // If an urlclassifier table has not been updated in this number of seconds,
 // a gethash request will be forced to check that the result is still in
 // the database.
 pref("urlclassifier.max-complete-age", 2700);
 // Tables for application reputation.
 pref("urlclassifier.download_block_table", "goog-badbinurl-shavar");
+#ifdef XP_WIN
+// Only download the whitelist on Windows, since the whitelist is
+// only useful for suppressing remote lookups for signed binaries which we can
+// only verify on Windows (Bug 974579).
 pref("urlclassifier.download_allow_table", "goog-downloadwhite-digest256");
 #endif
+#endif
 
 pref("browser.geolocation.warning.infoURL", "https://www.mozilla.org/%LOCALE%/firefox/geolocation/");
 
 pref("browser.EULA.version", 3);
 pref("browser.rights.version", 3);
 pref("browser.rights.3.shown", false);
 
 #ifdef DEBUG
--- a/build/gyp.mozbuild
+++ b/build/gyp.mozbuild
@@ -12,16 +12,17 @@ gyp_vars = {
     'include_alsa_audio': 1 if CONFIG['MOZ_ALSA'] else 0,
     'include_pulse_audio': 1 if CONFIG['MOZ_PULSEAUDIO'] else 0,
     # basic stuff for everything
     'include_internal_video_render': 0,
     'clang_use_chrome_plugins': 0,
     'enable_protobuf': 0,
     'include_tests': 0,
     'enable_android_opensl': 1,
+    'enable_android_opensl_output': 0,
     # use_system_lib* still seems to be in use in trunk/build
     'use_system_libjpeg': 0,
     'use_system_libvpx': 0,
     'build_libjpeg': 0,
     'build_libvpx': 0,
     'build_libyuv': 0,
     'libyuv_dir': '/media/libyuv',
     'yuv_disable_avx2': 0 if CONFIG['HAVE_X86_AVX2'] else 1,
--- a/dom/bindings/BindingUtils.h
+++ b/dom/bindings/BindingUtils.h
@@ -17,17 +17,16 @@
 #include "mozilla/dom/DOMJSClass.h"
 #include "mozilla/dom/DOMJSProxyHandler.h"
 #include "mozilla/dom/Exceptions.h"
 #include "mozilla/dom/NonRefcountedDOMObject.h"
 #include "mozilla/dom/Nullable.h"
 #include "mozilla/dom/RootedDictionary.h"
 #include "mozilla/dom/workers/Workers.h"
 #include "mozilla/ErrorResult.h"
-#include "mozilla/HoldDropJSObjects.h"
 #include "mozilla/Likely.h"
 #include "mozilla/MemoryReporting.h"
 #include "nsCycleCollector.h"
 #include "nsIXPConnect.h"
 #include "MainThreadUtils.h"
 #include "nsTraceRefcnt.h"
 #include "qsObjectHelper.h"
 #include "xpcpublic.h"
@@ -2422,17 +2421,19 @@ CreateGlobal(JSContext* aCx, T* aObject,
   JS::Handle<JSObject*> proto = ProtoGetter(aCx, global);
   NS_ENSURE_TRUE(proto, nullptr);
 
   if (!JS_SetPrototype(aCx, global, proto)) {
     NS_WARNING("Failed to set proto");
     return nullptr;
   }
 
-  mozilla::HoldJSObjects(aObject);
+  MOZ_ALWAYS_TRUE(TryPreserveWrapper(global));
+
+  MOZ_ASSERT(UnwrapDOMObjectToISupports(global));
 
   return global;
 }
 
 /*
  * Holds a jsid that is initialized to an interned string, with conversion to
  * Handle<jsid>.
  */
--- a/dom/bindings/Codegen.py
+++ b/dom/bindings/Codegen.py
@@ -294,26 +294,31 @@ def PrototypeIDAndDepth(descriptor):
         depth = "0"
     return (prototypeID, depth)
 
 def UseHolderForUnforgeable(descriptor):
     return (descriptor.concrete and
             descriptor.proxy and
             any(m for m in descriptor.interface.members if m.isAttr() and m.isUnforgeable()))
 
-def CallOnUnforgeableHolder(descriptor, code, isXrayCheck=None):
+def CallOnUnforgeableHolder(descriptor, code, isXrayCheck=None,
+                            useSharedRoot=False):
     """
     Generate the code to execute the code in "code" on an unforgeable holder if
     needed. code should be a string containing the code to execute. If it
     contains a ${holder} string parameter it will be replaced with the
     unforgeable holder object.
 
     If isXrayCheck is not None it should be a string that contains a statement
     returning whether proxy is an Xray. If isXrayCheck is None the generated
     code won't try to unwrap Xrays.
+
+    If useSharedRoot is true, we will use an existing
+    JS::Rooted<JSObject*> sharedRoot for storing our unforgeable holder instead
+    of declaring a new Rooted.
     """
     code = string.Template(code).substitute({ "holder": "unforgeableHolder" })
     if not isXrayCheck is None:
         pre = """// Scope for 'global', 'ac' and 'unforgeableHolder'
 {
   JS::Rooted<JSObject*> global(cx);
   Maybe<JSAutoCompartment> ac;
   if (""" + isXrayCheck + """) {
@@ -322,21 +327,26 @@ def CallOnUnforgeableHolder(descriptor, 
   } else {
     global = js::GetGlobalForObjectCrossCompartment(proxy);
   }"""
     else:
         pre = """// Scope for 'global' and 'unforgeableHolder'
 {
   JSObject* global = js::GetGlobalForObjectCrossCompartment(proxy);"""
 
+    if useSharedRoot:
+        holderDecl = "JS::Rooted<JSObject*>& unforgeableHolder(sharedRoot)"
+    else:
+        holderDecl = "JS::Rooted<JSObject*> unforgeableHolder(cx)"
     return (pre + """
-  JS::Rooted<JSObject*> unforgeableHolder(cx, GetUnforgeableHolder(global, prototypes::id::%s));
+  %s;
+  unforgeableHolder = GetUnforgeableHolder(global, prototypes::id::%s);
 """ + CGIndenter(CGGeneric(code)).define() + """
 }
-""") % descriptor.name
+""") % (holderDecl, descriptor.name)
 
 class CGPrototypeJSClass(CGThing):
     def __init__(self, descriptor, properties):
         CGThing.__init__(self)
         self.descriptor = descriptor
         self.properties = properties
     def declare(self):
         # We're purely for internal consumption
@@ -4773,31 +4783,19 @@ def getRetvalDeclarationForType(returnTy
             result = CGTemplatedType("nsRefPtr", result)
         else:
             result = CGWrapper(result, post="*")
         return result, False, None, None
     if returnType.isCallback():
         name = returnType.unroll().identifier.name
         return CGGeneric("nsRefPtr<%s>" % name), False, None, None
     if returnType.isAny():
-        result = CGGeneric("JS::Value")
-        if isMember:
-            resultArgs = None
-        else:
-            result = CGTemplatedType("JS::Rooted", result)
-            resultArgs = "cx"
-        return result, False, None, resultArgs
+        return CGGeneric("JS::Value"), False, None, None
     if returnType.isObject() or returnType.isSpiderMonkeyInterface():
-        result = CGGeneric("JSObject*")
-        if isMember:
-            resultArgs = None
-        else:
-            result = CGTemplatedType("JS::Rooted", result)
-            resultArgs = "cx"
-        return result, False, None, resultArgs
+        return CGGeneric("JSObject*"), False, None, None
     if returnType.isSequence():
         nullable = returnType.nullable()
         if nullable:
             returnType = returnType.inner
         # If our result is already addrefed, use the right type in the
         # sequence argument here.
         (result, _, _, _) = getRetvalDeclarationForType(returnType.inner,
                                                         descriptorProvider,
@@ -8538,40 +8536,43 @@ class CGDOMJSProxyHandler_get(ClassMetho
         args = [Argument('JSContext*', 'cx'),
                 Argument('JS::Handle<JSObject*>', 'proxy'),
                 Argument('JS::Handle<JSObject*>', 'receiver'),
                 Argument('JS::Handle<jsid>', 'id'),
                 Argument('JS::MutableHandle<JS::Value>', 'vp')]
         ClassMethod.__init__(self, "get", "bool", args)
         self.descriptor = descriptor
     def getBody(self):
+        getUnforgeableOrExpando = "JS::Rooted<JSObject*> sharedRoot(cx);\n"
         if UseHolderForUnforgeable(self.descriptor):
             hasUnforgeable = (
                 "bool hasUnforgeable;\n"
                  "if (!JS_AlreadyHasOwnPropertyById(cx, ${holder}, id, &hasUnforgeable)) {\n"
                  "  return false;\n"
                  "}\n"
                  "if (hasUnforgeable) {\n"
                  "  return JS_ForwardGetPropertyTo(cx, ${holder}, id, proxy, vp);\n"
                  "}")
-            getUnforgeableOrExpando = CallOnUnforgeableHolder(self.descriptor,
-                                                              hasUnforgeable)
-        else:
-            getUnforgeableOrExpando = ""
-        getUnforgeableOrExpando += """JS::Rooted<JSObject*> expando(cx, DOMProxyHandler::GetExpandoObject(proxy));
-if (expando) {
-  bool hasProp;
-  if (!JS_HasPropertyById(cx, expando, id, &hasProp)) {
-    return false;
-  }
-
-  if (hasProp) {
-    // Forward the get to the expando object, but our receiver is whatever our
-    // receiver is.
-    return JS_ForwardGetPropertyTo(cx, expando, id, receiver, vp);
+            getUnforgeableOrExpando += CallOnUnforgeableHolder(self.descriptor,
+                                                               hasUnforgeable,
+                                                               useSharedRoot=True)
+        getUnforgeableOrExpando += """{ // Scope for expando
+  JS::Rooted<JSObject*>& expando(sharedRoot);
+  expando = DOMProxyHandler::GetExpandoObject(proxy);
+  if (expando) {
+    bool hasProp;
+    if (!JS_HasPropertyById(cx, expando, id, &hasProp)) {
+      return false;
+    }
+
+    if (hasProp) {
+      // Forward the get to the expando object, but our receiver is whatever our
+      // receiver is.
+      return JS_ForwardGetPropertyTo(cx, expando, id, receiver, vp);
+    }
   }
 }"""
 
         templateValues = {'jsvalRef': 'vp', 'jsvalHandle': 'vp', 'obj': 'proxy'}
 
         if self.descriptor.supportsIndexedProperties():
             getIndexedOrExpando = ("int32_t index = GetArrayIndexFromId(cx, id);\n" +
                                    "if (IsArrayIndex(index)) {\n" +
--- a/dom/bindings/Exceptions.cpp
+++ b/dom/bindings/Exceptions.cpp
@@ -255,18 +255,18 @@ NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(St
 NS_IMPL_CYCLE_COLLECTION_UNLINK_END
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(StackDescriptionOwner)
   NS_IMPL_CYCLE_COLLECTION_TRAVERSE_SCRIPT_OBJECTS
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
 NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(StackDescriptionOwner)
   JS::StackDescription* desc = tmp->mDescription;
   if (tmp->mDescription) {
     for (size_t i = 0; i < desc->nframes; ++i) {
-      NS_IMPL_CYCLE_COLLECTION_TRACE_JS_MEMBER_CALLBACK(mDescription->frames[i].script());
-      NS_IMPL_CYCLE_COLLECTION_TRACE_JS_MEMBER_CALLBACK(mDescription->frames[i].fun());
+      NS_IMPL_CYCLE_COLLECTION_TRACE_JS_MEMBER_CALLBACK(mDescription->frames[i].markedLocation1());
+      NS_IMPL_CYCLE_COLLECTION_TRACE_JS_MEMBER_CALLBACK(mDescription->frames[i].markedLocation2());
     }
   }
 NS_IMPL_CYCLE_COLLECTION_TRACE_END
 
 class JSStackFrame : public nsIStackFrame
 {
 public:
   NS_DECL_CYCLE_COLLECTING_ISUPPORTS
@@ -372,23 +372,18 @@ NS_IMETHODIMP JSStackFrame::GetLanguageN
   return NS_OK;
 }
 
 /* readonly attribute string filename; */
 NS_IMETHODIMP JSStackFrame::GetFilename(nsACString& aFilename)
 {
   if (!mFilenameInitialized) {
     JS::FrameDescription& desc = mStackDescription->FrameAt(mIndex);
-    if (desc.script()) {
-      ThreadsafeAutoSafeJSContext cx;
-      JSAutoCompartment ac(cx, desc.script());
-      const char* filename = JS_GetScriptFilename(cx, desc.script());
-      if (filename) {
-        mFilename.Assign(filename);
-      }
+    if (const char *filename = desc.filename()) {
+      mFilename.Assign(filename);
     }
     mFilenameInitialized = true;
   }
 
   // The filename must be set to null if empty.
   if (mFilename.IsEmpty()) {
     aFilename.SetIsVoid(true);
   } else {
@@ -398,24 +393,18 @@ NS_IMETHODIMP JSStackFrame::GetFilename(
   return NS_OK;
 }
 
 /* readonly attribute string name; */
 NS_IMETHODIMP JSStackFrame::GetName(nsACString& aFunction)
 {
   if (!mFunnameInitialized) {
     JS::FrameDescription& desc = mStackDescription->FrameAt(mIndex);
-    if (desc.fun() && desc.script()) {
-      ThreadsafeAutoSafeJSContext cx;
-      JSAutoCompartment ac(cx, desc.script());
-      JS::Rooted<JSFunction*> fun(cx, desc.fun());
-      JS::Rooted<JSString*> funid(cx, JS_GetFunctionDisplayId(fun));
-      if (funid) {
-        CopyUTF16toUTF8(JS_GetStringCharsZ(cx, funid), mFunname);
-      }
+    if (JSFlatString *name = desc.funDisplayName()) {
+      CopyUTF16toUTF8(JS_GetFlatStringChars(name), mFunname);
     }
     mFunnameInitialized = true;
   }
 
   // The function name must be set to null if empty.
   if (mFunname.IsEmpty()) {
     aFunction.SetIsVoid(true);
   } else {
--- a/dom/camera/DOMCameraControl.cpp
+++ b/dom/camera/DOMCameraControl.cpp
@@ -1053,21 +1053,23 @@ nsDOMCameraControl::OnRecorderStateChang
 
   switch (aState) {
     case CameraControlListener::kRecorderStarted:
       if (mStartRecordingOnSuccessCb) {
         nsCOMPtr<CameraStartRecordingCallback> cb = mStartRecordingOnSuccessCb.forget();
         mStartRecordingOnErrorCb = nullptr;
         cb->Call(ignored);
       }
-      return;
+      state = NS_LITERAL_STRING("Started");
+      break;
 
     case CameraControlListener::kRecorderStopped:
       NotifyRecordingStatusChange(NS_LITERAL_STRING("shutdown"));
-      return;
+      state = NS_LITERAL_STRING("Stopped");
+      break;
 
 #ifdef MOZ_B2G_CAMERA
     case CameraControlListener::kFileSizeLimitReached:
       state = NS_LITERAL_STRING("FileSizeLimitReached");
       break;
 
     case CameraControlListener::kVideoLengthLimitReached:
       state = NS_LITERAL_STRING("VideoLengthLimitReached");
--- a/dom/promise/PromiseCallback.cpp
+++ b/dom/promise/PromiseCallback.cpp
@@ -212,17 +212,17 @@ WrapperPromiseCallback::Call(JS::Handle<
         JSAutoCompartment ac(cx, unwrapped);
         if (JS_ObjectIsFunction(cx, unwrapped)) {
           JS::Rooted<JS::Value> asValue(cx, JS::ObjectValue(*unwrapped));
           JS::Rooted<JSFunction*> func(cx, JS_ValueToFunction(cx, asValue));
 
           MOZ_ASSERT(func);
           JSScript* script = JS_GetFunctionScript(cx, func);
           if (script) {
-            fileName = JS_GetScriptFilename(cx, script);
+            fileName = JS_GetScriptFilename(script);
             lineNumber = JS_GetScriptBaseLineNumber(cx, script);
           }
         }
       }
 
       // We're back in aValue's compartment here.
       JS::Rooted<JSString*> stack(cx, JS_GetEmptyString(JS_GetRuntime(cx)));
       JS::Rooted<JSString*> fn(cx, JS_NewStringCopyZ(cx, fileName));
--- a/dom/webidl/MutationObserver.webidl
+++ b/dom/webidl/MutationObserver.webidl
@@ -3,26 +3,35 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/.
  *
  * The origin of this IDL file is
  * http://dom.spec.whatwg.org
  */
 
 interface MutationRecord {
+  [Constant]
   readonly attribute DOMString type;
   // .target is not nullable per the spec, but in order to prevent crashes,
   // if there are GC/CC bugs in Gecko, we let the property to be null.
+  [Constant]
   readonly attribute Node? target;
+  [Constant]
   readonly attribute NodeList addedNodes;
+  [Constant]
   readonly attribute NodeList removedNodes;
+  [Constant]
   readonly attribute Node? previousSibling;
+  [Constant]
   readonly attribute Node? nextSibling;
+  [Constant]
   readonly attribute DOMString? attributeName;
+  [Constant]
   readonly attribute DOMString? attributeNamespace;
+  [Constant]
   readonly attribute DOMString? oldValue;
 };
 
 [Constructor(MutationCallback mutationCallback)]
 interface MutationObserver {
   [Throws]
   void observe(Node target, optional MutationObserverInit options);
   void disconnect();
--- a/dom/workers/WorkerScope.cpp
+++ b/dom/workers/WorkerScope.cpp
@@ -37,18 +37,17 @@ WorkerGlobalScope::WorkerGlobalScope(Wor
 {
   mWorkerPrivate->AssertIsOnWorkerThread();
 
   SetIsDOMBinding();
 }
 
 WorkerGlobalScope::~WorkerGlobalScope()
 {
-  // Matches the HoldJSObjects in CreateGlobal.
-  mozilla::DropJSObjects(this);
+  mWorkerPrivate->AssertIsOnWorkerThread();
 }
 
 NS_IMPL_CYCLE_COLLECTION_CLASS(WorkerGlobalScope)
 
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(WorkerGlobalScope,
                                                   nsDOMEventTargetHelper)
   tmp->mWorkerPrivate->AssertIsOnWorkerThread();
 NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
--- a/gfx/layers/client/CanvasClient.h
+++ b/gfx/layers/client/CanvasClient.h
@@ -52,16 +52,18 @@ public:
   CanvasClient(CompositableForwarder* aFwd, TextureFlags aFlags)
     : CompositableClient(aFwd)
   {
     mTextureInfo.mTextureFlags = aFlags;
   }
 
   virtual ~CanvasClient() {}
 
+  virtual void Clear() {};
+
   virtual void Update(gfx::IntSize aSize, ClientCanvasLayer* aLayer) = 0;
 
   virtual void Updated() { }
 
 protected:
   TextureInfo mTextureInfo;
 };
 
@@ -75,16 +77,21 @@ public:
   {
   }
 
   TextureInfo GetTextureInfo() const
   {
     return TextureInfo(COMPOSITABLE_IMAGE);
   }
 
+  virtual void Clear() MOZ_OVERRIDE
+  {
+    mBuffer = nullptr;
+  }
+
   virtual void Update(gfx::IntSize aSize, ClientCanvasLayer* aLayer) MOZ_OVERRIDE;
 
   virtual bool AddTextureClient(TextureClient* aTexture) MOZ_OVERRIDE
   {
     MOZ_ASSERT((mTextureInfo.mTextureFlags & aTexture->GetFlags()) == mTextureInfo.mTextureFlags);
     return CompositableClient::AddTextureClient(aTexture);
   }
 
@@ -108,16 +115,21 @@ class CanvasClientSurfaceStream : public
 public:
   CanvasClientSurfaceStream(CompositableForwarder* aLayerForwarder, TextureFlags aFlags);
 
   TextureInfo GetTextureInfo() const
   {
     return TextureInfo(COMPOSITABLE_IMAGE);
   }
 
+  virtual void Clear() MOZ_OVERRIDE
+  {
+    mBuffer = nullptr;
+  }
+
   virtual void Update(gfx::IntSize aSize, ClientCanvasLayer* aLayer) MOZ_OVERRIDE;
 
   virtual void OnDetach() MOZ_OVERRIDE
   {
     mBuffer = nullptr;
   }
 
 private:
--- a/gfx/layers/client/ClientCanvasLayer.h
+++ b/gfx/layers/client/ClientCanvasLayer.h
@@ -51,16 +51,23 @@ public:
                  "Can only set properties in construction phase");
     CanvasLayer::SetVisibleRegion(aRegion);
   }
 
   virtual void Initialize(const Data& aData);
 
   virtual void RenderLayer();
 
+  virtual void ClearCachedResources()
+  {
+    if (mCanvasClient) {
+      mCanvasClient->Clear();
+    }
+  }
+
   virtual void FillSpecificAttributes(SpecificLayerAttributes& aAttrs)
   {
     aAttrs = CanvasLayerAttributes(mFilter, mBounds);
   }
 
   virtual Layer* AsLayer() { return this; }
   virtual ShadowableLayer* AsShadowableLayer() { return this; }
 
--- a/gfx/layers/client/ContentClient.h
+++ b/gfx/layers/client/ContentClient.h
@@ -201,17 +201,22 @@ public:
     , mIsNewBuffer(false)
     , mFrontAndBackBufferDiffer(false)
     , mSurfaceFormat(gfx::SurfaceFormat::B8G8R8A8)
   {}
 
   typedef RotatedContentBuffer::PaintState PaintState;
   typedef RotatedContentBuffer::ContentType ContentType;
 
-  virtual void Clear() { RotatedContentBuffer::Clear(); }
+  virtual void Clear()
+  {
+    RotatedContentBuffer::Clear();
+    mTextureClient = nullptr;
+    mTextureClientOnWhite = nullptr;
+  }
 
   virtual PaintState BeginPaintBuffer(ThebesLayer* aLayer,
                                       uint32_t aFlags) MOZ_OVERRIDE
   {
     return RotatedContentBuffer::BeginPaint(aLayer, aFlags);
   }
   virtual gfx::DrawTarget* BorrowDrawTargetForPainting(ThebesLayer* aLayer,
                                                        const PaintState& aPaintState) MOZ_OVERRIDE
@@ -419,16 +424,23 @@ class ContentClientDoubleBuffered : publ
 public:
   ContentClientDoubleBuffered(CompositableForwarder* aFwd)
     : ContentClientRemoteBuffer(aFwd)
   {
     mTextureInfo.mCompositableType = COMPOSITABLE_CONTENT_DOUBLE;
   }
   virtual ~ContentClientDoubleBuffered() {}
 
+  virtual void Clear() MOZ_OVERRIDE
+  {
+    ContentClientRemoteBuffer::Clear();
+    mFrontClient = nullptr;
+    mFrontClientOnWhite = nullptr;
+  }
+
   virtual void SwapBuffers(const nsIntRegion& aFrontUpdatedRegion) MOZ_OVERRIDE;
 
   virtual void PrepareFrame() MOZ_OVERRIDE;
 
   virtual void FinalizeFrame(const nsIntRegion& aRegionToDraw) MOZ_OVERRIDE;
 
 protected:
   virtual void CreateFrontBuffer(const nsIntRect& aBufferRect) MOZ_OVERRIDE;
--- a/gfx/layers/client/TextureClient.cpp
+++ b/gfx/layers/client/TextureClient.cpp
@@ -300,18 +300,19 @@ void
 TextureClient::Finalize()
 {
   // Always make a temporary strong reference to the actor before we use it,
   // in case TextureChild::ActorDestroy might null mActor concurrently.
   RefPtr<TextureChild> actor = mActor;
 
   if (actor) {
     // this will call ForceRemove in the right thread, using a sync proxy if needed
-    actor->GetForwarder()->RemoveTexture(this);
-
+    if (actor->GetForwarder()) {
+      actor->GetForwarder()->RemoveTexture(this);
+    }
     // The actor has a raw pointer to us, actor->mTextureClient. Null it before we die.
     actor->mTextureClient = nullptr;
   }
 }
 
 bool
 TextureClient::ShouldDeallocateInDestructor() const
 {
--- a/gfx/thebes/gfxPlatform.cpp
+++ b/gfx/thebes/gfxPlatform.cpp
@@ -2096,26 +2096,27 @@ InitLayersAccelerationPrefs()
 
     sPrefLayersOffMainThreadCompositionEnabled = Preferences::GetBool("layers.offmainthreadcomposition.enabled", false);
     sPrefLayersOffMainThreadCompositionTestingEnabled = Preferences::GetBool("layers.offmainthreadcomposition.testing.enabled", false);
     sPrefLayersOffMainThreadCompositionForceEnabled = Preferences::GetBool("layers.offmainthreadcomposition.force-enabled", false);
     sPrefLayersAccelerationForceEnabled = Preferences::GetBool("layers.acceleration.force-enabled", false);
     sPrefLayersAccelerationDisabled = Preferences::GetBool("layers.acceleration.disabled", false);
     sPrefLayersPreferOpenGL = Preferences::GetBool("layers.prefer-opengl", false);
     sPrefLayersPreferD3D9 = Preferences::GetBool("layers.prefer-d3d9", false);
-    sPrefLayersDrawFPS = Preferences::GetBool("layers.acceleration.draw-fps", false);
     sPrefLayersDump = Preferences::GetBool("layers.dump", false);
     sPrefLayersScrollGraph = Preferences::GetBool("layers.scroll-graph", false);
     sPrefLayersEnableTiles = Preferences::GetBool("layers.enable-tiles", false);
     sPrefLayoutFrameRate = Preferences::GetInt("layout.frame_rate", -1);
     sPrefLayersCompositionFrameRate = Preferences::GetInt("layers.offmainthreadcomposition.frame-rate", -1);
     sBufferRotationEnabled = Preferences::GetBool("layers.bufferrotation.enabled", true);
     sComponentAlphaEnabled = Preferences::GetBool("layers.componentalpha.enabled", true);
     sPrefBrowserTabsRemoteAutostart = Preferences::GetBool("browser.tabs.remote.autostart", false);
 
+    Preferences::AddBoolVarCache(&sPrefLayersDrawFPS, "layers.acceleration.draw-fps", false);
+
 #ifdef XP_WIN
     if (sPrefLayersAccelerationForceEnabled) {
       sLayersSupportsD3D9 = true;
     } else {
       nsCOMPtr<nsIGfxInfo> gfxInfo = do_GetService("@mozilla.org/gfx/info;1");
       if (gfxInfo) {
         int32_t status;
         if (NS_SUCCEEDED(gfxInfo->GetFeatureStatus(nsIGfxInfo::FEATURE_DIRECT3D_9_LAYERS, &status))) {
--- a/ipc/glue/Shmem.cpp
+++ b/ipc/glue/Shmem.cpp
@@ -451,19 +451,26 @@ Shmem::OpenExisting(IHadBetterBeIPDLCode
   else {
     NS_ERROR("unknown shmem type");
     return nullptr;
   }
 
   if (!segment)
     return 0;
 
+  Header* header = GetHeader(segment);
+
+  if (size != header->mSize) {
+    NS_ERROR("Wrong size for this Shmem!");
+    delete segment;
+    return nullptr;
+  }
+
   // The caller of this function may not know whether the segment is
   // unsafe or not
-  Header* header = GetHeader(segment);
   if (!header->mUnsafe && aProtect)
     Protect(segment);
 
   return segment;
 }
 
 // static
 void
@@ -566,18 +573,19 @@ Shmem::OpenExisting(IHadBetterBeIPDLCode
 #endif
   else {
     return nullptr;
   }
 
   if (!segment)
     return 0;
 
-  // this is the only validity check done OPT builds
+  // this is the only validity check done in non-DEBUG builds
   if (size != static_cast<size_t>(*PtrToSize(segment))) {
+    delete segment;
     return nullptr;
   }
 
   return segment;
 }
 
 // static
 void
--- a/js/jsd/jsd_obj.cpp
+++ b/js/jsd/jsd_obj.cpp
@@ -92,17 +92,17 @@ jsd_Constructing(JSDContext* jsdc, JSCon
 
     JSD_LOCK_OBJECTS(jsdc);
     jsdobj = jsd_GetJSDObjectForJSObject(jsdc, obj);
     if( jsdobj && !jsdobj->ctorURL )
     {
         script = frame.script();
         if( script )
         {
-            ctorURL = JS_GetScriptFilename(cx, script);
+            ctorURL = JS_GetScriptFilename(script);
             if( ctorURL )
                 jsdobj->ctorURL = jsd_AddAtom(jsdc, ctorURL);
 
             JSD_LOCK_SCRIPTS(jsdc);
             jsdscript = jsd_FindOrCreateJSDScript(jsdc, cx, script, frame);
             JSD_UNLOCK_SCRIPTS(jsdc);
             if( jsdscript && (ctorNameStr = jsd_GetScriptFunctionId(jsdc, jsdscript)) ) {
                 if( (ctorName = JS_EncodeString(cx, ctorNameStr)) ) {
--- a/js/jsd/jsd_scpt.cpp
+++ b/js/jsd/jsd_scpt.cpp
@@ -57,17 +57,17 @@ static JSDScript*
     lineno = (unsigned) JS_GetScriptBaseLineNumber(cx, script);
     if( lineno == 0 )
         return nullptr;
 
     jsdscript = (JSDScript*) calloc(1, sizeof(JSDScript));
     if( ! jsdscript )
         return nullptr;
 
-    raw_filename = JS_GetScriptFilename(cx,script);
+    raw_filename = JS_GetScriptFilename(script);
 
     JS_HashTableAdd(jsdc->scriptsTable, (void *)script, (void *)jsdscript);
     JS_APPEND_LINK(&jsdscript->links, &jsdc->scripts);
     jsdscript->jsdc         = jsdc;
     jsdscript->script       = script;  
     jsdscript->lineBase     = lineno;
     jsdscript->lineExtent   = (unsigned)NOT_SET_YET;
     jsdscript->data         = nullptr;
--- a/js/public/OldDebugAPI.h
+++ b/js/public/OldDebugAPI.h
@@ -8,16 +8,17 @@
 #define js_OldDebugAPI_h
 
 /*
  * JS debugger API.
  */
 
 #include "mozilla/NullPtr.h"
 
+#include "jsapi.h"
 #include "jsbytecode.h"
 
 #include "js/CallArgs.h"
 #include "js/TypeDecls.h"
 
 class JSAtom;
 class JSFreeOp;
 
@@ -25,50 +26,54 @@ namespace js {
 class StackFrame;
 class ScriptFrameIter;
 }
 
 // Raw JSScript* because this needs to be callable from a signal handler.
 extern JS_PUBLIC_API(unsigned)
 JS_PCToLineNumber(JSContext *cx, JSScript *script, jsbytecode *pc);
 
+extern JS_PUBLIC_API(const char *)
+JS_GetScriptFilename(JSScript *script);
+
 namespace JS {
 
 class FrameDescription
 {
   public:
-    FrameDescription(JSScript *script, JSFunction *fun, jsbytecode *pc)
-        : script_(script)
-        , fun_(fun)
-        , pc_(pc)
-        , linenoComputed(false)
-    {
-    }
-
     explicit FrameDescription(const js::ScriptFrameIter& iter);
 
     unsigned lineno() {
         if (!linenoComputed) {
             lineno_ = JS_PCToLineNumber(nullptr, script_, pc_);
             linenoComputed = true;
         }
         return lineno_;
     }
 
-    Heap<JSScript*> &script() {
+    const char *filename() const {
+        return JS_GetScriptFilename(script_);
+    }
+
+    JSFlatString *funDisplayName() const {
+        return funDisplayName_ ? JS_ASSERT_STRING_IS_FLAT(funDisplayName_) : nullptr;
+    }
+
+    // Both these locations should be traced during GC but otherwise not used;
+    // they are implementation details.
+    Heap<JSScript*> &markedLocation1() {
         return script_;
     }
-
-    Heap<JSFunction*> &fun() {
-        return fun_;
+    Heap<JSString*> &markedLocation2() {
+        return funDisplayName_;
     }
 
   private:
     Heap<JSScript*> script_;
-    Heap<JSFunction*> fun_;
+    Heap<JSString*> funDisplayName_;
     jsbytecode *pc_;
     unsigned lineno_;
     bool linenoComputed;
 };
 
 struct StackDescription
 {
     unsigned nframes;
@@ -292,19 +297,16 @@ JS_GetParentOrScopeChain(JSContext *cx, 
  * of any scope (returned via JS_GetFrameScopeChain or JS_GetFrameCalleeObject)
  * from "Proxy" to "Call", "Block", "With" etc.
  */
 extern JS_PUBLIC_API(const char *)
 JS_GetDebugClassName(JSObject *obj);
 
 /************************************************************************/
 
-extern JS_PUBLIC_API(const char *)
-JS_GetScriptFilename(JSContext *cx, JSScript *script);
-
 extern JS_PUBLIC_API(const jschar *)
 JS_GetScriptSourceMap(JSContext *cx, JSScript *script);
 
 extern JS_PUBLIC_API(unsigned)
 JS_GetScriptBaseLineNumber(JSContext *cx, JSScript *script);
 
 extern JS_PUBLIC_API(unsigned)
 JS_GetScriptLineExtent(JSContext *cx, JSScript *script);
--- a/js/src/jit-test/tests/asm.js/testCloning.js
+++ b/js/src/jit-test/tests/asm.js/testCloning.js
@@ -30,8 +30,15 @@ var code = asmCompile(USE_ASM + 'var g =
 var h1 = code();
 assertEq(h1(), 1);
 assertEq(h1(), 2);
 var h2 = code();
 assertEq(h2(), 1);
 assertEq(h1(), 3);
 assertEq(h2(), 2);
 assertEq(h1(), 4);
+
+var code = asmCompile(USE_ASM + "return {}");
+var h1 = code();
+var h2 = code();
+assertEq(h1 === h2, false);
+assertEq(Object.keys(h1).length, 0);
+assertEq(Object.keys(h2).length, 0);
--- a/js/src/jit/AsmJSModule.cpp
+++ b/js/src/jit/AsmJSModule.cpp
@@ -902,44 +902,52 @@ AsmJSModule::clone(JSContext *cx, Scoped
     return true;
 }
 
 void
 AsmJSModule::protectCode(JSRuntime *rt) const
 {
     JS_ASSERT(rt->currentThreadOwnsOperationCallbackLock());
 
+    codeIsProtected_ = true;
+
+    if (!pod.functionBytes_)
+        return;
+
     // Technically, we should be able to only take away the execute permissions,
     // however this seems to break our emulators which don't always check
     // execute permissions while executing code.
 #if defined(XP_WIN)
     DWORD oldProtect;
     if (!VirtualProtect(codeBase(), functionBytes(), PAGE_NOACCESS, &oldProtect))
         MOZ_CRASH();
 #else  // assume Unix
     if (mprotect(codeBase(), functionBytes(), PROT_NONE))
         MOZ_CRASH();
 #endif
-
-    codeIsProtected_ = true;
 }
 
 void
 AsmJSModule::unprotectCode(JSRuntime *rt) const
 {
+    JS_ASSERT(rt->currentThreadOwnsOperationCallbackLock());
+
+    codeIsProtected_ = false;
+
+    if (!pod.functionBytes_)
+        return;
+
 #if defined(XP_WIN)
     DWORD oldProtect;
     if (!VirtualProtect(codeBase(), functionBytes(), PAGE_EXECUTE_READWRITE, &oldProtect))
         MOZ_CRASH();
 #else  // assume Unix
     if (mprotect(codeBase(), functionBytes(), PROT_READ | PROT_WRITE | PROT_EXEC))
         MOZ_CRASH();
 #endif
-
-    codeIsProtected_ = false;
 }
 
 bool
 AsmJSModule::codeIsProtected(JSRuntime *rt) const
 {
     JS_ASSERT(rt->currentThreadOwnsOperationCallbackLock());
     return codeIsProtected_;
 }
--- a/js/src/jit/AsmJSModule.h
+++ b/js/src/jit/AsmJSModule.h
@@ -412,17 +412,17 @@ class AsmJSModule
     HeapPtr<ArrayBufferObject>            maybeHeap_;
 
     uint32_t                              charsBegin_;
     ScriptSource *                        scriptSource_;
 
     FunctionCountsVector                  functionCounts_;
 
     // This field is accessed concurrently when triggering the operation
-    // callback and access must be sychronized via the runtime's operation
+    // callback and access must be synchronized via the runtime's operation
     // callback lock.
     mutable bool                          codeIsProtected_;
 
   public:
     explicit AsmJSModule(ScriptSource *scriptSource, uint32_t charsBegin);
     ~AsmJSModule();
 
     void trace(JSTracer *trc) {
--- a/js/src/jit/AsmJSSignalHandlers.cpp
+++ b/js/src/jit/AsmJSSignalHandlers.cpp
@@ -453,16 +453,18 @@ HandleException(PEXCEPTION_POINTERS exce
     // If we faulted trying to execute code in 'module', this must be an
     // operation callback (see TriggerOperationCallbackForAsmJSCode). Redirect
     // execution to a trampoline which will call js_HandleExecutionInterrupt.
     // The trampoline will jump to activation->resumePC if execution isn't
     // interrupted.
     if (module.containsPC(faultingAddress)) {
         activation->setResumePC(pc);
         *ppc = module.operationCallbackExit();
+
+        JSRuntime::AutoLockForOperationCallback lock(rt);
         module.unprotectCode(rt);
         return true;
     }
 
 # if defined(JS_CODEGEN_X64)
     // These checks aren't necessary, but, since we can, check anyway to make
     // sure we aren't covering up a real bug.
     if (!module.maybeHeap() ||
@@ -638,31 +640,34 @@ HandleMachException(JSRuntime *rt, const
         return true;
 
     AsmJSActivation *activation = rt->mainThread.asmJSActivationStackFromAnyThread();
     if (!activation)
         return false;
 
     const AsmJSModule &module = activation->module();
     if (HandleSimulatorInterrupt(rt, activation, faultingAddress)) {
+        JSRuntime::AutoLockForOperationCallback lock(rt);
         module.unprotectCode(rt);
         return true;
     }
 
     if (!module.containsPC(pc))
         return false;
 
     // If we faulted trying to execute code in 'module', this must be an
     // operation callback (see TriggerOperationCallbackForAsmJSCode). Redirect
     // execution to a trampoline which will call js_HandleExecutionInterrupt.
     // The trampoline will jump to activation->resumePC if execution isn't
     // interrupted.
     if (module.containsPC(faultingAddress)) {
         activation->setResumePC(pc);
         *ppc = module.operationCallbackExit();
+
+        JSRuntime::AutoLockForOperationCallback lock(rt);
         module.unprotectCode(rt);
 
         // Update the thread state with the new pc.
         kret = thread_set_state(rtThread, x86_THREAD_STATE, (thread_state_t)&state, x86_THREAD_STATE_COUNT);
         return kret == KERN_SUCCESS;
     }
 
 # if defined(JS_CODEGEN_X64)
@@ -885,31 +890,34 @@ HandleSignal(int signum, siginfo_t *info
         return true;
 
     AsmJSActivation *activation = InnermostAsmJSActivation();
     if (!activation)
         return false;
 
     const AsmJSModule &module = activation->module();
     if (HandleSimulatorInterrupt(rt, activation, faultingAddress)) {
+        JSRuntime::AutoLockForOperationCallback lock(rt);
         module.unprotectCode(rt);
         return true;
     }
 
     if (!module.containsPC(pc))
         return false;
 
     // If we faulted trying to execute code in 'module', this must be an
     // operation callback (see TriggerOperationCallbackForAsmJSCode). Redirect
     // execution to a trampoline which will call js_HandleExecutionInterrupt.
     // The trampoline will jump to activation->resumePC if execution isn't
     // interrupted.
     if (module.containsPC(faultingAddress)) {
         activation->setResumePC(pc);
         *ppc = module.operationCallbackExit();
+
+        JSRuntime::AutoLockForOperationCallback lock(rt);
         module.unprotectCode(rt);
         return true;
     }
 
 # if defined(JS_CODEGEN_X64)
     // These checks aren't necessary, but, since we can, check anyway to make
     // sure we aren't covering up a real bug.
     if (!module.maybeHeap() ||
--- a/js/src/jit/mips/Assembler-mips.cpp
+++ b/js/src/jit/mips/Assembler-mips.cpp
@@ -560,19 +560,19 @@ Assembler::as_xori(Register rd, Register
 BufferOffset
 Assembler::as_bal(BOffImm16 off)
 {
     BufferOffset bo = writeInst(InstImm(op_regimm, zero, rt_bgezal, off).encode());
     return bo;
 }
 
 InstImm
-Assembler::getBranchCode(bool isCall)
+Assembler::getBranchCode(JumpOrCall jumpOrCall)
 {
-    if (isCall)
+    if (jumpOrCall == BranchIsCall)
         return InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
 
     return InstImm(op_beq, zero, zero, BOffImm16(0));
 }
 
 InstImm
 Assembler::getBranchCode(Register s, Register t, Condition c)
 {
@@ -603,20 +603,20 @@ Assembler::getBranchCode(Register s, Con
       case Assembler::LessThanOrEqual:
         return InstImm(op_blez, s, zero, BOffImm16(0));
       default:
         MOZ_ASSUME_UNREACHABLE("Condition not supported.");
     }
 }
 
 InstImm
-Assembler::getBranchCode(bool testTrue, FPConditionBit fcc)
+Assembler::getBranchCode(FloatTestKind testKind, FPConditionBit fcc)
 {
     JS_ASSERT(!(fcc && FccMask));
-    uint32_t rtField = ((testTrue ? 1 : 0) | (fcc << FccShift)) << RTShift;
+    uint32_t rtField = ((testKind == TestForTrue ? 1 : 0) | (fcc << FccShift)) << RTShift;
 
     return InstImm(op_cop1, rs_bc1, rtField, BOffImm16(0));
 }
 
 BufferOffset
 Assembler::as_j(JOffImm26 off)
 {
     BufferOffset bo = writeInst(InstJump(op_j, off).encode());
@@ -1167,110 +1167,71 @@ Assembler::as_sqrts(FloatRegister fd, Fl
 BufferOffset
 Assembler::as_sqrtd(FloatRegister fd, FloatRegister fs)
 {
     return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_sqrt_fmt).encode());
 }
 
 // FP compare instructions
 BufferOffset
-Assembler::as_cfs(FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
-{
-    return writeInst(InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_f_fmt).encode());
-}
-
-BufferOffset
-Assembler::as_cuns(FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+Assembler::as_cf(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
 {
-    return writeInst(InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_un_fmt).encode());
-}
-
-BufferOffset
-Assembler::as_ceqs(FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
-{
-    return writeInst(InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_eq_fmt).encode());
+    RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+    return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_f_fmt).encode());
 }
 
 BufferOffset
-Assembler::as_cueqs(FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+Assembler::as_cun(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
 {
-    return writeInst(InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_ueq_fmt).encode());
+    RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+    return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_un_fmt).encode());
 }
 
 BufferOffset
-Assembler::as_colts(FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+Assembler::as_ceq(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
 {
-    return writeInst(InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_olt_fmt).encode());
+    RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+    return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_eq_fmt).encode());
 }
 
 BufferOffset
-Assembler::as_cults(FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+Assembler::as_cueq(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
 {
-    return writeInst(InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_ult_fmt).encode());
-}
-
-BufferOffset
-Assembler::as_coles(FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
-{
-    return writeInst(InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_ole_fmt).encode());
+    RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+    return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_ueq_fmt).encode());
 }
 
 BufferOffset
-Assembler::as_cules(FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+Assembler::as_colt(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
 {
-    return writeInst(InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_ule_fmt).encode());
+    RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+    return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_olt_fmt).encode());
 }
 
 BufferOffset
-Assembler::as_cfd(FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
-{
-    return writeInst(InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_f_fmt).encode());
-}
-
-BufferOffset
-Assembler::as_cund(FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+Assembler::as_cult(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
 {
-    return writeInst(InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_un_fmt).encode());
-}
-
-BufferOffset
-Assembler::as_ceqd(FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
-{
-    return writeInst(InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_eq_fmt).encode());
+    RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+    return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_ult_fmt).encode());
 }
 
 BufferOffset
-Assembler::as_cueqd(FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+Assembler::as_cole(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
 {
-    return writeInst(InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_ueq_fmt).encode());
-}
-
-BufferOffset
-Assembler::as_coltd(FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
-{
-    return writeInst(InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_olt_fmt).encode());
+    RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+    return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_ole_fmt).encode());
 }
 
 BufferOffset
-Assembler::as_cultd(FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
+Assembler::as_cule(FloatFormat fmt, FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
 {
-    return writeInst(InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_ult_fmt).encode());
+    RSField rs = fmt == DoubleFloat ? rs_d : rs_s;
+    return writeInst(InstReg(op_cop1, rs, ft, fs, fcc << FccShift, ff_c_ule_fmt).encode());
 }
 
-BufferOffset
-Assembler::as_coled(FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
-{
-    return writeInst(InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_ole_fmt).encode());
-}
-
-BufferOffset
-Assembler::as_culed(FloatRegister fs, FloatRegister ft, FPConditionBit fcc)
-{
-    return writeInst(InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_ule_fmt).encode());
-}
 
 void
 Assembler::bind(Label *label, BufferOffset boff)
 {
     // If our caller didn't give us an explicit target to bind to
     // then we want to bind to the location of the next instruction
     BufferOffset dest = boff.assigned() ? boff : nextOffset();
     if (label->used()) {
--- a/js/src/jit/mips/Assembler-mips.h
+++ b/js/src/jit/mips/Assembler-mips.h
@@ -612,16 +612,31 @@ class Assembler
         FCC2,
         FCC3,
         FCC4,
         FCC5,
         FCC6,
         FCC7
     };
 
+    enum FloatFormat {
+        SingleFloat,
+        DoubleFloat
+    };
+
+    enum JumpOrCall {
+        BranchIsJump,
+        BranchIsCall
+    };
+
+    enum FloatTestKind {
+        TestForTrue,
+        TestForFalse
+    };
+
     // :( this should be protected, but since CodeGenerator
     // wants to use it, It needs to go out here :(
 
     BufferOffset nextOffset() {
         return m_buffer.nextOffset();
     }
 
   protected:
@@ -745,20 +760,20 @@ class Assembler
 
   public:
     BufferOffset align(int alignment);
     BufferOffset as_nop();
 
     // Branch and jump instructions
     BufferOffset as_bal(BOffImm16 off);
 
-    InstImm getBranchCode(bool isCall);
+    InstImm getBranchCode(JumpOrCall jumpOrCall);
     InstImm getBranchCode(Register s, Register t, Condition c);
     InstImm getBranchCode(Register s, Condition c);
-    InstImm getBranchCode(bool testTrue, FPConditionBit fcc);
+    InstImm getBranchCode(FloatTestKind testKind, FPConditionBit fcc);
 
     BufferOffset as_j(JOffImm26 off);
     BufferOffset as_jal(JOffImm26 off);
 
     BufferOffset as_jr(Register rs);
     BufferOffset as_jalr(Register rs);
 
     // Arithmetic instructions
@@ -890,34 +905,32 @@ class Assembler
     BufferOffset as_muls(FloatRegister fd, FloatRegister fs, FloatRegister ft);
     BufferOffset as_muld(FloatRegister fd, FloatRegister fs, FloatRegister ft);
     BufferOffset as_divs(FloatRegister fd, FloatRegister fs, FloatRegister ft);
     BufferOffset as_divd(FloatRegister fd, FloatRegister fs, FloatRegister ft);
     BufferOffset as_sqrts(FloatRegister fd, FloatRegister fs);
     BufferOffset as_sqrtd(FloatRegister fd, FloatRegister fs);
 
     // FP compare instructions
-    BufferOffset as_cfs(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
-    BufferOffset as_cuns(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
-    BufferOffset as_ceqs(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
-    BufferOffset as_cueqs(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
-    BufferOffset as_colts(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
-    BufferOffset as_cults(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
-    BufferOffset as_coles(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
-    BufferOffset as_cules(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
-
-    BufferOffset as_cfd(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
-    BufferOffset as_cund(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
-    BufferOffset as_ceqd(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
-    BufferOffset as_cueqd(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
-    BufferOffset as_coltd(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
-    BufferOffset as_cultd(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
-    BufferOffset as_coled(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
-    BufferOffset as_culed(FloatRegister fs, FloatRegister ft, FPConditionBit fcc = FCC0);
-
+    BufferOffset as_cf(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+                       FPConditionBit fcc = FCC0);
+    BufferOffset as_cun(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+                        FPConditionBit fcc = FCC0);
+    BufferOffset as_ceq(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+                        FPConditionBit fcc = FCC0);
+    BufferOffset as_cueq(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+                         FPConditionBit fcc = FCC0);
+    BufferOffset as_colt(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+                         FPConditionBit fcc = FCC0);
+    BufferOffset as_cult(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+                         FPConditionBit fcc = FCC0);
+    BufferOffset as_cole(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+                         FPConditionBit fcc = FCC0);
+    BufferOffset as_cule(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
+                         FPConditionBit fcc = FCC0);
 
     // label operations
     void bind(Label *label, BufferOffset boff = BufferOffset());
     void bind(RepatchLabel *label);
     uint32_t currentOffset() {
         return nextOffset().getOffset();
     }
     void retarget(Label *label, Label *target);
new file mode 100644
--- /dev/null
+++ b/js/src/jit/mips/MacroAssembler-mips.cpp
@@ -0,0 +1,3174 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips/MacroAssembler-mips.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineRegisters.h"
+#include "jit/MoveEmitter.h"
+
+using namespace js;
+using namespace jit;
+
+using mozilla::Abs;
+
+static const int32_t PAYLOAD_OFFSET = NUNBOX32_PAYLOAD_OFFSET;
+static const int32_t TAG_OFFSET = NUNBOX32_TYPE_OFFSET;
+
+static_assert(sizeof(intptr_t) == 4, "Not 64-bit clean.");
+
+// --- Type conversion helpers ---------------------------------------------
+
+// Normalize a C++ bool (1 byte) into a clean int32 by masking off any
+// garbage in the upper bytes.
+void
+MacroAssemblerMIPS::convertBoolToInt32(Register src, Register dest)
+{
+    // Note that C++ bool is only 1 byte, so zero extend it to clear the
+    // higher-order bits.
+    ma_and(dest, src, Imm32(0xff));
+}
+
+// int32 -> double: move the GPR into the FPU, then convert in place.
+void
+MacroAssemblerMIPS::convertInt32ToDouble(const Register &src, const FloatRegister &dest)
+{
+    as_mtc1(src, dest);
+    as_cvtdw(dest, dest);
+}
+
+// int32 in memory -> double; the word is staged through ScratchRegister.
+void
+MacroAssemblerMIPS::convertInt32ToDouble(const Address &src, FloatRegister dest)
+{
+    ma_lw(ScratchRegister, src);
+    as_mtc1(ScratchRegister, dest);
+    as_cvtdw(dest, dest);
+}
+
+// uint32 -> double. MIPS32 has no unsigned word->double conversion, so bias
+// the value by INT32_MIN, convert it as a signed word, then add 2^31 back in
+// the double domain.
+void
+MacroAssemblerMIPS::convertUInt32ToDouble(const Register &src, const FloatRegister &dest)
+{
+    // We use SecondScratchFloatReg because MacroAssembler::loadFromTypedArray
+    // calls with ScratchFloatReg as dest.
+    MOZ_ASSERT(dest != SecondScratchFloatReg);
+
+    // Subtract INT32_MIN to get a positive number
+    ma_subu(ScratchRegister, src, Imm32(INT32_MIN));
+
+    // Convert value
+    as_mtc1(ScratchRegister, dest);
+    as_cvtdw(dest, dest);
+
+    // Add unsigned value of INT32_MIN
+    ma_lid(SecondScratchFloatReg, 2147483648.0);
+    as_addd(dest, dest, SecondScratchFloatReg);
+}
+
+// uint32 -> float32: not implemented for MIPS yet.
+void
+MacroAssemblerMIPS::convertUInt32ToFloat32(const Register &src, const FloatRegister &dest)
+{
+    MOZ_ASSUME_UNREACHABLE("NYI");
+}
+
+// double -> float32 (single instruction: cvt.s.d).
+void
+MacroAssemblerMIPS::convertDoubleToFloat32(const FloatRegister &src, const FloatRegister &dest)
+{
+    as_cvtsd(dest, src);
+}
+
+// Truncate a double to an int32 in dest. If the value did not fit, the MIPS
+// truncate instruction clamps the result, which we can test for by comparing
+// against INT32_MAX.
+// NOTE: if the value really was supposed to be INT32_MAX then the branch to
+// fail is taken even though the conversion was exact.
+void
+MacroAssemblerMIPS::branchTruncateDouble(const FloatRegister &src, const Register &dest,
+                                         Label *fail)
+{
+    // Truncate, then move the integer result back into a GPR.
+    as_truncwd(ScratchFloatReg, src);
+    as_mfc1(dest, ScratchFloatReg);
+
+    // The clamped/invalid result is INT32_MAX; treat it as failure.
+    ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal);
+}
+
+// Checks whether a double is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+void
+MacroAssemblerMIPS::convertDoubleToInt32(const FloatRegister &src, const Register &dest,
+                                         Label *fail, bool negativeZeroCheck)
+{
+    // Convert double to int, then convert back and check if we have the
+    // same number.
+    as_cvtwd(ScratchFloatReg, src);
+    as_mfc1(dest, ScratchFloatReg);
+    as_cvtdw(ScratchFloatReg, ScratchFloatReg);
+    // Round-trip mismatch (or NaN) means the double was not an exact int32.
+    ma_bc1d(src, ScratchFloatReg, fail, Assembler::DoubleNotEqualOrUnordered);
+
+    if (negativeZeroCheck) {
+        Label notZero;
+        ma_b(dest, Imm32(0), &notZero, Assembler::NotEqual, ShortJump);
+        // Test and bail for -0.0, when integer result is 0
+        // Move the top word of the double into the output reg, if it is
+        // non-zero, then the original value was -0.0
+        // (for -0.0 the high word is 0x80000000, i.e. INT32_MIN).
+        as_mfc1_Odd(dest, src);
+        ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
+        bind(&notZero);
+    }
+}
+
+// Checks whether a float32 is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+void
+MacroAssemblerMIPS::convertFloat32ToInt32(const FloatRegister &src, const Register &dest,
+                                          Label *fail, bool negativeZeroCheck)
+{
+    // convert the floating point value to an integer, if it did not fit, then
+    // when we convert it *back* to  a float, it will have a different value,
+    // which we can test.
+    as_cvtws(ScratchFloatReg, src);
+    as_mfc1(dest, ScratchFloatReg);
+    as_cvtsw(ScratchFloatReg, ScratchFloatReg);
+    ma_bc1s(src, ScratchFloatReg, fail, Assembler::DoubleNotEqualOrUnordered);
+
+    if (negativeZeroCheck) {
+        Label notZero;
+        ma_b(dest, Imm32(0), &notZero, Assembler::NotEqual, ShortJump);
+        // Test and bail for -0.0, when integer result is 0
+        // Move the top word of the double into the output reg,
+        // if it is non-zero, then the original value was -0.0
+        as_mfc1_Odd(dest, src);
+        ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
+        bind(&notZero);
+    }
+}
+
+// float32 -> double (single instruction: cvt.d.s).
+void
+MacroAssemblerMIPS::convertFloat32ToDouble(const FloatRegister &src, const FloatRegister &dest)
+{
+    as_cvtds(dest, src);
+}
+
+// Truncate a float32 to an int32 in dest; branch to fail when the truncate
+// produced the clamped/invalid result INT32_MAX (see branchTruncateDouble).
+void
+MacroAssemblerMIPS::branchTruncateFloat32(const FloatRegister &src, const Register &dest,
+                                          Label *fail)
+{
+    as_truncws(ScratchFloatReg, src);
+    as_mfc1(dest, ScratchFloatReg);
+
+    ma_b(dest, Imm32(INT32_MAX), fail, Assembler::Equal);
+}
+
+// int32 -> float32: move into the FPU, then convert in place.
+void
+MacroAssemblerMIPS::convertInt32ToFloat32(const Register &src, const FloatRegister &dest)
+{
+    as_mtc1(src, dest);
+    as_cvtsw(dest, dest);
+}
+
+// int32 in memory -> float32, staged through ScratchRegister.
+void
+MacroAssemblerMIPS::convertInt32ToFloat32(const Address &src, FloatRegister dest)
+{
+    ma_lw(ScratchRegister, src);
+    as_mtc1(ScratchRegister, dest);
+    as_cvtsw(dest, dest);
+}
+
+// Two-operand double helpers: dest = dest OP src.
+void
+MacroAssemblerMIPS::addDouble(FloatRegister src, FloatRegister dest)
+{
+    as_addd(dest, dest, src);
+}
+
+void
+MacroAssemblerMIPS::subDouble(FloatRegister src, FloatRegister dest)
+{
+    as_subd(dest, dest, src);
+}
+
+void
+MacroAssemblerMIPS::mulDouble(FloatRegister src, FloatRegister dest)
+{
+    as_muld(dest, dest, src);
+}
+
+void
+MacroAssemblerMIPS::divDouble(FloatRegister src, FloatRegister dest)
+{
+    as_divd(dest, dest, src);
+}
+
+// In-place double negation.
+void
+MacroAssemblerMIPS::negateDouble(FloatRegister reg)
+{
+    as_negd(reg, reg);
+}
+
+// Increment a 64-bit counter stored at an absolute address as two 32-bit
+// words: the word at +0 is incremented and any carry is propagated into the
+// word at +4. (Assumes the low word sits at offset 0 — TODO confirm for
+// big-endian MIPS configurations.)
+void
+MacroAssemblerMIPS::inc64(AbsoluteAddress dest)
+{
+    ma_li(ScratchRegister, Imm32((int32_t)dest.addr));
+    as_lw(secondScratchReg_, ScratchRegister, 0);
+
+    as_addiu(secondScratchReg_, secondScratchReg_, 1);
+    as_sw(secondScratchReg_, ScratchRegister, 0);
+
+    // Carry: sltiu yields 1 iff the incremented word wrapped around to 0.
+    as_sltiu(secondScratchReg_, secondScratchReg_, 1);
+    as_lw(ScratchRegister, ScratchRegister, 4);
+
+    as_addu(secondScratchReg_, ScratchRegister, secondScratchReg_);
+
+    // The load above clobbered the address register; re-materialize it
+    // before storing the updated high word.
+    ma_li(ScratchRegister, Imm32((int32_t)dest.addr));
+    as_sw(secondScratchReg_, ScratchRegister, 4);
+}
+
+// Register-to-register move, implemented as OR with $zero.
+void
+MacroAssemblerMIPS::ma_move(Register rd, Register rs)
+{
+    as_or(rd, rs, zero);
+}
+
+// Load a GC-thing pointer, recording a data relocation so the GC can find
+// and update the embedded pointer. Always uses the patchable 2-instruction
+// form so the relocation has a fixed shape.
+void
+MacroAssemblerMIPS::ma_li(Register dest, const ImmGCPtr &ptr)
+{
+    writeDataRelocation(ptr);
+    ma_liPatchable(dest, Imm32(ptr.value));
+}
+
+// Load the (not yet known) address of an unbound AbsoluteLabel. The address
+// word in the instruction stream temporarily stores the previous entry of
+// the label's patch list; bind() walks this chain later.
+void
+MacroAssemblerMIPS::ma_li(const Register &dest, AbsoluteLabel *label)
+{
+    MOZ_ASSERT(!label->bound());
+    // Thread the patch list through the unpatched address word in the
+    // instruction stream.
+    BufferOffset bo = m_buffer.nextOffset();
+    ma_liPatchable(dest, Imm32(label->prev()));
+    label->setPrev(bo.getOffset());
+}
+
+// Load a 32-bit immediate with the shortest possible sequence: a single
+// addiu/ori/lui when the value fits, otherwise the full lui+ori pair.
+void
+MacroAssemblerMIPS::ma_li(Register dest, Imm32 imm)
+{
+    if (Imm16::isInSignedRange(imm.value)) {
+        as_addiu(dest, zero, imm.value);
+    } else if (Imm16::isInUnsignedRange(imm.value)) {
+        as_ori(dest, zero, Imm16::lower(imm).encode());
+    } else if (Imm16::lower(imm).encode() == 0) {
+        // Low half is zero: lui alone suffices.
+        as_lui(dest, Imm16::upper(imm).encode());
+    } else {
+        as_lui(dest, Imm16::upper(imm).encode());
+        as_ori(dest, dest, Imm16::lower(imm).encode());
+    }
+}
+
+
+// This method generates lui and ori instruction pair that can be modified by
+// updateLuiOriValue, either during compilation (eg. Assembler::bind), or
+// during execution (eg. jit::PatchJump).
+void
+MacroAssemblerMIPS::ma_liPatchable(Register dest, Imm32 imm)
+{
+    // Both instructions must be adjacent in the buffer so the patcher can
+    // rewrite them as a unit.
+    m_buffer.ensureSpace(2 * sizeof(uint32_t));
+    as_lui(dest, Imm16::upper(imm).encode());
+    as_ori(dest, dest, Imm16::lower(imm).encode());
+}
+
+// Pointer-typed convenience overload of the above.
+void
+MacroAssemblerMIPS::ma_liPatchable(Register dest, ImmPtr imm)
+{
+    return ma_liPatchable(dest, Imm32(int32_t(imm.value)));
+}
+
+// Shifts. Immediate shift amounts are reduced modulo 32 (matching the
+// 5-bit hardware shift field); rotate-left is synthesized from the
+// rotate-right instruction.
+void
+MacroAssemblerMIPS::ma_sll(Register rd, Register rt, Imm32 shift)
+{
+    as_sll(rd, rt, shift.value % 32);
+}
+void
+MacroAssemblerMIPS::ma_srl(Register rd, Register rt, Imm32 shift)
+{
+    as_srl(rd, rt, shift.value % 32);
+}
+
+void
+MacroAssemblerMIPS::ma_sra(Register rd, Register rt, Imm32 shift)
+{
+    as_sra(rd, rt, shift.value % 32);
+}
+
+void
+MacroAssemblerMIPS::ma_ror(Register rd, Register rt, Imm32 shift)
+{
+    as_rotr(rd, rt, shift.value % 32);
+}
+
+// Rotate left by n == rotate right by (32 - n).
+void
+MacroAssemblerMIPS::ma_rol(Register rd, Register rt, Imm32 shift)
+{
+    as_rotr(rd, rt, 32 - (shift.value % 32));
+}
+
+// Variable-amount shift forms.
+void
+MacroAssemblerMIPS::ma_sll(Register rd, Register rt, Register shift)
+{
+    as_sllv(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPS::ma_srl(Register rd, Register rt, Register shift)
+{
+    as_srlv(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPS::ma_sra(Register rd, Register rt, Register shift)
+{
+    as_srav(rd, rt, shift);
+}
+
+void
+MacroAssemblerMIPS::ma_ror(Register rd, Register rt, Register shift)
+{
+    as_rotrv(rd, rt, shift);
+}
+
+// Rotate left by a register amount: negate the amount (rotrv only reads the
+// low 5 bits, so -n behaves as 32-n) and rotate right.
+void
+MacroAssemblerMIPS::ma_rol(Register rd, Register rt, Register shift)
+{
+    ma_negu(ScratchRegister, shift);
+    as_rotrv(rd, rt, ScratchRegister);
+}
+
+// Two's-complement negation: rd = 0 - rs.
+void
+MacroAssemblerMIPS::ma_negu(Register rd, Register rs)
+{
+    as_subu(rd, zero, rs);
+}
+
+// Bitwise NOT via NOR with $zero.
+void
+MacroAssemblerMIPS::ma_not(Register rd, Register rs)
+{
+    as_nor(rd, rs, zero);
+}
+
+// And.
+void
+MacroAssemblerMIPS::ma_and(Register rd, Register rs)
+{
+    as_and(rd, rd, rs);
+}
+
+void
+MacroAssemblerMIPS::ma_and(Register rd, Register rs, Register rt)
+{
+    as_and(rd, rs, rt);
+}
+
+void
+MacroAssemblerMIPS::ma_and(Register rd, Imm32 imm)
+{
+    ma_and(rd, rd, imm);
+}
+
+void
+MacroAssemblerMIPS::ma_and(Register rd, Register rs, Imm32 imm)
+{
+    // andi zero-extends its 16-bit immediate, so the fast path is valid
+    // exactly for the unsigned 16-bit range.
+    if (Imm16::isInUnsignedRange(imm.value)) {
+        as_andi(rd, rs, imm.value);
+    } else {
+        ma_li(ScratchRegister, imm);
+        as_and(rd, rs, ScratchRegister);
+    }
+}
+
+// Or.
+void
+MacroAssemblerMIPS::ma_or(Register rd, Register rs)
+{
+    as_or(rd, rd, rs);
+}
+
+void
+MacroAssemblerMIPS::ma_or(Register rd, Register rs, Register rt)
+{
+    as_or(rd, rs, rt);
+}
+
+void
+MacroAssemblerMIPS::ma_or(Register rd, Imm32 imm)
+{
+    ma_or(rd, rd, imm);
+}
+
+// rd = rs | imm. The ori instruction zero-extends its 16-bit immediate, so
+// the single-instruction fast path is only correct for immediates in the
+// *unsigned* 16-bit range; a signed-range check would wrongly accept
+// negative immediates and drop their high bits (e.g. -1 would become
+// 0x0000ffff).
+void
+MacroAssemblerMIPS::ma_or(Register rd, Register rs, Imm32 imm)
+{
+    if (Imm16::isInUnsignedRange(imm.value)) {
+        as_ori(rd, rs, imm.value);
+    } else {
+        ma_li(ScratchRegister, imm);
+        as_or(rd, rs, ScratchRegister);
+    }
+}
+
+// xor
+void
+MacroAssemblerMIPS::ma_xor(Register rd, Register rs)
+{
+    as_xor(rd, rd, rs);
+}
+
+void
+MacroAssemblerMIPS::ma_xor(Register rd, Register rs, Register rt)
+{
+    as_xor(rd, rs, rt);
+}
+
+void
+MacroAssemblerMIPS::ma_xor(Register rd, Imm32 imm)
+{
+    ma_xor(rd, rd, imm);
+}
+
+// rd = rs ^ imm. Like ori, xori zero-extends its 16-bit immediate, so the
+// fast path must be restricted to the *unsigned* 16-bit range; accepting
+// signed-range (negative) immediates would silently lose their high bits.
+void
+MacroAssemblerMIPS::ma_xor(Register rd, Register rs, Imm32 imm)
+{
+    if (Imm16::isInUnsignedRange(imm.value)) {
+        as_xori(rd, rs, imm.value);
+    } else {
+        ma_li(ScratchRegister, imm);
+        as_xor(rd, rs, ScratchRegister);
+    }
+}
+
+// Arithmetic-based ops.
+
+// Add.
+// addiu sign-extends its immediate, so the signed 16-bit range check is the
+// correct fast-path condition here.
+void
+MacroAssemblerMIPS::ma_addu(Register rd, Register rs, Imm32 imm)
+{
+    if (Imm16::isInSignedRange(imm.value)) {
+        as_addiu(rd, rs, imm.value);
+    } else {
+        ma_li(ScratchRegister, imm);
+        as_addu(rd, rs, ScratchRegister);
+    }
+}
+
+void
+MacroAssemblerMIPS::ma_addu(Register rd, Register rs)
+{
+    as_addu(rd, rd, rs);
+}
+
+void
+MacroAssemblerMIPS::ma_addu(Register rd, Imm32 imm)
+{
+    ma_addu(rd, rd, imm);
+}
+
+// rd = rs + rt, branching to overflow on signed overflow. Sign logic:
+// operands of different signs can never overflow; same-sign operands
+// overflow iff the result's sign differs from theirs.
+void
+MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Register rt, Label *overflow)
+{
+    Label goodAddition;
+    as_addu(secondScratchReg_, rs, rt);
+
+    as_xor(ScratchRegister, rs, rt); // If different sign, no overflow
+    ma_b(ScratchRegister, Imm32(0), &goodAddition, Assembler::LessThan, ShortJump);
+
+    // If different sign, then overflow
+    as_xor(ScratchRegister, rs, secondScratchReg_);
+    ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
+
+    bind(&goodAddition);
+    // Result is written last so rd may alias rs or rt.
+    ma_move(rd, secondScratchReg_);
+}
+
+// Immediate variant: inline the same sign test when the immediate fits both
+// instruction forms, otherwise materialize it and reuse the register path.
+void
+MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Imm32 imm, Label *overflow)
+{
+    // Check for signed range because of as_addiu
+    // Check for unsigned range because of as_xori
+    if (Imm16::isInSignedRange(imm.value) && Imm16::isInUnsignedRange(imm.value)) {
+        Label goodAddition;
+        as_addiu(secondScratchReg_, rs, imm.value);
+
+        // If different sign, no overflow
+        as_xori(ScratchRegister, rs, imm.value);
+        ma_b(ScratchRegister, Imm32(0), &goodAddition, Assembler::LessThan, ShortJump);
+
+        // If different sign, then overflow
+        as_xor(ScratchRegister, rs, secondScratchReg_);
+        ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
+
+        bind(&goodAddition);
+        ma_move(rd, secondScratchReg_);
+    } else {
+        ma_li(ScratchRegister, imm);
+        ma_addTestOverflow(rd, rs, ScratchRegister, overflow);
+    }
+}
+
+// Subtract.
+// rd = rs - rt (no overflow trap: subu).
+void
+MacroAssemblerMIPS::ma_subu(Register rd, Register rs, Register rt)
+{
+    as_subu(rd, rs, rt);
+}
+
+// rd = rs - imm, using a single addiu of the negated immediate when that
+// fits. Negating INT32_MIN is signed-integer overflow (undefined behavior
+// in C++), so that value is excluded from the fast path explicitly; it falls
+// through to the materialize-and-subu path, which handles it correctly.
+void
+MacroAssemblerMIPS::ma_subu(Register rd, Register rs, Imm32 imm)
+{
+    if (imm.value != INT32_MIN && Imm16::isInSignedRange(-imm.value)) {
+        as_addiu(rd, rs, -imm.value);
+    } else {
+        ma_li(ScratchRegister, imm);
+        as_subu(rd, rs, ScratchRegister);
+    }
+}
+
+// rd -= imm.
+void
+MacroAssemblerMIPS::ma_subu(Register rd, Imm32 imm)
+{
+    ma_subu(rd, rd, imm);
+}
+
+// rd = rs - rt, branching to overflow on signed overflow. Same-sign
+// operands can never overflow a subtraction; for different-sign operands,
+// overflow occurred iff the result's sign differs from rs.
+void
+MacroAssemblerMIPS::ma_subTestOverflow(Register rd, Register rs, Register rt, Label *overflow)
+{
+    Label goodSubtraction;
+    // Use second scratch. The instructions generated by ma_b don't use the
+    // second scratch register.
+    ma_subu(secondScratchReg_, rs, rt);
+
+    as_xor(ScratchRegister, rs, rt); // If same sign, no overflow
+    ma_b(ScratchRegister, Imm32(0), &goodSubtraction, Assembler::GreaterThanOrEqual, ShortJump);
+
+    // If different sign, then overflow
+    as_xor(ScratchRegister, rs, secondScratchReg_);
+    ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
+
+    bind(&goodSubtraction);
+    ma_move(rd, secondScratchReg_);
+}
+
+// Immediate variant: rs - imm == rs + (-imm), so delegate to the add-based
+// check, except for INT32_MIN whose negation would not be representable.
+void
+MacroAssemblerMIPS::ma_subTestOverflow(Register rd, Register rs, Imm32 imm, Label *overflow)
+{
+    if (imm.value != INT32_MIN) {
+        ma_addTestOverflow(rd, rs, Imm32(-imm.value), overflow);
+    } else {
+        ma_li(ScratchRegister, Imm32(imm.value));
+        ma_subTestOverflow(rd, rs, ScratchRegister, overflow);
+    }
+}
+
+// Signed multiply of rs by an immediate; the 64-bit product is left in
+// HI/LO for the caller to consume.
+void
+MacroAssemblerMIPS::ma_mult(Register rs, Imm32 imm)
+{
+    ma_li(ScratchRegister, imm);
+    as_mult(rs, ScratchRegister);
+}
+
+// rd = rs * rt, branching to overflow unless the 64-bit product fits in 32
+// bits, i.e. HI equals the sign-extension (bit 31) of LO.
+void
+MacroAssemblerMIPS::ma_mul_branch_overflow(Register rd, Register rs, Register rt, Label *overflow)
+{
+    as_mult(rs, rt);
+    as_mflo(rd);
+    // Expected HI for an in-range product: all copies of LO's sign bit.
+    as_sra(ScratchRegister, rd, 31);
+    as_mfhi(secondScratchReg_);
+    ma_b(ScratchRegister, secondScratchReg_, overflow, Assembler::NotEqual);
+}
+
+void
+MacroAssemblerMIPS::ma_mul_branch_overflow(Register rd, Register rs, Imm32 imm, Label *overflow)
+{
+    ma_li(ScratchRegister, imm);
+    ma_mul_branch_overflow(rd, rs, ScratchRegister, overflow);
+}
+
+// rd = rs / rt, branching to overflow unless the division is exact (the
+// remainder, left in HI, is zero).
+// NOTE(review): INT32_MIN / -1 also leaves a zero remainder, so that
+// overflow is not caught here — presumably callers guard it; confirm.
+void
+MacroAssemblerMIPS::ma_div_branch_overflow(Register rd, Register rs, Register rt, Label *overflow)
+{
+    as_div(rs, rt);
+    as_mflo(rd);
+    as_mfhi(ScratchRegister);
+    ma_b(ScratchRegister, ScratchRegister, overflow, Assembler::NonZero);
+}
+
+void
+MacroAssemblerMIPS::ma_div_branch_overflow(Register rd, Register rs, Imm32 imm, Label *overflow)
+{
+    ma_li(ScratchRegister, imm);
+    ma_div_branch_overflow(rd, rs, ScratchRegister, overflow);
+}
+
+// Compute dest = src % ((1 << shift) - 1) without division, by summing the
+// shift-wide digits of |src| (see the MATH note below). If negZero is
+// non-null, a negative-zero result branches there instead of negating.
+void
+MacroAssemblerMIPS::ma_mod_mask(Register src, Register dest, Register hold, int32_t shift,
+                                Label *negZero)
+{
+    // MATH:
+    // We wish to compute x % (1<<y) - 1 for a known constant, y.
+    // First, let b = (1<<y) and C = (1<<y)-1, then think of the 32 bit
+    // dividend as a number in base b, namely
+    // c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n
+    // now, since both addition and multiplication commute with modulus,
+    // x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
+    // (c_0 % C) + (c_1%C) * (b % C) + (c_2 % C) * (b^2 % C)...
+    // now, since b == C + 1, b % C == 1, and b^n % C == 1
+    // this means that the whole thing simplifies to:
+    // c_0 + c_1 + c_2 ... c_n % C
+    // each c_n can easily be computed by a shift/bitextract, and the modulus
+    // can be maintained by simply subtracting by C whenever the number gets
+    // over C.
+    int32_t mask = (1 << shift) - 1;
+    Label head, negative, sumSigned, done;
+
+    // hold holds -1 if the value was negative, 1 otherwise.
+    // ScratchRegister holds the remaining bits that have not been processed
+    // lr serves as a temporary location to store extracted bits into as well
+    // as holding the trial subtraction as a temp value dest is the
+    // accumulator (and holds the final result)
+
+    // move the whole value into the scratch register, setting the codition
+    // codes so we can muck with them later.
+    ma_move(ScratchRegister, src);
+    // Zero out the dest.
+    ma_subu(dest, dest, dest);
+    // Set the hold appropriately.
+    ma_b(ScratchRegister, ScratchRegister, &negative, Signed, ShortJump);
+    ma_li(hold, Imm32(1));
+    ma_b(&head, ShortJump);
+
+    bind(&negative);
+    // Work on |src| and remember the sign for the end.
+    ma_li(hold, Imm32(-1));
+    ma_negu(ScratchRegister, ScratchRegister);
+
+    // Begin the main loop.
+    bind(&head);
+
+    // Extract the bottom bits into lr.
+    ma_and(secondScratchReg_, ScratchRegister, Imm32(mask));
+    // Add those bits to the accumulator.
+    as_addu(dest, dest, secondScratchReg_);
+    // Do a trial subtraction, this is the same operation as cmp, but we
+    // store the dest
+    ma_subu(secondScratchReg_, dest, Imm32(mask));
+    // If (sum - C) > 0, store sum - C back into sum, thus performing a
+    // modulus.
+    ma_b(secondScratchReg_, secondScratchReg_, &sumSigned, Signed, ShortJump);
+    ma_move(dest, secondScratchReg_);
+    bind(&sumSigned);
+    // Get rid of the bits that we extracted before.
+    as_srl(ScratchRegister, ScratchRegister, shift);
+    // If the shift produced zero, finish, otherwise, continue in the loop.
+    ma_b(ScratchRegister, ScratchRegister, &head, NonZero, ShortJump);
+    // Check the hold to see if we need to negate the result.
+    ma_b(hold, hold, &done, NotSigned, ShortJump);
+
+    // If the hold was non-zero, negate the result to be in line with
+    // what JS wants
+    if (negZero != nullptr) {
+        // Jump out in case of negative zero.
+        // NOTE(review): hold is +/-1 at this point, so a Zero test on hold
+        // looks never-taken; this mirrors the ARM version's flag-based
+        // check — confirm the intended -0 detection.
+        ma_b(hold, hold, negZero, Zero);
+        ma_negu(dest, dest);
+    } else {
+        ma_negu(dest, dest);
+    }
+
+    bind(&done);
+}
+
+// Memory.
+
+// Load of the given size/extension from base+offset. When the offset does
+// not fit the 16-bit signed displacement field, fold it into ScratchRegister
+// first and load with a zero displacement.
+void
+MacroAssemblerMIPS::ma_load(const Register &dest, Address address,
+                            LoadStoreSize size, LoadStoreExtension extension)
+{
+    int16_t encodedOffset;
+    Register base;
+    if (!Imm16::isInSignedRange(address.offset)) {
+        ma_li(ScratchRegister, Imm32(address.offset));
+        as_addu(ScratchRegister, address.base, ScratchRegister);
+        base = ScratchRegister;
+        encodedOffset = Imm16(0).encode();
+    } else {
+        encodedOffset = Imm16(address.offset).encode();
+        base = address.base;
+    }
+
+    switch (size) {
+      case SizeByte:
+        if (ZeroExtend == extension)
+            as_lbu(dest, base, encodedOffset);
+        else
+            as_lb(dest, base, encodedOffset);
+        break;
+      case SizeHalfWord:
+        if (ZeroExtend == extension)
+            as_lhu(dest, base, encodedOffset);
+        else
+            as_lh(dest, base, encodedOffset);
+        break;
+      case SizeWord:
+        as_lw(dest, base, encodedOffset);
+        break;
+      default:
+        MOZ_ASSUME_UNREACHABLE("Invalid argument for ma_load");
+        break;
+    }
+}
+
+// Load from base + (index << scale) + offset: fold base+scaled-index into
+// the second scratch register, then reuse the Address path for the offset.
+void
+MacroAssemblerMIPS::ma_load(const Register &dest, const BaseIndex &src,
+                            LoadStoreSize size, LoadStoreExtension extension)
+{
+    computeScaledAddress(src, secondScratchReg_);
+    ma_load(dest, Address(secondScratchReg_, src.offset), size, extension);
+}
+
+// Store of the given size to base+offset; mirrors ma_load's handling of
+// offsets that do not fit the 16-bit signed displacement field.
+void
+MacroAssemblerMIPS::ma_store(const Register &data, Address address, LoadStoreSize size,
+                             LoadStoreExtension extension)
+{
+    int16_t encodedOffset;
+    Register base;
+    if (!Imm16::isInSignedRange(address.offset)) {
+        ma_li(ScratchRegister, Imm32(address.offset));
+        as_addu(ScratchRegister, address.base, ScratchRegister);
+        base = ScratchRegister;
+        encodedOffset = Imm16(0).encode();
+    } else {
+        encodedOffset = Imm16(address.offset).encode();
+        base = address.base;
+    }
+
+    switch (size) {
+      case SizeByte:
+        as_sb(data, base, encodedOffset);
+        break;
+      case SizeHalfWord:
+        as_sh(data, base, encodedOffset);
+        break;
+      case SizeWord:
+        as_sw(data, base, encodedOffset);
+        break;
+      default:
+        MOZ_ASSUME_UNREACHABLE("Invalid argument for ma_store");
+        break;
+    }
+}
+
+// Store to base + (index << scale) + offset via the second scratch register.
+void
+MacroAssemblerMIPS::ma_store(const Register &data, const BaseIndex &dest,
+                             LoadStoreSize size, LoadStoreExtension extension)
+{
+    computeScaledAddress(dest, secondScratchReg_);
+    ma_store(data, Address(secondScratchReg_, dest.offset), size, extension);
+}
+
+// Store an immediate to a BaseIndex address. Both scratch registers are
+// needed: one for the full effective address, one for the immediate.
+void
+MacroAssemblerMIPS::ma_store(const Imm32 &imm, const BaseIndex &dest,
+                             LoadStoreSize size, LoadStoreExtension extension)
+{
+    // Make sure that secondScratchReg_ contains absolute address so that
+    // offset is 0.
+    computeEffectiveAddress(dest, secondScratchReg_);
+
+    // Scrach register is free now, use it for loading imm value
+    ma_li(ScratchRegister, imm);
+
+    // with offset=0 ScratchRegister will not be used in ma_store()
+    // so we can use it as a parameter here
+    ma_store(ScratchRegister, Address(secondScratchReg_, 0), size, extension);
+}
+
+// dest = base + (index << scale). Note: the BaseIndex offset is NOT added
+// here; callers fold it in separately (see ma_load/ma_store).
+void
+MacroAssemblerMIPS::computeScaledAddress(const BaseIndex &address, Register dest)
+{
+    int32_t shift = Imm32::ShiftOf(address.scale).value;
+    if (shift) {
+        ma_sll(dest, address.index, Imm32(shift));
+        as_addu(dest, address.base, dest);
+    } else {
+        // Scale of 1: plain base + index.
+        as_addu(dest, address.base, address.index);
+    }
+}
+
+// Shortcut for when we know we're transferring 32 bits of data.
+void
+MacroAssemblerMIPS::ma_lw(Register data, Address address)
+{
+    ma_load(data, address, SizeWord);
+}
+
+void
+MacroAssemblerMIPS::ma_sw(Register data, Address address)
+{
+    ma_store(data, address, SizeWord);
+}
+
+// Store a 32-bit immediate. The immediate lives in ScratchRegister, so the
+// base register must not alias it (nor the second scratch on the slow path).
+void
+MacroAssemblerMIPS::ma_sw(Imm32 imm, Address address)
+{
+    MOZ_ASSERT(address.base != ScratchRegister);
+    ma_li(ScratchRegister, imm);
+
+    if (Imm16::isInSignedRange(address.offset)) {
+        as_sw(ScratchRegister, address.base, Imm16(address.offset).encode());
+    } else {
+        MOZ_ASSERT(address.base != secondScratchReg_);
+
+        // Offset does not fit the displacement field: compute the full
+        // address in the second scratch register.
+        ma_li(secondScratchReg_, Imm32(address.offset));
+        as_addu(secondScratchReg_, address.base, secondScratchReg_);
+        as_sw(ScratchRegister, secondScratchReg_, 0);
+    }
+}
+
+// Pop one word: load from the stack top, then bump the stack pointer.
+void
+MacroAssemblerMIPS::ma_pop(Register r)
+{
+    as_lw(r, StackPointer, 0);
+    as_addiu(StackPointer, StackPointer, sizeof(intptr_t));
+}
+
+// Push one word: decrement the stack pointer, then store.
+void
+MacroAssemblerMIPS::ma_push(Register r)
+{
+    if (r == sp) {
+        // Pushing sp requires one more instruction.
+        // Copy sp first so the value stored is sp before the decrement.
+        ma_move(ScratchRegister, sp);
+        r = ScratchRegister;
+    }
+
+    as_addiu(StackPointer, StackPointer, -sizeof(intptr_t));
+    as_sw(r, StackPointer, 0);
+}
+
+// Branches when done from within mips-specific code.
+
+// Branch on (lhs c rhs). Equality and zero/sign tests map directly onto
+// hardware branches; other conditions go through ma_cmp, which reduces them
+// to an Equal/NotEqual test of ScratchRegister against zero.
+void
+MacroAssemblerMIPS::ma_b(Register lhs, Register rhs, Label *label, Condition c, JumpKind jumpKind)
+{
+    switch (c) {
+      case Equal :
+      case NotEqual:
+        branchWithCode(getBranchCode(lhs, rhs, c), label, jumpKind);
+        break;
+      case Always:
+        ma_b(label, jumpKind);
+        break;
+      case Zero:
+      case NonZero:
+      case Signed:
+      case NotSigned:
+        // Single-operand tests: both register arguments must be the same.
+        MOZ_ASSERT(lhs == rhs);
+        branchWithCode(getBranchCode(lhs, c), label, jumpKind);
+        break;
+      default:
+        Condition cond = ma_cmp(ScratchRegister, lhs, rhs, c);
+        branchWithCode(getBranchCode(ScratchRegister, cond), label, jumpKind);
+        break;
+    }
+}
+
+// Branch on (lhs c imm). Comparisons against zero use the single-operand
+// branch encodings; AboveOrEqual 0 is unconditionally true and Below 0 is
+// unconditionally false for unsigned comparisons.
+void
+MacroAssemblerMIPS::ma_b(Register lhs, Imm32 imm, Label *label, Condition c, JumpKind jumpKind)
+{
+    MOZ_ASSERT(c != Overflow);
+    if (imm.value == 0) {
+        if (c == Always || c == AboveOrEqual)
+            ma_b(label, jumpKind);
+        else if (c == Below)
+            ; // This condition is always false. No branch required.
+        else
+            branchWithCode(getBranchCode(lhs, c), label, jumpKind);
+    } else {
+        MOZ_ASSERT(lhs != ScratchRegister);
+        ma_li(ScratchRegister, imm);
+        ma_b(lhs, ScratchRegister, label, c, jumpKind);
+    }
+}
+
+// Branch on (lhs c *addr); the memory operand is loaded into scratch.
+void
+MacroAssemblerMIPS::ma_b(Register lhs, Address addr, Label *label, Condition c, JumpKind jumpKind)
+{
+    MOZ_ASSERT(lhs != ScratchRegister);
+    ma_lw(ScratchRegister, addr);
+    ma_b(lhs, ScratchRegister, label, c, jumpKind);
+}
+
+// Branch on (*addr c imm); uses the second scratch so the immediate path
+// above can still use ScratchRegister.
+void
+MacroAssemblerMIPS::ma_b(Address addr, Imm32 imm, Label *label, Condition c, JumpKind jumpKind)
+{
+    ma_lw(secondScratchReg_, addr);
+    ma_b(secondScratchReg_, imm, label, c, jumpKind);
+}
+
+// Unconditional branch.
+void
+MacroAssemblerMIPS::ma_b(Label *label, JumpKind jumpKind)
+{
+    branchWithCode(getBranchCode(BranchIsJump), label, jumpKind);
+}
+
+// Unconditional branch-and-link (call).
+void
+MacroAssemblerMIPS::ma_bal(Label *label, JumpKind jumpKind)
+{
+    branchWithCode(getBranchCode(BranchIsCall), label, jumpKind);
+}
+
+// Emit the branch instruction |code| targeting |label|. For a bound label,
+// use a short PC-relative branch when in range, else a long (absolute)
+// jump sequence. For an unbound label, emit a branch whose following word
+// threads the label's patch chain, reserving space for the eventual fixup.
+void
+MacroAssemblerMIPS::branchWithCode(InstImm code, Label *label, JumpKind jumpKind)
+{
+    // Reference encodings: bgezal $zero (== unconditional call) and
+    // beq $zero,$zero (== unconditional jump), used to classify |code|.
+    InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
+    InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+    if (label->bound()) {
+        int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
+
+        // Prefer the short form whenever the target is reachable.
+        if (BOffImm16::isInRange(offset))
+            jumpKind = ShortJump;
+
+        if (jumpKind == ShortJump) {
+            MOZ_ASSERT(BOffImm16::isInRange(offset));
+            code.setBOffImm16(BOffImm16(offset));
+            writeInst(code.encode());
+            as_nop();   // branch delay slot
+            return;
+        }
+
+        // Generate long jump because target is out of range of short jump.
+        if (code.encode() == inst_bgezal.encode()) {
+            // Handle long call
+            addLongJump(nextOffset());
+            ma_liPatchable(ScratchRegister, Imm32(label->offset()));
+            as_jalr(ScratchRegister);
+            as_nop();
+            return;
+        }
+        if (code.encode() == inst_beq.encode()) {
+            // Handle long jump
+            addLongJump(nextOffset());
+            ma_liPatchable(ScratchRegister, Imm32(label->offset()));
+            as_jr(ScratchRegister);
+            as_nop();
+            return;
+        }
+
+        // Handle long conditional branch: invert the condition and hop over
+        // the lui/ori/jr/nop long-jump sequence that follows.
+        writeInst(invertBranch(code, BOffImm16(5 * sizeof(uint32_t))).encode());
+        // No need for a "nop" here because we can clobber scratch.
+        addLongJump(nextOffset());
+        ma_liPatchable(ScratchRegister, Imm32(label->offset()));
+        as_jr(ScratchRegister);
+        as_nop();
+        return;
+    }
+
+    // Generate open jump and link it to a label.
+
+    // Second word holds a pointer to the next branch in label's chain.
+    uint32_t nextInChain = label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+    if (jumpKind == ShortJump) {
+        // Make the whole branch continous in the buffer.
+        m_buffer.ensureSpace(2 * sizeof(uint32_t));
+
+        // Indicate that this is short jump with offset 4.
+        code.setBOffImm16(BOffImm16(4));
+        BufferOffset bo = writeInst(code.encode());
+        writeInst(nextInChain);
+        label->use(bo.getOffset());
+        return;
+    }
+
+    bool conditional = (code.encode() != inst_bgezal.encode() &&
+                        code.encode() != inst_beq.encode());
+
+    // Make the whole branch continous in the buffer.
+    m_buffer.ensureSpace((conditional ? 5 : 4) * sizeof(uint32_t));
+
+    BufferOffset bo = writeInst(code.encode());
+    writeInst(nextInChain);
+    label->use(bo.getOffset());
+    // Leave space for potential long jump. Conditional branches need one
+    // extra slot for the inverted-branch form used at bind time.
+    as_nop();
+    as_nop();
+    if (conditional)
+        as_nop();
+}
+
+// Reduce an ordering condition on (lhs, rhs) to a set-on-less-than into
+// |scratch|, returning the Equal/NotEqual condition the caller should then
+// branch on (scratch vs. $zero). Equality-style conditions must not come
+// here — they have direct branch encodings.
+Assembler::Condition
+MacroAssemblerMIPS::ma_cmp(Register scratch, Register lhs, Register rhs, Condition c)
+{
+    switch (c) {
+      case Above:
+        // bgtu s,t,label =>
+        //   sltu at,t,s
+        //   bne at,$zero,offs
+        as_sltu(scratch, rhs, lhs);
+        return NotEqual;
+      case AboveOrEqual:
+        // bgeu s,t,label =>
+        //   sltu at,s,t
+        //   beq at,$zero,offs
+        as_sltu(scratch, lhs, rhs);
+        return Equal;
+      case Below:
+        // bltu s,t,label =>
+        //   sltu at,s,t
+        //   bne at,$zero,offs
+        as_sltu(scratch, lhs, rhs);
+        return NotEqual;
+      case BelowOrEqual:
+        // bleu s,t,label =>
+        //   sltu at,t,s
+        //   beq at,$zero,offs
+        as_sltu(scratch, rhs, lhs);
+        return Equal;
+      case GreaterThan:
+        // bgt s,t,label =>
+        //   slt at,t,s
+        //   bne at,$zero,offs
+        as_slt(scratch, rhs, lhs);
+        return NotEqual;
+      case GreaterThanOrEqual:
+        // bge s,t,label =>
+        //   slt at,s,t
+        //   beq at,$zero,offs
+        as_slt(scratch, lhs, rhs);
+        return Equal;
+      case LessThan:
+        // blt s,t,label =>
+        //   slt at,s,t
+        //   bne at,$zero,offs
+        as_slt(scratch, lhs, rhs);
+        return NotEqual;
+      case LessThanOrEqual:
+        // ble s,t,label =>
+        //   slt at,t,s
+        //   beq at,$zero,offs
+        as_slt(scratch, rhs, lhs);
+        return Equal;
+      case Equal :
+      case NotEqual:
+      case Zero:
+      case NonZero:
+      case Always:
+      case Signed:
+      case NotSigned:
+        MOZ_ASSUME_UNREACHABLE("There is a better way to compare for equality.");
+        break;
+      case Overflow:
+        MOZ_ASSUME_UNREACHABLE("Overflow condition not supported for MIPS.");
+        break;
+      default:
+        MOZ_ASSUME_UNREACHABLE("Invalid condition for branch.");
+    }
+    return Always;
+}
+
+// Materialize the boolean value of (rs c rt) into rd (1 if the condition
+// holds, 0 otherwise) without branching, using slt/sltu plus xori to invert
+// where needed.
+void
+MacroAssemblerMIPS::ma_cmp_set(Register rd, Register rs, Register rt, Condition c)
+{
+    switch (c) {
+      case Equal :
+        // seq d,s,t =>
+        //   xor d,s,t
+        //   sltiu d,d,1
+        as_xor(rd, rs, rt);
+        as_sltiu(rd, rd, 1);
+        break;
+      case NotEqual:
+        // sne d,s,t =>
+        //   xor d,s,t
+        //   sltu d,$zero,d
+        as_xor(rd, rs, rt);
+        as_sltu(rd, zero, rd);
+        break;
+      case Above:
+        // sgtu d,s,t =>
+        //   sltu d,t,s
+        as_sltu(rd, rt, rs);
+        break;
+      case AboveOrEqual:
+        // sgeu d,s,t =>
+        //   sltu d,s,t
+        //   xori d,d,1
+        as_sltu(rd, rs, rt);
+        as_xori(rd, rd, 1);
+        break;
+      case Below:
+        // sltu d,s,t
+        as_sltu(rd, rs, rt);
+        break;
+      case BelowOrEqual:
+        // sleu d,s,t =>
+        //   sltu d,t,s
+        //   xori d,d,1
+        as_sltu(rd, rt, rs);
+        as_xori(rd, rd, 1);
+        break;
+      case GreaterThan:
+        // sgt d,s,t =>
+        //   slt d,t,s
+        as_slt(rd, rt, rs);
+        break;
+      case GreaterThanOrEqual:
+        // sge d,s,t =>
+        //   slt d,s,t
+        //   xori d,d,1
+        as_slt(rd, rs, rt);
+        as_xori(rd, rd, 1);
+        break;
+      case LessThan:
+        // slt d,s,t
+        as_slt(rd, rs, rt);
+        break;
+      case LessThanOrEqual:
+        // sle d,s,t =>
+        //   slt d,t,s
+        //   xori d,d,1
+        as_slt(rd, rt, rs);
+        as_xori(rd, rd, 1);
+        break;
+      case Zero:
+        // Single-operand test: both register arguments must match.
+        MOZ_ASSERT(rs == rt);
+        // seq d,s,$zero =>
+        //   xor d,s,$zero
+        //   sltiu d,d,1
+        as_xor(rd, rs, zero);
+        as_sltiu(rd, rd, 1);
+        break;
+      case NonZero:
+        // sne d,s,$zero =>
+        //   xor d,s,$zero
+        //   sltu d,$zero,d
+        as_xor(rd, rs, zero);
+        as_sltu(rd, zero, rd);
+        break;
+      case Signed:
+        as_slt(rd, rs, zero);
+        break;
+      case NotSigned:
+        // sge d,s,$zero =>
+        //   slt d,s,$zero
+        //   xori d,d,1
+        as_slt(rd, rs, zero);
+        as_xori(rd, rd, 1);
+        break;
+      default:
+        MOZ_ASSUME_UNREACHABLE("Invalid condition for ma_cmp_set.");
+        break;
+    }
+}
+
+// Emit the MIPS c.<cond>.<fmt> compare for DoubleCondition 'c' on lhs/rhs,
+// setting FP condition bit 'fcc'. Not every JS condition maps directly onto
+// a hardware compare, so *testKind tells the caller whether the condition
+// holds when the bit is SET (TestForTrue) or when it is CLEAR (TestForFalse).
+// Ordered conditions use c.eq/c.olt/c.ole; the *OrUnordered variants use the
+// unordered forms c.ueq/c.ult/c.ule; Greater* swaps the operands.
+void
+MacroAssemblerMIPS::compareFloatingPoint(FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
+                                         DoubleCondition c, FloatTestKind *testKind,
+                                         FPConditionBit fcc)
+{
+    switch (c) {
+      case DoubleOrdered:
+        // "ordered" == NOT unordered: compare with c.un, then test for false.
+        as_cun(fmt, lhs, rhs, fcc);
+        *testKind = TestForFalse;
+        break;
+      case DoubleEqual:
+        as_ceq(fmt, lhs, rhs, fcc);
+        *testKind = TestForTrue;
+        break;
+      case DoubleNotEqual:
+        // NOT (equal-or-unordered): NaN compares as "not equal" here.
+        as_cueq(fmt, lhs, rhs, fcc);
+        *testKind = TestForFalse;
+        break;
+      case DoubleGreaterThan:
+        // a > b  <=>  b < a (ordered), operands swapped.
+        as_colt(fmt, rhs, lhs, fcc);
+        *testKind = TestForTrue;
+        break;
+      case DoubleGreaterThanOrEqual:
+        as_cole(fmt, rhs, lhs, fcc);
+        *testKind = TestForTrue;
+        break;
+      case DoubleLessThan:
+        as_colt(fmt, lhs, rhs, fcc);
+        *testKind = TestForTrue;
+        break;
+      case DoubleLessThanOrEqual:
+        as_cole(fmt, lhs, rhs, fcc);
+        *testKind = TestForTrue;
+        break;
+      case DoubleUnordered:
+        as_cun(fmt, lhs, rhs, fcc);
+        *testKind = TestForTrue;
+        break;
+      case DoubleEqualOrUnordered:
+        as_cueq(fmt, lhs, rhs, fcc);
+        *testKind = TestForTrue;
+        break;
+      case DoubleNotEqualOrUnordered:
+        // NOT (ordered equal).
+        as_ceq(fmt, lhs, rhs, fcc);
+        *testKind = TestForFalse;
+        break;
+      case DoubleGreaterThanOrUnordered:
+        as_cult(fmt, rhs, lhs, fcc);
+        *testKind = TestForTrue;
+        break;
+      case DoubleGreaterThanOrEqualOrUnordered:
+        as_cule(fmt, rhs, lhs, fcc);
+        *testKind = TestForTrue;
+        break;
+      case DoubleLessThanOrUnordered:
+        as_cult(fmt, lhs, rhs, fcc);
+        *testKind = TestForTrue;
+        break;
+      case DoubleLessThanOrEqualOrUnordered:
+        as_cule(fmt, lhs, rhs, fcc);
+        *testKind = TestForTrue;
+        break;
+      default:
+        MOZ_ASSUME_UNREACHABLE("Invalid DoubleCondition.");
+        break;
+    }
+}
+
+// dest = (lhs <c> rhs) ? 1 : 0 for doubles. Emits the FP compare (default
+// condition bit), then conditionally moves the pre-loaded 1 into dest with
+// movt/movf depending on which sense of the bit encodes the condition.
+void
+MacroAssemblerMIPS::ma_cmp_set_double(Register dest, FloatRegister lhs, FloatRegister rhs,
+                                      DoubleCondition c)
+{
+    ma_li(dest, Imm32(0));
+    ma_li(ScratchRegister, Imm32(1));
+
+    FloatTestKind moveCondition;
+    compareFloatingPoint(DoubleFloat, lhs, rhs, c, &moveCondition);
+
+    if (moveCondition == TestForTrue)
+        as_movt(dest, ScratchRegister);
+    else
+        as_movf(dest, ScratchRegister);
+}
+
+// Single-precision counterpart of ma_cmp_set_double.
+void
+MacroAssemblerMIPS::ma_cmp_set_float32(Register dest, FloatRegister lhs, FloatRegister rhs,
+                                       DoubleCondition c)
+{
+    ma_li(dest, Imm32(0));
+    ma_li(ScratchRegister, Imm32(1));
+
+    FloatTestKind moveCondition;
+    compareFloatingPoint(SingleFloat, lhs, rhs, c, &moveCondition);
+
+    if (moveCondition == TestForTrue)
+        as_movt(dest, ScratchRegister);
+    else
+        as_movf(dest, ScratchRegister);
+}
+
+// Immediate operand variant: materialize imm into ScratchRegister, delegate.
+void
+MacroAssemblerMIPS::ma_cmp_set(Register rd, Register rs, Imm32 imm, Condition c)
+{
+    ma_li(ScratchRegister, imm);
+    ma_cmp_set(rd, rs, ScratchRegister, c);
+}
+
+// Memory operand variant (rhs loaded from addr).
+void
+MacroAssemblerMIPS::ma_cmp_set(Register rd, Register rs, Address addr, Condition c)
+{
+    ma_lw(ScratchRegister, addr);
+    ma_cmp_set(rd, rs, ScratchRegister, c);
+}
+
+// Memory operand variant (lhs loaded from memory).
+void
+MacroAssemblerMIPSCompat::ma_cmp_set(Register dst, Address lhs, Register rhs, Condition c)
+{
+    ma_lw(ScratchRegister, lhs);
+    ma_cmp_set(dst, ScratchRegister, rhs, c);
+}
+
+// fp instructions
+
+// Load a float immediate: move its raw 32-bit pattern through ScratchRegister
+// into the FPU register with mtc1.
+void
+MacroAssemblerMIPS::ma_lis(FloatRegister dest, float value)
+{
+    Imm32 imm(mozilla::BitwiseCast<uint32_t>(value));
+
+    ma_li(ScratchRegister, imm);
+    as_mtc1(ScratchRegister, dest);
+}
+
+// Load a double immediate. The 64-bit pattern is split into two 32-bit
+// halves; the odd half of the even/odd FPU register pair receives the high
++// word and the even half the low word. All-zero halves are written straight
+// from $zero to skip the ma_li.
+void
+MacroAssemblerMIPS::ma_lid(FloatRegister dest, double value)
+{
+    struct DoubleStruct {
+        uint32_t lo;
+        uint32_t hi;
+    } ;
+    DoubleStruct intStruct = mozilla::BitwiseCast<DoubleStruct>(value);
+
+    // put hi part of 64 bit value into the odd register
+    if (intStruct.hi == 0) {
+        as_mtc1_Odd(zero, dest);
+    } else {
+        ma_li(ScratchRegister, Imm32(intStruct.hi));
+        as_mtc1_Odd(ScratchRegister, dest);
+    }
+
+    // put low part of 64 bit value into the even register
+    if (intStruct.lo == 0) {
+        as_mtc1(zero, dest);
+    } else {
+        ma_li(ScratchRegister, Imm32(intStruct.lo));
+        as_mtc1(ScratchRegister, dest);
+    }
+}
+
+// Load -0.0: low word is zero, high word carries only the sign bit (INT_MIN).
+void
+MacroAssemblerMIPS::ma_liNegZero(FloatRegister dest)
+{
+    as_mtc1(zero, dest);
+    ma_li(ScratchRegister, Imm32(INT_MIN));
+    as_mtc1_Odd(ScratchRegister, dest);
+}
+
+// Move a 64-bit FPU pair into a ValueOperand: even half -> payload reg,
+// odd half -> type reg.
+void
+MacroAssemblerMIPS::ma_mv(FloatRegister src, ValueOperand dest)
+{
+    as_mfc1(dest.payloadReg(), src);
+    as_mfc1_Odd(dest.typeReg(), src);
+}
+
+// Reverse direction: ValueOperand GPR pair -> FPU even/odd pair.
+void
+MacroAssemblerMIPS::ma_mv(ValueOperand src, FloatRegister dest)
+{
+    as_mtc1(src.payloadReg(), dest);
+    as_mtc1_Odd(src.typeReg(), dest);
+}
+
+// Load a single-precision float; offsets outside the signed 16-bit range are
+// folded into ScratchRegister first.
+void
+MacroAssemblerMIPS::ma_ls(FloatRegister ft, Address address)
+{
+    if (Imm16::isInSignedRange(address.offset)) {
+        as_ls(ft, address.base, Imm16(address.offset).encode());
+    } else {
+        MOZ_ASSERT(address.base != ScratchRegister);
+        ma_li(ScratchRegister, Imm32(address.offset));
+        as_addu(ScratchRegister, address.base, ScratchRegister);
+        as_ls(ft, ScratchRegister, 0);
+    }
+}
+
+// Load a double as two 32-bit halves (PAYLOAD_OFFSET / TAG_OFFSET apart).
+// NOTE(review): unlike ma_ls, the out-of-range path has no
+// MOZ_ASSERT(address.base != ScratchRegister) — confirm callers never pass it.
+void
+MacroAssemblerMIPS::ma_ld(FloatRegister ft, Address address)
+{
+    // Use single precision load instructions so we don't have to worry about
+    // alignment.
+
+    int32_t off2 = address.offset + TAG_OFFSET;
+    if (Imm16::isInSignedRange(address.offset) && Imm16::isInSignedRange(off2)) {
+        as_ls(ft, address.base, Imm16(address.offset).encode());
+        as_ls_Odd(ft, address.base, Imm16(off2).encode());
+    } else {
+        ma_li(ScratchRegister, Imm32(address.offset));
+        as_addu(ScratchRegister, address.base, ScratchRegister);
+        as_ls(ft, ScratchRegister, PAYLOAD_OFFSET);
+        as_ls_Odd(ft, ScratchRegister, TAG_OFFSET);
+    }
+}
+
+// Store a double as two 32-bit halves, mirroring ma_ld.
+void
+MacroAssemblerMIPS::ma_sd(FloatRegister ft, Address address)
+{
+    int32_t off2 = address.offset + TAG_OFFSET;
+    if (Imm16::isInSignedRange(address.offset) && Imm16::isInSignedRange(off2)) {
+        as_ss(ft, address.base, Imm16(address.offset).encode());
+        as_ss_Odd(ft, address.base, Imm16(off2).encode());
+    } else {
+        ma_li(ScratchRegister, Imm32(address.offset));
+        as_addu(ScratchRegister, address.base, ScratchRegister);
+        as_ss(ft, ScratchRegister, PAYLOAD_OFFSET);
+        as_ss_Odd(ft, ScratchRegister, TAG_OFFSET);
+    }
+}
+
+// BaseIndex variant: compute base+index*scale into the second scratch first.
+void
+MacroAssemblerMIPS::ma_sd(FloatRegister ft, BaseIndex address)
+{
+    computeScaledAddress(address, secondScratchReg_);
+    ma_sd(ft, Address(secondScratchReg_, address.offset));
+}
+
+// Store a single-precision float.
+void
+MacroAssemblerMIPS::ma_ss(FloatRegister ft, Address address)
+{
+    if (Imm16::isInSignedRange(address.offset)) {
+        as_ss(ft, address.base, Imm16(address.offset).encode());
+    } else {
+        ma_li(ScratchRegister, Imm32(address.offset));
+        as_addu(ScratchRegister, address.base, ScratchRegister);
+        as_ss(ft, ScratchRegister, 0);
+    }
+}
+
+void
+MacroAssemblerMIPS::ma_ss(FloatRegister ft, BaseIndex address)
+{
+    computeScaledAddress(address, secondScratchReg_);
+    ma_ss(ft, Address(secondScratchReg_, address.offset));
+}
+
+// Pop/push a double (8 bytes) off/onto the machine stack.
+void
+MacroAssemblerMIPS::ma_pop(FloatRegister fs)
+{
+    ma_ld(fs, Address(StackPointer, 0));
+    as_addiu(StackPointer, StackPointer, sizeof(double));
+}
+
+void
+MacroAssemblerMIPS::ma_push(FloatRegister fs)
+{
+    as_addiu(StackPointer, StackPointer, -sizeof(double));
+    ma_sd(fs, Address(StackPointer, 0));
+}
+
+// Compare two single-precision floats and branch to label if condition 'c'
+// holds, testing FP condition bit fcc in the sense compareFloatingPoint
+// reports (set or clear).
+void
+MacroAssemblerMIPS::ma_bc1s(FloatRegister lhs, FloatRegister rhs, Label *label,
+                            DoubleCondition c, JumpKind jumpKind, FPConditionBit fcc)
+{
+    FloatTestKind testKind;
+    compareFloatingPoint(SingleFloat, lhs, rhs, c, &testKind, fcc);
+    branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
+}
+
+// Double-precision counterpart of ma_bc1s.
+void
+MacroAssemblerMIPS::ma_bc1d(FloatRegister lhs, FloatRegister rhs, Label *label,
+                            DoubleCondition c, JumpKind jumpKind, FPConditionBit fcc)
+{
+    FloatTestKind testKind;
+    compareFloatingPoint(DoubleFloat, lhs, rhs, c, &testKind, fcc);
+    branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
+}
+
+// Push a fake exit frame (descriptor + a code-label "return address") so the
+// frame iterator can walk past a call that is about to be made. *offset
+// receives the code offset bound to the label.
+bool
+MacroAssemblerMIPSCompat::buildFakeExitFrame(const Register &scratch, uint32_t *offset)
+{
+    mozilla::DebugOnly<uint32_t> initialDepth = framePushed();
+
+    CodeLabel cl;
+    ma_li(scratch, cl.dest());
+
+    uint32_t descriptor = MakeFrameDescriptor(framePushed(), IonFrame_OptimizedJS);
+    Push(Imm32(descriptor));
+    Push(scratch);
+
+    bind(cl.src());
+    *offset = currentOffset();
+
+    MOZ_ASSERT(framePushed() == initialDepth + IonExitFrameLayout::Size());
+    return addCodeLabel(cl);
+}
+
+// Out-of-line variant: the return address is a known pointer rather than a
+// label, so no CodeLabel bookkeeping is needed.
+bool
+MacroAssemblerMIPSCompat::buildOOLFakeExitFrame(void *fakeReturnAddr)
+{
+    DebugOnly<uint32_t> initialDepth = framePushed();
+    uint32_t descriptor = MakeFrameDescriptor(framePushed(), IonFrame_OptimizedJS);
+
+    Push(Imm32(descriptor)); // descriptor_
+    Push(ImmPtr(fakeReturnAddr));
+
+    return true;
+}
+
+// Push a frame descriptor, then call into JIT code at 'target' via a
+// patchable load + half-push call.
+void
+MacroAssemblerMIPSCompat::callWithExitFrame(JitCode *target)
+{
+    uint32_t descriptor = MakeFrameDescriptor(framePushed(), IonFrame_OptimizedJS);
+    Push(Imm32(descriptor)); // descriptor
+
+    addPendingJump(m_buffer.nextOffset(), ImmPtr(target->raw()), Relocation::JITCODE);
+    ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
+    ma_callIonHalfPush(ScratchRegister);
+}
+
+// Variant with a dynamic stack amount: dynStack += framePushed(), folded into
+// the descriptor register before the push.
+void
+MacroAssemblerMIPSCompat::callWithExitFrame(JitCode *target, Register dynStack)
+{
+    ma_addu(dynStack, dynStack, Imm32(framePushed()));
+    makeFrameDescriptor(dynStack, IonFrame_OptimizedJS);
+    Push(dynStack); // descriptor
+
+    addPendingJump(m_buffer.nextOffset(), ImmPtr(target->raw()), Relocation::JITCODE);
+    ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
+    ma_callIonHalfPush(ScratchRegister);
+}
+
+// Call Ion code, keeping the required stack alignment: when framePushed() is
+// 4 mod 8 the half-push call restores 8-byte alignment; otherwise a padding
+// word is accounted for and the plain call used.
+void
+MacroAssemblerMIPSCompat::callIon(const Register &callee)
+{
+    MOZ_ASSERT((framePushed() & 3) == 0);
+    if ((framePushed() & 7) == 4) {
+        ma_callIonHalfPush(callee);
+    } else {
+        adjustFrame(sizeof(uint32_t));
+        ma_callIon(callee);
+    }
+}
+
+// Grow the stack by 'amount' bytes and record it in the frame depth.
+void
+MacroAssemblerMIPSCompat::reserveStack(uint32_t amount)
+{
+    if (amount)
+        ma_subu(StackPointer, StackPointer, Imm32(amount));
+    adjustFrame(amount);
+}
+
+// Shrink the stack by 'amount' bytes; must not exceed what was reserved.
+void
+MacroAssemblerMIPSCompat::freeStack(uint32_t amount)
+{
+    MOZ_ASSERT(amount <= framePushed_);
+    if (amount)
+        ma_addu(StackPointer, StackPointer, Imm32(amount));
+    adjustFrame(-amount);
+}
+
+// Register variant: adjusts sp only; frame-depth bookkeeping is the caller's
+// responsibility (amount is unknown at compile time).
+void
+MacroAssemblerMIPSCompat::freeStack(Register amount)
+{
+    as_addu(StackPointer, StackPointer, amount);
+}
+
+// dest += src (32-bit).
+void
+MacroAssemblerMIPSCompat::add32(Register src, Register dest)
+{
+    as_addu(dest, dest, src);
+}
+
+// dest += imm (32-bit).
+void
+MacroAssemblerMIPSCompat::add32(Imm32 imm, Register dest)
+{
+    ma_addu(dest, dest, imm);
+}
+
+// Add imm to the 32-bit value at dest in memory (read-modify-write through
+// the second scratch register).
+void
+MacroAssemblerMIPSCompat::add32(Imm32 imm, const Address &dest)
+{
+    load32(dest, secondScratchReg_);
+    ma_addu(secondScratchReg_, imm);
+    store32(secondScratchReg_, dest);
+}
+
+// 32-bit / pointer-width arithmetic wrappers. On this 32-bit MIPS target a
+// pointer is one word, so the *Ptr forms delegate to the same 32-bit ops.
+void
+MacroAssemblerMIPSCompat::sub32(Imm32 imm, Register dest)
+{
+    ma_subu(dest, dest, imm);
+}
+
+void
+MacroAssemblerMIPSCompat::sub32(Register src, Register dest)
+{
+    ma_subu(dest, dest, src);
+}
+
+void
+MacroAssemblerMIPSCompat::addPtr(Register src, Register dest)
+{
+    ma_addu(dest, src);
+}
+
+void
+MacroAssemblerMIPSCompat::addPtr(const Address &src, Register dest)
+{
+    loadPtr(src, ScratchRegister);
+    ma_addu(dest, ScratchRegister);
+}
+
+// reg = ~reg.
+void
+MacroAssemblerMIPSCompat::not32(Register reg)
+{
+    ma_not(reg, reg);
+}
+
+// Logical operations
+void
+MacroAssemblerMIPSCompat::and32(Imm32 imm, Register dest)
+{
+    ma_and(dest, imm);
+}
+
+// Memory form: load, AND with imm, store back.
+void
+MacroAssemblerMIPSCompat::and32(Imm32 imm, const Address &dest)
+{
+    load32(dest, secondScratchReg_);
+    ma_and(secondScratchReg_, imm);
+    store32(secondScratchReg_, dest);
+}
+
+// Memory form: load, OR with imm, store back.
+void
+MacroAssemblerMIPSCompat::or32(Imm32 imm, const Address &dest)
+{
+    load32(dest, secondScratchReg_);
+    ma_or(secondScratchReg_, imm);
+    store32(secondScratchReg_, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::xor32(Imm32 imm, Register dest)
+{
+    ma_xor(dest, imm);
+}
+
+void
+MacroAssemblerMIPSCompat::xorPtr(Imm32 imm, Register dest)
+{
+    ma_xor(dest, imm);
+}
+
+void
+MacroAssemblerMIPSCompat::xorPtr(Register src, Register dest)
+{
+    ma_xor(dest, src);
+}
+
+void
+MacroAssemblerMIPSCompat::orPtr(Imm32 imm, Register dest)
+{
+    ma_or(dest, imm);
+}
+
+void
+MacroAssemblerMIPSCompat::orPtr(Register src, Register dest)
+{
+    ma_or(dest, src);
+}
+
+void
+MacroAssemblerMIPSCompat::andPtr(Imm32 imm, Register dest)
+{
+    ma_and(dest, imm);
+}
+
+void
+MacroAssemblerMIPSCompat::andPtr(Register src, Register dest)
+{
+    ma_and(dest, src);
+}
+
+// Register/immediate move wrappers; pointers are 32-bit on this target, so
+// movePtr reuses the 32-bit machinery (ImmWord is narrowed to Imm32).
+void
+MacroAssemblerMIPSCompat::move32(const Imm32 &imm, const Register &dest)
+{
+    ma_li(dest, imm);
+}
+
+void
+MacroAssemblerMIPSCompat::move32(const Register &src, const Register &dest)
+{
+    ma_move(dest, src);
+}
+
+void
+MacroAssemblerMIPSCompat::movePtr(const Register &src, const Register &dest)
+{
+    ma_move(dest, src);
+}
+void
+MacroAssemblerMIPSCompat::movePtr(const ImmWord &imm, const Register &dest)
+{
+    ma_li(dest, Imm32(imm.value));
+}
+
+void
+MacroAssemblerMIPSCompat::movePtr(const ImmGCPtr &imm, const Register &dest)
+{
+    ma_li(dest, imm);
+}
+void
+MacroAssemblerMIPSCompat::movePtr(const ImmPtr &imm, const Register &dest)
+{
+    movePtr(ImmWord(uintptr_t(imm.value)), dest);
+}
+// asm.js immediates are not implemented for MIPS yet.
+void
+MacroAssemblerMIPSCompat::movePtr(const AsmJSImmPtr &imm, const Register &dest)
+{
+    MOZ_ASSUME_UNREACHABLE("NYI");
+}
+
+// Load wrappers: byte/halfword loads with explicit zero/sign extension,
+// word loads, and pointer loads (a pointer == one 32-bit word here).
+void
+MacroAssemblerMIPSCompat::load8ZeroExtend(const Address &address, const Register &dest)
+{
+    ma_load(dest, address, SizeByte, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load8ZeroExtend(const BaseIndex &src, const Register &dest)
+{
+    ma_load(dest, src, SizeByte, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load8SignExtend(const Address &address, const Register &dest)
+{
+    ma_load(dest, address, SizeByte, SignExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load8SignExtend(const BaseIndex &src, const Register &dest)
+{
+    ma_load(dest, src, SizeByte, SignExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load16ZeroExtend(const Address &address, const Register &dest)
+{
+    ma_load(dest, address, SizeHalfWord, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load16ZeroExtend(const BaseIndex &src, const Register &dest)
+{
+    ma_load(dest, src, SizeHalfWord, ZeroExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load16SignExtend(const Address &address, const Register &dest)
+{
+    ma_load(dest, address, SizeHalfWord, SignExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load16SignExtend(const BaseIndex &src, const Register &dest)
+{
+    ma_load(dest, src, SizeHalfWord, SignExtend);
+}
+
+void
+MacroAssemblerMIPSCompat::load32(const Address &address, const Register &dest)
+{
+    ma_lw(dest, address);
+}
+
+void
+MacroAssemblerMIPSCompat::load32(const BaseIndex &address, const Register &dest)
+{
+    ma_load(dest, address, SizeWord);
+}
+
+// Absolute address: materialize the address in ScratchRegister, then load.
+void
+MacroAssemblerMIPSCompat::load32(const AbsoluteAddress &address, const Register &dest)
+{
+    ma_li(ScratchRegister, Imm32((uint32_t)address.addr));
+    as_lw(dest, ScratchRegister, 0);
+}
+
+void
+MacroAssemblerMIPSCompat::loadPtr(const Address &address, const Register &dest)
+{
+    ma_lw(dest, address);
+}
+
+void
+MacroAssemblerMIPSCompat::loadPtr(const BaseIndex &src, const Register &dest)
+{
+    load32(src, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::loadPtr(const AbsoluteAddress &address, const Register &dest)
+{
+    ma_li(ScratchRegister, Imm32((uint32_t)address.addr));
+    as_lw(dest, ScratchRegister, 0);
+}
+// NOTE(review): depends on movePtr(AsmJSImmPtr, ...), which is currently
+// MOZ_ASSUME_UNREACHABLE("NYI") in this file — this path cannot be taken yet.
+void
+MacroAssemblerMIPSCompat::loadPtr(const AsmJSAbsoluteAddress &address, const Register &dest)
+{
+    movePtr(AsmJSImmPtr(address.kind()), ScratchRegister);
+    loadPtr(Address(ScratchRegister, 0x0), dest);
+}
+
+// Load the payload word of a boxed private value.
+void
+MacroAssemblerMIPSCompat::loadPrivate(const Address &address, const Register &dest)
+{
+    ma_lw(dest, Address(address.base, address.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::loadDouble(const Address &address, const FloatRegister &dest)
+{
+    ma_ld(dest, address);
+}
+
+void
+MacroAssemblerMIPSCompat::loadDouble(const BaseIndex &src, const FloatRegister &dest)
+{
+    computeScaledAddress(src, secondScratchReg_);
+    ma_ld(dest, Address(secondScratchReg_, src.offset));
+}
+
+// Load a float32 and widen it to double in place.
+void
+MacroAssemblerMIPSCompat::loadFloatAsDouble(const Address &address, const FloatRegister &dest)
+{
+    ma_ls(dest, address);
+    as_cvtds(dest, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::loadFloatAsDouble(const BaseIndex &src, const FloatRegister &dest)
+{
+    loadFloat32(src, dest);
+    as_cvtds(dest, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::loadFloat32(const Address &address, const FloatRegister &dest)
+{
+    ma_ls(dest, address);
+}
+
+void
+MacroAssemblerMIPSCompat::loadFloat32(const BaseIndex &src, const FloatRegister &dest)
+{
+    computeScaledAddress(src, secondScratchReg_);
+    ma_ls(dest, Address(secondScratchReg_, src.offset));
+}
+
+// Store wrappers. Immediates are materialized into a scratch register first;
+// store32 and storePtr are interchangeable on this 32-bit target.
+void
+MacroAssemblerMIPSCompat::store8(const Imm32 &imm, const Address &address)
+{
+    ma_li(secondScratchReg_, imm);
+    ma_store(secondScratchReg_, address, SizeByte);
+}
+
+void
+MacroAssemblerMIPSCompat::store8(const Register &src, const Address &address)
+{
+    ma_store(src, address, SizeByte);
+}
+
+void
+MacroAssemblerMIPSCompat::store8(const Imm32 &imm, const BaseIndex &dest)
+{
+    ma_store(imm, dest, SizeByte);
+}
+
+void
+MacroAssemblerMIPSCompat::store8(const Register &src, const BaseIndex &dest)
+{
+    ma_store(src, dest, SizeByte);
+}
+
+void
+MacroAssemblerMIPSCompat::store16(const Imm32 &imm, const Address &address)
+{
+    ma_li(secondScratchReg_, imm);
+    ma_store(secondScratchReg_, address, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store16(const Register &src, const Address &address)
+{
+    ma_store(src, address, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store16(const Imm32 &imm, const BaseIndex &dest)
+{
+    ma_store(imm, dest, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store16(const Register &src, const BaseIndex &address)
+{
+    ma_store(src, address, SizeHalfWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store32(const Register &src, const AbsoluteAddress &address)
+{
+    storePtr(src, address);
+}
+
+void
+MacroAssemblerMIPSCompat::store32(const Register &src, const Address &address)
+{
+    storePtr(src, address);
+}
+
+void
+MacroAssemblerMIPSCompat::store32(const Imm32 &src, const Address &address)
+{
+    move32(src, ScratchRegister);
+    storePtr(ScratchRegister, address);
+}
+
+void
+MacroAssemblerMIPSCompat::store32(const Imm32 &imm, const BaseIndex &dest)
+{
+    ma_store(imm, dest, SizeWord);
+}
+
+void
+MacroAssemblerMIPSCompat::store32(const Register &src, const BaseIndex &dest)
+{
+    ma_store(src, dest, SizeWord);
+}
+
+void
+MacroAssemblerMIPSCompat::storePtr(ImmWord imm, const Address &address)
+{
+    ma_li(ScratchRegister, Imm32(imm.value));
+    ma_sw(ScratchRegister, address);
+}
+
+void
+MacroAssemblerMIPSCompat::storePtr(ImmPtr imm, const Address &address)
+{
+    storePtr(ImmWord(uintptr_t(imm.value)), address);
+}
+
+void
+MacroAssemblerMIPSCompat::storePtr(ImmGCPtr imm, const Address &address)
+{
+    ma_li(ScratchRegister, imm);
+    ma_sw(ScratchRegister, address);
+}
+
+void
+MacroAssemblerMIPSCompat::storePtr(Register src, const Address &address)
+{
+    ma_sw(src, address);
+}
+
+// Absolute address: materialize the address in ScratchRegister, then store.
+void
+MacroAssemblerMIPSCompat::storePtr(const Register &src, const AbsoluteAddress &dest)
+{
+    ma_li(ScratchRegister, Imm32((uint32_t)dest.addr));
+    as_sw(src, ScratchRegister, 0);
+}
+
+void
+MacroAssemblerMIPSCompat::subPtr(Imm32 imm, const Register dest)
+{
+    ma_subu(dest, dest, imm);
+}
+
+void
+MacroAssemblerMIPSCompat::addPtr(Imm32 imm, const Register dest)
+{
+    ma_addu(dest, imm);
+}
+
+// Memory form: load, add imm, store back.
+void
+MacroAssemblerMIPSCompat::addPtr(Imm32 imm, const Address &dest)
+{
+    loadPtr(dest, ScratchRegister);
+    addPtr(imm, ScratchRegister);
+    storePtr(ScratchRegister, dest);
+}
+
+// Compare-and-branch wrappers over the ma_bc1d/ma_bc1s primitives.
+void
+MacroAssemblerMIPSCompat::branchDouble(DoubleCondition cond, const FloatRegister &lhs,
+                                       const FloatRegister &rhs, Label *label)
+{
+    ma_bc1d(lhs, rhs, label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchFloat(DoubleCondition cond, const FloatRegister &lhs,
+                                      const FloatRegister &rhs, Label *label)
+{
+    ma_bc1s(lhs, rhs, label, cond);
+}
+
+// higher level tag testing code
+
+// Build an Operand addressing the payload word of a boxed value.
+Operand
+ToPayload(Operand base)
+{
+    return Operand(Register::FromCode(base.base()), base.disp() + PAYLOAD_OFFSET);
+}
+
+// Build an Operand addressing the tag word of a boxed value.
+Operand
+ToType(Operand base)
+{
+    return Operand(Register::FromCode(base.base()), base.disp() + TAG_OFFSET);
+}
+
+// GC-thing tags occupy the high end of the tag range, so "is a GC thing"
+// becomes an unsigned >= compare against the lowest GC-thing tag.
+void
+MacroAssemblerMIPSCompat::branchTestGCThing(Condition cond, const Address &address, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(address, secondScratchReg_);
+    ma_b(secondScratchReg_, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET), label,
+         (cond == Equal) ? AboveOrEqual : Below);
+}
+void
+MacroAssemblerMIPSCompat::branchTestGCThing(Condition cond, const BaseIndex &src, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(src, secondScratchReg_);
+    ma_b(secondScratchReg_, ImmTag(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET), label,
+         (cond == Equal) ? AboveOrEqual : Below);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestPrimitive(Condition cond, const ValueOperand &value,
+                                              Label *label)
+{
+    branchTestPrimitive(cond, value.typeReg(), label);
+}
+// Primitives are every tag strictly below the upper-exclusive primitive bound.
+void
+MacroAssemblerMIPSCompat::branchTestPrimitive(Condition cond, const Register &tag, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    ma_b(tag, ImmTag(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET), label,
+         (cond == Equal) ? Below : AboveOrEqual);
+}
+
+// Branch on whether the value's tag is (or is not) the int32 tag.
+void
+MacroAssemblerMIPSCompat::branchTestInt32(Condition cond, const ValueOperand &value, Label *label)
+{
+    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+    ma_b(value.typeReg(), ImmType(JSVAL_TYPE_INT32), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestInt32(Condition cond, const Register &tag, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    ma_b(tag, ImmTag(JSVAL_TAG_INT32), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestInt32(Condition cond, const Address &address, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(address, secondScratchReg_);
+    ma_b(secondScratchReg_, ImmTag(JSVAL_TAG_INT32), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestInt32(Condition cond, const BaseIndex &src, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(src, secondScratchReg_);
+    ma_b(secondScratchReg_, ImmTag(JSVAL_TAG_INT32), label, cond);
+}
+
+// Branch on whether the value's tag is (or is not) the boolean tag.
+void
+MacroAssemblerMIPSCompat:: branchTestBoolean(Condition cond, const ValueOperand &value,
+                                             Label *label)
+{
+    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+    ma_b(value.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat:: branchTestBoolean(Condition cond, const Register &tag, Label *label)
+{
+    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+    ma_b(tag, ImmType(JSVAL_TYPE_BOOLEAN), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestBoolean(Condition cond, const BaseIndex &src, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(src, secondScratchReg_);
+    ma_b(secondScratchReg_, ImmType(JSVAL_TYPE_BOOLEAN), label, cond);
+}
+
+// A double is any value whose tag is below JSVAL_TAG_CLEAR, so the
+// equality condition is converted into an unsigned Below/AboveOrEqual
+// compare against the CLEAR tag.
+void
+MacroAssemblerMIPSCompat::branchTestDouble(Condition cond, const ValueOperand &value, Label *label)
+{
+    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+    Assembler::Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+    ma_b(value.typeReg(), ImmTag(JSVAL_TAG_CLEAR), label, actual);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestDouble(Condition cond, const Register &tag, Label *label)
+{
+    MOZ_ASSERT(cond == Assembler::Equal || cond == NotEqual);
+    Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+    ma_b(tag, ImmTag(JSVAL_TAG_CLEAR), label, actual);
+}
+
+// A double is any value whose tag is below JSVAL_TAG_CLEAR, so convert the
+// equality condition into an unsigned compare against the CLEAR tag, exactly
+// as the ValueOperand/Register/BaseIndex overloads do. (Previously 'cond' was
+// passed through unconverted, so Equal tested tag == CLEAR, which never
+// matches a double.)
+void
+MacroAssemblerMIPSCompat::branchTestDouble(Condition cond, const Address &address, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+    extractTag(address, secondScratchReg_);
+    ma_b(secondScratchReg_, ImmTag(JSVAL_TAG_CLEAR), label, actual);
+}
+
+// BaseIndex variant: same Below/AboveOrEqual conversion against the CLEAR tag.
+void
+MacroAssemblerMIPSCompat::branchTestDouble(Condition cond, const BaseIndex &src, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+    extractTag(src, secondScratchReg_);
+    ma_b(secondScratchReg_, ImmTag(JSVAL_TAG_CLEAR), label, actual);
+}
+
+// Branch on whether the value's tag is (or is not) the null tag.
+void
+MacroAssemblerMIPSCompat::branchTestNull(Condition cond, const ValueOperand &value, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    ma_b(value.typeReg(), ImmType(JSVAL_TYPE_NULL), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestNull(Condition cond, const Register &tag, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    ma_b(tag, ImmTag(JSVAL_TAG_NULL), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestNull(Condition cond, const BaseIndex &src, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(src, secondScratchReg_);
+    ma_b(secondScratchReg_, ImmTag(JSVAL_TAG_NULL), label, cond);
+}
+
+
+// Branch on whether the value's tag is (or is not) the object tag.
+void
+MacroAssemblerMIPSCompat::branchTestObject(Condition cond, const ValueOperand &value, Label *label)
+{
+    branchTestObject(cond, value.typeReg(), label);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestObject(Condition cond, const Register &tag, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    ma_b(tag, ImmTag(JSVAL_TAG_OBJECT), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestObject(Condition cond, const BaseIndex &src, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(src, secondScratchReg_);
+    ma_b(secondScratchReg_, ImmTag(JSVAL_TAG_OBJECT), label, cond);
+}
+
+
+// Branch on whether the value's tag is (or is not) the string tag.
+void
+MacroAssemblerMIPSCompat::branchTestString(Condition cond, const ValueOperand &value, Label *label)
+{
+    branchTestString(cond, value.typeReg(), label);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestString(Condition cond, const Register &tag, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    ma_b(tag, ImmTag(JSVAL_TAG_STRING), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestString(Condition cond, const BaseIndex &src, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(src, secondScratchReg_);
+    ma_b(secondScratchReg_, ImmTag(JSVAL_TAG_STRING), label, cond);
+}
+
+// Branch on whether the value's tag is (or is not) the undefined tag.
+void
+MacroAssemblerMIPSCompat::branchTestUndefined(Condition cond, const ValueOperand &value,
+                                              Label *label)
+{
+    MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+    ma_b(value.typeReg(), ImmType(JSVAL_TYPE_UNDEFINED), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestUndefined(Condition cond, const Register &tag, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    ma_b(tag, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestUndefined(Condition cond, const BaseIndex &src, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(src, secondScratchReg_);
+    ma_b(secondScratchReg_, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestUndefined(Condition cond, const Address &address, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(address, secondScratchReg_);
+    ma_b(secondScratchReg_, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
+}
+
+
+// Numbers (int32 or double) occupy every tag up to and including the
+// upper-inclusive number bound, hence the BelowOrEqual/Above conversion.
+void
+MacroAssemblerMIPSCompat::branchTestNumber(Condition cond, const ValueOperand &value, Label *label)
+{
+    branchTestNumber(cond, value.typeReg(), label);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestNumber(Condition cond, const Register &tag, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    ma_b(tag, ImmTag(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET), label,
+         cond == Equal ? BelowOrEqual : Above);
+}
+
+// Branch on whether the value's tag is (or is not) the magic tag.
+void
+MacroAssemblerMIPSCompat::branchTestMagic(Condition cond, const ValueOperand &value, Label *label)
+{
+    branchTestMagic(cond, value.typeReg(), label);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestMagic(Condition cond, const Register &tag, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    ma_b(tag, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestMagic(Condition cond, const Address &address, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(address, secondScratchReg_);
+    ma_b(secondScratchReg_, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void
+MacroAssemblerMIPSCompat::branchTestMagic(Condition cond, const BaseIndex &src, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+    extractTag(src, secondScratchReg_);
+    ma_b(secondScratchReg_, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+// Compare a boxed value against a constant Value. Equal: jump only when both
+// payload and type words match (payload mismatch falls through to 'done').
+// NotEqual: jump as soon as either word differs.
+void
+MacroAssemblerMIPSCompat::branchTestValue(Condition cond, const ValueOperand &value,
+                                          const Value &v, Label *label)
+{
+    moveData(v, ScratchRegister);
+
+    if (cond == Equal) {
+        Label done;
+        ma_b(value.payloadReg(), ScratchRegister, &done, NotEqual, ShortJump);
+        {
+            ma_b(value.typeReg(), Imm32(getType(v)), label, Equal);
+        }
+        bind(&done);
+    } else {
+        MOZ_ASSERT(cond == NotEqual);
+        ma_b(value.payloadReg(), ScratchRegister, label, NotEqual);
+
+        ma_b(value.typeReg(), Imm32(getType(v)), label, NotEqual);
+    }
+}
+
+// Compare the boxed value at valaddr against the one in 'value', word by word.
+// NOTE(review): for cond == Equal this jumps to label as soon as the tag words
+// match, regardless of the payload — correct only for NotEqual-style use;
+// confirm callers only pass NotEqual (or that this short-circuit is intended).
+void
+MacroAssemblerMIPSCompat::branchTestValue(Condition cond, const Address &valaddr,
+                                          const ValueOperand &value, Label *label)
+{
+    MOZ_ASSERT(cond == Equal || cond == NotEqual);
+
+    // Load tag.
+    ma_lw(ScratchRegister, Address(valaddr.base, valaddr.offset + TAG_OFFSET));
+    branchPtr(cond, ScratchRegister, value.typeReg(), label);
+
+    // Load payload
+    ma_lw(ScratchRegister, Address(valaddr.base, valaddr.offset + PAYLOAD_OFFSET));
+    branchPtr(cond, ScratchRegister, value.payloadReg(), label);
+}
+
+// unboxing code
+// In the nunbox32 layout the payload word of an int32/boolean/string/object
+// Value is already the raw machine value, so unboxing from a ValueOperand is
+// a register move and unboxing from memory is a 32-bit load of the payload.
+void
+MacroAssemblerMIPSCompat::unboxInt32(const ValueOperand &operand, const Register &dest)
+{
+    ma_move(dest, operand.payloadReg());
+}
+
+void
+MacroAssemblerMIPSCompat::unboxInt32(const Address &src, const Register &dest)
+{
+    ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::unboxBoolean(const ValueOperand &operand, const Register &dest)
+{
+    ma_move(dest, operand.payloadReg());
+}
+
+void
+MacroAssemblerMIPSCompat::unboxBoolean(const Address &src, const Register &dest)
+{
+    ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+// Reassemble a double from its two 32-bit halves: payload -> low word,
+// tag -> high word of the FPU register pair (as_mtc1_Odd writes the odd
+// half — assumes the 32-bit FR=0 register-pairing mode; see Assembler-mips).
+void
+MacroAssemblerMIPSCompat::unboxDouble(const ValueOperand &operand, const FloatRegister &dest)
+{
+    MOZ_ASSERT(dest != ScratchFloatReg);
+    as_mtc1(operand.payloadReg(), dest);
+    as_mtc1_Odd(operand.typeReg(), dest);
+}
+
+void
+MacroAssemblerMIPSCompat::unboxDouble(const Address &src, const FloatRegister &dest)
+{
+    ma_lw(ScratchRegister, Address(src.base, src.offset + PAYLOAD_OFFSET));
+    as_mtc1(ScratchRegister, dest);
+    ma_lw(ScratchRegister, Address(src.base, src.offset + TAG_OFFSET));
+    as_mtc1_Odd(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::unboxString(const ValueOperand &operand, const Register &dest)
+{
+    ma_move(dest, operand.payloadReg());
+}
+
+void
+MacroAssemblerMIPSCompat::unboxString(const Address &src, const Register &dest)
+{
+    ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::unboxObject(const ValueOperand &src, const Register &dest)
+{
+    ma_move(dest, src.payloadReg());
+}
+
+// Unbox into |dest|, converting int32 payloads to double when the
+// destination is a float register (the value may hold either an int32 or a
+// boxed double in that case).
+void
+MacroAssemblerMIPSCompat::unboxValue(const ValueOperand &src, AnyRegister dest)
+{
+    if (dest.isFloat()) {
+        Label notInt32, end;
+        branchTestInt32(Assembler::NotEqual, src, &notInt32);
+        convertInt32ToDouble(src.payloadReg(), dest.fpu());
+        ma_b(&end, ShortJump);
+        bind(&notInt32);
+        unboxDouble(src, dest.fpu());
+        bind(&end);
+    } else if (src.payloadReg() != dest.gpr()) {
+        ma_move(dest.gpr(), src.payloadReg());
+    }
+}
+
+// Private values store a raw pointer in the payload word.
+void
+MacroAssemblerMIPSCompat::unboxPrivate(const ValueOperand &src, Register dest)
+{
+    ma_move(dest, src.payloadReg());
+}
+
+// Box a double: low word of the FPU pair -> payload, high word -> tag
+// (the reverse of unboxDouble).
+void
+MacroAssemblerMIPSCompat::boxDouble(const FloatRegister &src, const ValueOperand &dest)
+{
+    as_mfc1(dest.payloadReg(), src);
+    as_mfc1_Odd(dest.typeReg(), src);
+}
+
+// Box a non-double value: payload is the raw word, tag is derived from
+// |type|. The move is skipped when src already is the payload register.
+void
+MacroAssemblerMIPSCompat::boxNonDouble(JSValueType type, const Register &src,
+                                       const ValueOperand &dest)
+{
+    if (src != dest.payloadReg())
+        ma_move(dest.payloadReg(), src);
+    ma_li(dest.typeReg(), ImmType(type));
+}
+
+// Boolean payload (0/1) -> double, via an int32 intermediate in
+// ScratchRegister.
+void
+MacroAssemblerMIPSCompat::boolValueToDouble(const ValueOperand &operand, const FloatRegister &dest)
+{
+    convertBoolToInt32(ScratchRegister, operand.payloadReg());
+    convertInt32ToDouble(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::int32ValueToDouble(const ValueOperand &operand,
+                                             const FloatRegister &dest)
+{
+    convertInt32ToDouble(operand.payloadReg(), dest);
+}
+
+// Boolean payload (0/1) -> float32, via an int32 intermediate in
+// ScratchRegister.
+void
+MacroAssemblerMIPSCompat::boolValueToFloat32(const ValueOperand &operand,
+                                             const FloatRegister &dest)
+{
+
+    convertBoolToInt32(ScratchRegister, operand.payloadReg());
+    convertInt32ToFloat32(ScratchRegister, dest);
+}
+
+void
+MacroAssemblerMIPSCompat::int32ValueToFloat32(const ValueOperand &operand,
+                                              const FloatRegister &dest)
+{
+    convertInt32ToFloat32(operand.payloadReg(), dest);
+}
+
+// Materialize the float32 constant |f| in |dest|.
+void
+MacroAssemblerMIPSCompat::loadConstantFloat32(float f, const FloatRegister &dest)
+{
+    ma_lis(dest, f);
+}
+
+// Load a Value that is known to be numeric into a double register,
+// converting int32 payloads on the fly. Clobbers secondScratchReg_.
+void
+MacroAssemblerMIPSCompat::loadInt32OrDouble(const Address &src, const FloatRegister &dest)
+{
+    Label notInt32, end;
+    // If it's an int, convert it to double.
+    ma_lw(secondScratchReg_, Address(src.base, src.offset + TAG_OFFSET));
+    branchTestInt32(Assembler::NotEqual, secondScratchReg_, &notInt32);
+    ma_lw(secondScratchReg_, Address(src.base, src.offset + PAYLOAD_OFFSET));
+    convertInt32ToDouble(secondScratchReg_, dest);
+    ma_b(&end, ShortJump);
+
+    // Not an int, just load as double.
+    bind(&notInt32);
+    ma_ld(dest, src);
+    bind(&end);
+}
+
+// BaseIndex variant of the above. The scaled address must be recomputed
+// after each use because the single scratch register holds, in turn, the
+// address, the tag and the payload. Clobbers secondScratchReg_.
+void
+MacroAssemblerMIPSCompat::loadInt32OrDouble(Register base, Register index,
+                                            const FloatRegister &dest, int32_t shift)
+{
+    Label notInt32, end;
+
+    // If it's an int, convert it to double.
+
+    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), secondScratchReg_);
+    // Since we only have one scratch, we need to stomp over it with the tag.
+    load32(Address(secondScratchReg_, TAG_OFFSET), secondScratchReg_);
+    branchTestInt32(Assembler::NotEqual, secondScratchReg_, &notInt32);
+
+    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), secondScratchReg_);
+    load32(Address(secondScratchReg_, PAYLOAD_OFFSET), secondScratchReg_);
+    convertInt32ToDouble(secondScratchReg_, dest);
+    ma_b(&end, ShortJump);
+
+    // Not an int, just load as double.
+    bind(&notInt32);
+    // First, recompute the offset that had been stored in the scratch register
+    // since the scratch register was overwritten loading in the type.
+    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), secondScratchReg_);
+    loadDouble(Address(secondScratchReg_, 0), dest);
+    bind(&end);
+}
+
+// Materialize the double constant |dp| in |dest|.
+void
+MacroAssemblerMIPSCompat::loadConstantDouble(double dp, const FloatRegister &dest)
+{
+    ma_lid(dest, dp);
+}
+
+// Branch on the truthiness of an int32 Value: truthy iff the payload is
+// non-zero. NOTE(review): the self-AND merely copies the payload into
+// ScratchRegister; branching on the payload register directly (as
+// branchTestBooleanTruthy does) looks equivalent — confirm before
+// simplifying.
+void
+MacroAssemblerMIPSCompat::branchTestInt32Truthy(bool b, const ValueOperand &value, Label *label)
+{
+    ma_and(ScratchRegister, value.payloadReg(), value.payloadReg());
+    ma_b(ScratchRegister, ScratchRegister, label, b ? NonZero : Zero);
+}
+
+// A string is truthy iff its length is non-zero; the mask isolates the
+// length bits of the combined length-and-flags word.
+void
+MacroAssemblerMIPSCompat::branchTestStringTruthy(bool b, const ValueOperand &value, Label *label)
+{
+    Register string = value.payloadReg();
+    size_t mask = (0xFFFFFFFF << JSString::LENGTH_SHIFT);
+    ma_lw(secondScratchReg_, Address(string, JSString::offsetOfLengthAndFlags()));
+
+    // Use secondScratchReg_ because ma_and will clobber ScratchRegister
+    ma_and(ScratchRegister, secondScratchReg_, Imm32(mask));
+    ma_b(ScratchRegister, ScratchRegister, label, b ? NonZero : Zero);
+}
+
+// A double is truthy iff it is neither ±0.0 nor NaN;
+// DoubleEqualOrUnordered makes both of those compare "equal" to 0.0.
+void
+MacroAssemblerMIPSCompat::branchTestDoubleTruthy(bool b, const FloatRegister &value, Label *label)
+{
+    ma_lid(ScratchFloatReg, 0.0);
+    DoubleCondition cond = b ? DoubleNotEqual : DoubleEqualOrUnordered;
+    ma_bc1d(value, ScratchFloatReg, label, cond);
+}
+
+// A boolean is truthy iff its payload (0 or 1) is non-zero.
+void
+MacroAssemblerMIPSCompat::branchTestBooleanTruthy(bool b, const ValueOperand &operand,
+                                                  Label *label)
+{
+    ma_b(operand.payloadReg(), operand.payloadReg(), label, b ? NonZero : Zero);
+}
+
+// Load the object payload of the Value at |address| into |scratch| and
+// return it.
+Register
+MacroAssemblerMIPSCompat::extractObject(const Address &address, Register scratch)
+{
+    ma_lw(scratch, Address(address.base, address.offset + PAYLOAD_OFFSET));
+    return scratch;
+}
+
+// Load the tag word of the Value at |address| into |scratch| and return it.
+Register
+MacroAssemblerMIPSCompat::extractTag(const Address &address, Register scratch)
+{
+    ma_lw(scratch, Address(address.base, address.offset + TAG_OFFSET));
+    return scratch;
+}
+
+// BaseIndex variant: compute the scaled address into |scratch| first, then
+// reuse |scratch| for the tag load.
+Register
+MacroAssemblerMIPSCompat::extractTag(const BaseIndex &address, Register scratch)
+{
+    computeScaledAddress(address, scratch);
+    return extractTag(Address(scratch, address.offset), scratch);
+}
+
+
+// Return the 32-bit tag word of the constant |val| (nunbox32 layout).
+uint32_t
+MacroAssemblerMIPSCompat::getType(const Value &val)
+{
+    jsval_layout jv = JSVAL_TO_IMPL(val);
+    return jv.s.tag;
+}
+
+// Load the payload word of the constant |val| into |data|. GC things go
+// through ImmGCPtr so the GC can trace and patch the embedded pointer;
+// everything else is loaded as a plain 32-bit immediate.
+void
+MacroAssemblerMIPSCompat::moveData(const Value &val, Register data)
+{
+    jsval_layout jv = JSVAL_TO_IMPL(val);
+    if (val.isMarkable())
+        ma_li(data, ImmGCPtr(reinterpret_cast<gc::Cell *>(val.toGCThing())));
+    else
+        ma_li(data, Imm32(jv.s.payload.i32));
+}
+
+// Materialize the constant |val| as a tag/payload register pair.
+void
+MacroAssemblerMIPSCompat::moveValue(const Value &val, Register type, Register data)
+{
+    MOZ_ASSERT(type != data);
+    ma_li(type, Imm32(getType(val)));
+    moveData(val, data);
+}
+void
+MacroAssemblerMIPSCompat::moveValue(const Value &val, const ValueOperand &dest)
+{
+    moveValue(val, dest.typeReg(), dest.payloadReg());
+}
+
+// Emit a patchable long jump to |label| (load-address + jr + delay-slot
+// nop), recording it as a long jump so the target can be patched later.
+// Returns the code offset of the jump for the patching machinery.
+CodeOffsetJump
+MacroAssemblerMIPSCompat::jumpWithPatch(RepatchLabel *label)
+{
+    // Only one branch per label.
+    MOZ_ASSERT(!label->used());
+    uint32_t dest = label->bound() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+    BufferOffset bo = nextOffset();
+    label->use(bo.getOffset());
+    addLongJump(bo);
+    ma_liPatchable(ScratchRegister, Imm32(dest));
+    as_jr(ScratchRegister);
+    as_nop();
+    return CodeOffsetJump(bo.getOffset());
+}
+
+
+/////////////////////////////////////////////////////////////////
+// X86/X64-common/ARM/MIPS interface.
+/////////////////////////////////////////////////////////////////
+// Store a boxed Value (tag + payload words) to the given destination.
+void
+MacroAssemblerMIPSCompat::storeValue(ValueOperand val, Operand dst)
+{
+    storeValue(val, Address(Register::FromCode(dst.base()), dst.disp()));
+}
+
+// Clobbers secondScratchReg_ (holds the scaled address).
+void
+MacroAssemblerMIPSCompat::storeValue(ValueOperand val, const BaseIndex &dest)
+{
+    computeScaledAddress(dest, secondScratchReg_);
+    storeValue(val, Address(secondScratchReg_, dest.offset));
+}
+
+// Store |reg| boxed with tag |type| at a BaseIndex destination. Large
+// offsets are folded into the base first so the Address-taking overload
+// only ever sees a 16-bit-encodable offset.
+void
+MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg, BaseIndex dest)
+{
+    computeScaledAddress(dest, ScratchRegister);
+
+    // Make sure that ma_sw doesn't clobber ScratchRegister
+    int32_t offset = dest.offset;
+    if (!Imm16::isInSignedRange(offset)) {
+        ma_li(secondScratchReg_, Imm32(offset));
+        as_addu(ScratchRegister, ScratchRegister, secondScratchReg_);
+        offset = 0;
+    }
+
+    storeValue(type, reg, Address(ScratchRegister, offset));
+}
+
+void
+MacroAssemblerMIPSCompat::storeValue(ValueOperand val, const Address &dest)
+{
+    ma_sw(val.payloadReg(), Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+    ma_sw(val.typeReg(), Address(dest.base, dest.offset + TAG_OFFSET));
+}
+
+// Clobbers secondScratchReg_ (holds the tag immediate).
+void
+MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg, Address dest)
+{
+    MOZ_ASSERT(dest.base != secondScratchReg_);
+
+    ma_sw(reg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+    ma_li(secondScratchReg_, ImmTag(JSVAL_TYPE_TO_TAG(type)));
+    ma_sw(secondScratchReg_, Address(dest.base, dest.offset + TAG_OFFSET));
+}
+
+// Store the constant |val|; secondScratchReg_ is reused for the tag and
+// then the payload.
+void
+MacroAssemblerMIPSCompat::storeValue(const Value &val, Address dest)
+{
+    MOZ_ASSERT(dest.base != secondScratchReg_);
+
+    ma_li(secondScratchReg_, Imm32(getType(val)));
+    ma_sw(secondScratchReg_, Address(dest.base, dest.offset + TAG_OFFSET));
+    moveData(val, secondScratchReg_);
+    ma_sw(secondScratchReg_, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::storeValue(const Value &val, BaseIndex dest)
+{
+    computeScaledAddress(dest, ScratchRegister);
+
+    // Make sure that ma_sw doesn't clobber ScratchRegister
+    int32_t offset = dest.offset;
+    if (!Imm16::isInSignedRange(offset)) {
+        ma_li(secondScratchReg_, Imm32(offset));
+        as_addu(ScratchRegister, ScratchRegister, secondScratchReg_);
+        offset = 0;
+    }
+    storeValue(val, Address(ScratchRegister, offset));
+}
+
+// Load a boxed Value into the |val| register pair.
+void
+MacroAssemblerMIPSCompat::loadValue(const BaseIndex &addr, ValueOperand val)
+{
+    computeScaledAddress(addr, secondScratchReg_);
+    loadValue(Address(secondScratchReg_, addr.offset), val);
+}
+
+void
+MacroAssemblerMIPSCompat::loadValue(Address src, ValueOperand val)
+{
+    // Ensure that loading the payload does not erase the pointer to the
+    // Value in memory.
+    if (src.base != val.payloadReg()) {
+        ma_lw(val.payloadReg(), Address(src.base, src.offset + PAYLOAD_OFFSET));
+        ma_lw(val.typeReg(), Address(src.base, src.offset + TAG_OFFSET));
+    } else {
+        ma_lw(val.typeReg(), Address(src.base, src.offset + TAG_OFFSET));
+        ma_lw(val.payloadReg(), Address(src.base, src.offset + PAYLOAD_OFFSET));
+    }
+}
+
+// Box a raw payload register with tag |type| into |dest|.
+void
+MacroAssemblerMIPSCompat::tagValue(JSValueType type, Register payload, ValueOperand dest)
+{
+    MOZ_ASSERT(payload != dest.typeReg());
+    ma_li(dest.typeReg(), ImmType(type));
+    if (payload != dest.payloadReg())
+        ma_move(dest.payloadReg(), payload);
+}
+
+// Push a boxed Value onto the stack (tag and payload words).
+void
+MacroAssemblerMIPSCompat::pushValue(ValueOperand val)
+{
+    // Allocate stack slots for type and payload. One for each.
+    ma_subu(StackPointer, StackPointer, Imm32(sizeof(Value)));
+    // Store type and payload.
+    storeValue(val, Address(StackPointer, 0));
+}
+
+// Push the Value stored at |addr|, word by word via ScratchRegister.
+void
+MacroAssemblerMIPSCompat::pushValue(const Address &addr)
+{
+    // Allocate stack slots for type and payload. One for each.
+    ma_subu(StackPointer, StackPointer, Imm32(sizeof(Value)));
+    // Store type and payload.
+    ma_lw(ScratchRegister, Address(addr.base, addr.offset + TAG_OFFSET));
+    ma_sw(ScratchRegister, Address(StackPointer, TAG_OFFSET));
+    ma_lw(ScratchRegister, Address(addr.base, addr.offset + PAYLOAD_OFFSET));
+    ma_sw(ScratchRegister, Address(StackPointer, PAYLOAD_OFFSET));
+}
+
+// Pop a boxed Value from the stack into the |val| register pair.
+void
+MacroAssemblerMIPSCompat::popValue(ValueOperand val)
+{
+    // Load payload and type.
+    as_lw(val.payloadReg(), StackPointer, PAYLOAD_OFFSET);
+    as_lw(val.typeReg(), StackPointer, TAG_OFFSET);
+    // Free stack.
+    as_addiu(StackPointer, StackPointer, sizeof(Value));
+}
+
+// Store only the payload word of a Value; the tag at |dest| is untouched.
+// Clobbers secondScratchReg_.
+void
+MacroAssemblerMIPSCompat::storePayload(const Value &val, Address dest)
+{
+    moveData(val, secondScratchReg_);
+    ma_sw(secondScratchReg_, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::storePayload(Register src, Address dest)
+{
+    ma_sw(src, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+    return;
+}
+
+// BaseIndex-style variant: scaled address goes to secondScratchReg_, the
+// constant payload to ScratchRegister.
+void
+MacroAssemblerMIPSCompat::storePayload(const Value &val, Register base, Register index,
+                                       int32_t shift)
+{
+    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), secondScratchReg_);
+
+    moveData(val, ScratchRegister);
+
+    as_sw(ScratchRegister, secondScratchReg_, NUNBOX32_PAYLOAD_OFFSET);
+}
+
+void
+MacroAssemblerMIPSCompat::storePayload(Register src, Register base, Register index, int32_t shift)
+{
+    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), secondScratchReg_);
+    as_sw(src, secondScratchReg_, NUNBOX32_PAYLOAD_OFFSET);
+}
+
+// Store only the tag word of a Value; the payload at |dest| is untouched.
+void
+MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, Address dest)
+{
+    ma_li(secondScratchReg_, tag);
+    ma_sw(secondScratchReg_, Address(dest.base, dest.offset + TAG_OFFSET));
+}
+
+void
+MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, Register base, Register index, int32_t shift)
+{
+    computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)), secondScratchReg_);
+    ma_li(ScratchRegister, tag);
+    as_sw(ScratchRegister, secondScratchReg_, TAG_OFFSET);
+}
+
+// Record the current stack pointer as the runtime's ionTop, marking the
+// exit-frame boundary for stack walking.
+void
+MacroAssemblerMIPSCompat::linkExitFrame()
+{
+    uint8_t *dest = (uint8_t*)GetIonContext()->runtime->addressOfIonTop();
+    movePtr(ImmPtr(dest), ScratchRegister);
+    ma_sw(StackPointer, Address(ScratchRegister, 0));
+}
+
+// Same as linkExitFrame, but stores into the PerThreadData pointed to by
+// |pt| (parallel-execution path).
+void
+MacroAssemblerMIPSCompat::linkParallelExitFrame(const Register &pt)
+{
+    ma_sw(StackPointer, Address(pt, offsetof(PerThreadData, ionTop)));
+}
+
+// This macro-instruction calls the ion code and pushes the return address
+// to the stack in the case when the stack is aligned. Two words are
+// reserved so the push keeps the stack's double-word alignment.
+void
+MacroAssemblerMIPS::ma_callIon(const Register r)
+{
+    // This is a MIPS hack to push return address during jalr delay slot.
+    as_addiu(StackPointer, StackPointer, -2 * sizeof(intptr_t));
+    as_jalr(r);
+    as_sw(ra, StackPointer, 0);
+}
+
+// This macro-instruction calls the ion code and pushes the return address
+// to the stack in the case when the stack is not aligned; a single word
+// restores alignment.
+void
+MacroAssemblerMIPS::ma_callIonHalfPush(const Register r)
+{
+    // This is a MIPS hack to push return address during jalr delay slot.
+    as_addiu(StackPointer, StackPointer, -sizeof(intptr_t));
+    as_jalr(r);
+    as_sw(ra, StackPointer, 0);
+}
+
+// Patchable absolute call through CallReg (t9, as the o32 ABI expects for
+// position-independent callees).
+void
+MacroAssemblerMIPS::ma_call(ImmPtr dest)
+{
+    ma_liPatchable(CallReg, dest);
+    as_jalr(CallReg);
+    as_nop();
+}
+
+// Patchable absolute jump (no link).
+void
+MacroAssemblerMIPS::ma_jump(ImmPtr dest)
+{
+    ma_liPatchable(ScratchRegister, dest);
+    as_jr(ScratchRegister);
+    as_nop();
+}
+
+// Emit a trap instruction (used for unreachable paths and debugging).
+void
+MacroAssemblerMIPSCompat::breakpoint()
+{
+    as_break(0);
+}
+
+// Ensure |source| ends up in |dest| as a double: doubles are unboxed,
+// int32s are converted, anything else branches to |failure|.
+void
+MacroAssemblerMIPSCompat::ensureDouble(const ValueOperand &source, FloatRegister dest,
+                                       Label *failure)
+{
+    Label isDouble, done;
+    branchTestDouble(Assembler::Equal, source.typeReg(), &isDouble);
+    branchTestInt32(Assembler::NotEqual, source.typeReg(), failure);
+
+    convertInt32ToDouble(source.payloadReg(), dest);
+    jump(&done);
+
+    bind(&isDouble);
+    unboxDouble(source, dest);
+
+    bind(&done);
+}
+
+// Common initialization for an ABI call sequence: reset the per-call
+// argument bookkeeping. Must be paired with callWithABI*.
+void
+MacroAssemblerMIPSCompat::setupABICall(uint32_t args)
+{
+    MOZ_ASSERT(!inCall_);
+    inCall_ = true;
+    args_ = args;
+    passedArgs_ = 0;
+
+    usedArgSlots_ = 0;
+    firstArgType = MoveOp::GENERAL;
+}
+
+// Caller guarantees the stack is already ABI-aligned.
+void
+MacroAssemblerMIPSCompat::setupAlignedABICall(uint32_t args)
+{
+    setupABICall(args);
+
+    dynamicAlignment_ = false;
+}
+
+// Stack alignment is unknown: save the old sp in |scratch|, align sp
+// downwards, and spill the old sp at the new top so callWithABIPost can
+// restore it.
+void
+MacroAssemblerMIPSCompat::setupUnalignedABICall(uint32_t args, const Register &scratch)
+{
+    setupABICall(args);
+    dynamicAlignment_ = true;
+
+    ma_move(scratch, StackPointer);
+
+    // Force sp to be aligned
+    ma_subu(StackPointer, StackPointer, Imm32(sizeof(uint32_t)));
+    ma_and(StackPointer, StackPointer, Imm32(~(StackAlignment - 1)));
+    as_sw(scratch, StackPointer, 0);
+}
+
+// Queue one outgoing ABI argument as a pending move, following the o32
+// floating-point convention: f12/f14 are used for FP arguments only while
+// the leading arguments are FP (tracked via firstArgType); after that, FP
+// values travel in integer registers / stack slots. Doubles consume two
+// 4-byte slots and are 8-byte aligned on the stack.
+void
+MacroAssemblerMIPSCompat::passABIArg(const MoveOperand &from, MoveOp::Type type)
+{
+    ++passedArgs_;
+    if (!enoughMemory_)
+        return;
+    switch (type) {
+      case MoveOp::FLOAT32:
+        if (!usedArgSlots_) {
+            // First argument and it is FP: goes in f12.
+            if (from.floatReg() != f12)
+                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(f12), type);
+            firstArgType = MoveOp::FLOAT32;
+        } else if ((usedArgSlots_ == 1 && firstArgType == MoveOp::FLOAT32) ||
+                  (usedArgSlots_ == 2 && firstArgType == MoveOp::DOUBLE)) {
+            // Second FP argument directly following an FP first: f14.
+            if (from.floatReg() != f14)
+                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(f14), type);
+        } else {
+            // FP registers are no longer available: use the next integer
+            // argument register, or a stack slot once those run out.
+            Register destReg;
+            if (GetIntArgReg(usedArgSlots_, &destReg)) {
+                if (from.isGeneralReg() && from.reg() == destReg) {
+                    // Nothing to do. Value is in the right register already
+                } else {
+                    enoughMemory_ = moveResolver_.addMove(from, MoveOperand(destReg), type);
+                }
+            } else {
+                uint32_t disp = GetArgStackDisp(usedArgSlots_);
+                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(sp, disp), type);
+            }
+        }
+        usedArgSlots_++;
+        break;
+      case MoveOp::DOUBLE:
+        if (!usedArgSlots_) {
+            // First argument and it is FP: goes in f12, occupying two slots.
+            if (from.floatReg() != f12)
+                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(f12), type);
+            usedArgSlots_ = 2;
+            firstArgType = MoveOp::DOUBLE;
+        } else if (usedArgSlots_ <= 2) {
+            if ((usedArgSlots_ == 1 && firstArgType == MoveOp::FLOAT32) ||
+               (usedArgSlots_ == 2 && firstArgType == MoveOp::DOUBLE)) {
+                // Second FP argument directly following an FP first: f14.
+                if (from.floatReg() != f14)
+                    enoughMemory_ = moveResolver_.addMove(from, MoveOperand(f14), type);
+            } else {
+                // Create two moves so that cycles are found. Move emitter
+                // will have special case to handle this.
+                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(a2), type);
+                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(a3), type);
+            }
+            usedArgSlots_ = 4;
+        } else {
+            // Align if necessary
+            usedArgSlots_ += usedArgSlots_ % 2;
+
+            uint32_t disp = GetArgStackDisp(usedArgSlots_);
+            enoughMemory_ = moveResolver_.addMove(from, MoveOperand(sp, disp), type);
+            usedArgSlots_ += 2;
+        }
+        break;
+      case MoveOp::GENERAL:
+        Register destReg;
+        if (GetIntArgReg(usedArgSlots_, &destReg)) {
+            if (from.isGeneralReg() && from.reg() == destReg) {
+                // Nothing to do. Value is in the right register already
+            } else {
+                enoughMemory_ = moveResolver_.addMove(from, MoveOperand(destReg), type);
+            }
+        } else {
+            uint32_t disp = GetArgStackDisp(usedArgSlots_);
+            enoughMemory_ = moveResolver_.addMove(from, MoveOperand(sp, disp), type);
+        }
+        usedArgSlots_++;
+        break;
+      default:
+        MOZ_ASSUME_UNREACHABLE("Unexpected argument type");
+    }
+}
+
+// Convenience wrappers over the MoveOperand form.
+void
+MacroAssemblerMIPSCompat::passABIArg(const Register &reg)
+{
+    passABIArg(MoveOperand(reg), MoveOp::GENERAL);
+}
+
+void
+MacroAssemblerMIPSCompat::passABIArg(const FloatRegister &freg, MoveOp::Type type)
+{
+    passABIArg(MoveOperand(freg), type);
+}
+
+// Debug-only runtime assertion that sp is StackAlignment-aligned; traps
+// with a break instruction if not.
+void MacroAssemblerMIPSCompat::checkStackAlignment()
+{
+#ifdef DEBUG
+    Label aligned;
+    as_andi(ScratchRegister, sp, StackAlignment - 1);
+    ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump);
+    as_break(MAX_BREAK_CODE);
+    bind(&aligned);
+#endif
+}
+
+// First half of an ABI call: size and reserve the outgoing frame ($ra slot,
+// argument area, alignment padding), save $ra, and emit the queued argument
+// moves. |*stackAdjust| is returned for callWithABIPost.
+void
+MacroAssemblerMIPSCompat::callWithABIPre(uint32_t *stackAdjust)
+{
+    MOZ_ASSERT(inCall_);
+
+    // Reserve place for $ra.
+    *stackAdjust = sizeof(intptr_t);
+
+    // Always reserve at least the four-word o32 argument area, even when
+    // all arguments fit in registers.
+    *stackAdjust += usedArgSlots_ > NumIntArgRegs ?
+                    usedArgSlots_ * sizeof(intptr_t) :
+                    NumIntArgRegs * sizeof(intptr_t);
+
+    if (dynamicAlignment_) {
+        *stackAdjust += ComputeByteAlignment(*stackAdjust, StackAlignment);
+    } else {
+        *stackAdjust += ComputeByteAlignment(framePushed_ + *stackAdjust, StackAlignment);
+    }
+
+    reserveStack(*stackAdjust);
+
+    // Save $ra because call is going to clobber it. Restore it in
+    // callWithABIPost. NOTE: This is needed for calls from BaselineIC.
+    // Maybe we can do this differently.
+    ma_sw(ra, Address(StackPointer, *stackAdjust - sizeof(intptr_t)));
+
+    // Position all arguments.
+    {
+        enoughMemory_ = enoughMemory_ && moveResolver_.resolve();
+        if (!enoughMemory_)
+            return;
+
+        MoveEmitter emitter(*this);
+        emitter.emit(moveResolver_);
+        emitter.finish();
+    }
+
+    checkStackAlignment();
+}
+
+// Second half of an ABI call: restore $ra and the pre-call stack pointer
+// (either from the dynamic-alignment spill or by freeing the reserved
+// frame), and close the inCall_ bracket.
+void
+MacroAssemblerMIPSCompat::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result)
+{
+    // Restore ra value (as stored in callWithABIPre()).
+    ma_lw(ra, Address(StackPointer, stackAdjust - sizeof(intptr_t)));
+
+    if (dynamicAlignment_) {
+        // Restore sp value from stack (as stored in setupUnalignedABICall()).
+        ma_lw(StackPointer, Address(StackPointer, stackAdjust));
+        // Use adjustFrame instead of freeStack because we already restored sp.
+        adjustFrame(-stackAdjust);
+    } else {
+        freeStack(stackAdjust);
+    }
+
+    MOZ_ASSERT(inCall_);
+    inCall_ = false;
+}
+
+// Perform a full ABI call to a raw function pointer.
+void
+MacroAssemblerMIPSCompat::callWithABI(void *fun, MoveOp::Type result)
+{
+    uint32_t stackAdjust;
+    callWithABIPre(&stackAdjust);
+    ma_call(ImmPtr(fun));
+    callWithABIPost(stackAdjust, result);
+}
+
+// ABI call to an AsmJS-patchable immediate target.
+void
+MacroAssemblerMIPSCompat::callWithABI(AsmJSImmPtr imm, MoveOp::Type result)
+{
+    uint32_t stackAdjust;
+    callWithABIPre(&stackAdjust);
+    call(imm);
+    callWithABIPost(stackAdjust, result);
+}
+
+// ABI call to a function pointer loaded from memory.
+void
+MacroAssemblerMIPSCompat::callWithABI(const Address &fun, MoveOp::Type result)
+{
+    // Load the callee in t9, no instruction between the lw and call
+    // should clobber it. Note that we can't use fun.base because it may
+    // be one of the IntArg registers clobbered before the call.
+    ma_lw(t9, Address(fun.base, fun.offset));
+    uint32_t stackAdjust;
+    callWithABIPre(&stackAdjust);
+    call(t9);
+    callWithABIPost(stackAdjust, result);
+
+}
+
+// Exception path: reserve an aligned ResumeFromException record on the
+// stack, call the C++ |handler| with its address in a0, then jump to the
+// exception tail which dispatches on the recorded resume kind (see
+// handleFailureWithHandlerTail).
+void
+MacroAssemblerMIPSCompat::handleFailureWithHandler(void *handler)
+{
+    // Reserve space for exception information.
+    int size = (sizeof(ResumeFromException) + StackAlignment) & ~(StackAlignment - 1);
+    ma_subu(StackPointer, StackPointer, Imm32(size));
+    ma_move(a0, StackPointer); // Use a0 since it is a first function argument
+
+    // Ask for an exception handler.
+    setupUnalignedABICall(1, a1);
+    passABIArg(a0);
+    callWithABI(handler);
+
+    JitCode *excTail = GetIonContext()->runtime->jitRuntime()->getExceptionTail();
+    branch(excTail);
+}
+
+// Dispatch on the ResumeFromException record left on the stack by
+// handleFailureWithHandler: return an error from the entry frame, resume at
+// a catch or finally block, force a return, or bail out to baseline.
+void
+MacroAssemblerMIPSCompat::handleFailureWithHandlerTail()
+{
+    Label entryFrame;
+    Label catch_;
+    Label finally;
+    Label return_;
+    Label bailout;
+
+    // Already clobbered a0, so use it...
+    ma_lw(a0, Address(StackPointer, offsetof(ResumeFromException, kind)));
+    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_ENTRY_FRAME), &entryFrame);
+    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_CATCH), &catch_);
+    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_FINALLY), &finally);
+    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_);
+    branch32(Assembler::Equal, a0, Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);
+
+    breakpoint(); // Invalid kind.
+
+    // No exception handler. Load the error value, load the new stack pointer
+    // and return from the entry frame.
+    bind(&entryFrame);
+    moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+    ma_lw(StackPointer, Address(StackPointer, offsetof(ResumeFromException, stackPointer)));
+
+    // We're going to be returning by the ion calling convention
+    ma_pop(ra);
+    as_jr(ra);
+    as_nop();
+
+    // If we found a catch handler, this must be a baseline frame. Restore
+    // state and jump to the catch block.
+    bind(&catch_);
+    ma_lw(a0, Address(StackPointer, offsetof(ResumeFromException, target)));
+    ma_lw(BaselineFrameReg, Address(StackPointer, offsetof(ResumeFromException, framePointer)));
+    ma_lw(StackPointer, Address(StackPointer, offsetof(ResumeFromException, stackPointer)));
+    jump(a0);
+
+    // If we found a finally block, this must be a baseline frame. Push
+    // two values expected by JSOP_RETSUB: BooleanValue(true) and the
+    // exception.
+    bind(&finally);
+    ValueOperand exception = ValueOperand(a1, a2);
+    loadValue(Address(sp, offsetof(ResumeFromException, exception)), exception);
+
+    ma_lw(a0, Address(sp, offsetof(ResumeFromException, target)));
+    ma_lw(BaselineFrameReg, Address(sp, offsetof(ResumeFromException, framePointer)));
+    ma_lw(sp, Address(sp, offsetof(ResumeFromException, stackPointer)));
+
+    pushValue(BooleanValue(true));
+    pushValue(exception);
+    jump(a0);
+
+    // Only used in debug mode. Return BaselineFrame->returnValue() to the
+    // caller.
+    bind(&return_);
+    ma_lw(BaselineFrameReg, Address(StackPointer, offsetof(ResumeFromException, framePointer)));
+    ma_lw(StackPointer, Address(StackPointer, offsetof(ResumeFromException, stackPointer)));
+    loadValue(Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()),
+              JSReturnOperand);
+    ma_move(StackPointer, BaselineFrameReg);
+    pop(BaselineFrameReg);
+    ret();
+
+    // If we are bailing out to baseline to handle an exception, jump to
+    // the bailout tail stub.
+    bind(&bailout);
+    ma_lw(a2, Address(sp, offsetof(ResumeFromException, bailoutInfo)));
+    ma_li(ReturnReg, Imm32(BAILOUT_RETURN_OK));
+    ma_lw(a1, Address(sp, offsetof(ResumeFromException, target)));
+    jump(a1);
+}
+
+// Emit a branch whose offset is recorded so it can later be toggled
+// on/off by the patching machinery.
+CodeOffsetLabel
+MacroAssemblerMIPSCompat::toggledJump(Label *label)
+{
+    CodeOffsetLabel ret(nextOffset().getOffset());
+    ma_b(label);
+    return ret;
+}
+
+// Emit a call to |target| that can be toggled: when disabled, the jalr and
+// its delay-slot nop are replaced by two nops so the patched site keeps a
+// fixed size (asserted against ToggledCallSize()).
+CodeOffsetLabel
+MacroAssemblerMIPSCompat::toggledCall(JitCode *target, bool enabled)
+{
+    BufferOffset bo = nextOffset();
+    CodeOffsetLabel offset(bo.getOffset());
+    addPendingJump(bo, ImmPtr(target->raw()), Relocation::JITCODE);
+    ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
+    if (enabled) {
+        as_jalr(ScratchRegister);
+        as_nop();
+    } else {
+        as_nop();
+        as_nop();
+    }
+    MOZ_ASSERT(nextOffset().getOffset() - offset.offset() == ToggledCallSize());
+    return offset;
+}
new file mode 100644
--- /dev/null
+++ b/js/src/jit/mips/MacroAssembler-mips.h
@@ -0,0 +1,1136 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_MacroAssembler_mips_h
+#define jit_mips_MacroAssembler_mips_h
+
+#include "mozilla/DebugOnly.h"
+
+#include "jsopcode.h"
+
+#include "jit/IonCaches.h"
+#include "jit/IonFrames.h"
+#include "jit/mips/Assembler-mips.h"
+#include "jit/MoveResolver.h"
+
+using mozilla::DebugOnly;
+
+namespace js {
+namespace jit {
+
+
+enum LoadStoreSize
+{
+    SizeByte = 8,
+    SizeHalfWord = 16,
+    SizeWord = 32,
+    SizeDouble = 64
+};
+
+enum LoadStoreExtension
+{
+    ZeroExtend = 0,
+    SignExtend = 1
+};
+
+enum JumpKind
+{
+    LongJump = 0,
+    ShortJump = 1
+};
+
+struct ImmTag : public Imm32
+{
+    ImmTag(JSValueTag mask)
+      : Imm32(int32_t(mask))
+    { }
+};
+
+struct ImmType : public ImmTag
+{
+    ImmType(JSValueType type)
+      : ImmTag(JSVAL_TYPE_TO_TAG(type))
+    { }
+};
+
+static const ValueOperand JSReturnOperand = ValueOperand(JSReturnReg_Type, JSReturnReg_Data);
+static const ValueOperand softfpReturnOperand = ValueOperand(v1, v0);
+
+static Register CallReg = t9;
+static const int defaultShift = 3;
+static_assert(1 << defaultShift == sizeof(jsval), "The defaultShift is wrong");
+
+class MacroAssemblerMIPS : public Assembler
+{
+  protected:
+    Register secondScratchReg_;
+
+  public:
+    MacroAssemblerMIPS() : secondScratchReg_(t8)
+    { }
+
+    Register  secondScratch() {
+        return secondScratchReg_;
+    }
+
+    void convertBoolToInt32(Register source, Register dest);
+    void convertInt32ToDouble(const Register &src, const FloatRegister &dest);
+    void convertInt32ToDouble(const Address &src, FloatRegister dest);
+    void convertUInt32ToDouble(const Register &src, const FloatRegister &dest);
+    void convertUInt32ToFloat32(const Register &src, const FloatRegister &dest);
+    void convertDoubleToFloat32(const FloatRegister &src, const FloatRegister &dest);
+    void branchTruncateDouble(const FloatRegister &src, const Register &dest, Label *fail);
+    void convertDoubleToInt32(const FloatRegister &src, const Register &dest, Label *fail,
+                              bool negativeZeroCheck = true);
+    void convertFloat32ToInt32(const FloatRegister &src, const Register &dest, Label *fail,
+                               bool negativeZeroCheck = true);
+
+    void convertFloat32ToDouble(const FloatRegister &src, const FloatRegister &dest);
+    void branchTruncateFloat32(const FloatRegister &src, const Register &dest, Label *fail);
+    void convertInt32ToFloat32(const Register &src, const FloatRegister &dest);
+    void convertInt32ToFloat32(const Address &src, FloatRegister dest);
+
+
+    void addDouble(FloatRegister src, FloatRegister dest);
+    void subDouble(FloatRegister src, FloatRegister dest);
+    void mulDouble(FloatRegister src, FloatRegister dest);
+    void divDouble(FloatRegister src, FloatRegister dest);
+
+    void negateDouble(FloatRegister reg);
+    void inc64(AbsoluteAddress dest);
+
+  public:
+
+    void ma_move(Register rd, Register rs);
+
+    void ma_li(Register dest, const ImmGCPtr &ptr);
+
+    void ma_li(const Register &dest, AbsoluteLabel *label);
+
+    void ma_li(Register dest, Imm32 imm);
+    void ma_liPatchable(Register dest, Imm32 imm);
+    void ma_liPatchable(Register dest, ImmPtr imm);
+
+    // Shift operations
+    void ma_sll(Register rd, Register rt, Imm32 shift);
+    void ma_srl(Register rd, Register rt, Imm32 shift);
+    void ma_sra(Register rd, Register rt, Imm32 shift);
+    void ma_ror(Register rd, Register rt, Imm32 shift);
+    void ma_rol(Register rd, Register rt, Imm32 shift);
+
+    void ma_sll(Register rd, Register rt, Register shift);
+    void ma_srl(Register rd, Register rt, Register shift);
+    void ma_sra(Register rd, Register rt, Register shift);
+    void ma_ror(Register rd, Register rt, Register shift);
+    void ma_rol(Register rd, Register rt, Register shift);
+
+    // Negate
+    void ma_negu(Register rd, Register rs);
+
+    void ma_not(Register rd, Register rs);
+
+    // and
+    void ma_and(Register rd, Register rs);
+    void ma_and(Register rd, Register rs, Register rt);
+    void ma_and(Register rd, Imm32 imm);
+    void ma_and(Register rd, Register rs, Imm32 imm);
+
+    // or
+    void ma_or(Register rd, Register rs);
+    void ma_or(Register rd, Register rs, Register rt);
+    void ma_or(Register rd, Imm32 imm);
+    void ma_or(Register rd, Register rs, Imm32 imm);
+
+    // xor
+    void ma_xor(Register rd, Register rs);
+    void ma_xor(Register rd, Register rs, Register rt);
+    void ma_xor(Register rd, Imm32 imm);
+    void ma_xor(Register rd, Register rs, Imm32 imm);
+
+    // load
+    void ma_load(const Register &dest, Address address, LoadStoreSize size = SizeWord,
+                 LoadStoreExtension extension = SignExtend);
+    void ma_load(const Register &dest, const BaseIndex &src, LoadStoreSize size = SizeWord,
+                 LoadStoreExtension extension = SignExtend);
+
+    // store
+    void ma_store(const Register &data, Address address, LoadStoreSize size = SizeWord,
+                  LoadStoreExtension extension = SignExtend);
+    void ma_store(const Register &data, const BaseIndex &dest, LoadStoreSize size = SizeWord,
+                  LoadStoreExtension extension = SignExtend);
+    void ma_store(const Imm32 &imm, const BaseIndex &dest, LoadStoreSize size = SizeWord,
+                  LoadStoreExtension extension = SignExtend);
+
+    void computeScaledAddress(const BaseIndex &address, Register dest);
+
+    void computeEffectiveAddress(const Address &address, Register dest) {
+        ma_addu(dest, address.base, Imm32(address.offset));
+    }
+
+    void computeEffectiveAddress(const BaseIndex &address, Register dest) {
+        computeScaledAddress(address, dest);
+        if (address.offset) {
+            ma_addu(dest, dest, Imm32(address.offset));
+        }
+    }
+
+    // arithmetic based ops
+    // add
+    void ma_addu(Register rd, Register rs, Imm32 imm);
+    void ma_addu(Register rd, Register rs);
+    void ma_addu(Register rd, Imm32 imm);
+    void ma_addTestOverflow(Register rd, Register rs, Register rt, Label *overflow);
+    void ma_addTestOverflow(Register rd, Register rs, Imm32 imm, Label *overflow);
+
+    // subtract
+    void ma_subu(Register rd, Register rs, Register rt);
+    void ma_subu(Register rd, Register rs, Imm32 imm);
+    void ma_subu(Register rd, Imm32 imm);
+    void ma_subTestOverflow(Register rd, Register rs, Register rt, Label *overflow);
+    void ma_subTestOverflow(Register rd, Register rs, Imm32 imm, Label *overflow);
+
+    // multiplies.  For now, there are only a few that we care about.
+    void ma_mult(Register rs, Imm32 imm);
+    void ma_mul_branch_overflow(Register rd, Register rs, Register rt, Label *overflow);
+    void ma_mul_branch_overflow(Register rd, Register rs, Imm32 imm, Label *overflow);
+
+    // divisions
+    void ma_div_branch_overflow(Register rd, Register rs, Register rt, Label *overflow);
+    void ma_div_branch_overflow(Register rd, Register rs, Imm32 imm, Label *overflow);
+
+    // Fast mod: uses scratch registers, and thus needs to be in the assembler;
+    // implicitly assumes that we can overwrite dest at the beginning of the sequence.
+    void ma_mod_mask(Register src, Register dest, Register hold, int32_t shift,
+                     Label *negZero = nullptr);
+
+    // memory
+    // shortcut for when we know we're transferring 32 bits of data
+    void ma_lw(Register data, Address address);
+
+    void ma_sw(Register data, Address address);
+    void ma_sw(Imm32 imm, Address address);
+
+    void ma_pop(Register r);
+    void ma_push(Register r);
+
+    // branches when done from within mips-specific code
+    void ma_b(Register lhs, Register rhs, Label *l, Condition c, JumpKind jumpKind = LongJump);
+    void ma_b(Register lhs, Imm32 imm, Label *l, Condition c, JumpKind jumpKind = LongJump);
+    void ma_b(Register lhs, Address addr, Label *l, Condition c, JumpKind jumpKind = LongJump);
+    void ma_b(Address addr, Imm32 imm, Label *l, Condition c, JumpKind jumpKind = LongJump);
+    void ma_b(Label *l, JumpKind jumpKind = LongJump);
+    void ma_bal(Label *l, JumpKind jumpKind = LongJump);
+
+    // fp instructions
+    void ma_lis(FloatRegister dest, float value);
+    void ma_lid(FloatRegister dest, double value);
+    void ma_liNegZero(FloatRegister dest);
+
+    void ma_mv(FloatRegister src, ValueOperand dest);
+    void ma_mv(ValueOperand src, FloatRegister dest);
+
+    void ma_ls(FloatRegister fd, Address address);
+    void ma_ld(FloatRegister fd, Address address);
+    void ma_sd(FloatRegister fd, Address address);
+    void ma_sd(FloatRegister fd, BaseIndex address);
+    void ma_ss(FloatRegister fd, Address address);
+    void ma_ss(FloatRegister fd, BaseIndex address);
+
+    void ma_pop(FloatRegister fs);
+    void ma_push(FloatRegister fs);
+
+    // FP branches
+    void ma_bc1s(FloatRegister lhs, FloatRegister rhs, Label *label, DoubleCondition c,
+                 JumpKind jumpKind = LongJump, FPConditionBit fcc = FCC0);
+    void ma_bc1d(FloatRegister lhs, FloatRegister rhs, Label *label, DoubleCondition c,
+                 JumpKind jumpKind = LongJump, FPConditionBit fcc = FCC0);
+
+  protected:
+    void branchWithCode(InstImm code, Label *label, JumpKind jumpKind);
+    Condition ma_cmp(Register rd, Register lhs, Register rhs, Condition c);
+
+    void compareFloatingPoint(FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
+                              DoubleCondition c, FloatTestKind *testKind,
+                              FPConditionBit fcc = FCC0);
+
+  public:
+    // calls an Ion function, assumes that the stack is untouched (8 byte aligned)
+    void ma_callIon(const Register reg);
+    // calls an Ion function, assuming that sp has already been decremented
+    void ma_callIonNoPush(const Register reg);
+    // calls an Ion function, assuming that the stack is currently not 8 byte aligned
+    void ma_callIonHalfPush(const Register reg);
+
+    void ma_call(ImmPtr dest);
+
+    void ma_jump(ImmPtr dest);
+
+    void ma_cmp_set(Register dst, Register lhs, Register rhs, Condition c);
+    void ma_cmp_set(Register dst, Register lhs, Imm32 imm, Condition c);
+    void ma_cmp_set(Register rd, Register rs, Address addr, Condition c);
+    void ma_cmp_set(Register dst, Address lhs, Register imm, Condition c);
+    void ma_cmp_set_double(Register dst, FloatRegister lhs, FloatRegister rhs, DoubleCondition c);
+    void ma_cmp_set_float32(Register dst, FloatRegister lhs, FloatRegister rhs, DoubleCondition c);
+};
+
+class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
+{
+    // Number of bytes the stack is adjusted inside a call to C. Calls to C may
+    // not be nested.
+    bool inCall_;
+    uint32_t args_;
+    // The actual number of arguments that were passed, used to assert that
+    // the initial number of arguments declared was correct.
+    uint32_t passedArgs_;
+
+    uint32_t usedArgSlots_;
+    MoveOp::Type firstArgType;
+
+    bool dynamicAlignment_;
+
+    bool enoughMemory_;
+    // Compute space needed for the function call and set the properties of the
+    // callee.  It returns the space which has to be allocated for calling the
+    // function.
+    //
+    // arg            Number of arguments of the function.
+    void setupABICall(uint32_t arg);
+
+  protected:
+    MoveResolver moveResolver_;
+
+    // Extra bytes currently pushed onto the frame beyond frameDepth_. This is
+    // needed to compute offsets to stack slots while temporary space has been
+    // reserved for unexpected spills or C++ function calls. It is maintained
+    // by functions which track stack alignment, which for clear distinction
+    // use StudlyCaps (for example, Push, Pop).
+    uint32_t framePushed_;
+    void adjustFrame(int value) {
+        setFramePushed(framePushed_ + value);
+    }
+  public:
+    MacroAssemblerMIPSCompat()
+      : inCall_(false),
+        enoughMemory_(true),
+        framePushed_(0)
+    { }
+    bool oom() const {
+        return Assembler::oom();
+    }
+
+  public:
+    using MacroAssemblerMIPS::call;
+
+    void j(Label *dest) {
+        ma_b(dest);
+    }
+
+    void mov(Register src, Register dest) {
+        as_or(dest, src, zero);
+    }
+    void mov(ImmWord imm, Register dest) {
+        ma_li(dest, Imm32(imm.value));
+    }
+    void mov(ImmPtr imm, Register dest) {
+        mov(ImmWord(uintptr_t(imm.value)), dest);
+    }
+    void mov(Register src, Address dest) {
+        MOZ_ASSUME_UNREACHABLE("NYI-IC");
+    }
+    void mov(Address src, Register dest) {
+        MOZ_ASSUME_UNREACHABLE("NYI-IC");
+    }
+
+    void call(const Register reg) {
+        as_jalr(reg);
+        as_nop();
+    }
+
+    void call(Label *label) {
+        // For now, assume that the call target will be nearby.
+        ma_bal(label);
+    }
+
+    void call(ImmWord imm) {
+        call(ImmPtr((void*)imm.value));
+    }
+    void call(ImmPtr imm) {
+        BufferOffset bo = m_buffer.nextOffset();
+        addPendingJump(bo, imm, Relocation::HARDCODED);
+        ma_call(imm);
+    }
+    void call(AsmJSImmPtr imm) {
+        movePtr(imm, CallReg);
+        call(CallReg);
+    }
+    void call(JitCode *c) {
+        BufferOffset bo = m_buffer.nextOffset();
+        addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
+        ma_liPatchable(ScratchRegister, Imm32((uint32_t)c->raw()));
+        ma_callIonHalfPush(ScratchRegister);
+    }
+    void branch(JitCode *c) {
+        BufferOffset bo = m_buffer.nextOffset();
+        addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);
+        ma_liPatchable(ScratchRegister, Imm32((uint32_t)c->raw()));
+        as_jr(ScratchRegister);
+        as_nop();
+    }
+    void branch(const Register reg) {
+        as_jr(reg);
+        as_nop();
+    }
+    void nop() {
+        as_nop();
+    }
+    void ret() {
+        ma_pop(ra);
+        as_jr(ra);
+        as_nop();
+    }
+    void retn(Imm32 n) {
+        // pc <- [sp]; sp += n
+        ma_lw(ra, Address(StackPointer, 0));
+        ma_addu(StackPointer, StackPointer, n);
+        as_jr(ra);
+        as_nop();
+    }
+    void push(Imm32 imm) {
+        ma_li(ScratchRegister, imm);
+        ma_push(ScratchRegister);
+    }
+    void push(ImmWord imm) {
+        ma_li(ScratchRegister, Imm32(imm.value));
+        ma_push(ScratchRegister);
+    }
+    void push(ImmGCPtr imm) {
+        ma_li(ScratchRegister, imm);
+        ma_push(ScratchRegister);
+    }
+    void push(const Address &address) {
+        ma_lw(ScratchRegister, address);
+        ma_push(ScratchRegister);
+    }
+    void push(const Register &reg) {
+        ma_push(reg);
+    }
+    void push(const FloatRegister &reg) {
+        ma_push(reg);
+    }
+    void pop(const Register &reg) {
+        ma_pop(reg);
+    }
+    void pop(const FloatRegister &reg) {
+        ma_pop(reg);
+    }
+
+    // Emit a branch that can be toggled to a non-operation. On MIPS we use
+    // "andi" instruction to toggle the branch.
+    // See ToggleToJmp(), ToggleToCmp().
+    CodeOffsetLabel toggledJump(Label *label);
+
+    // Emit a "jalr" or "nop" instruction. ToggleCall can be used to patch
+    // this instruction.
+    CodeOffsetLabel toggledCall(JitCode *target, bool enabled);
+
+    static size_t ToggledCallSize() {
+        // Four instructions used in: MacroAssemblerMIPSCompat::toggledCall
+        return 4 * sizeof(uint32_t);
+    }
+
+    CodeOffsetLabel pushWithPatch(ImmWord imm) {
+        CodeOffsetLabel label = movWithPatch(imm, ScratchRegister);
+        ma_push(ScratchRegister);
+        return label;
+    }
+
+    CodeOffsetLabel movWithPatch(ImmWord imm, Register dest) {
+        CodeOffsetLabel label = currentOffset();
+        ma_liPatchable(dest, Imm32(imm.value));
+        return label;
+    }
+    CodeOffsetLabel movWithPatch(ImmPtr imm, Register dest) {
+        return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
+    }
+
+    void jump(Label *label) {
+        ma_b(label);
+    }
+    void jump(Register reg) {
+        as_jr(reg);
+        as_nop();
+    }
+    void jump(const Address &address) {
+        ma_lw(ScratchRegister, address);
+        as_jr(ScratchRegister);
+        as_nop();
+    }
+
+    void neg32(Register reg) {
+        ma_negu(reg, reg);
+    }
+    void negl(Register reg) {
+        ma_negu(reg, reg);
+    }
+
+    // Returns the register containing the type tag.
+    Register splitTagForTest(const ValueOperand &value) {
+        return value.typeReg();
+    }
+
+    void branchTestGCThing(Condition cond, const Address &address, Label *label);
+    void branchTestGCThing(Condition cond, const BaseIndex &src, Label *label);
+
+    void branchTestPrimitive(Condition cond, const ValueOperand &value, Label *label);
+    void branchTestPrimitive(Condition cond, const Register &tag, Label *label);
+
+    void branchTestValue(Condition cond, const ValueOperand &value, const Value &v, Label *label);
+    void branchTestValue(Condition cond, const Address &valaddr, const ValueOperand &value,
+                         Label *label);
+
+    // unboxing code
+    void unboxInt32(const ValueOperand &operand, const Register &dest);
+    void unboxInt32(const Address &src, const Register &dest);
+    void unboxBoolean(const ValueOperand &operand, const Register &dest);
+    void unboxBoolean(const Address &src, const Register &dest);
+    void unboxDouble(const ValueOperand &operand, const FloatRegister &dest);
+    void unboxDouble(const Address &src, const FloatRegister &dest);
+    void unboxString(const ValueOperand &operand, const Register &dest);
+    void unboxString(const Address &src, const Register &dest);
+    void unboxObject(const ValueOperand &src, const Register &dest);
+    void unboxValue(const ValueOperand &src, AnyRegister dest);
+    void unboxPrivate(const ValueOperand &src, Register dest);
+
+    void notBoolean(const ValueOperand &val) {
+        as_xori(val.payloadReg(), val.payloadReg(), 1);
+    }
+
+    // boxing code
+    void boxDouble(const FloatRegister &src, const ValueOperand &dest);
+    void boxNonDouble(JSValueType type, const Register &src, const ValueOperand &dest);
+
+    // Extended unboxing API. If the payload is already in a register, returns
+    // that register. Otherwise, provides a move to the given scratch register,
+    // and returns that.
+    Register extractObject(const Address &address, Register scratch);
+    Register extractObject(const ValueOperand &value, Register scratch) {
+        return value.payloadReg();
+    }
+    Register extractInt32(const ValueOperand &value, Register scratch) {
+        return value.payloadReg();
+    }
+    Register extractBoolean(const ValueOperand &value, Register scratch) {
+        return value.payloadReg();
+    }
+    Register extractTag(const Address &address, Register scratch);
+    Register extractTag(const BaseIndex &address, Register scratch);
+    Register extractTag(const ValueOperand &value, Register scratch) {
+        return value.typeReg();
+    }
+
+    void boolValueToDouble(const ValueOperand &operand, const FloatRegister &dest);
+    void int32ValueToDouble(const ValueOperand &operand, const FloatRegister &dest);
+    void loadInt32OrDouble(const Address &address, const FloatRegister &dest);
+    void loadInt32OrDouble(Register base, Register index,
+                           const FloatRegister &dest, int32_t shift = defaultShift);
+    void loadConstantDouble(double dp, const FloatRegister &dest);
+
+    void boolValueToFloat32(const ValueOperand &operand, const FloatRegister &dest);
+    void int32ValueToFloat32(const ValueOperand &operand, const FloatRegister &dest);
+    void loadConstantFloat32(float f, const FloatRegister &dest);
+
+    void branchTestInt32(Condition cond, const ValueOperand &value, Label *label);
+    void branchTestInt32(Condition cond, const Register &tag, Label *label);
+    void branchTestInt32(Condition cond, const Address &address, Label *label);
+    void branchTestInt32(Condition cond, const BaseIndex &src, Label *label);
+
+    void branchTestBoolean(Condition cond, const ValueOperand &value, Label *label);
+    void branchTestBoolean(Condition cond, const Register &tag, Label *label);
+    void branchTestBoolean(Condition cond, const BaseIndex &src, Label *label);
+
+    void branch32(Condition cond, Register lhs, Register rhs, Label *label) {
+        ma_b(lhs, rhs, label, cond);
+    }
+    void branch32(Condition cond, Register lhs, Imm32 imm, Label *label) {
+        ma_b(lhs, imm, label, cond);
+    }
+    void branch32(Condition cond, const Operand &lhs, Register rhs, Label *label) {
+        if (lhs.getTag() == Operand::REG) {
+            ma_b(lhs.toReg(), rhs, label, cond);
+        } else {
+            branch32(cond, lhs.toAddress(), rhs, label);
+        }
+    }
+    void branch32(Condition cond, const Operand &lhs, Imm32 rhs, Label *label) {
+        if (lhs.getTag() == Operand::REG) {
+            ma_b(lhs.toReg(), rhs, label, cond);
+        } else {
+            branch32(cond, lhs.toAddress(), rhs, label);
+        }
+    }
+    void branch32(Condition cond, const Address &lhs, Register rhs, Label *label) {
+        ma_lw(ScratchRegister, lhs);
+        ma_b(ScratchRegister, rhs, label, cond);
+    }
+    void branch32(Condition cond, const Address &lhs, Imm32 rhs, Label *label) {
+        ma_lw(secondScratchReg_, lhs);
+        ma_b(secondScratchReg_, rhs, label, cond);
+    }
+    void branchPtr(Condition cond, const Address &lhs, Register rhs, Label *label) {
+        branch32(cond, lhs, rhs, label);
+    }
+
+    void branchPrivatePtr(Condition cond, const Address &lhs, ImmPtr ptr, Label *label) {
+        branchPtr(cond, lhs, ptr, label);
+    }
+
+    void branchPrivatePtr(Condition cond, const Address &lhs, Register ptr, Label *label) {
+        branchPtr(cond, lhs, ptr, label);
+    }
+
+    void branchPrivatePtr(Condition cond, Register lhs, ImmWord ptr, Label *label) {
+        branchPtr(cond, lhs, ptr, label);
+    }
+
+    void branchTestDouble(Condition cond, const ValueOperand &value, Label *label);
+    void branchTestDouble(Condition cond, const Register &tag, Label *label);
+    void branchTestDouble(Condition cond, const Address &address, Label *label);
+    void branchTestDouble(Condition cond, const BaseIndex &src, Label *label);
+
+    void branchTestNull(Condition cond, const ValueOperand &value, Label *label);
+    void branchTestNull(Condition cond, const Register &tag, Label *label);
+    void branchTestNull(Condition cond, const BaseIndex &src, Label *label);
+
+    void branchTestObject(Condition cond, const ValueOperand &value, Label *label);
+    void branchTestObject(Condition cond, const Register &tag, Label *label);
+    void branchTestObject(Condition cond, const BaseIndex &src, Label *label);
+
+    void branchTestString(Condition cond, const ValueOperand &value, Label *label);
+    void branchTestString(Condition cond, const Register &tag, Label *label);
+    void branchTestString(Condition cond, const BaseIndex &src, Label *label);
+
+    void branchTestUndefined(Condition cond, const ValueOperand &value, Label *label);
+    void branchTestUndefined(Condition cond, const Register &tag, Label *label);
+    void branchTestUndefined(Condition cond, const BaseIndex &src, Label *label);
+    void branchTestUndefined(Condition cond, const Address &address, Label *label);
+
+    void branchTestNumber(Condition cond, const ValueOperand &value, Label *label);
+    void branchTestNumber(Condition cond, const Register &tag, Label *label);
+
+    void branchTestMagic(Condition cond, const ValueOperand &value, Label *label);
+    void branchTestMagic(Condition cond, const Register &tag, Label *label);
+    void branchTestMagic(Condition cond, const Address &address, Label *label);
+    void branchTestMagic(Condition cond, const BaseIndex &src, Label *label);
+
+    void branchTestMagicValue(Condition cond, const ValueOperand &val, JSWhyMagic why,
+                              Label *label) {
+        MOZ_ASSERT(cond == Equal || cond == NotEqual);
+        // Test for magic
+        Label notmagic;
+        branchTestMagic(cond, val, &notmagic);
+        // Test magic value
+        branch32(cond, val.payloadReg(), Imm32(static_cast<int32_t>(why)), label);
+        bind(&notmagic);
+    }
+
+    void branchTestInt32Truthy(bool b, const ValueOperand &value, Label *label);
+
+    void branchTestStringTruthy(bool b, const ValueOperand &value, Label *label);
+
+    void branchTestDoubleTruthy(bool b, const FloatRegister &value, Label *label);
+
+    void branchTestBooleanTruthy(bool b, const ValueOperand &operand, Label *label);
+
+    void branchTest32(Condition cond, const Register &lhs, const Register &rhs, Label *label) {
+        MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
+        if (lhs == rhs) {
+            ma_b(lhs, rhs, label, cond);
+        } else {
+            as_and(ScratchRegister, lhs, rhs);
+            ma_b(ScratchRegister, ScratchRegister, label, cond);
+        }
+    }
+    void branchTest32(Condition cond, const Register &lhs, Imm32 imm, Label *label) {
+        ma_li(ScratchRegister, imm);
+        branchTest32(cond, lhs, ScratchRegister, label);
+    }
+    void branchTest32(Condition cond, const Address &address, Imm32 imm, Label *label) {
+        ma_lw(secondScratchReg_, address);
+        branchTest32(cond, secondScratchReg_, imm, label);
+    }
+    void branchTestPtr(Condition cond, const Register &lhs, const Register &rhs, Label *label) {
+        branchTest32(cond, lhs, rhs, label);
+    }
+    void branchTestPtr(Condition cond, const Register &lhs, const Imm32 rhs, Label *label) {
+        branchTest32(cond, lhs, rhs, label);
+    }
+    void branchTestPtr(Condition cond, const Address &lhs, Imm32 imm, Label *label) {
+        branchTest32(cond, lhs, imm, label);
+    }
+    void branchPtr(Condition cond, Register lhs, Register rhs, Label *label) {
+        ma_b(lhs, rhs, label, cond);
+    }
+    void branchPtr(Condition cond, Register lhs, ImmGCPtr ptr, Label *label) {
+        ma_li(ScratchRegister, ptr);
+        ma_b(lhs, ScratchRegister, label, cond);
+    }
+    void branchPtr(Condition cond, Register lhs, ImmWord imm, Label *label) {
+        ma_b(lhs, Imm32(imm.value), label, cond);
+    }
+    void branchPtr(Condition cond, Register lhs, ImmPtr imm, Label *label) {
+        branchPtr(cond, lhs, ImmWord(uintptr_t(imm.value)), label);
+    }
+    void branchPtr(Condition cond, Register lhs, AsmJSImmPtr imm, Label *label) {
+        movePtr(imm, ScratchRegister);
+        branchPtr(cond, lhs, ScratchRegister, label);
+    }
+    void decBranchPtr(Condition cond, const Register &lhs, Imm32 imm, Label *label) {
+        subPtr(imm, lhs);
+        branch32(cond, lhs, Imm32(0), label);
+    }
+
+protected:
+    uint32_t getType(const Value &val);
+    void moveData(const Value &val, Register data);
+public:
+    void moveValue(const Value &val, Register type, Register data);
+
+    CodeOffsetJump jumpWithPatch(RepatchLabel *label);
+
+    template <typename T>
+    CodeOffsetJump branchPtrWithPatch(Condition cond, Register reg, T ptr, RepatchLabel *label) {
+        movePtr(ptr, ScratchRegister);
+        Label skipJump;
+        ma_b(reg, ScratchRegister, &skipJump, InvertCondition(cond), ShortJump);
+        CodeOffsetJump off = jumpWithPatch(label);
+        bind(&skipJump);
+        return off;
+    }
+
+    template <typename T>
+    CodeOffsetJump branchPtrWithPatch(Condition cond, Address addr, T ptr, RepatchLabel *label) {
+        loadPtr(addr, secondScratchReg_);
+        movePtr(ptr, ScratchRegister);
+        Label skipJump;
+        ma_b(secondScratchReg_, ScratchRegister, &skipJump, InvertCondition(cond), ShortJump);
+        CodeOffsetJump off = jumpWithPatch(label);
+        bind(&skipJump);
+        return off;
+    }
+    void branchPtr(Condition cond, Address addr, ImmGCPtr ptr, Label *label) {
+        ma_lw(secondScratchReg_, addr);
+        ma_li(ScratchRegister, ptr);
+        ma_b(secondScratchReg_, ScratchRegister, label, cond);
+    }
+    void branchPtr(Condition cond, Address addr, ImmWord ptr, Label *label) {
+        ma_lw(secondScratchReg_, addr);
+        ma_b(secondScratchReg_, Imm32(ptr.value), label, cond);
+    }
+    void branchPtr(Condition cond, Address addr, ImmPtr ptr, Label *label) {
+        branchPtr(cond, addr, ImmWord(uintptr_t(ptr.value)), label);
+    }
+    void branchPtr(Condition cond, const AbsoluteAddress &addr, const Register &ptr, Label *label) {
+        loadPtr(addr, ScratchRegister);
+        ma_b(ScratchRegister, ptr, label, cond);
+    }
+    void branchPtr(Condition cond, const AsmJSAbsoluteAddress &addr, const Register &ptr,
+                   Label *label) {
+        loadPtr(addr, ScratchRegister);
+        ma_b(ScratchRegister, ptr, label, cond);
+    }
+    void branch32(Condition cond, const AbsoluteAddress &lhs, Imm32 rhs, Label *label) {
+        loadPtr(lhs, secondScratchReg_); // ma_b might use scratch
+        ma_b(secondScratchReg_, rhs, label, cond);
+    }
+    void branch32(Condition cond, const AbsoluteAddress &lhs, const Register &rhs, Label *label) {
+        loadPtr(lhs, ScratchRegister);
+        ma_b(ScratchRegister, rhs, label, cond);
+    }
+
+    void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
+        if (dest.isFloat())
+            loadInt32OrDouble(address, dest.fpu());
+        else
+            ma_lw(dest.gpr(), address);
+    }
+
+    void loadUnboxedValue(BaseIndex address, MIRType type, AnyRegister dest) {
+        if (dest.isFloat())
+            loadInt32OrDouble(address.base, address.index, dest.fpu(), address.scale);
+        else
+            load32(address, dest.gpr());
+    }
+
+    void moveValue(const Value &val, const ValueOperand &dest);
+
+    void moveValue(const ValueOperand &src, const ValueOperand &dest) {
+        MOZ_ASSERT(src.typeReg() != dest.payloadReg());
+        MOZ_ASSERT(src.payloadReg() != dest.typeReg());
+        if (src.typeReg() != dest.typeReg())
+            ma_move(dest.typeReg(), src.typeReg());
+        if (src.payloadReg() != dest.payloadReg())
+            ma_move(dest.payloadReg(), src.payloadReg());
+    }
+
+    void storeValue(ValueOperand val, Operand dst);
+    void storeValue(ValueOperand val, const BaseIndex &dest);
+    void storeValue(JSValueType type, Register reg, BaseIndex dest);
+    void storeValue(ValueOperand val, const Address &dest);
+    void storeValue(JSValueType type, Register reg, Address dest);
+    void storeValue(const Value &val, Address dest);
+    void storeValue(const Value &val, BaseIndex dest);
+
+    void loadValue(Address src, ValueOperand val);
+    void loadValue(Operand dest, ValueOperand val) {
+        loadValue(dest.toAddress(), val);
+    }
+    void loadValue(const BaseIndex &addr, ValueOperand val);
+    void tagValue(JSValueType type, Register payload, ValueOperand dest);
+
+    void pushValue(ValueOperand val);
+    void popValue(ValueOperand val);
+    void pushValue(const Value &val) {
+        jsval_layout jv = JSVAL_TO_IMPL(val);
+        push(Imm32(jv.s.tag));
+        if (val.isMarkable())
+            push(ImmGCPtr(reinterpret_cast<gc::Cell *>(val.toGCThing())));
+        else
+            push(Imm32(jv.s.payload.i32));
+    }
+    void pushValue(JSValueType type, Register reg) {
+        push(ImmTag(JSVAL_TYPE_TO_TAG(type)));
+        ma_push(reg);
+    }
+    void pushValue(const Address &addr);
+    void Push(const ValueOperand &val) {
+        pushValue(val);
+        framePushed_ += sizeof(Value);
+    }
+    void Pop(const ValueOperand &val) {
+        popValue(val);
+        framePushed_ -= sizeof(Value);
+    }
+    void storePayload(const Value &val, Address dest);
+    void storePayload(Register src, Address dest);
+    void storePayload(const Value &val, Register base, Register index, int32_t shift = defaultShift);
+    void storePayload(Register src, Register base, Register index, int32_t shift = defaultShift);
+    void storeTypeTag(ImmTag tag, Address dest);
+    void storeTypeTag(ImmTag tag, Register base, Register index, int32_t shift = defaultShift);
+
+    void makeFrameDescriptor(Register frameSizeReg, FrameType type) {
+        ma_sll(frameSizeReg, frameSizeReg, Imm32(FRAMESIZE_SHIFT));
+        ma_or(frameSizeReg, frameSizeReg, Imm32(type));
+    }
+
+    void linkExitFrame();
+    void linkParallelExitFrame(const Register &pt);
+    void handleFailureWithHandler(void *handler);
+    void handleFailureWithHandlerTail();
+
+    /////////////////////////////////////////////////////////////////
+    // Common interface.
+    /////////////////////////////////////////////////////////////////
+  public:
+    // The following functions are exposed for use in platform-shared code.
+    void Push(const Register &reg) {
+        ma_push(reg);
+        adjustFrame(sizeof(intptr_t));
+    }
+    void Push(const Imm32 imm) {
+        ma_li(ScratchRegister, imm);
+        ma_push(ScratchRegister);
+        adjustFrame(sizeof(intptr_t));
+    }
+    void Push(const ImmWord imm) {
+        ma_li(ScratchRegister, Imm32(imm.value));
+        ma_push(ScratchRegister);
+        adjustFrame(sizeof(intptr_t));
+    }
+    void Push(const ImmPtr imm) {
+        Push(ImmWord(uintptr_t(imm.value)));
+    }
+    void Push(const ImmGCPtr ptr) {
+        ma_li(ScratchRegister, ptr);
+        ma_push(ScratchRegister);
+        adjustFrame(sizeof(intptr_t));
+    }
+    void Push(const FloatRegister &f) {
+        ma_push(f);
+        adjustFrame(sizeof(double));
+    }
+
+    CodeOffsetLabel PushWithPatch(const ImmWord &word) {
+        framePushed_ += sizeof(word.value);
+        return pushWithPatch(word);
+    }
+    CodeOffsetLabel PushWithPatch(const ImmPtr &imm) {
+        return PushWithPatch(ImmWord(uintptr_t(imm.value)));
+    }
+
+    void Pop(const Register &reg) {
+        ma_pop(reg);
+        adjustFrame(-sizeof(intptr_t));
+    }
+    void implicitPop(uint32_t args) {
+        MOZ_ASSERT(args % sizeof(intptr_t) == 0);
+        adjustFrame(-args);
+    }
+    uint32_t framePushed() const {
+        return framePushed_;
+    }
+    void setFramePushed(uint32_t framePushed) {
+        framePushed_ = framePushed;
+    }
+
+    // Builds an exit frame on the stack, with a return address to an internal
+    // non-function. Returns offset to be passed to markSafepointAt().
+    bool buildFakeExitFrame(const Register &scratch, uint32_t *offset);
+
+    void callWithExitFrame(JitCode *target);
+    void callWithExitFrame(JitCode *target, Register dynStack);
+
+    // Makes an Ion call using the only two methods that it is sane for
+    // indep code to make a call
+    void callIon(const Register &callee);
+
+    void reserveStack(uint32_t amount);
+    void freeStack(uint32_t amount);
+    void freeStack(Register amount);
+
+    void add32(Register src, Register dest);
+    void add32(Imm32 imm, Register dest);
+    void add32(Imm32 imm, const Address &dest);
+    void sub32(Imm32 imm, Register dest);
+    void sub32(Register src, Register dest);
+
+    void and32(Imm32 imm, Register dest);
+    void and32(Imm32 imm, const Address &dest);
+    void or32(Imm32 imm, const Address &dest);
+    void xor32(Imm32 imm, Register dest);
+    void xorPtr(Imm32 imm, Register dest);
+    void xorPtr(Register src, Register dest);
+    void orPtr(Imm32 imm, Register dest);
+    void orPtr(Register src, Register dest);
+    void andPtr(Imm32 imm, Register dest);
+    void andPtr(Register src, Register dest);
+    void addPtr(Register src, Register dest);
+    void addPtr(const Address &src, Register dest);
+    void not32(Register reg);
+
+    void move32(const Imm32 &imm, const Register &dest);
+    void move32(const Register &src, const Register &dest);
+
+    void movePtr(const Register &src, const Register &dest);
+    void movePtr(const ImmWord &imm, const Register &dest);
+    void movePtr(const ImmPtr &imm, const Register &dest);
+    void movePtr(const AsmJSImmPtr &imm, const Register &dest);
+    void movePtr(const ImmGCPtr &imm, const Register &dest);
+
+    void load8SignExtend(const Address &address, const Register &dest);
+    void load8SignExtend(const BaseIndex &src, const Register &dest);
+
+    void load8ZeroExtend(const Address &address, const Register &dest);
+    void load8ZeroExtend(const BaseIndex &src, const Register &dest);
+
+    void load16SignExtend(const Address &address, const Register &dest);
+    void load16SignExtend(const BaseIndex &src, const Register &dest);
+
+    void load16ZeroExtend(const Address &address, const Register &dest);
+    void load16ZeroExtend(const BaseIndex &src, const Register &dest);
+
+    void load32(const Address &address, const Register &dest);
+    void load32(const BaseIndex &address, const Register &dest);
+    void load32(const AbsoluteAddress &address, const Register &dest);
+
+    void loadPtr(const Address &address, const Register &dest);
+    void loadPtr(const BaseIndex &src, const Register &dest);
+    void loadPtr(const AbsoluteAddress &address, const Register &dest);
+    void loadPtr(const AsmJSAbsoluteAddress &address, const Register &dest);
+
+    void loadPrivate(const Address &address, const Register &dest);
+
+    void loadDouble(const Address &addr, const FloatRegister &dest);
+    void loadDouble(const BaseIndex &src, const FloatRegister &dest);
+
+    // Load a float value into a register, then expand it to a double.
+    void loadFloatAsDouble(const Address &addr, const FloatRegister &dest);
+    void loadFloatAsDouble(const BaseIndex &src, const FloatRegister &dest);
+
+    void loadFloat32(const Address &addr, const FloatRegister &dest);
+    void loadFloat32(const BaseIndex &src, const FloatRegister &dest);
+
+    void store8(const Register &src, const Address &address);
+    void store8(const Imm32 &imm, const Address &address);
+    void store8(const Register &src, const BaseIndex &address);
+    void store8(const Imm32 &imm, const BaseIndex &address);
+
+    void store16(const Register &src, const Address &address);
+    void store16(const Imm32 &imm, const Address &address);
+    void store16(const Register &src, const BaseIndex &address);
+    void store16(const Imm32 &imm, const BaseIndex &address);
+
+    void store32(const Register &src, const AbsoluteAddress &address);
+    void store32(const Register &src, const Address &address);
+    void store32(const Register &src, const BaseIndex &address);
+    void store32(const Imm32 &src, const Address &address);
+    void store32(const Imm32 &src, const BaseIndex &address);
+
+    void storePtr(ImmWord imm, const Address &address);
+    void storePtr(ImmPtr imm, const Address &address);
+    void storePtr(ImmGCPtr imm, const Address &address);
+    void storePtr(Register src, const Address &address);
+    void storePtr(const Register &src, const AbsoluteAddress &dest);
+    void storeDouble(FloatRegister src, Address addr) {
+        ma_sd(src, addr);
+    }
+    void storeDouble(FloatRegister src, BaseIndex addr) {
+        MOZ_ASSERT(addr.offset == 0);
+        ma_sd(src, addr);
+    }
+    void moveDouble(FloatRegister src, FloatRegister dest) {
+        as_movd(dest, src);
+    }
+
+    void storeFloat32(FloatRegister src, Address addr) {
+        ma_ss(src, addr);
+    }
+    void storeFloat32(FloatRegister src, BaseIndex addr) {
+        MOZ_ASSERT(addr.offset == 0);
+        ma_ss(src, addr);
+    }
+
+    void zeroDouble(FloatRegister reg) {
+        as_mtc1(zero, reg);
+        as_mtc1_Odd(zero, reg);
+    }
+
+    void clampIntToUint8(Register reg) {
+        // look at (reg >> 8) if it is 0, then src shouldn't be clamped
+        // if it is <0, then we want to clamp to 0,
+        // otherwise, we wish to clamp to 255
+        Label done;
+        ma_move(ScratchRegister, reg);
+        as_sra(ScratchRegister, ScratchRegister, 8);
+        ma_b(ScratchRegister, ScratchRegister, &done, Assembler::Zero, ShortJump);
+        {
+            Label negative;
+            ma_b(ScratchRegister, ScratchRegister, &negative, Assembler::Signed, ShortJump);
+            {
+                ma_li(reg, Imm32(255));
+                ma_b(&done, ShortJump);
+            }
+            bind(&negative);
+            {
+                ma_move(reg, zero);
+            }
+        }
+        bind(&done);
+    }
+
+    void subPtr(Imm32 imm, const Register dest);
+    void addPtr(Imm32 imm, const Register dest);
+    void addPtr(Imm32 imm, const Address &dest);
+    void addPtr(ImmWord imm, const Register dest) {
+        addPtr(Imm32(imm.value), dest);
+    }
+    void addPtr(ImmPtr imm, const Register dest) {
+        addPtr(ImmWord(uintptr_t(imm.value)), dest);
+    }
+
+    void breakpoint();
+
+    void branchDouble(DoubleCondition cond, const FloatRegister &lhs, const FloatRegister &rhs,
+                      Label *label);
+
+    void branchFloat(DoubleCondition cond, const FloatRegister &lhs, const FloatRegister &rhs,
+                     Label *label);
+
+    void checkStackAlignment();
+
+    void rshiftPtr(Imm32 imm, Register dest) {
+        ma_srl(dest, dest, imm);
+    }
+    void lshiftPtr(Imm32 imm, Register dest) {
+        ma_sll(dest, dest, imm);
+    }
+
+    // If source is a double, load it into dest. If source is int32,
+    // convert it to double. Else, branch to failure.
+    void ensureDouble(const ValueOperand &source, FloatRegister dest, Label *failure);
+
+    // Setup a call to C/C++ code, given the number of general arguments it
+    // takes. Note that this only supports cdecl.
+    //
+    // In order for alignment to work correctly, the MacroAssembler must have a
+    // consistent view of the stack displacement. It is okay to call "push"
+    // manually, however, if the stack alignment were to change, the macro
+    // assembler should be notified before starting a call.
+    void setupAlignedABICall(uint32_t args);
+
+    // Sets up an ABI call for when the alignment is not known. This may need a
+    // scratch register.
+    void setupUnalignedABICall(uint32_t args, const Register &scratch);
+
+    // Arguments must be assigned in a left-to-right order. This process may
+    // temporarily use more stack, in which case sp-relative addresses will be
+    // automatically adjusted. It is extremely important that sp-relative
+    // addresses are computed *after* setupABICall(). Furthermore, no
+    // operations should be emitted while setting arguments.
+    void passABIArg(const MoveOperand &from, MoveOp::Type type);
+    void passABIArg(const Register &reg);
+    void passABIArg(const FloatRegister &reg, MoveOp::Type type);
+    void passABIArg(const ValueOperand &regs);
+
+  protected:
+    bool buildOOLFakeExitFrame(void *fakeReturnAddr);
+
+  private:
+    void callWithABIPre(uint32_t *stackAdjust);
+    void callWithABIPost(uint32_t stackAdjust, MoveOp::Type result);
+
+  public:
+    // Emits a call to a C/C++ function, resolving all argument moves.
+    void callWithABI(void *fun, MoveOp::Type result = MoveOp::GENERAL);
+    void callWithABI(AsmJSImmPtr imm, MoveOp::Type result = MoveOp::GENERAL);
+    void callWithABI(const Address &fun, MoveOp::Type result = MoveOp::GENERAL);
+
+    CodeOffsetLabel labelForPatch() {
+        return CodeOffsetLabel(nextOffset().getOffset());
+    }
+
+    void memIntToValue(Address Source, Address Dest) {
+        MOZ_ASSUME_UNREACHABLE("NYI");
+    }
+
+    void lea(Operand addr, Register dest) {
+        MOZ_ASSUME_UNREACHABLE("NYI");
+    }
+
+    void abiret() {
+        MOZ_ASSUME_UNREACHABLE("NYI");
+    }
+
+    void ma_storeImm(Imm32 imm, const Address &addr) {
+        ma_sw(imm, addr);
+    }
+
+    BufferOffset ma_BoundsCheck(Register bounded) {
+        BufferOffset bo = m_buffer.nextOffset();
+        ma_liPatchable(bounded, Imm32(0));
+        return bo;
+    }
+
+    void moveFloat32(FloatRegister src, FloatRegister dest) {
+        as_movs(dest, src);
+    }
+};
+
+typedef MacroAssemblerMIPSCompat MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_MacroAssembler_mips_h */
--- a/js/src/jsapi-tests/testScriptInfo.cpp
+++ b/js/src/jsapi-tests/testScriptInfo.cpp
@@ -33,17 +33,17 @@ BEGIN_TEST(testScriptInfo)
                                                  options));
 
     CHECK(script);
 
     jsbytecode *start = JS_LineNumberToPC(cx, script, startLine);
     CHECK_EQUAL(JS_GetScriptBaseLineNumber(cx, script), startLine);
     CHECK_EQUAL(JS_PCToLineNumber(cx, script, start), startLine);
     CHECK_EQUAL(JS_GetScriptLineExtent(cx, script), 11);
-    CHECK(strcmp(JS_GetScriptFilename(cx, script), __FILE__) == 0);
+    CHECK(strcmp(JS_GetScriptFilename(script), __FILE__) == 0);
     const jschar *sourceMap = JS_GetScriptSourceMap(cx, script);
     CHECK(sourceMap);
     CHECK(CharsMatch(sourceMap, "http://example.com/path/to/source-map.json"));
 
     return true;
 }
 static bool
 CharsMatch(const jschar *p, const char *q)
--- a/js/src/jsobj.cpp
+++ b/js/src/jsobj.cpp
@@ -5879,17 +5879,17 @@ js_DumpStackFrame(JSContext *cx, StackFr
 
 JS_FRIEND_API(void)
 js_DumpBacktrace(JSContext *cx)
 {
     Sprinter sprinter(cx);
     sprinter.init();
     size_t depth = 0;
     for (ScriptFrameIter i(cx); !i.done(); ++i, ++depth) {
-        const char *filename = JS_GetScriptFilename(cx, i.script());
+        const char *filename = JS_GetScriptFilename(i.script());
         unsigned line = JS_PCToLineNumber(cx, i.script(), i.pc());
         JSScript *script = i.script();
         sprinter.printf("#%d %14p   %s:%d (%p @ %d)\n",
                         depth, (i.isJit() ? 0 : i.interpFrame()), filename, line,
                         script, script->pcToOffset(i.pc()));
     }
     fprintf(stdout, "%s", sprinter.string());
 }
--- a/js/src/vm/OldDebugAPI.cpp
+++ b/js/src/vm/OldDebugAPI.cpp
@@ -550,17 +550,17 @@ JS_GetDebugClassName(JSObject *obj)
     if (obj->is<DebugScopeObject>())
         return obj->as<DebugScopeObject>().scope().getClass()->name;
     return obj->getClass()->name;
 }
 
 /************************************************************************/
 
 JS_PUBLIC_API(const char *)
-JS_GetScriptFilename(JSContext *cx, JSScript *script)
+JS_GetScriptFilename(JSScript *script)
 {
     return script->filename();
 }
 
 JS_PUBLIC_API(const jschar *)
 JS_GetScriptSourceMap(JSContext *cx, JSScript *script)
 {
     ScriptSource *source = script->scriptSource();
@@ -920,17 +920,24 @@ js_CallContextDebugHandler(JSContext *cx
 }
 
 /*
  * A contructor that crates a FrameDescription from a ScriptFrameIter, to avoid
  * constructing a FrameDescription on the stack just to append it to a vector.
  * FrameDescription contains Heap<T> fields that should not live on the stack.
  */
 JS::FrameDescription::FrameDescription(const ScriptFrameIter& iter)
-  : script_(iter.script()), fun_(iter.maybeCallee()), pc_(iter.pc()), linenoComputed(false) {}
+  : script_(iter.script()),
+    funDisplayName_(nullptr),
+    pc_(iter.pc()),
+    linenoComputed(false)
+{
+    if (JSFunction *fun = iter.maybeCallee())
+        funDisplayName_ = fun->displayAtom();
+}
 
 JS_PUBLIC_API(JS::StackDescription *)
 JS::DescribeStack(JSContext *cx, unsigned maxFrames)
 {
     Vector<FrameDescription> frames(cx);
 
     for (NonBuiltinScriptFrameIter i(cx); !i.done(); ++i) {
         if (!frames.append(i))
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_opensles_android.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_opensles_android.cc
@@ -11,116 +11,173 @@
 #include "webrtc/modules/audio_device/android/audio_device_opensles_android.h"
 
 #include "webrtc/modules/audio_device/android/opensles_input.h"
 #include "webrtc/modules/audio_device/android/opensles_output.h"
 
 namespace webrtc {
 
 AudioDeviceAndroidOpenSLES::AudioDeviceAndroidOpenSLES(const int32_t id)
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
     : output_(id),
-      input_(id, &output_) {
+      input_(id, &output_)
+#else
+    : input_(id, 0)
+#endif
+{
 }
 
 AudioDeviceAndroidOpenSLES::~AudioDeviceAndroidOpenSLES() {
 }
 
 int32_t AudioDeviceAndroidOpenSLES::ActiveAudioLayer(
     AudioDeviceModule::AudioLayer& audioLayer) const { // NOLINT
   return 0;
 }
 
 int32_t AudioDeviceAndroidOpenSLES::Init() {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.Init() | input_.Init();
+#else
+  return input_.Init();
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::Terminate()  {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.Terminate() | input_.Terminate();
+#else
+  return input_.Terminate();
+#endif
 }
 
 bool AudioDeviceAndroidOpenSLES::Initialized() const {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.Initialized() && input_.Initialized();
+#else
+  return input_.Initialized();
+#endif
 }
 
 int16_t AudioDeviceAndroidOpenSLES::PlayoutDevices() {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.PlayoutDevices();
+#else
+  return 0;
+#endif
 }
 
 int16_t AudioDeviceAndroidOpenSLES::RecordingDevices() {
   return input_.RecordingDevices();
 }
 
 int32_t AudioDeviceAndroidOpenSLES::PlayoutDeviceName(
     uint16_t index,
     char name[kAdmMaxDeviceNameSize],
     char guid[kAdmMaxGuidSize]) {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.PlayoutDeviceName(index, name, guid);
+#else
+  return -1;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::RecordingDeviceName(
     uint16_t index,
     char name[kAdmMaxDeviceNameSize],
     char guid[kAdmMaxGuidSize]) {
   return input_.RecordingDeviceName(index, name, guid);
 }
 
 int32_t AudioDeviceAndroidOpenSLES::SetPlayoutDevice(uint16_t index) {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.SetPlayoutDevice(index);
+#else
+  return -1;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::SetPlayoutDevice(
     AudioDeviceModule::WindowsDeviceType device) {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.SetPlayoutDevice(device);
+#else
+  return -1;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::SetRecordingDevice(uint16_t index) {
   return input_.SetRecordingDevice(index);
 }
 
 int32_t AudioDeviceAndroidOpenSLES::SetRecordingDevice(
     AudioDeviceModule::WindowsDeviceType device) {
   return input_.SetRecordingDevice(device);
 }
 
 int32_t AudioDeviceAndroidOpenSLES::PlayoutIsAvailable(
     bool& available) {  // NOLINT
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.PlayoutIsAvailable(available);
+#else
+  return -1;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::InitPlayout() {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.InitPlayout();
+#else
+  return -1;
+#endif
 }
 
 bool AudioDeviceAndroidOpenSLES::PlayoutIsInitialized() const {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.PlayoutIsInitialized();
+#else
+  return false;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::RecordingIsAvailable(
     bool& available) {  // NOLINT
   return input_.RecordingIsAvailable(available);
 }
 
 int32_t AudioDeviceAndroidOpenSLES::InitRecording() {
   return input_.InitRecording();
 }
 
 bool AudioDeviceAndroidOpenSLES::RecordingIsInitialized() const {
   return input_.RecordingIsInitialized();
 }
 
 int32_t AudioDeviceAndroidOpenSLES::StartPlayout() {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.StartPlayout();
+#else
+  return -1;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::StopPlayout() {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.StopPlayout();
+#else
+  return -1;
+#endif
 }
 
 bool AudioDeviceAndroidOpenSLES::Playing() const {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.Playing();
+#else
+  return false;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::StartRecording() {
   return input_.StartRecording();
 }
 
 int32_t AudioDeviceAndroidOpenSLES::StopRecording() {
   return input_.StopRecording();
@@ -146,25 +203,37 @@ int32_t AudioDeviceAndroidOpenSLES::SetW
 int32_t AudioDeviceAndroidOpenSLES::WaveOutVolume(
     uint16_t& volumeLeft,           // NOLINT
     uint16_t& volumeRight) const {  // NOLINT
   return -1;
 }
 
 int32_t AudioDeviceAndroidOpenSLES::SpeakerIsAvailable(
     bool& available) {  // NOLINT
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.SpeakerIsAvailable(available);
+#else
+  return -1;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::InitSpeaker() {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.InitSpeaker();
+#else
+  return -1;
+#endif
 }
 
 bool AudioDeviceAndroidOpenSLES::SpeakerIsInitialized() const {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.SpeakerIsInitialized();
+#else
+  return false;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::MicrophoneIsAvailable(
     bool& available) {  // NOLINT
   return input_.MicrophoneIsAvailable(available);
 }
 
 int32_t AudioDeviceAndroidOpenSLES::InitMicrophone() {
@@ -172,41 +241,65 @@ int32_t AudioDeviceAndroidOpenSLES::Init
 }
 
 bool AudioDeviceAndroidOpenSLES::MicrophoneIsInitialized() const {
   return input_.MicrophoneIsInitialized();
 }
 
 int32_t AudioDeviceAndroidOpenSLES::SpeakerVolumeIsAvailable(
     bool& available) {  // NOLINT
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.SpeakerVolumeIsAvailable(available);
+#else
+  return -1;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::SetSpeakerVolume(uint32_t volume) {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.SetSpeakerVolume(volume);
+#else
+  return -1;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::SpeakerVolume(
     uint32_t& volume) const {  // NOLINT
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.SpeakerVolume(volume);
+#else
+  return -1;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::MaxSpeakerVolume(
     uint32_t& maxVolume) const {  // NOLINT
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.MaxSpeakerVolume(maxVolume);
+#else
+  return -1;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::MinSpeakerVolume(
     uint32_t& minVolume) const {  // NOLINT
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.MinSpeakerVolume(minVolume);
+#else
+  return -1;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::SpeakerVolumeStepSize(
     uint16_t& stepSize) const {  // NOLINT
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.SpeakerVolumeStepSize(stepSize);
+#else
+  return -1;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::MicrophoneVolumeIsAvailable(
     bool& available) {  // NOLINT
   return input_.MicrophoneVolumeIsAvailable(available);
 }
 
 int32_t AudioDeviceAndroidOpenSLES::SetMicrophoneVolume(uint32_t volume) {
@@ -230,26 +323,38 @@ int32_t AudioDeviceAndroidOpenSLES::MinM
 
 int32_t AudioDeviceAndroidOpenSLES::MicrophoneVolumeStepSize(
     uint16_t& stepSize) const {  // NOLINT
   return input_.MicrophoneVolumeStepSize(stepSize);
 }
 
 int32_t AudioDeviceAndroidOpenSLES::SpeakerMuteIsAvailable(
     bool& available) {  // NOLINT
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.SpeakerMuteIsAvailable(available);
+#else
+  return -1;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::SetSpeakerMute(bool enable) {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.SetSpeakerMute(enable);
+#else
+  return -1;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::SpeakerMute(
     bool& enabled) const {  // NOLINT
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.SpeakerMute(enabled);
+#else
+  return -1;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::MicrophoneMuteIsAvailable(
     bool& available) {  // NOLINT
   return input_.MicrophoneMuteIsAvailable(available);
 }
 
 int32_t AudioDeviceAndroidOpenSLES::SetMicrophoneMute(bool enable) {
@@ -272,26 +377,38 @@ int32_t AudioDeviceAndroidOpenSLES::SetM
 
 int32_t AudioDeviceAndroidOpenSLES::MicrophoneBoost(
     bool& enabled) const {  // NOLINT
   return input_.MicrophoneBoost(enabled);
 }
 
 int32_t AudioDeviceAndroidOpenSLES::StereoPlayoutIsAvailable(
     bool& available) {  // NOLINT
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.StereoPlayoutIsAvailable(available);
+#else
+  return -1;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::SetStereoPlayout(bool enable) {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.SetStereoPlayout(enable);
+#else
+  return -1;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::StereoPlayout(
     bool& enabled) const {  // NOLINT
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.StereoPlayout(enabled);
+#else
+  return -1;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::StereoRecordingIsAvailable(
     bool& available) {  // NOLINT
   return input_.StereoRecordingIsAvailable(available);
 }
 
 int32_t AudioDeviceAndroidOpenSLES::SetStereoRecording(bool enable) {
@@ -301,80 +418,118 @@ int32_t AudioDeviceAndroidOpenSLES::SetS
 int32_t AudioDeviceAndroidOpenSLES::StereoRecording(
     bool& enabled) const {  // NOLINT
   return input_.StereoRecording(enabled);
 }
 
 int32_t AudioDeviceAndroidOpenSLES::SetPlayoutBuffer(
     const AudioDeviceModule::BufferType type,
     uint16_t sizeMS) {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.SetPlayoutBuffer(type, sizeMS);
+#else
+  return -1;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::PlayoutBuffer(
     AudioDeviceModule::BufferType& type,
     uint16_t& sizeMS) const {  // NOLINT
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.PlayoutBuffer(type, sizeMS);
+#else
+  return -1;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::PlayoutDelay(
     uint16_t& delayMS) const {  // NOLINT
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.PlayoutDelay(delayMS);
+#else
+  return -1;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::RecordingDelay(
     uint16_t& delayMS) const {  // NOLINT
   return input_.RecordingDelay(delayMS);
 }
 
 int32_t AudioDeviceAndroidOpenSLES::CPULoad(
     uint16_t& load) const {  // NOLINT
   return -1;
 }
 
 bool AudioDeviceAndroidOpenSLES::PlayoutWarning() const {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.PlayoutWarning();
+#else
+  return false;
+#endif
 }
 
 bool AudioDeviceAndroidOpenSLES::PlayoutError() const {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.PlayoutError();
+#else
+  return false;
+#endif
 }
 
 bool AudioDeviceAndroidOpenSLES::RecordingWarning() const {
   return input_.RecordingWarning();
 }
 
 bool AudioDeviceAndroidOpenSLES::RecordingError() const {
   return input_.RecordingError();
 }
 
 void AudioDeviceAndroidOpenSLES::ClearPlayoutWarning() {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.ClearPlayoutWarning();
+#else
+  return;
+#endif
 }
 
 void AudioDeviceAndroidOpenSLES::ClearPlayoutError() {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.ClearPlayoutError();
+#else
+  return;
+#endif
 }
 
 void AudioDeviceAndroidOpenSLES::ClearRecordingWarning() {
   return input_.ClearRecordingWarning();
 }
 
 void AudioDeviceAndroidOpenSLES::ClearRecordingError() {
   return input_.ClearRecordingError();
 }
 
 void AudioDeviceAndroidOpenSLES::AttachAudioBuffer(
     AudioDeviceBuffer* audioBuffer) {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   output_.AttachAudioBuffer(audioBuffer);
+#endif
   input_.AttachAudioBuffer(audioBuffer);
 }
 
 int32_t AudioDeviceAndroidOpenSLES::SetLoudspeakerStatus(bool enable) {
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.SetLoudspeakerStatus(enable);
+#else
+  return -1;
+#endif
 }
 
 int32_t AudioDeviceAndroidOpenSLES::GetLoudspeakerStatus(
     bool& enable) const {  // NOLINT
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   return output_.GetLoudspeakerStatus(enable);
+#else
+  return -1;
+#endif
 }
 
 }  // namespace webrtc
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_opensles_android.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/audio_device_opensles_android.h
@@ -150,15 +150,17 @@ class AudioDeviceAndroidOpenSLES : publi
   // Attach audio buffer
   virtual void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
 
   // Speaker audio routing
   virtual int32_t SetLoudspeakerStatus(bool enable);
   virtual int32_t GetLoudspeakerStatus(bool& enable) const;
 
  private:
+#ifdef WEBRTC_ANDROID_OPENSLES_OUTPUT
   OpenSlesOutput output_;
+#endif
   OpenSlesInput input_;
 };
 
 }  // namespace webrtc
 
 #endif  // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_OPENSLES_ANDROID_H_
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.cc
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.cc
@@ -549,16 +549,17 @@ bool OpenSlesInput::CbThreadImpl() {
   CriticalSectionScoped lock(crit_sect_.get());
   if (HandleOverrun(event_id, event_msg)) {
     return recording_;
   }
   // If the fifo_ has audio data process it.
   while (fifo_->size() > 0 && recording_) {
     int8_t* audio = fifo_->Pop();
     audio_buffer_->SetRecordedBuffer(audio, buffer_size_samples());
-    audio_buffer_->SetVQEData(delay_provider_->PlayoutDelayMs(),
+    audio_buffer_->SetVQEData(delay_provider_ ?
+                              delay_provider_->PlayoutDelayMs() : 0,
                               recording_delay_, 0);
     audio_buffer_->DeliverRecordedData();
   }
   return recording_;
 }
 
 }  // namespace webrtc
--- a/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.h
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/android/opensles_input.h
@@ -116,17 +116,17 @@ class OpenSlesInput {
   void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
 
  private:
   enum {
     kNumInterfaces = 2,
     // Keep as few OpenSL buffers as possible to avoid wasting memory. 2 is
     // minimum for playout. Keep 2 for recording as well.
     kNumOpenSlBuffers = 2,
-    kNum10MsToBuffer = 4,
+    kNum10MsToBuffer = 8,
   };
 
   int InitSampleRate();
   int buffer_size_samples() const;
   int buffer_size_bytes() const;
   void UpdateRecordingDelay();
   void UpdateSampleRate();
   void CalculateNumFifoBuffersNeeded();
--- a/media/webrtc/trunk/webrtc/modules/audio_device/audio_device.gypi
+++ b/media/webrtc/trunk/webrtc/modules/audio_device/audio_device.gypi
@@ -148,31 +148,38 @@
                     'opensl/fine_audio_buffer.cc',
                     'opensl/fine_audio_buffer.h',
                     'opensl/low_latency_event_posix.cc',
                     'opensl/low_latency_event.h',
                     'opensl/opensles_common.cc',
                     'opensl/opensles_common.h',
                     'opensl/opensles_input.cc',
                     'opensl/opensles_input.h',
-                    'opensl/opensles_output.cc',
                     'opensl/opensles_output.h',
                     'opensl/single_rw_fifo.cc',
                     'opensl/single_rw_fifo.h',
 		    'shared/audio_device_utility_shared.cc',
 		    'shared/audio_device_utility_shared.h',
                   ],
                 }, {
                   'sources': [
 		    'shared/audio_device_utility_shared.cc',
 		    'shared/audio_device_utility_shared.h',
 		    'android/audio_device_jni_android.cc',
 		    'android/audio_device_jni_android.h',
                   ],
                 }],
+                ['enable_android_opensl_output==1', {
+                  'sources': [
+                    'opensl/opensles_output.cc'
+                  ],
+                  'defines': [
+                    'WEBRTC_ANDROID_OPENSLES_OUTPUT',
+                  ]},
+                ],
               ],
             }],
             ['OS=="linux"', {
               'link_settings': {
                 'libraries': [
                   '-ldl','-lX11',
                 ],
               },
new file mode 100644
--- /dev/null
+++ b/mfbt/double-conversion/fix-aarch64-macro.patch
@@ -0,0 +1,23 @@
+Backport from upstream.
+
+https://code.google.com/p/double-conversion/source/detail?r=4e24bb31bcc76d6d218f3056b4c24a109d367561
+
+---
+ mfbt/double-conversion/utils.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mfbt/double-conversion/utils.h
++++ b/mfbt/double-conversion/utils.h
+@@ -58,11 +58,11 @@
+     defined(__mips__) || \
+     defined(__powerpc__) || defined(__ppc__) || defined(__ppc64__) || \
+     defined(__sparc__) || defined(__sparc) || defined(__s390__) || \
+     defined(__SH4__) || defined(__alpha__) || \
+     defined(_MIPS_ARCH_MIPS32R2) || \
+-    defined(_AARCH64EL_)
++    defined(__AARCH64EL__)
+ #define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1
+ #elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
+ #if defined(_WIN32)
+ // Windows uses a 64bit wide floating point stack.
+ #define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1
--- a/mfbt/double-conversion/update.sh
+++ b/mfbt/double-conversion/update.sh
@@ -15,9 +15,12 @@ cp $1/src/*.h ./
 
 # Source
 cp $1/src/*.cc ./
 
 patch -p3 < add-mfbt-api-markers.patch
 patch -p3 < use-StandardInteger.patch
 patch -p3 < use-mozilla-assertions.patch
 patch -p3 < use-static_assert.patch
-patch -p3 < ToPrecision-exponential.patch
\ No newline at end of file
+patch -p3 < ToPrecision-exponential.patch
+
+# Merged upstream, part of 2.0.1 version
+patch -p3 < fix-aarch64-macro.patch
--- a/mfbt/double-conversion/utils.h
+++ b/mfbt/double-conversion/utils.h
@@ -55,17 +55,17 @@
 #if defined(_M_X64) || defined(__x86_64__) || \
     defined(__ARMEL__) || defined(__avr32__) || \
     defined(__hppa__) || defined(__ia64__) || \
     defined(__mips__) || \
     defined(__powerpc__) || defined(__ppc__) || defined(__ppc64__) || \
     defined(__sparc__) || defined(__sparc) || defined(__s390__) || \
     defined(__SH4__) || defined(__alpha__) || \
     defined(_MIPS_ARCH_MIPS32R2) || \
-    defined(_AARCH64EL_)
+    defined(__AARCH64EL__)
 #define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1
 #elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
 #if defined(_WIN32)
 // Windows uses a 64bit wide floating point stack.
 #define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1
 #else
 #undef DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS
 #endif  // _WIN32
--- a/mobile/android/config/mozconfigs/android/debug
+++ b/mobile/android/config/mozconfigs/android/debug
@@ -1,13 +1,12 @@
 . "$topsrcdir/mobile/android/config/mozconfigs/common"
 
 # Global options
 ac_add_options --enable-debug
-ac_add_options --disable-unified-compilation
 
 # Build Fennec
 ac_add_options --enable-application=mobile/android
 
 # Android
 ac_add_options --target=arm-linux-androideabi
 
 if test `uname -m` = 'x86_64'; then
new file mode 100644
--- /dev/null
+++ b/mobile/android/config/mozconfigs/android/debug-nonunified
@@ -0,0 +1,3 @@
+. "$topsrcdir/mobile/android/config/mozconfigs/android/debug"
+
+ac_add_options --disable-unified-compilation
new file mode 100644
--- /dev/null
+++ b/mobile/android/config/mozconfigs/android/nightly-nonunified
@@ -0,0 +1,3 @@
+. "$topsrcdir/mobile/android/config/mozconfigs/android/nightly"
+
+ac_add_options --disable-unified-compilation
--- a/xpcom/reflect/xptcall/src/md/unix/moz.build
+++ b/xpcom/reflect/xptcall/src/md/unix/moz.build
@@ -144,16 +144,23 @@ if CONFIG['OS_ARCH'] == 'NetBSD':
     if CONFIG['OS_TEST'] in ('amiga', 'atari', 'hp300', 'mac68k', 'mvme68k',
                              'next68k', 'sun3', 'sun3x', 'x68k'):
         SOURCES += [
             'xptcinvoke_netbsd_m68k.cpp',
             'xptcstubs_netbsd_m68k.cpp'
         ]
 
 if CONFIG['OS_ARCH'] == 'Linux':
+    if CONFIG['OS_TEST'] == 'aarch64':
+        SOURCES += [
+            'xptcinvoke_aarch64.cpp',
+            'xptcinvoke_asm_aarch64.s',
+            'xptcstubs_aarch64.cpp',
+            'xptcstubs_asm_aarch64.s',
+        ]
     if CONFIG['OS_TEST'] == 'm68k':
         SOURCES += [
             'xptcinvoke_linux_m68k.cpp',
             'xptcstubs_linux_m68k.cpp',
         ]
     if CONFIG['OS_TEST'].find('mips') != -1:
         if CONFIG['OS_TEST'].find('mips64') != -1:
             SOURCES += [
new file mode 100644
--- /dev/null
+++ b/xpcom/reflect/xptcall/src/md/unix/xptcinvoke_aarch64.cpp
@@ -0,0 +1,136 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* Platform specific code to invoke XPCOM methods on native objects */
+
+#include "xptcprivate.h"
+
+#if !defined(__aarch64__)
+#error "This code is for Linux AArch64 only."
+#endif
+
+
+/* "Procedure Call Standard for the ARM 64-bit Architecture" document, sections
+ * "5.4 Parameter Passing" and "6.1.2 Procedure Calling" contain all the
+ * needed information.
+ *
+ * http://infocenter.arm.com/help/topic/com.arm.doc.ihi0042d/IHI0042D_aapcs.pdf
+ */
+
+#ifndef __AARCH64EL__
+#error "Only little endian compatibility was tested"
+#endif
+
+/*
+ * Allocation of integer function arguments initially to registers r1-r7
+ * and then to stack. Handling of 'that' argument which goes to register r0
+ * is handled separately and does not belong here.
+ *
+ * 'ireg_args'  - pointer to the current position in the buffer,
+ *                corresponding to the register arguments
+ * 'stack_args' - pointer to the current position in the buffer,
+ *                corresponding to the arguments on stack
+ * 'end'        - pointer to the end of the registers argument
+ *                buffer.
+ */
+static inline void alloc_word(uint64_t* &ireg_args,
+                              uint64_t* &stack_args,
+                              uint64_t* end,
+                              uint64_t  data)
+{
+    if (ireg_args < end) {
+        *ireg_args = data;
+        ireg_args++;
+    } else {
+        *stack_args = data;
+        stack_args++;
+    }
+}
+
+static inline void alloc_double(double* &freg_args,
+                                uint64_t* &stack_args,
+                                double* end,
+                                double  data)
+{
+    if (freg_args < end) {
+        *freg_args = data;
+        freg_args++;
+    } else {
+        memcpy(stack_args, &data, sizeof(data));
+        stack_args++;
+    }
+}
+
+static inline void alloc_float(double* &freg_args,
+                               uint64_t* &stack_args,
+                               double* end,
+                               float  data)
+{
+    if (freg_args < end) {
+        memcpy(freg_args, &data, sizeof(data));
+        freg_args++;
+    } else {
+        memcpy(stack_args, &data, sizeof(data));
+        stack_args++;
+    }
+}
+
+
+extern "C" void
+invoke_copy_to_stack(uint64_t* stk, uint64_t *end,
+                     uint32_t paramCount, nsXPTCVariant* s)
+{
+    uint64_t *ireg_args = stk;
+    uint64_t *ireg_end  = ireg_args + 8;
+    double *freg_args = (double *)ireg_end;
+    double *freg_end  = freg_args + 8;
+    uint64_t *stack_args = (uint64_t *)freg_end;
+
+    // leave room for 'that' argument in x0
+    ++ireg_args;
+
+    for (uint32_t i = 0; i < paramCount; i++, s++) {
+        if (s->IsPtrData()) {
+            alloc_word(ireg_args, stack_args, ireg_end, (uint64_t)s->ptr);
+            continue;
+        }
+        // According to the ABI, integral types that are smaller than 8 bytes
+        // are to be passed in 8-byte registers or 8-byte stack slots.
+        switch (s->type) {
+            case nsXPTType::T_FLOAT:
+                alloc_float(freg_args, stack_args, freg_end, s->val.f);
+                break;
+            case nsXPTType::T_DOUBLE:
+                alloc_double(freg_args, stack_args, freg_end, s->val.d);
+                break;
+            case nsXPTType::T_I8:  alloc_word(ireg_args, stack_args, ireg_end, s->val.i8);   break;
+            case nsXPTType::T_I16: alloc_word(ireg_args, stack_args, ireg_end, s->val.i16);  break;
+            case nsXPTType::T_I32: alloc_word(ireg_args, stack_args, ireg_end, s->val.i32);  break;
+            case nsXPTType::T_I64: alloc_word(ireg_args, stack_args, ireg_end, s->val.i64);  break;
+            case nsXPTType::T_U8:  alloc_word(ireg_args, stack_args, ireg_end, s->val.u8);   break;
+            case nsXPTType::T_U16: alloc_word(ireg_args, stack_args, ireg_end, s->val.u16);  break;
+            case nsXPTType::T_U32: alloc_word(ireg_args, stack_args, ireg_end, s->val.u32);  break;
+            case nsXPTType::T_U64: alloc_word(ireg_args, stack_args, ireg_end, s->val.u64);  break;
+            case nsXPTType::T_BOOL: alloc_word(ireg_args, stack_args, ireg_end, s->val.b);   break;
+            case nsXPTType::T_CHAR: alloc_word(ireg_args, stack_args, ireg_end, s->val.c);   break;
+            case nsXPTType::T_WCHAR: alloc_word(ireg_args, stack_args, ireg_end, s->val.wc); break;
+            default:
+                // all the others are plain pointer types
+                alloc_word(ireg_args, stack_args, ireg_end,
+                           reinterpret_cast<uint64_t>(s->val.p));
+                break;
+        }
+    }
+}
+
+extern "C" nsresult _NS_InvokeByIndex(nsISupports* that, uint32_t methodIndex,
+                                      uint32_t paramCount, nsXPTCVariant* params);
+
+EXPORT_XPCOM_API(nsresult)
+NS_InvokeByIndex(nsISupports* that, uint32_t methodIndex,
+                 uint32_t paramCount, nsXPTCVariant* params)
+{
+    return _NS_InvokeByIndex(that, methodIndex, paramCount, params);
+}
new file mode 100644
--- /dev/null
+++ b/xpcom/reflect/xptcall/src/md/unix/xptcinvoke_asm_aarch64.s
@@ -0,0 +1,67 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+        .section ".text"
+            .globl _NS_InvokeByIndex
+            .type  _NS_InvokeByIndex,@function
+
+/*
+ * _NS_InvokeByIndex(nsISupports* that, uint32_t methodIndex,
+ *                   uint32_t paramCount, nsXPTCVariant* params)
+ */
+
+_NS_InvokeByIndex:
+            # set up frame
+            stp         x29, x30, [sp,#-32]!
+            mov         x29, sp
+            stp         x19, x20, [sp,#16]
+
+            # save methodIndex across function calls
+            mov         w20, w1
+
+            # end of stack area passed to invoke_copy_to_stack
+            mov         x1, sp
+
+            # assume 8 bytes of stack for each argument with 16-byte alignment
+            add         w19, w2, #1
+            and         w19, w19, #0xfffffffe
+            sub         sp, sp, w19, uxth #3
+
+            # temporary place to store args passed in r0-r7,v0-v7
+            sub         sp, sp, #128
+
+            # save 'that' on stack
+            str         x0, [sp]
+
+            # start of stack area passed to invoke_copy_to_stack
+            mov         x0, sp
+            bl          invoke_copy_to_stack
+
+            # load arguments passed in r0-r7
+            ldp         x6, x7, [sp, #48]
+            ldp         x4, x5, [sp, #32]
+            ldp         x2, x3, [sp, #16]
+            ldp         x0, x1, [sp],#64
+
+            # load arguments passed in v0-v7
+            ldp         d6, d7, [sp, #48]
+            ldp         d4, d5, [sp, #32]
+            ldp         d2, d3, [sp, #16]
+            ldp         d0, d1, [sp],#64
+
+            # call the method
+            ldr         x16, [x0]
+            add         x16, x16, w20, uxth #3
+            ldr         x16, [x16]
+            blr         x16
+
+            add         sp, sp, w19, uxth #3
+            ldp         x19, x20, [sp,#16]
+            ldp         x29, x30, [sp],#32
+            ret
+
+            .size _NS_InvokeByIndex, . - _NS_InvokeByIndex
+
+
new file mode 100644
--- /dev/null
+++ b/xpcom/reflect/xptcall/src/md/unix/xptcstubs_aarch64.cpp
@@ -0,0 +1,219 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "xptcprivate.h"
+#include "xptiprivate.h"
+
+#ifndef __AARCH64EL__
+#error "Only little endian compatibility was tested"
+#endif
+
+/*
+ * This is for AArch64 ABI
+ *
+ * When we're called, the "gp" registers are stored in gprData and
+ * the "fp" registers are stored in fprData. Each array has 8 regs
+ * but first reg in gprData is a placeholder for 'self'.
+ */
+extern "C" nsresult
+PrepareAndDispatch(nsXPTCStubBase* self, uint32_t methodIndex, uint64_t* args,
+                   uint64_t *gprData, double *fprData)
+{
+#define PARAM_BUFFER_COUNT        16
+#define PARAM_GPR_COUNT            8
+#define PARAM_FPR_COUNT            8
+
+    nsXPTCMiniVariant paramBuffer[PARAM_BUFFER_COUNT];
+    nsXPTCMiniVariant* dispatchParams = NULL;
+    const nsXPTMethodInfo* info;
+    nsresult result = NS_ERROR_FAILURE;
+
+    NS_ASSERTION(self,"no self");
+
+    self->mEntry->GetMethodInfo(uint16_t(methodIndex), &info);
+    NS_ASSERTION(info,"no method info");
+
+    uint32_t paramCount = info->GetParamCount();
+
+    // setup variant array pointer
+    if (paramCount > PARAM_BUFFER_COUNT) {
+        dispatchParams = new nsXPTCMiniVariant[paramCount];
+    } else {
+        dispatchParams = paramBuffer;
+    }
+    NS_ASSERTION(dispatchParams,"no place for params");
+
+    uint64_t* ap = args;
+    uint32_t next_gpr = 1; // skip first arg which is 'self'
+    uint32_t next_fpr = 0;
+    for (uint32_t i = 0; i < paramCount; i++) {
+        const nsXPTParamInfo& param = info->GetParam(i);
+        const nsXPTType& type = param.GetType();
+        nsXPTCMiniVariant* dp = &dispatchParams[i];
+
+        if (param.IsOut() || !type.IsArithmetic()) {
+            if (next_gpr < PARAM_GPR_COUNT) {
+                dp->val.p = (void*)gprData[next_gpr++];
+            } else {
+                dp->val.p = (void*)*ap++;
+            }
+            continue;
+        }
+
+        switch (type) {
+            case nsXPTType::T_I8:
+                if (next_gpr < PARAM_GPR_COUNT) {
+                    dp->val.i8  = (int8_t)gprData[next_gpr++];
+                } else {
+                    dp->val.i8  = (int8_t)*ap++;
+                }
+                break;
+
+            case nsXPTType::T_I16:
+                if (next_gpr < PARAM_GPR_COUNT) {
+                    dp->val.i16  = (int16_t)gprData[next_gpr++];
+                } else {
+                    dp->val.i16  = (int16_t)*ap++;
+                }
+                break;
+
+            case nsXPTType::T_I32:
+                if (next_gpr < PARAM_GPR_COUNT) {
+                    dp->val.i32  = (int32_t)gprData[next_gpr++];
+                } else {
+                    dp->val.i32  = (int32_t)*ap++;
+                }
+                break;
+
+            case nsXPTType::T_I64:
+                if (next_gpr < PARAM_GPR_COUNT) {
+                    dp->val.i64  = (int64_t)gprData[next_gpr++];
+                } else {
+                    dp->val.i64  = (int64_t)*ap++;
+                }
+                break;
+
+            case nsXPTType::T_U8:
+                if (next_gpr < PARAM_GPR_COUNT) {
+                    dp->val.u8  = (uint8_t)gprData[next_gpr++];
+                } else {
+                    dp->val.u8  = (uint8_t)*ap++;
+                }
+                break;
+
+            case nsXPTType::T_U16:
+                if (next_gpr < PARAM_GPR_COUNT) {
+                    dp->val.u16  = (uint16_t)gprData[next_gpr++];
+                } else {
+                    dp->val.u16  = (uint16_t)*ap++;
+                }
+                break;
+
+            case nsXPTType::T_U32:
+                if (next_gpr < PARAM_GPR_COUNT) {
+                    dp->val.u32  = (uint32_t)gprData[next_gpr++];
+                } else {
+                    dp->val.u32  = (uint32_t)*ap++;
+                }
+                break;
+
+            case nsXPTType::T_U64:
+                if (next_gpr < PARAM_GPR_COUNT) {
+                    dp->val.u64  = (uint64_t)gprData[next_gpr++];
+                } else {
+                    dp->val.u64  = (uint64_t)*ap++;
+                }
+                break;
+
+            case nsXPTType::T_FLOAT:
+                if (next_fpr < PARAM_FPR_COUNT) {
+                    memcpy(&dp->val.f, &fprData[next_fpr++], sizeof(dp->val.f));
+                } else {
+                    memcpy(&dp->val.f, ap++, sizeof(dp->val.f));
+                }
+                break;
+
+            case nsXPTType::T_DOUBLE:
+                if (next_fpr < PARAM_FPR_COUNT) {
+                    memcpy(&dp->val.d, &fprData[next_fpr++], sizeof(dp->val.d));
+                } else {
+                    memcpy(&dp->val.d, ap++, sizeof(dp->val.d));
+                }
+                break;
+
+            case nsXPTType::T_BOOL:
+                if (next_gpr < PARAM_GPR_COUNT) {
+                    dp->val.b  = (bool)gprData[next_gpr++];
+                } else {
+                    dp->val.b  = (bool)*ap++;
+                }
+                break;
+
+            case nsXPTType::T_CHAR:
+                if (next_gpr < PARAM_GPR_COUNT) {
+                    dp->val.c  = (char)gprData[next_gpr++];
+                } else {
+                    dp->val.c  = (char)*ap++;
+                }
+                break;
+
+            case nsXPTType::T_WCHAR:
+                if (next_gpr < PARAM_GPR_COUNT) {
+                    dp->val.wc  = (wchar_t)gprData[next_gpr++];
+                } else {
+                    dp->val.wc  = (wchar_t)*ap++;
+                }
+                break;
+
+            default:
+                NS_ASSERTION(0, "bad type");
+                break;
+        }
+    }
+
+    result = self->mOuter->CallMethod((uint16_t)methodIndex, info, dispatchParams);
+
+    if (dispatchParams != paramBuffer) {
+        delete [] dispatchParams;
+    }
+
+    return result;
+}
+
+// Load w17 with the constant 'n' and branch to SharedStub().
+# define STUB_ENTRY(n)                                                  \
+    __asm__ (                                                           \
+            ".section \".text\" \n\t"                                   \
+            ".align 2\n\t"                                              \
+            ".if "#n" < 10 \n\t"                                        \
+            ".globl  _ZN14nsXPTCStubBase5Stub"#n"Ev \n\t"               \
+            ".hidden _ZN14nsXPTCStubBase5Stub"#n"Ev \n\t"               \
+            ".type   _ZN14nsXPTCStubBase5Stub"#n"Ev,@function \n\n"     \
+            "_ZN14nsXPTCStubBase5Stub"#n"Ev: \n\t"                      \
+            ".elseif "#n" < 100 \n\t"                                   \
+            ".globl  _ZN14nsXPTCStubBase6Stub"#n"Ev \n\t"               \
+            ".hidden _ZN14nsXPTCStubBase6Stub"#n"Ev \n\t"               \
+            ".type   _ZN14nsXPTCStubBase6Stub"#n"Ev,@function \n\n"     \
+            "_ZN14nsXPTCStubBase6Stub"#n"Ev: \n\t"                      \
+            ".elseif "#n" < 1000 \n\t"                                  \
+            ".globl  _ZN14nsXPTCStubBase7Stub"#n"Ev \n\t"               \
+            ".hidden _ZN14nsXPTCStubBase7Stub"#n"Ev \n\t"               \
+            ".type   _ZN14nsXPTCStubBase7Stub"#n"Ev,@function \n\n"     \
+            "_ZN14nsXPTCStubBase7Stub"#n"Ev: \n\t"                      \
+            ".else  \n\t"                                               \
+            ".err   \"stub number "#n" >= 1000 not yet supported\"\n"   \
+            ".endif \n\t"                                               \
+            "mov    w17,#"#n" \n\t"                                     \
+            "b      SharedStub \n"                                      \
+);
+
+#define SENTINEL_ENTRY(n)                              \
+    nsresult nsXPTCStubBase::Sentinel##n()             \
+{                                                      \
+    NS_ASSERTION(0,"nsXPTCStubBase::Sentinel called"); \
+    return NS_ERROR_NOT_IMPLEMENTED;                   \
+}
+
+#include "xptcstubsdef.inc"
new file mode 100644
--- /dev/null
+++ b/xpcom/reflect/xptcall/src/md/unix/xptcstubs_asm_aarch64.s
@@ -0,0 +1,39 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+            .set NGPREGS,8
+            .set NFPREGS,8
+
+            .section ".text"
+            .globl SharedStub
+            .hidden SharedStub
+            .type  SharedStub,@function
+SharedStub:
+            stp         x29, x30, [sp,#-16]!
+            mov         x29, sp
+
+            sub         sp, sp, #8*(NGPREGS+NFPREGS)
+            stp         x0, x1, [sp, #64+(0*8)]
+            stp         x2, x3, [sp, #64+(2*8)]
+            stp         x4, x5, [sp, #64+(4*8)]
+            stp         x6, x7, [sp, #64+(6*8)]
+            stp         d0, d1, [sp, #(0*8)]
+            stp         d2, d3, [sp, #(2*8)]
+            stp         d4, d5, [sp, #(4*8)]
+            stp         d6, d7, [sp, #(6*8)]
+
+            # methodIndex passed from stub
+            mov         w1, w17
+
+            add         x2, sp, #16+(8*(NGPREGS+NFPREGS))
+            add         x3, sp, #8*NFPREGS
+            add         x4, sp, #0
+
+            bl          PrepareAndDispatch
+
+            add         sp, sp, #8*(NGPREGS+NFPREGS)
+            ldp         x29, x30, [sp],#16
+            ret
+
+            .size SharedStub, . - SharedStub